diff --git a/.envrc.vars b/.envrc.vars index 26bc02584b..14ba7abc01 100644 --- a/.envrc.vars +++ b/.envrc.vars @@ -16,7 +16,7 @@ export PUBLIC_CONFIGS_PATH=${SPLICE_ROOT}/cluster/configs/configs # Increase code heap sizes to avoid issues # Defaults NonNMethodCodeHeapSize=7M,NonProfiledCodeHeapSize=122M,ProfiledCodeHeapSize=122M -export SBT_OPTS="-Xmx6G -Xms2G -Xss2M -XX:+UseG1GC -XX:NonNMethodCodeHeapSize=32M -XX:NonProfiledCodeHeapSize=256M -XX:ProfiledCodeHeapSize=256M -XX:ReservedCodeCacheSize=544M" +export SBT_OPTS="-Xmx8G -Xms2G -Xss2M -XX:+UseG1GC -XX:NonNMethodCodeHeapSize=32M -XX:NonProfiledCodeHeapSize=256M -XX:ProfiledCodeHeapSize=256M -XX:ReservedCodeCacheSize=544M" # Provide a simple way to get the path to `sbt-launch.jar` for IntelliJ setup export SBT_LAUNCH_PATH="$(dirname "$(dirname "$(which sbt)")")/share/sbt/bin/sbt-launch.jar" @@ -31,7 +31,6 @@ export POSTGRES_HOST="localhost" export POSTGRES_USER=postgres export POSTGRES_PASSWORD=postgres - # ** Docker&Helm registry configs export GHCR=ghcr.io @@ -47,7 +46,6 @@ export RELEASE_HELM_REGISTRY=$RELEASE_REGISTRY/helm export OCI_RELEASE_HELM_REGISTRY=oci://$RELEASE_HELM_REGISTRY export RELEASE_DOCKER_REGISTRY=$RELEASE_REGISTRY/docker - # ** Cluster deployment configs export CLOUDSDK_COMPUTE_REGION="us-central1" @@ -102,7 +100,6 @@ export SPLICE_OAUTH_DEV_CLIENT_ID_SPLITWELL_VALIDATOR=hqpZ6TP0wGyG2yYwhH6NLpuo0M export SPLICE_OAUTH_SV_TEST_AUTHORITY=canton-network-sv-test.us.auth0.com export SPLICE_OAUTH_SV_TEST_CLIENT_ID_VALIDATOR=bUfFRpl2tEfZBB7wzIo9iRNGTj8wMeIn - # Force auth through gke-gcloud-auth-plugin # See https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke export USE_GKE_GCLOUD_AUTH_PLUGIN=true diff --git a/.github/actions/tests/skip_on_static/action.yml b/.github/actions/tests/skip_on_static/action.yml index 0a3df88958..ee485599a6 100644 --- a/.github/actions/tests/skip_on_static/action.yml +++ b/.github/actions/tests/skip_on_static/action.yml @@ -26,6 +26,7 @@ runs: # is _before_ the approval already e.g. when an external contributor # created the PR and not when the maintainer approved it after adding the static label.
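# Note on the retry flags added below: `--retry 10 --retry-delay 10` re-issues the request up to 10 times with a fixed 10s pause (instead of curl's default exponential backoff), and `--retry-all-errors` also retries on errors curl does not classify as transient, e.g. 403s from GitHub's rate limiting.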
pr_labels=$(curl -sSL --fail-with-body -H "Authorization: Bearer ${{ inputs.gh_token }}" \ + --retry 10 --retry-delay 10 --retry-all-errors \ -H "Accept: application/vnd.github.v3+json" \ "${{ github.event.pull_request.url }}" | jq '.labels') echo "Pull request labels: $pr_labels" diff --git a/.github/workflows/assign_issues_external.yml b/.github/workflows/assign_issues_external.yml new file mode 100644 index 0000000000..39e898e3ce --- /dev/null +++ b/.github/workflows/assign_issues_external.yml @@ -0,0 +1,26 @@ +name: Auto-assign issues from external contributors + +on: + issues: + types: [opened] + +jobs: + assign: + runs-on: ubuntu-24.04 + steps: + - name: Assign issue from external contributors + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + script: | + const issue = context.payload.issue; + const author = issue.user.login.toLowerCase(); + const isInternalContributor = author.endsWith('-da') || author === 'cocreature'; + if (issue.assignees.length === 0 && !isInternalContributor) { + console.log('Assigning issue to the triage team...'); + await github.rest.issues.addAssignees({ + issue_number: issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + assignees: ['isegall-da', 'martinflorian-da', 'ray-roestenburg-da'], + }); + } diff --git a/LATEST_RELEASE b/LATEST_RELEASE index 9bd36d9d4c..2b2a18d265 100644 --- a/LATEST_RELEASE +++ b/LATEST_RELEASE @@ -1 +1 @@ -0.4.18 +0.4.20 diff --git a/MAINTENANCE.md b/MAINTENANCE.md index bdae005f8d..6067f1e224 100644 --- a/MAINTENANCE.md +++ b/MAINTENANCE.md @@ -14,16 +14,17 @@ ## Bumping Canton 1. Generate a patch file of the JSON API v2 OpenAPI definition by running `diff-openapi.sh` in `token-standard/dependencies/canton-json-api-v2/openapi/`. -2. Update the Canton Enterprise `version` in `nix/canton-sources.json`. The currently published versions on +2. Choose the Canton version you wish to upgrade to. The currently published versions on Artifactory can be found [here](https://digitalasset.jfrog.io/ui/repos/tree/General/canton-enterprise). -3. Update the `sha256` hash in the same file by first running `direnv reload` to make the hash validation fail - and using the 'got' hash printed by nix. This is usually easier and more accurate than copying the sha256 hash - displayed for the release version in Artifactory. -4. In case you have also made configuration changes to Canton in `simple-topology-canton.conf`, remember +3. Compute the hashes of the corresponding enterprise and OSS versions by running: `nix store prefetch-file --json --hash-type sha256 https://digitalasset.jfrog.io/artifactory/canton-enterprise/canton-enterprise-<version>.tar.gz | jq -r '.hash'` and + `nix store prefetch-file --json --hash-type sha256 https://www.canton.io/releases/canton-open-source-<version>.tar.gz | jq -r '.hash'` +4. Update the Canton version and hashes of the OSS and enterprise versions in `nix/canton-sources.json`. +5. In case you have also made configuration changes to Canton in `simple-topology-canton.conf`, remember to also make the corresponding changes for our cluster deployments. It is recommended to test any configuration changes on scratchnet first. -5. Update the OpenAPI definitions from step 1 by running `update-openapi.sh` in `token-standard/dependencies/canton-json-api-v2/openapi/`. -6. Cleanup the `openapi.patch` file. +6. Update the OpenAPI definitions from step 1 by running `update-openapi.sh` in `token-standard/dependencies/canton-json-api-v2/openapi/`. +7. Clean up the `openapi.patch` file.
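The two prefetch commands above can also be scripted. A minimal sketch (a hypothetical helper, not part of the repo; assumes `nix` and `jq` are on the PATH):

```scala
import scala.sys.process.*

// Prints the two hashes to paste into nix/canton-sources.json for a given Canton version.
object CantonHashes {
  private def prefetch(url: String): String =
    Seq("bash", "-c", s"nix store prefetch-file --json --hash-type sha256 '$url' | jq -r '.hash'").!!.trim

  def main(args: Array[String]): Unit = {
    val version = args.headOption.getOrElse(sys.error("usage: CantonHashes <canton-version>"))
    println("enterprise: " + prefetch(s"https://digitalasset.jfrog.io/artifactory/canton-enterprise/canton-enterprise-$version.tar.gz"))
    println("oss:        " + prefetch(s"https://www.canton.io/releases/canton-open-source-$version.tar.gz"))
  }
}
```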
Check `token-standard/dependencies/canton-json-api-v2/openapi/CHANGES.md` and apply any changes manually if CI breaks due to token standard CLI issues that appear to be caused by bad OpenAPI definitions. @@ -46,7 +47,7 @@ Initial setup: 1. Check out the [Canton **Open Source** repo](https://github.com/digital-asset/canton) 2. Define the environment variable used in the commands below using `export PATH_TO_CANTON_OSS=<path to your Canton OSS checkout>`. This can be added to your private env vars. -Current Canton commit: `0467621f75718cedee33887a535fab598954b639` +Current Canton commit: `79e645eb60ba378536a6d62cabbeab78d1be6c61` 1. Check out the **current Canton commit listed above** in the Canton open source repo from above, so we can diff our current fork against this checkout. 2. Change to your checkout of the Splice repo and execute the following steps: @@ -66,9 +67,11 @@ Current Canton commit: `0467621f75718cedee33887a535fab598954b639` 1. The current Canton commit in this `README.md` 2. If we're also updating the sdk version (this can lead to dar changes so we might skip it) 1. Set `version` in `CantonDependencies.scala` to the SDK version from Step 3.1 - 2. Set `sdk_version` in `nix/canton-sources.json` to the SDK release version from Step 3.1. - 3. Bump the sdk version in our own `daml.yaml` and `*.nix` files via `./set-sdk.sh $sdkversion` to the same Daml SDK version. - 4. Change the hashes for both the linux and macos releases in `daml2js.nix`. To do so change a character of the `sha256` digest (e.g. "ef..." -> "0f...") in `daml2js.nix`, + 2. Set `tooling_sdk_version` in `nix/canton-sources.json` to the SDK release version from Step 3.1. + 3. In [Daml releases](https://github.com/digital-asset/daml/releases), find the Daml release that is "based on SDK" for the SDK version from Step 3.1. Set `daml_release` in `nix/canton-sources.json` to that release. + 4. Bump the sdk version in our own `daml.yaml` and `*.nix` files via `./set-sdk.sh $sdkversion` to the same Daml SDK version. + 5. Change the hashes for both the linux and macos releases in `daml2js.nix`. To do so change a character of the `sha256` digest (e.g. "ef..." -> "0f...") in `daml2js.nix`, and then call `direnv reload` to make the hash validation fail. Adjust the `sha256` digest by copying back the new hash when Nix throws an error during validation. Note that nix may print the hash in base64, when you specified it in base16, or vice versa. Just copying the 'got' hash should work in either case. 6. Create another commit, `git add -A && git reset '*.rej' && git commit -s -m"Bump Canton commit and Canton/SDK versions" --no-verify` diff --git a/Makefile b/Makefile index f9344c01e9..aeb8be2162 100644 --- a/Makefile +++ b/Makefile @@ -22,7 +22,7 @@ wallet-payments-dar := ${SPLICE_ROOT}/daml/splice-wallet-payments/.daml/dist/spl build: $(app-bundle) $(load-tester) cluster/build ## Build the Splice app bundle and ensure cluster scripts are ready to run.
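# Note: `--client` (used below) routes the command through a long-lived sbt server, starting one on first use, instead of booting a fresh JVM per invocation, so consecutive make targets skip sbt's startup and JIT warm-up.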
$(app-bundle): $(canton-amulet-dar) $(wallet-payments-dar) - sbt --batch bundle + sbt --client --batch bundle $(canton-amulet-dar) $(wallet-payments-dar) &: sbt --batch 'splice-amulet-daml'/damlBuild 'splice-wallet-payments-daml'/damlBuild @@ -31,7 +31,7 @@ $(load-tester): cd "${SPLICE_ROOT}/load-tester" && npm ci && npm run build $(party-allocator): - sbt --batch 'party-allocator/npmBuild' + sbt --client --batch 'party-allocator/npmBuild' .PHONY: update-expected update-expected: cluster/pulumi/update-expected @@ -52,13 +52,13 @@ clean: cluster/clean .PHONY: clean-all clean-all: clean ## Completely clean all local build state, including model codegen. - sbt --batch clean-splice + sbt --client --batch clean-splice find . -type d -name ".daml" -exec rm -rf {} + find . -type d -name "target" -exec rm -rf {} + .PHONY: format format: cluster/format ## Automatically reformat and apply scalaFix to source code - sbt --batch formatFix + sbt --client --batch formatFix .PHONY: help help: ## Show list of available make targets diff --git a/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/console/SpliceInstanceReference.scala b/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/console/SpliceInstanceReference.scala index f7f94dffe8..d5de8bd42e 100644 --- a/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/console/SpliceInstanceReference.scala +++ b/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/console/SpliceInstanceReference.scala @@ -16,6 +16,7 @@ import org.lfdecentralizedtrust.splice.environment.{ } import org.lfdecentralizedtrust.splice.util.HasHealth import com.daml.scalautil.Statement.discard +import com.digitalasset.canton.LfPackageId import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand import com.digitalasset.canton.admin.api.client.data.NodeStatus import com.digitalasset.canton.config.NonNegativeDuration @@ -40,6 +41,8 @@ import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.participant.config.RemoteParticipantConfig import com.digitalasset.canton.synchronizer.sequencer.config.RemoteSequencerConfig import com.digitalasset.canton.topology.NodeIdentity +import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId +import com.digitalasset.canton.topology.transaction.VettedPackage import java.io.File import scala.concurrent.ExecutionContext @@ -250,10 +253,25 @@ class ParticipantClientReference( def upload_dar_unless_exists( path: String ): Unit = { - val hash = DarParser.assertReadArchiveFromFile(new File(path)).main.getHash + val dar = DarParser.assertReadArchiveFromFile(new File(path)) + val hash = dar.main.getHash val pkgs = this.ledger_api.packages.list() if (!pkgs.map(_.packageId).contains(hash)) { - discard[String](this.dars.upload(path)) + discard[String](this.dars.upload(path, vetAllPackages = false)) + val connected = this.synchronizers.list_connected() + if (connected.isEmpty) { + logger.error(s"Trying to vet $path on ${this.id} but not connected to any synchronizer") + } + connected.foreach { sync => + this.topology.vetted_packages.propose_delta( + this.id, + adds = dar.all + .map(p => LfPackageId.assertFromString(p.getHash)) + .distinct + .map(VettedPackage(_, None, None)), + store = TopologyStoreId.Synchronizer(sync.synchronizerId), + ) + } } } } diff --git a/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/console/SvAppReference.scala b/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/console/SvAppReference.scala index 93839f9e98..c28bcb755d 100644 --- 
a/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/console/SvAppReference.scala +++ b/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/console/SvAppReference.scala @@ -343,10 +343,10 @@ class SvAppBackendReference( } @Help.Summary("Prepare a validator onboarding and return an onboarding secret (via admin API)") - def prepareValidatorOnboarding(expiresIn: FiniteDuration): String = + def prepareValidatorOnboarding(expiresIn: FiniteDuration, partyHint: Option[String]): String = consoleEnvironment.run { httpCommand( - HttpSvAdminAppClient.PrepareValidatorOnboarding(expiresIn) + HttpSvAdminAppClient.PrepareValidatorOnboarding(expiresIn, partyHint) ) } diff --git a/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/ScanApps.scala b/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/ScanApps.scala index a3a44e992c..7039334ce1 100644 --- a/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/ScanApps.scala +++ b/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/ScanApps.scala @@ -10,12 +10,10 @@ import com.digitalasset.canton.concurrent.ExecutionContextIdlenessExecutorServic import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.environment.ManagedNodes import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.resource.DbMigrationsFactory /** Scan app instances. */ class ScanApps( create: (String, ScanAppBackendConfig) => ScanAppBootstrap, - migrationsFactory: DbMigrationsFactory, _timeouts: ProcessingTimeout, configs: Map[String, ScanAppBackendConfig], parametersFor: String => SharedSpliceAppParameters, @@ -30,7 +28,6 @@ class ScanApps( ScanAppBootstrap, ]( create, - migrationsFactory, _timeouts, configs, parametersFor, diff --git a/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/SpliceConsoleEnvironment.scala b/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/SpliceConsoleEnvironment.scala index 1baeb45c45..22d4b42e71 100644 --- a/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/SpliceConsoleEnvironment.scala +++ b/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/SpliceConsoleEnvironment.scala @@ -11,6 +11,8 @@ import com.digitalasset.canton.console.{ NodeReferences, StandardConsoleOutput, } +import com.digitalasset.daml.lf.data.Ref.PackageId +import com.digitalasset.daml.lf.typesig.PackageSignature import org.apache.pekko.actor.ActorSystem import org.lfdecentralizedtrust.splice.config.SpliceConfig import org.lfdecentralizedtrust.splice.console.* @@ -31,16 +33,11 @@ class SpliceConsoleEnvironment( override type Config = SpliceConfig - val packageSignatures = ResourceTemplateDecoder.loadPackageSignaturesFromResources( - DarResources.TokenStandard.allPackageResources.flatMap(_.all) ++ - DarResources.splitwell.all ++ - DarResources.validatorLifecycle.all ++ - DarResources.wallet.all ++ - DarResources.amulet.all ++ - DarResources.dsoGovernance.all - ) implicit val actorSystem: ActorSystem = environment.actorSystem - val templateDecoder = new ResourceTemplateDecoder(packageSignatures, environment.loggerFactory) + private lazy val templateDecoder = new ResourceTemplateDecoder( + SpliceConsoleEnvironment.packageSignatures, + environment.loggerFactory, + ) lazy val httpCommandRunner: ConsoleHttpCommandRunner = new ConsoleHttpCommandRunner( environment, @@ -341,3 +338,17 @@ class SpliceConsoleEnvironment( case _ => 5 } } + +object SpliceConsoleEnvironment { + + private lazy val 
packageSignatures: Map[PackageId, PackageSignature] = + ResourceTemplateDecoder.loadPackageSignaturesFromResources( + DarResources.TokenStandard.allPackageResources.flatMap(_.all) ++ + DarResources.splitwell.all ++ + DarResources.validatorLifecycle.all ++ + DarResources.wallet.all ++ + DarResources.amulet.all ++ + DarResources.dsoGovernance.all + ) + +} diff --git a/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/SpliceEnvironment.scala b/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/SpliceEnvironment.scala index 7781794f64..8cbd4ce876 100644 --- a/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/SpliceEnvironment.scala +++ b/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/SpliceEnvironment.scala @@ -9,7 +9,6 @@ import com.digitalasset.canton.console.ConsoleOutput import com.digitalasset.canton.environment.* import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.participant.CommunityParticipantNodeBootstrapFactory -import com.digitalasset.canton.resource.CommunityDbMigrationsMetaFactory import com.digitalasset.canton.synchronizer.mediator.CommunityMediatorNodeBootstrapFactory import com.digitalasset.canton.synchronizer.sequencer.CommunitySequencerNodeBootstrapFactory import org.lfdecentralizedtrust.splice.config.SpliceConfig @@ -34,7 +33,6 @@ class SpliceEnvironment( CommunityParticipantNodeBootstrapFactory, CommunitySequencerNodeBootstrapFactory, CommunityMediatorNodeBootstrapFactory, - new CommunityDbMigrationsMetaFactory(loggerFactory), loggerFactory, ) { @@ -73,7 +71,6 @@ class SpliceEnvironment( lazy val validators = new ValidatorApps( createValidator, - migrationsFactoryFactory.create(clock), timeouts, config.validatorsByString, config.tryValidatorAppParametersByString, @@ -105,7 +102,6 @@ class SpliceEnvironment( lazy val svs = new SvApps( createSv, - migrationsFactoryFactory.create(clock), timeouts, config.svsByString, config.trySvAppParametersByString, @@ -137,7 +133,6 @@ class SpliceEnvironment( lazy val scans = new ScanApps( createScan, - migrationsFactoryFactory.create(clock), timeouts, config.scansByString, config.tryScanAppParametersByString, @@ -169,7 +164,6 @@ class SpliceEnvironment( lazy val splitwells = new SplitwellApps( createSplitwell, - migrationsFactoryFactory.create(clock), timeouts, config.splitwellsByString, config.trySplitwellAppParametersByString, diff --git a/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/SplitwellApps.scala b/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/SplitwellApps.scala index f4113581fb..31abf8facf 100644 --- a/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/SplitwellApps.scala +++ b/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/SplitwellApps.scala @@ -10,12 +10,10 @@ import com.digitalasset.canton.concurrent.ExecutionContextIdlenessExecutorServic import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.environment.ManagedNodes import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.resource.DbMigrationsFactory /** Splitwell app instances. 
*/ class SplitwellApps( create: (String, SplitwellAppBackendConfig) => SplitwellAppBootstrap, - migrationsFactory: DbMigrationsFactory, _timeouts: ProcessingTimeout, configs: Map[String, SplitwellAppBackendConfig], parametersFor: String => SharedSpliceAppParameters, @@ -30,7 +28,6 @@ class SplitwellApps( SplitwellAppBootstrap, ]( create, - migrationsFactory, _timeouts, configs, parametersFor, diff --git a/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/SvApps.scala b/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/SvApps.scala index e0135b7106..c611811fa1 100644 --- a/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/SvApps.scala +++ b/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/SvApps.scala @@ -10,12 +10,10 @@ import com.digitalasset.canton.concurrent.ExecutionContextIdlenessExecutorServic import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.environment.ManagedNodes import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.resource.DbMigrationsFactory /** SV app instances. */ class SvApps( create: (String, SvAppBackendConfig) => SvAppBootstrap, - migrationsFactory: DbMigrationsFactory, _timeouts: ProcessingTimeout, configs: Map[String, SvAppBackendConfig], parametersFor: String => SharedSpliceAppParameters, @@ -30,7 +28,6 @@ class SvApps( SvAppBootstrap, ]( create, - migrationsFactory, _timeouts, configs, parametersFor, diff --git a/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/ValidatorApps.scala b/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/ValidatorApps.scala index fbe1c1eb0b..1aafa5a496 100644 --- a/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/ValidatorApps.scala +++ b/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/environment/ValidatorApps.scala @@ -10,12 +10,10 @@ import com.digitalasset.canton.concurrent.ExecutionContextIdlenessExecutorServic import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.environment.ManagedNodes import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.resource.DbMigrationsFactory /** Validator app instances. 
*/ class ValidatorApps( create: (String, ValidatorAppBackendConfig) => ValidatorAppBootstrap, - migrationsFactory: DbMigrationsFactory, _timeouts: ProcessingTimeout, configs: Map[String, ValidatorAppBackendConfig], parametersFor: String => SharedSpliceAppParameters, @@ -30,7 +28,6 @@ class ValidatorApps( ValidatorAppBootstrap, ]( create, - migrationsFactory, _timeouts, configs, parametersFor, diff --git a/apps/app/src/test/resources/include/mediators.conf b/apps/app/src/test/resources/include/mediators.conf index 32690d168d..a390d9a51d 100644 --- a/apps/app/src/test/resources/include/mediators.conf +++ b/apps/app/src/test/resources/include/mediators.conf @@ -13,4 +13,7 @@ _mediator_template { # so that mediator will not block other sequencers from pruning even there is lack of activities # ref: https://github.com/DACH-NY/canton/issues/16371#issuecomment-1885005687 time-tracker.min-observation-duration = 10s + sequencer-client { + use-new-connection-pool = false + } } diff --git a/apps/app/src/test/resources/include/participants.conf b/apps/app/src/test/resources/include/participants.conf index 6471129d29..c755ddc403 100644 --- a/apps/app/src/test/resources/include/participants.conf +++ b/apps/app/src/test/resources/include/participants.conf @@ -23,6 +23,10 @@ _participant_template { journal-garbage-collection-delay = 24h # Extra assertions that we want to turn on in integration tests but not in prod. engine.enable-additional-consistency-checks = true + # TODO(DACH-NY/canton-network-internal#2050) Remove once ACS commitment processing does not block anymore. + do-not-await-on-checking-incoming-commitments = true + # Bump batch size to make acs imports faster + batching.max-acs-import-batch-size = 10000 } admin-api { @@ -37,6 +41,7 @@ _participant_template { sequencer-client { # Use a higher number of in flight batches to increase throughput maximum-in-flight-event-batches = 50 + use-new-connection-pool = false } # TODO(DACH-NY/canton-network-node#8331) Tune cache sizes @@ -69,5 +74,14 @@ _participant_template { } } + admin-api.stream.limits { + "com.digitalasset.canton.admin.participant.v30.ParticipantRepairService/ExportAcsOld": 1, + "com.digitalasset.canton.admin.participant.v30.PartyManagementService/ExportAcsOld": 1, + "com.digitalasset.canton.admin.participant.v30.ParticipantRepairService/ImportAcsOld": 1, + "com.digitalasset.canton.admin.participant.v30.ParticipantRepairService/ImportAcs": 1, + "com.digitalasset.canton.topology.admin.v30.TopologyManagerReadService/ExportTopologySnapshot": 1, + "com.digitalasset.canton.topology.admin.v30.TopologyManagerReadService/ExportTopologySnapshotV2": 1 + } + topology.broadcast-batch-size = 1 } diff --git a/apps/app/src/test/resources/include/self-hosted-validator-disable-json-api.conf b/apps/app/src/test/resources/include/self-hosted-validator-disable-json-api.conf index c7b7037054..b17526d94f 100644 --- a/apps/app/src/test/resources/include/self-hosted-validator-disable-json-api.conf +++ b/apps/app/src/test/resources/include/self-hosted-validator-disable-json-api.conf @@ -1 +1 @@ -canton.participants.validatorParticipant.http-ledger-api = null +canton.participants.validatorParticipant.http-ledger-api.enabled = false diff --git a/apps/app/src/test/resources/include/sequencers.conf b/apps/app/src/test/resources/include/sequencers.conf index 9917f7e27c..f50f24a962 100644 --- a/apps/app/src/test/resources/include/sequencers.conf +++ b/apps/app/src/test/resources/include/sequencers.conf @@ -22,6 +22,9 @@ _sequencer_reference_template { 
generate-topology-transactions-and-keys = false identity.type = manual } + sequencer-client { + use-new-connection-pool = false + } sequencer { config { storage = ${_shared.storage} @@ -39,9 +42,16 @@ _sequencer_reference_template { # so that sequencer will not block other sequencers from pruning even there is lack of activities # ref: https://github.com/DACH-NY/canton/issues/16371#issuecomment-1885005687 time-tracker.min-observation-duration = 10s - # TODO(#1324) Reenable once Canton forward ports - # parameters.sequencer-api-limits = { - # "com.digitalasset.canton.sequencer.api.v30.SequencerService/DownloadTopologyStateForInit" : 3, - # "com.digitalasset.canton.sequencer.api.v30.SequencerService/SubscribeV2" : 1000, - # } + public-api.stream.limits = { + "com.digitalasset.canton.sequencer.api.v30.SequencerService/DownloadTopologyStateForInit" : 3, + "com.digitalasset.canton.sequencer.api.v30.SequencerService/Subscribe" : 1000, + } + admin-api.stream.limits { + "com.digitalasset.canton.topology.admin.v30.TopologyManagerReadService/GenesisState": 1, + "com.digitalasset.canton.topology.admin.v30.TopologyManagerReadService/GenesisStateV2": 1, + "com.digitalasset.canton.topology.admin.v30.TopologyManagerReadService/ExportTopologySnapshot": 1, + "com.digitalasset.canton.topology.admin.v30.TopologyManagerReadService/ExportTopologySnapshotV2": 1, + "com.digitalasset.canton.sequencer.admin.v30.SequencerAdministrationService/OnboardingState": 1, + "com.digitalasset.canton.sequencer.admin.v30.SequencerAdministrationService/OnboardingStateV2": 1 + } } diff --git a/apps/app/src/test/resources/include/svs/_sv.conf b/apps/app/src/test/resources/include/svs/_sv.conf index cbc73d9208..1d7c232ae9 100644 --- a/apps/app/src/test/resources/include/svs/_sv.conf +++ b/apps/app/src/test/resources/include/svs/_sv.conf @@ -48,7 +48,6 @@ time-tracker-min-observation-duration = 10s # Set to a lower value (the Canton default) than the 5s default to make tests faster. 
time-tracker-observation-latency = 250ms - topology-change-delay-duration = 250ms # Don't want to sleep in tests max-vetting-delay = 0s parameters { diff --git a/apps/app/src/test/resources/simple-topology-canton-simtime.conf b/apps/app/src/test/resources/simple-topology-canton-simtime.conf index 49ac307dc0..7f28520381 100644 --- a/apps/app/src/test/resources/simple-topology-canton-simtime.conf +++ b/apps/app/src/test/resources/simple-topology-canton-simtime.conf @@ -3,9 +3,6 @@ include required("simple-topology-canton.conf") _participant_template.testing-time.type = monotonic-time -# Ensure that the test does not use an outdated topology timestamp -# see also https://github.com/hyperledger-labs/splice/issues/2036 -_participant_template.parameters.reassignments-config.time-proof-freshness-proportion = 0 # disable circuit breaker as it's based on block time vs wall clock time _sequencer_reference_template.sequencer.block.circuit-breaker.enabled = false diff --git a/apps/app/src/test/resources/simple-topology-canton.conf b/apps/app/src/test/resources/simple-topology-canton.conf index 51cd380dbd..87f226abce 100644 --- a/apps/app/src/test/resources/simple-topology-canton.conf +++ b/apps/app/src/test/resources/simple-topology-canton.conf @@ -3,10 +3,6 @@ include required("include/sequencers.conf") include required("include/mediators.conf") include required("include/participants.conf") -# Ensure that the test does not use an outdated topology timestamp -# see also https://github.com/hyperledger-labs/splice/issues/2036 -_participant_template.parameters.reassignments-config.time-proof-freshness-proportion = 0 - _sv1Participant_client { admin-api.port = 5102 ledger-api.port = 5101 diff --git a/apps/app/src/test/resources/standalone-participant-extra.conf b/apps/app/src/test/resources/standalone-participant-extra.conf index a749e28ef8..1d4dbc96c2 100644 --- a/apps/app/src/test/resources/standalone-participant-extra.conf +++ b/apps/app/src/test/resources/standalone-participant-extra.conf @@ -14,7 +14,7 @@ canton { user-management-service.additional-admin-user-id = ${EXTRA_PARTICIPANT_ADMIN_USER} } storage.config.properties.databaseName = ${EXTRA_PARTICIPANT_DB} - http-ledger-api = null + http-ledger-api.enabled = false } extraStandaloneParticipant.admin-api.port = ${?EXTRA_PARTICIPANT_ADMIN_API_PORT} diff --git a/apps/app/src/test/resources/standalone-participant-second-extra.conf b/apps/app/src/test/resources/standalone-participant-second-extra.conf index 5601c8c78f..f6281f9fdc 100644 --- a/apps/app/src/test/resources/standalone-participant-second-extra.conf +++ b/apps/app/src/test/resources/standalone-participant-second-extra.conf @@ -13,7 +13,7 @@ canton { port = 27701 user-management-service.additional-admin-user-id = ${SECOND_EXTRA_PARTICIPANT_ADMIN_USER} } - http-ledger-api = null + http-ledger-api.enabled = false storage.config.properties.databaseName = ${SECOND_EXTRA_PARTICIPANT_DB} } } diff --git a/apps/app/src/test/resources/standalone-participant-sv4.conf b/apps/app/src/test/resources/standalone-participant-sv4.conf index 83912aae1a..e34bc35fd0 100644 --- a/apps/app/src/test/resources/standalone-participant-sv4.conf +++ b/apps/app/src/test/resources/standalone-participant-sv4.conf @@ -13,7 +13,7 @@ canton { port = 27401 user-management-service.additional-admin-user-id = ${SV4_ADMIN_USER} } - http-ledger-api = null + http-ledger-api.enabled = false storage.config.properties.databaseName = "participant_sv4_standalone" } diff --git 
a/apps/app/src/test/resources/standalone-participants-sv123.conf b/apps/app/src/test/resources/standalone-participants-sv123.conf index f58a48048e..f4b520af24 100644 --- a/apps/app/src/test/resources/standalone-participants-sv123.conf +++ b/apps/app/src/test/resources/standalone-participants-sv123.conf @@ -14,7 +14,7 @@ canton { port = 27101 user-management-service.additional-admin-user-id = ${SV1_ADMIN_USER} } - http-ledger-api = null + http-ledger-api.enabled = false storage.config.properties.databaseName = "participant_sv1_standalone" } @@ -28,7 +28,7 @@ canton { port = 27201 user-management-service.additional-admin-user-id = ${SV2_ADMIN_USER} } - http-ledger-api = null + http-ledger-api.enabled = false storage.config.properties.databaseName = "participant_sv2_standalone" } @@ -42,7 +42,7 @@ canton { port = 27301 user-management-service.additional-admin-user-id = ${SV3_ADMIN_USER} } - http-ledger-api = null + http-ledger-api.enabled = false storage.config.properties.databaseName = "participant_sv3_standalone" } diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/EnvironmentDefinition.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/EnvironmentDefinition.scala index 8f69a44b80..92adfa4688 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/EnvironmentDefinition.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/EnvironmentDefinition.scala @@ -16,6 +16,7 @@ import com.digitalasset.canton.integration.{ TestEnvironment, } import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, SuppressingLogger} +import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId import com.digitalasset.canton.topology.{ForceFlag, ForceFlags} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.daml.lf.data.Ref.PackageVersion @@ -168,15 +169,28 @@ case class EnvironmentDefinition( preSetup = implicit env => { this.preSetup(env) participants(env).foreach { p => - logger.info(s"Removing all vetted packages for ${p.name}")(TraceContext.empty) - p.topology.vetted_packages.propose( - p.id, - Seq.empty, - force = ForceFlags( - ForceFlag.AllowUnvetPackage, - ForceFlag.AllowUnvetPackageWithActiveContracts, - ), - ) + p.synchronizers.list_connected().foreach { connected => + val currentVettedPackages = p.topology.vetted_packages.list( + store = Some(TopologyStoreId.Synchronizer(connected.synchronizerId)), + filterParticipant = p.id.filterString, + ) + currentVettedPackages match { + case Seq(mapping) if mapping.item.packages.length > 1 => + logger.info( + s"Removing all vetted packages for ${p.name} on ${connected.synchronizerId}" + )(TraceContext.empty) + p.topology.vetted_packages.propose( + p.id, + Seq.empty, + force = ForceFlags(ForceFlag.AllowUnvetPackageWithActiveContracts), + store = TopologyStoreId.Synchronizer(connected.synchronizerId), + ) + case _ => + logger.info(s"No vetted packages for ${p.name} on ${connected.synchronizerId}")( + TraceContext.empty + ) + } + } } participants(env).foreach { p => logger.info(s"Ensuring vetting topology is effective for ${p.name}")(TraceContext.empty) diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/DecentralizedSynchronizerMigrationIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/DecentralizedSynchronizerMigrationIntegrationTest.scala index 9adf1b8903..fc35bdd2bb 100644 --- 
a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/DecentralizedSynchronizerMigrationIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/DecentralizedSynchronizerMigrationIntegrationTest.scala @@ -14,7 +14,7 @@ import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.ledger.api.IdentityProviderConfig import com.digitalasset.canton.logging.SuppressionRule import com.digitalasset.canton.sequencing.GrpcSequencerConnection -import com.digitalasset.canton.topology.PartyId +import com.digitalasset.canton.topology.{ForceFlag, ForceFlags, PartyId} import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId import com.digitalasset.canton.util.FutureInstances.parallelFuture import com.digitalasset.canton.util.HexString @@ -423,7 +423,7 @@ class DecentralizedSynchronizerMigrationIntegrationTest val onboarding @ OnboardingResult(externalParty, _, _) = onboardExternalParty(validatorBackend) walletClient.tap(50.0) - createTransferPreapprovalIfNotExists(walletClient) + createTransferPreapprovalEnsuringItExists(walletClient, validatorBackend) createAndAcceptExternalPartySetupProposal(validatorBackend, onboarding) eventually() { validatorBackend.lookupTransferPreapprovalByParty(externalParty) should not be empty @@ -491,8 +491,11 @@ class DecentralizedSynchronizerMigrationIntegrationTest ), enableBftSequencer = true, )() { - aliceValidatorBackend.participantClient.upload_dar_unless_exists(splitwellDarPath) val aliceUserParty = startValidatorAndTapAmulet(aliceValidatorBackend, aliceWalletClient) + // Upload after starting validator which connects to global + // synchronizers as upload_dar_unless_exists vets on all + // connected synchronizers. + aliceValidatorBackend.participantClient.upload_dar_unless_exists(splitwellDarPath) val charlieUserParty = onboardWalletUser(charlieWalletClient, aliceValidatorBackend) val splitwellGroupKey = createSplitwellGroupAndTransfer(aliceUserParty, charlieUserParty) val externalPartyOnboarding = clue("Create external party and transfer 40 amulet to it") { @@ -601,11 +604,12 @@ class DecentralizedSynchronizerMigrationIntegrationTest // reset to not crash other tests { clue( - s"reset confirmationRequestsMaxRate to ${domainDynamicParams.confirmationRequestsMaxRate} to not crash other tests" + s"reset domain parameters to old values confirmationRequestsMaxRate=${domainDynamicParams.confirmationRequestsMaxRate},mediatorReactionTimeout=${domainDynamicParams.mediatorReactionTimeout}" ) { - changeDomainRatePerParticipant( + changeDomainParameters( allNodes.map(_.oldParticipantConnection), domainDynamicParams.confirmationRequestsMaxRate, + domainDynamicParams.mediatorReactionTimeout, ) } deleteDirectoryRecursively(migrationDumpDir.toFile) @@ -1128,9 +1132,10 @@ class DecentralizedSynchronizerMigrationIntegrationTest } } - private def changeDomainRatePerParticipant( + private def changeDomainParameters( nodes: Seq[ParticipantAdminConnection], - rate: NonNegativeInt, + confirmationRequestsMaxRate: NonNegativeInt, + mediatorReactionTimeout: com.digitalasset.canton.time.NonNegativeFiniteDuration, )(implicit env: SpliceTestConsoleEnvironment, ec: ExecutionContextExecutor, @@ -1140,7 +1145,11 @@ class DecentralizedSynchronizerMigrationIntegrationTest node .ensureDomainParameters( decentralizedSynchronizerId, - _.tryUpdate(confirmationRequestsMaxRate = rate), + _.tryUpdate( + confirmationRequestsMaxRate = confirmationRequestsMaxRate, + mediatorReactionTimeout = 
mediatorReactionTimeout, ), + forceChanges = ForceFlags(ForceFlag.AllowOutOfBoundsValue), ) } .futureValue @@ -1364,6 +1373,8 @@ class DecentralizedSynchronizerMigrationIntegrationTest Map("fake-key-1" -> "fake-value-1"), s"fake-idp-enabled-${suffix}", false, + executeAs = Set(someParties(0)), + executeAsAnyParty = true, ) if (createNewParties) { participant.ledger_api.users.create( diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/RecoverExternalPartyIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/RecoverExternalPartyIntegrationTest.scala index 8be53b080d..3f34aa1614 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/RecoverExternalPartyIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/RecoverExternalPartyIntegrationTest.scala @@ -180,7 +180,7 @@ class RecoverExternalPartyIntegrationTest // Tap so we have money for creating the preapproval bobValidatorWalletClient.tap(5000.0) - createTransferPreapprovalIfNotExists(bobValidatorWalletClient) + createTransferPreapprovalEnsuringItExists(bobValidatorWalletClient, bobValidatorBackend) // Grant bob's validator backend the rights to prepare transactions // and submit signed transactions on behalf of the party. diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanTimeBasedIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanTimeBasedIntegrationTest.scala index b6e1bec57d..22f80e2be6 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanTimeBasedIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanTimeBasedIntegrationTest.scala @@ -2,7 +2,6 @@ package org.lfdecentralizedtrust.splice.integration.tests import com.daml.ledger.javaapi.data.codegen.json.JsonLfReader import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.topology.PartyId import org.lfdecentralizedtrust.splice.codegen.java.splice.amulet.Amulet import org.lfdecentralizedtrust.splice.codegen.java.splice.ans.AnsEntry import org.lfdecentralizedtrust.splice.config.ConfigTransforms @@ -219,104 +218,68 @@ class ScanTimeBasedIntegrationTest ) }) - def compareLeaderboard( - result: Seq[(PartyId, BigDecimal)], - expected: Seq[(WalletAppClientReference, BigDecimal)], - ) = { - result shouldBe expected.map((v) => - (Codec.decode(Codec.Party)((v._1.userStatus().party)).value, v._2) - ) - } def walletClientParty(walletClient: WalletAppClientReference) = Codec.decode(Codec.Party)(walletClient.userStatus().party).value - + val latestRound = baseRoundWithLatestData + 4 actAndCheck( "Advance three more rounds, for the previous rounds to close (where rewards were collected)", - Range(0, 4).foreach(_ => advanceRoundsByOneTick), + Range(0, 3).foreach(_ => advanceRoundsByOneTick), )( - s"Test leaderboards for ends of rounds ${firstRound + 4} and ${firstRound + 5}", + s"Test leaderboards for latest round ${latestRound}", _ => { val ledgerTime = getLedgerTime.toInstant sv1ScanBackend.automation.trigger[ScanAggregationTrigger].runOnce().futureValue - sv1ScanBackend.getRoundOfLatestData() should be((baseRoundWithLatestData + 5, ledgerTime)) - - // TODO(#805): consider de-hard-coding the expected values here somehow, e.g.
by only checking them relative to each other - val appRewardsBobR3 = BigDecimal(4.2000000000) - val appRewardsAliceR3 = BigDecimal(3.8400000000) - val validatorRewardsBobR3 = BigDecimal(1.4000000000) - val validatorRewardsAliceR3 = BigDecimal(1.2800000000) + sv1ScanBackend.getRoundOfLatestData() should be((latestRound, ledgerTime)) - (baseRoundWithLatestData.toInt to baseRoundWithLatestData.toInt + 3).map { round => - sv1ScanBackend.getRewardsCollectedInRound(round.toLong) - }.sum shouldBe appRewardsAliceR3 + appRewardsBobR3 + validatorRewardsAliceR3 + validatorRewardsBobR3 val aliceValidatorWalletClientParty = walletClientParty(aliceValidatorWalletClient).toProtoPrimitive val bobValidatorWalletClientParty = walletClientParty(bobValidatorWalletClient).toProtoPrimitive - clue("Compare leaderboard getTopProvidersByAppRewards + 3") { + clue("Compare leaderboard getTopProvidersByAppRewards latestRound") { sv1ScanBackend - .listRoundPartyTotals(firstRound, baseRoundWithLatestData + 3) + .listRoundPartyTotals(firstRound, latestRound) .map { rpt => + // only keeps latest closed round and app rewards for that round per party rpt.party -> (rpt.closedRound, BigDecimal(rpt.cumulativeAppRewards)) } - .filter { case (p, (_, appRewards)) => - appRewards > 0 && (p == aliceValidatorWalletClientParty || p == bobValidatorWalletClientParty) + .filter { case (p, (cr, appRewards)) => + appRewards > 0 && (p == aliceValidatorWalletClientParty || p == bobValidatorWalletClientParty) && cr == latestRound } - .toMap should contain theSameElementsAs Map( - aliceValidatorWalletClientParty -> (baseRoundWithLatestData + 3, appRewardsAliceR3), - bobValidatorWalletClientParty -> (baseRoundWithLatestData + 3, appRewardsBobR3), - ) - - compareLeaderboard( - sv1ScanBackend.getTopProvidersByAppRewards(baseRoundWithLatestData + 3, 10), - Seq( - (bobValidatorWalletClient, appRewardsBobR3), - (aliceValidatorWalletClient, appRewardsAliceR3), - ), - ) + .sortBy(_._2._2)(Ordering.BigDecimal.reverse) + .map(_._1) should contain theSameElementsInOrderAs (Seq( + bobValidatorWalletClientParty, + aliceValidatorWalletClientParty, + )) + sv1ScanBackend + .getTopProvidersByAppRewards(latestRound, 10) + .map(_._1.toProtoPrimitive) shouldBe (Seq( + bobValidatorWalletClientParty, + aliceValidatorWalletClientParty, + )) } - clue("Compare leaderboard getTopValidatorsByValidatorRewards + 3") { + clue("Compare leaderboard getTopValidatorsByValidatorRewards latestRound") { sv1ScanBackend - .listRoundPartyTotals(firstRound + 0, baseRoundWithLatestData + 3) + .listRoundPartyTotals(firstRound, latestRound) .map { rpt => + // only keeps latest closed round and validator rewards for that round per party rpt.party -> (rpt.closedRound, BigDecimal(rpt.cumulativeValidatorRewards)) } - .filter { case (p, (_, validatorRewards)) => - validatorRewards > 0 && (p == aliceValidatorWalletClientParty || p == bobValidatorWalletClientParty) + .filter { case (p, (cr, validatorRewards)) => + validatorRewards > 0 && (p == aliceValidatorWalletClientParty || p == bobValidatorWalletClientParty) && cr == latestRound } - .toMap should contain theSameElementsAs Map( - aliceValidatorWalletClientParty -> (baseRoundWithLatestData + 3, validatorRewardsAliceR3), - bobValidatorWalletClientParty -> (baseRoundWithLatestData + 3, validatorRewardsBobR3), - ) + .sortBy(_._2._2)(Ordering.BigDecimal.reverse) + .map(_._1) should contain theSameElementsInOrderAs (Seq( + bobValidatorWalletClientParty, + aliceValidatorWalletClientParty, )) - compareLeaderboard(
sv1ScanBackend.getTopValidatorsByValidatorRewards(baseRoundWithLatestData + 3, 10), - Seq( - (bobValidatorWalletClient, validatorRewardsBobR3), - (aliceValidatorWalletClient, validatorRewardsAliceR3), - ), - ) - } - clue("Compare leaderboard getTopProvidersByAppRewards + 4") { - compareLeaderboard( - sv1ScanBackend.getTopProvidersByAppRewards(baseRoundWithLatestData + 4, 10), - Seq( - // TODO(#805): consider de-hard-coding the expected values here - (bobValidatorWalletClient, BigDecimal(8.4060000000)), - (aliceValidatorWalletClient, BigDecimal(7.6860000000)), - ), - ) - } - clue("Compare leaderboard getTopValidatorsByValidatorRewards + 4") { - compareLeaderboard( - sv1ScanBackend.getTopValidatorsByValidatorRewards(baseRoundWithLatestData + 4, 10), - Seq( - // TODO(#805): consider de-hard-coding the expected values here - (bobValidatorWalletClient, BigDecimal(2.8020000000)), - (aliceValidatorWalletClient, BigDecimal(2.5620000000)), - ), - ) + sv1ScanBackend + .getTopValidatorsByValidatorRewards(latestRound, 10) + .map(_._1.toProtoPrimitive) should contain theSameElementsInOrderAs (Seq( + bobValidatorWalletClientParty, + aliceValidatorWalletClientParty, + )) } }, ) diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanTotalSupplyBigQueryIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanTotalSupplyBigQueryIntegrationTest.scala index 86ebfc59f6..9061725b1a 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanTotalSupplyBigQueryIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanTotalSupplyBigQueryIntegrationTest.scala @@ -10,7 +10,7 @@ import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.topology.PartyId import com.digitalasset.daml.lf.data.Time.Timestamp as LfTimestamp import com.google.cloud.bigquery as bq -import bq.{Field, JobInfo, Schema, TableId} +import bq.{Field, FieldValueList, JobInfo, Schema, TableId, TableResult} import bq.storage.v1.{JsonStreamWriter, TableSchema} import slick.jdbc.canton.ActionBasedSQLInterpolation.Implicits.* import slick.jdbc.GetResult @@ -83,6 +83,7 @@ class ScanTotalSupplyBigQueryIntegrationTest // The peak is 17 transactions in a (simulated) minute, or 0.28333 tps over a minute, // so we assert 15-20 transactions, or 0.25-0.34 tps private val peakTps = (0.25, 0.34) + private val totalRounds = 4 override def beforeAll() = { super.beforeAll() @@ -152,12 +153,14 @@ class ScanTotalSupplyBigQueryIntegrationTest createBigQueryFunctions() } - val results = withClue("running total supply queries in BigQuery") { - runTotalSupplyQueries() + withClue("testing total supply queries in BigQuery") { + val results = runDashboardQueries() + verifyDashboardResults(results) } - withClue(s"verify total supply results") { - verifyResults(results) + withClue("testing finance queries") { + val results = runFinanceQueries() + verifyFinanceResults(results) } } @@ -523,16 +526,31 @@ class ScanTotalSupplyBigQueryIntegrationTest job.waitFor() } - /** Runs the total supply queries from the SQL file + private def runFinanceQueries()(implicit env: FixtureParam): FinanceMetrics = { + val project = bigquery.getOptions.getProjectId + // The TPS query assumes staleness of up to 4 hours, so we query for stats 5 hours after the current ledger time. 
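// Worked example of the offset (illustrative values, not from the test data): with a ledger time of // 2024-06-01T00:00:00Z the query timestamp below becomes 2024-06-01T05:00:00Z, one hour past the // 4-hour staleness window, so the table functions are guaranteed to see every row the test has written.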
+ val timestamp = getLedgerTime.toInstant.plus(5, ChronoUnit.HOURS).toString + logger.info(s"Querying all dashboard stats as of $timestamp") + val sql = + s"SELECT * FROM `$project.$functionsDatasetName.all_finance_stats`('$timestamp', 0);" + + parseFinanceResults(runTableSqlQuery(sql)) + } + + /** Runs the dashboard queries from the SQL file */ - private def runTotalSupplyQueries()(implicit env: FixtureParam): ExpectedMetrics = { + private def runDashboardQueries()(implicit env: FixtureParam): DashboardMetrics = { val project = bigquery.getOptions.getProjectId // The TPS query assumes staleness of up to 4 hours, so we query for stats 5 hours after the current ledger time. val timestamp = getLedgerTime.toInstant.plus(5, ChronoUnit.HOURS).toString + logger.info(s"Querying all dashboard stats as of $timestamp") val sql = s"SELECT * FROM `$project.$functionsDatasetName.all_dashboard_stats`('$timestamp', 0);" - logger.info(s"Querying all stats as of $timestamp") + parseDashboardResults(runTableSqlQuery(sql)) + } + + private def runTableSqlQuery(sql: String): TableResult = { // Execute the query val queryConfig = bq.QueryJobConfiguration @@ -549,11 +567,15 @@ class ScanTotalSupplyBigQueryIntegrationTest job.waitFor() // results should be available now - val result = job.getQueryResults() - parseQueryResults(result) + job.getQueryResults() } - private case class ExpectedMetrics( + private case class FinanceMetrics( + // Most metrics reuse the same code as the dashboard computation, so we don't bother validating them again + latestRound: Long + ) + + private case class DashboardMetrics( locked: BigDecimal, unlocked: BigDecimal, currentSupplyTotal: BigDecimal, @@ -572,51 +594,64 @@ class ScanTotalSupplyBigQueryIntegrationTest avgCoinPrice: BigDecimal, ) - private def parseQueryResults(result: bq.TableResult) = { - // We expect the final query to return a single row with all metrics - val row = result.iterateAll().iterator().next() - logger.debug(s"Query row: $row; schema ${result.getSchema}") + private def required(row: FieldValueList, column: String) = { + val field = row get column + if (field.isNull) + fail(s"Column '$column' in all-stats results is null") + field + } - def required(column: String) = { - val field = row get column - if (field.isNull) - fail(s"Column '$column' in all-stats results is null") - field - } + def bd(row: FieldValueList, column: String) = { + BigDecimal(required(row, column).getStringValue) + } - def bd(column: String) = { - BigDecimal(required(column).getStringValue) - } + def int(row: FieldValueList, column: String) = { + required(row, column).getLongValue + } - def int(column: String) = { - required(column).getLongValue - } + def float(row: FieldValueList, column: String) = { + required(row, column).getDoubleValue + } - def float(column: String) = { - required(column).getDoubleValue - } + private def parseFinanceResults(result: bq.TableResult) = { + val row = result.iterateAll().iterator().next() + logger.debug(s"Query row: $row; schema ${result.getSchema}") + + FinanceMetrics( + latestRound = int(row, "latest_round") + ) + } + + private def parseDashboardResults(result: bq.TableResult) = { + // We expect the final query to return a single row with all metrics + val row = result.iterateAll().iterator().next() + logger.debug(s"Query row: $row; schema ${result.getSchema}") - ExpectedMetrics( - locked = bd("locked"), - unlocked = bd("unlocked"), - currentSupplyTotal = bd("current_supply_total"), - unminted = bd("unminted"), - mintedAppRewards =
bd("daily_mint_app_rewards"), - mintedValidatorRewards = bd("daily_mint_validator_rewards"), - mintedSvRewards = bd("daily_mint_sv_rewards"), - mintedUnclaimed = bd("daily_mint_unclaimed_activity_records"), - burned = bd("daily_burn"), - numAmuletHolders = int("num_amulet_holders"), - numActiveValidators = int("num_active_validators"), - avgTps = float("average_tps"), - peakTps = float("peak_tps"), - minCoinPrice = bd("daily_min_coin_price"), - maxCoinPrice = bd("daily_max_coin_price"), - avgCoinPrice = bd("daily_avg_coin_price"), + DashboardMetrics( + locked = bd(row, "locked"), + unlocked = bd(row, "unlocked"), + currentSupplyTotal = bd(row, "current_supply_total"), + unminted = bd(row, "unminted"), + mintedAppRewards = bd(row, "daily_mint_app_rewards"), + mintedValidatorRewards = bd(row, "daily_mint_validator_rewards"), + mintedSvRewards = bd(row, "daily_mint_sv_rewards"), + mintedUnclaimed = bd(row, "daily_mint_unclaimed_activity_records"), + burned = bd(row, "daily_burn"), + numAmuletHolders = int(row, "num_amulet_holders"), + numActiveValidators = int(row, "num_active_validators"), + avgTps = float(row, "average_tps"), + peakTps = float(row, "peak_tps"), + minCoinPrice = bd(row, "daily_min_coin_price"), + maxCoinPrice = bd(row, "daily_max_coin_price"), + avgCoinPrice = bd(row, "daily_avg_coin_price"), ) } - private def verifyResults(results: ExpectedMetrics): Unit = { + private def verifyFinanceResults(results: FinanceMetrics): Unit = { + results.latestRound shouldBe totalRounds withClue "total_rounds" + } + + private def verifyDashboardResults(results: DashboardMetrics): Unit = { // Verify individual metrics forEvery( Seq( diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/SplitwellUpgradeIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/SplitwellUpgradeIntegrationTest.scala index 4497ff467c..5605a244fd 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/SplitwellUpgradeIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/SplitwellUpgradeIntegrationTest.scala @@ -12,10 +12,13 @@ import org.lfdecentralizedtrust.splice.util.{MultiDomainTestUtil, SplitwellTestU import com.digitalasset.canton.logging.SuppressionRule import com.digitalasset.canton.topology.{SynchronizerId, PartyId} +import org.scalatest.Ignore import org.slf4j.event.Level import scala.concurrent.duration.DurationInt import scala.util.Try +// TODO(#2703) Reenable or delete +@Ignore class SplitwellUpgradeIntegrationTest extends IntegrationTestWithSharedEnvironment with MultiDomainTestUtil diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/SvFrontendIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/SvFrontendIntegrationTest.scala index 8d3b0dfed1..e38ea5fc1b 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/SvFrontendIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/SvFrontendIntegrationTest.scala @@ -104,14 +104,29 @@ class SvFrontendIntegrationTest _ => { find(className("onboarding-secret-table")) should not be empty val rows = findAll(className("onboarding-secret-table-row")).toSeq + find(id("create-party-hint")) should not be empty find(id("create-validator-onboarding-secret")) should not be empty rows.size }, ) val (_, newSecret) = actAndCheck( - "click on the button to create an 
onboarding secret", { - click on "create-validator-onboarding-secret" + "fill the party hint field and click on the button to create an onboarding secret", { + clue("fill party hint") { + inside(find(id("create-party-hint"))) { case Some(element) => + element.underlying.sendKeys("splice-client-2") + } + } + + clue("wait for the create button to become enabled") { + eventually() { + find(id("create-validator-onboarding-secret")).value.isEnabled shouldBe true + } + } + + clue("click the create validator onboarding secret button") { + click on "create-validator-onboarding-secret" + } }, )( "a new secret row is added", @@ -125,7 +140,7 @@ class SvFrontendIntegrationTest ) val licenseRows = getLicensesTableRows - val newValidatorParty = allocateRandomSvParty("validatorX") + val newValidatorParty = allocateRandomSvParty("splice-client", Some(2)) actAndCheck( "onboard new validator using the secret", diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/SvOnboardingIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/SvOnboardingIntegrationTest.scala index 5e8c133eee..15c0ca1dc6 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/SvOnboardingIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/SvOnboardingIntegrationTest.scala @@ -107,9 +107,12 @@ class SvOnboardingIntegrationTest extends SvIntegrationTestBase { val svParty = sv.getDsoInfo().svParty val sv1Party = sv1Backend.getDsoInfo().svParty sv.listOngoingValidatorOnboardings() should have length 0 + + val name = "dummy" + env.environment.config.name.getOrElse("") + val (secret, onboardingContract) = actAndCheck( "the sv operator prepares the onboarding", { - sv.prepareValidatorOnboarding(1.hour) + sv.prepareValidatorOnboarding(1.hour, Some(name)) }, )( "a validator onboarding contract is created", @@ -153,6 +156,7 @@ class SvOnboardingIntegrationTest extends SvIntegrationTestBase { ValidatorOnboardingSecret( sv1Party, onboardingContract.payload.candidateSecret, + None, ).toApiResponse, contactPoint, ), @@ -161,6 +165,23 @@ class SvOnboardingIntegrationTest extends SvIntegrationTestBase { ), ) } + + clue("try to onboard with a secret that has an incorrect party hint") { + val partyId = PartyId + .fromProtoPrimitive("invalid-name-1::dummy", "partyId") + .getOrElse(sys.error("Could not parse PartyId")) + + assertThrowsAndLogsCommandFailures( + sv.onboardValidator( + partyId, + onboardingContract.payload.candidateSecret, + contactPoint, + ), + _.errorMessage should include( + s"The onboarding secret entered does not match the secret issued for validatorPartyHint: $name" + ), + ) + } actAndCheck( "request to onboard the candidate", sv.onboardValidator(candidate, secret, s"${candidate.uid.identifier}@example.com"), diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/SvTimeBasedRewardCouponIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/SvTimeBasedRewardCouponIntegrationTest.scala index 1a324eec68..2a8b5448c0 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/SvTimeBasedRewardCouponIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/SvTimeBasedRewardCouponIntegrationTest.scala @@ -28,6 +28,7 @@ import org.lfdecentralizedtrust.splice.wallet.store.TxLogEntry.TransferTransacti import 
com.digitalasset.canton.config.RequireTypes.NonNegativeLong import com.digitalasset.canton.config.CantonRequireTypes.InstanceName import com.digitalasset.canton.logging.SuppressionRule +import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId import com.digitalasset.canton.topology.{ForceFlag, ForceFlags, PartyId} import com.digitalasset.canton.topology.transaction.VettedPackage import com.digitalasset.daml.lf.data.Ref.PackageId @@ -109,7 +110,7 @@ class SvTimeBasedRewardCouponIntegrationTest eventually() { val vettedByAlice = aliceValidatorBackend.participantClientWithAdminToken.topology.vetted_packages - .list() + .list(Some(TopologyStoreId.Synchronizer(decentralizedSynchronizerId))) .flatMap( _.item.packages.map(_.packageId) ) @@ -329,8 +330,11 @@ class SvTimeBasedRewardCouponIntegrationTest aliceValidatorBackend.participantClient.topology.vetted_packages.propose_delta( aliceParticipantId, removes = Seq(PackageId.assertFromString(latestAmuletPackageId)), - force = - ForceFlags(ForceFlag.AllowUnvetPackage, ForceFlag.AllowUnvetPackageWithActiveContracts), + force = ForceFlags( + ForceFlag.AllowUnvettedDependencies, + ForceFlag.AllowUnvetPackageWithActiveContracts, + ), + store = TopologyStoreId.Synchronizer(decentralizedSynchronizerId), ), )( "Alice's participant has unvetted the latest amulet package, and SV4 is aware of that", @@ -379,6 +383,7 @@ class SvTimeBasedRewardCouponIntegrationTest aliceValidatorBackend.participantClient.topology.vetted_packages.propose_delta( aliceParticipantId, adds = Seq(VettedPackage(PackageId.assertFromString(latestAmuletPackageId), None, None)), + store = TopologyStoreId.Synchronizer(decentralizedSynchronizerId), ) }, )( diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/TokenStandardAllocationIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/TokenStandardAllocationIntegrationTest.scala index 7ca831d0b8..119d9c507e 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/TokenStandardAllocationIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/TokenStandardAllocationIntegrationTest.scala @@ -1,7 +1,7 @@ package org.lfdecentralizedtrust.splice.integration.tests -import com.daml.ledger.api.v2.value.Identifier import com.daml.ledger.javaapi.data.CreatedEvent +import com.digitalasset.canton.admin.api.client.data.TemplateId import com.digitalasset.canton.HasExecutionContext import com.digitalasset.canton.topology.PartyId import org.lfdecentralizedtrust.splice.codegen.java.splice.testing.apps.tradingapp @@ -90,7 +90,7 @@ class TokenStandardAllocationIntegrationTest participantClient.ledger_api.state.acs.of_party( party = sender, filterInterfaces = Seq(holdingv1.Holding.TEMPLATE_ID).map(templateId => - Identifier( + TemplateId( templateId.getPackageId, templateId.getModuleName, templateId.getEntityName, @@ -305,7 +305,7 @@ class TokenStandardAllocationIntegrationTest splitwellValidatorBackend.participantClient.ledger_api.state.acs.of_party( party = allocatedOtcTrade.venueParty, filterInterfaces = Seq(allocationv1.Allocation.TEMPLATE_ID).map(templateId => - Identifier( + TemplateId( templateId.getPackageId, templateId.getModuleName, templateId.getEntityName, diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/TokenStandardCliTestDataTimeBasedIntegrationTest.scala 
b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/TokenStandardCliTestDataTimeBasedIntegrationTest.scala index c76a6e7e4a..e721ccd169 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/TokenStandardCliTestDataTimeBasedIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/TokenStandardCliTestDataTimeBasedIntegrationTest.scala @@ -198,8 +198,8 @@ class TokenStandardCliTestDataTimeBasedIntegrationTest val aliceValidator = RichPartyId.local(aliceValidatorBackend.getValidatorPartyId()) aliceValidatorWalletClient.tap(BigDecimal(1000)) - createTransferPreapprovalIfNotExists(aliceWalletClient) - createTransferPreapprovalIfNotExists(aliceValidatorWalletClient) + createTransferPreapprovalEnsuringItExists(aliceWalletClient, aliceValidatorBackend) + createTransferPreapprovalEnsuringItExists(aliceValidatorWalletClient, aliceValidatorBackend) val charlieParty = onboardWalletUser(charlieWalletClient, aliceValidatorBackend) @@ -549,7 +549,10 @@ class TokenStandardCliTestDataTimeBasedIntegrationTest // TransferIn (derived by tx-kind), while making sure that charlie has no leftovers val charlieAmount = 500.0 charlieWalletClient.tap(walletAmuletToUsd(charlieAmount)) - createTransferPreapprovalIfNotExists(aliceWalletClient) // it was deleted before + createTransferPreapprovalEnsuringItExists( + aliceWalletClient, + aliceValidatorBackend, + ) // it was deleted before charlieWalletClient.transferPreapprovalSend( alice.partyId, charlieAmount - 11, diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/TokenStandardTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/TokenStandardTest.scala index 47440b2e61..adc4f60336 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/TokenStandardTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/TokenStandardTest.scala @@ -1,8 +1,8 @@ package org.lfdecentralizedtrust.splice.integration.tests import com.daml.ledger.api.v2 -import com.daml.ledger.api.v2.value.Identifier import com.daml.ledger.javaapi +import com.digitalasset.canton.admin.api.client.data.TemplateId import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.topology.PartyId import org.lfdecentralizedtrust.splice.codegen.java.splice.api.token.allocationrequestv1.AllocationRequestView @@ -155,7 +155,7 @@ trait TokenStandardTest extends ExternallySignedPartyTestUtil { participantClient.ledger_api.state.acs.of_party( party = party, filterInterfaces = Seq(holdingv1.Holding.TEMPLATE_ID).map(templateId => - Identifier( + TemplateId( templateId.getPackageId, templateId.getModuleName, templateId.getEntityName, @@ -190,7 +190,7 @@ trait TokenStandardTest extends ExternallySignedPartyTestUtil { party = party, filterInterfaces = Seq(transferinstructionv1.TransferInstruction.TEMPLATE_ID).map(templateId => - Identifier( + TemplateId( templateId.getPackageId, templateId.getModuleName, templateId.getEntityName, diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ValidatorIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ValidatorIntegrationTest.scala index 16c31d8c51..4ce94c7da3 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ValidatorIntegrationTest.scala +++ 
b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ValidatorIntegrationTest.scala @@ -12,6 +12,7 @@ import org.lfdecentralizedtrust.splice.integration.tests.SpliceTests.Integration import org.lfdecentralizedtrust.splice.util.WalletTestUtil import org.lfdecentralizedtrust.splice.validator.config.ValidatorAppBackendConfig import com.digitalasset.canton.config.CantonRequireTypes.InstanceName +import com.digitalasset.canton.config.NonNegativeFiniteDuration import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.logging.SuppressionRule import com.digitalasset.canton.sequencing.SubmissionRequestAmplification @@ -21,6 +22,7 @@ import org.apache.pekko.http.scaladsl.Http import org.apache.pekko.http.scaladsl.client.RequestBuilding.{Get, Post} import org.apache.pekko.http.scaladsl.model.StatusCodes import org.apache.pekko.http.scaladsl.model.headers.{Authorization, OAuth2BearerToken} +import org.lfdecentralizedtrust.splice.config.ConfigTransforms import org.slf4j.event.Level import scala.concurrent.Future @@ -70,6 +72,13 @@ class ValidatorIntegrationTest extends IntegrationTest with WalletTestUtil { ) }) }) + // The topology metrics trigger is disabled by default. + // Enable it here to check that it starts and runs without errors + .addConfigTransform((_, config) => + ConfigTransforms.updateAllAutomationConfigs( + _.copy(topologyMetricsPollingInterval = Some(NonNegativeFiniteDuration.ofSeconds(1))) + )(config) + ) "start and restart cleanly" in { implicit env => initDsoWithSv1Only() @@ -492,12 +501,14 @@ class ValidatorIntegrationTest extends IntegrationTest with WalletTestUtil { } - "support existing party with invalid hit" in { implicit env => + "support existing party with invalid hint" in { implicit env => initDsoWithSv1Only() val validator = v(invalidValidator) val participantClientWithAdminToken = validator.participantClientWithAdminToken + val partyId = + PartyId.tryCreate(validatorPartyHint, participantClientWithAdminToken.id.namespace) participantClientWithAdminToken.topology.party_to_participant_mappings.propose( - PartyId.tryCreate(validatorPartyHint, participantClientWithAdminToken.id.namespace), + partyId, Seq( ( participantClientWithAdminToken.id, @@ -505,6 +516,15 @@ class ValidatorIntegrationTest extends IntegrationTest with WalletTestUtil { ) ), ) + + clue("party is seen on the ledger api") { + eventually() { + participantClientWithAdminToken.ledger_api.parties + .list() + .exists(_.party == partyId) shouldBe true + } + } + val configuredUser = validator.config.ledgerApiUser def getUser = { diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/WalletIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/WalletIntegrationTest.scala index b4825718b4..d2948168c0 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/WalletIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/WalletIntegrationTest.scala @@ -572,7 +572,7 @@ class WalletIntegrationTest sv1ScanBackend.lookupTransferPreapprovalByParty(aliceUserParty) shouldBe None val (_, cid) = actAndCheck( "Create TransferPreapproval", - createTransferPreapprovalIfNotExists(aliceWalletClient), + createTransferPreapprovalEnsuringItExists(aliceWalletClient, aliceValidatorBackend), )( "Scan lookup returns TransferPreapproval", c => { @@ -719,7 +719,7 @@ class WalletIntegrationTest aliceValidatorWalletClient.tap(10.0) 
actAndCheck( "Create TransferPreapproval for end user", - createTransferPreapprovalIfNotExists(aliceWalletClient), + createTransferPreapprovalEnsuringItExists(aliceWalletClient, aliceValidatorBackend), )( "Scan lookup returns TransferPreapproval for end user", c => { @@ -730,7 +730,10 @@ class WalletIntegrationTest ) actAndCheck( "Create TransferPreapproval for validator operator", - createTransferPreapprovalIfNotExists(aliceValidatorWalletClient), + createTransferPreapprovalEnsuringItExists( + aliceValidatorWalletClient, + aliceValidatorBackend, + ), )( "Scan lookup returns TransferPreapproval", c => { diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/WalletSweepIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/WalletSweepIntegrationTest.scala index ae26fcd649..b06af3e5ed 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/WalletSweepIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/WalletSweepIntegrationTest.scala @@ -222,7 +222,7 @@ abstract class WalletSweepIntegrationTest 50.0 ) // validator needs to have some funds to create preapproval walletClient.tap(50.0) - createTransferPreapprovalIfNotExists(walletClient) + createTransferPreapprovalEnsuringItExists(walletClient, aliceValidatorBackend) }, )( "Transfer preapproval is visible in scan", diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/WalletTransactionHistoryFrontendIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/WalletTransactionHistoryFrontendIntegrationTest.scala index 34109fb844..617085fbe0 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/WalletTransactionHistoryFrontendIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/WalletTransactionHistoryFrontendIntegrationTest.scala @@ -421,7 +421,7 @@ class WalletTransactionHistoryFrontendIntegrationTest browseToSv1Wallet(sv1ValidatorWalletUser) actAndCheck( "SV1 creates a transfer preapproval and automation renews it immediately", - createTransferPreapprovalIfNotExists(sv1WalletClient), + createTransferPreapprovalEnsuringItExists(sv1WalletClient, sv1ValidatorBackend), )( "SV1 sees the creation and renewal transactions", _ => { diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/WalletTxLogIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/WalletTxLogIntegrationTest.scala index 359ed22a4f..ca849bfa65 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/WalletTxLogIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/WalletTxLogIntegrationTest.scala @@ -1629,7 +1629,7 @@ class WalletTxLogIntegrationTest val charlieUserParty = onboardWalletUser(charlieWalletClient, aliceValidatorBackend) aliceValidatorWalletClient.tap(100) // funds to create preapproval - createTransferPreapprovalIfNotExists(charlieWalletClient) + createTransferPreapprovalEnsuringItExists(charlieWalletClient, aliceValidatorBackend) assertCommandFailsDueToInsufficientFunds( aliceWalletClient.transferPreapprovalSend( diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/runbook/RateLimitPreflightIntegrationTest.scala 
b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/runbook/RateLimitPreflightIntegrationTest.scala index 3e9b2c826d..fa13ea72f7 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/runbook/RateLimitPreflightIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/runbook/RateLimitPreflightIntegrationTest.scala @@ -1,6 +1,7 @@ package org.lfdecentralizedtrust.splice.integration.tests.runbook import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.logging.SuppressionRule import com.digitalasset.canton.topology.PartyId import com.digitalasset.canton.util.FutureInstances.parallelFuture import com.digitalasset.canton.util.MonadUtil @@ -10,6 +11,7 @@ import org.lfdecentralizedtrust.splice.integration.tests.SpliceTests.{ SpliceTestConsoleEnvironment, } import org.scalatest.Assertion +import org.slf4j.event.Level import scala.concurrent.{Future, blocking} import scala.util.control.NonFatal @@ -62,8 +64,20 @@ class RateLimitPreflightIntegrationTest extends IntegrationTestWithSharedEnviron def rateLimitIsEnforced(limit: Int, call: => Unit)(implicit env: SpliceTestConsoleEnvironment ): Assertion = { - val results = collectResponses(limit, call) - allWereSuccessfull(results) + val results = loggerFactory.assertLogsSeq(SuppressionRule.LevelAndAbove(Level.ERROR))( + collectResponses(limit, call), + forAll(_)( + // This hits the Canton limit on concurrent requests + _.message should include( + "Reached the limit of concurrent streams for com.digitalasset.canton.admin.participant.v30.ParticipantRepairService/ExportAcsOld" + ) + ), + ) + // Note: failures are expected due to the Canton rate limiter. + forAtLeast(1, results) { + _ shouldBe a[scala.util.Success[_]] + } + // This now hits the Istio rate limit assertThrowsAndLogsCommandFailures( call, entry => entry.message should include("HTTP 429 Too Many Requests"), diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/runbook/SvUiPreflightIntegrationTestUtil.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/runbook/SvUiPreflightIntegrationTestUtil.scala index 27efa1cd8b..54993acab9 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/runbook/SvUiPreflightIntegrationTestUtil.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/runbook/SvUiPreflightIntegrationTestUtil.scala @@ -103,6 +103,7 @@ trait SvUiPreflightIntegrationTestUtil extends TestCommon { )( s"Creating an onboarding secret", _ => { + waitForQuery(id("create-party-hint")) waitForQuery(id("create-validator-onboarding-secret")) waitForQuery(className("onboarding-secret-table")) val secretsItr = findAll(className("onboarding-secret-table-secret")) @@ -110,8 +111,23 @@ trait SvUiPreflightIntegrationTestUtil extends TestCommon { }, ) actAndCheck(timeUntilSuccess = 2.minutes)( - "click", - click on "create-validator-onboarding-secret", + "fill the party hint field and click on the button to create an onboarding secret", { + clue("fill party hint") { + inside(find(id("create-party-hint"))) { case Some(element) => + element.underlying.sendKeys("splice-client-10") + } + } + + clue("wait for the create button to become enabled") { + eventually() { + find(id("create-validator-onboarding-secret")).value.isEnabled shouldBe true + } + } + + clue("click the create validator onboarding secret button") { + click on
"create-validator-onboarding-secret" + } + }, )( s"We see that this SV has created an onboarding secret", _ => { diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/util/StandaloneCanton.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/util/StandaloneCanton.scala index 21d4250069..eaa600c9bd 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/util/StandaloneCanton.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/util/StandaloneCanton.scala @@ -84,36 +84,32 @@ trait StandaloneCanton extends PostgresAroundEach with NamedLogging with Process val dbNamesEnv = { val svDbsSuffix = overrideSvDbsSuffix.getOrElse(dbsSuffix) val sequencerDriverDbSuffix = overrideSequencerDriverDbSuffix.getOrElse(dbsSuffix) - (1 to 4) - .map(i => - Seq( - s"SV${i}_PARTICIPANT_DB" -> s"participant_sv${i}_${svDbsSuffix}", - s"SV${i}_SEQUENCER_DB_BFT" -> s"sequencer_sv${i}_${svDbsSuffix}_bft", - s"SV${i}_SEQUENCER_DB" -> s"sequencer_sv${i}_${svDbsSuffix}", - s"SV${i}_MEDIATOR_DB" -> s"mediator_sv${i}_${svDbsSuffix}", - ) + (1 to 4).flatMap(i => + Seq( + s"SV${i}_PARTICIPANT_DB" -> s"participant_sv${i}_${svDbsSuffix}", + s"SV${i}_SEQUENCER_DB_BFT" -> s"sequencer_sv${i}_${svDbsSuffix}_bft", + s"SV${i}_SEQUENCER_DB" -> s"sequencer_sv${i}_${svDbsSuffix}", + s"SV${i}_MEDIATOR_DB" -> s"mediator_sv${i}_${svDbsSuffix}", ) - .flatten :+ + ) :+ "SEQUENCER_DRIVER_DB" -> s"sequencer_driver_${sequencerDriverDbSuffix}" } val portsEnv = portsRange.fold(Seq(): Seq[(String, String)])(range => - (1 to 4) - .map(i => - Seq( - s"SV${i}_PARTICIPANT_LEDGER_API_PORT" -> (range * 1000 + i * 100 + 1).toString, - s"SV${i}_PARTICIPANT_ADMIN_API_PORT" -> (range * 1000 + i * 100 + 2).toString, - s"SV${i}_MEDIATOR_ADMIN_API_PORT" -> (range * 1000 + i * 100 + 7).toString, - s"SV${i}_SEQUENCER_PUBLIC_API_PORT" -> (range * 1000 + i * 100 + 8).toString, - s"SV${i}_SEQUENCER_ADMIN_API_PORT" -> (range * 1000 + i * 100 + 9).toString, - ) + (1 to 4).flatMap(i => + Seq( + s"SV${i}_PARTICIPANT_LEDGER_API_PORT" -> (range * 1000 + i * 100 + 1).toString, + s"SV${i}_PARTICIPANT_ADMIN_API_PORT" -> (range * 1000 + i * 100 + 2).toString, + s"SV${i}_MEDIATOR_ADMIN_API_PORT" -> (range * 1000 + i * 100 + 7).toString, + s"SV${i}_SEQUENCER_PUBLIC_API_PORT" -> (range * 1000 + i * 100 + 8).toString, + s"SV${i}_SEQUENCER_ADMIN_API_PORT" -> (range * 1000 + i * 100 + 9).toString, ) - .flatten + ) ) val allExtraEnv = (extraEnv ++ - (1 to 4).map(adminUserEnv(_)).flatten ++ + (1 to 4).flatMap(adminUserEnv(_)) ++ portsEnv ++ dbNamesEnv) ++ extraParticipantsEnvMap.toList diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/util/SvTestUtil.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/util/SvTestUtil.scala index 884a7f6660..b19c5aacb6 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/util/SvTestUtil.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/util/SvTestUtil.scala @@ -45,9 +45,11 @@ trait SvTestUtil extends TestCommon { protected def svs(implicit env: SpliceTestConsoleEnvironment) = Seq(sv1Backend, sv2Backend, sv3Backend, sv4Backend) - def allocateRandomSvParty(name: String)(implicit env: SpliceTestConsoleEnvironment) = { - val id = (new scala.util.Random).nextInt().toHexString - val partyIdHint = s"$name-$id" + def allocateRandomSvParty(name: String, id: Option[Int] = None)(implicit + env: SpliceTestConsoleEnvironment + ) = { + val enumerator = id.getOrElse((new scala.util.Random).nextInt()).toHexString + val partyIdHint = 
s"$name-$enumerator" eventuallySucceeds() { try { sv1Backend.participantClient.ledger_api.parties.allocate(partyIdHint).party } catch { diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/util/UpdateHistoryTestUtil.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/util/UpdateHistoryTestUtil.scala index 13057e8ea9..7f6c821f3b 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/util/UpdateHistoryTestUtil.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/util/UpdateHistoryTestUtil.scala @@ -356,6 +356,7 @@ trait UpdateHistoryTestUtil extends TestCommon { e.getObservers, e.createdAt, e.isAcsDelta, + e.getRepresentativePackageId, ) def dropTrailingNones(e: ExercisedEvent): ExercisedEvent = new ExercisedEvent( e.getWitnessParties, diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/util/WalletTestUtil.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/util/WalletTestUtil.scala index 572af161b4..30203e38b3 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/util/WalletTestUtil.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/util/WalletTestUtil.scala @@ -1309,17 +1309,29 @@ trait WalletTestUtil extends TestCommon with AnsTestUtil { .supported } - def createTransferPreapprovalIfNotExists( - partyWalletClient: WalletAppClientReference + /** @param partyWalletClient the wallet in which to create the transfer preapproval + * @param checkValidator a validator to use to check that the preapproval was created and ingested by enough Scans + */ + def createTransferPreapprovalEnsuringItExists( + partyWalletClient: WalletAppClientReference, + checkValidator: ValidatorAppBackendReference, ): TransferPreapproval.ContractId = { // creating the transfer preapproval can fail because there are no funds (which this won't recover), // but also by the validator being slow to approve the preapproval, which we can recover here - eventuallySucceeds() { + val cid = eventuallySucceeds() { partyWalletClient.createTransferPreapproval() match { case CreateTransferPreapprovalResponse.Created(contractId) => contractId case CreateTransferPreapprovalResponse.AlreadyExists(contractId) => contractId } } + // Ensure enough Scans have ingested it for it to be usable + eventually() { + checkValidator.lookupTransferPreapprovalByParty( + PartyId.tryFromProtoPrimitive(partyWalletClient.userStatus().party) + ) shouldBe defined + } + + cid } } diff --git a/apps/common/frontend/src/components/index.ts b/apps/common/frontend/src/components/index.ts index 5dd22ad5c3..057ca1be44 100644 --- a/apps/common/frontend/src/components/index.ts +++ b/apps/common/frontend/src/components/index.ts @@ -21,7 +21,7 @@ import Loading from './Loading'; import Login from './Login'; import LoginFailed from './LoginFailed'; import PartyId from './PartyId'; -import { computeDiff } from './PrettyJsonDiff'; +import { computeDiff, PrettyJsonDiff } from './PrettyJsonDiff'; import RateDisplay from './RateDisplay'; import TitledTable from './TitledTable'; import { updateIdFromEventId, UpdateId } from './UpdateId'; @@ -39,47 +39,48 @@ import { export { ActionView, - AmountDisplay, - Alerting, AlertState, - AuthProvider, + Alerting, + AmountDisplay, AnsEntry, AnsEntryDisplay, AnsEntryProps, AnsField, - BaseAnsField, AnsFieldProps, - UserInput, + AuthProvider, + BaseAnsField, + BaseVotesHooks, ConfirmationDialog, CopyableTypography, Copyright, DateDisplay, + DateWithDurationDisplay, DisableConditionally, + DsoInfo, + DsoViewPrettyJSON, 
ErrorBoundary, ErrorDisplay, ErrorRouterPage, Header, IntervalDisplay, + ListVoteRequests, Loading, Login, LoginFailed, PartyId, + PrettyJsonDiff, RateDisplay, + SubscriptionButton, TitledTable, TransferButton, - SubscriptionButton, - DateWithDurationDisplay, - DsoViewPrettyJSON, - DsoInfo, - VotesHooksContext, - BaseVotesHooks, - VotesHooks, - useVotesHooks, - ListVoteRequests, - ViewMoreButton, + UpdateId, + UserInput, ValidatorLicenses, ValidatorLicensesPage, + ViewMoreButton, + VotesHooks, + VotesHooksContext, computeDiff, - UpdateId, updateIdFromEventId, + useVotesHooks, }; diff --git a/apps/common/frontend/src/utils/index.ts b/apps/common/frontend/src/utils/index.ts index ca0e9e5251..91a2c4431e 100644 --- a/apps/common/frontend/src/utils/index.ts +++ b/apps/common/frontend/src/utils/index.ts @@ -1,6 +1,21 @@ // Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 + +import BigNumber from 'bignumber.js'; + export * from './auth'; export * from './amuletRules'; export * from './helpers'; export * from './voteRequests'; + +export const medianPriceVotes = (votedPrices: BigNumber[]): BigNumber | undefined => { + if (votedPrices && votedPrices.length > 0) { + const sorted = [...votedPrices].sort((a, b) => { + return a.isEqualTo(b) ? 0 : a.isLessThan(b) ? -1 : 1; + }); + const length = sorted.length; + const half = Math.floor(length / 2); + return length % 2 !== 0 ? sorted[half] : sorted[half - 1].plus(sorted[half]).multipliedBy(0.5); + } + return undefined; +}; diff --git a/apps/common/src/main/openapi/common-internal.yaml b/apps/common/src/main/openapi/common-internal.yaml index 567342010f..dd2fd9053e 100644 --- a/apps/common/src/main/openapi/common-internal.yaml +++ b/apps/common/src/main/openapi/common-internal.yaml @@ -189,7 +189,7 @@ components: properties: kind: type: string - enum: [ "participantAdmin", "canActAs", "canReadAs", "identityProviderAdmin", "canReadAsAnyParty" ] + enum: [ "participantAdmin", "canActAs", "canReadAs", "canExecuteAs", "identityProviderAdmin", "canReadAsAnyParty", "canExecuteAsAnyParty" ] party: type: string diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/SpliceAppAutomationService.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/SpliceAppAutomationService.scala index f609bb20a6..a0b9decee6 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/SpliceAppAutomationService.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/SpliceAppAutomationService.scala @@ -80,7 +80,8 @@ abstract class SpliceAppAutomationService[Store <: AppStore]( SpliceCircuitBreaker( s"$name-priority-connection", config, - logger, + clock, + loggerFactory, ), completionOffsetCallback, ) diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/config/AutomationConfig.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/config/AutomationConfig.scala index 8300b79491..e44544e64f 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/config/AutomationConfig.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/config/AutomationConfig.scala @@ -46,6 +46,11 @@ case class AutomationConfig( */ domainIngestionPollingInterval: NonNegativeFiniteDuration = NonNegativeFiniteDuration ofSeconds 30, + /** Polling interval to recompute and export topology metrics. + * + * Set to None to disable the topology metrics trigger. 
+ */ + topologyMetricsPollingInterval: Option[NonNegativeFiniteDuration] = None, /** Maximal number of retries that the time-based triggers retry transient failures w/o raising a warning. */ maxNumSilentPollingRetries: Int = 3, diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/config/SpliceConfig.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/config/SpliceConfig.scala index c5500cca0d..9c17956358 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/config/SpliceConfig.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/config/SpliceConfig.scala @@ -34,14 +34,15 @@ abstract class GrpcClientConfig extends NodeConfig {} abstract class HttpClientConfig extends NetworkAppNodeConfig {} final case class CircuitBreakerConfig( - // TODO(hyperledger-labs/splice#2462) Revert back to lower values once we also reset failures after some time of inactivity - maxFailures: Int = 40, + maxFailures: Int = 20, callTimeout: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(0), // disable timeout resetTimeout: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(30), maxResetTimeout: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofMinutes(10), exponentialBackoffFactor: Double = 2.0, randomFactor: Double = 0.2, + // If the last failure was more than resetFailuresAfter ago, reset the failure count to 0. + resetFailuresAfter: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofMinutes(15), ) final case class CircuitBreakersConfig( @@ -49,13 +50,11 @@ final case class CircuitBreakersConfig( maxResetTimeout = NonNegativeFiniteDuration.ofMinutes(2) ), mediumPriority: CircuitBreakerConfig = CircuitBreakerConfig( - // TODO(hyperledger-labs/splice#2462) Revert back to lower values once we also reset failures after some time of inactivity - maxFailures = 20, + maxFailures = 10, maxResetTimeout = NonNegativeFiniteDuration.ofMinutes(3), ), lowPriority: CircuitBreakerConfig = CircuitBreakerConfig( - // TODO(hyperledger-labs/splice#2462) Revert back to lower values once we also reset failures after some time of inactivity - maxFailures = 10, + maxFailures = 5, maxResetTimeout = NonNegativeFiniteDuration.ofMinutes(7), ), // Amulet expiry is different from essentially any other trigger run in the SV app in that for it to complete successfully diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/AppConnection.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/AppConnection.scala index b491353a07..7e71c0989f 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/AppConnection.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/AppConnection.scala @@ -20,6 +20,7 @@ import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand.{ DefaultBoundedTimeout, DefaultUnboundedTimeout, ServerEnforcedTimeout, + TimeoutType, } import com.digitalasset.canton.config.{ApiLoggingConfig, ClientConfig} import com.digitalasset.canton.admin.api.client.data.NodeStatus @@ -115,6 +116,7 @@ abstract class AppConnection( protected def runCmd[Req, Res, Result]( cmd: GrpcAdminCommand[Req, Res, Result], credentials: Option[CallCredentials] = None, + timeoutOverride: Option[TimeoutType] = None, )(implicit traceContext: TraceContext): Future[Result] = { val dso = cmd @@ -132,7 +134,7 @@ abstract class AppConnection( case None => dso } - val timeout = cmd.timeoutType match { + val timeout =
timeoutOverride.getOrElse(cmd.timeoutType) match { case ServerEnforcedTimeout => None case CustomClientTimeout(timeout) => diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/MediatorAdminConnection.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/MediatorAdminConnection.scala index d1e4b2fdd6..901495adf1 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/MediatorAdminConnection.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/MediatorAdminConnection.scala @@ -16,6 +16,7 @@ import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.sequencing.{ SequencerConnection, + SequencerConnectionPoolDelays, SequencerConnectionValidation, SequencerConnections, SubmissionRequestAmplification, @@ -69,6 +70,8 @@ class MediatorAdminConnection( // TODO(#2110) Rethink this when we enable sequencer connection pools. sequencerLivenessMargin = NonNegativeInt.zero, submissionRequestAmplification, + // TODO(#2666) Make the delays configurable. + sequencerConnectionPoolDelays = SequencerConnectionPoolDelays.default, ), SequencerConnectionValidation.ThresholdActive, ) @@ -105,6 +108,8 @@ class MediatorAdminConnection( // TODO(#2110) Rethink this when we enable sequencer connection pools. sequencerLivenessMargin = NonNegativeInt.zero, submissionRequestAmplification, + // TODO(#2666) Make the delays configurable. + sequencerConnectionPoolDelays = SequencerConnectionPoolDelays.default, ), SequencerConnectionValidation.ThresholdActive, ) diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/ParticipantAdminConnection.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/ParticipantAdminConnection.scala index 1f8f6f79ce..6043c9aa81 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/ParticipantAdminConnection.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/ParticipantAdminConnection.scala @@ -39,7 +39,13 @@ import com.digitalasset.canton.topology.transaction.{ SignedTopologyTransaction, TopologyChangeOp, } -import com.digitalasset.canton.topology.{NodeIdentity, ParticipantId, PartyId, SynchronizerId} +import com.digitalasset.canton.topology.{ + NodeIdentity, + ParticipantId, + PartyId, + PhysicalSynchronizerId, + SynchronizerId, +} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ShowUtil.* import com.google.protobuf.ByteString @@ -112,6 +118,11 @@ class ParticipantAdminConnection( def getSynchronizerId(synchronizerAlias: SynchronizerAlias)(implicit traceContext: TraceContext ): Future[SynchronizerId] = + getPhysicalSynchronizerId(synchronizerAlias).map(_.logical) + + def getPhysicalSynchronizerId(synchronizerAlias: SynchronizerAlias)(implicit + traceContext: TraceContext + ): Future[PhysicalSynchronizerId] = // We avoid ParticipantAdminCommands.SynchronizerConnectivity.GetSynchronizerId which tries to make // a new request to the sequencer to query the domain id. 
ListConnectedSynchronizers // on the other hand relies on a cache @@ -122,7 +133,7 @@ class ParticipantAdminConnection( throw Status.NOT_FOUND .withDescription(s"Domain with alias $synchronizerAlias is not connected") .asRuntimeException() - )(_.synchronizerId.logical) + )(_.physicalSynchronizerId) ) /** Usually you want getSynchronizerId instead which is much faster if the domain is connected @@ -132,9 +143,14 @@ class ParticipantAdminConnection( def getSynchronizerIdWithoutConnecting(synchronizerAlias: SynchronizerAlias)(implicit traceContext: TraceContext ): Future[SynchronizerId] = + getPhysicalSynchronizerIdWithoutConnecting(synchronizerAlias).map(_.logical) + + def getPhysicalSynchronizerIdWithoutConnecting(synchronizerAlias: SynchronizerAlias)(implicit + traceContext: TraceContext + ): Future[PhysicalSynchronizerId] = runCmd( ParticipantAdminCommands.SynchronizerConnectivity.GetSynchronizerId(synchronizerAlias) - ).map(_.logical) + ) def reconnectAllDomains()(implicit traceContext: TraceContext @@ -310,7 +326,6 @@ class ParticipantAdminConnection( filterSynchronizerId, timestamp, observer, - Map.empty, force, ) ).discard @@ -329,7 +344,8 @@ class ParticipantAdminConnection( acsBytes, IMPORT_ACS_WORKFLOW_ID_PREFIX, allowContractIdSuffixRecomputation = false, - ) + ), + timeoutOverride = Some(GrpcAdminCommand.DefaultUnboundedTimeout), ).map(_ => ()), logger, ) diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/ParticipantAdminDarsConnection.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/ParticipantAdminDarsConnection.scala index 96ee43f3e3..96998940a1 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/ParticipantAdminDarsConnection.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/ParticipantAdminDarsConnection.scala @@ -4,7 +4,6 @@ package org.lfdecentralizedtrust.splice.environment import cats.data.EitherT -import cats.implicits.catsSyntaxParallelTraverse_ import com.digitalasset.canton.admin.api.client.commands.{ ParticipantAdminCommands, TopologyAdminCommands, @@ -25,7 +24,6 @@ import com.google.protobuf.ByteString import io.grpc.Status import monocle.Monocle.toAppliedFocusOps import org.lfdecentralizedtrust.splice.environment.ParticipantAdminConnection.HasParticipantId -import org.lfdecentralizedtrust.splice.environment.TopologyAdminConnection.TopologyTransactionType.AuthorizedState import org.lfdecentralizedtrust.splice.environment.TopologyAdminConnection.{ TopologyResult, TopologyTransactionType, @@ -43,13 +41,8 @@ trait ParticipantAdminDarsConnection { def uploadDarFiles( pkg: Seq[UploadablePackage], retryFor: RetryFor, - vetTheDar: Boolean = false, )(implicit traceContext: TraceContext): Future[Unit] = { - uploadDarsLocally( - pkg, - retryFor, - vetTheDar, - ) + uploadDarsLocally(pkg, retryFor) } def uploadDarFileWithVettingOnAllConnectedSynchronizers( @@ -66,7 +59,6 @@ trait ParticipantAdminDarsConnection { _ <- uploadDarsLocally( Seq(UploadablePackage.fromByteString(path.getFileName.toString, darFile)), retryFor, - vetTheDar = false, ) domains <- listConnectedDomains().map(_.map(_.synchronizerId)) darResource = DarResource(path) @@ -76,7 +68,7 @@ trait ParticipantAdminDarsConnection { } yield () def vetDars( - domainId: SynchronizerId, + synchronizerId: SynchronizerId, dars: Seq[DarResource], fromDate: Option[Instant], maxVettingDelay: Option[(Clock, NonNegativeFiniteDuration)], @@ -84,53 +76,64 @@ trait ParticipantAdminDarsConnection { tc: 
TraceContext ): Future[Unit] = { val cantonFromDate = fromDate.map(CantonTimestamp.assertFromInstant) - ensureTopologyMapping[VettedPackages]( - // we publish to the authorized store so that it pushed on all the domains and the console commands are still useful when dealing with dars - TopologyStoreId.Authorized, - s"dars ${dars.map(_.packageId)} are vetted in the authorized store with from $fromDate", - topologyTransactionType => - EitherT( - getVettingState(None, topologyTransactionType).map { vettedPackages => - if ( - dars - .forall(dar => vettedPackages.mapping.packages.exists(_.packageId == dar.packageId)) - ) { - // we don't check the validFrom value, we assume that once it's part of the vetting state it can no longer be updated - Right(vettedPackages) - } else { - Left(vettedPackages) - } - } - ), - currentVettingState => - Right( - updateVettingStateForDars( - dars = dars, - packageValidFrom = cantonFromDate, - currentVetting = currentVettingState, - ) - ), + + retryProvider.retry( RetryFor.Automation, - maxSubmissionDelay = maxVettingDelay, - ).flatMap(_ => - retryProvider.waitUntil( - RetryFor.Automation, - s"vet_dars_on_sync", - s"Dars ${dars.map(_.packageId)} are vetted on synchronizer $domainId", - getVettingState(domainId, AuthorizedState).map { vettingState => - val packagesNotVetted = dars.filterNot(dar => - vettingState.mapping.packages.exists(_.packageId == dar.packageId) - ) - if (packagesNotVetted.nonEmpty) { - throw Status.NOT_FOUND - .withDescription( - s"Dar ${packagesNotVetted.map(_.packageId)} are not vetted on synchronizer $domainId" + "dar_vetting", + s"dars ${dars.map(_.packageId)} are vetted on $synchronizerId from $fromDate", + lookupVettingState( + Some(synchronizerId), + TopologyAdminConnection.TopologyTransactionType.AuthorizedState, + ).flatMap { + case None => + for { + participantId <- getParticipantId() + _ <- ensureInitialMapping( + Right( + updateVettingStateForDars( + dars, + cantonFromDate, + VettedPackages.tryCreate( + participantId, + Seq.empty, + ), + ) ) - .asRuntimeException - } - }, - logger, - ) + ) + } yield () + case Some(_) => + ensureTopologyMapping[VettedPackages]( + TopologyStoreId.Synchronizer(synchronizerId), + s"dars ${dars.map(_.packageId)} are vetted on $synchronizerId from $fromDate", + topologyTransactionType => + EitherT( + getVettingState(synchronizerId, topologyTransactionType).map { vettedPackages => + if ( + dars + .forall(dar => + vettedPackages.mapping.packages.exists(_.packageId == dar.packageId) + ) + ) { + // we don't check the validFrom value, we assume that once it's part of the vetting state it can no longer be updated + Right(vettedPackages) + } else { + Left(vettedPackages) + } + } + ), + currentVettingState => + Right( + updateVettingStateForDars( + dars = dars, + packageValidFrom = cantonFromDate, + currentVetting = currentVettingState, + ) + ), + RetryFor.Automation, + maxSubmissionDelay = maxVettingDelay, + ).map(_ => ()) + }, + logger, ) } @@ -227,23 +230,33 @@ trait ParticipantAdminDarsConnection { def getVettingState( domain: Option[SynchronizerId], topologyTransactionType: TopologyTransactionType, - )(implicit tc: TraceContext): Future[TopologyResult[VettedPackages]] = { + )(implicit tc: TraceContext): Future[TopologyResult[VettedPackages]] = + lookupVettingState(domain, topologyTransactionType).map( + _.getOrElse( + throw Status.NOT_FOUND + .withDescription(s"No package vetting state found for domain $domain") + .asRuntimeException + ) + ) + + @SuppressWarnings(Array("org.wartremover.warts.IterableOps")) 
+ def lookupVettingState( + domain: Option[SynchronizerId], + topologyTransactionType: TopologyTransactionType, + )(implicit tc: TraceContext): Future[Option[TopologyResult[VettedPackages]]] = { for { participantId <- getParticipantId() vettedState <- listVettedPackages(participantId, domain, topologyTransactionType) } yield { vettedState match { - case Seq() => - throw Status.NOT_FOUND - .withDescription(s"No package vetting state found for domain $domain") - .asRuntimeException - case Seq(state) => state + case Seq() => None + case Seq(state) => Some(state) case other => logger.warn( s"Vetted state contains multiple entries on domain $domain for $participantId: $other. Will use the last entry" ) // TODO(DACH-NY/canton-network-node#18175) - remove once canton can handle this and fixed the issue - other.maxBy(_.base.serial) + Some(other.maxBy(_.base.serial)) } } } @@ -258,30 +271,32 @@ trait ParticipantAdminDarsConnection { private def uploadDarsLocally( dars: Seq[UploadablePackage], retryFor: RetryFor, - vetTheDar: Boolean, )(implicit traceContext: TraceContext ): Future[Unit] = { for { existingDars <- listDars().map(_.map(_.mainPackageId)) darsToUploads = dars.filterNot(dar => existingDars.contains(dar.packageId)) - _ <- darsToUploads.parTraverse_(uploadDar(_, vetTheDar, retryFor)) + _ <- MonadUtil.parTraverseWithLimit(PositiveInt.tryCreate(5))(darsToUploads)( + uploadDar(_, retryFor) + ) } yield {} } - private def uploadDar(dar: UploadablePackage, vetTheDar: Boolean, retryFor: RetryFor)(implicit + private def uploadDar(dar: UploadablePackage, retryFor: RetryFor)(implicit tc: TraceContext ) = { retryProvider.retry( retryFor, "upload_dar", - s"Upload dar ${dar.packageId} with vetting $vetTheDar", + s"Upload dar ${dar.packageId} (without vetting)", runCmd( ParticipantAdminCommands.Package .UploadDar( - dar.resourcePath, - vetAllPackages = vetTheDar, - synchronizeVetting = vetTheDar, + darPath = dar.resourcePath, + synchronizerId = None, + vetAllPackages = false, + synchronizeVetting = false, description = "", expectedMainPackageId = dar.packageId, requestHeaders = Map.empty, diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/SequencerAdminConnection.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/SequencerAdminConnection.scala index 8c485ac44a..4e2005f17c 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/SequencerAdminConnection.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/SequencerAdminConnection.scala @@ -17,12 +17,12 @@ import com.digitalasset.canton.grpc.ByteStringStreamObserver import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.protocol.StaticSynchronizerParameters -import com.digitalasset.canton.sequencer.admin.v30.OnboardingStateResponse +import com.digitalasset.canton.sequencer.admin.v30.OnboardingStateV2Response import com.digitalasset.canton.sequencing.protocol import com.digitalasset.canton.synchronizer.sequencer.SequencerPruningStatus import com.digitalasset.canton.synchronizer.sequencer.admin.grpc.InitializeSequencerResponse import com.digitalasset.canton.time.Clock -import com.digitalasset.canton.topology.admin.v30.GenesisStateResponse +import com.digitalasset.canton.topology.admin.v30.GenesisStateV2Response import com.digitalasset.canton.topology.store.StoredTopologyTransactions.GenericStoredTopologyTransactions import 
com.digitalasset.canton.topology.transaction.SequencerSynchronizerState import com.digitalasset.canton.topology.{Member, NodeIdentity, SequencerId} @@ -70,10 +70,10 @@ class SequencerAdminConnection( def getGenesisState(timestamp: CantonTimestamp)(implicit traceContext: TraceContext ): Future[ByteString] = { - val responseObserver = new ByteStringStreamObserver[GenesisStateResponse](_.chunk) + val responseObserver = new ByteStringStreamObserver[GenesisStateV2Response](_.chunk) runCmd( TopologyAdminCommands.Read - .GenesisState( + .GenesisStateV2( timestamp = Some(timestamp), synchronizerStore = None, observer = responseObserver, @@ -85,9 +85,9 @@ class SequencerAdminConnection( traceContext: TraceContext ): Future[ByteString] = { val responseObserver = - new ByteStringStreamObserver[OnboardingStateResponse](_.onboardingStateForSequencer) + new ByteStringStreamObserver[OnboardingStateV2Response](_.onboardingStateForSequencer) runCmd( - SequencerAdminCommands.OnboardingState(responseObserver, Left(sequencerId)) + SequencerAdminCommands.OnboardingStateV2(responseObserver, Left(sequencerId)) ).flatMap(_ => responseObserver.resultBytes) } @@ -96,14 +96,16 @@ class SequencerAdminConnection( def initializeFromBeginning( topologySnapshot: GenericStoredTopologyTransactions, domainParameters: StaticSynchronizerParameters, - )(implicit traceContext: TraceContext): Future[InitializeSequencerResponse] = + )(implicit traceContext: TraceContext): Future[InitializeSequencerResponse] = { + val builder = ByteString.newOutput() + topologySnapshot.result.foreach(_.writeDelimitedTo(domainParameters.protocolVersion, builder)) runCmd( - SequencerAdminCommands.InitializeFromGenesisState( - // TODO(DACH-NY/canton-network-node#10953) Stop doing that. - topologySnapshot.toByteString(domainParameters.protocolVersion), + SequencerAdminCommands.InitializeFromGenesisStateV2( + builder.toByteString, domainParameters, ) ) + } /** This is used for initializing the sequencer after hard domain migrations. 
*/ @@ -112,7 +114,7 @@ class SequencerAdminConnection( domainParameters: StaticSynchronizerParameters, )(implicit traceContext: TraceContext): Future[InitializeSequencerResponse] = runCmd( - SequencerAdminCommands.InitializeFromGenesisState( + SequencerAdminCommands.InitializeFromGenesisStateV2( genesisState, domainParameters, ) @@ -122,7 +124,7 @@ class SequencerAdminConnection( onboardingState: ByteString )(implicit traceContext: TraceContext): Future[InitializeSequencerResponse] = runCmd( - SequencerAdminCommands.InitializeFromOnboardingState( + SequencerAdminCommands.InitializeFromOnboardingStateV2( onboardingState ) ) diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/SpliceLedgerConnection.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/SpliceLedgerConnection.scala index 3d9e97376a..2397bd7710 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/SpliceLedgerConnection.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/SpliceLedgerConnection.scala @@ -13,7 +13,7 @@ import com.digitalasset.base.error.ErrorResource import com.digitalasset.base.error.utils.ErrorDetails import com.digitalasset.base.error.utils.ErrorDetails.ResourceInfoDetail import com.digitalasset.canton.SynchronizerAlias -import com.digitalasset.canton.admin.api.client.data.PartyDetails +import com.digitalasset.canton.admin.api.client.data.parties.PartyDetails import com.digitalasset.canton.config.NonNegativeFiniteDuration import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.ledger.error.LedgerApiErrors diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/TopologyAdminConnection.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/TopologyAdminConnection.scala index d0b189ffe0..3cfcee2a06 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/TopologyAdminConnection.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/TopologyAdminConnection.scala @@ -606,7 +606,7 @@ abstract class TopologyAdminConnection( ) ) - private def ensureInitialMapping[M <: TopologyMapping: ClassTag]( + protected def ensureInitialMapping[M <: TopologyMapping: ClassTag]( mappingE: Either[String, M] )(implicit traceContext: TraceContext): Future[SignedTopologyTransaction[TopologyChangeOp, M]] = { val mapping = @@ -1637,7 +1637,7 @@ object TopologyAdminConnection { param("validFrom", _.validFrom), param("validUntil", _.validUntil), param("operation", _.operation), - param("transactionHash", _.transactionHash), + param("transactionHash", _.transactionHash.hash), param("serial", _.serial), param("signedBy", _.signedBy), ) diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/ledger/api/LedgerClient.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/ledger/api/LedgerClient.scala index 5a00086e70..659a3b3103 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/ledger/api/LedgerClient.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/ledger/api/LedgerClient.scala @@ -36,7 +36,7 @@ import org.lfdecentralizedtrust.splice.environment.ledger.api.LedgerClient.GetTr import org.lfdecentralizedtrust.splice.store.MultiDomainAcsStore.IngestionFilter import org.lfdecentralizedtrust.splice.util.DisclosedContracts import com.digitalasset.canton.SynchronizerAlias -import 
com.digitalasset.canton.admin.api.client.data.PartyDetails +import com.digitalasset.canton.admin.api.client.data.parties.PartyDetails import com.digitalasset.canton.config.NonNegativeFiniteDuration import com.digitalasset.canton.crypto.Fingerprint import com.digitalasset.canton.data.CantonTimestamp @@ -337,11 +337,11 @@ private[environment] class LedgerClient( lapi.interactive.interactive_submission_service.SinglePartySignatures( party.toProtoPrimitive, Seq( - lapi.interactive.interactive_submission_service.Signature( - lapi.interactive.interactive_submission_service.SignatureFormat.SIGNATURE_FORMAT_RAW, + lapi.crypto.Signature( + lapi.crypto.SignatureFormat.SIGNATURE_FORMAT_RAW, signature.signature, signature.signedBy.toProtoPrimitive, - lapi.interactive.interactive_submission_service.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_ED25519, + lapi.crypto.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_ED25519, ) ), ) @@ -497,6 +497,8 @@ private[environment] class LedgerClient( v1User.Right.defaultInstance.withCanActAs(v1User.Right.CanActAs(as.party)) case as: Right.CanReadAs => v1User.Right.defaultInstance.withCanReadAs(v1User.Right.CanReadAs(as.party)) + case as: Right.CanExecuteAs => + v1User.Right.defaultInstance.withCanExecuteAs(v1User.Right.CanExecuteAs(as.party)) case _: Right.IdentityProviderAdmin => v1User.Right.defaultInstance.withIdentityProviderAdmin( v1User.Right.IdentityProviderAdmin() @@ -505,6 +507,8 @@ private[environment] class LedgerClient( v1User.Right.defaultInstance.withParticipantAdmin(v1User.Right.ParticipantAdmin()) case _: Right.CanReadAsAnyParty => v1User.Right.defaultInstance.withCanReadAsAnyParty(v1User.Right.CanReadAsAnyParty()) + case _: Right.CanExecuteAsAnyParty => + v1User.Right.defaultInstance.withCanExecuteAsAnyParty(v1User.Right.CanExecuteAsAnyParty()) case unsupported => throw new IllegalArgumentException(s"unsupported right: $unsupported") } diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/migration/ParticipantUsersData.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/migration/ParticipantUsersData.scala index 222b513dbb..19f5feb795 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/migration/ParticipantUsersData.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/migration/ParticipantUsersData.scala @@ -88,12 +88,19 @@ final case class ParticipantUser( http.ParticipantUserRight(http.ParticipantUserRight.Kind.CanActAs, Some(actAs.party)) case readAs: User.Right.CanReadAs => http.ParticipantUserRight(http.ParticipantUserRight.Kind.CanReadAs, Some(readAs.party)) + case executeAs: User.Right.CanExecuteAs => + http.ParticipantUserRight( + http.ParticipantUserRight.Kind.CanExecuteAs, + Some(executeAs.party), + ) case _: User.Right.IdentityProviderAdmin => http.ParticipantUserRight(http.ParticipantUserRight.Kind.IdentityProviderAdmin) case _: User.Right.ParticipantAdmin => http.ParticipantUserRight(http.ParticipantUserRight.Kind.ParticipantAdmin) case _: User.Right.CanReadAsAnyParty => http.ParticipantUserRight(http.ParticipantUserRight.Kind.CanReadAsAnyParty) + case _: User.Right.CanExecuteAsAnyParty => + http.ParticipantUserRight(http.ParticipantUserRight.Kind.CanExecuteAsAnyParty) case _ => throw new IllegalArgumentException("Unsupported user right") }.toVector, isDeactivated, @@ -123,12 +130,16 @@ object ParticipantUser { new User.Right.CanActAs(party) case http.ParticipantUserRight(http.ParticipantUserRight.Kind.CanReadAs, Some(party)) => new User.Right.CanReadAs(party) + case 
http.ParticipantUserRight(http.ParticipantUserRight.Kind.CanExecuteAs, Some(party)) => + new User.Right.CanExecuteAs(party) case http.ParticipantUserRight(http.ParticipantUserRight.Kind.ParticipantAdmin, None) => User.Right.ParticipantAdmin.INSTANCE case http.ParticipantUserRight(http.ParticipantUserRight.Kind.IdentityProviderAdmin, None) => User.Right.IdentityProviderAdmin.INSTANCE case http.ParticipantUserRight(http.ParticipantUserRight.Kind.CanReadAsAnyParty, None) => User.Right.CanReadAsAnyParty.INSTANCE + case http.ParticipantUserRight(http.ParticipantUserRight.Kind.CanExecuteAsAnyParty, None) => + User.Right.CanExecuteAsAnyParty.INSTANCE case _ => throw new IllegalArgumentException("Unsupported or invalid user right") }, response.isDeactivated, diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/setup/ParticipantInitializer.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/setup/ParticipantInitializer.scala index 96703200d4..918564f053 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/setup/ParticipantInitializer.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/setup/ParticipantInitializer.scala @@ -70,6 +70,9 @@ class ParticipantInitializer( private val nodeInitializer = new NodeInitializer(participantAdminConnection, retryProvider, loggerFactory) + def waitForNodeInitialized(): Future[Unit] = + nodeInitializer.waitForNodeInitialized() + def ensureInitializedWithExpectedId(): Future[Unit] = dumpConfig match { case Some(c: ParticipantBootstrapDumpConfig.File) => diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/setup/ParticipantPartyMigrator.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/setup/ParticipantPartyMigrator.scala index 181c5f5478..99c3cff5aa 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/setup/ParticipantPartyMigrator.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/setup/ParticipantPartyMigrator.scala @@ -11,9 +11,9 @@ import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId import com.digitalasset.canton.topology.transaction.{ HostingParticipant, + TopologyChangeOp, ParticipantPermission, PartyToParticipant, - TopologyChangeOp, } import com.digitalasset.canton.topology.{ParticipantId, PartyId, SynchronizerId, UniqueIdentifier} import com.digitalasset.canton.tracing.TraceContext diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/UpdateHistory.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/UpdateHistory.scala index 765721ae97..563d6ea69c 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/UpdateHistory.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/UpdateHistory.scala @@ -8,7 +8,7 @@ import cats.syntax.semigroup.* import com.daml.ledger.api.v2.TraceContextOuterClass import com.daml.ledger.javaapi.data.codegen.{ContractId, DamlRecord} import com.daml.ledger.javaapi.data.{CreatedEvent, Event, ExercisedEvent, Identifier, Transaction} import com.google.protobuf.ByteString import org.lfdecentralizedtrust.splice.environment.ledger.api.ReassignmentEvent.{Assign, Unassign} import org.lfdecentralizedtrust.splice.environment.ledger.api.{ ActiveContract, @@ -1399,6 +1399,7 @@ class UpdateHistory( /*observers = */ updateRow.observers.getOrElse(missingStringSeq).asJava, /*createdAt = */
updateRow.createdAt.toInstant, /*acsDelta = */ false, + /*representativePackageId = */ updateRow.templatePackageId, ) UpdateHistoryResponse( @@ -1517,6 +1518,7 @@ class UpdateHistory( /*observers = */ row.observers.getOrElse(missingStringSeq).asJava, /*createdAt = */ row.createdAt.toInstant, /*acsDelta = */ false, + /*representativePackageId = */ row.templatePackageId, ), counter = row.reassignmentCounter, ), @@ -2329,6 +2331,7 @@ object UpdateHistory { /*observers = */ observers.getOrElse(missingStringSeq).asJava, /*createdAt = */ createdAt.toInstant, /*acsDelta = */ false, + /*representativePackageId = */ templatePackageId, ), ) } diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/db/AcsJdbcTypes.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/db/AcsJdbcTypes.scala index b3ba9f845f..9a608a3489 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/db/AcsJdbcTypes.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/db/AcsJdbcTypes.scala @@ -8,8 +8,8 @@ import com.daml.ledger.javaapi.data.{CreatedEvent, Identifier} import com.daml.ledger.javaapi.data.codegen.json.JsonLfWriter import com.daml.ledger.javaapi.data.codegen.{ContractId, DamlRecord, DefinedDataType} import com.digitalasset.canton.config.CantonRequireTypes.{String2066, String300} -import com.digitalasset.canton.daml.lf.value.json.ApiCodecCompressed import com.digitalasset.canton.data.Offset +import com.digitalasset.canton.daml.lf.value.json.ApiCodecCompressed import com.digitalasset.canton.topology.{Member, PartyId, SynchronizerId} import com.digitalasset.daml.lf.data.Ref.HexString import com.digitalasset.daml.lf.data.Time.Timestamp diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/db/AcsQueries.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/db/AcsQueries.scala index a58c2b054e..d25ddfaeca 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/db/AcsQueries.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/db/AcsQueries.scala @@ -153,7 +153,7 @@ trait AcsQueries extends AcsJdbcTypes { and o.migration_id = acs.migration_id and acs.package_name = ${packageQualifiedName.packageName} and acs.template_id_qualified_name = ${packageQualifiedName.qualifiedName} - and """ ++ where ++ sql""" + and (""" ++ where ++ sql""") where sd.id = $storeId and o.migration_id = $migrationId """ ++ orderLimit).toActionBuilder .as[AcsQueries.SelectFromAcsTableResultWithOffset] diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/db/AdvisoryLockIds.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/db/AdvisoryLockIds.scala new file mode 100644 index 0000000000..8fac9aaf57 --- /dev/null +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/db/AdvisoryLockIds.scala @@ -0,0 +1,13 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package org.lfdecentralizedtrust.splice.store.db + +/** Registry for advisory lock identifiers used in splice applications. + */ +object AdvisoryLockIds { + // 0x73706c equals ASCII encoded "spl". Modeled after Canton's HaConfig, which uses ASCII "dml". 
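+  // For illustration only (a minimal sketch in plain Scala, not part of the registry's API):
+  // the base packs the ASCII bytes of "spl" (0x73, 0x70, 0x6c) into the upper bytes and
+  // leaves the low byte free for per-lock offsets such as `acsSnapshotDataInsert`:
+  //   val spl: Long = "spl".getBytes("US-ASCII").foldLeft(0L)((acc, b) => (acc << 8) | b) // 0x73706cL
+  //   assert((spl << 8) == 0x73706c00L) // == base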
+ private val base: Long = 0x73706c00 + + final val acsSnapshotDataInsert: Long = base + 1 +} diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/Contract.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/Contract.scala index ad31b49cfb..e11795fa5f 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/Contract.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/Contract.scala @@ -17,10 +17,10 @@ import com.daml.ledger.javaapi.data.codegen.{ import com.digitalasset.daml.lf.value as lf import com.digitalasset.daml.lf.data.Ref.Identifier as LfIdentifier import com.digitalasset.daml.lf.data.Time.Timestamp -import com.digitalasset.canton.daml.lf.value.json.ApiCodecCompressed import org.lfdecentralizedtrust.splice.http.v0.definitions as http import org.lfdecentralizedtrust.splice.http.v0.definitions.MaybeCachedContract import org.lfdecentralizedtrust.splice.util.JavaDecodeUtil +import com.digitalasset.canton.daml.lf.value.json.ApiCodecCompressed import com.digitalasset.canton.ProtoDeserializationError import com.digitalasset.canton.ledger.api.validation.ValueValidator import com.digitalasset.canton.logging.ErrorLoggingContext diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/DarUtil.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/DarUtil.scala index 997294885f..253882c3bf 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/DarUtil.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/DarUtil.scala @@ -6,25 +6,13 @@ package org.lfdecentralizedtrust.splice.util import cats.syntax.either.* import com.digitalasset.daml.lf.archive.{Dar, DarDecoder, DarParser} import com.digitalasset.daml.lf.data.Ref.PackageId -import com.digitalasset.daml.lf.language.Ast.{Package, PackageMetadata} +import com.digitalasset.daml.lf.language.Ast.Package import java.io.{File, FileInputStream, InputStream} import java.util.zip.ZipInputStream import scala.util.Using object DarUtil { - def readDarMetadata(resourcePath: String): PackageMetadata = { - val resourceStream = getClass.getClassLoader.getResourceAsStream(resourcePath) - if (resourceStream == null) { - throw new IllegalArgumentException(s"Failed to parse resource: $resourcePath") - } - readDarMetadata(resourcePath, resourceStream) - } - - def readDarMetadata(name: String, stream: InputStream): PackageMetadata = { - val dar = readDar(name, stream) - dar.main._2.metadata - } def readDar(file: File): Dar[(PackageId, Package)] = readDar(file.getName, new FileInputStream(file)) @@ -37,9 +25,6 @@ object DarUtil { } } - def readPackageId(resourcePath: String): String = - readPackageId(resourcePath, getClass.getClassLoader.getResourceAsStream(resourcePath)) - def readPackageId(name: String, stream: InputStream): String = { Using.resource(new ZipInputStream(stream)) { zipStream => val dar = DarParser diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/SpliceCircuitBreaker.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/SpliceCircuitBreaker.scala index 85177ad7a0..565d0bb077 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/SpliceCircuitBreaker.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/SpliceCircuitBreaker.scala @@ -12,19 +12,30 @@ import com.digitalasset.base.error.ErrorCategory.{ InvalidIndependentOfSystemState, } import com.digitalasset.base.error.utils.ErrorDetails -import 
com.digitalasset.canton.logging.TracedLogger +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.time.Clock import com.digitalasset.canton.tracing.TraceContext import io.grpc.StatusRuntimeException import org.apache.pekko.actor.Scheduler import org.apache.pekko.pattern.{CircuitBreaker, CircuitBreakerOpenException} import org.lfdecentralizedtrust.splice.config.CircuitBreakerConfig +import java.util.concurrent.atomic.AtomicReference import scala.concurrent.{ExecutionContext, Future} import scala.util.{Failure, Success} -class SpliceCircuitBreaker(name: String, underlying: CircuitBreaker)(implicit - ec: ExecutionContext -) { +class SpliceCircuitBreaker( + name: String, + config: CircuitBreakerConfig, + clock: Clock, + override val loggerFactory: NamedLoggerFactory, +)(implicit + ec: ExecutionContext, + scheduler: Scheduler, +) extends NamedLogging { + + private val lastFailure: AtomicReference[Option[CantonTimestamp]] = new AtomicReference(None) private val errorCategoriesToIgnore: Set[ErrorCategory] = Set( InvalidIndependentOfSystemState, @@ -34,7 +45,25 @@ class SpliceCircuitBreaker(name: String, underlying: CircuitBreaker)(implicit InvalidGivenCurrentSystemStateSeekAfterEnd, ) - def withCircuitBreaker[T](body: => Future[T]): Future[T] = { + val underlying = new CircuitBreaker( + scheduler, + maxFailures = config.maxFailures, + callTimeout = config.callTimeout.underlying, + resetTimeout = config.resetTimeout.underlying, + maxResetTimeout = config.maxResetTimeout.underlying, + exponentialBackoffFactor = config.exponentialBackoffFactor, + randomFactor = config.randomFactor, + ).onOpen { + logger.warn( + s"Circuit breaker $name tripped after ${config.maxFailures} failures" + )(TraceContext.empty) + }.onHalfOpen { + logger.info(s"Circuit breaker $name moving to half-open state")(TraceContext.empty) + }.onClose { + logger.info(s"Circuit breaker $name moving to closed state")(TraceContext.empty) + } + + def withCircuitBreaker[T](body: => Future[T])(implicit tc: TraceContext): Future[T] = { if (underlying.isClosed || underlying.isHalfOpen) { callAndMark(body) } else { @@ -47,11 +76,28 @@ class SpliceCircuitBreaker(name: String, underlying: CircuitBreaker)(implicit } } - private def callAndMark[T](body: => Future[T]) = { + private def callAndMark[T](body: => Future[T])(implicit tc: TraceContext) = { + // Only run this when the circuit breaker is closed, otherwise rely on the pekko circuit breaker to handle reopening. 
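+    // Worked example of the reset rule (a sketch, assuming maxFailures = 2 and
+    // resetFailuresAfter = 1 minute): a failure at t=0 records lastFailure; a second
+    // failure at t=30s is still within the window, so the underlying failure count
+    // reaches 2 and the breaker opens. If the second failure instead arrives at t=90s,
+    // the stale lastFailure is cleared and `underlying.succeed()` resets the count
+    // first, so the count restarts at 1 and the breaker stays closed.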
+ if (underlying.isClosed) { + lastFailure.updateAndGet(_.filter { lastFailureTime => + val elapsed = clock.now - lastFailureTime + if (elapsed.compareTo(config.resetFailuresAfter.asJava) >= 0) { + logger.info( + s"Resetting circuit breaker as last failure was $elapsed ago which is more than ${config.resetFailuresAfter}" + ) + underlying.succeed() + false + } else { + true + } + }) + } + body.andThen { case Failure(exception) => if (!isFailureIgnored(exception)) { underlying.fail() + lastFailure.set(Some(clock.now)) } case Success(_) => underlying.succeed() } @@ -86,26 +132,13 @@ object SpliceCircuitBreaker { def apply( name: String, config: CircuitBreakerConfig, - logger: TracedLogger, + clock: Clock, + loggerFactory: NamedLoggerFactory, )(implicit scheduler: Scheduler, ec: ExecutionContext): SpliceCircuitBreaker = new SpliceCircuitBreaker( name, - new CircuitBreaker( - scheduler, - maxFailures = config.maxFailures, - callTimeout = config.callTimeout.underlying, - resetTimeout = config.resetTimeout.underlying, - maxResetTimeout = config.maxResetTimeout.underlying, - exponentialBackoffFactor = config.exponentialBackoffFactor, - randomFactor = config.randomFactor, - ).onOpen { - logger.warn( - s"Circuit breaker $name tripped after ${config.maxFailures} failures" - )(TraceContext.empty) - }.onHalfOpen { - logger.info(s"Circuit breaker $name moving to half-open state")(TraceContext.empty) - }.onClose { - logger.info(s"Circuit breaker $name moving to closed state")(TraceContext.empty) - }, + config, + clock, + loggerFactory, ) } diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/SpliceUtil.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/SpliceUtil.scala index ed0df4d534..789e47683f 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/SpliceUtil.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/SpliceUtil.scala @@ -53,7 +53,7 @@ import scala.jdk.OptionConverters.* object SpliceUtil { private def readDarVersion(resource: DarResource): PackageVersion = - DarUtil.readDarMetadata(resource.path).version + resource.metadata.version def readPackageConfig(): splice.amuletconfig.PackageConfig = { new splice.amuletconfig.PackageConfig( diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/SynchronizerMigrationUtil.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/SynchronizerMigrationUtil.scala index 9027e3528e..37c377f007 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/SynchronizerMigrationUtil.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/SynchronizerMigrationUtil.scala @@ -7,6 +7,7 @@ import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.protocol.DynamicSynchronizerParameters import com.digitalasset.canton.time.NonNegativeFiniteDuration import com.digitalasset.canton.topology.SynchronizerId +import com.digitalasset.canton.topology.{ForceFlag, ForceFlags} import com.digitalasset.canton.topology.transaction.SynchronizerParametersState import com.digitalasset.canton.tracing.TraceContext import org.lfdecentralizedtrust.splice.environment.TopologyAdminConnection @@ -38,6 +39,8 @@ final object SynchronizerMigrationUtil { confirmationRequestsMaxRate = NonNegativeInt.zero, mediatorReactionTimeout = NonNegativeFiniteDuration.Zero, ), + forceChanges = + ForceFlags(ForceFlag.AllowOutOfBoundsValue), // required for mediatorReactionTimeout = 0 ) def 
ensureSynchronizerIsUnpaused( diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/TemplateJsonDecoder.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/TemplateJsonDecoder.scala index c2ef93b247..eb22d7f0b7 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/TemplateJsonDecoder.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/util/TemplateJsonDecoder.scala @@ -12,7 +12,7 @@ import com.daml.ledger.javaapi.data.codegen.{ InterfaceCompanion, ValueDecoder, } -import org.lfdecentralizedtrust.splice.environment.DarResource +import com.digitalasset.canton.caching.CaffeineCache import com.digitalasset.canton.daml.lf.value.json.ApiCodecCompressed import com.digitalasset.canton.ledger.api.util.LfEngineToApi import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} @@ -22,7 +22,9 @@ import com.digitalasset.daml.lf.archive.{ArchivePayload, Dar, DarReader} import com.digitalasset.daml.lf.data.Ref import com.digitalasset.daml.lf.data.Ref.{DottedName, ModuleName, PackageId, QualifiedName} import com.digitalasset.daml.lf.typesig +import com.github.benmanes.caffeine.cache.Caffeine import io.circe.Json +import org.lfdecentralizedtrust.splice.environment.DarResource import java.util.zip.ZipInputStream @@ -129,16 +131,29 @@ class ResourceTemplateDecoder( object ResourceTemplateDecoder { - def loadPackageSignaturesFromResource( + // caching loaded package signatures to avoid re-reading the same DAR multiple times + private val loadedPackageCache = CaffeineCache[String, Map[PackageId, typesig.PackageSignature]]( + Caffeine + .newBuilder() + .maximumSize(10000), + None, + ) + + private def loadPackageSignaturesFromResource( resource: DarResource ): Map[PackageId, typesig.PackageSignature] = { - val inputStream = getClass.getClassLoader.getResourceAsStream(resource.path) - val dar: Dar[ArchivePayload] = DarReader - .readArchive(resource.path, new ZipInputStream(inputStream)) - .valueOr(e => - throw new IllegalArgumentException(s"Failed to read DAR at path ${resource.path}: $e") - ) - dar.all.map(a => a.pkgId -> typesig.reader.SignatureReader.readPackageSignature(a)._2).toMap + loadedPackageCache.getOrAcquire( + resource.path, + { path => + val inputStream = getClass.getClassLoader.getResourceAsStream(path) + val dar: Dar[ArchivePayload] = DarReader + .readArchive(resource.path, new ZipInputStream(inputStream)) + .valueOr(e => + throw new IllegalArgumentException(s"Failed to read DAR at path ${resource.path}: $e") + ) + dar.all.map(a => a.pkgId -> typesig.reader.SignatureReader.readPackageSignature(a)._2).toMap + }, + ) } def loadPackageSignaturesFromResources( diff --git a/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/environment/CommandCircuitBreakerTest.scala b/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/environment/CommandCircuitBreakerTest.scala index 33cbcb2ff1..7847b0d87f 100644 --- a/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/environment/CommandCircuitBreakerTest.scala +++ b/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/environment/CommandCircuitBreakerTest.scala @@ -1,6 +1,5 @@ package org.lfdecentralizedtrust.splice.environment -import org.apache.pekko.pattern.CircuitBreaker import com.daml.ledger.javaapi.data.Command import com.daml.metrics.api.noop.NoOpMetricsFactory import com.digitalasset.canton.{BaseTest, HasActorSystem, HasExecutionContext} @@ -11,9 +10,11 @@ import com.digitalasset.canton.config.{ 
ProcessingTimeout, } import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.time.SimClock import com.digitalasset.canton.topology.{PartyId, SynchronizerId} import com.digitalasset.canton.tracing.{NoReportingTracerProvider, TraceContext} import org.lfdecentralizedtrust.splice.codegen.java.splice.amulet.ValidatorRight +import org.lfdecentralizedtrust.splice.config.CircuitBreakerConfig import org.lfdecentralizedtrust.splice.environment.ledger.api.{DedupConfig, LedgerClient} import LedgerClient.SubmitAndWaitFor import org.lfdecentralizedtrust.splice.util.{DisclosedContracts, SpliceCircuitBreaker} @@ -30,18 +31,21 @@ class CommandCircuitBreakerTest with HasActorSystem with HasExecutionContext { val ledgerClient = mock[LedgerClient] + val clock = new SimClock(loggerFactory = loggerFactory) + + implicit val scheduler: org.apache.pekko.actor.Scheduler = actorSystem.scheduler val circuitBreaker = new SpliceCircuitBreaker( "test", - new CircuitBreaker( - actorSystem.scheduler, + CircuitBreakerConfig( maxFailures = 5, - callTimeout = 0.seconds, - resetTimeout = 5.seconds, - maxResetTimeout = 5.seconds, - exponentialBackoffFactor = 2.0, + resetTimeout = NonNegativeFiniteDuration.ofSeconds(5), + maxResetTimeout = NonNegativeFiniteDuration.ofSeconds(5), randomFactor = 0.0, + resetFailuresAfter = NonNegativeFiniteDuration.ofMinutes(1), ), + clock, + loggerFactory, ) val retryProvider = new RetryProvider( diff --git a/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/store/StoreTest.scala b/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/store/StoreTest.scala index 3ec0cce90d..febf36d824 100644 --- a/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/store/StoreTest.scala +++ b/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/store/StoreTest.scala @@ -651,6 +651,7 @@ abstract class StoreTest extends AsyncWordSpec with BaseTest { observers.map(_.toProtoPrimitive).asJava, contract.createdAt, false, + contract.identifier.getPackageId, ) } @@ -737,6 +738,7 @@ abstract class StoreTest extends AsyncWordSpec with BaseTest { created.getObservers, created.createdAt, created.isAcsDelta(), + created.getTemplateId.getPackageId, ) case exercised: ExercisedEvent => new ExercisedEvent( diff --git a/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/store/UpdateHistoryTest.scala b/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/store/UpdateHistoryTest.scala index 1c9e11e31c..900171b667 100644 --- a/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/store/UpdateHistoryTest.scala +++ b/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/store/UpdateHistoryTest.scala @@ -113,6 +113,7 @@ class UpdateHistoryTest extends UpdateHistoryTestBase { /*observers*/ Seq(party1, party2).asJava, /*createdAt*/ effectiveAt, /*acsDelta*/ false, + /*representativePackageId*/ id1.getPackageId, ), new ExercisedEvent( /*witnessParties*/ Seq(party1).asJava, @@ -730,6 +731,7 @@ class UpdateHistoryTest extends UpdateHistoryTestBase { /*observers*/ Seq(party1, party2).asJava, /*createdAt*/ effectiveAt, /*acsDelta*/ false, + /*representativePackageId*/ id1.getPackageId, ), new CreatedEvent( /*witnessParties*/ Seq(party1).asJava, @@ -749,6 +751,7 @@ class UpdateHistoryTest extends UpdateHistoryTestBase { /*observers*/ Seq(party1, party2).asJava, /*createdAt*/ effectiveAt, /*acsDelta*/ false, + /*representativePackageId*/ id1.getPackageId, ), ), synchronizerId = domain1, @@ -782,6 +785,7 @@ class UpdateHistoryTest extends UpdateHistoryTestBase { 
/*observers*/ Seq(party1, party2).asJava, /*createdAt*/ effectiveAt, /*acsDelta*/ false, + /*representativePackageId*/ id1.getPackageId, ) ), synchronizerId = domain1, @@ -813,6 +817,7 @@ class UpdateHistoryTest extends UpdateHistoryTestBase { /*observers*/ Seq(party1, party2).asJava, /*createdAt*/ effectiveAt, /*acsDelta*/ false, + /*representativePackageId*/ id1.getPackageId, ) ), synchronizerId = domain1, diff --git a/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/store/UpdateHistoryTestBase.scala b/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/store/UpdateHistoryTestBase.scala index c682b1d030..07154d2f05 100644 --- a/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/store/UpdateHistoryTestBase.scala +++ b/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/store/UpdateHistoryTestBase.scala @@ -414,6 +414,7 @@ object UpdateHistoryTestBase { /*observers = */ created.getObservers, /*createdAt = */ created.getCreatedAt, /*acsDelta = */ false, // Not preserved + /*representativePackageId = */ created.getRepresentativePackageId, ) } diff --git a/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/util/SpliceCircuitBreakerTest.scala b/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/util/SpliceCircuitBreakerTest.scala index b08121f958..8335df853e 100644 --- a/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/util/SpliceCircuitBreakerTest.scala +++ b/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/util/SpliceCircuitBreakerTest.scala @@ -4,6 +4,7 @@ import com.digitalasset.base.error.ErrorCode import com.digitalasset.canton.config.NonNegativeFiniteDuration import com.digitalasset.canton.participant.protocol.TransactionProcessor.SubmissionErrors import com.digitalasset.canton.{BaseTest, HasActorSystem, HasExecutionContext} +import com.digitalasset.canton.time.SimClock import io.grpc.StatusRuntimeException import org.apache.pekko.actor.Scheduler import org.apache.pekko.pattern.CircuitBreakerOpenException @@ -26,6 +27,8 @@ class SpliceCircuitBreakerTest private implicit val scheduler: Scheduler = actorSystem.scheduler + val clock = new SimClock(loggerFactory = loggerFactory) + private def createCircuitBreaker(): SpliceCircuitBreaker = { val config = CircuitBreakerConfig( maxFailures = 2, @@ -34,8 +37,9 @@ class SpliceCircuitBreakerTest maxResetTimeout = NonNegativeFiniteDuration.ofSeconds(1), exponentialBackoffFactor = 1.0, randomFactor = 0.0, + resetFailuresAfter = NonNegativeFiniteDuration.ofMinutes(1), ) - SpliceCircuitBreaker("test", config, logger) + SpliceCircuitBreaker("test", config, clock, loggerFactory) } "SpliceCircuitBreaker" should { @@ -155,5 +159,27 @@ class SpliceCircuitBreakerTest cb.isClosed shouldBe true } + + "failures reset after resetFailuresAfter time elapsed" in { + val cb = createCircuitBreaker() + val future1 = cb.withCircuitBreaker(Future.failed(new RuntimeException("test failure 1"))) + whenReady(future1.failed) { ex => + ex shouldBe a[RuntimeException] + cb.isClosed shouldBe true + } + clock.advance(java.time.Duration.ofMinutes(1)) + // Second failure does not open the circuit breaker as it is after resetFailuresAfter + val future2 = cb.withCircuitBreaker(Future.failed(new RuntimeException("test failure 2"))) + whenReady(future2.failed) { ex => + ex shouldBe a[RuntimeException] + cb.isClosed shouldBe true + } + // Third failure is not after resetFailuresAfter so the last 2 failures now exceed the failure threshold and it opens. 
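+      // Timeline of this test on the SimClock (maxFailures = 2, resetFailuresAfter = 1 minute):
+      //   t=0    failure 1 -> count 1, lastFailure = t0
+      //   t=1min failure 2 -> lastFailure is resetFailuresAfter old, so the count is reset
+      //          before the call and ends at 1; the breaker stays closed
+      //   t=1min failure 3 -> lastFailure (just set by failure 2) is fresh, the count
+      //          reaches 2 and the breaker opens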
+ val future3 = cb.withCircuitBreaker(Future.failed(new RuntimeException("test failure 3"))) + whenReady(future3.failed) { ex => + ex shouldBe a[RuntimeException] + cb.isOpen shouldBe true + } + } } } diff --git a/apps/metrics-docs/src/main/scala/org/lfdecentralizedtrust/splice/metrics/MetricsDocs.scala b/apps/metrics-docs/src/main/scala/org/lfdecentralizedtrust/splice/metrics/MetricsDocs.scala index 9dd88e9eed..42c82d73b5 100644 --- a/apps/metrics-docs/src/main/scala/org/lfdecentralizedtrust/splice/metrics/MetricsDocs.scala +++ b/apps/metrics-docs/src/main/scala/org/lfdecentralizedtrust/splice/metrics/MetricsDocs.scala @@ -9,7 +9,7 @@ import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.metrics.{MetricDoc, MetricsDocGenerator} -import com.digitalasset.canton.topology.PartyId +import com.digitalasset.canton.topology.{ParticipantId, PartyId} import org.lfdecentralizedtrust.splice.admin.api.client.DamlGrpcClientMetrics import org.lfdecentralizedtrust.splice.automation.TriggerMetrics import org.lfdecentralizedtrust.splice.scan.store.db.DbScanStoreMetrics @@ -18,6 +18,7 @@ import org.lfdecentralizedtrust.splice.sv.automation.singlesv.SequencerPruningMe import org.lfdecentralizedtrust.splice.sv.automation.ReportSvStatusMetricsExportTrigger import org.lfdecentralizedtrust.splice.sv.store.db.DbSvDsoStoreMetrics import org.lfdecentralizedtrust.splice.store.{DomainParamsStore, HistoryMetrics, StoreMetrics} +import org.lfdecentralizedtrust.splice.validator.metrics.TopologyMetrics import org.lfdecentralizedtrust.splice.wallet.metrics.AmuletMetrics final case class GeneratedMetrics( @@ -47,6 +48,8 @@ final case class GeneratedMetrics( def renderSection(prefix: String, metrics: List[MetricDoc.Item]): String = { val header = s"$prefix Metrics" (Seq( + s".. _${prefix.toLowerCase}-metrics-reference:", + "", header, "+" * header.length, ) ++ @@ -81,6 +84,11 @@ object MetricsDocs { generator.reset() // validator new AmuletMetrics(walletUserParty, generator) + val topologyMetrics = new TopologyMetrics(generator) + // force creation of a gauge for a dummy participant + val _ = topologyMetrics.getNumPartiesPerParticipantGauge( + ParticipantId.tryFromProtoPrimitive("PAR::participantId::namespace") + ) val validatorMetrics = generator.getAll() generator.reset() // sv diff --git a/apps/scan/frontend/src/components/MedianAmuletPrice.tsx b/apps/scan/frontend/src/components/MedianAmuletPrice.tsx new file mode 100644 index 0000000000..3888a90ee0 --- /dev/null +++ b/apps/scan/frontend/src/components/MedianAmuletPrice.tsx @@ -0,0 +1,60 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { + AmountDisplay, + Loading, + medianPriceVotes, + useVotesHooks, +} from '@lfdecentralizedtrust/splice-common-frontend'; +import BigNumber from 'bignumber.js'; +import React, { useMemo } from 'react'; + +import { Stack } from '@mui/material'; +import Typography from '@mui/material/Typography'; + +interface MedianAmuletPriceProps { + amuletName: string; +} + +export const MedianAmuletPrice: React.FC = props => { + const { amuletName } = props; + + const voteHooks = useVotesHooks(); + const amuletPriceVotesQuery = voteHooks.useAmuletPriceVotes(); + + const amuletPrices = useMemo( + () => + amuletPriceVotesQuery.data + ?.map(v => (v.amuletPrice ? 
new BigNumber(v.amuletPrice) : undefined)) + .filter((p): p is BigNumber => !!p), + [amuletPriceVotesQuery.data] + ); + + const medianAmuletPrice = useMemo( + () => (amuletPrices ? medianPriceVotes(amuletPrices) : undefined), + [amuletPrices] + ); + + if (amuletPriceVotesQuery.isLoading) { + return <Loading />; + } + + if (amuletPriceVotesQuery.isError) { + return <Typography>Error, something went wrong.</Typography>
; + } + + return ( + + + {amuletName} Price for Next Open Mining Round + + + {medianAmuletPrice && } + + + Median of {amuletName} prices voted by all Super Validators + + + ); +}; diff --git a/apps/scan/frontend/src/routes/amuletPriceVotes.tsx b/apps/scan/frontend/src/routes/amuletPriceVotes.tsx index 263384dd0b..ca9d929528 100644 --- a/apps/scan/frontend/src/routes/amuletPriceVotes.tsx +++ b/apps/scan/frontend/src/routes/amuletPriceVotes.tsx @@ -4,12 +4,17 @@ import { Box, Container } from '@mui/material'; import Layout from '../components/Layout'; import ListAmuletPriceVotes from '../components/votes/ListAmuletPriceVotes'; +import { MedianAmuletPrice } from '../components/MedianAmuletPrice'; +import { useScanConfig } from '../utils'; const AmuletPriceVotes: React.FC = () => { + const config = useScanConfig(); + return ( + diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/ScanAppBootstrap.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/ScanAppBootstrap.scala index 78d487c2e1..239c1ccf59 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/ScanAppBootstrap.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/ScanAppBootstrap.scala @@ -109,7 +109,7 @@ object ScanAppBootstrap { testingConfigInternal, clock, scanMetrics, - new CommunityStorageFactory(scanConfig.storage), + new StorageSingleFactory(scanConfig.storage), loggerFactory, futureSupervisor, configuredOpenTelemetry, diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/HttpScanHandler.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/HttpScanHandler.scala index 19505ef13d..cef5344e68 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/HttpScanHandler.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/HttpScanHandler.scala @@ -523,14 +523,20 @@ class HttpScanHandler( store .getTopProvidersByAppRewards(asOfEndOfRound, limit) .map(res => - v0.ScanResource.GetTopProvidersByAppRewardsResponse.OK( - definitions - .GetTopProvidersByAppRewardsResponse( - res - .map(p => definitions.PartyAndRewards(Codec.encode(p._1), Codec.encode(p._2))) - .toVector - ) - ) + if (res.isEmpty) { + v0.ScanResource.GetTopProvidersByAppRewardsResponse.NotFound( + ErrorResponse(s"No top providers by app rewards found for round $asOfEndOfRound") + ) + } else { + v0.ScanResource.GetTopProvidersByAppRewardsResponse.OK( + definitions + .GetTopProvidersByAppRewardsResponse( + res + .map(p => definitions.PartyAndRewards(Codec.encode(p._1), Codec.encode(p._2))) + .toVector + ) + ) + } ) .transform( HttpErrorHandler.onGrpcNotFound(s"Data for round ${asOfEndOfRound} not yet computed") @@ -549,14 +555,22 @@ class HttpScanHandler( store .getTopValidatorsByValidatorRewards(asOfEndOfRound, limit) .map(res => - v0.ScanResource.GetTopValidatorsByValidatorRewardsResponse.OK( - definitions - .GetTopValidatorsByValidatorRewardsResponse( - res - .map(p => definitions.PartyAndRewards(Codec.encode(p._1), Codec.encode(p._2))) - .toVector + if (res.isEmpty) { + v0.ScanResource.GetTopValidatorsByValidatorRewardsResponse.NotFound( + ErrorResponse( + s"No top validators by validator rewards found for round $asOfEndOfRound" ) - ) + ) + } else { + v0.ScanResource.GetTopValidatorsByValidatorRewardsResponse.OK( + definitions + .GetTopValidatorsByValidatorRewardsResponse( + res + .map(p => definitions.PartyAndRewards(Codec.encode(p._1), Codec.encode(p._2))) + .toVector + ) + ) 
+ } ) .transform( HttpErrorHandler.onGrpcNotFound(s"Data for round ${asOfEndOfRound} not yet computed") @@ -596,21 +610,29 @@ class HttpScanHandler( store .getTopValidatorsByPurchasedTraffic(asOfEndOfRound, limit) .map(validatorTraffic => - v0.ScanResource.GetTopValidatorsByPurchasedTrafficResponse.OK( - definitions.GetTopValidatorsByPurchasedTrafficResponse( - validatorTraffic - .map(t => - definitions.ValidatorPurchasedTraffic( - Codec.encode(t.validator), - t.numPurchases, - t.totalTrafficPurchased, - Codec.encode(t.totalCcSpent), - t.lastPurchasedInRound, + if (validatorTraffic.isEmpty) { + v0.ScanResource.GetTopValidatorsByPurchasedTrafficResponse.NotFound( + ErrorResponse( + s"No top validators by purchased traffic found for round $asOfEndOfRound" + ) + ) + } else { + v0.ScanResource.GetTopValidatorsByPurchasedTrafficResponse.OK( + definitions.GetTopValidatorsByPurchasedTrafficResponse( + validatorTraffic + .map(t => + definitions.ValidatorPurchasedTraffic( + Codec.encode(t.validator), + t.numPurchases, + t.totalTrafficPurchased, + Codec.encode(t.totalCcSpent), + t.lastPurchasedInRound, + ) ) - ) - .toVector + .toVector + ) ) - ) + } ) .transform( HttpErrorHandler.onGrpcNotFound(s"Data for round ${asOfEndOfRound} not yet computed") diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/ScanHttpEncodings.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/ScanHttpEncodings.scala index f896fcc979..b4228e8178 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/ScanHttpEncodings.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/ScanHttpEncodings.scala @@ -337,6 +337,7 @@ sealed trait ScanHttpEncodings { http.observers.asJava, http.createdAt.toInstant, /* acsDelta = */ false, + /* representativePackageId = */ templateId.getPackageId, ) } @@ -579,6 +580,7 @@ object ScanHttpEncodings { assign.createdEvent.getObservers, assign.createdEvent.createdAt, assign.createdEvent.isAcsDelta, + assign.createdEvent.getRepresentativePackageId, ) ), ) @@ -629,6 +631,7 @@ object ScanHttpEncodings { created.getObservers, created.createdAt, created.isAcsDelta, + created.getRepresentativePackageId, ) case (nodeId, exercised: javaApi.ExercisedEvent) => val newNodeId = mapping(exercised.getNodeId) diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/AcsSnapshotStore.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/AcsSnapshotStore.scala index 9b3255a34d..ea10e593de 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/AcsSnapshotStore.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/AcsSnapshotStore.scala @@ -14,7 +14,7 @@ import org.lfdecentralizedtrust.splice.scan.store.AcsSnapshotStore.{ } import org.lfdecentralizedtrust.splice.store.UpdateHistory.SelectFromCreateEvents import org.lfdecentralizedtrust.splice.store.{HardLimit, Limit, LimitHelpers, UpdateHistory} -import org.lfdecentralizedtrust.splice.store.db.{AcsJdbcTypes, AcsQueries} +import org.lfdecentralizedtrust.splice.store.db.{AcsJdbcTypes, AcsQueries, AdvisoryLockIds} import org.lfdecentralizedtrust.splice.util.{Contract, HoldingsSummary, PackageQualifiedName} import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown} @@ -25,7 +25,7 @@ import com.digitalasset.canton.resource.DbStorage.Implicits.BuilderChain.toSQLAc import 
com.digitalasset.canton.topology.PartyId import com.digitalasset.canton.tracing.TraceContext import org.lfdecentralizedtrust.splice.store.events.SpliceCreatedEvent -import slick.dbio.DBIOAction +import slick.dbio.{DBIOAction, Effect, NoStream} import slick.jdbc.canton.ActionBasedSQLInterpolation.Implicits.actionBasedSQLInterpolationCanton import slick.jdbc.{GetResult, JdbcProfile} @@ -158,12 +158,41 @@ class AcsSnapshotStore( join creates_to_insert on inserted_rows.create_id = creates_to_insert.row_id having min(inserted_rows.row_id) is not null; """).toActionBuilder.asUpdate - storage.update(statement, "insertNewSnapshot") + storage.queryAndUpdate(withExclusiveSnapshotDataLock(statement), "insertNewSnapshot") }.andThen { _ => AcsSnapshotStore.PreventConcurrentSnapshotsSemaphore.release() } } + /** Wraps the given action in a transaction that holds an exclusive lock on the acs_snapshot_data table. + * + * Note: The acs_snapshot_data table must not have interleaved rows from two different acs snapshots. + * In rare cases, it can happen that the application crashes while writing a snapshot, then + * restarts and starts writing a different snapshot while the previous statement is still running. + * + * The exclusive lock prevents this. + * We use a transaction-scoped advisory lock, which is released when the transaction ends. + * Regular locks (e.g. obtained via `LOCK TABLE ... IN EXCLUSIVE MODE`) would conflict with harmless + * background operations like autovacuum or create index concurrently. + * + * In case the application crashes while holding the lock, the server _should_ close the connection + * and abort the transaction as soon as it detects a disconnect. + * TODO(#2488): Verify that the server indeed closes connections in a reasonable time. + */ + private def withExclusiveSnapshotDataLock[T, E <: Effect]( + action: DBIOAction[T, NoStream, E] + ): DBIOAction[T, NoStream, Effect.Read & Effect.Transactional & E] = + (for { + lockResult <- sql"SELECT pg_try_advisory_xact_lock(${AdvisoryLockIds.acsSnapshotDataInsert})" + .as[Boolean] + .head + result <- lockResult match { + case true => action + // Lock conflicts should almost never happen. If they do, we fail immediately and rely on the trigger infrastructure to retry and log errors. 
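+        // The advisory lock is transaction-scoped: there is no explicit unlock, it is
+        // released on COMMIT/ROLLBACK or when the server aborts the transaction after a
+        // disconnect. A sketch of the semantics in two psql sessions (key 0x73706c01,
+        // i.e. AdvisoryLockIds.acsSnapshotDataInsert, written in decimal):
+        //   session A: BEGIN; SELECT pg_try_advisory_xact_lock(1936747521); -- true
+        //   session B: BEGIN; SELECT pg_try_advisory_xact_lock(1936747521); -- false while A's tx is open
+        //   session A: COMMIT;                                              -- lock released
+        //   session B: SELECT pg_try_advisory_xact_lock(1936747521);        -- true now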
+ case false => DBIOAction.failed(new Exception("Failed to acquire exclusive lock")) + } + } yield result).transactionally + def deleteSnapshot( snapshot: AcsSnapshot )(implicit diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/db/DbScanStore.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/db/DbScanStore.scala index c04b2ae4d1..98baceec33 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/db/DbScanStore.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/db/DbScanStore.scala @@ -681,27 +681,27 @@ class DbScanStore( tc: TraceContext ): Future[Seq[(PartyId, BigDecimal)]] = waitUntilAcsIngested { for { - rows <- ensureAggregated(asOfEndOfRound) { _ => - storage.query( - sql""" - with ranked_providers_by_app_rewards as ( - select party as provider, - max(cumulative_app_rewards) as cumulative_app_rewards, - rank() over (order by max(cumulative_app_rewards) desc) as rank_nr - from round_party_totals - where store_id = $roundTotalsStoreId - and closed_round <= $asOfEndOfRound - and cumulative_app_rewards > 0 - group by party - ) - select provider, - cumulative_app_rewards - from ranked_providers_by_app_rewards - where rank_nr <= $limit - order by rank_nr; + rows <- ensureAggregated(asOfEndOfRound) { lastAggregatedRound => + if (lastAggregatedRound == asOfEndOfRound) { + storage.query( + sql""" + select rpt.party as provider, + rpt.cumulative_app_rewards as cumulative_app_rewards + from round_party_totals rpt + join active_parties ap + on rpt.store_id = ap.store_id + and rpt.party = ap.party + and rpt.closed_round = ap.closed_round + and rpt.store_id = $roundTotalsStoreId + and cumulative_app_rewards > 0 + order by cumulative_app_rewards desc, rpt.party desc + limit $limit; """.as[(PartyId, BigDecimal)], - "getTopProvidersByAppRewards", - ) + "getTopProvidersByAppRewards", + ) + } else { + Future.successful(Seq()) + } } } yield rows } @@ -710,27 +710,27 @@ class DbScanStore( tc: TraceContext ): Future[Seq[(PartyId, BigDecimal)]] = waitUntilAcsIngested { for { - rows <- ensureAggregated(asOfEndOfRound) { _ => - storage.query( - sql""" - with ranked_validators_by_validator_rewards as ( - select party as validator, - max(cumulative_validator_rewards) as cumulative_validator_rewards, - rank() over (order by max(cumulative_validator_rewards) desc) as rank_nr - from round_party_totals - where store_id = $roundTotalsStoreId - and closed_round <= $asOfEndOfRound - and cumulative_validator_rewards > 0 - group by party - ) - select validator, - cumulative_validator_rewards - from ranked_validators_by_validator_rewards - where rank_nr <= $limit - order by rank_nr; - """.as[(PartyId, BigDecimal)], - "getTopValidatorsByValidatorRewards", - ) + rows <- ensureAggregated(asOfEndOfRound) { lastAggregatedRound => + if (lastAggregatedRound == asOfEndOfRound) { + storage.query( + sql""" + select rpt.party as validator, + rpt.cumulative_validator_rewards as cumulative_validator_rewards + from round_party_totals rpt + join active_parties ap + on rpt.store_id = ap.store_id + and rpt.party = ap.party + and rpt.closed_round = ap.closed_round + and rpt.store_id = $roundTotalsStoreId + and cumulative_validator_rewards > 0 + order by cumulative_validator_rewards desc, rpt.party desc + limit $limit; + """.as[(PartyId, BigDecimal)], + "getTopValidatorsByValidatorRewards", + ) + } else { + Future.successful(Seq()) + } } } yield rows } @@ -739,46 +739,42 @@ class DbScanStore( tc: TraceContext ): 
Future[Seq[HttpScanAppClient.ValidatorPurchasedTraffic]] = waitUntilAcsIngested { for { - rows <- ensureAggregated(asOfEndOfRound) { _ => - // There might not be a row for a party where closed_round = asOfEndOfRound, so we need to use the - // max cumulatives for each party up to including asOfEndOfRound - // and separately get the last purchased round for each party in the leaderboard - storage.query( - sql""" - with ranked_validators_by_purchased_traffic as ( - select party as validator, - max(cumulative_traffic_num_purchases) as cumulative_traffic_num_purchases, - max(cumulative_traffic_purchased) as cumulative_traffic_purchased, - max(cumulative_traffic_purchased_cc_spent) as cumulative_traffic_purchased_cc_spent, - rank() over (order by max(cumulative_traffic_purchased) desc) as rank_nr - from round_party_totals - where store_id = $roundTotalsStoreId - and closed_round <= $asOfEndOfRound - and cumulative_traffic_purchased > 0 - group by party - ), - last_purchases as ( - select party as validator, - max(closed_round) as last_purchased_in_round - from round_party_totals - where store_id = $roundTotalsStoreId - and closed_round <= $asOfEndOfRound - and traffic_purchased > 0 - group by party - ) - select rv.validator, - rv.cumulative_traffic_num_purchases, - rv.cumulative_traffic_purchased, - rv.cumulative_traffic_purchased_cc_spent, - coalesce(lp.last_purchased_in_round, 0) - from ranked_validators_by_purchased_traffic rv - left join last_purchases lp - on rv.validator = lp.validator - where rv.rank_nr <= $limit - order by rv.rank_nr; - """.as[(PartyId, Long, Long, BigDecimal, Long)], - "getTopValidatorsByPurchasedTraffic", - ) + rows <- ensureAggregated(asOfEndOfRound) { lastAggregatedRound => + if (lastAggregatedRound == asOfEndOfRound) { + storage.query( + sql""" + select rpt.party as validator, + rpt.cumulative_traffic_num_purchases, + rpt.cumulative_traffic_purchased, + rpt.cumulative_traffic_purchased_cc_spent, + coalesce( + ( + select closed_round as last_purchased_in_round + from round_party_totals + where store_id = rpt.store_id + and store_id = $roundTotalsStoreId + and party = rpt.party + and traffic_purchased > 0 + order by closed_round desc + limit 1 + ), + 0 + ) as last_purchased_in_round + from round_party_totals rpt + join active_parties ap + on rpt.store_id = ap.store_id + and rpt.party = ap.party + and rpt.closed_round = ap.closed_round + and rpt.store_id = $roundTotalsStoreId + and cumulative_traffic_purchased > 0 + order by cumulative_traffic_purchased desc, rpt.party desc + limit $limit; + """.as[(PartyId, Long, Long, BigDecimal, Long)], + "getTopValidatorsByPurchasedTraffic", + ) + } else { + Future.successful(Seq()) + } } } yield rows.map((HttpScanAppClient.ValidatorPurchasedTraffic.apply _).tupled) } diff --git a/apps/scan/src/test/scala/org/lfdecentralizedtrust/splice/store/db/ScanAggregatorTest.scala b/apps/scan/src/test/scala/org/lfdecentralizedtrust/splice/store/db/ScanAggregatorTest.scala index a3a3b9ee1f..7e5efd2c33 100644 --- a/apps/scan/src/test/scala/org/lfdecentralizedtrust/splice/store/db/ScanAggregatorTest.scala +++ b/apps/scan/src/test/scala/org/lfdecentralizedtrust/splice/store/db/ScanAggregatorTest.scala @@ -755,25 +755,32 @@ class ScanAggregatorTest val topProviders = getTopProvidersByAppRewardsFromTxLog(round, limit, aggr.txLogStoreId).futureValueUS topProviders should not be empty - store.getTopProvidersByAppRewards(round, limit).futureValue shouldBe topProviders + if (i == lastRound.toInt) { + store.getTopProvidersByAppRewards(round, 
limit).futureValue shouldBe topProviders + } val topValidatorsByValidatorRewards = getTopValidatorsByValidatorRewardsFromTxLog( round, limit, aggr.txLogStoreId, ).futureValueUS - store - .getTopValidatorsByValidatorRewards(round, limit) - .futureValue shouldBe topValidatorsByValidatorRewards - val topValidatorsByPurchasedTraffic = - getTopValidatorsByPurchasedTrafficFromTxLog( - round, - limit, - aggr.txLogStoreId, - ).futureValue - store - .getTopValidatorsByPurchasedTraffic(round, limit) - .futureValue shouldBe topValidatorsByPurchasedTraffic + + if (i == lastRound.toInt) { + store + .getTopValidatorsByValidatorRewards(round, limit) + .futureValue shouldBe topValidatorsByValidatorRewards + } + if (i == lastRound.toInt) { + val topValidatorsByPurchasedTraffic = + getTopValidatorsByPurchasedTrafficFromTxLog( + round, + limit, + aggr.txLogStoreId, + ).futureValue + store + .getTopValidatorsByPurchasedTraffic(round, limit) + .futureValue shouldBe topValidatorsByPurchasedTraffic + } } val topProviders = getTopProvidersByAppRewardsFromTxLog(lastRound, limit, aggr.txLogStoreId).futureValueUS @@ -1142,7 +1149,7 @@ class ScanAggregatorTest and entry_type = ${EntryType.AppRewardTxLogEntry} and round <= $asOfEndOfRound group by rewarded_party - order by total_app_rewards desc + order by total_app_rewards desc, rewarded_party desc limit $limit; """.as[(PartyId, BigDecimal)] storage.query(q, "getTopProvidersByAppRewardsFromTxLog") @@ -1160,7 +1167,7 @@ class ScanAggregatorTest and entry_type = ${EntryType.ValidatorRewardTxLogEntry} and round <= $asOfEndOfRound group by rewarded_party - order by total_validator_rewards desc + order by total_validator_rewards desc, rewarded_party desc limit $limit; """.as[(PartyId, BigDecimal)] storage.query(q, "getTopValidatorsByValidatorRewardsFromTxLog") @@ -1184,7 +1191,7 @@ class ScanAggregatorTest and entry_type = ${EntryType.ExtraTrafficPurchaseTxLogEntry} and round <= $asOfEndOfRound group by extra_traffic_validator - order by total_traffic_purchased desc + order by total_traffic_purchased desc, validator desc limit $limit; """.as[(PartyId, Long, Long, BigDecimal, Long)], "getTopValidatorsByPurchasedTrafficFromTxLog", diff --git a/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/SplitwellAppBootstrap.scala b/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/SplitwellAppBootstrap.scala index 719130aef7..411d627d9f 100644 --- a/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/SplitwellAppBootstrap.scala +++ b/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/SplitwellAppBootstrap.scala @@ -108,7 +108,7 @@ object SplitwellAppBootstrap { testingConfigInternal, clock, splitwellMetrics, - new CommunityStorageFactory(splitwellConfig.storage), + new StorageSingleFactory(splitwellConfig.storage), loggerFactory, futureSupervisor, configuredOpenTelemetry, diff --git a/apps/sv/frontend/src/__tests__/governance/action-required-section.test.tsx b/apps/sv/frontend/src/__tests__/governance/action-required-section.test.tsx index f022a47a6d..94a71f6292 100644 --- a/apps/sv/frontend/src/__tests__/governance/action-required-section.test.tsx +++ b/apps/sv/frontend/src/__tests__/governance/action-required-section.test.tsx @@ -45,6 +45,16 @@ describe('Action Required', () => { expect(true).toBe(true); }); + test('should render no items message when no items available', () => { + render( + + + + ); + + expect(screen.getByText('No Action Required items available')).toBeDefined(); + }); + 
test('should render all action required requests', () => { render( diff --git a/apps/sv/frontend/src/__tests__/governance/forms/grant-revoke-featured-app-form.test.tsx b/apps/sv/frontend/src/__tests__/governance/forms/grant-revoke-featured-app-form.test.tsx index fdc6509931..043269217e 100644 --- a/apps/sv/frontend/src/__tests__/governance/forms/grant-revoke-featured-app-form.test.tsx +++ b/apps/sv/frontend/src/__tests__/governance/forms/grant-revoke-featured-app-form.test.tsx @@ -13,6 +13,7 @@ import dayjs from 'dayjs'; import { GrantRevokeFeaturedAppForm } from '../../../components/forms/GrantRevokeFeaturedAppForm'; import { server, svUrl } from '../../setup/setup'; import { rest } from 'msw'; +import { PROPOSAL_SUMMARY_SUBTITLE, PROPOSAL_SUMMARY_TITLE } from '../../../utils/constants'; describe('SV user can', () => { test('login and see the SV party ID', async () => { @@ -54,6 +55,10 @@ describe('Grant Featured App Form', () => { expect(summaryInput).toBeDefined(); expect(summaryInput.getAttribute('value')).toBeNull(); + const summarySubtitle = screen.getByTestId('grant-featured-app-summary-subtitle'); + expect(summarySubtitle).toBeDefined(); + expect(summarySubtitle.textContent).toBe(PROPOSAL_SUMMARY_SUBTITLE); + const urlInput = screen.getByTestId('grant-featured-app-url'); expect(urlInput).toBeDefined(); expect(urlInput.getAttribute('value')).toBe(''); @@ -198,7 +203,7 @@ describe('Grant Featured App Form', () => { await user.click(submitButton); - expect(screen.getByText('Proposal Summary')).toBeDefined(); + expect(screen.getByText(PROPOSAL_SUMMARY_TITLE)).toBeDefined(); }); }); @@ -361,7 +366,7 @@ describe('Revoke Featured App Form', () => { await user.click(submitButton); - expect(screen.getByText('Proposal Summary')).toBeDefined(); + expect(screen.getByText(PROPOSAL_SUMMARY_TITLE)).toBeDefined(); }); test('should show error on form if submission fails', async () => { diff --git a/apps/sv/frontend/src/__tests__/governance/forms/offboard-sv-form.test.tsx b/apps/sv/frontend/src/__tests__/governance/forms/offboard-sv-form.test.tsx index 6a53410eba..b105257e4f 100644 --- a/apps/sv/frontend/src/__tests__/governance/forms/offboard-sv-form.test.tsx +++ b/apps/sv/frontend/src/__tests__/governance/forms/offboard-sv-form.test.tsx @@ -13,6 +13,7 @@ import dayjs from 'dayjs'; import { OffboardSvForm } from '../../../components/forms/OffboardSvForm'; import { server, svUrl } from '../../setup/setup'; import { rest } from 'msw'; +import { PROPOSAL_SUMMARY_SUBTITLE, PROPOSAL_SUMMARY_TITLE } from '../../../utils/constants'; describe('SV user can', () => { test('login and see the SV party ID', async () => { @@ -54,6 +55,10 @@ describe('Offboard SV Form', () => { expect(summaryInput).toBeDefined(); expect(summaryInput.getAttribute('value')).toBeNull(); + const summarySubtitle = screen.getByTestId('offboard-sv-summary-subtitle'); + expect(summarySubtitle).toBeDefined(); + expect(summarySubtitle.textContent).toBe(PROPOSAL_SUMMARY_SUBTITLE); + const urlInput = screen.getByTestId('offboard-sv-url'); expect(urlInput).toBeDefined(); expect(urlInput.getAttribute('value')).toBe(''); @@ -228,7 +233,7 @@ describe('Offboard SV Form', () => { await user.click(submitButton); - expect(screen.getByText('Proposal Summary')).toBeDefined(); + expect(screen.getByText(PROPOSAL_SUMMARY_TITLE)).toBeDefined(); }); test('should show error on form if submission fails', async () => { diff --git a/apps/sv/frontend/src/__tests__/governance/forms/pending-fields.test.tsx 
b/apps/sv/frontend/src/__tests__/governance/forms/pending-fields.test.tsx index 13f27de238..f4e1df0ea3 100644 --- a/apps/sv/frontend/src/__tests__/governance/forms/pending-fields.test.tsx +++ b/apps/sv/frontend/src/__tests__/governance/forms/pending-fields.test.tsx @@ -14,6 +14,7 @@ import { Wrapper } from '../../helpers'; import { svPartyId } from '../../mocks/constants'; import { server, svUrl } from '../../setup/setup'; import dayjs from 'dayjs'; +import { SetAmuletConfigRulesForm } from '../../../components/forms/SetAmuletConfigRulesForm'; const today = dayjs(); const proposals: ListDsoRulesVoteRequestsResponse = { @@ -60,7 +61,7 @@ const proposals: ListDsoRulesVoteRequestsResponse = { ], }; -describe('Pending Fields', () => { +describe('DSO Pending Fields', () => { test('login and see the SV party ID', async () => { const user = userEvent.setup(); render( @@ -79,8 +80,10 @@ describe('Pending Fields', () => { expect(await screen.findAllByDisplayValue(svPartyId)).toBeDefined(); }); +}); - test('Pending confirmation fields should be disabled and pending info displayed', async () => { +describe('Pending Fields', () => { + test('DSO Pending fields should be disabled and pending info displayed', async () => { server.use( rest.get(`${svUrl}/v0/admin/sv/voterequests`, (_, res, ctx) => { return res(ctx.json(proposals)); @@ -115,7 +118,9 @@ describe('Pending Fields', () => { ); expect(acsPendingValueDisplay).toBeDefined(); expect(acsPendingValueDisplay).toHaveTextContent('Pending Configuration: 2100'); - expect(acsPendingValueDisplay).toHaveTextContent(/This proposal will go into effect in 4 days/); + expect(acsPendingValueDisplay).toHaveTextContent( + /This pending configuration will go into effect in 4 days/ + ); const trafficThresholdPendingFieldInput = await screen.findByTestId( 'config-field-numMemberTrafficContractsThreshold' @@ -128,7 +133,27 @@ describe('Pending Fields', () => { expect(trafficThresholdPendingValueDisplay).toBeDefined(); expect(trafficThresholdPendingValueDisplay).toHaveTextContent('Pending Configuration: 100'); expect(trafficThresholdPendingValueDisplay).toHaveTextContent( - /This proposal will go into effect at Threshold/ + /This pending configuration will go into effect at Threshold/ ); }); + + test('Amulet Pending fields validation', async () => { + render( + + + + ); + + await waitFor( + () => { + expect( + screen.queryByText('Some fields are disabled for editing due to pending votes.') + ).not.toBeNull(); + }, + { timeout: 5000 } + ); + + const pendingLabels = screen.queryAllByTestId(/^config-pending-value-/); + expect(pendingLabels.length).toBe(7); + }); }); diff --git a/apps/sv/frontend/src/__tests__/governance/forms/set-amulet-rules-form.test.tsx b/apps/sv/frontend/src/__tests__/governance/forms/set-amulet-rules-form.test.tsx index 868c16ff5e..7711a02644 100644 --- a/apps/sv/frontend/src/__tests__/governance/forms/set-amulet-rules-form.test.tsx +++ b/apps/sv/frontend/src/__tests__/governance/forms/set-amulet-rules-form.test.tsx @@ -4,6 +4,7 @@ import { render, screen, waitFor } from '@testing-library/react'; import { describe, expect, test } from 'vitest'; import userEvent from '@testing-library/user-event'; +import { rest } from 'msw'; import { SvConfigProvider } from '../../../utils'; import App from '../../../App'; import { svPartyId } from '../../mocks/constants'; @@ -11,6 +12,8 @@ import { Wrapper } from '../../helpers'; import { SetAmuletConfigRulesForm } from '../../../components/forms/SetAmuletConfigRulesForm'; import dayjs from 'dayjs'; import { 
dateTimeFormatISO } from '@lfdecentralizedtrust/splice-common-frontend-utils'; +import { server, svUrl } from '../../setup/setup'; +import { PROPOSAL_SUMMARY_SUBTITLE, PROPOSAL_SUMMARY_TITLE } from '../../../utils/constants'; describe('SV user can', () => { test('login and see the SV party ID', async () => { @@ -37,7 +40,7 @@ describe('Set Amulet Config Rules Form', { timeout: 5000 }, () => { test('should render all Set Amulet Config Rules Form components', () => { render( - Promise.resolve()} /> + ); @@ -52,6 +55,10 @@ describe('Set Amulet Config Rules Form', { timeout: 5000 }, () => { expect(summaryInput).toBeDefined(); expect(summaryInput.getAttribute('value')).toBeNull(); + const summarySubtitle = screen.getByTestId('set-amulet-config-rules-summary-subtitle'); + expect(summarySubtitle).toBeDefined(); + expect(summarySubtitle.textContent).toBe(PROPOSAL_SUMMARY_SUBTITLE); + const urlInput = screen.getByTestId('set-amulet-config-rules-url'); expect(urlInput).toBeDefined(); expect(urlInput.getAttribute('value')).toBe(''); @@ -74,47 +81,51 @@ describe('Set Amulet Config Rules Form', { timeout: 5000 }, () => { ); }); - test('should render errors when submit button is clicked on new form', async () => { - const user = userEvent.setup(); + test( + 'should render errors when submit button is clicked on new form', + async () => { + const user = userEvent.setup(); - render( - - Promise.resolve()} /> - - ); + render( + + + + ); - const actionInput = screen.getByTestId('set-amulet-config-rules-action'); - const submitButton = screen.getByTestId('submit-button'); - expect(submitButton).toBeDefined(); + const actionInput = screen.getByTestId('set-amulet-config-rules-action'); + const submitButton = screen.getByTestId('submit-button'); + expect(submitButton).toBeDefined(); - await user.click(submitButton); - expect(submitButton.getAttribute('disabled')).toBeDefined(); - await expect(async () => await user.click(submitButton)).rejects.toThrowError( - /Unable to perform pointer interaction/ - ); + await user.click(submitButton); + expect(submitButton.getAttribute('disabled')).toBeDefined(); + await expect(async () => await user.click(submitButton)).rejects.toThrowError( + /Unable to perform pointer interaction/ + ); - expect(screen.getByText('Summary is required')).toBeDefined(); - expect(screen.getByText('Invalid URL')).toBeDefined(); + expect(screen.getByText('Summary is required')).toBeDefined(); + expect(screen.getByText('Invalid URL')).toBeDefined(); - // completing the form should reenable the submit button - const summaryInput = screen.getByTestId('set-amulet-config-rules-summary'); - expect(summaryInput).toBeDefined(); - await user.type(summaryInput, 'Summary of the proposal'); + // completing the form should reenable the submit button + const summaryInput = screen.getByTestId('set-amulet-config-rules-summary'); + expect(summaryInput).toBeDefined(); + await user.type(summaryInput, 'Summary of the proposal'); - const urlInput = screen.getByTestId('set-amulet-config-rules-url'); - expect(urlInput).toBeDefined(); - await user.type(urlInput, 'https://example.com'); + const urlInput = screen.getByTestId('set-amulet-config-rules-url'); + expect(urlInput).toBeDefined(); + await user.type(urlInput, 'https://example.com'); - await user.click(actionInput); // using this to trigger the onBlur event which triggers the validation + await user.click(actionInput); // using this to trigger the onBlur event which triggers the validation - expect(submitButton.getAttribute('disabled')).toBeNull(); - }); + 
expect(submitButton.getAttribute('disabled')).toBeNull(); + }, + { timeout: 10000 } + ); test('expiry date must be in the future', async () => { const user = userEvent.setup(); render( - Promise.resolve()} /> + ); @@ -142,7 +153,7 @@ describe('Set Amulet Config Rules Form', { timeout: 5000 }, () => { render( - Promise.resolve()} /> + ); @@ -173,7 +184,7 @@ describe('Set Amulet Config Rules Form', { timeout: 5000 }, () => { render( - Promise.resolve()} /> + ); @@ -198,7 +209,7 @@ describe('Set Amulet Config Rules Form', { timeout: 5000 }, () => { render( - Promise.resolve()} /> + ); @@ -226,6 +237,126 @@ describe('Set Amulet Config Rules Form', { timeout: 5000 }, () => { await user.click(submitButton); - expect(screen.getByText('Proposal Summary')).toBeDefined(); + expect(screen.getByText(PROPOSAL_SUMMARY_TITLE)).toBeDefined(); + }); + + test('should show error on form if submission fails', { timeout: 10000 }, async () => { + server.use( + rest.post(`${svUrl}/v0/admin/sv/voterequest/create`, (_, res, ctx) => { + return res(ctx.status(503), ctx.json({ error: 'Service Unavailable' })); + }) + ); + + const user = userEvent.setup(); + + render( + + + + ); + + const summaryInput = screen.getByTestId('set-amulet-config-rules-summary'); + await user.type(summaryInput, 'Summary of the proposal'); + + const urlInput = screen.getByTestId('set-amulet-config-rules-url'); + await user.type(urlInput, 'https://example.com'); + + const c1Input = screen.getByTestId('config-field-transferPreapprovalFee'); + await user.type(c1Input, '99'); + + const c2Input = screen.getByTestId('config-field-transferConfigTransferFeeInitialRate'); + await user.type(c2Input, '9.99'); + + const submitButton = screen.getByTestId('submit-button'); + await waitFor(async () => { + expect(submitButton.getAttribute('disabled')).toBeNull(); + }); + + await user.click(submitButton); // Review proposal + await user.click(submitButton); // Submit proposal + + expect(screen.getByTestId('proposal-submission-error')).toBeDefined(); + expect(screen.getByText(/Submission failed/)).toBeDefined(); + expect(screen.getByText(/Service Unavailable/)).toBeDefined(); + }); + + test('should redirect to governance page after successful submission', async () => { + server.use( + rest.post(`${svUrl}/v0/admin/sv/voterequest/create`, (_, res, ctx) => { + return res(ctx.json({})); + }) + ); + + const user = userEvent.setup(); + + render( + + + + ); + + const summaryInput = screen.getByTestId('set-amulet-config-rules-summary'); + await user.type(summaryInput, 'Summary of the proposal'); + + const urlInput = screen.getByTestId('set-amulet-config-rules-url'); + await user.type(urlInput, 'https://example.com'); + + const c1Input = screen.getByTestId('config-field-transferPreapprovalFee'); + await user.type(c1Input, '99'); + + const c2Input = screen.getByTestId('config-field-transferConfigTransferFeeInitialRate'); + await user.type(c2Input, '9.99'); + + const submitButton = screen.getByTestId('submit-button'); + await waitFor(async () => { + expect(submitButton.getAttribute('disabled')).toBeNull(); + }); + + await user.click(submitButton); // Review proposal + await user.click(submitButton); // Submit proposal + + await waitFor(() => { + expect(screen.queryByText('Action Required')).toBeDefined(); + expect(screen.queryByText('Inflight Votes')).toBeDefined(); + expect(screen.queryByText('Vote History')).toBeDefined(); + expect(screen.queryByText('Successfully submitted the proposal')).toBeDefined(); + }); + }); + + test('should render diffs if changes to config 
values were made', async () => { + const user = userEvent.setup(); + + render( + + + + ); + + const summaryInput = screen.getByTestId('set-amulet-config-rules-summary'); + await user.type(summaryInput, 'Summary of the proposal'); + + const urlInput = screen.getByTestId('set-amulet-config-rules-url'); + await user.type(urlInput, 'https://example.com'); + + const c1Input = screen.getByTestId('config-field-transferPreapprovalFee'); + await user.type(c1Input, '99'); + + const c2Input = screen.getByTestId('config-field-transferConfigTransferFeeInitialRate'); + await user.type(c2Input, '9.99'); + + const jsonDiffs = screen.getByText('JSON Diffs'); + expect(jsonDiffs).toBeDefined(); + + await user.click(jsonDiffs); + expect(screen.queryByTestId('config-diffs-display')).toBeDefined(); + + const reviewButton = screen.getByTestId('submit-button'); + await waitFor(async () => { + expect(reviewButton.getAttribute('disabled')).toBeNull(); + }); + + expect(jsonDiffs).toBeDefined(); + await user.click(jsonDiffs); + expect(screen.queryByTestId('config-diffs-display')).toBeDefined(); }); }); diff --git a/apps/sv/frontend/src/__tests__/governance/forms/update-sv-reward-weight-form-test.test.tsx b/apps/sv/frontend/src/__tests__/governance/forms/update-sv-reward-weight-form-test.test.tsx index f21bbdb659..7e19dee85d 100644 --- a/apps/sv/frontend/src/__tests__/governance/forms/update-sv-reward-weight-form-test.test.tsx +++ b/apps/sv/frontend/src/__tests__/governance/forms/update-sv-reward-weight-form-test.test.tsx @@ -13,6 +13,7 @@ import { dateTimeFormatISO } from '@lfdecentralizedtrust/splice-common-frontend- import dayjs from 'dayjs'; import { server, svUrl } from '../../setup/setup'; import { rest } from 'msw'; +import { PROPOSAL_SUMMARY_SUBTITLE } from '../../../utils/constants'; describe('SV user can', () => { test('login and see the SV party ID', async () => { @@ -54,6 +55,10 @@ describe('Update SV Reward Weight Form', () => { expect(summaryInput).toBeDefined(); expect(summaryInput.getAttribute('value')).toBeNull(); + const summarySubtitle = screen.getByTestId('update-sv-reward-weight-summary-subtitle'); + expect(summarySubtitle).toBeDefined(); + expect(summarySubtitle.textContent).toBe(PROPOSAL_SUMMARY_SUBTITLE); + const urlInput = screen.getByTestId('update-sv-reward-weight-url'); expect(urlInput).toBeDefined(); expect(urlInput.getAttribute('value')).toBe(''); @@ -308,6 +313,54 @@ describe('Update SV Reward Weight Form', () => { expect(screen.getByText(/Service Unavailable/)).toBeDefined(); }); + test('show the correct weights for selected sv in summary page', async () => { + const user = userEvent.setup(); + + render( + + + + ); + + const actionInput = screen.getByTestId('update-sv-reward-weight-action'); + const submitButton = screen.getByTestId('submit-button'); + + const summaryInput = screen.getByTestId('update-sv-reward-weight-summary'); + await user.type(summaryInput, 'Summary of the proposal'); + + const urlInput = screen.getByTestId('update-sv-reward-weight-url'); + expect(urlInput).toBeDefined(); + await user.type(urlInput, 'https://example.com'); + + const memberDropdown = screen.getByTestId('update-sv-reward-weight-member-dropdown'); + expect(memberDropdown).toBeDefined(); + + const selectInput = screen.getByRole('combobox'); + fireEvent.mouseDown(selectInput); + + await waitFor(async () => { + const memberToSelect = screen.getByText('Digital-Asset-Eng-2'); + expect(memberToSelect).toBeDefined(); + await user.click(memberToSelect); + }); + + const weightInput = 
screen.getByTestId('update-sv-reward-weight-weight'); + expect(weightInput).toBeDefined(); + await user.type(weightInput, '1000'); + + await user.click(actionInput); // using this to trigger the onBlur event which triggers the validation + + await waitFor(async () => { + expect(submitButton.getAttribute('disabled')).toBeNull(); + }); + await user.click(submitButton); // review proposal + + await waitFor(() => { + expect(screen.getByTestId('config-change-current-value').textContent).toBe('12345'); + expect(screen.getByTestId('config-change-new-value').textContent).toBe('1000'); + }); + }); + test('should redirect to governance page after successful submission', async () => { server.use( rest.post(`${svUrl}/v0/admin/sv/voterequest/create`, (_, res, ctx) => { diff --git a/apps/sv/frontend/src/__tests__/governance/proposal-details-content.test.tsx b/apps/sv/frontend/src/__tests__/governance/proposal-details-content.test.tsx index e2249b19e2..3564a928d2 100644 --- a/apps/sv/frontend/src/__tests__/governance/proposal-details-content.test.tsx +++ b/apps/sv/frontend/src/__tests__/governance/proposal-details-content.test.tsx @@ -39,7 +39,7 @@ const voteRequest = { votingInformation: { requester: 'sv1', requesterIsYou: true, - votingCloses: '2029-01-01 13:00', + votingThresholdDeadline: '2029-01-01 13:00', voteTakesEffect: '2029-01-02 13:00', status: 'Accepted', }, @@ -80,7 +80,7 @@ const voteResult = { votingInformation: { requester: 'sv1', requesterIsYou: true, - votingCloses: '2024-02-01 13:00', + votingThresholdDeadline: '2024-02-01 13:00', voteTakesEffect: '2024-02-02 13:00', status: 'Accepted', }, @@ -618,12 +618,41 @@ describe('Proposal Details > Votes & Voting', () => { expect(noVoteContent.every(v => v === 'Awaiting Response')).toBe(true); }); - test('should render no-vote status badge when voting has closed', async () => { + test('show Awaiting Response status when voting threshold has passed but proposal is not effective', async () => { const user = userEvent.setup(); const votingInformation = { requester: 'sv1', requesterIsYou: true, - votingCloses: '2024-01-01 13:00', + votingThresholdDeadline: '2025-01-01 13:00', + voteTakesEffect: '2029-01-02 13:00', + status: 'In Progress', + } as ProposalVotingInformation; + + render( + + + + ); + + const noVoteVotesTab = screen.getByTestId('no-vote-votes-tab'); + + await user.click(noVoteVotesTab); + const noVoteContent = screen.getByTestId('proposal-details-vote-status-value').textContent; + expect(noVoteContent).toBe('Awaiting Response'); + }); + + test('show Awaiting Response status when voting threshold has not been reached', async () => { + const user = userEvent.setup(); + const votingInformation = { + requester: 'sv1', + requesterIsYou: true, + votingThresholdDeadline: '2024-01-01 13:00', voteTakesEffect: '2029-01-02 13:00', status: 'Accepted', } as ProposalVotingInformation; @@ -643,16 +672,15 @@ describe('Proposal Details > Votes & Voting', () => { const noVoteVotesTab = screen.getByTestId('no-vote-votes-tab'); await user.click(noVoteVotesTab); - const noVoteVotes = screen.getAllByTestId('proposal-details-vote-status-value'); - const noVoteContent = noVoteVotes.map(v => v.textContent); - expect(noVoteContent.every(v => v === 'No Vote')).toBe(true); + const noVoteContent = screen.getByTestId('proposal-details-vote-status-value').textContent; + expect(noVoteContent).toBe('Awaiting Response'); }); test('renders correctly when vote takes effect is threshold', () => { const votingInformation = { requester: 'sv1', requesterIsYou: true, - votingCloses: 
'2029-01-01 13:00', + votingThresholdDeadline: '2029-01-01 13:00', voteTakesEffect: 'Threshold', status: 'In Progress', } as ProposalVotingInformation; @@ -711,30 +739,6 @@ describe('Proposal Details > Votes & Voting', () => { expect(votingFormReject).toBeDefined(); }); - test('should not render voting form for vote request when voting has closed', () => { - const votingInformation = { - requester: 'sv1', - requesterIsYou: true, - votingCloses: '2024-01-01 13:00', - voteTakesEffect: '2029-01-02 13:00', - status: 'In Progress', - } as ProposalVotingInformation; - - render( - - - - ); - - expect(() => screen.getByTestId('your-vote-form')).toThrowError(/Unable to find an element/); - }); - test('should not render voting form for vote result', () => { render( diff --git a/apps/sv/frontend/src/__tests__/governance/proposal-summary.test.tsx b/apps/sv/frontend/src/__tests__/governance/proposal-summary.test.tsx index 78e7f1b885..08cbed8233 100644 --- a/apps/sv/frontend/src/__tests__/governance/proposal-summary.test.tsx +++ b/apps/sv/frontend/src/__tests__/governance/proposal-summary.test.tsx @@ -38,7 +38,7 @@ describe('Review Proposal Component', () => { expect(screen.getByTestId('summary-title').textContent).toBe('Summary'); expect(screen.getByTestId('summary-field').textContent).toBe(summary); - expect(screen.getByTestId('expiryDate-title').textContent).toBe('Expiry Date'); + expect(screen.getByTestId('expiryDate-title').textContent).toBe('Threshold Deadline'); expect(screen.getByTestId('expiryDate-field').textContent).toBe(expiryDate); expect(screen.getByTestId('effectiveDate-title').textContent).toBe('Effective Date'); @@ -102,7 +102,7 @@ describe('Review Proposal Component', () => { expect(screen.getByTestId('summary-title').textContent).toBe('Summary'); expect(screen.getByTestId('summary-field').textContent).toBe(summary); - expect(screen.getByTestId('expiryDate-title').textContent).toBe('Expiry Date'); + expect(screen.getByTestId('expiryDate-title').textContent).toBe('Threshold Deadline'); expect(screen.getByTestId('expiryDate-field').textContent).toBe(expiryDate); expect(screen.getByTestId('effectiveDate-title').textContent).toBe('Effective Date'); @@ -140,7 +140,7 @@ describe('Review Proposal Component', () => { expect(screen.getByTestId('summary-title').textContent).toBe('Summary'); expect(screen.getByTestId('summary-field').textContent).toBe(summary); - expect(screen.getByTestId('expiryDate-title').textContent).toBe('Expiry Date'); + expect(screen.getByTestId('expiryDate-title').textContent).toBe('Threshold Deadline'); expect(screen.getByTestId('expiryDate-field').textContent).toBe(expiryDate); expect(screen.getByTestId('effectiveDate-title').textContent).toBe('Effective Date'); @@ -177,7 +177,7 @@ describe('Review Proposal Component', () => { expect(screen.getByTestId('summary-title').textContent).toBe('Summary'); expect(screen.getByTestId('summary-field').textContent).toBe(summary); - expect(screen.getByTestId('expiryDate-title').textContent).toBe('Expiry Date'); + expect(screen.getByTestId('expiryDate-title').textContent).toBe('Threshold Deadline'); expect(screen.getByTestId('expiryDate-field').textContent).toBe(expiryDate); expect(screen.getByTestId('effectiveDate-title').textContent).toBe('Effective Date'); @@ -232,7 +232,7 @@ describe('Review Proposal Component', () => { expect(screen.getByTestId('summary-title').textContent).toBe('Summary'); expect(screen.getByTestId('summary-field').textContent).toBe(summary); - 
expect(screen.getByTestId('expiryDate-title').textContent).toBe('Expiry Date'); + expect(screen.getByTestId('expiryDate-title').textContent).toBe('Threshold Deadline'); expect(screen.getByTestId('expiryDate-field').textContent).toBe(expiryDate); expect(screen.getByTestId('effectiveDate-title').textContent).toBe('Effective Date'); @@ -304,7 +304,7 @@ describe('Review Proposal Component', () => { expect(screen.getByTestId('summary-title').textContent).toBe('Summary'); expect(screen.getByTestId('summary-field').textContent).toBe(summary); - expect(screen.getByTestId('expiryDate-title').textContent).toBe('Expiry Date'); + expect(screen.getByTestId('expiryDate-title').textContent).toBe('Threshold Deadline'); expect(screen.getByTestId('expiryDate-field').textContent).toBe(expiryDate); expect(screen.getByTestId('effectiveDate-title').textContent).toBe('Effective Date'); diff --git a/apps/sv/frontend/src/__tests__/sv.test.tsx b/apps/sv/frontend/src/__tests__/sv.test.tsx index a11233519c..12075345ef 100644 --- a/apps/sv/frontend/src/__tests__/sv.test.tsx +++ b/apps/sv/frontend/src/__tests__/sv.test.tsx @@ -12,10 +12,14 @@ import { test, expect, describe } from 'vitest'; import App from '../App'; import { SvConfigProvider } from '../utils'; +import { onboardingInfo } from '../components/ValidatorOnboardingSecrets'; import { svPartyId, voteRequests } from './mocks/constants'; import { server, svUrl } from './setup/setup'; import { changeAction } from './helpers'; -import { dateTimeFormatISO } from '@lfdecentralizedtrust/splice-common-frontend-utils'; +import { + dateTimeFormatISO, + getUTCWithOffset, +} from '@lfdecentralizedtrust/splice-common-frontend-utils'; import dayjs from 'dayjs'; import { dsoInfo } from '@lfdecentralizedtrust/splice-common-test-handlers'; @@ -50,6 +54,64 @@ describe('SV user can', () => { await screen.findByText('You are on ScratchNet'); }); + test('browse to the validator onboarding tab', async () => { + const user = userEvent.setup(); + render(); + + expect(await screen.findByText('Validator Onboarding')).toBeDefined(); + await user.click(screen.getByText('Validator Onboarding')); + + expect(await screen.findByText('Validator Onboarding Secrets')).toBeDefined(); + }); + + test('create a new validator secret with party hint', async () => { + const user = userEvent.setup(); + render(); + + expect(await screen.findByText('Validator Onboarding')).toBeDefined(); + await user.click(screen.getByText('Validator Onboarding')); + + const partyHintInput = screen.getByTestId('create-party-hint'); + await user.type(partyHintInput, 'wrong-input'); + + expect(screen.getByTestId('create-validator-onboarding-secret').hasAttribute('disabled')).toBe( + true + ); + + await user.clear(partyHintInput); + await user.type(partyHintInput, 'correct-input-123'); + + expect(screen.getByTestId('create-validator-onboarding-secret').hasAttribute('disabled')).toBe( + false + ); + }); + + test('validator onboarding info has correct format', () => { + const validatorOnboardingInfo = onboardingInfo( + { + partyHint: 'splice-client-2', + secret: 'exampleSecret', + expiresAt: '2020-01-01 13:57', + }, + 'testnet' + ); + + expect(validatorOnboardingInfo).toBe( + ` +splice-client-2 +Network: testnet +SPONSOR_SV_URL +http://localhost:3000 + +Secret +exampleSecret + +Expiration +2020-01-01 13:57 (${getUTCWithOffset()}) + `.trim() + ); + }); + test('browse to the governance tab', async () => { const user = userEvent.setup(); render(); diff --git 
a/apps/sv/frontend/src/__tests__/utils/buildAmuletRulesConfigFromChanges.test.tsx b/apps/sv/frontend/src/__tests__/utils/buildAmuletRulesConfigFromChanges.test.tsx new file mode 100644 index 0000000000..3fb6066979 --- /dev/null +++ b/apps/sv/frontend/src/__tests__/utils/buildAmuletRulesConfigFromChanges.test.tsx @@ -0,0 +1,372 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { describe, expect, test } from 'vitest'; +import { ConfigChange } from '../../utils/types'; +import { buildAmuletRulesConfigFromChanges } from '../../utils/buildAmuletRulesConfigFromChanges'; + +describe('buildAmuletRulesConfigFromChanges', () => { + test('should build AmuletConfig with provided changes', () => { + const changes: ConfigChange[] = [ + { + fieldName: 'tickDuration', + label: 'Tick Duration', + currentValue: '1000', + newValue: '2000', + }, + { + fieldName: 'transferPreapprovalFee', + label: 'Transfer Preapproval Fee', + currentValue: '0.1', + newValue: '0.2', + }, + { + fieldName: 'featuredAppActivityMarkerAmount', + label: 'Featured App Activity Marker Amount', + currentValue: '100', + newValue: '200', + }, + { + fieldName: 'transferConfigCreateFee', + label: 'Transfer Config Create Fee', + currentValue: '0.5', + newValue: '1.0', + }, + { + fieldName: 'transferConfigHoldingFeeRate', + label: 'Transfer Config Holding Fee Rate', + currentValue: '0.01', + newValue: '0.02', + }, + { + fieldName: 'transferConfigTransferFeeInitialRate', + label: 'Transfer Fee Initial Rate', + currentValue: '0.001', + newValue: '0.002', + }, + { + fieldName: 'transferFeeSteps1_1', + label: 'Transfer Fee Step 1 Amount', + currentValue: '100', + newValue: '200', + }, + { + fieldName: 'transferFeeSteps1_2', + label: 'Transfer Fee Step 1 Rate', + currentValue: '0.001', + newValue: '0.002', + }, + { + fieldName: 'transferConfigLockHolderFee', + label: 'Lock Holder Fee', + currentValue: '0.1', + newValue: '0.2', + }, + { + fieldName: 'transferConfigExtraFeaturedAppRewardAmount', + label: 'Extra Featured App Reward Amount', + currentValue: '50', + newValue: '100', + }, + { + fieldName: 'transferConfigMaxNumInputs', + label: 'Max Num Inputs', + currentValue: '10', + newValue: '20', + }, + { + fieldName: 'transferConfigMaxNumOutputs', + label: 'Max Num Outputs', + currentValue: '10', + newValue: '20', + }, + { + fieldName: 'transferConfigMaxNumLockHolders', + label: 'Max Num Lock Holders', + currentValue: '5', + newValue: '10', + }, + { + fieldName: 'issuanceCurveInitialValueAmuletToIssuePerYear', + label: 'Amulet To Issue Per Year', + currentValue: '1000000', + newValue: '2000000', + }, + { + fieldName: 'issuanceCurveInitialValueValidatorRewardPercentage', + label: 'Validator Reward Percentage', + currentValue: '0.4', + newValue: '0.5', + }, + { + fieldName: 'issuanceCurveInitialValueAppRewardPercentage', + label: 'App Reward Percentage', + currentValue: '0.3', + newValue: '0.35', + }, + { + fieldName: 'issuanceCurveInitialValueValidatorRewardCap', + label: 'Validator Reward Cap', + currentValue: '1000', + newValue: '2000', + }, + { + fieldName: 'issuanceCurveInitialValueFeaturedAppRewardCap', + label: 'Featured App Reward Cap', + currentValue: '500', + newValue: '1000', + }, + { + fieldName: 'issuanceCurveInitialValueUnfeaturedAppRewardCap', + label: 'Unfeatured App Reward Cap', + currentValue: '100', + newValue: '200', + }, + { + fieldName: 'issuanceCurveInitialValueOptValidatorFaucetCap', + label: 'Opt Validator Faucet Cap', + 
currentValue: '50', + newValue: '100', + }, + { + fieldName: 'decentralizedSynchronizerActiveSynchronizer', + label: 'Active Synchronizer', + currentValue: 'sync1', + newValue: 'sync2', + }, + { + fieldName: 'decentralizedSynchronizerRequiredSynchronizers1', + label: 'Required Synchronizer 1', + currentValue: 'sync1', + newValue: 'sync1', + }, + { + fieldName: 'decentralizedSynchronizerRequiredSynchronizers2', + label: 'Required Synchronizer 2', + currentValue: 'sync2', + newValue: 'sync2', + }, + { + fieldName: 'decentralizedSynchronizerFeesBaseRateTrafficLimitsBurstAmount', + label: 'Burst Amount', + currentValue: '1000', + newValue: '2000', + }, + { + fieldName: 'decentralizedSynchronizerFeesBaseRateTrafficLimitsBurstWindow', + label: 'Burst Window', + currentValue: '60000000', + newValue: '120000000', + }, + { + fieldName: 'decentralizedSynchronizerFeesExtraTrafficPrice', + label: 'Extra Traffic Price', + currentValue: '0.1', + newValue: '0.2', + }, + { + fieldName: 'decentralizedSynchronizerFeesReadVsWriteScalingFactor', + label: 'Read Vs Write Scaling Factor', + currentValue: '1.5', + newValue: '2.0', + }, + { + fieldName: 'decentralizedSynchronizerFeesMinTopupAmount', + label: 'Min Topup Amount', + currentValue: '10', + newValue: '20', + }, + { + fieldName: 'packageConfigAmulet', + label: 'Amulet Package', + currentValue: '0.1.1', + newValue: '0.2.0', + }, + { + fieldName: 'packageConfigAmuletNameService', + label: 'Amulet Name Service Package', + currentValue: '0.1.1', + newValue: '0.2.0', + }, + { + fieldName: 'packageConfigDsoGovernance', + label: 'DSO Governance Package', + currentValue: '0.1.1', + newValue: '0.2.0', + }, + { + fieldName: 'packageConfigValidatorLifecycle', + label: 'Validator Lifecycle Package', + currentValue: '0.1.1', + newValue: '0.2.0', + }, + { + fieldName: 'packageConfigWallet', + label: 'Wallet Package', + currentValue: '0.1.1', + newValue: '0.2.0', + }, + { + fieldName: 'packageConfigWalletPayments', + label: 'Wallet Payments Package', + currentValue: '0.1.1', + newValue: '0.2.0', + }, + ]; + + const result = buildAmuletRulesConfigFromChanges(changes); + + expect(result.tickDuration.microseconds).toBe('2000'); + expect(result.transferPreapprovalFee).toBe('0.2'); + expect(result.featuredAppActivityMarkerAmount).toBe('200'); + + expect(result.transferConfig.createFee.fee).toBe('1.0'); + expect(result.transferConfig.holdingFee).toEqual({ rate: '0.02' }); + expect(result.transferConfig.transferFee.initialRate).toBe('0.002'); + expect(result.transferConfig.transferFee.steps).toEqual([{ _1: '200', _2: '0.002' }]); + expect(result.transferConfig.lockHolderFee.fee).toBe('0.2'); + expect(result.transferConfig.extraFeaturedAppRewardAmount).toBe('100'); + expect(result.transferConfig.maxNumInputs).toBe('20'); + expect(result.transferConfig.maxNumOutputs).toBe('20'); + expect(result.transferConfig.maxNumLockHolders).toBe('10'); + + expect(result.issuanceCurve.initialValue.amuletToIssuePerYear).toBe('2000000'); + expect(result.issuanceCurve.initialValue.validatorRewardPercentage).toBe('0.5'); + expect(result.issuanceCurve.initialValue.appRewardPercentage).toBe('0.35'); + expect(result.issuanceCurve.initialValue.validatorRewardCap).toBe('2000'); + expect(result.issuanceCurve.initialValue.featuredAppRewardCap).toBe('1000'); + expect(result.issuanceCurve.initialValue.unfeaturedAppRewardCap).toBe('200'); + expect(result.issuanceCurve.initialValue.optValidatorFaucetCap).toBe('100'); + + expect(result.decentralizedSynchronizer.activeSynchronizer).toBe('sync2'); + const 
expectedRequiredSynchronizers = Array.from( + result.decentralizedSynchronizer.requiredSynchronizers.map.entriesArray().map(e => e[0]) + ).sort(); + expect(expectedRequiredSynchronizers).toEqual(['sync1', 'sync2']); + expect(result.decentralizedSynchronizer.fees.baseRateTrafficLimits.burstAmount).toBe('2000'); + expect( + result.decentralizedSynchronizer.fees.baseRateTrafficLimits.burstWindow.microseconds + ).toBe('120000000'); + expect(result.decentralizedSynchronizer.fees.extraTrafficPrice).toBe('0.2'); + expect(result.decentralizedSynchronizer.fees.readVsWriteScalingFactor).toBe('2.0'); + expect(result.decentralizedSynchronizer.fees.minTopupAmount).toBe('20'); + + expect(result.packageConfig.amulet).toBe('0.2.0'); + expect(result.packageConfig.amuletNameService).toBe('0.2.0'); + expect(result.packageConfig.dsoGovernance).toBe('0.2.0'); + expect(result.packageConfig.validatorLifecycle).toBe('0.2.0'); + expect(result.packageConfig.wallet).toBe('0.2.0'); + expect(result.packageConfig.walletPayments).toBe('0.2.0'); + }); + + test('should handle multiple transfer fee steps', () => { + const changes: ConfigChange[] = [ + { + fieldName: 'transferConfigTransferFeeInitialRate', + label: 'Transfer Fee Initial Rate', + currentValue: '0.001', + newValue: '0.002', + }, + { + fieldName: 'transferFeeSteps1_1', + label: 'Transfer Fee Step 1 Amount', + currentValue: '100', + newValue: '200', + }, + { + fieldName: 'transferFeeSteps1_2', + label: 'Transfer Fee Step 1 Rate', + currentValue: '0.001', + newValue: '0.002', + }, + { + fieldName: 'transferFeeSteps2_1', + label: 'Transfer Fee Step 2 Amount', + currentValue: '500', + newValue: '1000', + }, + { + fieldName: 'transferFeeSteps2_2', + label: 'Transfer Fee Step 2 Rate', + currentValue: '0.0005', + newValue: '0.001', + }, + ]; + + const result = buildAmuletRulesConfigFromChanges(changes); + + expect(result.transferConfig.transferFee.steps).toEqual([ + { _1: '200', _2: '0.002' }, + { _1: '1000', _2: '0.001' }, + ]); + }); + + test('should handle issuance curve future values', () => { + const changes: ConfigChange[] = [ + { + fieldName: 'issuanceCurveFutureValues0', + label: 'Future Value 0 Time', + currentValue: '1000000', + newValue: '2000000', + }, + { + fieldName: 'issuanceCurveFutureValues0AmuletToIssuePerYear', + label: 'Future Value 0 Amulet To Issue Per Year', + currentValue: '1000000', + newValue: '2000000', + }, + { + fieldName: 'issuanceCurveFutureValues0ValidatorRewardPercentage', + label: 'Future Value 0 Validator Reward Percentage', + currentValue: '0.4', + newValue: '0.5', + }, + { + fieldName: 'issuanceCurveFutureValues0AppRewardPercentage', + label: 'Future Value 0 App Reward Percentage', + currentValue: '0.3', + newValue: '0.35', + }, + { + fieldName: 'issuanceCurveFutureValues0ValidatorRewardCap', + label: 'Future Value 0 Validator Reward Cap', + currentValue: '1000', + newValue: '2000', + }, + { + fieldName: 'issuanceCurveFutureValues0FeaturedAppRewardCap', + label: 'Future Value 0 Featured App Reward Cap', + currentValue: '500', + newValue: '1000', + }, + { + fieldName: 'issuanceCurveFutureValues0UnfeaturedAppRewardCap', + label: 'Future Value 0 Unfeatured App Reward Cap', + currentValue: '100', + newValue: '200', + }, + { + fieldName: 'issuanceCurveFutureValues0OptValidatorFaucetCap', + label: 'Future Value 0 Opt Validator Faucet Cap', + currentValue: '50', + newValue: '100', + }, + ]; + + const result = buildAmuletRulesConfigFromChanges(changes); + + expect(result.issuanceCurve.futureValues.length).toBe(1); + 
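// Note (illustrative): the Daml TypeScript codegen encodes tuples as records
// with _1/_2 fields, so each issuanceCurve.futureValues entry is a
// Tuple2<RelTime, IssuanceConfig> pair, as the assertion below spells out.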
expect(result.issuanceCurve.futureValues[0]).toEqual({ + _1: { microseconds: '2000000' }, + _2: { + amuletToIssuePerYear: '2000000', + validatorRewardPercentage: '0.5', + appRewardPercentage: '0.35', + validatorRewardCap: '2000', + featuredAppRewardCap: '1000', + unfeaturedAppRewardCap: '200', + optValidatorFaucetCap: '100', + }, + }); + }); +}); diff --git a/apps/sv/frontend/src/components/ValidatorOnboardingSecrets.tsx b/apps/sv/frontend/src/components/ValidatorOnboardingSecrets.tsx index 7eaa5e21ff..ffc121bfbe 100644 --- a/apps/sv/frontend/src/components/ValidatorOnboardingSecrets.tsx +++ b/apps/sv/frontend/src/components/ValidatorOnboardingSecrets.tsx @@ -5,12 +5,21 @@ import { DisableConditionally, Loading, SvClientProvider, - CopyableTypography, } from '@lfdecentralizedtrust/splice-common-frontend'; +import ContentCopyIcon from '@mui/icons-material/ContentCopy'; import { useMutation } from '@tanstack/react-query'; -import React from 'react'; +import React, { useState, useCallback } from 'react'; -import { Button, Stack, Table, TableContainer, TableHead, Typography } from '@mui/material'; +import { + Button, + IconButton, + Stack, + Table, + TableContainer, + TableHead, + TextField, + Typography, +} from '@mui/material'; import TableBody from '@mui/material/TableBody'; import TableCell from '@mui/material/TableCell'; import TableRow from '@mui/material/TableRow'; @@ -18,15 +27,25 @@ import TableRow from '@mui/material/TableRow'; import { useSvAdminClient } from '../contexts/SvAdminServiceContext'; import { useValidatorOnboardings } from '../hooks/useValidatorOnboardings'; import { useSvConfig } from '../utils'; +import { useNetworkInstanceName } from '../hooks'; +import dayjs from 'dayjs'; +import { + dateTimeFormatISO, + getUTCWithOffset, +} from '@lfdecentralizedtrust/splice-common-frontend-utils'; + +const VALID_PARTY_ID_REGEX = /^[^-]+-[^-]+-\d+$/; const ValidatorOnboardingSecrets: React.FC = () => { const ONBOARDING_SECRET_EXPIRY_IN_SECOND = 172800; // We allow validator to be onboarded in 48 hours const { prepareValidatorOnboarding } = useSvAdminClient(); const validatorOnboardingsQuery = useValidatorOnboardings(); + const [partyHint, setPartyHint] = useState(''); + const prepareOnboardingMutation = useMutation({ - mutationFn: () => { - return prepareValidatorOnboarding(ONBOARDING_SECRET_EXPIRY_IN_SECOND); + mutationFn: (partyHint: string) => { + return prepareValidatorOnboarding(ONBOARDING_SECRET_EXPIRY_IN_SECOND, partyHint); }, }); @@ -38,12 +57,11 @@ const ValidatorOnboardingSecrets: React.FC = () => { return

(
      <Typography>Error, something went wrong while fetching onboarding secrets.</Typography>
    )

; } - const validatorOnboardings = validatorOnboardingsQuery.data.sort((a, b) => { - return ( + const validatorOnboardings = validatorOnboardingsQuery.data.toSorted( + (a, b) => new Date(b.contract.payload.expiresAt).valueOf() - new Date(a.contract.payload.expiresAt).valueOf() - ); - }); + ); return ( @@ -57,38 +75,69 @@ const ValidatorOnboardingSecrets: React.FC = () => { )} + + Party Hint + setPartyHint(e.target.value)} + value={partyHint} + /> + + - - - - Expires At +
+ + + Party Hint Onboarding Secret + Expires At + {/* Copy icon column */} - - {validatorOnboardings.map(onboarding => { - return ( - - ); - })} + + {validatorOnboardings.map(onboarding => ( + + ))}
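// Illustrative sketch (hypothetical values, not part of the patch): the
// onboardingInfo helper added in the next hunk assembles the plain-text block
// that the per-row copy button writes to the clipboard, roughly:
//
//   const text = onboardingInfo(
//     { partyHint: 'acme-validator-1', secret: 's3cretValue', expiresAt: '2030-01-01 12:00' },
//     'devnet'
//   );
//   // => 'acme-validator-1\nNetwork: devnet\nSPONSOR_SV_URL\n<window.location.origin>\n\nSecret\n...'
//   await navigator.clipboard.writeText(text);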
@@ -96,19 +145,75 @@ const ValidatorOnboardingSecrets: React.FC = () => { ); }; +export const onboardingInfo = ( + { partyHint, secret, expiresAt }: OnboardingRowProps, + instanceName?: string +): string => { + let info = ''; + + if (partyHint) { + info += `${partyHint}\n`; + } + + info += `Network: ${instanceName ?? 'localnet'}\n`; + + info += 'SPONSOR_SV_URL\n'; + info += window.location.origin; + info += '\n\n'; + + info += 'Secret\n'; + info += secret; + info += '\n\n'; + + info += 'Expiration\n'; + info += `${dayjs(expiresAt).format(dateTimeFormatISO)} (${getUTCWithOffset()})`; + + return info; +}; + interface OnboardingRowProps { - expiresAt: string; + partyHint?: string; secret: string; + expiresAt: string; } -const OnboardingRow: React.FC = ({ expiresAt, secret }) => { +const OnboardingRow: React.FC = props => { + const networkInstanceName = useNetworkInstanceName(); + + const copySecret = useCallback(() => { + navigator.clipboard.writeText(props.secret); + }, [props]); + + const copyOnboardingInfo = useCallback(() => { + navigator.clipboard.writeText(onboardingInfo(props, networkInstanceName)); + }, [props, networkInstanceName]); + return ( - - - + + + {props.partyHint ?? '---'} + + + + {props.secret} + + + + + + + - - + + + + ); diff --git a/apps/sv/frontend/src/components/amuletprice/MedianAmuletPrice.tsx b/apps/sv/frontend/src/components/amuletprice/MedianAmuletPrice.tsx index 4b75e83cf6..3888a90ee0 100644 --- a/apps/sv/frontend/src/components/amuletprice/MedianAmuletPrice.tsx +++ b/apps/sv/frontend/src/components/amuletprice/MedianAmuletPrice.tsx @@ -1,33 +1,27 @@ // Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 -import { AmountDisplay, Loading } from '@lfdecentralizedtrust/splice-common-frontend'; + +import { + AmountDisplay, + Loading, + medianPriceVotes, + useVotesHooks, +} from '@lfdecentralizedtrust/splice-common-frontend'; import BigNumber from 'bignumber.js'; import React, { useMemo } from 'react'; import { Stack } from '@mui/material'; import Typography from '@mui/material/Typography'; -import { useAmuletPriceVotes } from '../../hooks/useAmuletPriceVotes'; -import { useSvConfig } from '../../utils'; +interface MedianAmuletPriceProps { + amuletName: string; +} -const MedianAmuletPrice: React.FC = () => { - const config = useSvConfig(); - const amuletPriceVotesQuery = useAmuletPriceVotes(); - const amuletName = config.spliceInstanceNames.amuletName; +export const MedianAmuletPrice: React.FC = props => { + const { amuletName } = props; - const median = (votedPrices: BigNumber[]) => { - if (votedPrices && votedPrices.length > 0) { - const sorted = [...votedPrices].sort((a, b) => { - return a.isEqualTo(b) ? 0 : a.isLessThan(b) ? -1 : 1; - }); - const length = sorted.length; - const half = Math.floor(length / 2); - return length % 2 !== 0 - ? sorted[half] - : sorted[half - 1].plus(sorted[half]).multipliedBy(0.5); - } - return undefined; - }; + const voteHooks = useVotesHooks(); + const amuletPriceVotesQuery = voteHooks.useAmuletPriceVotes(); const amuletPrices = useMemo( () => @@ -38,7 +32,7 @@ const MedianAmuletPrice: React.FC = () => { ); const medianAmuletPrice = useMemo( - () => (amuletPrices ? median(amuletPrices) : undefined), + () => (amuletPrices ? medianPriceVotes(amuletPrices) : undefined), [amuletPrices] ); @@ -64,5 +58,3 @@ const MedianAmuletPrice: React.FC = () => {
); }; - -export default MedianAmuletPrice; diff --git a/apps/sv/frontend/src/components/form-components/ConfigField.tsx b/apps/sv/frontend/src/components/form-components/ConfigField.tsx index 6c71ba08ba..a585fc7980 100644 --- a/apps/sv/frontend/src/components/form-components/ConfigField.tsx +++ b/apps/sv/frontend/src/components/form-components/ConfigField.tsx @@ -24,6 +24,28 @@ export type ConfigFieldState = { export const ConfigField: React.FC = props => { const { configChange, effectiveDate, pendingFieldInfo } = props; const field = useFieldContext(); + + const isSynchronizerUpgradeTime = + field.state.value?.fieldName === 'nextScheduledSynchronizerUpgradeTime'; + const isSynchronizerUpgradeMigrationId = + field.state.value?.fieldName === 'nextScheduledSynchronizerUpgradeMigrationId'; + + // We disable the field if it is pending and the value is the default value. + // The default value check is to handle the case where the user made a change + // to the field before it became a field with pending changes. + // This gives them the chance to revert that change. + const isPendingAndDefaultValue = + pendingFieldInfo !== undefined && field.state.meta.isDefaultValue; + + const isEffectiveAtThreshold = !effectiveDate; + + // When effective at Threshold, we disable the upgrade time and migrationId config fields + const isEffectiveAtThresholdAndSyncUpgradeTimeOrMigrationId = + isEffectiveAtThreshold && (isSynchronizerUpgradeTime || isSynchronizerUpgradeMigrationId); + + const isDisabled = + isPendingAndDefaultValue || isEffectiveAtThresholdAndSyncUpgradeTimeOrMigrationId; + const textFieldProps = { variant: 'outlined' as const, size: 'small' as const, @@ -34,11 +56,7 @@ export const ConfigField: React.FC = props => { sx: { textAlign: 'right' }, 'data-testid': `config-field-${configChange.fieldName}`, }, - // We disable the field if it is pending and the value is the default value. - // The default value check is to handle the case where the user made a change - // to the field before it became a field with pending changes. - // This gives them the chance to revert that change. - disabled: pendingFieldInfo !== undefined && field.state.meta.isDefaultValue, + disabled: isDisabled, }; return ( @@ -79,8 +97,8 @@ export const ConfigField: React.FC = props => { )} - {field.state.value?.fieldName === 'nextScheduledSynchronizerUpgradeTime' && ( - @@ -108,7 +126,7 @@ export const PendingConfigDisplay: React.FC = ({ pend data-testid={`config-pending-value-${pendingFieldInfo.fieldName}`} > Pending Configuration: {pendingFieldInfo.pendingValue}
- This proposal will go into effect{' '} + This pending configuration will go into effect{' '} {atThreshold ? 'at Threshold' : dayjs(pendingFieldInfo.effectiveDate).fromNow()} @@ -116,13 +134,13 @@ export const PendingConfigDisplay: React.FC = ({ pend ); }; -interface NextScheduledSynchronizerUpgradeDisplayProps { +interface SynchronizerUpgradeTimeDisplayProps { effectiveDate: string | undefined; configChange: ConfigChange; } -export const NextScheduledSynchronizerUpgradeDisplay: React.FC< - NextScheduledSynchronizerUpgradeDisplayProps +export const SynchronizerUpgradeTimeDisplay: React.FC< + SynchronizerUpgradeTimeDisplayProps > = props => { const { effectiveDate } = props; const defaultMigrationTime = dayjs(effectiveDate) diff --git a/apps/sv/frontend/src/components/form-components/ProposalSummaryField.tsx b/apps/sv/frontend/src/components/form-components/ProposalSummaryField.tsx new file mode 100644 index 0000000000..3d57da99b5 --- /dev/null +++ b/apps/sv/frontend/src/components/form-components/ProposalSummaryField.tsx @@ -0,0 +1,47 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Box, TextField as MuiTextField, Typography } from '@mui/material'; +import { useFieldContext } from '../../hooks/formContext'; +import { PROPOSAL_SUMMARY_SUBTITLE, PROPOSAL_SUMMARY_TITLE } from '../../utils/constants'; + +export interface ProposalSummaryFieldProps { + id: string; + title?: string; + optional?: boolean; + subtitle?: string; +} + +export const ProposalSummaryField: React.FC = props => { + const { title, optional, id, subtitle } = props; + const field = useFieldContext(); + + return ( + + + {title || PROPOSAL_SUMMARY_TITLE} + {optional && ( + + optional + + )} + + field.handleChange(e.target.value)} + error={!field.state.meta.isValid} + helperText={field.state.meta.errors?.[0]} + inputProps={{ 'data-testid': id }} + /> + + {subtitle || PROPOSAL_SUMMARY_SUBTITLE} + + + ); +}; diff --git a/apps/sv/frontend/src/components/form-components/TextArea.tsx b/apps/sv/frontend/src/components/form-components/TextArea.tsx deleted file mode 100644 index c083221d6f..0000000000 --- a/apps/sv/frontend/src/components/form-components/TextArea.tsx +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -import { Box, TextField as MuiTextField, Typography } from '@mui/material'; -import { useFieldContext } from '../../hooks/formContext'; - -export interface TextAreaProps { - title: string; - optional?: boolean; - id: string; -} - -export const TextArea: React.FC = props => { - const { title, optional, id } = props; - const field = useFieldContext(); - return ( - - - {title} - {optional && ( - - optional - - )} - - field.handleChange(e.target.value)} - error={!field.state.meta.isValid} - helperText={field.state.meta.errors?.[0]} - inputProps={{ 'data-testid': id }} - /> - - ); -}; diff --git a/apps/sv/frontend/src/components/forms/GrantRevokeFeaturedAppForm.tsx b/apps/sv/frontend/src/components/forms/GrantRevokeFeaturedAppForm.tsx index 0720665c2f..af67020f28 100644 --- a/apps/sv/frontend/src/components/forms/GrantRevokeFeaturedAppForm.tsx +++ b/apps/sv/frontend/src/components/forms/GrantRevokeFeaturedAppForm.tsx @@ -197,7 +197,7 @@ export const GrantRevokeFeaturedAppForm: React.FC validateSummary(value), }} > - {field => } + {field => } { onChange: ({ value }) => validateSummary(value), }} > - {field => } + {field => } a.value === 'CRARC_SetConfig'); -export interface SetAmuletConfigRulesFormProps { - onSubmit: ( - data: SetAmuletConfigCompleteFormData, - action: ActionRequiringConfirmation - ) => Promise; -} - -export const SetAmuletConfigRulesForm: React.FC = _ => { +export const SetAmuletConfigRulesForm: () => JSX.Element = () => { const dsoInfoQuery = useDsoInfos(); + const mutation = useProposalMutation(); + const dsoProposalsQuery = useListDsoRulesVoteRequests(); + const votesHooks = useVotesHooks(); const initialExpiration = getInitialExpiration(dsoInfoQuery.data); const initialEffectiveDate = dayjs(initialExpiration).add(1, 'day'); const [showConfirmation, setShowConfirmation] = useState(false); + const pendingConfigFields = useMemo( + () => buildAmuletRulesPendingConfigFields(dsoProposalsQuery.data), + [dsoProposalsQuery.data] + ); const defaultValues = useMemo((): SetAmuletConfigCompleteFormData => { if (!dsoInfoQuery.data) { @@ -85,11 +99,36 @@ export const SetAmuletConfigRulesForm: React.FC = const form = useAppForm({ defaultValues, - onSubmit: async ({ value }) => { + onSubmit: async ({ value: formData }) => { if (!showConfirmation) { setShowConfirmation(true); } else { - console.log('submit amulet config form data: ', value); + if (!amuletConfig) { + throw new Error('Amulet Config is not defined'); + } + + const changes = configFormDataToConfigChanges( + formData.config, + allAmuletConfigChanges, + false + ); + const baseConfig = amuletConfig; + const newConfig = buildAmuletRulesConfigFromChanges(changes); + const action: ActionRequiringConfirmation = { + tag: 'ARC_AmuletRules', + value: { + amuletRulesAction: { + tag: 'CRARC_SetConfig', + value: { + baseConfig: baseConfig, + newConfig: newConfig, + }, + }, + }, + }; + await mutation.mutateAsync({ formData, action }).catch(e => { + console.error(`Failed to submit proposal`, e); + }); } }, @@ -100,13 +139,54 @@ export const SetAmuletConfigRulesForm: React.FC = effectiveDate: value.common.effectiveDate.effectiveDate, }); }, + onSubmit: ({ value: formData }) => { + const changes = configFormDataToConfigChanges(formData.config, allAmuletConfigChanges); + + const conflictingChanges = changes.filter(c => + pendingConfigFields.some(p => p.fieldName === c.fieldName) + ); + const names = conflictingChanges.map(c => c.label).join(', '); + + if (conflictingChanges.length > 0) { + return `Cannot 
modify fields that have pending changes (${names})`; + } + }, }, }); const maybeConfig = dsoInfoQuery.data?.amuletRules.payload.configSchedule.initialValue; - const dsoConfig = maybeConfig ? maybeConfig : null; + const amuletConfig = maybeConfig ? maybeConfig : null; // passing the config twice here because we initially have no changes - const amuletConfigChanges = buildAmuletConfigChanges(dsoConfig, dsoConfig, true); + const allAmuletConfigChanges = buildAmuletConfigChanges(amuletConfig, amuletConfig, true); + + const effectiveDateString = form.state.values.common.effectiveDate.effectiveDate; + const effectivity = effectiveDateString ? dayjs(effectiveDateString).toDate() : undefined; + + const changes = configFormDataToConfigChanges( + form.state.values.config, + allAmuletConfigChanges, + false + ); + const changedFields = changes.filter(c => c.currentValue !== c.newValue); + const hasChangedFields = changedFields.length > 0; + + const baseConfig = amuletConfig; + const newConfig = buildAmuletRulesConfigFromChanges(changes); + const dsoAction: AmuletRules_ActionRequiringConfirmation = { + tag: 'CRARC_SetConfig', + value: { + baseConfig: baseConfig!, + newConfig: newConfig, + }, + }; + + const amuletConfigToCompareWith = getAmuletConfigToCompareWith( + effectivity, + undefined, + votesHooks, + dsoAction, + dsoInfoQuery + ); return ( @@ -120,13 +200,19 @@ export const SetAmuletConfigRulesForm: React.FC = formType="config-change" configFormData={configFormDataToConfigChanges( form.state.values.config, - amuletConfigChanges + allAmuletConfigChanges )} onEdit={() => setShowConfirmation(false)} onSubmit={() => {}} /> ) : ( <> + {pendingConfigFields.length > 0 && ( + + Some fields are disabled for editing due to pending votes. + + )} + {field => ( = onChange: ({ value }) => validateSummary(value), }} > - {field => ( - - )} + {field => } = Configuration - {amuletConfigChanges.map((change, index) => ( + {allAmuletConfigChanges.map((change, index) => ( - {field => } + {field => ( + f.fieldName === change.fieldName + )} + /> + )} ))} )} + + {amuletConfigToCompareWith && amuletConfigToCompareWith[1] && hasChangedFields ? ( + + ) : ( + No changes + )} + + + JSX.Element = () => { onChange: ({ value }) => validateSummary(value), }} > - {field => } + {field => } { onChange: ({ value }) => validateSummary(value), }} > - {field => ( - - )} + {field => } = ( - {actionRequiredRequests.map((ar, index) => ( - - ))} + {actionRequiredRequests.length === 0 ? 
( + + No Action Required items available + + ) : ( + actionRequiredRequests.map((ar, index) => ( + + )) + )} ); diff --git a/apps/sv/frontend/src/components/governance/ProposalDetailsContent.tsx b/apps/sv/frontend/src/components/governance/ProposalDetailsContent.tsx index 4387ac8cfa..1e62d3f563 100644 --- a/apps/sv/frontend/src/components/governance/ProposalDetailsContent.tsx +++ b/apps/sv/frontend/src/components/governance/ProposalDetailsContent.tsx @@ -37,14 +37,10 @@ const now = () => dayjs(); export const ProposalDetailsContent: React.FC = props => { const { contractId, proposalDetails, votingInformation, votes, currentSvPartyId } = props; - const hasExpired = dayjs(votingInformation.votingCloses).isBefore(now()); const isEffective = votingInformation.voteTakesEffect && dayjs(votingInformation.voteTakesEffect).isBefore(now()); const isClosed = - !proposalDetails.isVoteRequest || - hasExpired || - isEffective || - votingInformation.status === 'Rejected'; + !proposalDetails.isVoteRequest || isEffective || votingInformation.status === 'Rejected'; const [voteTabValue, setVoteTabValue] = useState('all'); @@ -201,14 +197,14 @@ export const ProposalDetailsContent: React.FC = pro data-testid="proposal-details-voting-closes-duration" gutterBottom > - {dayjs(votingInformation.votingCloses).fromNow()} + {dayjs(votingInformation.votingThresholdDeadline).fromNow()} - {votingInformation.votingCloses} + {votingInformation.votingThresholdDeadline} diff --git a/apps/sv/frontend/src/components/governance/ProposalSummary.tsx b/apps/sv/frontend/src/components/governance/ProposalSummary.tsx index 05744e4cd4..b42bc3aa64 100644 --- a/apps/sv/frontend/src/components/governance/ProposalSummary.tsx +++ b/apps/sv/frontend/src/components/governance/ProposalSummary.tsx @@ -59,7 +59,7 @@ export const ProposalSummary: React.FC = props => { diff --git a/apps/sv/frontend/src/components/governance/ProposalVoteForm.tsx b/apps/sv/frontend/src/components/governance/ProposalVoteForm.tsx index c8c5ae6246..624f4d9cdd 100644 --- a/apps/sv/frontend/src/components/governance/ProposalVoteForm.tsx +++ b/apps/sv/frontend/src/components/governance/ProposalVoteForm.tsx @@ -100,60 +100,60 @@ export const ProposalVoteForm: React.FC = props => { > { - const result = z - .string() - .optional() - // URL is optional so we allow undefined or empty string here as it's the default value - .refine(url => !url || url.trim() === '' || isValidUrl(url), { - message: 'Invalid URL', - }) - .safeParse(value); + const result = z.string().safeParse(value); return result.success ? undefined : result.error.issues[0].message; }, }} children={field => { return ( field.handleChange(e.target.value)} error={!field.state.meta.isValid} - helperText={ - - {field.state.meta.errors?.[0]} - - } - inputProps={{ 'data-testid': 'your-vote-url-input' }} + helperText={field.state.meta.errors?.[0]} + inputProps={{ 'data-testid': 'your-vote-reason-input' }} /> ); }} /> { - const result = z.string().safeParse(value); + const result = z + .string() + .optional() + // URL is optional so we allow undefined or empty string here as it's the default value + .refine(url => !url || url.trim() === '' || isValidUrl(url), { + message: 'Invalid URL', + }) + .safeParse(value); return result.success ? 
undefined : result.error.issues[0].message; }, }} children={field => { return ( field.handleChange(e.target.value)} error={!field.state.meta.isValid} - helperText={field.state.meta.errors?.[0]} - inputProps={{ 'data-testid': 'your-vote-reason-input' }} + helperText={ + + {field.state.meta.errors?.[0]} + + } + inputProps={{ 'data-testid': 'your-vote-url-input' }} /> ); }} diff --git a/apps/sv/frontend/src/contexts/SvAdminServiceContext.tsx b/apps/sv/frontend/src/contexts/SvAdminServiceContext.tsx index 8a4a4f2ba8..c4f5a7a20e 100644 --- a/apps/sv/frontend/src/contexts/SvAdminServiceContext.tsx +++ b/apps/sv/frontend/src/contexts/SvAdminServiceContext.tsx @@ -70,7 +70,10 @@ export interface SvAdminClient { reasonUrl: string, reasonDescription: string ) => Promise; - prepareValidatorOnboarding: (expiresIn: number) => Promise; + prepareValidatorOnboarding: ( + expiresIn: number, + partyHint: string + ) => Promise; listOngoingValidatorOnboardings: () => Promise; listValidatorLicenses: (limit: number, after?: number) => Promise; listAmuletPriceVotes: () => Promise; @@ -170,9 +173,10 @@ export const SvAdminClientProvider: React.FC => { - const request: PrepareValidatorOnboardingRequest = { expires_in }; + const request: PrepareValidatorOnboardingRequest = { expires_in, party_hint }; return await svAdminClient.prepareValidatorOnboarding(request); }, listOngoingValidatorOnboardings: diff --git a/apps/sv/frontend/src/hooks/form.ts b/apps/sv/frontend/src/hooks/form.ts index e55f0ad894..202dbf63ef 100644 --- a/apps/sv/frontend/src/hooks/form.ts +++ b/apps/sv/frontend/src/hooks/form.ts @@ -5,12 +5,12 @@ import { createFormHook } from '@tanstack/react-form'; import { DateField } from '../components/form-components/DateField'; import { fieldContext, formContext } from './formContext'; import { TextField } from '../components/form-components/TextField'; -import { TextArea } from '../components/form-components/TextArea'; import { SelectField } from '../components/form-components/SelectField'; import { ConfigField } from '../components/form-components/ConfigField'; import { FormControls } from '../components/form-components/FormControls'; import { EffectiveDateField } from '../components/form-components/EffectiveDateField'; import { FormErrors } from '../components/form-components/FormErrors'; +import { ProposalSummaryField } from '../components/form-components/ProposalSummaryField'; export const { useAppForm } = createFormHook({ fieldComponents: { @@ -18,7 +18,7 @@ export const { useAppForm } = createFormHook({ DateField, EffectiveDateField, SelectField, - TextArea, + ProposalSummaryField, TextField, }, formComponents: { diff --git a/apps/sv/frontend/src/hooks/useProposalMutation.tsx b/apps/sv/frontend/src/hooks/useProposalMutation.tsx index 22bca94fba..257ed230ba 100644 --- a/apps/sv/frontend/src/hooks/useProposalMutation.tsx +++ b/apps/sv/frontend/src/hooks/useProposalMutation.tsx @@ -51,7 +51,6 @@ export const useProposalMutation: () => UseMutationResult< onError: error => { console.error(`Failed to send proposal to dso`, error); - toast.error(`Failed to send proposal to dso: ${error.message}`); }, }); }; diff --git a/apps/sv/frontend/src/hooks/useValidatorOnboardings.tsx b/apps/sv/frontend/src/hooks/useValidatorOnboardings.tsx index 2196cbac23..30bffb18e2 100644 --- a/apps/sv/frontend/src/hooks/useValidatorOnboardings.tsx +++ b/apps/sv/frontend/src/hooks/useValidatorOnboardings.tsx @@ -10,6 +10,7 @@ import { useSvAdminClient } from '../contexts/SvAdminServiceContext'; export type 
ValidatorOnboardingSecret = { encodedSecret: string; contract: Contract; + partyHint?: string; }; export const useValidatorOnboardings = (): UseQueryResult => { @@ -21,6 +22,7 @@ export const useValidatorOnboardings = (): UseQueryResult ({ encodedSecret: c.encoded_secret, contract: Contract.decodeOpenAPI(c.contract, ValidatorOnboarding), + partyHint: c.party_hint, })); }, }); diff --git a/apps/sv/frontend/src/routes/amuletPrice.tsx b/apps/sv/frontend/src/routes/amuletPrice.tsx index 0fe0cfd1f1..e6c9c02cdd 100644 --- a/apps/sv/frontend/src/routes/amuletPrice.tsx +++ b/apps/sv/frontend/src/routes/amuletPrice.tsx @@ -5,13 +5,16 @@ import * as React from 'react'; import { Box } from '@mui/material'; import DesiredAmuletPrice from '../components/amuletprice/DesiredAmuletPrice'; -import MedianAmuletPrice from '../components/amuletprice/MedianAmuletPrice'; import OpenMiningRounds from '../components/amuletprice/OpenMiningRounds'; +import { MedianAmuletPrice } from '../components/amuletprice/MedianAmuletPrice'; +import { useSvConfig } from '../utils'; const AmuletPrice: React.FC = () => { + const config = useSvConfig(); + return ( - + diff --git a/apps/sv/frontend/src/routes/createProposal.tsx b/apps/sv/frontend/src/routes/createProposal.tsx index fab6172da1..77ceb43e6b 100644 --- a/apps/sv/frontend/src/routes/createProposal.tsx +++ b/apps/sv/frontend/src/routes/createProposal.tsx @@ -4,23 +4,18 @@ import { useSearchParams } from 'react-router-dom'; import { createProposalActions } from '../utils/governance'; import { SelectAction } from '../components/forms/SelectAction'; -import { ProposalFormData, SupportedActionTag } from '../utils/types'; +import { SupportedActionTag } from '../utils/types'; import { UpdateSvRewardWeightForm } from '../components/forms/UpdateSvRewardWeightForm'; import { OffboardSvForm } from '../components/forms/OffboardSvForm'; import { GrantRevokeFeaturedAppForm } from '../components/forms/GrantRevokeFeaturedAppForm'; import { SetDsoConfigRulesForm } from '../components/forms/SetDsoConfigRulesForm'; import { SetAmuletConfigRulesForm } from '../components/forms/SetAmuletConfigRulesForm'; -import { ActionRequiringConfirmation } from '@daml.js/splice-dso-governance/lib/Splice/DsoRules'; export const CreateProposal: React.FC = () => { const [searchParams, _] = useSearchParams(); const action = searchParams.get('action'); const selectedAction = createProposalActions.find(a => a.value === action); - const onSubmit = async (_formData: ProposalFormData, _action: ActionRequiringConfirmation) => { - await Promise.resolve(); - }; - if (selectedAction) { const a = selectedAction.value as SupportedActionTag; switch (a) { @@ -35,7 +30,7 @@ export const CreateProposal: React.FC = () => { case 'SRARC_SetConfig': return ; case 'CRARC_SetConfig': - return ; + return ; } } else { return ; diff --git a/apps/sv/frontend/src/routes/validatorOnboarding.tsx b/apps/sv/frontend/src/routes/validatorOnboarding.tsx index fddfe425dd..8b3dc04be4 100644 --- a/apps/sv/frontend/src/routes/validatorOnboarding.tsx +++ b/apps/sv/frontend/src/routes/validatorOnboarding.tsx @@ -1,6 +1,6 @@ // Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0 -import { Box } from '@mui/material'; +import { Box, Divider } from '@mui/material'; import ValidatorLicenses from '../components/ValidatorLicenses'; import ValidatorOnboardingSecrets from '../components/ValidatorOnboardingSecrets'; @@ -9,6 +9,7 @@ const ValidatorOnboarding: React.FC = () => { return ( + ); diff --git a/apps/sv/frontend/src/routes/voteRequestDetails.tsx b/apps/sv/frontend/src/routes/voteRequestDetails.tsx index 0e11dda905..7657faa9ed 100644 --- a/apps/sv/frontend/src/routes/voteRequestDetails.tsx +++ b/apps/sv/frontend/src/routes/voteRequestDetails.tsx @@ -89,7 +89,7 @@ export const VoteRequestDetails: React.FC = () => { const votingInformation: ProposalVotingInformation = { requester: request.requester, requesterIsYou: request.requester === svPartyId, - votingCloses: dayjs(request.voteBefore).format(dateTimeFormatISO), + votingThresholdDeadline: dayjs(request.voteBefore).format(dateTimeFormatISO), voteTakesEffect: request.targetEffectiveAt ? dayjs(request.targetEffectiveAt).format(dateTimeFormatISO) : 'Threshold', diff --git a/apps/sv/frontend/src/utils/buildAmuletConfigChanges.ts b/apps/sv/frontend/src/utils/buildAmuletConfigChanges.ts index e9ab0f9e78..30caa47d3f 100644 --- a/apps/sv/frontend/src/utils/buildAmuletConfigChanges.ts +++ b/apps/sv/frontend/src/utils/buildAmuletConfigChanges.ts @@ -3,6 +3,7 @@ import { Optional } from '@daml/types'; import { AmuletConfig, PackageConfig } from '@daml.js/splice-amulet/lib/Splice/AmuletConfig'; import { Tuple2 } from '@daml.js/daml-prim-DA-Types-1.0.0/lib/DA/Types'; +import { Set as DamlSet } from '@daml.js/daml-stdlib-DA-Set-Types-1.0.0/lib/DA/Set/Types'; import { RelTime } from '@daml.js/daml-stdlib-DA-Time-Types-1.0.0/lib/DA/Time/Types'; import { IssuanceConfig } from '@daml.js/splice-amulet/lib/Splice/Issuance'; import { Schedule } from '@daml.js/splice-amulet/lib/Splice/Schedule'; @@ -152,7 +153,8 @@ function buildTransferFeeStepsChanges( ) { return ( before - ?.map((b, idx) => { - const a = after?.[idx]; + ?.map((b, i) => { + const idx = i + 1; + const a = after?.[i]; return [ { @@ -289,6 +291,28 @@ function buildDecentralizedSynchronizerChanges( ) { if (!before && !after) return []; + const getRequiredSynchronizers = (synchronizers: DamlSet | undefined) => { + if (!synchronizers) return []; + + return synchronizers.map + .entriesArray() + .map(r => r[0]) + .sort(); + }; + + const beforeRequiredSynchronizers = getRequiredSynchronizers(before?.requiredSynchronizers); + const afterRequiredSynchronizers = getRequiredSynchronizers(after?.requiredSynchronizers); + + const allSynchronizers = [ + ...new Set([...beforeRequiredSynchronizers, ...afterRequiredSynchronizers]), + ].sort(); + const requiredSynchronizersChanges = allSynchronizers.map((sync, idx) => ({ + fieldName: `decentralizedSynchronizerRequiredSynchronizers${idx + 1}`, + label: `Decentralized Synchronizer (Required Synchronizer ${idx + 1})`, + currentValue: beforeRequiredSynchronizers.includes(sync) ? sync : '', + newValue: afterRequiredSynchronizers.includes(sync) ? 
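// Worked example (illustrative): a synchronizer present on only one side of the
// before/after union yields an empty string on the other side. With
// before = [sync1] and after = [sync1, sync2], this map produces:
//   RequiredSynchronizers1 -> currentValue: 'sync1', newValue: 'sync1'
//   RequiredSynchronizers2 -> currentValue: '',      newValue: 'sync2'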
sync : '', + })); + return [ { fieldName: 'decentralizedSynchronizerActiveSynchronizer', @@ -327,5 +351,6 @@ function buildDecentralizedSynchronizerChanges( currentValue: before?.fees.minTopupAmount || '', newValue: after?.fees.minTopupAmount || '', }, + ...requiredSynchronizersChanges, ] as ConfigChange[]; } diff --git a/apps/sv/frontend/src/utils/buildAmuletRulesConfigFromChanges.ts b/apps/sv/frontend/src/utils/buildAmuletRulesConfigFromChanges.ts new file mode 100644 index 0000000000..c11009fb8d --- /dev/null +++ b/apps/sv/frontend/src/utils/buildAmuletRulesConfigFromChanges.ts @@ -0,0 +1,136 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { AmuletConfig } from '@daml.js/splice-amulet/lib/Splice/AmuletConfig'; +import { Tuple2 } from '@daml.js/daml-prim-DA-Types-1.0.0/lib/DA/Types'; +import * as damlTypes from '@daml/types'; +import { RelTime } from '@daml.js/daml-stdlib-DA-Time-Types-1.0.0/lib/DA/Time/Types'; +import { IssuanceConfig } from '@daml.js/splice-amulet/lib/Splice/Issuance'; +import { ConfigChange } from './types'; +import { Set as DamlSet } from '@daml.js/daml-stdlib-DA-Set-Types-1.0.0/lib/DA/Set/Types'; + +function lsToSet(ls: T[]): DamlSet { + return { + // eslint-disable-next-line @typescript-eslint/no-empty-object-type -- This is actually representing Unit in Daml + map: ls.reduce((acc, v) => acc.set(v, {}), damlTypes.emptyMap()), + }; +} + +/** + * Given a list of config changes, build and return an AmuletConfig<'USD'>. + * The config changes should have all fields, whether they have been changed or not. + */ +export function buildAmuletRulesConfigFromChanges( + amuletConfigChanges: ConfigChange[] +): AmuletConfig<'USD'> { + const changeMap = new Map(); + amuletConfigChanges.forEach(change => { + changeMap.set(change.fieldName, change.newValue.toString()); + }); + + const getValue = (fieldName: string, fallbackValue: string = '') => { + const value = changeMap.get(fieldName); + return value ? value : fallbackValue; + }; + + const getArrayCount = (prefix: string, isPairs = false) => { + const keysCount = Array.from(changeMap.keys()).filter(key => key.startsWith(prefix)).length; + + return isPairs ? 
keysCount / 2 : keysCount; + }; + + const transferFeeStepsCount = getArrayCount('transferFeeSteps', true); + const transferFeeSteps: Tuple2[] = []; + for (let i = 1; i <= transferFeeStepsCount; i++) { + const _1 = getValue(`transferFeeSteps${i}_1`); + const _2 = getValue(`transferFeeSteps${i}_2`); + if (_1 && _2) { + transferFeeSteps.push({ _1, _2 }); + } + } + + const numRequiredSynchronizers = getArrayCount('decentralizedSynchronizerRequiredSynchronizers'); + const requiredSynchronizers: string[] = []; + for (let i = 1; i <= numRequiredSynchronizers; i++) { + requiredSynchronizers.push(getValue(`decentralizedSynchronizerRequiredSynchronizers${i}`)); + } + const requiredSynchronizersSet = lsToSet(requiredSynchronizers); + + const futureValuesCount = Array.from(changeMap.keys()).filter(key => + key.match(/^issuanceCurveFutureValues\d$/) + ).length; + const futureValues: Tuple2[] = []; + for (let i = 0; i < futureValuesCount; i++) { + const time = { microseconds: getValue(`issuanceCurveFutureValues${i}`) }; + const config: IssuanceConfig = { + amuletToIssuePerYear: getValue(`issuanceCurveFutureValues${i}AmuletToIssuePerYear`), + validatorRewardPercentage: getValue(`issuanceCurveFutureValues${i}ValidatorRewardPercentage`), + appRewardPercentage: getValue(`issuanceCurveFutureValues${i}AppRewardPercentage`), + validatorRewardCap: getValue(`issuanceCurveFutureValues${i}ValidatorRewardCap`), + featuredAppRewardCap: getValue(`issuanceCurveFutureValues${i}FeaturedAppRewardCap`), + unfeaturedAppRewardCap: getValue(`issuanceCurveFutureValues${i}UnfeaturedAppRewardCap`), + optValidatorFaucetCap: getValue(`issuanceCurveFutureValues${i}OptValidatorFaucetCap`), + }; + futureValues.push({ _1: time, _2: config }); + } + + const amuletConfig: AmuletConfig<'USD'> = { + tickDuration: { microseconds: getValue('tickDuration') }, + transferPreapprovalFee: getValue('transferPreapprovalFee'), + featuredAppActivityMarkerAmount: getValue('featuredAppActivityMarkerAmount'), + + transferConfig: { + createFee: { fee: getValue('transferConfigCreateFee') }, + holdingFee: { rate: getValue('transferConfigHoldingFeeRate') }, + transferFee: { + initialRate: getValue('transferConfigTransferFeeInitialRate'), + steps: transferFeeSteps, + }, + lockHolderFee: { fee: getValue('transferConfigLockHolderFee') }, + extraFeaturedAppRewardAmount: getValue('transferConfigExtraFeaturedAppRewardAmount'), + maxNumInputs: getValue('transferConfigMaxNumInputs'), + maxNumOutputs: getValue('transferConfigMaxNumOutputs'), + maxNumLockHolders: getValue('transferConfigMaxNumLockHolders'), + }, + + issuanceCurve: { + initialValue: { + amuletToIssuePerYear: getValue('issuanceCurveInitialValueAmuletToIssuePerYear'), + validatorRewardPercentage: getValue('issuanceCurveInitialValueValidatorRewardPercentage'), + appRewardPercentage: getValue('issuanceCurveInitialValueAppRewardPercentage'), + validatorRewardCap: getValue('issuanceCurveInitialValueValidatorRewardCap'), + featuredAppRewardCap: getValue('issuanceCurveInitialValueFeaturedAppRewardCap'), + unfeaturedAppRewardCap: getValue('issuanceCurveInitialValueUnfeaturedAppRewardCap'), + optValidatorFaucetCap: getValue('issuanceCurveInitialValueOptValidatorFaucetCap'), + }, + futureValues: futureValues, + }, + + decentralizedSynchronizer: { + activeSynchronizer: getValue('decentralizedSynchronizerActiveSynchronizer'), + requiredSynchronizers: requiredSynchronizersSet, + fees: { + baseRateTrafficLimits: { + burstAmount: getValue('decentralizedSynchronizerFeesBaseRateTrafficLimitsBurstAmount'), + 
burstWindow: { + microseconds: getValue('decentralizedSynchronizerFeesBaseRateTrafficLimitsBurstWindow'), + }, + }, + extraTrafficPrice: getValue('decentralizedSynchronizerFeesExtraTrafficPrice'), + readVsWriteScalingFactor: getValue('decentralizedSynchronizerFeesReadVsWriteScalingFactor'), + minTopupAmount: getValue('decentralizedSynchronizerFeesMinTopupAmount'), + }, + }, + + packageConfig: { + amulet: getValue('packageConfigAmulet'), + amuletNameService: getValue('packageConfigAmuletNameService'), + dsoGovernance: getValue('packageConfigDsoGovernance'), + validatorLifecycle: getValue('packageConfigValidatorLifecycle'), + wallet: getValue('packageConfigWallet'), + walletPayments: getValue('packageConfigWalletPayments'), + }, + }; + + return amuletConfig; +} diff --git a/apps/sv/frontend/src/utils/constants.ts b/apps/sv/frontend/src/utils/constants.ts new file mode 100644 index 0000000000..c7069422b8 --- /dev/null +++ b/apps/sv/frontend/src/utils/constants.ts @@ -0,0 +1,5 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +export const PROPOSAL_SUMMARY_TITLE = 'Proposal Summary'; +export const PROPOSAL_SUMMARY_SUBTITLE = 'For CIP votes, consider copying the CIP abstract here'; diff --git a/apps/sv/frontend/src/utils/governance.ts b/apps/sv/frontend/src/utils/governance.ts index ad0c63f6d0..67fd8005fb 100644 --- a/apps/sv/frontend/src/utils/governance.ts +++ b/apps/sv/frontend/src/utils/governance.ts @@ -34,6 +34,7 @@ import type { } from '../utils/types'; import { buildAmuletConfigChanges } from './buildAmuletConfigChanges'; import { buildDsoConfigChanges } from './buildDsoConfigChanges'; +import { AmuletRules_SetConfig } from '@daml.js/splice-amulet/lib/Splice/AmuletRules'; export const actionTagToTitle = (amuletName: string): Record => ({ CRARC_AddFutureAmuletConfigSchedule: `Add Future ${amuletName} Configuration Schedule`, @@ -110,11 +111,11 @@ export function buildProposal(action: ActionRequiringConfirmation, dsoInfo?: Dso } as OffBoardMemberProposal; case 'SRARC_UpdateSvRewardWeight': { const allSvInfos = dsoInfo?.dsoRules.payload.svs.entriesArray() || []; - const svPartyId = dsoInfo?.svPartyId || ''; - const currentWeight = getSvRewardWeight(allSvInfos, svPartyId); + const svToUpdate = dsoAction.value.svParty; + const currentWeight = getSvRewardWeight(allSvInfos, svToUpdate); return { - svToUpdate: dsoAction.value.svParty, + svToUpdate: svToUpdate, currentWeight: currentWeight, weightChange: dsoAction.value.newRewardWeight, } as UpdateSvRewardWeightProposal; @@ -228,3 +229,32 @@ export function buildPendingConfigFields( })); }); } + +export function buildAmuletRulesPendingConfigFields( + proposals: Contract[] | undefined +): PendingConfigFieldInfo[] { + if (!proposals?.length) { + return []; + } + + return proposals + .filter(proposal => { + const a = proposal.payload.action; + return a.tag === 'ARC_AmuletRules' && a.value.amuletRulesAction.tag === 'CRARC_SetConfig'; + }) + .flatMap(proposal => { + const amuletAction = ( + proposal.payload.action.value as ActionRequiringConfirmation.ARC_AmuletRules + ).amuletRulesAction.value as AmuletRules_SetConfig; + const changes = buildAmuletConfigChanges(amuletAction.baseConfig, amuletAction.newConfig); + + return changes.map(change => ({ + fieldName: change.fieldName, + pendingValue: change.newValue as string, + proposalCid: proposal.contractId, + effectiveDate: proposal.payload.targetEffectiveAt + ? 
dayjs(proposal.payload.targetEffectiveAt).format(dateTimeFormatISO) + : 'Threshold', + })); + }); +} diff --git a/apps/sv/frontend/src/utils/types.ts b/apps/sv/frontend/src/utils/types.ts index 1c2fec3831..9e5ff51b8a 100644 --- a/apps/sv/frontend/src/utils/types.ts +++ b/apps/sv/frontend/src/utils/types.ts @@ -97,7 +97,7 @@ export type ProposalDetails = { export interface ProposalVotingInformation { requester: string; requesterIsYou?: boolean; - votingCloses: string; + votingThresholdDeadline: string; voteTakesEffect: string; status: ProposalListingStatus; } diff --git a/apps/sv/src/main/openapi/sv-internal.yaml b/apps/sv/src/main/openapi/sv-internal.yaml index 878d951aa6..277ab014ce 100644 --- a/apps/sv/src/main/openapi/sv-internal.yaml +++ b/apps/sv/src/main/openapi/sv-internal.yaml @@ -633,6 +633,10 @@ components: description: | The contract storing the onboarding secret. $ref: "../../../../common/src/main/openapi/common-external.yaml#/components/schemas/Contract" + party_hint: + description: | + Human-readable alias of the validator party decoded from the stored secret. + type: string PrepareValidatorOnboardingRequest: type: object @@ -642,6 +646,11 @@ components: expires_in: type: integer minimum: 1 + party_hint: + description: | + Optional alias for the validator party. If provided, it is persisted in the + onboarding secret and stored in the database as the expected party identifier. + type: string PrepareValidatorOnboardingResponse: type: object @@ -665,6 +674,11 @@ components: type: string contact_point: type: string + party_hint: + description: | + Alias of the validator party. + Must match the persisted value in the onboarding secret. + type: string CometBftNodeStatusOrErrorResponse: oneOf: diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/SvApp.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/SvApp.scala index bd49b3ac33..3ee4369d26 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/SvApp.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/SvApp.scala @@ -9,6 +9,33 @@ import cats.instances.future.* import cats.syntax.either.* import com.daml.grpc.adapter.ExecutionSequencerFactory import com.daml.ledger.javaapi.data.User +import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.config.CantonRequireTypes.InstanceName +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.config.{ + CryptoConfig, + CryptoProvider, + NonNegativeFiniteDuration, + ProcessingTimeout, +} +import com.digitalasset.canton.lifecycle.{AsyncOrSyncCloseable, FlagCloseableAsync, SyncCloseable} +import com.digitalasset.canton.logging.{NamedLoggerFactory, TracedLogger} +import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.topology.{ParticipantId, PartyId, SynchronizerId} +import com.digitalasset.canton.tracing.{TraceContext, TracerProvider} +import com.digitalasset.canton.util.MonadUtil +import com.digitalasset.canton.version.ProtocolVersion +import io.circe.Json +import io.circe.syntax.* +import io.grpc.Status +import io.opentelemetry.api.trace.Tracer +import org.apache.pekko.actor.ActorSystem +import org.apache.pekko.http.cors.scaladsl.CorsDirectives.cors +import org.apache.pekko.http.cors.scaladsl.settings.CorsSettings +import org.apache.pekko.http.scaladsl.model.HttpMethods +import org.apache.pekko.http.scaladsl.server.Directive +import org.apache.pekko.http.scaladsl.server.Directives.* 
import org.lfdecentralizedtrust.splice.admin.api.TraceContextDirectives.withTraceContext import org.lfdecentralizedtrust.splice.admin.http.{AdminRoutes, HttpErrorHandler} import org.lfdecentralizedtrust.splice.auth.{ @@ -21,18 +48,20 @@ import org.lfdecentralizedtrust.splice.automation.{ DomainParamsAutomationService, DomainTimeAutomationService, } +import org.lfdecentralizedtrust.splice.codegen.java.da.time.types.RelTime import org.lfdecentralizedtrust.splice.codegen.java.splice import org.lfdecentralizedtrust.splice.codegen.java.splice.dsorules.* -import org.lfdecentralizedtrust.splice.codegen.java.da.time.types.RelTime import org.lfdecentralizedtrust.splice.config.SharedSpliceAppParameters import org.lfdecentralizedtrust.splice.environment.* -import org.lfdecentralizedtrust.splice.http.{HttpClient, HttpRateLimiter} +import org.lfdecentralizedtrust.splice.environment.BaseLedgerConnection.INITIAL_ROUND_USER_METADATA_KEY import org.lfdecentralizedtrust.splice.http.v0.sv.SvResource import org.lfdecentralizedtrust.splice.http.v0.sv_admin.SvAdminResource +import org.lfdecentralizedtrust.splice.http.{HttpClient, HttpRateLimiter} import org.lfdecentralizedtrust.splice.migration.AcsExporter import org.lfdecentralizedtrust.splice.setup.{NodeInitializer, ParticipantInitializer} -import org.lfdecentralizedtrust.splice.store.{AppStoreWithIngestion, UpdateHistory} +import org.lfdecentralizedtrust.splice.store.AppStoreWithIngestion.SpliceLedgerConnectionPriority import org.lfdecentralizedtrust.splice.store.MultiDomainAcsStore.QueryResult +import org.lfdecentralizedtrust.splice.store.{AppStoreWithIngestion, UpdateHistory} import org.lfdecentralizedtrust.splice.sv.admin.http.{HttpSvAdminHandler, HttpSvHandler} import org.lfdecentralizedtrust.splice.sv.automation.{ DsoDelegateBasedAutomationService, @@ -53,44 +82,17 @@ import org.lfdecentralizedtrust.splice.sv.config.{ import org.lfdecentralizedtrust.splice.sv.metrics.SvAppMetrics import org.lfdecentralizedtrust.splice.sv.migration.DomainDataSnapshotGenerator import org.lfdecentralizedtrust.splice.sv.onboarding.domainmigration.DomainMigrationInitializer -import org.lfdecentralizedtrust.splice.sv.onboarding.sv1.SV1Initializer import org.lfdecentralizedtrust.splice.sv.onboarding.joining.JoiningNodeInitializer import org.lfdecentralizedtrust.splice.sv.onboarding.sponsor.DsoPartyMigration +import org.lfdecentralizedtrust.splice.sv.onboarding.sv1.SV1Initializer import org.lfdecentralizedtrust.splice.sv.store.{SvDsoStore, SvSvStore} import org.lfdecentralizedtrust.splice.sv.util.{ + JsonOnboardingSecret, SvOnboardingToken, SvUtil, ValidatorOnboardingSecret, } import org.lfdecentralizedtrust.splice.util.{Contract, HasHealth, TemplateJsonDecoder} -import com.digitalasset.canton.concurrent.FutureSupervisor -import com.digitalasset.canton.config.{ - CryptoConfig, - CryptoProvider, - NonNegativeFiniteDuration, - ProcessingTimeout, -} -import com.digitalasset.canton.config.CantonRequireTypes.InstanceName -import com.digitalasset.canton.config.RequireTypes.NonNegativeInt -import com.digitalasset.canton.lifecycle.{AsyncOrSyncCloseable, FlagCloseableAsync, SyncCloseable} -import com.digitalasset.canton.logging.{NamedLoggerFactory, TracedLogger} -import com.digitalasset.canton.resource.Storage -import com.digitalasset.canton.time.Clock -import com.digitalasset.canton.topology.{ParticipantId, PartyId, SynchronizerId} -import com.digitalasset.canton.tracing.{TraceContext, TracerProvider} -import com.digitalasset.canton.version.ProtocolVersion -import 
com.digitalasset.canton.util.MonadUtil -import io.circe.Json -import io.grpc.Status -import io.opentelemetry.api.trace.Tracer -import org.apache.pekko.actor.ActorSystem -import org.apache.pekko.http.cors.scaladsl.CorsDirectives.cors -import org.apache.pekko.http.cors.scaladsl.settings.CorsSettings -import org.apache.pekko.http.scaladsl.model.HttpMethods -import org.apache.pekko.http.scaladsl.server.Directive -import org.apache.pekko.http.scaladsl.server.Directives.* -import org.lfdecentralizedtrust.splice.environment.BaseLedgerConnection.INITIAL_ROUND_USER_METADATA_KEY -import org.lfdecentralizedtrust.splice.store.AppStoreWithIngestion.SpliceLedgerConnectionPriority import java.time.Instant import java.util.Optional @@ -224,7 +226,8 @@ class SvApp( ) .valueOr(err => throw new IllegalArgumentException(s"Invalid domain parameters config: $err") - ), + ) + .copy(topologyChangeDelay = config.topologyChangeDelayDuration.toInternal), svSynchronizerConfig.sequencer.internalApi, svSynchronizerConfig.sequencer.externalPublicApiUrl, svSynchronizerConfig.sequencer.sequencerAvailabilityDelay.asJava, @@ -354,17 +357,6 @@ class SvApp( } yield res case Some(joiningConfig: SvOnboardingConfig.JoinWithKey) => for { - // It is possible that the participant left disconnected to domains due to party migration failure in the last SV startup. - // reconnect all domains at the beginning of SV initialization just in case. - _ <- appInitStep("Reconnect all domains") { - retryProvider.retry( - RetryFor.WaitingOnInitDependency, - "reconect_domains", - "Reconnect all domains", - participantAdminConnection.reconnectAllDomains(), - logger, - ) - } cometBftNode <- SvUtil.mapToCometBftNode( cometBftClient, cometBftConfig, @@ -744,7 +736,7 @@ class SvApp( Future.traverse(config.expectedValidatorOnboardings)(c => SvApp .prepareValidatorOnboarding( - ValidatorOnboardingSecret(svStoreWithIngestion.store.key.svParty, c.secret), + ValidatorOnboardingSecret(svStoreWithIngestion.store.key.svParty, c.secret, None), c.expiresIn, svStoreWithIngestion, decentralizedSynchronizer, @@ -837,21 +829,34 @@ object SvApp { retryProvider: RetryProvider, )(implicit ec: ExecutionContext, traceContext: TraceContext): Future[Either[String, Unit]] = { val svStore = svStoreWithIngestion.store - val svParty = svStore.key.svParty + + // If the secret contains partyHint, use it as a single source of truth + val (svParty, rawSecret, secretValue) = secret.partyHint match { + case Some(hint) => + val sv = secret.sponsoringSv + ( + sv, + secret.secret, + JsonOnboardingSecret(sv.toProtoPrimitive, secret.secret, hint).asJson.noSpaces, + ) + case None => (svStore.key.svParty, secret.secret, secret.secret) + } + val validatorOnboarding = new splice.validatoronboarding.ValidatorOnboarding( svParty.toProtoPrimitive, - secret.secret, + secretValue, (clock.now + expiresIn.toInternal).toInstant, ).create() + for { - res <- svStore.lookupUsedSecretWithOffset(secret.secret).flatMap { + res <- svStore.lookupUsedSecretWithOffset(rawSecret).flatMap { case QueryResult(_, Some(usedSecret)) => val validator = usedSecret.payload.validator Future.successful( Left(s"This secret has already been used before, for onboarding validator $validator") ) case QueryResult(offset, None) => - svStore.lookupValidatorOnboardingBySecretWithOffset(secret.secret).flatMap { + svStore.lookupValidatorOnboardingBySecretWithOffset(rawSecret).flatMap { case QueryResult(_, Some(_)) => Future.successful( Left("A validator onboarding contract with this secret already exists.") @@ -869,7 +874,7 @@ 
object SvApp { .CommandId( "org.lfdecentralizedtrust.splice.sv.expectValidatorOnboarding", Seq(svParty), - secret.secret, // not a leak as this gets hashed before it's used + secretValue, // not a leak as this gets hashed before it's used ), deduplicationOffset = offset, ) diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/SvAppBootstrap.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/SvAppBootstrap.scala index 92c1f25b29..540bb5eb6e 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/SvAppBootstrap.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/SvAppBootstrap.scala @@ -109,7 +109,7 @@ object SvAppBootstrap { testingConfigInternal, clock, svMetrics, - new CommunityStorageFactory(svConfig.storage), + new StorageSingleFactory(svConfig.storage), loggerFactory, futureSupervisor, configuredOpenTelemetry, diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/api/client/commands/HttpSvAdminAppClient.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/api/client/commands/HttpSvAdminAppClient.scala index 1a01d5e523..720c56cd8d 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/api/client/commands/HttpSvAdminAppClient.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/api/client/commands/HttpSvAdminAppClient.scala @@ -91,7 +91,7 @@ object HttpSvAdminAppClient { } } - case class PrepareValidatorOnboarding(expiresIn: FiniteDuration) + case class PrepareValidatorOnboarding(expiresIn: FiniteDuration, partyHint: Option[String]) extends BaseCommand[http.PrepareValidatorOnboardingResponse, String] { override def submitRequest( @@ -99,7 +99,7 @@ object HttpSvAdminAppClient { headers: List[HttpHeader], ): EitherT[Future, Either[Throwable, HttpResponse], http.PrepareValidatorOnboardingResponse] = client.prepareValidatorOnboarding( - body = definitions.PrepareValidatorOnboardingRequest(expiresIn.toSeconds), + body = definitions.PrepareValidatorOnboardingRequest(expiresIn.toSeconds, partyHint), headers = headers, ) diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/http/HttpSvAdminHandler.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/http/HttpSvAdminHandler.scala index ff0377a79a..20a30979c1 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/http/HttpSvAdminHandler.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/http/HttpSvAdminHandler.scala @@ -35,7 +35,7 @@ import org.lfdecentralizedtrust.splice.sv.migration.{ } import org.lfdecentralizedtrust.splice.sv.store.{SvDsoStore, SvSvStore} import org.lfdecentralizedtrust.splice.sv.util.SvUtil.generateRandomOnboardingSecret -import org.lfdecentralizedtrust.splice.sv.util.ValidatorOnboardingSecret +import org.lfdecentralizedtrust.splice.sv.util.Secrets import org.lfdecentralizedtrust.splice.sv.{LocalSynchronizerNode, SvApp} import java.util.Optional @@ -156,17 +156,19 @@ class HttpSvAdminHandler( validatorOnboardings <- svStore.listValidatorOnboardings() } yield { definitions.ListOngoingValidatorOnboardingsResponse( - validatorOnboardings - .map(onboarding => - definitions.ValidatorOnboarding( - ValidatorOnboardingSecret( - svStore.key.svParty, - onboarding.payload.candidateSecret, - ).toApiResponse, - onboarding.toHttp, + validatorOnboardings.map { onboarding => + val secret = Secrets + .decodeValidatorOnboardingSecret( + onboarding.payload.candidateSecret, + dsoStore.key.svParty, ) + + 
definitions.ValidatorOnboarding( + secret.toApiResponse, + onboarding.toHttp, + secret.partyHint, ) - .toVector + }.toVector ) } } @@ -189,7 +191,7 @@ class HttpSvAdminHandler( )(tuser: TracedUser): Future[v0.SvAdminResource.PrepareValidatorOnboardingResponse] = { implicit val TracedUser(_, traceContext) = tuser withSpan(s"$workflowId.prepareValidatorOnboarding") { _ => _ => - val secret = generateRandomOnboardingSecret(svStore.key.svParty) + val secret = generateRandomOnboardingSecret(svStore.key.svParty, body.partyHint) val expiresIn = NonNegativeFiniteDuration.ofSeconds(body.expiresIn.toLong) dsoStore .getDsoRules() diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/http/HttpSvHandler.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/http/HttpSvHandler.scala index d726d0b9c4..2c56729c03 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/http/HttpSvHandler.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/http/HttpSvHandler.scala @@ -14,7 +14,6 @@ import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId import com.digitalasset.canton.topology.transaction.SequencerSynchronizerState import com.digitalasset.canton.tracing.{Spanning, TraceContext} import com.google.protobuf.ByteString -import io.circe.parser.* import io.grpc.Status.Code import io.grpc.{Status, StatusRuntimeException} import io.opentelemetry.api.trace.Tracer @@ -37,16 +36,14 @@ import org.lfdecentralizedtrust.splice.sv.onboarding.DsoPartyHosting import org.lfdecentralizedtrust.splice.sv.onboarding.sponsor.DsoPartyMigration import org.lfdecentralizedtrust.splice.sv.store.{SvDsoStore, SvSvStore} import org.lfdecentralizedtrust.splice.sv.util.SvUtil.generateRandomOnboardingSecret -import org.lfdecentralizedtrust.splice.sv.util.{SvOnboardingToken, ValidatorOnboardingSecret} +import org.lfdecentralizedtrust.splice.sv.util.{Secrets, SvOnboardingToken} import org.lfdecentralizedtrust.splice.sv.{LocalSynchronizerNode, SvApp} import org.lfdecentralizedtrust.splice.util.{Codec, Contract} -import java.nio.charset.StandardCharsets import java.util.Base64 import scala.concurrent.{ExecutionContext, Future} import scala.jdk.CollectionConverters.* import scala.jdk.OptionConverters.* -import scala.util.{Failure, Success, Try} class HttpSvHandler( svUserName: String, @@ -79,23 +76,6 @@ class HttpSvHandler( override protected val votesStore: ActiveVotesStore = dsoStore override protected val workflowId: String = this.getClass.getSimpleName - private def decodeValidatorOnboardingSecret(secret: String): ValidatorOnboardingSecret = - // There are two ways to create secrets: - // 1. Through the UI/API endpoints. The actual secret (excluding the wrapper that adds the SV party id) is a base64 encoded string of length 30. - // 2. Through `expected-validator-onboardings`. In that case, the secret can be whatever the user chose so it must not be a base64 string. - // So for backwards compatibility we interpret a secret that is not base64 or does not decode to a JSON object - // as a legacy token without the wrapper including the SV party id. 
- Try(Base64.getDecoder.decode(secret)) match { - case Failure(_) => - ValidatorOnboardingSecret( - svParty, - secret, - ) - case Success(decoded) => - decode[ValidatorOnboardingSecret](new String(decoded, StandardCharsets.UTF_8)) - .getOrElse(ValidatorOnboardingSecret(svParty, secret)) - } - def onboardValidator( respond: v0.SvResource.OnboardValidatorResponse.type )( @@ -105,11 +85,11 @@ class HttpSvHandler( withSpan(s"$workflowId.onboardValidator") { _ => _ => Codec.decode(Codec.Party)(body.partyId) match { case Right(partyId) => - val secret = decodeValidatorOnboardingSecret(body.secret) - if (secret.sponsoringSv == svParty) { - svStore.lookupValidatorOnboardingBySecret(secret.secret).flatMap { + val providedSecret = Secrets.decodeValidatorOnboardingSecret(body.secret, svParty) + if (providedSecret.sponsoringSv == svParty) { + svStore.lookupValidatorOnboardingBySecret(providedSecret.secret).flatMap { case None => - svStore.lookupUsedSecret(secret.secret).flatMap { + svStore.lookupUsedSecret(providedSecret.secret).flatMap { case Some(used) if used.payload.validator == body.partyId => // This validator is already onboarded with the same secret - nothing to do Future.successful(v0.SvResource.OnboardValidatorResponseOK) @@ -122,37 +102,49 @@ class HttpSvHandler( } case Some(vo) => - // Check whether a validator license already exists for this party, - // because when recovering from an ACS snapshot "used secret" information will get lost. - dsoStore - .lookupValidatorLicenseWithOffset(PartyId.tryFromProtoPrimitive(body.partyId)) - .flatMap { - case QueryResult(_, Some(_)) => - // This validator is already onboarded - nothing to do - Future.successful(v0.SvResource.OnboardValidatorResponseOK) - case QueryResult(_, None) => - for { - // We retry here because this mutates the AmuletRules and rounds contracts, - // which can lead to races. - _ <- retryProvider.retryForClientCalls( - "onboard_validator", - "onboard validator via DsoRules", - onboardValidator( - partyId, - secret, - vo, - body.version, - body.contactPoint, - ), - logger, - ) - } yield v0.SvResource.OnboardValidatorResponseOK - } + val storedSecret = + Secrets.decodeValidatorOnboardingSecret(vo.payload.candidateSecret, svParty); + + if (storedSecret.partyHint.exists(_ != partyId.uid.identifier.str)) { + Future.failed( + HttpErrorHandler.badRequest( + s"The onboarding secret entered does not match the secret issued for validatorPartyHint: ${storedSecret.partyHint + .getOrElse("")}" + ) + ) + } else { + // Check whether a validator license already exists for this party, + // because when recovering from an ACS snapshot "used secret" information will get lost. + dsoStore + .lookupValidatorLicenseWithOffset(PartyId.tryFromProtoPrimitive(body.partyId)) + .flatMap { + case QueryResult(_, Some(_)) => + // This validator is already onboarded - nothing to do + Future.successful(v0.SvResource.OnboardValidatorResponseOK) + case QueryResult(_, None) => + for { + // We retry here because this mutates the AmuletRules and rounds contracts, + // which can lead to races. 
+ _ <- retryProvider.retryForClientCalls( + "onboard_validator", + "onboard validator via DsoRules", + onboardValidator( + partyId, + Secrets.encodeValidatorOnboardingSecret(storedSecret), + vo, + body.version, + body.contactPoint, + ), + logger, + ) + } yield v0.SvResource.OnboardValidatorResponseOK + } + } } } else { Future.failed( HttpErrorHandler.badRequest( - s"Secret is for SV ${secret.sponsoringSv} but this SV is ${svParty}, validate your SV sponsor URL" + s"Secret is for SV ${providedSecret.sponsoringSv} but this SV is ${svParty}, validate your SV sponsor URL" ) ) } @@ -307,7 +299,7 @@ class HttpSvHandler( implicit val tc = extracted withSpan(s"$workflowId.devNetOnboardValidatorPrepare") { _ => _ => if (isDevNet) { - val secret = generateRandomOnboardingSecret(svStore.key.svParty) + val secret = generateRandomOnboardingSecret(svStore.key.svParty, None) val expiresIn = NonNegativeFiniteDuration.ofHours(1) dsoStore .getDsoRules() @@ -745,7 +737,7 @@ class HttpSvHandler( private def onboardValidator( candidateParty: PartyId, - secret: ValidatorOnboardingSecret, + secret: String, validatorOnboarding: Contract[ValidatorOnboarding.ContractId, ValidatorOnboarding], version: Option[String], contactPoint: Option[String], @@ -763,7 +755,7 @@ class HttpSvHandler( ) ), validatorOnboarding.exercise( - _.exerciseValidatorOnboarding_Match(secret.secret, candidateParty.toProtoPrimitive) + _.exerciseValidatorOnboarding_Match(secret, candidateParty.toProtoPrimitive) ), ) map (_.update) _ <- dsoStoreWithIngestion diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvDsoAutomationService.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvDsoAutomationService.scala index 7651d9c456..76075a5113 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvDsoAutomationService.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvDsoAutomationService.scala @@ -232,7 +232,6 @@ class SvDsoAutomationService( participantAdminConnection, config.preparationTimeRecordTimeTolerance, config.mediatorDeduplicationTimeout, - config.topologyChangeDelayDuration, ) ) diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/LocalSequencerConnectionsTrigger.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/LocalSequencerConnectionsTrigger.scala index 0527eb687c..8f784408a9 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/LocalSequencerConnectionsTrigger.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/LocalSequencerConnectionsTrigger.scala @@ -19,6 +19,7 @@ import com.digitalasset.canton.{SequencerAlias, SynchronizerAlias} import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig import com.digitalasset.canton.sequencing.{ GrpcSequencerConnection, + SequencerConnectionPoolDelays, SequencerConnections, SubmissionRequestAmplification, } @@ -110,6 +111,8 @@ class LocalSequencerConnectionsTrigger( // TODO(#2110) Rethink this when we enable sequencer connection pools. sequencerLivenessMargin = NonNegativeInt.zero, submissionRequestAmplification = sequencerRequestAmplification, + // TODO(#2666) Make the delays configurable. 
+ sequencerConnectionPoolDelays = SequencerConnectionPoolDelays.default, ) if ( ParticipantAdminConnection.dropSequencerId( diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/ReconcileDynamicDomainParametersTrigger.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/ReconcileDynamicDomainParametersTrigger.scala index 3af1194407..d63dc5bc8a 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/ReconcileDynamicDomainParametersTrigger.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/ReconcileDynamicDomainParametersTrigger.scala @@ -44,7 +44,6 @@ class ReconcileDynamicSynchronizerParametersTrigger( participantAdminConnection: ParticipantAdminConnection, preparationTimeRecordTimeTolerance: NonNegativeFiniteDuration, mediatorDeduplicationTimeout: NonNegativeFiniteDuration, - topologyChangeDelayDuration: NonNegativeFiniteDuration, )(implicit override val ec: ExecutionContext, mat: Materializer, @@ -87,7 +86,6 @@ class ReconcileDynamicSynchronizerParametersTrigger( amuletConfig, decentralizedSynchronizerConfig, preparationTimeRecordTimeToleranceTarget, - topologyChangeDelayDuration, ) } yield if (state.mapping.parameters != updatedConfig) @@ -151,7 +149,6 @@ class ReconcileDynamicSynchronizerParametersTrigger( task.amuletConfig, task.synchronizerConfig, task.preparationTimeRecordTimeToleranceTarget, - topologyChangeDelayDuration, ), forceChanges = if (task.preparationTimeRecordTimeToleranceTarget.isDefined) @@ -180,7 +177,6 @@ class ReconcileDynamicSynchronizerParametersTrigger( amuletConfig: AmuletConfig[USD], synchronizerConfig: Option[SynchronizerConfig], preparationTimeRecordTimeToleranceTarget: Option[InternalNonNegativeFiniteDuration], - topologyDelay: NonNegativeFiniteDuration, ): DynamicSynchronizerParameters = { val domainFeesConfig = amuletConfig.decentralizedSynchronizer.fees // Make sure that the bootstrap script for the upgrade domain is aligned with any changes made to the @@ -208,7 +204,6 @@ class ReconcileDynamicSynchronizerParametersTrigger( ), mediatorDeduplicationTimeout = InternalNonNegativeFiniteDuration.fromConfig(mediatorDeduplicationTimeout), - topologyChangeDelay = topologyDelay.toInternal, ) } } diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/SequencerPruningTrigger.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/SequencerPruningTrigger.scala index 4cd81a6159..d1c436feb6 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/SequencerPruningTrigger.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/SequencerPruningTrigger.scala @@ -70,9 +70,9 @@ class SequencerPruningTrigger( Future.unit } { _ => { - logger.debug("Attempt pruning our sequencer...") + logger.info("Attempt pruning our sequencer...") prune().map { prunedResult => - logger.debug(s"Completed pruning our sequencer with result: $prunedResult") + logger.info(s"Completed pruning our sequencer with result: $prunedResult") } } } diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/config/SvAppConfig.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/config/SvAppConfig.scala index 58b1a449cb..93ad1146ce 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/config/SvAppConfig.scala +++ 
b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/config/SvAppConfig.scala @@ -321,6 +321,7 @@ case class SvAppBackendConfig( NonNegativeFiniteDuration.ofHours(24), // Defaults to 48h as it must be at least 2x preparationTimeRecordtimeTolerance mediatorDeduplicationTimeout: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofHours(48), + // We want to be able to override this for simtime tests topologyChangeDelayDuration: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofMillis(250), delegatelessAutomationExpectedTaskDuration: Long = 5000, // milliseconds diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/joining/JoiningNodeDsoPartyHosting.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/joining/JoiningNodeDsoPartyHosting.scala index 3331a95271..ce523cf54a 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/joining/JoiningNodeDsoPartyHosting.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/joining/JoiningNodeDsoPartyHosting.scala @@ -145,6 +145,9 @@ class JoiningNodeDsoPartyHosting( }, logger, ) + _ = logger.info( + "Received Acs snapshot from sponsor, importing into candidate participant" + ) _ <- participantAdminConnection.uploadAcsSnapshot(response.acsSnapshot) _ = logger.info( "Imported Acs snapshot from sponsor SV participant to candidate participant" diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/joining/JoiningNodeInitializer.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/joining/JoiningNodeInitializer.scala index fe817f3227..8e9a8a7558 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/joining/JoiningNodeInitializer.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/joining/JoiningNodeInitializer.scala @@ -3,12 +3,31 @@ package org.lfdecentralizedtrust.splice.sv.onboarding.joining +import cats.implicits.catsSyntaxOptionId import cats.data.OptionT -import org.apache.pekko.stream.Materializer import cats.syntax.apply.* import cats.syntax.foldable.* -import cats.syntax.option.* import cats.syntax.traverse.* +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, NonNegativeLong, PositiveInt} +import com.digitalasset.canton.config.SynchronizerTimeTrackerConfig +import com.digitalasset.canton.lifecycle.CloseContext +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig +import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.sequencing.{ + GrpcSequencerConnection, + SequencerConnectionPoolDelays, + SequencerConnections, +} +import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId +import com.digitalasset.canton.topology.transaction.{HostingParticipant, ParticipantPermission} +import com.digitalasset.canton.topology.{ParticipantId, PartyId, SynchronizerId} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ShowUtil.* +import io.grpc.Status +import io.opentelemetry.api.trace.Tracer +import org.apache.pekko.stream.Materializer import org.lfdecentralizedtrust.splice.codegen.java.splice.svonboarding.SvOnboardingConfirmed import org.lfdecentralizedtrust.splice.config.{ NetworkAppClientConfig, @@ -19,18 +38,19 @@ import org.lfdecentralizedtrust.splice.environment.* import 
org.lfdecentralizedtrust.splice.environment.TopologyAdminConnection.TopologyTransactionType import org.lfdecentralizedtrust.splice.http.HttpClient import org.lfdecentralizedtrust.splice.migration.DomainMigrationInfo +import org.lfdecentralizedtrust.splice.store.AppStoreWithIngestion.SpliceLedgerConnectionPriority import org.lfdecentralizedtrust.splice.store.{ AppStoreWithIngestion, DomainTimeSynchronization, DomainUnpausedSynchronization, } import org.lfdecentralizedtrust.splice.sv.admin.api.client.SvConnection -import org.lfdecentralizedtrust.splice.sv.automation.{SvDsoAutomationService, SvSvAutomationService} +import org.lfdecentralizedtrust.splice.sv.automation.singlesv.onboarding.SvOnboardingUnlimitedTrafficTrigger import org.lfdecentralizedtrust.splice.sv.automation.singlesv.{ ReconcileSequencerLimitWithMemberTrafficTrigger, SvPackageVettingTrigger, } -import org.lfdecentralizedtrust.splice.sv.automation.singlesv.onboarding.SvOnboardingUnlimitedTrafficTrigger +import org.lfdecentralizedtrust.splice.sv.automation.{SvDsoAutomationService, SvSvAutomationService} import org.lfdecentralizedtrust.splice.sv.cometbft.{ CometBftClient, CometBftConnectionConfig, @@ -46,41 +66,19 @@ import org.lfdecentralizedtrust.splice.sv.onboarding.SynchronizerNodeReconciler. OnboardedAfterDelay, Onboarding, } -import org.lfdecentralizedtrust.splice.sv.onboarding.{ - DsoPartyHosting, - NodeInitializerUtil, - SetupUtil, - SynchronizerNodeInitializer, - SynchronizerNodeReconciler, -} +import org.lfdecentralizedtrust.splice.sv.onboarding.* import org.lfdecentralizedtrust.splice.sv.store.{SvDsoStore, SvStore, SvSvStore} import org.lfdecentralizedtrust.splice.sv.util.{SvOnboardingToken, SvUtil} import org.lfdecentralizedtrust.splice.sv.{LocalSynchronizerNode, SvApp} import org.lfdecentralizedtrust.splice.util.{ Contract, - PackageVetting, SynchronizerMigrationUtil, + PackageVetting, TemplateJsonDecoder, } -import com.digitalasset.canton.config.SynchronizerTimeTrackerConfig -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, NonNegativeLong, PositiveInt} -import com.digitalasset.canton.lifecycle.CloseContext -import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig -import com.digitalasset.canton.resource.Storage -import com.digitalasset.canton.sequencing.{GrpcSequencerConnection, SequencerConnections} -import com.digitalasset.canton.time.Clock -import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId -import com.digitalasset.canton.topology.transaction.{HostingParticipant, ParticipantPermission} -import com.digitalasset.canton.topology.{ParticipantId, PartyId, SynchronizerId} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.ShowUtil.* -import io.grpc.Status -import io.opentelemetry.api.trace.Tracer -import org.lfdecentralizedtrust.splice.store.AppStoreWithIngestion.SpliceLedgerConnectionPriority import java.security.interfaces.ECPrivateKey -import scala.concurrent.{ExecutionContextExecutor, Future} +import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future} import scala.jdk.CollectionConverters.* /** Container for the methods required by the SvApp to initialize a joining SV node. */ @@ -147,6 +145,8 @@ class JoiningNodeInitializer( // TODO(#2110) Rethink this when we enable sequencer connection pools. 
sequencerLivenessMargin = NonNegativeInt.zero,
          config.participantClient.sequencerRequestAmplification,
+         // TODO(#2666) Make the delays configurable.
+         sequencerConnectionPoolDelays = SequencerConnectionPoolDelays.default,
        ),
        // Set manualConnect = true to avoid any issues with interrupted SV onboardings.
        // This is changed to false after SV onboarding completes.
@@ -172,7 +172,10 @@ class JoiningNodeInitializer(
            )
          ),
        ).tupled
-      decentralizedSynchronizerId <- connectToDomainUnlessMigratingDsoParty(dsoPartyId)
+      // It is possible that the participant was left disconnected from domains due to a failure during the last SV startup.
+      // Reconnect all domains at the beginning of SV initialization just in case, but
+      // only if we already host the dso party or if we don't see a proposal to host it.
+      decentralizedSynchronizerId <- proceedWithReconnectAllDomains(dsoPartyId)
      svParty <- SetupUtil.setupSvParty(
        initConnection,
        config,
@@ -474,6 +477,59 @@ class JoiningNodeInitializer(
    }
  }
+  // We can only reconnect the domains if the participant:
+  // - already hosts the dsoParty, or
+  // - is not in the process of hosting it.
+  // Otherwise we risk reconnecting while the party was already authorized but the ACS was not yet imported, thus breaking the participant.
+  private def proceedWithReconnectAllDomains(
+      dsoParty: PartyId
+  )(implicit tc: TraceContext, ec: ExecutionContext): Future[SynchronizerId] = {
+    retryProvider.retry(
+      RetryFor.ClientCalls,
+      "reconnect_all_domains",
+      "Reconnecting to all domains if the participant hosts the dsoParty or is not in the process of hosting it.",
+      for {
+        decentralizedSynchronizerId <- participantAdminConnection
+          .getPhysicalSynchronizerIdWithoutConnecting(
+            config.domains.global.alias
+          )
+        participantId <- participantAdminConnection.getParticipantId()
+        // Check if the participant hosts the DSO party. If so,
+        // we can proceed with reconnecting all domains.
+        dsoPartyToParticipantMapping <- participantAdminConnection.listPartyToParticipant(
+          store = TopologyStoreId.Synchronizer(decentralizedSynchronizerId).some,
+          filterParty = dsoParty.filterString,
+          filterParticipant = participantId.filterString,
+          topologyTransactionType = TopologyTransactionType.AuthorizedState,
+        )
+        // Check if the participant has a proposal for hosting the DSO party. If so,
+        // we are in the middle of a DSO party migration, so don't reconnect to the domain.
+ activeDsoPartyToParticipantProposals <- participantAdminConnection + .listPartyToParticipant( + store = TopologyStoreId.Synchronizer(decentralizedSynchronizerId).some, + filterParty = dsoParty.filterString, + filterParticipant = participantId.filterString, + topologyTransactionType = TopologyTransactionType.AllProposals, + ) + _ <- + if ( + dsoPartyToParticipantMapping.nonEmpty || activeDsoPartyToParticipantProposals.isEmpty + ) { + logger.info("Reconnecting all domains.") + participantAdminConnection.reconnectAllDomains() + } else { + Future.unit + } + } yield { + logger.info( + s"Participant hosts dsoParty: ${dsoPartyToParticipantMapping.nonEmpty} and has proposals to host dsoParty ${activeDsoPartyToParticipantProposals.nonEmpty}" + ) + decentralizedSynchronizerId.logical + }, + logger, + ) + } + private def waitForSvParticipantToHaveSubmissionRights( dsoParty: PartyId, synchronizerId: SynchronizerId, @@ -965,39 +1021,6 @@ class JoiningNodeInitializer( logger, ) } - - private def connectToDomainUnlessMigratingDsoParty(dsoPartyId: PartyId): Future[SynchronizerId] = - retryProvider.retry( - RetryFor.ClientCalls, - "connect_domain", - "Connect to global domain if not migrating party", - for { - decentralizedSynchronizerId <- participantAdminConnection - .getSynchronizerIdWithoutConnecting( - config.domains.global.alias - ) - participantId <- participantAdminConnection.getParticipantId() - // Check if we have a proposal for hosting the DSO party signed by our particpant. If so, - // we are in the middle of an DSO party migration so don't reconnect to the domain. - proposals <- participantAdminConnection.listPartyToParticipant( - TopologyStoreId.Synchronizer(decentralizedSynchronizerId).some, - filterParty = dsoPartyId.filterString, - filterParticipant = participantId.filterString, - topologyTransactionType = TopologyTransactionType.ProposalSignedByOwnKey, - ) - _ <- - if (proposals.nonEmpty) { - logger.info( - "Participant is in process of hosting the DSO party, not reconnecting to domain to avoid inconsistent ACS" - ) - Future.unit - } else { - logger.info("Reconnecting to global domain") - participantAdminConnection.connectDomain(config.domains.global.alias) - } - } yield decentralizedSynchronizerId, - logger, - ) } object JoiningNodeInitializer {} diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/sv1/SV1Initializer.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/sv1/SV1Initializer.scala index d425dd04f7..c64fec849f 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/sv1/SV1Initializer.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/sv1/SV1Initializer.scala @@ -58,6 +58,7 @@ import com.digitalasset.canton.protocol.DynamicSynchronizerParameters import com.digitalasset.canton.resource.Storage import com.digitalasset.canton.sequencing.{ GrpcSequencerConnection, + SequencerConnectionPoolDelays, SequencerConnections, TrafficControlParameters, } @@ -187,6 +188,8 @@ class SV1Initializer( // TODO(#2110) Rethink this when we enable sequencer connection pools. sequencerLivenessMargin = NonNegativeInt.zero, config.participantClient.sequencerRequestAmplification, + // TODO(#2666) Make the delays configurable. 
+ sequencerConnectionPoolDelays = SequencerConnectionPoolDelays.default, ), manualConnect = false, synchronizerId = None, @@ -453,9 +456,8 @@ class SV1Initializer( namespace, ) ) - val initialValues = DynamicSynchronizerParameters.initialValues(clock, ProtocolVersion.v34) + val initialValues = DynamicSynchronizerParameters.initialValues(ProtocolVersion.v34) val values = initialValues.tryUpdate( - topologyChangeDelay = config.topologyChangeDelayDuration.toInternal, trafficControlParameters = Some(initialTrafficControlParameters), reconciliationInterval = PositiveSeconds.fromConfig(SvUtil.defaultAcsCommitmentReconciliationInterval), diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/store/db/DbSvSvStore.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/store/db/DbSvSvStore.scala index c392c95d99..d1bfde3723 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/store/db/DbSvSvStore.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/store/db/DbSvSvStore.scala @@ -83,7 +83,13 @@ class DbSvSvStore( acsStoreId, domainMigrationId, ValidatorOnboarding.COMPANION, - where = sql"""onboarding_secret = ${lengthLimited(secret)}""", + where = sql""" + onboarding_secret = ${lengthLimited(secret)} + or ( + onboarding_secret like '{%' + and (onboarding_secret::jsonb ->> 'secret') = ${lengthLimited(secret)} + ) + """, ).headOption, "lookupValidatorOnboardingBySecretWithOffset", ) @@ -105,7 +111,13 @@ class DbSvSvStore( acsStoreId, domainMigrationId, UsedSecret.COMPANION, - where = sql"""onboarding_secret = ${lengthLimited(secret)}""", + where = sql""" + onboarding_secret = ${lengthLimited(secret)} + or ( + onboarding_secret like '{%' + and (onboarding_secret::jsonb ->> 'secret') = ${lengthLimited(secret)} + ) + """, ).headOption, "lookupUsedSecretWithOffset", ) diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/util/JsonOnboardingSecret.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/util/JsonOnboardingSecret.scala new file mode 100644 index 0000000000..b6cfc774f0 --- /dev/null +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/util/JsonOnboardingSecret.scala @@ -0,0 +1,18 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package org.lfdecentralizedtrust.splice.sv.util + +import io.circe.* +import io.circe.generic.semiauto.* + +final case class JsonOnboardingSecret( + sv: String, + secret: String, + validator_party_hint: String, +) + +object JsonOnboardingSecret { + implicit val decoder: Decoder[JsonOnboardingSecret] = deriveDecoder + implicit val encoder: Encoder[JsonOnboardingSecret] = deriveEncoder +} diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/util/Secrets.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/util/Secrets.scala new file mode 100644 index 0000000000..f8bb61a514 --- /dev/null +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/util/Secrets.scala @@ -0,0 +1,56 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0
+
+package org.lfdecentralizedtrust.splice.sv.util
+
+import com.digitalasset.canton.topology.PartyId
+import io.circe.parser.decode
+import io.circe.syntax.*
+import java.nio.charset.StandardCharsets
+import java.util.Base64
+import scala.util.Try
+
+object Secrets {
+  private def decodeJsonSecret(str: String): Option[ValidatorOnboardingSecret] =
+    decode[JsonOnboardingSecret](str).toOption.map { json =>
+      ValidatorOnboardingSecret(
+        PartyId.tryFromProtoPrimitive(json.sv),
+        json.secret,
+        Some(json.validator_party_hint),
+      )
+    }
+
+  private def decodeBase64Secret(str: String): Option[ValidatorOnboardingSecret] =
+    Try(Base64.getDecoder.decode(str)).toOption.flatMap { decodedBytes =>
+      val decodedStr = new String(decodedBytes, StandardCharsets.UTF_8)
+      decode[ValidatorOnboardingSecret](decodedStr).toOption
+    }
+
+  // There are two ways to create secrets:
+  // 1. Through the UI/API endpoints. The actual secret (excluding the wrapper that adds the SV party id) is a base64 encoded string.
+  // 2. Through `expected-validator-onboardings`. In that case, the secret can be whatever the user chose, so it need not be a base64 string.
+  // So for backwards compatibility we interpret a secret that is not base64 or does not decode to a JSON object
+  // as a legacy token without the wrapper including the SV party id.
+  def decodeValidatorOnboardingSecret(
+      secret: String,
+      fallbackSv: PartyId,
+  ): ValidatorOnboardingSecret =
+    (secret match {
+      case s if s.startsWith("{") => decodeJsonSecret(s)
+      case s => decodeBase64Secret(s)
+    }).getOrElse(ValidatorOnboardingSecret(fallbackSv, secret, None))
+
+  def encodeValidatorOnboardingSecret(
+      secret: ValidatorOnboardingSecret
+  ): String = (
+    secret.partyHint match {
+      case None => secret.secret
+      case Some(hint) =>
+        JsonOnboardingSecret(
+          secret.sponsoringSv.toProtoPrimitive,
+          secret.secret,
+          hint,
+        ).asJson.noSpaces
+    }
+  )
+} diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/util/SvUtil.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/util/SvUtil.scala index 141085a333..90f6a6a70f 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/util/SvUtil.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/util/SvUtil.scala @@ -317,7 +317,10 @@ object SvUtil {
    }
  }
-  def generateRandomOnboardingSecret(sv: PartyId): ValidatorOnboardingSecret = {
+  def generateRandomOnboardingSecret(
+      sv: PartyId,
+      partyHint: Option[String],
+  ): ValidatorOnboardingSecret = {
    val rng = new SecureRandom();
    // 256 bits of entropy
    val bytes = new Array[Byte](ValidatorOnboardingSecretLength)
@@ -326,6 +329,7 @@ object SvUtil {
    ValidatorOnboardingSecret(
      sv,
      secret,
+     partyHint,
    )
  } diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/util/ValidatorOnboardingSecret.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/util/ValidatorOnboardingSecret.scala index a5eba9ea6b..dcc30258c6 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/util/ValidatorOnboardingSecret.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/util/ValidatorOnboardingSecret.scala @@ -14,6 +14,7 @@ import JsonCodec.* final case class ValidatorOnboardingSecret(
  sponsoringSv: PartyId,
  secret: String,
+ partyHint: Option[String],
) {
  // We encode the secret as base64 instead of returning a JSON object, as secrets are often copy-pasted in terminals
  // and this avoids the need to worry about string escaping.
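To make the backwards-compatibility contract of Secrets.scala above concrete, here is a self-contained sketch of the decode order it implements. JsonSecret and Decoded are simplified stand-ins for the patch's JsonOnboardingSecret and ValidatorOnboardingSecret (the base64 branch in the real code decodes a ValidatorOnboardingSecret, not a JsonSecret); circe on the classpath is assumed, as in the repo.

// Sketch only: mirrors the decode order in Secrets.scala with simplified types.
// Order: '{'-prefixed JSON first, then base64-wrapped JSON, then legacy fallback.
import io.circe.Decoder
import io.circe.generic.semiauto.deriveDecoder
import io.circe.parser.decode

import java.nio.charset.StandardCharsets
import java.util.Base64
import scala.util.Try

object SecretDecodingSketch {
  // Stand-in for JsonOnboardingSecret; same field names as the patch's JSON format.
  final case class JsonSecret(sv: String, secret: String, validator_party_hint: String)
  implicit val jsonSecretDecoder: Decoder[JsonSecret] = deriveDecoder[JsonSecret]

  // Stand-in for ValidatorOnboardingSecret.
  final case class Decoded(sv: String, secret: String, partyHint: Option[String])

  def decodeSecret(raw: String, fallbackSv: String): Decoded =
    (raw match {
      case s if s.startsWith("{") =>
        // New format: plain JSON carrying the validator party hint.
        decode[JsonSecret](s).toOption
          .map(j => Decoded(j.sv, j.secret, Some(j.validator_party_hint)))
      case s =>
        // Older API format: base64-wrapped JSON without a party hint.
        Try(Base64.getDecoder.decode(s)).toOption.flatMap { bytes =>
          decode[JsonSecret](new String(bytes, StandardCharsets.UTF_8)).toOption
            .map(j => Decoded(j.sv, j.secret, None))
        }
    }).getOrElse(Decoded(fallbackSv, raw, None)) // Legacy: opaque user-chosen string.

  // A legacy secret from `expected-validator-onboardings` falls back to the
  // sponsoring SV with no party hint:
  // decodeSecret("my-legacy-secret", "sv1") == Decoded("sv1", "my-legacy-secret", None)
}

This fallback chain is also why the DbSvSvStore lookups above guard the jsonb extraction with onboarding_secret like '{%': only new-format rows are JSON objects, so legacy and base64 secrets still match on plain string equality.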
diff --git a/apps/sv/src/test/scala/org/lfdecentralizedtrust/splice/store/db/SvSvStoreTest.scala b/apps/sv/src/test/scala/org/lfdecentralizedtrust/splice/store/db/SvSvStoreTest.scala index a46679036f..cc10602cb4 100644 --- a/apps/sv/src/test/scala/org/lfdecentralizedtrust/splice/store/db/SvSvStoreTest.scala +++ b/apps/sv/src/test/scala/org/lfdecentralizedtrust/splice/store/db/SvSvStoreTest.scala @@ -51,6 +51,23 @@ abstract class SvSvStoreTest extends StoreTest with HasExecutionContext { } } + "find a ValidatorOnboarding by secret in JSON format" in { + val wanted = validatorOnboarding( + """{"sv": "splice-client-1", "validator_party_hint": "splice-client-2", "secret": "good_secret"}""" + ) + val offset = 303L + for { + store <- mkStore() + _ <- dummyDomain.create(wanted, offset, createdEventSignatories = Seq(storeSvParty))( + store.multiDomainAcsStore + ) + } yield { + store.lookupValidatorOnboardingBySecretWithOffset("good_secret").futureValue should be( + QueryResult(offset, Some(wanted)) + ) + } + } + "return just the offset if there's no entries" in { for { store <- mkStore() @@ -87,6 +104,23 @@ abstract class SvSvStoreTest extends StoreTest with HasExecutionContext { } } + "find a UsedSecret by secret in JSON format" in { + val wanted = usedSecret( + """{"sv": "splice-client-1::dummy", "validator_party_hint": "splice-client-2", "secret": "good_secret"}""" + ) + val offset = 303L + for { + store <- mkStore() + _ <- dummyDomain.create(wanted, offset, createdEventSignatories = Seq(storeSvParty))( + store.multiDomainAcsStore + ) + } yield { + store.lookupUsedSecretWithOffset("good_secret").futureValue should be( + QueryResult(offset, Some(wanted)) + ) + } + } + } } diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/ValidatorApp.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/ValidatorApp.scala index d671c1b6c8..e986cccc38 100644 --- a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/ValidatorApp.scala +++ b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/ValidatorApp.scala @@ -166,18 +166,24 @@ class ValidatorApp( ) .asRuntimeException() case _ => - logger.info( - "Ensuring participant is initialized" - ) val cantonIdentifierConfig = ValidatorCantonIdentifierConfig.resolvedNodeIdentifierConfig(config) - ParticipantInitializer.ensureParticipantInitializedWithExpectedId( + val participantInitializer = new ParticipantInitializer( cantonIdentifierConfig.participant, - participantAdminConnection, config.participantBootstrappingDump, loggerFactory, retryProvider, + participantAdminConnection, ) + if (config.svValidator) { + logger.info("Waiting for the participant to be initialized by the SV app") + participantInitializer.waitForNodeInitialized() + } else { + logger.info( + "Ensuring participant is initialized" + ) + participantInitializer.ensureInitializedWithExpectedId() + } } } } @@ -257,7 +263,8 @@ class ValidatorApp( SpliceCircuitBreaker( "restore", config.parameters.circuitBreakers.mediumPriority, - logger, + clock, + loggerFactory, )(ac.scheduler, implicitly), ) val participantUsersDataRestorer = new ParticipantUsersDataRestorer( diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/ValidatorAppBootstrap.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/ValidatorAppBootstrap.scala index 882c4cb8ee..97094e60af 100644 --- 
a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/ValidatorAppBootstrap.scala +++ b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/ValidatorAppBootstrap.scala @@ -109,7 +109,7 @@ object ValidatorAppBootstrap { testingConfigInternal, clock, validatorMetrics, - new CommunityStorageFactory(validatorConfig.storage), + new StorageSingleFactory(validatorConfig.storage), loggerFactory, futureSupervisor, configuredOpenTelemetry, diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/AcceptTransferPreapprovalProposalTrigger.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/AcceptTransferPreapprovalProposalTrigger.scala index 678c696b57..e6ae110407 100644 --- a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/AcceptTransferPreapprovalProposalTrigger.scala +++ b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/AcceptTransferPreapprovalProposalTrigger.scala @@ -81,7 +81,9 @@ class AcceptTransferPreapprovalProposalTrigger( validatorWallet.treasury .enqueueAmuletOperation( operation, - dedup = Some(AmuletOperationDedupConfig(commandId, DedupOffset(offset))), + dedup = Some(AmuletOperationDedupConfig(commandId, DedupOffset(offset))).filter(_ => + transferPreapprovalConfig.proposalAcceptanceDeduplication + ), ) .flatMap { case failedOperation: installCodegen.amuletoperationoutcome.COO_Error => diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/ReconcileSequencerConnectionsTrigger.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/ReconcileSequencerConnectionsTrigger.scala index 9c58b5f461..ab922395d7 100644 --- a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/ReconcileSequencerConnectionsTrigger.scala +++ b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/ReconcileSequencerConnectionsTrigger.scala @@ -19,6 +19,7 @@ import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionCo import com.digitalasset.canton.sequencing.{ GrpcSequencerConnection, SequencerConnection, + SequencerConnectionPoolDelays, SequencerConnections, SubmissionRequestAmplification, } @@ -102,6 +103,8 @@ class ReconcileSequencerConnectionsTrigger( ), patience, ), + // TODO(#2666) Make the delays configurable. + sequencerConnectionPoolDelays = SequencerConnectionPoolDelays.default, ) } participantAdminConnection.modifyOrRegisterSynchronizerConnectionConfigAndReconnect( diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/TopologyMetricsTrigger.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/TopologyMetricsTrigger.scala new file mode 100644 index 0000000000..b6ed23e01c --- /dev/null +++ b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/TopologyMetricsTrigger.scala @@ -0,0 +1,76 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package org.lfdecentralizedtrust.splice.validator.automation + +import com.digitalasset.canton.lifecycle.{AsyncOrSyncCloseable, SyncCloseable} +import com.digitalasset.canton.topology.ParticipantId +import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId +import com.digitalasset.canton.topology.transaction.PartyToParticipant +import com.digitalasset.canton.tracing.TraceContext +import io.opentelemetry.api.trace.Tracer +import org.lfdecentralizedtrust.splice.automation.{PollingTrigger, TriggerContext} +import org.lfdecentralizedtrust.splice.environment.ParticipantAdminConnection +import org.lfdecentralizedtrust.splice.environment.TopologyAdminConnection.TopologyResult +import org.lfdecentralizedtrust.splice.scan.admin.api.client.ScanConnection +import org.lfdecentralizedtrust.splice.validator.metrics.TopologyMetrics + +import scala.collection.mutable +import scala.concurrent.{ExecutionContext, Future} + +class TopologyMetricsTrigger( + override protected val context: TriggerContext, + scanConnection: ScanConnection, + participantAdminConnection: ParticipantAdminConnection, +)(implicit + override val ec: ExecutionContext, + override val tracer: Tracer, +) extends PollingTrigger { + + private val topologyMetrics = new TopologyMetrics(context.metricsFactory) + + override def performWorkIfAvailable()(implicit + tc: TraceContext + ): Future[Boolean] = + for { + synchronizerId <- scanConnection.getAmuletRulesDomain()(tc) + partyMappings <- + participantAdminConnection.listPartyToParticipant( + store = Some(TopologyStoreId.Synchronizer(synchronizerId)) + ) + } yield { + updateMetrics(partyMappings) + false + } + + private def updateMetrics( + partyMappings: Seq[TopologyResult[PartyToParticipant]] + ): Unit = { + // Update total + topologyMetrics.numPartiesGauge.updateValue(partyMappings.size.toDouble) + + // Update per-participant counts + val partiesPerParticipant: mutable.Map[ParticipantId, Int] = mutable.Map(); + partyMappings.foreach(result => + result.mapping.participants.foreach(hostingInfo => + partiesPerParticipant.updateWith(hostingInfo.participantId)(oldValue => + Some(oldValue.getOrElse(0) + 1) + ) + ) + ) + partiesPerParticipant.foreach { case (participantId, count) => + val gauge = topologyMetrics.getNumPartiesPerParticipantGauge(participantId) + gauge.updateValue(count.toDouble) + } + } + + override def closeAsync(): Seq[AsyncOrSyncCloseable] = + super + .closeAsync() + .appended( + SyncCloseable( + "topology metrics", + topologyMetrics.close(), + ) + ) +} diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/ValidatorAutomationService.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/ValidatorAutomationService.scala index 9021b1bd4f..10014318ae 100644 --- a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/ValidatorAutomationService.scala +++ b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/ValidatorAutomationService.scala @@ -98,6 +98,18 @@ class ValidatorAutomationService( : org.lfdecentralizedtrust.splice.validator.automation.ValidatorAutomationService.type = ValidatorAutomationService + automationConfig.topologyMetricsPollingInterval.foreach(topologyPollingInterval => + registerTrigger( + new TopologyMetricsTrigger( + triggerContext + .focus(_.config.pollingInterval) + .replace(topologyPollingInterval), + scanConnection, + participantAdminConnection, + ) + ) + ) + 
walletManagerOpt.foreach { walletManager => registerTrigger( new WalletAppInstallTrigger( diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/domain/DomainConnector.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/domain/DomainConnector.scala index e2102e382c..b2c5420005 100644 --- a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/domain/DomainConnector.scala +++ b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/domain/DomainConnector.scala @@ -23,6 +23,7 @@ import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionCo import com.digitalasset.canton.sequencing.{ GrpcSequencerConnection, SequencerConnections, + SequencerConnectionPoolDelays, SubmissionRequestAmplification, } import com.digitalasset.canton.tracing.TraceContext @@ -138,7 +139,7 @@ class DomainConnector( if (connections.isEmpty) { throw Status.NOT_FOUND .withDescription( - s"sequencer connections for migration id $migrationId is empty, validate with your SV sponsor that your migration id is correct" + s"sequencer connections for migration id $migrationId is empty at $time, validate with your SV sponsor that your migration id is correct" ) .asRuntimeException() } else { @@ -147,7 +148,7 @@ class DomainConnector( case None => throw Status.NOT_FOUND .withDescription( - s"sequencer connections for migration id $migrationId is empty, validate with your SV sponsor that your migration id is correct" + s"sequencer connections for migration id $migrationId is empty at $time, validate with your SV sponsor that your migration id is correct" ) .asRuntimeException() case Some(nonEmptyConnections) => @@ -160,6 +161,8 @@ class DomainConnector( ), // TODO(#2110) Rethink this when we enable sequencer connection pools. sequencerLivenessMargin = NonNegativeInt.zero, + // TODO(#2666) Make the delays configurable. + sequencerConnectionPoolDelays = SequencerConnectionPoolDelays.default, ) } }.toMap diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/metrics/TopologyMetrics.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/metrics/TopologyMetrics.scala new file mode 100644 index 0000000000..99a82a7739 --- /dev/null +++ b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/metrics/TopologyMetrics.scala @@ -0,0 +1,62 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package org.lfdecentralizedtrust.splice.validator.metrics + +import com.daml.metrics.api.MetricHandle.{Gauge, LabeledMetricsFactory} +import com.daml.metrics.api.MetricQualification.Saturation +import com.daml.metrics.api.{MetricInfo, MetricName, MetricsContext} +import com.digitalasset.canton.topology.ParticipantId +import org.lfdecentralizedtrust.splice.environment.SpliceMetrics + +import scala.collection.concurrent.TrieMap + +class TopologyMetrics(metricsFactory: LabeledMetricsFactory) extends AutoCloseable { + private val prefix: MetricName = SpliceMetrics.MetricsPrefix :+ "synchronizer-topology" + + val numPartiesPerParticipantGauges: TrieMap[ParticipantId, Gauge[Double]] = + TrieMap.empty + + val numPartiesGauge: Gauge[Double] = + metricsFactory.gauge[Double]( + MetricInfo( + prefix :+ "num-parties", + summary = "Total number of parties", + description = + "The total number of parties allocated on the Global Synchronizer. 
Only available if the topology metrics are exported.", + qualification = Saturation, + ), + Double.NaN, + )(MetricsContext.Empty) + + def getNumPartiesPerParticipantGauge(participantId: ParticipantId): Gauge[Double] = { + // TODO(tech-debt): factor out this allocation logic for labelled metrics + numPartiesPerParticipantGauges.get(participantId) match { + case Some(gauge) => gauge + case None => + val newGauge = metricsFactory.gauge[Double]( + MetricInfo( + prefix :+ "num-parties-per-participant", + summary = "Number of parties per participant", + description = + "The number of parties hosted on a participant connected to the Global Synchronizer. Only available if the topology metrics are exported.", + qualification = Saturation, + ), + Double.NaN, + )(MetricsContext.Empty.withExtraLabels("participant_id" -> participantId.toString)) + val optOldValue = numPartiesPerParticipantGauges.putIfAbsent(participantId, newGauge) + optOldValue match { + case None => newGauge + case Some(oldGauge) => + // Another thread inserted a gauge in the meantime, close the one we created and return the old one + newGauge.close() + oldGauge + } + } + } + + override def close(): Unit = { + numPartiesGauge.close() + numPartiesPerParticipantGauges.values.foreach(_.close()) + } +} diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/metrics/ValidatorAppMetrics.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/metrics/ValidatorAppMetrics.scala index b0fe8af725..03a477ced5 100644 --- a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/metrics/ValidatorAppMetrics.scala +++ b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/metrics/ValidatorAppMetrics.scala @@ -4,12 +4,10 @@ package org.lfdecentralizedtrust.splice.validator.metrics import com.daml.metrics.api.MetricHandle.LabeledMetricsFactory -import org.lfdecentralizedtrust.splice.BaseSpliceMetrics import com.digitalasset.canton.metrics.DbStorageHistograms +import org.lfdecentralizedtrust.splice.BaseSpliceMetrics /** Modelled after [[com.digitalasset.canton.synchronizer.metrics.DomainMetrics]]. - * - * This is only a bare-bones implementation so the code compiles so far. */ class ValidatorAppMetrics( metricsFactory: LabeledMetricsFactory, diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/util/DarUtil.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/util/DarUtil.scala deleted file mode 100644 index c1d3be9874..0000000000 --- a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/util/DarUtil.scala +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0
-
-package org.lfdecentralizedtrust.splice.validator.util
-
-import org.lfdecentralizedtrust.splice.util.UploadablePackage
-import com.google.protobuf.ByteString
-
-import java.io.InputStream
-
-private[validator] object DarUtil {
-  def readDar(
-      name: String,
-      inputStream: InputStream,
-  ): (UploadablePackage, ByteString) = {
-    val darFile = ByteString.readFrom(inputStream)
-    (UploadablePackage.fromByteString(name, darFile), darFile)
-  }
-}
diff --git a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/UserWalletService.scala b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/UserWalletService.scala
index 2a8620973b..ac8720a9e5 100644
--- a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/UserWalletService.scala
+++ b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/UserWalletService.scala
@@ -91,7 +91,8 @@ class UserWalletService(
           SpliceCircuitBreaker(
             "treasury",
             params.circuitBreakers.mediumPriority,
-            logger,
+            clock,
+            loggerFactory,
           ),
         ),
         treasuryConfig,
diff --git a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/config/TransferPreapprovalConfig.scala b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/config/TransferPreapprovalConfig.scala
index f8d79f44bc..db8b9622ff 100644
--- a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/config/TransferPreapprovalConfig.scala
+++ b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/config/TransferPreapprovalConfig.scala
@@ -10,4 +10,9 @@ final case class TransferPreapprovalConfig(
     preapprovalLifetime: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofDays(90),
     // Automation will try to renew the pre-approval contract this much time before expiry
     renewalDuration: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofDays(30),
+    // If set to false, acceptance of TransferPreapprovalProposals is not deduplicated. This means that
+    // if multiple proposals are created, you will also get multiple TransferPreapprovals. This does not
+    // break anything, but you will pay extra fees and use extra traffic for renewals.
+    // In return, it allows the validator to batch acceptance of preapprovals, which can improve throughput.
+    proposalAcceptanceDeduplication: Boolean = true,
 )
diff --git a/bootstrap-canton.sc b/bootstrap-canton.sc
index 0495adf436..effb94ad76 100644
--- a/bootstrap-canton.sc
+++ b/bootstrap-canton.sc
@@ -19,8 +19,15 @@ import com.digitalasset.canton.version.ProtocolVersion

 println("Running canton bootstrap script...")

+val tokenFile = System.getenv("CANTON_TOKEN_FILENAME")
+if (tokenFile == null) {
+  sys.error("Environment variable CANTON_TOKEN_FILENAME was not set")
+}
+
 val domainParametersConfig = SynchronizerParametersConfig(
-  alphaVersionSupport = true
+  alphaVersionSupport = true,
+  // simtime does not work with a non-zero topology change delay, so we override it to match the Canton tests
+  topologyChangeDelay = Some(if (tokenFile == "canton-simtime.tokens") NonNegativeFiniteDuration.Zero else NonNegativeFiniteDuration.ofMillis(250)),
 )

 def staticParameters(sequencer: LocalInstanceReference) =
@@ -111,10 +118,6 @@ participants.local.foreach(participant => {
   val port = participant.config.ledgerApi.internalPort.get.unwrap
   adminTokensData.append(s"$port" -> adminToken)
 })
-val tokenFile = System.getenv("CANTON_TOKEN_FILENAME")
-if (tokenFile == null) {
-  sys.error("Environment variable CANTON_TOKEN_FILENAME was not set")
-}
 println(s"Writing admin tokens file to $tokenFile...")
 val adminTokensContent = adminTokensData.map(x => s"${x._1} ${x._2}").mkString(System.lineSeparator())
diff --git a/build-tools/cluster-lock-users.json b/build-tools/cluster-lock-users.json
index 2bca543a49..171c69d2b4 100644
--- a/build-tools/cluster-lock-users.json
+++ b/build-tools/cluster-lock-users.json
@@ -16,5 +16,7 @@
   "OriolMunoz-da": ["oriolmunoz"],
   "ray-roestenburg-da": ["raymondroestenburg"],
   "nicu-da": ["nicu"],
-  "stephencompall-DA": ["stephencompall"]
+  "stephencompall-DA": ["stephencompall"],
+  "pasindutennage-da": ["pasindu"],
+  "mblaze-da": ["mblazejewski"]
 }
diff --git a/build-tools/cncluster b/build-tools/cncluster
index 934fd27799..a0507b16c5 100755
--- a/build-tools/cncluster
+++ b/build-tools/cncluster
@@ -128,9 +128,9 @@ function _retry_until_success() {
     exit 1
   fi

-  # with 5 second sleep + 1 second kubectl timeout, this is roughly 10 minutes
+  # with 5 second sleep + 1 second kubectl timeout, this is roughly 15 minutes
   # See also max_wait which caps the overall wait time
-  local -i -r max_retries=$((100))
+  local -i -r max_retries=$((150))
   local -i num_retries=0
   local -i -r max_wait=$((60*60)) # do not wait longer than 60m in any case
   local -i -r start_time=$(date +%s)
@@ -1346,15 +1346,13 @@ function _unlock() {
 }

 function _check_stateful_sets() {
-  # Checks if any StatefulSets EXCEPT those with a prometheus image (which are part of the infra stack)
+  # Checks if any StatefulSets EXCEPT those in the observability namespace (which are part of the infra stack)
   # are currently deployed
-  # grep exits with return code 1 if it finds no matches so we account for that
-  query_result=$(kubectl get StatefulSets -A -o json 2> /dev/null | jq '.items[].spec.template.spec.containers[].image' | { grep -v prometheus || test $? = 1; })
-  echo "$query_result"
+  kubectl get statefulset -A -o json | jq '.items[] | select(.metadata.namespace != "observability" and .metadata.namespace != "gmp-system")'
 }

 function _check_reset_and_unlock() {
-  s=$(_check_stateful_sets)
+  s="$(_check_stateful_sets)"
   if [ -n "$s" ]; then
     echo -e "Cluster not reset, it is recommended to reset it before unlocking. Reset now?"
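Illustrating the `proposalAcceptanceDeduplication` switch added to `TransferPreapprovalConfig` above, a minimal sketch (only the new field is set explicitly; the remaining fields keep their defaults):

```scala
import org.lfdecentralizedtrust.splice.wallet.config.TransferPreapprovalConfig

// Default: each proposal acceptance is deduplicated, so repeated proposals
// yield a single TransferPreapproval.
val deduplicated = TransferPreapprovalConfig()

// Opt out of deduplication to let the validator batch acceptances for
// throughput; duplicate TransferPreapprovals are then possible (harmless,
// but renewals cost extra fees and traffic).
val batched = TransferPreapprovalConfig(proposalAcceptanceDeduplication = false)
```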
diff --git a/build-tools/docker-push b/build-tools/docker-push
index 11cf34c2a7..199bdbff55 100755
--- a/build-tools/docker-push
+++ b/build-tools/docker-push
@@ -80,6 +80,7 @@ if docker-image-reference-exists "${imageReference}"; then
   fi
 fi

+_info "Tagging image ID ${imageId} with reference ${imageReference}"
 if ! _set_x docker tag "${imageId}" "${imageReference}"; then
   _error "Could not tag image '${imageId}."
   exit 1
@@ -88,6 +89,7 @@ fi
 pushed=""
 max_attempts=5

+_info "Pushing Docker image ${imageReference} (with ID ${imageId})"
 for ((i=1; i<=max_attempts; i++)); do
   if _set_x docker push "${imageReference}"; then
     pushed=1
@@ -102,4 +104,6 @@ if [ -z "$pushed" ]; then
   exit 1
 fi

+_info "Successfully pushed Docker image ${imageReference} (with ID ${imageId})"
+
 echo "${targetPushState}" > target/docker.push
diff --git a/build-tools/prefix-output b/build-tools/prefix-output
new file mode 100755
index 0000000000..e6556f4afb
--- /dev/null
+++ b/build-tools/prefix-output
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+if [ "$#" -eq 0 ]; then
+  echo "Error: No command provided." >&2
+  echo "Usage: $0 <command> [args...]" >&2
+  exit 1
+fi
+
+FULL_CMD="$*"
+
+( "$@" ) 2>&1 | awk -v prefix="[$FULL_CMD] " '{ print prefix $0 }'
diff --git a/build.sbt b/build.sbt
index 2389d6db14..9253de6faf 100644
--- a/build.sbt
+++ b/build.sbt
@@ -1668,6 +1668,8 @@ def mergeStrategy(oldStrategy: String => MergeStrategy): String => MergeStrategy
       case PathList("io", "grpc", _*) => MergeStrategy.first
       // this file comes in multiple flavors, from io.get-coursier:interface and from org.scala-lang.modules:scala-collection-compat. Since the content differs it is resolve this explicitly with this MergeStrategy.
       case path if path.endsWith("scala-collection-compat.properties") => MergeStrategy.first
+      // Don't really care about the notice file so just take any.
+      case "META-INF/FastDoubleParser-NOTICE" => MergeStrategy.first
       case x => oldStrategy(x)
     }
   }
diff --git a/canton/UNRELEASED.md b/canton/UNRELEASED.md
index bbd982a3a6..8d722c7249 100644
--- a/canton/UNRELEASED.md
+++ b/canton/UNRELEASED.md
@@ -9,6 +9,191 @@ schedule, i.e. if you add an entry effective at or after the first header,
 prepend the new date header that corresponds to the
 Wednesday after your change.

+## until 2025-10-15 (Exclusive)
+- Add a `max_record_time` field in the `PrepareSubmissionRequest` for externally signed transactions.
+  This is effectively a Time To Live (TTL) for the submission: it won't be committed to the ledger after that timestamp.
+  Important: The enforcement of this TTL is done by the submitting node (NOT the confirming node of the external party).
+  It is therefore unsigned and does not require a change to the transaction hashing scheme.
+  This also means that the submitting node must be trusted to honor this field if used.
+- New participant config parameter `canton.participants.<participant>.parameters.do-not-await-on-checking-incoming-commitments` to disable the synchronization of checking incoming commitments with crash recovery.
+- Removed `contractSynchronizerRenames` from the legacy ACS import.
+- **BREAKING**: Removed force flag `FORCE_FLAG_ALLOW_UNVET_PACKAGE` and topology manager error `TOPOLOGY_DANGEROUS_VETTING_COMMAND_REQUIRES_FORCE_FLAG`.
+  Unvetting a package is not a dangerous operation anymore.
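Referring to the `max_record_time` field described at the top of this changelog section, a minimal sketch of setting the TTL through the scalapb-generated bindings (the `withMaxRecordTime` accessor is assumed from the generated code for the new optional field):

```scala
import com.daml.ledger.api.v2.interactive.interactive_submission_service.PrepareSubmissionRequest
import com.google.protobuf.timestamp.Timestamp
import java.time.Instant

// Give the prepared submission a 5-minute TTL: it will not be committed to
// the ledger after this record time. Only the submitting node enforces the
// bound (it is not covered by the external party's signature), so that node
// must be trusted to honor it.
val ttl = Instant.now().plusSeconds(300)
val request = PrepareSubmissionRequest.defaultInstance
  .withMaxRecordTime(Timestamp(ttl.getEpochSecond, ttl.getNano))
```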
+- Added new endpoints to allocate external parties via the Ledger API in the `PartyManagementService`:
+  - `GenerateExternalPartyTopology`: Generates topology transactions required to onboard an external party along with the pre-computed hash ready to sign
+  - `AllocateExternalParty`: Allocates an external party on a synchronizer from a set of signed topology transactions
+  Refer to the external party onboarding documentation for more details.
+- **BREAKING**: The new party allocation endpoints above come with a breaking change for users of the gRPC `InteractiveSubmissionService` used in external signing workflows.
+  - The following protobuf messages have been refactored to a new common protobuf package so they can be re-used across different services:
+
+| Old file | Old FQN | New file | New FQN |
+|-------------------------------------------------------------------------|---------------------------------------------------------|-------------------------------------|---------------------------------------------|
+| com/daml/ledger/api/v2/interactive/interactive_submission_service.proto | com.daml.ledger.api.v2.interactive.Signature | com/daml/ledger/api/v2/crypto.proto | com.daml.ledger.api.v2.Signature |
+| com/daml/ledger/api/v2/interactive/interactive_submission_service.proto | com.daml.ledger.api.v2.interactive.SigningAlgorithmSpec | com/daml/ledger/api/v2/crypto.proto | com.daml.ledger.api.v2.SigningAlgorithmSpec |
+| com/daml/ledger/api/v2/interactive/interactive_submission_service.proto | com.daml.ledger.api.v2.interactive.SignatureFormat | com/daml/ledger/api/v2/crypto.proto | com.daml.ledger.api.v2.SignatureFormat |
+
+To consume the update, re-generate any client code generated from the protobuf definitions and update the package names accordingly in your application code.
+
+## until 2025-10-08 (Exclusive)
+- Fixed a bug in the ModelConformanceChecker that would incorrectly reject otherwise valid externally signed transactions
+  that make use of a locally created contract in a subview.
+- Added support for vetting packages on specific synchronizers, by adding an
+  optional synchronizer_id field to the following messages:
+  - `ValidateDarRequest`, `UploadDarRequest`, `VetDarRequest`, and
+    `UnvetDarRequest` in the Admin API
+  - `UploadDarFileRequest`, `ValidateDarFileRequest`, and
+    `UpdateVettedPackagesRequest` in the Ledger API
+  - These requests no longer use AuthorizedStore for vetting changes; they
+    must specify a target synchronizer via the new synchronizer_id fields if
+    the target participant is connected to more than one synchronizer.
+- The `ExportPartyAcs` endpoint (`party_management_service.proto`), intended for offline party replication only, now
+  requires the onboarding flag to be set in the party-to-participant topology transaction that activates the party on a
+  target participant. The ACS export fails if this flag is missing.
+- Setting DynamicSynchronizerParameters.participantResponseTimeout and mediatorReactionTimeout outside
+  of the interval [1s, 5min] requires a force flag.
+- BREAKING: Moved the sequencer-api limits into the server config. Now, the same functionality is
+  also available on the Admin API. The configuration is now under `.stream.limits = { ... }`, for both the public and admin API.
+- Added extra configuration for compressing data in the indexer DB.
+
+
+## until 2025-10-01 (Exclusive)
+- Some methods in Ledger JSON API are now marked as deprecated in `openapi.yml`.
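For the protobuf relocation listed in the table above, consuming the change mostly amounts to an import swap after re-generating the client bindings. A sketch in Scala (scalapb-generated classes assumed; the helper name is illustrative):

```scala
// Before: signature types were generated under the interactive submission package.
//   import com.daml.ledger.api.v2.interactive.interactive_submission_service.Signature
// After re-generating, they live in the shared crypto package, matching the
// `lapicrypto` alias used by the console commands later in this diff:
import com.daml.ledger.api.v2.crypto as lapicrypto
import com.google.protobuf.ByteString

// Illustrative helper: wrap raw signature bytes in the relocated message type.
def toApiSignature(raw: ByteString): lapicrypto.Signature =
  lapicrypto.Signature.defaultInstance.withSignature(raw)
```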
+- Config `canton.sequencers.<sequencer>.sequencer.block.reader.use-recipients-table-for-reads = true` has been removed;
+  the `sequencer_event_recipients` table is now always used for reading from the sequencer via subscriptions.
+- Config `canton.sequencers.<sequencer>.sequencer.block.writer.buffer-events-with-payloads = false // default` has been removed;
+  events are always buffered without payloads now (payloads are cached separately).
+- Config `canton.sequencers.<sequencer>.sequencer.block.writer.buffer-payloads = true // default` has been removed;
+  payloads are always handed off to the cache now.
+- Dropped redundant column `mediator_id` from the `mediator_deduplication_store` table.
+- Mediator deduplication store pruning is now much less aggressive:
+  - running at most 1 query at a time
+  - running at most once every configurable interval:
+```hocon
+  canton.mediators.<mediator>.deduplication-store.prune-at-most-every = 10s // default value
+```
+- Mediator deduplication now has batch aggregator configuration exposed for the `persist` operation,
+  allowing tuning of parallelism and batch size under:
+```hocon
+  canton.mediators.<mediator>.deduplication-store.persist-batching = {
+    maximum-in-flight = 2 // default value
+    maximum-batch-size = 500 // default value
+  }
+```
+- Added endpoints related to topology snapshots that are less memory intensive for the nodes exporting the topology snapshots:
+  - `TopologyManagerReadService.ExportTopologySnapshotV2`: generic topology snapshot export
+  - `TopologyManagerReadService.GenesisStateV2`: export genesis state for major upgrade
+  - `TopologyManagerWriteService.ImportTopologySnapshotV2`: generic topology snapshot import
+  - `SequencerAdministrationService.OnboardingStateV2`: export sequencer snapshot for onboarding a new sequencer
+  - `SequencerInitializationService.InitializeSequencerFromGenesisStateV2`: initialize sequencer for a new synchronizer
+  - `SequencerInitializationService.InitializeSequencerFromOnboardingStateV2`: initialize sequencer from an onboarding snapshot created by `SequencerAdministrationService.OnboardingStateV2`
+
+- Added indices that speed up various topology-related queries as well as the update of the `valid_until` column.
+- Add flag to disable the initial topology snapshot validation
+  ```
+  participants.participant1.topology.validate-initial-topology-snapshot = true // default value
+  mediators.mediator1.topology.validate-initial-topology-snapshot = true // default value
+  sequencers.sequencer1.topology.validate-initial-topology-snapshot = true // default value
+  ```
+
+- Add new endpoint to complete the party onboarding process for offline party replication by removing the onboarding flag.
+  It's intended for polling (called repeatedly by the client), and removes the flag when the conditions are right.
+  See the `CompletePartyOnboarding` rpc in `party_management_service.proto` for more details.
+
+- JSON Ledger API is now configured by default. It listens on port 4003. It can be disabled by setting
+  ```
+  canton.participants.<participant>.http-ledger-api.enabled=false
+  ```
+- Introduced the ability for the user to self-administer.
+  A user can now
+  - query the details (`getParties`) of any party it has a right to operate (ReadAs, ActAs, ExecuteAs or wildcard forms thereof)
+  - query own user record (`getUser`)
+  - query own rights (`listUserRights`)
+  - allocate own party (`allocateParty`)
+
+  There is a per-participant setting that defines the maximum number of access rights that a user can have and still
+  be able to self-allocate a party:
+  ```
+  canton.participants.<participant>.ledger-api.party-management-service.max-self-allocated-parties = 4
+  ```
+  The default is 0.
+- `DisclosedContract.template_id` and `DisclosedContract.contract_id` for Ledger API commands
+  are not required anymore. When provided, the fields are used for validation of the analogous fields in the encoded
+  created event blob.
+- Unvetting a package that is used as a dependency by another vetted package now requires `FORCE_FLAG_ALLOW_UNVETTED_DEPENDENCIES`
+- The legacy gRPC Ledger API message `TransactionFilter` has been removed. As a consequence, the `filter` and `verbose` fields
+  have been dropped from the `GetUpdatesRequest` and `GetActiveContractsRequest` messages.
+- The JSON versions of the above gRPC messages **continue to be supported in their old form in 3.4**.
+  They will be removed or altered, respectively, in 3.5. This affects:
+  - `TransactionFilter` will be removed in 3.5
+  - `GetUpdatesRequest` will be altered in 3.5; it is used in
+    - `/v2/updates` HTTP POST and websocket GET endpoints
+    - `/v2/updates/flats` HTTP POST and websocket GET endpoints
+    - `/v2/updates/trees` HTTP POST and websocket GET endpoints
+  - `GetActiveContractsRequest` will be altered in 3.5; it is used in
+    - `/v2/state/active-contracts` HTTP POST and websocket GET endpoints
+- The configuration for the removed Ledger API transaction tree stream related methods has been removed as well
+  ```
+  canton.participants.<participant>.ledger-api.index-service.transaction-tree-streams.*
+  ```
+- gRPC and JSON API payloads for create events (create arguments, interface views and keys) are now
+  rendered using the normal form of the types defined in `CreatedEvent.representative_package_id`.
+  This should be transparent on the returned values, with the exception of the record-ids in Ledger API verbose
+  rendering (`verbose = true`), which now effectively reflect the representative package id as well.
+- `VettedPackagesRef` has to refer to only one package if used to vet packages. If it refers to zero packages while unvetting, we log at debug level.
+- Aligned the new Sequencer `GetTime` gRPC API with established conventions
+- Topology dispatching errors are now logged at WARN level (instead of ERROR).
+- Party allocation and tx generation are now supported on the Ledger API.
+- BREAKING: minor breaking console change: the BaseResult.transactionHash type has been changed from ByteString to TxHash. The Admin API itself remained unchanged.
+- **BREAKING**: `timeProofFreshnessProportion` has been removed
+
+## until 2025-09-24 (Exclusive)
+- Add new Ledger API endpoints to improve UX of package vetting:
+
+  1. `ListVettedPackages` in `package_service.proto`, to easily list which
+     packages are vetted.
+  2. `UpdateVettedPackages` in `package_management_service.proto`, to easily vet
+     and unvet packages.
+
+- Modify `UploadDarFileRequest` in `package_management_service.proto` to take a
+  `vetting_change` attribute, which specifies whether the uploaded DAR should be
+  vetted or not.
+
+- Added a new `GetTime` gRPC endpoint to the sequencer API that returns a "current" sequencing time.
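As a small illustration of the relaxed `DisclosedContract` requirements noted in the 2025-10-01 section above, a sketch using the scalapb bindings (assumed; the blob argument is a placeholder for the `created_event_blob` taken from the contract's `CreatedEvent`):

```scala
import com.daml.ledger.api.v2.commands.DisclosedContract
import com.google.protobuf.ByteString

// template_id and contract_id may now be left unset; when provided, they are
// only used to validate the corresponding fields inside the created event blob.
def disclose(createdEventBlob: ByteString): DisclosedContract =
  DisclosedContract.defaultInstance.withCreatedEventBlob(createdEventBlob)
```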
+- All JSON API v1 endpoints `/v1/*` have been removed.
+- The legacy gRPC Ledger API method `CommandService.SubmitAndWaitForTransactionTree` has been removed. The JSON version
+  of this request `/v2/commands/submit-and-wait-for-transaction-tree` continues to be supported in 3.4, but will be
+  removed in 3.5.
+- The legacy gRPC Ledger API methods in the `UpdateService` have been removed.
+  - `GetUpdateTrees`
+  - `GetTransactionTreeByOffset`
+  - `GetTransactionTreeById`
+  - `GetTransactionByOffset`
+  - `GetTransactionById`
+- The JSON versions of the removed `UpdateService` requests continue to be supported in 3.4, but will be removed in 3.5.
+  - `/v2/updates/trees`
+  - `/v2/updates/transaction-tree-by-offset`
+  - `/v2/updates/transaction-tree-by-id`
+  - `/v2/updates/transaction-by-offset`
+  - `/v2/updates/transaction-by-id`
+- `ParticipantRepairService.ImportAcs` is updated to accommodate new smart-contract upgrading semantics that are introduced
+  in Canton 3.4. More specifically:
+  - **BREAKING** `ImportAcsRequest.contract_id_suffix_recomputation_mode` is renamed to `ImportAcsRequest.contract_import_mode`
+    and the `ContractIdSuffixRecomputationMode` enum is renamed to `ImportAcsRequest.ContractImportMode` to better reflect its purpose.
+    Upon import, contracts can be fully validated (including contract-id suffix recomputation).
+  - `ImportAcsRequest.representative_package_id_override` is introduced to allow overriding the original package id of the imported contracts.
+    This allows the target participant to use a compatible alternative package for the contract
+    without needing to upload the original contracts' packages.
+
+- **BREAKING**: `topologyChangeDelay` has been moved from `DynamicSynchronizerParameters` to `StaticSynchronizerParameters` and cannot be changed
+  on a physical synchronizer.
+
+- **BREAKING**: `reassignmentTimeProofFreshnessProportion` has been moved to the nested location `reassignmentsConfig.timeProofFreshnessProportion`
+
+- Deduplication references added to the Ledger API DB, giving a performance improvement for ACS retrievals in the presence of many archived contracts.
+  - New participant config parameter `active-contracts-service-streams-config.id-filter-query-parallelism` is added, controlling the
+    introduced parallel processing stage filtering IDs (default: 2) during the Ledger API client streaming.
+  - New participant config parameter `indexer-config.db-prepare-parallelism` is added, controlling the introduced parallel processing
+    stage computing deactivation references during indexing.
+
 ## until 2025-09-17 (Exclusive)
 - The participant admin workflows have been renamed
@@ -52,7 +237,6 @@ Wednesday after your change.
   sequencers.sequencer1.topology.topology-transaction-observation-timeout = 30s // default value
   sequencers.sequencer1.topology.broadcast-retry-delay = 10s // default value
   ```
-
 - **Breaking** Renamed `AuthenticationTokenManagerConfig#pauseRetries` to `minRetryInterval`.
 - **Breaking** Package upgrade validation moved to vetting state change. Thus
   uploading an upgrade-incompatible DAR with vetting disabled is now possible.
@@ -225,7 +409,7 @@ Wednesday after your change.
``` canton.sequencers.sequencer.parameters.sequencer-api-limits = { "com.digitalasset.canton.sequencer.api.v30.SequencerService/DownloadTopologyStateForInit" : 10, - "com.digitalasset.canton.sequencer.api.v30.SequencerService/SubscribeV2" : 1000, + "com.digitalasset.canton.sequencer.api.v30.SequencerService/Subscribe" : 1000, } ``` - Authorization of the calls made by the IDP Admins has been tightened. It is no longer possible for them to grant diff --git a/canton/base/errors/src/main/scala/com/digitalasset/base/error/ErrorResource.scala b/canton/base/errors/src/main/scala/com/digitalasset/base/error/ErrorResource.scala index 63e71890c4..3c9241fb88 100644 --- a/canton/base/errors/src/main/scala/com/digitalasset/base/error/ErrorResource.scala +++ b/canton/base/errors/src/main/scala/com/digitalasset/base/error/ErrorResource.scala @@ -9,7 +9,9 @@ package com.digitalasset.base.error * cases, we include the resource identifier as part of the error message. This enum allows an * error to provide identifiers of a resource */ -final case class ErrorResource(asString: String) +final case class ErrorResource(asString: String) { + def nullable: ErrorResource = ErrorResource(s"NULLABLE_$asString") +} object ErrorResource { lazy val ContractId: ErrorResource = ErrorResource("CONTRACT_ID") @@ -68,5 +70,11 @@ object ErrorResource { User, ) - def fromString(str: String): Option[ErrorResource] = all.find(_.asString == str) + def fromString(str: String): Option[ErrorResource] = str.split("NULLABLE_") match { + case Array("", resource) => + all.find(_.asString == resource).map(_.nullable) + case Array(resource) => + all.find(_.asString == resource) + case _ => None + } } diff --git a/canton/base/util-external/src/main/scala/com/digitalasset/canton/config/RequireTypes.scala b/canton/base/util-external/src/main/scala/com/digitalasset/canton/config/RequireTypes.scala index 05ebf6faa8..8a3c20b518 100644 --- a/canton/base/util-external/src/main/scala/com/digitalasset/canton/config/RequireTypes.scala +++ b/canton/base/util-external/src/main/scala/com/digitalasset/canton/config/RequireTypes.scala @@ -197,6 +197,26 @@ object RequireTypes { def size[T](collection: Iterable[T]): NonNegativeInt = tryCreate(collection.size) } + type NonNegativeDouble = NonNegativeNumeric[Double] + + object NonNegativeDouble { + lazy val zero: NonNegativeDouble = NonNegativeDouble.tryCreate(0.0) + lazy val one: NonNegativeDouble = NonNegativeDouble.tryCreate(1.0) + lazy val maxValue: NonNegativeDouble = NonNegativeDouble.tryCreate(Double.MaxValue) + + def create(n: Double): Either[InvariantViolation, NonNegativeDouble] = + NonNegativeNumeric.create(n) + def tryCreate(n: Double): NonNegativeDouble = NonNegativeNumeric.tryCreate(n) + } + + final case class NonNegativeProportion(n: NonNegativeDouble) { + require(n <= NonNegativeDouble.one, "proportion may not be larger than 1") + } + + object NonNegativeProportion { + lazy val zero: NonNegativeProportion = NonNegativeProportion(NonNegativeDouble.zero) + } + type NonNegativeLong = NonNegativeNumeric[Long] object NonNegativeLong { diff --git a/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/package_service.proto b/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/package_service.proto index 4810809491..ce51a12948 100644 --- a/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/package_service.proto +++ 
b/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/package_service.proto @@ -104,6 +104,10 @@ message RemovePackageResponse { message ValidateDarRequest { bytes data = 1; string filename = 2; + + // Used to specify the synchronizer on which to validate that vetting is + // correct. + optional string synchronizer_id = 3; } message ValidateDarResponse { @@ -118,10 +122,21 @@ message UploadDarRequest { optional string expected_main_package_id = 3; } repeated UploadDarData dars = 1; - // if set to true (should be used by default), we'll register the vetting topology transactions on all synchronizers + + // If set to true (default), the node will register the vetting topology + // transactions on the specified synchronizer or autodetect the synchronizer + // if the participant is only connected to a single one. If multiple + // synchronizers are connected and no synchronizer is specified, the request + // returns a PACKAGE_SERVICE_CANNOT_AUTODETECT_SYNCHRONIZER error. bool vet_all_packages = 2; - // if set to true, we'll wait until the vetting transaction has been observed by this participant on all connected synchronizers + + // If set to true, we'll wait until the vetting transaction has been observed + // by this participant for the target synchronizer. bool synchronize_vetting = 3; + + // Used to specify the synchronizer to vet packages on when vet_all_packages + // is true. + optional string synchronizer_id = 4; } message UploadDarResponse { @@ -171,14 +186,20 @@ message GetDarContentsResponse { message VetDarRequest { string main_package_id = 1; // if set to true, the API call waits until the vetting transaction has been - // observed by this participant on all connected synchronizers. + // observed by this participant on the specified synchronizer. bool synchronize = 2; + // the synchronizer on which to vet the package. + // can be omitted if the participant is connected to only one synchronizer. + optional string synchronizer_id = 3; } message VetDarResponse {} message UnvetDarRequest { string main_package_id = 1; + // the synchronizer on which to unvet the package. + // can be omitted if the participant is connected to only one synchronizer. 
+  optional string synchronizer_id = 2;
 }

 message UnvetDarResponse {}
diff --git a/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/participant_repair_service.proto b/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/participant_repair_service.proto
index 2b9fddb71f..35f4472cf1 100644
--- a/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/participant_repair_service.proto
+++ b/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/participant_repair_service.proto
@@ -111,13 +111,6 @@ message MigrateSynchronizerRequest {
 message MigrateSynchronizerResponse {}

 message ExportAcsOldRequest {
-  message TargetSynchronizer {
-    // The ID of the synchronizer where the contract is supposed to be assigned when the export is being imported
-    string synchronizer_id = 1;
-    // The protocol version associated to the synchronizer where the contract is supposed to be assigned when the contracts snapshot is being imported
-    int32 protocol_version = 2;
-  }
-
   // The parties for which the ACS should be exported
   // Required
   repeated string parties = 1;
@@ -136,7 +129,7 @@ message ExportAcsOldRequest {
   // ID in the key will be assigned to the synchronizer id and protocol version in the value. This is not a proper synchronizer
   // migration of contracts and it's supposed to be used only in exceptional cases.
   // Optional, if not provided the contracts will be exported with the same synchronizer id as they are currently assigned
-  map<string, TargetSynchronizer> contract_synchronizer_renames = 4;
+  reserved 4; // was map<string, TargetSynchronizer> contract_synchronizer_renames

   // If true, do not check whether the provided timestamp is clean (see `timestamp` field).
   // NOT FOR PRODUCTION USE.
@@ -223,17 +216,38 @@ message ExportAcsResponse {
   bytes chunk = 1;
 }

-enum ContractIdImportMode {
-  CONTRACT_ID_IMPORT_MODE_UNSPECIFIED = 0;
-  // The contract ID neither gets validated nor recomputed.
-  // Allows to save time for large imports where it has been otherwise ensured that the contract ID suffix
-  // matches the scheme associated to the synchronizer.
-  CONTRACT_ID_IMPORT_MODE_ACCEPT = 1;
-  // The service will fail if any contract ID suffix doesn't match the scheme
-  // associated to the synchronizer where the contract is being assigned as a result of the import.
-  CONTRACT_ID_IMPORT_MODE_VALIDATION = 2;
-  // Any contract ID suffix will be recomputed to match the scheme associated to the synchronizer.
-  CONTRACT_ID_IMPORT_MODE_RECOMPUTATION = 3;
+enum ContractImportMode {
+  // Default enum value that indicates the ContractImportMode field was not explicitly set.
+  // Requests with this value will fail validation and be rejected.
+  CONTRACT_IMPORT_MODE_UNSPECIFIED = 0;
+  // A contract is imported without validation or contract-id recomputation.
+  // This option is suitable for large imports when the participant operator is confident that all contracts
+  // are valid for their associated representative package-id.
+  CONTRACT_IMPORT_MODE_ACCEPT = 1;
+  // Upon import, each contract is validated, including authentication of its contract ID.
+  // The operation will fail if any contract does not pass validation.
+  //
+  // This is the recommended option for most import scenarios.
+  CONTRACT_IMPORT_MODE_VALIDATION = 2;
+  // The contract is validated and its contract ID suffix is recomputed.
+  CONTRACT_IMPORT_MODE_RECOMPUTATION = 3;
+}
+
+// Defines override mappings for assigning representative package IDs to contracts upon ACS import.
+message RepresentativePackageIdOverride {
+  // Mapping from contract ID to the target representative package ID.
+  // Only contracts with IDs matching the keys in this map are affected.
+  map<string, string> contract_override = 1;
+
+  // Mapping from a contract's original package IDs (either the creation or the representative package ID) to the target representative package ID.
+  // Affects only contracts with creation or representative package IDs matching the keys in this map,
+  // except if they're already affected by `contract_override`.
+  map<string, string> package_id_override = 2;
+
+  // Mapping from any contract's package name to the target representative package ID.
+  // Affects only contracts with package names matching the keys in this map,
+  // except if they're already affected by `contract_override` or `package_id_override`.
+  map<string, string> package_name_override = 3;
 }

 message ImportAcsRequest {
@@ -246,13 +260,38 @@ message ImportAcsRequest {
   // The synchronizer id prefix to be used for the imported contracts
   // Optional, if not provided the service will generate a prefix
   string workflow_id_prefix = 2;
-  // How contract ID should be evaluated upon import
+  // How imported contracts should be validated upon import
+  //
+  // NOTE: The representative package IDs assigned to imported contracts must ensure proper type-checking for those contracts.
+  // Importing contracts with representative package IDs that do not type-check may cause unexpected behavior,
+  // including INTERNAL errors on Ledger API read queries or incorrect Daml value representations.
+  // If unsure, perform the ACS import with ContractImportMode set to CONTRACT_IMPORT_MODE_VALIDATION to ensure
+  // that the assigned representative package IDs correctly validate the imported contracts.
+  //
   // Required
-  ContractIdImportMode contract_id_suffix_recomputation_mode = 3;
+  ContractImportMode contract_import_mode = 3;
   // A list of party IDs to exclude from the import.
   // Any contract that has one or more of these parties as a stakeholder will be omitted during the import.
   // Optional
   repeated string excluded_stakeholder_ids = 4;
+
+  // The representative package ID override rules to be applied during import.
+  //
+  // The representative package ID is assigned to a contract upon ACS import based on the following precedence rules.
+  // 1. The associated override for the contract ID in RepresentativePackageIdOverride.contract_override, if specified and present in the package store
+  // 2. The associated override for the contract's original package ID in RepresentativePackageIdOverride.package_id_override, if specified and present in the package store
+  // 3. The contract's representative package ID from the ACS snapshot, if present in the package store
+  // 4. The contract's creation package ID, if present in the package store
+  // 5. The associated override for the contract's package name in RepresentativePackageIdOverride.package_name_override, if specified and present in the package store
+  // 6. The highest-versioned package in the participant's package store for the contract's package name
+  //
+  // If none of the above rules yield a representative package ID, the ACS import request will fail.
+  //
+  // See ``CreatedEvent.representative_package_id`` in ``event.proto`` for more details on the concept of
+  // representative package IDs.
+ // + // Optional + RepresentativePackageIdOverride representative_package_id_override = 5; } message ImportAcsResponse { diff --git a/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/party_management_service.proto b/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/party_management_service.proto index c7a4b020fd..040ac2cd96 100644 --- a/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/party_management_service.proto +++ b/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/party_management_service.proto @@ -13,8 +13,13 @@ import "scalapb/scalapb.proto"; * The PartyManagementService allows modifying party hosting on participants. * * Note that ACS refers to Active Contract Set. + * + * Important: The online and offline party replication endpoints serve distinct workflows, + * and must not be combined. */ service PartyManagementService { + // Online party replication endpoint. + // // Initiate adding a party already hosted on one or more source participants to this // target participant in the specified synchronizer. // Performs some checks synchronously and then starts the party addition asynchronously. @@ -26,6 +31,8 @@ service PartyManagementService { // already be in place prior to the call. rpc AddPartyAsync(AddPartyAsyncRequest) returns (AddPartyAsyncResponse); + // Online party replication endpoint. + // // Status endpoint that given an add_party_request_id returns status information about progress, // completion, or errors of a previous call to AddPartyAsync on the source or target // participant. @@ -37,9 +44,21 @@ service PartyManagementService { // successfully. rpc GetAddPartyStatus(GetAddPartyStatusRequest) returns (GetAddPartyStatusResponse); - // Export the party's ACS to replicate it onto the target participant. + // Offline party replication focused ACS export. + // + // Exports the ACS of a single party from a source participant, so that the party can be + // replicated onto a target participant. + // + // This endpoint works in conjunction with the ImportPartyAcs endpoint. rpc ExportPartyAcs(ExportPartyAcsRequest) returns (stream ExportPartyAcsResponse); + // Offline party replication focused ACS import. + // + // Imports the party's ACS into the target participant. + // + // This endpoint works in conjunction with ExportPartyAcs endpoint. + rpc ImportPartyAcs(stream ImportPartyAcsRequest) returns (ImportPartyAcsResponse); + // For a given timestamp, find the highest ledger offset among all events that have record time <= timestamp. // // Returns a ledger offset, or an error otherwise. Depending on the error cause, a retry may make sense. @@ -47,6 +66,15 @@ service PartyManagementService { // Further, a returned offset is guaranteed to be "clean", meaning all events have been processed fully and // published to the Ledger API DB until the requested timestamp. rpc GetHighestOffsetByTimestamp(GetHighestOffsetByTimestampRequest) returns (GetHighestOffsetByTimestampResponse); + + // Offline party replication endpoint. + // + // Finalizes a party's onboarding by having the target participant unilaterally remove the onboarding flag on the + // party to participant topology mapping. + // + // The successful removal depends on the change history of the dynamic synchronizer's confirmation response timeout + // and the mediator reaction timeout parameters. 
+  rpc CompletePartyOnboarding(CompletePartyOnboardingRequest) returns (CompletePartyOnboardingResponse);
 }

 message AddPartyAsyncRequest {
@@ -231,6 +259,17 @@ message ExportPartyAcsResponse {
   bytes chunk = 1;
 }

+message ImportPartyAcsRequest {
+  // The raw binary of the ACS snapshot to be imported
+  // The binary content does not align on individual active contracts! It may contain bytes that are part of
+  // an active contract, or include several active contracts - depending on how much data fits into the
+  // configured gRPC message size.
+  // Required
+  bytes acs_snapshot = 1;
+}
+
+message ImportPartyAcsResponse {}
+
 // Requests the highest ledger offset among all events belonging to the synchronizer (`synchronizer_id`)
 // that have a record time before or at the given `timestamp`.
 //
@@ -256,3 +295,40 @@ message GetHighestOffsetByTimestampResponse {
   // Required
   int64 ledger_offset = 1;
 }
+
+message CompletePartyOnboardingRequest {
+  // The identifier of the party being onboarded. This party must already be active on the target participant.
+  // Required
+  string party_id = 1;
+
+  // The identifier of the synchronizer.
+  // Required
+  string synchronizer_id = 2;
+
+  // The unique identifier of the target participant where the party is being onboarded.
+  // Required
+  string target_participant_uid = 3;
+
+  // The exclusive ledger offset used as the starting point to find the party's most recent activation on the target
+  // participant.
+  // Required
+  int64 begin_offset_exclusive = 4;
+
+  // The maximum duration the service will wait to find the topology transaction that activates the party on
+  // the target participant.
+  // If not set, the service will use a default timeout.
+  // Optional
+  google.protobuf.Duration wait_for_activation_timeout = 5;
+}
+
+message CompletePartyOnboardingResponse {
+  // True if the party onboarding was successfully finalized, false otherwise.
+  // Required
+  bool onboarded = 1;
+
+  // If onboarding finalization failed, this field suggests the earliest time to retry the call.
+  //
+  // This is only present when `onboarded` is false. The timestamp is an estimate and does not
+  // guarantee success on the next attempt.
+  optional google.protobuf.Timestamp earliest_retry_timestamp = 2;
+}
diff --git a/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/sequencer/v30/sequencer_connection.proto b/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/sequencer/v30/sequencer_connection.proto
index 190c92a445..56a71e1a4b 100644
--- a/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/sequencer/v30/sequencer_connection.proto
+++ b/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/sequencer/v30/sequencer_connection.proto
@@ -60,6 +60,9 @@ message SequencerConnections {
   //
   // This value must be non-negative.
   uint32 sequencer_liveness_margin = 4;
+
+  // Configures the various delays used by the sequencer connection pool.
+  SequencerConnectionPoolDelays sequencer_connection_pool_delays = 5;
 }

 message SubmissionRequestAmplification {
@@ -73,3 +76,18 @@ message SubmissionRequestAmplification {
   // This value must be non-negative.
   google.protobuf.Duration patience = 2;
 }
+
+message SequencerConnectionPoolDelays {
+  // Minimum duration after which a failed sequencer connection is restarted.
+  // This value must be non-negative.
+  google.protobuf.Duration min_restart_delay = 1;
+
+  // Maximum duration after which a failed sequencer connection is restarted.
+ // This value must be non-negative. + google.protobuf.Duration max_restart_delay = 2; + + // Delay between the attempts to obtain new sequencer connections for the sequencer subscription + // pool, when the current number of subscriptions is below `trustThreshold` + `livenessMargin`. + // This value must be non-negative. + google.protobuf.Duration subscription_request_delay = 3; +} diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiCommands.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiCommands.scala index cafb58ec95..b31fbfe59c 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiCommands.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiCommands.scala @@ -70,6 +70,7 @@ import com.daml.ledger.api.v2.command_submission_service.{ } import com.daml.ledger.api.v2.commands.{Command, Commands, DisclosedContract, PrefetchContractKey} import com.daml.ledger.api.v2.completion.Completion +import com.daml.ledger.api.v2.crypto as lapicrypto import com.daml.ledger.api.v2.event.CreatedEvent import com.daml.ledger.api.v2.event_query_service.EventQueryServiceGrpc.EventQueryServiceStub import com.daml.ledger.api.v2.event_query_service.{ @@ -152,7 +153,7 @@ import com.daml.ledger.api.v2.update_service.{ GetUpdatesResponse, UpdateServiceGrpc, } -import com.daml.ledger.api.v2.value.Identifier +import com.digitalasset.canton.admin.api.client import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand.{ DefaultUnboundedTimeout, ServerEnforcedTimeout, @@ -168,8 +169,8 @@ import com.digitalasset.canton.admin.api.client.data.{ TemplateId, UserRights, } -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.crypto.Signature +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.crypto.{Signature, SigningPublicKey} import com.digitalasset.canton.data.{CantonTimestamp, DeduplicationPeriod} import com.digitalasset.canton.ledger.api.{ IdentityProviderConfig as ApiIdentityProviderConfig, @@ -182,7 +183,8 @@ import com.digitalasset.canton.networking.grpc.ForwardingStreamObserver import com.digitalasset.canton.platform.apiserver.execution.CommandStatus import com.digitalasset.canton.protocol.LfContractId import com.digitalasset.canton.serialization.ProtoConverter -import com.digitalasset.canton.topology.{Party, PartyId, SynchronizerId} +import com.digitalasset.canton.topology.transaction.TopologyTransaction.GenericTopologyTransaction +import com.digitalasset.canton.topology.{ParticipantId, Party, PartyId, SynchronizerId} import com.digitalasset.canton.util.BinaryFileUtil import com.digitalasset.canton.{LfPackageId, LfPackageName, LfPartyId} import com.google.protobuf.empty.Empty @@ -237,6 +239,94 @@ object LedgerApiCommands { response.partyDetails.toRight("Party could not be created") } + final case class GenerateExternalPartyTopology( + synchronizerId: SynchronizerId, + partyHint: String, + publicKey: SigningPublicKey, + localParticipantObservationOnly: Boolean, + otherConfirmingParticipantIds: Seq[ParticipantId], + confirmationThreshold: NonNegativeInt, + observingParticipantIds: Seq[ParticipantId], + ) extends BaseCommand[ + GenerateExternalPartyTopologyRequest, + GenerateExternalPartyTopologyResponse, + client.data.parties.GenerateExternalPartyTopology, + ] { + 
+ import com.digitalasset.canton.crypto.LedgerApiCryptoConversions.* + import com.daml.ledger.api.v2 + + override protected def submitRequest( + service: PartyManagementServiceStub, + request: GenerateExternalPartyTopologyRequest, + ): Future[GenerateExternalPartyTopologyResponse] = + service.generateExternalPartyTopology(request) + + override protected def createRequest(): Either[String, GenerateExternalPartyTopologyRequest] = + Right( + GenerateExternalPartyTopologyRequest( + synchronizer = synchronizerId.toProtoPrimitive, + partyHint = partyHint, + publicKey = Some( + publicKey.toProtoV30 + .into[v2.crypto.SigningPublicKey] + .withFieldRenamed(_.publicKey, _.keyData) + .transform + ), + localParticipantObservationOnly = localParticipantObservationOnly, + otherConfirmingParticipantUids = + otherConfirmingParticipantIds.map(_.uid.toProtoPrimitive), + confirmationThreshold = confirmationThreshold.value, + observingParticipantUids = observingParticipantIds.map(_.uid.toProtoPrimitive), + ) + ) + + override protected def handleResponse( + response: GenerateExternalPartyTopologyResponse + ): Either[ + String, + client.data.parties.GenerateExternalPartyTopology, + ] = + client.data.parties.GenerateExternalPartyTopology + .fromProto(response) + .leftMap(_.message) + } + + final case class AllocateExternalParty( + synchronizerId: SynchronizerId, + transactions: Seq[(GenericTopologyTransaction, Seq[Signature])], + multiHashSignatures: Seq[Signature], + ) extends BaseCommand[ + AllocateExternalPartyRequest, + AllocateExternalPartyResponse, + AllocateExternalPartyResponse, + ] { + override protected def createRequest(): Either[String, AllocateExternalPartyRequest] = + Right( + AllocateExternalPartyRequest( + synchronizer = synchronizerId.toProtoPrimitive, + onboardingTransactions = transactions.map { case (transaction, signatures) => + AllocateExternalPartyRequest.SignedTransaction( + transaction.getCryptographicEvidence, + signatures.map(_.toProtoV30.transformInto[lapicrypto.Signature]), + ) + }, + multiHashSignatures = + multiHashSignatures.map(_.toProtoV30.transformInto[lapicrypto.Signature]), + identityProviderId = "", + ) + ) + override protected def submitRequest( + service: PartyManagementServiceStub, + request: AllocateExternalPartyRequest, + ): Future[AllocateExternalPartyResponse] = + service.allocateExternalParty(request) + override protected def handleResponse( + response: AllocateExternalPartyResponse + ): Either[String, AllocateExternalPartyResponse] = + Right(response) + } + final case class Update( party: Party, annotationsUpdate: Option[Map[String, String]], @@ -301,13 +391,16 @@ object LedgerApiCommands { Right(response.partyDetails) } - final case class GetParty(party: Party, identityProviderId: String) - extends BaseCommand[GetPartiesRequest, GetPartiesResponse, PartyDetails] { + final case class GetParties( + parties: Seq[PartyId], + identityProviderId: String, + failOnNotFound: Boolean, + ) extends BaseCommand[GetPartiesRequest, GetPartiesResponse, Map[PartyId, PartyDetails]] { override protected def createRequest(): Either[String, GetPartiesRequest] = Right( GetPartiesRequest( - parties = Seq(party.toProtoPrimitive), + parties = parties.map(_.toProtoPrimitive), identityProviderId = identityProviderId, ) ) @@ -319,8 +412,18 @@ object LedgerApiCommands { override protected def handleResponse( response: GetPartiesResponse - ): Either[String, PartyDetails] = - response.partyDetails.headOption.toRight("PARTY_NOT_FOUND") + ): Either[String, Map[PartyId, PartyDetails]] = { + val 
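+        // Key each returned record by PartyId; when failOnNotFound is set, fail the
+        // call and list any requested parties absent from the response.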
result = + response.partyDetails.map(d => (PartyId.tryFromProtoPrimitive(d.party), d)).toMap + if (failOnNotFound) { + val notFound = parties.toSet -- result.keySet + Either.cond( + notFound.isEmpty, + result, + s"The following parties were not found on the Ledger API: $notFound", + ) + } else Right(result) + } } final case class UpdateIdp( @@ -353,6 +456,7 @@ object LedgerApiCommands { ): Either[String, Unit] = Either.unit } + } object PackageManagementService { @@ -363,13 +467,18 @@ object LedgerApiCommands { PackageManagementServiceGrpc.stub(channel) } - final case class UploadDarFile(darPath: String) + final case class UploadDarFile(darPath: String, synchronizerId: Option[SynchronizerId]) extends BaseCommand[UploadDarFileRequest, UploadDarFileResponse, Unit] { override protected def createRequest(): Either[String, UploadDarFileRequest] = for { bytes <- BinaryFileUtil.readByteStringFromFile(darPath) - } yield UploadDarFileRequest(bytes, submissionId = "") + } yield UploadDarFileRequest( + bytes, + submissionId = "", + vettingChange = UploadDarFileRequest.VettingChange.VETTING_CHANGE_VET_ALL_PACKAGES, + synchronizerId.map(_.toProtoPrimitive).getOrElse(""), + ) override protected def submitRequest( service: PackageManagementServiceStub, request: UploadDarFileRequest, @@ -383,13 +492,17 @@ object LedgerApiCommands { } - final case class ValidateDarFile(darPath: String) + final case class ValidateDarFile(darPath: String, synchronizerId: Option[SynchronizerId]) extends BaseCommand[ValidateDarFileRequest, ValidateDarFileResponse, Unit] { override protected def createRequest(): Either[String, ValidateDarFileRequest] = for { bytes <- BinaryFileUtil.readByteStringFromFile(darPath) - } yield ValidateDarFileRequest(bytes, submissionId = "") + } yield ValidateDarFileRequest( + bytes, + submissionId = "", + synchronizerId.map(_.toProtoPrimitive).getOrElse(""), + ) override protected def submitRequest( service: PackageManagementServiceStub, @@ -1184,8 +1297,6 @@ object LedgerApiCommands { beginExclusive = beginExclusive, endInclusive = endInclusive, updateFormat = Some(updateFormat), - filter = None, - verbose = false, ) } @@ -1483,6 +1594,7 @@ object LedgerApiCommands { packageIdSelectionPreference: Seq[LfPackageId], verboseHashing: Boolean, prefetchContractKeys: Seq[PrefetchContractKey], + maxRecordTime: Option[CantonTimestamp], ) extends BaseCommand[ PrepareSubmissionRequest, PrepareSubmissionResponse, @@ -1506,6 +1618,7 @@ object LedgerApiCommands { packageIdSelectionPreference = packageIdSelectionPreference, verboseHashing = verboseHashing, prefetchContractKeys = prefetchContractKeys, + maxRecordTime = maxRecordTime.map(_.toProtoTimestamp), ) ) @@ -1540,13 +1653,12 @@ object LedgerApiCommands { import com.digitalasset.canton.crypto.LedgerApiCryptoConversions.* import io.scalaland.chimney.dsl.* - import com.daml.ledger.api.v2.interactive.interactive_submission_service as iss private def makePartySignatures: PartySignatures = PartySignatures( transactionSignatures.map { case (party, signatures) => SinglePartySignatures( party = party.toProtoPrimitive, - signatures = signatures.map(_.toProtoV30.transformInto[iss.Signature]), + signatures = signatures.map(_.toProtoV30.transformInto[lapicrypto.Signature]), ) }.toSeq ) @@ -1991,7 +2103,7 @@ object LedgerApiCommands { Right(response.offset) } - final case class GetConnectedSynchronizers(partyId: LfPartyId) + final case class GetConnectedSynchronizers(partyId: Option[LfPartyId]) extends BaseCommand[ GetConnectedSynchronizersRequest, 
GetConnectedSynchronizersResponse, @@ -2001,7 +2113,7 @@ object LedgerApiCommands { override protected def createRequest(): Either[String, GetConnectedSynchronizersRequest] = Right( GetConnectedSynchronizersRequest( - partyId.toString, + partyId.getOrElse(""), participantId = "", identityProviderId = "", ) @@ -2024,7 +2136,7 @@ object LedgerApiCommands { parties: Set[LfPartyId], limit: PositiveInt, templateFilter: Seq[TemplateId] = Seq.empty, - interfaceFilter: Seq[Identifier] = Seq.empty, + interfaceFilter: Seq[TemplateId] = Seq.empty, activeAtOffset: Long, verbose: Boolean = true, timeout: FiniteDuration, @@ -2047,13 +2159,13 @@ object LedgerApiCommands { TemplateFilter(Some(tId.toIdentifier), includeCreatedEventBlob) ) ) - ) ++ interfaceFilter.map(id => + ) ++ interfaceFilter.map(iId => CumulativeFilter( IdentifierFilter.InterfaceFilter( InterfaceFilter( - Some(id), - includeCreatedEventBlob = includeCreatedEventBlob, + Some(iId.toIdentifier), includeInterfaceView = true, + includeCreatedEventBlob = includeCreatedEventBlob, ) ) ) @@ -2062,8 +2174,6 @@ object LedgerApiCommands { } else Filters.defaultInstance Right( GetActiveContractsRequest( - filter = None, - verbose = false, activeAtOffset = activeAtOffset, eventFormat = Some(EventFormat(parties.map((_, filter)).toMap, None, verbose)), ) diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala index 68b552253d..80064ec618 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala @@ -44,7 +44,10 @@ import com.digitalasset.canton.data.{CantonTimestamp, CantonTimestampSecond} import com.digitalasset.canton.logging.TracedLogger import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.participant.admin.ResourceLimits -import com.digitalasset.canton.participant.admin.data.ContractIdImportMode +import com.digitalasset.canton.participant.admin.data.{ + ContractImportMode, + RepresentativePackageIdOverride, +} import com.digitalasset.canton.participant.admin.party.PartyParticipantPermission import com.digitalasset.canton.participant.admin.traffic.TrafficStateAdmin import com.digitalasset.canton.participant.pruning.AcsCommitmentProcessor.{ @@ -68,7 +71,6 @@ import com.digitalasset.canton.topology.{ } import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.{BinaryFileUtil, GrpcStreamingUtils, OptionUtil, PathUtils} -import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.canton.{ReassignmentCounter, SequencerCounter, SynchronizerAlias, config} import com.google.protobuf.ByteString import com.google.protobuf.timestamp.Timestamp @@ -189,6 +191,7 @@ object ParticipantAdminCommands { ) final case class UploadDar( dars: Seq[DarData], + synchronizerId: Option[SynchronizerId], vetAllPackages: Boolean, synchronizeVetting: Boolean, requestHeaders: Map[String, String], @@ -221,6 +224,7 @@ object ParticipantAdminCommands { _, synchronizeVetting = synchronizeVetting, vetAllPackages = vetAllPackages, + synchronizerId = synchronizerId.map(_.toProtoPrimitive), ) ) @@ -265,6 +269,7 @@ object ParticipantAdminCommands { object UploadDar { def apply( darPath: String, 
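+          // When set, scopes the upload and vetting to a single synchronizer; None
+          // retains the previous behaviour (all connected synchronizers, an assumption
+          // based on the optional proto field).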
+ synchronizerId: Option[SynchronizerId], vetAllPackages: Boolean, synchronizeVetting: Boolean, description: String, @@ -274,8 +279,9 @@ object ParticipantAdminCommands { darDataO: Option[ByteString], ): UploadDar = UploadDar( Seq(DarData(darPath, description, expectedMainPackageId, darDataO)), - vetAllPackages, - synchronizeVetting, + synchronizerId, + vetAllPackages = vetAllPackages, + synchronizeVetting = synchronizeVetting, requestHeaders, logger, ) @@ -284,6 +290,7 @@ object ParticipantAdminCommands { final case class ValidateDar( darPath: Option[String], + synchronizerId: Option[SynchronizerId], logger: TracedLogger, ) extends PackageCommand[v30.ValidateDarRequest, v30.ValidateDarResponse, String] { @@ -300,6 +307,7 @@ object ParticipantAdminCommands { } yield v30.ValidateDarRequest( darData, filename, + synchronizerId = synchronizerId.map(_.toProtoPrimitive), ) override protected def submitRequest( @@ -437,10 +445,13 @@ object ParticipantAdminCommands { } - final case class VetDar(darDash: String, synchronize: Boolean) - extends PackageCommand[v30.VetDarRequest, v30.VetDarResponse, Unit] { + final case class VetDar( + darDash: String, + synchronize: Boolean, + synchronizer: Option[SynchronizerId], + ) extends PackageCommand[v30.VetDarRequest, v30.VetDarResponse, Unit] { override protected def createRequest(): Either[String, v30.VetDarRequest] = Right( - v30.VetDarRequest(darDash, synchronize) + v30.VetDarRequest(darDash, synchronize, synchronizer.map(_.toProtoPrimitive)) ) override protected def submitRequest( @@ -454,11 +465,11 @@ object ParticipantAdminCommands { // TODO(#14432): Add `synchronize` flag which makes the call block until the unvetting operation // is observed by the participant on all connected synchronizers. - final case class UnvetDar(mainPackageId: String) + final case class UnvetDar(mainPackageId: String, synchronizerId: Option[SynchronizerId]) extends PackageCommand[v30.UnvetDarRequest, v30.UnvetDarResponse, Unit] { override protected def createRequest(): Either[String, v30.UnvetDarRequest] = Right( - v30.UnvetDarRequest(mainPackageId) + v30.UnvetDarRequest(mainPackageId, synchronizerId.map(_.toProtoPrimitive)) ) override protected def submitRequest( @@ -636,6 +647,86 @@ object ParticipantAdminCommands { GrpcAdminCommand.DefaultUnboundedTimeout } + final case class ImportPartyAcs( + acsChunk: ByteString + ) extends GrpcAdminCommand[ + v30.ImportPartyAcsRequest, + v30.ImportPartyAcsResponse, + Unit, + ] { + + override type Svc = PartyManagementServiceStub + + override def createService(channel: ManagedChannel): PartyManagementServiceStub = + v30.PartyManagementServiceGrpc.stub(channel) + + override protected def createRequest(): Either[String, v30.ImportPartyAcsRequest] = + Right(v30.ImportPartyAcsRequest(acsChunk)) + + override protected def submitRequest( + service: PartyManagementServiceStub, + request: v30.ImportPartyAcsRequest, + ): Future[v30.ImportPartyAcsResponse] = + GrpcStreamingUtils.streamToServer( + service.importPartyAcs, + (bytes: Array[Byte]) => + v30.ImportPartyAcsRequest( + ByteString.copyFrom(bytes) + ), + request.acsSnapshot, + ) + + override protected def handleResponse( + response: v30.ImportPartyAcsResponse + ): Either[String, Unit] = Either.unit + + } + + final case class CompletePartyOnboarding( + party: PartyId, + synchronizerId: SynchronizerId, + targetParticipantId: ParticipantId, + beginOffsetExclusive: NonNegativeLong, + waitForActivationTimeout: Option[config.NonNegativeFiniteDuration], + ) extends GrpcAdminCommand[ + 
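+        // Type arguments: request, response, and the console-facing result, a pair of
+        // (onboarded flag, optional earliest retry timestamp); see handleResponse.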
v30.CompletePartyOnboardingRequest, + v30.CompletePartyOnboardingResponse, + (Boolean, Option[CantonTimestamp]), + ] { + + override type Svc = PartyManagementServiceStub + + override def createService(channel: ManagedChannel): PartyManagementServiceStub = + v30.PartyManagementServiceGrpc.stub(channel) + + override protected def createRequest(): Either[String, v30.CompletePartyOnboardingRequest] = + Right( + v30.CompletePartyOnboardingRequest( + party.toProtoPrimitive, + synchronizerId.toProtoPrimitive, + targetParticipantId.uid.toProtoPrimitive, + beginOffsetExclusive.unwrap, + waitForActivationTimeout.map(_.toProtoPrimitive), + ) + ) + + override protected def submitRequest( + service: PartyManagementServiceStub, + request: v30.CompletePartyOnboardingRequest, + ): Future[v30.CompletePartyOnboardingResponse] = service.completePartyOnboarding(request) + + override protected def handleResponse( + response: v30.CompletePartyOnboardingResponse + ): Either[String, (Boolean, Option[CantonTimestamp])] = + response.earliestRetryTimestamp + .traverse( + CantonTimestamp + .fromProtoTimestamp(_) + .leftMap(_.message) + ) + .map(tsOption => (response.onboarded, tsOption)) + } + } object ParticipantRepairManagement { @@ -647,7 +738,6 @@ object ParticipantAdminCommands { filterSynchronizerId: Option[SynchronizerId], timestamp: Option[Instant], observer: StreamObserver[v30.ExportAcsOldResponse], - contractSynchronizerRenames: Map[SynchronizerId, (SynchronizerId, ProtocolVersion)], force: Boolean, ) extends GrpcAdminCommand[ v30.ExportAcsOldRequest, @@ -666,15 +756,6 @@ object ParticipantAdminCommands { parties.map(_.toLf).toSeq, filterSynchronizerId.map(_.toProtoPrimitive).getOrElse(""), timestamp.map(Timestamp.apply), - contractSynchronizerRenames.map { - case (source, (targetSynchronizerId, targetProtocolVersion)) => - val targetSynchronizer = v30.ExportAcsOldRequest.TargetSynchronizer( - synchronizerId = targetSynchronizerId.toProtoPrimitive, - protocolVersion = targetProtocolVersion.toProtoPrimitive, - ) - - (source.toProtoPrimitive, targetSynchronizer) - }, force = force, partiesOffboarding = partiesOffboarding, ) @@ -805,8 +886,9 @@ object ParticipantAdminCommands { final case class ImportAcs( acsChunk: ByteString, workflowIdPrefix: String, - contractIdImportMode: ContractIdImportMode, + contractImportMode: ContractImportMode, excludedStakeholders: Set[PartyId], + representativePackageIdOverride: RepresentativePackageIdOverride, ) extends GrpcAdminCommand[ v30.ImportAcsRequest, v30.ImportAcsResponse, @@ -823,8 +905,9 @@ object ParticipantAdminCommands { v30.ImportAcsRequest( acsChunk, workflowIdPrefix, - contractIdImportMode.toProtoV30, + contractImportMode.toProtoV30, excludedStakeholders.map(_.toProtoPrimitive).toSeq, + Some(representativePackageIdOverride.toProtoV30), ) ) @@ -838,8 +921,9 @@ object ParticipantAdminCommands { v30.ImportAcsRequest( ByteString.copyFrom(bytes), workflowIdPrefix, - contractIdImportMode.toProtoV30, + contractImportMode.toProtoV30, excludedStakeholders.map(_.toProtoPrimitive).toSeq, + Some(representativePackageIdOverride.toProtoV30), ), request.acsSnapshot, ) diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/SequencerAdminCommands.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/SequencerAdminCommands.scala index f5a9917c9c..b7f85b854f 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/SequencerAdminCommands.scala 
+++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/SequencerAdminCommands.scala @@ -161,6 +161,47 @@ object SequencerAdminCommands { override def timeoutType: TimeoutType = DefaultUnboundedTimeout } + final case class InitializeFromOnboardingStateV2(onboardingState: ByteString) + extends GrpcAdminCommand[ + proto.InitializeSequencerFromOnboardingStateV2Request, + proto.InitializeSequencerFromOnboardingStateV2Response, + InitializeSequencerResponse, + ] { + override type Svc = + proto.SequencerInitializationServiceGrpc.SequencerInitializationServiceStub + + override def createService( + channel: ManagedChannel + ): proto.SequencerInitializationServiceGrpc.SequencerInitializationServiceStub = + proto.SequencerInitializationServiceGrpc.stub(channel) + + override protected def submitRequest( + service: proto.SequencerInitializationServiceGrpc.SequencerInitializationServiceStub, + request: proto.InitializeSequencerFromOnboardingStateV2Request, + ): Future[proto.InitializeSequencerFromOnboardingStateV2Response] = + GrpcStreamingUtils.streamToServer( + service.initializeSequencerFromOnboardingStateV2, + (onboardingState: Array[Byte]) => + proto.InitializeSequencerFromOnboardingStateV2Request( + ByteString.copyFrom(onboardingState) + ), + request.onboardingState, + ) + + override protected def createRequest() + : Either[String, proto.InitializeSequencerFromOnboardingStateV2Request] = + Right( + proto.InitializeSequencerFromOnboardingStateV2Request(onboardingState) + ) + + override protected def handleResponse( + response: proto.InitializeSequencerFromOnboardingStateV2Response + ): Either[String, InitializeSequencerResponse] = + Right(InitializeSequencerResponse(response.replicated)) + + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + } + final case class InitializeFromGenesisState( topologySnapshot: ByteString, synchronizerParameters: com.digitalasset.canton.protocol.StaticSynchronizerParameters, @@ -255,6 +296,53 @@ object SequencerAdminCommands { override def timeoutType: TimeoutType = DefaultUnboundedTimeout } + final case class InitializeFromGenesisStateV2( + topologySnapshot: ByteString, + synchronizerParameters: com.digitalasset.canton.protocol.StaticSynchronizerParameters, + ) extends GrpcAdminCommand[ + proto.InitializeSequencerFromGenesisStateV2Request, + proto.InitializeSequencerFromGenesisStateV2Response, + InitializeSequencerResponse, + ] { + override type Svc = + proto.SequencerInitializationServiceGrpc.SequencerInitializationServiceStub + + override def createService( + channel: ManagedChannel + ): proto.SequencerInitializationServiceGrpc.SequencerInitializationServiceStub = + proto.SequencerInitializationServiceGrpc.stub(channel) + + override protected def submitRequest( + service: proto.SequencerInitializationServiceGrpc.SequencerInitializationServiceStub, + request: proto.InitializeSequencerFromGenesisStateV2Request, + ): Future[proto.InitializeSequencerFromGenesisStateV2Response] = + GrpcStreamingUtils.streamToServer( + service.initializeSequencerFromGenesisStateV2, + (topologySnapshot: Array[Byte]) => + proto.InitializeSequencerFromGenesisStateV2Request( + topologySnapshot = ByteString.copyFrom(topologySnapshot), + Some(synchronizerParameters.toProtoV30), + ), + request.topologySnapshot, + ) + + override protected def createRequest() + : Either[String, proto.InitializeSequencerFromGenesisStateV2Request] = + Right( + proto.InitializeSequencerFromGenesisStateV2Request( + topologySnapshot = topologySnapshot, + 
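+          // The static synchronizer parameters accompany the snapshot both here and in
+          // each streamed chunk built in submitRequest above.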
Some(synchronizerParameters.toProtoV30), + ) + ) + + override protected def handleResponse( + response: proto.InitializeSequencerFromGenesisStateV2Response + ): Either[String, InitializeSequencerResponse] = + Right(InitializeSequencerResponse(response.replicated)) + + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + } + final case class Snapshot(timestamp: CantonTimestamp) extends BaseSequencerAdministrationCommand[ proto.SnapshotRequest, @@ -329,6 +417,45 @@ object SequencerAdminCommands { override def timeoutType: TimeoutType = DefaultUnboundedTimeout } + final case class OnboardingStateV2( + observer: StreamObserver[proto.OnboardingStateV2Response], + sequencerOrTimestamp: Either[SequencerId, CantonTimestamp], + ) extends BaseSequencerAdministrationCommand[ + proto.OnboardingStateV2Request, + CancellableContext, + CancellableContext, + ] { + override protected def createRequest(): Either[String, proto.OnboardingStateV2Request] = + Right( + proto.OnboardingStateV2Request(request = + sequencerOrTimestamp.fold[proto.OnboardingStateV2Request.Request]( + sequencer => + proto.OnboardingStateV2Request.Request + .SequencerUid(sequencer.uid.toProtoPrimitive), + timestamp => + proto.OnboardingStateV2Request.Request.Timestamp(timestamp.toProtoTimestamp), + ) + ) + ) + + override protected def submitRequest( + service: proto.SequencerAdministrationServiceGrpc.SequencerAdministrationServiceStub, + request: proto.OnboardingStateV2Request, + ): Future[CancellableContext] = { + val context = Context.current().withCancellation() + context.run(() => service.onboardingStateV2(request, observer)) + Future.successful(context) + } + + override protected def handleResponse( + response: CancellableContext + ): Either[String, CancellableContext] = + Right(response) + + // command will potentially take a long time + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + } + final case class DisableMember(member: Member) extends BaseSequencerAdministrationCommand[ proto.DisableMemberRequest, diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/TopologyAdminCommands.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/TopologyAdminCommands.scala index 8b304fb866..357fd2658d 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/TopologyAdminCommands.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/TopologyAdminCommands.scala @@ -336,6 +336,7 @@ object TopologyAdminCommands { response.results .traverse(ListPartyToParticipantResult.fromProtoV30) .leftMap(_.toString) + } final case class ListSynchronizerParametersState( @@ -587,6 +588,42 @@ object TopologyAdminCommands { override def timeoutType: TimeoutType = DefaultUnboundedTimeout } + final case class ExportTopologySnapshotV2( + observer: StreamObserver[ExportTopologySnapshotV2Response], + query: BaseQuery, + excludeMappings: Seq[String], + filterNamespace: String, + ) extends BaseCommand[ + v30.ExportTopologySnapshotV2Request, + CancellableContext, + CancellableContext, + ] { + override protected def createRequest(): Either[String, v30.ExportTopologySnapshotV2Request] = + Right( + new v30.ExportTopologySnapshotV2Request( + baseQuery = Some(query.toProtoV1), + excludeMappings = excludeMappings, + filterNamespace = filterNamespace, + ) + ) + + override protected def submitRequest( + service: TopologyManagerReadServiceStub, + request: 
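+        // The V2 export arrives as a server stream on `observer`; the returned
+        // CancellableContext lets callers abort a long-running export.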
v30.ExportTopologySnapshotV2Request, + ): Future[CancellableContext] = { + val context = Context.current().withCancellation() + context.run(() => service.exportTopologySnapshotV2(request, observer)) + Future.successful(context) + } + + override protected def handleResponse( + response: CancellableContext + ): Either[String, CancellableContext] = + Right(response) + + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + } + final case class GenesisState( observer: StreamObserver[GenesisStateResponse], synchronizerStore: Option[TopologyStoreId.Synchronizer], @@ -649,6 +686,41 @@ object TopologyAdminCommands { override def timeoutType: TimeoutType = DefaultUnboundedTimeout } + + final case class GenesisStateV2( + observer: StreamObserver[GenesisStateV2Response], + synchronizerStore: Option[TopologyStoreId.Synchronizer], + timestamp: Option[CantonTimestamp], + ) extends BaseCommand[ + v30.GenesisStateV2Request, + CancellableContext, + CancellableContext, + ] { + override protected def createRequest(): Either[String, v30.GenesisStateV2Request] = + Right( + v30.GenesisStateV2Request( + synchronizerStore.map(_.toProtoV30), + timestamp.map(_.toProtoTimestamp), + ) + ) + + override protected def submitRequest( + service: TopologyManagerReadServiceStub, + request: v30.GenesisStateV2Request, + ): Future[CancellableContext] = { + val context = Context.current().withCancellation() + context.run(() => service.genesisStateV2(request, observer)) + Future.successful(context) + } + + override protected def handleResponse( + response: CancellableContext + ): Either[String, CancellableContext] = + Right(response) + + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + } + } object Aggregation { @@ -806,6 +878,42 @@ object TopologyAdminCommands { ): Either[String, Unit] = Either.unit } + final case class ImportTopologySnapshotV2( + topologySnapshot: ByteString, + store: TopologyStoreId, + waitToBecomeEffective: Option[NonNegativeDuration], + ) extends BaseWriteCommand[ + ImportTopologySnapshotV2Request, + ImportTopologySnapshotV2Response, + Unit, + ] { + override protected def createRequest(): Either[String, ImportTopologySnapshotV2Request] = + Right( + ImportTopologySnapshotV2Request( + topologySnapshot, + Some(store.toProtoV30), + waitToBecomeEffective.map(_.asNonNegativeFiniteApproximation.toProtoPrimitive), + ) + ) + override protected def submitRequest( + service: TopologyManagerWriteServiceStub, + request: ImportTopologySnapshotV2Request, + ): Future[ImportTopologySnapshotV2Response] = + GrpcStreamingUtils.streamToServer( + service.importTopologySnapshotV2, + bytes => + ImportTopologySnapshotV2Request( + ByteString.copyFrom(bytes), + Some(store.toProtoV30), + waitToBecomeEffective.map(_.toProtoPrimitive), + ), + topologySnapshot, + ) + override protected def handleResponse( + response: ImportTopologySnapshotV2Response + ): Either[String, Unit] = Either.unit + } + final case class SignTransactions( transactions: Seq[GenericSignedTopologyTransaction], store: TopologyStoreId, diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/SynchronizerParameters.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/SynchronizerParameters.scala index 9105fca71e..53bdc535bb 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/SynchronizerParameters.scala +++ 
b/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/SynchronizerParameters.scala @@ -13,12 +13,8 @@ import com.digitalasset.canton.admin.api.client.data.crypto.{ RequiredSigningSpecs, SymmetricKeyScheme, } +import com.digitalasset.canton.config.CryptoConfig import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} -import com.digitalasset.canton.config.{ - CryptoConfig, - NonNegativeFiniteDuration, - PositiveDurationSeconds, -} import com.digitalasset.canton.crypto.SignatureFormat import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.protocol.DynamicSynchronizerParameters.InvalidDynamicSynchronizerParameters @@ -37,11 +33,12 @@ import com.digitalasset.canton.time.{ Clock, NonNegativeFiniteDuration as InternalNonNegativeFiniteDuration, PositiveSeconds, + RemoteClock, + SimClock, } import com.digitalasset.canton.util.BinaryFileUtil import com.digitalasset.canton.version.{ProtoVersion, ProtocolVersion} import com.digitalasset.canton.{ProtoDeserializationError, config, crypto as SynchronizerCrypto} -import com.google.common.annotations.VisibleForTesting import io.scalaland.chimney.dsl.* import scala.Ordering.Implicits.* @@ -54,6 +51,7 @@ final case class StaticSynchronizerParameters( requiredHashAlgorithms: NonEmpty[Set[HashAlgorithm]], requiredCryptoKeyFormats: NonEmpty[Set[CryptoKeyFormat]], requiredSignatureFormats: NonEmpty[Set[SignatureFormat]], + topologyChangeDelay: config.NonNegativeFiniteDuration, enableTransparencyChecks: Boolean, protocolVersion: ProtocolVersion, serial: NonNegativeInt, @@ -72,6 +70,7 @@ final case class StaticSynchronizerParameters( param("required symmetric key schemes", _.requiredSymmetricKeySchemes), param("required hash algorithms", _.requiredHashAlgorithms), param("required crypto key formats", _.requiredCryptoKeyFormats), + param("topology change delay", _.topologyChangeDelay), param("protocol version", _.protocolVersion), param("serial", _.serial), ) @@ -100,26 +99,42 @@ object StaticSynchronizerParameters { def defaultsWithoutKMS( protocolVersion: ProtocolVersion, serial: NonNegativeInt = NonNegativeInt.zero, + topologyChangeDelay: config.NonNegativeFiniteDuration = + StaticSynchronizerParametersInternal.defaultTopologyChangeDelay.toConfig, ): StaticSynchronizerParameters = - defaults(CryptoConfig(), protocolVersion, serial) + defaults(CryptoConfig(), protocolVersion, serial, topologyChangeDelay) // This method is unsafe. Not prefixing by `try` to have nicer docs snippets. 
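  // Note: `topologyChangeDelay` is now part of the static parameters; `initialValues`
  // below selects the non-standard-clock default when running under a SimClock or
  // RemoteClock.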
def defaults( cryptoConfig: CryptoConfig, protocolVersion: ProtocolVersion, serial: NonNegativeInt = NonNegativeInt.zero, + topologyChangeDelay: config.NonNegativeFiniteDuration = + StaticSynchronizerParametersInternal.defaultTopologyChangeDelay.toConfig, ): StaticSynchronizerParameters = { - val internal = SynchronizerParametersConfig() + val internal = SynchronizerParametersConfig(topologyChangeDelay = Some(topologyChangeDelay)) .toStaticSynchronizerParameters(cryptoConfig, protocolVersion, serial) .valueOr(err => throw new IllegalArgumentException( s"Cannot instantiate static synchronizer parameters: $err" ) ) - StaticSynchronizerParameters(internal) } + private[canton] def initialValues( + clock: Clock, + protocolVersion: ProtocolVersion, + serial: NonNegativeInt = NonNegativeInt.zero, + ): StaticSynchronizerParameters = { + val topologyChangeDelay = clock match { + case _: SimClock | _: RemoteClock => + StaticSynchronizerParametersInternal.defaultTopologyChangeDelayNonStandardClock + case _ => StaticSynchronizerParametersInternal.defaultTopologyChangeDelay + } + defaultsWithoutKMS(protocolVersion, serial, topologyChangeDelay.toConfig) + } + def apply( synchronizer: StaticSynchronizerParametersInternal ): StaticSynchronizerParameters = @@ -159,6 +174,7 @@ object StaticSynchronizerParameters { protocolVersionP, serialP, enableTransparencyChecks, + topologyChangeDelayP, ) = synchronizerParametersP for { @@ -212,6 +228,11 @@ object StaticSynchronizerParameters { requiredSignatureFormatsP, SynchronizerCrypto.SignatureFormat.fromProtoEnum, ) + topologyChangeDelay <- ProtoConverter.parseRequired( + config.NonNegativeFiniteDuration.fromProtoPrimitive("topology_change_delay")(_), + "topology_change_delay", + topologyChangeDelayP, + ) // Data in the console is not really validated, so we allow for deleted protocolVersion <- ProtocolVersion.fromProtoPrimitive(protocolVersionP, allowDeleted = true) serial <- ProtoConverter.parseNonNegativeInt("serial", serialP) @@ -225,6 +246,7 @@ object StaticSynchronizerParameters { requiredHashAlgorithms, requiredCryptoKeyFormats, requiredSignatureFormats, + topologyChangeDelay.toInternal, enableTransparencyChecks, protocolVersion, serial, @@ -235,20 +257,19 @@ object StaticSynchronizerParameters { // TODO(#15650) Properly expose new BFT parameters and synchronizer limits final case class DynamicSynchronizerParameters( - confirmationResponseTimeout: NonNegativeFiniteDuration, - mediatorReactionTimeout: NonNegativeFiniteDuration, - assignmentExclusivityTimeout: NonNegativeFiniteDuration, - topologyChangeDelay: NonNegativeFiniteDuration, - ledgerTimeRecordTimeTolerance: NonNegativeFiniteDuration, - mediatorDeduplicationTimeout: NonNegativeFiniteDuration, - reconciliationInterval: PositiveDurationSeconds, + confirmationResponseTimeout: config.NonNegativeFiniteDuration, + mediatorReactionTimeout: config.NonNegativeFiniteDuration, + assignmentExclusivityTimeout: config.NonNegativeFiniteDuration, + ledgerTimeRecordTimeTolerance: config.NonNegativeFiniteDuration, + mediatorDeduplicationTimeout: config.NonNegativeFiniteDuration, + reconciliationInterval: config.PositiveDurationSeconds, maxRequestSize: NonNegativeInt, - sequencerAggregateSubmissionTimeout: NonNegativeFiniteDuration, + sequencerAggregateSubmissionTimeout: config.NonNegativeFiniteDuration, trafficControl: Option[TrafficControlParameters], onboardingRestriction: OnboardingRestriction, acsCommitmentsCatchUp: Option[AcsCommitmentsCatchUpParameters], participantSynchronizerLimits: 
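    // `topologyChangeDelay` no longer appears among the dynamic parameters; it moved
    // to StaticSynchronizerParameters above.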
ParticipantSynchronizerLimits, - preparationTimeRecordTimeTolerance: NonNegativeFiniteDuration, + preparationTimeRecordTimeTolerance: config.NonNegativeFiniteDuration, ) extends PrettyPrinting { def decisionTimeout: config.NonNegativeFiniteDuration = @@ -267,7 +288,7 @@ final case class DynamicSynchronizerParameters( // Originally the validation was done on ledgerTimeRecordTimeTolerance, but was moved to preparationTimeRecordTimeTolerance // instead when the parameter was introduced def compatibleWithNewPreparationTimeRecordTimeTolerance( - newPreparationTimeRecordTimeTolerance: NonNegativeFiniteDuration + newPreparationTimeRecordTimeTolerance: config.NonNegativeFiniteDuration ): Boolean = // If false, a new request may receive the same submission time as a previous request and the previous // request may be evicted too early from the mediator's deduplication store. @@ -280,7 +301,6 @@ final case class DynamicSynchronizerParameters( param("confirmation response timeout", _.confirmationResponseTimeout), param("mediator reaction timeout", _.mediatorReactionTimeout), param("assignment exclusivity timeout", _.assignmentExclusivityTimeout), - param("topology change delay", _.topologyChangeDelay), param("ledger time record time tolerance", _.ledgerTimeRecordTimeTolerance), param("mediator deduplication timeout", _.mediatorDeduplicationTimeout), param("reconciliation interval", _.reconciliationInterval), @@ -295,28 +315,27 @@ final case class DynamicSynchronizerParameters( ) def update( - confirmationResponseTimeout: NonNegativeFiniteDuration = confirmationResponseTimeout, - mediatorReactionTimeout: NonNegativeFiniteDuration = mediatorReactionTimeout, - assignmentExclusivityTimeout: NonNegativeFiniteDuration = assignmentExclusivityTimeout, - topologyChangeDelay: NonNegativeFiniteDuration = topologyChangeDelay, - ledgerTimeRecordTimeTolerance: NonNegativeFiniteDuration = ledgerTimeRecordTimeTolerance, - mediatorDeduplicationTimeout: NonNegativeFiniteDuration = mediatorDeduplicationTimeout, - reconciliationInterval: PositiveDurationSeconds = reconciliationInterval, + confirmationResponseTimeout: config.NonNegativeFiniteDuration = confirmationResponseTimeout, + mediatorReactionTimeout: config.NonNegativeFiniteDuration = mediatorReactionTimeout, + assignmentExclusivityTimeout: config.NonNegativeFiniteDuration = assignmentExclusivityTimeout, + ledgerTimeRecordTimeTolerance: config.NonNegativeFiniteDuration = + ledgerTimeRecordTimeTolerance, + mediatorDeduplicationTimeout: config.NonNegativeFiniteDuration = mediatorDeduplicationTimeout, + reconciliationInterval: config.PositiveDurationSeconds = reconciliationInterval, confirmationRequestsMaxRate: NonNegativeInt = confirmationRequestsMaxRate, maxRequestSize: NonNegativeInt = maxRequestSize, - sequencerAggregateSubmissionTimeout: NonNegativeFiniteDuration = + sequencerAggregateSubmissionTimeout: config.NonNegativeFiniteDuration = sequencerAggregateSubmissionTimeout, trafficControl: Option[TrafficControlParameters] = trafficControl, onboardingRestriction: OnboardingRestriction = onboardingRestriction, acsCommitmentsCatchUpParameters: Option[AcsCommitmentsCatchUpParameters] = acsCommitmentsCatchUp, - preparationTimeRecordTimeTolerance: NonNegativeFiniteDuration = + preparationTimeRecordTimeTolerance: config.NonNegativeFiniteDuration = preparationTimeRecordTimeTolerance, ): DynamicSynchronizerParameters = this.copy( confirmationResponseTimeout = confirmationResponseTimeout, mediatorReactionTimeout = mediatorReactionTimeout, assignmentExclusivityTimeout = 
assignmentExclusivityTimeout, - topologyChangeDelay = topologyChangeDelay, ledgerTimeRecordTimeTolerance = ledgerTimeRecordTimeTolerance, mediatorDeduplicationTimeout = mediatorDeduplicationTimeout, reconciliationInterval = reconciliationInterval, @@ -341,7 +360,6 @@ final case class DynamicSynchronizerParameters( InternalNonNegativeFiniteDuration.fromConfig(mediatorReactionTimeout), assignmentExclusivityTimeout = InternalNonNegativeFiniteDuration.fromConfig(assignmentExclusivityTimeout), - topologyChangeDelay = InternalNonNegativeFiniteDuration.fromConfig(topologyChangeDelay), ledgerTimeRecordTimeTolerance = InternalNonNegativeFiniteDuration.fromConfig(ledgerTimeRecordTimeTolerance), mediatorDeduplicationTimeout = @@ -364,16 +382,9 @@ final case class DynamicSynchronizerParameters( object DynamicSynchronizerParameters { - /** Default dynamic synchronizer parameters for non-static clocks */ - @VisibleForTesting - def defaultValues(protocolVersion: ProtocolVersion): DynamicSynchronizerParameters = - DynamicSynchronizerParameters( - DynamicSynchronizerParametersInternal.defaultValues(protocolVersion) - ) - - private[canton] def initialValues(clock: Clock, protocolVersion: ProtocolVersion) = + private[canton] def initialValues(protocolVersion: ProtocolVersion) = DynamicSynchronizerParameters( - DynamicSynchronizerParametersInternal.initialValues(clock, protocolVersion) + DynamicSynchronizerParametersInternal.initialValues(protocolVersion) ) def apply( diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/parties/GenerateExternalPartyTopology.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/parties/GenerateExternalPartyTopology.scala new file mode 100644 index 0000000000..41867857f7 --- /dev/null +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/parties/GenerateExternalPartyTopology.scala @@ -0,0 +1,51 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client.data.parties + +import cats.syntax.functorFilter.* +import cats.syntax.traverse.* +import com.daml.ledger.api.v2.admin.party_management_service +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.crypto.{Fingerprint, Hash} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.PartyId +import com.digitalasset.canton.topology.transaction.TopologyTransaction.PositiveTopologyTransaction +import com.digitalasset.canton.topology.transaction.{TopologyChangeOp, TopologyTransaction} + +final case class GenerateExternalPartyTopology( + partyId: PartyId, + publicKeyFingerprint: Fingerprint, + topologyTransactions: Seq[PositiveTopologyTransaction], + multiHash: Hash, +) + +object GenerateExternalPartyTopology { + def fromProto( + value: party_management_service.GenerateExternalPartyTopologyResponse + ): ParsingResult[GenerateExternalPartyTopology] = { + val party_management_service.GenerateExternalPartyTopologyResponse( + partyIdP, + publicKeyFingerprintP, + topologyTransactionsP, + multiHashP, + ) = value + for { + partyId <- PartyId.fromProtoPrimitive(partyIdP, "party_id") + fingerprint <- Fingerprint.fromProtoPrimitive(publicKeyFingerprintP) + topologyTransactions <- topologyTransactionsP.traverse( + TopologyTransaction.fromTrustedByteString + ) + positive = topologyTransactions.mapFilter(_.selectOp[TopologyChangeOp.Replace]) + _ <- Either.cond( + topologyTransactions.sizeIs == positive.length, + (), + ProtoDeserializationError.ValueDeserializationError( + "topology_transactions", + "Unexpected Remove topology transaction", + ), + ) + multiHash <- Hash.fromProtoPrimitive(multiHashP) + } yield GenerateExternalPartyTopology(partyId, fingerprint, positive, multiHash) + } +} diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/PartyDetails.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/parties/PartyDetails.scala similarity index 84% rename from canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/PartyDetails.scala rename to canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/parties/PartyDetails.scala index 3ad6310827..372a0eee4a 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/PartyDetails.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/parties/PartyDetails.scala @@ -1,14 +1,12 @@ // Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0 -package com.digitalasset.canton.admin.api.client.data +package com.digitalasset.canton.admin.api.client.data.parties import com.daml.ledger.api.v2.admin.object_meta.ObjectMeta as ProtoObjectMeta import com.daml.ledger.api.v2.admin.party_management_service.PartyDetails as ProtoPartyDetails import com.digitalasset.canton.topology.PartyId -import scala.util.control.NoStackTrace - /** Represents a party details value exposed in the Canton console * * @param isLocal @@ -44,7 +42,3 @@ object PartyDetails { identityProviderId = details.identityProviderId, ) } - -final case class ModifyingNonModifiablePartyDetailsPropertiesError() - extends RuntimeException("MODIFYING_AN_UNMODIFIABLE_PARTY_DETAILS_PROPERTY_ERROR") - with NoStackTrace diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/topology/Topology.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/topology/Topology.scala index 368ed06a7b..d8353f374e 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/topology/Topology.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/topology/Topology.scala @@ -6,17 +6,20 @@ package com.digitalasset.canton.admin.api.client.data.topology import cats.syntax.either.* import cats.syntax.traverse.* import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.ProtoDeserializationError import com.digitalasset.canton.ProtoDeserializationError.RefinedDurationConversionError import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.crypto.Fingerprint +import com.digitalasset.canton.crypto.{Fingerprint, Hash} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.protocol.DynamicSynchronizerParameters import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId import com.digitalasset.canton.topology.admin.v30 import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.topology.transaction.TopologyTransaction.TxHash +import com.digitalasset.canton.topology.{ParticipantId, PartyId} import com.digitalasset.canton.version.ProtocolVersion -import com.google.protobuf.ByteString import java.time.Instant @@ -42,49 +45,62 @@ final case class BaseResult( validUntil: Option[Instant], sequenced: Instant, operation: TopologyChangeOp, - transactionHash: ByteString, + transactionHash: TxHash, serial: PositiveInt, signedBy: NonEmpty[Seq[Fingerprint]], ) object BaseResult { - def fromProtoV30(value: v30.BaseResult): ParsingResult[BaseResult] = + def fromProtoV30(value: v30.BaseResult): ParsingResult[BaseResult] = { + val v30.BaseResult( + storeId, + sequenced, + validFrom, + validUntil, + operation, + transactionHash, + serial, + signedByFingerprints, + ) = value for { - protoValidFrom <- ProtoConverter.required("valid_from", value.validFrom) + protoValidFrom <- ProtoConverter.required("valid_from", validFrom) validFrom <- ProtoConverter.InstantConverter.fromProtoPrimitive(protoValidFrom) - validUntil <- value.validUntil.traverse(ProtoConverter.InstantConverter.fromProtoPrimitive) - protoSequenced <- ProtoConverter.required("sequencer", value.sequenced) + validUntil <- validUntil.traverse(ProtoConverter.InstantConverter.fromProtoPrimitive) + protoSequenced <- 
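+      // The field label is corrected from "sequencer" to "sequenced" so that parsing
+      // failures reference the actual proto field.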
ProtoConverter.required("sequenced", sequenced) sequenced <- ProtoConverter.InstantConverter.fromProtoPrimitive(protoSequenced) operation <- ProtoConverter.parseEnum( TopologyChangeOp.fromProtoV30, "operation", - value.operation, + operation, ) serial <- PositiveInt - .create(value.serial) + .create(serial) .leftMap(e => RefinedDurationConversionError("serial", e.message)) signedBy <- ProtoConverter.parseRequiredNonEmpty( Fingerprint.fromProtoPrimitive, "signed_by_fingerprints", - value.signedByFingerprints, + signedByFingerprints, ) - store <- ProtoConverter.parseRequired( TopologyStoreId.fromProtoV30(_, "store"), "store", - value.store, + storeId, ) + txHash <- Hash + .fromByteString(transactionHash) + .leftMap(ProtoDeserializationError.CryptoDeserializationError(_)) } yield BaseResult( store, validFrom, validUntil, sequenced, operation, - value.transactionHash, + TxHash(txHash), serial, signedBy, ) + } } final case class ListNamespaceDelegationResult( @@ -240,6 +256,52 @@ object ListPartyToParticipantResult { } yield ListPartyToParticipantResult(context, item) } +/** Console API class to conveniently represent a party to participant proposals + * + * @param txHash + * the hash of the proposal, required to approve it + * @param party + * the party to be hosted + * @param permission + * the permission for the currently selected participant + * @param otherParticipants + * the other participants involved in the hosting relationship + * @param threshold + * the signing threshold in case this is a multi-sig party + */ +final case class ListMultiHostingProposal( + txHash: TxHash, + party: PartyId, + permission: ParticipantPermission, + otherParticipants: Seq[HostingParticipant], + threshold: PositiveInt, +) extends PrettyPrinting { + override def pretty: Pretty[ListMultiHostingProposal] = + prettyOfClass( + param("txHash", _.txHash.hash), + param("party", _.party), + param("permission", _.permission.showType), + param("others", _.otherParticipants.map(x => (x.participantId, x.permission.showType)).toMap), + param("threshold", _.threshold), + ) +} + +object ListMultiHostingProposal { + def mapFilter(participantId: ParticipantId)( + result: ListPartyToParticipantResult + ): Option[ListMultiHostingProposal] = + result.item.participants.find(_.participantId == participantId).map { res => + ListMultiHostingProposal( + txHash = result.context.transactionHash, + party = result.item.partyId, + permission = res.permission, + otherParticipants = result.item.participants.filter(_.participantId != participantId), + threshold = result.item.threshold, + ) + } + +} + final case class ListSynchronizerParametersStateResult( context: BaseResult, item: DynamicSynchronizerParameters, diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala index 51857b7023..db0608c19c 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala @@ -73,6 +73,7 @@ import com.digitalasset.canton.sequencing.client.SequencerClientConfig import com.digitalasset.canton.synchronizer.block.{SequencerDriver, SequencerDriverFactory} import com.digitalasset.canton.synchronizer.config.PublicServerConfig import com.digitalasset.canton.synchronizer.mediator.{ + DeduplicationStoreConfig, MediatorConfig, MediatorNodeConfig, MediatorNodeParameterConfig, @@ -89,6 +90,7 @@ 
import com.digitalasset.canton.synchronizer.sequencer.block.DriverBlockSequencer import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings.canton.sequencing.BftSequencerFactory import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.BftBlockOrdererConfig import com.digitalasset.canton.synchronizer.sequencer.config.{ + AsyncWriterConfig, RemoteSequencerConfig, SequencerNodeConfig, SequencerNodeParameterConfig, @@ -453,8 +455,6 @@ trait SharedCantonConfig[Self] extends ConfigDefaults[DefaultPorts, Self] { self adminWorkflow = participantParameters.adminWorkflow, maxUnzippedDarSize = participantParameters.maxUnzippedDarSize, stores = participantParameters.stores, - reassignmentTimeProofFreshnessProportion = - participantParameters.reassignmentTimeProofFreshnessProportion, protocolConfig = ParticipantProtocolConfig( minimumProtocolVersion = participantParameters.minimumProtocolVersion.map(_.unwrap), alphaVersionSupport = participantParameters.alphaVersionSupport, @@ -471,6 +471,10 @@ trait SharedCantonConfig[Self] extends ConfigDefaults[DefaultPorts, Self] { self unsafeOnlinePartyReplication = participantParameters.unsafeOnlinePartyReplication, automaticallyPerformLogicalSynchronizerUpgrade = participantParameters.automaticallyPerformLogicalSynchronizerUpgrade, + reassignmentsConfig = participantParameters.reassignmentsConfig, + doNotAwaitOnCheckingIncomingCommitments = + participantParameters.doNotAwaitOnCheckingIncomingCommitments, + disableOptionalTopologyChecks = participantConfig.topology.disableOptionalTopologyChecks, ) } @@ -492,10 +496,10 @@ trait SharedCantonConfig[Self] extends ConfigDefaults[DefaultPorts, Self] { self protocol = CantonNodeParameterConverter.protocol(this, sequencerNodeConfig.parameters), maxConfirmationRequestsBurstFactor = sequencerNodeConfig.parameters.maxConfirmationRequestsBurstFactor, + asyncWriter = sequencerNodeConfig.parameters.asyncWriter.toParameters, unsafeEnableOnlinePartyReplication = sequencerNodeConfig.parameters.unsafeEnableOnlinePartyReplication, - sequencerApiLimits = sequencerNodeConfig.parameters.sequencerApiLimits, - warnOnUndefinedLimits = sequencerNodeConfig.parameters.warnOnUndefinedLimits, + streamLimits = sequencerNodeConfig.publicApi.stream, ) } @@ -720,6 +724,14 @@ object CantonConfig { allowUnknownKeys = false, ) + // TODO(#27556): Align TlsServerConfig and HttpServerConfig + implicit def httpServerConfigProductHint: ProductHint[HttpServerConfig] = + ProductHint[HttpServerConfig]( + fieldMapping = + ConfigFieldMapping(CamelCase, KebabCase).withOverrides("internalPort" -> "port"), + allowUnknownKeys = false, + ) + object ConfigReaders { import CantonConfigUtil.* import BaseCantonConfig.Readers.* @@ -1053,6 +1065,12 @@ object CantonConfig { lazy implicit val bftBlockOrdererP2PNetworkConfigReader : ConfigReader[BftBlockOrdererConfig.P2PNetworkConfig] = deriveReader[BftBlockOrdererConfig.P2PNetworkConfig] + lazy implicit val bftBlockOrdererBftBlockOrderingStandalonePeerConfigReader + : ConfigReader[BftBlockOrdererConfig.BftBlockOrderingStandalonePeerConfig] = + deriveReader[BftBlockOrdererConfig.BftBlockOrderingStandalonePeerConfig] + lazy implicit val bftBlockOrdererBftBlockOrderingStandaloneNetworkConfigReader + : ConfigReader[BftBlockOrdererConfig.BftBlockOrderingStandaloneNetworkConfig] = + deriveReader[BftBlockOrdererConfig.BftBlockOrderingStandaloneNetworkConfig] lazy implicit val bftBlockOrdererLeaderSelectionPolicyHowLongToBlacklistConfigReader : 
ConfigReader[BftBlockOrdererConfig.LeaderSelectionPolicyConfig.HowLongToBlacklist] = deriveEnumerationReader[BftBlockOrdererConfig.LeaderSelectionPolicyConfig.HowLongToBlacklist] @@ -1113,8 +1131,10 @@ object CantonConfig { } lazy implicit final val sequencerNodeParametersConfigReader - : ConfigReader[SequencerNodeParameterConfig] = + : ConfigReader[SequencerNodeParameterConfig] = { + implicit val asyncWriterConfigReader = deriveReader[AsyncWriterConfig] deriveReader[SequencerNodeParameterConfig] + } lazy implicit final val SequencerHealthConfigReader: ConfigReader[SequencerHealthConfig] = deriveReader[SequencerHealthConfig] @@ -1127,6 +1147,8 @@ object CantonConfig { lazy implicit final val mediatorConfigReader: ConfigReader[MediatorConfig] = { implicit val mediatorPruningConfigReader: ConfigReader[MediatorPruningConfig] = deriveReader[MediatorPruningConfig] + implicit val deduplicationStoreConfigReader: ConfigReader[DeduplicationStoreConfig] = + deriveReader[DeduplicationStoreConfig] deriveReader[MediatorConfig] } lazy implicit final val remoteMediatorConfigReader: ConfigReader[RemoteMediatorConfig] = @@ -1287,6 +1309,8 @@ object CantonConfig { implicit val unsafeOnlinePartyReplicationConfig : ConfigReader[UnsafeOnlinePartyReplicationConfig] = deriveReader[UnsafeOnlinePartyReplicationConfig] + implicit val reassignmentsReader: ConfigReader[ReassignmentsConfig] = + deriveReader[ReassignmentsConfig] deriveReader[ParticipantNodeParameterConfig] } lazy implicit final val timeTrackerConfigReader: ConfigReader[SynchronizerTimeTrackerConfig] = { @@ -1338,6 +1362,9 @@ object CantonConfig { lazy implicit final val startupMemoryCheckConfigReader: ConfigReader[StartupMemoryCheckConfig] = deriveReader[StartupMemoryCheckConfig] + lazy implicit final val streamLimitConfigReader: ConfigReader[StreamLimitConfig] = + deriveReader[StreamLimitConfig] + implicit val participantReplicationConfigReader: ConfigReader[ReplicationConfig] = deriveReader[ReplicationConfig] implicit val participantFeaturesConfigReader: ConfigReader[ParticipantFeaturesConfig] = @@ -1714,6 +1741,12 @@ object CantonConfig { lazy implicit val bftBlockOrdererBftP2PNetworkConfigWriter : ConfigWriter[BftBlockOrdererConfig.P2PNetworkConfig] = deriveWriter[BftBlockOrdererConfig.P2PNetworkConfig] + lazy implicit val bftBlockOrdererBftBlockOrderingStandalonePeerConfigWriter + : ConfigWriter[BftBlockOrdererConfig.BftBlockOrderingStandalonePeerConfig] = + deriveWriter[BftBlockOrdererConfig.BftBlockOrderingStandalonePeerConfig] + lazy implicit val bftBlockOrdererBftBlockOrderingStandaloneNetworkConfigWriter + : ConfigWriter[BftBlockOrdererConfig.BftBlockOrderingStandaloneNetworkConfig] = + deriveWriter[BftBlockOrdererConfig.BftBlockOrderingStandaloneNetworkConfig] lazy implicit val bftBlockOrdererBftP2PConnectionManagementConfigWriter : ConfigWriter[BftBlockOrdererConfig.P2PConnectionManagementConfig] = deriveWriter[BftBlockOrdererConfig.P2PConnectionManagementConfig] @@ -1784,8 +1817,11 @@ object CantonConfig { } lazy implicit final val sequencerNodeParameterConfigWriter - : ConfigWriter[SequencerNodeParameterConfig] = + : ConfigWriter[SequencerNodeParameterConfig] = { + implicit val asyncWriterConfigWriter: ConfigWriter[AsyncWriterConfig] = + deriveWriter[AsyncWriterConfig] deriveWriter[SequencerNodeParameterConfig] + } lazy implicit final val SequencerHealthConfigWriter: ConfigWriter[SequencerHealthConfig] = deriveWriter[SequencerHealthConfig] lazy implicit final val remoteSequencerConfigWriter: ConfigWriter[RemoteSequencerConfig] = @@ 
-1794,6 +1830,8 @@ object CantonConfig { lazy implicit final val mediatorConfigWriter: ConfigWriter[MediatorConfig] = { implicit val mediatorPruningConfigWriter: ConfigWriter[MediatorPruningConfig] = deriveWriter[MediatorPruningConfig] + implicit val deduplicationStoreConfigWriter: ConfigWriter[DeduplicationStoreConfig] = + deriveWriter[DeduplicationStoreConfig] deriveWriter[MediatorConfig] } lazy implicit final val mediatorNodeParameterConfigWriter @@ -1930,6 +1968,8 @@ object CantonConfig { implicit val unsafeOnlinePartyReplicationConfigWriter : ConfigWriter[UnsafeOnlinePartyReplicationConfig] = deriveWriter[UnsafeOnlinePartyReplicationConfig] + implicit val reassignmentsConfigWriter: ConfigWriter[ReassignmentsConfig] = + deriveWriter[ReassignmentsConfig] deriveWriter[ParticipantNodeParameterConfig] } lazy implicit final val timeTrackerConfigWriter: ConfigWriter[SynchronizerTimeTrackerConfig] = { @@ -1981,6 +2021,9 @@ object CantonConfig { lazy implicit final val startupMemoryCheckConfigWriter: ConfigWriter[StartupMemoryCheckConfig] = deriveWriter[StartupMemoryCheckConfig] + lazy implicit final val streamLimitConfigWriter: ConfigWriter[StreamLimitConfig] = + deriveWriter[StreamLimitConfig] + implicit val participantReplicationConfigWriter: ConfigWriter[ReplicationConfig] = deriveWriter[ReplicationConfig] implicit val participantFeaturesConfigWriter: ConfigWriter[ParticipantFeaturesConfig] = diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleMacros.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleMacros.scala index e3ca3c8c78..05b9e800ca 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleMacros.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleMacros.scala @@ -46,6 +46,7 @@ import com.digitalasset.canton.participant.config.BaseParticipantConfig import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil import com.digitalasset.canton.protocol.* import com.digitalasset.canton.sequencing.{ + SequencerConnectionPoolDelays, SequencerConnectionValidation, SequencerConnections, SubmissionRequestAmplification, @@ -53,10 +54,7 @@ import com.digitalasset.canton.sequencing.{ import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} -import com.digitalasset.canton.topology.store.{ - StoredTopologyTransaction, - StoredTopologyTransactions, -} +import com.digitalasset.canton.topology.store.StoredTopologyTransaction import com.digitalasset.canton.topology.transaction.* import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction import com.digitalasset.canton.tracing.{NoTracing, TraceContext} @@ -427,7 +425,7 @@ trait ConsoleMacros extends NamedLogging with NoTracing { ) } - def fetchContracsAsDisclosed( + def fetchContractsAsDisclosed( participant: LocalParticipantReference, readerParty: PartyId, templateId: Identifier, @@ -736,23 +734,29 @@ trait ConsoleMacros extends NamedLogging with NoTracing { val merged = SignedTopologyTransactions.compact(initialTopologyState).map(_.updateIsProposal(false)) - val storedTopologySnapshot = StoredTopologyTransactions[TopologyChangeOp, TopologyMapping]( - merged.map(stored => - StoredTopologyTransaction( - sequenced = SequencedTime(SignedTopologyTransaction.InitialTopologySequencingTime), - validFrom = 
EffectiveTime(SignedTopologyTransaction.InitialTopologySequencingTime), - validUntil = None, - transaction = stored, - rejectionReason = None, - ) + val out = ByteString.newOutput() + merged.foreach { stored => + val tx = StoredTopologyTransaction( + sequenced = SequencedTime(SignedTopologyTransaction.InitialTopologySequencingTime), + validFrom = EffectiveTime(SignedTopologyTransaction.InitialTopologySequencingTime), + validUntil = None, + transaction = stored, + rejectionReason = None, ) - ).toByteString(staticSynchronizerParameters.protocolVersion) + tx.writeDelimitedTo(staticSynchronizerParameters.protocolVersion, out) + .left + .foreach(error => + consoleEnvironment.raiseError(s"The synchronizer cannot be bootstrapped: $error") + ) + } + + val storedTopologySnapshot = out.toByteString sequencers .filterNot(_.health.initialized()) .foreach(x => x.setup - .assign_from_genesis_state(storedTopologySnapshot, staticSynchronizerParameters) + .assign_from_genesis_stateV2(storedTopologySnapshot, staticSynchronizerParameters) .discard ) @@ -768,6 +772,7 @@ trait ConsoleMacros extends NamedLogging with NoTracing { sequencerTrustThreshold, sequencerLivenessMargin, mediatorRequestAmplification, + SequencerConnectionPoolDelays.default, ), // if we run bootstrap ourselves, we should have been able to reach the nodes // so we don't want the bootstrapping to fail spuriously here in the middle of @@ -884,15 +889,18 @@ trait ConsoleMacros extends NamedLogging with NoTracing { newSequencer: SequencerReference, existingSequencer: SequencerReference, synchronizerOwners: Set[InstanceReference], + customCommandTimeout: Option[config.NonNegativeDuration] = None, isBftSequencer: Boolean = false, )(implicit consoleEnvironment: ConsoleEnvironment): Unit = { import consoleEnvironment.* + val commandTimeout = customCommandTimeout.getOrElse(commandTimeouts.bounded) + def synchronizeTopologyAfterAddingSequencer( newSequencerId: SequencerId, existingSequencer: SequencerReference, ): Unit = - ConsoleMacros.utils.retry_until_true(commandTimeouts.bounded) { + ConsoleMacros.utils.retry_until_true(commandTimeout) { existingSequencer.topology.sequencers .list(existingSequencer.synchronizer_id) .headOption @@ -905,7 +913,7 @@ trait ConsoleMacros extends NamedLogging with NoTracing { newSequencerId: SequencerId, existingSequencer: SequencerReference, ): Unit = - ConsoleMacros.utils.retry_until_true(commandTimeouts.bounded) { + ConsoleMacros.utils.retry_until_true(commandTimeout) { existingSequencer.bft.get_ordering_topology().sequencerIds.contains(newSequencerId) } @@ -945,7 +953,7 @@ trait ConsoleMacros extends NamedLogging with NoTracing { logger.info("Proposed a sequencer synchronizer state with the new sequencer") // wait for SequencerSynchronizerState to be observed by the sequencer - ConsoleMacros.utils.retry_until_true(commandTimeouts.bounded) { + ConsoleMacros.utils.retry_until_true(commandTimeout) { val sequencerStates = existingSequencer.topology.sequencers.list(store = synchronizerId) @@ -966,10 +974,10 @@ trait ConsoleMacros extends NamedLogging with NoTracing { // now we can establish the sequencer snapshot val onboardingState = - existingSequencer.setup.onboarding_state_for_sequencer(newSequencer.id) + existingSequencer.setup.onboarding_state_for_sequencerV2(newSequencer.id) // finally, initialize "newSequencer" - newSequencer.setup.assign_from_onboarding_state(onboardingState).discard + newSequencer.setup.assign_from_onboarding_stateV2(onboardingState).discard } } diff --git 
a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala index 211d38f197..0489e74aec 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala @@ -58,7 +58,6 @@ import com.digitalasset.canton.synchronizer.sequencer.{ } import com.digitalasset.canton.time.{DelegatingSimClock, SimClock} import com.digitalasset.canton.topology.* -import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId import com.digitalasset.canton.topology.store.TimeQuery import com.digitalasset.canton.tracing.NoTracing import com.digitalasset.canton.util.ErrorUtil @@ -583,8 +582,8 @@ abstract class ParticipantReference( case item if connected.contains(item.physicalSynchronizerId) => ConsoleMacros.utils.retry_until_true(timeout)( { - // ensure that vetted packages on the synchronizer match the ones in the authorized store - val onSynchronizer = participant.topology.vetted_packages + // ensure that vetted packages in this participant's synchronizer store match the vetted packages in other participants' synchronizers' stores + val onSynchronizerOfOtherParticipant = participant.topology.vetted_packages .list( store = item.synchronizerId, filterParticipant = id.filterString, @@ -593,19 +592,21 @@ abstract class ParticipantReference( .flatMap(_.item.packages) .toSet - // Vetted packages from the participant's authorized store - val onParticipantAuthorizedStore = topology.vetted_packages + // Vetted packages from the participant's synchronizer store + val onSynchronizerOfThisParticipant = topology.vetted_packages .list( - store = TopologyStoreId.Authorized, + store = item.synchronizerId, filterParticipant = id.filterString, ) .flatMap(_.item.packages) .toSet - val ret = onParticipantAuthorizedStore == onSynchronizer + val ret = onSynchronizerOfOtherParticipant == onSynchronizerOfThisParticipant if (!ret) { logger.debug( - show"Still waiting for package vetting updates to be observed by Participant ${participant.name} on ${item.physicalSynchronizerId}: vetted -- onSynchronizer is ${onParticipantAuthorizedStore -- onSynchronizer} while onSynchronizer -- vetted is ${onSynchronizer -- onParticipantAuthorizedStore}" + show"""Still waiting for package vetting updates to be observed by Participant ${participant.name} on ${item.physicalSynchronizerId}: + |thisParticipant -- otherParticipant is ${onSynchronizerOfThisParticipant -- onSynchronizerOfOtherParticipant} + |otherParticipant -- thisParticipant is ${onSynchronizerOfOtherParticipant -- onSynchronizerOfThisParticipant}""".stripMargin ) } ret diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/ParticipantReferencesExtensions.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/ParticipantReferencesExtensions.scala index 928cdb96d9..fd34e37f7e 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/ParticipantReferencesExtensions.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/ParticipantReferencesExtensions.scala @@ -10,7 +10,7 @@ import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger} import 
com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig import com.digitalasset.canton.sequencing.SequencerConnectionValidation -import com.digitalasset.canton.topology.PhysicalSynchronizerId +import com.digitalasset.canton.topology.{PhysicalSynchronizerId, SynchronizerId} import com.google.protobuf.ByteString class ParticipantReferencesExtensions(participants: Seq[ParticipantReference])(implicit @@ -27,12 +27,13 @@ class ParticipantReferencesExtensions(participants: Seq[ParticipantReference])(i object dars extends Helpful { @Help.Summary("Upload DARs to participants") @Help.Description( - """If vetAllPackages is true, the participants will vet the package on all synchronizers they are registered. + """If synchronizerId is set, the participants will vet the packages on the specified synchronizer. If synchronizeVetting is true, the command will block until the package vetting transaction has been registered with all connected synchronizers.""" ) def upload( darPath: String, description: String = "", + synchronizerId: Option[SynchronizerId] = None, vetAllPackages: Boolean = true, synchronizeVetting: Boolean = true, expectedMainPackageId: String = "", @@ -45,6 +46,7 @@ class ParticipantReferencesExtensions(participants: Seq[ParticipantReference])(i _, path = darPath, description = description, + synchronizerId = synchronizerId, vetAllPackages = vetAllPackages, synchronizeVetting = synchronizeVetting, expectedMainPackageId = expectedMainPackageId, @@ -61,11 +63,12 @@ class ParticipantReferencesExtensions(participants: Seq[ParticipantReference])(i @Help.Summary("Upload DARs to participants") @Help.Description( - """If vetAllPackages is true, the participants will vet the packages on all synchronizers they are registered. + """If synchronizerId is set, the participants will vet the packages on the specified synchronizer. If synchronizeVetting is true, the command will block until the package vetting transaction has been registered with all connected synchronizers.""" ) def upload_many( paths: Seq[String], + synchronizerId: Option[SynchronizerId], vetAllPackages: Boolean, synchronizeVetting: Boolean, requestHeaders: Map[String, String], @@ -75,6 +78,7 @@ class ParticipantReferencesExtensions(participants: Seq[ParticipantReference])(i ParticipantCommands.dars.upload_many( _, paths = paths, + synchronizerId = synchronizerId, vetAllPackages = vetAllPackages, synchronizeVetting = synchronizeVetting, requestHeaders = requestHeaders, diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/RepairMacros.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/RepairMacros.scala index 88aac2c097..f61a563302 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/RepairMacros.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/RepairMacros.scala @@ -315,7 +315,16 @@ class RepairMacros(override val loggerFactory: NamedLoggerFactory) val files = darsDir.list files.filter(_.name.endsWith(".dar")).foreach { file => logger.info(s"Uploading DAR file $file") - node.dars.upload(file.pathAsString, synchronizeVetting = false).discard[String] + node.dars + .upload( + file.pathAsString, + // Do not vet, because the DAR may not have been vetted on the + // previous participant, and because vetting all recovered DARs + // may not be possible. 
+ vetAllPackages = false, + synchronizeVetting = false, + ) + .discard[String] } } } diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/HealthAdministration.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/HealthAdministration.scala index f5cf45cec8..42cf73ed49 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/HealthAdministration.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/HealthAdministration.scala @@ -124,7 +124,7 @@ abstract class HealthAdministration[S <: NodeStatus.Status]( case _: NodeStatus.Failure => false case _: NodeStatus.Success[?] => logger.warn( - "Since node is already initialized, it will never be ready to have its if set" + "Since the node is already initialized, it will never be ready to have its id set" )(TraceContext.empty) false case NodeStatus.NotInitialized(_active, waitingFor) => diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/LedgerApiAdministration.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/LedgerApiAdministration.scala index 80e55f74e1..055b84bdea 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/LedgerApiAdministration.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/LedgerApiAdministration.scala @@ -9,7 +9,7 @@ import cats.syntax.traverse.* import com.daml.jwt.{AuthServiceJWTCodec, JwksUrl, Jwt, JwtDecoder, StandardJWTPayload} import com.daml.ledger.api.v2.admin.command_inspection_service.CommandState import com.daml.ledger.api.v2.admin.package_management_service.PackageDetails -import com.daml.ledger.api.v2.admin.party_management_service.PartyDetails as ProtoPartyDetails +import com.daml.ledger.api.v2.admin.party_management_service.AllocateExternalPartyResponse import com.daml.ledger.api.v2.commands.{Command, DisclosedContract, PrefetchContractKey} import com.daml.ledger.api.v2.completion.Completion import com.daml.ledger.api.v2.event.CreatedEvent @@ -57,7 +57,6 @@ import com.daml.ledger.javaapi.data.{ Transaction, TransactionFormat, } -import com.daml.ledger.api.v2.value.Identifier import com.daml.metrics.api.MetricsContext import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands.UpdateService.* @@ -67,10 +66,15 @@ import com.digitalasset.canton.admin.api.client.commands.LedgerApiTypeWrappers.{ WrappedIncompleteUnassigned, } import com.digitalasset.canton.admin.api.client.data.* +import com.digitalasset.canton.admin.api.client.data.parties.{ + GenerateExternalPartyTopology, + PartyDetails, +} import com.digitalasset.canton.config.ConsoleCommandTimeout -import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} import com.digitalasset.canton.console.{ AdminCommandRunner, + ConsoleCommandResult, ConsoleEnvironment, ConsoleMacros, FeatureFlag, @@ -82,7 +86,7 @@ import com.digitalasset.canton.console.{ ParticipantReference, RemoteParticipantReference, } -import com.digitalasset.canton.crypto.Signature +import com.digitalasset.canton.crypto.{Signature, SigningPublicKey} import com.digitalasset.canton.data.{CantonTimestamp, DeduplicationPeriod} import com.digitalasset.canton.ledger.api.{IdentityProviderConfig, IdentityProviderId}
import com.digitalasset.canton.ledger.client.services.admin.IdentityProviderConfigClient @@ -91,6 +95,7 @@ import com.digitalasset.canton.networking.grpc.{GrpcError, RecordingStreamObserv import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil import com.digitalasset.canton.platform.apiserver.execution.CommandStatus import com.digitalasset.canton.protocol.LfContractId +import com.digitalasset.canton.topology.transaction.TopologyTransaction.GenericTopologyTransaction import com.digitalasset.canton.topology.{ ExternalParty, ParticipantId, @@ -665,6 +670,7 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper userPackageSelectionPreference: Seq[LfPackageId] = Seq.empty, verboseHashing: Boolean = false, prefetchContractKeys: Seq[PrefetchContractKey] = Seq.empty, + maxRecordTime: Option[CantonTimestamp] = None, ): PrepareResponseProto = consoleEnvironment.run { ledgerApiCommand( @@ -680,6 +686,7 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper userPackageSelectionPreference, verboseHashing, prefetchContractKeys, + maxRecordTime, ) ) } @@ -1363,10 +1370,12 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper } @Help.Summary("Read the current connected synchronizers for a party", FeatureFlag.Testing) - def connected_synchronizers(partyId: Party): GetConnectedSynchronizersResponse = + def connected_synchronizers( + partyId: Option[PartyId] = None + ): GetConnectedSynchronizersResponse = check(FeatureFlag.Testing)(consoleEnvironment.run { ledgerApiCommand( - LedgerApiCommands.StateService.GetConnectedSynchronizers(partyId.toLf) + LedgerApiCommands.StateService.GetConnectedSynchronizers(partyId.map(_.toLf)) ) }) @@ -1426,7 +1435,7 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper limit: PositiveInt = defaultLimit, verbose: Boolean = true, filterTemplates: Seq[TemplateId] = Seq.empty, - filterInterfaces: Seq[Identifier] = Seq.empty, + filterInterfaces: Seq[TemplateId] = Seq.empty, activeAtOffsetO: Option[Long] = None, timeout: config.NonNegativeDuration = timeouts.unbounded, includeCreatedEventBlob: Boolean = false, @@ -1475,6 +1484,7 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper |- limit: limit (default set via canton.parameter.console) |- verbose: whether the resulting events should contain detailed type information |- filterTemplate: list of templates ids to filter for, empty sequence acts as a wildcard + |- filterInterfaces: list of interface ids to filter for, empty sequence does not influence the resulting filter |- activeAtOffsetO: the offset at which the snapshot of the active contracts will be computed, it | must be no greater than the current ledger end offset and must be greater than or equal to the | last pruning offset. If no offset is specified then the current participant end will be used. 
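For illustration, a minimal console sketch of two behavioural changes above: the new optional maxRecordTime bound on interactive-submission prepare, and the now-optional party filter on connected_synchronizers. participant1, alice and cmd are placeholders, and the exact command paths (ledger_api.interactive_submission, ledger_api.state) and the prepare parameter names besides maxRecordTime are assumptions based on where these methods live in this patch:

  // Cap the record time of the prepared transaction (assumed semantics of the
  // new maxRecordTime parameter); None preserves the previous behaviour.
  val prepared = participant1.ledger_api.interactive_submission.prepare(
    actAs = Seq(alice),
    commands = Seq(cmd),
    maxRecordTime = Some(CantonTimestamp.now().plusSeconds(60L)),
  )
  // The party filter is now optional: restrict to one party, or query without a filter.
  val forAlice = participant1.ledger_api.state.connected_synchronizers(Some(alice))
  val forAll = participant1.ledger_api.state.connected_synchronizers()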
@@ -1487,7 +1497,7 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper limit: PositiveInt = defaultLimit, verbose: Boolean = true, filterTemplates: Seq[TemplateId] = Seq.empty, - filterInterfaces: Seq[Identifier] = Seq.empty, + filterInterfaces: Seq[TemplateId] = Seq.empty, activeAtOffsetO: Option[Long] = None, timeout: config.NonNegativeDuration = timeouts.unbounded, includeCreatedEventBlob: Boolean = false, @@ -1514,6 +1524,7 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper |- limit: limit (default set via canton.parameter.console) |- verbose: whether the resulting events should contain detailed type information |- filterTemplate: list of templates ids to filter for, empty sequence acts as a wildcard + |- filterInterfaces: list of interface ids to filter for, empty sequence does not influence the resulting filter |- activeAtOffsetO: the offset at which the snapshot of the events will be computed, it | must be no greater than the current ledger end offset and must be greater than or equal to the | last pruning offset. If no offset is specified then the current participant end will be used. @@ -1526,7 +1537,7 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper limit: PositiveInt = defaultLimit, verbose: Boolean = true, filterTemplates: Seq[TemplateId] = Seq.empty, - filterInterfaces: Seq[Identifier] = Seq.empty, + filterInterfaces: Seq[TemplateId] = Seq.empty, activeAtOffsetO: Option[Long] = None, timeout: config.NonNegativeDuration = timeouts.unbounded, includeCreatedEventBlob: Boolean = false, @@ -1554,6 +1565,7 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper |- limit: limit (default set via canton.parameter.console) |- verbose: whether the resulting events should contain detailed type information |- filterTemplate: list of templates ids to filter for, empty sequence acts as a wildcard + |- filterInterfaces: list of interface ids to filter for, empty sequence does not influence the resulting filter |- activeAtOffsetO: the offset at which the snapshot of the events will be computed, it must be no | greater than the current ledger end offset and must be greater than or equal to the last | pruning offset. If no offset is specified then the current participant end will be used. @@ -1566,7 +1578,7 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper limit: PositiveInt = defaultLimit, verbose: Boolean = true, filterTemplates: Seq[TemplateId] = Seq.empty, - filterInterfaces: Seq[Identifier] = Seq.empty, + filterInterfaces: Seq[TemplateId] = Seq.empty, activeAtOffsetO: Option[Long] = None, timeout: config.NonNegativeDuration = timeouts.unbounded, includeCreatedEventBlob: Boolean = false, @@ -1595,6 +1607,7 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper - limit: limit (default set via canton.parameter.console) - verbose: whether the resulting events should contain detailed type information - filterTemplate: list of templates ids to filter for, empty sequence acts as a wildcard + - filterInterfaces: list of interface ids to filter for, empty sequence does not influence the resulting filter - activeAtOffsetO: the offset at which the snapshot of the active contracts will be computed, it must be no greater than the current ledger end offset and must be greater than or equal to the last pruning offset. If no offset is specified then the current participant end will be used. 
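The interface filters in the ACS queries above now take the same TemplateId wrapper as the template filters, instead of a raw protobuf Identifier. A sketch, assuming TemplateId exposes a three-part constructor (package id, module name, entity name); participant1, alice and all identifiers are placeholders:

  // Interface ids are now built the same way as template ids and passed to the
  // ACS queries via the filterInterfaces parameter introduced above.
  val iface = TemplateId("<package-id>", "MyModule", "MyInterface")
  val contracts = participant1.ledger_api.state.acs.of_party(
    alice,
    filterInterfaces = Seq(iface),
    includeCreatedEventBlob = true, // needed if the contracts are later used as disclosed contracts
  )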
@@ -1609,7 +1622,7 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper limit: PositiveInt = defaultLimit, verbose: Boolean = true, filterTemplates: Seq[TemplateId] = Seq.empty, - filterInterfaces: Seq[Identifier] = Seq.empty, + filterInterfaces: Seq[TemplateId] = Seq.empty, activeAtOffsetO: Option[Long] = None, timeout: config.NonNegativeDuration = timeouts.unbounded, identityProviderId: String = "", @@ -1731,6 +1744,64 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper PartyDetails.fromProtoPartyDetails(proto) } + @Help.Summary("Generate topology transactions for an external party", FeatureFlag.Preview) + @Help.Description( + """Convenience function to generate the necessary topology transactions. + For more complex setups, please generate your topology transactions manually. + synchronizerId: SynchronizerId for which the transactions should be generated. + partyHint: the prefix for the party + publicKey: the signing public key of the external party + localParticipantObservationOnly: if true, then the allocating participant will only be an observer + otherConfirmingParticipantIds: list of other participants that will be confirming Daml transactions on behalf of the party + confirmationThreshold: number of confirming participants that need to approve a Daml transaction + observingParticipantIds: list of other participants that should observe the transactions of the external party + """ + ) + def generate_topology( + synchronizerId: SynchronizerId, + partyHint: String, + publicKey: SigningPublicKey, + localParticipantObservationOnly: Boolean = false, + otherConfirmingParticipantIds: Seq[ParticipantId] = Seq.empty, + confirmationThreshold: NonNegativeInt = NonNegativeInt.zero, + observingParticipantIds: Seq[ParticipantId] = Seq.empty, + ): GenerateExternalPartyTopology = + check(FeatureFlag.Preview)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.PartyManagementService.GenerateExternalPartyTopology( + synchronizerId = synchronizerId, + partyHint = partyHint, + publicKey = publicKey, + localParticipantObservationOnly = localParticipantObservationOnly, + otherConfirmingParticipantIds = otherConfirmingParticipantIds, + confirmationThreshold = confirmationThreshold, + observingParticipantIds = observingParticipantIds, + ) + ) + }) + + @Help.Summary("Allocate a new external party", FeatureFlag.Preview) + @Help.Description( + """Allocates a new external party on the ledger. + synchronizerId: SynchronizerId on which to allocate the party + transactions: onboarding transactions and their individual signatures + multiSignatures: Signatures over the combined hash of all onboarding transactions""" + ) + def allocate_external( + synchronizerId: SynchronizerId, + transactions: Seq[(GenericTopologyTransaction, Seq[Signature])], + multiSignatures: Seq[Signature], + ): AllocateExternalPartyResponse = + check(FeatureFlag.Preview)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.PartyManagementService.AllocateExternalParty( + synchronizerId = synchronizerId, + transactions = transactions, + multiHashSignatures = multiSignatures, + ) + ) + }) + @Help.Summary("List parties known by the Ledger API server") @Help.Description( """Lists parties known by the Ledger API server.
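The two Preview commands above are meant to be combined: generate_topology produces the onboarding topology transactions for a party whose signing key is held outside the participant, and allocate_external submits them once signed (the same pairing this patch uses in PartiesAdministration via ledger_api.parties.allocate_external). A sketch; the result field topologyTransactions and the externally produced signature multiSig are assumptions, and participant1, daId and alicePublicKey are placeholders:

  // 1. Generate the onboarding topology transactions for the external party.
  val generated = participant1.ledger_api.parties.generate_topology(
    synchronizerId = daId,
    partyHint = "alice",
    publicKey = alicePublicKey, // SigningPublicKey controlled by the external party
  )
  // 2. The external party signs the combined hash of all onboarding transactions
  //    with its own key, outside the participant, yielding multiSig.
  // 3. Submit the transactions; whether the per-transaction signatures can be left
  //    empty when a combined-hash signature is supplied is an assumption here.
  participant1.ledger_api.parties.allocate_external(
    synchronizerId = daId,
    transactions = generated.topologyTransactions.map(tx => tx -> Seq.empty[Signature]),
    multiSignatures = Seq(multiSig),
  )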
@@ -1748,6 +1819,26 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper proto.map(PartyDetails.fromProtoPartyDetails) } + @Help.Summary("Get party details for known parties") + @Help.Description( + """Get party details for parties known by the Ledger API server for the given identity provider. + identityProviderId: identity provider id + failOnNotFound: if true, fail if any of the given parties is not found""" + ) + def get( + parties: Seq[PartyId], + identityProviderId: String = "", + failOnNotFound: Boolean = true, + ): Map[PartyId, PartyDetails] = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.PartyManagementService.GetParties( + parties = parties, + identityProviderId = identityProviderId, + failOnNotFound = failOnNotFound, + ) + ) + }).map { case (k, v) => (k, PartyDetails.fromProtoPartyDetails(v)) } + @Help.Summary("Update participant-local party details") @Help.Description( """Currently you can update only the annotations. @@ -1761,25 +1852,46 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper modifier: PartyDetails => PartyDetails, identityProviderId: String = "", ): PartyDetails = { - val rawDetails = get(party = party) - val srcDetails = PartyDetails.fromProtoPartyDetails(rawDetails) - val modifiedDetails = modifier(srcDetails) - verifyOnlyModifiableFieldsWhereModified(srcDetails, modifiedDetails) - val annotationsUpdate = makeAnnotationsUpdate( - original = srcDetails.annotations, - modified = modifiedDetails.annotations, - ) val rawUpdatedDetails = consoleEnvironment.run { - ledgerApiCommand( - LedgerApiCommands.PartyManagementService.Update( - party = party, - annotationsUpdate = Some(annotationsUpdate), - resourceVersionO = Some(rawDetails.localMetadata.fold("")(_.resourceVersion)), - identityProviderId = identityProviderId, + for { + rawDetailsMap <- ledgerApiCommand( + LedgerApiCommands.PartyManagementService.GetParties( + parties = Seq(party.partyId), + identityProviderId = identityProviderId, + failOnNotFound = true, + ) ) - ) + rawDetails <- ConsoleCommandResult.fromEither( + rawDetailsMap + .get(party.partyId) + .toRight(s"No such party $party") + ) + srcDetails = PartyDetails.fromProtoPartyDetails(rawDetails) + modifiedDetails = modifier(srcDetails) + // verify that only modifiable fields were modified + _ <- { + ConsoleCommandResult.fromEither( + Either.cond( + modifiedDetails.copy(annotations = srcDetails.annotations) == srcDetails, + (), + s"Update to party details of ${party.partyId} attempted to modify unmodifiable fields.", + ) + ) + } + annotationsUpdate = makeAnnotationsUpdate( + original = srcDetails.annotations, + modified = modifiedDetails.annotations, + ) + result <- ledgerApiCommand( + LedgerApiCommands.PartyManagementService.Update( + party = party, + annotationsUpdate = Some(annotationsUpdate), + resourceVersionO = Some(rawDetails.localMetadata.fold("")(_.resourceVersion)), + identityProviderId = identityProviderId, + ) + ) + } yield result } - PartyDetails.fromProtoPartyDetails(rawUpdatedDetails) } @@ -1805,25 +1917,6 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper ) } - private def verifyOnlyModifiableFieldsWhereModified( - srcDetails: PartyDetails, - modifiedDetails: PartyDetails, - ): Unit = { - val withAllowedUpdatesReverted = modifiedDetails.copy(annotations = srcDetails.annotations) - if (withAllowedUpdatesReverted != srcDetails) { - throw ModifyingNonModifiablePartyDetailsPropertiesError() - } - } - - private def get(party: Party, identityProviderId: String = ""):
ProtoPartyDetails = - consoleEnvironment.run { - ledgerApiCommand( - LedgerApiCommands.PartyManagementService.GetParty( - party = party, - identityProviderId = identityProviderId, - ) - ) - } } @Help.Summary("Manage packages") @@ -1838,9 +1931,11 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper |Additionally, Dars uploaded using the ledger Api will be vetted, but the system will not wait |for the Dars to be successfully registered with all connected synchronizers. As such, if a Dar is uploaded and then |used immediately thereafter, a command might bounce due to missing package vettings.""") - def upload_dar(darPath: String): Unit = + def upload_dar(darPath: String, synchronizerId: Option[SynchronizerId] = None): Unit = consoleEnvironment.run { - ledgerApiCommand(LedgerApiCommands.PackageManagementService.UploadDarFile(darPath)) + ledgerApiCommand( + LedgerApiCommands.PackageManagementService.UploadDarFile(darPath, synchronizerId) + ) } @Help.Summary("List Daml Packages") @@ -1856,7 +1951,9 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper ) def validate_dar(darPath: String): Unit = consoleEnvironment.run { - ledgerApiCommand(LedgerApiCommands.PackageManagementService.ValidateDarFile(darPath)) + ledgerApiCommand( + LedgerApiCommands.PackageManagementService.ValidateDarFile(darPath, None) + ) } } @@ -2390,6 +2487,7 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper userPackageSelectionPreference: Seq[LfPackageId] = Seq.empty, verboseHashing: Boolean = false, prefetchContractKeys: Seq[javab.data.PrefetchContractKey] = Seq.empty, + maxRecordTime: Option[CantonTimestamp] = None, ): PrepareResponseProto = consoleEnvironment.run { ledgerApiCommand( @@ -2405,6 +2503,7 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper userPackageSelectionPreference, verboseHashing, prefetchContractKeys.map(k => PrefetchContractKey.fromJavaProto(k.toProto)), + maxRecordTime, ) ) } diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala index 9c29864cb2..0750c40943 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala @@ -107,6 +107,7 @@ private[console] object ParticipantCommands { runner: AdminCommandRunner, path: String, description: String, + synchronizerId: Option[SynchronizerId], vetAllPackages: Boolean, synchronizeVetting: Boolean, expectedMainPackageId: String, @@ -119,8 +120,9 @@ private[console] object ParticipantCommands { ParticipantAdminCommands.Package .UploadDar( path, - vetAllPackages, - synchronizeVetting, + synchronizerId, + vetAllPackages = vetAllPackages, + synchronizeVetting = synchronizeVetting, description, expectedMainPackageId, requestHeaders, @@ -144,6 +146,7 @@ private[console] object ParticipantCommands { def upload_many( runner: AdminCommandRunner, paths: Seq[String], + synchronizerId: Option[SynchronizerId], vetAllPackages: Boolean, synchronizeVetting: Boolean, requestHeaders: Map[String, String], @@ -153,8 +156,9 @@ private[console] object ParticipantCommands { ParticipantAdminCommands.Package .UploadDar( paths.map(DarData(_, "", "")), - vetAllPackages, - synchronizeVetting, + 
synchronizerId, + vetAllPackages = vetAllPackages, + synchronizeVetting = synchronizeVetting, requestHeaders, logger, ) @@ -167,7 +171,7 @@ private[console] object ParticipantCommands { ): ConsoleCommandResult[String] = runner.adminCommand( ParticipantAdminCommands.Package - .ValidateDar(Some(path), logger) + .ValidateDar(Some(path), None, logger) ) } @@ -185,6 +189,8 @@ private[console] object ParticipantCommands { sequencerLivenessMargin: NonNegativeInt = NonNegativeInt.zero, submissionRequestAmplification: SubmissionRequestAmplification = SubmissionRequestAmplification.NoAmplification, + sequencerConnectionPoolDelays: SequencerConnectionPoolDelays = + SequencerConnectionPoolDelays.default, ): SynchronizerConnectionConfig = SynchronizerConnectionConfig( synchronizerAlias, @@ -195,6 +201,7 @@ private[console] object ParticipantCommands { sequencerTrustThreshold, sequencerLivenessMargin, submissionRequestAmplification, + sequencerConnectionPoolDelays, ), manualConnect = manualConnect, psid, @@ -1573,14 +1580,14 @@ trait ParticipantAdministration extends FeatureFlagFilter { |In order to use Daml templates on a participant, the Dar must first be uploaded and then |vetted by the participant. Vetting will ensure that other participants can check whether they |can actually send a transaction referring to a particular Daml package and participant. - |Vetting is done by registering a VettedPackages topology transaction with the topology manager. - |By default, vetting happens automatically and this command waits for - |the vetting transaction to be successfully registered on all connected synchronizers. - |This is the safe default setting minimizing race conditions. + |Packages must be vetted on each synchronizer by registering a VettedPackages topology transaction. | - |If vetAllPackages is true (default), the packages will all be vetted on all synchronizers the participant is registered. + |If synchronizerId is not set (default), the packages will only be vetted if the participant is connected to exactly one synchronizer. |If synchronizeVetting is true (default), then the command will block until the participant has observed the vetting transactions to be registered with the synchronizer. | + |This command waits for the vetting transaction to be successfully registered on the synchronizer. + |This is the safe default setting minimizing race conditions. + | |Note that synchronize vetting might block on permissioned synchronizers that do not just allow participants to update the topology state. |In such cases, synchronizeVetting should be turned off.
|Synchronize vetting can be invoked manually using $participant.package.synchronize_vettings() @@ -1588,6 +1595,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { def upload( path: String, description: String = "", + synchronizerId: Option[SynchronizerId] = None, vetAllPackages: Boolean = true, synchronizeVetting: Boolean = true, expectedMainPackageId: String = "", @@ -1601,6 +1609,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { runner, path = path, description = description, + synchronizerId = synchronizerId, vetAllPackages = vetAllPackages, synchronizeVetting = synchronizeVetting, expectedMainPackageId = expectedMainPackageId, @@ -1610,7 +1619,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { .toEither } yield mainPackageId } - if (synchronizeVetting && vetAllPackages) { + if (synchronizeVetting && synchronizerId.nonEmpty) { packages.synchronize_vetting() } res @@ -1626,20 +1635,21 @@ trait ParticipantAdministration extends FeatureFlagFilter { |In order to use Daml templates on a participant, the Dars must first be uploaded and then |vetted by the participant. Vetting will ensure that other participants can check whether they |can actually send a transaction referring to a particular Daml package and participant. - |Vetting is done by registering a VettedPackages topology transaction with the topology manager. - |By default, vetting happens automatically and this command waits for - |the vetting transaction to be successfully registered on all connected synchronizers. - |This is the safe default setting minimizing race conditions. + |Packages must be vetted on each synchronizer by registering a VettedPackages topology transaction. | - |If vetAllPackages is true (default), the packages will all be vetted on all synchronizers the participant is registered. + |If synchronizerId is not set (default), the packages will only be vetted if the participant is connected to exactly one synchronizer. |If synchronizeVetting is true (default), then the command will block until the participant has observed the vetting transactions to be registered with the synchronizer. | + |This command waits for the vetting transaction to be successfully registered on the synchronizer. + |This is the safe default setting minimizing race conditions. + | |Note that synchronize vetting might block on permissioned synchronizers that do not just allow participants to update the topology state. |In such cases, synchronizeVetting should be turned off. |Synchronize vetting can be invoked manually using $participant.package.synchronize_vettings() |""") def upload_many( paths: Seq[String], + synchronizerId: Option[SynchronizerId] = None, vetAllPackages: Boolean = true, synchronizeVetting: Boolean = true, requestHeaders: Map[String, String] = Map(), @@ -1650,6 +1660,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { .upload_many( runner, paths, + synchronizerId = synchronizerId, vetAllPackages = vetAllPackages, synchronizeVetting = synchronizeVetting, requestHeaders = requestHeaders, @@ -1658,7 +1669,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { .toEither } yield mainPackageId } - if (synchronizeVetting && vetAllPackages) { + if (synchronizeVetting) { packages.synchronize_vetting() } res @@ -1693,9 +1704,15 @@ trait ParticipantAdministration extends FeatureFlagFilter { @Help.Summary( "Vet all packages contained in the DAR archive identified by the provided main package-id."
) - def enable(mainPackageId: String, synchronize: Boolean = true): Unit = + def enable( + mainPackageId: String, + synchronize: Boolean = true, + synchronizerId: Option[SynchronizerId] = None, + ): Unit = consoleEnvironment.run { - adminCommand(ParticipantAdminCommands.Package.VetDar(mainPackageId, synchronize)) + adminCommand( + ParticipantAdminCommands.Package.VetDar(mainPackageId, synchronize, synchronizerId) + ) } @Help.Summary( @@ -1705,9 +1722,9 @@ trait ParticipantAdministration extends FeatureFlagFilter { |was symmetric and resulted in a single vetting topology transaction for all the packages in the DAR. |This command is potentially dangerous and misuse |can lead the participant to fail in processing transactions""") - def disable(mainPackageId: String): Unit = + def disable(mainPackageId: String, synchronizerId: Option[SynchronizerId] = None): Unit = consoleEnvironment.run { - adminCommand(ParticipantAdminCommands.Package.UnvetDar(mainPackageId)) + adminCommand(ParticipantAdminCommands.Package.UnvetDar(mainPackageId, synchronizerId)) } } } @@ -1958,6 +1975,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { sequencerTrustThreshold - Set the minimum number of sequencers that must agree before a message is considered valid. sequencerLivenessMargin - Set the number of extra subscriptions to maintain beyond `sequencerTrustThreshold` in order to ensure liveness. submissionRequestAmplification - Define how often client should try to send a submission request that is eligible for deduplication. + sequencerConnectionPoolDelays - Define the various delays used by the sequencer connection pool. validation - Whether to validate the connectivity and ids of the given sequencers (default All) """) def connect_local_bft( @@ -1974,6 +1992,8 @@ trait ParticipantAdministration extends FeatureFlagFilter { sequencerLivenessMargin: NonNegativeInt = NonNegativeInt.zero, submissionRequestAmplification: SubmissionRequestAmplification = SubmissionRequestAmplification.NoAmplification, + sequencerConnectionPoolDelays: SequencerConnectionPoolDelays = + SequencerConnectionPoolDelays.default, validation: SequencerConnectionValidation = SequencerConnectionValidation.All, ): Unit = { val config = ParticipantCommands.synchronizers.reference_to_config( @@ -1986,6 +2006,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { sequencerTrustThreshold = sequencerTrustThreshold, sequencerLivenessMargin = sequencerLivenessMargin, submissionRequestAmplification = submissionRequestAmplification, + sequencerConnectionPoolDelays = sequencerConnectionPoolDelays, ) connect_by_config(config, validation, synchronize) } @@ -2004,6 +2025,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { sequencerTrustThreshold - Set the minimum number of sequencers that must agree before a message is considered valid. sequencerLivenessMargin - Set the number of extra subscriptions to maintain beyond `sequencerTrustThreshold` in order to ensure liveness. submissionRequestAmplification - Define how often client should try to send a submission request that is eligible for deduplication. + sequencerConnectionPoolDelays - Define the various delays used by the sequencer connection pool. 
validation - Whether to validate the connectivity and ids of the given sequencers (default All) """) def connect_bft( @@ -2019,6 +2041,8 @@ trait ParticipantAdministration extends FeatureFlagFilter { sequencerLivenessMargin: NonNegativeInt = NonNegativeInt.zero, submissionRequestAmplification: SubmissionRequestAmplification = SubmissionRequestAmplification.NoAmplification, + sequencerConnectionPoolDelays: SequencerConnectionPoolDelays = + SequencerConnectionPoolDelays.default, validation: SequencerConnectionValidation = SequencerConnectionValidation.All, ): Unit = { val config = SynchronizerConnectionConfig.tryGrpc( @@ -2030,6 +2054,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { sequencerTrustThreshold = sequencerTrustThreshold, sequencerLivenessMargin = sequencerLivenessMargin, submissionRequestAmplification = submissionRequestAmplification, + sequencerConnectionPoolDelays = sequencerConnectionPoolDelays, ) connect_by_config(config, validation, synchronize) } diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantRepairAdministration.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantRepairAdministration.scala index d20a24d6dd..94de880665 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantRepairAdministration.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantRepairAdministration.scala @@ -23,8 +23,9 @@ import com.digitalasset.canton.grpc.FileStreamObserver import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.participant.admin.data.{ ActiveContractOld, - ContractIdImportMode, + ContractImportMode, RepairContract, + RepresentativePackageIdOverride, } import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig import com.digitalasset.canton.protocol.{ContractInstance, LfContractId} @@ -191,8 +192,6 @@ class ParticipantRepairAdministration( outputFile: String = ParticipantRepairAdministration.ExportAcsDefaultFile, filterSynchronizerId: Option[SynchronizerId] = None, timestamp: Option[Instant] = None, - contractSynchronizerRenames: Map[SynchronizerId, (SynchronizerId, ProtocolVersion)] = - Map.empty, force: Boolean = false, timeout: NonNegativeDuration = timeouts.unbounded, ): Unit = @@ -210,7 +209,6 @@ class ParticipantRepairAdministration( filterSynchronizerId, timestamp, responseObserver, - contractSynchronizerRenames, force = force, ) ) @@ -337,10 +335,10 @@ class ParticipantRepairAdministration( |The contract IDs of the imported contracts may be checked ahead of starting the process. |If any contract ID doesn't match the contract ID scheme associated to the synchronizer |where the contract is assigned to, the whole import process fails depending on the value - |of `contractIdImportMode`. + |of `contractImportMode`. | - |By default `contractIdImportMode` is set to `ContractIdImportMode.Validation`. If set to - |`ContractIdImportMode.Recomputation`, any contract ID that wouldn't pass the check above + |By default `contractImportMode` is set to `ContractImportMode.Validation`. If set to + |`ContractImportMode.Recomputation`, any contract ID that wouldn't pass the check above |will be recomputed. 
Note that the recomputation of contract IDs fails under the following |circumstances: | - the contract salt used to compute the contract ID is missing @@ -355,7 +353,7 @@ class ParticipantRepairAdministration( | |Expert only: As validation or recomputation on contract IDs may lengthen the import |significantly, you have the option to simply accept the contract IDs as they are using - |`ContractIdImportMode.Accept`. + |`ContractImportMode.Accept`. | |If the import process succeeds, the mapping from the old contract IDs to the new contract |IDs will be returned. An empty map means that all contract IDs were valid, or have been @@ -365,18 +363,22 @@ class ParticipantRepairAdministration( |- importFilePath: The path denoting the file from where the ACS snapshot will be read. | Defaults to "canton-acs-export.gz" when undefined. |- workflowIdPrefix: Prefixes the workflow ID for the import. Defaults to - | "import-" when undefined. - |- contractIdImportMode: Governs contract ID processing on import. Options include - | Validation (default), [Accept, Recomputation]. + | "import-" when undefined. + |- contractImportMode: Governs contract ID processing on import. Options include + | Validation (default), [Accept, Recomputation]. |- excludedStakeholders: When defined, any contract that has one or more of these | parties as a stakeholder will be omitted from the import. + |- representativePackageIdOverride: Defines override mappings for assigning + | representative package IDs to contracts upon ACS import. """ ) def import_acs( importFilePath: String = "canton-acs-export.gz", workflowIdPrefix: String = "", - contractIdImportMode: ContractIdImportMode = ContractIdImportMode.Validation, + contractImportMode: ContractImportMode = ContractImportMode.Validation, excludedStakeholders: Set[PartyId] = Set.empty, + representativePackageIdOverride: RepresentativePackageIdOverride = + RepresentativePackageIdOverride.NoOverride, ): Map[LfContractId, LfContractId] = check(FeatureFlag.Repair) { consoleEnvironment.run { @@ -384,8 +386,9 @@ class ParticipantRepairAdministration( ParticipantAdminCommands.ParticipantRepairManagement.ImportAcs( ByteString.copyFrom(File(importFilePath).loadBytes), if (workflowIdPrefix.nonEmpty) workflowIdPrefix else s"import-${UUID.randomUUID}", - contractIdImportMode = contractIdImportMode, + contractImportMode = contractImportMode, excludedStakeholders, + representativePackageIdOverride, ) ) } diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministration.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministration.scala index 46346004d6..4b5479f035 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministration.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministration.scala @@ -23,7 +23,6 @@ import com.digitalasset.canton.admin.api.client.data.{AddPartyStatus, ListPartie import com.digitalasset.canton.admin.participant.v30.ExportPartyAcsResponse import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt} import com.digitalasset.canton.config.{ConsoleCommandTimeout, NonNegativeDuration} -import com.digitalasset.canton.console.ConsoleMacros.utils import com.digitalasset.canton.console.commands.TopologyTxFiltering.{AddedFilter, RevokedFilter} import com.digitalasset.canton.console.{ AdminCommandRunner, @@ -35,8 +34,8 @@ import 
com.digitalasset.canton.console.{ Helpful, ParticipantReference, } -import com.digitalasset.canton.crypto.SigningKeyUsage -import com.digitalasset.canton.data.OnboardingTransactions +import com.digitalasset.canton.crypto.{Fingerprint, SigningKeyUsage} +import com.digitalasset.canton.data.{CantonTimestamp, OnboardingTransactions} import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.grpc.FileStreamObserver import com.digitalasset.canton.lifecycle.FutureUnlessShutdown @@ -48,8 +47,10 @@ import com.digitalasset.canton.topology.transaction.{TopologyTransaction, *} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.EitherTUtil import com.digitalasset.canton.util.ShowUtil.* +import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.canton.{LedgerParticipantId, SynchronizerAlias, config} import com.google.common.annotations.VisibleForTesting +import com.google.protobuf.ByteString import io.grpc.Context import java.time.Instant @@ -218,7 +219,6 @@ class ParticipantPartiesAdministrationGroup( synchronizeParticipants: Seq[ParticipantReference] = consoleEnvironment.participants.all, synchronize: Option[config.NonNegativeDuration] = Some(timeouts.unbounded), // External party specifics - confirmationThreshold: PositiveInt = PositiveInt.one, keysCount: PositiveInt = PositiveInt.one, keysThreshold: PositiveInt = PositiveInt.one, ): ExternalParty = { @@ -231,34 +231,15 @@ class ParticipantPartiesAdministrationGroup( onboardingData <- onboarding_transactions( name, synchronizer, - confirmationThreshold = confirmationThreshold, keysCount = keysCount, keysThreshold = keysThreshold, ) (onboardingTxs, externalParty) = onboardingData - _ = reference.topology.transactions.load( - onboardingTxs.toSeq, - psid, - synchronize = synchronize, - ) - - // Wait until the proposal is known - _ = utils.retry_until_true( - reference.topology.party_to_participant_mappings - .list( - psid, - proposals = true, - filterParticipant = reference.id.filterString, - filterParty = externalParty.filterString, - ) - .nonEmpty - ) - - _ = reference.topology.transactions.authorize[PartyToParticipant]( - txHash = onboardingTxs.partyToParticipant.hash, - mustBeFullyAuthorized = true, - store = psid, + _ = reference.ledger_api.parties.allocate_external( + psid.logical, + onboardingTxs.transactionsWithSingleSignature, + onboardingTxs.multiTransactionSignatures, ) _ <- EitherT.fromEither[FutureUnlessShutdown]( @@ -276,25 +257,155 @@ class ParticipantPartiesAdministrationGroup( consoleEnvironment.run(ConsoleCommandResult.fromEitherTUS(onboardingET)) } + /** Generate the party id and namespace transaction for a centralized namespace party. Creates + * the namespace key in the global crypto store. 
+ */ + private def build_centralized_namespace( + name: String, + protocolVersion: ProtocolVersion, + ): EitherT[ + FutureUnlessShutdown, + String, + (PartyId, TopologyTransaction[TopologyChangeOp.Replace, TopologyMapping]), + ] = for { + namespaceKey <- consoleEnvironment.tryGlobalCrypto + .generateSigningKey(usage = SigningKeyUsage.NamespaceOnly) + .leftMap(_.toString) + partyId = PartyId.tryCreate(name, namespaceKey.fingerprint) + mapping <- EitherT.fromEither[FutureUnlessShutdown]( + NamespaceDelegation.create( + namespace = partyId.namespace, + target = namespaceKey, + CanSignAllMappings, + ) + ) + namespaceTx = TopologyTransaction( + TopologyChangeOp.Replace, + serial = PositiveInt.one, + mapping, + protocolVersion, + ) + } yield (partyId, namespaceTx) + + /** Generate the party id and namespace transaction for a decentralized namespace party from a + * set of existing namespaces. The namespaces must already exist and be authorized in the + * topology of the target synchronizer. + */ + private def build_decentralized_namespace( + name: String, + protocolVersion: ProtocolVersion, + namespaceOwners: NonEmpty[Set[Namespace]], + namespaceThreshold: PositiveInt, + ): ( + PartyId, + TopologyTransaction[TopologyChangeOp.Replace, TopologyMapping], + ) = { + val decentralizedNamespace = + DecentralizedNamespaceDefinition.computeNamespace(namespaceOwners.forgetNE) + val partyId = PartyId.tryCreate(name, decentralizedNamespace.fingerprint) + val namespaceTx = TopologyTransaction( + TopologyChangeOp.Replace, + serial = PositiveInt.one, + DecentralizedNamespaceDefinition.tryCreate( + decentralizedNamespace, + namespaceThreshold, + namespaceOwners, + ), + protocolVersion, + ) + (partyId, namespaceTx) + } + + /** Utility method to create a namespace delegation controlled by an external key. Use this to + * create namespaces, for instance prior to allocating an external party controlled by a + * decentralized namespace.
+ * @param synchronizer + * Synchronizer + * @return + * Namespace + */ + @VisibleForTesting // Ensures external namespaces are created only in tests + def create_external_namespace( + synchronizer: Option[SynchronizerAlias] = None, + synchronize: Option[config.NonNegativeDuration] = Some(timeouts.unbounded), + ): Namespace = { + val res = for { + psid <- EitherT + .fromEither[FutureUnlessShutdown]( + lookupOrDetectSynchronizerId(synchronizer) + ) + .leftMap(err => s"Cannot find protocol version: $err") + + namespaceKey <- consoleEnvironment.tryGlobalCrypto + .generateSigningKey(usage = SigningKeyUsage.NamespaceOnly) + .leftMap(_.toString) + + namespace = Namespace(namespaceKey.fingerprint) + + namespaceTx = TopologyTransaction( + TopologyChangeOp.Replace, + serial = PositiveInt.one, + NamespaceDelegation.tryCreate( + namespace = namespace, + target = namespaceKey, + CanSignAllMappings, + ), + psid.protocolVersion, + ) + + signature <- consoleEnvironment.tryGlobalCrypto.privateCrypto + .sign( + namespaceTx.hash.hash, + namespaceKey.fingerprint, + NonEmpty.mk(Set, SigningKeyUsage.Namespace), + ) + .leftMap(_.toString) + + signedNamespace = SignedTopologyTransaction + .withTopologySignatures( + namespaceTx, + NonEmpty.mk(Seq, SingleTransactionSignature(namespaceTx.hash, signature)), + isProposal = false, + psid.protocolVersion, + ) + + _ = reference.topology.transactions.load( + Seq(signedNamespace), + psid, + synchronize = synchronize, + ) + } yield Namespace(namespaceKey.fingerprint) + + consoleEnvironment.run(ConsoleCommandResult.fromEitherTUS(res)) + } + /** Compute the onboarding transaction to enable party `name` * @param name * Name of the party to be enabled * @param synchronizer * Synchronizer - * @param confirming + * @param additionalConfirming * Other confirming participants * @param observing * Observing participants + * @param decentralizedNamespaceOwners + * Set when creating a party controlled by a decentralized namespace. The namespaces must + * already exist and be authorized in the topology of the target synchronizer. + * @param namespaceThreshold + * Threshold of the decentralized namespace. Only used when decentralizedNamespaceOwners is + * non-empty.
*/ @VisibleForTesting // Ensures external parties are created only in tests def onboarding_transactions( name: String, synchronizer: Option[SynchronizerAlias] = None, - confirming: Seq[ParticipantId] = Seq.empty, + additionalConfirming: Seq[ParticipantId] = Seq.empty, observing: Seq[ParticipantId] = Seq.empty, confirmationThreshold: PositiveInt = PositiveInt.one, keysCount: PositiveInt = PositiveInt.one, keysThreshold: PositiveInt = PositiveInt.one, + decentralizedNamespaceOwners: Set[Namespace] = Set.empty, + namespaceThreshold: PositiveInt = PositiveInt.one, ): EitherT[FutureUnlessShutdown, String, (OnboardingTransactions, ExternalParty)] = for { protocolVersion <- EitherT @@ -303,21 +414,22 @@ class ParticipantPartiesAdministrationGroup( ) .leftMap(err => s"Cannot find protocol version: $err") - namespaceKey <- consoleEnvironment.tryGlobalCrypto - .generateSigningKey(usage = SigningKeyUsage.NamespaceOnly) - .leftMap(_.toString) - partyId = PartyId.tryCreate(name, namespaceKey.fingerprint) + decentralizedOwnersNEO = NonEmpty.from(decentralizedNamespaceOwners) - namespaceDelegationTx = TopologyTransaction( - TopologyChangeOp.Replace, - serial = PositiveInt.one, - NamespaceDelegation.tryCreate( - namespace = partyId.namespace, - target = namespaceKey, - CanSignAllMappings, - ), - protocolVersion, - ) + partyIdAndNamespaceTx <- decentralizedOwnersNEO + .map(namespaceOwnersNE => + EitherT.pure[FutureUnlessShutdown, String]( + build_decentralized_namespace( + name, + protocolVersion, + namespaceOwnersNE, + namespaceThreshold, + ) + ) + ) + .getOrElse(build_centralized_namespace(name, protocolVersion)) + + (partyId, namespaceTx) = partyIdAndNamespaceTx protocolSigningKeys = consoleEnvironment.global_secret.keys.secret .generate_keys(keysCount, usage = SigningKeyUsage.ProtocolOnly) @@ -333,7 +445,7 @@ class ParticipantPartiesAdministrationGroup( protocolVersion, ) - hybridParticipants = confirming.intersect(observing) + hybridParticipants = additionalConfirming.intersect(observing) _ <- EitherT.fromEither[FutureUnlessShutdown]( NonEmpty .from(hybridParticipants) @@ -348,7 +460,7 @@ class ParticipantPartiesAdministrationGroup( "reference participant should not be observing", ) - hostingConfirming = (reference.id +: confirming).map( + hostingConfirming = (reference.id +: additionalConfirming).map( HostingParticipant(_, ParticipantPermission.Confirmation) ) @@ -367,45 +479,252 @@ class ParticipantPartiesAdministrationGroup( protocolVersion, ) - transactionHashes = NonEmpty.mk( - Set, - namespaceDelegationTx.hash, - partyToParticipantTx.hash, - partyToKeyTx.hash, + onboardingTransactions <- bundle_onboarding_transactions( + partyId = partyId, + protocolSigningKeys = protocolSigningKeys.map(_.fingerprint), + protocolVersion = protocolVersion, + namespaceTx = namespaceTx, + partyToKeyTx = partyToKeyTx, + partyToParticipantTx = partyToParticipantTx, + decentralizedNamespaceOwners = decentralizedNamespaceOwners, + ) + } yield ( + onboardingTransactions, + ExternalParty(partyId, protocolSigningKeys.map(_.fingerprint)), + ) + + /** Enable an existing external party hosted on `reference` with confirmation rights. Unlike + * `enable`, this command assumes the external party already exists on a different + * synchronizer. + * + * Note: the keys (and threshold) will be the same as on the synchronizer on which the party is + * already hosted. 
+ * + * @param party + * Name of the party to be enabled + * @param synchronizer + * Synchronizer + * @param synchronizeParticipants + * Participants that need to see activation of the party + */ + @VisibleForTesting // Ensures external parties are created only in tests + def also_enable( + party: ExternalParty, + synchronizer: SynchronizerAlias, + synchronizeParticipants: Seq[ParticipantReference] = consoleEnvironment.participants.all, + synchronize: Option[config.NonNegativeDuration] = Some(timeouts.unbounded), + ): Unit = { + + val onboardingET = for { + psid <- EitherT + .fromEither[FutureUnlessShutdown](lookupOrDetectSynchronizerId(Some(synchronizer))) + .leftMap(err => s"Cannot find physical synchronizer id: $err") + + onboardingTxs <- onboarding_transactions_for_existing( + party, + synchronizer, + confirmationThreshold = PositiveInt.one, + ) + + _ = reference.ledger_api.parties.allocate_external( + psid.logical, + onboardingTxs.transactionsWithSingleSignature, + onboardingTxs.multiTransactionSignatures, ) - combinedMultiTxHash = MultiTransactionSignature.computeCombinedHash( - transactionHashes, - consoleEnvironment.tryGlobalCrypto.pureCrypto, + + _ <- EitherT.fromEither[FutureUnlessShutdown]( + Applicative[Either[String, *]].whenA(synchronize.nonEmpty)( + PartiesAdministration.Allocation.waitForPartyKnown( + partyId = party.partyId, + hostingParticipant = reference, + synchronizeParticipants = synchronizeParticipants, + synchronizerId = psid.logical, + )(consoleEnvironment) + ) ) + } yield () - // Sign the multi hash with the namespace key, as it is needed to authorize all transactions - namespaceSignature = consoleEnvironment.global_secret.sign( + consoleEnvironment.run(ConsoleCommandResult.fromEitherTUS(onboardingET)) + } + + /** Compute onboarding transactions for an existing external party. + * + * Note: the keys (and threshold) will be the same as on the synchronizer on which the party is + * already hosted. 
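The `getUniqueMapping` helper in `onboarding_transactions_for_existing` below follows a simple rule that this self-contained sketch (simplified types, not the Canton API) makes explicit: read the mapping from every known synchronizer store, deduplicate, and fail unless exactly one value remains.

```scala
// Standalone sketch: a value is looked up across several stores and must be
// unique after deduplication, otherwise the onboarding would be ambiguous.
final case class Store(id: String)

object UniqueMappingDemo extends App {
  def uniqueAcross[T](stores: Seq[Store], what: String)(read: Store => Seq[T]): Either[String, T] =
    stores.flatMap(read).distinct match {
      case Seq()       => Left(s"Unable to find $what in any known store")
      case Seq(single) => Right(single)
      case several     => Left(s"Found several $what: $several")
    }

  val stores = Seq(Store("sync-a"), Store("sync-b"))
  // Both stores agree on the same party-to-key mapping, so the lookup succeeds.
  val mappings = Map("sync-a" -> Seq("key-1"), "sync-b" -> Seq("key-1"))
  println(uniqueAcross(stores, "party to key")(s => mappings(s.id))) // Right(key-1)
}
```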
+ * @param name
+ * Name of the party to be enabled
+ * @param synchronizer
+ * Synchronizer
+ * @param additionalConfirming
+ * Other confirming participants
+ * @param observing
+ * Observing participants
+ */
+ @VisibleForTesting // Ensures external parties are used only in tests
+ def onboarding_transactions_for_existing(
+ party: ExternalParty,
+ synchronizer: SynchronizerAlias,
+ additionalConfirming: Seq[ParticipantId] = Seq.empty,
+ observing: Seq[ParticipantId] = Seq.empty,
+ confirmationThreshold: PositiveInt = PositiveInt.one,
+ ): EitherT[FutureUnlessShutdown, String, OnboardingTransactions] = {
+ val knownPSIds = reference.synchronizers.list_registered().collect {
+ case (_, KnownPhysicalSynchronizerId(psid), _) => psid
+ }
+
+ def getUniqueMapping[T](
+ getter: PhysicalSynchronizerId => Seq[T],
+ key: String,
+ ): EitherT[FutureUnlessShutdown, String, T] =
+ knownPSIds.flatMap(getter).toList.distinct match {
+ case Nil => EitherT.leftT[FutureUnlessShutdown, T](s"Unable to find $key for $party")
+ case head :: Nil => EitherT.rightT[FutureUnlessShutdown, String](head)
+ case _several => EitherT.leftT[FutureUnlessShutdown, T](s"Found several $key for $party")
+ }
+
+ for {
+ protocolVersion <- EitherT
+ .fromEither[FutureUnlessShutdown](
+ lookupOrDetectSynchronizerId(Some(synchronizer)).map(_.protocolVersion)
+ )
+ .leftMap(err => s"Cannot find protocol version: $err")
+
+ namespaceDelegation <- getUniqueMapping(
+ psid =>
+ reference.topology.namespace_delegations
+ .list(store = psid, filterNamespace = party.namespace.filterString)
+ .map(_.item),
+ "namespace delegation",
+ )
+
+ namespaceDelegationTx = TopologyTransaction(
+ TopologyChangeOp.Replace,
+ serial = PositiveInt.one,
+ namespaceDelegation,
+ protocolVersion,
+ )
+
+ partyToKey <- getUniqueMapping(
+ psid =>
+ reference.topology.party_to_key_mappings
+ .list(store = psid, filterParty = party.filterString)
+ .map(_.item),
+ "party to key",
+ )
+
+ partyToKeyTx = TopologyTransaction(
+ TopologyChangeOp.Replace,
+ serial = PositiveInt.one,
+ partyToKey,
+ protocolVersion,
+ )
+
+ hybridParticipants = additionalConfirming.intersect(observing)
+ _ <- EitherT.fromEither[FutureUnlessShutdown](
+ NonEmpty
+ .from(hybridParticipants)
+ .toLeft(())
+ .leftMap(hybridParticipants =>
+ s"The following participants are indicated as observing and confirming: $hybridParticipants"
+ )
+ )
+
+ _ <- EitherTUtil.condUnitET[FutureUnlessShutdown](
+ !observing.contains(reference.id),
+ "reference participant should not be observing",
+ )
+
+ hostingConfirming = (reference.id +: additionalConfirming).map(
+ HostingParticipant(_, ParticipantPermission.Confirmation)
+ )
+
+ hostingObserving = observing.map(
+ HostingParticipant(_, ParticipantPermission.Observation)
+ )
+
+ partyToParticipantTx = TopologyTransaction(
+ TopologyChangeOp.Replace,
+ serial = PositiveInt.one,
+ PartyToParticipant.tryCreate(
+ partyId = party.partyId,
+ threshold = confirmationThreshold,
+ participants = hostingConfirming ++ hostingObserving,
+ ),
+ protocolVersion,
+ )
+
+ onboardingTransactions <- bundle_onboarding_transactions(
+ partyId = party.partyId,
+ protocolSigningKeys = party.signingFingerprints,
+ protocolVersion = protocolVersion,
+ namespaceTx = namespaceDelegationTx,
+ partyToKeyTx = partyToKeyTx,
+ partyToParticipantTx = partyToParticipantTx,
+ decentralizedNamespaceOwners = Set.empty,
+ )
+
+ } yield onboardingTransactions
+ }
+
+ /** Sign the onboarding transactions and build an [[OnboardingTransactions]]
+ */
+ private def
bundle_onboarding_transactions( + partyId: PartyId, + protocolSigningKeys: NonEmpty[Seq[Fingerprint]], + protocolVersion: ProtocolVersion, + namespaceTx: TopologyTransaction[TopologyChangeOp.Replace, TopologyMapping], + partyToKeyTx: TopologyTransaction[TopologyChangeOp.Replace, PartyToKeyMapping], + partyToParticipantTx: TopologyTransaction[TopologyChangeOp.Replace, PartyToParticipant], + decentralizedNamespaceOwners: Set[Namespace], + ): EitherT[FutureUnlessShutdown, String, OnboardingTransactions] = { + val transactionHashes = NonEmpty.mk( + Set, + namespaceTx.hash, + partyToParticipantTx.hash, + partyToKeyTx.hash, + ) + + val combinedMultiTxHash = MultiTransactionSignature.computeCombinedHash( + transactionHashes, + consoleEnvironment.tryGlobalCrypto.pureCrypto, + ) + + val decentralizedOwnersNEO = NonEmpty.from(decentralizedNamespaceOwners) + + val namespaceFingerprints = decentralizedOwnersNEO + .map(_.map(_.fingerprint)) + .getOrElse(NonEmpty.mk(Set, partyId.fingerprint)) + + // Sign the multi hash with the namespace keys, as it is needed to authorize all transactions + val namespaceSignatures = namespaceFingerprints.toSeq.map( + consoleEnvironment.global_secret.sign( combinedMultiTxHash.getCryptographicEvidence, - namespaceKey.fingerprint, + _, NonEmpty.mk(Set, SigningKeyUsage.Namespace: SigningKeyUsage), ) + ) + for { // The protocol key signature is only needed on the party to key mapping, so we can sign only that protocolSignatures <- protocolSigningKeys.toNEF .parTraverse { protocolSigningKey => consoleEnvironment.tryGlobalCrypto.privateCrypto .sign( partyToKeyTx.hash.hash, - protocolSigningKey.fingerprint, + protocolSigningKey, NonEmpty.mk(Set, SigningKeyUsage.Protocol), ) } .leftMap(_.toString) .map(_.toSeq) - multiTxSignatures = NonEmpty.mk( - Seq, - MultiTransactionSignature(transactionHashes, namespaceSignature), + multiTxSignatures = namespaceSignatures.map(namespaceSignature => + MultiTransactionSignature(transactionHashes, namespaceSignature) ) - signedNamespaceDelegation = SignedTopologyTransaction + signedNamespace = SignedTopologyTransaction .withTopologySignatures( - namespaceDelegationTx, + namespaceTx, multiTxSignatures, isProposal = false, protocolVersion, @@ -430,24 +749,22 @@ class ParticipantPartiesAdministrationGroup( .addSingleSignatures(protocolSignatures.toSet) } yield { val keys = Map( - "namespace-delegation" -> signedNamespaceDelegation, + "namespace" -> signedNamespace, "party-to-participant" -> signedPartyToParticipant, "party-to-key" -> signedPartyToKey, ).view.mapValues(_.signatures.map(_.authorizingLongTermKey).mkString(", ")) logger.info( - s"Generated onboarding transactions for external party $name with id $partyId: $keys" + s"Generated onboarding transactions for external party ${partyId.identifier} with id $partyId: $keys" ) - ( - OnboardingTransactions( - signedNamespaceDelegation, - signedPartyToParticipant, - signedPartyToKey, - ), - ExternalParty(partyId, protocolSigningKeys.map(_.fingerprint)), + OnboardingTransactions( + signedNamespace, + signedPartyToParticipant, + signedPartyToKey, ) } + } } /** @return @@ -462,9 +779,11 @@ class ParticipantPartiesAdministrationGroup( case Seq() => Left("not connected to any synchronizer") case Seq(onlyOneSynchronizer) => Right(onlyOneSynchronizer.physicalSynchronizerId) - case _multiple => + case multiple => + val psids = multiple.map(_.physicalSynchronizerId) + Left( - "cannot automatically determine synchronizer, because participant is connected to more than 1 synchronizer" + s"cannot automatically 
determine synchronizer, because participant is connected to more than 1 synchronizer: $psids"
 )
 }
 alias
@@ -782,8 +1101,8 @@ class ParticipantPartiesAdministrationGroup(
 |target participant.
 |
 |Upon successful completion, the command writes a GZIP-compressed ACS
- |snapshot file. This file can then be imported into the target participant's
- |ACS using the `import_acs` repair command.
+ |snapshot file. This file should then be imported into the target participant's
+ |ACS using the `import_party_acs` command.
 |
 |The arguments are:
 |- party: The party being replicated; it must already be active on the target participant.
@@ -791,7 +1110,7 @@ class ParticipantPartiesAdministrationGroup(
 |- targetParticipantId: Unique identifier of the target participant where the party
 | will be replicated.
 |- beginOffsetExclusive: Exclusive ledger offset used as starting point to find the party's
- | most recent activation on the target participant.
+ | activation on the target participant.
 |- exportFilePath: The path denoting the file where the ACS snapshot will be stored.
 |- waitForActivationTimeout: The maximum duration the service will wait to find the topology
 | transaction that activates the party on the target participant.
@@ -834,6 +1153,77 @@ class ParticipantPartiesAdministrationGroup(
 )
 }
+ @Help.Summary(
+ "Import active contracts from a snapshot file to replicate a party."
+ )
+ @Help.Description(
+ """This command imports contracts from an Active Contract Set (ACS) snapshot
+ |file into the participant's ACS. It expects the given ACS snapshot file to
+ |be the result of a previous `export_party_acs` command invocation.
+ |
+ |The argument is:
+ |- importFilePath: The path denoting the file from where the ACS snapshot will be read.
+ | Defaults to "canton-acs-export.gz" when undefined.
+ """
+ )
+ def import_party_acs(
+ importFilePath: String = "canton-acs-export.gz"
+ ): Unit =
+ consoleEnvironment.run {
+ reference.adminCommand(
+ ParticipantAdminCommands.PartyManagement.ImportPartyAcs(
+ ByteString.copyFrom(File(importFilePath).loadBytes)
+ )
+ )
+ }
+
+ @Help.Summary(
+ "Finalize the party onboarding"
+ )
+ @Help.Description(
+ """Finalizes a party's onboarding by having the target participant unilaterally remove
+ |the onboarding flag on the party to participant topology mapping.
+ |
+ |The successful removal depends on the change history of the dynamic synchronizer's
+ |confirmation response timeout and the mediator reaction timeout parameters.
+ |
+ |Returns a tuple with a boolean and a timestamp:
+ |- True if the party onboarding was successfully finalized, false otherwise.
+ |- If onboarding finalization failed, then the timestamp suggests the earliest time to retry
+ | the call.
+ |
+ |The arguments are:
+ |- party: The party being onboarded; it must already be active on the target participant.
+ |- synchronizerId: Restricts the party onboarding to the given synchronizer.
+ |- targetParticipantId: Unique identifier of the target participant where the party
+ | has been onboarded.
+ |- beginOffsetExclusive: Exclusive ledger offset used as starting point to find the party's
+ | activation on the target participant.
+ |- waitForActivationTimeout: The maximum duration the service will wait to find the topology
+ | transaction that activates the party on the target participant.
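The retry contract described above (a success flag plus a suggested earliest retry time) can be exercised with a small driver. This self-contained sketch stubs out the command itself and only shows the retry shape, so all names are hypothetical.

```scala
// Standalone sketch: retry complete_party_onboarding-style calls, honouring
// the suggested earliest retry timestamp returned on failure.
import java.time.{Duration, Instant}

object OnboardingRetryDemo extends App {
  private var calls = 0

  // Stub standing in for the real command: fails once, then succeeds.
  def completeOnboarding(): (Boolean, Option[Instant]) = {
    calls += 1
    if (calls >= 2) (true, None) else (false, Some(Instant.now().plusMillis(100)))
  }

  @annotation.tailrec
  def finalizeWithRetry(attemptsLeft: Int): Boolean =
    completeOnboarding() match {
      case (true, _)                   => true
      case (_, _) if attemptsLeft <= 0 => false
      case (_, earliestRetry) =>
        // Wait until the suggested earliest retry time, if one was returned.
        earliestRetry.foreach { t =>
          val wait = Duration.between(Instant.now(), t)
          if (!wait.isNegative) Thread.sleep(wait.toMillis)
        }
        finalizeWithRetry(attemptsLeft - 1)
    }

  println(finalizeWithRetry(attemptsLeft = 3)) // true on the second attempt
}
```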
+ """ + ) + def complete_party_onboarding( + party: PartyId, + synchronizerId: SynchronizerId, + targetParticipantId: ParticipantId, + beginOffsetExclusive: Long, + waitForActivationTimeout: Option[config.NonNegativeFiniteDuration] = Some( + config.NonNegativeFiniteDuration.ofMinutes(2) + ), + ): (Boolean, Option[CantonTimestamp]) = + consoleEnvironment.run { + reference.adminCommand( + ParticipantAdminCommands.PartyManagement.CompletePartyOnboarding( + party, + synchronizerId, + targetParticipantId, + NonNegativeLong.tryCreate(beginOffsetExclusive), + waitForActivationTimeout, + ) + ) + } + } private[canton] object PartiesAdministration { diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/SequencerAdministration.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/SequencerAdministration.scala index 0027052695..49d6dcd83e 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/SequencerAdministration.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/SequencerAdministration.scala @@ -5,7 +5,9 @@ package com.digitalasset.canton.console.commands import com.digitalasset.canton.admin.api.client.commands.SequencerAdminCommands.{ InitializeFromGenesisState, + InitializeFromGenesisStateV2, InitializeFromOnboardingState, + InitializeFromOnboardingStateV2, InitializeFromSynchronizerPredecessor, } import com.digitalasset.canton.admin.api.client.commands.{GrpcAdminCommand, SequencerAdminCommands} @@ -25,7 +27,10 @@ import com.digitalasset.canton.console.{ import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.grpc.ByteStringStreamObserver import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.sequencer.admin.v30.OnboardingStateResponse +import com.digitalasset.canton.sequencer.admin.v30.{ + OnboardingStateResponse, + OnboardingStateV2Response, +} import com.digitalasset.canton.synchronizer.sequencer.SequencerSnapshot import com.digitalasset.canton.synchronizer.sequencer.admin.grpc.InitializeSequencerResponse import com.digitalasset.canton.topology.SequencerId @@ -89,6 +94,27 @@ class SequencerAdministration(node: SequencerReference) extends ConsoleCommandGr processResult(call, responseObserver.resultBytes, timeout, "Downloading onboarding state") } + @Help.Summary( + "Download the onboarding state for a given sequencer" + ) + def onboarding_state_for_sequencerV2( + sequencerId: SequencerId, + timeout: NonNegativeDuration = timeouts.unbounded, + ): ByteString = + consoleEnvironment.run { + val responseObserver = + new ByteStringStreamObserver[OnboardingStateV2Response](_.onboardingStateForSequencer) + + def call = + runner.adminCommand( + SequencerAdminCommands.OnboardingStateV2( + observer = responseObserver, + sequencerOrTimestamp = Left(sequencerId), + ) + ) + processResult(call, responseObserver.resultBytes, timeout, "Downloading onboarding state") + } + @Help.Summary( "Initialize a sequencer from the beginning of the event stream. This should only be called for " + "sequencer nodes being initialized at the same time as the corresponding synchronizer node. " + @@ -111,6 +137,28 @@ class SequencerAdministration(node: SequencerReference) extends ConsoleCommandGr } } + @Help.Summary( + "Initialize a sequencer from the beginning of the event stream. This should only be called for " + + "sequencer nodes being initialized at the same time as the corresponding synchronizer node. 
" + + "This is called as part of the synchronizer.setup.bootstrap command, so you are unlikely to need to call this directly." + ) + def assign_from_genesis_stateV2( + genesisState: ByteString, + synchronizerParameters: StaticSynchronizerParameters, + waitForReady: Boolean = true, + ): InitializeSequencerResponse = { + if (waitForReady) node.health.wait_for_ready_for_initialization() + + consoleEnvironment.run { + runner.adminCommand( + InitializeFromGenesisStateV2( + genesisState, + synchronizerParameters.toInternal, + ) + ) + } + } + @Help.Summary( "Initialize a sequencer for the logical upgrade from the state of its predecessor" ) @@ -148,6 +196,23 @@ class SequencerAdministration(node: SequencerReference) extends ConsoleCommandGr } } + @Help.Summary( + "Dynamically initialize a sequencer from a point later than the beginning of the event stream." + ) + def assign_from_onboarding_stateV2( + onboardingState: ByteString, + waitForReady: Boolean = true, + ): InitializeSequencerResponse = { + if (waitForReady && !node.health.initialized()) + node.health.wait_for_ready_for_initialization() + + consoleEnvironment.run { + runner.adminCommand( + InitializeFromOnboardingStateV2(onboardingState) + ) + } + } + } class SequencerHealthAdministration( diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministrationGroup.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministrationGroup.scala index 3679fdb4bd..848622a656 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministrationGroup.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministrationGroup.scala @@ -44,7 +44,9 @@ import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId.Authorized import com.digitalasset.canton.topology.admin.grpc.{BaseQuery, TopologyStoreId} import com.digitalasset.canton.topology.admin.v30.{ ExportTopologySnapshotResponse, + ExportTopologySnapshotV2Response, GenesisStateResponse, + GenesisStateV2Response, LogicalUpgradeStateResponse, } import com.digitalasset.canton.topology.store.{ @@ -273,6 +275,19 @@ class TopologyAdministrationGroup( writeToFile(file, bytes) } + @Help.Summary("Serializes node's topology identity transactions to a file") + @Help.Description( + "Transactions serialized this way should be loaded into another node with load_from_file" + ) + def export_identity_transactionsV2(file: String): Unit = { + val bytes = instance.topology.transactions + .export_topology_snapshotV2( + filterMappings = Seq(NamespaceDelegation.code, OwnerToKeyMapping.code), + filterNamespace = instance.namespace.filterString, + ) + writeToFile(file, bytes) + } + @Help.Summary("Loads topology transactions from a file into the specified topology store") @Help.Description("The file must contain data serialized by TopologyTransactions.") def import_topology_snapshot_from(file: String, store: TopologyStoreId): Unit = @@ -298,6 +313,34 @@ class TopologyAdministrationGroup( ) } + @Help.Summary("Loads topology transactions from a file into the specified topology store") + @Help.Description("The file must contain data serialized by TopologyTransactions.") + def import_topology_snapshot_fromV2(file: String, store: TopologyStoreId): Unit = + BinaryFileUtil + .readByteStringFromFile(file) + .map(import_topology_snapshotV2(_, store)) + .valueOr { err => + throw new IllegalArgumentException(s"import_topology_snapshot failed: 
$err") + } + + def import_topology_snapshotV2( + topologyTransactions: ByteString, + store: TopologyStoreId, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.unbounded + ), + ): Unit = + consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Write + .ImportTopologySnapshotV2( + topologyTransactions, + store, + synchronize, + ) + ) + } + def load( transactions: Seq[GenericSignedTopologyTransaction], store: TopologyStoreId, @@ -417,6 +460,45 @@ class TopologyAdministrationGroup( ) } + @Help.Summary("Authorize a transaction by its hash") + def authorize( + synchronizerId: SynchronizerId, + txHash: TxHash, + ): SignedTopologyTransaction[TopologyChangeOp, TopologyMapping] = + authorize[TopologyMapping]( + txHash, + mustBeFullyAuthorized = false, + TopologyStoreId.Synchronizer(synchronizerId), + ) + + @Help.Summary("Propose a transaction") + @Help.Description("Raw access to admin API command") + def propose[M <: TopologyMapping: ClassTag]( + mapping: M, + store: TopologyStoreId, + signedBy: Seq[Fingerprint] = Seq.empty, + serial: Option[PositiveInt] = None, + change: TopologyChangeOp = TopologyChangeOp.Replace, + mustFullyAuthorize: Boolean = true, + forceChanges: ForceFlags = ForceFlags.none, + waitToBecomeEffective: Option[NonNegativeDuration] = None, + ): SignedTopologyTransaction[TopologyChangeOp, M] = + consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Write.Propose( + mapping = mapping, + signedBy = signedBy, + store = store, + serial = serial, + change = change, + mustFullyAuthorize = mustFullyAuthorize, + forceChanges = forceChanges, + waitToBecomeEffective = waitToBecomeEffective, + ) + ) + } + + @Help.Summary("Authorize a transaction by its hash") def authorize[M <: TopologyMapping: ClassTag]( txHash: TxHash, mustBeFullyAuthorized: Boolean, @@ -537,6 +619,68 @@ class TopologyAdministrationGroup( } } + @Help.Summary("export topology snapshot") + @Help.Description( + """This command export the node's topology transactions as byte string. + | + |The arguments are: + |excludeMappings: a list of topology mapping codes to exclude from the export. If not provided, all mappings are included. + |filterNamespace: the namespace to filter the transactions by. + |protocolVersion: the protocol version used to serialize the topology transactions. If not provided, the latest protocol version is used. 
+ """ + ) + def export_topology_snapshotV2( + store: TopologyStoreId = TopologyStoreId.Authorized, + proposals: Boolean = false, + timeQuery: TimeQuery = TimeQuery.HeadState, + operation: Option[TopologyChangeOp] = None, + filterMappings: Seq[TopologyMapping.Code] = Nil, + excludeMappings: Seq[TopologyMapping.Code] = Nil, + filterAuthorizedKey: Option[Fingerprint] = None, + protocolVersion: Option[String] = None, + filterNamespace: String = "", + timeout: NonNegativeDuration = timeouts.unbounded, + ): ByteString = { + if (filterMappings.nonEmpty && excludeMappings.nonEmpty) { + consoleEnvironment.run( + CommandErrors + .GenericCommandError("Cannot specify both filterMappings and excludeMappings") + ) + } + val excludeMappingsCodes = if (filterMappings.nonEmpty) { + TopologyMapping.Code.all.diff(filterMappings).map(_.code) + } else excludeMappings.map(_.code) + + consoleEnvironment + .run { + val responseObserver = + new ByteStringStreamObserver[ExportTopologySnapshotV2Response](_.chunk) + + def call: ConsoleCommandResult[Context.CancellableContext] = + adminCommand( + TopologyAdminCommands.Read.ExportTopologySnapshotV2( + responseObserver, + BaseQuery( + store, + proposals, + timeQuery, + operation, + filterSigningKey = filterAuthorizedKey.map(_.toProtoPrimitive).getOrElse(""), + protocolVersion.map(ProtocolVersion.tryCreate), + ), + excludeMappings = excludeMappingsCodes, + filterNamespace = filterNamespace, + ) + ) + processResult( + call, + responseObserver.resultBytes, + timeout, + s"Exporting the topology state from store $store", + ) + } + } + @Help.Summary( "Download the genesis state for a sequencer. This method should be used when performing a major synchronizer upgrade." ) @@ -566,6 +710,35 @@ class TopologyAdministrationGroup( processResult(call, responseObserver.resultBytes, timeout, "Downloading the genesis state") } + @Help.Summary( + "Download the genesis state for a sequencer. This method should be used when performing a major synchronizer upgrade." + ) + @Help.Description( + """Download the topology snapshot which includes the entire history of topology transactions to initialize a sequencer for a major synchronizer upgrades. The validFrom and validUntil are set to SignedTopologyTransaction.InitialTopologySequencingTime. + |filterSynchronizerStore: Must be specified if the genesis state is requested from a participant node. + |timestamp: If not specified, the max effective time of the latest topology transaction is used. Otherwise, the given timestamp is used. + """ + ) + def genesis_stateV2( + filterSynchronizerStore: Option[TopologyStoreId.Synchronizer] = None, + timestamp: Option[CantonTimestamp] = None, + timeout: NonNegativeDuration = timeouts.unbounded, + ): ByteString = + consoleEnvironment.run { + val responseObserver = new ByteStringStreamObserver[GenesisStateV2Response](_.chunk) + + def call: ConsoleCommandResult[Context.CancellableContext] = + adminCommand( + TopologyAdminCommands.Read.GenesisStateV2( + responseObserver, + synchronizerStore = filterSynchronizerStore, + timestamp = timestamp, + ) + ) + + processResult(call, responseObserver.resultBytes, timeout, "Downloading the genesis state") + } + @Help.Summary( "Download the upgrade state for a sequencer. This method should be used when performing a logical synchronizer upgrade." 
 )
@@ -688,8 +861,7 @@ class TopologyAdministrationGroup(
 synchronizerId.logical,
 ConsoleDynamicSynchronizerParameters
 .initialValues(
- consoleEnvironment.environment.clock,
- synchronizerId.protocolVersion,
+ synchronizerId.protocolVersion
 ),
 signedBy = None,
 store = Some(store),
@@ -1740,6 +1912,7 @@ class TopologyAdministrationGroup(
 private[canton] def sign_and_remove(
 party: ExternalParty,
 synchronizer: Synchronizer,
+ forceFlags: ForceFlags = ForceFlags.none,
 synchronize: Option[config.NonNegativeDuration] = Some(
 consoleEnvironment.commandTimeouts.bounded
 ),
@@ -1765,6 +1938,7 @@ class TopologyAdministrationGroup(
 Seq(consoleEnvironment.global_secret.sign(transaction, party, psid.protocolVersion)),
 psid,
 synchronize = synchronize,
+ forceFlags = forceFlags,
 )
 case None =>
@@ -1816,6 +1990,24 @@ class TopologyAdministrationGroup(
 )
 }
+ @Help.Summary("List multi-hosted party proposals")
+ @Help.Description("""Multi-hosted parties require all involved actors to sign the topology transaction.
+ Topology transactions without sufficient signatures are called proposals. They are
+ distributed the same way as fully authorized topology transactions, and signatures
+ are aggregated until the transaction is fully authorized.
+ This method allows you to inspect the pending queue of open hosting proposals.
+ The proposals are returned as seen on the specified synchronizer. They can be approved
+ by the individual participants by invoking node.topology.transactions.authorize(<synchronizerId>, <txHash>).
+ """)
+ def list_hosting_proposals(
+ synchronizerId: SynchronizerId,
+ participantId: ParticipantId,
+ ): Seq[ListMultiHostingProposal] = list(
+ synchronizerId,
+ proposals = true,
+ filterParticipant = participantId.filterString,
+ ).mapFilter(ListMultiHostingProposal.mapFilter(participantId))
+
 /** Check whether the node knows about `party` being hosted on `hostingParticipants` and
 * synchronizer `synchronizerId`, optionally the specified expected permission and threshold.
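The proposal life cycle behind `list_hosting_proposals` reduces to a small state machine, modelled here in a self-contained sketch (simplified types, not the Canton API): signatures accumulate on a proposal until every required signer has signed, at which point it stops being a proposal.

```scala
// Standalone sketch: each hosting participant authorizes the transaction by
// adding its signature; it is fully authorized once all required signers are covered.
final case class Proposal(requiredSigners: Set[String], signatures: Set[String]) {
  def authorize(signer: String): Proposal =
    if (requiredSigners.contains(signer)) copy(signatures = signatures + signer) else this
  def fullyAuthorized: Boolean = requiredSigners.subsetOf(signatures)
}

object ProposalDemo extends App {
  val p0 = Proposal(requiredSigners = Set("participant1", "participant2"), signatures = Set.empty)
  val p1 = p0.authorize("participant1")
  println(p1.fullyAuthorized) // false: participant2 has not signed yet
  println(p1.authorize("participant2").fullyAuthorized) // true
}
```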
* @param synchronizerId @@ -2067,11 +2259,13 @@ class TopologyAdministrationGroup( mustFullyAuthorize: Boolean = true, serial: Option[PositiveInt] = None, change: TopologyChangeOp = TopologyChangeOp.Replace, + featureFlags: Seq[SynchronizerTrustCertificate.ParticipantTopologyFeatureFlag] = Seq.empty, ): SignedTopologyTransaction[TopologyChangeOp, SynchronizerTrustCertificate] = { val cmd = TopologyAdminCommands.Write.Propose( mapping = SynchronizerTrustCertificate( participantId, synchronizerId, + featureFlags, ), signedBy = Seq.empty, store = store.getOrElse(synchronizerId), @@ -2474,10 +2668,10 @@ class TopologyAdministrationGroup( runAdminCommand(command).discard } - @Help.Summary("List mediator synchronizer topology state") + @Help.Summary("List vetted packages") @Help.Description( """ - synchronizerId: the optional target synchronizer + store: the optional topology store to query from proposals: if true then proposals are shown, otherwise actual validated state """ ) @@ -2509,6 +2703,14 @@ class TopologyAdministrationGroup( @Help.Summary("Inspect mediator synchronizer state") @Help.Group("Mediator Synchronizer State") object mediators extends Helpful { + + @Help.Summary("List mediator synchronizer topology state") + @Help.Description( + """ + synchronizerId: the optional target synchronizer + proposals: if true then proposals are shown, otherwise actual validated state + """ + ) def list( synchronizerId: Option[SynchronizerId] = None, proposals: Boolean = false, @@ -3069,8 +3271,8 @@ class TopologyAdministrationGroup( if ( oldSynchronizerParameters.mediatorDeduplicationTimeout < minMediatorDeduplicationTimeout ) { - val err: RpcError = TopologyManagerError.IncreaseOfPreparationTimeRecordTimeTolerance - .PermanentlyInsecure( + val err: RpcError = + TopologyManagerError.IncreaseOfPreparationTimeRecordTimeTolerance.PermanentlyInsecure( newPreparationTimeRecordTimeTolerance.toInternal, oldSynchronizerParameters.mediatorDeduplicationTimeout.toInternal, ) diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/declarative/DeclarativeApi.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/declarative/DeclarativeApi.scala index f1b4c94e5e..0b0fd771cc 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/declarative/DeclarativeApi.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/declarative/DeclarativeApi.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.console.declarative import cats.syntax.either.* +import cats.syntax.traverse.* import com.digitalasset.canton.auth.CantonAdminToken import com.digitalasset.canton.config import com.digitalasset.canton.config.RequireTypes.PositiveInt @@ -12,8 +13,8 @@ import com.digitalasset.canton.lifecycle.CloseContext import com.digitalasset.canton.logging.NamedLogging import com.digitalasset.canton.metrics.DeclarativeApiMetrics import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.retry import com.digitalasset.canton.util.retry.{NoExceptionRetryPolicy, Success} +import com.digitalasset.canton.util.{MonadUtil, retry} import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference} import scala.annotation.tailrec @@ -114,7 +115,11 @@ trait DeclarativeApi[Cfg, Prep] extends DeclarativeApiHandle[Cfg] with NamedLogg * a function to fetch the current state. the function takes a limit argument. If the fetch * returns the max limit, we assume that we need to fetch more. 
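A self-contained sketch of the growing-limit fetch described above (hypothetical names, not the Canton implementation): whenever a fetch returns as many items as the limit allows, the limit is doubled and the fetch retried, with a warning that the node is being managed at a scale the config-based sync was not designed for.

```scala
// Standalone sketch of the fetchAll behaviour.
object FetchAllDemo extends App {
  @annotation.tailrec
  def fetchAll[T](fetch: Int => Either[String, Seq[T]], limit: Int = 100): Either[String, Seq[T]] =
    fetch(limit) match {
      case Right(items) if items.sizeIs >= limit =>
        println(s"warn: hit fetch limit $limit, refetching with limit ${limit * 2}")
        fetchAll(fetch, limit * 2)
      case other => other
    }

  val data = (1 to 250).toVector
  // Two refetches: 100 -> 200 -> 400, at which point all 250 items fit.
  println(fetchAll[Int](limit => Right(data.take(limit))).map(_.size)) // Right(250)
}
```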
The system will start to emit * warnings but increase the fetch limit to remain functional but to warn the user that they - * are reaching the limits of managing a node through a config file. + * are reaching the limits of managing a node through a config file. This is used when + * removeExcess is configured + * @param get + * a function to fetch the current state for a specific key. this is used for scalability + * reasons to avoid fetching the entire state. * @param add * if the runner finds a value (K,V) in the want set which is not in the have set, it will * invoke the add function. @@ -140,7 +145,10 @@ trait DeclarativeApi[Cfg, Prep] extends DeclarativeApiHandle[Cfg] with NamedLogg removeExcess: Boolean, checkSelfConsistent: Boolean, want: Seq[(K, V)], - fetch: PositiveInt => Either[String, Seq[(K, V)]], + fetch: PositiveInt => Either[String, Seq[ + (K, V) + ]], + get: K => Either[String, Option[V]], add: (K, V) => Either[String, Unit], upd: (K, V, V) => Either[String, Unit], rm: (K, V) => Either[String, Unit], @@ -178,25 +186,22 @@ trait DeclarativeApi[Cfg, Prep] extends DeclarativeApiHandle[Cfg] with NamedLogg } // positive cycle - def addOrUpdate(have: Map[K, V]): UpdateResult = - want.map { case (k, v) => (k, v, have.get(k)) }.foldLeft(UpdateResult()) { case (acc, item) => - acc.accumulate(addOrUpdate = true)(update(item)) + def addOrUpdate(): Either[String, UpdateResult] = + MonadUtil.foldLeftM(UpdateResult(), want) { case (acc, (k, v)) => + get(k).map(current => acc.accumulate(addOrUpdate = true)(update((k, v, current)))) } // negative cycle - def removeItems(have: Map[K, V], result: UpdateResult): UpdateResult = { - val toRemove = have.keySet.diff(want.map(_._1).toSet) - if (removeExcess) { + def removeItems(result: UpdateResult): Either[String, UpdateResult] = if (!removeExcess) + Right(result) + else { + for { + all <- fetchAll(fetch).map(_.toMap) + } yield { + val toRemove = all.keySet.diff(want.map(_._1).toSet) toRemove.foldLeft(result) { case (acc, id) => - acc.accumulate(addOrUpdate = false)(wrapResult(id, "remove", rm(id, have(id)))) + acc.accumulate(addOrUpdate = false)(wrapResult(id, "remove", rm(id, all(id)))) } - } else { - if (toRemove.nonEmpty) { - logger.debug( - s"There are ${toRemove.size} $name which are not in the config, but the sync is configured to not remove them." 
- ) - } - result } } @@ -204,10 +209,10 @@ trait DeclarativeApi[Cfg, Prep] extends DeclarativeApiHandle[Cfg] with NamedLogg def runSelfConsistencyCheck(result: UpdateResult): Either[String, Unit] = if ( checkSelfConsistent && result.failed == 0 ) { - fetchAll(fetch).map(_.toMap).flatMap { haveMap => - val consistent: Boolean = want - .map { case (k, v) => (k, v, haveMap.get(k)) } - .map { + val consistentE = + want + .traverse { case (k, v) => get(k).map(vn => (k, v, vn)) } + .map(_.map { case (k, _, None) => logger.error(s"$name not found after sync: $k") false @@ -216,19 +221,37 @@ trait DeclarativeApi[Cfg, Prep] extends DeclarativeApiHandle[Cfg] with NamedLogg false case _ => true } - .forall(identity) + .forall(identity)) - val res = if (removeExcess) { - haveMap.keySet + val noExcessE = if (removeExcess) { + fetchAll(fetch).map { haveMap => + haveMap + .map(_._1) + .toSet .diff(want.map(_._1).toSet) .map { k => logger.error(s"$name not removed after sync: $k") false } - .forall(identity) && consistent - } else consistent - Either.cond(res, (), s"Self-consistency check failed for $name, cons=$consistent") - } + .forall(identity) + } + } else Right(true) + + for { + consistent <- consistentE + _ <- Either.cond( + consistent, + (), + s"Self-consistency check failed for $name, due to want items not matching", + ) + noExcess <- noExcessE + _ <- Either.cond( + noExcess, + (), + s"Self-consistency check failed for $name, as some items were not removed", + ) + } yield () + } else Either.unit def waitUntilItemsAreRegistered(): Either[String, Unit] = await @@ -240,13 +263,14 @@ trait DeclarativeApi[Cfg, Prep] extends DeclarativeApiHandle[Cfg] with NamedLogg } .getOrElse(Either.unit) - for { - have <- fetchAll(fetch).map(_.toMap) - resultAfterAddOrUpdate = addOrUpdate(have) - resultAfterAll = removeItems(have, resultAfterAddOrUpdate) - _ <- waitUntilItemsAreRegistered() - _ <- runSelfConsistencyCheck(resultAfterAll) - } yield resultAfterAll + if (want.nonEmpty || removeExcess) { + for { + resultAfterAddOrUpdate <- addOrUpdate() + resultAfterAll <- removeItems(resultAfterAddOrUpdate) + _ <- waitUntilItemsAreRegistered() + _ <- runSelfConsistencyCheck(resultAfterAll) + } yield resultAfterAll + } else Right(UpdateResult()) } diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/declarative/DeclarativeParticipantApi.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/declarative/DeclarativeParticipantApi.scala index 46e53212d4..471fee03ad 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/declarative/DeclarativeParticipantApi.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/declarative/DeclarativeParticipantApi.scala @@ -13,12 +13,21 @@ import com.digitalasset.canton.admin.api.client.commands.{ ParticipantAdminCommands, TopologyAdminCommands, } -import com.digitalasset.canton.admin.api.client.data.LedgerApiUser +import com.digitalasset.canton.admin.api.client.data.{ + LedgerApiUser, + ListConnectedSynchronizersResult, +} import com.digitalasset.canton.auth.CantonAdminToken import com.digitalasset.canton.config.ClientConfig import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.console.GrpcAdminCommandRunner +import com.digitalasset.canton.console.CommandErrors.{CommandError, GenericCommandError} import com.digitalasset.canton.console.declarative.DeclarativeApi.UpdateResult +import 
com.digitalasset.canton.console.declarative.DeclarativeParticipantApi.{ + Err, + NotFound, + QueryResult, +} +import com.digitalasset.canton.console.{CommandSuccessful, GrpcAdminCommandRunner} import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.ledger.api import com.digitalasset.canton.ledger.api.IdentityProviderId @@ -30,7 +39,7 @@ import com.digitalasset.canton.participant.admin.AdminWorkflowServices import com.digitalasset.canton.participant.config.* import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig import com.digitalasset.canton.sequencing.{GrpcSequencerConnection, SequencerConnectionValidation} -import com.digitalasset.canton.topology.admin.grpc.BaseQuery +import com.digitalasset.canton.topology.admin.grpc.{BaseQuery, TopologyStoreId} import com.digitalasset.canton.topology.store.TimeQuery import com.digitalasset.canton.topology.transaction.{ HostingParticipant, @@ -40,14 +49,14 @@ import com.digitalasset.canton.topology.transaction.{ } import com.digitalasset.canton.topology.{ParticipantId, PartyId, SynchronizerId, UniqueIdentifier} import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.BinaryFileUtil +import com.digitalasset.canton.util.{BinaryFileUtil, MonadUtil} import com.digitalasset.canton.{SynchronizerAlias, config} import com.digitalasset.daml.lf.archive.DarParser import com.google.protobuf.field_mask.FieldMask import java.io.{File, FileInputStream} import java.util.zip.ZipInputStream -import scala.collection.mutable +import scala.annotation.tailrec import scala.concurrent.ExecutionContext class DeclarativeParticipantApi( @@ -68,7 +77,9 @@ class DeclarativeParticipantApi( closeContext.context .runOnOrAfterClose(new RunOnClosing { override def name: String = "stop-declarative-api" + override def done: Boolean = false + override def run()(implicit traceContext: TraceContext): Unit = LifeCycle.close(adminApiRunner, ledgerApiRunner)(logger) })(TraceContext.empty) @@ -80,24 +91,48 @@ class DeclarativeParticipantApi( runner: GrpcAdminCommandRunner, cfg: ClientConfig, command: GrpcAdminCommand[_, _, Result], - )(implicit traceContext: TraceContext): Either[String, Result] = if ( + )(implicit traceContext: TraceContext): Either[QueryResult, Result] = if ( closeContext.context.isClosing ) - Left("Node is shutting down") + Left(Err("Node is shutting down")) else - activeAdminToken.fold(Left("Node instance is passive"): Either[String, Result])(token => - runner.runCommandWithExistingTrace(name, command, cfg, Some(token.secret)).toEither + activeAdminToken.fold(Left(Err("Node instance is passive")): Either[QueryResult, Result])( + token => + runner.runCommandWithExistingTrace(name, command, cfg, Some(token.secret)) match { + case CommandSuccessful(value) => Right(value) + case GenericCommandError(cause) if cause.contains("NOT_FOUND/") => + Left(NotFound(cause)) + case c: CommandError => Left(Err(c.cause)) + } ) private def queryAdminApi[Result]( command: GrpcAdminCommand[_, _, Result] )(implicit traceContext: TraceContext): Either[String, Result] = - queryApi(adminApiRunner, adminApiConfig, command) + queryApi(adminApiRunner, adminApiConfig, command).leftMap(_.str) private def queryLedgerApi[Result]( command: GrpcAdminCommand[_, _, Result] )(implicit traceContext: TraceContext): Either[String, Result] = - queryApi(ledgerApiRunner, ledgerApiConfig, command) + queryApi(ledgerApiRunner, ledgerApiConfig, command).leftMap(_.str) + + private def toOptionalE[Result]( + res: 
Either[QueryResult, Result] + ): Either[String, Option[Result]] = res match { + case Right(v) => Right(Some(v)) + case Left(NotFound(_)) => Right(None) + case Left(Err(str)) => Left(str) + } + + private def queryAdminApiIfExists[Result]( + command: GrpcAdminCommand[_, _, Result] + )(implicit traceContext: TraceContext): Either[String, Option[Result]] = + toOptionalE(queryApi(adminApiRunner, adminApiConfig, command)) + + private def queryLedgerApiIfExists[Result]( + command: GrpcAdminCommand[_, _, Result] + )(implicit traceContext: TraceContext): Either[String, Option[Result]] = + toOptionalE(queryApi(ledgerApiRunner, ledgerApiConfig, command)) override protected def prepare(config: DeclarativeParticipantConfig)(implicit traceContext: TraceContext @@ -180,6 +215,21 @@ class DeclarativeParticipantApi( ) ) + def fetchHostedTuple(filterParty: String, synchronizerId: SynchronizerId) = + fetchHosted(filterParty, synchronizerId).map(_.flatMap { party2Participant => + val maybePermission = party2Participant.item.participants + .find(_.participantId == participantId) + .map(_.permission) + maybePermission + .map(permission => + ( + (party2Participant.item.partyId.uid, synchronizerId), + ParticipantPermissionConfig.fromInternal(permission), + ) + ) + .toList + }) + def createTopologyTx( uid: UniqueIdentifier, synchronizerId: SynchronizerId, @@ -222,22 +272,34 @@ class DeclarativeParticipantApi( def awaitLedgerApiServer( parties: Seq[(UniqueIdentifier, SynchronizerId)] - ): Either[String, Boolean] = + ): Either[String, Boolean] = { + @tailrec + def go( + idps: List[String], + pending: Set[PartyId], + ): Either[String, Set[PartyId]] = if (pending.isEmpty) Right(Set.empty) + else + idps match { + case Nil => Right(pending) + case next :: rest => + val unknown = findPartiesNotKnownToIdp(next, pending) + unknown match { + case Right(unknown) => go(rest, unknown) + case Left(value) => Left(value) + } + } + for { idps <- queryLedgerApi(LedgerApiCommands.IdentityProviderConfigs.List()) // loop over all idps and include default one - observed <- (idps.map(_.identityProviderId) :+ "").flatTraverse(idp => - queryLedgerApi( - LedgerApiCommands.PartyManagementService.ListKnownParties(identityProviderId = idp) - ) + notFound <- go( + (idps.map(_.identityProviderId) :+ "").toList, + parties.map { case (uid, _) => PartyId(uid) }.toSet, ) - observedUids <- observed - .traverse(details => UniqueIdentifier.fromProtoPrimitive(details.party, "party")) - .leftMap(_.toString) } yield { - val observedSet = observedUids.toSet - parties.map(_._1).toSet.subsetOf(observedSet) + notFound.isEmpty } + } queryAdminApi(ListConnectedSynchronizers()) .flatMap { synchronizerIds => @@ -257,19 +319,7 @@ class DeclarativeParticipantApi( def fetchAll() = // fold synchronizers and found parties and find the ones that are allocated to our node synchronizerIds.map(_.synchronizerId).flatTraverse { synchronizerId => - fetchHosted(filterParty = "", synchronizerId).map(_.flatMap { party2Participant => - val maybePermission = party2Participant.item.participants - .find(_.participantId == participantId) - .map(_.permission) - maybePermission - .map(permission => - ( - (party2Participant.item.partyId.uid, synchronizerId), - ParticipantPermissionConfig.fromInternal(permission), - ) - ) - .toList - }) + fetchHostedTuple(filterParty = "", synchronizerId) } run[(UniqueIdentifier, SynchronizerId), ParticipantPermissionConfig]( @@ -278,18 +328,43 @@ class DeclarativeParticipantApi( checkSelfConsistency, want = wanted, fetch = _ => fetchAll(), + get = { 
case (uid, synchronizerId) => + fetchHostedTuple(uid.toProtoPrimitive, synchronizerId).map(_.toList).flatMap { + case (_, v) :: Nil => Right(Some(v)) + case Nil => Right(None) + case rst => + Left( + s"Multiple entries found for party $uid and synchronizer $synchronizerId: $rst" + ) + } + }, add = { case ((uid, synchronizerId), permission) => createTopologyTx(uid, synchronizerId, permission.toNative) }, upd = { case ((uid, synchronizerId), wantPermission, _) => createTopologyTx(uid, synchronizerId, wantPermission.toNative) }, - rm = { case ((u, s), current) => removeParty(u, s) }, + rm = { case ((u, s), current) => + removeParty(u, s) + }, await = Some(awaitLedgerApiServer), ) } } + private def findPartiesNotKnownToIdp(idp: String, parties: Set[PartyId])(implicit + traceContext: TraceContext + ) = + queryLedgerApi( + LedgerApiCommands.PartyManagementService.GetParties( + parties = parties.toList, + identityProviderId = idp, + failOnNotFound = false, + ) + ).map { found => + parties -- found.keySet + } + private def syncUsers( participantId: ParticipantId, users: Seq[DeclarativeUserConfig], @@ -299,28 +374,57 @@ class DeclarativeParticipantApi( traceContext: TraceContext ): Either[String, UpdateResult] = { - // temporarily cache the available parties - val idpParties = mutable.Map[String, Set[String]]() - def getIdpParties(idp: String): Either[String, Set[String]] = - idpParties.get(idp) match { - case Some(parties) => Right(parties) - case None => - queryLedgerApi( - LedgerApiCommands.PartyManagementService.ListKnownParties(identityProviderId = idp) - ).map { parties => - val partySet = parties.map(_.party).toSet - idpParties.put(idp, partySet).discard - partySet - } - } + def fetchUserRights(user: LedgerApiUser): Either[String, DeclarativeUserConfig] = user match { + case LedgerApiUser(id, primaryParty, isDeactivated, metadata, identityProviderId) => + queryLedgerApi( + LedgerApiCommands.Users.Rights.List(id = id, identityProviderId = identityProviderId) + ).map { rights => + DeclarativeUserConfig( + user = id, + primaryParty = primaryParty.map(_.toProtoPrimitive), + isDeactivated = isDeactivated, + annotations = metadata.annotations, + identityProviderId = identityProviderId, + rights = DeclarativeUserRightsConfig( + actAs = rights.actAs.map(_.toProtoPrimitive), + readAs = rights.readAs.map(_.toProtoPrimitive), + participantAdmin = rights.participantAdmin, + identityProviderAdmin = rights.identityProviderAdmin, + readAsAnyParty = rights.readAsAnyParty, + ), + )(resourceVersion = metadata.resourceVersion) + } + } + + val idpsE = queryLedgerApi( + LedgerApiCommands.IdentityProviderConfigs.List() + ).map(x => "" +: x.map(_.identityProviderId)) // empty string to load default idp) + + def getUser(id: String): Either[String, Option[DeclarativeUserConfig]] = idpsE.flatMap { idps => + MonadUtil + .foldLeftM(None: Option[LedgerApiUser], idps) { + case (Some(cfg), _) => Right(Some(cfg)) + case (None, idp) => + // using list user and not get user as get user will return permission denied and log warning + // however, as user ids are unique, this will give the same result + queryLedgerApiIfExists( + LedgerApiCommands.Users + .List(filterUser = id, identityProviderId = idp, pageToken = "", pageSize = 10000) + ).map { res => + res.flatMap(_.users.find(_.id == id)) + } + } + .flatMap { + case Some(user) => fetchUserRights(user).map(c => Some(c)) + case None => Right(None) + } + } def fetchUsers(limit: PositiveInt): Either[String, Seq[(String, DeclarativeUserConfig)]] = for { // meeh, we need to 
iterate over all idps to load all users - idps <- queryLedgerApi( - LedgerApiCommands.IdentityProviderConfigs.List() - ).map(_.map(_.identityProviderId)) - users <- (idps :+ "") // empty string to load default idp + idps <- idpsE + users <- idps .traverse(idp => queryLedgerApi( LedgerApiCommands.Users.List( @@ -332,29 +436,8 @@ class DeclarativeParticipantApi( ) ) .map(_.flatMap(_.users.filter(_.id != "participant_admin"))) - parsedUsers <- users.traverse { - case LedgerApiUser(id, primaryParty, isDeactivated, metadata, identityProviderId) => - queryLedgerApi( - LedgerApiCommands.Users.Rights.List(id = id, identityProviderId = identityProviderId) - ).map { rights => - ( - id, - DeclarativeUserConfig( - user = id, - primaryParty = primaryParty.map(_.toProtoPrimitive), - isDeactivated = isDeactivated, - annotations = metadata.annotations, - identityProviderId = identityProviderId, - rights = DeclarativeUserRightsConfig( - actAs = rights.actAs.map(_.toProtoPrimitive), - readAs = rights.readAs.map(_.toProtoPrimitive), - participantAdmin = rights.participantAdmin, - identityProviderAdmin = rights.identityProviderAdmin, - readAsAnyParty = rights.readAsAnyParty, - ), - )(resourceVersion = metadata.resourceVersion), - ) - } + parsedUsers <- users.traverse { user => + fetchUserRights(user).map(cfg => (cfg.user, cfg)) } } yield parsedUsers.take(limit.value) @@ -405,11 +488,13 @@ class DeclarativeParticipantApi( if (desired != existing) { def grantOrRevoke(have: Boolean, want: Boolean): (Boolean, Boolean) = if (have != want) if (want) (true, false) else (false, true) else (false, false) + def grantOrRevokeSet(have: Set[String], want: Set[String]): (Set[String], Set[String]) = { val grant = want.diff(have) val revoke = have.diff(want) (grant, revoke) } + val (grantParticipantAdmin, revokeParticipantAdmin) = grantOrRevoke(existing.participantAdmin, desired.participantAdmin) val (grantIdpAdmin, revokeIdpAdmin) = grantOrRevoke( @@ -479,29 +564,25 @@ class DeclarativeParticipantApi( ) } else Either.unit - def activePartyFilter(idp: String, user: String): Either[String, String => Boolean] = - getIdpParties(idp).map { parties => party => - { - if (!parties.contains(party)) { - logger.info(s"User $user refers to party $party not yet known to the ledger api server") - false - } else true - } - } - val wantedE = users.traverse { user => - activePartyFilter(user.identityProviderId, user.user).map { filter => - ( - user.user, - user.mapPartiesToNamespace( - participantId.uid.namespace, - filter, - ), - ) + val mapped = user.mapPartiesToNamespace( + participantId.uid.namespace + ) + // Ledger API server can only assign to parties it has already seen. Therefore, we remove + // the parties not yet known. This can happen if we don't have a synchronizer connection + // when setting up the users. 
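A self-contained sketch of this filtering step (simplified types; `removeParties` below is a stand-in for the real config method): rights referring to parties the Ledger API server has not yet seen are dropped and logged, since assigning them would fail.

```scala
// Standalone sketch: prune unknown parties from a desired user configuration
// before creating or updating the user.
final case class UserRights(actAs: Set[String], readAs: Set[String]) {
  def referencedParties: Set[String] = actAs ++ readAs
  def removeParties(unknown: Set[String]): UserRights =
    UserRights(actAs -- unknown, readAs -- unknown)
}

object UserFilterDemo extends App {
  val wanted = UserRights(actAs = Set("alice", "bob"), readAs = Set("carol"))
  val knownToLedgerApi = Set("alice", "carol")
  val unknown = wanted.referencedParties -- knownToLedgerApi
  if (unknown.nonEmpty) println(s"info: dropping rights for unknown parties: $unknown")
  println(wanted.removeParties(unknown)) // UserRights(Set(alice),Set(carol))
}
```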
+ findPartiesNotKnownToIdp(user.identityProviderId, mapped.referencedParties).map { unknown => + if (unknown.nonEmpty) { + logger.info( + s"User ${user.user} with idp=${user.identityProviderId} refers to the following parties not known to the Ledger API server: $unknown" + ) + } + (user.user, mapped.removeParties(unknown)) } } + wantedE.flatMap { wanted => run[String, DeclarativeUserConfig]( "users", @@ -509,6 +590,7 @@ class DeclarativeParticipantApi( checkSelfConsistency, want = wanted, fetch = fetchUsers, + get = getUser, add = { case (_, user) => createUser(user) }, @@ -567,6 +649,11 @@ class DeclarativeParticipantApi( .map(_.map { case (synchronizerConnectionConfig, _, _) => synchronizerConnectionConfig } .map(toDeclarative)) + def getConnection( + alias: SynchronizerAlias + ): Either[String, Option[DeclarativeConnectionConfig]] = + fetchConnections().map(_.collectFirst { case (a, c) if a == alias => c }) + def removeSynchronizerConnection( synchronizerAlias: SynchronizerAlias ): Either[String, Unit] = @@ -625,7 +712,10 @@ class DeclarativeParticipantApi( checkSelfConsistent = checkSelfConsistent, want = connections.map(c => (SynchronizerAlias.tryCreate(c.synchronizerAlias), c)), fetch = _ => fetchConnections(), - add = { case (_, config) => add(config) }, + get = getConnection, + add = { case (_, config) => + add(config) + }, upd = { case (_, config, existing) => if (config.isEquivalent(existing)) Either.unit else @@ -656,6 +746,13 @@ class DeclarativeParticipantApi( LedgerApiCommands.IdentityProviderConfigs.List() ).map(_.map(c => (c.identityProviderId, toDeclarative(c)))) + def getIdp(idpName: String): Either[String, Option[DeclarativeIdpConfig]] = + queryLedgerApiIfExists( + LedgerApiCommands.IdentityProviderConfigs.Get(identityProviderId = + IdentityProviderId.Id.assertFromString(idpName) + ) + ).map(_.map(toDeclarative)) + def add(config: DeclarativeIdpConfig): Either[String, Unit] = queryLedgerApi( LedgerApiCommands.IdentityProviderConfigs.Create( @@ -694,8 +791,13 @@ class DeclarativeParticipantApi( checkSelfConsistent = checkSelfConsistent, want = idps.map(c => (c.identityProviderId, c)), fetch = _ => fetchIdps(), - add = { case (_, config) => add(config) }, - upd = { case (_, config, _) => update(config) }, + get = getIdp, + add = { case (_, config) => + add(config) + }, + upd = { case (_, config, _) => + update(config) + }, rm = (idp, _) => removeIdp(idp), ) } @@ -704,7 +806,7 @@ class DeclarativeParticipantApi( private def mirrorDarsIfNecessary(fetchDarDirectory: File, dars: Seq[DeclarativeDarConfig])( implicit traceContext: TraceContext - ): Seq[(File, Option[String])] = dars.flatMap { dar => + ): Seq[(File, Option[String], Seq[String])] = dars.flatMap { dar => if (dar.location.startsWith("http")) { val matched = matchDar .findFirstMatchIn(dar.location) @@ -720,29 +822,36 @@ class DeclarativeParticipantApi( logger.warn(s"Failed to download ${dar.location}: $err") } .toOption - .map(_ => (output, dar.expectedMainPackage)) + .map(_ => (output, dar.expectedMainPackage, dar.synchronizers)) }.toList - } else List((new File(dar.location), dar.expectedMainPackage)) + } else List((new File(dar.location), dar.expectedMainPackage, dar.synchronizers)) } - private def computeWanted(dars: Seq[(File, Option[String])])(implicit + private def computeWanted( + dars: Seq[(File, Option[String], Seq[String])], + connectedSynchronizers: Seq[ListConnectedSynchronizersResult], + )(implicit traceContext: TraceContext - ): Seq[(String, String)] = - dars.flatMap { case (item, expected) => + ): 
Seq[((String, SynchronizerId), String)] = + dars.flatMap { case (item, expected, synchronizers) => val bufInput = new FileInputStream(item) val zipInputStream = new ZipInputStream(bufInput) DarParser .readArchive("file", zipInputStream) - .toOption + .toList .flatMap { loaded => expected match { case Some(value) if value != loaded.main.getHash => logger.warn(s"DAR $item has main package ${loaded.main.getHash} but expected $value") - None - case _ => Some((loaded.main.getHash, item.toString)) + Seq.empty + case _ => + connectedSynchronizers + .filter(s => + synchronizers.isEmpty || synchronizers.contains(s.synchronizerAlias.unwrap) + ) + .map(s => ((loaded.main.getHash, s.synchronizerId), item.toString)) } } - .toList } private def syncDars( @@ -751,40 +860,119 @@ class DeclarativeParticipantApi( fetchDarDirectory: File, )(implicit traceContext: TraceContext - ): Either[String, UpdateResult] = { - val want = computeWanted(mirrorDarsIfNecessary(fetchDarDirectory, dars)) - def fetchDars(limit: PositiveInt): Either[String, Seq[(String, String)]] = - for { - dars <- queryAdminApi(ParticipantAdminCommands.Package.ListDars(filterName = "", limit)) - } yield dars - .filterNot(dar => AdminWorkflowServices.AdminWorkflowNames.contains(dar.name)) - .map(_.mainPackageId) - .map((_, "")) - run[String, String]( - "dars", - removeExcess = false, - checkSelfConsistency, - want = want, - fetch = fetchDars, - add = { case (_, file) => - queryAdminApi( - ParticipantAdminCommands.Package.UploadDar( - darPath = file, - vetAllPackages = true, - synchronizeVetting = false, - description = "Uploaded by declarative API", - expectedMainPackageId = "", - requestHeaders = Map.empty, - logger, - None, - ) - ).map(_ => ()) - }, - upd = { case (hash, desired, existing) => Either.unit }, - rm = (_, _) => Either.unit, // not implemented in canton yet - onlyCheckKeys = true, - ) + ): Either[String, UpdateResult] = + queryAdminApi(TopologyAdminCommands.Init.GetId()).flatMap { participantIdResult => + queryAdminApi(ListConnectedSynchronizers()).flatMap { connectedSynchronizers => + val want = + computeWanted(mirrorDarsIfNecessary(fetchDarDirectory, dars), connectedSynchronizers) + + def fetchVettedPackages(store: Option[TopologyStoreId]) = + participantIdResult.uniqueIdentifier.toRight("Node is not initialized").flatMap { + participantId => + queryAdminApi( + TopologyAdminCommands.Read.ListVettedPackages( + BaseQuery( + store = store, + proposals = false, + timeQuery = TimeQuery.HeadState, + ops = Some(TopologyChangeOp.Replace), + filterSigningKey = "", + protocolVersion = None, + ), + filterParticipant = ParticipantId(participantId).filterString, + ) + ) + } - } + def fetchDars(limit: PositiveInt): Either[String, Seq[((String, SynchronizerId), String)]] = + for { + dars <- queryAdminApi(ParticipantAdminCommands.Package.ListDars(filterName = "", limit)) + vettedPackages <- fetchVettedPackages(None) + } yield { + val packageToSynchronizers = vettedPackages + .flatMap { vp => + Seq(vp.context.storeId) + .collect { case TopologyStoreId.Synchronizer(synchronizerId) => synchronizerId } + .flatMap(synchronizerId => + vp.item.packages + .map(_.packageId -> synchronizerId.bimap(identity, _.logical).merge) + ) + } + .groupBy { case (packageId, _) => packageId } + .map { case (packageId, values) => + (packageId: String, values.map { case (_, synchronizerId) => synchronizerId }) + } + val actualDars = dars + .filterNot(dar => AdminWorkflowServices.AdminWorkflowNames.contains(dar.name)) + .map(_.mainPackageId) + .flatMap(pkgId => + 
packageToSynchronizers + .getOrElse(pkgId, Seq.empty) + .map(synchronizerId => pkgId -> synchronizerId) + ) + .map((_, "")) + actualDars + } + + def getDar( + mainPkgAndSynchronizerId: (String, SynchronizerId) + ): Either[String, Option[String]] = mainPkgAndSynchronizerId match { + case (mainPkgId, synchronizerId) => + // check that package is vetted + fetchVettedPackages(store = Some(TopologyStoreId.Synchronizer(synchronizerId))) + .map { res => + res + .find(_.item.packages.exists(_.packageId == mainPkgId)) + .map(_ => "") + } + .flatMap { + case None => Right(None) + case Some(_) => + // and verify that dar exists + queryAdminApiIfExists( + ParticipantAdminCommands.Package.GetDarContents(mainPackageId = mainPkgId) + ).map(_.map(_ => "")) + } + } + + run[(String, SynchronizerId), String]( + "dars", + removeExcess = false, + checkSelfConsistency, + want = want, + fetch = fetchDars, + get = getDar, + add = { case ((_hash, synchronizerId), file) => + queryAdminApi( + ParticipantAdminCommands.Package.UploadDar( + darPath = file, + synchronizerId = Some(synchronizerId), + vetAllPackages = true, + synchronizeVetting = true, + description = s"Uploaded by declarative API: $file", + expectedMainPackageId = "", + requestHeaders = Map.empty, + logger, + None, + ) + ).map(_ => ()) + }, + upd = { case ((hash, synchronizerId), desired, existing) => + Either.unit + }, + rm = (_, _) => Either.unit, // not implemented in canton yet + onlyCheckKeys = true, + ) + } + } + +} + +object DeclarativeParticipantApi { + private sealed trait QueryResult { + def str: String + } + private final case class Err(str: String) extends QueryResult + private final case class NotFound(str: String) extends QueryResult } diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/environment/CommunityEnvironmentFactory.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/environment/CommunityEnvironmentFactory.scala index 8bf82f1cd2..bc797ac728 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/environment/CommunityEnvironmentFactory.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/environment/CommunityEnvironmentFactory.scala @@ -6,7 +6,6 @@ package com.digitalasset.canton.environment import com.digitalasset.canton.config.{CantonConfig, CommunityCantonEdition, TestingConfigInternal} import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.participant.CommunityParticipantNodeBootstrapFactory -import com.digitalasset.canton.resource.CommunityDbMigrationsMetaFactory import com.digitalasset.canton.synchronizer.mediator.CommunityMediatorNodeBootstrapFactory import com.digitalasset.canton.synchronizer.sequencer.CommunitySequencerNodeBootstrapFactory @@ -24,7 +23,6 @@ object CommunityEnvironmentFactory extends EnvironmentFactory[CantonConfig, Cant CommunityParticipantNodeBootstrapFactory, CommunitySequencerNodeBootstrapFactory, CommunityMediatorNodeBootstrapFactory, - new CommunityDbMigrationsMetaFactory(loggerFactory), loggerFactory, ) } diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/environment/Environment.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/environment/Environment.scala index d3301cdbcf..a0c21e94da 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/environment/Environment.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/environment/Environment.scala @@ -30,7 +30,6 @@ import 
com.digitalasset.canton.metrics.{CantonHistograms, DbStorageHistograms, M import com.digitalasset.canton.networking.grpc.CantonGrpcUtil import com.digitalasset.canton.participant.* import com.digitalasset.canton.participant.config.ParticipantNodeConfig -import com.digitalasset.canton.resource.DbMigrationsMetaFactory import com.digitalasset.canton.synchronizer.mediator.{ MediatorNodeBootstrap, MediatorNodeBootstrapFactory, @@ -69,7 +68,6 @@ abstract class Environment[Config <: SharedCantonConfig[Config]]( participantNodeFactory: ParticipantNodeBootstrapFactory, sequencerNodeFactory: SequencerNodeBootstrapFactory, mediatorNodeFactory: MediatorNodeBootstrapFactory, - protected val migrationsFactoryFactory: DbMigrationsMetaFactory, override val loggerFactory: NamedLoggerFactory, ) extends NamedLogging with AutoCloseable @@ -287,7 +285,6 @@ abstract class Environment[Config <: SharedCantonConfig[Config]]( lazy val participants = new ParticipantNodes[ParticipantNodeBootstrap, ParticipantNode]( createParticipant, - migrationsFactoryFactory.create(clock), timeouts, config.participantsByString, config.participantNodeParametersByString, @@ -298,7 +295,6 @@ abstract class Environment[Config <: SharedCantonConfig[Config]]( val sequencers = new SequencerNodes( createSequencer, - migrationsFactoryFactory.create(clock), timeouts, config.sequencersByString, config.sequencerNodeParametersByString, @@ -308,7 +304,6 @@ abstract class Environment[Config <: SharedCantonConfig[Config]]( val mediators = new MediatorNodes( createMediator, - migrationsFactoryFactory.create(clock), timeouts, config.mediatorsByString, config.mediatorNodeParametersByString, @@ -361,7 +356,7 @@ abstract class Environment[Config <: SharedCantonConfig[Config]]( ParticipantApis( ledgerApi = node.config.ledgerApi.port.unwrap, adminApi = node.config.adminApi.port.unwrap, - jsonApi = node.config.httpLedgerApi.flatMap(_.server.port), + jsonApi = node.config.httpLedgerApi.server.internalPort.map(_.unwrap), ), ) }.toMap @@ -612,7 +607,6 @@ final class CantonEnvironment( participantNodeFactory: ParticipantNodeBootstrapFactory, sequencerNodeFactory: SequencerNodeBootstrapFactory, mediatorNodeFactory: MediatorNodeBootstrapFactory, - migrationsFactoryFactory: DbMigrationsMetaFactory, override val loggerFactory: NamedLoggerFactory, ) extends Environment[CantonConfig]( config, @@ -621,7 +615,6 @@ final class CantonEnvironment( participantNodeFactory, sequencerNodeFactory, mediatorNodeFactory, - migrationsFactoryFactory, loggerFactory, ) { diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/environment/Nodes.scala b/canton/community/app-base/src/main/scala/com/digitalasset/canton/environment/Nodes.scala index 56bb332480..84ec61c03a 100644 --- a/canton/community/app-base/src/main/scala/com/digitalasset/canton/environment/Nodes.scala +++ b/canton/community/app-base/src/main/scala/com/digitalasset/canton/environment/Nodes.scala @@ -17,8 +17,8 @@ import com.digitalasset.canton.lifecycle.* import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.participant.* import com.digitalasset.canton.participant.config.ParticipantNodeConfig +import com.digitalasset.canton.resource.DbMigrations import com.digitalasset.canton.resource.DbStorage.RetryConfig -import com.digitalasset.canton.resource.{DbMigrations, DbMigrationsFactory} import com.digitalasset.canton.synchronizer.mediator.{ MediatorNode, MediatorNodeBootstrap, @@ -116,7 +116,6 @@ class ManagedNodes[ NodeBootstrap <: 
CantonNodeBootstrap[Node], ]( create: (String, NodeConfig) => NodeBootstrap, - migrationsFactory: DbMigrationsFactory, override protected val timeouts: ProcessingTimeout, configs: => Map[String, NodeConfig], parametersFor: String => CantonNodeParameters, @@ -362,16 +361,23 @@ class ManagedNodes[ case _ => F.pure(Either.unit) } + private def createDbMigration( + name: InstanceName, + dbConfig: DbConfig, + alphaVersionSupport: Boolean, + ): DbMigrations = + DbMigrations.create(dbConfig, alphaVersionSupport, timeouts, loggerFactory.append("node", name)) + // if database is fresh, we will migrate it. Otherwise, we will check if there is any pending migrations, // which need to be triggered manually. private def checkMigration( name: InstanceName, storageConfig: StorageConfig, params: CantonNodeParameters, - ): Either[StartupError, Unit] = + )(implicit traceContext: TraceContext): Either[StartupError, Unit] = runIfUsingDatabase[Id](storageConfig) { dbConfig => - val migrations = migrationsFactory.create(dbConfig, name, params.alphaVersionSupport) - import TraceContext.Implicits.Empty.* + val migrations = createDbMigration(name, dbConfig, params.alphaVersionSupport) + logger.info(s"Setting up database schemas for $name") def errorMapping(err: DbMigrations.Error): StartupError = @@ -404,8 +410,7 @@ class ManagedNodes[ alphaVersionSupport: Boolean, ): Either[StartupError, Unit] = runIfUsingDatabase[Id](storageConfig) { dbConfig => - migrationsFactory - .create(dbConfig, name, alphaVersionSupport) + createDbMigration(name, dbConfig, alphaVersionSupport) .migrateDatabase() .leftMap(FailedDatabaseMigration(name, _)) .value @@ -418,8 +423,7 @@ class ManagedNodes[ alphaVersionSupport: Boolean, ): Either[StartupError, Unit] = runIfUsingDatabase[Id](storageConfig) { dbConfig => - migrationsFactory - .create(dbConfig, name, alphaVersionSupport) + createDbMigration(name, dbConfig, alphaVersionSupport) .repairFlywayMigration() .leftMap(FailedDatabaseRepairMigration(name, _)) .value @@ -431,7 +435,6 @@ class ManagedNodes[ class ParticipantNodes[B <: CantonNodeBootstrap[N], N <: CantonNode]( create: (String, ParticipantNodeConfig) => B, // (nodeName, config) => bootstrap - migrationsFactory: DbMigrationsFactory, timeouts: ProcessingTimeout, configs: => Map[String, ParticipantNodeConfig], parametersFor: String => ParticipantNodeParameters, @@ -442,7 +445,6 @@ class ParticipantNodes[B <: CantonNodeBootstrap[N], N <: CantonNode]( protected val executionContext: ExecutionContextIdlenessExecutorService ) extends ManagedNodes[N, ParticipantNodeConfig, ParticipantNodeParameters, B]( create, - migrationsFactory, timeouts, configs, parametersFor, @@ -456,7 +458,6 @@ class ParticipantNodes[B <: CantonNodeBootstrap[N], N <: CantonNode]( class SequencerNodes( create: (String, SequencerNodeConfig) => SequencerNodeBootstrap, - migrationsFactory: DbMigrationsFactory, timeouts: ProcessingTimeout, configs: => Map[String, SequencerNodeConfig], parameters: String => SequencerNodeParameters, @@ -469,7 +470,6 @@ class SequencerNodes( SequencerNodeBootstrap, ]( create, - migrationsFactory, timeouts, configs, parameters, @@ -479,7 +479,6 @@ class SequencerNodes( class MediatorNodes( create: (String, MediatorNodeConfig) => MediatorNodeBootstrap, - migrationsFactory: DbMigrationsFactory, timeouts: ProcessingTimeout, configs: => Map[String, MediatorNodeConfig], parameters: String => MediatorNodeParameters, @@ -492,7 +491,6 @@ class MediatorNodes( MediatorNodeBootstrap, ]( create, - migrationsFactory, timeouts, configs, parameters, 
diff --git a/canton/community/app/src/main/resources/sandbox/sandbox.conf b/canton/community/app/src/main/resources/sandbox/sandbox.conf index 812a206bcb..d14857d6fe 100644 --- a/canton/community/app/src/main/resources/sandbox/sandbox.conf +++ b/canton/community/app/src/main/resources/sandbox/sandbox.conf @@ -39,7 +39,7 @@ canton { type = "memory" } }, - type = "community-reference" + type = "reference" }, storage { type = "memory" diff --git a/canton/community/app/src/main/scala/com/digitalasset/canton/Runner.scala b/canton/community/app/src/main/scala/com/digitalasset/canton/Runner.scala index 1f8871032a..521e9f2da0 100644 --- a/canton/community/app/src/main/scala/com/digitalasset/canton/Runner.scala +++ b/canton/community/app/src/main/scala/com/digitalasset/canton/Runner.scala @@ -43,7 +43,7 @@ class ServerRunner[C <: SharedCantonConfig[C]]( consoleEnvironment.grpcLedgerCommandRunner .runCommand( "upload-dar", - LedgerApiCommands.PackageManagementService.UploadDarFile(darPath), + LedgerApiCommands.PackageManagementService.UploadDarFile(darPath, None), p.config.clientLedgerApi, Some(p.adminTokenDispenser.getCurrentToken.secret), ) diff --git a/canton/community/app/src/pack/config/participant.conf b/canton/community/app/src/pack/config/participant.conf index 90dc521a11..af741b8feb 100644 --- a/canton/community/app/src/pack/config/participant.conf +++ b/canton/community/app/src/pack/config/participant.conf @@ -69,6 +69,11 @@ canton.participants.participant { keep-alive-server = ${?_shared.admin-api.keep-alive-server} } + http-ledger-api.server { + address = localhost + port = 10005 + } + // Configure GRPC / HTTP Health Server for monitoring // See https://docs.daml.com/canton/usermanual/monitoring.html#grpc-health-check-service monitoring { diff --git a/canton/community/app/src/pack/config/sandbox.conf b/canton/community/app/src/pack/config/sandbox.conf index 98bcc23a8c..c4704940bf 100644 --- a/canton/community/app/src/pack/config/sandbox.conf +++ b/canton/community/app/src/pack/config/sandbox.conf @@ -20,6 +20,10 @@ canton { address = localhost port = 10022 } + http-ledger-api.server { + address = localhost + port = 10023 + } } sequencers.local { public-api { diff --git a/canton/community/app/src/pack/examples/05-composability/composability-auto-reassignment.canton b/canton/community/app/src/pack/examples/05-composability/composability-auto-reassignment.canton index 5f8ae4fedb..a973eb2ad0 100644 --- a/canton/community/app/src/pack/examples/05-composability/composability-auto-reassignment.canton +++ b/canton/community/app/src/pack/examples/05-composability/composability-auto-reassignment.canton @@ -47,7 +47,8 @@ participant3.parties.enable("Painter", synchronizer = Some(SynchronizerAlias.try // upload the Daml model to all participants val darPath = Option(System.getProperty("canton-examples.dar-path")).getOrElse("dars/CantonExamples.dar") -participants.all.dars.upload(darPath) +Seq(participant1, participant2, participant3).dars.upload(darPath, synchronizerId = iouId) +Seq(participant2, participant3).dars.upload(darPath, synchronizerId = paintId) import com.digitalasset.canton.BigDecimalImplicits._ import com.digitalasset.canton.examples.java.iou.{Amount, Iou} diff --git a/canton/community/app/src/pack/examples/05-composability/composability1.canton b/canton/community/app/src/pack/examples/05-composability/composability1.canton index a4a16eaa63..77b85e129c 100644 --- a/canton/community/app/src/pack/examples/05-composability/composability1.canton +++ 
b/canton/community/app/src/pack/examples/05-composability/composability1.canton @@ -49,7 +49,8 @@ participant3.parties.enable("Painter", synchronizer = Some(SynchronizerAlias.try // upload the Daml model to all participants val darPath = Option(System.getProperty("canton-examples.dar-path")).getOrElse("dars/CantonExamples.dar") -participants.all.dars.upload(darPath) +Seq(participant1, participant2, participant3).dars.upload(darPath, synchronizerId = iouId) +Seq(participant2, participant3).dars.upload(darPath, synchronizerId = paintId) // architecture-handbook-entry-end: topology // architecture-handbook-entry-begin: imports diff --git a/canton/community/app/src/pack/examples/05-composability/composability2.canton b/canton/community/app/src/pack/examples/05-composability/composability2.canton index 52057c058a..5133da1ff9 100644 --- a/canton/community/app/src/pack/examples/05-composability/composability2.canton +++ b/canton/community/app/src/pack/examples/05-composability/composability2.canton @@ -73,7 +73,8 @@ utils.retry_until_true { // upload the Daml model to all participants val darPath = Option(System.getProperty("canton-examples.dar-path")).getOrElse("dars/CantonExamples.dar") -participants.all.dars.upload(darPath) +participants.all.dars.upload(darPath, synchronizerId = iouId) +Seq(participant2, participant3).dars.upload(darPath, synchronizerId = paintId) // architecture-handbook-entry-end: topology // architecture-handbook-entry-begin: setup diff --git a/canton/community/app/src/pack/examples/07-repair/synchronizer-repair-init.canton b/canton/community/app/src/pack/examples/07-repair/synchronizer-repair-init.canton index 4868db839b..a6d617105b 100644 --- a/canton/community/app/src/pack/examples/07-repair/synchronizer-repair-init.canton +++ b/canton/community/app/src/pack/examples/07-repair/synchronizer-repair-init.canton @@ -11,12 +11,13 @@ bootstrap.synchronizer( staticSynchronizerParameters = StaticSynchronizerParameters.defaultsWithoutKMS(ProtocolVersion.forSynchronizer), ) +// connect participants to synchronizer +Seq(participant1, participant2).foreach(_.synchronizers.connect_local(sequencer1, alias = "lostSynchronizer")) + // upload the Daml model to all participants val darPath = Option(System.getProperty("canton-examples.dar-path")).getOrElse("dars/CantonExamples.dar") participants.all.dars.upload(darPath) -// connect participants to synchronizer -Seq(participant1, participant2).foreach(_.synchronizers.connect_local(sequencer1, alias = "lostSynchronizer")) // setup parties val Alice = participant1.parties.enable("Alice") diff --git a/canton/community/app/src/pack/examples/08-interactive-submission/daml_transaction_hashing_v2.py b/canton/community/app/src/pack/examples/08-interactive-submission/daml_transaction_hashing_v2.py index b79e913694..83d4709318 100755 --- a/canton/community/app/src/pack/examples/08-interactive-submission/daml_transaction_hashing_v2.py +++ b/canton/community/app/src/pack/examples/08-interactive-submission/daml_transaction_hashing_v2.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +# Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
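The composability examples above now upload each DAR only to the synchronizers that use it, which lines up with the declarative syncDars/computeWanted change earlier in this diff: the wanted state is keyed by (main package hash, synchronizer id) rather than by package alone. A rough Python sketch of that keying, using hypothetical package and synchronizer ids:

# Sketch of the per-synchronizer "wanted" set for DAR uploads; mirrors the
# Scala computeWanted above. All ids below are hypothetical.

def compute_wanted(dars, connected_synchronizers):
    """dars: list of (main_package_id, dar_path, restrict_to_synchronizers);
    an empty restriction list means 'vet on all connected synchronizers'."""
    wanted = {}
    for pkg_id, path, restrict in dars:
        for sync_id in connected_synchronizers:
            if not restrict or sync_id in restrict:
                wanted[(pkg_id, sync_id)] = path
    return wanted

wanted = compute_wanted(
    [("pkg-abc", "dars/CantonExamples.dar", []),       # vet everywhere
     ("pkg-def", "dars/Paint.dar", ["paint-sync"])],   # vet only on paint-sync
    ["iou-sync", "paint-sync"],
)
assert ("pkg-abc", "iou-sync") in wanted and ("pkg-def", "iou-sync") not in wanted
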
# SPDX-License-Identifier: Apache-2.0 # Implements the transaction hashing specification defined in the README.md at https://github.com/digital-asset/canton/blob/main/community/ledger-api/src/release-line-3.2/protobuf/com/daml/ledger/api/v2/interactive/README.md diff --git a/canton/community/app/src/pack/examples/08-interactive-submission/daml_transaction_util.py b/canton/community/app/src/pack/examples/08-interactive-submission/daml_transaction_util.py index ab31003e26..5bc31c98ef 100755 --- a/canton/community/app/src/pack/examples/08-interactive-submission/daml_transaction_util.py +++ b/canton/community/app/src/pack/examples/08-interactive-submission/daml_transaction_util.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +# Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # Implements the transaction hashing specification defined in the README.md at https://github.com/digital-asset/canton/blob/main/community/ledger-api/src/release-line-3.2/protobuf/com/daml/ledger/api/v2/interactive/README.md diff --git a/canton/community/app/src/pack/examples/08-interactive-submission/external_party_onboarding.py b/canton/community/app/src/pack/examples/08-interactive-submission/external_party_onboarding.py deleted file mode 100644 index 67bdb48586..0000000000 --- a/canton/community/app/src/pack/examples/08-interactive-submission/external_party_onboarding.py +++ /dev/null @@ -1,400 +0,0 @@ -# Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -# SPDX-License-Identifier: Apache-2.0 - -# [Imports start] -import time - -import grpc -from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePrivateKey -from cryptography.hazmat.primitives.asymmetric import ec -from cryptography.hazmat.primitives import serialization -from grpc import Channel - -import google.protobuf.empty_pb2 -from com.digitalasset.canton.topology.admin.v30 import ( - topology_manager_write_service_pb2_grpc, -) -from com.digitalasset.canton.topology.admin.v30 import ( - topology_manager_write_service_pb2, -) -from com.digitalasset.canton.topology.admin.v30 import ( - topology_manager_read_service_pb2_grpc, -) -from com.digitalasset.canton.topology.admin.v30 import ( - topology_manager_read_service_pb2, - common_pb2, -) -from com.digitalasset.canton.protocol.v30 import topology_pb2 -from com.digitalasset.canton.crypto.v30 import crypto_pb2 -from google.protobuf import empty_pb2 -from interactive_topology_util import ( - compute_fingerprint, - compute_sha256_canton_hash, - serialize_topology_transaction, - compute_multi_transaction_hash, - sign_hash, - compute_topology_transaction_hash, -) - -# [Imports end] - - -def build_signed_topology_transaction( - transaction: bytes, - hashes: [bytes], - signature: bytes, - signed_by: str, - proposal: bool = False, -): - """ - Builds a signed topology transaction, optionally including multi-transaction signatures. - - Args: - transaction (bytes): The raw bytes representing the transaction to be signed. - hashes (list[bytes]): A list of transaction hashes for the multi-transaction signature. - signature (bytes): The signature for the transaction. - signed_by (str): The identifier of the entity signing the transaction. - proposal (bool, optional): A flag indicating if this transaction is part of a proposal. Defaults to False. 
- - Returns: - topology_pb2.SignedTopologyTransaction - """ - return topology_pb2.SignedTopologyTransaction( - transaction=transaction, - # Not set because we use the multi transactions signature - signatures=[], - multi_transaction_signatures=[ - topology_pb2.MultiTransactionSignatures( - transaction_hashes=hashes, - signatures=[ - crypto_pb2.Signature( - format=crypto_pb2.SignatureFormat.SIGNATURE_FORMAT_DER, - signature=signature, - signed_by=signed_by, - signing_algorithm_spec=crypto_pb2.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_256, - ) - ], - ) - ], - proposal=proposal, - ) - - -def build_serialized_transaction_and_hash( - mapping: topology_pb2.TopologyMapping, -) -> (bytes, bytes): - """ - Generates a serialized topology transaction and its corresponding hash. - - Args: - mapping (topology_pb2.TopologyMapping): The topology mapping to be serialized. - - Returns: - tuple: A tuple containing: - - bytes: The serialized transaction. - - bytes: The SHA-256 hash of the serialized transaction. - """ - transaction = serialize_topology_transaction(mapping) - transaction_hash = compute_sha256_canton_hash(11, transaction) - return transaction, transaction_hash - - -# Onboard a new external party -def onboard_external_party( - party_name: str, - confirming_participant_ids: [str], - confirming_threshold: int, - synchronizer_id: str, - channel: Channel, -) -> (EllipticCurvePrivateKey, str): - """ - Onboard a new external party. - Generates an in-memory signing key pair to authenticate the external party. - - Args: - party_name (str): Name of the party. - confirming_participant_ids (str): Participant IDs on which the party will be hosted for transaction confirmation. - confirming_threshold (int): Minimum number of confirmations that must be received from the confirming participants to authorize a transaction. - synchronizer_id (str): ID of the synchronizer on which the party will be registered. - channel (grpc.Channel): gRPC channel to one of the confirming participants Admin API. - - Returns: - tuple: A tuple containing: - - EllipticCurvePrivateKey: Private key created for the party. - - str: Fingerprint of the public key created for the party. - """ - print(f"Onboarding {party_name}") - - # [Create clients for the Admin API] - topology_write_client = ( - topology_manager_write_service_pb2_grpc.TopologyManagerWriteServiceStub(channel) - ) - topology_read_client = ( - topology_manager_read_service_pb2_grpc.TopologyManagerReadServiceStub(channel) - ) - # [Created clients for the Admin API] - - # [Generate a public/private key pair] - # For the sake of simplicity in the demo, we use a single signing key pair for the party namespace (used to manage the party itself on the network), - # and for the signing of transactions via the interactive submission service. We however recommend to use different keys in real world deployment for better security. 
- private_key = ec.generate_private_key(curve=ec.SECP256R1()) - public_key = private_key.public_key() - - # Extract the public key in the DER format - public_key_bytes: bytes = public_key.public_bytes( - encoding=serialization.Encoding.DER, - format=serialization.PublicFormat.SubjectPublicKeyInfo, - ) - # Wrap the public key in a Canton protobuf message - signing_public_key = crypto_pb2.SigningPublicKey( - # Must match the format to which the key was exported to above - format=crypto_pb2.CryptoKeyFormat.CRYPTO_KEY_FORMAT_DER_X509_SUBJECT_PUBLIC_KEY_INFO, - public_key=public_key_bytes, - # Must match the scheme of the key - scheme=crypto_pb2.SigningKeyScheme.SIGNING_KEY_SCHEME_EC_DSA_P256, - # Because we have only one key, we specify both NAMESPACE and PROTOCOL usage for it - # When using different keys, ensure to use only the correct usage for each - usage=[ - crypto_pb2.SigningKeyUsage.SIGNING_KEY_USAGE_NAMESPACE, - crypto_pb2.SigningKeyUsage.SIGNING_KEY_USAGE_PROTOCOL, - ], - # This field is deprecated in favor of scheme but python requires us to set it - key_spec=crypto_pb2.SIGNING_KEY_SPEC_EC_P256, - ) - # [Generated a public/private key pair] - - # [Compute the fingerprint of the public key] - public_key_fingerprint = compute_fingerprint(public_key_bytes) - # [Computed the fingerprint of the public key] - - # [Construct party ID] - # The party id is constructed with party_name :: fingerprint - # This must be the fingerprint of the _namespace signing key_ - party_id = party_name + "::" + public_key_fingerprint - # [Constructed party ID] - - # [Build onboarding transactions and their hash] - # Namespace delegation: registers a root namespace with the public key of the party to the network - # effectively creating the party. - namespace_delegation_mapping = topology_pb2.TopologyMapping( - namespace_delegation=topology_pb2.NamespaceDelegation( - namespace=public_key_fingerprint, - target_key=signing_public_key, - is_root_delegation=True, - ) - ) - (namespace_delegation_transaction, namespace_transaction_hash) = ( - build_serialized_transaction_and_hash(namespace_delegation_mapping) - ) - - # Party to key: registers the public key as the one that will be used to sign and authorize Daml transactions submitted - # to the ledger via the interactive submission service - party_to_key_transaction = build_party_to_key_transaction( - channel, party_id, signing_public_key, synchronizer_id - ) - party_to_key_transaction_hash = compute_topology_transaction_hash( - party_to_key_transaction - ) - - # Party to participant: records the fact that the party wants to be hosted on the participants with confirmation rights - # This means those participants are not allowed to submit transactions on behalf of this party but will validate transactions - # on behalf of the party by confirming or rejecting them according to the ledger model. They also records transaction for that party on the ledger. 
- confirming_participants_hosting = [] - for confirming_participant_id in confirming_participant_ids: - confirming_participants_hosting.append( - topology_pb2.PartyToParticipant.HostingParticipant( - participant_uid=confirming_participant_id, - permission=topology_pb2.Enums.ParticipantPermission.PARTICIPANT_PERMISSION_CONFIRMATION, - ) - ) - party_to_participant_mapping = topology_pb2.TopologyMapping( - party_to_participant=topology_pb2.PartyToParticipant( - party=party_id, - threshold=confirming_threshold, - participants=confirming_participants_hosting, - ) - ) - (party_to_participant_transaction, party_to_participant_transaction_hash) = ( - build_serialized_transaction_and_hash(party_to_participant_mapping) - ) - # [Built onboarding transactions and their hash] - - # [Compute multi hash] - # Combine the hashes of all three transactions, so we can perform a single signature - multi_hash = compute_multi_transaction_hash( - [ - namespace_transaction_hash, - party_to_key_transaction_hash, - party_to_participant_transaction_hash, - ] - ) - # [Computed multi hash] - - # [Sign multi hash] - signature = sign_hash(private_key, multi_hash) - # [Signed multi hash] - - # [Build signed topology transactions] - hash_list = [ - namespace_transaction_hash, - party_to_key_transaction_hash, - party_to_participant_transaction_hash, - ] - signed_namespace_transaction = build_signed_topology_transaction( - namespace_delegation_transaction, hash_list, signature, public_key_fingerprint - ) - signed_party_to_key_transaction = build_signed_topology_transaction( - party_to_key_transaction, hash_list, signature, public_key_fingerprint - ) - signed_party_to_participant_transaction = build_signed_topology_transaction( - party_to_participant_transaction, - hash_list, - signature, - public_key_fingerprint, - True, - ) - # [Built signed topology transactions] - - # [Load all three transactions onto the participant node] - add_transactions_request = ( - topology_manager_write_service_pb2.AddTransactionsRequest( - transactions=[ - signed_namespace_transaction, - signed_party_to_key_transaction, - signed_party_to_participant_transaction, - ], - store=common_pb2.StoreId( - synchronizer=common_pb2.Synchronizer( - id=synchronizer_id, - ) - ), - ) - ) - topology_write_client.AddTransactions(add_transactions_request) - # [Loaded all three transactions onto the participant node] - - # [Authorize hosting from the confirming node] - topology_write_client.Authorize( - topology_manager_write_service_pb2.AuthorizeRequest( - proposal=topology_manager_write_service_pb2.AuthorizeRequest.Proposal( - change=topology_pb2.Enums.TopologyChangeOp.TOPOLOGY_CHANGE_OP_ADD_REPLACE, - serial=1, - mapping=party_to_participant_mapping, - ), - # False because the authorization from the participant is not enough: - # - it requires the signatures from the party (already submitted above) - # - as well as signatures from any other hosting participant - must_fully_authorize=False, - store=common_pb2.StoreId( - synchronizer=common_pb2.Synchronizer( - id=synchronizer_id, - ), - ), - ) - ) - # [Authorized hosting from the confirming node] - - # Finally wait for the party to appear in the topology, ensuring the onboarding succeeded - print(f"Waiting for {party_name} to appear in topology") - # [Waiting for party] - # If there's only one confirming participant, onboarding should be complete already - if len(confirming_participant_ids) == 1: - wait_to_observe_party_to_participant( - topology_read_client, synchronizer_id, party_id - ) - # [Party found] - - 
return private_key, public_key_fingerprint - - -def wait_to_observe_party_to_participant( - topology_read_client: topology_manager_read_service_pb2_grpc, - synchronizer_id: str, - party_id, -): - party_in_topology = False - while not party_in_topology: - party_to_participant_response: ( - topology_manager_read_service_pb2.ListPartyToParticipantResponse - ) = topology_read_client.ListPartyToParticipant( - topology_manager_read_service_pb2.ListPartyToParticipantRequest( - base_query=topology_manager_read_service_pb2.BaseQuery( - store=common_pb2.StoreId( - synchronizer=common_pb2.Synchronizer( - id=synchronizer_id, - ) - ), - head_state=google.protobuf.empty_pb2.Empty(), - ), - filter_party=party_id, - ) - ) - if len(party_to_participant_response.results) > 0: - break - else: - time.sleep(0.5) - continue - - -def build_party_to_key_transaction( - channel: grpc.Channel, - party_id: str, - new_signing_key: crypto_pb2.SigningPublicKey, - synchronizer_id: str, -) -> bytes: - """ - Constructs a topology transaction that updates the party-to-key mapping. - - Args: - channel (grpc.Channel): gRPC channel for communication with the topology manager. - party_id (str): Identifier of the party whose key mapping is being updated. - new_signing_key (crypto_pb2.SigningPublicKey): The new signing key to be added. - synchronizer_id (str): ID of the synchronizer to query the topology state. - - Returns: - bytes: Serialized topology transaction containing the updated mapping. - """ - # Retrieve the current party to key mapping - list_party_to_key_request = ( - topology_manager_read_service_pb2.ListPartyToKeyMappingRequest( - base_query=topology_manager_read_service_pb2.BaseQuery( - store=common_pb2.StoreId( - synchronizer=common_pb2.Synchronizer(id=synchronizer_id) - ), - head_state=empty_pb2.Empty(), - ), - filter_party=party_id, - ) - ) - topology_read_client = ( - topology_manager_read_service_pb2_grpc.TopologyManagerReadServiceStub(channel) - ) - party_to_key_response: ( - topology_manager_read_service_pb2.ListPartyToKeyMappingResponse - ) = topology_read_client.ListPartyToKeyMapping(list_party_to_key_request) - if len(party_to_key_response.results) == 0: - current_serial = 1 - current_keys_list = [] - else: - # Sort the results by serial in descending order and take the first one - sorted_results = sorted( - party_to_key_response.results, - key=lambda result: result.context.serial, - reverse=True, - ) - # Get the mapping with the highest serial and its list of hosting participants - current_serial = sorted_results[0].context.serial - current_keys_list: [crypto_pb2.SigningPublicKey] = sorted_results[ - 0 - ].item.signing_keys - - # Create a new mapping adding the new participant to the list and incrementing the serial - updated_mapping = topology_pb2.TopologyMapping( - party_to_key_mapping=topology_pb2.PartyToKeyMapping( - party=party_id, - threshold=1, - signing_keys=current_keys_list + [new_signing_key], - ) - ) - # Build the serialized transaction - return serialize_topology_transaction(updated_mapping, serial=current_serial + 1) diff --git a/canton/community/app/src/pack/examples/08-interactive-submission/external_party_onboarding.sh b/canton/community/app/src/pack/examples/08-interactive-submission/external_party_onboarding.sh new file mode 100755 index 0000000000..ece1f58460 --- /dev/null +++ b/canton/community/app/src/pack/examples/08-interactive-submission/external_party_onboarding.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash + +# Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its 
affiliates. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail # Exit on error, prevent unset vars, fail pipeline on first error + +PRIVATE_KEY_FILE="private_key.der" +HTTP_ENDPOINT="${1:-"localhost:$(jq -r .participant1.jsonApi canton_ports.json)"}" + +# Determine synchronizer id from participant +echo "Connecting to ${HTTP_ENDPOINT}/v2/state/connected-synchronizers" +SYNCHRONIZER_ID=$(curl -s -L ${HTTP_ENDPOINT}/v2/state/connected-synchronizers | jq .connectedSynchronizers.[0].synchronizerId) +echo "Detected synchronizer-id ${SYNCHRONIZER_ID}" +# Generate an ed25519 private key and extract its public key +openssl genpkey -algorithm ed25519 -outform DER -out private_key.der +# Extract the public key from the private key +openssl pkey -in private_key.der -pubout -outform DER -out public_key.der 2> /dev/null +# Convert public key to base64 +PUBLIC_KEY_BASE64=$(base64 -w 0 -i public_key.der) + +# Create the JSON payload to generate the onboarding transaction +GENERATE=$(cat << EOF +{ + "synchronizer" : $SYNCHRONIZER_ID, + "partyHint" : "MyParty", + "publicKey" : { + "format" : "CRYPTO_KEY_FORMAT_DER_X509_SUBJECT_PUBLIC_KEY_INFO", + "keyData": "$PUBLIC_KEY_BASE64", + "keySpec" : "SIGNING_KEY_SPEC_EC_CURVE25519" + } +} +EOF +) + +# Submit it to the JSON API +ONBOARDING_TX=$(curl -s -d "$GENERATE" -H "Content-Type: application/json" \ + -X POST ${HTTP_ENDPOINT}/v2/parties/external/generate-topology) + +# Extract results +PARTY_ID=$(echo $ONBOARDING_TX | jq -r .partyId) +TRANSACTIONS=$(echo $ONBOARDING_TX | jq '.topologyTransactions | map({ transaction : .})') +PUBLIC_KEY_FINGERPRINT=$(echo $ONBOARDING_TX | jq -r .publicKeyFingerprint) +MULTI_HASH=$(echo -n $ONBOARDING_TX | jq -r .multiHash) + +# Sign the multi-hash using the private key +echo "Signing hash ${MULTI_HASH} for ${PARTY_ID} using ED25519" +echo -n $MULTI_HASH | base64 --decode > hash_binary.bin +openssl pkeyutl -sign -inkey $PRIVATE_KEY_FILE -rawin -in hash_binary.bin -out signature.bin -keyform DER +SIGNATURE=$(base64 -w 0 < signature.bin) +rm signature.bin hash_binary.bin + +# Submit the onboarding transaction to the JSON API +ALLOCATE=$(cat << EOF +{ + "synchronizer" : $SYNCHRONIZER_ID, + "onboardingTransactions": $TRANSACTIONS, + "multiHashSignatures": [{ + "format" : "SIGNATURE_FORMAT_CONCAT", + "signature": "$SIGNATURE", + "signedBy" : "$PUBLIC_KEY_FINGERPRINT", + "signingAlgorithmSpec" : "SIGNING_ALGORITHM_SPEC_ED25519" + }] +} +EOF +) + +RESULT=$(curl -s -d "$ALLOCATE" -H "Content-Type: application/json" \ + -X POST ${HTTP_ENDPOINT}/v2/parties/external/allocate) + +echo "Onboarded party $(echo $RESULT | jq .partyId)" diff --git a/canton/community/app/src/pack/examples/08-interactive-submission/external_party_onboarding_admin_api.py b/canton/community/app/src/pack/examples/08-interactive-submission/external_party_onboarding_admin_api.py new file mode 100644 index 0000000000..929d5701a3 --- /dev/null +++ b/canton/community/app/src/pack/examples/08-interactive-submission/external_party_onboarding_admin_api.py @@ -0,0 +1,400 @@ +# Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
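The shell script above drives the JSON API onboarding flow: generate the topology transactions, sign the combined hash, allocate the party. The same flow sketched in Python, under the assumption that the participant's JSON API is reachable at the `endpoint` placeholder below; endpoint paths and field names are taken from the script, and the `requests` and `cryptography` packages are assumed available:

# Sketch: generate -> sign -> allocate, mirroring the shell script above.
import base64
import requests
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ed25519

endpoint = "http://localhost:7575"  # assumption: your participant's JSON API address

# Determine the synchronizer id from the participant
sync_id = requests.get(f"{endpoint}/v2/state/connected-synchronizers").json()[
    "connectedSynchronizers"][0]["synchronizerId"]

# Generate an ed25519 key pair and export the public key as DER
key = ed25519.Ed25519PrivateKey.generate()
pub_der = key.public_key().public_bytes(
    serialization.Encoding.DER, serialization.PublicFormat.SubjectPublicKeyInfo)

# Ask the participant to generate the onboarding topology transactions
gen = requests.post(f"{endpoint}/v2/parties/external/generate-topology", json={
    "synchronizer": sync_id,
    "partyHint": "MyParty",
    "publicKey": {
        "format": "CRYPTO_KEY_FORMAT_DER_X509_SUBJECT_PUBLIC_KEY_INFO",
        "keyData": base64.b64encode(pub_der).decode(),
        "keySpec": "SIGNING_KEY_SPEC_EC_CURVE25519",
    },
}).json()

# Sign the combined multi-hash and allocate the party
signature = key.sign(base64.b64decode(gen["multiHash"]))
alloc = requests.post(f"{endpoint}/v2/parties/external/allocate", json={
    "synchronizer": sync_id,
    "onboardingTransactions": [{"transaction": t} for t in gen["topologyTransactions"]],
    "multiHashSignatures": [{
        "format": "SIGNATURE_FORMAT_CONCAT",
        "signature": base64.b64encode(signature).decode(),
        "signedBy": gen["publicKeyFingerprint"],
        "signingAlgorithmSpec": "SIGNING_ALGORITHM_SPEC_ED25519",
    }],
}).json()
print("Onboarded party", alloc.get("partyId"))
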
+# SPDX-License-Identifier: Apache-2.0 + +# [Imports start] +import time + +import grpc +from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePrivateKey +from cryptography.hazmat.primitives.asymmetric import ec +from cryptography.hazmat.primitives import serialization +from grpc import Channel + +import google.protobuf.empty_pb2 +from com.digitalasset.canton.topology.admin.v30 import ( + topology_manager_write_service_pb2_grpc, +) +from com.digitalasset.canton.topology.admin.v30 import ( + topology_manager_write_service_pb2, +) +from com.digitalasset.canton.topology.admin.v30 import ( + topology_manager_read_service_pb2_grpc, +) +from com.digitalasset.canton.topology.admin.v30 import ( + topology_manager_read_service_pb2, + common_pb2, +) +from com.digitalasset.canton.protocol.v30 import topology_pb2 +from com.digitalasset.canton.crypto.v30 import crypto_pb2 +from google.protobuf import empty_pb2 +from interactive_topology_util import ( + compute_fingerprint, + compute_sha256_canton_hash, + serialize_topology_transaction, + compute_multi_transaction_hash, + sign_hash, + compute_topology_transaction_hash, +) + +# [Imports end] + + +def build_signed_topology_transaction( + transaction: bytes, + hashes: [bytes], + signature: bytes, + signed_by: str, + proposal: bool = False, +): + """ + Builds a signed topology transaction, optionally including multi-transaction signatures. + + Args: + transaction (bytes): The raw bytes representing the transaction to be signed. + hashes (list[bytes]): A list of transaction hashes for the multi-transaction signature. + signature (bytes): The signature for the transaction. + signed_by (str): The identifier of the entity signing the transaction. + proposal (bool, optional): A flag indicating if this transaction is part of a proposal. Defaults to False. + + Returns: + topology_pb2.SignedTopologyTransaction + """ + return topology_pb2.SignedTopologyTransaction( + transaction=transaction, + # Not set because we use the multi-transaction signatures + signatures=[], + multi_transaction_signatures=[ + topology_pb2.MultiTransactionSignatures( + transaction_hashes=hashes, + signatures=[ + crypto_pb2.Signature( + format=crypto_pb2.SignatureFormat.SIGNATURE_FORMAT_DER, + signature=signature, + signed_by=signed_by, + signing_algorithm_spec=crypto_pb2.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_256, + ) + ], + ) + ], + proposal=proposal, + ) + + +def build_serialized_transaction_and_hash( + mapping: topology_pb2.TopologyMapping, +) -> (bytes, bytes): + """ + Generates a serialized topology transaction and its corresponding hash. + + Args: + mapping (topology_pb2.TopologyMapping): The topology mapping to be serialized. + + Returns: + tuple: A tuple containing: + - bytes: The serialized transaction. + - bytes: The SHA-256 hash of the serialized transaction. + """ + transaction = serialize_topology_transaction(mapping) + transaction_hash = compute_sha256_canton_hash(11, transaction) + return transaction, transaction_hash + + +# Onboard a new external party +def onboard_external_party( + party_name: str, + confirming_participant_ids: [str], + confirming_threshold: int, + synchronizer_id: str, + channel: Channel, +) -> (EllipticCurvePrivateKey, str): + """ + Onboard a new external party. + Generates an in-memory signing key pair to authenticate the external party. + + Args: + party_name (str): Name of the party. + confirming_participant_ids (list[str]): Participant IDs on which the party will be hosted for transaction confirmation.
+ confirming_threshold (int): Minimum number of confirmations that must be received from the confirming participants to authorize a transaction. + synchronizer_id (str): ID of the synchronizer on which the party will be registered. + channel (grpc.Channel): gRPC channel to one of the confirming participants' Admin API. + + Returns: + tuple: A tuple containing: + - EllipticCurvePrivateKey: Private key created for the party. + - str: Fingerprint of the public key created for the party. + """ + print(f"Onboarding {party_name}") + + # [Create clients for the Admin API] + topology_write_client = ( + topology_manager_write_service_pb2_grpc.TopologyManagerWriteServiceStub(channel) + ) + topology_read_client = ( + topology_manager_read_service_pb2_grpc.TopologyManagerReadServiceStub(channel) + ) + # [Created clients for the Admin API] + + # [Generate a public/private key pair] + # For the sake of simplicity in the demo, we use a single signing key pair for the party namespace (used to manage the party itself on the network), + # and for the signing of transactions via the interactive submission service. However, we recommend using different keys in real-world deployments for better security. + private_key = ec.generate_private_key(curve=ec.SECP256R1()) + public_key = private_key.public_key() + + # Extract the public key in the DER format + public_key_bytes: bytes = public_key.public_bytes( + encoding=serialization.Encoding.DER, + format=serialization.PublicFormat.SubjectPublicKeyInfo, + ) + # Wrap the public key in a Canton protobuf message + signing_public_key = crypto_pb2.SigningPublicKey( + # Must match the format in which the key was exported above + format=crypto_pb2.CryptoKeyFormat.CRYPTO_KEY_FORMAT_DER_X509_SUBJECT_PUBLIC_KEY_INFO, + public_key=public_key_bytes, + # Must match the scheme of the key + scheme=crypto_pb2.SigningKeyScheme.SIGNING_KEY_SCHEME_EC_DSA_P256, + # Because we have only one key, we specify both NAMESPACE and PROTOCOL usage for it + # When using different keys, make sure to set only the correct usage for each + usage=[ + crypto_pb2.SigningKeyUsage.SIGNING_KEY_USAGE_NAMESPACE, + crypto_pb2.SigningKeyUsage.SIGNING_KEY_USAGE_PROTOCOL, + ], + # This field is deprecated in favor of scheme, but Python requires us to set it + key_spec=crypto_pb2.SIGNING_KEY_SPEC_EC_P256, + ) + # [Generated a public/private key pair] + + # [Compute the fingerprint of the public key] + public_key_fingerprint = compute_fingerprint(public_key_bytes) + # [Computed the fingerprint of the public key] + + # [Construct party ID] + # The party id is constructed as party_name :: fingerprint + # This must be the fingerprint of the _namespace signing key_ + party_id = party_name + "::" + public_key_fingerprint + # [Constructed party ID] + + # [Build onboarding transactions and their hash] + # Namespace delegation: registers a root namespace with the public key of the party to the network, + # effectively creating the party.
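As noted in the comment above, the demo reuses one key pair for both NAMESPACE and PROTOCOL usage, while production deployments should use separate keys. A sketch of that two-key variant, reusing only names that appear in this example (the helper itself is hypothetical):

# Sketch: separate namespace and protocol keys, as recommended above.
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec
from com.digitalasset.canton.crypto.v30 import crypto_pb2

def new_signing_key(usage):
    """Generate a P-256 key and wrap its DER public key for the given usage."""
    private_key = ec.generate_private_key(curve=ec.SECP256R1())
    der = private_key.public_key().public_bytes(
        encoding=serialization.Encoding.DER,
        format=serialization.PublicFormat.SubjectPublicKeyInfo,
    )
    return private_key, crypto_pb2.SigningPublicKey(
        format=crypto_pb2.CryptoKeyFormat.CRYPTO_KEY_FORMAT_DER_X509_SUBJECT_PUBLIC_KEY_INFO,
        public_key=der,
        scheme=crypto_pb2.SigningKeyScheme.SIGNING_KEY_SCHEME_EC_DSA_P256,
        usage=[usage],
        key_spec=crypto_pb2.SIGNING_KEY_SPEC_EC_P256,
    )

namespace_priv, namespace_pub = new_signing_key(
    crypto_pb2.SigningKeyUsage.SIGNING_KEY_USAGE_NAMESPACE)
protocol_priv, protocol_pub = new_signing_key(
    crypto_pb2.SigningKeyUsage.SIGNING_KEY_USAGE_PROTOCOL)
# The namespace key then signs the NamespaceDelegation; the protocol key goes
# into the PartyToKeyMapping used for interactive submissions.
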
+ namespace_delegation_mapping = topology_pb2.TopologyMapping( + namespace_delegation=topology_pb2.NamespaceDelegation( + namespace=public_key_fingerprint, + target_key=signing_public_key, + is_root_delegation=True, + ) + ) + (namespace_delegation_transaction, namespace_transaction_hash) = ( + build_serialized_transaction_and_hash(namespace_delegation_mapping) + ) + + # Party to key: registers the public key as the one that will be used to sign and authorize Daml transactions submitted + # to the ledger via the interactive submission service + party_to_key_transaction = build_party_to_key_transaction( + channel, party_id, signing_public_key, synchronizer_id + ) + party_to_key_transaction_hash = compute_topology_transaction_hash( + party_to_key_transaction + ) + + # Party to participant: records the fact that the party wants to be hosted on the participants with confirmation rights. + # This means those participants are not allowed to submit transactions on behalf of this party but will validate transactions + # on behalf of the party by confirming or rejecting them according to the ledger model. They also record transactions for that party on the ledger. + confirming_participants_hosting = [] + for confirming_participant_id in confirming_participant_ids: + confirming_participants_hosting.append( + topology_pb2.PartyToParticipant.HostingParticipant( + participant_uid=confirming_participant_id, + permission=topology_pb2.Enums.ParticipantPermission.PARTICIPANT_PERMISSION_CONFIRMATION, + ) + ) + party_to_participant_mapping = topology_pb2.TopologyMapping( + party_to_participant=topology_pb2.PartyToParticipant( + party=party_id, + threshold=confirming_threshold, + participants=confirming_participants_hosting, + ) + ) + (party_to_participant_transaction, party_to_participant_transaction_hash) = ( + build_serialized_transaction_and_hash(party_to_participant_mapping) + ) + # [Built onboarding transactions and their hash] + + # [Compute multi hash] + # Combine the hashes of all three transactions, so we can produce a single signature + multi_hash = compute_multi_transaction_hash( + [ + namespace_transaction_hash, + party_to_key_transaction_hash, + party_to_participant_transaction_hash, + ] + ) + # [Computed multi hash] + + # [Sign multi hash] + signature = sign_hash(private_key, multi_hash) + # [Signed multi hash] + + # [Build signed topology transactions] + hash_list = [ + namespace_transaction_hash, + party_to_key_transaction_hash, + party_to_participant_transaction_hash, + ] + signed_namespace_transaction = build_signed_topology_transaction( + namespace_delegation_transaction, hash_list, signature, public_key_fingerprint + ) + signed_party_to_key_transaction = build_signed_topology_transaction( + party_to_key_transaction, hash_list, signature, public_key_fingerprint + ) + signed_party_to_participant_transaction = build_signed_topology_transaction( + party_to_participant_transaction, + hash_list, + signature, + public_key_fingerprint, + True, + ) + # [Built signed topology transactions] + + # [Load all three transactions onto the participant node] + add_transactions_request = ( + topology_manager_write_service_pb2.AddTransactionsRequest( + transactions=[ + signed_namespace_transaction, + signed_party_to_key_transaction, + signed_party_to_participant_transaction, + ], + store=common_pb2.StoreId( + synchronizer=common_pb2.Synchronizer( + id=synchronizer_id, + ) + ), + ) + ) + topology_write_client.AddTransactions(add_transactions_request) + # [Loaded all three transactions onto the participant node]
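All three onboarding transactions are authorized by a single signature over a combined hash, which is what compute_multi_transaction_hash provides. A toy illustration of the concept only; this is not Canton's actual hash construction:

# Toy illustration of multi-transaction signing: one signature over a combined
# hash authorizes several transactions at once. Canton's real combination
# format is implemented by compute_multi_transaction_hash.
import hashlib

def combine(hashes: list[bytes]) -> bytes:
    h = hashlib.sha256()
    h.update(len(hashes).to_bytes(4, "big"))  # domain-separate by hash count
    for tx_hash in hashes:
        h.update(tx_hash)
    return h.digest()

tx_hashes = [hashlib.sha256(f"tx{i}".encode()).digest() for i in range(3)]
multi_hash = combine(tx_hashes)
# Sign multi_hash once; each SignedTopologyTransaction then carries the full
# hash list plus that single signature, so verifiers can recompute multi_hash.
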
+ + # [Authorize hosting from the confirming node] + topology_write_client.Authorize( + topology_manager_write_service_pb2.AuthorizeRequest( + proposal=topology_manager_write_service_pb2.AuthorizeRequest.Proposal( + change=topology_pb2.Enums.TopologyChangeOp.TOPOLOGY_CHANGE_OP_ADD_REPLACE, + serial=1, + mapping=party_to_participant_mapping, + ), + # False because the authorization from the participant is not enough: + # - it requires the signatures from the party (already submitted above) + # - as well as signatures from any other hosting participant + must_fully_authorize=False, + store=common_pb2.StoreId( + synchronizer=common_pb2.Synchronizer( + id=synchronizer_id, + ), + ), + ) + ) + # [Authorized hosting from the confirming node] + + # Finally wait for the party to appear in the topology, ensuring the onboarding succeeded + print(f"Waiting for {party_name} to appear in topology") + # [Waiting for party] + # If there's only one confirming participant, onboarding should be complete already + if len(confirming_participant_ids) == 1: + wait_to_observe_party_to_participant( + topology_read_client, synchronizer_id, party_id + ) + # [Party found] + + return private_key, public_key_fingerprint + + +def wait_to_observe_party_to_participant( + topology_read_client: topology_manager_read_service_pb2_grpc, + synchronizer_id: str, + party_id, +): + party_in_topology = False + while not party_in_topology: + party_to_participant_response: ( + topology_manager_read_service_pb2.ListPartyToParticipantResponse + ) = topology_read_client.ListPartyToParticipant( + topology_manager_read_service_pb2.ListPartyToParticipantRequest( + base_query=topology_manager_read_service_pb2.BaseQuery( + store=common_pb2.StoreId( + synchronizer=common_pb2.Synchronizer( + id=synchronizer_id, + ) + ), + head_state=google.protobuf.empty_pb2.Empty(), + ), + filter_party=party_id, + ) + ) + if len(party_to_participant_response.results) > 0: + break + else: + time.sleep(0.5) + continue + + +def build_party_to_key_transaction( + channel: grpc.Channel, + party_id: str, + new_signing_key: crypto_pb2.SigningPublicKey, + synchronizer_id: str, +) -> bytes: + """ + Constructs a topology transaction that updates the party-to-key mapping. + + Args: + channel (grpc.Channel): gRPC channel for communication with the topology manager. + party_id (str): Identifier of the party whose key mapping is being updated. + new_signing_key (crypto_pb2.SigningPublicKey): The new signing key to be added. + synchronizer_id (str): ID of the synchronizer to query the topology state. + + Returns: + bytes: Serialized topology transaction containing the updated mapping. 
+ """ + # Retrieve the current party to key mapping + list_party_to_key_request = ( + topology_manager_read_service_pb2.ListPartyToKeyMappingRequest( + base_query=topology_manager_read_service_pb2.BaseQuery( + store=common_pb2.StoreId( + synchronizer=common_pb2.Synchronizer(id=synchronizer_id) + ), + head_state=empty_pb2.Empty(), + ), + filter_party=party_id, + ) + ) + topology_read_client = ( + topology_manager_read_service_pb2_grpc.TopologyManagerReadServiceStub(channel) + ) + party_to_key_response: ( + topology_manager_read_service_pb2.ListPartyToKeyMappingResponse + ) = topology_read_client.ListPartyToKeyMapping(list_party_to_key_request) + if len(party_to_key_response.results) == 0: + current_serial = 1 + current_keys_list = [] + else: + # Sort the results by serial in descending order and take the first one + sorted_results = sorted( + party_to_key_response.results, + key=lambda result: result.context.serial, + reverse=True, + ) + # Get the mapping with the highest serial and its list of hosting participants + current_serial = sorted_results[0].context.serial + current_keys_list: [crypto_pb2.SigningPublicKey] = sorted_results[ + 0 + ].item.signing_keys + + # Create a new mapping adding the new participant to the list and incrementing the serial + updated_mapping = topology_pb2.TopologyMapping( + party_to_key_mapping=topology_pb2.PartyToKeyMapping( + party=party_id, + threshold=1, + signing_keys=current_keys_list + [new_signing_key], + ) + ) + # Build the serialized transaction + return serialize_topology_transaction(updated_mapping, serial=current_serial + 1) diff --git a/canton/community/app/src/pack/examples/08-interactive-submission/external_party_onboarding_multi_hosting.py b/canton/community/app/src/pack/examples/08-interactive-submission/external_party_onboarding_multi_hosting.py index de051933f8..bcb0340e30 100644 --- a/canton/community/app/src/pack/examples/08-interactive-submission/external_party_onboarding_multi_hosting.py +++ b/canton/community/app/src/pack/examples/08-interactive-submission/external_party_onboarding_multi_hosting.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +# Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 import time @@ -37,7 +37,7 @@ participant_status_service_pb2, participant_status_service_pb2_grpc, ) -from external_party_onboarding import ( +from external_party_onboarding_admin_api import ( onboard_external_party, wait_to_observe_party_to_participant, ) diff --git a/canton/community/app/src/pack/examples/08-interactive-submission/interactive-submission.conf b/canton/community/app/src/pack/examples/08-interactive-submission/interactive-submission.conf index a4d4241c60..0de2bd26e5 100644 --- a/canton/community/app/src/pack/examples/08-interactive-submission/interactive-submission.conf +++ b/canton/community/app/src/pack/examples/08-interactive-submission/interactive-submission.conf @@ -9,7 +9,12 @@ canton { } participants { - participant1 {} + participant1 { + http-ledger-api.server { + # TODO(#27556): align automatic port allocation with admin / ledger api + port = 7373 + } + } participant2 {} participant3 {} } diff --git a/canton/community/app/src/pack/examples/08-interactive-submission/interactive_submission.py b/canton/community/app/src/pack/examples/08-interactive-submission/interactive_submission.py index deb3d2b222..2d705280d7 100644 --- a/canton/community/app/src/pack/examples/08-interactive-submission/interactive_submission.py +++ b/canton/community/app/src/pack/examples/08-interactive-submission/interactive_submission.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +# Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # Simple example of an interactive submission demonstrating the external signing flow @@ -12,8 +12,8 @@ from google.protobuf.json_format import MessageToJson from com.daml.ledger.api.v2.interactive import interactive_submission_service_pb2_grpc from com.daml.ledger.api.v2.interactive import interactive_submission_service_pb2 -from com.daml.ledger.api.v2 import commands_pb2, value_pb2, completion_pb2 -from external_party_onboarding import onboard_external_party +from com.daml.ledger.api.v2 import commands_pb2, value_pb2, completion_pb2, crypto_pb2 +from external_party_onboarding_admin_api import onboard_external_party from cryptography.hazmat.primitives import serialization from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePrivateKey from cryptography.hazmat.primitives.asymmetric import ec @@ -102,21 +102,8 @@ def get_active_contracts(party: str): ) active_contracts_response = state_client.GetActiveContracts( state_service_pb2.GetActiveContractsRequest( - filter=transaction_filter_pb2.TransactionFilter( - filters_by_party={ - party: transaction_filter_pb2.Filters( - cumulative=[ - transaction_filter_pb2.CumulativeFilter( - wildcard_filter=transaction_filter_pb2.WildcardFilter( - include_created_event_blob=True - ) - ) - ] - ) - } - ), + event_format=get_event_format(party), active_at_offset=ledger_end_response.offset, - verbose=True, ) ) return active_contracts_response @@ -177,11 +164,11 @@ def execute_and_get_contract_id( interactive_submission_service_pb2.SinglePartySignatures( party=party, signatures=[ - interactive_submission_service_pb2.Signature( - format=interactive_submission_service_pb2.SignatureFormat.SIGNATURE_FORMAT_DER, + crypto_pb2.Signature( + format=crypto_pb2.SignatureFormat.SIGNATURE_FORMAT_DER, signature=signature, signed_by=pub_fingerprint, - 
signing_algorithm_spec=interactive_submission_service_pb2.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_256, + signing_algorithm_spec=crypto_pb2.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_256, ) ], ) @@ -208,15 +195,20 @@ def execute_and_get_contract_id( completion: completion_pb2.Completion = update.completion break - transaction_response: update_service_pb2.GetTransactionResponse = ( - us_client.GetTransactionById( - update_service_pb2.GetTransactionByIdRequest( + update_response: update_service_pb2.GetUpdateResponse = ( + us_client.GetUpdateById( + update_service_pb2.GetUpdateByIdRequest( update_id=completion.update_id, - requesting_parties=[party], + update_format=transaction_filter_pb2.UpdateFormat( + include_transactions=transaction_filter_pb2.TransactionFormat( + event_format=get_event_format(party), + transaction_shape=transaction_filter_pb2.TransactionShape.TRANSACTION_SHAPE_ACS_DELTA + ) + ) ) ) ) - for event in transaction_response.transaction.events: + for event in update_response.transaction.events: if event.HasField("created"): contract_id = event.created.contract_id break diff --git a/canton/community/app/src/pack/examples/08-interactive-submission/interactive_topology_example.py b/canton/community/app/src/pack/examples/08-interactive-submission/interactive_topology_example.py index 0886767eb1..103ac5ef7d 100644 --- a/canton/community/app/src/pack/examples/08-interactive-submission/interactive_topology_example.py +++ b/canton/community/app/src/pack/examples/08-interactive-submission/interactive_topology_example.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +# Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # Simple example of an interactive submission demonstrating the external signing flow diff --git a/canton/community/app/src/pack/examples/08-interactive-submission/interactive_topology_util.py b/canton/community/app/src/pack/examples/08-interactive-submission/interactive_topology_util.py index 29a8376f34..ca384fa6da 100644 --- a/canton/community/app/src/pack/examples/08-interactive-submission/interactive_topology_util.py +++ b/canton/community/app/src/pack/examples/08-interactive-submission/interactive_topology_util.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +# Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # [Imports start] diff --git a/canton/community/app/src/pack/examples/08-interactive-submission/setup.sh b/canton/community/app/src/pack/examples/08-interactive-submission/setup.sh index ed8b0d96ef..821750bfc9 100755 --- a/canton/community/app/src/pack/examples/08-interactive-submission/setup.sh +++ b/canton/community/app/src/pack/examples/08-interactive-submission/setup.sh @@ -76,6 +76,7 @@ generate_grpc_service() { echo "Generating python code from protobuf definitions" generate_grpc_code "$LEDGER_API_PROTO_PATH" "$LEDGER_API_V2_PATH/commands.proto" generate_grpc_code "$LEDGER_API_PROTO_PATH" "$LEDGER_API_V2_PATH/completion.proto" +generate_grpc_code "$LEDGER_API_PROTO_PATH" "$LEDGER_API_V2_PATH/crypto.proto" generate_grpc_code "$LEDGER_API_PROTO_PATH" "$LEDGER_API_V2_PATH/event.proto" generate_grpc_code "$LEDGER_API_PROTO_PATH" "$LEDGER_API_V2_PATH/offset_checkpoint.proto" generate_grpc_code "$LEDGER_API_PROTO_PATH" "$LEDGER_API_V2_PATH/reassignment.proto" diff --git a/canton/community/app/src/pack/examples/09-json-api/model/daml.yaml b/canton/community/app/src/pack/examples/09-json-api/model/daml.yaml index 403f5c5273..64659011c1 100644 --- a/canton/community/app/src/pack/examples/09-json-api/model/daml.yaml +++ b/canton/community/app/src/pack/examples/09-json-api/model/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.3.0-snapshot.20250502.13767.0.v2fc6c7e2 +sdk-version: 3.3.0-snapshot.20250305.0 build-options: - --enable-interfaces=yes name: model-tests diff --git a/canton/community/app/src/pack/examples/13-observability/docker-compose-observability.yml b/canton/community/app/src/pack/examples/13-observability/docker-compose-observability.yml index d86f836d1d..b122fe07fe 100644 --- a/canton/community/app/src/pack/examples/13-observability/docker-compose-observability.yml +++ b/canton/community/app/src/pack/examples/13-observability/docker-compose-observability.yml @@ -77,9 +77,13 @@ services: # Disabling it does not have effect on the Postgres dashboard. command: --no-collector.database environment: + # Change the config depending on the postgres instance DATA_SOURCE_USER: "postgres" DATA_SOURCE_PASS: "postgres" DATA_SOURCE_URI: "postgres:5432/postgres?sslmode=disable" + # Use this URI if you are running Postgres on the host system + # DATA_SOURCE_URI: "host.docker.internal:5432/postgres?sslmode=disable" + PG_EXPORTER_AUTO_DISCOVER_DATABASES: true volumes: # PostgreSQL Server Exporter configuration - ./postgres/postgres_exporter.yml:/postgres_exporter.yml diff --git a/canton/community/app/src/pack/examples/13-observability/grafana/dashboards/canton-network/sequencer.json b/canton/community/app/src/pack/examples/13-observability/grafana/dashboards/canton-network/sequencer.json index 065af39be1..e8cbf3b4eb 100644 --- a/canton/community/app/src/pack/examples/13-observability/grafana/dashboards/canton-network/sequencer.json +++ b/canton/community/app/src/pack/examples/13-observability/grafana/dashboards/canton-network/sequencer.json @@ -1015,6 +1015,22 @@ } }, "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": "/.*memory.*/" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "bytes" + } + ] + }, { "__systemRef": "hideSeriesFrom", "matcher": { @@ -1022,7 +1038,7 @@ "options": { "mode": "exclude", "names": [ - "Num. 
elements - sequencer2" + "Evicted memory - sequencer2" ], "prefix": "All except:", "readOnly": true @@ -1034,11 +1050,129 @@ "value": { "legend": false, "tooltip": false, - "viz": false + "viz": true } } ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 87 + }, + "id": 11, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "builder", + "expr": "daml_cache_size{cache=\"events-fan-out-buffer\"}", + "legendFormat": "Num. elements - {{node}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "builder", + "expr": "daml_cache_weight{cache=\"events-fan-out-buffer\"}", + "hide": false, + "legendFormat": "Approx. memory use - {{node}}", + "range": true, + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "builder", + "expr": "rate(daml_cache_evicted_weight{cache=\"events-fan-out-buffer\"}[$__rate_interval])", + "hide": false, + "legendFormat": "Evicted memory - {{node}}", + "range": true, + "refId": "E" + } + ], + "title": "Events fan-out buffer (size / memory / evicted memory)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ { "matcher": { "id": "byRegexp", @@ -1061,9 +1195,9 @@ "h": 8, "w": 12, "x": 12, - "y": 87 + "y": 95 }, - "id": 11, + "id": 12, "options": { "legend": { "calcs": [], @@ -1083,7 +1217,7 @@ "uid": "prometheus" }, "editorMode": "builder", - "expr": "daml_cache_size{cache=\"events-fan-out-buffer\"}", + "expr": "daml_cache_size{cache=\"payload-cache\"}", "legendFormat": "Num. elements - {{node}}", "range": true, "refId": "A" @@ -1094,7 +1228,7 @@ "uid": "prometheus" }, "editorMode": "builder", - "expr": "daml_cache_weight{cache=\"events-fan-out-buffer\"}", + "expr": "daml_cache_weight{cache=\"payload-cache\"}", "hide": false, "legendFormat": "Approx. 
memory use - {{node}}", "range": true, @@ -1106,14 +1240,14 @@ "uid": "prometheus" }, "editorMode": "builder", - "expr": "rate(daml_cache_evicted_weight{cache=\"events-fan-out-buffer\"}[$__rate_interval])", + "expr": "rate(daml_cache_evicted_weight{cache=\"payload-cache\"}[$__rate_interval])", "hide": false, "legendFormat": "Evicted memory - {{node}}", "range": true, "refId": "E" } ], - "title": "Events fan-out buffer (size / memory / evicted memory)", + "title": "Payload cache (size / memory / evicted memory)", "type": "timeseries" } ], @@ -1188,6 +1322,6 @@ "timezone": "", "title": "Sequencer Traffic", "uid": "fdjrxql2alblsd", - "version": 7, + "version": 9, "weekStart": "" } diff --git a/canton/community/app/src/pack/examples/13-observability/postgres/init.sql b/canton/community/app/src/pack/examples/13-observability/postgres/init.sql index f4c8c5784d..92330b3f45 100755 --- a/canton/community/app/src/pack/examples/13-observability/postgres/init.sql +++ b/canton/community/app/src/pack/examples/13-observability/postgres/init.sql @@ -4,6 +4,9 @@ CREATE DATABASE participant1 OWNER canton; CREATE DATABASE participant2 OWNER canton; CREATE DATABASE mediator1 OWNER canton; CREATE DATABASE mediator2 OWNER canton; +CREATE DATABASE mediator3 OWNER canton; +CREATE DATABASE mediator4 OWNER canton; +CREATE DATABASE mediator5 OWNER canton; CREATE DATABASE sequencer1 OWNER canton; CREATE DATABASE sequencer2 OWNER canton; CREATE DATABASE sequencer3 OWNER canton; diff --git a/canton/community/app/src/pack/examples/99-debug-encryption/bootstrap.sc b/canton/community/app/src/pack/examples/99-debug-encryption/bootstrap.sc deleted file mode 100644 index 4286c8d563..0000000000 --- a/canton/community/app/src/pack/examples/99-debug-encryption/bootstrap.sc +++ /dev/null @@ -1,71 +0,0 @@ -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.crypto.EncryptionPublicKey -import com.digitalasset.canton.serialization.DeserializationError -import com.digitalasset.canton.util.{ErrorUtil, HexString, MonadUtil} -import com.digitalasset.canton.version.HasToByteString -import com.digitalasset.canton.util.FutureInstances.parallelFuture -import com.google.protobuf.ByteString - -import scala.concurrent.duration.Duration -import scala.concurrent.{Await, Future} -import scala.util.{Failure, Success} - -// Wrapping object required due to https://github.com/com-lihaoyi/Ammonite/issues/534 -object Main { - - case class TestMessage(str: String) extends HasToByteString { - override def toByteString: ByteString = ByteString.copyFromUtf8(str) - } - - object TestMessage { - def fromByteString(bytes: ByteString): Either[DeserializationError, TestMessage] = - Right(TestMessage(bytes.toStringUtf8)) - } - - val publicKeyStr = - """0a6110041a5b3059301306072a8648ce3d020106082a8648ce3d03010703 - |4200049f36974eed2d06c9d2026b7ae4166ffeb0995d6b50f2af60c70cd8 - |4a67c0458a8d5cae3c4f5859ee2ff6aba23e92df3a8cef072b9a28611e1a - |e60257212ca3932801101e - |""".stripMargin.replaceAll("\n", "") - - val publicKeyBytes = - HexString.parseToByteString(publicKeyStr).getOrElse(sys.error("Invalid public key hexstring")) - - val publicKey = EncryptionPublicKey - .fromTrustedByteString(publicKeyBytes) - .fold(err => sys.error(s"Invalid public key: $err"), identity) - - val testMessage = TestMessage("No bugs in Canton") - - val p1 = participant1.underlying.get - val crypto = p1.cryptoPureApi - - val rounds = Option(System.getProperty("canton.encryption-test-rounds")).map(_.toInt).getOrElse(10) - val parallelism = 
Option(System.getProperty("canton.encryption-test-parallelism")).map(_.toInt).getOrElse(5) - - def main() { - - logger.info( - s"Run asymmetric encryption test with rounds=$rounds, algo=${crypto.defaultEncryptionAlgorithmSpec}" - ) - - val resultF = MonadUtil.parTraverseWithLimit_(PositiveInt.tryCreate(parallelism))(0 until rounds) { round => - Future { - crypto.encryptWith(testMessage, publicKey) match { - case Left(err) => - logger.error(s"[$round] Failed to encrypt: $err") - case Right(encrypted) => - logger.debug(s"[$round] Produced ciphertext: ${HexString.toHexString(encrypted.ciphertext)}") - } - } - } - - Await.ready(resultF, Duration.apply("60 second")) - - logger.info(s"Completed asymmetric encryption test") - println("Encryption test completed") - } -} - -Main.main() diff --git a/canton/community/app/src/pack/examples/99-debug-encryption/simple-topology.conf b/canton/community/app/src/pack/examples/99-debug-encryption/simple-topology.conf deleted file mode 100644 index 57de88fa5a..0000000000 --- a/canton/community/app/src/pack/examples/99-debug-encryption/simple-topology.conf +++ /dev/null @@ -1 +0,0 @@ -canton.participants.participant1 = {} diff --git a/canton/community/app/src/test/resources/documentation-snippets/dynamic-config.conf b/canton/community/app/src/test/resources/documentation-snippets/declarative-config.conf similarity index 100% rename from canton/community/app/src/test/resources/documentation-snippets/dynamic-config.conf rename to canton/community/app/src/test/resources/documentation-snippets/declarative-config.conf diff --git a/canton/community/app/src/test/resources/documentation-snippets/ledger-api-json.conf b/canton/community/app/src/test/resources/documentation-snippets/ledger-api-json.conf index 5a75cdc65a..0deaf22bc0 100644 --- a/canton/community/app/src/test/resources/documentation-snippets/ledger-api-json.conf +++ b/canton/community/app/src/test/resources/documentation-snippets/ledger-api-json.conf @@ -2,6 +2,7 @@ canton { participants { participant1 { http-ledger-api { + enabled = true server { address = 0.0.0.0 port = 10010 diff --git a/canton/community/app/src/test/resources/documentation-snippets/sequencer-api-limits.conf b/canton/community/app/src/test/resources/documentation-snippets/sequencer-api-limits.conf index d9d0ab17f7..64a4a0d6a7 100644 --- a/canton/community/app/src/test/resources/documentation-snippets/sequencer-api-limits.conf +++ b/canton/community/app/src/test/resources/documentation-snippets/sequencer-api-limits.conf @@ -1,4 +1,4 @@ -canton.sequencers.sequencer.parameters.sequencer-api-limits = { +canton.sequencers.sequencer.public-api.stream.limits = { "com.digitalasset.canton.sequencer.api.v30.SequencerService/DownloadTopologyStateForInit" : 10, "com.digitalasset.canton.sequencer.api.v30.SequencerService/Subscribe" : 1000, } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/ConfigStubs.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/ConfigStubs.scala index a0605db34a..70c65cfd76 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/ConfigStubs.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/ConfigStubs.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton import cats.syntax.option.* import com.digitalasset.canton.config.RequireTypes.Port import com.digitalasset.canton.config.{AdminServerConfig, CryptoConfig, StorageConfig} +import com.digitalasset.canton.http.JsonApiConfig import com.digitalasset.canton.participant.config.{ParticipantInitConfig, 
ParticipantNodeConfig} import com.digitalasset.canton.synchronizer.config.PublicServerConfig import com.digitalasset.canton.synchronizer.mediator.MediatorNodeConfig @@ -20,7 +21,7 @@ object ConfigStubs { ParticipantInitConfig(), CryptoConfig(), null, - None, + JsonApiConfig(), adminApi, StorageConfig.Memory(), ) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/config/declarative/DeclarativeApiTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/config/declarative/DeclarativeApiTest.scala index 016234e9e3..009f55bee5 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/config/declarative/DeclarativeApiTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/config/declarative/DeclarativeApiTest.scala @@ -98,6 +98,11 @@ class DeclarativeApiTest checkSelfConsistent, want.get().toSeq, fetch = _ => responseOr("fetch", "")(()).map(_ => state.toSeq), + get = { k => + responseOr("get", k)(()) map { _ => + state.get(k) + } + }, add = { case (k, v) => responseOr("add", k) { state.put(k, v) @@ -195,13 +200,13 @@ class DeclarativeApiTest _.warningMessage should include("Operation=remove failed"), ) } - "fetch" in { + "get" in { val f = new TestApi() loggerFactory.assertLogs( f.runTest( Map("a" -> 1), have = Some(Map()), - responses = Map(("fetch", "") -> Left("NOT GOOD")), + responses = Map(("get", "a") -> Left("NOT GOOD")), errors = -3, items = 0, expect = false, diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/environment/CommunityEnvironmentTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/environment/CommunityEnvironmentTest.scala index 4291ca127b..6e045afab3 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/environment/CommunityEnvironmentTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/environment/CommunityEnvironmentTest.scala @@ -24,7 +24,6 @@ import com.digitalasset.canton.participant.{ ParticipantNodeBootstrapFactory, ParticipantNodeParameters, } -import com.digitalasset.canton.resource.CommunityDbMigrationsMetaFactory import com.digitalasset.canton.synchronizer.mediator.{ MediatorNodeBootstrap, MediatorNodeBootstrapFactory, @@ -185,7 +184,6 @@ class CommunityEnvironmentTest extends AnyWordSpec with BaseTest with HasExecuti ): Either[String, MediatorNodeBootstrap] = Right(createMediatorMock(arguments.name, arguments.config)) }, - new CommunityDbMigrationsMetaFactory(loggerFactory), loggerFactory, ) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/environment/NodesTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/environment/NodesTest.scala index 1255ccf3c1..df8e3f719e 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/environment/NodesTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/environment/NodesTest.scala @@ -21,7 +21,6 @@ import com.digitalasset.canton.concurrent.{ import com.digitalasset.canton.config.* import com.digitalasset.canton.config.StartupMemoryCheckConfig.ReportingLevel import com.digitalasset.canton.crypto.Crypto -import com.digitalasset.canton.crypto.kms.CommunityKmsFactory import com.digitalasset.canton.crypto.store.CryptoPrivateStoreFactory import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.health.{ @@ -38,11 +37,7 @@ import com.digitalasset.canton.metrics.{ OnDemandMetricsReader, } import com.digitalasset.canton.networking.grpc.CantonMutableHandlerRegistry 
-import com.digitalasset.canton.resource.{ - CommunityDbMigrationsFactory, - CommunityStorageFactory, - Storage, -} +import com.digitalasset.canton.resource.{Storage, StorageSingleFactory} import com.digitalasset.canton.sequencing.client.SequencerClientConfig import com.digitalasset.canton.telemetry.ConfiguredOpenTelemetry import com.digitalasset.canton.time.SimClock @@ -152,10 +147,9 @@ class NodesTest extends FixtureAnyWordSpec with BaseTest with HasExecutionContex def arguments(config: TestNodeConfig) = factoryArguments(config) .toCantonNodeBootstrapCommonArguments( - storageFactory = new CommunityStorageFactory(StorageConfig.Memory()), + storageFactory = new StorageSingleFactory(StorageConfig.Memory()), cryptoPrivateStoreFactory = CryptoPrivateStoreFactory.withoutKms(wallClock, parallelExecutionContext), - kmsFactory = CommunityKmsFactory, ) .value @@ -231,7 +225,6 @@ class NodesTest extends FixtureAnyWordSpec with BaseTest with HasExecutionContex class TestNodes(factory: TestNodeFactory, configs: Map[String, TestNodeConfig]) extends ManagedNodes[TestNode, TestNodeConfig, CantonNodeParameters, TestNodeBootstrap]( (_, _) => factory.create(), - new CommunityDbMigrationsFactory(loggerFactory), timeouts, configs, _ => MockedNodeParameters.cantonNodeParameters(), diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentCatchupIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentCatchupIntegrationTest.scala new file mode 100644 index 0000000000..95e87bcd2e --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentCatchupIntegrationTest.scala @@ -0,0 +1,207 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests + +import com.digitalasset.canton.admin.api.client.commands.ParticipantAdminCommands.Inspection.{ + SynchronizerTimeRange, + TimeRange, +} +import com.digitalasset.canton.config +import com.digitalasset.canton.config.CantonRequireTypes.InstanceName +import com.digitalasset.canton.config.RequireTypes.NonNegativeProportion +import com.digitalasset.canton.config.{CommitmentSendDelay, DbConfig} +import com.digitalasset.canton.examples.java.iou.Iou +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} +import com.digitalasset.canton.integration.tests.util.{CommitmentTestUtil, IntervalDuration} +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + ConfigTransforms, + EnvironmentDefinition, + SharedEnvironment, +} +import com.digitalasset.canton.logging.SuppressionRule +import com.digitalasset.canton.participant.pruning.SortedReconciliationIntervalsHelpers +import com.digitalasset.canton.synchronizer.sequencer.HasProgrammableSequencer +import monocle.Monocle.toAppliedFocusOps +import org.slf4j.event.Level + +import java.time.Duration as JDuration +import java.util.concurrent.atomic.AtomicReference + +sealed trait AcsCommitmentCatchupIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment + with SortedReconciliationIntervalsHelpers + with CommitmentTestUtil + with HasProgrammableSequencer { + + private val interval: JDuration = JDuration.ofSeconds(5) + private implicit val intervalDuration: IntervalDuration = IntervalDuration(interval) + + private val alreadyDeployedContracts12: AtomicReference[Seq[Iou.Contract]] = + new AtomicReference[Seq[Iou.Contract]](Seq.empty) + private val alreadyDeployedContracts23: AtomicReference[Seq[Iou.Contract]] = + new AtomicReference[Seq[Iou.Contract]](Seq.empty) + + private lazy val maxDedupDuration = java.time.Duration.ofHours(1) + + override lazy val environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P3_S1M1_S1M1 + .addConfigTransforms( + ConfigTransforms.useStaticTime, + ConfigTransforms.updateMaxDeduplicationDurations(maxDedupDuration), + ConfigTransforms.updateAllParticipantConfigs_( + _.focus(_.parameters.engine.enableAdditionalConsistencyChecks) + .replace(true) + ), + ) + .updateTestingConfig( + _.focus(_.commitmentSendDelay).replace( + Some( + CommitmentSendDelay( + Some(NonNegativeProportion.zero), + Some(NonNegativeProportion.zero), + ) + ) + ) + ) + .withSetup { implicit env => + import env.* + + sequencer1.topology.synchronisation.await_idle() + sequencer2.topology.synchronisation.await_idle() + initializedSynchronizers foreach { case (_, initializedSynchronizer) => + initializedSynchronizer.synchronizerOwners.foreach( + _.topology.synchronizer_parameters + .propose_update( + initializedSynchronizer.synchronizerId, + _.update(reconciliationInterval = config.PositiveDurationSeconds(interval)), + ) + ) + } + + initializedSynchronizers.foreach { case (alias, synchronizer) => synchronizer } + + participants.all.synchronizers.connect_local(sequencer1, alias = daName) + participants.all.synchronizers.connect_local(sequencer2, alias = acmeName) + participants.all.foreach(_.dars.upload(CantonExamplesPath, synchronizerId = daId)) + passTopologyRegistrationTimeout(env) + } + + "Commitment catch-up" should { + "not trigger when the participant shouldn't implicitly have sent a 
commitments" in { + implicit env => + import env.* + + val simClock = environment.simClock.value + + logger.debug(s"P1 and P2 share a contract and exchange a commitments") + val (cids12da, period12da, commitment12da) = + deployOneAndCheck(daId, alreadyDeployedContracts12, participant1, participant2) + + val catchUpParam = initializedSynchronizers(daName).synchronizerOwners.headOption + .getOrElse(fail("synchronizer owners are missing")) + .topology + .synchronizer_parameters + .get_dynamic_synchronizer_parameters(daId) + .acsCommitmentsCatchUp + .getOrElse( + throw new IllegalStateException("Cannot retrieve acs commitment catch-up parameters") + ) + + // We advance the clock by catchUpParam.catchUpIntervalSkip intervals + val catchUpThreshold = interval.multipliedBy( + catchUpParam.catchUpIntervalSkip.value.toLong * catchUpParam.nrIntervalsToTriggerCatchUp.value.toLong + ) + simClock.advanceTo(simClock.now.add(catchUpThreshold)) + + loggerFactory.assertEventuallyLogsSeq(SuppressionRule.LevelAndAbove(Level.DEBUG))( + { + + logger.debug( + s"P2 and P3 share a contract and exchange a commitment. P2 also sends commitments to P1, because" + + s"they still share a contract. That makes P1 appear to be behind by the catch up threshold. However, P1" + + s"shouldn't trigger catch-up mode because it is not actually behind." + ) + val (cids23da, period23da, commitment23da) = + deployOneAndCheck(daId, alreadyDeployedContracts23, participant2, participant3) + + val synchronizerId = daId + val endTimestamp = period23da.toInclusive.forgetRefinement + participant1.commitments.lookup_sent_acs_commitments( + synchronizerTimeRanges = Seq( + SynchronizerTimeRange( + synchronizerId, + Some(TimeRange(endTimestamp.minusMillis(1), endTimestamp)), + ) + ), + counterParticipants = Seq.empty, + commitmentState = Seq.empty, + verboseMode = true, + ) + // user-manual-entry-end: InspectSentCommitments + eventually() { + val p1Computed = participant1.commitments.lookup_sent_acs_commitments( + synchronizerTimeRanges = Seq( + SynchronizerTimeRange( + synchronizerId, + Some(TimeRange(endTimestamp.minusMillis(1), endTimestamp)), + ) + ), + counterParticipants = Seq.empty, + commitmentState = Seq.empty, + verboseMode = true, + ) + + logger.debug( + "P1 sent commitments only for synchronizer da, so the result size should be 1" + ) + p1Computed.size shouldBe 1 + val daCmts = p1Computed.get(daId).value + logger.debug("P1 sent two commitments for synchronizer da") + daCmts.size shouldBe 1 + daCmts(0).destCounterParticipant shouldBe participant2.id + + p1Computed + } + }, + logs => + forAll(logs) { log => + log.message should not include ("Modes: in catch-up mode = true") + }, + ) + } + } +} + +class AcsCommitmentCatchupIntegrationTestPostgres extends AcsCommitmentCatchupIntegrationTest { + registerPlugin(new UsePostgres(loggerFactory)) + registerPlugin( + new UseReferenceBlockSequencer[DbConfig.Postgres]( + loggerFactory, + sequencerGroups = MultiSynchronizer( + Seq( + Set(InstanceName.tryCreate("sequencer1")), + Set(InstanceName.tryCreate("sequencer2")), + ) + ), + ) + ) +} + +//class AcsCommitmentCatchupIntegrationTestH2 extends AcsCommitmentCatchupIntegrationTest { +// registerPlugin(new UseH2(loggerFactory)) +// registerPlugin( +// new UseReferenceBlockSequencer[DbConfig.H2]( +// loggerFactory, +// sequencerGroups = MultiSynchronizer( +// Seq( +// Set(InstanceName.tryCreate("sequencer1")), +// Set(InstanceName.tryCreate("sequencer2")), +// ) +// ), +// ) +// ) +//} diff --git 
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentMetricsIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentMetricsIntegrationTest.scala index 81f3b650ed..1c776f1317 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentMetricsIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentMetricsIntegrationTest.scala @@ -8,19 +8,16 @@ import com.daml.metrics.MetricsFilterConfig import com.daml.metrics.api.MetricQualification import com.digitalasset.canton.admin.api.client.commands.ParticipantAdminCommands.Inspection.SlowCounterParticipantSynchronizerConfig import com.digitalasset.canton.config.CantonRequireTypes.InstanceName -import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, NonNegativeLong} +import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, NonNegativeProportion} +import com.digitalasset.canton.config.{CommitmentSendDelay, DbConfig} import com.digitalasset.canton.console.{ LocalParticipantReference, LocalSequencerReference, ParticipantReference, } import com.digitalasset.canton.examples.java.iou.Iou -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -82,7 +79,11 @@ trait AcsCommitmentMetricsIntegrationTest ) ) .updateTestingConfig( - _.focus(_.maxCommitmentSendDelayMillis).replace(Some(NonNegativeInt.zero)) + _.focus(_.commitmentSendDelay).replace( + Some( + CommitmentSendDelay(Some(NonNegativeProportion.zero), Some(NonNegativeProportion.zero)) + ) + ) ) .withSetup { implicit env => import env.* @@ -134,7 +135,10 @@ trait AcsCommitmentMetricsIntegrationTest ): Unit = { // Connect and disconnect so that we can modify the synchronizer connection config afterwards participant.synchronizers.connect_local(localSequencerReference, alias = synchronizerAlias) - participant.dars.upload(CantonExamplesPath) + participant.dars.upload( + CantonExamplesPath, + synchronizerId = Some(participant.synchronizers.id_of(synchronizerAlias)), + ) } private def deployAndCheckContractOnParticipants( @@ -769,7 +773,7 @@ trait AcsCommitmentMetricsIntegrationTest class AcsCommitmentMetricsIntegrationTestDefault extends AcsCommitmentMetricsIntegrationTest { registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq(Set("sequencer1"), Set("sequencer2")) @@ -782,7 +786,7 @@ class AcsCommitmentMetricsIntegrationTestDefault extends AcsCommitmentMetricsInt class AcsCommitmentMetricsIntegrationTestPostgres extends AcsCommitmentMetricsIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq(Set("sequencer1"), Set("sequencer2")) 
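The hunk above is one instance of a migration repeated throughout this patch: the old `maxCommitmentSendDelayMillis` testing knob (a `NonNegativeInt` millisecond cap) is replaced by `CommitmentSendDelay`, which takes two optional `NonNegativeProportion` values. Below is a minimal sketch of that recurring transform, assuming `updateTestingConfig` and the monocle focus syntax compose as shown in the hunks above; the helper name `withNoCommitmentSendDelay` is illustrative and does not appear in this patch:

    // Sketch only: factors out the testing-config transform that each commitment
    // test in this patch inlines. Setting both proportions to zero appears to
    // disable the randomized commitment send delay, keeping test timing deterministic.
    import com.digitalasset.canton.config.CommitmentSendDelay
    import com.digitalasset.canton.config.RequireTypes.NonNegativeProportion
    import com.digitalasset.canton.integration.EnvironmentDefinition
    import monocle.Monocle.toAppliedFocusOps

    def withNoCommitmentSendDelay(env: EnvironmentDefinition): EnvironmentDefinition =
      env.updateTestingConfig(
        _.focus(_.commitmentSendDelay).replace(
          Some(
            CommitmentSendDelay(
              Some(NonNegativeProportion.zero),
              Some(NonNegativeProportion.zero),
            )
          )
        )
      )

Each affected test could call such a helper on its `EnvironmentDefinition` instead of repeating the nested `Some(...)` construction.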
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentMismatchInspectionRunbookIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentMismatchInspectionRunbookIntegrationTest.scala index 6e7226064e..dea07b945f 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentMismatchInspectionRunbookIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentMismatchInspectionRunbookIntegrationTest.scala @@ -10,15 +10,12 @@ import com.digitalasset.canton.admin.api.client.commands.ParticipantAdminCommand } import com.digitalasset.canton.config import com.digitalasset.canton.config.CantonRequireTypes.InstanceName -import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.config.RequireTypes.NonNegativeProportion +import com.digitalasset.canton.config.{CommitmentSendDelay, DbConfig} import com.digitalasset.canton.console.ParticipantReference import com.digitalasset.canton.examples.java.iou.Iou -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.util.{CommitmentTestUtil, IntervalDuration} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -57,7 +54,14 @@ trait AcsCommitmentMismatchInspectionRunbookIntegrationTest ConfigTransforms.updateMaxDeduplicationDurations(maxDedupDuration), ) .updateTestingConfig( - _.focus(_.maxCommitmentSendDelayMillis).replace(Some(NonNegativeInt.zero)) + _.focus(_.commitmentSendDelay).replace( + Some( + CommitmentSendDelay( + Some(NonNegativeProportion.zero), + Some(NonNegativeProportion.zero), + ) + ) + ) ) .withSetup { implicit env => import env.* @@ -92,14 +96,15 @@ trait AcsCommitmentMismatchInspectionRunbookIntegrationTest connect(participant1, minObservationDuration) connect(participant2, minObservationDuration) participants.all.synchronizers.connect_local(sequencer2, alias = acmeName) - participants.all.foreach(_.dars.upload(CantonExamplesPath)) + participants.all.foreach(_.dars.upload(CantonExamplesPath, synchronizerId = daId)) + participants.all.foreach(_.dars.upload(CantonExamplesPath, synchronizerId = acmeId)) passTopologyRegistrationTimeout(env) } "Commitment mismatch inspection runbook should work" in { implicit env => import env.* - deployThreeAndCheck(daId, alreadyDeployedContracts) + deployThreeAndCheck(daId, alreadyDeployedContracts, participant1, participant2) logger.info("Open commitment on local participant") @@ -122,7 +127,8 @@ trait AcsCommitmentMismatchInspectionRunbookIntegrationTest .map(_.physicalSynchronizerId) should contain(daId) } - val (_, period, commitment) = deployThreeAndCheck(daId, alreadyDeployedContracts) + val (_, period, commitment) = + deployThreeAndCheck(daId, alreadyDeployedContracts, participant1, participant2) val synchronizerId1 = daId val mismatchTimestamp = period.toInclusive.forgetRefinement @@ -332,7 +338,7 @@ trait AcsCommitmentMismatchInspectionRunbookIntegrationTest .list_connected() 
.map(_.physicalSynchronizerId) should contain(daId) } - deployThreeAndCheck(daId, alreadyDeployedContracts) + deployThreeAndCheck(daId, alreadyDeployedContracts, participant1, participant2) }, ( LogEntryOptionality.OptionalMany -> (_.warningMessage should include( @@ -347,7 +353,7 @@ class AcsCommitmentMismatchInspectionRunbookIntegrationTestPostgres extends AcsCommitmentMismatchInspectionRunbookIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentNoWaitCounterParticipantIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentNoWaitCounterParticipantIntegrationTest.scala index 5bc3de1a5f..5f8d2f11cb 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentNoWaitCounterParticipantIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentNoWaitCounterParticipantIntegrationTest.scala @@ -8,14 +8,11 @@ import com.digitalasset.canton.admin.api.client.commands.ParticipantAdminCommand WaitCommitments, } import com.digitalasset.canton.config -import com.digitalasset.canton.config.RequireTypes.NonNegativeInt -import com.digitalasset.canton.config.{DbConfig, SynchronizerTimeTrackerConfig} +import com.digitalasset.canton.config.RequireTypes.NonNegativeProportion +import com.digitalasset.canton.config.{CommitmentSendDelay, DbConfig, SynchronizerTimeTrackerConfig} import com.digitalasset.canton.console.{LocalParticipantReference, ParticipantReference} import com.digitalasset.canton.examples.java.iou.Iou -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -89,7 +86,15 @@ trait AcsCommitmentNoWaitCounterParticipantIntegrationTest ConfigTransforms.updateMaxDeduplicationDurations(maxDedupDuration), ) .updateTestingConfig( - _.focus(_.maxCommitmentSendDelayMillis).replace(Some(NonNegativeInt.zero)) + _.focus(_.commitmentSendDelay) + .replace( + Some( + CommitmentSendDelay( + Some(NonNegativeProportion.zero), + Some(NonNegativeProportion.zero), + ) + ) + ) ) .withSetup { implicit env => import env.* @@ -491,7 +496,7 @@ trait AcsCommitmentNoWaitCounterParticipantIntegrationTest class AcsCommitmentNoWaitCounterParticipantIntegrationTestPostgres extends AcsCommitmentNoWaitCounterParticipantIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) override val isInMemory = false } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentProcessorIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentProcessorIntegrationTest.scala index 5931317db1..8f9ce615f0 100644 --- 
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentProcessorIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentProcessorIntegrationTest.scala @@ -6,16 +6,16 @@ package com.digitalasset.canton.integration.tests import com.digitalasset.canton.BigDecimalImplicits.* import com.digitalasset.canton.config import com.digitalasset.canton.config.CantonRequireTypes.InstanceName -import com.digitalasset.canton.config.RequireTypes.NonNegativeInt -import com.digitalasset.canton.config.{DbConfig, SynchronizerTimeTrackerConfig} +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, NonNegativeProportion} +import com.digitalasset.canton.config.{CommitmentSendDelay, DbConfig, SynchronizerTimeTrackerConfig} import com.digitalasset.canton.console.{LocalParticipantReference, ParticipantReference} import com.digitalasset.canton.data.{CantonTimestamp, CantonTimestampSecond} import com.digitalasset.canton.examples.java.iou.{Amount, Iou} -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.tests.util.{CommitmentTestUtil, IntervalDuration} import com.digitalasset.canton.integration.{ @@ -82,7 +82,15 @@ sealed trait AcsCommitmentProcessorIntegrationTest ConfigTransforms.updateMaxDeduplicationDurations(maxDedupDuration), ) .updateTestingConfig( - _.focus(_.maxCommitmentSendDelayMillis).replace(Some(NonNegativeInt.zero)) + _.focus(_.commitmentSendDelay) + .replace( + Some( + CommitmentSendDelay( + Some(NonNegativeProportion.zero), + Some(NonNegativeProportion.zero), + ) + ) + ) ) .withSetup { implicit env => import env.* @@ -120,7 +128,8 @@ sealed trait AcsCommitmentProcessorIntegrationTest connect(participant2, minObservationDuration2) connect(participant3, minObservationDuration2) participants.all.synchronizers.connect_local(sequencer2, alias = acmeName) - participants.all.foreach(_.dars.upload(CantonExamplesPath)) + participants.all.dars.upload(CantonExamplesPath, synchronizerId = daId) + participants.all.dars.upload(CantonExamplesPath, synchronizerId = acmeId) passTopologyRegistrationTimeout(env) } @@ -128,7 +137,7 @@ sealed trait AcsCommitmentProcessorIntegrationTest import env.* val simClock = environment.simClock.value - deployOnP1P2AndCheckContract(daId, iouContract) + deployOnTwoParticipantsAndCheckContract(daId, iouContract, participant1, participant2) val tick1 = tickAfter(simClock.uniqueTime()) val tick2 = tickAfter(tick1.forgetRefinement) @@ -197,7 +206,8 @@ sealed trait AcsCommitmentProcessorIntegrationTest val tickNoCommitments1 = tickAfter(simClock.uniqueTime()) simClock.advanceTo(tickNoCommitments1.forgetRefinement.plus(interval.multipliedBy(3))) logger.info(s"Upload a package to trigger some vetting transactions") - participant1.dars.upload(CantonTestsPath) + participant1.dars.upload(CantonTestsPath, synchronizerId = daId) + participant1.dars.upload(CantonTestsPath, synchronizerId = acmeId) simClock.advance(interval.plus(JDuration.ofSeconds(1))) val start = tickNoCommitments1.forgetRefinement.toInstant @@ -481,8 +491,9 @@ sealed trait AcsCommitmentProcessorIntegrationTest ) logger.info(s"We deploy the IOU again for following tests.") 
- alreadyDeployedContracts = - alreadyDeployedContracts.appended(deployOnP1P2AndCheckContract(daId, iouContract)) + alreadyDeployedContracts = alreadyDeployedContracts.appended( + deployOnTwoParticipantsAndCheckContract(daId, iouContract, participant1, participant2) + ) } "Periodic synchronizer time proofs trigger commitment computations" in { implicit env => @@ -523,7 +534,13 @@ sealed trait AcsCommitmentProcessorIntegrationTest val simClock = environment.simClock.value - deployOnP1P2AndCheckContract(daId, iouContract, observers = Seq(participant3)) + deployOnTwoParticipantsAndCheckContract( + daId, + iouContract, + participant1, + participant2, + observers = Seq(participant3), + ) val seq = getProgrammableSequencer(sequencer1.name) val p1RevokedP = Promise[Unit]() @@ -657,9 +674,6 @@ sealed trait AcsCommitmentProcessorIntegrationTest // participant1 will eventually disconnect from this synchronizer. // Wait for this disconnection to happen. eventually() { - // The sequencer connection pool internal mechanisms to restart connections rely on the clock time advancing. - simClock.advance(JDuration.ofSeconds(1)) - participant1.synchronizers.is_connected( initializedSynchronizers(daName).synchronizerId ) shouldBe false @@ -721,9 +735,6 @@ sealed trait AcsCommitmentProcessorIntegrationTest change = TopologyChangeOp.Remove, ) eventually() { - // The sequencer connection pool internal mechanisms to restart connections rely on the clock time advancing. - simClock.advance(JDuration.ofSeconds(1)) - participant2.synchronizers.is_connected( initializedSynchronizers(acmeName).synchronizerId ) shouldBe false @@ -792,7 +803,7 @@ class AcsCommitmentProcessorReferenceIntegrationTestPostgres extends AcsCommitmentProcessorIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentRepairIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentRepairIntegrationTest.scala index 90740cd257..b0e4d88f66 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentRepairIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentRepairIntegrationTest.scala @@ -9,16 +9,12 @@ import com.digitalasset.canton.admin.api.client.commands.ParticipantAdminCommand } import com.digitalasset.canton.config import com.digitalasset.canton.config.CantonRequireTypes.InstanceName -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} -import com.digitalasset.canton.config.{DbConfig, NonNegativeDuration} +import com.digitalasset.canton.config.RequireTypes.{NonNegativeProportion, PositiveInt} +import com.digitalasset.canton.config.{CommitmentSendDelay, DbConfig, NonNegativeDuration} import com.digitalasset.canton.console.LocalParticipantReference import com.digitalasset.canton.examples.java.iou.Iou -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UseH2, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import 
com.digitalasset.canton.integration.plugins.{UseH2, UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.util.{CommitmentTestUtil, IntervalDuration} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -64,7 +60,15 @@ trait AcsCommitmentRepairIntegrationTest ), ) .updateTestingConfig( - _.focus(_.maxCommitmentSendDelayMillis).replace(Some(NonNegativeInt.zero)) + _.focus(_.commitmentSendDelay) + .replace( + Some( + CommitmentSendDelay( + Some(NonNegativeProportion.zero), + Some(NonNegativeProportion.zero), + ) + ) + ) ) .withSetup { implicit env => import env.* @@ -83,7 +87,10 @@ trait AcsCommitmentRepairIntegrationTest participants.all.synchronizers.connect_local(sequencer1, alias = daName) participants.all.synchronizers.connect_local(sequencer2, alias = acmeName) - participants.all.foreach(_.dars.upload(CantonExamplesPath)) + participants.all.foreach { p => + p.dars.upload(CantonExamplesPath, synchronizerId = daId) + p.dars.upload(CantonExamplesPath, synchronizerId = acmeId) + } passTopologyRegistrationTimeout(env) } @@ -96,7 +103,14 @@ trait AcsCommitmentRepairIntegrationTest simClock.advanceTo(simClock.uniqueTime().immediateSuccessor) val createdCids = - (1 to nContracts.value).map(_ => deployOnP1P2AndCheckContract(synchronizerId, iouContract)) + (1 to nContracts.value).map(_ => + deployOnTwoParticipantsAndCheckContract( + synchronizerId, + iouContract, + participant1, + participant2, + ) + ) val tick1 = tickAfter(simClock.uniqueTime()) simClock.advanceTo(tick1.forgetRefinement.immediateSuccessor) @@ -323,7 +337,7 @@ trait AcsCommitmentRepairIntegrationTest class AcsCommitmentRepairIntegrationTestPostgres extends AcsCommitmentRepairIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq( @@ -338,7 +352,7 @@ class AcsCommitmentRepairIntegrationTestPostgres extends AcsCommitmentRepairInte class AcsCommitmentRepairIntegrationTestH2 extends AcsCommitmentRepairIntegrationTest { registerPlugin(new UseH2(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.H2]( + new UseReferenceBlockSequencer[DbConfig.H2]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentToolingIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentToolingIntegrationTest.scala index 4b1a77d9bb..648868bc24 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentToolingIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AcsCommitmentToolingIntegrationTest.scala @@ -5,8 +5,8 @@ package com.digitalasset.canton.integration.tests import com.digitalasset.canton.config import com.digitalasset.canton.config.CantonRequireTypes.InstanceName -import com.digitalasset.canton.config.RequireTypes.NonNegativeInt -import com.digitalasset.canton.config.{DbConfig, SynchronizerTimeTrackerConfig} +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, NonNegativeProportion} +import com.digitalasset.canton.config.{CommitmentSendDelay, DbConfig, SynchronizerTimeTrackerConfig} import com.digitalasset.canton.console.{ CommandFailure, LocalParticipantReference, @@ -15,11 +15,11 @@ import 
com.digitalasset.canton.console.{ import com.digitalasset.canton.crypto.LtHash16 import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.examples.java.iou.Iou -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.tests.util.{CommitmentTestUtil, IntervalDuration} import com.digitalasset.canton.integration.util.AcsInspection.assertInAcsSync @@ -61,6 +61,7 @@ import org.slf4j.event.Level import java.time.Duration as JDuration import java.util.concurrent.atomic.{AtomicInteger, AtomicReference} +import scala.concurrent.duration.DurationInt import scala.concurrent.{ExecutionContext, Promise} import scala.jdk.CollectionConverters.* @@ -97,9 +98,17 @@ trait AcsCommitmentToolingIntegrationTest .addConfigTransforms( ConfigTransforms.useStaticTime, ConfigTransforms.updateMaxDeduplicationDurations(maxDedupDuration), + ConfigTransforms.updateTargetTimestampForwardTolerance(24.hours), ) .updateTestingConfig( - _.focus(_.maxCommitmentSendDelayMillis).replace(Some(NonNegativeInt.zero)) + _.focus(_.commitmentSendDelay).replace( + Some( + CommitmentSendDelay( + Some(NonNegativeProportion.zero), + Some(NonNegativeProportion.zero), + ) + ) + ) ) .withSetup { implicit env => import env.* @@ -137,7 +146,10 @@ trait AcsCommitmentToolingIntegrationTest connect(participant2, minObservationDuration2) connect(participant3, minObservationDuration2) participants.all.synchronizers.connect_local(sequencer2, alias = acmeName) - participants.all.foreach(_.dars.upload(CantonExamplesPath)) + participants.all.foreach { p => + p.dars.upload(CantonExamplesPath, synchronizerId = daId) + p.dars.upload(CantonExamplesPath, synchronizerId = acmeId) + } passTopologyRegistrationTimeout(env) } @@ -171,8 +183,9 @@ trait AcsCommitmentToolingIntegrationTest logger.debug(s"P1 sends two commitments to P2 ") val (cids1da, period1da, commitment1da) = - deployThreeAndCheck(daId, alreadyDeployedContracts) - val (_, period2da, commitment2da) = deployThreeAndCheck(daId, alreadyDeployedContracts) + deployThreeAndCheck(daId, alreadyDeployedContracts, participant1, participant2) + val (_, period2da, commitment2da) = + deployThreeAndCheck(daId, alreadyDeployedContracts, participant1, participant2) logger.debug( "Check that P1 can retrieve the two commitments it computed and sent. Eventually P1 receives the" + @@ -403,7 +416,8 @@ trait AcsCommitmentToolingIntegrationTest logger.debug( "Now have P1 and P2 exchange commitments again, so that P1 can see the mismatch." 
) - val (_, period3da, commitment3da) = deployThreeAndCheck(daId, alreadyDeployedContracts) + val (_, period3da, commitment3da) = + deployThreeAndCheck(daId, alreadyDeployedContracts, participant1, participant2) val p1SentCmts = eventually() { val p1Computed = participant1.commitments.lookup_sent_acs_commitments( synchronizerTimeRanges = Seq( @@ -528,7 +542,8 @@ trait AcsCommitmentToolingIntegrationTest "participant can open a commitment it previously sent" in { implicit env => import env.* - val (_, period, commitment) = deployThreeAndCheck(daId, alreadyDeployedContracts) + val (_, period, commitment) = + deployThreeAndCheck(daId, alreadyDeployedContracts, participant1, participant2) val contractsAndReassignmentCounters = participant1.commitments.open_commitment( commitment, daId, @@ -546,7 +561,7 @@ trait AcsCommitmentToolingIntegrationTest val simClock = environment.simClock.value - deployThreeAndCheck(daId, alreadyDeployedContracts) + deployThreeAndCheck(daId, alreadyDeployedContracts, participant1, participant2) logger.info( "Advance time five reconciliation intervals, remembering the tick after three reconciliation intervals." @@ -591,7 +606,7 @@ trait AcsCommitmentToolingIntegrationTest import env.* val (_createdCids, period, _commitment) = - deployThreeAndCheck(daId, alreadyDeployedContracts) + deployThreeAndCheck(daId, alreadyDeployedContracts, participant1, participant2) val notSentCmt = LtHash16().getByteString() val hashedNotSentCmd = AcsCommitment.hashCommitment(notSentCmt) // give wrong commitment but correct timestamp and counter-participant @@ -618,7 +633,7 @@ trait AcsCommitmentToolingIntegrationTest import env.* val (_createdCids, period, commitment) = - deployThreeAndCheck(daId, alreadyDeployedContracts) + deployThreeAndCheck(daId, alreadyDeployedContracts, participant1, participant2) // give wrong timestamp but a computed commitment and correct counter-participant loggerFactory.assertThrowsAndLogs[CommandFailure]( @@ -646,7 +661,8 @@ trait AcsCommitmentToolingIntegrationTest "the given counter-participant is incorrect" in { implicit env => import env.* - val (_createdCids, period, commitment) = deployThreeAndCheck(daId, alreadyDeployedContracts) + val (_createdCids, period, commitment) = + deployThreeAndCheck(daId, alreadyDeployedContracts, participant1, participant2) // give wrong counter-participant but a computed commitment and its correct timestamp loggerFactory.assertThrowsAndLogs[CommandFailure]( @@ -670,7 +686,8 @@ trait AcsCommitmentToolingIntegrationTest "the given timestamp is not a reconciliation interval tick" in { implicit env => import env.* - val (_createdCids, period, commitment) = deployThreeAndCheck(daId, alreadyDeployedContracts) + val (_createdCids, period, commitment) = + deployThreeAndCheck(daId, alreadyDeployedContracts, participant1, participant2) // this test assumes that the reconciliation interval is not 1 second for the given opening commitment // timestamp to not fall on a reconciliation interval boundary @@ -704,7 +721,8 @@ trait AcsCommitmentToolingIntegrationTest val simClock = environment.simClock.value - val (_createdCids, period, commitment) = deployThreeAndCheck(daId, alreadyDeployedContracts) + val (_createdCids, period, commitment) = + deployThreeAndCheck(daId, alreadyDeployedContracts, participant1, participant2) logger.info( "Participant1 waits to receive counter-commitment, so that it can prune past data" @@ -724,7 +742,7 @@ trait AcsCommitmentToolingIntegrationTest logger.info( "Participant1 deploy some more contracts to advance 
the clean replay, so that it can prune past data" ) - deployThreeAndCheck(daId, alreadyDeployedContracts) + deployThreeAndCheck(daId, alreadyDeployedContracts, participant1, participant2) logger.info("Wait that ACS background pruning advanced past the timestamp of the commitment") eventually() { @@ -775,7 +793,8 @@ trait AcsCommitmentToolingIntegrationTest import env.* logger.info("Create three contracts on synchronizer da") - val (createdCidsDa, _per, _cmt) = deployThreeAndCheck(daId, alreadyDeployedContracts) + val (createdCidsDa, _per, _cmt) = + deployThreeAndCheck(daId, alreadyDeployedContracts, participant1, participant2) // archive one of these created contracts logger.info("Archive one of these contaracts") @@ -841,7 +860,8 @@ trait AcsCommitmentToolingIntegrationTest import env.* logger.info("Create three contracts on synchronizer da") - val (createdCidsDa, _per, _cmt) = deployThreeAndCheck(daId, alreadyDeployedContracts) + val (createdCidsDa, _per, _cmt) = + deployThreeAndCheck(daId, alreadyDeployedContracts, participant1, participant2) val ts = getCleanReqTs(participant1, daId).getOrElse(fail("No clean request timestamp found")) @@ -873,7 +893,8 @@ trait AcsCommitmentToolingIntegrationTest participants.all.synchronizers.connect_local(sequencer2, alias = acmeName) logger.info("Create three contracts on synchronizer da") - val (createdCidsDa, _per, _cmt) = deployThreeAndCheck(daId, alreadyDeployedContracts) + val (createdCidsDa, _per, _cmt) = + deployThreeAndCheck(daId, alreadyDeployedContracts, participant1, participant2) val tsBeforeReassign = getCleanReqTs(participant1, daId).getOrElse(fail("No clean request timestamp found")) @@ -924,9 +945,12 @@ trait AcsCommitmentToolingIntegrationTest participant1.health.ping(participant2, synchronizerId = Some(acmeId)) logger.info("Create three contracts on synchronizer acme") - val c1 = deployOnP1P2AndCheckContract(acmeId, iouContract) - val c2 = deployOnP1P2AndCheckContract(acmeId, iouContract) - val c3 = deployOnP1P2AndCheckContract(acmeId, iouContract) + val c1 = + deployOnTwoParticipantsAndCheckContract(acmeId, iouContract, participant1, participant2) + val c2 = + deployOnTwoParticipantsAndCheckContract(acmeId, iouContract, participant1, participant2) + val c3 = + deployOnTwoParticipantsAndCheckContract(acmeId, iouContract, participant1, participant2) val createdCidsAcme = Seq(c1, c2, c3) logger.info( @@ -1056,7 +1080,7 @@ trait AcsCommitmentToolingIntegrationTest class AcsCommitmentToolingIntegrationTestPostgres extends AcsCommitmentToolingIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ActiveContractsIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ActiveContractsIntegrationTest.scala index 7582b3f9a8..857b7a4213 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ActiveContractsIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ActiveContractsIntegrationTest.scala @@ -25,11 +25,8 @@ import com.digitalasset.canton.console.LocalParticipantReference import com.digitalasset.canton.crypto.TestSalt import com.digitalasset.canton.data.ViewPosition import com.digitalasset.canton.examples.java.iou.{Amount, 
GetCash, Iou} -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.ActiveContractsIntegrationTest.* import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.util.GrpcAdminCommandSupport.ParticipantReferenceOps @@ -41,6 +38,7 @@ import com.digitalasset.canton.integration.util.{ } import com.digitalasset.canton.integration.{ CommunityIntegrationTest, + ConfigTransforms, EnvironmentDefinition, SharedEnvironment, TestConsoleEnvironment, @@ -53,6 +51,7 @@ import com.digitalasset.canton.protocol.ContractIdAbsolutizer.ContractIdAbsoluti import com.digitalasset.canton.sequencing.protocol.MediatorGroupRecipient import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex import com.digitalasset.canton.topology.{PartyId, PhysicalSynchronizerId, SynchronizerId} +import com.digitalasset.canton.util.TestContractHasher import com.digitalasset.canton.{BaseTest, ReassignmentCounter, config} import com.digitalasset.daml.lf.data.Ref import com.digitalasset.daml.lf.transaction.CreationTime @@ -61,6 +60,7 @@ import org.scalatest.Assertion import java.util.UUID import scala.collection.concurrent.TrieMap +import scala.concurrent.duration.DurationInt import scala.jdk.CollectionConverters.* class ActiveContractsIntegrationTest @@ -72,7 +72,7 @@ class ActiveContractsIntegrationTest registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq(Set("sequencer1"), Set("sequencer2"), Set("sequencer3")) @@ -87,6 +87,10 @@ class ActiveContractsIntegrationTest override def environmentDefinition: EnvironmentDefinition = EnvironmentDefinition.P2_S1M1_S1M1_S1M1 + .addConfigTransforms( + // Ensure reassignments are not tripped up by some participants being a little behind. 
+ ConfigTransforms.updateTargetTimestampForwardTolerance(30.seconds) + ) .withSetup { implicit env => import env.* @@ -122,7 +126,10 @@ class ActiveContractsIntegrationTest party2 = participant2.parties.enable("party2", synchronizer = alias) } - participants.all.dars.upload(BaseTest.CantonExamplesPath) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = daId) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = acmeId) + participants.all.dars + .upload(BaseTest.CantonExamplesPath, synchronizerId = repairSynchronizerId) } protected def getActiveContracts( @@ -163,6 +170,7 @@ class ActiveContractsIntegrationTest ): ContractData = { import env.* + // TODO(#27612) Test should also pass with V12 contract IDs val cantonContractIdVersion = AuthenticatedContractIdVersionV11 val pureCrypto = participant1.underlying.map(_.cryptoPureApi).value @@ -204,14 +212,26 @@ class ActiveContractsIntegrationTest createIndex = 0, viewPosition = ViewPosition(List.empty), ) + val contractHash = + TestContractHasher.Sync.hash(unsuffixedCreateNode, contractIdSuffixer.contractHashingMethod) val ContractIdSuffixer.RelativeSuffixResult(suffixedCreateNode, _, _, authenticationData) = contractIdSuffixer - .relativeSuffixForLocalContract(contractSalt, ledgerCreateTime, unsuffixedCreateNode) + .relativeSuffixForLocalContract( + contractSalt, + ledgerCreateTime, + unsuffixedCreateNode, + contractHash, + ) .valueOr(err => fail("Failed to create contract suffix: " + err)) val suffixedFci = LfFatContractInst .fromCreateNode(suffixedCreateNode, ledgerCreateTime, authenticationData.toLfBytes) val absolutizedFci = contractIdAbsolutizer.absolutizeFci(suffixedFci).value - val repairContract = RepairContract(psid, absolutizedFci, ReassignmentCounter(0)) + val repairContract = RepairContract( + psid, + absolutizedFci, + ReassignmentCounter(0), + absolutizedFci.templateId.packageId, + ) val startOffset = participant1.ledger_api.state.end() participant1.synchronizers.disconnect_all() @@ -796,8 +816,6 @@ class ActiveContractsIntegrationTest StateService.getActiveContracts( proto.state_service.GetActiveContractsRequest( activeAtOffset = bigOffset, - filter = None, - verbose = false, eventFormat = Some(getEventFormat(List(party1a.toLf))), ) ) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AdminWorkflowConfigTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AdminWorkflowConfigTest.scala index bc1c3ba6bc..1e39c589b3 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AdminWorkflowConfigTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/AdminWorkflowConfigTest.scala @@ -7,9 +7,9 @@ import com.digitalasset.canton.admin.api.client.data.TemplateId import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -17,8 +17,9 @@ import com.digitalasset.canton.integration.{ SharedEnvironment, } import com.digitalasset.canton.logging.{LogEntry, SuppressionRule} +import com.digitalasset.canton.participant.admin.PingService import com.digitalasset.canton.participant.admin.workflows.java.canton.internal.ping.Ping -import 
com.digitalasset.canton.participant.admin.{AdminWorkflowServices, PingService} +import com.digitalasset.canton.participant.sync.CantonSyncService import com.digitalasset.canton.synchronizer.sequencer.{HasProgrammableSequencer, SendDecision} import org.slf4j.event.Level @@ -87,7 +88,7 @@ trait AdminWorkflowConfigTest } val ReUploadSuppressionRule: SuppressionRule = - SuppressionRule.forLogger[AdminWorkflowServices] && SuppressionRule.Level(Level.DEBUG) + SuppressionRule.forLogger[CantonSyncService] && SuppressionRule.Level(Level.DEBUG) "Dar is not re-uploaded if participant is restarted" in { implicit env => import env.* @@ -103,6 +104,7 @@ trait AdminWorkflowConfigTest // AdminWorkflowPackages are already loaded from previous ping timeout test participant1.stop() participant1.start() + participant1.synchronizers.reconnect_all() }, allLogEntries => { val logEntries = logEntriesOfInterest(allLogEntries) @@ -116,6 +118,6 @@ trait AdminWorkflowConfigTest class AdminWorkflowConfigTestPostgres extends AdminWorkflowConfigTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/BackpressureIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/BackpressureIntegrationTest.scala index 069c193178..7f8796ee70 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/BackpressureIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/BackpressureIntegrationTest.scala @@ -16,8 +16,8 @@ import com.digitalasset.canton.console.{ import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.examples.java.iou.{Amount, Iou} import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -55,8 +55,8 @@ trait BackpressureIntegrationTest ) .withSetup { implicit env => import env.* - participants.local.dars.upload(CantonExamplesPath) participants.local.synchronizers.connect_local(sequencer1, daName) + participants.local.dars.upload(CantonExamplesPath) // Code snippet just for the user manual // user-manual-entry-begin: SetResourceLimits @@ -431,7 +431,7 @@ trait BackpressureIntegrationTest } class BackpressureIntegrationTestInMemory extends BackpressureIntegrationTest { - registerPlugin(new UseCommunityReferenceBlockSequencer[StorageConfig.Memory](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[StorageConfig.Memory](loggerFactory)) registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/BlockedContractIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/BlockedContractIntegrationTest.scala index f84eee5aeb..ecbf8bdb08 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/BlockedContractIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/BlockedContractIntegrationTest.scala @@ -10,10 
+10,7 @@ import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.console.ParticipantReference import com.digitalasset.canton.error.TransactionRoutingError.TopologyErrors.UnknownInformees import com.digitalasset.canton.examples.java.iou.{Amount, Iou} -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -150,5 +147,5 @@ sealed trait BlockedContractIntegrationTest class BlockedContractIntegrationTestPostgres extends BlockedContractIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ChaoticStartupTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ChaoticStartupTest.scala index a477644bf4..034f96c6fd 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ChaoticStartupTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ChaoticStartupTest.scala @@ -4,10 +4,7 @@ package com.digitalasset.canton.integration.tests import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -89,5 +86,5 @@ trait ChaoticStartupTest extends CommunityIntegrationTest with SharedEnvironment class ChaoticStartupTestPostgres extends ChaoticStartupTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ClockSkewIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ClockSkewIntegrationTest.scala index 239ddc74ae..82c7459e1f 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ClockSkewIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ClockSkewIntegrationTest.scala @@ -11,10 +11,7 @@ import com.daml.test.evidence.tag.Reliability.{ Remediation, } import com.digitalasset.canton.config.{ClockConfig, DbConfig} -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -107,7 +104,7 @@ abstract class ClockSkewIntegrationTest(skews: Map[String, FiniteDuration]) } registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } // We test with each node either ahead of or behind all the other nodes
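// Illustrative sketch only, not a hunk from this patch: the recurring one-line change in the
// files above renames the sequencer test plugin from UseCommunityReferenceBlockSequencer to
// UseReferenceBlockSequencer while keeping its type parameter and constructor arguments.
// `SomeIntegrationTest` below is a hypothetical suite standing in for the concrete classes above.
class SomeIntegrationTestPostgres extends SomeIntegrationTest {
  registerPlugin(new UsePostgres(loggerFactory))
  // Before the rename this read:
  //   registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
  registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
}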
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CommandDeduplicationIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CommandDeduplicationIntegrationTest.scala index ee9d36da85..7978b2b2e5 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CommandDeduplicationIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CommandDeduplicationIntegrationTest.scala @@ -20,12 +20,12 @@ import com.digitalasset.canton.console.{ import com.digitalasset.canton.data.{CantonTimestamp, DeduplicationPeriod, Offset} import com.digitalasset.canton.error.CantonBaseError import com.digitalasset.canton.examples.java.cycle as C -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.plugins.{ UseBftSequencer, - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.tests.CommandDeduplicationIntegrationTest.DelayPromises import com.digitalasset.canton.integration.{ @@ -97,7 +97,8 @@ trait CommandDeduplicationIntegrationTest ) participant1.synchronizers.connect_local(sequencer1, alias = daName) participant1.synchronizers.connect_local(sequencer2, alias = acmeName) - participant1.dars.upload(CantonExamplesPath) + participant1.dars.upload(CantonExamplesPath, synchronizerId = daId) + participant1.dars.upload(CantonExamplesPath, synchronizerId = acmeId) participant1.parties.enable("Alice", synchronizer = daName) participant1.parties.enable("Alice", synchronizer = acmeName) } @@ -783,7 +784,7 @@ trait CommandDeduplicationTestHelpers { this: BaseTest with HasProgrammableSeque class CommandDeduplicationIntegrationTestInMemory extends CommandDeduplicationIntegrationTest { registerPlugin( - new UseCommunityReferenceBlockSequencer[StorageConfig.Memory]( + new UseReferenceBlockSequencer[StorageConfig.Memory]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq(Set("sequencer1"), Set("sequencer2")) @@ -1062,6 +1063,6 @@ private object CommandDeduplicationIntegrationTest { class CommandDeduplicationPruningIntegrationTestPostgres extends CommandDeduplicationPruningIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CommandInterpretationIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CommandInterpretationIntegrationTest.scala index 2c396c7d61..4d3ff1effb 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CommandInterpretationIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CommandInterpretationIntegrationTest.scala @@ -5,10 +5,7 @@ package com.digitalasset.canton.integration.tests import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.console.CommandFailure -import com.digitalasset.canton.integration.plugins.{ - 
UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, ConfigTransforms, @@ -141,5 +138,5 @@ sealed trait CommandInterpretationIntegrationTest class CommandInterpretationReferenceIntegrationTestPostgres extends CommandInterpretationIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CommandRejectionConcurrentRequestTopologyChangeIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CommandRejectionConcurrentRequestTopologyChangeIntegrationTest.scala index 3cadc26e33..a3e2eaba47 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CommandRejectionConcurrentRequestTopologyChangeIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CommandRejectionConcurrentRequestTopologyChangeIntegrationTest.scala @@ -11,11 +11,11 @@ import com.digitalasset.canton.console.LocalParticipantReference import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.error.MediatorError.MalformedMessage import com.digitalasset.canton.examples.java.iou.{Amount, Iou} -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.util.{AcsInspection, EntitySyntax, PartiesAllocator} @@ -87,13 +87,6 @@ sealed trait CommandRejectionConcurrentRequestTopologyChangeIntegrationTest .withSetup { implicit env => import env.* - // So that topology changes become effective as of sequencing time - sequencer1.topology.synchronizer_parameters - .propose_update( - daId, - _.update(topologyChangeDelay = config.NonNegativeFiniteDuration.Zero), - ) - participants.all.synchronizers.connect_local(sequencer1, alias = daName) participants.all.dars.upload(BaseTest.CantonExamplesPath) @@ -430,7 +423,7 @@ class CommandRejectionConcurrentRequestTopologyChangeIntegrationTestPostgres extends CommandRejectionConcurrentRequestTopologyChangeIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq(Set("sequencer1"), Set("sequencer2")) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CommandResubmissionIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CommandResubmissionIntegrationTest.scala index c542556c07..5ac143ab26 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CommandResubmissionIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CommandResubmissionIntegrationTest.scala 
@@ -6,9 +6,9 @@ package com.digitalasset.canton.integration.tests import com.digitalasset.canton.config import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -91,6 +91,6 @@ trait CommandResubmissionIntegrationTest class CommandResubmissionReferenceIntegrationTestPostgres extends CommandResubmissionIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CommandSubmissionIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CommandSubmissionIntegrationTest.scala index 8b8ca2c6b4..d8bb6f2946 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CommandSubmissionIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CommandSubmissionIntegrationTest.scala @@ -6,10 +6,7 @@ package com.digitalasset.canton.integration.tests import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.console.CommandFailure import com.digitalasset.canton.damltests.java.divulgence.DivulgeIouByExercise -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -74,5 +71,5 @@ trait CommandSubmissionIntegrationTest extends CommunityIntegrationTest with Sha class CommandSubmissionIntegrationTestPostgres extends CommandSubmissionIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ConfigGenerationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ConfigGenerationTest.scala index d9da8a71c0..9325946f97 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ConfigGenerationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ConfigGenerationTest.scala @@ -6,10 +6,7 @@ package com.digitalasset.canton.integration.tests import better.files.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.console.ParticipantReference -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -135,5 +132,5 @@ trait ConfigGenerationTest extends CommunityIntegrationTest with SharedEnvironme class ConfigGenerationTestPostgres extends ConfigGenerationTest { registerPlugin(new UsePostgres(loggerFactory)) - 
registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ConsoleCommandIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ConsoleCommandIntegrationTest.scala index 787bb61c60..f048ce4710 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ConsoleCommandIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ConsoleCommandIntegrationTest.scala @@ -13,7 +13,7 @@ import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.console.{CommandFailure, LocalParticipantReference} import com.digitalasset.canton.examples.java.iou.{Amount, Iou} import com.digitalasset.canton.integration.* -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.util.EntitySyntax import com.digitalasset.canton.topology.PartyId import org.scalatest.OptionValues @@ -259,7 +259,7 @@ trait ConsoleCommandIntegrationTest } class ConsoleCommandIntegrationTestDefault extends ConsoleCommandIntegrationTest { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) } //class ConsoleCommandIntegrationTestPostgres extends ConsoleCommandIntegrationTest { diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ConsoleCommandTestWithSharedEnv.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ConsoleCommandTestWithSharedEnv.scala index de25b9d35d..90e9b591a3 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ConsoleCommandTestWithSharedEnv.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ConsoleCommandTestWithSharedEnv.scala @@ -10,10 +10,7 @@ import com.digitalasset.canton.console.ParticipantReference import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.examples.java.iou.{Amount, Iou} -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.util.EntitySyntax import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -50,8 +47,8 @@ sealed trait ConsoleCommandIntegrationTestWithSharedEnv alice = participant1.parties.enable("Alice", synchronizeParticipants = Seq(participant1)) bob = participant2.parties.enable("Bob", synchronizeParticipants = Seq(participant1)) - participant1.dars.upload(CantonExamplesPath) - participant2.dars.upload(CantonExamplesPath) + participant1.dars.upload(CantonExamplesPath, synchronizerId = daId) + participant2.dars.upload(CantonExamplesPath, synchronizerId = daId) val commands = Seq.fill[Command](3)(createIouWithObserver(bob)) submitAndReturnIous(participant1, bank, commands).discard } @@ -205,5 +202,5 @@ sealed trait ConsoleCommandIntegrationTestWithSharedEnv final class ConsoleCommandReferenceIntegrationTestWithSharedEnvPostgres extends 
ConsoleCommandIntegrationTestWithSharedEnv { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CrashRecoveryDuringAutoInitIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CrashRecoveryDuringAutoInitIntegrationTest.scala index 16a83046ce..29c1713234 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CrashRecoveryDuringAutoInitIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/CrashRecoveryDuringAutoInitIntegrationTest.scala @@ -8,8 +8,8 @@ import com.digitalasset.canton.console.LocalInstanceReference import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.integration.plugins.{ UseBftSequencer, - UseCommunityReferenceBlockSequencer, UsePostgres, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -17,7 +17,7 @@ import com.digitalasset.canton.integration.{ SharedEnvironment, } import com.digitalasset.canton.metrics.CommonMockMetrics -import com.digitalasset.canton.resource.{CommunityStorageFactory, DbStorage} +import com.digitalasset.canton.resource.{DbStorage, StorageSingleFactory} import com.digitalasset.canton.time.SimClock import org.scalatest.Assertion @@ -49,7 +49,7 @@ trait CrashRecoveryDuringAutoInitIntegrationTest node.stop() (id, keys, txs) } - val factory = new CommunityStorageFactory(node.config.storage) + val factory = new StorageSingleFactory(node.config.storage) val storage = factory.tryCreate( connectionPoolForParticipant = false, None, @@ -113,7 +113,7 @@ trait CrashRecoveryDuringAutoInitIntegrationTest class CrashRecoveryDuringAutoInitReferenceIntegrationTestPostgres extends CrashRecoveryDuringAutoInitIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } class CrashRecoveryDuringAutoInitBftOrderingIntegrationTestPostgres diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/DamlRollbackTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/DamlRollbackTest.scala index a165467198..aacd5f4241 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/DamlRollbackTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/DamlRollbackTest.scala @@ -9,8 +9,8 @@ import com.digitalasset.canton.config.DbConfig.Postgres import com.digitalasset.canton.console.{LocalParticipantReference, ParticipantReference} import com.digitalasset.canton.integration.plugins.{ UseBftSequencer, - UseCommunityReferenceBlockSequencer, UsePostgres, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.tests.DamlRollbackTest.TbContext import com.digitalasset.canton.integration.util.EntitySyntax @@ -29,7 +29,7 @@ import com.digitalasset.daml.lf.data.FrontStack import com.digitalasset.daml.lf.transaction.Node import com.digitalasset.daml.lf.transaction.test.TestNodeBuilder.{ CreateKey, - CreateTransactionVersion, + CreateSerializationVersion, } import 
com.digitalasset.daml.lf.transaction.test.{TestNodeBuilder, TransactionBuilder} import com.digitalasset.daml.lf.value.Value as LfValue @@ -98,7 +98,7 @@ trait DamlRollbackTest protected def txBuilderContextFrom[A](tx: LfVersionedTransaction)(code: TbContext => A): A = code( TbContext( - txVersion = CreateTransactionVersion.Version(tx.version), + txVersion = CreateSerializationVersion.Version(tx.version), contractIds = contractIdsInPreorder(tx), ) ) @@ -163,7 +163,7 @@ trait DamlRollbackTest } object DamlRollbackTest { - final case class TbContext(txVersion: CreateTransactionVersion, contractIds: Seq[LfContractId]) + final case class TbContext(txVersion: CreateSerializationVersion, contractIds: Seq[LfContractId]) } trait DamlRollbackTestStableLf extends DamlRollbackTest { @@ -1164,7 +1164,7 @@ trait DamlRollbackTestDevLf extends DamlRollbackTest { trait DamlRollbackReferenceSequencerPostgresTest { self: SharedEnvironment => registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[Postgres](loggerFactory)) } trait DamlRollbackBftSequencerPostgresTest { diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/DecisionTimeElapsedIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/DecisionTimeElapsedIntegrationTest.scala index 26bacda09e..4f30d985a3 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/DecisionTimeElapsedIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/DecisionTimeElapsedIntegrationTest.scala @@ -6,9 +6,9 @@ package com.digitalasset.canton.integration.tests import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.console.CommandFailure import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -128,6 +128,6 @@ trait DecisionTimeElapsedIntegrationTest class DecisionTimeElapsedIntegrationTestPostgres extends DecisionTimeElapsedIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/DeliverErrorIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/DeliverErrorIntegrationTest.scala index 7efffd2399..e9243ac25f 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/DeliverErrorIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/DeliverErrorIntegrationTest.scala @@ -10,9 +10,9 @@ import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.examples.java.iou.{Amount, Iou} import com.digitalasset.canton.integration.plugins.{ UseBftSequencer, - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.util.EntitySyntax import com.digitalasset.canton.integration.{ @@ -70,7 +70,7 @@ trait 
DeliverErrorIntegrationTest val signedModifiedRequest = signModifiedSubmissionRequest( modifiedRequest, - syncCrypto.tryForSynchronizer(daId, defaultStaticSynchronizerParameters), + syncCrypto.tryForSynchronizer(daId, staticSynchronizerParameters1), ) SendDecision.Replace(signedModifiedRequest) @@ -104,7 +104,7 @@ trait DeliverErrorIntegrationTest class DeliverErrorReferenceIntegrationTestPostgres extends DeliverErrorIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/DumpIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/DumpIntegrationTest.scala index 4628b5cce4..7acdb69896 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/DumpIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/DumpIntegrationTest.scala @@ -12,7 +12,7 @@ import com.digitalasset.canton.crypto.provider.jce.JcePureCrypto import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.examples.java.iou import com.digitalasset.canton.examples.java.iou.Amount -import com.digitalasset.canton.integration.plugins.{UseCommunityReferenceBlockSequencer, UseH2} +import com.digitalasset.canton.integration.plugins.{UseH2, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -268,5 +268,5 @@ sealed trait DumpIntegrationTest extends CommunityIntegrationTest with SharedEnv final class DumpIntegrationTestH2 extends DumpIntegrationTest { registerPlugin(new UseH2(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/GeneralSynchronizerRouterIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/GeneralSynchronizerRouterIntegrationTest.scala index 8e9652e073..19d427fc6b 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/GeneralSynchronizerRouterIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/GeneralSynchronizerRouterIntegrationTest.scala @@ -14,11 +14,8 @@ import com.digitalasset.canton.error.TransactionRoutingError.TopologyErrors.{ NoCommonSynchronizer, } import com.digitalasset.canton.integration.EnvironmentDefinition -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.participant.util.JavaCodegenUtil.* import com.digitalasset.canton.protocol.LfContractId import com.digitalasset.canton.topology.transaction.ParticipantPermission @@ -33,9 +30,6 @@ sealed trait GeneralSynchronizerRouterIntegrationTest override def 
environmentDefinition: EnvironmentDefinition = super.environmentDefinition .withSetup { implicit env => - import env.* - participants.all.dars.upload(darPath) - connectToDefaultSynchronizers() } @@ -411,7 +405,7 @@ class GeneralSynchronizerRouterIntegrationTestPostgres extends GeneralSynchronizerRouterIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq(Set("sequencer1"), Set("sequencer2"), Set("sequencer3")) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/GrpcConnectionErrorsIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/GrpcConnectionErrorsIntegrationTest.scala index 8b8f991ff4..a012b4f1f9 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/GrpcConnectionErrorsIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/GrpcConnectionErrorsIntegrationTest.scala @@ -6,11 +6,8 @@ package com.digitalasset.canton.integration.tests import com.digitalasset.canton.config.CantonRequireTypes.InstanceName import com.digitalasset.canton.config.RequireTypes.ExistingFile import com.digitalasset.canton.config.{IdentityConfig, PemFile, StorageConfig, TlsBaseServerConfig} -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer -import com.digitalasset.canton.integration.plugins.{ - UseBftSequencer, - UseCommunityReferenceBlockSequencer, -} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{UseBftSequencer, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, ConfigTransforms, @@ -247,7 +244,7 @@ trait GrpcConnectionErrorsIntegrationTest extends CommunityIntegrationTest with class GrpcConnectionErrorsReferenceIntegrationTestInMemory extends GrpcConnectionErrorsIntegrationTest { registerPlugin( - new UseCommunityReferenceBlockSequencer[StorageConfig.Memory]( + new UseReferenceBlockSequencer[StorageConfig.Memory]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq(Set(InstanceName.tryCreate("sequencer1")), Set(InstanceName.tryCreate("sequencer2"))) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/GrpcTimeoutIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/GrpcTimeoutIntegrationTest.scala index 33522b9969..8d42b8368e 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/GrpcTimeoutIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/GrpcTimeoutIntegrationTest.scala @@ -4,10 +4,7 @@ package com.digitalasset.canton.integration.tests import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -37,5 +34,5 @@ trait GrpcTimeoutIntegrationTest extends CommunityIntegrationTest with SharedEnv class GrpcTimeoutIntegrationTestPostegres extends GrpcTimeoutIntegrationTest { 
registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/InFlightSubmissionTrackingIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/InFlightSubmissionTrackingIntegrationTest.scala index 05142ecf86..d175b9d881 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/InFlightSubmissionTrackingIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/InFlightSubmissionTrackingIntegrationTest.scala @@ -17,9 +17,9 @@ import com.digitalasset.canton.error.TransactionRoutingError.TopologyErrors.{ import com.digitalasset.canton.error.TransactionRoutingError.UnableToQueryTopologySnapshot import com.digitalasset.canton.examples.java.cycle import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -318,6 +318,6 @@ trait InFlightSubmissionTrackingIntegrationTest class InFlightSubmissionTrackingIntegrationTestPostgres extends InFlightSubmissionTrackingIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/MaxRequestSizeCrashIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/MaxRequestSizeCrashIntegrationTest.scala index ee7dbca759..e323272358 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/MaxRequestSizeCrashIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/MaxRequestSizeCrashIntegrationTest.scala @@ -11,8 +11,8 @@ import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.examples.java.iou.Dummy import com.digitalasset.canton.integration.plugins.{ UseBftSequencer, - UseCommunityReferenceBlockSequencer, UsePostgres, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -21,6 +21,7 @@ import com.digitalasset.canton.integration.{ SharedEnvironment, TestConsoleEnvironment, } +import com.digitalasset.canton.logging.SuppressingLogger.LogEntryOptionality import com.digitalasset.canton.participant.protocol.TransactionProcessor.SubmissionErrors.MalformedRequest import com.digitalasset.canton.util.ResourceUtil.withResource import monocle.macros.syntax.lens.* @@ -104,31 +105,6 @@ sealed abstract class MaxRequestSizeCrashIntegrationTest "Canton" should { "recover from failure due to too small request size " in { implicit env => import env.* - participant1.dars.upload(CantonTestsPath) - - // verify the ping is successful - participant1.health.ping(participant1) - - // change maxRequestSize - synchronizerOwners1.foreach( - _.topology.synchronizer_parameters - .propose_update( - synchronizerId = daId, - _.update(maxRequestSize = lowMaxRequestSize.unwrap), - ) - ) - - 
eventually() { - forAll(nodes.all) { - _.topology.synchronizer_parameters - .latest(daId) - .maxRequestSize - .value shouldBe lowMaxRequestSize.unwrap - } - } - - val matchError = - s"MaxViewSizeExceeded\\(view size = .*, max request size configured = .*\\)." def submitCommand(p: ParticipantReference) = { val commandId = s"submit-async-dummy-${UUID.randomUUID().toString}" @@ -144,8 +120,34 @@ sealed abstract class MaxRequestSizeCrashIntegrationTest (commandId, commandF) } - loggerFactory.assertLogs( + participant1.dars.upload(CantonTestsPath) + + // verify the ping is successful + participant1.health.ping(participant1) + + loggerFactory.assertLogsUnorderedOptional( { + // change maxRequestSize + synchronizerOwners1.foreach( + _.topology.synchronizer_parameters + .propose_update( + synchronizerId = daId, + _.update(maxRequestSize = lowMaxRequestSize.unwrap), + ) + ) + + eventually() { + forAll(nodes.all) { + _.topology.synchronizer_parameters + .latest(daId) + .maxRequestSize + .value shouldBe lowMaxRequestSize.unwrap + } + } + + val matchError = + s"MaxViewSizeExceeded\\(view size = .*, max request size configured = .*\\)." + val (commandId, _) = submitCommand(env.participant1) eventually() { @@ -164,39 +166,42 @@ sealed abstract class MaxRequestSizeCrashIntegrationTest deserializedError.code.id shouldBe MalformedRequest.id reason should include regex matchError } - }, - _.errorMessage should include("INVALID_ARGUMENT/MALFORMED_REQUEST"), - ) - /* - restart Canton with overrideMaxRequestSize, so that max request size can be increased - */ - setOverrideMaxRequestSizeWithNewEnv(env, overrideMaxRequestSize) { implicit newEnv => - import newEnv.* - // we verify that the dynamic parameter is still set to the low value + // restart Canton with overrideMaxRequestSize, so that max request size can be increased + setOverrideMaxRequestSizeWithNewEnv(env, overrideMaxRequestSize) { implicit newEnv => + import newEnv.* + // we verify that the dynamic parameter is still set to the low value - forAll(nodes.all)( - _.topology.synchronizer_parameters.latest(daId).maxRequestSize == lowMaxRequestSize - ) + forAll(nodes.all)( + _.topology.synchronizer_parameters.latest(daId).maxRequestSize == lowMaxRequestSize + ) - val newMaxRequestSize = NonNegativeInt.tryCreate(60_000) - newMaxRequestSize should not be lowMaxRequestSize + val newMaxRequestSize = NonNegativeInt.tryCreate(60_000) + newMaxRequestSize should not be lowMaxRequestSize - synchronizerOwners1.foreach( - _.topology.synchronizer_parameters - .propose_update(synchronizerId = daId, _.update(maxRequestSize = newMaxRequestSize)) - ) + synchronizerOwners1.foreach( + _.topology.synchronizer_parameters + .propose_update(synchronizerId = daId, _.update(maxRequestSize = newMaxRequestSize)) + ) - eventually() { - forAll(nodes.all) { member => - member.topology.synchronizer_parameters - .latest(daId) - .maxRequestSize shouldBe newMaxRequestSize - } - } + eventually() { + forAll(nodes.all) { member => + member.topology.synchronizer_parameters + .latest(daId) + .maxRequestSize shouldBe newMaxRequestSize + } + } - stop(newEnv) - } + stop(newEnv) + } + }, + LogEntryOptionality.Required -> (_.errorMessage should include( + "INVALID_ARGUMENT/MALFORMED_REQUEST" + )), + LogEntryOptionality.OptionalMany -> (_.warningMessage should include( + "Could not send a time-advancing message" + )), + ) restart @@ -210,7 +215,7 @@ sealed abstract class MaxRequestSizeCrashIntegrationTest class MaxRequestSizeCrashReferenceIntegrationIntegrationTestPostgres extends 
MaxRequestSizeCrashIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } class MaxRequestSizeCrashBftOrderingIntegrationIntegrationTestPostgres diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/MultiSynchronizerPingIntegrationTests.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/MultiSynchronizerPingIntegrationTests.scala index 91001690cf..7c0b70aad6 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/MultiSynchronizerPingIntegrationTests.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/MultiSynchronizerPingIntegrationTests.scala @@ -7,11 +7,11 @@ import com.digitalasset.canton.config.* import com.digitalasset.canton.config.CantonRequireTypes.InstanceName import com.digitalasset.canton.integration.* import com.digitalasset.canton.integration.IntegrationTestUtilities.* -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.plugins.{ UseBftSequencer, - UseCommunityReferenceBlockSequencer, UsePostgres, + UseReferenceBlockSequencer, } sealed trait MultiSynchronizerPingIntegrationTests @@ -65,7 +65,7 @@ class MultiSynchronizerPingReferenceIntegrationTestsPostgres extends MultiSynchronizerPingIntegrationTests { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/MultipleMediatorsIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/MultipleMediatorsIntegrationTest.scala index 68ea3c9d61..668b750c8e 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/MultipleMediatorsIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/MultipleMediatorsIntegrationTest.scala @@ -22,8 +22,8 @@ import com.digitalasset.canton.integration.bootstrap.{ NetworkTopologyDescription, } import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -59,6 +59,8 @@ import scala.concurrent.{ExecutionContext, Future, Promise} trait MultipleMediatorsBaseTest { this: BaseTest & HasProgrammableSequencer => + protected val errorUnknownSender = "(Eligible) Senders are unknown: MED::" + protected def participantSeesMediators( ref: ParticipantReference, expectedActive: Set[Set[MediatorId]], // For each group, the mediators @@ -131,7 +133,6 @@ trait MultipleMediatorsBaseTest { this: BaseTest & HasProgrammableSequencer => ) .cause - val errorUnknownSender = "(Eligible) Senders are unknown: MED::" loggerFactory.assertLoggedWarningsAndErrorsSeq( submit(), LogEntry.assertLogSeq( @@ -180,7 +181,7 @@ class MultipleMediatorsIntegrationTest with MultipleMediatorsBaseTest with OperabilityTestHelpers { - registerPlugin(new 
UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory))
   // we need to register the ProgrammableSequencer after the ReferenceBlockSequencer
   registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory))
@@ -321,16 +322,32 @@ class MultipleMediatorsIntegrationTest
     participant1.dars.upload(CantonExamplesPath)
     participant1.health.ping(participant1)

-    sequencer1.topology.mediators.remove_group(daId, NonNegativeInt.zero)
+    loggerFactory.assertThrowsAndLogsSeq[CommandFailure](
+      {
+        sequencer1.topology.mediators.remove_group(daId, NonNegativeInt.zero)

-    eventually() {
-      participant1.topology.mediators
-        .list(daId, group = Some(NonNegativeInt.zero)) shouldBe empty
-    }
+        eventually() {
+          participant1.topology.mediators
+            .list(daId, group = Some(NonNegativeInt.zero)) shouldBe empty
+        }
+
+        // Environments are isolated, so we can stop the mediator, and prevent it from sending anything
+        mediator1.stop()

-    loggerFactory.assertThrowsAndLogs[CommandFailure](
-      createCycleContract(participant1, participant1.adminParty, "no-mediator-on-synchronizer"),
-      _.errorMessage should include(SynchronizerWithoutMediatorError.code.id),
+        createCycleContract(
+          participant1,
+          participant1.adminParty,
+          "no-mediator-on-synchronizer",
+        )
+      },
+      LogEntry.assertLogSeq(
+        mustContainWithClue = Seq(
+          (_.errorMessage should include(SynchronizerWithoutMediatorError.code.id), "error")
+        ),
+        mayContain = Seq(
+          _.warningMessage should include(errorUnknownSender)
+        ),
+      ),
    )

     uploadAndWaitForMediatorIdentity(mediator2, sequencer1)
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/MultipleMediatorsMultipleSynchronizersIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/MultipleMediatorsMultipleSynchronizersIntegrationTest.scala
index c46a8ce320..fd993ac2d5 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/MultipleMediatorsMultipleSynchronizersIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/MultipleMediatorsMultipleSynchronizersIntegrationTest.scala
@@ -13,10 +13,10 @@ import com.digitalasset.canton.integration.bootstrap.{
   NetworkBootstrapper,
   NetworkTopologyDescription,
 }
-import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer
 import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
   UseProgrammableSequencer,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.integration.util.AcsInspection
 import com.digitalasset.canton.integration.{
@@ -82,7 +82,8 @@ final class MultipleMediatorsMultipleSynchronizersIntegrationTest
     participant1.synchronizers.connect_local(sequencer1, alias = synchronizer1)
     participant1.synchronizers.connect_local(sequencer2, alias = synchronizer2)

-    participant1.dars.upload(CantonExamplesPath)
+    participant1.dars.upload(CantonExamplesPath, synchronizerId = daId)
+    participant1.dars.upload(CantonExamplesPath, synchronizerId = acmeId)

     participantSeesMediators(participant1, Set(Set(mediator1.id), Set(mediator3.id)))

@@ -98,7 +99,7 @@
   }

   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.H2](
+    new UseReferenceBlockSequencer[DbConfig.H2](
       loggerFactory,
       sequencerGroups = MultiSynchronizer(
        Seq(Set("sequencer1"), Set("sequencer2"))
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/NonInformeeStakeholderIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/NonInformeeStakeholderIntegrationTest.scala
index c12d1ad5a5..9b6f7a38ab 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/NonInformeeStakeholderIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/NonInformeeStakeholderIntegrationTest.scala
@@ -5,7 +5,7 @@ package com.digitalasset.canton.integration.tests

 import com.digitalasset.canton.config.StorageConfig
 import com.digitalasset.canton.damltests.java.noninformeestakeholder.{Inner, Outer}
-import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
   EnvironmentDefinition,
@@ -62,5 +62,5 @@ trait NonInformeeStakeholderIntegrationTest

 class NonInformeeStakeholderReferenceIntegrationTestInMemory
     extends NonInformeeStakeholderIntegrationTest {
-  registerPlugin(new UseCommunityReferenceBlockSequencer[StorageConfig.Memory](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[StorageConfig.Memory](loggerFactory))
 }
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/PartyToParticipantDeclarativeIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/PartyToParticipantDeclarativeIntegrationTest.scala
index 0d6ab508a3..9cf542b5af 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/PartyToParticipantDeclarativeIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/PartyToParticipantDeclarativeIntegrationTest.scala
@@ -6,11 +6,8 @@ package com.digitalasset.canton.integration.tests
 import com.digitalasset.canton.config.CantonRequireTypes.InstanceName
 import com.digitalasset.canton.config.DbConfig
 import com.digitalasset.canton.config.RequireTypes.PositiveInt
-import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer
-import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
-  UsePostgres,
-}
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer
+import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer}
 import com.digitalasset.canton.integration.tests.examples.IouSyntax
 import com.digitalasset.canton.integration.util.{
   EntitySyntax,
@@ -37,7 +34,7 @@ final class PartyToParticipantDeclarativeIntegrationTest

   registerPlugin(new UsePostgres(loggerFactory))
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](
+    new UseReferenceBlockSequencer[DbConfig.Postgres](
       loggerFactory,
       sequencerGroups = MultiSynchronizer(
         Seq(Set("sequencer1"), Set("sequencer2")).map(_.map(InstanceName.tryCreate))
@@ -51,7 +48,8 @@
     participants.all.synchronizers.connect_local(sequencer1, daName)
     participants.all.synchronizers.connect_local(sequencer2, acmeName)

-    participants.all.dars.upload(CantonExamplesPath)
+    participants.all.dars.upload(CantonExamplesPath, synchronizerId = daId)
+    participants.all.dars.upload(CantonExamplesPath, synchronizerId = acmeId)
   }

   "Party to participant declarative" should {
@@ -103,8 +101,7 @@

     val chopperE = participant1.parties.external.enable("chopperE", synchronizer = daName)

-    // TODO(#27835) Add multi-synchronizer support
-    // participant1.parties.enable("chopper", synchronizer = acmeName)
+    participant1.parties.external.also_enable(chopperE, synchronizer = acmeName)

     def changeTopology(
         newThreshold: PositiveInt,
@@ -140,6 +137,42 @@
       )
     }

+    "support party replication in multi-synchronizer scenario (external)" in { implicit env =>
+      import env.*
+
+      val donaldE = participant1.parties.external.enable("donaldE", synchronizer = daName)
+
+      participant1.parties.external.also_enable(donaldE, synchronizer = acmeName)
+
+      val targetHosting: (PositiveInt, Set[(ParticipantId, ParticipantPermission)]) = (
+        PositiveInt.one,
+        Set(
+          (participant1, Confirmation),
+          (participant2, Confirmation),
+        ),
+      )
+
+      PartyToParticipantDeclarative(Set(participant1, participant2), Set(daId, acmeId))(
+        owningParticipants = Map(),
+        externalParties = Set(donaldE),
+        targetTopology = Map(
+          donaldE.partyId -> Map(
+            daId -> targetHosting,
+            acmeId -> targetHosting,
+          )
+        ),
+      )
+
+      forAll(Seq(daId, acmeId)) { synchronizer =>
+        participant1.topology.party_to_participant_mappings
+          .list(synchronizer, filterParty = donaldE.filterString)
+          .flatMap(_.item.participants.map(_.participantId)) should contain theSameElementsAs Seq(
+          participant1.id,
+          participant2.id,
+        )
+      }
+    }
+
     "allow party offboarding from synchronizer (local)" in { implicit env =>
       import env.*
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/Phase4SendAsyncIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/Phase4SendAsyncIntegrationTest.scala
index 19dba5923c..0fc5b5f5b0 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/Phase4SendAsyncIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/Phase4SendAsyncIntegrationTest.scala
@@ -6,8 +6,8 @@ package com.digitalasset.canton.integration.tests
 import com.digitalasset.canton.config.DbConfig
 import com.digitalasset.canton.config.RequireTypes.PositiveInt
 import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
   UseProgrammableSequencer,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.integration.tests.examples.IouSyntax
 import com.digitalasset.canton.integration.{
@@ -108,6 +108,6 @@ sealed trait Phase4SendAsyncIntegrationTest
 }

 class Phase4SendAsyncReferenceIntegrationTestPostgres extends Phase4SendAsyncIntegrationTest {
-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
   registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory))
 }
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/PingServiceVacuumingIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/PingServiceVacuumingIntegrationTest.scala
index d63494600a..20fc59a133 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/PingServiceVacuumingIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/PingServiceVacuumingIntegrationTest.scala
@@ -8,9 +8,9 @@ import com.digitalasset.canton.config
 import com.digitalasset.canton.config.DbConfig
 import com.digitalasset.canton.config.RequireTypes.PositiveInt
 import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
   UsePostgres,
   UseProgrammableSequencer,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
@@ -33,7 +33,7 @@ class PingServiceVacuumingIntegrationTest
     with HasProgrammableSequencer {

   registerPlugin(new UsePostgres(loggerFactory))
-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
   registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory))

   override lazy val environmentDefinition: EnvironmentDefinition =
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ProtocolInterleavingIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ProtocolInterleavingIntegrationTest.scala
index 4396a8287c..f8a632118a 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ProtocolInterleavingIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ProtocolInterleavingIntegrationTest.scala
@@ -14,8 +14,8 @@ import com.digitalasset.canton.damltests.java.conflicttest.{Many, Single}
 import com.digitalasset.canton.integration.IntegrationTestUtilities.assertIncreasingRecordTime
 import com.digitalasset.canton.integration.plugins.{
   UseBftSequencer,
-  UseCommunityReferenceBlockSequencer,
   UseProgrammableSequencer,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
@@ -474,7 +474,7 @@ trait ProtocolInterleavingIntegrationTest

 class ProtocolInterleavingReferenceIntegrationTestPostgres
     extends ProtocolInterleavingIntegrationTest {
-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
   registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory))
 }
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ReassignmentBackdatingIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ReassignmentBackdatingIntegrationTest.scala
index b06086d9d7..9b82fb2e08 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ReassignmentBackdatingIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ReassignmentBackdatingIntegrationTest.scala
@@ -7,10 +7,10 @@ import com.digitalasset.canton.NeedsNewLfContractIds
 import com.digitalasset.canton.config.CantonRequireTypes.InstanceName
 import com.digitalasset.canton.config.DbConfig
 import com.digitalasset.canton.crypto.{KeyPurpose, SigningPublicKey}
-import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer
 import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
   UseProgrammableSequencer,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.synchronizer.sequencer.SendDecision
 import com.digitalasset.canton.synchronizer.sequencer.SendPolicy.processTimeProofs_
@@ -151,7 +151,7 @@ abstract class ReassignmentBackdatingIntegrationTest

 class ReassignmentBackdatingIntegrationTestDefault extends ReassignmentBackdatingIntegrationTest {
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.H2](
+    new UseReferenceBlockSequencer[DbConfig.H2](
       loggerFactory,
       sequencerGroups = MultiSynchronizer(
         Seq(
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/RecordReplayTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/RecordReplayTest.scala
index a76f736654..e717415057 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/RecordReplayTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/RecordReplayTest.scala
@@ -9,8 +9,8 @@ import com.digitalasset.canton.HasTempDirectory
 import com.digitalasset.canton.config.DbConfig
 import com.digitalasset.canton.integration.plugins.{
   PostgresDumpRestore,
-  UseCommunityReferenceBlockSequencer,
   UsePostgres,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
@@ -37,7 +37,7 @@ final class RecordReplayIntegrationTest

   private val postgresPlugin = new UsePostgres(loggerFactory)
   registerPlugin(postgresPlugin)
-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory))

   private val postgresDumpRestore = PostgresDumpRestore(postgresPlugin, forceLocal = false)
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/RemoteMediatorInitializationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/RemoteMediatorInitializationTest.scala
index 6f33914e7e..17353b7a96 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/RemoteMediatorInitializationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/RemoteMediatorInitializationTest.scala
@@ -11,10 +11,7 @@ import com.digitalasset.canton.integration.bootstrap.{
   NetworkBootstrapper,
   NetworkTopologyDescription,
 }
-import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
-  UseExternalProcess,
-}
+import com.digitalasset.canton.integration.plugins.{UseExternalProcess, UseReferenceBlockSequencer}
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
   EnvironmentDefinition,
@@ -49,7 +46,7 @@ class RemoteMediatorInitializationTest extends CommunityIntegrationTest with Sha
   }

   registerPlugin(externalPlugin)
-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory))

   protected def setup(env: TestConsoleEnvironment): Unit = {
     import env.*
@@ -66,7 +63,7 @@
           synchronizerThreshold = PositiveInt.one,
           sequencers = Seq(sequencer1),
           mediators = Seq(m1),
-        )
+        )(env)
       )
     )(env).bootstrap()
   }
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/RobustSynchronizerBootstrapIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/RobustSynchronizerBootstrapIntegrationTest.scala
index 0db6179d11..521157b2fb 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/RobustSynchronizerBootstrapIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/RobustSynchronizerBootstrapIntegrationTest.scala
@@ -18,11 +18,8 @@ import com.digitalasset.canton.console.{
 }
 import com.digitalasset.canton.crypto.KeyPurpose
 import com.digitalasset.canton.integration.*
-import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer
-import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
-  UsePostgres,
-}
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer
+import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer}
 import com.digitalasset.canton.logging.SuppressionRule
 import com.digitalasset.canton.sequencing.{SequencerConnectionValidation, SequencerConnections}
 import com.digitalasset.canton.synchronizer.mediator.MediatorNodeConfig
@@ -94,7 +91,7 @@ sealed trait RobustSynchronizerBootstrapIntegrationTest
       new LocalSequencerReference(env, sequencerReference.name) {
         val counter = new AtomicInteger(0)
         override def setup: SequencerAdministration = new SequencerAdministration(this) {
-          override def assign_from_genesis_state(
+          override def assign_from_genesis_stateV2(
               genesisState: ByteString,
               synchronizerParameters: StaticSynchronizerParameters,
               waitForReady: Boolean,
@@ -102,7 +99,7 @@
             if (counter.incrementAndGet() == 1)
               throw new RuntimeException("First time sequencer init fails")
             else
-              super.assign_from_genesis_state(genesisState, synchronizerParameters)
+              super.assign_from_genesis_stateV2(genesisState, synchronizerParameters)
           }
         }
@@ -235,7 +232,7 @@
       mediator.stop()
     }

-    "fail if trying to boostrap a synchronizer with a sequencer or mediator already initialized previously with another synchronizer" in {
+    "fail if trying to bootstrap a synchronizer with a sequencer or mediator already initialized previously with another synchronizer" in {
       implicit env =>
         import env.*
@@ -310,7 +307,7 @@ class RobustSynchronizerBootstrapIntegrationTestPostgres
     extends RobustSynchronizerBootstrapIntegrationTest {
   registerPlugin(new UsePostgres(loggerFactory))
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](
+    new UseReferenceBlockSequencer[DbConfig.Postgres](
       loggerFactory,
       sequencerGroups = MultiSynchronizer(
         Seq(Set("sequencer1"), Set("secondSequencer"), Set("sequencerToFail")).map(
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/RouterSubmissionCheckIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/RouterSubmissionCheckIntegrationTest.scala
index 55ef1ecdbc..4787ec33fc 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/RouterSubmissionCheckIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/RouterSubmissionCheckIntegrationTest.scala
@@ -6,7 +6,7 @@ package com.digitalasset.canton.integration.tests
 import com.digitalasset.canton.config.RequireTypes.PositiveInt
 import com.digitalasset.canton.config.StorageConfig
 import com.digitalasset.canton.error.TransactionRoutingError.TopologyErrors.NoSynchronizerOnWhichAllSubmittersCanSubmit
-import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer
 import com.digitalasset.canton.integration.util.{EntitySyntax, PartiesAllocator}
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
@@ -54,5 +54,5 @@ trait RouterSubmissionCheckIntegrationTest

 class RouterSubmissionCheckIntegrationTestInMemory extends RouterSubmissionCheckIntegrationTest {
-  registerPlugin(new UseCommunityReferenceBlockSequencer[StorageConfig.Memory](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[StorageConfig.Memory](loggerFactory))
 }
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SimplestPingCommunityIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SimplestPingCommunityIntegrationTest.scala
index 094d0b8f4d..afe84128bf 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SimplestPingCommunityIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SimplestPingCommunityIntegrationTest.scala
@@ -8,7 +8,7 @@ import com.digitalasset.canton.config.RequireTypes.PositiveInt
 import com.digitalasset.canton.config.{CantonConfig, StorageConfig}
 import com.digitalasset.canton.console.InstanceReference
 import com.digitalasset.canton.environment.CantonEnvironment
-import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
   ConfigTransforms,
@@ -66,6 +66,6 @@ sealed trait SimplestPingCommunityIntegrationTest
 final class SimplestPingReferenceCommunityIntegrationTest
     extends SimplestPingCommunityIntegrationTest {
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[StorageConfig.Memory](loggerFactory)
+    new UseReferenceBlockSequencer[StorageConfig.Memory](loggerFactory)
   )
 }
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SimplestPingIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SimplestPingIntegrationTest.scala
index 091bba119b..5a99d6311c 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SimplestPingIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SimplestPingIntegrationTest.scala
@@ -6,9 +6,9 @@ package com.digitalasset.canton.integration.tests
 import com.digitalasset.canton.config.{DbConfig, StorageConfig}
 import com.digitalasset.canton.integration.plugins.{
   UseBftSequencer,
-  UseCommunityReferenceBlockSequencer,
   UseH2,
   UsePostgres,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
@@ -50,18 +50,18 @@ class SimplestPingIntegrationTestInMemory extends SimplestPingIntegrationTest {
     .addConfigTransform(ConfigTransforms.allInMemory)
     .addConfigTransform(_.focus(_.monitoring.logging.api.messagePayloads).replace(false))

-  registerPlugin(new UseCommunityReferenceBlockSequencer[StorageConfig.Memory](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[StorageConfig.Memory](loggerFactory))
 }

 class SimplestPingReferenceIntegrationTestH2 extends SimplestPingIntegrationTest {
   registerPlugin(new UseH2(loggerFactory))
-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory))
 }

 class SimplestPingReferenceIntegrationTestPostgres extends SimplestPingIntegrationTest {
   registerPlugin(new UsePostgres(loggerFactory))
-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
 }

 class SimplestPingBftOrderingIntegrationTestPostgres extends SimplestPingIntegrationTest {
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/StaticTimeIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/StaticTimeIntegrationTest.scala
index f748214179..fc5dd1e785 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/StaticTimeIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/StaticTimeIntegrationTest.scala
@@ -6,7 +6,7 @@ package com.digitalasset.canton.integration.tests
 import com.digitalasset.canton.config.StorageConfig
 import com.digitalasset.canton.damltests.java.statictimetest.Pass
 import com.digitalasset.canton.data.CantonTimestamp
-import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
   ConfigTransforms,
@@ -81,5 +81,5 @@ trait StaticTimeIntegrationTest extends CommunityIntegrationTest with SharedEnvi

 class StaticTimeIntegrationTestInMemory extends StaticTimeIntegrationTest {
-  registerPlugin(new UseCommunityReferenceBlockSequencer[StorageConfig.Memory](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[StorageConfig.Memory](loggerFactory))
 }
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SubmissionRequestAmplificationIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SubmissionRequestAmplificationIntegrationTest.scala
index 66102655a1..d7a2ae1b51 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SubmissionRequestAmplificationIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SubmissionRequestAmplificationIntegrationTest.scala
@@ -17,9 +17,9 @@ import com.digitalasset.canton.console.LocalSequencerReference
 import com.digitalasset.canton.integration.EnvironmentDefinition.S2M2
 import com.digitalasset.canton.integration.bootstrap.NetworkBootstrapper
 import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
   UsePostgres,
   UseProgrammableSequencer,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
@@ -87,6 +87,7 @@ abstract class SubmissionRequestAmplificationIntegrationTest
             PositiveInt.tryCreate(2),
             config.NonNegativeFiniteDuration.Zero,
           ),
+          old.sequencerConnectionPoolDelays,
         )
       }
     )
@@ -437,7 +438,7 @@ class SubmissionRequestAmplificationReferenceIntegrationTestPostgres

   registerPlugin(new UsePostgres(loggerFactory))
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)
+    new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)
   )
   registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory))
 }
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SubmitCommandTrialErrorTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SubmitCommandTrialErrorTest.scala
index 9ce04e7770..ee2fa17276 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SubmitCommandTrialErrorTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SubmitCommandTrialErrorTest.scala
@@ -11,10 +11,7 @@ import com.digitalasset.canton.error.TransactionRoutingError.TopologyErrors.{
   UnknownSubmitters,
 }
 import com.digitalasset.canton.examples.java.iou.*
-import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
-  UsePostgres,
-}
+import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer}
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
   EnvironmentDefinition,
@@ -124,6 +121,8 @@ trait SubmitCommandTrialErrorTest extends CommunityIntegrationTest with SharedEn

       participant2.dars.upload(CantonExamplesPath)

+      participant2.packages.synchronize_vetting()
+
       participant1.ledger_api.javaapi.commands.submit(Seq(Bank), cmds)
     }

@@ -147,6 +146,6 @@ trait SubmitCommandTrialErrorTest extends CommunityIntegrationTest with SharedEn
 class SubmitCommandTrialErrorTestPostgres extends SubmitCommandTrialErrorTest {
   registerPlugin(new UsePostgres(loggerFactory))
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)
+    new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)
   )
 }
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerBootstrapWithSeparateConsolesTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerBootstrapWithSeparateConsolesTest.scala
index 069415c92b..5d720197b6 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerBootstrapWithSeparateConsolesTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerBootstrapWithSeparateConsolesTest.scala
@@ -9,7 +9,7 @@ import com.digitalasset.canton.admin.api.client.data.StaticSynchronizerParameter
 import com.digitalasset.canton.config.{CantonConfig, DbConfig}
 import com.digitalasset.canton.config.RequireTypes.PositiveInt
 import com.digitalasset.canton.environment.CantonEnvironment
-import com.digitalasset.canton.integration.plugins.{UseCommunityReferenceBlockSequencer, UseH2}
+import com.digitalasset.canton.integration.plugins.{UseH2, UseReferenceBlockSequencer}
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
   EnvironmentDefinition,
@@ -91,11 +91,11 @@ trait SynchronizerBootstrapWithSeparateConsolesIntegrationTest
     // * extract sequencer1's and mediator1's identity+pubkey topology transactions and share via files
     // * load mediator1's identity+pubkey topology transactions
     {
-      sequencer1.topology.transactions.export_identity_transactions(seqIdentityFile)
-      mediator1.topology.transactions.export_identity_transactions(medIdentityFile)
+      sequencer1.topology.transactions.export_identity_transactionsV2(seqIdentityFile)
+      mediator1.topology.transactions.export_identity_transactionsV2(medIdentityFile)

       sequencer1.topology.transactions
-        .import_topology_snapshot_from(medIdentityFile, TopologyStoreId.Authorized)
+        .import_topology_snapshot_fromV2(medIdentityFile, TopologyStoreId.Authorized)
     }

     // Sequencer2 console:
@@ -104,15 +104,15 @@ trait SynchronizerBootstrapWithSeparateConsolesIntegrationTest
     // * load mediator2's identity+pubkey topology transactions
     {
       sequencer2.topology.transactions
-        .import_topology_snapshot_from(seqIdentityFile, TopologyStoreId.Authorized)
+        .import_topology_snapshot_fromV2(seqIdentityFile, TopologyStoreId.Authorized)
       sequencer2.topology.transactions
-        .import_topology_snapshot_from(medIdentityFile, TopologyStoreId.Authorized)
+        .import_topology_snapshot_fromV2(medIdentityFile, TopologyStoreId.Authorized)

-      sequencer2.topology.transactions.export_identity_transactions(seqIdentityFile)
-      mediator2.topology.transactions.export_identity_transactions(medIdentityFile)
+      sequencer2.topology.transactions.export_identity_transactionsV2(seqIdentityFile)
+      mediator2.topology.transactions.export_identity_transactionsV2(medIdentityFile)

       sequencer2.topology.transactions
-        .import_topology_snapshot_from(medIdentityFile, TopologyStoreId.Authorized)
+        .import_topology_snapshot_fromV2(medIdentityFile, TopologyStoreId.Authorized)
     }

     // Sequencer1 console:
@@ -121,9 +121,9 @@
     {
       // load sequencer2's identity
       sequencer1.topology.transactions
-        .import_topology_snapshot_from(seqIdentityFile, TopologyStoreId.Authorized)
+        .import_topology_snapshot_fromV2(seqIdentityFile, TopologyStoreId.Authorized)
       sequencer1.topology.transactions
-        .import_topology_snapshot_from(medIdentityFile, TopologyStoreId.Authorized)
+        .import_topology_snapshot_fromV2(medIdentityFile, TopologyStoreId.Authorized)

       // propose the decentralized namespace declaration with the sequencer's signature
       val seq1DND = sequencer1.topology.decentralized_namespaces.propose_new(
@@ -207,13 +207,13 @@
       // create the initial topology snapshot by loading all transactions from sequencer1's authorized store
       val initialSnapshot = sequencer1.topology.transactions
-        .export_topology_snapshot(store = TopologyStoreId.Authorized)
+        .export_topology_snapshotV2(store = TopologyStoreId.Authorized)

       // load the static synchronizer parameters
       val synchronizerParams = StaticSynchronizerParameters.tryReadFromFile(paramsFile)

       // and finally initialize the sequencer with the topology snapshot
-      sequencer1.setup.assign_from_genesis_state(initialSnapshot, synchronizerParams)
+      sequencer1.setup.assign_from_genesis_stateV2(initialSnapshot, synchronizerParams)
     }

     // Sequencer2's console:
@@ -230,13 +230,13 @@
       // create the initial topology snapshot by loading all transactions from the sequencer's authorized store
       val initialSnapshot = sequencer2.topology.transactions
-        .export_topology_snapshot(store = TopologyStoreId.Authorized)
+        .export_topology_snapshotV2(store = TopologyStoreId.Authorized)

       // load the static synchronizer parameters
       val synchronizerParams = StaticSynchronizerParameters.tryReadFromFile(paramsFile)

       // and finally initialize the sequencer with the topology snapshot
-      sequencer2.setup.assign_from_genesis_state(initialSnapshot, synchronizerParams)
+      sequencer2.setup.assign_from_genesis_stateV2(initialSnapshot, synchronizerParams)
     }

     // Sequencer1 console:
@@ -272,5 +272,5 @@
 class SynchronizerBootstrapWithSeparateConsolesIntegrationTestH2
     extends SynchronizerBootstrapWithSeparateConsolesIntegrationTest {
   registerPlugin(new UseH2(loggerFactory))
-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory))
 }
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerChangeIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerChangeIntegrationTest.scala
index 4b92e46c05..ab503a9718 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerChangeIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerChangeIntegrationTest.scala
@@ -13,7 +13,7 @@ import com.digitalasset.canton.BigDecimalImplicits.*
 import com.digitalasset.canton.SynchronizerAlias
 import com.digitalasset.canton.admin.api.client.commands.LedgerApiTypeWrappers.WrappedIncompleteUnassigned
-import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt}
+import com.digitalasset.canton.config.RequireTypes.PositiveInt
 import com.digitalasset.canton.config.{
   DbConfig,
   NonNegativeFiniteDuration as NonNegativeFiniteDurationConfig,
@@ -26,11 +26,11 @@ import com.digitalasset.canton.console.{
 import com.digitalasset.canton.data.CantonTimestamp
 import com.digitalasset.canton.examples.java.iou.{Amount, Iou}
 import com.digitalasset.canton.integration.*
-import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer
 import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
   UsePostgres,
   UseProgrammableSequencer,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.integration.util.TestUtils.hasPersistence
 import com.digitalasset.canton.integration.util.{AcsInspection, EntitySyntax, PartiesAllocator}
@@ -76,6 +76,8 @@ object SynchronizerChangeIntegrationTest {
   final case class Config(
       simClock: Boolean,
       assignmentExclusivityTimeout: NonNegativeFiniteDuration,
+      targetTimestampForwardTolerance: NonNegativeFiniteDuration =
+        NonNegativeFiniteDuration.tryOfSeconds(30),
   )
 }
@@ -109,6 +111,11 @@ abstract class SynchronizerChangeIntegrationTest(config: SynchronizerChangeInteg
         _.focus(_.monitoring.logging.delayLoggingThreshold)
           .replace(NonNegativeFiniteDurationConfig.ofDays(100)),
       )
+      .addConfigTransform(
+        ConfigTransforms.updateTargetTimestampForwardTolerance(
+          config.targetTimestampForwardTolerance.duration
+        )
+      )
       .addConfigTransforms(additionalConfigTransforms*)
       .withSetup(setUp)
@@ -144,7 +151,12 @@
       )

       darPaths.foreach { darPath =>
-        participants.all.foreach(_.dars.upload(darPath))
+        participants.all.foreach { p =>
+          p.dars.upload(darPath, synchronizerId = iouSynchronizerId)
+        }
+        Seq(participant4, participant5).foreach(p =>
+          p.dars.upload(darPath, synchronizerId = paintSynchronizerId)
+        )
       }

       // Advance the simClock to trigger time-proof requests, if present
@@ -322,16 +334,11 @@ abstract class SynchronizerChangeSimClockIntegrationTest
       SynchronizerChangeIntegrationTest.Config(
         simClock = true,
         assignmentExclusivityTimeout = NonNegativeFiniteDuration.tryOfMinutes(10L),
+        targetTimestampForwardTolerance = NonNegativeFiniteDuration.tryOfSeconds(30),
       )
     )
     with SecurityTestSuite {

-  override protected def additionalConfigTransforms: Seq[ConfigTransform] = Seq(
-    ConfigTransforms.updateAllParticipantConfigs_(
-      _.focus(_.parameters.reassignmentTimeProofFreshnessProportion).replace(NonNegativeInt.zero)
-    )
-  )
-
   // Workaround to avoid false errors reported by IDEA.
   implicit def tagToContainer(tag: EvidenceTag): Tag = new TagContainer(tag)
@@ -367,24 +374,25 @@
         s"An assignment for $reassignmentId is triggered automatically after the exclusivity timeout"
       )

+      // Get reassignment from the store
+      val unassignedEvent = getIncompleteUnassignedContracts(participants, painter)
+      val exclusivityDeadline = CantonTimestamp
+        .fromProtoTimestamp(unassignedEvent.assignmentExclusivity.value)
+        .value
+
       val margin = NonNegativeFiniteDuration.tryOfSeconds(1)
       // Advance clock just before the exclusivity timeout
-      clock.advance(exclusivityTimeout.unwrap.minus(margin.unwrap))
+      clock.advanceTo(exclusivityDeadline.minus(margin.unwrap))
       participants.foreach(_.testing.fetch_synchronizer_times())
       checkIncompleteUnassignedContracts(
         participants,
         painter,
       ) // assignment did not happen yet

-      // Get reassignment from the store
-      val unassignedEvent = getIncompleteUnassignedContracts(participants, painter)
-
-      val targetTimestamp = CantonTimestamp
-        .fromProtoTimestamp(unassignedEvent.assignmentExclusivity.value)
-        .value + exclusivityTimeout

       // Advance clock to the exclusivity timeout so that the automatic assignment can be triggered
-      clock.advanceTo(targetTimestamp)
+      clock.advanceTo(exclusivityDeadline)
+
       participants.foreach(_.testing.fetch_synchronizer_times())

       // The reassignment store should be empty once the automatic assignment has completed
@@ -648,7 +656,7 @@ class SynchronizerChangeSimClockIntegrationTestPostgres
     extends SynchronizerChangeSimClockIntegrationTest {
   registerPlugin(new UsePostgres(loggerFactory))
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](
+    new UseReferenceBlockSequencer[DbConfig.Postgres](
       loggerFactory,
       sequencerGroups = MultiSynchronizer(
         Seq(
@@ -992,7 +1000,7 @@ class SynchronizerChangeRealClockIntegrationTestPostgres
     extends SynchronizerChangeRealClockIntegrationTest {
   registerPlugin(new UsePostgres(loggerFactory))
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](
+    new UseReferenceBlockSequencer[DbConfig.Postgres](
      loggerFactory,
      sequencerGroups = MultiSynchronizer(
        Seq(
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerConnectivityIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerConnectivityIntegrationTest.scala
index 012a1969b9..0a1272f2fc 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerConnectivityIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerConnectivityIntegrationTest.scala
@@ -10,9 +10,8 @@ import com.digitalasset.canton.config.{DbConfig, NonNegativeDuration}
 import com.digitalasset.canton.console.CommandFailure
 import com.digitalasset.canton.integration.plugins.{
   UseBftSequencer,
-  UseCommunityReferenceBlockSequencer,
   UsePostgres,
-  UseReferenceBlockSequencerBase,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
@@ -33,7 +32,11 @@ import com.digitalasset.canton.participant.synchronizer.SynchronizerRegistryErro
 import com.digitalasset.canton.participant.synchronizer.SynchronizerRegistryError.InitialOnboardingError
 import com.digitalasset.canton.sequencing.SequencerConnectionValidation.ThresholdActive
 import com.digitalasset.canton.sequencing.authentication.MemberAuthentication.MemberAccessDisabled
-import com.digitalasset.canton.sequencing.{SequencerConnections, SubmissionRequestAmplification}
+import com.digitalasset.canton.sequencing.{
+  SequencerConnectionPoolDelays,
+  SequencerConnections,
+  SubmissionRequestAmplification,
+}
 import com.digitalasset.canton.topology.SequencerId
 import com.digitalasset.canton.topology.transaction.TopologyChangeOp
 import com.digitalasset.canton.{SequencerAlias, SynchronizerAlias, config}
@@ -402,6 +405,7 @@ sealed trait SynchronizerConnectivityIntegrationTest
             sequencerTrustThreshold = PositiveInt.one,
             sequencerLivenessMargin = NonNegativeInt.zero,
             SubmissionRequestAmplification.NoAmplification,
+            SequencerConnectionPoolDelays.default,
           ),
         ),
         validation = ThresholdActive,
@@ -463,9 +467,9 @@ class SynchronizerConnectivityReferenceIntegrationTestPostgres
     extends SynchronizerConnectivityIntegrationTest {
   registerPlugin(new UsePostgres(loggerFactory))
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](
+    new UseReferenceBlockSequencer[DbConfig.Postgres](
       loggerFactory,
-      sequencerGroups = UseReferenceBlockSequencerBase.MultiSynchronizer(
+      sequencerGroups = UseReferenceBlockSequencer.MultiSynchronizer(
         Seq(Set(InstanceName.tryCreate("sequencer1")), Set(InstanceName.tryCreate("sequencer2")))
       ),
     )
@@ -478,7 +482,7 @@ class SynchronizerConnectivityBftOrderingIntegrationTestPostgres
   registerPlugin(
     new UseBftSequencer(
       loggerFactory,
-      sequencerGroups = UseReferenceBlockSequencerBase.MultiSynchronizer(
+      sequencerGroups = UseReferenceBlockSequencer.MultiSynchronizer(
         Seq(Set(InstanceName.tryCreate("sequencer1")), Set(InstanceName.tryCreate("sequencer2")))
       ),
     )
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerConnectivityIsolatedIntegrationTests.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerConnectivityIsolatedIntegrationTests.scala
index c48ced9fd6..03d212ce88 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerConnectivityIsolatedIntegrationTests.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerConnectivityIsolatedIntegrationTests.scala
@@ -7,11 +7,11 @@ import com.digitalasset.canton.SequencerAlias
 import com.digitalasset.canton.config.*
 import com.digitalasset.canton.config.CantonRequireTypes.InstanceName
 import com.digitalasset.canton.integration.*
-import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer
 import com.digitalasset.canton.integration.plugins.{
   UseBftSequencer,
-  UseCommunityReferenceBlockSequencer,
   UsePostgres,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig
 import com.digitalasset.canton.sequencing.{GrpcSequencerConnection, SequencerConnections}
@@ -99,7 +99,7 @@ class SynchronizerConnectivityIsolatedReferenceIntegrationTestPostgres
     extends SynchronizerConnectivityIsolatedIntegrationTest {
   registerPlugin(new UsePostgres(loggerFactory))
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](
+    new UseReferenceBlockSequencer[DbConfig.Postgres](
       loggerFactory,
       sequencerGroups = MultiSynchronizer(
         Seq(
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerConnectivityTlsIntegrationTests.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerConnectivityTlsIntegrationTests.scala
index 31eca73fc0..6cb34d161a 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerConnectivityTlsIntegrationTests.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerConnectivityTlsIntegrationTests.scala
@@ -6,10 +6,7 @@ package com.digitalasset.canton.integration.tests
 import com.digitalasset.canton.config.*
 import com.digitalasset.canton.config.RequireTypes.ExistingFile
 import com.digitalasset.canton.integration.*
-import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
-  UsePostgres,
-}
+import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer}
 import monocle.macros.syntax.lens.*

 trait SynchronizerConnectivityTlsIntegrationTests
@@ -69,6 +66,6 @@ class SynchronizerConnectivityTlsReferenceIntegrationTestsPostgres
     extends SynchronizerConnectivityTlsIntegrationTests {
   registerPlugin(new UsePostgres(loggerFactory))
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)
+    new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)
   )
 }
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerRouterIntegrationTestSetup.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerRouterIntegrationTestSetup.scala
index f7adab7b82..a5acc2f5ed 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerRouterIntegrationTestSetup.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SynchronizerRouterIntegrationTestSetup.scala
@@ -15,6 +15,7 @@ import com.digitalasset.canton.damltests.java.automaticreassignmenttransactions.
   Single,
 }
 import com.digitalasset.canton.damltests.java.test.Dummy
+import com.digitalasset.canton.discard.Implicits.*
 import com.digitalasset.canton.integration.util.{AcsInspection, EntitySyntax}
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
@@ -138,11 +139,20 @@ trait SynchronizerRouterIntegrationTestSetup
         )
         // allocate parties on synchronizers where they aren't allocated yet
         allocateParties(participant, synchronizerAlias)
+        updateVetting(participant, synchronizerAlias)
       }
     }
     synchronizeTopologyState()
   }

+  private def updateVetting(
+      participant: ParticipantReference,
+      synchronizerAlias: SynchronizerAlias,
+  ): Unit = {
+    val synchronizerId = participant.synchronizers.id_of(synchronizerAlias)
+    participant.dars.upload(darPath, synchronizerId = Some(synchronizerId)).discard
+  }
+
   private def allocateParties(
       participant: ParticipantReference,
       synchronizerAlias: SynchronizerAlias,
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/TickRequestIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/TickRequestIntegrationTest.scala
index 7380dc0f87..49c25bf815 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/TickRequestIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/TickRequestIntegrationTest.scala
@@ -13,10 +13,10 @@ import com.digitalasset.canton.config.RequireTypes.{
 }
 import com.digitalasset.canton.config.{StorageConfig, SynchronizerTimeTrackerConfig}
 import com.digitalasset.canton.console.LocalSequencerReference
-import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer
 import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
   UseProgrammableSequencer,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.integration.tests.examples.IouSyntax
 import com.digitalasset.canton.integration.{
@@ -90,6 +90,7 @@ sealed trait TickRequestIntegrationTest
     .addConfigTransforms(
       ConfigTransforms.useStaticTime,
      ConfigTransforms.updateSynchronizerTimeTrackerConfigs_(_ => synchronizerTimeTrackerConfig),
+      ConfigTransforms.updateTargetTimestampForwardTolerance(Duration.ofHours(1)),
    )
    .addConfigTransforms(
      ConfigTransforms.setTopologyTransactionRegistrationTimeout(
@@ -138,7 +139,8 @@
       )
     )

-    participants.all.dars.upload(CantonExamplesPath)
+    participants.all.dars.upload(CantonExamplesPath, synchronizerId = daId)
+    participants.all.dars.upload(CantonExamplesPath, synchronizerId = acmeId)
     participant1.parties.enable("Alice", synchronizer = daName)
     participant2.parties.enable("Bob", synchronizer = daName)
     participant1.parties.enable("Alice", synchronizer = acmeName)
@@ -305,7 +307,7 @@
 class TickRequestIntegrationTestMemory extends TickRequestIntegrationTest {
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[StorageConfig.Memory](
+    new UseReferenceBlockSequencer[StorageConfig.Memory](
       loggerFactory,
       sequencerGroups = MultiSynchronizer(
         Seq(Set("sequencer1"), Set("sequencer2"))
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/TopologyAdministrationIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/TopologyAdministrationIntegrationTest.scala
index d77233ea23..d5119f9c25 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/TopologyAdministrationIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/TopologyAdministrationIntegrationTest.scala
@@ -19,13 +19,15 @@ import com.digitalasset.canton.topology.TopologyManager.assignExpectedUsageToKey
 import com.digitalasset.canton.topology.TopologyManagerError
 import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId
 import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId.{Authorized, Temporary}
-import com.digitalasset.canton.topology.store.StoredTopologyTransactions
+import com.digitalasset.canton.topology.store.StoredTopologyTransaction
 import com.digitalasset.canton.topology.transaction.{
   NamespaceDelegation,
   OwnerToKeyMapping,
   SignedTopologyTransaction,
   TopologyTransaction,
 }
+import com.digitalasset.canton.util.GrpcStreamingUtils
+import com.google.protobuf.ByteString
 import monocle.macros.syntax.lens.*

 import scala.concurrent.ExecutionContext
@@ -116,13 +118,13 @@ class TopologyAdministrationIntegrationTest
         },
       )

-      val topologySnapshotBytes = node.topology.transactions.export_topology_snapshot()
+      val topologySnapshotBytes = node.topology.transactions.export_topology_snapshotV2()

-      val topologySnapshot = StoredTopologyTransactions
-        .fromTrustedByteString(topologySnapshotBytes)
+      val topologySnapshot = GrpcStreamingUtils
+        .parseDelimitedFromTrusted(topologySnapshotBytes.newInput(), StoredTopologyTransaction)
         .valueOrFail("failed to deserialize topology snapshot")

-      val storedOtkm = topologySnapshot.result
+      val storedOtkm = topologySnapshot
         .find(_.mapping.isInstanceOf[OwnerToKeyMapping])
         .valueOrFail("retrieve OwnerToKeyMapping request")
@@ -160,14 +162,13 @@
         .valueOrFail("failed to re-sign transaction")

       // we replace the previous OwnerToKeyMapping transaction in the topology snapshot with the new one
-      val topologySnapshotUpdated = topologySnapshot.copy(result =
-        topologySnapshot.result.updated(
-          topologySnapshot.result.indexWhere(_.mapping == otkm),
+      val topologySnapshotUpdated =
+        topologySnapshot.updated(
+          topologySnapshot.indexWhere(_.mapping == otkm),
           storedOtkm.focus(_.transaction).replace(signedTopologyTransaction),
         )
-      )

-      topologySnapshotUpdated.result
+      topologySnapshotUpdated
         .find(_.mapping.isInstanceOf[OwnerToKeyMapping])
         .valueOrFail("retrieve OwnerToKeyMapping request")
         .transaction
@@ -177,11 +178,14 @@
         .keys
         .forgetNE should contain(keyToAdd)

-      val topologySnapshotUpdatedBytes =
-        topologySnapshotUpdated.toByteString(testedProtocolVersion)
+      val builder = ByteString.newOutput()
+      topologySnapshotUpdated.foreach(stored =>
+        stored.writeDelimitedTo(testedProtocolVersion, builder)
+      )
+      val topologySnapshotUpdatedBytes = builder.toByteString

       // we can import the previous topology snapshot with a namespace-only key in the OwnerToKeyMapping
-      node.topology.transactions.import_topology_snapshot(
+      node.topology.transactions.import_topology_snapshotV2(
         topologySnapshotUpdatedBytes,
         TopologyStoreId.Authorized,
       )
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/TransactionTimeoutsIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/TransactionTimeoutsIntegrationTest.scala
index 077ed92021..0e7824ae51 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/TransactionTimeoutsIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/TransactionTimeoutsIntegrationTest.scala
@@ -13,8 +13,8 @@ import com.digitalasset.canton.error.{CantonBaseError, MediatorError}
 import com.digitalasset.canton.examples.java.cycle.Cycle
 import com.digitalasset.canton.integration.*
 import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
   UseProgrammableSequencer,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.logging.LogEntry
 import com.digitalasset.canton.logging.SuppressingLogger.LogEntryOptionality
@@ -194,6 +194,6 @@ abstract class TransactionTimeoutsIntegrationTest

 final class TransactionTimeoutsReferenceIntegrationTestPostgres
     extends TransactionTimeoutsIntegrationTest {
-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
   registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory))
 }
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/TransientContractIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/TransientContractIntegrationTest.scala
index 5f9ff757a3..e3eb3604f3 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/TransientContractIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/TransientContractIntegrationTest.scala
@@ -6,10 +6,7 @@ package com.digitalasset.canton.integration.tests
 import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_LEDGER_EFFECTS
 import com.digitalasset.canton.config.DbConfig
 import com.digitalasset.canton.damltests.java.transientcontracts.TransientContractsTest
-import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
-  UsePostgres,
-}
+import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer}
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
   EnvironmentDefinition,
@@ -82,5 +79,5 @@ trait TransientContractIntegrationTest extends CommunityIntegrationTest with Sha

 class TransientContractReferenceIntegrationTestPostgres extends TransientContractIntegrationTest {
   registerPlugin(new UsePostgres(loggerFactory))
-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
 }
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/BftSynchronizerBootstrapTemplateTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/BftSynchronizerBootstrapTemplateTest.scala
index 61370d4670..5590c8fe7e 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/BftSynchronizerBootstrapTemplateTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/BftSynchronizerBootstrapTemplateTest.scala
@@ -7,7 +7,7 @@ import com.digitalasset.canton.SequencerAlias
 import com.digitalasset.canton.config.DbConfig
 import com.digitalasset.canton.config.RequireTypes.PositiveInt
 import com.digitalasset.canton.console.CommandFailure
-import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
   EnvironmentDefinition,
@@ -142,7 +142,7 @@ sealed trait BftSynchronizerBootstrapTemplateTest

 class BftSynchronizerBootstrapTemplateTestDefault extends BftSynchronizerBootstrapTemplateTest {
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.H2](
+    new UseReferenceBlockSequencer[DbConfig.H2](
       loggerFactory
     )
   )
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/BftSynchronizerSequencerConnectionManipulationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/BftSynchronizerSequencerConnectionManipulationTest.scala
index ba1d1d75c8..24ffd85011 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/BftSynchronizerSequencerConnectionManipulationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/BftSynchronizerSequencerConnectionManipulationTest.scala
@@ -8,10 +8,7 @@ import com.digitalasset.canton.config
 import com.digitalasset.canton.config.DbConfig
 import com.digitalasset.canton.config.RequireTypes.PositiveInt
 import com.digitalasset.canton.console.LocalSequencerReference
-import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
-  UsePostgres,
-}
+import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer}
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
   EnvironmentDefinition,
@@ -149,7 +146,7 @@ class BftSynchronizerSequencerConnectionManipulationTestPostgres
     extends BftSynchronizerSequencerConnectionManipulationTest {
   registerPlugin(new UsePostgres(loggerFactory))
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](
+    new UseReferenceBlockSequencer[DbConfig.Postgres](
       loggerFactory
     )
   )
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/MediatorOnboardingTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/MediatorOnboardingTest.scala
index 47d3ed23c1..1f0d363bf1 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/MediatorOnboardingTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/MediatorOnboardingTest.scala
@@ -14,10 +14,7 @@ import com.digitalasset.canton.integration.bootstrap.{
   NetworkBootstrapper,
   NetworkTopologyDescription,
 }
-import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
-  UsePostgres,
-}
+import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer}
 import com.digitalasset.canton.integration.util.{EntitySyntax, PartiesAllocator}
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
@@ -246,5 +243,5 @@ trait MediatorOnboardingTest

 class MediatorOnboardingTestPostgres extends MediatorOnboardingTest {
   registerPlugin(new UsePostgres(loggerFactory))
-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
 }
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/MediatorTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/MediatorTest.scala
index 7b58336aa3..9b5ddcd15a 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/MediatorTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/MediatorTest.scala
@@ -7,7 +7,7 @@ import com.digitalasset.canton.SynchronizerAlias
 import com.digitalasset.canton.config.DbConfig
 import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt}
 import com.digitalasset.canton.console.LocalParticipantReference
-import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
   EnvironmentDefinition,
@@ -200,7 +200,7 @@ sealed trait MediatorTest extends CommunityIntegrationTest with SharedEnvironmen
 }

 class MediatorTestDefault extends MediatorTest {
-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory))
 }

 //class MediatorTestPostgres extends MediatorTest {
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/NestedDecentralizedNamespaceIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/NestedDecentralizedNamespaceIntegrationTest.scala
index 67fb5364d0..a12ae9a234 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/NestedDecentralizedNamespaceIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/NestedDecentralizedNamespaceIntegrationTest.scala
@@ -6,7 +6,7 @@ package com.digitalasset.canton.integration.tests.bftsynchronizer
 import com.digitalasset.canton.config.DbConfig
 import com.digitalasset.canton.config.RequireTypes.PositiveInt
 import com.digitalasset.canton.console.InstanceReference
-import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
   EnvironmentDefinition,
@@ -20,7 +20,7 @@ class NestedDecentralizedNamespaceIntegrationTest
     extends CommunityIntegrationTest
     with SharedEnvironment {

-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))

   override def environmentDefinition: EnvironmentDefinition =
     EnvironmentDefinition.P3_S2M2.withSetup { implicit env =>
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/NodesBootstrapTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/NodesBootstrapTest.scala
index b33209a0af..70aaa9d74f 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/NodesBootstrapTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/NodesBootstrapTest.scala
@@ -5,10 +5,7 @@ package com.digitalasset.canton.integration.tests.bftsynchronizer

 import com.digitalasset.canton.config.DbConfig
 import com.digitalasset.canton.config.RequireTypes.PositiveInt
-import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
-  UsePostgres,
-}
+import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer}
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
   EnvironmentDefinition,
@@ -82,5 +79,5 @@ trait NodesBootstrapTest extends CommunityIntegrationTest with SharedEnvironment

 class NodesBootstrapTestPostgres extends NodesBootstrapTest {
   registerPlugin(new UsePostgres(loggerFactory))
-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
 }
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/NodesRestartTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/NodesRestartTest.scala
index 293750471e..4dd06f2509 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/NodesRestartTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/NodesRestartTest.scala
@@ -4,11 +4,7 @@ package com.digitalasset.canton.integration.tests.bftsynchronizer

 import com.digitalasset.canton.config.DbConfig
-import com.digitalasset.canton.config.RequireTypes.PositiveInt
-import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
-  UsePostgres,
-}
+import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer}
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
   EnvironmentDefinition,
@@ -16,7 +12,6 @@ import com.digitalasset.canton.integration.{
 }
 import com.digitalasset.canton.logging.LogEntry
 import com.digitalasset.canton.sequencing.client.ResilientSequencerSubscription.LostSequencerSubscription
-import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId

 trait NodesRestartTest
     extends CommunityIntegrationTest
@@ -29,19 +24,8 @@
   "Restart participant nodes not connected to a synchronizer" in { implicit env =>
     import env.*
-
     stopAndWait(participant1)
     startAndWait(participant1)
-
-    // participants run a package vetting process on startup.
- // let's make sure that this mechanism doesn't run again on the second startup - eventually() { - val result = participant1.topology.vetted_packages - .list(store = TopologyStoreId.Authorized) - result should have size (1) - result.head.item.packages shouldNot be(empty) - result.head.context.serial shouldBe PositiveInt.one - } } "Restart an onboarded mediator node" in { implicit env => @@ -93,5 +77,5 @@ trait NodesRestartTest class NodesRestartTestPostgres extends NodesRestartTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/ReassignmentTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/ReassignmentTest.scala index f636b0022f..b795ee1238 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/ReassignmentTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/ReassignmentTest.scala @@ -10,11 +10,8 @@ import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} import com.digitalasset.canton.console.InstanceReference import com.digitalasset.canton.examples.java.iou.{Amount, Iou} -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -130,8 +127,9 @@ trait ReassignmentTest extends CommunityIntegrationTest with SharedEnvironment { val payer = alice val owner = bob - clue("Upload dar") { - Seq(participant1, participant2).foreach(_.dars.upload(CantonExamplesPath)) + clue(s"Upload and vet dar on $synchronizerId1") { + Seq(participant1, participant2).dars + .upload(CantonExamplesPath, synchronizerId = synchronizerId1) } contractId = clue(s"create Iou contract on $synchronizer2") { @@ -163,6 +161,10 @@ trait ReassignmentTest extends CommunityIntegrationTest with SharedEnvironment { clue(s"connect $participant2 to $synchronizer2") { participant2.synchronizers.connect_local(sequencer4, alias = synchronizer2) } + clue(s"upload and vet dar on $synchronizer2") { + Seq(participant1, participant2).dars + .upload(CantonExamplesPath, synchronizerId = synchronizerId2) + } participant1.parties.enable( "alice", synchronizeParticipants = Seq(participant2), @@ -276,7 +277,7 @@ trait ReassignmentTest extends CommunityIntegrationTest with SharedEnvironment { class ReassignmentTestPostgres extends ReassignmentTest { registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory, sequencerGroups) + new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory, sequencerGroups) ) registerPlugin(new UsePostgres(loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/SequencerIntegrationTest.scala 
b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/SequencerIntegrationTest.scala index 15316a4f50..972e9626e7 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/SequencerIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/SequencerIntegrationTest.scala @@ -7,12 +7,10 @@ import com.daml.nonempty.NonEmpty import com.digitalasset.canton.admin.api.client.data.StaticSynchronizerParameters import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.console.InstanceReference +import com.digitalasset.canton.console.{InstanceReference, LocalParticipantReference} import com.digitalasset.canton.crypto.SigningKeyUsage -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.error.MediatorError.MalformedMessage +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.util.OnboardsNewSequencerNode import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -195,6 +193,10 @@ trait SequencerIntegrationTest def submitInvalidEnvelopeAndAssertWarnings( broadcasts: Seq[TopologyTransactionsBroadcast], + participantsToPingForSynchronization: ( + LocalParticipantReference, + LocalParticipantReference, + ), assertEventually: Option[() => Assertion] = None, ): Assertion = loggerFactory.assertEventuallyLogsSeq(SuppressionRule.LevelAndAbove(Level.WARN))( @@ -211,6 +213,12 @@ trait SequencerIntegrationTest sequencer1.underlying.value.sequencer.syncCrypto.approximateTimestamp approximateTimestampAfter should be > approximateTimestampBefore + { + // We do a ping in order to make sure the mediator has processed the above malicious topology transaction + val (from, to) = participantsToPingForSynchronization + from.health.ping(to) + } + assertEventually.map(_()).getOrElse(succeed) } }, @@ -228,7 +236,7 @@ trait SequencerIntegrationTest ) val assertMediatorDeserializationWarning: LogEntry => Assertion = _.shouldBeCantonError( - SyncServiceAlarm, + MalformedMessage, _ should include regex (s"InvariantViolation.*${rootKey.fingerprint}".r), ) forAtLeast(2, logs)(assertSequencerDeserializationWarning) @@ -251,7 +259,10 @@ trait SequencerIntegrationTest // check that a batch with a single invalid envelope still gets processed by the sequencer // topology processing pipeline, even if it's just an empty Deliver that simply moves the approximate time forward. clue("check that a batch with a single invalid envelope still gets processed by the sequencer")( - submitInvalidEnvelopeAndAssertWarnings(broadcasts = Seq(mkBroadcast(invalidRootCert))) + submitInvalidEnvelopeAndAssertWarnings( + broadcasts = Seq(mkBroadcast(invalidRootCert)), + participantsToPingForSynchronization = participant3 -> participant3, + ) ) // submit the invalid root cert together with a valid root cert. 
// we expect that the invalid envelope gets dropped, and the valid envelope @@ -259,6 +270,7 @@ trait SequencerIntegrationTest clue("submit the invalid root cert together with a valid root cert")( submitInvalidEnvelopeAndAssertWarnings( broadcasts = Seq(invalidRootCert, rootCert).map(mkBroadcast), + participantsToPingForSynchronization = participant3 -> participant3, assertEventually = Some(() => participant3.topology.namespace_delegations .list( @@ -280,5 +292,5 @@ trait SequencerIntegrationTest class SequencerIntegrationTestPostgres extends SequencerIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/SequencerOffboardingIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/SequencerOffboardingIntegrationTest.scala index 9a54923679..db3bd7fa9d 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/SequencerOffboardingIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/SequencerOffboardingIntegrationTest.scala @@ -9,8 +9,8 @@ import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} import com.digitalasset.canton.console.InstanceReference import com.digitalasset.canton.integration.plugins.{ UseBftSequencer, - UseCommunityReferenceBlockSequencer, UsePostgres, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.util.OffboardsSequencerNode import com.digitalasset.canton.integration.{ @@ -19,6 +19,11 @@ import com.digitalasset.canton.integration.{ SharedEnvironment, } import com.digitalasset.canton.sequencing.{SequencerConnections, SubmissionRequestAmplification} +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.admin.SequencerBftAdminData.{ + PeerConnectionStatus, + PeerEndpointHealthStatus, +} +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.framework.data.topology.OrderingTopology import com.digitalasset.canton.topology.SynchronizerId trait SequencerOffboardingIntegrationTest @@ -34,6 +39,8 @@ trait SequencerOffboardingIntegrationTest override def environmentDefinition: EnvironmentDefinition = EnvironmentDefinition.P2S4M1_Manual + protected val isBftOrderer: Boolean + private var synchronizerId: SynchronizerId = _ private var staticParameters: StaticSynchronizerParameters = _ private var synchronizerOwners: Seq[InstanceReference] = _ @@ -78,6 +85,27 @@ trait SequencerOffboardingIntegrationTest "Onboard participantX to sequencerX and send a ping" in { implicit env => import env.* + if (isBftOrderer) { + clue("make sure each sequencer has connected to enough other sequencers") { + eventually() { + forAll(env.sequencers.all)(sequencer => + sequencer.bft + .get_peer_network_status(None) + .endpointStatuses + .collect { + case PeerConnectionStatus + .PeerEndpointStatus(_, _, health) => + health.status match { + case PeerEndpointHealthStatus.Authenticated(_) => true + case _ => false + } + case PeerConnectionStatus.PeerIncomingConnection(_) => true + } + .count(identity) should be >= OrderingTopology.weakQuorumSize(sequencers.all.size) + ) + } + } + } clue("participant1 connects to sequencer1") {
participant1.synchronizers.connect_local(sequencer1, daName) } @@ -125,12 +153,14 @@ trait SequencerOffboardingIntegrationTest class SequencerOffboardingReferenceIntegrationTestPostgres extends SequencerOffboardingIntegrationTest { + override val isBftOrderer: Boolean = false registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } class SequencerOffboardingBftOrderingIntegrationTestPostgres extends SequencerOffboardingIntegrationTest { + override val isBftOrderer: Boolean = true registerPlugin(new UsePostgres(loggerFactory)) registerPlugin(new UseBftSequencer(loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/SequencerOnboardingTombstoneTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/SequencerOnboardingTombstoneTest.scala index 9a637bb69c..4f88158fd1 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/SequencerOnboardingTombstoneTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/SequencerOnboardingTombstoneTest.scala @@ -12,10 +12,7 @@ import com.digitalasset.canton.integration.bootstrap.{ NetworkBootstrapper, NetworkTopologyDescription, } -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.util.OnboardsNewSequencerNode import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -283,5 +280,5 @@ trait SequencerOnboardingTombstoneTest class SequencerOnboardingTombstoneTestPostgres extends SequencerOnboardingTombstoneTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/SimpleFunctionalNodesTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/SimpleFunctionalNodesTest.scala index 15fb0cdc2d..ebc5e89d1a 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/SimpleFunctionalNodesTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/bftsynchronizer/SimpleFunctionalNodesTest.scala @@ -8,11 +8,7 @@ import com.digitalasset.canton.config import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.console.CommandFailure import com.digitalasset.canton.crypto.SigningKeyUsage -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UseH2, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UseH2, UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, ConfigTransforms, @@ -135,10 +131,10 @@ trait SimpleFunctionalNodesTest class SimpleFunctionalNodesTestH2 extends SimpleFunctionalNodesTest { registerPlugin(new UseH2(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new 
UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) } class SimpleFunctionalNodesTestPostgres extends SimpleFunctionalNodesTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/connection/ApiInfoIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/connection/ApiInfoIntegrationTest.scala index 34bfbdd817..ca793ca7da 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/connection/ApiInfoIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/connection/ApiInfoIntegrationTest.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.integration.tests.connection import com.digitalasset.canton.config import com.digitalasset.canton.config.StorageConfig -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.{ CommunityIntegrationTest, ConfigTransforms, @@ -60,6 +60,6 @@ class ApiInfoIntegrationTestInMemory extends ApiInfoIntegrationTest { .addConfigTransform(ConfigTransforms.allInMemory) .addConfigTransform(_.focus(_.monitoring.logging.api.messagePayloads).replace(false)) - registerPlugin(new UseCommunityReferenceBlockSequencer[StorageConfig.Memory](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[StorageConfig.Memory](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/connection/BftSequencerConnectionsIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/connection/BftSequencerConnectionsIntegrationTest.scala new file mode 100644 index 0000000000..fea9144ec8 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/connection/BftSequencerConnectionsIntegrationTest.scala @@ -0,0 +1,237 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.connection + +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.console.InstanceReference +import com.digitalasset.canton.integration.bootstrap.{ + NetworkBootstrapper, + NetworkTopologyDescription, +} +import com.digitalasset.canton.integration.plugins.{ + UseExternalProcess, + UsePostgres, + UseReferenceBlockSequencer, +} +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + ConfigTransforms, + EnvironmentDefinition, + SharedEnvironment, + TestConsoleEnvironment, +} +import com.digitalasset.canton.logging.LogEntry +import com.digitalasset.canton.sequencing.{ + SequencerConnectionValidation, + SequencerConnections, + SubmissionRequestAmplification, +} +import com.digitalasset.canton.{SequencerAlias, config} +import org.scalatest.Assertion + +import scala.concurrent.duration.DurationInt + +/** This test checks that the sequencer connection pool properly recovers connections and + * subscriptions when sequencers go down. 
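+ * In other words, when the sequencers backing the current subscriptions go down, the nodes + * must acquire fresh subscriptions on the remaining reachable sequencers.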
+ * + * The environment is as follows: + * - 2 participants + * - 4 sequencers (running in external processes) + * - 1 mediator + * + * The participants and the mediator connect to all the sequencers with a trust threshold of 2 and + * a liveness margin of 0. When they start up, all sequencers are up, which means that they will + * have connections to all of them for sending, but only two subscriptions on random sequencers + * will be started (since the liveness margin is 0). + * + * The test then executes the following scenario: + * + * - Ping from participant 1 to participant 2 + * - Stop sequencers 3 and 4 + * - Ping again (nodes must acquire subscriptions on sequencers 1 and 2 if they did not yet have + * them) + * - Restart sequencers 3 and 4, stop sequencers 1 and 2 + * - Ping again (nodes must acquire subscriptions on sequencers 3 and 4) + * - Restart sequencers 1 and 2 + * + * This scenario is executed once using a graceful stop to bring the sequencers down, and a second + * time using a kill. This checks that connection failures are detected properly even when they are + * not gracefully closed. + */ +sealed trait BftSequencerConnectionsIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment { + + protected lazy val externalPlugin = new UseExternalProcess( + loggerFactory, + externalSequencers = Set("sequencer1", "sequencer2", "sequencer3", "sequencer4"), + fileNameHint = this.getClass.getSimpleName, + ) + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P2S4M1_Config + .addConfigTransforms(ConfigTransforms.setConnectionPool(true)) + .withManualStart + .withSetup { implicit env => + import env.* + + sequencers.remote.foreach { seq => + logger.debug(s"Starting sequencer ${seq.name}") + externalPlugin.start(seq.name) + } + sequencers.remote.foreach { seq => + seq.health.wait_for_running() + logger.debug(s"Sequencer ${seq.name} is running") + } + } + .withNetworkBootstrap { implicit env => + import env.* + new NetworkBootstrapper( + NetworkTopologyDescription( + daName, + synchronizerOwners = Seq[InstanceReference](remoteSequencer1, mediator1), + synchronizerThreshold = PositiveInt.one, + sequencers = sequencers.remote, + mediators = Seq(mediator1), + overrideMediatorToSequencers = Some( + Map( + mediator1 -> (sequencers.remote, + /* trust threshold */ PositiveInt.two, /* liveness margin */ NonNegativeInt.zero) + ) + ), + ) + ) + } + .withSetup { implicit env => + import env.* + + sequencers.remote.foreach { seq => + seq.health.wait_for_initialized() + logger.debug(s"Sequencer ${seq.name} is initialized") + } + } + + private lazy val expectedLogEntries = Seq[LogEntry => Assertion]( + _.warningMessage should include regex + raw"Request failed for server-.*\. Is the server running\? Did you configure the server address as 0\.0\.0\.0\?" + + raw" Are you using the right TLS settings\?" 
+ ) + + private def pingWithSequencersDown()(implicit env: TestConsoleEnvironment) = { + import env.* + + val pingTimeout = config.NonNegativeDuration.ofSeconds(120) + + loggerFactory.assertLoggedWarningsAndErrorsSeq( + participant1.health.ping(participant2.id, timeout = pingTimeout), + LogEntry.assertLogSeq( + mustContainWithClue = Seq.empty, + mayContain = expectedLogEntries, + ), + ) + } + + "BFT Synchronizer" must { + "Initialize the setup" in { implicit env => + import env.* + + val amplification = SubmissionRequestAmplification( + factor = 10, + patience = config.NonNegativeFiniteDuration.tryFromDuration(5.seconds), + ) + + clue("reconfigure mediator to use amplification") { + mediator1.sequencer_connection.modify_connections { old => + SequencerConnections.tryMany( + connections = old.connections, + sequencerTrustThreshold = old.sequencerTrustThreshold, + sequencerLivenessMargin = old.sequencerLivenessMargin, + submissionRequestAmplification = amplification, + sequencerConnectionPoolDelays = old.sequencerConnectionPoolDelays, + ) + } + } + + val connectionsConfig = sequencers.remote.map(s => + s.config.publicApi.asSequencerConnection(SequencerAlias.tryCreate(s.name)) + ) + + clue("connect participant1 to all sequencers") { + participant1.start() + participant1.synchronizers.connect_bft( + connections = connectionsConfig, + sequencerTrustThreshold = PositiveInt.two, + sequencerLivenessMargin = NonNegativeInt.zero, + submissionRequestAmplification = amplification, + synchronizerAlias = daName, + physicalSynchronizerId = Some(daId), + validation = SequencerConnectionValidation.Disabled, + ) + } + + clue("connect participant2 to all sequencers") { + participant2.start() + participant2.synchronizers.connect_bft( + connections = connectionsConfig, + sequencerTrustThreshold = PositiveInt.two, + sequencerLivenessMargin = NonNegativeInt.zero, + submissionRequestAmplification = amplification, + synchronizerAlias = daName, + physicalSynchronizerId = Some(daId), + validation = SequencerConnectionValidation.Disabled, + ) + } + } + + "Handle sequencers stopped gracefully" in { implicit env => + handleFailingSequencers(forceKill = false) + } + + "Restart sequencers 1 and 2" in { implicit env => + import env.* + + Seq(remoteSequencer1, remoteSequencer2).foreach(seq => externalPlugin.start(seq.name)) + } + + "Handle killed sequencers" in { implicit env => + handleFailingSequencers(forceKill = true) + } + } + + private def handleFailingSequencers( + forceKill: Boolean + )(implicit env: TestConsoleEnvironment): Unit = { + import env.* + + clue("Perform an initial ping") { + participant1.health.ping(participant2.id) + } + + clue("Stop sequencers 3 and 4") { + Seq(remoteSequencer3, remoteSequencer4).foreach(seq => + externalPlugin.kill(seq.name, force = forceKill) + ) + } + clue("Ping again") { + pingWithSequencersDown() + } + + clue("Restart sequencers 3 and 4, stop sequencers 1 and 2") { + Seq(remoteSequencer3, remoteSequencer4).foreach(seq => externalPlugin.start(seq.name)) + Seq(remoteSequencer1, remoteSequencer2).foreach(seq => + externalPlugin.kill(seq.name, force = forceKill) + ) + } + clue("Ping yet again") { + pingWithSequencersDown() + } + } +} + +class BftSequencerConnectionsIntegrationTestDefault extends BftSequencerConnectionsIntegrationTest { + registerPlugin(new UsePostgres(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(externalPlugin) +} diff --git 
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/connection/SequencerConnectionServiceIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/connection/SequencerConnectionServiceIntegrationTest.scala new file mode 100644 index 0000000000..ed150bfc9f --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/connection/SequencerConnectionServiceIntegrationTest.scala @@ -0,0 +1,181 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.connection + +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.console.InstanceReference +import com.digitalasset.canton.integration.bootstrap.{ + NetworkBootstrapper, + NetworkTopologyDescription, +} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + ConfigTransforms, + EnvironmentDefinition, + SharedEnvironment, +} +import com.digitalasset.canton.logging.{LogEntry, SuppressionRule} +import com.digitalasset.canton.sequencing.{ + SequencerConnectionValidation, + SequencerConnectionXPool, + SequencerConnections, + SequencerSubscriptionPool, + SubmissionRequestAmplification, +} +import com.digitalasset.canton.{SequencerAlias, config} +import monocle.macros.syntax.lens.* +import org.slf4j.event.Level.INFO + +import scala.concurrent.duration.DurationInt + +sealed trait SequencerConnectionServiceIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment { + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P2S2M1_Config + .addConfigTransforms( + ConfigTransforms.setConnectionPool(true), + _.focus(_.parameters.timeouts.processing.sequencerInfo) + .replace(config.NonNegativeDuration.tryFromDuration(2.seconds)), + ) + .withNetworkBootstrap { implicit env => + import env.* + new NetworkBootstrapper( + NetworkTopologyDescription( + daName, + synchronizerOwners = Seq[InstanceReference](sequencer1, mediator1), + synchronizerThreshold = PositiveInt.one, + sequencers = Seq(sequencer1, sequencer2), + mediators = Seq(mediator1), + overrideMediatorToSequencers = Some( + Map( + mediator1 -> (Seq(sequencer1, sequencer2), + /* trust threshold */ PositiveInt.one, /* liveness margin */ NonNegativeInt.zero) + ) + ), + ) + ) + } + + "SequencerConnectionService" must { + "Allow modifying the pool configuration" in { implicit env => + import env.* + + val connectionsConfig = Seq(sequencer1, sequencer2).map(s => + s.config.publicApi.clientConfig.asSequencerConnection(SequencerAlias.tryCreate(s.name)) + ) + + clue("connect participant1 to all sequencers") { + participant1.synchronizers.connect_bft( + connections = connectionsConfig, + sequencerTrustThreshold = PositiveInt.one, + sequencerLivenessMargin = NonNegativeInt.zero, + submissionRequestAmplification = SubmissionRequestAmplification.NoAmplification, + synchronizerAlias = daName, + physicalSynchronizerId = Some(daId), + validation = SequencerConnectionValidation.Disabled, + ) + } + + participant1.health.ping(participant1.id) + + mediator1.sequencer_connection.get().value.sequencerTrustThreshold shouldBe PositiveInt.one + + clue("reconfigure mediator's trust threshold") { + 
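// Both the connection pool and the subscription pool are expected to log the + // configuration change, hence the forExactly(2, ...) assertion below. +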
loggerFactory.assertLogsSeq( + SuppressionRule.LevelAndAbove(INFO) && (SuppressionRule + .forLogger[SequencerConnectionXPool] || SuppressionRule + .forLogger[SequencerSubscriptionPool]) + )( + mediator1.sequencer_connection.modify_connections { + _.withSequencerTrustThreshold(PositiveInt.two).valueOrFail("set trust threshold to 2") + }, + forExactly(2, _)(_.infoMessage should include("Configuration updated")), + ) + + mediator1.sequencer_connection.get().value.sequencerTrustThreshold shouldBe PositiveInt.two + + // The mediator is still functional + participant1.health.ping(participant1.id) + } + + clue("reconfigure mediator's connections to use a single connection") { + mediator1.sequencer_connection.modify_connections { old => + SequencerConnections.tryMany( + connectionsConfig.drop(1), + sequencerTrustThreshold = PositiveInt.one, + old.sequencerLivenessMargin, + old.submissionRequestAmplification, + old.sequencerConnectionPoolDelays, + ) + } + + // The configuration has changed + mediator1.sequencer_connection + .get() + .value + .connections + .forgetNE + .loneElement shouldBe connectionsConfig(1) + + // The mediator is still functional + participant1.health.ping(participant1.id) + } + + clue("fail to reconfigure mediator's connections if validation fails") { + sequencer1.stop() + + assertThrowsAndLogsCommandFailures( + mediator1.sequencer_connection.modify_connections { old => + SequencerConnections.tryMany( + connectionsConfig, + sequencerTrustThreshold = PositiveInt.two, + old.sequencerLivenessMargin, + old.submissionRequestAmplification, + old.sequencerConnectionPoolDelays, + ) + }, + _.commandFailureMessage should include( + "FAILED_PRECONDITION/TimeoutError(Connection pool failed to initialize" + ), + ) + + // The configuration has not changed + mediator1.sequencer_connection + .get() + .value + .connections + .forgetNE + .loneElement shouldBe connectionsConfig(1) + + // The mediator is still functional + // We may need to retry, because if participant1 has a single subscription on sequencer2, it will not detect + // that sequencer1 is down until it first sends to it, and could therefore still pick it for the first send. + // An alternative would be to use amplification. + eventually() { + loggerFactory.assertLoggedWarningsAndErrorsSeq( + participant1.health.maybe_ping(participant1.id, timeout = 2.seconds) shouldBe defined, + LogEntry.assertLogSeq( + mustContainWithClue = Seq.empty, + mayContain = Seq( + _.warningMessage should include regex + raw"Request failed for server-.*\. Is the server running\? Did you configure the server address as 0\.0\.0\.0\?" + + raw" Are you using the right TLS settings\?" 
+ ), + ), + ) + } + } + } + } +} + +class SequencerConnectionServiceIntegrationTestDefault + extends SequencerConnectionServiceIntegrationTest { + registerPlugin(new UsePostgres(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/crashrecovery/AutomaticReassignmentCrashIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/crashrecovery/AutomaticReassignmentCrashIntegrationTest.scala index 511bee6021..86cf71832d 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/crashrecovery/AutomaticReassignmentCrashIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/crashrecovery/AutomaticReassignmentCrashIntegrationTest.scala @@ -7,15 +7,15 @@ import com.digitalasset.canton.config.CantonRequireTypes.InstanceName import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.console.{CommandFailure, RemoteParticipantReference} import com.digitalasset.canton.error.TransactionRoutingError.AutomaticReassignmentForTransactionFailure -import com.digitalasset.canton.integration.EnvironmentDefinition -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UseExternalProcess, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.tests.SynchronizerRouterIntegrationTestSetup +import com.digitalasset.canton.integration.{ConfigTransforms, EnvironmentDefinition} import com.digitalasset.canton.participant.util.JavaCodegenUtil.* import com.digitalasset.canton.synchronizer.sequencer.{ HasProgrammableSequencer, @@ -44,7 +44,7 @@ class AutomaticReassignmentCrashIntegrationTest registerPlugin(new UsePostgres(loggerFactory)) registerPlugin(external) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq(Set("sequencer1"), Set("sequencer2"), Set("sequencer3")) @@ -60,17 +60,15 @@ class AutomaticReassignmentCrashIntegrationTest override def environmentDefinition: EnvironmentDefinition = super.environmentDefinition - .addConfigTransform( - ProgrammableSequencer.configOverride(this.getClass.toString, loggerFactory) + .addConfigTransforms( + ConfigTransforms.updateTargetTimestampForwardTolerance(30.seconds), + ProgrammableSequencer.configOverride(this.getClass.toString, loggerFactory), ) .withSetup { implicit env => import env.* remoteP1 = rp("participant1") - - participants.local.dars.upload(darPath) remoteP1.health.wait_for_initialized() - remoteP1.dars.upload(darPath) connectToCustomSynchronizers( Map( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/crashrecovery/LocalSynchronizerRestartTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/crashrecovery/LocalSynchronizerRestartTest.scala index ff614e3e75..4f514a1986 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/crashrecovery/LocalSynchronizerRestartTest.scala +++ 
b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/crashrecovery/LocalSynchronizerRestartTest.scala @@ -4,10 +4,7 @@ package com.digitalasset.canton.integration.tests.crashrecovery import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -16,7 +13,7 @@ import com.digitalasset.canton.integration.{ final class LocalSynchronizerRestartTest extends CommunityIntegrationTest with SharedEnvironment { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) override def environmentDefinition: EnvironmentDefinition = EnvironmentDefinition.P1_S1M1.withSetup { implicit env => diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/crashrecovery/SequencerRestartTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/crashrecovery/SequencerRestartTest.scala index 152306a7fc..a2e3749925 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/crashrecovery/SequencerRestartTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/crashrecovery/SequencerRestartTest.scala @@ -46,9 +46,9 @@ import com.digitalasset.canton.integration.bootstrap.{ import com.digitalasset.canton.integration.plugins.UseExternalProcess.ShutdownPhase import com.digitalasset.canton.integration.plugins.{ PostgresDumpRestore, - UseCommunityReferenceBlockSequencer, UseExternalProcess, UsePostgres, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -60,7 +60,7 @@ import com.digitalasset.canton.integration.{ import com.digitalasset.canton.lifecycle.{CloseContext, FlagCloseable, HasCloseContext} import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.participant.metrics.ParticipantTestMetrics -import com.digitalasset.canton.resource.{CommunityStorageFactory, DbStorage, MemoryStorage} +import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, StorageSingleFactory} import com.digitalasset.canton.sequencing.TrafficControlParameters as InternalTrafficControlParameters import com.digitalasset.canton.synchronizer.block.data.db.DbSequencerBlockStore import com.digitalasset.canton.synchronizer.metrics.SequencerMetrics @@ -120,7 +120,6 @@ abstract class BaseSynchronizerRestartTest synchronizerThreshold = PositiveInt.one, sequencers = Seq(remoteSequencer1), mediators = Seq(mediator1), - EnvironmentDefinition.defaultStaticSynchronizerParameters, ) ) } @@ -148,7 +147,7 @@ abstract class BaseSynchronizerRestartTest sys.error(s"logging was used but shouldn't be") } ) - val sequencerPlugin = new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory) + val sequencerPlugin = new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory) registerPlugin(sequencerPlugin) registerPlugin(external) @@ -157,7 +156,7 @@ abstract class BaseSynchronizerRestartTest )(implicit env: TestConsoleEnvironment, closeContext: CloseContext): DbStorage = { import env.* val storage = - new 
CommunityStorageFactory(external.storageConfig(sequencerReference.name)) + new StorageSingleFactory(external.storageConfig(sequencerReference.name)) .create( connectionPoolForParticipant = false, None, diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/crashrecovery/SynchronizerRecoveryTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/crashrecovery/SynchronizerRecoveryTest.scala index a295510983..d3162ac91f 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/crashrecovery/SynchronizerRecoveryTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/crashrecovery/SynchronizerRecoveryTest.scala @@ -49,12 +49,32 @@ final class SynchronizerRecoveryTest extends BaseSynchronizerRestartTest with Ha _.message should (include("UNAUTHENTICATED") and include( "not match the synchronizer id of the synchronizer the participant" )), + // This is similar to the line above: the connection pool catches mismatches while validating connections + _.message should include("Sequencer connection has changed attributes"), + _.message should (include(SyncServiceSynchronizerDisconnect.id) and include( + "fatally disconnected because of Trust threshold 1 is no longer reachable" + )), _.message should (include("PERMISSION_DENIED") and include("access is disabled")), _.message should include(LostSequencerSubscription.id), _.message should include(SyncServiceSynchronizerDisabledUs.id), _.message should include("Token refresh aborted due to shutdown."), ) + private def expectedLogsForConnectionFailure(usingConnectionPool: Boolean) = { + val seq = Seq[(LogEntry => Assertion, String)]( + (_.shouldBeCommandFailure(FailedToConnectToSequencers), "Failure to connect") + ) + if (usingConnectionPool) + ( + (logEntry: LogEntry) => + logEntry.warningMessage should include( + "Validation failure: Connection is not on expected sequencer" + ), + "Connection validation failure", + ) +: seq + else seq + } + "if synchronizer loses all state, it will consider previously connected participant disabled" in { implicit env: TestConsoleEnvironment => import env.* @@ -90,19 +110,19 @@ final class SynchronizerRecoveryTest extends BaseSynchronizerRestartTest with Ha ) ) + val usingConnectionPool = participant1.config.sequencerClient.useNewConnectionPool + clue("Reconnect and assert throwable and logs ...")( loggerFactory.assertThrowsAndLogsSeq[CommandFailure]( participant1.synchronizers.reconnect(daName), - logEntries => logEntries.head.shouldBeCommandFailure(FailedToConnectToSequencers), + LogEntry.assertLogSeq(expectedLogsForConnectionFailure(usingConnectionPool)), ) ) clue("Reconnect_all and assert throwable and logs ...")( loggerFactory.assertThrowsAndLogsSeq[CommandFailure]( participant1.synchronizers.reconnect_all(ignoreFailures = false), - logEntries => { - logEntries.head.shouldBeCommandFailure(FailedToConnectToSequencers) - }, + LogEntry.assertLogSeq(expectedLogsForConnectionFailure(usingConnectionPool)), ) ) } @@ -142,12 +162,12 @@ final class SynchronizerRecoveryTest extends BaseSynchronizerRestartTest with Ha ) ) + val usingConnectionPool = participant1.config.sequencerClient.useNewConnectionPool + clue("Reconnect participant1 to the synchronizer and assert that its connection is corrupt")( loggerFactory.assertThrowsAndLogsSeq[CommandFailure]( participant1.synchronizers.reconnect(daName), - entries => { - entries.head.shouldBeCommandFailure(FailedToConnectToSequencers) - }, + 
LogEntry.assertLogSeq(expectedLogsForConnectionFailure(usingConnectionPool)), ) ) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/dynamicsynchronizerparameters/LedgerTimeRecordTimeToleranceChangesIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/dynamicsynchronizerparameters/LedgerTimeRecordTimeToleranceChangesIntegrationTest.scala index 3a156b0d18..75bf4ef251 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/dynamicsynchronizerparameters/LedgerTimeRecordTimeToleranceChangesIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/dynamicsynchronizerparameters/LedgerTimeRecordTimeToleranceChangesIntegrationTest.scala @@ -8,8 +8,8 @@ import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.integration.plugins.{ UseBftSequencer, - UseCommunityReferenceBlockSequencer, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -139,7 +139,7 @@ trait LedgerTimeRecordTimeToleranceChangesIntegrationTest class LedgerTimeRecordTimeToleranceChangesIntegrationTestDefault extends LedgerTimeRecordTimeToleranceChangesIntegrationTest { registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory) + new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory) ) // we need to register the ProgrammableSequencer after the ReferenceBlockSequencer registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory)) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/dynamicsynchronizerparameters/SynchronizerParametersChangeIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/dynamicsynchronizerparameters/SynchronizerParametersChangeIntegrationTest.scala index 5d4727da69..81c33a23c2 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/dynamicsynchronizerparameters/SynchronizerParametersChangeIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/dynamicsynchronizerparameters/SynchronizerParametersChangeIntegrationTest.scala @@ -21,15 +21,16 @@ import com.digitalasset.canton.console.ConsoleEnvironment.Implicits.* import com.digitalasset.canton.console.{CommandFailure, SequencerReference} import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.integration.* -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.plugins.{ UseBftSequencer, - UseCommunityReferenceBlockSequencer, UsePostgres, + UseReferenceBlockSequencer, } import com.digitalasset.canton.logging.{LogEntry, SuppressionRule} +import com.digitalasset.canton.protocol.DynamicSynchronizerParameters import com.digitalasset.canton.sequencing.protocol.SequencerErrors -import com.digitalasset.canton.time.SimClock +import com.digitalasset.canton.time.NonNegativeFiniteDuration import com.digitalasset.canton.topology.TopologyManagerError.InvalidSynchronizer import com.digitalasset.canton.topology.{ ForceFlag, @@ -93,7 +94,7 @@ object SynchronizerParametersFixture { } } -trait 
SynchronizerParametersChangeIntegrationTest +sealed trait SynchronizerParametersChangeIntegrationTest extends CommunityIntegrationTest with SharedEnvironment with SynchronizerParametersFixture @@ -103,26 +104,26 @@ trait SynchronizerParametersChangeIntegrationTest private lazy val dynamicReconciliationInterval = config.PositiveDurationSeconds.ofSeconds(2) override lazy val environmentDefinition: EnvironmentDefinition = - EnvironmentDefinition.P2_S1M1_S1M1.addConfigTransforms( - ConfigTransforms.useStaticTime, - // Disable retries in the ping service so that any submission error is reported reliably - // This makes the log messages more deterministic. - ConfigTransforms.updateAllParticipantConfigs_( - _.focus(_.parameters.adminWorkflow.retries).replace(false) - ), - ) withSetup { implicit env => - import env.* + EnvironmentDefinition.P2_S1M1_S1M1 + .addConfigTransforms( + ConfigTransforms.useStaticTime, + // Disable retries in the ping service so that any submission error is reported reliably + // This makes the log messages more deterministic. + ConfigTransforms.updateAllParticipantConfigs_( + _.focus(_.parameters.adminWorkflow.retries).replace(false) + ), + ) + .withSetup { implicit env => + import env.* - participant1.synchronizers.connect_local(sequencer1, alias = daName) - } + participant1.synchronizers.connect_local(sequencer1, alias = daName) + } protected lazy val sequencersForPlugin: MultiSynchronizer = MultiSynchronizer(Seq(Set("sequencer1"), Set("sequencer2")).map(_.map(InstanceName.tryCreate))) - private lazy val defaultParameters = ConsoleDynamicSynchronizerParameters.initialValues( - new SimClock(loggerFactory = loggerFactory), - testedProtocolVersion, - ) + private lazy val defaultParameters = + ConsoleDynamicSynchronizerParameters.initialValues(testedProtocolVersion) private def acmeSynchronizer(implicit env: TestConsoleEnvironment) = SynchronizerParametersFixture.Synchronizer(env.sequencer2) @@ -237,9 +238,6 @@ trait SynchronizerParametersChangeIntegrationTest myParticipant.topology.synchronizer_parameters .get_dynamic_synchronizer_parameters(synchronizerId) .assignmentExclusivityTimeout - myParticipant.topology.synchronizer_parameters - .get_dynamic_synchronizer_parameters(synchronizerId) - .topologyChangeDelay myParticipant.topology.synchronizer_parameters .get_dynamic_synchronizer_parameters(synchronizerId) .ledgerTimeRecordTimeTolerance @@ -270,7 +268,6 @@ trait SynchronizerParametersChangeIntegrationTest preparationTimeRecordTimeTolerance = 1.minute, mediatorReactionTimeout = 20.seconds, assignmentExclusivityTimeout = 1.second, - topologyChangeDelay = 0.millis, reconciliationInterval = 5.seconds, confirmationRequestsMaxRate = 100, maxRequestSize = 100000, @@ -297,8 +294,7 @@ trait SynchronizerParametersChangeIntegrationTest mySequencer.topology.synchronizer_parameters.propose_update( synchronizerId, _.update( - confirmationResponseTimeout = 10.seconds, - topologyChangeDelay = 1.second, + confirmationResponseTimeout = 10.seconds ), ) // user-manual-entry-begin:-end: UpdateDynamicSynchronizerParameters @@ -311,7 +307,6 @@ trait SynchronizerParametersChangeIntegrationTest _confirmationResponseTimeout, _mediatorReactionTimeout, _assignmentExclusivityTimeout, - _topologyChangeDelay, _ledgerTimeRecordTimeTolerance, _mediatorDeduplicationTimeout, _reconciliationInterval, @@ -421,7 +416,6 @@ trait SynchronizerParametersChangeIntegrationTest confirmationResponseTimeout = config.NonNegativeFiniteDuration.Zero, mediatorReactionTimeout = config.NonNegativeFiniteDuration.Zero, 
assignmentExclusivityTimeout = config.NonNegativeFiniteDuration.Zero, - topologyChangeDelay = d, ledgerTimeRecordTimeTolerance = config.NonNegativeFiniteDuration.Zero, mediatorDeduplicationTimeout = d, reconciliationInterval = config.PositiveDurationSeconds.ofSeconds(1), @@ -491,6 +485,113 @@ trait SynchronizerParametersChangeIntegrationTest getCurrentSynchronizerParameters(daSynchronizer) shouldBe newParams } + + "require force to set out of bounds values" in { implicit env => + val id = daSynchronizer.synchronizerId + val sequencer = daSynchronizer.sequencer + + def runTest( + setter: ( + config.NonNegativeFiniteDuration, + ConsoleDynamicSynchronizerParameters, + ) => ConsoleDynamicSynchronizerParameters, + getter: ConsoleDynamicSynchronizerParameters => config.NonNegativeFiniteDuration, + boundsInternal: (NonNegativeFiniteDuration, NonNegativeFiniteDuration), + name: String, + ) = { + val (minInternal, maxInternal) = boundsInternal + val min = config.NonNegativeFiniteDuration.tryFromJavaDuration(minInternal.duration) + val belowMin = + config.NonNegativeFiniteDuration.tryFromJavaDuration(minInternal.duration.minusNanos(1)) + val max = config.NonNegativeFiniteDuration.tryFromJavaDuration(maxInternal.duration) + val aboveMax = + config.NonNegativeFiniteDuration.tryFromJavaDuration(maxInternal.duration.plusNanos(1)) + + // Ensure changing to min does something + getter(sequencer.topology.synchronizer_parameters.latest(id)) should not be min + + // change to min should succeed + sequencer.topology.synchronizer_parameters + .propose_update( + daSynchronizer.synchronizerId, + parameters => setter(min, parameters), + ) + + // change to belowMin should fail + loggerFactory.assertThrowsAndLogs[CommandFailure]( + sequencer.topology.synchronizer_parameters + .propose_update( + daSynchronizer.synchronizerId, + parameters => setter(belowMin, parameters), + ), + _.errorMessage should (include( + TopologyManagerError.ValueOutOfBounds + .Error(belowMin.toInternal, name, minInternal, maxInternal) + .cause + )), + ) + + // change to belowMin with force should succeed + sequencer.topology.synchronizer_parameters + .propose_update( + daSynchronizer.synchronizerId, + parameters => setter(belowMin, parameters), + force = ForceFlags(ForceFlag.AllowOutOfBoundsValue), + ) + + // change to max should succeed + sequencer.topology.synchronizer_parameters + .propose_update( + daSynchronizer.synchronizerId, + parameters => setter(max, parameters), + ) + + // change to aboveMax should fail + loggerFactory.assertThrowsAndLogs[CommandFailure]( + sequencer.topology.synchronizer_parameters + .propose_update( + daSynchronizer.synchronizerId, + parameters => setter(aboveMax, parameters), + ), + _.errorMessage should (include( + TopologyManagerError.ValueOutOfBounds + .Error(aboveMax.toInternal, name, minInternal, maxInternal) + .cause + )), + ) + + // change to aboveMax with force should succeed + sequencer.topology.synchronizer_parameters + .propose_update( + daSynchronizer.synchronizerId, + parameters => setter(aboveMax, parameters), + force = ForceFlags(ForceFlag.AllowOutOfBoundsValue), + ) + + // reset to valid value + sequencer.topology.synchronizer_parameters + .propose_update( + daSynchronizer.synchronizerId, + parameters => setter(max, parameters), + force = ForceFlags(ForceFlag.AllowOutOfBoundsValue), + ) + } + + runTest( + (v, p) => p.update(confirmationResponseTimeout = v), + _.confirmationResponseTimeout, + DynamicSynchronizerParameters.confirmationResponseTimeoutBounds, + "confirmation response timeout", + ) + 
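// The same bounds check runs for each bounded parameter below: values at the + // bounds succeed, values just outside them fail unless ForceFlag.AllowOutOfBoundsValue is set.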
+ runTest( + (v, p) => p.update(mediatorReactionTimeout = v), + _.mediatorReactionTimeout, + DynamicSynchronizerParameters.mediatorReactionTimeoutBounds, + "mediator reaction timeout", + ) + + } } "A participant" can { @@ -546,7 +647,7 @@ trait SynchronizerParametersChangeIntegrationTest class SynchronizerParametersChangeIntegrationTestInMemory extends SynchronizerParametersChangeIntegrationTest { registerPlugin( - new UseCommunityReferenceBlockSequencer[StorageConfig.Memory]( + new UseReferenceBlockSequencer[StorageConfig.Memory]( loggerFactory, sequencersForPlugin, ) @@ -568,7 +669,7 @@ trait SynchronizerParametersRestartIntegrationTest EnvironmentDefinition.P0_S1M1 private lazy val defaultParameters = - ConsoleDynamicSynchronizerParameters.defaultValues(testedProtocolVersion) + ConsoleDynamicSynchronizerParameters.initialValues(testedProtocolVersion) "Dynamic synchronizer parameters" should { "not be read from config upon restart" in { implicit env => @@ -600,7 +701,7 @@ trait SynchronizerParametersRestartIntegrationTest class SynchronizerParametersRestartIntegrationTestPostgres extends SynchronizerParametersRestartIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } class SynchronizerParametersRestartBftOrderingIntegrationTestPostgres diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/dynamicsynchronizerparameters/SynchronizerParametersTimeoutChangesIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/dynamicsynchronizerparameters/SynchronizerParametersTimeoutChangesIntegrationTest.scala index bbfd6fead3..8db534d87d 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/dynamicsynchronizerparameters/SynchronizerParametersTimeoutChangesIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/dynamicsynchronizerparameters/SynchronizerParametersTimeoutChangesIntegrationTest.scala @@ -7,8 +7,8 @@ import com.digitalasset.canton.config import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.error.MediatorError import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -168,7 +168,7 @@ trait SynchronizerParametersTimeoutChangesIntegrationTest class SynchronizerParametersTimeoutChangesReferenceIntegrationTestDefault extends SynchronizerParametersTimeoutChangesIntegrationTest { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) // we need to register the ProgrammableSequencer after the ReferenceBlockSequencer registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/examples/DockerConfigIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/examples/DockerConfigIntegrationTest.scala new file mode 100644 index 0000000000..610d3f8f7f --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/examples/DockerConfigIntegrationTest.scala @@ -0,0 
+1,31 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.examples + +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.integration.CommunityIntegrationTest +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} +import com.digitalasset.canton.integration.tests.examples.`ExampleIntegrationTest`.dockerImagesPath + +sealed abstract class DockerConfigIntegrationTest + extends ExampleIntegrationTest( + dockerImagesPath / "canton-base" / "storage.conf", + dockerImagesPath / "canton-mediator" / "app.conf", + dockerImagesPath / "canton-sequencer" / "app.conf", + dockerImagesPath / "canton-participant" / "app.conf", + dockerImagesPath / "integration-tests" / "overrides.conf", + ) + with CommunityIntegrationTest { + "run docker synchronizer bootstrap successfully" in { env => + import env.* + runScript(dockerImagesPath / "integration-tests" / "integration-bootstrap.sc")(environment) + + } +} + +final class DockerConfigIntegrationTestPostgres extends DockerConfigIntegrationTest { + registerPlugin(new UsePostgres(loggerFactory)) + + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/examples/ExampleIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/examples/ExampleIntegrationTest.scala index 21a5549fc4..79f3055004 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/examples/ExampleIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/examples/ExampleIntegrationTest.scala @@ -64,7 +64,8 @@ trait HasConsoleScriptRunner { this: NamedLogging => ConsoleScriptRunner.run(env, scriptPath.toJava, logger = logger).value.discard } -object ExampleIntegrationTest { +object `ExampleIntegrationTest` { + lazy val dockerImagesPath: File = "docker" / "canton" / "images" lazy val examplesPath: File = "community" / "app" / "src" / "pack" / "examples" lazy val simpleTopology: File = examplesPath / "01-simple-topology" lazy val referenceConfiguration: File = "community" / "app" / "src" / "pack" / "config" @@ -75,7 +76,6 @@ object ExampleIntegrationTest { lazy val advancedConfTestEnv: File = "community" / "app" / "src" / "test" / "resources" / "advancedConfDef.env" lazy val bftSequencerConfigurationFolder: File = examplesPath / "11-bft-sequencer" - def ensureSystemProperties(kvs: (String, String)*): Unit = blocking(synchronized { kvs.foreach { case (key, value) => Option(System.getProperty(key)) match { diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/examples/InteractiveSubmissionDemoExampleIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/examples/InteractiveSubmissionDemoExampleIntegrationTest.scala index ce12327d96..a36359d649 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/examples/InteractiveSubmissionDemoExampleIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/examples/InteractiveSubmissionDemoExampleIntegrationTest.scala @@ -72,6 +72,9 @@ sealed abstract class InteractiveSubmissionDemoExampleIntegrationTest interactiveSubmissionFolder / "com", 
interactiveSubmissionFolder / "google", interactiveSubmissionFolder / "scalapb", + File("canton_ports.json"), + File("private_key.der"), + File("public_key.der"), ).foreach(_.delete(swallowIOExceptions = true)) } @@ -292,6 +295,21 @@ sealed abstract class InteractiveSubmissionDemoExampleIntegrationTest "Invalid unique identifier `invalid_Store` with missing namespace", ) } + + "run external party onboarding via ledger api" in { implicit env => + setupTest + runAndAssertCommandSuccess( + Process( + Seq( + "./external_party_onboarding.sh" + ), + cwd = interactiveSubmissionFolder.toJava, + ), + processLogger, + ) + + } + } final class InteractiveSubmissionDemoExampleIntegrationTestH2 diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/examples/ReferenceConfigExampleIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/examples/ReferenceConfigExampleIntegrationTest.scala index 4c7d8f4deb..2a6a976749 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/examples/ReferenceConfigExampleIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/examples/ReferenceConfigExampleIntegrationTest.scala @@ -11,7 +11,7 @@ import com.digitalasset.canton.config.{CantonConfig, EnterpriseCantonEdition, Te import com.digitalasset.canton.console.{LocalInstanceReference, RemoteInstanceReference} import com.digitalasset.canton.environment.CantonEnvironment import com.digitalasset.canton.integration.* -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.plugins.{UseBftSequencer, UsePostgres} import com.digitalasset.canton.integration.tests.examples.ExampleIntegrationTest.referenceConfiguration import com.digitalasset.canton.metrics.MetricsFactoryType diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/examples/RepairExampleIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/examples/RepairExampleIntegrationTest.scala index 124eebbd0c..d042f5b37f 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/examples/RepairExampleIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/examples/RepairExampleIntegrationTest.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.integration.tests.examples import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.CommunityIntegrationTest -import com.digitalasset.canton.integration.plugins.{UseCommunityReferenceBlockSequencer, UseH2} +import com.digitalasset.canton.integration.plugins.{UseH2, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.examples.ExampleIntegrationTest.{ referenceConfiguration, repairConfiguration, @@ -30,5 +30,5 @@ sealed abstract class RepairExampleIntegrationTest // TODO(#26093) port to DB or BFT Sequencer final class RepairExampleReferenceIntegrationTestH2 extends RepairExampleIntegrationTest { registerPlugin(new UseH2(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) } diff --git 
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/health/DistributedStatusIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/health/DistributedStatusIntegrationTest.scala index c38f6c6e68..34ecc566d6 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/health/DistributedStatusIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/health/DistributedStatusIntegrationTest.scala @@ -11,8 +11,8 @@ import com.digitalasset.canton.console.InstanceReference import com.digitalasset.canton.integration.* import com.digitalasset.canton.integration.plugins.{ UseBftSequencer, - UseCommunityReferenceBlockSequencer, UsePostgres, + UseReferenceBlockSequencer, } import com.digitalasset.canton.util.ShowUtil.* @@ -137,7 +137,7 @@ trait DistributedStatusIntegrationTest class DistributedStatusReferenceIntegrationTestPostgres extends DistributedStatusIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } class DistributedStatusBftOrderingIntegrationTestPostgres extends DistributedStatusIntegrationTest { diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/health/RemoteDumpIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/health/RemoteDumpIntegrationTest.scala index 3f9fd789a5..0d012f9ca1 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/health/RemoteDumpIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/health/RemoteDumpIntegrationTest.scala @@ -16,9 +16,9 @@ import com.digitalasset.canton.console.{ } import com.digitalasset.canton.environment.{Environment, EnvironmentFactory} import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UseExternalProcess, UsePostgres, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -28,7 +28,6 @@ import com.digitalasset.canton.integration.{ } import com.digitalasset.canton.logging.{LogEntry, NamedLoggerFactory} import com.digitalasset.canton.participant.CommunityParticipantNodeBootstrapFactory -import com.digitalasset.canton.resource.CommunityDbMigrationsMetaFactory import com.digitalasset.canton.synchronizer.mediator.CommunityMediatorNodeBootstrapFactory import com.digitalasset.canton.synchronizer.sequencer.CommunitySequencerNodeBootstrapFactory import com.digitalasset.canton.version.{ProtocolVersionCompatibility, ReleaseVersion} @@ -308,7 +307,7 @@ class NegativeRemoteDumpIntegrationTest private val dumpDelay = 1.second - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) // Customize the environment factory to tweak the health dump generation override protected val environmentFactory: EnvironmentFactory = @@ -324,9 +323,6 @@ class NegativeRemoteDumpIntegrationTest CommunityParticipantNodeBootstrapFactory, CommunitySequencerNodeBootstrapFactory, CommunityMediatorNodeBootstrapFactory, - new CommunityDbMigrationsMetaFactory( - loggerFactory - ), loggerFactory, ) { override def createHealthDumpGenerator( diff --git 
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/health/StatusIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/health/StatusIntegrationTest.scala index d26322dd8f..a16b0bcec0 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/health/StatusIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/health/StatusIntegrationTest.scala @@ -10,10 +10,7 @@ import com.digitalasset.canton.integration.bootstrap.{ NetworkBootstrapper, NetworkTopologyDescription, } -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, ConfigTransforms, @@ -181,5 +178,5 @@ sealed trait StatusIntegrationTest class StatusReferenceIntegrationTestPostgres extends StatusIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/AbstractHttpServiceIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/AbstractHttpServiceIntegrationTest.scala index 148c1f4976..5a314f585b 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/AbstractHttpServiceIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/AbstractHttpServiceIntegrationTest.scala @@ -3,84 +3,7 @@ package com.digitalasset.canton.integration.tests.jsonapi -import com.daml.ledger.api.v2.command_service.SubmitAndWaitRequest -import com.daml.ledger.api.v2.commands.Commands -import com.daml.ledger.api.v2.transaction_filter.* -import com.daml.ledger.api.v2.transaction_filter.CumulativeFilter.IdentifierFilter -import com.daml.ledger.api.v2.update_service.GetUpdatesResponse.Update -import com.daml.ledger.api.v2.value as v -import com.daml.scalautil.Statement.discard -import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* -import com.daml.test.evidence.tag.Security.SecurityTest.Property.{Authorization, Availability} -import com.daml.test.evidence.tag.Security.{Attack, SecurityTest} -import com.digitalasset.canton.http.json.* -import com.digitalasset.canton.http.json.JsonProtocol.* -import com.digitalasset.canton.http.util.ClientUtil.{boxedRecord, uniqueCommandId, uniqueId} -import com.digitalasset.canton.http.{ - ActiveContract, - AllocatePartyRequest as HttpAllocatePartyRequest, - Base64, - Choice, - CommandId, - CommandMeta, - Contract, - ContractId, - ContractTypeId, - CreateAndExerciseCommand, - CreateCommand, - CreateCommandResponse, - DeduplicationPeriod, - EnrichedContractId, - EnrichedContractKey, - ErrorInfoDetail, - ErrorMessages, - ErrorResponse, - ExerciseCommand, - ExerciseResponse, - LedgerApiError, - Offset, - OkResponse, - Party, - PartyDetails as HttpPartyDetails, - RequestInfoDetail, - ResourceInfoDetail, - SubmissionId, - SyncResponse, - UnknownParties, - UnknownTemplateIds, - WorkflowId, - util, -} -import com.digitalasset.canton.ledger.api.refinements.ApiTypes as lar -import 
com.digitalasset.canton.ledger.service.MetadataReader -import com.digitalasset.canton.logging.NamedLogging.loggerWithoutTracing -import com.digitalasset.canton.protocol.ExampleContractFactory -import com.digitalasset.canton.tracing.{SerializableTraceContextConverter, W3CTraceContext} -import com.digitalasset.daml.lf.data.Ref -import com.digitalasset.daml.lf.value.test.TypedValueGenerators.ValueAddend as VA -import com.google.protobuf.ByteString -import org.apache.commons.lang3.RandomStringUtils import org.apache.pekko.http.scaladsl.model.* -import org.apache.pekko.stream.scaladsl.Sink -import org.scalatest.* -import scalaz.std.list.* -import scalaz.std.scalaFuture.* -import scalaz.std.tuple.* -import scalaz.std.vector.* -import scalaz.syntax.apply.* -import scalaz.syntax.bifunctor.* -import scalaz.syntax.show.* -import scalaz.syntax.std.boolean.* -import scalaz.syntax.tag.* -import scalaz.syntax.traverse.* -import scalaz.{-\/, \/-} -import shapeless.record.Record as ShRecord -import spray.json.* - -import java.util.UUID -import scala.annotation.nowarn -import scala.concurrent.Future -import scala.util.Success trait AbstractHttpServiceIntegrationTestFunsUserToken extends HttpServiceUserFixture.UserToken { self: AbstractHttpServiceIntegrationTestFuns => @@ -91,29 +14,6 @@ trait AbstractHttpServiceIntegrationTestFunsUserToken extends HttpServiceUserFix user: Option[String] ) = HttpServiceTestFixture.headersWithUserAuth(user) - - "get all parties using the legacy token format" in httpTestFixture { fixture => - import fixture.client - val partyIds = Vector("P1", "P2", "P3", "P4").map(getUniqueParty(_).unwrap) - val partyManagement = client.partyManagementClient - partyIds - .traverse { p => - partyManagement.allocateParty(Some(p)) - } - .flatMap { allocatedParties => - fixture - .getRequest( - Uri.Path("/v1/parties"), - headersWithUserAuth(None), - ) - .parseResponse[List[HttpPartyDetails]] - .map(inside(_) { case OkResponse(result, None, StatusCodes.OK) => - result.toSet should contain allElementsOf - allocatedParties.toSet.map(HttpPartyDetails.fromLedgerApi) - }) - }: Future[Assertion] - } - } /** Tests that may behave differently depending on @@ -122,1932 +22,17 @@ trait AbstractHttpServiceIntegrationTestFunsUserToken extends HttpServiceUserFix * 1. 
the query store configuration */ @SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements")) -@nowarn("msg=match may not be exhaustive") abstract class AbstractHttpServiceIntegrationTest extends AbstractHttpServiceIntegrationTestFuns { - import AbstractHttpServiceIntegrationTestFuns.* - import HttpServiceTestFixture.{accountCreateCommand, archiveCommand} - - val authorizationSecurity: SecurityTest = - SecurityTest(property = Authorization, asset = "HTTP JSON API Service") - - val availabilitySecurity: SecurityTest = - SecurityTest(property = Availability, asset = "HTTP JSON API Service") - - protected def genSearchDataSet( - party: Party - ): List[CreateCommand[v.Record, ContractTypeId.Template.RequiredPkg]] = - List( - iouCreateCommand(amount = "111.11", currency = "EUR", party = party), - iouCreateCommand(amount = "222.22", currency = "EUR", party = party), - iouCreateCommand(amount = "333.33", currency = "GBP", party = party), - iouCreateCommand(amount = "444.44", currency = "BTC", party = party), - ) - - def packageIdOfDar(darFile: java.io.File): String = { - import com.digitalasset.daml.lf.{archive, typesig} - val dar = archive.UniversalArchiveReader.assertReadFile(darFile) - typesig.PackageSignature.read(dar.main)._2.packageId - } - - protected def testLargeQueries = true - - "query POST with empty query" should { - "single party" in httpTestFixture { fixture => - fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => - val searchDataSet = genSearchDataSet(alice) - searchExpectOk( - searchDataSet, - jsObject(s"""{"templateIds": ["${TpId.Iou.Iou.fqn}"]}"""), - fixture, - headers, - ).map { (acl: List[ActiveContract.ResolvedCtTyId[JsValue]]) => - acl.size shouldBe searchDataSet.size - } - } - } - - "multi-party" in httpTestFixture { fixture => - for { - res1 <- fixture.getUniquePartyAndAuthHeaders("Alice") - res2 <- fixture.getUniquePartyAndAuthHeaders("Bob") - (alice, aliceHeaders) = res1 - (bob, bobHeaders) = res2 - aliceAccountResp <- postCreateCommand( - accountCreateCommand(owner = alice, number = "42"), - fixture, - aliceHeaders, - ) - _ = aliceAccountResp.status shouldBe StatusCodes.OK - bobAccountResp <- postCreateCommand( - accountCreateCommand(owner = bob, number = "23"), - fixture, - bobHeaders, - ) - _ = bobAccountResp.status shouldBe StatusCodes.OK - _ <- searchExpectOk( - List(), - jsObject(s"""{"templateIds": ["${TpId.Account.Account.fqn}"]}"""), - fixture, - aliceHeaders, - ) - .map(acl => acl.size shouldBe 1) - _ <- searchExpectOk( - List(), - jsObject(s"""{"templateIds": ["${TpId.Account.Account.fqn}"]}"""), - fixture, - bobHeaders, - ) - .map(acl => acl.size shouldBe 1) - _ <- fixture - .headersWithPartyAuth(List(alice, bob)) - .flatMap(headers => - searchExpectOk( - List(), - jsObject(s"""{"templateIds": ["${TpId.Account.Account.fqn}"]}"""), - fixture, - headers, - ) - ) - .map(acl => acl.size shouldBe 2) - } yield { - assert(true) - } - } - - "with an interface ID" in httpTestFixture { fixture => - import com.digitalasset.canton.http.json.JsonProtocol.* - for { - _ <- uploadPackage(fixture)(ciouDar) - aliceH <- fixture.getUniquePartyAndAuthHeaders("Alice") - (alice, aliceHeaders) = aliceH - _ <- postCreateCommand( - iouCommand(alice, TpId.CIou.CIou), - fixture, - aliceHeaders, - ) - - searchResp <- - suppressPackageIdWarning { - search( - List.empty, - Map( - "templateIds" -> Seq(TpId.IIou.IIou).toJson - ).toJson.asJsObject, - fixture, - aliceHeaders, - ) - } - } yield inside(searchResp) { - case OkResponse(Seq(ac), None, 
StatusCodes.OK) => { - discard { - ac.templateId shouldBe TpId.IIou.IIou.copy(packageId = ac.templateId.packageId) - } - ac.payload shouldBe JsObject("amount" -> JsString("42")) - } - } - } - - "multi-view" should { - val amountsCurrencies = Vector(("42.0", "USD"), ("84.0", "CHF")) - val expectedAmountsCurrencies = amountsCurrencies.map { case (a, c) => (a.toDouble, c) } - - def testMultiView[ExParties]( - fixture: HttpServiceTestFixtureData, - allocateParties: Future[ExParties], - )( - observers: ExParties => Vector[Party], - queryHeaders: (Party, List[HttpHeader], ExParties) => Future[List[HttpHeader]], - ) = for { - (alice, aliceHeaders) <- fixture.getUniquePartyAndAuthHeaders("alice") - exParties <- allocateParties - - // create all contracts - exObservers = observers(exParties) - cids <- amountsCurrencies.traverse { case (amount, currency) => - postCreateCommand( - iouCreateCommand( - alice, - amount = amount, - currency = currency, - observers = exObservers, - ), - fixture, - aliceHeaders, - ) map resultContractId - } - queryAsBoth <- queryHeaders(alice, aliceHeaders, exParties) - queryAtCtId = { - (ctid: ContractTypeId.RequiredPkg, amountKey: String, currencyKey: String) => - searchExpectOk( - List.empty, - Map("templateIds" -> List(ctid)).toJson.asJsObject, - fixture, - queryAsBoth, - ) map { resACs => - inside(resACs map (inside(_) { - case ActiveContract(cid, _, _, payload, Seq(`alice`), `exObservers`) => - // ensure the contract metadata is right, then discard - (cid, payload.asJsObject.fields) - })) { case Seq((cid0, payload0), (cid1, payload1)) => - Seq(cid0, cid1) should contain theSameElementsAs cids - // check the actual payloads match the contract IDs from creates - val actualAmountsCurrencies = (if (cid0 == cids.head) Seq(payload0, payload1) - else Seq(payload1, payload0)) - .map(payload => - inside((payload get amountKey, payload get currencyKey)) { - case (Some(JsString(amount)), Some(JsString(currency))) => - (amount.toDouble, currency) - } - ) - actualAmountsCurrencies should ===(expectedAmountsCurrencies) - } - } - } - // run (inserting when query store) on template ID; then interface ID - // (thereby duplicating contract IDs) - _ <- queryAtCtId(TpId.Iou.Iou, "amount", "currency") - _ <- queryAtCtId(TpId.Iou.IIou, "iamount", "icurrency") - // then try template ID again, in case interface ID mangled the results - // for template ID by way of stakeholder join or something even odder - _ <- queryAtCtId(TpId.Iou.Iou, "amount", "currency") - } yield succeed - - "multi-party" in httpTestFixture { fixture => - testMultiView( - fixture, - fixture.getUniquePartyAndAuthHeaders("bob").map(_._1), - )( - bob => Vector(bob), - (alice, _, bob) => fixture.headersWithPartyAuth(List(alice), List(bob)), - ) - } - } - } - - "query with unknown Template IDs" should { - "warns if some are known" in httpTestFixture { fixture => - val query = - jsObject( - s"""{"templateIds": ["${TpId.Iou.Iou.fqn}", "UnknownPkg:UnknownModule:UnknownEntity"]}""" - ) - fixture - .getUniquePartyAndAuthHeaders("UnknownParty") - .flatMap { case (_, headers) => - suppressPackageIdWarning { - search(List(), query, fixture, headers).map { response => - inside(response) { case OkResponse(acl, warnings, StatusCodes.OK) => - acl.size shouldBe 0 - warnings shouldBe Some( - UnknownTemplateIds( - List( - ContractTypeId( - Ref.PackageRef.assertFromString("UnknownPkg"), - "UnknownModule", - "UnknownEntity", - ) - ) - ) - ) - } - } - } - } - } - - "fails if all are unknown" in httpTestFixture { fixture => - 
fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => - search( - genSearchDataSet(alice), - jsObject("""{"templateIds": ["UnknownPkg:AAA:BBB", "UnknownPkg:ZZZ:YYY"]}"""), - fixture, - headers, - ).map { response => - inside(response) { case ErrorResponse(errors, warnings, StatusCodes.BadRequest, _) => - errors shouldBe List(ErrorMessages.cannotResolveAnyTemplateId) - inside(warnings) { case Some(UnknownTemplateIds(unknownTemplateIds)) => - unknownTemplateIds.toSet shouldBe Set( - ContractTypeId(Ref.PackageRef.assertFromString("UnknownPkg"), "AAA", "BBB"), - ContractTypeId(Ref.PackageRef.assertFromString("UnknownPkg"), "ZZZ", "YYY"), - ) - } - } - } - } - } - } - - "query multiple observers:" should { - Seq( - 0 -> 1, - 1 -> 5, - 10 -> 75, - 50 -> 76, // Allows space to encode content into a JSON array of strings within 4k limit. - 50 -> 80, // The content is the exact 4k limit, no additional room for JSON array syntax. - 1000 -> 185, - ).foreach { case (numSubs, partySize) => - (s"$numSubs observers of $partySize chars") in httpTestFixture { fixture => - val subscribers = (1 to numSubs).map(_ => Party(randomTextN(partySize))).toList - for { - (publisher, headers) <- fixture.getUniquePartyAndAuthHeaders("Alice") - subscriberPartyDetails <- subscribers.traverse { p => - fixture.client.partyManagementClient.allocateParty(Some(p.unwrap)) - } - subscriberParties = Party subst subscriberPartyDetails.map(p => p.party: String) - found <- searchExpectOk( - List(pubSubCreateCommand(publisher, subscriberParties)), - jsObject(s"""{"templateIds": ["${TpId.Account.PubSub.fqn}"]}"""), - fixture, - headers, - ) - } yield { - found.size shouldBe 1 - } - } - } - } - - protected implicit final class `AHS TI uri funs`(private val fixture: UriFixture) { - - def searchAllExpectOk( - headers: List[HttpHeader] - ): Future[List[ActiveContract.ResolvedCtTyId[JsValue]]] = - searchAll(headers).map(expectOk(_)) - - def searchAllExpectOk( - ): Future[List[ActiveContract.ResolvedCtTyId[JsValue]]] = - fixture.headersWithAuth.flatMap(searchAllExpectOk(_)) - - def searchAll( - headers: List[HttpHeader] - ): Future[SyncResponse[List[ActiveContract.ResolvedCtTyId[JsValue]]]] = - fixture - .getRequest(Uri.Path("/v1/query"), headers) - .parseResponse[List[ActiveContract.ResolvedCtTyId[JsValue]]] - - } - - "exercise" should { - "succeeds normally" in httpTestFixture { fixture => - import fixture.encoder - for { - (alice, headers) <- fixture.getUniquePartyAndAuthHeaders("Alice") - (bob, _) <- fixture.getUniquePartyAndAuthHeaders("Bob") - create = iouCreateCommand(alice) - res <- postCreateCommand(create, fixture, headers) - _ <- inside(res) { case OkResponse(createResult, _, StatusCodes.OK) => - val exercise = iouExerciseTransferCommand(createResult.contractId, bob) - val exerciseJson: JsValue = encodeExercise(encoder)(exercise) - - fixture - .postJsonRequest(Uri.Path("/v1/exercise"), exerciseJson, headers) - .parseResponse[ExerciseResponse[JsValue]] - .flatMap(inside(_) { case OkResponse(result, _, StatusCodes.OK) => - assertExerciseResponseNewActiveContract( - result, - create, - exercise, - fixture, - headers, - ) - }) - } - } yield succeed - } - - "with unknown contractId should return proper error" in httpTestFixture { fixture => - import fixture.encoder - val contractIdString = ExampleContractFactory.buildContractId().coid - val contractId = lar.ContractId(contractIdString) - for { - (bob, headers) <- fixture.getUniquePartyAndAuthHeaders("Bob") - exerciseJson: JsValue = - 
encodeExercise(encoder)(iouExerciseTransferCommand(contractId, bob)) - _ <- fixture - .postJsonRequest(Uri.Path("/v1/exercise"), exerciseJson, headers) - .parseResponse[ExerciseResponse[JsValue]] - .map(inside(_) { - case ErrorResponse(Seq(errorMsg), None, StatusCodes.NotFound, Some(ledgerApiError)) => - errorMsg should include( - s"Contract could not be found with id $contractIdString" - ) - ledgerApiError.message should include("CONTRACT_NOT_FOUND") - ledgerApiError.message should include( - s"Contract could not be found with id $contractIdString" - ) - forExactly(1, ledgerApiError.details) { - case ErrorInfoDetail(errorCodeId, _) => - errorCodeId shouldBe "CONTRACT_NOT_FOUND" - case _ => fail() - } - forExactly(1, ledgerApiError.details) { - case RequestInfoDetail(_) => succeed - case _ => fail() - } - forExactly(1, ledgerApiError.details) { - case ResourceInfoDetail(name, typ) => - name shouldBe contractIdString - typ shouldBe "CONTRACT_ID" - case _ => fail() - } - }) - } yield succeed - } - - "Archive" in httpTestFixture { fixture => - import fixture.encoder - fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => - val create = iouCreateCommand(alice) - postCreateCommand(create, fixture, headers) - .flatMap(inside(_) { case OkResponse(createResult, _, StatusCodes.OK) => - val reference = EnrichedContractId(Some(TpId.Iou.Iou), createResult.contractId) - val exercise = archiveCommand(reference) - val exerciseJson: JsValue = encodeExercise(encoder)(exercise) - - fixture - .postJsonRequest(Uri.Path("/v1/exercise"), exerciseJson, headers) - .parseResponse[ExerciseResponse[JsValue]] - .map(inside(_) { case OkResponse(exercisedResponse, _, StatusCodes.OK) => - assertExerciseResponseArchivedContract(exercisedResponse, exercise) - }) - }) - } - } - - def postCreate( - fixture: HttpServiceTestFixtureData, - payload: JsValue, - headers: List[HttpHeader], - ): Future[ContractId] = - fixture - .postJsonRequest(Uri.Path("/v1/create"), payload, headers) - .parseResponse[ActiveContract.ResolvedCtTyId[JsValue]] - .map(resultContractId) - - // TODO(i24322): Port upgrading tests to JSON.v2 - "should handle multiple package ids with the same name" in httpTestFixture { fixture => - import AbstractHttpServiceIntegrationTestFuns.{fooV1Dar, fooV2Dar} - for { - _ <- uploadPackage(fixture)(fooV1Dar) - _ <- uploadPackage(fixture)(fooV2Dar) - - pkgIdV1 = packageIdOfDar(fooV1Dar) - pkgIdV2 = packageIdOfDar(fooV2Dar) - - (alice, hdrs) <- fixture.getUniquePartyAndAuthHeaders("Alice") - - // create v1 and v2 versions of contract, using the package name and package id. - cidV1PkgId <- postCreate( - fixture, - jsObject(s"""{"templateId": "$pkgIdV1:Foo:Bar", "payload": {"owner": "$alice"}}"""), - hdrs, - ) - cidV1PkgNm <- postCreate( - fixture, - // Payload per V1 but interpreted as V2, as the current highest version with this name. - jsObject(s"""{"templateId": "#foo:Foo:Bar", "payload": {"owner": "$alice"}}"""), - hdrs, - ) - cidV1PkgNmWithV1Pref <- postCreate( - fixture, - // Payload per V1 and interpreted as V1, due to the explicit package id preference. 
- jsObject( - s"""{"templateId": "#foo:Foo:Bar", "payload": {"owner": "$alice"}, "meta":{"packageIdSelectionPreference":["$pkgIdFooV1"]}}""" - ), - hdrs, - ) - cidV2PkgId <- postCreate( - fixture, - jsObject( - s"""{"templateId": "$pkgIdV2:Foo:Bar", "payload": {"owner": "$alice", "extra":42}}""" - ), - hdrs, - ) - cidV2PkgNm <- postCreate( - fixture, - jsObject( - s"""{"templateId": "#foo:Foo:Bar", "payload": {"owner": "$alice", "extra":42}}""" - ), - hdrs, - ) - - // query using both package ids and package name should lead to same results since package-id queries are - // internally transformed to package-name queries - - _ <- searchExpectOk( - Nil, - jsObject(s"""{"templateIds": ["$pkgIdV1:Foo:Bar"]}"""), - fixture, - hdrs, - ) map { results => - results.map(_.contractId) should contain theSameElementsAs List( - cidV1PkgId, - cidV1PkgNm, - cidV1PkgNmWithV1Pref, - cidV2PkgId, - cidV2PkgNm, - ) - } - - _ <- searchExpectOk( - Nil, - jsObject(s"""{"templateIds": ["$pkgIdV2:Foo:Bar"]}"""), - fixture, - hdrs, - ) map { results => - results.map(_.contractId) should contain theSameElementsAs List( - cidV1PkgId, - cidV1PkgNm, - cidV1PkgNmWithV1Pref, - cidV2PkgId, - cidV2PkgNm, - ) - } - - _ <- searchExpectOk( - Nil, - jsObject(s"""{"templateIds": ["#foo:Foo:Bar"]}"""), - fixture, - hdrs, - suppressWarnings = false, - ) map { results => - results.map(_.contractId) should contain theSameElementsAs List( - cidV1PkgId, - cidV1PkgNm, - cidV1PkgNmWithV1Pref, - cidV2PkgId, - cidV2PkgNm, - ) - } - } yield succeed - } - - "should recognise an archive against a newer version of the same contract" in httpTestFixture { - fixture => - import AbstractHttpServiceIntegrationTestFuns.{fooV1Dar, fooV2Dar} - - for { - _ <- uploadPackage(fixture)(fooV1Dar) - - (alice, hdrs) <- fixture.getUniquePartyAndAuthHeaders("Alice") - - // Create using package package name. The created event will contain the package id from v1. - createdCid <- postCreate( - fixture, - jsObject(s"""{"templateId": "#foo:Foo:Bar", "payload": {"owner": "$alice"}}"""), - hdrs, - ) - - // Query using package name - _ <- searchExpectOk( - Nil, - jsObject(s"""{"templateIds": ["#foo:Foo:Bar"]}"""), - fixture, - hdrs, - suppressWarnings = false, - ) map { results => - results.map(_.contractId) shouldBe List(createdCid) - } - - // Upload v2 of the same package. - _ <- uploadPackage(fixture)(fooV2Dar) - - // Archive using package name but the exercise event will contain the package id from v2. - _ <- fixture - .postJsonRequest( - Uri.Path("/v1/exercise"), - jsObject(s"""{ - "templateId": "#foo:Foo:Bar", - "contractId": "$createdCid", - "choice": "Archive", - "argument": {} - }"""), - hdrs, - ) - .parseResponse[ExerciseResponse[JsValue]] - .flatMap(inside(_) { - case OkResponse( - ExerciseResponse(_, List(Contract(-\/(archived))), _), - _, - StatusCodes.OK, - ) => - Future { - archived.contractId shouldBe createdCid - } - }) - - // The query should no longer serve the contract, as it is no longer in the ACS. - _ <- searchExpectOk( - Nil, - jsObject(s"""{"templateIds": ["#foo:Foo:Bar"]}"""), - fixture, - hdrs, - suppressWarnings = false, - ) map { results => - results.map(_.contractId) shouldBe List.empty - } - - } yield succeed - } - - "should support create and exerciseByKey with package names" in httpTestFixture { fixture => - val tmplId = "#foo:Foo:Quux" - for { - _ <- uploadPackage(fixture)(AbstractHttpServiceIntegrationTestFuns.fooV2Dar) - - (alice, hdrs) <- fixture.getUniquePartyAndAuthHeaders("Alice") - - // create using package name. 
- cid <- postCreate( - fixture, - jsObject(s"""{"templateId": "$tmplId", "payload": {"owner": "$alice"}}"""), - hdrs, - ) - - // exercise by key, to test resolution of the key type, the arg type and the result type. - // TODO(#20994) Use the key rather than the contract id -// locator = s""""key": {"value": "$alice"}""" - locator = s""""contractId": "$cid"""" - _ <- fixture - .postJsonRequest( - Uri.Path("/v1/exercise"), - jsObject(s"""{ - "templateId": "$tmplId", - $locator, - "choice": "Incr", - "argument": {"a": {"value": 42}} - }"""), - hdrs, - ) - .parseResponse[ExerciseResponse[JsValue]] - .flatMap(inside(_) { - case OkResponse( - ExerciseResponse(jsResult, List(Contract(-\/(archived))), _), - _, - StatusCodes.OK, - ) => - Future { - archived.contractId shouldBe cid - val json = """{"value": "43"}""" - jsResult shouldBe jsObject(json) - } - }) - } yield succeed - } - - // TODO(https://github.com/DACH-NY/canton/issues/16065): re-enable or adapt once 3.x supports contract keys - "Archive by key" ignore httpTestFixture { fixture => - import fixture.encoder - fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => - val accountNumber = "abc123" - val create: CreateCommand[v.Record, ContractTypeId.Template.RequiredPkg] = - accountCreateCommand(alice, accountNumber) - - val keyRecord = v.Record( - fields = Seq( - v.RecordField(value = Some(v.Value(v.Value.Sum.Party(alice.unwrap)))), - v.RecordField(value = Some(v.Value(v.Value.Sum.Text(accountNumber)))), - ) - ) - val locator = EnrichedContractKey[v.Value]( - TpId.Account.Account, - v.Value(v.Value.Sum.Record(keyRecord)), - ) - val archive = archiveCommand(locator) - val archiveJson: JsValue = encodeExercise(encoder)(archive) - - postCreateCommand(create, fixture, headers).flatMap(inside(_) { - case OkResponse(_, _, StatusCodes.OK) => - fixture - .postJsonRequest(Uri.Path("/v1/exercise"), archiveJson, headers) - .parseResponse[JsValue] - .map(inside(_) { case OkResponse(_, _, StatusCodes.OK) => - succeed - }) - }): Future[Assertion] - } - } - - "passes along disclosed contracts in" should { - import com.daml.ledger.api.v2 as lav2 - import com.digitalasset.canton.http.DisclosedContract as DC - import lav2.commands.Command - import util.IdentifierConverters.{lfIdentifier, refApiIdentifier} - - def unwrapPkgId( - ctid: ContractTypeId.RequiredPkg - ): ContractTypeId.RequiredPkgId = - inside(ctid.packageId) { case Ref.PackageRef.Id(pid) => ctid.copy(packageId = pid) } - - lazy val (_, toDiscloseVA) = - VA.record( - lfIdentifier(unwrapPkgId(TpId.Disclosure.ToDisclose)), - ShRecord(owner = VAx.partySynchronizer, junk = VA.text), - ) - - lazy val (_, anotherToDiscloseVA) = - VA.record( - lfIdentifier(unwrapPkgId(TpId.Disclosure.ToDisclose)), - ShRecord(owner = VAx.partySynchronizer, garbage = VA.text), - ) - - val (_, viewportVA) = - VA.record( - lfIdentifier(unwrapPkgId(TpId.Disclosure.Viewport)), - ShRecord(owner = VAx.partySynchronizer), - ) - - val (_, checkVisibilityVA) = - VA.record( - lfIdentifier(unwrapPkgId(TpId.Disclosure.CheckVisibility)), - ShRecord( - disclosed = VAx.contractIdSynchronizer, - ifaceDisclosed = VAx.contractIdSynchronizer, - ), - ) - - final case class ContractsToDisclose( - alice: Party, - toDiscloseCid: ContractId, - toDiscloseCreatedEventBlob: ByteString, - anotherToDiscloseCid: ContractId, - anotherToDiscloseCreatedEventBlob: ByteString, - ) - - def formatWithPayloadsFor(party: Party) = EventFormat( - filtersByParty = Map( - party.unwrap -> Filters( - Seq( - CumulativeFilter( - 
IdentifierFilter.InterfaceFilter( - InterfaceFilter( - interfaceId = Some(refApiIdentifier(TpId.Disclosure.HasGarbage).unwrap), - includeCreatedEventBlob = true, - includeInterfaceView = false, - ) - ) - ) - ) :+ - CumulativeFilter( - IdentifierFilter.TemplateFilter( - TemplateFilter( - templateId = Some(refApiIdentifier(TpId.Disclosure.ToDisclose).unwrap), - includeCreatedEventBlob = true, - ) - ) - ) - ) - ), - filtersForAnyParty = None, - verbose = false, - ) - - def contractsToDisclose( - fixture: HttpServiceTestFixtureData, - junkMessage: String, - garbageMessage: String, - ): Future[ContractsToDisclose] = for { - (alice, jwt, userId, _) <- fixture.getUniquePartyTokenUserIdAndAuthHeaders("Alice") - // we're using the ledger API for the initial create because timestamp - // is required in the metadata - toDisclosePayload = argToApi(toDiscloseVA)(ShRecord(owner = alice, junk = junkMessage)) - anotherToDisclosePayload = argToApi(anotherToDiscloseVA)( - ShRecord(owner = alice, garbage = garbageMessage) - ) - createCommands = Seq( - (TpId.Disclosure.ToDisclose, toDisclosePayload), - (TpId.Disclosure.AnotherToDisclose, anotherToDisclosePayload), - ) map { case (tpid, payload) => - Command(util.Commands.create(refApiIdentifier(tpid), payload)) - } - initialCreate = - SubmitAndWaitRequest( - commands = Some( - Commands.defaultInstance.copy( - commandId = uniqueCommandId().unwrap, - userId = userId.unwrap, - actAs = Party unsubst Seq(alice), - commands = createCommands, - ) - ) - ) - createResp <- fixture.client.commandService - .submitAndWaitForTransactionForJsonApi(initialCreate, token = Some(jwt.value)) - // fetch what we can from the command service transaction - (ceAtOffset, (toDiscloseCid, atdCid)) = inside( - createResp.transaction - ) { case Some(tx) => - import lav2.event.Event - import Event.Event.Created - inside(tx.events) { case Seq(Event(Created(ce0)), Event(Created(ce1))) => - val EntityTD = TpId.Disclosure.ToDisclose.entityName - val EntityATD = TpId.Disclosure.AnotherToDisclose.entityName - val orderedCes = inside((ce0, ce1) umap (_.templateId.map(_.entityName))) { - case (Some(EntityTD), Some(EntityATD)) => (ce0, ce1) - case (Some(EntityATD), Some(EntityTD)) => (ce1, ce0) - } - ( - tx.offset, - orderedCes umap { ce => ContractId(ce.contractId) }, - ) - } - } - // use the transaction service to get the blob, which submit-and-wait - // doesn't include in the response - payloadsToDisclose <- { - import lav2.event.Event - import Event.Event.Created - suppressPackageIdWarning { - fixture.client.updateService - .getUpdatesSource( - begin = 0L, - eventFormat = formatWithPayloadsFor(alice), - end = Some(ceAtOffset), - token = Some(jwt.value), - ) - .collect { response => - response.update match { - case Update.Transaction(t) => t - } - } - .mapConcat(_.events) - .collect { - case Event(Created(ce)) - if ce.contractId == toDiscloseCid.unwrap || ce.contractId == atdCid.unwrap => - Base64(ce.createdEventBlob) - } - .runWith(Sink.seq) - } - } - (firstPayload, anotherPayload) = inside(payloadsToDisclose) { case Seq(first, second) => - first -> second - } - } yield ContractsToDisclose( - alice, - toDiscloseCid, - firstPayload.unwrap, - atdCid, - anotherPayload.unwrap, - ) - - def runDisclosureTestCase[Setup]( - fixture: HttpServiceTestFixtureData - )(exerciseEndpoint: Uri.Path, setupBob: (Party, List[HttpHeader]) => Future[Setup])( - exerciseVaryingOnlyMeta: ( - Setup, - ContractsToDisclose, - Option[CommandMeta[ContractTypeId.Template.RequiredPkg]], - ) => JsValue - ): Future[Assertion] = 
{ - val junkMessage = s"some test junk ${uniqueId()}" - val garbageMessage = s"some test garbage ${uniqueId()}" - for { - // first, set up something for alice to disclose to bob - toDisclose @ ContractsToDisclose( - alice, - toDiscloseCid, - firstPayload, - atdCid, - anotherPayload, - ) <- - contractsToDisclose(fixture, junkMessage, garbageMessage) - - // next, onboard bob to try to interact with the disclosed contract - (bob, bobHeaders) <- fixture.getUniquePartyAndAuthHeaders("Bob") - setup <- setupBob(bob, bobHeaders) - - // exercise CheckVisibility with different disclosure options - checkVisibility = { (disclosure: List[DC[ContractTypeId.Template.RequiredPkg]]) => - val meta = disclosure.nonEmpty option CommandMeta( - None, - None, - None, - None, - None, - None, - disclosedContracts = Some(disclosure), - None, - None, - ) - fixture - .postJsonRequest( - exerciseEndpoint, - exerciseVaryingOnlyMeta(setup, toDisclose, meta), - bobHeaders, - ) - .parseResponse[ExerciseResponse[JsValue]] - } - - // ensure that bob can't interact with alice's contract unless it's disclosed - _ <- checkVisibility(List.empty) - .map(inside(_) { - case ErrorResponse( - _, - _, - StatusCodes.NotFound, - Some(LedgerApiError(lapiCode, errorMessage, _)), - ) => - lapiCode should ===(com.google.rpc.Code.NOT_FOUND_VALUE) - errorMessage should include(toDiscloseCid.unwrap) - }) - - _ <- checkVisibility( - List( - DC( - toDiscloseCid, - TpId.Disclosure.ToDisclose, - Base64(firstPayload), - ), - DC( - atdCid, - TpId.Disclosure.AnotherToDisclose, - Base64(anotherPayload), - ), - ) - ) - .map(inside(_) { case OkResponse(ExerciseResponse(JsString(exResp), _, _), _, _) => - exResp should ===(s"'$bob' can see from '$alice': $junkMessage, $garbageMessage") - }) - } yield succeed - } - - val checkVisibilityChoice = Choice("CheckVisibility") - - "exercise" in httpTestFixture { fixture => - runDisclosureTestCase(fixture)( - Uri.Path("/v1/exercise"), - (bob, bobHeaders) => - postCreateCommand( - CreateCommand( - TpId.Disclosure.Viewport, - argToApi(viewportVA)(ShRecord(owner = bob)), - None, - ), - fixture, - bobHeaders, - ) map resultContractId, - ) { (viewportCid, toDisclose, meta) => - encodeExercise(fixture.encoder)( - ExerciseCommand( - EnrichedContractId(Some(TpId.Disclosure.Viewport), viewportCid), - checkVisibilityChoice, - boxedRecord( - argToApi(checkVisibilityVA)( - ShRecord( - disclosed = toDisclose.toDiscloseCid, - ifaceDisclosed = toDisclose.anotherToDiscloseCid, - ) - ) - ), - None, - meta, - ) - ) - } - } - - "create-and-exercise" in httpTestFixture { fixture => - runDisclosureTestCase(fixture)( - Uri.Path("/v1/create-and-exercise"), - (bob, _) => Future successful bob, - ) { (bob, toDisclose, meta) => - fixture.encoder - .encodeCreateAndExerciseCommand( - CreateAndExerciseCommand( - TpId.Disclosure.Viewport, - argToApi(viewportVA)(ShRecord(owner = bob)), - checkVisibilityChoice, - boxedRecord( - argToApi(checkVisibilityVA)( - ShRecord( - disclosed = toDisclose.toDiscloseCid, - ifaceDisclosed = toDisclose.anotherToDiscloseCid, - ) - ) - ), - None, - meta, - ) - ) - .valueOr(e => fail(e.shows)) - } - } - } - } - - private def assertExerciseResponseNewActiveContract( - exerciseResponse: ExerciseResponse[JsValue], - createCmd: CreateCommand[v.Record, ContractTypeId.Template.RequiredPkg], - exerciseCmd: ExerciseCommand[Any, v.Value, EnrichedContractId], - fixture: HttpServiceTestFixtureData, - headers: List[HttpHeader], - ): Future[Assertion] = { - import fixture.{decoder, uri} - inside(exerciseResponse) { - case 
ExerciseResponse( - JsString(exerciseResult), - List(contract1, contract2), - completionOffset, - ) => - completionOffset.unwrap should not be empty - // checking contracts - inside(contract1) { case Contract(-\/(archivedContract)) => - Future { - (archivedContract.contractId: ContractId) shouldBe (exerciseCmd.reference.contractId: ContractId) - } - } *> - inside(contract2) { case Contract(\/-(activeContract)) => - assertActiveContract(uri)(decoder, activeContract, createCmd, exerciseCmd, fixture) - } *> - // checking exerciseResult - { - exerciseResult.length should be > (0) - val newContractLocator = EnrichedContractId( - Some(TpId.Iou.IouTransfer), - ContractId(exerciseResult), - ) - postContractsLookup(newContractLocator, uri, headers).map(inside(_) { - case OkResponse(Some(contract), _, StatusCodes.OK) => - contract.contractId shouldBe newContractLocator.contractId - }): Future[Assertion] - } - } - } - - "should support multi-party command submissions" in httpTestFixture { fixture => - import fixture.{client, encoder} - val knownPartyNames = List("Alice", "Bob", "Charlie", "David").map(getUniqueParty) - val partyManagement = client.partyManagementClient - for { - knownParties @ List(alice, bob, charlie, david) <- - knownPartyNames.traverse { p => - Party subst partyManagement - .allocateParty(Some(p.unwrap)) - .map(pd => pd.party: String) - } - // multi-party actAs on create - cid <- fixture - .headersWithPartyAuth(List(alice, bob)) - .flatMap( - postCreateCommand(multiPartyCreateCommand(List(alice, bob), ""), fixture, _) - ) - .map(resultContractId) - // multi-party actAs on exercise - cidMulti <- fixture - .headersWithPartyAuth(knownParties) - .flatMap( - fixture.postJsonRequest( - Uri.Path("/v1/exercise"), - encodeExercise(encoder)(multiPartyAddSignatories(cid, List(charlie, david))), - _, - ) - ) - .parseResponse[ExerciseResponse[JsValue]] - .map(inside(_) { case OkResponse(ExerciseResponse(JsString(c), _, _), _, StatusCodes.OK) => - lar.ContractId(c) - }) - // create a contract only visible to Alice - cid <- fixture - .headersWithPartyAuth(List(alice)) - .flatMap( - postCreateCommand( - multiPartyCreateCommand(List(alice), ""), - fixture, - _, - ) - ) - .map(resultContractId) - _ <- fixture - .headersWithPartyAuth(List(charlie), readAs = List(alice)) - .flatMap( - fixture.postJsonRequest( - Uri.Path("/v1/exercise"), - encodeExercise(encoder)(multiPartyFetchOther(cidMulti, cid, List(charlie))), - _, - ) - ) - .map { case (status, _) => - status shouldBe StatusCodes.OK - } - } yield succeed - } - - private def assertExerciseResponseArchivedContract( - exerciseResponse: ExerciseResponse[JsValue], - exercise: ExerciseCommand.RequiredPkg[v.Value, EnrichedContractId], - ): Assertion = - inside(exerciseResponse) { case ExerciseResponse(exerciseResult, List(contract1), _) => - exerciseResult shouldBe JsObject() - inside(contract1) { case Contract(-\/(archivedContract)) => - (archivedContract.contractId.unwrap: String) shouldBe (exercise.reference.contractId.unwrap: String) - } - } - - "fetch by contractId" should { - "succeeds normally" in httpTestFixture { fixture => - fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => - val command = iouCreateCommand(alice) - - postCreateCommand(command, fixture, headers).flatMap(inside(_) { - case OkResponse(result, _, StatusCodes.OK) => - val contractId: ContractId = result.contractId - val locator = EnrichedContractId(None, contractId) - lookupContractAndAssert(locator, contractId, command, fixture, headers) - }): 
Future[Assertion] - } - } - - "succeeds normally with an interface ID" in httpTestFixture { fixture => - uploadPackage(fixture)(ciouDar).flatMap { case _ => - fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => - val command = iouCommand(alice, TpId.CIou.CIou) - postCreateCommand(command, fixture, headers).flatMap(inside(_) { - case OkResponse(result, _, StatusCodes.OK) => - val contractId: ContractId = result.contractId - val locator = EnrichedContractId(Some(TpId.IIou.IIou), contractId) - postContractsLookup(locator, fixture.uri, headers).map(inside(_) { - case OkResponse(Some(resultContract), _, StatusCodes.OK) => - contractId shouldBe resultContract.contractId - assertJsPayload(resultContract)(result.payload) - }) - }): Future[Assertion] - } - } - } - -// TODO(#16065) -// "returns {status:200, result:null} when contract is not found" in httpTestFixture { fixture => -// import fixture.uri -// fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => -// val accountNumber = "abc123" -// val locator = synchronizer.EnrichedContractKey( -// TpId.Account.Account, -// JsArray(JsString(alice.unwrap), JsString(accountNumber)), -// ) -// postContractsLookup(locator, uri.withPath(Uri.Path("/v1/fetch")), headers).map(inside(_) { -// case synchronizer.OkResponse(None, _, StatusCodes.OK) => -// succeed -// }): Future[Assertion] -// } -// } - - "fails when readAs not authed, even if prior fetch succeeded" taggedAs authorizationSecurity - .setAttack( - Attack( - "Ledger client", - "fetches by contractId but readAs is not authorized", - "refuse request with UNAUTHORIZED", - ) - ) in httpTestFixture { fixture => - import fixture.uri - for { - res <- fixture.getUniquePartyAndAuthHeaders("Alice") - (alice, aliceHeaders) = res - command = iouCreateCommand(alice) - createStatusOutput <- postCreateCommand(command, fixture, aliceHeaders) - contractId = inside(createStatusOutput) { case OkResponse(result, _, StatusCodes.OK) => - result.contractId - } - locator = EnrichedContractId(None, contractId) - // will cache if DB configured - _ <- lookupContractAndAssert(locator, contractId, command, fixture, aliceHeaders) - charlie = getUniqueParty("Charlie") - badLookup <- postContractsLookup( - locator, - uri.withPath(Uri.Path("/v1/fetch")), - aliceHeaders, - readAs = Some(List(charlie)), - ) - } yield inside(badLookup) { case ErrorResponse(_, None, StatusCodes.Unauthorized, None) => - succeed - } - } - } - -// "fetch by key" in { -// "succeeds normally" in httpTestFixture { fixture => -// fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => -// val accountNumber = "abc123" -// val command = accountCreateCommand(alice, accountNumber) -// -// postCreateCommand(command, fixture, headers).flatMap(inside(_) { -// case synchronizer.OkResponse(result, _, StatusCodes.OK) => -// val contractId: ContractId = result.contractId -// val locator = synchronizer.EnrichedContractKey( -// TpId.Account.Account, -// JsArray(JsString(alice.unwrap), JsString(accountNumber)), -// ) -// lookupContractAndAssert(locator, contractId, command, fixture, headers) -// }): Future[Assertion] -// } -// } -// -// "containing variant and record" should { -// "encoded as array with number num" in httpTestFixture { fixture => -// fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => -// testFetchByCompositeKey( -// fixture, -// jsObject(s"""{ -// "templateId": "Account:KeyedByVariantAndRecord", -// "key": [ -// "$alice", -// {"tag": "Bar", "value": 
42}, -// {"baz": "another baz value"} -// ] -// }"""), -// alice, -// headers, -// ) -// } -// } -// -// "encoded as record with string num" in httpTestFixture { fixture => -// fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => -// testFetchByCompositeKey( -// fixture, -// jsObject(s"""{ -// "templateId": "Account:KeyedByVariantAndRecord", -// "key": { -// "_1": "$alice", -// "_2": {"tag": "Bar", "value": "42"}, -// "_3": {"baz": "another baz value"} -// } -// }"""), -// alice, -// headers, -// ) -// } -// } -// } -// -// "containing a decimal " should { -// Seq( -//// "300000", -//// "300000.0", -// // TODO(#13813): Due to big decimal normalization, you can only fetch a key if you -// // use the exactly normalized value -// "300000.000001" -//// "300000.00000000000001", // Note this is more than the 6 decimal places allowed by the type -// ).foreach { numStr => -// s"with value $numStr" in httpTestFixture { fixture => -// fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => -// testCreateAndFetchDecimalKey(fixture, numStr, alice, headers) -// } -// } -// } -// } -// } - - "Should ignore conflicts on contract key hash constraint violation" in httpTestFixture { - fixture => - import com.digitalasset.canton.ledger.api.refinements.ApiTypes as lar - import fixture.{client, encoder} - import shapeless.record.Record as ShRecord - val partyManagement = client.partyManagementClient - - val partyIds = Vector("Alice", "Bob").map(getUniqueParty) - val packageId: Ref.PackageId = MetadataReader - .templateByName(metadataUser)(Ref.QualifiedName.assertFromString("User:User")) - .collectFirst { case (pkgid, _) => pkgid } - .getOrElse(fail(s"Cannot retrieve packageId")) - - def userCreateCommand( - username: Party, - following: Seq[Party] = Seq.empty, - ): CreateCommand[v.Record, ContractTypeId.Template.RequiredPkg] = { - val followingList = lfToApi( - VAx.seq(VAx.partySynchronizer).inj(following) - ).sum - val arg = recordFromFields( - ShRecord( - username = v.Value.Sum.Party(username.unwrap), - following = followingList, - ) - ) - - CreateCommand(TpId.User.User, arg, None) - } - - def userExerciseFollowCommand( - contractId: lar.ContractId, - toFollow: Party, - ): ExerciseCommand[Nothing, v.Value, EnrichedContractId] = { - val reference = EnrichedContractId(Some(TpId.User.User), contractId) - val arg = recordFromFields(ShRecord(userToFollow = v.Value.Sum.Party(toFollow.unwrap))) - val choice = lar.Choice("Follow") - - ExerciseCommand(reference, choice, boxedRecord(arg), None, None) - } - - def followUser(contractId: lar.ContractId, actAs: Party, toFollow: Party) = { - val exercise = userExerciseFollowCommand(contractId, toFollow) - val exerciseJson: JsValue = encodeExercise(encoder)(exercise) - - fixture - .headersWithPartyAuth(actAs = List(actAs)) - .flatMap(headers => - fixture.postJsonRequest(Uri.Path("/v1/exercise"), exerciseJson, headers) - ) - .parseResponse[JsValue] - .map(inside(_) { case OkResponse(_, _, StatusCodes.OK) => - }) - - } - - def queryUsers(fromPerspectiveOfParty: Party) = { - val query = jsObject(s"""{"templateIds": ["$packageId:User:User"]}""") - - suppressPackageIdWarning { - fixture - .headersWithPartyAuth(actAs = List(fromPerspectiveOfParty)) - .flatMap(headers => fixture.postJsonRequest(Uri.Path("/v1/query"), query, headers)) - .parseResponse[JsValue] - .map(inside(_) { case OkResponse(_, _, StatusCodes.OK) => - }) - } - } - - for { - partyDetails <- partyIds.traverse { p => - 
partyManagement.allocateParty(Some(p.unwrap)) - } - parties = Party subst partyDetails.map(p => p.party: String) - users <- parties.traverse { party => - val command = userCreateCommand(party) - val fut = fixture - .headersWithPartyAuth(actAs = List(party)) - .flatMap(headers => - postCreateCommand( - command, - fixture, - headers, - ) - ) - .map(resultContractId): Future[ContractId] - fut.map(cid => (party, cid)) - } - (alice, aliceUserId) = users(0) - (bob, bobUserId) = users(1) - _ <- followUser(aliceUserId, alice, bob) - _ <- queryUsers(bob) - _ <- followUser(bobUserId, bob, alice) - _ <- queryUsers(alice) - } yield succeed - } - - "query GET" should { - "empty results" in httpTestFixture { fixture => - fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (_, headers) => - fixture.searchAllExpectOk(headers).map { vector => - vector should have size 0L - } - } - } - - "single-party with results" in httpTestFixture { fixture => - fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => - val searchDataSet = genSearchDataSet(alice) - searchDataSet.traverse(c => postCreateCommand(c, fixture, headers)).flatMap { rs => - rs.map(_.status) shouldBe List.fill(searchDataSet.size)(StatusCodes.OK) - - fixture - .getRequest(Uri.Path("/v1/query"), headers) - .parseResponse[Vector[JsValue]] - .map(inside(_) { case OkResponse(vector, None, StatusCodes.OK) => - vector should have size searchDataSet.size.toLong - }): Future[Assertion] - } - } - } - - "single party with package id" in httpTestFixture { fixture => - val pkgId = packageIdOfDar(AbstractHttpServiceIntegrationTestFuns.dar1) - fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => - val searchDataSet = genSearchDataSet(alice) - searchExpectOk( - searchDataSet, - jsObject(s"""{"templateIds": ["$pkgId:Iou:Iou"]}"""), - fixture, - headers, - ).map { (acl: List[ActiveContract.ResolvedCtTyId[JsValue]]) => - acl.size shouldBe searchDataSet.size - } - } - } - - "multi-party" in httpTestFixture { fixture => - for { - res1 <- fixture.getUniquePartyAndAuthHeaders("Alice") - (alice, aliceHeaders) = res1 - res2 <- fixture.getUniquePartyAndAuthHeaders("Bob") - (bob, bobHeaders) = res2 - _ <- postCreateCommand( - accountCreateCommand(owner = alice, number = "42"), - fixture, - headers = aliceHeaders, - ).map(r => r.status shouldBe StatusCodes.OK) - _ <- postCreateCommand( - accountCreateCommand(owner = bob, number = "23"), - fixture, - headers = bobHeaders, - ).map(r => r.status shouldBe StatusCodes.OK) - _ <- fixture.searchAllExpectOk(aliceHeaders).map(cs => cs should have size 1) - _ <- fixture.searchAllExpectOk(bobHeaders).map(cs => cs should have size 1) - _ <- fixture - .headersWithPartyAuth(List(alice, bob)) - .flatMap(headers => fixture.searchAllExpectOk(headers)) - .map(cs => cs should have size 2) - } yield succeed - } - } - - "create" should { - "succeeds with single party, proper argument" in httpTestFixture { fixture => - import fixture.encoder - fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => - val command = iouCreateCommand(alice) - - postCreateCommand(command, fixture, headers) - .map(inside(_) { case OkResponse(activeContract, _, StatusCodes.OK) => - assertActiveContract(activeContract)(command, encoder) - }): Future[Assertion] - } - } - - "propagate trace context" in httpTestFixture { fixture => - import fixture.encoder - def generateHex(length: Int): String = - RandomStringUtils.random(length, "0123456789abcdef").toLowerCase - val randomTraceId = 
generateHex(32) - - val testContextHeaders = - extractHeaders(W3CTraceContext(s"00-$randomTraceId-93bb0fa23a8fb53a-01")) - - fixture.getUniquePartyTokenUserIdAndAuthHeaders("Alice").flatMap { - case (alice, jxt, userId, headers) => - val command = iouCreateCommand(alice) - postCreateCommand(command, fixture, headers ++ testContextHeaders) - .map(inside(_) { case OkResponse(activeContract, _, StatusCodes.OK) => - assertActiveContract(activeContract)(command, encoder) - }) - .flatMap { _ => - fixture.client.updateService - .getUpdatesSource( - begin = 0L, - eventFormat = EventFormat( - filtersByParty = Map( - alice.unwrap -> Filters( - Seq( - CumulativeFilter.defaultInstance - .withWildcardFilter(WildcardFilter(includeCreatedEventBlob = false)) - ) - ) - ), - filtersForAnyParty = None, - verbose = false, - ), - end = None, - token = Some(jxt.value), - ) - .collect { response => - response.update match { - case Update.Transaction(tx) => - SerializableTraceContextConverter - .fromDamlProtoSafeOpt(loggerWithoutTracing(logger))(tx.traceContext) - .traceContext - .traceId - - } - } - .take(1) - .runWith(Sink.seq) - .map(tcs => tcs should be(Seq(Some(randomTraceId)))) - } - } - } - - "fails if authorization header is missing" taggedAs authorizationSecurity.setAttack( - Attack( - "Ledger client", - "calls /create without authorization", - "refuse request with UNAUTHORIZED", - ) - ) in httpTestFixture { fixture => - import fixture.encoder - val alice = getUniqueParty("Alice") - val command = iouCreateCommand(alice) - val input: JsValue = encoder.encodeCreateCommand(command).valueOr(e => fail(e.shows)) - - fixture - .postJsonRequest(Uri.Path("/v1/create"), input, List()) - .parseResponse[JsValue] - .map(inside(_) { case ErrorResponse(Seq(error), _, StatusCodes.Unauthorized, _) => - error should include( - "missing Authorization header with OAuth 2.0 Bearer Token" - ) - }): Future[Assertion] - } - - "supports extra readAs parties" in httpTestFixture { fixture => - import fixture.encoder - for { - (alice, _) <- fixture.getUniquePartyAndAuthHeaders("Alice") - (bob, _) <- fixture.getUniquePartyAndAuthHeaders("Bob") - command = iouCreateCommand(alice) - input: JsValue = encoder.encodeCreateCommand(command).valueOr(e => fail(e.shows)) - headers <- fixture - .headersWithPartyAuth(actAs = List(alice), readAs = List(bob)) - activeContractResponse <- fixture - .postJsonRequest( - Uri.Path("/v1/create"), - input, - headers, - ) - .parseResponse[ActiveContract.ResolvedCtTyId[JsValue]] - } yield inside(activeContractResponse) { case OkResponse(activeContract, _, StatusCodes.OK) => - assertActiveContract(activeContract)(command, encoder) - } - } - - "with unsupported templateId should return proper error" in httpTestFixture { fixture => - import fixture.encoder - fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => - val command: CreateCommand[v.Record, ContractTypeId.Template.RequiredPkg] = - iouCreateCommand(alice) - .copy(templateId = TpId.Iou.Dummy) - val input: JsValue = encoder.encodeCreateCommand(command).valueOr(e => fail(e.shows)) - - fixture - .postJsonRequest(Uri.Path("/v1/create"), input, headers) - .parseResponse[JsValue] - .map(inside(_) { case ErrorResponse(Seq(error), _, StatusCodes.BadRequest, _) => - val unknownTemplateId: ContractTypeId.Template.RequiredPkg = command.templateId - error should include( - s"Cannot resolve template ID, given: $unknownTemplateId" - ) - }): Future[Assertion] - } - } - - "supports command deduplication" in httpTestFixture { fixture => - 
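// [Reviewer sketch, not part of the patch] What the command-deduplication test opened
// above relies on: the ledger deduplicates on the change ID, i.e. (user, command ID,
// acting parties), within the declared deduplication period, so resubmitting with a
// fresh submissionId but the same commandId is rejected (409 Conflict on /v1/create).
// A minimal model of that keying, with hypothetical names:
final case class ChangeId(userId: String, commandId: String, actAs: Set[String])
def isDuplicate(alreadySeen: Set[ChangeId], candidate: ChangeId): Boolean =
  alreadySeen.contains(candidate) // the submissionId is deliberately not part of the key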
import fixture.encoder - def genSubmissionId() = SubmissionId(UUID.randomUUID().toString) - - fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => - val cmdId = CommandId apply UUID.randomUUID().toString - - def cmd(submissionId: SubmissionId) = - iouCreateCommand( - alice, - amount = "19002.0", - meta = Some( - CommandMeta( - commandId = Some(cmdId), - actAs = None, - readAs = None, - submissionId = Some(submissionId), - workflowId = None, - deduplicationPeriod = Some(DeduplicationPeriod.Duration(10000L)), - disclosedContracts = None, - synchronizerId = None, - packageIdSelectionPreference = None, - ) - ), - ) - - val firstCreate: JsValue = - encoder.encodeCreateCommand(cmd(genSubmissionId())).valueOr(e => fail(e.shows)) - - fixture - .postJsonRequest(Uri.Path("/v1/create"), firstCreate, headers) - .parseResponse[CreateCommandResponse[JsValue]] - .map(inside(_) { case OkResponse(result, _, _) => - result.completionOffset.unwrap should not be empty - }) - .flatMap { _ => - val secondCreate: JsValue = - encoder.encodeCreateCommand(cmd(genSubmissionId())).valueOr(e => fail(e.shows)) - fixture - .postJsonRequest(Uri.Path("/v1/create"), secondCreate, headers) - .map(inside(_) { case (StatusCodes.Conflict, _) => succeed }): Future[Assertion] - } - } - } - - "supports workflow id" in httpTestFixture { fixture => - import fixture.encoder - - fixture.getUniquePartyTokenUserIdAndAuthHeaders("Alice").flatMap { - case (alice, jwt, _, headers) => - val workflowId = WorkflowId("foobar") - - val cmd = - iouCreateCommand( - alice, - amount = "19002.0", - meta = Some( - CommandMeta( - commandId = None, - actAs = None, - readAs = None, - submissionId = None, - workflowId = Some(workflowId), - deduplicationPeriod = None, - disclosedContracts = None, - synchronizerId = None, - packageIdSelectionPreference = None, - ) - ), - ) - - val create: JsValue = - encoder.encodeCreateCommand(cmd).valueOr(e => fail(e.shows)) - - for { - before <- fixture.client.stateService.getLedgerEndOffset() - result <- fixture - .postJsonRequest(Uri.Path("/v1/create"), create, headers) - .parseResponse[CreateCommandResponse[JsValue]] - .map(inside(_) { case OkResponse(result, _, _) => result }) - update <- fixture.client.updateService - .getUpdatesSource( - begin = before, - eventFormat = EventFormat( - filtersByParty = Map( - alice.unwrap -> Filters( - Seq( - CumulativeFilter.defaultInstance - .withWildcardFilter(WildcardFilter(includeCreatedEventBlob = false)) - ) - ) - ), - filtersForAnyParty = None, - verbose = false, - ), - end = Some(Offset.assertFromStringToLong(result.completionOffset.unwrap)), - token = Some(jwt.value), - ) - .runWith(Sink.last) - } yield update.getTransaction.workflowId shouldBe workflowId - } - } - } - - "create-and-exercise IOU_Transfer" in httpTestFixture { fixture => - import fixture.encoder - for { - (alice, headers) <- fixture.getUniquePartyAndAuthHeaders("Alice") - (bob, _) <- fixture.getUniquePartyAndAuthHeaders("Bob") - cmd = iouCreateAndExerciseTransferCommand(alice, bob) - json: JsValue = encoder.encodeCreateAndExerciseCommand(cmd).valueOr(e => fail(e.shows)) - - res <- fixture - .postJsonRequest(Uri.Path("/v1/create-and-exercise"), json, headers) - .parseResponse[ExerciseResponse[JsValue]] - _ = inside(res) { case OkResponse(result, None, StatusCodes.OK) => - result.completionOffset.unwrap should not be empty - inside(result.events) { - case List( - Contract(\/-(created0)), - Contract(-\/(archived0)), - Contract(\/-(created1)), - ) => - 
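// [Reviewer sketch, not part of the patch] The pattern match above depends on the event
// order that create-and-exercise of Iou_Transfer produces: first the created Iou, then
// its archival (the choice is consuming), then the created IouTransfer, whose contract ID
// is also the exercise result. The assertions that follow check exactly this shape;
// sketched as data with hypothetical types:
sealed trait EventSketch
final case class CreatedSketch(templateId: String, contractId: String) extends EventSketch
final case class ArchivedSketch(templateId: String, contractId: String) extends EventSketch
// expected: List(CreatedSketch("Iou:Iou", c0), ArchivedSketch("Iou:Iou", c0),
//                CreatedSketch("Iou:IouTransfer", c1))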
ContractTypeId.withPkgRef(created0.templateId) shouldBe cmd.templateId - ContractTypeId.withPkgRef(archived0.templateId) shouldBe cmd.templateId - archived0.contractId shouldBe created0.contractId - ContractTypeId.withPkgRef(created1.templateId) shouldBe TpId.Iou.IouTransfer - asContractId(result.exerciseResult) shouldBe created1.contractId - } - } - } yield succeed - } - "request non-existent endpoint should return 404 with errors" in httpTestFixture { fixture => val badPath = Uri.Path("/contracts/does-not-exist") - val badUri = fixture.uri withPath badPath fixture - .getRequestWithMinimumAuth[JsValue](badPath) - .map(inside(_) { case ErrorResponse(Seq(errorMsg), _, StatusCodes.NotFound, _) => - errorMsg shouldBe s"${HttpMethods.GET: HttpMethod}, uri: ${badUri: Uri}" - }): Future[Assertion] - } - - "parties endpoint should" should { - "return all known parties" in httpTestFixture { fixture => - import fixture.client - val partyIds = Vector("P1", "P2", "P3", "P4") - val partyManagement = client.partyManagementClient - - partyIds - .traverse { p => - partyManagement.allocateParty(Some(p)) - } - .flatMap { allocatedParties => - fixture - .getRequest( - Uri.Path("/v1/parties"), - headers = headersWithAdminAuth, - ) - .parseResponse[List[HttpPartyDetails]] - .map(inside(_) { case OkResponse(result, None, StatusCodes.OK) => - val actualIds: Set[Party] = result.view.map(_.identifier).toSet - val allocatedIds: Set[Party] = - Party.subst(allocatedParties.map(p => p.party: String)).toSet - actualIds should contain allElementsOf allocatedIds - result.toSet should contain allElementsOf - allocatedParties.toSet.map(HttpPartyDetails.fromLedgerApi) - result.size should be > maxPartiesPageSize.value - }) - }: Future[Assertion] - } - - "return only requested parties, unknown parties returned as warnings" in httpTestFixture { - fixture => - import fixture.client - val List(aliceName, bobName, charlieName, erinName) = - List("Alice", "Bob", "Charlie", "Erin").map(getUniqueParty) - // We do not allocate erin - val namesToAllocate = List(aliceName, bobName, charlieName) - val partyManagement = client.partyManagementClient - - namesToAllocate - .traverse { p => - partyManagement.allocateParty(Some(p.unwrap)) - } - .flatMap { allocatedParties => - val allocatedPartiesHttpApi: List[HttpPartyDetails] = - allocatedParties.map(HttpPartyDetails.fromLedgerApi) - // Get alice, bob and charlies real party names - val List(alice, bob, charlie) = allocatedPartiesHttpApi.map(_.identifier) - fixture - .postJsonRequest( - Uri.Path("/v1/parties"), - // Request alice and bob as normal, erin by name (as unallocated, she has no hash) - JsArray(Vector(alice, bob, erinName).map(x => JsString(x.unwrap))), - headersWithAdminAuth, - ) - .parseResponse[List[HttpPartyDetails]] - .map(inside(_) { case OkResponse(result, Some(warnings), StatusCodes.OK) => - warnings shouldBe UnknownParties(List(erinName)) - val actualIds: Set[Party] = result.view.map(_.identifier).toSet - actualIds shouldBe Set(alice, bob) // Erin is not known - val expected: Set[HttpPartyDetails] = allocatedPartiesHttpApi.toSet - .filterNot(_.identifier == charlie) - result.toSet shouldBe expected - }) - }: Future[Assertion] - } - - "error if empty array passed as input" in httpTestFixture { fixture => - fixture - .postJsonRequestWithMinimumAuth[JsValue]( - Uri.Path("/v1/parties"), - JsArray(Vector.empty), - ) - .map(inside(_) { case ErrorResponse(Seq(errorMsg), None, StatusCodes.BadRequest, _) => - errorMsg should include("Cannot read JSON: <[]>") - errorMsg should 
include("must be a JSON array with at least 1 element") - }): Future[Assertion] - } - - "error if empty party string passed" in httpTestFixture { fixture => - val requestedPartyIds: Vector[Party] = Party.subst(Vector("")) - - fixture - .postJsonRequestWithMinimumAuth[List[HttpPartyDetails]]( - Uri.Path("/v1/parties"), - JsArray(requestedPartyIds.map(x => JsString(x.unwrap))), - ) - .map(inside(_) { case ErrorResponse(List(error), None, StatusCodes.BadRequest, _) => - error should include("Daml-LF Party is empty") - }): Future[Assertion] - } - - "return empty result with warnings and OK status if nothing found" in httpTestFixture { - fixture => - val requestedPartyIds: Vector[Party] = - Vector(getUniqueParty("Alice"), getUniqueParty("Bob")) - - fixture - .postJsonRequest( - Uri.Path("/v1/parties"), - JsArray(requestedPartyIds.map(x => JsString(x.unwrap))), - headers = headersWithAdminAuth, - ) - .parseResponse[List[HttpPartyDetails]] - .map(inside(_) { - case OkResponse( - List(), - Some(UnknownParties(unknownParties)), - StatusCodes.OK, - ) => - unknownParties.toSet shouldBe requestedPartyIds.toSet - }): Future[Assertion] - } - } - - "parties/allocate should" should { - "allocate a new party" in httpTestFixture { fixture => - val request = HttpAllocatePartyRequest( - Some(Party(s"Carol${uniqueId()}")), - None, - ) - val json = SprayJson.encode(request).valueOr(e => fail(e.shows)) - fixture - .postJsonRequest( - Uri.Path("/v1/parties/allocate"), - json = json, - headers = headersWithAdminAuth, - ) - .parseResponse[HttpPartyDetails] - .flatMap(inside(_) { case OkResponse(newParty, _, StatusCodes.OK) => - newParty.identifier.toString should startWith(request.identifierHint.value.toString) - newParty.isLocal shouldBe true - fixture - .getRequest( - Uri.Path("/v1/parties"), - headersWithAdminAuth, - ) - .parseResponse[List[HttpPartyDetails]] - .map(inside(_) { case OkResponse(result, _, StatusCodes.OK) => - result should contain(newParty) - }) - }): Future[Assertion] - } - - "allocate a new party without any hints" in httpTestFixture { fixture => - fixture - .postJsonRequest( - Uri.Path("/v1/parties/allocate"), - json = JsObject(), - headers = headersWithAdminAuth, - ) - .parseResponse[HttpPartyDetails] - .flatMap(inside(_) { case OkResponse(newParty, _, StatusCodes.OK) => - newParty.identifier.unwrap.length should be > 0 - newParty.isLocal shouldBe true - - fixture - .getRequest( - Uri.Path("/v1/parties"), - headers = headersWithAdminAuth, - ) - .parseResponse[List[HttpPartyDetails]] - .map(inside(_) { case OkResponse(result, _, StatusCodes.OK) => - result should contain(newParty) - }) - }): Future[Assertion] - } - - "return BadRequest error if party ID hint is invalid PartyIdString" taggedAs authorizationSecurity - .setAttack( - Attack( - "Ledger client", - "tries to allocate a party with invalid Party ID", - "refuse request with BAD_REQUEST", - ) - ) in httpTestFixture { fixture => - val request = HttpAllocatePartyRequest( - Some(Party(s"Carol-!")), - None, - ) - val json = SprayJson.encode(request).valueOr(e => fail(e.shows)) - - fixture - .postJsonRequest( - Uri.Path("/v1/parties/allocate"), - json = json, - headers = headersWithAdminAuth, - ) - .parseResponse[JsValue] - .map(inside(_) { case ErrorResponse(errors, None, StatusCodes.BadRequest, _) => - errors.length shouldBe 1 - }) - } - } - - "packages endpoint should" should { - "return all known package IDs" in httpTestFixture { fixture => - getAllPackageIds(fixture).map { x => - inside(x) { - case OkResponse(ps, None, StatusCodes.OK) if 
ps.nonEmpty => - Inspectors.forAll(ps)(_.length should be > 0) - } - }: Future[Assertion] - } - } - - "packages/packageId should" should { - "return a requested package" in httpTestFixture { fixture => - import AbstractHttpServiceIntegrationTestFuns.sha256 - import fixture.uri - getAllPackageIds(fixture).flatMap { okResp => - inside(okResp.result.headOption) { case Some(packageId) => - singleRequest( - HttpRequest( - method = HttpMethods.GET, - uri = uri.withPath(Uri.Path(s"/v1/packages/$packageId")), - headers = headersWithAdminAuth, - ) - ) - .map { resp => - resp.status shouldBe StatusCodes.OK - resp.entity.getContentType() shouldBe ContentTypes.`application/octet-stream` - sha256(resp.entity.dataBytes) shouldBe Success(packageId) - } - } - }: Future[Assertion] - } - - "return NotFound if a non-existing package is requested" in httpTestFixture { fixture => - singleRequest( - HttpRequest( - method = HttpMethods.GET, - uri = fixture.uri.withPath(Uri.Path(s"/v1/packages/12345678")), - headers = headersWithAdminAuth, - ) - ) - .map { resp => - resp.status shouldBe StatusCodes.NotFound - } - } - } - - "packages upload endpoint" in httpTestFixture { fixture => - val newDar = AbstractHttpServiceIntegrationTestFuns.dar3 - - getAllPackageIds(fixture).flatMap { okResp => - val existingPackageIds: Set[String] = okResp.result.toSet - uploadPackage(fixture)(newDar) - .flatMap { _ => - getAllPackageIds(fixture).map { okResp => - val newPackageIds: Set[String] = okResp.result.toSet -- existingPackageIds - newPackageIds.size should be > 0 - } - } - }: Future[Assertion] + .getRequestWithMinimumAuth_(badPath) + .map { + case (StatusCodes.OK, _) => + fail(s"Unexpected success accessing an invalid HTTP endpoint: $badPath") + case (status, _) => status shouldBe StatusCodes.NotFound + } } - "package list is updated when a query request is made" in usingLedger() { (jsonApiPort, client) => - withHttpServiceOnly(jsonApiPort, client) { fixture => - for { - alicePartyAndAuthHeaders <- fixture.getUniquePartyAndAuthHeaders("Alice") - (alice, headers) = alicePartyAndAuthHeaders - _ = withHttpServiceOnly(jsonApiPort, client) { innerFixture => - val searchDataSet = genSearchDataSet(alice) - searchDataSet.traverse(c => postCreateCommand(c, innerFixture, headers)).map { rs => - rs.map(_.status) shouldBe List.fill(searchDataSet.size)(StatusCodes.OK) - } - } - _ = withHttpServiceOnly(jsonApiPort, client) { innerFixture => - innerFixture - .getRequest(Uri.Path("/v1/query"), headers) - .parseResponse[Vector[JsValue]] - .map(inside(_) { case OkResponse(result, _, StatusCodes.OK) => - result should have length 4 - }): Future[Assertion] - } - } yield succeed - } - } } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/AbstractHttpServiceIntegrationTestFuns.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/AbstractHttpServiceIntegrationTestFuns.scala index cfa3e0d5cc..934593daeb 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/AbstractHttpServiceIntegrationTestFuns.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/AbstractHttpServiceIntegrationTestFuns.scala @@ -3,48 +3,14 @@ package com.digitalasset.canton.integration.tests.jsonapi -import com.daml.crypto.MessageDigestPrototype -import com.daml.jwt.Jwt -import com.daml.ledger.api.v2.value as v -import com.digitalasset.canton.fetchcontracts.ContractId -import com.digitalasset.canton.http 
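// [Reviewer sketch, not part of the patch] The package-download test above asserts that
// a package ID is the lower-case hex SHA-256 of the archive bytes served by
// /v1/packages/<packageId>. The pekko-based helper deleted further below drains a Source
// through a DigestInputStream; the same check with only the JDK:
object PackageIdSketch {
  import java.security.MessageDigest
  def sha256Hex(bytes: Array[Byte]): String =
    MessageDigest.getInstance("SHA-256").digest(bytes).map(b => f"$b%02x").mkString
}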
-import com.digitalasset.canton.http.json.* -import com.digitalasset.canton.http.json.SprayJson.decode1 -import com.digitalasset.canton.http.util.ClientUtil.boxedRecord -import com.digitalasset.canton.http.util.FutureUtil -import com.digitalasset.canton.http.util.FutureUtil.toFuture -import com.digitalasset.canton.http.util.Logging.instanceUUIDLogCtx -import com.digitalasset.canton.integration.tests.jsonapi.HttpServiceTestFixture.* -import com.digitalasset.canton.ledger.api.refinements.ApiTypes as lar -import com.digitalasset.canton.ledger.api.util.LfEngineToApi import com.digitalasset.canton.ledger.client.LedgerClient as DamlLedgerClient -import com.digitalasset.canton.ledger.service.MetadataReader import com.digitalasset.canton.testing.utils.TestModels.{ com_daml_ledger_test_ModelTestDar_path, com_daml_ledger_test_SemanticTestDar_path, } import com.digitalasset.canton.util.JarResourceUtils import com.digitalasset.daml.lf.data.Ref -import com.digitalasset.daml.lf.value as lfv -import com.digitalasset.daml.lf.value.test.TypedValueGenerators.ValueAddend as VA import org.apache.pekko.http.scaladsl.model.* -import org.apache.pekko.stream.Materializer -import org.apache.pekko.stream.scaladsl.{Source, StreamConverters} -import org.apache.pekko.util.ByteString -import org.scalatest.* -import scalaz.std.list.* -import scalaz.std.scalaFuture.* -import scalaz.syntax.show.* -import scalaz.syntax.std.option.* -import scalaz.syntax.tag.* -import scalaz.syntax.traverse.* -import scalaz.{\/, \/-} -import shapeless.record.Record as ShRecord -import spray.json.* - -import java.security.DigestInputStream -import scala.concurrent.Future -import scala.util.Try object AbstractHttpServiceIntegrationTestFuns { val dar1 = JarResourceUtils.resourceFile(com_daml_ledger_test_ModelTestDar_path) @@ -78,686 +44,23 @@ object AbstractHttpServiceIntegrationTestFuns { lazy val pkgNameModelTests = Ref.PackageName.assertFromString("model-tests") lazy val pkgNameAccount = Ref.PackageName.assertFromString("Account") - def packageIdToName(pkgId: Ref.PackageId): Ref.PackageName = pkgId match { - case id if id == pkgIdCiou => pkgNameCiou - case id if id == pkgIdModelTests => pkgNameModelTests - case id if id == pkgIdAccount => pkgNameAccount - case _ => throw new IllegalArgumentException(s"Unexpected package id: $pkgId") - } - - @SuppressWarnings(Array("org.wartremover.warts.While")) - def sha256(source: Source[ByteString, Any])(implicit mat: Materializer): Try[String] = Try { - import com.google.common.io.BaseEncoding - - val md = MessageDigestPrototype.Sha256.newDigest - val is = source.runWith(StreamConverters.asInputStream()) - val dis = new DigestInputStream(is, md) - - // drain the input stream and calculate the hash - while (-1 != dis.read()) () - - dis.on(false) - - BaseEncoding.base16().lowerCase().encode(md.digest()) - } - - // ValueAddend eXtensions - object VAx { - def seq(elem: VA): VA.Aux[Seq[elem.Inj]] = - VA.list(elem).xmap((xs: Seq[elem.Inj]) => xs)(_.toVector) - - // nest assertFromString into arbitrary VA structures - val partyStr: VA.Aux[String] = VA.party.xmap(identity[String])(Ref.Party.assertFromString) - - val partySynchronizer: VA.Aux[http.Party] = http.Party.subst[VA.Aux, String](partyStr) - - val contractIdSynchronizer: VA.Aux[http.ContractId] = { - import lfv.test.ValueGenerators.coidGen - import org.scalacheck.Arbitrary - implicit val arbCid: Arbitrary[lfv.Value.ContractId] = Arbitrary(coidGen) - http.ContractId subst VA.contractId.xmap(_.coid: String)( - 
lfv.Value.ContractId.fromString(_).fold(sys.error, identity) - ) - } - } - trait UriFixture { def uri: Uri } - trait EncoderFixture { - def encoder: ApiJsonEncoder - } - sealed trait DecoderFixture { - def decoder: ApiJsonDecoder - } - final case class HttpServiceOnlyTestFixtureData( - uri: Uri, - encoder: ApiJsonEncoder, - decoder: ApiJsonDecoder, + uri: Uri ) extends UriFixture - with EncoderFixture - with DecoderFixture final case class HttpServiceTestFixtureData( uri: Uri, - encoder: ApiJsonEncoder, - decoder: ApiJsonDecoder, client: DamlLedgerClient, ) extends UriFixture - with EncoderFixture - with DecoderFixture } @SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements")) trait AbstractHttpServiceIntegrationTestFuns extends HttpJsonApiTestBase with HttpTestFuns { import AbstractHttpServiceIntegrationTestFuns.* - import JsonProtocol.* - - lazy protected val metadata2: MetadataReader.LfMetadata = - MetadataReader.readFromDar(dar2).valueOr(e => fail(s"Cannot read dar2 metadata: $e")) - - lazy protected val metadataUser: MetadataReader.LfMetadata = - MetadataReader.readFromDar(userDar).valueOr(e => fail(s"Cannot read userDar metadata: $e")) override def packageFiles = List(dar1, dar2, userDar) - protected def postCreateCommand( - cmd: http.CreateCommand[v.Record, http.ContractTypeId.Template.RequiredPkg], - fixture: UriFixture with EncoderFixture, - headers: List[HttpHeader], - ): Future[http.SyncResponse[http.ActiveContract.ResolvedCtTyId[JsValue]]] = - postCreateCommand(cmd, fixture.encoder, fixture.uri, headers) - .parseResponse[http.ActiveContract.ResolvedCtTyId[JsValue]] - - protected def postCreateCommand( - cmd: http.CreateCommand[v.Record, http.ContractTypeId.Template.RequiredPkg], - fixture: UriFixture with EncoderFixture, - ): Future[http.SyncResponse[http.ActiveContract.ResolvedCtTyId[JsValue]]] = - fixture.headersWithAuth.flatMap(postCreateCommand(cmd, fixture, _)) - - protected def resultContractId( - r: http.SyncResponse[http.ActiveContract[_, _]] - ) = - inside(r) { case http.OkResponse(result, _, _: StatusCodes.Success) => - result.contractId - } - - protected def postArchiveCommand( - templateId: http.ContractTypeId.RequiredPkg, - contractId: http.ContractId, - fixture: UriFixture with EncoderFixture, - headers: List[HttpHeader], - ): Future[(StatusCode, JsValue)] = - postArchiveCommand( - templateId, - contractId, - fixture.encoder, - fixture.uri, - headers, - ) - - protected def postArchiveCommand( - templateId: http.ContractTypeId.RequiredPkg, - contractId: http.ContractId, - fixture: UriFixture with EncoderFixture, - ): Future[(StatusCode, JsValue)] = - fixture.headersWithAuth.flatMap( - postArchiveCommand(templateId, contractId, fixture, _) - ) - - protected def lookupContractAndAssert( - contractLocator: http.ContractLocator[JsValue], - contractId: ContractId, - create: http.CreateCommand[v.Record, http.ContractTypeId.Template.RequiredPkg], - fixture: UriFixture with EncoderFixture, - headers: List[HttpHeader], - ): Future[Assertion] = - postContractsLookup(contractLocator, fixture.uri, headers).map(inside(_) { - case http.OkResponse(Some(resultContract), _, StatusCodes.OK) => - contractId shouldBe resultContract.contractId - assertActiveContract(resultContract)(create, fixture.encoder) - }) - - protected def removeRecordId(a: v.Value): v.Value = a match { - case v.Value(v.Value.Sum.Record(r)) if r.recordId.isDefined => - v.Value(v.Value.Sum.Record(removeRecordId(r))) - case _ => - a - } - - protected def removeRecordId(a: v.Record): v.Record = 
a.copy(recordId = None) - - import com.digitalasset.daml.lf.data.Numeric as LfNumeric - import shapeless.HList - - private[this] object RecordFromFields extends shapeless.Poly1 { - import shapeless.Witness - import shapeless.labelled.FieldType as :->>: - - implicit def elem[V, K <: Symbol](implicit - fn: Witness.Aux[K] - ): Case.Aux[K :->>: V, (String, V)] = - at[K :->>: V]((fn.value.name, _)) - } - - protected[this] def recordFromFields[L <: HList, I <: HList](hlist: L)(implicit - mapper: shapeless.ops.hlist.Mapper.Aux[RecordFromFields.type, L, I], - lister: shapeless.ops.hlist.ToTraversable.Aux[I, Seq, (String, v.Value.Sum)], - ): v.Record = v.Record(fields = hlist.map(RecordFromFields).to[Seq].map { case (n, vs) => - v.RecordField(n, Some(v.Value(vs))) - }) - - protected[this] def argToApi(va: VA)(arg: va.Inj): v.Record = - lfToApi(va.inj(arg)) match { - case v.Value(v.Value.Sum.Record(r)) => removeRecordId(r) - case _ => fail(s"${va.t} isn't a record type") - } - - private[this] val (_, iouVA) = { - import com.digitalasset.daml.lf.data.Numeric.Scale - val iouT = ShRecord( - issuer = VAx.partySynchronizer, - owner = VAx.partySynchronizer, - currency = VA.text, - amount = VA.numeric(Scale assertFromInt 10), - observers = VA.list(VAx.partySynchronizer), - ) - VA.record(Ref.Identifier assertFromString "none:Iou:Iou", iouT) - } - - protected[this] object TpId { - import com.digitalasset.canton.http.ContractTypeId as CtId - import CtId.Interface.RequiredPkg as IId - import CtId.Template.RequiredPkg as TId - import Ref.PackageRef.Id as PkgId - - object Iou { - val Dummy: TId = CtId.Template(PkgId(pkgIdModelTests), "Iou", "Dummy") - val IIou: IId = CtId.Interface(PkgId(pkgIdModelTests), "Iou", "IIou") - val Iou: TId = CtId.Template(PkgId(pkgIdModelTests), "Iou", "Iou") - val IouTransfer: TId = CtId.Template(PkgId(pkgIdModelTests), "Iou", "IouTransfer") - } - object Test { - val Dummy: TId = CtId.Template(PkgId(pkgIdModelTests), "Test", "Dummy") - val MultiPartyContract: TId = - CtId.Template(PkgId(pkgIdModelTests), "Test", "MultiPartyContract") - } - object Account { - val Account: TId = CtId.Template(PkgId(pkgIdAccount), "Account", "Account") - val Helper: TId = CtId.Template(PkgId(pkgIdAccount), "Account", "Helper") - val IAccount: IId = CtId.Interface(PkgId(pkgIdAccount), "Account", "IAccount") - val KeyedByDecimal: IId = CtId.Interface(PkgId(pkgIdAccount), "Account", "KeyedByDecimal") - val KeyedByVariantAndRecord: TId = - CtId.Template(PkgId(pkgIdAccount), "Account", "KeyedByVariantAndRecord") - val PubSub: TId = CtId.Template(PkgId(pkgIdAccount), "Account", "PubSub") - val SharedAccount: TId = CtId.Template(PkgId(pkgIdAccount), "Account", "SharedAccount") - } - object Disclosure { - val AnotherToDisclose: TId = - CtId.Template(PkgId(pkgIdAccount), "Disclosure", "AnotherToDisclose") - val ToDisclose: TId = CtId.Template(PkgId(pkgIdAccount), "Disclosure", "ToDisclose") - val HasGarbage: IId = CtId.Interface( - Ref.PackageRef.Name(Ref.PackageName.assertFromString("Account")), - "Disclosure", - "HasGarbage", - ) - val Viewport: TId = CtId.Template(PkgId(pkgIdAccount), "Disclosure", "Viewport") - val CheckVisibility: TId = CtId.Template(PkgId(pkgIdAccount), "Disclosure", "CheckVisibility") - } - object User { - val User: TId = CtId.Template(PkgId(pkgIdUser), "User", "User") - } - object CIou { - val CIou: TId = CtId.Template(PkgId(pkgIdCiou), "CIou", "CIou") - } - object IIou { - val IIou: IId = CtId.Interface(PkgId(pkgIdCiou), "IIou", "IIou") - val TestIIou: TId = 
CtId.Template(PkgId(pkgIdCiou), "IIou", "TestIIou") - } - object Transferrable { - val Transferrable: IId = CtId.Interface(PkgId(pkgIdCiou), "Transferrable", "Transferrable") - } - - def unsafeCoerce[Like[T] <: CtId[T], T](ctId: CtId[T])(implicit - Like: CtId.Like[Like] - ): Like[T] = - Like(ctId.packageId, ctId.moduleName, ctId.entityName) - } - - protected def iouCreateCommand( - party: http.Party, - amount: String = "999.9900000000", - currency: String = "USD", - observers: Vector[http.Party] = Vector.empty, - meta: Option[http.CommandMeta.NoDisclosed] = None, - ): http.CreateCommand[v.Record, http.ContractTypeId.Template.RequiredPkg] = { - val arg = argToApi(iouVA)( - ShRecord( - issuer = party, - owner = party, - currency = currency, - amount = LfNumeric assertFromString amount, - observers = observers, - ) - ) - - http.CreateCommand(TpId.Iou.Iou, arg, meta) - } - - private[this] val (_, ciouVA) = { - val iouT = - ShRecord(issuer = VAx.partySynchronizer, owner = VAx.partySynchronizer, amount = VA.text) - VA.record(Ref.Identifier assertFromString "none:Iou:Iou", iouT) - } - - protected def iouCommand( - issuer: http.Party, - templateId: http.ContractTypeId.Template.RequiredPkg, - ) = { - val iouT = argToApi(ciouVA)( - ShRecord( - issuer = issuer, - owner = issuer, - amount = "42", - ) - ) - http.CreateCommand(templateId, iouT, None) - } - - protected def pubSubCreateCommand( - publisher: http.Party, - subscribers: Seq[http.Party], - ) = { - val payload = recordFromFields( - ShRecord( - publisher = v.Value.Sum.Party(Ref.Party assertFromString publisher.unwrap), - subscribers = lfToApi(VAx.seq(VAx.partySynchronizer).inj(subscribers)).sum, - ) - ) - http.CreateCommand( - templateId = TpId.Account.PubSub, - payload = payload, - meta = None, - ) - } - - protected def iouExerciseTransferCommand( - contractId: lar.ContractId, - partyName: http.Party, - ): http.ExerciseCommand[Nothing, v.Value, http.EnrichedContractId] = { - val reference = http.EnrichedContractId(Some(TpId.Iou.Iou), contractId) - val party = Ref.Party assertFromString partyName.unwrap - val arg = - recordFromFields(ShRecord(newOwner = v.Value.Sum.Party(party))) - val choice = lar.Choice("Iou_Transfer") - - http.ExerciseCommand(reference, choice, boxedRecord(arg), None, None) - } - - protected def iouCreateAndExerciseTransferCommand( - originator: http.Party, - target: http.Party, - amount: String = "999.9900000000", - currency: String = "USD", - meta: Option[http.CommandMeta.NoDisclosed] = None, - ): http.CreateAndExerciseCommand[ - v.Record, - v.Value, - http.ContractTypeId.Template.RequiredPkg, - http.ContractTypeId.RequiredPkg, - ] = { - val targetParty = Ref.Party assertFromString target.unwrap - val payload = argToApi(iouVA)( - ShRecord( - issuer = originator, - owner = originator, - currency = currency, - amount = LfNumeric assertFromString amount, - observers = Vector.empty, - ) - ) - - val arg = - recordFromFields(ShRecord(newOwner = v.Value.Sum.Party(targetParty))) - val choice = lar.Choice("Iou_Transfer") - - http.CreateAndExerciseCommand( - templateId = TpId.Iou.Iou, - payload = payload, - choice = choice, - argument = boxedRecord(arg), - choiceInterfaceId = None, - meta = meta, - ) - } - - protected def multiPartyCreateCommand(ps: List[http.Party], value: String) = { - val psv = lfToApi(VAx.seq(VAx.partySynchronizer).inj(ps)).sum - val payload = recordFromFields( - ShRecord( - parties = psv, - value = v.Value.Sum.Text(value), - ) - ) - http.CreateCommand( - templateId = TpId.Test.MultiPartyContract, - payload = 
payload, - meta = None, - ) - } - - protected def multiPartyAddSignatories(cid: lar.ContractId, ps: List[http.Party]) = { - val psv = lfToApi(VAx.seq(VAx.partySynchronizer).inj(ps)).sum - val argument = boxedRecord(recordFromFields(ShRecord(newParties = psv))) - http.ExerciseCommand( - reference = http.EnrichedContractId(Some(TpId.Test.MultiPartyContract), cid), - argument = argument, - choiceInterfaceId = None, - choice = lar.Choice("MPAddSignatories"), - meta = None, - ) - } - - protected def multiPartyFetchOther( - cid: lar.ContractId, - fetchedCid: lar.ContractId, - actors: List[http.Party], - ) = { - val argument = v.Value( - v.Value.Sum.Record( - recordFromFields( - ShRecord( - cid = v.Value.Sum.ContractId(fetchedCid.unwrap), - actors = lfToApi(VAx.seq(VAx.partySynchronizer).inj(actors)).sum, - ) - ) - ) - ) - http.ExerciseCommand( - reference = http.EnrichedContractId(Some(TpId.Test.MultiPartyContract), cid), - argument = argument, - choiceInterfaceId = None, - choice = lar.Choice("MPFetchOther"), - meta = None, - ) - } - - protected def postContractsLookup( - cmd: http.ContractLocator[JsValue], - uri: Uri, - headers: List[HttpHeader], - readAs: Option[List[http.Party]], - ): Future[http.SyncResponse[Option[http.ActiveContract.ResolvedCtTyId[JsValue]]]] = - for { - locjson <- toFuture(SprayJson.encode(cmd)): Future[JsValue] - json <- toFuture( - readAs.cata( - ral => - SprayJson - .encode(ral) - .map(ralj => - JsObject(locjson.asJsObject.fields.updated(JsonProtocol.ReadersKey, ralj)) - ), - \/-(locjson), - ) - ) - result <- postJsonRequest(uri.withPath(Uri.Path("/v1/fetch")), json, headers) - .parseResponse[Option[http.ActiveContract.ResolvedCtTyId[JsValue]]] - } yield result - - protected def postContractsLookup( - cmd: http.ContractLocator[JsValue], - uri: Uri, - headers: List[HttpHeader], - ): Future[http.SyncResponse[Option[http.ActiveContract.ResolvedCtTyId[JsValue]]]] = - postContractsLookup(cmd, uri, headers, None) - - protected def asContractId(a: JsValue): http.ContractId = inside(a) { case JsString(x) => - http.ContractId(x) - } - - protected def encodeExercise(encoder: ApiJsonEncoder)( - exercise: http.ExerciseCommand.RequiredPkg[v.Value, http.ContractLocator[v.Value]] - ): JsValue = - encoder.encodeExerciseCommand(exercise).getOrElse(fail(s"Cannot encode: $exercise")) - - protected def decodeExercise( - decoder: ApiJsonDecoder, - jwt: Jwt, - )( - jsVal: JsValue - ): Future[http.ExerciseCommand.RequiredPkg[v.Value, http.EnrichedContractId]] = - instanceUUIDLogCtx { implicit lc => - import scalaz.syntax.bifunctor.* - val cmd = - decoder.decodeExerciseCommand(jsVal, jwt).getOrElse(fail(s"Cannot decode $jsVal")) - cmd.map( - _.bimap( - lfToApi, - enrichedContractIdOnly, - ) - ) - } - - protected def enrichedContractIdOnly(x: http.ContractLocator[_]): http.EnrichedContractId = - x match { - case cid: http.EnrichedContractId => cid - case _: http.EnrichedContractKey[_] => - fail(s"Expected synchronizer.EnrichedContractId, got: $x") - } - - protected def lfToApi(lfVal: http.LfValue): v.Value = - LfEngineToApi.lfValueToApiValue(verbose = true, lfVal).fold(e => fail(e), identity) - - protected def assertActiveContract(uri: Uri)( - decoder: ApiJsonDecoder, - actual: http.ActiveContract.ResolvedCtTyId[JsValue], - create: http.CreateCommand[v.Record, http.ContractTypeId.Template.RequiredPkg], - exercise: http.ExerciseCommand[Any, v.Value, _], - fixture: UriFixture, - ): Future[Assertion] = { - import http.ActiveContractExtras.* - - val expectedContractFields: Seq[v.RecordField] = 
create.payload.fields - val expectedNewOwner: v.Value = exercise.argument.sum.record - .flatMap(_.fields.headOption) - .flatMap(_.value) - .getOrElse(fail("Cannot extract expected newOwner")) - fixture - .jwt(uri) - .flatMap(jwt => - instanceUUIDLogCtx(implicit lc => - decoder.decodeUnderlyingValues(actual, jwt).valueOr(e => fail(e.shows)) - ).map(active => - inside(active.payload.sum.record.map(_.fields)) { - case Some( - Seq( - v.RecordField("iou", Some(contractRecord)), - v.RecordField("newOwner", Some(newOwner)), - ) - ) => - val contractFields: Seq[v.RecordField] = - contractRecord.sum.record.map(_.fields).getOrElse(Seq.empty) - (contractFields: Seq[v.RecordField]) shouldBe (expectedContractFields: Seq[ - v.RecordField - ]) - (newOwner: v.Value) shouldBe (expectedNewOwner: v.Value) - } - ) - ) - } - - protected def assertActiveContract( - activeContract: http.ActiveContract.ResolvedCtTyId[JsValue] - )( - command: http.CreateCommand[v.Record, http.ContractTypeId.Template.RequiredPkg], - encoder: ApiJsonEncoder, - ): Assertion = { - - import encoder.implicits.* - - val expected: http.CreateCommand[JsValue, http.ContractTypeId.Template.RequiredPkg] = - command - .traversePayload(SprayJson.encode[v.Record](_)) - .getOrElse(fail(s"Failed to encode command: $command")) - - (activeContract.payload: JsValue) shouldBe (expected.payload: JsValue) - } - - protected def assertJsPayload( - activeContract: http.ActiveContract.ResolvedCtTyId[JsValue] - )( - jsPayload: JsValue - ): Assertion = - (activeContract.payload: JsValue) shouldBe (jsPayload) - - protected def getAllPackageIds(fixture: UriFixture): Future[http.OkResponse[List[String]]] = - fixture - .getRequestWithMinimumAuth[List[String]](Uri.Path("/v1/packages")) - .map(inside(_) { case x @ http.OkResponse(_, _, StatusCodes.OK) => - x - }) - - protected[this] def uploadPackage(fixture: UriFixture)(newDar: java.io.File): Future[Unit] = for { - resp <- singleRequest( - HttpRequest( - method = HttpMethods.POST, - uri = fixture.uri.withPath(Uri.Path("/v1/packages")), - headers = headersWithAdminAuth, - entity = HttpEntity.fromFile(ContentTypes.`application/octet-stream`, newDar), - ) - ) - } yield { - resp.status shouldBe StatusCodes.OK - () - } - - protected def initialIouCreate( - serviceUri: Uri, - party: http.Party, - headers: List[HttpHeader], - ): Future[http.SyncResponse[http.ActiveContract.ResolvedCtTyId[JsValue]]] = { - val partyJson = party.toJson.compactPrint - val payload = - s""" - |{ - | "templateId": "${TpId.Iou.Iou.fqn}", - | "payload": { - | "observers": [], - | "issuer": $partyJson, - | "amount": "999.99", - | "currency": "USD", - | "owner": $partyJson - | } - |} - |""".stripMargin - postJsonStringRequest( - serviceUri.withPath(Uri.Path("/v1/create")), - payload, - headers, - ) - .parseResponse[http.ActiveContract.ResolvedCtTyId[JsValue]] - } - - protected def initialAccountCreate( - fixture: UriFixture with EncoderFixture, - owner: http.Party, - headers: List[HttpHeader], - ): Future[http.SyncResponse[http.ActiveContract.ResolvedCtTyId[JsValue]]] = { - val command = accountCreateCommand(owner, "abc123") - postCreateCommand(command, fixture, headers) - } - - protected def jsObject(s: String): JsObject = { - val r: JsonError \/ JsObject = for { - jsVal <- SprayJson.parse(s).leftMap(e => JsonError(e.shows)) - jsObj <- SprayJson.mustBeJsObject(jsVal) - } yield jsObj - r.valueOr(e => fail(e.shows)) - } - - protected def searchExpectOk( - commands: List[http.CreateCommand[v.Record, http.ContractTypeId.Template.RequiredPkg]], - 
query: JsObject, - fixture: UriFixture with EncoderFixture, - headers: List[HttpHeader], - suppressWarnings: Boolean = true, - ): Future[List[http.ActiveContract.ResolvedCtTyId[JsValue]]] = - if (suppressWarnings) - suppressPackageIdWarning { - search(commands, query, fixture, headers).map(expectOk(_)) - } - else search(commands, query, fixture, headers).map(expectOk(_)) - - protected def searchExpectOk( - commands: List[http.CreateCommand[v.Record, http.ContractTypeId.Template.RequiredPkg]], - query: JsObject, - fixture: UriFixture with EncoderFixture, - ): Future[List[http.ActiveContract.ResolvedCtTyId[JsValue]]] = - fixture.headersWithAuth.flatMap(searchExpectOk(commands, query, fixture, _)) - - protected def search( - commands: List[http.CreateCommand[v.Record, http.ContractTypeId.Template.RequiredPkg]], - query: JsObject, - fixture: UriFixture with EncoderFixture, - headers: List[HttpHeader], - ): Future[ - http.SyncResponse[List[http.ActiveContract.ResolvedCtTyId[JsValue]]] - ] = - commands.traverse(c => postCreateCommand(c, fixture, headers)).flatMap { rs => - rs.map(_.status) shouldBe List.fill(commands.size)(StatusCodes.OK) - fixture.postJsonRequest(Uri.Path("/v1/query"), query, headers).flatMap { case (_, output) => - FutureUtil - .toFuture( - decode1[http.SyncResponse, List[http.ActiveContract.ResolvedCtTyId[JsValue]]]( - output - ) - ) - } - } - - def expectOk[R](resp: http.SyncResponse[R]): R = resp match { - case ok: http.OkResponse[_] => - ok.status shouldBe StatusCodes.OK - ok.warnings shouldBe empty - ok.result - case err: http.ErrorResponse => - fail(s"Expected OK response, got: $err") - } - - protected def randomTextN(n: Int) = { - import org.scalacheck.Gen - Gen - .buildableOfN[String, Char](n, Gen.alphaNumChar) - .sample - .getOrElse(sys.error(s"can't generate ${n}b string")) - } - - def postCreateCommand( - cmd: http.CreateCommand[v.Record, http.ContractTypeId.Template.RequiredPkg], - encoder: ApiJsonEncoder, - uri: Uri, - headers: List[HttpHeader], - ): Future[(StatusCode, JsValue)] = - for { - json <- FutureUtil.toFuture(encoder.encodeCreateCommand(cmd)): Future[JsValue] - result <- postJsonRequest(uri.withPath(Uri.Path("/v1/create")), json, headers = headers) - } yield result - - def postArchiveCommand( - templateId: http.ContractTypeId.RequiredPkg, - contractId: http.ContractId, - encoder: ApiJsonEncoder, - uri: Uri, - headers: List[HttpHeader], - ): Future[(StatusCode, JsValue)] = { - val ref = http.EnrichedContractId(Some(templateId), contractId) - val cmd = archiveCommand(ref) - for { - json <- FutureUtil.toFuture(encoder.encodeExerciseCommand(cmd)): Future[JsValue] - result <- postJsonRequest(uri.withPath(Uri.Path("/v1/exercise")), json, headers) - } yield result - } - } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/AuthorizationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/AuthorizationTest.scala deleted file mode 100644 index 07dc05c063..0000000000 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/AuthorizationTest.scala +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
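// [Reviewer sketch, not part of the patch] The AuthorizationTest whose removal starts
// here ran against a shared-secret participant (authSecret = Some("secret")) and signed
// tokens via com.daml.jwt.JwtSigner. For reference, the generic HS256 JWT construction
// that kind of setup relies on, built with only the JDK (illustrative, not Canton's helper):
object Hs256JwtSketch {
  import java.util.Base64
  import javax.crypto.Mac
  import javax.crypto.spec.SecretKeySpec
  def sign(payloadJson: String, secret: String): String = {
    val b64 = Base64.getUrlEncoder.withoutPadding
    val header = b64.encodeToString("""{"alg":"HS256","typ":"JWT"}""".getBytes("UTF-8"))
    val payload = b64.encodeToString(payloadJson.getBytes("UTF-8"))
    val mac = Mac.getInstance("HmacSHA256")
    mac.init(new SecretKeySpec(secret.getBytes("UTF-8"), "HmacSHA256"))
    val sig = b64.encodeToString(mac.doFinal(s"$header.$payload".getBytes("UTF-8")))
    s"$header.$payload.$sig"
  }
}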
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.integration.tests.jsonapi - -import com.daml.grpc.adapter.{ExecutionSequencerFactory, PekkoExecutionSequencerPool} -import com.daml.jwt.Jwt -import com.daml.logging.LoggingContextOf -import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* -import com.daml.test.evidence.tag.Security.SecurityTest.Property.Authorization -import com.daml.test.evidence.tag.Security.{Attack, SecurityTest} -import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.http.json.v1.{LedgerReader, PackageService, V1Routes} -import com.digitalasset.canton.http.util.Logging.{InstanceUUID, instanceUUIDLogCtx} -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer -import com.digitalasset.canton.ledger.client.LedgerClient as DamlLedgerClient -import com.digitalasset.canton.logging.SuppressionRule -import com.digitalasset.daml.lf.data.Ref -import org.apache.pekko.actor.ActorSystem -import org.apache.pekko.stream.Materializer -import org.slf4j.event.Level - -import scala.concurrent.{ExecutionContext, Future} - -final class AuthorizationTest extends HttpJsonApiTestBase { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) - - private val testId: String = this.getClass.getSimpleName - private implicit val asys: ActorSystem = ActorSystem(testId) - private implicit val mat: Materializer = Materializer(asys) - private implicit val aesf: ExecutionSequencerFactory = - new PekkoExecutionSequencerPool(testId)(asys) - private implicit val ec: ExecutionContext = asys.dispatcher - - private val emptyJWTToken = getToken(Ref.UserId.assertFromString("empty"), authSecret) - - private val authorizationSecurity: SecurityTest = - SecurityTest(property = Authorization, asset = "HTTP JSON API Service") - - override def afterAll(): Unit = { - aesf.close() - mat.shutdown() - asys.terminate().futureValue - super.afterAll() - } - - override def authSecret: Option[String] = Some("secret") - - protected def withLedger[A]( - testFn: DamlLedgerClient => Future[A] - ): FixtureParam => A = - usingLedger[A](Some(toHeader(adminToken))) { case (_, client) => - testFn(client).futureValue - } - - private def packageService( - client: DamlLedgerClient - )(implicit lc: LoggingContextOf[InstanceUUID]): PackageService = { - val loadCache = LedgerReader.LoadCache.freshCache() - new PackageService( - reloadPackageStoreIfChanged = - V1Routes.doLoad(client.packageService, LedgerReader(loggerFactory), loadCache), - loggerFactory = loggerFactory, - ) - } - - val AuthInterceptorSuppressionRule: SuppressionRule = - SuppressionRule.LoggerNameContains("AuthInterceptor") && - SuppressionRule.Level(Level.WARN) - - "PackageService against an authenticated sandbox" should { - "fail updating the package service immediately with insufficient authorization" taggedAs authorizationSecurity - .setAttack( - Attack( - "Ledger client", - "does not provide an auth token", - "refuse updating the package service with a failure", - ) - ) in withLedger { client => - loggerFactory.suppress(AuthInterceptorSuppressionRule) { - instanceUUIDLogCtx(implicit lc => - packageService(client).reload(Jwt(emptyJWTToken.value)).failed.map(_ => succeed) - ) - } - } - - "succeed updating the package service with sufficient authorization" taggedAs authorizationSecurity - .setHappyCase( - "A ledger client can update the package service when authorized" - ) in withLedger { client => - instanceUUIDLogCtx(implicit lc => - 
packageService(client) - .reload( - Jwt(toHeader(adminToken, authSecret.getOrElse(jwtSecret.unwrap))) - ) - .map(_ => succeed) - ) - } - } -} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/Consume.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/Consume.scala deleted file mode 100644 index 82c5d79ed7..0000000000 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/Consume.scala +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.integration.tests.jsonapi - -import org.apache.pekko.stream.scaladsl.Sink -import org.scalactic.source -import scalaz.{Functor, ~>} - -import scala.concurrent.{ExecutionContext, Future} - -/** This example script reads two elements unconditionally, checking whether the first element is - * "good" and that the second element has `true` as its first member, then performs a future, then - * drains any remaining elements from the stream, yielding a result that uses elements of all three - * reads. - * - * {{{ - * val mySyntax = Consume.syntax[Foo] // Foo = stream element type - * import mySyntax._ - * - * val sink: Sink[Foo, Future[Bar]] = Consume.interpret(for { - * a <- readOne - * if aGoodFoo(a) // abort if false - * - * Foo(true, b) <- readOne // abort if pattern doesn't match - * _ <- liftF(Future { q(a, b) /* complete this future before continuing */ }) - * - * rest <- drain // Seq[Foo] of remainder; use only if you expect more elements - * } yield Bar(a, b, rest) - * }}} - */ -sealed abstract class Consume[-T, +V] - -object Consume { - import scalaz.Free - import scalaz.std.scalaFuture.* - - type FCC[T, V] = Free[Consume[T, *], V] - type Description = source.Position - final case class Listen[-T, +V](f: T => V, desc: Description) extends Consume[T, V] - final case class Drain[S, -T, +V](init: S, next: (S, T) => S, out: S => V) extends Consume[T, V] - final case class Emit[+V](run: Future[V]) extends Consume[Any, V] - - /** Strictly speaking, this function really returns the following, these are just passed to - * `Sink.foldAsync` for convenience: - * - * {{{ - * { - * type O - * val init: O - * val step: (O, T) => Future[O] - * val out: O => Future[V] - * } - * }}} - */ - def interpret[T, V](steps: FCC[T, V])(implicit ec: ExecutionContext): Sink[T, Future[V]] = - Sink - .foldAsync(steps) { (steps, t: T) => - // step through steps until performing exactly one listen, - // then step through any further steps until encountering - // either the end or the next listen - def go(steps: FCC[T, V], listened: Boolean): Future[FCC[T, V]] = - steps.resume.fold( - { - case listen @ Listen(f, _) => - if (listened) Future successful (Free roll listen) else go(f(t), true) - case drain: Drain[s, T, FCC[T, V]] => - Future successful Free.roll { - if (listened) drain - else drain.copy(init = drain.next(drain.init, t)) - } - case Emit(run) => run flatMap (go(_, listened)) - }, - v => - if (listened) Future successful (Free point v) - else - Future.failed( - new IllegalStateException( - s"unexpected element $t, script already terminated with $v" - ) - ), - ) - go(steps, false) - } - .mapMaterializedValue(_.flatMap(_.foldMap(Lambda[Consume[T, *] ~> Future] { - case Listen(_, desc) => - Future.failed( - new IllegalStateException( - s"${describe(desc)}: script terminated early, expected another 
value" - ) - ) - case Drain(init, _, out) => Future(out(init)) - case Emit(run) => run - }))) - - implicit def `consume functor`[T](implicit ec: ExecutionContext): Functor[Consume[T, *]] = - new Functor[Consume[T, *]] { - override def map[A, B](fa: Consume[T, A])(f: A => B): Consume[T, B] = fa match { - case Listen(g, desc) => Listen(g andThen f, desc) - case Drain(init, next, out) => Drain(init, next, out andThen f) - case Emit(run) => Emit(run map f) - } - } - - private def describe(d: Description) = s"${d.fileName}:${d.lineNumber}" - - implicit final class `Consume Ops`[T, V](private val steps: FCC[T, V]) extends AnyVal { - def withFilter(p: V => Boolean)(implicit pos: source.Position): Free[Consume[T, *], V] = - steps flatMap { v => - if (p(v)) Free point v - else - Free liftF Emit( - Future failed new IllegalStateException( - s"${describe(pos)}: script cancelled by match error on $v" - ) - ) - } - } - - def syntax[T]: Syntax[T] = new Syntax - - final class Syntax[T] { - def readOne(implicit pos: source.Position): FCC[T, T] = Free liftF Listen(identity, pos) - def drain: FCC[T, Seq[T]] = - Free liftF Drain(Nil, (acc: List[T], t) => t :: acc, (_: List[T]).reverse) - def liftF[V](run: Future[V]): FCC[T, V] = Free liftF Emit(run) - def point[V](v: V): FCC[T, V] = Free point v - } -} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/ExternalPartyLedgerApiOnboardingTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/ExternalPartyLedgerApiOnboardingTest.scala new file mode 100644 index 0000000000..36f2daa544 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/ExternalPartyLedgerApiOnboardingTest.scala @@ -0,0 +1,205 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.integration.tests.jsonapi
+
+import com.daml.ledger.api.v2.admin.party_management_service.GenerateExternalPartyTopologyResponse
+import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
+import com.digitalasset.canton.crypto.{SignatureFormat, SigningAlgorithmSpec}
+import com.digitalasset.canton.integration.{
+  CommunityIntegrationTest,
+  EnvironmentDefinition,
+  SharedEnvironment,
+}
+import com.digitalasset.canton.topology.SynchronizerId
+import com.google.protobuf.ByteString
+
+import java.net.URI
+import java.net.http.{HttpClient, HttpRequest, HttpResponse}
+import java.security.KeyPairGenerator
+import java.util.Base64
+
+class ExternalPartyLedgerApiOnboardingTest extends CommunityIntegrationTest with SharedEnvironment {
+  override def environmentDefinition: EnvironmentDefinition =
+    EnvironmentDefinition.P2_S1M1
+      .withSetup { implicit env =>
+        import env.*
+
+        participants.all.synchronizers.connect_local(sequencer1, daName)
+      }
+
+  private lazy val keyGen = KeyPairGenerator.getInstance("Ed25519")
+  private lazy val keyPair = keyGen.generateKeyPair()
+  private lazy val pb = keyPair.getPublic
+
+  private lazy val signingPublicKey = com.digitalasset.canton.crypto.SigningPublicKey
+    .create(
+      format = com.digitalasset.canton.crypto.CryptoKeyFormat.DerX509Spki,
+      key = ByteString.copyFrom(pb.getEncoded),
+      keySpec = com.digitalasset.canton.crypto.SigningKeySpec.EcCurve25519,
+      usage = com.digitalasset.canton.crypto.SigningKeyUsage.All,
+    )
+    .valueOrFail("failed to generate pubkey")
+
+  private def generateSignature(bytes: ByteString) = {
+    val signing = java.security.Signature.getInstance("Ed25519")
+    signing.initSign(keyPair.getPrivate)
+    signing.update(bytes.toByteArray)
+    com.digitalasset.canton.crypto.Signature.create(
+      format = SignatureFormat.Concat,
+      signature = ByteString.copyFrom(signing.sign()),
+      signedBy = signingPublicKey.fingerprint,
+      signingAlgorithmSpec = Some(SigningAlgorithmSpec.Ed25519),
+      signatureDelegation = None,
+    )
+  }
+
+  "onboard new single node hosted party via JSON api" in { implicit env =>
+    // Send a JSON API request via HttpClient to verify that the JSON API works
+    import env.*
+    val port =
+      participant1.config.httpLedgerApi.server.internalPort.valueOrFail("JSON API must be enabled")
+
+    val jsonBody =
+      """{
+        | "synchronizer": "%s",
+        | "partyHint": "Alice",
+        | "publicKey": {
+        |   "format" : "CRYPTO_KEY_FORMAT_DER_X509_SUBJECT_PUBLIC_KEY_INFO",
+        |   "keyData": "%s",
+        |   "keySpec" : "SIGNING_KEY_SPEC_EC_CURVE25519"
+        | }
+        |}
+        |""".stripMargin.formatted(
+        sequencer1.synchronizer_id.toProtoPrimitive,
+        Base64.getEncoder.encodeToString(pb.getEncoded),
+      )
+
+    def sendRequest(jsonBody: String, url: String) = {
+      val client = HttpClient.newHttpClient()
+      val request = HttpRequest
+        .newBuilder()
+        .uri(new URI(url))
+        .header("Content-Type", "application/json") // Set the header for JSON data
+        .POST(HttpRequest.BodyPublishers.ofString(jsonBody)) // Specify the method and body
+        .build()
+      val ret = client.send(request, HttpResponse.BodyHandlers.ofString())
+      if (ret.statusCode() != 200) {
+        fail(s"Request failed with ${ret.statusCode()} " + ret.body())
+      } else ret
+    }
+
+    val response =
+      sendRequest(jsonBody, s"http://localhost:$port/v2/parties/external/generate-topology")
+
+    // decode response
+    import com.digitalasset.canton.http.json.v2.JsPartyManagementCodecs.*
+    val decoded = io.circe.parser
+      .decode[GenerateExternalPartyTopologyResponse](response.body())
+      .valueOrFail("failed to decode response")
+
+    // sign hash
+    val signing = java.security.Signature.getInstance("Ed25519")
+    signing.initSign(keyPair.getPrivate)
+    signing.update(decoded.multiHash.toByteArray)
+    val signature = Base64.getEncoder.encodeToString(signing.sign())
+
+    val onboardingTxs =
+      decoded.topologyTransactions.map(x =>
+        """{ "transaction" : "%s" }""".format(Base64.getEncoder.encodeToString(x.toByteArray))
+      )
+    // TODO(#27556) prefix the signature below with an A to get a very ugly error message
+    // TODO(#27556) change the string "multiHashSignature" into anything else: you don't get an "unknown field" error!
+    val responseBody =
+      """{
+        | "synchronizer": "%s",
+        | "onboardingTransactions" : [
+        |   %s
+        | ],
+        | "multiHashSignatures": [{
+        |   "format" : "SIGNATURE_FORMAT_CONCAT",
+        |   "signature": "%s",
+        |   "signedBy" : "%s",
+        |   "signingAlgorithmSpec" : "SIGNING_ALGORITHM_SPEC_ED25519"
+        | }]
+        |}
+        |""".stripMargin.formatted(
+        sequencer1.synchronizer_id.toProtoPrimitive,
+        onboardingTxs.mkString(",\n "),
+        signature,
+        decoded.publicKeyFingerprint,
+      )
+
+    sendRequest(responseBody, s"http://localhost:$port/v2/parties/external/allocate")
+
+    eventually() {
+      participant1.parties.hosted("Alice") should not be empty
+    }
+  }
+
+  "onboard new single node hosted party via GRPC api" in { implicit env =>
+    // Onboard the party via the gRPC Ledger API party management commands instead,
+    // to cover the non-JSON path
+    import env.*
+
+    val syncId = SynchronizerId.tryFromString(
+      participant1.ledger_api.state
+        .connected_synchronizers()
+        .connectedSynchronizers
+        .loneElement
+        .synchronizerId
+    )
+    val txs = participant1.ledger_api.parties.generate_topology(
+      syncId,
+      "Bob",
+      signingPublicKey,
+    )
+
+    participant1.ledger_api.parties.allocate_external(
+      syncId,
+      txs.topologyTransactions.map((_, Seq.empty[com.digitalasset.canton.crypto.Signature])),
+      multiSignatures = Seq(generateSignature(txs.multiHash.getCryptographicEvidence)),
+    )
+  }
+
+  "onboard a new multi-hosted party" in { implicit env =>
+    import env.*
+
+    val txs = participant1.ledger_api.parties.generate_topology(
+      sequencer1.synchronizer_id,
+      "Charlie",
+      signingPublicKey,
+      otherConfirmingParticipantIds = Seq(participant2.id),
+      confirmationThreshold = NonNegativeInt.two,
+    )
+
+    participant1.ledger_api.parties.allocate_external(
+      sequencer1.synchronizer_id,
+      txs.topologyTransactions.map((_, Seq.empty[com.digitalasset.canton.crypto.Signature])),
+      multiSignatures = Seq(generateSignature(txs.multiHash.getCryptographicEvidence)),
+    )
+
+    eventually() {
+      participant2.topology.party_to_participant_mappings
+        .list_hosting_proposals(sequencer1.synchronizer_id, participant2.id) should not be empty
+    }
+
+    participant2.topology.party_to_participant_mappings
+      .list_hosting_proposals(sequencer1.synchronizer_id, participant2.id)
+      .foreach { request =>
+        request.threshold.value shouldBe 2
+        participant2.topology.transactions.authorize(
+          sequencer1.synchronizer_id,
+          request.txHash,
+        )
+      }
+
+    eventually() {
+      Seq(participant1, participant2).foreach(_.parties.hosted("Charlie") should not be empty)
+    }
+  }
+}
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/FilterDiscriminatorScenario.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/FilterDiscriminatorScenario.scala
deleted file mode 100644
index 011e076238..0000000000
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/FilterDiscriminatorScenario.scala
+++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.integration.tests.jsonapi - -import com.digitalasset.canton.http -import com.digitalasset.daml.lf.value.test.TypedValueGenerators.ValueAddend as VA -import spray.json.JsValue - -/** A query, a value that matches the query, and a value that doesn't match. - */ -class FilterDiscriminatorScenario[Inj]( - val label: String, - val ctId: http.ContractTypeId.Template.RequiredPkg, - val va: VA.Aux[Inj], - val query: Map[String, JsValue], - val matches: Seq[http.Party => Inj], - val doesNotMatch: Seq[http.Party => Inj], -) - -object FilterDiscriminatorScenario { - def Scenario( - label: String, - ctId: http.ContractTypeId.Template.RequiredPkg, - va: VA, - query: Map[String, JsValue], - )( - matches: Seq[http.Party => va.Inj], - doesNotMatch: Seq[http.Party => va.Inj], - ): FilterDiscriminatorScenario[va.Inj] = - new FilterDiscriminatorScenario(label, ctId, va, query, matches, doesNotMatch) -} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/HttpJsonApiTestBase.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/HttpJsonApiTestBase.scala index 3b365f36e9..bb5e854fb3 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/HttpJsonApiTestBase.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/HttpJsonApiTestBase.scala @@ -7,20 +7,16 @@ import com.daml.grpc.adapter.ExecutionSequencerFactory import com.daml.jwt.{ AuthServiceJWTCodec, DecodedJwt, - Jwt, JwtSigner, StandardJWTPayload, StandardJWTTokenFormat, } -import com.daml.logging.LoggingContextOf -import com.digitalasset.canton.UniquePortGenerator import com.digitalasset.canton.config.CantonRequireTypes.NonEmptyString import com.digitalasset.canton.config.RequireTypes.{ExistingFile, PositiveInt} import com.digitalasset.canton.config.{AuthServiceConfig, PemFile, TlsServerConfig} import com.digitalasset.canton.console.LocalParticipantReference -import com.digitalasset.canton.http.util.Logging.{InstanceUUID, instanceUUIDLogCtx} -import com.digitalasset.canton.http.{HttpServerConfig, JsonApiConfig, WebsocketConfig} -import com.digitalasset.canton.integration.tests.jsonapi.HttpServiceTestFixture.{UseTls, jsonCodecs} +import com.digitalasset.canton.http.WebsocketConfig +import com.digitalasset.canton.integration.tests.jsonapi.HttpServiceTestFixture.UseTls import com.digitalasset.canton.integration.tests.ledgerapi.fixture.CantonFixture import com.digitalasset.canton.integration.tests.ledgerapi.submission.BaseInteractiveSubmissionTest.ParticipantSelector import com.digitalasset.canton.integration.{ @@ -69,18 +65,11 @@ trait HttpJsonApiTestBase extends CantonFixture { override def environmentDefinition: EnvironmentDefinition = EnvironmentDefinition.P1_S1M1 + .addConfigTransform(ConfigTransforms.useStaticTime) + .prependConfigTransform( + ConfigTransforms.enableHttpLedgerApi("participant1", wsConfig) + ) .addConfigTransforms( - ConfigTransforms.updateParticipantConfig("participant1")(config => - config.copy(httpLedgerApi = - Some( - JsonApiConfig( - // TODO(#13519): Extract in ConfigTransforms.globallyUniquePorts - server = HttpServerConfig().copy(port = Some(UniquePortGenerator.next.unwrap)), - websocketConfig = wsConfig, - ) - ) - ) - ), 
ConfigTransforms.updateParticipantConfig("participant1")( ConfigTransforms.useTestingTimeService ), @@ -113,7 +102,6 @@ trait HttpJsonApiTestBase extends CantonFixture { .focus(_.ledgerApi.partyManagementService.maxPartiesPageSize) .replace(maxPartiesPageSize) ), - ConfigTransforms.useStaticTime, ) .withSetup { implicit env => import env.* @@ -132,10 +120,7 @@ trait HttpJsonApiTestBase extends CantonFixture { implicit val ec = env.executionContext val participant = participantSelector(env) import com.digitalasset.canton.ledger.client.configuration.* - val jsonApiPort = participant.config.httpLedgerApi - .valueOrFail("http ledger api must be configured") - .server - .port + val jsonApiPort = participant.config.httpLedgerApi.server.internalPort .valueOrFail("port must be configured") val userId = getClass.getName @@ -148,18 +133,14 @@ trait HttpJsonApiTestBase extends CantonFixture { ), loggerFactory, ) - implicit val lc: LoggingContextOf[InstanceUUID] = instanceUUIDLogCtx(identity) val scheme = if (useTls) "https" else "http" - for { - codecs <- jsonCodecs(client, token.map(Jwt(_))) - uri = Uri.from(scheme = scheme, host = "localhost", port = jsonApiPort) - (encoder, decoder) = codecs - } yield AbstractHttpServiceIntegrationTestFuns.HttpServiceTestFixtureData( - uri, - encoder, - decoder, - client, + val uri = Uri.from(scheme = scheme, host = "localhost", port = jsonApiPort.unwrap) + Future.successful( + AbstractHttpServiceIntegrationTestFuns.HttpServiceTestFixtureData( + uri, + client, + ) ) } @@ -170,10 +151,7 @@ trait HttpJsonApiTestBase extends CantonFixture { executionSequencerFactory: ExecutionSequencerFactory, ): A = { import com.digitalasset.canton.ledger.client.configuration.* - val jsonApiPort = fixtureParam.participant1.config.httpLedgerApi - .valueOrFail("http ledger api must be configured") - .server - .port + val jsonApiPort = fixtureParam.participant1.config.httpLedgerApi.server.internalPort .valueOrFail("port must be configured") val userId = getClass.getName @@ -187,7 +165,7 @@ trait HttpJsonApiTestBase extends CantonFixture { loggerFactory, ) - testFn(jsonApiPort, client) + testFn(jsonApiPort.unwrap, client) } def usingParticipantLedger[A]( @@ -201,10 +179,7 @@ trait HttpJsonApiTestBase extends CantonFixture { ): A = { import com.digitalasset.canton.ledger.client.configuration.* val participant = participantSelector(fixtureParam) - val jsonApiPort = participant.config.httpLedgerApi - .valueOrFail("http ledger api must be configured") - .server - .port + val jsonApiPort = participant.config.httpLedgerApi.server.internalPort .valueOrFail("port must be configured") val userId = getClass.getName @@ -218,7 +193,7 @@ trait HttpJsonApiTestBase extends CantonFixture { loggerFactory, ) - testFn(jsonApiPort, client) + testFn(jsonApiPort.unwrap, client) } protected def getToken( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/HttpServiceIntegrationTestUserManagement.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/HttpServiceIntegrationTestUserManagement.scala deleted file mode 100644 index e8711419fc..0000000000 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/HttpServiceIntegrationTestUserManagement.scala +++ /dev/null @@ -1,639 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.integration.tests.jsonapi - -import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* -import com.daml.test.evidence.tag.Security.Attack -import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.http -import com.digitalasset.canton.http.UserDetails -import com.digitalasset.canton.http.json.JsonProtocol.* -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer -import com.digitalasset.canton.integration.tests.jsonapi.HttpServiceTestFixture.{ - UseTls, - authorizationHeader, -} -import com.digitalasset.canton.ledger.api.UserRight.{CanActAs, ParticipantAdmin} -import com.digitalasset.canton.ledger.api.{User, UserRight} -import com.digitalasset.canton.ledger.client.LedgerClient as DamlLedgerClient -import com.digitalasset.daml.lf.data.Ref -import org.apache.pekko.http.scaladsl.model.{HttpHeader, StatusCodes, Uri} -import org.scalatest.Assertion -import org.scalatest.time.{Millis, Seconds, Span} -import scalaz.NonEmptyList -import scalaz.syntax.show.* -import scalaz.syntax.tag.* -import spray.json.JsValue - -import scala.concurrent.Future - -@SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements")) -class HttpServiceIntegrationTestUserManagement - extends AbstractHttpServiceIntegrationTest - with AbstractHttpServiceIntegrationTestFuns - with HttpServiceUserFixture.UserToken { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) - - // This high patience timeout is needed for the test case creating and listing 20K users - implicit override val defaultPatience: PatienceConfig = - PatienceConfig(timeout = scaled(Span(300, Seconds)), interval = scaled(Span(150, Millis))) - - def createUser(ledgerClient: DamlLedgerClient)( - userId: Ref.UserId, - primaryParty: Option[Ref.Party] = None, - initialRights: List[UserRight] = List.empty, - ): Future[User] = - ledgerClient.userManagementClient.createUser( - User(userId, primaryParty), - initialRights, - Some(jwtAdminNoParty.value), - ) - - def headersWithUserAuth(userId: String): List[HttpHeader] = - authorizationHeader(jwtForUser(userId)) - - override def useTls: UseTls = UseTls.Tls - - "Json format of UserRight" should { - "be stable" in { _ => - import spray.json.* - val ham = getUniqueParty("ham") - val spam = getUniqueParty("spam") - val clam = getUniqueParty("clam") - List[http.UserRight]( - http.CanActAs(ham), - http.CanReadAs(spam), - http.CanExecuteAs(clam), - http.ParticipantAdmin, - http.IdentityProviderAdmin, - http.CanReadAsAnyParty, - http.CanExecuteAsAnyParty, - ).toJson shouldBe - List( - Map("type" -> "CanActAs", "party" -> ham.unwrap), - Map("type" -> "CanReadAs", "party" -> spam.unwrap), - Map("type" -> "CanExecuteAs", "party" -> clam.unwrap), - Map("type" -> "ParticipantAdmin"), - Map("type" -> "IdentityProviderAdmin"), - Map("type" -> "CanReadAsAnyParty"), - Map("type" -> "CanExecuteAsAnyParty"), - ).toJson - } - } - - "create IOU" should { - "work with correct user rights" taggedAs authorizationSecurity.setHappyCase( - "A ledger client can create an IOU with correct user rights" - ) in withHttpService() { fixture => - import fixture.{client, encoder} - - for { - (alice, _) <- fixture.getUniquePartyAndAuthHeaders("Alice") - command = iouCreateCommand(alice) - input: JsValue = encoder.encodeCreateCommand(command).valueOr(e => fail(e.shows)) - user <- createUser(client)( - Ref.UserId.assertFromString(getUniqueUserName("nice.user")), - initialRights = List( 
- CanActAs(Ref.Party.assertFromString(alice.unwrap)) - ), - ) - response <- fixture - .postJsonRequest( - Uri.Path("/v1/create"), - input, - headers = headersWithUserAuth(user.id), - ) - .parseResponse[http.ActiveContract.ResolvedCtTyId[JsValue]] - } yield inside(response) { case http.OkResponse(activeContract, _, StatusCodes.OK) => - assertActiveContract(activeContract)(command, encoder) - } - } - - "fail if user has no permission" taggedAs authorizationSecurity.setAttack( - Attack( - "Ledger client", - "tries to create an IOU without permission", - "refuse request with BAD_REQUEST", - ) - ) in withHttpService() { fixture => - import fixture.{client, encoder} - val alice = getUniqueParty("Alice") - val command = iouCreateCommand(alice) - val input: JsValue = encoder.encodeCreateCommand(command).valueOr(e => fail(e.shows)) - for { - (bob, _) <- fixture.getUniquePartyAndAuthHeaders("Bob") - user <- createUser(client)( - Ref.UserId.assertFromString(getUniqueUserName("nice.user")), - initialRights = List( - CanActAs(Ref.Party.assertFromString(bob.unwrap)) - ), - ) - response <- fixture - .postJsonRequest( - Uri.Path("/v1/create"), - input, - headers = headersWithUserAuth(user.id), - ) - .parseResponse[JsValue] - } yield inside(response) { case http.ErrorResponse(_, _, StatusCodes.BadRequest, _) => - succeed - } - } - - "fail if overwritten actAs & readAs result in missing permission even if the user would have the rights" taggedAs authorizationSecurity - .setAttack( - Attack( - "Ledger client", - "tries to create an IOU but overwritten actAs & readAs result in missing permission", - "refuse request with BAD_REQUEST", - ) - ) in withHttpService() { fixture => - import fixture.{client, encoder} - for { - (alice, _) <- fixture.getUniquePartyAndAuthHeaders("Alice") - (bob, _) <- fixture.getUniquePartyAndAuthHeaders("Bob") - meta = http.CommandMeta( - None, - Some(NonEmptyList(bob)), - None, - submissionId = None, - workflowId = None, - deduplicationPeriod = None, - disclosedContracts = None, - synchronizerId = None, - packageIdSelectionPreference = None, - ) - command = iouCreateCommand(alice, meta = Some(meta)) - input: JsValue = encoder.encodeCreateCommand(command).valueOr(e => fail(e.shows)) - user <- createUser(client)( - Ref.UserId.assertFromString(getUniqueUserName("nice.user")), - initialRights = List( - CanActAs(Ref.Party.assertFromString(alice.unwrap)), - CanActAs(Ref.Party.assertFromString(bob.unwrap)), - ), - ) - response <- fixture - .postJsonRequest( - Uri.Path("/v1/create"), - input, - headers = headersWithUserAuth(user.id), - ) - .parseResponse[JsValue] - } yield inside(response) { case http.ErrorResponse(_, _, StatusCodes.BadRequest, _) => - succeed - } - } - } - - "requesting the user id" should { - "be possible via the user endpoint" in withHttpService() { fixture => - for { - user <- createUser(fixture.client)( - Ref.UserId.assertFromString(getUniqueUserName("nice.user")), - initialRights = List.empty, - ) - output <- fixture - .getRequest( - Uri.Path("/v1/user"), - headers = headersWithUserAuth(user.id), - ) - .parseResponse[UserDetails] - } yield { - inside(output) { case http.OkResponse(result, _, StatusCodes.OK) => - result shouldEqual UserDetails( - user.id, - user.primaryParty: Option[String], - ) - } - } - } - } - - "user/rights endpoint" should { - "POST yields user rights for a specific user" in withHttpService() { fixture => - import spray.json.* - for { - (alice, _) <- fixture.getUniquePartyAndAuthHeaders("Alice") - (bob, _) <- 
fixture.getUniquePartyAndAuthHeaders("Bob") - user <- createUser(fixture.client)( - Ref.UserId.assertFromString(getUniqueUserName("nice.user")), - initialRights = List( - CanActAs(Ref.Party.assertFromString(alice.unwrap)), - CanActAs(Ref.Party.assertFromString(bob.unwrap)), - ), - ) - response <- postRequest( - fixture.uri.withPath(Uri.Path("/v1/user/rights")), - http.ListUserRightsRequest(user.id).toJson, - headers = headersWithAdminAuth, - ).parseResponse[List[http.UserRight]] - } yield inside(response) { case http.OkResponse(result, _, StatusCodes.OK) => - result should contain theSameElementsAs - List[http.UserRight]( - http.CanActAs(alice), - http.CanActAs(bob), - ) - } - } - - "GET yields user rights for the current user" in withHttpService() { fixture => - for { - (alice, _) <- fixture.getUniquePartyAndAuthHeaders("Alice") - (bob, _) <- fixture.getUniquePartyAndAuthHeaders("Bob") - user <- createUser(fixture.client)( - Ref.UserId.assertFromString(getUniqueUserName("nice.user")), - initialRights = List( - CanActAs(Ref.Party.assertFromString(alice.unwrap)), - CanActAs(Ref.Party.assertFromString(bob.unwrap)), - ), - ) - output <- fixture - .getRequest( - Uri.Path("/v1/user/rights"), - headers = headersWithUserAuth(user.id), - ) - .parseResponse[List[http.UserRight]] - } yield { - inside(output) { case http.OkResponse(result, _, StatusCodes.OK) => - result should contain theSameElementsAs - List[http.UserRight]( - http.CanActAs(alice), - http.CanActAs(bob), - ) - } - } - } - } - - "user/create endpoint" should { - "support creating a user" in withHttpService() { fixture => - import spray.json.* - for { - (alice, _) <- fixture.getUniquePartyAndAuthHeaders("Alice") - createUserRequest = http.CreateUserRequest( - "nice.user2", - Some(alice.unwrap), - Some( - List[http.UserRight]( - http.CanActAs(alice), - http.ParticipantAdmin, - ) - ), - ) - response <- postRequest( - fixture.uri.withPath(Uri.Path("/v1/user/create")), - createUserRequest.toJson, - headers = headersWithAdminAuth, - ).parseResponse[JsValue] - } yield inside(response) { case http.OkResponse(r, _, StatusCodes.OK) => - r shouldBe JsObject() - } - } - - "support creating a user with default values" in withHttpServiceAndClient { (uri, _, _, _) => - import spray.json.* - val username = getUniqueUserName("nice.user") - for { - (status, _) <- postRequest( - uri.withPath(Uri.Path("/v1/user/create")), - JsObject("userId" -> JsString(username)), - headers = headersWithAdminAuth, - ) - _ = status shouldBe StatusCodes.OK - response2 <- postRequest( - uri.withPath(Uri.Path("/v1/user")), - http.GetUserRequest(username).toJson, - headers = headersWithAdminAuth, - ).parseResponse[UserDetails] - _ = inside(response2) { case http.OkResponse(ud, _, StatusCodes.OK) => - ud shouldEqual UserDetails(username, None) - } - response3 <- postRequest( - uri.withPath(Uri.Path("/v1/user/rights")), - http.ListUserRightsRequest(username).toJson, - headers = headersWithAdminAuth, - ).parseResponse[List[http.UserRight]] - } yield inside(response3) { case http.OkResponse(List(), _, StatusCodes.OK) => - succeed - } - } - } - - "be possible via the users endpoint to get all users" in withHttpService() { fixture => - import scalaz.std.list.* - import scalaz.std.scalaFuture.* - import scalaz.syntax.traverse.* - import spray.json.* - for { - (alice, _) <- fixture.getUniquePartyAndAuthHeaders("Alice") - usernames = - List("nice.username", "nice.username2", "nice.username3").map(getUniqueUserName) - createUserRequests = usernames.map(name => - 
http.CreateUserRequest( - name, - Some(alice.unwrap), - Some( - List[http.UserRight]( - http.CanActAs(alice), - http.ParticipantAdmin, - ) - ), - ) - ) - _ <- createUserRequests.traverse(createUserRequest => - for { - (status, _) <- postRequest( - fixture.uri.withPath(Uri.Path("/v1/user/create")), - createUserRequest.toJson, - headers = headersWithAdminAuth, - ) - _ = status shouldBe StatusCodes.OK - } yield () - ) - result <- fixture - .getRequest( - Uri.Path("/v1/users"), - headers = headersWithAdminAuth, - ) - .parseResponse[List[UserDetails]] - } yield inside(result) { case http.OkResponse(users, _, StatusCodes.OK) => - users.map(_.userId) should contain allElementsOf usernames - } - } - - "user endpoint" should { - "POST yields information about a specific user" in withHttpService() { fixture => - import spray.json.* - for { - (alice, _) <- fixture.getUniquePartyAndAuthHeaders("Alice") - createUserRequest = http.CreateUserRequest( - getUniqueUserName("nice.user"), - Some(alice.unwrap), - Some( - List[http.UserRight]( - http.CanActAs(alice), - http.ParticipantAdmin, - ) - ), - ) - response1 <- postRequest( - fixture.uri.withPath(Uri.Path("/v1/user/create")), - createUserRequest.toJson, - headers = headersWithAdminAuth, - ).parseResponse[JsValue] - _ = inside(response1) { case http.OkResponse(r, _, StatusCodes.OK) => - r shouldBe JsObject() - } - response2 <- postRequest( - fixture.uri.withPath(Uri.Path(s"/v1/user")), - http.GetUserRequest(createUserRequest.userId).toJson, - headers = headersWithAdminAuth, - ).parseResponse[UserDetails] - } yield inside(response2) { case http.OkResponse(ud, _, StatusCodes.OK) => - ud shouldBe UserDetails( - createUserRequest.userId, - createUserRequest.primaryParty, - ) - } - } - - "GET yields information about the current user" in withHttpService() { fixture => - import fixture.uri - import spray.json.* - for { - (alice, _) <- fixture.getUniquePartyAndAuthHeaders("Alice") - createUserRequest = http.CreateUserRequest( - getUniqueUserName("nice.user"), - Some(alice.unwrap), - Some( - List[http.UserRight]( - http.CanActAs(alice), - http.ParticipantAdmin, - ) - ), - ) - response1 <- postRequest( - uri.withPath(Uri.Path("/v1/user/create")), - createUserRequest.toJson, - headers = headersWithAdminAuth, - ).parseResponse[JsValue] - _ = inside(response1) { case http.OkResponse(r, _, StatusCodes.OK) => - r shouldBe JsObject() - } - response2 <- fixture - .getRequest( - Uri.Path(s"/v1/user"), - headers = headersWithUserAuth(createUserRequest.userId), - ) - .parseResponse[UserDetails] - } yield inside(response2) { case http.OkResponse(usrDetails, _, StatusCodes.OK) => - usrDetails shouldBe UserDetails( - createUserRequest.userId, - createUserRequest.primaryParty, - ) - } - } - } - - "deleting a specific user" should { - "be possible via the user/delete endpoint" in withHttpService() { fixture => - import fixture.uri - import spray.json.* - for { - (alice, _) <- fixture.getUniquePartyAndAuthHeaders("Alice") - createUserRequest = http.CreateUserRequest( - getUniqueUserName("nice.user"), - Some(alice.unwrap), - Some( - List[http.UserRight]( - http.CanActAs(alice), - http.ParticipantAdmin, - ) - ), - ) - response1 <- postRequest( - uri.withPath(Uri.Path("/v1/user/create")), - createUserRequest.toJson, - headers = headersWithAdminAuth, - ).parseResponse[JsValue] - _ = inside(response1) { case http.OkResponse(r, _, StatusCodes.OK) => - r shouldBe JsObject() - } - (status2, _) <- postRequest( - uri.withPath(Uri.Path(s"/v1/user/delete")), - 
http.DeleteUserRequest(createUserRequest.userId).toJson, - headers = headersWithAdminAuth, - ) - _ = status2 shouldBe StatusCodes.OK - response3 <- fixture - .getRequest( - Uri.Path("/v1/users"), - headers = headersWithAdminAuth, - ) - .parseResponse[List[UserDetails]] - } yield inside(response3) { case http.OkResponse(users, _, StatusCodes.OK) => - users should not contain createUserRequest.userId - } - } - } - - "granting the user rights for a specific user" should { - "be possible via a POST to the user/rights/grant endpoint" in withHttpService() { fixture => - import spray.json.* - for { - (alice, _) <- fixture.getUniquePartyAndAuthHeaders("Alice") - (bob, _) <- fixture.getUniquePartyAndAuthHeaders("Bob") - user <- createUser(fixture.client)( - Ref.UserId.assertFromString(getUniqueUserName("nice.user")), - initialRights = List( - CanActAs(Ref.Party.assertFromString(alice.unwrap)) - ), - ) - response <- postRequest( - fixture.uri.withPath(Uri.Path("/v1/user/rights/grant")), - http - .GrantUserRightsRequest( - user.id, - List[http.UserRight]( - http.CanActAs(alice), - http.CanActAs(bob), - http.ParticipantAdmin, - ), - ) - .toJson, - headers = headersWithAdminAuth, - ).parseResponse[List[http.UserRight]] - _ = inside(response) { case http.OkResponse(urs, _, StatusCodes.OK) => - urs should contain theSameElementsAs List[ - http.UserRight - ](http.CanActAs(bob), http.ParticipantAdmin) - } - response2 <- postRequest( - fixture.uri.withPath(Uri.Path("/v1/user/rights")), - http.ListUserRightsRequest(user.id).toJson, - headers = headersWithAdminAuth, - ).parseResponse[List[http.UserRight]] - assertion = inside(response2) { case http.OkResponse(urs, _, StatusCodes.OK) => - urs should contain theSameElementsAs - List[http.UserRight]( - http.CanActAs(alice), - http.CanActAs(bob), - http.ParticipantAdmin, - ) - } - } yield assertion - } - } - - "revoking the user rights for a specific user" should { - "be possible via a POST to the user/rights/revoke endpoint" in withHttpService() { fixture => - import spray.json.* - for { - (alice, _) <- fixture.getUniquePartyAndAuthHeaders("Alice") - (bob, _) <- fixture.getUniquePartyAndAuthHeaders("Bob") - (charlie, _) <- fixture.getUniquePartyAndAuthHeaders("Charlie") - user <- createUser(fixture.client)( - Ref.UserId.assertFromString(getUniqueUserName("nice.user")), - initialRights = List( - CanActAs(Ref.Party.assertFromString(alice.unwrap)), - CanActAs(Ref.Party.assertFromString(bob.unwrap)), - ParticipantAdmin, - ), - ) - response <- postRequest( - fixture.uri.withPath(Uri.Path("/v1/user/rights/revoke")), - http - .RevokeUserRightsRequest( - user.id, - List[http.UserRight]( - http.CanActAs(bob), - http.CanActAs(charlie), - http.ParticipantAdmin, - ), - ) - .toJson, - headers = headersWithAdminAuth, - ).parseResponse[List[http.UserRight]] - _ = inside(response) { case http.OkResponse(urs, _, StatusCodes.OK) => - urs should contain theSameElementsAs List[http.UserRight]( - http.CanActAs(bob), - http.ParticipantAdmin, - ) - } - response2 <- postRequest( - fixture.uri.withPath(Uri.Path("/v1/user/rights")), - http.ListUserRightsRequest(user.id).toJson, - headers = headersWithAdminAuth, - ).parseResponse[List[http.UserRight]] - } yield inside(response2) { case http.OkResponse(urs, _, StatusCodes.OK) => - urs should contain theSameElementsAs List[http.UserRight](http.CanActAs(alice)) - } - } - } - - "creating and listing 20K users" should { - "be possible" taggedAs availabilitySecurity.setHappyCase( - "A ledger client can create and list 20K users" - ) in 
withHttpService() { fixture => - import fixture.uri - import spray.json.* - - val createdUsers = 20000 - - val createUserRequests: List[http.CreateUserRequest] = - List.tabulate(createdUsers) { sequenceNumber => - val p = getUniqueParty(f"p$sequenceNumber%05d") - http.CreateUserRequest( - p.unwrap, - Some(p.unwrap), - Some( - List[http.UserRight]( - http.ParticipantAdmin - ) - ), - ) - } - - // Create users in chunks to avoid overloading the server - // https://doc.akka.io/docs/akka-http/current/client-side/pool-overflow.html - def createUsers( - createUserRequests: Seq[http.CreateUserRequest], - chunkSize: Int = 20, - ): Future[Assertion] = - createUserRequests.splitAt(chunkSize) match { - case (Nil, _) => Future.successful(succeed) - case (next, remainingRequests) => - Future - .sequence { - next.map { request => - postRequest( - uri.withPath(Uri.Path("/v1/user/create")), - request.toJson, - headers = headersWithAdminAuth, - ).map(_._1) - } - } - .flatMap { statusCodes => - all(statusCodes) shouldBe StatusCodes.OK - createUsers(remainingRequests, chunkSize) - } - } - - for { - _ <- createUsers(createUserRequests) - response <- fixture - .getRequest( - Uri.Path("/v1/users"), - headers = headersWithAdminAuth, - ) - .parseResponse[List[UserDetails]] - } yield inside(response) { case http.OkResponse(users, _, StatusCodes.OK) => - val userIds = users.map(_.userId) - val expectedUserIds = "participant_admin" :: createUserRequests.map(_.userId) - userIds should contain allElementsOf expectedUserIds - } - } - } - -} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/HttpServiceTestFixture.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/HttpServiceTestFixture.scala index 024232d54e..02017bca8b 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/HttpServiceTestFixture.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/HttpServiceTestFixture.scala @@ -13,8 +13,6 @@ import com.daml.jwt.{ StandardJWTPayload, StandardJWTTokenFormat, } -import com.daml.ledger.api.v2.value as v -import com.daml.logging.LoggingContextOf import com.digitalasset.canton.config.RequireTypes.ExistingFile import com.digitalasset.canton.config.{ PemFile, @@ -22,15 +20,8 @@ import com.digitalasset.canton.config.{ TlsClientConfig, TlsServerConfig, } -import com.digitalasset.canton.http.json.v1.{LedgerReader, PackageService, V1Routes} -import com.digitalasset.canton.http.json.{ApiJsonDecoder, ApiJsonEncoder} -import com.digitalasset.canton.http.util.ClientUtil.boxedRecord -import com.digitalasset.canton.http.util.Logging.InstanceUUID -import com.digitalasset.canton.http.util.{FutureUtil, NewBoolean} -import com.digitalasset.canton.http.{ContractTypeId, CreateCommand, ExerciseCommand, Party, UserId} -import com.digitalasset.canton.ledger.api.refinements.ApiTypes as lar -import com.digitalasset.canton.ledger.api.util.TimestampConversion -import com.digitalasset.canton.ledger.client.LedgerClient as DamlLedgerClient +import com.digitalasset.canton.http.UserId +import com.digitalasset.canton.http.util.NewBoolean import com.digitalasset.canton.logging.SuppressingLogger import com.digitalasset.canton.util.JarResourceUtils import com.digitalasset.daml.lf.data.Ref @@ -41,7 +32,6 @@ import org.apache.pekko.util.ByteString import scalaz.syntax.tag.* import spray.json.* -import java.time.Instant import scala.annotation.nowarn import 
scala.concurrent.{ExecutionContext, Future} @@ -58,28 +48,6 @@ object HttpServiceTestFixture { Ref.PackageRef.assertFromString(typesig.PackageSignature.read(dar.main)._2.packageId) } - def jsonCodecs( - client: DamlLedgerClient, - token: Option[Jwt], - )(implicit - ec: ExecutionContext, - lc: LoggingContextOf[InstanceUUID], - ): Future[(ApiJsonEncoder, ApiJsonDecoder)] = { - val loadCache = LedgerReader.LoadCache.freshCache() - val packageService = - new PackageService( - reloadPackageStoreIfChanged = - V1Routes.doLoad(client.packageService, LedgerReader(loggerFactory), loadCache), - loggerFactory = SuppressingLogger(getClass), - ) - packageService - .reload( - token.getOrElse(Jwt("we use a dummy because there is no token in these tests.")) - ) - .flatMap(x => FutureUtil.toFuture(x)) - .map(_ => V1Routes.buildJsonCodecs(packageService)) - } - object UseTls extends NewBoolean.Named { val Tls: UseTls = True val NoTls: UseTls = False @@ -143,55 +111,6 @@ object HttpServiceTestFixture { def authorizationHeader(token: Jwt): List[HttpHeader] = List(Authorization(OAuth2BearerToken(token.value))) - def archiveCommand[Ref](reference: Ref): ExerciseCommand[Nothing, v.Value, Ref] = { - val arg: v.Record = v.Record() - val choice = lar.Choice("Archive") - ExerciseCommand(reference, choice, boxedRecord(arg), None, None) - } - - def accountCreateCommand( - owner: Party, - number: String, - time: v.Value.Sum.Timestamp = TimestampConversion.roundInstantToMicros(Instant.now), - ): CreateCommand[v.Record, ContractTypeId.Template.RequiredPkg] = { - val templateId = ContractTypeId.Template(staticPkgIdAccount, "Account", "Account") - val timeValue = v.Value(time) - val enabledVariantValue = - v.Value(v.Value.Sum.Variant(v.Variant(None, "Enabled", Some(timeValue)))) - val arg = v.Record( - fields = List( - v.RecordField("owner", Some(v.Value(v.Value.Sum.Party(owner.unwrap)))), - v.RecordField("number", Some(v.Value(v.Value.Sum.Text(number)))), - v.RecordField("status", Some(enabledVariantValue)), - ) - ) - - CreateCommand(templateId, arg, None) - } - - def sharedAccountCreateCommand( - owners: Seq[Party], - number: String, - time: v.Value.Sum.Timestamp = TimestampConversion.roundInstantToMicros(Instant.now), - ): CreateCommand[v.Record, ContractTypeId.Template.RequiredPkg] = { - val templateId = ContractTypeId.Template(staticPkgIdAccount, "Account", "SharedAccount") - val timeValue = v.Value(time) - val ownersEnc = v.Value( - v.Value.Sum.List(v.List(Party.unsubst(owners).map(o => v.Value(v.Value.Sum.Party(o))))) - ) - val enabledVariantValue = - v.Value(v.Value.Sum.Variant(v.Variant(None, "Enabled", Some(timeValue)))) - val arg = v.Record( - fields = List( - v.RecordField("owners", Some(ownersEnc)), - v.RecordField("number", Some(v.Value(v.Value.Sum.Text(number)))), - v.RecordField("status", Some(enabledVariantValue)), - ) - ) - - CreateCommand(templateId, arg, None) - } - def getResponseDataString(resp: HttpResponse, debug: Boolean = false)(implicit mat: Materializer, ec: ExecutionContext, diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/HttpServiceUserFixture.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/HttpServiceUserFixture.scala index ebe8926fb4..71ca01b3f4 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/HttpServiceUserFixture.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/HttpServiceUserFixture.scala @@ 
-5,15 +5,19 @@ package com.digitalasset.canton.integration.tests.jsonapi import com.daml.jwt.Jwt import com.daml.ledger.api.testing.utils.PekkoBeforeAndAfterAll +import com.daml.ledger.api.v2.admin.user_management_service.{CreateUserRequest, Right, User} import com.digitalasset.canton.http +import com.digitalasset.canton.http.json.v2.JsUserManagementCodecs.* import com.digitalasset.canton.http.util.ClientUtil.uniqueId import com.digitalasset.canton.integration.tests.jsonapi.HttpServiceTestFixture.authorizationHeader import com.digitalasset.canton.integration.tests.ledgerapi.fixture.CantonFixture import com.digitalasset.canton.tracing.W3CTraceContext +import io.circe.syntax.EncoderOps import org.apache.pekko.http.scaladsl.model.HttpHeader.ParsingResult import org.apache.pekko.http.scaladsl.model.{HttpHeader, Uri} import org.scalatest.Suite import scalaz.syntax.tag.* +import spray.json.JsonParser import scala.concurrent.{ExecutionContext, Future} @@ -78,20 +82,29 @@ object HttpServiceUserFixture { ec: ExecutionContext ): Future[(Jwt, http.UserId)] = { val username = getUniqueUserName("test") - val createUserRequest = http.CreateUserRequest( - username, - None, - Some( - Option - .when(admin)(http.ParticipantAdmin) - .toList ++ - actAs.map(http.CanActAs.apply) ++ readAs.map(http.CanReadAs.apply) - ), + val rights = Option + .when(admin)( + Right( + Right.Kind.ParticipantAdmin(Right.ParticipantAdmin()) + ) + ) + .toList ++ + actAs + .map(_.unwrap) + .map(Right.CanActAs.apply) + .map(Right.Kind.CanActAs.apply) + .map(Right.apply) ++ + readAs + .map(_.unwrap) + .map(Right.CanReadAs.apply) + .map(Right.Kind.CanReadAs.apply) + .map(Right.apply) + val createUserRequest = toSprayJson( + CreateUserRequest(Some(User(username, "", false, None, "")), rights) ) - import spray.json.*, com.digitalasset.canton.http.json.JsonProtocol.* postRequest( - uri.withPath(Uri.Path("/v1/user/create")), - createUserRequest.toJson, + uri.withPath(Uri.Path("/v2/users")), + createUserRequest, headers = headersWithAdminAuth, ).map(_ => (jwtForUser(username), http.UserId(username))) } @@ -100,5 +113,9 @@ object HttpServiceUserFixture { protected def jwtForUser(userId: String): Jwt = Jwt(getToken(userId, Some("secret")).value) + + private def toSprayJson[T](t: T)(implicit encoder: io.circe.Encoder[T]) = JsonParser( + t.asJson.toString() + ) } } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/HttpTestFuns.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/HttpTestFuns.scala index 4559be78bb..694cce7c8f 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/HttpTestFuns.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/HttpTestFuns.scala @@ -4,25 +4,18 @@ package com.digitalasset.canton.integration.tests.jsonapi import com.daml.jwt.Jwt -import com.daml.logging.LoggingContextOf +import com.daml.ledger.api.v2.admin.party_management_service.AllocatePartyResponse import com.digitalasset.canton.config.TlsClientConfig import com.digitalasset.canton.console.LocalParticipantReference -import com.digitalasset.canton.http.json.* -import com.digitalasset.canton.http.json.SprayJson.decode1 -import com.digitalasset.canton.http.util.Logging.{InstanceUUID, instanceUUIDLogCtx} -import com.digitalasset.canton.http.{ - AllocatePartyRequest as HttpAllocatePartyRequest, - HttpService, - OkResponse, - Party, - PartyDetails as HttpPartyDetails, - SyncResponse, - 
UserId, -} +import com.digitalasset.canton.http.json.v2.JsPartyManagementCodecs.* +import com.digitalasset.canton.http.json.v2.js.AllocatePartyRequest as JsAllocatePartyRequest +import com.digitalasset.canton.http.{HttpService, Party, UserId} import com.digitalasset.canton.integration.tests.jsonapi.HttpServiceTestFixture.* -import com.digitalasset.canton.integration.tests.jsonapi.WebsocketTestFixture.validSubprotocol import com.digitalasset.canton.ledger.client.LedgerClient as DamlLedgerClient import com.google.protobuf.ByteString as ProtoByteString +import io.circe.Decoder +import io.circe.parser.decode +import io.circe.syntax.EncoderOps import org.apache.pekko.http.scaladsl.model.* import org.apache.pekko.http.scaladsl.model.ws.{ Message, @@ -33,16 +26,16 @@ import org.apache.pekko.http.scaladsl.model.ws.{ import org.apache.pekko.http.scaladsl.{ConnectionContext, Http, HttpsConnectionContext} import org.apache.pekko.stream.scaladsl.{Flow, Keep, Sink, Source} import org.apache.pekko.util.ByteString -import scalaz.syntax.show.* import spray.json.* import scala.collection.concurrent.TrieMap import scala.concurrent.{ExecutionContext, Future} +import scala.util.{Failure, Success} @SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements")) trait HttpTestFuns extends HttpJsonApiTestBase with HttpServiceUserFixture { import AbstractHttpServiceIntegrationTestFuns.* - import JsonProtocol.* + import HttpTestFuns.* implicit val ec: ExecutionContext = system.dispatcher @@ -50,11 +43,6 @@ trait HttpTestFuns extends HttpJsonApiTestBase with HttpServiceUserFixture { private val clientConnectionContextMap = new TrieMap[TlsClientConfig, HttpsConnectionContext]() - protected def withHttpServiceAndClient[A]( - testFn: (Uri, ApiJsonEncoder, ApiJsonDecoder, DamlLedgerClient) => Future[A] - ): FixtureParam => A = - withHttpService() { case HttpServiceTestFixtureData(a, b, c, d) => testFn(a, b, c, d) } - protected def withHttpService[A]( token: Option[Jwt] = None, participantSelector: FixtureParam => LocalParticipantReference = _.participant1, @@ -64,44 +52,22 @@ trait HttpTestFuns extends HttpJsonApiTestBase with HttpServiceUserFixture { case (jsonApiPort, client) => withHttpService[A]( jsonApiPort, - token = token orElse Some(jwtAdminNoParty), client = client, - )((u, e, d, c) => testFn(HttpServiceTestFixtureData(u, e, d, c))).futureValue + )((u, c) => testFn(HttpServiceTestFixtureData(u, c))).futureValue case any => throw new IllegalStateException(s"got unexpected $any") } - protected def withHttpServiceAndClient[A](token: Jwt)( - testFn: (Uri, ApiJsonEncoder, ApiJsonDecoder, DamlLedgerClient) => Future[A] - ): FixtureParam => A = usingLedger[A](Some(token.value)) { case (jsonApiPort, client) => - withHttpService[A]( - jsonApiPort, - token = Some(token), - client = client, - )(testFn(_, _, _, _)).futureValue - } - - protected def withHttpService[A]( - f: (Uri, ApiJsonEncoder, ApiJsonDecoder) => Future[A] - ): FixtureParam => A = - withHttpServiceAndClient((a, b, c, _) => f(a, b, c))(_) - private def withHttpService[A]( jsonApiPort: Int, client: DamlLedgerClient, - token: Option[Jwt], )( - testFn: (Uri, ApiJsonEncoder, ApiJsonDecoder, DamlLedgerClient) => Future[A] + testFn: (Uri, DamlLedgerClient) => Future[A] ): Future[A] = { - implicit val lc: LoggingContextOf[InstanceUUID] = instanceUUIDLogCtx(identity) val scheme = if (useTls) "https" else "http" + val uri = Uri.from(scheme = scheme, host = "localhost", port = jsonApiPort) + testFn(uri, client) - for { - codecs <- jsonCodecs(client, token) - uri 
= Uri.from(scheme = scheme, host = "localhost", port = jsonApiPort) - (encoder, decoder) = codecs - a <- testFn(uri, encoder, decoder, client) - } yield a } def postJsonRequest( @@ -225,17 +191,6 @@ trait HttpTestFuns extends HttpJsonApiTestBase with HttpServiceUserFixture { f: HttpServiceTestFixtureData => Future[A] ): FixtureParam => A = withHttpService(None, participantSelector)(f)(_) - - protected def withHttpServiceOnly[A](jsonApiPort: Int, client: DamlLedgerClient)( - f: HttpServiceOnlyTestFixtureData => Future[A] - ): A = - withHttpService[A]( - jsonApiPort, - token = Some(jwtAdminNoParty), - client = client, - )((uri, encoder, decoder, _) => - f(HttpServiceOnlyTestFixtureData(uri, encoder, decoder)) - ).futureValue implicit protected final class `AHS Funs Uri functions`(private val self: UriFixture) { import self.uri @@ -246,15 +201,15 @@ trait HttpTestFuns extends HttpJsonApiTestBase with HttpServiceUserFixture { def getUniquePartyAndAuthHeaders( name: String ): Future[(Party, List[HttpHeader])] = - self.getUniquePartyTokenUserIdAndAuthHeaders(name).map { case (p, _, _, h) => (p, h) } - - def postJsonRequestWithMinimumAuth[Result: JsonReader]( - path: Uri.Path, - json: JsValue, - ): Future[SyncResponse[Result]] = - headersWithAuth - .flatMap(postJsonRequest(path, json, _)) - .parseResponse[Result] + self + .getUniquePartyTokenUserIdAndAuthHeaders(name) + .map { case (p, _, _, h) => (p, h) } + .transform { + case Success(a) => Success(a) + case Failure(err) => + logger.info(s"err: $err") + Failure(err) + } def getStream[T]( path: Uri.Path, @@ -290,27 +245,37 @@ trait HttpTestFuns extends HttpJsonApiTestBase with HttpServiceUserFixture { uriOverride: Uri = uri, ): Future[(Party, Jwt, UserId, List[HttpHeader])] = { val party = getUniqueParty(name) - val request = HttpAllocatePartyRequest( - Some(party), - None, + val jsAllocate = JsonParser( + JsAllocatePartyRequest(partyIdHint = party.toString).asJson + .toString() ) - val json = SprayJson.encode(request).valueOr(e => fail(e.shows)) for { - OkResponse(newParty, _, StatusCodes.OK) <- + newParty <- postJsonRequest( - Uri.Path("/v1/parties/allocate"), - json = json, + Uri.Path("/v2/parties"), + json = jsAllocate, headers = headersWithAdminAuth, ) - .parseResponse[HttpPartyDetails] + .flatMap { + case (StatusCodes.OK, result) => + decode[AllocatePartyResponse](result.toString()).left + .map(_.toString) + .flatMap(_.partyDetails.toRight("Missing party details")) + .map(_.party) + .map(Party.apply) match { + case Left(err) => Future.failed(new RuntimeException(err)) + case Right(party) => Future.successful(party) + } + case (status, _) => Future.failed(new RuntimeException(status.value)) + } (jwt, userId) <- jwtUserIdForParties(uriOverride)( - List(newParty.identifier), + List(newParty), List.empty, false, false, ) headers = authorizationHeader(jwt) - } yield (newParty.identifier, jwt, userId, headers) + } yield (newParty, jwt, userId, headers) } def headersWithAuth: Future[List[HttpHeader]] = @@ -443,12 +408,26 @@ trait HttpTestFuns extends HttpJsonApiTestBase with HttpServiceUserFixture { ): Future[(StatusCode, ByteString)] = getRequestBinaryData(uri withPath path, headers) - def getRequestWithMinimumAuth[Result: JsonReader]( + def getRequestWithMinimumAuth_( path: Uri.Path - ): Future[SyncResponse[Result]] = + ): Future[(StatusCode, JsValue)] = headersWithAuth .flatMap(getRequest(path, _)) - .parseResponse[Result] + + def getRequestWithMinimumAuth[Resp]( + path: Uri.Path + )(implicit decoder: Decoder[Resp]): Future[Resp] = + 
headersWithAuth + .flatMap(getRequest(path, _)) + .flatMap { + case (StatusCodes.OK, result) => + decode[Resp](result.toString()).left + .map(_.toString) match { + case Left(err) => Future.failed(new RuntimeException(err)) + case Right(ok) => Future.successful(ok) + } + case (status, _) => Future.failed(new RuntimeException(status.value)) + } def getRequestString( path: Uri.Path, @@ -458,17 +437,6 @@ trait HttpTestFuns extends HttpJsonApiTestBase with HttpServiceUserFixture { } - implicit protected final class `Future JsValue functions`( - private val self: Future[(StatusCode, JsValue)] - ) { - def parseResponse[Result: JsonReader]: Future[SyncResponse[Result]] = - self.map { case (status, jsv) => - val r = decode1[SyncResponse, Result](jsv).fold(e => fail(e.shows), identity) - r.status should ===(status) - r - } - } - private def cachedClientContext(config: TlsClientConfig): HttpsConnectionContext = this.clientConnectionContextMap.getOrElseUpdate(config, clientConnectionContext(config)) @@ -476,3 +444,12 @@ trait HttpTestFuns extends HttpJsonApiTestBase with HttpServiceUserFixture { ConnectionContext.httpsClient(HttpService.buildSSLContext(config)) } + +object HttpTestFuns { + val tokenPrefix: String = "jwt.token." + val wsProtocol: String = "daml.ws.auth" + + def validSubprotocol(jwt: Jwt): Option[String] = Option( + s"""$tokenPrefix${jwt.value},$wsProtocol""" + ) +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/JsonDamlDefinitionsServiceTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/JsonDamlDefinitionsServiceTest.scala index 0ae22863be..94f2298a63 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/JsonDamlDefinitionsServiceTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/JsonDamlDefinitionsServiceTest.scala @@ -10,7 +10,7 @@ import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.http.json.v2.JsPackageCodecs.* import com.digitalasset.canton.http.json.v2.damldefinitionsservice.Schema.AllTemplatesResponse import com.digitalasset.canton.http.json.v2.damldefinitionsservice.Schema.Codecs.allTemplatesResponseCodec -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.jsonapi.AbstractHttpServiceIntegrationTestFuns.HttpServiceTestFixtureData import com.digitalasset.canton.integration.{ConfigTransforms, EnvironmentDefinition} import com.digitalasset.canton.version.ProtocolVersion @@ -52,12 +52,12 @@ class JsonDamlDefinitionsServiceTest override def environmentDefinition: EnvironmentDefinition = super.environmentDefinition.addConfigTransform( ConfigTransforms.updateAllParticipantConfigs_( - _.focus(_.httpLedgerApi) - .modify(_.map(_.focus(_.damlDefinitionsServiceEnabled).replace(true))) + _.focus(_.httpLedgerApi.damlDefinitionsServiceEnabled) + .replace(true) ) ) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) "Daml definitions service" should { // TODO(#21695): Test only used to generate golden files. 
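A recurring refactor in this diff replaces hand-built JsonApiConfig instances (with UniquePortGenerator-assigned ports) by the ConfigTransforms.enableHttpLedgerApi helper, prepended so that transforms applied later already see a populated httpLedgerApi section. Below is a minimal sketch of the resulting test setup; the helper's exact signature is only inferred from its two call sites in this diff, so treat any parameter beyond those as an assumption.

  // Sketch only: enableHttpLedgerApi's signature is inferred from the call sites
  // enableHttpLedgerApi("participant1", wsConfig) and
  // enableHttpLedgerApi(participant, pathPrefix = prefix) seen elsewhere in this diff.
  override def environmentDefinition: EnvironmentDefinition =
    EnvironmentDefinition.P1_S1M1
      // Prepend so that later transforms (static time, testing time service, ...)
      // operate on a config in which the HTTP ledger API is already enabled.
      .prependConfigTransform(
        ConfigTransforms.enableHttpLedgerApi("participant1", wsConfig)
      )
      .addConfigTransform(ConfigTransforms.useStaticTime)

This also removes the need for each test to pick a free port by hand: the effective port is read back via participant.config.httpLedgerApi.server.internalPort, as the HttpJsonApiTestBase changes above show.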
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/JsonPathPrefixTests.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/JsonPathPrefixTests.scala index 92dfd2bb07..60d6ab3eb4 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/JsonPathPrefixTests.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/JsonPathPrefixTests.scala @@ -3,23 +3,20 @@ package com.digitalasset.canton.integration.tests.jsonapi -import com.digitalasset.canton.UniquePortGenerator import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.http.{HttpServerConfig, JsonApiConfig} -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.submission.BaseInteractiveSubmissionTest.ParticipantSelector import com.digitalasset.canton.integration.{ ConfigTransforms, EnvironmentDefinition, TestConsoleEnvironment, } -import com.digitalasset.canton.participant.config.ParticipantNodeConfig import org.apache.pekko.http.scaladsl.model.{StatusCodes, Uri} class JsonPathPrefixTests extends AbstractHttpServiceIntegrationTestFuns with HttpServiceUserFixture.UserToken { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) private lazy val testCases: Map[String, Option[String]] = Map( "participant1" -> Some("/any/company/prefix"), @@ -36,28 +33,12 @@ class JsonPathPrefixTests } createChannel(participant1) } - .addConfigTransforms( + .prependConfigTransforms( testCases.toSeq.map { case (participant, prefix) => - ConfigTransforms - .updateParticipantConfig(participant)(runJsonWithPrefix(prefix)) + ConfigTransforms.enableHttpLedgerApi(participant, pathPrefix = prefix) }* ) - private def runJsonWithPrefix( - prefix: Option[String] - ): ParticipantNodeConfig => ParticipantNodeConfig = { participantConfig => - participantConfig.copy(httpLedgerApi = - Some( - JsonApiConfig( - server = HttpServerConfig().copy( - port = Some(UniquePortGenerator.next.unwrap), - pathPrefix = prefix, - ) - ) - ) - ) - } - def testService(participantSelector: ParticipantSelector, prefix: Option[String])(implicit env: TestConsoleEnvironment ) = { diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/JsonStreamAsListPerformanceTests.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/JsonStreamAsListPerformanceTests.scala index f6773a5621..ad16024c5e 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/JsonStreamAsListPerformanceTests.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/JsonStreamAsListPerformanceTests.scala @@ -12,7 +12,7 @@ import com.digitalasset.canton.http.json.SprayJson import com.digitalasset.canton.http.json.v2.JsCommandServiceCodecs.* import com.digitalasset.canton.http.json.v2.JsStateServiceCodecs.* import com.digitalasset.canton.http.json.v2.{JsCommand, JsCommands, JsGetActiveContractsResponse} -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import 
com.digitalasset.canton.util.MonadUtil import io.circe.parser.decode import io.circe.syntax.* @@ -26,7 +26,7 @@ import scala.concurrent.Future class JsonStreamAsListPerformanceTests extends AbstractHttpServiceIntegrationTestFuns with HttpServiceUserFixture.UserToken { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) private val numberOfRepeatedCalls: Int = 5 private val maxAverageTimeForCall: Long = 1000 @@ -58,9 +58,7 @@ class JsonStreamAsListPerformanceTests .parse( state_service .GetActiveContractsRequest( - filter = None, activeAtOffset = completionOffset, - verbose = false, eventFormat = Some( transaction_filter.EventFormat( filtersByParty = Map.empty, diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/JsonUserApiTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/JsonUserApiTest.scala index 1c01cd6fa4..bfc4045a7b 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/JsonUserApiTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/JsonUserApiTest.scala @@ -9,7 +9,7 @@ import com.digitalasset.base.error.ErrorsAssertions import com.digitalasset.canton.config.{AuthServiceConfig, CantonConfig, DbConfig} import com.digitalasset.canton.http.json.v2.JsIdentityProviderCodecs.* import com.digitalasset.canton.http.json.v2.JsUserManagementCodecs.* -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.jsonapi.AbstractHttpServiceIntegrationTestFuns.HttpServiceTestFixtureData import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.ApiUserManagementServiceSuppressionRule import com.digitalasset.canton.integration.tests.ledgerapi.auth.ServiceCallContext @@ -38,7 +38,7 @@ class JsonUserApiTest with ErrorsAssertions { registerPlugin(ExpectedScopeOverrideConfig(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override val defaultScope: String = ExpectedScope diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/JsonV2Tests.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/JsonV2Tests.scala index 1efc0b9c4a..b70547884f 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/JsonV2Tests.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/JsonV2Tests.scala @@ -75,7 +75,7 @@ import com.digitalasset.canton.http.json.v2.{ } import com.digitalasset.canton.http.util.ClientUtil.uniqueId import com.digitalasset.canton.http.{Party, WebsocketConfig} -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.jsonapi.AbstractHttpServiceIntegrationTestFuns.{ HttpServiceTestFixtureData, dar1, @@ -107,7 +107,6 @@ import spray.json.{JsArray, JsNumber, JsObject, JsString, JsonParser} import java.nio.file.Files import java.util.UUID -import scala.annotation.nowarn import 
scala.concurrent.Future import scala.concurrent.duration.* @@ -115,12 +114,10 @@ import scala.concurrent.duration.* * * Uses TLS/https */ -// TODO(#23504) remove deprecated methods -@nowarn("cat=deprecation") class JsonV2Tests extends AbstractHttpServiceIntegrationTestFuns with HttpServiceUserFixture.UserToken { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) // Configure extremely small wait time to avoid long test times and test edge cases override def wsConfig: Option[WebsocketConfig] = Some( @@ -926,7 +923,8 @@ class JsonV2Tests _ <- createCommand(fixture, alice, headers, "cmd2", "completions1") (status, result) <- fixture.postJsonStringRequest( fixture.uri withPath Uri.Path("/v2/commands/completions") withQuery Query( - ("limit", "2") + ("limit", "2"), + ("stream_idle_timeout_ms", "1000"), ), command_completion_service .CompletionStreamRequest( @@ -1005,9 +1003,7 @@ class JsonV2Tests websocket(fixture.uri.withPath(Uri.Path("/v2/state/active-contracts")), jwt) val req = state_service .GetActiveContractsRequest( - filter = None, activeAtOffset = endOffset, - verbose = false, eventFormat = Some(allTransactionsFormat), ) @@ -1035,6 +1031,94 @@ class JsonV2Tests } } } + "return active contracts (legacy fields)" in httpTestFixture { fixture => + fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => + for { + jwt <- jwtForParties(fixture.uri)(List(alice), List()) + _ <- createCommand(fixture, alice, headers) + endOffset <- fixture.client.stateService.getLedgerEndOffset() + result <- { + val webSocketFlow = + websocket(fixture.uri.withPath(Uri.Path("/v2/state/active-contracts")), jwt) + val req = LegacyDTOs + .GetActiveContractsRequest( + filter = Some(allTransactionsFilter), + activeAtOffset = endOffset, + verbose = true, + eventFormat = None, + ) + + val message = TextMessage( + req.asJson.noSpaces + ) + Source + .single( + message + ) + .concatMat(Source.maybe[Message])(Keep.left) + .via(webSocketFlow) + .take(1) + .collect { case m: TextMessage => + m.getStrictText + } + .toMat(Sink.seq)(Keep.right) + .run() + .map(s => decode[JsGetActiveContractsResponse](s.head)) + } + } yield { + inside(result.value.contractEntry) { case ac: JsActiveContract => + IdentifierConverter.toJson(ac.createdEvent.templateId) should endWith("Iou:Iou") + } + } + } + } + "return active contracts (empty fields)" in httpTestFixture { fixture => + fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => + for { + jwt <- jwtForParties(fixture.uri)(List(alice), List()) + _ <- createCommand(fixture, alice, headers) + endOffset <- fixture.client.stateService.getLedgerEndOffset() + _ <- { + val webSocketFlow = + websocket(fixture.uri.withPath(Uri.Path("/v2/state/active-contracts")), jwt) + val req = LegacyDTOs + .GetActiveContractsRequest( + filter = None, + activeAtOffset = endOffset, + eventFormat = None, + ) + + val message = TextMessage( + req.asJson.noSpaces + ) + Source + .single( + message + ) + .concatMat(Source.maybe[Message])(Keep.left) + .via(webSocketFlow) + .take(1) + .collect { case m: TextMessage => + m.getStrictText + } + .toMat(Sink.seq)(Keep.right) + .run() + .map { value => + value + .map(decode[JsCantonError]) + .collect { case Right(error) => + error.errorCategory shouldBe ErrorCategory.InvalidIndependentOfSystemState.asInt + error.code should include("INVALID_ARGUMENT") + error.cause should include( + "Either filter/verbose or 
event_format is required. Please use either backwards compatible arguments (filter and verbose) or event_format." + ) + } + .head + } + } + } yield () + } + } "return active contracts list" in httpTestFixture { fixture => fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => for { @@ -1048,9 +1132,7 @@ class JsonV2Tests .parse( state_service .GetActiveContractsRequest( - filter = None, activeAtOffset = endOffset, - verbose = false, eventFormat = Some(allTransactionsFormat), ) .asJson @@ -1071,6 +1153,81 @@ class JsonV2Tests } } } + "return active contracts list (event_format and verbose fields set)" in httpTestFixture { + fixture => + fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => + for { + _ <- createCommand(fixture, alice, headers) + endOffset <- fixture.client.stateService.getLedgerEndOffset() + _ <- fixture + .postJsonRequest( + Uri.Path("/v2/state/active-contracts"), + SprayJson + .parse( + LegacyDTOs + .GetActiveContractsRequest( + filter = None, + activeAtOffset = endOffset, + verbose = true, + eventFormat = Some(allTransactionsFormat), + ) + .asJson + .toString() + ) + .valueOr(err => fail(s"$err")), + headers, + ) + .map { case (status, result) => + status should be(StatusCodes.BadRequest) + val cantonError = + decode[JsCantonError](result.toString()) + cantonError.value.errorCategory should be( + ErrorCategory.InvalidIndependentOfSystemState.asInt + ) + cantonError.value.cause should include( + "Both event_format and verbose are set. Please use either backwards compatible arguments (filter and verbose) or event_format, but not both." + ) + } + } yield () + } + } + "return active contracts list (event_format and filter fields set)" in httpTestFixture { + fixture => + fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => + for { + _ <- createCommand(fixture, alice, headers) + endOffset <- fixture.client.stateService.getLedgerEndOffset() + _ <- fixture + .postJsonRequest( + Uri.Path("/v2/state/active-contracts"), + SprayJson + .parse( + LegacyDTOs + .GetActiveContractsRequest( + filter = Some(allTransactionsFilter), + activeAtOffset = endOffset, + eventFormat = Some(allTransactionsFormat), + ) + .asJson + .toString() + ) + .valueOr(err => fail(s"$err")), + headers, + ) + .map { case (status, result) => + status should be(StatusCodes.BadRequest) + val cantonError = + decode[JsCantonError](result.toString()) + cantonError.value.errorCategory should be( + ErrorCategory.InvalidIndependentOfSystemState.asInt + ) + cantonError.value.cause should include( + "Both event_format and filter are set. Please use either backwards compatible arguments (filter and verbose) or event_format, but not both." 
+ ) + } + } yield () + } + } "handle offset after ledger end for active contracts list" in httpTestFixture { fixture => fixture.getUniquePartyAndAuthHeaders("Alice").flatMap { case (alice, headers) => for { @@ -1084,9 +1241,7 @@ class JsonV2Tests .parse( state_service .GetActiveContractsRequest( - filter = None, activeAtOffset = endOffset + 100, - verbose = false, eventFormat = Some(allTransactionsFormat), ) .asJson @@ -1412,7 +1567,7 @@ class JsonV2Tests _ <- postJsonRequest( uri = fixture.uri.withPath(Uri.Path("/v2/updates/transaction-by-offset")), json = toSprayJson( - update_service.GetTransactionByOffsetRequest( + LegacyDTOs.GetTransactionByOffsetRequest( offset = offset, transactionFormat = transactionFormat(alice.unwrap), requestingParties = Nil, @@ -1423,10 +1578,46 @@ class JsonV2Tests .map { case (status, _) => status should be(StatusCodes.OK) } + _ <- postJsonRequest( + uri = fixture.uri.withPath(Uri.Path("/v2/updates/transaction-by-offset")), + json = toSprayJson( + LegacyDTOs.GetTransactionByOffsetRequest( + offset = offset, + transactionFormat = None, + requestingParties = Seq(alice.unwrap), + ) + ), + headers = headers, + ) + .map { case (status, _) => + status should be(StatusCodes.OK) + } + _ <- postJsonRequest( + uri = fixture.uri.withPath(Uri.Path("/v2/updates/transaction-by-offset")), + json = toSprayJson( + LegacyDTOs.GetTransactionByOffsetRequest( + offset = offset, + transactionFormat = None, + requestingParties = Nil, + ) + ), + headers = headers, + ) + .map { case (status, result) => + status should be(StatusCodes.BadRequest) + val cantonError = + decode[JsCantonError](result.toString()) + cantonError.value.errorCategory should be( + ErrorCategory.InvalidIndependentOfSystemState.asInt + ) + cantonError.value.cause should include( + "Either transaction_format or requesting_parties is required. Please use either backwards compatible arguments (requesting_parties) or transaction_format." + ) + } _ <- postJsonRequest( uri = fixture.uri.withPath(Uri.Path("/v2/updates/transaction-by-id")), json = toSprayJson( - update_service.GetTransactionByIdRequest( + LegacyDTOs.GetTransactionByIdRequest( updateId = updateId, transactionFormat = transactionFormat(alternateParty), // the party is different requestingParties = Nil, @@ -1437,6 +1628,42 @@ class JsonV2Tests .map { case (status, _) => status should be(StatusCodes.OK) } + _ <- postJsonRequest( + uri = fixture.uri.withPath(Uri.Path("/v2/updates/transaction-by-id")), + json = toSprayJson( + LegacyDTOs.GetTransactionByIdRequest( + updateId = updateId, + transactionFormat = None, + requestingParties = Seq(alternateParty), + ) + ), + headers = headers, + ) + .map { case (status, _) => + status should be(StatusCodes.OK) + } + _ <- postJsonRequest( + uri = fixture.uri.withPath(Uri.Path("/v2/updates/transaction-by-id")), + json = toSprayJson( + LegacyDTOs.GetTransactionByIdRequest( + updateId = updateId, + transactionFormat = transactionFormat(alternateParty), + requestingParties = Seq(alternateParty), + ) + ), + headers = headers, + ) + .map { case (status, result) => + status should be(StatusCodes.BadRequest) + val cantonError = + decode[JsCantonError](result.toString()) + cantonError.value.errorCategory should be( + ErrorCategory.InvalidIndependentOfSystemState.asInt + ) + cantonError.value.cause should include( + "Both transaction_format and requesting_parties are set. Please use either backwards compatible arguments (requesting_parties) or transaction_format but not both." 
+ ) + } _ <- postJsonRequest( uri = fixture.uri.withPath(Uri.Path("/v2/updates/update-by-offset")), @@ -1739,15 +1966,13 @@ class JsonV2Tests beginExclusive = 0, endInclusive = None, filter = Some(allTransactionsFilter), - verbose = false, + verbose = true, updateFormat = None, ) private val updatesRequest = update_service.GetUpdatesRequest( beginExclusive = 0, endInclusive = None, - filter = None, - verbose = false, updateFormat = Some( UpdateFormat( includeTransactions = Some( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/OpenApiTests.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/OpenApiTests.scala index 4c780d0f5d..96e030d7ab 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/OpenApiTests.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/OpenApiTests.scala @@ -5,13 +5,13 @@ package com.digitalasset.canton.integration.tests.jsonapi import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.http.json.v2.ApiDocsGenerator -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import org.apache.pekko.http.scaladsl.model.{StatusCodes, Uri} final class OpenApiTests extends AbstractHttpServiceIntegrationTestFuns with HttpServiceUserFixture.UserToken { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) private val apiDocsGenerator = new ApiDocsGenerator(loggerFactory) private val expectedOpenApiServices = Seq( "commands", @@ -19,6 +19,7 @@ final class OpenApiTests "idps", "interactive-submission", "packages", + "package-vetting", "parties", "state", "updates", diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/SecureHttpServiceIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/SecureHttpServiceIntegrationTest.scala deleted file mode 100644 index 9e3067e623..0000000000 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/SecureHttpServiceIntegrationTest.scala +++ /dev/null @@ -1,372 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.integration.tests.jsonapi - -import com.daml.ledger.api.v2.value as v -import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.http -import com.digitalasset.canton.http.CommandMeta -import com.digitalasset.canton.http.json.JsonError -import com.digitalasset.canton.http.json.SprayJson.decode as jdecode -import com.digitalasset.canton.http.util.Logging.instanceUUIDLogCtx -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer -import com.digitalasset.canton.integration.tests.jsonapi.AbstractHttpServiceIntegrationTestFuns.HttpServiceTestFixtureData -import com.digitalasset.canton.integration.tests.jsonapi.HttpServiceTestFixture.UseTls -import com.digitalasset.canton.ledger.api.refinements.ApiTypes as lar -import com.digitalasset.canton.topology.SynchronizerId -import com.digitalasset.daml.lf.data.Ref -import com.digitalasset.daml.lf.value.test.TypedValueGenerators.ValueAddend as VA -import org.apache.pekko.http.scaladsl.model.{StatusCodes, Uri} -import org.scalatest.Assertion -import scalaz.std.scalaFuture.* -import scalaz.syntax.apply.* -import scalaz.syntax.bifunctor.* -import scalaz.syntax.show.* -import scalaz.{EitherT, \/, \/-} -import shapeless.record.Record as ShRecord -import spray.json.JsValue - -import scala.concurrent.Future - -/** Tests that are exercising features independently of both user authentication and the query - * store. - */ -class SecureHttpServiceIntegrationTest - extends AbstractHttpServiceIntegrationTest - with AbstractHttpServiceIntegrationTestFunsUserToken { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) - - import SecureHttpServiceIntegrationTest.* - import com.digitalasset.canton.http.json.JsonProtocol.* - import com.digitalasset.canton.integration.tests.jsonapi.AbstractHttpServiceIntegrationTestFuns.ciouDar - - override def useTls: UseTls = UseTls.Tls - - "query with invalid JSON query should return error" in httpTestFixture { fixture => - fixture - .postJsonStringRequest(Uri.Path("/v1/query"), "{NOT A VALID JSON OBJECT") - .parseResponse[JsValue] - .map(inside(_) { case http.ErrorResponse(_, _, StatusCodes.BadRequest, _) => - succeed - }): Future[Assertion] - } - - "should be able to serialize and deserialize synchronizer commands" in httpTestFixture { - fixture => - (testCreateCommandEncodingDecoding(fixture) *> - testExerciseCommandEncodingDecoding(fixture)): Future[Assertion] - } - - private def testCreateCommandEncodingDecoding( - fixture: HttpServiceTestFixtureData - ): Future[Assertion] = instanceUUIDLogCtx { implicit lc => - import fixture.{uri, encoder, decoder} - import com.digitalasset.canton.http.util.ErrorOps.* - import com.daml.jwt.Jwt - - val command0: http.CreateCommand[v.Record, http.ContractTypeId.Template.RequiredPkg] = - iouCreateCommand(http.Party("Alice")) - - type F[A] = EitherT[Future, JsonError, A] - val x: F[Assertion] = for { - jsVal <- EitherT.either( - encoder.encodeCreateCommand(command0).liftErr(JsonError) - ): F[JsValue] - command1 <- (EitherT.rightT(fixture.jwt(uri)): F[Jwt]) - .flatMap(decoder.decodeCreateCommand(jsVal, _)) - } yield command1.bimap(removeRecordId, identity) should ===(command0) - - (x.run: Future[JsonError \/ Assertion]).map(_.fold(e => fail(e.shows), identity)) - } - - private def testExerciseCommandEncodingDecoding( - fixture: HttpServiceTestFixtureData - ): Future[Assertion] = { - import fixture.{uri, encoder, decoder} - val command0 = 
iouExerciseTransferCommand(lar.ContractId("#a-contract-ID"), http.Party("Bob")) - val jsVal: JsValue = encodeExercise(encoder)(command0) - val command1 = - fixture.jwt(uri).flatMap(decodeExercise(decoder, _)(jsVal)) - command1.map(_.bimap(removeRecordId, identity) should ===(command0)) - } - - "submit commands" should { - import com.digitalasset.canton.integration.tests.jsonapi.AbstractHttpServiceIntegrationTestFuns.{ - UriFixture, - EncoderFixture, - } - def submitCommand( - fixture: UriFixture with EncoderFixture, - synchronizerId: Option[SynchronizerId], - ) = - for { - aliceH <- fixture.getUniquePartyAndAuthHeaders("Alice") - (alice, aliceHeaders) = aliceH - response <- postCreateCommand( - iouCommand(alice, TpId.IIou.TestIIou) - .copy(meta = - Some( - CommandMeta( - None, - None, - None, - None, - None, - None, - None, - synchronizerId, - None, - ) - ) - ), - fixture, - aliceHeaders, - ) - } yield response - - "succeed for no synchronizer id (automatic synchronizer routing)" in httpTestFixture { - fixture => - for { - _ <- uploadPackage(fixture)(ciouDar) - response <- submitCommand(fixture, synchronizerId = None) - } yield inside(response) { case http.OkResponse(_, None, StatusCodes.OK) => - succeed - } - } - - "succeed for valid synchronizer id" in httpTestFixture { fixture => - for { - _ <- uploadPackage(fixture)(ciouDar) - response <- submitCommand(fixture, synchronizerId = Some(validSynchronizerId)) - } yield inside(response) { case http.OkResponse(_, None, StatusCodes.OK) => - succeed - } - } - - "fail for invalid synchronizer id" in httpTestFixture { fixture => - val invalidId = SynchronizerId.tryFromString("invalid::synchronizerid") - for { - _ <- uploadPackage(fixture)(ciouDar) - response <- submitCommand( - fixture, - synchronizerId = Some(invalidId), - ) - } yield inside(response) { - case http.ErrorResponse(Seq(onlyError), None, StatusCodes.BadRequest, Some(lapiError)) => - val errMsg = s"Cannot submit transaction to prescribed synchronizer `$invalidId`" - onlyError should include(errMsg) - lapiError.message should include(errMsg) - } - } - } - - "exercise interface choices" should { - import com.digitalasset.canton.integration.tests.jsonapi.AbstractHttpServiceIntegrationTestFuns.{ - UriFixture, - EncoderFixture, - } - - def createIouAndExerciseTransfer( - fixture: UriFixture with EncoderFixture, - initialTplId: http.ContractTypeId.Template.RequiredPkg, - exerciseTid: http.ContractTypeId.RequiredPkg, - choice: TExercise[_] = tExercise(choiceArgType = echoTextVA)(echoTextSample), - ) = for { - aliceH <- fixture.getUniquePartyAndAuthHeaders("Alice") - (alice, aliceHeaders) = aliceH - createTest <- postCreateCommand( - iouCommand(alice, initialTplId), - fixture, - aliceHeaders, - ) - testIIouID = resultContractId(createTest) - exerciseTest <- fixture - .postJsonRequest( - Uri.Path("/v1/exercise"), - encodeExercise(fixture.encoder)( - iouTransfer( - http.EnrichedContractId(Some(exerciseTid), testIIouID), - choice, - ) - ), - aliceHeaders, - ) - .parseResponse[http.ExerciseResponse[JsValue]] - } yield exerciseTest - - def exerciseSucceeded[A]( - exerciseTest: http.SyncResponse[http.ExerciseResponse[JsValue]] - ) = - inside(exerciseTest) { case http.OkResponse(er, None, StatusCodes.OK) => - inside(jdecode[String](er.exerciseResult)) { case \/-(decoded) => decoded } - } - - "templateId = interface ID" in httpTestFixture { fixture => - for { - _ <- uploadPackage(fixture)(ciouDar) - result <- createIouAndExerciseTransfer( - fixture, - initialTplId = TpId.IIou.TestIIou, - // whether 
we can exercise by interface-ID - exerciseTid = TpId.IIou.IIou, - ) map exerciseSucceeded - } yield result should ===("Bob invoked IIou.Transfer") - } - - // ideally we would upload IIou.daml, then force a reload, then upload ciou; - // however tests currently don't play well with reload -SC - "templateId = template ID" in httpTestFixture { fixture => - for { - _ <- uploadPackage(fixture)(ciouDar) - result <- createIouAndExerciseTransfer( - fixture, - initialTplId = TpId.CIou.CIou, - // whether we can exercise inherited by concrete template ID - exerciseTid = TpId.CIou.CIou, - ) map exerciseSucceeded - } yield result should ===("Bob invoked IIou.Transfer") - } - - "templateId = template ID, choiceInterfaceId = interface ID" in httpTestFixture { fixture => - for { - _ <- uploadPackage(fixture)(ciouDar) - result <- createIouAndExerciseTransfer( - fixture, - initialTplId = TpId.CIou.CIou, - exerciseTid = TpId.CIou.CIou, - choice = tExercise(choiceInterfaceId = Some(TpId.IIou.IIou), choiceArgType = echoTextVA)( - echoTextSample - ), - ) map exerciseSucceeded - } yield result should ===("Bob invoked IIou.Transfer") - } - - "templateId = template, no choiceInterfaceId, picks template Overridden" in httpTestFixture { - fixture => - for { - _ <- uploadPackage(fixture)(ciouDar) - result <- createIouAndExerciseTransfer( - fixture, - initialTplId = TpId.CIou.CIou, - exerciseTid = TpId.CIou.CIou, - choice = tExercise(choiceName = "Overridden", choiceArgType = echoTextPairVA)( - ShRecord(echo = ShRecord(_1 = "yes", _2 = "no")) - ), - ) map exerciseSucceeded - } yield result should ===("(\"yes\",\"no\") invoked CIou.Overridden") - } - - "templateId = template, choiceInterfaceId = interface, picks interface Overridden" in httpTestFixture { - fixture => - for { - _ <- uploadPackage(fixture)(ciouDar) - result <- createIouAndExerciseTransfer( - fixture, - initialTplId = TpId.CIou.CIou, - exerciseTid = TpId.CIou.CIou, - choice = tExercise(Some(TpId.Transferrable.Transferrable), "Overridden", echoTextVA)( - ShRecord(echo = "yesyes") - ), - ) map exerciseSucceeded - } yield result should ===("yesyes invoked Transferrable.Overridden") - } - - "templateId = template, no choiceInterfaceId, ambiguous" in httpTestFixture { fixture => - for { - _ <- uploadPackage(fixture)(ciouDar) - response <- createIouAndExerciseTransfer( - fixture, - initialTplId = TpId.CIou.CIou, - exerciseTid = TpId.CIou.CIou, - choice = tExercise(choiceName = "Ambiguous", choiceArgType = echoTextVA)( - ShRecord(echo = "ambiguous-test") - ), - ) - } yield inside(response) { - case http.ErrorResponse(Seq(onlyError), None, StatusCodes.BadRequest, None) => - (onlyError should include regex - raw"Cannot resolve Choice Argument type, given: \(TemplateId\([0-9a-f]{64},CIou,CIou\), Ambiguous\)") - } - } - } - - "fail to exercise by key with interface ID" in httpTestFixture { fixture => - import fixture.encoder - for { - _ <- uploadPackage(fixture)(ciouDar) - aliceH <- fixture.getUniquePartyAndAuthHeaders("Alice") - (alice, aliceHeaders) = aliceH - createTest <- postCreateCommand( - iouCommand(alice, TpId.CIou.CIou), - fixture, - aliceHeaders, - ) - _ = createTest.status should ===(StatusCodes.OK) - exerciseTest <- fixture - .postJsonRequest( - Uri.Path("/v1/exercise"), - encodeExercise(encoder)( - iouTransfer( - http.EnrichedContractKey( - TpId.unsafeCoerce[http.ContractTypeId.Template, Ref.PackageRef](TpId.IIou.IIou), - v.Value(v.Value.Sum.Party(http.Party unwrap alice)), - ), - tExercise()(ShRecord(echo = "bob")), - ) - ), - aliceHeaders, - ) - 
.parseResponse[JsValue] - } yield inside(exerciseTest) { - case http.ErrorResponse(Seq(lookup), None, StatusCodes.BadRequest, _) => - lookup should include regex raw"Cannot resolve template ID, given: TemplateId\(\w+,IIou,IIou\)" - } - } - - private[this] def iouTransfer[Inj]( - locator: http.ContractLocator[v.Value], - choice: TExercise[Inj], - ) = { - import choice.{choiceInterfaceId, choiceName, choiceArgType, choiceArg} - val payload = argToApi(choiceArgType)(choiceArg) - http.ExerciseCommand( - locator, - http.Choice(choiceName), - v.Value(v.Value.Sum.Record(payload)), - choiceInterfaceId, - None, - ) - } -} - -object SecureHttpServiceIntegrationTest { - private[this] val irrelevant = Ref.Identifier assertFromString "none:Discarded:Identifier" - - private val (_, echoTextVA) = - VA.record(irrelevant, ShRecord(echo = VA.text)) - - private val (_, echoTextPairVA) = - VA.record( - irrelevant, - ShRecord(echo = VA.record(irrelevant, ShRecord(_1 = VA.text, _2 = VA.text))._2), - ) - - private val echoTextSample: echoTextVA.Inj = ShRecord(echo = "Bob") - - private def tExercise( - choiceInterfaceId: Option[http.ContractTypeId.Interface.RequiredPkg] = None, - choiceName: String = "Transfer", - choiceArgType: VA = echoTextVA, - )( - choiceArg: choiceArgType.Inj - ): TExercise[choiceArgType.Inj] = - TExercise(choiceInterfaceId, choiceName, choiceArgType, choiceArg) - - private final case class TExercise[Inj]( - choiceInterfaceId: Option[http.ContractTypeId.Interface.RequiredPkg], - choiceName: String, - choiceArgType: VA.Aux[Inj], - choiceArg: Inj, - ) -} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/TlsTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/TlsTest.scala index f0b7dafc82..00ef65ce88 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/TlsTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/TlsTest.scala @@ -3,25 +3,21 @@ package com.digitalasset.canton.integration.tests.jsonapi +import com.daml.ledger.api.v2.admin.party_management_service.GetParticipantIdResponse import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* import com.daml.test.evidence.tag.Security.SecurityTest import com.daml.test.evidence.tag.Security.SecurityTest.Property.Authenticity import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.http -import com.digitalasset.canton.http.json.JsonProtocol.* -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.http.json.v2.JsPartyManagementCodecs.* +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.jsonapi.HttpServiceTestFixture.UseTls -import org.apache.pekko.http.scaladsl.model.{StatusCodes, Uri} -import org.scalatest.Assertion -import spray.json.JsValue - -import scala.concurrent.Future +import org.apache.pekko.http.scaladsl.model.Uri @SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements")) class TlsTest extends AbstractHttpServiceIntegrationTestFuns with AbstractHttpServiceIntegrationTestFunsUserToken { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) val authenticationSecurity: SecurityTest = SecurityTest(property = Authenticity, asset = "HTTP JSON API Service") @@ -33,10 +29,8 
@@ class TlsTest "A client request returns OK with enabled TLS" ) in withHttpService() { fixture => fixture - .getRequestWithMinimumAuth[Vector[JsValue]](Uri.Path("/v1/query")) - .map(inside(_) { case http.OkResponse(vector, None, StatusCodes.OK) => - vector should have size 0L - }): Future[Assertion] + .getRequestWithMinimumAuth[GetParticipantIdResponse](Uri.Path("/v2/parties/participant-id")) + .map(_.participantId should (not be empty)) } } } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/WebsocketServiceIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/WebsocketServiceIntegrationTest.scala deleted file mode 100644 index 9fddef834e..0000000000 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/WebsocketServiceIntegrationTest.scala +++ /dev/null @@ -1,1703 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.integration.tests.jsonapi - -import com.daml.ledger.api.v2.admin.participant_pruning_service as PruneGrpc -import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* -import com.daml.test.evidence.tag.Security.SecurityTest.Property.Authorization -import com.daml.test.evidence.tag.Security.{Attack, SecurityTest} -import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.http.json.{JsonProtocol, SprayJson} -import com.digitalasset.canton.http.{ - Choice, - ContractId, - ContractKeyStreamRequest, - ContractLocator, - ContractTypeId as HttpContractTypeId, - CreateCommand, - EnrichedContractId, - EnrichedContractKey, - ErrorMessages, - ErrorResponse, - ExerciseCommand, - Offset, - Party, - ResolvedQuery, - WebsocketConfig, -} -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer -import com.digitalasset.canton.integration.tests.jsonapi.AbstractHttpServiceIntegrationTestFuns.{ - UriFixture, - packageIdToName, -} -import com.digitalasset.canton.integration.tests.jsonapi.HttpServiceTestFixture.{ - accountCreateCommand, - sharedAccountCreateCommand, -} -import com.digitalasset.canton.integration.tests.jsonapi.WebsocketTestFixture.* -import com.digitalasset.daml.lf.data.Ref.PackageRef -import org.apache.pekko.NotUsed -import org.apache.pekko.http.scaladsl.Http -import org.apache.pekko.http.scaladsl.model.ws.{ - Message, - PeerClosedConnectionException, - TextMessage, - WebSocketRequest, -} -import org.apache.pekko.http.scaladsl.model.{HttpHeader, StatusCodes, Uri} -import org.apache.pekko.stream.scaladsl.{Keep, Sink, Source} -import org.apache.pekko.stream.{KillSwitches, UniqueKillSwitch} -import org.scalatest.* -import scalaz.std.list.* -import scalaz.std.option.* -import scalaz.std.scalaFuture.* -import scalaz.std.vector.* -import scalaz.syntax.std.option.* -import scalaz.syntax.tag.* -import scalaz.syntax.traverse.* -import scalaz.{-\/, \/-} -import spray.json.{ - DeserializationException, - JsArray, - JsNull, - JsObject, - JsString, - JsValue, - enrichAny as `sj enrichAny`, -} - -import scala.concurrent.Future -import scala.util.chaining.* - -@SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements")) -class WebsocketServiceIntegrationTest - extends AbstractHttpServiceIntegrationTestFuns - with AbstractHttpServiceIntegrationTestFunsUserToken { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) - - private 
val authorizationSecurity = - SecurityTest( - property = Authorization, - asset = s"HTTP JSON API Service: WebsocketService", - ) - - private def attackUnauthorized(threat: String): Attack = Attack( - actor = s"Websocket client", - threat = threat, - mitigation = s"Refuse call by the client with UNAUTHORIZED", - ) - - override def wsConfig: Option[WebsocketConfig] = Some(WebsocketConfig()) - - private def templateIdWithPackageName(t: HttpContractTypeId.RequiredPkg): String = { - val packageName: PackageRef.Name = t.packageId match { - case name: PackageRef.Name => name - case PackageRef.Id(id) => PackageRef.Name(packageIdToName(id)) - } - s"$packageName:${t.moduleName}:${t.entityName}" - } - - private val baseQueryInput: Source[Message, NotUsed] = - Source.single( - TextMessage.Strict( - s"""{"templateIds": ["${templateIdWithPackageName(TpId.Account.Account)}"]}""" - ) - ) - - private val fetchRequest = - s"""[{"templateId": "${templateIdWithPackageName( - TpId.Account.Account - )}", "key": ["Alice", "abc123"]}]""" - - private val baseFetchInput: Source[Message, NotUsed] = - Source.single(TextMessage.Strict(fetchRequest)) - - private def heartbeatOffset(event: JsValue) = event match { - case ContractDelta(Vector(), Vector(), Some(offset)) => offset - case _ => throw new IllegalArgumentException(s"Expected heartbeat but got $event") - } - - private def immediateQuery(fixture: UriFixture, scenario: SimpleScenario): Future[Seq[String]] = - for { - jwt <- fixture.jwt(fixture.uri) - webSocketFlow = - Http().webSocketClientFlow( - WebSocketRequest( - uri = fixture.uri.copy(scheme = "ws").withPath(scenario.path), - subprotocol = validSubprotocol(jwt), - ) - ) - ran <- scenario.input via webSocketFlow runWith collectResultsAsTextMessageSkipOffsetTicks - } yield ran - - List( - SimpleScenario("query", Uri.Path("/v1/stream/query"), baseQueryInput), - SimpleScenario("fetch", Uri.Path("/v1/stream/fetch"), baseFetchInput), - ).foreach { scenario => - s"${scenario.id} request with valid protocol token should allow client subscribe to stream" taggedAs authorizationSecurity - .setHappyCase( - "Websocket client with valid protocol token can subscribe to stream" - ) in httpTestFixture { fixture => - import fixture.uri - fixture - .jwt(uri) - .flatMap(jwt => - wsConnectRequest( - uri.copy(scheme = "ws").withPath(scenario.path), - validSubprotocol(jwt), - scenario.input, - )._1 map (x => x.response.status shouldBe StatusCodes.SwitchingProtocols) - ) - } - - s"${scenario.id} request with invalid protocol token should be denied" taggedAs authorizationSecurity - .setAttack(attackUnauthorized("Present invalid protocol token")) in withHttpService { - (uri, _, _) => - wsConnectRequest( - uri.copy(scheme = "ws").withPath(scenario.path), - Option("foo"), - scenario.input, - )._1 map (x => x.response.status shouldBe StatusCodes.Unauthorized) - } - - s"${scenario.id} request without protocol token should be denied" taggedAs authorizationSecurity - .setAttack(attackUnauthorized("Present no protocol token")) in withHttpService { - (uri, _, _) => - wsConnectRequest( - uri.copy(scheme = "ws").withPath(scenario.path), - None, - scenario.input, - )._1 map (x => x.response.status shouldBe StatusCodes.Unauthorized) - } - - s"two ${scenario.id} requests over the same WebSocket connection are NOT allowed" taggedAs authorizationSecurity - .setAttack( - Attack( - actor = "Websocket client", - threat = "Sends two requests over the same connection", - mitigation = "Refuse call by the client with BADREQUEST", - ) - ) in 
httpTestFixture { fixture => - immediateQuery(fixture, scenario.mapInput(_.mapConcat(x => List(x, x)))) - .map { msgs => - inside(msgs) { case Seq(errorMsg) => - val error = decodeErrorResponse(errorMsg) - error shouldBe ErrorResponse( - List("Multiple requests over the same WebSocket connection are not allowed."), - None, - StatusCodes.BadRequest, - ) - } - } - } - } - - List( - SimpleScenario( - "query", - Uri.Path("/v1/stream/query"), - Source.single(TextMessage.Strict("""{"templateIds": ["ZZ:AA:BB"]}""")), - ), - SimpleScenario( - "fetch", - Uri.Path("/v1/stream/fetch"), - Source.single(TextMessage.Strict("""[{"templateId": "ZZ:AA:BB", "key": ["k", "v"]}]""")), - ), - ).foreach { scenario => - s"${scenario.id} report error when cannot resolve any template ID" in httpTestFixture { - fixture => - immediateQuery(fixture, scenario) - .map { msgs => - inside(msgs) { case Seq(errorMsg) => - val error = decodeErrorResponse(errorMsg) - error shouldBe ErrorResponse( - List(ErrorMessages.cannotResolveAnyTemplateId), - None, - StatusCodes.BadRequest, - ) - } - } - } - } - - "query error when queries with more than 1 interface id" in httpTestFixture { fixture => - import AbstractHttpServiceIntegrationTestFuns.ciouDar - val queryInput = Source.single( - TextMessage.Strict( - s"""[{"templateIds": ["${templateIdWithPackageName( - TpId.Account.IAccount - )}"]}, {"templateIds": ["${templateIdWithPackageName(TpId.IIou.IIou)}"]}]""" - ) - ) - val scenario = SimpleScenario("", Uri.Path("/v1/stream/query"), queryInput) - for { - _ <- uploadPackage(fixture)(ciouDar) - msgs <- immediateQuery(fixture, scenario) - } yield inside(msgs) { case Seq(errorMsg) => - val error = decodeErrorResponse(errorMsg) - error shouldBe ErrorResponse( - List(ResolvedQuery.CannotQueryManyInterfaceIds.errorMsg), - None, - StatusCodes.BadRequest, - ) - } - } - - "query error when queries with both template and interface id" in httpTestFixture { fixture => - val queryInput = Source.single( - TextMessage.Strict( - s"""[{"templateIds": ["${templateIdWithPackageName( - TpId.Account.IAccount - )}"]}, {"templateIds": ["${templateIdWithPackageName(TpId.Account.Account)}"]}]""" - ) - ) - val scenario = SimpleScenario("", Uri.Path("/v1/stream/query"), queryInput) - for { - msgs <- immediateQuery(fixture, scenario) - } yield inside(msgs) { case Seq(errorMsg) => - val error = decodeErrorResponse(errorMsg) - error shouldBe ErrorResponse( - List(ResolvedQuery.CannotQueryBothTemplateIdsAndInterfaceIds.errorMsg), - None, - StatusCodes.BadRequest, - ) - } - } - - "transactions when command create is completed from" should { - "query endpoint" in httpTestFixture { fixture => - import fixture.uri - for { - aliceHeaders <- fixture.getUniquePartyAndAuthHeaders("Alice") - (alice, headers) = aliceHeaders - _ <- initialIouCreate(uri, alice, headers) - jwt <- jwtForParties(uri)(List(alice), List()) - clientMsg <- singleClientQueryStream( - jwt, - uri, - s"""{"templateIds": ["${templateIdWithPackageName(TpId.Iou.Iou)}"]}""", - ).take(2) - .runWith(collectResultsAsTextMessage) - } yield inside(clientMsg) { case result +: heartbeats => - result should include(s""""issuer":"$alice"""") - result should include(""""amount":"999.99"""") - Inspectors.forAll(heartbeats)(assertHeartbeat) - } - } - - // TODO(https://github.com/DACH-NY/canton/issues/16065): re-enable when keys are supported in 3.x - "fetch endpoint" ignore httpTestFixture { fixture => - import fixture.uri - for { - aliceHeaders <- fixture.getUniquePartyAndAuthHeaders("Alice") - (alice, headers) = 
aliceHeaders - _ <- initialAccountCreate(fixture, alice, headers) - jwt <- jwtForParties(uri)(List(alice), Nil) - fetchRequest = - s"""[{"templateId": "${templateIdWithPackageName( - TpId.Account.Account - )}", "key": ["$alice", "abc123"]}]""" - clientMsg <- singleClientFetchStream(jwt, uri, fetchRequest) - .take(2) - .runWith(collectResultsAsTextMessage) - } yield inside(clientMsg) { case result +: heartbeats => - result should include(s""""owner":"$alice"""") - result should include(""""number":"abc123"""") - result should not include (""""offset":"""") - Inspectors.forAll(heartbeats)(assertHeartbeat) - } - } - } - - "interface sub" should { - "query endpoint" in httpTestFixture { fixture => - import fixture.uri - import com.digitalasset.canton.http.json.JsonProtocol.* - - def resp( - owner: Party, - headers: List[HttpHeader], - kill: UniqueKillSwitch, - ): Sink[JsValue, Future[ShouldHaveEnded]] = { - def createAccount( - owner: Party, - amount: String, - headers: List[HttpHeader], - ) = postCreateCommand( - accountCreateCommand(owner, amount), - fixture, - headers = headers, - ) - - val dslSyntax = Consume.syntax[JsValue] - import dslSyntax.* - - def readAndExtract( - record: AccountRecord - ): Consume.FCC[JsValue, CreatedAccountEvent] = for { - _ <- liftF(createAccount(owner, record.amount, headers)) - AccountQuery(event) <- readOne - } yield event - .tap(_.created.record should ===(record)) - .tap(e => - HttpContractTypeId.withPkgRef(e.created.templateId) should ===(TpId.Account.IAccount) - ) - - Consume.interpret( - for { - ContractDelta(Vector(), _, Some(offset)) <- readOne - Seq(createdAccountEvent1, _, _) <- - List( - AccountRecord("abc123", true, true), - AccountRecord("abc456", true, false), - AccountRecord("def123", false, true), - ).traverse(readAndExtract) - - _ <- liftF( - fixture.postJsonRequest( - Uri.Path("/v1/exercise"), { - val ecid: ContractLocator[JsValue] = - EnrichedContractId( - Some(TpId.Account.IAccount), - createdAccountEvent1.created.contractId, - ) - ExerciseCommand( - ecid, - choice = Choice("ChangeAmount"), - argument = Map("newAmount" -> "abcxx").toJson, - Option.empty[HttpContractTypeId.RequiredPkg], - None, - ).toJson - }, - headers, - ) map (_._1.isSuccess() shouldBe true) - ) - - evtsWrapper @ ContractDelta( - Vector(_), - Vector(observeConsumed), - Some(lastSeenOffset), - ) <- - readOne - liveStartOffset = { - observeConsumed.contractId should ===(createdAccountEvent1.created.contractId) - inside(evtsWrapper) { case JsObject(obj) => - inside(obj get "events") { - case Some( - JsArray( - Vector( - Archived( - ContractIdField( - JsString(archivedContractId), - TemplateIdField(ContractTypeId(archivedTemplateId), _), - ), - _, - ), - Created( - CreatedAccount( - CreatedAccountContract(_, createdTemplateId, createdRecord) - ), - MatchedQueries(NumList(matchedQueries), _), - ), - ) - ) - ) => - archivedContractId should ===(createdAccountEvent1.created.contractId) - HttpContractTypeId.withPkgRef(archivedTemplateId) should ===( - TpId.Account.IAccount - ) - - HttpContractTypeId.withPkgRef(createdTemplateId) should ===( - TpId.Account.IAccount - ) - - createdRecord should ===(AccountRecord("abcxx", true, false)) - matchedQueries shouldBe Vector(0) - } - } - offset - } - _ = kill.shutdown() - heartbeats <- drain - hbCount = (heartbeats.iterator.map(heartbeatOffset).toSet + lastSeenOffset).size - 1 - } yield ShouldHaveEnded( - liveStartOffset = liveStartOffset, - msgCount = 2 + hbCount, - lastSeenOffset = lastSeenOffset, - ) - ) - } - - for { - (alice, 
aliceAuthHeaders) <- fixture.getUniquePartyAndAuthHeaders("Alice") - jwt <- jwtForParties(uri)(List(alice), List()) - (kill, source) = singleClientQueryStream( - jwt = jwt, - serviceUri = uri, - query = s"""[{"templateIds": ["${templateIdWithPackageName(TpId.Account.IAccount)}"]}]""", - ) - .viaMat(KillSwitches.single)(Keep.right) - .preMaterialize() - ShouldHaveEnded(_, msgCount, _) <- source via parseResp runWith resp( - alice, - aliceAuthHeaders, - kill, - ) - } yield { - msgCount should ===(2) - } - } - } - - "warn on unknown template IDs from" should { - "query endpoint" in httpTestFixture { fixture => - import fixture.uri - for { - aliceHeaders <- fixture.getUniquePartyAndAuthHeaders("Alice") - (alice, headers) = aliceHeaders - _ <- initialIouCreate(uri, alice, headers) - - clientMsg <- jwtForParties(uri)(List(alice), List()) - .flatMap( - singleClientQueryStream( - _, - uri, - s"""{"templateIds": ["${templateIdWithPackageName( - TpId.Iou.Iou - )}", "UnknownPkg:Unknown:Template"]}""", - ) - .take(3) - .runWith(collectResultsAsTextMessage) - ) - } yield inside(clientMsg) { case warning +: result +: heartbeats => - warning should include("\"warnings\":{\"unknownTemplateIds\":[\"Unk") - result should include(s""""issuer":"$alice"""") - Inspectors.forAll(heartbeats)(assertHeartbeat) - } - } - - // TODO(https://github.com/DACH-NY/canton/issues/16065): re-enable when keys are supported in 3.x - "fetch endpoint" ignore httpTestFixture { fixture => - import fixture.uri - for { - aliceHeaders <- fixture.getUniquePartyAndAuthHeaders("Alice") - (alice, headers) = aliceHeaders - _ <- initialAccountCreate(fixture, alice, headers) - - clientMsg <- jwtForParties(uri)(List(alice), List()).flatMap( - singleClientFetchStream( - _, - uri, - s"""[{"templateId": "${templateIdWithPackageName( - TpId.Account.Account - )}", "key": ["$alice", "abc123"]}, {"templateId": "UnknownPkg:Unknown:Template", "key": ["$alice", "abc123"]}]""", - ).take(3) - .runWith(collectResultsAsTextMessage) - ) - } yield inside(clientMsg) { case warning +: result +: heartbeats => - warning should include("""{"warnings":{"unknownTemplateIds":["Unk""") - result should include(s""""owner":"$alice"""") - result should include(""""number":"abc123"""") - Inspectors.forAll(heartbeats)(assertHeartbeat) - } - } - } - - "error msg when receiving malformed message," should { - "query endpoint" in httpTestFixture { fixture => - import fixture.uri - fixture - .jwt(uri) - .flatMap( - singleClientQueryStream(_, uri, "{}") - .runWith(collectResultsAsTextMessageSkipOffsetTicks) - ) - .map { result => - result should have size 1 - val errorResponse = decodeErrorResponse(result.head) - errorResponse.status shouldBe StatusCodes.BadRequest - errorResponse.errors should have size 1 - } - } - - "fetch endpoint" in httpTestFixture { fixture => - import fixture.uri - fixture - .jwt(uri) - .flatMap( - singleClientFetchStream(_, uri, """[abcdefg!]""") - .runWith(collectResultsAsTextMessageSkipOffsetTicks) - ) - .map { result => - result should have size 1 - val errorResponse = decodeErrorResponse(result.head) - errorResponse.status shouldBe StatusCodes.BadRequest - errorResponse.errors should have size 1 - } - } - } - - private def exercisePayload(cid: ContractId, amount: BigDecimal = BigDecimal("42.42")) = { - import JsonProtocol.* - ExerciseCommand( - EnrichedContractId(Some(TpId.Iou.Iou), cid): ContractLocator[JsValue], - Choice("Iou_Split"), - Map("splitAmount" -> amount).toJson, - Option.empty[HttpContractTypeId.RequiredPkg], - None, - ).toJson - } - - 
"matchedQueries should be correct for multiqueries with per-query offsets" in httpTestFixture { - fixture => - import fixture.uri - def resp( - iouCid: ContractId, - kill: UniqueKillSwitch, - ): Sink[JsValue, Future[Offset]] = { - val dslSyntax = Consume.syntax[JsValue] - import dslSyntax.* - Consume - .interpret( - for { - evtsWrapper @ ContractDelta(Vector((ctid, _)), Vector(), None) <- readOne - _ = { - (ctid: ContractId) shouldBe iouCid - inside(evtsWrapper) { case JsObject(obj) => - inside(obj get "events") { - case Some( - JsArray( - Vector( - Created(IouAmount(amt), MatchedQueries(NumList(ix), _)) - ) - ) - ) => - // matchedQuery should be 0 for the initial query supplied - Set((amt, ix)) should ===(Set((BigDecimal("999.99"), Vector(BigDecimal(0))))) - } - } - } - ContractDelta(Vector(), _, Some(offset)) <- readOne - - _ = kill.shutdown() - _ <- drain - - } yield offset - ) - } - - // initial query without offset - val query = s"""[{"templateIds": ["${templateIdWithPackageName(TpId.Iou.Iou)}"]}]""" - - for { - aliceHeaders <- fixture.getUniquePartyAndAuthHeaders("Alice") - (party, headers) = aliceHeaders - creation <- initialIouCreate(uri, party, headers) - iouCid = resultContractId(creation) - jwt <- jwtForParties(uri)(List(party), List()) - (kill, source) = singleClientQueryStream(jwt, uri, query) - .viaMat(KillSwitches.single)(Keep.right) - .preMaterialize() - lastSeen <- source via parseResp runWith resp(iouCid, kill) - - // construct a new multiquery with one of them having an offset while the other doesn't - multiquery = s"""[ - {"templateIds": ["${templateIdWithPackageName( - TpId.Iou.Iou - )}"], "offset": "${lastSeen.unwrap}"}, - {"templateIds": ["${templateIdWithPackageName(TpId.Iou.Iou)}"]} - ]""" - - clientMsg <- singleClientQueryStream(jwt, uri, multiquery) - .take(1) - .runWith(collectResultsAsTextMessageSkipOffsetTicks) - } yield inside(clientMsg) { case Vector(result) => - // we should expect to have matchedQueries [1] to indicate a match for the new template query only. 
- result should include(s"""$iouCid""") - result should include(""""matchedQueries":[1]""") - } - } - - "deltas as contracts are archived/created from" should { - "single-party query" in httpTestFixture { fixture => - import fixture.uri - val getAliceHeaders = fixture.getUniquePartyAndAuthHeaders("Alice") - - def resp( - iouCid: ContractId, - kill: UniqueKillSwitch, - ): Sink[JsValue, Future[ShouldHaveEnded]] = { - val dslSyntax = Consume.syntax[JsValue] - import dslSyntax.* - Consume - .interpret( - for { - ContractDelta(Vector((ctid, _)), Vector(), None) <- readOne - _ = (ctid: ContractId) shouldBe iouCid - _ <- liftF( - getAliceHeaders.flatMap { case (_, headers) => - fixture.postJsonRequest( - Uri.Path("/v1/exercise"), - exercisePayload(ctid), - headers, - ) map { case (statusCode, _) => - statusCode.isSuccess shouldBe true - } - } - ) - - ContractDelta(Vector(), _, Some(offset)) <- readOne - - (preOffset, consumedCtid) = (offset, ctid) - evtsWrapper @ ContractDelta( - Vector((fstId, fst), (sndId, snd)), - Vector(observeConsumed), - Some(lastSeenOffset), - ) <- readOne - (liveStartOffset, msgCount) = { - observeConsumed.contractId should ===(consumedCtid) - Set(fstId, sndId, consumedCtid) should have size 3 - inside(evtsWrapper) { case JsObject(obj) => - inside(obj get "events") { - case Some( - JsArray( - Vector( - Archived(_, _), - Created(IouAmount(amt1), MatchedQueries(NumList(ixes1), _)), - Created(IouAmount(amt2), MatchedQueries(NumList(ixes2), _)), - ) - ) - ) => - Set((amt1, ixes1), (amt2, ixes2)) should ===( - Set( - (BigDecimal("42.42"), Vector(BigDecimal(0))), - (BigDecimal("957.57"), Vector(BigDecimal(0))), - ) - ) - } - } - (preOffset, 2) - } - - _ = kill.shutdown() - heartbeats <- drain - hbCount = (heartbeats.iterator.map(heartbeatOffset).toSet + lastSeenOffset).size - 1 - } yield - // don't count empty events block if lastSeenOffset does not change - ShouldHaveEnded( - liveStartOffset = liveStartOffset, - msgCount = msgCount + hbCount, - lastSeenOffset = lastSeenOffset, - ) - ) - } - - val query = s"""[{"templateIds": ["${templateIdWithPackageName(TpId.Iou.Iou)}"]}]""" - - for { - aliceHeaders <- getAliceHeaders - (party, headers) = aliceHeaders - creation <- initialIouCreate(uri, party, headers) - iouCid = resultContractId(creation) - jwt <- jwtForParties(uri)(List(party), List()) - (kill, source) = singleClientQueryStream(jwt, uri, query) - .viaMat(KillSwitches.single)(Keep.right) - .preMaterialize() - lastState <- source via parseResp runWith resp(iouCid, kill) - liveOffset = inside(lastState) { case ShouldHaveEnded(liveStart, 2, lastSeen) => - lastSeen.unwrap should be > liveStart.unwrap - liveStart - } - rescan <- (singleClientQueryStream(jwt, uri, query, Some(liveOffset)) - via parseResp).take(1) runWith remainingDeltas - } yield inside(rescan) { - case (Vector((fstId, fst @ _), (sndId, snd @ _)), Vector(observeConsumed), Some(_)) => - Set(fstId, sndId, observeConsumed.contractId) should have size 3 - } - } - - "multi-party query" in httpTestFixture { fixture => - import fixture.uri - for { - aliceHeaders <- fixture.getUniquePartyAndAuthHeaders("Alice") - (alice, aliceAuthHeaders) = aliceHeaders - bobHeaders <- fixture.getUniquePartyAndAuthHeaders("Bob") - (bob, bobAuthHeaders) = bobHeaders - f1 = - postCreateCommand( - accountCreateCommand(alice, "abc123"), - fixture, - headers = aliceAuthHeaders, - ) - f2 = - postCreateCommand( - accountCreateCommand(bob, "def456"), - fixture, - headers = bobAuthHeaders, - ) - - query = - s"""[{"templateIds": 
["${templateIdWithPackageName(TpId.Account.Account)}"]}]""" - resp = ( - cid1: ContractId, - cid2: ContractId, - kill: UniqueKillSwitch, - ) => { - val dslSyntax = Consume.syntax[JsValue] - import dslSyntax.* - Consume.interpret( - for { - Vector((account1, _), (account2, _)) <- readAcsN(2) - _ = Seq(account1, account2) should contain theSameElementsAs Seq(cid1, cid2) - ContractDelta(Vector(), _, Some(liveStartOffset)) <- readOne - _ <- liftF( - postCreateCommand( - accountCreateCommand(alice, "abc234"), - fixture, - headers = aliceAuthHeaders, - ) - ) - ContractDelta(Vector((_, aliceAccount)), _, Some(_)) <- readOne - _ = inside(aliceAccount) { case JsObject(obj) => - inside((obj get "owner", obj get "number")) { - case (Some(JsString(owner)), Some(JsString(number))) => - owner shouldBe alice.unwrap - number shouldBe "abc234" - } - } - _ <- liftF( - postCreateCommand( - accountCreateCommand(bob, "def567"), - fixture, - headers = bobAuthHeaders, - ) - ) - ContractDelta(Vector((_, bobAccount)), _, Some(lastSeenOffset)) <- readOne - _ = inside(bobAccount) { case JsObject(obj) => - inside((obj get "owner", obj get "number")) { - case (Some(JsString(owner)), Some(JsString(number))) => - owner shouldBe bob.unwrap - number shouldBe "def567" - } - } - _ = kill.shutdown() - heartbeats <- drain - hbCount = (heartbeats.iterator.map(heartbeatOffset).toSet + lastSeenOffset).size - 1 - } yield ( - // don't count empty events block if lastSeenOffset does not change - ShouldHaveEnded( - liveStartOffset = liveStartOffset, - msgCount = 5 + hbCount, - lastSeenOffset = lastSeenOffset, - ), - ) - ) - } - - r1 <- f1 - cid1 = resultContractId(r1) - - r2 <- f2 - cid2 = resultContractId(r2) - - jwt <- jwtForParties(uri)(List(alice, bob), List()) - (kill, source) = singleClientQueryStream( - jwt, - uri, - query, - ).viaMat(KillSwitches.single)(Keep.right).preMaterialize() - lastState <- source via parseResp runWith resp(cid1, cid2, kill) - liveOffset = inside(lastState) { case ShouldHaveEnded(liveStart, 5, lastSeen) => - lastSeen.unwrap should be > liveStart.unwrap - liveStart - } - rescan <- jwtForParties(uri)(List(alice), List()).flatMap(jwt => - (singleClientQueryStream( - jwt, - uri, - query, - Some(liveOffset), - ) - via parseResp).take(1) runWith remainingDeltas - ) - } yield inside(rescan) { case (Vector(_), _, Some(_)) => - succeed - } - } - - // TODO(https://github.com/DACH-NY/canton/issues/16065): re-enable when keys are supported in 3.x - "fetch, filtering out phantom archives" ignore httpTestFixture { fixture => - import fixture.uri - for { - aliceHeaders <- fixture.getUniquePartyAndAuthHeaders("Alice") - (alice, headers) = aliceHeaders - templateId = TpId.Account.Account - fetchRequest = (contractIdAtOffset: Option[Option[ContractId]]) => { - import JsonProtocol.* - List( - Map( - "templateId" -> s"${templateIdWithPackageName(TpId.Account.Account)}".toJson, - "key" -> List(alice.unwrap, "abc123").toJson, - ) - ++ contractIdAtOffset - .map(ocid => contractIdAtOffsetKey -> ocid.toJson) - .toList - ).toJson.compactPrint - } - f1 = - postCreateCommand( - accountCreateCommand(alice, "abc123"), - fixture, - headers, - ) - f2 = - postCreateCommand( - accountCreateCommand(alice, "def456"), - fixture, - headers, - ) - - resp = ( - cid1: ContractId, - cid2: ContractId, - kill: UniqueKillSwitch, - ) => { - val dslSyntax = Consume.syntax[JsValue] - import dslSyntax.* - Consume.interpret( - for { - ContractDelta(Vector((cid, c)), Vector(), None) <- readOne - _ = (cid: ContractId) shouldBe cid1 - ctid <- 
liftF(postArchiveCommand(templateId, cid2, fixture, headers).flatMap { - case (statusCode, _) => - statusCode.isSuccess shouldBe true - postArchiveCommand(templateId, cid1, fixture, headers).map { - case (statusCode, _) => - statusCode.isSuccess shouldBe true - cid - } - }) - - ContractDelta(Vector(), _, Some(offset)) <- readOne - (off, archivedCid) = (offset, ctid) - - ContractDelta(Vector(), Vector(observeArchivedCid), Some(lastSeenOffset)) <- readOne - (liveStartOffset, msgCount) = { - observeArchivedCid.contractId shouldBe archivedCid - observeArchivedCid.contractId shouldBe cid1 - (off, 0) - } - - _ = kill.shutdown() - heartbeats <- drain - hbCount = (heartbeats.iterator.map(heartbeatOffset).toSet + lastSeenOffset).size - 1 - - } yield - // don't count empty events block if lastSeenOffset does not change - ShouldHaveEnded( - liveStartOffset = liveStartOffset, - msgCount = msgCount + hbCount, - lastSeenOffset = lastSeenOffset, - ) - ) - } - r1 <- f1 - cid1 = resultContractId(r1) - - r2 <- f2 - cid2 = resultContractId(r2) - jwt <- jwtForParties(uri)(List(alice), List()) - (kill, source) = singleClientFetchStream(jwt, uri, fetchRequest(None)) - .viaMat(KillSwitches.single)(Keep.right) - .preMaterialize() - - lastState <- source - .via(parseResp) runWith resp(cid1, cid2, kill) - - liveOffset = inside(lastState) { case ShouldHaveEnded(liveStart, 0, lastSeen) => - lastSeen.unwrap should be > liveStart.unwrap - liveStart - } - - // check contractIdAtOffsets' effects on phantom filtering - resumes <- Future.traverse(Seq((None, 2L), (Some(None), 0L), (Some(Some(cid1)), 1L))) { - case (abcHint, expectArchives) => - (singleClientFetchStream( - jwt, - uri, - fetchRequest(abcHint), - Some(liveOffset), - ) - via parseResp) - .take(2) - .runWith(remainingDeltas) - .map { case (creates, archives, _) => - creates shouldBe empty - archives should have size expectArchives - } - } - - } yield resumes.foldLeft(1 shouldBe 1)((_, a) => a) - } - - // TODO(https://github.com/DACH-NY/canton/issues/16065): re-enable when keys are supported in 3.x - "multi-party fetch-by-key" ignore httpTestFixture { fixture => - import fixture.uri - for { - aliceHeaders <- fixture.getUniquePartyAndAuthHeaders("Alice") - (alice, aliceAuthHeaders) = aliceHeaders - bobHeaders <- fixture.getUniquePartyAndAuthHeaders("Bob") - (bob, bobAuthHeaders) = bobHeaders - templateId = TpId.Account.Account - - f1 = - postCreateCommand( - accountCreateCommand(alice, "abc123"), - fixture, - headers = aliceAuthHeaders, - ) - f2 = - postCreateCommand( - accountCreateCommand(bob, "def456"), - fixture, - headers = bobAuthHeaders, - ) - - query = - s"""[ - {"templateId": "${templateIdWithPackageName( - TpId.Account.Account - )}", "key": ["$alice", "abc123"]}, - {"templateId": "${templateIdWithPackageName( - TpId.Account.Account - )}", "key": ["$bob", "def456"]} - ]""" - - resp = ( - cid1: ContractId, - cid2: ContractId, - kill: UniqueKillSwitch, - ) => { - val dslSyntax = Consume.syntax[JsValue] - import dslSyntax.* - Consume.interpret( - for { - Vector((account1, _), (account2, _)) <- readAcsN(2) - _ = Seq(account1, account2) should contain theSameElementsAs Seq(cid1, cid2) - ContractDelta(Vector(), _, Some(liveStartOffset)) <- readOne - _ <- liftF(postArchiveCommand(templateId, cid1, fixture, aliceAuthHeaders)) - ContractDelta(Vector(), Vector(archivedCid1), Some(_)) <- readOne - _ = archivedCid1.contractId shouldBe cid1 - _ <- liftF( - postArchiveCommand( - templateId, - cid2, - fixture, - headers = bobAuthHeaders, - ) - ) - 
ContractDelta(Vector(), Vector(archivedCid2), Some(lastSeenOffset)) <- readOne - _ = archivedCid2.contractId shouldBe cid2 - _ = kill.shutdown() - heartbeats <- drain - hbCount = (heartbeats.iterator.map(heartbeatOffset).toSet + lastSeenOffset).size - 1 - } yield ( - // don't count empty events block if lastSeenOffset does not change - ShouldHaveEnded( - liveStartOffset = liveStartOffset, - msgCount = 5 + hbCount, - lastSeenOffset = lastSeenOffset, - ), - ) - ) - } - r1 <- f1 - cid1 = resultContractId(r1) - - r2 <- f2 - cid2 = resultContractId(r2) - - jwt <- jwtForParties(uri)(List(alice, bob), List()) - (kill, source) = singleClientFetchStream( - jwt, - uri, - query, - ).viaMat(KillSwitches.single)(Keep.right).preMaterialize() - lastState <- source via parseResp runWith resp(cid1, cid2, kill) - liveOffset = inside(lastState) { case ShouldHaveEnded(liveStart, 5, lastSeen) => - lastSeen.unwrap should be > liveStart.unwrap - liveStart - } - rescan <- (singleClientFetchStream( - jwt, - uri, - query, - Some(liveOffset), - ) - via parseResp).take(2) runWith remainingDeltas - } yield inside(rescan) { case (Vector(), Vector(_, _), Some(_)) => - succeed - } - } - } - - // TODO(https://github.com/DACH-NY/canton/issues/16065): re-enable when keys are supported in 3.x - "fetch multiple keys should work" ignore httpTestFixture { fixture => - import fixture.uri - for { - aliceHeaders <- fixture.getUniquePartyAndAuthHeaders("Alice") - (alice, headers) = aliceHeaders - jwt <- jwtForParties(uri)(List(alice), List()) - create = (account: String) => - for { - r <- postCreateCommand( - accountCreateCommand(alice, account), - fixture, - headers, - ) - } yield resultContractId(r) - archive = (id: ContractId) => - for { - r <- postArchiveCommand( - TpId.Account.Account, - id, - fixture, - headers, - ) - } yield { - assert(r._1.isSuccess) - } - resp = (kill: UniqueKillSwitch) => { - val dslSyntax = Consume.syntax[JsValue] - import dslSyntax.* - Consume.interpret( - for { - ContractDelta(Vector(), Vector(), Some(liveStartOffset)) <- readOne - cid1 <- liftF(create("abc123")) - ContractDelta(Vector((cid, _)), Vector(), Some(_)) <- readOne - _ = cid shouldBe cid1 - _ <- liftF(create("abc124")) - _ <- liftF(create("abc125")) - cid2 <- liftF(create("def456")) - ContractDelta(Vector((cid, _)), Vector(), Some(_)) <- readOne - _ = cid shouldBe cid2 - _ <- liftF(archive(cid2)) - ContractDelta(Vector(), Vector(cid), Some(_)) <- readOne - _ = cid.contractId shouldBe cid2 - _ <- liftF(archive(cid1)) - ContractDelta(Vector(), Vector(cid), Some(_)) <- readOne - _ = cid.contractId shouldBe cid1 - _ = kill.shutdown() - heartbeats <- drain - _ = heartbeats.foreach { d => - inside(d) { case ContractDelta(Vector(), Vector(), Some(_)) => - succeed - } - } - } yield succeed - ) - } - req = - s""" - |[{"templateId": "${templateIdWithPackageName( - TpId.Account.Account - )}", "key": ["$alice", "abc123"]}, - | {"templateId": "${templateIdWithPackageName( - TpId.Account.Account - )}", "key": ["$alice", "def456"]}] - |""".stripMargin - (kill, source) = singleClientFetchStream(jwt, uri, req) - .viaMat(KillSwitches.single)(Keep.right) - .preMaterialize() - - res <- source.via(parseResp).runWith(resp(kill)) - } yield res - } - - /** Consume ACS blocks expecting `createCount` contracts. Fail if there are too many contracts. 
- */ - private[this] def readAcsN( - createCount: Int - ): Consume.FCC[JsValue, Vector[(ContractId, JsValue)]] = { - val dslSyntax = Consume.syntax[JsValue] - import dslSyntax.* - def go(createCount: Int): Consume.FCC[JsValue, Vector[(ContractId, JsValue)]] = - if (createCount <= 0) point(Vector.empty) - else - for { - ContractDelta(creates, Vector(), None) <- readOne - found = creates.size - if found <= createCount - tail <- if (found < createCount) go(createCount - found) else point(Vector.empty) - } yield creates ++ tail - go(createCount) - } - - /** Updates the ACS retrieved with [[readAcsN]] with the given number of events The caller is in - * charge of reading the live marker if that is expected - */ - private[this] def updateAcs( - acs: Map[ContractId, JsValue], - events: Int, - ): Consume.FCC[JsValue, Map[ContractId, JsValue]] = { - val dslSyntax = Consume.syntax[JsValue] - import dslSyntax.* - def go( - acs: Map[ContractId, JsValue], - missingEvents: Int, - ): Consume.FCC[JsValue, Map[ContractId, JsValue]] = - if (missingEvents <= 0) { - point(acs) - } else { - for { - ContractDelta(creates, archives, _) <- readOne - newAcs = acs ++ creates -- archives.map(_.contractId) - events = creates.size + archives.size - next <- go(newAcs, missingEvents - events) - } yield next - } - go(acs, events) - } - - "fetch should should return an error if empty list of (templateId, key) pairs is passed" in httpTestFixture { - fixture => - import fixture.uri - fixture - .jwt(uri) - .flatMap( - singleClientFetchStream(_, uri, "[]") - .runWith(collectResultsAsTextMessageSkipOffsetTicks) - ) - .map { clientMsgs => - inside(clientMsgs) { case Seq(errorMsg) => - val errorResponse = decodeErrorResponse(errorMsg) - errorResponse.status shouldBe StatusCodes.BadRequest - inside(errorResponse.errors) { case List(error) => - error should include("must be a JSON array with at least 1 element") - } - } - }: Future[Assertion] - } - - // Following #16782, we use canton community edition over sandbox-on-x. - "fail reading from a pruned offset" ignore httpTestFixture { fixture => - import fixture.{uri, client} - for { - aliceH <- fixture.getUniquePartyAndAuthHeaders("Alice") - (alice, aliceHeaders) = aliceH - offsets <- offsetBeforeAfterArchival(alice, fixture, aliceHeaders) - (offsetBeforeArchive, offsetAfterArchive) = offsets - - pruned <- PruneGrpc.ParticipantPruningServiceGrpc - .stub(client.channel) - .prune( - PruneGrpc.PruneRequest( - pruneUpTo = Offset tryToLong offsetAfterArchive, - submissionId = "", - pruneAllDivulgedContracts = true, - ) - ) - _ = pruned should ===(PruneGrpc.PruneResponse()) - - // now query again with a pruned offset - jwt <- jwtForParties(uri)(List(alice), List()) - query = s"""[{"templateIds": ["${templateIdWithPackageName(TpId.Iou.Iou)}"]}]""" - streamError <- singleClientQueryStream(jwt, uri, query, Some(offsetBeforeArchive)) - .runWith(Sink.seq) - .failed - } yield inside(streamError) { case t: PeerClosedConnectionException => - // TODO(i13379) descriptive/structured error. 
The logs when running this - // test include - // Websocket handler failed with FAILED_PRECONDITION: PARTICIPANT_PRUNED_DATA_ACCESSED(9,0): - // Transactions request from 6 to 8 - // precedes pruned offset 7 - // but this doesn't propagate to the client - t.closeCode should ===(1011) // see RFC 6455 - t.closeReason should ===("internal error") - } - } - - import AbstractHttpServiceIntegrationTestFuns.{UriFixture, EncoderFixture} - - private[this] def offsetBeforeAfterArchival( - party: Party, - fixture: UriFixture with EncoderFixture, - headers: List[HttpHeader], - ): Future[(Offset, Offset)] = { - import fixture.uri - type In = JsValue // JsValue might not be the most convenient choice - val syntax = Consume.syntax[In] - import syntax.* - - def offsetAfterCreate(): Consume.FCC[In, (ContractId, Offset)] = for { - // make a contract - create <- liftF( - postCreateCommand( - iouCreateCommand(party), - fixture, - headers, - ) - ) - cid = resultContractId(create) - // wait for the creation's offset - offsetAfter <- readUntil[In] { - case ContractDelta(creates, _, off @ Some(_)) => - if (creates.exists(_._1 == cid)) off else None - case _ => None - } - } yield (cid, offsetAfter) - - def readMidwayOffset(kill: UniqueKillSwitch) = for { - // wait for the ACS - _ <- readUntil[In] { - case ContractDelta(_, _, offset) => offset - case _ => None - } - // make a contract and fetch the offset after it - (cid, betweenOffset) <- offsetAfterCreate() - // archive it - archive <- liftF(postArchiveCommand(TpId.Iou.Iou, cid, fixture, headers)) - _ = archive._1 should ===(StatusCodes.OK) - // wait for the archival offset - afterOffset <- readUntil[In] { - case ContractDelta(_, archived, offset) => - if (archived.exists(_.contractId == cid)) offset else None - case _ => None - } - // if you try to prune afterOffset, pruning fails with - // OFFSET_OUT_OF_RANGE(9,db14ee96): prune_up_to needs to be before ledger end 0000000000000007 - // create another dummy contract and ignore it - _ <- offsetAfterCreate() - _ = kill.shutdown() - } yield (betweenOffset, afterOffset) - - val query = s"""[{"templateIds": ["${templateIdWithPackageName(TpId.Iou.Iou)}"]}]""" - for { - jwt <- jwtForParties(uri)(List(party), List()) - (kill, source) = - singleClientQueryStream(jwt, uri, query) - .viaMat(KillSwitches.single)(Keep.right) - .preMaterialize() - offsets <- source.via(parseResp).runWith(Consume.interpret(readMidwayOffset(kill))) - } yield offsets - } - - import AbstractHttpServiceIntegrationTestFuns.UriFixture - - "query on a bunch of random splits should yield consistent results" in httpTestFixture { - fixture => - import fixture.uri - for { - aliceHeaders <- fixture.getUniquePartyAndAuthHeaders("Alice") - (alice, headers) = aliceHeaders - splitSample = SplitSeq.gen.map(_ map (BigDecimal(_))).sample.value - query = - s"""[ - {"templateIds": ["${templateIdWithPackageName(TpId.Iou.Iou)}"]} - ]""" - jwt <- jwtForParties(uri)(List(alice), List()) - (kill, source) = - singleClientQueryStream(jwt, uri, query) - .viaMat(KillSwitches.single)(Keep.right) - .preMaterialize() - res <- source - .via(parseResp) - .map(iouSplitResult) - .filterNot(_ == \/-((Vector(), Vector()))) // liveness marker/heartbeat - .runWith( - Consume.interpret(trialSplitSeq(fixture, splitSample, kill, alice, headers)) - ) - } yield res - } - - private def trialSplitSeq( - fixture: UriFixture, - ss: SplitSeq[BigDecimal], - kill: UniqueKillSwitch, - partyName: Party, - headers: List[HttpHeader], - ): Consume.FCC[IouSplitResult, Assertion] = { - val dslSyntax = 
Consume.syntax[IouSplitResult] - import SplitSeq.* - import dslSyntax.* - def go( - createdCid: ContractId, - ss: SplitSeq[BigDecimal], - ): Consume.FCC[IouSplitResult, Assertion] = ss match { - case Leaf(_) => - point(1 shouldBe 1) - case Node(_, l, r) => - for { - (StatusCodes.OK, _) <- liftF( - fixture.postJsonRequest( - Uri.Path("/v1/exercise"), - exercisePayload(createdCid, l.x), - headers, - ) - ) - - \/-((Vector((cid1, amt1), (cid2, amt2)), Vector(archival))) <- readOne - (lCid, rCid) = { - archival should ===(createdCid) - Set(amt1, amt2) should ===(Set(l.x, r.x)) - if (amt1 == l.x) (cid1, cid2) else (cid2, cid1) - } - - _ <- go(lCid, l) - last <- go(rCid, r) - } yield last - } - - val initialPayload = { - import JsonProtocol.* - CreateCommand( - TpId.Iou.Iou, - Map( - "observers" -> List[String]().toJson, - "issuer" -> partyName.toJson, - "amount" -> ss.x.toJson, - "currency" -> "USD".toJson, - "owner" -> partyName.toJson, - ).toJson, - meta = None, - ).toJson - } - for { - (StatusCodes.OK, _) <- liftF( - fixture.postJsonRequest( - Uri.Path("/v1/create"), - initialPayload, - headers, - ) - ) - \/-((Vector((genesisCid, amt)), Vector())) <- readOne - _ = amt should ===(ss.x) - last <- go(genesisCid, ss) - _ = kill.shutdown() - } yield last - } - - private def iouSplitResult(jsv: JsValue): IouSplitResult = jsv match { - case ContractDelta(creates, archives, _) => - creates traverse { - case (cid, JsObject(fields)) => - fields get "amount" collect { case JsString(amt) => - (cid, BigDecimal(amt)) - } - case _ => None - } map ((_, archives map (_.contractId))) toRightDisjunction jsv - case _ => -\/(jsv) - } - - "no duplicates should be returned when retrieving contracts for multiple parties" in httpTestFixture { - fixture => - import fixture.uri - - def test( - expectedContractId: String, - expectedParties: Vector[Party], - killSwitch: UniqueKillSwitch, - ): Sink[JsValue, Future[ShouldHaveEnded]] = { - val dslSyntax = Consume.syntax[JsValue] - import dslSyntax.* - Consume.interpret( - for { - Vector((sharedAccountId, sharedAccount)) <- readAcsN(1) - _ = sharedAccountId shouldBe expectedContractId - ContractDelta(Vector(), _, Some(offset)) <- readOne - _ = inside(sharedAccount) { case JsObject(obj) => - import JsonProtocol.* - inside( - (obj get "owners" map (SprayJson.decode[Vector[Party]](_)), obj get "number") - ) { case (Some(\/-(owners)), Some(JsString(number))) => - owners should contain theSameElementsAs expectedParties - number shouldBe "4444" - } - } - ContractDelta(Vector(), _, Some(_)) <- readOne - _ = killSwitch.shutdown() - heartbeats <- drain - hbCount = (heartbeats.iterator.map(heartbeatOffset).toSet + offset).size - 1 - } yield - // don't count empty events block if lastSeenOffset does not change - ShouldHaveEnded( - liveStartOffset = offset, - msgCount = 2 + hbCount, - lastSeenOffset = offset, - ) - ) - } - - for { - aliceAndBob @ List(alice, bob) <- List("Alice", "Bob").traverse { p => - fixture.getUniquePartyAndAuthHeaders(p).map(_._1) - } - jwtForAliceAndBob <- - jwtForParties(uri)(actAs = aliceAndBob, readAs = Nil) - createResponse <- - fixture - .headersWithPartyAuth(aliceAndBob) - .flatMap(headers => - postCreateCommand( - cmd = sharedAccountCreateCommand(owners = aliceAndBob, "4444"), - fixture = fixture, - headers = headers, - ) - ) - expectedContractId = resultContractId(createResponse) - (killSwitch, source) = singleClientQueryStream( - jwt = jwtForAliceAndBob, - serviceUri = uri, - query = - s"""{"templateIds": 
["${templateIdWithPackageName(TpId.Account.SharedAccount)}"]}""", - ) - .viaMat(KillSwitches.single)(Keep.right) - .preMaterialize() - result <- source via parseResp runWith test( - expectedContractId.unwrap, - Vector(alice, bob), - killSwitch, - ) - } yield inside(result) { case ShouldHaveEnded(_, 2, _) => - succeed - } - } - - "Per-query offsets should work as expected" in httpTestFixture { fixture => - import fixture.uri - val dslSyntax = Consume.syntax[JsValue] - import dslSyntax.* - for { - aliceHeaders <- fixture.getUniquePartyAndAuthHeaders("Alice") - (alice, headers) = aliceHeaders - jwt <- jwtForParties(uri)(List(alice), List()) - iouTemplate = templateIdWithPackageName(TpId.Iou.Iou) - dummyTemplate = templateIdWithPackageName(TpId.Test.Dummy) - createIouCommand = s"""{ - | "templateId": "$iouTemplate", - | "payload": { - | "observers": [], - | "issuer": "$alice", - | "amount": "999.99", - | "currency": "USD", - | "owner": "$alice" - | } - |}""".stripMargin - createDummyContractCommand = s"""{ - | "templateId": "$dummyTemplate", - | "payload": { "operator": "$alice" } - |}""".stripMargin - create = (command: String) => - postJsonStringRequest( - uri.withPath(Uri.Path("/v1/create")), - command, - headers, - ) - .map(_._1 shouldBe a[StatusCodes.Success]) - contractsQuery = (templateQualifiedName: String) => - s"""{"templateIds":["$templateQualifiedName"]}""" - contractsQueryWithOffset = (offset: Offset, templateQualifiedName: String) => - s"""{"templateIds":["$templateQualifiedName"], "offset":"${offset.unwrap}"}""" - contracts = (templateQualifiedName: String, offset: Option[Offset]) => - offset.fold(contractsQuery(templateQualifiedName))( - contractsQueryWithOffset(_, templateQualifiedName) - ) - acsEnd = (expectedContracts: Int) => { - def go(killSwitch: UniqueKillSwitch): Sink[JsValue, Future[Offset]] = - Consume.interpret( - for { - _ <- readAcsN(expectedContracts) - ContractDelta(Vector(), Vector(), Some(offset)) <- readOne - _ = killSwitch.shutdown() - _ <- drain - } yield offset - ) - val (killSwitch, source) = - singleClientQueryStream( - jwt, - uri, - s"""{"templateIds":["$dummyTemplate", "$iouTemplate"]}""", - ) - .viaMat(KillSwitches.single)(Keep.right) - .preMaterialize() - source.via(parseResp).runWith(go(killSwitch)) - } - test = ( - clue: String, - expectedAcsSize: Int, - expectedEvents: Int, - queryFrom: Option[Offset], - iousFrom: Option[Offset], - dummysFrom: Option[Offset], - expected: Map[String, Int], - ) => { - def go(killSwitch: UniqueKillSwitch): Sink[JsValue, Future[Assertion]] = - Consume.interpret( - for { - acs <- readAcsN(expectedAcsSize) - _ <- if (acs.nonEmpty) readOne else point(()) - contracts <- updateAcs(Map.from(acs), expectedEvents) - result = contracts.toSeq - .map { - case (_, payload) if payload.asJsObject.fields.contains("currency") => - iouTemplate -> payload - case (_, payload) => dummyTemplate -> payload - } - .groupMap(_._1)(_._2) - .map { case (k, vs) => k -> vs.size } - _ = killSwitch.shutdown() - } yield withClue(clue) { - result shouldEqual expected - } - ) - val (killSwitch, source) = - singleClientQueryStream( - jwt, - uri, - Seq(contracts(iouTemplate, iousFrom), contracts(dummyTemplate, dummysFrom)) - .mkString("[", ",", "]"), - queryFrom, - ) - .viaMat(KillSwitches.single)(Keep.right) - .preMaterialize() - source.via(parseResp).runWith(go(killSwitch)) - } - _ <- create(createIouCommand) - _ <- create(createDummyContractCommand) - offset1 <- acsEnd(2) - _ <- create(createIouCommand) - _ <- create(createDummyContractCommand) - 
offset2 <- acsEnd(4) - _ <- create(createIouCommand) - _ <- create(createDummyContractCommand) - _ <- test( - "No offsets", - 6, - 0, - None, - None, - None, - Map(iouTemplate -> 3, dummyTemplate -> 3), - ) - _ <- test( - "Offset message only", - 0, - 2, - Some(offset2), - None, - None, - Map(iouTemplate -> 1, dummyTemplate -> 1), - ) - _ <- test( - "Per-query offsets only", - 0, - 3, - None, - Some(offset1), - Some(offset2), - Map(iouTemplate -> 2, dummyTemplate -> 1), - ) - _ <- test( - "Absent per-query offset is overridden by offset message", - 0, - 3, - Some(offset2), - None, - Some(offset1), - Map(iouTemplate -> 1, dummyTemplate -> 2), - ) - _ <- test( - "Offset message does not override per-query offsets", - 0, - 4, - Some(offset2), - Some(offset1), - Some(offset1), - Map(iouTemplate -> 2, dummyTemplate -> 2), - ) - _ <- test( - "Per-query offset with ACS query", - 3, - 1, - None, - None, - Some(offset2), - Map(iouTemplate -> 3, dummyTemplate -> 1), - ) - } yield succeed - } - - "ContractKeyStreamRequest" when { - import JsonProtocol.* - val baseVal = - EnrichedContractKey( - HttpContractTypeId.Template(PackageRef.assertFromString("ab"), "cd", "ef"), - JsString("42"): JsValue, - ) - val baseMap = baseVal.toJson.asJsObject.fields - val withSome = JsObject(baseMap + (contractIdAtOffsetKey -> JsString("hi"))) - val withNone = JsObject(baseMap + (contractIdAtOffsetKey -> JsNull)) - - "initial JSON reader" should { - type T = ContractKeyStreamRequest[Unit, JsValue] - - "shares EnrichedContractKey format" in { _ => - JsObject(baseMap).convertTo[T] should ===(ContractKeyStreamRequest((), baseVal)) - } - - "errors on contractIdAtOffset presence" in { _ => - a[DeserializationException] shouldBe thrownBy { - withSome.convertTo[T] - } - a[DeserializationException] shouldBe thrownBy { - withNone.convertTo[T] - } - } - } - - "resuming JSON reader" should { - type T = ContractKeyStreamRequest[Option[Option[ContractId]], JsValue] - - "shares EnrichedContractKey format" in { _ => - JsObject(baseMap).convertTo[T] should ===(ContractKeyStreamRequest(None, baseVal)) - } - - "distinguishes null and string" in { _ => - withSome.convertTo[T] should ===(ContractKeyStreamRequest(Some(Some("hi")), baseVal)) - withNone.convertTo[T] should ===(ContractKeyStreamRequest(Some(None), baseVal)) - } - } - } - - private def wsConnectRequest[M]( - uri: Uri, - subprotocol: Option[String], - input: Source[Message, NotUsed], - ) = - Http().singleWebSocketRequest( - request = WebSocketRequest(uri = uri, subprotocol = subprotocol), - clientFlow = dummyFlow(input), - ) - - private def assertHeartbeat(str: String): Assertion = - inside( - SprayJson - .decode[EventsBlock](str) - ) { case \/-(eventsBlock) => - eventsBlock.events shouldBe Vector.empty[JsValue] - inside(eventsBlock.offset) { - case Some(JsString(offset)) => - offset.length should be > 0 - case Some(JsNull) => - Succeeded - } - } - - private def decodeErrorResponse(str: String): ErrorResponse = { - import JsonProtocol.* - inside(SprayJson.decode[ErrorResponse](str)) { case \/-(e) => - e - } - } -} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/WebsocketServiceOffsetTickIntTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/WebsocketServiceOffsetTickIntTest.scala deleted file mode 100644 index 381ad0fc1c..0000000000 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/WebsocketServiceOffsetTickIntTest.scala +++ /dev/null @@ 
-1,121 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.integration.tests.jsonapi - -import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.http.{Offset, WebsocketConfig} -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer -import scalaz.\/- - -import scala.concurrent.duration.* - -@SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements")) -class WebsocketServiceOffsetTickIntTest - extends AbstractHttpServiceIntegrationTestFuns - with AbstractHttpServiceIntegrationTestFunsUserToken { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) - - // make sure websocket heartbeats non-stop, DO NOT CHANGE `0.second` - override def wsConfig: Option[WebsocketConfig] = - Some(WebsocketConfig(heartbeatPeriod = 0.second)) - - import WebsocketTestFixture.* - - "JSON API websocket endpoints" should { - "emit only offset ticks, given empty ACS" in httpTestFixture { fixture => - import fixture.uri - for { - jwt <- fixture.jwt(uri) - msgs <- - suppressPackageIdWarning { - singleClientQueryStream(jwt, uri, s"""{"templateIds": ["${TpId.Iou.Iou.fqn}"]}""") - .take(10) - .runWith(collectResultsAsTextMessage) - } - } yield { - inside(eventsBlockVector(msgs.toVector)) { case \/-(offsetTicks) => - offsetTicks.forall(isOffsetTick) shouldBe true - offsetTicks should have length 10 - } - } - } - - "emit ACS block and after it only absolute offset ticks, given non-empty ACS" in withHttpService() { - fixture => - import fixture.uri - - for { - aliceHeaders <- fixture.getUniquePartyAndAuthHeaders("Alice") - (party, headers) = aliceHeaders - _ <- initialIouCreate(uri, party, headers) - jwt <- jwtForParties(uri)(List(party), List()) - msgs <- - suppressPackageIdWarning { - singleClientQueryStream(jwt, uri, s"""{"templateIds": ["${TpId.Iou.Iou.fqn}"]}""") - .take(10) - .runWith(collectResultsAsTextMessage) - } - } yield { - inside(eventsBlockVector(msgs.toVector)) { case \/-(acs +: offsetTicks) => - isAcs(acs) shouldBe true - acs.events should have length 1 - offsetTicks.forall(isAbsoluteOffsetTick) shouldBe true - offsetTicks should have length 9 - } - } - } - - "immediately start emitting ticks, given an offset to resume at" in httpTestFixture { fixture => - import fixture.{uri, client} - for { - ledgerOffset <- client.stateService - .getLedgerEndOffset() - .map(Offset(_)) - jwt <- fixture.jwt(uri) - msgs <- - suppressPackageIdWarning { - singleClientQueryStream( - jwt, - uri, - s"""{"templateIds": ["${TpId.Iou.Iou.fqn}"]}""", - offset = Some(ledgerOffset), - ) - .take(10) - .runWith(collectResultsAsTextMessage) - } - } yield { - inside(eventsBlockVector(msgs.toVector)) { case \/-(offsetTicks) => - offsetTicks.forall(isAbsoluteOffsetTick) shouldBe true - offsetTicks should have length 10 - } - } - } - - "immediately start emitting ticks, given an offset to resume at inside the query" in httpTestFixture { - fixture => - import fixture.{uri, client} - for { - ledgerOffset <- client.stateService - .getLedgerEndOffset() - .map(Offset(_)) - jwt <- fixture.jwt(uri) - msgs <- - suppressPackageIdWarning { - singleClientQueryStream( - jwt, - uri, - s"""[{"templateIds": ["${TpId.Iou.Iou.fqn}"], "offset": "$ledgerOffset"}]""", - ) - .take(10) - .runWith(collectResultsAsTextMessage) - } - } yield { - inside(eventsBlockVector(msgs.toVector)) { case \/-(offsetTicks) => - 
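// A sketch of the tick classification these assertions rely on (hypothetical
// Block type, not from this patch; the offset field is modelled as
// Option[Option[String]] so that None = absent, Some(None) = JsNull i.e. ledger
// begin, Some(Some(s)) = an absolute offset "s"):
object OffsetTickSketch {
  final case class Block(events: Vector[String], offset: Option[Option[String]])

  // Any offset — including JsNull for ledger begin — makes an empty block a tick.
  def isOffsetTick(b: Block): Boolean =
    b.events.isEmpty && b.offset.exists(_.forall(_.nonEmpty))

  // An absolute tick additionally requires a non-empty offset string.
  def isAbsoluteOffsetTick(b: Block): Boolean =
    b.events.isEmpty && b.offset.exists(_.exists(_.nonEmpty))
}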
offsetTicks.forall(isAbsoluteOffsetTick) shouldBe true - offsetTicks should have length 10 - } - } - } - } -} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/WebsocketTestFixture.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/WebsocketTestFixture.scala deleted file mode 100644 index 3f8a50dff9..0000000000 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/jsonapi/WebsocketTestFixture.scala +++ /dev/null @@ -1,418 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.integration.tests.jsonapi - -import com.daml.jwt.Jwt -import com.digitalasset.canton.http -import com.digitalasset.canton.http.json.JsonProtocol.* -import com.digitalasset.canton.http.json.SprayJson -import com.digitalasset.canton.http.json.v1.WebsocketEndpoints.{tokenPrefix, wsProtocol} -import com.digitalasset.daml.lf.data.Ref -import com.typesafe.scalalogging.StrictLogging -import org.apache.pekko.NotUsed -import org.apache.pekko.actor.ActorSystem -import org.apache.pekko.http.scaladsl.Http -import org.apache.pekko.http.scaladsl.model.Uri -import org.apache.pekko.http.scaladsl.model.ws.{ - BinaryMessage, - Message, - TextMessage, - WebSocketRequest, -} -import org.apache.pekko.stream.Materializer -import org.apache.pekko.stream.scaladsl.{Flow, Keep, Sink, Source} -import org.scalacheck.Gen -import org.scalatest.Assertions -import org.scalatest.matchers.{MatchResult, Matcher} -import scalaz.\/ -import scalaz.std.option.* -import scalaz.std.vector.* -import scalaz.syntax.std.option.* -import scalaz.syntax.tag.* -import scalaz.syntax.traverse.* -import spray.json.{ - DefaultJsonProtocol, - JsArray, - JsBoolean, - JsNull, - JsNumber, - JsObject, - JsString, - JsValue, - RootJsonReader, - enrichAny as `sj enrichAny`, - enrichString as `sj enrichString`, -} - -import scala.concurrent.duration.* -import scala.concurrent.{ExecutionContext, Future} - -private[jsonapi] object WebsocketTestFixture extends StrictLogging with Assertions { - - def validSubprotocol(jwt: Jwt) = Option(s"""$tokenPrefix${jwt.value},$wsProtocol""") - - def dummyFlow[A](source: Source[A, NotUsed]): Flow[A, A, NotUsed] = - Flow.fromSinkAndSource(Sink.foreach(println), source) - - val contractIdAtOffsetKey = "contractIdAtOffset" - - private[jsonapi] final case class SimpleScenario( - id: String, - path: Uri.Path, - input: Source[Message, NotUsed], - ) { - def mapInput(f: Source[Message, NotUsed] => Source[Message, NotUsed]): SimpleScenario = - copy(input = f(input)) - } - - private[jsonapi] final case class ShouldHaveEnded( - liveStartOffset: http.Offset, - msgCount: Int, - lastSeenOffset: http.Offset, - ) - - private[jsonapi] object ContractDelta { - private val tagKeys = Set("created", "archived", "error") - type T = - (Vector[(http.ContractId, JsValue)], Vector[http.ArchivedContract], Option[http.Offset]) - - def unapply( - jsv: JsValue - ): Option[T] = - for { - JsObject(eventsWrapper) <- Some(jsv) - JsArray(sums) <- eventsWrapper.get("events") - pairs = sums collect { case JsObject(fields) => fields.view.filterKeys(tagKeys).toMap.head } - if pairs.sizeCompare(sums) == 0 - sets = pairs groupBy (_._1) - creates = sets.getOrElse("created", Vector()) collect { case (_, fields) => - fields - } - - createPairs = creates map { add => - import com.digitalasset.canton.http.json.JsonProtocol.ActiveContractFormat 
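// Stripped of the fixture's types, the parsing shape of the ContractDelta
// extractor is: partition the "events" array by tag key ("created" / "archived")
// and read the optional block-level offset. A minimal spray-json sketch
// (hypothetical names, simplified — it omits the "error" tag and the strictness
// check on unrecognized events):
object ContractDeltaSketch {
  import spray.json.*

  def split(block: JsValue): (Vector[JsValue], Vector[JsValue], Option[String]) = {
    val fields = block.asJsObject.fields
    val events = fields.get("events") match {
      case Some(JsArray(es)) => es
      case _ => Vector.empty[JsValue]
    }
    val creates = events.collect { case JsObject(f) if f.contains("created") => f("created") }
    val archives = events.collect { case JsObject(f) if f.contains("archived") => f("archived") }
    val offset = fields.get("offset").collect { case JsString(s) => s }
    (creates, archives, offset)
  }
}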
- val ac = - add.convertTo[http.ActiveContract[http.ContractTypeId.ResolvedPkgId, JsValue]] - (ac.contractId, ac.payload) - }: Vector[(http.ContractId, JsValue)] - - archives = sets.getOrElse("archived", Vector()) collect { case (_, adata) => - import com.digitalasset.canton.http.json.JsonProtocol.ArchivedContractFormat - adata.convertTo[http.ArchivedContract] - }: Vector[http.ArchivedContract] - - offset = eventsWrapper - .get("offset") - .collect { case JsString(str) => http.Offset(str) }: Option[http.Offset] - - } yield (createPairs, archives, offset) - } - - private[jsonapi] object IouAmount { - def unapply(jsv: JsObject): Option[BigDecimal] = - for { - JsObject(payload) <- jsv.fields get "payload" - JsString(amount) <- payload get "amount" - } yield BigDecimal(amount) - } - - private[jsonapi] object NumList { - def unapply(jsv: JsValue): Option[Vector[BigDecimal]] = - for { - JsArray(numvs) <- Some(jsv) - nums = numvs collect { case JsNumber(n) => n } - if numvs.sizeCompare(nums) == 0 - } yield nums - } - - private[jsonapi] final case class AccountRecord( - amount: String, - isAbcPrefix: Boolean, - is123Suffix: Boolean, - ) - private[jsonapi] final case class CreatedAccountEvent( - created: CreatedAccountContract, - matchedQueries: Vector[Int], - ) - private[jsonapi] final case class CreatedAccountContract( - contractId: http.ContractId, - templateId: http.ContractTypeId.Unknown[Ref.PackageId], - record: AccountRecord, - ) - - private[jsonapi] object ContractTypeId { - def unapply(jsv: JsValue): Option[http.ContractTypeId.Unknown[Ref.PackageId]] = for { - JsString(templateIdStr) <- Some(jsv) - templateId <- Ref.Identifier.fromString(templateIdStr).toOption - } yield http.ContractTypeId.Unknown( - templateId.packageId, - templateId.qualifiedName.module.dottedName, - templateId.qualifiedName.name.dottedName, - ) - } - - private[jsonapi] object CreatedAccount { - def unapply(jsv: JsValue): Option[CreatedAccountContract] = - for { - JsObject(created) <- Some(jsv) - JsString(contractId) <- created.get("contractId") - ContractTypeId(templateId) <- created.get("templateId") - JsObject(payload) <- created.get("payload") - JsString(amount) <- payload.get("amount") - JsBoolean(isAbcPrefix) <- payload get "isAbcPrefix" - JsBoolean(is123Suffix) <- payload get "is123Suffix" - } yield CreatedAccountContract( - http.ContractId(contractId), - templateId, - AccountRecord(amount, isAbcPrefix, is123Suffix), - ) - } - - private[jsonapi] object AccountQuery { - def unapply(jsv: JsValue): Option[CreatedAccountEvent] = - for { - JsObject(eventsWrapper) <- Some(jsv) - JsArray(events) <- eventsWrapper.get("events") - Created( - CreatedAccount(createdAccountContract), - MatchedQueries(NumList(matchedQueries), _), - ) <- events.headOption - } yield CreatedAccountEvent( - createdAccountContract, - matchedQueries.map(_.toInt), - ) - } - - private[jsonapi] abstract class JsoField(label: String) { - def unapply(jsv: JsObject): Option[(JsValue, JsObject)] = - jsv.fields get label map ((_, JsObject(jsv.fields - label))) - } - - private[jsonapi] object Created extends JsoField("created") - private[jsonapi] object Archived extends JsoField("archived") - private[jsonapi] object MatchedQueries extends JsoField("matchedQueries") - private[jsonapi] object ContractIdField extends JsoField("contractId") - private[jsonapi] object TemplateIdField extends JsoField("templateId") - - private[jsonapi] final case class EventsBlock(events: Vector[JsValue], offset: Option[JsValue]) - private[jsonapi] object EventsBlock { - import 
DefaultJsonProtocol.* - - // cannot rely on default reader, offset: JsNull gets read as None, I want Some(JsNull) for LedgerBegin - implicit val EventsBlockReader: RootJsonReader[EventsBlock] = (json: JsValue) => { - val obj = json.asJsObject - val events = obj.fields("events").convertTo[Vector[JsValue]] - val offset: Option[JsValue] = obj.fields.get("offset").collect { - case x: JsString => x - case JsNull => JsNull - } - EventsBlock(events, offset) - } - } - - type IouSplitResult = - JsValue \/ (Vector[(http.ContractId, BigDecimal)], Vector[http.ContractId]) - - sealed abstract class SplitSeq[+X] extends Product with Serializable { - import SplitSeq.* - def x: X - - def fold[Z](leaf: X => Z, node: (X, Z, Z) => Z): Z = { - def go(self: SplitSeq[X]): Z = self match { - case Leaf(x) => leaf(x) - case Node(x, l, r) => node(x, go(l), go(r)) - } - go(this) - } - - def map[B](f: X => B): SplitSeq[B] = - fold[SplitSeq[B]](x => Leaf(f(x)), (x, l, r) => Node(f(x), l, r)) - } - - object SplitSeq { - final case class Leaf[+X](x: X) extends SplitSeq[X] - final case class Node[+X](x: X, l: SplitSeq[X], r: SplitSeq[X]) extends SplitSeq[X] - - type Amount = Long - - val gen: Gen[SplitSeq[Amount]] = - Gen.posNum[Amount] flatMap (x => Gen.sized(genSplit(x, _))) - - private def genSplit(x: Amount, size: Int): Gen[SplitSeq[Amount]] = - if (size > 1 && x > 1) - Gen.frequency( - (1, Gen const Leaf(x)), - ( - 8 min size, - Gen.chooseNum(1: Amount, x - 1) flatMap { split => - Gen.zip(genSplit(split, size / 2), genSplit(x - split, size / 2)) map { case (l, r) => - Node(x, l, r) - } - }, - ), - ) - else Gen const Leaf(x) - } - - def singleClientQueryStream( - jwt: Jwt, - serviceUri: Uri, - query: String, - offset: Option[http.Offset] = None, - )(implicit asys: ActorSystem): Source[Message, NotUsed] = - singleClientWSStream(jwt, "query", serviceUri, query, offset) - - def singleClientFetchStream( - jwt: Jwt, - serviceUri: Uri, - request: String, - offset: Option[http.Offset] = None, - )(implicit asys: ActorSystem): Source[Message, NotUsed] = - singleClientWSStream(jwt, "fetch", serviceUri, request, offset) - - def singleClientWSStream( - jwt: Jwt, - path: String, - serviceUri: Uri, - query: String, - offset: Option[http.Offset], - )(implicit asys: ActorSystem): Source[Message, NotUsed] = { - val uri = serviceUri.copy(scheme = "ws").withPath(Uri.Path(s"/v1/stream/$path")) - logger.info( - s"---- singleClientWSStream uri: ${uri.toString}, query: $query, offset: ${offset.toString}" - ) - val webSocketFlow = - Http().webSocketClientFlow(WebSocketRequest(uri = uri, subprotocol = validSubprotocol(jwt))) - offset - .cata( - off => - Source.fromIterator(() => - Seq(Map("offset" -> off.unwrap).toJson.compactPrint, query).iterator - ), - Source single query, - ) - .map(TextMessage(_)) - // pekko-http will cancel the whole stream once the input ends so we use - // Source.maybe to keep the input open. 
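// Source.maybe materializes as a Promise[Option[T]]; Keep.left below discards
// it, so nothing ever completes the input side and the socket stays open until
// the server or a downstream kill switch ends the stream. A stand-alone sketch
// that keeps and completes the promise instead (hypothetical, not from this patch):
object KeepInputOpenSketch {
  import org.apache.pekko.actor.ActorSystem
  import org.apache.pekko.stream.scaladsl.{Keep, Sink, Source}

  def main(args: Array[String]): Unit = {
    implicit val system: ActorSystem = ActorSystem("sketch")
    val (stayOpen, done) = Source
      .single("query")
      .concatMat(Source.maybe[String])(Keep.right) // keep the promise this time
      .toMat(Sink.foreach(println))(Keep.both)
      .run()
    stayOpen.success(None) // completing with None closes the input; the stream finishes
    done.onComplete(_ => system.terminate())(system.dispatcher)
  }
}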
- .concatMat(Source.maybe[Message])(Keep.left) - .via(webSocketFlow) - } - - val collectResultsAsTextMessageSkipOffsetTicks: Sink[Message, Future[Seq[String]]] = - Flow[Message] - .collect { case m: TextMessage => m.getStrictText } - .filterNot(isOffsetTick) - .toMat(Sink.seq)(Keep.right) - - val collectResultsAsTextMessage: Sink[Message, Future[Seq[String]]] = - Flow[Message] - .collect { case m: TextMessage => m.getStrictText } - .toMat(Sink.seq)(Keep.right) - - private def isOffsetTick(str: String): Boolean = - SprayJson - .decode[EventsBlock](str) - .map(isOffsetTick) - .valueOr(_ => false) - - def isOffsetTick(v: JsValue): Boolean = - SprayJson - .decode[EventsBlock](v) - .map(isOffsetTick) - .valueOr(_ => false) - - def isOffsetTick(x: EventsBlock): Boolean = { - val hasOffset = x.offset - .collect { - case JsString(offset) => offset.length > 0 - case JsNull => true // JsNull is for LedgerBegin - } - .getOrElse(false) - - x.events.isEmpty && hasOffset - } - - def isAbsoluteOffsetTick(x: EventsBlock): Boolean = { - val hasAbsoluteOffset = x.offset - .collect { case JsString(offset) => - offset.length > 0 - } - .getOrElse(false) - - x.events.isEmpty && hasAbsoluteOffset - } - - def isAcs(x: EventsBlock): Boolean = - x.events.nonEmpty && x.offset.isEmpty - - def eventsBlockVector(msgs: Vector[String]): SprayJson.JsonReaderError \/ Vector[EventsBlock] = - msgs.traverse(SprayJson.decode[EventsBlock]) - - def matchJsValue(expected: JsValue) = new JsValueMatcher(expected) - - def matchJsValues(expected: Seq[JsValue]) = new MultipleJsValuesMatcher(expected) - - final class JsValueMatcher(right: JsValue) extends Matcher[JsValue] { - override def apply(left: JsValue): MatchResult = { - val result = (left, right) match { - case (JsArray(l), JsArray(r)) => - l.sizeCompare(r) == 0 && matchJsValues(r)(l).matches - case (JsObject(l), JsObject(r)) => - r.keys.forall(k => matchJsValue(r(k))(l(k)).matches) - case (JsString(l), JsString(r)) => l == r - case (JsNumber(l), JsNumber(r)) => l == r - case (JsBoolean(l), JsBoolean(r)) => l == r - case (JsNull, JsNull) => true - case _ => false - } - MatchResult(result, s"$left did not match $right", s"$left matched $right") - } - } - - final class MultipleJsValuesMatcher(right: Seq[JsValue]) extends Matcher[Seq[JsValue]] { - override def apply(left: Seq[JsValue]): MatchResult = { - val result = left.sizeCompare(right) == 0 && left.lazyZip(right).forall { case (l, r) => - matchJsValue(r)(l).matches - } - MatchResult(result, s"$left did not match $right", s"$left matched $right") - } - } - - def readUntil[A]: ReadUntil[A] = new ReadUntil(Consume.syntax[A]) - - final class ReadUntil[A](private val syntax: Consume.Syntax[A]) extends AnyVal { - def apply[B](f: A => Option[B]): Consume.FCC[A, B] = { - def go: Consume.FCC[A, B] = - syntax.readOne flatMap { a => f(a).fold(go)(syntax.point) } - go - } - } - - def parseResp(implicit - ec: ExecutionContext, - fm: Materializer, - ): Flow[Message, JsValue, NotUsed] = - Flow[Message] - .mapAsync(1) { - case _: BinaryMessage => fail("shouldn't get BinaryMessage") - case tm: TextMessage => tm.toStrict(1.second).map(_.text.parseJson) - } - .filter { - case JsObject(fields) => !(fields contains "heartbeat") - case _ => true - } - val remainingDeltas: Sink[JsValue, Future[ContractDelta.T]] = - Sink.fold[ContractDelta.T, JsValue]((Vector.empty, Vector.empty, Option.empty[http.Offset])) { - (acc, jsv) => - import http.Offset.semigroup - import scalaz.std.tuple.* - import scalaz.std.vector.* - import scalaz.syntax.semigroup.* - 
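// The |+| in remainingDeltas relies on a semigroup: creates and archives
// concatenate, and — assuming http.Offset's semigroup keeps the later value —
// the newest offset wins. Without scalaz, the combination step amounts to
// (hypothetical helper, not from this patch):
def combineDeltas[C, A, O](
    x: (Vector[C], Vector[A], Option[O]),
    y: (Vector[C], Vector[A], Option[O]),
): (Vector[C], Vector[A], Option[O]) =
  (x._1 ++ y._1, x._2 ++ y._2, y._3.orElse(x._3))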
jsv match { - case ContractDelta(c, a, o) => acc |+| ((c, a, o)) - case _ => acc - } - } - -} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/BaseTlsServerIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/BaseTlsServerIT.scala index e09d98eaf1..a2709c5613 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/BaseTlsServerIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/BaseTlsServerIT.scala @@ -16,7 +16,7 @@ import com.digitalasset.canton.config.{ TlsClientConfig, TlsServerConfig, } -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.fixture.CantonFixture import com.digitalasset.canton.integration.{ ConfigTransforms, @@ -44,7 +44,7 @@ abstract class BaseTlsServerIT(minimumServerProtocolVersion: Option[TlsVersion]) extends CantonFixture { registerPlugin(TLSPlugin(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) minimumServerProtocolVersion match { case Some(TlsVersion.V1_3) => diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/EngineModeIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/EngineModeIT.scala index 7d101be552..0d9e68126f 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/EngineModeIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/EngineModeIT.scala @@ -15,7 +15,7 @@ import com.digitalasset.canton.integration.ConfigTransforms.{ enableAlphaVersionSupport, setBetaSupport, } -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.fixture.CantonFixtureIsolated import com.digitalasset.canton.integration.{ConfigTransforms, EnvironmentSetupPlugin} import com.digitalasset.canton.ledger.api.grpc.GrpcClientResource @@ -43,7 +43,7 @@ abstract class BaseEngineModeIT(supportDevLanguageVersions: Boolean) with ProtocolVersionChecksFixtureAnyWordSpec { registerPlugin(EngineModePlugin(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) private[this] val userId = UserId("EngineModeIT") diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiJavaCodegenIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiJavaCodegenIntegrationTest.scala index cff1283732..07ef534a22 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiJavaCodegenIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiJavaCodegenIntegrationTest.scala @@ -7,7 +7,7 @@ import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SH import com.digitalasset.canton.config.DbConfig import 
com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.damltests.java.test.Dummy -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -84,5 +84,5 @@ trait LedgerApiJavaCodegenIntegrationTest extends CommunityIntegrationTest with } class LedgerApiJavaCodegenIntegrationTestDefault extends LedgerApiJavaCodegenIntegrationTest { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiParticipantPruningTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiParticipantPruningTest.scala index 08df642890..2453fffb91 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiParticipantPruningTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiParticipantPruningTest.scala @@ -23,7 +23,7 @@ import com.digitalasset.canton.damltests.java.test import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.examples.java.iou.{Amount, Iou} import com.digitalasset.canton.examples.java.paint.OfferToPaintHouseByOwner -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.{ CommunityIntegrationTest, ConfigTransform, @@ -57,8 +57,6 @@ trait LedgerApiParticipantPruningTest .replace(1) .focus(_.ledgerApi.indexService.updatesStreams.maxPayloadsPerPayloadsPage) .replace(1) - .focus(_.ledgerApi.indexService.transactionTreeStreams.maxPayloadsPerPayloadsPage) - .replace(1) ) private val confirmationRequestsMaxRate = NonNegativeInt.tryCreate(2 * largeTransactionBatchSize) @@ -506,7 +504,7 @@ trait LedgerApiParticipantPruningTest } class LedgerApiParticipantPruningTestDefault extends LedgerApiParticipantPruningTest { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) } //class LedgerApiParticipantPruningTestPostgres extends LedgerApiParticipantPruningTest { diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiPartyAllocationIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiPartyAllocationIntegrationTest.scala index 1a0a9c37f7..5a3acb556f 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiPartyAllocationIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiPartyAllocationIntegrationTest.scala @@ -5,10 +5,7 @@ package com.digitalasset.canton.integration.tests.ledgerapi import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.console.CommandFailure -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} 
import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -70,5 +67,5 @@ trait LedgerApiPartyAllocationIntegrationTest class LedgerApiPartyAllocationIntegrationTestPostgres extends LedgerApiPartyAllocationIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiStreamingTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiStreamingTest.scala index bd2058b89b..edd8e6908a 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiStreamingTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiStreamingTest.scala @@ -20,7 +20,7 @@ import com.digitalasset.canton.concurrent.Threading import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.console.ConsoleCommandResult import com.digitalasset.canton.damltests.java.simplecontractwithpayload.SimpleContractWithPayload -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -69,7 +69,7 @@ class LedgerApiStreamingTest extends CommunityIntegrationTest with SharedEnviron None, ) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) "test various stream closure scenarios and verify closure in logs, akka stream, gRPC" in { implicit env: TestConsoleEnvironment => @@ -275,8 +275,6 @@ class LedgerApiStreamingTest extends CommunityIntegrationTest with SharedEnviron GetUpdatesRequest( beginExclusive = 0L, endInclusive = Some(twoHundredTxLedgerEnd), - filter = None, - verbose = false, updateFormat = Some( UpdateFormat( includeTransactions = Some( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiTopologyTransactionsTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiTopologyTransactionsTest.scala index 1fe667aab0..9461d1fc68 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiTopologyTransactionsTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiTopologyTransactionsTest.scala @@ -11,11 +11,14 @@ import com.daml.ledger.api.v2.topology_transaction.{ TopologyTransaction, } import com.digitalasset.canton.auth.AuthorizationChecksErrors.PermissionDenied +import com.digitalasset.canton.config.CantonRequireTypes.InstanceName import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.config.{DbConfig, NonNegativeFiniteDuration} import com.digitalasset.canton.console.{CommandFailure, LocalParticipantReference} import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.bootstrap.NetworkBootstrapper +import 
com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.util.PartyToParticipantDeclarative import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -45,6 +48,8 @@ import org.scalatest.Inside.inside import org.scalatest.matchers.should.Matchers.* import org.slf4j.event.Level +import scala.annotation.nowarn + trait LedgerApiTopologyTransactionsTest extends CommunityIntegrationTest with SharedEnvironment { import LedgerApiTopologyTransactionsTest.* @@ -55,8 +60,16 @@ trait LedgerApiTopologyTransactionsTest extends CommunityIntegrationTest with Sh private val maxDedupDuration = java.time.Duration.ofSeconds(2) private val reconciliationInterval = config.PositiveDurationSeconds.ofSeconds(1) + @nowarn("msg=match may not be exhaustive") override lazy val environmentDefinition: EnvironmentDefinition = - EnvironmentDefinition.P2_S1M1 + EnvironmentDefinition.P3S2M2_Config + .withNetworkBootstrap { implicit env => + val Seq(daDesc, acmeDesc) = EnvironmentDefinition.S1M1_S1M1 + new NetworkBootstrapper( + daDesc, + acmeDesc.withTopologyChangeDelay(NonNegativeFiniteDuration.ofSeconds(10)), + ) + } .addConfigTransforms( ConfigTransforms.updateMaxDeduplicationDurations(maxDedupDuration) ) @@ -393,23 +406,29 @@ trait LedgerApiTopologyTransactionsTest extends CommunityIntegrationTest with Sh implicit env => import env.* - sequencer1.topology.synchronizer_parameters.propose_update( - daId, - _.copy( - // giving plenty of time for the test to succeed - topologyChangeDelay = NonNegativeFiniteDuration.Zero.plusSeconds(10) - ), - ) - val babayaga = participant1.parties.enable( + // run this test case against a synchronizer with a large topology change delay and a fresh participant, + // so that the other synchronizer used in this test doesn't interfere with its pruning timestamps + eventually() { + sequencer2.topology.sequencers + .list(acmeId) + .flatMap(_.item.active.forgetNE) + .loneElement shouldBe sequencer2.id + + } + participant3.synchronizers.connect_local(sequencer2, acmeName) + participant3.health.ping(participant3, synchronizerId = acmeId) + + val babayaga = participant3.parties.enable( "Babayaga", synchronizeParticipants = Seq(), synchronize = None, + synchronizer = acmeName, ) val (sequencedTime, effectiveTime) = eventually() { - participant1.topology.transactions + participant3.topology.transactions .list( timeQuery = TimeQuery.Snapshot(wallClock.now.plusSeconds(20)), - store = daId, + store = acmeId, filterMappings = List(TopologyMapping.Code.PartyToParticipant), ) .result @@ -425,8 +444,8 @@ trait LedgerApiTopologyTransactionsTest extends CommunityIntegrationTest with Sh } // making sure that the topology transaction is fully processed eventually() { - val synchronizerIndex = participant1.testing.state_inspection - .lookupCleanSynchronizerIndex(daName) + val synchronizerIndex = participant3.testing.state_inspection + .lookupCleanSynchronizerIndex(acmeName) .value .failOnShutdown .futureValue @@ -437,22 +456,22 @@ trait LedgerApiTopologyTransactionsTest extends CommunityIntegrationTest with Sh synchronizerIndex.sequencerIndex.value.sequencerTimestamp should be >= sequencedTime } - participant1.synchronizers.disconnect(daName) - participant1.synchronizers.reconnect(daName, synchronize = None) + participant3.synchronizers.disconnect(acmeName) + participant3.synchronizers.reconnect(acmeName, synchronize = None) // making sure that 
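// The pruning behaviour this scenario exercises follows from topology-change-delay
// arithmetic: a transaction sequenced at time t becomes effective only at
// t + delay, and until the effective-time event has been emitted the participant
// reports UnsafeToPrune. A toy model of those timestamps (hypothetical types and
// a deliberately simplified safety rule, not Canton's actual implementation):
object EffectiveTimeSketch {
  import java.time.{Duration, Instant}

  final case class TopologyTx(sequencedAt: Instant, changeDelay: Duration) {
    def effectiveAt: Instant = sequencedAt.plus(changeDelay)
    // Visible on the update stream only once record time reaches effective time.
    def visibleAt(recordTime: Instant): Boolean = !recordTime.isBefore(effectiveAt)
    // Simplified: pruning past the sequenced time is unsafe until emission.
    def safeToPruneAt(pruneUpTo: Instant, emitted: Boolean): Boolean =
      emitted || pruneUpTo.isBefore(sequencedAt)
  }
}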
the topology update is not visible just yet: synchronizer time did not reach the effective time yet // (crash recovery for topology events should just schedule the event, not emit it) - participant1.ledger_api.updates + participant3.ledger_api.updates .topology_transactions( 1, Seq(babayaga), 0, - Some(participant1.ledger_api.state.end()), + Some(participant3.ledger_api.state.end()), ) should have size 0 // making sure that the effective time is not yet passed after synchronizer reconnect - val currentRecordTime = participant1.testing.state_inspection - .lookupCleanSynchronizerIndex(daName) + val currentRecordTime = participant3.testing.state_inspection + .lookupCleanSynchronizerIndex(acmeName) .value .failOnShutdown .futureValue @@ -464,14 +483,14 @@ trait LedgerApiTopologyTransactionsTest extends CommunityIntegrationTest with Sh currentRecordTime should be >= sequencedTime currentRecordTime should be < effectiveTime - participant1.health.ping(participant1) - val ledgerEnd = participant1.ledger_api.state.end() + participant3.health.ping(participant3, synchronizerId = acmeId) + val ledgerEnd = participant3.ledger_api.state.end() // After a while the not-yet effective topology transaction should be the cause for pruning error eventually() { - participant1.health.ping(participant1) + participant3.health.ping(participant3) loggerFactory.assertThrowsAndLogs[CommandFailure]( - participant1.pruning.prune(ledgerEnd), + participant3.pruning.prune(ledgerEnd), logEntry => { logEntry.errorMessage should include(UnsafeToPrune.id) logEntry.errorMessage should include("due to Topology event crash recovery") @@ -481,8 +500,8 @@ trait LedgerApiTopologyTransactionsTest extends CommunityIntegrationTest with Sh // the topology update is getting emitted on effective time eventually() { - val updates = participant1.ledger_api.updates - .topology_transactions(1, Seq(babayaga), 0, Some(participant1.ledger_api.state.end())) + val updates = participant3.ledger_api.updates + .topology_transactions(1, Seq(babayaga), 0, Some(participant3.ledger_api.state.end())) .map(_.topologyTransaction) updates should have size 1 CantonTimestamp @@ -493,7 +512,9 @@ trait LedgerApiTopologyTransactionsTest extends CommunityIntegrationTest with Sh } // and after effective time emission the participant can be pruned above the related sequenced time - participant1.pruning.prune(ledgerEnd) + participant3.pruning.prune(ledgerEnd) + + participant3.synchronizers.disconnect(acmeName) } "Topology transaction is emitted in case SynchronizerTrustCertificate revocation" in { @@ -622,5 +643,15 @@ private object LedgerApiTopologyTransactionsTest { } class LedgerApiTopologyTransactionsTestDefault extends LedgerApiTopologyTransactionsTest { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin( + new UseReferenceBlockSequencer[DbConfig.H2]( + loggerFactory, + sequencerGroups = MultiSynchronizer( + Seq( + Set(InstanceName.tryCreate("sequencer1")), + Set(InstanceName.tryCreate("sequencer2")), + ) + ), + ) + ) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/SuperReaderUserIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/SuperReaderUserIT.scala index 08d6cc4841..5dc61a9e9c 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/SuperReaderUserIT.scala +++ 
b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/SuperReaderUserIT.scala @@ -22,7 +22,7 @@ import com.daml.ledger.javaapi.data.Transaction import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.damltests.java.divulgence.DivulgeIouByExercise import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.fixture.ValueConversions.* import com.digitalasset.canton.integration.tests.ledgerapi.fixture.{CantonFixture, CreatesParties} import com.digitalasset.canton.integration.tests.ledgerapi.services.TestCommands @@ -243,5 +243,5 @@ sealed trait SuperReaderUserIT extends CantonFixture with CreatesParties with Te } class SuperReaderUserITDefault extends SuperReaderUserIT { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/TlsIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/TlsIT.scala index a9b7970bed..45fbbca97b 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/TlsIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/TlsIT.scala @@ -13,7 +13,7 @@ import com.digitalasset.canton.config.{ TlsClientConfig, TlsServerConfig, } -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.fixture.CantonFixture import com.digitalasset.canton.integration.{ ConfigTransforms, @@ -35,7 +35,7 @@ import org.scalatest.RecoverMethods.recoverToSucceededIf class TlsIT extends CantonFixture { registerPlugin(TLSPlugin(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) private def getPemFile(fileName: String): PemFile = PemFile(ExistingFile.tryCreate(JarResourceUtils.resourceFile("test-certificates/" + fileName))) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AdminOrIdpAdminOrOperateAsPartyServiceCallAuthTests.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AdminOrIdpAdminOrOperateAsPartyServiceCallAuthTests.scala new file mode 100644 index 0000000000..7cb65f0a21 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AdminOrIdpAdminOrOperateAsPartyServiceCallAuthTests.scala @@ -0,0 +1,13 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.ledgerapi.auth + +trait AdminOrIdpAdminOrOperateAsPartyServiceCallAuthTests + extends ReadOnlyServiceCallAuthTests + with AdminOrIDPAdminServiceCallAuthTests { + override def denyAdmin: Boolean = + false; // Disable tests that deny admin in ReadOnlyServiceCallAuthTests + override def denyReadAsAny: Boolean = + false; // Disable tests that deny read-as-any in AdminOrIDPAdminServiceCallAuthTests +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AdminOrIdpAdminOrReadAsPartyServiceCallAuthTests.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AdminOrIdpAdminOrReadAsPartyServiceCallAuthTests.scala deleted file mode 100644 index 9359324826..0000000000 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AdminOrIdpAdminOrReadAsPartyServiceCallAuthTests.scala +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.integration.tests.ledgerapi.auth - -trait AdminOrIdpAdminOrReadAsPartyServiceCallAuthTests - extends ReadOnlyServiceCallAuthTests - with AdminOrIDPAdminServiceCallAuthTests { - override def denyAdmin: Boolean = - false; // Disable tests that deny admin in ReadOnlyServiceCallAuthTests - override def denyReadAsAny: Boolean = - false; // Disable tests that deny read-as-any in AdminOrIDPAdminServiceCallAuthTests -} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AllocateExternalPartyAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AllocateExternalPartyAuthIT.scala new file mode 100644 index 0000000000..8740b9eada --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AllocateExternalPartyAuthIT.scala @@ -0,0 +1,57 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.ledgerapi.auth + +import com.daml.ledger.api.v2.admin.party_management_service.* +import com.digitalasset.canton.HasExecutionContext +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.integration.TestConsoleEnvironment +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.topology.DefaultTestIdentities +import com.digitalasset.canton.topology.transaction.ParticipantPermission.Confirmation +import com.digitalasset.canton.topology.transaction.{ + HostingParticipant, + PartyToParticipant, + TopologyChangeOp, + TopologyTransaction, +} + +import scala.concurrent.Future + +final class AllocateExternalPartyAuthIT + extends AdminOrIDPAdminServiceCallAuthTests + with HasExecutionContext { + + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + + override def serviceCallName: String = "PartyManagementService#AllocateExternalParty" + + override def serviceCall( + context: ServiceCallContext + )(implicit env: TestConsoleEnvironment): Future[Any] = + stub(PartyManagementServiceGrpc.stub(channel), context.token) + .allocateExternalParty( + AllocateExternalPartyRequest( + synchronizer = env.synchronizer1Id.toProtoPrimitive, + onboardingTransactions = Seq( + AllocateExternalPartyRequest.SignedTransaction( + TopologyTransaction( + op = TopologyChangeOp.Replace, + serial = PositiveInt.one, + PartyToParticipant.tryCreate( + DefaultTestIdentities.party1, + PositiveInt.one, + Seq(HostingParticipant(env.participant1.id, Confirmation)), + ), + testedProtocolVersion, + ).toByteString, + Seq.empty, + ) + ), + multiHashSignatures = Seq.empty, + identityProviderId = context.identityProviderId, + ) + ) +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AllocatePartyAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AllocatePartyAuthIT.scala index 9c251bd6c6..eaf574bf80 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AllocatePartyAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AllocatePartyAuthIT.scala @@ -6,13 +6,13 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.ledger.api.v2.admin.party_management_service.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.Future final class AllocatePartyAuthIT extends AdminOrIDPAdminServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "PartyManagementService#AllocateParty" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AllocatePartyBoxToIDPAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AllocatePartyBoxToIDPAuthIT.scala index 65cb1475eb..e3bf6bbd97 100644 --- 
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AllocatePartyBoxToIDPAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AllocatePartyBoxToIDPAuthIT.scala @@ -14,7 +14,7 @@ import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* import com.digitalasset.base.error.ErrorsAssertions import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.{ ApiPartyManagementServiceSuppressionRule, AuthServiceJWTSuppressionRule, @@ -27,7 +27,7 @@ final class AllocatePartyBoxToIDPAuthIT extends ServiceCallAuthTests with IdentityProviderConfigAuth with ErrorsAssertions { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "PartyManagementService#AllocateParty()" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AudienceBasedTokenAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AudienceBasedTokenAuthIT.scala index a49721f5e5..af7e2463c7 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AudienceBasedTokenAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AudienceBasedTokenAuthIT.scala @@ -8,7 +8,7 @@ import com.daml.ledger.api.v2.admin.package_management_service.* import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* import com.digitalasset.base.error.ErrorsAssertions import com.digitalasset.canton.config.{AuthServiceConfig, CantonConfig, DbConfig} -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.{ AuthInterceptorSuppressionRule, AuthServiceJWTSuppressionRule, @@ -27,7 +27,7 @@ import scala.concurrent.Future class AudienceBasedTokenAuthIT extends ServiceCallAuthTests with ErrorsAssertions { registerPlugin(ExpectedAudienceOverrideConfig(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "Any service call with target audience based token authorization" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/CheckHealthAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/CheckHealthAuthIT.scala index 0c2b39245a..3d1916151c 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/CheckHealthAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/CheckHealthAuthIT.scala @@ -6,13 +6,13 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.grpc.test.StreamConsumer import com.digitalasset.canton.config.DbConfig import 
com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import io.grpc.health.v1.{HealthCheckRequest, HealthCheckResponse, HealthGrpc} import scala.concurrent.Future final class CheckHealthAuthIT extends UnsecuredServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "HealthService" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/CompletionStreamAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/CompletionStreamAuthIT.scala index 6756d0e37e..3455d7efd9 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/CompletionStreamAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/CompletionStreamAuthIT.scala @@ -14,14 +14,14 @@ import com.daml.ledger.api.v2.command_completion_service.{ import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import io.grpc.stub.StreamObserver import scala.concurrent.Future final class CompletionStreamAuthIT extends ExpiringStreamServiceCallAuthTests[CompletionStreamResponse] { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "CommandCompletionService#CompletionStream" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/CreateIdentityProviderConfigAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/CreateIdentityProviderConfigAuthIT.scala index 92cbc0c3b6..808de2384a 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/CreateIdentityProviderConfigAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/CreateIdentityProviderConfigAuthIT.scala @@ -6,7 +6,7 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import io.grpc.Status.Code import scala.concurrent.Future @@ -14,7 +14,7 @@ import scala.concurrent.Future final class CreateIdentityProviderConfigAuthIT extends AdminServiceCallAuthTests with IdentityProviderConfigAuth { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "IdentityProviderConfigService#CreateIdentityProviderConfig" diff --git 
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/CreateUserAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/CreateUserAuthIT.scala index 6f830ebbd5..48d9ee0a92 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/CreateUserAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/CreateUserAuthIT.scala @@ -6,7 +6,7 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.ledger.api.v2.admin.user_management_service as ums import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.{ ApiUserManagementServiceSuppressionRule, IDPAndJWTSuppressionRule, @@ -19,7 +19,7 @@ final class CreateUserAuthIT extends AdminOrIDPAdminServiceCallAuthTests with UserManagementAuth with GrantPermissionTest { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "UserManagementService#CreateUser" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/CreateUserBoxToIDPAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/CreateUserBoxToIDPAuthIT.scala index 362fe937ff..42e384c118 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/CreateUserBoxToIDPAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/CreateUserBoxToIDPAuthIT.scala @@ -6,12 +6,12 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.ledger.api.v2.admin.user_management_service as uproto import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.{ExecutionContext, Future} final class CreateUserBoxToIDPAuthIT extends IDPBoxingServiceCallOutTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "UserManagementService#CreateUser()" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/DeleteIdentityProviderConfigsAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/DeleteIdentityProviderConfigsAuthIT.scala index b4445097c8..5f7225dfdb 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/DeleteIdentityProviderConfigsAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/DeleteIdentityProviderConfigsAuthIT.scala @@ -6,14 +6,14 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import 
com.daml.ledger.api.v2.admin.identity_provider_config_service.DeleteIdentityProviderConfigRequest import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.Future final class DeleteIdentityProviderConfigsAuthIT extends AdminServiceCallAuthTests with IdentityProviderConfigAuth { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "IdentityProviderConfigService#DeleteIdentityProviderConfig" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/DeleteUserAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/DeleteUserAuthIT.scala index b91138bf25..d29eba0bbe 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/DeleteUserAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/DeleteUserAuthIT.scala @@ -7,14 +7,14 @@ import com.daml.ledger.api.v2.admin.user_management_service.DeleteUserRequest import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.ApiUserManagementServiceSuppressionRule import java.util.UUID import scala.concurrent.Future final class DeleteUserAuthIT extends AdminOrIDPAdminServiceCallAuthTests with UserManagementAuth { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "UserManagementService#DeleteUser" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ExecuteSubmissionAndWaitAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ExecuteSubmissionAndWaitAuthIT.scala index a6b26e4e8f..940fd9106b 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ExecuteSubmissionAndWaitAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ExecuteSubmissionAndWaitAuthIT.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.services.SubmitDummyPreparedSubmission import org.scalatest.Assertion @@ -16,7 +16,7 @@ final class ExecuteSubmissionAndWaitAuthIT extends SyncServiceCallAuthTests with SubmitDummyPreparedSubmission with ExecuteAsAuthTests { - registerPlugin(new
UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def successfulBehavior(f: Future[Any])(implicit ec: ExecutionContext): Assertion = expectInvalidArgument(f) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ExecuteSubmissionAndWaitForTransactionAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ExecuteSubmissionAndWaitForTransactionAuthIT.scala index 5d2e2c03ca..2b1f997e7f 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ExecuteSubmissionAndWaitForTransactionAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ExecuteSubmissionAndWaitForTransactionAuthIT.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.services.SubmitDummyPreparedSubmission import org.scalatest.Assertion @@ -16,7 +16,7 @@ final class ExecuteSubmissionAndWaitForTransactionAuthIT extends SyncServiceCallAuthTests with SubmitDummyPreparedSubmission with ExecuteAsAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def successfulBehavior(f: Future[Any])(implicit ec: ExecutionContext): Assertion = expectInvalidArgument(f) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ExecuteSubmissionAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ExecuteSubmissionAuthIT.scala index cbbf9f2225..17e5b625a7 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ExecuteSubmissionAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ExecuteSubmissionAuthIT.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.services.SubmitDummyPreparedSubmission import org.scalatest.Assertion @@ -16,7 +16,7 @@ final class ExecuteSubmissionAuthIT extends SyncServiceCallAuthTests with SubmitDummyPreparedSubmission with ExecuteAsAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def successfulBehavior(f: Future[Any])(implicit ec: ExecutionContext): Assertion = expectInvalidArgument(f) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ExpiringStreamServiceCallAuthTests.scala 
b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ExpiringStreamServiceCallAuthTests.scala index 560a091dac..eb9ebd1e3b 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ExpiringStreamServiceCallAuthTests.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ExpiringStreamServiceCallAuthTests.scala @@ -3,14 +3,13 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth -import com.daml.grpc.test.StreamConsumer import com.daml.grpc.{GrpcException, GrpcStatus} import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* import com.daml.timer.Delayed import com.digitalasset.canton.auth.AuthorizationChecksErrors.AccessTokenExpired import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.services.SubmitAndWaitDummyCommand import io.grpc.Status import io.grpc.stub.StreamObserver @@ -22,7 +21,7 @@ import scala.concurrent.{Future, Promise} trait ExpiringStreamServiceCallAuthTests[T] extends ReadOnlyServiceCallAuthTests with SubmitAndWaitDummyCommand { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) protected def stream( context: ServiceCallContext, @@ -99,86 +98,3 @@ trait ExpiringStreamServiceCallAuthTests[T] } } - -// TODO(#23504) remove -trait ExpiringStreamServiceCallAuthTestsLegacy[T] - extends ReadOnlyServiceCallAuthTests - with SubmitAndWaitDummyCommand { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) - - protected def stream( - context: ServiceCallContext, - mainActorId: String, - env: TestConsoleEnvironment, - ): StreamObserver[T] => Unit - - private def expectExpiration(context: ServiceCallContext, mainActorId: String)(implicit - env: TestConsoleEnvironment - ): Future[Unit] = { - val promise = Promise[Unit]() - stream(context, mainActorId, env)(new StreamObserver[T] { - @volatile private[this] var gotSomething = false - def onNext(value: T): Unit = - gotSomething = true - def onError(t: Throwable): Unit = - t match { - case GrpcException(GrpcStatus(Status.Code.ABORTED, Some(description)), _) - if gotSomething && description.contains(AccessTokenExpired.id) => - val _ = promise.trySuccess(()) - case _ => - val _ = promise.tryFailure(t) - } - def onCompleted(): Unit = { - val _ = promise.tryFailure(new RuntimeException("stream completed before token expiration")) - } - }) - promise.future - } - - private def canActAsMainActorExpiresInFiveSeconds: ServiceCallContext = - ServiceCallContext( - Some(toHeader(expiringIn(Duration.ofSeconds(5), standardToken(mainActorActUser)))) - ) - - private def canReadAsMainActorExpiresInFiveSeconds: ServiceCallContext = - ServiceCallContext( - Some(toHeader(expiringIn(Duration.ofSeconds(5), standardToken(mainActorReadUser)))) - ) - - serviceCallName should { - "break a stream in-flight upon read-only token expiration" taggedAs securityAsset - .setAttack( - streamAttack(threat = "Present a read-only JWT upon expiration") - ) in { implicit env => - import env.* - val mainActorId = getMainActorId - val _ = 
Delayed.Future.by(10.seconds)(submitAndWaitAsMainActor(mainActorId)) - expectExpiration(canReadAsMainActorExpiresInFiveSeconds, mainActorId) - .map(_ => succeed) - .futureValue - } - - "break a stream in-flight upon read/write token expiration" taggedAs securityAsset - .setAttack( - streamAttack(threat = "Present a read/write JWT upon expiration") - ) in { implicit env => - import env.* - val mainActorId = getMainActorId - val _ = Delayed.Future.by(10.seconds)(submitAndWaitAsMainActor(mainActorId)) - expectExpiration(canActAsMainActorExpiresInFiveSeconds, mainActorId) - .map(_ => succeed) - .futureValue - } - } - - override def serviceCall(context: ServiceCallContext)(implicit - env: TestConsoleEnvironment - ): Future[Any] = { - import env.* - val mainActorId = getMainActorId - submitAndWaitAsMainActor(mainActorId).flatMap(_ => - new StreamConsumer[T](stream(context, mainActorId, env)).first() - ) - } - -} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetActiveContractsAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetActiveContractsAuthIT.scala index 48b50c8aec..8b478e547a 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetActiveContractsAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetActiveContractsAuthIT.scala @@ -11,12 +11,12 @@ import com.daml.ledger.api.v2.state_service.{ } import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.Future final class GetActiveContractsAuthIT extends SuperReaderServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "StateService#GetActiveContracts" @@ -28,8 +28,6 @@ final class GetActiveContractsAuthIT extends SuperReaderServiceCallAuthTests { stub(StateServiceGrpc.stub(channel), context.token) .getActiveContracts( GetActiveContractsRequest( - filter = None, - verbose = false, activeAtOffset = 0, eventFormat = context.eventFormat.orElse(Some(eventFormat(getMainActorId))), ), diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetAuthenticatedUserAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetAuthenticatedUserAuthIT.scala index 530bd751ac..63b3de7cf2 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetAuthenticatedUserAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetAuthenticatedUserAuthIT.scala @@ -13,7 +13,7 @@ import com.daml.ledger.api.v2.admin.user_management_service.{ import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import 
com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.AuthInterceptorSuppressionRule import org.scalatest.Assertion @@ -22,7 +22,7 @@ import scala.concurrent.Future /** Tests covering the special behaviour of GetUser wrt the authenticated user. */ class GetAuthenticatedUserAuthIT extends ServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) private val testId = UUID.randomUUID().toString diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetCommandStatusAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetCommandStatusAuthIT.scala index a79c033c25..895cc0f378 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetCommandStatusAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetCommandStatusAuthIT.scala @@ -10,7 +10,7 @@ import com.daml.ledger.api.v2.admin.command_inspection_service.{ } import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.services.SubmitAndWaitDummyCommandHelpers import java.util.UUID @@ -19,7 +19,7 @@ import scala.concurrent.Future final class GetCommandStatusAuthIT extends AdminServiceCallAuthTests with SubmitAndWaitDummyCommandHelpers { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "CommandInspectionService#GetCommandStatus" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetConnectedSynchronizersAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetConnectedSynchronizersAuthIT.scala index ac83d9c978..25ab7b1dce 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetConnectedSynchronizersAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetConnectedSynchronizersAuthIT.scala @@ -6,13 +6,13 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.ledger.api.v2.state_service.{GetConnectedSynchronizersRequest, StateServiceGrpc} import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.Future final class GetConnectedSynchronizersAuthIT - extends AdminOrIdpAdminOrReadAsPartyServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + extends AdminOrIdpAdminOrOperateAsPartyServiceCallAuthTests { + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "StateService#GetConnectedSynchronizers" diff --git 
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetEventsByContractIdRequestAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetEventsByContractIdRequestAuthIT.scala index 825af52c5d..4f1e5f9f74 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetEventsByContractIdRequestAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetEventsByContractIdRequestAuthIT.scala @@ -17,7 +17,7 @@ import com.daml.ledger.api.v2.transaction_filter.{ import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.protocol.{ExampleTransactionFactory, LfContractId} import io.grpc.Status import org.scalatest.Assertion @@ -25,7 +25,7 @@ import org.scalatest.Assertion import scala.concurrent.{ExecutionContext, Future} final class GetEventsByContractIdRequestAuthIT extends ReadOnlyServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "EventQueryService#GetEventsByContractId" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetIdentityProviderConfigAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetIdentityProviderConfigAuthIT.scala index 90879cfd7d..ebb7e98db4 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetIdentityProviderConfigAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetIdentityProviderConfigAuthIT.scala @@ -5,12 +5,12 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.Future final class GetIdentityProviderConfigAuthIT extends AdminServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "IdentityProviderConfigService#GetIdentityProviderConfig" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetLatestPrunedOffsetsAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetLatestPrunedOffsetsAuthIT.scala index c72e0cf110..565d88083e 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetLatestPrunedOffsetsAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetLatestPrunedOffsetsAuthIT.scala @@ -6,12 +6,12 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth 
import com.daml.ledger.api.v2.state_service.{GetLatestPrunedOffsetsRequest, StateServiceGrpc} import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.Future class GetLatestPrunedOffsetsAuthIT extends PublicServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "StateService#GetLatestPrunedOffsets" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetLedgerApiVersionAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetLedgerApiVersionAuthIT.scala index b0052c0713..b4bb7977f7 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetLedgerApiVersionAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetLedgerApiVersionAuthIT.scala @@ -6,12 +6,12 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.ledger.api.v2.version_service.{GetLedgerApiVersionRequest, VersionServiceGrpc} import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.Future final class GetLedgerApiVersionAuthIT extends UnsecuredServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "VersionService#GetLedgerApiVersion" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetLedgerEndAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetLedgerEndAuthIT.scala index 615dbb53fb..534543ffdd 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetLedgerEndAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetLedgerEndAuthIT.scala @@ -6,13 +6,13 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.ledger.api.v2.state_service.GetLedgerEndRequest import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.util.GrpcServices.StateService import scala.concurrent.Future final class GetLedgerEndAuthIT extends PublicServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "StateService#GetLedgerEnd" diff --git 
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetPackageAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetPackageAuthIT.scala index 09e590c85e..24aa9d972d 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetPackageAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetPackageAuthIT.scala @@ -10,12 +10,12 @@ import com.daml.ledger.api.v2.package_service.{ } import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.Future final class GetPackageAuthIT extends PublicServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "PackageService#GetPackage" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetPackageStatusAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetPackageStatusAuthIT.scala index e3d060362a..628b767231 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetPackageStatusAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetPackageStatusAuthIT.scala @@ -10,12 +10,12 @@ import com.daml.ledger.api.v2.package_service.{ } import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.Future final class GetPackageStatusAuthIT extends PublicServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "PackageService#GetPackageStatus" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetParticipantIdAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetParticipantIdAuthIT.scala index 04544d6787..79b90ca884 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetParticipantIdAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetParticipantIdAuthIT.scala @@ -6,12 +6,12 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.ledger.api.v2.admin.party_management_service.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.Future -final class GetParticipantIdAuthIT extends AdminServiceCallAuthTests { - registerPlugin(new 
UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) +final class GetParticipantIdAuthIT extends PublicServiceCallAuthTests { + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "PartyManagementService#GetParticipantId" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetPartiesAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetPartiesAuthIT.scala index d46d55b981..84b37db122 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetPartiesAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetPartiesAuthIT.scala @@ -6,12 +6,12 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.ledger.api.v2.admin.party_management_service.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.Future final class GetPartiesAuthIT extends AdminOrIDPAdminServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "PartyManagementService#GetParties" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetPreferredPackageVersionAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetPreferredPackageVersionAuthIT.scala index 518bd203d5..f3455eb996 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetPreferredPackageVersionAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetPreferredPackageVersionAuthIT.scala @@ -10,13 +10,13 @@ import com.daml.ledger.api.v2.interactive.interactive_submission_service.{ } import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.participant.admin.workflows.java.canton.internal.ping.Ping import scala.concurrent.Future final class GetPreferredPackageVersionAuthIT extends PublicServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = s"${InteractiveSubmissionService.getClass.getSimpleName}#GetPreferredPackageVersion" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetPreferredPackagesAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetPreferredPackagesAuthIT.scala index d9a90791ea..745a34d281 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetPreferredPackagesAuthIT.scala +++ 
b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetPreferredPackagesAuthIT.scala @@ -11,13 +11,13 @@ import com.daml.ledger.api.v2.interactive.interactive_submission_service.{ } import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.participant.admin.workflows.java.canton.internal.ping.Ping import scala.concurrent.Future final class GetPreferredPackagesAuthIT extends PublicServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = s"${InteractiveSubmissionService.getClass.getSimpleName}#GetPreferredPackages" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetTimeAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetTimeAuthIT.scala index 48fe7287a9..11f141e848 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetTimeAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetTimeAuthIT.scala @@ -5,12 +5,12 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.Future final class GetTimeAuthIT extends PublicServiceCallAuthTests with TimeAuth { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "TimeService#GetTime" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetTransactionByIdAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetTransactionByIdAuthIT.scala deleted file mode 100644 index 7c93541499..0000000000 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetTransactionByIdAuthIT.scala +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.integration.tests.ledgerapi.auth - -import com.daml.ledger.api.v2.update_service.{GetTransactionByIdRequest, UpdateServiceGrpc} -import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer -import io.grpc.Status -import org.scalatest.Assertion - -import java.util.UUID -import scala.annotation.nowarn -import scala.concurrent.{ExecutionContext, Future} - -// TODO(#23504) remove -@nowarn("cat=deprecation") -final class GetTransactionByIdAuthIT extends ReadOnlyServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) - - override def serviceCallName: String = "UpdateService#GetTransactionById" - - override def successfulBehavior(f: Future[Any])(implicit ec: ExecutionContext): Assertion = - expectFailure(f, Status.Code.NOT_FOUND) - - private def request(mainActorId: String) = - new GetTransactionByIdRequest(UUID.randomUUID.toString, List(mainActorId), None) - - override def serviceCall(context: ServiceCallContext)(implicit - env: TestConsoleEnvironment - ): Future[Any] = - stub(UpdateServiceGrpc.stub(channel), context.token) - .getTransactionById(request(getMainActorId)) - -} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetTransactionByOffsetAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetTransactionByOffsetAuthIT.scala deleted file mode 100644 index 9ed881462c..0000000000 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetTransactionByOffsetAuthIT.scala +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.integration.tests.ledgerapi.auth - -import com.daml.ledger.api.v2.update_service.{GetTransactionByOffsetRequest, UpdateServiceGrpc} -import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer -import io.grpc.Status -import org.scalatest.Assertion - -import scala.annotation.nowarn -import scala.concurrent.{ExecutionContext, Future} -import scala.util.Random - -// TODO(#23504) remove -@nowarn("cat=deprecation") -final class GetTransactionByOffsetAuthIT extends ReadOnlyServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) - - override def serviceCallName: String = "UpdateService#GetTransactionByOffset" - - override def successfulBehavior(f: Future[Any])(implicit ec: ExecutionContext): Assertion = - expectFailure(f, Status.Code.NOT_FOUND) - - private def request(mainActorId: String) = - new GetTransactionByOffsetRequest( - Random.nextLong(Long.MaxValue) + 1, - List(mainActorId), - None, - ) - - override def serviceCall( - context: ServiceCallContext - )(implicit env: TestConsoleEnvironment): Future[Any] = - stub(UpdateServiceGrpc.stub(channel), context.token) - .getTransactionByOffset(request(getMainActorId)) - -} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetTransactionTreeByIdAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetTransactionTreeByIdAuthIT.scala deleted file mode 100644 index 98388e3e0c..0000000000 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetTransactionTreeByIdAuthIT.scala +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.integration.tests.ledgerapi.auth - -import com.daml.ledger.api.v2.update_service.{GetTransactionByIdRequest, UpdateServiceGrpc} -import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer -import io.grpc.Status -import org.scalatest.Assertion - -import java.util.UUID -import scala.annotation.nowarn -import scala.concurrent.{ExecutionContext, Future} - -// TODO(#23504) remove -@nowarn("cat=deprecation") -final class GetTransactionTreeByIdAuthIT extends ReadOnlyServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) - - override def serviceCallName: String = "UpdateService#GetTransactionTreeById" - - override def successfulBehavior(f: Future[Any])(implicit ec: ExecutionContext): Assertion = - expectFailure(f, Status.Code.NOT_FOUND) - - private def request(mainActorId: String) = - new GetTransactionByIdRequest(UUID.randomUUID.toString, List(mainActorId), None) - - override def serviceCall(context: ServiceCallContext)(implicit - env: TestConsoleEnvironment - ): Future[Any] = - stub(UpdateServiceGrpc.stub(channel), context.token) - .getTransactionTreeById(request(getMainActorId)) - -} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetTransactionTreeByOffsetAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetTransactionTreeByOffsetAuthIT.scala deleted file mode 100644 index 3950c9efb2..0000000000 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetTransactionTreeByOffsetAuthIT.scala +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.integration.tests.ledgerapi.auth - -import com.daml.ledger.api.v2.update_service.{GetTransactionByOffsetRequest, UpdateServiceGrpc} -import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer -import io.grpc.Status -import org.scalatest.Assertion - -import scala.annotation.nowarn -import scala.concurrent.{ExecutionContext, Future} -import scala.util.Random - -// TODO(#23504) remove -@nowarn("cat=deprecation") -final class GetTransactionTreeByOffsetAuthIT extends ReadOnlyServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) - - override def serviceCallName: String = "UpdateService#getTransactionTreeByOffset" - - override def successfulBehavior(f: Future[Any])(implicit ec: ExecutionContext): Assertion = - expectFailure(f, Status.Code.NOT_FOUND) - - private def request(mainActorId: String) = - new GetTransactionByOffsetRequest( - Random.nextLong(Long.MaxValue) + 1, - List(mainActorId), - None, - ) - - override def serviceCall(context: ServiceCallContext)(implicit - env: TestConsoleEnvironment - ): Future[Any] = - stub(UpdateServiceGrpc.stub(channel), context.token) - .getTransactionTreeByOffset(request(getMainActorId)) - -} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetUpdateByIdAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetUpdateByIdAuthIT.scala index ec55579a76..a4865239de 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetUpdateByIdAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetUpdateByIdAuthIT.scala @@ -7,7 +7,8 @@ import com.daml.ledger.api.v2.transaction_filter.UpdateFormat import com.daml.ledger.api.v2.update_service.{GetUpdateByIdRequest, UpdateServiceGrpc} import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.protocol.TestUpdateId import io.grpc.Status import org.scalatest.Assertion @@ -15,7 +16,7 @@ import java.util.UUID import scala.concurrent.{ExecutionContext, Future} final class GetUpdateByIdAuthIT extends ReadOnlyServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "UpdateService#GetUpdateById" @@ -23,7 +24,10 @@ final class GetUpdateByIdAuthIT extends ReadOnlyServiceCallAuthTests { expectFailure(f, Status.Code.NOT_FOUND) private def request(updateFormat: Option[UpdateFormat]) = - new GetUpdateByIdRequest(updateId = UUID.randomUUID.toString, updateFormat = updateFormat) + new GetUpdateByIdRequest( + updateId = TestUpdateId(UUID.randomUUID.toString).toHexString, + updateFormat = updateFormat, + ) override def serviceCall(context: ServiceCallContext)(implicit env: TestConsoleEnvironment diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetUpdateByOffsetAuthIT.scala 
b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetUpdateByOffsetAuthIT.scala index 4421b0a145..c42f1f595e 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetUpdateByOffsetAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetUpdateByOffsetAuthIT.scala @@ -7,7 +7,7 @@ import com.daml.ledger.api.v2.transaction_filter.UpdateFormat import com.daml.ledger.api.v2.update_service.{GetUpdateByOffsetRequest, UpdateServiceGrpc} import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import io.grpc.Status import org.scalatest.Assertion @@ -15,7 +15,7 @@ import scala.concurrent.{ExecutionContext, Future} import scala.util.Random final class GetUpdateByOffsetAuthIT extends ReadOnlyServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "UpdateService#GetUpdateByOffset" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetUpdateTreesAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetUpdateTreesAuthIT.scala deleted file mode 100644 index aa4154291a..0000000000 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetUpdateTreesAuthIT.scala +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.integration.tests.ledgerapi.auth - -import com.daml.ledger.api.v2.update_service.{ - GetUpdateTreesResponse, - GetUpdatesRequest, - UpdateServiceGrpc, -} -import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer -import com.digitalasset.canton.integration.tests.ledgerapi.services.SubmitAndWaitDummyCommand -import io.grpc.stub.StreamObserver - -import scala.annotation.nowarn - -// TODO(#23504) remove -@nowarn("cat=deprecation") -final class GetUpdateTreesAuthIT - extends ExpiringStreamServiceCallAuthTestsLegacy[GetUpdateTreesResponse] - with SubmitAndWaitDummyCommand { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) - - override def serviceCallName: String = "UpdateService#GetUpdateTrees" - - private def request(mainActorId: String) = - new GetUpdatesRequest( - beginExclusive = participantBegin, - endInclusive = None, - filter = txFilterFor(mainActorId), - verbose = false, - updateFormat = None, - ) - - override protected def stream( - context: ServiceCallContext, - mainActorId: String, - env: TestConsoleEnvironment, - ): StreamObserver[GetUpdateTreesResponse] => Unit = - observer => - stub(UpdateServiceGrpc.stub(channel), context.token) - .getUpdateTrees(request(mainActorId), observer) - -} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetUpdatesAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetUpdatesAuthIT.scala index 88f373dec7..13c8fa94fd 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetUpdatesAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetUpdatesAuthIT.scala @@ -19,7 +19,7 @@ import com.daml.ledger.api.v2.update_service.{ import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.services.SubmitAndWaitDummyCommand import io.grpc.stub.StreamObserver @@ -29,7 +29,7 @@ final class GetUpdatesAuthIT extends ExpiringStreamServiceCallAuthTests[GetUpdatesResponse] with ReadOnlyServiceCallAuthTests with SubmitAndWaitDummyCommand { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "UpdateService#GetUpdates" @@ -38,8 +38,6 @@ final class GetUpdatesAuthIT beginExclusive = participantBegin, endInclusive = None, updateFormat = updateFormat, - filter = None, - verbose = false, ) override protected def stream( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetUserAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetUserAuthIT.scala index c2f3c9f561..1cd0277cc0 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetUserAuthIT.scala +++ 
b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GetUserAuthIT.scala @@ -7,7 +7,7 @@ import com.daml.ledger.api.v2.admin.user_management_service.* import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.ApiUserManagementServiceSuppressionRule import io.grpc.Status @@ -15,7 +15,7 @@ import java.util.UUID import scala.concurrent.{ExecutionContext, Future} class GetUserAuthIT extends AdminOrIDPAdminServiceCallAuthTests with UserManagementAuth { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "UserManagementService#GetUser" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GrantUserRightsAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GrantUserRightsAuthIT.scala index 3608d65a42..94cf626ca1 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GrantUserRightsAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GrantUserRightsAuthIT.scala @@ -7,7 +7,7 @@ import com.daml.ledger.api.v2.admin.user_management_service as ums import com.daml.ledger.api.v2.admin.user_management_service.GrantUserRightsRequest import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.ApiUserManagementServiceSuppressionRule import java.util.UUID @@ -17,7 +17,7 @@ final class GrantUserRightsAuthIT extends AdminOrIDPAdminServiceCallAuthTests with UserManagementAuth with GrantPermissionTest { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "UserManagementService#GrantUserRights" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GrantUserRightsBoxToIDPAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GrantUserRightsBoxToIDPAuthIT.scala index 4f6d19c6a5..158ce0657f 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GrantUserRightsBoxToIDPAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/GrantUserRightsBoxToIDPAuthIT.scala @@ -10,12 +10,12 @@ import com.daml.ledger.api.v2.admin.user_management_service.{ } import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import 
com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.{ExecutionContext, Future} final class GrantUserRightsBoxToIDPAuthIT extends IDPBoxingServiceCallOutTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "UserManagementService#GrantUserRights()" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ImpliedIdpPartyManagementServiceTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ImpliedIdpPartyManagementServiceTest.scala index fdfa85078b..f622b1b29e 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ImpliedIdpPartyManagementServiceTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ImpliedIdpPartyManagementServiceTest.scala @@ -7,7 +7,7 @@ import com.daml.ledger.api.v2.admin.object_meta.ObjectMeta import com.daml.ledger.api.v2.admin.party_management_service.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.AuthServiceJWTSuppressionRule import com.google.protobuf.field_mask.FieldMask import org.scalatest.Assertion @@ -16,7 +16,7 @@ import java.util.UUID import scala.concurrent.Future class ImpliedIdpPartyManagementServiceTest extends ServiceCallAuthTests with ImpliedIdpFixture { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ImpliedIdpUserManagementServiceTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ImpliedIdpUserManagementServiceTest.scala index 17e4d4e98b..75a579ec2b 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ImpliedIdpUserManagementServiceTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ImpliedIdpUserManagementServiceTest.scala @@ -6,7 +6,7 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.ledger.api.v2.admin.user_management_service.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.AuthServiceJWTSuppressionRule import com.google.protobuf.field_mask.FieldMask import org.scalatest.Assertion @@ -15,7 +15,7 @@ import java.util.UUID import scala.concurrent.{ExecutionContext, Future} class ImpliedIdpUserManagementServiceTest extends ServiceCallAuthTests with ImpliedIdpFixture { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new 
UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/JsonApiAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/JsonApiAuthIT.scala index ca25fe75a4..c0673cb05e 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/JsonApiAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/JsonApiAuthIT.scala @@ -13,7 +13,7 @@ import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto import com.digitalasset.canton.http.json.SprayJson import com.digitalasset.canton.http.json.v2.JsSchema.JsCantonError import com.digitalasset.canton.http.json.v2.{JsCommand, JsCommands} -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.jsonapi.AbstractHttpServiceIntegrationTestFuns.HttpServiceTestFixtureData import com.digitalasset.canton.integration.tests.jsonapi.{ HttpServiceTestFixture, @@ -50,7 +50,7 @@ class JsonApiAuthIT with ErrorsAssertions { registerPlugin(ExpectedScopeOverrideConfig(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override protected def packageFiles: List[File] = List(super.darFile) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/JsonApiV1AuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/JsonApiV1AuthIT.scala deleted file mode 100644 index e8069fb060..0000000000 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/JsonApiV1AuthIT.scala +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
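Alongside the rename, the GetUpdatesAuthIT and OngoingStreamAuthIT hunks drop the deprecated filter and verbose fields from GetUpdatesRequest, so the UpdateFormat alone now describes what the stream returns. A sketch of the resulting request construction, using only the field names visible in those hunks (participantBegin and updateFormat are assumed to be supplied by the surrounding test):

    import com.daml.ledger.api.v2.update_service.GetUpdatesRequest

    // After the change: no filter = None / verbose = false boilerplate;
    // the update format carries the whole selection.
    val request = new GetUpdatesRequest(
      beginExclusive = participantBegin, // offset to resume from
      endInclusive = None,               // open-ended stream
      updateFormat = updateFormat,
    )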
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.integration.tests.ledgerapi.auth - -import com.daml.jwt.{AuthServiceJWTCodec, Jwt, StandardJWTPayload, StandardJWTTokenFormat} -import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* -import com.digitalasset.base.error.ErrorsAssertions -import com.digitalasset.canton.auth.CantonAdminToken -import com.digitalasset.canton.config.{AuthServiceConfig, CantonConfig, DbConfig} -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer -import com.digitalasset.canton.integration.tests.jsonapi.AbstractHttpServiceIntegrationTestFuns.HttpServiceTestFixtureData -import com.digitalasset.canton.integration.tests.jsonapi.{ - HttpServiceTestFixture, - HttpServiceUserFixture, - HttpTestFuns, -} -import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.{ - AuthInterceptorSuppressionRule, - AuthServiceJWTSuppressionRule, -} -import com.digitalasset.canton.integration.tests.ledgerapi.fixture.CantonFixture -import com.digitalasset.canton.integration.tests.ledgerapi.services.TestCommands -import com.digitalasset.canton.integration.{ConfigTransforms, EnvironmentSetupPlugin} -import com.digitalasset.canton.logging.{NamedLoggerFactory, SuppressionRule} -import monocle.macros.syntax.lens.* -import org.apache.pekko.http.scaladsl.model.{StatusCode, StatusCodes, Uri} -import org.slf4j.event.Level - -import java.io.File -import java.time.{Duration, Instant} -import scala.concurrent.Future - -class JsonApiV1AuthIT - extends CantonFixture - with SecurityTags - with TestCommands - with HttpTestFuns - with HttpServiceUserFixture.UserToken - with ErrorsAssertions { - - registerPlugin(ExpectedScopeOverrideConfig(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) - - override protected def packageFiles: List[File] = - List(super.darFile) - - override def serviceCallName: String = - "JSON API" - - private def expect(code: StatusCode, forbiddenMessage: Option[String] = None)( - call: => Future[(StatusCode, String)] - ) = - for { - (actualCode, actualResponse) <- call - } yield { - forbiddenMessage.fold(succeed)(actualResponse should not include _) - actualCode should be(code) - } - - private def publicCall( - fixture: HttpServiceTestFixtureData, - context: ServiceCallContext, - ): Future[(StatusCode, String)] = { - - val headers = HttpServiceTestFixture.authorizationHeader(Jwt(context.token.getOrElse(""))) - fixture.getRequestString(Uri.Path("/v1/parties"), headers) - - } - - def callRequiringPartyClaims( - fixture: HttpServiceTestFixtureData, - context: ServiceCallContext, - ): Future[(StatusCode, String)] = { - val headers = HttpServiceTestFixture.authorizationHeader(Jwt(context.token.getOrElse(""))) - fixture.getRequestString(Uri.Path("/v1/query"), headers) - } - - private def toScopeContext( - payload: StandardJWTPayload - ): ServiceCallContext = ServiceCallContext( - token = Some( - toHeader( - payload = payload, - enforceFormat = Some(StandardJWTTokenFormat.Scope), - secret = jwtSecret.unwrap, - ) - ) - ) - - private val fallbackToken = CantonAdminToken.create(new SymbolicPureCrypto()) - - override val defaultScope: String = AuthServiceJWTCodec.scopeLedgerApiFull - - case class Tokens( - user: StandardJWTPayload, - admin: StandardJWTPayload, - additionalDiscriminator: StandardJWTPayload, - noDiscriminator: StandardJWTPayload, - wrongDiscriminator: 
StandardJWTPayload, - mixedDiscriminators: StandardJWTPayload, - expired: StandardJWTPayload, - unknownUser: StandardJWTPayload, - ) - - val scopeBaseToken: StandardJWTPayload = StandardJWTPayload( - issuer = None, - participantId = None, - userId = participantAdmin, - exp = None, - format = StandardJWTTokenFormat.Scope, - audiences = List.empty, - scope = Some(AuthServiceJWTCodec.scopeLedgerApiFull), - ) - val scope: Tokens = Tokens( - user = scopeBaseToken.copy(userId = randomPartyActUser), - admin = scopeBaseToken, - additionalDiscriminator = scopeBaseToken.copy(scope = Some(s"$defaultScope additionalScope")), - noDiscriminator = scopeBaseToken.copy(scope = None), - wrongDiscriminator = scopeBaseToken.copy(scope = Some("scope1 scope2")), - mixedDiscriminators = scopeBaseToken - .copy(audiences = List(ExpectedAudience), scope = Some(s"$defaultScope additionalScope")), - expired = scopeBaseToken.copy(exp = Some(Instant.now().plusNanos(Duration.ofDays(-1).toNanos))), - unknownUser = scopeBaseToken.copy(userId = "unknown_user"), - ) - - serviceCallName should { - "allow access to an endpoint requiring admin rights with fall-back authorization" taggedAs securityAsset - .setHappyCase( - "Ledger API client can make a call with a canton admin token" - ) in httpTestFixture { fixture => - expect(StatusCodes.OK) { - publicCall(fixture, ServiceCallContext(token = Some(fallbackToken.secret))) - } - } - - "allow access to an endpoint requiring admin rights with admin token" taggedAs securityAsset - .setHappyCase( - "Ledger API client can make a call requiring admin rights with an admin token" - ) in httpTestFixture { fixture => - loggerFactory.suppress(AuthServiceJWTSuppressionRule) { - for { - _ <- expect(StatusCodes.OK)( - publicCall(fixture, toScopeContext(scope.admin)) - ) - } yield () - } - } - - "deny access to an endpoint requiring party rights with admin token" taggedAs securityAsset - .setAttack( - attackPermissionDenied(threat = - "Ledger API client cannot make a call requiring party rights with an admin token" - ) - ) in httpTestFixture { fixture => - loggerFactory.suppress(AuthServiceJWTSuppressionRule) { - for { - _ <- expect(StatusCodes.Unauthorized)( - callRequiringPartyClaims(fixture, toScopeContext(scope.admin)) - ) - } yield () - } - } - - "deny access to an endpoint requiring admin rights with user token" taggedAs securityAsset - .setAttack( - attackPermissionDenied(threat = - "Ledger API client cannot make a call requiring admin rights with a user token" - ) - ) in httpTestFixture { fixture => - loggerFactory.suppress(AuthServiceJWTSuppressionRule) { - for { - _ <- expect(StatusCodes.Unauthorized)( - publicCall(fixture, toScopeContext(scope.user)) - ) - } yield () - } - // There is no concept of user token for privilegedScp and privilegedAud jwt - } - - "allow access to an endpoint requiring party rights with user token" taggedAs securityAsset - .setHappyCase( - "Ledger API client can make a call requiring party rights with a user token" - ) in httpTestFixture { fixture => - loggerFactory.assertLogsSeq(SuppressionRule.LevelAndAbove(Level.DEBUG))( - for { - _ <- expect(StatusCodes.OK, Some("UNAUTHENTICATED"))( - callRequiringPartyClaims(fixture, toScopeContext(scope.user)) - ) - } yield (), - entries => { - forEvery(entries)( - _.message should not include "failed with UNAUTHENTICATED/An error occurred" - ) - }, - ) - } - -// "allow access to an endpoint with a token that mixes expected audiences/scopes" taggedAs securityAsset -// .setHappyCase( -// "Ledger API client can make a 
call with a token with multiple expected audiences/scopes" -// ) in httpTestFixture { fixture => -// loggerFactory.suppress(AuthServiceJWTSuppressionRule) { -// for { -// _ <- expect(StatusCodes.OK)( -// publicCall(fixture, toScopeContext(scope.mixedDiscriminators)) -// ) -// } yield () -// } -// } - - "deny access to an endpoint with an expired token" taggedAs adminSecurityAsset.setAttack( - attackUnauthenticated(threat = "Present an expired token") - ) in httpTestFixture { fixture => - loggerFactory.suppress(AuthServiceJWTSuppressionRule) { - - for { - _ <- expect(StatusCodes.InternalServerError)( - publicCall(fixture, toScopeContext(scope.expired)) - ) - } yield () - } - } - - "deny access to an endpoint with an token for an unknown user" taggedAs adminSecurityAsset - .setAttack( - attackUnauthenticated(threat = "Present a token for unknown user") - ) in httpTestFixture { fixture => - loggerFactory.suppress(AuthServiceJWTSuppressionRule || AuthInterceptorSuppressionRule) { - for { - _ <- expect(StatusCodes.Unauthorized)( - publicCall(fixture, toScopeContext(scope.unknownUser)) - ) - } yield () - // There is no concept of user token for privilegedScp and privilegedAud jwt - } - } - - "deny unauthenticated access" taggedAs securityAsset.setAttack( - attackUnauthenticated(threat = "Do not present token") - ) in httpTestFixture { fixture => - expect(StatusCodes.Unauthorized)( - publicCall(fixture, noToken) - ) - } - } - - // plugin to override the configuration and use authorization with scope - case class ExpectedScopeOverrideConfig( - protected val loggerFactory: NamedLoggerFactory - ) extends EnvironmentSetupPlugin { - override def beforeEnvironmentCreated(config: CantonConfig): CantonConfig = - ConfigTransforms.updateParticipantConfig("participant1") { - _.focus(_.ledgerApi.authServices) - .replace( - Seq( - AuthServiceConfig - .UnsafeJwtHmac256( - secret = jwtSecret, - targetAudience = None, - targetScope = None, - ) - ) - ) - .focus(_.adminApi.adminTokenConfig.fixedAdminToken) - .replace(Some(fallbackToken.secret)) - }(config) - } -} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListAuthenticatedUserRightsAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListAuthenticatedUserRightsAuthIT.scala index 79a77ef7a4..5d3e393532 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListAuthenticatedUserRightsAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListAuthenticatedUserRightsAuthIT.scala @@ -7,7 +7,7 @@ import com.daml.ledger.api.v2.admin.user_management_service.* import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.AuthInterceptorSuppressionRule import org.scalatest.Assertion @@ -15,7 +15,7 @@ import java.util.UUID import scala.concurrent.Future class ListAuthenticatedUserRightsAuthIT extends ServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) private val 
testId = UUID.randomUUID().toString diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListIdentityProviderConfigsAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListIdentityProviderConfigsAuthIT.scala index 103c25e1c0..e04963b784 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListIdentityProviderConfigsAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListIdentityProviderConfigsAuthIT.scala @@ -6,14 +6,14 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.ledger.api.v2.admin.identity_provider_config_service.ListIdentityProviderConfigsRequest import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.Future final class ListIdentityProviderConfigsAuthIT extends AdminServiceCallAuthTests with IdentityProviderConfigAuth { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "IdentityProviderConfigService#ListIdentityProviderConfigs" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListKnownPackagesAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListKnownPackagesAuthIT.scala index 33d67040cf..44c75501ae 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListKnownPackagesAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListKnownPackagesAuthIT.scala @@ -6,12 +6,12 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.ledger.api.v2.admin.package_management_service.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.Future final class ListKnownPackagesAuthIT extends AdminServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "PackageManagementService#ListKnownPackages" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListKnownPartiesAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListKnownPartiesAuthIT.scala index a391162a35..1282b48564 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListKnownPartiesAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListKnownPartiesAuthIT.scala @@ -6,12 +6,12 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.ledger.api.v2.admin.party_management_service.* import 
com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.Future final class ListKnownPartiesAuthIT extends AdminOrIDPAdminServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "PartyManagementService#ListKnownParties" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListPackagesAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListPackagesAuthIT.scala index 0154202e95..57e53ab348 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListPackagesAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListPackagesAuthIT.scala @@ -6,12 +6,12 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.ledger.api.v2.package_service.{ListPackagesRequest, PackageServiceGrpc} import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.Future final class ListPackagesAuthIT extends PublicServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "PackageService#ListPackages" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListServicesAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListServicesAuthIT.scala index be81dbf050..40af4fa02b 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListServicesAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListServicesAuthIT.scala @@ -6,13 +6,13 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.grpc.test.StreamConsumer import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import io.grpc.reflection.v1.{ServerReflectionGrpc, ServerReflectionResponse} import scala.concurrent.Future class ListServicesAuthIT extends UnsecuredServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "ServerReflection#List" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListUserRightsAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListUserRightsAuthIT.scala index 
d5488459b4..7c41c058f4 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListUserRightsAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListUserRightsAuthIT.scala @@ -7,7 +7,7 @@ import com.daml.ledger.api.v2.admin.user_management_service.* import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.ApiUserManagementServiceSuppressionRule import io.grpc.Status @@ -15,7 +15,7 @@ import java.util.UUID import scala.concurrent.Future class ListUserRightsAuthIT extends AdminOrIDPAdminServiceCallAuthTests with UserManagementAuth { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "UserManagementService#ListUserRights" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListUsersAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListUsersAuthIT.scala index 050dec5e77..e2f599fa69 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListUsersAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListUsersAuthIT.scala @@ -6,12 +6,12 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.ledger.api.v2.admin.user_management_service.ListUsersRequest import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.Future final class ListUsersAuthIT extends AdminOrIDPAdminServiceCallAuthTests with UserManagementAuth { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "UserManagementService#ListUsers" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListVettedPackagesAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListVettedPackagesAuthIT.scala new file mode 100644 index 0000000000..1c26feddd4 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ListVettedPackagesAuthIT.scala @@ -0,0 +1,36 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
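The ListVettedPackagesAuthIT file added below is the one genuinely new test in this batch: it drives PackageService#ListVettedPackages as a public service call. Its request pairs two optional filters; the test passes empty sequences, presumably as wildcards, and the trailing "" and 0 arguments are positional in the listing, so their field names are not recoverable from this diff:

    import com.daml.ledger.api.v2.package_service.{
      ListVettedPackagesRequest,
      PackageMetadataFilter,
      TopologyStateFilter,
    }

    // Mirrors the request built in the new test; the last two positional
    // arguments are kept as-is because this diff does not show their names.
    val request = ListVettedPackagesRequest(
      Some(PackageMetadataFilter(Seq(), Seq())),
      Some(TopologyStateFilter(Seq(), Seq())),
      "",
      0,
    )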
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.ledgerapi.auth + +import com.daml.ledger.api.v2.package_service.{ + ListVettedPackagesRequest, + PackageMetadataFilter, + PackageServiceGrpc, + TopologyStateFilter, +} +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.integration.TestConsoleEnvironment +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer + +import scala.concurrent.Future + +final class ListVettedPackagesAuthIT extends PublicServiceCallAuthTests { + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + + override def serviceCallName: String = "PackageService#ListVettedPackages" + + private val request = + ListVettedPackagesRequest( + Some(PackageMetadataFilter(Seq(), Seq())), + Some(TopologyStateFilter(Seq(), Seq())), + "", + 0, + ) + + override def serviceCall(context: ServiceCallContext)(implicit + env: TestConsoleEnvironment + ): Future[Any] = + stub(PackageServiceGrpc.stub(channel), context.token).listVettedPackages(request) + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/OngoingStreamAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/OngoingStreamAuthIT.scala index a4248bae85..c3cbaff3b2 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/OngoingStreamAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/OngoingStreamAuthIT.scala @@ -17,7 +17,7 @@ import com.digitalasset.base.error.ErrorsAssertions import com.digitalasset.base.error.utils.ErrorDetails import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.services.SubmitAndWaitDummyCommandHelpers import com.digitalasset.canton.integration.util.UpdateFormatHelpers.getUpdateFormat import com.digitalasset.canton.topology.PartyId @@ -34,7 +34,7 @@ final class OngoingStreamAuthIT extends ServiceCallAuthTests with SubmitAndWaitDummyCommandHelpers with ErrorsAssertions { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "OngoingStreamAuthorizer" @@ -75,8 +75,6 @@ final class OngoingStreamAuthIT val request = new GetUpdatesRequest( beginExclusive = participantBegin, endInclusive = None, - filter = None, - verbose = false, updateFormat = Some(getUpdateFormat(Set(party))), ) val _ = stub(UpdateServiceGrpc.stub(channel), token) @@ -170,8 +168,6 @@ final class OngoingStreamAuthIT val request = new GetUpdatesRequest( beginExclusive = participantBegin, endInclusive = None, - filter = None, - verbose = false, updateFormat = Some(getUpdateFormat(Set(party))), ) val _ = stub(UpdateServiceGrpc.stub(channel), token) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ParticipantPruningAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ParticipantPruningAuthIT.scala index 4386db8b46..56155380eb 100644 --- 
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ParticipantPruningAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ParticipantPruningAuthIT.scala @@ -9,12 +9,12 @@ import com.daml.ledger.api.v2.admin.participant_pruning_service.{ } import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.Future final class ParticipantPruningAuthIT extends AdminServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "ParticipantPruningService#Prune" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/PrepareSubmissionAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/PrepareSubmissionAuthIT.scala index 88f9f2c4d9..edfc8ddc54 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/PrepareSubmissionAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/PrepareSubmissionAuthIT.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.services.SubmitDummyPreparedSubmission import scala.concurrent.Future @@ -15,7 +15,7 @@ final class PrepareSubmissionAuthIT extends ReadOnlyServiceCallAuthTests with SubmitDummyPreparedSubmission with ExecuteAsAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "InteractiveSubmissionService#PrepareSubmission" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/PrivilegedTokenAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/PrivilegedTokenAuthIT.scala index 3f161028a7..41b8bfa8a4 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/PrivilegedTokenAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/PrivilegedTokenAuthIT.scala @@ -10,7 +10,7 @@ import com.digitalasset.base.error.ErrorsAssertions import com.digitalasset.canton.auth.AccessLevel import com.digitalasset.canton.config.CantonRequireTypes.NonEmptyString import com.digitalasset.canton.config.{AuthServiceConfig, CantonConfig, DbConfig} -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.AuthServiceJWTSuppressionRule import 
com.digitalasset.canton.integration.tests.ledgerapi.services.SubmitAndWaitDummyCommandHelpers import com.digitalasset.canton.integration.{ @@ -233,7 +233,7 @@ trait PrivilegedTokenAuthIT class PrivilegedScopeTokenAuthIT extends PrivilegedTokenAuthIT { registerPlugin(ExpectedScopeOverrideConfig(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) protected def toContext( payload: StandardJWTPayload, @@ -318,7 +318,7 @@ class PrivilegedScopeTokenAuthIT extends PrivilegedTokenAuthIT { class PrivilegedAudienceTokenAuthIT extends PrivilegedTokenAuthIT { registerPlugin(ExpectedAudienceOverrideConfig(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) protected def toContext( payload: StandardJWTPayload, diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/RevokeUserRightsAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/RevokeUserRightsAuthIT.scala index c041f32556..4ff72ca505 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/RevokeUserRightsAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/RevokeUserRightsAuthIT.scala @@ -10,7 +10,7 @@ import com.daml.ledger.api.v2.admin.user_management_service.{ } import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.ApiUserManagementServiceSuppressionRule import java.util.UUID @@ -20,7 +20,7 @@ final class RevokeUserRightsAuthIT extends AdminOrIDPAdminServiceCallAuthTests with UserManagementAuth with GrantPermissionTest { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "UserManagementService#RevokeUserRights" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ScopeBasedTokenAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ScopeBasedTokenAuthIT.scala index ea6eef20d9..8833b25148 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ScopeBasedTokenAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ScopeBasedTokenAuthIT.scala @@ -8,7 +8,7 @@ import com.daml.ledger.api.v2.admin.package_management_service.* import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* import com.digitalasset.base.error.ErrorsAssertions import com.digitalasset.canton.config.{AuthServiceConfig, CantonConfig, DbConfig} -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.{ AuthInterceptorSuppressionRule, AuthServiceJWTSuppressionRule, @@ -27,7 
+27,7 @@ import scala.concurrent.Future class ScopeBasedTokenAuthIT extends ServiceCallAuthTests with ErrorsAssertions { registerPlugin(ExpectedScopeOverrideConfig(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "Any service call with target scope based token authorization" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SelfAdminAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SelfAdminAuthIT.scala new file mode 100644 index 0000000000..33f2a7a5aa --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SelfAdminAuthIT.scala @@ -0,0 +1,384 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.ledgerapi.auth + +import com.daml.ledger.api.v2.admin.object_meta.ObjectMeta +import com.daml.ledger.api.v2.admin.party_management_service.GetPartiesRequest +import com.daml.ledger.api.v2.admin.user_management_service.{GetUserRequest, ListUserRightsRequest} +import com.daml.ledger.api.v2.admin.{ + party_management_service as pproto, + user_management_service as uproto, +} +import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* +import com.digitalasset.base.error.ErrorsAssertions +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.{ + ApiPartyManagementServiceSuppressionRule, + AuthServiceJWTSuppressionRule, +} +import com.digitalasset.canton.integration.{ + ConfigTransforms, + EnvironmentDefinition, + TestConsoleEnvironment, +} +import monocle.Monocle.toAppliedFocusOps + +import java.util.UUID +import scala.concurrent.{ExecutionContext, Future} + +class SelfAdminAuthIT + extends ServiceCallAuthTests + with IdentityProviderConfigAuth + with ErrorsAssertions { + + override def environmentDefinition: EnvironmentDefinition = + super.environmentDefinition.addConfigTransform( + ConfigTransforms.updateAllParticipantConfigs_( + _.focus(_.ledgerApi.partyManagementService.maxSelfAllocatedParties) + .replace(NonNegativeInt.tryCreate(2)) + ) + ) + + override protected def serviceCall(context: ServiceCallContext)(implicit + env: TestConsoleEnvironment + ): Future[Any] = ??? 
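// Sketch of the behaviour the tests below pin down: a non-admin user can
// allocate a party for itself by passing its own user id to AllocateParty,
// and the maxSelfAllocatedParties = 2 transform above caps this, so in the
// plain quota test the third self-allocation is rejected:
//
//   _ <- allocateParty(s"party-1-$suffix", userContext, userId = user.id) // ok
//   _ <- allocateParty(s"party-2-$suffix", userContext, userId = user.id) // ok
//   _ <- allocateParty(s"party-3-$suffix", userContext, userId = user.id) // PERMISSION_DENIED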
+ + protected def createUser( + userId: String, + serviceCallContext: ServiceCallContext, + rights: Vector[uproto.Right], + ): Future[uproto.CreateUserResponse] = { + val user = uproto.User( + id = userId, + primaryParty = "", + isDeactivated = false, + metadata = Some(ObjectMeta.defaultInstance), + identityProviderId = serviceCallContext.identityProviderId, + ) + val req = uproto.CreateUserRequest(Some(user), rights) + stub(uproto.UserManagementServiceGrpc.stub(channel), serviceCallContext.token) + .createUser(req) + } + + private def allocateParty( + party: String, + serviceCallContext: ServiceCallContext, + userId: String = "", + identityProviderIdOverride: Option[String] = None, + )(implicit + ec: ExecutionContext + ): Future[String] = + stub(pproto.PartyManagementServiceGrpc.stub(channel), serviceCallContext.token) + .allocateParty( + pproto.AllocatePartyRequest( + partyIdHint = party, + localMetadata = None, + identityProviderId = + identityProviderIdOverride.getOrElse(serviceCallContext.identityProviderId), + synchronizerId = "", + userId = userId, + ) + ) + .map(_.partyDetails.value.party) + + serviceCallName should { + + "allow user querying about own parties" taggedAs adminSecurityAsset + .setHappyCase( + "User can query the details of the own parties" + ) in { implicit env => + import env.* + loggerFactory.suppress(AuthServiceJWTSuppressionRule) { + expectSuccess { + val suffix = UUID.randomUUID().toString + for { + read <- allocateParty(s"read-$suffix", asAdmin) + act <- allocateParty(s"act-$suffix", asAdmin) + execute <- allocateParty(s"execute-$suffix", asAdmin) + + inputParties = Seq(read, act, execute) + + (_, user1Context) <- createUserByAdmin( + "user-1-" + suffix, + rights = Vector( + uproto.Right(uproto.Right.Kind.CanReadAs(uproto.Right.CanReadAs(read))), + uproto.Right(uproto.Right.Kind.CanActAs(uproto.Right.CanActAs(act))), + uproto.Right(uproto.Right.Kind.CanExecuteAs(uproto.Right.CanExecuteAs(execute))), + ), + ) + (_, user2Context) <- createUserByAdmin( + "user-2-" + suffix, + rights = Vector( + uproto.Right(uproto.Right.Kind.CanReadAsAnyParty(uproto.Right.CanReadAsAnyParty())), + uproto.Right(uproto.Right.Kind.CanActAs(uproto.Right.CanActAs(act))), + uproto.Right(uproto.Right.Kind.CanExecuteAs(uproto.Right.CanExecuteAs(execute))), + ), + ) + (_, user3Context) <- createUserByAdmin( + "user-3-" + suffix, + rights = Vector( + uproto.Right(uproto.Right.Kind.CanReadAs(uproto.Right.CanReadAs(read))), + uproto.Right(uproto.Right.Kind.CanActAs(uproto.Right.CanActAs(act))), + uproto.Right( + uproto.Right.Kind.CanExecuteAsAnyParty(uproto.Right.CanExecuteAsAnyParty()) + ), + ), + ) + + parties1 <- stub(pproto.PartyManagementServiceGrpc.stub(channel), user1Context.token) + .getParties( + GetPartiesRequest(parties = inputParties, identityProviderId = "") + ) + parties2 <- stub(pproto.PartyManagementServiceGrpc.stub(channel), user2Context.token) + .getParties( + GetPartiesRequest(parties = inputParties, identityProviderId = "") + ) + parties3 <- stub(pproto.PartyManagementServiceGrpc.stub(channel), user3Context.token) + .getParties( + GetPartiesRequest(parties = inputParties, identityProviderId = "") + ) + + } yield ( + parties1.partyDetails.map(_.party) should contain theSameElementsAs inputParties, + parties2.partyDetails.map(_.party) should contain theSameElementsAs inputParties, + parties3.partyDetails.map(_.party) should contain theSameElementsAs inputParties, + ) + } + } + } + + "deny user querying about parties it doesn't own" taggedAs adminSecurityAsset + .setAttack( + 
attackUnknownResource(threat = "User querying status of parties it doesn't own") + ) in { implicit env => + import env.* + loggerFactory.suppress(AuthServiceJWTSuppressionRule) { + expectPermissionDenied { + val suffix = UUID.randomUUID().toString + for { + read <- allocateParty(s"read-$suffix", asAdmin) + act <- allocateParty(s"act-$suffix", asAdmin) + execute <- allocateParty(s"execute-$suffix", asAdmin) + + inputParties = Seq(read, act, execute) + + (_, userContext) <- createUserByAdmin( + "user-" + suffix, + rights = Vector( + uproto.Right(uproto.Right.Kind.CanReadAs(uproto.Right.CanReadAs(read))), + uproto.Right(uproto.Right.Kind.CanActAs(uproto.Right.CanActAs(act))), + ), + ) + + _ <- stub(pproto.PartyManagementServiceGrpc.stub(channel), userContext.token) + .getParties( + GetPartiesRequest(parties = inputParties, identityProviderId = "") + ) + + } yield () + } + } + } + + "allow user querying own records" taggedAs adminSecurityAsset + .setHappyCase( + "User can query own records" + ) in { implicit env => + import env.* + loggerFactory.suppress(AuthServiceJWTSuppressionRule) { + expectSuccess { + val suffix = UUID.randomUUID().toString + for { + (user, userContext) <- createUserByAdmin("user-" + suffix) + _ <- stub(uproto.UserManagementServiceGrpc.stub(channel), userContext.token).getUser( + GetUserRequest(user.id, "") + ) + _ <- stub(uproto.UserManagementServiceGrpc.stub(channel), userContext.token) + .listUserRights( + ListUserRightsRequest(user.id, "") + ) + .map(_.rights) + + } yield () + } + } + } + + "deny user querying someone else's records" taggedAs adminSecurityAsset + .setAttack( + attackUnknownResource(threat = "User querying records of other users") + ) in { implicit env => + import env.* + loggerFactory.suppress(AuthServiceJWTSuppressionRule) { + expectPermissionDenied { + val suffix = UUID.randomUUID().toString + for { + (_, userContext) <- createUserByAdmin("user-1-" + suffix) + (anotherUser, _) <- createUserByAdmin("user-2-" + suffix) + _ <- stub(uproto.UserManagementServiceGrpc.stub(channel), userContext.token).getUser( + GetUserRequest(anotherUser.id, "") + ) + _ <- stub(uproto.UserManagementServiceGrpc.stub(channel), userContext.token) + .listUserRights( + ListUserRightsRequest(anotherUser.id, "") + ) + .map(_.rights) + } yield () + } + } + } + + "allow user allocating own party" taggedAs adminSecurityAsset + .setHappyCase( + "User can allocate own party" + ) in { implicit env => + import env.* + loggerFactory.suppress(AuthServiceJWTSuppressionRule) { + expectSuccess { + val suffix = UUID.randomUUID().toString + for { + (user, userContext) <- createUserByAdmin("user-" + suffix) + party <- allocateParty(s"party-$suffix", userContext, userId = user.id) + parties <- stub(pproto.PartyManagementServiceGrpc.stub(channel), userContext.token) + .getParties( + GetPartiesRequest(parties = Seq(party), identityProviderId = "") + ) + rights <- stub(uproto.UserManagementServiceGrpc.stub(channel), userContext.token) + .listUserRights( + ListUserRightsRequest(user.id, "") + ) + .map(_.rights) + + } yield ( + parties.partyDetails.map(_.party) should contain theSameElementsAs Seq(party), + rights should contain theSameElementsAs Vector( + uproto.Right(uproto.Right.Kind.CanActAs(uproto.Right.CanActAs(party))) + ), + ) + } + } + } + + "deny user allocating party for another user" taggedAs adminSecurityAsset + .setAttack( + attackUnknownResource(threat = "User allocating party for another user") + ) in { implicit env => + import env.* + 
loggerFactory.suppress(AuthServiceJWTSuppressionRule) { + expectPermissionDenied { + val suffix = UUID.randomUUID().toString + for { + (_, userContext) <- createUserByAdmin("user-1-" + suffix) + (anotherUser, _) <- createUserByAdmin("user-2-" + suffix) + _ <- allocateParty(s"party-$suffix", userContext, userId = anotherUser.id) + } yield () + } + } + } + + "deny user allocating party without specifying who will own it" taggedAs adminSecurityAsset + .setAttack( + attackUnknownResource(threat = "User allocating party without specifying who will own it") + ) in { implicit env => + import env.* + loggerFactory.suppress(AuthServiceJWTSuppressionRule) { + expectPermissionDenied { + val suffix = UUID.randomUUID().toString + for { + (_, userContext) <- createUserByAdmin("user-" + suffix) + _ <- allocateParty(s"party-$suffix", userContext) + } yield () + } + } + } + + "deny user allocating party above the quota" taggedAs adminSecurityAsset + .setAttack( + attackUnknownResource(threat = "User allocating party above the quota") + ) in { implicit env => + import env.* + loggerFactory.suppress( + AuthServiceJWTSuppressionRule || ApiPartyManagementServiceSuppressionRule + ) { + expectPermissionDenied { + val suffix = UUID.randomUUID().toString + for { + (user, userContext) <- createUserByAdmin("user-1-" + suffix) + _ <- allocateParty(s"party-1-$suffix", userContext, userId = user.id) + _ <- allocateParty(s"party-2-$suffix", userContext, userId = user.id) + _ <- allocateParty(s"party-3-$suffix", userContext, userId = user.id) + } yield () + } + } + } + + "deny user allocating party above the quota, when other parties added by admin" taggedAs adminSecurityAsset + .setAttack( + attackUnknownResource(threat = "User allocating party above the quota") + ) in { implicit env => + import env.* + loggerFactory.suppress( + AuthServiceJWTSuppressionRule || ApiPartyManagementServiceSuppressionRule + ) { + expectPermissionDenied { + val suffix = UUID.randomUUID().toString + for { + party1 <- allocateParty(s"party-1-$suffix", asAdmin) + party2 <- allocateParty(s"party-2-$suffix", asAdmin) + + (user, userContext) <- createUserByAdmin( + "user-1-" + suffix, + rights = Vector( + uproto.Right(uproto.Right.Kind.CanReadAs(uproto.Right.CanReadAs(party1))), + uproto.Right(uproto.Right.Kind.CanActAs(uproto.Right.CanActAs(party2))), + ), + ) + _ <- allocateParty(s"party-3-$suffix", userContext, userId = user.id) + } yield () + } + } + } + + "allow user allocating party below the quota, when rights contain few distinctive parties" taggedAs adminSecurityAsset + .setAttack( + attackUnknownResource(threat = "User allocating party above the quota") + ) in { implicit env => + import env.* + loggerFactory.suppress( + AuthServiceJWTSuppressionRule || ApiPartyManagementServiceSuppressionRule + ) { + expectSuccess { + val suffix = UUID.randomUUID().toString + for { + party1 <- allocateParty(s"party-1-$suffix", asAdmin) + + (user, userContext) <- createUserByAdmin( + "user-1-" + suffix, + rights = Vector( + uproto.Right(uproto.Right.Kind.CanReadAs(uproto.Right.CanReadAs(party1))), + uproto.Right(uproto.Right.Kind.CanActAs(uproto.Right.CanActAs(party1))), + ), + ) + _ <- allocateParty(s"party-3-$suffix", userContext, userId = user.id) + } yield () + } + } + } + } + + protected def idpAdminRights: Vector[uproto.Right] = Vector( + uproto.Right( + uproto.Right.Kind.IdentityProviderAdmin(uproto.Right.IdentityProviderAdmin()) + ) + ) + protected def readAndActRights(readAsParty: String, actAsParty: String): Vector[uproto.Right] = + Vector( + 
uproto.Right( + uproto.Right.Kind.CanReadAs(uproto.Right.CanReadAs(readAsParty)) + ), + uproto.Right(uproto.Right.Kind.CanActAs(uproto.Right.CanActAs(actAsParty))), + ) + + override def serviceCallName: String = "GetParties" +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ServiceCallAuthTests.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ServiceCallAuthTests.scala index 3862223c3d..9c59899dca 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ServiceCallAuthTests.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ServiceCallAuthTests.scala @@ -11,7 +11,6 @@ import com.daml.ledger.api.v2.transaction_filter.{ Filters, ParticipantAuthorizationTopologyFormat, TopologyFormat, - TransactionFilter, TransactionFormat, UpdateFormat, } @@ -20,7 +19,6 @@ import com.digitalasset.canton.integration.tests.ledgerapi.fixture.CantonFixture import io.grpc.Status import org.scalatest.Assertion -import scala.annotation.nowarn import scala.concurrent.{ExecutionContext, Future} import scala.util.control.NonFatal @@ -66,11 +64,6 @@ trait ServiceCallAuthTests fail(e) }.futureValue - // TODO(#23504) use UpdateFormat instead of TransactionFilter - @nowarn("cat=deprecation") - protected def txFilterFor(party: String): Option[TransactionFilter] = - Some(TransactionFilter(Map(party -> Filters(Nil)), None)) - protected def eventFormat(party: String): EventFormat = EventFormat( filtersByParty = Map(party -> Filters(Nil)), diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SetTimeAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SetTimeAuthIT.scala index 038a874e6d..7018b27ee7 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SetTimeAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SetTimeAuthIT.scala @@ -6,12 +6,12 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.ledger.api.v2.testing.time_service.{SetTimeRequest, TimeServiceGrpc} import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.Future final class SetTimeAuthIT extends AdminServiceCallAuthTests with TimeAuth { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "TimeService#SetTime" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAndWaitAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAndWaitAuthIT.scala index 0e76f93969..eb66fdd23a 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAndWaitAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAndWaitAuthIT.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth 
import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.services.SubmitAndWaitDummyCommand import scala.concurrent.Future @@ -14,7 +14,7 @@ final class SubmitAndWaitAuthIT extends SyncServiceCallAuthTests with SubmitAndWaitDummyCommand with ExecuteAsAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "CommandService#SubmitAndWait" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAndWaitForReassignmentAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAndWaitForReassignmentAuthIT.scala index 2dd53ffb4b..cbff1f3664 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAndWaitForReassignmentAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAndWaitForReassignmentAuthIT.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.services.SubmitDummyReassignment import org.scalatest.Assertion @@ -14,7 +14,7 @@ import scala.concurrent.{ExecutionContext, Future} final class SubmitAndWaitForReassignmentAuthIT extends SyncServiceCallAuthTests with SubmitDummyReassignment { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def successfulBehavior(f: Future[Any])(implicit ec: ExecutionContext): Assertion = expectInvalidArgument(f) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAndWaitForTransactionAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAndWaitForTransactionAuthIT.scala index abaec85ae3..eacd4504b0 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAndWaitForTransactionAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAndWaitForTransactionAuthIT.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.services.SubmitAndWaitDummyCommand import scala.concurrent.Future @@ -14,7 +14,7 @@ final class SubmitAndWaitForTransactionAuthIT extends SyncServiceCallAuthTests with SubmitAndWaitDummyCommand with ExecuteAsAuthTests 
{ - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "CommandService#SubmitAndWaitForTransaction" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAndWaitForTransactionTreeAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAndWaitForTransactionTreeAuthIT.scala deleted file mode 100644 index 7851289e9f..0000000000 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAndWaitForTransactionTreeAuthIT.scala +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.integration.tests.ledgerapi.auth - -import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer -import com.digitalasset.canton.integration.tests.ledgerapi.services.SubmitAndWaitDummyCommand - -import scala.concurrent.Future - -final class SubmitAndWaitForTransactionTreeAuthIT - extends SyncServiceCallAuthTests - with SubmitAndWaitDummyCommand { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) - - override def serviceCallName: String = "CommandService#SubmitAndWaitForTransactionTree" - - override def serviceCall( - context: ServiceCallContext - )(implicit env: TestConsoleEnvironment): Future[Any] = { - import env.* - submitAndWaitForTransactionTree( - context.token, - context.userId, - party = getMainActorId, - ) - } -} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAndWaitMultiPartyAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAndWaitMultiPartyAuthIT.scala index c596d53723..0241950043 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAndWaitMultiPartyAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAndWaitMultiPartyAuthIT.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.services.SubmitAndWaitMultiPartyDummyCommand import scala.concurrent.Future @@ -13,7 +13,7 @@ import scala.concurrent.Future final class SubmitAndWaitMultiPartyAuthIT extends MultiPartyServiceCallAuthTests with SubmitAndWaitMultiPartyDummyCommand { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "CommandService#SubmitAndWait()" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAuthIT.scala 
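Aside: `SubmitAndWaitForTransactionTreeAuthIT` is deleted above because the deprecated tree endpoint is being removed (TODO(#23504)). A hedged sketch of an equivalent call against the surviving `submitAndWaitForTransaction` endpoint, using the `TRANSACTION_SHAPE_LEDGER_EFFECTS` shape (seen later in this patch) to retain exercise-level detail; the request field names `commands`/`transactionFormat` are assumptions about the v2 API, not taken from this patch:

    import com.daml.ledger.api.v2.command_service.{CommandServiceGrpc, SubmitAndWaitForTransactionRequest}
    import com.daml.ledger.api.v2.commands.Commands
    import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_LEDGER_EFFECTS
    import com.daml.ledger.api.v2.transaction_filter.{EventFormat, Filters, TransactionFormat}

    // Sketch only: submit and wait, asking for LEDGER_EFFECTS so the response
    // carries the event detail the tree endpoint used to provide.
    def submitAndWaitLedgerEffects(
        commandService: CommandServiceGrpc.CommandServiceStub,
        commands: Commands,
        party: String,
    ) =
      commandService.submitAndWaitForTransaction(
        SubmitAndWaitForTransactionRequest(
          commands = Some(commands),
          transactionFormat = Some(
            TransactionFormat(
              eventFormat = Some(
                EventFormat(
                  filtersByParty = Map(party -> Filters(Nil)),
                  filtersForAnyParty = None,
                  verbose = false,
                )
              ),
              transactionShape = TRANSACTION_SHAPE_LEDGER_EFFECTS,
            )
          ),
        )
      )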
b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAuthIT.scala index 029297cb28..650ac38980 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitAuthIT.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.services.SubmitDummyCommand import scala.concurrent.Future @@ -14,7 +14,7 @@ final class SubmitAuthIT extends SyncServiceCallAuthTests with SubmitDummyCommand with ExecuteAsAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "CommandSubmissionService#Submit" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitMultiPartyAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitMultiPartyAuthIT.scala index d509206437..00e62643a1 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitMultiPartyAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitMultiPartyAuthIT.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.services.SubmitMultiPartyDummyCommand import scala.concurrent.Future @@ -13,7 +13,7 @@ import scala.concurrent.Future final class SubmitMultiPartyAuthIT extends MultiPartyServiceCallAuthTests with SubmitMultiPartyDummyCommand { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "CommandSubmissionService#Submit()" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitReassignmentAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitReassignmentAuthIT.scala index 03f874452e..c3a0b1c976 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitReassignmentAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/SubmitReassignmentAuthIT.scala @@ -5,14 +5,14 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import 
com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.services.SubmitDummyReassignment import org.scalatest.Assertion import scala.concurrent.{ExecutionContext, Future} final class SubmitReassignmentAuthIT extends SyncServiceCallAuthTests with SubmitDummyReassignment { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def successfulBehavior(f: Future[Any])(implicit ec: ExecutionContext): Assertion = expectInvalidArgument(f) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/TokenExpirationVerificationIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/TokenExpirationVerificationIT.scala index 40c499e97e..37f92f1d35 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/TokenExpirationVerificationIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/TokenExpirationVerificationIT.scala @@ -13,7 +13,7 @@ import com.digitalasset.canton.config.{AuthServiceConfig, CantonConfig, DbConfig import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto import com.digitalasset.canton.http.json.SprayJson import com.digitalasset.canton.http.json.v2.{JsCommand, JsCommands} -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.jsonapi.AbstractHttpServiceIntegrationTestFuns.HttpServiceTestFixtureData import com.digitalasset.canton.integration.tests.jsonapi.{ HttpServiceTestFixture, @@ -49,7 +49,7 @@ class TokenExpirationVerificationIT with ErrorsAssertions { registerPlugin(ExpectedScopeOverrideConfig(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override protected def adminToken: StandardJWTPayload = standardToken( participantAdmin, diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdateIdentityProviderConfigAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdateIdentityProviderConfigAuthIT.scala index 530ea6e365..20c1f8b0f0 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdateIdentityProviderConfigAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdateIdentityProviderConfigAuthIT.scala @@ -7,7 +7,7 @@ import com.daml.ledger.api.v2.admin.identity_provider_config_service.UpdateIdent import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.google.protobuf.field_mask.FieldMask import io.grpc.Status.Code @@ -16,7 +16,7 @@ import scala.concurrent.Future final class UpdateIdentityProviderConfigAuthIT extends AdminServiceCallAuthTests with IdentityProviderConfigAuth { - 
registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "IdentityProviderConfigService#UpdateIdentityProviderConfig" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdatePartyDetailsAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdatePartyDetailsAuthIT.scala index 4874731f5a..6543fb417d 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdatePartyDetailsAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdatePartyDetailsAuthIT.scala @@ -6,14 +6,14 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.ledger.api.v2.admin.party_management_service.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.google.protobuf.field_mask.FieldMask import java.util.UUID import scala.concurrent.Future final class UpdatePartyDetailsAuthIT extends AdminOrIDPAdminServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "PartyManagementService#UpdatePartyDetails" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdatePartyIdentityProviderIdAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdatePartyIdentityProviderIdAuthIT.scala index 771de5cabe..4e3674b5fd 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdatePartyIdentityProviderIdAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdatePartyIdentityProviderIdAuthIT.scala @@ -6,7 +6,7 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.ledger.api.v2.admin.party_management_service.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import java.util.UUID import scala.concurrent.Future @@ -14,7 +14,7 @@ import scala.concurrent.Future final class UpdatePartyIdentityProviderIdAuthIT extends AdminServiceCallAuthTests with IdentityProviderConfigAuth { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "PartyManagementService#UpdatePartyIdentityProviderId" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdateUserAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdateUserAuthIT.scala index be6fbdc222..503ee26ecf 100644 --- 
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdateUserAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdateUserAuthIT.scala @@ -7,7 +7,7 @@ import com.daml.ledger.api.v2.admin.user_management_service.UpdateUserRequest import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.ApiUserManagementServiceSuppressionRule import com.google.protobuf.field_mask.FieldMask @@ -15,7 +15,7 @@ import java.util.UUID import scala.concurrent.Future final class UpdateUserAuthIT extends AdminOrIDPAdminServiceCallAuthTests with UserManagementAuth { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "UserManagementService#UpdateUser" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdateUserIdentityProviderIdAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdateUserIdentityProviderIdAuthIT.scala index 8e9363b9fa..67fcddfc42 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdateUserIdentityProviderIdAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdateUserIdentityProviderIdAuthIT.scala @@ -9,7 +9,7 @@ import com.daml.ledger.api.v2.admin.user_management_service.{ } import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import scala.concurrent.Future @@ -17,7 +17,7 @@ final class UpdateUserIdentityProviderIdAuthIT extends AdminServiceCallAuthTests with IdentityProviderConfigAuth with UserManagementAuth { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "UserManagementService#UpdateUserIdentityProviderId" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdateUserSelfDeactivationAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdateUserSelfDeactivationAuthIT.scala index 98c9e7b25e..fba18f14b7 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdateUserSelfDeactivationAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdateUserSelfDeactivationAuthIT.scala @@ -8,7 +8,7 @@ import com.digitalasset.base.error.ErrorsAssertions import com.digitalasset.base.error.utils.ErrorDetails import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import 
com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.google.protobuf.field_mask.FieldMask import io.grpc.{Status, StatusRuntimeException} @@ -17,7 +17,7 @@ import scala.concurrent.Future import scala.util.{Failure, Success} final class UpdateUserSelfDeactivationAuthIT extends ServiceCallAuthTests with ErrorsAssertions { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "UserManagementService#UpdateUser()" diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdateVettedPackagesAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdateVettedPackagesAuthIT.scala new file mode 100644 index 0000000000..b8a25b6ca9 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UpdateVettedPackagesAuthIT.scala @@ -0,0 +1,31 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.ledgerapi.auth + +import com.daml.ledger.api.v2.admin.package_management_service.* +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.integration.TestConsoleEnvironment +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.ledger.api.PriorTopologySerialExists + +import scala.concurrent.Future + +final class UpdateVettedPackagesAuthIT extends AdminServiceCallAuthTests { + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + + override def serviceCallName: String = "PackageManagementService#UpdateVettedPackages" + + lazy private val request = + UpdateVettedPackagesRequest( + Seq(), + false, + "", + Some(PriorTopologySerialExists(0).toProtoLAPI), + ) + + override def serviceCall(context: ServiceCallContext)(implicit + env: TestConsoleEnvironment + ): Future[Any] = + stub(PackageManagementServiceGrpc.stub(channel), context.token).updateVettedPackages(request) +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UploadDarFileAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UploadDarFileAuthIT.scala index 75a8527371..4c0366a8a4 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UploadDarFileAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UploadDarFileAuthIT.scala @@ -6,13 +6,13 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.ledger.api.v2.admin.package_management_service.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.util.BinaryFileUtil import scala.concurrent.Future final class UploadDarFileAuthIT extends AdminServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + 
registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: String = "PackageManagementService#UploadDarFile" @@ -21,7 +21,12 @@ final class UploadDarFileAuthIT extends AdminServiceCallAuthTests { .readByteStringFromFile(CantonExamplesPath) .valueOrFail("could not load examples") - UploadDarFileRequest(darData, submissionId = "") + UploadDarFileRequest( + darData, + submissionId = "", + UploadDarFileRequest.VettingChange.VETTING_CHANGE_VET_ALL_PACKAGES, + synchronizerId = "", + ) } override def serviceCall(context: ServiceCallContext)(implicit diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UserConfigAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UserConfigAuthIT.scala index 71e8362100..cc834226b9 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UserConfigAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/UserConfigAuthIT.scala @@ -11,7 +11,7 @@ import com.digitalasset.canton.auth.{AuthorizedUser, CantonAdminToken} import com.digitalasset.canton.config.CantonRequireTypes.NonEmptyString import com.digitalasset.canton.config.{AuthServiceConfig, CantonConfig, DbConfig} import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.AuthServiceJWTSuppressionRule import com.digitalasset.canton.integration.tests.ledgerapi.services.SubmitAndWaitDummyCommandHelpers import com.digitalasset.canton.integration.{ @@ -32,7 +32,7 @@ class UserConfigAuthIT with SubmitAndWaitDummyCommandHelpers { registerPlugin(ExpectedScopeOverrideConfig(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def prerequisiteParties: List[String] = List(randomParty) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ValidateDarFileAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ValidateDarFileAuthIT.scala index 7eb96db21f..2088d3073a 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ValidateDarFileAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/ValidateDarFileAuthIT.scala @@ -6,13 +6,13 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.ledger.api.v2.admin.package_management_service.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.util.BinaryFileUtil import scala.concurrent.Future final class ValidateDarFileAuthIT extends AdminServiceCallAuthTests { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override def serviceCallName: 
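Aside: the hunk above widens `UploadDarFileRequest` with an explicit vetting change and a target synchronizer, and `ValidateDarFileRequest` gains `synchronizerId` in the next file. A sketch of the combined usage under those new shapes, assuming the ScalaPB stub methods `validateDarFile`/`uploadDarFile`; `darBytes` and `mySynchronizerId` are placeholders:

    import com.daml.ledger.api.v2.admin.package_management_service.*
    import com.google.protobuf.ByteString
    import scala.concurrent.{ExecutionContext, Future}

    // Sketch: validate a DAR against one synchronizer, then upload it there,
    // vetting all contained packages (the only vetting mode shown in this patch).
    def validateThenUpload(
        stub: PackageManagementServiceGrpc.PackageManagementServiceStub,
        darBytes: ByteString,
        mySynchronizerId: String,
    )(implicit ec: ExecutionContext): Future[Unit] =
      for {
        _ <- stub.validateDarFile(
          ValidateDarFileRequest(darBytes, submissionId = "", synchronizerId = mySynchronizerId)
        )
        _ <- stub.uploadDarFile(
          UploadDarFileRequest(
            darBytes,
            submissionId = "",
            UploadDarFileRequest.VettingChange.VETTING_CHANGE_VET_ALL_PACKAGES,
            synchronizerId = mySynchronizerId,
          )
        )
      } yield ()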
String = "PackageManagementService#ValidateDarFile" @@ -21,7 +21,7 @@ final class ValidateDarFileAuthIT extends AdminServiceCallAuthTests { .readByteStringFromFile(CantonExamplesPath) .valueOrFail("could not load examples") - ValidateDarFileRequest(darData, submissionId = "") + ValidateDarFileRequest(darData, submissionId = "", synchronizerId = "") } override def serviceCall(context: ServiceCallContext)(implicit diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/client/LedgerClientAuthIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/client/LedgerClientAuthIT.scala index 08bc6eeafd..53adb6963a 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/client/LedgerClientAuthIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/client/LedgerClientAuthIT.scala @@ -6,7 +6,7 @@ package com.digitalasset.canton.integration.tests.ledgerapi.client import com.daml.grpc.GrpcException import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.TestConsoleEnvironment -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.AuthInterceptorSuppressionRule import com.digitalasset.canton.integration.tests.ledgerapi.fixture.{ CantonFixture, @@ -22,7 +22,7 @@ import com.digitalasset.canton.ledger.client.configuration.{ import java.util.UUID final class LedgerClientAuthIT extends CantonFixture with CreatesParties with CreatesUsers { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) private val clientConfigurationWithoutToken = LedgerClientConfiguration( userId = classOf[LedgerClientAuthIT].getSimpleName, diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/client/LedgerClientIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/client/LedgerClientIT.scala index 20b97151dc..52c291fa4b 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/client/LedgerClientIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/client/LedgerClientIT.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.integration.tests.ledgerapi.client import com.daml.jwt.JwksUrl import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.NoAuthPlugin import com.digitalasset.canton.integration.tests.ledgerapi.fixture.CantonFixture import com.digitalasset.canton.ledger.api.{IdentityProviderConfig, IdentityProviderId} @@ -21,7 +21,7 @@ import scalaz.OneAnd final class LedgerClientIT extends CantonFixture { registerPlugin(NoAuthPlugin(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) private val ClientConfiguration = LedgerClientConfiguration( userId = classOf[LedgerClientIT].getSimpleName, diff --git 
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/fixture/CantonFixture.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/fixture/CantonFixture.scala index 7974f12442..d8f7231b52 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/fixture/CantonFixture.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/fixture/CantonFixture.scala @@ -85,7 +85,7 @@ trait CantonFixtureAbstract createChannel(participant1) - participant1.dars.upload(CantonTestsPath) + participant1.dars.upload(CantonTestsPath, synchronizerId = daId) } protected val channels = TrieMap[String, CloseableChannel]() diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/otel/LedgerApiOtelIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/otel/LedgerApiOtelIT.scala index f84610b4cf..f07245e332 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/otel/LedgerApiOtelIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/otel/LedgerApiOtelIT.scala @@ -14,12 +14,15 @@ import com.daml.ledger.api.v2.command_service.CommandServiceGrpc.CommandService import com.daml.ledger.api.v2.command_submission_service.CommandSubmissionServiceGrpc import com.daml.ledger.api.v2.commands.CreateCommand import com.daml.ledger.api.v2.state_service.{GetLedgerEndRequest, StateServiceGrpc} -import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_ACS_DELTA +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.{ + TRANSACTION_SHAPE_ACS_DELTA, + TRANSACTION_SHAPE_LEDGER_EFFECTS, +} import com.daml.ledger.api.v2.transaction_filter.{ EventFormat, Filters, - TransactionFilter, TransactionFormat, + UpdateFormat, } import com.daml.ledger.api.v2.update_service.* import com.daml.ledger.api.v2.value.{Record, RecordField, Value} @@ -27,7 +30,7 @@ import com.digitalasset.canton.UniquePortGenerator import com.digitalasset.canton.config.AuthServiceConfig.Wildcard import com.digitalasset.canton.config.RequireTypes.ExistingFile import com.digitalasset.canton.config.{CantonConfig, DbConfig, PemFile, TlsServerConfig} -import com.digitalasset.canton.integration.plugins.{UseCommunityReferenceBlockSequencer, UseOtlp} +import com.digitalasset.canton.integration.plugins.{UseOtlp, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.ledgerapi.fixture.ValueConversions.* import com.digitalasset.canton.integration.tests.ledgerapi.fixture.{CantonFixture, CreatesParties} import com.digitalasset.canton.integration.tests.ledgerapi.services.TestCommands @@ -49,11 +52,9 @@ import org.scalatest.{Assertion, Succeeded} import org.slf4j.event.Level import java.util.UUID -import scala.annotation.nowarn import scala.concurrent.{ExecutionContext, Future} import scala.util.Success -// TODO(#23504) remove TransactionTree related tests trait LedgerApiOtelITBase extends CantonFixture with CreatesParties @@ -63,7 +64,7 @@ trait LedgerApiOtelITBase val otlpHeaders = Map("custom-key" -> "custom-value") registerPlugin(LedgerApiOtelOverrideConfig(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) protected def useOtlp: UseOtlp registerPlugin(useOtlp) @@ 
-291,7 +292,6 @@ trait LedgerApiOtelITBase } } -@nowarn("cat=deprecation") class LedgerApiOtelIT extends LedgerApiOtelITBase { protected override lazy val useOtlp: UseOtlp = @@ -343,16 +343,6 @@ class LedgerApiOtelIT extends LedgerApiOtelITBase { }, origin = "com.daml.ledger.api.v2.CommandService/SubmitAndWaitForTransaction", ) - - testCommandService( - submit = { party => - commandService - .submitAndWaitForTransactionTree( - submitAndWaitRequest(party, userId) - ) - }, - origin = "com.daml.ledger.api.v2.CommandService/SubmitAndWaitForTransactionTree", - ) } } @@ -402,7 +392,7 @@ class LedgerApiOtelIT extends LedgerApiOtelITBase { } } - "CommandTransactionService" when { + "UpdateService" when { val svcName = "com.daml.ledger.api.v2.UpdateService" def submissionService = CommandServiceGrpc @@ -415,7 +405,7 @@ class LedgerApiOtelIT extends LedgerApiOtelITBase { def updateService = UpdateServiceGrpc.stub(channel).withInterceptors(TraceContextGrpc.clientInterceptor) - def extractTraceContextFromFlat(response: GetUpdatesResponse): Seq[TraceContext] = + def extractTraceContextFromUpdates(response: GetUpdatesResponse): Seq[TraceContext] = response.update.transaction .map(c => SerializableTraceContextConverter @@ -424,34 +414,37 @@ class LedgerApiOtelIT extends LedgerApiOtelITBase { ) .toList - def extractTraceContextFromTrees(response: GetTransactionTreeResponse): TraceContext = - SerializableTraceContextConverter - .fromDamlProtoSafeOpt(loggerWithoutTracing(logger))( - response.transaction.flatMap(_.traceContext) - ) - .traceContext - - def extractTraceContextFromUpdateTrees(response: GetUpdateTreesResponse): Seq[TraceContext] = - response.update.transactionTree - .map(c => - SerializableTraceContextConverter - .fromDamlProtoSafeOpt(loggerWithoutTracing(logger))(c.traceContext) - .traceContext - ) - .toList - - def extractTraceContextFromPointwiseFlat( - response: GetTransactionResponse + def extractTraceContextFromPointwiseUpdate( + response: GetUpdateResponse ): TraceContext = SerializableTraceContextConverter .fromDamlProtoSafeOpt(loggerWithoutTracing(logger))( - response.transaction.flatMap(_.traceContext) + response.update.transaction.flatMap(_.traceContext) ) .traceContext val aUserId = userId - "retrieving flat transaction with a span and trace" should { + def updateFormat(party: String) = Some( + UpdateFormat( + includeTransactions = Some( + TransactionFormat( + eventFormat = Some( + EventFormat( + filtersByParty = Map(party -> Filters.defaultInstance), + filtersForAnyParty = None, + verbose = false, + ) + ), + transactionShape = TRANSACTION_SHAPE_LEDGER_EFFECTS, + ) + ), + includeReassignments = None, + includeTopologyEvents = None, + ) + ) + + "retrieving updates with a span and trace" should { testStreamingService[GetUpdatesResponse]( submissionService, stateService @@ -462,18 +455,11 @@ class LedgerApiOtelIT extends LedgerApiOtelITBase { GetUpdatesRequest( beginExclusive = offset, endInclusive = None, - filter = Some( - TransactionFilter( - filtersByParty = Map(party -> Filters.defaultInstance), - filtersForAnyParty = None, - ) - ), - verbose = false, - updateFormat = None, + updateFormat = updateFormat(party), ), _, ), - extractTraceContextFromFlat, + extractTraceContextFromUpdates, origin = s"$svcName/getUpdates", userId = aUserId, filterRelevantResponses = @@ -484,64 +470,17 @@ class LedgerApiOtelIT extends LedgerApiOtelITBase { testPointwiseQuery( submissionService, (tId, party) => - updateService.getTransactionById( - GetTransactionByIdRequest( + updateService.getUpdateById( + 
GetUpdateByIdRequest( updateId = tId, - requestingParties = Seq(party), - transactionFormat = None, + updateFormat = updateFormat(party), ) ), - extractTraceContextFromPointwiseFlat, + extractTraceContextFromPointwiseUpdate, s"$svcName/getTransactionById", aUserId, ) } - - "retrieving transaction tree with a span and trace" should { - testStreamingService[GetUpdateTreesResponse]( - submissionService, - stateService - .getLedgerEnd(GetLedgerEndRequest()) - .map(_.offset)(_), - (party, offset) => - updateService.getUpdateTrees( - GetUpdatesRequest( - beginExclusive = offset, - endInclusive = None, - filter = Some( - TransactionFilter( - filtersByParty = Map(party -> Filters.defaultInstance), - filtersForAnyParty = None, - ) - ), - verbose = false, - updateFormat = None, - ), - _, - ), - extractTraceContextFromUpdateTrees, - origin = s"$svcName/getUpdateTrees", - userId = aUserId, - filterRelevantResponses = - // Filter out offset checkpoints as they are not triggered by the command submission, - // hence not relevant in asserting spans/traces - !_.update.isOffsetCheckpoint, - ) - testPointwiseQuery( - submissionService, - (tId, party) => - updateService.getTransactionTreeById( - GetTransactionByIdRequest( - updateId = tId, - requestingParties = Seq(party), - transactionFormat = None, - ) - ), - extractTraceContextFromTrees, - s"$svcName/getTransactionTreeById", - aUserId, - ) - } } } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/SubmitAndWaitDummyCommand.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/SubmitAndWaitDummyCommand.scala index d737b7c0e0..e3e0811a2d 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/SubmitAndWaitDummyCommand.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/SubmitAndWaitDummyCommand.scala @@ -17,7 +17,6 @@ import com.digitalasset.canton.integration.tests.ledgerapi.auth.{ import com.google.protobuf.empty.Empty import java.util.UUID -import scala.annotation.nowarn import scala.concurrent.{ExecutionContext, Future} trait SubmitAndWaitDummyCommand extends TestCommands with SubmitAndWaitDummyCommandHelpers { @@ -94,18 +93,4 @@ trait SubmitAndWaitDummyCommandHelpers extends TestCommands { ) .map(_ => Empty()) - // TODO(#23504) remove - @nowarn("cat=deprecation") - protected def submitAndWaitForTransactionTree( - token: Option[String], - userId: String, - party: String, - commandId: Option[String] = None, - )(implicit ec: ExecutionContext): Future[Empty] = - service(token) - .submitAndWaitForTransactionTree( - dummySubmitAndWaitRequest(userId, party = party, commandId = commandId) - ) - .map(_ => Empty()) - } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/SubmitAndWaitMultiPartyDummyCommand.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/SubmitAndWaitMultiPartyDummyCommand.scala index e132facf94..586d7061f2 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/SubmitAndWaitMultiPartyDummyCommand.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/SubmitAndWaitMultiPartyDummyCommand.scala @@ -14,7 +14,6 @@ import com.digitalasset.canton.integration.tests.ledgerapi.auth.ServiceCallAuthT import 
com.google.protobuf.empty.Empty import java.util.UUID -import scala.annotation.nowarn import scala.concurrent.{ExecutionContext, Future} trait SubmitAndWaitMultiPartyDummyCommand extends TestCommands { self: ServiceCallAuthTests => @@ -79,16 +78,4 @@ trait SubmitAndWaitMultiPartyDummyCommand extends TestCommands { self: ServiceCa .submitAndWait(dummySubmitAndWaitRequest(actAs, readAs, userId)) .map(_ => Empty()) - // TODO(#23504) remove - @nowarn("cat=deprecation") - protected def submitAndWaitForTransactionTree( - token: Option[String], - actAs: Seq[String], - readAs: Seq[String], - userId: String, - )(implicit ec: ExecutionContext): Future[Empty] = - service(token) - .submitAndWaitForTransactionTree(dummySubmitAndWaitRequest(actAs, readAs, userId)) - .map(_ => Empty()) - } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/SubmitDummyPreparedSubmission.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/SubmitDummyPreparedSubmission.scala index e412fc97ed..58e0e47a16 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/SubmitDummyPreparedSubmission.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/SubmitDummyPreparedSubmission.scala @@ -45,6 +45,7 @@ trait SubmitDummyPreparedSubmission extends SubmitDummyCommand { packageIdSelectionPreference = Seq.empty, verboseHashing = true, prefetchContractKeys = Seq.empty, + maxRecordTime = Option.empty, ) protected def dummyExecuteSubmissionRequest( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/command/CommandServiceBackPressureIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/command/CommandServiceBackPressureIT.scala index 7a3f39b7db..162a85988c 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/command/CommandServiceBackPressureIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/command/CommandServiceBackPressureIT.scala @@ -10,10 +10,7 @@ import com.daml.ledger.api.v2.value.{Record, RecordField, Value} import com.digitalasset.canton.config.AuthServiceConfig.Wildcard import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.config.{CantonConfig, DbConfig} -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.ledgerapi.fixture.ValueConversions.* import com.digitalasset.canton.integration.tests.ledgerapi.fixture.{CantonFixture, CreatesParties} import com.digitalasset.canton.integration.tests.ledgerapi.services.TestCommands @@ -112,5 +109,5 @@ final case class BackpressureOverrideConfig( class CommandServiceBackPressureITPostgres extends CommandServiceBackPressureITBase { registerPlugin(BackpressureOverrideConfig(loggerFactory)) registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git 
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/command/CommandServiceIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/command/CommandServiceIT.scala index 205e10a7c5..d25a71948b 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/command/CommandServiceIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/command/CommandServiceIT.scala @@ -10,10 +10,7 @@ import com.daml.ledger.api.v2.value.{Record, RecordField, Value} import com.digitalasset.canton.config.AuthServiceConfig.Wildcard import com.digitalasset.canton.config.{CantonConfig, ClockConfig, DbConfig} import com.digitalasset.canton.console.LocalParticipantReference -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.ledgerapi.fixture.ValueConversions.* import com.digitalasset.canton.integration.tests.ledgerapi.fixture.{CantonFixture, CreatesParties} import com.digitalasset.canton.integration.tests.ledgerapi.services.TestCommands @@ -147,5 +144,5 @@ final case class CommandServiceOverrideConfig( class CommandServiceITPostgres extends CommandServiceITBase { registerPlugin(CommandServiceOverrideConfig(loggerFactory)) registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/completion/CompletionServiceIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/completion/CompletionServiceIT.scala index c6433dec39..4d89d562bf 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/completion/CompletionServiceIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/completion/CompletionServiceIT.scala @@ -16,10 +16,7 @@ import com.daml.ledger.api.v2.value.{Record, RecordField, Value} import com.digitalasset.canton.config.AuthServiceConfig.Wildcard import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.config.{CantonConfig, DbConfig} -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.ledgerapi.fixture.ValueConversions.* import com.digitalasset.canton.integration.tests.ledgerapi.fixture.{CantonFixture, CreatesParties} import com.digitalasset.canton.integration.tests.ledgerapi.services.TestCommands @@ -160,5 +157,5 @@ final case class CompletionServiceOverrideConfig( class CompletionServiceITPostgres extends CompletionServiceITBase { registerPlugin(CompletionServiceOverrideConfig(loggerFactory)) registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git 
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/reflection/ReflectionIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/reflection/ReflectionIT.scala index 3f86eeaa7a..5faf128aac 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/reflection/ReflectionIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/reflection/ReflectionIT.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.integration.tests.ledgerapi.services.reflection import com.daml.grpc.test.StreamConsumer import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.fixture.CantonFixture import io.grpc.reflection.v1.{ ServerReflectionGrpc, @@ -17,7 +17,7 @@ import scala.concurrent.{ExecutionContext, Future} import scala.jdk.CollectionConverters.* final class ReflectionIT extends CantonFixture { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) private val listServices: ServerReflectionRequest = ServerReflectionRequest.newBuilder().setHost("127.0.0.1").setListServices("").build() diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/time/WallClockTimeIT.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/time/WallClockTimeIT.scala index 490353dcdf..85d0196972 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/time/WallClockTimeIT.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/time/WallClockTimeIT.scala @@ -7,7 +7,7 @@ import com.daml.grpc.GrpcException import com.daml.ledger.api.v2.testing.time_service.{GetTimeRequest, SetTimeRequest, TimeServiceGrpc} import com.digitalasset.canton.config.AuthServiceConfig.Wildcard import com.digitalasset.canton.config.{CantonConfig, ClockConfig, DbConfig} -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.ledgerapi.fixture.CantonFixture import com.digitalasset.canton.integration.{ConfigTransforms, EnvironmentSetupPlugin} import com.digitalasset.canton.ledger.api.util.TimestampConversion.fromInstant @@ -19,7 +19,7 @@ import java.time.Instant final class WallClockTimeIT extends CantonFixture { registerPlugin(WallClockOverrideConfig(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) private val unimplemented: PartialFunction[Any, Unit] = { case GrpcException.UNIMPLEMENTED() => () diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/BaseInteractiveSubmissionTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/BaseInteractiveSubmissionTest.scala index ddb2ffa5e1..5cfd2727ae 100644 --- 
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/BaseInteractiveSubmissionTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/BaseInteractiveSubmissionTest.scala @@ -19,6 +19,7 @@ import com.daml.ledger.api.v2.transaction_filter.{ UpdateFormat, WildcardFilter, } +import com.digitalasset.canton.BaseTest import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands.UpdateService import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands.UpdateService.TransactionWrapper import com.digitalasset.canton.config.RequireTypes.PositiveInt @@ -28,7 +29,6 @@ import com.digitalasset.canton.console.{ ParticipantReference, } import com.digitalasset.canton.crypto.* -import com.digitalasset.canton.data.OnboardingTransactions import com.digitalasset.canton.integration.tests.ledgerapi.submission.BaseInteractiveSubmissionTest.{ ParticipantSelector, defaultConfirmingParticipant, @@ -40,26 +40,15 @@ import com.digitalasset.canton.integration.{ ConfigTransforms, TestConsoleEnvironment, } -import com.digitalasset.canton.interactive.ExternalPartyUtils import com.digitalasset.canton.logging.{LogEntry, NamedLogging} import com.digitalasset.canton.topology.ForceFlag.DisablePartyWithActiveContracts -import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId import com.digitalasset.canton.topology.transaction.* -import com.digitalasset.canton.topology.transaction.TopologyTransaction.TxHash -import com.digitalasset.canton.topology.{ - ExternalParty, - ForceFlags, - PartyId, - PhysicalSynchronizerId, - SynchronizerId, -} -import com.digitalasset.canton.{BaseTest, HasExecutionContext} +import com.digitalasset.canton.topology.{ExternalParty, ForceFlags, PartyId, SynchronizerId} import com.google.protobuf.ByteString import monocle.Monocle.toAppliedFocusOps import org.scalatest.Suite import java.util.UUID -import scala.concurrent.ExecutionContext object BaseInteractiveSubmissionTest { type ParticipantSelector = TestConsoleEnvironment => LocalParticipantReference @@ -69,15 +58,10 @@ object BaseInteractiveSubmissionTest { val defaultConfirmingParticipant: ParticipantSelector = _.participant3 } -trait BaseInteractiveSubmissionTest - extends ExternalPartyUtils - with BaseTest - with HasExecutionContext { +trait BaseInteractiveSubmissionTest extends BaseTest { this: Suite & NamedLogging => - override val externalPartyExecutionContext: ExecutionContext = parallelExecutionContext - protected def ppn(implicit env: TestConsoleEnvironment): LocalParticipantReference = defaultPreparingParticipant(env) protected def cpn(implicit env: TestConsoleEnvironment): LocalParticipantReference = @@ -95,59 +79,6 @@ trait BaseInteractiveSubmissionTest ), ) - protected def loadOnboardingTransactions( - externalParty: ExternalParty, - confirming: ParticipantReference, - synchronizerId: PhysicalSynchronizerId, - onboardingTransactions: OnboardingTransactions, - extraConfirming: Seq[ParticipantReference] = Seq.empty, - observing: Seq[ParticipantReference] = Seq.empty, - )(implicit env: TestConsoleEnvironment): Unit = { - // Start by loading the transactions signed by the party - confirming.topology.transactions.load( - onboardingTransactions.toSeq, - store = synchronizerId, - ) - - val partyId = externalParty.partyId - val allParticipants = Seq(confirming) ++ extraConfirming ++ observing - - // Then each hosting participant must sign and load the PartyToParticipant transaction - allParticipants.map { 
hp => - // Eventually because it could take some time before the transaction makes it to all participants - val partyToParticipantProposal = eventually() { - hp.topology.party_to_participant_mappings - .list( - synchronizerId, - proposals = true, - filterParty = partyId.toProtoPrimitive, - ) - .loneElement - } - - // In practice, participant operators are expected to inspect the transaction here before authorizing it - val transactionHash = partyToParticipantProposal.context.transactionHash - hp.topology.transactions.authorize[PartyToParticipant]( - TxHash(Hash.fromByteString(transactionHash).value), - mustBeFullyAuthorized = false, - store = synchronizerId, - ) - } - - allParticipants.foreach { hp => - // Wait until all participants agree the hosting is effective - env.utils.retry_until_true( - hp.topology.party_to_participant_mappings - .list( - synchronizerId, - filterParty = partyId.toProtoPrimitive, - ) - .nonEmpty - ) - } - } - - // TODO(#27680) Extract into PartyToParticipantDeclarative protected def offboardParty( party: ExternalParty, participant: LocalParticipantReference, @@ -170,7 +101,7 @@ trait BaseInteractiveSubmissionTest global_secret.sign(removeTopologyTx, party, testedProtocolVersion) participant.topology.transactions.load( Seq(removeCharlieSignedTopologyTx), - TopologyStoreId.Synchronizer(synchronizerId), + synchronizerId, forceFlags = ForceFlags(DisablePartyWithActiveContracts), ) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/ExternalPartyOnboardingIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/ExternalPartyOnboardingIntegrationTest.scala new file mode 100644 index 0000000000..cc67ae949c --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/ExternalPartyOnboardingIntegrationTest.scala @@ -0,0 +1,304 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.integration.tests.ledgerapi.submission
+
+import cats.syntax.traverse.*
+import com.daml.nonempty.NonEmpty
+import com.digitalasset.canton.config.RequireTypes.PositiveInt
+import com.digitalasset.canton.console.CommandFailure
+import com.digitalasset.canton.console.commands.PartiesAdministration
+import com.digitalasset.canton.integration.{
+  CommunityIntegrationTest,
+  EnvironmentDefinition,
+  HasCycleUtils,
+  SharedEnvironment,
+}
+import com.digitalasset.canton.logging.LogEntry
+import com.digitalasset.canton.participant.topology.ParticipantTopologyManagerError.ExternalPartyAlreadyExists
+import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId
+import com.digitalasset.canton.topology.transaction.ParticipantPermission.{
+  Confirmation,
+  Observation,
+}
+import com.digitalasset.canton.topology.transaction.{HostingParticipant, PartyToParticipant}
+
+import scala.concurrent.Future
+
+trait ExternalPartyOnboardingIntegrationTestSetup
+    extends CommunityIntegrationTest
+    with SharedEnvironment
+    with BaseInteractiveSubmissionTest
+    with HasCycleUtils {
+
+  override def environmentDefinition: EnvironmentDefinition =
+    EnvironmentDefinition.P3_S1M1
+      .withSetup { implicit env =>
+        import env.*
+        participants.all.synchronizers.connect_local(sequencer1, alias = daName)
+      }
+      .addConfigTransforms(enableInteractiveSubmissionTransforms*)
+}
+
+class ExternalPartyOnboardingIntegrationTest extends ExternalPartyOnboardingIntegrationTestSetup {
+  "External party onboarding" should {
+    "host parties on multiple participants with a threshold" in { implicit env =>
+      import env.*
+      val (onboardingTransactions, externalParty) =
+        participant1.parties.external
+          .onboarding_transactions(
+            "Alice",
+            additionalConfirming = Seq(participant2),
+            observing = Seq(participant3),
+            confirmationThreshold = PositiveInt.two,
+          )
+          .futureValueUS
+          .value
+
+      Seq(participant1, participant2, participant3).map { hostingNode =>
+        hostingNode.ledger_api.parties.allocate_external(
+          synchronizer1Id,
+          onboardingTransactions.transactionsWithSingleSignature,
+          multiSignatures = onboardingTransactions.multiTransactionSignatures,
+        )
+      }
+
+      PartiesAdministration.Allocation.waitForPartyKnown(
+        partyId = externalParty.partyId,
+        hostingParticipant = participant1,
+        synchronizeParticipants = Seq(participant1, participant2, participant3),
+        synchronizerId = synchronizer1Id.logical,
+      )
+    }
+
+    "allocate a party from one of their observing nodes" in { implicit env =>
+      import env.*
+
+      val (onboardingTransactions, externalParty) = participant1.parties.external
+        .onboarding_transactions(
+          "Bob",
+          observing = Seq(participant2),
+        )
+        .futureValueUS
+        .value
+      val partyId = externalParty.partyId
+
+      participant2.ledger_api.parties.allocate_external(
+        synchronizer1Id,
+        onboardingTransactions.transactionsWithSingleSignature,
+        onboardingTransactions.multiTransactionSignatures,
+      )
+
+      // Authorize the hosting via the topology admin API in this test; it can also be done
+      // through the allocateExternalParty endpoint instead.
+      // See the multi-hosted decentralized party test below for an example.
+      val partyToParticipantProposal = eventually() {
+        participant1.topology.party_to_participant_mappings
+          .list(
+            synchronizer1Id,
+            proposals = true,
+            filterParty = partyId.toProtoPrimitive,
+          )
+          .loneElement
+      }
+      val transactionHash = partyToParticipantProposal.context.transactionHash
+      participant1.topology.transactions.authorize[PartyToParticipant](
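+        // Countersigning happens by transaction hash: the hosting participant signs the
+        // proposal it found in its own store rather than re-submitting the full mapping.
+        // A sketch of the alternative route mentioned above, reusing the allocate endpoint
+        // as it appears in the decentralized party test below (PartyToParticipant
+        // transaction only, no party signatures):
+        //   participant1.ledger_api.parties.allocate_external(
+        //     synchronizer1Id,
+        //     Seq(onboardingTransactions.partyToParticipant.transaction -> Seq.empty),
+        //     multiSignatures = Seq.empty,
+        //   )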
transactionHash, + mustBeFullyAuthorized = false, + store = TopologyStoreId.Synchronizer(synchronizer1Id), + ) + + PartiesAdministration.Allocation.waitForPartyKnown( + partyId = externalParty.partyId, + hostingParticipant = participant1, + synchronizeParticipants = Seq(participant1, participant2), + synchronizerId = synchronizer1Id.logical, + ) + } + + "allocate a decentralized multi-hosted multi-sig external party" in { implicit env => + import env.* + + // Create the namespace owners first + val namespace1 = participant1.parties.external.create_external_namespace() + val namespace2 = participant1.parties.external.create_external_namespace() + val namespace3 = participant1.parties.external.create_external_namespace() + val namespaceOwners = NonEmpty.mk(Set, namespace1, namespace2, namespace3) + + val confirmationThreshold = PositiveInt.two + val keysCount = PositiveInt.three + val keysThreshold = PositiveInt.two + val namespaceThreshold = PositiveInt.three + + // Generate the corresponding onboarding transactions + val onboardingData = participant1.parties.external.onboarding_transactions( + name = "Emily", + additionalConfirming = Seq(participant2), + observing = Seq(participant3), + confirmationThreshold = confirmationThreshold, + keysCount = keysCount, + keysThreshold = keysThreshold, + decentralizedNamespaceOwners = namespaceOwners.forgetNE, + namespaceThreshold = namespaceThreshold, + ) + + val (onboardingTransactions, emilyE) = onboardingData.futureValueUS.value + + // Start by having the extra hosting nodes authorize the hosting + // We can do that even before the party namespace is authorized + Seq(participant2, participant3).map { hostingNode => + hostingNode.ledger_api.parties.allocate_external( + synchronizer1Id, + Seq(onboardingTransactions.partyToParticipant.transaction -> Seq.empty), + multiSignatures = Seq.empty, + ) + } + + // Then load all transactions via the allocate endpoint + participant1.ledger_api.parties.allocate_external( + synchronizer1Id, + onboardingTransactions.transactionsWithSingleSignature, + multiSignatures = onboardingTransactions.multiTransactionSignatures, + ) + + // Eventually everything should be authorized correctly + eventually() { + val p2p = participant1.topology.party_to_participant_mappings + .list(filterParty = emilyE.partyId.filterString, synchronizerId = synchronizer1Id) + + p2p.loneElement.item.partyId shouldBe emilyE.partyId + p2p.loneElement.item.threshold shouldBe confirmationThreshold + p2p.loneElement.item.participants contains HostingParticipant(participant1, Confirmation) + p2p.loneElement.item.participants contains HostingParticipant(participant2, Confirmation) + p2p.loneElement.item.participants contains HostingParticipant(participant3, Observation) + } + + eventually() { + val p2k = participant1.topology.party_to_key_mappings.list( + filterParty = emilyE.partyId.filterString, + store = synchronizer1Id, + ) + + p2k.loneElement.item.party shouldBe emilyE.partyId + p2k.loneElement.item.threshold shouldBe keysThreshold + p2k.loneElement.item.signingKeys.forgetNE + .map(_.fingerprint) should contain theSameElementsAs emilyE.signingFingerprints.forgetNE + } + + eventually() { + val dnd = participant1.topology.decentralized_namespaces.list( + filterNamespace = emilyE.partyId.namespace.filterString, + store = synchronizer1Id, + ) + + dnd.loneElement.item.namespace shouldBe emilyE.partyId.uid.namespace + dnd.loneElement.item.threshold shouldBe namespaceThreshold + dnd.loneElement.item.owners.forgetNE shouldBe namespaceOwners.forgetNE + } + 
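+      // Taken together, the three eventually() blocks above cover the three topology
+      // mappings that make up a decentralized external party: PartyToParticipant (hosting,
+      // confirmation threshold two), PartyToKeyMapping (two of three signing keys), and
+      // DecentralizedNamespaceDefinition (all three namespace owners must sign).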
} + + "provide useful error message when the participant is not connected to the synchronizer" in { + implicit env => + import env.* + val (onboardingTransactions, _) = + participant1.parties.external.onboarding_transactions("Alice").futureValueUS.value + + participant1.synchronizers.disconnect_all() + + loggerFactory.assertThrowsAndLogs[CommandFailure]( + participant1.ledger_api.parties.allocate_external( + synchronizer1Id, + onboardingTransactions.transactionsWithSingleSignature, + onboardingTransactions.multiTransactionSignatures, + ), + _.errorMessage should include( + s"This node is not connected to the requested synchronizer ${synchronizer1Id.logical}." + ), + ) + + participant1.synchronizers.reconnect_all() + } + + "provide useful error message when onboarding the same party twice" in { implicit env => + import env.* + + val (onboardingTransactions, partyE) = + participant1.parties.external.onboarding_transactions("Alice").futureValueUS.value + + def allocate() = + participant1.ledger_api.parties.allocate_external( + synchronizer1Id, + onboardingTransactions.transactionsWithSingleSignature, + onboardingTransactions.multiTransactionSignatures, + ) + + // Allocate once + allocate() + participant1.ledger_api.parties.list().find(_.party == partyE.partyId) shouldBe defined + + // Allocate a second time + loggerFactory.assertThrowsAndLogsSeq[CommandFailure]( + allocate(), + LogEntry.assertLogSeq( + Seq( + ( + _.errorMessage should include( + ExternalPartyAlreadyExists.Failure(partyE.partyId, synchronizer1Id).cause + ), + "Expected party already exists error", + ) + ) + ), + ) + } + + "provide useful error message when concurrently retrying onboarding requests for the same party" in { + implicit env => + import env.* + + val (onboardingTransactions, partyE) = + participant1.parties.external.onboarding_transactions("Alice").futureValueUS.value + + def allocate() = + participant1.ledger_api.parties.allocate_external( + synchronizer1Id, + onboardingTransactions.transactionsWithSingleSignature, + onboardingTransactions.multiTransactionSignatures, + ) + + loggerFactory.assertLoggedWarningsAndErrorsSeq( + { + val results = timeouts.default.await("Waiting for concurrent allocation attempts")( + Seq + .fill(10)( + Future(allocate()).map(_ => Right(())).recover { case ex => Left(ex) } + ) + .sequence + ) + // Only one of them should be a success + results.count(_.isRight) shouldBe 1 + }, + LogEntry.assertLogSeq( + Seq( + ( + _.errorMessage should include( + s"Party ${partyE.partyId.uid.identifier.str} is in the process of being allocated on this node." 
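+                  // Exactly one of the ten concurrent allocate() calls is expected to
+                  // succeed; the others should fail with this in-flight allocation message
+                  // or, if they arrive after allocation completes, with the
+                  // ExternalPartyAlreadyExists error accepted as optional below.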
+                ),
+                "Expected party allocation in progress error",
+              )
+            ),
+            // One of the calls may get in late, when the party is already fully allocated
+            // and no other call is in flight, so accept that error here as optional
+            Seq(
+              _.errorMessage should include(
+                ExternalPartyAlreadyExists.Failure(partyE.partyId, synchronizer1Id).cause
+              )
+            ),
+          ),
+        )
+
+        // Check the party was still allocated
+        participant1.ledger_api.parties.list().find(_.party == partyE.partyId) shouldBe defined
+    }
+  }
+}
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/InteractiveSubmissionConfirmationIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/InteractiveSubmissionConfirmationIntegrationTest.scala
index adade780a4..44ef19d999 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/InteractiveSubmissionConfirmationIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/InteractiveSubmissionConfirmationIntegrationTest.scala
@@ -6,7 +6,6 @@ package com.digitalasset.canton.integration.tests.ledgerapi.submission
 import com.daml.ledger.api.v2.interactive.interactive_submission_service.PrepareSubmissionResponse
 import com.daml.nonempty.NonEmptyUtil
 import com.daml.scalautil.future.FutureConversion.*
-import com.digitalasset.canton.LfTimestamp
 import com.digitalasset.canton.config.RequireTypes.PositiveInt
 import com.digitalasset.canton.console.CommandFailure
 import com.digitalasset.canton.crypto.InteractiveSubmission.TransactionMetadataForHashing
@@ -24,7 +23,12 @@ import com.digitalasset.canton.integration.{
 }
 import com.digitalasset.canton.ledger.api.services.InteractiveSubmissionService.ExecuteRequest
 import com.digitalasset.canton.logging.SuppressionRule.LevelAndAbove
-import com.digitalasset.canton.logging.{ErrorLoggingContext, LogEntry, LoggingContextWithTrace}
+import com.digitalasset.canton.logging.{
+  ErrorLoggingContext,
+  LogEntry,
+  LoggingContextWithTrace,
+  SuppressionRule,
+}
 import com.digitalasset.canton.platform.apiserver.execution.CommandInterpretationResult
 import com.digitalasset.canton.platform.apiserver.services.command.interactive.codec.PreparedTransactionDecoder
 import com.digitalasset.canton.protocol.hash.HashTracer
@@ -32,6 +36,7 @@ import com.digitalasset.canton.sequencing.protocol.MemberRecipient
 import com.digitalasset.canton.synchronizer.sequencer.{HasProgrammableSequencer, SendDecision}
 import com.digitalasset.canton.topology.{ExternalParty, PartyId}
 import com.digitalasset.canton.version.HashingSchemeVersion
+import com.digitalasset.canton.{HasExecutionContext, LfTimestamp}
 import com.digitalasset.daml.lf.data.ImmArray
 import com.digitalasset.daml.lf.data.Ref.{SubmissionId, UserId}
 import io.grpc.Status
@@ -49,6 +54,7 @@ final class InteractiveSubmissionConfirmationIntegrationTest
     with SharedEnvironment
     with BaseInteractiveSubmissionTest
     with HasProgrammableSequencer
+    with HasExecutionContext
     with HasCycleUtils {
 
   private var aliceE: ExternalParty = _
@@ -57,9 +63,9 @@ EnvironmentDefinition.P3_S1M1
       .withSetup { implicit env =>
         import env.*
+        participants.all.synchronizers.connect_local(sequencer1, alias = daName)
         participants.all.dars.upload(CantonExamplesPath)
         participants.all.dars.upload(CantonTestsPath)
-        participants.all.synchronizers.connect_local(sequencer1,
alias = daName) aliceE = cpn.parties.external.enable( "Alice", @@ -104,7 +110,7 @@ final class InteractiveSubmissionConfirmationIntegrationTest SendDecision.HoldBack(releaseSubmission.future) case _ => SendDecision.Process } - loggerFactory.assertLoggedWarningsAndErrorsSeq( + loggerFactory.assertEventuallyLogsSeq(LevelAndAbove(Level.WARN))( { val (submissionId, ledgerEnd) = exec(prepared, Map(aliceE.partyId -> Seq(singleSignature)), epn) @@ -123,14 +129,17 @@ final class InteractiveSubmissionConfirmationIntegrationTest completion.status.value.code shouldBe Status.Code.INVALID_ARGUMENT.value() }, LogEntry.assertLogSeq( - Seq( + Seq(2, 3).map({ p => ( - _.warningMessage should include( - s"Received 1 valid signatures (0 invalid), but expected at least 2 valid for ${aliceE.partyId}" - ), - "expect not enough signatures", + e => { + e.warningMessage should (include( + s"Received 1 valid signatures (0 invalid), but expected at least 2 valid for ${aliceE.partyId}" + )) + e.mdc.get("participant") shouldBe Some(s"participant$p") + }, + s"participant$p authentication", ) - ), + }), Seq.empty, ), ) @@ -172,17 +181,21 @@ final class InteractiveSubmissionConfirmationIntegrationTest SendDecision.HoldBack(releaseSubmission.future) case _ => SendDecision.Process } - loggerFactory.assertLoggedWarningsAndErrorsSeq( + loggerFactory.assertEventuallyLogsSeq(LevelAndAbove(Level.WARN))( assertion(prepared, signatures, releaseSubmission), LogEntry.assertLogSeq( - additionalExpectedLogs ++ Seq( - ( - _.warningMessage should include( - s"Received 0 valid signatures (3 invalid), but expected at least 2 valid for ${aliceE.partyId}" - ), - "expect invalid signatures", - ) - ), + additionalExpectedLogs ++ + Seq(2, 3).map({ p => + ( + e => { + e.warningMessage should (include( + s"Received 0 valid signatures (3 invalid), but expected at least 2 valid for ${aliceE.partyId}" + )) + e.mdc.get("participant") shouldBe Some(s"participant$p") + }, + s"participant$p authentication", + ) + }), Seq.empty, ), ) @@ -268,23 +281,24 @@ final class InteractiveSubmissionConfirmationIntegrationTest aliceE.partyId -> global_secret.sign(prepared.preparedTransactionHash, aliceE) ) // This is only currently detected in phase III, at which point warnings are issued - val completion = loggerFactory.assertLoggedWarningsAndErrorsSeq( - { - val (submissionId, ledgerEnd) = exec(prepared, signatures, epn) - findCompletion(submissionId, ledgerEnd, aliceE, epn) - }, - LogEntry.assertLogSeq( - Seq(2, 3).map({ p => - ( - e => { - e.warningMessage should (include regex "LOCAL_VERDICT_MALFORMED_REQUEST.*with a view that is not correctly authenticated") - e.mdc.get("participant") shouldBe Some(s"participant$p") - }, - s"participant$p authentication", - ) - }) - ), - ) + val completion = + loggerFactory.assertEventuallyLogsSeq(SuppressionRule.LevelAndAbove(Level.WARN))( + { + val (submissionId, ledgerEnd) = exec(prepared, signatures, epn) + findCompletion(submissionId, ledgerEnd, aliceE, epn) + }, + LogEntry.assertLogSeq( + Seq(2, 3).map({ p => + ( + e => { + e.warningMessage should (include regex "LOCAL_VERDICT_MALFORMED_REQUEST.*with a view that is not correctly authenticated") + e.mdc.get("participant") shouldBe Some(s"participant$p") + }, + s"participant$p authentication", + ) + }) + ), + ) completion.status.value.code shouldBe Status.Code.INVALID_ARGUMENT.value() } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/InteractiveSubmissionIntegrationTest.scala 
b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/InteractiveSubmissionIntegrationTest.scala index f85879380a..f3ee0d3466 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/InteractiveSubmissionIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/InteractiveSubmissionIntegrationTest.scala @@ -22,6 +22,8 @@ import com.daml.ledger.javaapi.data.codegen.ContractId as CodeGenCID import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands.UpdateService.TransactionWrapper import com.digitalasset.canton.admin.api.client.data.TemplateId import com.digitalasset.canton.config +import com.digitalasset.canton.config.CantonRequireTypes.InstanceName +import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.console.CommandFailure import com.digitalasset.canton.crypto.SigningKeyUsage @@ -30,7 +32,8 @@ import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.examples.java.cycle.Cycle import com.digitalasset.canton.examples.java.trailingnone.TrailingNone import com.digitalasset.canton.examples.java.{cycle as M, trailingnone as T} -import com.digitalasset.canton.integration.plugins.UsePostgres +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.util.UpdateFormatHelpers.getUpdateFormat import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -55,6 +58,7 @@ import monocle.macros.GenLens import org.slf4j.event.Level import java.util.UUID +import scala.jdk.OptionConverters.RichOption trait InteractiveSubmissionIntegrationTestSetup extends CommunityIntegrationTest @@ -85,9 +89,9 @@ trait InteractiveSubmissionIntegrationTestSetup EnvironmentDefinition.P3_S1M1 .withSetup { implicit env => import env.* - participants.all.dars.upload(CantonExamplesPath) - participants.all.dars.upload(CantonTestsPath) participants.all.synchronizers.connect_local(sequencer1, alias = daName) + participants.all.dars.upload(CantonExamplesPath, synchronizerId = daId) + participants.all.dars.upload(CantonTestsPath, synchronizerId = daId) aliceE = cpn.parties.external.enable("Alice") } @@ -522,10 +526,10 @@ class InteractiveSubmissionIntegrationTest extends InteractiveSubmissionIntegrat Seq(archiveCmd), disclosedContracts = Seq( new DisclosedContract( - TrailingNone.TEMPLATE_ID_WITH_PACKAGE_ID, - contract1CreatedEvent.contractId, contract1CreatedEvent.createdEventBlob, env.daId.logical.toProtoPrimitive, + Some(TrailingNone.TEMPLATE_ID_WITH_PACKAGE_ID).toJava, + Some(contract1CreatedEvent.contractId).toJava, ) ), ) @@ -593,10 +597,10 @@ class InteractiveSubmissionIntegrationTest extends InteractiveSubmissionIntegrat Seq(exerciseRepeatOnCycleContract), disclosedContracts = Seq( new DisclosedContract( - Cycle.TEMPLATE_ID_WITH_PACKAGE_ID, - cycleCreated.contractId, cycleCreated.createdEventBlob, daId.logical.toProtoPrimitive, + Some(Cycle.TEMPLATE_ID_WITH_PACKAGE_ID).toJava, + Some(cycleCreated.contractId).toJava, ) ), ) @@ -830,6 +834,72 @@ class InteractiveSubmissionIntegrationTest extends InteractiveSubmissionIntegrat } +class InteractiveSubmissionMultiSynchronizerIntegrationTest + extends CommunityIntegrationTest + with 
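+    // Environment sketch for this suite: a single participant (P1_S1M1_S1M1_S1M1) connected
+    // to three synchronizers (da, acme, repair), each backed by its own reference block
+    // sequencer through the MultiSynchronizer grouping registered below.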
SharedEnvironment + with BaseInteractiveSubmissionTest + with HasCycleUtils { + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P1_S1M1_S1M1_S1M1 + .withSetup { implicit env => + import env.* + participants.all.synchronizers.connect_local(sequencer1, alias = daName) + participants.all.synchronizers.connect_local(sequencer2, alias = acmeName) + participants.all.synchronizers.connect_local(sequencer3, alias = repairSynchronizerName) + participants.all.dars.upload(CantonExamplesPath, synchronizerId = daId) + participants.all.dars.upload(CantonTestsPath, synchronizerId = daId) + } + .addConfigTransforms(enableInteractiveSubmissionTransforms*) + + registerPlugin( + new UseReferenceBlockSequencer[DbConfig.Postgres]( + loggerFactory, + sequencerGroups = MultiSynchronizer( + Seq( + Set(InstanceName.tryCreate("sequencer1")), + Set(InstanceName.tryCreate("sequencer2")), + Set(InstanceName.tryCreate("sequencer3")), + ) + ), + ) + ) + registerPlugin(new UsePostgres(loggerFactory)) + + "External parties" should { + "can be allocated in a multi-synchronizer scenario" in { implicit env => + import env.* + + val aliceE = participant1.parties.external.enable("Alice", synchronizer = Some(daName)) + + // Check that alice is hosted on `hostedCount` synchronizers + def ensureCorrectHosting(hostedCount: Int) = { + val synchronizers = Seq(daId, acmeId, repairSynchronizerId) + val activeOn = synchronizers.take(hostedCount) + val inactiveOn = synchronizers.drop(hostedCount) + + activeOn.foreach { psid => + participant1.topology.party_to_participant_mappings + .list(psid, filterParty = aliceE.filterString) should not be empty + } + + inactiveOn.foreach { psid => + participant1.topology.party_to_participant_mappings + .list(psid, filterParty = aliceE.filterString) shouldBe empty + } + } + + ensureCorrectHosting(1) + + participant1.parties.external.also_enable(aliceE, acmeName) + ensureCorrectHosting(2) + + participant1.parties.external.also_enable(aliceE, repairSynchronizerName) + ensureCorrectHosting(3) + } + } +} + class InteractiveSubmissionIntegrationTestTimeouts extends InteractiveSubmissionIntegrationTestSetup { "timeout if CPN does not respond" in { implicit env => @@ -869,14 +939,12 @@ class InteractiveSubmissionIntegrationTestTimeouts loggerFactory.assertEventuallyLogsSeq(SuppressionRule.Level(Level.WARN))( cpn.synchronizers.reconnect_all(), LogEntry.assertLogSeq( + Seq.empty, Seq( - ( - _.warningMessage should (include("Response message for request") and include( - "timed out" - )), - "expected timed out message", - ) - ) + _.warningMessage should (include("Response message for request") and include( + "timed out" + )) + ), ), ) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/MultiHostingInteractiveSubmissionIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/MultiHostingInteractiveSubmissionIntegrationTest.scala index f57b72969d..a9e75b454d 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/MultiHostingInteractiveSubmissionIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/MultiHostingInteractiveSubmissionIntegrationTest.scala @@ -7,6 +7,7 @@ import com.digitalasset.canton.admin.api.client.data.TemplateId import com.digitalasset.canton.config import 
com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.console.LocalParticipantReference +import com.digitalasset.canton.console.commands.PartiesAdministration import com.digitalasset.canton.damltests.java.cycle.Cycle import com.digitalasset.canton.error.MediatorError import com.digitalasset.canton.integration.plugins.UseH2 @@ -20,13 +21,6 @@ import com.digitalasset.canton.integration.{ } import com.digitalasset.canton.logging.LogEntry import com.digitalasset.canton.topology.ExternalParty -import com.digitalasset.canton.topology.transaction.{ - HostingParticipant, - ParticipantPermission, - PartyToParticipant, - TopologyChangeOp, - TopologyTransaction, -} import io.grpc.Status /** Test and demonstrates onboarding of a multi hosted external party @@ -37,10 +31,12 @@ sealed trait MultiHostingInteractiveSubmissionIntegrationTest with BaseInteractiveSubmissionTest with HasCycleUtils { + // Alice is onboarded as a multi hosted party at the beginning of the test suite and re-used in subsequent tests private var aliceE: ExternalParty = _ override protected def epn(implicit env: TestConsoleEnvironment): LocalParticipantReference = env.participant1 + private val cpns: ParticipantsSelector = env => Seq(env.participant1, env.participant2) private val opns: ParticipantsSelector = env => Seq(env.participant3) @@ -57,66 +53,41 @@ sealed trait MultiHostingInteractiveSubmissionIntegrationTest ) ) - participants.all.dars.upload(CantonExamplesPath) participants.all.synchronizers.connect_local(sequencer1, alias = daName) - } - .addConfigTransforms(enableInteractiveSubmissionTransforms*) + participants.all.dars.upload(CantonExamplesPath) - "Interactive submission" should { - "host parties on multiple participants with a threshold" in { implicit env => - import env.* - val (onboardingTransactions, externalParty) = - participant1.parties.external - .onboarding_transactions( - "Alice", - confirming = Seq(participant2), - observing = Seq(participant3), - confirmationThreshold = PositiveInt.two, - ) - .futureValueUS - .value - - loadOnboardingTransactions( - externalParty, - confirming = participant1, - synchronizerId = daId, - onboardingTransactions, - extraConfirming = Seq(participant2), - observing = Seq(participant3), - ) + // Create a multi hosted party for this test suite + val (onboardingTransactions, externalParty) = + participant1.parties.external + .onboarding_transactions( + "Alice", + additionalConfirming = Seq(participant2), + observing = Seq(participant3), + confirmationThreshold = PositiveInt.two, + ) + .futureValueUS + .value - aliceE = externalParty + aliceE = externalParty - val newPTP = TopologyTransaction( - TopologyChangeOp.Replace, - serial = PositiveInt.two, - mapping = PartyToParticipant - .create( - aliceE.partyId, - threshold = PositiveInt.two, - Seq( - HostingParticipant(participant1, ParticipantPermission.Confirmation, false), - HostingParticipant(participant2, ParticipantPermission.Confirmation, false), - HostingParticipant(participant3, ParticipantPermission.Observation, false), - ), + Seq(participant1, participant2, participant3).map { hostingNode => + hostingNode.ledger_api.parties.allocate_external( + synchronizer1Id, + onboardingTransactions.transactionsWithSingleSignature, + multiSignatures = onboardingTransactions.multiTransactionSignatures, ) - .value, - protocolVersion = testedProtocolVersion, - ) + } - eventually() { - participants.all.forall( - _.topology.party_to_participant_mappings - .is_known( - daId, - aliceE, - hostingParticipants = 
participants.all, - threshold = Some(newPTP.mapping.threshold), - ) - ) shouldBe true + PartiesAdministration.Allocation.waitForPartyKnown( + partyId = externalParty.partyId, + hostingParticipant = participant1, + synchronizeParticipants = Seq(participant1, participant2, participant3), + synchronizerId = synchronizer1Id.logical, + ) } - } + .addConfigTransforms(enableInteractiveSubmissionTransforms*) + "Interactive submission" should { "create a contract and read it from all confirming and observing participants" in { implicit env => val contractId = diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/NoSubmitterPackageIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/NoSubmitterPackageIntegrationTest.scala new file mode 100644 index 0000000000..d113bb3283 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/NoSubmitterPackageIntegrationTest.scala @@ -0,0 +1,83 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.ledgerapi.submission + +import com.daml.ledger.api.v2.interactive.interactive_submission_service.PrepareSubmissionResponse +import com.digitalasset.canton.damltests.java.simpletemplate.SimpleTemplate +import com.digitalasset.canton.error.TransactionRoutingError.ConfigurationErrors.InvalidPrescribedSynchronizerId +import com.digitalasset.canton.integration.* +import com.digitalasset.canton.logging.LogEntry +import com.digitalasset.canton.participant.protocol.TransactionProcessor.SubmissionErrors.PackageNotVettedByRecipients +import com.digitalasset.canton.topology.ExternalParty + +class NoSubmitterPackageIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment + with BaseInteractiveSubmissionTest + with HasCycleUtils { + + private var aliceE: ExternalParty = _ + private var bobE: ExternalParty = _ + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P3_S1M1 + .withSetup { implicit env => + import env.* + participants.all.synchronizers.connect_local(sequencer1, alias = daName) + ppn.dars.upload(CantonTestsPath, synchronizerId = daId) + cpn.dars.upload(CantonTestsPath, synchronizerId = daId) + + bobE = ppn.parties.external.enable("Bob") + aliceE = cpn.parties.external.enable("Alice") + } + .addConfigTransforms(enableInteractiveSubmissionTransforms*) + + "Interactive submission" should { + + // Prepare a submission on a participant that has the package + def prepare()(implicit env: TestConsoleEnvironment): PrepareSubmissionResponse = + cpn.ledger_api.javaapi.interactive_submission.prepare( + Seq(aliceE.partyId), + Seq( + SimpleTemplate + .create(aliceE.toProtoPrimitive, bobE.toProtoPrimitive) + .commands() + .loneElement + ), + ) + + "fail if the executing participant does not have the package loaded" in { + implicit env: TestConsoleEnvironment => + // Submit on one that does not have package + assertThrowsAndLogsCommandFailures( + epn.ledger_api.commands.external.submit_prepared(aliceE, prepare()), + (e: LogEntry) => { + e.shouldBeCantonErrorCode(InvalidPrescribedSynchronizerId) + e.errorMessage should include regex raw"(?s)Participant PAR::participant2.*has not vetted" + }, + ) + } + + "fail if the package is loaded but not vetted" in { implicit env: TestConsoleEnvironment => + // Load DAR 
without vetting + epn.dars.upload(CantonTestsPath, vetAllPackages = false) + + assertThrowsAndLogsCommandFailures( + epn.ledger_api.commands.external.submit_prepared(aliceE, prepare()), + (e: LogEntry) => { + e.shouldBeCantonErrorCode(PackageNotVettedByRecipients) + e.errorMessage should include regex raw"(?s)Participant PAR::participant2.*has not vetted" + }, + ) + + } + + "pass if the package is vetted" in { implicit env: TestConsoleEnvironment => + // Load DAR with vetting + epn.dars.upload(CantonTestsPath, vetAllPackages = true) + epn.ledger_api.commands.external.submit_prepared(aliceE, prepare()) + } + } + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/TimeBasedInteractiveIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/TimeBasedInteractiveIntegrationTest.scala index d9d870d757..52352456b3 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/TimeBasedInteractiveIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/TimeBasedInteractiveIntegrationTest.scala @@ -10,7 +10,9 @@ import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands.Updat import com.digitalasset.canton.admin.api.client.data.TemplateId.fromIdentifier import com.digitalasset.canton.damltests.java.cycle.Cycle import com.digitalasset.canton.damltests.java.statictimetest.Pass +import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.integration.plugins.UseProgrammableSequencer import com.digitalasset.canton.integration.tests.ledgerapi.submission.BaseInteractiveSubmissionTest.defaultConfirmingParticipant import com.digitalasset.canton.integration.util.UpdateFormatHelpers.getUpdateFormat import com.digitalasset.canton.integration.{ @@ -20,10 +22,18 @@ import com.digitalasset.canton.integration.{ HasCycleUtils, SharedEnvironment, } +import com.digitalasset.canton.logging.{LogEntry, SuppressionRule} +import com.digitalasset.canton.participant.protocol.TransactionProcessor.SubmissionErrors.TimeoutError +import com.digitalasset.canton.synchronizer.sequencer.{ + HasProgrammableSequencer, + SendDecision, + SendPolicy, +} import com.digitalasset.canton.topology.{ExternalParty, ForceFlags} import com.digitalasset.canton.{HasExecutionContext, config} import com.digitalasset.daml.lf.data.Time import io.grpc.Status +import org.slf4j.event.Level import scalapb.TimestampConverters import java.time.{Duration, Instant} @@ -37,20 +47,23 @@ final class TimeBasedInteractiveIntegrationTest with SharedEnvironment with BaseInteractiveSubmissionTest with HasCycleUtils - with HasExecutionContext { + with HasExecutionContext + with HasProgrammableSequencer { private val oneDay = Duration.ofHours(24) override def environmentDefinition: EnvironmentDefinition = EnvironmentDefinition.P3_S1M1 + .addConfigTransform(ConfigTransforms.useStaticTime) .withSetup { implicit env => import env.* + participants.all.synchronizers.connect_local(sequencer1, alias = daName) participants.all.dars.upload(CantonExamplesPath) participants.all.dars.upload(CantonTestsPath) - participants.all.synchronizers.connect_local(sequencer1, alias = daName) } .addConfigTransforms(enableInteractiveSubmissionTransforms*) - .addConfigTransform(ConfigTransforms.useStaticTime) + + registerPlugin(new 
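+  // The programmable sequencer plugin below lets the "respect max record time" test install
+  // a SendPolicy that holds the confirmation request while the static clock is advanced,
+  // so sequencing can be forced to land just before or just after maxRecordTime.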
UseProgrammableSequencer(this.getClass.toString, loggerFactory)) private var aliceE: ExternalParty = _ @@ -171,6 +184,78 @@ final class TimeBasedInteractiveIntegrationTest execAndWait(prepared, signatures).discard } + "respect max record time" in { implicit env => + import env.* + val simClock = env.environment.simClock.value + + def test(sequenceAt: CantonTimestamp => CantonTimestamp, expectSuccess: Boolean): Unit = { + // Set max record time below ledgerTimeRecordTimeTolerance + val maxRecordTime = simClock.now.add(ledgerTimeRecordTimeTolerance.dividedBy(2)) + val prepared = + cpn.ledger_api.interactive_submission.prepare( + Seq(aliceE), + Seq(createCycleCommand(aliceE, "test")), + maxRecordTime = Some(maxRecordTime), + ) + + val signatures = Map( + aliceE.partyId -> global_secret.sign(prepared.preparedTransactionHash, aliceE) + ) + + getProgrammableSequencer(sequencer1.name).withSendPolicy( + "Delay sequencing of submission request", + SendPolicy.processTimeProofs { implicit traceContext => submissionRequest => + if (submissionRequest.isConfirmationRequest && submissionRequest.sender == epn.id) { + // When we receive the confirmation request, advance time to the desired sequencing time + simClock.advanceTo(sequenceAt(maxRecordTime)) + } + SendDecision.Process + }, + ) { + + // exec will pick LET = clock.now + // and max sequencing time + // = Min(LET + ledgerTimeRecordTimeTolerance, maxRecordTime) + // = Min(clock.now + ledgerTimeRecordTimeTolerance, clock.now + ledgerTimeRecordTimeTolerance / 2) + // = maxRecordTime + if (expectSuccess) { + execAndWait(prepared, signatures) + } else { + loggerFactory.assertEventuallyLogsSeq(SuppressionRule.LevelAndAbove(Level.WARN))( + { + val (submissionId, ledgerEnd) = exec(prepared, signatures, epn) + // Request a time proof to advance synchronizer time on the participant so it realizes + // that the request has timed out and emits a completion event + epn.underlying.value.sync + .lookupSynchronizerTimeTracker(synchronizer1Id) + .value + .requestTick(maxRecordTime.immediateSuccessor, immediately = true) + val completion = findCompletion(submissionId, ledgerEnd, aliceE, epn) + completion.status.value.code shouldBe io.grpc.Status.Code.ABORTED.value() + completion.status.value.message should include(TimeoutError.code.id) + () + }, + LogEntry.assertLogSeq( + Seq( + ( + _.warningMessage should include("Submission timed out"), + "expected submission timed out warning", + ) + ) + ), + ) + } + } + } + + // Expect success when the event goes just before the max record time + // Technically exactly at max record time is fine but because there's concurrent ticks going on, testing at exactly + // max sequencing time ends up not going through if a tick gets sequenced before + test(_.minusMillis(1), expectSuccess = true) + // Expect failure when the event goes through right after max record time + test(_.immediateSuccessor, expectSuccess = false) + } + "rejects execution requests outside the submission tolerance" in { implicit env => import env.* val simClock = env.environment.simClock.value diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/logging/DamlDebugLoggingIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/logging/DamlDebugLoggingIntegrationTest.scala index a2cc1bedac..b5c12263d7 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/logging/DamlDebugLoggingIntegrationTest.scala +++ 
b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/logging/DamlDebugLoggingIntegrationTest.scala @@ -53,8 +53,8 @@ class DamlDebugLoggingIntegrationTest extends CommunityIntegrationTest with Shar import env.* participants.local.foreach { participant => - participant.dars.upload(CantonTestsPath) participant.synchronizers.connect_local(sequencer1, alias = daName) + participant.dars.upload(CantonTestsPath) val alice = participant.parties.enable("Alice") participant.ledger_api.javaapi.commands diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/logging/TraceContextIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/logging/TraceContextIntegrationTest.scala index b1af503623..3883e6086e 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/logging/TraceContextIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/logging/TraceContextIntegrationTest.scala @@ -6,10 +6,7 @@ package com.digitalasset.canton.integration.tests.logging import cats.syntax.functor.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.integration.IntegrationTestUtilities.grabCounts -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -115,5 +112,5 @@ abstract class TraceContextIntegrationTest extends CommunityIntegrationTest with class GrpcTraceContextIntegrationTestPostgres extends TraceContextIntegrationTest { // run with postgres to ensure writing to persistent stores is working correctly registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/logging/TraceIdIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/logging/TraceIdIntegrationTest.scala index d3371227dd..3355b6ff27 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/logging/TraceIdIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/logging/TraceIdIntegrationTest.scala @@ -4,7 +4,7 @@ package com.digitalasset.canton.integration.tests.logging import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -80,5 +80,5 @@ abstract class TraceIdIntegrationTest extends CommunityIntegrationTest with Shar } class TraceIdIntegrationTestDefault extends TraceIdIntegrationTest { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/metrics/MetricRegistryIntegrationTest.scala 
b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/metrics/MetricRegistryIntegrationTest.scala index ed86c5dfb1..431b525f6f 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/metrics/MetricRegistryIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/metrics/MetricRegistryIntegrationTest.scala @@ -7,10 +7,7 @@ import com.daml.metrics.MetricsFilterConfig import com.daml.metrics.api.MetricQualification import com.digitalasset.canton.config import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -175,5 +172,5 @@ sealed trait MetricRegistryIntegrationTest class MetricRegistryIntegrationTestDefault extends MetricRegistryIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/AcsImportNoSynchronizerConnectionIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/AcsImportNoSynchronizerConnectionIntegrationTest.scala index 7463da17ee..3a9edae7cb 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/AcsImportNoSynchronizerConnectionIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/AcsImportNoSynchronizerConnectionIntegrationTest.scala @@ -108,13 +108,16 @@ sealed trait AcsImportNoSynchronizerConnectionIntegrationTest "allow importing the ACS" in { implicit env => import env.* - participant2.dars.upload(CantonExamplesPath) - participant3.dars.upload(CantonExamplesPath) + participant2.dars.upload(CantonExamplesPath, vetAllPackages = false) + val examplesMainPackageId = + participant3.dars.upload(CantonExamplesPath, vetAllPackages = false) participant2.repair.import_acs(acsFilename.canonicalPath) participant3.repair.import_acs(acsFilename.canonicalPath) participants.all.synchronizers.reconnect_all() + participant2.dars.vetting.enable(examplesMainPackageId) + participant3.dars.vetting.enable(examplesMainPackageId) Seq(participant1, participant2).foreach( _.topology.party_to_participant_mappings.propose_delta( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/AcsMultiHostedPartyIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/AcsMultiHostedPartyIntegrationTest.scala index 6097f375b2..2d51f637b6 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/AcsMultiHostedPartyIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/AcsMultiHostedPartyIntegrationTest.scala @@ -4,7 +4,8 @@ package com.digitalasset.canton.integration.tests.multihostedparties import com.digitalasset.canton.config -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, 
PositiveInt} +import com.digitalasset.canton.config.CommitmentSendDelay +import com.digitalasset.canton.config.RequireTypes.{NonNegativeProportion, PositiveInt} import com.digitalasset.canton.console.ParticipantReference import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.examples.java.iou.Iou @@ -66,7 +67,15 @@ trait AcsMultiHostedPartyIntegrationTest ConfigTransforms.updateMaxDeduplicationDurations(maxDedupDuration), ) .updateTestingConfig( - _.focus(_.maxCommitmentSendDelayMillis).replace(Some(NonNegativeInt.zero)) + _.focus(_.commitmentSendDelay) + .replace( + Some( + CommitmentSendDelay( + Some(NonNegativeProportion.zero), + Some(NonNegativeProportion.zero), + ) + ) + ) ) .withSetup { implicit env => import env.* diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/AutomaticReassignmentDecentralizedPartyIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/AutomaticReassignmentDecentralizedPartyIntegrationTest.scala index 2c6111c559..6ff255be9e 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/AutomaticReassignmentDecentralizedPartyIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/AutomaticReassignmentDecentralizedPartyIntegrationTest.scala @@ -10,11 +10,8 @@ import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.damltests.java.automaticreassignmenttransactions as M import com.digitalasset.canton.integration.* import com.digitalasset.canton.integration.bootstrap.InitializedSynchronizer -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.SynchronizerRouterIntegrationTestSetup import com.digitalasset.canton.integration.util.{ EntitySyntax, @@ -37,7 +34,7 @@ class AutomaticReassignmentDecentralizedPartyIntegrationTest with EntitySyntax { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq(Set("sequencer1"), Set("sequencer2")) @@ -59,8 +56,7 @@ class AutomaticReassignmentDecentralizedPartyIntegrationTest .propose_update( d.synchronizerId, _.update( - assignmentExclusivityTimeout = config.NonNegativeFiniteDuration.Zero, - topologyChangeDelay = config.NonNegativeFiniteDuration.Zero, + assignmentExclusivityTimeout = config.NonNegativeFiniteDuration.Zero ), ) ) @@ -68,9 +64,10 @@ class AutomaticReassignmentDecentralizedPartyIntegrationTest disableAssignmentExclusivityTimeout(getInitializedSynchronizer(daName)) disableAssignmentExclusivityTimeout(getInitializedSynchronizer(acmeName)) - participants.all.dars.upload(CantonTestsPath) participants.all.synchronizers.connect_local(sequencer1, daName) participants.all.synchronizers.connect_local(sequencer2, acmeName) + participants.all.dars.upload(CantonTestsPath, synchronizerId = daId) + participants.all.dars.upload(CantonTestsPath, synchronizerId = 
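+      // Note the ordering used throughout this patch: connect to the synchronizers first,
+      // then upload DARs with an explicit synchronizerId (here daId, then acmeId), which
+      // presumably ensures the package vetting lands on each connected synchronizer.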
acmeId) participant1.health.ping(participant2, synchronizerId = Some(daId)) participant1.health.ping(participant2, synchronizerId = Some(acmeId)) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/DivulgenceIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/DivulgenceIntegrationTest.scala index 3b5c271920..4bd314a94d 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/DivulgenceIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/DivulgenceIntegrationTest.scala @@ -3,80 +3,40 @@ package com.digitalasset.canton.integration.tests.multihostedparties -import better.files.File import com.daml.ledger.api.v2.event.Event +import com.daml.ledger.api.v2.transaction_filter.* import com.daml.ledger.api.v2.transaction_filter.TransactionShape.{ TRANSACTION_SHAPE_ACS_DELTA, TRANSACTION_SHAPE_LEDGER_EFFECTS, } -import com.daml.ledger.api.v2.transaction_filter.{ - EventFormat, - Filters, - TransactionFormat, - TransactionShape, - UpdateFormat, -} import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands.UpdateService.TransactionWrapper -import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.console.{CommandFailure, LocalParticipantReference} import com.digitalasset.canton.examples.java.divulgence.DivulgeIouByExercise import com.digitalasset.canton.examples.java.iou.Iou -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} import com.digitalasset.canton.integration.tests.examples.IouSyntax -import com.digitalasset.canton.integration.util.PartyToParticipantDeclarative -import com.digitalasset.canton.integration.{ - CommunityIntegrationTest, - EnvironmentDefinition, - SharedEnvironment, -} -import com.digitalasset.canton.participant.admin.data.ContractIdImportMode -import com.digitalasset.canton.time.PositiveSeconds +import com.digitalasset.canton.protocol.{ContractInstance, LfContractId} import com.digitalasset.canton.topology.PartyId -import com.digitalasset.canton.topology.transaction.ParticipantPermission as PP -final class DivulgenceIntegrationTest extends CommunityIntegrationTest with SharedEnvironment { +final class DivulgenceIntegrationTest extends OfflinePartyReplicationIntegrationTestBase { import DivulgenceIntegrationTest.* - // TODO(#27707) - Remove when ACS commitments consider the onboarding flag - // A party gets activated on multiple participants without being replicated (= ACS mismatch), - // and we want to minimize the risk of warnings related to acs commitment mismatches - private val reconciliationInterval = PositiveSeconds.tryOfDays(365 * 10) - - override def environmentDefinition: EnvironmentDefinition = - EnvironmentDefinition.P2_S1M1.withSetup { implicit env => - import env.* - participants.local.synchronizers.connect_local(sequencer1, daName) - participants.local.dars.upload(CantonExamplesPath) - sequencer1.topology.synchronizer_parameters - .propose_update(daId, _.update(reconciliationInterval = reconciliationInterval.toConfig)) - - participant1.parties.enable("Alice", synchronizeParticipants = Seq(participant2)) - participant2.parties.enable("Bob", synchronizeParticipants = Seq(participant1)) - } - - registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin( - new 
UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory) - ) - - private val acsSnapshotAtOffset: String = - "offline_party_replication_test_acs_snapshot_at_offset.gz" - - override def afterAll(): Unit = - try { - val exportFile = File(acsSnapshotAtOffset) - if (exportFile.exists) exportFile.delete() - } finally super.afterAll() - "Divulgence should work as expected" in { implicit env => import env.* - val alice = participant1.parties.find("Alice") - val bob = participant2.parties.find("Bob") + def contractStore(participant: LocalParticipantReference) = + participant.testing.state_inspection.syncPersistentStateManager + .acsInspection(daId) + .map(_.contractStore) + .value + def contractFor( + participant: LocalParticipantReference, + contractId: String, + ): Option[ContractInstance] = + contractStore(participant) + .lookup(LfContractId.assertFromString(contractId)) + .value + .futureValueUS // baseline Iou-s to test views / stakeholders / projections on the two participants, and ensure correct party migration baseline val (aliceStakeholderCreatedP1, _) = participant1.createIou(alice, alice) @@ -86,12 +46,19 @@ final class DivulgenceIntegrationTest extends CommunityIntegrationTest with Shar participant1.acsDeltas(alice) should have size 1 participant2.acsDeltas(bob) should have size 1 } + contractFor(participant1, aliceStakeholderCreatedP1.contractId) should not be empty + contractFor(participant1, bobStakeholderCreatedP2.contractId) shouldBe empty + contractFor(participant2, aliceStakeholderCreatedP1.contractId) shouldBe empty + contractFor(participant2, bobStakeholderCreatedP2.contractId) should not be empty + val (aliceBobStakeholderCreatedP1, _) = participant1.createIou(alice, bob) eventually() { // ensuring that both participants see all events necessary after running the commands (these numbers are deduced from the assertions below) participant1.acsDeltas(alice) should have size 2 participant2.acsDeltas(bob) should have size 2 } + contractFor(participant1, aliceBobStakeholderCreatedP1.contractId) should not be empty + contractFor(participant2, aliceBobStakeholderCreatedP1.contractId) should not be empty // divulgence proxy contract for divulgence operations: divulging to bob val (divulgeIouByExerciseP2, divulgeIouByExerciseContract) = @@ -101,17 +68,25 @@ final class DivulgenceIntegrationTest extends CommunityIntegrationTest with Shar participant1.acsDeltas(alice) should have size 3 participant2.acsDeltas(bob) should have size 3 } + contractFor(participant1, divulgeIouByExerciseP2.contractId) should not be empty + contractFor(participant2, divulgeIouByExerciseP2.contractId) should not be empty // creating two iou-s with alice, which will be divulged to bob val (immediateDivulged1P1, immediateDivulged1Contract) = participant1.immediateDivulgeIou(alice, divulgeIouByExerciseContract) - val (immediateDivulged2P1, immediateDivulged2Contract) = + val (immediateDivulged2P1, _immediateDivulged2Contract) = participant1.immediateDivulgeIou(alice, divulgeIouByExerciseContract) eventually() { // ensuring that both participants see all events necessary after running the commands (these numbers are deduced from the assertions below) participant1.acsDeltas(alice) should have size 5 participant2.ledgerEffects(alice) should have size 6 } + contractFor(participant1, immediateDivulged1P1.contractId) should not be empty + // Immediately divulged contracts are stored in the ContractStore + contractFor(participant2, immediateDivulged1P1.contractId) should not be empty + 
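+    // contractFor (defined at the top of this test) looks the contract up directly in the
+    // participant's ContractStore via testing.state_inspection: immediately divulged
+    // contracts are stored on the divulgee's participant, while retroactively divulged
+    // contracts (checked further below) are not.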
contractFor(participant1, immediateDivulged2P1.contractId) should not be empty + // Immediately divulged contracts are stored in the ContractStore + contractFor(participant2, immediateDivulged2P1.contractId) should not be empty // archiving the first divulged Iou participant1.archiveIou(alice, immediateDivulged1Contract) @@ -129,6 +104,9 @@ final class DivulgenceIntegrationTest extends CommunityIntegrationTest with Shar participant1.acsDeltas(alice) should have size 8 participant2.ledgerEffects(alice) should have size 8 } + contractFor(participant1, aliceStakeholderCreated2P1.contractId) should not be empty + // Retroactively divulged contracts are not stored in the ContractStore + contractFor(participant2, aliceStakeholderCreated2P1.contractId) shouldBe empty // participant1 alice val divulgeIouByExerciseP1 = participant1.acsDeltas(alice)(2)._1 @@ -257,30 +235,22 @@ final class DivulgenceIntegrationTest extends CommunityIntegrationTest with Shar participant2.acsDeltas(Seq.empty) shouldBe participant2.acsDeltas(Seq(alice, bob)) participant2.acsDeltas(Seq.empty) shouldBe participant2.acsDeltas(Seq(bob)) - val ledgerEndP1 = participant1.ledger_api.state.end() + val source = participant1 + val target = participant2 - PartyToParticipantDeclarative.forParty(Set(participant1, participant2), daId)( - participant1, + val beforeActivationOffset = authorizeAliceWithTargetDisconnect(daId, source, target) + + // Replicate `alice` from `source` (`participant1`) to `target` (`participant2`) + source.parties.export_party_acs( alice, - PositiveInt.one, - Set( - (participant1, PP.Submission), - (participant2, PP.Submission), - ), - ) - participant2.synchronizers.disconnect_all() - - participant1.parties.export_party_acs( - party = alice, - synchronizerId = daId, - targetParticipantId = participant2.id, - beginOffsetExclusive = ledgerEndP1, - exportFilePath = acsSnapshotAtOffset, + daId, + target, + beforeActivationOffset, + acsSnapshotPath, ) + target.parties.import_party_acs(acsSnapshotPath) - participant2.repair.import_acs(acsSnapshotAtOffset, "", ContractIdImportMode.Accept) - - participant2.synchronizers.reconnect(daName) + target.synchronizers.reconnect(daName) // participant1 alice participant1.acsDeltas(alice) shouldBe List( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/FindPartyActivationOffsetsIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/FindPartyActivationOffsetsIntegrationTest.scala index be7411e412..bcc21e81b3 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/FindPartyActivationOffsetsIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/FindPartyActivationOffsetsIntegrationTest.scala @@ -7,7 +7,7 @@ import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.integration.* -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.util.PartyToParticipantDeclarative import com.digitalasset.canton.time.PositiveSeconds @@ -33,7 +33,7 @@ final class 
FindPartyActivationOffsetsIntegrationTest } registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory) + new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory) ) "Alice has 2 activations on P1 and P3" in { implicit env => diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OffboardingConsortiumPartyIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OffboardingConsortiumPartyIntegrationTest.scala index b8d1b2f75d..2f72c6fee3 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OffboardingConsortiumPartyIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OffboardingConsortiumPartyIntegrationTest.scala @@ -9,10 +9,7 @@ import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.console.LocalParticipantReference import com.digitalasset.canton.integration.EnvironmentDefinition -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.topology.store.TimeQuery import com.digitalasset.canton.topology.transaction.{ DecentralizedNamespaceDefinition, @@ -249,5 +246,5 @@ sealed trait OffboardingConsortiumPartyIntegrationTest extends ConsortiumPartyIn final class OffboardingConsortiumPartyIntegrationTestPostgres extends OffboardingConsortiumPartyIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationExplicitDisclosureIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationExplicitDisclosureIntegrationTest.scala index f2ab8f027f..958db241e6 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationExplicitDisclosureIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationExplicitDisclosureIntegrationTest.scala @@ -3,76 +3,30 @@ package com.digitalasset.canton.integration.tests.multihostedparties +import com.daml.ledger.javaapi.data.* import com.daml.ledger.javaapi.data.codegen.HasCommands -import com.daml.ledger.javaapi.data.{ - Command, - CumulativeFilter, - EventFormat, - Filter, - Identifier, - TransactionFormat, - TransactionShape, -} -import com.digitalasset.canton.HasTempDirectory import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.damltests.java.explicitdisclosure.PriceQuotation -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} -import com.digitalasset.canton.integration.util.PartyToParticipantDeclarative -import com.digitalasset.canton.integration.{ConfigTransforms, EnvironmentDefinition} +import 
com.digitalasset.canton.integration.EnvironmentDefinition +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.ledger.error.groups.ConsistencyErrors.ContractNotFound import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil -import com.digitalasset.canton.time.PositiveSeconds import com.digitalasset.canton.topology.PartyId -import com.digitalasset.canton.topology.transaction.ParticipantPermission as PP +import org.scalatest.time.SpanSugar.convertIntToGrainOfTime import java.util.Collections sealed trait OfflinePartyReplicationExplicitDisclosureIntegrationTest - extends UseSilentSynchronizerInTest - with HasTempDirectory { - - private val acsSnapshot = tempDirectory.toTempFile(s"${getClass.getSimpleName}.gz") - private val acsSnapshotPath: String = acsSnapshot.toString - - // TODO(#27707) - Remove when ACS commitments consider the onboarding flag - // Party replication to the target participant may trigger ACS commitment mismatch warnings. - // This is expected behavior. To reduce the frequency of these warnings and avoid associated - // test flakes, `reconciliationInterval` is set to one year. - private val reconciliationInterval = PositiveSeconds.tryOfDays(365 * 10) - - private var alice: PartyId = _ - private var bob: PartyId = _ + extends OfflinePartyReplicationIntegrationTestBase { override lazy val environmentDefinition: EnvironmentDefinition = - EnvironmentDefinition.P2_S1M1 - .addConfigTransforms(ConfigTransforms.useStaticTime) + super.environmentDefinition .withSetup { implicit env => import env.* - - participants.all.synchronizers.connect_local(sequencer1, alias = daName) participants.all.dars.upload(CantonTestsPath) - alice = participant1.parties.enable( - "Alice", - synchronizeParticipants = Seq(participant2), - ) - bob = participant2.parties.enable( - "Bob", - synchronizeParticipants = Seq(participant1), - ) - - sequencers.all.foreach { s => - adjustTimeouts(s) - s.topology.synchronizer_parameters - .propose_update( - daId, - _.update(reconciliationInterval = reconciliationInterval.toConfig), - ) - } + source = participant1 + target = participant2 } "Explicit disclosure should work on replicated contracts" in { implicit env => @@ -84,8 +38,6 @@ sealed trait OfflinePartyReplicationExplicitDisclosureIntegrationTest hasCommands.commands.iterator.asScala.toSeq } - val simClock = Some(env.environment.simClock.value) - // Create a contract visible only to `alice` val (quote, disclosedQuote) = { val quote = new PriceQuotation(alice.toProtoPrimitive, "DAML", 6865) @@ -106,45 +58,30 @@ sealed trait OfflinePartyReplicationExplicitDisclosureIntegrationTest (contract.id, disclosedContract) } - val beforeActivationOffset = participant1.ledger_api.state.end() - - PartyToParticipantDeclarative.forParty(Set(participant1, participant2), daId)( - participant1, - alice, - PositiveInt.one, - Set( - (participant1, PP.Submission), - (participant2, PP.Observation), - ), - ) + val beforeActivationOffset = authorizeAliceWithTargetDisconnect(daId, source, target) - silenceSynchronizerAndAwaitEffectiveness(daId, sequencer1, participant1, simClock) - - // Replicate `alice` from `participant1` to `participant2` - repair.party_replication.step1_hold_and_store_acs( + // Replicate `alice` from `source` (`participant1`) to `target` (`participant2`) + source.parties.export_party_acs( alice, daId, - participant1, - participant2.id, - acsSnapshotPath, + target, beforeActivationOffset, + acsSnapshotPath, ) - 
repair.party_replication.step2_import_acs(alice, daId, participant2, acsSnapshotPath) + target.parties.import_party_acs(acsSnapshotPath) - resumeSynchronizerAndAwaitEffectiveness(daId, sequencer1, participant2, simClock) + val beforeTargetReconnectOffset = target.ledger_api.state.end() - PartyToParticipantDeclarative.forParty(Set(participant1, participant2), daId)( - participant1, - alice, - PositiveInt.one, - Set( - (participant1, PP.Submission), - (participant2, PP.Submission), - ), - ) + target.synchronizers.reconnect_all() + + eventually(timeUntilSuccess = 2.minutes, maxPollInterval = 30.seconds) { + val (onboard, earliestRetryTimestamp) = + target.parties.complete_party_onboarding(alice, daId, target, beforeTargetReconnectOffset) + (onboard, earliestRetryTimestamp) shouldBe (true, None) + } // Verify that `alice` can see the contract with explicit disclosure - participant2.ledger_api.javaapi.commands.submit( + target.ledger_api.javaapi.commands.submit( actAs = Seq(alice), commands = quote.exercisePriceQuotation_Fetch(alice.toProtoPrimitive), disclosedContracts = Seq(disclosedQuote), @@ -152,7 +89,7 @@ sealed trait OfflinePartyReplicationExplicitDisclosureIntegrationTest // Verify that `bob` can't see the contract without explicit disclosure assertThrowsAndLogsCommandFailures( - participant2.ledger_api.javaapi.commands.submit( + target.ledger_api.javaapi.commands.submit( actAs = Seq(bob), commands = quote.exercisePriceQuotation_Fetch(bob.toProtoPrimitive), ), @@ -160,7 +97,7 @@ sealed trait OfflinePartyReplicationExplicitDisclosureIntegrationTest ) // Verify that `bob` can see the contract with explicit disclosure - participant2.ledger_api.javaapi.commands.submit( + target.ledger_api.javaapi.commands.submit( actAs = Seq(bob), commands = quote.exercisePriceQuotation_Fetch(bob.toProtoPrimitive), disclosedContracts = Seq(disclosedQuote), @@ -192,5 +129,5 @@ sealed trait OfflinePartyReplicationExplicitDisclosureIntegrationTest final class OfflinePartyReplicationExplicitDisclosureIntegrationTestPostgres extends OfflinePartyReplicationExplicitDisclosureIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationIntegrationTest.scala index 87f07c6cd5..76ef2d5f0d 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationIntegrationTest.scala @@ -12,11 +12,9 @@ import com.digitalasset.canton.console.{ } import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.examples.java as M -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.examples.IouSyntax +import com.digitalasset.canton.integration.tests.multihostedparties.PartyActivationFlow.authorizeWithTargetDisconnect import 
com.digitalasset.canton.integration.util.PartyToParticipantDeclarative
import com.digitalasset.canton.integration.{
  CommunityIntegrationTest,
@@ -25,16 +23,88 @@ import com.digitalasset.canton.integration.{
  TestEnvironment,
}
import com.digitalasset.canton.logging.SuppressingLogger.LogEntryOptionality
-import com.digitalasset.canton.participant.admin.data.ContractIdImportMode
+import com.digitalasset.canton.participant.admin.data.ContractImportMode
+import com.digitalasset.canton.participant.admin.party.PartyManagementServiceError.InvalidState.AbortAcsExportForMissingOnboardingFlag
import com.digitalasset.canton.participant.admin.party.PartyManagementServiceError.InvalidTimestamp
import com.digitalasset.canton.time.PositiveSeconds
-import com.digitalasset.canton.topology.transaction.ParticipantPermission as PP
import com.digitalasset.canton.topology.transaction.ParticipantPermission.{Observation, Submission}
+import com.digitalasset.canton.topology.transaction.{
+  ParticipantPermission,
+  ParticipantPermission as PP,
+}
import com.digitalasset.canton.topology.{PartyId, PhysicalSynchronizerId}
import com.digitalasset.canton.{HasExecutionContext, HasTempDirectory, config}

import java.time.Instant

+private[multihostedparties] object PartyActivationFlow {
+
+  def authorizeOnly(
+      party: PartyId,
+      synchronizerId: PhysicalSynchronizerId,
+      source: ParticipantReference,
+      target: ParticipantReference,
+  ): Long =
+    authorizeWithTargetDisconnect(
+      party,
+      synchronizerId,
+      source,
+      target,
+      disconnectTarget = false,
+    )
+
+  def authorizeWithTargetDisconnect(
+      party: PartyId,
+      synchronizerId: PhysicalSynchronizerId,
+      source: ParticipantReference,
+      target: ParticipantReference,
+      disconnectTarget: Boolean = true,
+  ): Long = {
+    target.topology.party_to_participant_mappings
+      .propose_delta(
+        party = party,
+        adds = Seq(target.id -> ParticipantPermission.Submission),
+        store = synchronizerId,
+        requiresPartyToBeOnboarded = true,
+      )
+
+    if (disconnectTarget) {
+      target.synchronizers.disconnect_all()
+    }
+
+    // Record the source ledger end before the source-side activation; callers use the
+    // returned offset as the exclusive begin offset for the subsequent ACS export.
+    val sourceLedgerEnd = source.ledger_api.state.end()
+
+    source.topology.party_to_participant_mappings.propose_delta(
+      party = party,
+      adds = Seq(target.id -> ParticipantPermission.Submission),
+      store = synchronizerId,
+      requiresPartyToBeOnboarded = true,
+    )
+
+    sourceLedgerEnd
+  }
+
+  /** Unilaterally clears the onboarding flag.
+    *
+    * As opposed to the `parties.complete_party_onboarding` endpoint, this does not wait for the
+    * appropriate time to remove the onboarding flag, but does so immediately.
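+    *
+    * It does so by re-proposing the target's hosting of the party with
+    * `requiresPartyToBeOnboarded = false`, replacing the mapping that still carries the flag.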
+ */ + def removeOnboardingFlag( + party: PartyId, + synchronizerId: PhysicalSynchronizerId, + target: ParticipantReference, + ): Unit = + target.topology.party_to_participant_mappings + .propose_delta( + party = party, + adds = Seq(target.id -> ParticipantPermission.Submission), + store = synchronizerId, + requiresPartyToBeOnboarded = false, + ) + .discard + +} + trait OfflinePartyReplicationIntegrationTestBase extends CommunityIntegrationTest with SharedEnvironment @@ -53,20 +123,21 @@ trait OfflinePartyReplicationIntegrationTestBase protected var bob: PartyId = _ override def environmentDefinition: EnvironmentDefinition = - EnvironmentDefinition.P3_S1M1.withSetup { implicit env => - import env.* - participants.local.synchronizers.connect_local(sequencer1, daName) - participants.local.dars.upload(CantonExamplesPath) - sequencer1.topology.synchronizer_parameters - .propose_update(daId, _.update(reconciliationInterval = reconciliationInterval.toConfig)) - - alice = participant1.parties.enable("Alice", synchronizeParticipants = Seq(participant2)) - bob = participant2.parties.enable("Bob", synchronizeParticipants = Seq(participant1)) - } + EnvironmentDefinition.P3_S1M1 + .withSetup { implicit env => + import env.* + participants.local.synchronizers.connect_local(sequencer1, daName) + participants.local.dars.upload(CantonExamplesPath) + sequencer1.topology.synchronizer_parameters + .propose_update(daId, _.update(reconciliationInterval = reconciliationInterval.toConfig)) + + alice = participant1.parties.enable("Alice", synchronizeParticipants = Seq(participant2)) + bob = participant2.parties.enable("Bob", synchronizeParticipants = Seq(participant1)) + } registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory) + new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory) ) private val acsSnapshot = @@ -74,19 +145,31 @@ trait OfflinePartyReplicationIntegrationTestBase protected val acsSnapshotPath: String = acsSnapshot.toString - protected def authorizeAlice( + protected def authorizeAliceWithTargetDisconnect( + synchronizerId: PhysicalSynchronizerId, + source: ParticipantReference, + target: ParticipantReference, + ): Long = + authorizeWithTargetDisconnect( + alice, + synchronizerId, + source, + target, + ) + + protected def authorizeAliceWithoutOnboardingFlag( permission: PP, - p1: ParticipantReference, - p3: ParticipantReference, + source: ParticipantReference, + target: ParticipantReference, synchronizerId: PhysicalSynchronizerId, )(implicit env: TestEnvironment): Unit = - PartyToParticipantDeclarative.forParty(Set(p1, p3), synchronizerId)( - p1.id, + PartyToParticipantDeclarative.forParty(Set(source, target), synchronizerId)( + source.id, alice, PositiveInt.one, Set( - (p1.id, PP.Submission), - (p3.id, permission), + (source.id, PP.Submission), + (target.id, permission), ), ) @@ -161,36 +244,100 @@ final class OfflinePartyReplicationAtOffsetIntegrationTest exportFilePath = acsSnapshotPath, waitForActivationTimeout = Some(config.NonNegativeFiniteDuration.ofMillis(5)), ), - _.commandFailureMessage should include regex "The stream has not been completed in.*– Possibly missing party activation?", + _.errorMessage should include regex "The stream has not been completed in.*– Possibly missing party activation?", ) } - "Exporting and importing a LAPI based ACS snapshot as part of a party replication using ledger offset" in { + "Party activation on the target participant with missing onboarding flag aborts ACS 
export" in { implicit env => import env.* val ledgerEndP1 = source.ledger_api.state.end() - authorizeAlice(Observation, source, target, daId) + authorizeAliceWithoutOnboardingFlag(Submission, source, target, daId) + + forAll( + source.topology.party_to_participant_mappings + .list( + daId, + filterParty = alice.toProtoPrimitive, + filterParticipant = target.toProtoPrimitive, + ) + .loneElement + .item + .participants + ) { p => + p.onboarding shouldBe false + } + + loggerFactory.assertThrowsAndLogs[CommandFailure]( + source.parties.export_party_acs( + party = alice, + synchronizerId = daId, + targetParticipantId = target.id, + beginOffsetExclusive = ledgerEndP1, + exportFilePath = acsSnapshotPath, + waitForActivationTimeout = Some(config.NonNegativeFiniteDuration.ofMillis(5)), + ), + _.errorMessage should include(AbortAcsExportForMissingOnboardingFlag(alice, target).cause), + ) + + // Undo activating Alice on the target participant without onboarding flag set; for the following test + source.topology.party_to_participant_mappings.propose_delta( + party = alice, + removes = Seq(target.id), + store = daId, + mustFullyAuthorize = true, + ) + + // Wait for party deactivation to propagate to both source and target participants + eventually() { + val sourceMapping = source.topology.party_to_participant_mappings + .list(daId, filterParty = alice.toProtoPrimitive) + .loneElement + .item + + sourceMapping.participants should have size 1 + forExactly(1, sourceMapping.participants) { p => + p.participantId shouldBe source.id + p.onboarding should be(false) + } + + // Ensure the target has processed the "remove" transaction to prevent flakes + val targetMapping = target.topology.party_to_participant_mappings + .list(daId, filterParty = alice.toProtoPrimitive) + .loneElement + .item + + targetMapping.participants should have size 1 + forExactly(1, targetMapping.participants) { p => + p.participantId shouldBe source.id + p.onboarding should be(false) + } + } + + } + + "Exporting and importing a LAPI based ACS snapshot as part of a party replication using ledger offset" in { + implicit env => + import env.* + + val beforeActivationOffset = authorizeAliceWithTargetDisconnect(daId, source, target) source.parties.export_party_acs( party = alice, synchronizerId = daId, targetParticipantId = target.id, - beginOffsetExclusive = ledgerEndP1, + beginOffsetExclusive = beforeActivationOffset, exportFilePath = acsSnapshotPath, ) - target.synchronizers.disconnect_all() - - target.repair.import_acs( - acsSnapshotPath, - contractIdImportMode = ContractIdImportMode.Accept, - ) + target.parties.import_party_acs(acsSnapshotPath) target.synchronizers.reconnect(daName) - authorizeAlice(Submission, source, target, daId) + // To prevent flakes when trying to archive contracts on the target in the next step + PartyActivationFlow.removeOnboardingFlag(alice, daId, target) assertAcsAndContinuedOperation(target) } @@ -199,15 +346,13 @@ final class OfflinePartyReplicationAtOffsetIntegrationTest implicit env => import env.* - val ledgerEndP1 = source.ledger_api.state.end() - - authorizeAlice(Observation, source, participant2, daId) + val beforeActivationOffset = authorizeAliceWithTargetDisconnect(daId, source, participant2) source.parties.export_party_acs( party = alice, synchronizerId = daId, targetParticipantId = participant2.id, - beginOffsetExclusive = ledgerEndP1, + beginOffsetExclusive = beforeActivationOffset, exportFilePath = acsSnapshotPath, ) @@ -240,7 +385,7 @@ final class 
OfflinePartyReplicationWithSilentSynchronizerIntegrationTest adjustTimeouts(sequencer1) - authorizeAlice(Observation, source, target, daId) + authorizeAliceWithoutOnboardingFlag(Observation, source, target, daId) val silentSynchronizerValidFrom = silenceSynchronizerAndAwaitEffectiveness(daId, sequencer1, source, simClock = None) @@ -261,14 +406,14 @@ final class OfflinePartyReplicationWithSilentSynchronizerIntegrationTest target.repair.import_acs( acsSnapshotPath, - contractIdImportMode = ContractIdImportMode.Accept, + contractImportMode = ContractImportMode.Accept, ) target.synchronizers.reconnect(daName) resumeSynchronizerAndAwaitEffectiveness(daId, sequencer1, source, simClock = None) - authorizeAlice(Submission, source, target, daId) + authorizeAliceWithoutOnboardingFlag(Submission, source, target, daId) assertAcsAndContinuedOperation(target) } @@ -321,15 +466,13 @@ final class OfflinePartyReplicationFilterAcsExportIntegrationTest IouSyntax.createIou(source)(alice, charlie, 99.99).discard - val ledgerEndP1 = source.ledger_api.state.end() - - authorizeAlice(Observation, source, target, daId) + val beforeActivationOffset = authorizeAliceWithTargetDisconnect(daId, source, target) source.parties.export_party_acs( party = alice, synchronizerId = daId.logical, targetParticipantId = target.id, - beginOffsetExclusive = ledgerEndP1, + beginOffsetExclusive = beforeActivationOffset, exportFilePath = acsSnapshotPath, ) @@ -345,9 +488,7 @@ final class OfflinePartyReplicationFilterAcsExportIntegrationTest stakeholders.intersect(Set(charlie.toProtoPrimitive)).isEmpty } - target.synchronizers.disconnect_all() - - target.repair.import_acs(acsSnapshotPath) + target.parties.import_party_acs(acsSnapshotPath) target.synchronizers.reconnect(daName) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationOnboardingClearanceIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationOnboardingClearanceIntegrationTest.scala deleted file mode 100644 index 4c0714bc2a..0000000000 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationOnboardingClearanceIntegrationTest.scala +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.integration.tests.multihostedparties - -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.integration.EnvironmentDefinition -import com.digitalasset.canton.integration.tests.examples.IouSyntax -import com.digitalasset.canton.integration.tests.examples.IouSyntax.testIou -import com.digitalasset.canton.topology.PhysicalSynchronizerId -import com.digitalasset.canton.topology.transaction.ParticipantPermission - -import scala.jdk.CollectionConverters.CollectionHasAsScala - -/** Setup: - * - Alice is hosted on participant1 (Source) - * - Bob is hosted on participant2 (Target) - * - 2 active IOU contract between Alice (signatory) and Bob (observer) - * - * Test: Replicate Alice to target using the onboarding flag - * - Target participant authorizes Alice->target setting the onboarding flag - * - Target participant disconnects from the synchronizer - * - A creation transaction to create a contract with Alice as signatory and Bob as observer is - * sequenced - * - Source participant approves/confirms transaction - * - Source participant authorizes Alice->target setting the onboarding flag - * - ACS snapshot for Alice is taken on source participant - * - ACS is imported on the target participant, which then reconnects to the synchronizers - * - Target participant clears the onboarding flag unilaterally - * - Assert successful clearing of onboarding flag by the target participant - */ -sealed trait OfflinePartyReplicationOnboardingClearanceIntegrationTest - extends OfflinePartyReplicationIntegrationTestBase { - - override def environmentDefinition: EnvironmentDefinition = - super.environmentDefinition.withSetup { implicit env => - import env.* - source = participant1 - target = participant2 - } -} - -final class OffPROnboardingClearanceIntegrationTest - extends OfflinePartyReplicationOnboardingClearanceIntegrationTest { - - "Party replication sets and clears the onboarding flag successfully" in { implicit env => - import env.* - - IouSyntax.createIou(participant1)(alice, bob, 3.33).discard - - target.topology.party_to_participant_mappings - .propose_delta( - party = alice, - adds = Seq(target.id -> ParticipantPermission.Submission), - store = daId, - requiresPartyToBeOnboarded = true, - ) - - target.synchronizers.disconnect_all() - - val createIouCmd = testIou(alice, bob, 2.20).create().commands().asScala.toSeq - - source.ledger_api.javaapi.commands.submit( - Seq(alice), - createIouCmd, - daId, - optTimeout = None, - ) - - val ledgerEndP1 = source.ledger_api.state.end() - - source.topology.party_to_participant_mappings.propose_delta( - party = alice, - adds = Seq(target.id -> ParticipantPermission.Submission), - store = daId, - requiresPartyToBeOnboarded = true, - ) - - source.parties.export_party_acs( - party = alice, - synchronizerId = daId, - targetParticipantId = target, - beginOffsetExclusive = ledgerEndP1, - exportFilePath = acsSnapshotPath, - ) - - target.repair.import_acs(acsSnapshotPath) - - assertOnboardingFlag(daId, setOnTarget = true) - - target.synchronizers.reconnect(daName) - - source.health.ping(target) - target.ledger_api.state.acs.of_party(alice).size shouldBe 2 - - // unilateral onboarding flag clearance - target.topology.party_to_participant_mappings.propose_delta( - party = alice, - adds = Seq(target.id -> ParticipantPermission.Submission), - store = daId, - requiresPartyToBeOnboarded = false, - ) - - assertOnboardingFlag(daId, setOnTarget = false) - - } - - 
-  private def assertOnboardingFlag(daId: => PhysicalSynchronizerId, setOnTarget: Boolean) =
-    eventually() {
-      val lastPTP =
-        source.topology.party_to_participant_mappings.list(synchronizerId = daId).last.item
-
-      lastPTP.partyId shouldBe alice
-      lastPTP.participants should have size 2
-
-      forExactly(1, lastPTP.participants) { p =>
-        p.participantId shouldBe source.id
-        p.onboarding should be(false)
-      }
-      forExactly(1, lastPTP.participants) { p =>
-        p.participantId shouldBe target.id
-        p.onboarding should be(setOnTarget)
-      }
-    }
-}
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationOnboardingCompletetionIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationOnboardingCompletetionIntegrationTest.scala
new file mode 100644
index 0000000000..6472c5500b
--- /dev/null
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationOnboardingCompletetionIntegrationTest.scala
@@ -0,0 +1,126 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.integration.tests.multihostedparties
+
+import com.digitalasset.canton.discard.Implicits.DiscardOps
+import com.digitalasset.canton.integration.EnvironmentDefinition
+import com.digitalasset.canton.integration.tests.examples.IouSyntax
+import com.digitalasset.canton.integration.tests.examples.IouSyntax.testIou
+import com.digitalasset.canton.topology.PhysicalSynchronizerId
+import com.digitalasset.canton.topology.transaction.ParticipantPermission
+import org.scalatest.time.SpanSugar.convertIntToGrainOfTime
+
+import scala.jdk.CollectionConverters.CollectionHasAsScala
+
+/** Setup:
+  * - Alice is hosted on participant1 (Source)
+  * - Bob is hosted on participant2 (Target)
+  * - 2 active IOU contracts between Alice (signatory) and Bob (observer)
+  *
+  * Test: Replicate Alice to the target using the onboarding flag
+  * - Target participant authorizes Alice->target, setting the onboarding flag
+  * - Target participant disconnects from the synchronizer
+  * - A creation transaction to create a contract with Alice as signatory and Bob as observer is
+  *   sequenced
+  * - Source participant approves/confirms the transaction
+  * - Source participant authorizes Alice->target, setting the onboarding flag
+  * - ACS snapshot for Alice is taken on the source participant
+  * - ACS is imported on the target participant, which then reconnects to the synchronizer
+  * - Target participant completes party onboarding, which clears the onboarding flag
+  * - Assert that the onboarding flag has been cleared on the target participant
+  */
+sealed trait OfflinePartyReplicationOnboardingCompletionIntegrationTest
+    extends OfflinePartyReplicationIntegrationTestBase {
+
+  override def environmentDefinition: EnvironmentDefinition =
+    super.environmentDefinition
+      .withSetup { implicit env =>
+        import env.*
+        source = participant1
+        target = participant2
+      }
+}
+
+final class OffPROnboardingCompletionIntegrationTest
+    extends OfflinePartyReplicationOnboardingCompletionIntegrationTest {
+
+  "Party replication sets and clears the onboarding flag successfully" in { implicit env =>
+    import env.*
+
+    IouSyntax.createIou(participant1)(alice, bob, 3.33).discard
+
+    target.topology.party_to_participant_mappings
+      .propose_delta(
+        party = alice,
+        adds = Seq(target.id -> 
ParticipantPermission.Submission), + store = daId, + requiresPartyToBeOnboarded = true, + ) + + target.synchronizers.disconnect_all() + + val createIouCmd = testIou(alice, bob, 2.20).create().commands().asScala.toSeq + + source.ledger_api.javaapi.commands.submit( + Seq(alice), + createIouCmd, + daId, + optTimeout = None, + ) + + val sourceLedgerEnd = source.ledger_api.state.end() + + source.topology.party_to_participant_mappings.propose_delta( + party = alice, + adds = Seq(target.id -> ParticipantPermission.Submission), + store = daId, + requiresPartyToBeOnboarded = true, + ) + + source.parties.export_party_acs( + party = alice, + synchronizerId = daId, + targetParticipantId = target, + beginOffsetExclusive = sourceLedgerEnd, + exportFilePath = acsSnapshotPath, + ) + + target.parties.import_party_acs(acsSnapshotPath) + + assertOnboardingFlag(daId, setOnTarget = true) + + val targetLedgerEnd = target.ledger_api.state.end() + + target.synchronizers.reconnect(daName) + + source.health.ping(target) + target.ledger_api.state.acs.of_party(alice).size shouldBe 2 + + eventually(timeUntilSuccess = 2.minutes, maxPollInterval = 30.seconds) { + val (onboard, earliestRetryTimestamp) = + target.parties.complete_party_onboarding(alice, daId, target, targetLedgerEnd) + (onboard, earliestRetryTimestamp) shouldBe (true, None) + } + + assertOnboardingFlag(daId, setOnTarget = false) + } + + private def assertOnboardingFlag(daId: => PhysicalSynchronizerId, setOnTarget: Boolean) = + eventually() { + val lastPTP = + source.topology.party_to_participant_mappings.list(synchronizerId = daId).last.item + + lastPTP.partyId shouldBe alice + lastPTP.participants should have size 2 + + forExactly(1, lastPTP.participants) { p => + p.participantId shouldBe source.id + p.onboarding should be(false) + } + forExactly(1, lastPTP.participants) { p => + p.participantId shouldBe target.id + p.onboarding should be(setOnTarget) + } + } +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationPreventDuplicateContractsIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationPreventDuplicateContractsIntegrationTest.scala index 52912e1661..c2ff73ee5e 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationPreventDuplicateContractsIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationPreventDuplicateContractsIntegrationTest.scala @@ -61,6 +61,7 @@ sealed trait OfflinePartyReplicationPreventDuplicateContractsIntegrationTest party = alice, adds = Seq(target.id -> ParticipantPermission.Submission), store = daId, + requiresPartyToBeOnboarded = true, ) target.synchronizers.disconnect_all() @@ -80,6 +81,7 @@ sealed trait OfflinePartyReplicationPreventDuplicateContractsIntegrationTest party = alice, adds = Seq(target.id -> ParticipantPermission.Submission), store = daId, + requiresPartyToBeOnboarded = true, ) activationOffset = source.parties.find_party_max_activation_offset( @@ -126,7 +128,7 @@ final class OffPRPreventDupContractsFailureIntegrationTest (entry: LogEntry) => entry.shouldBeCantonError( SyncServiceSynchronizerDisconnect, - _ should include regex "(?s)fatally disconnected because of handler returned error.*with failed activeness check is approved", + _ should include regex "(?s)fatally disconnected 
because of.*", ), ), ( @@ -219,7 +221,7 @@ final class OffPRPreventDupContractsIntegrationTest repair.acs.read_from_file(acsSnapshotPath) should have size 1 - target.repair.import_acs(acsSnapshotPath) + target.parties.import_party_acs(acsSnapshotPath) target.synchronizers.reconnect(daName) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationRepairMacroIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationRepairMacroIntegrationTest.scala index b720b3f7e0..472691d1b3 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationRepairMacroIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationRepairMacroIntegrationTest.scala @@ -3,38 +3,32 @@ package com.digitalasset.canton.integration.tests.multihostedparties +import com.digitalasset.canton.config import com.digitalasset.canton.config.CantonRequireTypes.InstanceName import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.console.InstanceReference -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.examples.IouSyntax -import com.digitalasset.canton.integration.util.{AcsInspection, PartyToParticipantDeclarative} +import com.digitalasset.canton.integration.tests.multihostedparties.PartyActivationFlow.authorizeOnly +import com.digitalasset.canton.integration.util.AcsInspection import com.digitalasset.canton.integration.{ConfigTransforms, EnvironmentDefinition} import com.digitalasset.canton.participant.util.JavaCodegenUtil.* import com.digitalasset.canton.protocol.LfContractId import com.digitalasset.canton.time.PositiveSeconds import com.digitalasset.canton.topology.PartyId -import com.digitalasset.canton.topology.transaction.ParticipantPermission as PP -import com.digitalasset.canton.{HasTempDirectory, config} import scala.jdk.CollectionConverters.* sealed trait OfflinePartyReplicationRepairMacroIntegrationTest - extends UseSilentSynchronizerInTest - with AcsInspection - with HasTempDirectory { + extends OfflinePartyReplicationIntegrationTestBase + with UseSilentSynchronizerInTest + with AcsInspection { private val aliceName = "Alice" private val bobName = "Bob" private val charlieName = "Charlie" - private var alice: PartyId = _ - private var bob: PartyId = _ private var charlie: PartyId = _ // TODO(#27707) - Remove when ACS commitments consider the onboarding flag @@ -57,7 +51,8 @@ sealed trait OfflinePartyReplicationRepairMacroIntegrationTest participant2.synchronizers.connect_local(sequencer4, alias = acmeName) participant3.synchronizers.connect_local(sequencer4, alias = acmeName) - participants.all.dars.upload(CantonExamplesPath) + participants.all.dars.upload(CantonExamplesPath, synchronizerId = daId) + participants.all.dars.upload(CantonExamplesPath, synchronizerId = acmeId) // Allocate parties alice = participant1.parties.enable( 
@@ -98,9 +93,6 @@ sealed trait OfflinePartyReplicationRepairMacroIntegrationTest .propose_update(daId, _.update(reconciliationInterval = reconciliationInterval.toConfig)) } - private val acsSnapshot = tempDirectory.toTempFile("alize.gz") - private val acsSnapshotPath: String = acsSnapshot.toString - "setup our test scenario: create archived and active contracts" in { implicit env => import env.* @@ -182,17 +174,8 @@ sealed trait OfflinePartyReplicationRepairMacroIntegrationTest ) ) - val beforeActivationOffset = participant1.ledger_api.state.end() - - PartyToParticipantDeclarative.forParty(Set(participant1, participant3), daId)( - participant1, - alice, - PositiveInt.one, - Set( - (participant1, PP.Submission), - (participant3, PP.Observation), - ), - ) + val beforeActivationOffset = + authorizeOnly(alice, daId, source = participant1, target = participant3) silenceSynchronizerAndAwaitEffectiveness(daId, sequencer1, participant1, simClock) @@ -255,16 +238,6 @@ sealed trait OfflinePartyReplicationRepairMacroIntegrationTest synchronizer = acmeName, ) - PartyToParticipantDeclarative.forParty(Set(participant1, participant3), daId)( - participant1, - alice, - PositiveInt.one, - Set( - (participant1, PP.Submission), - (participant3, PP.Submission), - ), - ) - participant3.ledger_api.javaapi.commands .submit( Seq(alice), @@ -280,7 +253,7 @@ final class OfflinePartyReplicationRepairMacroIntegrationTestPostgres extends OfflinePartyReplicationRepairMacroIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationSharedContractIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationSharedContractIntegrationTest.scala index 23cc3b1b8b..576d58ab8d 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationSharedContractIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationSharedContractIntegrationTest.scala @@ -6,13 +6,9 @@ package com.digitalasset.canton.integration.tests.multihostedparties import better.files.File import com.digitalasset.canton.config import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.examples.IouSyntax -import com.digitalasset.canton.integration.util.PartyToParticipantDeclarative +import com.digitalasset.canton.integration.tests.multihostedparties.PartyActivationFlow.authorizeWithTargetDisconnect import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -20,7 +16,6 @@ import com.digitalasset.canton.integration.{ TestConsoleEnvironment, } import com.digitalasset.canton.topology.PartyId -import com.digitalasset.canton.topology.transaction.ParticipantPermission as PP /** Consider the following setup: * - Alice, Bob hosted on participant1 @@ -63,29 +58,19 @@ sealed trait 
OfflinePartyReplicationSharedContractIntegrationTest )(implicit env: TestConsoleEnvironment): Unit = { import env.* - val ledgerEndP1 = participant1.ledger_api.state.end() - - PartyToParticipantDeclarative.forParty(Set(participant1, participant2), daId)( - participant1, - party, - PositiveInt.one, - Set( - (participant1, PP.Submission), - (participant2, PP.Observation), - ), - ) + val beforeActivationOffset = + authorizeWithTargetDisconnect(party, daId, source = participant1, target = participant2) val file = File.newTemporaryFile(s"acs_export_chopper_$party") participant1.parties.export_party_acs( party = party, synchronizerId = daId.logical, targetParticipantId = participant2.id, - beginOffsetExclusive = ledgerEndP1, + beginOffsetExclusive = beforeActivationOffset, exportFilePath = file.canonicalPath, ) - participant2.synchronizers.disconnect_all() - participant2.repair.import_acs(file.canonicalPath) + participant2.parties.import_party_acs(file.canonicalPath) participant2.synchronizers.reconnect_all() } @@ -112,5 +97,5 @@ sealed trait OfflinePartyReplicationSharedContractIntegrationTest final class OfflinePartyReplicationSharedContractIntegrationTestPostgres extends OfflinePartyReplicationSharedContractIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationWorkflowIdsIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationWorkflowIdsIntegrationTest.scala index 3b71e2a637..a7b990da83 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationWorkflowIdsIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OfflinePartyReplicationWorkflowIdsIntegrationTest.scala @@ -4,29 +4,20 @@ package com.digitalasset.canton.integration.tests.multihostedparties import com.daml.ledger.javaapi.data.Command -import com.digitalasset.canton.HasTempDirectory import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.console.ParticipantReference import com.digitalasset.canton.examples.java.iou.{Amount, Iou} -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} -import com.digitalasset.canton.integration.util.PartyToParticipantDeclarative +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} +import com.digitalasset.canton.integration.tests.multihostedparties.PartyActivationFlow.authorizeOnly import com.digitalasset.canton.integration.{EnvironmentDefinition, TestConsoleEnvironment} import com.digitalasset.canton.time.PositiveSeconds import com.digitalasset.canton.topology.PartyId -import com.digitalasset.canton.topology.transaction.ParticipantPermission as PP import java.util.Collections sealed trait OfflinePartyReplicationWorkflowIdsIntegrationTest - extends UseSilentSynchronizerInTest - with HasTempDirectory { - - private val acsSnapshot = tempDirectory.toTempFile(s"${getClass.getSimpleName}.gz") - private val acsSnapshotPath: String = acsSnapshot.toString + extends 
OfflinePartyReplicationIntegrationTestBase + with UseSilentSynchronizerInTest { // TODO(#27707) - Remove when ACS commitments consider the onboarding flag // Party replication to the target participant may trigger ACS commitment mismatch warnings. @@ -55,10 +46,7 @@ sealed trait OfflinePartyReplicationWorkflowIdsIntegrationTest implicit env => import env.* - val alice = participant1.parties.enable( - "Alice", - synchronizeParticipants = Seq(participant2), - ) + alice = participant1.parties.enable("Alice", synchronizeParticipants = Seq(participant2)) // create some IOUs, we'll expect the migration to group together those sharing the // ledger time (i.e. they have been created in the same transaction) @@ -67,17 +55,8 @@ sealed trait OfflinePartyReplicationWorkflowIdsIntegrationTest .submit(actAs = Seq(alice), commands = commands) } - val beforeActivationOffset = participant1.ledger_api.state.end() - - PartyToParticipantDeclarative.forParty(Set(participant1, participant2), daId)( - participant1, - alice, - PositiveInt.one, - Set( - (participant1, PP.Submission), - (participant2, PP.Observation), - ), - ) + val beforeActivationOffset = + authorizeOnly(alice, daId, source = participant1, target = participant2) silenceSynchronizerAndAwaitEffectiveness(daId, Seq(sequencer1, sequencer2), participant1) @@ -90,16 +69,6 @@ sealed trait OfflinePartyReplicationWorkflowIdsIntegrationTest resumeSynchronizerAndAwaitEffectiveness(daId, Seq(sequencer1, sequencer2), participant1) - PartyToParticipantDeclarative.forParty(Set(participant1, participant2), daId)( - participant1, - alice, - PositiveInt.one, - Set( - (participant1, PP.Submission), - (participant2, PP.Submission), - ), - ) - // Check that the transactions generated for the migration are actually grouped as // expected and that their workflow IDs can be used to correlate those transactions val txs = participant2.ledger_api.javaapi.updates.transactions(Set(alice), completeAfter = 4) @@ -133,27 +102,14 @@ sealed trait OfflinePartyReplicationWorkflowIdsIntegrationTest val workflowIdPrefix = "SOME_WORKFLOW_ID_123" - val bob = participant1.parties.enable( - "Bob", - synchronizeParticipants = Seq(participant3), - ) + bob = participant1.parties.enable("Bob", synchronizeParticipants = Seq(participant3)) for (commands <- Seq(ious(bob, 1), ious(bob, 1))) { - participant1.ledger_api.javaapi.commands - .submit(actAs = Seq(bob), commands = commands) + participant1.ledger_api.javaapi.commands.submit(actAs = Seq(bob), commands = commands) } - val beforeActivationOffset = participant1.ledger_api.state.end() - - PartyToParticipantDeclarative.forParty(Set(participant1, participant3), daId)( - participant1, - bob, - PositiveInt.one, - Set( - (participant1, PP.Submission), - (participant3, PP.Observation), - ), - ) + val beforeActivationOffset = + authorizeOnly(bob, daId, participant1, participant3) silenceSynchronizerAndAwaitEffectiveness(daId, Seq(sequencer1, sequencer2), participant1) @@ -167,16 +123,6 @@ sealed trait OfflinePartyReplicationWorkflowIdsIntegrationTest resumeSynchronizerAndAwaitEffectiveness(daId, Seq(sequencer1, sequencer2), participant1) - PartyToParticipantDeclarative.forParty(Set(participant1, participant3), daId)( - participant1, - bob, - PositiveInt.one, - Set( - (participant1, PP.Submission), - (participant3, PP.Submission), - ), - ) - // Check that the workflow ID prefix is set as specified val txs = participant3.ledger_api.javaapi.updates.transactions(Set(bob), completeAfter = 2) inside(txs) { case Seq(tx1, tx2) => @@ -228,5 +174,5 @@ sealed 
trait OfflinePartyReplicationWorkflowIdsIntegrationTest final class OfflinePartyReplicationWorkflowIdsIntegrationTestPostgres extends OfflinePartyReplicationWorkflowIdsIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnboardingConsortiumPartyIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnboardingConsortiumPartyIntegrationTest.scala index 51bdaeb9f6..702484d583 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnboardingConsortiumPartyIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnboardingConsortiumPartyIntegrationTest.scala @@ -6,15 +6,11 @@ package com.digitalasset.canton.integration.tests.multihostedparties import better.files.File import com.daml.nonempty.NonEmpty import com.digitalasset.canton.BaseTest.CantonLfV21 -import com.digitalasset.canton.config -import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.config.RequireTypes.{NonNegativeProportion, PositiveInt} +import com.digitalasset.canton.config.{CommitmentSendDelay, DbConfig} import com.digitalasset.canton.console.LocalParticipantReference import com.digitalasset.canton.integration.EnvironmentDefinition -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.util.LoggerSuppressionHelpers import com.digitalasset.canton.topology.transaction.{ DecentralizedNamespaceDefinition, @@ -23,8 +19,6 @@ import com.digitalasset.canton.topology.transaction.{ import com.digitalasset.canton.topology.{Namespace, PartyId, UniqueIdentifier} import monocle.Monocle.toAppliedFocusOps -import java.util.concurrent.TimeUnit -import scala.concurrent.duration.FiniteDuration import scala.util.Try /* @@ -46,20 +40,15 @@ sealed trait OnboardingConsortiumPartyIntegrationTest extends ConsortiumPartyInt EnvironmentDefinition.P4_S1M1 .updateTestingConfig( // do not delay sending commitments - _.focus(_.maxCommitmentSendDelayMillis).replace(Some(NonNegativeInt.zero)) + _.focus(_.commitmentSendDelay).replace( + Some( + CommitmentSendDelay(Some(NonNegativeProportion.zero), Some(NonNegativeProportion.zero)) + ) + ) ) .withSetup { implicit env => import env.* - // Reduce the epsilon (topology change delay) to 5ms so that topology transaction(s) - // become effective sooner; and an outdated topology state and test flakiness is avoided. 
- sequencer1.topology.synchronizer_parameters.propose_update( - daId, - _.update(topologyChangeDelay = - config.NonNegativeFiniteDuration(FiniteDuration(5, TimeUnit.MILLISECONDS)) - ), - ) - hostingParticipants = Seq(participant1, participant2, participant3) owningParticipants = Seq(participant1, participant2, participant3) @@ -128,6 +117,7 @@ sealed trait OnboardingConsortiumPartyIntegrationTest extends ConsortiumPartyInt newHostingParticipants.map(_.id -> ParticipantPermission.Confirmation), threshold = threshold, store = daId, + participantsRequiringPartyToBeOnboarded = Seq(participant4), ) ) @@ -191,7 +181,7 @@ sealed trait OnboardingConsortiumPartyIntegrationTest extends ConsortiumPartyInt logger.debug("Onboarding: Import ACS to P4 (an empty participant)") participant4.ledger_api.state.acs.of_all() shouldBe empty - participant4.repair.import_acs(acsFilename) + participant4.parties.import_party_acs(acsFilename) logger.debug(s"Onboarding: Connect P4 to the synchronizer $daName") participant4.synchronizers.connect_local(sequencer1, daName) @@ -371,5 +361,5 @@ sealed trait OnboardingConsortiumPartyIntegrationTest extends ConsortiumPartyInt final class OnboardingConsortiumPartyIntegrationTestPostgres extends OnboardingConsortiumPartyIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnlinePartyReplicationDecentralizedPartyTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnlinePartyReplicationDecentralizedPartyTest.scala index 14f36d125d..d63eaaab69 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnlinePartyReplicationDecentralizedPartyTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnlinePartyReplicationDecentralizedPartyTest.scala @@ -12,9 +12,9 @@ import com.digitalasset.canton.console.LocalInstanceReference import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.examples.java.iou.Iou import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.{ @@ -55,7 +55,7 @@ sealed trait OnlinePartyReplicationDecentralizedPartyTest with HasProgrammableSequencer with SharedEnvironment { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory)) private var alice: PartyId = _ diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnlinePartyReplicationNegotiationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnlinePartyReplicationNegotiationTest.scala index f4d88b8d46..db97081725 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnlinePartyReplicationNegotiationTest.scala +++ 
b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnlinePartyReplicationNegotiationTest.scala
@@ -25,10 +25,7 @@ import com.digitalasset.canton.integration.bootstrap.{
  NetworkBootstrapper,
  NetworkTopologyDescription,
}
-import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
-  UsePostgres,
-}
+import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer}
import com.digitalasset.canton.integration.{
  CommunityIntegrationTest,
  ConfigTransforms,
@@ -45,6 +42,7 @@ import com.digitalasset.canton.participant.admin.workflows.java.canton.internal
import com.digitalasset.canton.participant.config.UnsafeOnlinePartyReplicationConfig
import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig
import com.digitalasset.canton.sequencing.{
+  SequencerConnectionPoolDelays,
  SequencerConnectionValidation,
  SequencerConnections,
  SubmissionRequestAmplification,
@@ -57,6 +55,7 @@ import com.digitalasset.canton.{SequencerAlias, config}
import monocle.macros.syntax.lens.*
import org.slf4j.event.Level

+import scala.concurrent.duration.DurationInt
import scala.jdk.CollectionConverters.*
import scala.util.chaining.scalaUtilChainingOps

@@ -92,7 +91,7 @@ sealed trait OnlinePartyReplicationNegotiationTest
        .focus(_.parameters.unsafeEnableOnlinePartyReplication)
        .replace(sequencer != "sequencer3")

-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory))

  private val aliceName = "Alice"

@@ -148,11 +147,13 @@ sealed trait OnlinePartyReplicationNegotiationTest
    val sequencerConnections = SequencerConnections.tryMany(
      sequencers
        .map(s => s.sequencerConnection.withAlias(SequencerAlias.tryCreate(s.name))),
-      // A threshold of 2 ensures that each participant connects to all the three sequencers in the connectivity map
+      // A threshold of 3 ensures that each participant connects to all three sequencers in the connectivity map
+      // and stays connected.
      // TODO(#19911) Make this properly configurable
-      sequencerTrustThreshold = PositiveInt.two,
+      sequencerTrustThreshold = PositiveInt.three,
      sequencerLivenessMargin = NonNegativeInt.zero,
      submissionRequestAmplification = SubmissionRequestAmplification.NoAmplification,
+      sequencerConnectionPoolDelays = SequencerConnectionPoolDelays.default,
    )
    participant.synchronizers.connect_by_config(
      SynchronizerConnectionConfig(
@@ -287,7 +288,9 @@ sealed trait OnlinePartyReplicationNegotiationTest
    })

    // Wait until both SP and TP report that party replication has completed.
-    eventually() {
+    // Clearing the onboarding flag takes up to max-decision-timeout (initial value of 60s),
+    // so wait at least 1 minute.
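+    // The two-minute bound below doubles that, leaving headroom on top of the 60s default.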
+ eventually(timeUntilSuccess = 2.minutes) { val tpStatus = targetParticipant.parties.get_add_party_status( addPartyRequestId = addPartyRequestId ) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnlinePartyReplicationParticipantProtocolTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnlinePartyReplicationParticipantProtocolTest.scala index f2c22aa8a2..237127d452 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnlinePartyReplicationParticipantProtocolTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnlinePartyReplicationParticipantProtocolTest.scala @@ -11,11 +11,8 @@ import com.digitalasset.canton.crypto.TestHash import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.examples.java.cycle as M -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.sequencer.channel.SequencerChannelProtocolTestExecHelpers import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -60,7 +57,7 @@ sealed trait OnlinePartyReplicationParticipantProtocolTest with SequencerChannelProtocolTestExecHelpers with HasCycleUtils { registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.H2]( + new UseReferenceBlockSequencer[DbConfig.H2]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq(Set("sequencer1"), Set("sequencer2")).map(_.map(InstanceName.tryCreate)) @@ -95,13 +92,14 @@ sealed trait OnlinePartyReplicationParticipantProtocolTest participants.local.start() participants.local.synchronizers.connect_local(sequencer1, daName) - participants.all.dars.upload(CantonExamplesPath) + participants.all.dars.upload(CantonExamplesPath, synchronizerId = daId) alice = participant1.parties.enable( aliceName, synchronizeParticipants = Seq(participant2), synchronizer = Some(daName), ) participant1.synchronizers.connect_local(sequencer2, acmeName) + participant1.dars.upload(CantonExamplesPath, synchronizerId = acmeId) participant1.parties.enable(aliceName, synchronizer = Some(acmeName)).discard } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnlinePartyReplicationRecoverFromDisruptionsTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnlinePartyReplicationRecoverFromDisruptionsTest.scala index a58ca6ae5e..b80c08646a 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnlinePartyReplicationRecoverFromDisruptionsTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnlinePartyReplicationRecoverFromDisruptionsTest.scala @@ -5,10 +5,7 @@ package com.digitalasset.canton.integration.tests.multihostedparties import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.config.RequireTypes.PositiveInt -import 
com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, ConfigTransforms, @@ -38,7 +35,7 @@ sealed trait OnlinePartyReplicationRecoverFromDisruptionsTest with OnlinePartyReplicationTestHelpers with SharedEnvironment { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) private var alice: PartyId = _ private var carol: PartyId = _ @@ -224,6 +221,16 @@ sealed trait OnlinePartyReplicationRecoverFromDisruptionsTest e.warningMessage should include regex "Detected late processing \\(or clock skew\\) of batch with timestamp .* after sequencing" }, + LogEntryOptionality.OptionalMany -> { e => + e.loggerName should include("SequencerBasedRegisterTopologyTransactionHandle") + e.warningMessage should include( + "Failed broadcasting topology transactions: RequestFailed(No connection available)" + ) + }, + LogEntryOptionality.OptionalMany -> { e => + e.loggerName should include("QueueBasedSynchronizerOutbox") + e.warningMessage should include regex "synchronizer outbox flusher The synchronizer Synchronizer '.*?' failed the following topology transactions".r + }, ) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnlinePartyReplicationTestHelpers.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnlinePartyReplicationTestHelpers.scala index de0100e39c..382497d3df 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnlinePartyReplicationTestHelpers.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multihostedparties/OnlinePartyReplicationTestHelpers.scala @@ -3,7 +3,10 @@ package com.digitalasset.canton.integration.tests.multihostedparties -import com.digitalasset.canton.admin.api.client.data.AddPartyStatus +import com.digitalasset.canton.admin.api.client.data.{ + AddPartyStatus, + DynamicSynchronizerParameters as ConsoleDynamicSynchronizerParameters, +} import com.digitalasset.canton.concurrent.Threading import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} import com.digitalasset.canton.console.{InstanceReference, ParticipantReference} @@ -17,7 +20,7 @@ import com.digitalasset.canton.topology.transaction.{ SignedTopologyTransaction, TopologyChangeOp, } -import com.digitalasset.canton.{BaseTest, integration} +import com.digitalasset.canton.{BaseTest, config, integration} import scala.concurrent.duration.* import scala.util.Try @@ -27,6 +30,24 @@ import scala.util.Try private[tests] trait OnlinePartyReplicationTestHelpers { this: BaseTest => + protected def updateSynchronizerParameters( + synchronizerOwner: InstanceReference, + maxDecisionTimeout: config.NonNegativeFiniteDuration, + update: ConsoleDynamicSynchronizerParameters => ConsoleDynamicSynchronizerParameters = + identity, + )(implicit + env: integration.TestConsoleEnvironment + ): Unit = { + import env.* + val half = (maxDecisionTimeout.toInternal / NonNegativeInt.two).toConfig + synchronizerOwner.topology.synchronizer_parameters.propose_update( + daId, + params => + update(params.update(confirmationResponseTimeout = half, mediatorReactionTimeout = half)), + mustFullyAuthorize = true, + ) + 
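+ // Sketch of the identity this helper relies on (assumption: the maximum
+ // decision time is the sum of these two synchronizer parameters):
+ //   confirmationResponseTimeout + mediatorReactionTimeout
+ //     == maxDecisionTimeout / 2 + maxDecisionTimeout / 2
+ //     == maxDecisionTimeout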
} + protected def createSharedContractsAndProposeTopologyForOnPR( sourceParticipant: ParticipantReference, targetParticipant: ParticipantReference, @@ -222,7 +243,13 @@ private[tests] trait OnlinePartyReplicationTestHelpers { addPartyRequestId: String, expectedNumContracts: NonNegativeInt, ): Unit = - eventually(retryOnTestFailuresOnly = false, maxPollInterval = 10.millis) { + eventually( + // Clearing the onboarding flag takes up to max-decision-timeout (initial value of 60s), + // so wait at least 1 minute. + timeUntilSuccess = 2.minutes, + retryOnTestFailuresOnly = false, + maxPollInterval = 1.second, + ) { // The try handles the optional `CommandFailure`, so that we don't give up while the SP is stopped. val spStatusO = Try(sourceParticipant.parties.get_add_party_status(addPartyRequestId)).toOption diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/AssignmentBeforeUnassignmentIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/AssignmentBeforeUnassignmentIntegrationTest.scala index db8ced2c34..6b18176caa 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/AssignmentBeforeUnassignmentIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/AssignmentBeforeUnassignmentIntegrationTest.scala @@ -9,11 +9,11 @@ import com.digitalasset.canton.config.CantonRequireTypes.InstanceName import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.examples.java.iou.Iou -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.util.{EntitySyntax, PartiesAllocator} @@ -50,7 +50,8 @@ sealed trait AssignmentBeforeUnassignmentIntegrationTest import env.* participants.all.synchronizers.connect_local(sequencer1, alias = daName) participants.all.synchronizers.connect_local(sequencer2, alias = acmeName) - participants.all.dars.upload(BaseTest.CantonExamplesPath) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = daId) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = acmeId) val alice = "alice" @@ -223,7 +224,7 @@ class AssignmentBeforeUnassignmentIntegrationTestPostgres extends AssignmentBeforeUnassignmentIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq(Set("sequencer1"), Set("sequencer2")).map(_.map(InstanceName.tryCreate)) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/AsynchronousReassignmentProtocolIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/AsynchronousReassignmentProtocolIntegrationTest.scala index 84f8c18434..866b5dcf67 100644 --- 
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/AsynchronousReassignmentProtocolIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/AsynchronousReassignmentProtocolIntegrationTest.scala @@ -7,11 +7,11 @@ import com.digitalasset.canton.config.CantonRequireTypes.InstanceName import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.console.LocalSequencerReference -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.util.{EntitySyntax, PartiesAllocator} @@ -55,7 +55,7 @@ final class AsynchronousReassignmentProtocolIntegrationTest registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq(Set("sequencer1"), Set("sequencer2")).map(_.map(InstanceName.tryCreate)) @@ -73,7 +73,8 @@ final class AsynchronousReassignmentProtocolIntegrationTest import env.* participants.all.synchronizers.connect_local(sequencer1, alias = daName) participants.all.synchronizers.connect_local(sequencer2, alias = acmeName) - participants.all.dars.upload(BaseTest.CantonExamplesPath) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = daId) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = acmeId) // Disable automatic assignment so that we really control it def disableAutomaticAssignment( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/AutomaticReassignmentBatchingIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/AutomaticReassignmentBatchingIntegrationTest.scala index 2a35288422..b0e5e02b2a 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/AutomaticReassignmentBatchingIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/AutomaticReassignmentBatchingIntegrationTest.scala @@ -40,7 +40,8 @@ class AutomaticReassignmentBatchingIntegrationTest import env.* participant1.synchronizers.connect_local(sequencer1, alias = daName) participant1.synchronizers.connect_local(sequencer2, alias = acmeName) - participant1.dars.upload(CantonTestsPath) + participant1.dars.upload(CantonTestsPath, synchronizerId = daId) + participant1.dars.upload(CantonTestsPath, synchronizerId = acmeId) } "automatic reassignment" should { diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/GetConnectedSynchronizersIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/GetConnectedSynchronizersIntegrationTest.scala index 9f48ee3968..8971e54bea 100644 --- 
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/GetConnectedSynchronizersIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/GetConnectedSynchronizersIntegrationTest.scala @@ -7,11 +7,8 @@ import com.daml.ledger.api.v2 as proto import com.digitalasset.canton.config.CantonRequireTypes.InstanceName import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.console.LocalParticipantReference -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.util.GrpcAdminCommandSupport.* import com.digitalasset.canton.integration.util.GrpcServices.StateService import com.digitalasset.canton.integration.{ @@ -25,7 +22,7 @@ abstract class GetConnectedSynchronizerIntegrationTest extends CommunityIntegrationTest with SharedEnvironment { private lazy val plugin = - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentConfirmationAdminPartyIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentConfirmationAdminPartyIntegrationTest.scala index 52fbffb330..7e70f10c6e 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentConfirmationAdminPartyIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentConfirmationAdminPartyIntegrationTest.scala @@ -6,11 +6,11 @@ package com.digitalasset.canton.integration.tests.multisynchronizer import com.digitalasset.canton.config.CantonRequireTypes.InstanceName import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.console.LocalSequencerReference -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.util.{ @@ -43,6 +43,7 @@ import java.util.UUID import java.util.concurrent.atomic.AtomicLong import scala.collection.concurrent.TrieMap import scala.collection.mutable +import scala.concurrent.duration.DurationInt /* This test checks that the admin party of the submitting participant is required for reassignments. @@ -65,6 +66,11 @@ sealed trait ReassignmentConfirmationAdminPartyIntegrationTest EnvironmentDefinition.P2_S1M1_S1M1 // We want to trigger time out .addConfigTransforms(ConfigTransforms.useStaticTime) + .addConfigTransforms( + // Because we play with the simClock, ensure we have enough forward tolerance + // on the target timestamp to not trip up unassignments.
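+ // The tolerance bounds how far ahead of the target synchronizer's current time
+ // an unassignment's target timestamp may lie before confirmers abstain (cf. the
+ // ReassignmentTargetTimestampIntegrationTest added later in this diff). As an
+ // illustrative check (names assumed, not the actual implementation):
+ //   targetTimestamp <= targetSynchronizerNow + targetTimestampForwardTolerance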
+ ConfigTransforms.updateTargetTimestampForwardTolerance(1.hours) + ) .withSetup { implicit env => import env.* @@ -83,7 +89,8 @@ sealed trait ReassignmentConfirmationAdminPartyIntegrationTest participants.all.synchronizers.connect_local(sequencer1, alias = daName) participants.all.synchronizers.connect_local(sequencer2, alias = acmeName) - participants.all.dars.upload(BaseTest.CantonExamplesPath) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = daId) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = acmeId) signatory = participant1.parties.enable( "signatory", @@ -314,7 +321,7 @@ class ReassignmentConfirmationAdminPartyIntegrationTestPostgres extends ReassignmentConfirmationAdminPartyIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq(Set("sequencer1"), Set("sequencer2")).map(_.map(InstanceName.tryCreate)) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentConfirmationPoliciesIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentConfirmationPoliciesIntegrationTest.scala index d2f41d4742..be308406b1 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentConfirmationPoliciesIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentConfirmationPoliciesIntegrationTest.scala @@ -7,11 +7,8 @@ import com.digitalasset.canton.config.CantonRequireTypes.InstanceName import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.console.LocalSequencerReference -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.util.{ AcsInspection, @@ -64,7 +61,8 @@ sealed trait ReassignmentConfirmationPoliciesPartyIntegrationTest participants.all.synchronizers.connect_local(sequencer1, alias = daName) participants.all.synchronizers.connect_local(sequencer2, alias = acmeName) - participants.all.dars.upload(BaseTest.CantonExamplesPath) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = daId) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = acmeId) } "confirmation policies for reassignments" should { @@ -187,7 +185,7 @@ class ReassignmentConfirmationPoliciesPartyIntegrationTestPostgres extends ReassignmentConfirmationPoliciesPartyIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq(Set("sequencer1"), Set("sequencer2")).map(_.map(InstanceName.tryCreate)) diff --git 
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentNoReassignmentDataIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentNoReassignmentDataIntegrationTest.scala index a23e8bf9ce..e8acdd5e01 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentNoReassignmentDataIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentNoReassignmentDataIntegrationTest.scala @@ -10,11 +10,11 @@ import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.console.LocalSequencerReference import com.digitalasset.canton.examples.java.iou.GetCash -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.util.{AcsInspection, PartyToParticipantDeclarative} @@ -81,7 +81,8 @@ sealed trait ReassignmentNoReassignmentDataIntegrationTest participants.all.synchronizers.connect_local(sequencer1, alias = daName) participants.all.synchronizers.connect_local(sequencer2, alias = acmeName) - participants.all.dars.upload(BaseTest.CantonExamplesPath) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = daId) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = acmeId) alice = participant1.parties.enable("alice", synchronizer = daName) participant1.parties.enable("alice", synchronizer = acmeName) @@ -313,7 +314,7 @@ class ReassignmentNoReassignmentDataIntegrationTestPostgres extends ReassignmentNoReassignmentDataIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq(Set("sequencer1"), Set("sequencer2")).map(_.map(InstanceName.tryCreate)) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentServiceConcurrentReassignmentsIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentServiceConcurrentReassignmentsIntegrationTest.scala index 0c3df93f94..f10af18a82 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentServiceConcurrentReassignmentsIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentServiceConcurrentReassignmentsIntegrationTest.scala @@ -6,11 +6,11 @@ package com.digitalasset.canton.integration.tests.multisynchronizer import com.digitalasset.canton.config.CantonRequireTypes.InstanceName import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.console.LocalSequencerReference -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer +import 
com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.util.{ @@ -74,7 +74,8 @@ trait ReassignmentServiceConcurrentReassignmentsIntegrationTest participants.all.synchronizers.connect_local(sequencer1, alias = daName) participants.all.synchronizers.connect_local(sequencer2, alias = acmeName) - participants.all.dars.upload(BaseTest.CantonExamplesPath) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = daId) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = acmeId) party1aId = participant1.parties.enable(party1a, synchronizer = daName) participant1.parties.enable(party1a, synchronizer = acmeName) @@ -327,7 +328,7 @@ class ReassignmentServiceConcurrentReassignmentsIntegrationTestPostgres extends ReassignmentServiceConcurrentReassignmentsIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq(Set("sequencer1"), Set("sequencer2")) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentServiceIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentServiceIntegrationTest.scala index 54c02a1498..a6f378d4d0 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentServiceIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentServiceIntegrationTest.scala @@ -16,11 +16,8 @@ import com.digitalasset.canton.console.LocalParticipantReference import com.digitalasset.canton.data.ReassignmentRef import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.examples.java.iou.Iou -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.util.GrpcAdminCommandSupport.* import com.digitalasset.canton.integration.util.GrpcServices.ReassignmentsService @@ -38,6 +35,7 @@ import com.digitalasset.canton.integration.util.{ } import com.digitalasset.canton.integration.{ CommunityIntegrationTest, + ConfigTransforms, EnvironmentDefinition, EnvironmentSetupPlugin, SharedEnvironment, @@ -90,6 +88,10 @@ abstract class ReassignmentServiceIntegrationTest override def environmentDefinition: EnvironmentDefinition = EnvironmentDefinition.P3_S1M1_S1M1_S1M1 + .addConfigTransforms( + // Ensure reassignments are not tripped up by some participants being a little behind. 
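+ // Unlike the static-time suites above, this test runs on a real clock, so a
+ // small margin suffices: 30.seconds only has to absorb ordinary lag between
+ // participants rather than simulated clock jumps.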
+ ConfigTransforms.updateTargetTimestampForwardTolerance(30.seconds) + ) .withSetup { implicit env => import env.* @@ -101,7 +103,8 @@ abstract class ReassignmentServiceIntegrationTest participant2.synchronizers.connect_local(sequencer2, alias = acmeName) participant3.synchronizers.connect_local(sequencer2, alias = acmeName) - participants.all.dars.upload(CantonExamplesPath) + participants.all.dars.upload(CantonExamplesPath, synchronizerId = daId) + participants.all.dars.upload(CantonExamplesPath, synchronizerId = acmeId) // Allocate parties party1a = participant1.parties.enable( @@ -794,7 +797,7 @@ abstract class ReassignmentServiceIntegrationTest class ReferenceReassignmentServiceIntegrationTest extends ReassignmentServiceIntegrationTest { override protected lazy val plugin = - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentServiceTimeoutCommandRejectedIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentServiceTimeoutCommandRejectedIntegrationTest.scala index f56848b822..4d5272bc39 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentServiceTimeoutCommandRejectedIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentServiceTimeoutCommandRejectedIntegrationTest.scala @@ -12,11 +12,11 @@ import com.digitalasset.canton.console.LocalParticipantReference import com.digitalasset.canton.data.{CantonTimestamp, UnassignmentData} import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.integration.bootstrap.InitializedSynchronizer -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.util.HasCommandRunnersHelpers.{ @@ -57,6 +57,7 @@ import com.digitalasset.canton.{ import org.scalatest.Assertion import scala.collection.mutable +import scala.concurrent.duration.DurationInt sealed trait ReassignmentServiceTimeoutCommandRejectedIntegrationTest extends CommunityIntegrationTest @@ -70,6 +71,7 @@ sealed trait ReassignmentServiceTimeoutCommandRejectedIntegrationTest override def environmentDefinition: EnvironmentDefinition = EnvironmentDefinition.P2_S1M1_S1M1 .addConfigTransforms(ConfigTransforms.useStaticTime) + .addConfigTransforms(ConfigTransforms.updateTargetTimestampForwardTolerance(60.seconds)) .withSetup { implicit env => import env.* @@ -88,7 +90,8 @@ sealed trait ReassignmentServiceTimeoutCommandRejectedIntegrationTest disableAssignmentExclusivityTimeout(getInitializedSynchronizer(daName)) disableAssignmentExclusivityTimeout(getInitializedSynchronizer(acmeName)) - participants.all.dars.upload(BaseTest.CantonExamplesPath) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = daId) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = acmeId) 
programmableSequencers.put(daName, getProgrammableSequencer(sequencer1.name)) programmableSequencers.put(acmeName, getProgrammableSequencer(sequencer2.name)) } @@ -324,7 +327,7 @@ class ReassignmentServiceTimeoutCommandRejectedIntegrationTestPostgres extends ReassignmentServiceTimeoutCommandRejectedIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq(Set("sequencer1"), Set("sequencer2")) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentSubmissionIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentSubmissionIntegrationTest.scala index faf2a862ad..00b777ebae 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentSubmissionIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentSubmissionIntegrationTest.scala @@ -5,14 +5,11 @@ package com.digitalasset.canton.integration.tests.multisynchronizer import com.digitalasset.canton.config.CantonRequireTypes.InstanceName import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.console.{CommandFailure, LocalSequencerReference} import com.digitalasset.canton.data.ReassignmentRef -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.util.{ AcsInspection, @@ -36,7 +33,6 @@ import com.digitalasset.canton.topology.PartyId import com.digitalasset.canton.topology.transaction.ParticipantPermission import com.digitalasset.canton.topology.transaction.ParticipantPermission.Submission import com.digitalasset.canton.{BaseTest, config} -import monocle.macros.syntax.lens.* sealed trait ReassignmentSubmissionIntegrationTest extends CommunityIntegrationTest @@ -55,21 +51,13 @@ sealed trait ReassignmentSubmissionIntegrationTest EnvironmentDefinition.P2_S1M1_S1M1 // We want to trigger time out .addConfigTransforms(ConfigTransforms.useStaticTime) - .addConfigTransform( - ConfigTransforms.updateAllParticipantConfigs_( - // Make sure that unassignment picks a recent target synchronizer topology snapshot - // TODO(#25110): Remove this configuration once the correct snapshot is used in computing - // the vetting checks for the target synchronizer - _.focus(_.parameters.reassignmentTimeProofFreshnessProportion) - .replace(NonNegativeInt.zero) - ) - ) .withSetup { implicit env => import env.* participants.all.synchronizers.connect_local(sequencer1, alias = daName) participants.all.synchronizers.connect_local(sequencer2, alias = acmeName) - participants.all.dars.upload(BaseTest.CantonExamplesPath) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = 
daId) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = acmeId) disableAutomaticAssignment(sequencer1) disableAutomaticAssignment(sequencer2) @@ -236,7 +224,7 @@ sealed trait ReassignmentSubmissionIntegrationTest class ReassignmentSubmissionIntegrationTestPostgres extends ReassignmentSubmissionIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq(Set("sequencer1"), Set("sequencer2")).map(_.map(InstanceName.tryCreate)) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentTargetTimestampIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentTargetTimestampIntegrationTest.scala new file mode 100644 index 0000000000..7b9fc47dbb --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentTargetTimestampIntegrationTest.scala @@ -0,0 +1,169 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.multisynchronizer + +import com.digitalasset.canton.BaseTest +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.data.{CantonTimestamp, FullUnassignmentTree} +import com.digitalasset.canton.integration.plugins.UseProgrammableSequencer +import com.digitalasset.canton.integration.tests.examples.IouSyntax +import com.digitalasset.canton.integration.util.{AcsInspection, EntitySyntax, PartiesAllocator} +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentDefinition, + SharedEnvironment, + TestConsoleEnvironment, +} +import com.digitalasset.canton.logging.SuppressionRule +import com.digitalasset.canton.participant.protocol.reassignment.ReassignmentDataHelpers +import com.digitalasset.canton.participant.protocol.submission.SeedGenerator +import com.digitalasset.canton.protocol.ContractInstance +import com.digitalasset.canton.synchronizer.sequencer.{ + HasProgrammableSequencer, + ProgrammableSequencer, + SendDecision, +} +import com.digitalasset.canton.topology.PartyId +import com.digitalasset.canton.topology.transaction.ParticipantPermission.Submission +import com.digitalasset.canton.util.MaliciousParticipantNode +import com.digitalasset.canton.util.ReassignmentTag.{Source, Target} +import org.slf4j.event.Level + +import java.time.Instant +import java.util.UUID + +class ReassignmentTargetTimestampIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment + with AcsInspection + with HasProgrammableSequencer + with EntitySyntax { + registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory)) + + override def environmentDefinition: EnvironmentDefinition = EnvironmentDefinition.P2_S1M1_S1M1 + .withSetup { implicit env => + import env.* + + participants.all.synchronizers.connect_local(sequencer1, alias = daName) + participants.all.synchronizers.connect_local(sequencer2, alias = acmeName) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = daId) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = acmeId) + + val allParticipants = participants.all.toSet + 
PartiesAllocator(allParticipants)( + newParties = Seq("alice" -> participant1), + targetTopology = Map( + "alice" -> Map( + daId -> (PositiveInt.one, allParticipants.map(_.id -> Submission)), + acmeId -> (PositiveInt.one, allParticipants.map(_.id -> Submission)), + ) + ), + ) + alice = "alice".toPartyId(participant1) + + maliciousP1 = MaliciousParticipantNode( + participant1, + daId, + testedProtocolVersion, + timeouts, + loggerFactory, + ) + } + + private var alice: PartyId = _ + private var maliciousP1: MaliciousParticipantNode = _ + + "unassignment request with target timestamp too far in the future" should { + "lead to LocalAbstain" in { implicit env => + import env.* + + val contract = createContract() + val doomsday = CantonTimestamp.fromInstant(Instant.parse("2060-12-31T23:59:59Z")).value + val unvalidatableUnassignReq = createFullUnassignmentTree(contract, Target(doomsday)) + + val recorder = new RecordSequencerMessages() + getProgrammableSequencer(sequencer1.name) + .setPolicy_("record sequencer messages")(recorder.onSequencerMessage(_)) + + loggerFactory.assertEventuallyLogsSeq(SuppressionRule.Level(Level.INFO))( + within = maliciousP1 + .submitUnassignmentRequest(unvalidatableUnassignReq) + .futureValueUS + .value, + logs => + forAtLeast(2, logs)( + _.message should include regex "Sending an abstain verdict for .* because target timestamp is not validatable" + ), + ) + + eventually() { + ProgrammableSequencer.confirmationResponsesKind(recorder.seen) shouldBe Map( + participant1.id -> Seq("LocalAbstain"), + participant2.id -> Seq("LocalAbstain"), + ) + } + } + } + + private def createContract()(implicit env: TestConsoleEnvironment): ContractInstance = { + import env.* + val iou = IouSyntax.createIou(participant1, Some(daId))(alice, alice, 1.0) + participant1.testing + .acs_search(daName, exactId = iou.id.contractId, limit = PositiveInt.one) + .loneElement + } + + private def createFullUnassignmentTree( + contract: ContractInstance, + targetTs: Target[CantonTimestamp], + )(implicit + env: TestConsoleEnvironment + ): FullUnassignmentTree = { + import env.* + import com.digitalasset.canton.config.RequireTypes.NonNegativeInt + import com.digitalasset.canton.sequencing.protocol.MediatorGroupRecipient + + val pureCrypto = participant1.underlying.value.sync.syncCrypto + .forSynchronizer(daId, staticSynchronizerParameters1) + .value + .pureCrypto + + val helpers = ReassignmentDataHelpers( + contract = contract, + sourceSynchronizer = Source(daId), + targetSynchronizer = Target(acmeId), + pureCrypto = pureCrypto, + targetTimestamp = targetTs, + ) + + val uuid = new UUID(42L, 0) + val seed = new SeedGenerator(pureCrypto).generateSaltSeed() + + helpers + .unassignmentRequest( + alice.toLf, + participant1, + MediatorGroupRecipient(NonNegativeInt.zero), + )(Set(participant1, participant2)) + .toFullUnassignmentTree(pureCrypto, pureCrypto, seed, uuid) + } + + private class RecordSequencerMessages { + import com.digitalasset.canton.sequencing.protocol.SubmissionRequest + import com.digitalasset.canton.topology.Member + import scala.collection.concurrent.TrieMap + + private val bySender = TrieMap.empty[Member, Seq[SubmissionRequest]] + + def onSequencerMessage(req: SubmissionRequest): SendDecision = { + bySender.updateWith(req.sender) { + case None => Some(Seq(req)) + case Some(msgs) => Some(msgs ++ Seq(req)) + } + SendDecision.Process + } + + def seen: Map[Member, Seq[SubmissionRequest]] = bySender.toMap + } +} diff --git 
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentsConfirmationObserversIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentsConfirmationObserversIntegrationTest.scala index 4c88dd1a90..d600f27a7e 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentsConfirmationObserversIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentsConfirmationObserversIntegrationTest.scala @@ -8,11 +8,11 @@ import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.console.{LocalParticipantReference, LocalSequencerReference} import com.digitalasset.canton.data.UnassignmentData -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.util.{ @@ -103,7 +103,8 @@ sealed trait ReassignmentsConfirmationObserversIntegrationTest participants.all.synchronizers.connect_local(sequencer1, alias = daName) participants.all.synchronizers.connect_local(sequencer2, alias = acmeName) - participants.all.dars.upload(BaseTest.CantonExamplesPath) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = daId) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = acmeId) PartiesAllocator(Set(participant1, participant2, participant3))( newParties = Seq( @@ -297,7 +298,7 @@ class ReassignmentsConfirmationObserversIntegrationTestPostgres extends ReassignmentsConfirmationObserversIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq(Set("sequencer1"), Set("sequencer2")).map(_.map(InstanceName.tryCreate)) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentsConfirmationThresholdIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentsConfirmationThresholdIntegrationTest.scala index 7dd5c5a484..a08ed9a192 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentsConfirmationThresholdIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/ReassignmentsConfirmationThresholdIntegrationTest.scala @@ -5,14 +5,14 @@ package com.digitalasset.canton.integration.tests.multisynchronizer import com.digitalasset.canton.config.CantonRequireTypes.InstanceName import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.console.{LocalSequencerReference, ParticipantReference} import 
com.digitalasset.canton.examples.java.iou.Iou -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.util.{ @@ -43,13 +43,13 @@ import com.digitalasset.canton.synchronizer.sequencer.{ import com.digitalasset.canton.topology.transaction.ParticipantPermission import com.digitalasset.canton.topology.{ParticipantId, PartyId} import com.digitalasset.canton.{BaseTest, SynchronizerAlias, config} -import monocle.macros.syntax.lens.* import org.scalatest.Assertion import java.time.Duration import java.util.concurrent.atomic.AtomicLong import scala.collection.concurrent.TrieMap import scala.collection.mutable +import scala.concurrent.duration.DurationInt import scala.concurrent.{Future, Promise} /* @@ -91,10 +91,7 @@ sealed trait ReassignmentsConfirmationThresholdIntegrationTest EnvironmentDefinition.P3_S1M1_S1M1 .addConfigTransforms( ConfigTransforms.useStaticTime, - ConfigTransforms.updateAllParticipantConfigs_( - _.focus(_.parameters.reassignmentTimeProofFreshnessProportion) - .replace(NonNegativeInt.zero) - ), + ConfigTransforms.updateTargetTimestampForwardTolerance(10.minutes), ) .withSetup { implicit env => import env.* @@ -115,7 +112,8 @@ sealed trait ReassignmentsConfirmationThresholdIntegrationTest participants.all.synchronizers.connect_local(sequencer1, alias = daName) participants.all.synchronizers.connect_local(sequencer2, alias = acmeName) - participants.all.dars.upload(BaseTest.CantonExamplesPath) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = daId) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = acmeId) programmableSequencers.put( daName, @@ -514,7 +512,7 @@ class ReassignmentsConfirmationThresholdIntegrationTestPostgres extends ReassignmentsConfirmationThresholdIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq(Set("sequencer1"), Set("sequencer2")) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/RepairServiceIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/RepairServiceIntegrationTest.scala index 728138e4ae..1fb37afae4 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/RepairServiceIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/RepairServiceIntegrationTest.scala @@ -8,14 +8,11 @@ import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands.Updat UnassignedWrapper, } import com.digitalasset.canton.config.CantonRequireTypes.InstanceName -import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.config.{DbConfig, NonNegativeDuration} import com.digitalasset.canton.console.LocalSequencerReference import com.digitalasset.canton.examples.java.iou.Iou -import 
com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.util.AcsInspection import com.digitalasset.canton.integration.{ @@ -69,7 +66,8 @@ abstract class RepairServiceIntegrationTest participant1.synchronizers.connect_local(sequencer1, alias = daName) participant1.synchronizers.connect_local(sequencer3, alias = acmeName) - participant1.dars.upload(CantonExamplesPath) + participant1.dars.upload(CantonExamplesPath, synchronizerId = daId) + participant1.dars.upload(CantonExamplesPath, synchronizerId = acmeId) payer = participant1.parties.enable(payerName, synchronizer = daName) participant1.parties.enable(payerName, synchronizer = acmeName) @@ -168,8 +166,7 @@ abstract class RepairServiceIntegrationTest ) // Because we changed the reassignment counter of a contract, the running commitments will not match the ACS - // TODO(#23735) Add here the sequence of repair commands that bring the system into a consistent state - // and remove the suppression of ACS_COMMITMENT_INTERNAL_ERROR + // This is why we repair the commitments below. val expectedLogs: Seq[(LogEntryOptionality, LogEntry => Assertion)] = Seq( ( LogEntryOptionality.OptionalMany, @@ -185,26 +182,44 @@ abstract class RepairServiceIntegrationTest loggerFactory.assertLogsUnorderedOptional( { participant1.synchronizers.reconnect_all() - - val afterAssignation = participant1.ledger_api.state.acs - .active_contracts_of_party(payer) - .filter(_.createdEvent.value.contractId == cid.coid) - .loneElement - - afterAssignation.synchronizerId shouldBe acmeId.logical.toProtoPrimitive - afterAssignation.reassignmentCounter shouldBe 2 - - val archiveCmd = participant1.ledger_api.javaapi.state.acs - .await(Iou.COMPANION)(payer, predicate = _.id.toLf == cid) - .id - .exerciseArchive() - .commands() - .asScala - .toSeq - participant1.ledger_api.javaapi.commands.submit(Seq(payer), archiveCmd) + // TODO(i23735): when we fix the issue, the commitment reinitialization below shouldn't be necessary, + // because, upon reconnection, recovery events would essentially get commitments back to a good state. + // Repairing commitments happens when we are connected to the synchronizer, so we reconnect first.
+ // In between reconnecting and repairing, a commitment tick might still happen, which is why we still + // have the log suppression until after repairing the commitments + val reinitCmtsResult = participant1.commitments.reinitialize_commitments( + Seq.empty, + Seq.empty, + Seq.empty, + NonNegativeDuration.ofSeconds(30), + ) + reinitCmtsResult.map(_.synchronizerId) should contain theSameElementsAs Seq( + daId.logical, + acmeId.logical, + ) + forAll(reinitCmtsResult)(_.acsTimestamp.isDefined shouldBe true) }, - expectedLogs* + expectedLogs *, ) + + // After reinitializing the commitments, there should not be any more ACS_COMMITMENT_INTERNAL_ERROR + + val afterAssignation = participant1.ledger_api.state.acs + .active_contracts_of_party(payer) + .filter(_.createdEvent.value.contractId == cid.coid) + .loneElement + + afterAssignation.synchronizerId shouldBe acmeId.logical.toProtoPrimitive + afterAssignation.reassignmentCounter shouldBe 2 + + val archiveCmd = participant1.ledger_api.javaapi.state.acs + .await(Iou.COMPANION)(payer, predicate = _.id.toLf == cid) + .id + .exerciseArchive() + .commands() + .asScala + .toSeq + participant1.ledger_api.javaapi.commands.submit(Seq(payer), archiveCmd) } } } @@ -212,7 +227,7 @@ abstract class RepairServiceIntegrationTest class ReferenceRepairServiceIntegrationTest extends RepairServiceIntegrationTest { override protected lazy val plugin = - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/UpdateServiceIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/UpdateServiceIntegrationTest.scala index 9fd9f4fec4..28e5e418bf 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/UpdateServiceIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/multisynchronizer/UpdateServiceIntegrationTest.scala @@ -14,11 +14,8 @@ import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.console.ParticipantReference import com.digitalasset.canton.discard.Implicits.* import com.digitalasset.canton.examples.java.iou.{Dummy, GetCash, Iou} -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.util.UpdateFormatHelpers.getUpdateFormat import com.digitalasset.canton.integration.util.{ @@ -54,7 +51,8 @@ abstract class UpdateServiceIntegrationTest participant1.synchronizers.connect_local(sequencer1, alias = daName) participant1.synchronizers.connect_local(sequencer3, alias = acmeName) - participant1.dars.upload(CantonExamplesPath) + participant1.dars.upload(CantonExamplesPath, synchronizerId = daId) + participant1.dars.upload(CantonExamplesPath, synchronizerId = acmeId) // Allocate parties otherParty = participant1.parties.enable(otherPartyName, synchronizer = daName) @@ -63,7 +61,7 @@ abstract class 
UpdateServiceIntegrationTest } private lazy val plugin = - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/nightly/kms/KmsCryptoNoPreDefinedKeysIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/nightly/kms/KmsCryptoNoPreDefinedKeysIntegrationTest.scala new file mode 100644 index 0000000000..4ae13dbbfe --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/nightly/kms/KmsCryptoNoPreDefinedKeysIntegrationTest.scala @@ -0,0 +1,72 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.nightly.kms + +import cats.data.EitherT +import cats.syntax.parallel.* +import com.digitalasset.canton.concurrent.{ExecutionContextIdlenessExecutorService, Threading} +import com.digitalasset.canton.config.DefaultProcessingTimeouts.shutdownProcessing +import com.digitalasset.canton.config.{DbConfig, KmsConfig} +import com.digitalasset.canton.crypto.kms.{Kms, KmsError} +import com.digitalasset.canton.crypto.store.{CryptoPrivateStore, KmsCryptoPrivateStore} +import com.digitalasset.canton.integration.plugins.{UseKms, UseReferenceBlockSequencer} +import com.digitalasset.canton.integration.tests.security.kms.KmsCryptoIntegrationTestBase +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentSetup, + EnvironmentSetupPlugin, +} +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.util.FutureInstances.* + +import scala.concurrent.Future + +/** Runs a crypto integration test with one participant running a KMS provider without pre-generated + * keys (i.e. keys are generated on-the-fly using a KMS and nodes are automatically initialized). 
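+ * Cleanup sketch (mirroring the `deleteKeys` helper below): enumerate the key ids
+ * held in the node's KMS-backed private store and delete each one through the KMS
+ * client, e.g.
+ * {{{
+ *   for {
+ *     keyIds <- EitherT.right(kmsStore.listAllKmsKeys)
+ *     _ <- keyIds.toList.parTraverse(kmsClient.deleteKey)
+ *   } yield ()
+ * }}}
+ * so that nightly runs do not accumulate billable keys.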
+ */ +trait KmsCryptoNoPreDefinedKeysIntegrationTest extends KmsCryptoIntegrationTestBase { + self: CommunityIntegrationTest with EnvironmentSetup => + + protected def kmsConfig: KmsConfig + + private lazy implicit val kmsInitKeysDeletionExecutionContext + : ExecutionContextIdlenessExecutorService = + Threading.newExecutionContext( + loggerFactory.threadName + "-kms-init-keys-deletion-execution-context", + noTracingLogger, + ) + + private def deleteKeys( + store: CryptoPrivateStore, + kmsClient: Kms, + ): EitherT[FutureUnlessShutdown, KmsError, Unit] = + store match { + case kmsStore: KmsCryptoPrivateStore => + for { + listKeys <- EitherT.right(kmsStore.listAllKmsKeys) + _ <- listKeys.toList.parTraverse(kmsKeyId => kmsClient.deleteKey(kmsKeyId)) + } yield () + case _ => EitherT.rightT[FutureUnlessShutdown, KmsError](()) + } + + // all generated keys must be deleted after the test has run + protected def deleteAllGenerateKeys(): Unit = + shutdownProcessing.await_("delete all canton-created keys") { + val deleteResult = UseKms.withKmsClient(kmsConfig, timeouts, loggerFactory) { kmsClient => + provideEnvironment.nodes.local.parTraverse_ { node => + if (protectedNodes.contains(node.name)) + deleteKeys(node.crypto.cryptoPrivateStore, kmsClient).failOnShutdown + else EitherT.rightT[Future, KmsError](()) + } + } + deleteResult.valueOr(err => logger.error("error deleting keys: " + err.show)) + + } + + setupPlugins( + withAutoInit = true, + storagePlugin = Option.empty[EnvironmentSetupPlugin], + sequencerPlugin = new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory), + ) +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/nightly/kms/aws/AwsEncryptedCryptoPrivateStoreNoPreDefinedKeyIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/nightly/kms/aws/AwsEncryptedCryptoPrivateStoreNoPreDefinedKeyIntegrationTest.scala new file mode 100644 index 0000000000..c59a707b88 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/nightly/kms/aws/AwsEncryptedCryptoPrivateStoreNoPreDefinedKeyIntegrationTest.scala @@ -0,0 +1,43 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.nightly.kms.aws + +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} +import com.digitalasset.canton.integration.tests.security.kms.EncryptedCryptoPrivateStoreIntegrationTest +import com.digitalasset.canton.integration.tests.security.kms.aws.AwsEncryptedCryptoPrivateStoreTestBase + +/** Tests the encrypted private store in a setting where the AWS KMS key IS NOT pre-defined. Canton + * will (a) create a new temporary SINGLE-REGION key that is scheduled for deletion at the end of + * the test. Creating a new key costs a small fee, so this test is only run nightly. + */ +class AwsEncryptedCryptoPrivateStoreNoPreDefinedKeyReferenceIntegrationTestPostgres + extends EncryptedCryptoPrivateStoreIntegrationTest + with AwsEncryptedCryptoPrivateStoreTestBase { + + setupPlugins( + protectedNodes, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory), + withPreGenKey = false, + ) + +} + +/** (b) create a new temporary MULTI-REGION key that is scheduled for deletion at the end of the + * test.
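+ * (Assumption worth noting: a multi-region key replicates its key material across
+ * AWS regions and each replica is typically billed as a separate key, which is a
+ * further reason to confine this variant to the nightly run.)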
+ */ +class AwsEncryptedCryptoPrivateStoreNoPreDefinedKeyMultiRegionReferenceIntegrationTestPostgres + extends EncryptedCryptoPrivateStoreIntegrationTest + with AwsEncryptedCryptoPrivateStoreTestBase { + + setupPlugins( + protectedNodes, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory), + withPreGenKey = false, + multiRegion = true, + ) + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/nightly/kms/aws/AwsKmsCryptoNoPreDefinedKeysIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/nightly/kms/aws/AwsKmsCryptoNoPreDefinedKeysIntegrationTest.scala new file mode 100644 index 0000000000..9abb1936f6 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/nightly/kms/aws/AwsKmsCryptoNoPreDefinedKeysIntegrationTest.scala @@ -0,0 +1,25 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.nightly.kms.aws + +import com.digitalasset.canton.integration.tests.nightly.kms.KmsCryptoNoPreDefinedKeysIntegrationTest +import com.digitalasset.canton.integration.tests.security.CryptoIntegrationTest +import com.digitalasset.canton.integration.tests.security.kms.aws.AwsKmsCryptoIntegrationTestBase + +/** Runs a ping while one participant is using an AWS KMS provider and letting Canton generate its + * own keys (i.e. auto-init == true) + */ +class AwsKmsCryptoNoPreDefinedKeysReferenceIntegrationTest + extends CryptoIntegrationTest( + AwsKmsCryptoIntegrationTestBase.defaultAwsKmsCryptoConfig + ) + with AwsKmsCryptoIntegrationTestBase + with KmsCryptoNoPreDefinedKeysIntegrationTest { + + override def afterAll(): Unit = { + deleteAllGenerateKeys() + super.afterAll() + } + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/nightly/kms/aws/AwsRotateWrapperKeyNoPreDefinedKeyIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/nightly/kms/aws/AwsRotateWrapperKeyNoPreDefinedKeyIntegrationTest.scala new file mode 100644 index 0000000000..293e90765c --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/nightly/kms/aws/AwsRotateWrapperKeyNoPreDefinedKeyIntegrationTest.scala @@ -0,0 +1,26 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.nightly.kms.aws + +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} +import com.digitalasset.canton.integration.tests.security.kms.RotateWrapperKeyIntegrationTest +import com.digitalasset.canton.integration.tests.security.kms.aws.AwsEncryptedCryptoPrivateStoreTestBase + +/** Tests a manual rotation of an AWS wrapper key, where NO KEY is SPECIFIED as the new wrapper key + * and, as such, Canton will automatically generate a new one. 
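+ * The shared rotation assertions live in [[RotateWrapperKeyIntegrationTest]]; this class only wires up the AWS-specific plugins and leaves preDefinedKey unset.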
+ */ +class AwsRotateWrapperKeyNoPreDefinedKeyReferenceIntegrationTestPostgres + extends RotateWrapperKeyIntegrationTest + with AwsEncryptedCryptoPrivateStoreTestBase { + + override protected val preDefinedKey: Option[String] = None + + setupPlugins( + protectedNodes, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory), + ) + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/nightly/kms/gcp/GcpEncryptedCryptoPrivateStoreNoPreDefinedKeyIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/nightly/kms/gcp/GcpEncryptedCryptoPrivateStoreNoPreDefinedKeyIntegrationTest.scala new file mode 100644 index 0000000000..524891a328 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/nightly/kms/gcp/GcpEncryptedCryptoPrivateStoreNoPreDefinedKeyIntegrationTest.scala @@ -0,0 +1,41 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.nightly.kms.gcp + +import com.digitalasset.canton.integration.plugins.{UseBftSequencer, UsePostgres} +import com.digitalasset.canton.integration.tests.security.kms.EncryptedCryptoPrivateStoreIntegrationTest +import com.digitalasset.canton.integration.tests.security.kms.gcp.GcpEncryptedCryptoPrivateStoreTestBase + +/** Tests the encrypted private store in a setting where the GCP KMS key IS NOT pre-defined. Canton + * will (a) create a new temporary SINGLE-REGION key that is scheduled for deletion at the end of + * the test. Creating a new key costs a small fee so this test is only run nightly. + */ +class GcpEncryptedCryptoPrivateStoreNoPreDefinedKeyBftOrderingIntegrationTestPostgres + extends EncryptedCryptoPrivateStoreIntegrationTest + with GcpEncryptedCryptoPrivateStoreTestBase { + + setupPlugins( + protectedNodes, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = new UseBftSequencer(loggerFactory), + withPreGenKey = false, + ) + +} + +/** In GCP to use a multi-region key we need to use a different keyring + */ +class GcpEncryptedCryptoPrivateStoreNoPreDefinedKeyMultiRegionBftOrderingIntegrationTestPostgres + extends EncryptedCryptoPrivateStoreIntegrationTest + with GcpEncryptedCryptoPrivateStoreTestBase { + + setupPlugins( + protectedNodes, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = new UseBftSequencer(loggerFactory), + withPreGenKey = false, + multiRegion = true, + ) + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/nightly/kms/gcp/GcpKmsCryptoNoPreDefinedKeysIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/nightly/kms/gcp/GcpKmsCryptoNoPreDefinedKeysIntegrationTest.scala new file mode 100644 index 0000000000..52368ed458 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/nightly/kms/gcp/GcpKmsCryptoNoPreDefinedKeysIntegrationTest.scala @@ -0,0 +1,25 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.nightly.kms.gcp + +import com.digitalasset.canton.integration.tests.nightly.kms.KmsCryptoNoPreDefinedKeysIntegrationTest +import com.digitalasset.canton.integration.tests.security.CryptoIntegrationTest +import com.digitalasset.canton.integration.tests.security.kms.gcp.GcpKmsCryptoIntegrationTestBase + +/** Runs a ping while one participant is using a GCP KMS provider and letting Canton generate its + * own keys (i.e. auto-init == true) + */ +class GcpKmsCryptoNoPreDefinedKeysReferenceIntegrationTest + extends CryptoIntegrationTest( + GcpKmsCryptoIntegrationTestBase.defaultGcpKmsCryptoConfig + ) + with GcpKmsCryptoIntegrationTestBase + with KmsCryptoNoPreDefinedKeysIntegrationTest { + + override def afterAll(): Unit = { + deleteAllGenerateKeys() + super.afterAll() + } + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/nightly/kms/gcp/GcpRotateWrapperKeyNoPreDefinedKeyIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/nightly/kms/gcp/GcpRotateWrapperKeyNoPreDefinedKeyIntegrationTest.scala new file mode 100644 index 0000000000..4c8868d5f4 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/nightly/kms/gcp/GcpRotateWrapperKeyNoPreDefinedKeyIntegrationTest.scala @@ -0,0 +1,25 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.nightly.kms.gcp + +import com.digitalasset.canton.integration.plugins.{UseBftSequencer, UsePostgres} +import com.digitalasset.canton.integration.tests.security.kms.RotateWrapperKeyIntegrationTest +import com.digitalasset.canton.integration.tests.security.kms.gcp.GcpEncryptedCryptoPrivateStoreTestBase + +/** Tests a manual rotation of a GCP wrapper key, where NO KEY is SPECIFIED as the new wrapper key + * and, as such, Canton will automatically generate a new one. 
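+ * As in the AWS variant, the shared rotation assertions live in [[RotateWrapperKeyIntegrationTest]]; this class supplies the GCP-specific plugins (BFT sequencer and Postgres).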
+ */ +class GcpRotateWrapperKeyNoPreDefinedKeyBftOrderingIntegrationTestPostgres + extends RotateWrapperKeyIntegrationTest + with GcpEncryptedCryptoPrivateStoreTestBase { + + override protected val preDefinedKey: Option[String] = None + + setupPlugins( + protectedNodes, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = new UseBftSequencer(loggerFactory), + ) + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pkgdars/PackageUploadIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pkgdars/PackageUploadIntegrationTest.scala index eb234df2d7..5e76c38bf1 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pkgdars/PackageUploadIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pkgdars/PackageUploadIntegrationTest.scala @@ -11,10 +11,7 @@ import com.digitalasset.canton.console.{CommandFailure, ParticipantReference, Se import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.examples.java.iou.{Amount, Iou} -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -23,15 +20,18 @@ import com.digitalasset.canton.integration.{ import com.digitalasset.canton.ledger.error.PackageServiceErrors.Reading.InvalidDar import com.digitalasset.canton.ledger.error.PackageServiceErrors.Validation.ValidationError import com.digitalasset.canton.ledger.error.groups.CommandExecutionErrors.Package.AllowedLanguageVersions +import com.digitalasset.canton.logging.SuppressingLogger.LogEntryOptionality import com.digitalasset.canton.participant.admin.CantonPackageServiceError.PackageRemovalErrorCode import com.digitalasset.canton.participant.admin.PackageService.{DarDescription, DarMainPackageId} import com.digitalasset.canton.participant.admin.{AdminWorkflowServices, PackageServiceTest} import com.digitalasset.canton.platform.apiserver.services.admin.PackageTestUtils import com.digitalasset.canton.platform.apiserver.services.admin.PackageTestUtils.ArchiveOps +import com.digitalasset.canton.topology.TopologyManagerError import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId import com.digitalasset.canton.topology.transaction.VettedPackage import com.digitalasset.canton.util.BinaryFileUtil import com.digitalasset.daml.lf.archive.{DarParser, DarReader} +import com.digitalasset.daml.lf.data.Ref import com.digitalasset.daml.lf.testing.parser.Implicits.SyntaxHelper import com.google.protobuf.ByteString import org.scalatest.concurrent.PatienceConfiguration.Timeout @@ -40,7 +40,7 @@ import org.scalatest.time.{Seconds, Span} import java.util.zip.ZipInputStream import scala.concurrent.Future import scala.jdk.CollectionConverters.* -import scala.util.{Failure, Success} +import scala.util.{Failure, Success, Try} trait PackageUploadIntegrationTest extends CommunityIntegrationTest @@ -62,8 +62,8 @@ trait PackageUploadIntegrationTest synchronizerAlias: SynchronizerAlias, ): Unit = if (!ref.synchronizers.list_connected().map(_.synchronizerAlias).contains(synchronizerAlias)) { - ref.dars.upload(CantonTestsPath) ref.synchronizers.connect_local(sequencerConnection, alias = synchronizerAlias) + 
ref.dars.upload(CantonTestsPath, synchronizerId = Some(sequencerConnection.synchronizer_id)) } private var cantonTestsMainPackageId, cantonExamplesMainPkgId: String = _ @@ -73,23 +73,27 @@ trait PackageUploadIntegrationTest "enable the package" in { implicit env => import env.* - def inAuthStore() = inStore(TopologyStoreId.Authorized, participant1) + inStore(daId, participant1) shouldBe empty - def onSynchronizer() = inStore(daId, participant1) + clue("uploading tests without vetting " + CantonTestsPath) { + participant1.dars.upload( + CantonTestsPath, + vetAllPackages = false, + synchronizeVetting = false, + ) + } - onSynchronizer() shouldBe empty + inStore(daId, participant1) shouldBe empty - clue("uploading tests " + CantonTestsPath) { - participant1.dars.upload(CantonTestsPath) - } clue("connecting to synchronizer") { participant1.synchronizers.connect_local(sequencer1, alias = daName) } - assertPackageUsable(participant1, participant1, daId) - - onSynchronizer() shouldBe inAuthStore() + clue("uploading tests " + CantonTestsPath) { + participant1.dars.upload(CantonTestsPath, synchronizerId = daId) + } + assertPackageUsable(participant1, participant1, daId) } "properly deal with empty zips" in { implicit env => @@ -110,26 +114,26 @@ trait PackageUploadIntegrationTest "not struggle with multiple uploads of the same dar" in { implicit env => import env.* - def inAuthStore() = inStore(TopologyStoreId.Authorized, participant1) + def inSynchronizerStore() = inStore(TopologyStoreId.Synchronizer(daId), participant1) def packages() = participant1.packages.list() - participant1.dars.upload(CantonTestsPath) + participant1.dars.upload(CantonTestsPath, synchronizerId = daId) - val beforeVettingTx = inAuthStore() + val beforeReuploadTx = inSynchronizerStore() val beforeNumPx = packages() clue("uploading tests multiple times") { (1 to 5).foreach { _ => - participant1.dars.upload(CantonTestsPath) + participant1.dars.upload(CantonTestsPath, synchronizerId = daId) } } - val afterVettingTx = inAuthStore() + val afterVettingTx = inSynchronizerStore() val afterNumPx = packages() beforeNumPx.toSet shouldBe afterNumPx.toSet - beforeVettingTx shouldBe afterVettingTx + beforeReuploadTx shouldBe afterVettingTx } } @@ -142,8 +146,8 @@ trait PackageUploadIntegrationTest ensureParticipantIsConnectedUploaded(participant1, sequencer1, daName) participant2.synchronizers.connect_local(sequencer1, alias = daName) - participant2.dars.upload(CantonExamplesPath) - participant2.dars.upload(CantonTestsPath) + participant2.dars.upload(CantonExamplesPath, synchronizerId = daId) + participant2.dars.upload(CantonTestsPath, synchronizerId = daId) assertPackageUsable(participant1, participant2, daId) assertPackageUsable(participant2, participant1, daId) @@ -151,20 +155,6 @@ trait PackageUploadIntegrationTest } } - "uploading before reconnect" must { - "enable the package on all synchronizers" in { implicit env => - import env.* - - participant3.synchronizers.connect_local(sequencer1, alias = daName) - participant3.synchronizers.disconnect(daName) - participant3.dars.upload(CantonTestsPath) - participant3.synchronizers.reconnect(daName) - participant3.packages.synchronize_vetting() - - assertPackageUsable(participant3, participant1, daId) - } - } - "connecting and then restarting" must { "not log any warnings when we stop" in { implicit env => @@ -181,6 +171,7 @@ trait PackageUploadIntegrationTest participant4.start() participant4.synchronizers.reconnect_all() + participant4.dars.upload(CantonTestsPath, synchronizerId = daId) 
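+ // Re-uploading with an explicit synchronizer id ensures the packages are vetted on daId before the usability assertion below.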
participant4.packages.synchronize_vetting() assertPackageUsable(participant4, participant4, daId) @@ -242,6 +233,7 @@ trait PackageUploadIntegrationTest "show content of dars" in { implicit env => import env.* + participant3.synchronizers.connect_local(sequencer1, alias = daName) participant3.dars.upload(CantonExamplesPath) val items = participant3.dars.list(filterName = "CantonExamples") @@ -265,6 +257,8 @@ trait PackageUploadIntegrationTest "successfully validate the DAR without uploading" in { implicit env => import env.* + participants.all.foreach(_.synchronizers.connect_local(sequencer1, alias = daName)) + // Validate a DAR against all participants val darHashes = participants.all.dars.validate(CantonExamplesPath) @@ -339,7 +333,7 @@ trait PackageUploadIntegrationTest // Be explicit about ensuring and waiting for vetting .upload( file.pathAsString, - vetAllPackages = true, + synchronizerId = daId, synchronizeVetting = vettingSyncEnabled, ) } @@ -349,32 +343,62 @@ trait PackageUploadIntegrationTest darMetadata.description.description shouldBe darPath.nameWithoutExtension darMetadata.description.mainPackageId shouldBe mainPackageId // If successful, the DAR's main package should also be vetted - (inStore(TopologyStoreId.Authorized, participant1) should contain( - mainPackageId - )).discard - Success(Success(mainPackageId)) - case failure @ Failure(_) => + inStore(daId, participant1).contains(mainPackageId) shouldBe vettingSyncEnabled + Success((darPath, Success(mainPackageId))) + case Failure(t) => // When vetting is enabled, uploads can fail due to rejected vetting operations that were run concurrently (inStore( - TopologyStoreId.Authorized, + daId, participant1, ) should not contain mainPackageId).discard // Wrap in Success to ensure all futures are waited for in the Future.traverse - Success(failure) + Success((darPath, Failure(t))) } } } - // Unlift the inner Try. If there was a failure during upload, this will explode - .map(_.map(_.success.value)) + .map( + _.map { case (darPath: File, result: Try[Ref.PackageId]) => + // Unlift the inner Try. + // When vetting synchronization is enabled, explode on failure since + // that shouldn't happen when synchronization is enabled. + if (vettingSyncEnabled) Right(result.success.value) + // If a failure occurs and vetting synchronization is NOT enabled, + // return the darPath on failure. The log assertion below will check + // that a serial mismatch was emitted that references that DAR path.
+ else + inside(result) { + case Failure(_: CommandFailure) => + Left(darPath.canonicalPath) + case Success(value) => Right(value) + } + } + ) - // If we reach this code, uploadedPackages should contain all the packages that we uploaded - val uploadedPackages = testParallelUploads().futureValue(Timeout(Span(30, Seconds))) - uploadedPackages should have size darsDiscriminatorList.size.toLong + // If we reach this code, every upload should have resulted in a package upload or + // a command failure caused by a serial mismatch from concurrent vetting proposals + val uploadedPackagesResults = + loggerFactory.assertLogsUnorderedOptionalFromResult( + testParallelUploads().futureValue(Timeout(Span(30, Seconds))), + (results: Seq[Either[String, Ref.PackageId]]) => + results.collect { case Left(path) => + ( + LogEntryOptionality.Required, + logEntry => + logEntry.shouldBeOneOfCommandFailure( + Seq(TopologyManagerError.SerialMismatch), + path, + ), + ) + }, + ) + + uploadedPackagesResults should have size darsDiscriminatorList.size.toLong if (!vettingSyncEnabled) { // Wait for vetting transactions to finish if the command was not run with // vetting synchronization enabled participant1.packages.synchronize_vetting() + participant1.topology.synchronisation.await_idle() } } @@ -417,14 +441,18 @@ trait PackageUploadIntegrationTest }.unzip def getVettingSerial = participant1.topology.vetted_packages - .list(TopologyStoreId.Authorized) + .list(daId, filterParticipant = participant1.filterString) .loneElement .context .serial val vettedPackagesSerialBefore = getVettingSerial - participant1.dars.upload_many(darFiles, vetAllPackages = true, synchronizeVetting = true) + participant1.dars.upload_many( + darFiles, + synchronizerId = Some(daId), + synchronizeVetting = true, + ) val darDescription = participant1.dars.list() darDescription @@ -433,9 +461,14 @@ trait PackageUploadIntegrationTest // If successful, the DAR's main package should also be vetted mainPackages should contain - inStore(TopologyStoreId.Authorized, participant1) should contain allElementsOf mainPackages + inStore( + daId, + participant1, + ) should contain allElementsOf mainPackages - getVettingSerial shouldBe vettedPackagesSerialBefore.increment + eventually() { + getVettingSerial shouldBe vettedPackagesSerialBefore.increment + } } } @@ -515,6 +548,7 @@ trait PackageUploadIntegrationTest ) // Now, remove the CantonExamples DAR + participant4.packages.synchronize_vetting() participant4.dars.remove(cantonExamplesMainPkgId) // Check that CantonExamples main package-id was removed participant4.packages @@ -533,6 +567,7 @@ trait PackageUploadIntegrationTest participant5.topology.vetted_packages .propose_delta( participant5.id, + store = daId, adds = Seq( + VettedPackage( + cantonExamplesMainPkgId, @@ -588,5 +623,5 @@ trait PackageUploadIntegrationTest class PackageUploadIntegrationTestPostgres extends PackageUploadIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pkgdars/PackageUploadVersionIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pkgdars/PackageUploadVersionIntegrationTest.scala index 246f9d62b0..94cced4ed2 100644 ---
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pkgdars/PackageUploadVersionIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pkgdars/PackageUploadVersionIntegrationTest.scala @@ -6,8 +6,8 @@ package com.digitalasset.canton.integration.tests.pkgdars import com.digitalasset.canton.BaseTest import com.digitalasset.canton.admin.api.client.data.TemplateId.templateIdsFromJava import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -23,7 +23,7 @@ class PackageUploadVersionIntegrationTest with PackageUsableMixin { registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.H2]( + new UseReferenceBlockSequencer[DbConfig.H2]( loggerFactory, sequencerGroups = MultiSynchronizer.tryCreate(Set("sequencer1"), Set("sequencer2")), ) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/ACSPruningIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/ACSPruningIntegrationTest.scala index e66871c585..b522e19177 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/ACSPruningIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/ACSPruningIntegrationTest.scala @@ -6,10 +6,7 @@ package com.digitalasset.canton.integration.tests.pruning import com.digitalasset.canton.config.{CantonConfig, DbConfig, PositiveDurationSeconds} import com.digitalasset.canton.environment.CantonEnvironment import com.digitalasset.canton.integration.* -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.time.NonNegativeFiniteDuration import java.time.Duration as JDuration @@ -77,7 +74,7 @@ trait ACSPruningIntegrationTest class AcsPruningIntegrationTestPostgres extends ACSPruningIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } //class AcsPruningIntegrationTestH2 extends ACSPruningIntegrationTest { diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/LedgerPruningIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/LedgerPruningIntegrationTest.scala index ca9fe2fa26..3d0099de07 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/LedgerPruningIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/LedgerPruningIntegrationTest.scala @@ -13,9 +13,9 @@ import com.digitalasset.canton.examples.java.cycle import com.digitalasset.canton.examples.java.iou.{Amount, Iou} import com.digitalasset.canton.integration.* import 
com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors.OffsetOutOfRange import com.digitalasset.canton.participant.admin.grpc.PruningServiceError.UnsafeToPrune @@ -543,7 +543,7 @@ object LedgerPruningIntegrationTest extends OptionValues { class LedgerPruningIntegrationTestPostgres extends LedgerPruningIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory) + new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory) ) registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/MediatorPruningIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/MediatorPruningIntegrationTest.scala index 6532a15b73..6c43bef6bf 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/MediatorPruningIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/MediatorPruningIntegrationTest.scala @@ -7,8 +7,8 @@ import com.digitalasset.canton.config.{CantonConfig, DbConfig} import com.digitalasset.canton.environment.CantonEnvironment import com.digitalasset.canton.integration.plugins.{ UseBftSequencer, - UseCommunityReferenceBlockSequencer, UsePostgres, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -99,7 +99,7 @@ trait MediatorPruningIntegrationTest extends CommunityIntegrationTest with Share class MediatorPruningReferenceIntegrationTestPostgres extends MediatorPruningIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory) + new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory) ) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/PruneLockedContractIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/PruneLockedContractIntegrationTest.scala index 63ed1c105a..61419ebe76 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/PruneLockedContractIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/PruneLockedContractIntegrationTest.scala @@ -9,9 +9,9 @@ import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.console.CommandFailure import com.digitalasset.canton.damltests.java.failedtransactionsdonotdivulge.One import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -179,6 +179,6 @@ trait PruneLockedContractIntegrationTest class PruneLockedContractReferenceIntegrationTestPostgres extends PruneLockedContractIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) registerPlugin(new 
UseProgrammableSequencer(this.getClass.toString, loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/PruningDocumentationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/PruningDocumentationTest.scala index ef89c8d031..118b199b29 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/PruningDocumentationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/PruningDocumentationTest.scala @@ -7,7 +7,7 @@ import com.digitalasset.canton.admin.api.client.data.PruningSchedule import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.console.commands.PruningSchedulerAdministration import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -114,7 +114,7 @@ abstract class PruningDocumentationIntegrationTest class PruningDocumentationIntegrationTestH2 extends PruningDocumentationIntegrationTest { registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory) + new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory) ) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/ReassignmentPruningIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/ReassignmentPruningIntegrationTest.scala index ec1cec68ec..351fac1cee 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/ReassignmentPruningIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/ReassignmentPruningIntegrationTest.scala @@ -9,11 +9,8 @@ import com.digitalasset.canton.config.{DbConfig, PositiveDurationSeconds} import com.digitalasset.canton.console.{CommandFailure, LocalParticipantReference} import com.digitalasset.canton.examples.java.iou.Iou import com.digitalasset.canton.integration.* -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.util.{ AcsInspection, @@ -92,7 +89,8 @@ sealed trait ReassignmentPruningIntegrationTest bank = participant2.parties.enable("bank", synchronizer = daName) participant2.parties.enable("bank", synchronizer = acmeName) - participants.all.dars.upload(BaseTest.CantonExamplesPath) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = daId) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = acmeId) participant1.health.ping(participant2.id) } @@ -327,7 +325,7 @@ sealed trait ReassignmentPruningIntegrationTest class ReassignmentPruningIntegrationTestPostgres extends ReassignmentPruningIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new 
UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/ScheduledHADatabaseSequencerPruningTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/ScheduledHADatabaseSequencerPruningTest.scala index ce72151d73..dc81c7062a 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/ScheduledHADatabaseSequencerPruningTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/ScheduledHADatabaseSequencerPruningTest.scala @@ -11,8 +11,8 @@ import com.digitalasset.canton.console.LocalSequencerReference import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.integration.* import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UsePostgres, + UseReferenceBlockSequencer, UseSharedStorage, } import com.digitalasset.canton.integration.util.BackgroundWorkloadRunner @@ -25,7 +25,7 @@ import scala.util.control.NonFatal class ScheduledHADatabaseSequencerPruningTestPostgres extends ScheduledHADatabaseSequencerPruningTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) registerPlugin( UseSharedStorage.forSequencers( "sequencer1", diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/ScheduledMediatorPruningTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/ScheduledMediatorPruningTest.scala index 7dd2b4f271..90251c68ab 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/ScheduledMediatorPruningTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/ScheduledMediatorPruningTest.scala @@ -12,10 +12,7 @@ import com.digitalasset.canton.config.{DbConfig, PositiveDurationSeconds} import com.digitalasset.canton.console.{CommandFailure, LocalMediatorReference} import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.integration.* -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.util.BackgroundWorkloadRunner import com.digitalasset.canton.protocol.DynamicSynchronizerParameters import com.digitalasset.canton.scheduler.IgnoresTransientSchedulerErrors @@ -25,7 +22,7 @@ import scala.util.chaining.* class ScheduledMediatorPruningTestPostgres extends ScheduledMediatorPruningTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } abstract class ScheduledMediatorPruningTest @@ -108,7 +105,7 @@ abstract class ScheduledMediatorPruningTest // In the second half, the pruning test is able to prune a lot more aggressively according to the 4 second // timeout set as a dynamic synchronizer parameter. 
val secondsWorstCaseUntilFirstPrune = DynamicSynchronizerParameters - .initialValues(env.environment.clock, testedProtocolVersion) + .initialValues(testedProtocolVersion) .confirmationResponseTimeout // == 30.seconds .unwrap .getSeconds diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/ScheduledParticipantPruningTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/ScheduledParticipantPruningTest.scala index 0db85bac6b..733b239156 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/ScheduledParticipantPruningTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pruning/ScheduledParticipantPruningTest.scala @@ -11,10 +11,7 @@ import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.config.{DbConfig, PositiveDurationSeconds} import com.digitalasset.canton.console.{CommandFailure, LocalParticipantReference} import com.digitalasset.canton.integration.* -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.util.BackgroundWorkloadRunner import com.digitalasset.canton.scheduler.IgnoresTransientSchedulerErrors import com.digitalasset.canton.time.NonNegativeFiniteDuration @@ -23,7 +20,7 @@ import java.time.Duration as JDuration class ScheduledParticipantPruningTestPostgres extends ScheduledParticipantPruningTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } abstract class ScheduledParticipantPruningTest diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/kms/KmsCliIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/kms/KmsCliIntegrationTest.scala new file mode 100644 index 0000000000..32ad133463 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/kms/KmsCliIntegrationTest.scala @@ -0,0 +1,83 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.release.kms + +import better.files.File +import com.digitalasset.canton.console.BufferedProcessLogger +import com.digitalasset.canton.integration.plugins.UseExternalConsole +import com.digitalasset.canton.integration.tests.release.ReleaseArtifactIntegrationTestUtils +import org.scalatest.Outcome + +/** Cli integration test for KMS configurations. + * + * Before being able to run these tests locally, you need to execute `sbt bundle`. 
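+ * + * Concrete subclasses supply the KMS configuration files, any extra environment variables for the Canton process, the bootstrap script to run, and a test name used for the per-test KMS log file.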
+ */ +trait KmsCliIntegrationTest extends ReleaseArtifactIntegrationTestUtils { + + protected def kmsConfigs: Seq[String] + protected def cantonProcessEnvVar: Seq[(String, String)] + protected def bootstrapScript: String + protected def testName: String + + override protected val isEnterprise: Boolean = false + override protected def withFixture(test: OneArgTest): Outcome = test(new BufferedProcessLogger) + + override type FixtureParam = BufferedProcessLogger + + private lazy val simpleConf = + "community/app/src/pack/examples/01-simple-topology/simple-topology.conf" + + private lazy val kmsLog = s"log/$testName-kms.log" + + private def createExternalConsole( + extraConfigArguments: Seq[String] = Seq.empty + ): UseExternalConsole = + UseExternalConsole( + remoteConfigOrConfigFile = Right(File(simpleConf)), + cantonBin = cantonBin, + extraConfigs = kmsConfigs.map(c => s"--config $c") ++ extraConfigArguments, + extraEnv = cantonProcessEnvVar, + fileNameHint = s"${this.getClass.getSimpleName}-$testName", + ) + + "Calling Canton" should { + + "bootstrap and run ping" in { processLogger => + val externalConsole = createExternalConsole() + externalConsole.runBootstrapScript(bootstrapScript) + checkOutput(processLogger) + } + + "send kms audit logs to a separate file" in { processLogger => + // ensure kms log is empty + File(kmsLog).clear() + + val externalConsole = createExternalConsole(Seq(s"--kms-log-file-name $kmsLog")) + + externalConsole.runBootstrapScript(bootstrapScript) + + checkOutput(processLogger) + val logFile = File(externalConsole.logFile) + assert(logFile.exists) + val contents = logFile.contentAsString + assert(!contents.contains(": GetPublicKeyRequest")) + val kmsLogFile = File(kmsLog) + assert(kmsLogFile.exists) + val kmsContents = kmsLogFile.contentAsString + assert(kmsContents.contains(": GetPublicKeyRequest")) + } + + "merge kms logging with canton file by default" in { processLogger => + val externalConsole = createExternalConsole() + + externalConsole.runBootstrapScript(bootstrapScript) + + val logFile = File(externalConsole.logFile) + checkOutput(processLogger) + assert(logFile.exists) + val contents = logFile.contentAsString + assert(contents.contains(": GetPublicKeyRequest")) + } + } +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/kms/aws/AwsCliIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/kms/aws/AwsCliIntegrationTest.scala new file mode 100644 index 0000000000..a892647598 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/kms/aws/AwsCliIntegrationTest.scala @@ -0,0 +1,20 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.release.kms.aws + +import com.digitalasset.canton.integration.tests.release.kms.KmsCliIntegrationTest + +/** Cli integration test for AWS KMS configurations. Before being able to run these tests locally, + * you need to execute `sbt bundle`.
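+ * The Canton process is started with AWS_PROFILE=sts so that the AWS KMS client picks up the corresponding credentials profile.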
+ */ +class AwsCliIntegrationTest extends KmsCliIntegrationTest { + override lazy val kmsConfigs: Seq[String] = Seq( + "enterprise/app/src/test/resources/aws-kms-provider-tagged.conf", + "enterprise/app/src/test/resources/participant1-manual-init.conf", + ) + override lazy val cantonProcessEnvVar: Seq[(String, String)] = Seq("AWS_PROFILE" -> "sts") + override lazy val bootstrapScript: String = + "enterprise/app/src/test/resources/scripts/aws_kms_participant1.canton" + override lazy val testName: String = "aws" +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/kms/driver/AwsKmsDriverCliIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/kms/driver/AwsKmsDriverCliIntegrationTest.scala new file mode 100644 index 0000000000..3ef565bbd2 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/kms/driver/AwsKmsDriverCliIntegrationTest.scala @@ -0,0 +1,31 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.release.kms.driver + +import com.digitalasset.canton.buildinfo.BuildInfo +import com.digitalasset.canton.integration.tests.release.kms.KmsCliIntegrationTest + +/** Cli integration test for AWS KMS Driver configurations. + * + * Before being able to run these tests locally, you need to execute `sbt bundle` and `sbt + * aws-kms-driver/assembly`. + */ +class AwsKmsDriverCliIntegrationTest extends KmsCliIntegrationTest { + + private lazy val driverVersion = sys.env.getOrElse("RELEASE_SUFFIX", BuildInfo.version) + private lazy val driverJar = + s"community/aws-kms-driver/target/scala-2.13/aws-kms-driver_2.13-$driverVersion.jar" + + override lazy val kmsConfigs: Seq[String] = + Seq("enterprise/app/src/test/resources/aws-kms-driver.conf") + + // Run Canton with EXTRA_CLASSPATH set to the driver.jar + override lazy val cantonProcessEnvVar: Seq[(String, String)] = + Seq("AWS_PROFILE" -> "sts", "EXTRA_CLASSPATH" -> driverJar) + + override lazy val bootstrapScript: String = + "enterprise/app/src/test/resources/scripts/aws_kms_participant1.canton" + + override lazy val testName: String = s"aws-kms-$packageName" +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/kms/driver/MockKmsDriverCliIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/kms/driver/MockKmsDriverCliIntegrationTest.scala new file mode 100644 index 0000000000..95350b085d --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/kms/driver/MockKmsDriverCliIntegrationTest.scala @@ -0,0 +1,31 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.release.kms.driver + +import com.digitalasset.canton.buildinfo.BuildInfo +import com.digitalasset.canton.integration.tests.release.kms.KmsCliIntegrationTest + +/** Cli integration test for the Mock KMS Driver configurations. + * + * Before being able to run these tests locally, you need to execute `sbt bundle` and `sbt + * mock-kms-driver/package`. 
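+ * The mock driver jar is put on Canton's classpath via the EXTRA_CLASSPATH environment variable.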
+ */ +class MockKmsDriverCliIntegrationTest extends KmsCliIntegrationTest { + + private lazy val driverVersion = sys.env.getOrElse("RELEASE_SUFFIX", BuildInfo.version) + private lazy val driverJar = + s"community/mock-kms-driver/target/scala-2.13/mock-kms-driver_2.13-$driverVersion.jar" + + override lazy val kmsConfigs: Seq[String] = + Seq("community/app/src/test/resources/mock-kms-driver.conf") + + // Run Canton with EXTRA_CLASSPATH set to the driver.jar + override lazy val cantonProcessEnvVar: Seq[(String, String)] = + Seq("EXTRA_CLASSPATH" -> driverJar) + + override protected def bootstrapScript: String = + "enterprise/app/src/test/resources/scripts/mock_kms_participant1.canton" + + override lazy val testName: String = s"mock-kms-$packageName" +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/kms/gcp/GcpCliIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/kms/gcp/GcpCliIntegrationTest.scala new file mode 100644 index 0000000000..8762979758 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/kms/gcp/GcpCliIntegrationTest.scala @@ -0,0 +1,20 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.release.kms.gcp + +import com.digitalasset.canton.integration.tests.release.kms.KmsCliIntegrationTest + +/** Cli integration test for GCP KMS configurations. Before being able to run these tests locally, + * you need to execute `sbt bundle`. + */ +class GcpCliIntegrationTest extends KmsCliIntegrationTest { + override lazy val kmsConfigs: Seq[String] = Seq( + "enterprise/app/src/test/resources/gcp-kms-provider-tagged.conf", + "enterprise/app/src/test/resources/participant1-manual-init.conf", + ) + override lazy val cantonProcessEnvVar: Seq[(String, String)] = Seq.empty + override lazy val bootstrapScript: String = + "enterprise/app/src/test/resources/scripts/gcp_kms_participant1.canton" + override lazy val testName: String = "gcp" +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/reliability/AcsCommitmentRestartIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/reliability/AcsCommitmentRestartIntegrationTest.scala index 20c11b487b..799fe154ea 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/reliability/AcsCommitmentRestartIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/reliability/AcsCommitmentRestartIntegrationTest.scala @@ -5,15 +5,15 @@ package com.digitalasset.canton.integration.tests.reliability import com.digitalasset.canton.BigDecimalImplicits.* import com.digitalasset.canton.concurrent.ExecutionContextIdlenessExecutorService -import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.config.RequireTypes.NonNegativeProportion +import com.digitalasset.canton.config.{CommitmentSendDelay, DbConfig} import com.digitalasset.canton.console.LocalParticipantReference import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.examples.java.iou.{Amount, Iou} import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, +
UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -31,6 +31,7 @@ import com.digitalasset.canton.synchronizer.sequencer.{ ProgrammableSequencer, } import com.digitalasset.canton.time.PositiveSeconds +import com.digitalasset.canton.topology.{ForceFlag, ForceFlags} import com.digitalasset.canton.{LedgerParticipantId, config} import monocle.Monocle.toAppliedFocusOps @@ -55,7 +56,11 @@ trait AcsCommitmentRestartIntegrationTest ProgrammableSequencer.configOverride(this.getClass.toString, loggerFactory), ) .updateTestingConfig( - _.focus(_.maxCommitmentSendDelayMillis).replace(Some(NonNegativeInt.zero)) + _.focus(_.commitmentSendDelay).replace( + Some( + CommitmentSendDelay(Some(NonNegativeProportion.zero), Some(NonNegativeProportion.zero)) + ) + ) ) .withSetup { implicit env => import env.* @@ -70,6 +75,7 @@ trait AcsCommitmentRestartIntegrationTest sequencerAggregateSubmissionTimeout = config .NonNegativeFiniteDuration(confirmationResponseTimeout.plus(mediatorReactionTimeout)), ), + force = ForceFlags(ForceFlag.AllowOutOfBoundsValue), ) participants.all.synchronizers.connect_local(sequencer1, alias = daName) @@ -221,6 +227,6 @@ trait AcsCommitmentRestartIntegrationTest class AcsCommitmentRestartIntegrationTestPostgres extends AcsCommitmentRestartIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/reliability/ChangeSequencerAfterRestartIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/reliability/ChangeSequencerAfterRestartIntegrationTest.scala index 0b01ef65fe..0e6a2612c0 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/reliability/ChangeSequencerAfterRestartIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/reliability/ChangeSequencerAfterRestartIntegrationTest.scala @@ -13,7 +13,7 @@ import com.daml.test.evidence.tag.Reliability.{ import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.console.{LocalInstanceReference, LocalMediatorReference} -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -31,7 +31,7 @@ class ChangeSequencerAfterRestartIntegrationTest extends CommunityIntegrationTest with SharedEnvironment { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) override lazy val environmentDefinition: EnvironmentDefinition = EnvironmentDefinition.P1_S2M1_Manual diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/reliability/QuickRestartAfterStartIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/reliability/QuickRestartAfterStartIntegrationTest.scala index d44aee1486..bab0c053a5 100644 --- 
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/reliability/QuickRestartAfterStartIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/reliability/QuickRestartAfterStartIntegrationTest.scala @@ -5,10 +5,7 @@ package com.digitalasset.canton.integration.tests.reliability import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -23,7 +20,7 @@ final class QuickRestartAfterStartIntegrationTest EnvironmentDefinition.P1S1M1_Manual registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) "start / stop / restart and ping" in { implicit env => import env.* diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/AcsImportRepresentativePackageIdSelectionIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/AcsImportRepresentativePackageIdSelectionIntegrationTest.scala new file mode 100644 index 0000000000..a88caad338 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/AcsImportRepresentativePackageIdSelectionIntegrationTest.scala @@ -0,0 +1,557 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.repair + +import better.files.File +import com.daml.ledger.api.testing.utils.PekkoBeforeAndAfterAll +import com.daml.ledger.api.v2.{state_service, transaction_filter, value as apiValue} +import com.digitalasset.canton.admin.api.client.data.TemplateId +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt} +import com.digitalasset.canton.console.{LocalParticipantReference, ParticipantReference} +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.http.json.tests.upgrades +import com.digitalasset.canton.http.json.v2.JsContractEntry.JsActiveContract +import com.digitalasset.canton.http.json.v2.JsGetActiveContractsResponse +import com.digitalasset.canton.http.json.v2.JsStateServiceCodecs.{ + getActiveContractsRequestRW, + jsGetActiveContractsResponseRW, +} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + ConfigTransforms, + EnvironmentDefinition, + SharedEnvironment, +} +import com.digitalasset.canton.participant.admin.data.{ + ContractImportMode, + RepresentativePackageIdOverride, +} +import com.digitalasset.canton.participant.admin.repair.RepairServiceError.ImportAcsError +import com.digitalasset.canton.protocol.LfContractId +import com.digitalasset.canton.topology.PartyId +import com.digitalasset.canton.util.ShowUtil.* +import com.digitalasset.canton.{ + HasExecutionContext, + LfPackageId, + LfPackageName, + SynchronizerAlias, + protocol, +} +import com.digitalasset.daml.lf.value.Value.ContractId +import io.circe.Json +import io.circe.syntax.EncoderOps +import monocle.Monocle.toAppliedFocusOps +import org.apache.pekko.http.scaladsl.Http +import org.apache.pekko.http.scaladsl.model.* +import org.apache.pekko.util.ByteString +import org.scalatest.Assertion + +import java.util.UUID +import scala.concurrent.Future +import scala.jdk.CollectionConverters.CollectionHasAsScala +import scala.util.chaining.scalaUtilChainingOps + +class AcsImportRepresentativePackageIdSelectionIntegrationTest + extends CommunityIntegrationTest + with HasExecutionContext + with PekkoBeforeAndAfterAll + with SharedEnvironment { + + private val FooV1PkgId = upgrades.v1.java.foo.Foo.PACKAGE_ID + private val FooV2PkgId = upgrades.v2.java.foo.Foo.PACKAGE_ID + private val FooV3PkgId = upgrades.v3.java.foo.Foo.PACKAGE_ID + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P3_S1M1 + .prependConfigTransforms(ConfigTransforms.enableHttpLedgerApi) + .withSetup { implicit env => + import env.* + participants.all.synchronizers.connect_local(sequencer1, alias = daName) + participant1.dars.upload(FooV1Path) + } + + registerPlugin(new UsePostgres(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + + private def createUniqueParty( + participant: => LocalParticipantReference, + daName: => SynchronizerAlias, + ): PartyId = + participant.parties.enable(s"Alice-${UUID.randomUUID().toString}", synchronizer = Some(daName)) + + "Importing an ACS" should { + // This test case uses participant 2 as import target + "preserve the original package-id as representative package-id if no override" in { + implicit env => + import env.* + + val party = createUniqueParty(participant1, daName) + + // Upload Foo V1 to P2 + 
+      participant2.dars.upload(FooV1Path)
+
+      // Create a contract on P1
+      val contractId = createContract(participant1, party)
+
+      // Check the initial rp-id of the contract is the same as the original template-id
+      expectRpId(contractId, party, participant1, FooV1PkgId)
+
+      // Export on P1 and import on P2 without any override
+      exportAndImportOn(participant2, party)
+
+      // Check that the rp-id of the contract is unchanged after the import on P2
+      expectRpId(contractId, party, participant2, FooV1PkgId)
+    }
+
+    // Tests below use participant 3 as import target
+    "fail on unknown package-name for the imported contract" in { implicit env =>
+      import env.*
+      val party = createUniqueParty(participant1, daName)
+
+      val contractId = createContract(participant1, party)
+
+      exportAndImportOn(
+        importParticipant = participant3,
+        party = party,
+        handleImport = importAcs =>
+          assertThrowsAndLogsCommandFailures(
+            importAcs(),
+            entry => {
+              entry.shouldBeCantonErrorCode(ImportAcsError)
+              entry.message should include(
+                show"Could not select a representative package-id for contract with id $contractId. No package in store for the contract's package-name 'foo'"
+              )
+            },
+          ),
+      )
+    }
+
+    "select a known package for a contract if the original rp-id is not known" in { implicit env =>
+      import env.*
+
+      val party = createUniqueParty(participant1, daName)
+
+      // Upload only Foo V2 to P3
+      participant3.dars.upload(FooV2Path)
+
+      // Create a contract on P1
+      // P1 has only Foo V1
+      val contractId = createContract(participant1, party)
+
+      // Export on P1 and import on P3 without any override
+      exportAndImportOn(participant3, party)
+
+      // Check the rp-id of the contract after import on P3 (Foo V2)
+      expectRpId(contractId, party, participant3, FooV2PkgId)
+    }
+
+    "consider representative package-id overrides" in { implicit env =>
+      import env.*
+
+      val party = createUniqueParty(participant1, daName)
+
+      // Both participants have both versions of Foo
+      participant1.dars.upload(FooV2Path)
+      participant3.dars.upload(FooV1Path)
+
+      // Create two contracts on P1
+      val contractId1 = createContract(participant1, party)
+      val contractId2 = createContract(participant1, party)
+
+      // Check the initial rp-id of the contracts is the same as the original template-id (Foo V2)
+      expectRpId(
+        contractId = contractId1,
+        partyId = party,
+        participantRef = participant1,
+        expectedRpId = FooV2PkgId,
+        expectedNumberOfEventsForUpdatesQuery = 2,
+      )
+      expectRpId(
+        contractId = contractId2,
+        partyId = party,
+        participantRef = participant1,
+        expectedRpId = FooV2PkgId,
+        expectedNumberOfEventsForUpdatesQuery = 2,
+      )
+
+      // Export on P1 and import on P3 with rp-id overrides
+      exportAndImportOn(
+        participant3,
+        party,
+        contractRpIdOverride = Map(
+          contractId1 -> FooV1PkgId,
+          contractId2 -> LfPackageId.assertFromString("unknown-pkg-id"),
+        ),
+      )
+
+      // Representative package ID selection for contract 1 should have considered Foo V1 since it's known to P3
+      expectRpId(
+        contractId = contractId1,
+        partyId = party,
+        participantRef = participant3,
+        expectedRpId = FooV1PkgId,
+        expectedNumberOfEventsForUpdatesQuery = 2,
+      )
+      // Override for contract 2 should be ignored since the package-id is unknown to P3
+      expectRpId(
+        contractId = contractId2,
+        partyId = party,
+        participantRef = participant3,
+        expectedRpId = FooV2PkgId,
+        expectedNumberOfEventsForUpdatesQuery = 2,
+      )
+    }
+
+    s"fail with import mode ${ContractImportMode.Accept} if the selected representative package ID differs from the exported representative package ID" in {
+      implicit env =>
+        import env.*
+
+        val party = createUniqueParty(participant1, daName)
+
+        // Both participants have both versions of Foo
+        // Create a contract on P1
+        val contractId = createContract(participant1, party)
+
+        // Check the initial rp-id of the contract is the same as the original template-id (Foo V2)
+        expectRpId(contractId, party, participant1, FooV2PkgId)
+
+        exportAndImportOn(
+          participant3,
+          party,
+          contractRpIdOverride = Map(contractId -> FooV1PkgId),
+          contractImportMode = ContractImportMode.Accept,
+          handleImport = f =>
+            assertThrowsAndLogsCommandFailures(
+              f(),
+              entry => {
+                entry.shouldBeCantonErrorCode(ImportAcsError)
+                entry.message should include(
+                  show"Contract import mode is 'Accept' but the selected representative package-id ${LfPackageId
+                      .assertFromString(FooV1PkgId)} for contract with id $contractId differs from the exported representative package-id ${LfPackageId.assertFromString(FooV2PkgId)}. Please use contract import mode 'Validation' or 'Recomputation' to change the representative package-id."
+                )
+              },
+            ),
+        )
+    }
+
+    s"fail on contract validation failure if import mode is ${ContractImportMode.Validation}" in {
+      implicit env =>
+        import env.*
+
+        val party = createUniqueParty(participant1, daName)
+        val otherParty = createUniqueParty(participant1, daName)
+
+        // Create a contract on P1
+        val contractId = createContract(participant1, party, Some(otherParty))
+
+        // Disable vetting so that upgrade compatibility between V2 and V3 is not checked (it would fail)
+        participant3.dars.upload(FooV3Path, vetAllPackages = false)
+
+        // Import with validation mode should fail since the contract does not pass validation
+        exportAndImportOn(
+          participant3,
+          party,
+          contractRpIdOverride = Map(contractId -> FooV3PkgId),
+          contractImportMode = ContractImportMode.Validation,
+          handleImport = f =>
+            assertThrowsAndLogsCommandFailures(
+              f(),
+              entry => {
+                entry.shouldBeCantonErrorCode(ImportAcsError)
+                entry.message should include(show"Failed to authenticate contract")
+              },
+            ),
+        )
+    }
+
+    // TODO(#28075): Test vetting-based override when implemented
+  }
+
+  // TODO(#25385): Move the representative package ID rendering assertions to LAPITT once it is
+  // possible to test a representative package ID that differs from a contract's create package ID
+  // (hint: once creation package unvetting for re-assignments is implemented), as currently ACS
+  // imports cannot be easily tested in LAPITT.
+  private def expectRpId(
+      contractId: LfContractId,
+      partyId: PartyId,
+      participantRef: LocalParticipantReference,
+      expectedRpId: LfPackageId,
+      expectedNumberOfEventsForUpdatesQuery: Int = 1,
+  ): Assertion = {
+    val queryIfooEventFormat = transaction_filter.EventFormat(
+      filtersByParty = Map(
+        partyId.toProtoPrimitive -> transaction_filter.Filters(
+          cumulative = Seq(
+            transaction_filter.CumulativeFilter(
+              identifierFilter =
+                transaction_filter.CumulativeFilter.IdentifierFilter.InterfaceFilter(
+                  transaction_filter.InterfaceFilter(
+                    interfaceId = Some(
+                      apiValue.Identifier.fromJavaProto(
+                        upgrades.v1.java.ifoo.IFoo.INTERFACE_ID.toProto
+                      )
+                    ),
+                    includeInterfaceView = true,
+                    includeCreatedEventBlob = false,
+                  )
+                )
+            )
+          )
+        )
+      ),
+      filtersForAnyParty = None,
+      verbose = true,
+    )
+
+    // We assert as follows:
+    // - For JSON API, only the ACS endpoint since the Daml value conversion and transcode logic is common with all other queries (e.g.
updates) + // - For gRPC API, the updates, ACS and event query endpoints since they do not fully share the same backend logic for event serialization + assertJsonApi(contractId, partyId, queryIfooEventFormat, participantRef, expectedRpId) + assertGrpcApi( + contractId, + partyId, + queryIfooEventFormat, + participantRef, + expectedRpId, + expectedNumberOfEventsForUpdatesQuery, + ) + } + + private def assertGrpcApi( + contractId: LfContractId, + party: PartyId, + queryEventFormat: transaction_filter.EventFormat, + participantRef: ParticipantReference, + expectedRpId: LfPackageId, + expectedNumberOfEventsForUpdatesQuery: Int, + ): Assertion = { + // Get create from update stream + val updateSourcedCreatedEvent = participantRef.ledger_api.updates + .updates( + updateFormat = transaction_filter.UpdateFormat( + includeTransactions = Some( + transaction_filter.TransactionFormat( + Some(queryEventFormat), + transaction_filter.TransactionShape.TRANSACTION_SHAPE_ACS_DELTA, + ) + ), + includeReassignments = None, + includeTopologyEvents = None, + ), + completeAfter = PositiveInt.tryCreate(expectedNumberOfEventsForUpdatesQuery), + ) + .flatMap(_.createEvents) + .filter(_.contractId == contractId.coid) + .loneElement + + // Get create from ACS + val acsCreatedEvent = participantRef.ledger_api.state.acs + .of_party( + party, + filterInterfaces = Seq( + TemplateId.fromJavaIdentifier(upgrades.v1.java.ifoo.IFoo.INTERFACE_ID) + ), + ) + .filter(_.contractId == contractId.coid) + .loneElement + .event + + val eventQueryCreatedEvent = participantRef.ledger_api.event_query + .by_contract_id(contractId.coid, Seq(party)) + .getCreated + .getCreatedEvent + + // Equality check implies representative package IDs are the same + updateSourcedCreatedEvent shouldBe acsCreatedEvent + // Interface views not supported in event query endpoint, so check equality without interface views + eventQueryCreatedEvent shouldBe acsCreatedEvent.copy(interfaceViews = Seq.empty) + + val expectedRepresentativeTemplateId = apiValue.Identifier( + packageId = expectedRpId, + moduleName = upgrades.v1.java.foo.Foo.TEMPLATE_ID.getModuleName, + entityName = upgrades.v1.java.foo.Foo.TEMPLATE_ID.getEntityName, + ) + + // We don't care about the content but just that the view computation was successful + // This verifies that the view computation works with the selected representative package-id + acsCreatedEvent.interfaceViews.loneElement.viewValue shouldBe defined + + acsCreatedEvent.createArguments.value shouldBe apiValue.Record( + recordId = Some(expectedRepresentativeTemplateId), + fields = Vector( + apiValue.RecordField( + "owner", + Some(apiValue.Value(apiValue.Value.Sum.Party(party.toProtoPrimitive))), + ), + apiValue.RecordField( + "otherParty", + Some(apiValue.Value(apiValue.Value.Sum.Party(party.toProtoPrimitive))), + ), + ), + ) + + acsCreatedEvent.representativePackageId shouldBe expectedRpId + } + + private def assertJsonApi( + contractId: LfContractId, + partyId: PartyId, + queryEventFormat: transaction_filter.EventFormat, + participantRef: LocalParticipantReference, + expectedRpId: LfPackageId, + ): Assertion = { + val endOffset = participantRef.ledger_api.state.end() + val responses = queryJsonAcsFor(participantRef, queryEventFormat, endOffset).futureValue + val targetCreatedEvent = responses + .map(r => + inside(r.contractEntry) { case JsActiveContract(createdEvent, _, _) => + createdEvent + } + ) + .filter(_.contractId == contractId.coid) + .loneElement + inside(targetCreatedEvent) { createdEvent => + createdEvent.contractId 
shouldBe contractId.coid + createdEvent.representativePackageId shouldBe expectedRpId + // As in the gRPC assertion, we assert that rendering of the values is successful + // which is relevant for situations where the representative package-id differs from + // the creation package-id when the latter is unknown to the participant. + createdEvent.createArgument.value shouldBe Json.obj( + // Using two distinct parties to highlight validation failure on incompatible upgrade + // when the order of the fields in a template changes + "owner" -> Json.fromString(partyId.toProtoPrimitive), + "otherParty" -> Json.fromString(partyId.toProtoPrimitive), + ) + createdEvent.interfaceViews.loneElement.viewValue.value shouldBe Json.obj( + "owner" -> Json.fromString(partyId.toProtoPrimitive) + ) + } + } + + private def queryJsonAcsFor( + participantRef: LocalParticipantReference, + queryEventFormat: transaction_filter.EventFormat, + ledgerEndOffset: Long, + ): Future[Seq[JsGetActiveContractsResponse]] = { + val uri = Uri + .from( + scheme = "http", + host = "localhost", + port = participantRef.config.httpLedgerApi.server.port.unwrap, + ) + .withPath(Uri.Path("/v2/state/active-contracts")) + + val getActiveContractsRequest = state_service + .GetActiveContractsRequest( + activeAtOffset = ledgerEndOffset, + eventFormat = Some(queryEventFormat), + ) + .asJson + .toString() + + Http() + .singleRequest( + HttpRequest( + method = HttpMethods.POST, + uri = uri, + headers = Seq.empty, + entity = HttpEntity(ContentTypes.`application/json`, getActiveContractsRequest), + ) + ) + .flatMap { response => + response.entity.dataBytes + .runFold(ByteString.empty)((b, a) => b ++ a) + .map(_.utf8String) + .map(io.circe.parser.decode[Seq[JsGetActiveContractsResponse]]) + } + .map(_.value) + } + + private def exportAndImportOn( + importParticipant: ParticipantReference, + party: PartyId, + contractRpIdOverride: Map[ContractId, LfPackageId] = Map.empty, + packageIdOverride: Map[LfPackageId, LfPackageId] = Map.empty, + packageNameOverride: Map[LfPackageName, LfPackageId] = Map.empty, + contractImportMode: ContractImportMode = ContractImportMode.Validation, + handleImport: (() => Unit) => Unit = (f: () => Unit) => f(), + )(implicit env: FixtureParam): Unit = { + import env.* + + File.usingTemporaryFile() { file => + participant1.repair.export_acs( + parties = Set(party), + exportFilePath = file.canonicalPath, + synchronizerId = Some(daId), + ledgerOffset = NonNegativeLong.tryCreate(participant1.ledger_api.state.end()), + ) + + importParticipant.synchronizers.disconnect_all() + handleImport { () => + try { + importParticipant.repair + .import_acs( + importFilePath = file.canonicalPath, + representativePackageIdOverride = RepresentativePackageIdOverride( + contractOverride = contractRpIdOverride, + packageIdOverride = packageIdOverride, + packageNameOverride = packageNameOverride, + ), + contractImportMode = contractImportMode, + ) + .discard + } finally { + importParticipant.synchronizers.reconnect_all() + } + } + } + } + + private def createContract( + participantRef: ParticipantReference, + party: PartyId, + otherParty: Option[PartyId] = None, + )(implicit + env: FixtureParam + ): protocol.LfContractId = { + import env.* + + participantRef.ledger_api.javaapi.commands + .submit( + Seq(party.toLf), + new upgrades.v1.java.foo.Foo( + party.toProtoPrimitive, + otherParty.getOrElse(party).toProtoPrimitive, + ) + .create() + .commands() + .asScala + .toSeq, + ) + .getEvents + .asScala + .loneElement + .getContractId + 
      .pipe(LfContractId.assertFromString)
+  }
+}
+
+// TODO(#25385): This test should be a variation in the conformance test suites, but since it is
+// not yet possible to test ACS import effects in LAPITT, and more importantly here the
+// representative package ID selection, we keep it here for now.
+class AcsImportRepresentativePackageIdSelectionIntegrationTestNoImfoBuffer
+    extends AcsImportRepresentativePackageIdSelectionIntegrationTest {
+  override def environmentDefinition: EnvironmentDefinition =
+    super.environmentDefinition
+      .addConfigTransforms(ConfigTransforms.updateAllParticipantConfigs { case (_, config) =>
+        config
+          .focus(_.ledgerApi.indexService.maxTransactionsInMemoryFanOutBufferSize)
+          .replace(0)
+      })
+}
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/DeceasedPartyContractPurgeRepairIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/DeceasedPartyContractPurgeRepairIntegrationTest.scala
index 130584b5f2..d877aa3ab6 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/DeceasedPartyContractPurgeRepairIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/DeceasedPartyContractPurgeRepairIntegrationTest.scala
@@ -9,8 +9,8 @@ import com.digitalasset.canton.console.FeatureFlag
 import com.digitalasset.canton.examples.java.iou
 import com.digitalasset.canton.integration.plugins.{
   UseBftSequencer,
-  UseCommunityReferenceBlockSequencer,
   UsePostgres,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.integration.util.EntitySyntax
 import com.digitalasset.canton.integration.{
@@ -136,7 +136,7 @@ trait DeceasedPartyContractPurgeRepairIntegrationTest
 class DeceasedPartyContractPurgeRepairIntegrationTestPostgres
     extends DeceasedPartyContractPurgeRepairIntegrationTest {
   registerPlugin(new UsePostgres(loggerFactory))
-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
 }

 class DeceasedPartyContractPurgeRepairBftOrderingIntegrationTestPostgres
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/ExportAcsPartyOffboardingIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/ExportAcsPartyOffboardingIntegrationTest.scala
index 74bb6993f0..cf1cb1f994 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/ExportAcsPartyOffboardingIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/ExportAcsPartyOffboardingIntegrationTest.scala
@@ -6,10 +6,7 @@ package com.digitalasset.canton.integration.tests.repair
 import better.files.File
 import com.digitalasset.canton.config.DbConfig
 import com.digitalasset.canton.console.CommandFailure
-import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
-  UsePostgres,
-}
+import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer}
 import com.digitalasset.canton.integration.tests.examples.IouSyntax
 import com.digitalasset.canton.integration.util.AcsInspection
 import com.digitalasset.canton.integration.{
@@ -44,7 +41,7 @@ final class ExportAcsPartyOffboardingIntegrationTest

   registerPlugin(new UsePostgres(loggerFactory))
   registerPlugin(
-    new
UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory) + new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory) ) private var alice: PartyId = _ diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/ExportContractsIdRecomputationIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/ExportContractsIdRecomputationIntegrationTest.scala index f61973c821..47d7f9c5eb 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/ExportContractsIdRecomputationIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/ExportContractsIdRecomputationIntegrationTest.scala @@ -14,10 +14,7 @@ import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.console.{CommandFailure, LocalParticipantReference} import com.digitalasset.canton.damltests.java.refs.Refs import com.digitalasset.canton.data.Offset -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.util.EntitySyntax import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -26,8 +23,8 @@ import com.digitalasset.canton.integration.{ TestConsoleEnvironment, } import com.digitalasset.canton.logging.SuppressionRule.{Level, forLogger} -import com.digitalasset.canton.participant.admin.data.{ActiveContract, ContractIdImportMode} -import com.digitalasset.canton.participant.admin.repair.ContractIdsImportProcessor +import com.digitalasset.canton.participant.admin.data.{ActiveContract, ContractImportMode} +import com.digitalasset.canton.participant.admin.repair.ContractAuthenticationImportProcessor import com.digitalasset.canton.protocol.{LfContractId, LfHash, LfThinContractInst} import com.digitalasset.canton.topology.ForceFlag.DisablePartyWithActiveContracts import com.digitalasset.canton.topology.transaction.ParticipantPermission @@ -57,7 +54,7 @@ sealed trait ExportContractsIdRecomputationIntegrationTest registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory) + new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory) ) /** Create contracts `participant` for the given `party` -- these will be the contracts exported @@ -312,7 +309,7 @@ object ExportContractsIdRecomputationIntegrationTest { val remapping = participant2.repair.import_acs( brokenExportFile.canonicalPath, - contractIdImportMode = ContractIdImportMode.Recomputation, + contractImportMode = ContractImportMode.Recomputation, ) remapping should have size exportSize forAll(remapping) { case (oldCid, newCid) => @@ -333,7 +330,7 @@ object ExportContractsIdRecomputationIntegrationTest { val remapping = participant2.repair.import_acs( exportFile.canonicalPath, - contractIdImportMode = ContractIdImportMode.Recomputation, + contractImportMode = ContractImportMode.Recomputation, ) remapping shouldBe empty } @@ -442,10 +439,12 @@ class ExportContractsIdRecomputationArchivedDependencyIntegrationTest withExport(break = removeLeaves andThen zeroOutSuffixes) { (brokenExportFile, exportSize, alice) => whileDisconnected(participant2, daName) { - loggerFactory.assertLogs(forLogger[ContractIdsImportProcessor] && Level(WARN))( + loggerFactory.assertLogs( + 
forLogger[ContractAuthenticationImportProcessor] && Level(WARN) + )( participant2.repair.import_acs( brokenExportFile.canonicalPath, - contractIdImportMode = ContractIdImportMode.Recomputation, + contractImportMode = ContractImportMode.Recomputation, ), _.message should include regex "Missing dependency with contract ID '.+'. The contract might have been archived. Its contract ID cannot be recomputed.", ) @@ -490,7 +489,7 @@ class ExportContractsIdRecomputationDuplicateDiscriminatorIntegrationTest loggerFactory.assertThrowsAndLogs[CommandFailure]( participant2.repair.import_acs( brokenExportFile.canonicalPath, - contractIdImportMode = ContractIdImportMode.Recomputation, + contractImportMode = ContractImportMode.Recomputation, ), _.errorMessage should include regex "Duplicate discriminator '0+' is used by 2 contract IDs, including", ) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/ExportContractsIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/ExportContractsIntegrationTest.scala index 28f519d2b3..4c9aabe000 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/ExportContractsIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/ExportContractsIntegrationTest.scala @@ -7,10 +7,7 @@ import better.files.* import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt} import com.digitalasset.canton.console.CommandFailure -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.util.EntitySyntax import com.digitalasset.canton.integration.{ @@ -48,7 +45,7 @@ final class ExportContractsIntegrationTest registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory) + new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory) ) "Exporting an ACS" should { diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/IgnoreSequencedEventsIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/IgnoreSequencedEventsIntegrationTest.scala index 3c300ff82b..deb17cebe6 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/IgnoreSequencedEventsIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/IgnoreSequencedEventsIntegrationTest.scala @@ -11,7 +11,7 @@ import com.digitalasset.canton.config.{DbConfig, PositiveDurationSeconds} import com.digitalasset.canton.console.InstanceReference import com.digitalasset.canton.crypto.{EncryptionPublicKey, KeyPurpose, SigningKeyUsage} import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.integration.plugins.{UseCommunityReferenceBlockSequencer, UseH2} +import com.digitalasset.canton.integration.plugins.{UseH2, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, ConfigTransforms, @@ -359,7 +359,7 @@ trait IgnoreSequencedEventsIntegrationTest extends CommunityIntegrationTest with 
p1SequencedEventStore.delete(lastStoredEvent.counter).futureValueUS p1SequencedEventStore.store(Seq(tracedSignedTamperedEvent)).futureValueUS - loggerFactory.assertLogs( + loggerFactory.assertEventuallyLogsSeq(SuppressionRule.LevelAndAbove(Level.WARN))( { participant1.synchronizers.reconnect(daName) @@ -371,9 +371,16 @@ trait IgnoreSequencedEventsIntegrationTest extends CommunityIntegrationTest with }, // The problem happens to be "ForkHappened" due to the order of checks carried out by the sequencer client. // Feel free to change, if another property is checked first, e.g., "SignatureInvalid". - _.shouldBeCantonErrorCode(ResilientSequencerSubscription.ForkHappened), - _.warningMessage should include("ForkHappened"), - _.shouldBeCantonErrorCode(SyncServiceSynchronizerDisconnect), + { entries => + val requiredErrorMessages = Seq( + ResilientSequencerSubscription.ForkHappened.id, + "ForkHappened", + SyncServiceSynchronizerDisconnect.id, + ) + requiredErrorMessages.forall(errMsg => + entries.exists(_.message.contains(errMsg)) + ) shouldBe true + }, ) eventually() { @@ -458,17 +465,19 @@ trait IgnoreSequencedEventsIntegrationTest extends CommunityIntegrationTest with store = daId, force = ForceFlags(ForceFlag.AlienMember), ) - // Wait until p1 has processed the topology transaction. + // Wait until p1 and p2 have processed the topology transaction. eventually() { - participant1.topology.owner_to_key_mappings - .list( - store = daId, - filterKeyOwnerUid = participant1.id.filterString, - ) - .flatMap(_.item.keys) - .filter(_.purpose == missingEncryptionKey.purpose) - .loneElement - .fingerprint shouldBe missingEncryptionKey.fingerprint + forAll(Seq(participant1, participant2))( + _.topology.owner_to_key_mappings + .list( + store = daId, + filterKeyOwnerUid = participant1.id.filterString, + ) + .flatMap(_.item.keys) + .filter(_.purpose == missingEncryptionKey.purpose) + .loneElement + .fingerprint shouldBe missingEncryptionKey.fingerprint + ) } }, // Participant1 will emit an error, because the key is not present. @@ -489,8 +498,11 @@ trait IgnoreSequencedEventsIntegrationTest extends CommunityIntegrationTest with // Note: The following ping will bring transaction processing to a halt, // because it can't decrypt the confirmation request. 
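      // (the newly registered encryption key is not present on participant1, so the
      // randomness of the view cannot be decrypted; see the log assertion below)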
- loggerFactory.assertLoggedWarningsAndErrorsSeq( + loggerFactory.assertEventuallyLogsSeq(SuppressionRule.LevelAndAbove(Level.WARN))( { + env.environment.simClock.foreach(_.advance(java.time.Duration.ofSeconds(5))) + participant1.synchronizers.list_connected() should not be empty + clue("pinging to halt") { pokeAndAdvance(Future { participant2.health.maybe_ping(participant1, timeout = 2.seconds) @@ -508,7 +520,7 @@ trait IgnoreSequencedEventsIntegrationTest extends CommunityIntegrationTest with participant1.synchronizers.list_connected() shouldBe empty }, forAtLeast(1, _) { - _.message should startWith("Asynchronous event processing failed") + _.toString should include("Can't decrypt the randomness of the view") }, ) } @@ -564,5 +576,5 @@ trait IgnoreSequencedEventsIntegrationTest extends CommunityIntegrationTest with class IgnoreSequencedEventsIntegrationTestH2 extends IgnoreSequencedEventsIntegrationTest { registerPlugin(new UseH2(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/ImportContractsIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/ImportContractsIntegrationTest.scala index 379127b567..e4aa6c181f 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/ImportContractsIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/ImportContractsIntegrationTest.scala @@ -7,11 +7,8 @@ import better.files.* import com.digitalasset.canton.config.CantonRequireTypes.InstanceName import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.config.RequireTypes.NonNegativeLong -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -33,7 +30,8 @@ final class ImportContractsIntegrationTest extends CommunityIntegrationTest with import env.* participants.all.synchronizers.connect_local(sequencer1, alias = daName) participants.all.synchronizers.connect_local(sequencer2, alias = acmeName) - participants.all.dars.upload(CantonExamplesPath) + participants.all.dars.upload(CantonExamplesPath, synchronizerId = daId) + participants.all.dars.upload(CantonExamplesPath, synchronizerId = acmeId) alice = participant1.parties.enable("Alice", synchronizer = daName) participant1.parties.enable("Alice", synchronizer = acmeName) @@ -43,7 +41,7 @@ final class ImportContractsIntegrationTest extends CommunityIntegrationTest with registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/LargeAcsExportAndImportTest.scala 
b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/LargeAcsExportAndImportTest.scala
index c216aa7a22..b24104e30d 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/LargeAcsExportAndImportTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/LargeAcsExportAndImportTest.scala
@@ -5,8 +5,8 @@ package com.digitalasset.canton.integration.tests.repair

 import better.files.*
 import com.daml.test.evidence.scalatest.OperabilityTestHelpers
+import com.digitalasset.canton.config.DbConfig
 import com.digitalasset.canton.config.RequireTypes.PositiveInt
-import com.digitalasset.canton.config.{DbConfig, RequireTypes}
 import com.digitalasset.canton.console.{
   LocalInstanceReference,
   LocalMediatorReference,
@@ -22,8 +22,8 @@ import com.digitalasset.canton.integration.bootstrap.{
 }
 import com.digitalasset.canton.integration.plugins.{
   PostgresDumpRestore,
-  UseCommunityReferenceBlockSequencer,
   UsePostgres,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.integration.tests.examples.IouSyntax
 import com.digitalasset.canton.integration.util.{AcsInspection, PartyToParticipantDeclarative}
@@ -34,7 +34,8 @@ import com.digitalasset.canton.integration.{
   SharedEnvironment,
   TestConsoleEnvironment,
 }
-import com.digitalasset.canton.participant.admin.data.ContractIdImportMode
+import com.digitalasset.canton.participant.admin.data.ContractImportMode
+import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil
 import com.digitalasset.canton.time.PositiveSeconds
 import com.digitalasset.canton.topology.PartyId
 import com.digitalasset.canton.topology.transaction.ParticipantPermission as PP
@@ -45,9 +46,13 @@ import monocle.Monocle.toAppliedFocusOps
 import java.util.concurrent.TimeUnit
 import scala.concurrent.duration.DurationInt
 import scala.concurrent.{Await, ExecutionContext, Future}
-import scala.jdk.CollectionConverters.*

 /** Given a large active contract set (ACS), we want to test the ACS export and import.
+ *
+ * The tooling also allows generating "transient contracts": contracts that are created and
+ * archived immediately (and thus don't show up in the ACS snapshot). This is useful for checking
+ * how such archived but not-yet-pruned contracts impact the performance of the export. Hence, it
+ * is important to disable background pruning; a minimal sketch of the pattern follows below.
 *
 * Raison d'être – Have this test code readily available in the repository for on-demand
 * performance investigations.
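+ * A minimal sketch of the transient-contract pattern (assuming parties `alice` and `bob` and
+ * the environment's `participant1`; `createContracts` below contains the real code):
+ * {{{
+ * // create an Iou and decode the created contract
+ * val created = JavaDecodeUtil.decodeAllCreated(M.iou.Iou.COMPANION)(
+ *   participant1.ledger_api.javaapi.commands
+ *     .submit(Seq(alice), Seq(IouSyntax.testIou(alice, bob, 100.0).create.commands.loneElement))
+ * )
+ * // archive it right away, so it never shows up in an ACS snapshot
+ * participant1.ledger_api.javaapi.commands
+ *   .submit(Seq(alice), created.map(_.id.exerciseArchive().commands().loneElement))
+ * }}}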
Thus, we're not so much interested in actually creating a large ACS @@ -103,14 +108,15 @@ protected abstract class LargeAcsExportAndImportTestBase with AcsInspection with OperabilityTestHelpers { - /** Test definition, in particular how many active IOU contracts should be used */ + /** Test definition, in particular how many active Iou contracts should be used */ protected def testSet: TestSet protected case class TestSet( acsSize: Int, + transientContracts: Int, name: String, directory: File, - creationBatchSize: Int = 500, + creationBatchSize: Int, ) { def dumpDirectory: TempDirectory = TempDirectory((directory / "dump").createDirectoryIfNotExists(createParents = true)) @@ -130,11 +136,19 @@ protected abstract class LargeAcsExportAndImportTestBase def formatWithUnderscores(number: Int): String = number.toString.reverse.grouped(3).mkString("_").reverse - def apply(acsSize: Int) = { + def apply(acsSize: Int, transientContracts: Int): TestSet = { val testName = formatWithUnderscores(acsSize) + val creationBatchSize = 500 + val testDirectory = - File.currentWorkingDirectory / "tmp" / s"${testName}_LargeAcsExportAndImportTest" - new TestSet(acsSize, testName, testDirectory) + File.currentWorkingDirectory / "tmp" / s"$testName-transient=${transientContracts}_LargeAcsExportAndImportTest" + new TestSet( + acsSize = acsSize, + transientContracts = transientContracts, + testName, + testDirectory, + creationBatchSize = creationBatchSize, + ) } } @@ -144,38 +158,11 @@ protected abstract class LargeAcsExportAndImportTestBase protected val baseEnvironmentDefinition: EnvironmentDefinition = EnvironmentDefinition.P3_S1M1_Manual - .clearConfigTransforms() // Disable globally unique ports - .addConfigTransforms(ConfigTransforms.allDefaultsButGloballyUniquePorts*) .addConfigTransforms( - // Adding hard-coded ports because reconnecting to a synchronizer requires the synchronizer - // ports to be the same, and we do so across node restarts as part of the tests - ConfigTransforms.updateSequencerConfig("sequencer1")(cfg => - cfg - .focus(_.publicApi.internalPort) - .replace(Some(RequireTypes.Port.tryCreate(9018))) - .focus(_.adminApi.internalPort) - .replace(Some(RequireTypes.Port.tryCreate(9019))) - ), - ConfigTransforms.updateParticipantConfig("participant1")(cfg => - cfg - .focus(_.ledgerApi.internalPort) - .replace(Some(RequireTypes.Port.tryCreate(9011))) - .focus(_.adminApi.internalPort) - .replace(Some(RequireTypes.Port.tryCreate(9012))) - ), - ConfigTransforms.updateParticipantConfig("participant2")(cfg => - cfg - .focus(_.ledgerApi.internalPort) - .replace(Some(RequireTypes.Port.tryCreate(9021))) - .focus(_.adminApi.internalPort) - .replace(Some(RequireTypes.Port.tryCreate(9022))) - ), - ConfigTransforms.updateParticipantConfig("participant3")(cfg => - cfg - .focus(_.ledgerApi.internalPort) - .replace(Some(RequireTypes.Port.tryCreate(9031))) - .focus(_.adminApi.internalPort) - .replace(Some(RequireTypes.Port.tryCreate(9032))) + // Disable background pruning + ConfigTransforms.updateAllParticipantConfigs_( + _.focus(_.parameters.journalGarbageCollectionDelay) + .replace(config.NonNegativeFiniteDuration.ofDays(365 * 100)) ), // Use distinct timeout values so that it is clear which timeout expired _.focus(_.parameters.timeouts.processing.unbounded) @@ -200,18 +187,16 @@ protected abstract class LargeAcsExportAndImportTestBase override protected def environmentDefinition: EnvironmentDefinition = baseEnvironmentDefinition // Need the persistence for dumping and restoring large ACS - protected val 
referenceSequencer = new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + protected val referenceSequencer = new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory ) - registerPlugin( - referenceSequencer - ) + registerPlugin(referenceSequencer) protected val pgPlugin = new UsePostgres(loggerFactory) registerPlugin(pgPlugin) protected lazy val dumpRestore: PostgresDumpRestore = PostgresDumpRestore(pgPlugin, forceLocal = true) - protected def testSetup(implicit env: TestConsoleEnvironment): Unit = { + protected def testSetup()(implicit env: TestConsoleEnvironment): Unit = { import env.* Seq(participant1, participant2, participant3).foreach(_.start()) @@ -238,50 +223,60 @@ protected abstract class LargeAcsExportAndImportTestBase .valueOrFail(s"Where is party $name?") ) - protected def createContracts(implicit env: TestConsoleEnvironment): Unit = { + protected def createContracts()(implicit env: TestConsoleEnvironment): Unit = { import env.* // Enable parties - participant1.parties.enable( - "Alice" - ) - participant2.parties.enable( - "Bob" - ) + participant1.parties.enable("Alice") + participant2.parties.enable("Bob") // Create contracts val alice = grabPartyId(participant1, "Alice") val bob = grabPartyId(participant2, "Bob") - val dataset = Range.inclusive(1, testSet.acsSize) - val batches: Iterator[Seq[Int]] = dataset.grouped(testSet.creationBatchSize) + val contractsDataset = Range.inclusive(1, testSet.acsSize) + val batches = contractsDataset.grouped(testSet.creationBatchSize).toList + val batchesCount = batches.size + val transientContractsPerBatch = + Math.ceil(testSet.transientContracts.toDouble / batchesCount).toInt batches.foreach { batch => val start = System.nanoTime() - val iousCommands = batch.flatMap { amount => - IouSyntax.testIou(alice, bob, amount.toDouble).create.commands.asScala.toSeq + val iousCommands = batch.map { amount => + IouSyntax.testIou(alice, bob, amount.toDouble).create.commands.loneElement } participant1.ledger_api.javaapi.commands.submit(Seq(alice), iousCommands) - participant1.ledger_api.javaapi.state.acs.await(M.iou.Iou.COMPANION)(alice).discard val ledgerEnd = participant1.ledger_api.state.end() val end = System.nanoTime() logger.info( s"Batch: ${batch.head} to ${batch.head + testSet.creationBatchSize} took ${TimeUnit.NANOSECONDS .toMillis(end - start)}ms and ledger end = $ledgerEnd" ) - } - val aliceOnP1AcsSize = - participant1.ledger_api.state.acs.of_party(alice, limit = PositiveInt.MaxValue).size - withClue(s"Alice on P1 ACS size: $aliceOnP1AcsSize")( - aliceOnP1AcsSize shouldBe testSet.acsSize - ) + // Transient contracts + if (transientContractsPerBatch > 0) { + val transientContractsCreateCmds = + Seq.fill(transientContractsPerBatch)(100.0).map { amount => + IouSyntax.testIou(alice, bob, amount).create.commands.loneElement + } + val chip = JavaDecodeUtil.decodeAllCreated(M.iou.Iou.COMPANION)( + participant1.ledger_api.javaapi.commands + .submit(Seq(alice), transientContractsCreateCmds) + ) - val bobOnP2AcsSize = - participant2.ledger_api.state.acs.of_party(bob, limit = PositiveInt.MaxValue).size - withClue(s"Bob on P2 ACS size: $bobOnP2AcsSize")( - bobOnP2AcsSize shouldBe testSet.acsSize - ) + val archiveCmds = chip.map(_.id.exerciseArchive().commands().loneElement) + + participant1.ledger_api.javaapi.commands.submit(Seq(alice), archiveCmds) + } + } + + // Sanity checks + participant1.ledger_api.state.acs + .of_party(alice, limit = PositiveInt.MaxValue) + .size shouldBe testSet.acsSize + participant2.ledger_api.state.acs + 
.of_party(bob, limit = PositiveInt.MaxValue) + .size shouldBe testSet.acsSize } } @@ -294,10 +289,9 @@ protected abstract class LargeAcsExportAndImportTestBase * The number of created active contracts is defined by the [[TestSet]]. */ protected abstract class DumpTestSet extends LargeAcsExportAndImportTestBase { - override protected def environmentDefinition: EnvironmentDefinition = baseEnvironmentDefinition.withSetup { implicit env => - testSetup + testSetup() } private def saveDumps( @@ -308,6 +302,7 @@ protected abstract class DumpTestSet extends LargeAcsExportAndImportTestBase { ): Future[Unit] = MonadUtil.sequentialTraverse_(nodes) { node => val filename = s"${node.name}.pg_dump" + dumpRestore.saveDump(node, testSet.dumpDirectory.toTempFile(filename)) } @@ -334,10 +329,10 @@ protected abstract class DumpTestSet extends LargeAcsExportAndImportTestBase { } s"create ${testSet.acsSize} active contracts" in { implicit env => - createContracts + createContracts() } - s"create database dump" in { implicit env => + "create database dump" in { implicit env => import env.* createDatabaseDump( @@ -349,10 +344,16 @@ protected abstract class DumpTestSet extends LargeAcsExportAndImportTestBase { } protected abstract class EstablishTestSet extends LargeAcsExportAndImportTestBase { + // If true, use legacy export/import (Canton internal instead of LAPI) + def useLegacyExportImport: Boolean + + protected def testContractIdImportMode: ContractImportMode - protected def testContractIdImportMode: ContractIdImportMode + // Replicate Alice from P1 to P3 + private val acsExportFile = new SingleUseCell[File] + private val ledgerOffsetBeforePartyOnboarding = new SingleUseCell[Long] - private def restoreDump(node: LocalInstanceReference)(implicit + protected def restoreDump(node: LocalInstanceReference)(implicit env: TestConsoleEnvironment ): Future[Unit] = { val filename = s"${node.name}.pg_dump" @@ -368,11 +369,9 @@ protected abstract class EstablishTestSet extends LargeAcsExportAndImportTestBas env: TestConsoleEnvironment, executionContext: ExecutionContext, ): Unit = handleStartupLogs { - val nodes = - (participants: Seq[LocalInstanceReference]) ++ - (sequencers: Seq[LocalInstanceReference]) ++ - (mediators: Seq[LocalInstanceReference]) + val nodes = (participants: Seq[LocalInstanceReference]) ++ sequencers ++ mediators nodes.foreach(_.stop()) + clue("Restoring database") { val res = for { @@ -384,6 +383,7 @@ protected abstract class EstablishTestSet extends LargeAcsExportAndImportTestBas } yield () Await.result(res, 15.minutes) } + clue("Starting all nodes") { nodes.foreach(_.start()) new NetworkBootstrapper(networkTopologyDescriptions*).bootstrap() @@ -391,9 +391,9 @@ protected abstract class EstablishTestSet extends LargeAcsExportAndImportTestBas nodes.forall(_.is_initialized) } } - clue("Reconnecting synchronizers")( - participants.foreach(_.synchronizers.reconnect_all()) - ) + + participants.foreach(_.synchronizers.reconnect_all()) + clue( "Ensure that all participants are up-to-date with the state of the topology " + "at the given time as returned by the synchronizer" @@ -409,8 +409,8 @@ protected abstract class EstablishTestSet extends LargeAcsExportAndImportTestBas import env.* if (testSet.isEmptyDumpDirectory) { - testSetup - createContracts + testSetup() + createContracts() } else { loadState( Seq(sequencer1), @@ -421,8 +421,6 @@ protected abstract class EstablishTestSet extends LargeAcsExportAndImportTestBas } } - val ledgerOffsetBeforePartyOnboarding = new SingleUseCell[Long] - "authorize Alice 
on P3" in { implicit env => import env.* val alice = grabPartyId(participant1, "Alice") @@ -441,24 +439,11 @@ protected abstract class EstablishTestSet extends LargeAcsExportAndImportTestBas } // Replicate Alice from P1 to P3 - - val acsExportFile = new SingleUseCell[File] - "export ACS for Alice from P1" in { implicit env => import env.* val alice = grabPartyId(participant1, "Alice") - val aliceAddedOnP3Offset = participant1.parties.find_party_max_activation_offset( - partyId = alice, - synchronizerId = daId, - participantId = participant3.id, - beginOffsetExclusive = ledgerOffsetBeforePartyOnboarding.getOrElse( - throw new RuntimeException("missing begin offset") - ), - completeAfter = PositiveInt.one, - ) - acsExportFile.putIfAbsent( File.newTemporaryFile( parent = Some(testSet.exportDirectory), @@ -466,16 +451,33 @@ protected abstract class EstablishTestSet extends LargeAcsExportAndImportTestBas ) ) - acsExportFile.get.foreach { acsExport => - clue("Get Alice ACS on P1 with export_acs")( + if (useLegacyExportImport) { + acsExportFile.get.foreach { acsExport => + participant1.repair.export_acs_old( + Set(alice), + partiesOffboarding = false, + outputFile = acsExport.canonicalPath, + ) + } + } else { + val aliceAddedOnP3Offset = participant1.parties.find_party_max_activation_offset( + partyId = alice, + synchronizerId = daId, + participantId = participant3.id, + beginOffsetExclusive = ledgerOffsetBeforePartyOnboarding.getOrElse( + throw new RuntimeException("missing begin offset") + ), + completeAfter = PositiveInt.one, + ) + + acsExportFile.get.foreach { acsExport => participant1.repair.export_acs( parties = Set(alice), exportFilePath = acsExport.canonicalPath, ledgerOffset = aliceAddedOnP3Offset, ) - ) + } } - } "import ACS for Alice on P3" in { implicit env => @@ -485,22 +487,21 @@ protected abstract class EstablishTestSet extends LargeAcsExportAndImportTestBas acsExportFile.get.foreach { acsExportFile => val startImport = System.nanoTime() - clue("Import Alice ACS on P3")( + + if (useLegacyExportImport) { + participant3.repair.import_acs_old(acsExportFile.canonicalPath) + } else participant3.repair.import_acs( acsExportFile.canonicalPath, - contractIdImportMode = testContractIdImportMode, + contractImportMode = testContractIdImportMode, ) - ) + val importDurationMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startImport) - withClue("ACS import duration in milliseconds") { - importDurationMs should be < testSet.acsImportDurationBoundMs - } + importDurationMs should be < testSet.acsImportDurationBoundMs } - clue(s"Reconnect P3")( - participant3.synchronizers.reconnect(daName) - ) + participant3.synchronizers.reconnect(daName) clue(s"Assert ACS on P3") { participant3.testing.state_inspection @@ -508,20 +509,33 @@ protected abstract class EstablishTestSet extends LargeAcsExportAndImportTestBas .futureValueUS shouldBe Some(testSet.acsSize) } } - } /** Use this test to create a large ACS, and dump the test environment to file for subsequent * testing */ -//final class LargeAcsCreateContractsTest extends DumpTestSet { -// override protected def testSet: TestSet = TestSet(1000) -//} +/* +final class LargeAcsCreateContractsTest extends DumpTestSet { + override protected def testSet: TestSet = TestSet(1000, transientContracts = 13) +} + */ /** The actual test */ final class LargeAcsExportAndImportTest extends EstablishTestSet { - override protected def testSet: TestSet = TestSet(1000) + override protected def testSet: TestSet = TestSet(10, transientContracts = 1) + + override def 
useLegacyExportImport: Boolean = false
+
+  override protected def testContractIdImportMode: ContractImportMode =
+    ContractImportMode.Validation
+}
+
+/** The actual test, exercising the legacy export/import path */
+final class LargeAcsExportAndImportTestLegacy extends EstablishTestSet {
+  override protected def testSet: TestSet = TestSet(10, transientContracts = 1)
+
+  override def useLegacyExportImport: Boolean = true

-  override protected def testContractIdImportMode: ContractIdImportMode =
-    ContractIdImportMode.Validation
+  override protected def testContractIdImportMode: ContractImportMode =
+    ContractImportMode.Validation
 }
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/OfflinePartyMigrationAcsCommitmentIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/OfflinePartyMigrationAcsCommitmentIntegrationTest.scala
index 04d6b2d804..c4412d5826 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/OfflinePartyMigrationAcsCommitmentIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/OfflinePartyMigrationAcsCommitmentIntegrationTest.scala
@@ -5,15 +5,15 @@ package com.digitalasset.canton.integration.tests.repair

 import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.*
 import com.daml.test.evidence.tag.FuncTest
-import com.digitalasset.canton.config.DbConfig
-import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
+import com.digitalasset.canton.config.RequireTypes.NonNegativeProportion
+import com.digitalasset.canton.config.{CommitmentSendDelay, DbConfig}
 import com.digitalasset.canton.console.FeatureFlag
 import com.digitalasset.canton.data.CantonTimestamp
 import com.digitalasset.canton.discard.Implicits.DiscardOps
 import com.digitalasset.canton.integration.plugins.{
   UseBftSequencer,
-  UseCommunityReferenceBlockSequencer,
   UsePostgres,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.integration.util.EntitySyntax
 import com.digitalasset.canton.integration.{
@@ -53,7 +53,11 @@
       )
       // do no delay sending commitments
       .updateTestingConfig(
-        _.focus(_.maxCommitmentSendDelayMillis).replace(Some(NonNegativeInt.zero))
+        _.focus(_.commitmentSendDelay).replace(
+          Some(
+            CommitmentSendDelay(Some(NonNegativeProportion.zero), Some(NonNegativeProportion.zero))
+          )
+        )
       )

   "use repair to migrate a party to a different participant" taggedAs
@@ -177,7 +181,7 @@ sealed trait OfflinePartyMigrationAcsCommitmentIntegrationTest

 final class OfflinePartyMigrationAcsCommitmentIntegrationTestPostgres
     extends OfflinePartyMigrationAcsCommitmentIntegrationTest {
-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
   registerPlugin(new UsePostgres(loggerFactory))
 }
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/ParticipantMigrateSynchronizerIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/ParticipantMigrateSynchronizerIntegrationTest.scala
index 181c37130c..e180ca2505 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/ParticipantMigrateSynchronizerIntegrationTest.scala
+++
b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/ParticipantMigrateSynchronizerIntegrationTest.scala @@ -20,7 +20,7 @@ import com.digitalasset.canton.data.{CantonTimestamp, Offset} import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.error.MediatorError.InvalidMessage import com.digitalasset.canton.examples.java as M -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.plugins.{UseBftSequencer, UsePostgres} import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.util.{AcsInspection, LoggerSuppressionHelpers} @@ -133,8 +133,8 @@ final class ParticipantMigrateSynchronizerIntegrationTest ignoreDeprecatedProtocolMessage( participant.synchronizers.connect_local(sequencer1, alias = daName) ) - participant.dars.upload(CantonExamplesPath) - participant.dars.upload(CantonTestsPath) + participant.dars.upload(CantonExamplesPath, synchronizerId = daId) + participant.dars.upload(CantonTestsPath, synchronizerId = daId) } val alice = participant1.parties.enable( @@ -151,6 +151,10 @@ final class ParticipantMigrateSynchronizerIntegrationTest // temporarily connect to synchronizer 2 and allocate parties there participant1.synchronizers.connect_local(sequencer2, alias = acmeName) participant2.synchronizers.connect_local(sequencer2, alias = acmeName) + Seq(participant1, participant2).foreach { participant => + participant.dars.upload(CantonExamplesPath, synchronizerId = acmeId) + participant.dars.upload(CantonTestsPath, synchronizerId = acmeId) + } participant1.parties.enable( "Alice", synchronizeParticipants = Seq(participant2), @@ -573,8 +577,10 @@ final class ParticipantMigrateSynchronizerCrashRecoveryIntegrationTest Seq(participant1, participant2, participant3).foreach { participant => participant.synchronizers.connect_local(sequencer1, daName) participant.synchronizers.connect_local(sequencer2, acmeName) - participant.dars.upload(CantonExamplesPath) - participant.dars.upload(CantonTestsPath).discard + participant.dars.upload(CantonExamplesPath, synchronizerId = daId) + participant.dars.upload(CantonExamplesPath, synchronizerId = acmeId) + participant.dars.upload(CantonTestsPath, synchronizerId = daId).discard + participant.dars.upload(CantonTestsPath, synchronizerId = acmeId).discard } sequencer1.topology.synchronizer_parameters diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/RepairServiceIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/RepairServiceIntegrationTest.scala index aea34e5e2b..470c3157e2 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/RepairServiceIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/RepairServiceIntegrationTest.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.integration.tests.repair import cats.syntax.either.* +import com.daml.ledger.api.v2.event.CreatedEvent import com.daml.test.evidence.scalatest.ScalaTestSupport.TagContainer import com.daml.test.evidence.tag.EvidenceTag import com.daml.test.evidence.tag.Security.{Attack, SecurityTest, SecurityTestSuite} @@ -15,11 +16,11 @@ import com.digitalasset.canton.crypto.TestSalt import 
com.digitalasset.canton.data.{CantonTimestamp, ViewPosition} import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.integration.* -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.plugins.{ UseBftSequencer, - UseCommunityReferenceBlockSequencer, UsePostgres, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.util.{EntitySyntax, PartiesAllocator} import com.digitalasset.canton.participant.admin.data.RepairContract @@ -30,6 +31,7 @@ import com.digitalasset.canton.sequencing.protocol.MediatorGroupRecipient import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex import com.digitalasset.canton.topology.PartyId import com.digitalasset.canton.topology.transaction.ParticipantPermission.Submission +import com.digitalasset.canton.util.TestContractHasher import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.canton.{ LfVersioned, @@ -38,13 +40,13 @@ import com.digitalasset.canton.{ SynchronizerAlias, config, } -import com.digitalasset.daml.lf.data.Time.Timestamp import com.digitalasset.daml.lf.data.{ImmArray, Ref} import com.digitalasset.daml.lf.transaction.CreationTime import com.digitalasset.daml.lf.value.Value import com.digitalasset.daml.lf.value.Value.{ValueParty, ValueRecord} import org.scalatest.{Assertion, Tag} +import java.time.Duration import java.util.UUID import scala.annotation.nowarn import scala.concurrent.{Future, Promise} @@ -96,7 +98,8 @@ sealed trait RepairServiceIntegrationTest participant1.synchronizers.connect_local(sequencer1, alias = daName) participant1.synchronizers.connect_local(sequencer2, alias = acmeName) - participant1.dars.upload(cantonTestsPath) + participant1.dars.upload(cantonTestsPath, synchronizerId = daId) + participant1.dars.upload(cantonTestsPath, synchronizerId = acmeId) eventually()(assert(participant1.synchronizers.is_connected(daId))) participant2.synchronizers.connect_local(sequencer2, alias = acmeName) @@ -246,31 +249,38 @@ sealed trait RepairServiceIntegrationTestStableLf "contract doesn't exist yet (remote version)" in { implicit env => import env.* - def queryCids(): Seq[String] = + def queryContracts(): Seq[CreatedEvent] = participant1.ledger_api.state.acs.of_all().collect { - case entry if entry.synchronizerId.contains(daId.logical) => entry.contractId - } - - def queryCreateLETs(): Seq[Timestamp] = - participant1.ledger_api.state.acs.of_all().collect { - case entry if entry.synchronizerId.contains(daId.logical) => - CantonTimestamp.fromProtoTimestamp(entry.event.createdAt.value).value.underlying + case entry if entry.synchronizerId.contains(daId.logical) => entry.event } withParticipantsInitialized { (alice, bob) => val c1 = createContractInstance(participant2, acmeName, acmeId, alice, bob) + .copy(representativePackageId = "should-not-be-used") val c2 = createContractInstance(participant2, acmeName, acmeId, alice, bob) - val cids = Set(c1, c2).map(_.contract.contractId.coid) - val createLETs = Set(c1, c2).map(_.contract.createdAt.time) - - queryCids() should contain noElementsOf cids + val acsBeforeRepair = queryContracts().toSet participant1_.repair.add(daId, testedProtocolVersion, Seq(c1, c2)) withSynchronizerConnected(daName) { eventually() { - queryCids() should contain allElementsOf cids - queryCreateLETs() should contain allElementsOf createLETs + val 
acsAfterRepair = queryContracts() + val newContracts = acsAfterRepair.filterNot(acsBeforeRepair) + + newContracts should have size 2 + + newContracts.zip(Seq(c1, c2)).foreach { + case (queriedCreatedEvent, expectedRepairContract) => + queriedCreatedEvent.contractId shouldBe expectedRepairContract.contractId.coid + CantonTimestamp + .fromProtoTimestamp(queriedCreatedEvent.createdAt.value) + .value + .underlying shouldBe expectedRepairContract.contract.createdAt.time + + // Limitation: ImportAcsOld assigns the representative package ID as the original contract package ID + // TODO(#24610): Adapt to assert that the representative package ID of the repair contract is used + queriedCreatedEvent.representativePackageId shouldBe expectedRepairContract.contract.templateId.packageId + } } } } @@ -399,6 +409,37 @@ sealed trait RepairServiceIntegrationTestStableLf ) } } + + // TODO(#24610): Add test cases for ContractImportMode.ACCEPT to showcase that authentication is bypassed + "contract authentication fails" in { implicit env => + withParticipantsInitialized { (alice, bob) => + import env.* + + val contract = withSynchronizerConnected(daName) { + createContractInstance(participant1, daName, daId, alice, bob, "YEN") + } + + val modifiedContract = { + val fci = contract.contract + contract.copy(contract = + LfFatContractInst.fromCreateNode( + fci.toCreateNode, + fci.createdAt.copy(fci.createdAt.time.add(Duration.ofSeconds(1337L))), + fci.authenticationData, + ) + ) + } + + loggerFactory.assertThrowsAndLogs[CommandFailure]( + participant1.repair.add( + acmeId, + testedProtocolVersion, + Seq(modifiedContract), + ), + _.commandFailureMessage should include(s"Failed to authenticate contract with id"), + ) + } + } } } @@ -798,7 +839,7 @@ sealed trait RepairServiceIntegrationTestDevLf extends RepairServiceIntegrationT template = lfNoMaintainerTemplateId, packageName = lfPackageName, arg = LfVersioned( - ExampleTransactionFactory.transactionVersion, + ExampleTransactionFactory.serializationVersion, ValueRecord(None, ImmArray(None -> ValueParty(alice.toLf))), ), ) @@ -819,13 +860,22 @@ sealed trait RepairServiceIntegrationTestDevLf extends RepairServiceIntegrationT stakeholders = Set(alice.toLf), key = Some(keyWithMaintainers.unversioned), ) + val contractHash = TestContractHasher.Sync.hash( + unsuffixedCreateNode, + contractIdSuffixer.contractHashingMethod, + ) val ContractIdSuffixer.RelativeSuffixResult( suffixedCreateNode, _, _, authenticationData, ) = contractIdSuffixer - .relativeSuffixForLocalContract(contractSalt, creationTime, unsuffixedCreateNode) + .relativeSuffixForLocalContract( + contractSalt, + creationTime, + unsuffixedCreateNode, + contractHash, + ) .valueOr(err => fail(s"Failed to generate contract suffix: $err")) val suffixedContractInstance = LfFatContractInst.fromCreateNode( @@ -840,7 +890,12 @@ sealed trait RepairServiceIntegrationTestDevLf extends RepairServiceIntegrationT daId, testedProtocolVersion, Seq( - RepairContract(daId, absolutizedContractInstance, ReassignmentCounter.Genesis) + RepairContract( + daId, + absolutizedContractInstance, + ReassignmentCounter.Genesis, + absolutizedContractInstance.templateId.packageId, + ) ), ), _.commandFailureMessage should ( @@ -859,7 +914,7 @@ sealed trait RepairServiceReferenceSequencerPostgresTest { self: SharedEnvironment => registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = 
MultiSynchronizer( Seq( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/RepairSynchronizerRecoveryIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/RepairSynchronizerRecoveryIntegrationTest.scala index 079f5bf070..1c3bd2d221 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/RepairSynchronizerRecoveryIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/RepairSynchronizerRecoveryIntegrationTest.scala @@ -13,9 +13,9 @@ import com.digitalasset.canton.examples.java.iou import com.digitalasset.canton.integration.* import com.digitalasset.canton.integration.plugins.{ UseBftSequencer, - UseCommunityReferenceBlockSequencer, UsePostgres, UseProgrammableSequencer, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.util.EntitySyntax import com.digitalasset.canton.ledger.participant.state.SequencerIndex @@ -171,7 +171,7 @@ trait RepairSynchronizerRecoveryIntegrationTest val signedModifiedRequest = signModifiedSubmissionRequest( modifiedRequest, participant1.underlying.value.sync.syncCrypto - .tryForSynchronizer(daId, defaultStaticSynchronizerParameters), + .tryForSynchronizer(daId, staticSynchronizerParameters1), ) dropSomeMessagesToP1.set(true) SendDecision.Replace(signedModifiedRequest) @@ -435,7 +435,7 @@ class RepairSynchronizerRecoveryIntegrationTestPostgres extends RepairSynchronizerRecoveryIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory) + new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory) ) registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/RepairTestUtil.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/RepairTestUtil.scala index 12a24efb8e..1befb1a38f 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/RepairTestUtil.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/RepairTestUtil.scala @@ -78,7 +78,14 @@ trait RepairTestUtil { .value ._2 - RepairContract(synchronizerId, contract.inst, ReassignmentCounter.Genesis) + RepairContract( + synchronizerId, + contract.inst, + ReassignmentCounter.Genesis, + // Contracts read from the PCS have the representative package ID the same as the original package ID + // TODO(#24610): Use the Ledger API Active contract service to get the correct representative package ID + representativePackageId = contract.templateId.packageId, + ) } protected def createContractInstance( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/RollbackUnassignmentIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/RollbackUnassignmentIntegrationTest.scala index e7fa743589..81c364754d 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/RollbackUnassignmentIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/RollbackUnassignmentIntegrationTest.scala @@ -10,11 +10,8 @@ import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands.Updat import 
com.digitalasset.canton.config.CantonRequireTypes.InstanceName import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.console.{CommandFailure, FeatureFlag} -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.util.EntitySyntax import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -34,13 +31,16 @@ sealed trait RollbackUnassignmentIntegrationTest override lazy val environmentDefinition: EnvironmentDefinition = EnvironmentDefinition.P2_S1M1_S1M1 - .addConfigTransform(ConfigTransforms.enableAdvancedCommands(FeatureFlag.Repair)) + .addConfigTransforms( + ConfigTransforms.enableAdvancedCommands(FeatureFlag.Repair) + ) .withSetup { implicit env => import env.* Seq(participant1, participant2).foreach { p => p.synchronizers.connect_local(sequencer1, alias = daName) p.synchronizers.connect_local(sequencer2, alias = acmeName) - p.dars.upload(CantonExamplesPath) + p.dars.upload(CantonExamplesPath, synchronizerId = daId) + p.dars.upload(CantonExamplesPath, synchronizerId = acmeId) } Seq(daName, acmeName).foreach { alias => @@ -225,7 +225,7 @@ final class RollbackUnassignmentIntegrationTestPostgres extends RollbackUnassignmentIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/SynchronizerRepairIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/SynchronizerRepairIntegrationTest.scala index aebe6824f8..31b9dcc414 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/SynchronizerRepairIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/repair/SynchronizerRepairIntegrationTest.scala @@ -16,11 +16,11 @@ import com.digitalasset.canton.console.{ } import com.digitalasset.canton.examples.java.iou import com.digitalasset.canton.integration.* -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.plugins.{ UseBftSequencer, - UseCommunityReferenceBlockSequencer, UsePostgres, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.util.EntitySyntax import com.digitalasset.canton.logging.{LogEntry, SuppressingLogger, SuppressionRule} @@ -123,9 +123,10 @@ sealed abstract class SynchronizerRepairIntegrationTest "Participants disconnected from lost synchronizer will now attempt to connect to new synchronizer" ) - Seq(participant1, participant2).foreach( - _.synchronizers.connect_local(newSynchronizerSequencer, alias = newSynchronizerAlias) - ) + Seq(participant1, participant2).foreach { p => + p.synchronizers.connect_local(newSynchronizerSequencer, alias = newSynchronizerAlias) + p.dars.upload(CantonExamplesPath, synchronizerId = 
newSynchronizerId) + } // Wait for topology state to appear before disconnecting again. clue("newSynchronizer initialization timed out") { @@ -323,7 +324,7 @@ final class SynchronizerRepairReferenceIntegrationTestPostgres extends SynchronizerRepairIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, sequencerGroups = MultiSynchronizer( Seq( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/CryptoIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/CryptoIntegrationTest.scala new file mode 100644 index 0000000000..323351abd9 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/CryptoIntegrationTest.scala @@ -0,0 +1,377 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security + +import com.digitalasset.canton.SynchronizerAlias +import com.digitalasset.canton.admin.api.client.data.ParticipantStatus.SubmissionReady +import com.digitalasset.canton.admin.api.client.data.StaticSynchronizerParameters +import com.digitalasset.canton.config.CantonRequireTypes.InstanceName +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.config.{CryptoConfig, CryptoProvider, DbConfig} +import com.digitalasset.canton.console.CommandFailure +import com.digitalasset.canton.crypto.SigningKeyUsage +import com.digitalasset.canton.integration.bootstrap.InitializedSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{UseBftSequencer, UseReferenceBlockSequencer} +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + ConfigTransforms, + EnvironmentDefinition, + SharedEnvironment, +} +import com.digitalasset.canton.logging.SuppressionRule +import com.digitalasset.canton.participant.admin.PingService +import com.digitalasset.canton.topology.PartyId +import com.digitalasset.canton.topology.transaction.DelegationRestriction.CanSignAllButNamespaceDelegations +import com.digitalasset.canton.topology.transaction.ParticipantPermission +import org.slf4j.event.Level + +class JceCryptoReferenceIntegrationTest + extends CryptoIntegrationTest(CryptoConfig(provider = CryptoProvider.Jce)) + with ReconnectSynchronizerAutoInitIntegrationTest { + registerPlugin( + new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory, sequencerGroups) + ) +} + +class JceCryptoBftOrderingIntegrationTest + extends CryptoIntegrationTest(CryptoConfig(provider = CryptoProvider.Jce)) + with ReconnectSynchronizerAutoInitIntegrationTest { + registerPlugin( + new UseBftSequencer(loggerFactory, sequencerGroups) + ) +} + +abstract class CryptoIntegrationTest(cryptoConfig: CryptoConfig) + extends CommunityIntegrationTest + with SharedEnvironment { + + override lazy val environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P2S2M2_Manual + .addConfigTransforms( + ConfigTransforms.setCrypto(cryptoConfig) + ) + // force sequencer2 to use the same publicApi port as sequencer1 + .addConfigTransform { config => + val seq1 = InstanceName.tryCreate("sequencer1") + val seq2 = InstanceName.tryCreate("sequencer2") + import 
monocle.macros.syntax.lens.* + config + .focus(_.sequencers) + .index(seq2) + .modify( + _.focus(_.publicApi.internalPort) + .replace(config.sequencers(seq1).publicApi.internalPort) + ) + } + .withSetup { implicit env => + import env.* + + participants.local.start() + sequencer1.start() + mediator1.start() + + val staticSynchronizerParameters = StaticSynchronizerParameters.defaults( + sequencer1.config.crypto, + testedProtocolVersion, + ) + + // initialize the first synchronizer where the 'regular' tests will be run + val synchronizerId = bootstrap.synchronizer( + daName.unwrap, + sequencers = Seq(sequencer1), + mediators = Seq(mediator1), + synchronizerOwners = Seq(sequencer1), + synchronizerThreshold = PositiveInt.one, + staticSynchronizerParameters, + ) + + env.initializedSynchronizers.put( + daName, + InitializedSynchronizer( + synchronizerId, + staticSynchronizerParameters.toInternal, + synchronizerOwners = Set(sequencer1), + ), + ) + + participants.all.synchronizers.connect_local(sequencer1, alias = daName) + } + + s"With crypto provider ${cryptoConfig.provider}" should { + + "make a ping" in { implicit env => + import env.* + + eventually() { + assert( + participant1.synchronizers + .list_connected() + .map(_.synchronizerAlias.unwrap) + .contains(daName.unwrap) + ) + assert( + participant2.synchronizers + .list_connected() + .map(_.synchronizerAlias.unwrap) + .contains(daName.unwrap) + ) + } + + loggerFactory.suppress( + SuppressionRule.forLogger[PingService] && SuppressionRule.Level(Level.WARN) + ) { + eventually() { + assertPingSucceeds(participant1, participant2) + } + } + + } + + // exporting private keys is not allowed when using a KMS provider + if (cryptoConfig.provider != CryptoProvider.Kms) { + "export and import private keys pair offline" in { implicit env => + import env.* + // architecture-handbook-entry-begin: ExportKeyForOfflineStorage + // fingerprint of namespace giving key + val participantId = participant1.id + val namespace = participantId.fingerprint + + // create new key + val name = "new-identity-key" + val targetKey = + participant1.keys.secret + .generate_signing_key(name = name, usage = SigningKeyUsage.NamespaceOnly) + + // create an intermediate certificate authority through a namespace delegation + // we do this by adding a new namespace delegation for the newly generated key + // and we sign this using the root namespace key + participant1.topology.namespace_delegations.propose_delegation( + participantId.namespace, + targetKey, + CanSignAllButNamespaceDelegations, + ) + + // export namespace key to file for offline storage, in this example, it's a temporary file + better.files.File.usingTemporaryFile("namespace", ".key") { privateKeyFile => + participant1.keys.secret.download_to(namespace, privateKeyFile.toString) + + // delete namespace key (very dangerous ...) 
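+          // deleting is only safe here because the key pair was just exported to `privateKeyFile`;
+          // without that backup, the namespace key would be unrecoverable after this step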
+ participant1.keys.secret.delete(namespace, force = true) + + // architecture-handbook-entry-end: ExportKeyForOfflineStorage + + // key should not be present anymore on p1 + participant1.keys.secret.list(namespace.unwrap) shouldBe empty + + // show that we can still add new parties using the new delegate key + participant1.topology.party_to_participant_mappings.propose( + PartyId(participantId.uid.tryChangeId("NewParty")), + newParticipants = List(participantId -> ParticipantPermission.Submission), + signedBy = Seq(targetKey.fingerprint), + ) + + // ensure party pops up on the synchronizers + eventually() { + participant1.parties.list(filterParty = "NewParty") should not be empty + } + + val other = participant1 // using other for the manual ... + // architecture-handbook-entry-begin: ImportFromOfflineStorage + // import it back wherever needed + other.keys.secret + .upload_from(privateKeyFile.toString, Some("newly-imported-identity-key")) + // architecture-handbook-entry-end: ImportFromOfflineStorage + + participant1.keys.secret.list(namespace.unwrap) should not be empty + } + } + + "export and import private key pairs offline to a different participant" in { implicit env => + import env.* + + val fingerprint = participant1.fingerprint + + better.files.File.usingTemporaryFile("namespace", ".key") { keyPairFile => + participant1.keys.secret.download_to(fingerprint, keyPairFile.toString) + + participant2.keys.secret.list(fingerprint.unwrap) shouldBe empty + participant2.keys.secret + .upload_from(keyPairFile.toString, Some("newly-imported-identity-key")) + participant2.keys.secret.list(fingerprint.unwrap) should not be empty + } + } + + "export and import private key pairs with a password" in { implicit env => + import env.* + + // Generate a new key for export and export with a password for encryption + val key = participant1.keys.secret + .generate_signing_key("encrypted-export-test", SigningKeyUsage.ProtocolOnly) + + val keypairPlain = participant1.keys.secret.download(key.id) + val keypairEnc = participant1.keys.secret.download(key.id, password = Some("hello world")) + + keypairPlain shouldNot equal(keypairEnc) + + participant2.keys.secret.upload( + keypairEnc, + name = Some("encrypted-import-test"), + password = Some("hello world"), + ) + + participant2.keys.secret.list(key.id.unwrap) should not be empty + } + + "export and fail import of private key pairs with invalid passwords" in { implicit env => + import env.* + + // Generate a new key for export and export with a password for encryption + val key = participant1.keys.secret + .generate_signing_key("encrypted-export-test-2", SigningKeyUsage.ProtocolOnly) + val keypair = participant1.keys.secret.download(key.id, password = Some("hello world")) + + // Invalid password + assertThrowsAndLogsCommandFailures( + participant2.keys.secret.upload( + keypair, + name = Some("encrypted-import-test-2"), + password = Some("wrong password"), + ), + _.errorMessage should include("Failed to decrypt"), + ) + + // no password + assertThrowsAndLogsCommandFailures( + participant2.keys.secret.upload( + keypair, + name = Some("encrypted-import-test-2"), + ), + _.errorMessage should include("Failed to parse crypto key pair"), + ) + } + } + } +} + +/** Test that participants are able to automatically reconnect to a synchronizer after it has + * restarted with a new ID/Alias. To simulate this behavior we stop the original synchronizer and + * start a new one that uses the same endpoints as the first synchronizer. 
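+  * Reconnecting under the old alias is expected to fail, since the reset synchronizer has a new
+  * identity; connecting under a fresh alias should then succeed.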
+ */ +sealed trait ReconnectSynchronizerAutoInitIntegrationTest { + self: CommunityIntegrationTest => + + protected val sequencerGroups: MultiSynchronizer = + MultiSynchronizer(Seq(Set("sequencer1"), Set("sequencer2")).map(_.map(InstanceName.tryCreate))) + + "be able to connect to a reset synchronizer with a new alias" in { implicit env => + import env.* + eventually() { + val healthStatus = health.status() + val (sequencerStatus, participantStatus) = + (healthStatus.sequencerStatus, healthStatus.participantStatus) + sequencerStatus(sequencer1.name).connectedParticipants should contain.allOf( + participantStatus(participant1.name).id, + participantStatus(participant2.name).id, + ) + participantStatus(participant1.name).connectedSynchronizers should contain( + sequencerStatus(sequencer1.name).synchronizerId -> SubmissionReady(true) + ) + participantStatus(participant2.name).connectedSynchronizers should contain( + sequencerStatus(sequencer1.name).synchronizerId -> SubmissionReady(true) + ) + } + + val anotherSynchronizerAlias = SynchronizerAlias.tryCreate("anotherSynchronizer") + loggerFactory.suppressWarningsAndErrors { + logger.info(s"stopping synchronizer $daName") + mediator1.stop() + sequencer1.stop() + + // we start a new synchronizer with a different alias + logger.info(s"starting synchronizer ${anotherSynchronizerAlias.unwrap}") + sequencer2.start() + mediator2.start() + + val staticSynchronizerParameters = StaticSynchronizerParameters.defaults( + sequencer1.config.crypto, + testedProtocolVersion, + ) + + val anotherSynchronizerId = bootstrap.synchronizer( + anotherSynchronizerAlias.unwrap, + sequencers = Seq(sequencer2), + mediators = Seq(mediator2), + synchronizerOwners = Seq(sequencer2), + synchronizerThreshold = PositiveInt.one, + staticSynchronizerParameters, + ) + + env.initializedSynchronizers.put( + anotherSynchronizerAlias, + InitializedSynchronizer( + anotherSynchronizerId, + staticSynchronizerParameters.toInternal, + synchronizerOwners = Set(sequencer2), + ), + ) + + // after synchronizer reset, participants get automatically disconnected + eventually() { + val healthStatus = health.status() + val (sequencerStatus, participantStatus) = + (healthStatus.sequencerStatus, healthStatus.participantStatus) + sequencerStatus(sequencer2.name).connectedParticipants shouldBe empty + participantStatus(participant1.name).connectedSynchronizers shouldBe empty + participantStatus(participant2.name).connectedSynchronizers shouldBe empty + } + } + // connecting to the reset synchronizer with the same alias is not possible, since the synchronizer now has a new identity + loggerFactory.suppressWarningsErrorsExceptions[CommandFailure]( + participant1.synchronizers.reconnect(daName) + ) + loggerFactory.suppressWarningsErrorsExceptions[CommandFailure]( + participant2.synchronizers.reconnect(daName) + ) + + // connect with a new alias and it all should work + participant1.synchronizers.connect_local( + sequencer2, + anotherSynchronizerAlias, + manualConnect = false, + ) + participant2.synchronizers.connect_local( + sequencer2, + anotherSynchronizerAlias, + manualConnect = false, + ) + + eventually() { + val healthStatus = health.status() + val (sequencerStatus, participantStatus) = + (healthStatus.sequencerStatus, healthStatus.participantStatus) + sequencerStatus(sequencer2.name).connectedParticipants should contain.allOf( + participantStatus(participant1.name).id, + participantStatus(participant2.name).id, + ) + participantStatus(participant1.name).connectedSynchronizers should contain( + 
sequencerStatus(sequencer2.name).synchronizerId -> SubmissionReady(true)
+      )
+      participantStatus(participant2.name).connectedSynchronizers should contain(
+        sequencerStatus(sequencer2.name).synchronizerId -> SubmissionReady(true)
+      )
+
+      // we need to wait until participant1 has observed both participants, as otherwise the ping below will fail
+      participant1.parties
+        .list(
+          filterParticipant = participantStatus(participant2.name).id.filterString,
+          synchronizerIds = sequencerStatus(sequencer2.name).synchronizerId.logical,
+        ) should have length 1
+    }
+
+    logger.info(s"participant1 pings participant2")
+    assertPingSucceeds(participant1, participant2)
+  }
+
+}
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/KeyManagementIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/KeyManagementIntegrationTest.scala
index 5d6d62c252..57f85c46a8 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/KeyManagementIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/KeyManagementIntegrationTest.scala
@@ -17,8 +17,8 @@ import com.digitalasset.canton.crypto.store.CryptoPrivateStoreExtended
 import com.digitalasset.canton.integration.*
 import com.digitalasset.canton.integration.plugins.{
   UseBftSequencer,
-  UseCommunityReferenceBlockSequencer,
   UsePostgres,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.topology.*
 import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId
@@ -673,7 +673,7 @@ sealed trait KeyManagementIntegrationTest
 class KeyManagementReferenceIntegrationTestPostgres extends KeyManagementIntegrationTest {
   registerPlugin(new UsePostgres(loggerFactory))
-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
 }
 
 class KeyManagementBftOrderingIntegrationTestPostgres extends KeyManagementIntegrationTest {
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/KeyRotationWithMultipleSequencersIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/KeyRotationWithMultipleSequencersIntegrationTest.scala
index 5f0680dc1a..db0dc2fa34 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/KeyRotationWithMultipleSequencersIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/KeyRotationWithMultipleSequencersIntegrationTest.scala
@@ -5,10 +5,7 @@ package com.digitalasset.canton.integration.tests.security
 
 import com.digitalasset.canton.config.DbConfig
 import com.digitalasset.canton.crypto.{KeyPurpose, SigningKeyUsage}
-import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
-  UsePostgres,
-}
+import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer}
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
   EnvironmentDefinition,
@@ -95,7 +92,7 @@ class KeyRotationWithMultipleSequencersReferenceIntegrationTestPostgres
     extends KeyRotationWithMultipleSequencersIntegrationTest {
   registerPlugin(new UsePostgres(loggerFactory))
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](
+    new UseReferenceBlockSequencer[DbConfig.Postgres](
       loggerFactory
     )
   )
diff
--git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/SecurityTestHelpers.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/SecurityTestHelpers.scala new file mode 100644 index 0000000000..01f665c730 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/SecurityTestHelpers.scala @@ -0,0 +1,737 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security + +import cats.syntax.alternative.* +import cats.syntax.functorFilter.* +import cats.syntax.parallel.* +import com.daml.ledger.api.v2.completion.Completion +import com.daml.ledger.api.v2.transaction.Transaction +import com.daml.ledger.api.v2.transaction.Transaction.toJavaProto +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_LEDGER_EFFECTS +import com.daml.ledger.javaapi +import com.daml.ledger.javaapi.data.codegen.ContractCompanion +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands.UpdateService.{ + TransactionWrapper, + UpdateWrapper, +} +import com.digitalasset.canton.console.{ + LocalInstanceReference, + LocalMediatorReference, + LocalParticipantReference, + LocalSequencerReference, + ParticipantReference, + SequencerReference, +} +import com.digitalasset.canton.crypto.{HashOps, SyncCryptoApi, SynchronizerCryptoClient} +import com.digitalasset.canton.data.ViewType +import com.digitalasset.canton.error.MediatorError +import com.digitalasset.canton.integration.tests.security.SecurityTestHelpers.{ + MessageTransform, + SignedMessageTransform, +} +import com.digitalasset.canton.integration.util.UpdateFormatHelpers.getUpdateFormat +import com.digitalasset.canton.integration.{HasCycleUtils, TestConsoleEnvironment} +import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil +import com.digitalasset.canton.protocol.LocalRejectError.ConsistencyRejections.LockedContracts +import com.digitalasset.canton.protocol.messages.* +import com.digitalasset.canton.protocol.messages.Verdict.MediatorReject +import com.digitalasset.canton.protocol.{LocalRejectError, RequestId} +import com.digitalasset.canton.sequencing.protocol.{SignedContent, SubmissionRequest} +import com.digitalasset.canton.synchronizer.mediator.MediatorVerdict +import com.digitalasset.canton.synchronizer.sequencer.ProgrammableSequencerPolicies.{ + isConfirmationResponse, + isConfirmationResult, +} +import com.digitalasset.canton.synchronizer.sequencer.{HasProgrammableSequencer, SendDecision} +import com.digitalasset.canton.topology.{Member, ParticipantId, PartyId, PhysicalSynchronizerId} +import com.digitalasset.canton.util.FutureInstances.* +import com.digitalasset.canton.util.OptionUtil +import com.digitalasset.canton.{BaseTest, LfPartyId} +import io.grpc.Status.Code +import io.grpc.stub.StreamObserver +import monocle.macros.GenLens +import monocle.syntax.all.* +import org.scalatest.Assertion + +import java.time.Duration as JDuration +import java.util.UUID +import java.util.concurrent.atomic.AtomicReference +import scala.Ordering.Implicits.* +import scala.collection.concurrent.TrieMap +import scala.collection.mutable +import scala.concurrent.{ExecutionContext, Future, Promise, blocking} +import scala.util.Try +import scala.util.control.NonFatal + +/** A collection of utility methods for intercepting 
messages at the sequencer and for manipulating
+  * messages.
+  *
+  * Limitation: the utility methods are tailored to transaction processing. Further tweaking is
+  * required to apply them to transfer processing.
+  */
+trait SecurityTestHelpers extends SecurityTestLensUtils {
+  self: BaseTest & HasProgrammableSequencer & HasCycleUtils =>
+
+  // participant verdicts
+  lazy val localApprove: LocalApprove = LocalApprove(testedProtocolVersion)
+  lazy val localReject: LockedContracts.Reject =
+    LocalRejectError.ConsistencyRejections.LockedContracts.Reject(Seq.empty)
+
+  // mediator verdicts
+  lazy val mediatorApprove: Verdict.Approve = Verdict.Approve(testedProtocolVersion)
+  def participantReject(participantId: ParticipantId): Verdict.ParticipantReject =
+    Verdict.ParticipantReject(
+      NonEmpty(
+        List,
+        (Set.empty[LfPartyId], participantId, localReject.toLocalReject(testedProtocolVersion)),
+      ),
+      testedProtocolVersion,
+    )
+  lazy val mediatorReject: MediatorReject =
+    MediatorVerdict
+      .MediatorReject(MediatorError.MalformedMessage.Reject("malformed message test cause"))
+      .toVerdict(testedProtocolVersion)
+
+  /** Runs `body` while replacing confirmation responses at the sequencer. As a reminder,
+    * confirmation responses are messages sent by the participants to the mediator, through the
+    * sequencer.
+    *
+    * The method will apply every `transform` in `messageTransforms` to an incoming confirmation
+    * response `message` yielding a sequence `newMessages` of confirmation responses. It will
+    * replace the original response `message` by `newMessages`.
+    *
+    * If `newMessages` is empty, the method will advance the sim clock (if any) by the participant
+    * confirmation response timeout.
+    *
+    * The method will fail if the sequencer does not receive a confirmation response for the
+    * mediator at `sequencerRef`. The method will also fail if the sequencer receives a
+    * confirmation response for the mediator at `sequencerRef` from a sender other than `senderRef`.
+    *
+    * @param senderRef
+    *   only confirmation responses sent by `senderRef` will be replaced
+    * @param sequencerRef
+    *   only confirmation responses submitted to this sequencer will be replaced
+    */
+  def replacingConfirmationResponses[A](
+      senderRef: LocalParticipantReference,
+      sequencerRef: LocalSequencerReference,
+      synchronizerId: PhysicalSynchronizerId,
+      messageTransforms: SignedMessageTransform[ConfirmationResponses]*
+  )(body: => A)(implicit env: TestConsoleEnvironment): A = {
+
+    val receivedResponseP = Promise[Unit]()
+
+    val confirmationResponseTimeout =
+      sequencerRef.topology.synchronizer_parameters
+        .get_dynamic_synchronizer_parameters(synchronizerId)
+        .confirmationResponseTimeout
+        .asJava
+    val simClockO = env.environment.simClock
+
+    val result = getProgrammableSequencer(sequencerRef.name).withSendPolicy_(
+      s"Replacing confirmation responses",
+      message =>
+        if (!isConfirmationResponse(message)) SendDecision.Process
+        else if (message.sender == senderRef.id) {
+          receivedResponseP.trySuccess(())
+
+          val newMessages = messageTransforms.map(_.apply(senderRef, message, synchronizerId))
+
+          // Unchecked needed because of: https://github.com/scala/bug/issues/12252
+          (newMessages: @unchecked) match {
+            case head +: tail => SendDecision.Replace(head, tail*)
+
+            case Seq() =>
+              // Advance the clock to trigger a timeout at the mediator.
+              simClockO.foreach(_.advance(confirmationResponseTimeout))
+              // This extra second is needed because the clock may be slightly behind the request time.
+              simClockO.foreach(_.advance(JDuration.ofSeconds(1)))
+
+              SendDecision.Drop
+          }
+        } else {
+          receivedResponseP.tryComplete(
+            Try(fail(s"Received unexpected verdict from ${message.sender}"))
+          )
+          SendDecision.Process
+        },
+    )(body)
+
+    receivedResponseP.future.futureValue
+
+    result
+  }
+
+  /** Collects encrypted view messages that are part of confirmation requests while executing
+    * `body`, then applies `checkMessages` to the collected messages. Only messages sent by the
+    * specified sender are included.
+    */
+  def checkingEncryptedViewMessages[A](
+      hashOps: HashOps,
+      sender: ParticipantReference,
+      sequencerRef: SequencerReference,
+  )(
+      body: => A
+  )(
+      checkMessages: Seq[EncryptedViewMessage[ViewType]] => Assertion
+  ): A = {
+
+    val requestsB = Seq.newBuilder[EncryptedViewMessage[ViewType]]
+    val result = getProgrammableSequencer(sequencerRef.name).withSendPolicy_(
+      s"Collecting encrypted view messages",
+      { message =>
+        if (
+          message.isConfirmationRequest &&
+          sender.id == message.sender
+        ) {
+          blocking {
+            requestsB.synchronized {
+              val allProtocolMessages = message.batch.envelopes.map(
+                _.openEnvelope(hashOps, testedProtocolVersion)
+                  .valueOrFail("open envelopes")
+                  .protocolMessage
+              )
+              requestsB ++= allProtocolMessages.collect {
+                case encryptedView: EncryptedViewMessage[ViewType] => encryptedView
+              }
+            }
+          }
+        }
+        SendDecision.Process
+      },
+    )(body)
+
+    val requests = blocking {
+      requestsB.synchronized {
+        requestsB.result()
+      }
+    }
+
+    checkMessages(requests)
+
+    result
+  }
+
+  /** Collects confirmation responses while running `body` and applies `checkResponses` to the
+    * collected confirmation responses.
+    *
+    * It will only collect responses from `participantRefs` to any mediator group. It will check the
+    * `requestId` of the collected responses and only collect responses with the earliest requestId;
+    * as a result, all collected responses will refer to the first request.
+    */
+  def checkingConfirmationResponses[A](
+      participantRefs: Seq[ParticipantReference],
+      sequencerRef: SequencerReference,
+  )(
+      body: => A
+  )(
+      checkResponses: Map[ParticipantId, Seq[ConfirmationResponse]] => Assertion
+  )(implicit env: TestConsoleEnvironment): A = {
+    import env.*
+
+    // Access to responsesB needs to be synchronized on responsesB itself
+    val responsesB = Seq.newBuilder[ConfirmationResponses]
+
+    // Earliest request id seen so far
+    val requestIdO = new AtomicReference[Option[RequestId]](None)
+
+    val result = getProgrammableSequencer(sequencerRef.name).withSendPolicy_(
+      s"Collecting confirmation responses",
+      { message =>
+        blocking {
+          responsesB.synchronized {
+            if (
+              isConfirmationResponse(message) &&
+              participantRefs.exists(_.id == message.sender)
+            ) {
+              val messages = traverseMessages[ConfirmationResponses](_ => None).getAll(message)
+
+              val requestId =
+                requestIdO
+                  .updateAndGet(
+                    OptionUtil.mergeWith(_, messages.headOption.map(_.requestId)) {
+                      case (oldRequestId, newRequestId) =>
+                        if (newRequestId >= oldRequestId) oldRequestId
+                        else {
+                          // The messages refer to an earlier requestId.
+                          // Discard collected responses, because they do not refer to the earliest requestId.
+                          responsesB.clear()
+                          newRequestId
+                        }
+                    }
+                  )
+                  .value
+
+              responsesB ++= messages.filter(_.requestId == requestId)
+            }
+          }
+        }
+
+        SendDecision.Process
+      },
+    )(body)
+
+    val responses = blocking {
+      responsesB.synchronized {
+        responsesB.result()
+      }
+    }
+
+    val responsesGroupedBySender =
+      responses.groupMap(_.sender)(_.responses).view.mapValues(_.flatten).toMap
+
+    checkResponses(responsesGroupedBySender)
+
+    result
+  }
+
+  /** Runs `body` while replacing confirmation results at the sequencer. As a reminder, confirmation
+    * results are messages sent by the mediator to the participants, through the sequencer.
+    *
+    * The method will apply every `transform` in `messageTransforms` to an incoming confirmation
+    * result `message` yielding a sequence `newMessages` of confirmation results. It will replace
+    * the original result `message` by `newMessages`.
+    *
+    * If `newMessages` is empty, the method will advance the sim clock (if any) by the participant
+    * confirmation response timeout plus the mediator reaction timeout.
+    *
+    * The method will fail if the sequencer does not receive a confirmation result from the
+    * mediator at `sequencerRef`.
+    *
+    * Only confirmation results submitted to `sequencerRef` of `synchronizerId` and originating from
+    * `mediatorRef` will be replaced.
+    *
+    * @return
+    *   The result of `body` as well as intercepted messages, by sender. It can be used to check that
+    *   honest participants rejected a transaction.
+    */
+  def replacingConfirmationResult[A](
+      synchronizerId: PhysicalSynchronizerId,
+      sequencerRef: LocalSequencerReference,
+      mediatorRef: LocalMediatorReference,
+      messageTransforms: SignedMessageTransform[ConfirmationResultMessage]*
+  )(body: => A)(implicit env: TestConsoleEnvironment): (A, Map[Member, Seq[SubmissionRequest]]) = {
+
+    val receivedResultP = Promise[Unit]()
+
+    val synchronizerParameters =
+      sequencerRef.topology.synchronizer_parameters.get_dynamic_synchronizer_parameters(
+        synchronizerId
+      )
+    val simClockO = env.environment.simClock
+
+    val interceptedMessages = TrieMap[Member, Seq[SubmissionRequest]]()
+
+    val result = getProgrammableSequencer(sequencerRef.name).withSendPolicy_(
+      "Replacing confirmation results",
+      message => {
+        interceptedMessages.updateWith(message.sender) {
+          case Some(messages) =>
+            // Number of messages is sufficiently small that appending is fine
+            Some(messages :+ message)
+          case None => Some(Seq(message))
+        }
+
+        if (isConfirmationResult(message, mediatorRef.id)) {
+          receivedResultP.trySuccess(())
+
+          val newMessages = messageTransforms.map(_.apply(mediatorRef, message, synchronizerId))
+
+          // Unchecked needed because of: https://github.com/scala/bug/issues/12252
+          (newMessages: @unchecked) match {
+            case head +: tail =>
+              SendDecision.Replace(head, tail*)
+
+            case Seq() =>
+              // Advance the clock to trigger a timeout at the participants.
+              simClockO.foreach(
+                _.advance(synchronizerParameters.confirmationResponseTimeout.asJava)
+              )
+              // This extra second is needed because the clock may be slightly behind the request time.
+              simClockO.foreach(_.advance(JDuration.ofSeconds(1)))
+              simClockO.foreach(_.advance(synchronizerParameters.mediatorReactionTimeout.asJava))
+              // Additionally, advance the sim clock by the mediator's observation timeout
+              // so that it observes the timeout. Otherwise, the warning about the timeout
+              // may spill over into the next test.
+              simClockO.foreach { simClock =>
+                val minObservationDuration =
+                  mediatorRef.underlying.value.replicaManager.mediatorRuntime.value.mediator.timeTracker.config.minObservationDuration
+                simClock.advance(minObservationDuration.asJava.plusSeconds(1))
+              }
+              SendDecision.Drop
+          }
+        } else SendDecision.Process
+      },
+    )(body)
+
+    receivedResultP.future.futureValue
+
+    (result, interceptedMessages.toMap)
+  }
+
+  /** Creates a [[SignedMessageTransform]] to replace the verdict in a confirmation response. The
+    * transform will automatically update the signature of the confirmation response, as well as
+    * sign and send the resulting submission request as the original.
+    */
+  def withLocalVerdict(
+      verdict: LocalVerdict
+  )(implicit executionContext: ExecutionContext): SignedMessageTransform[ConfirmationResponses] =
+    signedTransformOf(
+      traverseMessages[ConfirmationResponses](_)
+        .modify(cr =>
+          ConfirmationResponses.tryCreate(
+            cr.requestId,
+            cr.rootHash,
+            cr.synchronizerId,
+            cr.sender,
+            cr.responses.map(cr => ConfirmationResponse.localVerdictUnsafe.replace(verdict)(cr)),
+            testedProtocolVersion,
+          )
+        )(_)
+    )
+
+  def withLocalVerdict(
+      verdict: LocalRejectError
+  )(implicit executionContext: ExecutionContext): SignedMessageTransform[ConfirmationResponses] =
+    withLocalVerdict(verdict.toLocalReject(testedProtocolVersion))
+
+  /** Creates a [[SignedMessageTransform]] to replace the verdict in a confirmation response.
+    *
+    * @param senderRef
+    *   member reference used as the submission request sender and to sign the confirmation response
+    *   and submission request.
+    */
+  def withLocalVerdict(
+      verdict: LocalVerdict,
+      senderRef: LocalInstanceReference,
+  )(implicit executionContext: ExecutionContext): SignedMessageTransform[ConfirmationResponses] =
+    signedTransformWithSender(
+      traverseMessages[ConfirmationResponses](_)
+        .modify(cr =>
+          ConfirmationResponses.tryCreate(
+            cr.requestId,
+            cr.rootHash,
+            cr.synchronizerId,
+            cr.sender,
+            cr.responses.map(cr => ConfirmationResponse.localVerdictUnsafe.replace(verdict)(cr)),
+            testedProtocolVersion,
+          )
+        )(_),
+      senderRef,
+    )
+
+  /** Creates a [[SignedMessageTransform]] to replace the verdict in a transaction result message.
+    * The transform will automatically update the signature of the confirmation result, as well as
+    * sign and send the resulting submission request as the original.
+    */
+  def withMediatorVerdict(
+      verdict: Verdict
+  )(implicit
+      executionContext: ExecutionContext
+  ): SignedMessageTransform[ConfirmationResultMessage] =
+    signedTransformOf(
+      traverseMessages[ConfirmationResultMessage](_)
+        .andThen(GenLens[ConfirmationResultMessage](_.verdict))
+        .replace(verdict)(_)
+    )
+
+  /** Creates a [[SignedMessageTransform]] to replace the verdict in a transaction result message.
+    *
+    * @param senderRef
+    *   member reference used as the submission request sender and to sign the verdict and
+    *   submission request.
+    */
+  def withMediatorVerdict(
+      verdict: Verdict,
+      senderRef: LocalInstanceReference,
+  )(implicit
+      executionContext: ExecutionContext
+  ): SignedMessageTransform[ConfirmationResultMessage] =
+    signedTransformWithSender(
+      traverseMessages[ConfirmationResultMessage](_)
+        .andThen(GenLens[ConfirmationResultMessage](_.verdict))
+        .replace(verdict)(_),
+      senderRef,
+    )
+
+  /** Convenience method to create a [[SignedMessageTransform]] from a [[MessageTransform]], signing
+    * and sending the submission request as the original.
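+    * For instance, the `withLocalVerdict` and `withMediatorVerdict` helpers above delegate to this
+    * method (or to `signedTransformWithSender`) so that the modified messages still carry valid
+    * signatures.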
+    *
+    * @param useCurrentSnapshot
+    *   use the current snapshot when signing the message content
+    */
+  def signedTransformOf[A <: SignedProtocolMessageContent](
+      transform: MessageTransform[A],
+      useCurrentSnapshot: Boolean = false,
+  )(implicit
+      executionContext: ExecutionContext
+  ): SignedMessageTransform[A] =
+    (senderRef, submissionRequest, synchronizerId) => {
+      val (sender, syncCrypto) = senderAndCryptoFromRef(senderRef, synchronizerId)
+
+      (transform(updateSignatureUsing(syncCrypto, useCurrentSnapshot), _))
+        .andThen(_.focus(_.sender).replace(sender))
+        // TODO(i16512): See if we should pass `useCurrentSnapshot` to sign the submission request
+        .andThen(signModifiedSubmissionRequest(_, syncCrypto))(submissionRequest)
+    }
+
+  /** Convenience method to create a [[SignedMessageTransform]] from a [[MessageTransform]], signing
+    * and sending the submission request as `senderRef`.
+    *
+    * @param useCurrentSnapshot
+    *   use the current snapshot when signing the message content
+    */
+  def signedTransformWithSender[A <: SignedProtocolMessageContent](
+      transform: MessageTransform[A],
+      senderRef: LocalInstanceReference,
+      useCurrentSnapshot: Boolean = false,
+  )(implicit
+      executionContext: ExecutionContext
+  ): SignedMessageTransform[A] =
+    (_, submissionRequest, synchronizerId) => {
+      val (sender, syncCrypto) = senderAndCryptoFromRef(senderRef, synchronizerId)
+
+      (transform(updateSignatureUsing(syncCrypto, useCurrentSnapshot), _))
+        .andThen(_.focus(_.sender).replace(sender))
+        // TODO(i16512): See if we should pass `useCurrentSnapshot` to sign the submission request
+        .andThen(signModifiedSubmissionRequest(_, syncCrypto))(submissionRequest)
+    }
+
+  private def senderAndCryptoFromRef(
+      ref: LocalInstanceReference,
+      synchronizerId: PhysicalSynchronizerId,
+  ): (Member, SynchronizerCryptoClient) = ref match {
+    case p: LocalParticipantReference =>
+      (
+        p.id: Member,
+        p.underlying.value.sync.syncCrypto
+          .tryForSynchronizer(synchronizerId, defaultStaticSynchronizerParameters),
+      )
+    case m: LocalMediatorReference =>
+      (m.id, m.underlying.value.replicaManager.mediatorRuntime.value.mediator.syncCrypto)
+    case _ => fail(s"Unexpected reference: $ref")
+  }
+
+  private def updateSignatureUsing[A <: SignedProtocolMessageContent](
+      syncCrypto: SynchronizerCryptoClient,
+      useCurrentSnapshot: Boolean,
+  ): A => Option[SyncCryptoApi] =
+    message => {
+      if (useCurrentSnapshot) Some(syncCrypto.currentSnapshotApproximation)
+      else
+        Some(
+          message.signingTimestamp
+            .map(syncCrypto.awaitSnapshot(_).futureValueUS)
+            .getOrElse(syncCrypto.headSnapshot)
+        )
+    }
+
+  /** Tracks completions and transactions while executing a piece of code `body`.
+    *
+    * After `body` has terminated, run a cycle on every tracked participant and stop tracking as
+    * soon as the participant emits a transaction/completion corresponding to the cycle. The
+    * transaction/completion corresponding to the cycle will not be included in the tracking result.
+    * Tracking will also be aborted if `body` throws an exception.
+    *
+    * @param participants
+    *   the participants to track
+    * @param extraTrackedParties
+    *   the parties to track - the admin parties of `participants` will always be tracked.
+ * @return + * the result of `body` and the tracking result + */ + def trackingLedgerEvents[A]( + participants: Seq[ParticipantReference], + extraTrackedParties: Seq[PartyId], + )(body: => A)(implicit executionContext: ExecutionContext): ( + A, + TrackingResult, + ) = { + // Completes only if there is a submission failure + val submissionFailedP = Promise[Unit]() + + // Command id used to identify the flush commands at the end + val finishCommandId = s"finish-tracking-${UUID.randomUUID()}" + + val trackedParties = extraTrackedParties ++ participants.map(_.id.adminParty) + + // Subscribe to transaction trees & completions for all participants + val (completionsF, transactionsF) = (for (participant <- participants) yield { + val completionObserver = + new CollectUntilObserver[Completion](_.commandId == finishCommandId) + val completionCloseable = participant.ledger_api.completions + .subscribe( + completionObserver, + trackedParties, + beginOffsetExclusive = participant.ledger_api.state.end(), + ) + completionObserver.result.onComplete(_ => completionCloseable.close()) + submissionFailedP.future.onComplete(_ => completionCloseable.close()) + + val transactionObserver = + new CollectUntilObserver[UpdateWrapper]({ + case TransactionWrapper(tt) => tt.commandId == finishCommandId + case _ => false + }) + val updateFormat = getUpdateFormat( + partyIds = trackedParties.toSet, + filterTemplates = Seq.empty, + transactionShape = TRANSACTION_SHAPE_LEDGER_EFFECTS, + includeReassignments = true, + ) + val ledgerEnd = participant.ledger_api.state.end() + val transactionCloseable = participant.ledger_api.updates + .subscribe_updates( + transactionObserver, + updateFormat, + beginOffsetExclusive = ledgerEnd, + ) + transactionObserver.result.onComplete(_ => transactionCloseable.close()) + submissionFailedP.future.onComplete(_ => transactionCloseable.close()) + val transactions = transactionObserver.result.map(_.collect { + case TransactionWrapper(transaction) => transaction + }) + + ( + participant -> completionObserver.result, + participant -> transactions, + ) + }).separate + + // Run body and abort the tracking on exceptions + try { + val result = body + + participants + .parTraverse(participant => + Future( + runCycle( + participant.id.adminParty, + participant, + participant, + finishCommandId, + ) + ) + ) + .futureValue + + (result, new TrackingResult(completionsF.toMap, transactionsF.toMap)) + } catch { + case NonFatal(ex) => + submissionFailedP.trySuccess(()) + throw ex + } + } + + private class CollectUntilObserver[A](isDone: A => Boolean) extends StreamObserver[A] { + private val resultB: mutable.Builder[A, Seq[A]] = Seq.newBuilder[A] + private val resultP: Promise[Seq[A]] = Promise[Seq[A]]() + + val result: Future[Seq[A]] = resultP.future + + override def onNext(value: A): Unit = + if (isDone(value)) resultP.trySuccess(resultB.result()) + else resultB += value + + override def onError(t: Throwable): Unit = resultP.tryFailure(t) + + override def onCompleted(): Unit = + resultP.tryFailure(new IllegalStateException("Subscription has terminated unexpectedly.")) + } + + class TrackingResult( + completions: Map[ParticipantReference, Future[Seq[Completion]]], + transactions: Map[ParticipantReference, Future[Seq[Transaction]]], + ) { + + def assertStatusOk(participant: ParticipantReference): Assertion = + assertExactlyOneCompletion(participant).status.value.code shouldBe Code.OK.value() + + def assertExactlyOneCompletion( + participant: ParticipantReference + ): Completion = + 
awaitCompletions.toSeq.mapFilter { case (otherParticipant, completions) => + withClue(s"for $otherParticipant") { + if (otherParticipant == participant) { + Some(completions.loneElement) + } else { + completions shouldBe empty + None + } + } + }.loneElement + + def assertNoCompletionsExceptFor(participant: ParticipantReference): Assertion = + forEvery(awaitCompletions) { case (otherParticipant, completions) => + if (otherParticipant == participant) succeed + else + withClue(s"for $otherParticipant") { + completions shouldBe empty + } + } + + lazy val awaitCompletions: Map[ParticipantReference, Seq[Completion]] = completions.map { + case (participant, completionsF) => + withClue(s"for $participant")(participant -> completionsF.futureValue) + } + + lazy val awaitTransactions: Map[ParticipantReference, Seq[Transaction]] = + transactions.map { case (participant, transactionsF) => + participant -> withClue(s"for $participant")(transactionsF.futureValue) + } + + def assertNoTransactions(): Assertion = forEvery(awaitTransactions) { + case (participant, transactions) => + withClue(s"for $participant")(transactions shouldBe empty) + } + + def allCreated[TC]( + companion: ContractCompanion[TC, ?, ?] + )( + participant: ParticipantReference + ): Seq[TC] = + awaitTransactions(participant).flatMap(tx => + JavaDecodeUtil.decodeAllCreated(companion)( + javaapi.data.Transaction.fromProto(toJavaProto(tx)) + ) + ) + + def allArchived[TCid]( + companion: ContractCompanion[?, TCid, ?] + )( + participant: ParticipantReference + ): Seq[TCid] = + awaitTransactions(participant).flatMap(tx => + JavaDecodeUtil.decodeAllArchivedLedgerEffectsEvents(companion)( + javaapi.data.Transaction.fromProto(toJavaProto(tx)) + ) + ) + } +} + +object SecurityTestHelpers { + + /** Wraps a function updating a [[com.digitalasset.canton.sequencing.protocol.SubmissionRequest]]. + * Since the update usually involves updating a signature, the function receives an extra input + * `updateSignatureWith` that should be used to update any signature. + */ + trait MessageTransform[A] { + + def apply( + updateSignatureWith: A => Option[SyncCryptoApi], + message: SubmissionRequest, + ): SubmissionRequest + } + + trait SignedMessageTransform[A] { + + def apply( + sender: LocalInstanceReference, + message: SubmissionRequest, + synchronizerId: PhysicalSynchronizerId, + ): SignedContent[SubmissionRequest] + } +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/SecurityTestLensUtils.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/SecurityTestLensUtils.scala new file mode 100644 index 0000000000..81ed3da883 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/SecurityTestLensUtils.scala @@ -0,0 +1,111 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security + +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.BaseTest +import com.digitalasset.canton.crypto.{CryptoPureApi, SyncCryptoApi} +import com.digitalasset.canton.data.* +import com.digitalasset.canton.data.MerkleTree.VersionedMerkleTree +import com.digitalasset.canton.protocol.messages.* +import com.digitalasset.canton.sequencing.protocol.{ + ClosedEnvelope, + OpenEnvelope, + Recipients, + SubmissionRequest, +} +import monocle.macros.GenLens +import monocle.{Lens, Traversal} +import org.scalactic.source.Position + +import scala.concurrent.ExecutionContext + +/** A collection of Monocle utility methods used in security integration tests. */ +trait SecurityTestLensUtils { + this: BaseTest => + + def pureCrypto: CryptoPureApi + + /** Traversal for the messages embedded in [[SignedProtocolMessage]]. Fails if the submission + * request contains messages that are not of type [[SignedProtocolMessage]]. If the content of a + * [[SignedProtocolMessage]] is not of type `M`, the traversal may still succeed due to erasure; + * however, downstream code will likely fail with a [[java.lang.ClassCastException]] in that + * case. + */ + def traverseMessages[M <: SignedProtocolMessageContent]( + updateSignatureWith: M => Option[SyncCryptoApi] + )(implicit + executionContext: ExecutionContext + ): Traversal[SubmissionRequest, M] = + traverseSignedProtocolMessages[M] + .andThen( + Lens[SignedProtocolMessage[M], M]( + _.typedMessage.content + ) { newMessage => signedMessage => + val newTypedMessage = signedMessage.typedMessage.copy(content = newMessage) + updateSignatureWith(newMessage) match { + case Some(snapshot) => + val newSig = SignedProtocolMessage + .mkSignature(newTypedMessage, snapshot) + .failOnShutdown + .futureValue + signedMessage.copy(typedMessage = newTypedMessage, signatures = NonEmpty(Seq, newSig)) + case None => signedMessage.copy(typedMessage = newTypedMessage) + } + } + ) + + /** Traversal for signed protocol messages. Fails if the submission request contains messages that + * are not of type [[SignedProtocolMessage]]. If the content of a [[SignedProtocolMessage]] is + * not of type `M`, the traversal may still succeed due to erasure; however, downstream code will + * likely fail with a [[java.lang.ClassCastException]] in that case. + */ + def traverseSignedProtocolMessages[M <: SignedProtocolMessageContent] + : Traversal[SubmissionRequest, SignedProtocolMessage[M]] = + GenLens[SubmissionRequest](_.batch.envelopes) + .andThen(Traversal.fromTraverse[List, ClosedEnvelope]) + .andThen(ClosedEnvelope.tryDefaultOpenEnvelope(pureCrypto, testedProtocolVersion)) + .andThen( + Lens[DefaultOpenEnvelope, SignedProtocolMessage[M]]( + _.protocolMessage.asInstanceOf[SignedProtocolMessage[M]] + )(newMessage => _.copy(protocolMessage = newMessage)) + ) + + /** Lens for focusing on the first element of a MerkleSeq. Fails if the seq is empty or the first + * element is blinded. 
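+    * Note: updating through this lens rebuilds the sequence with `MerkleSeq.fromSeq`, so the
+    * enclosing Merkle hashes are recomputed for the new first element.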
+ */ + def firstElement[M <: VersionedMerkleTree[M]](implicit pos: Position): Lens[MerkleSeq[M], M] = + Lens[MerkleSeq[M], M]( + _.toSeq.headOption + .valueOrFail("Unable to get the first element of the empty MerkleSeq.") + .unwrap + .valueOrFail("Unable to get the first element, because it is blinded.") + )(newFirst => + merkleSeq => + (merkleSeq.toSeq: @unchecked) match { + case Seq() => + fail("Unable to update the first element of the empty MerkleSeq.") + case (_: BlindedNode[?]) +: _ => + fail("Unable to update the first element, because it is blinded.") + case _ +: tail => + MerkleSeq.fromSeq(pureCrypto, testedProtocolVersion)(newFirst +: tail) + } + ) + + def firstViewCommonData: Lens[GenTransactionTree, ViewCommonData] = + GenTransactionTree.rootViewsUnsafe + .andThen(firstElement[TransactionView]) + .andThen(TransactionView.viewCommonDataUnsafe) + .andThen(MerkleTree.tryUnwrap[ViewCommonData]) + + def allViewRecipients: Traversal[TransactionConfirmationRequest, Recipients] = + GenLens[TransactionConfirmationRequest](_.viewEnvelopes) + .andThen(Traversal.fromTraverse[Seq, OpenEnvelope[TransactionViewMessage]]) + .andThen( + GenLens[OpenEnvelope[TransactionViewMessage]](_.recipients): Lens[ + OpenEnvelope[TransactionViewMessage], + Recipients, + ] + ) +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/EncryptedCryptoPrivateStoreIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/EncryptedCryptoPrivateStoreIntegrationTest.scala new file mode 100644 index 0000000000..7db335319c --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/EncryptedCryptoPrivateStoreIntegrationTest.scala @@ -0,0 +1,94 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms + +import cats.syntax.parallel.* +import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* +import com.daml.test.evidence.tag.Security.Attack +import com.digitalasset.canton.config.KmsConfig +import com.digitalasset.canton.crypto.kms.mock.v1.MockKmsDriverFactory.mockKmsDriverName +import com.digitalasset.canton.crypto.store.CryptoPrivateStoreError.FailedToReadKey +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentDefinition, + SharedEnvironment, +} + +/** Defines the different environments and tests to be used by the EncryptedCryptoPrivateStore... + * integration tests (i.e. ...WithPreDefinedKey...; ...NoPreDefinedKey...; + * ...NoPreDefinedKeyMultiRegion...) + */ +trait EncryptedCryptoPrivateStoreIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment + with EncryptedCryptoPrivateStoreTestHelpers { + + override lazy val environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P2_S1M1.withSetup { implicit env => + import env.* + participants.all.synchronizers.connect_local(sequencer1, alias = daName) + } + + protected val protectedNodes: Set[String] = Set("participant1") + + "participants can ping each other" in { implicit env => + import env.* + participant1.health.ping(participant2.id) + } + + "participants can ping each other after restart" in { implicit env => + import env.* + + // TODO(#25069): Add persistence to mock KMS driver to support node restarts as used in this test case. 
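+      // Note: the mock KMS driver has no persistence yet (see the TODO above), so the restart exercised below would lose its keys; the test is therefore cancelled for it.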
+ // Cancel this test case when mock kms driver is used. + assume( + participant1.config.crypto.kms.exists(kms => + !(kms.isInstanceOf[KmsConfig.Driver] + && kms.asInstanceOf[KmsConfig.Driver].name == mockKmsDriverName) + ) + ) + + /* Restart node 1 and verify that it can still ping. + Note that a new KMS key has been created and restarting the node means fetching this newly created key + 'wrapper_key_id' from the database + */ + participant1.stop() + participant1.start() + participant1.synchronizers.reconnect_all() + + participant1.health.ping(participant2.id) + } + + "protected nodes have their stored private keys encrypted and these can be decrypted" taggedAs + securityAsset.setAttack( + Attack( + actor = "malicious db admin", + threat = "attempts to read private keys", + mitigation = "encrypt all private keys in the database.", + ) + ) in { implicit env => + forAll(protectedNodes) { nodeName => + checkAndDecryptKeys(nodeName) + } + } + + "decryption fails when a protected node has a non-encrypted key in an encrypted store" in { + implicit env => + import env.* + forAll(protectedNodes) { nodeName => + // store a clear key + storeClearKey(nodeName) + // TODO(i10760): remove encrypted flag from listPrivateKeys so that we can simply + // call checkAndDecryptKeys and expect it to fail with a decrypt error + // then this check should fail with a decryption error + // checkAndDecryptKeys(nodeName, kms) + val encStore = getEncryptedCryptoStore(nodeName) + listAllStoredKeys(encStore.store).toList + .parTraverse(storedKey => encStore.exportPrivateKey(storedKey.id)) + .leftOrFailShutdown("check encryption") + .futureValue shouldBe a[FailedToReadKey] + } + } + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/EncryptedCryptoPrivateStoreTestHelpers.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/EncryptedCryptoPrivateStoreTestHelpers.scala new file mode 100644 index 0000000000..5bd80a3b72 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/EncryptedCryptoPrivateStoreTestHelpers.scala @@ -0,0 +1,159 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms + +import com.daml.test.evidence.tag.Security.SecurityTest.Property.Privacy +import com.daml.test.evidence.tag.Security.{SecurityTest, SecurityTestSuite} +import com.digitalasset.canton.BaseTest +import com.digitalasset.canton.crypto.kms.{Kms, KmsError, KmsKeyId} +import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto +import com.digitalasset.canton.crypto.store.db.{DbCryptoPrivateStore, StoredPrivateKey} +import com.digitalasset.canton.crypto.store.{ + CryptoPrivateStoreExtended, + EncryptedCryptoPrivateStore, +} +import com.digitalasset.canton.integration.TestConsoleEnvironment +import com.digitalasset.canton.logging.SuppressingLogger.LogEntryOptionality +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ByteString6144 +import com.google.protobuf.ByteString + +import scala.concurrent.ExecutionContext + +trait EncryptedCryptoPrivateStoreTestHelpers extends SecurityTestSuite { + this: BaseTest => + + lazy protected val securityAsset: SecurityTest = + SecurityTest(property = Privacy, asset = "Canton node") + + def getEncryptedCryptoStore( + nodeName: String + )(implicit env: TestConsoleEnvironment): EncryptedCryptoPrivateStore = + env.n(nodeName).crypto.cryptoPrivateStore match { + // check that node's private store is encrypted + case encStore: EncryptedCryptoPrivateStore => encStore + case _ => + fail( + "node " + nodeName + " selected for protection does not have an encrypted crypto private store" + ) + } + + /** Returns all the private keys in the store by calling listPrivateKeys with all combinations of + * the filters (purpose and encrypted) + */ + def listAllStoredKeys(store: CryptoPrivateStoreExtended)(implicit + traceContext: TraceContext, + ec: ExecutionContext, + ): Set[StoredPrivateKey] = + retryET()(store.listPrivateKeys()).valueOrFailShutdown("list keys").futureValue + + def storeClearKey(nodeName: String)(implicit + ec: ExecutionContext, + env: TestConsoleEnvironment, + ): Unit = { + val encStore = getEncryptedCryptoStore(nodeName) + val crypto = SymbolicCrypto.create(testedReleaseProtocolVersion, timeouts, loggerFactory) + encStore.store + .storePrivateKey( + crypto.newSymbolicEncryptionKeyPair().privateKey, + None, + ) + .valueOrFailShutdown("write clear key in encrypted store") + .futureValue + } + + def checkAndDecryptKeys(nodeName: String)(implicit + env: TestConsoleEnvironment + ): Set[StoredPrivateKey] = { + import env.* + + val encStore = getEncryptedCryptoStore(nodeName) + // lists all the keys as they are stored in the db ('stored'). A db admin has access to these keys. + val allStoredKeys = listAllStoredKeys(encStore.store) + // lists all the keys AFTER decryption ('in-memory'). This uses the underlying encrypted store to decrypt the keys.
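+    // (Both listings use the same helper: `encStore.store` exposes the raw database rows,
+    // i.e. the db admin's view, while `encStore` itself decrypts the keys on read.)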
+ val allDecryptedStoredKeys = listAllStoredKeys(encStore) + + allStoredKeys.size shouldEqual allDecryptedStoredKeys.size + + forAll(allStoredKeys) { storedKey => + val decryptedStoredKey = + allDecryptedStoredKeys.find(_.id == storedKey.id).valueOrFail("could not find key") + // verify that each stored private key is different from the in-memory version of the same key + decryptedStoredKey.data should not be storedKey.data + // verify the listed keys from the encrypted store match the decryption of the encrypted keys + decryptedStoredKey.data shouldBe decryptKey(storedKey, encStore.kms) + .valueOrFail( + "failed to decrypt keys" + ) + } + allDecryptedStoredKeys + } + + private def decryptKey( + encryptedKey: StoredPrivateKey, + kms: Kms, + )(implicit ec: ExecutionContext): Either[KmsError, ByteString] = { + /* ByteString6144 defines the upper bound on the data size we can decrypt. This is the + * maximum accepted input size for all the external KMSs that we support. + */ + val encryptedKeyData = ByteString6144 + .create(encryptedKey.data) match { + case Left(err) => fail(err) + case Right(encData) => encData + } + kms + .decryptSymmetric( + KmsKeyId( + encryptedKey.wrapperKeyId + .valueOrFail( + s"key ${encryptedKey.id} does not have an associated wrapper key, so it cannot be decrypted" + ) + ), + encryptedKeyData, + ) + .map(_.unwrap) + .value + .failOnShutdown + .futureValue + } + + def checkAndReturnClearKeys(nodeName: String)(implicit + ec: ExecutionContext, + env: TestConsoleEnvironment, + ): Set[StoredPrivateKey] = + env.n(nodeName).crypto.cryptoPrivateStore match { + // check that node's private store is in the clear + case store: DbCryptoPrivateStore => + val allKeys = listAllStoredKeys(store) + allKeys should not be empty + // checks that we can parse all keys (none of them is encrypted) + forAll(allKeys)(storedKey => + store + .exportPrivateKey(storedKey.id) + .valueOrFailShutdown("export clear key") + .futureValue + ) + allKeys + case _ => + fail( + "node " + nodeName + " does not have a clear crypto private store" + ) + } + + def stopAllNodesIgnoringSequencerClientWarnings(stop: => Unit): Unit = + loggerFactory.assertLogsUnorderedOptional( + stop, + // As we stop the synchronizer node before (or at the same time as) the participants, allow the participants' + // sequencer clients to warn about the sequencer being down until all nodes are stopped. + ( + LogEntryOptionality.OptionalMany, + logEntry => { + logEntry.loggerName should include("GrpcSequencerClientTransport") + logEntry.warningMessage should include( + "Request failed for sequencer. Is the server running?" + ) + }, + ), + ) +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/KmsCryptoIntegrationTestBase.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/KmsCryptoIntegrationTestBase.scala new file mode 100644 index 0000000000..a823e1c133 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/KmsCryptoIntegrationTestBase.scala @@ -0,0 +1,198 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms + +import com.digitalasset.canton.admin.api.client.data.StaticSynchronizerParameters +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.config.{ + CryptoConfig, + CryptoProvider, + KmsConfig, + NonNegativeFiniteDuration, + PrivateKeyStoreConfig, +} +import com.digitalasset.canton.console.LocalParticipantReference +import com.digitalasset.canton.integration.* +import com.digitalasset.canton.integration.bootstrap.InitializedSynchronizer +import com.digitalasset.canton.integration.tests.topology.TopologyManagementHelper +import com.digitalasset.canton.protocol.StaticSynchronizerParameters as StaticSynchronizerParametersInternal +import com.digitalasset.canton.time.{RemoteClock, SimClock} +import monocle.macros.syntax.lens.* + +/** Defines the necessary environment and setup for running a set of nodes with KMS as their + * provider, either with or without pre-generated keys. + * + * Check contributing/kms.md on how to run the tests + */ +trait KmsCryptoIntegrationTestBase extends TopologyManagementHelper { + self: CommunityIntegrationTest with EnvironmentSetup => + + // Defines which nodes will run an external KMS. + protected lazy val protectedNodes: Set[String] = Set("participant1") + + // Defines which nodes will not use session signing keys. + protected lazy val nodesWithSessionSigningKeysDisabled: Set[String] = Set.empty + + protected def otherConfigTransforms: Seq[ConfigTransform] = Seq.empty + + protected val kmsConfigTransform: ConfigTransform = { + def modifyCryptoSchemeConfig(conf: CryptoConfig): CryptoConfig = + conf.copy(provider = CryptoProvider.Jce) + + val sequencerConfigTransform = ConfigTransforms.updateSequencerConfig("sequencer1")( + _.focus(_.crypto).modify(modifyCryptoSchemeConfig) + ) + val mediatorConfigTransform = ConfigTransforms.updateMediatorConfig("mediator1")( + _.focus(_.crypto).modify(modifyCryptoSchemeConfig) + ) + val participant1ConfigTransform = ConfigTransforms.updateParticipantConfig("participant1")( + _.focus(_.crypto).modify(modifyCryptoSchemeConfig) + ) + val participant2ConfigTransform = ConfigTransforms.updateParticipantConfig("participant2")( + _.focus(_.crypto).modify(modifyCryptoSchemeConfig) + ) + + sequencerConfigTransform + .compose(mediatorConfigTransform) + .compose(participant1ConfigTransform) + .compose(participant2ConfigTransform) + } + + protected def kmsConfig: KmsConfig + + protected def topologyPreDefinedKeys: TopologyKmsKeys + + // we use a distributed topology for the KMS tests + protected val environmentBaseConfig: EnvironmentDefinition = + EnvironmentDefinition.P2S1M1_Manual + + protected def getTopologyKeysForNode(name: String): TopologyKmsKeys = + if (name.contains("sequencer")) + topologyPreDefinedKeys.copy( + namespaceKeyId = topologyPreDefinedKeys.namespaceKeyId.map(_.concat("-sequencer")), + sequencerAuthKeyId = topologyPreDefinedKeys.sequencerAuthKeyId.map(_.concat("-sequencer")), + signingKeyId = topologyPreDefinedKeys.signingKeyId.map(_.concat("-sequencer")), + encryptionKeyId = topologyPreDefinedKeys.encryptionKeyId, + ) + else if (name.contains("mediator")) + topologyPreDefinedKeys.copy( + namespaceKeyId = topologyPreDefinedKeys.namespaceKeyId.map(_.concat("-mediator")), + sequencerAuthKeyId = topologyPreDefinedKeys.sequencerAuthKeyId.map(_.concat("-mediator")), + signingKeyId = topologyPreDefinedKeys.signingKeyId.map(_.concat("-mediator")), + encryptionKeyId = 
topologyPreDefinedKeys.encryptionKeyId, + ) + else // for the participants + topologyPreDefinedKeys.copy( + namespaceKeyId = topologyPreDefinedKeys.namespaceKeyId.map(_.concat(s"-$name")), + sequencerAuthKeyId = topologyPreDefinedKeys.sequencerAuthKeyId.map(_.concat(s"-$name")), + signingKeyId = topologyPreDefinedKeys.signingKeyId.map(_.concat(s"-$name")), + encryptionKeyId = topologyPreDefinedKeys.encryptionKeyId.map(_.concat(s"-$name")), + ) + + protected def teardown(): Unit = {} + + override lazy val environmentDefinition: EnvironmentDefinition = + environmentBaseConfig + .addConfigTransforms(otherConfigTransforms*) + .addConfigTransform( + ConfigTransforms.setCrypto( + CryptoConfig( + provider = CryptoProvider.Kms, + kms = Some(kmsConfig), + privateKeyStore = PrivateKeyStoreConfig(None), + ), + (name: String) => protectedNodes.contains(name), + ) + ) + .withSetup { implicit env => + import env.* + + sequencer1.start() + mediator1.start() + + if ( + sequencer1.config.init.identity.isManual && !sequencer1.config.init.generateTopologyTransactionsAndKeys + ) { + manuallyInitNode( + sequencer1, + if (sequencer1.config.crypto.provider == CryptoProvider.Kms) + Some(getTopologyKeysForNode(sequencer1.name)) + else None, + ) + } + + if ( + mediator1.config.init.identity.isManual && !mediator1.config.init.generateTopologyTransactionsAndKeys + ) + manuallyInitNode( + mediator1, + if (mediator1.config.crypto.provider == CryptoProvider.Kms) + Some(getTopologyKeysForNode(mediator1.name)) + else None, + ) + + val topologyChangeDelay = environment.clock match { + case _: RemoteClock | _: SimClock => NonNegativeFiniteDuration.Zero + case _ => StaticSynchronizerParametersInternal.defaultTopologyChangeDelay.toConfig + } + + val staticParameters = + StaticSynchronizerParameters.defaults( + sequencer1.config.crypto, + testedProtocolVersion, + topologyChangeDelay = topologyChangeDelay, + ) + val synchronizerId = bootstrap.synchronizer( + daName.unwrap, + sequencers = Seq(sequencer1), + mediators = Seq(mediator1), + synchronizerOwners = Seq(sequencer1), + synchronizerThreshold = PositiveInt.one, + staticParameters, + ) + + env.initializedSynchronizers.put( + daName, + InitializedSynchronizer( + synchronizerId, + staticParameters.toInternal, + synchronizerOwners = Set(sequencer1), + ), + ) + + // make sure synchronizer nodes are initialized + mediator1.health.wait_for_initialized() + sequencer1.health.wait_for_initialized() + + participant1.start() + participant2.start() + + Seq(participant1, participant2) foreach { + case p: LocalParticipantReference if p.config.init.identity.isManual => + manuallyInitNode( + p, + if (p.config.crypto.provider == CryptoProvider.Kms) + Some(getTopologyKeysForNode(p.name)) + else None, + ) + case _ => + } + + // make sure participant nodes are initialized + participant1.health.wait_for_initialized() + participant2.health.wait_for_initialized() + + Seq(participant1, participant2).foreach( + _.synchronizers.connect_local(sequencer1, alias = daName) + ) + } + .withTeardown(_ => teardown()) + + // by default auto-init is set to true, and we run without persistence + protected def setupPlugins( + withAutoInit: Boolean, + storagePlugin: Option[EnvironmentSetupPlugin], + sequencerPlugin: EnvironmentSetupPlugin, + ): Unit +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/KmsCryptoWithPreDefinedKeysIntegrationTest.scala 
b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/KmsCryptoWithPreDefinedKeysIntegrationTest.scala new file mode 100644 index 0000000000..2b6ff9b2d7 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/KmsCryptoWithPreDefinedKeysIntegrationTest.scala @@ -0,0 +1,37 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms + +import com.digitalasset.canton.config.CryptoProvider +import com.digitalasset.canton.crypto.store.KmsCryptoPrivateStore +import com.digitalasset.canton.integration.{CommunityIntegrationTest, EnvironmentSetup} + +/** Runs a crypto integration test with one participant using a KMS provider with pre-generated + * keys. Runs with persistence so we also check that it is able to recover from an unexpected + * shutdown. + */ +trait KmsCryptoWithPreDefinedKeysIntegrationTest extends KmsCryptoIntegrationTestBase { + self: CommunityIntegrationTest & EnvironmentSetup => + + "be able to restart from a persisted state" in { implicit env => + import env.* + + assert(participant1.config.crypto.provider == CryptoProvider.Kms) + assert(participant1.crypto.cryptoPrivateStore.isInstanceOf[KmsCryptoPrivateStore]) + + /* restarting the node means that we need to rebuild our mappings between the public keys + * stored in Canton and the private keys externally stored in the KMS */ + participant1.stop() + + participant1.start() + participant1.health.wait_for_running() + + participant1.synchronizers.connect_local(sequencer1, alias = daName) + + eventually() { + assertPingSucceeds(participant1, participant2) + } + } + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/KmsMigrationIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/KmsMigrationIntegrationTest.scala new file mode 100644 index 0000000000..1bf896e880 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/KmsMigrationIntegrationTest.scala @@ -0,0 +1,286 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms + +import better.files.File +import com.digitalasset.canton.config.CantonRequireTypes.InstanceName +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt} +import com.digitalasset.canton.crypto.{KeyPurpose, SigningPublicKeyWithName} +import com.digitalasset.canton.integration.bootstrap.InitializedSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{ + UseBftSequencer, + UsePostgres, + UseReferenceBlockSequencer, +} +import com.digitalasset.canton.integration.tests.examples.IouSyntax +import com.digitalasset.canton.integration.tests.security.kms.aws.AwsKmsCryptoIntegrationTestBase +import com.digitalasset.canton.integration.tests.security.kms.gcp.GcpKmsCryptoIntegrationTestBase +import com.digitalasset.canton.integration.util.AcsInspection +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentDefinition, + SharedEnvironment, +} +import com.digitalasset.canton.topology.ForceFlag +import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId +import com.digitalasset.canton.topology.transaction.DelegationRestriction.CanSignAllButNamespaceDelegations +import com.digitalasset.canton.topology.transaction.{NamespaceDelegation, ParticipantPermission} + +import scala.jdk.CollectionConverters.* + +trait KmsMigrationIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment + with KmsCryptoIntegrationTestBase + with AcsInspection { + + protected val sequencerGroups: MultiSynchronizer = + MultiSynchronizer(Seq(Set("sequencer1"), Set("sequencer2")).map(_.map(InstanceName.tryCreate))) + + /** Environment: + * - p1: with KMS + * - p2: with JCE and KMS aligned schemes + * - p3: default crypto config (JCE, default schemes) + * - da: with JCE and KMS aligned schemes + * - acme: default crypto config (JCE, default schemes) + */ + override protected val environmentBaseConfig: EnvironmentDefinition = + EnvironmentDefinition.P3S2M2_Manual + + "setup synchronizer to migrate from" in { implicit env => + import env.* + + participant3.start() + sequencer2.start() + mediator2.start() + + // check that participant1 has KMS keys + participant1.keys.secret.list().map(_.kmsKeyId) should contain theSameElementsAs + topologyPreDefinedKeys.forNode(participant1).productIterator.toSeq + + // check that participant3 has no KMS Keys + participant3.keys.secret.list().map(_.kmsKeyId).forall(_.isEmpty) shouldBe true + + // initialize synchronizer2 (synchronizer1 is initialized in KmsCryptoIntegrationTestBase) + val synchronizerId = bootstrap.synchronizer( + acmeName.unwrap, + synchronizerOwners = Seq(sequencer2), + synchronizerThreshold = PositiveInt.one, + sequencers = Seq(sequencer2), + mediators = Seq(mediator2), + staticSynchronizerParameters = EnvironmentDefinition.defaultStaticSynchronizerParameters, + ) + + env.initializedSynchronizers.put( + acmeName, + InitializedSynchronizer( + synchronizerId, + EnvironmentDefinition.defaultStaticSynchronizerParameters.toInternal, + synchronizerOwners = Set(sequencer2), + ), + ) + + sequencer2.health.wait_for_initialized() + + participant3.synchronizers.connect_local(sequencer2, alias = acmeName) + + Set(participant1, participant3).foreach { p => + p.dars.upload(CantonExamplesPath) + } + + val alice = participant3.parties.enable("Alice", 
synchronizer = acmeName) + val bob = participant3.parties.enable("Bob", synchronizer = acmeName) + + // Create some contract for alice and bob on p3 + val (obligor, owner, participant) = (alice, bob, participant3) + IouSyntax.createIou(participant)(obligor, owner) + } + + "setup namespace delegation" in { implicit env => + import env.* + + val participantOld = participant3 + val participantNew = participant1 + + // user-manual-entry-begin: KmsSetupNamespaceDelegation + val namespaceOld = participantOld.namespace + val namespaceNew = participantNew.namespace + + val rootNamespaceDelegationOld = participantOld.topology.transactions + .list(filterAuthorizedKey = Some(namespaceOld.fingerprint)) + .result + .map(_.transaction) + .filter(_.mapping.code == NamespaceDelegation.code) + .head + + val namespaceKeyNew = participantNew.keys.public.download(namespaceNew.fingerprint) + participantOld.keys.public.upload(namespaceKeyNew, Some("pNew-namespace-key")) + + val namespaceNewSigningKey = participantNew.keys.public + .list( + filterFingerprint = participantNew.fingerprint.unwrap, + filterPurpose = Set(KeyPurpose.Signing), + ) + .map(_.asInstanceOf[SigningPublicKeyWithName].publicKey) + .loneElement + + // Delegate namespace of old participant to new participant + val delegation = participantOld.topology.namespace_delegations.propose_delegation( + namespace = namespaceOld, + targetKey = namespaceNewSigningKey, + CanSignAllButNamespaceDelegations, + ) + + participantNew.topology.transactions + .load( + Seq(rootNamespaceDelegationOld, delegation), + TopologyStoreId.Authorized, + ForceFlag.AlienMember, + ) + // user-manual-entry-end: KmsSetupNamespaceDelegation + } + + "re-create Alice/Bob on new participant" in { implicit env => + import env.* + + val participantOld = participant3 + val participantNew = participant1 + val newKmsSynchronizerAlias = daName + + // user-manual-entry-begin: KmsRecreatePartiesInNewParticipant + val parties = participantOld.parties.list().map(_.party) + + parties.foreach { party => + participantNew.topology.party_to_participant_mappings + .propose( + party = party, + newParticipants = Seq(participantNew.id -> ParticipantPermission.Submission), + store = daId, + ) + } + + // Disconnect from new KMS-compatible synchronizer to prepare migration of parties and contracts + participantNew.synchronizers.disconnect(newKmsSynchronizerAlias) + // user-manual-entry-end: KmsRecreatePartiesInNewParticipant + } + + "export and import ACS for Alice/Bob" in { implicit env => + import env.* + + val participantOld = participant3 + val participantNew = participant1 + + val aliceOld = participantOld.parties.find("Alice") + val bobOld = participantOld.parties.find("Bob") + + val oldSynchronizerSequencer = sequencer2 + val oldSynchronizerAlias = acmeName + val newSynchronizerAlias = daName + + val oldSynchronizerMediator = mediator2 + + val oldSynchronizerId = acmeId + val newKmsSynchronizerId = daId + + // user-manual-entry-begin: KmsMigrateACSofParties + val parties = participantOld.parties.list().map(_.party) + + // Make sure synchronizer and the old participant are quiet before exporting ACS + participantOld.synchronizers.disconnect(oldSynchronizerAlias) + oldSynchronizerMediator.stop() + oldSynchronizerSequencer.stop() + + File.usingTemporaryFile("participantOld-acs", suffix = ".txt") { acsFile => + val acsFileName = acsFile.toString + + val ledgerEnd = NonNegativeLong.tryCreate(participantOld.ledger_api.state.end()) + + // Export from old participant + participantOld.repair.export_acs( + 
parties = parties.toSet, + exportFilePath = acsFileName, + ledgerOffset = ledgerEnd, + contractSynchronizerRenames = Map(oldSynchronizerId.logical -> newKmsSynchronizerId.logical), + ) + + // Import to new participant + participantNew.repair.import_acs(acsFileName) + } + + // Kill/stop the old participant + participantOld.stop() + + // Connect the new participant to the new synchronizer + participantNew.synchronizers.reconnect(newSynchronizerAlias) + // user-manual-entry-end: KmsMigrateACSofParties + + // wait for participantNew to fully onboard and process all topology transactions from the synchronizer + val (aliceP1, bobP1) = eventually(retryOnTestFailuresOnly = false) { + (participantNew.parties.find("Alice"), participantNew.parties.find("Bob")) + } + + aliceP1 shouldEqual aliceOld + bobP1 shouldEqual bobOld + + eventually() { + participantNew.ledger_api.state.acs.of_party(aliceP1) should not be empty + participantNew.ledger_api.state.acs.of_party(bobP1) should not be empty + } + + participantNew.health.ping(participant2) + } + + "test that ACS of Alice/Bob works on new participant" in { implicit env => + import env.* + + val alice = participant1.parties.find("Alice") + val bob = participant1.parties.find("Bob") + + val (obligor, owner, participant) = (alice, bob, participant1) + + val iou = findIOU( + participant, + owner, + contract => + contract.data.owner == owner.toProtoPrimitive && contract.data.payer == obligor.toProtoPrimitive, + ) + + // Transfer the contract originally created on p3 from Bob to Alice + participant.ledger_api.javaapi.commands + .submit( + Seq(owner), + iou.id.exerciseTransfer(obligor.toProtoPrimitive).commands.asScala.toSeq, + ) + + // Alice creates a new contract directly with Bob + IouSyntax.createIou(participant)(obligor, owner) + } +} + +class AwsKmsMigrationReferenceIntegrationTestPostgres + extends KmsMigrationIntegrationTest + with AwsKmsCryptoIntegrationTestBase { + + setupPlugins( + withAutoInit = false, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = + new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory, sequencerGroups), + ) + +} + +class GcpKmsMigrationBftOrderingBlockIntegrationTestPostgres + extends KmsMigrationIntegrationTest + with GcpKmsCryptoIntegrationTestBase { + + setupPlugins( + withAutoInit = false, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = new UseBftSequencer(loggerFactory, sequencerGroups), + ) + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/MigrationClearToEncryptedStoreIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/MigrationClearToEncryptedStoreIntegrationTest.scala new file mode 100644 index 0000000000..a6a43eff18 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/MigrationClearToEncryptedStoreIntegrationTest.scala @@ -0,0 +1,71 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms + +import com.daml.test.evidence.scalatest.ScalaTestSupport.Implicits.* +import com.daml.test.evidence.tag.Security.Attack +import com.digitalasset.canton.integration.plugins.UseKms +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentDefinition, + SharedEnvironment, +} +import com.digitalasset.canton.util.ResourceUtil.withResource + +trait MigrationClearToEncryptedStoreIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment + with EncryptedCryptoPrivateStoreTestHelpers { + + protected val kmsPlugin: UseKms + + protected val protectedNodes: Set[String] = Set("participant1") + + override lazy val environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P2_S1M1.withSetup { implicit env => + import env.* + participants.all.synchronizers.connect_local(sequencer1, alias = daName) + } + + "after migrating to an encrypted private key store, keys remain encrypted and are the same as the initial keys" taggedAs + securityAsset.setAttack( + Attack( + actor = "untrusted db admin", + threat = "attempting to read private keys after a migration", + mitigation = "all keys that the db admin has access to are encrypted", + ) + ) in { implicit env => + import env.* + // private keys (in clear) before migrating to an encrypted private store + val initialEnvKeys = protectedNodes.map { nodeName => + (nodeName, checkAndReturnClearKeys(nodeName)) + }.toMap + + /* Restart nodes and modify environment with a new config file. + Note that this means all clear rows in the store will be encrypted. + */ + stopAllNodesIgnoringSequencerClientWarnings(env.stopAll()) + registerPlugin(kmsPlugin) + + withResource( + manualCreateEnvironmentWithPreviousState( + env.actualConfig + ) + ) { implicit newEnv => + import newEnv.* + + participant1.synchronizers.reconnect_all() + participant2.synchronizers.reconnect_all() + + participant1.health.ping(participant2.id) + + forAll(protectedNodes) { nodeName => + val decryptedKeys = + checkAndDecryptKeys(nodeName)(newEnv) + // keys are the same as the ones before the migration + decryptedKeys.toSet shouldBe initialEnvKeys(nodeName).toSet + } + } + } +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/MigrationEncryptedToClearStoreIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/MigrationEncryptedToClearStoreIntegrationTest.scala new file mode 100644 index 0000000000..0ed6aff709 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/MigrationEncryptedToClearStoreIntegrationTest.scala @@ -0,0 +1,61 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms + +import com.digitalasset.canton.integration.plugins.* +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentDefinition, + SharedEnvironment, +} +import com.digitalasset.canton.util.ResourceUtil.withResource + +trait MigrationEncryptedToClearStoreIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment + with EncryptedCryptoPrivateStoreTestHelpers { + + protected val kmsRevertPlugin: UseKms + + protected val protectedNodes: Set[String] = Set("participant1") + + override lazy val environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P2_S1M1.withSetup { implicit env => + import env.* + participants.all.synchronizers.connect_local(sequencer1, alias = daName) + } + + "after revoking an encrypted private key store, keys are in clear and are the same as the initial encrypted keys" in { + implicit env => + // private keys (encrypted) before migrating to a clear private store + val initialEnvKeys = protectedNodes.map { nodeName => + (nodeName, checkAndDecryptKeys(nodeName)) + }.toMap + + /* Restart nodes and modify environment with a new config file with the encrypted store set to be revoked. + Note that this means that canton will decrypt the keys and use a non-encrypted store. + */ + registerPlugin(kmsRevertPlugin) + stopAllNodesIgnoringSequencerClientWarnings(env.stopAll()) + + withResource( + manualCreateEnvironmentWithPreviousState( + env.actualConfig + ) + ) { implicit newEnv => + import newEnv.* + + participant1.synchronizers.reconnect_all() + participant2.synchronizers.reconnect_all() + + participant1.health.ping(participant2.id) + + forAll(protectedNodes) { nodeName => + val currentKeys = checkAndReturnClearKeys(nodeName)(newEnv.executionContext, newEnv) + // keys are the same as the ones before the migration + currentKeys.toSet shouldBe initialEnvKeys(nodeName).toSet + } + } + } +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/NamespaceIntermediateKmsKeyIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/NamespaceIntermediateKmsKeyIntegrationTest.scala new file mode 100644 index 0000000000..73bc6e9a0e --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/NamespaceIntermediateKmsKeyIntegrationTest.scala @@ -0,0 +1,96 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms + +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.console.InstanceReference +import com.digitalasset.canton.crypto.{SigningKeyUsage, SigningPublicKey} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.tests.security.KeyManagementIntegrationTestHelper +import com.digitalasset.canton.integration.tests.security.kms.aws.AwsKmsCryptoIntegrationTestBase +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentSetupPlugin, + SharedEnvironment, +} +import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId +import com.digitalasset.canton.topology.transaction.DelegationRestriction.CanSignAllButNamespaceDelegations + +import java.util.concurrent.atomic.AtomicInteger + +trait NamespaceIntermediateKmsKeyIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment + with KmsCryptoIntegrationTestBase + with KeyManagementIntegrationTestHelper { + + protected val namespaceKeys: Array[String] + protected val rotationKey: String + + private def setupIntermediateKey( + counter: AtomicInteger + )(node: InstanceReference): SigningPublicKey = { + + val namespaceDelegations = + node.topology.namespace_delegations.list( + store = TopologyStoreId.Authorized, + filterNamespace = node.namespace.toProtoPrimitive, + ) + + // create a new namespace intermediate key + val intermediateKey = + node.keys.secret + .register_kms_signing_key( + namespaceKeys(counter.getAndIncrement()), + SigningKeyUsage.NamespaceOnly, + ) + + // Create a namespace delegation for the intermediate key with the namespace root key + node.topology.namespace_delegations.propose_delegation( + node.namespace, + intermediateKey, + CanSignAllButNamespaceDelegations, + ) + + // Check that the new namespace delegation appears + eventually() { + val updatedNamespaceDelegations = node.topology.namespace_delegations.list( + store = TopologyStoreId.Authorized, + filterNamespace = node.namespace.toProtoPrimitive, + ) + assertResult(1, updatedNamespaceDelegations)( + updatedNamespaceDelegations.length - namespaceDelegations.length + ) + } + + intermediateKey + } + + "create and rotate intermediate keys for participant" in { implicit env => + import env.* + + rotateIntermediateNamespaceKeyAndPing( + participant1, + Some(rotationKey), + setupIntermediateKey(new AtomicInteger(0)), + ) + } + + setupPlugins( + withAutoInit = false, + storagePlugin = Option.empty[EnvironmentSetupPlugin], + sequencerPlugin = new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory), + ) +} + +class NamespaceIntermediateAwsKmsKeyReferenceIntegrationTest + extends NamespaceIntermediateKmsKeyIntegrationTest + with AwsKmsCryptoIntegrationTestBase { + override protected val namespaceKeys: Array[String] = Array( + "alias/canton-kms-test-namespace-intermediate-signing-key-participant", + "alias/canton-kms-test-namespace-intermediate-signing-key-2-participant", + ) + + override protected val rotationKey: String = "alias/canton-kms-test-signing-key" +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/RotateKmsKeyIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/RotateKmsKeyIntegrationTest.scala new file mode 100644 index 0000000000..4862e3dfbd --- /dev/null +++
b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/RotateKmsKeyIntegrationTest.scala @@ -0,0 +1,127 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms + +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.console.{LocalInstanceReference, LocalParticipantReference} +import com.digitalasset.canton.crypto.admin.grpc.PrivateKeyMetadata +import com.digitalasset.canton.crypto.{KeyPurpose, SigningKeyUsage} +import com.digitalasset.canton.integration.EnvironmentDefinition.allNodeNames +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.tests.security.kms.aws.AwsKmsCryptoIntegrationTestBase +import com.digitalasset.canton.integration.tests.security.kms.gcp.GcpKmsCryptoIntegrationTestBase +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentSetupPlugin, + SharedEnvironment, +} + +trait RotateKmsKeyIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment + with KmsCryptoIntegrationTestBase { + + protected val newKmsKeyIdsMap: Map[String, String] + + "be able to rotate private keys with other existing pre-generated KMS keys" in { implicit env => + import env.* + + def getSigningKeyForNode(node: LocalInstanceReference): PrivateKeyMetadata = + node.keys.secret + .list(filterPurpose = Set(KeyPurpose.Signing), filterUsage = SigningKeyUsage.ProtocolOnly) + .find(_.publicKey.fingerprint != node.id.uid.namespace.fingerprint) + .valueOrFail("Could not find signing key") + + val signingKeyParticipant1 = getSigningKeyForNode(participant1).publicKey + val keyFingerprint = signingKeyParticipant1.fingerprint.unwrap + val newKmsKeyId = newKmsKeyIdsMap(participant1.name) + + // user-manual-entry-begin: RotateKmsNodeKey + val newSigningKeyParticipant = participant1.keys.secret + .rotate_kms_node_key( + keyFingerprint, + newKmsKeyId, + "kms_key_rotated", + ) + // user-manual-entry-end: RotateKmsNodeKey + + protectedNodes.foreach { nodeName => + val node = env.n(nodeName) + val signingKey = + if (nodeName == "participant1") signingKeyParticipant1 + else getSigningKeyForNode(node).publicKey + + val newSigningKey = node match { + case participant: LocalParticipantReference if participant.name.contains("participant1") => + newSigningKeyParticipant + case node => + node.keys.secret.rotate_kms_node_key( + signingKey.fingerprint.unwrap, + newKmsKeyIdsMap(node.name), + "kms_key_rotated", + ) + } + + node.topology.synchronisation.await_idle() + + val storedKey = node.keys.secret + .list(filterFingerprint = newSigningKey.fingerprint.unwrap) + .lastOption + .valueOrFail("Could not find key") + + signingKey should not be newSigningKey + newSigningKey shouldBe storedKey.publicKey + storedKey.name shouldBe Some("kms_key_rotated") + } + + assertPingSucceeds(participant1, participant2) + } + + setupPlugins( + withAutoInit = false, + storagePlugin = Option.empty[EnvironmentSetupPlugin], + sequencerPlugin = new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory), + ) +} + +class RotateAwsKmsKeyReferenceIntegrationTest + extends RotateKmsKeyIntegrationTest + with AwsKmsCryptoIntegrationTestBase { + + // It does not matter which keys we rotate to, as long as they are distinct and not previously used. 
+ // We have chosen these particular keys for rotation to avoid generating new keys in the KMS, + // which would incur extra costs. + override protected val newKmsKeyIdsMap: Map[String, String] = + Map( + "participant1" -> "alias/canton-kms-test-signing-key", + "participant2" -> "alias/canton-kms-test-signing-key-da", + "mediator1" -> "alias/canton-kms-test-signing-key-domainManager", + "sequencer1" -> "alias/canton-kms-test-authentication-key-domainManager", + ) + + override protected lazy val protectedNodes: Set[String] = allNodeNames( + environmentDefinition.baseConfig + ) +} + +class RotateGcpKmsKeyReferenceIntegrationTest + extends RotateKmsKeyIntegrationTest + with GcpKmsCryptoIntegrationTestBase { + + // It does not matter which keys we rotate to, as long as they are distinct and not previously used. + // We have chosen these particular keys for rotation to avoid generating new keys in the KMS, + // which would incur extra costs. + override protected val newKmsKeyIdsMap: Map[String, String] = + Map( + "participant1" -> "canton-kms-test-signing-key", + "participant2" -> "canton-kms-test-signing-key-da", + "mediator1" -> "canton-kms-test-signing-key-domainManager", + "sequencer1" -> "canton-kms-test-authentication-key-domainManager", + ) + + override protected lazy val protectedNodes: Set[String] = + allNodeNames( + environmentDefinition.baseConfig + ) +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/RotateWrapperKeyFailureIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/RotateWrapperKeyFailureIntegrationTest.scala new file mode 100644 index 0000000000..7f92a31d26 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/RotateWrapperKeyFailureIntegrationTest.scala @@ -0,0 +1,101 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms + +import com.digitalasset.canton.config.CantonRequireTypes.String300 +import com.digitalasset.canton.console.CommandFailure +import com.digitalasset.canton.crypto.admin.grpc.GrpcVaultServiceError +import com.digitalasset.canton.crypto.kms.KmsKeyId +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentDefinition, + SharedEnvironment, +} + +trait RotateWrapperKeyFailureIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment + with EncryptedCryptoPrivateStoreTestHelpers { + + protected val protectedNodes: Set[String] = Set("participant1") + + protected val disabledKeyId: KmsKeyId + + override lazy val environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P2_S1M1 + + "fails if we select the same existing wrapper key for a rotation" in { implicit env => + import env.* + val currentWrapperKey = getEncryptedCryptoStore(participant1.name).wrapperKeyId + loggerFactory.assertThrowsAndLogs[CommandFailure]( + participant1.keys.secret.rotate_wrapper_key(currentWrapperKey.unwrap), + entry => { + entry.shouldBeCantonErrorCode( + GrpcVaultServiceError.WrapperKeyAlreadyInUseError.code + ) + entry.errorMessage should include( + s"Wrapper key id [$currentWrapperKey] selected for rotation is already being used." 
+ ) + }, + ) + } + + "fails if we select a non-existing wrapper key for a rotation" in { implicit env => + import env.* + val wrongKeyId = KmsKeyId(String300.tryCreate("a_key_that_does_not_exist")) + loggerFactory.assertThrowsAndLogs[CommandFailure]( + participant1.keys.secret.rotate_wrapper_key(wrongKeyId.unwrap), + _.warningMessage should include( + s"KMS operation `verify key $wrongKeyId exists and is active` failed: KmsCannotFindKeyError" + ), + entry => { + entry.shouldBeCantonErrorCode(GrpcVaultServiceError.WrapperKeyNotExistError.code) + entry.errorMessage should include( + s"Wrapper key id [$wrongKeyId] selected for rotation does not match an existing KMS key id." + ) + }, + ) + } + + "fails if we select a disabled wrapper key for a rotation" in { implicit env => + import env.* + loggerFactory.assertThrowsAndLogs[CommandFailure]( + participant1.keys.secret.rotate_wrapper_key(disabledKeyId.unwrap), + _.warningMessage should include( + s"KMS operation `verify key $disabledKeyId exists and is active` failed: KmsKeyDisabledError" + ), + entry => { + entry.shouldBeCantonErrorCode( + GrpcVaultServiceError.WrapperKeyDisabledOrDeletedError.code + ) + entry.errorMessage should include( + s"Wrapper key id [$disabledKeyId] selected for rotation cannot be used " + + s"because key is disabled or set to be deleted." + ) + }, + ) + } + + "fails if we rotate a wrapper key without an encrypted private store" in { implicit env => + import env.* + loggerFactory.assertThrowsAndLogs[CommandFailure]( + participant2.keys.secret.rotate_wrapper_key(), + entry => { + entry.shouldBeCantonErrorCode( + GrpcVaultServiceError.NoEncryptedPrivateKeyStoreError.code + ) + entry.errorMessage should include( + "Node is not running an encrypted private store" + ) + }, + entry => { + entry.errorMessage should include( + "GrpcServiceUnavailable: UNIMPLEMENTED/An error occurred. " + + "Please contact the operator and inquire about the request" + ) + }, + ) + } + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/RotateWrapperKeyIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/RotateWrapperKeyIntegrationTest.scala new file mode 100644 index 0000000000..419e9b0ccb --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/RotateWrapperKeyIntegrationTest.scala @@ -0,0 +1,68 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms + +import com.digitalasset.canton.crypto.kms.KmsKeyId +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentDefinition, + SharedEnvironment, +} + +trait RotateWrapperKeyIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment + with EncryptedCryptoPrivateStoreTestHelpers { + + override lazy val environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P2_S1M1.withSetup { implicit env => + import env.* + participants.all.synchronizers.connect_local(sequencer1, alias = daName) + } + + protected val protectedNodes: Set[String] = Set("participant1") + + protected val preDefinedKey: Option[String] + + "private keys are encrypted with the correct wrapper key after rotation" in { implicit env => + import env.* + + val encStore = getEncryptedCryptoStore(participant1.name) + + val initialKeys = listAllStoredKeys(encStore) + val initialWrapperKey = encStore.wrapperKeyId + + preDefinedKey match { + case Some(newWrapperKeyId) => + // user-manual-entry-begin: WrapperKeyRotationWithManuallyGeneratedKey + participant1.keys.secret.rotate_wrapper_key(newWrapperKeyId) + // user-manual-entry-end: WrapperKeyRotationWithManuallyGeneratedKey + case None => + // user-manual-entry-begin: WrapperKeyRotationWithAutoGeneratedKey + participant1.keys.secret.rotate_wrapper_key() + // user-manual-entry-end: WrapperKeyRotationWithAutoGeneratedKey + } + + try { + val currentWrapperKey = encStore.wrapperKeyId + initialWrapperKey shouldNot be(currentWrapperKey) + if (preDefinedKey.isDefined) preDefinedKey shouldBe Some(currentWrapperKey.unwrap) + forAll( + listAllStoredKeys(encStore.store) + .map(storedKey => KmsKeyId(storedKey.wrapperKeyId.valueOrFail("no wrapper key"))) + ) { wrapperKey => + wrapperKey shouldBe currentWrapperKey + } + + val decryptedKeys = checkAndDecryptKeys(participant1.name) + // compare the decrypted result with the previously existing keys (i.e. before the rotation) + decryptedKeys.map(_.copy(wrapperKeyId = None)) shouldBe initialKeys + .map(_.copy(wrapperKeyId = None)) + } finally { + if (preDefinedKey.isEmpty) + encStore.kms.deleteKey(encStore.wrapperKeyId).failOnShutdown.futureValue + } + } + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/SessionSigningKeysIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/SessionSigningKeysIntegrationTest.scala new file mode 100644 index 0000000000..2921a42b5c --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/SessionSigningKeysIntegrationTest.scala @@ -0,0 +1,89 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms + +import com.digitalasset.canton.config.{DbConfig, SessionSigningKeysConfig} +import com.digitalasset.canton.integration.plugins.{UseBftSequencer, UseReferenceBlockSequencer} +import com.digitalasset.canton.integration.tests.security.kms.aws.AwsKmsCryptoIntegrationTestBase +import com.digitalasset.canton.integration.tests.security.kms.gcp.GcpKmsCryptoIntegrationTestBase +import com.digitalasset.canton.integration.tests.security.kms.mock.MockKmsDriverCryptoIntegrationTestBase +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentSetupPlugin, + SharedEnvironment, +} + +/** Tests a scenario with a combination of non-KMS nodes, KMS nodes, and KMS nodes with session + * signing keys, and makes sure that all of them communicate correctly. + */ +trait SessionSigningKeysIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment + with KmsCryptoIntegrationTestBase { + + s"ping succeeds with nodes $protectedNodes using session signing keys" in { implicit env => + import env.* + + env.nodes.local.foreach { node => + if (protectedNodes.contains(node.name)) { + val sessionSigningKeysConfig = + node.config.crypto.kms.valueOrFail("no kms config").sessionSigningKeys + if (nodesWithSessionSigningKeysDisabled.contains(node.name)) + sessionSigningKeysConfig shouldBe SessionSigningKeysConfig.disabled + else sessionSigningKeysConfig shouldBe SessionSigningKeysConfig.default + } else node.config.crypto.kms shouldBe empty + } + + assertPingSucceeds(participant1, participant2) + } +} + +class AwsKmsSessionSigningKeysIntegrationTestPostgres + extends SessionSigningKeysIntegrationTest + with AwsKmsCryptoIntegrationTestBase { + override protected lazy val nodesWithSessionSigningKeysDisabled: Set[String] = + Set("participant2") + + override protected lazy val protectedNodes: Set[String] = + Set("participant1", "participant2", "mediator1") + + setupPlugins( + withAutoInit = false, + storagePlugin = Option.empty[EnvironmentSetupPlugin], + sequencerPlugin = new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory), + ) +} + +class GcpKmsSessionSigningKeysIntegrationTestPostgres + extends SessionSigningKeysIntegrationTest + with GcpKmsCryptoIntegrationTestBase { + override protected lazy val nodesWithSessionSigningKeysDisabled: Set[String] = + Set("participant2") + + override protected lazy val protectedNodes: Set[String] = + Set("participant1", "participant2", "mediator1") + + setupPlugins( + withAutoInit = false, + storagePlugin = Option.empty[EnvironmentSetupPlugin], + sequencerPlugin = new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory), + ) +} + +class MockKmsDriverSessionSigningKeysIntegrationTestPostgres + extends SessionSigningKeysIntegrationTest + with MockKmsDriverCryptoIntegrationTestBase { + override protected lazy val nodesWithSessionSigningKeysDisabled: Set[String] = + Set.empty + + override protected lazy val protectedNodes: Set[String] = + Set("sequencer1") + + setupPlugins( + // TODO(#25069): Add persistence to mock KMS driver to support auto-init = false + withAutoInit = true, + storagePlugin = Option.empty[EnvironmentSetupPlugin], + sequencerPlugin = new UseBftSequencer(loggerFactory), + ) +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/SessionSigningKeysLifecycleIntegrationTest.scala
b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/SessionSigningKeysLifecycleIntegrationTest.scala new file mode 100644 index 0000000000..97bd712356 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/SessionSigningKeysLifecycleIntegrationTest.scala @@ -0,0 +1,78 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms + +import com.digitalasset.canton.config.{DbConfig, KmsConfig, PositiveFiniteDuration} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.tests.security.kms.mock.MockKmsDriverCryptoIntegrationTestBase +import com.digitalasset.canton.integration.tests.security.kms.mock.MockKmsDriverCryptoIntegrationTestBase.mockKmsDriverConfig +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + ConfigTransform, + ConfigTransforms, + EnvironmentSetupPlugin, + SharedEnvironment, +} +import monocle.macros.syntax.lens.* + +/** TODO(#27529): In some scenarios, clock advances still fail due to problems with the current + * snapshot approximation. For example, since participants rely on the current snapshot + * approximation and can sign a message arbitrarily far in the past, the verification by the + * sequencer will fail if the nodes remain idle for a long time. + * + * Once everything is working, this test should be merged into + * [[SessionSigningKeysIntegrationTest]], and session signing keys should be set as default again. + */ +trait SessionSigningKeysLifecycleIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment + with KmsCryptoIntegrationTestBase { + + protected val keyValidityDuration: PositiveFiniteDuration = PositiveFiniteDuration.ofMinutes(5) + protected val advanceBy: PositiveFiniteDuration = PositiveFiniteDuration.ofMinutes(3) + + override protected def otherConfigTransforms: Seq[ConfigTransform] = Seq( + ConfigTransforms.useStaticTime + ) + + "verify correct session key lifecycle with clock advances" in { implicit env => + import env.* + + val simClock = env.environment.simClock.value + + assertPingSucceeds(participant1, participant2) + + // session signing keys created are still valid + simClock.advance(advanceBy.asJava) + + assertPingSucceeds(participant1, participant2) + + // session signing keys have expired; new keys will be generated + simClock.advance(advanceBy.asJava) + + assertPingSucceeds(participant1, participant2) + + } + +} + +class MockKmsDriverSessionSigningKeysLifecycleIntegrationTestPostgres + extends SessionSigningKeysLifecycleIntegrationTest + with MockKmsDriverCryptoIntegrationTestBase { + + override protected val kmsConfig: KmsConfig = + mockKmsDriverConfig.focus(_.sessionSigningKeys.keyValidityDuration).replace(keyValidityDuration) + + override protected lazy val nodesWithSessionSigningKeysDisabled: Set[String] = + Set.empty + + override protected lazy val protectedNodes: Set[String] = + Set("sequencer1") + + setupPlugins( + withAutoInit = true, + storagePlugin = Option.empty[EnvironmentSetupPlugin], + sequencerPlugin = new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory), + ) +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsEncryptedCryptoPrivateStoreTestBase.scala
b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsEncryptedCryptoPrivateStoreTestBase.scala new file mode 100644 index 0000000000..413b9e88b4 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsEncryptedCryptoPrivateStoreTestBase.scala @@ -0,0 +1,44 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms.aws + +import com.digitalasset.canton.integration.plugins.UseAwsKms +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentSetup, + EnvironmentSetupPlugin, +} + +trait AwsEncryptedCryptoPrivateStoreTestBase { + self: CommunityIntegrationTest with EnvironmentSetup => + + protected def setupPlugins( + protectedNodes: Set[String], + storagePlugin: Option[EnvironmentSetupPlugin], + sequencerPlugin: EnvironmentSetupPlugin, + withPreGenKey: Boolean = true, + multiRegion: Boolean = false, + ): Unit = { + registerPlugin( + if (withPreGenKey) + new UseAwsKms( + nodes = protectedNodes, + multiRegion = multiRegion, + timeouts = timeouts, + loggerFactory = loggerFactory, + ) + else + new UseAwsKms( + nodes = protectedNodes, + multiRegion = multiRegion, + keyId = None, + timeouts = timeouts, + loggerFactory = loggerFactory, + ) + ) + registerPlugin(sequencerPlugin) + storagePlugin.foreach(registerPlugin) + } + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsEncryptedCryptoPrivateStoreWithPreDefinedKeyIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsEncryptedCryptoPrivateStoreWithPreDefinedKeyIntegrationTest.scala new file mode 100644 index 0000000000..440149deac --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsEncryptedCryptoPrivateStoreWithPreDefinedKeyIntegrationTest.scala @@ -0,0 +1,42 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
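// Reviewer sketch (illustrative only, not part of the diff): how a concrete suite might drive the
// withPreGenKey switch defined in AwsEncryptedCryptoPrivateStoreTestBase above. The class name is
// hypothetical; all other identifiers come from files in this diff.
class AwsEncryptedCryptoPrivateStoreFreshKeyIntegrationTestPostgres
    extends EncryptedCryptoPrivateStoreIntegrationTest
    with AwsEncryptedCryptoPrivateStoreTestBase {
  setupPlugins(
    protectedNodes,
    storagePlugin = Some(new UsePostgres(loggerFactory)),
    sequencerPlugin = new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory),
    withPreGenKey = false, // keyId = None: UseAwsKms provisions its own key instead of the test alias
  )
}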
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms.aws + +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.integration.EnvironmentDefinition.allNodeNames +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} +import com.digitalasset.canton.integration.tests.security.kms.EncryptedCryptoPrivateStoreIntegrationTest + +/** Tests the encrypted private store in a setting where the AWS KMS key IS pre-defined: + * "alias/canton-kms-test-key" where (a) only participant1 has an encrypted private store + */ +class AwsEncryptedCryptoPrivateStoreWithPreDefinedKeyReferenceIntegrationTestPostgres + extends EncryptedCryptoPrivateStoreIntegrationTest + with AwsEncryptedCryptoPrivateStoreTestBase { + + setupPlugins( + protectedNodes, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory), + ) + +} + +/** (b) all nodes have an encrypted private store + */ +class AwsEncryptedCryptoPrivateStoreWithPreDefinedKeyReferenceIntegrationTestAllPostgres + extends EncryptedCryptoPrivateStoreIntegrationTest + with AwsEncryptedCryptoPrivateStoreTestBase { + + override protected val protectedNodes: Set[String] = allNodeNames( + environmentDefinition.baseConfig + ) + + setupPlugins( + protectedNodes, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory), + ) + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsKmsCryptoIntegrationTestBase.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsKmsCryptoIntegrationTestBase.scala new file mode 100644 index 0000000000..71218fcc8c --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsKmsCryptoIntegrationTestBase.scala @@ -0,0 +1,70 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
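// Reviewer sketch (illustrative only): the two per-node configurations that the ping test in
// SessionSigningKeysIntegrationTest asserts, selected via the nodesWithSessionSigningKeysDisabled
// set that the KMS plugins in this base trait consume. Only the two values shown in this diff are
// used; the semantics in the comments are inferred from the lifecycle test above.
object SessionSigningKeyModes {
  import com.digitalasset.canton.config.SessionSigningKeysConfig
  // default: the node signs with short-lived session keys that expire after keyValidityDuration
  val enabled: SessionSigningKeysConfig = SessionSigningKeysConfig.default
  // disabled: the node signs directly with its KMS-held key on every operation
  val disabled: SessionSigningKeysConfig = SessionSigningKeysConfig.disabled
}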
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms.aws + +import com.digitalasset.canton.config.{ + CryptoConfig, + CryptoProvider, + KmsConfig, + PrivateKeyStoreConfig, +} +import com.digitalasset.canton.integration.plugins.{ + EncryptedPrivateStoreStatus, + UseAwsKms, + UseConfigTransforms, +} +import com.digitalasset.canton.integration.tests.security.kms.KmsCryptoIntegrationTestBase +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + ConfigTransforms, + EnvironmentSetup, + EnvironmentSetupPlugin, +} + +trait AwsKmsCryptoIntegrationTestBase extends KmsCryptoIntegrationTestBase { + self: CommunityIntegrationTest with EnvironmentSetup => + + protected val kmsConfig: KmsConfig = KmsConfig.Aws.defaultTestConfig + + protected val topologyPreDefinedKeys: TopologyKmsKeys = TopologyKmsKeys( + namespaceKeyId = Some(s"alias/canton-kms-test-namespace-key"), + sequencerAuthKeyId = Some(s"alias/canton-kms-test-authentication-key"), + signingKeyId = Some(s"alias/canton-kms-test-signing-key"), + encryptionKeyId = Some(s"alias/canton-kms-test-asymmetric-key"), + ) + + protected def setupPlugins( + withAutoInit: Boolean, + storagePlugin: Option[EnvironmentSetupPlugin], + sequencerPlugin: EnvironmentSetupPlugin, + ): Unit = { + if (!withAutoInit) + registerPlugin( + new UseConfigTransforms( + Seq(ConfigTransforms.disableAutoInit(protectedNodes)), + loggerFactory, + ) + ) + registerPlugin( + new UseAwsKms( + nodes = protectedNodes, + nodesWithSessionSigningKeysDisabled = nodesWithSessionSigningKeysDisabled, + enableEncryptedPrivateStore = EncryptedPrivateStoreStatus.Disable, + timeouts = timeouts, + loggerFactory = loggerFactory, + ) + ) + registerPlugin(sequencerPlugin) + storagePlugin.foreach(registerPlugin) + } + +} + +object AwsKmsCryptoIntegrationTestBase { + val defaultAwsKmsCryptoConfig: CryptoConfig = CryptoConfig( + provider = CryptoProvider.Kms, + kms = Some(KmsConfig.Aws.defaultTestConfig), + privateKeyStore = PrivateKeyStoreConfig(None), + ) +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsKmsCryptoWithPreDefinedKeysIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsKmsCryptoWithPreDefinedKeysIntegrationTest.scala new file mode 100644 index 0000000000..85fe2f3b79 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsKmsCryptoWithPreDefinedKeysIntegrationTest.scala @@ -0,0 +1,80 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms.aws + +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.integration.EnvironmentDefinition.allNodeNames +import com.digitalasset.canton.integration.EnvironmentSetupPlugin +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} +import com.digitalasset.canton.integration.tests.security.CryptoIntegrationTest +import com.digitalasset.canton.integration.tests.security.kms.KmsCryptoWithPreDefinedKeysIntegrationTest + +/** Runs a crypto integration test with ALL nodes using an AWS KMS provider and pre-generated keys. + * Consequently, keys must be registered in Canton and nodes MUST be manually initialized.
+ */ +class AwsKmsCryptoWithPreDefinedKeysReferenceIntegrationTestAllNodes + extends CryptoIntegrationTest( + AwsKmsCryptoIntegrationTestBase.defaultAwsKmsCryptoConfig + ) + with AwsKmsCryptoIntegrationTestBase { + + // all nodes are protected using KMS as a provider + override lazy protected val protectedNodes: Set[String] = allNodeNames( + environmentDefinition.baseConfig + ) + + setupPlugins( + withAutoInit = false, + storagePlugin = Option.empty[EnvironmentSetupPlugin], + sequencerPlugin = new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory), + ) +} + +/** Runs a crypto integration test with one participant using an AWS KMS provider and pre-generated + * CROSS-ACCOUNT keys. Because we are using cross-account keys, the IAM user has fewer permissions + * but should still be able to run Canton. Consequently, keys must be registered in Canton and the + * participant node MUST be manually initialized. + */ +class AwsKmsCryptoWithPreDefinedCrossAccountKeysReferenceIntegrationTest + extends CryptoIntegrationTest( + AwsKmsCryptoIntegrationTestBase.defaultAwsKmsCryptoConfig + ) + with AwsKmsCryptoIntegrationTestBase { + override val topologyPreDefinedKeys: TopologyKmsKeys = TopologyKmsKeys( + namespaceKeyId = + Some(s"arn:aws:kms:us-east-1:577087714890:key/c94045e5-ef8d-44d9-bc75-54733aeb5df0"), + sequencerAuthKeyId = + Some(s"arn:aws:kms:us-east-1:577087714890:key/91b6e88e-cb70-4b90-97a8-023c2955ae61"), + signingKeyId = + Some(s"arn:aws:kms:us-east-1:577087714890:key/bbf85eb2-bfed-48b7-9d3f-3e0ddfb0c0d7"), + encryptionKeyId = + Some(s"arn:aws:kms:us-east-1:577087714890:key/b9e2b349-0682-4446-9e9a-36db172a993b"), + ) + + override protected def getTopologyKeysForNode(name: String): TopologyKmsKeys = + topologyPreDefinedKeys + + setupPlugins( + withAutoInit = false, + storagePlugin = Option.empty[EnvironmentSetupPlugin], + sequencerPlugin = new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory), + ) +} + +/** Runs a crypto integration test with one participant using an AWS KMS provider with + * pre-generated keys. Runs with persistence so we also check that it is able to recover from an + * unexpected shutdown. + */ +class AwsKmsCryptoWithPreDefinedKeysReferenceIntegrationTestPostgres + extends CryptoIntegrationTest( + AwsKmsCryptoIntegrationTestBase.defaultAwsKmsCryptoConfig + ) + with AwsKmsCryptoIntegrationTestBase + with KmsCryptoWithPreDefinedKeysIntegrationTest { + setupPlugins( + withAutoInit = false, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory), + ) +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsMigrationClearToEncryptedStoreIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsMigrationClearToEncryptedStoreIntegrationTest.scala new file mode 100644 index 0000000000..7fc092939a --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsMigrationClearToEncryptedStoreIntegrationTest.scala @@ -0,0 +1,29 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
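// Reviewer sketch (hypothetical aliases, not part of the diff): getTopologyKeysForNode above maps
// every node to the same shared keys; a suite whose nodes must not share pre-generated keys could
// instead derive per-node aliases from the node name, e.g.:
override protected def getTopologyKeysForNode(name: String): TopologyKmsKeys =
  TopologyKmsKeys(
    namespaceKeyId = Some(s"alias/canton-kms-test-namespace-key-$name"),
    sequencerAuthKeyId = Some(s"alias/canton-kms-test-authentication-key-$name"),
    signingKeyId = Some(s"alias/canton-kms-test-signing-key-$name"),
    encryptionKeyId = Some(s"alias/canton-kms-test-asymmetric-key-$name"),
  )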
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms.aws + +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.integration.plugins.{ + UseAwsKms, + UsePostgres, + UseReferenceBlockSequencer, +} +import com.digitalasset.canton.integration.tests.security.kms.MigrationClearToEncryptedStoreIntegrationTest + +/** Tests a migration from a clear crypto private store to an encrypted private store. It requires a + * node to restart and to set up an encrypted private store and AWS KMS in the config files. + */ +class AwsMigrationClearToEncryptedStoreReferenceIntegrationTestPostgres + extends MigrationClearToEncryptedStoreIntegrationTest { + + override protected val kmsPlugin = new UseAwsKms( + nodes = protectedNodes, + timeouts = timeouts, + loggerFactory = loggerFactory, + ) + + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UsePostgres(loggerFactory)) + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsMigrationEncryptedToClearStoreIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsMigrationEncryptedToClearStoreIntegrationTest.scala new file mode 100644 index 0000000000..98de3d1ca2 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsMigrationEncryptedToClearStoreIntegrationTest.scala @@ -0,0 +1,30 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms.aws + +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.integration.plugins.* +import com.digitalasset.canton.integration.tests.security.kms.MigrationEncryptedToClearStoreIntegrationTest + +/** Tests a migration from an encrypted private store that uses AWS KMS to a clear crypto private + * store. + */ +class AwsMigrationEncryptedToClearStoreReferenceIntegrationTestPostgres + extends MigrationEncryptedToClearStoreIntegrationTest + with AwsEncryptedCryptoPrivateStoreTestBase { + + override protected val kmsRevertPlugin = new UseAwsKms( + nodes = protectedNodes, + enableEncryptedPrivateStore = EncryptedPrivateStoreStatus.Revert, + timeouts = timeouts, + loggerFactory = loggerFactory, + ) + + setupPlugins( + protectedNodes, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory), + ) + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsRotateWrapperKeyFailureIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsRotateWrapperKeyFailureIntegrationTest.scala new file mode 100644 index 0000000000..cb74a81536 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsRotateWrapperKeyFailureIntegrationTest.scala @@ -0,0 +1,27 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
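// Reviewer summary (comments only, inferred from the tests in this diff): the private-store modes
// that the UseAwsKms plugin is driven through by the surrounding migration tests.
//   new UseAwsKms(nodes = ..., ...)                                                     // default: encrypted private store enabled
//   new UseAwsKms(..., enableEncryptedPrivateStore = EncryptedPrivateStoreStatus.Disable) // KMS crypto provider only, clear store
//   new UseAwsKms(..., enableEncryptedPrivateStore = EncryptedPrivateStoreStatus.Revert)  // decrypt an encrypted store back to clear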
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms.aws + +import com.digitalasset.canton.config.CantonRequireTypes.String300 +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.crypto.kms.KmsKeyId +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} +import com.digitalasset.canton.integration.tests.security.kms.RotateWrapperKeyFailureIntegrationTest + +/** Tests erroneous calls to the RotateWrapperKey console command. + */ +class AwsRotateWrapperKeyFailureReferenceIntegrationTestPostgres + extends RotateWrapperKeyFailureIntegrationTest + with AwsEncryptedCryptoPrivateStoreTestBase { + + override protected val disabledKeyId: KmsKeyId = + KmsKeyId(String300.tryCreate("alias/canton-kms-test-key-disabled")) + + setupPlugins( + protectedNodes, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory), + ) + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsRotateWrapperKeyIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsRotateWrapperKeyIntegrationTest.scala new file mode 100644 index 0000000000..9eefb6bb2e --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/aws/AwsRotateWrapperKeyIntegrationTest.scala @@ -0,0 +1,31 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms.aws + +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.integration.plugins.{ + UseAwsKms, + UsePostgres, + UseReferenceBlockSequencer, +} +import com.digitalasset.canton.integration.tests.security.kms.RotateWrapperKeyIntegrationTest + +/** Tests a manual rotation of the wrapper key, where an AWS KMS key is SPECIFIED - + * "alias/canton-kms-rotation-test-key" - and selected to be the new wrapper key. + */ +class AwsRotateWrapperKeyWithPreDefinedKeyReferenceIntegrationTestPostgres + extends RotateWrapperKeyIntegrationTest + with AwsEncryptedCryptoPrivateStoreTestBase { + + override protected val preDefinedKey: Option[String] = Some( + UseAwsKms.DefaultCantonRotationTestKeyId.unwrap + ) + + setupPlugins( + protectedNodes, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory), + ) + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpEncryptedCryptoPrivateStoreTestBase.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpEncryptedCryptoPrivateStoreTestBase.scala new file mode 100644 index 0000000000..6411c45ccd --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpEncryptedCryptoPrivateStoreTestBase.scala @@ -0,0 +1,49 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
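// Reviewer sketch (an assumption based on Canton's documented console keys.secret API; the exact
// call is not shown in this diff): the rotation exercised by the tests above boils down to a
// console call that selects the pre-defined alias as the new wrapper key.
participant1.keys.secret.rotate_wrapper_key(
  newWrapperKeyId = "alias/canton-kms-rotation-test-key"
)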
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms.gcp + +import com.digitalasset.canton.config.KmsConfig +import com.digitalasset.canton.integration.plugins.UseGcpKms +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentSetup, + EnvironmentSetupPlugin, +} + +trait GcpEncryptedCryptoPrivateStoreTestBase { + self: CommunityIntegrationTest with EnvironmentSetup => + + protected def setupPlugins( + protectedNodes: Set[String], + storagePlugin: Option[EnvironmentSetupPlugin], + sequencerPlugin: EnvironmentSetupPlugin, + withPreGenKey: Boolean = true, + multiRegion: Boolean = false, + ): Unit = { + val kmsConfig = + if (multiRegion) KmsConfig.Gcp.multiRegionTestConfig + else KmsConfig.Gcp.defaultTestConfig + + registerPlugin( + if (withPreGenKey) + new UseGcpKms( + nodes = protectedNodes, + kmsConfig = kmsConfig, + timeouts = timeouts, + loggerFactory = loggerFactory, + ) + else + new UseGcpKms( + nodes = protectedNodes, + keyId = None, + kmsConfig = kmsConfig, + timeouts = timeouts, + loggerFactory = loggerFactory, + ) + ) + registerPlugin(sequencerPlugin) + storagePlugin.foreach(registerPlugin) + } + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpEncryptedCryptoPrivateStoreWithPreDefinedKeyIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpEncryptedCryptoPrivateStoreWithPreDefinedKeyIntegrationTest.scala new file mode 100644 index 0000000000..9bd9fc3303 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpEncryptedCryptoPrivateStoreWithPreDefinedKeyIntegrationTest.scala @@ -0,0 +1,41 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
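// Reviewer sketch (hypothetical class name, not part of the diff): a suite opting into the
// multi-region GCP key via the helper above; multiRegion = true swaps in
// KmsConfig.Gcp.multiRegionTestConfig instead of the default test config.
class GcpEncryptedCryptoPrivateStoreMultiRegionIntegrationTestPostgres
    extends EncryptedCryptoPrivateStoreIntegrationTest
    with GcpEncryptedCryptoPrivateStoreTestBase {
  setupPlugins(
    protectedNodes,
    storagePlugin = Some(new UsePostgres(loggerFactory)),
    sequencerPlugin = new UseBftSequencer(loggerFactory),
    multiRegion = true,
  )
}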
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms.gcp + +import com.digitalasset.canton.integration.EnvironmentDefinition.allNodeNames +import com.digitalasset.canton.integration.plugins.{UseBftSequencer, UsePostgres} +import com.digitalasset.canton.integration.tests.security.kms.EncryptedCryptoPrivateStoreIntegrationTest + +/** Tests the encrypted private store in a setting where the GCP KMS key IS pre-defined: + * "canton-kms-test-key" where (a) only participant1 has an encrypted private store + */ +class GcpEncryptedCryptoPrivateStoreWithPreDefinedKeyBftOrderingIntegrationTestPostgres + extends EncryptedCryptoPrivateStoreIntegrationTest + with GcpEncryptedCryptoPrivateStoreTestBase { + + setupPlugins( + protectedNodes, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = new UseBftSequencer(loggerFactory), + ) + +} + +/** (b) all nodes have an encrypted private store + */ +class GcpEncryptedCryptoPrivateStoreWithPreDefinedKeyBftOrderingIntegrationTestAllPostgres + extends EncryptedCryptoPrivateStoreIntegrationTest + with GcpEncryptedCryptoPrivateStoreTestBase { + + override protected val protectedNodes: Set[String] = allNodeNames( + environmentDefinition.baseConfig + ) + + setupPlugins( + protectedNodes, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = new UseBftSequencer(loggerFactory), + ) + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpKmsCryptoIntegrationTestBase.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpKmsCryptoIntegrationTestBase.scala new file mode 100644 index 0000000000..29f179f6f3 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpKmsCryptoIntegrationTestBase.scala @@ -0,0 +1,70 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms.gcp + +import com.digitalasset.canton.config.{ + CryptoConfig, + CryptoProvider, + KmsConfig, + PrivateKeyStoreConfig, +} +import com.digitalasset.canton.integration.plugins.{ + EncryptedPrivateStoreStatus, + UseConfigTransforms, + UseGcpKms, +} +import com.digitalasset.canton.integration.tests.security.kms.KmsCryptoIntegrationTestBase +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + ConfigTransforms, + EnvironmentSetup, + EnvironmentSetupPlugin, +} + +trait GcpKmsCryptoIntegrationTestBase extends KmsCryptoIntegrationTestBase { + self: CommunityIntegrationTest with EnvironmentSetup => + + protected val kmsConfig: KmsConfig = KmsConfig.Gcp.defaultTestConfig + + protected val topologyPreDefinedKeys: TopologyKmsKeys = TopologyKmsKeys( + namespaceKeyId = Some(s"canton-kms-test-namespace-key"), + sequencerAuthKeyId = Some(s"canton-kms-test-authentication-key"), + signingKeyId = Some(s"canton-kms-test-signing-key"), + encryptionKeyId = Some(s"canton-kms-test-asymmetric-key"), + ) + + protected def setupPlugins( + withAutoInit: Boolean, + storagePlugin: Option[EnvironmentSetupPlugin], + sequencerPlugin: EnvironmentSetupPlugin, + ): Unit = { + if (!withAutoInit) + registerPlugin( + new UseConfigTransforms( + Seq(ConfigTransforms.disableAutoInit(protectedNodes)), + loggerFactory, + ) + ) + registerPlugin( + new UseGcpKms( + nodes = protectedNodes, + nodesWithSessionSigningKeysDisabled = nodesWithSessionSigningKeysDisabled, + enableEncryptedPrivateStore = EncryptedPrivateStoreStatus.Disable, + timeouts = timeouts, + loggerFactory = loggerFactory, + ) + ) + registerPlugin(sequencerPlugin) + storagePlugin.foreach(registerPlugin) + } + +} + +object GcpKmsCryptoIntegrationTestBase { + val defaultGcpKmsCryptoConfig: CryptoConfig = CryptoConfig( + provider = CryptoProvider.Kms, + kms = Some(KmsConfig.Gcp.defaultTestConfig), + privateKeyStore = PrivateKeyStoreConfig(None), + ) +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpKmsCryptoWithPreDefinedKeysIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpKmsCryptoWithPreDefinedKeysIntegrationTest.scala new file mode 100644 index 0000000000..914986a620 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpKmsCryptoWithPreDefinedKeysIntegrationTest.scala @@ -0,0 +1,49 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms.gcp + +import com.digitalasset.canton.integration.EnvironmentDefinition.allNodeNames +import com.digitalasset.canton.integration.EnvironmentSetupPlugin +import com.digitalasset.canton.integration.plugins.{UseBftSequencer, UsePostgres} +import com.digitalasset.canton.integration.tests.security.CryptoIntegrationTest +import com.digitalasset.canton.integration.tests.security.kms.KmsCryptoWithPreDefinedKeysIntegrationTest + +/** Runs a crypto integration test with ALL nodes (except participant3, which is not involved in + * the tests) using a GCP KMS provider and pre-generated keys. Consequently, keys must be + * registered in Canton and nodes MUST be manually initialized.
+ */ +class GcpKmsCryptoWithPreDefinedKeysBftOrderingIntegrationTestAllNodes + extends CryptoIntegrationTest( + GcpKmsCryptoIntegrationTestBase.defaultGcpKmsCryptoConfig + ) + with GcpKmsCryptoIntegrationTestBase { + + // all nodes are protected using KMS as a provider + override lazy protected val protectedNodes: Set[String] = allNodeNames( + environmentDefinition.baseConfig + ) + + setupPlugins( + withAutoInit = false, + storagePlugin = Option.empty[EnvironmentSetupPlugin], + sequencerPlugin = new UseBftSequencer(loggerFactory), + ) +} + +/** Runs a crypto integration test with one participant using a GCP KMS provider with pre-generated + * keys. Runs with persistence so we also check that it is able to recover from an unexpected + * shutdown. + */ +class GcpKmsCryptoWithPreDefinedKeysBftOrderingIntegrationTestPostgres + extends CryptoIntegrationTest( + GcpKmsCryptoIntegrationTestBase.defaultGcpKmsCryptoConfig + ) + with GcpKmsCryptoIntegrationTestBase + with KmsCryptoWithPreDefinedKeysIntegrationTest { + setupPlugins( + withAutoInit = false, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = new UseBftSequencer(loggerFactory), + ) +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpMigrationClearToEncryptedStoreBftOrderingIntegrationTestPostgres.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpMigrationClearToEncryptedStoreBftOrderingIntegrationTestPostgres.scala new file mode 100644 index 0000000000..3b3e2a3f5a --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpMigrationClearToEncryptedStoreBftOrderingIntegrationTestPostgres.scala @@ -0,0 +1,24 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms.gcp + +import com.digitalasset.canton.integration.plugins.{UseBftSequencer, UseGcpKms, UsePostgres} +import com.digitalasset.canton.integration.tests.security.kms.MigrationClearToEncryptedStoreIntegrationTest + +/** Tests a migration from a clear crypto private store to an encrypted private store. It requires a + * node to restart and to set up an encrypted private store and GCP KMS in the config files. + */ +class GcpMigrationClearToEncryptedStoreBftOrderingIntegrationTestPostgres + extends MigrationClearToEncryptedStoreIntegrationTest { + + override protected val kmsPlugin = new UseGcpKms( + nodes = protectedNodes, + timeouts = timeouts, + loggerFactory = loggerFactory, + ) + + registerPlugin(new UseBftSequencer(loggerFactory)) + registerPlugin(new UsePostgres(loggerFactory)) + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpMigrationEncryptedToClearStoreBftOrderingIntegrationTestPostgres.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpMigrationEncryptedToClearStoreBftOrderingIntegrationTestPostgres.scala new file mode 100644 index 0000000000..f7393a0963 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpMigrationEncryptedToClearStoreBftOrderingIntegrationTestPostgres.scala @@ -0,0 +1,34 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms.gcp + +import com.digitalasset.canton.integration.plugins.{ + EncryptedPrivateStoreStatus, + UseBftSequencer, + UseGcpKms, + UsePostgres, +} +import com.digitalasset.canton.integration.tests.security.kms.MigrationEncryptedToClearStoreIntegrationTest + +/** Tests a migration from an encrypted private store that uses GCP KMS to a clear crypto private + * store. + */ +class GcpMigrationEncryptedToClearStoreBftOrderingIntegrationTestPostgres + extends MigrationEncryptedToClearStoreIntegrationTest + with GcpEncryptedCryptoPrivateStoreTestBase { + + override protected val kmsRevertPlugin = new UseGcpKms( + nodes = protectedNodes, + enableEncryptedPrivateStore = EncryptedPrivateStoreStatus.Revert, + timeouts = timeouts, + loggerFactory = loggerFactory, + ) + + setupPlugins( + protectedNodes, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = new UseBftSequencer(loggerFactory), + ) + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpRotateWrapperKeyFailureIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpRotateWrapperKeyFailureIntegrationTest.scala new file mode 100644 index 0000000000..9f32009541 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpRotateWrapperKeyFailureIntegrationTest.scala @@ -0,0 +1,26 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms.gcp + +import com.digitalasset.canton.config.CantonRequireTypes.String300 +import com.digitalasset.canton.crypto.kms.KmsKeyId +import com.digitalasset.canton.integration.plugins.{UseBftSequencer, UsePostgres} +import com.digitalasset.canton.integration.tests.security.kms.RotateWrapperKeyFailureIntegrationTest + +/** Tests erroneous calls to the RotateWrapperKey console command. + */ +class GcpRotateWrapperKeyFailureBftOrderingIntegrationTestPostgres + extends RotateWrapperKeyFailureIntegrationTest + with GcpEncryptedCryptoPrivateStoreTestBase { + + override protected val disabledKeyId: KmsKeyId = + KmsKeyId(String300.tryCreate("canton-kms-test-key-disabled")) + + setupPlugins( + protectedNodes, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = new UseBftSequencer(loggerFactory), + ) + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpRotateWrapperKeyIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpRotateWrapperKeyIntegrationTest.scala new file mode 100644 index 0000000000..291c62dad3 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/gcp/GcpRotateWrapperKeyIntegrationTest.scala @@ -0,0 +1,25 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms.gcp + +import com.digitalasset.canton.integration.plugins.{UseBftSequencer, UseGcpKms, UsePostgres} +import com.digitalasset.canton.integration.tests.security.kms.RotateWrapperKeyIntegrationTest + +/** Tests a manual rotation of the wrapper key, where a GCP KMS key is SPECIFIED - + * "canton-kms-rotation-test-key" - and selected to be the new wrapper key. + */ +class GcpRotateWrapperKeyWithPreDefinedKeyBftOrderingIntegrationTestPostgres + extends RotateWrapperKeyIntegrationTest + with GcpEncryptedCryptoPrivateStoreTestBase { + + override protected val preDefinedKey: Option[String] = Some( + UseGcpKms.DefaultCantonRotationTestKeyId.unwrap + ) + + setupPlugins( + protectedNodes, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = new UseBftSequencer(loggerFactory), + ) +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/mock/MockEncryptedCryptoPrivateStoreIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/mock/MockEncryptedCryptoPrivateStoreIntegrationTest.scala new file mode 100644 index 0000000000..eddee97d52 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/mock/MockEncryptedCryptoPrivateStoreIntegrationTest.scala @@ -0,0 +1,41 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms.mock + +import com.digitalasset.canton.integration.EnvironmentDefinition.allNodeNames +import com.digitalasset.canton.integration.plugins.{UseBftSequencer, UsePostgres} +import com.digitalasset.canton.integration.tests.security.kms.EncryptedCryptoPrivateStoreIntegrationTest + +/** Tests the encrypted private store with a Mock KMS driver where (a) only participant1 has an + * encrypted private store + */ +class MockEncryptedCryptoPrivateStoreBftOrderingIntegrationTestPostgres + extends EncryptedCryptoPrivateStoreIntegrationTest + with MockEncryptedCryptoPrivateStoreTestBase { + + setupPlugins( + protectedNodes, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = new UseBftSequencer(loggerFactory), + ) + +} + +/** (b) all nodes have an encrypted private store + */ +class MockEncryptedCryptoPrivateStoreBftOrderingIntegrationTestAllPostgres + extends EncryptedCryptoPrivateStoreIntegrationTest + with MockEncryptedCryptoPrivateStoreTestBase { + + override protected val protectedNodes: Set[String] = allNodeNames( + environmentDefinition.baseConfig + ) + + setupPlugins( + protectedNodes, + storagePlugin = Some(new UsePostgres(loggerFactory)), + sequencerPlugin = new UseBftSequencer(loggerFactory), + ) + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/mock/MockEncryptedCryptoPrivateStoreTestBase.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/mock/MockEncryptedCryptoPrivateStoreTestBase.scala new file mode 100644 index 0000000000..56edb065e9 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/mock/MockEncryptedCryptoPrivateStoreTestBase.scala @@ -0,0 +1,35 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms.mock + +import com.digitalasset.canton.crypto.kms.mock.v1.MockKmsDriverFactory.mockKmsDriverName +import com.digitalasset.canton.integration.plugins.UseKmsDriver +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentSetup, + EnvironmentSetupPlugin, +} + +trait MockEncryptedCryptoPrivateStoreTestBase { + self: CommunityIntegrationTest with EnvironmentSetup => + + protected def setupPlugins( + protectedNodes: Set[String], + storagePlugin: Option[EnvironmentSetupPlugin], + sequencerPlugin: EnvironmentSetupPlugin, + ): Unit = { + registerPlugin( + new UseKmsDriver( + nodes = protectedNodes, + keyId = None, + driverName = mockKmsDriverName, + timeouts = timeouts, + loggerFactory = loggerFactory, + ) + ) + registerPlugin(sequencerPlugin) + storagePlugin.foreach(registerPlugin) + } + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/mock/MockKmsDriverCryptoIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/mock/MockKmsDriverCryptoIntegrationTest.scala new file mode 100644 index 0000000000..a77bcbf981 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/mock/MockKmsDriverCryptoIntegrationTest.scala @@ -0,0 +1,24 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms.mock + +import com.digitalasset.canton.integration.tests.nightly.kms.KmsCryptoNoPreDefinedKeysIntegrationTest +import com.digitalasset.canton.integration.tests.security.CryptoIntegrationTest + +/** Runs a ping while one participant is using a Mock KMS provider and letting Canton generate its + * own keys (i.e. auto-init == true) + */ +class MockKmsDriverCryptoIntegrationTest + extends CryptoIntegrationTest( + MockKmsDriverCryptoIntegrationTestBase.mockKmsDriverCryptoConfig + ) + with MockKmsDriverCryptoIntegrationTestBase + with KmsCryptoNoPreDefinedKeysIntegrationTest { + + override def afterAll(): Unit = { + deleteAllGenerateKeys() + super.afterAll() + } + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/mock/MockKmsDriverCryptoIntegrationTestBase.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/mock/MockKmsDriverCryptoIntegrationTestBase.scala new file mode 100644 index 0000000000..55f23d8d00 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/security/kms/mock/MockKmsDriverCryptoIntegrationTestBase.scala @@ -0,0 +1,75 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
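// Reviewer sketch (the "rotation-period" key is hypothetical, not part of the diff): driver-specific
// settings are passed through as a raw Typesafe ConfigValue, mirroring the empty mockKmsDriverConfig
// defined below.
object MockKmsDriverConfigExample {
  import com.typesafe.config.ConfigValueFactory
  import scala.jdk.CollectionConverters.*

  val driverWithOptions: KmsConfig.Driver = KmsConfig.Driver(
    mockKmsDriverName,
    ConfigValueFactory.fromMap(Map[String, AnyRef]("rotation-period" -> "P30D").asJava),
  )
}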
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.security.kms.mock + +import com.digitalasset.canton.config.{ + CryptoConfig, + CryptoProvider, + KmsConfig, + PrivateKeyStoreConfig, +} +import com.digitalasset.canton.crypto.kms.mock.v1.MockKmsDriverFactory.mockKmsDriverName +import com.digitalasset.canton.integration.plugins.{EncryptedPrivateStoreStatus, UseKmsDriver} +import com.digitalasset.canton.integration.tests.security.kms.KmsCryptoIntegrationTestBase +import com.digitalasset.canton.integration.tests.security.kms.mock.MockKmsDriverCryptoIntegrationTestBase.mockKmsDriverConfig +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentSetup, + EnvironmentSetupPlugin, +} +import com.typesafe.config.ConfigValueFactory + +import scala.jdk.CollectionConverters.* + +trait MockKmsDriverCryptoIntegrationTestBase extends KmsCryptoIntegrationTestBase { + self: CommunityIntegrationTest with EnvironmentSetup => + + protected val kmsConfig: KmsConfig = mockKmsDriverConfig + + // TODO(#25069): Add persistence to mock KMS driver + protected val topologyPreDefinedKeys: TopologyKmsKeys = TopologyKmsKeys( + namespaceKeyId = Some(s"canton-kms-test-namespace-key"), + sequencerAuthKeyId = Some(s"canton-kms-test-authentication-key"), + signingKeyId = Some(s"canton-kms-test-signing-key"), + encryptionKeyId = Some(s"canton-kms-test-asymmetric-key"), + ) + + protected def setupPlugins( + withAutoInit: Boolean, + storagePlugin: Option[EnvironmentSetupPlugin], + sequencerPlugin: EnvironmentSetupPlugin, + ): Unit = { + if (!withAutoInit) + // TODO(#25069): Add persistence to mock KMS driver + throw new IllegalArgumentException( + s"Cannot run with auto-init = false because there is no persistence in the Mock KMS Driver" + ) + registerPlugin( + new UseKmsDriver( + nodes = protectedNodes, + nodesWithSessionSigningKeysDisabled = nodesWithSessionSigningKeysDisabled, + enableEncryptedPrivateStore = EncryptedPrivateStoreStatus.Disable, + driverName = mockKmsDriverName, + timeouts = timeouts, + loggerFactory = loggerFactory, + ) + ) + registerPlugin(sequencerPlugin) + storagePlugin.foreach(registerPlugin) + } + +} + +object MockKmsDriverCryptoIntegrationTestBase { + lazy val mockKmsDriverConfig: KmsConfig.Driver = + KmsConfig.Driver( + mockKmsDriverName, + ConfigValueFactory.fromMap(Map.empty[String, AnyRef].asJava), + ) + val mockKmsDriverCryptoConfig: CryptoConfig = CryptoConfig( + provider = CryptoProvider.Kms, + kms = Some(mockKmsDriverConfig), + privateKeyStore = PrivateKeyStoreConfig(None), + ) +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/ExternalSequencerIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/ExternalSequencerIntegrationTest.scala index ad5bf2bd28..0969c6b9a7 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/ExternalSequencerIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/ExternalSequencerIntegrationTest.scala @@ -24,9 +24,9 @@ import com.digitalasset.canton.integration.bootstrap.{ import com.digitalasset.canton.integration.plugins.UseExternalProcess.ShutdownPhase import com.digitalasset.canton.integration.plugins.{ UseBftSequencer, - UseCommunityReferenceBlockSequencer, UseExternalProcess, UsePostgres, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.{
CommunityIntegrationTest, @@ -185,9 +185,8 @@ private[tests] final case class QuickSequencerReconnection( class ExternalReferenceSequencerIntegrationTest extends ExternalSequencerIntegrationTest("reference") { - override protected lazy val sequencerPlugin - : UseCommunityReferenceBlockSequencer[DbConfig.Postgres] = - new UseCommunityReferenceBlockSequencer[Postgres](loggerFactory) + override protected lazy val sequencerPlugin: UseReferenceBlockSequencer[DbConfig.Postgres] = + new UseReferenceBlockSequencer[Postgres](loggerFactory) } class ExternalBftOrderingSequencerIntegrationTest diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/RehydrationIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/RehydrationIntegrationTest.scala index 0d0f1e2364..883dea72c7 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/RehydrationIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/RehydrationIntegrationTest.scala @@ -16,10 +16,7 @@ import com.digitalasset.canton.config.DbConfig.Postgres import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} import com.digitalasset.canton.console.{InstanceReference, ParticipantReference} import com.digitalasset.canton.examples.java.{cycle as C, iou} -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.util.UpdateFormatHelpers.getUpdateFormat import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -33,7 +30,11 @@ import com.digitalasset.canton.logging.SuppressingLogger.LogEntryOptionality import com.digitalasset.canton.participant.config.{ParticipantInitConfig, ParticipantNodeConfig} import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig -import com.digitalasset.canton.sequencing.{SequencerConnections, SubmissionRequestAmplification} +import com.digitalasset.canton.sequencing.{ + SequencerConnectionPoolDelays, + SequencerConnections, + SubmissionRequestAmplification, +} import com.digitalasset.canton.synchronizer.mediator.MediatorNodeConfig import com.digitalasset.canton.synchronizer.sequencer.config.SequencerNodeConfig import com.digitalasset.canton.topology.{PartyId, PhysicalSynchronizerId} @@ -67,9 +68,6 @@ abstract class RehydrationIntegrationTest private val transactionLimit: Int = iterations * 3 private val acsLimit: Int = iterations - private val staticSynchronizerParameters = - EnvironmentDefinition.defaultStaticSynchronizerParameters - private var observedTx: Seq[Transaction] = _ private var observedAcs: Map[String, CreatedEvent] = _ @@ -126,7 +124,7 @@ abstract class RehydrationIntegrationTest mediators = Seq(mediator1), synchronizerOwners = Seq[InstanceReference](sequencer1, mediator1), synchronizerThreshold = PositiveInt.two, - staticSynchronizerParameters, + staticSynchronizerParameters = EnvironmentDefinition.defaultStaticSynchronizerParameters, ) sequencer1.health.wait_for_initialized() @@ -198,6 +196,7 @@ abstract class RehydrationIntegrationTest sequencerTrustThreshold = PositiveInt.one, sequencerLivenessMargin = NonNegativeInt.zero, SubmissionRequestAmplification.NoAmplification, + 
SequencerConnectionPoolDelays.default, ) // stop mediator1 and copy it over to the fresh mediator2 @@ -226,7 +225,7 @@ abstract class RehydrationIntegrationTest sequencer2, tempDirSequencer, synchronizerId, - staticSynchronizerParameters, + EnvironmentDefinition.defaultStaticSynchronizerParameters, sequencerConnections, ) // architecture-handbook-entry-end: RehydrationSequencer @@ -239,7 +238,7 @@ abstract class RehydrationIntegrationTest mediator2, tempDirMediator, synchronizerId, - staticSynchronizerParameters, + EnvironmentDefinition.defaultStaticSynchronizerParameters, sequencerConnections, ) // architecture-handbook-entry-end: RehydrationMediator @@ -268,7 +267,7 @@ abstract class RehydrationIntegrationTest participant2, tempDirParticipant, synchronizerId, - staticSynchronizerParameters, + EnvironmentDefinition.defaultStaticSynchronizerParameters, sequencerConnections, ) repair.dars.upload(participant2, tempDirParticipant) @@ -298,6 +297,7 @@ abstract class RehydrationIntegrationTest sequencerTrustThreshold = PositiveInt.one, sequencerLivenessMargin = NonNegativeInt.zero, SubmissionRequestAmplification.NoAmplification, + SequencerConnectionPoolDelays.default, ) loggerFactory.assertLogsUnorderedOptional( @@ -424,7 +424,7 @@ abstract class RehydrationIntegrationTest class ReferenceRehydrationIntegrationTestPostgres extends RehydrationIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[Postgres](loggerFactory)) } // TODO(#16823): Re-enable test. This test requires that the second sequencer reads old blocks from genesis, diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/SequencerApiRateLimitingIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/SequencerApiRateLimitingIntegrationTest.scala index 88ac85a597..fa77fcf6c4 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/SequencerApiRateLimitingIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/SequencerApiRateLimitingIntegrationTest.scala @@ -5,6 +5,7 @@ package com.digitalasset.canton.integration.tests.sequencer import com.digitalasset.canton.concurrent.Threading import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.config.StreamLimitConfig import com.digitalasset.canton.integration.bootstrap.{ NetworkBootstrapper, NetworkTopologyDescription, @@ -29,10 +30,14 @@ class SequencerApiRateLimitingIntegrationTest EnvironmentDefinition.P2S1M1_Manual .addConfigTransform( ConfigTransforms.updateAllSequencerConfigs_( - _.focus(_.parameters.sequencerApiLimits).replace( - Map( - com.digitalasset.canton.sequencer.api.v30.SequencerServiceGrpc.METHOD_DOWNLOAD_TOPOLOGY_STATE_FOR_INIT.getFullMethodName -> NonNegativeInt.one, - com.digitalasset.canton.sequencer.api.v30.SequencerServiceGrpc.METHOD_SUBSCRIBE.getFullMethodName -> NonNegativeInt.maxValue, + _.focus(_.publicApi.stream).replace( + Some( + StreamLimitConfig(limits = + Map( + com.digitalasset.canton.sequencer.api.v30.SequencerServiceGrpc.METHOD_DOWNLOAD_TOPOLOGY_STATE_FOR_INIT.getFullMethodName -> NonNegativeInt.one, + com.digitalasset.canton.sequencer.api.v30.SequencerServiceGrpc.METHOD_SUBSCRIBE.getFullMethodName -> NonNegativeInt.maxValue, + ) + ) ) ) ) @@ -56,7 +61,7 @@ 
class SequencerApiRateLimitingIntegrationTest import env.* // enforce the limit - sequencer1.underlying.value.sequencer.streamCounterCheck.value.updateLimits( + sequencer1.underlying.value.streamCounterCheck.value.updateLimits( com.digitalasset.canton.sequencer.api.v30.SequencerServiceGrpc.METHOD_DOWNLOAD_TOPOLOGY_STATE_FOR_INIT.getFullMethodName, Some(NonNegativeInt.zero), ) @@ -69,7 +74,7 @@ class SequencerApiRateLimitingIntegrationTest // must not have completed as otherwise we didn't get blocked background.isCompleted shouldBe false // increase limit - sequencer1.underlying.value.sequencer.streamCounterCheck.value.updateLimits( + sequencer1.underlying.value.streamCounterCheck.value.updateLimits( com.digitalasset.canton.sequencer.api.v30.SequencerServiceGrpc.METHOD_DOWNLOAD_TOPOLOGY_STATE_FOR_INIT.getFullMethodName, Some(NonNegativeInt.one), ) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/SequencerConnectServiceIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/SequencerConnectServiceIntegrationTest.scala index 617f6f4513..ea634c477a 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/SequencerConnectServiceIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/SequencerConnectServiceIntegrationTest.scala @@ -19,8 +19,8 @@ import com.digitalasset.canton.console.LocalSequencerReference import com.digitalasset.canton.integration.* import com.digitalasset.canton.integration.plugins.{ UseBftSequencer, - UseCommunityReferenceBlockSequencer, UsePostgres, + UseReferenceBlockSequencer, } import com.digitalasset.canton.logging.LogEntry import com.digitalasset.canton.networking.Endpoint @@ -272,7 +272,7 @@ class GrpcSequencerConnectServiceIntegrationTestPostgresReference extends GrpcSequencerConnectServiceIntegrationTestPostgres { override lazy val sequencerPlugin = - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory) + new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory) override protected def localSequencer(implicit env: TestConsoleEnvironment diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/SynchronizerBootstrapWithMultipleConsolesAndSequencersIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/SynchronizerBootstrapWithMultipleConsolesAndSequencersIntegrationTest.scala index 372cfbc4c0..2012c6c6e2 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/SynchronizerBootstrapWithMultipleConsolesAndSequencersIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/SynchronizerBootstrapWithMultipleConsolesAndSequencersIntegrationTest.scala @@ -36,7 +36,7 @@ trait SynchronizerBootstrapWithMultipleConsolesAndSequencersIntegrationTest // Third sequencer's console: // * write file with identity topology transactions { - sequencer3.topology.transactions.export_identity_transactions(identityFile) + sequencer3.topology.transactions.export_identity_transactionsV2(identityFile) } // First and second sequencers' (i.e., owners) console: @@ -46,9 +46,9 @@ trait SynchronizerBootstrapWithMultipleConsolesAndSequencersIntegrationTest { // Store the third sequencer's identity topology transactions on the synchronizer
sequencer1.topology.transactions - .import_topology_snapshot_from(identityFile, store = synchronizerId) + .import_topology_snapshot_fromV2(identityFile, store = synchronizerId) sequencer2.topology.transactions - .import_topology_snapshot_from(identityFile, store = synchronizerId) + .import_topology_snapshot_fromV2(identityFile, store = synchronizerId) // wait for the identity transactions to become effective sequencer1.topology.synchronisation.await_idle() diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/bftordering/BftSequencerApiTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/bftordering/BftSequencerApiTest.scala index 1be70ad28a..cdfc8abd35 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/bftordering/BftSequencerApiTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/bftordering/BftSequencerApiTest.scala @@ -12,6 +12,7 @@ import com.digitalasset.canton.environment.CantonNodeParameters import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.resource.MemoryStorage import com.digitalasset.canton.sequencing.traffic.TrafficReceipt +import com.digitalasset.canton.synchronizer.block.AsyncWriterParameters import com.digitalasset.canton.synchronizer.metrics.SequencerTestMetrics import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings.canton.sequencing.BftSequencerFactory import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.BftBlockOrdererConfig @@ -49,6 +50,7 @@ class BftSequencerApiTest extends SequencerApiTest with RateLimitManagerTesting dontWarnOnDeprecatedPV = false, ), maxConfirmationRequestsBurstFactor = PositiveDouble.tryCreate(1.0), + asyncWriter = AsyncWriterParameters(), ) override final def createSequencer(crypto: SynchronizerCryptoClient)(implicit diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/channel/SequencerChannelProtocolIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/channel/SequencerChannelProtocolIntegrationTest.scala index 95289d1b82..8b09580d03 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/channel/SequencerChannelProtocolIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/channel/SequencerChannelProtocolIntegrationTest.scala @@ -13,10 +13,7 @@ import com.digitalasset.canton.integration.bootstrap.{ NetworkBootstrapper, NetworkTopologyDescription, } -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, ConfigTransforms, @@ -54,7 +51,7 @@ sealed trait SequencerChannelProtocolIntegrationTest extends CommunityIntegrationTest with SharedEnvironment with SequencerChannelProtocolTestExecHelpers { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) override lazy val environmentDefinition: EnvironmentDefinition = EnvironmentDefinition.P2S1M1_Manual @@ -380,8 +377,10 @@ sealed trait SequencerChannelProtocolIntegrationTest extends 
SequencerChannelProtocolProcessor { override def onConnected()(implicit traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, String, Unit] = - MonadUtil.sequentialTraverse_(initialPayloads)(sendPayload("initial payload", _)) + ): EitherT[FutureUnlessShutdown, String, Unit] = for { + _ <- super.onConnected() + _ <- MonadUtil.sequentialTraverse_(initialPayloads)(sendPayload("initial payload", _)) + } yield () override def handlePayload(payload: ByteString)(implicit traceContext: TraceContext diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceDynamicOnboardingIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceDynamicOnboardingIntegrationTest.scala index e32b4ab60d..57921f36c9 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceDynamicOnboardingIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceDynamicOnboardingIntegrationTest.scala @@ -4,11 +4,11 @@ package com.digitalasset.canton.integration.tests.sequencer.reference import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer class ReferenceDynamicOnboardingIntegrationTest extends ReferenceDynamicOnboardingIntegrationTestBase(DriverName) { - override protected lazy val plugin: UseCommunityReferenceBlockSequencer[DbConfig.Postgres] = + override protected lazy val plugin: UseReferenceBlockSequencer[DbConfig.Postgres] = createPlugin[DbConfig.Postgres](loggerFactory) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceSequencerApiTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceSequencerApiTest.scala index 7cfb8da890..34f12770fe 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceSequencerApiTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceSequencerApiTest.scala @@ -12,7 +12,7 @@ import com.digitalasset.canton.environment.CantonNodeParameters import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.resource.MemoryStorage import com.digitalasset.canton.sequencing.traffic.TrafficReceipt -import com.digitalasset.canton.synchronizer.block.SequencerDriver +import com.digitalasset.canton.synchronizer.block.{AsyncWriterParameters, SequencerDriver} import com.digitalasset.canton.synchronizer.metrics.SequencerTestMetrics import com.digitalasset.canton.synchronizer.sequencer.block.DriverBlockSequencerFactory import com.digitalasset.canton.synchronizer.sequencer.config.{ @@ -88,6 +88,7 @@ class ReferenceSequencerApiTest extends SequencerApiTest with RateLimitManagerTe dontWarnOnDeprecatedPV = false, ), maxConfirmationRequestsBurstFactor = PositiveDouble.tryCreate(1.0), + asyncWriter = AsyncWriterParameters(), ) "Reference sequencer" when runSequencerApiTests() diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceSequencerDriverApiConformanceTest.scala 
b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceSequencerDriverApiConformanceTest.scala index 5a65f2f471..52018f9854 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceSequencerDriverApiConformanceTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceSequencerDriverApiConformanceTest.scala @@ -6,12 +6,12 @@ package com.digitalasset.canton.integration.tests.sequencer.reference import cats.syntax.either.* import com.digitalasset.canton.config.{DbConfig, StorageConfig} import com.digitalasset.canton.crypto.SynchronizerCryptoClient -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.synchronizer.block.SequencerDriver import com.digitalasset.canton.synchronizer.sequencer.SequencerConfig import com.digitalasset.canton.synchronizer.sequencing.sequencer.reference.{ - CommunityReferenceSequencerDriverFactory, ReferenceSequencerDriver, + ReferenceSequencerDriverFactory, } import com.digitalasset.canton.time.TimeProvider @@ -22,14 +22,14 @@ class ReferenceSequencerDriverApiConformanceTest ReferenceSequencerDriver.Config[StorageConfig] ] { - private val driverFactory = new CommunityReferenceSequencerDriverFactory + private val driverFactory = new ReferenceSequencerDriverFactory override protected final val driverConfig : AtomicReference[Option[ReferenceSequencerDriver.Config[StorageConfig]]] = new AtomicReference(Some(ReferenceSequencerDriver.Config(StorageConfig.Memory()))) override protected final lazy val plugin = - new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory) + new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory) override protected final def parseConfig( maybeConfig: Option[SequencerConfig] diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceSequencerPruningIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceSequencerPruningIntegrationTest.scala index ac60495425..e1583efa4e 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceSequencerPruningIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceSequencerPruningIntegrationTest.scala @@ -13,23 +13,22 @@ import com.digitalasset.canton.config.RequireTypes.{ } import com.digitalasset.canton.integration.* import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, UseConfigTransforms, UsePostgres, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.tests.pruning.SequencerPruningIntegrationTest class ReferenceSequencerPruningIntegrationTest extends SequencerPruningIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) registerPlugin( new UseConfigTransforms( Seq( reduceSequencerClientAcknowledgementInterval, increaseParticipant3AcknowledgementInterval, reduceSequencerAcknowledgementConflateWindow, - ConfigTransforms.useStaticTime, ), loggerFactory, ) diff --git 
a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceSequencerWithTrafficControlApiTestBase.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceSequencerWithTrafficControlApiTestBase.scala index da66784289..7071f31e7c 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceSequencerWithTrafficControlApiTestBase.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceSequencerWithTrafficControlApiTestBase.scala @@ -39,6 +39,7 @@ import com.digitalasset.canton.sequencing.traffic.{ TrafficReceipt, } import com.digitalasset.canton.store.db.DbTest +import com.digitalasset.canton.synchronizer.block.AsyncWriterParameters import com.digitalasset.canton.synchronizer.metrics.{SequencerHistograms, SequencerMetrics} import com.digitalasset.canton.synchronizer.sequencer.block.BlockSequencerFactory import com.digitalasset.canton.synchronizer.sequencer.config.{ @@ -68,12 +69,13 @@ import com.digitalasset.canton.synchronizer.sequencing.traffic.{ TrafficConsumedManagerFactory, TrafficPurchasedManager, } -import com.digitalasset.canton.time.{Clock, SimClock} +import com.digitalasset.canton.time.{Clock, NonNegativeFiniteDuration, SimClock} import com.digitalasset.canton.topology.* import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.PekkoUtil import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.canton.{ + BaseTest, FailOnShutdown, MockedNodeParameters, ProtocolVersionChecksFixtureAsyncWordSpec, @@ -275,7 +277,13 @@ abstract class ReferenceSequencerWithTrafficControlApiTestBase ), ) val topologyFactoryWithSynchronizerParameters = env.topology - .copy(synchronizerParameters = parameters) + .copy( + synchronizerParameters = parameters, + staticSynchronizerParameters = BaseTest.defaultStaticSynchronizerParametersWith( + topologyChangeDelay = NonNegativeFiniteDuration.Zero, + protocolVersion = testedProtocolVersion, + ), + ) .build(loggerFactory) val params = SequencerNodeParameters( general = MockedNodeParameters.cantonNodeParameters( @@ -287,6 +295,7 @@ abstract class ReferenceSequencerWithTrafficControlApiTestBase dontWarnOnDeprecatedPV = false, ), maxConfirmationRequestsBurstFactor = PositiveDouble.tryCreate(1.0), + asyncWriter = AsyncWriterParameters(), ) // Important to create the histograms before the factory, because creating the factory will // register them once and for all and we can't add more afterwards diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceSynchronizerBootstrapWithSeparateConsolesIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceSynchronizerBootstrapWithSeparateConsolesIntegrationTest.scala index a059f00e19..05f12be81b 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceSynchronizerBootstrapWithSeparateConsolesIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/ReferenceSynchronizerBootstrapWithSeparateConsolesIntegrationTest.scala @@ -4,15 +4,12 @@ package com.digitalasset.canton.integration.tests.sequencer.reference import com.digitalasset.canton.config.DbConfig -import 
com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.sequencer.SynchronizerBootstrapWithMultipleConsolesAndSequencersIntegrationTest class ReferenceSynchronizerBootstrapWithSeparateConsolesIntegrationTest extends SynchronizerBootstrapWithMultipleConsolesAndSequencersIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/package.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/package.scala index fba34d4263..456dc1f9e6 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/package.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/sequencer/reference/package.scala @@ -4,17 +4,17 @@ package com.digitalasset.canton.integration.tests.sequencer import com.digitalasset.canton.config.StorageConfig -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.logging.NamedLoggerFactory import scala.reflect.ClassTag package object reference { - private[reference] val DriverName: String = "community-reference" + private[reference] val DriverName: String = "reference" private[reference] def createPlugin[S <: StorageConfig]( loggerFactory: NamedLoggerFactory )(implicit c: ClassTag[S]) = - new UseCommunityReferenceBlockSequencer[S](loggerFactory) + new UseReferenceBlockSequencer[S](loggerFactory) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/DanglingPartiesIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/DanglingPartiesIntegrationTest.scala new file mode 100644 index 0000000000..b3d51c4993 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/DanglingPartiesIntegrationTest.scala @@ -0,0 +1,156 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.topology + +import com.digitalasset.canton.config +import com.digitalasset.canton.console.InstanceReference +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + ConfigTransforms, + EnvironmentDefinition, + HasCycleUtils, + SharedEnvironment, +} +import com.digitalasset.canton.logging.SuppressionRule +import com.digitalasset.canton.topology.PartyId +import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId +import com.digitalasset.canton.topology.store.TimeQuery +import com.digitalasset.canton.topology.transaction.{ParticipantPermission, TopologyChangeOp} +import monocle.macros.syntax.lens.* +import org.slf4j.event.Level + +/** Test that the system still works properly after parties are left dangling + * + * A dangling party is a party for which one of the participants has become defunct. 
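+ * (in this test: participant2 removes its own OwnerToKeyMapping, so it keeps its synchronizer trust certificate but no longer has keys on the synchronizer)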
What we test + * here is the following scenario: + * - Alice on P1 and P2 + * - Both have a contract + * - P2 sends an OTK remove + * - Alice can still exercise her contracts via P1 + */ +class DanglingPartiesIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment + with HasCycleUtils { + + private var alice: PartyId = _ + private var storeId: TopologyStoreId = _ + + override lazy val environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P2_S1M1 + .addConfigTransform(ConfigTransforms.updateAllParticipantConfigs { case (_, config) => + config.focus(_.topology.disableOptionalTopologyChecks).replace(true) + }) + .addConfigTransform( + _.focus(_.monitoring.logging.eventDetails) + .replace(true) + .focus(_.monitoring.logging.api.maxMessageLines) + .replace(10000) + .focus(_.monitoring.logging.api.maxStringLength) + .replace(40000) + ) + .withSetup { env => + import env.* + + // make sure we send enough commitments + Seq[InstanceReference](mediator1, sequencer1).foreach( + _.topology.synchronizer_parameters + .propose_update( + sequencer1.synchronizer_id, + _.update(reconciliationInterval = config.PositiveDurationSeconds.ofSeconds(1)), + ) + .discard + ) + + participants.all.foreach { p => + p.synchronizers.connect_local(sequencer1, daName) + p.dars.upload(CantonExamplesPath) + } + + storeId = TopologyStoreId.Synchronizer(sequencer1.synchronizer_id) + alice = PartyId.tryCreate("Alice", participant1.id.namespace) + participant1.topology.party_to_participant_mappings.propose( + alice, + newParticipants = Seq( + (participant1.id, ParticipantPermission.Submission), + (participant2.id, ParticipantPermission.Confirmation), + ), + store = storeId, + ) + + Seq((participant2, alice)).foreach { case (participant, party) => + val proposal = eventually() { + participant.topology.party_to_participant_mappings + .list_hosting_proposals(sequencer1.synchronizer_id, participant.id) + .loneElement + } + participant.topology.transactions.authorize(sequencer1.synchronizer_id, proposal.txHash) + eventually() { + participant.ledger_api.parties.list().map(_.party) should contain(party) + } + } + } + + "create a cycle contract for alice" in { implicit env => + import env.* + createCycleContract(participant1, alice, id = "CYCLE1") + } + + "unregister p2 by removing otk" in { implicit env => + import env.* + + val otk = participant2.topology.owner_to_key_mappings + .list(store = Some(storeId), filterKeyOwnerUid = participant2.filterString) + .loneElement + participant2.topology.transactions.propose( + otk.item, + storeId, + change = TopologyChangeOp.Remove, + ) + eventually() { + val all = participant1.topology.owner_to_key_mappings + .list( + store = Some(storeId), + filterKeyOwnerUid = participant2.filterString, + timeQuery = TimeQuery.Range(None, None), + operation = None, + ) + all.lastOption + .valueOrFail("Must exist") + .context + .operation shouldBe TopologyChangeOp.Remove + } + // TODO(#28232) automatically disconnect from synchronizer + participant2.synchronizers.disconnect_all() + + } + + "contract with p2 is still alive" in { implicit env => + import env.* + + // TODO(#28232) without the eventually, p2 will be addressed because p1 still thinks that p2 is active + // once we make it impossible to remove OTK that are still in use, we can switch this test to STC + eventually() { + participant1.topology.owner_to_key_mappings.list( + sequencer1.synchronizer_id, + filterKeyOwnerUid = participant2.filterString, + ) shouldBe empty + } + + loggerFactory.assertLogsSeq(
SuppressionRule + .LevelAndAbove(Level.WARN) + )( + awaitAndExerciseCycleContract(participant1, alice), + messages => + forAll(messages) { msg => + msg.warningMessage should include( + "has a synchronizer trust certificate, but no keys on synchronizer" + ) + }, + ) + } + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/DuplicateSignaturesIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/DuplicateSignaturesIntegrationTest.scala index fbe7dcc808..5dcc47567d 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/DuplicateSignaturesIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/DuplicateSignaturesIntegrationTest.scala @@ -74,7 +74,7 @@ sealed trait DuplicateSignaturesIntegrationTest testedProtocolVersion, ) - val network = NetworkTopologyDescription( + val network = NetworkTopologyDescription.createWithStaticSynchronizerParameters( daName, synchronizerOwners = Seq[InstanceReference](sequencer1, mediator1), synchronizerThreshold = PositiveInt.two, diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/IdentityProviderConfigIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/IdentityProviderConfigIntegrationTest.scala index 9c286a6eba..e4239c7e78 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/IdentityProviderConfigIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/IdentityProviderConfigIntegrationTest.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.integration.tests.topology import com.daml.jwt.JwksUrl import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -156,5 +156,5 @@ trait IdentityProviderConfigIntegrationTest class IdentityProviderConfigReferenceIntegrationTestPostgres extends IdentityProviderConfigIntegrationTest { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/LogicalSynchronizerUpgradeTopologyIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/LogicalSynchronizerUpgradeTopologyIntegrationTest.scala index 58d4a32896..e19ae9f93d 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/LogicalSynchronizerUpgradeTopologyIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/LogicalSynchronizerUpgradeTopologyIntegrationTest.scala @@ -5,15 +5,12 @@ package com.digitalasset.canton.integration.tests.topology import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.console.{CommandFailure, LocalParticipantReference} 
import com.digitalasset.canton.crypto.SigningKeyUsage import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.discard.Implicits.* -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, ConfigTransforms, @@ -24,40 +21,57 @@ import com.digitalasset.canton.integration.{ import com.digitalasset.canton.participant.store.SynchronizerConnectionConfigStore import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig import com.digitalasset.canton.sequencing.{GrpcSequencerConnection, SequencerConnections} +import com.digitalasset.canton.topology.TopologyManagerError.InvalidSynchronizerSuccessor import com.digitalasset.canton.topology.transaction.DelegationRestriction.CanSignAllMappings import com.digitalasset.canton.topology.{ KnownPhysicalSynchronizerId, + PhysicalSynchronizerId, SequencerId, TopologyManagerError, UnknownPhysicalSynchronizerId, } import com.google.protobuf.ByteString import monocle.syntax.all.* +import org.scalatest.Assertion import java.net.URI +import java.util.concurrent.atomic.AtomicReference sealed trait LogicalSynchronizerUpgradeTopologyIntegrationTest extends CommunityIntegrationTest with SharedEnvironment { override def environmentDefinition: EnvironmentDefinition = - EnvironmentDefinition.P3_S2M2.addConfigTransform( - ConfigTransforms.updateAllParticipantConfigs_( - _.focus(_.parameters.automaticallyPerformLogicalSynchronizerUpgrade).replace(false) + EnvironmentDefinition.P3_S2M2 + .addConfigTransform( + ConfigTransforms.updateAllParticipantConfigs_( + _.focus(_.parameters.automaticallyPerformLogicalSynchronizerUpgrade).replace(false) + ) ) - ) + .withSetup { env => + latestSuccessorPSId.set(Some(env.daId)) + } - private def successorSynchronizerId(implicit env: TestConsoleEnvironment) = - env.daId.copy(serial = NonNegativeInt.one) + /* + The PSId of the successor needs to be strictly increasing across announcements. + This allows tracking the latest one used.
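+ Re-using a previously announced successor PSId is rejected with InvalidSynchronizerSuccessor.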
+ */ + private val latestSuccessorPSId = new AtomicReference[Option[PhysicalSynchronizerId]](None) + + private def allocateSuccessorPSId(): PhysicalSynchronizerId = + latestSuccessorPSId.updateAndGet { existing => + Some(existing.value.copy(serial = existing.value.serial.increment.toNonNegative)) + }.value private lazy val upgradeTime = CantonTimestamp.now().plusSeconds(3600) "migration announcement does not permit further topology transactions" in { implicit env => import env.* + val successorPSId = allocateSuccessorPSId() synchronizerOwners1.foreach { owner => owner.topology.synchronizer_upgrade.announcement.propose( - successorPhysicalSynchronizerId = successorSynchronizerId, + successorPhysicalSynchronizerId = successorPSId, upgradeTime = upgradeTime, ) } @@ -68,7 +82,7 @@ sealed trait LogicalSynchronizerUpgradeTopologyIntegrationTest loggerFactory.assertThrowsAndLogs[CommandFailure]( owner1.topology.namespace_delegations .propose_delegation(owner1.namespace, targetKey, CanSignAllMappings, daId), - _ shouldBeCantonErrorCode (TopologyManagerError.OngoingSynchronizerUpgrade), + _.shouldBeCantonErrorCode(TopologyManagerError.OngoingSynchronizerUpgrade), ) } @@ -76,7 +90,7 @@ sealed trait LogicalSynchronizerUpgradeTopologyIntegrationTest import env.* synchronizerOwners1.foreach( _.topology.synchronizer_upgrade.announcement.revoke( - successorPhysicalSynchronizerId = successorSynchronizerId, + successorPhysicalSynchronizerId = latestSuccessorPSId.get().value, upgradeTime = upgradeTime, ) ) @@ -106,9 +120,10 @@ sealed trait LogicalSynchronizerUpgradeTopologyIntegrationTest ) // announce the migration to prepare for the sequencer connection announcements + val successorPSId = allocateSuccessorPSId() synchronizerOwners1.foreach( _.topology.synchronizer_upgrade.announcement.propose( - successorPhysicalSynchronizerId = successorSynchronizerId, + successorPhysicalSynchronizerId = successorPSId, upgradeTime = upgradeTime, ) ) @@ -129,7 +144,7 @@ sealed trait LogicalSynchronizerUpgradeTopologyIntegrationTest .get(daName, UnknownPhysicalSynchronizerId) .toOption shouldBe None connectionConfigStore(participant2) - .get(daName, KnownPhysicalSynchronizerId(successorSynchronizerId)) + .get(daName, KnownPhysicalSynchronizerId(successorPSId)) .toOption shouldBe None // sequencer2 announces its connection details for the successor synchronizer @@ -181,7 +196,7 @@ sealed trait LogicalSynchronizerUpgradeTopologyIntegrationTest // unfrozen successor synchronizer synchronizerOwners1.foreach( _.topology.synchronizer_upgrade.announcement.revoke( - successorSynchronizerId, + latestSuccessorPSId.get().value, upgradeTime = upgradeTime, ) ) @@ -216,13 +231,61 @@ sealed trait LogicalSynchronizerUpgradeTopologyIntegrationTest } } + "successor PSId should increase between announcements" in { implicit env => + import env.* + + val successor1 = allocateSuccessorPSId() + val successor2 = allocateSuccessorPSId() + val successor3 = allocateSuccessorPSId() + + Seq(successor1, successor2).foreach { successor => + synchronizerOwners1.foreach { owner => + owner.topology.synchronizer_upgrade.announcement.propose( + successorPhysicalSynchronizerId = successor, + upgradeTime = upgradeTime, + ) + } + + synchronizerOwners1.foreach { owner => + owner.topology.synchronizer_upgrade.announcement.revoke( + successorPhysicalSynchronizerId = successor, + upgradeTime = upgradeTime, + ) + } + } + + // Re-using successor1 or successor2 should fail + Seq(successor1, successor2).foreach { successor => + 
loggerFactory.assertThrowsAndLogs[CommandFailure]( + sequencer1.topology.synchronizer_upgrade.announcement.propose( + successorPhysicalSynchronizerId = successor, + upgradeTime = upgradeTime, + ), + entry => { + entry shouldBeCantonErrorCode (InvalidSynchronizerSuccessor) + entry.errorMessage should include( + InvalidSynchronizerSuccessor.Reject + .conflictWithPreviousAnnouncement(successor, successor2) + .cause + ) + }, + ) + } + + // But successor3 should be fine + sequencer1.topology.synchronizer_upgrade.announcement.propose( + successorPhysicalSynchronizerId = successor3, + upgradeTime = upgradeTime, + ) + } + private def connectionConfigStore(participant: LocalParticipantReference) = participant.underlying.value.sync.synchronizerConnectionConfigStore private def checkUpgradedSequencerConfig( participant: LocalParticipantReference, expectedSequencerPorts: (SequencerId, Int)* - )(implicit env: TestConsoleEnvironment) = { + )(implicit env: TestConsoleEnvironment): Assertion = { import env.* val portMap = expectedSequencerPorts.groupBy(_._1).view.mapValues(_.map(_._2)).toMap eventually() { @@ -231,7 +294,7 @@ sealed trait LogicalSynchronizerUpgradeTopologyIntegrationTest configStore.get(daName, KnownPhysicalSynchronizerId(daId)).value currentConfig.status shouldBe SynchronizerConnectionConfigStore.Active val successorConfig = - configStore.get(daName, KnownPhysicalSynchronizerId(successorSynchronizerId)).value + configStore.get(daName, KnownPhysicalSynchronizerId(latestSuccessorPSId.get().value)).value successorConfig.status shouldBe SynchronizerConnectionConfigStore.UpgradingTarget val currentSequencers = currentConfig.config.sequencerConnections.aliasToConnection.map { @@ -255,5 +318,5 @@ sealed trait LogicalSynchronizerUpgradeTopologyIntegrationTest class LogicalSynchronizerUpgradeTopologyIntegrationTestPostgres extends LogicalSynchronizerUpgradeTopologyIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/ManualParticipantSetupIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/ManualParticipantSetupIntegrationTest.scala index c77bd8f15d..2f1db096a7 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/ManualParticipantSetupIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/ManualParticipantSetupIntegrationTest.scala @@ -7,11 +7,7 @@ import com.digitalasset.canton.admin.api.client.data.StaticSynchronizerParameter import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.config.{DbConfig, StorageConfig} import com.digitalasset.canton.console.InstanceReference -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UseH2, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UseH2, UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, ConfigTransforms, @@ -78,15 +74,15 @@ trait ManualParticipantSetupIntegrationTest } class ManualParticipantSetupIntegrationTestInMemory extends ManualParticipantSetupIntegrationTest { - registerPlugin(new 
UseCommunityReferenceBlockSequencer[StorageConfig.Memory](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[StorageConfig.Memory](loggerFactory)) } class ManualParticipantSetupIntegrationTestPostgres extends ManualParticipantSetupIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } class ManualParticipantSetupIntegrationTestH2 extends ManualParticipantSetupIntegrationTest { registerPlugin(new UseH2(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/ManualSynchronizerNodesInitIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/ManualSynchronizerNodesInitIntegrationTest.scala new file mode 100644 index 0000000000..b858f5e0f8 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/ManualSynchronizerNodesInitIntegrationTest.scala @@ -0,0 +1,116 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.topology + +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.console.LocalInstanceReference +import com.digitalasset.canton.crypto.SigningKeyUsage +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + ConfigTransforms, + EnvironmentDefinition, + SharedEnvironment, +} +import com.digitalasset.canton.topology.transaction.DelegationRestriction.CanSignAllMappings +import com.digitalasset.canton.topology.{Namespace, UniqueIdentifier} + +/** Test to fully manually initialize synchronizer nodes with identity and topology keys. 
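+ * + * The flow exercised here: generate a namespace signing key, initialize the node id from its fingerprint, issue a root namespace delegation, generate protocol and sequencer-authentication signing keys, and register those keys through an owner-to-key mapping signed by the namespace key and the new keys.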
*/ +trait ManualSynchronizerNodesInitIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment { + + override lazy val environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P2S1M1_Manual + .addConfigTransform(ConfigTransforms.disableAutoInit(Set("sequencer1", "mediator1"))) + + private def nodeInit(node: LocalInstanceReference): Unit = { + // create namespace key for the node + val namespaceKey = node.keys.secret + .generate_signing_key( + s"${node.name}-${SigningKeyUsage.Namespace.identifier}", + usage = SigningKeyUsage.NamespaceOnly, + ) + + node.health.wait_for_ready_for_id() + + // initialize the node id + node.topology.init_id_from_uid( + UniqueIdentifier.tryCreate(node.name, namespaceKey.fingerprint) + ) + + node.health.wait_for_ready_for_node_topology() + + node.topology.namespace_delegations.propose_delegation( + Namespace(namespaceKey.fingerprint), + namespaceKey, + CanSignAllMappings, + ) + + // every node needs to create a signing key + val protocolSigningKey = node.keys.secret + .generate_signing_key( + s"${node.name}-${SigningKeyUsage.Protocol.identifier}", + usage = SigningKeyUsage.ProtocolOnly, + ) + + // create a sequencer authentication signing key for the mediator + val sequencerAuthKey = node.keys.secret + .generate_signing_key( + s"${node.name}-${SigningKeyUsage.SequencerAuthentication.identifier}", + usage = SigningKeyUsage.SequencerAuthenticationOnly, + ) + + val keys = NonEmpty(Seq, protocolSigningKey, sequencerAuthKey) + + node.topology.owner_to_key_mappings.propose( + member = node.id.member, + keys = keys, + signedBy = (namespaceKey +: keys).map(_.fingerprint), + ) + + node.health.wait_for_ready_for_initialization() + + } + + "manually initialize the mediator node" in { implicit env => + import env.* + + mediator1.start() + nodeInit(mediator1) + } + + "manually initialize the sequencer node" in { implicit env => + import env.* + + sequencer1.start() + nodeInit(sequencer1) + } + + "bootstrap the synchronizer and run a ping" in { implicit env => + import env.* + + bootstrap.synchronizer( + "synchronizer1", + sequencers = Seq(sequencer1), + mediators = Seq(mediator1), + synchronizerOwners = Seq(sequencer1), + synchronizerThreshold = PositiveInt.one, + staticSynchronizerParameters = EnvironmentDefinition.defaultStaticSynchronizerParameters, + ) + + participant1.start() + participant1.synchronizers.connect_local(sequencer1, "synchronizer1") + participant1.health.ping(participant1) + } + +} + +class ManualSynchronizerNodesInitIntegrationTestPostgres + extends ManualSynchronizerNodesInitIntegrationTest { + registerPlugin(new UsePostgres(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/MemberAutoInitIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/MemberAutoInitIntegrationTest.scala index 9204b73f0e..d5477117c5 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/MemberAutoInitIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/MemberAutoInitIntegrationTest.scala @@ -19,11 +19,11 @@ import com.digitalasset.canton.console.{ } import com.digitalasset.canton.crypto.SigningKeyUsage import com.digitalasset.canton.integration.bootstrap.InitializedSynchronizer -import 
com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer import com.digitalasset.canton.integration.plugins.{ UseBftSequencer, - UseCommunityReferenceBlockSequencer, UsePostgres, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -277,7 +277,7 @@ trait MemberAutoInitIntegrationTest class MemberAutoInitReferenceIntegrationTestPostgres extends MemberAutoInitIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) registerPlugin( - new UseCommunityReferenceBlockSequencer[DbConfig.Postgres]( + new UseReferenceBlockSequencer[DbConfig.Postgres]( loggerFactory, MultiSynchronizer( Seq(Set(InstanceName.tryCreate("sequencer1")), Set(InstanceName.tryCreate("sequencer2"))) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/ParticipantStateChangeIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/ParticipantStateChangeIntegrationTest.scala index d0c79f7e56..daa5fa872e 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/ParticipantStateChangeIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/ParticipantStateChangeIntegrationTest.scala @@ -13,10 +13,7 @@ import com.digitalasset.canton.console.{CommandFailure, LocalInstanceReference} import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.error.TransactionRoutingError.TopologyErrors.NoSynchronizerOnWhichAllSubmittersCanSubmit import com.digitalasset.canton.examples.java.iou.Iou -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.{ CommunityIntegrationTest, @@ -324,5 +321,5 @@ trait ParticipantStateChangeIntegrationTest class ParticipantStateChangeIntegrationTestPostgres extends ParticipantStateChangeIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/PartyManagementIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/PartyManagementIntegrationTest.scala index 1d41f432f6..1b6fdcf422 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/PartyManagementIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/PartyManagementIntegrationTest.scala @@ -3,20 +3,16 @@ package com.digitalasset.canton.integration.tests.topology -import com.digitalasset.canton.admin.api.client.data.PartyDetails +import com.digitalasset.canton.admin.api.client.data.parties.PartyDetails import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.console.CommandFailure -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - 
UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, SharedEnvironment, } -import com.digitalasset.canton.ledger.error.groups.PartyManagementServiceErrors.PartyNotFound import com.digitalasset.canton.topology.TopologyManagerError.MappingAlreadyExists import com.digitalasset.canton.topology.transaction.{ParticipantPermission, TopologyChangeOp} import com.digitalasset.canton.topology.{PartyId, SynchronizerId, UniqueIdentifier} @@ -149,7 +145,7 @@ trait PartyManagementIntegrationTest extends CommunityIntegrationTest with Share partyDetails.copy(annotations = partyDetails.annotations.updated("a", "b")) }, ), - _.shouldBeCantonErrorCode(PartyNotFound), + _.errorMessage should include("The following parties were not found on the Ledger"), ) } @@ -296,5 +292,5 @@ trait PartyManagementIntegrationTest extends CommunityIntegrationTest with Share class PartyManagementIntegrationTestPostgres extends PartyManagementIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/PermissionedSynchronizerTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/PermissionedSynchronizerTest.scala index c1047be9f6..bad1e4b123 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/PermissionedSynchronizerTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/PermissionedSynchronizerTest.scala @@ -10,10 +10,7 @@ import com.digitalasset.canton.admin.api.client.data.OnboardingRestriction import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.console.ParticipantReference import com.digitalasset.canton.integration.* -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.logging.{LogEntry, SuppressionRule} import com.digitalasset.canton.participant.sync.SyncServiceError.SyncServiceSynchronizerDisabledUs import com.digitalasset.canton.participant.synchronizer.SynchronizerRegistryError.InitialOnboardingError @@ -272,6 +269,6 @@ trait PermissionedSynchronizerTest //} class PermissionedSynchronizerTestPostgres extends PermissionedSynchronizerTest { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) registerPlugin(new UsePostgres(loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/StressTopologyDispatcherIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/StressTopologyDispatcherIntegrationTest.scala index f8bf8718b4..36d8352051 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/StressTopologyDispatcherIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/StressTopologyDispatcherIntegrationTest.scala @@ -16,10 
+16,7 @@ import com.digitalasset.canton.console.{ } import com.digitalasset.canton.crypto.{PublicKey, SigningKeyUsage, SigningPublicKey} import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.integration.plugins.{ - UseBftSequencer, - UseCommunityReferenceBlockSequencer, -} +import com.digitalasset.canton.integration.plugins.{UseBftSequencer, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, ConfigTransforms, @@ -238,7 +235,7 @@ trait StressTopologyDispatcherIntegrationTest class StressTopologyDispatcherReferenceIntegrationTestPostgres extends StressTopologyDispatcherIntegrationTest { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } class StressTopologyDispatcherBftOrderingIntegrationTestPostgres diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/TimeAdvancingTopologySubscriberIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/TimeAdvancingTopologySubscriberIntegrationTest.scala new file mode 100644 index 0000000000..d99a8ebd7f --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/TimeAdvancingTopologySubscriberIntegrationTest.scala @@ -0,0 +1,174 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.topology + +import com.digitalasset.canton.concurrent.Threading +import com.digitalasset.canton.config.{ + DbConfig, + SynchronizerTimeTrackerConfig, + TestSequencerClientFor, +} +import com.digitalasset.canton.console.{ParticipantReference, SequencerReference} +import com.digitalasset.canton.discard.Implicits.* +import com.digitalasset.canton.integration.plugins.{ + UsePostgres, + UseProgrammableSequencer, + UseReferenceBlockSequencer, +} +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + ConfigTransforms, + EnvironmentDefinition, + SharedEnvironment, +} +import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig +import com.digitalasset.canton.sequencing.client.DelayedSequencerClient +import com.digitalasset.canton.sequencing.client.DelayedSequencerClient.{ + DelaySequencerClient, + SequencedEventDelayPolicy, +} +import com.digitalasset.canton.sequencing.protocol.{ + AllMembersOfSynchronizer, + ClosedEnvelope, + Deliver, + SequencedEvent, + TimeProof, +} +import com.digitalasset.canton.sequencing.{SequencedSerializedEvent, SequencerConnections} +import com.digitalasset.canton.synchronizer.sequencer.time.TimeAdvancingTopologySubscriber.TimeAdvanceBroadcastMessageIdPrefix +import com.digitalasset.canton.synchronizer.sequencer.{HasProgrammableSequencer, SendDecision} +import com.digitalasset.canton.time.NonNegativeFiniteDuration +import monocle.macros.syntax.lens.* + +import java.util.concurrent.atomic.{AtomicInteger, AtomicReference} +import scala.concurrent.Promise + +trait TimeAdvancingTopologySubscriberIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment + with HasProgrammableSequencer { + + private lazy val observationLatency: NonNegativeFiniteDuration = + NonNegativeFiniteDuration.tryOfSeconds(10) + + override lazy val environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P2_S2M1 + 
.updateTestingConfig( + _.focus(_.testSequencerClientFor).replace( + Set( + TestSequencerClientFor(this.getClass.getSimpleName, "participant1", "synchronizer1") + ) + ) + ) + .addConfigTransforms( + ConfigTransforms.updateAllSequencerConfigs_( + _.focus(_.timeTracker.observationLatency).replace(observationLatency.toConfig) + ), + ConfigTransforms.updateAllMediatorConfigs_( + _.focus(_.timeTracker.observationLatency).replace(observationLatency.toConfig) + ), + ) + // Do not use a static time because this test requires a non-zero topology change delay + .withSetup { env => + import env.* + + def connect( + participant: ParticipantReference, + sequencer: SequencerReference, + ): Unit = { + val daSequencerConnection = + SequencerConnections.single(sequencer.sequencerConnection.withAlias(daName.toString)) + participant.synchronizers.connect_by_config( + SynchronizerConnectionConfig( + synchronizerAlias = daName, + sequencerConnections = daSequencerConnection, + timeTracker = + SynchronizerTimeTrackerConfig(observationLatency = observationLatency.toConfig), + ) + ) + } + + connect(participant1, sequencer1) + connect(participant2, sequencer2) + } + + "TimeAdvancingTopologySubscriber" should { + "prevent time proof requests after topology transactions" in { implicit env => + import env.* + + val timeProofRequestCounter = new AtomicInteger(0) + val timeAdvancingCounter = new AtomicInteger(0) + val timeAdvancingDelay = Promise[Unit]() + + val progSeqs = + Seq(sequencer1, sequencer2).map(sequencer => getProgrammableSequencer(sequencer.name)) + + progSeqs.foreach(_.setPolicy_("count time proof requests and delay time advancing requests") { + submissionRequest => + if (TimeProof.isTimeProofSubmission(submissionRequest)) { + timeProofRequestCounter.getAndIncrement().discard + SendDecision.Process + } else if ( + submissionRequest.messageId.unwrap.startsWith(TimeAdvanceBroadcastMessageIdPrefix) + ) { + // Make sure that we actually have both sequencers submit time advancements. + // Without holding them back, there is the chance that one sequencer is slow and observes the other's + // advancement, so it could then decide to not attempt to send anything at all. 
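+ // Count this advancement; release all held-back advancements once both sequencers have submitted one.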
+ val count = timeAdvancingCounter.incrementAndGet() + if (count >= 2) timeAdvancingDelay.trySuccess(()).discard + SendDecision.HoldBack(timeAdvancingDelay.future) + } else { + SendDecision.Process + } + }) + + // Look at all messages received by participant 1 and count those that are addressed to AllMembersOfSynchronizer + val broadcastsObservedByP1 = new AtomicReference[Seq[SequencedSerializedEvent]](Vector.empty) + val p1SequencerClientInterceptor = DelayedSequencerClient + .delayedSequencerClient( + this.getClass.getSimpleName, + daId, + participant1.id.uid.toString, + ) + .value + p1SequencerClientInterceptor.setDelayPolicy(new SequencedEventDelayPolicy { + private def isBroadcastEvent(event: SequencedEvent[ClosedEnvelope]): Boolean = event match { + case deliver: Deliver[ClosedEnvelope] => + deliver.envelopes.exists(_.recipients.allRecipients.contains(AllMembersOfSynchronizer)) + case _ => false + } + + override def apply(event: SequencedSerializedEvent): DelaySequencerClient = { + if (isBroadcastEvent(event.underlying.value.content)) + broadcastsObservedByP1.getAndUpdate(_ :+ event).discard + DelayedSequencerClient.Immediate + } + }) + + // Send a topology transaction + val start = System.nanoTime() + participant1.parties.enable("test-party") + val end = System.nanoTime() + + // Let's wait until the observation latency really has elapsed to make sure that we'll catch late time proof requests. + val wait = Math.max(observationLatency.duration.toMillis - (end - start) / 1000000, 0) + Threading.sleep(wait + 100) + + timeProofRequestCounter.get() shouldBe 0 + + val broadcasts = broadcastsObservedByP1.get().map(_.underlying.value.content) + // We expect two broadcasts: One for the topology transaction and one for the aggregated notification message + broadcasts should have size 2 + + progSeqs.foreach(_.resetPolicy()) + } + } +} + +class TimeAdvancingTopologySubscriberReferenceIntegrationTestPostgres + extends TimeAdvancingTopologySubscriberIntegrationTest { + registerPlugin(new UsePostgres(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory)) +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/TopologyAdministrationIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/TopologyAdministrationIntegrationTest.scala index 669eea03f4..239d13d0bb 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/TopologyAdministrationIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/TopologyAdministrationIntegrationTest.scala @@ -11,10 +11,7 @@ import com.digitalasset.canton.console.CommandFailure import com.digitalasset.canton.crypto.SigningKeyUsage.{Namespace, Protocol} import com.digitalasset.canton.crypto.{EncryptionPublicKey, SigningKeyUsage, SigningPublicKey} import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.integration.plugins.{ - UseCommunityReferenceBlockSequencer, - UsePostgres, -} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.{ CommunityIntegrationTest, EnvironmentDefinition, @@ -88,9 +85,12 @@ trait TopologyAdministrationTest extends CommunityIntegrationTest with SharedEnv certs.head } + val expectedFeatureFlags = Seq.empty + val 
expectedTrustCert1 = SynchronizerTrustCertificate( participant1.id, daId, + featureFlags = expectedFeatureFlags, ) trustCert1.context.serial shouldBe PositiveInt.one @@ -348,8 +348,8 @@ trait TopologyAdministrationTest extends CommunityIntegrationTest with SharedEnv "vetted_packages.propose" in { implicit env => import env.* val packageIds = participant1.topology.vetted_packages - .list(store = TopologyStoreId.Authorized) - .head + .list(store = daId, filterParticipant = participant1.filterString) + .loneElement .item .packages @@ -358,18 +358,22 @@ trait TopologyAdministrationTest extends CommunityIntegrationTest with SharedEnv // remove all packages participant1.topology.vetted_packages.propose( participant1.id, - packages = Nil, - force = ForceFlags(ForceFlag.AllowUnvetPackage), + store = daId, + packages = packageIds, operation = TopologyChangeOp.Remove, ) val result = participant1.topology.vetted_packages - .list(store = TopologyStoreId.Authorized) + .list(store = daId, filterParticipant = participant1.filterString) result should have size 0 - participant1.topology.vetted_packages.propose(participant1.id, packages = packageIds) + participant1.topology.vetted_packages.propose( + participant1.id, + store = daId, + packages = packageIds, + ) val packageIds3 = participant1.topology.vetted_packages - .list(store = TopologyStoreId.Authorized) - .head + .list(store = daId, filterParticipant = participant1.filterString) + .loneElement .item .packages packageIds3 should contain theSameElementsAs packageIds @@ -377,21 +381,25 @@ trait TopologyAdministrationTest extends CommunityIntegrationTest with SharedEnv // Set vetted packages to empty but do not remove the mapping participant1.topology.vetted_packages.propose( participant1.id, + store = daId, packages = Seq.empty, - force = ForceFlag.AllowUnvetPackage, ) val packageIds4 = participant1.topology.vetted_packages - .list(store = TopologyStoreId.Authorized) - .head + .list(store = daId, filterParticipant = participant1.filterString) + .loneElement .item .packages packageIds4 shouldBe empty // Set it back so the next test is happy - participant1.topology.vetted_packages.propose(participant1.id, packages = packageIds) + participant1.topology.vetted_packages.propose( + participant1.id, + store = daId, + packages = packageIds, + ) val packageIds5 = participant1.topology.vetted_packages - .list(store = TopologyStoreId.Authorized) - .head + .list(store = daId, filterParticipant = participant1.filterString) + .loneElement .item .packages packageIds5 should contain theSameElementsAs packageIds @@ -401,7 +409,7 @@ trait TopologyAdministrationTest extends CommunityIntegrationTest with SharedEnv import env.* def getVettedPackages() = participant1.topology.vetted_packages .list( - store = TopologyStoreId.Authorized, + store = daId, filterParticipant = participant1.id.filterString, ) .loneElement @@ -418,10 +426,10 @@ trait TopologyAdministrationTest extends CommunityIntegrationTest with SharedEnv // first check that we indeed would add new packages startingPackages.size should be <= adds.size - participant1.dars.upload(CantonTestsPath, vetAllPackages = false) + participant1.dars.upload(CantonTestsPath) // vet some more packages - participant1.topology.vetted_packages.propose_delta(participant1.id, adds = adds) + participant1.topology.vetted_packages.propose_delta(participant1.id, store = daId, adds = adds) val newPackageIdsResult = getVettedPackages() newPackageIdsResult.context.serial shouldBe startingSerial.increment @@ -433,8 +441,9 @@ trait 
TopologyAdministrationTest extends CommunityIntegrationTest with SharedEnv // unvet the starting packages participant1.topology.vetted_packages.propose_delta( participant1.id, + store = daId, removes = startingPackages, - force = ForceFlags(ForceFlag.AllowUnvetPackage), + force = ForceFlags(ForceFlag.AllowUnvettedDependencies), ) val removedPackagesResult = getVettedPackages() @@ -448,9 +457,9 @@ trait TopologyAdministrationTest extends CommunityIntegrationTest with SharedEnv .thrownBy( participant1.topology.vetted_packages.propose_delta( participant1.id, + store = daId, adds = VettedPackage.unbounded(startingPackages), removes = startingPackages, - force = ForceFlags(ForceFlag.AllowUnvetPackage), ) ) .getMessage should include("Cannot both add and remove a packageId: ") @@ -489,5 +498,5 @@ trait TopologyAdministrationTest extends CommunityIntegrationTest with SharedEnv class TopologyAdministrationTestPostgres extends TopologyAdministrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/TopologyManagementIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/TopologyManagementIntegrationTest.scala index 4ac89411bf..325684186d 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/TopologyManagementIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/TopologyManagementIntegrationTest.scala @@ -19,8 +19,8 @@ import com.digitalasset.canton.examples.java.cycle as C import com.digitalasset.canton.integration.* import com.digitalasset.canton.integration.plugins.{ UseBftSequencer, - UseCommunityReferenceBlockSequencer, UsePostgres, + UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.util.{PartiesAllocator, PartyToParticipantDeclarative} @@ -245,7 +245,7 @@ trait TopologyManagementIntegrationTest ) } - val snapshot = participant1.topology.transactions.export_topology_snapshot(daId) + val snapshot = participant1.topology.transactions.export_topology_snapshotV2(daId) // ignores duplicate transaction loggerFactory.assertLogsSeq( @@ -254,7 +254,7 @@ trait TopologyManagementIntegrationTest )( { participant1.topology.transactions.load(Seq(tx), daId) - participant1.topology.transactions.import_topology_snapshot(snapshot, daId) + participant1.topology.transactions.import_topology_snapshotV2(snapshot, daId) }, { logEntries => logEntries should not be empty @@ -1366,7 +1366,7 @@ trait TopologyManagementIntegrationTest class TopologyManagementReferenceIntegrationTestPostgres extends TopologyManagementIntegrationTest { registerPlugin(new UsePostgres(loggerFactory)) - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } class TopologyManagementBftOrderingIntegrationTestPostgres diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/TopologyValidationMultiSynchronizerIntegrationTest.scala 
b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/TopologyValidationMultiSynchronizerIntegrationTest.scala index d24b688d26..adfb1fe471 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/TopologyValidationMultiSynchronizerIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/TopologyValidationMultiSynchronizerIntegrationTest.scala @@ -38,7 +38,8 @@ class TopologyValidationMultiSynchronizerIntegrationTest import env.* participants.all.synchronizers.connect_local(sequencer1, alias = daName) participants.all.synchronizers.connect_local(sequencer2, alias = acmeName) - participants.all.dars.upload(BaseTest.CantonExamplesPath) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = daId) + participants.all.dars.upload(BaseTest.CantonExamplesPath, synchronizerId = acmeId) // Disable automatic assignment so that we really control it def disableAutomaticAssignment( diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/UserManagementIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/UserManagementIntegrationTest.scala index c4e185d7f0..9a239387eb 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/UserManagementIntegrationTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/UserManagementIntegrationTest.scala @@ -10,7 +10,7 @@ import com.digitalasset.canton.admin.api.client.data.{ } import com.digitalasset.canton.config.DbConfig import com.digitalasset.canton.console.CommandFailure -import com.digitalasset.canton.integration.plugins.UseCommunityReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer import com.digitalasset.canton.integration.{ CommunityIntegrationTest, ConfigTransforms, @@ -469,11 +469,11 @@ trait UserManagementIntegrationTest extends CommunityIntegrationTest with Shared } class UserManagementReferenceIntegrationTestDefault extends UserManagementIntegrationTest { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) } class UserManagementReferenceIntegrationTestPostgres extends UserManagementIntegrationTest { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } trait UserManagementNoExtraAdminIntegrationTest @@ -502,5 +502,5 @@ trait UserManagementNoExtraAdminIntegrationTest class UserManagementNoExtraAdminReferenceIntegrationTestPostgres extends UserManagementNoExtraAdminIntegrationTest { - registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) } diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/traffic/TrafficControlConcurrentTopologyChangeTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/traffic/TrafficControlConcurrentTopologyChangeTest.scala index 21c472e053..35426880ba 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/traffic/TrafficControlConcurrentTopologyChangeTest.scala +++ 
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/traffic/TrafficControlConcurrentTopologyChangeTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/traffic/TrafficControlConcurrentTopologyChangeTest.scala
index 21c472e053..35426880ba 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/traffic/TrafficControlConcurrentTopologyChangeTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/traffic/TrafficControlConcurrentTopologyChangeTest.scala
@@ -13,9 +13,9 @@ import com.digitalasset.canton.config.RequireTypes.{
 }
 import com.digitalasset.canton.discard.Implicits.DiscardOps
 import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
   UsePostgres,
   UseProgrammableSequencer,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
@@ -63,9 +63,7 @@ trait TrafficControlConcurrentTopologyChangeTest
     sequencer1.topology.synchronizer_parameters.propose_update(
       synchronizerId = daId,
       _.update(
-        trafficControl = Some(trafficControlParameters),
-        // So that topology changes become effective as of sequencing time
-        topologyChangeDelay = config.NonNegativeFiniteDuration.Zero,
+        trafficControl = Some(trafficControlParameters)
       ),
     )
@@ -229,6 +227,6 @@ trait TrafficControlConcurrentTopologyChangeTest
 class TrafficControlConcurrentTopologyChangeTestPostgres
     extends TrafficControlConcurrentTopologyChangeTest {
   registerPlugin(new UsePostgres(loggerFactory))
-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
   registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory))
 }
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/traffic/TrafficControlTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/traffic/TrafficControlTest.scala
index 2a7f9f9f76..7dbf2aa2c6 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/traffic/TrafficControlTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/traffic/TrafficControlTest.scala
@@ -28,11 +28,7 @@ import com.digitalasset.canton.data.CantonTimestamp
 import com.digitalasset.canton.discard.Implicits.DiscardOps
 import com.digitalasset.canton.integration.EnvironmentDefinition.S1M1
 import com.digitalasset.canton.integration.bootstrap.NetworkBootstrapper
-import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
-  UseH2,
-  UsePostgres,
-}
+import com.digitalasset.canton.integration.plugins.{UseH2, UsePostgres, UseReferenceBlockSequencer}
 import com.digitalasset.canton.integration.tests.TrafficBalanceSupport
 import com.digitalasset.canton.integration.util.OnboardsNewSequencerNode
 import com.digitalasset.canton.integration.{
@@ -109,11 +105,11 @@ trait TrafficControlTest
         new NetworkBootstrapper(S1M1)
       }
       .addConfigTransforms(
+        ConfigTransforms.useStaticTime,
         ConfigTransforms.updateAllSequencerClientConfigs_(
           // Force the participant to notice quickly that the synchronizer is down
           _.focus(_.warnDisconnectDelay).replace(config.NonNegativeFiniteDuration.ofMillis(1))
         ),
-        ConfigTransforms.useStaticTime,
       )
       .addConfigTransform(
         ConfigTransforms.updateAllSequencerConfigs_(
@@ -206,7 +202,7 @@ trait TrafficControlTest
       val clock = env.environment.simClock.value
       // Re-fill the base rate to give some credit to the mediator, still won't be enough for the submission request though
       clock.advance(trafficControlParameters.maxBaseTrafficAccumulationDuration.asJava)
-      participant1.ledger_api.packages.upload_dar(CantonTestsPath)
+      participant1.ledger_api.packages.upload_dar(CantonTestsPath, synchronizerId = daId)

       val alice = participant1.parties.enable(
         "Alice",
@@ -368,13 +364,8 @@ trait TrafficControlTest
       trafficStateBeforeRestart.trafficStates should contain theSameElementsAs trafficStateAfterRestart.trafficStates

-      clue("advance clock for sequencer pool connection restart") {
+      clue("wait for members to reconnect") {
         eventually() {
-          val clock = env.environment.simClock.value
-          // The sequencer connection pool internal mechanisms to restart connections rely on the clock time advancing.
-          // 1 second is the default subscription pool retry delay.
-          clock.advance(Duration.ofSeconds(1))
-
           participant1.health.status.trySuccess.connectedSynchronizers
             .get(daId) should contain(SubmissionReady(true))
@@ -839,12 +830,12 @@ trait TrafficControlTest

 class TrafficControlTestH2 extends TrafficControlTest {
   registerPlugin(new UseH2(loggerFactory))
-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory))
 }

 class TrafficControlTestPostgres extends TrafficControlTest {
   registerPlugin(new UsePostgres(loggerFactory))
-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
 }

 // TODO(#16789) Re-enable test once dynamic onboarding is supported for BFT Orderer
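Two details of the TrafficControlTest changes above are easy to miss: `ConfigTransforms.useStaticTime` now comes first in the transform list, and with static time the test must drive time itself. A hedged sketch of the refill step, assuming `trafficControlParameters` and the test environment are in scope:

// Sketch: under static time, base-rate traffic accrues only when the sim
// clock is advanced explicitly. `trafficControlParameters` is assumed in scope.
val clock = env.environment.simClock.value
// Re-fill the base rate; per the test comment, still not enough for a full submission.
clock.advance(trafficControlParameters.maxBaseTrafficAccumulationDuration.asJava)
participant1.ledger_api.packages.upload_dar(CantonTestsPath, synchronizerId = daId)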
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/LogicalUpgradeUtils.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/LogicalUpgradeUtils.scala
index 6d82510e0c..f684bf2011 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/LogicalUpgradeUtils.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/LogicalUpgradeUtils.scala
@@ -22,7 +22,11 @@ import com.digitalasset.canton.integration.tests.upgrade.LogicalUpgradeUtils.{
 }
 import com.digitalasset.canton.sequencing.client.{SendCallback, SendResult}
 import com.digitalasset.canton.sequencing.protocol.{Batch, Deliver}
-import com.digitalasset.canton.sequencing.{SequencerConnections, SubmissionRequestAmplification}
+import com.digitalasset.canton.sequencing.{
+  SequencerConnectionPoolDelays,
+  SequencerConnections,
+  SubmissionRequestAmplification,
+}
 import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId
 import com.digitalasset.canton.topology.transaction.{NamespaceDelegation, OwnerToKeyMapping}
 import com.digitalasset.canton.topology.{PhysicalSynchronizerId, SynchronizerId, UniqueIdentifier}
@@ -79,7 +83,7 @@ trait LogicalUpgradeUtils { self: BaseTest =>
     def writeAuthorizeStoreToFile(node: InstanceReference): Unit = {
       val byteString = node.topology.transactions
-        .export_topology_snapshot(
+        .export_topology_snapshotV2(
           filterMappings = Seq(NamespaceDelegation.code, OwnerToKeyMapping.code),
           filterNamespace = node.id.uid.namespace.filterString,
         )
@@ -130,7 +134,7 @@ trait LogicalUpgradeUtils { self: BaseTest =>
     migratedNode.topology.init_id_from_uid(files.uid)
     migratedNode.health.wait_for_ready_for_node_topology()
     migratedNode.topology.transactions
-      .import_topology_snapshot(files.authorizedStore, TopologyStoreId.Authorized)
+      .import_topology_snapshotV2(files.authorizedStore, TopologyStoreId.Authorized)

     migratedNode match {
       case newSequencer: SequencerReference =>
@@ -149,6 +153,7 @@ trait LogicalUpgradeUtils { self: BaseTest =>
             sequencerTrustThreshold,
             sequencerLivenessMargin,
             SubmissionRequestAmplification.NoAmplification,
+            SequencerConnectionPoolDelays.default,
           ),
         )
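For orientation, the migration path in `LogicalUpgradeUtils` now round-trips topology state through the V2 snapshot commands. A condensed, hedged sketch of the sequence, using only names that appear in the diff above (`node`, `migratedNode`, and `files` stand in for the surrounding test state):

// Sketch of the node migration sequence shown above, not a complete program.
val authorizedStore = node.topology.transactions
  .export_topology_snapshotV2(
    filterMappings = Seq(NamespaceDelegation.code, OwnerToKeyMapping.code),
    filterNamespace = node.id.uid.namespace.filterString,
  )
migratedNode.topology.init_id_from_uid(files.uid)
migratedNode.health.wait_for_ready_for_node_topology()
// Re-import into the authorized store of the migrated node.
migratedNode.topology.transactions
  .import_topology_snapshotV2(authorizedStore, TopologyStoreId.Authorized)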
@@ -206,9 +211,7 @@ private[upgrade] object LogicalUpgradeUtils {
       genesisStateFile: File,
   ) {
     def uid: UniqueIdentifier =
-      UniqueIdentifier.tryFromProtoPrimitive(
-        uidFile.contentAsString
-      )
+      UniqueIdentifier.tryFromProtoPrimitive(uidFile.contentAsString)

     def keys: Seq[(ByteString, Option[String])] =
       keyFiles.map { file =>
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUBase.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUBase.scala
index 2acf7b5004..34782dbdb6 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUBase.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUBase.scala
@@ -4,7 +4,9 @@ package com.digitalasset.canton.integration.tests.upgrade.lsu

 import com.digitalasset.canton.admin.api.client.data.StaticSynchronizerParameters
-import com.digitalasset.canton.config.RequireTypes
+import com.digitalasset.canton.config
+import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
+import com.digitalasset.canton.console.InstanceReference
 import com.digitalasset.canton.data.{CantonTimestamp, SynchronizerSuccessor}
 import com.digitalasset.canton.integration.*
 import com.digitalasset.canton.integration.plugins.UsePostgres
@@ -13,8 +15,7 @@ import com.digitalasset.canton.integration.tests.upgrade.LogicalUpgradeUtils.Syn
 import com.digitalasset.canton.integration.tests.upgrade.lsu.LSUBase.Fixture
 import com.digitalasset.canton.integration.util.EntitySyntax
 import com.digitalasset.canton.topology.PhysicalSynchronizerId
-import com.digitalasset.canton.version.ProtocolVersion.ProtocolVersionWithStatus
-import com.digitalasset.canton.version.{ProtocolVersion, ProtocolVersionAnnotation}
+import com.digitalasset.canton.version.ProtocolVersion
 import monocle.macros.syntax.lens.*

 /** This trait provides helpers for the logical synchronizer upgrade tests. The main goal is to
@@ -33,7 +34,8 @@ trait LSUBase
   protected var newSynchronizerNodes: SynchronizerNodes = _
   protected def newOldSequencers: Map[String, String]
   protected def newOldMediators: Map[String, String]
-  protected val newOldNodesResolution: Map[String, String] = newOldSequencers ++ newOldMediators
+  protected def newOldNodesResolution: Map[String, String] =
+    newOldSequencers ++ newOldMediators

   protected def upgradeTime: CantonTimestamp

@@ -50,6 +52,24 @@ trait LSUBase
       ConfigTransforms.useStaticTime,
     )

+  protected def fixtureWithDefaults(upgradeTime: CantonTimestamp = upgradeTime)(implicit
+      env: TestConsoleEnvironment
+  ): Fixture = {
+    val currentPSId = env.daId
+
+    Fixture(
+      currentPSId = currentPSId,
+      upgradeTime = upgradeTime,
+      oldSynchronizerNodes = oldSynchronizerNodes,
+      newSynchronizerNodes = newSynchronizerNodes,
+      newOldNodesResolution = newOldNodesResolution,
+      oldSynchronizerOwners = env.synchronizerOwners1,
+      newPV = ProtocolVersion.dev,
+      // increasing the serial as well, so that the test also works when running with PV=dev
+      newSerial = currentPSId.serial.increment.toNonNegative,
+    )
+  }
+
   /** Perform synchronizer side of the LSU:
     *
     *   - Upgrade announcement
@@ -58,16 +78,14 @@
     */
   protected def performSynchronizerNodesLSU(
       fixture: Fixture
-  )(implicit env: TestConsoleEnvironment): Unit = {
-    import env.*
-
-    synchronizerOwners1.foreach(
-      _.topology.synchronizer_upgrade.announcement.propose(fixture.newPSId, upgradeTime)
+  ): Unit = {
+    fixture.oldSynchronizerOwners.foreach(
+      _.topology.synchronizer_upgrade.announcement.propose(fixture.newPSId, fixture.upgradeTime)
     )

     migrateSynchronizerNodes(fixture)

-    oldSynchronizerNodes.sequencers.zip(newSynchronizerNodes.sequencers).foreach {
+    fixture.oldSynchronizerNodes.sequencers.zip(fixture.newSynchronizerNodes.sequencers).foreach {
       case (oldSequencer, newSequencer) =>
         oldSequencer.topology.synchronizer_upgrade.sequencer_successors.propose_successor(
           sequencerId = oldSequencer.id,
@@ -85,34 +103,42 @@
   ): Unit = {
     exportNodesData(
       SynchronizerNodes(
-        sequencers = oldSynchronizerNodes.sequencers,
-        mediators = oldSynchronizerNodes.mediators,
+        sequencers = fixture.oldSynchronizerNodes.sequencers,
+        mediators = fixture.oldSynchronizerNodes.mediators,
       )
     )

     // Migrate nodes preserving their data (and IDs)
-    newSynchronizerNodes.all.foreach { newNode =>
+    fixture.newSynchronizerNodes.all.foreach { newNode =>
       migrateNode(
         migratedNode = newNode,
         newStaticSynchronizerParameters = fixture.newStaticSynchronizerParameters,
         synchronizerId = fixture.currentPSId,
-        newSequencers = newSynchronizerNodes.sequencers,
+        newSequencers = fixture.newSynchronizerNodes.sequencers,
         exportDirectory = baseExportDirectory,
-        sourceNodeNames = newOldNodesResolution,
+        sourceNodeNames = fixture.newOldNodesResolution,
       )
     }
   }
 }

 private[lsu] object LSUBase {
-  final case class Fixture(currentPSId: PhysicalSynchronizerId, upgradeTime: CantonTimestamp) {
-    val newPV: ProtocolVersionWithStatus[ProtocolVersionAnnotation.Alpha] = ProtocolVersion.dev
-
-    // increasing the serial as well, so that the test also works when running with PV=dev
-    val newSerial: RequireTypes.NonNegativeNumeric[Int] = currentPSId.serial.increment.toNonNegative
-
+  final case class Fixture(
+      currentPSId: PhysicalSynchronizerId,
+      upgradeTime: CantonTimestamp,
+      oldSynchronizerNodes: SynchronizerNodes,
+      newSynchronizerNodes: SynchronizerNodes,
+      newOldNodesResolution: Map[String, String],
+      oldSynchronizerOwners: Set[InstanceReference],
+      newPV: ProtocolVersion,
+      newSerial: NonNegativeInt,
+  ) {
     val newStaticSynchronizerParameters: StaticSynchronizerParameters =
-      StaticSynchronizerParameters.defaultsWithoutKMS(newPV, newSerial)
+      StaticSynchronizerParameters.defaultsWithoutKMS(
+        newPV,
+        newSerial,
+        topologyChangeDelay = config.NonNegativeFiniteDuration.Zero,
+      )

     val newPSId: PhysicalSynchronizerId =
       PhysicalSynchronizerId(currentPSId.logical, newStaticSynchronizerParameters.toInternal)
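`Fixture` is no longer self-computing: tests either build it explicitly or call the new `fixtureWithDefaults` helper. A sketch of the common case, as the later test diffs in this patch use it:

// Sketch: keep the defaults (PV=dev, serial bumped by one) for a single upgrade.
val fixture = fixtureWithDefaults()
performSynchronizerNodesLSU(fixture)
// Tests that run several upgrades in sequence construct Fixtures explicitly
// instead, as LSUCancellationIntegrationTest does below.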
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUCancellationIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUCancellationIntegrationTest.scala
new file mode 100644
index 0000000000..a5078f66c0
--- /dev/null
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUCancellationIntegrationTest.scala
@@ -0,0 +1,296 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.integration.tests.upgrade.lsu
+
+import com.digitalasset.canton.admin.api.client.data.DynamicSynchronizerParameters as ConsoleDynamicSynchronizerParameters
+import com.digitalasset.canton.config
+import com.digitalasset.canton.config.{DbConfig, SynchronizerTimeTrackerConfig}
+import com.digitalasset.canton.console.CommandFailure
+import com.digitalasset.canton.data.{CantonTimestamp, SynchronizerSuccessor}
+import com.digitalasset.canton.discard.Implicits.*
+import com.digitalasset.canton.integration.*
+import com.digitalasset.canton.integration.EnvironmentDefinition.S1M1
+import com.digitalasset.canton.integration.bootstrap.NetworkBootstrapper
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer
+import com.digitalasset.canton.integration.plugins.{
+  UseBftSequencer,
+  UsePostgres,
+  UseReferenceBlockSequencer,
+}
+import com.digitalasset.canton.integration.tests.examples.IouSyntax
+import com.digitalasset.canton.integration.tests.upgrade.LogicalUpgradeUtils.SynchronizerNodes
+import com.digitalasset.canton.integration.tests.upgrade.lsu.LSUBase.Fixture
+import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig
+import com.digitalasset.canton.sequencing.SequencerConnections
+import com.digitalasset.canton.topology.{PartyId, TopologyManagerError}
+import com.digitalasset.canton.version.ProtocolVersion
+import monocle.macros.syntax.lens.*
+
+import scala.annotation.nowarn
+
+/** The goal is to ensure that an LSU can be cancelled and that another LSU can be done
+  * subsequently.
+  *
+  * Test setup:
+  *
+  *   - LSU is announced
+  *   - Before the upgrade time, it is cancelled
+  *   - Another LSU is announced
+  *   - Second LSU is performed
+  */
+@nowarn("msg=dead code")
+abstract class LSUCancellationIntegrationTest extends LSUBase {
+
+  override protected def testName: String = "logical-synchronizer-upgrade"
+
+  registerPlugin(new UsePostgres(loggerFactory))
+
+  override protected lazy val newOldSequencers: Map[String, String] =
+    throw new IllegalAccessException("Use fixtures instead")
+  override protected lazy val newOldMediators: Map[String, String] =
+    throw new IllegalAccessException("Use fixtures instead")
+
+  private lazy val upgradeTime1: CantonTimestamp = CantonTimestamp.Epoch.plusSeconds(30)
+  private lazy val upgradeTime2: CantonTimestamp = CantonTimestamp.Epoch.plusSeconds(90)
+
+  private var fixture1: Fixture = _
+  private var fixture2: Fixture = _
+
+  private var bob: PartyId = _
+
+  private var dynamicSynchronizerParameters: ConsoleDynamicSynchronizerParameters = _
+
+  override protected lazy val upgradeTime: CantonTimestamp = throw new IllegalAccessException(
+    "Use upgradeTime1 and upgradeTime2 instead"
+  )
+
+  override protected def configTransforms: List[ConfigTransform] = {
+    val lowerBound1 = List("sequencer2") // successor of sequencer1 for the first upgrade
+      .map(sequencerName =>
+        ConfigTransforms
+          .updateSequencerConfig(sequencerName)(
+            _.focus(_.parameters.sequencingTimeLowerBoundExclusive).replace(Some(upgradeTime1))
+          )
+      )
+
+    val lowerBound2 = List("sequencer3") // successor of sequencer1 for the second upgrade
+      .map(sequencerName =>
+        ConfigTransforms
+          .updateSequencerConfig(sequencerName)(
+            _.focus(_.parameters.sequencingTimeLowerBoundExclusive).replace(Some(upgradeTime2))
+          )
+      )
+
+    val allNewNodes = Set("sequencer2", "sequencer3", "mediator2", "mediator3")
+
+    lowerBound1 ++ lowerBound2 ++ List(
+      ConfigTransforms.disableAutoInit(allNewNodes),
+      ConfigTransforms.useStaticTime,
+    )
+  }
+
+  override lazy val environmentDefinition: EnvironmentDefinition =
+    EnvironmentDefinition
+      .buildBaseEnvironmentDefinition(
+        numParticipants = 1,
+        numSequencers = 3,
+        numMediators = 3,
+      )
+      /*
+        The test is made slightly more robust by explicitly controlling which nodes are running.
+        This allows us to ensure that the correct synchronizer nodes are used for each LSU.
+       */
+      .withManualStart
+      .withNetworkBootstrap { implicit env =>
+        new NetworkBootstrapper(S1M1)
+      }
+      .addConfigTransforms(configTransforms*)
+      .withSetup { implicit env =>
+        import env.*
+
+        val daSequencerConnection =
+          SequencerConnections.single(sequencer1.sequencerConnection.withAlias(daName.toString))
+
+        participants.local.start()
+
+        participants.all.synchronizers.connect(
+          SynchronizerConnectionConfig(
+            synchronizerAlias = daName,
+            sequencerConnections = daSequencerConnection,
+            timeTracker = SynchronizerTimeTrackerConfig(observationLatency =
+              config.NonNegativeFiniteDuration.Zero
+            ),
+          )
+        )
+
+        participants.all.dars.upload(CantonExamplesPath)
+        participant1.health.ping(participant1)
+
+        synchronizerOwners1.foreach(
+          _.topology.synchronizer_parameters.propose_update(
+            daId,
+            _.copy(reconciliationInterval = config.PositiveDurationSeconds.ofSeconds(1)),
+          )
+        )
+      }
+
+  /** Check whether an LSU is ongoing
+    * @param successor
+    *   Defined iff an upgrade is ongoing
+    */
+  private def checkLSUOngoing(
+      successor: Option[SynchronizerSuccessor]
+  )(implicit env: TestConsoleEnvironment) = {
+    import env.*
+
+    val connectedSynchronizer = participant1.underlying.value.sync
+      .connectedSynchronizerForAlias(daName)
+      .value
+
+    connectedSynchronizer.ephemeral.recordOrderPublisher.getSynchronizerSuccessor shouldBe successor
+
+    connectedSynchronizer.synchronizerCrypto.currentSnapshotApproximation.ipsSnapshot
+      .synchronizerUpgradeOngoing()
+      .futureValueUS
+      .map { case (successor, _) => successor } shouldBe successor
+  }
+
+  "Logical synchronizer upgrade should be cancellable and re-announced" should {
+    "initial setup" in { implicit env =>
+      import env.*
+
+      fixture1 = Fixture(
+        currentPSId = daId,
+        upgradeTime = upgradeTime1,
+        oldSynchronizerNodes = SynchronizerNodes(Seq(sequencer1), Seq(mediator1)),
+        newSynchronizerNodes = SynchronizerNodes(Seq(sequencer2), Seq(mediator2)),
+        newOldNodesResolution = Map("sequencer2" -> "sequencer1", "mediator2" -> "mediator1"),
+        oldSynchronizerOwners = synchronizerOwners1,
+        newPV = ProtocolVersion.dev,
+        newSerial = daId.serial.increment.toNonNegative,
+      )
+
+      fixture2 = Fixture(
+        currentPSId = daId,
+        upgradeTime = upgradeTime2,
+        oldSynchronizerNodes = SynchronizerNodes(Seq(sequencer1), Seq(mediator1)),
+        newSynchronizerNodes = SynchronizerNodes(Seq(sequencer3), Seq(mediator3)),
+        newOldNodesResolution = Map("sequencer3" -> "sequencer1", "mediator3" -> "mediator1"),
+        oldSynchronizerOwners = synchronizerOwners1,
+        newPV = ProtocolVersion.dev,
+        newSerial = fixture1.newSerial.increment.toNonNegative,
+      )
+
+      dynamicSynchronizerParameters = participant1.topology.synchronizer_parameters.latest(daId)
+
+      // Some assertions below don't make sense if the value is too low
+      dynamicSynchronizerParameters.decisionTimeout.asJava.getSeconds should be > 10L
+
+      daId should not be fixture1.newPSId
+      fixture1.newPSId should not be fixture2.newPSId
+
+      val alice = participant1.parties.enable("Alice")
+      val bank = participant1.parties.enable("Bank")
+      IouSyntax.createIou(participant1)(bank, alice).discard
+    }
+
+    "first LSU and cancellation" in { implicit env =>
+      import env.*
+
+      val clock = environment.simClock.value
+
+      sequencer2.start()
+      mediator2.start()
+
+      performSynchronizerNodesLSU(fixture1)
+
+      eventually()(checkLSUOngoing(Some(fixture1.synchronizerSuccessor)))
+
+      // Fails because the upgrade is ongoing
+      loggerFactory.assertThrowsAndLogs[CommandFailure](
+        participant1.parties.enable("Bob"),
+        _.shouldBeCantonErrorCode(TopologyManagerError.OngoingSynchronizerUpgrade),
+      )
+
+      clock.advanceTo(upgradeTime1.minusSeconds(5))
+
+      fixture1.oldSynchronizerOwners.foreach(
+        _.topology.synchronizer_upgrade.announcement.revoke(fixture1.newPSId, fixture1.upgradeTime)
+      )
+
+      eventually()(checkLSUOngoing(None))
+
+      sequencer2.stop()
+      mediator2.stop()
+
+      clock.advanceTo(upgradeTime1.immediateSuccessor)
+
+      // Time offset on the old sequencer is not applied
+      sequencer1.underlying.value.sequencer.timeTracker
+        .fetchTime()
+        .futureValueUS should be < upgradeTime1.plus(
+        dynamicSynchronizerParameters.decisionTimeout.asJava
+      )
+
+      // Call should fail if no upgrade is ongoing
+      eventually() {
+        participant1.underlying.value.sync
+          .upgradeSynchronizerTo(daId, fixture1.synchronizerSuccessor)
+          .value
+          .futureValueUS
+          .left
+          .value shouldBe "No synchronizer upgrade ongoing"
+      }
+
+      bob = participant1.parties.enable("Bob")
+    }
+
+    "second LSU" in { implicit env =>
+      import env.*
+
+      val clock = environment.simClock.value
+
+      sequencer3.start()
+      mediator3.start()
+
+      performSynchronizerNodesLSU(fixture2)
+
+      clock.advanceTo(upgradeTime2.immediateSuccessor)
+
+      eventually() {
+        participants.all.forall(_.synchronizers.is_connected(fixture2.newPSId)) shouldBe true
+      }
+
+      // Time offset is applied on the old sequencer
+      sequencer1.underlying.value.sequencer.timeTracker
+        .fetchTime()
+        .futureValueUS should be >= upgradeTime2.plus(
+        dynamicSynchronizerParameters.decisionTimeout.asJava
+      )
+
+      // Bob is known
+      participant1.topology.party_to_participant_mappings
+        .list(fixture2.newPSId, filterParty = bob.filterString)
+        .loneElement
+    }
+  }
+}
+
+final class LSUCancellationReferenceIntegrationTest extends LSUCancellationIntegrationTest {
+  registerPlugin(
+    new UseReferenceBlockSequencer[DbConfig.Postgres](
+      loggerFactory,
+      MultiSynchronizer.tryCreate(Set("sequencer1"), Set("sequencer2"), Set("sequencer3")),
+    )
+  )
+}
+
+final class LSUCancellationBftOrderingIntegrationTest extends LSUCancellationIntegrationTest {
+  registerPlugin(
+    new UseBftSequencer(
+      loggerFactory,
+      MultiSynchronizer.tryCreate(Set("sequencer1"), Set("sequencer2"), Set("sequencer3")),
+    )
+  )
+}
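The cancellation path exercised above is symmetric to the announcement. A hedged sketch of the two console calls, with `fixture` standing in for either fixture of the test:

// Sketch: announce the upgrade, then revoke it before the upgrade time is reached.
fixture.oldSynchronizerOwners.foreach(
  _.topology.synchronizer_upgrade.announcement.propose(fixture.newPSId, fixture.upgradeTime)
)
// ... later, strictly before fixture.upgradeTime:
fixture.oldSynchronizerOwners.foreach(
  _.topology.synchronizer_upgrade.announcement.revoke(fixture.newPSId, fixture.upgradeTime)
)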
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUEndToEndIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUEndToEndIntegrationTest.scala
index a0e621c71d..4d6781330d 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUEndToEndIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUEndToEndIntegrationTest.scala
@@ -10,15 +10,14 @@
 import com.digitalasset.canton.discard.Implicits.*
 import com.digitalasset.canton.integration.*
 import com.digitalasset.canton.integration.EnvironmentDefinition.S1M1
 import com.digitalasset.canton.integration.bootstrap.NetworkBootstrapper
-import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer
 import com.digitalasset.canton.integration.plugins.{
   UseBftSequencer,
-  UseCommunityReferenceBlockSequencer,
   UsePostgres,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.integration.tests.examples.IouSyntax
 import com.digitalasset.canton.integration.tests.upgrade.LogicalUpgradeUtils.SynchronizerNodes
-import com.digitalasset.canton.integration.tests.upgrade.lsu.LSUBase.Fixture
 import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig
 import com.digitalasset.canton.sequencing.SequencerConnections
@@ -78,7 +77,7 @@ abstract class LSUEndToEndIntegrationTest extends LSUBase {
     "work end-to-end" in { implicit env =>
       import env.*

-      val fixture = Fixture(daId, upgradeTime)
+      val fixture = fixtureWithDefaults()

       participant1.health.ping(participant2)
@@ -92,9 +91,6 @@ abstract class LSUEndToEndIntegrationTest extends LSUBase {
       eventually() {
         participants.all.forall(_.synchronizers.is_connected(fixture.newPSId)) shouldBe true
-
-        // The sequencer connection pool internal mechanisms to restart connections rely on the clock time advancing.
-        environment.simClock.value.advance(Duration.ofSeconds(1))
       }

       oldSynchronizerNodes.all.stop()
@@ -137,7 +133,7 @@ abstract class LSUEndToEndIntegrationTest extends LSUBase {

 final class LSUEndToEndReferenceIntegrationTest extends LSUEndToEndIntegrationTest {
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](
+    new UseReferenceBlockSequencer[DbConfig.Postgres](
       loggerFactory,
       MultiSynchronizer.tryCreate(Set("sequencer1"), Set("sequencer2")),
     )
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUExternalPartiesTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUExternalPartiesTest.scala
index c7cc733328..bb168aeda1 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUExternalPartiesTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUExternalPartiesTest.scala
@@ -11,18 +11,15 @@
 import com.digitalasset.canton.examples.java.iou.Iou
 import com.digitalasset.canton.integration.*
 import com.digitalasset.canton.integration.EnvironmentDefinition.S1M1
 import com.digitalasset.canton.integration.bootstrap.NetworkBootstrapper
-import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer
-import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
-  UsePostgres,
-}
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer
+import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer}
 import com.digitalasset.canton.integration.tests.examples.IouSyntax
 import com.digitalasset.canton.integration.tests.upgrade.LogicalUpgradeUtils.SynchronizerNodes
-import com.digitalasset.canton.integration.tests.upgrade.lsu.LSUBase.Fixture
 import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil
 import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig
 import com.digitalasset.canton.sequencing.SequencerConnections

+import java.util.Optional
 import scala.jdk.CollectionConverters.*

 abstract class LSUExternalPartiesIntegrationTest extends LSUBase {
@@ -74,7 +71,7 @@ abstract class LSUExternalPartiesIntegrationTest extends LSUBase {
     "work with external parties" in { implicit env =>
       import env.*

-      val fixture = Fixture(daId, upgradeTime)
+      val fixture = fixtureWithDefaults()

       val alice = participant1.parties.external.enable("AliceE")
       val bob = participant2.parties.enable("Bob")
@@ -107,10 +104,10 @@ abstract class LSUExternalPartiesIntegrationTest extends LSUBase {
       val iouCreated = txIouAlice.getEvents.asScalaProtoCreatedContracts.loneElement

       val disclosedIou = new DisclosedContract(
-        Iou.TEMPLATE_ID_WITH_PACKAGE_ID,
-        iou.id.contractId,
         iouCreated.createdEventBlob,
         daId.logical.toProtoPrimitive,
+        Optional.of(Iou.TEMPLATE_ID_WITH_PACKAGE_ID),
+        Optional.of(iou.id.contractId),
       )

       participant3.ledger_api.state.acs.of_all() shouldBe empty
@@ -130,7 +127,7 @@ abstract class LSUExternalPartiesIntegrationTest extends LSUBase {

 final class LSUExternalPartiesReferenceIntegrationTest extends LSUExternalPartiesIntegrationTest {
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](
+    new UseReferenceBlockSequencer[DbConfig.Postgres](
       loggerFactory,
       MultiSynchronizer.tryCreate(Set("sequencer1"), Set("sequencer2")),
     )
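The `DisclosedContract` construction above reflects a changed Java-bindings constructor: the created-event blob and synchronizer id now lead, and the template id and contract id follow as `Optional`s. A sketch using the names from the test (`iouCreated`, `iou`, and `daId` assumed in scope):

// Sketch: new argument order of the Java bindings' DisclosedContract,
// as used in the test above.
val disclosedIou = new DisclosedContract(
  iouCreated.createdEventBlob,
  daId.logical.toProtoPrimitive,
  Optional.of(Iou.TEMPLATE_ID_WITH_PACKAGE_ID),
  Optional.of(iou.id.contractId),
)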
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUPruningIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUPruningIntegrationTest.scala
index 2501d3130a..bbf9b32288 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUPruningIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUPruningIntegrationTest.scala
@@ -4,8 +4,9 @@ package com.digitalasset.canton.integration.tests.upgrade.lsu

 import com.digitalasset.canton.config
-import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
+import com.digitalasset.canton.config.RequireTypes.NonNegativeProportion
 import com.digitalasset.canton.config.{
+  CommitmentSendDelay,
   DbConfig,
   PositiveDurationSeconds,
   SynchronizerTimeTrackerConfig,
@@ -15,15 +16,14 @@
 import com.digitalasset.canton.discard.Implicits.*
 import com.digitalasset.canton.integration.*
 import com.digitalasset.canton.integration.EnvironmentDefinition.S1M1
 import com.digitalasset.canton.integration.bootstrap.NetworkBootstrapper
-import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer
 import com.digitalasset.canton.integration.plugins.{
   UseBftSequencer,
-  UseCommunityReferenceBlockSequencer,
   UsePostgres,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.integration.tests.examples.IouSyntax
 import com.digitalasset.canton.integration.tests.upgrade.LogicalUpgradeUtils.SynchronizerNodes
-import com.digitalasset.canton.integration.tests.upgrade.lsu.LSUBase.Fixture
 import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig
 import com.digitalasset.canton.sequencing.SequencerConnections
 import monocle.macros.syntax.lens.*
@@ -58,7 +58,14 @@ abstract class LSUPruningIntegrationTest extends LSUBase {
       }
       .addConfigTransforms(configTransforms*)
       .updateTestingConfig(
-        _.focus(_.maxCommitmentSendDelayMillis).replace(Some(NonNegativeInt.zero))
+        _.focus(_.commitmentSendDelay).replace(
+          Some(
+            CommitmentSendDelay(
+              Some(NonNegativeProportion.zero),
+              Some(NonNegativeProportion.zero),
+            )
+          )
+        )
       )
       .addConfigTransforms(
         ConfigTransforms.updateMaxDeduplicationDurations(10.minutes.toJava)
@@ -94,7 +101,7 @@ abstract class LSUPruningIntegrationTest extends LSUBase {
     "work correctly" in { implicit env =>
       import env.*

-      val fixture = Fixture(daId, upgradeTime)
+      val fixture = fixtureWithDefaults()

       participant1.health.ping(participant2)
@@ -110,9 +117,6 @@ abstract class LSUPruningIntegrationTest extends LSUBase {
       eventually() {
         participants.all.forall(_.synchronizers.is_connected(fixture.newPSId)) shouldBe true
-
-        // The sequencer connection pool internal mechanisms to restart connections rely on the clock time advancing.
-        environment.simClock.value.advance(Duration.ofSeconds(1))
       }

       oldSynchronizerNodes.all.stop()
@@ -156,7 +160,7 @@ abstract class LSUPruningIntegrationTest extends LSUBase {

 final class LSUPruningReferenceIntegrationTest extends LSUPruningIntegrationTest {
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](
+    new UseReferenceBlockSequencer[DbConfig.Postgres](
       loggerFactory,
       MultiSynchronizer.tryCreate(Set("sequencer1"), Set("sequencer2")),
     )
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUReassignmentsIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUReassignmentsIntegrationTest.scala
index c95306ee1b..2c4cdcd727 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUReassignmentsIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUReassignmentsIntegrationTest.scala
@@ -9,14 +9,10 @@
 import com.digitalasset.canton.data.CantonTimestamp
 import com.digitalasset.canton.integration.*
 import com.digitalasset.canton.integration.EnvironmentDefinition.S1M1_S1M1
 import com.digitalasset.canton.integration.bootstrap.NetworkBootstrapper
-import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer
-import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
-  UsePostgres,
-}
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer
+import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer}
 import com.digitalasset.canton.integration.tests.examples.IouSyntax
 import com.digitalasset.canton.integration.tests.upgrade.LogicalUpgradeUtils.SynchronizerNodes
-import com.digitalasset.canton.integration.tests.upgrade.lsu.LSUBase.Fixture
 import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig
 import com.digitalasset.canton.protocol.LfContractId
 import com.digitalasset.canton.sequencing.SequencerConnections
@@ -65,7 +61,8 @@ abstract class LSUReassignmentsIntegrationTest extends LSUBase {
       )

       participants.all.synchronizers.connect_local(sequencer2, acmeName)
-      participants.all.dars.upload(CantonExamplesPath)
+      participants.all.dars.upload(CantonExamplesPath, synchronizerId = daId)
+      participants.all.dars.upload(CantonExamplesPath, synchronizerId = acmeId)

       synchronizerOwners1.foreach(
         _.topology.synchronizer_parameters.propose_update(
@@ -82,7 +79,7 @@ abstract class LSUReassignmentsIntegrationTest extends LSUBase {
     "work with reassignments" in { implicit env =>
      import env.*

-      val fixture = Fixture(daId, upgradeTime)
+      val fixture = fixtureWithDefaults()

       val alice = participant1.parties.enable("Alice", synchronizer = Some(daName))
       participant1.parties.enable("Alice", synchronizer = Some(acmeName))
@@ -167,7 +164,7 @@ abstract class LSUReassignmentsIntegrationTest extends LSUBase {

 final class LSUReassignmentsReferenceIntegrationTest extends LSUReassignmentsIntegrationTest {
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](
+    new UseReferenceBlockSequencer[DbConfig.Postgres](
       loggerFactory,
       MultiSynchronizer.tryCreate(Set("sequencer1"), Set("sequencer2"), Set("sequencer3")),
     )
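The testing-config knob for commitment send delays also changed shape above: the single `maxCommitmentSendDelayMillis` became a structured `CommitmentSendDelay`. A hedged sketch of the transform used in LSUPruningIntegrationTest:

// Sketch: disable randomized commitment send delays entirely.
// CommitmentSendDelay's two fields are proportions; both are zeroed here.
_.focus(_.commitmentSendDelay).replace(
  Some(
    CommitmentSendDelay(
      Some(NonNegativeProportion.zero),
      Some(NonNegativeProportion.zero),
    )
  )
)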
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSURestartIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSURestartIntegrationTest.scala
index a314a1bf83..455e52608e 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSURestartIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSURestartIntegrationTest.scala
@@ -9,14 +9,13 @@
 import com.digitalasset.canton.data.CantonTimestamp
 import com.digitalasset.canton.integration.*
 import com.digitalasset.canton.integration.EnvironmentDefinition.S1M1
 import com.digitalasset.canton.integration.bootstrap.NetworkBootstrapper
-import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer
 import com.digitalasset.canton.integration.plugins.{
   UseBftSequencer,
-  UseCommunityReferenceBlockSequencer,
   UsePostgres,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.integration.tests.upgrade.LogicalUpgradeUtils.SynchronizerNodes
-import com.digitalasset.canton.integration.tests.upgrade.lsu.LSUBase.Fixture
 import monocle.macros.syntax.lens.*

 /*
@@ -39,11 +38,6 @@ abstract class LSURestartIntegrationTest extends LSUBase {
         new NetworkBootstrapper(S1M1)
       }
       .addConfigTransforms(configTransforms*)
-      // Set a connection pool timeout larger than the upgrade time, otherwise it may trigger when we advance the simclock
-      .addConfigTransform(
-        _.focus(_.parameters.timeouts.processing.sequencerInfo)
-          .replace(config.NonNegativeDuration.ofSeconds(40))
-      )
       .withSetup { implicit env =>
         import env.*
@@ -80,7 +74,7 @@ abstract class LSURestartIntegrationTest extends LSUBase {
     "work when participants are restarted" in { implicit env =>
       import env.*

-      val fixture = Fixture(daId, upgradeTime)
+      val fixture = fixtureWithDefaults()

       participant1.health.ping(participant1)
@@ -105,7 +99,7 @@ abstract class LSURestartIntegrationTest extends LSUBase {

 final class LSURestartReferenceIntegrationTest extends LSURestartIntegrationTest {
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](
+    new UseReferenceBlockSequencer[DbConfig.Postgres](
       loggerFactory,
       MultiSynchronizer.tryCreate(Set("sequencer1"), Set("sequencer2")),
     )
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUTimeoutInflightIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUTimeoutInflightIntegrationTest.scala
index cdc7d54619..66f6f14559 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUTimeoutInflightIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUTimeoutInflightIntegrationTest.scala
@@ -10,15 +10,14 @@
 import com.digitalasset.canton.data.CantonTimestamp
 import com.digitalasset.canton.integration.*
 import com.digitalasset.canton.integration.EnvironmentDefinition.S1M1
 import com.digitalasset.canton.integration.bootstrap.NetworkBootstrapper
-import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer
 import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
   UsePostgres,
   UseProgrammableSequencer,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.integration.tests.examples.IouSyntax
 import com.digitalasset.canton.integration.tests.upgrade.LogicalUpgradeUtils.SynchronizerNodes
-import com.digitalasset.canton.integration.tests.upgrade.lsu.LSUBase.Fixture
 import com.digitalasset.canton.logging.LogEntry
 import com.digitalasset.canton.participant.protocol.TransactionProcessor.SubmissionErrors
 import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig
@@ -29,7 +28,6 @@
 import com.digitalasset.canton.synchronizer.sequencer.{
   ProgrammableSequencerPolicies,
   SendDecision,
 }
-import monocle.macros.syntax.lens.*

 import scala.concurrent.Future
 import scala.jdk.CollectionConverters.*
@@ -67,12 +65,6 @@ abstract class LSUTimeoutInFlightIntegrationTest extends LSUBase with HasProgram
         new NetworkBootstrapper(S1M1)
       }
       .addConfigTransforms(configTransforms*)
-      // Set a connection pool timeout larger than the duration between the initial time of the test
-      // and the upgrade time, otherwise it may trigger when we advance the simclock
-      .addConfigTransform(
-        _.focus(_.parameters.timeouts.processing.sequencerInfo)
-          .replace(config.NonNegativeDuration.ofSeconds(40))
-      )
       .withSetup { implicit env =>
         import env.*
@@ -113,7 +105,7 @@ abstract class LSUTimeoutInFlightIntegrationTest extends LSUBase with HasProgram
     "be timed out around LSU" in { implicit env =>
       import env.*

-      val fixture = Fixture(daId, upgradeTime)
+      val fixture = fixtureWithDefaults()

       val alice = participant1.parties.enable("alice")
       val bob = participant2.parties.enable("bob")
@@ -267,7 +259,7 @@ abstract class LSUTimeoutInFlightIntegrationTest extends LSUBase with HasProgram

 final class LSUTimeoutInFlightReferenceIntegrationTest extends LSUTimeoutInFlightIntegrationTest {
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](
+    new UseReferenceBlockSequencer[DbConfig.Postgres](
       loggerFactory,
       MultiSynchronizer.tryCreate(Set("sequencer1"), Set("sequencer2")),
     )
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUTopologyIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUTopologyIntegrationTest.scala
index be3406ad3a..77d5b06d51 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUTopologyIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUTopologyIntegrationTest.scala
@@ -10,14 +10,13 @@
 import com.digitalasset.canton.data.CantonTimestamp
 import com.digitalasset.canton.integration.*
 import com.digitalasset.canton.integration.EnvironmentDefinition.S1M1
 import com.digitalasset.canton.integration.bootstrap.NetworkBootstrapper
-import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer
 import com.digitalasset.canton.integration.plugins.{
   UseBftSequencer,
-  UseCommunityReferenceBlockSequencer,
   UsePostgres,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.integration.tests.upgrade.LogicalUpgradeUtils.SynchronizerNodes
-import com.digitalasset.canton.integration.tests.upgrade.lsu.LSUBase.Fixture
 import com.digitalasset.canton.topology.TopologyManagerError
 import com.digitalasset.canton.topology.store.TimeQuery
 import com.digitalasset.canton.topology.transaction.TopologyMapping
@@ -89,7 +88,7 @@ abstract class LSUTopologyIntegrationTest extends LSUBase {
     "work end-to-end" in { implicit env =>
       import env.*

-      val fixture = Fixture(daId, upgradeTime)
+      val fixture = fixtureWithDefaults()
       val newPSId = fixture.newPSId
       val newStaticSynchronizerParameters = fixture.newStaticSynchronizerParameters
@@ -119,7 +118,7 @@ abstract class LSUTopologyIntegrationTest extends LSUBase {

 final class LSUTopologyReferenceIntegrationTest extends LSUTopologyIntegrationTest {
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](
+    new UseReferenceBlockSequencer[DbConfig.Postgres](
       loggerFactory,
       MultiSynchronizer.tryCreate(Set("sequencer1"), Set("sequencer2")),
     )
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/MinimumSequencingTimeIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/MinimumSequencingTimeIntegrationTest.scala
index a0bbfad45f..6887bde61d 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/MinimumSequencingTimeIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/MinimumSequencingTimeIntegrationTest.scala
@@ -6,8 +6,6 @@ package com.digitalasset.canton.integration.tests.upgrade.lsu
 import com.daml.metrics.api.MetricsContext
 import com.digitalasset.canton.data.CantonTimestamp
 import com.digitalasset.canton.integration.*
-import com.digitalasset.canton.integration.EnvironmentDefinition.S1M1
-import com.digitalasset.canton.integration.bootstrap.NetworkBootstrapper
 import com.digitalasset.canton.integration.plugins.{UseBftSequencer, UsePostgres}
 import com.digitalasset.canton.integration.tests.upgrade.LogicalUpgradeUtils
 import com.digitalasset.canton.integration.util.EntitySyntax
@@ -41,10 +39,8 @@ abstract class MinimumSequencingTimeIntegrationTest
   private val sequencingTimeLowerBoundExclusive = CantonTimestamp.Epoch.plusSeconds(60)

   override lazy val environmentDefinition: EnvironmentDefinition =
-    EnvironmentDefinition.P1S1M1_Config
-      .withNetworkBootstrap { implicit env =>
-        new NetworkBootstrapper(S1M1)
-      }
+    EnvironmentDefinition.P1_S1M1
+      .addConfigTransforms(ConfigTransforms.useStaticTime)
       .addConfigTransforms(
         ConfigTransforms
           .updateAllSequencerConfigs_(
@@ -52,7 +48,6 @@
             .replace(Some(sequencingTimeLowerBoundExclusive))
         )
       )
-      .addConfigTransforms(ConfigTransforms.useStaticTime)

   "Logical synchronizer upgrade" should {
     "initialize the nodes for the upgraded synchronizer" in { implicit env =>
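Successor sequencers get their sequencing-time lower bound from a config transform; MinimumSequencingTimeIntegrationTest above applies it to all sequencers, while LSUCancellationIntegrationTest targets individual ones. A sketch of the single-sequencer variant, with `"sequencer2"` and `upgradeTime1` as in that test:

// Sketch: bound the successor sequencer's sequencing time from below
// by the planned upgrade time, so it never sequences before the upgrade.
ConfigTransforms.updateSequencerConfig("sequencer2")(
  _.focus(_.parameters.sequencingTimeLowerBoundExclusive).replace(Some(upgradeTime1))
)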
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/UpgradeTimeOldSynchronizerIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/UpgradeTimeOldSynchronizerIntegrationTest.scala
index 1e26939e27..006221fdce 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/UpgradeTimeOldSynchronizerIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/UpgradeTimeOldSynchronizerIntegrationTest.scala
@@ -4,17 +4,16 @@ package com.digitalasset.canton.integration.tests.upgrade.lsu

 import cats.syntax.functor.*
-import com.digitalasset.canton.admin.api.client.data.ParticipantStatus.SubmissionReady
 import com.digitalasset.canton.config
 import com.digitalasset.canton.config.DbConfig
 import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
 import com.digitalasset.canton.data.CantonTimestamp
 import com.digitalasset.canton.integration.*
-import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.SingleSynchronizer
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.SingleSynchronizer
 import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
   UsePostgres,
   UseProgrammableSequencer,
+  UseReferenceBlockSequencer,
 }
 import com.digitalasset.canton.integration.util.EntitySyntax
 import com.digitalasset.canton.logging.{LogEntry, SuppressionRule}
@@ -23,7 +22,6 @@ import com.digitalasset.canton.synchronizer.sequencer.{HasProgrammableSequencer,
 import monocle.macros.syntax.lens.*
 import org.slf4j.event.Level

-import java.time.Duration
 import scala.concurrent.Future

 /*
@@ -41,7 +39,7 @@ class UpgradeTimeOldSynchronizerIntegrationTest

   registerPlugin(new UsePostgres(loggerFactory))
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](
+    new UseReferenceBlockSequencer[DbConfig.Postgres](
       loggerFactory,
       sequencerGroups = SingleSynchronizer,
     )
@@ -66,10 +64,9 @@ class UpgradeTimeOldSynchronizerIntegrationTest

     participant1.synchronizers.connect_local(sequencer1, daName)

-    synchronizerOwners1.foreach { owner =>
-      owner.topology.synchronizer_upgrade.announcement
-        .propose(successorPSId, upgradeTime)
-    }
+    synchronizerOwners1.foreach(
+      _.topology.synchronizer_upgrade.announcement.propose(successorPSId, upgradeTime)
+    )

     eventually() {
       participant1.topology.synchronizer_upgrade.announcement
@@ -103,16 +100,6 @@ class UpgradeTimeOldSynchronizerIntegrationTest
         ),
       )

-    if (participant1.config.sequencerClient.useNewConnectionPool)
-      eventually() {
-        // The sequencer connection pool internal mechanisms to restart connections rely on the clock time advancing.
-        environment.simClock.value.advance(Duration.ofSeconds(1))
-
-        participant1.health.status.trySuccess.connectedSynchronizers.get(daId) should contain(
-          SubmissionReady(true)
-        )
-      }
-
     /** If the participant sends the ping before the resilient subscription detects that the
       * sequencer is up again, it fails (failed future). On such failed future, the submission is
       * not removed from the pending submissions.
@@ -175,9 +162,13 @@ class UpgradeTimeOldSynchronizerIntegrationTest
       logger.debug("Ping failed")
       participant1.testing.fetch_synchronizer_times()

-      // TODO(#26580): Test also cancelling and updating the upgrade announcement, e.g.:
-      //  - cancel the upgrade announcement and check that the time is not offset (or that offsetting got removed and this was logged)
-      //  - update the upgrade announcement to a later time and check that the time offsetting is updated accordingly
+      val dynamicSynchronizerParameters = participant1.topology.synchronizer_parameters.latest(daId)
+
+      sequencer1.underlying.value.sequencer.timeTracker
+        .fetchTime()
+        .futureValueUS should be >= upgradeTime.plus(
+        dynamicSynchronizerParameters.decisionTimeout.asJava
+      )

       val cleanSynchronizerIndex = participant1.underlying.value.sync.stateInspection
         .getAcsInspection(daId)
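The new assertion above checks the time offset applied on the old sequencer once the upgrade time has passed; LSUCancellationIntegrationTest asserts the mirror image (`should be <`) after a cancellation. A sketch of the positive check, with `upgradeTime` in scope:

// Sketch: after the upgrade time, the old sequencer's observed time is
// offset past upgradeTime + decisionTimeout.
val params = participant1.topology.synchronizer_parameters.latest(daId)
sequencer1.underlying.value.sequencer.timeTracker
  .fetchTime()
  .futureValueUS should be >= upgradeTime.plus(params.decisionTimeout.asJava)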
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/util/CommitmentTestUtil.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/util/CommitmentTestUtil.scala
index aa7c175afe..a4722c0bd8 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/util/CommitmentTestUtil.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/util/CommitmentTestUtil.scala
@@ -38,9 +38,11 @@ trait CommitmentTestUtil extends BaseTest with SortedReconciliationIntervalsHelp
       environment.participant1.config.topology.topologyTransactionRegistrationTimeout.asFiniteApproximation.toJava
     )

-  protected def deployOnP1P2AndCheckContract(
+  protected def deployOnTwoParticipantsAndCheckContract(
       synchronizerId: SynchronizerId,
       iouContract: AtomicReference[Iou.Contract],
+      firstParticipant: LocalParticipantReference,
+      secondParticipant: LocalParticipantReference,
       observers: Seq[LocalParticipantReference] = Seq.empty,
   )(implicit
       env: TestConsoleEnvironment,
@@ -50,9 +52,9 @@ trait CommitmentTestUtil extends BaseTest with SortedReconciliationIntervalsHelp
     logger.info(s"Deploying the iou contract on both participants")

     val iou = IouSyntax
-      .createIou(participant1, Some(synchronizerId))(
-        participant1.adminParty,
-        participant2.adminParty,
+      .createIou(firstParticipant, Some(synchronizerId))(
+        firstParticipant.adminParty,
+        secondParticipant.adminParty,
         observers = observers.toList.map(_.adminParty),
       )
@@ -60,7 +62,7 @@ trait CommitmentTestUtil extends BaseTest with SortedReconciliationIntervalsHelp
     logger.info(s"Waiting for the participants to see the contract in their ACS")
     eventually() {
-      (Seq(participant1, participant2) ++ observers).foreach(p =>
+      (Seq(firstParticipant, secondParticipant) ++ observers).foreach(p =>
         p.ledger_api.state.acs
           .of_all()
           .filter(_.contractId == iou.id.contractId) should not be empty
@@ -90,16 +92,43 @@ trait CommitmentTestUtil extends BaseTest with SortedReconciliationIntervalsHelp
   def deployThreeAndCheck(
       synchronizerId: SynchronizerId,
       alreadyDeployedContracts: AtomicReference[Seq[Iou.Contract]],
+      firstParticipant: LocalParticipantReference,
+      secondParticipant: LocalParticipantReference,
   )(implicit
       env: TestConsoleEnvironment,
       intervalDuration: IntervalDuration,
   ): (Seq[Iou.Contract], CommitmentPeriod, AcsCommitment.HashedCommitmentType) =
-    deployManyAndCheck(synchronizerId, PositiveInt.three, alreadyDeployedContracts)
+    deployManyAndCheck(
+      synchronizerId,
+      PositiveInt.three,
+      alreadyDeployedContracts,
+      firstParticipant,
+      secondParticipant,
+    )
+
+  def deployOneAndCheck(
+      synchronizerId: SynchronizerId,
+      alreadyDeployedContracts: AtomicReference[Seq[Iou.Contract]],
+      firstParticipant: LocalParticipantReference,
+      secondParticipant: LocalParticipantReference,
+  )(implicit
+      env: TestConsoleEnvironment,
+      intervalDuration: IntervalDuration,
+  ): (Seq[Iou.Contract], CommitmentPeriod, AcsCommitment.HashedCommitmentType) =
+    deployManyAndCheck(
+      synchronizerId,
+      PositiveInt.one,
+      alreadyDeployedContracts,
+      firstParticipant,
+      secondParticipant,
+    )

   def deployManyAndCheck(
       synchronizerId: SynchronizerId,
       nContracts: PositiveInt,
       alreadyDeployedContracts: AtomicReference[Seq[Iou.Contract]],
+      firstParticipant: LocalParticipantReference,
+      secondParticipant: LocalParticipantReference,
   )(implicit
       env: TestConsoleEnvironment,
       intervalDuration: IntervalDuration,
@@ -112,20 +141,27 @@ trait CommitmentTestUtil extends BaseTest with SortedReconciliationIntervalsHelp
     simClock.advanceTo(simClock.uniqueTime().immediateSuccessor)

     val createdCids =
-      (1 to nContracts.value).map(_ => deployOnP1P2AndCheckContract(synchronizerId, iouContract))
+      (1 to nContracts.value).map(_ =>
+        deployOnTwoParticipantsAndCheckContract(
+          synchronizerId,
+          iouContract,
+          firstParticipant,
+          secondParticipant,
+        )
+      )

     val tick1 = tickAfter(simClock.uniqueTime())
     simClock.advanceTo(tick1.forgetRefinement.immediateSuccessor)
-    participant1.testing.fetch_synchronizer_times()
+    firstParticipant.testing.fetch_synchronizer_times()

     val p1Computed = eventually() {
-      val p1Computed = participant1.commitments
+      val p1Computed = firstParticipant.commitments
         .computed(
           daName,
           tick1.toInstant.minusMillis(1),
           tick1.toInstant,
-          Some(participant2),
+          Some(secondParticipant),
         )
       p1Computed.size shouldBe 1
       p1Computed
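`CommitmentTestUtil` no longer hardcodes `participant1`/`participant2`; callers now pass the two participants explicitly, and a single-contract variant was added. A sketch of a typical call under the new signatures:

// Sketch: deploy one contract between two explicitly chosen participants
// and obtain the resulting commitment data. `alreadyDeployedContracts`
// is the test's AtomicReference accumulator, as above.
val (contracts, period, hashedCommitment) = deployOneAndCheck(
  daId,
  alreadyDeployedContracts,
  firstParticipant = participant1,
  secondParticipant = participant2,
)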
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/version/AlphaVersionSupportIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/version/AlphaVersionSupportIntegrationTest.scala
index 7781752ad8..f3573b26dd 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/version/AlphaVersionSupportIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/version/AlphaVersionSupportIntegrationTest.scala
@@ -14,12 +14,8 @@ import com.digitalasset.canton.console.{
   LocalSequencerReference,
 }
 import com.digitalasset.canton.data.CantonTimestamp
-import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer
-import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
-  UseH2,
-  UsePostgres,
-}
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer
+import com.digitalasset.canton.integration.plugins.{UseH2, UsePostgres, UseReferenceBlockSequencer}
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
   ConfigTransforms,
@@ -29,7 +25,7 @@ import com.digitalasset.canton.integration.{
 import com.digitalasset.canton.lifecycle.{FlagCloseable, HasCloseContext}
 import com.digitalasset.canton.metrics.CommonMockMetrics
 import com.digitalasset.canton.participant.sync.SyncServiceError.SyncServiceInconsistentConnectivity
-import com.digitalasset.canton.resource.CommunityStorageFactory
+import com.digitalasset.canton.resource.StorageSingleFactory
 import com.digitalasset.canton.time.SimClock
 import com.digitalasset.canton.topology.store.InitializationStore
 import com.digitalasset.canton.tracing.TraceContext
@@ -142,7 +138,7 @@ sealed trait AlphaVersionSupportIntegrationTest
   private def checkIsDev(
       config: StorageConfig
   )(implicit executionContext: ExecutionContext, traceContext: TraceContext): Future[Boolean] = {
-    val storageFactory = new CommunityStorageFactory(config)
+    val storageFactory = new StorageSingleFactory(config)
     val storage = storageFactory
       .create(
         connectionPoolForParticipant = false,
@@ -244,7 +240,7 @@ sealed trait AlphaVersionSupportIntegrationTest
 class AlphaVersionSupportIntegrationTestH2 extends AlphaVersionSupportIntegrationTest {
   registerPlugin(new UseH2(loggerFactory))
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.H2](
+    new UseReferenceBlockSequencer[DbConfig.H2](
       loggerFactory,
       sequencerGroups = sequencerGroups,
     )
@@ -254,7 +250,7 @@ class AlphaVersionSupportIntegrationTestH2 extends AlphaVersionSupportIntegratio
 class AlphaVersionSupportIntegrationTestPostgres extends AlphaVersionSupportIntegrationTest {
   registerPlugin(new UsePostgres(loggerFactory))
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](
+    new UseReferenceBlockSequencer[DbConfig.Postgres](
       loggerFactory,
       sequencerGroups = sequencerGroups,
     )
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/version/FailingMinimumHandshakeIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/version/FailingMinimumHandshakeIntegrationTest.scala
index 004537ea16..49f632fe14 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/version/FailingMinimumHandshakeIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/version/FailingMinimumHandshakeIntegrationTest.scala
@@ -5,7 +5,7 @@ package com.digitalasset.canton.integration.tests.version

 import com.digitalasset.canton.config.DbConfig
 import com.digitalasset.canton.console.CommandFailure
-import com.digitalasset.canton.integration.plugins.{UseCommunityReferenceBlockSequencer, UseH2}
+import com.digitalasset.canton.integration.plugins.{UseH2, UseReferenceBlockSequencer}
 import com.digitalasset.canton.integration.{
   CommunityIntegrationTest,
   ConfigTransforms,
@@ -21,7 +21,7 @@ class FailingMinimumHandshakeIntegrationTestH2
     extends CommunityIntegrationTest
     with SharedEnvironment {
   registerPlugin(new UseH2(loggerFactory))
-  registerPlugin(new UseCommunityReferenceBlockSequencer[DbConfig.H2](loggerFactory))
+  registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory))

   private lazy val participantVersion = Some(
     ParticipantProtocolVersion(TestProtocolVersions.UnreleasedValidPV)
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/version/MultipleProtocolVersionReassignmentIntegrationTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/version/MultipleProtocolVersionReassignmentIntegrationTest.scala
index f509ba9a26..fde60758ce 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/version/MultipleProtocolVersionReassignmentIntegrationTest.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/version/MultipleProtocolVersionReassignmentIntegrationTest.scala
@@ -7,11 +7,8 @@ import com.daml.ledger.api.v2.commands.Command
 import com.digitalasset.canton.SynchronizerAlias
 import com.digitalasset.canton.config.DbConfig
 import com.digitalasset.canton.examples.java.cycle as M
-import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.MultiSynchronizer
-import com.digitalasset.canton.integration.plugins.{
-  UseCommunityReferenceBlockSequencer,
-  UsePostgres,
-}
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer
+import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer}
 import com.digitalasset.canton.integration.util.{
   AcsInspection,
   HasCommandRunnersHelpers,
@@ -60,11 +57,13 @@ sealed trait MultipleProtocolVersionReassignmentIntegrationTest
       .withSetup { implicit env =>
         import env.*

-        participant1.dars.upload(CantonExamplesPath)
         participant1.synchronizers.connect_local(sequencer1, alias = daName)
         participant1.synchronizers.connect_local(sequencer2, alias = acmeName)
         participant1.synchronizers.connect_local(sequencer3, alias = repairSynchronizerName)
         participant1.synchronizers.connect_local(sequencer4, alias = devSynchronizerName)
+        Seq(daId, acmeId, repairSynchronizerId, devSynchronizerId).foreach(psid =>
+          participant1.dars.upload(CantonExamplesPath, synchronizerId = psid)
+        )
       }

   private def createCycleCommand(
@@ -174,7 +173,7 @@ class MultipleProtocolVersionReassignmentIntegrationTestPostgres
     extends MultipleProtocolVersionReassignmentIntegrationTest {
   registerPlugin(new UsePostgres(loggerFactory))
   registerPlugin(
-    new UseCommunityReferenceBlockSequencer[DbConfig.Postgres](
+    new UseReferenceBlockSequencer[DbConfig.Postgres](
       loggerFactory,
       sequencerGroups = MultiSynchronizer.tryCreate(
         Set("sequencer1"),
diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/util/OnboardsNewSequencerNode.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/util/OnboardsNewSequencerNode.scala
index d5a00777bd..70ab5f983b 100644
--- a/canton/community/app/src/test/scala/com/digitalasset/canton/integration/util/OnboardsNewSequencerNode.scala
+++ b/canton/community/app/src/test/scala/com/digitalasset/canton/integration/util/OnboardsNewSequencerNode.scala
@@ -3,10 +3,13 @@

 package com.digitalasset.canton.integration.util

+import com.digitalasset.canton.config
 import com.digitalasset.canton.console.{InstanceReference, SequencerReference}
 import com.digitalasset.canton.integration.TestConsoleEnvironment
 import com.digitalasset.canton.topology.PhysicalSynchronizerId

+import scala.concurrent.duration.DurationInt
+
 trait OnboardsNewSequencerNode {

   protected val isBftSequencer: Boolean = false
@@ -33,6 +36,8 @@ trait OnboardsNewSequencerNode {
       newSequencerReference,
       existingSequencerReference,
       synchronizerOwners,
+      // Avoid issues if things are slow
+      customCommandTimeout = Some(config.NonNegativeDuration.tryFromDuration(2.minutes)),
       isBftSequencer = true,
     )
     // user-manual-entry-end: DynamicallyOnboardBftSequencer
com.digitalasset.canton.ledger.participant.state.* @@ -287,7 +286,6 @@ class TestSubmissionService( actAs = commands.actAs, apiCommands = commands.apiCommands(), readAs = commands.readAs, - disclosures = commands.disclosures, submissionSeed = commands.submissionSeed, packagePreferenceOverride = commands.packagePreferenceOverride, packageMapOverride = commands.packageMapOverride, @@ -297,7 +295,6 @@ class TestSubmissionService( actAs: Seq[PartyId], apiCommands: ApiCommands, readAs: Seq[PartyId], - disclosures: ImmArray[FatContractInstance] = ImmArray.Empty, submissionSeed: crypto.Hash = WeakRandom.nextSeed(), @unused packageMapOverride: Option[ Map[Ref.PackageId, (Ref.PackageName, Ref.PackageVersion)] @@ -313,7 +310,6 @@ class TestSubmissionService( submitters = actAs.map(_.toLf).toSet, readAs = readAs.map(_.toLf).toSet, cmds = apiCommands, - disclosures = disclosures, participantId = participantId.toLf, prefetchKeys = Seq.empty, submissionSeed = submissionSeed, @@ -555,7 +551,6 @@ object TestSubmissionService { submissionId: String = UUID.randomUUID().toString, deduplicationPeriodO: Option[DeduplicationPeriod] = None, ledgerTime: Time.Timestamp = Time.Timestamp.now(), - disclosures: ImmArray[FatContractInstance] = ImmArray.Empty, submissionSeed: crypto.Hash = WeakRandom.nextSeed(), packageMapOverride: Option[Map[Ref.PackageId, (Ref.PackageName, Ref.PackageVersion)]] = None, packagePreferenceOverride: Option[Set[Ref.PackageId]] = None, @@ -565,8 +560,7 @@ object TestSubmissionService { def apiCommands()(implicit errorLogger: ErrorLoggingContext): ApiCommands = { val apiCommands = new CommandsValidator( - validateUpgradingPackageResolutions = ValidateUpgradingPackageResolutions.Empty, - validateDisclosedContracts = ValidateDisclosedContracts.WithContractIdVerificationDisabled, + validateUpgradingPackageResolutions = ValidateUpgradingPackageResolutions.Empty ) .validateInnerCommands(commands) .valueOr(throw _) diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/participant/util/DAMLeTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/participant/util/DAMLeTest.scala index 2895e2a64f..6d25808181 100644 --- a/canton/community/app/src/test/scala/com/digitalasset/canton/participant/util/DAMLeTest.scala +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/participant/util/DAMLeTest.scala @@ -18,7 +18,7 @@ import com.digitalasset.canton.participant.util.DAMLe.PackageResolver import com.digitalasset.canton.platform.apiserver.configuration.EngineLoggingConfig import com.digitalasset.canton.platform.apiserver.execution.ContractAuthenticators.ContractAuthenticatorFn import com.digitalasset.canton.protocol.* -import com.digitalasset.canton.util.{LegacyContractHash, TestEngine} +import com.digitalasset.canton.util.TestEngine import com.digitalasset.canton.{ BaseTest, FailOnShutdown, @@ -177,7 +177,7 @@ class DAMLeTest val (exerciseSeed, replayExercise, submitters) = replayExecuteCommand(contract) val inst = contract.inst - val contractHash = LegacyContractHash.fatContractHash(inst).value + val contractHash = testEngine.hashAndConsume(inst.toCreateNode) reinterpret( submitters = submitters, @@ -186,6 +186,7 @@ class DAMLeTest contracts = new ExtendedContractLookup(Map(contract.contractId -> contract), Map.empty), contractAuthenticator = { case (`inst`, `contractHash`) => Either.unit + case (`inst`, h) => fail(s"Hash mismatch: $h : $contractHash") case other => fail(s"Unexpected: $other") }, ).value @@ -202,7 +203,7 @@ class DAMLeTest val 
(exerciseSeed, replayExercise, submitters) = replayExecuteCommand(contract) val inst = contract.inst - val contractHash = LegacyContractHash.fatContractHash(inst).value + val contractHash = testEngine.hashAndConsume(inst.toCreateNode) reinterpret( submitters = submitters, diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/util/ReleaseUtils.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/util/ReleaseUtils.scala new file mode 100644 index 0000000000..4698b46435 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/util/ReleaseUtils.scala @@ -0,0 +1,184 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import better.files.File +import cats.data.NonEmptyList +import cats.syntax.functorFilter.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.console.BufferedProcessLogger +import com.digitalasset.canton.discard.Implicits.* +import com.digitalasset.canton.logging.ErrorLoggingContext +import com.digitalasset.canton.version.{ + ProtocolVersion, + ProtocolVersionCompatibility, + ReleaseVersion, +} + +import java.nio.file.{Files, Paths} +import scala.collection.concurrent.TrieMap +import scala.concurrent.{ExecutionContext, Future, blocking} + +/** Utilities for discovering, sharding, and retrieving previously released Canton versions in tests */ +object ReleaseUtils { + final case class TestedRelease( + releaseVersion: ReleaseVersion, + protocolVersions: NonEmpty[List[ProtocolVersion]], + ) + + /** Given a list of [[TestedRelease]], returns a new list of [[TestedRelease]] where each element + * is a release version together with exactly one of its supported protocol versions. + * + * Example: + * - Given [(2.7.9, [3, 4, 5]), (2.9.0, [5, 6])] + * - Returns [(2.7.9, [3]), (2.7.9, [4]), (2.7.9, [5]), (2.9.0, [5]), (2.9.0, [6])] + */ + def zipReleasesWithProtocolVersions(releases: List[TestedRelease]): List[TestedRelease] = + releases.flatMap { case TestedRelease(v, ps) => + ps.map(e => TestedRelease(v, NonEmpty.mk(List, e))) + } + + /** Given a list of `E` and a number of shards `n`, returns a new list with exactly `n` sub-lists + * (shards) containing as close to the same number of elements as possible. + * + * For a list having fewer elements than `n`, the result is padded with empty + * lists so that the resulting list has size `n`. 
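+   * When the list has at least `n` elements, shard sizes differ by at most one (a property of the integer division used below).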
+ * + * Examples: + * - List(1, 2, 3) and n=2 => List(List(1), List(2, 3)) + * - List(1, 2, 3) and n=4 => List(List(1), List(2), List(3), List()) + */ + def shard[E](list: NonEmptyList[E], numberOfShards: PositiveInt): List[List[E]] = { + val items = list.toList + val n = numberOfShards.value + val numItems = items.size + val sharded = + if (numItems < n) items.grouped(1).padTo(n, Nil) + else { + val (itemsPerShard, remainingItems) = (numItems / n, numItems % n) + val (left, right) = items.splitAt(numItems - remainingItems * (itemsPerShard + 1)) + left.grouped(itemsPerShard) ++ right.grouped(itemsPerShard + 1) + } + sharded.toList + } + + private lazy val previousStableReleases: List[TestedRelease] = + File("release-notes/") + .list(file => file.name.startsWith(ReleaseVersion.current.major.toString)) + .map(_.name.replace(".md", "")) + .map(ReleaseVersion.tryCreate) + .collect { + case releaseVersion if releaseVersion.isStable => + TestedRelease( + releaseVersion, + ProtocolVersionCompatibility.supportedProtocols( + includeAlphaVersions = false, + includeBetaVersions = true, + release = releaseVersion, + ), + ) + } + .toList + .sortBy(_.releaseVersion) + + // All previous stable releases minus releases that support only deleted protocol versions + lazy val previousSupportedStableReleases: List[TestedRelease] = + previousStableReleases.mapFilter { case TestedRelease(releaseVersion, protocolVersions) => + NonEmpty + .from(protocolVersions.filterNot(_.isDeleted)) + .map(TestedRelease(releaseVersion, _)) + } + + lazy val reducedScopeOfPreviousSupportedStableReleases: List[TestedRelease] = + reduceToLatestSupportedStablesReleases(previousSupportedStableReleases) + + private def reduceToLatestSupportedStablesReleases(releases: List[TestedRelease]) = + releases.sortBy(_.releaseVersion).foldLeft(List.empty[TestedRelease]) { + case (one :: rest, item) + // keep only the latest patch of each (major, minor) version + if (one.releaseVersion.majorMinorMatches(item.releaseVersion)) => + item :: rest + case (acc, item) => item :: acc + } + + /** Returns, for each (major, minor) version within a given range of release versions, the latest + * patch release together with its list of supported protocol versions. + * + * Example: + * - Latest stable releases for 2.7 and 2.8 are 2.7.9 and 2.8.3 + * - Given a range from 2.7.0 to 2.9.0, this function returns the list [(2.7.9, [3, 4, 5]), + * (2.8.3, [3, 4, 5])] + */ + def latestSupportedStableReleasesInRange( + fromInclusive: ReleaseVersion, + toExclusive: ReleaseVersion, + ): List[TestedRelease] = + reduceToLatestSupportedStablesReleases(previousStableReleases.filter { + case TestedRelease(releaseVersion, _) => + releaseVersion >= fromInclusive && releaseVersion < toExclusive + }) + + /** The first time we attempt to get a release, a future is inserted into the map. This allows + * synchronizing different requests for the same release. + */ + private val releasesRetrieval: TrieMap[ReleaseVersion, Future[String]] = TrieMap.empty + + /** If the .tar.gz corresponding to the release is not found locally, attempts to download it from + * Artifactory and then extracts it. 
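+   * + * A hypothetical call site (the release number is only an example; assumes an implicit ErrorLoggingContext and ExecutionContext in scope): + * {{{ + * val cantonBinDir: Future[String] = ReleaseUtils.retrieve(ReleaseVersion.tryCreate("2.8.3")) + * }}}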
+ * @param release + * Version that needs to be retrieved + * @return + * Directory containing the downloaded release + */ + @SuppressWarnings(Array("com.digitalasset.canton.SynchronizedFuture")) + def retrieve( + release: ReleaseVersion + )(implicit elc: ErrorLoggingContext, ec: ExecutionContext): Future[String] = + blocking { + synchronized { + releasesRetrieval.get(release) match { + case Some(releaseRetrieval) => releaseRetrieval + case None => + val releaseRetrieval = Future(downloadAndExtract(release)) + releasesRetrieval.put(release, releaseRetrieval).discard + releaseRetrieval + } + } + } + + /** This method should not be called concurrently for the same release. Use the [[retrieve]] method + * above instead. + * + * @param release + * Release version that needs to be downloaded and extracted + * @return + * Directory containing the downloaded release + */ + private def downloadAndExtract( + release: ReleaseVersion + )(implicit elc: ErrorLoggingContext): String = { + import scala.sys.process.* + + val cantonDir = s"tmp/canton-community-$release/bin/canton" + if (Files.exists(Paths.get(cantonDir))) { + elc.info(s"Release $release already downloaded.") + cantonDir + } else { + val processLogger = new BufferedProcessLogger + elc.info(s"Beginning to download release $release. This may take a while.") + val exitCode = s"scripts/testing/get-release.sh $release".!(processLogger) + ErrorUtil.requireArgument( + exitCode == 0, + s"getting release $release failed with exit code $exitCode. Download script output: \n ${processLogger.output()}", + ) + elc.info( + s"Finished downloading release $release. Download script output: \n ${processLogger.output()}" + ) + + cantonDir + } + } + +} diff --git a/canton/community/app/src/test/scala/com/digitalasset/canton/util/ReleaseUtilsTest.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/util/ReleaseUtilsTest.scala new file mode 100644 index 0000000000..67198144ce --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/util/ReleaseUtilsTest.scala @@ -0,0 +1,104 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import cats.data.NonEmptyList +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.util.ReleaseUtils.{ + previousSupportedStableReleases, + reducedScopeOfPreviousSupportedStableReleases, + shard, +} +import com.digitalasset.canton.version.ReleaseVersion +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers +import org.scalatest.{Inside, Inspectors} + +final class ReleaseUtilsTest extends AnyFlatSpec with Matchers with Inspectors with Inside { + + behavior of "ReleaseUtils" + + it should "only report versions from the current release version" in { + + val previousSupportedStableReleasesMajor = + previousSupportedStableReleases.map(_.releaseVersion.major) + all(previousSupportedStableReleasesMajor) shouldBe ReleaseVersion.current.major + + val reducedScopeOfPreviousSupportedStableReleasesMajor = + reducedScopeOfPreviousSupportedStableReleases.map(_.releaseVersion.major) + all(reducedScopeOfPreviousSupportedStableReleasesMajor) shouldBe ReleaseVersion.current.major + + } + + it should "correctly identify the latest available patch version for every (major, minor) pair" in { + + val allVersionsByMajorMinor = + previousSupportedStableReleases.groupBy(v => (v.releaseVersion.major, v.releaseVersion.minor)) + + val reducedScopedByMajorMinor = + reducedScopeOfPreviousSupportedStableReleases.groupBy(v => + (v.releaseVersion.major, v.releaseVersion.minor) + ) + + withClue("all (major, minor) pairs covered by the reduced scope") { + allVersionsByMajorMinor.keys should contain theSameElementsAs reducedScopedByMajorMinor.keys + } + + withClue("exactly one patch version in the reduced scope") { + all(reducedScopedByMajorMinor.values) should have length 1 + } + + withClue("only the latest version in the reduced scope") { + forAll(allVersionsByMajorMinor) { case (majorMinor, patches) => + inside(patches.sortBy(_.releaseVersion).reverse) { case latest :: rest => + inside(reducedScopedByMajorMinor.get(majorMinor)) { case Some(List(reduced)) => + reduced shouldBe latest + all(rest.map(_.releaseVersion)) should be < reduced.releaseVersion + } + } + } + } + + } + + private val one = PositiveInt.one + private val two = PositiveInt.two + private val three = PositiveInt.tryCreate(3) + private val four = PositiveInt.tryCreate(4) + + behavior of "sharding" + + it should "shard a single item" in { + shard(NonEmptyList.of(1), one) shouldBe List(List(1)) + } + it should "pad with empty lists (shards) when fewer items than requested shards are available" in { + shard(NonEmptyList.of(1), two) shouldBe List(List(1), List()) + } + it should "put 1 item per shard when the number of items equals the requested shards" in { + shard(NonEmptyList.of(1, 2, 3), three) shouldBe List(List(1), List(2), List(3)) + } + it should "spread items to all shards" in { + shard(NonEmptyList.of(1, 2, 3, 4), two) shouldBe List(List(1, 2), List(3, 4)) + + shard(NonEmptyList.of(1, 2, 3, 4), three) shouldBe List(List(1), List(2), List(3, 4)) + /* + ^^ shard(list = List(1, 2, 3, 4), n = 3) with + val itemsPerShard = Math.ceil(list.length / n.toDouble).toInt // = 2 + list.grouped(itemsPerShard) + .toList + .padTo(n) + + results in: List(List(1, 2), List(3, 4), List()) + */ + } + it should "spread items evenly to shards" in { + shard(NonEmptyList.of(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), four) shouldBe List( + List(1, 2), + List(3, 4), + List(5, 6, 7), + List(8, 9, 10), + ) + } + +} diff --git 
a/canton/community/app/src/test/scala/com/digitalasset/canton/util/SetupPackageVetting.scala b/canton/community/app/src/test/scala/com/digitalasset/canton/util/SetupPackageVetting.scala new file mode 100644 index 0000000000..57f1809d49 --- /dev/null +++ b/canton/community/app/src/test/scala/com/digitalasset/canton/util/SetupPackageVetting.scala @@ -0,0 +1,206 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import cats.implicits.catsSyntaxSemigroup +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.console.ParticipantReference +import com.digitalasset.canton.participant.admin.AdminWorkflowServices +import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId +import com.digitalasset.canton.topology.transaction.VettedPackage +import com.digitalasset.canton.topology.{ForceFlag, ForceFlags, PhysicalSynchronizerId} +import com.digitalasset.canton.util.SetupPackageVetting.AllUnvettingFlags +import com.digitalasset.canton.util.collection.MapsUtil +import com.digitalasset.canton.{BaseTest, LfPackageId} +import com.digitalasset.daml.lf.archive.DarReader +import com.digitalasset.daml.lf.data.Ref.PackageId +import org.scalatest.Assertions.fail +import org.scalatest.LoneElement.convertToCollectionLoneElementWrapper + +import java.io.File + +/** Declarative API for configuring the vetting state for a set of participants and their connected + * synchronizers. + * + * @param darPaths + * The file paths to the DARs that need to be uploaded to the participants. A participant's need + * for a DAR is determined based on the target topology. + * @param targetTopology + * The desired vetting topology for each synchronizer and its connected participants. The vetted + * packages for each participant must reference only the main package-ids of DARs listed in + * [[darPaths]]. All dependencies of a DAR are vetted without limits, unless a dependency is + * itself a main package-id of one of the DARs, in which case the bounds defined in its + * [[VettedPackage]] declaration are respected. 
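+  * + * A hypothetical usage sketch (the synchronizer id `daId`, participant reference `participant1`, DAR path, and `mainPkgId` are assumptions for illustration, not part of this API): + * {{{ + * SetupPackageVetting( + *   darPaths = Set("dars/my-model.dar"), + *   targetTopology = Map( + *     daId -> Map(participant1 -> VettedPackage.unbounded(Seq(mainPkgId)).toSet) + *   ), + * ) + * }}}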
+ */ +class SetupPackageVetting( + darPaths: Set[String /* DAR path */ ], + targetTopology: Map[PhysicalSynchronizerId, Map[ParticipantReference, Set[VettedPackage]]], +) { + def run(): Unit = { + val participants = targetTopology.view.values.flatMap(_.keys).toSet + val participantsPerSynchronizer: Map[PhysicalSynchronizerId, Set[ParticipantReference]] = + targetTopology.view.map { case (sync, participantPackages) => + sync -> participantPackages.keySet + }.toMap + val synchronizersPerParticipant: Map[ParticipantReference, Set[PhysicalSynchronizerId]] = + MapsUtil.transpose(participantsPerSynchronizer) + + val darsMap: Map[PackageId, (String, Set[PackageId])] = darPaths.view.map { darPath => + val dar = DarReader + .readArchiveFromFile(darFile = new File(darPath)) + .fold( + err => throw new IllegalArgumentException(s"Failed to read DAR: $darPath", err), + identity, + ) + + dar.main.pkgId -> (darPath, dar.dependencies.map(_.pkgId).toSet) + }.toMap + + // Clean-up + unvetAllUserPackages(participants, synchronizersPerParticipant) + + // Upload the required DARs on each participant (without vetting) + uploadDarsToParticipants(darsMap.view.mapValues(_._1).toMap) + + // Setup the target vetting state + setupTargetVettingState(participantsPerSynchronizer, darsMap.view.mapValues(_._2).toMap) + } + + private def setupTargetVettingState( + participantsPerSynchronizer: Map[PhysicalSynchronizerId, Set[ParticipantReference]], + darsMap: Map[PackageId, Set[PackageId]], + ): Unit = + targetTopology.foreach { case (synchronizerId, vettedPackagesPerParticipant) => + vettedPackagesPerParticipant + // Do not issue vetting transactions with empty vetted packages set + .filter(_._2.nonEmpty) + .foreach { case (participant, vettedPackages) => + val packageIdsWithExplicitVetting = vettedPackages.map(_.packageId) + val vettedPackagesAdditions = vettedPackages ++ { + // Add the dependencies of all DAR main package-ids + VettedPackage.unbounded( + vettedPackages.view + .map(_.packageId) + .flatMap(darsMap) + .toSeq + .distinct + // Do not set unbound vetting if the package-id was already explicitly vetted in the targetTopology + // (e.g. 
a main DAR package-id can be a dependency of another DAR) + .filterNot(packageIdsWithExplicitVetting) + ) + } + participant.topology.vetted_packages.propose_delta( + participant = participant.id, + store = synchronizerId, + adds = vettedPackagesAdditions.toSeq, + ) + + val allParticipants = participantsPerSynchronizer(synchronizerId) + + allParticipants.foreach { participantToObserveTopology => + BaseTest.eventually() { + val currentVettingState = participantToObserveTopology.topology.vetted_packages + .list( + Some(synchronizerId), + filterParticipant = participant.id.filterString, + ) + .loneElement + .item + .packages + .toSet + + if (vettedPackagesAdditions.subsetOf(currentVettingState)) () else fail() + } + } + } + } + + private def uploadDarsToParticipants(darsMap: Map[PackageId, String]): Unit = + targetTopology.view.values + .reduceOption(_ |+| _) + .getOrElse(Map.empty[ParticipantReference, Set[VettedPackage]]) + .foreach { case (participant, vettedPackages) => + vettedPackages + .map(_.packageId) + .map(darMainPkgId => + darsMap.getOrElse( + darMainPkgId, + throw new IllegalArgumentException(s"DAR for $darMainPkgId not found"), + ) + ) + .foreach { darPath => + participant.dars.upload( + darPath, + synchronizerId = None, + vetAllPackages = false, + synchronizeVetting = false, + ) + } + } + + private def unvetAllUserPackages( + participants: Set[ParticipantReference], + synchronizersPerParticipant: Map[ParticipantReference, Set[PhysicalSynchronizerId]], + ): Unit = + participants.foreach { participant => + val defaultPackageIds = (Set( + "daml-prim", + "daml-stdlib", + "daml-script", + "ghc-stdlib", + ) ++ AdminWorkflowServices.AdminWorkflowNames).flatMap(filterName => + withFailOnLimitHit(PositiveInt.tryCreate(1000), s"fetch SDK packages ($filterName)") { + fetchLimit => + participant.packages.list(limit = fetchLimit, filterName = filterName).map(_.packageId) + } + ) + + val userDefinedPackages = + (withFailOnLimitHit(PositiveInt.tryCreate(1000), "fetch all packages") { fetchLimit => + participant.packages.list(limit = fetchLimit).map(_.packageId) + }.toSet + -- defaultPackageIds).map(LfPackageId.assertFromString) + + if (userDefinedPackages.nonEmpty) { + // Remove the vetted packages from the authorized store + participant.topology.vetted_packages.propose_delta( + participant = participant.id, + store = TopologyStoreId.Authorized, + removes = userDefinedPackages.toSeq, + force = AllUnvettingFlags, + ) + + // Remove the vetted packages from each synchronizer store + synchronizersPerParticipant(participant).foreach { syncId => + participant.topology.vetted_packages.propose_delta( + participant = participant.id, + store = syncId, + removes = userDefinedPackages.toSeq, + force = AllUnvettingFlags, + ) + } + } + } + + private def withFailOnLimitHit[T](fetchLimit: PositiveInt, opName: String)( + f: PositiveInt => Seq[T] + ): Seq[T] = { + val result = f(fetchLimit) + if (result.sizeIs == fetchLimit.value) fail(s"Request limit hit for $opName") + else result + } +} + +object SetupPackageVetting { + val AllUnvettingFlags: ForceFlags = ForceFlags( + ForceFlag.AllowUnvettedDependencies, + ForceFlag.AllowUnvetPackageWithActiveContracts, + ) + + def apply( + darPaths: Set[String], + targetTopology: Map[PhysicalSynchronizerId, Map[ParticipantReference, Set[VettedPackage]]], + ): Unit = + new SetupPackageVetting(darPaths, targetTopology).run() +} diff --git a/canton/community/aws-kms-driver/src/main/resources/META-INF/services/com.digitalasset.canton.crypto.kms.driver.api.KmsDriverFactory 
b/canton/community/aws-kms-driver/src/main/resources/META-INF/services/com.digitalasset.canton.crypto.kms.driver.api.KmsDriverFactory new file mode 100644 index 0000000000..39d9e1c8fc --- /dev/null +++ b/canton/community/aws-kms-driver/src/main/resources/META-INF/services/com.digitalasset.canton.crypto.kms.driver.api.KmsDriverFactory @@ -0,0 +1 @@ +com.digitalasset.canton.crypto.kms.driver.v1.aws.AwsKmsDriverFactory diff --git a/canton/community/aws-kms-driver/src/main/resources/META-INF/services/com.digitalasset.canton.crypto.kms.driver.api.v1.KmsDriverFactory b/canton/community/aws-kms-driver/src/main/resources/META-INF/services/com.digitalasset.canton.crypto.kms.driver.api.v1.KmsDriverFactory new file mode 100644 index 0000000000..39d9e1c8fc --- /dev/null +++ b/canton/community/aws-kms-driver/src/main/resources/META-INF/services/com.digitalasset.canton.crypto.kms.driver.api.v1.KmsDriverFactory @@ -0,0 +1 @@ +com.digitalasset.canton.crypto.kms.driver.v1.aws.AwsKmsDriverFactory diff --git a/canton/community/aws-kms-driver/src/main/scala/com/digitalasset/canton/crypto/kms/driver/v1/aws/AwsKmsDriver.scala b/canton/community/aws-kms-driver/src/main/scala/com/digitalasset/canton/crypto/kms/driver/v1/aws/AwsKmsDriver.scala new file mode 100644 index 0000000000..99630deb6d --- /dev/null +++ b/canton/community/aws-kms-driver/src/main/scala/com/digitalasset/canton/crypto/kms/driver/v1/aws/AwsKmsDriver.scala @@ -0,0 +1,308 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto.kms.driver.v1.aws + +import cats.data.EitherT +import cats.syntax.either.* +import cats.syntax.traverse.* +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.buildinfo.BuildInfo +import com.digitalasset.canton.config.{KmsConfig, ProcessingTimeout} +import com.digitalasset.canton.crypto +import com.digitalasset.canton.crypto.KeyName +import com.digitalasset.canton.crypto.kms.aws.AwsKms +import com.digitalasset.canton.crypto.kms.driver.api.v1.* +import com.digitalasset.canton.crypto.kms.driver.api.v1.SigningKeySpec.EcSecp256k1 +import com.digitalasset.canton.crypto.kms.driver.v1.KmsDriverSpecsConverter +import com.digitalasset.canton.crypto.kms.{KmsEncryptionPublicKey, KmsKeyId, KmsSigningPublicKey} +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.tracing.{NoReportingTracerProvider, TraceContext} +import com.digitalasset.canton.util.{ByteString256, ByteString4096, ByteString6144, EitherTUtil} +import com.google.protobuf.ByteString +import io.opentelemetry.context.Context +import org.slf4j.Logger +import pureconfig.{ConfigReader, ConfigWriter} + +import scala.concurrent.{ExecutionContext, Future} + +final case class AwsKmsDriverConfig( + region: String, + multiRegionKey: Boolean = false, + auditLogging: Boolean = false, +) + +class AwsKmsDriverFactory extends KmsDriverFactory { + + override def name: String = "aws-kms" + + override def buildInfo: Option[String] = Some(BuildInfo.version) + + override type ConfigType = AwsKmsDriverConfig + + override type Driver = AwsKmsDriver + + override def configReader: ConfigReader[AwsKmsDriverConfig] = { + import pureconfig.generic.semiauto.* + deriveReader[AwsKmsDriverConfig] + } + + override def configWriter(confidential: Boolean): ConfigWriter[AwsKmsDriverConfig] = { + import pureconfig.generic.semiauto.* + deriveWriter[AwsKmsDriverConfig] + } + + override def create( + config: 
AwsKmsDriverConfig, + loggerFactory: Class[_] => Logger, + executionContext: ExecutionContext, + ): AwsKmsDriver = { + + val awsConfig = KmsConfig.Aws( + region = config.region, + multiRegionKey = config.multiRegionKey, + auditLogging = config.auditLogging, + ) + + // Use default timeouts and logger factory for the wrapped KMS + val awsKms = + AwsKms + .create( + awsConfig, + ProcessingTimeout(), + NamedLoggerFactory.root, + NoReportingTracerProvider, + ) + .valueOr { err => + throw new RuntimeException(s"Failed to create AWS KMS: $err") + } + + new AwsKmsDriver(awsKms)(executionContext) + } +} + +/** A reference implementation of the KMS Driver API based on the existing internal AWS KMS + * integration. + */ +class AwsKmsDriver(kms: AwsKms)(implicit ec: ExecutionContext) extends KmsDriver { + + private def mapErr[A, E](operation: String)( + result: => EitherT[Future, String, A] + ): Future[A] = + EitherTUtil.toFuture( + result.leftMap(err => + KmsDriverException( + new RuntimeException(s"KMS operation `$operation` failed: $err"), + // Internally the AwsKms already retries when possible, so only non-retryable errors bubble up here. + retryable = false, + ) + ) + ) + + // TODO(i18206): Check that connection and permissions are properly configured + override def health: Future[KmsDriverHealth] = Future.successful(KmsDriverHealth.Ok) + + override def supportedSigningKeySpecs: Set[SigningKeySpec] = + Set(SigningKeySpec.EcP256, SigningKeySpec.EcP384, EcSecp256k1) + + override def supportedSigningAlgoSpecs: Set[SigningAlgoSpec] = + Set(SigningAlgoSpec.EcDsaSha256, SigningAlgoSpec.EcDsaSha384) + + override def supportedEncryptionKeySpecs: Set[EncryptionKeySpec] = Set(EncryptionKeySpec.Rsa2048) + + override def supportedEncryptionAlgoSpecs: Set[EncryptionAlgoSpec] = Set( + EncryptionAlgoSpec.RsaEsOaepSha256 + ) + + override def generateSigningKeyPair( + signingKeySpec: SigningKeySpec, + keyName: Option[String], + )(traceContext: Context): Future[String] = + TraceContext.withOpenTelemetryContext(traceContext) { implicit tc: TraceContext => + mapErr("generate-signing-key") { + for { + _ <- EitherTUtil.condUnitET[Future]( + supportedSigningKeySpecs.contains(signingKeySpec), + s"Unsupported signing key spec: $signingKeySpec", + ) + keySpec = KmsDriverSpecsConverter.convertToCryptoSigningKeySpec(signingKeySpec) + name <- keyName.traverse(KeyName.create).toEitherT[Future] + keyId <- kms + .generateSigningKeyPair(keySpec, name) + .map(_.unwrap) + .leftMap(_.toString) + .failOnShutdownToAbortException(functionFullName) + + } yield keyId + } + } + + override def generateEncryptionKeyPair( + encryptionKeySpec: EncryptionKeySpec, + keyName: Option[String], + )(traceContext: Context): Future[String] = + TraceContext.withOpenTelemetryContext(traceContext) { implicit tc: TraceContext => + mapErr("generate-encryption-key") { + for { + _ <- EitherTUtil.condUnitET[Future]( + supportedEncryptionKeySpecs.contains(encryptionKeySpec), + s"Unsupported encryption key spec: $encryptionKeySpec", + ) + name <- keyName.traverse(KeyName.create).toEitherT[Future] + keySpec = KmsDriverSpecsConverter.convertToCryptoEncryptionKeySpec(encryptionKeySpec) + keyId <- kms + .generateAsymmetricEncryptionKeyPair(keySpec, name) + .map(_.unwrap) + .leftMap(_.toString) + .failOnShutdownToAbortException(functionFullName) + } yield keyId + } + } + + override def generateSymmetricKey( + keyName: Option[String] + )(traceContext: Context): Future[String] = + TraceContext.withOpenTelemetryContext(traceContext) { implicit tc: TraceContext => + 
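+      // Recover the Canton trace context from the OpenTelemetry context; mapErr below surfaces any KMS failure as a non-retryable KmsDriverException.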
mapErr("generate-symmetric-key") { + for { + name <- keyName.traverse(KeyName.create).toEitherT[Future] + keyId <- kms + .generateSymmetricEncryptionKey(name) + .map(_.unwrap) + .leftMap(_.toString) + .failOnShutdownToAbortException(functionFullName) + } yield keyId + } + } + + override def sign( + data: Array[Byte], + keyId: String, + algoSpec: SigningAlgoSpec, + )(traceContext: Context): Future[Array[Byte]] = + TraceContext.withOpenTelemetryContext(traceContext) { implicit tc: TraceContext => + mapErr("sign") { + for { + kmsKeyId <- KmsKeyId.create(keyId).toEitherT[Future] + data <- ByteString4096.create(ByteString.copyFrom(data)).toEitherT[Future] + signingScheme = KmsDriverSpecsConverter.convertToCryptoSigningAlgoSpec(algoSpec) + signature <- kms + .sign( + kmsKeyId, + data, + signingScheme, + // We can hard-code the key spec here as AWS KMS does not use the provided key spec for signing + crypto.SigningKeySpec.EcP256, + ) + .leftMap(_.toString) + .failOnShutdownToAbortException(functionFullName) + } yield signature.toByteArray + } + } + + override def decryptAsymmetric( + ciphertext: Array[Byte], + keyId: String, + algoSpec: EncryptionAlgoSpec, + )(traceContext: Context): Future[Array[Byte]] = + TraceContext.withOpenTelemetryContext(traceContext) { implicit tc: TraceContext => + mapErr("decrypt-asymmetric") { + for { + cipher <- ByteString256.create(ByteString.copyFrom(ciphertext)).toEitherT[Future] + kmsKeyId <- KmsKeyId.create(keyId).toEitherT[Future] + encryptSpec = KmsDriverSpecsConverter.convertToCryptoEncryptionAlgoSpec(algoSpec) + data <- kms + .decryptAsymmetric(kmsKeyId, cipher, encryptSpec) + .leftMap(_.toString) + .failOnShutdownToAbortException(functionFullName) + } yield data.unwrap.toByteArray + } + } + + override def decryptSymmetric(ciphertext: Array[Byte], keyId: String)( + traceContext: Context + ): Future[Array[Byte]] = + TraceContext.withOpenTelemetryContext(traceContext) { implicit tc: TraceContext => + mapErr("decrypt-symmetric") { + for { + cipher <- ByteString6144.create(ByteString.copyFrom(ciphertext)).toEitherT[Future] + kmsKeyId <- KmsKeyId.create(keyId).toEitherT[Future] + decrypted <- kms + .decryptSymmetric(kmsKeyId, cipher) + .leftMap(_.toString) + .failOnShutdownToAbortException(functionFullName) + } yield decrypted.unwrap.toByteArray + } + } + + override def encryptSymmetric(data: Array[Byte], keyId: String)( + traceContext: Context + ): Future[Array[Byte]] = + TraceContext.withOpenTelemetryContext(traceContext) { implicit tc: TraceContext => + mapErr("encrypt-symmetric") { + for { + plaintext <- ByteString4096.create(ByteString.copyFrom(data)).toEitherT[Future] + kmsKeyId <- KmsKeyId.create(keyId).toEitherT[Future] + encrypted <- kms + .encryptSymmetric(kmsKeyId, plaintext) + .leftMap(_.toString) + .failOnShutdownToAbortException(functionFullName) + } yield encrypted.unwrap.toByteArray + } + } + + override def getPublicKey(keyId: String)(traceContext: Context): Future[PublicKey] = + TraceContext.withOpenTelemetryContext(traceContext) { implicit tc: TraceContext => + mapErr("get-public-key") { + for { + kmsKeyId <- KmsKeyId.create(keyId).toEitherT[Future] + publicKey <- kms + .getPublicKey(kmsKeyId) + .leftMap(_.toString) + .subflatMap { + case KmsEncryptionPublicKey(key, spec) => + KmsDriverSpecsConverter + .convertToDriverEncryptionKeySpec(spec) + .map(spec => PublicKey(key.toByteArray, spec)) + case KmsSigningPublicKey(key, spec) => + KmsDriverSpecsConverter + .convertToDriverSigningKeySpec(spec) + .map(spec => PublicKey(key.toByteArray, spec)) + 
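+              // Reject key material that is neither a signing nor an encryption public key.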
case unknownKey => Left(s"Invalid public key: $unknownKey") + } + .failOnShutdownToAbortException(functionFullName) + } yield publicKey + } + } + + override def keyExistsAndIsActive(keyId: String)(traceContext: Context): Future[Unit] = + TraceContext.withOpenTelemetryContext(traceContext) { implicit tc: TraceContext => + mapErr("key-exists-and-active") { + for { + kmsKeyId <- KmsKeyId.create(keyId).toEitherT[Future] + _ <- kms + .keyExistsAndIsActive(kmsKeyId) + .leftMap(_.toString) + .failOnShutdownToAbortException(functionFullName) + + } yield () + } + } + + override def deleteKey(keyId: String)(traceContext: Context): Future[Unit] = + TraceContext.withOpenTelemetryContext(traceContext) { implicit tc: TraceContext => + mapErr("delete-key") { + for { + kmsKeyId <- KmsKeyId.create(keyId).toEitherT[Future] + _ <- kms + .deleteKey(kmsKeyId) + .leftMap(_.toString) + .failOnShutdownToAbortException(functionFullName) + + } yield () + } + } + + override def close(): Unit = kms.close() + +} diff --git a/canton/community/aws-kms-driver/src/test/scala/com/digitalasset/canton/nightly/AwsKmsDriverTest.scala b/canton/community/aws-kms-driver/src/test/scala/com/digitalasset/canton/nightly/AwsKmsDriverTest.scala new file mode 100644 index 0000000000..b978cfca9c --- /dev/null +++ b/canton/community/aws-kms-driver/src/test/scala/com/digitalasset/canton/nightly/AwsKmsDriverTest.scala @@ -0,0 +1,133 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.nightly + +import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.config.KmsConfig +import com.digitalasset.canton.crypto.kms.aws.AwsKms +import com.digitalasset.canton.crypto.kms.driver.api.v1.{ + EncryptionKeySpec, + KmsDriver, + SigningKeySpec, +} +import com.digitalasset.canton.crypto.kms.driver.testing.v1.{KmsDriverFactoryTest, KmsDriverTest} +import com.digitalasset.canton.crypto.kms.driver.v1.DriverKms +import com.digitalasset.canton.crypto.kms.driver.v1.aws.{ + AwsKmsDriver, + AwsKmsDriverConfig, + AwsKmsDriverFactory, +} +import com.digitalasset.canton.crypto.provider.kms.HasPredefinedAwsKmsKeys +import com.digitalasset.canton.tracing.NoReportingTracerProvider +import org.scalatest.wordspec.FixtureAsyncWordSpec +import pureconfig.ConfigWriter + +/** Implements the test using our internal test suite for KMS */ +class AwsKmsDriverInternalTest + extends FixtureAsyncWordSpec + with ExternalKmsTest + with HasPredefinedAwsKmsKeys { + + override type KmsType = DriverKms + + override protected def defaultKmsConfig: KmsConfig.Driver = { + import pureconfig.generic.auto.* + val awsConfig = ConfigWriter[KmsConfig.Aws].to(KmsConfig.Aws.defaultTestConfig) + KmsConfig.Driver("aws-kms", awsConfig) + } + + override protected def newKms(config: KmsConfig.Driver): DriverKms = + DriverKms + .create( + config, + FutureSupervisor.Noop, + wallClock, + timeouts, + loggerFactory, + parallelExecutionContext, + ) + .valueOrFail("Failed to create Driver KMS") + + "AWS KMS Driver" must { + behave like kms() + } + +} + +/** Implements the test with the external test suite for Kms Drivers. 
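+ * This variant runs against predefined KMS keys, so the shared driver test behavior is invoked with key generation disabled.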
*/ +// user-manual-entry-begin: AwsKmsDriverWithPredefinedKeysTest +class AwsKmsDriverWithPredefinedKeysTest extends KmsDriverTest { + + override val predefinedSigningKeys: Map[SigningKeySpec, String] = + Map( + SigningKeySpec.EcP256 -> "alias/canton-kms-test-signing-key", + SigningKeySpec.EcP384 -> "alias/canton-kms-test-signing-key-P384", + ) + + override val predefinedEncryptionKeys: Map[EncryptionKeySpec, String] = + Map(EncryptionKeySpec.Rsa2048 -> "alias/canton-kms-test-asymmetric-key") + + override val predefinedSymmetricKey: Option[String] = Some("alias/canton-kms-test-key") + + override protected def newKmsDriver(): KmsDriver = { + val awsKms = AwsKms + .create( + KmsConfig.Aws.defaultTestConfig, + timeouts, + loggerFactory, + NoReportingTracerProvider, + ) + .valueOrFail("failed to create AWS KMS") + + new AwsKmsDriver(awsKms) + } + + "AWS KMS Driver" must { + behave like kmsDriver(allowKeyGeneration = false) + } + +} +// user-manual-entry-end: AwsKmsDriverWithPredefinedKeysTest + +// user-manual-entry-begin: AwsKmsDriverTest +class AwsKmsDriverTest extends KmsDriverTest { + + override protected def newKmsDriver(): KmsDriver = { + val awsKms = AwsKms + .create( + KmsConfig.Aws.defaultTestConfig, + timeouts, + loggerFactory, + NoReportingTracerProvider, + ) + .valueOrFail("failed to create AWS KMS") + + new AwsKmsDriver(awsKms) + } + + "AWS KMS Driver" must { + behave like kmsDriver(allowKeyGeneration = true) + } + +} +// user-manual-entry-end: AwsKmsDriverTest + +// user-manual-entry-begin: AwsKmsDriverFactoryTest +class AwsKmsDriverFactoryTest extends KmsDriverFactoryTest { + + override type Factory = AwsKmsDriverFactory + + override protected lazy val factory: AwsKmsDriverFactory = + new AwsKmsDriverFactory + + override protected lazy val config: AwsKmsDriverFactory#ConfigType = { + val aws = KmsConfig.Aws.defaultTestConfig + AwsKmsDriverConfig(region = aws.region, multiRegionKey = aws.multiRegionKey) + } + + "AWS KMS Driver Factory" must { + behave like kmsDriverFactory() + } +} +// user-manual-entry-end: AwsKmsDriverFactoryTest diff --git a/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/participant_reassignment.proto b/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/participant_reassignment.proto index 8a2a29a203..02392ac1bc 100644 --- a/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/participant_reassignment.proto +++ b/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/participant_reassignment.proto @@ -8,7 +8,6 @@ package com.digitalasset.canton.protocol.v30; import "com/digitalasset/canton/crypto/v30/crypto.proto"; import "com/digitalasset/canton/protocol/v30/common_stable.proto"; import "com/digitalasset/canton/protocol/v30/merkle.proto"; -import "com/digitalasset/canton/time/v30/time_proof.proto"; import "google/protobuf/timestamp.proto"; import "scalapb/scalapb.proto"; @@ -88,7 +87,7 @@ message UnassignmentView { com.digitalasset.canton.crypto.v30.Salt salt = 1; string target_physical_synchronizer_id = 2; - com.digitalasset.canton.time.v30.TimeProof target_time_proof = 3; + int64 target_timestamp = 3; repeated ActiveContract contracts = 4; } diff --git a/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/sequencing.proto b/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/sequencing.proto index 492dbc54b1..9e7254ab8d 100644 --- 
a/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/sequencing.proto +++ b/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/sequencing.proto @@ -9,6 +9,7 @@ import "com/digitalasset/canton/crypto/v30/crypto.proto"; import "com/digitalasset/canton/protocol/v30/common_stable.proto"; import "com/digitalasset/canton/protocol/v30/traffic_control_parameters.proto"; import "com/digitalasset/canton/v30/trace_context.proto"; +import "google/protobuf/duration.proto"; import "google/rpc/status.proto"; import "scalapb/scalapb.proto"; @@ -68,6 +69,8 @@ message StaticSynchronizerParameters { // Flag to enable transparency checks. bool enable_transparency_checks = 9; + + google.protobuf.Duration topology_change_delay = 10; } message Envelope { diff --git a/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/synchronizer_parameters.proto b/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/synchronizer_parameters.proto index 4ab6de454f..490bcbc66f 100644 --- a/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/synchronizer_parameters.proto +++ b/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/synchronizer_parameters.proto @@ -49,7 +49,7 @@ message DynamicSynchronizerParameters { google.protobuf.Duration confirmation_response_timeout = 1; google.protobuf.Duration mediator_reaction_timeout = 2; google.protobuf.Duration assignment_exclusivity_timeout = 3; - google.protobuf.Duration topology_change_delay = 4; + reserved 4; // was topology_change_delay = 4; google.protobuf.Duration ledger_time_record_time_tolerance = 5; google.protobuf.Duration reconciliation_interval = 6; google.protobuf.Duration mediator_deduplication_timeout = 7; diff --git a/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/topology.proto b/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/topology.proto index 60532f44cc..d097d1572c 100644 --- a/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/topology.proto +++ b/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/topology.proto @@ -60,6 +60,13 @@ message Enums { TOPOLOGY_MAPPING_CODE_SYNCHRONIZER_MIGRATION_ANNOUNCEMENT = 19; TOPOLOGY_MAPPING_CODE_SEQUENCER_CONNECTION_SUCCESSOR = 20; } + + enum ParticipantFeatureFlag { + PARTICIPANT_FEATURE_FLAG_UNSPECIFIED = 0; + // UNUSED in PV >= 34 - Was meant to tactically fix a bug in the external signing hash computation + // in model conformance in PV 33 + PARTICIPANT_FEATURE_FLAG_PV33_EXTERNAL_SIGNING_LOCAL_CONTRACT_IN_SUBVIEW = 1; + } } // [start NamespaceDelegation definition] @@ -168,6 +175,9 @@ message SynchronizerTrustCertificate { reserved 3; // was bool reassignment_only_to_given_target_synchronizer_ids = 3; reserved 4; // was repeated string target_synchronizer_ids = 4; + + // Feature flags that this node declares to support on this synchronizer + repeated Enums.ParticipantFeatureFlag feature_flags = 5; } // the optional trust certificate of the synchronizer towards the participant diff --git a/canton/community/base/src/main/protobuf/com/digitalasset/canton/sequencer/api/v30/sequencer_service.proto b/canton/community/base/src/main/protobuf/com/digitalasset/canton/sequencer/api/v30/sequencer_service.proto index 8cd9ce4e01..3cde0a9df2 100644 --- a/canton/community/base/src/main/protobuf/com/digitalasset/canton/sequencer/api/v30/sequencer_service.proto +++ 
b/canton/community/base/src/main/protobuf/com/digitalasset/canton/sequencer/api/v30/sequencer_service.proto @@ -97,6 +97,11 @@ service SequencerService { // Return the currently known traffic state for a member. Callers must be authorized to request the traffic state. rpc GetTrafficStateForMember(GetTrafficStateForMemberRequest) returns (GetTrafficStateForMemberResponse); + + // Return a "current" sequencing time such that, if a `SendAsync` operation submitted after this + // call results in a sequenced event, the sequencing time of that event is guaranteed to be later + // than the time previously returned by this `GetTime` call. + rpc GetTime(GetTimeRequest) returns (GetTimeResponse); } message SendAsyncRequest { @@ -196,3 +201,14 @@ message GetTrafficStateForMemberResponse { com.digitalasset.canton.protocol.v30.TrafficState traffic_state = 1; } + +message GetTimeRequest { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion"; +} + +message GetTimeResponse { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion"; + + // The result can be empty if the sequencer is still initializing. + optional int64 sequencing_timestamp = 1; +} diff --git a/canton/community/base/src/main/protobuf/com/digitalasset/canton/time/v30/time_proof.proto b/canton/community/base/src/main/protobuf/com/digitalasset/canton/time/v30/time_proof.proto deleted file mode 100644 index 11e17fa171..0000000000 --- a/canton/community/base/src/main/protobuf/com/digitalasset/canton/time/v30/time_proof.proto +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -syntax = "proto3"; - -package com.digitalasset.canton.time.v30; - -import "com/digitalasset/canton/protocol/v30/sequencing.proto"; - -// Messages for sequencing -message TimeProof { - com.digitalasset.canton.protocol.v30.PossiblyIgnoredSequencedEvent event = 1; // must be an ordinary event -} diff --git a/canton/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v30/topology_manager_read_service.proto b/canton/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v30/topology_manager_read_service.proto index 2319c42827..cd7c7769bf 100644 --- a/canton/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v30/topology_manager_read_service.proto +++ b/canton/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v30/topology_manager_read_service.proto @@ -35,10 +35,12 @@ service TopologyManagerReadService { rpc ListAvailableStores(ListAvailableStoresRequest) returns (ListAvailableStoresResponse); rpc ListAll(ListAllRequest) returns (ListAllResponse); rpc ExportTopologySnapshot(ExportTopologySnapshotRequest) returns (stream ExportTopologySnapshotResponse); + rpc ExportTopologySnapshotV2(ExportTopologySnapshotV2Request) returns (stream ExportTopologySnapshotV2Response); // Fetch the genesis topology state. // The returned bytestring can be used directly to initialize a sequencer. 
rpc GenesisState(GenesisStateRequest) returns (stream GenesisStateResponse); + rpc GenesisStateV2(GenesisStateV2Request) returns (stream GenesisStateV2Response); // Fetch the topology state // The returned bytestring can be used directly to initialize a successor sequencer @@ -277,17 +279,27 @@ message ListAllRequest { string filter_namespace = 3; } +message ListAllResponse { + com.digitalasset.canton.topology.admin.v30.TopologyTransactions result = 1; +} + message ExportTopologySnapshotRequest { BaseQuery base_query = 1; repeated string exclude_mappings = 2; string filter_namespace = 3; } -message ListAllResponse { - com.digitalasset.canton.topology.admin.v30.TopologyTransactions result = 1; +message ExportTopologySnapshotResponse { + bytes chunk = 1; } -message ExportTopologySnapshotResponse { +message ExportTopologySnapshotV2Request { + BaseQuery base_query = 1; + repeated string exclude_mappings = 2; + string filter_namespace = 3; +} + +message ExportTopologySnapshotV2Response { bytes chunk = 1; } @@ -303,6 +315,18 @@ message GenesisStateResponse { bytes chunk = 1; } +message GenesisStateV2Request { + // Must be specified if the genesis state is requested from a participant node. + optional StoreId synchronizer_store = 1; + // Optional - the effective time used to fetch the topology transactions. If not provided the effective time of the last topology transaction is used. + google.protobuf.Timestamp timestamp = 2; +} + +message GenesisStateV2Response { + // versioned stored topology transactions + bytes chunk = 1; +} + message LogicalUpgradeStateRequest {} message LogicalUpgradeStateResponse { diff --git a/canton/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v30/topology_manager_write_service.proto b/canton/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v30/topology_manager_write_service.proto index 3f18ff91b0..70060695a8 100644 --- a/canton/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v30/topology_manager_write_service.proto +++ b/canton/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v30/topology_manager_write_service.proto @@ -20,6 +20,7 @@ service TopologyManagerWriteService { rpc AddTransactions(AddTransactionsRequest) returns (AddTransactionsResponse); rpc ImportTopologySnapshot(stream ImportTopologySnapshotRequest) returns (ImportTopologySnapshotResponse); + rpc ImportTopologySnapshotV2(stream ImportTopologySnapshotV2Request) returns (ImportTopologySnapshotV2Response); rpc SignTransactions(SignTransactionsRequest) returns (SignTransactionsResponse); @@ -171,6 +172,17 @@ message ImportTopologySnapshotRequest { } message ImportTopologySnapshotResponse {} +/** + * Same message as AddTransactionsRequest, except that transactions are encoded in a byte string + */ +message ImportTopologySnapshotV2Request { + bytes topology_snapshot = 1; + StoreId store = 2; + /** Optional timeout to wait for the transaction to become effective in the store. 
*/ + google.protobuf.Duration wait_to_become_effective = 3; +} +message ImportTopologySnapshotV2Response {} + message SignTransactionsRequest { /** The transactions to be signed, but will not be stored in the authorized store */ repeated com.digitalasset.canton.protocol.v30.SignedTopologyTransaction transactions = 1; @@ -220,8 +232,9 @@ enum ForceFlag { FORCE_FLAG_ALIEN_MEMBER = 1; /* Deprecated, increasing ledger time record time tolerance does not require a force flag for PV >= 32 */ FORCE_FLAG_LEDGER_TIME_RECORD_TIME_TOLERANCE_INCREASE = 2; - /** Required when revoking the vetting of a package */ - FORCE_FLAG_ALLOW_UNVET_PACKAGE = 3; + // Previously FORCE_FLAG_ALLOW_UNVET_PACKAGE, now always enabled as it is not dangerous anymore + reserved 3; + reserved "FORCE_FLAG_ALLOW_UNVET_PACKAGE"; /** Required when vetting unknown packages (not uploaded). */ FORCE_FLAG_ALLOW_UNKNOWN_PACKAGE = 4; /** Required when vetting a package with unvetted dependencies */ @@ -245,4 +258,6 @@ enum ForceFlag { FORCE_FLAG_ALLOW_INSUFFICIENT_SIGNATORY_ASSIGNING_PARTICIPANTS_FOR_PARTY = 11; /** Required when vetting a package that fails upgrade checking */ FORCE_FLAG_ALLOW_VET_INCOMPATIBLE_UPGRADES = 12; + /** Required when submitting dynamic synchronizer parameters that have out-of-bounds values */ + FORCE_FLAG_ALLOW_OUT_OF_BOUNDS_VALUE = 13; } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/auth/AuthorizationError.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/auth/AuthorizationError.scala index c36e07c777..07704671b9 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/auth/AuthorizationError.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/auth/AuthorizationError.scala @@ -60,15 +60,20 @@ object AuthorizationError { override val reason = s"Claims do not authorize to act as party '$party'" } + final case class MissingOperationClaim(party: String) extends AuthorizationError { + override val reason = s"Claims do not authorize to operate for party '$party'" + } + final case class InvalidIdentityProviderId(identityProviderId: String) extends AuthorizationError { override val reason = s"identity_provider_id from the request `$identityProviderId` does not match the one provided in the authorization claims" } - final case class MissingAdminOrIdpAdminOrReadClaim(party: String) extends AuthorizationError { + final case class MissingAdminOrIdpAdminOrOperationClaim(party: String) + extends AuthorizationError { override val reason = - s"Claims do not authorize the use of administrative services nor authorize to read data for party '$party'" + s"Claims do not authorize the use of administrative services nor authorize to operate for party '$party'" } final case class InvalidField(fieldName: String, reason: String) extends AuthorizationError diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/auth/Authorizer.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/auth/Authorizer.scala index ee057ba681..8484d6fbe2 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/auth/Authorizer.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/auth/Authorizer.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.auth import cats.syntax.either.* +import cats.syntax.traverse.* import com.daml.jwt.JwtTimestampLeeway import com.daml.tracing.Telemetry import com.digitalasset.canton.LfLedgerString @@ -207,6 +208,8 @@ final class Authorizer( ) ) + // No authenticated user but 
a user-id present in the request OR + // Authenticated user different from user-id in the request (likely an admin operation) case _ => Right(None) } @@ -256,16 +259,29 @@ final class Authorizer( case None => claims.isAdminOrIDPAdmin.map(_ => req) } + case RequiredClaim.AdminOrIdpAdminOrSelfAdmin(_) => + claims.isAdminOrIDPAdmin.left + .flatMap { err => + val userId = requiredClaim.requestStringL.get(req) + authenticatedUserId(claims).flatMap { + case Some(authUserId) if authUserId == userId => + Right(()) + case _ => + Left(err) + } + } + .map(_ => req) + case RequiredClaim.Admin() => claims.isAdmin.map(_ => req) case RequiredClaim.AdminOrIdpAdmin() => claims.isAdminOrIDPAdmin.map(_ => req) - case RequiredClaim.AdminOrIdpAdminOrReadAsParty(party) => + case RequiredClaim.AdminOrIdpAdminOrOperateAsParty(parties) => (claims.isAdminOrIDPAdmin match { - case Left(_) => - claims.canReadAs(party) match { - case Left(_) => - Left(AuthorizationError.MissingAdminOrIdpAdminOrReadClaim(party)) + case Left(_) if parties.nonEmpty => + parties.traverse(claims.canOperateAs) match { + case Left(AuthorizationError.MissingOperationClaim(party)) => + Left(AuthorizationError.MissingAdminOrIdpAdminOrOperationClaim(party)) case x => x } case x => x @@ -354,7 +370,10 @@ object RequiredClaim { ) extends RequiredClaim[Req] final case class MatchUserIdForUserManagement[Req](override val requestStringL: Lens[Req, String]) extends RequiredClaim[Req] + final case class AdminOrIdpAdminOrSelfAdmin[Req](override val requestStringL: Lens[Req, String]) + extends RequiredClaim[Req] final case class Admin[Req]() extends RequiredClaim[Req] final case class AdminOrIdpAdmin[Req]() extends RequiredClaim[Req] - final case class AdminOrIdpAdminOrReadAsParty[Req](party: String) extends RequiredClaim[Req] + final case class AdminOrIdpAdminOrOperateAsParty[Req](parties: Seq[String]) + extends RequiredClaim[Req] } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/auth/Claims.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/auth/Claims.scala index 5d83558721..08688b886a 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/auth/Claims.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/auth/Claims.scala @@ -238,6 +238,24 @@ object ClaimSet { (), AuthorizationError.MissingExecuteAsAnyPartyClaim, ) + + /** Returns successfully if the set of claims authorizes the user to act as, read as, or execute + * as the given party, unless the claims expired + */ + def canOperateAs(party: String): Either[AuthorizationError, Unit] = + Either.cond( + claims.exists { + case ClaimActAsAnyParty => true + case ClaimReadAsAnyParty => true + case ClaimExecuteAsAnyParty => true + case ClaimActAsParty(p) if p == party => true + case ClaimReadAsParty(p) if p == party => true + case ClaimExecuteAsParty(p) if p == party => true + case _ => false + }, + (), + AuthorizationError.MissingOperationClaim(party), + ) } /** The representation of a user that was authenticated, but whose [[Claims]] have not yet been diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/caching/ScaffeineCache.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/caching/ScaffeineCache.scala index c49f84d605..ab23549e84 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/caching/ScaffeineCache.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/caching/ScaffeineCache.scala @@ -120,6 +120,8 @@ object ScaffeineCache { def invalidateAll(): Unit = 
underlying.synchronous().invalidateAll() def cleanUp(): Unit = underlying.synchronous().cleanUp() + + def getIfPresentSync(key: K): Option[V] = underlying.synchronous().getIfPresent(key) } class TunnelledAsyncLoadingCache[F[_], K, V] private[ScaffeineCache] ( diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/config/KmsConfig.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/config/KmsConfig.scala index 71a69ebc08..66c3d72aa5 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/config/KmsConfig.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/config/KmsConfig.scala @@ -130,7 +130,7 @@ object KmsConfig { disableSslVerification: Boolean = false, endpointOverride: Option[String] = None, ) extends KmsConfig - with EnterpriseOnlyCantonConfigValidation + with UniformCantonConfigValidation object Aws { val defaultTestConfig: Aws = Aws(region = "us-east-1") @@ -167,7 +167,7 @@ object KmsConfig { override val retries: RetryConfig = RetryConfig(), endpointOverride: Option[String] = None, ) extends KmsConfig - with EnterpriseOnlyCantonConfigValidation + with UniformCantonConfigValidation object Gcp { val defaultTestConfig: Gcp = Gcp( diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/config/ReassignmentsConfig.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/config/ReassignmentsConfig.scala new file mode 100644 index 0000000000..5d73aea230 --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/config/ReassignmentsConfig.scala @@ -0,0 +1,40 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.config + +import com.digitalasset.canton.config.NonNegativeFiniteDuration +import com.digitalasset.canton.config.semiauto.CantonConfigValidatorDerivation + +/** Configuration relating to reassignments. + * + * @param targetTimestampForwardTolerance + * Defines how far into the future a target timestamp on an unassignment request may be before + * this participant deems the request unvalidatable and abstains from the decision. + * + * In order to validate an unassignment request, we need to refer to the topology of the target + * synchronizer. Which snapshot of the topology we use is defined by the target timestamp in the + * unassignment request. However, the processing of events from each synchronizer can go at its own + * pace on each participant, so at the time a participant receives the unassignment request to + * validate, its own topology snapshot on the target synchronizer may lag behind the target + * timestamp chosen by the submitting participant. In such cases, the validating participant can + * simply wait until it has processed topology events indicating that it has caught up to the + * required timestamp. However, if the target timestamp is too far in the future, we do not want to + * tie up resources waiting for the relevant topology, and this participant will simply abstain + * from the decision, deferring to other participants to make the decision. + * + * The tuning of this parameter should be understood as a trade-off. Higher values mean that the + * participant may consume more resources waiting to catch up before validating an unassignment. + * Lower values mean that the participant is more likely to opt out of validating unassignments if + * other participants are running ahead of this one. 
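+ * + * As a minimal sketch (the value below is illustrative, not a recommendation), the tolerance can be overridden when constructing the config: + * {{{ + * val reassignments = ReassignmentsConfig( + *   targetTimestampForwardTolerance = NonNegativeFiniteDuration.ofSeconds(10) + * ) + * }}}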
+ */ +final case class ReassignmentsConfig( + targetTimestampForwardTolerance: NonNegativeFiniteDuration = + NonNegativeFiniteDuration.ofSeconds(5) +) extends UniformCantonConfigValidation + +object ReassignmentsConfig { + implicit val reassignmentsConfigCantonConfigValidator + : CantonConfigValidator[ReassignmentsConfig] = + CantonConfigValidatorDerivation[ReassignmentsConfig] +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/config/RefinedNonNegativeDuration.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/config/RefinedNonNegativeDuration.scala index be910a8b3e..d00b08e5cb 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/config/RefinedNonNegativeDuration.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/config/RefinedNonNegativeDuration.scala @@ -90,14 +90,14 @@ trait RefinedNonNegativeDuration[D <: RefinedNonNegativeDuration[D]] extends Pre await(description, logFailing, stackTraceFilter, onTimeout)(futUS.unwrap) /** Same as await, but not returning a value */ - def await_( + def await_[A]( description: => String, logFailing: Option[Level] = None, - )(fut: Future[?])(implicit loggingContext: ErrorLoggingContext): Unit = + )(fut: Future[A])(implicit loggingContext: ErrorLoggingContext): Unit = await(description, logFailing)(fut).discard - def awaitUS_(description: => String, logFailing: Option[Level] = None)( - fut: FutureUnlessShutdown[?] + def awaitUS_[A](description: => String, logFailing: Option[Level] = None)( + fut: FutureUnlessShutdown[A] )(implicit loggingContext: ErrorLoggingContext): Unit = awaitUS(description, logFailing)(fut).discard @@ -373,6 +373,10 @@ object NonNegativeFiniteDuration implicit val forgetRefinementFDuration: Transformer[NonNegativeFiniteDuration, FiniteDuration] = _.underlying + implicit val toInternalTransformer + : Transformer[NonNegativeFiniteDuration, NonNegativeFiniteDurationInternal] = + _.toInternal + def fromDuration(duration: Duration): Either[String, NonNegativeFiniteDuration] = duration match { case x: FiniteDuration => Either.cond(x.length >= 0, NonNegativeFiniteDuration(x), s"Duration $x is negative!") diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/config/ReplicationConfig.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/config/ReplicationConfig.scala similarity index 96% rename from canton/community/common/src/main/scala/com/digitalasset/canton/config/ReplicationConfig.scala rename to canton/community/base/src/main/scala/com/digitalasset/canton/config/ReplicationConfig.scala index 7efc013194..0e4a432f70 100644 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/config/ReplicationConfig.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/config/ReplicationConfig.scala @@ -47,4 +47,6 @@ object ReplicationConfig { .map(_.copy(enabled = enabled)) .orElse(enabled.map(enabled => ReplicationConfig(enabled = Some(enabled)))) } + + val disabled: ReplicationConfig = ReplicationConfig(enabled = Some(false)) } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/config/ServerConfig.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/config/ServerConfig.scala index 12bc81687d..6039a5add0 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/config/ServerConfig.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/config/ServerConfig.scala @@ -30,6 +30,25 @@ import org.slf4j.LoggerFactory import 
scala.concurrent.duration.Duration import scala.math.Ordering.Implicits.infixOrderingOps +/** Configuration to limit the number of open streams per service. + * + * @param limits + * a map from service name to the maximum number of parallel open streams + * @param warnOnUndefinedLimits + * whether to emit a warning if no limit is configured for a stream + */ +final case class StreamLimitConfig( + limits: Map[String, NonNegativeInt] = Map.empty, + warnOnUndefinedLimits: Boolean = true, +) extends UniformCantonConfigValidation + +object StreamLimitConfig { + implicit val streamLimitConfigCantonConfigValidator: CantonConfigValidator[StreamLimitConfig] = { + import CantonConfigValidatorInstances.* + CantonConfigValidatorDerivation[StreamLimitConfig] + } +} + /** Configuration for hosting a server api */ trait ServerConfig extends Product with Serializable { @@ -94,6 +113,9 @@ trait ServerConfig extends Product with Serializable { /** settings for the jwks cache */ def jwksCacheConfig: JwksCacheConfig + /** Configured limits for open streams per service */ + def stream: Option[StreamLimitConfig] + /** Use the configuration to instantiate the interceptors for this server */ def instantiateServerInterceptors( tracingConfig: TracingConfig, @@ -107,6 +129,7 @@ trait ServerConfig extends Product with Serializable { jwksCacheConfig: JwksCacheConfig, telemetry: Telemetry, additionalInterceptors: Seq[ServerInterceptor] = Seq.empty, + streamLimits: Option[StreamLimitConfig], ): CantonServerInterceptors = new CantonCommunityServerInterceptors( tracingConfig, apiLoggingConfig, @@ -119,6 +142,7 @@ trait ServerConfig extends Product with Serializable { jwksCacheConfig, telemetry, additionalInterceptors, + streamLimits, ) } @@ -144,6 +168,7 @@ final case class AdminServerConfig( override val adminTokenConfig: AdminTokenConfig = AdminTokenConfig(), override val maxTokenLifetime: NonNegativeDuration = NonNegativeDuration(Duration.Inf), override val jwksCacheConfig: JwksCacheConfig = JwksCacheConfig(), + override val stream: Option[StreamLimitConfig] = None, ) extends ServerConfig with UniformCantonConfigValidation { def clientConfig: FullClientConfig = diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/config/StorageConfig.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/config/StorageConfig.scala index fa79337638..35c790fa30 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/config/StorageConfig.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/config/StorageConfig.scala @@ -119,6 +119,7 @@ final case class BatchingConfig( maxAcsImportBatchSize: PositiveNumeric[Int] = BatchingConfig.defaultMaxAcsImportBatchSize, parallelism: PositiveNumeric[Int] = BatchingConfig.defaultBatchingParallelism, aggregator: BatchAggregatorConfig = BatchingConfig.defaultAggregator, + contractStoreAggregator: BatchAggregatorConfig = BatchingConfig.defaultContractStoreAggregator, maxPruningTimeInterval: PositiveFiniteDuration = BatchingConfig.defaultMaxPruningTimeInterval, ) extends UniformCantonConfigValidation @@ -134,6 +135,11 @@ object BatchingConfig { private val defaultLedgerApiPruningBatchSize: PositiveInt = PositiveNumeric.tryCreate(50000) private val defaultMaxAcsImportBatchSize: PositiveNumeric[Int] = PositiveNumeric.tryCreate(1000) private val defaultAggregator: BatchAggregatorConfig.Batching = BatchAggregatorConfig.Batching() + private val defaultContractStoreAggregator: BatchAggregatorConfig.Batching = + BatchAggregatorConfig.Batching( + maximumInFlight 
= PositiveNumeric.tryCreate(5), + maximumBatchSize = PositiveNumeric.tryCreate(50), + ) // default of 30min corresponds to 1440 pruning queries after 30 days downtime, which is a reasonable tradeoff private val defaultMaxPruningTimeInterval: PositiveFiniteDuration = PositiveFiniteDuration.ofMinutes(30) diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/config/TestingConfigInternal.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/config/TestingConfigInternal.scala index e399b0e95a..19c2e9997c 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/config/TestingConfigInternal.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/config/TestingConfigInternal.scala @@ -3,7 +3,7 @@ package com.digitalasset.canton.config -import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.config.RequireTypes.NonNegativeProportion import com.digitalasset.canton.metrics.MetricsFactoryType import com.digitalasset.canton.metrics.MetricsFactoryType.External @@ -38,9 +38,12 @@ import com.digitalasset.canton.metrics.MetricsFactoryType.External * }}} * * See also the example in `EngineComputationAbortIntegrationTest`. - * @param maxCommitmentSendDelayMillis - * The maximum delay for sending commitments in milliseconds. If not set, commitment sending is - * delayed by a random amount at most the default value. + * @param commitmentSendDelay + * The delay bounds for sending commitments, as a fraction of the reconciliation interval. The + * min and max bounds are enforced to be between 0 and 1. If commitmentSendDelay is not set, + * commitment sending is delayed by a random amount between the default bounds. If either bound + * is not set, the default value is used for that bound. If the maximum bound is smaller than the + * minimum bound, commitment sending is delayed by a random amount between the default bounds. * @param sequencerTransportSeed * The seed to be used for choosing the threshold number of sequencer transports. * @param warnOnAcsCommitmentDegradation @@ -60,7 +63,7 @@ final case class TestingConfigInternal( initializeGlobalOpenTelemetry: Boolean = true, doNotUseCommitmentCachingFor: Set[String] = Set.empty, reinterpretationTestHookFor: String => () => Unit = _ => () => (), - maxCommitmentSendDelayMillis: Option[NonNegativeInt] = None, + commitmentSendDelay: Option[CommitmentSendDelay] = None, sequencerTransportSeed: Option[Long] = None, participantsWithoutLapiVerification: Set[String] = Set.empty, enableInMemoryTransactionStoreForParticipants: Boolean = false, @@ -79,3 +82,15 @@ final case class TestingConfigInternal( memberName: String, synchronizerName: String, ) + +/** @param minCommitmentSendDelay + * The minimum delay, as a fraction of the reconciliation interval, for sending commitments. If not set, commitment sending is + * delayed by a random amount of at least the default fraction. + * @param maxCommitmentSendDelay + * The maximum delay, as a fraction of the reconciliation interval, for sending commitments. If not set, commitment sending is + * delayed by a random amount of at most the default fraction. 
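+ * + * A minimal sketch (the values are illustrative; `NonNegativeProportion.tryCreate` is assumed to be the factory for proportions and may need adjusting to the actual API): + * {{{ + * val delay = CommitmentSendDelay( + *   minCommitmentSendDelay = Some(NonNegativeProportion.tryCreate(0.1)), + *   maxCommitmentSendDelay = Some(NonNegativeProportion.tryCreate(0.5)), + * ) + * }}}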
+ */ +final case class CommitmentSendDelay( + minCommitmentSendDelay: Option[NonNegativeProportion] = None, + maxCommitmentSendDelay: Option[NonNegativeProportion] = None, +) diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/CryptoApi.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/CryptoApi.scala index 4aec01cef8..1bd52e46b7 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/CryptoApi.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/CryptoApi.scala @@ -349,7 +349,6 @@ object Crypto { publicKeyConversionCacheConfig: CacheConfig, storage: Storage, cryptoPrivateStoreFactory: CryptoPrivateStoreFactory, - kmsFactory: KmsFactory, releaseProtocolVersion: ReleaseProtocolVersion, futureSupervisor: FutureSupervisor, clock: Clock, @@ -386,7 +385,7 @@ object Crypto { for { kmsConfig <- config.kms.toRight("Missing KMS configuration for KMS crypto provider") cryptoSchemes <- CryptoSchemes.fromConfig(config) - kms <- kmsFactory + kms <- KmsFactory .create( kmsConfig, timeouts, diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/Hash.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/Hash.scala index 8aff2f98db..e800c4d0d1 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/Hash.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/Hash.scala @@ -168,7 +168,7 @@ object Hash { s"Size of given hash ${hash.size()} does not match expected size ${algorithm.length} for ${algorithm.name}", ) - private def tryFromByteString(bytes: ByteString): Hash = + def tryFromByteString(bytes: ByteString): Hash = fromByteString(bytes).valueOr(err => throw new IllegalArgumentException(s"Failed to deserialize hash from $bytes: $err") ) diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/InteractiveSubmission.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/InteractiveSubmission.scala index dec0f75c2f..d7de14e64d 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/InteractiveSubmission.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/InteractiveSubmission.scala @@ -288,7 +288,8 @@ object InteractiveSubmission { _ <- EitherT.cond[FutureUnlessShutdown]( validSignaturesSet.sizeIs >= authInfo.threshold.unwrap, (), - s"Received ${validSignatures.size} valid signatures (${invalidSignatures.size} invalid), but expected at least ${authInfo.threshold} valid for $party", + s"Received ${validSignatures.size} valid signatures (${invalidSignatures.size} invalid), but expected at least ${authInfo.threshold} valid for $party. " + + s"Transaction hash to be signed: ${hash.toHexString}. Ensure the correct transaction hash is signed with the correct key(s).", ) } yield { logger.debug( diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/Signing.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/Signing.scala index 82c327a834..135c4f4457 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/Signing.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/Signing.scala @@ -200,7 +200,8 @@ trait SigningPrivateStoreOps extends SigningPrivateOps { * The fingerprint of the key that was used to generate the signature. * @param signingAlgorithmSpec * The signing algorithm scheme used to generate this signature. 
It is optional to ensure - * backwards compatibility. + * backwards compatibility with previous versions where the spec was not set; the general code + * path, however, expects it to be provided. * @param signatureDelegation * An additional "optional" signature that includes a session key and a delegation/authorization * through a signature created by a long-term key. This allows the session key to be used for @@ -966,11 +967,17 @@ object SigningKeySpec { keySpecP: v30.SigningKeySpec, keySchemeP: v30.SigningKeyScheme, ): ParsingResult[SigningKeySpec] = - SigningKeySpec.fromProtoEnum("key_spec", keySpecP).leftFlatMap { - case ProtoDeserializationError.FieldNotSet(_) => - SigningKeySpec.fromProtoEnumSigningKeyScheme("scheme", keySchemeP) - case err => Left(err) - } + // Return a more informative error if neither field is set + if ( + keySpecP == v30.SigningKeySpec.SIGNING_KEY_SPEC_UNSPECIFIED && keySchemeP.isSigningKeySchemeUnspecified + ) + Left(ProtoDeserializationError.FieldNotSet("key_spec and scheme")) + else + SigningKeySpec.fromProtoEnum("key_spec", keySpecP).leftFlatMap { + case ProtoDeserializationError.FieldNotSet(_) => + SigningKeySpec.fromProtoEnumSigningKeyScheme("scheme", keySchemeP) + case err => Left(err) + } /** Converts an old SigningKeyScheme enum to the new key scheme, ensuring backward compatibility * with existing data. @@ -1399,7 +1406,7 @@ object SigningPublicKey * format. If the [[SigningKeySpec]] is EC-based, it also validates that the public key lies on * the expected curve. */ - private[crypto] def create( + def create( format: CryptoKeyFormat, key: ByteString, keySpec: SigningKeySpec, diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/SyncCryptoApiParticipantProvider.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/SyncCryptoApiParticipantProvider.scala index df63058ff8..d9be5770eb 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/SyncCryptoApiParticipantProvider.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/SyncCryptoApiParticipantProvider.scala @@ -5,7 +5,6 @@ package com.digitalasset.canton.crypto import cats.data.EitherT import cats.syntax.either.* -import cats.syntax.flatMap.* import cats.syntax.functor.* import cats.syntax.traverse.* import com.daml.nonempty.NonEmpty @@ -20,10 +19,7 @@ import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown, LifeCycle} import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.protocol.{ - DynamicSynchronizerParameters, - StaticSynchronizerParameters, -} +import com.digitalasset.canton.protocol.StaticSynchronizerParameters import com.digitalasset.canton.serialization.DeserializationError import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex @@ -36,7 +32,7 @@ import com.digitalasset.canton.topology.client.{ import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.LoggerUtil -import com.digitalasset.canton.version.{HasToByteString, ProtocolVersion} +import com.digitalasset.canton.version.HasToByteString import com.google.protobuf.ByteString import org.slf4j.event.Level @@ -185,48 +181,33 @@ object SyncCryptoClient { client: SyncCryptoClient[SyncCryptoApi], 
desiredTimestamp: CantonTimestamp, previousTimestampO: Option[CantonTimestamp], - protocolVersion: ProtocolVersion, warnIfApproximate: Boolean = true, )(implicit - executionContext: ExecutionContext, - loggingContext: ErrorLoggingContext, + loggingContext: ErrorLoggingContext ): FutureUnlessShutdown[SyncCryptoApi] = { val traceContext: TraceContext = loggingContext.traceContext - def lookupDynamicSynchronizerParameters( - timestamp: CantonTimestamp - ): FutureUnlessShutdown[DynamicSynchronizerParameters] = - for { - snapshot <- client.awaitSnapshotUSSupervised( - s"searching for topology change delay at $timestamp for desired timestamp $desiredTimestamp and known until ${client.topologyKnownUntilTimestamp}" - )(timestamp) - synchronizerParams <- - snapshot.ipsSnapshot.findDynamicSynchronizerParametersOrDefault( - protocolVersion = protocolVersion, - warnOnUsingDefault = false, - )(traceContext) - } yield synchronizerParams - - computeTimestampForValidation( + val timestamp = computeTimestampForValidation( desiredTimestamp, previousTimestampO, client.topologyKnownUntilTimestamp, client.approximateTimestamp, warnIfApproximate, - )(lookupDynamicSynchronizerParameters).flatMap { timestamp => - if (timestamp <= client.topologyKnownUntilTimestamp) { - loggingContext.debug( - s"Getting topology snapshot at $timestamp; desired=$desiredTimestamp, known until ${client.topologyKnownUntilTimestamp}; previous $previousTimestampO" - ) - client.hypotheticalSnapshot(timestamp, desiredTimestamp)(traceContext) - } else { - loggingContext.debug( - s"Waiting for topology snapshot at $timestamp; desired=$desiredTimestamp, known until ${client.topologyKnownUntilTimestamp}; previous $previousTimestampO" - ) - client.awaitSnapshotUSSupervised( - s"requesting topology snapshot at $timestamp; desired=$desiredTimestamp, previousO=$previousTimestampO, known until=${client.topologyKnownUntilTimestamp}" - )(timestamp) - } + client.staticSynchronizerParameters, + ) + + if (timestamp <= client.topologyKnownUntilTimestamp) { + loggingContext.debug( + s"Getting topology snapshot at $timestamp; desired=$desiredTimestamp, known until ${client.topologyKnownUntilTimestamp}; previous $previousTimestampO" + ) + client.hypotheticalSnapshot(timestamp, desiredTimestamp)(traceContext) + } else { + loggingContext.debug( + s"Waiting for topology snapshot at $timestamp; desired=$desiredTimestamp, known until ${client.topologyKnownUntilTimestamp}; previous $previousTimestampO" + ) + client.awaitSnapshotUSSupervised( + s"requesting topology snapshot at $timestamp; desired=$desiredTimestamp, previousO=$previousTimestampO, known until=${client.topologyKnownUntilTimestamp}" + )(timestamp) } } @@ -236,16 +217,12 @@ object SyncCryptoClient { topologyKnownUntilTimestamp: CantonTimestamp, currentApproximateTimestamp: CantonTimestamp, warnIfApproximate: Boolean, - )( - synchronizerParamsLookup: CantonTimestamp => FutureUnlessShutdown[ - DynamicSynchronizerParameters - ] + staticSynchronizerParameters: StaticSynchronizerParameters, )(implicit - loggingContext: ErrorLoggingContext, - executionContext: ExecutionContext, - ): FutureUnlessShutdown[CantonTimestamp] = + loggingContext: ErrorLoggingContext + ): CantonTimestamp = if (desiredTimestamp <= topologyKnownUntilTimestamp) { - FutureUnlessShutdown.pure(desiredTimestamp) + desiredTimestamp } else { previousTimestampO match { case None => @@ -253,23 +230,22 @@ object SyncCryptoClient { if (warnIfApproximate) Level.WARN else Level.INFO, s"Using approximate topology snapshot at 
$currentApproximateTimestamp for desired timestamp $desiredTimestamp", ) - FutureUnlessShutdown.pure(currentApproximateTimestamp) + currentApproximateTimestamp case Some(previousTimestamp) => if (desiredTimestamp <= previousTimestamp.immediateSuccessor) - FutureUnlessShutdown.pure(desiredTimestamp) + desiredTimestamp else { import scala.Ordered.orderingToOrdered - synchronizerParamsLookup(previousTimestamp).map { previousSynchronizerParams => - val delay = previousSynchronizerParams.topologyChangeDelay - val diff = desiredTimestamp - previousTimestamp - val snapshotTimestamp = - if (diff > delay.unwrap) { - // `desiredTimestamp` is larger than `previousTimestamp` plus the `delay`, - // so timestamps cannot overflow here - checked(previousTimestamp.plus(delay.unwrap).immediateSuccessor) - } else desiredTimestamp - snapshotTimestamp - } + + val delay = staticSynchronizerParameters.topologyChangeDelay + val diff = desiredTimestamp - previousTimestamp + val snapshotTimestamp = + if (diff > delay.unwrap) { + // `desiredTimestamp` is larger than `previousTimestamp` plus the `delay`, + // so timestamps cannot overflow here + checked(previousTimestamp.plus(delay.unwrap).immediateSuccessor) + } else desiredTimestamp + snapshotTimestamp } } } @@ -280,6 +256,7 @@ object SyncCryptoClient { */ class SynchronizerCryptoClient private ( val member: Member, + val staticSynchronizerParameters: StaticSynchronizerParameters, val psid: PhysicalSynchronizerId, val ips: SynchronizerTopologyClient, val crypto: SynchronizerCrypto, @@ -408,6 +385,7 @@ object SynchronizerCryptoClient { ) new SynchronizerCryptoClient( member, + staticSynchronizerParameters, PhysicalSynchronizerId(synchronizerId, staticSynchronizerParameters), ips, synchronizerCrypto, @@ -457,6 +435,7 @@ object SynchronizerCryptoClient { ) new SynchronizerCryptoClient( member, + staticSynchronizerParameters, synchronizerId, ips, synchronizerCrypto, diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/CommunityKmsFactory.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/CommunityKmsFactory.scala deleted file mode 100644 index f686110bcf..0000000000 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/CommunityKmsFactory.scala +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto.kms - -import com.digitalasset.canton.concurrent.FutureSupervisor -import com.digitalasset.canton.config.{ - EnterpriseOnlyCantonConfigValidation, - KmsConfig, - ProcessingTimeout, -} -import com.digitalasset.canton.crypto.kms.driver.v1.DriverKms -import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.time.Clock -import com.digitalasset.canton.tracing.TracerProvider - -import scala.concurrent.ExecutionContext - -/** Factory to create a KMS client for the community edition. 
*/ -object CommunityKmsFactory extends KmsFactory { - - def create( - config: KmsConfig, - timeouts: ProcessingTimeout, - futureSupervisor: FutureSupervisor, - tracerProvider: TracerProvider, - clock: Clock, - loggerFactory: NamedLoggerFactory, - executionContext: ExecutionContext, - ): Either[KmsError, Kms] = - config match { - case driverKmsConfig: KmsConfig.Driver => - DriverKms.create( - driverKmsConfig, - futureSupervisor, - clock, - timeouts, - loggerFactory, - executionContext, - ) - case other: EnterpriseOnlyCantonConfigValidation => - throw new IllegalArgumentException( - s"Unsupported KMS configuration in community edition: $other" - ) - } -} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/KmsFactory.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/KmsFactory.scala index f5e4234ab6..01e20a5c1f 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/KmsFactory.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/KmsFactory.scala @@ -5,14 +5,16 @@ package com.digitalasset.canton.crypto.kms import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config.{KmsConfig, ProcessingTimeout} +import com.digitalasset.canton.crypto.kms.aws.AwsKms +import com.digitalasset.canton.crypto.kms.driver.v1.DriverKms +import com.digitalasset.canton.crypto.kms.gcp.GcpKms import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.time.Clock import com.digitalasset.canton.tracing.TracerProvider import scala.concurrent.ExecutionContext -/** Factory to create a KMS. */ -trait KmsFactory { +object KmsFactory { def create( config: KmsConfig, @@ -22,5 +24,21 @@ trait KmsFactory { clock: Clock, loggerFactory: NamedLoggerFactory, executionContext: ExecutionContext, - ): Either[KmsError, Kms] + ): Either[KmsError, Kms] = + config match { + case awsKmsConfig: KmsConfig.Aws => + AwsKms.create(awsKmsConfig, timeouts, loggerFactory, tracerProvider) + case gcpKmsConfig: KmsConfig.Gcp => + GcpKms.create(gcpKmsConfig, timeouts, loggerFactory) + case driverKmsConfig: KmsConfig.Driver => + DriverKms.create( + driverKmsConfig, + futureSupervisor, + clock, + timeouts, + loggerFactory, + executionContext, + ) + } + } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/aws/AwsKms.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/aws/AwsKms.scala new file mode 100644 index 0000000000..4e354425e5 --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/aws/AwsKms.scala @@ -0,0 +1,662 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto.kms.aws + +import cats.data.EitherT +import cats.syntax.bifunctor.* +import cats.syntax.either.* +import com.digitalasset.canton.config.CantonRequireTypes.String300 +import com.digitalasset.canton.config.{KmsConfig, ProcessingTimeout} +import com.digitalasset.canton.crypto.kms.KmsError.* +import com.digitalasset.canton.crypto.kms.aws.AwsKms.* +import com.digitalasset.canton.crypto.kms.aws.audit.AwsRequestResponseLogger +import com.digitalasset.canton.crypto.kms.aws.audit.AwsRequestResponseLogger.traceContextExecutionAttribute +import com.digitalasset.canton.crypto.kms.aws.tracing.AwsTraceContextInterceptor +import com.digitalasset.canton.crypto.kms.{ + Kms, + KmsEncryptionPublicKey, + KmsError, + KmsKeyId, + KmsSigningPublicKey, +} +import com.digitalasset.canton.crypto.{EncryptionAlgorithmSpec, SigningAlgorithmSpec, *} +import com.digitalasset.canton.health.ComponentHealthState +import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, LifeCycle} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.tracing.{NoReportingTracerProvider, TraceContext, TracerProvider} +import com.digitalasset.canton.util.* +import com.google.api.gax.rpc.ResourceExhaustedException +import com.google.protobuf.ByteString +import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider +import software.amazon.awssdk.awscore.{AwsRequest, AwsRequestOverrideConfiguration} +import software.amazon.awssdk.core.SdkBytes +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration +import software.amazon.awssdk.core.exception.SdkClientException +import software.amazon.awssdk.http.SdkHttpConfigurationOption +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient +import software.amazon.awssdk.regions.Region +import software.amazon.awssdk.services.kms.model.* +import software.amazon.awssdk.services.kms.{KmsAsyncClient, model as aws} +import software.amazon.awssdk.utils.AttributeMap + +import java.net.URI +import java.util.concurrent.CompletionException +import scala.concurrent.ExecutionContext +import scala.jdk.FutureConverters.* + +/** Stands for Amazon Web Services - Key Management Service and is an internal KMS implementation + * that wraps the necessary cryptographic functions from the AWS SDK. 
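+ * + * A minimal creation sketch (the region is illustrative; `timeouts` and `loggerFactory` are assumed to be in scope): + * {{{ + * val kms: Either[KmsError, AwsKms] = + *   AwsKms.create(KmsConfig.Aws(region = "us-east-1"), timeouts, loggerFactory) + * }}}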
+ */ +class AwsKms( + override val config: KmsConfig.Aws, + private val kmsClient: KmsAsyncClient, + override val timeouts: ProcessingTimeout, + protected val loggerFactory: NamedLoggerFactory, +) extends Kms + with NamedLogging { + + override type Config = KmsConfig.Aws + + override def name: String = "aws-kms" + + override protected def initialHealthState: ComponentHealthState = ComponentHealthState.Ok() + + private val errorMessagesToRetry = + Set( + "Unable to execute HTTP request: The connection was closed during the request.", + "Unable to execute HTTP request: connection timed out", + ) + + private def errorHandler( + err: Throwable, + kmsErrorGen: (String, Boolean) => KmsError, + ): KmsError = + err match { + case err: CompletionException => + Option(err.getCause) match { + case Some(kmsErr: KmsException) if kmsErr.retryable() => + kmsErrorGen(ErrorUtil.messageWithStacktrace(err), true) + // we look for network failure errors to retry on + case Some(sdkErr: SdkClientException) + if errorMessagesToRetry.exists(sdkErr.getMessage.contains(_)) || sdkErr.retryable() => + kmsErrorGen(ErrorUtil.messageWithStacktrace(err), true) + // we retry on resource exceptions as well + case Some(resourceException: ResourceExhaustedException) => + logger.debug( + s"ResourceExhaustedException with retry: ${resourceException.isRetryable}" + )(TraceContext.empty) + kmsErrorGen(ErrorUtil.messageWithStacktrace(err), true) + case _ => + kmsErrorGen(ErrorUtil.messageWithStacktrace(err), false) + } + case err => kmsErrorGen(ErrorUtil.messageWithStacktrace(err), true) + } + + /** Creates an AWS KMS key based on a series of specifications and returns its key identifier. + * + * @param keySpec + * specifies the type of KMS key to create (e.g. SYMMETRIC_DEFAULT (AES-256-GCM) or RSA_2048). + * @param keyUsage + * the cryptographic operations for which you can use the KMS key (e.g. signing or encryption). + * @param name + * an optional name for the KMS key; currently mapped to the description field. 
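+ * @note + *   the created key is additionally tagged with `CreatedBy=Canton` (see the request builder below), and its multi-region behaviour follows `config.multiRegionKey`. 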
+ * @return + * a key id or an error if it fails to create a key + */ + private def createKey( + keySpec: aws.KeySpec, + keyUsage: aws.KeyUsageType, + name: Option[KeyName], + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, KmsKeyId] = + for { + keyRequest <- + Either + .catchOnly[aws.KmsException] { + aws.CreateKeyRequest.builder + .multiRegion(config.multiRegionKey) + .keySpec(keySpec) + .keyUsage(keyUsage) + .tags(Tag.builder().tagKey("CreatedBy").tagValue("Canton").build()) + .description(name.map(_.unwrap).getOrElse("")) + .withTraceContext(_.overrideConfiguration) + .build + } + .toEitherT[FutureUnlessShutdown] + .leftMap(err => KmsCreateKeyRequestError(ErrorUtil.messageWithStacktrace(err))) + response <- EitherTUtil.fromFuture[KmsError, aws.CreateKeyResponse]( + FutureUnlessShutdown.outcomeF(kmsClient.createKey(keyRequest).asScala), + err => errorHandler(err, (errStr, retryable) => KmsCreateKeyError(errStr, retryable)), + ) + kmsKeyId <- EitherT + .fromEither[FutureUnlessShutdown](String300.create(response.keyMetadata().keyId())) + .map(KmsKeyId.apply) + .leftMap[KmsError](err => KmsCreateKeyError(err)) + } yield kmsKeyId + + override protected def generateSigningKeyPairInternal( + signingKeySpec: SigningKeySpec, + name: Option[KeyName], + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, KmsKeyId] = + for { + awsKeySpec <- convertToAwsKeySpec(signingKeySpec) + .leftMap(err => KmsCreateKeyError(err)) + .toEitherT[FutureUnlessShutdown] + kmsKeyId <- createKey( + awsKeySpec, + aws.KeyUsageType.SIGN_VERIFY, + name, + ) + } yield kmsKeyId + + override protected def generateSymmetricEncryptionKeyInternal( + name: Option[KeyName] + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, KmsKeyId] = + createKey( + aws.KeySpec.SYMMETRIC_DEFAULT, + aws.KeyUsageType.ENCRYPT_DECRYPT, + name, + ) + + override protected def generateAsymmetricEncryptionKeyPairInternal( + encryptionKeySpec: EncryptionKeySpec, + name: Option[KeyName], + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, KmsKeyId] = + for { + keySpec <- convertToAwsAsymmetricKeyEncryptionSpec(encryptionKeySpec) + .leftMap(err => KmsCreateKeyError(err)) + .toEitherT[FutureUnlessShutdown] + kmsKeyId <- createKey( + keySpec, + aws.KeyUsageType.ENCRYPT_DECRYPT, + name, + ) + } yield kmsKeyId + + private def getPublicKeyInternal(keyId: KmsKeyId)(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, aws.GetPublicKeyResponse] = + for { + getPublicKeyRequest <- + Either + .catchOnly[aws.KmsException] { + aws.GetPublicKeyRequest.builder + .keyId(keyId.unwrap) + .withTraceContext(_.overrideConfiguration) + .build + } + .toEitherT[FutureUnlessShutdown] + .leftMap(err => KmsGetPublicKeyRequestError(keyId, ErrorUtil.messageWithStacktrace(err))) + pkResponse <- EitherTUtil.fromFuture[KmsError, aws.GetPublicKeyResponse]( + FutureUnlessShutdown.outcomeF(kmsClient.getPublicKey(getPublicKeyRequest).asScala), + err => + errorHandler(err, (errStr, retryable) => KmsGetPublicKeyError(keyId, errStr, retryable)), + ) + } yield pkResponse + + override protected def getPublicSigningKeyInternal( + keyId: KmsKeyId + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, KmsSigningPublicKey] = + getPublicKeyInternal(keyId).flatMap { pkResponse => + pkResponse.keyUsage() match { + 
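// Only keys whose usage is SIGN_VERIFY can be returned as signing public keys; + // any other usage (e.g. ENCRYPT_DECRYPT) is rejected below. +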
case aws.KeyUsageType.SIGN_VERIFY => + val pubKeyRaw = ByteString.copyFrom(pkResponse.publicKey().asByteBuffer()) + for { + keySpec <- convertFromAwsSigningKeySpec(pkResponse.keySpec()) + .leftMap[KmsError](KmsGetPublicKeyError(keyId, _)) + .toEitherT[FutureUnlessShutdown] + pubKey <- KmsSigningPublicKey + .create(pubKeyRaw, keySpec) + .leftMap[KmsError](err => KmsGetPublicKeyError(keyId, err.toString)) + .toEitherT[FutureUnlessShutdown] + } yield pubKey + case _ => + EitherT.leftT[FutureUnlessShutdown, KmsSigningPublicKey]( + KmsGetPublicKeyError( + keyId, + s"The selected key is defined for ${pkResponse.keyUsage()} and not for signing", + ) + ) + } + } + + override protected def getPublicEncryptionKeyInternal(keyId: KmsKeyId)(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, KmsEncryptionPublicKey] = + getPublicKeyInternal(keyId).flatMap { pkResponse => + pkResponse.keyUsage() match { + case aws.KeyUsageType.ENCRYPT_DECRYPT => + val pubKeyRaw = ByteString.copyFrom(pkResponse.publicKey().asByteBuffer()) + for { + keySpec <- convertFromAwsAsymmetricKeyEncryptionSpec(pkResponse.keySpec()) + .leftMap[KmsError](KmsGetPublicKeyError(keyId, _)) + .toEitherT[FutureUnlessShutdown] + pubKey <- KmsEncryptionPublicKey + .create(pubKeyRaw, keySpec) + .leftMap[KmsError](err => KmsGetPublicKeyError(keyId, err)) + .toEitherT[FutureUnlessShutdown] + } yield pubKey + case _ => + EitherT.leftT[FutureUnlessShutdown, KmsEncryptionPublicKey]( + KmsGetPublicKeyError( + keyId, + s"The selected key is defined for ${pkResponse.keyUsage()} and not for encryption", + ) + ) + } + } + + private def convertToAwsKeySpec( + signingKeySpec: SigningKeySpec + ): Either[String, aws.KeySpec] = + signingKeySpec match { + case SigningKeySpec.EcCurve25519 => + Left(s"Unsupported signing key type: ${signingKeySpec.name}") + case SigningKeySpec.EcP256 => + Right(aws.KeySpec.ECC_NIST_P256) + case SigningKeySpec.EcP384 => + Right(aws.KeySpec.ECC_NIST_P384) + case SigningKeySpec.EcSecp256k1 => + Right(aws.KeySpec.ECC_SECG_P256_K1) + } + + private def convertToAwsAlgoSpec( + signingAlgorithmSpec: SigningAlgorithmSpec + ): Either[String, aws.SigningAlgorithmSpec] = + signingAlgorithmSpec match { + case SigningAlgorithmSpec.Ed25519 => + Left(s"Unsupported signing algorithm type: ${signingAlgorithmSpec.name}") + case SigningAlgorithmSpec.EcDsaSha256 => + Right(aws.SigningAlgorithmSpec.ECDSA_SHA_256) + case SigningAlgorithmSpec.EcDsaSha384 => + Right(aws.SigningAlgorithmSpec.ECDSA_SHA_384) + } + + private def convertToAwsAsymmetricKeyEncryptionSpec( + encryptionKeySpec: EncryptionKeySpec + ): Either[String, aws.KeySpec] = + encryptionKeySpec match { + case EncryptionKeySpec.EcP256 => + Left(s"Unsupported encryption key type: ${encryptionKeySpec.name}") + case EncryptionKeySpec.Rsa2048 => + Right(aws.KeySpec.RSA_2048) + } + + private def convertToAwsAsymmetricEncryptionAlgorithmSpec( + encryptionAlgorithmSpec: EncryptionAlgorithmSpec + ): Either[String, aws.EncryptionAlgorithmSpec] = + encryptionAlgorithmSpec match { + case EncryptionAlgorithmSpec.EciesHkdfHmacSha256Aes128Cbc => + Left(s"Unsupported encryption key type: ${encryptionAlgorithmSpec.name}") + case EncryptionAlgorithmSpec.RsaOaepSha256 => + Right(aws.EncryptionAlgorithmSpec.RSAES_OAEP_SHA_256) + } + + private def convertFromAwsSigningKeySpec(keySpec: aws.KeySpec): Either[String, SigningKeySpec] = + keySpec match { + case aws.KeySpec.ECC_NIST_P256 => Right(SigningKeySpec.EcP256) + case aws.KeySpec.ECC_NIST_P384 => 
Right(SigningKeySpec.EcP384) + case aws.KeySpec.ECC_SECG_P256_K1 => Right(SigningKeySpec.EcSecp256k1) + case _ => Left(s"Unsupported signing key type: ${keySpec.toString}") + } + + private def convertFromAwsAsymmetricKeyEncryptionSpec( + keySpec: aws.KeySpec + ): Either[String, EncryptionKeySpec] = + keySpec match { + case aws.KeySpec.RSA_2048 => Right(EncryptionKeySpec.Rsa2048) + case _ => Left(s"Unsupported encryption key type: ${keySpec.toString}") + } + + private def getMetadataForActiveKeys( + keyId: KmsKeyId + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, KeyMetadata] = + retrieveKeyMetadata(keyId) + .leftMap[KmsError] { + case err: KmsRetrieveKeyMetadataError if !err.retryable => + KmsCannotFindKeyError(keyId, err.show) + case err => err + } + .flatMap { keyMetadata => + EitherTUtil + .condUnitET[FutureUnlessShutdown]( + keyMetadata.enabled() == true, + KmsKeyDisabledError( + keyId, + "key is disabled", + ), + ) + .map(_ => keyMetadata) + .leftWiden[KmsError] + } + + override protected def keyExistsAndIsActiveInternal(keyId: KmsKeyId)(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, Unit] = getMetadataForActiveKeys(keyId).map(_ => ()) + + private def encrypt( + keyId: KmsKeyId, + data: ByteString, + encryptionAlgorithm: aws.EncryptionAlgorithmSpec, + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, ByteString] = + for { + encryptRequest <- + Either + .catchOnly[aws.KmsException] { + aws.EncryptRequest.builder + .keyId(keyId.unwrap) + .encryptionAlgorithm(encryptionAlgorithm) + .plaintext(SdkBytes.fromByteArray(data.toByteArray)) + .withTraceContext(_.overrideConfiguration) + .build + } + .toEitherT[FutureUnlessShutdown] + .leftMap(err => KmsEncryptRequestError(keyId, ErrorUtil.messageWithStacktrace(err))) + response <- EitherTUtil.fromFuture[KmsError, aws.EncryptResponse]( + FutureUnlessShutdown.outcomeF(kmsClient.encrypt(encryptRequest).asScala), + err => errorHandler(err, (errStr, retryable) => KmsEncryptError(keyId, errStr, retryable)), + ) + encryptedData = response.ciphertextBlob + } yield ByteString.copyFrom(encryptedData.asByteBuffer()) + + override protected def encryptSymmetricInternal( + keyId: KmsKeyId, + data: ByteString4096, + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, ByteString6144] = + encrypt(keyId, data.unwrap, aws.EncryptionAlgorithmSpec.SYMMETRIC_DEFAULT).flatMap(dataEnc => + ByteString6144 + .create(dataEnc) + .toEitherT[FutureUnlessShutdown] + .leftMap(err => + KmsError + .KmsEncryptError(keyId, s"generated ciphertext does not adhere to bound: $err") + ) + ) + + private def decrypt( + keyId: KmsKeyId, + data: ByteString, + encryptionAlgorithm: aws.EncryptionAlgorithmSpec, + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, ByteString] = + for { + decryptRequest <- + Either + .catchOnly[aws.KmsException] { + aws.DecryptRequest.builder + .ciphertextBlob(SdkBytes.fromByteArray(data.toByteArray)) + .keyId(keyId.unwrap) + .encryptionAlgorithm(encryptionAlgorithm) + .withTraceContext(_.overrideConfiguration) + .build + } + .toEitherT[FutureUnlessShutdown] + .leftMap(err => KmsDecryptRequestError(keyId, ErrorUtil.messageWithStacktrace(err))) + response <- EitherTUtil.fromFuture[KmsError, aws.DecryptResponse]( + FutureUnlessShutdown.outcomeF(kmsClient.decrypt(decryptRequest).asScala), + err => errorHandler(err, 
(errStr, retryable) => KmsDecryptError(keyId, errStr, retryable)), + ) + decryptedData = response.plaintext + } yield ByteString.copyFrom(decryptedData.asByteBuffer()) + + override protected def decryptSymmetricInternal( + keyId: KmsKeyId, + data: ByteString6144, + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, ByteString4096] = + decrypt(keyId, data.unwrap, aws.EncryptionAlgorithmSpec.SYMMETRIC_DEFAULT).flatMap(dataPlain => + ByteString4096 + .create(dataPlain) + .toEitherT[FutureUnlessShutdown] + .leftMap(err => + KmsError.KmsDecryptError(keyId, s"plaintext does not adhere to bound: $err") + ) + ) + + override protected def decryptAsymmetricInternal( + keyId: KmsKeyId, + data: ByteString256, + encryptionAlgorithmSpec: EncryptionAlgorithmSpec, + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, ByteString190] = + for { + encryptionAlgorithm <- convertToAwsAsymmetricEncryptionAlgorithmSpec(encryptionAlgorithmSpec) + .leftMap(KmsDecryptError(keyId, _)) + .toEitherT[FutureUnlessShutdown] + decryptedData <- decrypt(keyId, data.unwrap, encryptionAlgorithm) + .flatMap( + ByteString190 + .create(_) + .toEitherT[FutureUnlessShutdown] + .leftMap[KmsError](err => + KmsError.KmsDecryptError(keyId, s"plaintext does not adhere to bound: $err") + ) + ) + } yield decryptedData + + override protected def signInternal( + keyId: KmsKeyId, + data: ByteString4096, + signingAlgorithmSpec: SigningAlgorithmSpec, + signingKeySpec: SigningKeySpec, + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, ByteString] = + for { + signingAlgorithm <- convertToAwsAlgoSpec(signingAlgorithmSpec) + .leftMap(KmsSignRequestError(keyId, _)) + .toEitherT[FutureUnlessShutdown] + signRequest <- + Either + .catchOnly[aws.KmsException] { + aws.SignRequest.builder + .messageType(MessageType.RAW) + .message(SdkBytes.fromByteArray(data.unwrap.toByteArray)) + .signingAlgorithm(signingAlgorithm) + .keyId(keyId.unwrap) + .withTraceContext(_.overrideConfiguration) + .build + } + .toEitherT[FutureUnlessShutdown] + .leftMap(err => KmsSignRequestError(keyId, ErrorUtil.messageWithStacktrace(err))) + response <- EitherTUtil.fromFuture[KmsError, aws.SignResponse]( + FutureUnlessShutdown.outcomeF(kmsClient.sign(signRequest).asScala), + err => errorHandler(err, (errStr, retryable) => KmsSignError(keyId, errStr, retryable)), + ) + } yield ByteString.copyFrom(response.signature().asByteBuffer()) + + override protected def deleteKeyInternal( + keyId: KmsKeyId + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, Unit] = + for { + scheduleKeyDeletionRequest <- + Either + .catchOnly[aws.KmsException] { + aws.ScheduleKeyDeletionRequest + .builder() + .keyId(keyId.unwrap) + // 7 days is the minimum waiting time for key deletion + .pendingWindowInDays(7) + .withTraceContext(_.overrideConfiguration) + .build + } + .toEitherT[FutureUnlessShutdown] + .leftMap(err => KmsDeleteKeyRequestError(keyId, ErrorUtil.messageWithStacktrace(err))) + _ <- EitherTUtil.fromFuture[KmsError, aws.ScheduleKeyDeletionResponse]( + FutureUnlessShutdown.outcomeF( + kmsClient.scheduleKeyDeletion(scheduleKeyDeletionRequest).asScala + ), + err => errorHandler(err, (errStr, retryable) => KmsDeleteKeyError(keyId, errStr, retryable)), + ) + } yield () + + private def retrieveKeyMetadata( + keyId: KmsKeyId + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): 
EitherT[FutureUnlessShutdown, KmsError, aws.KeyMetadata] = + for { + describeRequest <- + Either + .catchOnly[aws.KmsException] { + aws.DescribeKeyRequest + .builder() + .keyId(keyId.unwrap) + .withTraceContext(_.overrideConfiguration) + .build + } + .toEitherT[FutureUnlessShutdown] + .leftMap(err => + KmsRetrieveKeyMetadataRequestError(keyId, ErrorUtil.messageWithStacktrace(err)) + ) + response <- EitherTUtil.fromFuture[KmsError, aws.DescribeKeyResponse]( + FutureUnlessShutdown.outcomeF(kmsClient.describeKey(describeRequest).asScala), + err => + errorHandler( + err, + (errStr, retryable) => KmsRetrieveKeyMetadataError(keyId, errStr, retryable), + ), + ) + } yield response.keyMetadata() + + override def onClosed(): Unit = + LifeCycle.close(kmsClient)(logger) + +} + +object AwsKms { + + /** Extension class on AWS request builders with a method to be used to set the Canton trace + * context as an attribute that will be accessible by the request/response logger. See + * [[audit.AwsRequestResponseLogger]] for more details. + */ + implicit class ExtendedAwsRequestBuilder[A <: AwsRequest.Builder](val requestBuilder: A) + extends AnyVal { + + /** Should be called on request builders to inject the canton trace context to the request + * logger. Because of typing constraints of the SDK, the `overrideConfiguration` method must be + * used on the specific request builder type and cannot be abstracted. This method usage looks + * like: + * + * aws.DescribeKeyRequest .builder() .keyId(keyId.unwrap) + * .withTraceContext(_.overrideConfiguration) .build + */ + def withTraceContext( + overrideMethod: A => AwsRequestOverrideConfiguration => A + )(implicit tc: TraceContext): A = overrideMethod(requestBuilder)( + AwsRequestOverrideConfiguration + .builder() + .putExecutionAttribute(traceContextExecutionAttribute, tc) + .build() + ) + } + + def create( + config: KmsConfig.Aws, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + tracerProvider: TracerProvider = NoReportingTracerProvider, + ): Either[KmsError, AwsKms] = { + val kmsAsyncClientDefault = { + val builder = KmsAsyncClient + .builder() + .region(Region.of(config.region)) + /* We can access AWS in multiple ways, for example: (1) using the AWS security token service (sts) + profile (2) setting up the following environment variables: AWS_ACCESS_KEY_ID, + AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN */ + .credentialsProvider(DefaultCredentialsProvider.create()) + + config.endpointOverride.map(URI.create).fold(builder)(builder.endpointOverride) + } + + val kmsAsyncClientBuilder = if (config.disableSslVerification) { + loggerFactory + .getLogger(getClass) + .info("Disabling SSL verification") + val httpClient = NettyNioAsyncHttpClient + .builder() + .buildWithDefaults( + AttributeMap + .builder() + .put(SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES, Boolean.box(true)) + .build() + ) + kmsAsyncClientDefault + // this disables SSL certificate checks in the underlying http client + .httpClient(httpClient) + } else kmsAsyncClientDefault + + for { + kms <- + Either + .catchOnly[aws.KmsException] { + val builder = + if (config.auditLogging) + kmsAsyncClientBuilder + .overrideConfiguration( + ClientOverrideConfiguration + .builder() + .addExecutionInterceptor( + new AwsTraceContextInterceptor(loggerFactory, tracerProvider) + ) + .addExecutionInterceptor(new AwsRequestResponseLogger(loggerFactory)) + .build() + ) + else + kmsAsyncClientBuilder + + new AwsKms( + config, + builder + .region(Region.of(config.region)) + /* We can access AWS in 
multiple ways, for example: (1) using the AWS security token service (sts) + profile (2) setting up the following environment variables: AWS_ACCESS_KEY_ID, + AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN */ + .credentialsProvider(DefaultCredentialsProvider.create()) + .build(), + timeouts, + loggerFactory, + ) + } + .leftMap[KmsError](err => KmsCreateClientError(ErrorUtil.messageWithStacktrace(err))) + } yield kms + } +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/aws/audit/AwsRequestResponseLogger.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/aws/audit/AwsRequestResponseLogger.scala new file mode 100644 index 0000000000..559660915e --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/aws/audit/AwsRequestResponseLogger.scala @@ -0,0 +1,186 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto.kms.aws.audit + +import cats.Show +import com.digitalasset.canton.crypto.kms.audit.KmsRequestResponseLogger +import com.digitalasset.canton.crypto.kms.aws.audit.AwsRequestResponseLogger.{ + sdkRequestPretty, + sdkResponsePretty, +} +import com.digitalasset.canton.crypto.kms.aws.tracing.AwsTraceContextInterceptor.{ + otelSpanExecutionAttribute, + withTraceContext, +} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ShowUtil +import software.amazon.awssdk.awscore.AwsResponse +import software.amazon.awssdk.core.interceptor.{ + Context, + ExecutionAttribute, + ExecutionAttributes, + ExecutionInterceptor, +} +import software.amazon.awssdk.core.{SdkRequest, SdkResponse} +import software.amazon.awssdk.services.kms.model.* + +import scala.compat.java8.OptionConverters.RichOptionalGeneric + +/** AWS SDK execution interceptor that logs all requests and responses. Retrieves the canton trace + * context via execution attributes. + * + * Logs every request before it is sent, and every response after it has been deserialized, + * along with the request that triggered it, at INFO level. Every failed request will also be + * logged at WARN level, with the optional response if available. 
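+ * + * For illustration, a successful call is logged roughly as follows (span id, request and URI are placeholders): + * {{{ + * Sending request [<span-id>]: <request> to <uri> + * Received response [<span-id>]: <response> + * }}}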
+ */ +class AwsRequestResponseLogger(override val loggerFactory: NamedLoggerFactory) + extends NamedLogging + with ExecutionInterceptor + with ShowUtil { + + private def getSpanId(executionAttributes: ExecutionAttributes): String = + Option(executionAttributes.getAttribute(otelSpanExecutionAttribute)) + .map(_.getSpanContext.getSpanId) + .getOrElse("no-id") + + override def beforeTransmission( + context: Context.BeforeTransmission, + executionAttributes: ExecutionAttributes, + ): Unit = withTraceContext(executionAttributes, logger) { implicit tc => + logger.info( + show"Sending request [${getSpanId(executionAttributes)}]: ${context + .request()} to ${context.httpRequest().getUri.show}" + ) + } + + override def afterUnmarshalling( + context: Context.AfterUnmarshalling, + executionAttributes: ExecutionAttributes, + ): Unit = withTraceContext(executionAttributes, logger) { implicit tc => + logger.info( + show"Received response [${getSpanId(executionAttributes)}]: ${context.response()}" + ) + } + + override def onExecutionFailure( + context: Context.FailedExecution, + executionAttributes: ExecutionAttributes, + ): Unit = withTraceContext(executionAttributes, logger) { implicit tc => + logger.warn( + s"Request [${getSpanId(executionAttributes)}] failed.${context.response().asScala.map(r => s" Response: ${r.show}").getOrElse("")}", + context.exception(), + ) + } +} + +object AwsRequestResponseLogger extends KmsRequestResponseLogger with ShowUtil { + private[aws] val traceContextExecutionAttribute = + new ExecutionAttribute[TraceContext]("canton-trace-context") + + private def awsRequestLog(response: AwsResponse) = + s"[Aws-Id: ${response.responseMetadata().requestId()}]" + + private val createKeyRequestShow: Show[CreateKeyRequest] = { request => + createKeyRequestMsg(request.keyUsageAsString(), request.keySpecAsString()) + } + + private val createKeyResponseShow: Show[CreateKeyResponse] = { response => + s"${awsRequestLog(response)} - " + + createKeyResponseMsg( + response.keyMetadata().keyId(), + response.keyMetadata().keyUsageAsString(), + response.keyMetadata().keySpecAsString(), + ) + } + + private val getPublicKeyRequestShow: Show[GetPublicKeyRequest] = { request => + getPublicKeyRequestMsg(request.keyId()) + } + + private val getPublicKeyResponseShow: Show[GetPublicKeyResponse] = { response => + s"${awsRequestLog(response)} - " + + getPublicKeyResponseMsg(response.keyId(), response.keySpecAsString()) + } + + private val retrieveKeyMetadataRequestShow: Show[DescribeKeyRequest] = { request => + retrieveKeyMetadataRequestMsg(request.keyId()) + } + + private val retrieveKeyMetadataResponseShow: Show[DescribeKeyResponse] = { response => + s"${awsRequestLog(response)} - " + + retrieveKeyMetadataResponseMsg( + response.keyMetadata().keyId(), + response.keyMetadata().keySpecAsString(), + response.keyMetadata().keyStateAsString(), + ) + } + + // Show instances for encrypt/decrypt request/response + private val encryptRequestShow: Show[EncryptRequest] = { request => + encryptRequestMsg(request.keyId, request.encryptionAlgorithmAsString) + } + + private val encryptResponseShow: Show[EncryptResponse] = { response => + s"${awsRequestLog(response)} - " + + encryptResponseMsg(response.keyId, response.encryptionAlgorithmAsString) + } + + private val decryptRequestShow: Show[DecryptRequest] = { request => + decryptRequestMsg(request.keyId, request.encryptionAlgorithmAsString) + } + + private val decryptResponseShow: Show[DecryptResponse] = { response => + s"${awsRequestLog(response)} - " + 
decryptResponseMsg(response.keyId, response.encryptionAlgorithmAsString) + } + + private val signRequestShow: Show[SignRequest] = { request => + signRequestMsg(request.keyId, request.messageTypeAsString, request.signingAlgorithmAsString) + } + + private val signResponseShow: Show[SignResponse] = { response => + s"${awsRequestLog(response)} - " + + signResponseMsg(response.keyId, response.signingAlgorithmAsString) + } + + private val deleteKeyRequestShow: Show[ScheduleKeyDeletionRequest] = { request => + deleteKeyRequestMsg(request.keyId) + } + + private val deleteKeyResponseShow: Show[ScheduleKeyDeletionResponse] = { response => + s"${awsRequestLog(response)} - " + + deleteKeyResponseMsg(response.keyId) + } + + implicit val sdkRequestPretty: Show[SdkRequest] = { + case createKeyRequest: CreateKeyRequest => createKeyRequestShow.show(createKeyRequest) + case getPublicKeyRequest: GetPublicKeyRequest => + getPublicKeyRequestShow.show(getPublicKeyRequest) + case describeKeyRequest: DescribeKeyRequest => + retrieveKeyMetadataRequestShow.show(describeKeyRequest) + case encryptRequest: EncryptRequest => encryptRequestShow.show(encryptRequest) + case decryptRequest: DecryptRequest => decryptRequestShow.show(decryptRequest) + case signRequest: SignRequest => signRequestShow.show(signRequest) + case scheduleKeyDeletionRequest: ScheduleKeyDeletionRequest => + deleteKeyRequestShow.show(scheduleKeyDeletionRequest) + case other => other.toString + } + + implicit val sdkResponsePretty: Show[SdkResponse] = { + case createKeyResponse: CreateKeyResponse => createKeyResponseShow.show(createKeyResponse) + case getPublicKeyResponse: GetPublicKeyResponse => + getPublicKeyResponseShow.show(getPublicKeyResponse) + case describeKeyResponse: DescribeKeyResponse => + retrieveKeyMetadataResponseShow.show(describeKeyResponse) + case encryptResponse: EncryptResponse => encryptResponseShow.show(encryptResponse) + case decryptResponse: DecryptResponse => decryptResponseShow.show(decryptResponse) + case signResponse: SignResponse => signResponseShow.show(signResponse) + case scheduleKeyDeletionResponse: ScheduleKeyDeletionResponse => + deleteKeyResponseShow.show(scheduleKeyDeletionResponse) + case response: AwsResponse => + s"[${response.responseMetadata().requestId()}] - ${response.toString}" + case response => response.toString + } +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/aws/tracing/AwsTraceContextInterceptor.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/aws/tracing/AwsTraceContextInterceptor.scala new file mode 100644 index 0000000000..d39bcb6001 --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/aws/tracing/AwsTraceContextInterceptor.scala @@ -0,0 +1,71 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto.kms.aws.tracing + +import com.digitalasset.canton.crypto.kms.aws.audit.AwsRequestResponseLogger.traceContextExecutionAttribute +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger} +import com.digitalasset.canton.tracing.{Spanning, TraceContext, TracerProvider} +import io.opentelemetry.api.trace.Span +import software.amazon.awssdk.core.interceptor.{ + Context, + ExecutionAttribute, + ExecutionAttributes, + ExecutionInterceptor, + SdkExecutionAttribute, +} + +import scala.compat.java8.OptionConverters.RichOptionalGeneric + +import AwsTraceContextInterceptor.{otelSpanExecutionAttribute, withTraceContext} + +/** Starts a new trace span before each request and ends it when the response is received + */ +class AwsTraceContextInterceptor( + override val loggerFactory: NamedLoggerFactory, + tracerProvider: TracerProvider, +) extends ExecutionInterceptor + with NamedLogging + with Spanning { + override def beforeExecution( + context: Context.BeforeExecution, + executionAttributes: ExecutionAttributes, + ): Unit = + withTraceContext(executionAttributes, logger) { tc => + val spanName = Option(executionAttributes.getAttribute(SdkExecutionAttribute.OPERATION_NAME)) + .getOrElse("Aws-Kms-Operation") + val span = tracerProvider.tracer + .spanBuilder(spanName) + .setParent(tc.context) + .startSpan() + executionAttributes.putAttribute(otelSpanExecutionAttribute, span) + () + } + + override def afterExecution( + context: Context.AfterExecution, + executionAttributes: ExecutionAttributes, + ): Unit = + Option(executionAttributes.getAttribute(otelSpanExecutionAttribute)).foreach(_.end()) +} + +object AwsTraceContextInterceptor { + private[aws] val otelSpanExecutionAttribute = + new ExecutionAttribute[Span]("canton-otel-span") + + // Extracts the canton trace context from the attributes; it should be set with ExtendedAwsRequestBuilder.withTraceContext + private[aws] def withTraceContext[A]( + executionAttributes: ExecutionAttributes, + logger: TracedLogger, + )(f: TraceContext => A): A = { + val tc = + executionAttributes.getOptionalAttribute(traceContextExecutionAttribute).asScala.getOrElse { + val emptyTc = TraceContext.empty + logger.info( + "Missing canton trace context; please make sure that all Aws request builders set the trace context execution attribute. See c.d.c.crypto.kms.AwsKms.traceContextExecutionAttribute" + )(emptyTc) + emptyTc + } + f(tc) + } +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/gcp/GcpKms.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/gcp/GcpKms.scala new file mode 100644 index 0000000000..8cc17e2ea2 --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/gcp/GcpKms.scala @@ -0,0 +1,677 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
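+ // Hedged sketch (not part of this diff): callers attach the Canton trace context through the
+ // request override configuration, which is plausibly what the ExtendedAwsRequestBuilder.withTraceContext
+ // helper referenced above does. AwsRequestOverrideConfiguration.Builder#putExecutionAttribute is AWS SDK v2 API.
+ //
+ //   import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration
+ //   import software.amazon.awssdk.services.kms.model.SignRequest
+ //
+ //   def withTraceContext(builder: SignRequest.Builder)(implicit tc: TraceContext): SignRequest.Builder =
+ //     builder.overrideConfiguration(
+ //       AwsRequestOverrideConfiguration
+ //         .builder()
+ //         .putExecutionAttribute(traceContextExecutionAttribute, tc)
+ //         .build()
+ //     )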
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto.kms.gcp + +import cats.data.EitherT +import cats.syntax.either.* +import com.digitalasset.canton.config.CantonRequireTypes.String300 +import com.digitalasset.canton.config.{KmsConfig, ProcessingTimeout} +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.crypto.kms.KmsError.* +import com.digitalasset.canton.crypto.kms.gcp.audit.GcpRequestResponseLogger +import com.digitalasset.canton.crypto.kms.{ + Kms, + KmsEncryptionPublicKey, + KmsError, + KmsKeyId, + KmsSigningPublicKey, +} +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.health.ComponentHealthState +import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, LifeCycle} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.* +import com.google.api.core.ApiFunction +import com.google.api.gax.core.FixedCredentialsProvider +import com.google.api.gax.rpc.{ApiException, ResourceExhaustedException} +import com.google.auth.oauth2.GoogleCredentials +import com.google.cloud.kms.v1 as gcp +import com.google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose +import com.google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm +import com.google.cloud.kms.v1.{AsymmetricSignRequest, CryptoKeyVersion} +import com.google.protobuf.ByteString +import io.grpc.ManagedChannelBuilder +import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo +import org.bouncycastle.openssl.PEMParser + +import java.io.{IOException, StringReader} +import java.util.UUID +import scala.annotation.unused +import scala.concurrent.{ExecutionContext, Future, blocking} + +/** Stands for Google Cloud Platform - Key Management Service and is an internal KMS implementation + * that wraps the necessary cryptographic functions from the GCP SDK. + */ +class GcpKms( + val config: KmsConfig.Gcp, + private val location: gcp.LocationName, + private val kmsClient: gcp.KeyManagementServiceClient, + override val timeouts: ProcessingTimeout, + override val loggerFactory: NamedLoggerFactory, +) extends Kms + with NamedLogging { + + override type Config = KmsConfig.Gcp + + override def name: String = "gcp-kms" + + override protected def initialHealthState: ComponentHealthState = ComponentHealthState.Ok() + + private lazy val loggerKms = new GcpRequestResponseLogger(config.auditLogging, loggerFactory) + + /* Identifies the version for all asymmetric GCP keys. Canton always opts to generate a new key + * rather than adding a new version for that key.
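+ * A key-version resource name therefore always ends in `/cryptoKeyVersions/1`; for
+ * illustration (the project, location and key ring ids below are placeholders):
+ * `projects/my-project/locations/us-east1/keyRings/my-key-ring/cryptoKeys/<kmsKeyId>/cryptoKeyVersions/1`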
+ */ + private val gcpKeyversion = "1" + + private val errorMessagesToRetry = + Set("io.grpc.StatusRuntimeException: UNAVAILABLE: Connection closed") + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + private def convertPublicKeyFromPemToDer(pubKeyPEM: String): Either[String, ByteString] = { + val pemParser: PEMParser = new PEMParser(new StringReader(pubKeyPEM)) + try { + Option(pemParser.readObject) match { + case Some(spki: SubjectPublicKeyInfo) => + Right(ByteString.copyFrom(spki.getEncoded)) + case Some(_) => + Left("unexpected type conversion") + case None => + Left("could not parse public key info from PEM format") + } + } catch { + case e: IOException => + Left( + s"failed to convert public key from PEM to DER format: ${ErrorUtil.messageWithStacktrace(e)}" + ) + } finally { + pemParser.close() + } + } + + private def errorHandler( + err: RuntimeException, + kmsErrorGen: (String, Boolean) => KmsError, + ): KmsError = + err match { + // we look for network failure errors to retry on + case networkErr if errorMessagesToRetry.exists(networkErr.getMessage.contains(_)) => + kmsErrorGen(ErrorUtil.messageWithStacktrace(err), true) + // we retry on resource exceptions as well + case resourceException: ResourceExhaustedException => + logger.debug(s"ResourceExhaustedException with retry: ${resourceException.isRetryable}")( + TraceContext.empty + ) + kmsErrorGen(ErrorUtil.messageWithStacktrace(err), true) + // CancelledException is a subclass of ApiException, so this case must come *before* + // the generic `ApiException` clause to ensure CancelledExceptions are handled specifically. + case cancelled: com.google.api.gax.rpc.CancelledException + if Option(cancelled.getMessage).exists(_.contains("CANCELLED")) => + logger.debug("Got CancelledException(CANCELLED) — treating it as retryable")( + TraceContext.empty + ) + kmsErrorGen(ErrorUtil.messageWithStacktrace(err), true) + case internalErr: com.google.api.gax.rpc.InternalException + if Option(internalErr.getMessage).exists(_.contains("Internal error encountered")) => + logger.debug( + "Got InternalException(Internal error encountered) — treating it as retryable" + )( + TraceContext.empty + ) + kmsErrorGen(ErrorUtil.messageWithStacktrace(err), true) + case apiErr: ApiException if apiErr.isRetryable => + kmsErrorGen(ErrorUtil.messageWithStacktrace(err), true) + case _ => + kmsErrorGen(ErrorUtil.messageWithStacktrace(err), false) + } + + private def wrapKmsCall[A](kmsErrorGen: (String, Boolean) => KmsError)( + kmsCall: => A + )(implicit ec: ExecutionContext): EitherT[FutureUnlessShutdown, KmsError, A] = + EitherT { + FutureUnlessShutdown.outcomeF { + Future { + blocking { + Either.catchOnly[RuntimeException](kmsCall) + } + } + } + }.leftMap[KmsError](err => + errorHandler(err, (errStr, retryable) => kmsErrorGen(errStr, retryable)) + ) + + /** Creates a GCP KMS key based on a series of specifications and returns its key identifier. + * + * @param keySpec + * specifies the type of KMS key to create (e.g. SYMMETRIC_DEFAULT (AES-256-CBC) or RSA_2048). + * @param keyPurpose + * the cryptographic operations for which you can use the KMS key (e.g. signing or encryption). + * @param keyRingId + * specifies the key ring to which the new key will be associated to. A key ring can be set for + * multi-region, which automatically sets all its keys to be multi-region. 
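+ * (For example, a key ring created in a GCP multi-region location such as `europe` makes every
+ * key created in it multi-region; the location name is illustrative.)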
+ * @return + * a key id or an error if it fails to create a key + */ + private def createKey( + keySpec: CryptoKeyVersionAlgorithm, + keyPurpose: CryptoKeyPurpose, + keyRingId: String, + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, KmsKeyId] = { + val kmsKeyIdStr = UUID.randomUUID().toString + val keyRingName = gcp.KeyRingName.of(location.getProject, location.getLocation, keyRingId) + for { + _ <- loggerKms.withLogging[gcp.CryptoKey]( + loggerKms.createKeyRequestMsg(keyPurpose.name, keySpec.name), + _ => loggerKms.createKeyResponseMsg(kmsKeyIdStr, keyPurpose.name, keySpec.name), + )(wrapKmsCall((errStr, retryable) => KmsCreateKeyError(errStr, retryable)) { + val key = + gcp.CryptoKey + .newBuilder() + .setPurpose(keyPurpose) + .setVersionTemplate( + gcp.CryptoKeyVersionTemplate + .newBuilder() + .setAlgorithm(keySpec) + .setProtectionLevel(gcp.ProtectionLevel.HSM) + ) + .build() + kmsClient.createCryptoKey(keyRingName, kmsKeyIdStr, key) + }) + kmsKeyId <- String300 + .create(kmsKeyIdStr) + .toEitherT[FutureUnlessShutdown] + .map(KmsKeyId.apply) + .leftMap[KmsError](err => KmsCreateKeyError(err)) + } yield kmsKeyId + } + + override protected def generateSigningKeyPairInternal( + signingKeySpec: SigningKeySpec, + @unused name: Option[KeyName], + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, KmsKeyId] = + for { + keySpec <- convertToGcpSigningScheme(signingKeySpec) + .leftMap(err => KmsCreateKeyError(err)) + .toEitherT[FutureUnlessShutdown] + // GCP KMS does not allow to store the name alongside the key + kmsKeyId <- createKey( + keySpec, + CryptoKeyPurpose.ASYMMETRIC_SIGN, + config.keyRingId, + ) + } yield kmsKeyId + + override protected def generateSymmetricEncryptionKeyInternal( + @unused name: Option[KeyName] + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, KmsKeyId] = + // GCP KMS does not allow to store the name alongside the key + createKey( + CryptoKeyVersionAlgorithm.GOOGLE_SYMMETRIC_ENCRYPTION, + CryptoKeyPurpose.ENCRYPT_DECRYPT, + config.keyRingId, + ) + + override protected def generateAsymmetricEncryptionKeyPairInternal( + encryptionKeySpec: EncryptionKeySpec, + @unused name: Option[KeyName], + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, KmsKeyId] = + for { + keySpec <- convertToGcpAsymmetricKeyEncryptionSpec(encryptionKeySpec) + .leftMap(err => KmsCreateKeyError(err)) + .toEitherT[FutureUnlessShutdown] + // GCP KMS does not allow to store the name alongside the key + kmsKeyId <- createKey( + keySpec, + CryptoKeyPurpose.ASYMMETRIC_DECRYPT, + config.keyRingId, + ) + } yield kmsKeyId + + private def getPublicKeyInternal(keyId: KmsKeyId)(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, gcp.PublicKey] = { + val keyVersionName = + gcp.CryptoKeyVersionName.of( + config.projectId, + config.locationId, + config.keyRingId, + keyId.unwrap, + gcpKeyversion, + ) + loggerKms.withLogging[gcp.PublicKey]( + loggerKms.getPublicKeyRequestMsg(keyId.unwrap), + publicKey => loggerKms.getPublicKeyResponseMsg(keyId.unwrap, publicKey.getAlgorithm.name), + )( + wrapKmsCall((errStr, retryable) => KmsGetPublicKeyError(keyId, errStr, retryable))( + kmsClient.getPublicKey(keyVersionName) + ) + ) + } + + override protected def getPublicSigningKeyInternal( + keyId: KmsKeyId + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): 
EitherT[FutureUnlessShutdown, KmsError, KmsSigningPublicKey] = + for { + pkResponse <- getPublicKeyInternal(keyId) + keySpec <- convertFromGcpSigningScheme(pkResponse.getAlgorithm) + .leftMap[KmsError](KmsGetPublicKeyError(keyId, _)) + .toEitherT[FutureUnlessShutdown] + pubKeyRaw <- convertPublicKeyFromPemToDer(pkResponse.getPem) + .leftMap[KmsError](KmsGetPublicKeyError(keyId, _)) + .toEitherT[FutureUnlessShutdown] + pubKey <- KmsSigningPublicKey + .create(pubKeyRaw, keySpec) + .leftMap[KmsError](err => KmsGetPublicKeyError(keyId, err.toString)) + .toEitherT[FutureUnlessShutdown] + } yield pubKey + + override protected def getPublicEncryptionKeyInternal(keyId: KmsKeyId)(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, KmsEncryptionPublicKey] = + for { + pkResponse <- getPublicKeyInternal(keyId) + keySpec <- convertFromGcpAsymmetricEncryptionSpec(pkResponse.getAlgorithm) + .leftMap[KmsError](KmsGetPublicKeyError(keyId, _)) + .toEitherT[FutureUnlessShutdown] + pubKeyRaw <- convertPublicKeyFromPemToDer(pkResponse.getPem) + .leftMap[KmsError](KmsGetPublicKeyError(keyId, _)) + .toEitherT[FutureUnlessShutdown] + pubKey <- KmsEncryptionPublicKey + .create(pubKeyRaw, keySpec) + .leftMap[KmsError](err => KmsGetPublicKeyError(keyId, err)) + .toEitherT[FutureUnlessShutdown] + } yield pubKey + + private def convertToGcpSigningScheme( + signingKeySpec: SigningKeySpec + ): Either[String, CryptoKeyVersionAlgorithm] = + signingKeySpec match { + case SigningKeySpec.EcCurve25519 => + Left(s"Unsupported signing key type: ${signingKeySpec.name}") + case SigningKeySpec.EcP256 => + Right(CryptoKeyVersionAlgorithm.EC_SIGN_P256_SHA256) + case SigningKeySpec.EcP384 => + Right(CryptoKeyVersionAlgorithm.EC_SIGN_P384_SHA384) + case SigningKeySpec.EcSecp256k1 => + Right(CryptoKeyVersionAlgorithm.EC_SIGN_SECP256K1_SHA256) + } + + private def convertToGcpAsymmetricEncryptionSpec( + encryptionAlgorithmSpec: EncryptionAlgorithmSpec + ): Either[String, CryptoKeyVersionAlgorithm] = + encryptionAlgorithmSpec match { + case EncryptionAlgorithmSpec.EciesHkdfHmacSha256Aes128Cbc => + Left(s"Unsupported encryption specification: ${encryptionAlgorithmSpec.name}") + case EncryptionAlgorithmSpec.RsaOaepSha256 => + Right(CryptoKeyVersionAlgorithm.RSA_DECRYPT_OAEP_2048_SHA256) + } + + private def convertToGcpAsymmetricKeyEncryptionSpec( + encryptionKeySpec: EncryptionKeySpec + ): Either[String, CryptoKeyVersionAlgorithm] = + encryptionKeySpec match { + case EncryptionKeySpec.EcP256 => + Left(s"Unsupported encryption key type: ${encryptionKeySpec.name}") + case EncryptionKeySpec.Rsa2048 => + Right(CryptoKeyVersionAlgorithm.RSA_DECRYPT_OAEP_2048_SHA256) + } + + private def convertFromGcpSigningScheme( + keySpec: CryptoKeyVersionAlgorithm + ): Either[String, SigningKeySpec] = + keySpec match { + case CryptoKeyVersionAlgorithm.EC_SIGN_P256_SHA256 => Right(SigningKeySpec.EcP256) + case CryptoKeyVersionAlgorithm.EC_SIGN_P384_SHA384 => Right(SigningKeySpec.EcP384) + case CryptoKeyVersionAlgorithm.EC_SIGN_SECP256K1_SHA256 => Right(SigningKeySpec.EcSecp256k1) + case _ => Left(s"Unsupported signing key type: ${keySpec.toString}") + } + + private def convertFromGcpAsymmetricEncryptionSpec( + keySpec: CryptoKeyVersionAlgorithm + ): Either[String, EncryptionKeySpec] = + keySpec match { + case CryptoKeyVersionAlgorithm.RSA_DECRYPT_OAEP_2048_SHA256 => + Right(EncryptionKeySpec.Rsa2048) + case _ => Left(s"Unsupported encryption key type: ${keySpec.toString}") + } + + override protected def 
keyExistsAndIsActiveInternal(keyId: KmsKeyId)(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, Unit] = + retrieveKeyMetadata(keyId) + .leftMap[KmsError] { + case err: KmsRetrieveKeyMetadataError if !err.retryable => + KmsCannotFindKeyError(keyId, err.show) + case err => err + } + .flatMap { keyMetadata => + keyMetadata.getState match { + case pending @ CryptoKeyVersion.CryptoKeyVersionState.PENDING_GENERATION => + EitherT.leftT[FutureUnlessShutdown, Unit]( + KmsKeyDisabledError( + keyId, + s"key is $pending", + retryable = true, + ) + ) + case CryptoKeyVersion.CryptoKeyVersionState.ENABLED => + EitherT.rightT[FutureUnlessShutdown, KmsError](()) + // non retryable error + case otherState => + EitherT.leftT[FutureUnlessShutdown, Unit]( + KmsKeyDisabledError( + keyId, + s"key is $otherState", + ) + ) + } + } + + override protected def encryptSymmetricInternal( + keyId: KmsKeyId, + data: ByteString4096, + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, ByteString6144] = { + val keyName = + gcp.CryptoKeyName.of( + config.projectId, + config.locationId, + config.keyRingId, + keyId.unwrap, + ) + val encryptionAlgorithm = CryptoKeyVersionAlgorithm.GOOGLE_SYMMETRIC_ENCRYPTION + for { + dataEnc <- loggerKms.withLogging[ByteString]( + loggerKms.encryptRequestMsg(keyId.unwrap, encryptionAlgorithm.name), + _ => loggerKms.encryptResponseMsg(keyId.unwrap, encryptionAlgorithm.name), + )( + wrapKmsCall((errStr, retryable) => KmsEncryptError(keyId, errStr, retryable))( + kmsClient.encrypt(keyName, data.unwrap).getCiphertext + ) + ) + ciphertext <- ByteString6144 + .create(dataEnc) + .toEitherT[FutureUnlessShutdown] + .leftMap[KmsError](err => + KmsError + .KmsEncryptError(keyId, s"generated ciphertext does not adhere to bound: $err)") + ) + } yield ciphertext + } + + override protected def decryptSymmetricInternal( + keyId: KmsKeyId, + data: ByteString6144, + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, ByteString4096] = { + val keyName = + gcp.CryptoKeyName.of( + config.projectId, + config.locationId, + config.keyRingId, + keyId.unwrap, + ) + val encryptionAlgorithm = CryptoKeyVersionAlgorithm.GOOGLE_SYMMETRIC_ENCRYPTION + for { + dataPlain <- loggerKms.withLogging[ByteString]( + loggerKms.decryptRequestMsg(keyId.unwrap, encryptionAlgorithm.name), + _ => loggerKms.decryptResponseMsg(keyId.unwrap, encryptionAlgorithm.name), + )( + wrapKmsCall((errStr, retryable) => KmsDecryptError(keyId, errStr, retryable))( + kmsClient.decrypt(keyName, data.unwrap).getPlaintext + ) + ) + plaintext <- ByteString4096 + .create(dataPlain) + .toEitherT[FutureUnlessShutdown] + .leftMap[KmsError](err => + KmsError.KmsDecryptError(keyId, s"plaintext does not adhere to bound: $err)") + ) + } yield plaintext + } + + override protected def decryptAsymmetricInternal( + keyId: KmsKeyId, + data: ByteString256, + encryptionAlgorithmSpec: EncryptionAlgorithmSpec, + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, ByteString190] = { + val keyName = + gcp.CryptoKeyVersionName.of( + config.projectId, + config.locationId, + config.keyRingId, + keyId.unwrap, + gcpKeyversion, + ) + for { + encryptionAlgorithm <- convertToGcpAsymmetricEncryptionSpec(encryptionAlgorithmSpec) + .leftMap(err => KmsDecryptError(keyId, err)) + .toEitherT[FutureUnlessShutdown] + dataPlain <- loggerKms.withLogging[ByteString]( + 
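+ // RSA-OAEP with a 2048-bit key and SHA-256 can carry at most 256 - 2*32 - 2 = 190
+ // plaintext bytes, which is why the decrypted result below is bounded as ByteString190
+ // (and the ciphertext input as ByteString256).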
loggerKms.decryptRequestMsg(keyId.unwrap, encryptionAlgorithm.name), + _ => loggerKms.decryptResponseMsg(keyId.unwrap, encryptionAlgorithm.name), + )( + wrapKmsCall((errStr, retryable) => KmsDecryptError(keyId, errStr, retryable))( + kmsClient.asymmetricDecrypt(keyName, data.unwrap).getPlaintext + ) + ) + plaintext <- ByteString190 + .create(dataPlain) + .toEitherT[FutureUnlessShutdown] + .leftMap[KmsError](err => + KmsError.KmsDecryptError(keyId, s"plaintext does not adhere to bound: $err)") + ) + } yield plaintext + } + + private def signEcDsa( + keyId: KmsKeyId, + keyVersionName: gcp.CryptoKeyVersionName, + signingAlgorithm: CryptoKeyVersionAlgorithm, + data: ByteString, + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, ByteString] = + loggerKms.withLogging[ByteString]( + loggerKms.signRequestMsg(keyId.unwrap, "data", signingAlgorithm.name), + _ => loggerKms.signResponseMsg(keyId.unwrap, signingAlgorithm.name), + )( + wrapKmsCall((errStr, retryable) => KmsSignError(keyId, errStr, retryable)) { + val request = + AsymmetricSignRequest.newBuilder().setData(data).setName(keyVersionName.toString).build() + kmsClient.asymmetricSign(request).getSignature + } + ) + + override protected def signInternal( + keyId: KmsKeyId, + data: ByteString4096, + signingAlgorithmSpec: SigningAlgorithmSpec, + signingKeySpec: SigningKeySpec, + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, ByteString] = { + val keyVersionName = + gcp.CryptoKeyVersionName.of( + config.projectId, + config.locationId, + config.keyRingId, + keyId.unwrap, + gcpKeyversion, + ) + signingAlgorithmSpec match { + case SigningAlgorithmSpec.EcDsaSha256 => + signingKeySpec match { + case SigningKeySpec.EcP256 => + signEcDsa( + keyId, + keyVersionName, + CryptoKeyVersionAlgorithm.EC_SIGN_P256_SHA256, + data.unwrap, + ) + case SigningKeySpec.EcSecp256k1 => + signEcDsa( + keyId, + keyVersionName, + CryptoKeyVersionAlgorithm.EC_SIGN_SECP256K1_SHA256, + data.unwrap, + ) + case SigningKeySpec.EcP384 | SigningKeySpec.EcCurve25519 => + EitherT.leftT[FutureUnlessShutdown, ByteString]( + KmsError.KmsSignError( + keyId, + s"unsupported signing key spec $signingKeySpec for algorithm $signingAlgorithmSpec", + ) + ) + } + + case SigningAlgorithmSpec.EcDsaSha384 => + signEcDsa(keyId, keyVersionName, CryptoKeyVersionAlgorithm.EC_SIGN_P384_SHA384, data.unwrap) + case scheme => + EitherT.leftT[FutureUnlessShutdown, ByteString]( + KmsError.KmsSignError(keyId, s"unsupported signing algorithm scheme: ${scheme.show}") + ) + } + } + + override protected def deleteKeyInternal( + keyId: KmsKeyId + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, Unit] = { + val keyVersionName = + gcp.CryptoKeyVersionName.of( + config.projectId, + config.locationId, + config.keyRingId, + keyId.unwrap, + gcpKeyversion, + ) + loggerKms.withLogging[Unit]( + loggerKms.deleteKeyRequestMsg(keyId.unwrap), + _ => loggerKms.deleteKeyResponseMsg(keyId.unwrap), + )( + wrapKmsCall((errStr, retryable) => KmsDeleteKeyError(keyId, errStr, retryable))( + kmsClient.destroyCryptoKeyVersion(keyVersionName).discard + ) + ) + } + + private def retrieveKeyMetadata( + keyId: KmsKeyId + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, KmsError, gcp.CryptoKeyVersion] = { + val keyVersionName = + gcp.CryptoKeyVersionName.of( + config.projectId, + config.locationId, + config.keyRingId, + keyId.unwrap, + 
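+ // Always version "1": Canton creates a fresh key rather than adding key versions
+ // (see the gcpKeyversion comment above).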
gcpKeyversion, + ) + loggerKms.withLogging[gcp.CryptoKeyVersion]( + loggerKms.retrieveKeyMetadataRequestMsg(keyId.unwrap), + keyMetadata => + loggerKms.retrieveKeyMetadataResponseMsg( + keyId.unwrap, + keyMetadata.getAlgorithm.name, + keyMetadata.getState.name, + ), + )( + wrapKmsCall((errStr, retryable) => KmsRetrieveKeyMetadataError(keyId, errStr, retryable))( + kmsClient.getCryptoKeyVersion(keyVersionName) + ) + ) + } + + override def onClosed(): Unit = LifeCycle.close(kmsClient)(logger) + +} + +object GcpKms { + + def create( + config: KmsConfig.Gcp, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + ): Either[KmsError, GcpKms] = + for { + kms <- + Either + .catchOnly[IOException] { + val credentials = GoogleCredentials.getApplicationDefault() + val keyManagementServiceSettings = + config.endpointOverride match { + case Some(endpoint) => + val channelProvider = gcp.KeyManagementServiceSettings + .defaultGrpcTransportProviderBuilder() + .setEndpoint(endpoint) + .setChannelConfigurator( + new ApiFunction[ManagedChannelBuilder[_], ManagedChannelBuilder[_]] { + override def apply( + managedChannelBuilder: ManagedChannelBuilder[_] + ): ManagedChannelBuilder[_] = { + managedChannelBuilder + .overrideAuthority("cloudkms.googleapis.com") + managedChannelBuilder + } + } + ) + .build() + + gcp.KeyManagementServiceSettings + .newBuilder() + .setCredentialsProvider(FixedCredentialsProvider.create(credentials)) + .setTransportChannelProvider(channelProvider) + .setEndpoint(endpoint) + .build() + case None => + gcp.KeyManagementServiceSettings + .newBuilder() + .setCredentialsProvider(FixedCredentialsProvider.create(credentials)) + .build() + } + new GcpKms( + config, + gcp.LocationName.of(config.projectId, config.locationId), + gcp.KeyManagementServiceClient.create(keyManagementServiceSettings), + timeouts, + loggerFactory, + ) + } + .leftMap[KmsError](err => KmsCreateClientError(ErrorUtil.messageWithStacktrace(err))) + } yield kms + +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/gcp/audit/GcpRequestResponseLogger.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/gcp/audit/GcpRequestResponseLogger.scala new file mode 100644 index 0000000000..2fdc43b05b --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/kms/gcp/audit/GcpRequestResponseLogger.scala @@ -0,0 +1,48 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto.kms.gcp.audit + +import cats.data.EitherT +import com.digitalasset.canton.crypto.kms.KmsError +import com.digitalasset.canton.crypto.kms.audit.KmsRequestResponseLogger +import com.digitalasset.canton.lifecycle.UnlessShutdown.AbortedDueToShutdown +import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, UnlessShutdown} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.Thereafter.syntax.* + +import java.util.UUID +import scala.concurrent.ExecutionContext + +/** This class sits here, inside /audit, so that the GCP KMS logs can be successfully retrieved. 
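+ *
+ * Usage sketch, mirroring the call sites in GcpKms (`kmsCall` stands in for an actual
+ * EitherT-wrapped KMS invocation and is illustrative):
+ * {{{
+ * loggerKms.withLogging[gcp.PublicKey](
+ *   loggerKms.getPublicKeyRequestMsg(keyId.unwrap),
+ *   pk => loggerKms.getPublicKeyResponseMsg(keyId.unwrap, pk.getAlgorithm.name),
+ * )(kmsCall)
+ * }}}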
+ */ +class GcpRequestResponseLogger( + val auditLogging: Boolean, + override val loggerFactory: NamedLoggerFactory, +) extends KmsRequestResponseLogger + with NamedLogging { + + def withLogging[A]( + requestMsg: String, + responseMsg: A => String, + )( + f: => EitherT[FutureUnlessShutdown, KmsError, A] + )(implicit tc: TraceContext, ec: ExecutionContext): EitherT[FutureUnlessShutdown, KmsError, A] = + if (!auditLogging) f + else { + val requestId = UUID.randomUUID().toString + logger.info(s"Sending request [$requestId]: $requestMsg.") + f.thereafter { + case scala.util.Success(UnlessShutdown.Outcome(Right(result))) => + logger.info(s"Received response ${responseMsg(result)}. Original request [$requestId]") + case scala.util.Success(UnlessShutdown.Outcome(Left(kmsError))) => + logger.warn(s"Request $requestId failed with: ${kmsError.show}") + case scala.util.Success(_: AbortedDueToShutdown) => + logger.info(s"Request $requestId aborted due to shutdown.") + case scala.util.Failure(throwable) => + logger.warn(s"Request $requestId failed", throwable) + } + } + +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/provider/jce/JcePrivateCrypto.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/provider/jce/JcePrivateCrypto.scala index bd3be7aabf..af6ab2c412 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/provider/jce/JcePrivateCrypto.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/provider/jce/JcePrivateCrypto.scala @@ -142,7 +142,7 @@ object JcePrivateCrypto { .leftMap(SigningKeyGenerationError.KeyCreationError.apply) } yield keyPair - private[crypto] def generateSigningKeypair( + private[canton] def generateSigningKeypair( keySpec: SigningKeySpec, usage: NonEmpty[Set[SigningKeyUsage]], ): Either[SigningKeyGenerationError, SigningKeyPair] = keySpec match { diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CommunityCryptoPrivateStoreFactory.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CommunityCryptoPrivateStoreFactory.scala deleted file mode 100644 index 2de3e14484..0000000000 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CommunityCryptoPrivateStoreFactory.scala +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto.store - -import cats.data.EitherT -import cats.syntax.either.* -import com.digitalasset.canton.concurrent.FutureSupervisor -import com.digitalasset.canton.config.* -import com.digitalasset.canton.crypto.kms.{Kms, KmsFactory} -import com.digitalasset.canton.crypto.store.db.DbCryptoPrivateStore -import com.digitalasset.canton.crypto.store.memory.InMemoryCryptoPrivateStore -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.logging.{HasLoggerName, NamedLoggerFactory} -import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage} -import com.digitalasset.canton.time.Clock -import com.digitalasset.canton.tracing.{TraceContext, TracerProvider} -import com.digitalasset.canton.version.ReleaseProtocolVersion - -import scala.concurrent.ExecutionContext - -class CommunityCryptoPrivateStoreFactory( - cryptoProvider: CryptoProvider, - kmsConfigO: Option[KmsConfig], - kmsFactory: KmsFactory, - kmsStoreCacheConfig: CacheConfig, - privateKeyStoreConfig: PrivateKeyStoreConfig, - futureSupervisor: FutureSupervisor, - clock: Clock, - executionContext: ExecutionContext, -) extends CryptoPrivateStoreFactory - with HasLoggerName - with EncryptedCryptoPrivateStoreHelper { - - private def createKms( - errFn: String => CryptoPrivateStoreError, - timeouts: ProcessingTimeout, - tracerProvider: TracerProvider, - loggerFactory: NamedLoggerFactory, - ): Either[CryptoPrivateStoreError, Kms] = for { - kmsConfig <- kmsConfigO.toRight( - errFn( - "Missing KMS configuration for KMS crypto provider" - ) - ) - kms <- kmsFactory - .create( - kmsConfig, - timeouts, - futureSupervisor, - tracerProvider, - clock, - loggerFactory, - executionContext, - ) - .leftMap(err => errFn(s"Failed to create KMS client: $err")) - } yield kms - - override def create( - storage: Storage, - releaseProtocolVersion: ReleaseProtocolVersion, - timeouts: ProcessingTimeout, - loggerFactory: NamedLoggerFactory, - tracerProvider: TracerProvider, - )(implicit - ec: ExecutionContext, - traceContext: TraceContext, - ): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, CryptoPrivateStore] = - cryptoProvider match { - case CryptoProvider.Kms => - EitherT.fromEither[FutureUnlessShutdown] { - createKms( - CryptoPrivateStoreError.KmsPrivateStoreError.apply, - timeouts, - tracerProvider, - loggerFactory, - ).map { kms => - KmsCryptoPrivateStore - .create(storage, kms, kmsStoreCacheConfig, timeouts, loggerFactory) - } - } - case CryptoProvider.Jce => - for { - store <- storage match { - case jdbc: DbStorage => - val dbCryptoPrivateStore = new DbCryptoPrivateStore( - jdbc, - releaseProtocolVersion, - timeouts, - loggerFactory, - ) - // check if encryption is enabled - privateKeyStoreConfig.encryption match { - case Some(EncryptedPrivateStoreConfig.Kms(kmsKeyId, reverted)) => - for { - kms <- createKms( - CryptoPrivateStoreError.EncryptedPrivateStoreError.apply, - timeouts, - tracerProvider, - loggerFactory, - ).toEitherT[FutureUnlessShutdown] - store <- EncryptedCryptoPrivateStore - .create( - storage, - dbCryptoPrivateStore, - kms, - kmsKeyId, - reverted, - releaseProtocolVersion, - timeouts, - loggerFactory, - ) - } yield store - case None => - EitherT.rightT[FutureUnlessShutdown, CryptoPrivateStoreError]( - dbCryptoPrivateStore - ) - } - case _: MemoryStorage => - EitherT.rightT[FutureUnlessShutdown, CryptoPrivateStoreError]( - new InMemoryCryptoPrivateStore(releaseProtocolVersion, loggerFactory) - ) - } - _ <- 
store.toExtended match { - case Some(extendedStore) => extendedStore.migratePrivateKeys(storage.isActive, timeouts) - case None => EitherT.pure[FutureUnlessShutdown, CryptoPrivateStoreError](()) - } - } yield store - } - -} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStoreExtended.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStoreExtended.scala index 28a49e1ea6..cf1da72c83 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStoreExtended.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStoreExtended.scala @@ -91,6 +91,11 @@ trait CryptoPrivateStoreExtended extends CryptoPrivateStore { this: NamedLogging traceContext: TraceContext ): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Set[StoredPrivateKey]] + @VisibleForTesting + private[canton] def listPrivateKeys()(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Set[StoredPrivateKey]] + private[crypto] def deletePrivateKey(keyId: Fingerprint)(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Unit] diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStoreFactory.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStoreFactory.scala index f9078bd6a2..5899a7e97c 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStoreFactory.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStoreFactory.scala @@ -4,17 +4,16 @@ package com.digitalasset.canton.crypto.store import cats.data.EitherT +import cats.syntax.either.* import com.digitalasset.canton.concurrent.FutureSupervisor -import com.digitalasset.canton.config.{ - CachingConfigs, - CryptoProvider, - PrivateKeyStoreConfig, - ProcessingTimeout, -} -import com.digitalasset.canton.crypto.kms.CommunityKmsFactory +import com.digitalasset.canton.config.* +import com.digitalasset.canton.crypto.kms.{Kms, KmsFactory} +import com.digitalasset.canton.crypto.store.db.DbCryptoPrivateStore +import com.digitalasset.canton.crypto.store.memory.InMemoryCryptoPrivateStore import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.logging.{HasLoggerName, NamedLoggerFactory} +import com.digitalasset.canton.replica.ReplicaManager +import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage} import com.digitalasset.canton.time.Clock import com.digitalasset.canton.tracing.{TraceContext, TracerProvider} import com.digitalasset.canton.version.ReleaseProtocolVersion @@ -22,7 +21,114 @@ import com.google.common.annotations.VisibleForTesting import scala.concurrent.ExecutionContext -trait CryptoPrivateStoreFactory { +class CryptoPrivateStoreFactory( + cryptoProvider: CryptoProvider, + kmsConfigO: Option[KmsConfig], + kmsStoreCacheConfig: CacheConfig, + privateKeyStoreConfig: PrivateKeyStoreConfig, + replicaManager: Option[ReplicaManager], + futureSupervisor: FutureSupervisor, + clock: Clock, + executionContext: ExecutionContext, +) extends EncryptedCryptoPrivateStoreHelper + with HasLoggerName { + + private def createKms( + errFn: String => CryptoPrivateStoreError, + timeouts: 
ProcessingTimeout, + tracerProvider: TracerProvider, + loggerFactory: NamedLoggerFactory, + ): Either[CryptoPrivateStoreError, Kms] = for { + kmsConfig <- kmsConfigO.toRight( + errFn( + "Missing KMS configuration for KMS crypto provider" + ) + ) + kms <- KmsFactory + .create( + kmsConfig, + timeouts, + futureSupervisor, + tracerProvider, + clock, + loggerFactory, + executionContext, + ) + .leftMap(err => errFn(s"Failed to create KMS client: $err")) + } yield kms + + private def createInternal( + storage: Storage, + releaseProtocolVersion: ReleaseProtocolVersion, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + tracerProvider: TracerProvider, + )(implicit + ec: ExecutionContext, + traceContext: TraceContext, + ): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, CryptoPrivateStore] = + cryptoProvider match { + case CryptoProvider.Kms => + EitherT.fromEither[FutureUnlessShutdown] { + createKms( + CryptoPrivateStoreError.KmsPrivateStoreError.apply, + timeouts, + tracerProvider, + loggerFactory, + ).map { kms => + KmsCryptoPrivateStore + .create(storage, kms, kmsStoreCacheConfig, timeouts, loggerFactory) + } + } + case CryptoProvider.Jce => + for { + store <- storage match { + case jdbc: DbStorage => + val dbCryptoPrivateStore = new DbCryptoPrivateStore( + jdbc, + releaseProtocolVersion, + timeouts, + loggerFactory, + ) + // check if encryption is enabled + privateKeyStoreConfig.encryption match { + case Some(EncryptedPrivateStoreConfig.Kms(kmsKeyId, reverted)) => + for { + kms <- createKms( + CryptoPrivateStoreError.EncryptedPrivateStoreError.apply, + timeouts, + tracerProvider, + loggerFactory, + ).toEitherT[FutureUnlessShutdown] + store <- EncryptedCryptoPrivateStore + .create( + storage, + dbCryptoPrivateStore, + kms, + kmsKeyId, + reverted, + releaseProtocolVersion, + timeouts, + loggerFactory, + ) + } yield store + case None => + EitherT.rightT[FutureUnlessShutdown, CryptoPrivateStoreError]( + dbCryptoPrivateStore + ) + } + case _: MemoryStorage => + EitherT.rightT[FutureUnlessShutdown, CryptoPrivateStoreError]( + new InMemoryCryptoPrivateStore(releaseProtocolVersion, loggerFactory) + ) + } + _ <- store.toExtended match { + case Some(extendedStore) => extendedStore.migratePrivateKeys(storage.isActive, timeouts) + case None => EitherT.pure[FutureUnlessShutdown, CryptoPrivateStoreError](()) + } + } yield store + } + def create( storage: Storage, releaseProtocolVersion: ReleaseProtocolVersion, @@ -32,7 +138,16 @@ trait CryptoPrivateStoreFactory { )(implicit ec: ExecutionContext, traceContext: TraceContext, - ): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, CryptoPrivateStore] + ): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, CryptoPrivateStore] = + createInternal(storage, releaseProtocolVersion, timeouts, loggerFactory, tracerProvider).map { + case encryptedPrivateStore: EncryptedCryptoPrivateStore => + // Register the encrypted private store with the replica manager to refresh the in-memory caches on failover + replicaManager.foreach(_.setPrivateKeyStore(encryptedPrivateStore)) + encryptedPrivateStore + + case store => store + } + } object CryptoPrivateStoreFactory { @@ -40,12 +155,12 @@ object CryptoPrivateStoreFactory { /** A simple version of a crypto private store factory that does not use a KMS for testing. 
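 * A minimal sketch of a call site (the clock and execution context come from the test
 * harness; the names are illustrative):
 * {{{
 * val factory = CryptoPrivateStoreFactory.withoutKms(clock, executionContext)
 * }}}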
*/ @VisibleForTesting def withoutKms(clock: Clock, executionContext: ExecutionContext): CryptoPrivateStoreFactory = - new CommunityCryptoPrivateStoreFactory( + new CryptoPrivateStoreFactory( cryptoProvider = CryptoProvider.Jce, kmsConfigO = None, - kmsFactory = CommunityKmsFactory, kmsStoreCacheConfig = CachingConfigs.kmsMetadataCache, privateKeyStoreConfig = PrivateKeyStoreConfig(), + replicaManager = None, futureSupervisor = FutureSupervisor.Noop, clock = clock, executionContext = executionContext, diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/EncryptedCryptoPrivateStore.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/EncryptedCryptoPrivateStore.scala index 7bcf88b5b1..d28ed00269 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/EncryptedCryptoPrivateStore.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/EncryptedCryptoPrivateStore.scala @@ -100,8 +100,16 @@ class EncryptedCryptoPrivateStore( traceContext: TraceContext ): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Set[StoredPrivateKey]] = for { - storedKeys <- store - .listPrivateKeys(purpose, encrypted = true) + storedKeys <- store.listPrivateKeys(purpose, encrypted = true) + keys <- storedKeys.toList.parTraverse(decryptStoredKey(kms, _)) + } yield keys.toSet + + @VisibleForTesting + private[canton] def listPrivateKeys()(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Set[StoredPrivateKey]] = + for { + storedKeys <- store.listPrivateKeys() keys <- storedKeys.toList.parTraverse(decryptStoredKey(kms, _)) } yield keys.toSet diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPrivateStore.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPrivateStore.scala index 24ce133d9a..971bc778bc 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPrivateStore.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPrivateStore.scala @@ -77,6 +77,11 @@ class DbCryptoPrivateStore( import storage.api.* + private def queryKeys(): DbAction.ReadOnly[Set[StoredPrivateKey]] = + sql"select key_id, data, purpose, name, wrapper_key_id from common_crypto_private_keys" + .as[StoredPrivateKey] + .map(_.toSet) + private def queryKeys(purpose: KeyPurpose): DbAction.ReadOnly[Set[StoredPrivateKey]] = sql"select key_id, data, purpose, name, wrapper_key_id from common_crypto_private_keys where purpose = $purpose" .as[StoredPrivateKey] @@ -197,10 +202,13 @@ class DbCryptoPrivateStore( private[canton] def listPrivateKeys(purpose: KeyPurpose)(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Set[StoredPrivateKey]] = - EitherT.right( - storage - .query(queryKeys(purpose), functionFullName) - ) + EitherT.right(storage.query(queryKeys(purpose), functionFullName)) + + @VisibleForTesting + private[canton] def listPrivateKeys()(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Set[StoredPrivateKey]] = + EitherT.right(storage.query(queryKeys(), functionFullName)) private def deleteKey(keyId: Fingerprint): SqlAction[Int, NoStream, Effect.Write] = sqlu"delete from common_crypto_private_keys where key_id = $keyId" diff --git 
a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/memory/InMemoryCryptoPrivateStore.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/memory/InMemoryCryptoPrivateStore.scala index c771234f5a..533389f591 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/memory/InMemoryCryptoPrivateStore.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/memory/InMemoryCryptoPrivateStore.scala @@ -183,6 +183,15 @@ class InMemoryCryptoPrivateStore( ) }).map(_.toSet) + @VisibleForTesting + private[canton] def listPrivateKeys()(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Set[StoredPrivateKey]] = + for { + signingPrivateKeys <- listPrivateKeys(Signing) + encryptionPrivateKeys <- listPrivateKeys(Encryption) + } yield signingPrivateKeys ++ encryptionPrivateKeys + private[crypto] def deletePrivateKey( keyId: Fingerprint )(implicit diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/data/CantonTimestamp.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/data/CantonTimestamp.scala index b2a5a9104c..b229c54bb7 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/data/CantonTimestamp.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/data/CantonTimestamp.scala @@ -38,6 +38,7 @@ final case class CantonTimestamp(underlying: LfTimestamp) def plus(d: Duration): CantonTimestamp = new CantonTimestamp(underlying.add(d)) def add(d: Duration): CantonTimestamp = new CantonTimestamp(underlying.add(d)) + def add(d: FiniteDuration): CantonTimestamp = new CantonTimestamp(underlying.add(d.toJava)) def addMicros(micros: Long): CantonTimestamp = new CantonTimestamp(underlying.addMicros(micros)) diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/data/FullInformeeTree.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/data/FullInformeeTree.scala index be1a6f4a88..e31ad45595 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/data/FullInformeeTree.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/data/FullInformeeTree.scala @@ -38,7 +38,7 @@ final case class FullInformeeTree private (tree: GenTransactionTree)( @transient override protected lazy val companionObj: FullInformeeTree.type = FullInformeeTree - lazy val transactionId: TransactionId = TransactionId.fromRootHash(tree.rootHash) + lazy val transactionId: UpdateId = UpdateId.fromRootHash(tree.rootHash) private lazy val commonMetadata: CommonMetadata = checked(tree.commonMetadata.tryUnwrap) lazy val synchronizerId: PhysicalSynchronizerId = commonMetadata.synchronizerId diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/data/GenTransactionTree.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/data/GenTransactionTree.scala index 1faaccf3b1..394a763509 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/data/GenTransactionTree.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/data/GenTransactionTree.scala @@ -129,7 +129,7 @@ final case class GenTransactionTree private ( case _ => throw new UnsupportedOperationException(s"Invalid view position: $viewPos") } - lazy val transactionId: TransactionId = TransactionId.fromRootHash(rootHash) + lazy val transactionId: UpdateId = UpdateId.fromRootHash(rootHash) /** Yields the full informee tree corresponding to this 
transaction tree. The resulting informee * tree is full, only if every view common data is unblinded. @@ -206,9 +206,7 @@ final case class GenTransactionTree private ( * Later on, if a view shares the same recipients tree, we can use the same randomness/key. */ val witnessMap = - allTransactionViewTrees.foldLeft( - Map.empty[ViewPosition, Witnesses] - ) { case (ws, tvt) => + allTransactionViewTrees.foldLeft(Map.empty[ViewPosition, Witnesses]) { case (ws, tvt) => val parentPosition = ViewPosition(tvt.viewPosition.position.drop(1)) val witnesses = ws.get(parentPosition) match { case Some(parentWitnesses) => @@ -223,6 +221,7 @@ final case class GenTransactionTree private ( ws.updated(tvt.viewPosition, witnesses) } + // TODO(#23971) This is horribly inefficient as we're going over all recipients over and over again. We could fuse all this into a single tree traversal. for { allViewsWithMetadata <- allTransactionViewTrees.parTraverse { tvt => val viewWitnesses = witnessMap(tvt.viewPosition) @@ -231,10 +230,8 @@ final case class GenTransactionTree private ( // We return the witnesses for testing purposes. We will use the recipients to derive our view encryption key. for { - viewRecipients <- viewWitnesses - .toRecipients(topologySnapshot) - parentRecipients <- parentWitnessesO - .traverse(_.toRecipients(topologySnapshot)) + viewRecipients <- viewWitnesses.toRecipients(topologySnapshot) + parentRecipients <- parentWitnessesO.traverse(_.toRecipients(topologySnapshot)) } yield ViewWithWitnessesAndRecipients(tvt, viewWitnesses, viewRecipients, parentRecipients) } } yield allViewsWithMetadata diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/data/LightTransactionViewTree.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/data/LightTransactionViewTree.scala index 1d61c55ac6..b55b113370 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/data/LightTransactionViewTree.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/data/LightTransactionViewTree.scala @@ -12,6 +12,7 @@ import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.protocol.{v30, *} import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.util.RoseTree import com.digitalasset.canton.version.* import monocle.PLens @@ -161,32 +162,30 @@ object LightTransactionViewTree * To make the method more generic, light view trees are represented as `A` and full view trees * as `B` and the `lens` parameter is used to convert between these types, as needed. * + * @tparam C + * Additional data associated with [[LightTransactionViewTree]]s. Each + * [[FullTransactionViewTree]] in the result aggregates the data from all aggregated + * [[LightTransactionViewTree]]s in preorder. * @param topLevelOnly * whether to return only top-level full view trees * @param lightViewTrees * the light transaction view trees to convert - * @return - * A triple consisting of (1) the full view trees that could be converted, (2) the light view - * trees that could not be converted due to missing descendants, and (3) duplicate light view - * trees in the input. The view trees in the output are sorted by view position, i.e., in - * pre-order. If the input contains the same view several times, then the output (1) contains - * one occurrence and the output (3) every other occurrence of the view. 
*/ - def toFullViewTrees[A, B]( - lens: PLens[A, B, LightTransactionViewTree, FullTransactionViewTree], + def toFullViewTrees[A, B, C]( + lens: PLens[A, B, (LightTransactionViewTree, C), (FullTransactionViewTree, RoseTree[C])], protocolVersion: ProtocolVersion, hashOps: HashOps, + // TODO(#23971) we don't need this parameter any more, only the true case is used. topLevelOnly: Boolean, - )( - lightViewTrees: Seq[A] - ): (Seq[B], Seq[A], Seq[A]) = { + lightViewTrees: Seq[A], + ): ToFullViewTreesResult[A, B] = { val lightViewTreesBoxedInPostOrder = lightViewTrees - .sortBy(lens.get(_).viewPosition)(ViewPosition.orderViewPosition.toOrdering) + .sortBy(lens.get(_)._1.viewPosition)(ViewPosition.orderViewPosition.toOrdering) .reverse // All reconstructed full views - val fullViewByHash = mutable.Map.empty[ViewHash, TransactionView] + val fullViewByHash = mutable.Map.empty[ViewHash, (TransactionView, RoseTree[C])] // All reconstructed full view trees, boxed, paired with their view hashes. val allFullViewTreesInPreorderB = mutable.ListBuffer.empty[(ViewHash, B)] // All light view trees, boxed, that could not be reconstructed to full view trees, due to missing descendants @@ -197,18 +196,19 @@ object LightTransactionViewTree val subviewHashesB = Set.newBuilder[ViewHash] for (lightViewTreeBoxed <- lightViewTreesBoxedInPostOrder) { - val lightViewTree = lens.get(lightViewTreeBoxed) + val (lightViewTree, c) = lens.get(lightViewTreeBoxed) val subviewHashes = lightViewTree.subviewHashes.toSet val missingSubviews = subviewHashes -- fullViewByHash.keys if (missingSubviews.isEmpty) { - val fullSubviewsSeq = lightViewTree.subviewHashes.map(fullViewByHash) + val (fullSubviewsSeq, subviewCs) = lightViewTree.subviewHashes.map(fullViewByHash).unzip val fullSubviews = TransactionSubviews(fullSubviewsSeq)(protocolVersion, hashOps) val fullView = lightViewTree.view.tryCopy(subviews = fullSubviews) val fullViewTree = FullTransactionViewTree.tryCreate( lightViewTree.tree.mapUnblindedRootViews(_.replace(fullView.viewHash, fullView)) ) - val fullViewTreeBoxed = lens.replace(fullViewTree)(lightViewTreeBoxed) + val cs = RoseTree(c, subviewCs*) + val fullViewTreeBoxed = lens.replace(fullViewTree -> cs)(lightViewTreeBoxed) if (topLevelOnly) subviewHashesB ++= subviewHashes @@ -217,7 +217,7 @@ object LightTransactionViewTree duplicateLightViewTreesB += lightViewTreeBoxed } else { (fullViewTree.viewHash -> fullViewTreeBoxed) +=: allFullViewTreesInPreorderB - fullViewByHash += fullView.viewHash -> fullView + fullViewByHash += fullView.viewHash -> (fullView -> cs) } } else { invalidLightViewTreesB += lightViewTreeBoxed @@ -234,13 +234,32 @@ object LightTransactionViewTree fullViewTreeBoxed } - ( + ToFullViewTreesResult( allFullViewTreesInPreorder, invalidLightViewTreesB.result().reverse, duplicateLightViewTreesB.result().reverse, ) } + /** The result of the conversion from [[LightTransactionViewTree]]s to + * [[FullTransactionViewTree]]s. The view trees in the output are sorted by view position, + * i.e., in pre-order. If the input contains the same view several times, then + * [[ToFullViewTreesResult.convertedFullViews]] contains one occurrence and + * [[ToFullViewTreesResult.duplicateLightViews]] every other occurrence of the view. + * + * @param convertedFullViews + * the full view trees that could be converted + * @param lightViewsWithMissingDescendants + * the light view trees that could not be converted due to missing descendants + * @param duplicateLightViews + * duplicate light view trees in the input. 
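+ *
+ * A hedged consumption sketch (the lens, protocol version, hash ops and input trees are
+ * assumed to be in scope):
+ * {{{
+ * val ToFullViewTreesResult(fullTrees, missingDescendants, duplicates) =
+ *   LightTransactionViewTree.toFullViewTrees(lens, protocolVersion, hashOps, topLevelOnly = true, lightTrees)
+ * }}}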
+ */ + final case class ToFullViewTreesResult[+A, +B]( + convertedFullViews: Seq[B], + lightViewsWithMissingDescendants: Seq[A], + duplicateLightViews: Seq[A], + ) + /** Turns a full transaction view tree into a lightweight one. Not stack-safe. */ def fromTransactionViewTree( tvt: FullTransactionViewTree, diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/data/OnboardingTransactions.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/data/OnboardingTransactions.scala index d956299d69..a5b1efa7e1 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/data/OnboardingTransactions.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/data/OnboardingTransactions.scala @@ -3,15 +3,28 @@ package com.digitalasset.canton.data +import com.digitalasset.canton.crypto.Signature import com.digitalasset.canton.topology.transaction.* /** Onboarding transactions for an external party */ final case class OnboardingTransactions( - namespaceDelegation: SignedTopologyTransaction[TopologyChangeOp.Replace, NamespaceDelegation], + namespace: SignedTopologyTransaction[TopologyChangeOp.Replace, TopologyMapping], partyToParticipant: SignedTopologyTransaction[TopologyChangeOp.Replace, PartyToParticipant], partyToKeyMapping: SignedTopologyTransaction[TopologyChangeOp.Replace, PartyToKeyMapping], ) { def toSeq: Seq[SignedTopologyTransaction[TopologyChangeOp.Replace, TopologyMapping]] = - Seq(namespaceDelegation, partyToParticipant, partyToKeyMapping) + Seq(namespace, partyToParticipant, partyToKeyMapping) + + def transactionsWithSingleSignature + : Seq[(TopologyTransaction[TopologyChangeOp.Replace, TopologyMapping], Seq[Signature])] = + toSeq.map { signedTransaction => + signedTransaction.transaction -> signedTransaction.signatures.collect { + case SingleTransactionSignature(_, signature) => signature + }.toSeq + } + + def multiTransactionSignatures: Seq[Signature] = toSeq.flatMap(_.signatures).collect { + case MultiTransactionSignature(_, signature) => signature + } } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/data/TransactionView.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/data/TransactionView.scala index 80325e28e3..6c6d1e353d 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/data/TransactionView.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/data/TransactionView.scala @@ -11,7 +11,10 @@ import com.digitalasset.canton.data.ActionDescription.{ FetchActionDescription, } import com.digitalasset.canton.data.TransactionView.{ + AllSubviewState, InvalidView, + TransactionViewTreeOps, + TransactionViewTreeOpsWithPosition, WithPath, validateViewCommonData, validateViewParticipantData, @@ -22,7 +25,7 @@ import com.digitalasset.canton.logging.{HasLoggerName, NamedLoggingContext} import com.digitalasset.canton.protocol.{v30, *} import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult import com.digitalasset.canton.util.collection.MapsUtil -import com.digitalasset.canton.util.{ErrorUtil, NamedLoggingLazyVal} +import com.digitalasset.canton.util.{ErrorUtil, NamedLoggingLazyVal, RoseTree} import com.digitalasset.canton.version.* import com.digitalasset.canton.{LfVersioned, ProtoDeserializationError} import com.google.common.annotations.VisibleForTesting @@ -95,9 +98,15 @@ final case class TransactionView private ( val viewHash: ViewHash = ViewHash.fromRootHash(rootHash) + lazy val allSubviews: RoseTree[TransactionView] = + 
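+    // Assembled bottom-up via RoseTree.foldLeft: `init` seeds a state per unblinded view,
+    // `update` collects the already-built child trees, and `finish` wraps the view and its
+    // children into a RoseTree node (see AllSubviewState below).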
RoseTree.foldLeft(TransactionViewTreeOps, this)(init = AllSubviewState.init)(finish = _.finish)( + update = _.update(_) + ) + /** Traverses all unblinded subviews `v1, v2, v3, ...` in pre-order and yields `f(...f(f(z, v1), * v2)..., vn)` */ + // TODO(#23971) remove this function as it's used only in tests def foldLeft[A](z: A)(f: (A, TransactionView) => A): A = subviews.unblindedElements .to(LazyList) @@ -106,26 +115,23 @@ final case class TransactionView private ( /** Yields all (direct and indirect) subviews of this view in pre-order. The first element is this * view. */ - lazy val flatten: Seq[TransactionView] = - foldLeft(Seq.newBuilder[TransactionView])((acc, v) => acc += v).result() + lazy val flatten: Seq[TransactionView] = allSubviews.preorder.toSeq lazy val tryFlattenToParticipantViews: Seq[ParticipantTransactionView] = flatten.map(ParticipantTransactionView.tryCreate) + def allSubviewsWithPositionTree( + rootPos: ViewPosition + ): RoseTree[(TransactionView, ViewPosition)] = + RoseTree.foldLeft(TransactionViewTreeOpsWithPosition, this -> rootPos)( + init = AllSubviewState.init + )(finish = _.finish)(update = _.update(_)) + /** Yields all (direct and indirect) subviews of this view in pre-order, along with the subview * position under the root view position `rootPos`. The first element is this view. */ - def allSubviewsWithPosition(rootPos: ViewPosition): Seq[(TransactionView, ViewPosition)] = { - def helper( - view: TransactionView, - viewPos: ViewPosition, - ): Seq[(TransactionView, ViewPosition)] = - (view, viewPos) +: view.subviews.unblindedElementsWithIndex.flatMap { - case (view, viewIndex) => helper(view, viewIndex +: viewPos) - } - - helper(this, rootPos) - } + def allSubviewsWithPosition(rootPos: ViewPosition): Seq[(TransactionView, ViewPosition)] = + allSubviewsWithPositionTree(rootPos).preorder.toSeq override protected def pretty: Pretty[TransactionView] = prettyOfClass( param("root hash", _.rootHash), @@ -504,4 +510,33 @@ object TransactionView /** Indicates an attempt to create an invalid view. 
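`allSubviewsWithPosition` encodes a subview's position as an index path, with the child index prepended to the parent's position. A compact sketch with a bare `Node` type and 0-based indices standing in for `TransactionView` and `MerklePathElement`:

```scala
// A child's position is its index within the parent prepended to the parent's
// position, so positions read leaf-first.
final case class Node(children: List[Node])

def withPositions(n: Node, pos: List[Int]): Seq[(Node, List[Int])] =
  (n, pos) +: n.children.zipWithIndex.flatMap { case (c, i) =>
    withPositions(c, i :: pos)
  }
```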
*/ final case class InvalidView(message: String) extends RuntimeException(message) + + case object TransactionViewTreeOps extends RoseTree.TreeOps[TransactionView] { + override def children(view: TransactionView): Iterator[TransactionView] = + view.subviews.unblindedElements.iterator + } + + case object TransactionViewTreeOpsWithPosition + extends RoseTree.TreeOps[(TransactionView, ViewPosition)] { + override def children( + viewAndPos: (TransactionView, ViewPosition) + ): Iterator[(TransactionView, ViewPosition)] = { + val (view, position) = viewAndPos + view.subviews.unblindedElementsWithIndex.iterator.map { case (v, idx) => + (v, idx +: position) + } + } + } + + private final case class AllSubviewState[A]( + visitedSiblingsReversed: List[RoseTree[A]], + current: A, + ) { + def update(next: RoseTree[A]): AllSubviewState[A] = + copy(visitedSiblingsReversed = next +: visitedSiblingsReversed) + def finish: RoseTree[A] = RoseTree(current, visitedSiblingsReversed.reverse*) + } + private object AllSubviewState { + def init[A](current: A): AllSubviewState[A] = AllSubviewState(List.empty, current) + } } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/data/TransactionViewTree.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/data/TransactionViewTree.scala index 872f8b3d2a..b64313fa97 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/data/TransactionViewTree.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/data/TransactionViewTree.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.data import cats.syntax.either.* import com.digitalasset.canton.data.ViewPosition.MerklePathElement -import com.digitalasset.canton.protocol.{RootHash, TransactionId, ViewHash} +import com.digitalasset.canton.protocol.{RootHash, UpdateId, ViewHash} import com.digitalasset.canton.sequencing.protocol.MediatorGroupRecipient import com.digitalasset.canton.topology.PhysicalSynchronizerId import com.digitalasset.canton.{LfPartyId, WorkflowId} @@ -44,7 +44,7 @@ trait TransactionViewTree extends ViewTree { override def rootHash: RootHash = tree.rootHash - lazy val transactionId: TransactionId = TransactionId.fromRootHash(rootHash) + lazy val transactionId: UpdateId = UpdateId.fromRootHash(rootHash) override def toBeSigned: Option[RootHash] = if (isTopLevel) Some(rootHash) else None diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/data/UnassignmentData.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/data/UnassignmentData.scala index e2b4c4a075..3750df1ec6 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/data/UnassignmentData.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/data/UnassignmentData.scala @@ -29,7 +29,7 @@ final case class UnassignmentData( reassigningParticipants: Set[ParticipantId], sourcePSId: Source[PhysicalSynchronizerId], targetPSId: Target[PhysicalSynchronizerId], - targetTimestamp: CantonTimestamp, + targetTimestamp: Target[CantonTimestamp], // Data unknown by the submitter unassignmentTs: CantonTimestamp, ) extends HasProtocolVersionedWrapper[UnassignmentData] { @@ -59,7 +59,7 @@ final case class UnassignmentData( reassigningParticipantUids = reassigningParticipants.map(_.uid.toProtoPrimitive).toSeq, sourcePhysicalSynchronizerId = sourcePSId.unwrap.toProtoPrimitive, targetPhysicalSynchronizerId = targetPSId.unwrap.toProtoPrimitive, - targetTimestamp = targetTimestamp.toProtoTimestamp.some, + targetTimestamp = 
targetTimestamp.unwrap.toProtoTimestamp.some, unassignmentTs = unassignmentTs.toProtoTimestamp.some, ) } @@ -87,7 +87,7 @@ object UnassignmentData sourcePSId = unassignmentRequest.sourceSynchronizer, targetPSId = unassignmentRequest.targetSynchronizer, unassignmentTs = unassignmentTs, - targetTimestamp = unassignmentRequest.targetTimeProof.timestamp, + targetTimestamp = unassignmentRequest.targetTimestamp, ) private def fromProtoV30(proto: v30.UnassignmentData): ParsingResult[UnassignmentData] = for { @@ -130,11 +130,13 @@ object UnassignmentData ) .map(Target(_)) - targetTimestamp <- ProtoConverter.parseRequired( - CantonTimestamp.fromProtoTimestamp, - "target_timestamp", - proto.targetTimestamp, - ) + targetTimestamp <- ProtoConverter + .parseRequired( + CantonTimestamp.fromProtoTimestamp, + "target_timestamp", + proto.targetTimestamp, + ) + .map(Target(_)) unassignmentTs <- ProtoConverter.parseRequired( CantonTimestamp.fromProtoTimestamp, diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/data/UnassignmentViewTree.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/data/UnassignmentViewTree.scala index 2be9608724..9db8874ef9 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/data/UnassignmentViewTree.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/data/UnassignmentViewTree.scala @@ -18,7 +18,7 @@ import com.digitalasset.canton.data.ReassignmentRef.ContractIdRef import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.protocol.messages.UnassignmentMediatorMessage import com.digitalasset.canton.protocol.{v30, *} -import com.digitalasset.canton.sequencing.protocol.{MediatorGroupRecipient, TimeProof} +import com.digitalasset.canton.sequencing.protocol.MediatorGroupRecipient import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult import com.digitalasset.canton.topology.{ParticipantId, PhysicalSynchronizerId, UniqueIdentifier} @@ -291,9 +291,8 @@ object UnassignmentCommonData * Contract being reassigned * @param targetSynchronizerId * The synchronizer to which the contract is reassigned. - * @param targetTimeProof - * The sequenced event from the target synchronizer whose timestamp defines the baseline for - * measuring time periods on the target synchronizer + * @param targetTimestamp + * The timestamp of the topology on the target synchronizer to use for validations. 
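Wrapping the timestamp as `Target[CantonTimestamp]` rather than a bare `CantonTimestamp` is what prevents source- and target-synchronizer timestamps from being mixed up. A minimal sketch of the tagging idea, with a hypothetical `Ts` stand-in for `CantonTimestamp`:

```scala
// Minimal Source/Target wrappers (the real Canton types carry more structure):
// tagging a value with the synchronizer it belongs to turns an accidental mix-up
// into a compile error.
final case class Source[+A](unwrap: A)
final case class Target[+A](unwrap: A)

final case class Ts(micros: Long) // hypothetical stand-in for CantonTimestamp

def topologySnapshotAt(ts: Target[Ts]): String =
  s"target topology at ${ts.unwrap.micros}"

// topologySnapshotAt(Source(Ts(42))) does not compile; Target(Ts(42)) does.
```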
* @param targetProtocolVersion * Protocol version of the target synchronizer */ @@ -301,7 +300,7 @@ final case class UnassignmentView private ( override val salt: Salt, contracts: ContractsReassignmentBatch, targetSynchronizerId: Target[PhysicalSynchronizerId], - targetTimeProof: TimeProof, + targetTimestamp: Target[CantonTimestamp], )( hashOps: HashOps, override val representativeProtocolVersion: RepresentativeProtocolVersion[ @@ -323,7 +322,7 @@ final case class UnassignmentView private ( v30.UnassignmentView( salt = Some(salt.toProtoV30), targetPhysicalSynchronizerId = targetSynchronizerId.unwrap.toProtoPrimitive, - targetTimeProof = Some(targetTimeProof.toProtoV30), + targetTimestamp = targetTimestamp.unwrap.toProtoPrimitive, contracts = contracts.contracts.map { reassign => v30.ActiveContract( reassign.contract.encoded, @@ -335,7 +334,7 @@ final case class UnassignmentView private ( override protected def pretty: Pretty[UnassignmentView] = prettyOfClass( param("template ids", _.contracts.contracts.map(_.templateId).toSet), param("target synchronizer id", _.targetSynchronizerId), - param("target time proof", _.targetTimeProof), + param("target timestamp", _.targetTimestamp), param( "contracts", _.contracts.contractIds, @@ -358,14 +357,14 @@ object UnassignmentView extends VersioningCompanionContextMemoization[Unassignme salt: Salt, contracts: ContractsReassignmentBatch, targetSynchronizer: Target[PhysicalSynchronizerId], - targetTimeProof: TimeProof, + targetTimestamp: Target[CantonTimestamp], sourceProtocolVersion: Source[ProtocolVersion], ): UnassignmentView = UnassignmentView( salt, contracts, targetSynchronizer, - targetTimeProof, + targetTimestamp, )(hashOps, protocolVersionRepresentativeFor(sourceProtocolVersion.unwrap), None) private[this] def fromProtoV30(hashOps: HashOps, unassignmentViewP: v30.UnassignmentView)( @@ -374,7 +373,7 @@ object UnassignmentView extends VersioningCompanionContextMemoization[Unassignme val v30.UnassignmentView( saltP, targetSynchronizerIdP, - targetTimeProofP, + targetTimestampP, contractsP, ) = unassignmentViewP @@ -384,9 +383,7 @@ object UnassignmentView extends VersioningCompanionContextMemoization[Unassignme targetSynchronizerIdP, "targetPhysicalSynchronizerId", ) - targetTimeProof <- ProtoConverter - .required("targetTimeProof", targetTimeProofP) - .flatMap(TimeProof.fromProtoV30(targetSynchronizerId.protocolVersion, hashOps)) + targetTimestamp <- CantonTimestamp.fromProtoPrimitive(targetTimestampP) contracts <- contractsP .traverse { case v30.ActiveContract(contractP, reassignmentCounterP) => ContractInstance @@ -404,7 +401,7 @@ object UnassignmentView extends VersioningCompanionContextMemoization[Unassignme salt, contracts, Target(targetSynchronizerId), - targetTimeProof, + Target(targetTimestamp), )( hashOps, rpv, @@ -434,7 +431,7 @@ final case class FullUnassignmentTree(tree: UnassignmentViewTree) override def synchronizerId: PhysicalSynchronizerId = commonData.sourceSynchronizerId.unwrap override def sourceSynchronizer: Source[PhysicalSynchronizerId] = commonData.sourceSynchronizerId override def targetSynchronizer: Target[PhysicalSynchronizerId] = view.targetSynchronizerId - def targetTimeProof: TimeProof = view.targetTimeProof + def targetTimestamp: Target[CantonTimestamp] = view.targetTimestamp def targetProtocolVersion: Target[ProtocolVersion] = targetSynchronizer.map(_.protocolVersion) def mediatorMessage( diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/data/package.scala 
b/canton/community/base/src/main/scala/com/digitalasset/canton/data/package.scala deleted file mode 100644 index 99545ddc91..0000000000 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/data/package.scala +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton - -import com.digitalasset.daml.lf.data.Ref - -package object data { - type UpdateId = Ref.TransactionId -} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/lifecycle/HasSynchronizeWithReaders.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/lifecycle/HasSynchronizeWithReaders.scala index 8bdd2973b4..e2186f583c 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/lifecycle/HasSynchronizeWithReaders.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/lifecycle/HasSynchronizeWithReaders.scala @@ -7,14 +7,14 @@ import com.digitalasset.canton.discard.Implicits.* import com.digitalasset.canton.lifecycle.UnlessShutdown.{AbortedDueToShutdown, Outcome} import com.digitalasset.canton.logging.TracedLogger import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.Thereafter import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.util.{Thereafter, TryUtil} import java.util.concurrent.Semaphore import scala.annotation.tailrec import scala.collection.concurrent.TrieMap import scala.concurrent.duration.FiniteDuration -import scala.util.{Failure, Success, Try} +import scala.util.{Failure, Success} /** Mix-in for keeping track of a set of the readers. Used for implementing the * [[HasSynchronizeWithClosing]] logic: Each computation acquires one permit before it starts and @@ -59,14 +59,12 @@ trait HasSynchronizeWithReaders extends HasSynchronizeWithClosing { name: String )(f: => F[A])(implicit traceContext: TraceContext, F: Thereafter[F]): UnlessShutdown[F[A]] = addReader(name).map { handle => - Try(f) match { - case Success(fa) => - fa.thereafter { _ => - removeReader(handle) - } - case Failure(error) => + TryUtil.tryCatchInterrupted(f) match { + case Failure(exception) => removeReader(handle) - throw error + throw exception + case Success(value) => + value.thereafter(_ => removeReader(handle)) } } @@ -113,32 +111,50 @@ trait HasSynchronizeWithReaders extends HasSynchronizeWithClosing { val deadline = synchronizeWithClosingPatience.fromNow @tailrec def poll(patienceMillis: Long): Boolean = { - val acquired = readerSemaphore.tryAcquire( - // Grab all of the permits at once - Int.MaxValue, - patienceMillis, - java.util.concurrent.TimeUnit.MILLISECONDS, - ) - if (acquired) true - else { - val timeLeft = deadline.timeLeft - if (timeLeft < zeroDuration) { + val acquireE = + try { + Right( + readerSemaphore.tryAcquire( + // Grab all the permits at once + Int.MaxValue, + patienceMillis, + java.util.concurrent.TimeUnit.MILLISECONDS, + ) + ) + } catch { + case exception: InterruptedException => + Left(exception) + } + + acquireE match { + case Left(interruptException) => logger.warn( - s"Timeout $synchronizeWithClosingPatience expired, but readers are still active. Shutting down forcibly." + s"Thread was interrupted while acquiring the reader semaphore. 
Forcibly shutting down.", + interruptException, ) - logger.debug(s"Active readers: ${readerUnderapproximation.keys.mkString(",")}") - dumpRunning() false - } else { - val readerCount = Int.MaxValue - readerSemaphore.availablePermits() - val nextPatienceMillis = - (patienceMillis * 2) min maxSleepMillis min timeLeft.toMillis - logger.debug( - s"At least $readerCount active readers prevent closing. Next log message in ${nextPatienceMillis}ms. Active readers: ${readerUnderapproximation.keys - .mkString(",")}" - ) - poll(nextPatienceMillis) - } + case Right(acquired) => + if (acquired) true + else { + val timeLeft = deadline.timeLeft + if (timeLeft < zeroDuration) { + logger.warn( + s"Timeout $synchronizeWithClosingPatience expired, but readers are still active. Shutting down forcibly." + ) + logger.debug(s"Active readers: ${readerUnderapproximation.keys.mkString(",")}") + dumpRunning() + false + } else { + val readerCount = Int.MaxValue - readerSemaphore.availablePermits() + val nextPatienceMillis = + (patienceMillis * 2) min maxSleepMillis min timeLeft.toMillis + logger.debug( + s"At least $readerCount active readers prevent closing. Next log message in ${nextPatienceMillis}ms. Active readers: ${readerUnderapproximation.keys + .mkString(",")}" + ) + poll(nextPatienceMillis) + } + } } } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/logging/pretty/PrettyInstances.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/logging/pretty/PrettyInstances.scala index 98bcdfd3b4..37ec78efb4 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/logging/pretty/PrettyInstances.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/logging/pretty/PrettyInstances.scala @@ -207,7 +207,7 @@ trait PrettyInstances { lfContractId.toString } - implicit def prettyLfLanguageVersion: Pretty[LfLanguageVersion] = prettyOfString( + implicit def prettyLfSerializationVersion: Pretty[LfSerializationVersion] = prettyOfString( _.pretty ) diff --git a/canton/community/app-base/src/main/scala/com/digitalasset/canton/metrics/LogReporter.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/metrics/LogReporter.scala similarity index 100% rename from canton/community/app-base/src/main/scala/com/digitalasset/canton/metrics/LogReporter.scala rename to canton/community/base/src/main/scala/com/digitalasset/canton/metrics/LogReporter.scala diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/ActiveStreamCounterInterceptor.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/ActiveStreamCounterInterceptor.scala similarity index 100% rename from canton/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/ActiveStreamCounterInterceptor.scala rename to canton/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/ActiveStreamCounterInterceptor.scala diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonCommunityServerInterceptors.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonCommunityServerInterceptors.scala index a11d87e191..3c12e5a88b 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonCommunityServerInterceptors.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonCommunityServerInterceptors.scala @@ -12,8 +12,13 @@ import com.digitalasset.canton.config.{ 
ApiLoggingConfig, AuthServiceConfig, JwksCacheConfig, + StreamLimitConfig, } import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.networking.grpc.ratelimiting.{ + RateLimitingInterceptor, + StreamCounterCheck, +} import com.digitalasset.canton.tracing.{TraceContextGrpc, TracingConfig} import io.grpc.ServerInterceptors.intercept import io.grpc.{ServerInterceptor, ServerServiceDefinition} @@ -25,6 +30,8 @@ trait CantonServerInterceptors { service: ServerServiceDefinition, withLogging: Boolean, ): ServerServiceDefinition + + def streamCounterCheck: Option[StreamCounterCheck] } class CantonCommunityServerInterceptors( @@ -39,7 +46,13 @@ class CantonCommunityServerInterceptors( jwksCacheConfig: JwksCacheConfig, telemetry: Telemetry, additionalInterceptors: Seq[ServerInterceptor] = Seq.empty, + streamLimits: Option[StreamLimitConfig], ) extends CantonServerInterceptors { + + override val streamCounterCheck: Option[StreamCounterCheck] = streamLimits.map { limits => + new StreamCounterCheck(limits.limits, limits.warnOnUndefinedLimits, loggerFactory) + } + private def interceptForLogging( service: ServerServiceDefinition, withLogging: Boolean, @@ -79,6 +92,11 @@ class CantonCommunityServerInterceptors( telemetry, ) + private def addLimitInterceptor(service: ServerServiceDefinition): ServerServiceDefinition = + streamCounterCheck.fold(service) { limits => + intercept(service, new RateLimitingInterceptor(List(limits.check)), limits) + } + def addAllInterceptors( service: ServerServiceDefinition, withLogging: Boolean, @@ -87,6 +105,7 @@ class CantonCommunityServerInterceptors( .pipe(interceptForLogging(_, withLogging)) .pipe(addTraceContextInterceptor) .pipe(addMetricsInterceptor) + .pipe(addLimitInterceptor) .pipe(addAuthInterceptor) .pipe(s => additionalInterceptors.foldLeft(s)((acc, i) => intercept(acc, i))) } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonGrpcUtil.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonGrpcUtil.scala index 2d9e989471..74e9fe3356 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonGrpcUtil.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonGrpcUtil.scala @@ -32,7 +32,7 @@ import io.grpc.Context.CancellableContext import io.grpc.stub.{AbstractStub, StreamObserver} import java.util.concurrent.TimeUnit -import scala.concurrent.duration.{Duration, FiniteDuration} +import scala.concurrent.duration.{Duration, DurationInt, FiniteDuration} import scala.concurrent.{ExecutionContext, Future} import scala.util.{Failure, Success} @@ -402,6 +402,24 @@ object CantonGrpcUtil { override def retryable: Option[ErrorCategoryRetry] = Some(ErrorCategoryRetry(10.seconds)) } } + + /** Error used by the Admin API server limits */ + @Explanation( + """The node is at capacity and therefore limits the number of parallel requests.""" + ) + @Resolution("""Retry with exponential backoff.""") + object Overloaded + extends ErrorCode(id = "SERVER_OVERLOADED", ErrorCategory.ContentionOnSharedResources) { + final case class TooManyStreams(methodName: String)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + s"Reached the limit of concurrent streams for $methodName. 
Please try again later" + ) { + override def retryable = Some(ErrorCategoryRetry(5.seconds)) + } + } + } implicit class GrpcFUSExtended[A](val f: FutureUnlessShutdown[A]) extends AnyVal { diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonServerBuilder.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonServerBuilder.scala index 850884fe1c..f1e0a39a89 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonServerBuilder.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonServerBuilder.scala @@ -11,7 +11,9 @@ import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.config.TlsServerConfig.logTlsProtocolsAndCipherSuites import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.networking.grpc.ratelimiting.StreamCounterCheck import com.digitalasset.canton.tracing.TracingConfig +import com.google.common.annotations.VisibleForTesting import io.grpc.* import io.grpc.netty.shaded.io.grpc.netty.{GrpcSslContexts, NettyServerBuilder} import io.grpc.netty.shaded.io.netty.handler.ssl.{SslContext, SslContextBuilder} @@ -33,6 +35,7 @@ trait CantonServerBuilder { def build: Server def maxInboundMessageSize(bytes: NonNegativeInt): CantonServerBuilder + } trait CantonMutableHandlerRegistry extends AutoCloseable { @@ -49,6 +52,10 @@ trait CantonMutableHandlerRegistry extends AutoCloseable { def removeService(service: ServerServiceDefinition): CantonMutableHandlerRegistry def removeServiceU(service: ServerServiceDefinition): Unit = removeService(service).discard + + @VisibleForTesting + def streamCounterCheck: Option[StreamCounterCheck] + } object CantonServerBuilder { @@ -92,6 +99,10 @@ object CantonServerBuilder { .removeService(registry.getServices.get(registry.getServices.size() - 1)) .discard[Boolean] } + + override def streamCounterCheck: Option[StreamCounterCheck] = + interceptors.streamCounterCheck + } override def addService(service: BindableService, withLogging: Boolean): CantonServerBuilder = { @@ -178,6 +189,7 @@ object CantonServerBuilder { config.jwksCacheConfig, telemetry, additionalInterceptors, + config.stream, ), ) } diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/ratelimiting/LimitResult.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/ratelimiting/LimitResult.scala similarity index 100% rename from canton/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/ratelimiting/LimitResult.scala rename to canton/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/ratelimiting/LimitResult.scala diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/ratelimiting/RateLimitingInterceptor.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/ratelimiting/RateLimitingInterceptor.scala similarity index 100% rename from canton/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/ratelimiting/RateLimitingInterceptor.scala rename to canton/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/ratelimiting/RateLimitingInterceptor.scala diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/ratelimiting/StreamCounterCheck.scala 
b/canton/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/ratelimiting/StreamCounterCheck.scala similarity index 88% rename from canton/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/ratelimiting/StreamCounterCheck.scala rename to canton/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/ratelimiting/StreamCounterCheck.scala index 69feb58b5c..56932d83e9 100644 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/ratelimiting/StreamCounterCheck.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/ratelimiting/StreamCounterCheck.scala @@ -7,8 +7,8 @@ import com.digitalasset.base.error.RpcError import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.networking.grpc.ActiveStreamCounterInterceptor import com.digitalasset.canton.networking.grpc.ratelimiting.LimitResult.* +import com.digitalasset.canton.networking.grpc.{ActiveStreamCounterInterceptor, CantonGrpcUtil} import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} import java.util.concurrent.atomic.AtomicReference @@ -17,7 +17,7 @@ import scala.collection.concurrent.TrieMap /** Counts number of open stream and can be used with a [[RateLimitingInterceptor]] to enforce a * limit for open requests */ -abstract class StreamCounterCheck( +class StreamCounterCheck( initialLimits: Map[FullMethodName, NonNegativeInt], warnOnUnconfiguredLimits: Boolean, val loggerFactory: NamedLoggerFactory, @@ -33,9 +33,13 @@ abstract class StreamCounterCheck( limits.updateAndGet(_.updatedWith(key)(_ => newLimit)).discard } - protected def errorFactory(methodName: FullMethodName, limit: NonNegativeInt)(implicit + private def errorFactory(methodName: FullMethodName)(implicit traceContext: TraceContext - ): RpcError + ): RpcError = { + val err = CantonGrpcUtil.GrpcErrors.Overloaded.TooManyStreams(methodName) + err.log() + err.toCantonRpcError + } private def adjust(methodName: FullMethodName, delta: Int): Unit = counters @@ -55,7 +59,7 @@ abstract class StreamCounterCheck( val current = counters.getOrElse(methodName, 0) if (current >= limit.value) { LimitResult.OverLimit( - errorFactory(methodName, limit)( + errorFactory(methodName)( TraceContextGrpc.fromGrpcContextOrNew("StreamCounterCheck.check") ) ) diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/CantonContractIdVersion.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/CantonContractIdVersion.scala index 3a24f9c73b..daf0d40573 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/CantonContractIdVersion.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/CantonContractIdVersion.scala @@ -8,6 +8,7 @@ import com.digitalasset.canton.checked import com.digitalasset.canton.config.CantonRequireTypes.String255 import com.digitalasset.canton.crypto.* import com.digitalasset.canton.version.ProtocolVersion +import com.digitalasset.daml.lf.crypto.Hash.HashingMethod.UpgradeFriendly import com.digitalasset.daml.lf.data.Bytes import com.google.protobuf.ByteString @@ -19,7 +20,7 @@ object CantonContractIdVersion { protocolVersion: ProtocolVersion ): Either[String, CantonContractIdVersion] = // TODO(#23971) Use V2 by default in dev - if (protocolVersion >= ProtocolVersion.v34) 
Right(AuthenticatedContractIdVersionV11) + if (protocolVersion >= ProtocolVersion.v34) Right(AuthenticatedContractIdVersionV12) else Left(s"No contract ID scheme found for ${protocolVersion.v}") def extractCantonContractIdVersion( @@ -59,7 +60,9 @@ object CantonContractIdVersion { private def fromContractSuffixV1( contractSuffix: Bytes ): Either[String, CantonContractIdV1Version] = - if (contractSuffix.startsWith(AuthenticatedContractIdVersionV11.versionPrefixBytes)) { + if (contractSuffix.startsWith(AuthenticatedContractIdVersionV12.versionPrefixBytes)) { + Right(AuthenticatedContractIdVersionV12) + } else if (contractSuffix.startsWith(AuthenticatedContractIdVersionV11.versionPrefixBytes)) { Right(AuthenticatedContractIdVersionV11) } else if (contractSuffix.startsWith(AuthenticatedContractIdVersionV10.versionPrefixBytes)) { Right(AuthenticatedContractIdVersionV10) @@ -87,12 +90,17 @@ object CantonContractIdVersion { * * Lazily initialized to work around bug https://github.com/scala/bug/issues/9115 */ - lazy val all: Seq[CantonContractIdVersion] = + lazy val allV1: Seq[CantonContractIdV1Version] = Seq( AuthenticatedContractIdVersionV10, AuthenticatedContractIdVersionV11, - CantonContractIdV2Version0, + AuthenticatedContractIdVersionV12, ) + + // TODO(#27612) where possible convert tests using this to iterate over allV1 + lazy val maxV1: CantonContractIdV1Version = AuthenticatedContractIdVersionV12 + + lazy val all: Seq[CantonContractIdVersion] = allV1 :+ CantonContractIdV2Version0 } sealed trait CantonContractIdVersion @@ -104,6 +112,8 @@ sealed trait CantonContractIdVersion protected def comparisonKey: Int + def contractHashingMethod: LfHash.HashingMethod + override final def compare(that: CantonContractIdVersion): Int = this.comparisonKey.compare(that.comparisonKey) } @@ -118,12 +128,6 @@ sealed abstract class CantonContractIdV1Version( override type AuthenticationData = ContractAuthenticationDataV1 - def contractHashingMethod: LfHash.HashingMethod - - /** Set to true if upgrade friendly hashing should be used when constructing the contract hash */ - def useUpgradeFriendlyHashing: Boolean = - contractHashingMethod == LfHash.HashingMethod.UpgradeFriendly - def versionPrefixBytes: Bytes def fromDiscriminator(discriminator: LfHash, unicum: Unicum): LfContractId.V1 = @@ -140,10 +144,18 @@ case object AuthenticatedContractIdVersionV11 extends CantonContractIdV1Version( override def contractHashingMethod: LfHash.HashingMethod = LfHash.HashingMethod.UpgradeFriendly } +case object AuthenticatedContractIdVersionV12 extends CantonContractIdV1Version(12) { + lazy val versionPrefixBytes: Bytes = Bytes.fromByteArray(Array(0xca.toByte, 0x12.toByte)) + override def contractHashingMethod: LfHash.HashingMethod = LfHash.HashingMethod.TypedNormalForm +} + sealed trait CantonContractIdV2Version extends CantonContractIdVersion { def versionPrefixBytesRelative: Bytes def versionPrefixBytesAbsolute: Bytes override type AuthenticationData = ContractAuthenticationDataV2 + + // TODO(#23969) review hashing method for V2 + override def contractHashingMethod: LfHash.HashingMethod = UpgradeFriendly } case object CantonContractIdV2Version0 extends CantonContractIdV2Version { diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/ContractAuthenticationData.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/ContractAuthenticationData.scala index 7ace593b88..f40adc409f 100644 --- 
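Contract-ID versions are dispatched on a two-byte prefix of the contract suffix, newest version first. In the sketch below, the `V12` prefix `0xca 0x12` comes from the change above; the `V10`/`V11` prefixes are assumed to follow the same `0xca` pattern:

```scala
// Version dispatch on the two-byte suffix prefix, trying newer versions first,
// with plain byte arrays instead of daml-lf Bytes.
sealed trait CidVersion { def prefix: Array[Byte] }
case object V10 extends CidVersion { val prefix = Array(0xca.toByte, 0x10.toByte) }
case object V11 extends CidVersion { val prefix = Array(0xca.toByte, 0x11.toByte) }
case object V12 extends CidVersion { val prefix = Array(0xca.toByte, 0x12.toByte) }

def versionOfSuffix(suffix: Array[Byte]): Either[String, CidVersion] =
  List(V12, V11, V10)
    .find(v => suffix.startsWith(v.prefix))
    .toRight("unknown contract ID version prefix")
```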
a/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/ContractAuthenticationData.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/ContractAuthenticationData.scala @@ -52,7 +52,8 @@ final case class ContractAuthenticationDataV1(salt: Salt)( @SuppressWarnings(Array("com.digitalasset.canton.ProtobufToByteString")) override def toLfBytes: LfBytes = contractIdVersion match { - case AuthenticatedContractIdVersionV10 | AuthenticatedContractIdVersionV11 => + case AuthenticatedContractIdVersionV10 | AuthenticatedContractIdVersionV11 | + AuthenticatedContractIdVersionV12 => LfBytes.fromByteArray( v1.UntypedVersionedMessage( v1.UntypedVersionedMessage.Wrapper.Data( @@ -73,7 +74,7 @@ final case class ContractAuthenticationDataV1(salt: Salt)( final case class ContractAuthenticationDataV2( salt: LfBytes, - creatingTransactionId: Option[TransactionId], + creatingTransactionId: Option[UpdateId], relativeArgumentSuffixes: Seq[LfBytes], )(val contractIdVersion: CantonContractIdV2Version) extends ContractAuthenticationData { @@ -125,10 +126,12 @@ object ContractAuthenticationData { ): ParsingResult[CAD] = version match { // Pattern-matching on singletons in isolation is necessary for correct type inference - case AuthenticatedContractIdVersionV10 => - versionV1(AuthenticatedContractIdVersionV10, bytes) + case AuthenticatedContractIdVersionV12 => + versionV1(AuthenticatedContractIdVersionV12, bytes) case AuthenticatedContractIdVersionV11 => versionV1(AuthenticatedContractIdVersionV11, bytes) + case AuthenticatedContractIdVersionV10 => + versionV1(AuthenticatedContractIdVersionV10, bytes) case CantonContractIdV2Version0 => versionV2(CantonContractIdV2Version0, bytes) } @@ -156,7 +159,7 @@ object ContractAuthenticationData { proto <- ProtoConverter.protoParser(v31.ContractAuthenticationData.parseFrom)(bytes) v31.ContractAuthenticationData(saltP, creatingTransactionIdP, relativeArgumentSuffixesP) = proto - creatingTransactionId <- creatingTransactionIdP.traverse(TransactionId.fromProtoPrimitive) + creatingTransactionId <- creatingTransactionIdP.traverse(UpdateId.fromProtoPrimitive) _ <- Either.cond( IterableUtil.isSorted(relativeArgumentSuffixesP)(ByteStringUtil.orderingByteString), (), diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/ContractInstance.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/ContractInstance.scala index 52ad4f2987..0770945d6f 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/ContractInstance.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/ContractInstance.scala @@ -49,7 +49,8 @@ sealed trait GenContractInstance extends PrettyPrinting { } object ContractInstance { - private final case class ContractInstanceImpl[Time <: CreationTime]( + // TODO(#28382) revert removal of private access modifier + final case class ContractInstanceImpl[Time <: CreationTime]( override val inst: FatContractInstance { type CreatedAtTime = Time }, override val metadata: ContractMetadata, override val serialization: ByteString, @@ -80,7 +81,6 @@ object ContractInstance { ): Either[String, ContractAuthenticationData] = CantonContractIdVersion .extractCantonContractIdVersion(inst.contractId) - .leftMap(_.toString) .flatMap(contractAuthenticationData(_, inst)) private[protocol] def contractAuthenticationData( diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/SynchronizerParameters.scala 
b/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/SynchronizerParameters.scala index 1e3787ccfa..a2060dad71 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/SynchronizerParameters.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/SynchronizerParameters.scala @@ -19,13 +19,7 @@ import com.digitalasset.canton.protocol.v30 import com.digitalasset.canton.sequencing.TrafficControlParameters import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult -import com.digitalasset.canton.time.{ - Clock, - NonNegativeFiniteDuration, - PositiveSeconds, - RemoteClock, - SimClock, -} +import com.digitalasset.canton.time.{NonNegativeFiniteDuration, PositiveSeconds} import com.digitalasset.canton.topology.transaction.ParticipantSynchronizerLimits import com.digitalasset.canton.util.EitherUtil.RichEither import com.digitalasset.canton.version.* @@ -68,6 +62,7 @@ final case class StaticSynchronizerParameters( requiredHashAlgorithms: NonEmpty[Set[HashAlgorithm]], requiredCryptoKeyFormats: NonEmpty[Set[CryptoKeyFormat]], requiredSignatureFormats: NonEmpty[Set[SignatureFormat]], + topologyChangeDelay: NonNegativeFiniteDuration, enableTransparencyChecks: Boolean, protocolVersion: ProtocolVersion, serial: NonNegativeInt, @@ -89,6 +84,7 @@ final case class StaticSynchronizerParameters( requiredHashAlgorithms = requiredHashAlgorithms.toSeq.map(_.toProtoEnum), requiredCryptoKeyFormats = requiredCryptoKeyFormats.toSeq.map(_.toProtoEnum), requiredSignatureFormats = requiredSignatureFormats.toSeq.map(_.toProtoEnum), + topologyChangeDelay = Some(topologyChangeDelay.toProtoPrimitive), enableTransparencyChecks = enableTransparencyChecks, protocolVersion = protocolVersion.toProtoPrimitive, serial = serial.value, @@ -100,6 +96,7 @@ final case class StaticSynchronizerParameters( param("required symmetric key schemes", _.requiredSymmetricKeySchemes), param("required hash algorithms", _.requiredHashAlgorithms), param("required crypto key formats", _.requiredCryptoKeyFormats), + param("topology change delay", _.topologyChangeDelay), param("enable transparency checks", _.enableTransparencyChecks), param("protocol version", _.protocolVersion), param("serial", _.serial), @@ -112,6 +109,11 @@ object StaticSynchronizerParameters // Note: if you need static synchronizer parameters for testing, look at BaseTest.defaultStaticSynchronizerParametersWith + val defaultTopologyChangeDelay: NonNegativeFiniteDuration = + NonNegativeFiniteDuration.tryOfMillis(250) + val defaultTopologyChangeDelayNonStandardClock: NonNegativeFiniteDuration = + NonNegativeFiniteDuration.Zero // SimClock, RemoteClock + val versioningTable: VersioningTable = VersioningTable( ProtoVersion(30) -> VersionedProtoCodec(ProtocolVersion.v34)( v30.StaticSynchronizerParameters @@ -143,6 +145,7 @@ object StaticSynchronizerParameters protocolVersionP, serialP, enableTransparencyChecks, + topologyChangeDelayP, ) = synchronizerParametersP for { @@ -178,6 +181,11 @@ object StaticSynchronizerParameters requiredSignatureFormatsP, SignatureFormat.fromProtoEnum, ) + topologyChangeDelay <- ProtoConverter.parseRequired( + NonNegativeFiniteDuration.fromProtoPrimitive("topology_change_delay")(_), + "topology_change_delay", + topologyChangeDelayP, + ) protocolVersion <- ProtocolVersion.fromProtoPrimitive(protocolVersionP) serial <- ProtoConverter.parseNonNegativeInt("serial", serialP) } yield StaticSynchronizerParameters( @@ 
-187,6 +195,7 @@ object StaticSynchronizerParameters requiredHashAlgorithms, requiredCryptoKeyFormats, requiredSignatureFormats, + topologyChangeDelay, enableTransparencyChecks, protocolVersion, serial, @@ -334,8 +343,7 @@ object OnboardingRestriction { * protobuf version v2 and protocol version v30. If None, the catch-up mode is disabled: the * participant does not trigger the catch-up mode when lagging behind. If not None, it specifies * the number of reconciliation intervals that the participant skips in catch-up mode, and the - * number of catch-up intervals intervals a participant should lag behind in order to enter - * catch-up mode. + * number of catch-up intervals a participant should lag behind in order to enter catch-up mode. * @param preparationTimeRecordTimeTolerance * the maximum absolute difference between the preparation time and the record time of a command. * If the absolute difference would be larger for a command, then the command must be rejected. @@ -347,7 +355,6 @@ final case class DynamicSynchronizerParameters private ( confirmationResponseTimeout: NonNegativeFiniteDuration, mediatorReactionTimeout: NonNegativeFiniteDuration, assignmentExclusivityTimeout: NonNegativeFiniteDuration, - topologyChangeDelay: NonNegativeFiniteDuration, ledgerTimeRecordTimeTolerance: NonNegativeFiniteDuration, mediatorDeduplicationTimeout: NonNegativeFiniteDuration, reconciliationInterval: PositiveSeconds, @@ -440,7 +447,6 @@ final case class DynamicSynchronizerParameters private ( confirmationResponseTimeout: NonNegativeFiniteDuration = confirmationResponseTimeout, mediatorReactionTimeout: NonNegativeFiniteDuration = mediatorReactionTimeout, assignmentExclusivityTimeout: NonNegativeFiniteDuration = assignmentExclusivityTimeout, - topologyChangeDelay: NonNegativeFiniteDuration = topologyChangeDelay, ledgerTimeRecordTimeTolerance: NonNegativeFiniteDuration = ledgerTimeRecordTimeTolerance, mediatorDeduplicationTimeout: NonNegativeFiniteDuration = mediatorDeduplicationTimeout, reconciliationInterval: PositiveSeconds = reconciliationInterval, @@ -457,7 +463,6 @@ final case class DynamicSynchronizerParameters private ( confirmationResponseTimeout = confirmationResponseTimeout, mediatorReactionTimeout = mediatorReactionTimeout, assignmentExclusivityTimeout = assignmentExclusivityTimeout, - topologyChangeDelay = topologyChangeDelay, ledgerTimeRecordTimeTolerance = ledgerTimeRecordTimeTolerance, mediatorDeduplicationTimeout = mediatorDeduplicationTimeout, reconciliationInterval = reconciliationInterval, @@ -474,7 +479,6 @@ final case class DynamicSynchronizerParameters private ( confirmationResponseTimeout = Some(confirmationResponseTimeout.toProtoPrimitive), mediatorReactionTimeout = Some(mediatorReactionTimeout.toProtoPrimitive), assignmentExclusivityTimeout = Some(assignmentExclusivityTimeout.toProtoPrimitive), - topologyChangeDelay = Some(topologyChangeDelay.toProtoPrimitive), ledgerTimeRecordTimeTolerance = Some(ledgerTimeRecordTimeTolerance.toProtoPrimitive), mediatorDeduplicationTimeout = Some(mediatorDeduplicationTimeout.toProtoPrimitive), reconciliationInterval = Some(reconciliationInterval.toProtoPrimitive), @@ -493,7 +497,6 @@ final case class DynamicSynchronizerParameters private ( param("confirmation response timeout", _.confirmationResponseTimeout), param("mediator reaction timeout", _.mediatorReactionTimeout), param("assignment exclusivity timeout", _.assignmentExclusivityTimeout), - param("topology change delay", _.topologyChangeDelay), param("ledger time record time tolerance", 
_.ledgerTimeRecordTimeTolerance), param("mediator deduplication timeout", _.mediatorDeduplicationTimeout), param("reconciliation interval", _.reconciliationInterval), @@ -542,11 +545,6 @@ object DynamicSynchronizerParameters extends VersioningCompanion[DynamicSynchron private val defaultTrafficControlParameters: Option[TrafficControlParameters] = Option.empty[TrafficControlParameters] - private val defaultTopologyChangeDelay: NonNegativeFiniteDuration = - NonNegativeFiniteDuration.tryOfMillis(250) - private val defaultTopologyChangeDelayNonStandardClock: NonNegativeFiniteDuration = - NonNegativeFiniteDuration.Zero // SimClock, RemoteClock - private val defaultLedgerTimeRecordTimeTolerance: NonNegativeFiniteDuration = NonNegativeFiniteDuration.tryOfSeconds(60) @@ -567,6 +565,12 @@ object DynamicSynchronizerParameters extends VersioningCompanion[DynamicSynchron AcsCommitmentsCatchUpParameters(PositiveInt.tryCreate(5), PositiveInt.tryCreate(2)) ) + val confirmationResponseTimeoutBounds = + (NonNegativeFiniteDuration.tryOfSeconds(1), NonNegativeFiniteDuration.tryOfMinutes(5)) + + val mediatorReactionTimeoutBounds = + (NonNegativeFiniteDuration.tryOfSeconds(1), NonNegativeFiniteDuration.tryOfMinutes(5)) + /** Safely creates DynamicSynchronizerParameters. * * @return @@ -577,7 +581,6 @@ object DynamicSynchronizerParameters extends VersioningCompanion[DynamicSynchron confirmationResponseTimeout: NonNegativeFiniteDuration, mediatorReactionTimeout: NonNegativeFiniteDuration, assignmentExclusivityTimeout: NonNegativeFiniteDuration, - topologyChangeDelay: NonNegativeFiniteDuration, ledgerTimeRecordTimeTolerance: NonNegativeFiniteDuration, mediatorDeduplicationTimeout: NonNegativeFiniteDuration, reconciliationInterval: PositiveSeconds, @@ -598,7 +601,6 @@ object DynamicSynchronizerParameters extends VersioningCompanion[DynamicSynchron confirmationResponseTimeout, mediatorReactionTimeout, assignmentExclusivityTimeout, - topologyChangeDelay, ledgerTimeRecordTimeTolerance, mediatorDeduplicationTimeout, reconciliationInterval, @@ -621,7 +623,6 @@ object DynamicSynchronizerParameters extends VersioningCompanion[DynamicSynchron confirmationResponseTimeout: NonNegativeFiniteDuration, mediatorReactionTimeout: NonNegativeFiniteDuration, assignmentExclusivityTimeout: NonNegativeFiniteDuration, - topologyChangeDelay: NonNegativeFiniteDuration, ledgerTimeRecordTimeTolerance: NonNegativeFiniteDuration, mediatorDeduplicationTimeout: NonNegativeFiniteDuration, reconciliationInterval: PositiveSeconds, @@ -641,7 +642,6 @@ object DynamicSynchronizerParameters extends VersioningCompanion[DynamicSynchron confirmationResponseTimeout, mediatorReactionTimeout, assignmentExclusivityTimeout, - topologyChangeDelay, ledgerTimeRecordTimeTolerance, mediatorDeduplicationTimeout, reconciliationInterval, @@ -656,10 +656,9 @@ object DynamicSynchronizerParameters extends VersioningCompanion[DynamicSynchron /** Default dynamic synchronizer parameters for non-static clocks */ def defaultValues(protocolVersion: ProtocolVersion): DynamicSynchronizerParameters = - initialValues(defaultTopologyChangeDelay, protocolVersion) + initialValues(protocolVersion) def initialValues( - topologyChangeDelay: NonNegativeFiniteDuration, protocolVersion: ProtocolVersion, mediatorReactionTimeout: NonNegativeFiniteDuration = defaultMediatorReactionTimeout, ): DynamicSynchronizerParameters = checked( // safe because default values are safe @@ -667,7 +666,6 @@ object DynamicSynchronizerParameters extends VersioningCompanion[DynamicSynchron 
confirmationResponseTimeout = defaultConfirmationResponseTimeout, mediatorReactionTimeout = mediatorReactionTimeout, assignmentExclusivityTimeout = defaultAssignmentExclusivityTimeout, - topologyChangeDelay = topologyChangeDelay, ledgerTimeRecordTimeTolerance = defaultLedgerTimeRecordTimeTolerance, mediatorDeduplicationTimeout = defaultMediatorDeduplicationTimeout, reconciliationInterval = DynamicSynchronizerParameters.defaultReconciliationInterval, @@ -685,7 +683,6 @@ object DynamicSynchronizerParameters extends VersioningCompanion[DynamicSynchron ) def tryInitialValues( - topologyChangeDelay: NonNegativeFiniteDuration, protocolVersion: ProtocolVersion, confirmationRequestsMaxRate: NonNegativeInt = DynamicSynchronizerParameters.defaultConfirmationRequestsMaxRate, @@ -703,7 +700,6 @@ object DynamicSynchronizerParameters extends VersioningCompanion[DynamicSynchron confirmationResponseTimeout = confirmationResponseTimeout, mediatorReactionTimeout = mediatorReactionTimeout, assignmentExclusivityTimeout = defaultAssignmentExclusivityTimeout, - topologyChangeDelay = topologyChangeDelay, ledgerTimeRecordTimeTolerance = defaultLedgerTimeRecordTimeTolerance, mediatorDeduplicationTimeout = defaultMediatorDeduplicationTimeout, reconciliationInterval = reconciliationInterval, @@ -718,17 +714,6 @@ object DynamicSynchronizerParameters extends VersioningCompanion[DynamicSynchron protocolVersionRepresentativeFor(protocolVersion) ) - def initialValues( - clock: Clock, - protocolVersion: ProtocolVersion, - ): DynamicSynchronizerParameters = { - val topologyChangeDelay = clock match { - case _: RemoteClock | _: SimClock => defaultTopologyChangeDelayNonStandardClock - case _ => defaultTopologyChangeDelay - } - initialValues(topologyChangeDelay, protocolVersion) - } - // if there is no topology change delay defined (or not yet propagated), we'll use this one val topologyChangeDelayIfAbsent: NonNegativeFiniteDuration = NonNegativeFiniteDuration.Zero @@ -739,7 +724,6 @@ object DynamicSynchronizerParameters extends VersioningCompanion[DynamicSynchron confirmationResponseTimeoutP, mediatorReactionTimeoutP, assignmentExclusivityTimeoutP, - topologyChangeDelayP, ledgerTimeRecordTimeToleranceP, reconciliationIntervalP, mediatorDeduplicationTimeoutP, @@ -768,9 +752,6 @@ object DynamicSynchronizerParameters extends VersioningCompanion[DynamicSynchron )( assignmentExclusivityTimeoutP ) - topologyChangeDelay <- NonNegativeFiniteDuration.fromProtoPrimitiveO("topologyChangeDelay")( - topologyChangeDelayP - ) ledgerTimeRecordTimeTolerance <- NonNegativeFiniteDuration.fromProtoPrimitiveO( "ledgerTimeRecordTimeTolerance" )( @@ -826,7 +807,6 @@ object DynamicSynchronizerParameters extends VersioningCompanion[DynamicSynchron confirmationResponseTimeout = confirmationResponseTimeout, mediatorReactionTimeout = mediatorReactionTimeout, assignmentExclusivityTimeout = assignmentExclusivityTimeout, - topologyChangeDelay = topologyChangeDelay, ledgerTimeRecordTimeTolerance = ledgerTimeRecordTimeTolerance, mediatorDeduplicationTimeout = mediatorDeduplicationTimeout, reconciliationInterval = reconciliationInterval, @@ -924,7 +904,6 @@ final case class DynamicSynchronizerParametersWithValidity( def mediatorDeduplicationTimeout: NonNegativeFiniteDuration = parameters.mediatorDeduplicationTimeout - def topologyChangeDelay: NonNegativeFiniteDuration = parameters.topologyChangeDelay def assignmentExclusivityTimeout: NonNegativeFiniteDuration = parameters.assignmentExclusivityTimeout def sequencerTopologyTimestampTolerance: 
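The companion `Ordering` defined just below sorts by `validFrom` and puts open-ended validities (`validUntil == None`) last. Since `Option`'s default ordering sorts `None` first, the code pairs the option with `isEmpty`; a standalone version of the trick, on a stand-in `Validity` type:

```scala
// isEmpty is false for Some and true for None, and false sorts before true,
// so defined validities come first and None lands at the end.
final case class Validity(validFrom: Long, validUntil: Option[Long])

implicit val validityOrdering: Ordering[Validity] =
  Ordering.by(v => (v.validFrom, v.validUntil.isEmpty, v.validUntil))

// List(Validity(0, None), Validity(0, Some(5))).sorted
//   == List(Validity(0, Some(5)), Validity(0, None))
```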
NonNegativeFiniteDuration = @@ -933,6 +912,83 @@ final case class DynamicSynchronizerParametersWithValidity( parameters.submissionCostTimestampTopologyTolerance } +object DynamicSynchronizerParametersWithValidity { + implicit lazy val ordering: Ordering[DynamicSynchronizerParametersWithValidity] = + Ordering.by(params => + ( + params.validFrom, + // Sort None last using Boolean which sorts false before true + params.validUntil.isEmpty, + params.validUntil, + ) + ) +} + +/** Utility functions for operating on a sequence (history) of dynamic synchronizer parameters. + */ +object DynamicSynchronizerParametersHistory { + + /** Computes the latest possible decision deadline based on a history of parameter changes. + * + * For each item in the history, a potential deadline is calculated by adding its + * `decisionTimeout` to its `validUntil` timestamp. This method returns the maximum value among + * all computed deadlines and the initial `lowerBound`. + * + * @param history + * The sequence of parameter changes over time. + * @param lowerBound + * The minimum timestamp for the result. It also serves as a fallback for items that have an + * undefined `validUntil`. + * @return + * The latest possible decision deadline, guaranteed to be at least `lowerBound`. + */ + def latestDecisionDeadline( + history: Seq[DynamicSynchronizerParametersWithValidity], + lowerBound: CantonTimestamp, + ): CantonTimestamp = + history.foldLeft(lowerBound) { case (previousBound, parametersChange) => + val parameters = parametersChange.parameters + val maxTime = parametersChange.validUntil.getOrElse(lowerBound) + + val newBound = maxTime + parameters.decisionTimeout + + newBound.max(previousBound) + } + + /** Computes the latest possible decision deadline based on a history of parameter changes + * relative to a given topology effective timestamp as an anchor. + * + * Note the special handling of the parameters valid at `effectiveAt` below and the associated + * need to order the parameters by `validFrom` and `validUntil` in descending order. + */ + def latestDecisionDeadlineEffectiveAt( + history: Seq[DynamicSynchronizerParametersWithValidity], + effectiveAt: CantonTimestamp, + ): CantonTimestamp = { + val newestToOldest = NonEmpty + .from(history.sorted(DynamicSynchronizerParametersWithValidity.ordering.reverse)) + .getOrElse(throw new IllegalStateException("no synchronizer parameters found")) + + // Extract parameters valid at effectiveAt for explicit calculation + // in case parametersAtEffectiveTs has since been redefined (i.e. validUntil.nonEmpty) + // as we want to anchor the "effectiveAt" decision timeout valid at effectiveAt + // rather than at validUntil. + val parametersAtEffectiveTs = newestToOldest.head1 + + // Topology sanity check + require( + parametersAtEffectiveTs.validUntil.forall(effectiveAt < _), + s"Parameters effective at $effectiveAt can only be expired later, but encountered ${parametersAtEffectiveTs.validUntil}", + ) + + val historyExcludingEffectiveTsParams = newestToOldest.tail1 + DynamicSynchronizerParametersHistory.latestDecisionDeadline( + historyExcludingEffectiveTsParams, + effectiveAt + parametersAtEffectiveTs.parameters.decisionTimeout, + ) + } +} + /** The class specifies the catch-up parameters governing the catch-up mode of a participant lagging * behind with its ACS commitments computation. ***** Parameter recommendations A high * [[catchUpIntervalSkip]] outputs more commitments and is slower to catch-up. 
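A worked miniature of `latestDecisionDeadline` above, with plain `Long` microseconds standing in for `CantonTimestamp`: each history entry contributes `validUntil + decisionTimeout`, falling back to `lowerBound` for open-ended entries, and the maximum wins.

```scala
final case class ParamsValidity(validUntil: Option[Long], decisionTimeout: Long)

def latestDecisionDeadline(history: Seq[ParamsValidity], lowerBound: Long): Long =
  history.foldLeft(lowerBound) { (bound, p) =>
    (p.validUntil.getOrElse(lowerBound) + p.decisionTimeout).max(bound)
  }

// latestDecisionDeadline(
//   Seq(ParamsValidity(Some(100), 30), ParamsValidity(None, 60)), 50)
//   == 130  // max(50, 100 + 30, 50 + 60)
```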
For equal diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/Tags.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/Tags.scala index 93974cce7c..573cac1984 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/Tags.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/Tags.scala @@ -84,7 +84,7 @@ object RootHash { } /** A hash-based transaction id. */ -final case class TransactionId(private val hash: Hash) extends HasCryptographicEvidence { +final case class UpdateId(private val hash: Hash) extends HasCryptographicEvidence { def unwrap: Hash = hash def toRootHash: RootHash = RootHash(hash) @@ -98,51 +98,64 @@ final case class TransactionId(private val hash: Hash) extends HasCryptographicE def tryAsLedgerTransactionId: LedgerTransactionId = LedgerTransactionId.assertFromString(hash.toHexString) + + def toHexString: String = hash.toHexString } -object TransactionId { +object UpdateId { /** The all-zeros transaction ID. This transaction ID is used as the creating transaction ID for * contracts whose creation transaction ID is unknown. */ - val zero: TransactionId = { + val zero: UpdateId = { val algo = HashAlgorithm.Sha256 - new TransactionId( + new UpdateId( Hash.tryFromByteStringRaw(ByteString.copyFrom(new Array[Byte](algo.length.toInt)), algo) ) } - def fromProtoPrimitive(bytes: ByteString): ParsingResult[TransactionId] = + def fromProtoPrimitive(bytes: ByteString): ParsingResult[UpdateId] = Hash .fromByteString(bytes) - .bimap(ProtoDeserializationError.CryptoDeserializationError.apply, TransactionId(_)) + .bimap(ProtoDeserializationError.CryptoDeserializationError.apply, UpdateId(_)) + + def tryFromProtoPrimitive(bytes: ByteString): UpdateId = + fromProtoPrimitive(bytes).valueOr(err => throw new IllegalArgumentException(err.toString)) + + def tryFromByteArray(bytes: Array[Byte]): UpdateId = + fromProtoPrimitive(ByteString.copyFrom(bytes)).valueOr(err => + throw new IllegalArgumentException(err.toString) + ) - def fromRootHash(rootHash: RootHash): TransactionId = TransactionId(rootHash.unwrap) + def fromRootHash(rootHash: RootHash): UpdateId = UpdateId(rootHash.unwrap) - /** Ordering for [[TransactionId]]s based on the serialized hash */ - implicit val orderTransactionId: Order[TransactionId] = - Order.by[TransactionId, ByteString](_.hash.getCryptographicEvidence)( + def fromLedgerString(txId: String): Either[DeserializationError, UpdateId] = + Hash.fromHexString(txId).map(UpdateId.apply) + + /** Ordering for [[UpdateId]]s based on the serialized hash */ + implicit val orderTransactionId: Order[UpdateId] = + Order.by[UpdateId, ByteString](_.hash.getCryptographicEvidence)( ByteStringUtil.orderByteString ) - implicit val orderingTransactionId: Ordering[TransactionId] = orderTransactionId.toOrdering + implicit val orderingTransactionId: Ordering[UpdateId] = orderTransactionId.toOrdering - implicit val prettyTransactionId: Pretty[TransactionId] = { + implicit val prettyTransactionId: Pretty[UpdateId] = { import Pretty.* - prettyOfParam(_.unwrap) + prettyOfParam(_.hash) } - implicit val setParameterTransactionId: SetParameter[TransactionId] = (v, pp) => pp.>>(v.hash) + implicit val setParameterTransactionId: SetParameter[UpdateId] = (v, pp) => pp.>>(v.hash) - implicit val getResultTransactionId: GetResult[TransactionId] = GetResult { r => - TransactionId(r.<<) + implicit val getResultTransactionId: GetResult[UpdateId] = GetResult { r => + UpdateId(r.<<) } - implicit val 
setParameterOptionTransactionId: SetParameter[Option[TransactionId]] = (v, pp) => + implicit val setParameterOptionTransactionId: SetParameter[Option[UpdateId]] = (v, pp) => pp.>>(v.map(_.hash)) - implicit val getResultOptionTransactionId: GetResult[Option[TransactionId]] = GetResult { r => - (r.<<[Option[Hash]]).map(TransactionId(_)) + implicit val getResultOptionTransactionId: GetResult[Option[UpdateId]] = GetResult { r => + (r.<<[Option[Hash]]).map(UpdateId(_)) } } @@ -242,6 +255,12 @@ object ReassignmentId { case b => Left(s"invalid version: ${b.toInt}") }).leftMap(err => s"cannot parse ReassignmentId bytes: $err") + def assertFromBytes(bytes: Array[Byte]): ReassignmentId = + ReassignmentId.fromBytes(ByteString.copyFrom(bytes)) match { + case Left(e) => throw new IllegalArgumentException(s"Cannot convert reassignment id: $e") + case Right(id) => id + } + def apply( source: Source[SynchronizerId], target: Target[SynchronizerId], diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/hash/NodeHashBuilder.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/hash/NodeHashBuilder.scala index 63558b99ea..50ccb0ea2c 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/hash/NodeHashBuilder.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/hash/NodeHashBuilder.scala @@ -9,8 +9,7 @@ import com.digitalasset.canton.protocol.hash.HashTracer import com.digitalasset.canton.protocol.hash.NodeBuilder.NodeEncodingV1 import com.digitalasset.canton.version.HashingSchemeVersion import com.digitalasset.daml.lf.data.ImmArray -import com.digitalasset.daml.lf.language.LanguageVersion -import com.digitalasset.daml.lf.transaction.{Node, NodeId, TransactionVersion} +import com.digitalasset.daml.lf.transaction.{Node, NodeId, SerializationVersion} import com.digitalasset.daml.lf.value.Value import TransactionHash.* @@ -72,10 +71,10 @@ private sealed abstract class NodeHashBuilder( private object NodeBuilder { // Version of the protobuf used to encode nodes defined in the interactive_submission_data.proto private[hash] val NodeEncodingV1 = 1 - private[hash] val HashingVersionToSupportedLFVersionMapping - : Map[HashingSchemeVersion, Set[LanguageVersion]] = + private[hash] val HashingVersionToSupportedLFSerializationVersionMapping + : Map[HashingSchemeVersion, Set[SerializationVersion]] = Map( - HashingSchemeVersion.V2 -> Set(LanguageVersion.v2_1) + HashingSchemeVersion.V2 -> Set(SerializationVersion.V1) ) private[hash] sealed abstract class NodeTag(val tag: Byte) @@ -88,19 +87,19 @@ private object NodeBuilder { } @throws[NodeHashingError] - private[hash] def assertHashingVersionSupportsLfVersion( - version: LanguageVersion, + private[hash] def assertHashingVersionSupportsLfSerializationVersion( + version: SerializationVersion, nodeHashVersion: HashingSchemeVersion, ): Unit = if ( - !HashingVersionToSupportedLFVersionMapping + !HashingVersionToSupportedLFSerializationVersionMapping // This really shouldn't happen, unless someone removed an entry from the HashingVersionToSupportedLFVersionMapping map .getOrElse( nodeHashVersion, throw NodeHashingError.UnsupportedHashingVersion(nodeHashVersion), ) .contains(version) - ) throw NodeHashingError.UnsupportedLanguageVersion(nodeHashVersion, version) + ) throw NodeHashingError.UnsupportedSerializationVersion(nodeHashVersion, version) } private class NodeBuilderV1( @@ -119,7 +118,7 @@ private class NodeBuilderV1( node.optVersion .foreach( NodeBuilder - 
.assertHashingVersionSupportsLfVersion(_, HashingSchemeVersion.V2) + .assertHashingVersionSupportsLfSerializationVersion(_, HashingSchemeVersion.V2) ) new NodeBuilderV1(purpose, hashTracer, enforceNodeSeedForCreateNodes) @@ -142,7 +141,7 @@ private class NodeBuilderV1( ) => if (keyOpt.isDefined) notSupported("keyOpt in Create node") // 2.dev feature addContext("Create Node") - .withContext("Node Version")(_.add(TransactionVersion.toProtoValue(version))) + .withContext("Node Version")(_.add(SerializationVersion.toProtoValue(version))) .addByte(NodeBuilder.NodeTag.CreateTag.tag, _ => "Create Node Tag") .withContext("Node Seed")( _.addOptional(nodeSeed, builder => seed => builder.addLfHash(seed, "node seed")) @@ -171,7 +170,7 @@ private class NodeBuilderV1( if (keyOpt.nonEmpty) notSupported("keyOpt in Fetch node") // 2.dev feature if (byKey == true) notSupported("byKey in Fetch node") // 2.dev feature addContext("Fetch Node") - .withContext("Node Version")(_.add(TransactionVersion.toProtoValue(version))) + .withContext("Node Version")(_.add(SerializationVersion.toProtoValue(version))) .addByte(NodeBuilder.NodeTag.FetchTag.tag, _ => "Fetch Node Tag") .withContext("Contract Id")(_.addCid(coid)) .withContext("Package Name")(_.add(packageName)) @@ -211,7 +210,7 @@ private class NodeBuilderV1( if (keyOpt.nonEmpty) notSupported("keyOpt in Exercise node") // 2.dev feature if (byKey == true) notSupported("byKey in Exercise node") // 2.dev feature addContext("Exercise Node") - .withContext("Node Version")(_.add(TransactionVersion.toProtoValue(version))) + .withContext("Node Version")(_.add(SerializationVersion.toProtoValue(version))) .addByte(NodeBuilder.NodeTag.ExerciseTag.tag, _ => "Exercise Node Tag") .withContext("Node Seed")(_.addLfHash(nodeSeed, "seed")) .withContext("Contract Id")(_.addCid(targetCoid)) diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/hash/TransactionHash.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/hash/TransactionHash.scala index 4119b074ea..f8baf17ffe 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/hash/TransactionHash.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/hash/TransactionHash.scala @@ -15,18 +15,18 @@ object TransactionHash { final case class MissingNodeSeed(message: String) extends NodeHashingError(message) final case class IncompleteTransactionTree(nodeId: NodeId) extends NodeHashingError(s"The transaction does not contain a node with nodeId $nodeId") - final case class UnsupportedLanguageVersion( + final case class UnsupportedSerializationVersion( nodeHashVersion: HashingSchemeVersion, - version: TransactionVersion, + version: SerializationVersion, ) extends NodeHashingError( - s"Cannot hash node with LF $version using hash version $nodeHashVersion. Supported LF versions: ${NodeBuilder.HashingVersionToSupportedLFVersionMapping + s"Cannot hash node with LF $version using hash version $nodeHashVersion. Supported LF serialization versions: ${NodeBuilder.HashingVersionToSupportedLFSerializationVersionMapping .getOrElse(nodeHashVersion, Set.empty) .mkString(", ")}" ) final case class UnsupportedHashingVersion(version: HashingSchemeVersion) extends NodeHashingError( - s"Cannot hash node with hashing version $version. Supported versions: ${NodeBuilder.HashingVersionToSupportedLFVersionMapping.keySet + s"Cannot hash node with hashing version $version. 
Supported versions: ${NodeBuilder.HashingVersionToSupportedLFSerializationVersionMapping.keySet .mkString(", ")}" ) } @@ -78,8 +78,8 @@ object TransactionHash { hashTracer, enforceNodeSeedForCreateNodes = true, ).addPurpose - .withContext("Transaction Version")( - _.add(TransactionVersion.toProtoValue(versionedTransaction.version)) + .withContext("Serialization Version")( + _.add(SerializationVersion.toProtoValue(versionedTransaction.version)) ) .withContext("Root Nodes")( _.addNodesFromNodeIds(versionedTransaction.roots, versionedTransaction.nodes, nodeSeeds) diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/EncryptedViewMessage.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/EncryptedViewMessage.scala index a434033a1f..f0981654cb 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/EncryptedViewMessage.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/EncryptedViewMessage.scala @@ -534,4 +534,5 @@ object EncryptedViewMessageError { synchronizerId: PhysicalSynchronizerId, ) extends EncryptedViewMessageError + final case class InvalidContractIdInView(error: String) extends EncryptedViewMessageError } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/package.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/package.scala index 08636c470b..cb96c7e3b5 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/package.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/package.scala @@ -45,9 +45,10 @@ package object protocol { type LfLanguageVersion = LanguageVersion val LfLanguageVersion: LanguageVersion.type = LanguageVersion - val LfTransactionVersion: TransactionVersion.type = TransactionVersion + type LfSerializationVersion = SerializationVersion + val LfSerializationVersion: SerializationVersion.type = SerializationVersion - val DummyTransactionVersion: LfLanguageVersion = LanguageVersion.v2_dev + val DummySerializationVersion: LfSerializationVersion = LfSerializationVersion.VDev // Ledger transaction statistics based on lf transaction nodes type LedgerTransactionNodeStatistics = TransactionNodeStatistics @@ -121,9 +122,11 @@ package object protocol { type RequestProcessor[VT <: ViewType] = Phase37Processor[RequestAndRootHashMessage[OpenEnvelope[EncryptedViewMessage[VT]]]] - def maxTransactionVersion(versions: NonEmpty[Seq[LfLanguageVersion]]): LfLanguageVersion = { + def maxSerializationVersion( + versions: NonEmpty[Seq[LfSerializationVersion]] + ): LfSerializationVersion = { import Ordering.Implicits.* - versions.reduceLeft[LfLanguageVersion](_ max _) + versions.reduceLeft[LfSerializationVersion](_ max _) } } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/replica/ReplicaManager.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/replica/ReplicaManager.scala new file mode 100644 index 0000000000..55bf87c3c2 --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/replica/ReplicaManager.scala @@ -0,0 +1,183 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.replica + +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.crypto.store.EncryptedCryptoPrivateStore +import com.digitalasset.canton.error.FatalError +import com.digitalasset.canton.lifecycle.{ + AsyncOrSyncCloseable, + CloseContext, + FlagCloseable, + FlagCloseableAsync, + FutureUnlessShutdown, + SyncCloseable, +} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.tracing.TraceContext.withNewTraceContext +import com.digitalasset.canton.util.{ + EitherTUtil, + FutureUnlessShutdownUtil, + SimpleExecutionQueue, + SingleUseCell, +} + +import java.util.concurrent.atomic.AtomicReference +import scala.concurrent.{ExecutionContext, Future} + +abstract class ReplicaManager( + exitOnFatalFailures: Boolean, + override protected val timeouts: ProcessingTimeout, + override protected val loggerFactory: NamedLoggerFactory, + futureSupervisor: FutureSupervisor, +)(implicit ec: ExecutionContext) + extends NamedLogging + with FlagCloseableAsync { self => + + private val execQueue = new SimpleExecutionQueue( + "replica-manager-queue", + futureSupervisor, + timeouts, + loggerFactory, + crashOnFailure = exitOnFatalFailures, + ) + + private def makeCloseContext: FlagCloseable = FlagCloseable(logger, timeouts) + + protected val sessionCloseContext: AtomicReference[FlagCloseable] = + new AtomicReference[FlagCloseable](makeCloseContext) + + def getSessionContext: CloseContext = CloseContext(sessionCloseContext.get()) + + private val replicaStateRef: AtomicReference[Option[ReplicaState]] = + new AtomicReference[Option[ReplicaState]](None) + + private val privateKeyStoreRef: SingleUseCell[EncryptedCryptoPrivateStore] = + new SingleUseCell[EncryptedCryptoPrivateStore] + + protected def transitionToActive()(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Unit] + + protected def transitionToPassive()(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Unit] + + private def setState(newState: ReplicaState)(implicit traceContext: TraceContext): Unit = { + logger.info(s"Setting replica manager state to $newState") + replicaStateRef.set(Some(newState)) + } + + private def changeState[A](newState: ReplicaState, setNewStateEagerly: Boolean)( + body: => FutureUnlessShutdown[A] + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Option[A]] = { + logger.info(s"Transitioning replica state to $newState") + execQueue.executeUS( + synchronizeWithClosing(functionFullName) { + if (replicaStateRef.get().contains(newState)) { + logger.debug(s"Replica already in state $newState, ignoring replica state change") + FutureUnlessShutdown.pure(None) + } else { + if (setNewStateEagerly) { + // Set the state to transition from one state to another before running the body + logger.info(s"Eagerly setting replica state to $newState") + setState(newState) + } + + FutureUnlessShutdownUtil + .logOnFailureUnlessShutdown( + body, + s"Failed to run replica state transition to $newState", + ) + .map { res => + if (!setNewStateEagerly) + setState(newState) + logger.info(s"Successfully performed replica state change to $newState") + Some(res) + } + } + }, + s"replica state update to $newState", + ) + } + + def setInitialState(state: ReplicaState): Unit = { + noTracingLogger.info(s"Setting 
initial replica state to $state") + if (!replicaStateRef.compareAndSet(None, Some(state))) + noTracingLogger.info(s"Failed to set initial state to $state, was already initialized") + } + + def setPrivateKeyStore(privateKeyStore: EncryptedCryptoPrivateStore): Unit = + privateKeyStoreRef.putIfAbsent(privateKeyStore).foreach { _ => + logger.warn(s"Failed to set initial private key store, was already initialized")( + TraceContext.empty + ) + } + + def setActive(): FutureUnlessShutdown[Unit] = withNewTraceContext("active_replica") { + implicit traceContext => + changeState(ReplicaState.Active, setNewStateEagerly = false) { + for { + _ <- privateKeyStoreRef.get.fold(FutureUnlessShutdown.unit) { store => + EitherTUtil.toFutureUnlessShutdown( + store + .refreshWrapperKey() + .leftMap(err => new RuntimeException(s"Failed to refresh wrapper key: $err")) + ) + } + _ <- transitionToActive() + } yield () + } + .map(_ => ()) + .recover { + case exception if exitOnFatalFailures => + FatalError.exitOnFatalError("Failed to transition node to active", exception, logger) + } + } + + def setPassive(): FutureUnlessShutdown[Option[CloseContext]] = + withNewTraceContext("passive_replica") { implicit traceContext => + // Close the session context first, but don't wait for the close to complete before transitioning to passive; + // only verify that it has closed once the transition is done. + // The session context is used by storage to perform DB queries, so closing it means waiting for all in-flight + // DB operations to finish. For that to happen, the passive transition must already be underway; otherwise the + // node keeps functioning normally and issues further DB queries. At the same time, having the session context + // closing while we transition to passive stops any DB retries. + changeState(ReplicaState.Passive, setNewStateEagerly = true) { + val newContext = makeCloseContext + val oldContext = sessionCloseContext.getAndSet(newContext) + val sessionContextIsClosed = + FutureUnlessShutdown.outcomeF(Future(oldContext.close())) + logger.debug("Starting transition to passive") + for { + _ <- transitionToPassive() + _ = logger.debug("Waiting for session context to be closed") + _ <- sessionContextIsClosed + } yield CloseContext(newContext) + }.recover { + case exception if exitOnFatalFailures => + FatalError.exitOnFatalError("Failed to transition node to passive", exception, logger) + } + } + + def isActive: Boolean = replicaStateRef.get().contains(ReplicaState.Active) + + override def closeAsync(): Seq[AsyncOrSyncCloseable] = + Seq( + SyncCloseable("closeQueue", execQueue.close()), + SyncCloseable("closeInternal", closeInternal()), + ) + + protected def closeInternal(): Unit = () +} + +sealed trait ReplicaState extends Product with Serializable +object ReplicaState { + case object Active extends ReplicaState + case object Passive extends ReplicaState +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbLock.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbLock.scala new file mode 100644 index 0000000000..f1d0b406ea --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbLock.scala @@ -0,0 +1,691 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
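Before moving on to DbLock.scala: the eager-vs-lazy state publication in `ReplicaManager.changeState` above is the crux of the active/passive hand-over. A minimal sketch of that pattern with a plain `AtomicReference`; this is a hypothetical simplification, none of these names come from the patch:

import java.util.concurrent.atomic.AtomicReference
import scala.concurrent.{ExecutionContext, Future}

sealed trait State
case object Active extends State
case object Passive extends State

final class Transitions(implicit ec: ExecutionContext) {
  private val state = new AtomicReference[Option[State]](None)

  // setEagerly = true publishes the new state before the body runs (used for
  // the passive transition, so DB retries stop immediately); false publishes
  // it only after the body succeeded (used for the active transition, so the
  // node never looks active before it actually is).
  def changeState[A](newState: State, setEagerly: Boolean)(
      body: => Future[A]
  ): Future[Option[A]] =
    if (state.get().contains(newState)) Future.successful(None) // idempotent
    else {
      if (setEagerly) state.set(Some(newState))
      body.map { a =>
        if (!setEagerly) state.set(Some(newState))
        Some(a)
      }
    }
}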
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import cats.Eval +import cats.data.EitherT +import cats.instances.future.* +import cats.syntax.either.* +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.config.* +import com.digitalasset.canton.crypto.{Hash, HashAlgorithm, HashPurpose, PseudoRandom} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.lifecycle.{ + FlagCloseable, + FutureUnlessShutdown, + HasCloseContext, + UnlessShutdown, +} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting, PrettyUtil} +import com.digitalasset.canton.logging.{ + ErrorLoggingContext, + NamedLoggerFactory, + NamedLogging, + TracedLogger, +} +import com.digitalasset.canton.resource.DbStorage.Profile +import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.retry.NoExceptionRetryPolicy +import com.digitalasset.canton.util.{ErrorUtil, FutureUnlessShutdownUtil, retry} +import com.google.common.annotations.VisibleForTesting +import org.slf4j.event.Level +import slick.jdbc.JdbcBackend.Database +import slick.jdbc.SetParameter +import slick.util.AsyncExecutorWithShutdown + +import java.nio.ByteBuffer +import java.time.Duration +import java.util.concurrent.atomic.AtomicReference +import scala.collection.mutable +import scala.concurrent.duration.DurationInt +import scala.concurrent.{ExecutionContext, Future} +import scala.util.control.NonFatal + +sealed abstract case class DbLockCounter(value: Int) extends PrettyPrinting { + + require(value > 0, s"DbLockCounter must be at least 1, but is $value.") + require(value <= 256, s"DbLockCounter must be at most 256, but is $value.") + + override protected def pretty: Pretty[DbLockCounter] = prettyOfParam(_.value) +} + +object DbLockCounter { + + private val counters: mutable.Set[Int] = mutable.Set.empty[Int] + + def apply(counter: Int): DbLockCounter = + if (counters.add(counter)) + new DbLockCounter(counter) {} + else + throw new IllegalArgumentException(s"DbLockCounter $counter already in use") +} + +/** We pre-allocate a set of counters that are used as part of the lock ID allocation. */ +object DbLockCounters { + val PARTICIPANT_WRITE = DbLockCounter(1) + val INDEXER_MAIN = DbLockCounter(2) + val INDEXER_WORKER = DbLockCounter(3) + val SEQUENCER_INIT = DbLockCounter(4) + + val SEQUENCER_WRITERS_MAIN: Array[DbLockCounter] = + DbLockCounters.range(5, DbLockConfig.MAX_SEQUENCER_WRITERS_AVAILABLE) + val NODE_MIGRATIONS = DbLockCounter(37) + val MEDIATOR_WRITE = DbLockCounter(38) + + // A shared lock used by all the writers in the write DB connection pool + val PARTICIPANT_WRITERS = DbLockCounter(39) + val MEDIATOR_WRITERS = DbLockCounter(40) + + val SEQUENCER_WRITERS_POOL: Array[DbLockCounter] = + DbLockCounters.range(41, DbLockConfig.MAX_SEQUENCER_WRITERS_AVAILABLE) + + val SEQUENCER_PRUNING_SCHEDULER_WRITE = DbLockCounter(74) + val SEQUENCER_PRUNING_SCHEDULER_WRITERS = DbLockCounter(75) + + val SEQUENCER_INIT_WORKER = DbLockCounter(76) + + // First counter used for testing purposes + // This and all subsequent counters are reserved for testing. 
+ val FIRST_TESTING = 77 + + private def range(startLockCounter: Int, num: Int): Array[DbLockCounter] = { + require(num > 0, "must create at least one lock counter") + + startLockCounter.until(startLockCounter + num).map(DbLockCounter(_)).toArray + } +} + +/** Common wrapper for a DB lock identifier. + */ +final case class DbLockId private (id: Int) extends PrettyPrinting { + override protected def pretty: Pretty[DbLockId] = prettyOfParam(_.id) +} + +object DbLockId { + implicit val setParameterLockId: SetParameter[DbLockId] = (l, pp) => pp.setInt(l.id) + + private[resource] def create(scope: String, counter: DbLockCounter)(implicit + loggingContext: ErrorLoggingContext + ): DbLockId = { + val scopeHash = Hash + .build(HashPurpose.DbLockId, HashAlgorithm.Sha256) + // It's fine to omit the length prefix because we're only adding another fixed-size item before finishing. + .addWithoutLengthPrefix(scope) + .finish() + + val lockId = { + val len = java.lang.Integer.BYTES + val buffer = ByteBuffer.allocate(len) + buffer.put(scopeHash.unwrap.toByteArray, 0, len) + + // `flip` to switch from putting into the bytebuffer to getting from the buffer + buffer.flip() + + // Bit 1 to 8: counter + // Bit 9 to 29: scopeHash + // Bit 30: always set + (counter.value - 1) | + (buffer.getInt & 0x1fffff00) | + (1 << 29) + } + + ErrorUtil.requireState(lockId >= 0, s"Generated lock id $lockId must be non-negative") + ErrorUtil.requireState( + lockId <= 1073741823, + s"Generated lock id $lockId must fit into 30 bits", + ) + + DbLockId(lockId) + } + + def allocate(dbConfig: DbConfig, lockCounter: DbLockCounter, loggerFactory: NamedLoggerFactory)( + implicit traceContext: TraceContext + ): Either[DbLockError, DbLockId] = + dbConfig match { + case pgConfig: DbConfig.Postgres => + PostgresDbLock + .allocateLockId(pgConfig, lockCounter)(loggerFactory) + case _ => Left(DbLockError.UnsupportedDatabaseConfig(dbConfig)) + } +} + +sealed trait DbLockMode extends Product with Serializable with PrettyPrinting +object DbLockMode { + case object Exclusive extends DbLockMode { + override protected def pretty: Pretty[Exclusive.type] = prettyOfObject[Exclusive.type] + } + case object Shared extends DbLockMode { + override protected def pretty: Pretty[Shared.type] = prettyOfObject[Shared.type] + } +} + +/** Abstraction for an application-specific database lock. + * + * The lock is identified by an integer and bound to the given database's session/connection. + * + * If the connection is lost, the database releases the lock and the DbLock sets the lock state to + * Lost. The caller's connection management has to recreate and try to reacquire the lock with the + * given id. NOTE: The database must be configured with a single connection. + */ +trait DbLock extends NamedLogging with FlagCloseable with HasCloseContext { + + import DbLock.* + import DbLockError.* + + @VisibleForTesting + private[resource] val lockState: AtomicReference[LockState] = new AtomicReference(LockState.Free) + + private def transition( + expectedState: LockState, + newState: LockState, + error: LockState => DbLockError, + ): Either[DbLockError, Unit] = { + val updatedState = lockState.getAndUpdate { currentState => + if (currentState == expectedState) newState else currentState + } + Either.cond(updatedState == expectedState, (), error(updatedState)) + } + + // A transition that must succeed, otherwise we have a bug.
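The 30-bit lock-id layout documented in `DbLockId.create` above can be exercised in isolation. A self-contained sketch with a plain int standing in for the Canton scope hash; the `pack` helper is hypothetical:

// bits 1-8: counter - 1, bits 9-29: scope hash, bit 30: always set
def pack(counter: Int, scopeHashWord: Int): Int = {
  require(counter >= 1 && counter <= 256, "counter must fit into 8 bits")
  (counter - 1) | (scopeHashWord & 0x1fffff00) | (1 << 29)
}

val id = pack(counter = 39, scopeHashWord = 0x1234abcd)
assert(id >= 0 && id <= 1073741823) // non-negative and at most 30 bits
assert((id & 0xff) == 38)           // the counter bits survive the packing

Setting bit 30 keeps these ids apart from other allocation schemes, and capping the result at 30 bits keeps it a valid non-negative int for Postgres advisory locks.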
private def transitionOrFail(expectedState: LockState, newState: LockState)(implicit + traceContext: TraceContext + ): Unit = { + val _ = transition( + expectedState, + newState, + errorState => + ErrorUtil.internalError( + new IllegalStateException( + s"Failed to transition lock $lockId from $expectedState to $newState: current state is $errorState" + ) + ), + ) + } + + private def acquireError( + errorState: LockState + )(implicit traceContext: TraceContext): DbLockError = errorState match { + case LockState.Acquired => LockAlreadyAcquired(lockId) + case LockState.Acquiring => LockAlreadyInAcquisition(lockId) + case LockState.Releasing => LockNotFree(lockId, "Lock is being released") + case LockState.Lost => LostLock(lockId) + case LockState.Free => + ErrorUtil.internalError(new IllegalStateException(s"Acquire error called on free state")) + } + + private def acquireInternalError[A]( + error: DbLockError + )(implicit traceContext: TraceContext): Either[DbLockError, A] = { + logger.debug(s"Failed to acquire lock $lockId: $error") + + // Failed to acquire, try to move back to the free state + transition( + LockState.Acquiring, + LockState.Free, + { + case LockState.Lost => LostLock(lockId) + case state => LockInvalidState(lockId, s"Invalid state during acquire: $state") + }, + ).flatMap(_ => Left(error)) // Return original error if the transition worked + } + + private def releaseError( + errorState: LockState + )(implicit traceContext: TraceContext): DbLockError = errorState match { + case LockState.Free => LockAlreadyReleased(lockId) + case LockState.Releasing => LockAlreadyInReleasing(lockId) + case LockState.Acquiring => LockNotAcquired(lockId, "Lock is being acquired") + case LockState.Lost => LostLock(lockId) + case LockState.Acquired => + ErrorUtil.internalError(new IllegalStateException(s"Release error called on acquired state")) + } + + private def releaseInternalError( + error: DbLockError + )(implicit traceContext: TraceContext): Either[DbLockError, Unit] = { + logger.debug(s"Failed to release lock $lockId: $error") + + // Failed to release, try to move back to the acquired state + transition( + LockState.Releasing, + LockState.Acquired, + { + case LockState.Lost => LostLock(lockId) + case state => LockInvalidState(lockId, s"Invalid state during release: $state") + }, + ).flatMap(_ => Left(error)) // Return original error if the transition worked + } + + // Periodically check the lock's acquisition state and mark the lock as lost if it is no longer held.
private def checkLock(now: CantonTimestamp): Unit = { + import TraceContext.Implicits.Empty.* + + logger.trace(s"Checking lock status of $lockId at $now") + + // Only check the lock if we still think it's in acquired state + if (lockState.get() == LockState.Acquired) { + + // Only retry when the lock check query was rejected by slick's queue + implicit val success: retry.Success[Either[DbLockError, Boolean]] = retry.Success { + case Left(DbLockError.LockCheckRejected(_lockId)) => + false + case _ => true + } + + def runLockCheck(): FutureUnlessShutdown[Unit] = + EitherT { + retry + .Backoff( + logger, + this, + 15, + 200.millis, + 5.second, + functionFullName, + // We only retry when the lock check is rejected due to contention, so we can log on DEBUG + retryLogLevel = Some(Level.DEBUG), + ) + .unlessShutdown( + hasLock.mapK(FutureUnlessShutdown.outcomeK).value, + NoExceptionRetryPolicy, + ) + .tapOnShutdown(logger.debug("Stopped lock check due to shutdown")) + .recover { case NonFatal(e) => + // When an unexpected exception is thrown, we treat the lock check as failed, which results in the lock being assumed lost + UnlessShutdown.Outcome( + Left( + FailedToCheckLock( + lockId, + s"Lock check failed due to an exception: ${ErrorUtil.messageWithStacktrace(e)}", + ) + ) + ) + } + } + .valueOr { + case _: LockCheckRejected => + logger.debug(s"Lock check failed due to contention, assuming lock is acquired") + true + case err if executorShuttingDown.value => + logger.debug( + "Lock check failed but executor is shutting down. Ignoring failed check.", + err, + ) + false + case err => + if (!isClosing) { + // If the hasLock query fails (e.g. underlying connection closed), we indicate that the lock is lost + logger.warn( + s"Failed to check database lock status for $lockId, assuming lost: $err" + ) + } + false + } + .map { stillAcquired => + if (!stillAcquired) { + val state = lockState.getAndUpdate { currentState => + if (currentState == LockState.Acquired) LockState.Lost else currentState + } + state match { + case LockState.Acquired => + if (!isClosing && !executorShuttingDown.value) + logger.warn(s"Lock $lockId was lost") + case LockState.Lost | LockState.Free => () + case LockState.Releasing | LockState.Acquiring => scheduleCheckLock(clock.now) + } + } else { + logger.trace(s"Lock $lockId still acquired at $now") + scheduleCheckLock(clock.now) + } + } + + // Do not wait for the lock check to complete, so as not to block other tasks on the clock scheduler + FutureUnlessShutdownUtil.doNotAwaitUnlessShutdown( + synchronizeWithClosing("check-lock")(runLockCheck()), + "Failed to check lock", + ) + } + } + + private def scheduleCheckLock(now: CantonTimestamp): Unit = { + // Add a jitter of up to 20% of the health check period to the scheduled lock check time to scatter the checks, so that they don't all run at the same time + val period = config.healthCheckPeriod.asJava + val jitter = Duration.ofMillis(PseudoRandom.randomLong((period.toMillis * 0.2).toLong)) + + val checkAt = now.add(config.healthCheckPeriod.asJava).add(jitter) + + logger.trace(s"At $now schedule next health check for $checkAt")(TraceContext.empty) + + clock.scheduleAt(checkLock, checkAt).discard + } + + protected implicit def ec: ExecutionContext + + protected def profile: DbStorage.Profile + + protected def clock: Clock + + protected def executorShuttingDown: Eval[Boolean] + + protected def database: Database + + protected def timeouts: ProcessingTimeout + + protected def config: DbLockConfig + + protected def mode: DbLockMode +
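The jittered re-scheduling above spreads the periodic checks out so that many locks sharing a clock don't all fire at once. A stand-alone version of the computation, with `scala.util.Random` standing in for Canton's `PseudoRandom` (a sketch, not the patch's code):

import java.time.{Duration, Instant}
import scala.util.Random

// Next check runs one health-check period plus up to 20% jitter after `now`.
def nextCheckAt(now: Instant, healthCheckPeriod: Duration): Instant = {
  val bound = (healthCheckPeriod.toMillis * 0.2).toLong.max(1L)
  now.plus(healthCheckPeriod).plusMillis(Random.nextLong(bound))
}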
/** Check if the lock is still held by this session. */ + protected[resource] def hasLock(implicit + traceContext: TraceContext + ): EitherT[Future, DbLockError, Boolean] + + /** Internal (DB specific) blocking acquisition of the lock. */ + protected def acquireInternal()(implicit + traceContext: TraceContext + ): EitherT[Future, DbLockError, Unit] + + /** Internal (DB specific) non-blocking acquisition of the lock. */ + protected def tryAcquireInternal()(implicit + traceContext: TraceContext + ): EitherT[Future, DbLockError, Boolean] + + /** Internal (DB specific) release of the lock. */ + @VisibleForTesting + protected[resource] def releaseInternal()(implicit + traceContext: TraceContext + ): EitherT[Future, DbLockError, Unit] + + /** The application-specific lock ID, which remains the same for the lifetime of the lock. */ + def lockId: DbLockId + + /** Return the PIDs of the processes that own this lock. + */ + def getLockOwners()(implicit + traceContext: TraceContext + ): EitherT[Future, DbLockError, Vector[Long]] + + def isAcquired: Boolean = lockState.get() == LockState.Acquired + + def isLost: Boolean = lockState.get() == LockState.Lost + + /** A blocking acquisition of the lock. + * + * Blocks until the lock has been acquired. Only one acquisition attempt may be in flight at a + * time: a second attempt fails even while the first attempt is still pending, and acquiring an + * already-acquired lock fails as well. + */ + def acquire()(implicit traceContext: TraceContext): EitherT[Future, DbLockError, Unit] = + for { + _ <- transition(LockState.Free, LockState.Acquiring, acquireError).toEitherT + _ = logger.trace(s"Acquiring lock $lockId") + _ <- acquireInternal().leftFlatMap(err => acquireInternalError[Unit](err).toEitherT) + _ = transitionOrFail(LockState.Acquiring, LockState.Acquired) + _ = logger.trace(s"Acquired lock $lockId") + _ = scheduleCheckLock(clock.now) + } yield () + + /** A non-blocking acquisition of the lock. + * + * Tries to acquire the lock and immediately returns whether the lock was acquired. As with + * [[acquire]], only one acquisition attempt may be in flight at a time. + */ + def tryAcquire()(implicit traceContext: TraceContext): EitherT[Future, DbLockError, Boolean] = + for { + _ <- transition(LockState.Free, LockState.Acquiring, acquireError).toEitherT + _ = logger.trace(s"Try acquiring lock $lockId") + acquired <- tryAcquireInternal().leftFlatMap(err => + acquireInternalError[Boolean](err).toEitherT + ) + } yield { + if (acquired) { + transitionOrFail(LockState.Acquiring, LockState.Acquired) + logger.trace(s"Acquired lock $lockId") + scheduleCheckLock(clock.now) + } else + transitionOrFail(LockState.Acquiring, LockState.Free) + + acquired + } + + /** Explicitly release the lock.
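A hypothetical usage sketch for the acquire/release API above; the `withDbLock` helper is illustrative and not part of the patch:

import cats.data.EitherT
import cats.instances.future.*
import com.digitalasset.canton.tracing.TraceContext
import scala.concurrent.{ExecutionContext, Future}

// Acquire the lock, run the body, then release the lock. A production
// version would also release on failure of `body`; this sketch keeps the
// happy path only.
def withDbLock[A](lock: DbLock)(body: => EitherT[Future, DbLockError, A])(implicit
    ec: ExecutionContext,
    tc: TraceContext,
): EitherT[Future, DbLockError, A] =
  for {
    _ <- lock.acquire()
    result <- body
    _ <- lock.release()
  } yield result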
*/ + def release()(implicit traceContext: TraceContext): EitherT[Future, DbLockError, Unit] = + for { + _ <- transition(LockState.Acquired, LockState.Releasing, releaseError).toEitherT + _ = logger.trace(s"Releasing lock $lockId") + _ <- releaseInternal().leftFlatMap(err => releaseInternalError(err).toEitherT) + _ = transitionOrFail(LockState.Releasing, LockState.Free) + _ = logger.trace(s"Released lock $lockId") + } yield () + + /** Returns true if the lock is already taken by another session */ + def isTaken(implicit traceContext: TraceContext): EitherT[Future, DbLockError, Boolean] +} + +object DbLock { + + implicit val pretty: Pretty[DbLock] = { + import PrettyUtil.* + prettyOfClass(param("id", _.lockId), param("mode", _.mode)) + } + + private[canton] def isSupported( + profile: DbStorage.Profile + ): Either[String, DbStorage.Profile & DbStorage.DbLockSupport] = + profile match { + case pg: Profile.Postgres => Right(pg) + case _: Profile.H2 => + Left("Database profile must be Postgres but H2 was configured.") + } + + private[resource] sealed trait LockState + private[resource] object LockState { + case object Free extends LockState + case object Acquiring extends LockState + case object Acquired extends LockState + case object Releasing extends LockState + case object Lost extends LockState + } + + private def create( + profile: DbStorage.Profile & DbStorage.DbLockSupport, + database: Database, + lockConfig: DbLockConfig, + lockId: DbLockId, + lockMode: DbLockMode, + timeouts: ProcessingTimeout, + clock: Clock, + loggerFactory: NamedLoggerFactory, + executorShuttingDown: Eval[Boolean] = Eval.now(false), + )(implicit ec: ExecutionContext): DbLock = + profile match { + case pgProfile: DbStorage.Profile.Postgres => + new DbLockPostgres( + pgProfile, + database, + lockId, + lockMode, + lockConfig, + timeouts, + clock, + loggerFactory.append("lockId", lockId.toString), + executorShuttingDown, + ) + } + + def create( + profile: DbStorage.Profile & DbStorage.DbLockSupport, + database: Database, + dbConfig: DbConfig, + lockConfig: DbLockConfig, + lockCounter: DbLockCounter, + lockMode: DbLockMode, + timeouts: ProcessingTimeout, + clock: Clock, + loggerFactory: NamedLoggerFactory, + )(implicit ec: ExecutionContext, traceContext: TraceContext): Either[DbLockError, DbLock] = + for { + // Ensures that the database for the lock only uses one connection + _ <- Either.cond( + database.source.maxConnections.contains(1), + (), + DbLockError.InvalidDatabaseConfig("Database must be configured with max 1 connection"), + ) + lockId <- DbLockId.allocate(dbConfig, lockCounter, loggerFactory) + logger = TracedLogger(DbLock.getClass, loggerFactory) + _ = logger.debug(s"Allocated lock-id $lockId for lock-counter $lockCounter") + lock = create( + profile, + database, + lockConfig, + lockId, + lockMode, + timeouts, + clock, + loggerFactory, + ) + } yield lock + + def create( + profile: DbStorage.Profile & DbStorage.DbLockSupport, + connection: KeepAliveConnection, + lockConfig: DbLockConfig, + lockId: DbLockId, + lockMode: DbLockMode, + timeouts: ProcessingTimeout, + clock: Clock, + loggerFactory: NamedLoggerFactory, + writeExecutor: AsyncExecutorWithShutdown, + )(implicit ec: ExecutionContext): DbLock = { + val logger = loggerFactory.getLogger(DbLock.getClass) + val database = + KeepAliveConnection.createDatabaseFromConnection( + connection, + logger, + writeExecutor, + ) + + val executorShuttingDown = Eval.always(writeExecutor.isShuttingDown) + + create( + profile, + database, + lockConfig, + lockId, + 
lockMode, + timeouts, + clock, + loggerFactory, + executorShuttingDown, + ) + } + +} + +sealed trait DbLockConfigError extends Product with Serializable with PrettyPrinting {} + +sealed trait DbLockError extends Product with Serializable with PrettyPrinting {} + +object DbLockError { + + final case class UnsupportedDatabaseProfile(profile: DbStorage.Profile) extends DbLockError { + override protected def pretty: Pretty[UnsupportedDatabaseProfile] = prettyOfClass( + unnamedParam(_.profile) + ) + } + + final case class UnsupportedDatabaseConfig(dbConfig: DbConfig) extends DbLockError { + override protected def pretty: Pretty[UnsupportedDatabaseConfig] = prettyOfClass( + unnamedParam(_.dbConfig) + ) + } + + final case class InvalidDatabaseConfig(error: String) extends DbLockError { + override protected def pretty: Pretty[InvalidDatabaseConfig] = prettyOfClass( + unnamedParam(_.error.unquoted) + ) + } + + final case class FailedToAcquireLock(lockId: DbLockId, error: String) extends DbLockError { + override protected def pretty: Pretty[FailedToAcquireLock] = + prettyOfClass(param("lockId", _.lockId), param("error", _.error.unquoted)) + } + + final case class LockAcquireRejected(lockId: DbLockId) extends DbLockError { + override protected def pretty: Pretty[LockAcquireRejected] = + prettyOfClass(param("lockId", _.lockId)) + } + + final case class FailedToReleaseLock(lockId: DbLockId, error: String) extends DbLockError { + override protected def pretty: Pretty[FailedToReleaseLock] = + prettyOfClass(param("lockId", _.lockId), param("error", _.error.unquoted)) + } + + final case class LockReleaseRejected(lockId: DbLockId) extends DbLockError { + override def pretty: Pretty[LockReleaseRejected] = + prettyOfClass(param("lockId", _.lockId)) + } + + final case class FailedToCheckLock(lockId: DbLockId, error: String) extends DbLockError { + override protected def pretty: Pretty[FailedToCheckLock] = + prettyOfClass(param("lockId", _.lockId), param("error", _.error.unquoted)) + + } + + final case class FailedToGetLockOwners(lockId: DbLockId, error: String) extends DbLockError { + override protected def pretty: Pretty[FailedToGetLockOwners] = + prettyOfClass(param("lockId", _.lockId), param("error", _.error.unquoted)) + + } + + final case class LockCheckRejected(lockId: DbLockId) extends DbLockError { + override protected def pretty: Pretty[LockCheckRejected] = + prettyOfClass(param("lockId", _.lockId)) + + } + + final case class LockAlreadyAcquired(lockId: DbLockId) extends DbLockError { + override protected def pretty: Pretty[LockAlreadyAcquired] = + prettyOfClass(param("lockId", _.lockId)) + } + + final case class LockAlreadyReleased(lockId: DbLockId) extends DbLockError { + override protected def pretty: Pretty[LockAlreadyReleased] = + prettyOfClass(param("lockId", _.lockId)) + } + + final case class LockAlreadyInAcquisition(lockId: DbLockId) extends DbLockError { + override protected def pretty: Pretty[LockAlreadyInAcquisition] = + prettyOfClass(param("lockId", _.lockId)) + } + + final case class LockAlreadyInReleasing(lockId: DbLockId) extends DbLockError { + override protected def pretty: Pretty[LockAlreadyInReleasing] = + prettyOfClass(param("lockId", _.lockId)) + } + + final case class LockNotFree(lockId: DbLockId, error: String) extends DbLockError { + override protected def pretty: Pretty[LockNotFree] = + prettyOfClass(param("lockId", _.lockId), param("error", _.error.unquoted)) + } + + final case class LockNotAcquired(lockId: DbLockId, error: String) extends DbLockError { + override protected 
def pretty: Pretty[LockNotAcquired] = + prettyOfClass(param("lockId", _.lockId), param("error", _.error.unquoted)) + + } + + final case class LostLock(lockId: DbLockId) extends DbLockError { + override protected def pretty: Pretty[LostLock] = + prettyOfClass(param("lockId", _.lockId)) + } + + final case class LockInvalidState(lockId: DbLockId, error: String) extends DbLockError { + override protected def pretty: Pretty[LockInvalidState] = + prettyOfClass(param("lockId", _.lockId), param("error", _.error.unquoted)) + + } +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbLockPostgres.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbLockPostgres.scala new file mode 100644 index 0000000000..8806cdf99f --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbLockPostgres.scala @@ -0,0 +1,200 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import cats.Eval +import cats.data.EitherT +import cats.instances.future.* +import cats.syntax.bifunctor.* +import cats.syntax.either.* +import com.digitalasset.canton.config.{DbConfig, DbLockConfig, ProcessingTimeout} +import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, TracedLogger} +import com.digitalasset.canton.resource.DbStorage.NoConnectionAvailable +import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.EitherTUtil +import com.digitalasset.canton.util.ShowUtil.* +import org.postgresql.Driver +import slick.jdbc.JdbcBackend.Database +import slick.jdbc.canton.SQLActionBuilder + +import java.util.concurrent.RejectedExecutionException +import scala.concurrent.{ExecutionContext, Future} + +/** A Postgres DB lock using advisory locks */ +class DbLockPostgres private[resource] ( + override protected val profile: DbStorage.Profile.Postgres, + override protected val database: Database, + override val lockId: DbLockId, + override protected val mode: DbLockMode, + override protected val config: DbLockConfig, + override protected val timeouts: ProcessingTimeout, + override protected val clock: Clock, + protected val loggerFactory: NamedLoggerFactory, + override protected val executorShuttingDown: Eval[Boolean], +)(override protected implicit val ec: ExecutionContext) + extends DbLock { + + import com.digitalasset.canton.resource.DbStorage.Implicits.BuilderChain.* + import slick.jdbc.canton.ActionBasedSQLInterpolation.Implicits.* + + /** Returns the list of process PIDs owning the lock. 
+ */ + def getLockOwners()(implicit + traceContext: TraceContext + ): EitherT[Future, DbLockError, Vector[Long]] = { + val lockCheckQuery: SQLActionBuilder = + sql"""select pid from pg_locks where locktype = 'advisory' and objid = $lockId and granted = true""" + + EitherTUtil + .fromFuture( + database.run(lockCheckQuery.as[Long]), + { + case _: RejectedExecutionException | _: NoConnectionAvailable => + DbLockError.LockCheckRejected(lockId) + case err => DbLockError.FailedToGetLockOwners(lockId, show"$err") + }, + ) + .leftWiden[DbLockError] + } + + private def lockCheck( + pidFilter: SQLActionBuilder + )(implicit traceContext: TraceContext): EitherT[Future, DbLockError, Boolean] = { + import profile.DbStorageAPI.jdbcActionExtensionMethods + + val lockCheckQuery: SQLActionBuilder = + sql"""select 1 from pg_locks where locktype = 'advisory' and objid = $lockId and granted = true and """ ++ + pidFilter ++ sql""" limit 1""" + + // Set an explicit timeout on the lock check query enforced by the server + val lockCheckAction = lockCheckQuery + .as[Int] + .headOption + .withStatementParameters(statementInit = + _.setQueryTimeout(config.healthCheckTimeout.toInternal.toSecondsTruncated(logger).unwrap) + ) + + EitherTUtil + .fromFuture( + database.run(lockCheckAction).map(_.isDefined), + { + case _: RejectedExecutionException | _: NoConnectionAvailable => + DbLockError.LockCheckRejected(lockId) + case err => DbLockError.FailedToCheckLock(lockId, show"$err") + }, + ) + .leftWiden[DbLockError] + } + + private lazy val modeSuffix: String = mode match { + case DbLockMode.Shared => "_shared" + case DbLockMode.Exclusive => "" + } + + override protected[resource] def hasLock(implicit + traceContext: TraceContext + ): EitherT[Future, DbLockError, Boolean] = + lockCheck(sql"pid = pg_backend_pid()") + + override def acquireInternal()(implicit + traceContext: TraceContext + ): EitherT[Future, DbLockError, Unit] = { + val lockQuery = sql"select pg_advisory_lock#$modeSuffix($lockId)".as[String].map(_ => ()) + + EitherTUtil.fromFuture( + database.run(lockQuery), + { + case _: RejectedExecutionException | _: NoConnectionAvailable => + DbLockError.LockAcquireRejected(lockId) + case err => DbLockError.FailedToAcquireLock(lockId, show"$err") + }, + ) + } + + override def tryAcquireInternal()(implicit + traceContext: TraceContext + ): EitherT[Future, DbLockError, Boolean] = { + val lockQuery = sql"select pg_try_advisory_lock#$modeSuffix($lockId)".as[Boolean].headOption + + for { + acquiredO <- EitherTUtil.fromFuture( + database.run(lockQuery), + { + case _: RejectedExecutionException | _: NoConnectionAvailable => + DbLockError.LockAcquireRejected(lockId) + case err => DbLockError.FailedToAcquireLock(lockId, show"$err") + }, + ) + acquired <- acquiredO + .toRight[DbLockError]( + DbLockError.FailedToAcquireLock(lockId, s"No result from trying to acquire lock") + ) + .toEitherT + } yield acquired + } + + override def releaseInternal()(implicit + traceContext: TraceContext + ): EitherT[Future, DbLockError, Unit] = { + val releaseQuery = sql"select pg_advisory_unlock#$modeSuffix($lockId)".as[Boolean].headOption + + for { + releasedO <- EitherTUtil.fromFuture( + database.run(releaseQuery), + { + case _: RejectedExecutionException | _: NoConnectionAvailable => + DbLockError.LockReleaseRejected(lockId) + case err => DbLockError.FailedToReleaseLock(lockId, show"$err") + }, + ) + released <- releasedO + .toRight(DbLockError.FailedToReleaseLock(lockId, s"No result from trying to release lock")) + .toEitherT + _ <- EitherT + 
.cond(released, (), DbLockError.FailedToReleaseLock(lockId, s"Lock was not released")) + .leftWiden[DbLockError] + } yield () + } + + override def isTaken(implicit traceContext: TraceContext): EitherT[Future, DbLockError, Boolean] = + lockCheck(sql"pid != pg_backend_pid()") +} + +object PostgresDbLock { + + @SuppressWarnings(Array("org.wartremover.warts.Null")) + private[resource] def allocateLockId(config: DbConfig.Postgres, counter: DbLockCounter)( + loggerFactory: NamedLoggerFactory + )(implicit traceContext: TraceContext): Either[DbLockError, DbLockId] = { + + import slick.util.ConfigExtensionMethods.* + + implicit val loggingContext = + ErrorLoggingContext.fromTracedLogger( + TracedLogger(loggerFactory.getLogger(classOf[DbLockPostgres])) + ) + + for { + // Use the DB name as the scope of the lock id + lockScope <- config.config + .getStringOpt("properties.databaseName") + .orElse { + config.config.getStringOpt("url").flatMap { url => + val props = Driver.parseURL(url, null) + Option(props.getProperty("PGDBNAME")) + } + } + .toRight[DbLockError]( + DbLockError.InvalidDatabaseConfig("Unable to extract DB name for lock id scope") + ) + } yield { + val lockId = DbLockId.create(lockScope, counter) + loggingContext.debug( + s"Allocated new lock ID $lockId for scope $lockScope and counter $counter" + ) + lockId + } + } +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbLockedConnection.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbLockedConnection.scala new file mode 100644 index 0000000000..6d5b6dde0a --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbLockedConnection.scala @@ -0,0 +1,912 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
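Before the DbLockedConnection implementation: for reference, the session-level advisory-lock semantics that `DbLockPostgres` relies on can be observed with two plain JDBC connections. This is a sketch only; the URL and credentials are placeholders:

import java.sql.{Connection, DriverManager}

val url = "jdbc:postgresql://localhost:5432/postgres" // placeholder
def connect(): Connection = DriverManager.getConnection(url, "postgres", "postgres")

def tryLock(c: Connection, id: Long): Boolean = {
  val rs = c.createStatement().executeQuery(s"select pg_try_advisory_lock($id)")
  rs.next()
  rs.getBoolean(1)
}

val (a, b) = (connect(), connect())
assert(tryLock(a, 42L))  // session A takes the lock
assert(!tryLock(b, 42L)) // session B cannot take it while A's session holds it
a.close()                // ending A's session releases the lock server-side
assert(tryLock(b, 42L))  // ... so B can acquire it now

This session binding is exactly why the code below pins the lock to a single dedicated connection and treats a lost connection as a lost lock.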
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import cats.data.EitherT +import cats.syntax.either.* +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.checked +import com.digitalasset.canton.concurrent.{FutureSupervisor, Threading} +import com.digitalasset.canton.config.{ + DbLockedConnectionConfig, + PositiveFiniteDuration as PositiveFiniteDurationConfig, + ProcessingTimeout, +} +import com.digitalasset.canton.crypto.PseudoRandom +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.lifecycle.* +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.logging.{ + ErrorLoggingContext, + NamedLoggerFactory, + NamedLogging, + TracedLogger, +} +import com.digitalasset.canton.resource.DbStorage.Profile +import com.digitalasset.canton.time.{Clock, PositiveFiniteDuration} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.* +import com.digitalasset.canton.util.ShowUtil.* +import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.util.retry.{AllExceptionRetryPolicy, RetryEither, Success} +import com.google.common.annotations.VisibleForTesting +import com.typesafe.config.{Config, ConfigFactory} +import org.slf4j.event.Level +import slick.dbio.DBIO +import slick.jdbc.{DataSourceJdbcDataSource, JdbcDataSource} +import slick.util.{AsyncExecutorWithShutdown, ClassLoaderUtil} + +import java.io.EOFException +import java.sql.{Connection, SQLException} +import java.time.Duration +import java.util.concurrent.atomic.AtomicReference +import scala.concurrent.duration.* +import scala.concurrent.{ExecutionContext, Future, TimeoutException, blocking} +import scala.jdk.CollectionConverters.* +import scala.util.control.Exception.handling +import scala.util.{Failure, Try} + +/** Maintains the combination of a persistent DB connection and a DB lock acquired on that + * connection. + * + * If the connection is closed or becomes invalid, it will try to rebuild the connection and + * re-acquire the lock. 
+ */ +class DbLockedConnection private ( + createConnection: () => EitherT[Future, String, KeepAliveConnection], + createLock: KeepAliveConnection => DbLock, + config: DbLockedConnectionConfig, + checkReadOnly: KeepAliveConnection => EitherT[Future, String, Boolean], + private[resource] val lockId: DbLockId, + exitOnFatalFailures: Boolean, + clock: Clock, + override protected val timeouts: ProcessingTimeout, + override protected val loggerFactory: NamedLoggerFactory, + futureSupervisor: FutureSupervisor, + writeExecutor: AsyncExecutorWithShutdown, + logLockOwnersOnLockAcquisitionAttempt: Boolean, +)(implicit ec: ExecutionContext) + extends NamedLogging + with FlagCloseable + with HasCloseContext + with StateMachine[DbLockedConnection.State] { + + import DbLockedConnection.* + + private val execQueue = + new SimpleExecutionQueue( + "db-locked-connection-queue", + futureSupervisor, + timeouts, + loggerFactory, + crashOnFailure = exitOnFatalFailures, + ) + + override protected val stateRef: AtomicReference[State] = new AtomicReference( + State.Init + ) + + @VisibleForTesting + private[resource] def state: State = stateRef.get() + + private def withLockOwners( + lock: DbLock + )(f: Vector[Long] => Unit)(implicit traceContext: TraceContext): Future[Unit] = + lock + .getLockOwners() + .value + .transform { + case util.Success(Right(lockOwners)) => + util.Success(f(lockOwners)) + case util.Success(Left(err)) => + util.Success(logger.debug(s"Failed to get lock owners for lock ${lock.lockId}: $err")) + case Failure(err) => + util.Success(logger.debug(s"Failed to get lock owners for lock ${lock.lockId}", err)) + } + + /** Rebuild the DB connection until successfully reconnected or shutdown. + * + * Retries indefinitely to connect to the database. + */ + private def rebuildConnection()(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[KeepAliveConnection] = + retry + .Backoff( + logger, + this, + retry.Forever, + 100.millis, + 10.seconds, + operationName = functionFullName, + retryLogLevel = Some(Level.INFO), + ) + .unlessShutdown( + createConnection().mapK(FutureUnlessShutdown.outcomeK).value, + AllExceptionRetryPolicy, + ) + // Due to the infinite retry we must get a connection here + .map(_.valueOr(err => ErrorUtil.invalidState(s"Failed to establish DB connection: $err"))) + + /** Attempt to acquire the DB lock with customizable retry config + */ + private def becomeActiveWithRetries( + connection: KeepAliveConnection, + lock: DbLock, + maxRetries: Int, + retryInterval: PositiveFiniteDurationConfig, + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Either[DbLockError, Boolean]] = { + implicit val success: Success[Either[DbLockError, Boolean]] = Success.apply { + case Right(false) => + // Lock acquisition was not successful, retry + false + case Left(DbLockError.LockAcquireRejected(_)) => + // Lock acquisition failed due to contention in slick, retry + false + case Left(err) => + // Failed with an error (e.g., connection problem, and not contention), stop the retry + true + case Right(true) => + // Lock acquisition was successful, stop the retry + true + } + + FutureUnlessShutdownUtil.logOnFailureUnlessShutdown( + retry + .Pause( + logger, + this, + maxRetries, + retryInterval.underlying, + operationName = functionFullName, + retryLogLevel = Some(Level.DEBUG), + ) + .unlessShutdown( + synchronizeWithClosingF(functionFullName) { + lock.tryAcquire().value.thereafterF { + case scala.util.Success(Right(true)) if logLockOwnersOnLockAcquisitionAttempt => + 
withLockOwners(lock)(lockOwners => + logger.debug( + s"Lock successfully acquired, the following PIDs now own it: ${lockOwners.mkString(", ")}" + ) + ) + case scala.util.Success(Right(false)) if logLockOwnersOnLockAcquisitionAttempt => + // When the lock could not be acquired, check who already owns the lock + withLockOwners(lock)(lockOwners => + logger.debug( + s"Failed to acquire lock, the following PIDs already own it: ${lockOwners.mkString(", ")}" + ) + ) + case _ => + // In case of connection/lock acquisition errors, do not check lock owners + Future.unit + } + }, + AllExceptionRetryPolicy, + ) + .tapOnShutdown { + logger.debug(s"Aborting becoming active due to shutdown, closing connection and lock..") + closeLockedConnection(connection, lock, Level.DEBUG) + }, + "failed to acquire lock and become active", + level = Level.INFO, + ) + + } + + // For the initial attempt to become active, a shorter retry interval is used. + // Regardless of the outcome, if the node is not active after this initial attempt, it keeps retrying in the background. + private def initialBecomeActive( + connection: KeepAliveConnection, + lock: DbLock, + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { + val initialAttempt = becomeActiveWithRetries( + connection, + lock, + config.initialAcquisitionMaxRetries, + config.initialAcquisitionInterval, + ) + + initialAttempt.transformIntoSuccess { + case util.Success(UnlessShutdown.AbortedDueToShutdown) => + UnlessShutdown.AbortedDueToShutdown + case util.Success(UnlessShutdown.Outcome(Right(true))) => + // We already became active on the first try + UnlessShutdown.Outcome(()) + case _ => + // The initial attempt failed or we did not become active, try again in the background + UnlessShutdown.Outcome(becomeActive(connection, lock)) + } + } + + /** Attempt to acquire the DB lock until successfully acquired, a failure occurs, or shutdown. + * + * Retries indefinitely if the acquisition fails.
+ */ + private def becomeActive( + connection: KeepAliveConnection, + lock: DbLock, + )(implicit traceContext: TraceContext): Unit = { + + logger.debug(s"Trying to become active..") + + val becomeActiveF = becomeActiveWithRetries( + connection, + lock, + retry.Forever, + config.passiveCheckPeriod, + ).map { + case Left(err) => logger.info(s"Failed to become active: $err") + case Right(false) => logger.debug("Failed to acquire lock and become active") + case Right(true) => logger.trace("Successfully became active") + } + + FutureUtil.doNotAwait( + becomeActiveF.unwrap, + "failed to acquire lock and become active", + level = Level.INFO, + ) + } + + private def connectAndBecomeActive(init: Boolean)(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, DbLockError, State.Connected] = + for { + // Rebuild the database connection + newConn <- EitherT.right(rebuildConnection()) + _ = logger.debug("Succeeded to reconnect to database") + + _ = logger.debug(s"Creating new DB lock with $newConn") + newLock = createLock(newConn) + + // During initialization, wait for the lock acquisition to complete before returning to avoid racing the node initialization + _ <- + if (init) + EitherT + .liftF[FutureUnlessShutdown, DbLockError, Unit]( + initialBecomeActive(newConn, newLock) + ) + // Otherwise run it in the background + else + EitherT.pure[FutureUnlessShutdown, DbLockError]( + becomeActive(newConn, newLock) + ) + } yield State.Connected(newConn, newLock) + + private def buildLockedConnection(fromState: State)(implicit traceContext: TraceContext): Unit = { + + transitionOrFail( + fromState, + State.Recovering, + ) + + val initial = fromState == State.Init + + val result = for { + + // Retry to connect to the database and become active. If the lock acquisition fails with an error, we rebuild the connection + connected <- retry + .Backoff( + logger, + this, + retry.Forever, + 500.millis, + 10.second, + operationName = functionFullName, + ) + .unlessShutdown(connectAndBecomeActive(initial).value, AllExceptionRetryPolicy) + .map(_.valueOr(err => ErrorUtil.invalidState(s"Failed to connect and become active: $err"))) + .tapOnShutdown(logger.debug("Stopped recovery due to shutdown")) + + _ <- FutureUnlessShutdown.lift( + synchronizeWithClosingSync("transitioning to connected state")( + transitionOrFail(State.Recovering, connected) + ) + ) + _ = logger.info("Successfully rebuilt connection") + + // Schedule next health check from the current time when we have recovered + _ = scheduleCheckConnection(clock.now) + } yield { + logger.debug("Successfully finished locked connection rebuild") + } + + // We don't need to wait for the future to complete here, the next connection check is only scheduled when the recovery is completed + FutureUtil.doNotAwait(result.unwrap, "Failed to recover locked connection") + } + + private def closeLockedConnection(connection: KeepAliveConnection, lock: DbLock, logLevel: Level)( + implicit traceContext: TraceContext + ): Unit = { + // Stop the health check of the lock, this does not release the lock if acquired. + lock.close() + // Close the database connection. If the lock was acquired it is released due to the connection closing. 
+ connection.closeUnderlying(logLevel) + } + + private def runLockedConnectionCheck(now: CantonTimestamp): Unit = { + import TraceContext.Implicits.Empty.* + + FutureUnlessShutdownUtil.doNotAwaitUnlessShutdown( + synchronizeWithClosing("check-locked-connection")(checkHealth(now)), + "check-locked-connection", + closeContext = Some(closeContext), + ) + } + + private def scheduleCheckConnection(now: CantonTimestamp): Unit = { + // Add a jitter of 20% of the health check period to the scheduled connection check time to scatter the checks, such that they don't run at the same time + val period = config.healthCheckPeriod.asJava + val jitter = Duration.ofMillis(PseudoRandom.randomLong((period.toMillis * 0.2).toLong)) + + val checkAt = now.add(config.healthCheckPeriod.asJava).add(jitter) + + logger.trace(s"At $now schedule next health check for $checkAt")(TraceContext.empty) + + clock + .scheduleAt(runLockedConnectionCheck, checkAt) + .discard + } + + private def checkHealth(now: CantonTimestamp)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Unit] = { + def checkLockedConnection(now: CantonTimestamp): Unit = { + val state = stateRef.get() + state match { + case State.Init => + logger.debug("Building initial DB connection and lock") + buildLockedConnection(State.Init) + + case State.SetPassive => + logger.debug("Connection was closed to become passive, trying to become active again") + buildLockedConnection(State.SetPassive) + + case State.Connected(connection, lock) => + logger.trace(s"Checking if DB locked connection $connection and $lock is healthy at $now") + + // Check the health of the DB connection and that the lock was not lost + val isValid = + checkConnection( + connection, + checkReadOnly, + config.connectionTimeout.toInternal, + timeouts, + logger, + ) && !lock.isLost + + if (!isValid) { + // Connection and/or lock was lost thus setting the connected state to lost + val prevState = stateRef.getAndUpdate { + case _: State.Connected => State.Lost + case s => s + } + + prevState match { + case _: State.Connected if writeExecutor.isShuttingDown => + logger.debug( + s"Locked connection was lost but write executor is shutting down. Skipping re-connection." + ) + case _: State.Connected => + logger.warn(s"Locked connection was lost, trying to rebuild") + logger.debug(s"Trying to close defunct connection $connection before recovery") + // Try to close the defunct connection and lock before rebuilding the pair + closeLockedConnection(connection, lock, Level.DEBUG) + buildLockedConnection(State.Lost) + case State.Disconnecting | State.Disconnected => + logger.debug(s"Skipping connection recovery due to closing") + case State.Init | State.Lost | State.Recovering | State.SetPassive => + ErrorUtil.invalidState(s"Invalid state for recovery: $prevState") + } + } else { + logger.trace("Locked connection is healthy") + scheduleCheckConnection(clock.now) + } + + case State.Disconnecting | State.Disconnected => + if (isClosing) + logger.debug(s"Skipping connection check due to closing") + else + ErrorUtil.invalidState(s"Closing state $state for connection but closing flag not set") + + case State.Lost | State.Recovering => + ErrorUtil.invalidState(s"Invalid state for connection check: $state") + } + } + + execQueue.execute(Future(checkLockedConnection(now)), "check-health") + } + + /** Returns true if the connection is valid and the lock is held. + * + * It actually performs the checks instead of returning a cached result from the periodic health + * checks. 
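+    * It is used, for example, by the connection pool when ramping up, to re-validate that the
+    * main lock is still held before the pool is declared active.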
+    */
+  private[resource] def isActiveNow()(implicit
+      traceContext: TraceContext
+  ): EitherT[Future, String, Boolean] =
+    stateRef.get() match {
+      case State.Connected(connection, lock) =>
+        for {
+          hasLock <- lock.hasLock.leftMap(err => s"Failed to check lock: $err")
+          isValid = checkConnection(
+            connection,
+            checkReadOnly,
+            config.connectionTimeout.toInternal,
+            timeouts,
+            logger,
+          )
+        } yield hasLock && isValid
+      case _ => EitherT.rightT(false)
+    }
+
+  // Run the initial connection check
+  runLockedConnectionCheck(clock.now)
+
+  /** Returns a connection only if the connection is available and the lock has been acquired.
+    *
+    * The consumer should get a connection when needed and not cache the returned connection to
+    * ensure that the connection is healthy when used.
+    */
+  def get: Either[DbLockedConnectionError, KeepAliveConnection] =
+    stateRef.get() match {
+      case State.Connected(connection, lock) =>
+        if (lock.isAcquired) Right(connection)
+        else Left(DbLockedConnectionError.DbLockNotAcquired)
+      case state => Left(DbLockedConnectionError.DbConnectionNotAvailable(state))
+    }
+
+  def isActive: Boolean = get.isRight
+
+  /** Gracefully become passive: close the connection to release the lock so that another replica
+    * gets the chance to become active. Fails if this connection is not connected or has not
+    * acquired the lock.
+    */
+  def setPassive()(implicit
+      traceContext: TraceContext
+  ): EitherT[FutureUnlessShutdown, String, Unit] = {
+    def setPassiveInternal() =
+      stateRef.get() match {
+        case State.Connected(connection, lock) if lock.isAcquired =>
+          logger.info(
+            s"Closing the connection $connection to release the lock $lock to become passive"
+          )
+
+          // Close the main connection to release the lock
+          closeLockedConnection(connection, lock, Level.INFO)
+
+          transitionOrFail[State.Connected](State.SetPassive)
+
+          logger.info(s"Waiting for the other replica to try to become active")
+
+          // Wait for twice the passive check period to give the other replica a chance to acquire the lock
+          Threading.sleep((config.passiveCheckPeriod * 2).asJava.toMillis)
+
+          Either.unit
+        case s =>
+          Left(s"Connection is not connected or active: $s")
+      }
+
+    // Run health check and set passive through the execution queue to ensure they don't interfere
+    execQueue.executeEUS(EitherT.fromEither(setPassiveInternal()), "set-passive")
+  }
+
+  override def onClosed(): Unit = {
+    def getConnectedOrRecovering(
+        state: State
+    ): Option[Either[State.Recovering.type, State.Connected]] = state match {
+      case c: State.Connected => Some(Right(c))
+      case State.Recovering => Some(Left(State.Recovering))
+      case _ => None
+    }
+
+    def closeConnectionWarn(warnState: State)(implicit traceContext: TraceContext): Unit =
+      logger.debug(s"Not closing connection as connection is $warnState")
+
+    TraceContext.withNewTraceContext("close_locked_connection") { implicit traceContext =>
+      logger.debug(s"Closing DB-locked connection")
+
+      transitionEither[Unit, State.Recovering.type, State.Connected](
+        getConnectedOrRecovering(_),
+        State.Disconnecting,
+        closeConnectionWarn,
+      )
+        .foreach {
+          case Right(connected) =>
+            closeLockedConnection(connected.connection, connected.lock, Level.INFO)
+            transitionOrFail(State.Disconnecting, State.Disconnected)
+            logger.debug(s"Closed DB-locked connection: ${connected.connection}")
+          case Left(State.Recovering) =>
+            transitionOrFail(State.Disconnecting, State.Disconnected)
+            logger.debug(s"Recovering connection is now disconnected")
+        }
+    }
+  }
+
+}
+
+object DbLockedConnection {
+
+  sealed trait State extends Product with
Serializable with PrettyPrinting
+  object State {
+
+    // The initial state
+    case object Init extends State {
+      override protected def pretty: Pretty[Init.type] = prettyOfObject[Init.type]
+    }
+
+    // The healthy state with a connection and db lock
+    final case class Connected(connection: KeepAliveConnection, lock: DbLock) extends State {
+      override protected def pretty: Pretty[Connected] =
+        prettyOfClass(param("connection", _.connection), param("lock", _.lock))
+    }
+
+    // The database connection is explicitly being closed
+    case object Disconnecting extends State {
+      override protected def pretty: Pretty[Disconnecting.type] = prettyOfObject[Disconnecting.type]
+
+    }
+    case object Disconnected extends State {
+      override protected def pretty: Pretty[Disconnected.type] = prettyOfObject[Disconnected.type]
+    }
+
+    // The database connection has been lost and needs to be recovered
+    case object Lost extends State {
+      override protected def pretty: Pretty[Lost.type] = prettyOfObject[Lost.type]
+    }
+    case object Recovering extends State {
+      override protected def pretty: Pretty[Recovering.type] = prettyOfObject[Recovering.type]
+    }
+
+    // The connection was gracefully closed to become passive
+    case object SetPassive extends State {
+      override protected def pretty: Pretty[SetPassive.type] = prettyOfObject[SetPassive.type]
+    }
+  }
+
+  /** Returns true if the connection is valid. */
+  @SuppressWarnings(Array("org.wartremover.warts.TryPartial"))
+  private def checkConnection(
+      connection: KeepAliveConnection,
+      checkReadOnly: KeepAliveConnection => EitherT[Future, String, Boolean],
+      connectionTimeout: PositiveFiniteDuration,
+      timeouts: ProcessingTimeout,
+      logger: TracedLogger,
+  )(implicit
+      traceContext: TraceContext,
+      executionContext: ExecutionContext,
+      errorLoggingContext: ErrorLoggingContext,
+  ): Boolean =
+    if (connection.markInUse()) {
+
+      logger.trace(s"Checking if connection $connection is valid")
+
+      val isValid =
+        try {
+          blocking {
+            // isValid only throws when the provided timeout is < 0
+            checked {
+              connection.isValid(connectionTimeout.toSecondsTruncated(logger).unwrap)
+            }
+          }
+        } finally {
+          // Although isValid should not throw any exception with a timeout >= 0, make sure that we will always mark the connection as free again
+          connection.markFree()
+        }
+
+      if (isValid) {
+        logger.trace(s"Connection $connection is valid, checking if connection is read-only")
+
+        // If the connection is valid, further check that the connection is not read-only
+        val isReadOnly = Try {
+          timeouts.network.await("connection check read-only") {
+            checkReadOnly(connection).valueOr { err =>
+              logger.debug(s"Failed to check if connection is read-only: $err")
+              // Assume connection is NOT read-only, e.g., due to contention
+              false
+            }
+          }
+        }.recover { case _: TimeoutException =>
+          logger.debug(s"Read-only check timed out, assuming connection is not read-only")
+          false
+        }.get // Currently exceptions of the read-only check are returned as Left values and the timeout exception is explicitly handled above
+
+        if (isReadOnly)
+          logger.info(s"Connection $connection is read-only")
+        else
+          logger.trace(s"Connection $connection is not read-only")
+
+        !isReadOnly
+      } else {
+        logger.info(s"Connection $connection is NOT valid")
+        false
+      }
+    } else {
+      logger.trace(s"Skip connection $connection check because the connection is in use")
+      true
+    }
+
+  private[resource] def awaitConnection(
+      connection: DbLockedConnection,
+      maxRetries: Int,
+      waitInMs: Long,
+      stop: Option[DbLockedConnectionError => Boolean],
+      logger: TracedLogger,
+  )(implicit
+      traceContext: TraceContext,
+      closeContext: CloseContext,
+  ): EitherT[UnlessShutdown, DbLockedConnectionError, Connection] =
+    RetryEither.retry[DbLockedConnectionError, Connection](
+      maxRetries,
+      waitInMs,
+      functionFullName,
+      stop,
+      retryLogLevel = Level.DEBUG,
+      failLogLevel = Level.INFO,
+    ) {
+      connection.get
+    }(ErrorLoggingContext.fromTracedLogger(logger), closeContext)
+
+  private[resource] def awaitActive(
+      connection: DbLockedConnection,
+      maxRetries: Int,
+      waitInMs: Long,
+      logger: TracedLogger,
+  )(implicit
+      traceContext: TraceContext,
+      closeContext: CloseContext,
+  ): EitherT[UnlessShutdown, DbLockedConnectionError, Unit] =
+    awaitConnection(connection, maxRetries, waitInMs, None, logger).map(_ => ())
+
+  /** Waits for the locked connection to be either active or passive. */
+  private[resource] def awaitInitialized(
+      connection: DbLockedConnection,
+      maxRetries: Int,
+      waitInMs: Long,
+      logger: TracedLogger,
+  )(implicit traceContext: TraceContext, closeContext: CloseContext): Unit = {
+    def successOnPassive(error: DbLockedConnectionError): Boolean = error match {
+      case DbLockedConnectionError.DbLockNotAcquired => true
+      case _: DbLockedConnectionError.DbConnectionNotAvailable => false
+    }
+
+    awaitConnection(connection, maxRetries, waitInMs, Some(successOnPassive), logger).discard
+  }
+
+  @SuppressWarnings(Array("org.wartremover.warts.Null"))
+  private[resource] def createDataSource(
+      baseDbConfig: Config,
+      poolSize: Int,
+      connectionTimeout: PositiveFiniteDuration,
+  )(implicit errorLoggingContext: ErrorLoggingContext): Either[String, DataSourceJdbcDataSource] = {
+    import slick.util.ConfigExtensionMethods.*
+
+    val logger = errorLoggingContext.logger
+
+    for {
+      // Sanity check that the user has not configured connection pooling
+      _ <- Either.cond(
+        !baseDbConfig
+          .getStringOpt("dataSourceClassName")
+          .orElse(baseDbConfig.getStringOpt("dataSourceClass"))
+          .contains("org.postgresql.ds.PGConnectionPoolDataSource"),
+        (),
+        s"The data source should not be configured with connection pooling, use `org.postgresql.ds.PGSimpleDataSource` instead",
+      )
+
+      dbConfig = ConfigFactory
+        .parseMap(
+          Map[String, Any](
+            "keepAliveConnection" -> false, // We use our own keep alive connections
+            "maxConnections" -> poolSize,
+            "connectionPool" -> "disabled", // Explicitly disable HikariCP to have full control over the connections
+          ).asJava
+        )
+        .withFallback(baseDbConfig)
+
+      ds = JdbcDataSource.forConfig(
+        c = dbConfig,
+        driver = null,
+        name = "",
+        classLoader = ClassLoaderUtil.defaultClassLoader,
+      )
+    } yield {
+      // Set up the data source
+      ds match {
+        case jdbcDS: DataSourceJdbcDataSource =>
+          jdbcDS.ds.setLoginTimeout(
+            connectionTimeout.toSecondsTruncated(logger)(errorLoggingContext.traceContext).unwrap
+          )
+          jdbcDS
+        case invalidDS =>
+          ErrorUtil.invalidState(s"Got invalid datasource from configuration: $invalidDS")
+      }
+    }
+  }
+
+  private[resource] def create(
+      profile: DbStorage.Profile with DbStorage.DbLockSupport,
+      ds: DataSourceJdbcDataSource, // We don't use a HikariCP data source to have full control over the connections
+      lockId: DbLockId,
+      lockMode: DbLockMode,
+      connectionConfig: DbLockedConnectionConfig,
+      isMainConnection: Boolean,
+      timeouts: ProcessingTimeout,
+      exitOnFatalFailures: Boolean,
+      clock: Clock,
+      loggerFactory: NamedLoggerFactory,
+      futureSupervisor: FutureSupervisor,
+      writeExecutor: AsyncExecutorWithShutdown,
+      logLockOwnersOnLockAcquisitionAttempt: Boolean,
+  )(implicit
executionContext: ExecutionContext, + traceContext: TraceContext, + ): DbLockedConnection = { + + val logger = loggerFactory.getLogger(DbLockedConnection.getClass) + val tracedLogger = TracedLogger(logger) + + def initConnection(connection: KeepAliveConnection): EitherT[Future, String, Unit] = for { + _ <- Either + .catchOnly[SQLException](connection.setAutoCommit(true)) + .leftMap(err => show"Failed to set autocommit to true: $err") + .toEitherT[Future] + _ <- profile match { + case _: Profile.Postgres => + import slick.jdbc.canton.ActionBasedSQLInterpolation.Implicits.* + val db = KeepAliveConnection.createDatabaseFromConnection( + connection, + logger, + writeExecutor, + ) + + // Set an explicit network timeout on the main connection as we do not run any (long) user queries on that connection + if (isMainConnection) + connection.setNetworkTimeout( + Threading.directExecutionContext(logger), + connectionConfig.healthCheckTimeout.duration.toMillis.toInt, + ) + + def toSecondsOrDefault(d: Option[PositiveFiniteDuration]): Int = + d.fold(0)(_.toSecondsTruncated(tracedLogger).unwrap) + + // Using 0 values results in using the system defaults + val keepAliveIdle = toSecondsOrDefault(connectionConfig.keepAliveIdle.map(_.toInternal)) + val keepAliveInterval = toSecondsOrDefault( + connectionConfig.keepAliveInterval.map(_.toInternal) + ) + val keepAliveCount = connectionConfig.keepAliveCount.getOrElse(0) + val setKeepAliveSettings = DBIO + .seq( + sqlu"SET tcp_keepalives_idle TO #$keepAliveIdle", + sqlu"SET tcp_keepalives_interval TO #$keepAliveInterval", + sqlu"SET tcp_keepalives_count TO #$keepAliveCount", + ) + + EitherTUtil.fromFuture( + db.run(setKeepAliveSettings), + err => show"Failed to initialize new connection: $err", + ) + } + } yield () + + def createConnection(): EitherT[Future, String, KeepAliveConnection] = { + implicit val errorLoggingContext: ErrorLoggingContext = + ErrorLoggingContext.fromTracedLogger(tracedLogger) + + for { + newConn <- EitherT { + Future { + blocking { + handling(classOf[EOFException], classOf[SQLException]) + .by(Left(_)) + .apply(Right(ds.createConnection())) + .map(conn => new KeepAliveConnection(conn)) + .leftMap(err => show"Failed to create connection: $err") + } + } + } + + // Ensure the new connection is valid + _ <- EitherTUtil + .condUnitET[Future]( + checkConnection( + newConn, + checkReadOnly, + connectionConfig.connectionTimeout.toInternal, + timeouts, + tracedLogger, + ), { + // Attempt to close the new invalid connection + newConn.closeUnderlying(Level.DEBUG)(errorLoggingContext) + "New connection was not valid" + }, + ) + + // Initialize the new connection + _ <- initConnection(newConn) + } yield newConn + } + + def createLock(connection: KeepAliveConnection): DbLock = + DbLock + .create( + profile, + connection, + connectionConfig.lock, + lockId, + lockMode, + timeouts, + clock, + loggerFactory, + writeExecutor, + ) + + def checkReadOnly(connection: KeepAliveConnection): EitherT[Future, String, Boolean] = + profile match { + case Profile.Postgres(jdbc) => + import slick.jdbc.canton.ActionBasedSQLInterpolation.Implicits.* + val db = KeepAliveConnection.createDatabaseFromConnection( + connection, + logger, + writeExecutor, + ) + + import profile.DbStorageAPI.jdbcActionExtensionMethods + + val readOnlyQuery = + sql"show transaction_read_only" + .as[String] + .headOption + .withStatementParameters(statementInit = + _.setQueryTimeout( + connectionConfig.healthCheckTimeout.toInternal + .toSecondsTruncated(tracedLogger) + .unwrap + ) + ) + + 
EitherTUtil + .fromFuture( + db.run(readOnlyQuery), + err => show"Failed to check new connection for read-only: $err", + ) + .map(_.map(_.toLowerCase).contains("on")) + } + + if (ds.keepAliveConnection) { + logger.warn(s"DataSource should not be configured with keep-alive") + } + + new DbLockedConnection( + createConnection _, + createLock, + connectionConfig, + checkReadOnly, + lockId, + exitOnFatalFailures, + clock, + timeouts, + loggerFactory, + futureSupervisor, + writeExecutor, + logLockOwnersOnLockAcquisitionAttempt, + ) + } + +} + +sealed trait DbLockedConnectionError extends Product with Serializable with PrettyPrinting +object DbLockedConnectionError { + case object DbLockNotAcquired extends DbLockedConnectionError { + override protected def pretty: Pretty[DbLockNotAcquired.type] = + prettyOfObject[DbLockNotAcquired.type] + } + final case class DbConnectionNotAvailable(state: DbLockedConnection.State) + extends DbLockedConnectionError { + override protected def pretty: Pretty[DbConnectionNotAvailable] = prettyOfClass( + unnamedParam(_.state) + ) + } +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbLockedConnectionPool.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbLockedConnectionPool.scala new file mode 100644 index 0000000000..91e3b5b9fe --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbLockedConnectionPool.scala @@ -0,0 +1,455 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import cats.data.EitherT +import cats.syntax.either.* +import cats.syntax.functorFilter.* +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.concurrent.{DirectExecutionContext, FutureSupervisor} +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.config.{DbConfig, DbLockedConnectionPoolConfig, ProcessingTimeout} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.lifecycle.* +import com.digitalasset.canton.logging.{ + ErrorLoggingContext, + NamedLoggerFactory, + NamedLogging, + TracedLogger, +} +import com.digitalasset.canton.resource.DbStorage.PassiveInstanceException +import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.retry.NoExceptionRetryPolicy +import com.digitalasset.canton.util.{ + EitherTUtil, + FutureUnlessShutdownUtil, + SimpleExecutionQueue, + retry, +} +import com.google.common.annotations.VisibleForTesting +import org.slf4j.event.Level +import slick.jdbc.JdbcBackend.Database +import slick.jdbc.{DataSourceJdbcDataSource, JdbcDataSource} +import slick.util.{AsyncExecutor, AsyncExecutorWithMetrics, AsyncExecutorWithShutdown} + +import java.sql.{Connection, SQLTransientException} +import java.util.concurrent.atomic.AtomicReference +import scala.concurrent.duration.* +import scala.concurrent.{ExecutionContext, Future} + +/** A pool of [[DbLockedConnection]] for writes guarded by a main lock. It implements the + * [[slick.jdbc.JdbcDataSource]] trait to be used by slick. + * + * The pool is considered active iff the main connection is active, i.e., the connection is healthy + * and the lock acquired. 
If the main connection becomes inactive, the pool of connections is
+  * ramped down and the pool periodically attempts to become active again. Before a pool becomes
+  * active, it waits for the other pool to ramp down, by acquiring exclusive access to the pool's
+  * shared lock.
+  */
+class DbLockedConnectionPool private (
+    ds: JdbcDataSource,
+    private[resource] val mainConnection: DbLockedConnection,
+    private val mainExecutor: AsyncExecutorWithShutdown,
+    val config: DbLockedConnectionPoolConfig,
+    private[resource] val poolSize: PositiveInt,
+    createPoolConnection: (String, DbLockMode) => DbLockedConnection,
+    clock: Clock,
+    exitOnFatalFailures: Boolean,
+    futureSupervisor: FutureSupervisor,
+    override protected val timeouts: ProcessingTimeout,
+    override protected val loggerFactory: NamedLoggerFactory,
+)(implicit ec: ExecutionContext)
+    extends JdbcDataSource
+    with FlagCloseable
+    with HasCloseContext
+    with NamedLogging
+    with StateMachine[DbLockedConnectionPool.State] {
+
+  import DbLockedConnectionPool.*
+
+  private val execQueue =
+    new SimpleExecutionQueue(
+      "db-locked-connection-pool-queue",
+      futureSupervisor,
+      timeouts,
+      loggerFactory,
+      crashOnFailure = exitOnFatalFailures,
+    )
+
+  // Start out as passive and try to become active
+  override protected val stateRef: AtomicReference[DbLockedConnectionPool.State] =
+    new AtomicReference(State.Passive)
+
+  private def becomeActive()(implicit traceContext: TraceContext): Future[Unit] = {
+    logger.debug("Becoming active")
+
+    // Create an exclusive connection and lock for the pool and wait until active to ensure that the other connection
+    // pool has scaled down all its connections.
+    val poolExclusive = createPoolConnection("pool-wait", DbLockMode.Exclusive)
+    retry
+      .Pause(
+        logger,
+        this,
+        retry.Forever,
+        500.millis,
+        "wait for exclusive access to connection pool",
+        retryLogLevel = Some(Level.DEBUG),
+      )(
+        poolExclusive.get match {
+          // While we're waiting to acquire the pool in exclusive mode, it's possible that we lose the main connection lock
+          // to another instance (for whatever reason). If that happens we can stop retrying here.
+          case Left(_) if !mainConnection.isActive =>
+            // Close the exclusive pool connection before failing the retry
+            poolExclusive.close()
+            Future.failed(
+              PassiveInstanceException(
+                s"Main connection lock was lost while trying to acquire write pool locks"
+              )
+            )
+          case other => Future.successful(other)
+        },
+        NoExceptionRetryPolicy,
+      )
+      .map {
+        case Left(err) =>
+          // This can only happen when the retry is aborted due to shutdown, because otherwise we retry forever
+          logger.info(s"Failed to get exclusive access to the pool's lock, staying passive: $err")
+          poolExclusive.close()
+
+        case Right(_conn) =>
+          logger.debug("Obtained exclusive access to the pool's lock")
+
+          // Close the exclusive access to the pool and ramp up the connections in the pool using a shared lock.
+          poolExclusive.close()
+
+          logger.debug(s"Ramping up connection pool with ${poolSize.value} connections")
+
+          val pool = Range(0, poolSize.value).map { idx =>
+            createPoolConnection(s"pool-$idx", DbLockMode.Shared)
+          }
+
+          // Additional check that the main lock is still acquired when the first shared lock of the pool has been acquired.
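+          // This guards against the race where the main lock is lost to another instance while
+          // this pool is still ramping up its shared locks.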
+          val result = timeouts.network.await("check main connection health when becoming active") {
+            // Break up the configurable timeout into smaller chunks of 200ms
+            val waitInMs = 200L
+            val maxRetries =
+              Math.min(config.activeTimeout.asJava.toMillis / waitInMs, Int.MaxValue).toInt
+            val checkResult = for {
+              firstConnection <- pool.headOption
+                .toRight("Pool is empty")
+                .toEitherT[FutureUnlessShutdown]
+              // Wait until one connection is active and has acquired the lock
+              _ <- DbLockedConnection
+                .awaitActive(firstConnection, maxRetries, waitInMs, logger)
+                .leftMap(err => s"Connection did not become active: $err")
+                .mapK(FutureUnlessShutdown.liftK)
+              // Force a check that the main connection is valid and the lock is still acquired
+              isActive <- mainConnection.isActiveNow().mapK(FutureUnlessShutdown.outcomeK)
+              _ <- EitherTUtil.condUnitET[FutureUnlessShutdown](
+                isActive,
+                "Lost main connection lock while becoming active",
+              )
+            } yield ()
+
+            checkResult.value.unwrap
+          }
+
+          result match {
+            case UnlessShutdown.Outcome(Left(err)) =>
+              logger.info(s"Failed to ramp up pool, staying passive: $err")
+              pool.foreach(_.close())
+
+            case UnlessShutdown.Outcome(Right(())) =>
+              transitionOrFail(State.Passive, State.Active(pool))
+              logger.debug(s"Connection pool now active")
+
+            case UnlessShutdown.AbortedDueToShutdown =>
+              logger.info(s"Becoming active was interrupted due to shutdown")
+              pool.foreach(_.close())
+          }
+      }
+  }
+
+  private def becomePassive()(implicit traceContext: TraceContext): Unit = {
+    logger.debug("Becoming passive")
+
+    def getActiveState(state: State): Option[State.Active] = state match {
+      case active: State.Active => Some(active)
+      case State.Passive => None
+    }
+
+    logger.trace(s"Closing pool connections")
+    LifeCycle.close(
+      (transitionOrFail(
+        classOf[State.Active].getSimpleName,
+        getActiveState(_),
+        State.Passive,
+      ).pool)*
+    )(logger)
+  }
+
+  private def checkPoolHealth(
+      pool: Seq[DbLockedConnection]
+  )(implicit traceContext: TraceContext): Unit = {
+    val invalidConns = pool.filterNot(_.isActive)
+    if (invalidConns.nonEmpty) {
+      logger.info(s"#${invalidConns.size}/${poolSize.value} connections unhealthy")
+      invalidConns.foreach { c =>
+        logger.debug(s"Connection $c unhealthy")
+      }
+    } else {
+      logger.trace("Connection pool is healthy")
+    }
+  }
+
+  @VisibleForTesting
+  private[resource] def getPool: Option[Seq[DbLockedConnection]] = stateRef.get() match {
+    case State.Active(pool) => Some(pool)
+    case State.Passive => None
+  }
+
+  private def checkHealth()(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = {
+    def checkHealthInternal(): Future[Unit] = {
+      val result = stateRef.get() match {
+        case State.Active(pool) =>
+          if (mainConnection.isActive) {
+            logger.trace(s"Connection pool remains active")
+            checkPoolHealth(pool)
+            Future.unit
+          } else {
+            becomePassive()
+            Future.unit
+          }
+        case State.Passive =>
+          if (!mainConnection.isActive) {
+            logger.trace(s"Connection pool remains passive")
+            Future.unit
+          } else {
+            becomeActive()
+          }
+      }
+
+      // Run health check scheduling on the direct execution context to avoid rejected execution exceptions
+      result.map(_ => scheduleHealthCheck(clock.now))(DirectExecutionContext(logger))
+    }
+
+    // Run the health check and the active/passive transitions through the queue to ensure there's only one attempt at a time.
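+    // (SimpleExecutionQueue runs its tasks sequentially, so a slow transition delays, but never
+    // overlaps with, the next health check.)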
+    execQueue.execute(checkHealthInternal(), "check-health")
+  }
+
+  private def runScheduledHealthCheck(ts: CantonTimestamp): Unit =
+    TraceContext.withNewTraceContext("db_scheduled_health_check") { implicit traceContext =>
+      synchronizeWithClosingSync(functionFullName) {
+        logger.trace(s"Checking connection pool health at $ts")
+        FutureUnlessShutdownUtil.doNotAwaitUnlessShutdown(
+          checkHealth(),
+          "failed connection pool health check",
+        )
+      }.onShutdown {
+        logger.debug("Shutting down, stop connection pool health check")
+      }
+    }
+
+  private def scheduleHealthCheck(now: CantonTimestamp): Unit =
+    clock.scheduleAt(runScheduledHealthCheck, now.add(config.healthCheckPeriod.asJava)).discard
+
+  private def findActiveConnection(pool: Seq[DbLockedConnection]): Option[KeepAliveConnection] = {
+    val availableConnectionOpt = pool.mapFilter(_.get.toOption).find(_.markInUse())
+
+    if (availableConnectionOpt.isEmpty) {
+      logger.debug(s"Did not find any available connection in the pool. Connection states: ${pool
+          .map(_.get.map(c => if (c.inUse.get()) "In use" else "Not in use"))
+          .mkString(", ")}")(TraceContext.empty)
+    }
+    availableConnectionOpt
+  }
+
+  // Run and wait for the initial health check
+  TraceContext.withNewTraceContext("db_initial_health_check") { implicit traceContext =>
+    timeouts.default.await("initial health check") {
+      checkHealth().onShutdown(())
+    }
+  }
+
+  /** Returns true if the pool is active and the main connection is active. */
+  def isActive: Boolean = stateRef.get() match {
+    case _: State.Active if mainConnection.isActive => true
+    case _ => false
+  }
+
+  def isPassive: Boolean = stateRef.get() match {
+    case _: State.Active => false
+    case State.Passive => true
+  }
+
+  def setPassive()(implicit
+      traceContext: TraceContext
+  ): EitherT[FutureUnlessShutdown, String, Unit] = {
+
+    def setPassiveInternal(): EitherT[FutureUnlessShutdown, String, Unit] = {
+      logger.info("Setting active connection pool to passive")
+
+      // Closes the main connection and thus releases its lock, which will trigger becomePassive on the next health check
+      mainConnection.setPassive()
+    }
+
+    // Run the set passive in the execution queue to not interfere with health checks
+    execQueue.executeEUS(setPassiveInternal(), "set passive")
+  }
+
+  override def createConnection(): Connection =
+    stateRef.get() match {
+      case State.Active(pool) =>
+        findActiveConnection(pool).getOrElse(throw NoActiveConnectionAvailable())
+      case State.Passive =>
+        throw PassiveInstanceException(s"Connection pool is not active")
+    }
+
+  override val maxConnections: Option[Int] = Some(poolSize.value)
+
+  override def onClosed(): Unit =
+    stateRef.get() match {
+      case State.Active(pool) =>
+        LifeCycle.close(execQueue +: pool :+ mainConnection :+ mainExecutor :+ ds: _*)(logger)
+      case State.Passive =>
+        LifeCycle.close(execQueue, mainConnection, mainExecutor, ds)(logger)
+    }
+
+}
+
+object DbLockedConnectionPool {
+
+  final case class NoActiveConnectionAvailable()
+      extends SQLTransientException("No active and free KeepAliveConnection available")
+
+  sealed trait State extends Product with Serializable
+  object State {
+    final case class Active(pool: Seq[DbLockedConnection]) extends State
+    case object Passive extends State
+  }
+
+  def createDatabaseFromPool(
+      connectionPool: DbLockedConnectionPool,
+      executor: AsyncExecutor,
+  ): Database =
+    Database.forSource(
+      connectionPool,
+      executor,
+    )
+
+  def create(
+      profile: DbStorage.Profile with DbStorage.DbLockSupport,
+      dbConfig: DbConfig,
+      config: DbLockedConnectionPoolConfig,
+      poolSize:
PositiveInt, + mainLockCounter: DbLockCounter, + poolLockCounter: DbLockCounter, + clock: Clock, + timeouts: ProcessingTimeout, + exitOnFatalFailures: Boolean, + futureSupervisor: FutureSupervisor, + loggerFactory: NamedLoggerFactory, + writeExecutor: AsyncExecutorWithShutdown, + )(implicit + executionContext: ExecutionContext, + traceContext: TraceContext, + closeContext: CloseContext, + ): Either[DbLockedConnectionPoolError, DbLockedConnectionPool] = { + + val logger = loggerFactory.getLogger(DbLockedConnectionPool.getClass) + val tracedLogger = TracedLogger.apply(logger) + + tracedLogger.debug(s"Creating write connection pool with ${poolSize.value} connections") + + def createConnection( + ds: DataSourceJdbcDataSource, + connId: String, + lockId: DbLockId, + mode: DbLockMode, + isMainConnection: Boolean, + executor: AsyncExecutorWithShutdown, + ): DbLockedConnection = + DbLockedConnection.create( + profile, + ds, + lockId, + mode, + config.connection, + isMainConnection, + timeouts, + exitOnFatalFailures, + clock, + // We create multiple connections, differentiate them using a connection id + loggerFactory.append( + "connId", + connId, + ), + futureSupervisor, + executor, + logLockOwnersOnLockAcquisitionAttempt = false, + ) + + for { + ds <- DbLockedConnection + .createDataSource( + dbConfig.config, + poolSize.value + 1, // We obtain the main connection and poolSize connections from the data source + config.connection.connectionTimeout.toInternal, + )(ErrorLoggingContext.fromTracedLogger(tracedLogger)) + .leftMap(DbLockedConnectionPoolError.FailedToCreateDataSource.apply) + mainLockId <- DbLockId + .allocate(dbConfig, mainLockCounter, loggerFactory) + .leftMap(DbLockedConnectionPoolError.FailedToAllocateLockId.apply) + poolLockId <- DbLockId + .allocate(dbConfig, poolLockCounter, loggerFactory) + .leftMap(DbLockedConnectionPoolError.FailedToAllocateLockId.apply) + + // main connection gets its own executor + mainExecutor = AsyncExecutorWithMetrics.createSingleThreaded("PoolMain", logger) + mainConnection = createConnection( + ds, + "pool-main", + mainLockId, + DbLockMode.Exclusive, + isMainConnection = true, + mainExecutor, + ) + + // Wait until main connection is either active or passive before returning the connection pool + _ = DbLockedConnection.awaitInitialized(mainConnection, 50, 200, tracedLogger) + } yield { + new DbLockedConnectionPool( + ds, + mainConnection, + mainExecutor, + config, + poolSize, + (connId, lockMode) => + // lock connection gets sent through the write pool executor + createConnection( + ds, + connId, + poolLockId, + lockMode, + isMainConnection = false, + writeExecutor, + ), + clock, + exitOnFatalFailures = exitOnFatalFailures, + futureSupervisor, + timeouts, + loggerFactory, + ) + } + } + +} + +sealed trait DbLockedConnectionPoolError extends Product with Serializable +object DbLockedConnectionPoolError { + final case class FailedToCreateDataSource(error: String) extends DbLockedConnectionPoolError + final case class FailedToAllocateLockId(error: DbLockError) extends DbLockedConnectionPoolError +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbMigrations.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbMigrations.scala index cc689755a1..96c08df4db 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbMigrations.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbMigrations.scala @@ -6,14 +6,21 @@ package 
com.digitalasset.canton.resource

 import cats.data.EitherT
 import cats.syntax.either.*
 import com.daml.nameof.NameOf.functionFullName
+import com.digitalasset.canton.concurrent.FutureSupervisor
 import com.digitalasset.canton.config.RequireTypes.PositiveInt
-import com.digitalasset.canton.config.{DbConfig, ProcessingTimeout}
+import com.digitalasset.canton.config.{
+  DbConfig,
+  DbLockedConnectionConfig,
+  PositiveFiniteDuration,
+  ProcessingTimeout,
+}
 import com.digitalasset.canton.environment.CantonNodeParameters
-import com.digitalasset.canton.lifecycle.{CloseContext, UnlessShutdown}
+import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown, UnlessShutdown}
 import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
 import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
 import com.digitalasset.canton.resource.DbStorage.RetryConfig
-import com.digitalasset.canton.time.Clock
+import com.digitalasset.canton.resource.WithDbLock.WithDbLockError.OperationError
+import com.digitalasset.canton.time.WallClock
 import com.digitalasset.canton.tracing.TraceContext
 import com.digitalasset.canton.util.ShowUtil.*
 import com.digitalasset.canton.util.retry.RetryEither
@@ -22,43 +29,30 @@ import org.flywaydb.core.Flyway
 import org.flywaydb.core.api.FlywayException
 import slick.jdbc.JdbcBackend.Database
 import slick.jdbc.hikaricp.HikariCPJdbcDataSource
-import slick.jdbc.{DataSourceJdbcDataSource, JdbcDataSource}
+import slick.jdbc.{DataSourceJdbcDataSource, JdbcBackend, JdbcDataSource}

 import java.sql.SQLException
 import javax.sql.DataSource
 import scala.concurrent.duration.Duration
 import scala.concurrent.{ExecutionContext, blocking}

-trait DbMigrationsMetaFactory {
-
-  type Factory <: DbMigrationsFactory
-
-  def create(clock: Clock)(implicit ec: ExecutionContext): Factory
-
-}
-
-trait DbMigrationsFactory {
-
-  def create(dbConfig: DbConfig, devVersionSupport: Boolean)(implicit
-      closeContext: CloseContext
-  ): DbMigrations
-
-  def create(dbConfig: DbConfig, name: String, devVersionSupport: Boolean)(implicit
-      closeContext: CloseContext
-  ): DbMigrations
-
-}
-
-trait DbMigrations { this: NamedLogging =>
+/** Performs DB migrations using Flyway.
+  *
+  * @param alphaVersionSupport
+  *   Whether we want to add the schema files found in the dev folder to the migration. A user that
+  *   does that won't be able to upgrade to new Canton versions, as we reserve the right to just
+  *   modify the dev version files in any way we like.
+  */
+class DbMigrations(
+    dbConfig: DbConfig,
+    alphaVersionSupport: Boolean,
+    timeouts: ProcessingTimeout,
+    protected val loggerFactory: NamedLoggerFactory,
+)(implicit ec: ExecutionContext, closeContext: CloseContext)
+    extends NamedLogging {

-  implicit protected def closeContext: CloseContext
-
-  /** Whether we want to add the schema files found in the dev folder to the migration
-    *
-    * A user that does that, won't be able to upgrade to new Canton versions, as we reserve our
-    * right to just modify the dev version files in any way we like.
-    */
-  protected def alphaVersionSupport: Boolean
+  /** For DB migrations we always use a wallclock.
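+    * (The clock drives the health checks and retry scheduling of the migration lock, which must
+    * advance in real time even if the node itself runs with a simulated clock.)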
*/
+  private val clock = new WallClock(timeouts, loggerFactory)

   /** Database is migrated using Flyway, which looks at the migration files at
     * src/main/resources/db/migration/canton as explained at
@@ -88,13 +82,6 @@ trait DbMigrations { this: NamedLogging =>
       .leftMap(DbMigrations.DatabaseError.apply)
       .flatMap(db => ResourceUtil.withResource(db)(fn))

-  /** Obtain access to the database to run the migration operation. */
-  protected def withDb[A](
-      retryConfig: DbStorage.RetryConfig = DbStorage.RetryConfig.failFast
-  )(fn: Database => EitherT[UnlessShutdown, DbMigrations.Error, A])(implicit
-      traceContext: TraceContext
-  ): EitherT[UnlessShutdown, DbMigrations.Error, A]
-
   protected def migrateDatabaseInternal(
       flyway: Flyway
   )(implicit traceContext: TraceContext): EitherT[UnlessShutdown, DbMigrations.Error, Unit] =
@@ -119,8 +106,6 @@
       .leftMap[DbMigrations.Error](DbMigrations.FlywayError.apply)
       .toEitherT[UnlessShutdown]

-  protected def dbConfig: DbConfig
-
   /** Migrate the database with all pending migrations. */
   def migrateDatabase(): EitherT[UnlessShutdown, DbMigrations.Error, Unit] =
     TraceContext.withNewTraceContext("migrate_database") { implicit traceContext =>
@@ -272,50 +257,63 @@
     }
   } yield ()

+  /** With replication, multiple nodes may share the same database; therefore we use DB-lock
+    * based coordination for performing the DB migration. Only the node that acquires the lock
+    * performs the migration operation at any given time.
+    */
+  def withDb[A](retryConfig: RetryConfig = RetryConfig.failFast)(
+      fn: JdbcBackend.Database => EitherT[UnlessShutdown, DbMigrations.Error, A]
+  )(implicit traceContext: TraceContext): EitherT[UnlessShutdown, DbMigrations.Error, A] = {
+    val profile = DbStorage.profile(dbConfig)
+
+    withCreatedDb(retryConfig) { db =>
+      EitherT {
+        timeouts.io
+          .await("db-migration") {
+            WithDbLock
+              .withDbLock(
+                "db-migration",
+                DbLockCounters.NODE_MIGRATIONS,
+                timeouts,
+                dbConfig,
+                DbLockedConnectionConfig(
+                  // Lower the retry period for db migration lock acquisitions, such that in cases with high contention
+                  // (e.g. HA sequencers on a single DB), all sequencers can get initialized in a timely manner
+                  passiveCheckPeriod = PositiveFiniteDuration.ofSeconds(3)
+                ),
+                profile,
+                FutureSupervisor.Noop,
+                clock,
+                loggerFactory,
+                logLockOwnersOnLockAcquisitionAttempt = true,
+              )(fn(db).mapK(FutureUnlessShutdown.liftK))
+              .leftMap {
+                case OperationError(err: DbMigrations.Error) => err
+                case err => DbMigrations.DatabaseError(s"Failed to migrate with DB lock: $err")
+              }
+              .value
+              .unwrap
+          }
+      }
+    }
+  }
 }

-class CommunityDbMigrationsMetaFactory(loggerFactory: NamedLoggerFactory)
-    extends DbMigrationsMetaFactory {
-
-  override type Factory = CommunityDbMigrationsFactory
-
-  override def create(clock: Clock)(implicit ec: ExecutionContext): CommunityDbMigrationsFactory =
-    new CommunityDbMigrationsFactory(loggerFactory)
-}
+object DbMigrations {

-class CommunityDbMigrationsFactory(loggerFactory: NamedLoggerFactory) extends DbMigrationsFactory {
-  override def create(dbConfig: DbConfig, name: String, devVersionSupport: Boolean)(implicit
-      closeContext: CloseContext
-  ): DbMigrations =
-    new CommunityDbMigrations(
+  def create(
+      dbConfig: DbConfig,
+      alphaVersionSupport: Boolean,
+      timeouts: ProcessingTimeout,
+      loggerFactory: NamedLoggerFactory,
+  )(implicit executionContext: ExecutionContext, closeContext: CloseContext): DbMigrations =
+    new DbMigrations(
       dbConfig,
-      devVersionSupport,
-      loggerFactory.appendUnnamedKey("node", name),
+      alphaVersionSupport,
+      timeouts,
+      loggerFactory,
     )

-  override def create(dbConfig: DbConfig, devVersionSupport: Boolean)(implicit
-      closeContext: CloseContext
-  ): DbMigrations =
-    new CommunityDbMigrations(dbConfig, devVersionSupport, loggerFactory)
-}
-
-class CommunityDbMigrations(
-    protected val dbConfig: DbConfig,
-    protected val alphaVersionSupport: Boolean,
-    protected val loggerFactory: NamedLoggerFactory,
-)(implicit override protected val closeContext: CloseContext)
-    extends DbMigrations
-    with NamedLogging {
-
-  override def withDb[A](
-      retryConfig: RetryConfig
-  )(fn: Database => EitherT[UnlessShutdown, DbMigrations.Error, A])(implicit
-      traceContext: TraceContext
-  ): EitherT[UnlessShutdown, DbMigrations.Error, A] = withCreatedDb(retryConfig)(fn)
-}
-
-object DbMigrations {
-
   def createDataSource(jdbcDataSource: JdbcDataSource): DataSource =
     jdbcDataSource match {
       case dataS: DataSourceJdbcDataSource => dataS.ds
diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbParameterUtils.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbParameterUtils.scala
index 9a2bea0172..64d3517a76 100644
--- a/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbParameterUtils.scala
+++ b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbParameterUtils.scala
@@ -175,4 +175,44 @@
     }
   }

+  /** Sets an Iterable of ints as an array database parameter.
+    *
+    * @param items
+    *   the ints to store in the column.
+    * @param pp
+    *   A `PositionedParameters` object, which is used to set the database parameter.
+    */
+  def setArrayIntParameterDb(
+      items: Iterable[Int],
+      pp: PositionedParameters,
+  ): Unit = {
+    val jdbcArray: Array[Integer] = items.view.map(Int.box).toArray
+    pp.setObject(jdbcArray, JDBCType.ARRAY.getVendorTypeNumber)
+  }
+
+  /** Retrieves an array of ints from the database. */
+  def getIntArrayResultsDb: GetResult[Array[Int]] = {
+
+    def anyRefToInt(obj: AnyRef): Int = obj match {
+      case int: java.lang.Integer => Int.unbox(int)
+      case invalid =>
+        throw new SQLNonTransientException(
+          s"Cannot convert object array element (of type ${invalid.getClass.getName}) to Int"
+        )
+    }
+
+    GetResult(r => r.rs.getArray(r.skip.currentPos))
+      .andThen { case (sqlArr: java.sql.Array) =>
+        sqlArr.getArray match {
+          case arr: Array[Int] => arr // Postgres
+          case arr: Array[AnyRef] => arr.map(anyRefToInt) // H2
+          case other =>
+            throw new SQLNonTransientException(
+              s"Cannot convert object (of type ${other.getClass.getName}) to int array"
+            )
+        }
+      }
+  }
+}
diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbStorageMulti.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbStorageMulti.scala
new file mode 100644
index 0000000000..6e557309f1
--- /dev/null
+++ b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbStorageMulti.scala
@@ -0,0 +1,325 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import cats.data.EitherT +import cats.syntax.either.* +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.config.{ + DbConfig, + DbLockedConnectionPoolConfig, + ProcessingTimeout, + QueryCostMonitoringConfig, +} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.health.ComponentHealthState +import com.digitalasset.canton.lifecycle.* +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.metrics.DbStorageMetrics +import com.digitalasset.canton.resource.DbStorage.DbAction.{All, ReadTransactional} +import com.digitalasset.canton.resource.DbStorageMulti.passiveInstanceHealthState +import com.digitalasset.canton.time.{Clock, PositiveFiniteDuration, WallClock} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.FutureUnlessShutdownUtil +import com.digitalasset.canton.util.Thereafter.syntax.* +import slick.jdbc.JdbcBackend.Database +import slick.util.{AsyncExecutor, AsyncExecutorWithMetrics, QueryCostTrackerImpl} + +import java.sql.SQLTransientConnectionException +import java.util.concurrent.ScheduledExecutorService +import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference} +import scala.concurrent.ExecutionContext + +/** DB Storage implementation that allows multiple processes to access the underlying database and + * uses a pool of write connections which are guarded by an exclusive main lock to ensure a single + * writer instance. + * + * Periodically checks the activeness of the write connection pool and if the activeness changes + * executes the `onActive` or `onPassive` callbacks. 
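+  *
+  * A sketch of the callback wiring (hypothetical callbacks, all other parameters elided):
+  * {{{
+  * DbStorageMulti.create(
+  *   ...,
+  *   onActive = () => FutureUnlessShutdown.unit,        // e.g. resume write-side services
+  *   onPassive = () => FutureUnlessShutdown.pure(None), // e.g. fence writers, optionally return a session close context
+  *   ...,
+  * )
+  * }}}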
+ */ +final class DbStorageMulti private ( + override val profile: DbStorage.Profile with DbStorage.DbLockSupport, + generalDb: Database, + private[resource] val writeConnectionPool: DbLockedConnectionPool, + val dbConfig: DbConfig, + onActive: () => FutureUnlessShutdown[Unit], + onPassive: () => FutureUnlessShutdown[Option[CloseContext]], + checkPeriod: PositiveFiniteDuration, + clock: Clock, + closeClock: Boolean, + logQueryCost: Option[QueryCostMonitoringConfig], + override val metrics: DbStorageMetrics, + override protected val timeouts: ProcessingTimeout, + override val threadsAvailableForWriting: PositiveInt, + override protected val loggerFactory: NamedLoggerFactory, + initialCloseContext: Option[CloseContext], + writeDbExecutor: AsyncExecutor, +)(override protected implicit val ec: ExecutionContext) + extends DbStorage + with NamedLogging + with HasCloseContext { + + protected val logOperations: Boolean = logQueryCost.exists(_.logOperations) + + private val active: AtomicBoolean = new AtomicBoolean(writeConnectionPool.isActive) + + private val sessionCloseContext = new AtomicReference[Option[CloseContext]](initialCloseContext) + + override def initialHealthState: ComponentHealthState = + if (active.get()) ComponentHealthState.Ok() + else passiveInstanceHealthState + + private def checkHealth(now: CantonTimestamp): Unit = + TraceContext.withNewTraceContext("db_health") { implicit traceContext => + synchronizeWithClosingSync(functionFullName) { + logger.trace(s"Checking storage health at $now") + + val connectionPoolActive = writeConnectionPool.isActive + val activeOrPassive = if (connectionPoolActive) "active" else "passive" + if (active.compareAndSet(!connectionPoolActive, connectionPoolActive)) { + logger.debug( + s"Write connection pool is now $activeOrPassive. Beginning state transition." + ) + // We have a transition of the activeness + val transitionReplicaState = + if (connectionPoolActive) + onActive() + .thereafter(_ => reportHealthState(ComponentHealthState.Ok())) + else + onPassive() + .map(sessionCloseContext.set) + .thereafter(_ => reportHealthState(passiveInstanceHealthState)) + + FutureUnlessShutdownUtil.doNotAwaitUnlessShutdown( + // Schedule next health check after transition is completed + transitionReplicaState.thereafter(_ => scheduleHealthCheck(clock.now)), + failureMessage = s"Failed to transition replica state", + // if the transition failed we revert the new activeness state + onFailure = _ => active.set(!connectionPoolActive), + ) + } else { + // Immediately schedule next health check + logger.trace( + s"Write connection pool is still $activeOrPassive. Scheduling next health check." 
+ ) + scheduleHealthCheck(clock.now) + } + }.onShutdown { + logger.debug(s"Shutting down, stop storage health check") + } + } + + private def scheduleHealthCheck(now: CantonTimestamp)(implicit tc: TraceContext): Unit = { + val nextCheckTime = now.add(checkPeriod.unwrap) + logger.trace(s"Scheduling the next health check at $nextCheckTime") + FutureUnlessShutdownUtil.doNotAwaitUnlessShutdown( + clock.scheduleAt( + checkHealth, + nextCheckTime, + ), + "failed to schedule next health check", + closeContext = Some(closeContext), + ) + } + + // Run the initial health check + checkHealth(clock.now) + + private val writeDb: Database = DbLockedConnectionPool.createDatabaseFromPool( + writeConnectionPool, + writeDbExecutor, + ) + + private def runIfSessionIsOpen[A]( + action: String, + operationName: String, + maxRetries: Int, + )( + f: => FutureUnlessShutdown[A] + )(implicit traceContext: TraceContext, closeContext: CloseContext): FutureUnlessShutdown[A] = { + val sessionContext = sessionCloseContext.get + sessionContext + .map { sessionCC => + if (sessionCC.context.isClosing) { + FutureUnlessShutdown.abortedDueToShutdown + } else { + CloseContext.withCombinedContext(closeContext, sessionCC, timeouts, logger) { cc => + run(action, operationName, maxRetries) { + f + }(traceContext, cc) + } + } + } + .getOrElse { + run(action, operationName, maxRetries) { + f + } + } + .recover { + // If the session close context is closed, DB queries won't be retried but may end up bubbling up SQL errors that would + // normally be retried. Catch them here and replace them with a more appropriate AbortedDueToShutdown. + case e: SQLTransientConnectionException if sessionContext.exists(_.context.isClosing) => + logger.debug( + "Caught a transient DB error while session close context is closing. Masking it with AbortedDueToShutdown", + e, + ) + UnlessShutdown.AbortedDueToShutdown + } + } + + override protected[canton] def runRead[A]( + action: ReadTransactional[A], + operationName: String, + maxRetries: Int, + )(implicit traceContext: TraceContext, closeContext: CloseContext): FutureUnlessShutdown[A] = + runIfSessionIsOpen("reading", operationName, maxRetries)( + FutureUnlessShutdown.outcomeF(generalDb.run(action)) + ) + + override protected[canton] def runWrite[A]( + action: All[A], + operationName: String, + maxRetries: Int, + )(implicit traceContext: TraceContext, closeContext: CloseContext): FutureUnlessShutdown[A] = + runIfSessionIsOpen("writing", operationName, maxRetries)( + FutureUnlessShutdown.outcomeF(writeDb.run(action)) + ) + + override def isActive: Boolean = writeConnectionPool.isActive + + override def onClosed(): Unit = { + // Closing first the pool and then the executor, otherwise we may get rejected execution exceptions for the pool's connection health checks. + // Slick by default closes first the executor and then the source, which does not work here. + val clockCloseable = if (closeClock) Seq(clock) else Seq.empty + val otherCloseables = Seq(generalDb, writeConnectionPool, writeDbExecutor) + LifeCycle.close((clockCloseable ++ otherCloseables)*)(logger) + } + + def setPassive()(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, String, Unit] = + writeConnectionPool.setPassive() + + def setSessionCloseContext(sessionContext: Option[CloseContext]): Unit = + sessionCloseContext.set(sessionContext) +} + +object DbStorageMulti { + private val passiveInstanceHealthState = ComponentHealthState.failed("instance is passive") + + /** Creates a new multi-process aware DbStorage. 
+ * + * The caller should check if the returned instance is active or not, and act correspondingly, + * because onActive/onPassive are not invoked during creation. + * + * @param customClock + * allows for injecting a custom clock in tests. The caller is responsible for closing the + * custom clock. + */ + def create( + dbConfig: DbConfig, + writeConnectionPoolConfig: DbLockedConnectionPoolConfig, + readPoolSize: PositiveInt, + writePoolSize: PositiveInt, + mainLockCounter: DbLockCounter, + poolLockCounter: DbLockCounter, + onActive: () => FutureUnlessShutdown[Unit], + onPassive: () => FutureUnlessShutdown[Option[CloseContext]], + metrics: DbStorageMetrics, + logQueryCost: Option[QueryCostMonitoringConfig], + customClock: Option[Clock], + scheduler: Option[ScheduledExecutorService], + timeouts: ProcessingTimeout, + exitOnFatalFailures: Boolean, + futureSupervisor: FutureSupervisor, + loggerFactory: NamedLoggerFactory, + initialCloseContext: Option[CloseContext] = None, + )(implicit + ec: ExecutionContext, + tc: TraceContext, + closeContext: CloseContext, + ): EitherT[UnlessShutdown, String, DbStorageMulti] = { + val logger = loggerFactory.getTracedLogger(getClass) + + // By default, ensure that storage runs with wallclock for its health checks + val clock: Clock = customClock.getOrElse(new WallClock(timeouts, loggerFactory)) + + logger.info(s"Creating storage, num-reads: $readPoolSize, num-writes: $writePoolSize") + for { + generalDb <- DbStorage + .createDatabase( + dbConfig, + readPoolSize, + Some(metrics.general), + logQueryCost, + scheduler, + )(loggerFactory) + + profile <- DbLock.isSupported(DbStorage.profile(dbConfig)).toEitherT[UnlessShutdown] + + writeExecutor = { + import slick.util.ConfigExtensionMethods.* + val logger = loggerFactory.getLogger(DbStorageMulti.getClass) + val tracker = new QueryCostTrackerImpl( + logQueryCost, + metrics.write, + scheduler, + warnOnSlowQueryO = dbConfig.parameters.warnOnSlowQuery.map(_.toInternal), + warnInterval = dbConfig.parameters.warnOnSlowQueryInterval.toInternal, + writePoolSize.value, + logger, + ) + new AsyncExecutorWithMetrics( + name = "db-lock-pool-ec", + minThreads = writePoolSize.value, + maxThreads = writePoolSize.value, + queueSize = dbConfig.config.getIntOr("queueSize", 1000), + logger, + tracker, + maxConnections = writePoolSize.value, + ) + } + + writeConnectionPool <- DbLockedConnectionPool + .create( + profile, + dbConfig, + writeConnectionPoolConfig, + writePoolSize, + mainLockCounter, + poolLockCounter, + clock, + timeouts, + exitOnFatalFailures = exitOnFatalFailures, + futureSupervisor, + loggerFactory, + writeExecutor, + ) + .leftMap(err => s"Failed to create write connection pool: $err") + .toEitherT[UnlessShutdown] + + sharedStorage = new DbStorageMulti( + profile, + generalDb, + writeConnectionPool, + dbConfig, + onActive, + onPassive, + writeConnectionPoolConfig.healthCheckPeriod.toInternal, + clock, + closeClock = customClock.isEmpty, + logQueryCost, + metrics, + timeouts, + writePoolSize, + loggerFactory, + initialCloseContext, + writeExecutor, + ) + } yield sharedStorage + } + +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/resource/KeepAliveConnection.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/KeepAliveConnection.scala new file mode 100644 index 0000000000..c8d946f72b --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/KeepAliveConnection.scala @@ -0,0 +1,171 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) 
GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import cats.syntax.either.* +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.logging.{ErrorLoggingContext, TracedLogger} +import com.digitalasset.canton.resource.DbStorage.NoConnectionAvailable +import com.digitalasset.canton.util.LoggerUtil +import com.digitalasset.canton.util.ShowUtil.* +import com.typesafe.scalalogging.Logger +import org.slf4j.event.Level +import slick.jdbc.JdbcBackend.Database +import slick.jdbc.JdbcDataSource +import slick.util.AsyncExecutor + +import java.sql.{Array as _, *} +import java.util +import java.util.Properties +import java.util.concurrent.Executor +import java.util.concurrent.atomic.AtomicBoolean + +object KeepAliveConnection { + + implicit val pretty: Pretty[KeepAliveConnection] = { + import com.digitalasset.canton.logging.pretty.PrettyUtil.* + + implicit val prettyConnection: Pretty[Connection] = prettyOfString(_.toString) + + prettyOfClass(unnamedParam(_.underlying)) + } + + /** Single threaded database. + * + * Should only be used for low-volume workloads. + */ + def createDatabaseFromConnection( + connection: KeepAliveConnection, + logger: Logger, + asyncExecutor: AsyncExecutor, + ): Database = + Database.forSource( + new JdbcDataSource { + def createConnection(): Connection = + if (connection.markInUse()) + connection + else + throw NoConnectionAvailable() + def close(): Unit = { + import com.digitalasset.canton.tracing.TraceContext.Implicits.Empty.* + connection.closeUnderlying(Level.WARN)( + ErrorLoggingContext.fromTracedLogger(TracedLogger(logger)) + ) + } + val maxConnections: Option[Int] = Some(1) + }, + asyncExecutor, + ) + +} + +/** Connection wrapper to prevent closing of the connection. 
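+  *
+  * `close()` only clears the in-use flag so that pooled callers cannot accidentally close the
+  * underlying JDBC connection; `closeUnderlying` must be used to really close it. A minimal
+  * sketch (within the `resource` package, assuming an implicit `ErrorLoggingContext` is in scope):
+  * {{{
+  * val conn = new KeepAliveConnection(jdbcConnection)
+  * conn.markInUse()                  // reserve the connection
+  * conn.close()                      // only marks it free again; the JDBC connection stays open
+  * conn.closeUnderlying(Level.INFO)  // actually closes the wrapped connection
+  * }}}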
*/ +class KeepAliveConnection(conn: Connection) extends Connection { + + private[resource] val inUse: AtomicBoolean = new AtomicBoolean(false) + + private[resource] def markInUse(): Boolean = inUse.compareAndSet(false, true) + + private[resource] def markFree(): Unit = inUse.compareAndSet(true, false).discard + + def close(): Unit = + // Only mark the connection as free; do not close the actual connection + markFree() + + private[resource] def underlying: Connection = conn + + private[resource] def closeUnderlying( + logLevel: Level + )(implicit errorLoggingContext: ErrorLoggingContext): Unit = + Either + .catchOnly[SQLException](conn.close()) + .valueOr { err => + LoggerUtil.logAtLevel(logLevel, show"Failed to close connection: $err")(errorLoggingContext) + } + // Delegated methods below + def createStatement(): Statement = conn.createStatement() + def setAutoCommit(autoCommit: Boolean): Unit = conn.setAutoCommit(autoCommit) + def setHoldability(holdability: Int): Unit = conn.setHoldability(holdability) + def clearWarnings(): Unit = conn.clearWarnings() + def getNetworkTimeout: Int = conn.getNetworkTimeout + def createBlob(): Blob = conn.createBlob() + def createSQLXML(): SQLXML = conn.createSQLXML() + def setSavepoint(): Savepoint = conn.setSavepoint() + def setSavepoint(name: String): Savepoint = conn.setSavepoint(name) + def createNClob(): NClob = conn.createNClob() + def getTransactionIsolation: Int = conn.getTransactionIsolation + def getClientInfo(name: String): String = conn.getClientInfo(name) + def getClientInfo: Properties = conn.getClientInfo + def getSchema: String = conn.getSchema + def setNetworkTimeout(executor: Executor, milliseconds: Int): Unit = + conn.setNetworkTimeout(executor, milliseconds) + def getMetaData: DatabaseMetaData = conn.getMetaData + def getTypeMap: util.Map[String, Class[_]] = conn.getTypeMap + def rollback(): Unit = conn.rollback() + def rollback(savepoint: Savepoint): Unit = conn.rollback(savepoint) + def createStatement(resultSetType: Int, resultSetConcurrency: Int): Statement = + conn.createStatement(resultSetType, resultSetConcurrency) + def createStatement( + resultSetType: Int, + resultSetConcurrency: Int, + resultSetHoldability: Int, + ): Statement = + conn.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability) + def getHoldability: Int = conn.getHoldability + def setReadOnly(readOnly: Boolean): Unit = conn.setReadOnly(readOnly) + def setClientInfo(name: String, value: String): Unit = conn.setClientInfo(name, value) + def setClientInfo(properties: Properties): Unit = conn.setClientInfo(properties) + def isReadOnly: Boolean = conn.isReadOnly + def setTypeMap(map: util.Map[String, Class[_]]): Unit = conn.setTypeMap(map) + def getCatalog: String = conn.getCatalog + def createClob(): Clob = conn.createClob() + def setTransactionIsolation(level: Int): Unit = conn.setTransactionIsolation(level) + def nativeSQL(sql: String): String = conn.nativeSQL(sql) + def prepareCall(sql: String): CallableStatement = conn.prepareCall(sql) + def prepareCall(sql: String, resultSetType: Int, resultSetConcurrency: Int): CallableStatement = + conn.prepareCall(sql, resultSetType, resultSetConcurrency) + def prepareCall( + sql: String, + resultSetType: Int, + resultSetConcurrency: Int, + resultSetHoldability: Int, + ): CallableStatement = + conn.prepareCall(sql, resultSetType, resultSetConcurrency, resultSetHoldability) + def createArrayOf(typeName: String, elements: Array[AnyRef]): java.sql.Array = + conn.createArrayOf(typeName, elements) +
def setCatalog(catalog: String): Unit = conn.setCatalog(catalog) + def getAutoCommit: Boolean = conn.getAutoCommit + def abort(executor: Executor): Unit = conn.abort(executor) + def isValid(timeout: Int): Boolean = conn.isValid(timeout) + def prepareStatement(sql: String): PreparedStatement = conn.prepareStatement(sql) + def prepareStatement( + sql: String, + resultSetType: Int, + resultSetConcurrency: Int, + ): PreparedStatement = + conn.prepareStatement(sql, resultSetType, resultSetConcurrency) + def prepareStatement( + sql: String, + resultSetType: Int, + resultSetConcurrency: Int, + resultSetHoldability: Int, + ): PreparedStatement = + conn.prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability) + def prepareStatement(sql: String, autoGeneratedKeys: Int): PreparedStatement = + conn.prepareStatement(sql, autoGeneratedKeys) + def prepareStatement(sql: String, columnIndexes: Array[Int]): PreparedStatement = + conn.prepareStatement(sql, columnIndexes) + def prepareStatement(sql: String, columnNames: Array[String]): PreparedStatement = + conn.prepareStatement(sql, columnNames) + def releaseSavepoint(savepoint: Savepoint): Unit = conn.releaseSavepoint(savepoint) + def isClosed: Boolean = conn.isClosed + def createStruct(typeName: String, attributes: Array[AnyRef]): Struct = + conn.createStruct(typeName, attributes) + def getWarnings: SQLWarning = conn.getWarnings + def setSchema(schema: String): Unit = conn.setSchema(schema) + def commit(): Unit = conn.commit() + def unwrap[T](iface: Class[T]): T = conn.unwrap[T](iface) + def isWrapperFor(iface: Class[_]): Boolean = conn.isWrapperFor(iface) +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/resource/StateMachine.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/StateMachine.scala new file mode 100644 index 0000000000..30bc1c2127 --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/StateMachine.scala @@ -0,0 +1,104 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
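Taken together, KeepAliveConnection and createDatabaseFromConnection hand Slick a data source whose pool size is effectively one and whose close() never destroys the physical, lock-holding connection: close() only returns the logical slot, and the real JDBC connection is touched solely by closeUnderlying(). A minimal, self-contained sketch of just that guard logic; SingleUseGuard and SingleUseGuardDemo are hypothetical names for illustration, not Canton classes:

import java.util.concurrent.atomic.AtomicBoolean

// Stand-in for the markInUse/markFree pair in KeepAliveConnection:
// one logical slot guarding a single long-lived physical resource.
final class SingleUseGuard {
  private val inUse = new AtomicBoolean(false)

  // Succeeds only if the slot was free, mirroring markInUse().
  def acquire(): Boolean = inUse.compareAndSet(false, true)

  // Frees the slot without touching the underlying resource,
  // mirroring close() on the wrapper, which only calls markFree().
  def release(): Unit = { inUse.compareAndSet(true, false); () }
}

object SingleUseGuardDemo {
  def main(args: Array[String]): Unit = {
    val guard = new SingleUseGuard
    assert(guard.acquire())  // the first borrower gets the connection
    assert(!guard.acquire()) // maxConnections = Some(1): no second borrower
    guard.release()          // "closing" merely returns the slot
    assert(guard.acquire())  // the same connection can be handed out again
  }
}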
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import cats.syntax.either.* +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.logging.NamedLogging +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.EitherUtil.RichEither +import com.digitalasset.canton.util.ErrorUtil + +import java.util.concurrent.atomic.AtomicReference +import scala.reflect.ClassTag + +trait StateMachine[S] { self: NamedLogging => + + protected val stateRef: AtomicReference[S] + + // Like `transition` but supports 2 expected states instead of one + // The use of Either here is not right-biased and is just a wrapper for 2 possible states + protected def transitionEither[E, T <: S, U <: S]( + getExpectedState: S => Option[Either[T, U]], + newState: S, + unexpectedStateFn: S => E, + ): Either[E, Either[T, U]] = { + val prevState = stateRef.getAndUpdate { currentState => + if (getExpectedState(currentState).isDefined) + newState + else currentState + } + + getExpectedState(prevState).toRight(unexpectedStateFn(prevState)) + } + + protected def transition[E, T <: S]( + getExpectedState: S => Option[T], + newState: S, + unexpectedStateFn: S => E, + ): Either[E, T] = { + val prevState = stateRef.getAndUpdate { currentState => + if (getExpectedState(currentState).isDefined) + newState + else currentState + } + + getExpectedState(prevState).toRight(unexpectedStateFn(prevState)) + } + + protected def transition[E]( + expectedState: S, + newState: S, + error: S => E, + ): Either[E, Unit] = + transition(s => Some(s).filter(_ == expectedState), newState, error).map(_ => ()) + + protected def transitionOrFail[T <: S]( + expectedState: String, + getExpectedState: S => Option[T], + newState: S, + )(implicit traceContext: TraceContext): T = + transition( + getExpectedState, + newState, + errorState => + s"Failed to transition from $expectedState to $newState: current state is $errorState", + ).tapRight(_ => logger.debug(s"Transitioned from $expectedState to $newState")) + .valueOr(ErrorUtil.invalidState(_)) + + protected def transitionOrFail(expectedState: S, newState: S)(implicit + traceContext: TraceContext + ): Unit = + transition( + expectedState, + newState, + errorState => + ErrorUtil.internalError( + new IllegalStateException( + s"Failed to transition from $expectedState to $newState: current state is $errorState" + ) + ), + ).discard + + // Match on the expected state type but not the value + protected def transitionOrFail[S1 <: S: ClassTag](newState: S)(implicit + traceContext: TraceContext + ): Unit = { + val prevState = stateRef.getAndUpdate { + case _: S1 => newState + case currentState => currentState + } + + prevState match { + case _: S1 => () + case unexpectedState => + ErrorUtil.internalError( + new IllegalStateException( + s"Failed to transition to $newState: current state is $unexpectedState" + ) + ) + } + } + +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/resource/Storage.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/Storage.scala index a9e31d10a4..362a876c19 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/resource/Storage.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/Storage.scala @@ -30,10 +30,8 @@ import com.digitalasset.canton.metrics.{DbQueueMetrics, DbStorageMetrics} import com.digitalasset.canton.protocol.{LfContractId, LfGlobalKey, LfHash} import 
com.digitalasset.canton.resource.DbStorage.Profile.{H2, Postgres} import com.digitalasset.canton.resource.DbStorage.{DbAction, Profile} -import com.digitalasset.canton.resource.StorageFactory.StorageCreationException import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.store.db.{DbDeserializationException, DbSerializationException} -import com.digitalasset.canton.time.Clock import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.* import com.digitalasset.canton.util.ShowUtil.* @@ -81,87 +79,6 @@ sealed trait Storage extends CloseableHealthComponent with AtomicHealthComponent } -trait StorageFactory { - def config: StorageConfig - - /** Throws an exception in case of errors or shutdown during storage creation. */ - def tryCreate( - connectionPoolForParticipant: Boolean, - logQueryCost: Option[QueryCostMonitoringConfig], - clock: Clock, - scheduler: Option[ScheduledExecutorService], - metrics: DbStorageMetrics, - timeouts: ProcessingTimeout, - loggerFactory: NamedLoggerFactory, - )(implicit - ec: ExecutionContext, - traceContext: TraceContext, - closeContext: CloseContext, - ): Storage = - create( - connectionPoolForParticipant, - logQueryCost, - clock, - scheduler, - metrics, - timeouts, - loggerFactory, - ) - .valueOr(err => throw new StorageCreationException(err)) - .onShutdown(throw new StorageCreationException("Shutdown during storage creation")) - - def create( - connectionPoolForParticipant: Boolean, - logQueryCost: Option[QueryCostMonitoringConfig], - clock: Clock, - scheduler: Option[ScheduledExecutorService], - metrics: DbStorageMetrics, - timeouts: ProcessingTimeout, - loggerFactory: NamedLoggerFactory, - )(implicit - ec: ExecutionContext, - traceContext: TraceContext, - closeContext: CloseContext, - ): EitherT[UnlessShutdown, String, Storage] -} - -object StorageFactory { - class StorageCreationException(message: String) extends RuntimeException(message) -} - -class CommunityStorageFactory(val config: StorageConfig) extends StorageFactory { - override def create( - connectionPoolForParticipant: Boolean, - logQueryCost: Option[QueryCostMonitoringConfig], - clock: Clock, - scheduler: Option[ScheduledExecutorService], - metrics: DbStorageMetrics, - timeouts: ProcessingTimeout, - loggerFactory: NamedLoggerFactory, - )(implicit - ec: ExecutionContext, - traceContext: TraceContext, - closeContext: CloseContext, - ): EitherT[UnlessShutdown, String, Storage] = - config match { - case StorageConfig.Memory(_, _) => - EitherT.rightT(new MemoryStorage(loggerFactory, timeouts)) - case db: DbConfig => - DbStorageSingle - .create( - db, - connectionPoolForParticipant, - logQueryCost, - clock, - scheduler, - metrics, - timeouts, - loggerFactory, - ) - .widen[Storage] - } -} - final class MemoryStorage( override val loggerFactory: NamedLoggerFactory, override val timeouts: ProcessingTimeout, @@ -610,7 +527,7 @@ object DbStorage { loggerFactory: NamedLoggerFactory )(implicit closeContext: CloseContext): EitherT[UnlessShutdown, String, Database] = { val baseLogger = loggerFactory.getLogger(classOf[DbStorage]) - val logger = TracedLogger(baseLogger) + implicit val logger = TracedLogger(baseLogger) TraceContext.withNewTraceContext("create_db") { implicit traceContext => // Must be called to set proper defaults in case of H2 @@ -652,27 +569,32 @@ object DbStorage { s"Initializing database storage with config: ${DbConfig.hideConfidential(configWithMigrationFallbacks)}" ) - RetryEither.retry[String, Database]( - maxRetries = 
retryConfig.maxRetries, - waitInMs = retryConfig.retryWaitingTime.toMillis, - operationName = functionFullName, - retryLogLevel = retryConfig.retryLogLevel, - failLogLevel = Level.ERROR, - ) { - for { - db <- createJdbcBackendDatabase( - configWithMigrationFallbacks, - metrics, - logQueryCost, - scheduler, - config.parameters, - baseLogger, - ) - _ <- Either - .catchOnly[SQLException](db.createSession().close()) - .leftMap(err => show"Failed to create session with database: $err") - } yield db - }(ErrorLoggingContext.fromTracedLogger(logger), closeContext) + RetryEither + .retry[DatabaseCreationFailed, Database]( + maxRetries = retryConfig.maxRetries, + waitInMs = retryConfig.retryWaitingTime.toMillis, + operationName = functionFullName, + stopOnLeft = Some(_.isFatal), + retryLogLevel = retryConfig.retryLogLevel, + failLogLevel = Level.ERROR, + ) { + for { + db <- createJdbcBackendDatabase( + configWithMigrationFallbacks, + metrics, + logQueryCost, + scheduler, + config.parameters, + baseLogger, + ) + _ <- Either + .catchOnly[SQLException](db.createSession().close()) + .leftMap(err => + DatabaseCreationFailed(show"Failed to create session with database: $err", err) + ) + } yield db + }(ErrorLoggingContext.fromTracedLogger(logger), closeContext) + .leftMap(_.message) } } @@ -684,7 +606,10 @@ object DbStorage { scheduler: Option[ScheduledExecutorService], parameters: DbParametersConfig, logger: Logger, - ): Either[String, Database] = { + )(implicit + tracedLogger: TracedLogger, + traceContext: TraceContext, + ): Either[DatabaseCreationFailed, Database] = { // copy paste from JdbcBackend.forConfig import slick.util.ConfigExtensionMethods.* try { @@ -743,10 +668,21 @@ object DbStorage { Right(JdbcBackend.Database.forSource(source, executor)) } catch { - case ex: SlickException => Left(show"Failed to setup database access: $ex") - case ex: PoolInitializationException => Left(show"Failed to connect to database: $ex") + case ex: SlickException => + Left(DatabaseCreationFailed(show"Failed to setup database access: $ex", ex.getCause)) + case ex: PoolInitializationException => + Left(DatabaseCreationFailed(show"Failed to connect to database: $ex", ex.getCause)) } + } + + private final case class DatabaseCreationFailed(message: String, ex: Throwable)(implicit + logger: TracedLogger, + traceContext: TraceContext, + ) { + import com.digitalasset.canton.util.retry.ErrorKind + val isFatal = + DbExceptionRetryPolicy.determineExceptionErrorKind(ex, logger) == ErrorKind.FatalErrorKind } /** Construct a bulk operation (e.g., insertion, deletion). The operation must not return a result diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/resource/StorageFactory.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/StorageFactory.scala new file mode 100644 index 0000000000..c17fae2b56 --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/StorageFactory.scala @@ -0,0 +1,63 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
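The behavioural change to DbStorage.createDatabase above is that creation retries now stop early when the underlying exception is classified as fatal (stopOnLeft = Some(_.isFatal), with the classification delegated to DbExceptionRetryPolicy), instead of exhausting all retries on errors that cannot recover. A simplified, standalone sketch of that retry shape; CreationError and retryUnlessFatal are hypothetical stand-ins for DatabaseCreationFailed and RetryEither.retry:

import scala.annotation.tailrec

// Hypothetical error type mirroring DatabaseCreationFailed: the message
// plus a flag saying whether retrying could possibly help.
final case class CreationError(message: String, isFatal: Boolean)

object RetrySketch {
  // Retry `op` up to `maxRetries` times, but stop immediately on a
  // fatal error -- the effect of `stopOnLeft = Some(_.isFatal)`.
  @tailrec
  def retryUnlessFatal[A](maxRetries: Int, waitMs: Long)(
      op: () => Either[CreationError, A]
  ): Either[CreationError, A] =
    op() match {
      case Right(value) => Right(value)
      case Left(err) if err.isFatal || maxRetries <= 0 =>
        Left(err) // e.g. invalid credentials: retrying cannot help
      case Left(_) =>
        Thread.sleep(waitMs) // transient error, e.g. the DB is still starting up
        retryUnlessFatal(maxRetries - 1, waitMs)(op)
    }
}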
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import cats.data.EitherT +import com.digitalasset.canton.config.* +import com.digitalasset.canton.lifecycle.{CloseContext, UnlessShutdown} +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.metrics.DbStorageMetrics +import com.digitalasset.canton.resource.StorageFactory.StorageCreationException +import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.tracing.TraceContext + +import java.util.concurrent.ScheduledExecutorService +import scala.concurrent.ExecutionContext + +trait StorageFactory { + + /** Throws an exception in case of errors or shutdown during storage creation. */ + def tryCreate( + connectionPoolForParticipant: Boolean, + logQueryCost: Option[QueryCostMonitoringConfig], + clock: Clock, + scheduler: Option[ScheduledExecutorService], + metrics: DbStorageMetrics, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + )(implicit + ec: ExecutionContext, + traceContext: TraceContext, + closeContext: CloseContext, + ): Storage = + create( + connectionPoolForParticipant, + logQueryCost, + clock, + scheduler, + metrics, + timeouts, + loggerFactory, + ) + .valueOr(err => throw new StorageCreationException(err)) + .onShutdown(throw new StorageCreationException("Shutdown during storage creation")) + + def create( + connectionPoolForParticipant: Boolean, + logQueryCost: Option[QueryCostMonitoringConfig], + clock: Clock, + scheduler: Option[ScheduledExecutorService], + metrics: DbStorageMetrics, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + )(implicit + ec: ExecutionContext, + traceContext: TraceContext, + closeContext: CloseContext, + ): EitherT[UnlessShutdown, String, Storage] +} + +object StorageFactory { + class StorageCreationException(message: String) extends RuntimeException(message) +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/resource/StorageMultiFactory.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/StorageMultiFactory.scala new file mode 100644 index 0000000000..b252ac8a31 --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/StorageMultiFactory.scala @@ -0,0 +1,103 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
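The extracted StorageFactory trait keeps a single abstract create and derives tryCreate from it, so every implementation gets the exception-throwing variant for free. A condensed sketch of the pattern, with a plain Either standing in for EitherT[UnlessShutdown, String, Storage] and hypothetical names throughout:

// `create` reports failures as values; `tryCreate` converts them into
// exceptions for call sites that cannot propagate an Either.
final class CreationException(message: String) extends RuntimeException(message)

trait FactorySketch[A] {
  def create(): Either[String, A]

  def tryCreate(): A =
    create() match {
      case Left(err)    => throw new CreationException(err)
      case Right(value) => value
    }
}

// Example implementation: only `create` has to be provided.
object InMemoryFactory extends FactorySketch[Map[String, String]] {
  def create(): Either[String, Map[String, String]] = Right(Map.empty)
}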
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import cats.data.EitherT +import cats.syntax.functor.* +import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.config.* +import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown, UnlessShutdown} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.metrics.DbStorageMetrics +import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.tracing.TraceContext + +import java.util.concurrent.ScheduledExecutorService +import scala.concurrent.ExecutionContext + +class StorageMultiFactory( + val config: StorageConfig, + exitOnFatalFailures: Boolean, + replicationConfig: Option[ReplicationConfig], + onActive: () => FutureUnlessShutdown[Unit], + onPassive: () => FutureUnlessShutdown[Option[CloseContext]], + mainLockCounter: DbLockCounter, + poolLockCounter: DbLockCounter, + futureSupervisor: FutureSupervisor, + override protected val loggerFactory: NamedLoggerFactory, + initialSessionContext: Option[CloseContext] = None, +) extends StorageFactory + with NamedLogging { + + override def create( + connectionPoolForParticipant: Boolean, + logQueryCost: Option[QueryCostMonitoringConfig], + clock: Clock, + scheduler: Option[ScheduledExecutorService], + metrics: DbStorageMetrics, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + )(implicit + ec: ExecutionContext, + traceContext: TraceContext, + closeContext: CloseContext, + ): EitherT[UnlessShutdown, String, Storage] = + config match { + case StorageConfig.Memory(_, _) => + EitherT.rightT(new MemoryStorage(loggerFactory, timeouts)) + case dbConfig: DbConfig => + replicationConfig match { + case Some(replConfig) if replConfig.isEnabled => + val writePoolSize = + dbConfig.numWriteConnectionsCanton( + connectionPoolForParticipant, + withWriteConnectionPool = true, + withMainConnection = true, + ) + val readPoolSize = + dbConfig.numReadConnectionsCanton( + forParticipant = connectionPoolForParticipant, + withWriteConnectionPool = true, + withMainConnection = false, + ) + + DbStorageMulti + .create( + dbConfig, + replConfig.connectionPool, + readPoolSize, + writePoolSize, + mainLockCounter, + poolLockCounter, + onActive, + onPassive, + metrics, + logQueryCost, + None, + scheduler, + timeouts, + exitOnFatalFailures = exitOnFatalFailures, + futureSupervisor, + loggerFactory, + initialSessionContext, + ) + .widen[Storage] + case _ => + logger.info(s"Replication is disabled, using DbStorageSingle") + DbStorageSingle + .create( + dbConfig, + connectionPoolForParticipant, + logQueryCost, + clock, + scheduler, + metrics, + timeouts, + loggerFactory, + ) + .widen[Storage] + } + } + +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/resource/StorageSetup.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/StorageSetup.scala deleted file mode 100644 index ed77ee03bd..0000000000 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/resource/StorageSetup.scala +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
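StorageMultiFactory chooses the storage implementation from two inputs: the shape of the config (memory vs. database) and whether replication is enabled, so only replicated nodes pay for the lock-based DbStorageMulti. A compressed, hypothetical model of that dispatch (the names below are illustrative, not Canton's types):

object StorageChoiceSketch {
  sealed trait StorageChoice
  case object UseMemoryStorage extends StorageChoice
  case object UseDbStorageMulti extends StorageChoice  // DB locks, active/passive
  case object UseDbStorageSingle extends StorageChoice // single writer, no locks

  // Mirrors the dispatch in StorageMultiFactory.create: memory configs
  // bypass the database entirely; DB configs split on the replication flag.
  def choose(isDbConfig: Boolean, replicationEnabled: Boolean): StorageChoice =
    if (!isDbConfig) UseMemoryStorage
    else if (replicationEnabled) UseDbStorageMulti
    else UseDbStorageSingle
}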
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.resource - -import com.daml.metrics.api.noop.NoOpMetricsFactory -import com.daml.metrics.api.{HistogramInventory, MetricName, MetricsContext} -import com.digitalasset.canton.config.{ - DbConfig, - ProcessingTimeout, - QueryCostMonitoringConfig, - StorageConfig, -} -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.lifecycle.CloseContext -import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.metrics.{DbStorageHistograms, DbStorageMetrics} -import com.digitalasset.canton.time.Clock -import com.digitalasset.canton.tracing.TraceContext - -import scala.concurrent.ExecutionContext - -trait StorageSetup { - - def tryCreateAndMigrateStorage( - storageConfig: StorageConfig, - logQueryCostConfig: Option[QueryCostMonitoringConfig], - clock: Clock, - processingTimeout: ProcessingTimeout, - loggerFactory: NamedLoggerFactory, - setMigrationsPath: StorageConfig => StorageConfig = identity, - )(implicit - executionContext: ExecutionContext, - traceContext: TraceContext, - closeContext: CloseContext, - metricsContext: MetricsContext, - ): Storage -} - -object CommunityStorageSetup extends StorageSetup { - - override def tryCreateAndMigrateStorage( - storageConfig: StorageConfig, - logQueryCostConfig: Option[QueryCostMonitoringConfig], - clock: Clock, - processingTimeout: ProcessingTimeout, - loggerFactory: NamedLoggerFactory, - setMigrationsPath: StorageConfig => StorageConfig = identity, - )(implicit - executionContext: ExecutionContext, - traceContext: TraceContext, - closeContext: CloseContext, - metricsContext: MetricsContext, - ): Storage = { - val storageConfigWithMigrations = setMigrationsPath(storageConfig) - storageConfigWithMigrations match { - case dbConfig: DbConfig => - migrateDb(new CommunityDbMigrations(dbConfig, false, loggerFactory)) - case _ => - // Not a DB storage (currently, only memory) => no need for migrations. - () - } - new CommunityStorageFactory(storageConfigWithMigrations) - .tryCreate( - connectionPoolForParticipant = false, - logQueryCostConfig, - clock, - scheduler = None, - metrics = createDbStorageMetrics(), - processingTimeout, - loggerFactory, - ) - } - - private[canton] def migrateDb(dbMigrations: DbMigrations): Unit = - dbMigrations - .migrateDatabase() - .value - .map { - case Left(error) => sys.error(s"Error with migration $error") - case Right(_) => () - } - .discard - - private[canton] def createDbStorageMetrics()(implicit - metricsContext: MetricsContext - ): DbStorageMetrics = - new DbStorageMetrics( - new DbStorageHistograms(MetricName("none"))(new HistogramInventory), - NoOpMetricsFactory, - ) -} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/resource/StorageSingleFactory.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/StorageSingleFactory.scala new file mode 100644 index 0000000000..c5f39a8b85 --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/StorageSingleFactory.scala @@ -0,0 +1,55 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import cats.data.EitherT +import cats.syntax.functor.* +import com.digitalasset.canton.config.* +import com.digitalasset.canton.lifecycle.{CloseContext, UnlessShutdown} +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.metrics.DbStorageMetrics +import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.tracing.TraceContext + +import java.util.concurrent.ScheduledExecutorService +import scala.concurrent.ExecutionContext + +/** Storage factory for nodes with a single writer to the database. MUST NOT be used for replicated + * nodes, use [[StorageMultiFactory]] instead. + */ +class StorageSingleFactory( + val config: StorageConfig +) extends StorageFactory { + + def create( + connectionPoolForParticipant: Boolean, + logQueryCost: Option[QueryCostMonitoringConfig], + clock: Clock, + scheduler: Option[ScheduledExecutorService], + metrics: DbStorageMetrics, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + )(implicit + ec: ExecutionContext, + traceContext: TraceContext, + closeContext: CloseContext, + ): EitherT[UnlessShutdown, String, Storage] = + config match { + case StorageConfig.Memory(_, _) => + EitherT.rightT(new MemoryStorage(loggerFactory, timeouts)) + case dbConfig: DbConfig => + DbStorageSingle + .create( + dbConfig, + connectionPoolForParticipant, + logQueryCost, + clock, + scheduler, + metrics, + timeouts, + loggerFactory, + ) + .widen[Storage] + } +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/resource/StorageSingleSetup.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/StorageSingleSetup.scala new file mode 100644 index 0000000000..9f4ed50768 --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/StorageSingleSetup.scala @@ -0,0 +1,82 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import com.daml.metrics.api.noop.NoOpMetricsFactory +import com.daml.metrics.api.{HistogramInventory, MetricName, MetricsContext} +import com.digitalasset.canton.config.{ + DbConfig, + ProcessingTimeout, + QueryCostMonitoringConfig, + StorageConfig, +} +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.lifecycle.CloseContext +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.metrics.{DbStorageHistograms, DbStorageMetrics} +import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.tracing.TraceContext + +import scala.concurrent.ExecutionContext + +/** Storage setup for nodes with a single writer to the database. MUST NOT be used for replicated + * nodes, use [[StorageMultiFactory]] instead. 
+ */ +object StorageSingleSetup { + + private def migrateDb(dbMigrations: DbMigrations): Unit = + dbMigrations + .migrateDatabase() + .value + .map { + case Left(error) => sys.error(s"Error with migration $error") + case Right(_) => () + } + .discard + + private def createDbStorageMetrics()(implicit + metricsContext: MetricsContext + ): DbStorageMetrics = + new DbStorageMetrics( + new DbStorageHistograms(MetricName("none"))(new HistogramInventory), + NoOpMetricsFactory, + ) + + def tryCreateAndMigrateStorage( + storageConfig: StorageConfig, + logQueryCostConfig: Option[QueryCostMonitoringConfig], + clock: Clock, + processingTimeout: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + setMigrationsPath: StorageConfig => StorageConfig = identity, + )(implicit + executionContext: ExecutionContext, + traceContext: TraceContext, + closeContext: CloseContext, + metricsContext: MetricsContext, + ): Storage = { + val storageConfigWithMigrations = setMigrationsPath(storageConfig) + storageConfigWithMigrations match { + case dbConfig: DbConfig => + migrateDb( + new DbMigrations(dbConfig, false, processingTimeout, loggerFactory) + ) + case _ => + // Not a DB storage (currently, only memory) => no need for migrations. + () + } + new StorageSingleFactory( + storageConfigWithMigrations + ) + .tryCreate( + connectionPoolForParticipant = false, + logQueryCostConfig, + clock, + scheduler = None, + metrics = createDbStorageMetrics(), + processingTimeout, + loggerFactory, + ) + } +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/resource/WithDbLock.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/WithDbLock.scala new file mode 100644 index 0000000000..0b89690b6f --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/resource/WithDbLock.scala @@ -0,0 +1,184 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
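StorageSingleSetup.tryCreateAndMigrateStorage pins down the ordering: apply the optional migrations-path rewrite, run schema migrations only for database configs, and only then build the storage through StorageSingleFactory. A toy sketch of that flow; Config, migrate and createStorage are hypothetical stand-ins for the Canton types and helpers:

object SetupSketch {
  sealed trait Config
  case object MemoryConfig extends Config
  final case class DatabaseConfig(url: String) extends Config

  // Migrate first (DB configs only), then hand the config to the factory.
  def setUp(config: Config): String = {
    config match {
      case DatabaseConfig(_) => migrate()
      case MemoryConfig      => () // nothing to migrate for in-memory storage
    }
    createStorage(config) // only reached with an up-to-date schema
  }

  private def migrate(): Unit =
    println("running schema migrations") // stand-in for DbMigrations.migrateDatabase()

  private def createStorage(config: Config): String =
    s"storage for $config" // stand-in for StorageSingleFactory.tryCreate(...)
}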
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import cats.data.EitherT +import cats.syntax.either.* +import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.config.{DbConfig, DbLockedConnectionConfig, ProcessingTimeout} +import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, TracedLogger} +import com.digitalasset.canton.resource.DbStorage.{DbLockSupport, Profile} +import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.util.retry +import slick.util.AsyncExecutorWithMetrics + +import scala.concurrent.ExecutionContext + +object WithDbLock { + sealed trait WithDbLockError extends Product with Serializable with PrettyPrinting + + object WithDbLockError { + final case class DatabaseConfigurationError(error: String) extends WithDbLockError { + override protected def pretty: Pretty[DatabaseConfigurationError] = prettyOfClass( + unnamedParam(_.error.unquoted) + ) + } + + final case class ConnectionError(error: DbLockedConnectionError) extends WithDbLockError { + override protected def pretty: Pretty[ConnectionError] = prettyOfClass(unnamedParam(_.error)) + } + + final case class LockAcquisitionError(error: DbLockError) extends WithDbLockError { + override protected def pretty: Pretty[LockAcquisitionError] = prettyOfClass( + unnamedParam(_.error) + ) + } + + final case class OperationError[A: Pretty](error: A) extends WithDbLockError { + override protected def pretty: Pretty[OperationError[A]] = prettyOfClass( + unnamedParam(_.error) + ) + } + } + + /** Attempts to acquire an exclusive lock with the given `lockCounter`, blocking until the lock + * is acquired. The block is then executed, and finally the lock as well as the connection + * holding it are released. + * + * If the provided `storage` instance does not support DB-provided application locks, the block + * is run directly without synchronization. This is considered reasonable because the lock is + * mainly used to synchronize multiple processes around a shared database; an in-memory or H2 + * store cannot (or is unlikely to) be shared, so the current process is likely the only one + * running the operation.
+ */ + def withDbLock[A: Pretty, B]( + lockName: String, + lockCounter: DbLockCounter, + timeouts: ProcessingTimeout, + dbConfig: DbConfig, + connectionConfig: DbLockedConnectionConfig, + profile: Profile, + futureSupervisor: FutureSupervisor, + clock: Clock, + loggerFactory: NamedLoggerFactory, + logLockOwnersOnLockAcquisitionAttempt: Boolean, + )(fn: => EitherT[FutureUnlessShutdown, A, B])(implicit + traceContext: TraceContext, + executionContext: ExecutionContext, + closeContext: CloseContext, + ): EitherT[FutureUnlessShutdown, WithDbLockError, B] = { + val logger = loggerFactory.getLogger(WithDbLock.getClass) + val tracedLogger = TracedLogger(logger) + + def withDbDistributedLock( + dbProfile: Profile with DbLockSupport + ): EitherT[FutureUnlessShutdown, WithDbLockError, B] = for { + lockId <- DbLockId + .allocate(dbConfig, lockCounter, loggerFactory) + .leftMap(WithDbLockError.LockAcquisitionError.apply) + .toEitherT[FutureUnlessShutdown] + + ds <- DbLockedConnection + .createDataSource( + dbConfig.config, + 1, + connectionConfig.connectionTimeout.toInternal, + )(ErrorLoggingContext.fromTracedLogger(tracedLogger)) + .leftMap(WithDbLockError.DatabaseConfigurationError.apply) + .toEitherT[FutureUnlessShutdown] + + executor = AsyncExecutorWithMetrics.createSingleThreaded(lockName, logger) + + lockedConnection = DbLockedConnection.create( + dbProfile, + ds, + lockId, + DbLockMode.Exclusive, + connectionConfig, + isMainConnection = true, + timeouts, + exitOnFatalFailures = false, + clock, + loggerFactory + .append("connId", lockName) + .append("lockId", lockId.toString), + futureSupervisor, + executor, + logLockOwnersOnLockAcquisitionAttempt, + ) + + // Wait until the lock is acquired + _ <- DbLockedConnection + .awaitActive(lockedConnection, retry.Forever, 200, tracedLogger) + .leftMap[WithDbLockError] { err => + lockedConnection.close() + ds.close() + executor.close() + WithDbLockError.ConnectionError(err) + } + .mapK(FutureUnlessShutdown.liftK) + + result <- fn + .leftMap[WithDbLockError](WithDbLockError.OperationError(_)) + .thereafter { _ => + lockedConnection.close() + ds.close() + executor.close() + } + } yield { + result + } + + DbLock.isSupported(profile) match { + case Right(dbProfile) => withDbDistributedLock(dbProfile) + case Left(_unsupported) => + tracedLogger.debug( + s"Distributed locks for the configured Db storage profile ($profile) are not supported. Running [$lockName] without lock." + ) + fn.leftMap[WithDbLockError](WithDbLockError.OperationError(_)) + } + } + + def withDbLock[A: Pretty, B]( + lockName: String, + lockCounter: DbLockCounter, + timeouts: ProcessingTimeout, + storage: Storage, + clock: Clock, + futureSupervisor: FutureSupervisor, + loggerFactory: NamedLoggerFactory, + connectionConfig: DbLockedConnectionConfig, + logLockOwnersOnLockAcquisitionAttempt: Boolean, + )(fn: => EitherT[FutureUnlessShutdown, A, B])(implicit + traceContext: TraceContext, + executionContext: ExecutionContext, + closeContext: CloseContext, + ): EitherT[FutureUnlessShutdown, WithDbLockError, B] = + storage match { + case dbStorage: DbStorage => + withDbLock( + lockName, + lockCounter, + timeouts, + dbStorage.dbConfig, + connectionConfig, + dbStorage.profile, + futureSupervisor, + clock, + loggerFactory, + logLockOwnersOnLockAcquisitionAttempt, + )(fn) + case other => + val logger = TracedLogger(WithDbLock.getClass, loggerFactory) + logger.debug( + s"Distributed locks for the configured storage ($other) are not supported. Running [$lockName] without lock." 
+ ) + fn.leftMap[WithDbLockError](WithDbLockError.OperationError(_)) + } + +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/ConnectionX.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/ConnectionX.scala index a06b84504e..8ffebe214b 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/ConnectionX.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/ConnectionX.scala @@ -69,7 +69,15 @@ object ConnectionX { customTrustCertificates: Option[ByteString], expectedSequencerIdO: Option[SequencerId], tracePropagation: TracingConfig.Propagation, - ) + ) extends PrettyPrinting { + override protected def pretty: Pretty[ConnectionXConfig] = prettyOfClass( + param("name", _.name.singleQuoted), + param("endpoint", _.endpoint.toURI(transportSecurity)), + param("transportSecurity", _.transportSecurity), + param("customTrustCertificates", _.customTrustCertificates.nonEmpty), + paramIfDefined("expectedSequencerId", _.expectedSequencerIdO), + ) + } class ConnectionXHealth( override val name: String, diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/GrpcInternalSequencerConnectionX.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/GrpcInternalSequencerConnectionX.scala index 3966ab9f1f..2aa13f43ed 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/GrpcInternalSequencerConnectionX.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/GrpcInternalSequencerConnectionX.scala @@ -22,6 +22,7 @@ import com.digitalasset.canton.lifecycle.{ import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.logging.{NamedLoggerFactory, TracedLogger} import com.digitalasset.canton.networking.grpc.CantonGrpcUtil +import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.RetryPolicy import com.digitalasset.canton.sequencing.ConnectionX.{ConnectionXConfig, ConnectionXState} import com.digitalasset.canton.sequencing.InternalSequencerConnectionX.{ ConnectionAttributes, @@ -259,10 +260,12 @@ class GrpcInternalSequencerConnectionX private[sequencing] ( logger.debug(s"Starting validation of $name") + // We are not retrying these calls, because the connection pool takes care of restarting connections when they fail, + // and in particular if they fail validation val resultET = for { apiName <- stub .getApiName( - retryPolicy = retryPolicy(retryOnUnavailable = true), + retryPolicy = RetryPolicy.noRetry, logPolicy = CantonGrpcUtil.SilentLogPolicy, ) .leftMap(SequencerConnectionXInternalError.StubError.apply) @@ -276,7 +279,7 @@ class GrpcInternalSequencerConnectionX private[sequencing] ( .performHandshake( clientProtocolVersions, minimumProtocolVersion, - retryPolicy = retryPolicy(retryOnUnavailable = true), + retryPolicy = RetryPolicy.noRetry, logPolicy = CantonGrpcUtil.SilentLogPolicy, ) .leftMap(SequencerConnectionXInternalError.StubError.apply) @@ -292,7 +295,7 @@ class GrpcInternalSequencerConnectionX private[sequencing] ( synchronizerAndSequencerIds <- stub .getSynchronizerAndSequencerIds( - retryPolicy = retryPolicy(retryOnUnavailable = true), + retryPolicy = RetryPolicy.noRetry, logPolicy = CantonGrpcUtil.SilentLogPolicy, ) .leftMap[SequencerConnectionXInternalError]( @@ -310,7 +313,7 @@ class GrpcInternalSequencerConnectionX private[sequencing] ( params <- stub .getStaticSynchronizerParameters( - retryPolicy = 
retryPolicy(retryOnUnavailable = true), + retryPolicy = RetryPolicy.noRetry, logPolicy = CantonGrpcUtil.SilentLogPolicy, ) .leftMap[SequencerConnectionXInternalError]( diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/GrpcSequencerConnectionX.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/GrpcSequencerConnectionX.scala index 6189b17890..202aac50df 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/GrpcSequencerConnectionX.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/GrpcSequencerConnectionX.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton.sequencing import cats.data.EitherT import cats.syntax.either.* import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, LifeCycle} import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} @@ -230,6 +231,11 @@ class GrpcSequencerConnectionX( ): EitherT[FutureUnlessShutdown, Status, Unit] = clientAuth.logout() + override def getTime(timeout: Duration)(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, String, Option[CantonTimestamp]] = + stub.getTime(timeout, retryPolicy = retryPolicy(retryOnUnavailable = false)).leftMap(_.toString) + override def downloadTopologyStateForInit( request: TopologyStateForInitRequest, timeout: Duration, diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/GrpcUserSequencerConnectionXStub.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/GrpcUserSequencerConnectionXStub.scala index 6476183834..6a45863f6b 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/GrpcUserSequencerConnectionXStub.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/GrpcUserSequencerConnectionXStub.scala @@ -4,10 +4,11 @@ package com.digitalasset.canton.sequencing import cats.data.EitherT -import cats.implicits.catsSyntaxEither +import cats.implicits.{catsSyntaxEither, toTraverseOps} import com.daml.grpc.adapter.ExecutionSequencerFactory import com.digitalasset.canton.ProtoDeserializationError.ProtoDeserializationFailure import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, HasRunOnClosing} import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.networking.grpc.{CantonGrpcUtil, GrpcError} @@ -116,7 +117,7 @@ class GrpcUserSequencerConnectionXStub( override def getTrafficStateForMember( request: GetTrafficStateForMemberRequest, timeout: Duration, - retryPolicy: GrpcError => Boolean = CantonGrpcUtil.RetryPolicy.noRetry, + retryPolicy: GrpcError => Boolean, logPolicy: CantonGrpcUtil.GrpcLogPolicy, )(implicit traceContext: TraceContext @@ -144,6 +145,36 @@ class GrpcUserSequencerConnectionXStub( ) } yield response + override def getTime( + timeout: Duration, + retryPolicy: GrpcError => Boolean, + logPolicy: CantonGrpcUtil.GrpcLogPolicy, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, SequencerConnectionXStubError, Option[CantonTimestamp]] = + for { + response <- connection + .sendRequest( + requestDescription = s"get-time", + stubFactory = sequencerSvcFactory, + retryPolicy = retryPolicy, + logPolicy = 
logPolicy, + timeout = timeout, + )(_.getTime(com.digitalasset.canton.sequencer.api.v30.GetTimeRequest())) + .leftMap[SequencerConnectionXStubError]( + SequencerConnectionXStubError.ConnectionError.apply + ) + timestampO <- EitherT.fromEither[FutureUnlessShutdown]( + response.sequencingTimestamp + .traverse(CantonTimestamp.fromProtoPrimitive) + .leftMap(err => + SequencerConnectionXStubError.DeserializationError( + err.message + ): SequencerConnectionXStubError + ) + ) + } yield timestampO + override def downloadTopologyStateForInit( request: TopologyStateForInitRequest, timeout: Duration, diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionPoolDelays.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionPoolDelays.scala new file mode 100644 index 0000000000..9d968d4589 --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionPoolDelays.scala @@ -0,0 +1,79 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing + +import com.digitalasset.canton.admin.sequencer.v30 +import com.digitalasset.canton.config +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult + +/** Configures the various delays used by the sequencer connection pool. + * + * @param minRestartDelay + * Minimum duration after which a failed sequencer connection is restarted. + * @param maxRestartDelay + * Maximum duration after which a failed sequencer connection is restarted. + * @param subscriptionRequestDelay + * Delay between the attempts to obtain new sequencer connections for the sequencer subscription + * pool, when the current number of subscriptions is below `trustThreshold` + `livenessMargin`. 
+ */ +final case class SequencerConnectionPoolDelays( + minRestartDelay: config.NonNegativeFiniteDuration, + maxRestartDelay: config.NonNegativeFiniteDuration, + subscriptionRequestDelay: config.NonNegativeFiniteDuration, +) extends PrettyPrinting { + private[sequencing] def toProtoV30: v30.SequencerConnectionPoolDelays = + v30.SequencerConnectionPoolDelays( + minRestartDelay = Some(minRestartDelay.toProtoPrimitive), + maxRestartDelay = Some(maxRestartDelay.toProtoPrimitive), + subscriptionRequestDelay = Some(subscriptionRequestDelay.toProtoPrimitive), + ) + + override protected def pretty: Pretty[SequencerConnectionPoolDelays] = prettyOfClass( + param("minRestartDelay", _.minRestartDelay), + param("maxRestartDelay", _.maxRestartDelay), + param("subscriptionRequestDelay", _.subscriptionRequestDelay), + ) +} + +object SequencerConnectionPoolDelays { + val default: SequencerConnectionPoolDelays = SequencerConnectionPoolDelays( + minRestartDelay = config.NonNegativeFiniteDuration.ofMillis(10), + maxRestartDelay = config.NonNegativeFiniteDuration.ofSeconds(10), + subscriptionRequestDelay = config.NonNegativeFiniteDuration.ofSeconds(1), + ) + + private[sequencing] def fromProtoV30( + proto: v30.SequencerConnectionPoolDelays + ): ParsingResult[SequencerConnectionPoolDelays] = { + val v30.SequencerConnectionPoolDelays( + minRestartDelayP, + maxRestartDelayP, + subscriptionRequestDelayP, + ) = proto + + for { + minRestartDelay <- ProtoConverter.parseRequired( + config.NonNegativeFiniteDuration.fromProtoPrimitive("min_restart_delay"), + "min_restart_delay", + minRestartDelayP, + ) + maxRestartDelay <- ProtoConverter.parseRequired( + config.NonNegativeFiniteDuration.fromProtoPrimitive("max_restart_delay"), + "max_restart_delay", + maxRestartDelayP, + ) + subscriptionRequestDelay <- ProtoConverter.parseRequired( + config.NonNegativeFiniteDuration.fromProtoPrimitive("subscription_request_delay"), + "subscription_request_delay", + subscriptionRequestDelayP, + ) + } yield SequencerConnectionPoolDelays( + minRestartDelay, + maxRestartDelay, + subscriptionRequestDelay, + ) + } +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionX.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionX.scala index 27103b8b54..6721f546ea 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionX.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionX.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.sequencing import cats.data.EitherT +import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown} import com.digitalasset.canton.logging.NamedLogging import com.digitalasset.canton.sequencing.ConnectionX.ConnectionXConfig @@ -67,6 +68,11 @@ trait SequencerConnectionX extends FlagCloseable with NamedLogging { def logout()(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, Status, Unit] + /** Fetches the "current" sequencing time */ + def getTime(timeout: Duration)(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, String, Option[CantonTimestamp]] + def downloadTopologyStateForInit(request: TopologyStateForInitRequest, timeout: Duration)(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, String, TopologyStateForInitResponse] diff --git
a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPool.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPool.scala index 8fff81cbb6..b9cf5839bd 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPool.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPool.scala @@ -16,6 +16,7 @@ import com.digitalasset.canton.lifecycle.{ HasRunOnClosing, OnShutdownRunner, } +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.logging.{NamedLogging, TracedLogger} import com.digitalasset.canton.networking.Endpoint import com.digitalasset.canton.protocol.StaticSynchronizerParameters @@ -155,8 +156,10 @@ object SequencerConnectionXPool { * for the pool to initialize and start serving connections. After initialization, if the * number of connections in the pool goes below the threshold, the pool's health will * transition to `degraded` (or `failed` if it reaches 0). - * @param restartConnectionDelay - * The duration after which a failed connection is restarted. + * @param minRestartConnectionDelay + * The minimum duration after which a failed connection is restarted. + * @param maxRestartConnectionDelay + * The maximum duration after which a failed connection is restarted. * @param expectedPSIdO * If provided, defines the synchronizer to which the connections are expected to connect. If * empty, the synchronizer will be determined as soon as [[trustThreshold]]-many connections @@ -165,13 +168,21 @@ object SequencerConnectionXPool { final case class SequencerConnectionXPoolConfig( connections: NonEmpty[Seq[ConnectionXConfig]], trustThreshold: PositiveInt, - restartConnectionDelay: config.NonNegativeFiniteDuration = - config.NonNegativeFiniteDuration.ofMillis(500), + minRestartConnectionDelay: config.NonNegativeFiniteDuration, + maxRestartConnectionDelay: config.NonNegativeFiniteDuration, expectedPSIdO: Option[PhysicalSynchronizerId] = None, - ) { + ) extends PrettyPrinting { // TODO(i24780): when persisting, use com.digitalasset.canton.version.Invariant machinery for validation import SequencerConnectionXPoolConfig.* + override protected def pretty: Pretty[SequencerConnectionXPoolConfig] = prettyOfClass( + param("connections", _.connections), + param("trustThreshold", _.trustThreshold), + param("minRestartConnectionDelay", _.minRestartConnectionDelay), + param("maxRestartConnectionDelay", _.maxRestartConnectionDelay), + paramIfDefined("expectedPSIdO", _.expectedPSIdO), + ) + def validate: Either[SequencerConnectionXPoolError, Unit] = { val (names, endpoints) = connections.map(conn => conn.name -> conn.endpoint).unzip @@ -195,6 +206,11 @@ object SequencerConnectionXPool { (), s"Trust threshold ($trustThreshold) must not exceed the number of connections (${connections.size})", ) + _ <- Either.cond( + minRestartConnectionDelay.duration <= maxRestartConnectionDelay.duration, + (), + s"Minimum restart connection delay ($minRestartConnectionDelay) must not exceed the maximum ($maxRestartConnectionDelay)", + ) } yield () check.leftMap(SequencerConnectionXPoolError.InvalidConfigurationError.apply) @@ -217,7 +233,12 @@ object SequencerConnectionXPool { private[sequencing] final case class ChangedConnections( added: Set[ConnectionXConfig], removed: Set[ConnectionXConfig], - ) + ) extends PrettyPrinting { + override protected def pretty: Pretty[ChangedConnections] = 
prettyOfClass( + param("added", _.added), + param("removed", _.removed), + ) + } /** Create a sequencer connection pool configuration from the existing format. * @@ -261,6 +282,10 @@ object SequencerConnectionXPool { connectionsConfig, trustThreshold = sequencerConnections.sequencerTrustThreshold, expectedPSIdO = expectedPSIdO, + minRestartConnectionDelay = + sequencerConnections.sequencerConnectionPoolDelays.minRestartDelay, + maxRestartConnectionDelay = + sequencerConnections.sequencerConnectionPoolDelays.maxRestartDelay, ) } @@ -302,7 +327,8 @@ trait SequencerConnectionXPoolFactory { import SequencerConnectionXPool.{SequencerConnectionXPoolConfig, SequencerConnectionXPoolError} def create( - initialConfig: SequencerConnectionXPoolConfig + initialConfig: SequencerConnectionXPoolConfig, + name: String, )(implicit ec: ExecutionContextExecutor, esf: ExecutionSequencerFactory, @@ -314,6 +340,7 @@ trait SequencerConnectionXPoolFactory { sequencerConnections: SequencerConnections, expectedPSIdO: Option[PhysicalSynchronizerId], tracingConfig: TracingConfig, + name: String, )(implicit ec: ExecutionContextExecutor, esf: ExecutionSequencerFactory, diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPoolImpl.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPoolImpl.scala index 4c5761224c..ba8103a1b2 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPoolImpl.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPoolImpl.scala @@ -8,7 +8,7 @@ import cats.syntax.either.* import cats.syntax.functorFilter.* import com.daml.grpc.adapter.ExecutionSequencerFactory import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.checked +import com.digitalasset.canton import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} @@ -29,12 +29,13 @@ import com.digitalasset.canton.sequencing.SequencerConnectionXPool.{ SequencerConnectionXPoolHealth, } import com.digitalasset.canton.sequencing.authentication.AuthenticationTokenManagerConfig -import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.time.{Clock, WallClock} import com.digitalasset.canton.topology.{Member, PhysicalSynchronizerId, SequencerId} import com.digitalasset.canton.tracing.{TraceContext, TracingConfig} import com.digitalasset.canton.util.collection.SeqUtil import com.digitalasset.canton.util.{ErrorUtil, FutureUnlessShutdownUtil, LoggerUtil, SingleUseCell} import com.digitalasset.canton.version.ProtocolVersion +import com.digitalasset.canton.{checked, config as cantonConfig} import com.google.common.annotations.VisibleForTesting import org.apache.pekko.stream.Materializer @@ -59,6 +60,11 @@ class SequencerConnectionXPoolImpl private[sequencing] ( import SequencerConnectionXPoolImpl.* + /** Use a wall clock for scheduling restart delays, so that the pool can make progress even in + * tests that use static time without explicitly advancing the time + */ + private val wallClock = new WallClock(timeouts, loggerFactory) + /** Reference to the currently active configuration */ private val configRef = new AtomicReference[SequencerConnectionXPoolConfig](initialConfig) @@ -143,7 +149,7 @@ class SequencerConnectionXPoolImpl private[sequencing] ( } 
FutureUnlessShutdownUtil.doNotAwaitUnlessShutdown( - clock.scheduleAfter(_ => signalTimeout(), initializationTimeout.asJavaApproximation), + wallClock.scheduleAfter(_ => signalTimeout(), initializationTimeout.asJavaApproximation), s"connection-pool-initialization-timeout", ) } @@ -221,25 +227,54 @@ class SequencerConnectionXPoolImpl private[sequencing] ( SequencerConnectionXPoolImpl.this.loggerFactory .append("connection", s"${connection.config.name}") - private val restartScheduledRef: AtomicBoolean = new AtomicBoolean(false) + /** @param scheduled + * Indicates that a restart has already been scheduled for this connection + * @param delay + * Current restart delay. It grows exponentially at every restart, and is reset to the + * minimum when a connection fails after having been validated. + */ + private case class RestartData( + scheduled: Boolean, + delay: cantonConfig.NonNegativeFiniteDuration, + ) + + private val restartDataRef = new AtomicReference[RestartData]( + RestartData(scheduled = false, delay = config.minRestartConnectionDelay) + ) + + private def resetRestartDelay(): Unit = restartDataRef.getAndUpdate { + _.copy(delay = config.minRestartConnectionDelay) + }.discard + + private def scheduleRestart()(implicit traceContext: TraceContext): Unit = { + val RestartData(restartAlreadyScheduled, delay) = restartDataRef.getAndUpdate { + case RestartData(false, delay) => + RestartData( + scheduled = true, + // Exponentially backoff the restart delay, bounded by the max + delay = canton.config.NonNegativeFiniteDuration.tryFromDuration( + (delay.duration * 2).min(config.maxRestartConnectionDelay.duration) + ), + ) + case other => other + } - private def scheduleRestart()(implicit traceContext: TraceContext): Unit = - if (restartScheduledRef.getAndSet(true)) + if (restartAlreadyScheduled) logger.debug("Restart already scheduled -- ignoring") else { - val delay = config.restartConnectionDelay - logger.debug( + logger.info( s"Scheduling restart after ${LoggerUtil.roundDurationForHumans(delay.duration)}" ) FutureUnlessShutdownUtil.doNotAwaitUnlessShutdown( - clock.scheduleAfter(_ => restart(), delay.asJava), + wallClock.scheduleAfter(_ => restart(), delay.asJava), s"restart-connection-${connection.name}", ) } + } private def restart()(implicit traceContext: TraceContext): Unit = if (!isClosing) { - if (restartScheduledRef.getAndSet(false)) { + if (restartDataRef.getAndUpdate(_.copy(scheduled = false)).scheduled) { logger.debug("Restarting") startConnection() } @@ -269,7 +304,7 @@ class SequencerConnectionXPoolImpl private[sequencing] ( case SequencerConnectionXState.Validated => processValidatedConnection(connection) case SequencerConnectionXState.Stopped => - removeConnectionFromPool(connection) + removeConnectionFromPool(connection, actionIfPresent = resetRestartDelay()) if (!isClosing) scheduleRestart() // For any other state, ensure the connection is not in the pool @@ -278,7 +313,7 @@ class SequencerConnectionXPoolImpl private[sequencing] ( SequencerConnectionXState.Started | SequencerConnectionXState.Stopping => if (state == SequencerConnectionXState.Fatal) checkIfThresholdIsStillReachable(config.trustThreshold) - removeConnectionFromPool(connection) + removeConnectionFromPool(connection, actionIfPresent = resetRestartDelay()) } } }) @@ -342,10 +377,14 @@ class SequencerConnectionXPoolImpl private[sequencing] ( } } yield { configRef.set(newConfig) + logger.info(s"Configuration updated to: $newConfig") // If the trust threshold is now reached, process it 
bootstrapIfThresholdReachedO.foreach(initializePool) + logger.debug( + s"Configuration update triggers the following connection changes: $changedConnections" + ) updateTrackedConnections( toBeAdded = changedConnections.added, toBeRemoved = changedConnections.removed, @@ -563,8 +602,13 @@ class SequencerConnectionXPoolImpl private[sequencing] ( } } + /** @param actionIfPresent + * An action to perform if the connection is effectively present in the pool (which is not + * always the case due to concurrent activity) + */ private def removeConnectionFromPool( - connection: InternalSequencerConnectionX + connection: InternalSequencerConnectionX, + actionIfPresent: => Unit = (), )(implicit traceContext: TraceContext): Unit = blocking { lock.synchronized { connection.attributes match { @@ -576,6 +620,7 @@ class SequencerConnectionXPoolImpl private[sequencing] ( // Match on config case Some(current) if current.exists(_.config == connection.config) => logger.debug(s"Removing $connection from the pool") + actionIfPresent val newList = current.filter(_.config != connection.config) NonEmpty.from(newList) case None => None @@ -684,18 +729,21 @@ class GrpcSequencerConnectionXPoolFactory( import SequencerConnectionXPool.{SequencerConnectionXPoolConfig, SequencerConnectionXPoolError} override def create( - initialConfig: SequencerConnectionXPoolConfig + initialConfig: SequencerConnectionXPoolConfig, + name: String, )(implicit ec: ExecutionContextExecutor, esf: ExecutionSequencerFactory, materializer: Materializer, ): Either[SequencerConnectionXPoolError, SequencerConnectionXPool] = { + val loggerWithPoolName = loggerFactory.append("pool", name) + val connectionFactory = new GrpcInternalSequencerConnectionXFactory( clientProtocolVersions, minimumProtocolVersion, futureSupervisor, timeouts, - loggerFactory, + loggerWithPoolName, ) for { @@ -711,7 +759,7 @@ class GrpcSequencerConnectionXPoolFactory( seedForRandomnessO, futureSupervisor, timeouts, - loggerFactory, + loggerWithPoolName, ) } } @@ -720,6 +768,7 @@ class GrpcSequencerConnectionXPoolFactory( sequencerConnections: SequencerConnections, expectedPSIdO: Option[PhysicalSynchronizerId], tracingConfig: TracingConfig, + name: String, )(implicit ec: ExecutionContextExecutor, esf: ExecutionSequencerFactory, @@ -733,6 +782,6 @@ class GrpcSequencerConnectionXPoolFactory( ) logger.debug(s"poolConfig = $poolConfig") - create(poolConfig) + create(poolConfig, name) } } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnections.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnections.scala index a8e0b066ff..375b37d7dd 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnections.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnections.scala @@ -30,6 +30,7 @@ final case class SequencerConnections private ( sequencerTrustThreshold: PositiveInt, sequencerLivenessMargin: NonNegativeInt, submissionRequestAmplification: SubmissionRequestAmplification, + sequencerConnectionPoolDelays: SequencerConnectionPoolDelays, ) extends HasVersionedWrapper[SequencerConnections] with PrettyPrinting { require( @@ -109,12 +110,16 @@ final case class SequencerConnections private ( s"Sequencer trust threshold $sequencerTrustThreshold cannot be greater than number of sequencer connections ${aliasToConnection.size}", ) + def withLivenessMargin(sequencerLivenessMargin: NonNegativeInt): SequencerConnections = + 
this.copy(sequencerLivenessMargin = sequencerLivenessMargin) + override protected def pretty: Pretty[SequencerConnections] = prettyOfClass( param("connections", _.aliasToConnection.forgetNE), param("sequencer trust threshold", _.sequencerTrustThreshold), param("sequencer liveness margin", _.sequencerLivenessMargin), param("submission request amplification", _.submissionRequestAmplification), + param("sequencer connection pool delays", _.sequencerConnectionPoolDelays), ) def toProtoV30: v30.SequencerConnections = @@ -123,6 +128,7 @@ final case class SequencerConnections private ( sequencerTrustThreshold.unwrap, Some(submissionRequestAmplification.toProtoV30), sequencerLivenessMargin.unwrap, + Some(sequencerConnectionPoolDelays.toProtoV30), ) @transient override protected lazy val companionObj @@ -142,6 +148,7 @@ object SequencerConnections sequencerTrustThreshold = PositiveInt.one, sequencerLivenessMargin = NonNegativeInt.zero, submissionRequestAmplification = SubmissionRequestAmplification.NoAmplification, + sequencerConnectionPoolDelays = SequencerConnectionPoolDelays.default, ) def many( @@ -149,6 +156,7 @@ object SequencerConnections sequencerTrustThreshold: PositiveInt, sequencerLivenessMargin: NonNegativeInt, submissionRequestAmplification: SubmissionRequestAmplification, + sequencerConnectionPoolDelays: SequencerConnectionPoolDelays, ): Either[String, SequencerConnections] = { val repeatedAliases = connections.groupBy(_.sequencerAlias).filter { case (_, connections) => connections.lengthCompare(1) > 0 @@ -166,6 +174,7 @@ object SequencerConnections sequencerTrustThreshold, sequencerLivenessMargin, submissionRequestAmplification, + sequencerConnectionPoolDelays, ) ) .leftMap(_.getMessage) @@ -177,12 +186,14 @@ object SequencerConnections sequencerTrustThreshold: PositiveInt, sequencerLivenessMargin: NonNegativeInt, submissionRequestAmplification: SubmissionRequestAmplification, + sequencerConnectionPoolDelays: SequencerConnectionPoolDelays, ): SequencerConnections = many( NonEmptyUtil.fromUnsafe(connections), sequencerTrustThreshold, sequencerLivenessMargin, submissionRequestAmplification, + sequencerConnectionPoolDelays, ).valueOr(err => throw new IllegalArgumentException(err)) def fromProtoV30( @@ -193,6 +204,7 @@ object SequencerConnections sequencerTrustThresholdP, submissionRequestAmplificationP, sequencerLivenessMarginP, + sequencerConnectionPoolDelaysP, ) = sequencerConnectionsProto for { sequencerTrustThreshold <- ProtoConverter.parsePositiveInt( @@ -213,6 +225,11 @@ object SequencerConnections "sequencer_connections", sequencerConnectionsP, ) + sequencerConnectionPoolDelays <- ProtoConverter.parseRequired( + SequencerConnectionPoolDelays.fromProtoV30, + "sequencer_connection_pool_delays", + sequencerConnectionPoolDelaysP, + ) _ <- Either.cond( sequencerConnectionsNes.map(_.sequencerAlias).toSet.sizeIs == sequencerConnectionsNes.size, (), @@ -226,6 +243,7 @@ object SequencerConnections sequencerTrustThreshold, sequencerLivenessMargin, submissionRequestAmplification, + sequencerConnectionPoolDelays, ).leftMap(ProtoDeserializationError.InvariantViolation(field = None, _)) } yield sequencerConnections } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionPool.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionPool.scala index 164380a328..d96cec6ea6 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionPool.scala +++ 
b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionPool.scala @@ -4,12 +4,14 @@ package com.digitalasset.canton.sequencing import com.digitalasset.canton.config -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.health.{AtomicHealthComponent, ComponentHealthState} import com.digitalasset.canton.lifecycle.{FlagCloseable, HasRunOnClosing} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.logging.{NamedLogging, TracedLogger} import com.digitalasset.canton.sequencing.SequencerSubscriptionPool.SequencerSubscriptionPoolConfig import com.digitalasset.canton.sequencing.SequencerSubscriptionPoolImpl.SubscriptionStartProvider +import com.digitalasset.canton.sequencing.client.SequencerClient.SequencerTransports import com.digitalasset.canton.sequencing.client.{SequencerClient, SequencerClientSubscriptionError} import com.digitalasset.canton.topology.Member import com.digitalasset.canton.tracing.TraceContext @@ -62,21 +64,36 @@ trait SequencerSubscriptionPool extends FlagCloseable with NamedLogging { object SequencerSubscriptionPool { /** Subscription pool configuration - * @param trustThreshold - * Minimal number of subscriptions needed to satisfy the trust requirements. * @param livenessMargin * Number of extra subscriptions to maintain to ensure liveness. - * @param connectionRequestDelay + * @param subscriptionRequestDelay * Delay between the attempts to obtain new connections, when the current number of - * subscriptions is not [[trustThreshold]] + [[livenessMargin]]. + * subscriptions is not `trustThreshold` + [[livenessMargin]]. */ final case class SequencerSubscriptionPoolConfig( - trustThreshold: PositiveInt, livenessMargin: NonNegativeInt, - connectionRequestDelay: config.NonNegativeFiniteDuration = - config.NonNegativeFiniteDuration.ofSeconds(1), - ) { - lazy val activeThreshold: PositiveInt = trustThreshold + livenessMargin + subscriptionRequestDelay: config.NonNegativeFiniteDuration, + ) extends PrettyPrinting { + override protected def pretty: Pretty[SequencerSubscriptionPoolConfig] = prettyOfClass( + param("livenessMargin", _.livenessMargin), + param("subscriptionRequestDelay", _.subscriptionRequestDelay), + ) + } + + object SequencerSubscriptionPoolConfig { + + /** Create a sequencer subscription pool configuration from the existing format. + * + * TODO(i27260): remove when no longer needed + */ + def fromSequencerTransports( + sequencerTransports: SequencerTransports[?] 
+ ): SequencerSubscriptionPoolConfig = + SequencerSubscriptionPoolConfig( + livenessMargin = sequencerTransports.sequencerLivenessMargin, + subscriptionRequestDelay = + sequencerTransports.sequencerConnectionPoolDelays.subscriptionRequestDelay, + ) } class SequencerSubscriptionPoolHealth( diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionPoolImpl.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionPoolImpl.scala index d8b0afd52c..3b4b7ff91e 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionPoolImpl.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionPoolImpl.scala @@ -4,8 +4,9 @@ package com.digitalasset.canton.sequencing import cats.syntax.either.* +import com.digitalasset.canton.config as cantonConfig import com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.health.HealthListener import com.digitalasset.canton.lifecycle.LifeCycle @@ -15,7 +16,10 @@ import com.digitalasset.canton.sequencing.SequencerSubscriptionPool.{ SequencerSubscriptionPoolConfig, SequencerSubscriptionPoolHealth, } -import com.digitalasset.canton.sequencing.SequencerSubscriptionPoolImpl.SubscriptionStartProvider +import com.digitalasset.canton.sequencing.SequencerSubscriptionPoolImpl.{ + ConfigWithThreshold, + SubscriptionStartProvider, +} import com.digitalasset.canton.sequencing.client.SequencerClient.CloseReason.UnrecoverableError import com.digitalasset.canton.sequencing.client.SequencerClientSubscriptionError.{ ApplicationHandlerPassive, @@ -26,7 +30,7 @@ import com.digitalasset.canton.sequencing.client.{ SequencerClientSubscriptionError, SubscriptionCloseReason, } -import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.time.WallClock import com.digitalasset.canton.topology.Member import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.{ErrorUtil, FutureUnlessShutdownUtil, LoggerUtil} @@ -42,7 +46,6 @@ final class SequencerSubscriptionPoolImpl private[sequencing] ( subscriptionHandlerFactory: SubscriptionHandlerXFactory, pool: SequencerConnectionXPool, member: Member, - clock: Clock, private val initialSubscriptionEventO: Option[ProcessingSerializedEvent], subscriptionStartProvider: SubscriptionStartProvider, protected override val timeouts: ProcessingTimeout, @@ -50,6 +53,11 @@ final class SequencerSubscriptionPoolImpl private[sequencing] ( )(implicit ec: ExecutionContext) extends SequencerSubscriptionPool { + /** Use a wall clock for scheduling restart delays, so that the pool can make progress even in + * tests that use static time without explicitly advancing the time + */ + private val wallClock = new WallClock(timeouts, loggerFactory) + /** Reference to the currently active configuration */ private val configRef = new AtomicReference[SequencerSubscriptionPoolConfig](initialConfig) @@ -67,10 +75,17 @@ final class SequencerSubscriptionPoolImpl private[sequencing] ( override def config: SequencerSubscriptionPoolConfig = configRef.get + /** Use this instead of [[config]] to obtain a snapshot of all the current configuration + * parameters at once. 
+ */ + private def currentConfigWithThreshold: ConfigWithThreshold = + ConfigWithThreshold(config, pool.config.trustThreshold) + override def updateConfig( newConfig: SequencerSubscriptionPoolConfig )(implicit traceContext: TraceContext): Unit = { configRef.set(newConfig) + logger.info(s"Configuration updated to: $newConfig") // We might need new connections adjustConnectionsIfNeeded() @@ -85,7 +100,7 @@ final class SequencerSubscriptionPoolImpl private[sequencing] ( /** Examine the current number of subscriptions in comparison to the configured trust threshold * with liveness margin. If we are under, we request additional connections, and if we can't * obtain enough, we reschedule the check later after - * [[SequencerSubscriptionPoolConfig.connectionRequestDelay]]. If we are over, we drop some + * [[SequencerSubscriptionPoolConfig.subscriptionRequestDelay]]. If we are over, we drop some * subscriptions. */ private def adjustConnectionsIfNeeded()(implicit traceContext: TraceContext): Unit = { @@ -95,8 +110,7 @@ final class SequencerSubscriptionPoolImpl private[sequencing] ( def adjustInternal(): Unit = blocking { lock.synchronized { if (!isClosing && currentRequest.get == myToken) { - val currentConfig = config - val activeThreshold = currentConfig.activeThreshold + val activeThreshold = currentConfigWithThreshold.activeThreshold val current = trackedSubscriptions.toSet logger.debug( @@ -146,8 +160,8 @@ final class SequencerSubscriptionPoolImpl private[sequencing] ( case Left(_) if nbToRequest < 0 => val toRemove = trackedSubscriptions.take(-nbToRequest) - logger.debug( - s"Dropping ${toRemove.size} subscriptions: ${toRemove.map(_.subscription.connection.name).mkString(", ")}" + logger.info( + s"Dropping ${toRemove.size} extra subscription(s): ${toRemove.map(_.subscription.connection.name).mkString(", ")}" ) removeSubscriptionsFromPool(toRemove.toSeq*) @@ -163,11 +177,11 @@ final class SequencerSubscriptionPoolImpl private[sequencing] ( if (!isClosing) { FutureUnlessShutdownUtil.doNotAwaitUnlessShutdown( { - val delay = config.connectionRequestDelay + val delay = config.subscriptionRequestDelay logger.debug( s"Scheduling new check in ${LoggerUtil.roundDurationForHumans(delay.duration)}" ) - clock.scheduleAfter(_ => adjustInternal(), delay.asJava) + wallClock.scheduleAfter(_ => adjustInternal(), delay.asJava) }, "adjustConnectionsIfNeeded", ) @@ -204,7 +218,8 @@ final class SequencerSubscriptionPoolImpl private[sequencing] ( def isThresholdStillReachable(ignoreCurrent: Boolean): Boolean = blocking(lock.synchronized { val ignored: Set[ConnectionX.ConnectionXConfig] = if (ignoreCurrent) Set(connection.config) else Set.empty - val result = pool.isThresholdStillReachable(config.trustThreshold, ignored) + val trustThreshold = currentConfigWithThreshold.trustThreshold + val result = pool.isThresholdStillReachable(trustThreshold, ignored) logger.debug(s"isThresholdStillReachable(ignored = $ignored) = $result") result }) @@ -303,7 +318,7 @@ final class SequencerSubscriptionPoolImpl private[sequencing] ( } private def updateHealth()(implicit traceContext: TraceContext): Unit = { - val currentConfig = config + val currentConfig = currentConfigWithThreshold trackedSubscriptions.size match { case nb if nb >= currentConfig.activeThreshold.unwrap => health.resolveUnhealthy() @@ -312,8 +327,8 @@ final class SequencerSubscriptionPoolImpl private[sequencing] ( s"below liveness margin: $nb subscription(s) available, trust threshold = ${currentConfig.trustThreshold}," + s" liveness margin = 
${currentConfig.livenessMargin}" ) - case _ if !pool.isThresholdStillReachable(config.trustThreshold, Set.empty) => - val reason = s"Trust threshold ${config.trustThreshold} is no longer reachable" + case _ if !pool.isThresholdStillReachable(currentConfig.trustThreshold, Set.empty) => + val reason = s"Trust threshold ${currentConfig.trustThreshold} is no longer reachable" health.fatalOccurred(reason) closeReasonPromise.tryComplete(Success(UnrecoverableError(reason))).discard case nb => @@ -363,6 +378,15 @@ final class SequencerSubscriptionPoolImpl private[sequencing] ( } object SequencerSubscriptionPoolImpl { + private final case class ConfigWithThreshold( + private val poolConfig: SequencerSubscriptionPoolConfig, + trustThreshold: PositiveInt, + ) { + val livenessMargin: NonNegativeInt = poolConfig.livenessMargin + val subscriptionRequestDelay: cantonConfig.NonNegativeFiniteDuration = + poolConfig.subscriptionRequestDelay + lazy val activeThreshold: PositiveInt = trustThreshold + livenessMargin + } /** Trait for an object that can provide the starting event for a subscription */ @@ -377,7 +401,6 @@ object SequencerSubscriptionPoolImpl { class SequencerSubscriptionPoolFactoryImpl( sequencerSubscriptionFactory: SequencerSubscriptionXFactory, subscriptionHandlerFactory: SubscriptionHandlerXFactory, - clock: Clock, timeouts: ProcessingTimeout, override protected val loggerFactory: NamedLoggerFactory, ) extends SequencerSubscriptionPoolFactory @@ -397,7 +420,6 @@ class SequencerSubscriptionPoolFactoryImpl( subscriptionHandlerFactory, connectionPool, member, - clock, initialSubscriptionEventO, subscriptionStartProvider, timeouts, diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionX.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionX.scala index a06629c3f7..74ebb34462 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionX.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionX.scala @@ -130,34 +130,48 @@ class SequencerSubscriptionX[HandlerError] private[sequencing] ( reason match { case Success(SubscriptionCloseReason.Closed) => logger.trace("Closing sequencer subscription") - connection.fatal("Normal closing") + // Normal closing of this subscription can be triggered either by: + // - closing of the subscription pool, which closed all the subscriptions; in this case, the connection pool + // will also be closed and will take care of the connections + // - failure of a connection, and the subscription pool closed the associated subscription; in this case, the + // connection will already be closed if need be + // We therefore don't need to explicitly close the connection. + case Success(SubscriptionCloseReason.Shutdown) => logger.info("Closing sequencer subscription due to an ongoing shutdown") - connection.fatal("Shutdown") + // If we reach here, it is due to a concurrent closing of the subscription (see previous case) and a subscription + // error. Again, we don't need to explicitly close the connection. 
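// An illustrative sketch (assumed names, not Canton code) of the snapshot pattern behind
// ConfigWithThreshold above: the subscription pool's own config and the connection pool's
// trust threshold live in separately mutable references, so each adjustment round reads both
// once into an immutable value and derives activeThreshold = trustThreshold + livenessMargin
// from that single snapshot, rather than re-reading mutable state mid-round.
import java.util.concurrent.atomic.AtomicInteger

final case class ThresholdSnapshot(trustThreshold: Int, livenessMargin: Int) {
  // Number of subscriptions to maintain: trust threshold plus a margin for liveness
  val activeThreshold: Int = trustThreshold + livenessMargin
}

final class PoolsSketch {
  private val trustThreshold = new AtomicInteger(2) // owned by the connection pool config
  private val livenessMargin = new AtomicInteger(1) // owned by the subscription pool config

  /** Read both parameters once; use the returned snapshot for the whole round. */
  def snapshot(): ThresholdSnapshot =
    ThresholdSnapshot(trustThreshold.get(), livenessMargin.get())
}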
+ case Success(SubscriptionCloseReason.HandlerError(_: ApplicationHandlerShutdown.type)) => logger.info("Closing sequencer subscription due to handler shutdown") connection.fatal("Subscription handler shutdown") + case Success(SubscriptionCloseReason.HandlerError(exception: ApplicationHandlerException)) => logger.error( s"Permanently closing sequencer subscription due to handler exception (this indicates a bug): $exception" ) connection.fatal(exception.toString) + case Success(SubscriptionCloseReason.HandlerError(ApplicationHandlerPassive(reason))) => logger.info( s"Permanently closing sequencer subscription because instance became passive: $reason" ) connection.fatal("Instance became passive") + case Success(Fatal(reason)) if isClosing => logger.info( s"Permanently closing sequencer subscription after an error due to an ongoing shutdown: $reason" ) connection.fatal("Error during shutdown") + case Success(ex: HandlerException) => logger.error(s"Permanently closing sequencer subscription due to handler exception: $ex") connection.fatal(ex.toString) + case Success(error) => logger.warn(s"Permanently closing sequencer subscription due to error: $error") connection.fatal(error.toString) + case Failure(exception) => logger.error(s"Permanently closing sequencer subscription due to exception", exception) connection.fatal(exception.toString) diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SubscriptionHandlerX.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SubscriptionHandlerX.scala index a6c32dd170..5d4c1545ec 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SubscriptionHandlerX.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SubscriptionHandlerX.scala @@ -27,7 +27,7 @@ import com.digitalasset.canton.{SequencerAlias, time} import java.util.concurrent.atomic.AtomicReference import scala.concurrent.ExecutionContext -class SubscriptionHandlerX( +class SubscriptionHandlerX private[sequencing] ( clock: Clock, metrics: SequencerClientMetrics, applicationHandlerFailure: SingleUseCell[ApplicationHandlerFailure], diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/UserSequencerConnectionXStub.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/UserSequencerConnectionXStub.scala index a971df44d0..e5faee39fc 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/UserSequencerConnectionXStub.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/UserSequencerConnectionXStub.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.sequencing import cats.data.EitherT +import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.NamedLogging import com.digitalasset.canton.networking.grpc.{CantonGrpcUtil, GrpcError} @@ -63,6 +64,14 @@ trait UserSequencerConnectionXStub extends NamedLogging { GetTrafficStateForMemberResponse, ] + def getTime( + timeout: Duration, + retryPolicy: GrpcError => Boolean = CantonGrpcUtil.RetryPolicy.noRetry, + logPolicy: CantonGrpcUtil.GrpcLogPolicy = CantonGrpcUtil.DefaultGrpcLogPolicy, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, SequencerConnectionXStubError, Option[CantonTimestamp]] + def downloadTopologyStateForInit(request: TopologyStateForInitRequest, timeout: Duration)(implicit traceContext: 
TraceContext ): EitherT[FutureUnlessShutdown, SequencerConnectionXStubError, TopologyStateForInitResponse] @@ -74,4 +83,5 @@ trait UserSequencerConnectionXStub extends NamedLogging { )(implicit traceContext: TraceContext ): Either[SequencerConnectionXStubError, SequencerSubscription[E]] + } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationTokenProvider.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationTokenProvider.scala index 4297cedbb7..c4d954950a 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationTokenProvider.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationTokenProvider.scala @@ -291,6 +291,11 @@ object AuthenticationTokenProvider { logger: TracedLogger, )(implicit tc: TraceContext): ErrorKind = exception match { + case ex: StatusRuntimeException + if ex.getStatus.getCode == Status.Code.UNAVAILABLE && + ex.getMessage.contains("Channel shutdown invoked") => + FatalErrorKind + // Ideally we would like to retry only on retryable gRPC status codes (such as `UNAVAILABLE`), // but as this could be hard to get right, we compromise by retrying on all gRPC status codes, // and use a finite number of retries. diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencedEventValidator.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencedEventValidator.scala index 8bf8ee4510..39ec638ce7 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencedEventValidator.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencedEventValidator.scala @@ -53,7 +53,6 @@ import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ErrorUtil import com.digitalasset.canton.util.PekkoUtil.WithKillSwitch import com.digitalasset.canton.util.PekkoUtil.syntax.* -import com.digitalasset.canton.version.ProtocolVersion import com.google.common.annotations.VisibleForTesting import scala.concurrent.ExecutionContext @@ -272,7 +271,6 @@ object SequencedEventValidator extends HasLoggerName { topologyTimestamp: CantonTimestamp, sequencingTimestamp: CantonTimestamp, latestTopologyClientTimestamp: Option[CantonTimestamp], - protocolVersion: ProtocolVersion, warnIfApproximate: Boolean, getTolerance: DynamicSynchronizerParametersWithValidity => NonNegativeFiniteDuration, )(implicit @@ -289,7 +287,6 @@ object SequencedEventValidator extends HasLoggerName { // So a change of tolerance does not negatively impact pending requests. 
topologyTimestamp, latestTopologyClientTimestamp, - protocolVersion, warnIfApproximate, ) @@ -462,7 +459,7 @@ class SequencedEventValidatorImpl( // Otherwise, this is a fresh subscription and we will get the topology state with the first transaction // TODO(#4933) Upon a fresh subscription, retrieve the keys via the topology API and validate immediately or // validate the signature after processing the initial event - _ <- verifySignature(priorEventO, event, sequencerId, psid.protocolVersion) + _ <- verifySignature(priorEventO, event, sequencerId) _ = logger.debug("Successfully verified signature") } yield () } @@ -531,7 +528,6 @@ class SequencedEventValidatorImpl( Some(priorEvent), reconnectEvent, sequencerId, - psid.protocolVersion, ) } yield () // do not update the priorEvent because if it was ignored, then it was ignored for a reason. @@ -551,7 +547,6 @@ class SequencedEventValidatorImpl( priorEventO: Option[ProcessingSerializedEvent], event: SequencedSerializedEvent, sequencerId: SequencerId, - protocolVersion: ProtocolVersion, ): EitherT[FutureUnlessShutdown, SequencedEventValidationError[Nothing], Unit] = { implicit val traceContext: TraceContext = event.traceContext if (event.previousTimestamp.isEmpty) { @@ -574,7 +569,6 @@ class SequencedEventValidatorImpl( signingTs, event.timestamp, lastTopologyClientTimestamp(priorEventO), - protocolVersion, warnIfApproximate = priorEventO.nonEmpty, _.sequencerTopologyTimestampTolerance, ) diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClient.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClient.scala index 96e5dfd4a8..ea008a2f5e 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClient.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClient.scala @@ -51,13 +51,21 @@ import com.digitalasset.canton.sequencing.SequencerAggregatorPekko.{ HasSequencerSubscriptionFactoryPekko, SubscriptionControl, } +import com.digitalasset.canton.sequencing.SequencerConnectionXPool.SequencerConnectionXPoolConfig import com.digitalasset.canton.sequencing.SequencerSubscriptionPool.SequencerSubscriptionPoolConfig import com.digitalasset.canton.sequencing.client.PeriodicAcknowledgements.FetchCleanTimestamp import com.digitalasset.canton.sequencing.client.SendAsyncClientError.SendAsyncClientResponseError import com.digitalasset.canton.sequencing.client.SendCallback.CallbackFuture import com.digitalasset.canton.sequencing.client.SequencedEventValidationError.PreviousTimestampMismatch import com.digitalasset.canton.sequencing.client.SequencerClient.SequencerTransports +import com.digitalasset.canton.sequencing.client.SequencerClientImpl.SequencerClientTimeSourcesPool import com.digitalasset.canton.sequencing.client.SequencerClientSubscriptionError.* +import com.digitalasset.canton.sequencing.client.time.fetcher.SequencingTimeFetcher.TimeSourcesPool +import com.digitalasset.canton.sequencing.client.time.fetcher.{ + ExpiringInMemorySequencingTimeReadings, + OneCallAtATimeSourcesAccessor, + SequencingTimeFetcher, +} import com.digitalasset.canton.sequencing.client.transports.{ SequencerClientTransport, SequencerClientTransportCommon, @@ -88,7 +96,7 @@ import com.digitalasset.canton.util.Thereafter.syntax.ThereafterAsyncOps import com.digitalasset.canton.util.TryUtil.* import com.digitalasset.canton.util.collection.IterableUtil import 
com.digitalasset.canton.util.retry.{AllExceptionRetryPolicy, NoExceptionRetryPolicy} -import com.digitalasset.canton.{SequencerAlias, SequencerCounter, time} +import com.digitalasset.canton.{SequencerAlias, SequencerCounter, config, time} import com.google.common.annotations.VisibleForTesting import io.grpc.Status import io.opentelemetry.api.trace.Tracer @@ -98,8 +106,10 @@ import org.apache.pekko.{Done, NotUsed} import org.slf4j.event.Level import java.util.concurrent.atomic.AtomicReference +import scala.annotation.tailrec import scala.concurrent.* import scala.concurrent.duration.* +import scala.jdk.DurationConverters.* import scala.util.{Failure, Success, Try} trait SequencerClient extends SequencerClientSend with FlagCloseable { @@ -182,6 +192,9 @@ trait SequencerClient extends SequencerClientSend with FlagCloseable { traceContext: TraceContext ): EitherT[FutureUnlessShutdown, String, GenericStoredTopologyTransactions] + /** Functionality for efficiently querying the current sequencing time */ + def timeFetcher: SequencingTimeFetcher + /** For participant nodes, the predecessor synchronizer if any. */ protected def synchronizerPredecessor: Option[SynchronizerPredecessor] @@ -192,8 +205,9 @@ trait RichSequencerClient extends SequencerClient { def healthComponent: CloseableHealthComponent def changeTransport( - sequencerTransports: SequencerTransports[?] - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] + sequencerTransports: SequencerTransports[?], + newConnectionPoolConfigO: Option[SequencerConnectionXPoolConfig], + )(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, String, Unit] /** Future which is completed when the client is not functional any more and is ready to be * closed. The value with which the future is completed will indicate the reason for completion.
@@ -239,8 +253,8 @@ abstract class SequencerClientImpl( with HasCloseContext { import SequencerClientImpl.LinkDetails - noTracingLogger.debug( - s"[$member] Using connection pool: ${config.useNewConnectionPool} for synchronizer $psid" + noTracingLogger.info( + s"[$member] Using ${if (config.useNewConnectionPool) "new connection pool" else "old transports"} for synchronizer $psid" ) override def logout()(implicit @@ -956,22 +970,124 @@ abstract class SequencerClientImpl( EitherT(resultFUS) } + override val timeFetcher = + new SequencingTimeFetcher( + new SequencerClientTimeSourcesPool( + config, + connectionPool, + sequencersTransportState, + ), + new OneCallAtATimeSourcesAccessor( + clock, + new ExpiringInMemorySequencingTimeReadings( + clock, + config.timeReadingsRetention.toInternal, + loggerFactory, + ), + loggerFactory, + ), + clock, + loggerFactory, + ) + protected val periodicAcknowledgementsRef = new AtomicReference[Option[PeriodicAcknowledgements]](None) } object SequencerClientImpl { + private final case class LinkDetails( sequencerAlias: SequencerAlias, sequencerId: SequencerId, transportOrPoolConnection: Either[SequencerClientTransportCommon, SequencerConnectionX], ) + private final case class AmplifiedSendState( previousSequencers: Seq[SequencerId], nextLinkDetailsO: Option[LinkDetails], - nextPatienceO: Option[NonNegativeFiniteDuration], + nextPatienceO: Option[config.NonNegativeFiniteDuration], isFirstStep: Boolean, ) + + private final class SequencerClientTimeSourcesPool( + sequencerClientConfig: SequencerClientConfig, + connectionXPool: SequencerConnectionXPool, + transportsState: SequencersTransportState, + )(implicit executionContext: ExecutionContext) + extends TimeSourcesPool { + + private type Transport = ( + SequencerAlias, + SequencerId, + SequencerClientTransportCommon, + Option[config.NonNegativeFiniteDuration], + ) + + override def readTrustThreshold(): PositiveInt = + if (sequencerClientConfig.useNewConnectionPool) + connectionXPool.config.trustThreshold + else transportsState.getSequencerTrustThreshold + + override def timeSources(count: PositiveInt, exclusions: Set[SequencerId])(implicit + traceContext: TraceContext + ): Seq[ + (SequencerId, time.PositiveFiniteDuration => FutureUnlessShutdown[Option[CantonTimestamp]]) + ] = { + val sequencerIdToEitherTTimeSource = + if (sequencerClientConfig.useNewConnectionPool) + connectionXPool + .getConnections("SequencingTimeClient", count, exclusions) + .map { connection => + connection.attributes.sequencerId -> ((timeout: time.PositiveFiniteDuration) => + connection.getTime(timeout.duration.toScala) + ) + } + else + getTransports(exclusions, count).map { case (sequencerId, transport) => + sequencerId -> ((timeout: time.PositiveFiniteDuration) => + transport.getTime(timeout.duration.toScala) + ) + } + sequencerIdToEitherTTimeSource.view + .filterNot { case (sequencerId, _) => exclusions.contains(sequencerId) } + .map { case (sequencerId, lazyEitherTFUS) => + sequencerId -> ((timeout: time.PositiveFiniteDuration) => + lazyEitherTFUS(timeout).value.map(_.toOption.flatten) + ) + } + .toSeq + } + + private def getTransports(exclusions: Set[SequencerId], count: PositiveInt)(implicit + traceContext: TraceContext + ): Map[SequencerId, SequencerClientTransportCommon] = { + + @tailrec def go( + transportsAccum: Seq[Transport] = Seq.empty, + excludeSequencerIds: Set[SequencerId] = exclusions, + missing: Int = count.unwrap, + ): Seq[Transport] = + if (missing == 0) { + transportsAccum + } else { + val next @ (_, nextSequencerId, _, 
_) = + transportsState.nextAmplifiedTransport(excludeSequencerIds.toSeq) + if ( + transportsAccum.exists { case (_, sequencerId, _, _) => sequencerId == nextSequencerId } + ) + transportsAccum + else + go( + transportsAccum = next +: transportsAccum, + excludeSequencerIds = excludeSequencerIds ++ + transportsAccum.map { case (_, sequencerId, _, _) => sequencerId }.toSet, + missing = missing - 1, + ) + } + + go().map { case (_, sequencerId, transport, _) => sequencerId -> transport }.toMap + } + } } /** The sequencer client facilitates access to the individual synchronizer sequencer. A client @@ -1187,10 +1303,8 @@ class RichSequencerClientImpl( ) if (config.useNewConnectionPool) { - val subscriptionPoolConfig = SequencerSubscriptionPoolConfig( - trustThreshold = sequencerTransports.sequencerTrustThreshold, - livenessMargin = sequencerTransports.sequencerLivenessMargin, - ) + val subscriptionPoolConfig = + SequencerSubscriptionPoolConfig.fromSequencerTransports(sequencerTransports) val eventBatchProcessor = new EventBatchProcessor { override def process( eventBatch: Seq[SequencedSerializedEvent] @@ -1229,7 +1343,6 @@ class RichSequencerClientImpl( val sequencerSubscriptionPoolFactory = new SequencerSubscriptionPoolFactoryImpl( sequencerSubscriptionFactory, subscriptionHandlerFactory, - clock, timeouts, loggerFactory, ) @@ -1687,17 +1800,45 @@ class RichSequencerClientImpl( }(EitherT.leftT[FutureUnlessShutdown, Unit](_)) } - def changeTransport( - sequencerTransports: SequencerTransports[?] - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - sequencerAggregator.changeMessageAggregationConfig( - MessageAggregationConfig( - sequencerTransports.expectedSequencersO, - sequencerTransports.sequencerTrustThreshold, + override def changeTransport( + sequencerTransports: SequencerTransports[?], + newConnectionPoolConfigO: Option[SequencerConnectionXPoolConfig], + )(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, String, Unit] = + for { + _ <- + EitherT.fromEither[FutureUnlessShutdown](if (config.useNewConnectionPool) { + val newConnectionPoolConfig = newConnectionPoolConfigO.getOrElse( + ErrorUtil.invalidState( + "Connection pool enabled, yet connection pool config not provided" + ) + ) + + for { + _ <- connectionPool + .updateConfig(newConnectionPoolConfig) + .leftMap(error => s"Failed to update connection pool configuration: $error") + } yield { + sequencerSubscriptionPoolRef.get.foreach { subscriptionPool => + val newSubscriptionPoolConfig = + SequencerSubscriptionPoolConfig.fromSequencerTransports(sequencerTransports) + subscriptionPool.updateConfig(newSubscriptionPoolConfig) + } + } + } else Either.unit) + + _ = sequencerAggregator.changeMessageAggregationConfig( + MessageAggregationConfig( + sequencerTransports.expectedSequencersO, + sequencerTransports.sequencerTrustThreshold, + ) ) - ) - FutureUnlessShutdown.outcomeF(sequencersTransportState.changeTransport(sequencerTransports)) - } + + _ <- EitherT.right( + FutureUnlessShutdown.outcomeF( + sequencersTransportState.changeTransport(sequencerTransports) + ) + ) + } yield () private val subscriptionPoolCompletePromise = Promise[SequencerClient.CloseReason]() @@ -2244,6 +2385,7 @@ object SequencerClient { sequencerTrustThreshold: PositiveInt, sequencerLivenessMargin: NonNegativeInt, submissionRequestAmplification: SubmissionRequestAmplification, + sequencerConnectionPoolDelays: SequencerConnectionPoolDelays, ) { def expectedSequencersO: Option[NonEmpty[Set[SequencerId]]] = 
sequencerToTransportMapO.map(_.map(_._2.sequencerId).toSet) @@ -2267,6 +2409,7 @@ object SequencerClient { sequencerSignatureThreshold: PositiveInt, sequencerLivenessMargin: NonNegativeInt, submissionRequestAmplification: SubmissionRequestAmplification, + sequencerConnectionPoolDelays: SequencerConnectionPoolDelays, ): Either[String, SequencerTransports[E]] = sequencerTransportsMapO .zip(expectedSequencersO) @@ -2291,6 +2434,7 @@ object SequencerClient { sequencerTrustThreshold = sequencerSignatureThreshold, sequencerLivenessMargin = sequencerLivenessMargin, submissionRequestAmplification = submissionRequestAmplification, + sequencerConnectionPoolDelays = sequencerConnectionPoolDelays, ) ) @@ -2305,6 +2449,7 @@ object SequencerClient { sequencerTrustThreshold = PositiveInt.one, sequencerLivenessMargin = NonNegativeInt.zero, SubmissionRequestAmplification.NoAmplification, + SequencerConnectionPoolDelays.default, ) } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientConfig.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientConfig.scala index 53284ef880..cac4bcd1b9 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientConfig.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientConfig.scala @@ -52,6 +52,11 @@ import com.digitalasset.canton.sequencing.authentication.AuthenticationTokenMana * more stable and predictable system behavior. * @param useNewConnectionPool * Use the new sequencer connection pool instead of the former transports. + * @param timeReadingsRetention + * The duration for which sequencing time readings are retained. This setting depends on the + * assumptions about dynamic faults and ensures that faulty time readings skewed towards the + * future do not linger in the system for too long. Regular eviction also ensures that time + * readings from offboarded sequencers are not taken into account indefinitely. 
*/ final case class SequencerClientConfig( eventInboxSize: PositiveInt = PositiveInt.tryCreate(100), @@ -70,6 +75,7 @@ final case class SequencerClientConfig( overrideMaxRequestSize: Option[NonNegativeInt] = None, maximumInFlightEventBatches: PositiveInt = PositiveInt.tryCreate(20), useNewConnectionPool: Boolean = false, + timeReadingsRetention: PositiveFiniteDuration = PositiveFiniteDuration.ofMinutes(5), ) extends UniformCantonConfigValidation object SequencerClientConfig { diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientFactory.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientFactory.scala index dab535eb70..25acdc0ab3 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientFactory.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientFactory.scala @@ -210,6 +210,7 @@ object SequencerClientFactory { sequencerConnections.sequencerTrustThreshold, sequencerConnections.sequencerLivenessMargin, sequencerConnections.submissionRequestAmplification, + sequencerConnections.sequencerConnectionPoolDelays, ) ) // Reinitialize the sequencer counter allocator to ensure that passive->active replica transitions diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/channel/SequencerChannelClientFactory.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/channel/SequencerChannelClientFactory.scala index 04c16423f8..f51fd71d13 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/channel/SequencerChannelClientFactory.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/channel/SequencerChannelClientFactory.scala @@ -21,7 +21,7 @@ import com.digitalasset.canton.sequencing.{ } import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.{Member, PhysicalSynchronizerId, SequencerId} -import com.digitalasset.canton.tracing.TracingConfig +import com.digitalasset.canton.tracing.{TraceContext, TracingConfig} import com.digitalasset.canton.version.ProtocolVersion import io.grpc.ManagedChannel @@ -47,7 +47,8 @@ final class SequencerChannelClientFactory( sequencerConnections: SequencerConnections, expectedSequencers: NonEmpty[Map[SequencerAlias, SequencerId]], )(implicit - executionContext: ExecutionContextExecutor + executionContext: ExecutionContextExecutor, + traceContext: TraceContext, ): Either[String, SequencerChannelClient] = makeChannelTransports( sequencerConnections, diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/channel/SequencerChannelClientState.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/channel/SequencerChannelClientState.scala index 6127e62ec5..a7f1bfc9f1 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/channel/SequencerChannelClientState.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/channel/SequencerChannelClientState.scala @@ -26,8 +26,11 @@ private[channel] final class SequencerChannelClientState( transportsMap: NonEmpty[Map[SequencerId, SequencerChannelClientTransport]], val timeouts: ProcessingTimeout, val loggerFactory: NamedLoggerFactory, -) extends NamedLogging +)(implicit traceContext: TraceContext) + extends NamedLogging with FlagCloseable 
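// A minimal usage sketch based on the defaults above: an operator opting into the new
// connection pool and shortening the retention of time readings would construct the config
// with just those two fields overridden (all other parameters keep their defaults).
val sequencerClientConfig = SequencerClientConfig(
  useNewConnectionPool = true,
  timeReadingsRetention = PositiveFiniteDuration.ofMinutes(1),
)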
{ + logger.info(s"Channel sequencers ${transportsMap.keys.mkString(", ")}") + private val transports: NonEmpty[Map[SequencerId, SequencerChannelState]] = transportsMap.map { case (sequencerId, transport) => sequencerId -> new SequencerChannelState(transport, loggerFactory) diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/channel/SequencerChannelProtocolProcessor.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/channel/SequencerChannelProtocolProcessor.scala index 88276a03ed..bcfd575c16 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/channel/SequencerChannelProtocolProcessor.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/channel/SequencerChannelProtocolProcessor.scala @@ -14,7 +14,7 @@ import com.digitalasset.canton.sequencing.client.channel.endpoint.SequencerChann import com.digitalasset.canton.sequencing.client.transports.GrpcSubscriptionError import com.digitalasset.canton.topology.PhysicalSynchronizerId import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.LoggerUtil +import com.digitalasset.canton.util.{EitherTUtil, LoggerUtil} import com.digitalasset.canton.version.ProtocolVersion import com.google.protobuf.ByteString import org.slf4j.event.Level @@ -61,8 +61,11 @@ trait SequencerChannelProtocolProcessor extends FlagCloseable with NamedLogging /** Notification that the processor is now connected and can begin sending and receiving messages. */ def onConnected()(implicit + @scala.annotation.unused // unused trace context used by inheritors traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, String, Unit] + ): EitherT[FutureUnlessShutdown, String, Unit] = + EitherTUtil + .condUnitET[FutureUnlessShutdown](!isConnected.getAndSet(true), "Channel already connected") /** Handles payload from the channel. */ def handlePayload(payload: ByteString)(implicit @@ -85,8 +88,11 @@ trait SequencerChannelProtocolProcessor extends FlagCloseable with NamedLogging channelEndpoint.getAndSet(None).nonEmpty } - /** Sends payload to channel */ - final protected def sendPayload(operation: String, payload: ByteString)(implicit + /** Sends payload to channel + * + * Overrideable for testing + */ + protected def sendPayload(operation: String, payload: ByteString)(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, String, Unit] = synchronizeWithClosing(operation) { @@ -104,8 +110,11 @@ trait SequencerChannelProtocolProcessor extends FlagCloseable with NamedLogging } } - /** Sends channel completion thereby ending the ability to send subsequent messages. */ - final protected def sendCompleted(status: String)(implicit + /** Sends channel completion thereby ending the ability to send subsequent messages. + * + * Overrideable for testing + */ + protected def sendCompleted(status: String)(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, String, Unit] = synchronizeWithClosing(s"complete with $status") { @@ -116,8 +125,11 @@ trait SequencerChannelProtocolProcessor extends FlagCloseable with NamedLogging }(_.sendCompleted(status).map(_ => hasCompleted.set(true))) } - /** Sends channel error thereby ending the ability to send subsequent messages. */ - final protected def sendError(error: String)(implicit + /** Sends channel error thereby ending the ability to send subsequent messages. 
+ * + * Overrideable for testing + */ + protected def sendError(error: String)(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, String, Unit] = synchronizeWithClosing(s"send error $error") { diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/channel/endpoint/ChannelStage.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/channel/endpoint/ChannelStage.scala index 394d9ac9da..c669486ac8 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/channel/endpoint/ChannelStage.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/channel/endpoint/ChannelStage.scala @@ -185,10 +185,8 @@ private[endpoint] class ChannelStageSecurelyConnected(data: InternalData)(implic */ private def processOnChannelReadyForProcessor()(implicit traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, String, Unit] = { - data.processor.isConnected.set(true) + ): EitherT[FutureUnlessShutdown, String, Unit] = data.processor.onConnected() - } override def handleMessage(response: Response)(implicit traceContext: TraceContext diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/time/fetcher/SequencingTimeFetcher.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/time/fetcher/SequencingTimeFetcher.scala new file mode 100644 index 0000000000..ca96820938 --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/time/fetcher/SequencingTimeFetcher.scala @@ -0,0 +1,167 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client.time.fetcher + +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.sequencing.client.time.fetcher.SequencingTimeFetcher.* +import com.digitalasset.canton.sequencing.client.time.fetcher.SequencingTimeReadings.TimeReading +import com.digitalasset.canton.time.{Clock, NonNegativeFiniteDuration, PositiveFiniteDuration} +import com.digitalasset.canton.topology.SequencerId +import com.digitalasset.canton.tracing.TraceContext + +import scala.concurrent.ExecutionContext + +/** Allows determining the current sequencing time, and whether a sequencing time has been reached, + * based on a set of time sources, caching their results to avoid repeated calls to the same time + * source.
+ */ +class SequencingTimeFetcher private[client] ( + timeSourcesPool: TimeSourcesPool, + timeSourcesAccessor: TimeSourcesAccessor, + localClock: Clock, + override protected val loggerFactory: NamedLoggerFactory, +)(implicit ec: ExecutionContext) + extends NamedLogging { + + private val timeReadings = timeSourcesAccessor.timeReadings + + def currentSequencingTimeInfo(maxTimeReadingsAge: Option[NonNegativeFiniteDuration])(implicit + traceContext: TraceContext + ): SequencingTimeInfo = { + val readings = timeReadings.getTimeReadings(maxTimeReadingsAge) + val times = readings.view.values.flatMap(_.reading).toVector + val trustThreshold = timeSourcesPool.readTrustThreshold() + SequencingTimeInfo( + timeReadings.validTimeInterval(times, trustThreshold), + trustThreshold, + readings, + ) + } + + /** Returns `true` only if the synchronizer has surely reached the given sequencing time. + */ + // Given a sequencing time, we want to know if at least one non-faulty sequencer node has reached it + // (i.e., if any subsequent send will be assigned a later sequencing time). + // + // A negative outcome is however considered safe because in that case we'll retry, so false negatives + // are not a correctness problem; this means that a large number of unresponsive nodes can be tolerated. + // + // Let `f` = maximum tolerated number of nodes that can be faulty in arbitrary ways, i.e., `trustThreshold` - 1. + // + // In order to make sure that at least one non-faulty node has reached the given time, we must find + // at least `f` additional nodes that have also reached it, i.e., `f+1` in total. + // This is because in the worst case `f` nodes may be maliciously colluding and causing a false positive answer + // (e.g., their `GetTime` could always return `CantonTimestamp.MaxValue`), and in that case we need + // another `f` replies agreeing with the correct node to outvote the malicious ones. + // + // This means that in the worst case, under the trust assumption of up to `f` non-compliant nodes, + // we may end up querying `2f+1` nodes. + // + // Since connections are not exclusive, however, we can just ask for one connection per sequencer + // and have the best possible pool at our disposal, so our strategy is to try and reach `f+1` positive + // outcomes, potentially by ending up querying all available sequencers. + // This allows tolerating a large number of unresponsive nodes.
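// Worked instance of the threshold argument above: with trustThreshold = 3, i.e. f = 2, a
// positive answer is trusted once f + 1 = 3 sequencers report having reached the time (two
// answers could come from colluding faulty nodes, so a third is needed to guarantee one honest
// witness), and in the worst case 2f + 1 = 5 sequencers get queried. A minimal sketch of the
// decision rule, with Long timestamps standing in for CantonTimestamp (not the Canton code):
def hasReachedSketch(
    readings: Seq[Option[Long]], // one answer per queried sequencer; None = no or timed-out reply
    target: Long,
    trustThreshold: Int, // = f + 1
): Boolean =
  readings.count(_.exists(_ >= target)) >= trustThreshold

// Example: f = 2, so trustThreshold = 3; three positives out of five queried suffice:
// hasReachedSketch(Seq(Some(10L), Some(12L), None, Some(11L), Some(3L)), 10L, 3) == true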
+ def hasReached( + time: CantonTimestamp, + timeout: PositiveFiniteDuration, + maxTimeReadingsAge: Option[NonNegativeFiniteDuration] = None, + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Boolean] = { + def returnOutcome(outcome: Boolean) = + FutureUnlessShutdown.pure(outcome) + + def countPositives(times: Map[SequencerId, TimeReading]): Int = + times.values.count(_.reading.exists(_ >= time)) + + logger.debug(s"Asked whether synchronizer has reached sequencing time $time") + val times = timeReadings.getTimeReadings(maxTimeReadingsAge) + logger.debug("Available cached time readings: " + times) + val positives = countPositives(times) + val trustThreshold = timeSourcesPool.readTrustThreshold() + val trustThresholdInt = trustThreshold.unwrap + + if (positives >= trustThresholdInt) { + logger.debug(s"The synchronizer has reached sequencing time $time (cached)") + returnOutcome(true) + } else { + val missingPositives = trustThresholdInt - positives + val timeSources = + getTimeSources( + exclusions = times.keySet, + missingPositives, + ) + if (timeSources.sizeIs < missingPositives) { + logger.debug( + s"Cannot determine whether the synchronizer has reached $time: not enough available time sources " + + s"(missing positives: $missingPositives, available time sources: ${timeSources.size})" + ) + returnOutcome(false) + } else { + val start = localClock.now + timeSourcesAccessor + .queryTimeSources(timeSources, timeout) + .flatMap { _ => + val duration = localClock.now - start + val positives = countPositives(timeReadings.getTimeReadings(maxTimeReadingsAge = None)) + if (positives >= trustThresholdInt) { + logger.debug( + s"The synchronizer has reached sequencing time $time " + + s"with $positives positives (threshold: $trustThresholdInt)" + ) + returnOutcome(true) + } else { + val leftTime = timeout.duration.minus(duration) + if (leftTime.isPositive) { + logger.debug( + s"Cannot determine yet whether the synchronizer has reached sequencing time $time " + + s"with only $positives positives (threshold: $trustThresholdInt), trying more sources" + ) + hasReached( + time, + PositiveFiniteDuration.tryCreate(leftTime), + maxTimeReadingsAge = None, + ) + } else { + logger.debug( + s"Cannot determine yet whether the synchronizer has reached sequencing time $time " + + s"with only $positives positives (threshold: $trustThresholdInt) and the timeout has elapsed" + ) + returnOutcome(false) + } + } + } + } + } + } + + private def getTimeSources(exclusions: Set[SequencerId], numberOfTimesToFetch: Int)(implicit + traceContext: TraceContext + ): Map[SequencerId, PositiveFiniteDuration => FutureUnlessShutdown[Option[CantonTimestamp]]] = + PositiveInt + .create(numberOfTimesToFetch) + .map( + timeSourcesPool.timeSources(_, exclusions).toMap + ) + .getOrElse(Map.empty) +} + +object SequencingTimeFetcher { + + private[client] trait TimeSourcesPool { + + def readTrustThreshold(): PositiveInt + + def timeSources(count: PositiveInt, exclusions: Set[SequencerId])(implicit + traceContext: TraceContext + ): Seq[(SequencerId, PositiveFiniteDuration => FutureUnlessShutdown[Option[CantonTimestamp]])] + } + + private[client] final case class SequencingTimeInfo( + validTimeInterval: Option[(CantonTimestamp, CantonTimestamp)], + forTrustThreshold: PositiveInt, + basedOnTimeReadings: Map[SequencerId, TimeReading], + ) +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/time/fetcher/SequencingTimeReadings.scala
b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/time/fetcher/SequencingTimeReadings.scala new file mode 100644 index 0000000000..04d22baa95 --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/time/fetcher/SequencingTimeReadings.scala @@ -0,0 +1,121 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client.time.fetcher + +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.sequencing.client.time.fetcher.SequencingTimeReadings.TimeReading +import com.digitalasset.canton.time.{Clock, NonNegativeFiniteDuration, PositiveFiniteDuration} +import com.digitalasset.canton.topology.SequencerId +import com.digitalasset.canton.tracing.TraceContext + +import java.util.concurrent.atomic.AtomicReference + +private[client] trait SequencingTimeReadings { + + def getTimeReadings( + maxTimeReadingsAge: Option[NonNegativeFiniteDuration] + ): Map[SequencerId, TimeReading] + + def validTimeInterval( + times: Vector[CantonTimestamp], + trustThreshold: PositiveInt, + )(implicit traceContext: TraceContext): Option[(CantonTimestamp, CantonTimestamp)] + + def recordReading( + sequencerId: SequencerId, + timeReading: Option[CantonTimestamp], + receivedAt: CantonTimestamp, + ): Unit +} + +object SequencingTimeReadings { + + private[client] final case class TimeReading( + reading: Option[CantonTimestamp], + receivedAt: CantonTimestamp, + ) +} + +private[client] class ExpiringInMemorySequencingTimeReadings( + localClock: Clock, + timeReadingsRetention: PositiveFiniteDuration, + override protected val loggerFactory: NamedLoggerFactory, + timesRef: AtomicReference[Map[SequencerId, TimeReading]] = // Only for testing + new AtomicReference(Map.empty), +) extends SequencingTimeReadings + with NamedLogging { + + import ExpiringInMemorySequencingTimeReadings.* + + override def getTimeReadings( + maxTimeReadingsAge: Option[NonNegativeFiniteDuration] + ): Map[SequencerId, TimeReading] = { + val now = localClock.now + timesRef + .updateAndGet { times => + times.filter { case (_, TimeReading(_, receivedAt)) => + now.minus(timeReadingsRetention.duration) < receivedAt + } + } + .filter { case (_, TimeReading(_, receivedAt)) => + maxTimeReadingsAge.forall(age => now.minus(age.duration) < receivedAt) + } + } + + def validTimeInterval( + times: Vector[CantonTimestamp], + trustThreshold: PositiveInt, + )(implicit traceContext: TraceContext): Option[(CantonTimestamp, CantonTimestamp)] = { + val bftTimeThresholdInt = bftTimeThreshold(trustThreshold).unwrap + val result = + Option.when(times.sizeIs >= bftTimeThresholdInt) { + // We expect a low cardinality, so using sorting is fine + val sortedTimes = times.sorted + sortedTimes(trustThreshold.unwrap - 1) -> + sortedTimes(sortedTimes.size - trustThreshold.unwrap) + } + if (result.isEmpty) + logger.debug( + s"Cannot determine sequencing time: only ${times.size} times have been provided but " + + s"at least $bftTimeThresholdInt are needed" + ) + result + } + + def recordReading( + sequencerId: SequencerId, + timeReading: Option[CantonTimestamp], + receivedAt: CantonTimestamp, + ): Unit = + timesRef + .updateAndGet( + _.updatedWith(sequencerId)(v => + 
v.map { case TimeReading(prevTimeReading, prevReadingReceivedAt) => + (prevTimeReading, timeReading) match { + case (Some(prevTs), Some(ts)) => + TimeReading( + Some(prevTs max ts), + if (prevTs > ts) prevReadingReceivedAt else receivedAt, + ) + case (Some(_), None) => + TimeReading(prevTimeReading, prevReadingReceivedAt) + case (None, Some(_)) => + TimeReading(timeReading, receivedAt) + case (None, None) => + TimeReading(None, prevReadingReceivedAt) + } + }.orElse(Some(TimeReading(timeReading, receivedAt))) + ) + ) + .discard +} + +private object ExpiringInMemorySequencingTimeReadings { + + private def bftTimeThreshold(trustThreshold: PositiveInt): PositiveInt = + trustThreshold + trustThreshold.decrement +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/time/fetcher/TimeSourcesAccessor.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/time/fetcher/TimeSourcesAccessor.scala new file mode 100644 index 0000000000..8587980c4d --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/time/fetcher/TimeSourcesAccessor.scala @@ -0,0 +1,161 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client.time.fetcher + +import cats.syntax.parallel.* +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, PromiseUnlessShutdown} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.sequencing.client.time.fetcher.OneCallAtATimeSourcesAccessor.QueryTimeSourcesRunningTask +import com.digitalasset.canton.sequencing.client.time.fetcher.TimeSourcesAccessor.TimeSources +import com.digitalasset.canton.time.{Clock, PositiveFiniteDuration} +import com.digitalasset.canton.topology.SequencerId +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.FutureUnlessShutdownUtil + +import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference} +import scala.collection.View +import scala.concurrent.ExecutionContext + +private[client] trait TimeSourcesAccessor { + + def timeReadings: SequencingTimeReadings + + def queryTimeSources( + timeSources: TimeSources, + timeout: PositiveFiniteDuration, + concurrent: Boolean = false, + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Map[SequencerId, Option[CantonTimestamp]]] +} + +object TimeSourcesAccessor { + + private[client] type TimeSources = + Map[SequencerId, PositiveFiniteDuration => FutureUnlessShutdown[Option[CantonTimestamp]]] +} + +private[client] class OneCallAtATimeSourcesAccessor( + localClock: Clock, + override val timeReadings: SequencingTimeReadings, + override protected val loggerFactory: NamedLoggerFactory, + runningTaskRef: AtomicReference[Option[QueryTimeSourcesRunningTask]] = // Only for testing + new AtomicReference(None), +)(implicit + ec: ExecutionContext +) extends TimeSourcesAccessor + with NamedLogging { + + override def queryTimeSources( + timeSources: TimeSources, + timeout: PositiveFiniteDuration, + concurrent: Boolean, + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Map[SequencerId, Option[CantonTimestamp]]] = { + + def updateStatusAfterCompletion(completedSequencerIds: Set[SequencerId]): Unit = + runningTaskRef.getAndUpdate { + case Some((ids, 
fut)) => + val remainingIds = ids.diff(completedSequencerIds) + Option.when(remainingIds.nonEmpty)((remainingIds, fut)) + case None => None + }.discard + + val completionPromise = + PromiseUnlessShutdown.unsupervised[Map[SequencerId, Option[CantonTimestamp]]]() + + val previousState = + if (concurrent) + None + else { + runningTaskRef.getAndUpdate { + case None => Some((timeSources.keySet, completionPromise.futureUS)) + case Some(runningSequencerIds -> fut) => + Some((runningSequencerIds ++ timeSources.keySet, fut)) + } + } + + val queryF = + previousState match { + case Some((runningSequencerIds, fut)) => + val prevCompletionOrTimeoutPromise = + PromiseUnlessShutdown.unsupervised[Map[SequencerId, Option[CantonTimestamp]]]() + val shouldSetPromise = new AtomicBoolean(false) + val sequencerIdsToStart = timeSources.keySet.diff(runningSequencerIds) + def setFirst(r: Map[SequencerId, Option[CantonTimestamp]]): Unit = + if (shouldSetPromise.compareAndSet(false, true)) + prevCompletionOrTimeoutPromise.outcome_(r) + FutureUnlessShutdownUtil.doNotAwaitUnlessShutdown( + fut.map(r => setFirst(r)), + "waiting for ongoing time source query failed", + ) + // It's OK not to return the result from the running computation if we time out, + // because we won't retry in that case + FutureUnlessShutdownUtil.doNotAwaitUnlessShutdown( + localClock + .scheduleAfter(_ => setFirst(Map.empty), timeout.duration), + "waiting for timeout to elapse failed", + ) + val timeSourcesToBeStarted = + sequencerIdsToStart.view + .flatMap(sequencerId => timeSources.get(sequencerId).map(sequencerId -> _)) + logger + .debug(s"Querying additional time sources: $sequencerIdsToStart") + val futures = startTimeSources(timeSourcesToBeStarted, timeout) + prevCompletionOrTimeoutPromise.futureUS.flatMap { result1 => + futures + .parTraverse(recordReadingOnceComplete) + .map { result2 => + val aggregatedResult = result1 ++ result2.toMap + updateStatusAfterCompletion(completedSequencerIds = sequencerIdsToStart) + completionPromise.outcome_(aggregatedResult) + } + } + + case None => + logger.debug(s"Querying time sources: ${timeSources.keySet}") + val futures = startTimeSources(timeSources.view, timeout) + futures + .parTraverse(recordReadingOnceComplete) + .map { result => + updateStatusAfterCompletion(completedSequencerIds = timeSources.keySet) + completionPromise.outcome_(result.toMap) + } + } + + FutureUnlessShutdownUtil.doNotAwaitUnlessShutdown(queryF, "Querying time sources failed") + + completionPromise.futureUS + } + + private def startTimeSources( + timeSourcesToBeStarted: View[ + (SequencerId, PositiveFiniteDuration => FutureUnlessShutdown[Option[CantonTimestamp]]) + ], + timeout: PositiveFiniteDuration, + ): Seq[FutureUnlessShutdown[(SequencerId, Option[CantonTimestamp])]] = + timeSourcesToBeStarted.map { case (sequencerId, timeSource) => + timeSource(timeout).map(sequencerId -> _) + }.toSeq + + private def recordReadingOnceComplete( + fut: FutureUnlessShutdown[(SequencerId, Option[CantonTimestamp])] + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[(SequencerId, Option[CantonTimestamp])] = + fut.map { case (sequencerId, timestampO) => + logger.debug(s"Received response from time source $sequencerId: $timestampO") + timeReadings.recordReading(sequencerId, timestampO, localClock.now) + sequencerId -> timestampO + } +} + +private object OneCallAtATimeSourcesAccessor { + + type QueryTimeSourcesRunningTask = + (Set[SequencerId], FutureUnlessShutdown[Map[SequencerId, Option[CantonTimestamp]]]) +} diff --git 
a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerClientTransport.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerClientTransport.scala index e3bdee9828..18c5d9f8d2 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerClientTransport.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerClientTransport.scala @@ -4,11 +4,13 @@ package com.digitalasset.canton.sequencing.client.transports import cats.data.EitherT +import cats.implicits.toTraverseOps import cats.syntax.either.* import com.daml.grpc.adapter.ExecutionSequencerFactory import com.daml.grpc.adapter.client.pekko.ClientAdapter import com.digitalasset.canton.ProtoDeserializationError.ProtoDeserializationFailure import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.{ FlagCloseable, FutureUnlessShutdown, @@ -262,6 +264,28 @@ private[transports] abstract class GrpcSequencerClientTransportCommon( EitherT(resultF) } + + override def getTime(timeout: Duration)(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, String, Option[CantonTimestamp]] = + for { + response <- + CantonGrpcUtil + .sendGrpcRequest(sequencerServiceClient, "sequencer")( + _.getTime(v30.GetTimeRequest()), + requestDescription = s"getTime", + timeout = timeout, + logger = logger, + retryPolicy = retryPolicy(retryOnUnavailable = false), + ) + .leftMap(_.toString) + timestampO <- + EitherT.fromEither[FutureUnlessShutdown]( + response.sequencingTimestamp + .traverse(CantonTimestamp.fromProtoPrimitive) + .leftMap(_.message) + ) + } yield timestampO } trait GrpcClientTransportHelpers { diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/SequencerClientTransport.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/SequencerClientTransport.scala index 667f3df348..2815a04176 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/SequencerClientTransport.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/SequencerClientTransport.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.sequencing.client.transports import cats.data.EitherT +import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown} import com.digitalasset.canton.sequencing.SequencedEventHandler import com.digitalasset.canton.sequencing.client.SendAsyncClientError.SendAsyncClientResponseError @@ -53,6 +54,11 @@ trait SequencerClientTransportCommon extends FlagCloseable { def downloadTopologyStateForInit(request: TopologyStateForInitRequest)(implicit traceContext: TraceContext ): EitherT[Future, String, TopologyStateForInitResponse] + + /** Fetches the "current" sequencing time */ + def getTime(timeout: Duration)(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, String, Option[CantonTimestamp]] } /** Implementation dependent operations for a client to read and write to a synchronizer sequencer. 
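Reviewer note: the new `getTime` transport call feeds the SequencingTimeFetcher/SequencingTimeReadings machinery added above, which trusts a sequencing-time claim only once enough independent sequencers agree. The following standalone sketch is illustrative only and not part of this change; `TimeIntervalSketch` is a hypothetical name, and `Int`/`Long` stand in for `PositiveInt`/`CantonTimestamp`. It mirrors the arithmetic of `ExpiringInMemorySequencingTimeReadings.validTimeInterval` and `bftTimeThreshold`: with trust threshold t, at least 2t - 1 readings are required, and the interval spans the t-th smallest to the t-th largest reading, so up to t - 1 faulty readings cannot move either bound.

object TimeIntervalSketch {
  // Mirrors bftTimeThreshold: trustThreshold + (trustThreshold - 1).
  private def bftTimeThreshold(trustThreshold: Int): Int = 2 * trustThreshold - 1

  // Returns Some((lo, hi)) once at least 2t - 1 readings are available;
  // lo is the t-th smallest and hi the t-th largest reading.
  def validTimeInterval(times: Vector[Long], trustThreshold: Int): Option[(Long, Long)] =
    Option.when(times.sizeIs >= bftTimeThreshold(trustThreshold)) {
      val sorted = times.sorted
      sorted(trustThreshold - 1) -> sorted(sorted.size - trustThreshold)
    }

  def main(args: Array[String]): Unit = {
    // t = 2: three readings are needed; the median bounds the interval on both sides.
    assert(validTimeInterval(Vector(10L, 12L, 11L), trustThreshold = 2).contains((11L, 11L)))
    // Two readings are not enough for t = 2.
    assert(validTimeInterval(Vector(10L, 12L), trustThreshold = 2).isEmpty)
    // With five readings and t = 2, a single outlier (99) cannot stretch the upper
    // bound past the 2nd-largest honest reading.
    assert(validTimeInterval(Vector(10L, 11L, 12L, 13L, 99L), trustThreshold = 2).contains((11L, 13L)))
  }
}

hasReached applies the same threshold one-sidedly: it answers true once at least t cached or freshly fetched readings are at or beyond the requested timestamp.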
diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/replay/ReplayingEventsSequencerClientTransport.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/replay/ReplayingEventsSequencerClientTransport.scala index d46b5db688..5b2f08055d 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/replay/ReplayingEventsSequencerClientTransport.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/replay/ReplayingEventsSequencerClientTransport.scala @@ -75,6 +75,11 @@ class ReplayingEventsSequencerClientTransport( ): EitherT[FutureUnlessShutdown, String, GetTrafficStateForMemberResponse] = EitherT.pure(GetTrafficStateForMemberResponse(None, protocolVersion)) + override def getTime(timeout: Duration)(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, String, Option[CantonTimestamp]] = + EitherT.rightT(None) + /** Replays all events in `replayPath` to the handler. */ override def subscribe[E](request: SubscriptionRequest, handler: SequencedEventHandler[E])( implicit traceContext: TraceContext diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/replay/ReplayingSendsSequencerClientTransport.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/replay/ReplayingSendsSequencerClientTransport.scala index 0e40c2a35b..2f0b70b635 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/replay/ReplayingSendsSequencerClientTransport.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/replay/ReplayingSendsSequencerClientTransport.scala @@ -484,6 +484,11 @@ class ReplayingSendsSequencerClientTransportImpl( ): EitherT[FutureUnlessShutdown, Status, Unit] = EitherT.pure(()) + override def getTime(timeout: Duration)(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, String, Option[CantonTimestamp]] = + EitherT.rightT(None) + override def subscribe[E](request: SubscriptionRequest, handler: SequencedEventHandler[E])( implicit traceContext: TraceContext ): SequencerSubscription[E] = new SequencerSubscription[E] { diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Batch.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Batch.scala index 81d991e9ae..e5017e31dc 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Batch.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Batch.scala @@ -54,6 +54,8 @@ final case class Batch[+Env <: Envelope[?]] private (envelopes: List[Env])( case AllMembersOfSynchronizer => AllMembersOfSynchronizer } + lazy val isBroadcast: Boolean = allRecipients.contains(AllMembersOfSynchronizer) + private[protocol] def toProtoV30: v30.CompressedBatch = { val batch = v30.Batch(envelopes = envelopes.map(_.closeEnvelope.toProtoV30)) val compressed = ByteStringUtil.compressGzip(checkedToByteString(batch)) diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TimeProof.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TimeProof.scala index 7bd331945e..5ff2f803b7 100644 --- 
a/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TimeProof.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TimeProof.scala @@ -4,27 +4,16 @@ package com.digitalasset.canton.sequencing.protocol import cats.data.EitherT -import cats.syntax.either.* import cats.syntax.option.* import com.daml.metrics.api.MetricsContext -import com.digitalasset.canton.ProtoDeserializationError import com.digitalasset.canton.config.CantonRequireTypes.String73 -import com.digitalasset.canton.crypto.HashOps import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} -import com.digitalasset.canton.sequencing.OrdinaryProtocolEvent import com.digitalasset.canton.sequencing.client.{SendAsyncClientError, SequencerClient} -import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult -import com.digitalasset.canton.serialization.{HasCryptographicEvidence, ProtoConverter} -import com.digitalasset.canton.store.SequencedEventStore.{ - IgnoredSequencedEvent, - OrdinarySequencedEvent, - PossiblyIgnoredSequencedEvent, -} -import com.digitalasset.canton.time.v30 +import com.digitalasset.canton.serialization.HasCryptographicEvidence +import com.digitalasset.canton.store.SequencedEventStore.OrdinarySequencedEvent import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.version.ProtocolVersion import com.google.common.annotations.VisibleForTesting import com.google.protobuf.ByteString @@ -58,31 +47,11 @@ final case class TimeProof private ( unnamedParam(_.timestamp) ) - def toProtoV30: v30.TimeProof = v30.TimeProof(Some(event.toProtoV30)) - override def getCryptographicEvidence: ByteString = deliver.getCryptographicEvidence } object TimeProof { - def fromProtoV30( - protocolVersion: ProtocolVersion, - hashOps: HashOps, - )(timeProofP: v30.TimeProof): ParsingResult[TimeProof] = { - val v30.TimeProof(eventPO) = timeProofP - for { - possiblyIgnoredProtocolEvent <- ProtoConverter - .required("event", eventPO) - .flatMap(PossiblyIgnoredSequencedEvent.fromProtoV30(protocolVersion, hashOps)) - event <- possiblyIgnoredProtocolEvent match { - case ordinary: OrdinaryProtocolEvent => Right(ordinary) - case _: IgnoredSequencedEvent[_] => - Left(ProtoDeserializationError.OtherError("Event is ignored, but must be ordinary.")) - } - timeProof <- fromEvent(event).leftMap(ProtoDeserializationError.OtherError.apply) - } yield timeProof - } - @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) def fromEvent(event: OrdinarySequencedEvent[Envelope[?]]): Either[String, TimeProof] = for { diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/serialization/ProtoConverter.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/serialization/ProtoConverter.scala index 1d10a5b865..07dc34d0e1 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/serialization/ProtoConverter.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/serialization/ProtoConverter.scala @@ -186,6 +186,9 @@ object ProtoConverter { def parsePackageId(id: String): ParsingResult[Ref.PackageId] = parseString(id, field = None)(Ref.PackageId.fromString) + def parsePackageName(name: String): ParsingResult[Ref.PackageName] = + parseString(name, field = None)(Ref.PackageName.fromString) + private def parseString[T](from: String, field: Option[String])( to: String => 
Either[String, T] ): ParsingResult[T] = diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/store/IndexedStringStore.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/store/IndexedStringStore.scala index e8a8c08ac4..c633e83dcb 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/store/IndexedStringStore.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/store/IndexedStringStore.scala @@ -48,7 +48,7 @@ abstract class IndexedStringFromDb[A <: IndexedString[B], B] { def indexed( indexedStringStore: IndexedStringStore - )(item: B)(implicit ec: ExecutionContext): FutureUnlessShutdown[A] = + )(item: B)(implicit ec: ExecutionContext, traceContext: TraceContext): FutureUnlessShutdown[A] = indexedStringStore .getOrCreateIndex(dbTyp, asString(item)) .map(buildIndexed(item, _)) @@ -58,16 +58,22 @@ abstract class IndexedStringFromDb[A <: IndexedString[B], B] { )(implicit ec: ExecutionContext, loggingContext: ErrorLoggingContext, - ): OptionT[FutureUnlessShutdown, A] = + ): OptionT[FutureUnlessShutdown, A] = { + implicit val traceContext: TraceContext = loggingContext.traceContext + fromDbIndexET(indexedStringStore)(index).leftMap { err => loggingContext.logger.error( s"Corrupt log id: $index for $dbTyp within context $context: $err" )(loggingContext.traceContext) }.toOption + } def fromDbIndexET( indexedStringStore: IndexedStringStore - )(index: Int)(implicit ec: ExecutionContext): EitherT[FutureUnlessShutdown, String, A] = + )(index: Int)(implicit + ec: ExecutionContext, + traceContext: TraceContext, + ): EitherT[FutureUnlessShutdown, String, A] = EitherT(indexedStringStore.getForIndex(dbTyp, index).map { strO => for { str <- strO.toRight("No entry for given index") @@ -180,8 +186,12 @@ object IndexedStringType { /** uid index such that we can store integers instead of long strings in our database */ trait IndexedStringStore extends AutoCloseable { - def getOrCreateIndex(dbTyp: IndexedStringType, str: String300): FutureUnlessShutdown[Int] - def getForIndex(dbTyp: IndexedStringType, idx: Int): FutureUnlessShutdown[Option[String300]] + def getOrCreateIndex(dbTyp: IndexedStringType, str: String300)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Int] + def getForIndex(dbTyp: IndexedStringType, idx: Int)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Option[String300]] } object IndexedStringStore { @@ -190,10 +200,7 @@ object IndexedStringStore { config: CacheConfig, timeouts: ProcessingTimeout, loggerFactory: NamedLoggerFactory, - )(implicit - ec: ExecutionContext, - tc: TraceContext, - ): IndexedStringStore = + )(implicit ec: ExecutionContext): IndexedStringStore = storage match { case _: MemoryStorage => InMemoryIndexedStringStore() case jdbc: DbStorage => @@ -209,7 +216,7 @@ class IndexedStringCache( parent: IndexedStringStore, config: CacheConfig, val loggerFactory: NamedLoggerFactory, -)(implicit ec: ExecutionContext, tc: TraceContext) +)(implicit ec: ExecutionContext) extends IndexedStringStore with NamedLogging { @@ -248,13 +255,13 @@ class IndexedStringCache( override def getForIndex( dbTyp: IndexedStringType, idx: Int, - ): FutureUnlessShutdown[Option[String300]] = + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Option[String300]] = index2strFUS.get((idx, dbTyp)) override def getOrCreateIndex( dbTyp: IndexedStringType, str: String300, - ): FutureUnlessShutdown[Int] = + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Int] = str2Index.get((str, dbTyp)) 
override def close(): Unit = { diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbIndexedStringStore.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbIndexedStringStore.scala index 5715ed334e..9cc977a34f 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbIndexedStringStore.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbIndexedStringStore.scala @@ -11,6 +11,7 @@ import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.resource.{DbStorage, DbStore} import com.digitalasset.canton.store.{IndexedStringStore, IndexedStringType} +import com.digitalasset.canton.tracing.TraceContext import scala.concurrent.ExecutionContext @@ -22,13 +23,12 @@ class DbIndexedStringStore( extends IndexedStringStore with DbStore { - import com.digitalasset.canton.tracing.TraceContext.Implicits.Empty.* import storage.api.* override def getOrCreateIndex( dbTyp: IndexedStringType, str: String300, - ): FutureUnlessShutdown[Int] = + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Int] = getIndexForStr(dbTyp.source, str).getOrElseF { insertIgnore(dbTyp.source, str).flatMap { _ => getIndexForStr(dbTyp.source, str).getOrElse { @@ -40,7 +40,9 @@ class DbIndexedStringStore( } } - private def getIndexForStr(dbType: Int, str: String300): OptionT[FutureUnlessShutdown, Int] = + private def getIndexForStr(dbType: Int, str: String300)(implicit + traceContext: TraceContext + ): OptionT[FutureUnlessShutdown, Int] = OptionT( storage .query( @@ -51,7 +53,9 @@ class DbIndexedStringStore( ) ) - private def insertIgnore(dbType: Int, str: String300): FutureUnlessShutdown[Unit] = { + private def insertIgnore(dbType: Int, str: String300)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Unit] = { // not sure how to get "last insert id" here in case the row was inserted // therefore, we're just querying the db again. 
this is a bit dorky, // but we'll hardly ever do this, so should be good @@ -64,7 +68,7 @@ class DbIndexedStringStore( override def getForIndex( dbTyp: IndexedStringType, idx: Int, - ): FutureUnlessShutdown[Option[String300]] = + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Option[String300]] = storage .query( sql"select string from common_static_strings where id = $idx and source = ${dbTyp.source}" diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/store/memory/InMemoryIndexedStringStore.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/store/memory/InMemoryIndexedStringStore.scala index 3d78f38a34..3c55f01afd 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/store/memory/InMemoryIndexedStringStore.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/store/memory/InMemoryIndexedStringStore.scala @@ -7,6 +7,7 @@ import com.digitalasset.canton.config.CantonRequireTypes.String300 import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.store.{IndexedStringStore, IndexedStringType} +import com.digitalasset.canton.tracing.TraceContext import scala.collection.concurrent.TrieMap import scala.collection.mutable.ArrayBuffer @@ -27,7 +28,7 @@ class InMemoryIndexedStringStore(val minIndex: Int, val maxIndex: Int) extends I override def getOrCreateIndex( dbTyp: IndexedStringType, str: String300, - ): FutureUnlessShutdown[Int] = + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Int] = FutureUnlessShutdown.pure(getOrCreateIndexForTesting(dbTyp, str)) /** @throws java.lang.IllegalArgumentException @@ -51,7 +52,7 @@ class InMemoryIndexedStringStore(val minIndex: Int, val maxIndex: Int) extends I override def getForIndex( dbTyp: IndexedStringType, idx: Int, - ): FutureUnlessShutdown[Option[String300]] = + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Option[String300]] = FutureUnlessShutdown.pure { blocking { synchronized { diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/packagemeta/PackageMetadata.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/store/packagemeta/PackageMetadata.scala similarity index 79% rename from canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/packagemeta/PackageMetadata.scala rename to canton/community/base/src/main/scala/com/digitalasset/canton/store/packagemeta/PackageMetadata.scala index 4de301be94..e8eca99a94 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/packagemeta/PackageMetadata.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/store/packagemeta/PackageMetadata.scala @@ -1,12 +1,12 @@ // Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0 -package com.digitalasset.canton.platform.store.packagemeta +package com.digitalasset.canton.store.packagemeta import cats.kernel.Semigroup import cats.syntax.semigroup.* import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata.{ +import com.digitalasset.canton.store.packagemeta.PackageMetadata.{ InterfacesImplementedBy, PackageResolution, } @@ -14,6 +14,8 @@ import com.digitalasset.daml.lf.data.Ref import com.digitalasset.daml.lf.language.util.PackageInfo import com.digitalasset.daml.lf.language.{Ast, Util as LfUtil} +import scala.annotation.tailrec + // TODO(#17635): Move to [[com.digitalasset.canton.participant.store.memory.PackageMetadataView]] final case class PackageMetadata( interfaces: Set[Ref.Identifier] = Set.empty, @@ -23,9 +25,39 @@ final case class PackageMetadata( packageIdVersionMap: Map[Ref.PackageId, (Ref.PackageName, Ref.PackageVersion)] = Map.empty, // TODO(#21695): Use [[com.digitalasset.daml.lf.language.PackageInterface]] once public packages: Map[Ref.PackageId, Ast.PackageSignature] = Map.empty, - packageUpgradabilityMap: Map[Ref.PackageId, Boolean] = Map.empty, ) { + /** Compute the set of dependencies recursively. Assuming that the package store is closed under + * dependencies, it throws an exception if a package is unknown. + * + * @param packageIds + * the set of packages from which to compute the dependencies + * @return + * the set of packages and their dependencies, recursively + */ + def allDependenciesRecursively( + packageIds: Set[Ref.PackageId] + ): Set[Ref.PackageId] = { + @tailrec + def go( + packageIds: Set[Ref.PackageId], + knownDependencies: Set[Ref.PackageId], + ): Set[Ref.PackageId] = + if (packageIds.isEmpty) knownDependencies + else { + val newDependencies = + packageIds.flatMap(pkgId => tryGet(pkgId).directDeps) -- knownDependencies + go(newDependencies, knownDependencies ++ newDependencies) + } + go(packageIds, packageIds) + } + + private def tryGet(packageId: Ref.PackageId): Ast.PackageSignature = + packages.getOrElse( + packageId, + throw new IllegalStateException(s"Missing package-id $packageId in package metadata view"), + ) + /** Resolve all template or interface ids for (package-name, qualified-name). * * As context, package-level upgrading compatibility between two packages pkg1 and pkg2, where @@ -97,7 +129,6 @@ object PackageMetadata { ) val packageInfo = new PackageInfo(Map(packageId -> packageAst)) - val isPackageUpgradable = packageAst.supportsUpgrades(packageId) PackageMetadata( packageNameMap = packageNameMap, interfaces = packageInfo.definedInterfaces, @@ -108,7 +139,6 @@ object PackageMetadata { // Consider unifying with the other package caches in the participant // (e.g. 
[[com.digitalasset.canton.platform.packages.DeduplicatingPackageLoader]]) packages = Map(packageId -> LfUtil.toSignature(packageAst)), - packageUpgradabilityMap = Map(packageId -> isPackageUpgradable), ) } @@ -136,17 +166,21 @@ object PackageMetadata { } }, packages = x.packages ++ y.packages, - packageUpgradabilityMap = x.packageUpgradabilityMap ++ y.packageUpgradabilityMap, ) } - implicit def upgradablePackageIdPriorityMapSemigroup: Semigroup[PackageResolution] = + implicit def upgradablePackageIdPriorityMapSemigroup: Semigroup[PackageResolution] = { + val preferenceOrdering = Ordering + .by[LocalPackagePreference, (Ref.PackageVersion, Ref.PackageId)](pref => + // Sort by version then by package-id to ensure deterministic sorting + pref.version -> pref.packageId + ) Semigroup.instance { case (x, y) => PackageResolution( - preference = - if (y.preference.version > x.preference.version) y.preference else x.preference, + preference = preferenceOrdering.max(x.preference, y.preference), allPackageIdsForName = x.allPackageIdsForName ++ y.allPackageIdsForName, ) } + } } } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/time/SynchronizerTimeTracker.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/time/SynchronizerTimeTracker.scala index 5b88a1a6ec..1a110bf441 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/time/SynchronizerTimeTracker.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/time/SynchronizerTimeTracker.scala @@ -671,7 +671,7 @@ object SynchronizerTimeTracker { * proof. Use this only for debugging purposes to identify the reason for the time proof * requests. */ - private val PrintCallStackForExecutedTimeProofRequests: Boolean = true + private val PrintCallStackForExecutedTimeProofRequests: Boolean = false @inline private def callStackForExecutedTimeProofRequest(): String = diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/ExternalPartyOnboardingDetails.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/ExternalPartyOnboardingDetails.scala new file mode 100644 index 0000000000..951ab26e8d --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/ExternalPartyOnboardingDetails.scala @@ -0,0 +1,482 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology + +import cats.syntax.alternative.* +import cats.syntax.option.* +import cats.syntax.traverse.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.crypto.Signature +import com.digitalasset.canton.topology.ExternalPartyOnboardingDetails.{ + Centralized, + Decentralized, + OptionallySignedPartyToParticipant, + PartyNamespace, + SignedPartyToKeyMapping, +} +import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.{ + GenericSignedTopologyTransaction, + PositiveSignedTopologyTransaction, +} +import com.digitalasset.canton.topology.transaction.TopologyChangeOp.Replace +import com.digitalasset.canton.topology.transaction.TopologyTransaction.PositiveTopologyTransaction +import com.digitalasset.canton.version.ProtocolVersion + +import scala.reflect.ClassTag + +/** Data class containing onboarding signed topology transactions for an external party. 
The
+  * constructor of this class ensures that only transactions related to the creation of an
+  * external party can be submitted. It validates the kinds of transactions and their
+  * relationships to each other. It does NOT validate anything related to the authorization of
+  * those transactions. That logic is implemented in the topology manager.
+  *
+  * @param partyNamespace
+  *   Fully authorized party namespace. Can be either a single namespace or a decentralized one
+  *   with accompanying individual namespace owner transactions
+  * @param signedPartyToKeyMappingTransaction
+  *   Party to Key mapping transaction
+  * @param optionallySignedPartyToParticipant
+  *   Party to Participant transaction, either signed or unsigned
+  * @param isConfirming
+  *   True if the allocating node is a confirming node for the party
+  */
+final case class ExternalPartyOnboardingDetails private (
+    partyNamespace: Option[PartyNamespace],
+    signedPartyToKeyMappingTransaction: Option[SignedPartyToKeyMapping],
+    optionallySignedPartyToParticipant: OptionallySignedPartyToParticipant,
+    isConfirming: Boolean,
+) {
+  // Invariants
+  require(
+    !optionallySignedPartyToParticipant.mapping.participants
+      .map(_.permission)
+      .contains(ParticipantPermission.Submission),
+    "External party cannot be hosted with Submission permission",
+  )
+  require(
+    partyNamespace.forall(_.namespace == optionallySignedPartyToParticipant.mapping.namespace),
+    "The party namespace does not match the PartyToParticipant namespace",
+  )
+  require(
+    partyNamespace.forall {
+      case decentralized: Decentralized =>
+        decentralized.individualNamespaceTransaction.sizeIs <= ExternalPartyOnboardingDetails.maxDecentralizedOwnersSize.value
+      case _ => true
+    },
+    "Decentralized namespace exceeds the maximum number of namespace owners",
+  )
+  require(
+    signedPartyToKeyMappingTransaction.forall(
+      _.mapping.namespace == optionallySignedPartyToParticipant.mapping.namespace
+    ),
+    "The PartyToKeyMapping namespace does not match the PartyToParticipant namespace",
+  )
+
+  /** Return true if we expect the party to be fully allocated and authorized with the provided
+    * transactions
+    */
+  def fullyAllocatesParty: Boolean =
+    // Expect fully allocated if there's a centralized namespace
+    // (It could be fully allocated as well with a decentralized namespace, but checking this
+    // would require re-running the authorization checks implemented in the topology manager)
+    partyNamespace.exists {
+      case _: Centralized => true
+      case _ => false
+    } &&
+      // and a party-to-key mapping is present
+      signedPartyToKeyMappingTransaction.isDefined &&
+      // and the party is not multi-hosted
+      hostingParticipants.sizeIs == 1
+
+  /** Namespace of the external party.
+    */
+  def namespace: Namespace = optionallySignedPartyToParticipant.mapping.namespace
+
+  /** PartyId of the external party
+    */
+  def partyId: PartyId = optionallySignedPartyToParticipant.mapping.partyId
+
+  /** Party hint of the external party
+    */
+  def partyHint: String = partyId.uid.identifier.str
+
+  def hostingParticipants: Seq[HostingParticipant] =
+    optionallySignedPartyToParticipant.mapping.participants
+  def confirmationThreshold: PositiveInt = optionallySignedPartyToParticipant.mapping.threshold
+}
+
+object ExternalPartyOnboardingDetails {
+
+  // Maximum number of decentralized namespace owners allowed through the `allocateExternalParty` API.
+  // This is hardcoded here to avoid an unreasonably high number of namespace owner transactions being distributed
+  // through this endpoint, as the DecentralizedNamespaceDefinition itself does not have any limit on the number of
+  // namespace owners. If this limit is too low for a given use case, go through the Admin API topology write service instead.
+  // TODO(i27530): Make this configurable, or lift it when DecentralizedNamespaceDefinition has a limit
+  val maxDecentralizedOwnersSize: PositiveInt = PositiveInt.tryCreate(10)
+
+  // Type aliases for conciseness
+  private type SignedNamespaceDelegation =
+    SignedTopologyTransaction[TopologyChangeOp.Replace, NamespaceDelegation]
+  private type SignedDecentralizedNamespace =
+    SignedTopologyTransaction[TopologyChangeOp.Replace, DecentralizedNamespaceDefinition]
+  private type SignedPartyToKeyMapping =
+    SignedTopologyTransaction[TopologyChangeOp.Replace, PartyToKeyMapping]
+
+  /** External party namespace; it can be either centralized or decentralized
+    */
+  sealed trait PartyNamespace {
+
+    /** Transactions to be loaded in the topology manager to create the party's namespace
+      */
+    def signedTransactions: Seq[GenericSignedTopologyTransaction]
+
+    /** Namespace of the party
+      */
+    def namespace: Namespace
+  }
+
+  /** Decentralized party namespace. All transactions are expected to have all required signatures
+    * to be fully authorized. If not, the party allocation will fail in the topology manager auth
+    * checks.
+    * @param decentralizedTransaction
+    *   The decentralized namespace transaction
+    * @param individualNamespaceTransaction
+    *   The individual namespace owner transactions
+    */
+  final private case class Decentralized(
+      decentralizedTransaction: SignedDecentralizedNamespace,
+      individualNamespaceTransaction: Seq[SignedNamespaceDelegation],
+  ) extends PartyNamespace {
+    // In that order on purpose, as the individual namespaces must be processed before the decentralized namespace can be authorized
+    override def signedTransactions: Seq[GenericSignedTopologyTransaction] =
+      individualNamespaceTransaction :+ decentralizedTransaction
+    override def namespace: Namespace = decentralizedTransaction.mapping.namespace
+  }
+
+  /** Centralized party namespace. The transaction is expected to be fully authorized. If not, the
+ * @param singleTransaction + * The signed namespace definition transaction + */ + final private case class Centralized(singleTransaction: SignedNamespaceDelegation) + extends PartyNamespace { + override def signedTransactions: Seq[GenericSignedTopologyTransaction] = Seq(singleTransaction) + override def namespace: Namespace = singleTransaction.mapping.namespace + } + + /** The PartyToParticipant mapping may be submitted signed (by the party's namespace) or unsigned + * (by hosting nodes wanting to authorize the hosting). This trait makes the distinction between + * the two cases. + */ + sealed trait OptionallySignedPartyToParticipant { + def mapping: PartyToParticipant + } + final case class SignedPartyToParticipant( + signed: SignedTopologyTransaction[TopologyChangeOp.Replace, PartyToParticipant] + ) extends OptionallySignedPartyToParticipant { + def mapping: PartyToParticipant = signed.mapping + } + final case class UnsignedPartyToParticipant( + unsigned: TopologyTransaction[TopologyChangeOp.Replace, PartyToParticipant] + ) extends OptionallySignedPartyToParticipant { + def mapping: PartyToParticipant = unsigned.mapping + } + + private val expectedTransactionMappings = Seq( + NamespaceDelegation, + DecentralizedNamespaceDefinition, + PartyToParticipant, + PartyToKeyMapping, + ) + + // TODO(i27530): We may be able to be more precise for P2P and P2K but it's hard to tell + // at this stage especially with decentralized namespaces + private def isProposal( + transaction: PositiveTopologyTransaction + ): Boolean = + // Namespaces must be fully authorized so they can't be proposals + transaction.selectMapping[NamespaceDelegation].isEmpty && + transaction.selectMapping[DecentralizedNamespaceDefinition].isEmpty + + /** Build SignedTopologyTransactions for transactions that have at least one signature. + * Transactions without signatures are returned separately. + */ + private def parseTransactionsWithSignatures( + transactionsWithSignatures: NonEmpty[List[(PositiveTopologyTransaction, List[Signature])]], + multiSignatures: List[Signature], + protocolVersion: ProtocolVersion, + ): Either[ + String, + (List[PositiveSignedTopologyTransaction], List[PositiveTopologyTransaction]), + ] = { + val transactionHashes = transactionsWithSignatures.map { case (transaction, _) => + transaction.hash + }.toSet + val multiTransactionSignatures = + multiSignatures.map(MultiTransactionSignature(transactionHashes, _)) + + // Gather the signatures for a transaction. 
If no signatures can be found, return None
+    def signaturesForTransaction(
+        transaction: PositiveTopologyTransaction,
+        singleTransactionSignatures: Seq[Signature],
+        multiTransactionSignatures: Seq[MultiTransactionSignature],
+    ): Option[NonEmpty[Seq[TopologyTransactionSignature]]] = {
+      // Deduplicate signatures to keep only one per signer
+      val deduplicatedSignatures = NonEmpty
+        .from(
+          (singleTransactionSignatures.map(
+            SingleTransactionSignature(transaction.hash, _)
+          ) ++ multiTransactionSignatures)
+            // Prefer single transaction signatures over multi transaction ones
+            // as they're smaller (they don't need the list of signed hashes)
+            .groupMapReduce(_.authorizingLongTermKey)(identity)((first, _) => first)
+            .values
+            .toSeq
+        )
+      transaction.mapping match {
+        // Special case for root namespaces: they require signatures only from the namespace key
+        case namespaceDelegation: NamespaceDelegation =>
+          deduplicatedSignatures
+            .map(_.filter(_.authorizingLongTermKey == namespaceDelegation.namespace.fingerprint))
+            .flatMap(NonEmpty.from)
+        case _ => deduplicatedSignatures
+      }
+    }
+
+    transactionsWithSignatures.forgetNE
+      .traverse { case (transaction, signatures) =>
+        signaturesForTransaction(transaction, signatures, multiTransactionSignatures)
+          .traverse(transactionSignatures =>
+            SignedTopologyTransaction.create(
+              transaction,
+              transactionSignatures.toSet,
+              isProposal = isProposal(transaction),
+              protocolVersion,
+            )
+          )
+          .map(_.map(Left(_)).getOrElse(Right(transaction)))
+      }
+      .map(_.separate)
+  }
+
+  private def validateMaximumOneElement[T](
+      list: List[T],
+      error: Int => String,
+  ): Either[String, Option[T]] = list match {
+    case Nil => Right(None)
+    case singleTransaction :: Nil => Right(Some(singleTransaction))
+    case moreThanOneMapping => Left(error(moreThanOneMapping.length))
+  }
+
+  private def validateMaximumOneMapping[M <: TopologyMapping](
+      transactions: List[PositiveSignedTopologyTransaction]
+  )(implicit
+      classTag: ClassTag[M]
+  ): Either[String, Option[SignedTopologyTransaction[Replace, M]]] =
+    validateMaximumOneElement(
+      transactions.flatMap(_.select[TopologyChangeOp.Replace, M]),
+      length =>
+        s"Only one transaction of type ${classTag.runtimeClass.getName} can be provided, got $length",
+    )
+
+  /*
+   * Look for either a decentralized namespace, optionally with its individual namespace delegations, or a single namespace.
+ */ + private def validatePartyNamespace( + signedTransactions: List[PositiveSignedTopologyTransaction], + p2pNamespace: Namespace, + ): Either[String, Option[PartyNamespace]] = + for { + // Look first for a decentralized namespace, can only be at most one + signedDecentralizedTxO <- validateMaximumOneMapping[DecentralizedNamespaceDefinition]( + signedTransactions + ) + partyNamespaceO <- signedDecentralizedTxO match { + case Some(signedDecentralizedTx) => + // If there's one, get the corresponding NamespaceDelegations for it + val namespaceOwners = signedTransactions + .flatMap(_.select[Replace, NamespaceDelegation]) + .filter(namespaceTx => + signedDecentralizedTx.mapping.owners.contains(namespaceTx.mapping.namespace) + ) + Either.cond( + namespaceOwners.sizeIs <= maxDecentralizedOwnersSize.value, + Decentralized( + signedDecentralizedTx, + namespaceOwners, + ).some, + "Decentralized namespaces cannot have more than " + + s"${maxDecentralizedOwnersSize.value} individual namespace owners, got ${namespaceOwners.size}", + ) + case None => + // Otherwise look for a single delegation + for { + namespaceDelegationO <- validateMaximumOneMapping[NamespaceDelegation]( + signedTransactions + ) + _ <- Either.cond( + namespaceDelegationO.forall(NamespaceDelegation.isRootCertificate), + (), + "NamespaceDelegation is not a root namespace. Ensure the namespace and target key are the same", + ) + } yield namespaceDelegationO.map(Centralized(_): PartyNamespace) + } + _ <- partyNamespaceO.traverse(partyNamespace => + Either.cond( + partyNamespace.namespace == p2pNamespace, + (), + s"The Party namespace (${partyNamespace.namespace}) does not match the PartyToParticipant namespace ($p2pNamespace)", + ) + ) + } yield partyNamespaceO + + private def validateExactlyOnePartyToParticipant( + signedTransactions: List[PositiveSignedTopologyTransaction], + unsignedTransactions: List[PositiveTopologyTransaction], + ): Either[String, OptionallySignedPartyToParticipant] = + // Check first if there's a signed P2P + validateMaximumOneMapping[PartyToParticipant](signedTransactions) + .flatMap { + case Some(signed) => Right(SignedPartyToParticipant(signed)) + case None => + // Otherwise there must be an unsigned one + validateMaximumOneElement( + unsignedTransactions.flatMap(_.select[TopologyChangeOp.Replace, PartyToParticipant]), + length => + s"Only one transaction of type PartyToParticipant can be provided, got $length", + ).flatMap( + _.toRight(s"One transaction of type PartyToParticipant must be provided, got 0") + ).map(UnsignedPartyToParticipant(_)) + } + + /** Find and validate the PartyToParticipant transaction. 
It can be either signed (by the party
+    * namespace) or unsigned, in which case it will be signed by this participant (if it can) to
+    * authorize the hosting of the party.
+    */
+  private def validatePartyToParticipant(
+      signedTopologyTransactions: List[PositiveSignedTopologyTransaction],
+      unsignedTopologyTransactions: List[PositiveTopologyTransaction],
+      participantId: ParticipantId,
+  ): Either[String, (OptionallySignedPartyToParticipant, Boolean)] =
+    for {
+      optionallySignedPartyToParticipant <- validateExactlyOnePartyToParticipant(
+        signedTopologyTransactions,
+        unsignedTopologyTransactions,
+      )
+      hostingParticipants = optionallySignedPartyToParticipant.mapping.participants
+      nodePermissionsMap = optionallySignedPartyToParticipant.mapping.participants
+        .groupMap(_.permission)(_.participantId)
+      nodesWithSubmissionPermission = nodePermissionsMap.getOrElse(
+        ParticipantPermission.Submission,
+        Seq.empty,
+      )
+      _ <- Either.cond(
+        nodesWithSubmissionPermission.isEmpty,
+        (),
+        s"The PartyToParticipant transaction must not contain any node with Submission permission. Nodes with submission permission: ${nodesWithSubmissionPermission
+            .mkString(", ")}",
+      )
+      _ <- hostingParticipants.toList match {
+        case HostingParticipant(hosting, permission, _onboarding) :: Nil =>
+          Either.cond(
+            hosting == participantId && permission == ParticipantPermission.Confirmation,
+            (),
+            s"The party is to be hosted on a single participant ($hosting) that is not this participant ($participantId). Submit the allocation request on $hosting instead.",
+          )
+        case _ => Right(())
+      }
+      confirmingNodes = nodePermissionsMap.getOrElse(ParticipantPermission.Confirmation, List.empty)
+      _ <- Either.cond(
+        confirmingNodes.nonEmpty,
+        (),
+        "The PartyToParticipant transaction must contain at least one node with Confirmation permission",
+      )
+      isConfirmingNode = confirmingNodes.contains(participantId)
+      _ <- Either.cond(
+        // If it's not a confirming node, it must be an observing one, as external parties are not expected
+        // to give Submission permission to any node
+        isConfirmingNode ||
+          nodePermissionsMap
+            .getOrElse(ParticipantPermission.Observation, List.empty)
+            .contains(participantId),
+        (),
+        "This node is not hosting the party with either Confirmation or Observation permission.",
+      )
+    } yield (optionallySignedPartyToParticipant, isConfirmingNode)
+
+  /** Find at most one PartyToKeyMapping. Optional because one may only provide a PartyToParticipant
+    * transaction to authorize the hosting. If provided, validate the namespace matches the
+    * PartyToParticipant one.
+ */ + private def validatePartyToKey( + signedTopologyTransactions: List[PositiveSignedTopologyTransaction], + p2pNamespace: Namespace, + ): Either[String, Option[SignedPartyToKeyMapping]] = for { + signedPartyToKeyO <- validateMaximumOneMapping[PartyToKeyMapping](signedTopologyTransactions) + _ <- signedPartyToKeyO.traverse(signedPartyToKey => + Either.cond( + signedPartyToKey.mapping.namespace == p2pNamespace, + (), + s"The PartyToKeyMapping namespace (${signedPartyToKey.mapping.namespace}) does not match the PartyToParticipant namespace ($p2pNamespace)", + ) + ) + } yield signedPartyToKeyO + + private def failOnUnwantedTransactionTypes(transactions: Seq[PositiveTopologyTransaction]) = { + val unwantedTransactions = + transactions.filterNot(tx => + expectedTransactionMappings.map(_.code).contains(tx.mapping.code) + ) + Either.cond( + unwantedTransactions.isEmpty, + (), + "Unsupported transactions found: " + unwantedTransactions.distinct + .map(_.mapping.getClass.getSimpleName) + .mkString(", ") + ". Supported transactions are: " + expectedTransactionMappings + .map(_.getClass.getSimpleName.stripSuffix("$")) + .mkString(", "), + ) + } + + def create( + signedTransactions: NonEmpty[List[(PositiveTopologyTransaction, List[Signature])]], + multiSignatures: List[Signature], + protocolVersion: ProtocolVersion, + participantId: ParticipantId, + ): Either[String, ExternalPartyOnboardingDetails] = + for { + _ <- failOnUnwantedTransactionTypes(signedTransactions.map(_._1)) + parsedTransactionsWithSignatures <- parseTransactionsWithSignatures( + signedTransactions, + multiSignatures, + protocolVersion, + ) + (signedTopologyTransactions, unsignedTopologyTransactions) = parsedTransactionsWithSignatures + partyToParticipantAndIsConfirming <- validatePartyToParticipant( + signedTopologyTransactions, + unsignedTopologyTransactions, + participantId, + ) + (partyToParticipant, isConfirming) = partyToParticipantAndIsConfirming + partyToKey <- validatePartyToKey( + signedTopologyTransactions, + partyToParticipant.mapping.namespace, + ) + partyNamespace <- validatePartyNamespace( + signedTopologyTransactions, + partyToParticipant.mapping.namespace, + ) + } yield ExternalPartyOnboardingDetails( + partyNamespace, + partyToKey, + partyToParticipant, + isConfirming, + ) +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/ForceFlags.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/ForceFlags.scala index 75b1340086..a5fc1c7a12 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/ForceFlags.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/ForceFlags.scala @@ -37,8 +37,6 @@ object ForceFlag { case object PreparationTimeRecordTimeToleranceIncrease extends ForceFlag(v30.ForceFlag.FORCE_FLAG_PREPARATION_TIME_RECORD_TIME_TOLERANCE_INCREASE) - case object AllowUnvetPackage extends ForceFlag(v30.ForceFlag.FORCE_FLAG_ALLOW_UNVET_PACKAGE) - case object AllowUnvetPackageWithActiveContracts extends ForceFlag(v30.ForceFlag.FORCE_FLAG_ALLOW_UNVET_PACKAGE_WITH_ACTIVE_CONTRACTS) @@ -66,6 +64,9 @@ object ForceFlag { case object AllowVetIncompatibleUpgrades extends ForceFlag(v30.ForceFlag.FORCE_FLAG_ALLOW_VET_INCOMPATIBLE_UPGRADES) + case object AllowOutOfBoundsValue + extends ForceFlag(v30.ForceFlag.FORCE_FLAG_ALLOW_OUT_OF_BOUNDS_VALUE) + /** This should only be used internally in situations where * - the caller knows what they are doing * - it's not necessarily clear which specific flags to use, but there 
also isn't really any @@ -78,7 +79,6 @@ object ForceFlag { Seq[ForceFlag]( AlienMember, LedgerTimeRecordTimeToleranceIncrease, - AllowUnvetPackage, AllowUnknownPackage, AllowUnvettedDependencies, DisablePartyWithActiveContracts, @@ -88,6 +88,7 @@ object ForceFlag { AllowInsufficientParticipantPermissionForSignatoryParty, AllowInsufficientSignatoryAssigningParticipantsForParty, AllowVetIncompatibleUpgrades, + AllowOutOfBoundsValue, ) .map(ff => ff.toProtoV30 -> ff) .toMap diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManager.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManager.scala index 8349fce4d9..75ca75f732 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManager.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManager.scala @@ -9,26 +9,27 @@ import cats.syntax.foldable.* import cats.syntax.parallel.* import com.daml.nonempty.NonEmpty import com.daml.nonempty.catsinstances.* -import com.digitalasset.canton.LfPackageId import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.config.{NonNegativeFiniteDuration, ProcessingTimeout} import com.digitalasset.canton.crypto.* import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown, LifeCycle} -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.protocol.{ DynamicSynchronizerParameters, StaticSynchronizerParameters, } import com.digitalasset.canton.sequencing.AsyncResult -import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.store.packagemeta.PackageMetadata +import com.digitalasset.canton.time.{Clock, NonNegativeFiniteDuration} import com.digitalasset.canton.topology.TopologyManager.assignExpectedUsageToKeys import com.digitalasset.canton.topology.TopologyManagerError.{ DangerousCommandRequiresForce, IncreaseOfPreparationTimeRecordTimeTolerance, - ParticipantTopologyManagerError, + InvalidSynchronizerSuccessor, + ValueOutOfBounds, } import com.digitalasset.canton.topology.processing.{ EffectiveTime, @@ -42,6 +43,7 @@ import com.digitalasset.canton.topology.store.TopologyStoreId.{ } import com.digitalasset.canton.topology.store.ValidatedTopologyTransaction.GenericValidatedTopologyTransaction import com.digitalasset.canton.topology.store.{ + TimeQuery, TopologyStore, TopologyStoreId, ValidatedTopologyTransaction, @@ -54,14 +56,22 @@ import com.digitalasset.canton.topology.transaction.TopologyTransaction.{ GenericTopologyTransaction, TxHash, } +import com.digitalasset.canton.topology.transaction.checks.{ + NoopTopologyMappingChecks, + OptionalTopologyMappingChecks, + RequiredTopologyMappingChecks, + TopologyMappingChecks, +} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ShowUtil.* import com.digitalasset.canton.util.{EitherTUtil, MonadUtil, SimpleExecutionQueue} import com.digitalasset.canton.version.{ProtocolVersion, ProtocolVersionValidation} +import com.digitalasset.canton.{LfPackageId, config} import java.util.concurrent.atomic.AtomicReference import scala.annotation.unused -import 
scala.concurrent.{ExecutionContext, Future} +import scala.concurrent.ExecutionContext +import scala.math.Ordered.orderingToOrdered trait TopologyManagerObserver { def addedNewTransactions( @@ -77,6 +87,7 @@ class SynchronizerTopologyManager( staticSynchronizerParameters: StaticSynchronizerParameters, override val store: TopologyStore[SynchronizerStore], val outboxQueue: SynchronizerOutboxQueue, + disableOptionalTopologyChecks: Boolean, exitOnFatalFailures: Boolean, timeouts: ProcessingTimeout, futureSupervisor: FutureSupervisor, @@ -95,14 +106,24 @@ class SynchronizerTopologyManager( ) { def psid: PhysicalSynchronizerId = store.storeId.psid - override protected val processor: TopologyStateProcessor = + override protected val processor: TopologyStateProcessor = { + + val required = new RequiredTopologyMappingChecks(store, loggerFactory) + val checks = + if (!disableOptionalTopologyChecks) + new TopologyMappingChecks.All( + required, + new OptionalTopologyMappingChecks(store, loggerFactory), + ) + else required TopologyStateProcessor.forTopologyManager( store, Some(outboxQueue), - new ValidatingTopologyMappingChecks(store, loggerFactory), + checks, crypto.pureCrypto, loggerFactory, ) + } // When evaluating transactions against the synchronizer store, we want to validate against // the head state. We need to take all previously sequenced transactions into account, because @@ -184,7 +205,10 @@ class AuthorizedTopologyManager( timeouts, futureSupervisor, loggerFactory, - ) + ) { + def initialize(implicit @unused traceContext: TraceContext): FutureUnlessShutdown[Unit] = + FutureUnlessShutdown.unit +} abstract class LocalTopologyManager[StoreId <: TopologyStoreId]( nodeId: UniqueIdentifier, @@ -315,6 +339,7 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC def validatePackageVetting( @unused currentlyVettedPackages: Set[LfPackageId], @unused nextPackageIds: Set[LfPackageId], + @unused dryRunSnapshot: Option[PackageMetadata], @unused forceFlags: ForceFlags, )(implicit traceContext: TraceContext @@ -382,7 +407,7 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC protocolVersion: ProtocolVersion, expectFullAuthorization: Boolean, forceChanges: ForceFlags = ForceFlags.none, - waitToBecomeEffective: Option[NonNegativeFiniteDuration], + waitToBecomeEffective: Option[config.NonNegativeFiniteDuration], )(implicit traceContext: TraceContext ): EitherT[ @@ -399,9 +424,7 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC ) for { existingTransaction <- findExistingTransaction(mapping) - tx <- build(op, mapping, serial, protocolVersion, existingTransaction).mapK( - FutureUnlessShutdown.outcomeK - ) + tx <- build(op, mapping, serial, protocolVersion, existingTransaction) signedTx <- signTransaction( tx, signingKeys, @@ -498,7 +521,7 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC existingTransaction: Option[GenericSignedTopologyTransaction], )(implicit traceContext: TraceContext - ): EitherT[Future, TopologyManagerError, TopologyTransaction[Op, M]] = { + ): EitherT[FutureUnlessShutdown, TopologyManagerError, TopologyTransaction[Op, M]] = { val existingTransactionTuple = existingTransaction.map(t => (t.operation, t.mapping, t.serial, t.signatures)) for { @@ -508,7 +531,7 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC EitherT.rightT(PositiveInt.one) case (None, Some(proposed)) => // didn't find an existing transaction, therefore the proposed 
serial must be 1 - EitherT.cond[Future][TopologyManagerError, PositiveInt]( + EitherT.cond[FutureUnlessShutdown][TopologyManagerError, PositiveInt]( proposed == PositiveInt.one, PositiveInt.one, TopologyManagerError.SerialMismatch.Failure(PositiveInt.one, proposed), @@ -519,7 +542,7 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC // auto-select existing EitherT.rightT(existingSerial) case (Some((`op`, `mapping`, existingSerial, signatures)), Some(proposed)) => - EitherT.cond[Future]( + EitherT.cond[FutureUnlessShutdown]( existingSerial == proposed, existingSerial, TopologyManagerError.MappingAlreadyExists @@ -532,12 +555,12 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC case (Some((_, _, existingSerial, _)), Some(proposed)) => // check that the proposed serial matches existing+1 val next = existingSerial.increment - EitherT.cond[Future]( + EitherT.cond[FutureUnlessShutdown]( next == proposed, next, TopologyManagerError.SerialMismatch.Failure(next, proposed), ) - }): EitherT[Future, TopologyManagerError, PositiveInt] + }): EitherT[FutureUnlessShutdown, TopologyManagerError, PositiveInt] } yield TopologyTransaction(op, theSerial, mapping, protocolVersion) } @@ -730,14 +753,10 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC ) transactionsInStore <- EitherT - .liftF( - store.findLatestTransactionsAndProposalsByTxHash( - transactions.map(_.hash).toSet - ) - ) - existingHashes = transactionsInStore - .map(tx => tx.hash -> tx) - .toMap + .liftF(store.findLatestTransactionsAndProposalsByTxHash(transactions.map(_.hash).toSet)) + + existingHashes = transactionsInStore.map(tx => tx.hash -> tx).toMap + // find transactions that provide new signatures (existingTransactions, newTransactionsOrAdditionalSignatures) = transactions.partition { tx => @@ -813,13 +832,21 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC traceContext: TraceContext ): EitherT[FutureUnlessShutdown, TopologyManagerError, Unit] = transaction.mapping match { case SynchronizerParametersState(synchronizerId, newSynchronizerParameters) => - checkPreparationTimeRecordTimeToleranceNotIncreasing( - synchronizerId, - newSynchronizerParameters, - forceChanges, - ) + for { + _ <- checkPreparationTimeRecordTimeToleranceNotIncreasing( + synchronizerId, + newSynchronizerParameters, + forceChanges, + ) + + _ <- EitherT.fromEither[FutureUnlessShutdown]( + checkDynamicSynchronizerParametersBounds(newSynchronizerParameters, forceChanges) + ) + } yield () + case OwnerToKeyMapping(member, _) => checkTransactionIsForCurrentNode(member, forceChanges, transaction.mapping.code) + case VettedPackages(participantId, newPackages) => checkPackageVettingIsNotDangerous( participantId, @@ -827,6 +854,7 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC forceChanges, transaction.mapping.code, ) + case PartyToParticipant(partyId, threshold, participants) => checkPartyToParticipantIsNotDangerous( partyId, @@ -835,6 +863,12 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC forceChanges, transaction.transaction.operation, ) + + case upgradeAnnouncement: SynchronizerUpgradeAnnouncement => + if (transaction.operation == TopologyChangeOp.Replace) + checkSynchronizerUpgradeAnnouncementIsNotDangerous(upgradeAnnouncement, transaction.serial) + else EitherT.pure(()) + case _ => EitherT.rightT(()) } @@ -850,6 +884,16 @@ abstract class TopologyManager[+StoreID <: 
TopologyStoreId, +CryptoType <: BaseC DangerousCommandRequiresForce.AlienMember(member, topologyMappingCode), ) + private def checkDynamicSynchronizerParametersBounds( + newSynchronizerParameters: DynamicSynchronizerParameters, + forceChanges: ForceFlags, + )(implicit + traceContext: TraceContext + ): Either[TopologyManagerError, Unit] = + if (!forceChanges.permits(ForceFlag.AllowOutOfBoundsValue)) + TopologyManager.checkBounds(newSynchronizerParameters) + else ().asRight + private def checkPreparationTimeRecordTimeToleranceNotIncreasing( synchronizerId: SynchronizerId, newSynchronizerParameters: DynamicSynchronizerParameters, @@ -927,26 +971,55 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC .getOrElse(Nil) .toSet } - _ <- checkPackageVettingRevocation(currentlyVettedPackages, newPackageIds, forceChanges) _ <- checkTransactionIsForCurrentNode(participantId, forceChanges, topologyMappingCode) - _ <- validatePackageVetting(currentlyVettedPackages, newPackageIds, forceChanges) + _ <- validatePackageVetting(currentlyVettedPackages, newPackageIds, None, forceChanges) } yield () - private def checkPackageVettingRevocation( - currentlyVettedPackages: Set[LfPackageId], - nextPackageIds: Set[LfPackageId], - forceChanges: ForceFlags, + private def checkSynchronizerUpgradeAnnouncementIsNotDangerous( + upgradeAnnouncement: SynchronizerUpgradeAnnouncement, + serial: PositiveInt, )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, TopologyManagerError, Unit] = { - val removed = currentlyVettedPackages -- nextPackageIds - val force = forceChanges.permits(ForceFlag.AllowUnvetPackage) - val changeIdDangerous = removed.nonEmpty - EitherT.cond( - !changeIdDangerous || force, - (), - ParticipantTopologyManagerError.DangerousVettingCommandsRequireForce.Reject(), - ) + + val resF = store + .inspect( + proposals = false, + timeQuery = TimeQuery.Range(None, None), + asOfExclusiveO = None, + op = None, + types = Seq(TopologyMapping.Code.SynchronizerUpgradeAnnouncement), + idFilter = None, + namespaceFilter = None, + ) + .map { result => + result + .collectOfMapping[SynchronizerUpgradeAnnouncement] + .result + .maxByOption(_.serial) match { + case None => ().asRight + + case Some(latestUpgradeAnnouncement) => + // If the latest is another upgrade, we want the PSId to be strictly greater + if (serial == latestUpgradeAnnouncement.serial) + ().asRight + else { + val previouslyAnnouncedSuccessorPSId = + latestUpgradeAnnouncement.mapping.successorSynchronizerId + + Either.cond( + previouslyAnnouncedSuccessorPSId < upgradeAnnouncement.successorSynchronizerId, + (), + InvalidSynchronizerSuccessor.Reject.conflictWithPreviousAnnouncement( + successorSynchronizerId = upgradeAnnouncement.successorSynchronizerId, + previouslyAnnouncedSuccessor = previouslyAnnouncedSuccessorPSId, + ), + ) + } + } + } + + EitherT(resF) } private def checkPartyToParticipantIsNotDangerous( @@ -1120,4 +1193,36 @@ object TopologyManager { } } + + def checkBounds( + parameters: DynamicSynchronizerParameters + )(implicit errorLoggingContext: ErrorLoggingContext): Either[TopologyManagerError, Unit] = { + def check( + proj: DynamicSynchronizerParameters => NonNegativeFiniteDuration, + name: String, + bounds: (NonNegativeFiniteDuration, NonNegativeFiniteDuration), + ): Either[TopologyManagerError, Unit] = { + val value = proj(parameters) + + val (min, max) = bounds + + for { + _ <- Either.cond(value >= min, (), ValueOutOfBounds.Error(value, name, min, max)) + _ <- Either.cond(value <= max, (), 
ValueOutOfBounds.Error(value, name, min, max)) + } yield () + } + + for { + _ <- check( + _.confirmationResponseTimeout, + "confirmation response timeout", + DynamicSynchronizerParameters.confirmationResponseTimeoutBounds, + ) + _ <- check( + _.mediatorReactionTimeout, + "mediator reaction timeout", + DynamicSynchronizerParameters.mediatorReactionTimeoutBounds, + ) + } yield () + } } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerError.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerError.scala index e9ef1d76b9..401878b9ad 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerError.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerError.scala @@ -51,12 +51,6 @@ object TopologyManagerError extends TopologyManagerErrorGroup { id = "TOPOLOGY_MANAGER_INTERNAL_ERROR", ErrorCategory.SystemInternalAssumptionViolated, ) { - final case class AssumptionViolation(description: String)(implicit - val loggingContext: ErrorLoggingContext - ) extends CantonError.Impl( - cause = s"Assumption violation: $description" - ) - with TopologyManagerError final case class Unhandled(description: String, throwable: Throwable)(implicit val loggingContext: ErrorLoggingContext @@ -121,6 +115,13 @@ object TopologyManagerError extends TopologyManagerErrorGroup { ) with TopologyManagerError + final case class NotFoundForSynchronizer(synchronizerId: SynchronizerId)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = s"Topology store for synchronizer $synchronizerId is not known." + ) + with TopologyManagerError + final case class NoSynchronizerStoreAvailable()(implicit val loggingContext: ErrorLoggingContext ) extends CantonError.Impl( @@ -369,26 +370,6 @@ object TopologyManagerError extends TopologyManagerErrorGroup { with TopologyManagerError } - @Explanation( - """This error indicates that the attempted key removal would create dangling topology transactions, making the node unusable.""" - ) - @Resolution( - """Add the `force = true` flag to your command if you are really sure what you are doing.""" - ) - object RemovingKeyWithDanglingTransactionsMustBeForced - extends ErrorCode( - id = "TOPOLOGY_REMOVING_KEY_DANGLING_TRANSACTIONS_MUST_BE_FORCED", - ErrorCategory.InvalidGivenCurrentSystemStateOther, - ) { - final case class Failure(key: Fingerprint, purpose: KeyPurpose)(implicit - val loggingContext: ErrorLoggingContext - ) extends CantonError.Impl( - cause = - "Topology transaction would remove a key that creates conflicts and dangling transactions" - ) - with TopologyManagerError - } - @Explanation( """This error indicates that it has been attempted to increase the ``preparationTimeRecordTimeTolerance`` synchronizer parameter in an insecure manner. |Increasing this parameter may disable security checks and can therefore be a security risk. @@ -432,6 +413,34 @@ object TopologyManagerError extends TopologyManagerErrorGroup { with TopologyManagerError } + @Explanation( + """This error occurs when the new parameter value is outside the defined lower and upper bounds.""" + ) + @Resolution( + """Choose a value that is within the allowed lower and upper limits. + | + |Alternatively, add the flag ``ForceFlag.AllowOutOfBoundsValue`` to force the value change. + |Caution: Forcing a value change may result in adverse system behaviour. Proceed only if you understand the risks. 
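+          |As a hedged illustration (the bounds below are invented for the example, not the shipped values):
+          |if ``confirmationResponseTimeout`` were bounded to [1 second, 1 hour], proposing 0 seconds or
+          |2 hours would be rejected with ``TOPOLOGY_VALUE_OUT_OF_BOUNDS`` unless
+          |``ForceFlag.AllowOutOfBoundsValue`` is supplied.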
+ |""" + ) + object ValueOutOfBounds + extends ErrorCode( + id = "TOPOLOGY_VALUE_OUT_OF_BOUNDS", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) { + final case class Error( + value: NonNegativeFiniteDuration, + name: String, + min: NonNegativeFiniteDuration, + max: NonNegativeFiniteDuration, + )(implicit + override val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = s"Parameter `$name` needs to be between $min and $max; found: $value" + ) + with TopologyManagerError + } + @Explanation( "This error indicates that members referenced in a topology transaction have not declared at least one signing key or at least 1 encryption key or both." ) @@ -474,38 +483,19 @@ object TopologyManagerError extends TopologyManagerErrorGroup { } @Explanation( - "This error indicates that the topology transaction references parties that are currently unknown." - ) - @Resolution( - """Wait for the onboarding of the parties to be become active or remove the unknown parties from the topology transaction. - |The metadata details of this error contain the unknown parties in the field ``parties``.""" - ) - object UnknownParties - extends ErrorCode( - id = "TOPOLOGY_UNKNOWN_PARTIES", - ErrorCategory.InvalidGivenCurrentSystemStateResourceMissing, - ) { - final case class Failure(parties: Seq[PartyId])(implicit - override val loggingContext: ErrorLoggingContext - ) extends CantonError.Impl( - cause = s"Parties ${parties.sorted.mkString(", ")} are unknown." - ) - with TopologyManagerError - } - - @Explanation( - """This error indicates that a participant is trying to rescind their synchronizer trust certificate + """This error indicates that a participant is trying to rescind actively used topology transactions |while still being hosting parties.""" ) @Resolution( - """The participant should work with the owners of the parties mentioned in the ``parties`` field in the - |error details metadata to get itself removed from the list of hosting participants of those parties.""" + """The participant must remove itself from the party to participant mappings that still refer to it.""" ) - object IllegalRemovalOfSynchronizerTrustCertificate + object IllegalRemovalOfActiveTopologyTransactions extends ErrorCode( - id = "TOPOLOGY_ILLEGAL_REMOVAL_OF_SYNCHRONIZER_TRUST_CERTIFICATE", + id = "TOPOLOGY_ILLEGAL_REMOVAL_OF_ACTIVE_TRANSACTIONS", ErrorCategory.InvalidGivenCurrentSystemStateOther, ) { + private val maxDisplayed = 10 + final case class ParticipantStillHostsParties( participantId: ParticipantId, parties: Seq[PartyId], @@ -513,10 +503,14 @@ object TopologyManagerError extends TopologyManagerErrorGroup { override val loggingContext: ErrorLoggingContext ) extends CantonError.Impl( cause = - s"Cannot remove synchronizer trust certificate for $participantId because it still hosts parties ${parties.sorted - .mkString(",")}" + s"Cannot remove synchronizer trust certificate or owner to key mapping for $participantId because it still hosts parties ${parties.sorted + .take(maxDisplayed) + .mkString(",")} ${if (parties.sizeIs >= maxDisplayed) + s" (only showing first $maxDisplayed of ${parties.size})" + else ""}" ) with TopologyManagerError + } @Explanation( @@ -686,6 +680,13 @@ object TopologyManagerError extends TopologyManagerErrorGroup { s"Members ${members.sorted} tried to rejoin a synchronizer which they had previously left." 
) with TopologyManagerError + final case class RejectNewKeys(member: Member)(implicit + override val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = s"Member $member tried to re-register its keys which they had previously removed." + ) + with TopologyManagerError + } @Explanation( @@ -747,25 +748,6 @@ object TopologyManagerError extends TopologyManagerErrorGroup { with TopologyManagerError } - @Explanation( - """This error indicates that the topology transactions weren't processed in the allotted time.""" - ) - @Resolution( - "Contact the node administrator to check the result of processing the topology transactions." - ) - object TimeoutWaitingForTransaction - extends ErrorCode( - id = "TOPOLOGY_TIMEOUT_WAITING_FOR_TRANSACTION", - ErrorCategory.DeadlineExceededRequestStateUnknown, - ) { - final case class Failure()(implicit - val loggingContext: ErrorLoggingContext - ) extends CantonError.Impl( - cause = s"The topology transactions weren't processed in the allotted time." - ) - with TopologyManagerError - } - @Explanation( "This error indicates that there already exists a temporary topology store with the desired identifier." ) @@ -825,22 +807,44 @@ object TopologyManagerError extends TopologyManagerErrorGroup { } @Explanation("This error indicates that the successor synchronizer id is not valid.") - @Resolution( - "Change the successor synchronizer ID to have a protocol version that is the same as or newer than the current synchronizer's." - ) + @Resolution("""Change the physical synchronizer id of the successor so that it satisfies: + |- it is greater than the current physical synchronizer id + |- it is greater than all previous synchronizer announcements + |""") object InvalidSynchronizerSuccessor extends ErrorCode(id = "TOPOLOGY_INVALID_SUCCESSOR", InvalidIndependentOfSystemState) { final case class Reject( - currentSynchronizerId: PhysicalSynchronizerId, successorSynchronizerId: PhysicalSynchronizerId, + details: String, )(implicit val loggingContext: ErrorLoggingContext) extends CantonError.Impl( cause = - s"The declared successor $successorSynchronizerId of synchronizer $currentSynchronizerId is not valid." + s"The declared successor $successorSynchronizerId of synchronizer is not valid: $details" ) with TopologyManagerError + object Reject { + def conflictWithCurrentPSId( + currentSynchronizerId: PhysicalSynchronizerId, + successorSynchronizerId: PhysicalSynchronizerId, + )(implicit loggingContext: ErrorLoggingContext): Reject = + Reject( + successorSynchronizerId, + s"successor id is not greater than current synchronizer id $currentSynchronizerId", + ) + + def conflictWithPreviousAnnouncement( + successorSynchronizerId: PhysicalSynchronizerId, + previouslyAnnouncedSuccessor: PhysicalSynchronizerId, + )(implicit loggingContext: ErrorLoggingContext): Reject = + Reject( + successorSynchronizerId = successorSynchronizerId, + details = + s"conflicts with previous announcement with successor $previouslyAnnouncedSuccessor", + ) + } } + @Explanation( "This error indicates that the synchronizer upgrade announcement specified an invalid upgrade time." ) @@ -866,24 +870,6 @@ object TopologyManagerError extends TopologyManagerErrorGroup { abstract class ParticipantErrorGroup extends ErrorGroup() object ParticipantTopologyManagerError extends ParticipantErrorGroup { - @Explanation( - """This error indicates that a dangerous package vetting command was rejected. - |This is the case when a command is revoking the vetting of a package. 
- |Use the force flag to revoke the vetting of a package.""" - ) - @Resolution("Set the ForceFlag.PackageVettingRevocation if you really know what you are doing.") - object DangerousVettingCommandsRequireForce - extends ErrorCode( - id = "TOPOLOGY_DANGEROUS_VETTING_COMMAND_REQUIRES_FORCE_FLAG", - ErrorCategory.InvalidGivenCurrentSystemStateOther, - ) { - final case class Reject()(implicit val loggingContext: ErrorLoggingContext) - extends CantonError.Impl( - cause = "Revoking a vetted package requires ForceFlag.PackageVettingRevocation" - ) - with TopologyManagerError - } - @Explanation( """This error indicates a vetting request failed due to dependencies not being vetted. |On every vetting request, the set supplied packages is analysed for dependencies. The @@ -919,7 +905,7 @@ object TopologyManagerError extends TopologyManagerErrorGroup { id = "TOPOLOGY_CANNOT_VET_DUE_TO_MISSING_PACKAGES", ErrorCategory.InvalidGivenCurrentSystemStateResourceMissing, ) { - final case class Missing(packages: Ref.PackageId)(implicit + final case class Missing(packages: Set[Ref.PackageId])(implicit val loggingContext: ErrorLoggingContext ) extends CantonError.Impl( cause = "Package vetting failed due to packages not existing on the local node" diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyStateProcessor.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyStateProcessor.scala index 81c632cad1..78eb5ce1ef 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyStateProcessor.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyStateProcessor.scala @@ -16,6 +16,7 @@ import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.sequencing.AsyncResult +import com.digitalasset.canton.topology.TopologyStateProcessor.MaybePending import com.digitalasset.canton.topology.processing.{ EffectiveTime, SequencedTime, @@ -25,12 +26,10 @@ import com.digitalasset.canton.topology.store.* import com.digitalasset.canton.topology.store.TopologyStoreId.AuthorizedStore import com.digitalasset.canton.topology.store.ValidatedTopologyTransaction.GenericValidatedTopologyTransaction import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction +import com.digitalasset.canton.topology.transaction.SignedTopologyTransactions import com.digitalasset.canton.topology.transaction.TopologyMapping.MappingHash import com.digitalasset.canton.topology.transaction.TopologyTransaction.TxHash -import com.digitalasset.canton.topology.transaction.{ - SignedTopologyTransactions, - TopologyMappingChecks, -} +import com.digitalasset.canton.topology.transaction.checks.TopologyMappingChecks import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.{EitherTUtil, ErrorUtil} @@ -58,27 +57,6 @@ class TopologyStateProcessor private ( loggerFactoryParent.append("store", store.storeId.toString) } else loggerFactoryParent - // small container to store potentially pending data - private case class MaybePending(originalTx: GenericSignedTopologyTransaction) - extends PrettyPrinting { - val adjusted = new AtomicReference[Option[GenericSignedTopologyTransaction]](None) - val rejection = new AtomicReference[Option[TopologyTransactionRejection]](None) - val 
expireImmediately = new AtomicBoolean(false) - - def currentTx: GenericSignedTopologyTransaction = adjusted.get().getOrElse(originalTx) - - def validatedTx: GenericValidatedTopologyTransaction = - ValidatedTopologyTransaction(currentTx, rejection.get(), expireImmediately.get()) - - override protected def pretty: Pretty[MaybePending] = - prettyOfClass( - param("original", _.originalTx), - paramIfDefined("adjusted", _.adjusted.get()), - paramIfDefined("rejection", _.rejection.get()), - paramIfTrue("expireImmediately", _.expireImmediately.get()), - ) - } - private val txForMapping = TrieMap[MappingHash, MaybePending]() private val proposalsByMapping = TrieMap[MappingHash, Seq[TxHash]]() private val proposalsForTx = TrieMap[TxHash, MaybePending]() @@ -289,7 +267,7 @@ class TopologyStateProcessor private ( Either.cond( expected == toValidate.serial, (), - TopologyTransactionRejection.SerialMismatch(expected, toValidate.serial), + TopologyTransactionRejection.Processor.SerialMismatch(expected, toValidate.serial), ) case None => Either.unit } @@ -360,15 +338,7 @@ class TopologyStateProcessor private ( effective, tx_deduplicatedAndMerged, tx_inStore, - // TODO(#26009): this creates a new data structure for every check. this is very inefficient. - // even more, we never shrink the map and we keep on iterating through it. - txForMapping.view.mapValues { pending => - require( - !pending.expireImmediately.get() && pending.rejection.get.isEmpty, - s"unexpectedly used rejected or immediately expired tx: $pending", - ) - pending.currentTx - }.toMap, + txForMapping, ) _ <- // we potentially merge the transaction with the currently active if this is just a signature update @@ -467,6 +437,27 @@ class TopologyStateProcessor private ( object TopologyStateProcessor { + // small container to store potentially pending data + private[topology] final case class MaybePending(originalTx: GenericSignedTopologyTransaction) + extends PrettyPrinting { + val adjusted = new AtomicReference[Option[GenericSignedTopologyTransaction]](None) + val rejection = new AtomicReference[Option[TopologyTransactionRejection]](None) + val expireImmediately = new AtomicBoolean(false) + + def currentTx: GenericSignedTopologyTransaction = adjusted.get().getOrElse(originalTx) + + def validatedTx: GenericValidatedTopologyTransaction = + ValidatedTopologyTransaction(currentTx, rejection.get(), expireImmediately.get()) + + override protected def pretty: Pretty[MaybePending] = + prettyOfClass( + param("original", _.originalTx), + paramIfDefined("adjusted", _.adjusted.get()), + paramIfDefined("rejection", _.rejection.get()), + paramIfTrue("expireImmediately", _.expireImmediately.get()), + ) + } + /** Creates a TopologyStateProcessor for topology managers. 
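+    * A usage sketch (it mirrors the call site in SynchronizerTopologyManager earlier in this
+    * file, where `checks` combines the required and, unless disabled, the optional checks):
+    * {{{
+    * TopologyStateProcessor.forTopologyManager(
+    *   store,
+    *   Some(outboxQueue),
+    *   checks,
+    *   crypto.pureCrypto,
+    *   loggerFactory,
+    * )
+    * }}}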
*/ def forTopologyManager[PureCrypto <: CryptoPureApi]( diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/CachingSynchronizerTopologyClient.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/CachingSynchronizerTopologyClient.scala index 51e7f27317..eb34ffc621 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/CachingSynchronizerTopologyClient.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/CachingSynchronizerTopologyClient.scala @@ -19,15 +19,17 @@ import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, import com.digitalasset.canton.protocol.{ DynamicSequencingParametersWithValidity, DynamicSynchronizerParametersWithValidity, + StaticSynchronizerParameters, } import com.digitalasset.canton.time.{Clock, SynchronizerTimeTracker} import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.client.PartyTopologySnapshotClient.PartyInfo import com.digitalasset.canton.topology.processing.* import com.digitalasset.canton.topology.store.{ - PackageDependencyResolverUS, + PackageDependencyResolver, TopologyStore, TopologyStoreId, + UnknownOrUnvettedPackages, } import com.digitalasset.canton.topology.transaction.* import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction @@ -50,6 +52,9 @@ final class CachingSynchronizerTopologyClient( extends SynchronizerTopologyClientWithInit with NamedLogging { + override def staticSynchronizerParameters: StaticSynchronizerParameters = + delegate.staticSynchronizerParameters + override def updateHead( sequencedTimestamp: SequencedTime, effectiveTimestamp: EffectiveTime, @@ -272,9 +277,10 @@ object CachingSynchronizerTopologyClient { def create( clock: Clock, + staticSynchronizerParameters: StaticSynchronizerParameters, store: TopologyStore[TopologyStoreId.SynchronizerStore], synchronizerPredecessor: Option[SynchronizerPredecessor], - packageDependenciesResolver: PackageDependencyResolverUS, + packageDependenciesResolver: PackageDependencyResolver, cachingConfigs: CachingConfigs, batchingConfig: BatchingConfig, timeouts: ProcessingTimeout, @@ -290,6 +296,7 @@ object CachingSynchronizerTopologyClient { val dbClient = new StoreBasedSynchronizerTopologyClient( clock, + staticSynchronizerParameters, store, packageDependenciesResolver, timeouts, @@ -305,7 +312,7 @@ object CachingSynchronizerTopologyClient { futureSupervisor, loggerFactory, ) - headStateInitializer.initialize(caching, synchronizerPredecessor) + headStateInitializer.initialize(caching, synchronizerPredecessor, staticSynchronizerParameters) } } @@ -367,7 +374,7 @@ private class ForwardingTopologySnapshotClient( packageId: PackageId, ledgerTime: CantonTimestamp, vettedPackagesLoader: VettedPackagesLoader, - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Set[PackageId]] = + )(implicit traceContext: TraceContext): FutureUnlessShutdown[UnknownOrUnvettedPackages] = parent.loadUnvettedPackagesOrDependenciesUsingLoader( participant, packageId, @@ -434,10 +441,10 @@ private class ForwardingTopologySnapshotClient( ): FutureUnlessShutdown[Option[PartyKeyTopologySnapshotClient.PartyAuthorizationInfo]] = parent.partyAuthorization(party) - override def isSynchronizerUpgradeOngoing()(implicit + override def synchronizerUpgradeOngoing()(implicit traceContext: TraceContext ): FutureUnlessShutdown[Option[(SynchronizerSuccessor, EffectiveTime)]] = - 
parent.isSynchronizerUpgradeOngoing() + parent.synchronizerUpgradeOngoing() override def sequencerConnectionSuccessors()(implicit traceContext: TraceContext @@ -616,7 +623,7 @@ class CachingTopologySnapshot( packageId: PackageId, ledgerTime: CantonTimestamp, vettedPackagesLoader: VettedPackagesLoader, - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Set[PackageId]] = + )(implicit traceContext: TraceContext): FutureUnlessShutdown[UnknownOrUnvettedPackages] = parent.loadUnvettedPackagesOrDependenciesUsingLoader( participant, packageId, @@ -721,10 +728,10 @@ class CachingTopologySnapshot( ) .map(_.toMap) - override def isSynchronizerUpgradeOngoing()(implicit + override def synchronizerUpgradeOngoing()(implicit traceContext: TraceContext ): FutureUnlessShutdown[Option[(SynchronizerSuccessor, EffectiveTime)]] = - getAndCache(synchronizerUpgradeCache, parent.isSynchronizerUpgradeOngoing()) + getAndCache(synchronizerUpgradeCache, parent.synchronizerUpgradeOngoing()) override def sequencerConnectionSuccessors()(implicit traceContext: TraceContext diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala index 000805cf42..1eae7b54b9 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.topology.client import cats.data.EitherT +import cats.implicits.toFoldableOps import cats.syntax.functor.* import cats.syntax.functorFilter.* import cats.syntax.parallel.* @@ -25,6 +26,7 @@ import com.digitalasset.canton.protocol.{ DynamicSequencingParametersWithValidity, DynamicSynchronizerParameters, DynamicSynchronizerParametersWithValidity, + StaticSynchronizerParameters, } import com.digitalasset.canton.sequencing.TrafficControlParameters import com.digitalasset.canton.sequencing.protocol.MediatorGroupRecipient @@ -38,6 +40,7 @@ import com.digitalasset.canton.topology.processing.{ SequencedTime, TopologyTransactionProcessingSubscriber, } +import com.digitalasset.canton.topology.store.UnknownOrUnvettedPackages import com.digitalasset.canton.topology.transaction.* import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.SingleUseCell @@ -100,6 +103,8 @@ trait TopologyClientApi[+T] { this: HasFutureSupervision => def psid: PhysicalSynchronizerId def synchronizerId: SynchronizerId + def staticSynchronizerParameters: StaticSynchronizerParameters + def protocolVersion: ProtocolVersion = psid.protocolVersion /** Our current snapshot approximation @@ -119,8 +124,9 @@ trait TopologyClientApi[+T] { this: HasFutureSupervision => * As we future date topology transactions, the head snapshot is our latest knowledge of the * topology state, but as it can be still future dated, we need to be careful when actually using * it: the state might not yet be active, as the topology transactions are future dated. - * Therefore, do not act towards the sequencer using this snapshot, but use the - * currentSnapshotApproximation instead. + * Therefore, do not prepare regular transactions using this snapshot, but use the + * currentSnapshotApproximation instead. A head snapshot can be useful, however, for producing + * new topology changes, e.g., for picking the correct serial. 
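+   * An illustrative sketch of the intended split (`client` stands for any TopologyClientApi
+   * instance; this is a hedged example, not prescribed usage):
+   * {{{
+   * // acting towards the sequencer: use the approximation of the currently active state
+   * val current = client.currentSnapshotApproximation
+   * // authoring a new topology transaction, e.g. picking the next serial: head state is fine
+   * val head = client.headSnapshot
+   * }}}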
*/ def headSnapshot(implicit traceContext: TraceContext): T = checked( trySnapshot(topologyKnownUntilTimestamp) @@ -308,7 +314,9 @@ trait PartyTopologySnapshotClient { check: ParticipantAttributes => Boolean = _ => true, )(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, Set[LfPartyId], Unit] - /** Returns true if there is at least one participant that satisfies the predicate */ + /** Returns the set of parties for which there is at least one participant that satisfies the + * predicate + */ def isHostedByAtLeastOneParticipantF( parties: Set[LfPartyId], check: (LfPartyId, ParticipantAttributes) => Boolean, @@ -472,6 +480,13 @@ trait ParticipantTopologySnapshotClient { traceContext: TraceContext ): FutureUnlessShutdown[Boolean] + def participantsWithSupportedFeature( + participants: Set[ParticipantId], + feature: SynchronizerTrustCertificate.ParticipantTopologyFeatureFlag, + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Set[ParticipantId]] + /** Checks whether the provided participant exists, is active and can login at the given point in * time * @@ -581,7 +596,7 @@ trait VettedPackagesSnapshotClient { participantId: ParticipantId, packages: Set[PackageId], ledgerTime: CantonTimestamp, - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Set[PackageId]] + )(implicit traceContext: TraceContext): FutureUnlessShutdown[UnknownOrUnvettedPackages] /** Checks the vetting state for the given packages and returns the packages that have no entry in * the participant's VettedPackages topology transactions. Note: this does not check the vetted @@ -598,6 +613,13 @@ trait VettedPackagesSnapshotClient { participantId: ParticipantId, packageIds: Set[PackageId], )(implicit traceContext: TraceContext): FutureUnlessShutdown[Set[PackageId]] + + /** @return + * all vetted packages + */ + def vettedPackages(participantId: ParticipantId)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Set[VettedPackage]] } trait SynchronizerGovernanceSnapshotClient { @@ -628,8 +650,7 @@ trait SynchronizerGovernanceSnapshotClient { // we must use zero as default change delay parameter, as otherwise static time tests will not work // however, once the synchronizer has published the initial set of synchronizer parameters, the zero time will be // adjusted. - topologyChangeDelay = DynamicSynchronizerParameters.topologyChangeDelayIfAbsent, - protocolVersion = protocolVersion, + protocolVersion = protocolVersion ) } @@ -677,7 +698,7 @@ trait SynchronizerUpgradeClient { * synchronizer id of the successor of this synchronizer and the upgrade time. Otherwise, returns * None. 
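   * A hedged usage sketch (`prepareMigration` is a hypothetical handler, not part of this API):
   * {{{
   * synchronizerUpgradeOngoing().map {
   *   case Some((successor, announcedAt)) => prepareMigration(successor)
   *   case None => () // no upgrade announced
   * }
   * }}}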
*/ - def isSynchronizerUpgradeOngoing()(implicit + def synchronizerUpgradeOngoing()(implicit traceContext: TraceContext ): FutureUnlessShutdown[Option[(SynchronizerSuccessor, EffectiveTime)]] @@ -866,6 +887,19 @@ private[client] trait ParticipantTopologySnapshotLoader extends ParticipantTopol ): FutureUnlessShutdown[Boolean] = findParticipantState(participantId).map(_.isDefined) + override def participantsWithSupportedFeature( + participants: Set[ParticipantId], + feature: SynchronizerTrustCertificate.ParticipantTopologyFeatureFlag, + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Set[ParticipantId]] = for { + participantAttributesMap <- loadParticipantStates(participants.toSeq) + } yield { + participantAttributesMap.collect { + case (pid, attributes) if attributes.features.contains(feature) => pid + }.toSet + } + override def isParticipantActiveAndCanLoginAt( participantId: ParticipantId, timestamp: CantonTimestamp, @@ -889,7 +923,6 @@ private[client] trait ParticipantTopologySnapshotLoader extends ParticipantTopol )(implicit traceContext: TraceContext ): FutureUnlessShutdown[Map[ParticipantId, ParticipantAttributes]] - } private[client] trait PartyTopologySnapshotBaseClient { @@ -1077,18 +1110,18 @@ trait VettedPackagesSnapshotLoader extends VettedPackagesSnapshotClient with Vet packageId: PackageId, ledgerTime: CantonTimestamp, vettedPackagesLoader: VettedPackagesLoader, - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Set[PackageId]] + )(implicit traceContext: TraceContext): FutureUnlessShutdown[UnknownOrUnvettedPackages] override final def findUnvettedPackagesOrDependencies( participantId: ParticipantId, packages: Set[PackageId], ledgerTime: CantonTimestamp, - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Set[PackageId]] = + )(implicit traceContext: TraceContext): FutureUnlessShutdown[UnknownOrUnvettedPackages] = packages.toList .parTraverse(packageId => loadUnvettedPackagesOrDependenciesUsingLoader(participantId, packageId, ledgerTime, this) ) - .map(_.flatten.toSet) + .map(_.combineAll) override final def determinePackagesWithNoVettingEntry( participantId: ParticipantId, @@ -1098,6 +1131,11 @@ trait VettedPackagesSnapshotLoader extends VettedPackagesSnapshotClient with Vet val vettedIds = vettedPackages.keySet packageIds -- vettedIds } + + override def vettedPackages(participantId: ParticipantId)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Set[VettedPackage]] = + loadVettedPackages(participantId).map(vettedPackages => vettedPackages.values.toSet) } trait SynchronizerGovernanceSnapshotLoader extends SynchronizerGovernanceSnapshotClient { diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedSynchronizerTopologyClient.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedSynchronizerTopologyClient.scala index f7323f5b72..7a7a84a857 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedSynchronizerTopologyClient.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedSynchronizerTopologyClient.scala @@ -16,15 +16,16 @@ import com.digitalasset.canton.lifecycle.{ UnlessShutdown, } import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.protocol.StaticSynchronizerParameters import com.digitalasset.canton.time.{Clock, TimeAwaiter} import com.digitalasset.canton.topology.processing.{ApproximateTime, 
EffectiveTime, SequencedTime} import com.digitalasset.canton.topology.store.{ - PackageDependencyResolverUS, + PackageDependencyResolver, TopologyStore, TopologyStoreId, } import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction -import com.digitalasset.canton.topology.{PhysicalSynchronizerId, SynchronizerId} +import com.digitalasset.canton.topology.{ParticipantId, PhysicalSynchronizerId, SynchronizerId} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ErrorUtil import com.digitalasset.daml.lf.data.Ref.PackageId @@ -117,8 +118,9 @@ trait TopologyAwaiter extends FlagCloseable { */ class StoreBasedSynchronizerTopologyClient( val clock: Clock, + val staticSynchronizerParameters: StaticSynchronizerParameters, store: TopologyStore[TopologyStoreId.SynchronizerStore], - packageDependenciesResolver: PackageDependencyResolverUS, + packageDependenciesResolver: PackageDependencyResolver, override val timeouts: ProcessingTimeout, override protected val futureSupervisor: FutureSupervisor, val loggerFactory: NamedLoggerFactory, @@ -207,6 +209,7 @@ class StoreBasedSynchronizerTopologyClient( s"Observed: sequenced=$sequencedTimestamp, effective=$effectiveTimestamp" ) observedInternal(sequencedTimestamp, effectiveTimestamp) + FutureUnlessShutdown.unit } override def numPendingChanges: Int = pendingChanges.get() @@ -214,15 +217,7 @@ class StoreBasedSynchronizerTopologyClient( private def observedInternal( sequencedTimestamp: SequencedTime, effectiveTimestamp: EffectiveTime, - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - - def logIfNoPendingTopologyChanges(): Unit = - if (pendingChanges.decrementAndGet() == 0) { - logger.debug( - s"Effective at $effectiveTimestamp, there are no more pending topology changes (last were from $sequencedTimestamp)" - ) - } - + )(implicit traceContext: TraceContext): Unit = { // we update the head timestamp approximation with the current sequenced timestamp, right now updateHead( sequencedTimestamp, @@ -236,22 +231,31 @@ class StoreBasedSynchronizerTopologyClient( // right keys at the right time. if (effectiveTimestamp.value > sequencedTimestamp.value) { pendingChanges.incrementAndGet() - synchronizerTimeTracker.get match { - // use the synchronizer time tracker if available to figure out time precisely - case Some(timeTracker) => - timeTracker.awaitTick(effectiveTimestamp.value) match { - case Some(future) => - future.foreach { _ => - updateHead( - sequencedTimestamp, - effectiveTimestamp, - ApproximateTime(effectiveTimestamp.value), - potentialTopologyChange = true, - ) - logIfNoPendingTopologyChanges() - } - // the effective timestamp has already been witnessed - case None => + + // As a last resort, use the time tracker for precise time calculation. This solution does not scale well + // on its own, as all members can trigger time proofs and flood sequencers. Note that the time tracker sends + // a time proof if it's not cancelled, the observation time has elapsed, and the time-advancing broadcast was + // ineffective (e.g., was triggered too early). 
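+      // (Descriptive note: `pendingChanges` was incremented just above; `awaitEffectiveTime`
+      // decrements it again, via `logIfNoPendingTopologyChanges`, once the head state has
+      // caught up with the effective time.)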
+ awaitEffectiveTime(effectiveTimestamp, sequencedTimestamp) + } + } + + private def awaitEffectiveTime( + effectiveTimestamp: EffectiveTime, + sequencedTimestamp: SequencedTime, + )(implicit traceContext: TraceContext): Unit = { + def logIfNoPendingTopologyChanges(): Unit = + if (pendingChanges.decrementAndGet() == 0) { + logger.debug( + s"Effective at $effectiveTimestamp, there are no more pending topology changes (last were from $sequencedTimestamp)" + ) + } + + synchronizerTimeTracker.get match { + case Some(timeTracker) => + timeTracker.awaitTick(effectiveTimestamp.value) match { + case Some(future) => + future.foreach { _ => updateHead( sequencedTimestamp, effectiveTimestamp, @@ -259,12 +263,20 @@ class StoreBasedSynchronizerTopologyClient( potentialTopologyChange = true, ) logIfNoPendingTopologyChanges() - } - case None => - logger.warn("Not advancing the time using the time tracker as it's unavailable") - } + } + case None => + // the effective timestamp has already been witnessed + updateHead( + sequencedTimestamp, + effectiveTimestamp, + ApproximateTime(effectiveTimestamp.value), + potentialTopologyChange = true, + ) + logIfNoPendingTopologyChanges() + } + case None => + logger.warn("Not advancing the time using the time tracker as it's unavailable") } - FutureUnlessShutdown.unit } /** Returns whether a snapshot for the given timestamp is available. */ @@ -360,11 +372,11 @@ class StoreBasedSynchronizerTopologyClient( object StoreBasedSynchronizerTopologyClient { - object NoPackageDependencies extends PackageDependencyResolverUS { + object NoPackageDependencies extends PackageDependencyResolver { override def packageDependencies(packagesId: PackageId)(implicit traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, PackageId, Set[PackageId]] = - EitherT[FutureUnlessShutdown, PackageId, Set[PackageId]]( + ): EitherT[FutureUnlessShutdown, (PackageId, ParticipantId), Set[PackageId]] = + EitherT[FutureUnlessShutdown, (PackageId, ParticipantId), Set[PackageId]]( FutureUnlessShutdown.pure(Right(Set.empty[PackageId])) ) } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedTopologySnapshot.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedTopologySnapshot.scala index 22bd1fa52c..eed9ce5f1c 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedTopologySnapshot.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedTopologySnapshot.scala @@ -8,7 +8,6 @@ import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.crypto.{KeyPurpose, SigningKeyUsage} import com.digitalasset.canton.data.{CantonTimestamp, SynchronizerSuccessor} -import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.protocol.{ @@ -42,7 +41,7 @@ import scala.reflect.ClassTag class StoreBasedTopologySnapshot( val timestamp: CantonTimestamp, store: TopologyStore[TopologyStoreId], - packageDependencyResolver: PackageDependencyResolverUS, + packageDependencyResolver: PackageDependencyResolver, val loggerFactory: NamedLoggerFactory, )(implicit val executionContext: ExecutionContext) extends TopologySnapshotLoader @@ -106,29 +105,38 @@ class StoreBasedTopologySnapshot( 
.getOrElse(FutureUnlessShutdown.pure(Map.empty)) override private[client] def loadUnvettedPackagesOrDependenciesUsingLoader( - participant: ParticipantId, + participantId: ParticipantId, packageId: PackageId, ledgerTime: CantonTimestamp, vettedPackagesLoader: VettedPackagesLoader, )(implicit traceContext: TraceContext - ): FutureUnlessShutdown[Set[PackageId]] = + ): FutureUnlessShutdown[UnknownOrUnvettedPackages] = for { - vetted <- vettedPackagesLoader.loadVettedPackages(participant) + vetted <- vettedPackagesLoader.loadVettedPackages(participantId) validAtLedgerTime = (pkg: PackageId) => vetted.get(pkg).exists(_.validAt(ledgerTime)) // check that the main package is vetted res <- if (!validAtLedgerTime(packageId)) // main package is not vetted - FutureUnlessShutdown.pure(Set(packageId)) + FutureUnlessShutdown.pure(UnknownOrUnvettedPackages.unvetted(participantId, packageId)) else { // check which of the dependencies aren't vetted packageDependencyResolver .packageDependencies(packageId) - .map(dependencies => dependencies.filter(dependency => !validAtLedgerTime(dependency))) - .leftMap(Set(_)) - .merge - + .value + .map { + case Left((unknown, unknownTo)) => + UnknownOrUnvettedPackages.unknown( + unknownTo, + unknown, + ) + case Right(dependencies) => + UnknownOrUnvettedPackages.unvetted( + participantId, + dependencies.filter(dependency => !validAtLedgerTime(dependency)), + ) + } } } yield res @@ -334,7 +342,7 @@ class StoreBasedTopologySnapshot( participantId -> ParticipantAttributes( reducedPermission, participantAttributes.loginAfter, - onboarding, + onboarding = onboarding, ) } }.toMap @@ -479,19 +487,17 @@ class StoreBasedTopologySnapshot( private def getParticipantsWithCertificates( storedTxs: StoredTopologyTransactions[Replace, TopologyMapping] - )(implicit traceContext: TraceContext): Set[ParticipantId] = storedTxs - .collectOfMapping[SynchronizerTrustCertificate] - .result - .groupBy(_.mapping.participantId) - .collect { case (pid, seq) => - // invoke collectLatestMapping only to warn in case a participantId's synchronizer trust certificate is not unique - collectLatestMapping( - TopologyMapping.Code.SynchronizerTrustCertificate, - seq.sortBy(_.validFrom), - ).discard - pid - } - .toSet + )(implicit traceContext: TraceContext): Map[ParticipantId, SynchronizerTrustCertificate] = + storedTxs + .collectOfMapping[SynchronizerTrustCertificate] + .result + .groupBy(_.mapping.participantId) + .flatMap { case (pid, seq) => + collectLatestMapping( + TopologyMapping.Code.SynchronizerTrustCertificate, + seq.sortBy(_.validFrom), + ).map(pid -> _) + } private def getParticipantsWithCertAndKeys( storedTxs: StoredTopologyTransactions[Replace, TopologyMapping], @@ -538,7 +544,7 @@ class StoreBasedTopologySnapshot( participantsFilter: Seq[ParticipantId] )(implicit traceContext: TraceContext - ): FutureUnlessShutdown[Map[ParticipantId, ParticipantSynchronizerPermission]] = + ): FutureUnlessShutdown[Map[ParticipantId, ParticipantAttributes]] = for { // Looks up synchronizer parameters for default rate limits. synchronizerParametersState <- findTransactions( @@ -575,11 +581,12 @@ class StoreBasedTopologySnapshot( } yield { // 1. Participant needs to have requested access to synchronizer by issuing a synchronizer trust certificate val participantsWithCertificates = getParticipantsWithCertificates(storedTxs) + val participantsIdsWithCertificates = participantsWithCertificates.keySet // 2. 
Participant needs to have keys registered on the synchronizer val participantsWithCertAndKeys = - getParticipantsWithCertAndKeys(storedTxs, participantsWithCertificates) + getParticipantsWithCertAndKeys(storedTxs, participantsIdsWithCertificates) // Warn about participants with cert but no keys - (participantsWithCertificates -- participantsWithCertAndKeys).foreach { pid => + (participantsIdsWithCertificates -- participantsWithCertAndKeys).foreach { pid => logger.warn( s"Participant $pid has a synchronizer trust certificate, but no keys on synchronizer ${synchronizerParametersState.synchronizerId}" ) @@ -588,36 +595,41 @@ class StoreBasedTopologySnapshot( val participantSynchronizerPermissions = getParticipantSynchronizerPermissions(storedTxs, participantsWithCertAndKeys) - val participantIdSynchronizerPermissionsMap = participantsWithCertAndKeys.toSeq.mapFilter { - pid => - if ( - synchronizerParametersState.parameters.onboardingRestriction.isRestricted && !participantSynchronizerPermissions - .contains(pid) - ) { - // 4a. If the synchronizer is restricted, we must have found a ParticipantSynchronizerPermission for the participants, otherwise - // the participants shouldn't have been able to onboard to the synchronizer in the first place. - // In case we don't find a ParticipantSynchronizerPermission, we don't return the participant with default permissions, but we skip it. - logger.warn( - s"Unable to find ParticipantSynchronizerPermission for participant $pid on synchronizer ${synchronizerParametersState.synchronizerId} with onboarding restrictions ${synchronizerParametersState.parameters.onboardingRestriction} at $referenceTime" + participantsWithCertAndKeys.toSeq.mapFilter { pid => + val supportedFeatures = + participantsWithCertificates.get(pid).toList.flatMap(_.featureFlags) + if ( + synchronizerParametersState.parameters.onboardingRestriction.isRestricted && !participantSynchronizerPermissions + .contains(pid) + ) { + // 4a. If the synchronizer is restricted, we must have found a ParticipantSynchronizerPermission for the participants, otherwise + // the participants shouldn't have been able to onboard to the synchronizer in the first place. + // In case we don't find a ParticipantSynchronizerPermission, we don't return the participant with default permissions, but we skip it. + logger.warn( + s"Unable to find ParticipantSynchronizerPermission for participant $pid on synchronizer ${synchronizerParametersState.synchronizerId} with onboarding restrictions ${synchronizerParametersState.parameters.onboardingRestriction} at $referenceTime" + ) + None + } else { + val permissions = participantSynchronizerPermissions + .getOrElse( + pid, + ParticipantSynchronizerPermission + .default(synchronizerParametersState.synchronizerId, pid), ) - None - } else { - // 4b. Apply default permissions/trust of submission/ordinary if missing participant synchronizer permission and - // grab rate limits from dynamic synchronizer parameters if not specified - Some( - pid -> participantSynchronizerPermissions - .getOrElse( - pid, - ParticipantSynchronizerPermission - .default(synchronizerParametersState.synchronizerId, pid), - ) - .setDefaultLimitIfNotSet( - DynamicSynchronizerParameters.defaultParticipantSynchronizerLimits - ) + .setDefaultLimitIfNotSet( + DynamicSynchronizerParameters.defaultParticipantSynchronizerLimits ) - } + // 4b. 
Apply default permissions/trust of submission/ordinary if missing participant synchronizer permission and + // grab rate limits from dynamic synchronizer parameters if not specified + Some( + pid -> ParticipantAttributes( + permissions.permission, + permissions.loginAfter, + supportedFeatures, + ) + ) + } }.toMap - participantIdSynchronizerPermissionsMap } override def loadParticipantStates( @@ -628,9 +640,7 @@ class StoreBasedTopologySnapshot( if (participants.isEmpty) FutureUnlessShutdown.pure(Map()) else - loadParticipantStatesHelper(participants).map(_.map { case (pid, pdp) => - pid -> pdp.toParticipantAttributes - }) + loadParticipantStatesHelper(participants) /** abstract loading function used to obtain the full key collection for a key owner */ override def allKeys(owner: Member)(implicit @@ -672,20 +682,25 @@ class StoreBasedTopologySnapshot( SynchronizerTrustCertificate.code, MediatorSynchronizerState.code, SequencerSynchronizerState.code, + OwnerToKeyMapping.code, // TODO(#28232) remove once OTK / STC invariant is enforce ), filterUid = None, filterNamespace = None, - ).map( - _.result.view - .map(_.mapping) + ).map { txs => + val mappings = txs.result.view.map(_.mapping) + val validKeys = mappings.collect { case OwnerToKeyMapping(member, _) => + member + }.toSet + mappings .flatMap { case dtc: SynchronizerTrustCertificate => Seq(dtc.participantId) case mds: MediatorSynchronizerState => mds.active ++ mds.observers case sds: SequencerSynchronizerState => sds.active ++ sds.observers case _ => Seq.empty } + .filter(validKeys.contains) .toSet - ) + } override def isMemberKnown(member: Member)(implicit traceContext: TraceContext @@ -703,14 +718,20 @@ class StoreBasedTopologySnapshot( .from(participants) .map { participantsNE => findTransactions( - types = Seq(SynchronizerTrustCertificate.code), + types = Seq(SynchronizerTrustCertificate.code, OwnerToKeyMapping.code), filterUid = Some(participantsNE.toSeq), filterNamespace = None, - ).map( - _.collectOfMapping[SynchronizerTrustCertificate].result + ).map { txs => + val mappings = txs.result.map(_.mapping) + val hasValidKeys = + mappings.flatMap(_.select[OwnerToKeyMapping].toList).map(_.member).toSet + txs + .collectOfMapping[SynchronizerTrustCertificate] + .result .map(_.mapping.participantId: Member) + .filter(hasValidKeys.contains) .toSet - ) + } } .getOrElse(FutureUnlessShutdown.pure(Set.empty[Member])) @@ -822,7 +843,7 @@ class StoreBasedTopologySnapshot( } } - override def isSynchronizerUpgradeOngoing()(implicit + override def synchronizerUpgradeOngoing()(implicit traceContext: TraceContext ): FutureUnlessShutdown[Option[(SynchronizerSuccessor, EffectiveTime)]] = findTransactions( diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/SynchronizerTopologyClientHeadStateInitializer.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/SynchronizerTopologyClientHeadStateInitializer.scala index 91f8d19092..d918d568d7 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/SynchronizerTopologyClientHeadStateInitializer.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/SynchronizerTopologyClientHeadStateInitializer.scala @@ -5,6 +5,8 @@ package com.digitalasset.canton.topology.client import com.digitalasset.canton.data.SynchronizerPredecessor import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.protocol.StaticSynchronizerParameters +import 
com.digitalasset.canton.time.NonNegativeFiniteDuration import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} import com.digitalasset.canton.topology.store.{TopologyStore, TopologyStoreId} import com.digitalasset.canton.tracing.TraceContext @@ -17,6 +19,7 @@ trait SynchronizerTopologyClientHeadStateInitializer { def initialize( client: SynchronizerTopologyClientWithInit, synchronizerPredecessor: Option[SynchronizerPredecessor], + staticSynchronizerParameters: StaticSynchronizerParameters, )(implicit executionContext: ExecutionContext, traceContext: TraceContext, @@ -35,10 +38,11 @@ object SynchronizerTopologyClientHeadStateInitializer { def computeInitialHeadUpdate( maxTimestamp: Option[(SequencedTime, EffectiveTime)], synchronizerPredecessor: Option[SynchronizerPredecessor], + topologyChangeDelay: NonNegativeFiniteDuration, ): Option[(SequencedTime, EffectiveTime)] = { val upgradeTimestamps: Option[(SequencedTime, EffectiveTime)] = synchronizerPredecessor .map(_.upgradeTime) - .map(ts => (SequencedTime(ts), EffectiveTime(ts))) + .map(ts => (SequencedTime(ts), EffectiveTime(ts) + topologyChangeDelay)) /* On the successor (so if the predecessor is defined), then the topology is known until the upgrade time. @@ -57,6 +61,7 @@ final class DefaultHeadStateInitializer(store: TopologyStore[TopologyStoreId.Syn override def initialize( client: SynchronizerTopologyClientWithInit, synchronizerPredecessor: Option[SynchronizerPredecessor], + staticSynchronizerParameters: StaticSynchronizerParameters, )(implicit executionContext: ExecutionContext, traceContext: TraceContext, @@ -68,6 +73,7 @@ final class DefaultHeadStateInitializer(store: TopologyStore[TopologyStoreId.Syn .computeInitialHeadUpdate( maxTimestamp, synchronizerPredecessor, + staticSynchronizerParameters.topologyChangeDelay, ) .foreach { case (sequenced, effective) => client.updateHead( diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/InitialTopologySnapshotValidator.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/InitialTopologySnapshotValidator.scala index 9009044f0d..d050c48d93 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/InitialTopologySnapshotValidator.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/InitialTopologySnapshotValidator.scala @@ -6,10 +6,9 @@ package com.digitalasset.canton.topology.processing import cats.data.EitherT import com.daml.nonempty.NonEmpty import com.daml.nonempty.NonEmptyReturningOps.* -import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.crypto.CryptoPureApi import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.topology.TopologyStateProcessor import com.digitalasset.canton.topology.store.StoredTopologyTransactions.GenericStoredTopologyTransactions import com.digitalasset.canton.topology.store.{ @@ -18,15 +17,16 @@ import com.digitalasset.canton.topology.store.{ TopologyStore, TopologyStoreId, } +import com.digitalasset.canton.topology.transaction.checks.RequiredTopologyMappingChecks import com.digitalasset.canton.topology.transaction.{ SignedTopologyTransaction, TopologyChangeOp, TopologyMapping, - ValidatingTopologyMappingChecks, } import com.digitalasset.canton.tracing.TraceContext -import 
com.digitalasset.canton.util.EitherTUtil import com.digitalasset.canton.util.MonadUtil.syntax.* +import org.apache.pekko.stream.Materializer +import org.apache.pekko.stream.scaladsl.{Sink, Source} import scala.concurrent.ExecutionContext @@ -48,19 +48,15 @@ import scala.concurrent.ExecutionContext class InitialTopologySnapshotValidator( pureCrypto: CryptoPureApi, store: TopologyStore[TopologyStoreId], - timeouts: ProcessingTimeout, - loggerFactory: NamedLoggerFactory, -)(implicit ec: ExecutionContext) - extends TopologyTransactionHandling( - store, - timeouts, - loggerFactory, - ) { + validateInitialSnapshot: Boolean, + override val loggerFactory: NamedLoggerFactory, +)(implicit ec: ExecutionContext, materializer: Materializer) + extends NamedLogging { - override protected lazy val stateProcessor: TopologyStateProcessor = + protected val stateProcessor: TopologyStateProcessor = TopologyStateProcessor.forInitialSnapshotValidation( store, - new ValidatingTopologyMappingChecks(store, loggerFactory), + new RequiredTopologyMappingChecks(store, loggerFactory, relaxSynchronizerStateChecks = true), pureCrypto, loggerFactory, ) @@ -77,28 +73,98 @@ class InitialTopologySnapshotValidator( final def validateAndApplyInitialTopologySnapshot( initialSnapshot: GenericStoredTopologyTransactions )(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, String, Unit] = { + val finalSnapshot = preprocessInitialSnapshot(initialSnapshot) + if (!validateInitialSnapshot) { + logger.info("Skipping initial topology snapshot validation") + EitherT.right(store.bulkInsert(finalSnapshot)) + } else { + logger.info( + s"Validating ${finalSnapshot.result.size}/${initialSnapshot.result.size} transactions to initialize the topology store ${store.storeId}" + ) + val groupedBySequencedTime: Seq[ + ( + (SequencedTime, EffectiveTime), + Seq[StoredTopologyTransaction[TopologyChangeOp, TopologyMapping]], + ) + ] = finalSnapshot.result + // process transactions from the same timestamp at once + .groupBy(stored => (stored.sequenced, stored.validFrom)) + .toSeq + // ensure that the groups of transactions come in the correct order + .sortBy { case (timestamps, _) => timestamps } + + for { + _ <- EitherT.right(store.deleteAllData()) + _ <- + groupedBySequencedTime.sequentialTraverse_ { case ((sequenced, validFrom), storedTxs) => + processTransactionsAtSequencedTime(sequenced, validFrom, storedTxs) + } + + // this comparison of the input and output topology snapshots serves as a security guard/barrier + mismatch <- + EitherT + .right( + Source(finalSnapshot.result) + .zip( + store.findEssentialStateAtSequencedTime( + SequencedTime.MaxValue, + includeRejected = true, + ) + ) + .zipWithIndex + .dropWhile { case ((fromInitial, fromStore), _) => + // we don't do a complete == comparison, because the snapshot might contain transactions with superfluous + // signatures that are now filtered out. As long as the hash, validFrom, validUntil, isProposal and rejection reason + // agree between initial and stored topology transaction, we accept the result. 
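+ // (the first pair that fails this check, if any, surfaces as `mismatch` below, together with its index in the snapshot)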
+ StoredTopologyTransaction.equalIgnoringSignatures(fromInitial, fromStore) + } + .runWith(Sink.headOption) + ) + .mapK(FutureUnlessShutdown.outcomeK) + _ <- EitherT.fromEither[FutureUnlessShutdown]( + mismatch + .map { case ((fromInitial, fromStore), idx) => + s"""Mismatch between transactions at index $idx from the initial snapshot and the topology store: + |Initial: $fromInitial + |Store: $fromStore""".stripMargin + } + .toLeft(()) + ) + } yield { + logger.info( + s"Successfully validated and imported ${finalSnapshot.result.size} topology transactions into the topology store ${store.storeId}." + ) + } + } + } - // the following preprocessing is necessary because the topology transactions have been assigned the same timestamp - // upon export and it's possible that the following situation happened: - // --------------- - // original store: - // ts1: tx hashOfSignatures = h1, validFrom = ts1, validUntil = ts2 - // ts2: tx hashOfSignatures = h1, validFrom = ts2 - // --------------- - // since the topology transaction was stored at two different timestamps, they were inserted into the table just as expected. - // but upon export the transactions have the same timestamp: - // --------------- - // initial snapshot: - // ts1: tx hashOfSignatures = h1, validFrom = ts1, validUntil = ts1 - // ts1: tx hashOfSignatures = h1, validFrom = ts1 - // --------------- - // Therefore the second insert would be ignored because of the deduplication via the unique index and "on conflict do nothing". - // To work around this, we combine the two transaction entries (they are literally the same) by doing the following: - // * take the validFrom value from the first occurrence - // * take the validUntil value from the last occurrence - // * only retain the first occurrence of the transaction with the updated validFrom/validUntil. We need to do this - // because there could be another transaction between the duplicates, that depends on the first duplicate to have been valid. - val finalSnapshot = StoredTopologyTransactions( + /** The following preprocessing is necessary because the topology transactions have been assigned + * the same timestamp upon export and it's possible that the following situation happened: + * {{{ + * original store: + * ts1: tx hashOfSignatures = h1, validFrom = ts1, validUntil = ts2 + * ts2: tx hashOfSignatures = h1, validFrom = ts2 + * }}} + * Since the topology transaction was stored at two different timestamps, they were inserted into + * the table just as expected. But upon export the transactions have the same timestamp: + * {{{ + * initial snapshot: + * ts1: tx hashOfSignatures = h1, validFrom = ts1, validUntil = ts1 + * ts1: tx hashOfSignatures = h1, validFrom = ts1 + * }}} + * Therefore the second insert would be ignored because of the deduplication via + * the unique index and "on conflict do nothing". To work around this, we combine the two + * transaction entries (they are literally the same) by doing the following: + * - take the validFrom value from the first occurrence + * - take the validUntil value from the last occurrence + * - only retain the first occurrence of the transaction with the updated validFrom/validUntil. + * We need to do this because there could be another transaction between the duplicates that + * depends on the first duplicate having been valid. 
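+ * For the example above, preprocessing would therefore yield a single entry (an illustration of the rules above, not literal store output; validFrom comes from the first occurrence, validUntil from the last, here absent): + * {{{ + * preprocessed snapshot: + * ts1: tx hashOfSignatures = h1, validFrom = ts1 + * }}}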
+ */ + private def preprocessInitialSnapshot( + initialSnapshot: GenericStoredTopologyTransactions + )(implicit traceContext: TraceContext): GenericStoredTopologyTransactions = + StoredTopologyTransactions( initialSnapshot.result // first retain the global order of the topology transactions within the snapshot .zipWithIndex @@ -148,7 +214,8 @@ class InitialTopologySnapshotValidator( .map { nonRejectedNE => val (txWithMinIndex, minIndex) = nonRejectedNE.minBy1 { case (tx_, idx) => idx } val (txWithMaxIndex, _) = nonRejectedNE.maxBy1 { case (tx_, idx) => idx } - val retainedTransaction = txWithMinIndex.copy(validUntil = txWithMaxIndex.validUntil) + val retainedTransaction = + txWithMinIndex.copy(validUntil = txWithMaxIndex.validUntil) if (nonRejectedNE.sizeIs > 1) { logger.info(s"""Combining duplicate valid transactions at $sequenced |originals: $nonRejected @@ -171,87 +238,31 @@ class InitialTopologySnapshotValidator( .map { case (tx, _idx) => tx } ) - logger.debug( - s"Validating ${finalSnapshot.result.size}/${initialSnapshot.result.size} transactions to initialize the topology store ${store.storeId}" - ) - val groupedBySequencedTime: Seq[ - ( - (SequencedTime, EffectiveTime), - Seq[StoredTopologyTransaction[TopologyChangeOp, TopologyMapping]], - ) - ] = finalSnapshot.result - // process transactions from the same timestamp at once - .groupBy(stored => (stored.sequenced, stored.validFrom)) - .toSeq - // ensure that the groups of transactions come in the correct order - .sortBy { case (timestamps, _) => timestamps } - - for { - _ <- EitherT.right(store.deleteAllData()) - _ <- - groupedBySequencedTime.sequentialTraverse_ { case ((sequenced, validFrom), storedTxs) => - processTransactionsAtSequencedTime(sequenced, validFrom, storedTxs) - } - snapshotFromStore <- EitherT.right( - store.findEssentialStateAtSequencedTime(SequencedTime.MaxValue, includeRejected = true) - ) - // this comparison of the input and output topology snapshots serves as a security guard/barrier - _ <- finalSnapshot.result.zip(snapshotFromStore.result).zipWithIndex.sequentialTraverse_ { - case ((fromInitial, fromStore), idx) => - EitherTUtil.condUnitET[FutureUnlessShutdown]( - // we don't do a complete == comparison, because the snapshot might contain transactions with superfluous - // signatures that are now filtered out. As long as the hash, validFrom, validUntil, isProposal and rejection reason - // agree between initial and stored topology transaciton, we accept the result. 
- StoredTopologyTransaction.equalIgnoringSignatures(fromInitial, fromStore), - s"""Mismatch between transactions at index $idx from the initial snapshot and the topology store: - |Initial: $fromInitial - |Store: $fromStore""".stripMargin, - ) - } - } yield () - } - private def processTransactionsAtSequencedTime( sequenced: SequencedTime, - effectiveTimeFromSnapshot: EffectiveTime, + effectiveTime: EffectiveTime, storedTxs: Seq[StoredTopologyTransaction[TopologyChangeOp, TopologyMapping]], )(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, String, Unit] = - for { - effectiveTime <- EitherT - .right( - timeAdjuster - .trackAndComputeEffectiveTime(sequenced, strictMonotonicity = true) - ) - - _ <- EitherTUtil.condUnitET[FutureUnlessShutdown]( - effectiveTime == effectiveTimeFromSnapshot, - s"The computed effective time ($effectiveTime) for sequenced time ($sequenced) is different than the effective time from the topology snapshot ($effectiveTimeFromSnapshot).", + EitherT + .right( + stateProcessor + .validateAndApplyAuthorization( + sequenced, + effectiveTime, + storedTxs.map(_.transaction), + expectFullAuthorization = false, + // when validating the initial snapshot, missing signing key signatures are only + // acceptable, if the transaction was part of the genesis topology snapshot + transactionMayHaveMissingSigningKeySignatures = + sequenced.value == SignedTopologyTransaction.InitialTopologySequencingTime, + // The snapshot might contain transactions that only add additional + // signatures. Since the genesis snapshot usually has all transactions at the same timestamp + // and we compare the provided snapshot with what is stored one-by-one, + // we don't want to compact during the validation of the initial snapshot, as this + // would combine multiple transactions into one. + compactTransactions = false, + ) ) + .map(_ => ()) - validationResult <- EitherT - .right( - stateProcessor - .validateAndApplyAuthorization( - sequenced, - effectiveTime, - storedTxs.map(_.transaction), - expectFullAuthorization = false, - // when validating the initial snapshot, missing signing key signatures are only - // acceptable, if the transaction was part of the genesis topology snapshot - transactionMayHaveMissingSigningKeySignatures = - sequenced.value == SignedTopologyTransaction.InitialTopologySequencingTime, - // The snapshot might contain transactions that only add additional - // signatures. Since the genesis snapshot usually has all transactions at the same timestamp - // and we compare the provided snapshot with what is stored one-by-one, - // we don't want to compact during the validation of the initial snapshot, as this - // would combine multiple transactions into one. 
- compactTransactions = false, - ) - ) - (validatedTxs, _) = validationResult - _ = inspectAndAdvanceTopologyTransactionDelay( - effectiveTime, - validatedTxs, - ) - } yield () } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TerminateProcessing.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TerminateProcessing.scala index cb551329ec..b89a97c7b9 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TerminateProcessing.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TerminateProcessing.scala @@ -30,6 +30,8 @@ trait TerminateProcessing { def notifyUpgradeAnnouncement(successor: SynchronizerSuccessor)(implicit traceContext: TraceContext ): Unit + + def notifyUpgradeCancellation()(implicit traceContext: TraceContext): Unit } object TerminateProcessing { @@ -53,5 +55,7 @@ object TerminateProcessing { override def notifyUpgradeAnnouncement(successor: SynchronizerSuccessor)(implicit traceContext: TraceContext ): Unit = () + + override def notifyUpgradeCancellation()(implicit traceContext: TraceContext): Unit = () } } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTimestampPlusEpsilonTracker.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTimestampPlusEpsilonTracker.scala deleted file mode 100644 index b9142e892e..0000000000 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTimestampPlusEpsilonTracker.scala +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.processing - -import com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.time.* -import com.digitalasset.canton.topology.store.TopologyStore.Change -import com.digitalasset.canton.topology.store.{TopologyStore, TopologyStoreId} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.ErrorUtil - -import java.util.concurrent.atomic.AtomicReference -import scala.concurrent.ExecutionContext - -/** Computes the effective timestamps of topology transactions - * - * Transaction validation and processing depends on the topology state at the given sequencing - * time. If topology transactions became effective immediately, we would have to inspect every - * event first if there is a topology state and wait until all the topology processing has finished - * before evaluating the transaction. This would cause a sequential bottleneck. - * - * To avoid this bottleneck, topology transactions become effective only in the future at an - * "effective time", where effective time >= sequencingTime + - * synchronizerParameters.topologyChangeDelay. - * - * However, the synchronizerParameters can change and so can the topologyChangeDelay. So it is - * non-trivial to apply the right topologyChangeDelay. Also, if the topologyChangeDelay is - * decreased, the effective timestamps of topology transactions could "run backwards", which would - * break topology management. 
- * - * This class computes the effective timestamps of topology transactions from their sequencing - * timestamps. It makes sure that effective timestamps are strictly monotonically increasing. For - * better performance, it keeps the current as well as all future topologyChangeDelays in memory, - * so that all computations can be performed without reading from the database. - */ -class TopologyTimestampPlusEpsilonTracker( - store: TopologyStore[TopologyStoreId], - val timeouts: ProcessingTimeout, - val loggerFactory: NamedLoggerFactory, -)(implicit executionContext: ExecutionContext) - extends NamedLogging { - - /** Stores a topologyChangeDelay together with its validity. */ - private case class State(topologyChangeDelay: NonNegativeFiniteDuration, validFrom: EffectiveTime) - - /** The currently active as well as all future states, sorted by `State.validFrom` in descending - * order - */ - private val states = new AtomicReference[List[State]](List.empty) - - /** The maximum effective time assigned to a topology transaction. Used to enforce that the - * effective time is strictly monotonically increasing. - * - * A value of `EffectiveTime.MaxValue` indicates that the tracker has not yet been initialized. - */ - private val maximumEffectiveTime = - new AtomicReference[EffectiveTime](EffectiveTime.MaxValue) - - /** Computes the effective time for a given sequencing time. The computed effective time is - * strictly monotonically increasing if requested and monotonically increasing otherwise. - * - * The computed effective time is at least sequencingTime + topologyChangeDelay(sequencingTime). - * - * The methods of this tracker must be called as follows: - * 1. Make sure the topologyStore contains all topology transactions effective at - * `sequencedTime`. - * 1. Call trackAndComputeEffectiveTime for the first sequenced event. This will also - * initialize the tracker. - * 1. Call adjustTopologyChangeDelay, if the sequenced event at `sequencedTime` contains a - * valid topology transaction that changes the topologyChangeDelay. - * 1. Call trackAndComputeEffectiveTime for the next sequenced event. - * 1. Go to 3. - * - * @param strictMonotonicity - * whether the returned effective time must be strictly greater than the previous one computed. - * As it changes internal state, all synchronizer members must call - * `trackAndComputeEffectiveTime(sequencedTime, true)` for exactly the same `sequencedTime`s - * (in ascending order). In practice, `strictMonotonicity` should be true iff the underlying - * sequenced event contains at least one topology transaction, it is addressed to - * [[com.digitalasset.canton.sequencing.protocol.AllMembersOfSynchronizer]] and it has no - * topologyTimestamp. - */ - def trackAndComputeEffectiveTime(sequencedTime: SequencedTime, strictMonotonicity: Boolean)( - implicit traceContext: TraceContext - ): FutureUnlessShutdown[EffectiveTime] = for { - // initialize the tracker, if necessary - _ <- - if (maximumEffectiveTime.get() == EffectiveTime.MaxValue) - initialize(sequencedTime) - else FutureUnlessShutdown.unit - } yield { - cleanup(sequencedTime) - - // This is sequencedTime + topologyChangeDelay(sequencedTime) - val rawEffectiveTime = rawEffectiveTimeOf(sequencedTime) - - if (strictMonotonicity) { - // All synchronizer members run this branch for the same sequenced times. - // Therefore, all members will update maximumEffectiveTime in the same way. 
- - val effectiveTime = - maximumEffectiveTime.updateAndGet(_.immediateSuccessor() max rawEffectiveTime) - - if (effectiveTime != rawEffectiveTime) { - // For simplicity, let's allow the synchronizer to decrease the topologyChangeDelay arbitrarily. - // During a transition period, the effective time needs to be corrected. - logger.info( - s"Computed effective time of $rawEffectiveTime would go backwards. Using $effectiveTime instead." - ) - } - effectiveTime - } else { - // We do not update the state here, as different members run this piece of codes with different sequencing times. - // The effective times computed here are monotonically increasing, even when rawEffectiveTime goes backwards, - // as we bump maximumEffectiveTime whenever a SynchronizerParameterState transaction expires. - maximumEffectiveTime.get() max rawEffectiveTime - } - } - - private def initialize( - sequencedTime: SequencedTime - )(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Unit] = for { - // find the current and upcoming change delays - currentAndUpcomingChangeDelays <- store.findCurrentAndUpcomingChangeDelays(sequencedTime.value) - currentChangeDelay = currentAndUpcomingChangeDelays.last1 - - // Find the maximum stored effective time for the last processed sequenced time BEFORE `sequencedTime` - // for initialization of maximumEffectiveTime. - // Note that this must include rejections and proposals, - // as trackAndComputeEffectiveTime may change maximumEffectiveTime based on proposals / rejections. - maximumTimestampInStoreO <- store.maxTimestamp( - sequencedTime.immediatePredecessor, - includeRejected = true, - ) - maximumEffectiveTimeInStore = maximumTimestampInStoreO - .map { case (_, effective) => effective } - .getOrElse(EffectiveTime.MinValue) - - // Find all topologyChangeDelay transactions that have expired - // - at or after sequencing the current delay - // - before sequencedTime (so that the expiry is in the past) - expiredTopologyDelays <- store.findExpiredChangeDelays( - validUntilMinInclusive = currentChangeDelay.sequenced.value, - validUntilMaxExclusive = sequencedTime.value, - ) - } yield { - - // Initialize state based on current and upcoming change delays - val initialStates = currentAndUpcomingChangeDelays.map { - case Change.TopologyDelay(_, validFrom, _, changeDelay) => State(changeDelay, validFrom) - } - logger.info(s"Initializing with $initialStates...") - states.set(initialStates) - - // Initialize maximumEffectiveTime based on the maximum effective time in the store. - // This will cover all adjustments made within trackAndComputeEffectiveTime. - logger.info(s"Maximum effective time in store is $maximumEffectiveTimeInStore.") - maximumEffectiveTime.set(maximumEffectiveTimeInStore) - - // Make sure that maximumEffectiveTime is at least: - // - the maximum effective time a change delay can "produce" - // - taken over all expired topology change delays. - // This will cover all adjustments made within cleanup. - val maximumEffectiveTimeAtExpiryO = expiredTopologyDelays.collect { - case Change.TopologyDelay(_, _, Some(validUntil), changeDelay) => - validUntil + changeDelay - }.maxOption - logger.info(s"Maximum effective time at expiry is $maximumEffectiveTimeAtExpiryO.") - maximumEffectiveTimeAtExpiryO.foreach(t => maximumEffectiveTime.updateAndGet(_ max t).discard) - } - - /** Adds the correct `topologyChangeDelay` to the given `sequencingTime`. The resulting effective - * time may be non-monotonic. 
- */ - private def rawEffectiveTimeOf( - sequencingTime: SequencedTime - )(implicit traceContext: TraceContext): EffectiveTime = { - val topologyChangeDelay = states - .get() - .collectFirst { - case state if sequencingTime.value > state.validFrom.value => - state.topologyChangeDelay - } - .getOrElse( - ErrorUtil.internalError( - new IllegalArgumentException( - s"Unable to determine effective time for $sequencingTime. State: ${states.get()}" - ) - ) - ) - - EffectiveTime(sequencingTime.value) + topologyChangeDelay - } - - /** Remove states that have expired before sequencedTime. Bump maximumEffectiveTime to - * topologyChangeDelay + validUntil(topologyChangeDelay), if topologyChangeDelay has expired. - */ - private def cleanup(sequencedTime: SequencedTime): Unit = { - val oldStates = states - .getAndUpdate { oldStates => - val (futureStates, pastStates) = oldStates.span(_.validFrom.value >= sequencedTime.value) - - val currentStateO = pastStates.headOption - futureStates ++ currentStateO.toList - } - - val pastStates: List[State] = oldStates.filter(_.validFrom.value < sequencedTime.value) - pastStates - .sliding(2) - .toSeq - .collect { case Seq(next, prev) => - // Note that next.validFrom == prev.validUntil. - maximumEffectiveTime.getAndUpdate(_ max next.validFrom + prev.topologyChangeDelay).discard - } - .discard - } - - /** Inform the tracker about a potential change to topologyChangeDelay. Must be called whenever a - * [[com.digitalasset.canton.topology.transaction.SynchronizerParametersState]] is committed. - * Must not be called for rejections, proposals, and transactions that expire immediately. - * - * @throws java.lang.IllegalArgumentException - * if effectiveTime is not strictly monotonically increasing - */ - def adjustTopologyChangeDelay( - effectiveTime: EffectiveTime, - newTopologyChangeDelay: NonNegativeFiniteDuration, - )(implicit traceContext: TraceContext): Unit = - states - .getAndUpdate { oldStates => - val oldHeadState = oldStates.headOption - val newHeadState = State(newTopologyChangeDelay, effectiveTime) - ErrorUtil.requireArgument( - oldHeadState.forall(_.validFrom.value < effectiveTime.value), - s"Invalid adjustment of topologyChangeDelay from $oldHeadState to $newHeadState", - ) - logger.info(s"Updated topology change delay from=$oldHeadState to $newHeadState.") - newHeadState +: oldStates - } - .discard[List[State]] -} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionAuthorizationValidator.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionAuthorizationValidator.scala index daf837e6d6..35f99b23f5 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionAuthorizationValidator.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionAuthorizationValidator.scala @@ -17,7 +17,7 @@ import com.digitalasset.canton.topology.processing.AuthorizedTopologyTransaction AuthorizedNamespaceDelegation, } import com.digitalasset.canton.topology.store.* -import com.digitalasset.canton.topology.store.TopologyTransactionRejection.MultiTransactionHashMismatch +import com.digitalasset.canton.topology.store.TopologyTransactionRejection.Authorization as AuthorizationRejections import com.digitalasset.canton.topology.store.ValidatedTopologyTransaction.GenericValidatedTopologyTransaction import com.digitalasset.canton.topology.transaction.* import 
com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction @@ -233,7 +233,9 @@ class TopologyTransactionAuthorizationValidator[+PureCrypto <: CryptoPureApi]( toValidate.signatures.map(_.authorizingLongTermKey) -- allKeysUsedForAuthorization.keys for { // check that all signatures cover the transaction hash - _ <- wronglyCoveredHashesNE.map(MultiTransactionHashMismatch(toValidate.hash, _)).toLeft(()) + _ <- wronglyCoveredHashesNE + .map(AuthorizationRejections.MultiTransactionHashMismatch(toValidate.hash, _)) + .toLeft(()) _ <- Either.cond[TopologyTransactionRejection, Unit]( // there must be at least 1 key used for the signatures for one of the delegation mechanisms (unvalidatedSigningKeysCoveringHash -- superfluousKeys).nonEmpty, @@ -241,7 +243,7 @@ class TopologyTransactionAuthorizationValidator[+PureCrypto <: CryptoPureApi]( logger.info( s"The keys ${superfluousKeys.mkString(", ")} have no delegation to authorize the transaction $toValidate" ) - TopologyTransactionRejection.NoDelegationFoundForKeys(superfluousKeys) + AuthorizationRejections.NoDelegationFoundForKeys(superfluousKeys) }, ) @@ -251,12 +253,12 @@ class TopologyTransactionAuthorizationValidator[+PureCrypto <: CryptoPureApi]( logger.info( "Removing all superfluous keys results in a transaction without any signatures at all." ) - TopologyTransactionRejection.NoDelegationFoundForKeys(superfluousKeys) + AuthorizationRejections.NoDelegationFoundForKeys(superfluousKeys) }) namespaceAuthorizationKeysNE <- NonEmpty .from(namespaceAuthorizationKeys) - .toRight(TopologyTransactionRejection.NotAuthorized) + .toRight(AuthorizationRejections.NotAuthorized) _ <- validateSignatures( txWithSignaturesToVerify, @@ -346,7 +348,7 @@ class TopologyTransactionAuthorizationValidator[+PureCrypto <: CryptoPureApi]( _ <- Either.cond( signature.coversHash(txWithSignaturesToVerify.hash), (), - TopologyTransactionRejection + AuthorizationRejections .MultiTransactionHashMismatch( txWithSignaturesToVerify.hash, signature.hashesCovered, @@ -359,7 +361,7 @@ class TopologyTransactionAuthorizationValidator[+PureCrypto <: CryptoPureApi]( signature.signature, keyIdsWithUsage.forgetNE(publicKey.id), ) - .leftMap(TopologyTransactionRejection.SignatureCheckFailed.apply) + .leftMap(AuthorizationRejections.SignatureCheckFailed.apply) } yield () @@ -427,7 +429,7 @@ class TopologyTransactionAuthorizationValidator[+PureCrypto <: CryptoPureApi]( } ValidatedTopologyTransaction( toValidate, - Some(TopologyTransactionRejection.NotAuthorized), + Some(AuthorizationRejections.NotAuthorized), ) } } @@ -503,7 +505,7 @@ class TopologyTransactionAuthorizationValidator[+PureCrypto <: CryptoPureApi]( if txSynchronizerId != underlyingSynchronizerId.logical => ValidatedTopologyTransaction( toValidate, - Some(TopologyTransactionRejection.InvalidSynchronizer(txSynchronizerId)), + Some(AuthorizationRejections.InvalidSynchronizer(txSynchronizerId)), ) case _ => ValidatedTopologyTransaction(toValidate, None) } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionHandling.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionHandling.scala deleted file mode 100644 index 90707d5c56..0000000000 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionHandling.scala +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. 
All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.processing - -import com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.topology.TopologyStateProcessor -import com.digitalasset.canton.topology.store.ValidatedTopologyTransaction.GenericValidatedTopologyTransaction -import com.digitalasset.canton.topology.store.{TopologyStore, TopologyStoreId} -import com.digitalasset.canton.topology.transaction.{SynchronizerParametersState, TopologyChangeOp} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.ErrorUtil - -import scala.concurrent.ExecutionContext - -abstract class TopologyTransactionHandling( - store: TopologyStore[TopologyStoreId], - val timeouts: ProcessingTimeout, - val loggerFactory: NamedLoggerFactory, -)(implicit ec: ExecutionContext) - extends NamedLogging { - - protected val timeAdjuster = - new TopologyTimestampPlusEpsilonTracker(store, timeouts, loggerFactory) - - protected def stateProcessor: TopologyStateProcessor - - protected def inspectAndAdvanceTopologyTransactionDelay( - effectiveTimestamp: EffectiveTime, - validated: Seq[GenericValidatedTopologyTransaction], - )(implicit traceContext: TraceContext): Unit = { - val synchronizerParamChanges = validated.flatMap( - _.collectOf[TopologyChangeOp.Replace, SynchronizerParametersState] - .filter(tx => - tx.rejectionReason.isEmpty && !tx.transaction.isProposal && !tx.expireImmediately - ) - .map(_.mapping) - ) - - synchronizerParamChanges match { - case Seq() => // normally, we shouldn't have any adjustment - case Seq(synchronizerParametersState) => - // Report adjustment of topologyChangeDelay - timeAdjuster.adjustTopologyChangeDelay( - effectiveTimestamp, - synchronizerParametersState.parameters.topologyChangeDelay, - ) - - case _: Seq[SynchronizerParametersState] => - // As all SynchronizerParametersState transactions have the same `uniqueKey`, - // the topologyTransactionProcessor ensures that only the last one is committed. - // All other SynchronizerParameterState are rejected or expired immediately. - ErrorUtil.internalError( - new IllegalStateException( - s"Unable to commit several SynchronizerParametersState transactions at the same effective time.\n$validated" - ) - ) - } - } - -} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessingSubscriber.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessingSubscriber.scala index efe43a654e..521aa211c7 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessingSubscriber.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessingSubscriber.scala @@ -35,10 +35,7 @@ trait TopologyTransactionProcessingSubscriber { /** This must be called whenever a topology transaction is committed. It may be called at * additional timestamps with `transactions` being empty. Calls must have strictly increasing - * `sequencedTimestamp` and `effectiveTimestamp`. The `effectiveTimestamp` must be the one - * computed by - * [[com.digitalasset.canton.topology.processing.TopologyTimestampPlusEpsilonTracker]] for - * `sequencedTimestamp`. + * `sequencedTimestamp` and `effectiveTimestamp`. 
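+ * (With this change, the effective timestamp is derived directly from the sequenced timestamp plus the static synchronizer parameters' topology change delay.)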
* * During crash recovery previous calls of this method may be replayed. Therefore, * implementations must be idempotent. diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessor.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessor.scala index 8274434c72..af0a542c50 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessor.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessor.scala @@ -17,6 +17,7 @@ import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.environment.CantonNodeParameters import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown, LifeCycle} import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.protocol.StaticSynchronizerParameters import com.digitalasset.canton.protocol.messages.{ DefaultOpenEnvelope, ProtocolMessage, @@ -27,12 +28,12 @@ import com.digitalasset.canton.sequencing.protocol.{AllMembersOfSynchronizer, De import com.digitalasset.canton.time.{Clock, SynchronizerTimeTracker} import com.digitalasset.canton.topology.client.* import com.digitalasset.canton.topology.processing.TopologyTransactionProcessor.subscriptionTimestamp -import com.digitalasset.canton.topology.store.TopologyStore.Change import com.digitalasset.canton.topology.store.{TopologyStore, TopologyStoreId} import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction +import com.digitalasset.canton.topology.transaction.checks.RequiredTopologyMappingChecks import com.digitalasset.canton.topology.transaction.{ SynchronizerUpgradeAnnouncement, - ValidatingTopologyMappingChecks, + TopologyChangeOp, } import com.digitalasset.canton.topology.{ PhysicalSynchronizerId, @@ -63,23 +64,18 @@ class TopologyTransactionProcessor( val terminateProcessing: TerminateProcessing, futureSupervisor: FutureSupervisor, exitOnFatalFailures: Boolean, - timeouts: ProcessingTimeout, - loggerFactory: NamedLoggerFactory, + override val timeouts: ProcessingTimeout, + override val loggerFactory: NamedLoggerFactory, )(implicit ec: ExecutionContext) - extends TopologyTransactionHandling( - store, - timeouts, - loggerFactory, - ) - with NamedLogging + extends NamedLogging with FlagCloseable { private val psid = store.storeId.psid - override protected lazy val stateProcessor: TopologyStateProcessor = + protected lazy val stateProcessor: TopologyStateProcessor = TopologyStateProcessor.forTransactionProcessing( store, - new ValidatingTopologyMappingChecks(store, loggerFactory), + new RequiredTopologyMappingChecks(store, loggerFactory), pureCrypto, loggerFactory, ) @@ -139,12 +135,13 @@ class TopologyTransactionProcessor( // find effective time of sequenced Ts (directly from store) // merge times ) - currentEpsilon <- epsilonForTimestamp(sequencedTs.value) } yield { // we have (ts+e, ts) and quite a few te in the future, so we create list of upcoming changes and sort them val head = ( - EffectiveTime(sequencedTs.value.plus(currentEpsilon.changeDelay.unwrap)), + EffectiveTime( + sequencedTs.value.plus(pureCrypto.staticSynchronizerParameters.topologyChangeDelay.unwrap) + ), ApproximateTime(sequencedTs.value), ) val tail = upcoming.map(x => (x.validFrom, x.validFrom.toApproximate)) @@ -275,21 +272,22 @@ class 
TopologyTransactionProcessor( serializer.executeUS( { val hasTransactions = txs.nonEmpty - for { - effectiveTime <- - timeAdjuster.trackAndComputeEffectiveTime(sequencedTime, hasTransactions) - _ <- - if (hasTransactions) { - // we need to inform the acs commitment processor about the incoming change - // this is safe to do here, as the acs commitment processor `publish` method will only be - // invoked long after the outer future here has finished processing - acsCommitmentScheduleEffectiveTime(Traced(effectiveTime)) - - process(sequencedTime, effectiveTime, sc, txs) - } else { - tickleListeners(sequencedTime, effectiveTime) - } - } yield () + val effectiveTime = + EffectiveTime( + sequencedTime.value.plus( + pureCrypto.staticSynchronizerParameters.topologyChangeDelay.unwrap + ) + ) + if (hasTransactions) { + // we need to inform the acs commitment processor about the incoming change + // this is safe to do here, as the acs commitment processor `publish` method will only be + // invoked long after the outer future here has finished processing + acsCommitmentScheduleEffectiveTime(Traced(effectiveTime)) + + process(sequencedTime, effectiveTime, sc, txs) + } else { + tickleListeners(sequencedTime, effectiveTime) + } }, "processing topology transactions", ) @@ -413,11 +411,6 @@ class TopologyTransactionProcessor( maxTimestampFromStore().map(_.map { case (sequenced, _effective) => sequenced }) ) - private def epsilonForTimestamp(asOfExclusive: CantonTimestamp)(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Change.TopologyDelay] = - store.currentChangeDelay(asOfExclusive) - private def maxTimestampFromStore()(implicit traceContext: TraceContext ): FutureUnlessShutdown[Option[(SequencedTime, EffectiveTime)]] = @@ -476,11 +469,6 @@ class TopologyTransactionProcessor( ) (validated, _) = validationResult - _ = inspectAndAdvanceTopologyTransactionDelay( - effectiveTimestamp, - validated, - ) - validTransactions = validated.collect { case tx if tx.rejectionReason.isEmpty && !tx.transaction.isProposal => tx.transaction } @@ -492,12 +480,14 @@ class TopologyTransactionProcessor( */ validUpgradeAnnouncements = validTransactions .mapFilter(_.selectMapping[SynchronizerUpgradeAnnouncement]) - .map(_.mapping) - // TODO(#26580) Handle cancellation - _ = validUpgradeAnnouncements.foreach(announcement => - terminateProcessing.notifyUpgradeAnnouncement(announcement.successor) - ) + _ = validUpgradeAnnouncements.foreach { announcement => + announcement.operation match { + case TopologyChangeOp.Replace => + terminateProcessing.notifyUpgradeAnnouncement(announcement.mapping.successor) + case TopologyChangeOp.Remove => terminateProcessing.notifyUpgradeCancellation() + } + } _ <- synchronizeWithClosing("notify-topology-transaction-observers")( MonadUtil.sequentialTraverse(listeners.get()) { listenerGroup => @@ -541,6 +531,7 @@ object TopologyTransactionProcessor { pureCrypto: SynchronizerCryptoPureApi, parameters: CantonNodeParameters, clock: Clock, + staticSynchronizerParameters: StaticSynchronizerParameters, futureSupervisor: FutureSupervisor, loggerFactory: NamedLoggerFactory, )( @@ -563,6 +554,7 @@ object TopologyTransactionProcessor { val cachingClientF = CachingSynchronizerTopologyClient.create( clock, + staticSynchronizerParameters, topologyStore, synchronizerPredecessor, StoreBasedSynchronizerTopologyClient.NoPackageDependencies, diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStateForInititalizationService.scala 
b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStateForInititalizationService.scala index 77421448a0..27e2985101 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStateForInititalizationService.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStateForInititalizationService.scala @@ -7,11 +7,14 @@ import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.topology.processing.SequencedTime -import com.digitalasset.canton.topology.store.StoredTopologyTransactions.GenericStoredTopologyTransactions +import com.digitalasset.canton.topology.store.StoredTopologyTransaction.GenericStoredTopologyTransaction import com.digitalasset.canton.topology.store.TopologyStoreId.SynchronizerStore import com.digitalasset.canton.topology.{MediatorId, Member, ParticipantId, SequencerId} import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.PekkoUtil import io.grpc.Status +import org.apache.pekko.NotUsed +import org.apache.pekko.stream.scaladsl.Source import scala.concurrent.ExecutionContext @@ -19,7 +22,7 @@ trait TopologyStateForInitializationService { def initialSnapshot(member: Member)(implicit executionContext: ExecutionContext, traceContext: TraceContext, - ): FutureUnlessShutdown[GenericStoredTopologyTransactions] + ): Source[GenericStoredTopologyTransaction, NotUsed] } final class StoreBasedTopologyStateForInitializationService( @@ -58,7 +61,7 @@ final class StoreBasedTopologyStateForInitializationService( override def initialSnapshot(member: Member)(implicit executionContext: ExecutionContext, traceContext: TraceContext, - ): FutureUnlessShutdown[GenericStoredTopologyTransactions] = { + ): Source[GenericStoredTopologyTransaction, NotUsed] = { val effectiveFromF = member match { case participant @ ParticipantId(_) => synchronizerTopologyStore @@ -78,7 +81,7 @@ final class StoreBasedTopologyStateForInitializationService( ) } - effectiveFromF.flatMap { effectiveFromO => + val sourceF = effectiveFromF.map { effectiveFromO => effectiveFromO .map { effectiveFrom => // This is not a mistake: all transactions with @@ -99,18 +102,22 @@ final class StoreBasedTopologyStateForInitializationService( ) } .getOrElse( - synchronizerTopologyStore - .maxTimestamp(SequencedTime.MaxValue, includeRejected = true) - .flatMap { maxTimestamp => - FutureUnlessShutdown.failed( - Status.FAILED_PRECONDITION - .withDescription( - s"No onboarding transaction found for $member as of $maxTimestamp" + PekkoUtil + .futureSourceUS( + synchronizerTopologyStore + .maxTimestamp(SequencedTime.MaxValue, includeRejected = true) + .flatMap { maxTimestamp => + FutureUnlessShutdown.failed( + Status.FAILED_PRECONDITION + .withDescription( + s"No onboarding transaction found for $member as of $maxTimestamp" + ) + .asException() ) - .asException() - ) - } + } + ) ) } + PekkoUtil.futureSourceUS(sourceF) } } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala index 788691bfd0..fda53a17c3 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala +++ 
b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala @@ -3,9 +3,10 @@ package com.digitalasset.canton.topology.store +import cats.Monoid import cats.data.EitherT +import cats.implicits.catsSyntaxParallelTraverse1 import cats.syntax.either.* -import cats.syntax.functorFilter.* import cats.syntax.traverse.* import com.daml.nonempty.NonEmpty import com.digitalasset.canton.ProtoDeserializationError @@ -18,15 +19,16 @@ import com.digitalasset.canton.config.CantonRequireTypes.{ import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.lifecycle.FutureUnlessShutdownImpl.parallelInstanceFutureUnlessShutdown import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown} import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.protocol.DynamicSynchronizerParameters import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage} +import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult -import com.digitalasset.canton.time.NonNegativeFiniteDuration import com.digitalasset.canton.topology.* -import com.digitalasset.canton.topology.admin.v30 as topoV30 +import com.digitalasset.canton.topology.admin.v30 as adminTopoV30 import com.digitalasset.canton.topology.client.SynchronizerTopologyClient import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} import com.digitalasset.canton.topology.store.StoredTopologyTransaction.GenericStoredTopologyTransaction @@ -34,24 +36,31 @@ import com.digitalasset.canton.topology.store.StoredTopologyTransactions.{ GenericStoredTopologyTransactions, PositiveStoredTopologyTransactions, } -import com.digitalasset.canton.topology.store.TopologyStore.Change.TopologyDelay -import com.digitalasset.canton.topology.store.TopologyStore.{Change, EffectiveStateChange} +import com.digitalasset.canton.topology.store.TopologyStore.EffectiveStateChange import com.digitalasset.canton.topology.store.ValidatedTopologyTransaction.GenericValidatedTopologyTransaction import com.digitalasset.canton.topology.store.db.DbTopologyStore import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStore import com.digitalasset.canton.topology.transaction.* import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction -import com.digitalasset.canton.topology.transaction.TopologyChangeOp.Replace import com.digitalasset.canton.topology.transaction.TopologyMapping.MappingHash import com.digitalasset.canton.topology.transaction.TopologyTransaction.{ GenericTopologyTransaction, TxHash, } import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.version.ProtocolVersion +import com.digitalasset.canton.util.collection.MapsUtil +import com.digitalasset.canton.version.{ + HasVersionedMessageCompanion, + HasVersionedWrapper, + ProtoVersion, + ProtocolVersion, +} import com.digitalasset.daml.lf.data.Ref.PackageId import com.google.common.annotations.VisibleForTesting +import org.apache.pekko.NotUsed +import org.apache.pekko.stream.scaladsl.Source +import scala.collection.mutable import scala.concurrent.ExecutionContext import 
scala.concurrent.duration.Duration import scala.reflect.ClassTag @@ -64,7 +73,6 @@ sealed trait TopologyStoreId extends PrettyPrinting with Product with Serializab def forSynchronizer: Option[PhysicalSynchronizerId] = None } - object TopologyStoreId { /** A topology store storing sequenced topology transactions @@ -136,7 +144,20 @@ final case class StoredTopologyTransaction[+Op <: TopologyChangeOp, +M <: Topolo transaction: SignedTopologyTransaction[Op, M], rejectionReason: Option[String300], ) extends DelegatedTopologyTransactionLike[Op, M] + with HasVersionedWrapper[StoredTopologyTransaction[TopologyChangeOp, TopologyMapping]] with PrettyPrinting { + + override protected def companionObj: StoredTopologyTransaction.type = StoredTopologyTransaction + + def toAdminProtoV30: adminTopoV30.TopologyTransactions.Item = + adminTopoV30.TopologyTransactions.Item( + sequenced = Some(sequenced.value.toProtoTimestamp), + validFrom = Some(validFrom.value.toProtoTimestamp), + validUntil = validUntil.map(_.value.toProtoTimestamp), + transaction = transaction.toByteString, + rejectionReason = rejectionReason.map(_.str), + ) + override protected def transactionLikeDelegate: TopologyTransactionLike[Op, M] = transaction override protected def pretty: Pretty[StoredTopologyTransaction.this.type] = @@ -159,7 +180,37 @@ final case class StoredTopologyTransaction[+Op <: TopologyChangeOp, +M <: Topolo .map(_ => this.asInstanceOf[StoredTopologyTransaction[TargetOp, M]]) } -object StoredTopologyTransaction { +object StoredTopologyTransaction + extends HasVersionedMessageCompanion[ + StoredTopologyTransaction[TopologyChangeOp, TopologyMapping] + ] { + + override def name: String = "stored topology transaction" + + def fromProtoV30( + proto: adminTopoV30.TopologyTransactions.Item + ): ParsingResult[StoredTopologyTransaction[TopologyChangeOp, TopologyMapping]] = + for { + sequenced <- ProtoConverter + .parseRequired(SequencedTime.fromProtoPrimitive, "sequenced", proto.sequenced) + validFrom <- ProtoConverter + .parseRequired(EffectiveTime.fromProtoPrimitive, "valid_from", proto.validFrom) + validUntil <- proto.validUntil.traverse(EffectiveTime.fromProtoPrimitive) + rejectionReason <- proto.rejectionReason.traverse( + String300.fromProtoPrimitive(_, "rejection_reason") + ) + signedTx <- SignedTopologyTransaction.fromTrustedByteStringPVV(proto.transaction) + } yield StoredTopologyTransaction(sequenced, validFrom, validUntil, signedTx, rejectionReason) + + override def supportedProtoVersions: StoredTopologyTransaction.SupportedProtoVersions = + SupportedProtoVersions( + ProtoVersion(30) -> ProtoCodec( + ProtocolVersion.v34, + supportedProtoVersion(adminTopoV30.TopologyTransactions.Item)(fromProtoV30), + _.toAdminProtoV30, + ) + ) + type GenericStoredTopologyTransaction = StoredTopologyTransaction[TopologyChangeOp, TopologyMapping] @@ -223,6 +274,8 @@ abstract class TopologyStore[+StoreID <: TopologyStoreId](implicit def storeId: StoreID + def protocolVersion: ProtocolVersion + /** fetch the effective time updates greater than or equal to a certain timestamp * * this function is used to recover the future effective timestamp such that we can reschedule @@ -232,105 +285,6 @@ abstract class TopologyStore[+StoreID <: TopologyStoreId](implicit traceContext: TraceContext ): FutureUnlessShutdown[Seq[TopologyStore.Change]] - /** Yields the currently valid and all upcoming topology change delays. 
Namely: - * - The change delay with validFrom < sequencedTime and validUntil.forall(_ >= sequencedTime), - * or the initial default value, if no such change delay exists. - * - All change delays with validFrom >= sequencedTime and sequenced < sequencedTime. Excludes: - * - Proposals - * - Rejected transactions - * - Transactions with `validUntil.contains(validFrom)` - * - * The result is sorted descending by validFrom. So the current change delay comes last. - */ - def findCurrentAndUpcomingChangeDelays(sequencedTime: CantonTimestamp)(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[NonEmpty[List[TopologyStore.Change.TopologyDelay]]] = for { - storedTransactions <- doFindCurrentAndUpcomingChangeDelays(sequencedTime) - } yield { - val storedDelays = storedTransactions.toList - .mapFilter(TopologyStore.Change.selectTopologyDelay) - // First sort ascending as lists are optimized for prepending. - // Below, we'll reverse the final list. - .sortBy(_.validFrom) - - val currentO = storedDelays.headOption.filter(_.validFrom.value < sequencedTime) - val initialDefaultO = currentO match { - case Some(_) => None - case None => - Some( - TopologyDelay( - SequencedTime.MinValue, - EffectiveTime.MinValue, - storedDelays.headOption.map(_.validFrom), - DynamicSynchronizerParameters.topologyChangeDelayIfAbsent, - ) - ) - } - - NonEmpty - .from((initialDefaultO.toList ++ storedDelays).reverse) - // The sequence must be non-empty, as either currentO or initialDefaultO is defined. - .getOrElse(throw new NoSuchElementException("Unexpected empty sequence.")) - } - - /** Implementation specific parts of findCurrentAndUpcomingChangeDelays. Implementations must - * filter by validFrom, validUntil, sequenced, isProposal, and rejected. Implementations may or - * may not apply further filters. Implementations should not spend resources for sorting. - */ - protected def doFindCurrentAndUpcomingChangeDelays(sequencedTime: CantonTimestamp)(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Iterable[GenericStoredTopologyTransaction]] - - /** Yields the topologyChangeDelay valid at a given time or, if there is none in the store, the - * initial default value. - */ - def currentChangeDelay( - asOfExclusive: CantonTimestamp - )(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[TopologyStore.Change.TopologyDelay] = - for { - txs <- findPositiveTransactions( - asOf = asOfExclusive, - asOfInclusive = false, - isProposal = false, - types = Seq(SynchronizerParametersState.code), - filterUid = None, - filterNamespace = None, - ) - } yield { - txs.collectLatestByUniqueKey - .collectOfMapping[SynchronizerParametersState] - .result - .headOption - .map(tx => - Change.TopologyDelay( - tx.sequenced, - tx.validFrom, - tx.validUntil, - tx.mapping.parameters.topologyChangeDelay, - ) - ) - .getOrElse( - TopologyStore.Change.TopologyDelay( - SequencedTime(CantonTimestamp.MinValue), - EffectiveTime(CantonTimestamp.MinValue), - None, - DynamicSynchronizerParameters.topologyChangeDelayIfAbsent, - ) - ) - } - - /** Yields all topologyChangeDelays that have expired within a given time period. Does not yield - * any proposals or rejections. 
- */ - def findExpiredChangeDelays( - validUntilMinInclusive: CantonTimestamp, - validUntilMaxExclusive: CantonTimestamp, - )(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Seq[TopologyStore.Change.TopologyDelay]] - /** Finds the transaction with maximum effective time that has been sequenced at or before * `sequencedTime` and yields the sequenced / effective time of that transaction. * @@ -408,6 +362,10 @@ abstract class TopologyStore[+StoreID <: TopologyStoreId](implicit traceContext: TraceContext ): FutureUnlessShutdown[Unit] + def bulkInsert( + initialSnapshot: GenericStoredTopologyTransactions + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] + @VisibleForTesting protected[topology] def dumpStoreContent()(implicit traceContext: TraceContext @@ -475,7 +433,7 @@ abstract class TopologyStore[+StoreID <: TopologyStoreId](implicit def findEssentialStateAtSequencedTime( asOfInclusive: SequencedTime, includeRejected: Boolean, - )(implicit traceContext: TraceContext): FutureUnlessShutdown[GenericStoredTopologyTransactions] + )(implicit traceContext: TraceContext): Source[GenericStoredTopologyTransaction, NotUsed] /** Checks whether the given signed topology transaction has signatures (at this point still * unvalidated) from signing keys, for which there aren't yet signatures in the store. @@ -584,41 +542,10 @@ object TopologyStore { } object Change { - final case class TopologyDelay( - sequenced: SequencedTime, - validFrom: EffectiveTime, - validUntil: Option[EffectiveTime], - changeDelay: NonNegativeFiniteDuration, - ) extends Change - final case class Other(sequenced: SequencedTime, validFrom: EffectiveTime) extends Change def selectChange(tx: GenericStoredTopologyTransaction): Change = - (tx, tx.mapping) match { - case (tx, x: SynchronizerParametersState) => - Change.TopologyDelay( - tx.sequenced, - tx.validFrom, - tx.validUntil, - x.parameters.topologyChangeDelay, - ) - case (tx, _) => Change.Other(tx.sequenced, tx.validFrom) - } - - def selectTopologyDelay( - tx: GenericStoredTopologyTransaction - ): Option[TopologyDelay] = (tx.operation, tx.mapping) match { - case (Replace, SynchronizerParametersState(_, parameters)) => - Some( - Change.TopologyDelay( - tx.sequenced, - tx.validFrom, - tx.validUntil, - parameters.topologyChangeDelay, - ) - ) - case (_: TopologyChangeOp, _: TopologyMapping) => None - } + Change.Other(tx.sequenced, tx.validFrom) } def apply[StoreID <: TopologyStoreId]( @@ -685,39 +612,111 @@ object TopologyStore { before: PositiveStoredTopologyTransactions, after: PositiveStoredTopologyTransactions, ) + + /** determine valid parties within the given mappings (requires p2p, otk and stc) */ + private[store] def determineValidParties( + mappings: Seq[TopologyMapping], + filterParty: String, + filterParticipant: String, + ): Set[PartyId] = { + val (filterPartyIdentifier, filterPartyNamespaceO) = + UniqueIdentifier.splitFilter(filterParty) + val ( + filterParticipantIdentifier, + filterParticipantNamespaceO, + ) = + UniqueIdentifier.splitFilter(filterParticipant) + val validParticipants = determineValidParticipants(mappings) + val validParties = mutable.HashSet[PartyId]() + mappings.foreach { + case ptp: PartyToParticipant + if (filterParty.isEmpty || ptp.partyId.uid + .matchesFilters(filterPartyIdentifier, filterPartyNamespaceO)) && + (filterParticipant.isEmpty || ptp.participants + .exists( + _.participantId.uid + .matchesFilters(filterParticipantIdentifier, filterParticipantNamespaceO) + )) && ptp.participants.exists(h => 
validParticipants.contains(h.participantId)) => + validParties.add(ptp.partyId).discard + case cert: SynchronizerTrustCertificate + if (filterParty.isEmpty || cert.participantId.adminParty.uid + .matchesFilters(filterPartyIdentifier, filterPartyNamespaceO)) + && (filterParticipant.isEmpty || cert.participantId.adminParty.uid.matchesFilters( + filterParticipantIdentifier, + filterParticipantNamespaceO, + )) + && validParticipants + .contains(cert.participantId) => + validParties.add(cert.participantId.adminParty).discard + case _ => () + } + validParties.toSet + } + + /** Given a series of topology transactions, determine the participant ids that have OTK and STC + */ + private def determineValidParticipants( + txs: Iterable[TopologyMapping] + ): Set[ParticipantId] = { + val validParticipants = mutable.Map[ParticipantId, (Boolean, Boolean)]() + txs.foreach { + case OwnerToKeyMapping( + pid: ParticipantId, + _, + ) => // assumption: keys are checked as a state invariant + validParticipants + .updateWith(pid) { + case None => Some((true, false)) + case Some((_, stc)) => Some((true, stc)) + } + .discard + case SynchronizerTrustCertificate(pid, _, _) => + validParticipants + .updateWith(pid) { + case None => Some((false, true)) + case Some((otk, _)) => Some((otk, true)) + } + .discard + case _ => () + } + validParticipants.filter { case (_, (otk, stc)) => otk && stc }.keySet.toSet + } + } sealed trait TimeQuery { - def toProtoV30: topoV30.BaseQuery.TimeQuery + def toProtoV30: adminTopoV30.BaseQuery.TimeQuery } object TimeQuery { case object HeadState extends TimeQuery { - override def toProtoV30: topoV30.BaseQuery.TimeQuery = - topoV30.BaseQuery.TimeQuery.HeadState(com.google.protobuf.empty.Empty()) + override def toProtoV30: adminTopoV30.BaseQuery.TimeQuery = + adminTopoV30.BaseQuery.TimeQuery.HeadState(com.google.protobuf.empty.Empty()) } final case class Snapshot(asOf: CantonTimestamp) extends TimeQuery { - override def toProtoV30: topoV30.BaseQuery.TimeQuery = - topoV30.BaseQuery.TimeQuery.Snapshot(asOf.toProtoTimestamp) + override def toProtoV30: adminTopoV30.BaseQuery.TimeQuery = + adminTopoV30.BaseQuery.TimeQuery.Snapshot(asOf.toProtoTimestamp) } final case class Range(from: Option[CantonTimestamp], until: Option[CantonTimestamp]) extends TimeQuery { - override def toProtoV30: topoV30.BaseQuery.TimeQuery = topoV30.BaseQuery.TimeQuery.Range( - topoV30.BaseQuery.TimeRange(from.map(_.toProtoTimestamp), until.map(_.toProtoTimestamp)) - ) + override def toProtoV30: adminTopoV30.BaseQuery.TimeQuery = + adminTopoV30.BaseQuery.TimeQuery.Range( + adminTopoV30.BaseQuery + .TimeRange(from.map(_.toProtoTimestamp), until.map(_.toProtoTimestamp)) + ) } def fromProto( - proto: topoV30.BaseQuery.TimeQuery, + proto: adminTopoV30.BaseQuery.TimeQuery, fieldName: String, ): ParsingResult[TimeQuery] = proto match { - case topoV30.BaseQuery.TimeQuery.Empty => + case adminTopoV30.BaseQuery.TimeQuery.Empty => Left(ProtoDeserializationError.FieldNotSet(fieldName)) - case topoV30.BaseQuery.TimeQuery.Snapshot(value) => + case adminTopoV30.BaseQuery.TimeQuery.Snapshot(value) => CantonTimestamp.fromProtoTimestamp(value).map(Snapshot.apply) - case topoV30.BaseQuery.TimeQuery.HeadState(_) => Right(HeadState) - case topoV30.BaseQuery.TimeQuery.Range(value) => + case adminTopoV30.BaseQuery.TimeQuery.HeadState(_) => Right(HeadState) + case adminTopoV30.BaseQuery.TimeQuery.Range(value) => + for { fromO <- value.from.traverse(CantonTimestamp.fromProtoTimestamp) toO <- value.until.traverse(CantonTimestamp.fromProtoTimestamp) @@ 
-725,10 +724,56 @@ object TimeQuery { } } -trait PackageDependencyResolverUS { +object UnknownOrUnvettedPackages { + + val empty: UnknownOrUnvettedPackages = UnknownOrUnvettedPackages(Map.empty, Map.empty) + + implicit val monoid: Monoid[UnknownOrUnvettedPackages] = + new Monoid[UnknownOrUnvettedPackages] { + override def empty: UnknownOrUnvettedPackages = UnknownOrUnvettedPackages.empty + override def combine( + x: UnknownOrUnvettedPackages, + y: UnknownOrUnvettedPackages, + ): UnknownOrUnvettedPackages = + UnknownOrUnvettedPackages( + unknown = MapsUtil.mergeMapsOfSets(x.unknown, y.unknown), + unvetted = MapsUtil.mergeMapsOfSets(x.unvetted, y.unvetted), + ) + + } + + def unknown(participantId: ParticipantId, packageId: PackageId): UnknownOrUnvettedPackages = + empty.copy(unknown = Map(participantId -> Set(packageId))) + def unvetted(participantId: ParticipantId, packageId: PackageId): UnknownOrUnvettedPackages = + empty.copy(unvetted = Map(participantId -> Set(packageId))) + def unvetted( + participantId: ParticipantId, + packageIds: Set[PackageId], + ): UnknownOrUnvettedPackages = + if (packageIds.isEmpty) empty else empty.copy(unvetted = Map(participantId -> packageIds)) +} + +final case class UnknownOrUnvettedPackages( + unknown: Map[ParticipantId, Set[PackageId]], + unvetted: Map[ParticipantId, Set[PackageId]], +) { + def isEmpty: Boolean = unknown.isEmpty && unvetted.isEmpty + def unknownOrUnvetted: Map[ParticipantId, Set[PackageId]] = + MapsUtil.mergeMapsOfSets(unknown, unvetted) +} + +trait PackageDependencyResolver { def packageDependencies(packageId: PackageId)(implicit traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, PackageId, Set[PackageId]] + ): EitherT[FutureUnlessShutdown, (PackageId, ParticipantId), Set[PackageId]] + + def packageDependencies(packages: List[PackageId])(implicit + traceContext: TraceContext, + ec: ExecutionContext, + ): EitherT[FutureUnlessShutdown, (PackageId, ParticipantId), Set[PackageId]] = + packages + .parTraverse(packageDependencies) + .map(_.flatten.toSet -- packages) } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionCollection.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionCollection.scala index f0ac3ccc5e..4de2c8b440 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionCollection.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionCollection.scala @@ -6,14 +6,10 @@ package com.digitalasset.canton.topology.store import cats.syntax.functorFilter.* import cats.syntax.traverse.* import com.daml.nonempty.NonEmptyReturningOps.* -import com.digitalasset.canton.config.CantonRequireTypes.String300 import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} -import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult import com.digitalasset.canton.topology.admin.v30 as adminV30 -import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} -import com.digitalasset.canton.topology.store.StoredTopologyTransaction.GenericStoredTopologyTransaction import com.digitalasset.canton.topology.store.TopologyStore.EffectiveStateChange import com.digitalasset.canton.topology.transaction.* import com.digitalasset.canton.version.* @@ -34,18 +30,8 @@ final case class 
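`UnknownOrUnvettedPackages` above merges per-participant package sets point-wise via its `Monoid`. Assuming `MapsUtil.mergeMapsOfSets` unions the sets on key collisions, cats' built-in `Monoid[Map[K, Set[V]]]` has the same behaviour, so the instance can be sketched self-containedly with simplified types:

```scala
import cats.Monoid
import cats.syntax.semigroup.*

object UnknownOrUnvettedDemo {
  // Simplified stand-in for UnknownOrUnvettedPackages: per-participant sets of
  // package ids that are unknown or unvetted.
  final case class Report(
      unknown: Map[String, Set[String]],
      unvetted: Map[String, Set[String]],
  )

  object Report {
    val empty: Report = Report(Map.empty, Map.empty)
    // cats' Monoid[Map[K, Set[V]]] unions the sets on colliding keys.
    implicit val monoid: Monoid[Report] = Monoid.instance(
      empty,
      (x, y) => Report(x.unknown |+| y.unknown, x.unvetted |+| y.unvetted),
    )
  }

  // Per-check reports fold together with combineAll:
  val merged: Report = Monoid.combineAll(
    List(
      Report(Map("p1" -> Set("pkgA")), Map.empty),
      Report(Map("p1" -> Set("pkgB")), Map("p2" -> Set("pkgC"))),
    )
  )
  // merged.unknown == Map("p1" -> Set("pkgA", "pkgB"))
}
```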
StoredTopologyTransactions[+Op <: TopologyChangeOp, +M <: Topol def toTopologyState: List[M] = result.map(_.mapping).toList - def toProtoV30: adminV30.TopologyTransactions = adminV30.TopologyTransactions( - items = result.map { item => - adminV30.TopologyTransactions.Item( - sequenced = Some(item.sequenced.toProtoPrimitive), - validFrom = Some(item.validFrom.toProtoPrimitive), - validUntil = item.validUntil.map(_.toProtoPrimitive), - // these transactions are serialized as versioned topology transactions - transaction = item.transaction.toByteString, - rejectionReason = item.rejectionReason.map(_.unwrap), - ) - } - ) + def toProtoV30: adminV30.TopologyTransactions = + adminV30.TopologyTransactions(items = result.map(_.toAdminProtoV30)) def collectOfType[T <: TopologyChangeOp: ClassTag]: StoredTopologyTransactions[T, M] = StoredTopologyTransactions( @@ -146,37 +132,10 @@ object StoredTopologyTransactions def fromProtoV30( value: adminV30.TopologyTransactions - ): ParsingResult[GenericStoredTopologyTransactions] = { - def parseItem( - item: adminV30.TopologyTransactions.Item - ): ParsingResult[GenericStoredTopologyTransaction] = - for { - sequenced <- ProtoConverter.parseRequired( - SequencedTime.fromProtoPrimitive, - "sequenced", - item.sequenced, - ) - validFrom <- ProtoConverter.parseRequired( - EffectiveTime.fromProtoPrimitive, - "valid_from", - item.validFrom, - ) - validUntil <- item.validUntil.traverse(EffectiveTime.fromProtoPrimitive) - rejectionReason <- item.rejectionReason.traverse( - String300.fromProtoPrimitive(_, "rejection_reason") - ) - transaction <- SignedTopologyTransaction.fromTrustedByteStringPVV(item.transaction) - } yield StoredTopologyTransaction( - sequenced, - validFrom, - validUntil, - transaction, - rejectionReason, - ) + ): ParsingResult[GenericStoredTopologyTransactions] = value.items - .traverse(parseItem) + .traverse(StoredTopologyTransaction.fromProtoV30) .map(StoredTopologyTransactions(_)) - } def empty[Op <: TopologyChangeOp]: StoredTopologyTransactions[Op, TopologyMapping] = StoredTopologyTransactions[Op, TopologyMapping](Seq.empty) diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionRejection.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionRejection.scala index 4cdfb41180..21606d5da2 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionRejection.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionRejection.scala @@ -27,257 +27,286 @@ sealed trait TopologyTransactionRejection extends PrettyPrinting with Product wi } object TopologyTransactionRejection { - final case class NoDelegationFoundForKeys(keys: Set[Fingerprint]) - extends TopologyTransactionRejection { - override def asString: String = s"No delegation found for keys ${keys.mkString(", ")}" - - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.UnauthorizedTransaction.Failure(asString) - - } - case object NotAuthorized extends TopologyTransactionRejection { - override def asString: String = "Not authorized" - - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.UnauthorizedTransaction.Failure(asString) - } - - final case class UnknownParties(parties: Seq[PartyId]) extends TopologyTransactionRejection { - override def asString: String = s"Parties 
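With per-item (de)serialization now delegated to `StoredTopologyTransaction.toAdminProtoV30` and `StoredTopologyTransaction.fromProtoV30`, the property worth preserving is that a collection survives the admin-proto round trip. A generic sketch of that check, with `encode` and `decode` as hypothetical stand-ins for the `toProtoV30`/`fromProtoV30` pair:

```scala
object RoundTripDemo {
  // Generic round-trip property; `encode` and `decode` are hypothetical
  // stand-ins for toProtoV30 and fromProtoV30 of the collection type.
  def roundTrips[A, P, E](encode: A => P, decode: P => Either[E, A])(a: A): Boolean =
    decode(encode(a)) == Right(a)
}
```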
${parties.sorted.mkString(", ")} are unknown." - - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.UnknownParties.Failure(parties) + /** Rejections produced by the state processor. */ + object Processor { + final case class SerialMismatch(expected: PositiveInt, actual: PositiveInt) + extends TopologyTransactionRejection { + override def asString: String = + show"The given serial $actual does not match the expected serial $expected" + override protected def pretty: Pretty[SerialMismatch] = + prettyOfClass(param("expected", _.expected), param("actual", _.actual)) + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.SerialMismatch.Failure(expected, actual) + } } - final case class OnboardingRestrictionInPlace( - participant: ParticipantId, - restriction: OnboardingRestriction, - loginAfter: Option[CantonTimestamp], - ) extends TopologyTransactionRejection { - override def asString: String = - s"Participant $participant onboarding rejected as restrictions $restriction are in place." - - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.ParticipantOnboardingRefused.Reject(participant, restriction) - } - - final case class NoCorrespondingActiveTxToRevoke(mapping: TopologyMapping) - extends TopologyTransactionRejection { - override def asString: String = - s"There is no active topology transaction matching the mapping of the revocation request: $mapping" - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.NoCorrespondingActiveTxToRevoke.Mapping(mapping) - } - - final case class InvalidTopologyMapping(err: String) extends TopologyTransactionRejection { - override def asString: String = s"Invalid mapping: $err" - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.InvalidTopologyMapping.Reject(err) - } - - final case class CannotRemoveMapping(mappingCode: TopologyMapping.Code) - extends TopologyTransactionRejection { - override def asString: String = - s"Removal of $mappingCode is not supported. Use Replace instead." - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.CannotRemoveMapping.Reject(mappingCode) - } + /** Rejections raised by authorization checks. */ + object Authorization { + + case object NotAuthorized extends TopologyTransactionRejection { + override def asString: String = "Not authorized" + + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.UnauthorizedTransaction.Failure(asString) + } + + final case class NoDelegationFoundForKeys(keys: Set[Fingerprint]) + extends TopologyTransactionRejection { + override def asString: String = s"No delegation found for keys ${keys.mkString(", ")}" + + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.UnauthorizedTransaction.Failure(asString) + + } + final case class MultiTransactionHashMismatch( + expected: TxHash, + actual: NonEmpty[Set[TxHash]], + ) extends TopologyTransactionRejection { + override def asString: String = + s"The given transaction hash set $actual did not contain the expected hash $expected of the transaction."
+ override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.MultiTransactionHashMismatch.Failure(expected, actual) + } + final case class SignatureCheckFailed(err: SignatureCheckError) + extends TopologyTransactionRejection { + override def asString: String = err.toString + override protected def pretty: Pretty[SignatureCheckFailed] = prettyOfClass( + param("err", _.err) + ) + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.InvalidSignatureError.Failure(err) + } + + final case class InvalidSynchronizer(synchronizerId: SynchronizerId) + extends TopologyTransactionRejection { + override def asString: String = show"Invalid synchronizer $synchronizerId" + override protected def pretty: Pretty[InvalidSynchronizer] = prettyOfClass( + param("synchronizer", _.synchronizerId) + ) - final case class RemoveMustNotChangeMapping(actual: TopologyMapping, expected: TopologyMapping) - extends TopologyTransactionRejection { - override def asString: String = "Remove operation must not change the mapping to remove." + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.InvalidSynchronizer.Failure(synchronizerId) + } - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.RemoveMustNotChangeMapping.Reject(actual, expected) } - final case class SignatureCheckFailed(err: SignatureCheckError) - extends TopologyTransactionRejection { - override def asString: String = err.toString - override protected def pretty: Pretty[SignatureCheckFailed] = prettyOfClass(param("err", _.err)) + /** Rejections raised when a transaction violates invariants of the topology state. */ + object RequiredMapping { + + final case class OnboardingRestrictionInPlace( + participant: ParticipantId, + restriction: OnboardingRestriction, + loginAfter: Option[CantonTimestamp], + ) extends TopologyTransactionRejection { + override def asString: String = + s"Participant $participant onboarding rejected as restrictions $restriction are in place." + + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.ParticipantOnboardingRefused.Reject(participant, restriction) + } + + final case class NoCorrespondingActiveTxToRevoke(mapping: TopologyMapping) + extends TopologyTransactionRejection { + override def asString: String = + s"There is no active topology transaction matching the mapping of the revocation request: $mapping" + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.NoCorrespondingActiveTxToRevoke.Mapping(mapping) + } + + final case class InvalidTopologyMapping(err: String) extends TopologyTransactionRejection { + override def asString: String = s"Invalid mapping: $err" + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.InvalidTopologyMapping.Reject(err) + } + + final case class CannotRemoveMapping(mappingCode: TopologyMapping.Code) + extends TopologyTransactionRejection { + override def asString: String = + s"Removal of $mappingCode is not supported. Use Replace instead."
+ override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.CannotRemoveMapping.Reject(mappingCode) + } + + final case class RemoveMustNotChangeMapping(actual: TopologyMapping, expected: TopologyMapping) + extends TopologyTransactionRejection { + override def asString: String = "Remove operation must not change the mapping to remove." + + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.RemoveMustNotChangeMapping.Reject(actual, expected) + } + + final case class InsufficientKeys(members: Seq[Member]) extends TopologyTransactionRejection { + override def asString: String = + s"Members ${members.sorted.mkString(", ")} are missing a signing key or an encryption key or both." + + override protected def pretty: Pretty[InsufficientKeys] = prettyOfClass( + param("members", _.members) + ) - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.InvalidSignatureError.Failure(err) - } - final case class InvalidSynchronizer(synchronizerId: SynchronizerId) - extends TopologyTransactionRejection { - override def asString: String = show"Invalid synchronizer $synchronizerId" - override protected def pretty: Pretty[InvalidSynchronizer] = prettyOfClass( - param("synchronizer", _.synchronizerId) - ) - - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.InvalidSynchronizer.Failure(synchronizerId) - } + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.InsufficientKeys.Failure(members) + } - final case class SerialMismatch(expected: PositiveInt, actual: PositiveInt) - extends TopologyTransactionRejection { - override def asString: String = - show"The given serial $actual does not match the expected serial $expected" - override protected def pretty: Pretty[SerialMismatch] = - prettyOfClass(param("expected", _.expected), param("actual", _.actual)) - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.SerialMismatch.Failure(expected, actual) - } - final case class AssumptionViolation(str: String) extends TopologyTransactionRejection { - override def asString: String = str - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.InternalError.AssumptionViolation(str) - } + final case class UnknownMembers(members: Seq[Member]) extends TopologyTransactionRejection { + override def asString: String = s"Members ${members.sorted.mkString(", ")} are unknown." - final case class MultiTransactionHashMismatch( - expected: TxHash, - actual: NonEmpty[Set[TxHash]], - ) extends TopologyTransactionRejection { - override def asString: String = - s"The given transaction hash set $actual did not contain the expected hash $expected of the transaction." - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.MultiTransactionHashMismatch.Failure(expected, actual) - } + override protected def pretty: Pretty[UnknownMembers] = prettyOfClass( + param("members", _.members) + ) - final case class InsufficientKeys(members: Seq[Member]) extends TopologyTransactionRejection { - override def asString: String = - s"Members ${members.sorted.mkString(", ")} are missing a signing key or an encryption key or both." 
+ override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.UnknownMembers.Failure(members) + } - override protected def pretty: Pretty[InsufficientKeys] = prettyOfClass( - param("members", _.members) - ) + final case class MissingSynchronizerParameters(effective: EffectiveTime) + extends TopologyTransactionRejection { + override def asString: String = s"Missing synchronizer parameters at $effective" - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.InsufficientKeys.Failure(members) - } + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.MissingTopologyMapping.MissingSynchronizerParameters(effective) + } - final case class UnknownMembers(members: Seq[Member]) extends TopologyTransactionRejection { - override def asString: String = s"Members ${members.sorted.mkString(", ")} are unknown." + final case class NamespaceAlreadyInUse(namespace: Namespace) + extends TopologyTransactionRejection { + override def asString: String = s"The namespace $namespace is already used by another entity." - override protected def pretty: Pretty[UnknownMembers] = prettyOfClass( - param("members", _.members) - ) + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.NamespaceAlreadyInUse.Reject(namespace) - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.UnknownMembers.Failure(members) - } + override protected def pretty: Pretty[NamespaceAlreadyInUse.this.type] = prettyOfClass( + param("namespace", _.namespace) + ) + } - final case class ParticipantStillHostsParties(participantId: ParticipantId, parties: Seq[PartyId]) - extends TopologyTransactionRejection { - override def asString: String = - s"Cannot remove synchronizer trust certificate for $participantId because it still hosts parties ${parties - .mkString(",")}" + final case class PartyIdConflictWithAdminParty(partyId: PartyId) + extends TopologyTransactionRejection { + override def asString: String = + s"The partyId $partyId is the same as an already existing admin party." - override protected def pretty: Pretty[ParticipantStillHostsParties] = - prettyOfClass(param("participantId", _.participantId), param("parties", _.parties)) + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.PartyIdConflictWithAdminParty.Reject(partyId) - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.IllegalRemovalOfSynchronizerTrustCertificate - .ParticipantStillHostsParties( - participantId, - parties, + override protected def pretty: Pretty[PartyIdConflictWithAdminParty.this.type] = + prettyOfClass( + param("partyId", _.partyId) ) - } - - final case class MissingSynchronizerParameters(effective: EffectiveTime) - extends TopologyTransactionRejection { - override def asString: String = s"Missing synchronizer parameters at $effective" - - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.MissingTopologyMapping.MissingSynchronizerParameters(effective) - } - - final case class NamespaceAlreadyInUse(namespace: Namespace) - extends TopologyTransactionRejection { - override def asString: String = s"The namespace $namespace is already used by another entity." 
- - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.NamespaceAlreadyInUse.Reject(namespace) - - override protected def pretty: Pretty[NamespaceAlreadyInUse.this.type] = prettyOfClass( - param("namespace", _.namespace) - ) - } - - final case class PartyIdConflictWithAdminParty(partyId: PartyId) - extends TopologyTransactionRejection { - override def asString: String = - s"The partyId $partyId is the same as an already existing admin party." - - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.PartyIdConflictWithAdminParty.Reject(partyId) - - override protected def pretty: Pretty[PartyIdConflictWithAdminParty.this.type] = prettyOfClass( - param("partyId", _.partyId) - ) - } - - final case class ParticipantIdConflictWithPartyId(participantId: ParticipantId, partyId: PartyId) - extends TopologyTransactionRejection { - override def asString: String = - s"Tried to onboard participant $participantId while party $partyId with the same UID already exists." - - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.ParticipantIdConflictWithPartyId.Reject(participantId, partyId) + } + + final case class ParticipantIdConflictWithPartyId( + participantId: ParticipantId, + partyId: PartyId, + ) extends TopologyTransactionRejection { + override def asString: String = + s"Tried to onboard participant $participantId while party $partyId with the same UID already exists." + + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.ParticipantIdConflictWithPartyId.Reject(participantId, partyId) + + override protected def pretty: Pretty[ParticipantIdConflictWithPartyId.this.type] = + prettyOfClass( + param("participantId", _.participantId), + param("partyId", _.partyId), + ) + } + + final case class MediatorsAlreadyInOtherGroups( + group: NonNegativeInt, + mediators: Map[MediatorId, NonNegativeInt], + ) extends TopologyTransactionRejection { + override def asString: String = + s"Tried to add mediators to group $group, but they are already assigned to other groups: ${mediators.toSeq + .sortBy(_._1.toProtoPrimitive) + .mkString(", ")}" + + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.MediatorsAlreadyInOtherGroups.Reject(group, mediators) + } + + final case class OngoingSynchronizerUpgrade(synchronizerId: SynchronizerId) + extends TopologyTransactionRejection { + override def asString: String = + s"The topology state of synchronizer $synchronizerId is frozen due to an ongoing synchronizer migration and no more topology changes are allowed." + + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.OngoingSynchronizerUpgrade.Reject(synchronizerId) + } + + final case class InvalidSynchronizerSuccessor( + currentSynchronizerId: PhysicalSynchronizerId, + successorSynchronizerId: PhysicalSynchronizerId, + ) extends TopologyTransactionRejection { + override def asString: String = + s"The declared successor $successorSynchronizerId of synchronizer $currentSynchronizerId is not valid." 
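The regrouping in this file nests each rejection under an object named for the phase that raises it (`Processor`, `Authorization`, `RequiredMapping`, `OptionalMapping`), documenting provenance at the use site without changing the common supertype. A toy model of the pattern, with simplified names rather than the Canton types:

```scala
object RejectionGroupingDemo {
  // Simplified model of the regrouping: rejections nest under an object named
  // after the phase that raises them, but share one supertype.
  sealed trait Rejection { def asString: String }

  object Authorization {
    case object NotAuthorized extends Rejection {
      def asString: String = "Not authorized"
    }
  }

  object RequiredMapping {
    final case class UnknownMembers(members: Seq[String]) extends Rejection {
      def asString: String = s"Members ${members.sorted.mkString(", ")} are unknown."
    }
  }

  // Call sites can still match across groups, e.g. to choose a log level:
  def logLevel(r: Rejection): String = r match {
    case Authorization.NotAuthorized => "WARN"
    case _: RequiredMapping.UnknownMembers => "INFO"
  }
}
```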
+ + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.InvalidSynchronizerSuccessor.Reject.conflictWithCurrentPSId( + currentSynchronizerId, + successorSynchronizerId, + ) + } - override protected def pretty: Pretty[ParticipantIdConflictWithPartyId.this.type] = - prettyOfClass( - param("participantId", _.participantId), - param("partyId", _.partyId), - ) - } + final case class InvalidUpgradeTime( + synchronizerId: SynchronizerId, + effective: EffectiveTime, + upgradeTime: CantonTimestamp, + ) extends TopologyTransactionRejection { + override def asString: String = + s"The upgrade time $upgradeTime must be after the effective time ${effective.value} of the synchronizer upgrade announcement for synchronizer $synchronizerId." - final case class MediatorsAlreadyInOtherGroups( - group: NonNegativeInt, - mediators: Map[MediatorId, NonNegativeInt], - ) extends TopologyTransactionRejection { - override def asString: String = - s"Tried to add mediators to group $group, but they are already assigned to other groups: ${mediators.toSeq - .sortBy(_._1.toProtoPrimitive) - .mkString(", ")}" - - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.MediatorsAlreadyInOtherGroups.Reject(group, mediators) - } + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.InvalidUpgradeTime.Reject(synchronizerId, effective, upgradeTime) + } - final case class MembersCannotRejoinSynchronizer(members: Seq[Member]) - extends TopologyTransactionRejection { - override def asString: String = - s"Member ${members.sorted} tried to rejoin a synchronizer from which they had previously left." + final case class ParticipantCannotRejoinSynchronizer(participantId: ParticipantId) + extends TopologyTransactionRejection { + override def asString: String = + s"Participant $participantId tried to rejoin a synchronizer which it had previously left." - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.MemberCannotRejoinSynchronizer.Reject(members) - } + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.MemberCannotRejoinSynchronizer.Reject(Seq(participantId)) + } - final case class OngoingSynchronizerUpgrade(synchronizerId: SynchronizerId) - extends TopologyTransactionRejection { - override def asString: String = - s"The topology state of synchronizer $synchronizerId is frozen due to an ongoing synchronizer migration and no more topology changes are allowed." + final case class CannotReregisterKeys(member: Member) extends TopologyTransactionRejection { + override def asString: String = + s"Member $member tried to re-register its keys, which it had previously removed."
- override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.OngoingSynchronizerUpgrade.Reject(synchronizerId) - } + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.MemberCannotRejoinSynchronizer.RejectNewKeys(member) + } - final case class InvalidSynchronizerSuccessor( - currentSynchronizerId: PhysicalSynchronizerId, - successorSynchronizerId: PhysicalSynchronizerId, - ) extends TopologyTransactionRejection { - override def asString: String = - s"The declared successor $successorSynchronizerId of synchronizer $currentSynchronizerId is not valid." - - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.InvalidSynchronizerSuccessor.Reject( - currentSynchronizerId, - successorSynchronizerId, - ) } - final case class InvalidUpgradeTime( - synchronizerId: SynchronizerId, - effective: EffectiveTime, - upgradeTime: CantonTimestamp, - ) extends TopologyTransactionRejection { - override def asString: String = - s"The upgrade time $upgradeTime must be after the effective ${effective.value} of the synchronizer upgrade announcement for synchronizer $synchronizerId." + /** Rejections raised to help operators avoid mistakes. */ + object OptionalMapping { + + final case class ParticipantStillHostsParties( + participantId: ParticipantId, + parties: Seq[PartyId], + ) extends TopologyTransactionRejection { + override def asString: String = + s"Cannot remove synchronizer trust certificate or owner to key mapping for $participantId because it still hosts parties ${parties + .mkString(",")}" + + override protected def pretty: Pretty[ParticipantStillHostsParties] = + prettyOfClass(param("participantId", _.participantId), param("parties", _.parties)) + + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.IllegalRemovalOfActiveTopologyTransactions + .ParticipantStillHostsParties( + participantId, + parties, + ) + } + + final case class MembersCannotRejoinSynchronizer(members: Seq[Member]) + extends TopologyTransactionRejection { + override def asString: String = + s"Members ${members.sorted} tried to rejoin a synchronizer which they had previously left."
+ + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.MemberCannotRejoinSynchronizer.Reject(members) + } - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.InvalidUpgradeTime.Reject(synchronizerId, effective, upgradeTime) } } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStore.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStore.scala index f8455362a4..bf1732bee0 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStore.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStore.scala @@ -3,7 +3,6 @@ package com.digitalasset.canton.topology.store.db -import cats.syntax.functorFilter.* import cats.syntax.option.* import cats.syntax.traverse.* import com.daml.nameof.NameOf.functionFullName @@ -35,19 +34,22 @@ import com.digitalasset.canton.topology.transaction.TopologyTransaction.{ TxHash, } import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.MonadUtil +import com.digitalasset.canton.util.{MonadUtil, PekkoUtil} import com.digitalasset.canton.version.ProtocolVersion import com.google.common.annotations.VisibleForTesting +import org.apache.pekko.NotUsed +import org.apache.pekko.stream.scaladsl.Source import slick.jdbc.canton.SQLActionBuilder import slick.jdbc.{GetResult, TransactionIsolation} import slick.sql.SqlStreamingAction import scala.concurrent.ExecutionContext +import scala.math.Ordering.Implicits.* class DbTopologyStore[StoreId <: TopologyStoreId]( override protected val storage: DbStorage, val storeId: StoreId, - protocolVersion: ProtocolVersion, + override val protocolVersion: ProtocolVersion, override protected val timeouts: ProcessingTimeout, override protected val loggerFactory: NamedLoggerFactory, protected val maxItemsInSqlQuery: PositiveInt = PositiveInt.tryCreate(100), @@ -183,7 +185,33 @@ class DbTopologyStore[StoreId <: TopologyStoreId]( .withTransactionIsolation(TransactionIsolation.Serializable), operationName = "update-topology-transactions", ) + } + + override def bulkInsert( + initialSnapshot: GenericStoredTopologyTransactions + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { + val inserts = initialSnapshot.result + .grouped(1000) + .map( + insertSignedTransaction[GenericStoredTopologyTransaction](tx => + TransactionEntry( + sequenced = tx.sequenced, + validFrom = tx.validFrom, + validUntil = tx.validUntil, + signedTx = tx.transaction, + rejectionReason = tx.rejectionReason, + ) + ) + ) + .toSeq + storage.update_( + DBIO + .seq(inserts*) + .transactionally + .withTransactionIsolation(TransactionIsolation.Serializable), + operationName = "bulk-insert", + ) } @VisibleForTesting @@ -322,12 +350,31 @@ class DbTopologyStore[StoreId <: TopologyStoreId]( ++ sql")" ) ++ sql" OR " - // SynchronizerTrustCertificate filtering + // SynchronizerTrustCertificate filtering for the party ++ Seq( sql"(transaction_type = ${SynchronizerTrustCertificate.code}" // In SynchronizerTrustCertificate part of the filter, compare not only to participant, but also to party identifier // to enable searching for the admin party ++ conditionalAppend(filterParty, filterPartyIdentifier, filterPartyNamespaceO) + ++ sql")" + ) + ++ sql" OR " + // SynchronizerTrustCertificate filtering for the participant state + 
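The `bulkInsert` above keeps individual statements bounded by grouping the snapshot into batches of 1000, while preserving atomicity by sequencing all batches inside one serializable transaction. The batching shape in isolation, with `Action` and `mkAction` as hypothetical stand-ins for Slick's `DBIO` and `insertSignedTransaction`:

```scala
object BatchingDemo {
  // Split items into fixed-size batches and build one action per batch; the
  // caller then sequences all actions inside a single transaction. `Action`
  // and `mkAction` are hypothetical stand-ins for Slick's DBIO and
  // insertSignedTransaction.
  def batched[A, Action](items: Seq[A], batchSize: Int)(mkAction: Seq[A] => Action): Seq[Action] =
    items.grouped(batchSize).map(mkAction).toSeq
}
```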
++ Seq( + sql"(transaction_type = ${SynchronizerTrustCertificate.code}" + ++ conditionalAppend( + filterParticipant, + filterParticipantIdentifier, + filterParticipantNamespaceO, + ) + ++ sql")" + ) + ++ sql" OR " + // OwnerToKeyMapping filtering: + // We also need to include the owner to key mappings in our result such that we can determine + // whether a participant is actually active + ++ Seq( + sql"(transaction_type = ${OwnerToKeyMapping.code}" ++ conditionalAppend( filterParticipant, filterParticipantIdentifier, @@ -338,23 +385,14 @@ class DbTopologyStore[StoreId <: TopologyStoreId]( ++ sql")" ) toStoredTopologyTransactions(storage.query(query, operationName = functionFullName)) - .map( - _.result.toSet - .flatMap[PartyId](_.mapping match { - case ptp: PartyToParticipant - if filterParticipant.isEmpty || ptp.participants - .exists( - _.participantId.uid - .matchesFilters(filterParticipantIdentifier, filterParticipantNamespaceO) - ) => - Set(ptp.partyId) - case cert: SynchronizerTrustCertificate - if filterParty.isEmpty || cert.participantId.adminParty.uid - .matchesFilters(filterPartyIdentifier, filterPartyNamespaceO) => - Set(cert.participantId.adminParty) - case _ => Set.empty - }) - ) + .map { txs => + val mappings = txs.result.map(_.mapping) + TopologyStore.determineValidParties( + mappings, + filterParty = filterParty, + filterParticipant = filterParticipant, + ) + } } override def findPositiveTransactions( @@ -460,13 +498,47 @@ class DbTopologyStore[StoreId <: TopologyStoreId]( includeRejected: Boolean, )(implicit traceContext: TraceContext - ): FutureUnlessShutdown[GenericStoredTopologyTransactions] = { - val timeFilter = sql" AND sequenced <= ${asOfInclusive.value}" + ): Source[GenericStoredTopologyTransaction, NotUsed] = { logger.debug(s"Querying essential state as of $asOfInclusive") - val query = buildQueryForTransactions(timeFilter, includeRejected = includeRejected) - toStoredTopologyTransactions(storage.query(query, operationName = "essentialState")) - .map(_.asSnapshotAtMaxEffectiveTime) + val maxEffectiveTimeF = + maxTimestamp(asOfInclusive, includeRejected = includeRejected).map(_.map { + case (_, effective @ EffectiveTime(_)) => effective + }) + + val sourceF = maxEffectiveTimeF.map { + case None => Source.empty + case Some(maxEffective) => + val timeFilter = sql" AND sequenced <= ${asOfInclusive.value}" + Source + .unfoldAsync(Option.empty[Long]) { idOffset => + val query = buildQueryForTransactionsWithId( + timeFilter ++ idOffset.map(offset => sql" AND id > $offset").toList, + includeRejected = includeRejected, + limit = storage.limit(maxItemsInSqlQuery.value), + orderBy = " order by id", + ) + storage + .query(query, operationName = "essentialState") + .map { rows => + if (rows.isEmpty) None + else { + val (ids, txData) = rows.unzip + val transactions = toStoredTopologyTransactions(txData).result.map { storedTx => + // unset validUntil later than maxEffective, so that the node processing this + // topology snapshot sees the transactions as they were at the effective time + if (storedTx.validUntil.exists(_ > maxEffective)) { + storedTx.copy(validUntil = None) + } else storedTx + } + Some(ids.lastOption -> transactions) + } + } + .onShutdown(None) + } + .mapConcat(identity) + } + PekkoUtil.futureSourceUS(sourceF) } override def findUpcomingEffectiveChanges(asOfInclusive: CantonTimestamp)(implicit @@ -482,33 +554,6 @@ class DbTopologyStore[StoreId <: TopologyStoreId]( .map(res => res.result.map(TopologyStore.Change.selectChange).distinct) } - protected def 
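`findEssentialStateAtSequencedTime` above now streams the snapshot with keyset pagination: each page is ordered by `id`, the last `id` seen becomes the next offset, and the stream ends on the first empty page. A self-contained sketch of that `Source.unfoldAsync` loop, where `fetchPage` stands in for the `id > offset` SQL query:

```scala
import scala.concurrent.{ExecutionContext, Future}
import org.apache.pekko.NotUsed
import org.apache.pekko.stream.scaladsl.Source

object Paging {
  // Keyset pagination: fetch pages ordered by id, remember the last id seen,
  // stop when a page comes back empty. `fetchPage` stands in for the SQL query
  // with "AND id > :offset ORDER BY id LIMIT :n".
  def paged[Row](fetchPage: Option[Long] => Future[Vector[(Long, Row)]])(implicit
      ec: ExecutionContext
  ): Source[Row, NotUsed] =
    Source
      .unfoldAsync(Option.empty[Long]) { lastId =>
        fetchPage(lastId).map { rows =>
          if (rows.isEmpty) None
          else Some((Some(rows.last._1): Option[Long], rows.map(_._2)))
        }
      }
      .mapConcat(identity)
}
```

Paginating on the monotonically increasing `id` rather than with OFFSET keeps each page query cheap and stable even while the table grows.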
doFindCurrentAndUpcomingChangeDelays(sequencedTime: CantonTimestamp)(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Iterable[GenericStoredTopologyTransaction]] = { - val query = buildQueryForTransactions( - sql""" AND transaction_type = ${SynchronizerParametersState.code} - AND (valid_from >= $sequencedTime OR valid_until is NULL OR valid_until >= $sequencedTime) - AND (valid_until is NULL or valid_from != valid_until) - AND sequenced < $sequencedTime - AND is_proposal = false """ - ) - toStoredTopologyTransactions(storage.query(query, operationName = functionFullName)) - .map(_.result) - } - - override def findExpiredChangeDelays( - validUntilMinInclusive: CantonTimestamp, - validUntilMaxExclusive: CantonTimestamp, - )(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Seq[TopologyStore.Change.TopologyDelay]] = { - val query = buildQueryForTransactions( - sql" AND transaction_type = ${SynchronizerParametersState.code} AND $validUntilMinInclusive <= valid_until AND valid_until < $validUntilMaxExclusive AND is_proposal = false " - ) - toStoredTopologyTransactions(storage.query(query, operationName = functionFullName)) - .map(_.result.mapFilter(TopologyStore.Change.selectTopologyDelay)) - } - override def maxTimestamp(sequencedTime: SequencedTime, includeRejected: Boolean)(implicit traceContext: TraceContext ): FutureUnlessShutdown[Option[(SequencedTime, EffectiveTime)]] = { @@ -810,6 +855,7 @@ class DbTopologyStore[StoreId <: TopologyStoreId]( ) private type QueryAction = DbAction.ReadTransactional[Vector[QueryResult]] + private def buildQueryForTransactions( subQuery: SQLActionBuilder, limit: String = "", @@ -820,15 +866,34 @@ class DbTopologyStore[StoreId <: TopologyStoreId]( sql"SELECT instance, sequenced, valid_from, valid_until, rejection_reason FROM common_topology_transactions WHERE store_id = $transactionStoreIdName" ++ subQuery ++ (if (!includeRejected) sql" AND rejection_reason IS NULL" else sql"") ++ sql" #$orderBy #$limit" - query.as[ + query.as[QueryResult] + } + + private type QueryResultWithId = ( + Long, ( GenericSignedTopologyTransaction, CantonTimestamp, CantonTimestamp, Option[CantonTimestamp], Option[String300], - ) - ] + ), + ) + + private type QueryActionWithId = DbAction.ReadTransactional[Vector[QueryResultWithId]] + + private def buildQueryForTransactionsWithId( + subQuery: SQLActionBuilder, + limit: String, + orderBy: String, + includeRejected: Boolean, + ): QueryActionWithId = { + val query = + sql"SELECT id, instance, sequenced, valid_from, valid_until, rejection_reason FROM common_topology_transactions WHERE store_id = $transactionStoreIdName" ++ + subQuery ++ Option + .when(!includeRejected)(sql" AND rejection_reason IS NULL") + .toList ++ sql" #$orderBy #$limit" + query.as[QueryResultWithId] } private def toStoredTopologyTransactions( @@ -846,6 +911,20 @@ class DbTopologyStore[StoreId <: TopologyStoreId]( ) }) ) + private def toStoredTopologyTransactions( + result: Vector[QueryResult] + ): GenericStoredTopologyTransactions = + StoredTopologyTransactions( + result.map { case (tx, sequencedTs, validFrom, validUntil, rejectionReason) => + StoredTopologyTransaction( + SequencedTime(sequencedTs), + EffectiveTime(validFrom), + validUntil.map(EffectiveTime(_)), + tx, + rejectionReason, + ) + } + ) override def currentDispatchingWatermark(implicit traceContext: TraceContext diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStore.scala 
b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStore.scala index 2b89b69083..553f073956 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStore.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStore.scala @@ -3,7 +3,6 @@ package com.digitalasset.canton.topology.store.memory -import cats.syntax.functorFilter.* import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.CantonRequireTypes.String300 import com.digitalasset.canton.config.ProcessingTimeout @@ -30,8 +29,11 @@ import com.digitalasset.canton.topology.transaction.TopologyTransaction.{ TxHash, } import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.PekkoUtil import com.digitalasset.canton.version.{ProtocolVersion, RepresentativeProtocolVersion} import com.google.common.annotations.VisibleForTesting +import org.apache.pekko.NotUsed +import org.apache.pekko.stream.scaladsl.Source import java.util.concurrent.atomic.AtomicReference import scala.collection.mutable @@ -41,7 +43,7 @@ import scala.math.Ordering.Implicits.* class InMemoryTopologyStore[+StoreId <: TopologyStoreId]( val storeId: StoreId, - protocolVersion: ProtocolVersion, + override val protocolVersion: ProtocolVersion, val loggerFactory: NamedLoggerFactory, override val timeouts: ProcessingTimeout, )(implicit ec: ExecutionContext) @@ -187,6 +189,34 @@ class InMemoryTopologyStore[+StoreId <: TopologyStoreId]( FutureUnlessShutdown.unit } + override def bulkInsert( + initialSnapshot: GenericStoredTopologyTransactions + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { + initialSnapshot.result.foreach { tx => + val uniqueKey = ( + tx.mapping.uniqueKey, + tx.serial, + tx.validFrom, + tx.operation, + tx.transaction.representativeProtocolVersion, + tx.transaction.hashOfSignatures(protocolVersion), + tx.hash, + ) + if (topologyTransactionsStoreUniqueIndex.add(uniqueKey)) { + topologyTransactionStore.append( + TopologyStoreEntry( + tx.transaction, + tx.sequenced, + from = tx.validFrom, + until = tx.validUntil, + rejected = tx.rejectionReason, + ) + ) + } + } + FutureUnlessShutdown.unit + } + @VisibleForTesting override protected[topology] def dumpStoreContent()(implicit traceContext: TraceContext @@ -254,33 +284,15 @@ class InMemoryTopologyStore[+StoreId <: TopologyStoreId]( // is not a proposal !entry.transaction.isProposal && // is of type Replace - entry.operation == TopologyChangeOp.Replace && - // matches a party to participant mapping (with appropriate filters) - (entry.mapping match { - case ptp: PartyToParticipant => - ptp.partyId.uid.matchesFilters(prefixPartyIdentifier, prefixPartyNS) && - (filterParticipant.isEmpty || - ptp.participants.exists( - _.participantId.uid - .matchesFilters(prefixParticipantIdentifier, prefixParticipantNS) - )) - case cert: SynchronizerTrustCertificate => - cert.participantId.adminParty.uid - .matchesFilters(prefixPartyIdentifier, prefixPartyNS) && - cert.participantId.uid - .matchesFilters(prefixParticipantIdentifier, prefixParticipantNS) - case _ => false - }) - - val topologyStateStoreSeq = blocking(synchronized(topologyTransactionStore.toSeq)) + entry.operation == TopologyChangeOp.Replace + + val mappings = + blocking(synchronized(topologyTransactionStore.toSeq)).filter(filter).map(_.mapping) + FutureUnlessShutdown.pure( - topologyStateStoreSeq - .foldLeft(Set.empty[PartyId]) { - case (acc, elem) if 
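The in-memory `bulkInsert` above stays idempotent by checking a side set of unique keys before appending, so re-importing the same snapshot is a no-op. The pattern in isolation, with `key` as a stand-in for the tuple of mapping key, serial, validFrom, operation, and hashes used as the unique index:

```scala
import scala.collection.mutable

object IdempotentInsertDemo {
  // Append-only store with a side index of unique keys: inserting the same
  // element twice is a no-op, so replaying a snapshot is idempotent.
  final class AppendOnlyStore[K, A](key: A => K) {
    private val seen = mutable.Set.empty[K]
    private val entries = mutable.ArrayBuffer.empty[A]

    def insertAll(as: Iterable[A]): Unit =
      as.foreach(a => if (seen.add(key(a))) entries += a)

    def snapshot: Vector[A] = entries.toVector
  }
}
```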
!filter(elem) => acc - case (acc, elem) => - elem.mapping.maybeUid.fold(acc)(x => acc + PartyId(x)) - } + TopologyStore.determineValidParties(mappings, filterParty, filterParticipant) ) + } override def inspect( @@ -448,10 +460,10 @@ class InMemoryTopologyStore[+StoreId <: TopologyStoreId]( includeRejected: Boolean, )(implicit traceContext: TraceContext - ): FutureUnlessShutdown[GenericStoredTopologyTransactions] = + ): Source[GenericStoredTopologyTransaction, NotUsed] = { // asOfInclusive is the effective time of the transaction that onboarded the member. // 1. load all transactions with a sequenced time <= asOfInclusive, including proposals - filteredState( + val dataF = filteredState( blocking(synchronized { topologyTransactionStore.toSeq }), @@ -460,7 +472,9 @@ class InMemoryTopologyStore[+StoreId <: TopologyStoreId]( ).map( // 2. transform the result such that the validUntil fields are set as they were at maxEffective time of the snapshot _.asSnapshotAtMaxEffectiveTime - ) + ).map(stored => Source(stored.result)) + PekkoUtil.futureSourceUS(dataF) + } override def findUpcomingEffectiveChanges(asOfInclusive: CantonTimestamp)(implicit traceContext: TraceContext @@ -479,47 +493,6 @@ class InMemoryTopologyStore[+StoreId <: TopologyStoreId]( } } - protected def doFindCurrentAndUpcomingChangeDelays(sequencedTime: CantonTimestamp)(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Iterable[GenericStoredTopologyTransaction]] = FutureUnlessShutdown.pure { - blocking { - synchronized { - topologyTransactionStore - .filter(entry => - entry.mapping.code == SynchronizerParametersState.code && - (entry.from.value >= sequencedTime || entry.until.forall(_.value >= sequencedTime)) && - !entry.until.contains(entry.from) && - entry.sequenced.value < sequencedTime && - entry.rejected.isEmpty && - !entry.transaction.isProposal - ) - .map(_.toStoredTransaction) - } - } - } - - override def findExpiredChangeDelays( - validUntilMinInclusive: CantonTimestamp, - validUntilMaxExclusive: CantonTimestamp, - )(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Seq[TopologyStore.Change.TopologyDelay]] = - FutureUnlessShutdown.pure { - blocking { - synchronized { - topologyTransactionStore - .filter( - _.until.exists(until => - validUntilMinInclusive <= until.value && until.value < validUntilMaxExclusive - ) - ) - .map(_.toStoredTransaction) - .toSeq - .mapFilter(TopologyStore.Change.selectTopologyDelay) - } - } - } - override def maxTimestamp(sequencedTime: SequencedTime, includeRejected: Boolean)(implicit traceContext: TraceContext ): FutureUnlessShutdown[Option[(SequencedTime, EffectiveTime)]] = FutureUnlessShutdown.wrap { diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/ParticipantAttributes.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/ParticipantAttributes.scala index 8c18ea9f24..97006f44b0 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/ParticipantAttributes.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/ParticipantAttributes.scala @@ -8,6 +8,7 @@ import com.digitalasset.canton.data.CantonTimestamp final case class ParticipantAttributes( permission: ParticipantPermission, loginAfter: Option[CantonTimestamp] = None, + features: Seq[SynchronizerTrustCertificate.ParticipantTopologyFeatureFlag] = Seq.empty, onboarding: Boolean = false, ) { def canConfirm: Boolean = permission.canConfirm && !onboarding diff 
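Both store implementations now expose the essential state as a Pekko `Source`; the in-memory variant builds the snapshot eagerly and then flattens the resulting `Future[Source[...]]`. Assuming `PekkoUtil.futureSourceUS` is the shutdown-aware analogue of this flattening, the plain-Pekko version looks like:

```scala
import scala.concurrent.Future
import org.apache.pekko.NotUsed
import org.apache.pekko.stream.scaladsl.Source

object Streams {
  // Flatten a Future[Source[A, NotUsed]] into a Source[A, NotUsed].
  def flatten[A](fs: Future[Source[A, NotUsed]]): Source[A, NotUsed] =
    Source.futureSource(fs).mapMaterializedValue(_ => NotUsed)
}
```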
--git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/SignedTopologyTransaction.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/SignedTopologyTransaction.scala index 65f1582682..40d771e47f 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/SignedTopologyTransaction.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/SignedTopologyTransaction.scala @@ -281,6 +281,32 @@ object SignedTopologyTransaction isProposal = isProposal, )(protocolVersionRepresentativeFor(protocolVersion)) + def duplicateSigningKeys( + signatures: NonEmpty[Set[TopologyTransactionSignature]] + ): Set[Fingerprint] = signatures.toSeq + .map(_.authorizingLongTermKey) + .groupBy1(identity) + .filter(_._2.sizeIs > 1) + .keySet + + def create[Op <: TopologyChangeOp, M <: TopologyMapping]( + transaction: TopologyTransaction[Op, M], + signatures: NonEmpty[Set[TopologyTransactionSignature]], + isProposal: Boolean, + protocolVersion: ProtocolVersion, + ): Either[String, SignedTopologyTransaction[Op, M]] = { + val duplicates = duplicateSigningKeys(signatures) + Either.cond( + duplicates.isEmpty, + SignedTopologyTransaction[Op, M]( + transaction, + signatures, + isProposal, + )(protocolVersionRepresentativeFor(protocolVersion)), + s"Transaction has duplicate signatures: ${duplicates.mkString(", ")}", + ) + } + private def signAndCreateWithAssignedKeyUsages[Op <: TopologyChangeOp, M <: TopologyMapping]( transaction: TopologyTransaction[Op, M], keysWithUsage: NonEmpty[Map[Fingerprint, NonEmpty[Set[SigningKeyUsage]]]], diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala index 3fada4e94d..bda907630a 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala @@ -21,6 +21,7 @@ import com.digitalasset.canton.data.{CantonTimestamp, SynchronizerSuccessor} import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.networking.{Endpoint, UrlValidator} import com.digitalasset.canton.protocol.v30.Enums +import com.digitalasset.canton.protocol.v30.Enums.ParticipantFeatureFlag import com.digitalasset.canton.protocol.v30.NamespaceDelegation.Restriction import com.digitalasset.canton.protocol.v30.TopologyMapping.Mapping import com.digitalasset.canton.protocol.{ @@ -38,6 +39,7 @@ import com.digitalasset.canton.topology.transaction.DelegationRestriction.{ CanSignAllMappings, } import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction +import com.digitalasset.canton.topology.transaction.SynchronizerTrustCertificate.ParticipantTopologyFeatureFlag import com.digitalasset.canton.topology.transaction.TopologyMapping.RequiredAuth.* import com.digitalasset.canton.topology.transaction.TopologyMapping.{ Code, @@ -103,7 +105,7 @@ sealed trait TopologyMapping extends Product with Serializable with PrettyPrinti } object TopologyMapping { - private[transaction] def participantIdFromProtoPrimitive( + def participantIdFromProtoPrimitive( proto: String, fieldName: String, ): ParsingResult[ParticipantId] = { @@ -946,10 +948,15 @@ object PartyToKeyMapping extends 
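`duplicateSigningKeys` above lets `create` reject transactions carrying more than one signature from the same long-term key. The detection is a plain group-and-count; a sketch using `groupBy` in place of the codebase's NonEmpty-aware `groupBy1`:

```scala
object DuplicateKeysDemo {
  // Group signatures by long-term key fingerprint and keep the fingerprints
  // that occur more than once.
  def duplicateKeys(fingerprints: Seq[String]): Set[String] =
    fingerprints.groupBy(identity).collect {
      case (fp, occurrences) if occurrences.sizeIs > 1 => fp
    }.toSet

  // duplicateKeys(Seq("k1", "k2", "k1")) == Set("k1")
}
```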
TopologyMappingCompanion { } /** Participant synchronizer trust certificate + * @param featureFlags + * Protocol features supported by [[participantId]] on [[synchronizerId]]. Feature flags are used + * to add targeted support for a protocol feature or bugfix without requiring a new protocol + * version. Care must be taken to not create ledger forks when using such flags. */ final case class SynchronizerTrustCertificate( participantId: ParticipantId, synchronizerId: SynchronizerId, + featureFlags: Seq[ParticipantTopologyFeatureFlag] = Seq.empty, ) extends TopologyMapping { override def companion: SynchronizerTrustCertificate.type = SynchronizerTrustCertificate @@ -958,6 +965,7 @@ final case class SynchronizerTrustCertificate( v30.SynchronizerTrustCertificate( participantUid = participantId.uid.toProtoPrimitive, synchronizerId = synchronizerId.toProtoPrimitive, + featureFlags = featureFlags.map(_.toProtoV30), ) override def toProtoV30: v30.TopologyMapping = @@ -982,6 +990,40 @@ final case class SynchronizerTrustCertificate( } object SynchronizerTrustCertificate extends TopologyMappingCompanion { + final case class ParticipantTopologyFeatureFlag private (value: Int)( + name: Option[String] = None + ) { + def toProtoV30: v30.Enums.ParticipantFeatureFlag = + v30.Enums.ParticipantFeatureFlag.fromValue(value) + override def toString: String = name.getOrElse(s"UnrecognizedFeatureFlag($value)") + } + + object ParticipantTopologyFeatureFlag { + + /** Feature flag enabled when the participant supports the fix for a bug that incorrectly + * rejects externally signed transactions with a locally created contract used in a subview. + * See https://github.com/DACH-NY/canton/issues/27883 Used only in PV33. + */ + val ExternalSigningLocalContractsInSubview: ParticipantTopologyFeatureFlag = + ParticipantTopologyFeatureFlag( + v30.Enums.ParticipantFeatureFlag.PARTICIPANT_FEATURE_FLAG_PV33_EXTERNAL_SIGNING_LOCAL_CONTRACT_IN_SUBVIEW.value + )(Some("ExternalSigningLocalContractsInSubview")) + + val knownTopologyFeatureFlags: Seq[ParticipantTopologyFeatureFlag] = Seq( + ExternalSigningLocalContractsInSubview + ) + + def fromProtoV30( + valueP: v30.Enums.ParticipantFeatureFlag + ): Option[ParticipantTopologyFeatureFlag] = + knownTopologyFeatureFlags + .find(_.value == valueP.value) + .orElse( + Option.when(valueP != ParticipantFeatureFlag.PARTICIPANT_FEATURE_FLAG_UNSPECIFIED)( + ParticipantTopologyFeatureFlag(valueP.value)() + ) + ) + } def uniqueKey(participantId: ParticipantId, synchronizerId: SynchronizerId): MappingHash = TopologyMapping.buildUniqueKey(code)( @@ -999,9 +1041,11 @@ object SynchronizerTrustCertificate extends TopologyMappingCompanion { "participant_uid", ) synchronizerId <- SynchronizerId.fromProtoPrimitive(valueP.synchronizerId, "synchronizer_id") + featureFlags = valueP.featureFlags.flatMap(ParticipantTopologyFeatureFlag.fromProtoV30) } yield SynchronizerTrustCertificate( participantId, synchronizerId, + featureFlags, ) } @@ -1102,9 +1146,6 @@ final case class ParticipantSynchronizerPermission( override def companion: ParticipantSynchronizerPermission.type = ParticipantSynchronizerPermission - def toParticipantAttributes: ParticipantAttributes = - ParticipantAttributes(permission, loginAfter) - def toProto: v30.ParticipantSynchronizerPermission = v30.ParticipantSynchronizerPermission( synchronizerId = synchronizerId.toProtoPrimitive, @@ -1273,6 +1314,7 @@ final case class VettedPackage( validFromInclusive = validFromInclusive.map(_.toProtoTimestamp), validUntilExclusive = 
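`ParticipantTopologyFeatureFlag.fromProtoV30` above decodes tolerantly: known values map to named flags, unknown non-zero values are preserved as unrecognized flags (so flags set by newer peers survive a round trip), and `UNSPECIFIED` is dropped. A simplified sketch of that decision, with an illustrative flag value rather than the real enum number:

```scala
object FeatureFlagDemo {
  // Simplified stand-in for ParticipantTopologyFeatureFlag.
  final case class Flag(value: Int, name: Option[String] = None)

  val known: Seq[Flag] =
    Seq(Flag(1, Some("ExternalSigningLocalContractsInSubview")))

  // Known values map to named flags, unknown non-zero values are preserved as
  // unrecognized flags, and UNSPECIFIED (0) is dropped.
  def fromWire(value: Int): Option[Flag] =
    known.find(_.value == value).orElse(Option.when(value != 0)(Flag(value)))

  // fromWire(0) == None
  // fromWire(1) == Some(Flag(1, Some("ExternalSigningLocalContractsInSubview")))
  // fromWire(7) == Some(Flag(7, None))
}
```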
validUntilExclusive.map(_.toProtoTimestamp), ) + override protected def pretty: Pretty[VettedPackage.this.type] = prettyOfClass( param("packageId", _.packageId), paramIfDefined("validFromInclusive", _.validFromInclusive), diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingChecks.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingChecks.scala deleted file mode 100644 index cf4f15e7a3..0000000000 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingChecks.scala +++ /dev/null @@ -1,934 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.transaction - -import cats.Monad -import cats.data.EitherT -import cats.instances.order.* -import cats.syntax.either.* -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.crypto.KeyPurpose -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.protocol.{DynamicSynchronizerParameters, OnboardingRestriction} -import com.digitalasset.canton.topology.* -import com.digitalasset.canton.topology.processing.EffectiveTime -import com.digitalasset.canton.topology.store.* -import com.digitalasset.canton.topology.store.TopologyTransactionRejection.{ - InvalidTopologyMapping, - NamespaceAlreadyInUse, -} -import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction -import com.digitalasset.canton.topology.transaction.TopologyMapping.{Code, MappingHash} -import com.digitalasset.canton.topology.transaction.TopologyMappingChecks.PendingChangesLookup -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.EitherTUtil -import com.google.common.annotations.VisibleForTesting - -import scala.concurrent.ExecutionContext -import scala.math.Ordering.Implicits.* - -object TopologyMappingChecks { - type PendingChangesLookup = Map[MappingHash, GenericSignedTopologyTransaction] -} - -trait TopologyMappingChecks { - def checkTransaction( - effective: EffectiveTime, - toValidate: GenericSignedTopologyTransaction, - inStore: Option[GenericSignedTopologyTransaction], - pendingChanges: PendingChangesLookup, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] -} - -object NoopTopologyMappingChecks extends TopologyMappingChecks { - override def checkTransaction( - effective: EffectiveTime, - toValidate: GenericSignedTopologyTransaction, - inStore: Option[GenericSignedTopologyTransaction], - pendingChanges: PendingChangesLookup, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = - EitherTUtil.unitUS -} - -class ValidatingTopologyMappingChecks( - store: TopologyStore[TopologyStoreId], - val loggerFactory: NamedLoggerFactory, -)(implicit - executionContext: ExecutionContext -) extends TopologyMappingChecks - with NamedLogging { - - def checkTransaction( - effective: EffectiveTime, - toValidate: GenericSignedTopologyTransaction, - inStore: Option[GenericSignedTopologyTransaction], - pendingChangesLookup: PendingChangesLookup, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, 
TopologyTransactionRejection, Unit] = { - val checkFirstIsNotRemove = EitherTUtil - .condUnitET[FutureUnlessShutdown]( - !(toValidate.operation == TopologyChangeOp.Remove && inStore.isEmpty), - TopologyTransactionRejection.NoCorrespondingActiveTxToRevoke(toValidate.mapping), - ) - val checkReplaceIsNotMaxSerial = EitherTUtil.condUnitET[FutureUnlessShutdown]( - toValidate.operation == TopologyChangeOp.Remove || - (toValidate.operation == TopologyChangeOp.Replace && toValidate.serial < PositiveInt.MaxValue), - TopologyTransactionRejection.InvalidTopologyMapping( - s"The serial for a REPLACE must be less than ${PositiveInt.MaxValue}." - ), - ) - val checkRemoveDoesNotChangeMapping = EitherT.fromEither[FutureUnlessShutdown]( - inStore - .collect { - case expected - if toValidate.operation == TopologyChangeOp.Remove && toValidate.mapping != expected.mapping => - TopologyTransactionRejection - .RemoveMustNotChangeMapping(toValidate.mapping, expected.mapping) - } - .toLeft(()) - ) - - lazy val checkOpt = (toValidate.mapping.code, inStore.map(_.mapping.code)) match { - case (Code.SynchronizerTrustCertificate, None | Some(Code.SynchronizerTrustCertificate)) => - val checkReplace = toValidate - .select[TopologyChangeOp.Replace, SynchronizerTrustCertificate] - .map( - checkSynchronizerTrustCertificateReplace( - effective, - _, - inStore.flatMap(_.selectMapping[SynchronizerTrustCertificate]), - pendingChangesLookup, - ) - ) - - val checkRemove = toValidate - .select[TopologyChangeOp.Remove, SynchronizerTrustCertificate] - .map(checkSynchronizerTrustCertificateRemove(effective, _, pendingChangesLookup)) - - checkReplace.orElse(checkRemove) - - case (Code.PartyToParticipant, None | Some(Code.PartyToParticipant)) => - toValidate - .select[TopologyChangeOp.Replace, PartyToParticipant] - .map( - checkPartyToParticipant( - effective, - _, - inStore.flatMap(_.select[TopologyChangeOp.Replace, PartyToParticipant]), - pendingChangesLookup, - ) - ) - - case (Code.OwnerToKeyMapping, None | Some(Code.OwnerToKeyMapping)) => - val checkReplace = toValidate - .select[TopologyChangeOp.Replace, OwnerToKeyMapping] - .map(checkOwnerToKeyMappingReplace) - - val checkRemove = toValidate - .select[TopologyChangeOp.Remove, OwnerToKeyMapping] - .map( - checkOwnerToKeyMappingRemove( - effective, - _, - pendingChangesLookup, - ) - ) - - checkReplace.orElse(checkRemove) - - case (Code.MediatorSynchronizerState, None | Some(Code.MediatorSynchronizerState)) => - toValidate - .select[TopologyChangeOp.Replace, MediatorSynchronizerState] - .map( - checkMediatorSynchronizerStateReplace( - effective, - _, - inStore.flatMap(_.select[TopologyChangeOp.Replace, MediatorSynchronizerState]), - pendingChangesLookup, - ) - ) - case (Code.SequencerSynchronizerState, None | Some(Code.SequencerSynchronizerState)) => - toValidate - .select[TopologyChangeOp.Replace, SequencerSynchronizerState] - .map( - checkSequencerSynchronizerStateReplace( - effective, - _, - inStore.flatMap(_.select[TopologyChangeOp.Replace, SequencerSynchronizerState]), - pendingChangesLookup, - ) - ) - - case ( - Code.DecentralizedNamespaceDefinition, - None | Some(Code.DecentralizedNamespaceDefinition), - ) => - toValidate - .select[TopologyChangeOp.Replace, DecentralizedNamespaceDefinition] - .map( - checkDecentralizedNamespaceDefinitionReplace( - effective, - _, - inStore.flatMap(_.select[TopologyChangeOp, DecentralizedNamespaceDefinition]), - pendingChangesLookup, - ) - ) - - case ( - Code.NamespaceDelegation, - None | Some(Code.NamespaceDelegation), - ) => - toValidate - 
.select[TopologyChangeOp.Replace, NamespaceDelegation] - .map(checkNamespaceDelegationReplace(effective, _, pendingChangesLookup)) - - case (Code.SynchronizerParametersState, None | Some(Code.SynchronizerParametersState)) => - toValidate - .select[TopologyChangeOp.Remove, SynchronizerParametersState] - .map(_ => - EitherT.leftT[FutureUnlessShutdown, Unit]( - TopologyTransactionRejection - .CannotRemoveMapping(Code.SynchronizerParametersState): TopologyTransactionRejection - ) - ) - - case ( - Code.SynchronizerUpgradeAnnouncement, - None | Some(Code.SynchronizerUpgradeAnnouncement), - ) => - toValidate - .select[TopologyChangeOp.Replace, SynchronizerUpgradeAnnouncement] - .map(checkSynchronizerUpgradeAnnouncement(effective, _)) - - case _otherwise => None - } - - for { - _ <- checkFirstIsNotRemove - _ <- checkReplaceIsNotMaxSerial - _ <- checkRemoveDoesNotChangeMapping - _ <- checkNoOngoingSynchronizerUpgrade(effective, toValidate, pendingChangesLookup) - _ <- checkOpt.getOrElse(EitherTUtil.unitUS) - } yield () - - } - - private val mappingsAllowedDuringSynchronizerUpgrade = - TopologyMapping.Code.logicalSynchronizerUpgradeMappings - - /** Check that the topology state is not frozen if this store is a synchronizer store. All other - * stores are not subject to freezing the topology state. - */ - private def checkNoOngoingSynchronizerUpgrade( - effective: EffectiveTime, - toValidate: GenericSignedTopologyTransaction, - pendingChangesLookup: PendingChangesLookup, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = - Monad[EitherT[FutureUnlessShutdown, TopologyTransactionRejection, *]].whenA( - store.storeId.isSynchronizerStore - )(for { - results <- loadFromStore( - effective, - Set(Code.SynchronizerUpgradeAnnouncement), - pendingChangesLookup, - ) - announcements = NonEmpty.from( - results.flatMap(_.selectMapping[SynchronizerUpgradeAnnouncement].toList) - ) - _ <- announcements match { - case None => EitherTUtil.unitUS[TopologyTransactionRejection] - case Some(announcement) => - EitherTUtil.condUnitET[FutureUnlessShutdown]( - mappingsAllowedDuringSynchronizerUpgrade.contains(toValidate.mapping.code), - TopologyTransactionRejection.OngoingSynchronizerUpgrade( - announcement.head1.mapping.successorSynchronizerId.logical - ): TopologyTransactionRejection, - ) - } - } yield {}) - - private def loadHistoryFromStore( - effectiveTime: EffectiveTime, - code: Code, - pendingChangesLookup: PendingChangesLookup, - maxSerialExclusive: PositiveInt, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Seq[ - GenericSignedTopologyTransaction - ]] = - EitherT.right[TopologyTransactionRejection]( - store - .inspect( - proposals = false, - // effective time has exclusive semantics, but TimeQuery.Range.until has always had inclusive semantics. 
- // therefore, we take the immediatePredecessor here - timeQuery = - TimeQuery.Range(from = None, until = Some(effectiveTime.value.immediatePredecessor)), - asOfExclusiveO = None, - op = None, - types = Seq(code), - idFilter = None, - namespaceFilter = None, - ) - .map { storedTxs => - val pending = pendingChangesLookup.values - .filter(pendingTx => - !pendingTx.isProposal && pendingTx.transaction.mapping.code == code - ) - val allTransactions = (storedTxs.result.map(_.transaction) ++ pending) - // only look at the >history< of the mapping (up to exclusive the max serial), because - // otherwise it would be looking also at the future, which could lead to the wrong conclusion - // (eg detecting a member as "rejoining". - allTransactions.filter(_.serial < maxSerialExclusive) - } - ) - - @VisibleForTesting - private[transaction] def loadFromStore( - effective: EffectiveTime, - codes: Set[Code], - pendingChangesLookup: PendingChangesLookup, - filterUid: Option[NonEmpty[Seq[UniqueIdentifier]]] = None, - filterNamespace: Option[NonEmpty[Seq[Namespace]]] = None, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Seq[ - SignedTopologyTransaction[TopologyChangeOp.Replace, TopologyMapping] - ]] = - EitherT - .right[TopologyTransactionRejection]( - store - .findPositiveTransactions( - effective.value, - asOfInclusive = false, - isProposal = false, - types = codes.toSeq, - filterUid = filterUid, - filterNamespace = filterNamespace, - ) - .map { storedTxs => - val latestStored = storedTxs.collectLatestByUniqueKey.signedTransactions - - // we need to proactively look up the pending changes that match the filter, - // because there might be a pending transaction that isn't in the store yet (eg. serial=1) - val pendingChangesMatchingFilter = - pendingChangesLookup.values.filter { tx => - // proposals shouldn't end up in PendingChangesLookup, but better to emulate what the store filter does - !tx.isProposal && - codes.contains(tx.mapping.code) && - filterNamespace.forall(_.exists(_ == tx.mapping.namespace)) && - filterUid.forall(uids => tx.mapping.maybeUid.exists(uids.contains(_))) - } - - TopologyTransactions - .collectLatestByUniqueKey( - Seq.empty[GenericSignedTopologyTransaction] ++ - latestStored ++ pendingChangesMatchingFilter - ) - .flatMap(_.selectOp[TopologyChangeOp.Replace]) - } - ) - - private def ensureParticipantDoesNotHostParties( - effective: EffectiveTime, - participantId: ParticipantId, - pendingChangesLookup: PendingChangesLookup, - )(implicit traceContext: TraceContext) = - for { - storedPartyToParticipantMappings <- loadFromStore( - effective, - Set(Code.PartyToParticipant), - pendingChangesLookup, - ) - participantHostsParties = storedPartyToParticipantMappings.view - .flatMap(_.selectMapping[PartyToParticipant]) - .collect { - case tx if tx.mapping.participants.exists(_.participantId == participantId) => - tx.mapping.partyId - } - .toSeq - _ <- EitherTUtil.condUnitET[FutureUnlessShutdown][TopologyTransactionRejection]( - participantHostsParties.isEmpty, - TopologyTransactionRejection.ParticipantStillHostsParties( - participantId, - participantHostsParties, - ), - ) - } yield () - - private def checkSynchronizerTrustCertificateRemove( - effective: EffectiveTime, - toValidate: SignedTopologyTransaction[TopologyChangeOp, SynchronizerTrustCertificate], - pendingChangesLookup: PendingChangesLookup, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = - /* Checks that the 
DTC is not being removed if the participant still hosts a party. - * This check is potentially quite expensive: we have to fetch all party to participant mappings, because - * we cannot index by the hosting participants. - */ - ensureParticipantDoesNotHostParties( - effective, - toValidate.mapping.participantId, - pendingChangesLookup, - ) - - private def loadSynchronizerParameters( - effective: EffectiveTime, - pendingChangesLookup: PendingChangesLookup, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, DynamicSynchronizerParameters] = - loadFromStore(effective, Set(Code.SynchronizerParametersState), pendingChangesLookup) - .subflatMap { synchronizerParamCandidates => - val params = synchronizerParamCandidates.view - .flatMap(_.selectMapping[SynchronizerParametersState]) - .map(_.mapping.parameters) - .toList - params match { - case Nil => - logger.error( - "Can not determine synchronizer parameters." - ) - Left(TopologyTransactionRejection.MissingSynchronizerParameters(effective)) - case param :: Nil => Right(param) - case param :: rest => - logger.error( - s"Multiple synchronizer parameters at $effective ${rest.size + 1}. Using first one: $param." - ) - Right(param) - } - } - - private def checkSynchronizerTrustCertificateReplace( - effective: EffectiveTime, - toValidate: SignedTopologyTransaction[TopologyChangeOp.Replace, SynchronizerTrustCertificate], - inStore: Option[SignedTopologyTransaction[TopologyChangeOp, SynchronizerTrustCertificate]], - pendingChangesLookup: PendingChangesLookup, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = { - // Checks if the participant is allowed to submit its synchronizer trust certificate - val participantId = toValidate.mapping.participantId - - def loadOnboardingRestriction() - : EitherT[FutureUnlessShutdown, TopologyTransactionRejection, OnboardingRestriction] = - loadSynchronizerParameters(effective, pendingChangesLookup).map(_.onboardingRestriction) - - def checkSynchronizerIsNotLocked(restriction: OnboardingRestriction) = - EitherTUtil.condUnitET[FutureUnlessShutdown]( - restriction.isOpen, { - logger.info( - s"Synchronizer is locked at $effective. Rejecting onboarding of new participant ${toValidate.mapping}" - ) - TopologyTransactionRejection - .OnboardingRestrictionInPlace( - participantId, - restriction, - None, - ) - }, - ) - - def checkParticipantIsNotRestricted( - restrictions: OnboardingRestriction - ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = - // using the flags to check for restrictions instead of == UnrestrictedOpen to be more - // future proof in case we will add additional restrictions in the future and would miss a case, - // because there is no exhaustiveness check without full pattern matching - if (restrictions.isUnrestricted && restrictions.isOpen) { - // No further checks to be done. 
any participant can join the synchronizer - EitherTUtil.unitUS - } else if (restrictions.isRestricted && restrictions.isOpen) { - // Only participants with explicit permission may join the synchronizer - loadFromStore( - effective, - Set(Code.ParticipantSynchronizerPermission), - pendingChangesLookup, - filterUid = Some(NonEmpty(Seq, toValidate.mapping.participantId.uid)), - ).subflatMap { storedPermissions => - val isAllowlisted = storedPermissions.view - .flatMap(_.selectMapping[ParticipantSynchronizerPermission]) - .collectFirst { - case x if x.mapping.synchronizerId == toValidate.mapping.synchronizerId => - x.mapping.loginAfter - } - isAllowlisted match { - case Some(Some(loginAfter)) if loginAfter > effective.value => - // this should not happen except under race conditions, as sequencers should not let participants login - logger.warn( - s"Rejecting onboarding of ${toValidate.mapping.participantId} as the participant still has a login ban until $loginAfter" - ) - Left( - TopologyTransactionRejection - .OnboardingRestrictionInPlace(participantId, restrictions, Some(loginAfter)) - ) - case Some(_) => - logger.info( - s"Accepting onboarding of ${toValidate.mapping.participantId} as it is allow listed" - ) - Either.unit - case None => - logger.info( - s"Rejecting onboarding of ${toValidate.mapping.participantId} as it is not allow listed as of ${effective.value}" - ) - Left( - TopologyTransactionRejection - .OnboardingRestrictionInPlace(participantId, restrictions, None) - ) - } - } - } else { - EitherT.leftT( - TopologyTransactionRejection - .OnboardingRestrictionInPlace(participantId, restrictions, None) - ) - } - - def checkPartyIdDoesntExist() = for { - ptps <- loadFromStore( - effective, - Set(Code.PartyToParticipant), - pendingChangesLookup, - filterUid = Some(NonEmpty(Seq, participantId.uid)), - ) - conflictingPartyIdO = ptps - .flatMap(_.selectMapping[PartyToParticipant]) - .headOption - .map(_.mapping) - _ <- conflictingPartyIdO match { - case Some(ptp) => - isExplicitAdminPartyAllocation( - ptp, - TopologyTransactionRejection.ParticipantIdConflictWithPartyId( - participantId, - ptp.partyId, - ), - ) - case None => EitherTUtil.unitUS[TopologyTransactionRejection] - } - } yield () - - def checkParticipantDoesNotRejoin() = EitherTUtil.condUnitET[FutureUnlessShutdown]( - inStore.forall(_.operation != TopologyChangeOp.Remove), - TopologyTransactionRejection.MembersCannotRejoinSynchronizer( - Seq(toValidate.mapping.participantId) - ), - ) - - for { - _ <- checkParticipantDoesNotRejoin() - _ <- checkPartyIdDoesntExist() - restriction <- loadOnboardingRestriction() - _ <- checkSynchronizerIsNotLocked(restriction) - _ <- checkParticipantIsNotRestricted(restriction) - } yield () - } - private val requiredKeyPurposes = Set(KeyPurpose.Encryption, KeyPurpose.Signing) - - /** Checks the following: - * - threshold is less than or equal to the number of confirming participants - * - new participants have a valid DTC - * - new participants have an OTK with at least 1 signing key and 1 encryption key - */ - private def checkPartyToParticipant( - effective: EffectiveTime, - toValidate: SignedTopologyTransaction[TopologyChangeOp.Replace, PartyToParticipant], - inStore: Option[SignedTopologyTransaction[TopologyChangeOp.Replace, PartyToParticipant]], - pendingChangesLookup: PendingChangesLookup, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = { - import toValidate.mapping - def checkParticipants() = { - val newParticipants = 
mapping.participants.map(_.participantId).toSet -- - inStore.toList.flatMap(_.mapping.participants.map(_.participantId)) - for { - participantTransactions <- loadFromStore( - effective, - Set(Code.SynchronizerTrustCertificate, Code.OwnerToKeyMapping), - pendingChangesLookup, - filterUid = Some(NonEmpty(Seq, mapping.partyId.uid) ++ newParticipants.toSeq.map(_.uid)), - ) - - // if we found a DTC with the same uid as the partyId, - // check that the PTP is an explicit admin party allocation, otherwise reject the PTP - foundAdminPartyWithSameUID = participantTransactions - .flatMap(_.selectMapping[SynchronizerTrustCertificate]) - .exists(_.mapping.participantId.uid == mapping.partyId.uid) - _ <- EitherTUtil.ifThenET(foundAdminPartyWithSameUID)( - isExplicitAdminPartyAllocation( - mapping, - TopologyTransactionRejection.PartyIdConflictWithAdminParty( - mapping.partyId - ), - ) - ) - - // check that all participants are known on the synchronizer - missingParticipantCertificates = newParticipants -- participantTransactions - .flatMap(_.selectMapping[SynchronizerTrustCertificate]) - .map(_.mapping.participantId) - - _ <- EitherTUtil.condUnitET[FutureUnlessShutdown][TopologyTransactionRejection]( - missingParticipantCertificates.isEmpty, - TopologyTransactionRejection.UnknownMembers(missingParticipantCertificates.toSeq), - ) - - // check that all known participants have keys registered - participantsWithInsufficientKeys = - newParticipants -- participantTransactions - .flatMap(_.selectMapping[OwnerToKeyMapping]) - .view - .filter { tx => - val keyPurposes = tx.mapping.keys.map(_.purpose).toSet - requiredKeyPurposes.forall(keyPurposes) - } - .map(_.mapping.member) - .collect { case pid: ParticipantId => pid } - .toSeq - - _ <- EitherTUtil.condUnitET[FutureUnlessShutdown][TopologyTransactionRejection]( - participantsWithInsufficientKeys.isEmpty, - TopologyTransactionRejection.InsufficientKeys(participantsWithInsufficientKeys.toSeq), - ) - } yield { - () - } - } - - for { - _ <- checkParticipants() - } yield () - - } - - private def checkOwnerToKeyMappingReplace( - toValidate: SignedTopologyTransaction[TopologyChangeOp.Replace, OwnerToKeyMapping] - ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = { - // check for at least 1 signing and 1 encryption key - val keysByPurpose = toValidate.mapping.keys.forgetNE.groupBy(_.purpose) - val signingKeys = keysByPurpose.getOrElse(KeyPurpose.Signing, Seq.empty) - - val minimumSigningKeyRequirement = - EitherTUtil.condUnitET[FutureUnlessShutdown][TopologyTransactionRejection]( - // all nodes require signing keys - signingKeys.nonEmpty, - TopologyTransactionRejection.InvalidTopologyMapping( - "OwnerToKeyMapping must contain at least 1 signing key." - ), - ) - - val encryptionKeys = keysByPurpose.getOrElse(KeyPurpose.Encryption, Seq.empty) - val isParticipant = toValidate.mapping.member.code == ParticipantId.Code - - val minimumEncryptionKeyRequirement = - EitherTUtil.condUnitET[FutureUnlessShutdown][TopologyTransactionRejection]( - // all nodes require signing keys - // non-participants don't need encryption keys - !isParticipant || encryptionKeys.nonEmpty, - TopologyTransactionRejection.InvalidTopologyMapping( - "OwnerToKeyMapping for participants must contain at least 1 encryption key." 
- ), - ) - minimumSigningKeyRequirement.flatMap(_ => minimumEncryptionKeyRequirement) - } - - private def checkOwnerToKeyMappingRemove( - effective: EffectiveTime, - toValidate: SignedTopologyTransaction[TopologyChangeOp.Remove, OwnerToKeyMapping], - pendingChangesLookup: PendingChangesLookup, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = - toValidate.mapping.member match { - case participantId: ParticipantId => - ensureParticipantDoesNotHostParties(effective, participantId, pendingChangesLookup) - case _ => EitherTUtil.unitUS - } - - private def checkMediatorSynchronizerStateReplace( - effectiveTime: EffectiveTime, - toValidate: SignedTopologyTransaction[TopologyChangeOp.Replace, MediatorSynchronizerState], - inStore: Option[ - SignedTopologyTransaction[TopologyChangeOp.Replace, MediatorSynchronizerState] - ], - pendingChangesLookup: PendingChangesLookup, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = { - val newMediators = (toValidate.mapping.allMediatorsInGroup.toSet -- inStore.toList.flatMap( - _.mapping.allMediatorsInGroup - )).map(identity[Member]) - - def checkMediatorNotAlreadyAssignedToOtherGroup() = - for { - result <- loadFromStore( - effectiveTime, - Set(Code.MediatorSynchronizerState), - pendingChangesLookup, - ) - mediatorsAlreadyAssignedToGroups = result - .flatMap(_.selectMapping[MediatorSynchronizerState]) - // only look at other groups to avoid a race between validating this proposal and - // having persisted the same transaction as fully authorized from other synchronizer owners. - .filter(_.mapping.group != toValidate.mapping.group) - .flatMap(tx => - tx.mapping.allMediatorsInGroup.collect { - case med if newMediators.contains(med) => med -> tx.mapping.group - } - ) - .toMap - _ <- EitherTUtil.condUnitET[FutureUnlessShutdown]( - mediatorsAlreadyAssignedToGroups.isEmpty, - TopologyTransactionRejection.MediatorsAlreadyInOtherGroups( - toValidate.mapping.group, - mediatorsAlreadyAssignedToGroups, - ): TopologyTransactionRejection, - ) - } yield () - - def checkMediatorsDontRejoin() - : EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = - loadHistoryFromStore( - effectiveTime, - code = Code.MediatorSynchronizerState, - pendingChangesLookup, - toValidate.serial, - ) - .flatMap { mdsHistory => - val allMediatorsPreviouslyOnSynchronizer = mdsHistory.view - .flatMap(_.selectMapping[MediatorSynchronizerState]) - .flatMap(_.mapping.allMediatorsInGroup) - .toSet[Member] - val rejoiningMediators = newMediators.intersect(allMediatorsPreviouslyOnSynchronizer) - EitherTUtil.condUnitET( - rejoiningMediators.isEmpty, - TopologyTransactionRejection.MembersCannotRejoinSynchronizer(rejoiningMediators.toSeq), - ) - } - - for { - _ <- checkMediatorNotAlreadyAssignedToOtherGroup() - _ <- checkMediatorsDontRejoin() - } yield () - } - - private def checkSequencerSynchronizerStateReplace( - effectiveTime: EffectiveTime, - toValidate: SignedTopologyTransaction[TopologyChangeOp.Replace, SequencerSynchronizerState], - inStore: Option[ - SignedTopologyTransaction[TopologyChangeOp.Replace, SequencerSynchronizerState] - ], - pendingChangesLookup: PendingChangesLookup, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = { - val newSequencers = (toValidate.mapping.allSequencers.toSet -- inStore.toList.flatMap( - _.mapping.allSequencers - )).map(identity[Member]) - - def 
checkSequencersDontRejoin() - : EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = - loadHistoryFromStore( - effectiveTime, - code = Code.SequencerSynchronizerState, - pendingChangesLookup, - toValidate.serial, - ) - .flatMap { sdsHistory => - val allSequencersPreviouslyOnSynchronizer = sdsHistory.view - .flatMap(_.selectMapping[SequencerSynchronizerState]) - .flatMap(_.mapping.allSequencers) - .toSet[Member] - val rejoiningSequencers = newSequencers.intersect(allSequencersPreviouslyOnSynchronizer) - EitherTUtil.condUnitET( - rejoiningSequencers.isEmpty, - TopologyTransactionRejection - .MembersCannotRejoinSynchronizer(rejoiningSequencers.toSeq), - ) - } - - checkSequencersDontRejoin() - } - - private def checkDecentralizedNamespaceDefinitionReplace( - effective: EffectiveTime, - toValidate: SignedTopologyTransaction[ - TopologyChangeOp.Replace, - DecentralizedNamespaceDefinition, - ], - inStore: Option[SignedTopologyTransaction[ - TopologyChangeOp, - DecentralizedNamespaceDefinition, - ]], - pendingChangesLookup: PendingChangesLookup, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = { - - def checkDecentralizedNamespaceDerivedFromOwners() - : EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = - if (inStore.isEmpty) { - // The very first decentralized namespace definition must have namespace computed from the owners - EitherTUtil.condUnitET( - toValidate.mapping.namespace == DecentralizedNamespaceDefinition - .computeNamespace(toValidate.mapping.owners), - InvalidTopologyMapping( - s"The decentralized namespace ${toValidate.mapping.namespace} is not derived from the owners ${toValidate.mapping.owners.toSeq.sorted}" - ), - ) - } else { - EitherTUtil.unitUS - } - - def checkNoClashWithNamespaceDelegations()(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = - loadFromStore( - effective, - Set(Code.NamespaceDelegation), - pendingChangesLookup, - filterUid = None, - filterNamespace = Some(NonEmpty(Seq, toValidate.mapping.namespace)), - ).flatMap { namespaceDelegations => - EitherTUtil.condUnitET( - namespaceDelegations.isEmpty, - NamespaceAlreadyInUse(toValidate.mapping.namespace), - ) - } - - def checkOwnersAreNormalNamespaces()(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = - loadFromStore( - effective, - Set(Code.NamespaceDelegation), - pendingChangesLookup, - filterUid = None, - filterNamespace = Some(toValidate.mapping.owners.toSeq), - ).flatMap { namespaceDelegations => - val foundNSDs = namespaceDelegations - .filter(NamespaceDelegation.isRootCertificate) - .map(_.mapping.namespace) - .toSet - val missingNSDs = toValidate.mapping.owners -- foundNSDs - - EitherTUtil.condUnitET( - missingNSDs.isEmpty, - InvalidTopologyMapping( - s"No root certificate found for ${missingNSDs.toSeq.sorted.mkString(", ")}" - ), - ) - } - - for { - _ <- checkDecentralizedNamespaceDerivedFromOwners() - _ <- checkNoClashWithNamespaceDelegations() - _ <- checkOwnersAreNormalNamespaces() - } yield () - } - - private def checkNamespaceDelegationReplace( - effective: EffectiveTime, - toValidate: SignedTopologyTransaction[ - TopologyChangeOp.Replace, - NamespaceDelegation, - ], - pendingChangesLookup: PendingChangesLookup, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = { - def checkNoClashWithDecentralizedNamespaces()(implicit - 
traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = - loadFromStore( - effective, - Set(Code.DecentralizedNamespaceDefinition), - pendingChangesLookup, - filterUid = None, - filterNamespace = Some(NonEmpty(Seq, toValidate.mapping.namespace)), - ).flatMap { dns => - val foundDecentralizedNamespaceWithSameNamespace = dns.nonEmpty - EitherTUtil.condUnitET( - !foundDecentralizedNamespaceWithSameNamespace, - NamespaceAlreadyInUse(toValidate.mapping.namespace), - ) - } - - checkNoClashWithDecentralizedNamespaces() - } - - private def checkSynchronizerUpgradeAnnouncement( - effective: EffectiveTime, - toValidate: SignedTopologyTransaction[ - TopologyChangeOp.Replace, - SynchronizerUpgradeAnnouncement, - ], - ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = for { - _ <- store.storeId.forSynchronizer match { - case Some(psid) => - EitherTUtil.condUnitET[FutureUnlessShutdown][TopologyTransactionRejection]( - psid < toValidate.mapping.successorSynchronizerId, - TopologyTransactionRejection.InvalidSynchronizerSuccessor( - psid, - toValidate.mapping.successorSynchronizerId, - ), - ) - case None => EitherTUtil.unitUS - } - _ <- EitherTUtil.condUnitET[FutureUnlessShutdown][TopologyTransactionRejection]( - toValidate.mapping.upgradeTime > effective.value, - TopologyTransactionRejection.InvalidUpgradeTime( - toValidate.mapping.successorSynchronizerId.logical, - effective = effective, - upgradeTime = toValidate.mapping.upgradeTime, - ), - ) - - } yield () - - /** Checks whether the given PTP is considered an explicit admin party allocation. This is true if - * all following conditions are met: - * - threshold == 1 - * - there is only a single hosting participant - * - with Submission permission - * - participantId.adminParty == partyId - */ - private def isExplicitAdminPartyAllocation( - ptp: PartyToParticipant, - rejection: => TopologyTransactionRejection, - ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = { - // check that the PTP doesn't try to allocate a party that is the same as an already existing admin party. - // we allow an explicit allocation of an admin like party though on the same participant - val singleHostingParticipant = - ptp.participants.sizeCompare(1) == 0 - - val partyIsAdminParty = - ptp.participants.forall(participant => - participant.participantId.adminParty == ptp.partyId && - participant.permission == ParticipantPermission.Submission - ) - - // technically we don't need to check for threshold == 1, because we already require that there is only a single participant - // and the threshold may not exceed the number of participants. 
this is checked in PartyToParticipant.create - val threshold1 = ptp.threshold == PositiveInt.one - - EitherTUtil.condUnitET[FutureUnlessShutdown]( - singleHostingParticipant && partyIsAdminParty && threshold1, - rejection, - ) - } - -} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyTransaction.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyTransaction.scala index 8daea8d259..e09604cbbf 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyTransaction.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyTransaction.scala @@ -185,6 +185,7 @@ object TopologyTransaction override val name: String = "TopologyTransaction" type GenericTopologyTransaction = TopologyTransaction[TopologyChangeOp, TopologyMapping] + type PositiveTopologyTransaction = TopologyTransaction[TopologyChangeOp.Replace, TopologyMapping] val versioningTable: VersioningTable = VersioningTable( diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/checks/OptionalTopologyMappingChecks.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/checks/OptionalTopologyMappingChecks.scala new file mode 100644 index 0000000000..76d65d2ffb --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/checks/OptionalTopologyMappingChecks.scala @@ -0,0 +1,276 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 +
+package com.digitalasset.canton.topology.transaction.checks +
+import cats.data.EitherT +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.topology.processing.EffectiveTime +import com.digitalasset.canton.topology.store.{ + TimeQuery, + TopologyStore, + TopologyStoreId, + TopologyTransactionRejection, +} +import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction +import com.digitalasset.canton.topology.transaction.TopologyMapping.Code +import com.digitalasset.canton.topology.transaction.checks.TopologyMappingChecks.PendingChangesLookup +import com.digitalasset.canton.topology.transaction.{ + MediatorSynchronizerState, + OwnerToKeyMapping, + PartyToParticipant, + SequencerSynchronizerState, + SignedTopologyTransaction, + SynchronizerTrustCertificate, + TopologyChangeOp, +} +import com.digitalasset.canton.topology.{Member, ParticipantId} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.EitherTUtil +
+import scala.concurrent.ExecutionContext +
+/** Topology mapping checks preventing a user from accidentally breaking their node + * + * The following checks run as part of the topology manager write step and are there to avoid + * breaking the topology state by accident. 
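+ * + * As an illustrative sketch only (assuming a `store` and `loggerFactory` are in scope and an + * implicit `ExecutionContext` is available; how the node actually wires the checks up may + * differ), these optional checks can be run together with the required ones via + * [[TopologyMappingChecks.All]]: + * {{{ + * val checks = new TopologyMappingChecks.All( + * new RequiredTopologyMappingChecks(store, loggerFactory), + * new OptionalTopologyMappingChecks(store, loggerFactory), + * ) + * }}}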
+ */ +class OptionalTopologyMappingChecks( + store: TopologyStore[TopologyStoreId], + loggerFactory: NamedLoggerFactory, +)(implicit + executionContext: ExecutionContext +) extends TopologyMappingChecksWithStore(store, loggerFactory) { + + private def loadHistoryFromStore( + effectiveTime: EffectiveTime, + code: Code, + pendingChangesLookup: PendingChangesLookup, + maxSerialExclusive: PositiveInt, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Seq[ + GenericSignedTopologyTransaction + ]] = + EitherT.right[TopologyTransactionRejection]( + store + .inspect( + proposals = false, + // effective time has exclusive semantics, but TimeQuery.Range.until has always had inclusive semantics. + // therefore, we take the immediatePredecessor here + timeQuery = + TimeQuery.Range(from = None, until = Some(effectiveTime.value.immediatePredecessor)), + asOfExclusiveO = None, + op = None, + types = Seq(code), + idFilter = None, + namespaceFilter = None, + ) + .map { storedTxs => + val pending = pendingChangesLookup.values + .filter(pendingTx => + !pendingTx.currentTx.isProposal && pendingTx.currentTx.transaction.mapping.code == code + ) + .map(_.currentTx) + val allTransactions = (storedTxs.result.map(_.transaction) ++ pending) + // only look at the >history< of the mapping (up to, but excluding, the max serial), because + // otherwise it would be looking also at the future, which could lead to the wrong conclusion + // (e.g. detecting a member as "rejoining"). + allTransactions.filter(_.serial < maxSerialExclusive) + } + ) + + def checkTransaction( + effective: EffectiveTime, + toValidate: GenericSignedTopologyTransaction, + inStore: Option[GenericSignedTopologyTransaction], + pendingChangesLookup: PendingChangesLookup, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = { + lazy val checkOpt = (toValidate.mapping.code, inStore.map(_.mapping.code)) match { + case (Code.OwnerToKeyMapping, None | Some(Code.OwnerToKeyMapping)) => + val checkRemove = toValidate + .select[TopologyChangeOp.Remove, OwnerToKeyMapping] + .map( + checkOwnerToKeyMappingRemove( + effective, + _, + pendingChangesLookup, + ) + ) + checkRemove + case (Code.SynchronizerTrustCertificate, None | Some(Code.SynchronizerTrustCertificate)) => + val checkRemove = toValidate + .select[TopologyChangeOp.Remove, SynchronizerTrustCertificate] + .map(checkSynchronizerTrustCertificateRemove(effective, _, pendingChangesLookup)) + + checkRemove + + case (Code.MediatorSynchronizerState, None | Some(Code.MediatorSynchronizerState)) => + toValidate + .select[TopologyChangeOp.Replace, MediatorSynchronizerState] + .map( + checkMediatorSynchronizerStateReplace( + effective, + _, + inStore.flatMap(_.select[TopologyChangeOp.Replace, MediatorSynchronizerState]), + pendingChangesLookup, + ) + ) + case (Code.SequencerSynchronizerState, None | Some(Code.SequencerSynchronizerState)) => + toValidate + .select[TopologyChangeOp.Replace, SequencerSynchronizerState] + .map( + checkSequencerSynchronizerStateReplace( + effective, + _, + inStore.flatMap(_.select[TopologyChangeOp.Replace, SequencerSynchronizerState]), + pendingChangesLookup, + ) + ) + case _otherwise => None + } + checkOpt.getOrElse(EitherTUtil.unitUS) + } + + private def ensureParticipantDoesNotHostParties( + effective: EffectiveTime, + participantId: ParticipantId, + pendingChangesLookup: PendingChangesLookup, + )(implicit traceContext: TraceContext) = + for { + storedPartyToParticipantMappings 
<- loadFromStore( + effective, + Set(Code.PartyToParticipant), + pendingChangesLookup.values, + ) + participantHostsParties = storedPartyToParticipantMappings.view + .flatMap(_.selectMapping[PartyToParticipant]) + .collect { + case tx if tx.mapping.participants.exists(_.participantId == participantId) => + tx.mapping.partyId + } + .toSeq + _ <- EitherTUtil.condUnitET[FutureUnlessShutdown][TopologyTransactionRejection]( + participantHostsParties.isEmpty, + TopologyTransactionRejection.OptionalMapping.ParticipantStillHostsParties( + participantId, + participantHostsParties, + ), + ) + } yield () + + private def checkOwnerToKeyMappingRemove( + effective: EffectiveTime, + toValidate: SignedTopologyTransaction[TopologyChangeOp.Remove, OwnerToKeyMapping], + pendingChangesLookup: PendingChangesLookup, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = + toValidate.mapping.member match { + case participantId: ParticipantId => + // this means that we can also remove some checks from the party onboarding + ensureParticipantDoesNotHostParties(effective, participantId, pendingChangesLookup) + case _ => EitherTUtil.unitUS + } + + private def checkSynchronizerTrustCertificateRemove( + effective: EffectiveTime, + toValidate: SignedTopologyTransaction[TopologyChangeOp, SynchronizerTrustCertificate], + pendingChangesLookup: PendingChangesLookup, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = + /* Checks that the DTC is not being removed if the participant still hosts a party. + * This check is potentially quite expensive: we have to fetch all party to participant mappings, because + * we cannot index by the hosting participants. + */ + ensureParticipantDoesNotHostParties( + effective, + toValidate.mapping.participantId, + pendingChangesLookup, + ) + + private def checkMediatorSynchronizerStateReplace( + effectiveTime: EffectiveTime, + toValidate: SignedTopologyTransaction[TopologyChangeOp.Replace, MediatorSynchronizerState], + inStore: Option[ + SignedTopologyTransaction[TopologyChangeOp.Replace, MediatorSynchronizerState] + ], + pendingChangesLookup: PendingChangesLookup, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = { + + val newMediators = (toValidate.mapping.allMediatorsInGroup.toSet -- inStore.toList.flatMap( + _.mapping.allMediatorsInGroup + )).map(identity[Member]) + + def checkMediatorsDontRejoin() + : EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = + loadHistoryFromStore( + effectiveTime, + code = Code.MediatorSynchronizerState, + pendingChangesLookup, + toValidate.serial, + ) + .flatMap { mdsHistory => + val allMediatorsPreviouslyOnSynchronizer = mdsHistory.view + .flatMap(_.selectMapping[MediatorSynchronizerState]) + .flatMap(_.mapping.allMediatorsInGroup) + .toSet[Member] + val rejoiningMediators = newMediators.intersect(allMediatorsPreviouslyOnSynchronizer) + EitherTUtil.condUnitET( + rejoiningMediators.isEmpty, + TopologyTransactionRejection.OptionalMapping.MembersCannotRejoinSynchronizer( + rejoiningMediators.toSeq + ), + ) + } + + checkMediatorsDontRejoin() + + } + + private def checkSequencerSynchronizerStateReplace( + effectiveTime: EffectiveTime, + toValidate: SignedTopologyTransaction[TopologyChangeOp.Replace, SequencerSynchronizerState], + inStore: Option[ + SignedTopologyTransaction[TopologyChangeOp.Replace, SequencerSynchronizerState] + ], + pendingChangesLookup: 
PendingChangesLookup, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = { + val newSequencers = (toValidate.mapping.allSequencers.toSet -- inStore.toList.flatMap( + _.mapping.allSequencers + )).map(identity[Member]) + + def checkSequencersDontRejoin() + : EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = + loadHistoryFromStore( + effectiveTime, + code = Code.SequencerSynchronizerState, + pendingChangesLookup, + toValidate.serial, + ) + .flatMap { sdsHistory => + val allSequencersPreviouslyOnSynchronizer = sdsHistory.view + .flatMap(_.selectMapping[SequencerSynchronizerState]) + .flatMap(_.mapping.allSequencers) + .toSet[Member] + val rejoiningSequencers = newSequencers.intersect(allSequencersPreviouslyOnSynchronizer) + EitherTUtil.condUnitET( + rejoiningSequencers.isEmpty, + TopologyTransactionRejection.OptionalMapping + .MembersCannotRejoinSynchronizer(rejoiningSequencers.toSeq), + ) + } + + checkSequencersDontRejoin() + } + +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/checks/TopologyMappingChecks.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/checks/TopologyMappingChecks.scala new file mode 100644 index 0000000000..739d32f9e0 --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/checks/TopologyMappingChecks.scala @@ -0,0 +1,932 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.transaction.checks + +import cats.Monad +import cats.data.EitherT +import cats.instances.order.* +import cats.syntax.either.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.crypto.KeyPurpose +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.protocol.{DynamicSynchronizerParameters, OnboardingRestriction} +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.TopologyStateProcessor.MaybePending +import com.digitalasset.canton.topology.processing.EffectiveTime +import com.digitalasset.canton.topology.store.* +import com.digitalasset.canton.topology.store.TopologyTransactionRejection.RequiredMapping as RequiredMappingRejection +import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction +import com.digitalasset.canton.topology.transaction.TopologyMapping.{Code, MappingHash} +import com.digitalasset.canton.topology.transaction.checks.TopologyMappingChecks.PendingChangesLookup +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.{EitherTUtil, MonadUtil} +import com.google.common.annotations.VisibleForTesting + +import scala.concurrent.ExecutionContext +import scala.math.Ordering.Implicits.* + +object TopologyMappingChecks { + type PendingChangesLookup = scala.collection.Map[MappingHash, MaybePending] + + class All(fst: TopologyMappingChecks, rest: TopologyMappingChecks*)(implicit + executionContext: ExecutionContext + ) extends TopologyMappingChecks { + private val all = (fst +: rest).toList + override def checkTransaction( + effective: EffectiveTime, + toValidate: 
GenericSignedTopologyTransaction, + inStore: Option[GenericSignedTopologyTransaction], + pendingChanges: PendingChangesLookup, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = + MonadUtil.sequentialTraverse_(all)( + _.checkTransaction(effective, toValidate, inStore, pendingChanges) + ) + } + +} + +trait TopologyMappingChecks { + def checkTransaction( + effective: EffectiveTime, + toValidate: GenericSignedTopologyTransaction, + inStore: Option[GenericSignedTopologyTransaction], + pendingChanges: PendingChangesLookup, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] +} + +object NoopTopologyMappingChecks extends TopologyMappingChecks { + override def checkTransaction( + effective: EffectiveTime, + toValidate: GenericSignedTopologyTransaction, + inStore: Option[GenericSignedTopologyTransaction], + pendingChanges: PendingChangesLookup, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = + EitherTUtil.unitUS +} + +abstract class TopologyMappingChecksWithStore( + store: TopologyStore[TopologyStoreId], + val loggerFactory: NamedLoggerFactory, +)(implicit + executionContext: ExecutionContext +) extends TopologyMappingChecks + with NamedLogging { + + @VisibleForTesting + private[transaction] def loadFromStore( + effective: EffectiveTime, + codes: Set[Code], + pendingChanges: Iterable[MaybePending], + filterUid: Option[NonEmpty[Seq[UniqueIdentifier]]] = None, + filterNamespace: Option[NonEmpty[Seq[Namespace]]] = None, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Seq[ + SignedTopologyTransaction[TopologyChangeOp.Replace, TopologyMapping] + ]] = + EitherT + .right[TopologyTransactionRejection]( + store + .findPositiveTransactions( + effective.value, + asOfInclusive = false, + isProposal = false, + types = codes.toSeq, + filterUid = filterUid, + filterNamespace = filterNamespace, + ) + .map { storedTxs => + val latestStored = storedTxs.collectLatestByUniqueKey.signedTransactions + + // we need to proactively look up the pending changes that match the filter, + // because there might be a pending transaction that isn't in the store yet (e.g. serial=1) + val pendingChangesMatchingFilter = + pendingChanges.view + .filter { maybePending => + val tx = maybePending.currentTx + // proposals shouldn't end up in PendingChangesLookup, but better to emulate what the store filter does + !tx.isProposal && + codes.contains(tx.mapping.code) && + filterNamespace.forall(_.exists(_ == tx.mapping.namespace)) && + filterUid.forall(uids => tx.mapping.maybeUid.exists(uids.contains(_))) + } + .map(_.currentTx) + .toSeq + + TopologyTransactions + .collectLatestByUniqueKey(latestStored ++ pendingChangesMatchingFilter) + .flatMap(_.selectOp[TopologyChangeOp.Replace]) + } + ) + +} + +/** Topology mapping checks which verify invariants on the topology state + * + * The following checks must be passed by every transaction which is added to the topology state. + * + * @param relaxSynchronizerStateChecks + * if true (during initial snapshot validation), we assume that the invariant holds that SSS and + * MSS only reference members with valid keys. 
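+ * + * A minimal sketch of an instantiation for initial snapshot validation (the surrounding `store` + * and `loggerFactory` values are assumed to be in scope, together with an implicit + * `ExecutionContext`): + * {{{ + * val requiredChecks = new RequiredTopologyMappingChecks( + * store, + * loggerFactory, + * relaxSynchronizerStateChecks = true, + * ) + * }}}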
+ */ +class RequiredTopologyMappingChecks( + store: TopologyStore[TopologyStoreId], + loggerFactory: NamedLoggerFactory, + relaxSynchronizerStateChecks: Boolean = false, +)(implicit + executionContext: ExecutionContext +) extends TopologyMappingChecksWithStore(store, loggerFactory) { + + def checkTransaction( + effective: EffectiveTime, + toValidate: GenericSignedTopologyTransaction, + inStore: Option[GenericSignedTopologyTransaction], + pendingChangesLookup: PendingChangesLookup, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = { + val checkFirstIsNotRemove = EitherTUtil + .condUnitET[FutureUnlessShutdown]( + !(toValidate.operation == TopologyChangeOp.Remove && inStore.isEmpty), + RequiredMappingRejection.NoCorrespondingActiveTxToRevoke(toValidate.mapping), + ) + val checkReplaceIsNotMaxSerial = EitherTUtil.condUnitET[FutureUnlessShutdown]( + toValidate.operation == TopologyChangeOp.Remove || + (toValidate.operation == TopologyChangeOp.Replace && toValidate.serial < PositiveInt.MaxValue), + RequiredMappingRejection.InvalidTopologyMapping( + s"The serial for a REPLACE must be less than ${PositiveInt.MaxValue}." + ), + ) + + def mappingMismatch(expected: TopologyMapping): Boolean = (toValidate.mapping, expected) match { + // When removing the synchronizer trust certificate, there is no need to mandate that the removal mapping has the same + // feature flags. + case ( + removeCertificate: SynchronizerTrustCertificate, + inStoreCertificate: SynchronizerTrustCertificate, + ) => + removeCertificate.uniqueKey != inStoreCertificate.uniqueKey + case _ => + toValidate.mapping != expected + } + + val checkRemoveDoesNotChangeMapping = EitherT.fromEither[FutureUnlessShutdown]( + inStore + .collect { + case expected + if toValidate.operation == TopologyChangeOp.Remove && mappingMismatch( + expected.mapping + ) => + RequiredMappingRejection + .RemoveMustNotChangeMapping(toValidate.mapping, expected.mapping) + } + .toLeft(()) + ) + + lazy val checkOpt = (toValidate.mapping.code, inStore.map(_.mapping.code)) match { + case (Code.SynchronizerTrustCertificate, None | Some(Code.SynchronizerTrustCertificate)) => + val checkReplace = toValidate + .select[TopologyChangeOp.Replace, SynchronizerTrustCertificate] + .map( + checkSynchronizerTrustCertificateReplace( + effective, + _, + inStore.flatMap(_.selectMapping[SynchronizerTrustCertificate]), + pendingChangesLookup, + ) + ) + + checkReplace + + case (Code.PartyToParticipant, None | Some(Code.PartyToParticipant)) => + toValidate + .select[TopologyChangeOp.Replace, PartyToParticipant] + .map( + checkPartyToParticipant( + effective, + _, + inStore.flatMap(_.select[TopologyChangeOp.Replace, PartyToParticipant]), + pendingChangesLookup, + ) + ) + + case (Code.OwnerToKeyMapping, None | Some(Code.OwnerToKeyMapping)) => + // TODO(#28232) check that remove doesn't happen on keys that are in use + val checkReplace = toValidate + .select[TopologyChangeOp.Replace, OwnerToKeyMapping] + .map( + checkOwnerToKeyMappingReplace(_, inStore.flatMap(_.selectMapping[OwnerToKeyMapping])) + ) + + checkReplace + + case (Code.MediatorSynchronizerState, None | Some(Code.MediatorSynchronizerState)) => + toValidate + .select[TopologyChangeOp.Replace, MediatorSynchronizerState] + .map( + checkMediatorSynchronizerStateReplace( + effective, + _, + inStore.flatMap(_.select[TopologyChangeOp.Replace, MediatorSynchronizerState]), + pendingChangesLookup, + ) + ) + case (Code.SequencerSynchronizerState, None | 
Some(Code.SequencerSynchronizerState)) => + toValidate + .select[TopologyChangeOp.Replace, SequencerSynchronizerState] + .map( + checkSequencerSynchronizerStateReplace( + effective, + _, + inStore.flatMap(_.select[TopologyChangeOp.Replace, SequencerSynchronizerState]), + pendingChangesLookup, + ) + ) + + case ( + Code.DecentralizedNamespaceDefinition, + None | Some(Code.DecentralizedNamespaceDefinition), + ) => + toValidate + .select[TopologyChangeOp.Replace, DecentralizedNamespaceDefinition] + .map( + checkDecentralizedNamespaceDefinitionReplace( + effective, + _, + inStore.flatMap(_.select[TopologyChangeOp, DecentralizedNamespaceDefinition]), + pendingChangesLookup, + ) + ) + + case ( + Code.NamespaceDelegation, + None | Some(Code.NamespaceDelegation), + ) => + toValidate + .select[TopologyChangeOp.Replace, NamespaceDelegation] + .map(checkNamespaceDelegationReplace(effective, _, pendingChangesLookup)) + + case (Code.SynchronizerParametersState, None | Some(Code.SynchronizerParametersState)) => + toValidate + .select[TopologyChangeOp.Remove, SynchronizerParametersState] + .map(_ => + EitherT.leftT[FutureUnlessShutdown, Unit]( + RequiredMappingRejection + .CannotRemoveMapping(Code.SynchronizerParametersState): TopologyTransactionRejection + ) + ) + + case ( + Code.SynchronizerUpgradeAnnouncement, + None | Some(Code.SynchronizerUpgradeAnnouncement), + ) => + toValidate + .select[TopologyChangeOp.Replace, SynchronizerUpgradeAnnouncement] + .map(checkSynchronizerUpgradeAnnouncement(effective, _)) + + case _otherwise => None + } + + for { + _ <- checkFirstIsNotRemove + _ <- checkReplaceIsNotMaxSerial + _ <- checkRemoveDoesNotChangeMapping + _ <- checkNoOngoingSynchronizerUpgrade(effective, toValidate, pendingChangesLookup) + _ <- checkOpt.getOrElse(EitherTUtil.unitUS) + } yield () + + } + + private val mappingsAllowedDuringSynchronizerUpgrade = + TopologyMapping.Code.logicalSynchronizerUpgradeMappings + + /** Check that the topology state is not frozen if this store is a synchronizer store. All other + * stores are not subject to freezing the topology state. 
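+ * + * Concretely: once a [[SynchronizerUpgradeAnnouncement]] is effective in a synchronizer store, + * only the mappings in [[TopologyMapping.Code.logicalSynchronizerUpgradeMappings]] are still + * accepted; any other mapping is rejected with an OngoingSynchronizerUpgrade rejection.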
+ */ + private def checkNoOngoingSynchronizerUpgrade( + effective: EffectiveTime, + toValidate: GenericSignedTopologyTransaction, + pendingChanges: PendingChangesLookup, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = { + val pendingSynchronizerAnnouncements = store.storeId.forSynchronizer.flatMap { synchronizerId => + pendingChanges.get(SynchronizerUpgradeAnnouncement.uniqueKey(synchronizerId.logical)) + } + + Monad[EitherT[FutureUnlessShutdown, TopologyTransactionRejection, *]].whenA( + store.storeId.isSynchronizerStore + )(for { + results <- loadFromStore( + effective, + Set(Code.SynchronizerUpgradeAnnouncement), + pendingSynchronizerAnnouncements.toList, + ) + announcements = NonEmpty.from( + results.flatMap(_.selectMapping[SynchronizerUpgradeAnnouncement].toList) + ) + _ <- announcements match { + case None => EitherTUtil.unitUS[TopologyTransactionRejection] + case Some(announcement) => + EitherTUtil.condUnitET[FutureUnlessShutdown]( + mappingsAllowedDuringSynchronizerUpgrade.contains(toValidate.mapping.code), + RequiredMappingRejection.OngoingSynchronizerUpgrade( + announcement.head1.mapping.successorSynchronizerId.logical + ): TopologyTransactionRejection, + ) + } + } yield {}) + } + + private def loadSynchronizerParameters( + effective: EffectiveTime, + synchronizerId: SynchronizerId, + pendingChangesLookup: PendingChangesLookup, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, DynamicSynchronizerParameters] = + loadFromStore( + effective, + Set(Code.SynchronizerParametersState), + pendingChangesLookup.get(SynchronizerParametersState.uniqueKey(synchronizerId)).toList, + ) + .subflatMap { synchronizerParamCandidates => + val params = synchronizerParamCandidates.view + .flatMap(_.selectMapping[SynchronizerParametersState]) + .map(_.mapping.parameters) + .toList + params match { + case Nil => + logger.error( + "Cannot determine synchronizer parameters." + ) + Left(RequiredMappingRejection.MissingSynchronizerParameters(effective)) + case param :: Nil => Right(param) + case param :: rest => + logger.error( + s"Found ${rest.size + 1} synchronizer parameters at $effective. Using the first one: $param." + ) + Right(param) + } + } + + private def checkSynchronizerTrustCertificateReplace( + effective: EffectiveTime, + toValidate: SignedTopologyTransaction[TopologyChangeOp.Replace, SynchronizerTrustCertificate], + inStore: Option[SignedTopologyTransaction[TopologyChangeOp, SynchronizerTrustCertificate]], + pendingChangesLookup: PendingChangesLookup, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = { + // Checks if the participant is allowed to submit its synchronizer trust certificate + val participantId = toValidate.mapping.participantId + + def loadOnboardingRestriction() + : EitherT[FutureUnlessShutdown, TopologyTransactionRejection, OnboardingRestriction] = + loadSynchronizerParameters(effective, toValidate.mapping.synchronizerId, pendingChangesLookup) + .map(_.onboardingRestriction) + + def checkSynchronizerIsNotLocked(restriction: OnboardingRestriction) = + EitherTUtil.condUnitET[FutureUnlessShutdown]( + restriction.isOpen, { + logger.info( + s"Synchronizer is locked at $effective. 
Rejecting onboarding of new participant ${toValidate.mapping}" + ) + RequiredMappingRejection.OnboardingRestrictionInPlace( + participantId, + restriction, + None, + ) + }, + ) + + def checkParticipantIsNotRestricted( + restrictions: OnboardingRestriction + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = + // using the flags to check for restrictions instead of == UnrestrictedOpen to be more + // future-proof in case we add additional restrictions in the future and would otherwise miss a case, + // because there is no exhaustiveness check without full pattern matching + if (restrictions.isUnrestricted && restrictions.isOpen) { + // No further checks to be done. Any participant can join the synchronizer + EitherTUtil.unitUS + } else if (restrictions.isRestricted && restrictions.isOpen) { + // Only participants with explicit permission may join the synchronizer + loadFromStore( + effective, + Set(Code.ParticipantSynchronizerPermission), + pendingChangesLookup + .get( + ParticipantSynchronizerPermission.uniqueKey( + toValidate.mapping.synchronizerId, + toValidate.mapping.participantId, + ) + ) + .toList, + filterUid = Some(NonEmpty(Seq, toValidate.mapping.participantId.uid)), + ).subflatMap { storedPermissions => + val isAllowlisted = storedPermissions.view + .flatMap(_.selectMapping[ParticipantSynchronizerPermission]) + .collectFirst { + case x if x.mapping.synchronizerId == toValidate.mapping.synchronizerId => + x.mapping.loginAfter + } + isAllowlisted match { + case Some(Some(loginAfter)) if loginAfter > effective.value => + // this should not happen except under race conditions, as sequencers should not let participants log in + logger.warn( + s"Rejecting onboarding of ${toValidate.mapping.participantId} as the participant still has a login ban until $loginAfter" + ) + Left( + RequiredMappingRejection + .OnboardingRestrictionInPlace(participantId, restrictions, Some(loginAfter)) + ) + case Some(_) => + logger.info( + s"Accepting onboarding of ${toValidate.mapping.participantId} as it is allowlisted" + ) + Either.unit + case None => + logger.info( + s"Rejecting onboarding of ${toValidate.mapping.participantId} as it is not allowlisted as of ${effective.value}" + ) + Left( + RequiredMappingRejection + .OnboardingRestrictionInPlace(participantId, restrictions, None) + ) + } + } + } else { + EitherT.leftT( + RequiredMappingRejection + .OnboardingRestrictionInPlace(participantId, restrictions, None) + ) + } + + def checkPartyIdDoesntExist() = for { + ptps <- loadFromStore( + effective, + Set(Code.PartyToParticipant), + pendingChangesLookup.get(PartyToParticipant.uniqueKey(participantId.adminParty)).toList, + filterUid = Some(NonEmpty(Seq, participantId.uid)), + ) + conflictingPartyIdO = ptps + .flatMap(_.selectMapping[PartyToParticipant]) + .headOption + .map(_.mapping) + _ <- conflictingPartyIdO match { + case Some(ptp) => + isExplicitAdminPartyAllocation( + ptp, + RequiredMappingRejection.ParticipantIdConflictWithPartyId( + participantId, + ptp.partyId, + ), + ) + case None => EitherTUtil.unitUS[TopologyTransactionRejection] + } + } yield () + + def checkParticipantDoesNotRejoin() = EitherTUtil.condUnitET[FutureUnlessShutdown]( + inStore.forall(_.operation != TopologyChangeOp.Remove), + RequiredMappingRejection.ParticipantCannotRejoinSynchronizer( + toValidate.mapping.participantId + ), + ) + + for { + _ <- checkParticipantDoesNotRejoin() + _ <- checkPartyIdDoesntExist() + restriction <- loadOnboardingRestriction() + _ <- checkSynchronizerIsNotLocked(restriction) + _ <- 
checkParticipantIsNotRestricted(restriction) + } yield () + } + + /** Checks the following: + * - threshold is less than or equal to the number of confirming participants + * - new participants have a valid DTC + * - new participants have an OTK (valid keys are checked as part of OTK checks) + */ + private def checkPartyToParticipant( + effective: EffectiveTime, + toValidate: SignedTopologyTransaction[TopologyChangeOp.Replace, PartyToParticipant], + inStore: Option[SignedTopologyTransaction[TopologyChangeOp.Replace, PartyToParticipant]], + pendingChangesLookup: PendingChangesLookup, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = { + import toValidate.mapping + def checkParticipants() = { + val newParticipants = mapping.participants.map(_.participantId).toSet -- + inStore.toList.flatMap(_.mapping.participants.map(_.participantId)) + for { + participantTransactions <- loadFromStore( + effective, + Set(Code.SynchronizerTrustCertificate, Code.OwnerToKeyMapping), + (newParticipants.toSeq.map(_.uid) :+ mapping.partyId.uid).flatMap { uid => + val pid = ParticipantId(uid) + val otks = pendingChangesLookup.get(OwnerToKeyMapping.uniqueKey(pid)).toList + val dtcs = store.storeId.forSynchronizer.flatMap { synchronizerId => + pendingChangesLookup.get( + SynchronizerTrustCertificate.uniqueKey(pid, synchronizerId.logical) + ) + } + otks ++ dtcs + }.toList, + filterUid = Some(NonEmpty(Seq, mapping.partyId.uid) ++ newParticipants.toSeq.map(_.uid)), + ) + + // if we found a DTC with the same uid as the partyId, + // check that the PTP is an explicit admin party allocation, otherwise reject the PTP + foundAdminPartyWithSameUID = participantTransactions + .flatMap(_.selectMapping[SynchronizerTrustCertificate]) + .exists(_.mapping.participantId.uid == mapping.partyId.uid) + _ <- EitherTUtil.ifThenET(foundAdminPartyWithSameUID)( + isExplicitAdminPartyAllocation( + mapping, + RequiredMappingRejection.PartyIdConflictWithAdminParty( + mapping.partyId + ), + ) + ) + + // check that all participants are known on the synchronizer + // note that this check does not provide strong guarantees as it is only + // checked at time of creation. a removal of a participant may still + // lead to dangling party to participant mappings. + missingParticipantCertificates = newParticipants -- participantTransactions + .flatMap(_.selectMapping[SynchronizerTrustCertificate]) + .map(_.mapping.participantId) + + _ <- EitherTUtil.condUnitET[FutureUnlessShutdown][TopologyTransactionRejection]( + missingParticipantCertificates.isEmpty, + RequiredMappingRejection.UnknownMembers(missingParticipantCertificates.toSeq), + ) + + // check that all known participants have keys registered + // note same comment as above. 
+ participantsWithInsufficientKeys = + newParticipants -- participantTransactions + .flatMap(_.selectMapping[OwnerToKeyMapping]) + .map(_.mapping.member) + .collect { case pid: ParticipantId => pid } + + _ <- EitherTUtil.condUnitET[FutureUnlessShutdown][TopologyTransactionRejection]( + participantsWithInsufficientKeys.isEmpty, + RequiredMappingRejection.InsufficientKeys( + participantsWithInsufficientKeys.toSeq + ), + ) + } yield { + () + } + } + + for { + _ <- checkParticipants() + } yield () + + } + + private def checkOwnerToKeyMappingReplace( + toValidate: SignedTopologyTransaction[TopologyChangeOp.Replace, OwnerToKeyMapping], + inStore: Option[SignedTopologyTransaction[TopologyChangeOp, OwnerToKeyMapping]], + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = { + + // cannot re-add after remove + val noAddingAfterRemove = + EitherTUtil.condUnitET[FutureUnlessShutdown][TopologyTransactionRejection]( + inStore.forall(p => p.operation == TopologyChangeOp.Replace), + TopologyTransactionRejection.RequiredMapping.CannotReregisterKeys(toValidate.mapping.member), + ) + + // check for at least 1 signing and 1 encryption key + val keysByPurpose = toValidate.mapping.keys.forgetNE.groupBy(_.purpose) + val signingKeys = keysByPurpose.getOrElse(KeyPurpose.Signing, Seq.empty) + + val minimumSigningKeyRequirement = + EitherTUtil.condUnitET[FutureUnlessShutdown][TopologyTransactionRejection]( + // all nodes require signing keys + signingKeys.nonEmpty, + RequiredMappingRejection.InvalidTopologyMapping( + "OwnerToKeyMapping must contain at least 1 signing key." + ), + ) + + val encryptionKeys = keysByPurpose.getOrElse(KeyPurpose.Encryption, Seq.empty) + val isParticipant = toValidate.mapping.member.code == ParticipantId.Code + + val minimumEncryptionKeyRequirement = + EitherTUtil.condUnitET[FutureUnlessShutdown][TopologyTransactionRejection]( + // non-participants don't need encryption keys + !isParticipant || encryptionKeys.nonEmpty, + RequiredMappingRejection.InvalidTopologyMapping( + "OwnerToKeyMapping for participants must contain at least 1 encryption key. 
+ ), + ) + noAddingAfterRemove + .flatMap(_ => minimumSigningKeyRequirement) + .flatMap(_ => minimumEncryptionKeyRequirement) + } + + private def checkNewSynchronizerMembersHaveKeys( + effective: EffectiveTime, + pendingChangesLookup: PendingChangesLookup, + newMembers: Set[Member], + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = + if (relaxSynchronizerStateChecks) EitherTUtil.unitUS + else + NonEmpty.from(newMembers).fold(EitherTUtil.unitUS[TopologyTransactionRejection]) { members => + loadFromStore( + effective, + Set(Code.SynchronizerTrustCertificate, Code.OwnerToKeyMapping), + newMembers.flatMap { member => + pendingChangesLookup.get(OwnerToKeyMapping.uniqueKey(member)).toList + }.toList, + filterUid = Some(members.toSeq.map(_.uid)), + ).flatMap { stored => + val found = stored + .flatMap(_.selectMapping[OwnerToKeyMapping]) + .filterNot(_.isProposal) + .map(_.mapping.member) + .toSet + val noKeys = newMembers -- found + EitherTUtil.condUnitET[FutureUnlessShutdown]( + noKeys.isEmpty, + RequiredMappingRejection + .InsufficientKeys(noKeys.toSeq): TopologyTransactionRejection, + ) + } + } + + private def checkMediatorSynchronizerStateReplace( + effectiveTime: EffectiveTime, + toValidate: SignedTopologyTransaction[TopologyChangeOp.Replace, MediatorSynchronizerState], + inStore: Option[ + SignedTopologyTransaction[TopologyChangeOp.Replace, MediatorSynchronizerState] + ], + pendingChangesLookup: PendingChangesLookup, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = { + val newMediators = (toValidate.mapping.allMediatorsInGroup.toSet -- inStore.toList.flatMap( + _.mapping.allMediatorsInGroup + )).map(identity[Member]) + + def checkMediatorNotAlreadyAssignedToOtherGroup() = + for { + result <- loadFromStore( + effectiveTime, + Set(Code.MediatorSynchronizerState), + pendingChangesLookup.values, + ) + mediatorsAlreadyAssignedToGroups = result + .flatMap(_.selectMapping[MediatorSynchronizerState]) + // only look at other groups to avoid a race between validating this proposal and + // having persisted the same transaction as fully authorized from other synchronizer owners. 
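+ // (illustrative elaboration of the race: if the same assignment of these mediators to this
+ // group has already been persisted as fully authorized, including the own group in the lookup
+ // would wrongly report the mediators as "already assigned" and reject the proposal.)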
+ .filter(_.mapping.group != toValidate.mapping.group) + .flatMap(tx => + tx.mapping.allMediatorsInGroup.collect { + case med if newMediators.contains(med) => med -> tx.mapping.group + } + ) + .toMap + _ <- EitherTUtil.condUnitET[FutureUnlessShutdown]( + mediatorsAlreadyAssignedToGroups.isEmpty, + RequiredMappingRejection.MediatorsAlreadyInOtherGroups( + toValidate.mapping.group, + mediatorsAlreadyAssignedToGroups, + ): TopologyTransactionRejection, + ) + } yield () + + val notAlreadyAssignedET = checkMediatorNotAlreadyAssignedToOtherGroup() + val allNewHaveKeysET = checkNewSynchronizerMembersHaveKeys( + effectiveTime, + pendingChangesLookup, + newMembers = newMediators, + ) + + for { + _ <- notAlreadyAssignedET + _ <- allNewHaveKeysET + } yield () + } + + private def checkSequencerSynchronizerStateReplace( + effectiveTime: EffectiveTime, + toValidate: SignedTopologyTransaction[TopologyChangeOp.Replace, SequencerSynchronizerState], + inStore: Option[ + SignedTopologyTransaction[TopologyChangeOp.Replace, SequencerSynchronizerState] + ], + pendingChangesLookup: PendingChangesLookup, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = { + val newSequencers = (toValidate.mapping.allSequencers.toSet -- inStore.toList.flatMap( + _.mapping.allSequencers + )).map(identity[Member]) + + checkNewSynchronizerMembersHaveKeys( + effectiveTime, + pendingChangesLookup, + newMembers = newSequencers, + ) + + } + + private def checkDecentralizedNamespaceDefinitionReplace( + effective: EffectiveTime, + toValidate: SignedTopologyTransaction[ + TopologyChangeOp.Replace, + DecentralizedNamespaceDefinition, + ], + inStore: Option[SignedTopologyTransaction[ + TopologyChangeOp, + DecentralizedNamespaceDefinition, + ]], + pendingChangesLookup: PendingChangesLookup, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = { + + def checkDecentralizedNamespaceDerivedFromOwners() + : EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = + if (inStore.isEmpty) { + // The very first decentralized namespace definition must have namespace computed from the owners + EitherTUtil.condUnitET( + toValidate.mapping.namespace == DecentralizedNamespaceDefinition + .computeNamespace(toValidate.mapping.owners), + RequiredMappingRejection.InvalidTopologyMapping( + s"The decentralized namespace ${toValidate.mapping.namespace} is not derived from the owners ${toValidate.mapping.owners.toSeq.sorted}" + ), + ) + } else { + EitherTUtil.unitUS + } + + def checkNoClashWithNamespaceDelegations()(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = + loadFromStore( + effective, + Set(Code.NamespaceDelegation), + pendingChangesLookup + .get( + NamespaceDelegation.uniqueKey( + toValidate.mapping.namespace, + toValidate.mapping.namespace.fingerprint, + ) + ) + .toList, + filterUid = None, + filterNamespace = Some(NonEmpty(Seq, toValidate.mapping.namespace)), + ).flatMap { namespaceDelegations => + EitherTUtil.condUnitET( + namespaceDelegations.isEmpty, + RequiredMappingRejection.NamespaceAlreadyInUse(toValidate.mapping.namespace), + ) + } + + def checkOwnersAreNormalNamespaces()(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = + loadFromStore( + effective, + Set(Code.NamespaceDelegation), + toValidate.mapping.owners.forgetNE.flatMap(ns => + 
pendingChangesLookup.get(NamespaceDelegation.uniqueKey(ns, ns.fingerprint)) + ), + filterUid = None, + filterNamespace = Some(toValidate.mapping.owners.toSeq), + ).flatMap { namespaceDelegations => + val foundNSDs = namespaceDelegations + .filter(NamespaceDelegation.isRootCertificate) + .map(_.mapping.namespace) + .toSet + val missingNSDs = toValidate.mapping.owners -- foundNSDs + + EitherTUtil.condUnitET( + missingNSDs.isEmpty, + RequiredMappingRejection.InvalidTopologyMapping( + s"No root certificate found for ${missingNSDs.toSeq.sorted.mkString(", ")}" + ), + ) + } + + for { + _ <- checkDecentralizedNamespaceDerivedFromOwners() + _ <- checkNoClashWithNamespaceDelegations() + _ <- checkOwnersAreNormalNamespaces() + } yield () + } + + private def checkNamespaceDelegationReplace( + effective: EffectiveTime, + toValidate: SignedTopologyTransaction[ + TopologyChangeOp.Replace, + NamespaceDelegation, + ], + pendingChangesLookup: PendingChangesLookup, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = { + def checkNoClashWithDecentralizedNamespaces()(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = + loadFromStore( + effective, + Set(Code.DecentralizedNamespaceDefinition), + pendingChangesLookup + .get( + DecentralizedNamespaceDefinition.uniqueKey(toValidate.mapping.namespace) + ) + .toList, + filterUid = None, + filterNamespace = Some(NonEmpty(Seq, toValidate.mapping.namespace)), + ).flatMap { dns => + val foundDecentralizedNamespaceWithSameNamespace = dns.nonEmpty + EitherTUtil.condUnitET( + !foundDecentralizedNamespaceWithSameNamespace, + RequiredMappingRejection.NamespaceAlreadyInUse(toValidate.mapping.namespace), + ) + } + + checkNoClashWithDecentralizedNamespaces() + } + + private def checkSynchronizerUpgradeAnnouncement( + effective: EffectiveTime, + toValidate: SignedTopologyTransaction[ + TopologyChangeOp.Replace, + SynchronizerUpgradeAnnouncement, + ], + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = for { + _ <- store.storeId.forSynchronizer match { + case Some(psid) => + EitherTUtil.condUnitET[FutureUnlessShutdown][TopologyTransactionRejection]( + psid < toValidate.mapping.successorSynchronizerId, + RequiredMappingRejection.InvalidSynchronizerSuccessor( + psid, + toValidate.mapping.successorSynchronizerId, + ), + ) + case None => EitherTUtil.unitUS + } + _ <- EitherTUtil.condUnitET[FutureUnlessShutdown][TopologyTransactionRejection]( + toValidate.mapping.upgradeTime > effective.value, + RequiredMappingRejection.InvalidUpgradeTime( + toValidate.mapping.successorSynchronizerId.logical, + effective = effective, + upgradeTime = toValidate.mapping.upgradeTime, + ), + ) + + } yield () + + /** Checks whether the given PTP is considered an explicit admin party allocation. This is true if + * all following conditions are met: + * - threshold == 1 + * - there is only a single hosting participant + * - with Submission permission + * - participantId.adminParty == partyId + */ + private def isExplicitAdminPartyAllocation( + ptp: PartyToParticipant, + rejection: => TopologyTransactionRejection, + ): EitherT[FutureUnlessShutdown, TopologyTransactionRejection, Unit] = { + // check that the PTP doesn't try to allocate a party that is the same as an already existing admin party. 
+ // we do allow an explicit allocation of an admin-like party on the same participant, though + val singleHostingParticipant = + ptp.participants.sizeCompare(1) == 0 + + val partyIsAdminParty = + ptp.participants.forall(participant => + participant.participantId.adminParty == ptp.partyId && + participant.permission == ParticipantPermission.Submission + ) + + // technically we don't need to check for threshold == 1, because we already require that there is only a single participant + // and the threshold may not exceed the number of participants. this is checked in PartyToParticipant.create + val threshold1 = ptp.threshold == PositiveInt.one + + EitherTUtil.condUnitET[FutureUnlessShutdown]( + singleHostingParticipant && partyIsAdminParty && threshold1, + rejection, + ) + } + +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/util/BisectUtil.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/util/BisectUtil.scala new file mode 100644 index 0000000000..0a3f690bcf --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/util/BisectUtil.scala @@ -0,0 +1,125 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} + +import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference} +import scala.concurrent.ExecutionContext +import scala.util.Try + +trait BisectHandle { + def completed(): Unit + def next(stage: String): BisectHandle +} + +trait BisectUtil { + def track[T](stage: String)(res: => T): T + def trackAsync[F[_], T](stage: String)(res: => F[T])(implicit + ec: ExecutionContext, + F: Thereafter[F], + ): F[T] + + def start(stage: String): BisectHandle +} + +object BisectUtil { + object Noop extends BisectUtil { + override def track[T](stage: String)(res: => T): T = res + def trackAsync[F[_], T](stage: String)(res: => F[T])(implicit + ec: ExecutionContext, + F: Thereafter[F], + ): F[T] = res + override def start(stage: String): BisectHandle = new BisectHandle { + override def completed(): Unit = () + override def next(nextStage: String): BisectHandle = this + } + } + def measure( + name: String, + loggerFactory: NamedLoggerFactory, + report: PositiveInt = PositiveInt.tryCreate(60), + ): BisectUtil = + new BisectUtilImpl(name, loggerFactory, report) + + /** small helper class to debug sequential performance issues + * + * profiling usually doesn't help to debug a sequential pipeline. this class here can be used to + * easily bisect the issue. + * + * often you might have a pipeline stage1 -> stage2 (stepA, stepB) -> stage3 -> ... -> stageN where + * the stages normally only take a very small time - so averaging over many invocations helps. + * + * just create the util and then wrap the calls: bisect.track("stage1")(stage1) etc., or + * val h1 = bisect.start("stage1"); stage1; val h2 = h1.next("stage2"); stage2; h2.completed(); etc. + * + * every "report" calls to completed() (default 500 - you might want to increase this), the current + * timings are printed to the log. 
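+ *
+ * a minimal usage sketch (illustrative only; the stage names and the pipeline are made up):
+ * {{{
+ * val bisect = BisectUtil.measure("my-pipeline", loggerFactory)
+ * def handle(x: Int): Int = {
+ *   val parsed = bisect.track("parse")(x + 1)
+ *   bisect.track("process")(parsed * 2)
+ * }
+ * }}}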
+ * + * just run a test and look at the last report call which tells you where the time was spent + */ + private class BisectUtilImpl( + name: String, + override protected val loggerFactory: NamedLoggerFactory, + report: PositiveInt = PositiveInt.tryCreate(500), + ) extends NamedLogging + with BisectUtil { + import Thereafter.syntax.* + + @SuppressWarnings(Array("org.wartremover.warts.TryPartial")) + override def track[T](stage: String)(res: => T): T = { + val handle = start(stage) + Try(res).thereafter(_ => handle.completed()).get + } + + def trackAsync[F[_], T](stage: String)(res: => F[T])(implicit + ec: ExecutionContext, + F: Thereafter[F], + ): F[T] = { + val handle = start(stage) + F.thereafter(res)(_ => handle.completed()) + } + + private val counter = new AtomicInteger(0) + private val measurements = new AtomicReference[Map[String, Long]](Map()) + + override def start(stage: String): BisectHandle = + new BisectHandle { + private val startTime = System.nanoTime() + private val isComplete = new AtomicBoolean(false) + + private def update() = { + val elapsed = System.nanoTime() - startTime + if (isComplete.getAndSet(true)) { + throw new IllegalStateException(s"Stage '$stage' already completed") + } + measurements.updateAndGet { current => + current.updatedWith(stage)(timeO => Some(elapsed + timeO.getOrElse(0L))) + } + () + } + + override def completed(): Unit = { + update() + if (counter.incrementAndGet() % report.value == 0) { + val str = measurements + .get() + .toSeq + .sortBy { case (_, time) => -time } + .map { case (stage, time) => + s"$stage: ${time / 1000000L} ms" + } + .mkString("\n ") + noTracingLogger.info(s"Bisect report of $name:\n $str") + } + } + + override def next(nextStage: String): BisectHandle = { + completed() + start(nextStage) + } + } + } +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/util/LfTransactionUtil.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/util/LfTransactionUtil.scala index 40e4967381..405b93a1ce 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/util/LfTransactionUtil.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/util/LfTransactionUtil.scala @@ -15,7 +15,7 @@ import scala.annotation.nowarn */ object LfTransactionUtil { - implicit val orderTransactionVersion: Order[LfLanguageVersion] = + implicit val orderSerializationVersion: Order[LfLanguageVersion] = Order.fromOrdering def consumedContractId(node: LfActionNode): Option[LfContractId] = node match { diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/util/MonadUtil.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/util/MonadUtil.scala index f2aa7135e2..76b07d7643 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/util/MonadUtil.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/util/MonadUtil.scala @@ -89,6 +89,9 @@ object MonadUtil { def whenM[M[_]](condM: M[Boolean])(trueM: => M[Unit])(implicit monad: Monad[M]): M[Unit] = monad.ifM(condM)(trueM, monad.unit) + def when[M[_]](cond: Boolean)(trueM: => M[Unit])(implicit monad: Monad[M]): M[Unit] = + if (cond) trueM else monad.unit + def sequentialTraverse[X, M[_], S]( xs: Seq[X] )(f: X => M[S])(implicit monad: Monad[M]): M[Seq[S]] = { diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/util/PekkoUtil.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/util/PekkoUtil.scala index ce1f9ab08e..7b60fcf153 100644 --- 
a/canton/community/base/src/main/scala/com/digitalasset/canton/util/PekkoUtil.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/util/PekkoUtil.scala @@ -177,6 +177,13 @@ object PekkoUtil extends HasLoggerName { FutureUnlessShutdown.outcomeF(future) } + /** Convenience function that turns a Source[T] inside a FutureUnlessShutdown into a Source[T]. + */ + def futureSourceUS[T](sourceFUS: FutureUnlessShutdown[Source[T, NotUsed]])(implicit + ec: ExecutionContext + ): Source[T, NotUsed] = + Source.futureSource(sourceFUS.onShutdown(Source.empty)).mapMaterializedValue(_ => NotUsed) + + /** A version of [[org.apache.pekko.stream.scaladsl.FlowOps.mapAsync]] that additionally allows to * pass state of type `S` between every subsequent element. Unlike * [[org.apache.pekko.stream.scaladsl.FlowOps.statefulMapConcat]], the state is passed diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/util/ResourceUtil.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/util/ResourceUtil.scala index df4833f1c0..3a8acba9f1 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/util/ResourceUtil.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/util/ResourceUtil.scala @@ -13,7 +13,7 @@ import scala.util.Try object ResourceUtil { /** Does resource management the same way as [[withResource]], but returns an Either instead of - * throwing exceptions. + * throwing exceptions. The exceptions to this are fatal exceptions, which are rethrown. * * @param r * resource that will be used to derive some value and will be closed automatically in the end @@ -40,7 +40,12 @@ object ResourceUtil { */ @SuppressWarnings(Array("org.wartremover.warts.TryPartial")) def withResource[T <: AutoCloseable, V](r: => T)(f: T => V): V = - withResourceM(r)(resource => Try(f(resource))).get + withResourceM(r)(resource => + // tryCatchAll is analogous to Java's try-with-resources behavior, + // which also acts on fatal exceptions like OutOfMemoryError. + // See https://docs.oracle.com/javase/specs/jls/se18/html/jls-14.html#jls-14.20.3 + TryUtil.tryCatchAll(f(resource)) + ).get final private[util] class ResourceMonadApplied[M[_]]( private val dummy: Boolean = true diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/util/RoseTree.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/util/RoseTree.scala new file mode 100644 index 0000000000..663623d8f0 --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/util/RoseTree.scala @@ -0,0 +1,336 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import cats.Apply +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.discard.Implicits.* +import com.digitalasset.canton.util.RoseTree.{HashCodeState, MapState, hashCodeInit} +import monocle.macros.syntax.lens.* + +import scala.annotation.tailrec +import scala.collection.mutable +import scala.collection.mutable.ListBuffer +import scala.util.hashing.MurmurHash3 + +/** Implements a finitely branching tree, also known as a rose tree. All methods are stack-safe. 
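+ *
+ * for example (an illustrative sketch based on the API below):
+ * {{{
+ * val tree = RoseTree(1, RoseTree(2, RoseTree(4)), RoseTree(3))
+ * tree.size                                                  // 4
+ * tree.preorder.toList                                       // List(1, 2, 4, 3)
+ * tree.map(_ * 10).root                                      // 10
+ * tree.foldl(_ => 1)(identity)((acc, child) => acc + child)  // 4, recomputes tree.size
+ * }}}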
+ */ +final class RoseTree[+A] private ( + val root: A, + val children: Seq[RoseTree[A]], + val size: Int, +) extends Product + with Serializable { + + /** Iterator over all elements in this tree in preorder */ + def preorder: Iterator[A] = RoseTree.PreorderIterator(this) + + /** Tail-recursive implementation equivalent to + * + * {{{ + * def foldl[State, Result](tree: RoseTree[A])(init: RoseTree[A] => State)(finish: State => Result)(update: (State, Result) => State): Result = + * finish(children.foldLeft(init(this))((acc, child) => update(acc, child.foldl(init)(finish)(update)))) + * }}} + */ + def foldl[State, Result](init: RoseTree[A] => State)(finish: State => Result)( + update: (State, Result) => State + ): Result = RoseTree.foldLeft(RoseTree.roseTreeOps[A], this)(init)(finish)(update) + + /** Tail-recursive implementation equivalent to + * + * {{{ + * def map[A](f: A => B): RoseTree[B] = RoseTree(f(root), children.map(_.map(f))*) + * }}} + */ + def map[B](f: A => B): RoseTree[B] = + RoseTree.foldLeft(RoseTree.roseTreeOps[A], this)( + init = t => MapState(f(t.root), t.size, List.empty) + )(finish = { case MapState(mappedRoot, size, reverseVisitedChildren) => + new RoseTree(mappedRoot, reverseVisitedChildren.reverse, size) + })(update = (state, child) => state.focus(_.visitedReverse).modify(child +: _)) + + /** Tail-recursive implementation equivalent to + * {{{ + * def zipWith[B, C](that: RoseTree[B])(f: (A, B) => C): RoseTree[C] = + * RoseTree( + * f(this.root, that.root), + * (this.children.zip(that.children).map { case (l, r) => l.zipWith(r)(f) }) *, + * ) + * }}} + */ + def zipWith[B, C](that: RoseTree[B])(f: (A, B) => C): RoseTree[C] = + RoseTree.zipWith(this, that)(f) + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + override def equals(other: Any): Boolean = + if (this eq other.asInstanceOf[Object]) true + else + other match { + case that: RoseTree[_] => that.canEqual(this) && RoseTree.equals(this, that) + case _ => false + } + + /** Tail-recursive implementation for [[RoseTree]] equivalent to + * [[scala.util.hashing.MurmurHash3$.productHash(x:Product):Int*]] + */ + override def hashCode(): Int = + RoseTree.foldLeft(RoseTree.roseTreeOps[A], this)( + init = t => HashCodeState(MurmurHash3.mix(hashCodeInit, t.root.##), 1) + )( + finish = { case HashCodeState(h, arity) => MurmurHash3.finalizeHash(h, arity) } + )(update = (state, child) => HashCodeState(MurmurHash3.mix(state.hash, child), state.arity + 1)) + + @SuppressWarnings(Array("org.wartremover.warts.IsInstanceOf")) + override def canEqual(that: Any): Boolean = that.isInstanceOf[RoseTree[_]] + + override def productArity: Int = 1 + children.size + override def productElement(n: Int): Any = + if (n == 0) root else children(n - 1) + override def productPrefix: String = RoseTree.productPrefix + + override def toString: String = { + val builder = new mutable.StringBuilder() + RoseTree.foldLeft(RoseTree.roseTreeOps[A], this)( + init = t => builder.append(", RoseTree(").append(t.root).discard + )( + finish = _ => builder.append(")").discard + )(update = (_, _) => ()) + + builder.drop(2).toString() + } +} + +object RoseTree { + private def productPrefix = "RoseTree" + + def apply[A](root: A, children: RoseTree[A]*): RoseTree[A] = + new RoseTree(root, children, 1 + children.map(_.size).sum) + + def unapply[A](tree: RoseTree[A]): Some[(A, Seq[RoseTree[A]])] = + Some((tree.root, tree.children)) + + implicit val applyRoseTree: Apply[RoseTree] = new Apply[RoseTree] { + override def map[A, B](fa: RoseTree[A])(f: A => 
B): RoseTree[B] = fa.map(f) + + override def ap[A, B](ff: RoseTree[A => B])(fa: RoseTree[A]): RoseTree[B] = + zipWith(ff, fa)((f, a) => f(a)) + + override def map2[A, B, Z](fa: RoseTree[A], fb: RoseTree[B])(f: (A, B) => Z): RoseTree[Z] = + zipWith(fa, fb)(f) + } + + /** Pre-order iterator for a rose tree. Not an inner class of [[RoseTree]] so that the iterator + * does not prevent garbage collection of already visited nodes by storing references to them. + */ + private final class PreorderIterator[A] private ( + stack: ListBuffer[RoseTree[A]], + initialSize: Int, + ) extends Iterator[A] { + + @SuppressWarnings(Array("org.wartremover.warts.Var")) + private var sizeV: Int = initialSize + + override def knownSize: Int = sizeV + + override def hasNext: Boolean = stack.nonEmpty + + override def next(): A = { + if (stack.isEmpty) + throw new NoSuchElementException("No further elements in the rose tree") + + val RoseTree(root, children) = stack.remove(0) + stack.prependAll(children) + sizeV -= 1 + root + } + } + private object PreorderIterator { + def apply[A](tree: RoseTree[A]): PreorderIterator[A] = + new PreorderIterator[A](mutable.ListBuffer(tree), tree.size) + } + + trait TreeOps[Node] { + def children(node: Node): Iterator[Node] + } + + private[this] case object RoseTreeOps extends TreeOps[RoseTree[?]] { + override def children(node: RoseTree[?]): Iterator[RoseTree[?]] = node.children.iterator + } + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + def roseTreeOps[A]: TreeOps[RoseTree[A]] = RoseTreeOps.asInstanceOf[TreeOps[RoseTree[A]]] + + /** The `foldLeft` Zipper data structure for an individual node + * @param state + * The accumulated state of the `foldLeft` for all children visited so far + * @param unvisitedSiblings + * An iterator over the remaining siblings of the current node + */ + private final case class FoldlZipperLevel[Node, B]( + state: B, + unvisitedSiblings: Iterator[Node], + ) + + def foldLeft[Node, State, Result](treeOps: TreeOps[Node], tree: Node)( + init: Node => State + )( + finish: State => Result + )( + update: (State, Result) => State + ): Result = { + // Called when we have folded over all children of a node. Moves up the tree + // until we find an unvisited sibling or end at the root. 
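+ // (the Either result: Left carries the final fold result once the root has been finished,
+ // Right carries the next sibling to descend into together with the updated zipper.)
+ // illustrative example: folding RoseTree(1, RoseTree(2), RoseTree(3)) with init = _ => 1,
+ // finish = identity and update = (acc, child) => acc + child yields 3, the node count.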
+ @tailrec def moveUp( + c: Result, + parents: List[FoldlZipperLevel[Node, State]], + ): Either[Result, (Node, List[FoldlZipperLevel[Node, State]])] = + NonEmpty.from(parents) match { + case None => Left(c) + case Some(parentsNE) => + val parentLevel = parentsNE.head1 + val siblingIter = parentLevel.unvisitedSiblings + val newState = update(parentLevel.state, c) + if (siblingIter.hasNext) { + val nextSibling = siblingIter.next() + val newParentLevel = parentLevel.copy(state = newState) + Right((nextSibling, newParentLevel +: parentsNE.tail1)) + } else { + moveUp(finish(newState), parentsNE.tail1) + } + } + + @tailrec def go( + current: Node, + parents: List[FoldlZipperLevel[Node, State]], + ): Result = { + val state = init(current) + val childIter = treeOps.children(current) + if (childIter.hasNext) { + // Descend to the next level in the tree + go(childIter.next(), FoldlZipperLevel(state, childIter) +: parents) + } else { + // Move up in the tree until we find further siblings to process + val c = finish(state) + moveUp(c, parents) match { + case Left(result) => result + case Right((nextSibling, newParents)) => + go(nextSibling, newParents) + } + } + } + go(tree, List.empty) + } + + /** The `zipWith` Zipper data structure for an individual node + * @param state + * The accumulated result of zipping the already visited children + * @param unvisitedSiblingsL + * An iterator over the remaining siblings of the left node + * @param unvisitedSiblingsR + * An iterator over the remaining siblings of the right node + */ + private final case class ZipWithZipperLevel[A, B, S]( + state: S, + unvisitedSiblingsL: Iterator[RoseTree[A]], + unvisitedSiblingsR: Iterator[RoseTree[B]], + ) + + private final case class ZipWithState[C](zippedRoot: C, visitedReverse: List[RoseTree[C]]) + + private def zipWith[A, B, C](left: RoseTree[A], right: RoseTree[B])( + f: (A, B) => C + ): RoseTree[C] = { + type State = ZipWithState[C] + + // Called when we have zipped over all the children of two nodes. Moves up the two trees + // until we find unvisited siblings or end at the roots. 
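+ // (illustrative: zipping RoseTree(1, RoseTree(2), RoseTree(3)) with RoseTree(10, RoseTree(20))
+ // using f = _ + _ yields RoseTree(11, RoseTree(22)); children without a counterpart in the
+ // other tree are dropped, as only pairs of unvisited siblings are descended into.)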
+ @tailrec def moveUp( + c: RoseTree[C], + parents: List[ZipWithZipperLevel[A, B, State]], + ): Either[ + RoseTree[C], + (RoseTree[A], RoseTree[B], List[ZipWithZipperLevel[A, B, State]]), + ] = + NonEmpty.from(parents) match { + case None => Left(c) + case Some(parentsNE) => + val parentLevel = parentsNE.head1 + val siblingIterL = parentLevel.unvisitedSiblingsL + val siblingIterR = parentLevel.unvisitedSiblingsR + val newState = parentLevel.state.focus(_.visitedReverse).modify(c +: _) + if (siblingIterL.hasNext && siblingIterR.hasNext) { + val nextSiblingL = siblingIterL.next() + val nextSiblingR = siblingIterR.next() + val newParentLevel = parentLevel.copy(state = newState) + Right((nextSiblingL, nextSiblingR, newParentLevel +: parentsNE.tail1)) + } else { + val assembled = RoseTree(newState.zippedRoot, newState.visitedReverse.reverse*) + moveUp(assembled, parentsNE.tail1) + } + } + + @tailrec def go( + currentL: RoseTree[A], + currentR: RoseTree[B], + zipper: List[ZipWithZipperLevel[A, B, State]], + ): RoseTree[C] = { + val zippedRoot = f(currentL.root, currentR.root) + val childIterL = currentL.children.iterator + val childIterR = currentR.children.iterator + if (childIterL.hasNext && childIterR.hasNext) { + val state = ZipWithState(zippedRoot, List.empty) + go( + childIterL.next(), + childIterR.next(), + ZipWithZipperLevel(state, childIterL, childIterR) +: zipper, + ) + } else { + val zipped = RoseTree(zippedRoot) + moveUp(zipped, zipper) match { + case Left(result) => result + case Right((nextL, nextR, newParents)) => + go(nextL, nextR, newParents) + } + } + } + + go(left, right, List.empty) + } + + private final case class MapState[B]( + mappedRoot: B, + size: Int, + visitedReverse: List[RoseTree[B]], + ) + + private def equals[A, B](first: RoseTree[A], second: RoseTree[B]): Boolean = { + @tailrec def go( + stack: List[(Iterator[RoseTree[A]], Iterator[RoseTree[B]])] + ): Boolean = + NonEmpty.from(stack) match { + case None => true + case Some(stackNE) => + val (next1, next2) = stackNE.head1 + if (next1.hasNext != next2.hasNext) false + else if (!next1.hasNext) go(stackNE.tail1) + else { + val current1 = next1.next() + val current2 = next2.next() + if (current1.size != current2.size || current1.root != current2.root) false + else { + val children1 = current1.children.iterator + val children2 = current2.children.iterator + go((children1, children2) +: stack) + } + } + } + + go(List(Iterator(first) -> Iterator(second))) + } + + private final case class HashCodeState(hash: Int, arity: Int) + + private val hashCodeInit: Int = + MurmurHash3.mix(MurmurHash3.productSeed, productPrefix.hashCode) + +} diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/util/Thereafter.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/util/Thereafter.scala index 48d6621d8d..6ffe4c4966 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/util/Thereafter.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/util/Thereafter.scala @@ -29,6 +29,7 @@ import scala.util.{Failure, Success, Try} * * myAsyncComputation.thereafter(result => ...) // synchronous body * myAsyncComputation.thereafterF(result => ...) // asynchronous body + * (myTry : Try[...]).thereafter(result => ...) 
* }}} * * It is preferred to similar functions such as [[scala.concurrent.Future.andThen]] because it diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/util/TryUtil.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/util/TryUtil.scala index b2015f7191..41e25c9ffe 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/util/TryUtil.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/util/TryUtil.scala @@ -5,6 +5,7 @@ package com.digitalasset.canton.util import java.util.concurrent.CompletionException import scala.annotation.tailrec +import scala.concurrent.ExecutionException import scala.util.control.NonFatal import scala.util.{Failure, Success, Try} @@ -22,6 +23,18 @@ object TryUtil { case NonFatal(e) => Failure(e) } + /** Constructs a `Try` using the by-name parameter. This method will ensure any throwable is + * caught and a `Failure` object is returned. + * + * IT IS GENERALLY NOT RECOMMENDED TO CATCH FATAL EXCEPTIONS SUCH AS + * [[java.lang.OutOfMemoryError]]. USE THIS METHOD WITH CAUTION. + */ + def tryCatchAll[A](r: => A): Try[A] = + try Success(r) + catch { + case e: Throwable => Failure(e) + } + implicit final class ForFailedOps[A](private val a: Try[A]) extends AnyVal { @inline def forFailed(f: Throwable => Unit): Unit = a.fold(f, _ => ()) @@ -30,7 +43,7 @@ object TryUtil { def valueOr[B >: A](f: Throwable => B): B = a.fold(f, identity) } - /** Unwraps all [[java.util.concurrent.CompletionException]] from a failure and leaves only the + /** Unwraps all [[java.util.concurrent.CompletionException]]s from a failure and leaves only the * wrapped causes (unless there is no such cause) */ def unwrapCompletionException[A](x: Try[A]): Try[A] = x match { @@ -42,8 +55,26 @@ object TryUtil { @tailrec private def stripCompletionException(throwable: Throwable): Throwable = throwable match { case ce: CompletionException => - if (ce.getCause != null) stripCompletionException(ce.getCause) - else ce + val cause = ce.getCause + if (cause != null) stripCompletionException(cause) else ce + case _ => throwable + } + + /** Unwraps all [[java.util.concurrent.ExecutionException]]s from a failure and leaves only the + * wrapped causes (unless there is no such cause) + */ + def unwrapExecutionException[A](x: Try[A]): Try[A] = x match { + case _: Success[_] => x + case Failure(ex) => + val stripped = stripExecutionException(ex) + if (stripped eq ex) x else Failure(stripped) + } + + @tailrec def stripExecutionException(throwable: Throwable): Throwable = throwable match { + case exec: ExecutionException => + val cause = exec.getCause + if (cause != null) stripExecutionException(cause) else exec case _ => throwable } + } diff --git a/canton/community/base/src/main/scala/com/digitalasset/canton/util/retry/RetryEither.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/util/retry/RetryEither.scala index 61540522e1..ac9ad14c29 100644 --- a/canton/community/base/src/main/scala/com/digitalasset/canton/util/retry/RetryEither.scala +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/util/retry/RetryEither.scala @@ -63,6 +63,10 @@ object RetryEither { .leftFlatMap { err => if (stopOnLeft.exists(fn => fn(err))) { // Stop the retry attempts on this particular Left if stopOnLeft is true + LoggerUtil.logAtLevel( + failLogLevel, + s"Operation $operationName failed, stopping retries: $err", + ) Left(err) } else if (retryCount <= 0) { // Stop the recursion with the error if we exhausted the max retries diff --git 
a/canton/community/base/src/main/scala/com/digitalasset/canton/version/ParticipantProtocolFeatureFlags.scala b/canton/community/base/src/main/scala/com/digitalasset/canton/version/ParticipantProtocolFeatureFlags.scala new file mode 100644 index 0000000000..d8aaf86a9e --- /dev/null +++ b/canton/community/base/src/main/scala/com/digitalasset/canton/version/ParticipantProtocolFeatureFlags.scala @@ -0,0 +1,15 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.version + +import com.digitalasset.canton.topology.transaction.SynchronizerTrustCertificate.ParticipantTopologyFeatureFlag + +object ParticipantProtocolFeatureFlags { + + /** Feature flags supported by participant node for each PV + */ + val supportedFeatureFlagsByPV: Map[ProtocolVersion, Set[ParticipantTopologyFeatureFlag]] = Map( + ProtocolVersion.v34 -> Set.empty + ) +} diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/packagemeta/PackageMetadataSpec.scala b/canton/community/base/src/test/scala/com/digitalasset/canton/store/packagemeta/PackageMetadataSpec.scala similarity index 87% rename from canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/packagemeta/PackageMetadataSpec.scala rename to canton/community/base/src/test/scala/com/digitalasset/canton/store/packagemeta/PackageMetadataSpec.scala index 95fc4728cd..bd0043f155 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/packagemeta/PackageMetadataSpec.scala +++ b/canton/community/base/src/test/scala/com/digitalasset/canton/store/packagemeta/PackageMetadataSpec.scala @@ -1,12 +1,12 @@ // Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 -package com.digitalasset.canton.platform.store.packagemeta +package com.digitalasset.canton.store.packagemeta import cats.implicits.catsSyntaxSemigroup import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata.Implicits.packageMetadataSemigroup -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata.{ +import com.digitalasset.canton.store.packagemeta.PackageMetadata.Implicits.packageMetadataSemigroup +import com.digitalasset.canton.store.packagemeta.PackageMetadata.{ LocalPackagePreference, PackageResolution, } @@ -15,7 +15,6 @@ import com.digitalasset.daml.lf.data.Ref.IdentifierConverter import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpec -// TODO(#24866): Revisit test coverage for [[PackageMetadata]] and cover [[PackageMetadata.from]] as well class PackageMetadataSpec extends AnyWordSpec with Matchers { "PackageMetadata.combine" should { @@ -142,6 +141,40 @@ class PackageMetadataSpec extends AnyWordSpec with Matchers { s"Conflicting versioned package names for the same package id $pkgId1. 
Previous (${(pkgName1, pkg1Version1)}) vs uploaded(${(pkgName1, pkg1Version2)})" } } + + "allow two different package-ids for same (package-name, version)" in new Scope { + private val pkgMeta1 = PackageMetadata( + packageIdVersionMap = Map(pkgId1 -> (pkgName1, pkg1Version1)), + packageNameMap = Map( + pkgName1 -> PackageResolution( + LocalPackagePreference(pkg1Version1, pkgId1), + NonEmpty(Set, pkgId1), + ) + ), + ) + + private val pkgMeta2 = PackageMetadata( + packageIdVersionMap = Map(pkgId2 -> (pkgName1, pkg1Version1)), + packageNameMap = Map( + pkgName1 -> PackageResolution( + LocalPackagePreference(pkg1Version1, pkgId2), + NonEmpty(Set, pkgId2), + ) + ), + ) + + private val result: PackageMetadata = pkgMeta1 |+| pkgMeta2 + result.packageIdVersionMap shouldBe Map( + pkgId1 -> (pkgName1, pkg1Version1), + pkgId2 -> (pkgName1, pkg1Version1), + ) + result.packageNameMap shouldBe Map( + pkgName1 -> PackageResolution( + LocalPackagePreference(pkg1Version1, pkgId2), + NonEmpty(Set, pkgId1, pkgId2), + ) + ) + } } "PackageMetadata.resolveTypeConRef" should { diff --git a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ContractFilter.java b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ContractFilter.java index a7da59181d..3eb7cc6b80 100644 --- a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ContractFilter.java +++ b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ContractFilter.java @@ -111,28 +111,6 @@ public Ct toContract(CreatedEvent createdEvent) throws IllegalArgumentException return companion.fromCreatedEvent(createdEvent); } - /** Method will be removed in 3.4.0 */ - // TODO(#23504) remove - @Deprecated - public TransactionFilter transactionFilter(Optional> parties) { - return transactionFilter(filter, parties); - } - - // TODO(#23504) remove - @Deprecated - private static TransactionFilter transactionFilter( - Filter filter, Optional> partiesO) { - Map partyToFilters = - partiesO - .map( - parties -> - parties.stream().collect(Collectors.toMap(Function.identity(), x -> filter))) - .orElse(Collections.emptyMap()); - - Optional anyPartyFilterO = partiesO.isEmpty() ? Optional.of(filter) : Optional.empty(); - return new TransactionFilter(partyToFilters, anyPartyFilterO); - } - public UpdateFormat updateFormat(Optional> parties) { return updateFormat(filter, parties, verbose, transactionShape); } @@ -171,8 +149,14 @@ public EventFormat eventFormat(Optional> parties) { private static EventFormat eventFormat( Filter filter, Optional> partiesO, boolean verbose) { - TransactionFilter transactionFilter = transactionFilter(filter, partiesO); - return new EventFormat( - transactionFilter.getPartyToFilters(), transactionFilter.getAnyPartyFilter(), verbose); + Map partyToFilters = + partiesO + .map( + parties -> + parties.stream().collect(Collectors.toMap(Function.identity(), x -> filter))) + .orElse(Collections.emptyMap()); + + Optional anyPartyFilterO = partiesO.isEmpty() ? 
Optional.of(filter) : Optional.empty(); + return new EventFormat(partyToFilters, anyPartyFilterO, verbose); } } diff --git a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreatedEvent.java b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreatedEvent.java index 4b3d6f1109..cb792f964b 100644 --- a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreatedEvent.java +++ b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreatedEvent.java @@ -13,7 +13,7 @@ import java.util.stream.Stream; import org.checkerframework.checker.nullness.qual.NonNull; -public final class CreatedEvent implements Event, TreeEvent { +public final class CreatedEvent implements Event { private final @NonNull List<@NonNull String> witnessParties; @@ -48,6 +48,8 @@ public final class CreatedEvent implements Event, TreeEvent { private final @NonNull Boolean acsDelta; + private final @NonNull String representativePackageId; + public CreatedEvent( @NonNull List<@NonNull String> witnessParties, @NonNull Long offset, @@ -63,7 +65,8 @@ public CreatedEvent( @NonNull Collection<@NonNull String> signatories, @NonNull Collection<@NonNull String> observers, @NonNull Instant createdAt, - @NonNull Boolean acsDelta) { + @NonNull Boolean acsDelta, + @NonNull String representativePackageId) { this.witnessParties = List.copyOf(witnessParties); this.offset = offset; this.nodeId = nodeId; @@ -79,6 +82,7 @@ public CreatedEvent( this.observers = Set.copyOf(observers); this.createdAt = createdAt; this.acsDelta = acsDelta; + this.representativePackageId = representativePackageId; } @NonNull @@ -166,6 +170,11 @@ public boolean isAcsDelta() { return acsDelta; } + @NonNull + public String getRepresentativePackageId() { + return representativePackageId; + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -185,7 +194,8 @@ public boolean equals(Object o) { && Objects.equals(signatories, that.signatories) && Objects.equals(observers, that.observers) && Objects.equals(createdAt, that.createdAt) - && Objects.equals(acsDelta, that.acsDelta); + && Objects.equals(acsDelta, that.acsDelta) + && Objects.equals(representativePackageId, that.representativePackageId); } @Override @@ -205,7 +215,8 @@ public int hashCode() { signatories, observers, createdAt, - acsDelta); + acsDelta, + representativePackageId); } @Override @@ -242,6 +253,8 @@ public String toString() { + createdAt + ", acsDelta=" + acsDelta + + ", representativePackageId=" + + representativePackageId + '}'; } @@ -271,7 +284,8 @@ public String toString() { .setSeconds(this.createdAt.getEpochSecond()) .setNanos(this.createdAt.getNano()) .build()) - .setAcsDelta(this.isAcsDelta()); + .setAcsDelta(this.isAcsDelta()) + .setRepresentativePackageId(this.getRepresentativePackageId()); contractKey.ifPresent(a -> builder.setContractKey(a.toProto())); return builder.build(); } @@ -290,7 +304,6 @@ private static Stream toProtoInterfaceViews( .build()); } - @SuppressWarnings("deprecation") public static CreatedEvent fromProto(EventOuterClass.CreatedEvent createdEvent) { var splitInterfaceViews = createdEvent.getInterfaceViewsList().stream() @@ -321,6 +334,7 @@ public static CreatedEvent fromProto(EventOuterClass.CreatedEvent createdEvent) createdEvent.getObserversList(), Instant.ofEpochSecond( createdEvent.getCreatedAt().getSeconds(), createdEvent.getCreatedAt().getNanos()), - createdEvent.getAcsDelta()); + createdEvent.getAcsDelta(), + createdEvent.getRepresentativePackageId()); 
} } diff --git a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DisclosedContract.java b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DisclosedContract.java index dbc4a6c49b..a9889064a9 100644 --- a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DisclosedContract.java +++ b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DisclosedContract.java @@ -10,8 +10,8 @@ import java.util.Optional; public final class DisclosedContract { - public final Identifier templateId; - public final String contractId; + public final Optional templateId; + public final Optional contractId; public final ByteString createdEventBlob; public final Optional synchronizerId; @@ -22,17 +22,34 @@ public final class DisclosedContract { */ @Deprecated public DisclosedContract(Identifier templateId, String contractId, ByteString createdEventBlob) { - this.templateId = templateId; - this.contractId = contractId; + this.templateId = Optional.of(templateId); + this.contractId = Optional.of(contractId); this.createdEventBlob = createdEventBlob; this.synchronizerId = Optional.empty(); } + /** + * Constructor that requires the contract-id and template-id. + * + * @deprecated since 3.4.0. It will be removed in a future release + */ + @Deprecated public DisclosedContract( Identifier templateId, String contractId, ByteString createdEventBlob, String synchronizerId) { + this.templateId = Optional.of(templateId); + this.contractId = Optional.of(contractId); + this.createdEventBlob = createdEventBlob; + this.synchronizerId = Optional.of(synchronizerId); + } + + public DisclosedContract( + ByteString createdEventBlob, + String synchronizerId, + Optional templateId, + Optional contractId) { this.templateId = templateId; this.contractId = contractId; this.createdEventBlob = createdEventBlob; @@ -42,9 +59,10 @@ public DisclosedContract( public CommandsOuterClass.DisclosedContract toProto() { CommandsOuterClass.DisclosedContract.Builder builder = CommandsOuterClass.DisclosedContract.newBuilder() - .setTemplateId(this.templateId.toProto()) - .setContractId(this.contractId) .setCreatedEventBlob(this.createdEventBlob); + + contractId.ifPresent(builder::setContractId); + templateId.ifPresent(id -> builder.setTemplateId(id.toProto())); synchronizerId.ifPresent(builder::setSynchronizerId); return builder.build(); } diff --git a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/EventUtils.java b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/EventUtils.java index 62cbb54bac..065de2ba00 100644 --- a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/EventUtils.java +++ b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/EventUtils.java @@ -16,18 +16,4 @@ public static CreatedEvent singleCreatedEvent(List events) { throw new IllegalArgumentException( "Expected exactly one created event from the transaction, got: " + events); } - - /** @hidden */ - public static ExercisedEvent firstExercisedEvent(TransactionTree txTree) { - var maybeExercisedEvent = - txTree.getRootNodeIds().stream() - .map(nodeId -> txTree.getEventsById().get(nodeId)) - .filter(e -> e instanceof ExercisedEvent) - .map(e -> (ExercisedEvent) e) - .findFirst(); - - return maybeExercisedEvent.orElseThrow( - () -> - new IllegalArgumentException("Expect an exercised event but not found. 
tx: " + txTree)); - } } diff --git a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ExercisedEvent.java b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ExercisedEvent.java index de0898f3e5..e0c6efebfe 100644 --- a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ExercisedEvent.java +++ b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ExercisedEvent.java @@ -9,7 +9,7 @@ import java.util.Optional; import org.checkerframework.checker.nullness.qual.NonNull; -public final class ExercisedEvent implements TreeEvent, Event { +public final class ExercisedEvent implements Event { private final List witnessParties; diff --git a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetActiveContractsRequest.java b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetActiveContractsRequest.java index 7c256ab29c..77ac2c4b52 100644 --- a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetActiveContractsRequest.java +++ b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetActiveContractsRequest.java @@ -14,16 +14,6 @@ public final class GetActiveContractsRequest { @NonNull private final Long activeAtOffset; - // TODO(#23504) remove - @Deprecated - public GetActiveContractsRequest( - @NonNull TransactionFilter transactionFilter, boolean verbose, @NonNull Long activeAtOffset) { - this.eventFormat = - new EventFormat( - transactionFilter.getPartyToFilters(), transactionFilter.getAnyPartyFilter(), verbose); - this.activeAtOffset = activeAtOffset; - } - public GetActiveContractsRequest(@NonNull EventFormat eventFormat, @NonNull Long activeAtOffset) { this.eventFormat = eventFormat; this.activeAtOffset = activeAtOffset; @@ -32,18 +22,10 @@ public GetActiveContractsRequest(@NonNull EventFormat eventFormat, @NonNull Long public static GetActiveContractsRequest fromProto( StateServiceOuterClass.GetActiveContractsRequest request) { if (request.hasEventFormat()) { - if (request.hasFilter() || request.getVerbose()) - throw new IllegalArgumentException( - "Request has both eventFormat and filter/verbose defined."); return new GetActiveContractsRequest( EventFormat.fromProto(request.getEventFormat()), request.getActiveAtOffset()); } else { - if (!request.hasFilter()) - throw new IllegalArgumentException("Request has neither eventFormat nor filter defined."); - return new GetActiveContractsRequest( - TransactionFilter.fromProto(request.getFilter()), - request.getVerbose(), - request.getActiveAtOffset()); + throw new IllegalArgumentException("Request has no eventFormat defined."); } } @@ -54,17 +36,6 @@ public StateServiceOuterClass.GetActiveContractsRequest toProto() { .build(); } - // TODO(i23504) remove - @NonNull - public TransactionFilter getTransactionFilter() { - return new TransactionFilter(eventFormat.getPartyToFilters(), eventFormat.getAnyPartyFilter()); - } - - // TODO(i23504) remove - public boolean isVerbose() { - return eventFormat.getVerbose(); - } - @NonNull public Long getActiveAtOffset() { return activeAtOffset; diff --git a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionByIdRequest.java b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionByIdRequest.java deleted file mode 100644 index 9cb54eb399..0000000000 --- a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionByIdRequest.java +++ 
/dev/null @@ -1,112 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.daml.ledger.javaapi.data; - -import com.daml.ledger.api.v2.UpdateServiceOuterClass; -import org.checkerframework.checker.nullness.qual.NonNull; - -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.stream.Collectors; - -/** - * Helper wrapper of a grpc message used in GetTransactionById and GetTransactionTreeById calls. - * Class will be removed in 3.4.0. - */ -// TODO(#23504) remove -@Deprecated -public final class GetTransactionByIdRequest { - - @NonNull private final String updateId; - - @NonNull private final TransactionFormat transactionFormat; - - public GetTransactionByIdRequest( - @NonNull String updateId, @NonNull List<@NonNull String> requestingParties) { - this.updateId = updateId; - Map partyFilters = - requestingParties.stream() - .collect( - Collectors.toMap( - party -> party, - party -> - new CumulativeFilter( - Map.of(), - Map.of(), - Optional.of(Filter.Wildcard.HIDE_CREATED_EVENT_BLOB)))); - EventFormat eventFormat = new EventFormat(partyFilters, Optional.empty(), true); - this.transactionFormat = new TransactionFormat(eventFormat, TransactionShape.ACS_DELTA); - } - - public GetTransactionByIdRequest( - @NonNull String updateId, @NonNull TransactionFormat transactionFormat) { - this.updateId = updateId; - this.transactionFormat = transactionFormat; - } - - @NonNull - public String getUpdateId() { - return updateId; - } - - @NonNull - public List<@NonNull String> getRequestingParties() { - return transactionFormat.getEventFormat().getParties().stream().toList(); - } - - @NonNull - public TransactionFormat getTransactionFormat() { - return transactionFormat; - } - - public static GetTransactionByIdRequest fromProto( - UpdateServiceOuterClass.GetTransactionByIdRequest request) { - if (request.hasTransactionFormat()) { - if (!request.getRequestingPartiesList().isEmpty()) - throw new IllegalArgumentException( - "Request has both transactionFormat and requestingParties defined."); - return new GetTransactionByIdRequest( - request.getUpdateId(), TransactionFormat.fromProto(request.getTransactionFormat())); - } else { - if (request.getRequestingPartiesList().isEmpty()) - throw new IllegalArgumentException( - "Request has neither transactionFormat nor requestingParties defined."); - return new GetTransactionByIdRequest( - request.getUpdateId(), request.getRequestingPartiesList()); - } - } - - public UpdateServiceOuterClass.GetTransactionByIdRequest toProto() { - return UpdateServiceOuterClass.GetTransactionByIdRequest.newBuilder() - .setUpdateId(updateId) - .setTransactionFormat(transactionFormat.toProto()) - .build(); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - GetTransactionByIdRequest that = (GetTransactionByIdRequest) o; - return Objects.equals(updateId, that.updateId) - && Objects.equals(transactionFormat, that.transactionFormat); - } - - @Override - public int hashCode() { - return Objects.hash(updateId, transactionFormat); - } - - @Override - public String toString() { - return "GetTransactionByIdRequest{" - + "updateId=" - + updateId - + ", transactionFormat=" - + transactionFormat - + '}'; - } -} diff --git a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionByOffsetRequest.java 
b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionByOffsetRequest.java deleted file mode 100644 index dfedab5ca6..0000000000 --- a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionByOffsetRequest.java +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.daml.ledger.javaapi.data; - -import com.daml.ledger.api.v2.UpdateServiceOuterClass; -import org.checkerframework.checker.nullness.qual.NonNull; - -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.stream.Collectors; - -/** - * Helper wrapper of a grpc message used in GetTransactionByOffset and GetTransactionTreeByOffset - * calls. Class will be removed in 3.4.0. - */ -// TODO(#23504) remove -@Deprecated -public final class GetTransactionByOffsetRequest { - - @NonNull private final Long offset; - - @NonNull private final TransactionFormat transactionFormat; - - public GetTransactionByOffsetRequest( - @NonNull Long offset, @NonNull List<@NonNull String> requestingParties) { - this.offset = offset; - Map partyFilters = - requestingParties.stream() - .collect( - Collectors.toMap( - party -> party, - party -> - new CumulativeFilter( - Map.of(), - Map.of(), - Optional.of(Filter.Wildcard.HIDE_CREATED_EVENT_BLOB)))); - EventFormat eventFormat = new EventFormat(partyFilters, Optional.empty(), true); - this.transactionFormat = new TransactionFormat(eventFormat, TransactionShape.ACS_DELTA); - } - - public GetTransactionByOffsetRequest( - @NonNull Long offset, @NonNull TransactionFormat transactionFormat) { - this.offset = offset; - this.transactionFormat = transactionFormat; - } - - @NonNull - public Long getOffset() { - return offset; - } - - @NonNull - public List<@NonNull String> getRequestingParties() { - return transactionFormat.getEventFormat().getParties().stream().toList(); - } - - @NonNull - public TransactionFormat getTransactionFormat() { - return transactionFormat; - } - - public static GetTransactionByOffsetRequest fromProto( - UpdateServiceOuterClass.GetTransactionByOffsetRequest request) { - if (request.hasTransactionFormat()) { - if (!request.getRequestingPartiesList().isEmpty()) - throw new IllegalArgumentException( - "Request has both transactionFormat and requestingParties defined."); - return new GetTransactionByOffsetRequest( - request.getOffset(), TransactionFormat.fromProto(request.getTransactionFormat())); - } else { - if (request.getRequestingPartiesList().isEmpty()) - throw new IllegalArgumentException( - "Request has neither transactionFormat nor requestingParties defined."); - return new GetTransactionByOffsetRequest( - request.getOffset(), request.getRequestingPartiesList()); - } - } - - public UpdateServiceOuterClass.GetTransactionByOffsetRequest toProto() { - return UpdateServiceOuterClass.GetTransactionByOffsetRequest.newBuilder() - .setOffset(offset) - .setTransactionFormat(transactionFormat.toProto()) - .build(); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - GetTransactionByOffsetRequest that = (GetTransactionByOffsetRequest) o; - return Objects.equals(offset, that.offset) - && Objects.equals(transactionFormat, that.transactionFormat); - } - - @Override - public int hashCode() { - return Objects.hash(offset, transactionFormat); - } - - 
@Override - public String toString() { - return "GetTransactionByOffsetRequest{" - + "offset=" - + offset - + ", transactionFormat=" - + transactionFormat - + '}'; - } -} diff --git a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionResponse.java b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionResponse.java deleted file mode 100644 index c0ab9de801..0000000000 --- a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionResponse.java +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.daml.ledger.javaapi.data; - -import com.daml.ledger.api.v2.UpdateServiceOuterClass; -import org.checkerframework.checker.nullness.qual.NonNull; - -import java.util.Objects; - -/** - * Helper wrapper of a grpc message used in GetTransactionById and GetTransactionByOffset calls. - * Class will be removed in 3.4.0. - */ -// TODO(#23504) remove -@Deprecated -public final class GetTransactionResponse { - - @NonNull private final Transaction transaction; - - private GetTransactionResponse(@NonNull Transaction transaction) { - this.transaction = transaction; - } - - public Transaction getTransaction() { - return transaction; - } - - public static GetTransactionResponse fromProto( - UpdateServiceOuterClass.GetTransactionResponse response) { - return new GetTransactionResponse(Transaction.fromProto(response.getTransaction())); - } - - public UpdateServiceOuterClass.GetTransactionResponse toProto() { - return UpdateServiceOuterClass.GetTransactionResponse.newBuilder() - .setTransaction(transaction.toProto()) - .build(); - } - - @Override - public String toString() { - return "GetTransactionResponse{" + "transaction=" + transaction + '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - GetTransactionResponse that = (GetTransactionResponse) o; - return Objects.equals(transaction, that.transaction); - } - - @Override - public int hashCode() { - return Objects.hash(transaction); - } -} diff --git a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionTreeResponse.java b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionTreeResponse.java deleted file mode 100644 index c79ce00d0b..0000000000 --- a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionTreeResponse.java +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.daml.ledger.javaapi.data; - -import com.daml.ledger.api.v2.UpdateServiceOuterClass; -import org.checkerframework.checker.nullness.qual.NonNull; - -import java.util.Objects; - -// TODO(#23504) remove -@Deprecated -public final class GetTransactionTreeResponse { - - @NonNull private final TransactionTree transactionTree; - - private GetTransactionTreeResponse(@NonNull TransactionTree transactionTree) { - this.transactionTree = transactionTree; - } - - public TransactionTree getTransactionTree() { - return transactionTree; - } - - public static GetTransactionTreeResponse fromProto( - UpdateServiceOuterClass.GetTransactionTreeResponse response) { - return new GetTransactionTreeResponse(TransactionTree.fromProto(response.getTransaction())); - } - - public UpdateServiceOuterClass.GetTransactionTreeResponse toProto() { - return UpdateServiceOuterClass.GetTransactionTreeResponse.newBuilder() - .setTransaction(transactionTree.toProto()) - .build(); - } - - @Override - public String toString() { - return "GetTransactionTreeResponse{" + "transactionTree=" + transactionTree + '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - GetTransactionTreeResponse that = (GetTransactionTreeResponse) o; - return Objects.equals(transactionTree, that.transactionTree); - } - - @Override - public int hashCode() { - return Objects.hash(transactionTree); - } -} diff --git a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetUpdateTreesResponse.java b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetUpdateTreesResponse.java deleted file mode 100644 index 427e070067..0000000000 --- a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetUpdateTreesResponse.java +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.daml.ledger.javaapi.data; - -import com.daml.ledger.api.v2.UpdateServiceOuterClass; -import org.checkerframework.checker.nullness.qual.NonNull; - -import java.util.Objects; -import java.util.Optional; - -/** Helper wrapper of a grpc message used in GetUpdateTrees call. Class will be removed in 3.4.0. 
*/ -// TODO(#23504) remove -@Deprecated -public final class GetUpdateTreesResponse { - - @NonNull private final Optional transactionTree; - - @NonNull private final Optional reassignment; - - @NonNull private final Optional offsetCheckpoint; - - private GetUpdateTreesResponse( - @NonNull Optional transactionTree, - @NonNull Optional reassignment, - @NonNull Optional offsetCheckpoint) { - this.transactionTree = transactionTree; - this.reassignment = reassignment; - this.offsetCheckpoint = offsetCheckpoint; - } - - public GetUpdateTreesResponse(@NonNull TransactionTree transactionTree) { - this(Optional.of(transactionTree), Optional.empty(), Optional.empty()); - } - - public GetUpdateTreesResponse(@NonNull Reassignment reassignment) { - this(Optional.empty(), Optional.of(reassignment), Optional.empty()); - } - - public GetUpdateTreesResponse(@NonNull OffsetCheckpoint offsetCheckpoint) { - this(Optional.empty(), Optional.empty(), Optional.of(offsetCheckpoint)); - } - - @NonNull - public Optional getTransactionTree() { - return transactionTree; - } - - @NonNull - public Optional getReassignment() { - return reassignment; - } - - @NonNull - public Optional getOffsetCheckpoint() { - return offsetCheckpoint; - } - - public static GetUpdateTreesResponse fromProto( - UpdateServiceOuterClass.GetUpdateTreesResponse response) { - return new GetUpdateTreesResponse( - response.hasTransactionTree() - ? Optional.of(TransactionTree.fromProto(response.getTransactionTree())) - : Optional.empty(), - response.hasReassignment() - ? Optional.of(Reassignment.fromProto(response.getReassignment())) - : Optional.empty(), - response.hasOffsetCheckpoint() - ? Optional.of(OffsetCheckpoint.fromProto(response.getOffsetCheckpoint())) - : Optional.empty()); - } - - public UpdateServiceOuterClass.GetUpdateTreesResponse toProto() { - var builder = UpdateServiceOuterClass.GetUpdateTreesResponse.newBuilder(); - transactionTree.ifPresent(t -> builder.setTransactionTree(t.toProto())); - reassignment.ifPresent(r -> builder.setReassignment(r.toProto())); - offsetCheckpoint.ifPresent(c -> builder.setOffsetCheckpoint(c.toProto())); - return builder.build(); - } - - @Override - public String toString() { - return "GetUpdateTreesResponse{" - + "transactionTree=" - + transactionTree - + ", reassignment=" - + reassignment - + ", offsetCheckpoint=" - + offsetCheckpoint - + '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - GetUpdateTreesResponse that = (GetUpdateTreesResponse) o; - return Objects.equals(transactionTree, that.transactionTree) - && Objects.equals(reassignment, that.reassignment) - && Objects.equals(offsetCheckpoint, that.offsetCheckpoint); - } - - @Override - public int hashCode() { - return Objects.hash(transactionTree, reassignment, offsetCheckpoint); - } -} diff --git a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetUpdatesRequest.java b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetUpdatesRequest.java index 19e81554af..613fc38ca8 100644 --- a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetUpdatesRequest.java +++ b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetUpdatesRequest.java @@ -18,41 +18,6 @@ public final class GetUpdatesRequest { @NonNull private final UpdateFormat updateFormat; - // TODO(#23504) remove - @Deprecated - public GetUpdatesRequest( - @NonNull Long beginExclusive, - @NonNull 
Optional endInclusive, - @NonNull TransactionFilter transactionFilter, - boolean verbose) { - this.beginExclusive = beginExclusive; - this.endInclusive = endInclusive; - EventFormat eventFormat = - new EventFormat( - transactionFilter.getPartyToFilters(), transactionFilter.getAnyPartyFilter(), verbose); - Optional transactionFormat = - Optional.of(new TransactionFormat(eventFormat, TransactionShape.ACS_DELTA)); - Optional> allFilterPartiesO = - transactionFilter.getAnyPartyFilter().isPresent() - ? - // a filter for the wildcard party is defined then we want the topology events for all - // the parties (denoted by the empty set) - Optional.of(Set.of()) - : (transactionFilter.getParties().isEmpty() - ? - // by-party filters are not defined, do not fetch any topology events - Optional.empty() - : - // by-party filters are not defined, fetch any topology events for the parties - // specified - Optional.of(transactionFilter.getParties())); - Optional topologyFormat = - Optional.of( - new TopologyFormat(allFilterPartiesO.map(ParticipantAuthorizationTopologyFormat::new))); - this.updateFormat = - new UpdateFormat(transactionFormat, Optional.of(eventFormat), topologyFormat); - } - public GetUpdatesRequest( @NonNull Long beginExclusive, @NonNull Optional endInclusive, @@ -64,21 +29,12 @@ public GetUpdatesRequest( public static GetUpdatesRequest fromProto(UpdateServiceOuterClass.GetUpdatesRequest request) { if (request.hasUpdateFormat()) { - if (request.hasFilter() || request.getVerbose()) - throw new IllegalArgumentException( - "Request has both updateFormat and filter/verbose defined."); return new GetUpdatesRequest( request.getBeginExclusive(), request.hasEndInclusive() ? Optional.of(request.getEndInclusive()) : Optional.empty(), UpdateFormat.fromProto(request.getUpdateFormat())); } else { - if (!request.hasFilter()) - throw new IllegalArgumentException("Request has neither updateFormat nor filter defined."); - return new GetUpdatesRequest( - request.getBeginExclusive(), - request.hasEndInclusive() ? 
Optional.of(request.getEndInclusive()) : Optional.empty(), - TransactionFilter.fromProto(request.getFilter()), - request.getVerbose()); + throw new IllegalArgumentException("Request has no updateFormat defined."); } } @@ -92,31 +48,6 @@ public UpdateServiceOuterClass.GetUpdatesRequest toProto() { return builder.build(); } - // TODO(#23504) remove - public UpdateServiceOuterClass.GetUpdatesRequest toProtoLegacy() { - UpdateServiceOuterClass.GetUpdatesRequest.Builder builder = - UpdateServiceOuterClass.GetUpdatesRequest.newBuilder() - .setBeginExclusive(beginExclusive) - .setVerbose( - updateFormat - .getIncludeTransactions() - .map(TransactionFormat::getEventFormat) - .map(EventFormat::getVerbose) - .orElse(false)); - - updateFormat - .getIncludeTransactions() - .map(TransactionFormat::getEventFormat) - .ifPresent( - t -> - builder.setFilter( - new TransactionFilter(t.getPartyToFilters(), t.getAnyPartyFilter()).toProto())); - - endInclusive.ifPresent(builder::setEndInclusive); - - return builder.build(); - } - @NonNull public Long getBeginExclusive() { return beginExclusive; diff --git a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/SubmitAndWaitForTransactionTreeResponse.java b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/SubmitAndWaitForTransactionTreeResponse.java deleted file mode 100644 index 1d3047eca7..0000000000 --- a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/SubmitAndWaitForTransactionTreeResponse.java +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.daml.ledger.javaapi.data; - -import com.daml.ledger.api.v2.CommandServiceOuterClass; -import org.checkerframework.checker.nullness.qual.NonNull; - -import java.util.Objects; - -/** - * Helper wrapper of a grpc message used in SubmitAndWaitForTransactionTree call. Class will be - * removed in 3.4.0. 
- */ -// TODO(#23504) remove -@Deprecated -public final class SubmitAndWaitForTransactionTreeResponse { - - @NonNull private final TransactionTree transaction; - - private SubmitAndWaitForTransactionTreeResponse(@NonNull TransactionTree transaction) { - this.transaction = transaction; - } - - @NonNull - public TransactionTree getTransaction() { - return transaction; - } - - public static SubmitAndWaitForTransactionTreeResponse fromProto( - CommandServiceOuterClass.SubmitAndWaitForTransactionTreeResponse response) { - return new SubmitAndWaitForTransactionTreeResponse( - TransactionTree.fromProto(response.getTransaction())); - } - - public CommandServiceOuterClass.SubmitAndWaitForTransactionTreeResponse toProto() { - return CommandServiceOuterClass.SubmitAndWaitForTransactionTreeResponse.newBuilder() - .setTransaction(transaction.toProto()) - .build(); - } - - @Override - public String toString() { - - return "SubmitAndWaitForTransactionTreeResponse{" + "transaction=" + transaction + '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - SubmitAndWaitForTransactionTreeResponse that = (SubmitAndWaitForTransactionTreeResponse) o; - return Objects.equals(transaction, that.transaction); - } - - @Override - public int hashCode() { - return Objects.hash(transaction); - } -} diff --git a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/TransactionFilter.java b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/TransactionFilter.java deleted file mode 100644 index 69ca774a55..0000000000 --- a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/TransactionFilter.java +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.daml.ledger.javaapi.data; - -import com.daml.ledger.api.v2.TransactionFilterOuterClass; -import com.daml.ledger.javaapi.data.codegen.ContractCompanion; -import com.daml.ledger.javaapi.data.codegen.ContractTypeCompanion; -import org.checkerframework.checker.nullness.qual.NonNull; - -import java.util.*; -import java.util.function.Function; -import java.util.stream.Collectors; - -/** Filter used in the state and update service stream queries. Class will be removed in 3.4.0. */ -// TODO(#23504) remove -@Deprecated -public final class TransactionFilter { - - private Map partyToFilters; - private Optional anyPartyFilterO; - - public static TransactionFilter fromProto( - TransactionFilterOuterClass.TransactionFilter transactionFilter) { - Map partyToFilters = - transactionFilter.getFiltersByPartyMap(); - HashMap converted = new HashMap<>(partyToFilters.size()); - for (Map.Entry entry : partyToFilters.entrySet()) { - converted.put(entry.getKey(), Filter.fromProto(entry.getValue())); - } - - TransactionFilterOuterClass.Filters anyPartyFilters = transactionFilter.getFiltersForAnyParty(); - Filter convertedAnyPartyFilter = Filter.fromProto(anyPartyFilters); - - Optional anyPartyFilterO = - (convertedAnyPartyFilter instanceof NoFilter) - ? 
Optional.empty() - : Optional.of(convertedAnyPartyFilter); - - return new TransactionFilter(converted, anyPartyFilterO); - } - - public TransactionFilterOuterClass.TransactionFilter toProto() { - HashMap partyToFilters = - new HashMap<>(this.partyToFilters.size()); - for (Map.Entry entry : this.partyToFilters.entrySet()) { - partyToFilters.put(entry.getKey(), entry.getValue().toProto()); - } - - TransactionFilterOuterClass.TransactionFilter.Builder builder = - TransactionFilterOuterClass.TransactionFilter.newBuilder() - .putAllFiltersByParty(partyToFilters); - - this.anyPartyFilterO.ifPresent(value -> builder.setFiltersForAnyParty(value.toProto())); - - return builder.build(); - } - - public Set getParties() { - return partyToFilters.keySet(); - } - - public Map getPartyToFilters() { - return partyToFilters; - } - - public Optional getAnyPartyFilter() { - return anyPartyFilterO; - } - - public TransactionFilter( - @NonNull Map<@NonNull String, @NonNull Filter> partyToFilters, - @NonNull Optional<@NonNull Filter> anyPartyFilterO) { - this.partyToFilters = partyToFilters; - this.anyPartyFilterO = anyPartyFilterO; - } - - public static TransactionFilter transactionFilter( - ContractTypeCompanion contractCompanion, Optional> partiesO) { - Filter filter = - (contractCompanion instanceof ContractCompanion) - ? new CumulativeFilter( - Collections.emptyMap(), - Collections.singletonMap( - contractCompanion.TEMPLATE_ID, Filter.Template.HIDE_CREATED_EVENT_BLOB), - Optional.empty()) - : new CumulativeFilter( - Map.of( - contractCompanion.TEMPLATE_ID, - Filter.Interface.INCLUDE_VIEW_HIDE_CREATED_EVENT_BLOB), - Collections.emptyMap(), - Optional.empty()); - - Map partyToFilters = - partiesO - .map( - parties -> - parties.stream().collect(Collectors.toMap(Function.identity(), x -> filter))) - .orElse(Map.of()); - - Optional anyPartyFilterO = partiesO.isEmpty() ? Optional.of(filter) : Optional.empty(); - - return new TransactionFilter(partyToFilters, anyPartyFilterO); - } - - @Override - public String toString() { - return "TransactionFilter{" - + "partyToFilters=" - + partyToFilters - + ", anyPartyFilterO=" - + anyPartyFilterO - + '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - TransactionFilter that = (TransactionFilter) o; - return Objects.equals(partyToFilters, that.partyToFilters) - && Objects.equals(anyPartyFilterO, that.anyPartyFilterO); - } - - @Override - public int hashCode() { - return Objects.hash(partyToFilters, anyPartyFilterO); - } -} diff --git a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/TransactionTree.java b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/TransactionTree.java deleted file mode 100644 index 1d70ee773e..0000000000 --- a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/TransactionTree.java +++ /dev/null @@ -1,353 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.daml.ledger.javaapi.data; - -import com.daml.ledger.api.v2.TraceContextOuterClass; -import com.daml.ledger.api.v2.TransactionOuterClass; -import org.checkerframework.checker.nullness.qual.NonNull; - -import java.time.Instant; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.function.BiFunction; -import java.util.stream.Collectors; - -/** - * Helper wrapper of a grpc message used in GetUpdateTrees, GetTransactionTreeById, - * GetTransactionTreeByOffset, and SubmitAndWaitForTransactionTree calls. Class will be removed in - * 3.4.0. - */ -// TODO(#23504) remove -@Deprecated -public final class TransactionTree { - - @NonNull private final String updateId; - - @NonNull private final String commandId; - - @NonNull private final String workflowId; - - @NonNull private final Instant effectiveAt; - - @NonNull private final Long offset; - - @NonNull private final Map eventsById; - - @NonNull private final String synchronizerId; - - private final TraceContextOuterClass.@NonNull TraceContext traceContext; - - @NonNull private final Instant recordTime; - - public TransactionTree( - @NonNull String updateId, - @NonNull String commandId, - @NonNull String workflowId, - @NonNull Instant effectiveAt, - @NonNull Long offset, - @NonNull Map<@NonNull Integer, @NonNull TreeEvent> eventsById, - @NonNull String synchronizerId, - TraceContextOuterClass.@NonNull TraceContext traceContext, - @NonNull Instant recordTime) { - this.updateId = updateId; - this.commandId = commandId; - this.workflowId = workflowId; - this.effectiveAt = effectiveAt; - this.offset = offset; - this.eventsById = eventsById; - this.synchronizerId = synchronizerId; - this.traceContext = traceContext; - this.recordTime = recordTime; - } - - public static TransactionTree fromProto(TransactionOuterClass.TransactionTree tree) { - Instant effectiveAt = - Instant.ofEpochSecond(tree.getEffectiveAt().getSeconds(), tree.getEffectiveAt().getNanos()); - Map eventsById = - tree.getEventsByIdMap().values().stream() - .collect( - Collectors.toMap( - e -> { - if (e.hasCreated()) return e.getCreated().getNodeId(); - else if (e.hasExercised()) return e.getExercised().getNodeId(); - else - throw new IllegalArgumentException( - "Event is neither created nor exercised: " + e); - }, - TreeEvent::fromProtoTreeEvent)); - return new TransactionTree( - tree.getUpdateId(), - tree.getCommandId(), - tree.getWorkflowId(), - effectiveAt, - tree.getOffset(), - eventsById, - tree.getSynchronizerId(), - tree.getTraceContext(), - Utils.instantFromProto(tree.getRecordTime())); - } - - public TransactionOuterClass.TransactionTree toProto() { - return TransactionOuterClass.TransactionTree.newBuilder() - .setUpdateId(updateId) - .setCommandId(commandId) - .setWorkflowId(workflowId) - .setEffectiveAt( - com.google.protobuf.Timestamp.newBuilder() - .setSeconds(effectiveAt.getEpochSecond()) - .setNanos(effectiveAt.getNano()) - .build()) - .setOffset(offset) - .putAllEventsById( - eventsById.values().stream() - .collect(Collectors.toMap(TreeEvent::getNodeId, TreeEvent::toProtoTreeEvent))) - .setSynchronizerId(synchronizerId) - .setTraceContext(traceContext) - .setRecordTime(Utils.instantToProto(recordTime)) - .build(); - } - - /** - * A generic class that encapsulates a transaction tree along with a list of the wrapped root - * events of the tree. 
The wrapped root events are used to construct the tree that is described by - * the transaction tree as a tree of WrappedEvents. - * - * @param the type of the wrapped events - */ - public static class WrappedTransactionTree { - /** The original transaction tree. */ - private final TransactionTree transactionTree; - /** The list of wrapped root events generated from the transaction tree. */ - private final List wrappedRootEvents; - - public WrappedTransactionTree( - TransactionTree transactionTree, List wrappedRootEvents) { - this.transactionTree = transactionTree; - this.wrappedRootEvents = wrappedRootEvents; - } - - public TransactionTree getTransactionTree() { - return transactionTree; - } - - public List getWrappedRootEvents() { - return wrappedRootEvents; - } - } - - /** - * Constructs a tree described by the transaction tree. - * - * @param the type of the wrapped events of the constructed tree - * @param createWrappedEvent the function that constructs a WrappedEvent node of the tree given - * the current node and its converted children as a list of WrappedEvents nodes - * @return the original transaction tree and the list of the wrapped root events - */ - public WrappedTransactionTree toWrappedTree( - BiFunction, WrappedEvent> createWrappedEvent) { - - List wrappedRootEvents = TransactionTreeUtils.buildTree(this, createWrappedEvent); - - return new WrappedTransactionTree<>(this, wrappedRootEvents); - } - - @NonNull - public String getUpdateId() { - return updateId; - } - - @NonNull - public String getCommandId() { - return commandId; - } - - @NonNull - public String getWorkflowId() { - return workflowId; - } - - @NonNull - public Instant getEffectiveAt() { - return effectiveAt; - } - - @NonNull - public Long getOffset() { - return offset; - } - - @NonNull - public Map getEventsById() { - return eventsById; - } - - /** - * Computes the node ids of the root nodes (i.e. the nodes that do not have any ancestors). A node - * can be considered a root if there are not any ancestors of it. There is no guarantee that the - * root node was also a root in the original transaction tree (i.e. before filtering out events - * from the original transaction tree). - * - * @return the root node ids - */ - @NonNull - public List getRootNodeIds() { - Map lastDescendantById = - getEventsById().entrySet().stream() - .collect( - Collectors.toMap( - Map.Entry::getKey, - entry -> - entry.getValue().toProtoTreeEvent().hasExercised() - ? entry - .getValue() - .toProtoTreeEvent() - .getExercised() - .getLastDescendantNodeId() - : entry.getKey())); - - List nodeIds = getEventsById().keySet().stream().sorted().toList(); - - List rootNodes = new ArrayList<>(); - - int index = 0; - while (index < nodeIds.size()) { - Integer nodeId = nodeIds.get(index); - Integer lastDescendant = lastDescendantById.get(nodeId); - if (lastDescendant == null) { - throw new RuntimeException("Node with id " + nodeId + " not found"); - } - - rootNodes.add(nodeId); - while (index < nodeIds.size() && nodeIds.get(index) <= lastDescendant) { - index++; - } - } - - return rootNodes; - } - - @NonNull - public String getSynchronizerId() { - return synchronizerId; - } - - public TraceContextOuterClass.@NonNull TraceContext getTraceContext() { - return traceContext; - } - - @NonNull - public Instant getRecordTime() { - return recordTime; - } - - /** - * Computes the children nodes of an exercised event. It finds the candidate nodes that could be - * children of the event given (i.e. its descendants). 
Then it repeatedly finds from the - * candidates the one with the lowest id and adds it to its children and removes the child's - * descendants from the list with the candidates. A node can be considered a child of another node - * if there are not any intermediate descendants between its parent and itself. There is no - * guarantee that the child was a child of its parent in the original transaction tree (i.e. - * before filtering out events from the original transaction tree). - * - * @param exercised the exercised event - * @return the children's node ids - */ - @NonNull - public List<@NonNull Integer> getChildNodeIds(ExercisedEvent exercised) { - Integer nodeId = exercised.getNodeId(); - Integer lastDescendant = exercised.getLastDescendantNodeId(); - - List candidates = - getEventsById().entrySet().stream() - .filter(entry -> entry.getKey() > nodeId && entry.getKey() <= lastDescendant) - .sorted(Map.Entry.comparingByKey()) - .map(Map.Entry::getValue) - .toList(); - - List childNodes = new ArrayList<>(); - - int index = 0; - while (index < candidates.size()) { - TreeEvent node = candidates.get(index); - // first candidate will always be a child since it is not a descendant of another intermediate - // node - Integer childNodeId = node.getNodeId(); - Integer childLastDescendant = - node.toProtoTreeEvent().hasExercised() - ? node.toProtoTreeEvent().getExercised().getLastDescendantNodeId() - : childNodeId; - - // add child to children and skip its descendants - childNodes.add(childNodeId); - index++; - while (index < candidates.size() - && candidates.get(index).getNodeId() <= childLastDescendant) { - index++; - } - } - - return childNodes; - } - - @Override - public String toString() { - return "TransactionTree{" - + "updateId='" - + updateId - + '\'' - + ", commandId='" - + commandId - + '\'' - + ", workflowId='" - + workflowId - + '\'' - + ", effectiveAt=" - + effectiveAt - + ", offset='" - + offset - + '\'' - + ", eventsById=" - + eventsById - + ", synchronizerId='" - + synchronizerId - + '\'' - + ", traceContext=" - + traceContext - + ", recordTime=" - + recordTime - + '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - TransactionTree that = (TransactionTree) o; - return Objects.equals(updateId, that.updateId) - && Objects.equals(commandId, that.commandId) - && Objects.equals(workflowId, that.workflowId) - && Objects.equals(effectiveAt, that.effectiveAt) - && Objects.equals(eventsById, that.eventsById) - && Objects.equals(offset, that.offset) - && Objects.equals(synchronizerId, that.synchronizerId) - && Objects.equals(traceContext, that.traceContext) - && Objects.equals(recordTime, that.recordTime); - } - - @Override - public int hashCode() { - return Objects.hash( - updateId, - commandId, - workflowId, - effectiveAt, - offset, - eventsById, - synchronizerId, - traceContext, - recordTime); - } -} diff --git a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/TransactionTreeUtils.java b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/TransactionTreeUtils.java deleted file mode 100644 index e6187373ee..0000000000 --- a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/TransactionTreeUtils.java +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.daml.ledger.javaapi.data; - -import java.util.*; -import java.util.function.BiFunction; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -public class TransactionTreeUtils { - - private TransactionTreeUtils() {} - - /** - * Constructs a tree described by a transaction tree. - * - * @param the type of the wrapped events of the constructed tree - * @param transactionTree the transaction tree - * @param createWrappedEvent the function that constructs a WrappedEvent node of the tree given - * the current node and its converted children as a list of WrappedEvents nodes - * @return the list of the wrapped root events. Method will be removed in 3.4.0 - */ - public static List buildTree( - TransactionTree transactionTree, - BiFunction, WrappedEvent> createWrappedEvent) { - List nodes = - transactionTree.getEventsById().values().stream() - .map( - treeEvent -> - new Node( - treeEvent.getNodeId(), - treeEvent.toProtoTreeEvent().hasExercised() - ? treeEvent.toProtoTreeEvent().getExercised().getLastDescendantNodeId() - : treeEvent.getNodeId())) - .toList(); - - List rootNodeIds = transactionTree.getRootNodeIds(); - - // fill the nodes with their children - buildNodeForest(nodes, rootNodeIds); - - Map nodesById = - nodes.stream().collect(Collectors.toMap(node -> node.nodeId, node -> node)); - - Stream rootNodes = - rootNodeIds.stream() - .map( - rootNodeId -> { - Node rootNode = nodesById.get(rootNodeId); - if (rootNode == null) { - throw new RuntimeException("Node with id " + rootNodeId + " not found"); - } else return rootNode; - }); - - return rootNodes - .map( - rootNode -> - buildWrappedEventTree( - rootNode, nodesById, transactionTree.getEventsById(), createWrappedEvent)) - .toList(); - } - - private static class Node { - Integer nodeId; - Integer lastDescendantNodeId; - List children; - - Node(Integer nodeId, Integer lastDescendant) { - this.nodeId = nodeId; - this.lastDescendantNodeId = lastDescendant; - this.children = new ArrayList<>(); - } - } - - private static WrappedEvent buildWrappedEventTree( - Node root, - Map nodesById, - Map eventsById, - BiFunction, WrappedEvent> createWrappedEvent) { - Integer nodeId = root.nodeId; - Node node = nodesById.get(nodeId); - if (node == null) { - throw new RuntimeException("node with id " + nodeId + " not found"); - } - - TreeEvent treeEvent = eventsById.get(nodeId); - if (treeEvent == null) { - throw new RuntimeException("TreeEvent with id " + nodeId + " not found"); - } - - // build children subtrees - List childrenWrapped = - node.children.stream() - .map(child -> buildWrappedEventTree(child, nodesById, eventsById, createWrappedEvent)) - .collect(Collectors.toList()); - - return createWrappedEvent.apply(treeEvent, childrenWrapped); - } - - // fill nodes with their children based on the last descendant - private static void buildNodeForest(List nodes, List rootNodeIds) { - - List rootNodes = - nodes.stream() - .sorted(Comparator.comparingInt(node -> node.nodeId)) - .filter(node -> rootNodeIds.contains(node.nodeId)) - .toList(); - - // ensure that nodes are sorted - List nodesSorted = - nodes.stream().sorted(Comparator.comparingInt(node -> node.nodeId)).toList(); - - for (Node root : rootNodes) { - buildNodeTree(root, nodesSorted); - } - } - - private static void buildNodeTree(Node root, List allNodes) { - Deque stack = new ArrayDeque<>(); - stack.push(root); - - for (Node node : allNodes) { - // Skip nodes that are not descendants of this root - if (node.nodeId <= 
root.nodeId || node.nodeId > root.lastDescendantNodeId) continue; - - // Pop nodes from the stack if the current node is not a descendant of the top node - while (!stack.isEmpty() && node.nodeId > stack.peek().lastDescendantNodeId) { - stack.pop(); - } - - // Add the current node as a child of the top node in the stack - if (!stack.isEmpty()) { - stack.peek().children.add(node); - } - - // Push the current node onto the stack - stack.push(node); - } - } -} diff --git a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/TreeEvent.java b/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/TreeEvent.java deleted file mode 100644 index 8882cd0d96..0000000000 --- a/canton/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/TreeEvent.java +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.daml.ledger.javaapi.data; - -import com.daml.ledger.api.v2.TransactionOuterClass; -import java.util.List; -import org.checkerframework.checker.nullness.qual.NonNull; - -/** - * This interface represents events in {@link TransactionTree}s. - * - * @see CreatedEvent - * @see ExercisedEvent - * @see TransactionTree; Interface will be removed in 3.4.0 - */ -// TODO(#23504) remove -@Deprecated -public interface TreeEvent { - - @NonNull - List<@NonNull String> getWitnessParties(); - - @NonNull - Long getOffset(); - - @NonNull - Integer getNodeId(); - - @NonNull - Identifier getTemplateId(); - - @NonNull - String getPackageName(); - - @NonNull - String getContractId(); - - default TransactionOuterClass.TreeEvent toProtoTreeEvent() { - TransactionOuterClass.TreeEvent.Builder eventBuilder = - TransactionOuterClass.TreeEvent.newBuilder(); - if (this instanceof CreatedEvent) { - CreatedEvent event = (CreatedEvent) this; - eventBuilder.setCreated(event.toProto()); - } else if (this instanceof ExercisedEvent) { - ExercisedEvent event = (ExercisedEvent) this; - eventBuilder.setExercised(event.toProto()); - } else { - throw new RuntimeException( - "this should be CreatedEvent or ExercisedEvent, found " + this.toString()); - } - return eventBuilder.build(); - } - - static TreeEvent fromProtoTreeEvent(TransactionOuterClass.TreeEvent event) { - if (event.hasCreated()) { - return CreatedEvent.fromProto(event.getCreated()); - } else if (event.hasExercised()) { - return ExercisedEvent.fromProto(event.getExercised()); - } else { - throw new UnsupportedEventTypeException(event.toString()); - } - } -} diff --git a/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/ContractFilterSpec.scala b/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/ContractFilterSpec.scala index a4ecbd9d66..fba4c9e9c1 100644 --- a/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/ContractFilterSpec.scala +++ b/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/ContractFilterSpec.scala @@ -9,7 +9,6 @@ import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers import java.util.{Collections, Optional} -import scala.annotation.nowarn import scala.jdk.CollectionConverters.{MapHasAsJava, SetHasAsJava} import scala.util.chaining.scalaUtilChainingOps @@ -40,8 +39,6 @@ class ContractFilterSpec extends AnyFlatSpec with Matchers { Optional.empty(), ) - // TODO(#23504) remove suppression of deprecation warnings - @nowarn("cat=deprecation") private def 
assertFilters( contractFilter: ContractFilter[_], expectedIncluded: Boolean, @@ -78,18 +75,6 @@ class ContractFilterSpec extends AnyFlatSpec with Matchers { expectedShape, ) - // TODO(#23504) remove - contractFilter.transactionFilter(Optional.of(partiesSet)) shouldBe new TransactionFilter( - expectedPartyToFilters, - Optional.empty(), - ) - - // TODO(#23504) remove - contractFilter.transactionFilter(Optional.empty()) shouldBe new TransactionFilter( - Collections.emptyMap(), - Optional.of(expectedCumulativeFilter), - ) - contractFilter.eventFormat(Optional.of(partiesSet)) shouldBe expectedEventFormatWithParties contractFilter.eventFormat(Optional.empty()) shouldBe expectedWildcardEventFormat diff --git a/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/EventSpec.scala b/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/EventSpec.scala index fb0b14d446..a221654c22 100644 --- a/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/EventSpec.scala +++ b/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/EventSpec.scala @@ -59,6 +59,7 @@ class EventSpec extends AnyFlatSpec with Matchers with ScalaCheckDrivenPropertyC mutatingObservers, base.createdAt, base.isAcsDelta, + base.getRepresentativePackageId, ) mutatingWitnesses.add("INTRUDER!") diff --git a/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/Generators.scala b/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/Generators.scala index 10658da0bc..fe1b2fcf7d 100644 --- a/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/Generators.scala +++ b/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/Generators.scala @@ -15,7 +15,6 @@ import org.scalacheck.Arbitrary.arbitrary import org.scalacheck.{Arbitrary, Gen} import java.time.{Duration, Instant, LocalDate} -import scala.annotation.nowarn import scala.jdk.CollectionConverters.* import scala.util.chaining.scalaUtilChainingOps @@ -239,21 +238,6 @@ object Generators { .build() } - // TODO(#23504) remove as TreeEvent is deprecated - @nowarn("cat=deprecation") - def treeEventGen: Gen[v2.TransactionOuterClass.TreeEvent] = { - import v2.TransactionOuterClass.TreeEvent - for { - event <- Gen.oneOf( - createdEventGen.map(e => (b: TreeEvent.Builder) => b.setCreated(e)), - exercisedEventGen.map(e => (b: TreeEvent.Builder) => b.setExercised(e)), - ) - } yield v2.TransactionOuterClass.TreeEvent - .newBuilder() - .pipe(event) - .build() - } - def topologyEventGen: Gen[v2.TopologyTransactionOuterClass.TopologyEvent] = { import v2.TopologyTransactionOuterClass.TopologyEvent for { @@ -366,6 +350,7 @@ object Generators { signatories <- Gen.listOf(Gen.asciiPrintableStr) observers <- Gen.listOf(Gen.asciiPrintableStr) isAcsDelta <- Arbitrary.arbBool.arbitrary + representativePackageId <- identifierGen.map(_.getPackageId) } yield v2.EventOuterClass.CreatedEvent .newBuilder() .setCreatedAt(createdAt) @@ -381,6 +366,7 @@ object Generators { .addAllSignatories(signatories.asJava) .addAllObservers(observers.asJava) .setAcsDelta(isAcsDelta) + .setRepresentativePackageId(representativePackageId) .build() val createdEventGen: Gen[v2.EventOuterClass.CreatedEvent] = @@ -919,57 +905,6 @@ object Generators { getDescendants(node, 0)._1 } - // TODO(#23504) remove as TransactionTree is deprecated - @nowarn("cat=deprecation") - def transactionTreeGenWithIdsInPreOrder: Gen[v2.TransactionOuterClass.TransactionTree] = { - import 
v2.TransactionOuterClass.{TransactionTree, TreeEvent} - def treeEventGen(nodeId: Int, lastDescendantNodeId: Int): Gen[(Integer, TreeEvent)] = - for { - event <- - if (lastDescendantNodeId == nodeId) // the node is a leaf node - Gen.oneOf( - createdEventGen(nodeId).map(e => (b: TreeEvent.Builder) => b.setCreated(e)), - exercisedEventGen(nodeId, lastDescendantNodeId).map(e => - (b: TreeEvent.Builder) => b.setExercised(e) - ), - ) - else - exercisedEventGen(nodeId, lastDescendantNodeId).map(e => - (b: TreeEvent.Builder) => b.setExercised(e) - ) - } yield Int.box(nodeId) -> v2.TransactionOuterClass.TreeEvent - .newBuilder() - .pipe(event) - .build() - for { - updateId <- Arbitrary.arbString.arbitrary - commandId <- Arbitrary.arbString.arbitrary - workflowId <- Arbitrary.arbString.arbitrary - effectiveAt <- instantGen - nodeIds <- genNodeTree(maxDepth = 5, maxChildren = 5).map(assignIdsInPreOrder) - multipleRoots <- Gen.oneOf(Gen.const(false), Gen.const(nodeIds.sizeIs > 1)) - nodeIdsFiltered = if (multipleRoots) nodeIds.filterNot(_.id == 0) else nodeIds - eventsById <- Gen.sequence(nodeIdsFiltered.map { case NodeIds(start, end) => - treeEventGen(start, end) - }) - offset <- Arbitrary.arbLong.arbitrary - synchronizerId <- Arbitrary.arbString.arbitrary - traceContext <- Gen.const(Utils.newProtoTraceContext("parent", "state")) - recordTime <- instantGen - } yield TransactionTree - .newBuilder() - .setUpdateId(updateId) - .setCommandId(commandId) - .setWorkflowId(workflowId) - .setEffectiveAt(Utils.instantToProto(effectiveAt)) - .putAllEventsById(eventsById.asScala.toMap.asJava) - .setOffset(offset) - .setSynchronizerId(synchronizerId) - .setTraceContext(traceContext) - .setRecordTime(Utils.instantToProto(recordTime)) - .build() - } - def transactionGen: Gen[v2.TransactionOuterClass.Transaction] = { import v2.TransactionOuterClass.Transaction import v2.EventOuterClass.Event @@ -1031,43 +966,6 @@ object Generators { .map(_.sortBy(e => fromProtoEvent(e).getNodeId)) } yield transaction.toBuilder.clearEvents().addAllEvents(eventsFiltered.asJava).build() - // TODO(#23504) remove as TransactionTree is deprecated - @nowarn("cat=deprecation") - def transactionTreeGen: Gen[v2.TransactionOuterClass.TransactionTree] = { - import v2.TransactionOuterClass.{TransactionTree, TreeEvent} - def idTreeEventPairGen = - treeEventGen.map { e => - val id: Integer = e.getKindCase match { - case TreeEvent.KindCase.CREATED => e.getCreated.getNodeId - case TreeEvent.KindCase.EXERCISED => e.getExercised.getNodeId - case TreeEvent.KindCase.KIND_NOT_SET => sys.error("unrecognized TreeEvent") - } - id -> e - } - for { - updateId <- Arbitrary.arbString.arbitrary - commandId <- Arbitrary.arbString.arbitrary - workflowId <- Arbitrary.arbString.arbitrary - effectiveAt <- instantGen - eventsById <- Gen.mapOfN(10, idTreeEventPairGen) - offset <- Arbitrary.arbLong.arbitrary - synchronizerId <- Arbitrary.arbString.arbitrary - traceContext <- Gen.const(Utils.newProtoTraceContext("parent", "state")) - recordTime <- instantGen - } yield TransactionTree - .newBuilder() - .setUpdateId(updateId) - .setCommandId(commandId) - .setWorkflowId(workflowId) - .setEffectiveAt(Utils.instantToProto(effectiveAt)) - .putAllEventsById(eventsById.asJava) - .setOffset(offset) - .setSynchronizerId(synchronizerId) - .setTraceContext(traceContext) - .setRecordTime(Utils.instantToProto(recordTime)) - .build() - } - def topologyTransactionGen: Gen[v2.TopologyTransactionOuterClass.TopologyTransaction] = { import 
v2.TopologyTransactionOuterClass.TopologyTransaction for { @@ -1123,23 +1021,6 @@ object Generators { .build() } - // TODO(#23504) remove as GetTransactionByOffsetRequest is deprecated - @nowarn("cat=deprecation") - def getTransactionByOffsetRequestGen - : Gen[v2.UpdateServiceOuterClass.GetTransactionByOffsetRequest] = { - import v2.UpdateServiceOuterClass.GetTransactionByOffsetRequest as Request - for { - offset <- Arbitrary.arbLong.arbitrary - requestingParties <- Gen - .listOf(Arbitrary.arbString.arbitrary.suchThat(_.nonEmpty)) - .suchThat(_.nonEmpty) - } yield Request - .newBuilder() - .setOffset(offset) - .addAllRequestingParties(requestingParties.asJava) - .build() - } - def getUpdateByOffsetRequestGen: Gen[v2.UpdateServiceOuterClass.GetUpdateByOffsetRequest] = { import v2.UpdateServiceOuterClass.GetUpdateByOffsetRequest as Request for { @@ -1164,42 +1045,6 @@ object Generators { .build() } - // TODO(#23504) remove as GetTransactionByIdRequest is deprecated - @nowarn("cat=deprecation") - def getTransactionByIdRequestGen: Gen[v2.UpdateServiceOuterClass.GetTransactionByIdRequest] = { - import v2.UpdateServiceOuterClass.GetTransactionByIdRequest as Request - for { - updateId <- Arbitrary.arbString.arbitrary.suchThat(_.nonEmpty) - requestingParties <- Gen - .listOf(Arbitrary.arbString.arbitrary.suchThat(_.nonEmpty)) - .suchThat(_.nonEmpty) - } yield Request - .newBuilder() - .setUpdateId(updateId) - .addAllRequestingParties(requestingParties.asJava) - .build() - } - - // TODO(#23504) remove as GetTransactionResponse is deprecated - @nowarn("cat=deprecation") - def getTransactionResponseGen: Gen[v2.UpdateServiceOuterClass.GetTransactionResponse] = - transactionGen.map( - v2.UpdateServiceOuterClass.GetTransactionResponse - .newBuilder() - .setTransaction(_) - .build() - ) - - // TODO(#23504) remove as GetTransactionTreeResponse is deprecated - @nowarn("cat=deprecation") - def getTransactionTreeResponseGen: Gen[v2.UpdateServiceOuterClass.GetTransactionTreeResponse] = - transactionTreeGen.map( - v2.UpdateServiceOuterClass.GetTransactionTreeResponse - .newBuilder() - .setTransaction(_) - .build() - ) - def getUpdatesRequestGen: Gen[v2.UpdateServiceOuterClass.GetUpdatesRequest] = { import v2.UpdateServiceOuterClass.GetUpdatesRequest as Request for { @@ -1270,28 +1115,6 @@ object Generators { .build() } - // TODO(#23504) remove as GetUpdateTreesResponse is deprecated - @nowarn("cat=deprecation") - def getUpdateTreesResponseGen: Gen[v2.UpdateServiceOuterClass.GetUpdateTreesResponse] = { - import v2.UpdateServiceOuterClass.GetUpdateTreesResponse as Response - for { - update <- Gen.oneOf( - transactionTreeGen.map(transactionTree => - (b: Response.Builder) => b.setTransactionTree(transactionTree) - ), - reassignmentGen.map(reassingment => - (b: Response.Builder) => b.setReassignment(reassingment) - ), - offsetCheckpointGen.map(checkpoint => - (b: Response.Builder) => b.setOffsetCheckpoint(checkpoint) - ), - ) - } yield Response - .newBuilder() - .pipe(update) - .build() - } - val createCommandGen: Gen[v2.CommandsOuterClass.Command] = for { templateId <- identifierGen @@ -1494,18 +1317,6 @@ object Generators { .setReassignment(reassignment) .build() } - // TODO(#23504) remove as SubmitAndWaitForTransactionTreeResponse is deprecated - @nowarn("cat=deprecation") - def submitAndWaitForTransactionTreeResponseGen - : Gen[v2.CommandServiceOuterClass.SubmitAndWaitForTransactionTreeResponse] = { - import v2.CommandServiceOuterClass.SubmitAndWaitForTransactionTreeResponse as Response - for { - transaction <- 
transactionTreeGen - } yield Response - .newBuilder() - .setTransaction(transaction) - .build() - } val prefetchContractKeyGen: Gen[CommandsOuterClass.PrefetchContractKey] = for { diff --git a/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/GetTransactionByIdRequestSpec.scala b/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/GetTransactionByIdRequestSpec.scala deleted file mode 100644 index 253e2154a2..0000000000 --- a/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/GetTransactionByIdRequestSpec.scala +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.daml.ledger.javaapi.data - -import com.daml.ledger.javaapi.data.Generators.* -import org.scalatest.flatspec.AnyFlatSpec -import org.scalatest.matchers.should.Matchers -import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks - -import scala.annotation.nowarn - -// TODO(#23504) remove -@nowarn("cat=deprecation") -class GetTransactionByIdRequestSpec - extends AnyFlatSpec - with Matchers - with ScalaCheckDrivenPropertyChecks { - - "GetTransactionByIdRequest.fromProto" should "convert Protoc-generated instances to data instances" in forAll( - getTransactionByIdRequestGen - ) { transactionByIdRequest => - val converted = - GetTransactionByIdRequest.fromProto(transactionByIdRequest) - GetTransactionByIdRequest.fromProto(converted.toProto) shouldEqual converted - } -} diff --git a/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/GetTransactionByOffsetRequestSpec.scala b/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/GetTransactionByOffsetRequestSpec.scala deleted file mode 100644 index 809ffbdd55..0000000000 --- a/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/GetTransactionByOffsetRequestSpec.scala +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.daml.ledger.javaapi.data - -import com.daml.ledger.javaapi.data.Generators.* -import org.scalatest.flatspec.AnyFlatSpec -import org.scalatest.matchers.should.Matchers -import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks - -import scala.annotation.nowarn - -// TODO(#23504) remove -@nowarn("cat=deprecation") -class GetTransactionByOffsetRequestSpec - extends AnyFlatSpec - with Matchers - with ScalaCheckDrivenPropertyChecks { - - "GetTransactionByOffsetRequest.fromProto" should "convert Protoc-generated instances to data instances" in forAll( - getTransactionByOffsetRequestGen - ) { transactionByOffsetRequest => - val converted = - GetTransactionByOffsetRequest.fromProto(transactionByOffsetRequest) - GetTransactionByOffsetRequest.fromProto(converted.toProto) shouldEqual converted - } -} diff --git a/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/GetTransactionResponseSpec.scala b/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/GetTransactionResponseSpec.scala deleted file mode 100644 index 7a41a28c25..0000000000 --- a/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/GetTransactionResponseSpec.scala +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.daml.ledger.javaapi.data - -import com.daml.ledger.javaapi.data.Generators.* -import org.scalatest.flatspec.AnyFlatSpec -import org.scalatest.matchers.should.Matchers -import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks - -import scala.annotation.nowarn - -// TODO(#23504) remove -@nowarn("cat=deprecation") -class GetTransactionResponseSpec - extends AnyFlatSpec - with Matchers - with ScalaCheckDrivenPropertyChecks { - - "GetTransactionResponse.fromProto" should "convert Protoc-generated instances to data instances" in forAll( - getTransactionResponseGen - ) { transactionResponse => - val converted = - GetTransactionResponse.fromProto(transactionResponse) - GetTransactionResponse.fromProto(converted.toProto) shouldEqual converted - } -} diff --git a/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/GetTransactionTreeResponseSpec.scala b/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/GetTransactionTreeResponseSpec.scala deleted file mode 100644 index 64ff9af5e3..0000000000 --- a/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/GetTransactionTreeResponseSpec.scala +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.daml.ledger.javaapi.data - -import com.daml.ledger.javaapi.data.Generators.* -import org.scalatest.flatspec.AnyFlatSpec -import org.scalatest.matchers.should.Matchers -import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks - -import scala.annotation.nowarn - -// TODO(#23504) remove -@nowarn("cat=deprecation") -class GetTransactionTreeResponseSpec - extends AnyFlatSpec - with Matchers - with ScalaCheckDrivenPropertyChecks { - - "GetTransactionTreeResponse.fromProto" should "convert Protoc-generated instances to data instances" in forAll( - getTransactionTreeResponseGen - ) { transactionTreeResponse => - val converted = - GetTransactionTreeResponse.fromProto(transactionTreeResponse) - GetTransactionTreeResponse.fromProto(converted.toProto) shouldEqual converted - } -} diff --git a/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/GetUpdateTreesResponseSpec.scala b/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/GetUpdateTreesResponseSpec.scala deleted file mode 100644 index f23affb2b0..0000000000 --- a/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/GetUpdateTreesResponseSpec.scala +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.daml.ledger.javaapi.data - -import com.daml.ledger.javaapi.data.Generators.* -import org.scalatest.flatspec.AnyFlatSpec -import org.scalatest.matchers.should.Matchers -import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks - -import scala.annotation.nowarn - -// TODO(#23504) remove -@nowarn("cat=deprecation") -class GetUpdateTreesResponseSpec - extends AnyFlatSpec - with Matchers - with ScalaCheckDrivenPropertyChecks { - - "GetUpdateTreesResponse.fromProto" should "convert Protoc-generated instances to data instances" in forAll( - getUpdateTreesResponseGen - ) { updateTreesResponse => - val converted = - GetUpdateTreesResponse.fromProto(updateTreesResponse) - GetUpdateTreesResponse.fromProto(converted.toProto) shouldEqual converted - } -} diff --git a/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/SubmitAndWaitForTransactionTreeResponseSpec.scala b/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/SubmitAndWaitForTransactionTreeResponseSpec.scala deleted file mode 100644 index 3c6293b03f..0000000000 --- a/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/SubmitAndWaitForTransactionTreeResponseSpec.scala +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.daml.ledger.javaapi.data - -import com.daml.ledger.javaapi.data.Generators.* -import org.scalatest.flatspec.AnyFlatSpec -import org.scalatest.matchers.should.Matchers -import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks - -import scala.annotation.nowarn - -// TODO(#23504) remove -@nowarn("cat=deprecation") -class SubmitAndWaitForTransactionTreeResponseSpec - extends AnyFlatSpec - with Matchers - with ScalaCheckDrivenPropertyChecks { - - "SubmitAndWaitForTransactionTreeResponse.fromProto" should "convert Protoc-generated instances to data instances" in forAll( - submitAndWaitForTransactionTreeResponseGen - ) { response => - val converted = - SubmitAndWaitForTransactionTreeResponse.fromProto(response) - SubmitAndWaitForTransactionTreeResponse.fromProto(converted.toProto) shouldEqual converted - } -} diff --git a/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/TransactionTreeSpec.scala b/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/TransactionTreeSpec.scala deleted file mode 100644 index 25ab62fdc6..0000000000 --- a/canton/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/TransactionTreeSpec.scala +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.daml.ledger.javaapi.data - -import com.daml.ledger.javaapi.data.Generators.* -import org.scalatest.OptionValues -import org.scalatest.flatspec.AnyFlatSpec -import org.scalatest.matchers.should.Matchers -import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks - -import scala.annotation.nowarn -import scala.jdk.CollectionConverters.* - -// TODO(#23504) remove class as TransactionTree is deprecated and the equivalent TransactionSpec already exists -@nowarn("cat=deprecation") -class TransactionTreeSpec - extends AnyFlatSpec - with Matchers - with OptionValues - with ScalaCheckDrivenPropertyChecks { - - "TransactionTree.buildTree" should "convert a transaction tree to a wrapped tree" in forAll( - transactionTreeGenWithIdsInPreOrder - ) { transactionTreeOuter => - val transactionTree = TransactionTree.fromProto(transactionTreeOuter) - - case class WrappedEvent(nodeId: Int, children: List[WrappedEvent]) { - - def descendants(): Seq[WrappedEvent] = - Seq(this) ++ children ++ children.flatMap(_.descendants()) - - def lastDescendantNodeId(): Int = descendants().map(_.nodeId).maxOption.getOrElse(nodeId) - } - - val wrappedTree: Seq[WrappedEvent] = TransactionTreeUtils - .buildTree( - transactionTree, - (treeEvent: TreeEvent, children: java.util.List[WrappedEvent]) => - WrappedEvent(treeEvent.getNodeId, children.asScala.toList), - ) - .asScala - .toSeq - - transactionTree.getRootNodeIds.asScala shouldBe wrappedTree.map(_.nodeId) - - val wrappedEventsById = - wrappedTree.flatMap(_.descendants()).map(event => event.nodeId -> event).toMap - - wrappedEventsById.values.foreach { wrappedEvent => - wrappedEvent.children shouldBe wrappedEvent.children.distinct - all(wrappedEvent.children.map(_.nodeId)) should be > wrappedEvent.nodeId - } - - transactionTree.getEventsById.asScala.values - .map(_.getNodeId) shouldBe wrappedEventsById.keys.toList.sorted - - transactionTree.getEventsById.asScala.foreach { case (nodeId, treeEvent: TreeEvent) => - val event = treeEvent.toProtoTreeEvent - val lastDescendantNodeId = - if (event.hasExercised) event.getExercised.getLastDescendantNodeId else nodeId.toInt - - lastDescendantNodeId shouldBe wrappedEventsById - .get(nodeId) - .value - .lastDescendantNodeId() - } - } - - "TransactionTree.getRootNodeIds" should "provide root node ids that are not descendants of others" in forAll( - transactionTreeGenWithIdsInPreOrder - ) { transactionTreeOuter => - val transactionTree = TransactionTree.fromProto(transactionTreeOuter) - val eventDescendantsRanges = - transactionTree.getEventsById.asScala.view.mapValues(_.toProtoTreeEvent).map { - case (id, event) => - (id, if (event.hasExercised) Int.box(event.getExercised.getLastDescendantNodeId) else id) - } - - transactionTree.getRootNodeIds.asScala.foreach(nodeid => - eventDescendantsRanges.exists { case (start, end) => - nodeid > start && nodeid <= end - } shouldBe false - ) - } - - "TransactionTree.getChildNodeIds" should "find the children node ids of exercised events" in forAll( - transactionTreeGenWithIdsInPreOrder - ) { transactionTreeOuter => - val transactionTree = TransactionTree.fromProto(transactionTreeOuter) - - case class WrappedEvent(nodeId: Int, children: List[WrappedEvent]) { - - def descendants(): Seq[WrappedEvent] = - Seq(this) ++ children ++ children.flatMap(_.descendants()) - } - - val wrappedTree: Seq[WrappedEvent] = TransactionTreeUtils - .buildTree( - transactionTree, - (treeEvent: TreeEvent, children: java.util.List[WrappedEvent]) => -
WrappedEvent(treeEvent.getNodeId, children.asScala.toList), - ) - .asScala - .toSeq - - val wrappedEventsById = - wrappedTree.flatMap(_.descendants()).map(event => event.nodeId -> event).toMap - - val events = transactionTree.getEventsById.asScala.values - - val exercisedEvents = - events - .filter(_.toProtoTreeEvent.hasExercised) - .map(_.toProtoTreeEvent.getExercised) - .map(ExercisedEvent.fromProto) - - exercisedEvents.foreach { event => - transactionTree - .getChildNodeIds(event) - .asScala shouldBe wrappedEventsById - .get(event.getNodeId) - .value - .children - .map(_.nodeId) - } - } -} diff --git a/canton/community/common/src/main/resources/db/migration/canton/h2/dev/V999__dev.sql b/canton/community/common/src/main/resources/db/migration/canton/h2/dev/V999__dev.sql index 23617c66e8..b9a0836cc0 100644 --- a/canton/community/common/src/main/resources/db/migration/canton/h2/dev/V999__dev.sql +++ b/canton/community/common/src/main/resources/db/migration/canton/h2/dev/V999__dev.sql @@ -1,4 +1,4 @@ --- Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +-- Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -- SPDX-License-Identifier: Apache-2.0 -- This is a dummy column we are adding in order to test that adding dev version migrations diff --git a/canton/community/common/src/main/resources/db/migration/canton/h2/dev/reference/V998__blocks.sql b/canton/community/common/src/main/resources/db/migration/canton/h2/dev/reference/V998__blocks.sql index 0274a37b09..7fddb510f0 100644 --- a/canton/community/common/src/main/resources/db/migration/canton/h2/dev/reference/V998__blocks.sql +++ b/canton/community/common/src/main/resources/db/migration/canton/h2/dev/reference/V998__blocks.sql @@ -1,6 +1,5 @@ --- Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates --- --- Proprietary code. All rights reserved. +-- Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +-- SPDX-License-Identifier: Apache-2.0 create table blocks ( id bigint primary key, diff --git a/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V1_1__initial.sha256 b/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V1_1__initial.sha256 new file mode 100644 index 0000000000..a4f4ee628c --- /dev/null +++ b/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V1_1__initial.sha256 @@ -0,0 +1 @@ +10e3364f16529cc213bc0c8f8d64ec03aefef5b91eafc320e9e3297dba2e3c38 diff --git a/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V1_1__initial.sql b/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V1_1__initial.sql index e2b346a800..38bbd75855 100644 --- a/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V1_1__initial.sql +++ b/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V1_1__initial.sql @@ -1,4 +1,4 @@ --- Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +-- Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -- SPDX-License-Identifier: Apache-2.0 create table par_daml_packages ( @@ -70,23 +70,27 @@ create table common_crypto_public_keys ( -- Stores the immutable contracts, however a creation of a contract can be rolled back. 
create table par_contracts ( + internal_contract_id bigint generated always as identity, contract_id binary varying not null, -- The contract is a LfFatContractInstance serialized using the LF contract proto serializer. instance binary large object not null, package_id varchar not null, template_id varchar not null, - primary key (contract_id)); + primary key (contract_id) +); +-- Index for lookups by internal_contract_id +create index idx_par_contracts_internal on par_contracts(internal_contract_id); -- Index to speed up ContractStore.find -- package_id comes before template_id, because queries with package_id and without template_id make more sense than vice versa. -- contract_id is left out, because a query with contract_id can be served with the primary key. create index idx_par_contracts_find on par_contracts(package_id, template_id); --- provides a serial enumeration of static strings so we don't store the same string over and over in the db +-- provides an enumeration of static strings so we don't store the same string over and over in the db -- currently only storing uids create table common_static_strings ( - -- serial identifier of the string (local to this node) - id serial not null primary key, + -- identifier of the string (local to this node) + id integer generated always as identity primary key, -- the expression string varchar not null, -- the source (what kind of string are we storing here) @@ -314,7 +318,7 @@ create table par_commitment_snapshot ( -- A stable reference to a stakeholder set that doesn't rely on the Protobuf encoding being deterministic -- a hex-encoded hash (not binary so that hash can be indexed in all db server types) stakeholders_hash varchar not null, - stakeholders varchar array not null, + stakeholders integer array not null, commitment binary large object not null, primary key (synchronizer_idx, stakeholders_hash) ); @@ -384,12 +388,11 @@ create table seq_block_height ( ); create table mediator_deduplication_store ( - mediator_id varchar not null, uuid varchar not null, request_time bigint not null, expire_after bigint not null ); -create index idx_mediator_deduplication_store_expire_after on mediator_deduplication_store(mediator_id, expire_after); +create index idx_mediator_deduplication_store_expire_after on mediator_deduplication_store(expire_after); create type pruning_phase as enum ('started', 'completed'); @@ -449,7 +452,7 @@ create table common_head_sequencer_counters ( -- members can read all events from `registered_ts` create table sequencer_members ( member varchar primary key, - id serial unique, + id integer generated always as identity unique, registered_ts bigint not null, pruned_previous_event_timestamp bigint, enabled bool not null default true @@ -680,8 +683,8 @@ create index idx_seq_in_flight_aggregated_sender_temporal on seq_in_flight_aggre -- stores the topology-x state transactions create table common_topology_transactions ( - -- serial identifier used to preserve insertion order - id bigserial not null primary key, + -- identifier used to preserve insertion order + id bigint generated always as identity primary key, -- the id of the store store_id varchar not null, -- the timestamp at which the transaction is sequenced by the sequencer @@ -732,6 +735,24 @@ create table common_topology_transactions ( create index idx_common_topology_transactions on common_topology_transactions (store_id, transaction_type, namespace, identifier, valid_until, valid_from); +-- for: +-- - DbTopologyStore.findProposalsByTxHash +-- - 
DbTopologyStore.findLatestTransactionsAndProposalsByTxHash +create index idx_common_topology_transactions_by_tx_hash + on common_topology_transactions (store_id, tx_hash, is_proposal, valid_from, valid_until, rejection_reason); + +-- for: +-- - DbTopologyStore.findEffectiveStateChanges +create index idx_common_topology_transactions_effective_changes + on common_topology_transactions (store_id, is_proposal, valid_from, valid_until, rejection_reason); + + +-- for: +-- - DbTopologyStore.update, updating the valid_until column for past transactions +create index idx_common_topology_transactions_for_valid_until_update + on common_topology_transactions (store_id, mapping_key_hash, serial_counter, valid_from); + + -- Stores the traffic balance updates create table seq_traffic_control_balance_updates ( -- member the traffic balance update is for @@ -932,3 +953,23 @@ create table acs_slow_counter_participants is_added_to_metrics boolean not null, primary key(synchronizer_id,participant_id) ); + +-- Specifies the event that triggers the execution of a pending operation +create type pending_operation_trigger_type as enum ('synchronizer_reconnect'); + +-- Stores operations that must be completed, ensuring execution even after a node restart (e.g., following a crash) +create table common_pending_operations ( + id int not null generated always as identity, + operation_trigger pending_operation_trigger_type not null, + -- The name of the procedure to execute for this operation. + operation_name varchar not null, + -- A key to uniquely identify an instance of an operation, allowing multiple pending operations of the same type + -- An empty string indicates no specific key + operation_key varchar not null, + -- The serialized protobuf message for the operation, wrapped for versioning (HasProtocolVersionedWrapper) + operation bytea not null, + -- The ID of the synchronizer instance this operation is associated with + synchronizer_id varchar not null, + primary key (id), + unique (synchronizer_id, operation_key, operation_name) +); diff --git a/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V2__lapi_3.0.sha256 b/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V2__lapi_3.0.sha256 index 3c451bbfd8..58fdb8ad3d 100644 --- a/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V2__lapi_3.0.sha256 +++ b/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V2__lapi_3.0.sha256 @@ -1 +1 @@ -1f621518fd484c4e234653ef7675f4531bbf796a7508580428590669985cfdce +007db6ac41ea40c060b586a9eb9b25d20c074549cea8b6c6b53f2f19359c7f64 diff --git a/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V2__lapi_3.0.sql b/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V2__lapi_3.0.sql index d5789ce2b0..1fd6c91b39 100644 --- a/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V2__lapi_3.0.sql +++ b/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V2__lapi_3.0.sql @@ -1,7 +1,7 @@ -- Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-- SPDX-License-Identifier: Apache-2.0 -CREATE ALIAS array_intersection FOR "com.digitalasset.canton.store.db.h2.H2FunctionAliases.arrayIntersection"; +create alias array_intersection for "com.digitalasset.canton.store.db.h2.H2FunctionAliases.arrayIntersection"; --------------------------------------------------------------------------------------------------- -- Parameters @@ -12,27 +12,27 @@ CREATE ALIAS array_intersection FOR "com.digitalasset.canton.store.db.h2.H2Funct -- ledger_end_publication_time are always defined at the same time. I.e., either -- all are NULL, or all are defined. --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_parameters ( - participant_id VARCHAR NOT NULL, - ledger_end BIGINT, - ledger_end_sequential_id BIGINT, - ledger_end_string_interning_id INTEGER, - ledger_end_publication_time BIGINT, - participant_pruned_up_to_inclusive BIGINT +create table lapi_parameters ( + participant_id varchar not null, + ledger_end bigint, + ledger_end_sequential_id bigint, + ledger_end_string_interning_id integer, + ledger_end_publication_time bigint, + participant_pruned_up_to_inclusive bigint ); -CREATE TABLE lapi_post_processing_end ( +create table lapi_post_processing_end ( -- null signifies the participant begin - post_processing_end BIGINT + post_processing_end bigint ); -CREATE TABLE lapi_ledger_end_synchronizer_index ( - synchronizer_id INTEGER PRIMARY KEY NOT NULL, - sequencer_timestamp BIGINT, - repair_timestamp BIGINT, - repair_counter BIGINT, - record_time BIGINT NOT NULL +create table lapi_ledger_end_synchronizer_index ( + synchronizer_id integer primary key not null, + sequencer_timestamp bigint, + repair_timestamp bigint, + repair_counter bigint, + record_time bigint not null ); --------------------------------------------------------------------------------------------------- @@ -40,465 +40,694 @@ CREATE TABLE lapi_ledger_end_synchronizer_index ( -- -- A table for tracking party allocation submissions --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_party_entries ( - ledger_offset BIGINT NOT NULL, - recorded_at BIGINT NOT NULL, - submission_id VARCHAR, - party VARCHAR, - typ VARCHAR NOT NULL, - rejection_reason VARCHAR, - is_local BOOLEAN, - party_id INTEGER, - - CONSTRAINT check_party_entry_type - CHECK ( - (typ = 'accept' AND rejection_reason IS NULL) OR - (typ = 'reject' AND rejection_reason IS NOT NULL) +create table lapi_party_entries ( + ledger_offset bigint not null, + recorded_at bigint not null, + submission_id varchar, + party varchar, + typ varchar not null, + rejection_reason varchar, + is_local boolean, + party_id integer, + + constraint check_party_entry_type + check ( + (typ = 'accept' and rejection_reason is null) or + (typ = 'reject' and rejection_reason is not null) ) ); -CREATE INDEX lapi_party_entries_idx ON lapi_party_entries (submission_id); -CREATE INDEX lapi_party_entries_party_and_ledger_offset_idx ON lapi_party_entries(party, ledger_offset); -CREATE INDEX lapi_party_entries_party_id_and_ledger_offset_idx ON lapi_party_entries(party_id, ledger_offset); +create index lapi_party_entries_idx on lapi_party_entries (submission_id); +create index lapi_party_entries_party_and_ledger_offset_idx on lapi_party_entries(party, ledger_offset); +create index lapi_party_entries_party_id_and_ledger_offset_idx on lapi_party_entries(party_id, ledger_offset); 
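+-- Illustrative sketch only (no query in this patch is quoted here): the two offset-scoped
+-- lapi_party_entries indexes above are shaped for looking up a party's allocation entries
+-- up to a known ledger end, along the lines of the following, where the party_id value 42
+-- (an interned party identifier) and the ledger end offset 1000 are hypothetical:
+--
+--   select ledger_offset, typ, rejection_reason
+--   from lapi_party_entries
+--   where party_id = 42           -- hypothetical interned party id
+--     and ledger_offset <= 1000   -- hypothetical ledger end offset
+--   order by ledger_offset;
+--
+-- The (party_id, ledger_offset) index can serve both the filter and the ordering of such a query.
+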
--------------------------------------------------------------------------------------------------- -- Completions --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_command_completions ( - completion_offset BIGINT NOT NULL, - record_time BIGINT NOT NULL, - publication_time BIGINT NOT NULL, - user_id VARCHAR NOT NULL, - submitters INTEGER ARRAY NOT NULL, - command_id VARCHAR NOT NULL, +create table lapi_command_completions ( + completion_offset bigint not null, + record_time bigint not null, + publication_time bigint not null, + user_id integer not null, + submitters binary large object not null, + command_id varchar not null, -- The update ID is `NULL` for rejected transactions/reassignments. - update_id VARCHAR, + update_id binary varying, -- The submission ID will be provided by the participant or driver if the application didn't provide one. -- Nullable to support historical data. - submission_id VARCHAR, + submission_id varchar, -- The three alternatives below are mutually exclusive, i.e. the deduplication -- interval could have been specified by the application as one of: -- 1. an initial offset -- 2. a duration (split into two columns, seconds and nanos, mapping protobuf's 1:1) -- 3. an initial timestamp - deduplication_offset BIGINT, - deduplication_duration_seconds BIGINT, - deduplication_duration_nanos INT, + deduplication_offset bigint, + deduplication_duration_seconds bigint, + deduplication_duration_nanos int, -- The three columns below are `NULL` if the completion is for an accepted transaction. -- The `rejection_status_details` column contains a Protocol-Buffers-serialized message of type -- `daml.platform.index.StatusDetails`, containing the code, message, and further details -- (decided by the ledger driver), and may be `NULL` even if the other two columns are set.
- rejection_status_code INTEGER, - rejection_status_message VARCHAR, - rejection_status_details BINARY LARGE OBJECT, - synchronizer_id INTEGER NOT NULL, - message_uuid VARCHAR, - is_transaction BOOLEAN NOT NULL, - trace_context BINARY LARGE OBJECT NOT NULL + rejection_status_code integer, + rejection_status_message varchar, + rejection_status_details binary large object, + synchronizer_id integer not null, + message_uuid varchar, + is_transaction boolean not null, + trace_context binary large object not null ); -CREATE INDEX lapi_command_completions_user_id_offset_idx ON lapi_command_completions USING btree (user_id, completion_offset); -CREATE INDEX lapi_command_completions_offset_idx ON lapi_command_completions USING btree (completion_offset); -CREATE INDEX lapi_command_completions_publication_time_idx ON lapi_command_completions USING btree (publication_time, completion_offset); -CREATE INDEX lapi_command_completions_synchronizer_record_time_idx ON lapi_command_completions USING btree (synchronizer_id, record_time); -CREATE INDEX lapi_command_completions_synchronizer_offset_idx ON lapi_command_completions USING btree (synchronizer_id, completion_offset); +create index lapi_command_completions_user_id_offset_idx on lapi_command_completions using btree (user_id, completion_offset); +create index lapi_command_completions_offset_idx on lapi_command_completions using btree (completion_offset); +create index lapi_command_completions_publication_time_idx on lapi_command_completions using btree (publication_time, completion_offset); +create index lapi_command_completions_synchronizer_record_time_idx on lapi_command_completions using btree (synchronizer_id, record_time); +create index lapi_command_completions_synchronizer_offset_idx on lapi_command_completions using btree (synchronizer_id, completion_offset); + +--------------------------------------------------------------------------------------------------- +-- Events: Activate Contract +--------------------------------------------------------------------------------------------------- +create table lapi_events_activate_contract ( + -- update related columns + event_offset bigint not null, + update_id binary large object not null, + workflow_id varchar, + command_id varchar, + submitters binary large object, + record_time bigint not null, + synchronizer_id integer not null, + trace_context binary large object not null, + external_transaction_hash binary large object, + + -- event related columns + event_type smallint not null, -- all event types + event_sequential_id bigint not null, -- all event types + node_id integer not null, -- all event types + additional_witnesses binary large object, -- create events + source_synchronizer_id integer, -- assign events + reassignment_counter bigint, -- assign events + reassignment_id binary large object, -- assign events + representative_package_id integer not null, -- create events + + -- contract related columns + internal_contract_id bigint not null, -- all event types + create_key_hash varchar -- create +); + +-- sequential_id index +create index lapi_events_activate_sequential_id_idx on lapi_events_activate_contract using btree (event_sequential_id); +-- event_offset index +create index lapi_events_activate_offset_idx on lapi_events_activate_contract using btree (event_offset); +-- internal_contract_id index +create index lapi_events_activate_internal_contract_id_idx on lapi_events_activate_contract using btree (internal_contract_id, event_sequential_id); +-- contract_key index +create index 
lapi_events_activate_contract_key_idx on lapi_events_activate_contract using btree (create_key_hash, event_sequential_id); + +-- filter table for stakeholders +create table lapi_filter_activate_stakeholder ( + event_sequential_id bigint not null, + template_id integer not null, + party_id integer not null, + first_per_sequential_id boolean +); +create index lapi_filter_activate_stakeholder_ps_idx on lapi_filter_activate_stakeholder using btree (party_id, event_sequential_id); +create index lapi_filter_activate_stakeholder_pts_idx on lapi_filter_activate_stakeholder using btree (party_id, template_id, event_sequential_id); +create index lapi_filter_activate_stakeholder_ts_idx on lapi_filter_activate_stakeholder using btree (template_id, event_sequential_id); +create index lapi_filter_activate_stakeholder_s_idx on lapi_filter_activate_stakeholder using btree (event_sequential_id, first_per_sequential_id); + +-- filter table for additional witnesses +create table lapi_filter_activate_witness ( + event_sequential_id bigint not null, + template_id integer not null, + party_id integer not null, + first_per_sequential_id boolean +); +create index lapi_filter_activate_witness_ps_idx on lapi_filter_activate_witness using btree (party_id, event_sequential_id); +create index lapi_filter_activate_witness_pts_idx on lapi_filter_activate_witness using btree (party_id, template_id, event_sequential_id); +create index lapi_filter_activate_witness_ts_idx on lapi_filter_activate_witness using btree (template_id, event_sequential_id); +create index lapi_filter_activate_witness_s_idx on lapi_filter_activate_witness using btree (event_sequential_id, first_per_sequential_id); + +--------------------------------------------------------------------------------------------------- +-- Events: Deactivate Contract +--------------------------------------------------------------------------------------------------- +create table lapi_events_deactivate_contract ( + -- update related columns + event_offset bigint not null, + update_id binary large object not null, + workflow_id varchar, + command_id varchar, + submitters binary large object, + record_time bigint not null, + synchronizer_id integer not null, + trace_context binary large object not null, + external_transaction_hash binary large object, + + -- event related columns + event_type smallint not null, -- all event types + event_sequential_id bigint not null, -- all event types + node_id integer not null, -- all event types + deactivated_event_sequential_id bigint, -- all event types + additional_witnesses binary large object, -- consuming events + exercise_choice integer, -- consuming events + exercise_choice_interface integer, -- consuming events + exercise_argument binary large object, -- consuming events + exercise_result binary large object, -- consuming events + exercise_actors binary large object, -- consuming events + exercise_last_descendant_node_id integer, -- consuming events + exercise_argument_compression smallint, -- consuming events + exercise_result_compression smallint, -- consuming events + reassignment_id binary large object, -- unassign events + assignment_exclusivity bigint, -- unassign events + target_synchronizer_id integer, -- unassign events + reassignment_counter bigint, -- unassign events + + -- contract related columns + contract_id binary large object not null, -- all event types + internal_contract_id bigint not null, -- all event types + template_id integer not null, -- all event types + package_id integer not null, -- all event 
types + stakeholders binary large object not null, -- all event types + ledger_effective_time bigint -- consuming events +); + +-- sequential_id index +create index lapi_events_deactivate_sequential_id_idx on lapi_events_deactivate_contract using btree (event_sequential_id); +-- event_offset index +create index lapi_events_deactivate_offset_idx on lapi_events_deactivate_contract using btree (event_offset); +-- internal_contract_id index +create index lapi_events_deactivate_internal_contract_id_idx on lapi_events_deactivate_contract using btree (internal_contract_id, event_sequential_id); +-- deactivation reference index +create index lapi_events_deactivated_event_sequential_id_idx on lapi_events_deactivate_contract using btree (deactivated_event_sequential_id); + +-- filter table for stakeholders +create table lapi_filter_deactivate_stakeholder ( + event_sequential_id bigint not null, + template_id integer not null, + party_id integer not null, + first_per_sequential_id boolean +); +create index lapi_filter_deactivate_stakeholder_ps_idx on lapi_filter_deactivate_stakeholder using btree (party_id, event_sequential_id); +create index lapi_filter_deactivate_stakeholder_pts_idx on lapi_filter_deactivate_stakeholder using btree (party_id, template_id, event_sequential_id); +create index lapi_filter_deactivate_stakeholder_ts_idx on lapi_filter_deactivate_stakeholder using btree (template_id, event_sequential_id); +create index lapi_filter_deactivate_stakeholder_s_idx on lapi_filter_deactivate_stakeholder using btree (event_sequential_id, first_per_sequential_id); + +-- filter table for additional witnesses +create table lapi_filter_deactivate_witness ( + event_sequential_id bigint not null, + template_id integer not null, + party_id integer not null, + first_per_sequential_id boolean +); +create index lapi_filter_deactivate_witness_ps_idx on lapi_filter_deactivate_witness using btree (party_id, event_sequential_id); +create index lapi_filter_deactivate_witness_pts_idx on lapi_filter_deactivate_witness using btree (party_id, template_id, event_sequential_id); +create index lapi_filter_deactivate_witness_ts_idx on lapi_filter_deactivate_witness using btree (template_id, event_sequential_id); +create index lapi_filter_deactivate_witness_s_idx on lapi_filter_deactivate_witness using btree (event_sequential_id, first_per_sequential_id); + +--------------------------------------------------------------------------------------------------- +-- Events: Various Witnessed +--------------------------------------------------------------------------------------------------- +create table lapi_events_various_witnessed ( + -- tx related columns + event_offset bigint not null, + update_id binary large object not null, + workflow_id varchar, + command_id varchar, + submitters binary large object, + record_time bigint not null, + synchronizer_id integer not null, + trace_context binary large object not null, + external_transaction_hash binary large object, + + -- event related columns + event_type smallint not null, -- all event types + event_sequential_id bigint not null, -- all event types + node_id integer not null, -- all event types + additional_witnesses binary large object, -- all event types + consuming boolean, -- exercise + exercise_choice integer, -- exercise + exercise_choice_interface integer, -- exercise + exercise_argument binary large object, -- exercise + exercise_result binary large object, -- exercise + exercise_actors binary large object, -- exercise + exercise_last_descendant_node_id integer, 
-- exercise + exercise_argument_compression smallint, -- exercise + exercise_result_compression smallint, -- exercise + representative_package_id integer, -- create events + + -- contract related columns + contract_id binary large object, + internal_contract_id bigint, + template_id integer, + package_id integer, + ledger_effective_time bigint +); + +-- sequential_id index +create index lapi_events_various_sequential_id_idx on lapi_events_various_witnessed using btree (event_sequential_id); +-- event_offset index +create index lapi_events_various_offset_idx on lapi_events_various_witnessed using btree (event_offset); +-- internal_contract_id index +create index lapi_events_various_internal_contract_id_idx on lapi_events_various_witnessed using btree (internal_contract_id, event_sequential_id); + +-- filter table for additional witnesses +create table lapi_filter_various_witness ( + event_sequential_id bigint not null, + template_id integer not null, + party_id integer not null, + first_per_sequential_id boolean +); +create index lapi_filter_various_witness_ps_idx on lapi_filter_various_witness using btree (party_id, event_sequential_id); +create index lapi_filter_various_witness_pts_idx on lapi_filter_various_witness using btree (party_id, template_id, event_sequential_id); +create index lapi_filter_various_witness_ts_idx on lapi_filter_various_witness using btree (template_id, event_sequential_id); +create index lapi_filter_various_witness_s_idx on lapi_filter_various_witness using btree (event_sequential_id, first_per_sequential_id); --------------------------------------------------------------------------------------------------- -- Events: create --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_events_create ( +-- deprecated, to be removed. 
See #28008 +create table lapi_events_create ( -- * fixed-size columns first to avoid padding - event_sequential_id bigint NOT NULL, -- event identification: same ordering as event_offset - ledger_effective_time bigint NOT NULL, -- transaction metadata - node_id integer NOT NULL, -- event metadata + event_sequential_id bigint not null, -- event identification: same ordering as event_offset + ledger_effective_time bigint not null, -- transaction metadata + node_id integer not null, -- event metadata -- * event identification - event_offset BIGINT NOT NULL, + event_offset bigint not null, -- * transaction metadata - update_id VARCHAR NOT NULL, - workflow_id VARCHAR, + update_id binary varying not null, + workflow_id varchar, -- * submitter info (only visible on submitting participant) - command_id VARCHAR, - submitters INTEGER ARRAY, + command_id varchar, + submitters binary large object, -- * shared event information - contract_id BINARY VARYING NOT NULL, - template_id INTEGER NOT NULL, - package_id INTEGER NOT NULL, - flat_event_witnesses INTEGER ARRAY NOT NULL DEFAULT ARRAY[], -- stakeholders - tree_event_witnesses INTEGER ARRAY NOT NULL DEFAULT ARRAY[], -- informees + contract_id binary varying not null, + template_id integer not null, + package_id integer not null, + representative_package_id integer not null, + flat_event_witnesses binary large object not null, -- stakeholders + tree_event_witnesses binary large object not null, -- informees -- * contract data - create_argument BINARY LARGE OBJECT NOT NULL, - create_signatories INTEGER ARRAY NOT NULL, - create_observers INTEGER ARRAY NOT NULL, - create_key_value BINARY LARGE OBJECT, - create_key_hash VARCHAR, - create_key_maintainers INTEGER ARRAY, + create_argument binary large object not null, + create_signatories binary large object not null, + create_observers binary large object not null, + create_key_value binary large object, + create_key_hash varchar, + create_key_maintainers binary large object, -- * compression flags - create_argument_compression SMALLINT, - create_key_value_compression SMALLINT, + create_argument_compression smallint, + create_key_value_compression smallint, -- * contract authentication data - authentication_data BINARY LARGE OBJECT NOT NULL, + authentication_data binary large object not null, - synchronizer_id INTEGER NOT NULL, - trace_context BINARY LARGE OBJECT NOT NULL, - record_time BIGINT NOT NULL, - external_transaction_hash BINARY LARGE OBJECT + synchronizer_id integer not null, + trace_context binary large object not null, + record_time bigint not null, + external_transaction_hash binary large object, + internal_contract_id bigint not null ); -- offset index: used to translate to sequential_id -CREATE INDEX lapi_events_create_event_offset_idx ON lapi_events_create (event_offset); +create index lapi_events_create_event_offset_idx on lapi_events_create (event_offset); -- sequential_id index for paging -CREATE INDEX lapi_events_create_event_sequential_id_idx ON lapi_events_create (event_sequential_id); +create index lapi_events_create_event_sequential_id_idx on lapi_events_create (event_sequential_id); -- lookup by contract_id -CREATE INDEX lapi_events_create_contract_id_idx ON lapi_events_create (contract_id); +create index lapi_events_create_contract_id_idx on lapi_events_create (contract_id); -- lookup by contract_key -CREATE INDEX lapi_events_create_create_key_hash_idx ON lapi_events_create (create_key_hash, event_sequential_id); +create index lapi_events_create_create_key_hash_idx on 
lapi_events_create (create_key_hash, event_sequential_id); --------------------------------------------------------------------------------------------------- -- Events: consuming exercise --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_events_consuming_exercise ( +-- deprecated, to be removed. See #28008 +create table lapi_events_consuming_exercise ( -- * fixed-size columns first to avoid padding - event_sequential_id bigint NOT NULL, -- event identification: same ordering as event_offset - ledger_effective_time bigint NOT NULL, -- transaction metadata - node_id integer NOT NULL, -- event metadata + event_sequential_id bigint not null, -- event identification: same ordering as event_offset + ledger_effective_time bigint not null, -- transaction metadata + node_id integer not null, -- event metadata -- * event identification - event_offset BIGINT NOT NULL, + event_offset bigint not null, -- * transaction metadata - update_id VARCHAR NOT NULL, - workflow_id VARCHAR, + update_id binary varying not null, + workflow_id varchar, -- * submitter info (only visible on submitting participant) - command_id VARCHAR, - submitters INTEGER ARRAY, + command_id varchar, + submitters binary large object, -- * shared event information - contract_id BINARY VARYING NOT NULL, - template_id INTEGER NOT NULL, - package_id INTEGER NOT NULL, - flat_event_witnesses INTEGER ARRAY NOT NULL DEFAULT ARRAY[], -- stakeholders - tree_event_witnesses INTEGER ARRAY NOT NULL DEFAULT ARRAY[], -- informees + contract_id binary varying not null, + template_id integer not null, + package_id integer not null, + flat_event_witnesses binary large object not null, -- stakeholders + tree_event_witnesses binary large object not null, -- informees -- * choice data - exercise_choice VARCHAR NOT NULL, - exercise_argument BINARY LARGE OBJECT NOT NULL, - exercise_result BINARY LARGE OBJECT, - exercise_actors INTEGER ARRAY NOT NULL, - exercise_last_descendant_node_id INTEGER NOT NULL, + exercise_choice integer not null, + exercise_choice_interface integer, + exercise_argument binary large object not null, + exercise_result binary large object, + exercise_actors binary large object not null, + exercise_last_descendant_node_id integer not null, -- * compression flags - exercise_argument_compression SMALLINT, - exercise_result_compression SMALLINT, - - synchronizer_id INTEGER NOT NULL, - trace_context BINARY LARGE OBJECT NOT NULL, - record_time BIGINT NOT NULL, - external_transaction_hash BINARY LARGE OBJECT + exercise_argument_compression smallint, + exercise_result_compression smallint, + + synchronizer_id integer not null, + trace_context binary large object not null, + record_time bigint not null, + external_transaction_hash binary large object, + deactivated_event_sequential_id bigint ); +-- deactivations +create index lapi_events_consuming_exercise_deactivated_idx on lapi_events_consuming_exercise (deactivated_event_sequential_id, event_sequential_id); + -- offset index: used to translate to sequential_id -CREATE INDEX lapi_events_consuming_exercise_event_offset_idx ON lapi_events_consuming_exercise (event_offset); +create index lapi_events_consuming_exercise_event_offset_idx on lapi_events_consuming_exercise (event_offset); -- sequential_id index for paging -CREATE INDEX lapi_events_consuming_exercise_event_sequential_id_idx ON lapi_events_consuming_exercise (event_sequential_id); +create index lapi_events_consuming_exercise_event_sequential_id_idx on 
lapi_events_consuming_exercise (event_sequential_id); -- lookup by contract id -CREATE INDEX lapi_events_consuming_exercise_contract_id_idx ON lapi_events_consuming_exercise (contract_id); +create index lapi_events_consuming_exercise_contract_id_idx on lapi_events_consuming_exercise (contract_id); --------------------------------------------------------------------------------------------------- -- Events: non-consuming exercise --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_events_non_consuming_exercise ( +-- deprecated, to be removed. See #28008 +create table lapi_events_non_consuming_exercise ( -- * fixed-size columns first to avoid padding - event_sequential_id bigint NOT NULL, -- event identification: same ordering as event_offset - ledger_effective_time bigint NOT NULL, -- transaction metadata - node_id integer NOT NULL, -- event metadata + event_sequential_id bigint not null, -- event identification: same ordering as event_offset + ledger_effective_time bigint not null, -- transaction metadata + node_id integer not null, -- event metadata -- * event identification - event_offset BIGINT NOT NULL, + event_offset bigint not null, -- * transaction metadata - update_id VARCHAR NOT NULL, - workflow_id VARCHAR, + update_id binary varying not null, + workflow_id varchar, -- * submitter info (only visible on submitting participant) - command_id VARCHAR, - submitters INTEGER ARRAY, + command_id varchar, + submitters binary large object, -- * shared event information - contract_id BINARY VARYING NOT NULL, - template_id INTEGER NOT NULL, - package_id INTEGER NOT NULL, - tree_event_witnesses INTEGER ARRAY NOT NULL DEFAULT ARRAY[], -- informees + contract_id binary varying not null, + template_id integer not null, + package_id integer not null, + tree_event_witnesses binary large object not null, -- informees -- * choice data - exercise_choice VARCHAR NOT NULL, - exercise_argument BINARY LARGE OBJECT NOT NULL, - exercise_result BINARY LARGE OBJECT, - exercise_actors INTEGER ARRAY NOT NULL, - exercise_last_descendant_node_id INTEGER NOT NULL, + exercise_choice integer not null, + exercise_choice_interface integer, + exercise_argument binary large object not null, + exercise_result binary large object, + exercise_actors binary large object not null, + exercise_last_descendant_node_id integer not null, -- * compression flags - exercise_argument_compression SMALLINT, - exercise_result_compression SMALLINT, + exercise_argument_compression smallint, + exercise_result_compression smallint, - synchronizer_id INTEGER NOT NULL, - trace_context BINARY LARGE OBJECT NOT NULL, - record_time BIGINT NOT NULL, - external_transaction_hash BINARY LARGE OBJECT + synchronizer_id integer not null, + trace_context binary large object not null, + record_time bigint not null, + external_transaction_hash binary large object ); -- offset index: used to translate to sequential_id -CREATE INDEX lapi_events_non_consuming_exercise_event_offset_idx ON lapi_events_non_consuming_exercise (event_offset); +create index lapi_events_non_consuming_exercise_event_offset_idx on lapi_events_non_consuming_exercise (event_offset); -- sequential_id index for paging -CREATE INDEX lapi_events_non_consuming_exercise_event_sequential_id_idx ON lapi_events_non_consuming_exercise (event_sequential_id); +create index lapi_events_non_consuming_exercise_event_sequential_id_idx on lapi_events_non_consuming_exercise (event_sequential_id); -CREATE TABLE lapi_string_interning ( - 
internal_id integer PRIMARY KEY NOT NULL, - external_string VARCHAR +create table lapi_string_interning ( + internal_id integer primary key not null, + external_string varchar ); --------------------------------------------------------------------------------------------------- -- Events: Unassign --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_events_unassign ( +-- deprecated, to be removed. See #28008 +create table lapi_events_unassign ( -- * fixed-size columns first to avoid padding - event_sequential_id bigint NOT NULL, -- event identification: same ordering as event_offset + event_sequential_id bigint not null, -- event identification: same ordering as event_offset -- * event identification - event_offset BIGINT NOT NULL, + event_offset bigint not null, -- * transaction metadata - update_id VARCHAR NOT NULL, - workflow_id VARCHAR, + update_id binary varying not null, + workflow_id varchar, -- * submitter info (only visible on submitting participant) - command_id VARCHAR, + command_id varchar, - submitter INTEGER NOT NULL, - node_id integer NOT NULL, -- event metadata + submitter integer not null, + node_id integer not null, -- event metadata -- * shared event information - contract_id BINARY VARYING NOT NULL, - template_id INTEGER NOT NULL, - package_id INTEGER NOT NULL, - flat_event_witnesses INTEGER ARRAY NOT NULL DEFAULT ARRAY[], -- stakeholders + contract_id binary varying not null, + template_id integer not null, + package_id integer not null, + flat_event_witnesses binary large object not null, -- stakeholders -- * common reassignment - source_synchronizer_id INTEGER NOT NULL, - target_synchronizer_id INTEGER NOT NULL, - reassignment_id VARCHAR NOT NULL, - reassignment_counter BIGINT NOT NULL, + source_synchronizer_id integer not null, + target_synchronizer_id integer not null, + reassignment_id binary large object not null, + reassignment_counter bigint not null, -- * unassigned specific - assignment_exclusivity BIGINT, + assignment_exclusivity bigint, - trace_context BINARY LARGE OBJECT NOT NULL, - record_time BIGINT NOT NULL + trace_context binary large object not null, + record_time bigint not null, + deactivated_event_sequential_id bigint ); --- sequential_id index for paging -CREATE INDEX lapi_events_unassign_event_sequential_id_idx ON lapi_events_unassign (event_sequential_id); +-- deactivations +create index lapi_events_unassign_deactivated_idx on lapi_events_unassign (deactivated_event_sequential_id, event_sequential_id); + +-- sequential_id index for paging +create index lapi_events_unassign_event_sequential_id_idx on lapi_events_unassign (event_sequential_id); -- multi-column index supporting per contract per synchronizer lookup before/after sequential id query -CREATE INDEX lapi_events_unassign_contract_id_composite_idx ON lapi_events_unassign (contract_id, source_synchronizer_id, event_sequential_id); +create index lapi_events_unassign_contract_id_composite_idx on lapi_events_unassign (contract_id, source_synchronizer_id, event_sequential_id); -- covering index for queries resolving offsets to sequential IDs. For temporary incomplete reassignments implementation.
-CREATE INDEX lapi_events_unassign_event_offset_idx ON lapi_events_unassign (event_offset, event_sequential_id); +create index lapi_events_unassign_event_offset_idx on lapi_events_unassign (event_offset, event_sequential_id); --------------------------------------------------------------------------------------------------- -- Events: Assign --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_events_assign ( +-- deprecated, to be removed. See #28008 +create table lapi_events_assign ( -- * fixed-size columns first to avoid padding - event_sequential_id bigint NOT NULL, -- event identification: same ordering as event_offset + event_sequential_id bigint not null, -- event identification: same ordering as event_offset -- * event identification - event_offset BIGINT NOT NULL, + event_offset bigint not null, -- * transaction metadata - update_id VARCHAR NOT NULL, - workflow_id VARCHAR, + update_id binary varying not null, + workflow_id varchar, -- * submitter info (only visible on submitting participant) - command_id VARCHAR, + command_id varchar, - submitter INTEGER NOT NULL, - node_id integer NOT NULL, -- event metadata + submitter integer not null, + node_id integer not null, -- event metadata -- * shared event information - contract_id BINARY VARYING NOT NULL, - template_id INTEGER NOT NULL, - package_id INTEGER NOT NULL, - flat_event_witnesses INTEGER ARRAY NOT NULL DEFAULT ARRAY[], -- stakeholders + contract_id binary varying not null, + template_id integer not null, + package_id integer not null, + flat_event_witnesses binary large object not null, -- stakeholders -- * common reassignment - source_synchronizer_id INTEGER NOT NULL, - target_synchronizer_id INTEGER NOT NULL, - reassignment_id VARCHAR NOT NULL, - reassignment_counter BIGINT NOT NULL, + source_synchronizer_id integer not null, + target_synchronizer_id integer not null, + reassignment_id binary large object not null, + reassignment_counter bigint not null, -- * assigned specific - create_argument BINARY LARGE OBJECT NOT NULL, - create_signatories INTEGER ARRAY NOT NULL, - create_observers INTEGER ARRAY NOT NULL, - create_key_value BINARY LARGE OBJECT, - create_key_hash VARCHAR, - create_key_maintainers INTEGER ARRAY, - create_argument_compression SMALLINT, - create_key_value_compression SMALLINT, - ledger_effective_time BIGINT NOT NULL, - authentication_data BINARY LARGE OBJECT NOT NULL, - - trace_context BINARY LARGE OBJECT NOT NULL, - record_time BIGINT NOT NULL + create_argument binary large object not null, + create_signatories binary large object not null, + create_observers binary large object not null, + create_key_value binary large object, + create_key_hash varchar, + create_key_maintainers binary large object, + create_argument_compression smallint, + create_key_value_compression smallint, + ledger_effective_time bigint not null, + authentication_data binary large object not null, + + trace_context binary large object not null, + record_time bigint not null, + internal_contract_id bigint not null ); -- sequential_id index for paging -CREATE INDEX lapi_events_assign_event_sequential_id_idx ON lapi_events_assign (event_sequential_id); +create index lapi_events_assign_event_sequential_id_idx on lapi_events_assign (event_sequential_id); -- covering index for queries resolving offsets to sequential IDs. For temporary incomplete reassignments implementation. 
-CREATE INDEX lapi_events_assign_event_offset_idx ON lapi_events_assign (event_offset, event_sequential_id); +create index lapi_events_assign_event_offset_idx on lapi_events_assign (event_offset, event_sequential_id); -- index for queries resolving contract ID to sequential IDs. -CREATE INDEX lapi_events_assign_event_contract_id_idx ON lapi_events_assign (contract_id, event_sequential_id); +create index lapi_events_assign_event_contract_id_idx on lapi_events_assign (contract_id, event_sequential_id); -- index for queries resolving (contract ID, synchronizer id, sequential ID) to sequential IDs. -CREATE INDEX lapi_events_assign_event_contract_id_synchronizer_id_seq_id_idx ON lapi_events_assign (contract_id, target_synchronizer_id, event_sequential_id); +create index lapi_events_assign_event_contract_id_synchronizer_id_seq_id_idx on lapi_events_assign (contract_id, target_synchronizer_id, event_sequential_id); --------------------------------------------------------------------------------------------------- -- Events: Topology (participant authorization mappings) --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_events_party_to_participant ( - event_sequential_id BIGINT NOT NULL, - event_offset BIGINT NOT NULL, - update_id VARCHAR NOT NULL, - party_id INTEGER NOT NULL, - participant_id VARCHAR NOT NULL, - participant_permission INTEGER NOT NULL, - participant_authorization_event INTEGER NOT NULL, - synchronizer_id INTEGER NOT NULL, - record_time BIGINT NOT NULL, - trace_context BINARY LARGE OBJECT NOT NULL +create table lapi_events_party_to_participant ( + event_sequential_id bigint not null, + event_offset bigint not null, + update_id binary varying not null, + party_id integer not null, + participant_id integer not null, + participant_permission integer not null, + participant_authorization_event integer not null, + synchronizer_id integer not null, + record_time bigint not null, + trace_context binary large object not null ); -- offset index: used to translate to sequential_id -CREATE INDEX lapi_events_party_to_participant_event_offset_idx ON lapi_events_party_to_participant (event_offset); +create index lapi_events_party_to_participant_event_offset_idx on lapi_events_party_to_participant (event_offset); -- sequential_id index for paging -CREATE INDEX lapi_events_party_to_participant_event_sequential_id_idx ON lapi_events_party_to_participant (event_sequential_id); +create index lapi_events_party_to_participant_event_sequential_id_idx on lapi_events_party_to_participant (event_sequential_id); -- party_id with event_sequential_id for id queries -CREATE INDEX lapi_events_party_to_participant_event_party_sequential_id_idx ON lapi_events_party_to_participant (party_id, event_sequential_id); +create index lapi_events_party_to_participant_event_party_sequential_id_idx on lapi_events_party_to_participant (party_id, event_sequential_id); -- party_id with event_sequential_id for id queries -CREATE INDEX lapi_events_party_to_participant_event_did_recordt_idx ON lapi_events_party_to_participant (synchronizer_id, record_time); +create index lapi_events_party_to_participant_event_did_recordt_idx on lapi_events_party_to_participant (synchronizer_id, record_time); ----------------------------- -- Filter tables for events ----------------------------- -- create stakeholders -CREATE TABLE lapi_pe_create_id_filter_stakeholder ( - event_sequential_id BIGINT NOT NULL, - template_id INTEGER NOT NULL, - party_id INTEGER NOT NULL -); -CREATE 
INDEX lapi_pe_create_id_filter_stakeholder_pts_idx ON lapi_pe_create_id_filter_stakeholder(party_id, template_id, event_sequential_id); -CREATE INDEX lapi_pe_create_id_filter_stakeholder_ps_idx ON lapi_pe_create_id_filter_stakeholder(party_id, event_sequential_id); -CREATE INDEX lapi_pe_create_id_filter_stakeholder_ts_idx ON lapi_pe_create_id_filter_stakeholder(template_id, event_sequential_id); -CREATE INDEX lapi_pe_create_id_filter_stakeholder_s_idx ON lapi_pe_create_id_filter_stakeholder(event_sequential_id); - -CREATE TABLE lapi_pe_create_id_filter_non_stakeholder_informee ( - event_sequential_id BIGINT NOT NULL, - template_id INTEGER NOT NULL, - party_id INTEGER NOT NULL -); -CREATE INDEX lapi_pe_create_id_filter_non_stakeholder_informee_pts_idx ON lapi_pe_create_id_filter_non_stakeholder_informee(party_id, template_id, event_sequential_id); -CREATE INDEX lapi_pe_create_id_filter_non_stakeholder_informee_ps_idx ON lapi_pe_create_id_filter_non_stakeholder_informee(party_id, event_sequential_id); -CREATE INDEX lapi_pe_create_id_filter_non_stakeholder_informee_ts_idx ON lapi_pe_create_id_filter_non_stakeholder_informee(template_id, event_sequential_id); -CREATE INDEX lapi_pe_create_id_filter_non_stakeholder_informee_s_idx ON lapi_pe_create_id_filter_non_stakeholder_informee(event_sequential_id); - -CREATE TABLE lapi_pe_consuming_id_filter_stakeholder ( - event_sequential_id BIGINT NOT NULL, - template_id INTEGER NOT NULL, - party_id INTEGER NOT NULL -); -CREATE INDEX lapi_pe_consuming_id_filter_stakeholder_pts_idx ON lapi_pe_consuming_id_filter_stakeholder(party_id, template_id, event_sequential_id); -CREATE INDEX lapi_pe_consuming_id_filter_stakeholder_ps_idx ON lapi_pe_consuming_id_filter_stakeholder(party_id, event_sequential_id); -CREATE INDEX lapi_pe_consuming_id_filter_stakeholder_ts_idx ON lapi_pe_consuming_id_filter_stakeholder(template_id, event_sequential_id); -CREATE INDEX lapi_pe_consuming_id_filter_stakeholder_s_idx ON lapi_pe_consuming_id_filter_stakeholder(event_sequential_id); - -CREATE TABLE lapi_pe_reassignment_id_filter_stakeholder ( - event_sequential_id BIGINT NOT NULL, - template_id INTEGER NOT NULL, - party_id INTEGER NOT NULL -); -CREATE INDEX lapi_pe_reassignment_id_filter_stakeholder_pts_idx ON lapi_pe_reassignment_id_filter_stakeholder(party_id, template_id, event_sequential_id); -CREATE INDEX lapi_pe_reassignment_id_filter_stakeholder_ps_idx ON lapi_pe_reassignment_id_filter_stakeholder(party_id, event_sequential_id); -CREATE INDEX lapi_pe_reassignment_id_filter_stakeholder_ts_idx ON lapi_pe_reassignment_id_filter_stakeholder(template_id, event_sequential_id); -CREATE INDEX lapi_pe_reassignment_id_filter_stakeholder_s_idx ON lapi_pe_reassignment_id_filter_stakeholder(event_sequential_id); - -CREATE TABLE lapi_pe_assign_id_filter_stakeholder ( - event_sequential_id BIGINT NOT NULL, - template_id INTEGER NOT NULL, - party_id INTEGER NOT NULL -); -CREATE INDEX lapi_pe_assign_id_filter_stakeholder_pts_idx ON lapi_pe_assign_id_filter_stakeholder(party_id, template_id, event_sequential_id); -CREATE INDEX lapi_pe_assign_id_filter_stakeholder_ps_idx ON lapi_pe_assign_id_filter_stakeholder(party_id, event_sequential_id); -CREATE INDEX lapi_pe_assign_id_filter_stakeholder_ts_idx ON lapi_pe_assign_id_filter_stakeholder(template_id, event_sequential_id); -CREATE INDEX lapi_pe_assign_id_filter_stakeholder_s_idx ON lapi_pe_assign_id_filter_stakeholder(event_sequential_id); - -CREATE TABLE lapi_pe_consuming_id_filter_non_stakeholder_informee ( - event_sequential_id BIGINT 
NOT NULL, - template_id INTEGER NOT NULL, - party_id INTEGER NOT NULL -); -CREATE INDEX lapi_pe_consuming_id_filter_non_stakeholder_informee_pts_idx ON lapi_pe_consuming_id_filter_non_stakeholder_informee(party_id, template_id, event_sequential_id); -CREATE INDEX lapi_pe_consuming_id_filter_non_stakeholder_informee_ps_idx ON lapi_pe_consuming_id_filter_non_stakeholder_informee(party_id, event_sequential_id); -CREATE INDEX lapi_pe_consuming_id_filter_non_stakeholder_informee_ts_idx ON lapi_pe_consuming_id_filter_non_stakeholder_informee(template_id, event_sequential_id); -CREATE INDEX lapi_pe_consuming_id_filter_non_stakeholder_informee_s_idx ON lapi_pe_consuming_id_filter_non_stakeholder_informee(event_sequential_id); - -CREATE TABLE lapi_pe_non_consuming_id_filter_informee ( - event_sequential_id BIGINT NOT NULL, - template_id INTEGER NOT NULL, - party_id INTEGER NOT NULL -); -CREATE INDEX lapi_pe_non_consuming_id_filter_informee_pts_idx ON lapi_pe_non_consuming_id_filter_informee(party_id, template_id, event_sequential_id); -CREATE INDEX lapi_pe_non_consuming_id_filter_informee_ps_idx ON lapi_pe_non_consuming_id_filter_informee(party_id, event_sequential_id); -CREATE INDEX lapi_pe_non_consuming_id_filter_informee_ts_idx ON lapi_pe_non_consuming_id_filter_informee(template_id, event_sequential_id); -CREATE INDEX lapi_pe_non_consuming_id_filter_informee_s_idx ON lapi_pe_non_consuming_id_filter_informee(event_sequential_id); +-- deprecated, to be removed. See #28008 +create table lapi_pe_create_id_filter_stakeholder ( + event_sequential_id bigint not null, + template_id integer not null, + party_id integer not null, + first_per_sequential_id boolean +); +create index lapi_pe_create_id_filter_stakeholder_pts_idx on lapi_pe_create_id_filter_stakeholder(party_id, template_id, event_sequential_id); +create index lapi_pe_create_id_filter_stakeholder_ps_idx on lapi_pe_create_id_filter_stakeholder(party_id, event_sequential_id); +create index lapi_pe_create_id_filter_stakeholder_ts_idx on lapi_pe_create_id_filter_stakeholder(template_id, event_sequential_id); +create index lapi_pe_create_id_filter_stakeholder_s_idx on lapi_pe_create_id_filter_stakeholder(event_sequential_id, first_per_sequential_id); + +-- deprecated, to be removed. See #28008 +create table lapi_pe_create_id_filter_non_stakeholder_informee ( + event_sequential_id bigint not null, + template_id integer not null, + party_id integer not null, + first_per_sequential_id boolean +); +create index lapi_pe_create_id_filter_non_stakeholder_informee_pts_idx on lapi_pe_create_id_filter_non_stakeholder_informee(party_id, template_id, event_sequential_id); +create index lapi_pe_create_id_filter_non_stakeholder_informee_ps_idx on lapi_pe_create_id_filter_non_stakeholder_informee(party_id, event_sequential_id); +create index lapi_pe_create_id_filter_non_stakeholder_informee_ts_idx on lapi_pe_create_id_filter_non_stakeholder_informee(template_id, event_sequential_id); +create index lapi_pe_create_id_filter_non_stakeholder_informee_s_idx on lapi_pe_create_id_filter_non_stakeholder_informee(event_sequential_id, first_per_sequential_id); + +-- deprecated, to be removed. 
See #28008 +create table lapi_pe_consuming_id_filter_stakeholder ( + event_sequential_id bigint not null, + template_id integer not null, + party_id integer not null, + first_per_sequential_id boolean +); +create index lapi_pe_consuming_id_filter_stakeholder_pts_idx on lapi_pe_consuming_id_filter_stakeholder(party_id, template_id, event_sequential_id); +create index lapi_pe_consuming_id_filter_stakeholder_ps_idx on lapi_pe_consuming_id_filter_stakeholder(party_id, event_sequential_id); +create index lapi_pe_consuming_id_filter_stakeholder_ts_idx on lapi_pe_consuming_id_filter_stakeholder(template_id, event_sequential_id); +create index lapi_pe_consuming_id_filter_stakeholder_s_idx on lapi_pe_consuming_id_filter_stakeholder(event_sequential_id, first_per_sequential_id); + +-- deprecated, to be removed. See #28008 +create table lapi_pe_reassignment_id_filter_stakeholder ( + event_sequential_id bigint not null, + template_id integer not null, + party_id integer not null, + first_per_sequential_id boolean +); +create index lapi_pe_reassignment_id_filter_stakeholder_pts_idx on lapi_pe_reassignment_id_filter_stakeholder(party_id, template_id, event_sequential_id); +create index lapi_pe_reassignment_id_filter_stakeholder_ps_idx on lapi_pe_reassignment_id_filter_stakeholder(party_id, event_sequential_id); +create index lapi_pe_reassignment_id_filter_stakeholder_ts_idx on lapi_pe_reassignment_id_filter_stakeholder(template_id, event_sequential_id); +create index lapi_pe_reassignment_id_filter_stakeholder_s_idx on lapi_pe_reassignment_id_filter_stakeholder(event_sequential_id, first_per_sequential_id); + +-- deprecated, to be removed. See #28008 +create table lapi_pe_assign_id_filter_stakeholder ( + event_sequential_id bigint not null, + template_id integer not null, + party_id integer not null, + first_per_sequential_id boolean +); +create index lapi_pe_assign_id_filter_stakeholder_pts_idx on lapi_pe_assign_id_filter_stakeholder(party_id, template_id, event_sequential_id); +create index lapi_pe_assign_id_filter_stakeholder_ps_idx on lapi_pe_assign_id_filter_stakeholder(party_id, event_sequential_id); +create index lapi_pe_assign_id_filter_stakeholder_ts_idx on lapi_pe_assign_id_filter_stakeholder(template_id, event_sequential_id); +create index lapi_pe_assign_id_filter_stakeholder_s_idx on lapi_pe_assign_id_filter_stakeholder(event_sequential_id, first_per_sequential_id); + +-- deprecated, to be removed. See #28008 +create table lapi_pe_consuming_id_filter_non_stakeholder_informee ( + event_sequential_id bigint not null, + template_id integer not null, + party_id integer not null, + first_per_sequential_id boolean +); +create index lapi_pe_consuming_id_filter_non_stakeholder_informee_pts_idx on lapi_pe_consuming_id_filter_non_stakeholder_informee(party_id, template_id, event_sequential_id); +create index lapi_pe_consuming_id_filter_non_stakeholder_informee_ps_idx on lapi_pe_consuming_id_filter_non_stakeholder_informee(party_id, event_sequential_id); +create index lapi_pe_consuming_id_filter_non_stakeholder_informee_ts_idx on lapi_pe_consuming_id_filter_non_stakeholder_informee(template_id, event_sequential_id); +create index lapi_pe_consuming_id_filter_non_stakeholder_informee_s_idx on lapi_pe_consuming_id_filter_non_stakeholder_informee(event_sequential_id, first_per_sequential_id); + +-- deprecated, to be removed. 
See #28008 +create table lapi_pe_non_consuming_id_filter_informee ( + event_sequential_id bigint not null, + template_id integer not null, + party_id integer not null, + first_per_sequential_id boolean +); +create index lapi_pe_non_consuming_id_filter_informee_pts_idx on lapi_pe_non_consuming_id_filter_informee(party_id, template_id, event_sequential_id); +create index lapi_pe_non_consuming_id_filter_informee_ps_idx on lapi_pe_non_consuming_id_filter_informee(party_id, event_sequential_id); +create index lapi_pe_non_consuming_id_filter_informee_ts_idx on lapi_pe_non_consuming_id_filter_informee(template_id, event_sequential_id); +create index lapi_pe_non_consuming_id_filter_informee_s_idx on lapi_pe_non_consuming_id_filter_informee(event_sequential_id, first_per_sequential_id); --------------------------------------------------------------------------------------------------- -- Transaction meta information -- -- This table is used in point-wise lookups. --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_update_meta( - update_id VARCHAR NOT NULL, - event_offset BIGINT NOT NULL, - publication_time BIGINT NOT NULL, - record_time BIGINT NOT NULL, - synchronizer_id INTEGER NOT NULL, - event_sequential_id_first BIGINT NOT NULL, - event_sequential_id_last BIGINT NOT NULL -); -CREATE INDEX lapi_update_meta_uid_idx ON lapi_update_meta(update_id); -CREATE INDEX lapi_update_meta_event_offset_idx ON lapi_update_meta(event_offset); -CREATE INDEX lapi_update_meta_publication_time_idx ON lapi_update_meta USING btree (publication_time, event_offset); -CREATE INDEX lapi_update_meta_synchronizer_record_time_idx ON lapi_update_meta USING btree (synchronizer_id, record_time); -CREATE INDEX lapi_update_meta_synchronizer_offset_idx ON lapi_update_meta USING btree (synchronizer_id, event_offset); +create table lapi_update_meta( + update_id binary varying not null, + event_offset bigint not null, + publication_time bigint not null, + record_time bigint not null, + synchronizer_id integer not null, + event_sequential_id_first bigint not null, + event_sequential_id_last bigint not null +); +create index lapi_update_meta_uid_idx on lapi_update_meta(update_id); +create index lapi_update_meta_event_offset_idx on lapi_update_meta(event_offset); +create index lapi_update_meta_publication_time_idx on lapi_update_meta using btree (publication_time, event_offset); +create index lapi_update_meta_synchronizer_record_time_idx on lapi_update_meta using btree (synchronizer_id, record_time); +create index lapi_update_meta_synchronizer_offset_idx on lapi_update_meta using btree (synchronizer_id, event_offset); -- NOTE: We keep participant user and party record tables independent from indexer-based tables, such that -- we maintain a property that they can be moved to a separate database without any extra schema changes. @@ -508,13 +737,13 @@ CREATE INDEX lapi_update_meta_synchronizer_offset_idx ON lapi_update_meta USING -- This table stores identity provider records used in the ledger api identity provider config -- service. 
--------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_identity_provider_config +create table lapi_identity_provider_config ( - identity_provider_id VARCHAR PRIMARY KEY NOT NULL, - issuer VARCHAR NOT NULL UNIQUE, - jwks_url VARCHAR NOT NULL, - is_deactivated BOOLEAN NOT NULL, - audience VARCHAR NULL + identity_provider_id varchar primary key not null, + issuer varchar not null unique, + jwks_url varchar not null, + is_deactivated boolean not null, + audience varchar null ); --------------------------------------------------------------------------------------------------- @@ -522,14 +751,14 @@ CREATE TABLE lapi_identity_provider_config -- -- This table stores user data used in the ledger api user management service. --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_users ( - internal_id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY, - user_id VARCHAR NOT NULL UNIQUE, - primary_party VARCHAR, - identity_provider_id VARCHAR REFERENCES lapi_identity_provider_config (identity_provider_id), - is_deactivated BOOLEAN NOT NULL, - resource_version BIGINT NOT NULL, - created_at BIGINT NOT NULL +create table lapi_users ( + internal_id integer generated always as identity primary key, + user_id varchar not null unique, + primary_party varchar, + identity_provider_id varchar references lapi_identity_provider_config (identity_provider_id), + is_deactivated boolean not null, + resource_version bigint not null, + created_at bigint not null ); --------------------------------------------------------------------------------------------------- @@ -537,17 +766,17 @@ CREATE TABLE lapi_users ( -- -- This table stores user rights used in the ledger api user management service. --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_user_rights ( - user_internal_id INTEGER NOT NULL REFERENCES lapi_users (internal_id) ON DELETE CASCADE, - user_right INTEGER NOT NULL, - for_party VARCHAR, - for_party2 VARCHAR GENERATED ALWAYS AS (CASE - WHEN for_party IS NOT NULL - THEN for_party - ELSE '' - END), - granted_at BIGINT NOT NULL, - UNIQUE (user_internal_id, user_right, for_party2) +create table lapi_user_rights ( + user_internal_id integer not null references lapi_users (internal_id) on delete cascade, + user_right integer not null, + for_party varchar, + for_party2 varchar generated always as (case + when for_party is not null + then for_party + else '' + end), + granted_at bigint not null, + unique (user_internal_id, user_right, for_party2) ); --------------------------------------------------------------------------------------------------- @@ -555,32 +784,32 @@ CREATE TABLE lapi_user_rights ( -- -- This table stores additional per user data used in the ledger api user management service. 
--------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_user_annotations ( - internal_id INTEGER NOT NULL REFERENCES lapi_users (internal_id) ON DELETE CASCADE, - name VARCHAR NOT NULL, - val VARCHAR, - updated_at BIGINT NOT NULL, - UNIQUE (internal_id, name) +create table lapi_user_annotations ( + internal_id integer not null references lapi_users (internal_id) on delete cascade, + name varchar not null, + val varchar, + updated_at bigint not null, + unique (internal_id, name) ); -INSERT INTO lapi_users(user_id, primary_party, identity_provider_id, is_deactivated, resource_version, created_at) - VALUES ('participant_admin', NULL, NULL, false, 0, 0); -INSERT INTO lapi_user_rights(user_internal_id, user_right, for_party, granted_at) - SELECT internal_id, 1, NULL, 0 - FROM lapi_users - WHERE user_id = 'participant_admin'; +insert into lapi_users(user_id, primary_party, identity_provider_id, is_deactivated, resource_version, created_at) + values ('participant_admin', null, null, false, 0, 0); +insert into lapi_user_rights(user_internal_id, user_right, for_party, granted_at) + select internal_id, 1, null, 0 + from lapi_users + where user_id = 'participant_admin'; --------------------------------------------------------------------------------------------------- -- Party records -- -- This table stores additional per party data used in the ledger api party management service. --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_party_records ( - internal_id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY, - party VARCHAR NOT NULL UNIQUE, - identity_provider_id VARCHAR REFERENCES lapi_identity_provider_config (identity_provider_id), - resource_version BIGINT NOT NULL, - created_at BIGINT NOT NULL +create table lapi_party_records ( + internal_id integer generated always as identity primary key, + party varchar not null unique, + identity_provider_id varchar references lapi_identity_provider_config (identity_provider_id), + resource_version bigint not null, + created_at bigint not null ); --------------------------------------------------------------------------------------------------- @@ -588,10 +817,10 @@ CREATE TABLE lapi_party_records ( -- -- This table stores additional per party data used in the ledger api party management service. 
--------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_party_record_annotations ( - internal_id INTEGER NOT NULL REFERENCES lapi_party_records (internal_id) ON DELETE CASCADE, - name VARCHAR NOT NULL, - val VARCHAR, - updated_at BIGINT NOT NULL, - UNIQUE (internal_id, name) +create table lapi_party_record_annotations ( + internal_id integer not null references lapi_party_records (internal_id) on delete cascade, + name varchar not null, + val varchar, + updated_at bigint not null, + unique (internal_id, name) ); diff --git a/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V6_0__mediator_topology_initialization_fix.sha256 b/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V6_0__mediator_topology_initialization_fix.sha256 deleted file mode 100644 index 67be962bed..0000000000 --- a/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V6_0__mediator_topology_initialization_fix.sha256 +++ /dev/null @@ -1 +0,0 @@ -fd35472b82ac37086bfc0708ae6056027a73dd75286cc9705028860e9baa6210 diff --git a/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V6_0__mediator_topology_initialization_fix.sql b/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V6_0__mediator_topology_initialization_fix.sql deleted file mode 100644 index f9ff07fbd3..0000000000 --- a/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V6_0__mediator_topology_initialization_fix.sql +++ /dev/null @@ -1,6 +0,0 @@ --- Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. --- SPDX-License-Identifier: Apache-2.0 - --- add the column with the default value false -alter table mediator_synchronizer_configuration -add column is_topology_initialized bool not null default false; diff --git a/canton/community/common/src/main/resources/db/migration/canton/postgres/dev/V999__dev.sql b/canton/community/common/src/main/resources/db/migration/canton/postgres/dev/V999__dev.sql index 23617c66e8..b9a0836cc0 100644 --- a/canton/community/common/src/main/resources/db/migration/canton/postgres/dev/V999__dev.sql +++ b/canton/community/common/src/main/resources/db/migration/canton/postgres/dev/V999__dev.sql @@ -1,4 +1,4 @@ --- Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +-- Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -- SPDX-License-Identifier: Apache-2.0 -- This is a dummy column we are adding in order to test that adding dev version migrations diff --git a/canton/community/common/src/main/resources/db/migration/canton/postgres/dev/reference/V998__blocks.sql b/canton/community/common/src/main/resources/db/migration/canton/postgres/dev/reference/V998__blocks.sql index 0c83c1828e..adb0f24f11 100644 --- a/canton/community/common/src/main/resources/db/migration/canton/postgres/dev/reference/V998__blocks.sql +++ b/canton/community/common/src/main/resources/db/migration/canton/postgres/dev/reference/V998__blocks.sql @@ -1,6 +1,5 @@ --- Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates --- --- Proprietary code. All rights reserved. +-- Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+-- SPDX-License-Identifier: Apache-2.0 create table blocks ( id bigint primary key, diff --git a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V10_0__sequencer_pruning_more_autoanalyze.sha256 b/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V10_0__sequencer_pruning_more_autoanalyze.sha256 deleted file mode 100644 index 5917dcb06f..0000000000 --- a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V10_0__sequencer_pruning_more_autoanalyze.sha256 +++ /dev/null @@ -1 +0,0 @@ -ba030fe599c1d26776c85b8c38bd67e05b631f4728882762a379010d6e3d125a diff --git a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V10_0__sequencer_pruning_more_autoanalyze.sql b/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V10_0__sequencer_pruning_more_autoanalyze.sql deleted file mode 100644 index 1620e5e987..0000000000 --- a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V10_0__sequencer_pruning_more_autoanalyze.sql +++ /dev/null @@ -1,39 +0,0 @@ --- By default analyze is triggered when a table has been vacuumed or when considerable part of the table changed. --- For very large tables (auto-)vacuuming is too slow, leading to statistics not being updated often enough. --- This leads to suboptimal query plans (falling back to Seq Scans), which can be avoided by running analyze more often. --- We use 1'000'000 rows as a threshold, with the reasoning: not too often, but enough to keep the query planner happy. -alter table sequencer_events - set ( - autovacuum_analyze_scale_factor = 0.0, - autovacuum_analyze_threshold = 1000000 - ); - -alter table sequencer_payloads - set ( - autovacuum_analyze_scale_factor = 0.0, - autovacuum_analyze_threshold = 1000000 - ); - -alter table seq_block_height - set ( - autovacuum_analyze_scale_factor = 0.0, - autovacuum_analyze_threshold = 1000000 - ); - -alter table seq_traffic_control_consumed_journal - set ( - autovacuum_analyze_scale_factor = 0.0, - autovacuum_analyze_threshold = 1000000 - ); - -alter table seq_in_flight_aggregated_sender - set ( - autovacuum_analyze_scale_factor = 0.0, - autovacuum_analyze_threshold = 1000000 - ); - -alter table seq_in_flight_aggregation - set ( - autovacuum_analyze_scale_factor = 0.0, - autovacuum_analyze_threshold = 1000000 - ); diff --git a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sha256 b/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sha256 index 1240553b5d..e85884f838 100644 --- a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sha256 +++ b/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sha256 @@ -1 +1 @@ -36a73456981806018045c7953a532f4827916d5d04fd2e40392180b0cb8aae97 +4cc2b02347d7f4492cce4d12b7d91535e99eb1073f9eed9cc55cbd4a612d882a diff --git a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sql b/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sql index 4dc6f4f46c..bb22a24124 100644 --- a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sql +++ b/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sql @@ -1,4 +1,4 @@ --- Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. 
All rights reserved. +-- Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -- SPDX-License-Identifier: Apache-2.0 create table par_daml_packages ( @@ -71,24 +71,27 @@ create table common_crypto_public_keys ( -- Stores the immutable contracts, however a creation of a contract can be rolled back. create table par_contracts ( + internal_contract_id bigint generated always as identity, contract_id bytea not null, -- The contract is a serialized LfFatContractInst using the LF contract proto serializer. instance bytea not null, package_id varchar collate "C" not null, template_id varchar collate "C" not null, - primary key (contract_id) + primary key (contract_id) include (internal_contract_id) ); +-- Index for lookup per internal_contract_id +create index idx_par_contracts_internal on par_contracts(internal_contract_id); -- Index to speedup ContractStore.find -- package_id comes before template_id, because queries with package_id and without template_id make more sense than vice versa. -- contract_id is left out, because a query with contract_id can be served with the primary key. create index idx_par_contracts_find on par_contracts(package_id, template_id); --- provides a serial enumeration of static strings so we don't store the same string over and over in the db +-- provides an enumeration of static strings so we don't store the same string over and over in the db -- currently only storing uids create table common_static_strings ( - -- serial identifier of the string (local to this node) - id serial not null primary key, + -- identifier of the string (local to this node) + id integer generated always as identity primary key, -- the expression string varchar collate "C" not null, -- the source (what kind of string are we storing here) @@ -207,6 +210,10 @@ create table common_sequenced_events ( primary key (physical_synchronizer_idx, ts) ); +-- Disable Postgres compression; the proto message is already compressed +alter table common_sequenced_events + alter column sequenced_event set storage external; + create unique index idx_common_sequenced_events_sequencer_counter on common_sequenced_events(physical_synchronizer_idx, sequencer_counter); create table par_synchronizer_connection_configs( @@ -328,7 +335,7 @@ create table par_commitment_snapshot ( -- A stable reference to a stakeholder set, that doesn't rely on the Protobuf encoding being deterministic -- a hex-encoded hash (not binary so that hash can be indexed in all db server types) stakeholders_hash varchar collate "C" not null, - stakeholders varchar[] collate "C" not null, + stakeholders integer[] not null, commitment bytea not null, primary key (synchronizer_idx, stakeholders_hash) ); @@ -457,7 +464,7 @@ create table common_head_sequencer_counters ( -- members can read all events from `registered_ts` create table sequencer_members ( member varchar collate "C" primary key, - id serial unique, + id integer generated always as identity unique, registered_ts bigint not null, -- we keep the latest event's timestamp below the pruning timestamp, -- so that we can produce a valid first event above the pruning timestamp with previousTimestamp populated @@ -648,12 +655,11 @@ create table sequencer_synchronizer_configuration ( create table mediator_deduplication_store ( - mediator_id varchar collate "C" not null, uuid varchar collate "C" not null, request_time bigint not null, expire_after bigint not null ); -create index idx_mediator_deduplication_store_expire_after on 
mediator_deduplication_store(mediator_id, expire_after); +create index idx_mediator_deduplication_store_expire_after on mediator_deduplication_store(expire_after); create table common_pruning_schedules( -- node_type is one of "MED", or "SEQ" @@ -686,8 +692,8 @@ create table seq_in_flight_aggregated_sender( -- stores the topology-x state transactions create table common_topology_transactions ( - -- serial identifier used to preserve insertion order - id bigserial not null primary key, + -- identifier used to preserve insertion order + id bigint generated always as identity primary key, -- the id of the store store_id varchar collate "C" not null, -- the timestamp at which the transaction is sequenced by the sequencer @@ -737,6 +743,26 @@ create table common_topology_transactions ( ); create index idx_common_topology_transactions on common_topology_transactions (store_id, transaction_type, namespace, identifier, valid_until, valid_from); +-- for: +-- - DbTopologyStore.findProposalsByTxHash +-- - DbTopologyStore.findLatestTransactionsAndProposalsByTxHash +create index idx_common_topology_transactions_by_tx_hash + on common_topology_transactions (store_id, tx_hash, is_proposal, valid_from, valid_until, rejection_reason); + +-- for: +-- - DbTopologyStore.findEffectiveStateChanges +create index idx_common_topology_transactions_effective_changes + on common_topology_transactions (store_id, is_proposal, valid_from, valid_until, rejection_reason) + where is_proposal = false; + + +-- for: +-- - DbTopologyStore.update, updating the valid_until column for past transactions +create index idx_common_topology_transactions_for_valid_until_update + on common_topology_transactions (store_id, mapping_key_hash, serial_counter, valid_from) + where valid_until is null; + + -- Stores the traffic purchased entry updates create table seq_traffic_control_balance_updates ( -- member the traffic purchased entry update is for @@ -968,7 +994,13 @@ alter table sequencer_events autovacuum_vacuum_cost_limit = 2000, autovacuum_vacuum_cost_delay = 5, autovacuum_vacuum_insert_scale_factor = 0.0, - autovacuum_vacuum_insert_threshold = 100000 + autovacuum_vacuum_insert_threshold = 100000, +-- By default, analyze is triggered when a table has been vacuumed or when a considerable part of the table has changed. +-- For very large tables (auto-)vacuuming is too slow, leading to statistics not being updated often enough. +-- This leads to suboptimal query plans (falling back to Seq Scans), which can be avoided by running analyze more often. +-- We use 1'000'000 rows as a threshold, with the reasoning: not too often, but often enough to keep the query planner happy. + autovacuum_analyze_scale_factor = 0.0, + autovacuum_analyze_threshold = 1000000 ); -- Note: *_threshold is 10x of the other tables, since this table has many more rows. 
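The partial index `idx_common_topology_transactions_for_valid_until_update` above is shaped for the expiry pass in `DbTopologyStore.update`; a minimal sketch of the statement shape it serves (placeholder names and the exact predicates are illustrative assumptions, not the store's actual SQL):

-- illustrative only: expire earlier transactions for the same mapping
update common_topology_transactions
   set valid_until = :effective_time
 where store_id = :store_id
   and mapping_key_hash = :mapping_key_hash
   and valid_from < :effective_time
   and valid_until is null;

Likewise, the per-table analyze settings added to `sequencer_events` here (and to the other sequencer tables below) can be read back from the Postgres catalog once the migration has run; a quick check, assuming a psql session on the same database:

-- list sequencer tables that carry custom autovacuum/analyze storage parameters
select relname, reloptions
  from pg_class
 where relname like 'seq%'
   and reloptions is not null;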
@@ -992,7 +1024,9 @@ alter table sequencer_payloads autovacuum_vacuum_cost_limit = 2000, autovacuum_vacuum_cost_delay = 5, autovacuum_vacuum_insert_scale_factor = 0.0, - autovacuum_vacuum_insert_threshold = 100000 + autovacuum_vacuum_insert_threshold = 100000, + autovacuum_analyze_scale_factor = 0.0, + autovacuum_analyze_threshold = 1000000 ); alter table seq_block_height @@ -1002,7 +1036,9 @@ alter table seq_block_height autovacuum_vacuum_cost_limit = 2000, autovacuum_vacuum_cost_delay = 5, autovacuum_vacuum_insert_scale_factor = 0.0, - autovacuum_vacuum_insert_threshold = 100000 + autovacuum_vacuum_insert_threshold = 100000, + autovacuum_analyze_scale_factor = 0.0, + autovacuum_analyze_threshold = 1000000 ); alter table seq_traffic_control_consumed_journal @@ -1012,7 +1048,9 @@ alter table seq_traffic_control_consumed_journal autovacuum_vacuum_cost_limit = 2000, autovacuum_vacuum_cost_delay = 5, autovacuum_vacuum_insert_scale_factor = 0.0, - autovacuum_vacuum_insert_threshold = 100000 + autovacuum_vacuum_insert_threshold = 100000, + autovacuum_analyze_scale_factor = 0.0, + autovacuum_analyze_threshold = 1000000 ); alter table seq_in_flight_aggregated_sender @@ -1022,7 +1060,9 @@ alter table seq_in_flight_aggregated_sender autovacuum_vacuum_cost_limit = 2000, autovacuum_vacuum_cost_delay = 5, autovacuum_vacuum_insert_scale_factor = 0.0, - autovacuum_vacuum_insert_threshold = 100000 + autovacuum_vacuum_insert_threshold = 100000, + autovacuum_analyze_scale_factor = 0.0, + autovacuum_analyze_threshold = 1000000 ); alter table seq_in_flight_aggregation @@ -1032,11 +1072,13 @@ alter table seq_in_flight_aggregation autovacuum_vacuum_cost_limit = 2000, autovacuum_vacuum_cost_delay = 5, autovacuum_vacuum_insert_scale_factor = 0.0, - autovacuum_vacuum_insert_threshold = 100000 + autovacuum_vacuum_insert_threshold = 100000, + autovacuum_analyze_scale_factor = 0.0, + autovacuum_analyze_threshold = 1000000 ); -- Stores participants we should not wait for before pruning when handling ACS commitment -Create TABLE acs_no_wait_counter_participants +create table acs_no_wait_counter_participants ( synchronizer_id varchar collate "C" not null, participant_id varchar collate "C" not null, @@ -1044,7 +1086,7 @@ Create TABLE acs_no_wait_counter_participants ); -- Stores configuration for metrics around slow participants -CREATE TABLE acs_slow_participant_config +create table acs_slow_participant_config ( synchronizer_id varchar collate "C" not null, threshold_distinguished integer not null, @@ -1053,7 +1095,7 @@ CREATE TABLE acs_slow_participant_config ); -- Stores distinguished or specifically measured counter participants for ACS commitment metrics -CREATE TABLE acs_slow_counter_participants +create table acs_slow_counter_participants ( synchronizer_id varchar collate "C" not null, participant_id varchar collate "C" not null, @@ -1061,3 +1103,23 @@ CREATE TABLE acs_slow_counter_participants is_added_to_metrics boolean not null, primary key(synchronizer_id,participant_id) ); + +-- Specifies the event that triggers the execution of a pending operation +create type pending_operation_trigger_type as enum ('synchronizer_reconnect'); + +-- Stores operations that must be completed, ensuring execution even after a node restart (e.g., following a crash) +create table common_pending_operations ( + id int not null generated always as identity, + operation_trigger pending_operation_trigger_type not null, + -- The name of the procedure to execute for this operation. 
+ operation_name varchar collate "C" not null, + -- A key to uniquely identify an instance of an operation, allowing multiple pending operations of the same type + -- An empty string indicates no specific key + operation_key varchar collate "C" not null, + -- The serialized protobuf message for the operation, wrapped for versioning (HasProtocolVersionedWrapper) + operation bytea not null, + -- The ID of the synchronizer instance this operation is associated with + synchronizer_id varchar collate "C" not null, + primary key (id), + unique (synchronizer_id, operation_key, operation_name) +); diff --git a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_2__initial_views.sha256 b/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_2__initial_views.sha256 index a2cac038b9..1cf1b1334e 100644 --- a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_2__initial_views.sha256 +++ b/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_2__initial_views.sha256 @@ -1 +1 @@ -6a90b96e34d15cc2e86ad7cbec3bd0681fc5cd8b92bf0b6ed4e4ba9e7c755853 +9b26d714b4bb628c3881b527261da135623f59c80d450b765102a78d9f4fd2f2 diff --git a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_2__initial_views.sql b/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_2__initial_views.sql index 511393d951..ee9ac941bb 100644 --- a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_2__initial_views.sql +++ b/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_2__initial_views.sql @@ -1,4 +1,4 @@ --- Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +-- Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-- SPDX-License-Identifier: Apache-2.0 create schema debug; @@ -166,7 +166,10 @@ create or replace view debug.par_dars as from par_dars; create or replace view debug.par_dar_packages as -select main_package_id , package_id from par_dar_packages; + select + main_package_id, + package_id + from par_dar_packages; create or replace view debug.common_crypto_private_keys as select @@ -195,6 +198,7 @@ create or replace view debug.common_crypto_public_keys as create or replace view debug.par_contracts as select + internal_contract_id, lower(encode(contract_id, 'hex')) as contract_id, instance, package_id, @@ -489,12 +493,12 @@ create or replace view debug.sequencer_events as from sequencer_events; create or replace view debug.sequencer_event_recipients as -select + select debug.canton_timestamp(ts) as ts, debug.resolve_sequencer_member(recipient_id) as recipient_id, node_index, is_topology_event -from sequencer_event_recipients; + from sequencer_event_recipients; create or replace view debug.par_pruning_schedules as select @@ -559,7 +563,6 @@ create or replace view debug.sequencer_synchronizer_configuration as create or replace view debug.mediator_deduplication_store as select - mediator_id, uuid, debug.canton_timestamp(request_time) as request_time, debug.canton_timestamp(expire_after) as expire_after @@ -648,14 +651,14 @@ create or replace view debug.ord_availability_batch as from ord_availability_batch; create or replace view debug.ord_pbft_messages_in_progress as -select + select block_number, epoch_number, view_number, message, discriminator, from_sequencer_id -from ord_pbft_messages_in_progress; + from ord_pbft_messages_in_progress; create or replace view debug.ord_pbft_messages_completed as select @@ -687,13 +690,13 @@ create or replace view debug.ord_output_lower_bound as from ord_output_lower_bound; create or replace view debug.ord_pruning_schedules as -select + select lock, cron, max_duration, retention, min_blocks_to_keep -from ord_pruning_schedules; + from ord_pruning_schedules; create or replace view debug.ord_leader_selection_state as select @@ -718,23 +721,33 @@ create or replace view debug.ord_p2p_endpoints as client_private_key_file from ord_p2p_endpoints; -create or replace VIEW debug.acs_no_wait_counter_participants as - select - synchronizer_id, - participant_id - from acs_no_wait_counter_participants; - -create or replace VIEW debug.acs_slow_participant_config as - select - synchronizer_id, - threshold_distinguished, - threshold_default - from acs_slow_participant_config; - -create or replace VIEW debug.acs_slow_counter_participants as - select - synchronizer_id, - participant_id, - is_distinguished, - is_added_to_metrics - from acs_slow_counter_participants; +create or replace view debug.acs_no_wait_counter_participants as + select + synchronizer_id, + participant_id + from acs_no_wait_counter_participants; + +create or replace view debug.acs_slow_participant_config as + select + synchronizer_id, + threshold_distinguished, + threshold_default + from acs_slow_participant_config; + +create or replace view debug.acs_slow_counter_participants as + select + synchronizer_id, + participant_id, + is_distinguished, + is_added_to_metrics + from acs_slow_counter_participants; + +create or replace view debug.common_pending_operations as + select + id, + operation_trigger, + operation_name, + operation_key, + operation, + synchronizer_id + from common_pending_operations; diff --git a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_0__lapi_3.0.sha256 
b/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_0__lapi_3.0.sha256 index fbc0352a87..aadd544df2 100644 --- a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_0__lapi_3.0.sha256 +++ b/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_0__lapi_3.0.sha256 @@ -1 +1 @@ -0ea2df1a47d5794d721274c7ca6251b9cdd583a2c0457d506d2a6c5296b0747d +4fba30632d4f4f8a9916295cd0587ebfc8f61b0bab0b6ae585ee183f597350ef diff --git a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_0__lapi_3.0.sql b/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_0__lapi_3.0.sql index 96c7267dbb..8cf4da0c66 100644 --- a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_0__lapi_3.0.sql +++ b/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_0__lapi_3.0.sql @@ -10,7 +10,7 @@ -- ledger_end_publication_time are always defined at the same time. I.e., either -- all are NULL, or all are defined. --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_parameters ( +create table lapi_parameters ( -- stores the head offset, meant to change with every new ledger entry -- NULL denotes the participant begin ledger_end bigint, @@ -26,31 +26,31 @@ CREATE TABLE lapi_parameters ( ledger_end_publication_time bigint ); -CREATE TABLE lapi_post_processing_end ( +create table lapi_post_processing_end ( -- null signifies the participant begin post_processing_end bigint ); -CREATE TABLE lapi_ledger_end_synchronizer_index ( - synchronizer_id INTEGER PRIMARY KEY not null, - sequencer_timestamp BIGINT, - repair_timestamp BIGINT, - repair_counter BIGINT, - record_time BIGINT NOT NULL +create table lapi_ledger_end_synchronizer_index ( + synchronizer_id integer primary key not null, + sequencer_timestamp bigint, + repair_timestamp bigint, + repair_counter bigint, + record_time bigint not null ); --------------------------------------------------------------------------------------------------- -- Completions --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_command_completions ( +create table lapi_command_completions ( completion_offset bigint not null, record_time bigint not null, publication_time bigint not null, - user_id varchar collate "C" not null, - submitters integer[] not null, + user_id integer not null, + submitters bytea not null, command_id varchar collate "C" not null, -- The update ID is `NULL` for rejected transactions/reassignments. - update_id varchar collate "C", + update_id bytea, -- The submission ID will be provided by the participant or driver if the user didn't provide one. -- Nullable to support historical data. 
submission_id varchar collate "C", @@ -77,15 +77,217 @@ CREATE TABLE lapi_command_completions ( trace_context bytea not null ); -CREATE INDEX lapi_command_completions_user_id_offset_idx ON lapi_command_completions USING btree (user_id, completion_offset); -CREATE INDEX lapi_command_completions_offset_idx ON lapi_command_completions USING btree (completion_offset); -CREATE INDEX lapi_command_completions_publication_time_idx ON lapi_command_completions USING btree (publication_time, completion_offset); -CREATE INDEX lapi_command_completions_synchronizer_record_time_offset_idx ON lapi_command_completions USING btree (synchronizer_id, record_time, completion_offset); -CREATE INDEX lapi_command_completions_synchronizer_offset_idx ON lapi_command_completions USING btree (synchronizer_id, completion_offset); +create index lapi_command_completions_user_id_offset_idx on lapi_command_completions using btree (user_id, completion_offset); +create index lapi_command_completions_offset_idx on lapi_command_completions using btree (completion_offset); +create index lapi_command_completions_publication_time_idx on lapi_command_completions using btree (publication_time, completion_offset); +create index lapi_command_completions_synchronizer_record_time_offset_idx on lapi_command_completions using btree (synchronizer_id, record_time, completion_offset); +create index lapi_command_completions_synchronizer_offset_idx on lapi_command_completions using btree (synchronizer_id, completion_offset); + +--------------------------------------------------------------------------------------------------- +-- Events: Activate Contract +--------------------------------------------------------------------------------------------------- +create table lapi_events_activate_contract ( + -- update related columns + event_offset bigint not null, + update_id bytea not null, + workflow_id varchar collate "C", + command_id varchar collate "C", + submitters bytea, + record_time bigint not null, + synchronizer_id integer not null, + trace_context bytea not null, + external_transaction_hash bytea, + + -- event related columns + event_type smallint not null, -- all event types + event_sequential_id bigint not null, -- all event types + node_id integer not null, -- all event types + additional_witnesses bytea, -- create events + source_synchronizer_id integer, -- assign events + reassignment_counter bigint, -- assign events + reassignment_id bytea, -- assign events + representative_package_id integer not null, -- create events + + -- contract related columns + internal_contract_id bigint not null, -- all event types + create_key_hash varchar collate "C" -- create +); + +-- sequential_id index +create index lapi_events_activate_sequential_id_idx on lapi_events_activate_contract using btree (event_sequential_id) include (event_type, synchronizer_id); +-- event_offset index +create index lapi_events_activate_offset_idx on lapi_events_activate_contract using btree (event_offset); +-- internal_contract_id index +create index lapi_events_activate_internal_contract_id_idx on lapi_events_activate_contract using btree (internal_contract_id, event_sequential_id); +-- contract_key index +create index lapi_events_activate_contract_key_idx on lapi_events_activate_contract using btree (create_key_hash, event_sequential_id) where create_key_hash is not null; + +-- filter table for stakeholders +create table lapi_filter_activate_stakeholder ( + event_sequential_id bigint not null, + template_id integer not null, + party_id integer not null, + 
first_per_sequential_id boolean +); +create index lapi_filter_activate_stakeholder_ps_idx on lapi_filter_activate_stakeholder using btree (party_id, event_sequential_id); +create index lapi_filter_activate_stakeholder_pts_idx on lapi_filter_activate_stakeholder using btree (party_id, template_id, event_sequential_id); +create index lapi_filter_activate_stakeholder_ts_idx on lapi_filter_activate_stakeholder using btree (template_id, event_sequential_id) where first_per_sequential_id; +create index lapi_filter_activate_stakeholder_s_idx on lapi_filter_activate_stakeholder using btree (event_sequential_id, first_per_sequential_id); + +-- filter table for additional witnesses +create table lapi_filter_activate_witness ( + event_sequential_id bigint not null, + template_id integer not null, + party_id integer not null, + first_per_sequential_id boolean +); +create index lapi_filter_activate_witness_ps_idx on lapi_filter_activate_witness using btree (party_id, event_sequential_id); +create index lapi_filter_activate_witness_pts_idx on lapi_filter_activate_witness using btree (party_id, template_id, event_sequential_id); +create index lapi_filter_activate_witness_ts_idx on lapi_filter_activate_witness using btree (template_id, event_sequential_id) where first_per_sequential_id; +create index lapi_filter_activate_witness_s_idx on lapi_filter_activate_witness using btree (event_sequential_id, first_per_sequential_id); + +--------------------------------------------------------------------------------------------------- +-- Events: Deactivate Contract +--------------------------------------------------------------------------------------------------- +create table lapi_events_deactivate_contract ( + -- update related columns + event_offset bigint not null, + update_id bytea not null, + workflow_id varchar collate "C", + command_id varchar collate "C", + submitters bytea, + record_time bigint not null, + synchronizer_id integer not null, + trace_context bytea not null, + external_transaction_hash bytea, + + -- event related columns + event_type smallint not null, -- all event types + event_sequential_id bigint not null, -- all event types + node_id integer not null, -- all event types + deactivated_event_sequential_id bigint, -- all event types + additional_witnesses bytea, -- consuming events + exercise_choice integer, -- consuming events + exercise_choice_interface integer, -- consuming events + exercise_argument bytea, -- consuming events + exercise_result bytea, -- consuming events + exercise_actors bytea, -- consuming events + exercise_last_descendant_node_id integer, -- consuming events + exercise_argument_compression smallint, -- consuming events + exercise_result_compression smallint, -- consuming events + reassignment_id bytea, -- unassign events + assignment_exclusivity bigint, -- unassign events + target_synchronizer_id integer, -- unassign events + reassignment_counter bigint, -- unassign events + + -- contract related columns + contract_id bytea, -- all event types + internal_contract_id bigint, -- all event types + template_id integer not null, -- all event types + package_id integer not null, -- all event types + stakeholders bytea not null, -- all event types + ledger_effective_time bigint -- consuming events +); + +-- sequential_id index +create index lapi_events_deactivate_sequential_id_idx on lapi_events_deactivate_contract using btree (event_sequential_id) include (event_type); +-- event_offset index +create index lapi_events_deactivate_offset_idx on 
lapi_events_deactivate_contract using btree (event_offset); +-- internal_contract_id index +create index lapi_events_deactivate_internal_contract_id_idx on lapi_events_deactivate_contract using btree (internal_contract_id, event_sequential_id) where internal_contract_id is not null; +-- internal_contract_id index serving only the consuming exercises (PersistentEventType.ConsumingExercise) +-- this index is needed by batched contract lookups for interpretation and event_query_service +create index lapi_events_deactivate_internal_contract_id_archive_idx on lapi_events_deactivate_contract using btree (internal_contract_id, event_sequential_id) where internal_contract_id is not null and event_type = 3; +-- deactivation reference index +create index lapi_events_deactivated_event_sequential_id_idx on lapi_events_deactivate_contract using btree (deactivated_event_sequential_id) include (event_sequential_id) where deactivated_event_sequential_id is not null; + +-- filter table for stakeholders +create table lapi_filter_deactivate_stakeholder ( + event_sequential_id bigint not null, + template_id integer not null, + party_id integer not null, + first_per_sequential_id boolean +); +create index lapi_filter_deactivate_stakeholder_ps_idx on lapi_filter_deactivate_stakeholder using btree (party_id, event_sequential_id); +create index lapi_filter_deactivate_stakeholder_pts_idx on lapi_filter_deactivate_stakeholder using btree (party_id, template_id, event_sequential_id); +create index lapi_filter_deactivate_stakeholder_ts_idx on lapi_filter_deactivate_stakeholder using btree (template_id, event_sequential_id) where first_per_sequential_id; +create index lapi_filter_deactivate_stakeholder_s_idx on lapi_filter_deactivate_stakeholder using btree (event_sequential_id, first_per_sequential_id); + +-- filter table for additional witnesses +create table lapi_filter_deactivate_witness ( + event_sequential_id bigint not null, + template_id integer not null, + party_id integer not null, + first_per_sequential_id boolean +); +create index lapi_filter_deactivate_witness_ps_idx on lapi_filter_deactivate_witness using btree (party_id, event_sequential_id); +create index lapi_filter_deactivate_witness_pts_idx on lapi_filter_deactivate_witness using btree (party_id, template_id, event_sequential_id); +create index lapi_filter_deactivate_witness_ts_idx on lapi_filter_deactivate_witness using btree (template_id, event_sequential_id) where first_per_sequential_id; +create index lapi_filter_deactivate_witness_s_idx on lapi_filter_deactivate_witness using btree (event_sequential_id, first_per_sequential_id); + +--------------------------------------------------------------------------------------------------- +-- Events: Various Witnessed +--------------------------------------------------------------------------------------------------- +create table lapi_events_various_witnessed ( + -- tx related columns + event_offset bigint not null, + update_id bytea not null, + workflow_id varchar collate "C", + command_id varchar collate "C", + submitters bytea, + record_time bigint not null, + synchronizer_id integer not null, + trace_context bytea not null, + external_transaction_hash bytea, + + -- event related columns + event_type smallint not null, -- all event types + event_sequential_id bigint not null, -- all event types + node_id integer not null, -- all event types + additional_witnesses bytea not null, -- all event types + consuming boolean, -- exercise + exercise_choice integer, -- exercise + exercise_choice_interface 
integer, -- exercise + exercise_argument bytea, -- exercise + exercise_result bytea, -- exercise + exercise_actors bytea, -- exercise + exercise_last_descendant_node_id integer, -- exercise + exercise_argument_compression smallint, -- exercise + exercise_result_compression smallint, -- exercise + representative_package_id integer, -- create events + + -- contract related columns + contract_id bytea, + internal_contract_id bigint, + template_id integer, + package_id integer, + ledger_effective_time bigint +); + +-- sequential_id index +create index lapi_events_various_sequential_id_idx on lapi_events_various_witnessed using btree (event_sequential_id) include (event_type); +-- event_offset index +create index lapi_events_various_offset_idx on lapi_events_various_witnessed using btree (event_offset); +-- internal_contract_id index +create index lapi_events_various_internal_contract_id_idx on lapi_events_various_witnessed using btree (internal_contract_id, event_sequential_id) where internal_contract_id is not null; + +-- filter table for additional witnesses +create table lapi_filter_various_witness ( + event_sequential_id bigint not null, + template_id integer not null, + party_id integer not null, + first_per_sequential_id boolean +); +create index lapi_filter_various_witness_ps_idx on lapi_filter_various_witness using btree (party_id, event_sequential_id); +create index lapi_filter_various_witness_pts_idx on lapi_filter_various_witness using btree (party_id, template_id, event_sequential_id); +create index lapi_filter_various_witness_ts_idx on lapi_filter_various_witness using btree (template_id, event_sequential_id) where first_per_sequential_id; +create index lapi_filter_various_witness_s_idx on lapi_filter_various_witness using btree (event_sequential_id, first_per_sequential_id); + --------------------------------------------------------------------------------------------------- -- Events: Assign --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_events_assign ( +-- deprecated, to be removed. 
See #28008 +create table lapi_events_assign ( -- * fixed-size columns first to avoid padding event_sequential_id bigint not null, -- event identification: same ordering as event_offset @@ -93,7 +295,7 @@ CREATE TABLE lapi_events_assign ( event_offset bigint not null, -- * transaction metadata - update_id varchar collate "C" not null, + update_id bytea not null, workflow_id varchar collate "C", -- * submitter info (only visible on submitting participant) @@ -106,18 +308,18 @@ CREATE TABLE lapi_events_assign ( contract_id bytea not null, template_id integer not null, package_id integer not null, - flat_event_witnesses integer[] default '{}'::integer[] not null, -- stakeholders + flat_event_witnesses bytea not null, -- stakeholders -- * common reassignment source_synchronizer_id integer not null, target_synchronizer_id integer not null, - reassignment_id varchar collate "C" not null, + reassignment_id bytea not null, reassignment_counter bigint not null, -- * assigned specific create_argument bytea not null, - create_signatories integer[] default '{}'::integer[] not null, - create_observers integer[] default '{}'::integer[] not null, + create_signatories bytea not null, + create_observers bytea not null, create_key_value bytea, create_key_hash varchar collate "C", create_argument_compression smallint, @@ -125,27 +327,29 @@ CREATE TABLE lapi_events_assign ( ledger_effective_time bigint not null, authentication_data bytea not null, - create_key_maintainers integer[], + create_key_maintainers bytea, trace_context bytea not null, - record_time bigint not null + record_time bigint not null, + internal_contract_id bigint not null ); -- index for queries resolving contract ID to sequential IDs. -CREATE INDEX lapi_events_assign_event_contract_id_idx ON lapi_events_assign USING btree (contract_id, event_sequential_id); +create index lapi_events_assign_event_contract_id_idx on lapi_events_assign using btree (contract_id, event_sequential_id); -- index for queries resolving (contract ID, synchronizer id, sequential ID) to sequential IDs. -CREATE INDEX lapi_events_assign_event_contract_id_synchronizer_id_seq_id_idx ON lapi_events_assign USING btree (contract_id, target_synchronizer_id, event_sequential_id); +create index lapi_events_assign_event_contract_id_synchronizer_id_seq_id_idx on lapi_events_assign using btree (contract_id, target_synchronizer_id, event_sequential_id); -- covering index for queries resolving offsets to sequential IDs. For temporary incomplete reassignments implementation. -CREATE INDEX lapi_events_assign_event_offset_idx ON lapi_events_assign USING btree (event_offset, event_sequential_id); +create index lapi_events_assign_event_offset_idx on lapi_events_assign using btree (event_offset, event_sequential_id); -- sequential_id index for paging -CREATE INDEX lapi_events_assign_event_sequential_id_idx ON lapi_events_assign USING btree (event_sequential_id); +create index lapi_events_assign_event_sequential_id_idx on lapi_events_assign using btree (event_sequential_id); --------------------------------------------------------------------------------------------------- -- Events: consuming exercise --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_events_consuming_exercise ( +-- deprecated, to be removed. 
See #28008 +create table lapi_events_consuming_exercise ( -- * fixed-size columns first to avoid padding event_sequential_id bigint not null, -- event identification: same ordering as event_offset ledger_effective_time bigint not null, -- transaction metadata @@ -155,25 +359,26 @@ CREATE TABLE lapi_events_consuming_exercise ( event_offset bigint not null, -- * transaction metadata - update_id varchar collate "C" not null, + update_id bytea not null, workflow_id varchar collate "C", -- * submitter info (only visible on submitting participant) command_id varchar collate "C", - submitters integer[], + submitters bytea, -- * shared event information contract_id bytea not null, template_id integer not null, package_id integer not null, - flat_event_witnesses integer[] default '{}'::integer[] not null, -- stakeholders - tree_event_witnesses integer[] default '{}'::integer[] not null, -- informees + flat_event_witnesses bytea not null, -- stakeholders + tree_event_witnesses bytea not null, -- informees -- * choice data - exercise_choice varchar collate "C" not null, + exercise_choice integer not null, + exercise_choice_interface integer, exercise_argument bytea not null, exercise_result bytea, - exercise_actors integer[] not null, + exercise_actors bytea not null, exercise_last_descendant_node_id integer not null, -- * compression flags @@ -183,22 +388,27 @@ CREATE TABLE lapi_events_consuming_exercise ( synchronizer_id integer not null, trace_context bytea not null, record_time bigint not null, - external_transaction_hash bytea + external_transaction_hash bytea, + deactivated_event_sequential_id bigint ); +-- lookup by deactivation +create index lapi_events_consuming_exercise_deactivated_id_idx on lapi_events_consuming_exercise using btree (deactivated_event_sequential_id) include (event_sequential_id) where deactivated_event_sequential_id is not null; + -- lookup by contract id -CREATE INDEX lapi_events_consuming_exercise_contract_id_idx ON lapi_events_consuming_exercise USING hash (contract_id); +create index lapi_events_consuming_exercise_contract_id_idx on lapi_events_consuming_exercise using hash (contract_id); -- offset index: used to translate to sequential_id -CREATE INDEX lapi_events_consuming_exercise_event_offset_idx ON lapi_events_consuming_exercise USING btree (event_offset); +create index lapi_events_consuming_exercise_event_offset_idx on lapi_events_consuming_exercise using btree (event_offset); -- sequential_id index for paging -CREATE INDEX lapi_events_consuming_exercise_event_sequential_id_idx ON lapi_events_consuming_exercise USING btree (event_sequential_id); +create index lapi_events_consuming_exercise_event_sequential_id_idx on lapi_events_consuming_exercise using btree (event_sequential_id); --------------------------------------------------------------------------------------------------- -- Events: create --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_events_create ( +-- deprecated, to be removed. 
See #28008 +create table lapi_events_create ( -- * fixed-size columns first to avoid padding event_sequential_id bigint not null, -- event identification: same ordering as event_offset ledger_effective_time bigint not null, -- transaction metadata @@ -208,24 +418,25 @@ CREATE TABLE lapi_events_create ( event_offset bigint not null, -- * transaction metadata - update_id varchar collate "C" not null, + update_id bytea not null, workflow_id varchar collate "C", -- * submitter info (only visible on submitting participant) command_id varchar collate "C", - submitters integer[], + submitters bytea, -- * shared event information contract_id bytea not null, template_id integer not null, package_id integer not null, - flat_event_witnesses integer[] default '{}'::integer[] not null, -- stakeholders - tree_event_witnesses integer[] default '{}'::integer[] not null, -- informees + representative_package_id integer not null, + flat_event_witnesses bytea not null, -- stakeholders + tree_event_witnesses bytea not null, -- informees -- * contract data create_argument bytea not null, - create_signatories integer[] not null, - create_observers integer[] not null, + create_signatories bytea not null, + create_observers bytea not null, create_key_value bytea, create_key_hash varchar collate "C", @@ -234,28 +445,30 @@ CREATE TABLE lapi_events_create ( create_key_value_compression smallint, authentication_data bytea not null, synchronizer_id integer not null, - create_key_maintainers integer[], + create_key_maintainers bytea, trace_context bytea not null, record_time bigint not null, - external_transaction_hash bytea + external_transaction_hash bytea, + internal_contract_id bigint not null ); -- lookup by contract_id -CREATE INDEX lapi_events_create_contract_id_idx ON lapi_events_create USING hash (contract_id); +create index lapi_events_create_contract_id_idx on lapi_events_create using hash (contract_id); -- lookup by contract_key -CREATE INDEX lapi_events_create_create_key_hash_idx ON lapi_events_create USING btree (create_key_hash, event_sequential_id) WHERE create_key_hash IS NOT NULL; +create index lapi_events_create_create_key_hash_idx on lapi_events_create using btree (create_key_hash, event_sequential_id) where create_key_hash is not null; -- offset index: used to translate to sequential_id -CREATE INDEX lapi_events_create_event_offset_idx ON lapi_events_create USING btree (event_offset); +create index lapi_events_create_event_offset_idx on lapi_events_create using btree (event_offset); -- sequential_id index for paging -CREATE INDEX lapi_events_create_event_sequential_id_idx ON lapi_events_create USING btree (event_sequential_id); +create index lapi_events_create_event_sequential_id_idx on lapi_events_create using btree (event_sequential_id); --------------------------------------------------------------------------------------------------- -- Events: non-consuming exercise --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_events_non_consuming_exercise ( +-- deprecated, to be removed. 
See #28008 +create table lapi_events_non_consuming_exercise ( -- * fixed-size columns first to avoid padding event_sequential_id bigint not null, -- event identification: same ordering as event_offset ledger_effective_time bigint not null, -- transaction metadata @@ -265,24 +478,25 @@ CREATE TABLE lapi_events_non_consuming_exercise ( event_offset bigint not null, -- * transaction metadata - update_id varchar collate "C" not null, + update_id bytea not null, workflow_id varchar collate "C", -- * submitter info (only visible on submitting participant) command_id varchar collate "C", - submitters integer[], + submitters bytea, -- * shared event information contract_id bytea not null, template_id integer not null, package_id integer not null, - tree_event_witnesses integer[] default '{}'::integer[] not null, -- informees + tree_event_witnesses bytea not null, -- informees -- * choice data - exercise_choice varchar collate "C" not null, + exercise_choice integer not null, + exercise_choice_interface integer, exercise_argument bytea not null, exercise_result bytea, - exercise_actors integer[] not null, + exercise_actors bytea not null, exercise_last_descendant_node_id integer not null, -- * compression flags @@ -296,15 +510,16 @@ CREATE TABLE lapi_events_non_consuming_exercise ( ); -- offset index: used to translate to sequential_id -CREATE INDEX lapi_events_non_consuming_exercise_event_offset_idx ON lapi_events_non_consuming_exercise USING btree (event_offset); +create index lapi_events_non_consuming_exercise_event_offset_idx on lapi_events_non_consuming_exercise using btree (event_offset); -- sequential_id index for paging -CREATE INDEX lapi_events_non_consuming_exercise_event_sequential_id_idx ON lapi_events_non_consuming_exercise USING btree (event_sequential_id); +create index lapi_events_non_consuming_exercise_event_sequential_id_idx on lapi_events_non_consuming_exercise using btree (event_sequential_id); --------------------------------------------------------------------------------------------------- -- Events: Unassign --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_events_unassign ( +-- deprecated, to be removed. 
See #28008 +create table lapi_events_unassign ( -- * fixed-size columns first to avoid padding event_sequential_id bigint not null, -- event identification: same ordering as event_offset @@ -312,7 +527,7 @@ CREATE TABLE lapi_events_unassign ( event_offset bigint not null, -- * transaction metadata - update_id varchar collate "C" not null, + update_id bytea not null, workflow_id varchar collate "C", -- * submitter info (only visible on submitting participant) @@ -325,39 +540,43 @@ CREATE TABLE lapi_events_unassign ( contract_id bytea not null, template_id integer not null, package_id integer not null, - flat_event_witnesses integer[] default '{}'::integer[] not null, -- stakeholders + flat_event_witnesses bytea not null, -- stakeholders -- * common reassignment source_synchronizer_id integer not null, target_synchronizer_id integer not null, - reassignment_id varchar collate "C" not null, + reassignment_id bytea not null, reassignment_counter bigint not null, -- * unassigned specific assignment_exclusivity bigint, trace_context bytea not null, - record_time bigint not null + record_time bigint not null, + deactivated_event_sequential_id bigint ); +-- deactivations +create index lapi_events_unassign_deactivated_idx on lapi_events_unassign using btree (deactivated_event_sequential_id) include (event_sequential_id) where deactivated_event_sequential_id is not null; + -- multi-column index supporting per-contract, per-synchronizer lookups before/after a given sequential id -CREATE INDEX lapi_events_unassign_contract_id_composite_idx ON lapi_events_unassign USING btree (contract_id, source_synchronizer_id, event_sequential_id); +create index lapi_events_unassign_contract_id_composite_idx on lapi_events_unassign using btree (contract_id, source_synchronizer_id, event_sequential_id); -- covering index for queries resolving offsets to sequential IDs, used by the temporary incomplete-reassignments implementation.
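-- Illustrative usage (an assumption about the intended query shape, not part of the schema):
-- with event_offset and event_sequential_id both part of the btree key, a hypothetical
-- offset-to-sequential-id resolution such as
--   select max(event_sequential_id) from lapi_events_unassign where event_offset <= :target_offset;
-- can typically be satisfied by an index-only scan, without visiting the table heap.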
-CREATE INDEX lapi_events_unassign_event_offset_idx ON lapi_events_unassign USING btree (event_offset, event_sequential_id); +create index lapi_events_unassign_event_offset_idx on lapi_events_unassign using btree (event_offset, event_sequential_id); -- sequential_id index for paging -CREATE INDEX lapi_events_unassign_event_sequential_id_idx ON lapi_events_unassign USING btree (event_sequential_id); +create index lapi_events_unassign_event_sequential_id_idx on lapi_events_unassign using btree (event_sequential_id); --------------------------------------------------------------------------------------------------- -- Events: Topology (participant authorization mappings) --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_events_party_to_participant ( +create table lapi_events_party_to_participant ( event_sequential_id bigint not null, event_offset bigint not null, - update_id varchar collate "C" not null, + update_id bytea not null, party_id integer not null, - participant_id varchar collate "C" not null, + participant_id integer not null, participant_permission integer not null, participant_authorization_event integer not null, synchronizer_id integer not null, @@ -366,16 +585,16 @@ CREATE TABLE lapi_events_party_to_participant ( ); -- offset index: used to translate to sequential_id -CREATE INDEX lapi_events_party_to_participant_event_offset_idx ON lapi_events_party_to_participant USING btree (event_offset); +create index lapi_events_party_to_participant_event_offset_idx on lapi_events_party_to_participant using btree (event_offset); -- sequential_id index for paging -CREATE INDEX lapi_events_party_to_participant_event_sequential_id_idx ON lapi_events_party_to_participant USING btree (event_sequential_id); +create index lapi_events_party_to_participant_event_sequential_id_idx on lapi_events_party_to_participant using btree (event_sequential_id); -- party_id with event_sequential_id for id queries -CREATE INDEX lapi_events_party_to_participant_event_party_sequential_id_idx ON lapi_events_party_to_participant USING btree (party_id, event_sequential_id); +create index lapi_events_party_to_participant_event_party_sequential_id_idx on lapi_events_party_to_participant using btree (party_id, event_sequential_id); -- synchronizer_id with record_time for record time queries -CREATE INDEX lapi_events_party_to_participant_event_did_recordt_idx ON lapi_events_party_to_participant USING btree (synchronizer_id, record_time); +create index lapi_events_party_to_participant_event_did_recordt_idx on lapi_events_party_to_participant using btree (synchronizer_id, record_time); --------------------------------------------------------------------------------------------------- -- Identity provider configs @@ -383,7 +602,7 @@ CREATE INDEX lapi_events_party_to_participant_event_did_recordt_idx ON lapi_even -- This table stores identity provider records used in the ledger api identity provider config -- service. --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_identity_provider_config ( +create table lapi_identity_provider_config ( identity_provider_id varchar collate "C" primary key not null, issuer varchar collate "C" not null unique, jwks_url varchar collate "C" not null, @@ -396,8 +615,8 @@ -- -- This table stores additional per party data used in the ledger api party management service.
--------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_party_records ( - internal_id serial primary key, +create table lapi_party_records ( + internal_id integer generated always as identity primary key, party varchar collate "C" not null unique, resource_version bigint not null, created_at bigint not null, @@ -409,7 +628,7 @@ CREATE TABLE lapi_party_records ( -- -- This table stores additional per party data used in the ledger api party management service. --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_party_record_annotations ( +create table lapi_party_record_annotations ( internal_id integer not null references lapi_party_records (internal_id) on delete cascade, name varchar collate "C" not null, val varchar collate "C", @@ -422,8 +641,8 @@ CREATE TABLE lapi_party_record_annotations ( -- -- This table is used in point-wise lookups. --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_update_meta ( - update_id varchar collate "C" not null, +create table lapi_update_meta ( + update_id bytea not null, event_offset bigint not null, publication_time bigint not null, record_time bigint not null, @@ -432,19 +651,19 @@ CREATE TABLE lapi_update_meta ( event_sequential_id_last bigint not null ); -CREATE INDEX lapi_update_meta_event_offset_idx ON lapi_update_meta USING btree (event_offset); -CREATE INDEX lapi_update_meta_uid_idx ON lapi_update_meta USING hash (update_id); -CREATE INDEX lapi_update_meta_publication_time_idx ON lapi_update_meta USING btree (publication_time, event_offset); -CREATE INDEX lapi_update_meta_synchronizer_record_time_offset_idx ON lapi_update_meta USING btree (synchronizer_id, record_time, event_offset); -CREATE INDEX lapi_update_meta_synchronizer_offset_idx ON lapi_update_meta USING btree (synchronizer_id, event_offset); +create index lapi_update_meta_event_offset_idx on lapi_update_meta using btree (event_offset); +create index lapi_update_meta_uid_idx on lapi_update_meta using hash (update_id); +create index lapi_update_meta_publication_time_idx on lapi_update_meta using btree (publication_time, event_offset); +create index lapi_update_meta_synchronizer_record_time_offset_idx on lapi_update_meta using btree (synchronizer_id, record_time, event_offset); +create index lapi_update_meta_synchronizer_offset_idx on lapi_update_meta using btree (synchronizer_id, event_offset); --------------------------------------------------------------------------------------------------- -- User entries -- -- This table stores user data used in the ledger api user management service. --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_users ( - internal_id serial primary key, +create table lapi_users ( + internal_id integer generated always as identity primary key, user_id varchar collate "C" not null unique, primary_party varchar collate "C", created_at bigint not null, @@ -458,7 +677,7 @@ CREATE TABLE lapi_users ( -- -- This table stores user rights used in the ledger api user management service. 
--------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_user_rights ( +create table lapi_user_rights ( user_internal_id integer not null references lapi_users (internal_id) on delete cascade, user_right integer not null, for_party varchar collate "C", @@ -466,22 +685,22 @@ CREATE TABLE lapi_user_rights ( unique (user_internal_id, user_right, for_party) ); -CREATE UNIQUE INDEX lapi_user_rights_user_internal_id_user_right_idx - ON lapi_user_rights USING btree (user_internal_id, user_right) - WHERE (for_party is null); +create unique index lapi_user_rights_user_internal_id_user_right_idx + on lapi_user_rights using btree (user_internal_id, user_right) + where (for_party is null); -INSERT INTO lapi_users(user_id, primary_party, created_at) VALUES ('participant_admin', null, 0); -INSERT INTO lapi_user_rights(user_internal_id, user_right, for_party, granted_at) -SELECT internal_id, 1, null, 0 -FROM lapi_users -WHERE user_id = 'participant_admin'; +insert into lapi_users(user_id, primary_party, created_at) values ('participant_admin', null, 0); +insert into lapi_user_rights(user_internal_id, user_right, for_party, granted_at) +select internal_id, 1, null, 0 +from lapi_users +where user_id = 'participant_admin'; --------------------------------------------------------------------------------------------------- -- User annotations -- -- This table stores additional per user data used in the ledger api user management service. --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_user_annotations ( +create table lapi_user_annotations ( internal_id integer not null references lapi_users (internal_id) on delete cascade, name varchar collate "C" not null, val varchar collate "C", @@ -494,7 +713,7 @@ CREATE TABLE lapi_user_annotations ( -- -- A table for tracking party allocation submissions --------------------------------------------------------------------------------------------------- -CREATE TABLE lapi_party_entries ( +create table lapi_party_entries ( -- The ledger end at the time when the party allocation was added ledger_offset bigint not null, recorded_at bigint not null, --with timezone @@ -526,90 +745,104 @@ CREATE TABLE lapi_party_entries ( ); -- Index for retrieving the party allocation entry by submission id per participant -CREATE INDEX lapi_party_entries_idx ON lapi_party_entries USING btree (submission_id); +create index lapi_party_entries_idx on lapi_party_entries using btree (submission_id); -CREATE INDEX lapi_party_entries_party_and_ledger_offset_idx ON lapi_party_entries USING btree (party, ledger_offset); +create index lapi_party_entries_party_and_ledger_offset_idx on lapi_party_entries using btree (party, ledger_offset); -CREATE INDEX lapi_party_entries_party_id_and_ledger_offset_idx ON lapi_party_entries USING btree (party_id, ledger_offset); +create index lapi_party_entries_party_id_and_ledger_offset_idx on lapi_party_entries using btree (party_id, ledger_offset); -CREATE TABLE lapi_pe_assign_id_filter_stakeholder ( +-- deprecated, to be removed. 
See #28008 +create table lapi_pe_assign_id_filter_stakeholder ( event_sequential_id bigint not null, template_id integer not null, - party_id integer not null + party_id integer not null, + first_per_sequential_id boolean ); -CREATE INDEX lapi_pe_assign_id_filter_stakeholder_pts_idx ON lapi_pe_assign_id_filter_stakeholder(party_id, template_id, event_sequential_id); -CREATE INDEX lapi_pe_assign_id_filter_stakeholder_ps_idx ON lapi_pe_assign_id_filter_stakeholder(party_id, event_sequential_id); -CREATE INDEX lapi_pe_assign_id_filter_stakeholder_ts_idx ON lapi_pe_assign_id_filter_stakeholder(template_id, event_sequential_id); -CREATE INDEX lapi_pe_assign_id_filter_stakeholder_s_idx ON lapi_pe_assign_id_filter_stakeholder(event_sequential_id); +create index lapi_pe_assign_id_filter_stakeholder_pts_idx on lapi_pe_assign_id_filter_stakeholder using btree (party_id, template_id, event_sequential_id); +create index lapi_pe_assign_id_filter_stakeholder_ps_idx on lapi_pe_assign_id_filter_stakeholder using btree (party_id, event_sequential_id); +create index lapi_pe_assign_id_filter_stakeholder_ts_idx on lapi_pe_assign_id_filter_stakeholder using btree (template_id, event_sequential_id) where first_per_sequential_id; +create index lapi_pe_assign_id_filter_stakeholder_s_idx on lapi_pe_assign_id_filter_stakeholder using btree (event_sequential_id, first_per_sequential_id); -CREATE TABLE lapi_pe_consuming_id_filter_non_stakeholder_informee ( +-- deprecated, to be removed. See #28008 +create table lapi_pe_consuming_id_filter_non_stakeholder_informee ( event_sequential_id bigint not null, template_id integer not null, - party_id integer not null + party_id integer not null, + first_per_sequential_id boolean ); -CREATE INDEX lapi_pe_consuming_id_filter_non_stakeholder_informee_pts_idx ON lapi_pe_consuming_id_filter_non_stakeholder_informee USING btree (party_id, template_id, event_sequential_id); -CREATE INDEX lapi_pe_consuming_id_filter_non_stakeholder_informee_ps_idx ON lapi_pe_consuming_id_filter_non_stakeholder_informee USING btree (party_id, event_sequential_id); -CREATE INDEX lapi_pe_consuming_id_filter_non_stakeholder_informee_ts_idx ON lapi_pe_consuming_id_filter_non_stakeholder_informee USING btree (template_id, event_sequential_id); -CREATE INDEX lapi_pe_consuming_id_filter_non_stakeholder_informee_s_idx ON lapi_pe_consuming_id_filter_non_stakeholder_informee USING btree (event_sequential_id); +create index lapi_pe_consuming_id_filter_non_stakeholder_informee_pts_idx on lapi_pe_consuming_id_filter_non_stakeholder_informee using btree (party_id, template_id, event_sequential_id); +create index lapi_pe_consuming_id_filter_non_stakeholder_informee_ps_idx on lapi_pe_consuming_id_filter_non_stakeholder_informee using btree (party_id, event_sequential_id); +create index lapi_pe_consuming_id_filter_non_stakeholder_informee_ts_idx on lapi_pe_consuming_id_filter_non_stakeholder_informee using btree (template_id, event_sequential_id) where first_per_sequential_id; +create index lapi_pe_consuming_id_filter_non_stakeholder_informee_s_idx on lapi_pe_consuming_id_filter_non_stakeholder_informee using btree (event_sequential_id, first_per_sequential_id); -CREATE TABLE lapi_pe_consuming_id_filter_stakeholder ( +-- deprecated, to be removed. 
See #28008 +create table lapi_pe_consuming_id_filter_stakeholder ( event_sequential_id bigint not null, template_id integer not null, - party_id integer not null + party_id integer not null, + first_per_sequential_id boolean ); -CREATE INDEX lapi_pe_consuming_id_filter_stakeholder_ps_idx ON lapi_pe_consuming_id_filter_stakeholder USING btree (party_id, event_sequential_id); -CREATE INDEX lapi_pe_consuming_id_filter_stakeholder_pts_idx ON lapi_pe_consuming_id_filter_stakeholder USING btree (party_id, template_id, event_sequential_id); -CREATE INDEX lapi_pe_consuming_id_filter_stakeholder_ts_idx ON lapi_pe_consuming_id_filter_stakeholder USING btree (template_id, event_sequential_id); -CREATE INDEX lapi_pe_consuming_id_filter_stakeholder_s_idx ON lapi_pe_consuming_id_filter_stakeholder USING btree (event_sequential_id); +create index lapi_pe_consuming_id_filter_stakeholder_ps_idx on lapi_pe_consuming_id_filter_stakeholder using btree (party_id, event_sequential_id); +create index lapi_pe_consuming_id_filter_stakeholder_pts_idx on lapi_pe_consuming_id_filter_stakeholder using btree (party_id, template_id, event_sequential_id); +create index lapi_pe_consuming_id_filter_stakeholder_ts_idx on lapi_pe_consuming_id_filter_stakeholder using btree (template_id, event_sequential_id) where first_per_sequential_id; +create index lapi_pe_consuming_id_filter_stakeholder_s_idx on lapi_pe_consuming_id_filter_stakeholder using btree (event_sequential_id, first_per_sequential_id); -CREATE TABLE lapi_pe_create_id_filter_non_stakeholder_informee ( +-- deprecated, to be removed. See #28008 +create table lapi_pe_create_id_filter_non_stakeholder_informee ( event_sequential_id bigint not null, template_id integer not null, - party_id integer not null + party_id integer not null, + first_per_sequential_id boolean ); -CREATE INDEX lapi_pe_create_id_filter_non_stakeholder_informee_pts_idx ON lapi_pe_create_id_filter_non_stakeholder_informee USING btree (party_id, template_id, event_sequential_id); -CREATE INDEX lapi_pe_create_id_filter_non_stakeholder_informee_ps_idx ON lapi_pe_create_id_filter_non_stakeholder_informee USING btree (party_id, event_sequential_id); -CREATE INDEX lapi_pe_create_id_filter_non_stakeholder_informee_ts_idx ON lapi_pe_create_id_filter_non_stakeholder_informee USING btree (template_id, event_sequential_id); -CREATE INDEX lapi_pe_create_id_filter_non_stakeholder_informee_s_idx ON lapi_pe_create_id_filter_non_stakeholder_informee USING btree (event_sequential_id); +create index lapi_pe_create_id_filter_non_stakeholder_informee_pts_idx on lapi_pe_create_id_filter_non_stakeholder_informee using btree (party_id, template_id, event_sequential_id); +create index lapi_pe_create_id_filter_non_stakeholder_informee_ps_idx on lapi_pe_create_id_filter_non_stakeholder_informee using btree (party_id, event_sequential_id); +create index lapi_pe_create_id_filter_non_stakeholder_informee_ts_idx on lapi_pe_create_id_filter_non_stakeholder_informee using btree (template_id, event_sequential_id) where first_per_sequential_id; +create index lapi_pe_create_id_filter_non_stakeholder_informee_s_idx on lapi_pe_create_id_filter_non_stakeholder_informee using btree (event_sequential_id, first_per_sequential_id); -CREATE TABLE lapi_pe_create_id_filter_stakeholder ( +-- deprecated, to be removed. 
See #28008 +create table lapi_pe_create_id_filter_stakeholder ( event_sequential_id bigint not null, template_id integer not null, - party_id integer not null + party_id integer not null, + first_per_sequential_id boolean ); -CREATE INDEX lapi_pe_create_id_filter_stakeholder_ps_idx ON lapi_pe_create_id_filter_stakeholder USING btree (party_id, event_sequential_id); -CREATE INDEX lapi_pe_create_id_filter_stakeholder_pts_idx ON lapi_pe_create_id_filter_stakeholder USING btree (party_id, template_id, event_sequential_id); -CREATE INDEX lapi_pe_create_id_filter_stakeholder_ts_idx ON lapi_pe_create_id_filter_stakeholder USING btree (template_id, event_sequential_id); -CREATE INDEX lapi_pe_create_id_filter_stakeholder_s_idx ON lapi_pe_create_id_filter_stakeholder USING btree (event_sequential_id); +create index lapi_pe_create_id_filter_stakeholder_ps_idx on lapi_pe_create_id_filter_stakeholder using btree (party_id, event_sequential_id); +create index lapi_pe_create_id_filter_stakeholder_pts_idx on lapi_pe_create_id_filter_stakeholder using btree (party_id, template_id, event_sequential_id); +create index lapi_pe_create_id_filter_stakeholder_ts_idx on lapi_pe_create_id_filter_stakeholder using btree (template_id, event_sequential_id) where first_per_sequential_id; +create index lapi_pe_create_id_filter_stakeholder_s_idx on lapi_pe_create_id_filter_stakeholder using btree (event_sequential_id, first_per_sequential_id); -CREATE TABLE lapi_pe_non_consuming_id_filter_informee ( +-- deprecated, to be removed. See #28008 +create table lapi_pe_non_consuming_id_filter_informee ( event_sequential_id bigint not null, template_id integer not null, - party_id integer not null + party_id integer not null, + first_per_sequential_id boolean ); -CREATE INDEX lapi_pe_non_consuming_id_filter_informee_pts_idx ON lapi_pe_non_consuming_id_filter_informee USING btree (party_id, template_id, event_sequential_id); -CREATE INDEX lapi_pe_non_consuming_id_filter_informee_ps_idx ON lapi_pe_non_consuming_id_filter_informee USING btree (party_id, event_sequential_id); -CREATE INDEX lapi_pe_non_consuming_id_filter_informee_ts_idx ON lapi_pe_non_consuming_id_filter_informee USING btree (template_id, event_sequential_id); -CREATE INDEX lapi_pe_non_consuming_id_filter_informee_s_idx ON lapi_pe_non_consuming_id_filter_informee USING btree (event_sequential_id); +create index lapi_pe_non_consuming_id_filter_informee_pts_idx on lapi_pe_non_consuming_id_filter_informee using btree (party_id, template_id, event_sequential_id); +create index lapi_pe_non_consuming_id_filter_informee_ps_idx on lapi_pe_non_consuming_id_filter_informee using btree (party_id, event_sequential_id); +create index lapi_pe_non_consuming_id_filter_informee_ts_idx on lapi_pe_non_consuming_id_filter_informee using btree (template_id, event_sequential_id) where first_per_sequential_id; +create index lapi_pe_non_consuming_id_filter_informee_s_idx on lapi_pe_non_consuming_id_filter_informee using btree (event_sequential_id, first_per_sequential_id); -CREATE TABLE lapi_pe_reassignment_id_filter_stakeholder ( +-- deprecated, to be removed. 
See #28008 +create table lapi_pe_reassignment_id_filter_stakeholder ( event_sequential_id bigint not null, template_id integer not null, - party_id integer not null + party_id integer not null, + first_per_sequential_id boolean ); -CREATE INDEX lapi_pe_reassignment_id_filter_stakeholder_ps_idx ON lapi_pe_reassignment_id_filter_stakeholder USING btree (party_id, event_sequential_id); -CREATE INDEX lapi_pe_reassignment_id_filter_stakeholder_pts_idx ON lapi_pe_reassignment_id_filter_stakeholder USING btree (party_id, template_id, event_sequential_id); -CREATE INDEX lapi_pe_reassignment_id_filter_stakeholder_ts_idx ON lapi_pe_reassignment_id_filter_stakeholder USING btree (template_id, event_sequential_id); -CREATE INDEX lapi_pe_reassignment_id_filter_stakeholder_s_idx ON lapi_pe_reassignment_id_filter_stakeholder USING btree (event_sequential_id); +create index lapi_pe_reassignment_id_filter_stakeholder_ps_idx on lapi_pe_reassignment_id_filter_stakeholder using btree (party_id, event_sequential_id); +create index lapi_pe_reassignment_id_filter_stakeholder_pts_idx on lapi_pe_reassignment_id_filter_stakeholder using btree (party_id, template_id, event_sequential_id); +create index lapi_pe_reassignment_id_filter_stakeholder_ts_idx on lapi_pe_reassignment_id_filter_stakeholder using btree (template_id, event_sequential_id) where first_per_sequential_id; +create index lapi_pe_reassignment_id_filter_stakeholder_s_idx on lapi_pe_reassignment_id_filter_stakeholder using btree (event_sequential_id, first_per_sequential_id); -CREATE TABLE lapi_string_interning ( +create table lapi_string_interning ( internal_id integer primary key not null, external_string varchar collate "C" ); diff --git a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_1__lapi_3.0_views.sha256 b/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_1__lapi_3.0_views.sha256 index 801366ff9c..354277ef75 100644 --- a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_1__lapi_3.0_views.sha256 +++ b/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_1__lapi_3.0_views.sha256 @@ -1 +1 @@ -01ffddafc7becab230f3c9ee29ef30adb39ad04d2a8d19622d7531569e81dac6 +a8ee6b0d735fc518a7744168da686cf18a069e8ad87e79104fb48c2be533c6b8 diff --git a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_1__lapi_3.0_views.sql b/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_1__lapi_3.0_views.sql index b00082d6a1..2e049a3f90 100644 --- a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_1__lapi_3.0_views.sql +++ b/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_1__lapi_3.0_views.sql @@ -65,6 +65,27 @@ $$ immutable returns null on null input; +-- convert the byte event_type representation to textual +create or replace function debug.lapi_event_type(smallint) returns varchar as +$$ +select + case + when $1 = 1 then 'Activate-Create' + when $1 = 2 then 'Activate-Assign' + when $1 = 3 then 'Deactivate-Consuming-Exercise' + when $1 = 4 then 'Deactivate-Unassign' + when $1 = 5 then 'Witnessed-Non-Consuming-Exercise' + when $1 = 6 then 'Witnessed-Create' + when $1 = 7 then 'Witnessed-Consuming-Exercise' + when $1 = 8 then 'Topology-PartyToParticipant' + when $1 is null then 'None' + else $1::text + end; +$$ + language sql + immutable + called on null input; + -- resolve a ledger api interned string create or replace 
function debug.resolve_lapi_interned_string(integer) returns varchar as $$ @@ -75,9 +96,15 @@ $$ returns null on null input; -- resolve multiple ledger api interned strings -create or replace function debug.resolve_lapi_interned_strings(integer[]) returns varchar[] as +create or replace function debug.resolve_lapi_interned_strings(input bytea) returns varchar[] as $$ -select array_agg(debug.resolve_lapi_interned_string(s)) from unnest($1) as s; +select array_agg(debug.resolve_lapi_interned_string( + get_byte(input, i)::int << 24 | + get_byte(input, i + 1)::int << 16 | + get_byte(input, i + 2)::int << 8 | + get_byte(input, i + 3)::int + )) +from generate_series(1, length(input) - 1, 4) as s(i); $$ language sql stable @@ -116,7 +143,7 @@ create or replace view debug.lapi_command_completions as user_id, debug.resolve_lapi_interned_strings(submitters) as submitters, command_id, - update_id, + lower(encode(update_id, 'hex')) as update_id, submission_id, deduplication_offset, deduplication_duration_seconds, @@ -130,11 +157,157 @@ create or replace view debug.lapi_command_completions as lower(encode(trace_context, 'hex')) as trace_context from lapi_command_completions; +create or replace view debug.lapi_events_activate_contract as + select + -- update related columns + event_offset, + lower(encode(update_id, 'hex')) as update_id, + workflow_id, + command_id, + debug.resolve_lapi_interned_strings(submitters) as submitters, + debug.canton_timestamp(record_time) as record_time, + debug.resolve_lapi_interned_string(synchronizer_id) as synchronizer_id, + lower(encode(trace_context, 'hex')) as trace_context, + lower(encode(external_transaction_hash, 'hex')) as external_transaction_hash, + + -- event related columns + debug.lapi_event_type(event_type) as event_type, + event_sequential_id, + node_id, + debug.resolve_lapi_interned_strings(additional_witnesses) as additional_witnesses, + debug.resolve_lapi_interned_string(source_synchronizer_id) as source_synchronizer_id, + reassignment_counter, + lower(encode(reassignment_id, 'hex')) as reassignment_id, + debug.resolve_lapi_interned_string(representative_package_id) as representative_package_id, + + -- contract related columns + internal_contract_id, + create_key_hash + from lapi_events_activate_contract; + +create or replace view debug.lapi_filter_activate_stakeholder as + select + event_sequential_id, + debug.resolve_lapi_interned_string(template_id) as template_id, + debug.resolve_lapi_interned_string(party_id) as party_id, + first_per_sequential_id + from lapi_filter_activate_stakeholder; + +create or replace view debug.lapi_filter_activate_witness as + select + event_sequential_id, + debug.resolve_lapi_interned_string(template_id) as template_id, + debug.resolve_lapi_interned_string(party_id) as party_id, + first_per_sequential_id + from lapi_filter_activate_witness; + +create or replace view debug.lapi_events_deactivate_contract as + select + -- update related columns + event_offset, + lower(encode(update_id, 'hex')) as update_id, + workflow_id, + command_id, + debug.resolve_lapi_interned_strings(submitters) as submitters, + debug.canton_timestamp(record_time) as record_time, + debug.resolve_lapi_interned_string(synchronizer_id) as synchronizer_id, + lower(encode(trace_context, 'hex')) as trace_context, + lower(encode(external_transaction_hash, 'hex')) as external_transaction_hash, + + -- event related columns + debug.lapi_event_type(event_type) as event_type, + event_sequential_id, + node_id, + deactivated_event_sequential_id, + 
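+ -- assumed semantics (inferred from the partial deactivation-lookup indexes in the
+ -- tables migration, not stated there explicitly): deactivated_event_sequential_id
+ -- references the sequential id of the activation event that this deactivation closes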
debug.resolve_lapi_interned_strings(additional_witnesses) as additional_witnesses, + debug.resolve_lapi_interned_string(exercise_choice) as exercise_choice, + debug.resolve_lapi_interned_string(exercise_choice_interface) as exercise_choice_interface, + lower(encode(exercise_argument, 'hex')) as exercise_argument, + lower(encode(exercise_result, 'hex')) as exercise_result, + debug.resolve_lapi_interned_strings(exercise_actors) as exercise_actors, + exercise_last_descendant_node_id, + debug.lapi_compression(exercise_argument_compression) as exercise_argument_compression, + debug.lapi_compression(exercise_result_compression) as exercise_result_compression, + lower(encode(reassignment_id, 'hex')) as reassignment_id, + assignment_exclusivity, + debug.resolve_lapi_interned_string(target_synchronizer_id) as target_synchronizer_id, + reassignment_counter, + + -- contract related columns + lower(encode(contract_id, 'hex')) as contract_id, + internal_contract_id, + debug.resolve_lapi_interned_string(template_id) as template_id, + debug.resolve_lapi_interned_string(package_id) as package_id, + debug.resolve_lapi_interned_strings(stakeholders) as stakeholders, + debug.canton_timestamp(ledger_effective_time) as ledger_effective_time + from lapi_events_deactivate_contract; + +create or replace view debug.lapi_filter_deactivate_stakeholder as + select + event_sequential_id, + debug.resolve_lapi_interned_string(template_id) as template_id, + debug.resolve_lapi_interned_string(party_id) as party_id, + first_per_sequential_id + from lapi_filter_deactivate_stakeholder; + +create or replace view debug.lapi_filter_deactivate_witness as + select + event_sequential_id, + debug.resolve_lapi_interned_string(template_id) as template_id, + debug.resolve_lapi_interned_string(party_id) as party_id, + first_per_sequential_id + from lapi_filter_deactivate_witness; + +create or replace view debug.lapi_events_various_witnessed as + select + -- update related columns + event_offset, + lower(encode(update_id, 'hex')) as update_id, + workflow_id, + command_id, + debug.resolve_lapi_interned_strings(submitters) as submitters, + debug.canton_timestamp(record_time) as record_time, + debug.resolve_lapi_interned_string(synchronizer_id) as synchronizer_id, + lower(encode(trace_context, 'hex')) as trace_context, + lower(encode(external_transaction_hash, 'hex')) as external_transaction_hash, + + -- event related columns + debug.lapi_event_type(event_type) as event_type, + event_sequential_id, + node_id, + debug.resolve_lapi_interned_strings(additional_witnesses) as additional_witnesses, + consuming, + debug.resolve_lapi_interned_string(exercise_choice) as exercise_choice, + debug.resolve_lapi_interned_string(exercise_choice_interface) as exercise_choice_interface, + lower(encode(exercise_argument, 'hex')) as exercise_argument, + lower(encode(exercise_result, 'hex')) as exercise_result, + debug.resolve_lapi_interned_strings(exercise_actors) as exercise_actors, + exercise_last_descendant_node_id, + debug.lapi_compression(exercise_argument_compression) as exercise_argument_compression, + debug.lapi_compression(exercise_result_compression) as exercise_result_compression, + debug.resolve_lapi_interned_string(representative_package_id) as representative_package_id, + + -- contract related columns + lower(encode(contract_id, 'hex')) as contract_id, + internal_contract_id, + debug.resolve_lapi_interned_string(template_id) as template_id, + debug.resolve_lapi_interned_string(package_id) as package_id, + 
debug.canton_timestamp(ledger_effective_time) as ledger_effective_time + from lapi_events_various_witnessed; + +create or replace view debug.lapi_filter_various_witness as + select + event_sequential_id, + debug.resolve_lapi_interned_string(template_id) as template_id, + debug.resolve_lapi_interned_string(party_id) as party_id, + first_per_sequential_id + from lapi_filter_various_witness; + create or replace view debug.lapi_events_assign as select event_sequential_id, event_offset, - update_id, + lower(encode(update_id, 'hex')) as update_id, workflow_id, command_id, debug.resolve_lapi_interned_string(submitter) as submitter, @@ -145,7 +318,7 @@ create or replace view debug.lapi_events_assign as debug.resolve_lapi_interned_strings(flat_event_witnesses) as flat_event_witnesses, debug.resolve_lapi_interned_string(source_synchronizer_id) as source_synchronizer_id, debug.resolve_lapi_interned_string(target_synchronizer_id) as target_synchronizer_id, - reassignment_id, + lower(encode(reassignment_id, 'hex')) as reassignment_id, reassignment_counter, lower(encode(create_argument, 'hex')) as create_argument, debug.resolve_lapi_interned_strings(create_signatories) as create_signatories, @@ -158,7 +331,8 @@ create or replace view debug.lapi_events_assign as lower(encode(authentication_data, 'hex')) as authentication_data, debug.resolve_lapi_interned_strings(create_key_maintainers) as create_key_maintainers, lower(encode(trace_context, 'hex')) as trace_context, - debug.canton_timestamp(record_time) as record_time + debug.canton_timestamp(record_time) as record_time, + internal_contract_id from lapi_events_assign; @@ -168,7 +342,7 @@ create or replace view debug.lapi_events_consuming_exercise as debug.canton_timestamp(ledger_effective_time) as ledger_effective_time, node_id, event_offset, - update_id, + lower(encode(update_id, 'hex')) as update_id, workflow_id, command_id, debug.resolve_lapi_interned_strings(submitters) as submitters, @@ -177,7 +351,8 @@ create or replace view debug.lapi_events_consuming_exercise as debug.resolve_lapi_interned_string(package_id) as package_id, debug.resolve_lapi_interned_strings(flat_event_witnesses) as flat_event_witnesses, debug.resolve_lapi_interned_strings(tree_event_witnesses) as tree_event_witnesses, - exercise_choice, + debug.resolve_lapi_interned_string(exercise_choice) as exercise_choice, + debug.resolve_lapi_interned_string(exercise_choice_interface) as exercise_choice_interface, lower(encode(exercise_argument, 'hex')) as exercise_argument, lower(encode(exercise_result, 'hex')) as exercise_result, debug.resolve_lapi_interned_strings(exercise_actors) as exercise_actors, @@ -187,7 +362,8 @@ create or replace view debug.lapi_events_consuming_exercise as debug.resolve_lapi_interned_string(synchronizer_id) as synchronizer_id, lower(encode(trace_context, 'hex')) as trace_context, debug.canton_timestamp(record_time) as record_time, - lower(encode(external_transaction_hash, 'hex')) as external_transaction_hash + lower(encode(external_transaction_hash, 'hex')) as external_transaction_hash, + deactivated_event_sequential_id from lapi_events_consuming_exercise; create or replace view debug.lapi_events_create as @@ -196,13 +372,14 @@ create or replace view debug.lapi_events_create as debug.canton_timestamp(ledger_effective_time) as ledger_effective_time, node_id, event_offset, - update_id, + lower(encode(update_id, 'hex')) as update_id, workflow_id, command_id, debug.resolve_lapi_interned_strings(submitters) as submitters, lower(encode(contract_id, 'hex')) as 
contract_id, debug.resolve_lapi_interned_string(template_id) as template_id, debug.resolve_lapi_interned_string(package_id) as package_id, + debug.resolve_lapi_interned_string(representative_package_id) as representative_package_id, debug.resolve_lapi_interned_strings(flat_event_witnesses) as flat_event_witnesses, debug.resolve_lapi_interned_strings(tree_event_witnesses) as tree_event_witnesses, lower(encode(create_argument, 'hex')) as create_argument, @@ -217,7 +394,8 @@ create or replace view debug.lapi_events_create as debug.resolve_lapi_interned_strings(create_key_maintainers) as create_key_maintainers, lower(encode(trace_context, 'hex')) as trace_context, debug.canton_timestamp(record_time) as record_time, - lower(encode(external_transaction_hash, 'hex')) as external_transaction_hash + lower(encode(external_transaction_hash, 'hex')) as external_transaction_hash, + internal_contract_id from lapi_events_create; create or replace view debug.lapi_events_non_consuming_exercise as @@ -226,7 +404,7 @@ create or replace view debug.lapi_events_non_consuming_exercise as debug.canton_timestamp(ledger_effective_time) as ledger_effective_time, node_id, event_offset, - update_id, + lower(encode(update_id, 'hex')) as update_id, workflow_id, command_id, debug.resolve_lapi_interned_strings(submitters) as submitters, @@ -234,7 +412,8 @@ create or replace view debug.lapi_events_non_consuming_exercise as debug.resolve_lapi_interned_string(template_id) as template_id, debug.resolve_lapi_interned_string(package_id) as package_id, debug.resolve_lapi_interned_strings(tree_event_witnesses) as tree_event_witnesses, - exercise_choice, + debug.resolve_lapi_interned_string(exercise_choice) as exercise_choice, + debug.resolve_lapi_interned_string(exercise_choice_interface) as exercise_choice_interface, lower(encode(exercise_argument, 'hex')) as exercise_argument, lower(encode(exercise_result, 'hex')) as exercise_result, debug.resolve_lapi_interned_strings(exercise_actors) as exercise_actors, @@ -252,7 +431,7 @@ create or replace view debug.lapi_events_unassign as select event_sequential_id, event_offset, - update_id, + lower(encode(update_id, 'hex')) as update_id, workflow_id, command_id, debug.resolve_lapi_interned_string(submitter) as submitter, @@ -263,18 +442,19 @@ create or replace view debug.lapi_events_unassign as debug.resolve_lapi_interned_strings(flat_event_witnesses) as flat_event_witnesses, debug.resolve_lapi_interned_string(source_synchronizer_id) as source_synchronizer_id, debug.resolve_lapi_interned_string(target_synchronizer_id) as target_synchronizer_id, - reassignment_id, + lower(encode(reassignment_id, 'hex')) as reassignment_id, reassignment_counter, assignment_exclusivity, lower(encode(trace_context, 'hex')) as trace_context, - debug.canton_timestamp(record_time) as record_time + debug.canton_timestamp(record_time) as record_time, + deactivated_event_sequential_id from lapi_events_unassign; create or replace view debug.lapi_events_party_to_participant as select event_sequential_id, event_offset, - update_id, + lower(encode(update_id, 'hex')) as update_id, debug.resolve_lapi_interned_string(party_id) as party_id, participant_id, participant_permission, @@ -312,7 +492,7 @@ create or replace view debug.lapi_party_record_annotations as create or replace view debug.lapi_update_meta as select - update_id, + lower(encode(update_id, 'hex')) as update_id, event_offset, debug.canton_timestamp(publication_time) as publication_time, debug.canton_timestamp(record_time) as record_time, @@ -364,49 +544,56 @@ 
create or replace view debug.lapi_pe_assign_id_filter_stakeholder as select event_sequential_id, debug.resolve_lapi_interned_string(template_id) as template_id, - debug.resolve_lapi_interned_string(party_id) as party_id + debug.resolve_lapi_interned_string(party_id) as party_id, + first_per_sequential_id from lapi_pe_assign_id_filter_stakeholder; create or replace view debug.lapi_pe_consuming_id_filter_non_stakeholder_informee as select event_sequential_id, debug.resolve_lapi_interned_string(template_id) as template_id, - debug.resolve_lapi_interned_string(party_id) as party_id + debug.resolve_lapi_interned_string(party_id) as party_id, + first_per_sequential_id from lapi_pe_consuming_id_filter_non_stakeholder_informee; create or replace view debug.lapi_pe_consuming_id_filter_stakeholder as select event_sequential_id, debug.resolve_lapi_interned_string(template_id) as template_id, - debug.resolve_lapi_interned_string(party_id) as party_id + debug.resolve_lapi_interned_string(party_id) as party_id, + first_per_sequential_id from lapi_pe_consuming_id_filter_stakeholder; create or replace view debug.lapi_pe_create_id_filter_non_stakeholder_informee as select event_sequential_id, debug.resolve_lapi_interned_string(template_id) as template_id, - debug.resolve_lapi_interned_string(party_id) as party_id + debug.resolve_lapi_interned_string(party_id) as party_id, + first_per_sequential_id from lapi_pe_create_id_filter_non_stakeholder_informee; create or replace view debug.lapi_pe_create_id_filter_stakeholder as select event_sequential_id, debug.resolve_lapi_interned_string(template_id) as template_id, - debug.resolve_lapi_interned_string(party_id) as party_id + debug.resolve_lapi_interned_string(party_id) as party_id, + first_per_sequential_id from lapi_pe_create_id_filter_stakeholder; create or replace view debug.lapi_pe_non_consuming_id_filter_informee as select event_sequential_id, debug.resolve_lapi_interned_string(template_id) as template_id, - debug.resolve_lapi_interned_string(party_id) as party_id + debug.resolve_lapi_interned_string(party_id) as party_id, + first_per_sequential_id from lapi_pe_non_consuming_id_filter_informee; create or replace view debug.lapi_pe_reassignment_id_filter_stakeholder as select event_sequential_id, debug.resolve_lapi_interned_string(template_id) as template_id, - debug.resolve_lapi_interned_string(party_id) as party_id + debug.resolve_lapi_interned_string(party_id) as party_id, + first_per_sequential_id from lapi_pe_reassignment_id_filter_stakeholder; create or replace view debug.lapi_string_interning as diff --git a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V7_0__mediator_topology_initialization_fix.sha256 b/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V7_0__mediator_topology_initialization_fix.sha256 deleted file mode 100644 index 1ca226852c..0000000000 --- a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V7_0__mediator_topology_initialization_fix.sha256 +++ /dev/null @@ -1 +0,0 @@ -d31ba5a482b2b6bac90ffce595fdd68c7174fcfbd0c4814b0ad5324135dca676 diff --git a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V7_0__mediator_topology_initialization_fix.sql b/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V7_0__mediator_topology_initialization_fix.sql deleted file mode 100644 index 2e8d54e171..0000000000 --- 
a/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V7_0__mediator_topology_initialization_fix.sql +++ /dev/null @@ -1,21 +0,0 @@ --- Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. --- SPDX-License-Identifier: Apache-2.0 - --- add the column with the default value true, so that --- any existing connection is considered initialized -alter table mediator_synchronizer_configuration - add column is_topology_initialized bool not null default true; - --- now set the default back to false -alter table mediator_synchronizer_configuration - alter column is_topology_initialized set default false; - - -create or replace view debug.mediator_synchronizer_configuration as -select - lock, - synchronizer_id, - static_synchronizer_parameters, - sequencer_connection, - is_topology_initialized -from mediator_synchronizer_configuration; diff --git a/canton/community/common/src/main/scala/com/daml/lf/CantonOnly.scala b/canton/community/common/src/main/scala/com/daml/lf/CantonOnly.scala index 37fb04d4e1..700afacbca 100644 --- a/canton/community/common/src/main/scala/com/daml/lf/CantonOnly.scala +++ b/canton/community/common/src/main/scala/com/daml/lf/CantonOnly.scala @@ -6,8 +6,8 @@ package com.digitalasset.daml.lf import com.digitalasset.canton.protocol.{ LfNode, LfNodeId, + LfSerializationVersion, LfTransaction, - LfTransactionVersion, LfVersionedTransaction, } import com.digitalasset.daml.lf.data.ImmArray @@ -25,7 +25,7 @@ object CantonOnly { nodes: Map[LfNodeId, LfNode], roots: ImmArray[LfNodeId], ): LfVersionedTransaction = - LfTransactionVersion.asVersionedTransaction(LfTransaction(nodes, roots)) + LfSerializationVersion.asVersionedTransaction(LfTransaction(nodes, roots)) def tryBuildCompiledPackages( darMap: Map[PackageId, Ast.Package], diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/common/sequencer/grpc/SequencerInfoLoader.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/common/sequencer/grpc/SequencerInfoLoader.scala index 612769f7c9..69786b85d3 100644 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/common/sequencer/grpc/SequencerInfoLoader.scala +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/common/sequencer/grpc/SequencerInfoLoader.scala @@ -27,6 +27,7 @@ import com.digitalasset.canton.sequencing.protocol.{HandshakeRequest, HandshakeR import com.digitalasset.canton.sequencing.{ GrpcSequencerConnection, SequencerConnection, + SequencerConnectionPoolDelays, SequencerConnectionValidation, SequencerConnections, SubmissionRequestAmplification, @@ -221,6 +222,7 @@ class SequencerInfoLoader( sequencerTrustThreshold = sequencerConnections.sequencerTrustThreshold, sequencerLivenessMargin = sequencerConnections.sequencerLivenessMargin, submissionRequestAmplification = sequencerConnections.submissionRequestAmplification, + sequencerConnectionPoolDelays = sequencerConnections.sequencerConnectionPoolDelays, sequencerConnectionValidation = sequencerConnectionValidation, expectedSynchronizerId = expectedSynchronizerId, ) @@ -633,6 +635,7 @@ object SequencerInfoLoader { sequencerTrustThreshold: PositiveInt, sequencerLivenessMargin: NonNegativeInt, submissionRequestAmplification: SubmissionRequestAmplification, + sequencerConnectionPoolDelays: SequencerConnectionPoolDelays, sequencerConnectionValidation: SequencerConnectionValidation, expectedSynchronizerId: Option[PhysicalSynchronizerId], )( @@ -674,6 +677,7 @@ object SequencerInfoLoader { 
sequencerTrustThreshold, sequencerLivenessMargin, submissionRequestAmplification, + sequencerConnectionPoolDelays, ) .leftMap(SequencerInfoLoaderError.FailedToConnectToSequencers.apply) .map(connections => diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/config/ConfigDefaults.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/config/ConfigDefaults.scala index f7f1dd50a8..3969e47213 100644 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/config/ConfigDefaults.scala +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/config/ConfigDefaults.scala @@ -40,6 +40,7 @@ class DefaultPorts { /** Participant node default ports */ val ledgerApiPort = defaultPortStart(4001) val participantAdminApiPort = defaultPortStart(4002) + val jsonLedgerApiPort = defaultPortStart(4003) // user-manual-entry-end: participant default ports /** External sequencer node x default ports (enterprise-only) */ diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/config/NodeMonitoringConfig.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/config/NodeMonitoringConfig.scala index 04184de6b0..dfbe96698f 100644 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/config/NodeMonitoringConfig.scala +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/config/NodeMonitoringConfig.scala @@ -31,6 +31,7 @@ final case class GrpcHealthServerConfig( override def maxInboundMessageSize: NonNegativeInt = ServerConfig.defaultMaxInboundMessageSize override val maxTokenLifetime: NonNegativeDuration = NonNegativeDuration(Duration.Inf) override val jwksCacheConfig: JwksCacheConfig = JwksCacheConfig() + override def stream: Option[StreamLimitConfig] = None def toRemoteConfig: FullClientConfig = FullClientConfig(address, port, keepAliveClient = keepAliveServer.map(_.clientConfigFor)) } diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/config/TopologyConfig.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/config/TopologyConfig.scala index 33bfb1eaa4..ca2775f73c 100644 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/config/TopologyConfig.scala +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/config/TopologyConfig.scala @@ -19,6 +19,12 @@ import com.digitalasset.canton.config.manual.CantonConfigValidatorDerivation * The maximum number of topology transactions sent in a topology transaction broadcast * @param broadcastRetryDelay * The delay after which a failed dispatch cycle will be triggered again. + * @param validateInitialTopologySnapshot + * Whether or not the node will validate the initial topology snapshot when onboarding to a + * synchronizer. 
+ * @param disableOptionalTopologyChecks + * if true (default is false), don't run the optional checks which prevent accidental damage to + * this node */ final case class TopologyConfig( topologyTransactionRegistrationTimeout: NonNegativeFiniteDuration = @@ -27,6 +33,8 @@ final case class TopologyConfig( defaultTopologyTransactionObservationTimeout, broadcastBatchSize: PositiveInt = defaultBroadcastBatchSize, broadcastRetryDelay: NonNegativeFiniteDuration = defaultBroadcastRetryDelay, + validateInitialTopologySnapshot: Boolean = true, + disableOptionalTopologyChecks: Boolean = false, ) extends UniformCantonConfigValidation object TopologyConfig { diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/crypto/LedgerApiCryptoConversions.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/crypto/LedgerApiCryptoConversions.scala index 32545794f3..7b6044e363 100644 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/crypto/LedgerApiCryptoConversions.scala +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/crypto/LedgerApiCryptoConversions.scala @@ -3,35 +3,100 @@ package com.digitalasset.canton.crypto -import com.daml.ledger.api.v2.interactive.interactive_submission_service as iss +import com.daml.ledger.api.v2 +import com.daml.ledger.api.v2.crypto as lapiCrypto import io.scalaland.chimney.Transformer /** Utility methods to convert between Canton crypto classes and their equivalent on the ledger API. */ object LedgerApiCryptoConversions { implicit val cantonToLAPISignatureFormatTransformer - : Transformer[v30.SignatureFormat, iss.SignatureFormat] = { + : Transformer[v30.SignatureFormat, lapiCrypto.SignatureFormat] = { case v30.SignatureFormat.SIGNATURE_FORMAT_UNSPECIFIED => - iss.SignatureFormat.SIGNATURE_FORMAT_UNSPECIFIED - case v30.SignatureFormat.SIGNATURE_FORMAT_DER => iss.SignatureFormat.SIGNATURE_FORMAT_DER - case v30.SignatureFormat.SIGNATURE_FORMAT_CONCAT => iss.SignatureFormat.SIGNATURE_FORMAT_CONCAT - case v30.SignatureFormat.SIGNATURE_FORMAT_RAW => iss.SignatureFormat.SIGNATURE_FORMAT_RAW + lapiCrypto.SignatureFormat.SIGNATURE_FORMAT_UNSPECIFIED + case v30.SignatureFormat.SIGNATURE_FORMAT_DER => lapiCrypto.SignatureFormat.SIGNATURE_FORMAT_DER + case v30.SignatureFormat.SIGNATURE_FORMAT_CONCAT => + lapiCrypto.SignatureFormat.SIGNATURE_FORMAT_CONCAT + case v30.SignatureFormat.SIGNATURE_FORMAT_RAW => lapiCrypto.SignatureFormat.SIGNATURE_FORMAT_RAW case v30.SignatureFormat.SIGNATURE_FORMAT_SYMBOLIC => - iss.SignatureFormat.SIGNATURE_FORMAT_SYMBOLIC + lapiCrypto.SignatureFormat.SIGNATURE_FORMAT_SYMBOLIC case v30.SignatureFormat.Unrecognized(unrecognizedValue) => - iss.SignatureFormat.Unrecognized(unrecognizedValue) + lapiCrypto.SignatureFormat.Unrecognized(unrecognizedValue) } implicit val LAPIToCantonSignatureFormatTransformer - : Transformer[iss.SignatureFormat, v30.SignatureFormat] = { - case iss.SignatureFormat.SIGNATURE_FORMAT_UNSPECIFIED => + : Transformer[lapiCrypto.SignatureFormat, v30.SignatureFormat] = { + case lapiCrypto.SignatureFormat.SIGNATURE_FORMAT_UNSPECIFIED => v30.SignatureFormat.SIGNATURE_FORMAT_UNSPECIFIED - case iss.SignatureFormat.SIGNATURE_FORMAT_DER => v30.SignatureFormat.SIGNATURE_FORMAT_DER - case iss.SignatureFormat.SIGNATURE_FORMAT_CONCAT => v30.SignatureFormat.SIGNATURE_FORMAT_CONCAT - case iss.SignatureFormat.SIGNATURE_FORMAT_RAW => v30.SignatureFormat.SIGNATURE_FORMAT_RAW - case iss.SignatureFormat.SIGNATURE_FORMAT_SYMBOLIC => + case lapiCrypto.SignatureFormat.SIGNATURE_FORMAT_DER 
=> v30.SignatureFormat.SIGNATURE_FORMAT_DER + case lapiCrypto.SignatureFormat.SIGNATURE_FORMAT_CONCAT => + v30.SignatureFormat.SIGNATURE_FORMAT_CONCAT + case lapiCrypto.SignatureFormat.SIGNATURE_FORMAT_RAW => v30.SignatureFormat.SIGNATURE_FORMAT_RAW + case lapiCrypto.SignatureFormat.SIGNATURE_FORMAT_SYMBOLIC => v30.SignatureFormat.SIGNATURE_FORMAT_SYMBOLIC - case iss.SignatureFormat.Unrecognized(unrecognizedValue) => + case lapiCrypto.SignatureFormat.Unrecognized(unrecognizedValue) => v30.SignatureFormat.Unrecognized(unrecognizedValue) } + + implicit val LAPIToCantonSigningKeySpec + : Transformer[v2.crypto.SigningKeySpec, v30.SigningKeySpec] = { + case v2.crypto.SigningKeySpec.SIGNING_KEY_SPEC_UNSPECIFIED => + v30.SigningKeySpec.SIGNING_KEY_SPEC_UNSPECIFIED + case v2.crypto.SigningKeySpec.SIGNING_KEY_SPEC_EC_CURVE25519 => + v30.SigningKeySpec.SIGNING_KEY_SPEC_EC_CURVE25519 + case v2.crypto.SigningKeySpec.SIGNING_KEY_SPEC_EC_P256 => + v30.SigningKeySpec.SIGNING_KEY_SPEC_EC_P256 + case v2.crypto.SigningKeySpec.SIGNING_KEY_SPEC_EC_P384 => + v30.SigningKeySpec.SIGNING_KEY_SPEC_EC_P384 + case v2.crypto.SigningKeySpec.SIGNING_KEY_SPEC_EC_SECP256K1 => + v30.SigningKeySpec.SIGNING_KEY_SPEC_EC_SECP256K1 + case v2.crypto.SigningKeySpec.Unrecognized(x) => v30.SigningKeySpec.Unrecognized(x) + } + + implicit val CantonToLAPISigningKeySpec + : Transformer[v30.SigningKeySpec, v2.crypto.SigningKeySpec] = { + case v30.SigningKeySpec.SIGNING_KEY_SPEC_UNSPECIFIED => + v2.crypto.SigningKeySpec.SIGNING_KEY_SPEC_UNSPECIFIED + case v30.SigningKeySpec.SIGNING_KEY_SPEC_EC_CURVE25519 => + v2.crypto.SigningKeySpec.SIGNING_KEY_SPEC_EC_CURVE25519 + case v30.SigningKeySpec.SIGNING_KEY_SPEC_EC_P256 => + v2.crypto.SigningKeySpec.SIGNING_KEY_SPEC_EC_P256 + case v30.SigningKeySpec.SIGNING_KEY_SPEC_EC_P384 => + v2.crypto.SigningKeySpec.SIGNING_KEY_SPEC_EC_P384 + case v30.SigningKeySpec.SIGNING_KEY_SPEC_EC_SECP256K1 => + v2.crypto.SigningKeySpec.SIGNING_KEY_SPEC_EC_SECP256K1 + case v30.SigningKeySpec.Unrecognized(x) => v2.crypto.SigningKeySpec.Unrecognized(x) + } + + implicit val LAPIToCantonCryptoFormat + : Transformer[v2.crypto.CryptoKeyFormat, v30.CryptoKeyFormat] = { + case v2.crypto.CryptoKeyFormat.CRYPTO_KEY_FORMAT_UNSPECIFIED => + v30.CryptoKeyFormat.CRYPTO_KEY_FORMAT_UNSPECIFIED + case v2.crypto.CryptoKeyFormat.CRYPTO_KEY_FORMAT_DER => + v30.CryptoKeyFormat.CRYPTO_KEY_FORMAT_DER + case v2.crypto.CryptoKeyFormat.CRYPTO_KEY_FORMAT_RAW => + v30.CryptoKeyFormat.CRYPTO_KEY_FORMAT_RAW + case v2.crypto.CryptoKeyFormat.CRYPTO_KEY_FORMAT_DER_X509_SUBJECT_PUBLIC_KEY_INFO => + v30.CryptoKeyFormat.CRYPTO_KEY_FORMAT_DER_X509_SUBJECT_PUBLIC_KEY_INFO + case v2.crypto.CryptoKeyFormat.Unrecognized(x) => v30.CryptoKeyFormat.Unrecognized(x) + } + + implicit val CantonToLAPICryptFormat + : Transformer[v30.CryptoKeyFormat, v2.crypto.CryptoKeyFormat] = { + case v30.CryptoKeyFormat.CRYPTO_KEY_FORMAT_UNSPECIFIED => + v2.crypto.CryptoKeyFormat.CRYPTO_KEY_FORMAT_UNSPECIFIED + case v30.CryptoKeyFormat.CRYPTO_KEY_FORMAT_DER => + v2.crypto.CryptoKeyFormat.CRYPTO_KEY_FORMAT_DER + case v30.CryptoKeyFormat.CRYPTO_KEY_FORMAT_RAW => + v2.crypto.CryptoKeyFormat.CRYPTO_KEY_FORMAT_RAW + case v30.CryptoKeyFormat.CRYPTO_KEY_FORMAT_DER_X509_SUBJECT_PUBLIC_KEY_INFO => + v2.crypto.CryptoKeyFormat.CRYPTO_KEY_FORMAT_DER_X509_SUBJECT_PUBLIC_KEY_INFO + // not supported on the API + case v30.CryptoKeyFormat.CRYPTO_KEY_FORMAT_DER_PKCS8_PRIVATE_KEY_INFO => + v2.crypto.CryptoKeyFormat.Unrecognized(-1) + case v30.CryptoKeyFormat.CRYPTO_KEY_FORMAT_SYMBOLIC => + 
v2.crypto.CryptoKeyFormat.Unrecognized(-1) + case v30.CryptoKeyFormat.Unrecognized(x) => v2.crypto.CryptoKeyFormat.Unrecognized(x) + } + } diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/environment/BootstrapStage.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/environment/BootstrapStage.scala index a7cb917f0d..51b3ada0da 100644 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/environment/BootstrapStage.scala +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/environment/BootstrapStage.scala @@ -101,7 +101,7 @@ abstract class BootstrapStage[T <: CantonNode, StageResult <: BootstrapStageOrLe (for { result <- attempt() .leftMap { err => - logger.error(s"Startup of $description failed with $err") + logger.error(s"Attempting to run $description failed with $err") bootstrap.abortThisNodeOnStartupFailure() err } diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/environment/CantonNodeBootstrap.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/environment/CantonNodeBootstrap.scala index 4d5d4290d9..01b5881cd2 100644 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/environment/CantonNodeBootstrap.scala +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/environment/CantonNodeBootstrap.scala @@ -37,7 +37,6 @@ import com.digitalasset.canton.connection.v30.ApiInfoServiceGrpc import com.digitalasset.canton.crypto.* import com.digitalasset.canton.crypto.admin.grpc.GrpcVaultService import com.digitalasset.canton.crypto.admin.v30.VaultServiceGrpc -import com.digitalasset.canton.crypto.kms.KmsFactory import com.digitalasset.canton.crypto.store.{CryptoPrivateStoreError, CryptoPrivateStoreFactory} import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.discard.Implicits.DiscardOps @@ -196,7 +195,6 @@ final case class CantonNodeBootstrapCommonArguments[ metrics: M, storageFactory: StorageFactory, cryptoPrivateStoreFactory: CryptoPrivateStoreFactory, - kmsFactory: KmsFactory, futureSupervisor: FutureSupervisor, loggerFactory: NamedLoggerFactory, writeHealthDumpToFile: HealthDumpFunction, @@ -472,7 +470,6 @@ abstract class CantonNodeBootstrapImpl[ arguments.parameterConfig.cachingConfigs.publicKeyConversionCache, storage, arguments.cryptoPrivateStoreFactory, - arguments.kmsFactory, ReleaseProtocolVersion.latest, arguments.futureSupervisor, arguments.clock, @@ -725,7 +722,7 @@ abstract class CantonNodeBootstrapImpl[ val snapshotValidator = new InitialTopologySnapshotValidator( crypto.pureCrypto, temporaryTopologyStore, - this.timeouts, + validateInitialSnapshot = config.topology.validateInitialTopologySnapshot, this.loggerFactory, ) @@ -930,48 +927,6 @@ abstract class CantonNodeBootstrapImpl[ private val topologyManager: AuthorizedTopologyManager = createAuthorizedTopologyManager(nodeId, crypto, authorizedStore, storage) addCloseable(topologyManager) - adminServerRegistry - .addServiceU( - adminV30.TopologyManagerReadServiceGrpc - .bindService( - new GrpcTopologyManagerReadService( - member(nodeId), - temporaryStoreRegistry.stores() ++ sequencedTopologyStores :+ authorizedStore, - crypto, - lookupTopologyClient, - lookupActivePSId, - processingTimeout = parameters.processingTimeouts, - bootstrapStageCallback.loggerFactory, - ), - executionContext, - ) - ) - adminServerRegistry - .addServiceU( - adminV30.TopologyManagerWriteServiceGrpc - .bindService( - new GrpcTopologyManagerWriteService( - 
temporaryStoreRegistry.managers() ++ sequencedTopologyManagers :+ topologyManager, - lookupActivePSId, - temporaryStoreRegistry, - bootstrapStageCallback.loggerFactory, - ), - executionContext, - ) - ) - adminServerRegistry - .addServiceU( - adminV30.TopologyAggregationServiceGrpc.bindService( - new GrpcTopologyAggregationService( - sequencedTopologyStores.mapFilter( - TopologyStoreId.select[TopologyStoreId.SynchronizerStore] - ), - ips, - bootstrapStageCallback.loggerFactory, - ), - executionContext, - ) - ) private val topologyManagerObserver = new TopologyManagerObserver { override def addedNewTransactions( @@ -999,17 +954,62 @@ abstract class CantonNodeBootstrapImpl[ ): EitherT[FutureUnlessShutdown, String, Unit] = { // Register the observer first so that it does not race with the removal when the stage has finished. topologyManager.addObserver(topologyManagerObserver) - // Add any topology transactions that were passed as part of the init process - // This is not crash safe if we crash between storing the node-id and adding the transactions, - // but a crash can recovered (use manual for anything). - topologyManager - .add( - transactions, - forceChanges = ForceFlags.none, - expectFullAuthorization = true, - ) - .leftMap(_.cause) - .flatMap(_ => super.start()) + for { + _ <- EitherT.right(topologyManager.initialize) + // add services after the topology manager is initialized + _ = { + adminServerRegistry + .addServiceU( + adminV30.TopologyManagerReadServiceGrpc + .bindService( + new GrpcTopologyManagerReadService( + member(nodeId), + temporaryStoreRegistry.stores() ++ sequencedTopologyStores :+ authorizedStore, + crypto, + lookupTopologyClient, + lookupActivePSId, + processingTimeout = parameters.processingTimeouts, + bootstrapStageCallback.loggerFactory, + ), + executionContext, + ) + ) + adminServerRegistry + .addServiceU( + adminV30.TopologyManagerWriteServiceGrpc + .bindService( + new GrpcTopologyManagerWriteService( + temporaryStoreRegistry + .managers() ++ sequencedTopologyManagers :+ topologyManager, + lookupActivePSId, + temporaryStoreRegistry, + bootstrapStageCallback.loggerFactory, + ), + executionContext, + ) + ) + adminServerRegistry + .addServiceU( + adminV30.TopologyAggregationServiceGrpc.bindService( + new GrpcTopologyAggregationService( + sequencedTopologyStores.mapFilter( + TopologyStoreId.select[TopologyStoreId.SynchronizerStore] + ), + ips, + bootstrapStageCallback.loggerFactory, + ), + executionContext, + ) + ) + } + // Add any topology transactions that were passed as part of the init process + // This is not crash safe if we crash between storing the node-id and adding the transactions, + // but a crash can be recovered (use manual for anything).
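Reduced to its control flow, the reworked start-up above and the add call below amount to: initialize the topology manager, only then bind the admin gRPC services, and finally add the init-time transactions. A minimal sketch of that shape, assuming plain Future instead of FutureUnlessShutdown; bindAdminServices and addInitTransactions are illustrative stand-ins, not Canton APIs:

import cats.data.EitherT
import cats.implicits.*
import scala.concurrent.{ExecutionContext, Future}

// Sketch: a Left from addInitTransactions short-circuits the start, while
// service registration is a plain side effect that runs only after
// initialization has succeeded.
def startSequence(
    initialize: Future[Unit],
    bindAdminServices: () => Unit,
    addInitTransactions: EitherT[Future, String, Unit],
)(implicit ec: ExecutionContext): EitherT[Future, String, Unit] =
  for {
    _ <- EitherT.right[String](initialize)
    _ = bindAdminServices()
    _ <- addInitTransactions
  } yield ()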
+ _ <- topologyManager + .add(transactions, forceChanges = ForceFlags.none, expectFullAuthorization = true) + .leftMap(_.cause) + _ <- super.start() + } yield () } override def waitingFor: Option[WaitingForExternalInput] = Some(WaitingForNodeTopology) diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/environment/NodeFactoryArguments.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/environment/NodeFactoryArguments.scala index 0ce4874f0e..e685094bd8 100644 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/environment/NodeFactoryArguments.scala +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/environment/NodeFactoryArguments.scala @@ -7,7 +7,6 @@ import cats.syntax.either.* import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config.CantonRequireTypes.InstanceName import com.digitalasset.canton.config.{LocalNodeConfig, TestingConfigInternal} -import com.digitalasset.canton.crypto.kms.KmsFactory import com.digitalasset.canton.crypto.store.CryptoPrivateStoreFactory import com.digitalasset.canton.environment.CantonNodeBootstrap.HealthDumpFunction import com.digitalasset.canton.logging.NamedLoggerFactory @@ -40,7 +39,6 @@ final case class NodeFactoryArguments[ def toCantonNodeBootstrapCommonArguments( storageFactory: StorageFactory, cryptoPrivateStoreFactory: CryptoPrivateStoreFactory, - kmsFactory: KmsFactory, ): Either[String, CantonNodeBootstrapCommonArguments[NodeConfig, ParameterConfig, Metrics]] = InstanceName .create(name) @@ -54,7 +52,6 @@ final case class NodeFactoryArguments[ metrics, storageFactory, cryptoPrivateStoreFactory, - kmsFactory, futureSupervisor, loggerFactory, writeHealthDumpToFile, diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/interactive/InteractiveSubmissionEnricher.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/interactive/InteractiveSubmissionEnricher.scala index 18acd00b6c..fd5dc29cf0 100644 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/interactive/InteractiveSubmissionEnricher.scala +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/interactive/InteractiveSubmissionEnricher.scala @@ -3,8 +3,11 @@ package com.digitalasset.canton.interactive +import cats.data.EitherT +import cats.implicits.catsSyntaxEitherId import com.digitalasset.canton.interactive.InteractiveSubmissionEnricher.PackageResolver import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.protocol.LfTemplateId import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.daml.lf.data.Ref.PackageId import com.digitalasset.daml.lf.engine.* @@ -42,19 +45,42 @@ class InteractiveSubmissionEnricher(engine: Engine, packageResolver: PackageReso /** Enrich FCI with type info and labels. Leave out trailing none fields. 
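The enrichment rework above boils down to a swap-enrich-swap-back dance: point the create node at the target package, run the enricher, then restore the original package ID on the result. In isolation, that pattern looks roughly like this (a generic sketch with simplified types, not the actual Canton signatures):

import scala.concurrent.{ExecutionContext, Future}

// Sketch: enrich against targetPkg, then restore the original package ID on
// the enriched node.
def enrichViaTargetPackage[Node](
    node: Node,
    originalPkg: String,
    targetPkg: String,
    withPackage: (Node, String) => Node,
    enrich: Node => Future[Node],
)(implicit ec: ExecutionContext): Future[Node] =
  enrich(withPackage(node, targetPkg)).map(withPackage(_, originalPkg))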
*/ - def enrichContract(contract: FatContractInstance)(implicit + def enrichContract(contract: FatContractInstance, targetPackageIds: Set[PackageId])(implicit ec: ExecutionContext, traceContext: TraceContext, - ): FutureUnlessShutdown[FatContractInstance] = - consumeEnricherResult(enricher.enrichContract(contract)) + ): EitherT[FutureUnlessShutdown, String, FatContractInstance] = + EitherT(targetPackageIds.toList.minOption match { + case Some(pkgId) => + enrichCreateNode(contract.toCreateNode, pkgId).map { enriched => + FatContractInstance + .fromCreateNode( + enriched, + contract.createdAt, + contract.authenticationData, + ) + .asRight[String] + } + case None => + FutureUnlessShutdown.pure( + s"Cannot enrich contract ${contract.contractId} without knowing its package ID" + .asLeft[FatContractInstance] + ) + }) - /** Enrich create node with type info and labels. Leave out trailing none fields. - */ - def enrichCreateNode(create: Node.Create)(implicit + private def enrichCreateNode(original: Node.Create, targetPackageId: PackageId)(implicit ec: ExecutionContext, traceContext: TraceContext, - ): FutureUnlessShutdown[Node.Create] = - consumeEnricherResult(enricher.enrichCreate(create)) + ): FutureUnlessShutdown[Node.Create] = { + + def updateTemplateId(create: Node.Create, targetPackageId: PackageId): Node.Create = { + val templateId = LfTemplateId(targetPackageId, create.templateId.qualifiedName) + create.copy(templateId = templateId) + } + + consumeEnricherResult(enricher.enrichCreate(updateTemplateId(original, targetPackageId))).map( + enriched => updateTemplateId(enriched, original.templateId.packageId) + ) + } private[this] def consumeEnricherResult[V]( result: Result[V] diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ContractLookup.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/participant/store/ContractLookup.scala similarity index 100% rename from canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ContractLookup.scala rename to canton/community/common/src/main/scala/com/digitalasset/canton/participant/store/ContractLookup.scala diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/participant/store/ContractStore.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/participant/store/ContractStore.scala new file mode 100644 index 0000000000..3422b9db3c --- /dev/null +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/participant/store/ContractStore.scala @@ -0,0 +1,184 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.participant.store + +import cats.instances.list.* +import cats.syntax.foldable.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.{BatchingConfig, CachingConfigs, ProcessingTimeout} +import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown} +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.participant.store.db.DbContractStore +import com.digitalasset.canton.participant.store.memory.InMemoryContractStore +import com.digitalasset.canton.protocol.{ContractInstance, LfContractId} +import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage} +import com.digitalasset.canton.store.Purgeable +import com.digitalasset.canton.store.db.DbDeserializationException +import com.digitalasset.canton.topology.PartyId +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.daml.lf.transaction.{CreationTime, FatContractInstance} + +import scala.concurrent.ExecutionContext + +trait ContractStore extends ContractLookup with Purgeable with FlagCloseable { + + def lookupPersistedIfCached(id: LfContractId)(implicit + traceContext: TraceContext + ): Option[Option[PersistedContractInstance]] + + def lookupPersisted(id: LfContractId)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Option[PersistedContractInstance]] + + def lookupBatchedNonCached(internalContractIds: Iterable[Long])(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Map[Long, PersistedContractInstance]] + + def lookupBatchedNonCachedInternalIds(contractIds: Iterable[LfContractId])(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Map[LfContractId, Long]] + +// TODO(#27996): this query is supposed to be used for LAPI streaming to leverage the contract cache. As getting the internal contract ID is not possible at the moment, this optimization will be implemented later +// def lookupPersistedIfCached(internalContractId: Long)(implicit +// traceContext: TraceContext +// ): Option[Option[PersistedContractInstance]] + + override type ContractsCreatedAtTime = CreationTime.CreatedAt + + /** Stores contracts created by a request. + * + * @param contracts + * The created contracts to be stored + */ + def storeContracts(contracts: Seq[ContractInstance])(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Unit] + + def storeContract(contract: ContractInstance)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Unit] = storeContracts(Seq(contract)) + + /** Debug find utility to search pcs + */ + def find( + exactId: Option[String], + filterPackage: Option[String], + filterTemplate: Option[String], + limit: Int, + )(implicit traceContext: TraceContext): FutureUnlessShutdown[List[ContractInstance]] + + /** Debug find utility to search pcs. Omits contracts that are not found. + */ + def findWithPayload( + contractIds: NonEmpty[Seq[LfContractId]] + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Map[LfContractId, ContractInstance]] + + /** Deletes multiple contracts from the contract store. + * + * Ignores errors due to a contract not being present in the store, fails on other errors.
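A caller's view of the trait above, as a rough usage sketch; only methods declared in this trait are used, and the store and contract values are assumed to be in scope:

import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.protocol.ContractInstance
import com.digitalasset.canton.tracing.TraceContext
import scala.concurrent.ExecutionContext

// Sketch: write through the store, peek at the cache synchronously, then
// fall back to the cache-or-database lookup.
def roundTrip(store: ContractStore, contract: ContractInstance)(implicit
    tc: TraceContext,
    ec: ExecutionContext,
): FutureUnlessShutdown[Boolean] =
  for {
    _ <- store.storeContract(contract)
    cachedHit = store.lookupPersistedIfCached(contract.contractId).nonEmpty
    persisted <- store.lookupPersisted(contract.contractId)
  } yield cachedHit || persisted.nonEmpty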
+ */ + def deleteIgnoringUnknown(contractIds: Iterable[LfContractId])(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Unit] + + def contractCount()(implicit traceContext: TraceContext): FutureUnlessShutdown[Int] + + // TODO(i24535): implement this on db level + def hasActiveContracts( + partyId: PartyId, + contractIds: Iterator[LfContractId], + batchSize: Int = 10, + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Boolean] = { + val lfParty = partyId.toLf + + contractIds + .grouped(batchSize) + .toList + .findM(cids => + lookupStakeholders(cids.toSet).value.map { + case Right(x) => + x.exists { case (_, listParties) => listParties.contains(lfParty) } + case Left(_) => false + } + ) + .map(_.nonEmpty) + } + + // TODO(i24535): implement this on db level + def isSignatoryOnActiveContracts( + partyId: PartyId, + contractIds: Iterator[LfContractId], + batchSize: Int = 10, + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Boolean] = { + val lfParty = partyId.toLf + contractIds + .grouped(batchSize) + .toList + .findM(cids => + lookupSignatories(cids.toSet).value.map { + case Right(x) => + x.exists { case (_, listParties) => listParties.contains(lfParty) } + case Left(_) => false + } + ) + .map(_.nonEmpty) + } +} + +object ContractStore { + def create( + storage: Storage, + processingTimeouts: ProcessingTimeout, + cachingConfigs: CachingConfigs, + batchingConfig: BatchingConfig, + loggerFactory: NamedLoggerFactory, + )(implicit executionContext: ExecutionContext): ContractStore = + storage match { + case _: MemoryStorage => + new InMemoryContractStore(processingTimeouts, loggerFactory) + + case dbStorage: DbStorage => + new DbContractStore( + dbStorage, + cacheConfig = cachingConfigs.contractStore, + dbQueryBatcherConfig = batchingConfig.contractStoreAggregator, + insertBatchAggregatorConfig = batchingConfig.aggregator, + processingTimeouts, + loggerFactory, + ) + } +} + +final case class PersistedContractInstance( + // internalContractId: Long, TODO(#27996): getting the internal contract ID with DbBulkUpdateProcessor is not possible without major rewrite there + inst: FatContractInstance { type CreatedAtTime <: CreationTime.CreatedAt } +) { + def asContractInstance: ContractInstance = ContractInstance.create(inst) match { + case Right(contract) => contract + case Left(e) => throw new DbDeserializationException(s"Invalid contract instance: $e") + } +} + +sealed trait ContractStoreError extends Product with Serializable with PrettyPrinting + +sealed trait ContractLookupError extends ContractStoreError + +final case class UnknownContract(contractId: LfContractId) extends ContractLookupError { + override protected def pretty: Pretty[UnknownContract] = prettyOfClass(unnamedParam(_.contractId)) +} +final case class UnknownContracts(contractIds: Set[LfContractId]) extends ContractLookupError { + override protected def pretty: Pretty[UnknownContracts] = prettyOfClass( + unnamedParam(_.contractIds) + ) +} +final case class FailedConvert(contractId: LfContractId) extends ContractLookupError { + override protected def pretty: Pretty[FailedConvert] = prettyOfClass(unnamedParam(_.contractId)) +} diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractStore.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractStore.scala new file mode 100644 index 0000000000..7102f8c377 --- /dev/null +++ 
b/canton/community/common/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractStore.scala @@ -0,0 +1,491 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.participant.store.db + +import cats.data.{EitherT, OptionT} +import cats.implicits.{toBifunctorOps, toTraverseOps} +import cats.syntax.parallel.* +import com.daml.nameof.NameOf.functionFullName +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.caching.ScaffeineCache +import com.digitalasset.canton.config.CantonRequireTypes.String2066 +import com.digitalasset.canton.config.{ + BatchAggregatorConfig, + BatchingConfig, + CacheConfig, + ProcessingTimeout, +} +import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown} +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, TracedLogger} +import com.digitalasset.canton.participant.store.* +import com.digitalasset.canton.protocol.* +import com.digitalasset.canton.resource.DbStorage.{DbAction, SQLActionBuilderChain} +import com.digitalasset.canton.resource.{DbStorage, DbStore} +import com.digitalasset.canton.store.db.{DbBulkUpdateProcessor, DbDeserializationException} +import com.digitalasset.canton.tracing.{TraceContext, Traced} +import com.digitalasset.canton.util.EitherUtil.RichEitherIterable +import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.util.{BatchAggregator, ErrorUtil, MonadUtil, TryUtil} +import com.digitalasset.canton.{LfPartyId, checked} +import com.digitalasset.daml.lf.transaction.{CreationTime, TransactionCoder} +import com.google.protobuf.ByteString +import slick.jdbc.{GetResult, PositionedParameters, SetParameter} + +import scala.collection.immutable +import scala.concurrent.ExecutionContext +import scala.util.{Failure, Try} + +class DbContractStore( + override protected val storage: DbStorage, + cacheConfig: CacheConfig, + dbQueryBatcherConfig: BatchAggregatorConfig, + insertBatchAggregatorConfig: BatchAggregatorConfig, + override protected val timeouts: ProcessingTimeout, + override protected val loggerFactory: NamedLoggerFactory, +)(protected implicit val ec: ExecutionContext) + extends ContractStore + with DbStore { self => + + import DbStorage.Implicits.* + import storage.api.* + import storage.converters.* + + private val profile = storage.profile + + override protected[store] def logger: TracedLogger = super.logger + + // TODO(#27996): optimize: evict proto deserialization from the DB threads (suggested: using a proper pekko-stream with deser stage over the batches, or do deser on client thread -but then it might be redundant-) + implicit def contractGetResult(implicit + getResultByteArray: GetResult[Array[Byte]] + ): GetResult[PersistedContractInstance] = idAndContractGetResult.andThen(_._2) + + implicit def idAndContractGetResult(implicit + getResultByteArray: GetResult[Array[Byte]] + ): GetResult[(Long, PersistedContractInstance)] = GetResult { r => + val internalContractId = r.nextLong() + internalContractId -> PersistedContractInstance( + // internalContractId = internalContractId, TODO(#27996): not supported just yet + inst = TransactionCoder + .decodeFatContractInstance(ByteString.copyFrom(r.<<[Array[Byte]])) + .leftMap(e => s"Failed to decode contract instance: $e") + .flatMap { decoded => + decoded.traverseCreateAt { + case createdAt: CreationTime.CreatedAt => + 
Right(createdAt: CreationTime.CreatedAt) + case _ => + Left( + s"Creation time must be CreatedAt for contract instances with id ${decoded.contractId}" + ) + } + } + .fold( + error => throw new DbDeserializationException(s"Invalid contract instance: $error"), + identity, + ) + ) + } + + implicit def contractSetParameter: SetParameter[ContractInstance] = (c, pp) => pp >> c.encoded + + private val cache + : ScaffeineCache.TunnelledAsyncCache[LfContractId, Option[PersistedContractInstance]] = + ScaffeineCache.buildMappedAsync[LfContractId, Option[PersistedContractInstance]]( + cacheConfig.buildScaffeine() + )(logger, "DbContractStore.cache") + + private def invalidateCache(key: LfContractId): Unit = + cache.invalidate(key) + + // batch aggregator used for single point queries: damle will run many "lookups" + // during interpretation. they will hit the db like a nail gun. the batch + // aggregator will limit the number of parallel queries to the db and "batch them" + // together. so if there is high load with a lot of interpretation happening in parallel + // batching will kick in. + private val batchAggregatorLookup + : BatchAggregator[LfContractId, Option[PersistedContractInstance]] = { + val processor: BatchAggregator.Processor[LfContractId, Option[PersistedContractInstance]] = + new BatchAggregator.Processor[LfContractId, Option[PersistedContractInstance]] { + override val kind: String = "serializable contract" + override def logger: TracedLogger = DbContractStore.this.logger + + override def executeBatch(ids: NonEmpty[Seq[Traced[LfContractId]]])(implicit + traceContext: TraceContext, + callerCloseContext: CloseContext, + ): FutureUnlessShutdown[Iterable[Option[PersistedContractInstance]]] = + storage.query(lookupQuery(ids.map(_.value)), functionFullName)( + traceContext, + callerCloseContext, + ) + + override def prettyItem: Pretty[LfContractId] = implicitly + } + BatchAggregator( + processor, + dbQueryBatcherConfig, + ) + } + + private val contractsBaseQuery = + sql"""select internal_contract_id, instance from par_contracts""" + + private def lookupQuery( + ids: NonEmpty[Seq[LfContractId]] + ): DbAction.ReadOnly[Seq[Option[PersistedContractInstance]]] = { + import DbStorage.Implicits.BuilderChain.* + + // TODO(#27996): optimize: pass-as-array the parameters instead of variable sized list of params + val inClause = DbStorage.toInClause("contract_id", ids) + (contractsBaseQuery ++ sql" where " ++ inClause) + .as[PersistedContractInstance] + .map { contracts => + val foundContracts = contracts + .map(contract => (contract.asContractInstance.contractId, contract)) + .toMap + ids.map(foundContracts.get) + } + } + + private def bulkLookupQuery( + ids: NonEmpty[Seq[LfContractId]] + ): DbAction.ReadOnly[immutable.Iterable[PersistedContractInstance]] = + // TODO(#27996): optimize: pass-as-array the parameters instead of variable sized list of params + lookupQuery(ids).map(_.flatten) + + override def lookup( + id: LfContractId + )(implicit traceContext: TraceContext): OptionT[FutureUnlessShutdown, ContractInstance] = + OptionT(lookupPersisted(id).map(_.map(_.asContractInstance))) + + override def lookupPersistedIfCached(id: LfContractId)(implicit + traceContext: TraceContext + ): Option[Option[PersistedContractInstance]] = + cache.getIfPresentSync(id) + + override def lookupPersisted(id: LfContractId)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Option[PersistedContractInstance]] = + cache.getFuture(id, _ => batchAggregatorLookup.run(id)) + + override def 
lookupManyExistingUncached( + ids: Seq[LfContractId] + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, LfContractId, List[ContractInstance]] = + NonEmpty + .from(ids) + .map(ids => + EitherT(lookupManyUncachedInternal(ids).map(ids.toList.zip(_).traverse { + case (id, contract) => + contract.toRight(id).map(_.asContractInstance) + })) + ) + .getOrElse(EitherT.rightT(List.empty)) + + // TODO(#27996): optimize: pass-as-array the parameters instead of variable sized list of params - this is not needed in that case anymore + private def lookupManyUncachedInternal( + ids: NonEmpty[Seq[LfContractId]] + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Seq[Option[PersistedContractInstance]]] = + MonadUtil + .batchedSequentialTraverseNE( + parallelism = BatchingConfig().parallelism, + // chunk the ids to query to avoid hitting prepared statement limits + chunkSize = DbStorage.maxSqlParameters, + )( + ids + )(chunk => storage.query(lookupQuery(chunk), functionFullName)) + + override def find( + exactId: Option[String], + filterPackage: Option[String], + filterTemplate: Option[String], + limit: Int, + )(implicit traceContext: TraceContext): FutureUnlessShutdown[List[ContractInstance]] = { + + import DbStorage.Implicits.BuilderChain.* + + // If filter is set: returns a conjunctive (`and` prepended) constraint on attribute `name`. + // Otherwise empty sql action. + def createConjunctiveFilter( + name: String, + filter: Option[String], + ): Option[SQLActionBuilderChain] = + filter + .map { f => + sql" #$name " ++ (f match { + case rs if rs.startsWith("!") => sql"= ${rs.drop(1)}" // Must be equal + case rs if rs.startsWith("^") => sql"""like ${rs.drop(1) + "%"}""" // Starts with + case rs => sql"""like ${"%" + rs + "%"}""" // Contains + }) + } + + val pkgFilter = createConjunctiveFilter("package_id", filterPackage) + val templateFilter = createConjunctiveFilter("template_id", filterTemplate) + val coidFilter: Option[SQLActionBuilderChain] = exactId.map { stringContractId => + val lfContractId = LfContractId.assertFromString(stringContractId) + sql" contract_id = $lfContractId" + } + val limitFilter = sql" #${storage.limit(limit)}" + + val whereClause = + List(pkgFilter, templateFilter, coidFilter) + .foldLeft(Option.empty[SQLActionBuilderChain]) { + case (None, Some(filter)) => Some(sql" where " ++ filter) + case (acc, None) => acc + case (Some(acc), Some(filter)) => Some(acc ++ sql" and " ++ filter) + } + .getOrElse(toSQLActionBuilderChain(sql" ")) + + val contractsQuery = contractsBaseQuery ++ whereClause ++ limitFilter + + storage + .query(contractsQuery.as[PersistedContractInstance], functionFullName) + .map(_.map(_.asContractInstance).toList) + } + + override def findWithPayload( + contractIds: NonEmpty[Seq[LfContractId]] + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Map[LfContractId, ContractInstance]] = + storage + .query( + bulkLookupQuery(contractIds), + functionFullName, + ) + .map(_.map(c => c.inst.contractId -> c.asContractInstance).toMap) + + override def storeContracts(contracts: Seq[ContractInstance])(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Unit] = + contracts.parTraverse_(storeContract) + + private def storeContract(contract: ContractInstance)(implicit + ec: ExecutionContext, + traceContext: TraceContext, + ): FutureUnlessShutdown[Unit] = + batchAggregatorInsert.run(contract).flatMap(FutureUnlessShutdown.fromTry) + + // TODO(#27996): DbBulkUpdateProcessor is not suitable in this form to get back the 
auto-generated internal_contract_id-s. This needs to be a normal processor with a custom approach. + private val batchAggregatorInsert: BatchAggregator[ContractInstance, Try[Unit]] = { + val processor = new DbBulkUpdateProcessor[ContractInstance, Unit] { + override protected implicit def executionContext: ExecutionContext = + DbContractStore.this.ec + override protected def storage: DbStorage = DbContractStore.this.storage + override def kind: String = "stored contract" + override def logger: TracedLogger = DbContractStore.this.logger + + override def executeBatch(items: NonEmpty[Seq[Traced[ContractInstance]]])(implicit + traceContext: TraceContext, + callerCloseContext: CloseContext, + ): FutureUnlessShutdown[Iterable[Try[Unit]]] = + bulkUpdateWithCheck(items, "DbContractStore.insert")(traceContext, self.closeContext) + + override protected def bulkUpdateAction(items: NonEmpty[Seq[Traced[ContractInstance]]])( + implicit batchTraceContext: TraceContext + ): DBIOAction[Array[Int], NoStream, Effect.All] = { + def setParams(pp: PositionedParameters)(contract: ContractInstance): Unit = { + + val packageId = contract.templateId.packageId + val templateId = checked(String2066.tryCreate(contract.templateId.qualifiedName.toString)) + + pp >> contract.contractId + pp >> packageId + pp >> templateId + pp >> contract + } + + val query = + profile match { + case _: DbStorage.Profile.Postgres => + """insert into par_contracts as c ( contract_id, package_id, template_id, instance) values (?, ?, ?, ?) on conflict(contract_id) do nothing""" + case _: DbStorage.Profile.H2 => + """merge into par_contracts c using (select cast(? as binary varying) contract_id, cast(? as varchar) package_id, cast(? as varchar) template_id, cast(? as binary large object) instance from dual) as input on (c.contract_id = input.contract_id) when not matched then insert (contract_id, instance, package_id, template_id) values (input.contract_id, input.instance, input.package_id, input.template_id)""" + } + // TODO(#27996): optimize: transposed-arrays with unset instead of JDBC batching for PG + DbStorage.bulkOperation(query, items.map(_.value), profile)(setParams) + + } + + override protected def onSuccessItemUpdate(item: Traced[ContractInstance]): Try[Unit] = + Try { + val contract: ContractInstance = item.value + cache.put(contract.contractId, Option(PersistedContractInstance(contract.inst))) + } + + private def failWith(message: String)(implicit + loggingContext: ErrorLoggingContext + ): Failure[Nothing] = + ErrorUtil.internalErrorTry(new IllegalStateException(message)) + + override protected type CheckData = ContractInstance + override protected type ItemIdentifier = LfContractId + override protected def itemIdentifier(item: ContractInstance): ItemIdentifier = + item.contractId + override protected def dataIdentifier(state: CheckData): ItemIdentifier = state.contractId + + override protected def checkQuery(itemsToCheck: NonEmpty[Seq[ItemIdentifier]])(implicit + batchTraceContext: TraceContext + ): DbAction.ReadOnly[immutable.Iterable[CheckData]] = + bulkLookupQuery(itemsToCheck).map(_.map(_.asContractInstance))(ec) + + override protected def analyzeFoundData( + item: ContractInstance, + foundData: Option[ContractInstance], + )(implicit + traceContext: TraceContext + ): Try[Unit] = + foundData match { + case None => + // the contract is not in the db + invalidateCache(item.contractId) + failWith(s"Failed to insert contract ${item.contractId}") + case Some(data) => + if (data == item) {
cache.put(item.contractId, Some(PersistedContractInstance(item.inst))) + TryUtil.unit + } else { + invalidateCache(data.contractId) + failWith( + s"Stored contracts are immutable, but found different contract ${item.contractId}" + ) + } + } + + override def prettyItem: Pretty[ContractInstance] = + ContractInstance.prettyGenContractInstance + } + + BatchAggregator(processor, insertBatchAggregatorConfig) + } + + override def deleteIgnoringUnknown( + contractIds: Iterable[LfContractId] + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { + import DbStorage.Implicits.BuilderChain.* + NonEmpty.from(contractIds.toSeq) match { + case None => FutureUnlessShutdown.unit + case Some(cids) => + val inClause = DbStorage.toInClause("contract_id", cids) + storage + .update_( + (sql"""delete from par_contracts where """ ++ inClause).asUpdate, + functionFullName, + ) + .thereafter(_ => cache.invalidateAll(contractIds)) + } + } + + override def purge()(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Unit] = + storage + .update_( + sqlu"""delete from par_contracts""", + functionFullName, + ) + .thereafter(_ => cache.invalidateAll()) + + override def lookupStakeholders(ids: Set[LfContractId])(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, UnknownContracts, Map[LfContractId, Set[LfPartyId]]] = + lookupMetadata(ids).map(_.view.mapValues(_.stakeholders).toMap) + + override def lookupSignatories(ids: Set[LfContractId])(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, UnknownContracts, Map[LfContractId, Set[LfPartyId]]] = + lookupMetadata(ids).map(_.view.mapValues(_.signatories).toMap) + + def lookupMetadata(ids: Set[LfContractId])(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, UnknownContracts, Map[LfContractId, ContractMetadata]] = + NonEmpty.from(ids) match { + case None => EitherT.rightT(Map.empty) + + case Some(idsNel) => + EitherT( + MonadUtil + .parTraverseWithLimit(BatchAggregatorConfig.defaultMaximumInFlight)( + idsNel.forgetNE.toSeq + )(id => lookup(id).toRight(id).value) + .map(_.collectRight) + .map { contracts => + Either.cond( + contracts.sizeCompare(ids) == 0, + contracts + .map(contract => contract.contractId -> contract.metadata) + .toMap, + UnknownContracts(ids -- contracts.map(_.contractId).toSet), + ) + } + ) + } + + override def contractCount()(implicit traceContext: TraceContext): FutureUnlessShutdown[Int] = + storage.query( + sql"select count(*) from par_contracts".as[Int].head, + functionFullName, + ) + + override def onClosed(): Unit = { + cache.invalidateAll() + cache.cleanUp() + super.onClosed() + } + + override def lookupBatchedNonCached(internalContractIds: Iterable[Long])(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Map[Long, PersistedContractInstance]] = + NonEmpty + .from(internalContractIds.toSeq) + .fold(FutureUnlessShutdown.pure(Map.empty[Long, PersistedContractInstance])) { + nonEmptyInternalContractIds => + import DbStorage.Implicits.BuilderChain.* + + val inClause = DbStorage.toInClause("internal_contract_id", nonEmptyInternalContractIds) + val query = + (contractsBaseQuery ++ sql" where " ++ inClause).as[(Long, PersistedContractInstance)] + storage + .query( + query, + functionFullName, + ) + .map(_.toMap) + } + + override def lookupBatchedNonCachedInternalIds( + contractIds: Iterable[LfContractId] + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Map[LfContractId, Long]] = + NonEmpty + .from(contractIds.toSeq) + 
.fold(FutureUnlessShutdown.pure(Map.empty[LfContractId, Long])) { nonEmptyContractIds => + import DbStorage.Implicits.BuilderChain.* + + val inClause = DbStorage.toInClause("contract_id", nonEmptyContractIds) + val query = + (sql"""select contract_id, internal_contract_id from par_contracts where """ ++ inClause) + .as[(LfContractId, Long)] + storage + .query( + query, + functionFullName, + ) + .map(_.toMap) + } + +} diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryContractStore.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryContractStore.scala new file mode 100644 index 0000000000..4408c62d2f --- /dev/null +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryContractStore.scala @@ -0,0 +1,189 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.participant.store.memory + +import cats.data.{EitherT, OptionT} +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.LfPartyId +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger} +import com.digitalasset.canton.participant.store.* +import com.digitalasset.canton.protocol.* +import com.digitalasset.canton.tracing.TraceContext + +import java.util.concurrent.atomic.AtomicLong +import scala.collection.concurrent.TrieMap +import scala.concurrent.ExecutionContext + +/** An in-memory contract store. This class is thread-safe. */ +class InMemoryContractStore( + override protected val timeouts: ProcessingTimeout, + protected val loggerFactory: NamedLoggerFactory, +)( + protected implicit val ec: ExecutionContext +) extends ContractStore + with NamedLogging { + + override protected[store] def logger: TracedLogger = super.logger + + /** Invariants: + * - Every [[LfFatContractInst]] is stored under [[LfFatContractInst.contractId]]. 
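The invariant above is maintained with two concurrent maps and an atomic counter: contracts keyed by contract ID, plus a reverse index from a monotonically increasing internal Long ID that mirrors the database's internal_contract_id. The pattern in isolation, as a generic sketch rather than the store itself:

import java.util.concurrent.atomic.AtomicLong
import scala.collection.concurrent.TrieMap

// Sketch: values are keyed by K and additionally reachable via an
// internal Long ID assigned at insertion time.
final class DualIndex[K, V] {
  private val byKey = TrieMap.empty[K, (Long, V)]
  private val keyByInternalId = TrieMap.empty[Long, K]
  private val nextId = new AtomicLong(0)

  def put(key: K, value: V): Unit = {
    val id = nextId.getAndIncrement()
    // Only register the reverse mapping if this key was actually inserted.
    if (byKey.putIfAbsent(key, (id, value)).isEmpty) {
      val _ = keyByInternalId.putIfAbsent(id, key)
    }
  }

  def get(key: K): Option[V] = byKey.get(key).map(_._2)

  def getByInternalId(id: Long): Option[V] =
    keyByInternalId.get(id).flatMap(byKey.get).map(_._2)
}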
+ */ + private[this] val contracts = TrieMap.empty[LfContractId, (Long, ContractInstance)] + private[this] val internalIds = TrieMap.empty[Long, LfContractId] + private[this] val index = new AtomicLong(0) + + /** Debug find utility to search pcs + */ + override def find( + filterId: Option[String], + filterPackage: Option[String], + filterTemplate: Option[String], + limit: Int, + )(implicit traceContext: TraceContext): FutureUnlessShutdown[List[ContractInstance]] = { + def search( + needle: String, + accessor: ContractInstance => String, + ): ContractInstance => Boolean = + needle match { + case rs if rs.startsWith("!") => accessor(_) == needle.drop(1) + case rs if rs.startsWith("^") => accessor(_).startsWith(needle.drop(1)) + case _ => accessor(_).contains(needle) + } + val flt1 = + filterPackage.map(search(_, _.templateId.packageId)) + val flt2 = filterTemplate.map( + search(_, _.templateId.qualifiedName.qualifiedName) + ) + val flt3 = filterId.map(search(_, _.contractId.coid)) + + def conjunctiveFilter(sc: ContractInstance): Boolean = + flt1.forall(_(sc)) && flt2.forall(_(sc)) && flt3.forall(_(sc)) + FutureUnlessShutdown.pure( + contracts.values.view.map(_._2).filter(conjunctiveFilter).take(limit).toList + ) + } + + def findWithPayload( + contractIds: NonEmpty[Seq[LfContractId]] + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Map[LfContractId, ContractInstance]] = + FutureUnlessShutdown.pure( + contractIds + .map(cid => cid -> contracts.get(cid).map(_._2)) + .collect { case (cid, Some(contract)) => cid -> contract } + .toMap + ) + + override def lookup( + id: LfContractId + )(implicit traceContext: TraceContext): OptionT[FutureUnlessShutdown, ContractInstance] = { + logger.debug(s"Looking up contract: $id") + OptionT(FutureUnlessShutdown.pure { + val result = contracts.get(id).map(_._2) + result.fold(logger.debug(s"Contract $id not found"))(contract => + logger.debug( + s"Found contract $id of type ${contract.templateId.qualifiedName.qualifiedName}" + ) + ) + result + }) + } + + override def storeContracts( + contracts: Seq[ContractInstance] + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { + contracts.foreach(store) + FutureUnlessShutdown.unit + } + + private def store(storedContract: ContractInstance): Unit = { + val idx = index.getAndIncrement() + contracts + .putIfAbsent(storedContract.contractId, (idx, storedContract)) + .discard[Option[(Long, ContractInstance)]] + internalIds + .putIfAbsent(idx, storedContract.contractId) + .discard[Option[LfContractId]] + } + + override def deleteIgnoringUnknown( + ids: Iterable[LfContractId] + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { + ids.foreach(id => + contracts + .remove(id) + .flatMap { case (iid, _) => internalIds.remove(iid) } + .discard[Option[LfContractId]] + ) + FutureUnlessShutdown.unit + } + + override def purge()(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { + contracts.clear() + internalIds.clear() + FutureUnlessShutdown.unit + } + + override def lookupStakeholders(ids: Set[LfContractId])(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, UnknownContracts, Map[LfContractId, Set[LfPartyId]]] = { + val res = contracts.filter { case (cid, _) => ids.contains(cid) }.map { case (cid, (_, c)) => + (cid, c.stakeholders) + } + EitherT.cond(res.sizeCompare(ids) == 0, res.toMap, UnknownContracts(ids -- res.keySet)) + } + + override def lookupSignatories(ids: Set[LfContractId])(implicit + traceContext: TraceContext + ): 
EitherT[FutureUnlessShutdown, UnknownContracts, Map[LfContractId, Set[LfPartyId]]] = { + val res = contracts.filter { case (cid, _) => ids.contains(cid) }.map { case (cid, (_, c)) => + (cid, c.inst.signatories) + } + EitherT.cond(res.sizeCompare(ids) == 0, res.toMap, UnknownContracts(ids -- res.keySet)) + } + + override def contractCount()(implicit traceContext: TraceContext): FutureUnlessShutdown[Int] = + FutureUnlessShutdown.pure(contracts.size) + + override def lookupPersistedIfCached(id: LfContractId)(implicit + traceContext: TraceContext + ): Option[Option[PersistedContractInstance]] = + Some( + contracts.get(id).map(c => PersistedContractInstance(c._2.inst)) + ) + + override def lookupPersisted(id: LfContractId)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Option[PersistedContractInstance]] = + FutureUnlessShutdown.pure( + contracts.get(id).map(c => PersistedContractInstance(c._2.inst)) + ) + + override def lookupBatchedNonCached(internalContractIds: Iterable[Long])(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Map[Long, PersistedContractInstance]] = + FutureUnlessShutdown.pure( + internalContractIds + .flatMap( + internalIds + .get(_) + .flatMap(contracts.get) + .map { case (iid, c) => (iid, PersistedContractInstance(c.inst)) } + ) + .toMap + ) + + override def lookupBatchedNonCachedInternalIds(contractIds: Iterable[LfContractId])(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Map[LfContractId, Long]] = + FutureUnlessShutdown.pure( + contractIds + .flatMap(cid => contracts.get(cid).map { case (iid, _) => (cid, iid) }) + .toMap + ) +} diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/protocol/ContractIdAbsolutizer.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/protocol/ContractIdAbsolutizer.scala index 7325a81c4a..b83bd9ad9c 100644 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/protocol/ContractIdAbsolutizer.scala +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/protocol/ContractIdAbsolutizer.scala @@ -83,7 +83,7 @@ class ContractIdAbsolutizer( } relativeSuffixesInArg = relativeSuffixesInArgBuilder.result() absolutizedCid <- absolutizeContractId(fci.contractId) - absolutizedCreationTime <- absolutizationData.updateLedgerTime(fci.createdAt) + absolutizedCreationTime <- absolutizationData.updateLedgerTime(absolutizedCid, fci.createdAt) contractIdVersion <- CantonContractIdVersion .extractCantonContractIdVersion(fci.contractId) .leftMap(err => s"Invalid contract ID version: $err") @@ -125,7 +125,10 @@ class ContractIdAbsolutizer( object ContractIdAbsolutizer { sealed trait ContractIdAbsolutizationData extends Product with Serializable { - def updateLedgerTime(relativeCreationTime: CreationTime): Either[String, CreationTime.CreatedAt] + def updateLedgerTime( + contractId: LfContractId, + relativeCreationTime: CreationTime, + ): Either[String, CreationTime.CreatedAt] def absolutizeAuthenticationData( relativeSuffixesInArg: SortedSet[RelativeContractIdSuffixV2], authenticationData: ContractAuthenticationData, @@ -134,13 +137,16 @@ object ContractIdAbsolutizer { case object ContractIdAbsolutizationDataV1 extends ContractIdAbsolutizationData { override def updateLedgerTime( - relativeCreationTime: CreationTime + contractId: LfContractId, + relativeCreationTime: CreationTime, ): Either[String, CreationTime.CreatedAt] = relativeCreationTime match { case absolute: CreationTime.CreatedAt => Right(absolute) case _ => - Left(s"Invalid creation time for 
V1 contract ID absolutization: $relativeCreationTime") + Left( + s"Invalid creation time for V1 contract ID $contractId absolutization: $relativeCreationTime" + ) } override def absolutizeAuthenticationData( @@ -155,16 +161,17 @@ object ContractIdAbsolutizer { } final case class ContractIdAbsolutizationDataV2( - creatingTransactionId: TransactionId, + creatingTransactionId: UpdateId, ledgerTimeOfTx: CantonTimestamp, ) extends ContractIdAbsolutizationData { override def updateLedgerTime( - relativeCreationTime: CreationTime + contractId: LfContractId, + relativeCreationTime: CreationTime, ): Either[String, CreationTime.CreatedAt] = relativeCreationTime match { case CreationTime.Now => Right(CreationTime.CreatedAt(ledgerTimeOfTx.toLf)) case createdAt: CreationTime.CreatedAt => Left( - s"Invalid creation time for V2 contract ID absolutization: $createdAt, expected 'now'" + s"Invalid creation time for V2 contract ID $contractId absolutization: $createdAt, expected 'now'" ) } @@ -177,7 +184,7 @@ object ContractIdAbsolutizer { _ <- v2.creatingTransactionId.traverse_(transactionId => Either.left( - s"Cannot absolutize authentication data that already contains a transaction ID ${transactionId.unwrap.toHexString}" + s"Cannot absolutize authentication data that already contains a transaction ID ${transactionId.toHexString}" ) ) _ <- Either.cond( diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/protocol/ContractIdSuffixer.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/protocol/ContractIdSuffixer.scala index c075f83ba0..e508a8a14f 100644 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/protocol/ContractIdSuffixer.scala +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/protocol/ContractIdSuffixer.scala @@ -6,19 +6,22 @@ package com.digitalasset.canton.protocol import com.digitalasset.canton.crypto.{HashOps, HmacOps} import com.digitalasset.canton.protocol.ContractIdSuffixer.RelativeSuffixResult import com.digitalasset.canton.util.LfTransactionUtil +import com.digitalasset.daml.lf.crypto.Hash.HashingMethod import com.digitalasset.daml.lf.data.Bytes import com.digitalasset.daml.lf.transaction.CreationTime -import com.digitalasset.daml.lf.value.Value.ThinContractInstance /** Turns local contract IDs into relative contract IDs */ class ContractIdSuffixer(hashOps: HashOps & HmacOps, contractIdVersion: CantonContractIdVersion) { private val unicumGenerator: UnicumGenerator = new UnicumGenerator(hashOps) + val contractHashingMethod: HashingMethod = contractIdVersion.contractHashingMethod + def relativeSuffixForLocalContract( contractSalt: ContractSalt, ledgerTime: CreationTime, createWithSuffixedArg: LfNodeCreate, + contractHash: LfHash, ): Either[String, RelativeSuffixResult] = { val contractMetadata = LfTransactionUtil.metadataFromCreate(createWithSuffixedArg) @@ -28,7 +31,7 @@ class ContractIdSuffixer(hashOps: HashOps & HmacOps, contractIdVersion: CantonCo contractSalt, ledgerTime, contractMetadata, - createWithSuffixedArg.coinst, + contractHash, ) (relativeSuffix, authenticationData) = suffixAndAuth suffixedContractId <- localId.withSuffix(relativeSuffix.toBytes) @@ -42,7 +45,7 @@ class ContractIdSuffixer(hashOps: HashOps & HmacOps, contractIdVersion: CantonCo contractSalt: ContractSalt, ledgerTime: CreationTime, contractMetadata: ContractMetadata, - contractInst: ThinContractInstance, + contractHash: LfHash, ): Either[String, (RelativeContractIdSuffix, ContractAuthenticationData)] = contractIdVersion match { case 
v1: CantonContractIdV1Version => @@ -56,8 +59,8 @@ class ContractIdSuffixer(hashOps: HashOps & HmacOps, contractIdVersion: CantonCo contractSalt, createdAt, contractMetadata, - contractInst, v1, + contractHash, ) val authenticationData = ContractAuthenticationDataV1(contractSalt.unwrap)(v1) (relativeSuffix, authenticationData) @@ -73,8 +76,8 @@ class ContractIdSuffixer(hashOps: HashOps & HmacOps, contractIdVersion: CantonCo val relativeSuffix = unicumGenerator.generateRelativeSuffixV2( contractSalt, contractMetadata, - contractInst, v2, + contractHash, ) val authenticationData = ContractAuthenticationDataV2( Bytes.fromByteString(contractSalt.unwrap.forHashing), diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/protocol/UnicumGenerator.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/protocol/UnicumGenerator.scala index e13b6ea1c7..395249bbeb 100644 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/protocol/UnicumGenerator.scala +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/protocol/UnicumGenerator.scala @@ -6,10 +6,8 @@ package com.digitalasset.canton.protocol import cats.syntax.either.* import com.digitalasset.canton.crypto.{Hash, HashOps, HashPurpose, HmacOps, Salt} import com.digitalasset.canton.serialization.DeterministicEncoding -import com.digitalasset.canton.util.LegacyContractHash import com.digitalasset.daml.lf.data.Bytes import com.digitalasset.daml.lf.transaction.{CreationTime, FatContractInstance, Versioned} -import com.digitalasset.daml.lf.value.Value.ThinContractInstance import com.google.common.annotations.VisibleForTesting /** Generates [[ContractSalt]]s and [[Unicum]]s for contract IDs such that the [[Unicum]] is a @@ -113,10 +111,10 @@ class UnicumGenerator(cryptoOps: HashOps & HmacOps) { * the ledger time at which the contract is created * @param metadata * contract metadata - * @param suffixedContractInstance - * the thin instance of the contract where contract IDs have already been suffixed. 
* @param cantonContractIdVersion * the contract id versioning + * @param contractHash + * the contract hash using the method associated with the IdVersion * * @see * UnicumGenerator for the construction details and the security properties @@ -125,23 +123,18 @@ class UnicumGenerator(cryptoOps: HashOps & HmacOps) { salt: ContractSalt, ledgerCreateTime: CreationTime.CreatedAt, metadata: ContractMetadata, - suffixedContractInstance: ThinContractInstance, cantonContractIdVersion: CantonContractIdV1Version, + contractHash: LfHash, ): ContractIdSuffixV1 = { val contractSaltSize = salt.unwrap.size require( contractSaltSize.toLong == cryptoOps.defaultHmacAlgorithm.hashAlgorithm.length, s"Invalid contract salt size ($contractSaltSize)", ) - val contractArgHash = LegacyContractHash.tryThinContractHash( - suffixedContractInstance, - cantonContractIdVersion.useUpgradeFriendlyHashing, - ) - val unicum = computeUnicumHash( ledgerCreateTime = ledgerCreateTime, metadata = metadata, - contractHash = contractArgHash, + contractHash = contractHash, contractSalt = salt.unwrap, ) ContractIdSuffixV1(cantonContractIdVersion, Unicum(unicum)) @@ -150,8 +143,8 @@ class UnicumGenerator(cryptoOps: HashOps & HmacOps) { def generateRelativeSuffixV2( salt: ContractSalt, metadata: ContractMetadata, - suffixedContractInstance: ThinContractInstance, cantonContractIdVersion: CantonContractIdV2Version, + contractArgHash: LfHash, ): RelativeContractIdSuffixV2 = { val hash = cantonContractIdVersion match { case CantonContractIdV2Version0 => @@ -160,10 +153,6 @@ class UnicumGenerator(cryptoOps: HashOps & HmacOps) { contractSaltSize.toLong == cryptoOps.defaultHmacAlgorithm.hashAlgorithm.length, s"Invalid contract salt size ($contractSaltSize)", ) - val contractArgHash = LegacyContractHash.tryThinContractHash( - suffixedContractInstance, - upgradeFriendly = true, - ) val nonSignatoryStakeholders = metadata.stakeholders -- metadata.signatories cryptoOps @@ -204,40 +193,6 @@ class UnicumGenerator(cryptoOps: HashOps & HmacOps) { RelativeContractIdSuffixV2(cantonContractIdVersion, Bytes.fromByteString(hash.unwrap)) } - /** Re-computes a contract's [[Unicum]] based on the provided salt. Used for authenticating - * contracts. - * - * @param contractSalt - * the [[ContractSalt]] computed when the original contract id was generated. - * @param ledgerCreateTime - * the ledger time at which the contract is created - * @param metadata - * contract metadata - * @param suffixedContractInstance - * the thin instance of the contract where contract IDs have already been suffixed. - * @param cantonContractIdVersion - * the contract id versioning - * @return - * the unicum if successful or a failure if the contract salt size is mismatching the - * predefined size. - */ - def recomputeUnicum( - contractSalt: Salt, - ledgerCreateTime: CreationTime.CreatedAt, - metadata: ContractMetadata, - suffixedContractInstance: ThinContractInstance, - cantonContractIdVersion: CantonContractIdV1Version, - ): Either[String, Unicum] = - recomputeUnicum( - contractSalt = contractSalt, - ledgerCreateTime = ledgerCreateTime, - metadata = metadata, - contractHash = LegacyContractHash.tryThinContractHash( - suffixedContractInstance, - cantonContractIdVersion.useUpgradeFriendlyHashing, - ), - ) - /** Re-computes a contract's [[Unicum]] based on the provided salt. Used for authenticating * contracts. * @@ -277,6 +232,21 @@ class UnicumGenerator(cryptoOps: HashOps & HmacOps) { ) } yield unicum + /** Re-computes a contract's [[Unicum]] based on the provided salt. 
Used for authenticating + * contracts. + * + * @param contractSalt + * the [[ContractSalt]] computed when the original contract id was generated. + * @param ledgerCreateTime + * the ledger time at which the contract is created + * @param metadata + * contract metadata + * @param contractHash + * contract hash + * @return + * the unicum if successful or a failure if the contract salt size is mismatching the + * predefined size. + */ def recomputeUnicum( contractSalt: Salt, ledgerCreateTime: CreationTime.CreatedAt, diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/protocol/WellFormedTransaction.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/protocol/WellFormedTransaction.scala index 48982ec358..726e727af8 100644 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/protocol/WellFormedTransaction.scala +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/protocol/WellFormedTransaction.scala @@ -544,7 +544,7 @@ object WellFormedTransaction { preparationTimes.head1, s"Different preparation times: ${preparationTimes.mkString(", ")}", ) - version = protocol.maxTransactionVersion(versions) + version = protocol.maxSerializationVersion(versions) _ <- MonadUtil .foldLeftM[Either[String, *], (Int, List[(RollbackSibling, LfNodeId)]), WithRollbackScope[ WellFormedTransaction[WithAbsoluteSuffixes] diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/store/PendingOperationStore.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/store/PendingOperationStore.scala new file mode 100644 index 0000000000..98180533c5 --- /dev/null +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/store/PendingOperationStore.scala @@ -0,0 +1,195 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.store + +import cats.data.{EitherT, OptionT} +import cats.syntax.either.* +import com.digitalasset.canton.config.CantonRequireTypes.NonEmptyString +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.store.PendingOperation.{ + ConflictingPendingOperationError, + PendingOperationTriggerType, +} +import com.digitalasset.canton.store.db.DbDeserializationException +import com.digitalasset.canton.topology.{SynchronizerId, UniqueIdentifier} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.version.{HasProtocolVersionedWrapper, VersioningCompanion} +import com.google.protobuf.ByteString + +import scala.util.Try + +trait PendingOperationStore[Op <: HasProtocolVersionedWrapper[Op]] { + + protected def opCompanion: VersioningCompanion[Op] + + /** Atomically stores a pending operation, returning an error if a conflicting operation already + * exists. + * + * This check-and-insert operation is performed within a serializable transaction to prevent race + * conditions. The behavior depends on whether an operation with the same unique key + * (`synchronizerId`, `key`, `name`) already exists in the store: + * - If no operation with the key exists, the new operation is inserted. + * - If an '''identical''' operation already exists, the operation succeeds without making + * changes. + * - If an operation with the same key but '''different''' data exists, the operation fails + * with an error. 
+ * + * @param operation + * The `PendingOperation` to insert. + * @param traceContext + * The context for tracing and logging. + * @return + * An `EitherT` that completes with: + * - `Right(())` if the operation was successfully stored or an identical one already existed. + * - `Left(ConflictingPendingOperationError)` if a conflicting operation was found. + */ + def insert(operation: PendingOperation[Op])(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, ConflictingPendingOperationError, Unit] + + /** Deletes a pending operation identified by its unique composite key (`synchronizerId`, + * `operationKey`, `operationName`). + * + * This operation is '''idempotent'''. It succeeds regardless of whether the record existed prior + * to the call. + * + * @param synchronizerId + * The ID of the synchronizer scoping the operation application. + * @param operationKey + * A key to distinguish between multiple instances of the same operation. + * @param operationName + * The name of the operation to be executed. + * @param traceContext + * The context for tracing and logging. + * @return + * A future that completes when the deletion has finished. + */ + def delete( + synchronizerId: SynchronizerId, + operationKey: String, + operationName: NonEmptyString, + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] + + /** Fetches a pending operation by its unique composite key (`synchronizerId`, `operationKey`, + * `operationName`). + * + * @param synchronizerId + * The ID of the synchronizer scoping the operation application. + * @param operationKey + * A key to distinguish between multiple instances of the same operation. + * @param operationName + * The name of the operation to be executed. + * @param traceContext + * The context for tracing and logging. + * @return + * A future that completes with `Some(operation)` if found and valid, `None` if not found, or + * fails with a `DbDeserializationException` if the stored data is corrupt. + */ + def get( + synchronizerId: SynchronizerId, + operationKey: String, + operationName: NonEmptyString, + )(implicit traceContext: TraceContext): OptionT[FutureUnlessShutdown, PendingOperation[Op]] + +} + +final case class PendingOperation[Op <: HasProtocolVersionedWrapper[Op]] private[store] ( + trigger: PendingOperationTriggerType, + name: NonEmptyString, + key: String, + operation: Op, + synchronizerId: SynchronizerId, +) { + private[store] def compositeKey: (SynchronizerId, String, NonEmptyString) = + (synchronizerId, key, name) + +} + +object PendingOperation { + + private[store] def create[Op <: HasProtocolVersionedWrapper[Op]]( + trigger: String, + name: String, + key: String, + operationBytes: ByteString, + operationDeserializer: ByteString => ParsingResult[Op], + synchronizerId: String, + ): Either[String, PendingOperation[Op]] = + Try( + tryCreate(trigger, name, key, operationBytes, operationDeserializer, synchronizerId) + ).toEither.leftMap(_.getMessage) + + /** Factory method to create an instance from database values. Performs validation and throws + * DbDeserializationException on failure. 
+ */ + private[store] def tryCreate[Op <: HasProtocolVersionedWrapper[Op]]( + trigger: String, + name: String, + key: String, + operationBytes: ByteString, + operationDeserializer: ByteString => ParsingResult[Op], + synchronizerId: String, + ): PendingOperation[Op] = { + val validTrigger = PendingOperationTriggerType + .fromString(trigger) + .getOrElse( + throw new DbDeserializationException( + s"Invalid pending_operation_trigger_type in database: $trigger" + ) + ) + val validName = if (name.isBlank) { + throw new DbDeserializationException( + s"Missing pending operation name (blank): $name" + ) + } else { + NonEmptyString.tryCreate(name) + } + val validOperation = operationDeserializer(operationBytes).valueOr(error => + throw new DbDeserializationException( + s"Failed to deserialize pending operation byte string: $error" + ) + ) + val validSynchronizerId = SynchronizerId( + UniqueIdentifier.deserializeFromDb(synchronizerId) // throws DbDeserializationException + ) + + PendingOperation(validTrigger, validName, key, validOperation, validSynchronizerId) + } + + sealed trait PendingOperationTriggerType extends Product with Serializable { + def asString: String + } + + object PendingOperationTriggerType { + case object SynchronizerReconnect extends PendingOperationTriggerType { + override def asString: String = "synchronizer_reconnect" + } + + def fromString(s: String): Either[String, PendingOperationTriggerType] = s match { + case "synchronizer_reconnect" => Right(SynchronizerReconnect) + case _ => Left(s"Unknown pending operation trigger type: $s") + } + } + + /** Signals a failed attempt to insert a pending operation because it conflicts with an existing + * one. + * + * A conflict occurs when an operation with the same unique key (`synchronizerId`, `key`, `name`) + * already exists in the store but contains different data. + * + * @param synchronizerId + * The unique identifier of the synchronizer that owns the operation. + * @param key + * The key that uniquely identifies the pending operation within its scope. + * @param name + * The name describing the type of pending operation. + */ + final case class ConflictingPendingOperationError( + synchronizerId: SynchronizerId, + key: String, + name: NonEmptyString, + ) + +} diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/store/db/DbPendingOperationsStore.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/store/db/DbPendingOperationsStore.scala new file mode 100644 index 0000000000..d358e7614d --- /dev/null +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/store/db/DbPendingOperationsStore.scala @@ -0,0 +1,163 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
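(For orientation, a minimal usage sketch of the store API defined in PendingOperationStore.scala above. `Op` stands for any versioned operation type; the store, operation, and trace context are assumed to be in scope. None of these helper names are part of the patch itself.)

import cats.data.{EitherT, OptionT}
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.store.PendingOperation.ConflictingPendingOperationError
import com.digitalasset.canton.store.{PendingOperation, PendingOperationStore}
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.version.HasProtocolVersionedWrapper

def exercisePendingOps[Op <: HasProtocolVersionedWrapper[Op]](
    store: PendingOperationStore[Op],
    op: PendingOperation[Op],
)(implicit tc: TraceContext): Unit = {
  // Conflict-checked insert: Right(()) if the key was free or an identical
  // record already existed, Left(...) if the same key holds different data.
  val inserted: EitherT[FutureUnlessShutdown, ConflictingPendingOperationError, Unit] =
    store.insert(op)

  // Lookup and idempotent deletion both address the record by the composite
  // key (synchronizerId, operationKey, operationName).
  val fetched: OptionT[FutureUnlessShutdown, PendingOperation[Op]] =
    store.get(op.synchronizerId, op.key, op.name)
  val deleted: FutureUnlessShutdown[Unit] =
    store.delete(op.synchronizerId, op.key, op.name)
  val _ = (inserted, fetched, deleted)
}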
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.store.db + +import cats.data.{EitherT, OptionT} +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.config.CantonRequireTypes.NonEmptyString +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.resource.{DbStorage, DbStore} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.store.PendingOperation.{ + ConflictingPendingOperationError, + PendingOperationTriggerType, +} +import com.digitalasset.canton.store.{PendingOperation, PendingOperationStore} +import com.digitalasset.canton.topology.SynchronizerId +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.version.{HasProtocolVersionedWrapper, VersioningCompanion} +import com.google.protobuf.ByteString +import slick.jdbc.{GetResult, SetParameter, TransactionIsolation} + +import java.sql.Types +import scala.annotation.unused +import scala.concurrent.ExecutionContext + +class DbPendingOperationsStore[Op <: HasProtocolVersionedWrapper[Op]]( + override protected val storage: DbStorage, + override protected val timeouts: ProcessingTimeout, + override protected val loggerFactory: NamedLoggerFactory, + override protected val opCompanion: VersioningCompanion[Op], +)(implicit val executionContext: ExecutionContext) + extends DbStore + with PendingOperationStore[Op] { + + import storage.api.* + + override def insert( + operation: PendingOperation[Op] + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, ConflictingPendingOperationError, Unit] = { + implicit val pendingOperationGetResult: GetResult[PendingOperation[Op]] = + DbPendingOperationsStore.getPendingOperationResult(opCompanion.fromTrustedByteString) + + val readAction = + sql""" + select operation_trigger, operation_name, operation_key, operation, synchronizer_id + from common_pending_operations + where synchronizer_id = ${operation.synchronizerId} + and operation_key = ${operation.key} + and operation_name = ${operation.name.unwrap} + """.as[PendingOperation[Op]].headOption + + val transaction = readAction.flatMap { + case Some(existingOperation) if existingOperation != operation => + DBIO.successful( + Left( + ConflictingPendingOperationError( + operation.synchronizerId, + operation.key, + operation.name, + ) + ) + ) + + case Some(_) => DBIO.successful(Right(())) + + case None => + import com.digitalasset.canton.resource.DbStorage.Implicits.setParameterByteString + @unused + implicit val setParameter: SetParameter[Op] = (v: Op, pp) => pp >> v.toByteString + @unused + implicit val setOperationTriggerType: SetParameter[PendingOperationTriggerType] = + DbPendingOperationsStore.setOperationTriggerType(storage) + + sqlu""" + insert into common_pending_operations + (operation_trigger, operation_name, operation_key, operation, synchronizer_id) + values + ( + ${operation.trigger}, + ${operation.name.unwrap}, + ${operation.key}, + ${operation.operation}, + ${operation.synchronizerId} + ) + """.map(_ => Right(())) + } + + EitherT( + storage.queryAndUpdate( + transaction.transactionally.withTransactionIsolation(TransactionIsolation.Serializable), + functionFullName, + ) + ) + } + + override def delete( + synchronizerId: SynchronizerId, + operationKey: String, + operationName: NonEmptyString, + )(implicit traceContext: TraceContext): 
FutureUnlessShutdown[Unit] = { + val deleteAction = + sqlu""" + delete from common_pending_operations + where synchronizer_id = $synchronizerId + and operation_key = $operationKey + and operation_name = ${operationName.unwrap} + """ + storage.update_(deleteAction, functionFullName) + } + + override def get( + synchronizerId: SynchronizerId, + operationKey: String, + operationName: NonEmptyString, + )(implicit traceContext: TraceContext): OptionT[FutureUnlessShutdown, PendingOperation[Op]] = { + implicit val pendingOperationGetResult: GetResult[PendingOperation[Op]] = + DbPendingOperationsStore.getPendingOperationResult(opCompanion.fromTrustedByteString) + + val selectAction = + sql""" + select operation_trigger, operation_name, operation_key, operation, synchronizer_id + from common_pending_operations + where synchronizer_id = $synchronizerId + and operation_key = $operationKey + and operation_name = ${operationName.unwrap} + """.as[PendingOperation[Op]].headOption + OptionT.apply(storage.query(selectAction, functionFullName)) + } + +} + +object DbPendingOperationsStore { + + def getPendingOperationResult[Op <: HasProtocolVersionedWrapper[Op]]( + operationDeserializer: ByteString => ParsingResult[Op] + ): GetResult[PendingOperation[Op]] = GetResult { r => + import DbStorage.Implicits.getResultByteString + + PendingOperation.tryCreate( + trigger = r.<<[String], + name = r.<<[String], + key = r.<<[String], + operationBytes = r.<<[ByteString], + operationDeserializer, + synchronizerId = r.<<[String], + ) + } + + // For PostgreSQL, `setObject` with `Types.OTHER` is required for handling the custom enum type + def setOperationTriggerType(storage: DbStorage): SetParameter[PendingOperationTriggerType] = + storage.profile match { + case _: DbStorage.Profile.Postgres => + (t, pp) => pp.setObject(t.asString, Types.OTHER) + case _: DbStorage.Profile.H2 => + (t, pp) => pp.setString(t.asString) + } + +} diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/store/memory/InMemoryPendingOperationStore.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/store/memory/InMemoryPendingOperationStore.scala new file mode 100644 index 0000000000..6775641500 --- /dev/null +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/store/memory/InMemoryPendingOperationStore.scala @@ -0,0 +1,137 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
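(The conflict check in DbPendingOperationsStore.insert above hinges on running the read and the write in a single transaction at Serializable isolation. The sketch below strips the pattern down to its shape; `findExisting`, `insertRow`, `candidate`, and `Row` are hypothetical stand-ins, not names from the patch.)

import scala.concurrent.ExecutionContext
import slick.jdbc.PostgresProfile.api.*
import slick.jdbc.TransactionIsolation

def checkAndInsert[Row](
    findExisting: DBIO[Option[Row]], // hypothetical: select by the unique key
    insertRow: DBIO[Int], // hypothetical: plain insert of `candidate`
    candidate: Row,
)(implicit ec: ExecutionContext): DBIO[Either[String, Unit]] =
  findExisting
    .flatMap {
      // Same key but different data: reject without touching the table.
      case Some(existing) if existing != candidate =>
        DBIO.successful(Left("conflicting row for the same key"))
      // Identical row already present: idempotent success.
      case Some(_) => DBIO.successful(Right(()))
      case None => insertRow.map(_ => Right(()))
    }
    // Serializable isolation closes the race between the check and the insert.
    .transactionally
    .withTransactionIsolation(TransactionIsolation.Serializable)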
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.store.memory + +import cats.data.{EitherT, OptionT} +import com.digitalasset.canton.config.CantonRequireTypes.NonEmptyString +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.store.PendingOperation.ConflictingPendingOperationError +import com.digitalasset.canton.store.memory.InMemoryPendingOperationStore.compositeKey +import com.digitalasset.canton.store.{PendingOperation, PendingOperationStore} +import com.digitalasset.canton.topology.SynchronizerId +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.version.{HasProtocolVersionedWrapper, VersioningCompanion} +import com.google.common.annotations.VisibleForTesting +import com.google.protobuf.ByteString + +import scala.collection.concurrent.TrieMap +import scala.concurrent.{ExecutionContext, blocking} +import scala.util.Try + +class InMemoryPendingOperationStore[Op <: HasProtocolVersionedWrapper[Op]]( + override protected val opCompanion: VersioningCompanion[Op] +)(implicit + val executionContext: ExecutionContext +) extends PendingOperationStore[Op] { + + // Allows tests to bypass validation and insert malformed data into the store + @VisibleForTesting + private[memory] val store = + TrieMap.empty[ + (SynchronizerId, String, NonEmptyString), + InMemoryPendingOperationStore.StoredPendingOperation, + ] + + override def insert( + operation: PendingOperation[Op] + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, ConflictingPendingOperationError, Unit] = + EitherT.fromEither[FutureUnlessShutdown] { + blocking { + store.synchronized { + val existingOperationO = + store.get(operation.compositeKey).map(_.toPendingOperation(opCompanion)) + existingOperationO match { + case Some(existingOperation) if existingOperation != operation => + Left( + ConflictingPendingOperationError( + operation.synchronizerId, + operation.key, + operation.name, + ) + ) + case _ => + val storedOperation = + InMemoryPendingOperationStore.StoredPendingOperation.fromPendingOperation(operation) + store.putIfAbsent(operation.compositeKey, storedOperation).discard + Right(()) + } + } + } + } + + override def delete( + synchronizerId: SynchronizerId, + operationKey: String, + operationName: NonEmptyString, + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { + store.remove(compositeKey(synchronizerId, operationKey, operationName)).discard + FutureUnlessShutdown.pure(()) + } + + override def get( + synchronizerId: SynchronizerId, + operationKey: String, + operationName: NonEmptyString, + )(implicit traceContext: TraceContext): OptionT[FutureUnlessShutdown, PendingOperation[Op]] = { + val resultF = FutureUnlessShutdown.fromTry(Try { + store + .get(compositeKey(synchronizerId, operationKey, operationName)) + .map(_.toPendingOperation(opCompanion)) + }) + OptionT(resultF) + } +} + +object InMemoryPendingOperationStore { + + /* + * The following members are exposed with `private[memory]` visibility for testing only. + * This allows tests to bypass validation and insert malformed data to verify + * the store's behavior when reading corrupt records. 
+ */ + @VisibleForTesting + private[memory] final case class StoredPendingOperation( + trigger: String, + serializedSynchronizerId: String, + key: String, + name: String, + serializedOperation: ByteString, + ) { + def toPendingOperation[Op <: HasProtocolVersionedWrapper[Op]]( + opCompanion: VersioningCompanion[Op] + ): PendingOperation[Op] = + PendingOperation.tryCreate( + trigger, + name, + key, + serializedOperation, + opCompanion.fromTrustedByteString, + serializedSynchronizerId, + ) + } + + @VisibleForTesting + private[memory] object StoredPendingOperation { + def fromPendingOperation[Op <: HasProtocolVersionedWrapper[Op]]( + po: PendingOperation[Op] + ): StoredPendingOperation = + StoredPendingOperation( + po.trigger.asString, + po.synchronizerId.toProtoPrimitive, + po.key, + po.name.unwrap, + po.operation.toByteString, + ) + } + + private def compositeKey( + synchronizerId: SynchronizerId, + operationKey: String, + operationName: NonEmptyString, + ): (SynchronizerId, String, NonEmptyString) = + (synchronizerId, operationKey, operationName) +} diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerReadService.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerReadService.scala index 60ba3e8e43..1503096996 100644 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerReadService.scala +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerReadService.scala @@ -18,16 +18,16 @@ import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.networking.grpc.CantonGrpcUtil -import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.wrapErrUS +import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.{mapErrNewEUS, wrapErrUS} import com.digitalasset.canton.protocol.v30 import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult -import com.digitalasset.canton.topology import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.admin.v30.* import com.digitalasset.canton.topology.admin.{grpc, v30 as adminProto} import com.digitalasset.canton.topology.client.SynchronizerTopologyClient import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} +import com.digitalasset.canton.topology.store.StoredTopologyTransaction.GenericStoredTopologyTransaction import com.digitalasset.canton.topology.store.StoredTopologyTransactions.GenericStoredTopologyTransactions import com.digitalasset.canton.topology.store.{ StoredTopologyTransaction, @@ -36,11 +36,15 @@ import com.digitalasset.canton.topology.store.{ } import com.digitalasset.canton.topology.transaction.* import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} -import com.digitalasset.canton.util.{EitherTUtil, GrpcStreamingUtils} +import com.digitalasset.canton.util.{EitherTUtil, GrpcStreamingUtils, MonadUtil} import com.digitalasset.canton.version.ProtocolVersion +import com.digitalasset.canton.{ProtoDeserializationError, topology} import com.google.protobuf.ByteString import com.google.protobuf.timestamp.Timestamp import io.grpc.stub.StreamObserver +import org.apache.pekko.NotUsed +import org.apache.pekko.stream.Materializer +import 
org.apache.pekko.stream.scaladsl.{Sink, Source} import java.io.OutputStream import scala.concurrent.{ExecutionContext, Future} @@ -111,7 +115,7 @@ class GrpcTopologyManagerReadService( physicalSynchronizerIdLookup: PSIdLookup, processingTimeout: ProcessingTimeout, val loggerFactory: NamedLoggerFactory, -)(implicit val ec: ExecutionContext) +)(implicit val ec: ExecutionContext, materializer: Materializer) extends adminProto.TopologyManagerReadServiceGrpc.TopologyManagerReadService with NamedLogging { @@ -535,6 +539,7 @@ class GrpcTopologyManagerReadService( } yield { def partyPredicate(x: PartyToParticipant) = x.partyId.toProtoPrimitive.startsWith(request.filterParty) + def participantPredicate(x: PartyToParticipant) = request.filterParticipant.isEmpty || x.participantIds.exists( _.toProtoPrimitive.contains(request.filterParticipant) @@ -694,6 +699,51 @@ class GrpcTopologyManagerReadService( CantonGrpcUtil.mapErrNewEUS(res) } + override def exportTopologySnapshotV2( + request: ExportTopologySnapshotV2Request, + responseObserver: StreamObserver[ExportTopologySnapshotV2Response], + ): Unit = + GrpcStreamingUtils.streamToClient[ExportTopologySnapshotV2Response]( + (out: OutputStream) => getTopologySnapshotV2(request, out), + responseObserver, + byteString => ExportTopologySnapshotV2Response(byteString), + processingTimeout.unbounded.duration, + ) + + private def getTopologySnapshotV2( + request: ExportTopologySnapshotV2Request, + out: OutputStream, + ): Future[Unit] = { + implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext + val res = for { + baseQuery <- wrapErrUS(BaseQuery.fromProto(request.baseQuery)) + excludeTopologyMappings <- wrapErrUS( + request.excludeMappings.traverse(TopologyMapping.Code.fromString) + ) + types = TopologyMapping.Code.all.diff(excludeTopologyMappings) + storedTopologyTransactions <- listAllStoredTopologyTransactions( + baseQuery, + types, + request.filterNamespace, + ) + + protocolVersion = baseQuery.protocolVersion.getOrElse(ProtocolVersion.latest) + _ <- wrapErrUS( + EitherT.fromEither[FutureUnlessShutdown]( + MonadUtil + .sequentialTraverse(storedTopologyTransactions.result)( + _.writeDelimitedTo(protocolVersion, out) + ) + .leftMap(error => + ProtoDeserializationError + .ValueConversionError("topology_snapshot", error): ProtoDeserializationError + ) + ) + ) + } yield () + CantonGrpcUtil.mapErrNewEUS(res) + } + private def listAllStoredTopologyTransactions( baseQuery: BaseQuery, topologyMappings: Seq[TopologyMapping.Code], @@ -749,74 +799,99 @@ class GrpcTopologyManagerReadService( filterSynchronizerStore: Option[StoreId], timestamp: Option[Timestamp], out: OutputStream, - ): Future[Unit] = { - implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext - - val res: EitherT[FutureUnlessShutdown, RpcError, Unit] = - for { - _ <- member match { - case _: ParticipantId => - wrapErrUS( - ProtoConverter - .required("filter_synchronizer_store", filterSynchronizerStore) - ) + ): Future[Unit] = + for { + (protocolVersion, source) <- getGenesisStateSource(filterSynchronizerStore, timestamp) + storedTransactions <- source.runWith(Sink.seq) + } yield StoredTopologyTransactions(storedTransactions) + .toByteString(protocolVersion) + .writeTo(out) + + override def genesisStateV2( + request: GenesisStateV2Request, + responseObserver: StreamObserver[GenesisStateV2Response], + ): Unit = GrpcStreamingUtils.streamToClient( + (out: OutputStream) => getGenesisStateV2(request.synchronizerStore, request.timestamp, out), + responseObserver, + 
byteString => GenesisStateV2Response(byteString), + processingTimeout.unbounded.duration, + ) - case _ => EitherT.rightT[FutureUnlessShutdown, RpcError](()) + private def getGenesisStateV2( + filterSynchronizerStore: Option[StoreId], + timestamp: Option[Timestamp], + out: OutputStream, + ): Future[Unit] = + for { + (protocolVersion, source) <- getGenesisStateSource(filterSynchronizerStore, timestamp) + _ <- source.runWith( + Sink.foreachAsync(1) { stored => + val result = stored.writeDelimitedTo(protocolVersion, out) + Future.fromTry(result.leftMap(new IllegalStateException(_)).toTry) } - topologyStoreO <- wrapErrUS( - filterSynchronizerStore.traverse( - grpc.TopologyStoreId.fromProtoV30(_, "filter_synchronizer_store") - ) - ) - synchronizerTopologyStore <- collectSynchronizerStore(topologyStoreO) - timestampO <- wrapErrUS( - timestamp - .traverse(CantonTimestamp.fromProtoTimestamp) - ) + ) + } yield () - sequencedTimestamp <- timestampO match { - case Some(value) => EitherT.rightT[FutureUnlessShutdown, RpcError](value) - case None => - val sequencedTimeF = synchronizerTopologyStore - .maxTimestamp(SequencedTime.MaxValue, includeRejected = true) - .map { - case Some((sequencedTime, _)) => - Right(sequencedTime.value) - - case None => - Left( - TopologyManagerError.TopologyTransactionNotFound.EmptyStore() - ) - } - - EitherT(sequencedTimeF) - } + private def getGenesisStateSource( + filterSynchronizerStore: Option[StoreId], + timestamp: Option[Timestamp], + ): Future[(ProtocolVersion, Source[GenericStoredTopologyTransaction, NotUsed])] = { + implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext - topologySnapshot <- EitherT.right[RpcError]( - synchronizerTopologyStore.findEssentialStateAtSequencedTime( - SequencedTime(sequencedTimestamp), - includeRejected = false, + val sourceEUS = for { + _ <- member match { + case _: ParticipantId => + wrapErrUS( + ProtoConverter + .required("filter_synchronizer_store", filterSynchronizerStore) ) + + case _ => EitherT.rightT[FutureUnlessShutdown, RpcError](()) + } + topologyStoreO <- wrapErrUS( + filterSynchronizerStore.traverse( + grpc.TopologyStoreId.fromProtoV30(_, "filter_synchronizer_store") ) - // reset effective time and sequenced time if we are initializing the sequencer from the beginning - genesisState: StoredTopologyTransactions[TopologyChangeOp, TopologyMapping] = - StoredTopologyTransactions[TopologyChangeOp, TopologyMapping]( - topologySnapshot.result.map(stored => - StoredTopologyTransaction( - SequencedTime(SignedTopologyTransaction.InitialTopologySequencingTime), - EffectiveTime(SignedTopologyTransaction.InitialTopologySequencingTime), - stored.validUntil.map(_ => - EffectiveTime(SignedTopologyTransaction.InitialTopologySequencingTime) - ), - stored.transaction, - stored.rejectionReason, - ) - ) - ) - } yield { - genesisState.toByteString(ProtocolVersion.latest).writeTo(out) + ) + synchronizerTopologyStore <- collectSynchronizerStore(topologyStoreO) + timestampO <- wrapErrUS( + timestamp + .traverse(CantonTimestamp.fromProtoTimestamp) + ) + + sequencedTimestamp <- timestampO match { + case Some(value) => EitherT.rightT[FutureUnlessShutdown, RpcError](value) + case None => + val sequencedTimeF = synchronizerTopologyStore + .maxTimestamp(SequencedTime.MaxValue, includeRejected = true) + .map { + case Some((sequencedTime, _)) => + Right(sequencedTime.value) + + case None => + Left(TopologyManagerError.TopologyTransactionNotFound.EmptyStore(): RpcError) + } + + EitherT(sequencedTimeF) } - CantonGrpcUtil.mapErrNewEUS(res) 
+ + } yield synchronizerTopologyStore.protocolVersion -> synchronizerTopologyStore + .findEssentialStateAtSequencedTime( + SequencedTime(sequencedTimestamp), + includeRejected = false, + ) + .map(stored => + StoredTopologyTransaction( + SequencedTime(SignedTopologyTransaction.InitialTopologySequencingTime), + EffectiveTime(SignedTopologyTransaction.InitialTopologySequencingTime), + stored.validUntil + .map(_ => EffectiveTime(SignedTopologyTransaction.InitialTopologySequencingTime)), + stored.transaction, + stored.rejectionReason, + ) + ) + + mapErrNewEUS(sourceEUS) } override def logicalUpgradeState( @@ -831,32 +906,42 @@ class GrpcTopologyManagerReadService( private def getLogicalUpgradeState( out: OutputStream - ): Future[Unit] = { - implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext + ): Future[Unit] = + for { + (protocolVersion, source) <- getLogicalUpgradeStateSource() + _ <- source.runWith( + Sink.foreachAsync(1) { stored => + val result = stored.writeDelimitedTo(protocolVersion, out) + Future.fromTry(result.leftMap(new IllegalStateException(_)).toTry) + } + ) + } yield () - val res: EitherT[FutureUnlessShutdown, RpcError, Unit] = - for { - synchronizerTopologyStore <- collectSynchronizerStore(None) + private def getLogicalUpgradeStateSource() + : Future[(ProtocolVersion, Source[GenericStoredTopologyTransaction, NotUsed])] = { + implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext - topologyClient <- EitherT.fromEither[FutureUnlessShutdown]( - topologyClientLookup(synchronizerTopologyStore.storeId).toRight( - TopologyManagerError.TopologyStoreUnknown.Failure(synchronizerTopologyStore.storeId) - ) - ) + val sourceEUS = for { + synchronizerTopologyStore <- collectSynchronizerStore(None) - topologySnapshot = topologyClient.currentSnapshotApproximation - _ <- EitherT.fromOptionF( - fopt = topologySnapshot.isSynchronizerUpgradeOngoing(), - ifNone = TopologyManagerError.NoOngoingSynchronizerUpgrade.Failure(), + topologyClient <- EitherT.fromEither[FutureUnlessShutdown]( + topologyClientLookup(synchronizerTopologyStore.storeId).toRight( + TopologyManagerError.TopologyStoreUnknown.Failure(synchronizerTopologyStore.storeId) ) + ) - topologySnapshot <- EitherT.right[RpcError]( - synchronizerTopologyStore.findEssentialStateAtSequencedTime( - SequencedTime(topologySnapshot.timestamp), - includeRejected = false, - ) + topologySnapshot = topologyClient.currentSnapshotApproximation + _ <- EitherT.fromOptionF( + fopt = topologySnapshot.synchronizerUpgradeOngoing(), + ifNone = TopologyManagerError.NoOngoingSynchronizerUpgrade.Failure(): RpcError, + ) + } yield { + synchronizerTopologyStore.protocolVersion -> synchronizerTopologyStore + .findEssentialStateAtSequencedTime( + SequencedTime(topologySnapshot.timestamp), + includeRejected = false, ) - nonLogicalUpgradeMappings = topologySnapshot.filter { stored => + .filter { stored => val isNonLSU = !TopologyMapping.Code.logicalSynchronizerUpgradeMappings.contains(stored.mapping.code) val isFullyAuthorizedOrNotExpiredProposal = @@ -864,11 +949,8 @@ class GrpcTopologyManagerReadService( isNonLSU && isFullyAuthorizedOrNotExpiredProposal } - - } yield { - nonLogicalUpgradeMappings.toByteString(ProtocolVersion.latest).writeTo(out) - } - CantonGrpcUtil.mapErrNewEUS(res) + } + CantonGrpcUtil.mapErrNewEUS(sourceEUS) } override def listSynchronizerUpgradeAnnouncement( diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerWriteService.scala 
b/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerWriteService.scala index 5ab3db6df3..1ec9486805 100644 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerWriteService.scala +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerWriteService.scala @@ -9,9 +9,11 @@ import cats.syntax.either.* import cats.syntax.parallel.* import cats.syntax.traverse.* import com.digitalasset.base.error.RpcError -import com.digitalasset.canton.ProtoDeserializationError -import com.digitalasset.canton.ProtoDeserializationError.{FieldNotSet, ProtoDeserializationFailure} -import com.digitalasset.canton.config.NonNegativeFiniteDuration +import com.digitalasset.canton.ProtoDeserializationError.{ + FieldNotSet, + ProtoDeserializationFailure, + ValueConversionError, +} import com.digitalasset.canton.crypto.* import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} @@ -23,13 +25,18 @@ import com.digitalasset.canton.topology.admin.v30 import com.digitalasset.canton.topology.admin.v30.* import com.digitalasset.canton.topology.admin.v30.AuthorizeRequest.{Proposal, Type} import com.digitalasset.canton.topology.store.TopologyStoreId.TemporaryStore -import com.digitalasset.canton.topology.store.{StoredTopologyTransactions, TopologyStoreId} +import com.digitalasset.canton.topology.store.{ + StoredTopologyTransaction, + StoredTopologyTransactions, + TopologyStoreId, +} import com.digitalasset.canton.topology.transaction.* import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction import com.digitalasset.canton.topology.transaction.TopologyTransaction.TxHash import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} import com.digitalasset.canton.util.{EitherTUtil, ErrorUtil, GrpcStreamingUtils} import com.digitalasset.canton.version.{ProtocolVersion, ProtocolVersionValidation} +import com.digitalasset.canton.{ProtoDeserializationError, config} import com.google.protobuf.ByteString import com.google.protobuf.duration.Duration import io.grpc.stub.StreamObserver @@ -124,7 +131,9 @@ class GrpcTopologyManagerWriteService( waitToBecomeEffectiveO <- EitherT .fromEither[FutureUnlessShutdown]( waitToBecomeEffective - .traverse(NonNegativeFiniteDuration.fromProtoPrimitive("wait_to_become_effective")) + .traverse( + config.NonNegativeFiniteDuration.fromProtoPrimitive("wait_to_become_effective") + ) .leftMap(ProtoDeserializationFailure.Wrap(_)) ) (op, serial, validatedMapping, signingKeys, forceChanges) = mapping @@ -200,7 +209,9 @@ class GrpcTopologyManagerWriteService( waitToBecomeEffectiveO <- EitherT .fromEither[FutureUnlessShutdown]( request.waitToBecomeEffective - .traverse(NonNegativeFiniteDuration.fromProtoPrimitive("wait_to_become_effective")) + .traverse( + config.NonNegativeFiniteDuration.fromProtoPrimitive("wait_to_become_effective") + ) .leftMap(ProtoDeserializationFailure.Wrap(_)) ) _ <- addTransactions(signedTxs, request.store, forceChanges, waitToBecomeEffectiveO) @@ -242,7 +253,9 @@ class GrpcTopologyManagerWriteService( waitToBecomeEffectiveO <- EitherT .fromEither[FutureUnlessShutdown]( waitToBecomeEffectiveP - .traverse(NonNegativeFiniteDuration.fromProtoPrimitive("wait_to_become_effective")) + .traverse( + config.NonNegativeFiniteDuration.fromProtoPrimitive("wait_to_become_effective") + ) 
.leftMap(ProtoDeserializationFailure.Wrap(_)) ) _ <- addTransactions(signedTxs, store, ForceFlags.all, waitToBecomeEffectiveO) @@ -250,11 +263,59 @@ CantonGrpcUtil.mapErrNewEUS(res) } + override def importTopologySnapshotV2( + responseObserver: StreamObserver[ImportTopologySnapshotV2Response] + ): StreamObserver[ImportTopologySnapshotV2Request] = { + implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext + GrpcStreamingUtils + .streamFromClient[ + ImportTopologySnapshotV2Request, + ImportTopologySnapshotV2Response, + (Option[v30.StoreId], Option[Duration]), + ]( + _.topologySnapshot, + req => (req.store, req.waitToBecomeEffective), + { case (topologySnapshot, (store, waitToBecomeEffective)) => + doImportTopologySnapshotV2(topologySnapshot, waitToBecomeEffective, store) + }, + responseObserver, + ) + } + + private def doImportTopologySnapshotV2( + topologySnapshot: ByteString, + waitToBecomeEffectiveP: Option[Duration], + store: Option[v30.StoreId], + )(implicit traceContext: TraceContext): Future[ImportTopologySnapshotV2Response] = { + val res: EitherT[FutureUnlessShutdown, RpcError, ImportTopologySnapshotV2Response] = for { + storedTxs <- EitherT.fromEither[FutureUnlessShutdown]( + GrpcStreamingUtils + .parseDelimitedFromTrusted(topologySnapshot.newInput(), StoredTopologyTransaction) + .leftMap(err => + ProtoDeserializationFailure.Wrap( + ValueConversionError("topology_snapshot", err) + ): RpcError + ) + ) + signedTxs = storedTxs.map(_.transaction) + waitToBecomeEffectiveO <- EitherT + .fromEither[FutureUnlessShutdown]( + waitToBecomeEffectiveP + .traverse( + config.NonNegativeFiniteDuration.fromProtoPrimitive("wait_to_become_effective") + ) + .leftMap(ProtoDeserializationFailure.Wrap(_)) + ) + _ <- addTransactions(signedTxs, store, ForceFlags.all, waitToBecomeEffectiveO) + } yield v30.ImportTopologySnapshotV2Response() + CantonGrpcUtil.mapErrNewEUS(res) + } + private def addTransactions( signedTxs: Seq[SignedTopologyTransaction[TopologyChangeOp, TopologyMapping]], store: Option[v30.StoreId], forceChanges: ForceFlags, - waitToBecomeEffective: Option[NonNegativeFiniteDuration], + waitToBecomeEffective: Option[config.NonNegativeFiniteDuration], )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, RpcError, Unit] = @@ -342,7 +403,6 @@ class GrpcTopologyManagerWriteService( manager.managerVersion.serialization, existingTransaction, ) - .mapK(FutureUnlessShutdown.outcomeK) .leftWiden[RpcError] } yield transaction.toByteString -> transaction.hash.hash.getCryptographicEvidence } diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/util/ContractAuthenticator.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/util/ContractAuthenticator.scala deleted file mode 100644 index d53477d8d0..0000000000 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/util/ContractAuthenticator.scala +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
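(The new export and import endpoints agree on a simple wire format: each StoredTopologyTransaction is written length-delimited, and the reader parses until EOF. A round-trip sketch over in-memory streams, assuming `storedTxs` and `protocolVersion` are in scope and the `Either`-returning signatures shown elsewhere in this patch:)

import java.io.{ByteArrayInputStream, ByteArrayOutputStream}

val out = new ByteArrayOutputStream()
// Writer side, as in getTopologySnapshotV2: one delimited message per transaction.
val written = MonadUtil.sequentialTraverse(storedTxs)(_.writeDelimitedTo(protocolVersion, out))

// Reader side, as in doImportTopologySnapshotV2: parse until EOF; results come
// back in stream order, so mapping over them with `_.transaction` recovers the
// signed transactions in the order they were exported.
val readBack = GrpcStreamingUtils.parseDelimitedFromTrusted(
  new ByteArrayInputStream(out.toByteArray),
  StoredTopologyTransaction,
)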
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import cats.implicits.toBifunctorOps -import com.digitalasset.canton.crypto.{HashOps, HmacOps} -import com.digitalasset.canton.protocol.* -import com.digitalasset.daml.lf.transaction.{CreationTime, FatContractInstance, Versioned} -import com.digitalasset.daml.lf.value.Value.ContractId - -/** Contract authenticator that verifies that the payload of the contract is consistent with the - * contract id - */ -trait ContractAuthenticator { - - /** authenticates the contract based on the externally generated contract hash */ - def authenticate(contract: FatContractInstance, contractHash: LfHash): Either[String, Unit] - - /** Authenticates the contract payload and metadata (consisted of ledger create time, contract - * instance and authentication data) against the contract id. This is the legacy function as it - * does not support externally generated hashes. - */ - // TODO(#27344) - Future versions of contract hash will require a minimal type calculated by the engine - def legacyAuthenticate(contract: FatContractInstance): Either[String, Unit] - -} - -object ContractAuthenticator { - - def apply(cryptoOps: HashOps & HmacOps): ContractAuthenticator = - new ContractAuthenticatorImpl( - // This unicum generator is used for all synchronizers uniformly. This means that synchronizers cannot specify - // different unicum generator strategies (e.g., different hash functions). - new UnicumGenerator(cryptoOps) - ) - -} - -class ContractAuthenticatorImpl(unicumGenerator: UnicumGenerator) extends ContractAuthenticator { - - override def legacyAuthenticate(contract: FatContractInstance): Either[String, Unit] = - for { - idVersion <- CantonContractIdVersion.extractCantonContractIdVersion(contract.contractId) - idVersionV1 <- idVersion match { - case v: CantonContractIdV1Version => Right(v) - case other => Left(s"Unsupported contract authentication id version: $other") - } - contractHash = LegacyContractHash.tryFatContractHash( - contract, - idVersionV1.useUpgradeFriendlyHashing, - ) - result <- authenticate(contract, contractHash) - } yield result - - override def authenticate( - contract: FatContractInstance, - contractHash: LfHash, - ): Either[String, Unit] = { - val gk = contract.contractKeyWithMaintainers.map(Versioned(contract.version, _)) - for { - metadata <- ContractMetadata.create(contract.signatories, contract.stakeholders, gk) - contractIdVersion <- CantonContractIdVersion - .extractCantonContractIdVersion(contract.contractId) - authenticationData <- ContractAuthenticationData - .fromLfBytes(contractIdVersion, contract.authenticationData) - .leftMap(_.toString) - _ <- authenticate( - contract.contractId, - contractIdVersion, - authenticationData, - contract.createdAt, - metadata, - contractHash, - ) - } yield () - } - - private def authenticate( - contractId: LfContractId, - contractIdVersion: CantonContractIdVersion, - authenticationData: ContractAuthenticationData, - ledgerTime: CreationTime, - metadata: ContractMetadata, - contractHash: LfHash, - ): Either[String, Unit] = { - val ContractId.V1(_, cantonContractSuffix) = contractId match { - case cid: LfContractId.V1 => cid - case _ => sys.error("ContractId V2 are not supported") - } - val createdAt = ledgerTime match { - case x: CreationTime.CreatedAt => x - case CreationTime.Now => - sys.error("Cannot authenticate contract with creation time Now") - } - contractIdVersion match { - case contractIdVersion: CantonContractIdV1Version => - for { - salt <- 
authenticationData match { - case ContractAuthenticationDataV1(salt) => Right(salt) - case _: ContractAuthenticationDataV2 => - Left("Cannot authenticate contract with V1 contract ID via a V2 authentication data") - } - recomputedUnicum <- unicumGenerator.recomputeUnicum( - salt, - createdAt, - metadata, - contractHash, - ) - recomputedSuffix = recomputedUnicum - .toContractIdSuffix(contractIdVersion) - _ <- Either.cond( - recomputedSuffix == cantonContractSuffix, - (), - s"Mismatching contract id suffixes. Expected: $recomputedSuffix vs actual: $cantonContractSuffix", - ) - } yield () - case _ => - // TODO(#23971) implement this for V2 - Left(s"Unsupported contract ID version $contractIdVersion") - } - } - -} diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/util/ContractHasher.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/util/ContractHasher.scala new file mode 100644 index 0000000000..f4ff6bae11 --- /dev/null +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/util/ContractHasher.scala @@ -0,0 +1,49 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import cats.data.EitherT +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.protocol.LfNodeCreate +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.PackageConsumer.{ContinueOnInterruption, PackageResolver} +import com.digitalasset.daml.lf.crypto.Hash +import com.digitalasset.daml.lf.engine.Engine + +import scala.concurrent.ExecutionContext + +trait ContractHasher { + + def hash( + create: LfNodeCreate, + hashingMethod: Hash.HashingMethod, + )(implicit + ec: ExecutionContext, + traceContext: TraceContext, + ): EitherT[FutureUnlessShutdown, String, Hash] + +} + +object ContractHasher { + + def apply(engine: Engine, packageResolver: PackageResolver): ContractHasher = + new Impl(engine, packageResolver) + + private class Impl( + delegate: Engine, + packageResolver: PackageResolver, + continueOnInterruption: ContinueOnInterruption = () => true, + ) extends PackageConsumer(packageResolver, continueOnInterruption) + with ContractHasher { + override def hash( + create: LfNodeCreate, + hashingMethod: Hash.HashingMethod, + )(implicit + ec: ExecutionContext, + traceContext: TraceContext, + ): EitherT[FutureUnlessShutdown, String, Hash] = + consume(delegate.hashCreateNode(create, identity, hashingMethod)) + } + +} diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/util/ContractValidator.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/util/ContractValidator.scala index 319db08f84..96abc7a88c 100644 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/util/ContractValidator.scala +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/util/ContractValidator.scala @@ -5,6 +5,7 @@ package com.digitalasset.canton.util import cats.data.EitherT import cats.implicits.toBifunctorOps +import com.daml.logging.LoggingContext import com.digitalasset.canton.LfPackageId import com.digitalasset.canton.crypto.{HashOps, HmacOps} import com.digitalasset.canton.lifecycle.FutureUnlessShutdown @@ -32,6 +33,7 @@ trait ContractValidator { )(implicit ec: ExecutionContext, traceContext: TraceContext, + loggingContext: LoggingContext, ): EitherT[FutureUnlessShutdown, String, Unit] /** Authenticate the contract hash by 
recomputing the contract id suffix and checking it the one @@ -48,13 +50,13 @@ object ContractValidator { engine: Engine, packageResolver: PackageResolver, ): ContractValidator = - new ContractValidatorImpl( + new Impl( new UnicumGenerator(cryptoOps), LfContractValidation(engine, packageResolver), ) // TODO(#23971) add support for V2 contract ids - private class ContractValidatorImpl( + private class Impl( unicumGenerator: UnicumGenerator, lfContractValidation: LfContractValidation, ) extends ContractValidator { @@ -62,6 +64,7 @@ object ContractValidator { def authenticate(contract: FatContractInstance, targetPackageId: LfPackageId)(implicit ec: ExecutionContext, traceContext: TraceContext, + loggingContext: LoggingContext, ): EitherT[FutureUnlessShutdown, String, Unit] = for { contractIdVersion <- EitherT.fromEither[FutureUnlessShutdown]( @@ -70,6 +73,7 @@ object ContractValidator { result <- lfContractValidation.validate( contract, targetPackageId, + identity, contractIdVersion.contractHashingMethod, hash => authenticateHashInternal(contract, hash, contractIdVersion).isRight, ) @@ -129,4 +133,21 @@ object ContractValidator { } yield () } } + + object AllowAll extends ContractValidator { + override def authenticate( + contract: FatContractInstance, + targetPackageId: Ref.PackageId, + )(implicit + ec: ExecutionContext, + traceContext: TraceContext, + loggingContext: LoggingContext, + ): EitherT[FutureUnlessShutdown, String, Unit] = + EitherT.pure(()) + + override def authenticateHash( + contract: FatContractInstance, + contractHash: LfHash, + ): Either[String, Unit] = Right(()) + } } diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/util/GrpcStreamingUtils.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/util/GrpcStreamingUtils.scala index c80a786bda..64d1f67ccb 100644 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/util/GrpcStreamingUtils.scala +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/util/GrpcStreamingUtils.scala @@ -162,8 +162,7 @@ object GrpcStreamingUtils { * [[scalapb.GeneratedMessage#writeDelimitedTo]] directly. * * @return - * either an error, or a list of versioned message instances in reverse order as appeared in - * the given stream + * either an error, or a list of versioned message instances */ def parseDelimitedFromTrusted[ValueClass <: HasRepresentativeProtocolVersion]( stream: InputStream, @@ -180,8 +179,7 @@ object GrpcStreamingUtils { * [[scalapb.GeneratedMessage#writeDelimitedTo]] directly. * * @return - * either an error, or a list of versioned message instances in reverse order as appeared in - * the given stream + * either an error, or a list of versioned message instances */ def parseDelimitedFromTrusted[ValueClass]( stream: InputStream, @@ -206,7 +204,7 @@ object GrpcStreamingUtils { read(value :: acc) } case None => - Right(acc) + Right(acc.reverse) } read(Nil) } diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/util/LegacyContractHash.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/util/LegacyContractHash.scala deleted file mode 100644 index 806293ad4f..0000000000 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/util/LegacyContractHash.scala +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
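(The GrpcStreamingUtils change above is an ordering fix: the read loop prepends each parsed value, so the accumulator holds items in reverse, and the final `acc.reverse` restores stream order, which the updated scaladoc now reflects. The same prepend-then-reverse pattern in isolation, as a generic sketch rather than patch code:)

import scala.annotation.tailrec

// Prepending is O(1) per element; one O(n) reverse at end-of-stream restores
// first-read-first order, which returning `acc` directly would lose.
@tailrec
def readAll[A](next: () => Option[A], acc: List[A] = Nil): List[A] =
  next() match {
    case Some(value) => readAll(next, value :: acc)
    case None => acc.reverse
  }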
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import cats.implicits.toBifunctorOps -import com.digitalasset.canton.protocol.{CantonContractIdV1Version, CantonContractIdVersion, LfHash} -import com.digitalasset.daml.lf.transaction.FatContractInstance -import com.digitalasset.daml.lf.value.Value.ThinContractInstance - -import scala.util.Try - -/** This class is used in the places where the hash has not been provided by the engine - */ -// TODO(#27344) - Future versions of contract hash will require a minimal type calculated by the engine -object LegacyContractHash { - - def tryThinContractHash( - contractInstance: ThinContractInstance, - upgradeFriendly: Boolean, - ): LfHash = - LfHash.assertHashContractInstance( - contractInstance.template, - contractInstance.arg, - contractInstance.packageName, - upgradeFriendly = upgradeFriendly, - ) - - def tryFatContractHash(contractInstance: FatContractInstance, upgradeFriendly: Boolean): LfHash = - LfHash.assertHashContractInstance( - contractInstance.templateId, - contractInstance.createArg, - contractInstance.packageName, - upgradeFriendly = upgradeFriendly, - ) - - def fatContractHash(contractInstance: FatContractInstance): Either[String, LfHash] = - for { - idVersion <- CantonContractIdVersion - .extractCantonContractIdVersion(contractInstance.contractId) - idVersionV1 <- idVersion match { - case v: CantonContractIdV1Version => Right(v) - case other => Left(s"Unsupported contract authentication id version: $other") - } - result <- Try( - tryFatContractHash(contractInstance, idVersionV1.useUpgradeFriendlyHashing) - ).toEither.leftMap { e => - s"Failed to compute contract hash for contract id ${contractInstance.contractId}: $e" - } - } yield result - -} diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/util/LfContractValidation.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/util/LfContractValidation.scala index d338b74252..5a8eaf5100 100644 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/util/LfContractValidation.scala +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/util/LfContractValidation.scala @@ -4,41 +4,31 @@ package com.digitalasset.canton.util import cats.data.EitherT +import com.daml.logging.LoggingContext import com.digitalasset.canton.LfPackageId import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.protocol.LfNodeCreate import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.PackageConsumer.{ContinueOnInterruption, PackageResolver} import com.digitalasset.daml.lf.crypto.Hash -import com.digitalasset.daml.lf.data.Bytes import com.digitalasset.daml.lf.data.Ref.PackageId -import com.digitalasset.daml.lf.engine.{ContractValidation, Engine} -import com.digitalasset.daml.lf.transaction.{CreationTime, FatContractInstance} +import com.digitalasset.daml.lf.engine.Engine +import com.digitalasset.daml.lf.transaction.FatContractInstance +import com.digitalasset.daml.lf.value.Value.ContractId import scala.concurrent.ExecutionContext -trait LfContractHasher { - - def hash( - create: LfNodeCreate, - hashingMethod: Hash.HashingMethod, - )(implicit - ec: ExecutionContext, - traceContext: TraceContext, - ): EitherT[FutureUnlessShutdown, String, Hash] - -} - -trait LfContractValidation extends LfContractHasher { +trait LfContractValidation { def validate( instance: FatContractInstance, targetPackageId: LfPackageId, + contractIdSubstitution: ContractId => 
ContractId, hashingMethod: Hash.HashingMethod, idValidator: Hash => Boolean, )(implicit ec: ExecutionContext, traceContext: TraceContext, + loggingContext: LoggingContext, ): EitherT[FutureUnlessShutdown, String, Unit] } @@ -46,10 +36,10 @@ trait LfContractValidation extends LfContractHasher { object LfContractValidation { def apply(engine: Engine, packageResolver: PackageResolver): LfContractValidation = - new LfContractValidationImpl(ContractValidation(engine), packageResolver) + new Impl(engine, packageResolver) - private class LfContractValidationImpl( - delegate: ContractValidation, + private class Impl( + delegate: Engine, packageResolver: PackageResolver, continueOnInterruption: ContinueOnInterruption = () => true, ) extends PackageConsumer(packageResolver, continueOnInterruption) @@ -58,28 +48,24 @@ object LfContractValidation { override def validate( instance: FatContractInstance, targetPackageId: PackageId, + contractIdSubstitution: ContractId => ContractId, hashingMethod: Hash.HashingMethod, idValidator: Hash => Boolean, )(implicit ec: ExecutionContext, traceContext: TraceContext, + loggingContext: LoggingContext, ): EitherT[FutureUnlessShutdown, String, Unit] = consume( - delegate.validate(instance, targetPackageId, hashingMethod, idValidator = idValidator) - ).subflatMap(identity) - - override def hash( - create: LfNodeCreate, - hashingMethod: Hash.HashingMethod, - )(implicit - ec: ExecutionContext, - traceContext: TraceContext, - ): EitherT[FutureUnlessShutdown, String, Hash] = { + delegate.validateContractInstance( + instance, + targetPackageId, + contractIdSubstitution, + hashingMethod, + idValidator = idValidator, + ) + ).subflatMap(e => e.left.map(_.toString)) - // TODO(#23876) - provide method that takes a create node - val contract = FatContractInstance.fromCreateNode(create, CreationTime.Now, Bytes.Empty) - consume(delegate.hash(contract, create.templateId.packageId, hashingMethod)) - } } } diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/version/DamlLfVersionToProtocolVersions.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/version/DamlLfVersionToProtocolVersions.scala deleted file mode 100644 index 91e3d4a8b6..0000000000 --- a/canton/community/common/src/main/scala/com/digitalasset/canton/version/DamlLfVersionToProtocolVersions.scala +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.version - -import com.digitalasset.canton.protocol.LfLanguageVersion - -import scala.collection.immutable.SortedMap -import scala.math.Ordered.orderingToOrdered - -object DamlLfVersionToProtocolVersions { - - /** This Map links the Daml Lf-version to the minimum protocol version that supports it. 
*/ - val damlLfVersionToMinimumProtocolVersions: SortedMap[LfLanguageVersion, ProtocolVersion] = - SortedMap( - LfLanguageVersion.v2_1 -> ProtocolVersion.v34, - LfLanguageVersion.v2_dev -> ProtocolVersion.dev, - ) - - def getMinimumSupportedProtocolVersion( - transactionVersion: LfLanguageVersion - ): ProtocolVersion = { - assert( - transactionVersion >= LfLanguageVersion.v2_1, - s"Canton only supports transaction versions more recent or equal to ${LfLanguageVersion.v2_1}", - ) - damlLfVersionToMinimumProtocolVersions(transactionVersion) - } - -} diff --git a/canton/community/common/src/main/scala/com/digitalasset/canton/version/LfSerializationVersionToProtocolVersions.scala b/canton/community/common/src/main/scala/com/digitalasset/canton/version/LfSerializationVersionToProtocolVersions.scala new file mode 100644 index 0000000000..c4b3880669 --- /dev/null +++ b/canton/community/common/src/main/scala/com/digitalasset/canton/version/LfSerializationVersionToProtocolVersions.scala @@ -0,0 +1,31 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.version + +import com.digitalasset.canton.protocol.LfSerializationVersion + +import scala.collection.immutable.SortedMap +import scala.math.Ordered.orderingToOrdered + +object LfSerializationVersionToProtocolVersions { + + /** This map links the LF serialization version to the minimum protocol version that supports it. */ + val lfSerializationVersionToMinimumProtocolVersions + : SortedMap[LfSerializationVersion, ProtocolVersion] = + SortedMap( + LfSerializationVersion.V1 -> ProtocolVersion.v34, + LfSerializationVersion.VDev -> ProtocolVersion.dev, + ) + + def getMinimumSupportedProtocolVersion( + serializationVersion: LfSerializationVersion + ): ProtocolVersion = { + assert( + serializationVersion >= LfSerializationVersion.V1, + s"Canton only supports LF serialization versions more recent or equal to ${LfSerializationVersion.V1}", + ) + lfSerializationVersionToMinimumProtocolVersions(serializationVersion) + } + +} diff --git a/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.AnotherTestDriverFactory b/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.AnotherTestDriverFactory new file mode 100644 index 0000000000..6114834ba5 --- /dev/null +++ b/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.AnotherTestDriverFactory @@ -0,0 +1 @@ +com.digitalasset.canton.driver.AnotherTestDriver1Factory diff --git a/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.TestDriverFactory b/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.TestDriverFactory new file mode 100644 index 0000000000..5958f54eeb --- /dev/null +++ b/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.TestDriverFactory @@ -0,0 +1,3 @@ +com.digitalasset.canton.driver.v1.TestDriver1Factory +com.digitalasset.canton.driver.v1.TestDriver2Factory +com.digitalasset.canton.driver.v2.TestDriver1Factory diff --git a/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.v1.TestDriverFactory b/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.v1.TestDriverFactory new file mode 100644 index 0000000000..5955296fe0 --- /dev/null +++ 
diff --git a/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.AnotherTestDriverFactory b/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.AnotherTestDriverFactory
new file mode 100644
index 0000000000..6114834ba5
--- /dev/null
+++ b/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.AnotherTestDriverFactory
@@ -0,0 +1 @@
+com.digitalasset.canton.driver.AnotherTestDriver1Factory
diff --git a/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.TestDriverFactory b/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.TestDriverFactory
new file mode 100644
index 0000000000..5958f54eeb
--- /dev/null
+++ b/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.TestDriverFactory
@@ -0,0 +1,3 @@
+com.digitalasset.canton.driver.v1.TestDriver1Factory
+com.digitalasset.canton.driver.v1.TestDriver2Factory
+com.digitalasset.canton.driver.v2.TestDriver1Factory
diff --git a/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.v1.TestDriverFactory b/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.v1.TestDriverFactory
new file mode 100644
index 0000000000..5955296fe0
--- /dev/null
+++ b/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.v1.TestDriverFactory
@@ -0,0 +1,2 @@
+com.digitalasset.canton.driver.v1.TestDriver1Factory
+com.digitalasset.canton.driver.v1.TestDriver2Factory
diff --git a/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.v2.TestDriverFactory b/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.v2.TestDriverFactory
new file mode 100644
index 0000000000..acb0cd0533
--- /dev/null
+++ b/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.v2.TestDriverFactory
@@ -0,0 +1 @@
+com.digitalasset.canton.driver.v2.TestDriver1Factory
diff --git a/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.v3.TestDriverFactory b/canton/community/common/src/test/resources/META-INF/services/com.digitalasset.canton.driver.v3.TestDriverFactory
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/DefaultDamlValues.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/DefaultDamlValues.scala
index 1ded9f1b07..fc79429405 100644
--- a/canton/community/common/src/test/scala/com/digitalasset/canton/DefaultDamlValues.scala
+++ b/canton/community/common/src/test/scala/com/digitalasset/canton/DefaultDamlValues.scala
@@ -8,7 +8,7 @@ import com.digitalasset.canton.data.DeduplicationPeriod.DeduplicationDuration
 import com.digitalasset.canton.protocol.{
   LfCommittedTransaction,
   LfHash,
-  LfLanguageVersion,
+  LfSerializationVersion,
   LfTransaction,
   LfVersionedTransaction,
 }
@@ -45,7 +45,7 @@ object DefaultDamlValues {
   lazy val emptyTransaction: LfTransaction =
     LfTransaction(nodes = Map.empty, roots = ImmArray.empty)
   lazy val emptyVersionedTransaction: LfVersionedTransaction =
-    LfVersionedTransaction(LfLanguageVersion.v2_dev, Map.empty, ImmArray.empty)
+    LfVersionedTransaction(LfSerializationVersion.VDev, Map.empty, ImmArray.empty)
   lazy val emptyCommittedTransaction: LfCommittedTransaction =
     LfCommittedTransaction.subst[Id](emptyVersionedTransaction)
 }
diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/GeneratorsLf.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/GeneratorsLf.scala
index 11a1d6fb82..7237091420 100644
--- a/canton/community/common/src/test/scala/com/digitalasset/canton/GeneratorsLf.scala
+++ b/canton/community/common/src/test/scala/com/digitalasset/canton/GeneratorsLf.scala
@@ -8,8 +8,6 @@ import com.digitalasset.canton.crypto.TestHash
 import com.digitalasset.canton.data.CantonTimestamp
 import com.digitalasset.canton.protocol.ContractIdAbsolutizer.ContractIdAbsolutizationDataV2
 import com.digitalasset.canton.protocol.{
-  AuthenticatedContractIdVersionV10,
-  AuthenticatedContractIdVersionV11,
   CantonContractIdV1Version,
   CantonContractIdV2Version,
   CantonContractIdV2Version0,
@@ -22,8 +20,9 @@ import com.digitalasset.canton.protocol.{
   LfLanguageVersion,
   LfTemplateId,
   RelativeContractIdSuffixV2,
-  TransactionId,
   Unicum,
+  UpdateId,
+  *,
 }
 import com.digitalasset.canton.topology.{GeneratorsTopology, PartyId}
 import com.digitalasset.daml.lf.data.Bytes
@@ -62,14 +61,13 @@ final class GeneratorsLf(val generatorsTopology: GeneratorsTopology) {
   locally {
     // If this pattern match is not exhaustive anymore, update the generators for CantonContractIdVersions below
     (_: CantonContractIdVersion) match {
-      case AuthenticatedContractIdVersionV10 => ()
-      case AuthenticatedContractIdVersionV11 => ()
+      case _: CantonContractIdV1Version => ()
       case CantonContractIdV2Version0 => ()
     }
   }
 
   implicit val cantonContractIdV1VersionArb: Arbitrary[CantonContractIdV1Version] =
-    Arbitrary(Gen.oneOf(AuthenticatedContractIdVersionV10, AuthenticatedContractIdVersionV11))
+    Arbitrary(Gen.oneOf(CantonContractIdVersion.allV1))
 
   implicit val cantonContractIdV2VersionArb: Arbitrary[CantonContractIdV2Version] =
     Arbitrary(Gen.const(CantonContractIdV2Version0))
@@ -128,7 +126,7 @@ final class GeneratorsLf(val generatorsTopology: GeneratorsTopology) {
       index <- Gen.posNum[Int]
       ledgerTime <- Arbitrary.arbitrary[LfTimestamp]
     } yield {
-      val creatingTransactionId = TransactionId(TestHash.digest(index))
+      val creatingTransactionId = UpdateId(TestHash.digest(index))
       val absolutizationData =
         ContractIdAbsolutizationDataV2(creatingTransactionId, CantonTimestamp(ledgerTime))
       val (_, absoluteCid) = ContractIdAbsolutizer
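Editorial note: the `locally { ... }` block in the hunk above is a compile-time exhaustiveness guard, not runtime logic. A standalone sketch of the same trick, using a hypothetical sealed hierarchy:

  sealed trait Shape
  case object Circle extends Shape
  case object Square extends Shape

  // Dead code, never executed: it exists only so that adding a new Shape
  // subtype makes the compiler emit a non-exhaustive-match warning here,
  // reminding the author to update related code (e.g. test generators).
  locally {
    (_: Shape) match {
      case Circle => ()
      case Square => ()
    }
  }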
diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/NeedsNewLfContractIds.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/NeedsNewLfContractIds.scala
index db11153b06..9b6fbbb5b7 100644
--- a/canton/community/common/src/test/scala/com/digitalasset/canton/NeedsNewLfContractIds.scala
+++ b/canton/community/common/src/test/scala/com/digitalasset/canton/NeedsNewLfContractIds.scala
@@ -11,8 +11,7 @@ trait NeedsNewLfContractIds {
 
   val hasher: () => LfHash = LfHash.secureRandom(LfHash.hashPrivateKey(loggerFactory.name))
 
-  def newLfContractId(): LfContractId = LfContractId.V1(hasher(), hasher().bytes)
-
-  def newLfContractIdUnsuffixed(): LfContractId = LfContractId.V1(hasher())
+  def newLfContractId(): LfContractId.V1 = LfContractId.V1(hasher(), hasher().bytes)
+  def newLfContractIdUnsuffixed(): LfContractId.V1 = LfContractId.V1(hasher())
 }
diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/common/sequencer/grpc/SequencerInfoLoaderTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/common/sequencer/grpc/SequencerInfoLoaderTest.scala
index 4c3da38da0..4a0d790ac7 100644
--- a/canton/community/common/src/test/scala/com/digitalasset/canton/common/sequencer/grpc/SequencerInfoLoaderTest.scala
+++ b/canton/community/common/src/test/scala/com/digitalasset/canton/common/sequencer/grpc/SequencerInfoLoaderTest.scala
@@ -17,6 +17,7 @@ import com.digitalasset.canton.networking.Endpoint
 import com.digitalasset.canton.sequencing.{
   GrpcSequencerConnection,
   SequencerConnection,
+  SequencerConnectionPoolDelays,
   SequencerConnectionValidation,
   SubmissionRequestAmplification,
 }
@@ -57,7 +58,7 @@ class SequencerInfoLoaderTest extends BaseTestWordSpec with HasExecutionContext
   private lazy val endpoint1 = Endpoint("localhost", Port.tryCreate(1001))
   private lazy val endpoint2 = Endpoint("localhost", Port.tryCreate(1002))
   private lazy val endpoint3 = Endpoint("localhost", Port.tryCreate(1003))
-  private lazy val staticSynchronizerParameters = BaseTest.defaultStaticSynchronizerParametersWith()
+  private lazy val staticSynchronizerParameters = BaseTest.defaultStaticSynchronizerParameters
   private lazy val synchronizerAlias = SynchronizerAlias.tryCreate("synchronizer1")
 
   private def mapArgs(
@@ -308,6 +309,7 @@ class SequencerInfoLoaderTest extends BaseTestWordSpec with HasExecutionContext
         sequencerTrustThreshold = PositiveInt.tryCreate(2),
         sequencerLivenessMargin = NonNegativeInt.zero,
         SubmissionRequestAmplification.NoAmplification,
+        SequencerConnectionPoolDelays.default,
         SequencerConnectionValidation.All,
         None,
       )(mapArgs(args))
diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/kms/DriverKmsTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/kms/DriverKmsTest.scala
new file mode 100644
index 0000000000..f0f618e2f3
--- /dev/null
+++ b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/kms/DriverKmsTest.scala
@@ -0,0 +1,208 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.crypto.kms
+
+import com.digitalasset.canton.concurrent.FutureSupervisor
+import com.digitalasset.canton.config.KmsConfig
+import com.digitalasset.canton.config.KmsConfig.ExponentialBackoffConfig
+import com.digitalasset.canton.crypto.kms.driver.api.v1.{
+  EncryptionAlgoSpec,
+  EncryptionKeySpec,
+  KmsDriver,
+  KmsDriverException,
+  KmsDriverHealth,
+  PublicKey,
+  SigningAlgoSpec,
+  SigningKeySpec,
+}
+import com.digitalasset.canton.crypto.kms.driver.v1.DriverKms
+import com.digitalasset.canton.time.PositiveFiniteDuration
+import com.digitalasset.canton.util.ByteString4096
+import com.digitalasset.canton.{BaseTest, HasExecutionContext, config, crypto}
+import com.typesafe.config.ConfigValueFactory
+import io.opentelemetry.context.Context
+import org.scalatest.wordspec.AsyncWordSpec
+
+import java.util.concurrent.atomic.AtomicReference
+import scala.concurrent.{ExecutionContext, Future}
+
+/** This test covers the non-functional properties when a KMS driver is misbehaving.
+  *
+  * Functionally the [[DriverKms]] is tested via the
+  * [[com.digitalasset.canton.nightly.ExternalKmsTest]].
+  */
+class DriverKmsTest extends AsyncWordSpec with BaseTest with HasExecutionContext {
+
+  private lazy val driver = new FlakyKmsDriver()
+
+  private lazy val kms = new DriverKms(
+    KmsConfig.Driver(
+      "flaky-kms",
+      ConfigValueFactory.fromAnyRef(42),
+      retries = KmsConfig.RetryConfig(failures =
+        ExponentialBackoffConfig(
+          initialDelay = config.NonNegativeFiniteDuration.ofMillis(10),
+          maxDelay = config.NonNegativeDuration.ofSeconds(1),
+          maxRetries = 1,
+        )
+      ),
+    ),
+    driver,
+    FutureSupervisor.Noop,
+    executorService,
+    PositiveFiniteDuration.tryOfSeconds(5),
+    wallClock,
+    timeouts,
+    loggerFactory,
+    directExecutionContext,
+  )
+
+  private lazy val testKeyId = KmsKeyId.tryCreate("test-key")
+
+  "DriverKms" must {
+
+    "handle retryable exception" in {
+
+      val err = loggerFactory.assertLogs(
+        kms
+          .sign(
+            testKeyId,
+            ByteString4096.empty,
+            crypto.SigningAlgorithmSpec.EcDsaSha256,
+            crypto.SigningKeySpec.EcP256,
+          )
+          .leftOrFailShutdown("sign")
+          .futureValue,
+        _.warningMessage should include(
+          s"KMS operation `signing with key $testKeyId` failed: KmsSignError"
+        ),
+      )
+
+      err shouldBe a[KmsError.KmsSignError]
+      err.retryable shouldBe true
+    }
+
+    "handle non-retryable exception" in {
+
+      val err =
+        loggerFactory.assertLogs(
+          kms
+            .generateSigningKeyPair(crypto.SigningKeySpec.EcP256)
+            .leftOrFailShutdown("generate signing keypair")
+            .futureValue,
+          _.warningMessage should include(
+            "KMS operation `generate signing key pair with name N/A` failed: KmsCreateKeyError"
+          ),
+        )
+
+      err shouldBe a[KmsError.KmsCreateKeyError]
+      err.retryable shouldBe false
+    }
+
+    "handle unexpected exception inside a future" in {
+      loggerFactory.assertLogs(
+        assertThrows[RuntimeException](
+          kms.getPublicSigningKey(testKeyId).failOnShutdown.futureValue
+        ),
+        _.warningMessage should include(
+          s"KMS operation `get signing public key for $testKeyId` failed"
+        ),
+      )
+    }
+
+    "handle unexpected exception outside of a future" in {
+      loggerFactory.assertLogs(
+        assertThrows[RuntimeException](
+          kms.generateSymmetricEncryptionKey().failOnShutdown.futureValue
+        ),
+        _.warningMessage should include(
+          "KMS operation `generate symmetric encryption key with name N/A` failed"
+        ),
+      )
+    }
+
+    "eventually report unhealthy when the driver becomes unhealthy" in {
+      driver.healthState.set(KmsDriverHealth.Failed("test"))
+
+      eventually() {
+        kms.isFailed shouldBe true
+      }
+    }
+  }
+
+}
+
+private class FlakyKmsDriver()(implicit ec: ExecutionContext) extends KmsDriver {
+
+  val healthState: AtomicReference[KmsDriverHealth] = new AtomicReference(KmsDriverHealth.Ok)
+
+  override def health: Future[KmsDriverHealth] = Future.successful(healthState.get)
+
+  override def supportedSigningKeySpecs: Set[SigningKeySpec] = ???
+
+  override def supportedSigningAlgoSpecs: Set[SigningAlgoSpec] = ???
+
+  override def supportedEncryptionKeySpecs: Set[EncryptionKeySpec] = ???
+
+  override def supportedEncryptionAlgoSpecs: Set[EncryptionAlgoSpec] = ???
+
+  override def sign(data: Array[Byte], keyId: String, algoSpec: SigningAlgoSpec)(
+      traceContext: Context
+  ): Future[Array[Byte]] =
+    Future {
+      // Simulate a retryable error
+      throw KmsDriverException(
+        new RuntimeException("signing failed"),
+        retryable = true,
+      )
+    }
+
+  override def generateSigningKeyPair(signingKeySpec: SigningKeySpec, keyName: Option[String])(
+      traceContext: Context
+  ): Future[String] =
+    Future {
+      // Simulate a non-retryable error
+      throw KmsDriverException(
+        new RuntimeException("signing keypair generation failed"),
+        retryable = false,
+      )
+    }
+
+  override def getPublicKey(keyId: String)(traceContext: Context): Future[PublicKey] =
+    Future {
+      // Simulate an unexpected exception, inside a future
+      throw new RuntimeException("get public key failed")
+    }
+
+  override def generateSymmetricKey(keyName: Option[String])(
+      traceContext: Context
+  ): Future[String] =
+    // Simulate unexpected exception outside of future
+    throw new RuntimeException("generate symmetric key failed")
+
+  override def generateEncryptionKeyPair(
+      encryptionKeySpec: EncryptionKeySpec,
+      keyName: Option[String],
+  )(traceContext: Context): Future[String] = ???
+
+  override def decryptAsymmetric(
+      ciphertext: Array[Byte],
+      keyId: String,
+      algoSpec: EncryptionAlgoSpec,
+  )(traceContext: Context): Future[Array[Byte]] = ???
+
+  override def encryptSymmetric(data: Array[Byte], keyId: String)(
+      traceContext: Context
+  ): Future[Array[Byte]] = ???
+
+  override def decryptSymmetric(ciphertext: Array[Byte], keyId: String)(
+      traceContext: Context
+  ): Future[Array[Byte]] = ???
+
+  override def keyExistsAndIsActive(keyId: String)(traceContext: Context): Future[Unit] = ???
+
+  override def deleteKey(keyId: String)(traceContext: Context): Future[Unit] = ???
+
+  override def close(): Unit = ???
+}
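Editorial note: the retry configuration above drives how DriverKms reacts to retryable driver failures. The sketch below is an illustrative capped exponential backoff under the same parameters; it is not Canton's actual retry implementation, and `backoffDelay` is a hypothetical helper:

  import scala.concurrent.duration.*

  // Illustration of ExponentialBackoffConfig(initialDelay = 10.millis,
  // maxDelay = 1.second, maxRetries = 1): the delay doubles per attempt,
  // is capped at maxDelay, and attempts stop once maxRetries is exhausted.
  def backoffDelay(
      attempt: Int,
      initialDelay: FiniteDuration,
      maxDelay: FiniteDuration,
  ): FiniteDuration = {
    val uncapped = initialDelay * math.pow(2, attempt.toDouble).toLong
    if (uncapped > maxDelay) maxDelay else uncapped
  }
  // backoffDelay(0, 10.millis, 1.second) == 10.millis
  // backoffDelay(7, 10.millis, 1.second) == 1.second (capped)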
diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/kms/KmsTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/kms/KmsTest.scala
new file mode 100644
index 0000000000..d482892022
--- /dev/null
+++ b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/kms/KmsTest.scala
@@ -0,0 +1,496 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.crypto.kms
+
+import cats.syntax.either.*
+import com.daml.nonempty.NonEmpty
+import com.digitalasset.canton.BaseTest
+import com.digitalasset.canton.config.CantonRequireTypes.String300
+import com.digitalasset.canton.config.{
+  CachingConfigs,
+  CryptoConfig,
+  CryptoProvider,
+  CryptoSchemeConfig,
+  EncryptionSchemeConfig,
+  SigningSchemeConfig,
+}
+import com.digitalasset.canton.crypto.kms.KmsError.{
+  KmsDecryptError,
+  KmsDeleteKeyError,
+  KmsEncryptError,
+  KmsSignError,
+}
+import com.digitalasset.canton.crypto.provider.jce.JcePureCrypto
+import com.digitalasset.canton.crypto.{
+  CryptoPureApi,
+  EncryptionAlgorithmSpec,
+  EncryptionKeySpec,
+  EncryptionPublicKey,
+  Signature,
+  SignatureCheckError,
+  SignatureFormat,
+  SigningAlgorithmSpec,
+  SigningKeySpec,
+  SigningKeyUsage,
+  SigningPublicKey,
+}
+import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
+import com.digitalasset.canton.tracing.TraceContext
+import com.digitalasset.canton.util.Thereafter.syntax.*
+import com.digitalasset.canton.util.{ByteString190, ByteString256, ByteString4096}
+import com.digitalasset.canton.version.HasToByteString
+import com.google.protobuf.ByteString
+import org.scalatest.wordspec.FixtureAsyncWordSpec
+import org.scalatest.{Assertion, BeforeAndAfterAll, FutureOutcome}
+
+import java.util.Calendar
+import scala.concurrent.ExecutionContext
+
+trait KmsTest extends BaseTest with BeforeAndAfterAll {
+  this: FixtureAsyncWordSpec =>
+
+  type KmsType <: Kms
+  override type FixtureParam = Fixture
+
+  case class Fixture(pureCrypto: CryptoPureApi, kms: KmsType)
+
+  override def afterAll(): Unit = {
+    super.afterAll()
+    defaultKms.close()
+  }
+
+  override def withFixture(test: OneArgAsyncTest): FutureOutcome = {
+    val fixture = Fixture(pureCrypto, defaultKms)
+
+    withFixture(test.toNoArgAsyncTest(fixture))
+  }
+
+  protected def defaultKmsConfig: KmsType#Config
+
+  // Keep one KMS for all the tests
+  protected lazy val defaultKms: KmsType = newKms(defaultKmsConfig)
+
+  protected def newKms(config: KmsType#Config): KmsType
+
+  val parallelExecutionContext: ExecutionContext = executionContext
+
+  lazy val pureCrypto: CryptoPureApi = JcePureCrypto
+    .create(
+      CryptoConfig(
+        provider = CryptoProvider.Jce,
+        signing = SigningSchemeConfig(
+          algorithms = CryptoSchemeConfig(Some(SigningAlgorithmSpec.EcDsaSha256)),
+          keys = CryptoSchemeConfig(Some(SigningKeySpec.EcP256)),
+        ),
+        encryption = EncryptionSchemeConfig(
+          algorithms = CryptoSchemeConfig(Some(EncryptionAlgorithmSpec.RsaOaepSha256)),
+          keys = CryptoSchemeConfig(Some(EncryptionKeySpec.Rsa2048)),
+        ),
+      ),
+      CachingConfigs.defaultSessionEncryptionKeyCacheConfig,
+      CachingConfigs.defaultPublicKeyConversionCache,
+      loggerFactory,
+    )
+    .valueOrFail("create crypto with JCE provider")
+
+  case class KmsSigningKey(
+      keyId: KmsKeyId,
+      signingKeySpec: SigningKeySpec,
+      signingAlgorithmSpec: SigningAlgorithmSpec,
+      signatureFormat: SignatureFormat,
+  )
+
+  case class KmsAsymmetricEncryptionKey private (
+      keyId: KmsKeyId,
+      encryptionKeySpec: EncryptionKeySpec,
+      encryptionAlgorithmSpec: EncryptionAlgorithmSpec,
+  )
+
+  object KmsAsymmetricEncryptionKey {
+    def create(
+        keyId: KmsKeyId,
+        encryptionKeySpec: EncryptionKeySpec,
+        encryptionAlgorithmSpec: EncryptionAlgorithmSpec,
+    ): KmsAsymmetricEncryptionKey = {
+      require(encryptionAlgorithmSpec.supportedEncryptionKeySpecs.contains(encryptionKeySpec))
+      new KmsAsymmetricEncryptionKey(keyId, encryptionKeySpec, encryptionAlgorithmSpec)
+    }
+  }
+
+  def kmsSymmetricEncryptionKeyId: FutureUnlessShutdown[KmsKeyId]
+  def kmsAsymmetricEncryptionKey: FutureUnlessShutdown[KmsAsymmetricEncryptionKey]
+  def kmsSigningKey: FutureUnlessShutdown[KmsSigningKey]
+  // signing key used to verify a signature generated from another key
+  def kmsAnotherSigningKey: FutureUnlessShutdown[KmsSigningKey]
+
+  def convertToSigningPublicKey(
+      kmsSpk: KmsSigningPublicKey,
+      usage: NonEmpty[Set[SigningKeyUsage]],
+  ): SigningPublicKey =
+    kmsSpk
+      .convertToSigningPublicKey(usage)
+      .valueOrFail("convert public signing key")
+
+  def convertToEncryptionPublicKey(kmsEpk: KmsEncryptionPublicKey): EncryptionPublicKey =
+    kmsEpk.convertToEncryptionPublicKey
+      .valueOrFail("convert public encryption key")
+
+  lazy val plainTextData = "this_is_the_plain_text_test_data"
+  lazy val dataToHandle: ByteString = ByteString.copyFrom(plainTextData.getBytes())
+
+  case class Message(bytes: ByteString) extends HasToByteString {
+    override def toByteString: ByteString = bytes
+  }
+
+  // Data that is right up to the upper bound for sign and encrypt operations.
+  lazy val dataToHandle190: ByteString190 =
+    ByteString190.tryCreate(ByteString.copyFrom(("t" * 190).getBytes()))
+  lazy val dataToHandle4096: ByteString4096 =
+    ByteString4096.tryCreate(ByteString.copyFrom(("t" * 4096).getBytes()))
+
+  lazy val kmsKeyIdWrong: KmsKeyId = KmsKeyId(String300.tryCreate("key_wrong_id"))
+
+  def signVerifyTest(
+      pureCrypto: CryptoPureApi,
+      kms: KmsType,
+      signingKey: KmsSigningKey,
+      tc: TraceContext = implicitly[TraceContext],
+  ): FutureUnlessShutdown[Assertion] =
+    for {
+      signatureRaw <- kms
+        .sign(
+          signingKey.keyId,
+          ByteString4096.tryCreate(dataToHandle),
+          signingKey.signingAlgorithmSpec,
+          signingKey.signingKeySpec,
+        )(executionContext, tc)
+        .valueOrFail("sign data")
+      signatureBoundRaw <- kms
+        .sign(
+          signingKey.keyId,
+          dataToHandle4096,
+          signingKey.signingAlgorithmSpec,
+          signingKey.signingKeySpec,
+        )(executionContext, tc)
+        .valueOrFail("sign data (right to the upper bound)")
+      pubKeyKms <- kms
+        .getPublicSigningKey(signingKey.keyId)(executionContext, tc)
+        .valueOrFail("get public signing key from KMS key id")
+      pubKey = convertToSigningPublicKey(pubKeyKms, SigningKeyUsage.ProtocolOnly)
+      _ <- pureCrypto
+        .verifySignature(
+          dataToHandle,
+          pubKey,
+          Signature.create(
+            signingKey.signatureFormat,
+            signatureRaw,
+            pubKey.id,
+            Some(signingKey.signingAlgorithmSpec),
+          ),
+          SigningKeyUsage.ProtocolOnly,
+        )
+        .toEitherT[FutureUnlessShutdown]
+        .valueOrFail("verify signature")
+      _ <- pureCrypto
+        .verifySignature(
+          dataToHandle4096.unwrap,
+          pubKey,
+          Signature.create(
+            signingKey.signatureFormat,
+            signatureBoundRaw,
+            pubKey.id,
+            Some(signingKey.signingAlgorithmSpec),
+          ),
+          SigningKeyUsage.ProtocolOnly,
+        )
+        .toEitherT[FutureUnlessShutdown]
+        .valueOrFail("verify signature for bounded data")
+    } yield succeed
+
+  def encryptDecryptSymmetricTest(
+      kms: KmsType,
+      keyId: KmsKeyId,
+      tc: TraceContext = implicitly[TraceContext],
+  ): FutureUnlessShutdown[Assertion] =
+    for {
+      encryptedData <- kms
+        .encryptSymmetric(keyId, ByteString4096.tryCreate(dataToHandle))(executionContext, tc)
+        .valueOrFail("encrypt data with symmetric key")
+      encryptedDataBound <- kms
+        .encryptSymmetric(keyId, dataToHandle4096)(executionContext, tc)
+        .valueOrFail("encrypt data (right to the upper bound) with symmetric key")
+      decryptedData <- kms
+        .decryptSymmetric(keyId, encryptedData)(executionContext, tc)
+        .valueOrFail("decrypt data with symmetric key")
+      decryptedDataBound <- kms
+        .decryptSymmetric(keyId, encryptedDataBound)(executionContext, tc)
+        .valueOrFail("decrypt data (right to the upper bound) with symmetric key")
+    } yield {
+      encryptedData shouldNot be(dataToHandle)
+      encryptedDataBound shouldNot be(dataToHandle4096)
+      decryptedData should be(dataToHandle)
+      decryptedDataBound should be(dataToHandle4096)
+    }
+
+  def encryptDecryptAsymmetricTest(
+      pureCrypto: CryptoPureApi,
+      kms: KmsType,
+      keyId: KmsKeyId,
+      encryptionAlgorithmSpec: EncryptionAlgorithmSpec,
+  ): FutureUnlessShutdown[Assertion] =
+    for {
+      publicKeyKms <- kms
+        .getPublicEncryptionKey(keyId)
+        .valueOrFail("get public key to encrypt")
+      publicKey = convertToEncryptionPublicKey(publicKeyKms)
+      encryptedData = pureCrypto
+        .encryptWith(
+          Message(dataToHandle),
+          publicKey,
+          encryptionAlgorithmSpec,
+        )
+        .valueOrFail("encrypt data with public key")
+      encryptedDataBound = pureCrypto
+        .encryptWith(
+          Message(dataToHandle190.unwrap),
+          publicKey,
+          encryptionAlgorithmSpec,
+        )
+        .valueOrFail("encrypt data (right to the upper bound) with public key")
+      encryptedDataCiphertext = ByteString256
+        .create(encryptedData.ciphertext)
+        .valueOrFail("ciphertext does not conform with bound")
+      encryptedDataBoundCiphertext = ByteString256
+        .create(encryptedDataBound.ciphertext)
+        .valueOrFail("ciphertext does not conform with bound")
+      decryptedData <- kms
+        .decryptAsymmetric(keyId, encryptedDataCiphertext, encryptionAlgorithmSpec)
+        .valueOrFail("decrypt data with private key")
+      decryptedDataBound <- kms
+        .decryptAsymmetric(keyId, encryptedDataBoundCiphertext, encryptionAlgorithmSpec)
+        .valueOrFail("decrypt data (right to the upper bound) with private key")
+    } yield {
+      encryptedData shouldNot be(dataToHandle)
+      encryptedDataBound shouldNot be(dataToHandle190)
+      decryptedData should be(dataToHandle)
+      decryptedDataBound should be(dataToHandle190)
+    }
+
+  def kms(): Unit = {
+
+    "create and delete keys" in { fixture =>
+      /* We only run one of the key generations, based on the day of the week, to
+         prevent clogging our AWS instance with too many keys (each key takes effectively 7 days to be
+         completely removed).
+       */
+      val dT = Calendar.getInstance()
+      val dow = dT.get(Calendar.DAY_OF_WEEK)
+      val generateKeyList = List[Kms => FutureUnlessShutdown[KmsKeyId]](
+        kms =>
+          kms
+            // if AWS KMS this defaults to multiRegion = false
+            .generateSymmetricEncryptionKey()
+            .valueOrFail("create KMS symmetric encryption key"),
+        kms =>
+          kms
+            .generateAsymmetricEncryptionKeyPair(EncryptionKeySpec.Rsa2048)
+            .valueOrFail("create KMS asymmetric encryption key"),
+        kms =>
+          kms
+            .generateSigningKeyPair(SigningKeySpec.EcP256)
+            .valueOrFail("create KMS signing key"),
+      )
+      for {
+        keyId <- generateKeyList(dow % generateKeyList.size)(fixture.kms).failOnShutdown
+        _ <- fixture.kms
+          .keyExistsAndIsActive(keyId)
+          .valueOrFail("check key exists")
+          .thereafterF(_ =>
+            fixture.kms
+              .deleteKey(keyId)
+              .valueOrFailShutdown("delete key")
+          )
+          .failOnShutdown
+        keyExists <- fixture.kms.keyExistsAndIsActive(keyId).value.failOnShutdown
+      } yield keyExists.left.value should (be(a[KmsError.KmsKeyDisabledError]) or be(
+        a[KmsError.KmsCannotFindKeyError]
+      ))
+    }
+
+    "get public key from a KMS key identifier" in { fixture =>
+      for {
+        signingKey <- kmsSigningKey.failOnShutdown
+        encryptionKey <- kmsAsymmetricEncryptionKey.failOnShutdown
+        kmsSigningPubKey <- fixture.kms
+          .getPublicSigningKey(signingKey.keyId)
+          .valueOrFail("get public signing key from a KMS key id")
+          .failOnShutdown
+        kmsEncryptionPubKey <- fixture.kms
+          .getPublicEncryptionKey(encryptionKey.keyId)
+          .valueOrFail("get public encryption key from a KMS key id")
+          .failOnShutdown
+      } yield {
+        kmsSigningPubKey.key should not be empty
+        kmsEncryptionPubKey.key should not be empty
+      }
+    }
+
+    "fail to get public key from a KMS key identifier for a symmetric key" in { fixture =>
+      for {
+        keyId <- kmsSymmetricEncryptionKeyId.failOnShutdown
+        pubKey <- fixture.kms
+          .getPublicEncryptionKey(keyId)
+          .value
+          .failOnShutdown
+      } yield pubKey.left.value should be(a[KmsError.KmsGetPublicKeyError])
+    }
+
+    "encrypt and decrypt with pre-generated key" in { fixture =>
+      for {
+        // symmetric encryption
+        symmetricKeyId <- kmsSymmetricEncryptionKeyId.failOnShutdown
+        _ <- encryptDecryptSymmetricTest(fixture.kms, symmetricKeyId).failOnShutdown
+
+        // asymmetric encryption
+        asymmetricKey <- kmsAsymmetricEncryptionKey.failOnShutdown
+        _ <- encryptDecryptAsymmetricTest(
+          fixture.pureCrypto,
+          fixture.kms,
+          asymmetricKey.keyId,
+          asymmetricKey.encryptionAlgorithmSpec,
+        ).failOnShutdown
+      } yield succeed
+    }
+
+    lazy val kmsKeyIdWrong: KmsKeyId = KmsKeyId(String300.tryCreate("key_wrong_id"))
+
+    "fail if we encrypt and decrypt with an invalid key" in { fixture =>
+      for {
+        // symmetric encryption
+        symmetricKeyId <- kmsSymmetricEncryptionKeyId.failOnShutdown
+        encryptedDataSymmetricFailed <-
+          fixture.kms
+            .encryptSymmetric(kmsKeyIdWrong, ByteString4096.tryCreate(dataToHandle))
+            .value
+            .failOnShutdown
+        encryptedDataSymmetric <- fixture.kms
+          .encryptSymmetric(symmetricKeyId, ByteString4096.tryCreate(dataToHandle))
+          .valueOrFail("symmetrically encrypt data")
+          .failOnShutdown
+        decryptedDataSymmetricFailed <- fixture.kms
+          .decryptSymmetric(kmsKeyIdWrong, encryptedDataSymmetric)
+          .value
+          .failOnShutdown
+
+        // asymmetric encryption
+        asymmetricKeyId <- kmsAsymmetricEncryptionKey.failOnShutdown
+        asymmetricKeyKms <- fixture.kms
+          .getPublicEncryptionKey(asymmetricKeyId.keyId)
+          .valueOrFail("get public key to encrypt")
+          .failOnShutdown
+        asymmetricKey = convertToEncryptionPublicKey(asymmetricKeyKms)
+        encryptedDataAsymmetric =
+          fixture.pureCrypto
+            .encryptWith(Message(dataToHandle), asymmetricKey)
+            .valueOrFail("asymmetrically encrypt data")
+        encryptedDataAsymmetricCiphertext = ByteString256
+          .create(encryptedDataAsymmetric.ciphertext)
+          .valueOrFail("ciphertext does not conform with bound")
+        decryptedDataAsymmetricFailed <- fixture.kms
+          .decryptAsymmetric(
+            kmsKeyIdWrong,
+            encryptedDataAsymmetricCiphertext,
+            encryptedDataAsymmetric.encryptionAlgorithmSpec,
+          )
+          .value
+          .failOnShutdown
+      } yield {
+        encryptedDataSymmetricFailed.left.value shouldBe a[KmsEncryptError]
+        decryptedDataSymmetricFailed.left.value shouldBe a[KmsDecryptError]
+
+        decryptedDataAsymmetricFailed.left.value shouldBe a[KmsDecryptError]
+      }
+    }
+
+    "sign and verify with pre-generated key" in { fixture =>
+      for {
+        signingKey <- kmsSigningKey.failOnShutdown
+        _ <- signVerifyTest(fixture.pureCrypto, fixture.kms, signingKey).failOnShutdown
+      } yield succeed
+    }
+
+    "fail if we sign with an invalid key" in { fixture =>
+      for {
+        signFailed <-
+          fixture.kms
+            // the key does not exist so the scheme selected does not matter
+            .sign(
+              kmsKeyIdWrong,
+              ByteString4096.tryCreate(dataToHandle),
+              SigningAlgorithmSpec.EcDsaSha256,
+              SigningKeySpec.EcP256,
+            )
+            .value
+            .failOnShutdown
+      } yield signFailed.left.value shouldBe a[KmsSignError]
+    }
+
+    "fail if signature is invalid or we verify with the wrong key" in { fixture =>
+      for {
+        signingKey <- kmsSigningKey.failOnShutdown
+        signingKeyWrong <- kmsAnotherSigningKey.failOnShutdown
+        signatureRaw <- fixture.kms
+          .sign(
+            signingKey.keyId,
+            ByteString4096.tryCreate(dataToHandle),
+            signingKey.signingAlgorithmSpec,
+            signingKey.signingKeySpec,
+          )
+          .valueOrFail("sign data")
+          .failOnShutdown
+        pubKeyKms <- fixture.kms
+          .getPublicSigningKey(signingKey.keyId)
+          .valueOrFail("get public signing key from KMS key id")
+          .failOnShutdown
+        pubKey = convertToSigningPublicKey(pubKeyKms, SigningKeyUsage.ProtocolOnly)
+        signature = Signature.create(
+          signingKey.signatureFormat,
+          signatureRaw,
+          pubKey.id,
+          Some(signingKey.signingAlgorithmSpec),
+        )
+        wrongPubKeyKms <- fixture.kms
+          .getPublicSigningKey(signingKeyWrong.keyId)
+          .valueOrFail("get public signing key from KMS key id")
+          .failOnShutdown
+        wrongPubKey = convertToSigningPublicKey(wrongPubKeyKms, SigningKeyUsage.ProtocolOnly)
+        wrongSignature = Signature.create(
+          signingKey.signatureFormat,
+          // add a 1 byte at the end to make the signature invalid
+          signatureRaw.concat(ByteString.copyFrom(Array[Byte](1.byteValue()))),
+          pubKey.id,
+          Some(signingKey.signingAlgorithmSpec),
+        )
+        invalidSignature <- fixture.pureCrypto
+          .verifySignature(dataToHandle, pubKey, wrongSignature, SigningKeyUsage.ProtocolOnly)
+          .toEitherT[FutureUnlessShutdown]
+          .value
+          .failOnShutdown
+        verifyFailed <- fixture.pureCrypto
+          .verifySignature(dataToHandle, wrongPubKey, signature, SigningKeyUsage.ProtocolOnly)
+          .toEitherT[FutureUnlessShutdown]
+          .value
+          .failOnShutdown
+      } yield {
+        invalidSignature.left.value shouldBe a[SignatureCheckError]
+        verifyFailed.left.value shouldBe a[SignatureCheckError]
+      }
+    }
+
+    "fail if deleting a non-existent key" in { fixture =>
+      for {
+        deleteKeyFailed <- fixture.kms.deleteKey(kmsKeyIdWrong).value.failOnShutdown
+      } yield deleteKeyFailed.left.value shouldBe a[KmsDeleteKeyError]
+    }
+
+  }
+}
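Editorial note: KmsTest packages the whole suite behind `def kms(): Unit`, so a concrete backend only supplies key fixtures and a constructor. A minimal sketch of that wiring (the overridden member names are the real ones above; all `???` bodies are placeholders, and `MyKmsTest` is hypothetical):

  // Hypothetical concrete suite: provide the backend-specific pieces and then
  // pull in the entire shared behavior with `behave like kms()`.
  class MyKmsTest extends FixtureAsyncWordSpec with KmsTest {
    override type KmsType = SymbolicKms // e.g. the test-only KMS defined in the next file
    override protected val defaultKmsConfig = ???
    override protected def newKms(config: KmsType#Config) = ???

    // Pre-generated keys the shared tests operate on
    override lazy val kmsSymmetricEncryptionKeyId = ???
    override lazy val kmsAsymmetricEncryptionKey = ???
    override lazy val kmsSigningKey = ???
    override lazy val kmsAnotherSigningKey = ???

    "My KMS" must {
      behave like kms()
    }
  }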
diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/kms/SymbolicKms.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/kms/SymbolicKms.scala
new file mode 100644
index 0000000000..3d6ea0ba78
--- /dev/null
+++ b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/kms/SymbolicKms.scala
@@ -0,0 +1,417 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.crypto.kms
+
+import cats.data.EitherT
+import cats.syntax.bifunctor.*
+import cats.syntax.either.*
+import com.digitalasset.canton.config.CantonRequireTypes.String300
+import com.digitalasset.canton.config.{KmsConfig, ProcessingTimeout}
+import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto
+import com.digitalasset.canton.crypto.{
+  AsymmetricEncrypted,
+  CryptoKey,
+  CryptoPrivateStoreApi,
+  Encrypted,
+  EncryptionAlgorithmSpec,
+  EncryptionKeyPair,
+  EncryptionKeySpec,
+  EncryptionPrivateKey,
+  EncryptionPublicKey,
+  KeyName,
+  SigningAlgorithmSpec,
+  SigningKeyPair,
+  SigningKeySpec,
+  SigningKeyUsage,
+  SigningPrivateKey,
+  SigningPublicKey,
+  SymmetricKey,
+}
+import com.digitalasset.canton.discard.Implicits.DiscardOps
+import com.digitalasset.canton.health.ComponentHealthState
+import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, LifeCycle}
+import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
+import com.digitalasset.canton.serialization.DeserializationError
+import com.digitalasset.canton.tracing.TraceContext
+import com.digitalasset.canton.util.collection.TrieMapUtil
+import com.digitalasset.canton.util.{
+  ByteString190,
+  ByteString256,
+  ByteString4096,
+  ByteString6144,
+  EitherTUtil,
+}
+import com.digitalasset.canton.version.HasToByteString
+import com.google.protobuf.ByteString
+
+import java.util.concurrent.atomic.AtomicInteger
+import scala.collection.concurrent.TrieMap
+import scala.concurrent.ExecutionContext
+
+class SymbolicKms(
+    private val crypto: SymbolicCrypto,
+    override val config: KmsConfig,
+    override val timeouts: ProcessingTimeout,
+    override protected val loggerFactory: NamedLoggerFactory,
+) extends Kms
+    with NamedLogging {
+
+  override type Config = KmsConfig
+
+  override def name: String = "symbolic-kms"
+
+  override protected def initialHealthState: ComponentHealthState = ComponentHealthState.Ok()
+
+  // store all private keys that belong in the KMS
+  private val storedPrivateKeyMap: TrieMap[KmsKeyId, CryptoKey] = TrieMap.empty
+  // store public signing keys
+  private val storedPublicSigningKeyMap: TrieMap[KmsKeyId, SigningPublicKey] = TrieMap.empty
+  // store public encryption keys
+  private val storedPublicEncryptionKeyMap: TrieMap[KmsKeyId, EncryptionPublicKey] = TrieMap.empty
+
+  private val counter = new AtomicInteger(0)
+
+  override protected def generateSigningKeyPairInternal(
+      signingKeySpec: SigningKeySpec,
+      name: Option[KeyName],
+  )(implicit
+      ec: ExecutionContext,
+      tc: TraceContext,
+  ): EitherT[FutureUnlessShutdown, KmsError, KmsKeyId] =
+    for {
+      keys <- crypto.privateCrypto match {
+        case api: CryptoPrivateStoreApi =>
+          api
+            .generateSigningKeypair(signingKeySpec, SigningKeyUsage.ProtocolOnly)(
+              TraceContext.empty
+            )
+            .leftMap[KmsError](err => KmsError.KmsCreateKeyError(err.show))
+        case _ =>
+          EitherT
+            .leftT[FutureUnlessShutdown, SigningKeyPair](
+              KmsError.KmsCreateKeyRequestError(
+                "The selected crypto private store API does not allow exporting private keys"
+              )
+            )
+            .leftWiden[KmsError]
+      }
+      kmsKeyId =
+        KmsKeyId(String300.tryCreate(s"symbolic-kms-signing-key-${counter.getAndIncrement()}"))
+      _ = TrieMapUtil
+        .insertIfAbsent(
+          storedPrivateKeyMap,
+          kmsKeyId,
+          keys.privateKey,
+          () =>
+            KmsError.KmsCreateKeyError(
+              "Duplicate symbolic KMS signing private key: " + kmsKeyId.toString
+            ),
+        )
+      _ = TrieMapUtil
+        .insertIfAbsent(
+          storedPublicSigningKeyMap,
+          kmsKeyId,
+          keys.publicKey,
+          () =>
+            KmsError.KmsCreateKeyError(
+              "Duplicate symbolic KMS signing public key: " + kmsKeyId.toString
+            ),
+        )
+    } yield kmsKeyId
+
+  override protected def generateSymmetricEncryptionKeyInternal(
+      name: Option[KeyName]
+  )(implicit
+      ec: ExecutionContext,
+      tc: TraceContext,
+  ): EitherT[FutureUnlessShutdown, KmsError, KmsKeyId] =
+    (for {
+      key <- crypto.pureCrypto
+        .generateSymmetricKey()
+        .leftMap[KmsError](err => KmsError.KmsCreateKeyError(err.show))
+      kmsKeyId =
+        KmsKeyId(
+          String300.tryCreate(
+            s"symbolic-kms-symmetric-encryption-key-${counter.getAndIncrement()}"
+          )
+        )
+      _ = TrieMapUtil
+        .insertIfAbsent(
+          storedPrivateKeyMap,
+          kmsKeyId,
+          key,
+          () =>
+            KmsError.KmsCreateKeyError(
+              "Duplicate symbolic KMS symmetric encryption key: " + kmsKeyId.toString
+            ),
+        )
+    } yield kmsKeyId).toEitherT
+
+  override protected def generateAsymmetricEncryptionKeyPairInternal(
+      encryptionKeySpec: EncryptionKeySpec,
+      name: Option[KeyName],
+  )(implicit
+      ec: ExecutionContext,
+      tc: TraceContext,
+  ): EitherT[FutureUnlessShutdown, KmsError, KmsKeyId] =
+    for {
+      keys <- crypto.privateCrypto match {
+        case api: CryptoPrivateStoreApi =>
+          api
+            .generateEncryptionKeypair(encryptionKeySpec)(TraceContext.empty)
+            .leftMap[KmsError](err => KmsError.KmsCreateKeyError(err.show))
+        case _ =>
+          EitherT
+            .leftT[FutureUnlessShutdown, EncryptionKeyPair](
+              KmsError.KmsCreateKeyRequestError(
+                "The selected crypto private store API does not allow exporting private keys"
+              )
+            )
+            .leftWiden[KmsError]
+      }
+      kmsKeyId =
+        KmsKeyId(
+          String300.tryCreate(
+            s"symbolic-kms-asymmetric-encryption-key-${counter.getAndIncrement()}"
+          )
+        )
+      _ = TrieMapUtil
+        .insertIfAbsent(
+          storedPrivateKeyMap,
+          kmsKeyId,
+          keys.privateKey,
+          () =>
+            KmsError.KmsCreateKeyError(
+              "Duplicate symbolic KMS encryption private key: " + kmsKeyId.toString
+            ),
+        )
+      _ = TrieMapUtil
+        .insertIfAbsent(
+          storedPublicEncryptionKeyMap,
+          kmsKeyId,
+          keys.publicKey,
+          () =>
+            KmsError.KmsCreateKeyError(
+              "Duplicate symbolic KMS encryption public key: " + kmsKeyId.toString
+            ),
+        )
+    } yield kmsKeyId
+
+  override protected def getPublicSigningKeyInternal(
+      keyId: KmsKeyId
+  )(implicit
+      ec: ExecutionContext,
+      tc: TraceContext,
+  ): EitherT[FutureUnlessShutdown, KmsError, KmsSigningPublicKey] =
+    for {
+      pubKey <- storedPublicSigningKeyMap
+        .get(keyId)
+        .toRight[KmsError](
+          KmsError.KmsGetPublicKeyError(keyId, "public signing key does not exist")
+        )
+        .toEitherT[FutureUnlessShutdown]
+      kmsPubKey <- EitherT.rightT[FutureUnlessShutdown, KmsError](
+        KmsSigningPublicKey
+          .createSymbolic(pubKey.key, pubKey.keySpec)
+      )
+    } yield kmsPubKey
+
+  override protected def getPublicEncryptionKeyInternal(
+      keyId: KmsKeyId
+  )(implicit
+      ec: ExecutionContext,
+      tc: TraceContext,
+  ): EitherT[FutureUnlessShutdown, KmsError, KmsEncryptionPublicKey] =
+    for {
+      pubKey <- storedPublicEncryptionKeyMap
+        .get(keyId)
+        .toRight[KmsError](
+          KmsError.KmsGetPublicKeyError(keyId, "public encryption key does not exist")
+        )
+        .toEitherT[FutureUnlessShutdown]
+      kmsPubKey <- EitherT.rightT[FutureUnlessShutdown, KmsError](
+        KmsEncryptionPublicKey
+          .createSymbolic(pubKey.key, pubKey.keySpec)
+      )
+    } yield kmsPubKey
+
+  override protected def keyExistsAndIsActiveInternal(
+      keyId: KmsKeyId
+  )(implicit
+      ec: ExecutionContext,
+      tc: TraceContext,
+  ): EitherT[FutureUnlessShutdown, KmsError, Unit] =
+    EitherTUtil.condUnitET[FutureUnlessShutdown](
+      storedPrivateKeyMap.contains(keyId),
+      KmsError.KmsCannotFindKeyError(keyId, "cannot find key in KMS store"),
+    )
+
+  override protected def encryptSymmetricInternal(
+      keyId: KmsKeyId,
+      data: ByteString4096,
+  )(implicit
+      ec: ExecutionContext,
+      tc: TraceContext,
+  ): EitherT[FutureUnlessShutdown, KmsError, ByteString6144] =
+    storedPrivateKeyMap
+      .get(keyId) match {
+      case Some(key: SymmetricKey) =>
+        crypto.pureCrypto
+          .encryptSymmetricWith(SymbolicKmsMessage(data.unwrap), key)
+          .toEitherT[FutureUnlessShutdown]
+          .transform {
+            case Left(err) => Left(KmsError.KmsEncryptError(keyId, err.show))
+            case Right(enc) =>
+              ByteString6144
+                .create(enc.ciphertext)
+                .leftMap(err =>
+                  KmsError
+                    .KmsEncryptError(
+                      keyId,
+                      s"generated ciphertext does not adhere to bound: $err",
+                    )
+                )
+          }
+      case None =>
+        EitherT.leftT(KmsError.KmsEncryptError(keyId, "KMS key does not exist"))
+      case _ =>
+        EitherT.leftT(KmsError.KmsEncryptError(keyId, "KMS key is not a symmetric key"))
+    }
+
+  override protected def decryptSymmetricInternal(
+      keyId: KmsKeyId,
+      data: ByteString6144,
+  )(implicit
+      ec: ExecutionContext,
+      tc: TraceContext,
+  ): EitherT[FutureUnlessShutdown, KmsError, ByteString4096] =
+    storedPrivateKeyMap
+      .get(keyId) match {
+      case Some(key: SymmetricKey) =>
+        val encryptedData = Encrypted.fromByteString[SymbolicKmsMessage](data.unwrap)
+        for {
+          decryptedData <- crypto.pureCrypto
+            .decryptWith[SymbolicKmsMessage](encryptedData, key)(
+              SymbolicKmsMessage.fromByteString(_)
+            )
+            .toEitherT[FutureUnlessShutdown]
+            .transform {
+              case Left(err) => Left(KmsError.KmsDecryptError(keyId, err.show))
+              case Right(dec) =>
+                ByteString4096
+                  .create(dec.bytes)
+                  .leftMap[KmsError](err =>
+                    KmsError.KmsDecryptError(
+                      keyId,
+                      s"plaintext does not adhere to bound: $err",
+                    )
+                  )
+            }
+        } yield decryptedData
+      case None =>
+        EitherT.leftT(KmsError.KmsDecryptError(keyId, "KMS key does not exist"))
+      case _ =>
+        EitherT.leftT(KmsError.KmsDecryptError(keyId, "KMS key is not a symmetric key"))
+    }
+
+  override protected def decryptAsymmetricInternal(
+      keyId: KmsKeyId,
+      data: ByteString256,
+      encryptionAlgorithmSpec: EncryptionAlgorithmSpec,
+  )(implicit
+      ec: ExecutionContext,
+      tc: TraceContext,
+  ): EitherT[FutureUnlessShutdown, KmsError, ByteString190] =
+    storedPrivateKeyMap
+      .get(keyId) match {
+      case Some(key: EncryptionPrivateKey) =>
+        for {
+          decryptedData <- crypto.pureCrypto
+            .decryptWith[SymbolicKmsMessage](
+              AsymmetricEncrypted(data.unwrap, encryptionAlgorithmSpec, key.id),
+              key,
+            )(
+              SymbolicKmsMessage.fromByteString(_)
+            )
+            .toEitherT[FutureUnlessShutdown]
+            .transform {
+              case Left(err) => Left(KmsError.KmsDecryptError(keyId, err.show))
+              case Right(dec) =>
+                ByteString190
+                  .create(dec.bytes)
+                  .leftMap[KmsError](err =>
+                    KmsError.KmsDecryptError(
+                      keyId,
+                      s"plaintext does not adhere to bound: $err",
+                    )
+                  )
+            }
+        } yield decryptedData
+      case None =>
+        EitherT.leftT(KmsError.KmsDecryptError(keyId, "KMS key does not exist"))
+      case _ =>
+        EitherT.leftT(
+          KmsError.KmsDecryptError(keyId, "KMS key is not a private encryption key")
+        )
+    }
+
+  override protected def signInternal(
+      keyId: KmsKeyId,
+      data: ByteString4096,
+      signingAlgorithmSpec: SigningAlgorithmSpec,
+      signingKeySpec: SigningKeySpec,
+  )(implicit
+      ec: ExecutionContext,
+      tc: TraceContext,
+  ): EitherT[FutureUnlessShutdown, KmsError, ByteString] =
+    storedPrivateKeyMap
+      .get(keyId) match {
+      case Some(key: SigningPrivateKey) =>
+        // the symbolic KMS is only used for testing, so it is fine to directly use the usage declared on the signing key
+        crypto.pureCrypto
+          .signBytes(data.unwrap, key, key.usage)
+          .leftMap[KmsError](err => KmsError.KmsSignError(keyId, err.show))
+          .map(_.unwrap)
+          .toEitherT[FutureUnlessShutdown]
+      case None =>
+        EitherT.leftT(KmsError.KmsSignError(keyId, "KMS signing key does not exist"))
+      case _ =>
+        EitherT.leftT(KmsError.KmsSignError(keyId, "KMS key is not a signing key"))
+    }
+
+  override protected def deleteKeyInternal(keyId: KmsKeyId)(implicit
+      ec: ExecutionContext,
+      tc: TraceContext,
+  ): EitherT[FutureUnlessShutdown, KmsError, Unit] = {
+    storedPublicSigningKeyMap.remove(keyId).discard
+    storedPublicEncryptionKeyMap.remove(keyId).discard
+    storedPrivateKeyMap.remove(keyId) match {
+      case Some(_) => EitherT.rightT[FutureUnlessShutdown, KmsError](())
+      case None =>
+        EitherT.leftT[FutureUnlessShutdown, Unit](
+          KmsError.KmsDeleteKeyError(keyId, "invalid key id")
+        )
+    }
+  }
+
+  override protected def withRetries[T](
+      description: String,
+      checkKeyCreation: Boolean,
+  )(
+      task: => EitherT[FutureUnlessShutdown, KmsError, T]
+  )(implicit ec: ExecutionContext, tc: TraceContext): EitherT[FutureUnlessShutdown, KmsError, T] =
+    task
+
+  override def onClosed(): Unit = LifeCycle.close(crypto)(logger)
+
+  private case class SymbolicKmsMessage(bytes: ByteString) extends HasToByteString {
+    override def toByteString: ByteString = bytes
+  }
+
+  private object SymbolicKmsMessage {
+    def fromByteString(bytes: ByteString): Either[DeserializationError, SymbolicKmsMessage] = Right(
+      SymbolicKmsMessage(bytes)
+    )
+  }
+}
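Editorial note: SymbolicKms keeps private keys in in-memory TrieMaps, making it a convenient stand-in for a real KMS in tests. A hedged sketch of driving it directly, using only calls that appear in this patch (construction arguments mirror SymbolicKmsTest below; implicit execution context and trace context assumed in scope):

  // Sketch only: generate a signing key in the in-memory KMS and sign with it.
  val kms = new SymbolicKms(
    SymbolicCrypto.create(testedReleaseProtocolVersion, timeouts, loggerFactory),
    config = null, // no KmsConfig subclass exists for this test-only KMS
    timeouts,
    loggerFactory,
  )
  for {
    keyId <- kms.generateSigningKeyPair(SigningKeySpec.EcP256).valueOrFail("generate key")
    signature <- kms
      .sign(keyId, ByteString4096.empty, SigningAlgorithmSpec.EcDsaSha256, SigningKeySpec.EcP256)
      .valueOrFail("sign")
  } yield signature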
diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/kms/SymbolicKmsTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/kms/SymbolicKmsTest.scala
new file mode 100644
index 0000000000..36e7c5ff8b
--- /dev/null
+++ b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/kms/SymbolicKmsTest.scala
@@ -0,0 +1,119 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.crypto.kms
+
+import com.daml.nonempty.NonEmpty
+import com.digitalasset.canton.crypto.provider.symbolic.{SymbolicCrypto, SymbolicPureCrypto}
+import com.digitalasset.canton.crypto.{
+  CryptoPureApi,
+  EncryptionAlgorithmSpec,
+  EncryptionKeySpec,
+  EncryptionPublicKey,
+  SignatureFormat,
+  SigningAlgorithmSpec,
+  SigningKeySpec,
+  SigningKeyUsage,
+  SigningPublicKey,
+}
+import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
+import com.digitalasset.canton.util.ByteString190
+import com.google.protobuf.ByteString
+import org.scalatest.Assertion
+import org.scalatest.wordspec.FixtureAsyncWordSpec
+
+class SymbolicKmsTest extends FixtureAsyncWordSpec with KmsTest {
+
+  override type KmsType = SymbolicKms
+
+  // Use null for the config as there is no KmsConfig subclass for SymbolicKms, which is a test-only KMS implementation
+  @SuppressWarnings(Array("org.wartremover.warts.Null"))
+  override protected val defaultKmsConfig = null
+
+  override protected def newKms(config: KmsType#Config) =
+    new SymbolicKms(
+      SymbolicCrypto.create(
+        testedReleaseProtocolVersion,
+        timeouts,
+        loggerFactory,
+      ),
+      config,
+      timeouts,
+      loggerFactory,
+    )
+
+  override lazy val kmsSymmetricEncryptionKeyId: FutureUnlessShutdown[KmsKeyId] =
+    for {
+      kmsKeyId <- defaultKms
+        .generateSymmetricEncryptionKey()
+        .valueOrFail("create KMS symmetric encryption key")
+    } yield kmsKeyId
+
+  override lazy val kmsAsymmetricEncryptionKey: FutureUnlessShutdown[KmsAsymmetricEncryptionKey] =
+    for {
+      kmsKeyId <- defaultKms
+        .generateAsymmetricEncryptionKeyPair(EncryptionKeySpec.Rsa2048)
+        .valueOrFail("create KMS asymmetric encryption key")
+    } yield KmsAsymmetricEncryptionKey.create(
+      kmsKeyId,
+      EncryptionKeySpec.Rsa2048,
+      EncryptionAlgorithmSpec.RsaOaepSha256,
+    )
+
+  override lazy val kmsSigningKey: FutureUnlessShutdown[KmsSigningKey] =
+    for {
+      kmsKeyId <- defaultKms
+        .generateSigningKeyPair(SigningKeySpec.EcP256)
+        .valueOrFail("create KMS signing key")
+    } yield KmsSigningKey(
+      kmsKeyId,
+      SigningKeySpec.EcP256,
+      SigningAlgorithmSpec.EcDsaSha256,
+      SignatureFormat.Symbolic,
+    )
+
+  override lazy val kmsAnotherSigningKey: FutureUnlessShutdown[KmsSigningKey] =
+    for {
+      kmsKeyId <- defaultKms
+        .generateSigningKeyPair(SigningKeySpec.EcP256)
+        .valueOrFail("create KMS signing key")
+    } yield KmsSigningKey(
+      kmsKeyId,
+      SigningKeySpec.EcP256,
+      SigningAlgorithmSpec.EcDsaSha256,
+      SignatureFormat.Symbolic,
+    )
+
+  override lazy val pureCrypto: CryptoPureApi =
+    new SymbolicPureCrypto
+
+  /* For the asymmetric encryption test the bound is slightly different for the symbolic KMS
+   * because the encryption is done as a simple aggregation of (RandomIv, KeyId, Message).
+   * Although the bound is 190 bytes, we set the byte string ('dataToHandle190') to 164 bytes, the
+   * maximum plaintext size for which the symbolic encryption still produces a ciphertext shorter
+   * than 256 bytes. This is required because the decryption function is bounded by ByteString256,
+   * which corresponds to the ciphertext length for RSA2048-OAEP-SHA256.
+   */
+  override lazy val dataToHandle190: ByteString190 =
+    ByteString190.tryCreate(ByteString.copyFrom(("t" * 164).getBytes()))
+
+  override def convertToSigningPublicKey(
+      kmsSpk: KmsSigningPublicKey,
+      usage: NonEmpty[Set[SigningKeyUsage]],
+  ): SigningPublicKey = kmsSpk
+    .convertToSymbolicSigningPublicKey(usage)
+    .valueOrFail("convert public signing key")
+
+  override def convertToEncryptionPublicKey(
+      kmsEpk: KmsEncryptionPublicKey
+  ): EncryptionPublicKey = kmsEpk.convertToSymbolicEncryptionPublicKey
+    .valueOrFail("convert public encryption key")
+
+  protected def checkKeyIsDeleted(keyId: KmsKeyId): FutureUnlessShutdown[Assertion] =
+    for {
+      res <- defaultKms.keyExistsAndIsActive(keyId).value
+    } yield res.isLeft shouldBe true
+
+  "Symbolic KMS" must {
+    behave like kms()
+  }
+}
diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/jce/JceCryptoTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/jce/JceCryptoTest.scala
index 35b67ae128..c24ff957d4 100644
--- a/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/jce/JceCryptoTest.scala
+++ b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/jce/JceCryptoTest.scala
@@ -9,7 +9,6 @@ import com.digitalasset.canton.config.{CachingConfigs, CryptoConfig, PositiveFin
 import com.digitalasset.canton.crypto.*
 import com.digitalasset.canton.crypto.CryptoTestHelper.TestMessage
 import com.digitalasset.canton.crypto.SigningKeySpec.EcSecp256k1
-import com.digitalasset.canton.crypto.kms.CommunityKmsFactory
 import com.digitalasset.canton.crypto.store.CryptoPrivateStoreFactory
 import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
 import com.digitalasset.canton.resource.MemoryStorage
@@ -49,7 +48,6 @@ class JceCryptoTest
           ),
           new MemoryStorage(loggerFactory, timeouts),
           CryptoPrivateStoreFactory.withoutKms(wallClock, parallelExecutionContext),
-          CommunityKmsFactory, // Does not matter for the test as we do not use KMS
           testedReleaseProtocolVersion,
           futureSupervisor,
           wallClock,
diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/kms/AwsKmsCryptoTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/kms/AwsKmsCryptoTest.scala
new file mode 100644
index 0000000000..9cdf73b240
--- /dev/null
+++ b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/kms/AwsKmsCryptoTest.scala
@@ -0,0 +1,11 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.crypto.provider.kms
+
+import com.digitalasset.canton.config.KmsConfig
+
+class AwsKmsCryptoTest extends KmsCryptoTest with HasPredefinedAwsKmsKeys {
+  override val kmsConfig = Some(KmsConfig.Aws.defaultTestConfig)
+
+}
diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/kms/GcpKmsCryptoTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/kms/GcpKmsCryptoTest.scala
new file mode 100644
index 0000000000..8101da081d
--- /dev/null
+++ b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/kms/GcpKmsCryptoTest.scala
@@ -0,0 +1,10 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.crypto.provider.kms
+
+import com.digitalasset.canton.config.KmsConfig
+
+class GcpKmsCryptoTest extends KmsCryptoTest with PredefinedGcpKmsKeys {
+  override val kmsConfig = Some(KmsConfig.Gcp.defaultTestConfig)
+}
diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/kms/KmsCryptoTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/kms/KmsCryptoTest.scala
new file mode 100644
index 0000000000..f3238329b0
--- /dev/null
+++ b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/kms/KmsCryptoTest.scala
@@ -0,0 +1,143 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.crypto.provider.kms
+
+import com.daml.nonempty.NonEmpty
+import com.digitalasset.canton.config.{
+  CachingConfigs,
+  CryptoConfig,
+  CryptoProvider,
+  CryptoSchemeConfig,
+  EncryptionSchemeConfig,
+  KmsConfig,
+  PrivateKeyStoreConfig,
+  SigningSchemeConfig,
+}
+import com.digitalasset.canton.crypto.*
+import com.digitalasset.canton.crypto.store.CryptoPrivateStoreFactory
+import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
+import com.digitalasset.canton.resource.MemoryStorage
+import com.digitalasset.canton.tracing.NoReportingTracerProvider
+import org.scalatest.BeforeAndAfterAll
+import org.scalatest.wordspec.AsyncWordSpec
+
+trait KmsCryptoTest
+    extends AsyncWordSpec
+    with SigningTest
+    with EncryptionTest
+    with RandomTest
+    with BeforeAndAfterAll
+    with KmsKeysRegistration {
+
+  protected def kmsConfig: Option[KmsConfig]
+
+  private def createCryptoConfig(
+      supportedSigningAlgorithmSpecs: NonEmpty[Set[SigningAlgorithmSpec]]
+  ) =
+    CryptoConfig(
+      provider = CryptoProvider.Kms,
+      encryption = EncryptionSchemeConfig(
+        algorithms = CryptoSchemeConfig(
+          default = Some(EncryptionAlgorithmSpec.RsaOaepSha256),
+          allowed = Some(NonEmpty.mk(Set, EncryptionAlgorithmSpec.RsaOaepSha256)),
+        )
+      ),
+      signing = SigningSchemeConfig(
+        algorithms = CryptoSchemeConfig(
+          default = Some(SigningAlgorithmSpec.EcDsaSha256),
+          allowed = Some(supportedSigningAlgorithmSpecs),
+        )
+      ),
+      kms = kmsConfig,
+      privateKeyStore = PrivateKeyStoreConfig(None),
+    )
+
+  lazy val cryptoConfig: CryptoConfig =
+    createCryptoConfig(
+      NonEmpty.mk(Set, SigningAlgorithmSpec.EcDsaSha256, SigningAlgorithmSpec.EcDsaSha384)
+    )
+
+  /* A crypto configuration with a restricted set of signing algorithm specifications to test that
+   * the sign/verify function fails when called with an unsupported one (or with a key not supported
+   * by any of them).
+   */
+  lazy val cryptoConfigRestricted: CryptoConfig =
+    createCryptoConfig(NonEmpty.mk(Set, SigningAlgorithmSpec.EcDsaSha256))
+
+  private def createKmsCrypto(config: CryptoConfig) =
+    Crypto
+      .create(
+        config,
+        CachingConfigs.defaultSessionEncryptionKeyCacheConfig,
+        CachingConfigs.defaultPublicKeyConversionCache,
+        new MemoryStorage(loggerFactory, timeouts),
+        new CryptoPrivateStoreFactory(
+          CryptoProvider.Kms,
+          kmsConfig,
+          CachingConfigs.kmsMetadataCache,
+          config.privateKeyStore,
+          replicaManager = None,
+          futureSupervisor = futureSupervisor,
+          clock = wallClock,
+          executionContext = executorService,
+        ),
+        testedReleaseProtocolVersion,
+        futureSupervisor,
+        wallClock,
+        executorService,
+        timeouts,
+        loggerFactory,
+        NoReportingTracerProvider,
+      )
+      .valueOrFail("create crypto")
+
+  lazy val kmsCryptoF: FutureUnlessShutdown[Crypto] = createKmsCrypto(cryptoConfig)
+  lazy val kmsCryptoRestrictedF: FutureUnlessShutdown[Crypto] = createKmsCrypto(
+    cryptoConfigRestricted
+  )
+
+  "KmsCrypto" can {
+
+    // We only support Ed25519 for verifying signatures, not for generating or converting keys into
+    val supportedSigningAlgorithmSpecs =
+      CryptoProvider.Kms.signingAlgorithms.supported
+        .filterNot(_ == SigningAlgorithmSpec.Ed25519)
+
+    behave like signingProvider(
+      CryptoProvider.Kms.signingKeys.supported,
+      supportedSigningAlgorithmSpecs,
+      CryptoProvider.Kms.supportedSignatureFormats,
+      kmsCryptoF,
+      Some(kmsCryptoRestrictedF),
+      Some(SigningAlgorithmSpec.EcDsaSha384), // unsupported for `kmsCryptoRestrictedF`
+    )
+
+    // We only support ECIES for encryption, not for generating or converting keys into
+    val supportedEncryptionAlgorithmSpecs =
+      CryptoProvider.Kms.encryptionAlgorithms.supported.filterNot(scheme =>
+        Seq(
+          EncryptionAlgorithmSpec.EciesHkdfHmacSha256Aes128Cbc
+        ).contains(scheme)
+      )
+
+    behave like encryptionProvider(
+      supportedEncryptionAlgorithmSpecs,
+      CryptoProvider.Kms.symmetric.supported,
+      kmsCryptoF,
+      Some(EncryptionAlgorithmSpec.EciesHkdfHmacSha256Aes128Cbc), // unsupported
+    )
+
+    behave like randomnessProvider(kmsCryptoF.map(_.pureCrypto))
+
+    /* Some crypto tests are not executed for the following reasons:
+     * - [[PrivateKeySerializationTest]] + [[JavaPrivateKeyConverterTest]]: the private keys are stored externally
+     *   (e.g. an AWS KMS) and thus inaccessible to Canton
+     */
+  }
+
+  override def afterAll(): Unit = {
+    kmsCryptoRestrictedF
+      .onShutdown(throw new RuntimeException("Aborted due to shutdown."))
+      .foreach(_.close())
+    kmsCryptoF.onShutdown(throw new RuntimeException("Aborted due to shutdown.")).foreach(_.close())
+  }
+}
diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/kms/KmsKeysRegistration.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/kms/KmsKeysRegistration.scala
new file mode 100644
index 0000000000..111e134fa7
--- /dev/null
+++ b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/kms/KmsKeysRegistration.scala
@@ -0,0 +1,145 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.crypto.provider.kms
+
+import com.daml.nonempty.NonEmpty
+import com.digitalasset.canton.crypto.kms.KmsKeyId
+import com.digitalasset.canton.crypto.{
+  Crypto,
+  CryptoTestHelper,
+  EncryptionKeyGenerationError,
+  EncryptionKeySpec,
+  EncryptionPublicKey,
+  SigningKeyGenerationError,
+  SigningKeySpec,
+  SigningKeyUsage,
+  SigningPublicKey,
+}
+import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
+import org.scalatest.wordspec.AsyncWordSpec
+
+/** Defines the necessary methods to register KMS keys in Canton. */
+trait KmsKeysRegistration extends CryptoTestHelper with PredefinedKmsKeys {
+  this: AsyncWordSpec =>
+
+  /** Gets a new signing key by registering and returning a KMS pre-generated key.
+    *
+    * @return
+    *   a signing public key
+    */
+  override protected def getSigningPublicKey(
+      crypto: Crypto,
+      usage: NonEmpty[Set[SigningKeyUsage]],
+      keySpec: SigningKeySpec,
+  ): FutureUnlessShutdown[SigningPublicKey] = {
+    val (kmsKeyId, _) = predefinedSigningKeys
+      .get(keySpec)
+      .valueOrFail(s"no pre-generated signing KMS keys for $keySpec")
+    registerSigningPublicKey(crypto, keySpec, usage, kmsKeyId)
+  }
+
+  /** Gets two new signing keys by registering two different KMS pre-generated keys. */
+  override protected def getTwoSigningPublicKeys(
+      crypto: Crypto,
+      usage: NonEmpty[Set[SigningKeyUsage]],
+      keySpec: SigningKeySpec,
+  ): FutureUnlessShutdown[(SigningPublicKey, SigningPublicKey)] = {
+    val (kmsKeyId, kmsKeyIdOther) = predefinedSigningKeys
+      .get(keySpec)
+      .valueOrFail(s"no pre-generated signing KMS keys for $keySpec")
+    for {
+      pubKey1 <- registerSigningPublicKey(crypto, keySpec, usage, kmsKeyId)
+      pubKey2 <- registerSigningPublicKey(crypto, keySpec, usage, kmsKeyIdOther)
+    } yield (pubKey1, pubKey2)
+  }
+
+  private def registerSigningPublicKey(
+      crypto: Crypto,
+      keySpec: SigningKeySpec,
+      usage: NonEmpty[Set[SigningKeyUsage]],
+      kmsKeyId: KmsKeyId,
+  ): FutureUnlessShutdown[SigningPublicKey] = {
+
+    def checkSigningScheme(
+        key: SigningPublicKey
+    ): Either[SigningKeyGenerationError, SigningPublicKey] =
+      if (key.keySpec != keySpec)
+        Left(
+          SigningKeyGenerationError.GeneralKmsError(
+            s"it is a ${key.keySpec} key but we are expecting a $keySpec signing key"
+          )
+        )
+      else Right(key)
+
+    crypto.privateCrypto match {
+      case crypto: KmsPrivateCrypto =>
+        crypto
+          .registerSigningKey(kmsKeyId, usage)
+          .subflatMap(spk => checkSigningScheme(spk))
+          .valueOrFail("check signing key")
+      case _ => fail("using an incorrect private crypto api")
+    }
+  }
+
+  /** Gets a new encryption key by registering and returning a KMS pre-generated key.
+    *
+    * @return
+    *   an encryption public key
+    */
+  override def getEncryptionPublicKey(
+      crypto: Crypto,
+      encryptionKeySpec: EncryptionKeySpec,
+  ): FutureUnlessShutdown[EncryptionPublicKey] = {
+    val (kmsKeyId, _) = predefinedAsymmetricEncryptionKeys
+      .get(encryptionKeySpec)
+      .valueOrFail(s"no pre-generated encryption KMS keys for $encryptionKeySpec")
+    registerEncryptionPublicKey(crypto, encryptionKeySpec, kmsKeyId)
+  }
+
+  /** Gets two new encryption keys by registering two different KMS pre-generated keys. */
+  override protected def getTwoEncryptionPublicKeys(
+      crypto: Crypto,
+      encryptionKeySpec: EncryptionKeySpec,
+  ): FutureUnlessShutdown[(EncryptionPublicKey, EncryptionPublicKey)] = {
+    val (kmsKeyId, kmsKeyIdOther) = predefinedAsymmetricEncryptionKeys
+      .get(encryptionKeySpec)
+      .valueOrFail(s"no pre-generated encryption KMS keys for $encryptionKeySpec")
+    for {
+      pubKey1 <- registerEncryptionPublicKey(crypto, encryptionKeySpec, kmsKeyId)
+      pubKey2 <- registerEncryptionPublicKey(crypto, encryptionKeySpec, kmsKeyIdOther)
+    } yield (pubKey1, pubKey2)
+  }
+
+  private def registerEncryptionPublicKey(
+      crypto: Crypto,
+      keySpec: EncryptionKeySpec,
+      kmsKeyId: KmsKeyId,
+  ): FutureUnlessShutdown[EncryptionPublicKey] = {
+
+    def checkEncryptionScheme(
+        key: EncryptionPublicKey
+    ): Either[EncryptionKeyGenerationError, EncryptionPublicKey] =
+      if (key.keySpec != keySpec)
+        Left(
+          EncryptionKeyGenerationError.GeneralKmsError(
+            s"it is a ${key.keySpec} key but we are expecting a $keySpec encryption key"
+          )
+        )
+      else Right(key)
+
+    crypto.privateCrypto match {
+      case crypto: KmsPrivateCrypto =>
+        crypto
+          .registerEncryptionKey(kmsKeyId)
+          .subflatMap(epk => checkEncryptionScheme(epk))
+          .valueOrFail("retrieve encryption key")
+      case _ => fail("using an incorrect private crypto api")
+    }
+  }
+}
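Editorial note: KmsKeysRegistration swaps key generation for registration of pre-provisioned KMS keys; the essential call it wraps is `KmsPrivateCrypto.registerSigningKey`, roughly as below (the key id value is a placeholder):

  // Sketch: register an existing KMS key under a usage, mirroring
  // registerSigningPublicKey above.
  crypto.privateCrypto match {
    case kmsCrypto: KmsPrivateCrypto =>
      kmsCrypto
        .registerSigningKey(
          KmsKeyId(String300.tryCreate("alias/my-key")),
          SigningKeyUsage.ProtocolOnly,
        )
        .valueOrFail("register signing key")
    case other => fail(s"not a KMS-backed private crypto: $other")
  }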
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto.provider.kms + +import com.digitalasset.canton.config.CantonRequireTypes.String300 +import com.digitalasset.canton.crypto.kms.KmsKeyId +import com.digitalasset.canton.crypto.{EncryptionKeySpec, SigningKeySpec} + +trait HasPredefinedAwsKmsKeys extends PredefinedKmsKeys { + + override val predefinedSymmetricEncryptionKey: KmsKeyId = + KmsKeyId(String300.tryCreate("alias/canton-kms-test-key")) + + override val predefinedSigningKeys: Map[SigningKeySpec, (KmsKeyId, KmsKeyId)] = Map( + SigningKeySpec.EcP256 -> ( + KmsKeyId(String300.tryCreate("alias/canton-kms-test-signing-key")), + KmsKeyId(String300.tryCreate("alias/canton-kms-test-another-signing-key")) + ), + SigningKeySpec.EcP384 -> ( + KmsKeyId(String300.tryCreate("alias/canton-kms-test-signing-key-P384")), + KmsKeyId(String300.tryCreate("alias/canton-kms-test-another-signing-key-P384")) + ), + SigningKeySpec.EcSecp256k1 -> ( + KmsKeyId(String300.tryCreate("alias/canton-kms-test-signing-key-secp256k1")), + KmsKeyId(String300.tryCreate("alias/canton-kms-test-another-signing-key-secp256k1")) + ), + ) + + override val predefinedAsymmetricEncryptionKeys: Map[EncryptionKeySpec, (KmsKeyId, KmsKeyId)] = + Map( + EncryptionKeySpec.Rsa2048 -> ( + KmsKeyId(String300.tryCreate("alias/canton-kms-test-asymmetric-key")), + KmsKeyId(String300.tryCreate("alias/canton-kms-test-another-asymmetric-key")) + ) + ) + +} + +object PredefinedAwsKmsKeys extends HasPredefinedAwsKmsKeys {} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/kms/PredefinedGcpKmsKeys.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/kms/PredefinedGcpKmsKeys.scala new file mode 100644 index 0000000000..add772047b --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/kms/PredefinedGcpKmsKeys.scala @@ -0,0 +1,38 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto.provider.kms + +import com.digitalasset.canton.config.CantonRequireTypes.String300 +import com.digitalasset.canton.crypto.kms.KmsKeyId +import com.digitalasset.canton.crypto.{EncryptionKeySpec, SigningKeySpec} + +trait PredefinedGcpKmsKeys extends PredefinedKmsKeys { + + override val predefinedSymmetricEncryptionKey: KmsKeyId = + KmsKeyId(String300.tryCreate("canton-kms-test-key")) + + override val predefinedSigningKeys: Map[SigningKeySpec, (KmsKeyId, KmsKeyId)] = Map( + SigningKeySpec.EcP256 -> ( + KmsKeyId(String300.tryCreate("canton-kms-test-signing-key")), + KmsKeyId(String300.tryCreate("canton-kms-test-another-signing-key")) + ), + SigningKeySpec.EcP384 -> ( + KmsKeyId(String300.tryCreate("canton-kms-test-signing-key-P384")), + KmsKeyId(String300.tryCreate("canton-kms-test-another-signing-key-P384")) + ), + SigningKeySpec.EcSecp256k1 -> ( + KmsKeyId(String300.tryCreate("canton-kms-test-signing-key-secp256k1")), + KmsKeyId(String300.tryCreate("canton-kms-test-another-signing-key-secp256k1")) + ), + ) + + override val predefinedAsymmetricEncryptionKeys: Map[EncryptionKeySpec, (KmsKeyId, KmsKeyId)] = + Map( + EncryptionKeySpec.Rsa2048 -> ( + KmsKeyId(String300.tryCreate("canton-kms-test-asymmetric-key")), + KmsKeyId(String300.tryCreate("canton-kms-test-another-asymmetric-key")) + ) + ) + +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/kms/PredefinedKmsKeys.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/kms/PredefinedKmsKeys.scala new file mode 100644 index 0000000000..32156e8f22 --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/kms/PredefinedKmsKeys.scala @@ -0,0 +1,14 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto.provider.kms + +import com.digitalasset.canton.crypto.kms.KmsKeyId +import com.digitalasset.canton.crypto.{EncryptionKeySpec, SigningKeySpec} + +/** Stores the pre-generated keys for the different KMSs */ +trait PredefinedKmsKeys { + def predefinedSymmetricEncryptionKey: KmsKeyId + def predefinedSigningKeys: Map[SigningKeySpec, (KmsKeyId, KmsKeyId)] + def predefinedAsymmetricEncryptionKeys: Map[EncryptionKeySpec, (KmsKeyId, KmsKeyId)] +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/store/EncryptedCryptoPrivateStoreTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/store/EncryptedCryptoPrivateStoreTest.scala new file mode 100644 index 0000000000..09cc959919 --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/store/EncryptedCryptoPrivateStoreTest.scala @@ -0,0 +1,105 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.crypto.store
+
+import com.digitalasset.canton.crypto.KeyPurpose.Encryption
+import com.digitalasset.canton.crypto.kms.{KmsKeyId, SymbolicKms}
+import com.digitalasset.canton.crypto.store.db.DbCryptoPrivateStore
+import com.digitalasset.canton.crypto.{EncryptionPrivateKey, KeyName}
+import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
+import com.digitalasset.canton.resource.DbStorage
+import com.digitalasset.canton.store.db.{DbTest, H2Test, PostgresTest}
+import com.digitalasset.canton.tracing.TraceContext
+import com.digitalasset.canton.util.ByteString4096
+import org.scalatest.wordspec.AsyncWordSpec
+
+trait EncryptedCryptoPrivateStoreTest extends AsyncWordSpec with CryptoPrivateStoreExtendedTest {
+  this: DbTest =>
+
+  override def cleanDb(
+      storage: DbStorage
+  )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = {
+    import storage.api.*
+
+    /* We delete all private keys that ARE encrypted (wrapper_key_id IS NOT NULL).
+       This conditional delete is to avoid conflicts with the crypto private store tests. */
+    storage.update(
+      DBIO.seq(
+        sqlu"delete from common_crypto_private_keys where wrapper_key_id IS NOT NULL"
+      ),
+      operationName = s"${this.getClass}: Delete from encrypted private crypto table",
+    )
+  }
+
+  // Use null for the config as there's no KmsConfig subclass for SymbolicKms, which is a test-only KMS implementation
+  @SuppressWarnings(Array("org.wartremover.warts.Null"))
+  lazy val symbolicKms: SymbolicKms = new SymbolicKms(
+    crypto,
+    null,
+    timeouts,
+    loggerFactory,
+  )
+
+  lazy val keyId: KmsKeyId =
+    symbolicKms
+      .generateSymmetricEncryptionKey()
+      .valueOrFailShutdown("create symbolic KMS key")
+      .futureValue
+
+  lazy val dbStore =
+    new DbCryptoPrivateStore(storage, testedReleaseProtocolVersion, timeouts, loggerFactory)
+
+  lazy val encryptedStore = new EncryptedCryptoPrivateStore(
+    dbStore,
+    symbolicKms,
+    keyId,
+    testedReleaseProtocolVersion,
+    timeouts,
+    loggerFactory,
+  )
+
+  "EncryptedCryptoPrivateStore" can {
+    behave like cryptoPrivateStoreExtended(encryptedStore, encrypted = true)
+
+    "store private keys encrypted" in {
+      val encKeyName = uniqueKeyName("encKey_")
+      val encKey: EncryptionPrivateKey = crypto.newSymbolicEncryptionKeyPair().privateKey
+      val encKeyWithName: EncryptionPrivateKeyWithName =
+        EncryptionPrivateKeyWithName(encKey, Some(KeyName.tryCreate(encKeyName)))
+      val encKeyBytesWithName = encKey.toByteString(testedReleaseProtocolVersion.v)
+
+      for {
+        _ <- encryptedStore
+          .storeDecryptionKey(encKey, encKeyWithName.name)
+          .valueOrFailShutdown("store key 1")
+        expectedEncValue <- symbolicKms
+          .encryptSymmetric(
+            encryptedStore.wrapperKeyId,
+            ByteString4096.tryCreate(encKeyBytesWithName),
+          )
+          .valueOrFailShutdown("encrypt value")
+        resultEncrypted <- dbStore
+          .listPrivateKeys(Encryption, encrypted = true)
+          .valueOrFailShutdown("list keys without decrypting")
+        resultClear <- encryptedStore
+          .listPrivateKeys(Encryption, encrypted = true)
+          .valueOrFailShutdown("list keys after decrypting")
+      } yield {
+        resultEncrypted.size shouldBe 1
+        // this check works because the symbolic "encryption" is deterministic
+        resultEncrypted.head.data shouldBe expectedEncValue.unwrap
+
+        resultClear.size shouldBe 1
+        resultClear.head.data shouldEqual encKeyBytesWithName
+      }
+    }
+
+  }
+}
+
+class EncryptedCryptoPrivateStoreTestH2 extends EncryptedCryptoPrivateStoreTest with H2Test
+
+class EncryptedCryptoPrivateStoreTestPostgres
+    extends
EncryptedCryptoPrivateStoreTest + with PostgresTest diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/store/KmsMetadataStoreTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/store/KmsMetadataStoreTest.scala new file mode 100644 index 0000000000..6657641d9e --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/store/KmsMetadataStoreTest.scala @@ -0,0 +1,187 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto.store + +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.config.CachingConfigs +import com.digitalasset.canton.config.CantonRequireTypes.String300 +import com.digitalasset.canton.crypto.kms.KmsKeyId +import com.digitalasset.canton.crypto.store.KmsMetadataStore.KmsMetadata +import com.digitalasset.canton.crypto.store.db.DbKmsMetadataStore +import com.digitalasset.canton.crypto.store.memory.InMemoryKmsMetadataStore +import com.digitalasset.canton.crypto.{Fingerprint, KeyPurpose, SigningKeyUsage} +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.LogEntry +import com.digitalasset.canton.resource.DbStorage +import com.digitalasset.canton.store.db.{DbTest, H2Test, PostgresTest} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.{BaseTest, HasExecutionContext, ProtocolVersionChecksAsyncWordSpec} +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AsyncWordSpec +import org.scalatest.{BeforeAndAfterAll, EitherValues, OptionValues} + +trait KmsMetadataStoreTest + extends BeforeAndAfterAll + with EitherValues + with BaseTest + with ProtocolVersionChecksAsyncWordSpec + with HasExecutionContext { + this: AsyncWordSpec & Matchers & OptionValues => + + private val metadataA = { + val fingerprint = Fingerprint.tryFromString(Iterator.continually('a').take(68).mkString) + val keyId = KmsKeyId(String300.tryCreate(Iterator.continually('a').take(300).mkString)) + KmsMetadata(fingerprint, keyId, KeyPurpose.Signing, Some(SigningKeyUsage.ProtocolOnly)) + } + + private val metadataB = { + val fingerprint = + Fingerprint.tryFromString(Iterator.continually('b').take(68).mkString) + val keyId = KmsKeyId(String300.tryCreate(Iterator.continually('b').take(300).mkString)) + KmsMetadata(fingerprint, keyId, KeyPurpose.Encryption) + } + + private val fingerprintC = + Fingerprint.tryFromString(Iterator.continually('c').take(68).mkString) + + def kmsMetadataStore(mk: () => KmsMetadataStore): Unit = { + "kms metadata store" should { + "store and get metadata" in { + val store = mk() + + for { + _ <- store.store(metadataA) + _ <- store.store(metadataB) + getMA <- store.get(metadataA.fingerprint) + getMB <- store.get(metadataB.fingerprint) + getMC <- store.get(fingerprintC) + } yield { + getMA shouldBe Some(metadataA) + getMB shouldBe Some(metadataB) + getMC shouldBe None + } + }.failOnShutdown + + "be idempotent if inserting the same metadata twice" in { + val store = mk() + for { + _ <- store.store(metadataA) + _ <- store.store(metadataA) + getMA <- store.get(metadataA.fingerprint) + } yield { + getMA shouldBe Some(metadataA) + } + }.failOnShutdown + + "fail to insert 2 metadata with the same fingerprint" in { + val store = mk() + + val res = loggerFactory.assertLoggedWarningsAndErrorsSeq( + for { + _ <- store.store(metadataA) + _ <- 
store.store(metadataA.copy(kmsKeyId = metadataB.kmsKeyId)) + } yield (), + LogEntry.assertLogSeq( + Seq( + ( + _.errorMessage should include("has existing"), + "expected logged DB failure", + ) + ) + ), + ) + recoverToSucceededIf[IllegalStateException]( + res.onShutdown(throw new RuntimeException("aborted due to shutdown.")) + ) + } + + "delete metadata" in { + val store = mk() + + for { + _ <- store.store(metadataA) + _ <- store.store(metadataB) + afterInsert <- store.get(metadataA.fingerprint) + _ <- store.delete(metadataA.fingerprint) + afterDelete <- store.get(metadataA.fingerprint) + getMB <- store.get(metadataB.fingerprint) + } yield { + afterInsert shouldBe Some(metadataA) + afterDelete shouldBe None + getMB shouldBe Some(metadataB) // B should still be there + } + }.failOnShutdown + + "list metadata" in { + val store = mk() + + for { + _ <- store.store(metadataA) + _ <- store.store(metadataB) + metadata <- store.list() + } yield { + metadata should contain theSameElementsAs List(metadataA, metadataB) + } + }.failOnShutdown + + "invalidate internal cache when storing a new value" in { + val store = mk() + + for { + firstGet <- store.get(metadataA.fingerprint) + _ <- store.store(metadataA) + secondGet <- store.get(metadataA.fingerprint) + } yield { + firstGet shouldBe None + secondGet shouldBe Some(metadataA) + } + }.failOnShutdown + + "invalidate internal cache when deleting a value" in { + val store = mk() + + for { + _ <- store.store(metadataA) + firstGet <- store.get(metadataA.fingerprint) + _ <- store.delete(metadataA.fingerprint) + secondGet <- store.get(metadataA.fingerprint) + } yield { + firstGet shouldBe Some(metadataA) + secondGet shouldBe None + } + }.failOnShutdown + } + } +} + +trait DbKmsMetadataStoreTest extends AsyncWordSpec with BaseTest with KmsMetadataStoreTest { + this: DbTest => + override def cleanDb( + storage: DbStorage + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { + import storage.api.* + storage.update(DBIO.seq(sqlu"truncate table common_kms_metadata_store"), functionFullName) + } + + "KmsMetadataStore" should { + behave like kmsMetadataStore(() => + new DbKmsMetadataStore( + storage, + CachingConfigs.testing.kmsMetadataCache, + timeouts, + loggerFactory, + ) + ) + } +} + +class KmsMetadataStoreTestPostgres extends DbKmsMetadataStoreTest with PostgresTest + +class KmsMetadataStoreTestH2 extends DbKmsMetadataStoreTest with H2Test + +class KmsMetadataStoreTestInMemory extends AsyncWordSpec with BaseTest with KmsMetadataStoreTest { + "InMemoryKmsMetadataStore" should { + behave like kmsMetadataStore(() => new InMemoryKmsMetadataStore(loggerFactory)) + } +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/sync/SyncCryptoTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/sync/SyncCryptoTest.scala index c1fcd51141..690987b99a 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/sync/SyncCryptoTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/sync/SyncCryptoTest.scala @@ -12,7 +12,6 @@ import com.digitalasset.canton.config.{ CryptoProvider, SessionSigningKeysConfig, } -import com.digitalasset.canton.crypto.kms.CommunityKmsFactory import com.digitalasset.canton.crypto.signer.SyncCryptoSigner import com.digitalasset.canton.crypto.store.CryptoPrivateStoreFactory import com.digitalasset.canton.crypto.verifier.SyncCryptoVerifier @@ -68,6 +67,7 @@ trait SyncCryptoTest requiredHashAlgorithms = 
CryptoProvider.Jce.hash.supported, requiredCryptoKeyFormats = CryptoProvider.Jce.supportedCryptoKeyFormats, requiredSignatureFormats = CryptoProvider.Jce.supportedSignatureFormats, + topologyChangeDelay = StaticSynchronizerParameters.defaultTopologyChangeDelay, enableTransparencyChecks = false, protocolVersion = testedProtocolVersion, serial = NonNegativeInt.zero, @@ -125,7 +125,6 @@ trait SyncCryptoTest CachingConfigs.defaultPublicKeyConversionCache, new MemoryStorage(loggerFactory, timeouts), CryptoPrivateStoreFactory.withoutKms(wallClock, parallelExecutionContext), - CommunityKmsFactory, testedReleaseProtocolVersion, futureSupervisor, wallClock, diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/validations/SyncSchemeValidationsTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/validations/SyncSchemeValidationsTest.scala index 6e30e2dc41..ec0ea7c155 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/validations/SyncSchemeValidationsTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/crypto/validations/SyncSchemeValidationsTest.scala @@ -7,7 +7,6 @@ import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.config.{CachingConfigs, CryptoConfig, CryptoProvider} import com.digitalasset.canton.crypto.* -import com.digitalasset.canton.crypto.kms.CommunityKmsFactory import com.digitalasset.canton.crypto.store.{CryptoPrivateStoreExtended, CryptoPrivateStoreFactory} import com.digitalasset.canton.protocol.StaticSynchronizerParameters import com.digitalasset.canton.resource.MemoryStorage @@ -36,7 +35,6 @@ class SyncSchemeValidationsTest extends AnyWordSpec with BaseTest with HasExecut CachingConfigs.defaultPublicKeyConversionCache, new MemoryStorage(loggerFactory, timeouts), CryptoPrivateStoreFactory.withoutKms(wallClock, parallelExecutionContext), - CommunityKmsFactory, testedReleaseProtocolVersion, futureSupervisor, wallClock, @@ -80,6 +78,7 @@ class SyncSchemeValidationsTest extends AnyWordSpec with BaseTest with HasExecut requiredHashAlgorithms = CryptoProvider.Jce.hash.supported, requiredCryptoKeyFormats = CryptoProvider.Jce.supportedCryptoKeyFormats, requiredSignatureFormats = NonEmpty.mk(Set, SignatureFormat.Der), + topologyChangeDelay = StaticSynchronizerParameters.defaultTopologyChangeDelay, enableTransparencyChecks = false, protocolVersion = testedProtocolVersion, serial = NonNegativeInt.zero, diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/data/ActionDescriptionTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/data/ActionDescriptionTest.scala index 2c0e7da8f1..3db5e51e38 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/data/ActionDescriptionTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/data/ActionDescriptionTest.scala @@ -90,7 +90,7 @@ class ActionDescriptionTest extends AnyWordSpec with BaseTest { "the key value cannot be serialized" in { LookupByKeyActionDescription.create( LfVersioned( - ExampleTransactionFactory.transactionVersion, + ExampleTransactionFactory.serializationVersion, LfGlobalKey .build( LfTransactionBuilder.defaultTemplateId, diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/data/GenTransactionTreeTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/data/GenTransactionTreeTest.scala index db2373985a..94c7c8b155 
100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/data/GenTransactionTreeTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/data/GenTransactionTreeTest.scala @@ -7,7 +7,10 @@ import cats.syntax.traverse.* import com.daml.nonempty.NonEmpty import com.digitalasset.canton.crypto.{CryptoPureApi, HashPurpose} import com.digitalasset.canton.data.GenTransactionTree.ViewWithWitnessesAndRecipients -import com.digitalasset.canton.data.LightTransactionViewTree.InvalidLightTransactionViewTree +import com.digitalasset.canton.data.LightTransactionViewTree.{ + InvalidLightTransactionViewTree, + ToFullViewTreesResult, +} import com.digitalasset.canton.data.MerkleTree.{BlindSubtree, RevealIfNeedBe, RevealSubtree} import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.protocol.* @@ -17,6 +20,7 @@ import com.digitalasset.canton.sequencing.protocol.{MemberRecipient, Recipients, import com.digitalasset.canton.topology.ParticipantId import com.digitalasset.canton.topology.client.PartyTopologySnapshotClient import com.digitalasset.canton.topology.transaction.ParticipantPermission +import com.digitalasset.canton.util.RoseTree import com.digitalasset.canton.{ BaseTestWordSpec, FailOnShutdown, @@ -24,7 +28,7 @@ import com.digitalasset.canton.{ LfPartyId, ProtocolVersionChecksAnyWordSpec, } -import monocle.PIso +import monocle.{PIso, PLens} import scala.annotation.nowarn @@ -71,6 +75,24 @@ class GenTransactionTreeTest .valueOrFail("fail to create light view tree") } + // To test the correct treatment of associated information with each light transaction view tree, + // we take the view position as the associated information in this test. + private def idLens: PLens[ + (LightTransactionViewTree, ViewPosition), + (FullTransactionViewTree, RoseTree[ViewPosition]), + (LightTransactionViewTree, ViewPosition), + (FullTransactionViewTree, RoseTree[ViewPosition]), + ] = PIso.id + + private def withViewPosition[T <: TransactionViewTree](tree: T): (T, ViewPosition) = + (tree, tree.viewPosition) + + private def withViewPositions[T <: TransactionViewTree](tree: T): (T, RoseTree[ViewPosition]) = { + val viewPosTree = + tree.view.allSubviewsWithPositionTree(tree.viewPosition).map { case (_, viewPos) => viewPos } + (tree, viewPosTree) + } + forEvery(factory.standardHappyCases) { example => s"$example" can { val transactionTree = example.transactionTree @@ -129,22 +151,24 @@ class GenTransactionTreeTest ) { lt => LightTransactionViewTree.fromTrustedByteString( ((example.cryptoOps, randomnessLength), testedProtocolVersion) - )( - lt.toByteString - ) shouldBe Right(lt) + )(lt.toByteString) shouldBe Right(lt) } } "correctly reconstruct the full transaction view trees from the lightweight ones" in { - val allLightTrees = - allLightTransactionViewTreesWithRandomKeys( - example.transactionTree.allTransactionViewTrees - ) val allTrees = example.transactionTree.allTransactionViewTrees - LightTransactionViewTree - .toFullViewTrees(PIso.id, testedProtocolVersion, factory.cryptoOps, topLevelOnly = false)( - allLightTrees - ) shouldBe (allTrees, Seq.empty, Seq.empty) + val allLightTrees = allLightTransactionViewTreesWithRandomKeys(allTrees) + LightTransactionViewTree.toFullViewTrees( + idLens, + testedProtocolVersion, + factory.cryptoOps, + topLevelOnly = false, + allLightTrees.map(withViewPosition), + ) shouldBe ToFullViewTreesResult( + allTrees.map(withViewPositions), + Seq.empty, + Seq.empty, + ) } "correctly reconstruct the top-level transaction 
view trees from the lightweight ones" in { @@ -154,10 +178,13 @@ class GenTransactionTreeTest ) val allTrees = example.transactionTree.allTransactionViewTrees.filter(_.isTopLevel) - LightTransactionViewTree - .toFullViewTrees(PIso.id, testedProtocolVersion, factory.cryptoOps, topLevelOnly = true)( - allLightTrees - ) shouldBe (allTrees, Seq.empty, Seq.empty) + LightTransactionViewTree.toFullViewTrees( + idLens, + testedProtocolVersion, + factory.cryptoOps, + topLevelOnly = true, + allLightTrees.map(withViewPosition), + ) shouldBe ToFullViewTreesResult(allTrees.map(withViewPositions), Seq.empty, Seq.empty) } "correctly reconstruct the top-level transaction view trees from the lightweight ones for each informee" in { @@ -187,15 +214,17 @@ class GenTransactionTreeTest val topLevelForInf = allTrees.filter(t => topLevelHashesForInf.contains(t.viewHash)) val allLightWeightForInf = allLightTrees.filter(_._2.flatten.contains(inf)).map(_._1).toList - LightTransactionViewTree - .toFullViewTrees( - PIso.id, - testedProtocolVersion, - factory.cryptoOps, - topLevelOnly = true, - )( - allLightWeightForInf - ) shouldBe (topLevelForInf, Seq.empty, Seq.empty) + LightTransactionViewTree.toFullViewTrees( + idLens, + testedProtocolVersion, + factory.cryptoOps, + topLevelOnly = true, + allLightWeightForInf.map(withViewPosition), + ) shouldBe ToFullViewTreesResult( + topLevelForInf.map(withViewPositions), + Seq.empty, + Seq.empty, + ) } } @@ -221,10 +250,17 @@ class GenTransactionTreeTest ) ) - LightTransactionViewTree - .toFullViewTrees(PIso.id, testedProtocolVersion, factory.cryptoOps, topLevelOnly = false)( - inputLightTrees - ) shouldBe (expectedFullTrees, badLightTrees, Seq.empty) + LightTransactionViewTree.toFullViewTrees( + idLens, + testedProtocolVersion, + factory.cryptoOps, + topLevelOnly = false, + inputLightTrees.map(withViewPosition), + ) shouldBe ToFullViewTreesResult( + expectedFullTrees.map(withViewPositions), + badLightTrees.map(withViewPosition), + Seq.empty, + ) } "correctly process duplicate views" in { @@ -234,16 +270,30 @@ class GenTransactionTreeTest val allFullTrees = example.transactionTree.allTransactionViewTrees val inputLightTrees1 = allLightTrees.flatMap(tree => Seq(tree, tree)) - LightTransactionViewTree - .toFullViewTrees(PIso.id, testedProtocolVersion, factory.cryptoOps, topLevelOnly = false)( - inputLightTrees1 - ) shouldBe (allFullTrees, Seq.empty, allLightTrees) + LightTransactionViewTree.toFullViewTrees( + idLens, + testedProtocolVersion, + factory.cryptoOps, + topLevelOnly = false, + inputLightTrees1.map(withViewPosition), + ) shouldBe ToFullViewTreesResult( + allFullTrees.map(withViewPositions), + Seq.empty, + allLightTrees.map(withViewPosition), + ) val inputLightTrees2 = allLightTrees ++ allLightTrees - LightTransactionViewTree - .toFullViewTrees(PIso.id, testedProtocolVersion, factory.cryptoOps, topLevelOnly = false)( - inputLightTrees2 - ) shouldBe (allFullTrees, Seq.empty, allLightTrees) + LightTransactionViewTree.toFullViewTrees( + idLens, + testedProtocolVersion, + factory.cryptoOps, + topLevelOnly = false, + inputLightTrees2.map(withViewPosition), + ) shouldBe ToFullViewTreesResult( + allFullTrees.map(withViewPositions), + Seq.empty, + allLightTrees.map(withViewPosition), + ) } "correctly process views in an unusual order" in { @@ -253,9 +303,17 @@ class GenTransactionTreeTest val inputLightTrees = allLightTrees.sortBy(_.viewPosition.position.size) val allFullTrees = example.transactionTree.allTransactionViewTrees LightTransactionViewTree - .toFullViewTrees(PIso.id, 
testedProtocolVersion, factory.cryptoOps, topLevelOnly = false)( - inputLightTrees - ) shouldBe (allFullTrees, Seq.empty, Seq.empty) + .toFullViewTrees( + idLens, + testedProtocolVersion, + factory.cryptoOps, + topLevelOnly = false, + inputLightTrees.map(withViewPosition), + ) shouldBe ToFullViewTreesResult( + allFullTrees.map(withViewPositions), + Seq.empty, + Seq.empty, + ) } } } diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/data/GeneratorsData.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/data/GeneratorsData.scala index ae158d97a4..8700c47f2c 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/data/GeneratorsData.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/data/GeneratorsData.scala @@ -16,8 +16,7 @@ import com.digitalasset.canton.data.MerkleTree.VersionedMerkleTree import com.digitalasset.canton.data.ViewPosition.{MerklePathElement, MerkleSeqIndex} import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.protocol.* -import com.digitalasset.canton.sequencing.protocol.{MediatorGroupRecipient, TimeProof} -import com.digitalasset.canton.time.TimeProofTestUtil +import com.digitalasset.canton.sequencing.protocol.MediatorGroupRecipient import com.digitalasset.canton.topology.{GeneratorsTopology, ParticipantId, PhysicalSynchronizerId} import com.digitalasset.canton.util.ReassignmentTag.{Source, Target} import com.digitalasset.canton.util.collection.SeqUtil @@ -188,7 +187,7 @@ final class GeneratorsData( // We consider only this specific value because the goal is not exhaustive testing of LF (de)serialization chosenValue <- Gen.long.map(ValueInt64.apply) - version <- Arbitrary.arbitrary[LfLanguageVersion] + version <- Arbitrary.arbitrary[LfSerializationVersion] actors <- boundedSetGen[LfPartyId] seed <- Arbitrary.arbitrary[LfHash] @@ -600,20 +599,6 @@ final class GeneratorsData( .value ) - private val timeProofArb: Arbitrary[TimeProof] = Arbitrary( - for { - timestamp <- Arbitrary.arbitrary[CantonTimestamp] - previousEventTimestamp <- Arbitrary.arbitrary[Option[CantonTimestamp]] - counter <- nonNegativeLongArb.arbitrary.map(_.unwrap) - targetSynchronizerId <- Arbitrary.arbitrary[Target[PhysicalSynchronizerId]] - } yield TimeProofTestUtil.mkTimeProof( - timestamp, - previousEventTimestamp, - counter, - targetSynchronizerId, - ) - ) - implicit val unassignmentViewArb: Arbitrary[UnassignmentView] = Arbitrary( for { salt <- Arbitrary.arbitrary[Salt] @@ -623,7 +608,7 @@ final class GeneratorsData( targetSynchronizerId <- Arbitrary .arbitrary[Target[PhysicalSynchronizerId]] .map(_.map(_.copy(protocolVersion = protocolVersion))) - timeProof <- timeProofArb.arbitrary + targetTimestamp <- Arbitrary.arbitrary[Target[CantonTimestamp]] hashOps = TestHash // Not used for serialization @@ -632,7 +617,7 @@ final class GeneratorsData( salt, contracts, targetSynchronizerId, - timeProof, + targetTimestamp, sourceProtocolVersion, ) ) @@ -670,7 +655,7 @@ final class GeneratorsData( reassigningParticipants <- boundedSetGen[ParticipantId] sourceSynchronizer <- Arbitrary.arbitrary[PhysicalSynchronizerId].map(Source(_)) targetSynchronizer <- Arbitrary.arbitrary[PhysicalSynchronizerId].map(Target(_)) - targetTimestamp <- Arbitrary.arbitrary[CantonTimestamp] + targetTimestamp <- Arbitrary.arbitrary[Target[CantonTimestamp]] unassignmentTs <- Arbitrary.arbitrary[CantonTimestamp] } yield UnassignmentData( submitterMetadata, diff --git 
a/canton/community/common/src/test/scala/com/digitalasset/canton/data/TransactionViewTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/data/TransactionViewTest.scala index 8c841bfa22..eafc42bfd4 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/data/TransactionViewTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/data/TransactionViewTest.scala @@ -22,7 +22,7 @@ class TransactionViewTest extends AnyWordSpec with BaseTest with HasExecutionCon private val contractInst: LfThinContractInst = ExampleTransactionFactory.contractInstance() - private val cantonContractIdVersion: CantonContractIdV1Version = AuthenticatedContractIdVersionV11 + private val cantonContractIdVersion: CantonContractIdV1Version = CantonContractIdVersion.maxV1 private val createdId: LfContractId = cantonContractIdVersion.fromDiscriminator( ExampleTransactionFactory.lfHash(3), @@ -307,7 +307,7 @@ class TransactionViewTest extends AnyWordSpec with BaseTest with HasExecutionCon archivedInSubviews = Set(otherAbsoluteId), resolvedKeys = Map( ExampleTransactionFactory.defaultGlobalKey -> - LfVersioned(ExampleTransactionFactory.transactionVersion, AssignedKey(absoluteId)) + LfVersioned(ExampleTransactionFactory.serializationVersion, AssignedKey(absoluteId)) ), ).value diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/driver/AnotherTestDriver.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/driver/AnotherTestDriver.scala new file mode 100644 index 0000000000..8e6515f912 --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/driver/AnotherTestDriver.scala @@ -0,0 +1,14 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.driver + +import com.digitalasset.canton.driver.api.v1.DriverFactory + +private[driver] trait AnotherTestDriverFactory extends DriverFactory { + override type Driver = AnotherTestDriver +} + +private[driver] trait AnotherTestDriver { + def anotherTest: Int +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/driver/AnotherTestDriver1.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/driver/AnotherTestDriver1.scala new file mode 100644 index 0000000000..b0baa683b7 --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/driver/AnotherTestDriver1.scala @@ -0,0 +1,49 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.driver + +import com.digitalasset.canton.buildinfo.BuildInfo +import org.slf4j.Logger +import pureconfig.generic.semiauto.{deriveReader, deriveWriter} +import pureconfig.{ConfigReader, ConfigWriter} + +import scala.concurrent.ExecutionContext + +private[driver] class AnotherTestDriver1Factory extends AnotherTestDriverFactory { + + override def name: String = "test1" + + override def version: Int = 1 + + override def buildInfo: Option[String] = Some(BuildInfo.version) + + override type ConfigType = AnotherTestDriver1Config + + override def configReader: ConfigReader[AnotherTestDriver1Config] = + deriveReader[AnotherTestDriver1Config] + + override def configWriter(confidential: Boolean): ConfigWriter[AnotherTestDriver1Config] = + deriveWriter[AnotherTestDriver1Config] + + override def create( + config: AnotherTestDriver1Config, + loggerFactory: Class[_] => Logger, + executionContext: ExecutionContext, + ): AnotherTestDriver = + new AnotherTestDriver1(config, loggerFactory) +} + +class AnotherTestDriver1(config: AnotherTestDriver1Config, loggerFactory: Class[_] => Logger) + extends AnotherTestDriver { + + private val logger: Logger = loggerFactory(getClass) + + logger.debug(s"Loaded another test driver1 with config: $config") + + override def anotherTest: Int = + config.testInt + +} + +private[driver] final case class AnotherTestDriver1Config(testInt: Int) diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/driver/DriverLoaderTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/driver/DriverLoaderTest.scala new file mode 100644 index 0000000000..90b96ba0c6 --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/driver/DriverLoaderTest.scala @@ -0,0 +1,159 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.driver + +import com.digitalasset.canton.driver.v1.DriverLoader +import com.digitalasset.canton.util.EitherUtil.RichEither +import com.digitalasset.canton.{BaseTestWordSpec, HasExecutionContext} +import com.typesafe.config.ConfigValueFactory +import pureconfig.ConfigSource + +import scala.jdk.CollectionConverters.* + +class DriverLoaderTest extends BaseTestWordSpec with HasExecutionContext { + + private def driverConfig(configString: String): TestConfig.Driver = { + val config = ConfigSource + .string(configString) + .load[TestConfig] + .valueOrFail("config loading") + + config match { + case _: TestConfig.Builtin => fail("Config should be for a driver") + case driverConfig: TestConfig.Driver => driverConfig + } + } + + "DriverLoader" can { + + "load and run a test driver" in { + + val config = driverConfig( + "{ type = driver, name = test1, test-int = 23, config { test-string = foobar } }" + ) + + val driver = DriverLoader + .load[v1.TestDriverFactory, TestDriverFactory]( + config.name, + config.config, + loggerFactory, + executorService, + ) + .valueOrFail("load driver") + + driver.test shouldEqual "FOOBAR" + + } + + "load and run second test driver implementation" in { + + val config = driverConfig( + "{ type = driver, name = test2, test-int = 23, config { another-test-string = foobar } }" + ) + + val driver = DriverLoader + .load[v1.TestDriverFactory, TestDriverFactory]( + config.name, + config.config, + loggerFactory, + executorService, + ) + .valueOrFail("load driver") + + driver.test shouldEqual ("foobar".reverse) + + } + + "load and run another test driver with a different factory" in { + + val config = ConfigValueFactory.fromMap(Map("test-int" -> 42).asJava) + + val driver = DriverLoader + .load[AnotherTestDriverFactory, AnotherTestDriverFactory]( + "test1", + config, + loggerFactory, + executorService, + ) + .valueOrFail("load another driver") + + driver.anotherTest shouldEqual 42 + + } + + "load and run a driver implementation of a different version" in { + val config = driverConfig( + "{ type = driver, name = test1, test-int = 5, config { test-string = 23 } }" + ) + + val driver = DriverLoader + .load[v2.TestDriverFactory, TestDriverFactory]( + config.name, + config.config, + loggerFactory, + executorService, + ) + .valueOrFail("load driver") + + driver.test shouldEqual 23 + + } + + "fail to load driver with invalid driver config" in { + + // Invalid driver config with an unknown key + val config = driverConfig( + "{ type = driver, name = test1, test-int = 23, config { random-string = foobar } }" + ) + + DriverLoader + .load[v1.TestDriverFactory, TestDriverFactory]( + config.name, + config.config, + loggerFactory, + executorService, + ) + .tapLeft(err => logger.debug(s"Expected error: $err")) + .left + .value should include("Failed to read driver config") + + } + + "fail to load unknown driver" in { + + DriverLoader + .load[v1.TestDriverFactory, TestDriverFactory]( + "foobar", + // Random config, does not matter + ConfigValueFactory.fromAnyRef(42), + loggerFactory, + executorService, + ) + .tapLeft(err => logger.debug(s"Expected error: $err")) + .left + .value should include( + "Driver 'foobar' (interface com.digitalasset.canton.driver.v1.TestDriverFactory) not found. 
Found drivers: 'test1 v1', 'test2 v1', 'test1 v2'" + ) + } + + "fail to load a driver with wrong driver API version" in { + + DriverLoader + .load[v3.TestDriverFactory, TestDriverFactory]( + "test1", + // Random config, does not matter + ConfigValueFactory.fromAnyRef(42), + loggerFactory, + executorService, + ) + .tapLeft(err => logger.debug(s"Expected error: $err")) + .left + .value should include( + "Driver 'test1' (interface com.digitalasset.canton.driver.v3.TestDriverFactory) not found. Found drivers: 'test1 v1', 'test2 v1', 'test1 v2'" + ) + } + + } + +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/driver/TestDriver.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/driver/TestDriver.scala new file mode 100644 index 0000000000..4d11203eaf --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/driver/TestDriver.scala @@ -0,0 +1,29 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.driver + +import com.digitalasset.canton.driver.api.v1.DriverFactory +import com.typesafe.config.ConfigObject +import pureconfig.ConfigReader +import pureconfig.generic.semiauto.deriveReader + +trait TestDriver + +trait TestDriverFactory extends DriverFactory { + override type Driver <: TestDriver +} + +private[driver] sealed trait TestConfig { + def testInt: Int +} + +private[driver] object TestConfig { + final case class Builtin(testInt: Int) extends TestConfig + final case class Driver(name: String, testInt: Int, config: ConfigObject) extends TestConfig + + implicit val builtinReader: ConfigReader[Builtin] = deriveReader[Builtin] + implicit val driverReader: ConfigReader[Driver] = deriveReader[Driver] + implicit val reader: ConfigReader[TestConfig] = deriveReader[TestConfig] + +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/driver/v1/TestDriver.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/driver/v1/TestDriver.scala new file mode 100644 index 0000000000..6c1f134ae7 --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/driver/v1/TestDriver.scala @@ -0,0 +1,16 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.driver.v1 + +import com.digitalasset.canton.driver + +private[driver] trait TestDriverFactory extends driver.TestDriverFactory { + override def version: Int = 1 + + override type Driver = TestDriver +} + +private[v1] trait TestDriver extends driver.TestDriver { + def test: String +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/driver/v1/TestDriver1.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/driver/v1/TestDriver1.scala new file mode 100644 index 0000000000..8f21aef8cd --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/driver/v1/TestDriver1.scala @@ -0,0 +1,46 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.driver.v1 + +import org.slf4j.Logger +import pureconfig.generic.semiauto.{deriveReader, deriveWriter} +import pureconfig.{ConfigReader, ConfigWriter} + +import scala.concurrent.ExecutionContext + +private[driver] class TestDriver1Factory extends TestDriverFactory { + + override def name: String = "test1" + + override def buildInfo: Option[String] = None + + override type ConfigType = TestDriver1Config + + override def configReader: ConfigReader[TestDriver1Config] = deriveReader[TestDriver1Config] + + override def configWriter(confidential: Boolean): ConfigWriter[TestDriver1Config] = + deriveWriter[TestDriver1Config] + + override def create( + config: TestDriver1Config, + loggerFactory: Class[_] => Logger, + executionContext: ExecutionContext, + ): TestDriver = + new TestDriver1(config, loggerFactory) +} + +private[driver] class TestDriver1(config: TestDriver1Config, loggerFactory: Class[_] => Logger) + extends TestDriver { + + private val logger: Logger = loggerFactory(getClass) + + logger.debug(s"Loaded test driver1 with config: $config") + + def test: String = + // Do something in the driver based on a config value to ensure the right config is passed in and the driver is called + config.testString.toUpperCase + +} + +private[driver] final case class TestDriver1Config(testString: String) diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/driver/v1/TestDriver2.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/driver/v1/TestDriver2.scala new file mode 100644 index 0000000000..027d43551f --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/driver/v1/TestDriver2.scala @@ -0,0 +1,46 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.driver.v1 + +import org.slf4j.Logger +import pureconfig.generic.semiauto.{deriveReader, deriveWriter} +import pureconfig.{ConfigReader, ConfigWriter} + +import scala.concurrent.ExecutionContext + +private[driver] class TestDriver2Factory extends TestDriverFactory { + + override def name: String = "test2" + + override def buildInfo: Option[String] = None + + override type ConfigType = TestDriver2Config + + override def configReader: ConfigReader[TestDriver2Config] = deriveReader[TestDriver2Config] + + override def configWriter(confidential: Boolean): ConfigWriter[TestDriver2Config] = + deriveWriter[TestDriver2Config] + + override def create( + config: TestDriver2Config, + loggerFactory: Class[_] => Logger, + executionContext: ExecutionContext, + ): TestDriver = + new TestDriver2(config, loggerFactory) +} + +private[driver] class TestDriver2(config: TestDriver2Config, loggerFactory: Class[_] => Logger) + extends TestDriver { + + private val logger: Logger = loggerFactory(getClass) + + logger.debug(s"Loaded test driver2 with config: $config") + + def test: String = + // Do something in the driver based on a config value to ensure the right config is passed in and the driver is called + config.anotherTestString.reverse + +} + +private[driver] final case class TestDriver2Config(anotherTestString: String) diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/driver/v2/TestDriver.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/driver/v2/TestDriver.scala new file mode 100644 index 0000000000..48353048a8 --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/driver/v2/TestDriver.scala @@ -0,0 +1,18 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.driver.v2 + +import com.digitalasset.canton.driver + +private[driver] trait TestDriverFactory extends driver.TestDriverFactory { + override def version: Int = 2 + + override type Driver = TestDriver +} + +private[v2] trait TestDriver extends driver.TestDriver { + + // Breaking change from v1 to v2: String -> Int return type + def test: Int +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/driver/v2/TestDriver1.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/driver/v2/TestDriver1.scala new file mode 100644 index 0000000000..7df0744da6 --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/driver/v2/TestDriver1.scala @@ -0,0 +1,45 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.driver.v2
+
+import org.slf4j.Logger
+import pureconfig.generic.semiauto.{deriveReader, deriveWriter}
+import pureconfig.{ConfigReader, ConfigWriter}
+
+import scala.concurrent.ExecutionContext
+
+private[driver] class TestDriver1Factory extends TestDriverFactory {
+
+  override def name: String = "test1"
+
+  override def buildInfo: Option[String] = None
+
+  override type ConfigType = TestDriver1Config
+
+  override def configReader: ConfigReader[TestDriver1Config] = deriveReader[TestDriver1Config]
+
+  override def configWriter(confidential: Boolean): ConfigWriter[TestDriver1Config] =
+    deriveWriter[TestDriver1Config]
+
+  override def create(
+      config: TestDriver1Config,
+      loggerFactory: Class[_] => Logger,
+      executionContext: ExecutionContext,
+  ): TestDriver =
+    new TestDriver1(config, loggerFactory)
+}
+
+private[driver] class TestDriver1(config: TestDriver1Config, loggerFactory: Class[_] => Logger)
+    extends TestDriver {
+
+  private val logger: Logger = loggerFactory(getClass)
+
+  logger.debug(s"Loaded test driver1 with config: $config")
+
+  def test: Int =
+    config.testString.toIntOption.getOrElse(42)
+
+}
+
+private[driver] final case class TestDriver1Config(testString: String)
diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/driver/v3/TestDriver.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/driver/v3/TestDriver.scala
new file mode 100644
index 0000000000..f052603e5d
--- /dev/null
+++ b/canton/community/common/src/test/scala/com/digitalasset/canton/driver/v3/TestDriver.scala
@@ -0,0 +1,18 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.driver.v3
+
+import com.digitalasset.canton.driver
+
+private[driver] trait TestDriverFactory extends driver.TestDriverFactory {
+  override def version: Int = 3
+
+  override type Driver = TestDriver
+}
+
+private[v3] trait TestDriver extends driver.TestDriver {
+
+  // Breaking change from v2 to v3: Int -> Boolean return type
+  def test: Boolean
+}
diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/logging/SuppressingLoggerTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/logging/SuppressingLoggerTest.scala
index 59e9e62765..4634b0826a 100644
--- a/canton/community/common/src/test/scala/com/digitalasset/canton/logging/SuppressingLoggerTest.scala
+++ b/canton/community/common/src/test/scala/com/digitalasset/canton/logging/SuppressingLoggerTest.scala
@@ -12,6 +12,7 @@ import org.scalatest.wordspec.AnyWordSpec
 import org.slf4j
 import org.slf4j.event.Level
 
+import scala.annotation.tailrec
 import scala.collection.immutable.ListMap
 import scala.concurrent.duration.*
 import scala.concurrent.{Future, Promise}
@@ -432,6 +433,36 @@ class SuppressingLoggerTest extends AnyWordSpec with BaseTest with HasExecutionC
       )
       .futureValue
   }
+
+    "suppress the right messages when suppression rule changes" in {
+      // This is a regression test. Choosing `iterations` high enough so the problem is reproduced reliably.
+      val iterations = 10000
+
+      // Log `iterations` messages at INFO level
+      // Do that in the background (separate thread)
+      val f = Future {
+        for (i <- 0 until iterations) {
+          logger.info(s"Info message: $i")
+        }
+      }
+
+      // Do the following until the logging completes or a test failure is detected (whichever happens first).
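+      // Each pass alternates between full suppression and WARN-only suppression while the
+      // background thread is still logging, so stale suppression state left over from the
+      // previous rule would show up as an unexpectedly captured INFO message below.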
+      @tailrec
+      def go(): Unit = {
+        // Suppress all messages, do not check anything.
+        loggerFactory.assertLogsSeq(SuppressionRule.FullSuppression)((), _ => succeed)
+
+        // Suppress only WARN messages, check that nothing has been suppressed (because no warnings are logged).
+        // Prior to the creation of this test, there used to be a race condition in SuppressingLogger that
+        // would make this assertion fail.
+        loggerFactory.assertLogsSeq(SuppressionRule.Level(Level.WARN))((), _ shouldBe empty)
+
+        if (!f.isCompleted) go()
+      }
+      go()
+
+      f.futureValue
+    }
   }
 
   "Throwable.addSuppressed" should {
@@ -455,7 +486,7 @@ class SuppressingLoggerTest extends AnyWordSpec with BaseTest with HasExecutionC
   class LoggingTester extends NamedLogging {
     val underlyingNamedLogger = new TestNamedLogger
     def skipLogEntry(_logEntry: LogEntry): Boolean = false
-    val loggerFactory: SuppressingLogger =
+    override val loggerFactory: SuppressingLogger =
       new SuppressingLogger(underlyingNamedLogger, pollTimeout = 10.millis, skipLogEntry)
     val underlyingLogger: slf4j.Logger = underlyingNamedLogger.logger
   }
diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/networking/grpc/MultiHostNameResolverTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/networking/grpc/MultiHostNameResolverTest.scala
new file mode 100644
index 0000000000..78ed789461
--- /dev/null
+++ b/canton/community/common/src/test/scala/com/digitalasset/canton/networking/grpc/MultiHostNameResolverTest.scala
@@ -0,0 +1,138 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.networking.grpc
+
+import com.daml.nonempty.NonEmpty
+import com.digitalasset.canton.config.RequireTypes.Port
+import com.digitalasset.canton.networking.Endpoint
+import com.digitalasset.canton.{BaseTestWordSpec, HasExecutorService}
+import io.grpc.NameResolver.{Listener2, ServiceConfigParser}
+import io.grpc.internal.GrpcUtil
+import io.grpc.{NameResolver, Status, SynchronizationContext}
+
+import java.net.InetSocketAddress
+import scala.concurrent.{Future, Promise}
+
+@SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
+class MultiHostNameResolverTest extends BaseTestWordSpec with HasExecutorService {
+  private def mkEndpoints(endpoints: String*): NonEmpty[Seq[Endpoint]] =
+    NonEmpty
+      .from(
+        endpoints
+          .map(_.split(":").take(2))
+          .map(e => Endpoint(e(0), Port.tryCreate(e(1).toInt)))
+      )
+      .value
+
+  // pretty dodgy attempt to invoke the name resolver for asserting the behavior it exhibits
+  // we're testing our usage of gRPC internals, so this may prove flaky
+  // it will likely attempt to resolve addresses using DNS, so it may fail if a network is not accessible
+  private def resolve(endpoints: String*): Future[Either[Status, NameResolver.ResolutionResult]] = {
+    val synchronizationContext = new SynchronizationContext(
+      // this is pretty crude, but if this is hit it will log an error that will cause our error check to fail
+      (_, unhandledException) =>
+        logger.error(s"Unhandled exception on grpc sync context", unhandledException)
+    )
+
+    val resolver = new MultiHostNameResolver(
+      mkEndpoints(endpoints*),
+      NameResolver.Args
+        .newBuilder()
+        .setDefaultPort(80)
+        .setProxyDetector(GrpcUtil.DEFAULT_PROXY_DETECTOR)
+        .setSynchronizationContext(synchronizationContext)
+        .setServiceConfigParser(mock[ServiceConfigParser])
+        .setScheduledExecutorService(scheduledExecutor())
+        .build(),
+    )
+
+    // we're going to assume in our usage that the listener is only called once with an error or result
+    // technically the listener supports updating results over time but that's a feature we don't currently use
+    val resultP = Promise[Either[Status, NameResolver.ResolutionResult]]()
+
+    resolver.start(new Listener2 {
+      override def onResult(resolutionResult: NameResolver.ResolutionResult): Unit =
+        resultP.trySuccess(Right(resolutionResult))
+      override def onError(error: Status): Unit = resultP.trySuccess(Left(error))
+    })
+
+    resultP.future
+  }
+
+  "resolving addresses" should {
+    "handle localhost" in {
+      val result = valueOrFail(resolve("localhost:1234").futureValue)("resolve localhost address")
+      val address = result.getAddressesOrError.getValue
+        .get(0)
+        .getAddresses
+        .get(0)
+        .asInstanceOf[InetSocketAddress]
+
+      // we won't assert the actual address as it could be different if resolved to an ipv4 or ipv6 address
+      // we'll just be happy an address was clearly found and check that the port is correct
+      address.getPort shouldBe 1234
+    }
+
+    "handle ip address" in {
+      val result = valueOrFail(resolve("192.0.2.1:1234").futureValue)("resolve ip address")
+      val address = result.getAddressesOrError.getValue
+        .get(0)
+        .getAddresses
+        .get(0)
+        .asInstanceOf[InetSocketAddress]
+
+      // we won't assert the actual address as it could be different if resolved to an ipv4 or ipv6 address
+      // we'll just be happy an address was clearly found and check that the port is correct
+      address.getPort shouldBe 1234
+    }
+
+    "produce error if provided bad address" in {
+      val result = leftOrFail(resolve("nope.this.does.not.exist:1234").futureValue)(
+        "produce error for bad address"
+      )
+
+      result.getCode shouldBe Status.Code.UNAVAILABLE
+    }
+
+    "aggregate multiple successful addresses" in {
+      val result =
+        valueOrFail(resolve("localhost:1234", "localhost:1235").futureValue)(
+          "aggregate multiple successful addresses"
+        )
+
+      val countOfAddresses = result.getAddressesOrError.getValue.size()
+
+      // Count is 2 or 4, depending on whether ipv6 addresses are included in the result
+      countOfAddresses should (be(2) or be(4))
+    }
+
+    "fail if one address fails" in {
+      val error = leftOrFail(
+        resolve("localhost:1", "localhost:2", "nope.this.does.not.exist:3").futureValue
+      )("resolve with error")
+
+      error.getCode shouldBe Status.Code.UNAVAILABLE
+    }
+
+    "add localhost endpoint to attributes" in {
+      val result = valueOrFail(resolve("localhost:1234").futureValue)("resolve localhost address")
+      val endpointFromAttributes =
+        result.getAddressesOrError.getValue.get(0).getAttributes.get(Endpoint.ATTR_ENDPOINT)
+
+      // the actual endpoint object is passed as an attribute, which is useful during authentication
+      // in order to pick the right connection to fetch tokens from for the current call
+      endpointFromAttributes shouldBe Endpoint("localhost", Port.tryCreate(1234))
+    }
+
+    "add ip address endpoint to attributes" in {
+      val result = valueOrFail(resolve("192.0.2.1:1234").futureValue)("resolve ip address")
+      val endpointFromAttributes =
+        result.getAddressesOrError.getValue.get(0).getAttributes.get(Endpoint.ATTR_ENDPOINT)
+
+      // the actual endpoint object is passed as an attribute, which is useful during authentication
+      // in order to pick the right connection to fetch tokens from for the current call
+      endpointFromAttributes shouldBe Endpoint("192.0.2.1", Port.tryCreate(1234))
+    }
+  }
+}
diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/nightly/AwsKmsTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/nightly/AwsKmsTest.scala
b/canton/community/common/src/test/scala/com/digitalasset/canton/nightly/AwsKmsTest.scala new file mode 100644 index 0000000000..624d17b647 --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/nightly/AwsKmsTest.scala @@ -0,0 +1,182 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.nightly + +import com.digitalasset.canton.config.KmsConfig +import com.digitalasset.canton.crypto.kms.KmsError.{KmsDeleteKeyError, KmsKeyDisabledError} +import com.digitalasset.canton.crypto.kms.aws.AwsKms +import com.digitalasset.canton.crypto.provider.kms.HasPredefinedAwsKmsKeys +import com.digitalasset.canton.logging.{LogEntry, SuppressionRule} +import com.digitalasset.canton.tracing.{NoReportingTracerProvider, TraceContext} +import com.digitalasset.canton.util.ResourceUtil +import org.scalatest.wordspec.FixtureAsyncWordSpec +import org.slf4j.event.Level.INFO + +class AwsKmsTest extends FixtureAsyncWordSpec with ExternalKmsTest with HasPredefinedAwsKmsKeys { + override type KmsType = AwsKms + + override protected def defaultKmsConfig: KmsConfig.Aws = + KmsConfig.Aws.defaultTestConfig + + override protected def newKms(config: KmsConfig.Aws = defaultKmsConfig) = + AwsKms + .create( + config, + timeouts, + loggerFactory, + NoReportingTracerProvider, + ) + .valueOrFail("create AWS KMS client") + + "AWS KMS" must { + behave like kms() + behave like externalKms( + wrongKmsConfig = KmsConfig.Aws( + region = "wrong-region" + ) + ) + + "create MULTI-REGION symmetric key, encrypt/decrypt and delete that key" in { _ => + ResourceUtil.withResourceM(newKms(KmsConfig.Aws.multiRegionTestConfig)) { kms => + for { + keyIdMulti <- kms + .generateSymmetricEncryptionKey() + .valueOrFail("create KMS key") + .failOnShutdown + keyExists <- keyActivenessCheckAndDelete(kms, keyIdMulti).failOnShutdown + } yield keyExists.left.value shouldBe a[KmsKeyDisabledError] + } + } + + "log requests and responses with trace contexts" in { fixture => + ResourceUtil.withResourceM(newKms(KmsConfig.Aws.testConfigWithAudit)) { kms => + TraceContext.withNewTraceContext("awk_kms") { tc => + val traceId = tc.traceId.value + + def checkTraceId(le: LogEntry) = le.mdc.get("trace-id").value shouldBe traceId + + loggerFactory.assertLogsSeq(SuppressionRule.LevelAndAbove(INFO))( + for { + // Run a request that will fail to test that we log on failed requests as well + deleteKeyFailed <- kms + .deleteKey(kmsKeyIdWrong)(executionContext, tc) + .value + .failOnShutdown + + // Run a successful encrypt / decrypt tests + symmetricKeyId <- kmsSymmetricEncryptionKeyId.failOnShutdown + _ <- encryptDecryptSymmetricTest(kms, symmetricKeyId, tc = tc).failOnShutdown + signingKey <- kmsSigningKey.failOnShutdown + _ <- signVerifyTest(fixture.pureCrypto, kms, signingKey, tc = tc).failOnShutdown + } yield { + kms.close() + deleteKeyFailed.left.value shouldBe a[KmsDeleteKeyError] + }, + LogEntry.assertLogSeq( + Seq( + ( + le => { + le.infoMessage should (include( + // Make sure plaintext is not logged + "SignRequest" + ) and not include plainTextData) + checkTraceId(le) + }, + "signing request", + ), + ( + le => { + le.infoMessage should include( + "GetPublicKeyRequest" + ) + checkTraceId(le) + }, + "get public key request", + ), + ( + le => { + le.infoMessage should (include( + "Received response" + ) and include("GetPublicKeyResponse") and not include plainTextData) + checkTraceId(le) + }, + "get public key response", + 
), + ( + le => { + le.infoMessage should (include( + "Received response" + ) and include("SignResponse") and not include plainTextData) + checkTraceId(le) + }, + "signing response", + ), + ( + le => { + le.infoMessage should (include( + // Make sure plaintext is not logged + "EncryptRequest" + ) and not include plainTextData) + checkTraceId(le) + }, + "encrypt request", + ), + ( + le => { + le.infoMessage should (include( + "Received response" + ) and include("EncryptResponse") and not include plainTextData and include( + "** Ciphertext placeholder **" + )) + checkTraceId(le) + }, + "encrypt response", + ), + ( + le => { + le.infoMessage should (include( + // Make sure plaintext is not logged + "DecryptRequest" + ) and not include plainTextData and include( + "** Ciphertext placeholder **" + )) + checkTraceId(le) + }, + "decrypt request", + ), + ( + le => { + le.infoMessage should include( + // Make sure plaintext is not logged + "DeleteKeyRequest" + ) + checkTraceId(le) + }, + "delete key request", + ), + ( + le => { + le.infoMessage should (include( + "Received response" + ) and include("DecryptResponse") and not include plainTextData) + checkTraceId(le) + }, + "decrypt response", + ), + ( + le => { + le.warningMessage should (include("Request") and include("failed")) + le.throwable.value.getMessage should include("Invalid keyId 'key_wrong_id'") + checkTraceId(le) + }, + "failed delete response", + ), + ) + ), + ) + } + } + } + } +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/nightly/ExternalKmsTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/nightly/ExternalKmsTest.scala new file mode 100644 index 0000000000..ff6ba85201 --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/nightly/ExternalKmsTest.scala @@ -0,0 +1,111 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.nightly + +import cats.data.EitherT +import com.digitalasset.canton.crypto.kms.KmsError.KmsEncryptError +import com.digitalasset.canton.crypto.kms.{KmsError, KmsKeyId, KmsTest} +import com.digitalasset.canton.crypto.provider.kms.PredefinedKmsKeys +import com.digitalasset.canton.crypto.{ + EncryptionAlgorithmSpec, + EncryptionKeySpec, + SignatureFormat, + SigningAlgorithmSpec, + SigningKeySpec, +} +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.util.{ByteString4096, ResourceUtil} +import org.scalatest.Assertion +import org.scalatest.wordspec.FixtureAsyncWordSpec + +/** Implements all tests that are shared among 'external' KMSs that require an access point and an + * SDK client (i.e. AWS and GCP vs Symbolic).
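+ * Concrete suites such as AwsKmsTest and GcpKmsTest supply the client via `newKms` and the predefined key fixtures.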
+ */ +trait ExternalKmsTest extends KmsTest { + self: FixtureAsyncWordSpec & PredefinedKmsKeys => + + def kmsSymmetricEncryptionKeyId: FutureUnlessShutdown[KmsKeyId] = + FutureUnlessShutdown.pure(predefinedSymmetricEncryptionKey) + + def kmsAsymmetricEncryptionKey: FutureUnlessShutdown[KmsAsymmetricEncryptionKey] = + FutureUnlessShutdown.pure( + KmsAsymmetricEncryptionKey.create( + predefinedAsymmetricEncryptionKeys + .get(EncryptionKeySpec.Rsa2048) + .valueOrFail("could not find predefined Rsa2048 encryption key") + ._1, + EncryptionKeySpec.Rsa2048, + EncryptionAlgorithmSpec.RsaOaepSha256, + ) + ) + + def kmsSigningKey: FutureUnlessShutdown[KmsSigningKey] = + FutureUnlessShutdown.pure( + KmsSigningKey( + predefinedSigningKeys + .get(SigningKeySpec.EcP256) + .valueOrFail("could not find predefined P256 signing key") + ._1, + SigningKeySpec.EcP256, + SigningAlgorithmSpec.EcDsaSha256, + SignatureFormat.Der, + ) + ) + + // signing key used to verify a signature generated from another key + def kmsAnotherSigningKey: FutureUnlessShutdown[KmsSigningKey] = + FutureUnlessShutdown.pure( + KmsSigningKey( + predefinedSigningKeys + .get(SigningKeySpec.EcP256) + .valueOrFail("could not find predefined P256 signing key") + ._2, + SigningKeySpec.EcP256, + SigningAlgorithmSpec.EcDsaSha256, + SignatureFormat.Der, + ) + ) + + protected def keyActivenessCheckAndDelete( + kms: KmsType, + keyId: KmsKeyId, + ): FutureUnlessShutdown[Either[KmsError, Unit]] = { + + def checkKeyExistsAndRunEncryptionTest(): FutureUnlessShutdown[Unit] = + for { + _ <- eventually()( + kms.keyExistsAndIsActive(keyId).valueOrFail("check key exists") + ) + _ <- EitherT + .liftF[FutureUnlessShutdown, KmsError, Assertion]( + encryptDecryptSymmetricTest(kms, keyId) + ) + .valueOrFail("encrypt and decrypt") + } yield () + + for { + _ <- checkKeyExistsAndRunEncryptionTest() + .thereafterF(_ => kms.deleteKey(keyId).valueOrFailShutdown("delete key")) + // key should no longer be available + keyExists <- kms.keyExistsAndIsActive(keyId).value + } yield keyExists + } + + def externalKms(wrongKmsConfig: KmsType#Config): Unit = + "fail encryption if wrong region selected" in { _ => + ResourceUtil.withResourceM(newKms(wrongKmsConfig)) { kms => + for { + encryptedDataFailed <- kms + .encryptSymmetric( + predefinedSymmetricEncryptionKey, + ByteString4096.tryCreate(dataToHandle), + ) + .value + .failOnShutdown + } yield encryptedDataFailed.left.value shouldBe a[KmsEncryptError] + } + } + +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/nightly/GcpKmsTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/nightly/GcpKmsTest.scala new file mode 100644 index 0000000000..733d4571fb --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/nightly/GcpKmsTest.scala @@ -0,0 +1,47 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.nightly + +import com.digitalasset.canton.config.KmsConfig +import com.digitalasset.canton.crypto.kms.KmsError.KmsKeyDisabledError +import com.digitalasset.canton.crypto.kms.gcp.GcpKms +import com.digitalasset.canton.crypto.provider.kms.PredefinedGcpKmsKeys +import com.digitalasset.canton.util.ResourceUtil +import org.scalatest.wordspec.FixtureAsyncWordSpec + +class GcpKmsTest extends FixtureAsyncWordSpec with PredefinedGcpKmsKeys with ExternalKmsTest { + override type KmsType = GcpKms + + override protected def defaultKmsConfig: KmsConfig.Gcp = + KmsConfig.Gcp.defaultTestConfig + + override protected def newKms(config: KmsConfig.Gcp = defaultKmsConfig): GcpKms = + GcpKms + .create(config, timeouts, loggerFactory) + .valueOrFail("create GCP KMS client") + + "GCP KMS" must { + behave like kms() + behave like externalKms( + wrongKmsConfig = KmsConfig.Gcp( + locationId = "wrong-region", + projectId = "", + keyRingId = "", + ) + ) + + "create MULTI-REGION symmetric key, encrypt/decrypt and delete that key" in { _ => + ResourceUtil.withResourceM(newKms(KmsConfig.Gcp.multiRegionTestConfig)) { kms => + for { + keyIdMulti <- kms + .generateSymmetricEncryptionKey() + .valueOrFail("create KMS key") + .failOnShutdown + keyExists <- keyActivenessCheckAndDelete(kms, keyIdMulti).failOnShutdown + } yield keyExists.left.value shouldBe a[KmsKeyDisabledError] + } + } + } + +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/CantonContractIdVersionTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/CantonContractIdVersionTest.scala index 2250996f8f..bc982e39a7 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/CantonContractIdVersionTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/CantonContractIdVersionTest.scala @@ -10,7 +10,7 @@ import org.scalatest.wordspec.AnyWordSpec class CantonContractIdVersionTest extends AnyWordSpec with BaseTest { - forEvery(Seq(AuthenticatedContractIdVersionV10, AuthenticatedContractIdVersionV11)) { underTest => + forEvery(CantonContractIdVersion.allV1) { underTest => s"$underTest" when { val discriminator = ExampleTransactionFactory.lfHash(1) val unicum = Unicum(TestHash.digest(1)) diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/DynamicSynchronizerParametersHistoryTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/DynamicSynchronizerParametersHistoryTest.scala new file mode 100644 index 0000000000..444c6d58b1 --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/DynamicSynchronizerParametersHistoryTest.scala @@ -0,0 +1,250 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import com.digitalasset.canton.BaseTest +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.protocol.DynamicSynchronizerParametersHistory.{ + latestDecisionDeadline, + latestDecisionDeadlineEffectiveAt, +} +import com.digitalasset.canton.time.NonNegativeFiniteDuration +import org.scalatest.wordspec.AnyWordSpec + +class DynamicSynchronizerParametersHistoryTest extends AnyWordSpec with BaseTest { + + private val lowerBound = CantonTimestamp.ofEpochSecond(1000L) + + // Default "decision timeout" of 1 minute + private val paramsDefault: DynamicSynchronizerParameters = + DynamicSynchronizerParameters.defaultValues(testedProtocolVersion) + + // A "decision timeout" of 90s, a bit longer than the default + private val paramsLonger = paramsDefault.tryUpdate( + confirmationResponseTimeout = NonNegativeFiniteDuration.tryOfSeconds(45L), + mediatorReactionTimeout = NonNegativeFiniteDuration.tryOfSeconds(45L), + ) + + // Huge decision timeout of 90 minutes, possibly due to an operator configuration mistake + private val paramsHuge = paramsDefault.tryUpdate( + confirmationResponseTimeout = NonNegativeFiniteDuration.tryOfMinutes(45L), + mediatorReactionTimeout = NonNegativeFiniteDuration.tryOfMinutes(45L), + ) + + private def params( + validFromToMaxDecisionTimeoutAndValidUntil: Seq[ + (Int, (Int, Option[Int])) + ] + ): Seq[DynamicSynchronizerParametersWithValidity] = + validFromToMaxDecisionTimeoutAndValidUntil.map { case (validFrom, (maxDecision, validUntil)) => + DynamicSynchronizerParametersWithValidity( + paramsDefault.tryUpdate( + confirmationResponseTimeout = + NonNegativeFiniteDuration.tryOfSeconds(maxDecision.toLong) / NonNegativeInt.two, + mediatorReactionTimeout = + NonNegativeFiniteDuration.tryOfSeconds(maxDecision.toLong) / NonNegativeInt.two, + ), + CantonTimestamp.ofEpochSecond(validFrom.toLong), + validUntil.map(until => CantonTimestamp.ofEpochSecond(until.toLong)), + ) + } + + private def at(when: Int) = CantonTimestamp.ofEpochSecond(when.toLong) + private def duration(seconds: Int) = + NonNegativeFiniteDuration.tryOfSeconds(seconds.toLong) + + "latestDecisionDeadline" should { + + "return the lowerBound for an empty history" in { + val history = Seq.empty[DynamicSynchronizerParametersWithValidity] + + latestDecisionDeadline(history, lowerBound) shouldBe lowerBound + } + + "calculate the deadline for a history with only one, currently valid entry" in { + val history = Seq( + DynamicSynchronizerParametersWithValidity(paramsDefault, CantonTimestamp.Epoch, None) + ) + + // The deadline is calculated from the lowerBound, as it's the latest activeness time + // for the currently valid parameters. + val expectedDeadline = lowerBound + .add(paramsDefault.confirmationResponseTimeout.unwrap) + .add(paramsDefault.mediatorReactionTimeout.unwrap) // 1000s + 60s = 1060s + + latestDecisionDeadline(history, lowerBound) shouldBe expectedDeadline + } + + "select the deadline from the currently active parameters if it is the latest" in { + val ts200 = CantonTimestamp.ofEpochSecond(200L) + val history = Seq( + // A past change with a default timeout + DynamicSynchronizerParametersWithValidity( + paramsDefault, + CantonTimestamp.Epoch, + Some(ts200), + ), + // The currently active parameters with a longer timeout + DynamicSynchronizerParametersWithValidity(paramsLonger, ts200, None), + ) + + // Deadlines: + // 1.
Past change: 200s (validUntil) + 60s = 260s + // 2. Current change: 1000s (lowerBound) + 90s = 1090s + // The maximum of (lowerBound, 260, 1090) is 1090. + val expectedDeadline = CantonTimestamp.ofEpochSecond(1090L) + + latestDecisionDeadline(history, lowerBound) shouldBe expectedDeadline + } + + "consider a past configuration (mistake) resulting in a huge decision timeout" in { + // This scenario models: Default -> Huge decision timeout -> Default (currently active) + val startOfDefaults = CantonTimestamp.ofEpochSecond(100L) + val startOfHugeParams = CantonTimestamp.ofEpochSecond(200L) + val endOfHugeParams = CantonTimestamp.ofEpochSecond(300L) + + val history = Seq( + DynamicSynchronizerParametersWithValidity( + paramsDefault, + startOfDefaults, + Some(startOfHugeParams), + ), + DynamicSynchronizerParametersWithValidity( + paramsHuge, + startOfHugeParams, + Some(endOfHugeParams), + ), + DynamicSynchronizerParametersWithValidity(paramsDefault, endOfHugeParams, None), + ) + + // Deadlines: + // 1. Default period 1: 200s (validUntil) + 60s = 260s + // 2. Huge period: 300s (validUntil) + 5400s = 5700s + // 3. Current default period: 1000s (lowerBound) + 60s = 1060s + // The maximum of (lowerBound, 260, 5700, 1060) is 5700. + val expectedDeadline = CantonTimestamp.ofEpochSecond(5700L) + + latestDecisionDeadline(history, lowerBound) shouldBe expectedDeadline + } + } + + "latestDecisionDeadlineEffectiveAt" should { + "compute latest decision deadline on unchanged parameters" in { + latestDecisionDeadlineEffectiveAt( + params(Seq(0 -> (60, None))), + at(1), + ) shouldBe at(1) + duration(60) + + latestDecisionDeadlineEffectiveAt( + params(Seq(0 -> (60, None))), + at(60), + ) shouldBe at(60) + duration(60) + + latestDecisionDeadlineEffectiveAt( + params(Seq(0 -> (60, None))), + at(61), + ) shouldBe at(61) + duration(60) + } + + "compute latest decision deadline on unchanged decision time" in { + latestDecisionDeadlineEffectiveAt( + params( + Seq( + 0 -> (60, Some(100)), + 100 -> (60, Some(200)), + 200 -> (60, None), + ) + ), + at(300), + ) shouldBe at(300) + duration(60) + } + + "compute latest decision deadline on expired effectiveAt parameters" in { + latestDecisionDeadlineEffectiveAt( + params( + Seq( + 0 -> (60, Some(100)), + 100 -> (60, Some(200)), + 200 -> (90, Some(400)), + ) + ), + at(300), + ) shouldBe at(300) + duration(90) + } + + "compute latest decision deadline on unordered parameters" in { + latestDecisionDeadlineEffectiveAt( + params( + Seq( + 100 -> (60, Some(200)), + 200 -> (90, None), + 0 -> (60, Some(100)), + ) + ), + at(300), + ) shouldBe at(300) + duration(90) + } + + "compute latest decision deadline on unordered parameters and all expired" in { + latestDecisionDeadlineEffectiveAt( + params( + Seq( + 100 -> (60, Some(200)), + 200 -> (90, Some(400)), + 0 -> (60, Some(100)), + ) + ), + at(300), + ) shouldBe at(300) + duration(90) + } + + "compute latest decision deadline honoring extreme earlier max decision timeout" in { + latestDecisionDeadlineEffectiveAt( + params( + Seq( + 0 -> (60, Some(100)), + 100 -> (20000, Some(200)), // This should determine the safe decision deadline + 200 -> (60, Some(300)), + 300 -> (60, None), + ) + ), + at(400), + ) shouldBe at(200) + duration(20000) + } + + "compute latest decision deadline honoring validUntil sort-None-last ordering" in { + val newestMaxDecisionTimeout = 90 + latestDecisionDeadlineEffectiveAt( + params( + Seq( + 0 -> (60, Some(100)), + 100 -> (60, Some(200)), + 200 -> (newestMaxDecisionTimeout, None), // this max decision time 
should be chosen as None sorts after Some(x) + 200 -> (60, Some(200)), + ) + ), + at(300), + ) shouldBe at(300) + duration(newestMaxDecisionTimeout) + } + + "throw on empty parameter history" in { + assertThrows[IllegalStateException]( + latestDecisionDeadlineEffectiveAt( + params(Seq.empty), + at(100), + ) + ) + } + + "throw if last parameters expired before effective at time" in { + assertThrows[IllegalArgumentException]( + latestDecisionDeadlineEffectiveAt( + params(Seq(0 -> (60, Some(100)))), + at(200), + ) + ) + } + } +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleContractFactory.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleContractFactory.scala index 66ca128ff7..23eae905ae 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleContractFactory.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleContractFactory.scala @@ -5,11 +5,10 @@ package com.digitalasset.canton.protocol import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto import com.digitalasset.canton.crypto.{Salt, TestHash, TestSalt} -import com.digitalasset.canton.util.{LegacyContractHash, LfTransactionBuilder} +import com.digitalasset.canton.util.{LfTransactionBuilder, TestContractHasher} import com.digitalasset.canton.{LfPartyId, protocol} import com.digitalasset.daml.lf.data.Ref.PackageName import com.digitalasset.daml.lf.data.{Bytes, Ref, Time} -import com.digitalasset.daml.lf.language.LanguageVersion import com.digitalasset.daml.lf.transaction.{ CreationTime, FatContractInstance, @@ -49,8 +48,8 @@ object ExampleContractFactory extends EitherValues { signatories: Set[Ref.Party] = Set(signatory), stakeholders: Set[Ref.Party] = Set(signatory, observer, extra), keyOpt: Option[GlobalKeyWithMaintainers] = None, - version: LanguageVersion = LanguageVersion.default, - cantonContractIdVersion: CantonContractIdV1Version = AuthenticatedContractIdVersionV11, + version: LfSerializationVersion = LfTransactionBuilder.defaultSerializationVersion, + cantonContractIdVersion: CantonContractIdV1Version = CantonContractIdVersion.maxV1, overrideContractId: Option[ContractId] = None, ): GenContractInstance { type InstCreatedAtTime <: Time } = { @@ -75,7 +74,7 @@ object ExampleContractFactory extends EitherValues { def fromCreate( create: protocol.LfNodeCreate, createdAt: CreationTime.CreatedAt = CreationTime.CreatedAt(Time.Timestamp.now()), - cantonContractIdVersion: CantonContractIdV1Version = AuthenticatedContractIdVersionV11, + cantonContractIdVersion: CantonContractIdV1Version = CantonContractIdVersion.maxV1, ): GenContractInstance { type InstCreatedAtTime <: CreationTime.CreatedAt } = fromCreateInternal( create, @@ -97,10 +96,8 @@ object ExampleContractFactory extends EitherValues { ContractAuthenticationDataV1(salt)(cantonContractIdVersion).toLfBytes, ) - val contractHash = LegacyContractHash.tryFatContractHash( - unsuffixed, - cantonContractIdVersion.useUpgradeFriendlyHashing, - ) + val contractHash = + TestContractHasher.Sync.hash(create, cantonContractIdVersion.contractHashingMethod) val unicum = unicumGenerator .recomputeUnicum(unsuffixed, cantonContractIdVersion, contractHash) @@ -124,7 +121,7 @@ object ExampleContractFactory extends EitherValues { def buildContractId( index: Int = random.nextInt(), - cantonContractIdVersion: CantonContractIdV1Version = AuthenticatedContractIdVersionV11, + cantonContractIdVersion: CantonContractIdV1Version = 
CantonContractIdVersion.maxV1, ): ContractId = cantonContractIdVersion.fromDiscriminator(lfHash(index), Unicum(TestHash.digest(index))) diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleTransaction.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleTransaction.scala index d1711bed62..ee97be2eb0 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleTransaction.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleTransaction.scala @@ -18,7 +18,7 @@ trait ExampleTransaction { /** Set of parties who are informees of an action (root or not) in the transaction */ def allInformees: Set[LfPartyId] = fullInformeeTree.allInformees - /** The transaction with unsuffixed contract IDs and the transaction version */ + /** The transaction with unsuffixed contract IDs and the serialization version */ def versionedUnsuffixedTransaction: LfVersionedTransaction /** Map from the nodes of the transaction to their seed if they need a seed */ @@ -56,7 +56,7 @@ trait ExampleTransaction { def transactionTree: GenTransactionTree - def transactionId: TransactionId = transactionTree.transactionId + def transactionId: UpdateId = transactionTree.transactionId def fullInformeeTree: FullInformeeTree @@ -85,7 +85,7 @@ trait ExampleTransaction { /** Transaction view trees for root views, in execution order */ def rootTransactionViewTrees: Seq[FullTransactionViewTree] - /** The transaction with suffixed contract ids and the transaction version. */ + /** The transaction with suffixed contract ids and the serialization version. */ def versionedSuffixedTransaction: LfVersionedTransaction /** @throws IllegalArgumentException diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleTransactionFactory.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleTransactionFactory.scala index ec62c2ea73..f777771c16 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleTransactionFactory.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleTransactionFactory.scala @@ -48,7 +48,7 @@ import com.digitalasset.canton.util.LfTransactionUtil.{ metadataFromExercise, metadataFromFetch, } -import com.digitalasset.canton.util.{LfTransactionBuilder, LfTransactionUtil} +import com.digitalasset.canton.util.{LfTransactionBuilder, LfTransactionUtil, TestContractHasher} import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.daml.lf.data.Ref.PackageName import com.digitalasset.daml.lf.data.{Bytes, ImmArray} @@ -73,6 +73,7 @@ object ExampleTransactionFactory { import EitherValues.* val pureCrypto: CryptoPureApi = new SymbolicPureCrypto() + // Helper methods for Daml-LF types val languageVersion: LanguageVersion = LfTransactionBuilder.defaultLanguageVersion val packageId: LfPackageId = LfTransactionBuilder.defaultPackageId @@ -81,7 +82,8 @@ object ExampleTransactionFactory { val packageName: PackageName = LfTransactionBuilder.defaultPackageName val someOptUsedPackages: Option[Set[LfPackageId]] = Some(Set(packageId)) val defaultGlobalKey: LfGlobalKey = LfTransactionBuilder.defaultGlobalKey - val transactionVersion: LfLanguageVersion = LfTransactionBuilder.defaultTransactionVersion + val serializationVersion: LfSerializationVersion = + LfTransactionBuilder.defaultSerializationVersion private val random = new Random(0) @@ -91,7 +93,7 @@ object 
ExampleTransactionFactory { } private def versionedValueCapturing(coid: List[LfContractId]): Value.VersionedValue = - LfVersioned(transactionVersion, valueCapturing(coid)) + LfVersioned(serializationVersion, valueCapturing(coid)) def contractInstance( capturedIds: Seq[LfContractId] = Seq.empty, @@ -138,9 +140,12 @@ object ExampleTransactionFactory { version = instance.version, ) + val contractHash = + TestContractHasher.Sync.hash(unsuffixedCreateNode, contractIdVersion.contractHashingMethod) + val ContractIdSuffixer.RelativeSuffixResult(suffixedCreateNode, _, _, authenticationData) = contractIdSuffixer - .relativeSuffixForLocalContract(contractSalt, createdAt, unsuffixedCreateNode) + .relativeSuffixForLocalContract(contractSalt, createdAt, unsuffixedCreateNode, contractHash) .valueOr(err => throw new IllegalArgumentException(s"Failed to compute suffix for contract: $err") ) @@ -159,7 +164,7 @@ object ExampleTransactionFactory { deepValue(Value.MAXIMUM_NESTING + 10) } val veryDeepVersionedValue: VersionedValue = - LfVersioned(transactionVersion, veryDeepValue) + LfVersioned(serializationVersion, veryDeepValue) val veryDeepContractInstance: LfThinContractInst = LfThinContractInst( @@ -174,7 +179,7 @@ object ExampleTransactionFactory { packageName: LfPackageName = packageName, ): Versioned[LfGlobalKey] = LfVersioned( - transactionVersion, + serializationVersion, LfGlobalKey.assertBuild(templateId, value, packageName), ) @@ -182,7 +187,7 @@ object ExampleTransactionFactory { key: LfGlobalKey = defaultGlobalKey, maintainers: Set[LfPartyId] = Set(signatory), ): Versioned[LfGlobalKeyWithMaintainers] = - LfVersioned(transactionVersion, LfGlobalKeyWithMaintainers(key, maintainers)) + LfVersioned(serializationVersion, LfGlobalKeyWithMaintainers(key, maintainers)) def fetchNode( cid: LfContractId, @@ -191,7 +196,7 @@ object ExampleTransactionFactory { observers: Set[LfPartyId] = Set.empty, key: Option[LfGlobalKeyWithMaintainers] = None, byKey: Boolean = false, - version: LfLanguageVersion = transactionVersion, + version: LfSerializationVersion = serializationVersion, templateId: LfTemplateId = templateId, interfaceId: Option[LfTemplateId] = None, ): LfNodeFetch = @@ -224,7 +229,7 @@ object ExampleTransactionFactory { signatories = signatories, stakeholders = signatories ++ observers, keyOpt = key, - version = transactionVersion, + version = serializationVersion, ) } @@ -260,7 +265,7 @@ object ExampleTransactionFactory { exerciseResult = exerciseResult, keyOpt = key, byKey = byKey, - version = transactionVersion, + version = serializationVersion, ) def exerciseNodeWithoutChildren( @@ -293,7 +298,7 @@ object ExampleTransactionFactory { packageName = key.packageName, key = LfGlobalKeyWithMaintainers(key, maintainers), result = resolution, - version = transactionVersion, + version = serializationVersion, ) def nodeId(index: Int): LfNodeId = LfNodeId(index) @@ -316,10 +321,10 @@ object ExampleTransactionFactory { (nodeId(index + startIndex), node) }*) - val version = protocol.maxTransactionVersion( + val version = protocol.maxSerializationVersion( NonEmpty .from(nodesMap.values.toSeq.mapFilter(_.optVersion)) - .getOrElse(NonEmpty(Seq, transactionVersion)) + .getOrElse(NonEmpty(Seq, serializationVersion)) ) LfVersionedTransaction(version, nodesMap, roots) @@ -333,7 +338,7 @@ object ExampleTransactionFactory { val malformedLfTransaction: LfVersionedTransaction = transaction(Seq(0)) // Helper methods for contract ids and transaction ids - def transactionId(index: Int): TransactionId = TransactionId( + def 
transactionId(index: Int): UpdateId = UpdateId( TestHash.digest(s"transactionId$index") ) @@ -377,7 +382,7 @@ object ExampleTransactionFactory { private def asAuthenticationData( salt: Salt, version: CantonContractIdVersion, - transactionId: Option[TransactionId] = None, + transactionId: Option[UpdateId] = None, ): ContractAuthenticationData = version match { case v1: CantonContractIdV1Version => ContractAuthenticationDataV1(salt)(v1) @@ -567,7 +572,7 @@ class ExampleTransactionFactory( suffixedId(-1, 1, cantonContractIdVersion) -> suffixedId(-1, 1, cantonContractIdVersion), ), ), - SingleFetch(version = LfLanguageVersion.v2_dev), + SingleFetch(version = LfSerializationVersion.VDev), SingleExercise(seed = deriveNodeSeed(0)), SingleExerciseWithNonstakeholderActor(seed = deriveNodeSeed(0)), MultipleRoots, @@ -613,7 +618,7 @@ class ExampleTransactionFactory( ) val unicumGenerator = new UnicumGenerator(cryptoOps) - def absolutizer(transactionId: TransactionId): ContractIdAbsolutizer = { + def absolutizer(transactionId: UpdateId): ContractIdAbsolutizer = { val absolutizationData: ContractIdAbsolutizationData = cantonContractIdVersion match { case _: CantonContractIdV1Version => @@ -648,7 +653,7 @@ class ExampleTransactionFactory( val metadata = ContractMetadata.tryCreate( signatories, signatories ++ observers, - maybeKeyWithMaintainers.map(LfVersioned(transactionVersion, _)), + maybeKeyWithMaintainers.map(LfVersioned(serializationVersion, _)), ) val viewParticipantDataSalt = participantDataSalt(viewIndex) val contractSalt = cantonContractIdVersion match { @@ -671,6 +676,11 @@ class ExampleTransactionFactory( metadata.stakeholders, metadata.maybeKeyWithMaintainers, ) + val contractHash = TestContractHasher.Sync.hash( + unsuffixedCreateNode, + cantonContractIdVersion.contractHashingMethod, + ) + val ContractIdSuffixer.RelativeSuffixResult( relativeCreateNode, _, @@ -682,6 +692,7 @@ class ExampleTransactionFactory( contractSalt, relativeCreateTime, unsuffixedCreateNode, + contractHash, ) .valueOr(err => throw new IllegalArgumentException(s"Cannot compute suffix for contract: $err") @@ -699,7 +710,7 @@ class ExampleTransactionFactory( } def toAbsolute( - transactionId: TransactionId, + transactionId: UpdateId, relativeFci: FatContractInstance, ): (ContractAuthenticationData, LfContractId) = { val absoluteFci = absolutizer(transactionId) @@ -783,7 +794,7 @@ class ExampleTransactionFactory( coreInputContracts, createWithSerialization, createdInSubviewArchivedInCore, - resolvedKeys.fmap(LfVersioned(transactionVersion, _)), + resolvedKeys.fmap(LfVersioned(serializationVersion, _)), actionDescription, RollbackContext.empty, participantDataSalt(viewIndex), @@ -1050,7 +1061,7 @@ class ExampleTransactionFactory( override def versionedSuffixedTransaction: LfVersionedTransaction = LfVersionedTransaction( - version = transactionVersion, + version = serializationVersion, roots = ImmArray.empty, nodes = HashMap.empty, ) @@ -1060,13 +1071,19 @@ class ExampleTransactionFactory( override def usedAndCreated: UsedAndCreatedContracts = UsedAndCreatedContracts.empty } - abstract class SingleNode(val nodeSeed: Option[LfHash]) extends ExampleTransaction { + abstract class SingleNode( + val nodeSeed: Option[LfHash], + transactionIdOverride: Option[Eval[UpdateId]], + ) extends ExampleTransaction { override def cryptoOps: HashOps & RandomOps = ExampleTransactionFactory.this.cryptoOps def interpretedContractId: LfContractId def relativizedContractId: LfContractId + override def transactionId: UpdateId = + 
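+ // Use the lazily evaluated override when one is given; otherwise fall back to the transaction id derived from the transaction tree.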
transactionIdOverride.fold(super.transactionId)(_.value) + lazy val absolutizedContractId: LfContractId = absolutizer(transactionId) .absolutizeContractId(relativizedContractId) @@ -1260,7 +1277,8 @@ class ExampleTransactionFactory( signatories: Set[LfPartyId] = Set(submitter), observers: Set[LfPartyId] = Set(observer), key: Option[LfGlobalKeyWithMaintainers] = None, - ) extends SingleNode(Some(seed)) { + transactionIdOverride: Option[Eval[UpdateId]] = None, + ) extends SingleNode(Some(seed), transactionIdOverride) { private def interpretedCapturedContractIds: Seq[LfContractId] = capturedContractIds.map(_._1) private def relativeCapturedContractIds: Seq[LfContractId] = capturedContractIds.map(_._2) @@ -1276,7 +1294,7 @@ class ExampleTransactionFactory( lazy val relativeContractInstance: LfThinContractInst = ExampleTransactionFactory.contractInstance(relativeCapturedContractIds) - override val absoluteContractInstance: LfThinContractInst = + override lazy val absoluteContractInstance: LfThinContractInst = ExampleTransactionFactory.contractInstance(absoluteCapturedContractIds) lazy val interpretedContractId: LfContractId = localContractId(discriminator) @@ -1344,17 +1362,23 @@ class ExampleTransactionFactory( Salt, (ContractAuthenticationData, Eval[ContractAuthenticationData]), ], - ) extends SingleNode(nodeSeed) { + transactionIdOverride: Option[Eval[UpdateId]], + ) extends SingleNode(nodeSeed, transactionIdOverride) { override def relativeAuthenticationData: ContractAuthenticationData = - authenticationData match { - case Left(salt) => asAuthenticationData(salt, cantonContractIdVersion, None) - case Right((relative, _)) => relative + cantonContractIdVersion match { + case _: CantonContractIdV2Version if relativizedContractId.isAbsolute => + absoluteAuthenticationData + case _ => + authenticationData match { + case Left(salt) => asAuthenticationData(salt, cantonContractIdVersion, None) + case Right((relative, _)) => relative + } } override def absoluteAuthenticationData: ContractAuthenticationData = authenticationData match { case Left(salt) => - asAuthenticationData(salt, cantonContractIdVersion, Some(TransactionId.zero)) + asAuthenticationData(salt, cantonContractIdVersion, Some(UpdateId.zero)) case Right((_, absoluteE)) => absoluteE.value } } @@ -1374,12 +1398,13 @@ class ExampleTransactionFactory( interpretedContractId: LfContractId = suffixedId(-1, 0, cantonContractIdVersion), relativizedContractId: LfContractId = suffixedId(-1, 0, cantonContractIdVersion), fetchedContractInstance: LfThinContractInst = contractInstance(), - version: LfLanguageVersion = transactionVersion, + version: LfSerializationVersion = serializationVersion, authenticationData: Either[ Salt, (ContractAuthenticationData, Eval[ContractAuthenticationData]), ] = Left(TestSalt.generateSalt(random.nextInt())), - ) extends SingleUseNode(None, authenticationData) { + transactionIdOverride: Option[Eval[UpdateId]] = None, + ) extends SingleUseNode(None, authenticationData, transactionIdOverride) { override def relativeContractInstance: LfThinContractInst = fetchedContractInstance override def absoluteContractInstance: LfThinContractInst = fetchedContractInstance @@ -1409,7 +1434,7 @@ class ExampleTransactionFactory( * id of the exercised contract during interpretation * @param relativizedContractId * id of the exercised contract after suffixing - * @param inputContractInstance + * @param relativeInputContractInstance * instance of the used contract. 
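+ * @param absoluteInputContractInstance + * instance of the used contract after absolutization of its contract ids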
*/ @SuppressWarnings(Array("org.wartremover.warts.IsInstanceOf")) @@ -1418,16 +1443,18 @@ class ExampleTransactionFactory( override val nodeId: LfNodeId = LfNodeId(0), interpretedContractId: LfContractId = suffixedId(-1, 0, cantonContractIdVersion), relativizedContractId: LfContractId = suffixedId(-1, 0, cantonContractIdVersion), - inputContractInstance: LfThinContractInst = contractInstance(), + relativeInputContractInstance: LfThinContractInst = contractInstance(), + absoluteInputContractInstance: Eval[LfThinContractInst] = Eval.later(contractInstance()), authenticationData: Either[ Salt, (ContractAuthenticationData, Eval[ContractAuthenticationData]), ] = Left(TestSalt.generateSalt(random.nextInt())), - ) extends SingleUseNode(Some(seed), authenticationData) { + transactionIdOverride: Option[Eval[UpdateId]] = None, + ) extends SingleUseNode(Some(seed), authenticationData, transactionIdOverride) { override def toString: String = "single exercise" - override def relativeContractInstance: LfThinContractInst = inputContractInstance - override def absoluteContractInstance: LfThinContractInst = inputContractInstance + override def relativeContractInstance: LfThinContractInst = relativeInputContractInstance + override def absoluteContractInstance: LfThinContractInst = absoluteInputContractInstance.value private def genNode(id: LfContractId): LfNodeExercises = exerciseNodeWithoutChildren( @@ -1465,7 +1492,7 @@ class ExampleTransactionFactory( Salt, (ContractAuthenticationData, Eval[ContractAuthenticationData]), ] = Left(TestSalt.generateSalt(random.nextInt())), - ) extends SingleUseNode(Some(seed), authenticationData) { + ) extends SingleUseNode(Some(seed), authenticationData, None) { override def toString: String = "single exercise" override def relativeContractInstance: LfThinContractInst = inputContractInstance @@ -1499,7 +1526,7 @@ class ExampleTransactionFactory( (ContractAuthenticationData, Eval[ContractAuthenticationData]), ] = Left(TestSalt.generateSalt(random.nextInt())), consuming: Boolean = true, - ) extends SingleUseNode(Some(seed), authenticationData) { + ) extends SingleUseNode(Some(seed), authenticationData, None) { val upgradedTemplateId: canton.protocol.LfTemplateId = templateId.copy(pkg = upgradePackageId) private def genNode(id: LfContractId): LfNodeExercises = @@ -1522,7 +1549,7 @@ class ExampleTransactionFactory( Salt, (ContractAuthenticationData, Eval[ContractAuthenticationData]), ] = Left(TestSalt.generateSalt(random.nextInt())), - ) extends SingleUseNode(Some(seed), authenticationData) { + ) extends SingleUseNode(Some(seed), authenticationData, None) { override def relativeContractInstance: LfThinContractInst = inputContractInstance override def absoluteContractInstance: LfThinContractInst = inputContractInstance @@ -1545,7 +1572,7 @@ class ExampleTransactionFactory( } def absolutizeAuthenticationData( - transactionId: TransactionId, + transactionId: UpdateId, createInfo: CreateInfo, ): ContractAuthenticationData = { val absoluteFci = absolutizer(transactionId).absolutizeFci(createInfo.relativeFci).value @@ -1573,6 +1600,7 @@ class ExampleTransactionFactory( seed = deriveNodeSeed(0), nodeId = LfNodeId(0), viewPosition = rootViewPosition(0, rootViewCount), + transactionIdOverride = Some(Eval.later(transactionId)), ) private val create1: SingleCreate = SingleCreate( seed = deriveNodeSeed(1), @@ -1583,8 +1611,14 @@ class ExampleTransactionFactory( suffixedId(-1, 1, cantonContractIdVersion) -> suffixedId(-1, 1, cantonContractIdVersion), create0.interpretedContractId -> 
create0.relativizedContractId, ), + transactionIdOverride = Some(Eval.later(transactionId)), + ) + private val fetch2: SingleFetch = SingleFetch( + LfNodeId(2), + suffixedId(-1, 2), + suffixedId(-1, 2), + transactionIdOverride = Some(Eval.later(transactionId)), ) - private val fetch2: SingleFetch = SingleFetch(LfNodeId(2), suffixedId(-1, 2), suffixedId(-1, 2)) private val fetch3: SingleFetch = SingleFetch( nodeId = LfNodeId(3), @@ -1592,11 +1626,12 @@ class ExampleTransactionFactory( relativizedContractId = create0.relativizedContractId, fetchedContractInstance = create0.relativeContractInstance, // ensure we test merging transactions with different versions - version = LfLanguageVersion.v2_dev, + version = LfSerializationVersion.VDev, authenticationData = Right( create0.relativeAuthenticationData -> Eval.later(absolutizeAuthenticationData(transactionId, create0.createInfo)) ), + transactionIdOverride = Some(Eval.later(transactionId)), ) private val exercise4: SingleExercise = SingleExercise(deriveNodeSeed(4), LfNodeId(4), suffixedId(-1, 4), suffixedId(-1, 4)) @@ -1605,11 +1640,13 @@ class ExampleTransactionFactory( nodeId = LfNodeId(5), interpretedContractId = create1.interpretedContractId, relativizedContractId = create1.relativizedContractId, - inputContractInstance = create1.relativeContractInstance, + relativeInputContractInstance = create1.relativeContractInstance, + absoluteInputContractInstance = Eval.later(create1.absoluteContractInstance), authenticationData = Right( create1.relativeAuthenticationData -> Eval.later(absolutizeAuthenticationData(transactionId, create1.createInfo)) ), + transactionIdOverride = Some(Eval.later(transactionId)), ) private val examples: List[SingleNode] = diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/GeneratorsContract.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/GeneratorsContract.scala index 2f32a580c9..dcb34a6a62 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/GeneratorsContract.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/GeneratorsContract.scala @@ -29,7 +29,7 @@ final class GeneratorsContract(version: CantonContractIdVersion) { Arbitrary( for { salt <- Arbitrary.arbitrary[Salt] - transactionId <- Gen.option(Arbitrary.arbitrary[TransactionId]) + transactionId <- Gen.option(Arbitrary.arbitrary[UpdateId]) relativeArgumentSuffixes <- boundedListGen[ByteString] } yield ContractAuthenticationDataV2( Bytes.fromByteString(salt.forHashing), diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/GeneratorsProtocol.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/GeneratorsProtocol.scala index 71cc602905..09be7b9535 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/GeneratorsProtocol.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/GeneratorsProtocol.scala @@ -25,6 +25,7 @@ import com.digitalasset.canton.topology.{ PhysicalSynchronizerId, SynchronizerId, } +import com.digitalasset.canton.util.TestContractHasher import com.digitalasset.canton.version.{HashingSchemeVersion, ProtocolVersion} import com.digitalasset.canton.{GeneratorsLf, LfPartyId, ReassignmentCounter} import com.digitalasset.daml.lf.transaction.{CreationTime, FatContractInstance, Versioned} @@ -57,6 +58,7 @@ final class GeneratorsProtocol( requiredHashAlgorithms <- nonEmptySetGen[HashAlgorithm] requiredCryptoKeyFormats 
<- nonEmptySetGen[CryptoKeyFormat] requiredSignatureFormats <- nonEmptySetGen[SignatureFormat] + topologyChangeDelay <- Arbitrary.arbitrary[NonNegativeFiniteDuration] enableTransparencyChecks <- Arbitrary.arbitrary[Boolean] serial <- Arbitrary.arbitrary[NonNegativeInt] @@ -67,6 +69,7 @@ final class GeneratorsProtocol( requiredHashAlgorithms, requiredCryptoKeyFormats, requiredSignatureFormats, + topologyChangeDelay, enableTransparencyChecks, protocolVersion, serial, @@ -80,7 +83,6 @@ final class GeneratorsProtocol( confirmationResponseTimeout <- Arbitrary.arbitrary[NonNegativeFiniteDuration] mediatorReactionTimeout <- Arbitrary.arbitrary[NonNegativeFiniteDuration] assignmentExclusivityTimeout <- Arbitrary.arbitrary[NonNegativeFiniteDuration] - topologyChangeDelay <- Arbitrary.arbitrary[NonNegativeFiniteDuration] mediatorDeduplicationMargin <- Arbitrary.arbitrary[NonNegativeFiniteDuration] // Because of the potential multiplication by 2 below, we want a reasonably small value @@ -130,7 +132,6 @@ final class GeneratorsProtocol( confirmationResponseTimeout, mediatorReactionTimeout, assignmentExclusivityTimeout, - topologyChangeDelay, ledgerTimeRecordTimeTolerance, updatedMediatorDeduplicationTimeout, reconciliationInterval, @@ -230,7 +231,7 @@ final class GeneratorsProtocol( for { index <- Gen.posNum[Int] time <- Arbitrary.arbitrary[CantonTimestamp] - transactionId <- Arbitrary.arbitrary[TransactionId] + transactionId <- Arbitrary.arbitrary[UpdateId] } yield { val discriminator = ExampleTransactionFactory.lfHash(index) val salt = ContractSalt.createV2(symbolicCrypto)( @@ -265,6 +266,8 @@ final class GeneratorsProtocol( salt, relativeLedgerCreateTime, unsuffixedCreateNode, + TestContractHasher.Sync + .hash(unsuffixedCreateNode, contractIdSuffixer.contractHashingMethod), ) .valueOr(err => throw new IllegalArgumentException(s"Failed to suffix contract: $err")) relativeFci = FatContractInstance.fromCreateNode( diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/SerializableContractTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/SerializableContractTest.scala index 2ce5fc6a1b..4f695eee55 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/SerializableContractTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/SerializableContractTest.scala @@ -30,7 +30,8 @@ class SerializableContractTest extends AnyWordSpec with BaseTest { "SerializableContractInstance" should { - forEvery(CantonContractIdVersion.all) { contractIdVersion => + // TODO(#23971) use CantonContractIdVersion.all + forEvery(CantonContractIdVersion.allV1) { contractIdVersion => s"deserialize $contractIdVersion correctly" in { val someContractSalt = TestSalt.generateSalt(0) @@ -59,7 +60,7 @@ class SerializableContractTest extends AnyWordSpec with BaseTest { metadata, CreationTime.CreatedAt(CantonTimestamp.now().toLf), )( - ContractAuthenticationDataV1(someContractSalt)(AuthenticatedContractIdVersionV11) + ContractAuthenticationDataV1(someContractSalt)(contractIdVersion) ) val sci = SerializableContract.fromLfFatContractInst(ci.inst).value SerializableContract.fromProtoVersioned( @@ -70,12 +71,12 @@ class SerializableContractTest extends AnyWordSpec with BaseTest { } "SerializableContract.fromFatContract" when { - val transactionVersion = LfLanguageVersion.v2_dev + val serializationVersion = LfSerializationVersion.V1 val createdAt = LfTimestamp.Epoch val contractSalt = TestSalt.generateSalt(0) val 
authenticationData = - ContractAuthenticationDataV1(contractSalt)(AuthenticatedContractIdVersionV11) + ContractAuthenticationDataV1(contractSalt)(CantonContractIdVersion.maxV1) val contractIdDiscriminator = ExampleTransactionFactory.lfHash(0) val contractIdSuffix = @@ -84,7 +85,7 @@ class SerializableContractTest extends AnyWordSpec with BaseTest { val invalidFormatContractId = LfContractId.assertFromString("00" * 34) val authenticatedContractId = - AuthenticatedContractIdVersionV11.fromDiscriminator(contractIdDiscriminator, contractIdSuffix) + CantonContractIdVersion.maxV1.fromDiscriminator(contractIdDiscriminator, contractIdSuffix) val pkgName = Ref.PackageName.assertFromString("pkgName") @@ -96,7 +97,7 @@ class SerializableContractTest extends AnyWordSpec with BaseTest { signatories = Set(alice), stakeholders = Set(alice), keyOpt = None, - version = transactionVersion, + version = serializationVersion, ) val disclosedContract = @@ -115,7 +116,7 @@ class SerializableContractTest extends AnyWordSpec with BaseTest { rawContractInstance = SerializableRawContractInstance .create( LfVersioned( - transactionVersion, + serializationVersion, LfValue.ThinContractInstance( packageName = pkgName, template = templateId, diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/WellFormedTransactionTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/WellFormedTransactionTest.scala index 9b916c359f..1966e4ec80 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/WellFormedTransactionTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/WellFormedTransactionTest.scala @@ -192,7 +192,7 @@ class WellFormedTransactionTest extends AnyWordSpec with BaseTest with HasExecut exerciseResult = None, keyOpt = None, byKey = false, - version = ExampleTransactionFactory.transactionVersion, + version = ExampleTransactionFactory.serializationVersion, ), ), WithoutSuffixes, diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/hash/HashUtils.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/hash/HashUtils.scala index ef2f30f9ab..1c17d3d942 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/hash/HashUtils.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/hash/HashUtils.scala @@ -16,7 +16,7 @@ import com.digitalasset.daml.lf.transaction.{ CreationTime, FatContractInstance, Node, - TransactionVersion, + SerializationVersion, } import com.digitalasset.daml.lf.value.{Value, Value as V} import com.google.protobuf.ByteString @@ -90,7 +90,7 @@ trait HashUtilsTest { this: Matchers => signatories = signatories, stakeholders = stakeholders, keyOpt = None, - version = TransactionVersion.minVersion, + version = SerializationVersion.minVersion, ) def defRef(module: String, name: String): Ref.Identifier = diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/hash/NodeHashV1Test.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/hash/NodeHashV1Test.scala index 83b9b641b2..6c4e7c1fde 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/hash/NodeHashV1Test.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/hash/NodeHashV1Test.scala @@ -5,12 +5,11 @@ package com.digitalasset.canton.protocol.hash import com.digitalasset.canton.BaseTest import 
com.digitalasset.canton.crypto.Hash -import com.digitalasset.canton.protocol.LfHash import com.digitalasset.canton.protocol.hash.TransactionHash.NodeHashingError import com.digitalasset.canton.protocol.hash.TransactionHash.NodeHashingError.IncompleteTransactionTree +import com.digitalasset.canton.protocol.{LfHash, LfSerializationVersion} import com.digitalasset.daml.lf.data.* import com.digitalasset.daml.lf.data.Ref.{ChoiceName, PackageName, Party} -import com.digitalasset.daml.lf.language.LanguageVersion import com.digitalasset.daml.lf.transaction.* import com.digitalasset.daml.lf.value.Value.ContractId import com.digitalasset.daml.lf.value.test.TypedValueGenerators.ValueAddend as VA @@ -50,7 +49,7 @@ class NodeHashV1Test extends BaseTest with AnyWordSpecLike with Matchers with Ha stakeholders = Set[Party](Ref.Party.assertFromString("alice"), Ref.Party.assertFromString("charlie")), keyOpt = None, - version = LanguageVersion.v2_1, + version = LfSerializationVersion.V1, ) private val createNodeEncoding = """'01' # 01 (Node Encoding Version) @@ -110,7 +109,7 @@ class NodeHashV1Test extends BaseTest with AnyWordSpecLike with Matchers with Ha keyOpt = None, byKey = false, interfaceId = None, - version = LanguageVersion.v2_1, + version = LfSerializationVersion.V1, ) private val fetchNodeEncoding = """'01' # 01 (Node Encoding Version) @@ -176,7 +175,7 @@ class NodeHashV1Test extends BaseTest with AnyWordSpecLike with Matchers with Ha exerciseResult = Some(VA.text.inj("result")), keyOpt = None, byKey = false, - version = LanguageVersion.v2_1, + version = LfSerializationVersion.V1, ) private val exerciseNodeHash = "070970eb4b2de72561dafb67017ca33850650a8103e5134e16044ba78991f48c" @@ -188,7 +187,7 @@ class NodeHashV1Test extends BaseTest with AnyWordSpecLike with Matchers with Ha result = Some( ContractId.V1.assertFromString(contractId1) ), - version = LanguageVersion.v2_1, + version = LfSerializationVersion.V1, ) private val rollbackNode = Node.Rollback( @@ -742,7 +741,7 @@ class NodeHashV1Test extends BaseTest with AnyWordSpecLike with Matchers with Ha "TransactionBuilder" should { val roots = ImmArray(NodeId(0), NodeId(5)) val transaction = VersionedTransaction( - version = LanguageVersion.v2_1, + version = LfSerializationVersion.V1, roots = roots, nodes = subNodesMap, ) @@ -761,7 +760,7 @@ class NodeHashV1Test extends BaseTest with AnyWordSpecLike with Matchers with Ha an[IncompleteTransactionTree] shouldBe thrownBy { TransactionHash.tryHashTransactionV1( VersionedTransaction( - version = LanguageVersion.v2_1, + version = LfSerializationVersion.V1, roots = roots, nodes = Map.empty, ), @@ -773,7 +772,7 @@ class NodeHashV1Test extends BaseTest with AnyWordSpecLike with Matchers with Ha "not hash NodeIds" in { TransactionHash.tryHashTransactionV1( VersionedTransaction( - version = LanguageVersion.v2_1, + version = LfSerializationVersion.V1, roots = shiftNodeIds(roots), nodes = shiftNodeIds(subNodesMap), ), @@ -784,7 +783,7 @@ class NodeHashV1Test extends BaseTest with AnyWordSpecLike with Matchers with Ha "not produce collision in children" in { TransactionHash.tryHashTransactionV1( VersionedTransaction( - version = LanguageVersion.v2_1, + version = LfSerializationVersion.V1, roots = roots.reverse, nodes = subNodesMap, ), @@ -800,7 +799,7 @@ class NodeHashV1Test extends BaseTest with AnyWordSpecLike with Matchers with Ha hashTracer = hashTracer, ) hashTracer.result shouldBe s"""'00000030' # Hash Purpose - |# Transaction Version + |# Serialization Version |'00000003' # 3 (int) |'322e31' # 2.1 
(string) |# Root Nodes @@ -815,7 +814,7 @@ class NodeHashV1Test extends BaseTest with AnyWordSpecLike with Matchers with Ha "Full Transaction Hash" should { val roots = ImmArray(NodeId(0), NodeId(5)) val transaction = VersionedTransaction( - version = LanguageVersion.v2_1, + version = LfSerializationVersion.V1, roots = roots, nodes = subNodesMap, ) diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/resource/DbLockTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/resource/DbLockTest.scala new file mode 100644 index 0000000000..ac1b15f3ba --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/resource/DbLockTest.scala @@ -0,0 +1,309 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import cats.Eval +import cats.data.EitherT +import com.digitalasset.canton.concurrent.Threading +import com.digitalasset.canton.config.{DbLockConfig, DefaultProcessingTimeouts} +import com.digitalasset.canton.lifecycle.FlagCloseable +import com.digitalasset.canton.logging.SuppressingLogger.LogEntryCriterion +import com.digitalasset.canton.logging.{SuppressingLogger, TracedLogger} +import com.digitalasset.canton.store.db.* +import com.digitalasset.canton.time.{PositiveFiniteDuration, SimClock} +import com.digitalasset.canton.util.retry +import com.digitalasset.canton.util.retry.AllExceptionRetryPolicy +import com.digitalasset.canton.{BaseTest, HasExecutorService} +import com.typesafe.scalalogging.Logger +import org.scalatest.FutureOutcome +import org.scalatest.wordspec.FixtureAsyncWordSpec +import org.slf4j.event.Level +import slick.jdbc.JdbcBackend.Database +import slick.util.AsyncExecutorWithMetrics + +import java.util.concurrent.atomic.AtomicInteger +import scala.concurrent.duration.* +import scala.concurrent.{ExecutionContext, Future} + +object UniqueDbLockCounter { + private lazy val lockCounter = new AtomicInteger(DbLockCounters.FIRST_TESTING) + + def get(): DbLockCounter = DbLockCounter(lockCounter.getAndIncrement()) +} + +@SuppressWarnings(Array("org.wartremover.warts.Var")) +trait DbLockTest extends FixtureAsyncWordSpec with BaseTest with HasExecutorService { + + def parallelExecutionContext: ExecutionContext = executorService + + protected lazy val lockConfig: DbLockConfig = DbLockConfig() + + private lazy val ds = DbLockedConnection + .createDataSource( + setup.config.config, + 1, + PositiveFiniteDuration.tryOfSeconds(10), + ) + .valueOrFail("Failed to create datasource") + + case class FixtureParam(lock: DbLock, clock: SimClock, db: Database) + + protected def createLock( + lockId: DbLockId, + lockMode: DbLockMode, + db: Database, + clock: SimClock, + ): DbLock + + protected def allocateLockId(counter: DbLockCounter): DbLockId + + protected def setup: DbStorageSetup + + private def createDb(): Database = { + val executor = + AsyncExecutorWithMetrics.createSingleThreaded( + "DbLockTest", + noTracingLogger, + ) + KeepAliveConnection.createDatabaseFromConnection( + new KeepAliveConnection(ds.createConnection()), + Logger(logger.underlying), + executor, + ) + } + + override def afterAll(): Unit = + try { + ds.close() + super.afterAll() + } finally setup.close() + + def withFixture(test: OneArgAsyncTest): FutureOutcome = { + val db = createDb() + val clock = new SimClock(loggerFactory = loggerFactory) + val lockId = DbLockId.create("test", UniqueDbLockCounter.get()) + + // By default create an 
exclusive lock + val lock = createLock(lockId, DbLockMode.Exclusive, db, clock) + + complete { + withFixture(test.toNoArgAsyncTest(FixtureParam(lock, clock, db))) + } lastly { + lock.close() + db.close() + } + } + + "DbLock" can { + + "acquire a lock" in { f => + for { + _ <- f.lock.acquire().valueOrFail("acquire lock") + _ <- f.lock.release().valueOrFail("release lock") + } yield succeed + + } + + "acquire a shared lock from two sessions" in { f => + val sharedLockId = allocateLockId(UniqueDbLockCounter.get()) + val sharedLock1 = createLock(sharedLockId, DbLockMode.Shared, f.db, f.clock) + + // Same lock id but with a new DB connection + val db2 = createDb() + val sharedLock2 = createLock(sharedLockId, DbLockMode.Shared, db2, f.clock) + + val exclusiveLock = createLock(sharedLockId, DbLockMode.Exclusive, f.db, f.clock) + + for { + _ <- sharedLock1.acquire().valueOrFail("acquire lock") + _ <- sharedLock2.acquire().valueOrFail("acquire lock") + // An exclusive lock on the shared lock id must fail + acquiredExclusive <- exclusiveLock + .tryAcquire() + .getOrElse(false) + _ <- sharedLock1.release().valueOrFail("release lock") + _ <- sharedLock2.release().valueOrFail("release lock") + } yield { + acquiredExclusive shouldBe false + } + } + + "acquire a lock twice should fail" in { f => + for { + _ <- f.lock.acquire().valueOrFail("acquire lock") + fail <- f.lock.acquire().value + _ <- f.lock.release().valueOrFail("release lock") + } yield { + fail.left.value shouldBe a[DbLockError.LockAlreadyAcquired] + } + } + + "acquire a lock from two sessions should fail" in { f => + // Same lock id but with a new DB connection + val db2 = createDb() + val lock2 = createLock(f.lock.lockId, DbLockMode.Exclusive, db2, f.clock) + + for { + _ <- f.lock.acquire().valueOrFail("acquire lock") + acquired <- lock2.tryAcquire().valueOrFail("try acquire lock") + _ <- f.lock.release().valueOrFail("release lock") + } yield { + acquired shouldBe false + } + } + + "check if the lock is taken by another session" in { f => + // Same lock id but with a new DB connection + val db2 = createDb() + val lock2 = createLock(f.lock.lockId, DbLockMode.Exclusive, db2, f.clock) + + for { + _ <- f.lock.acquire().valueOrFail("acquire lock") + taken1 <- lock2.isTaken.valueOrFail("lock taken") + _ <- f.lock.release().valueOrFail("release lock") + // It might take a bit to propagate (especially if there is concurrent DB activity on other tests) + _ = Threading.sleep(5000) + taken2 <- lock2.isTaken.valueOrFail("lock taken") + } yield { + taken1 shouldBe true + taken2 shouldBe false + } + } + + "release a non-acquired lock should fail" in { f => + for { + fail <- f.lock.release().value + } yield { + fail.left.value shouldBe a[DbLockError.LockAlreadyReleased] + } + } + + "detect when the lock is lost" in { f => + for { + _ <- f.lock.acquire().valueOrFail("acquire lock") + // Releasing the lock without changing the lock state + _ <- f.lock.releaseInternal().valueOrFail("release internal") + // Advance the time to trigger a check lock + _ = loggerFactory.assertLogs( + // Add 1 second to the health check period to account for schedule jitter + f.clock.advance(lockConfig.healthCheckPeriod.plusSeconds(1).asJava), + _.warningMessage should include(s"Lock ${f.lock.lockId} was lost"), + ) + } yield f.lock.lockState.get() shouldBe DbLock.LockState.Lost + } + + "concurrent acquisition and release" in { f => + def acquireRelease(threadId: Int, attempt: Int): Future[Unit] = { + + val flagCloseable: FlagCloseable = FlagCloseable(logger, timeouts) + + val 
suppressRetryWarnRule = + LogEntryCriterion(Level.WARN, getClass.getName, """.* operation \'test-.+\'.*""".r) + val suppressRetryWarnLoggerFactory = + SuppressingLogger(getClass, skipLogEntry = suppressRetryWarnRule.matches) + val suppressRetryWarnLogger = + TracedLogger(suppressRetryWarnLoggerFactory.getLogger(getClass)) + + def retryOp(op: () => EitherT[Future, DbLockError, Unit], opName: String): Future[Unit] = + // Suppress warnings of retries + suppressRetryWarnLoggerFactory.suppressWarnings { + retry + .Pause( + suppressRetryWarnLogger, + flagCloseable, + retry.Forever, + 5.millisecond, + operationName = opName, + ) + .apply(op().value, AllExceptionRetryPolicy) + .map(_ => ()) + } + + // Wait before starting the acquire/release cycle to give other threads a chance to acquire the lock first + Threading.sleep(10) + + for { + _ <- retryOp(() => f.lock.acquire(), s"test-acquire-$threadId-$attempt") + _ <- retryOp(() => f.lock.release(), s"test-release-$threadId-$attempt") + } yield () + } + + def acquireReleaseLoop(threadId: Int, attempts: Int): Future[Unit] = + if (attempts > 0) + acquireRelease(threadId, attempts).flatMap(_ => + acquireReleaseLoop(threadId, attempts - 1) + ) + else + Future.unit + + val f1 = acquireReleaseLoop(0, 10) + val f2 = acquireReleaseLoop(1, 10) + val f3 = acquireReleaseLoop(2, 10) + + for { + _ <- f1 + _ <- f2 + _ <- f3 + } yield succeed + } + + "allocate a lock id" in { _ => + val c1 = UniqueDbLockCounter.get() + val c2 = UniqueDbLockCounter.get() + val id1 = allocateLockId(c1) + val id2 = allocateLockId(c2) + id1 shouldNot equal(id2) + + val id3 = DbLockId.create("", c1) + val id4 = DbLockId.create("", c2) + id3 should not be id4 + val id5 = DbLockId.create("foo", c2) + id5 should not be id4 + } + + "create lock with allocated lock id" in { f => + val id = allocateLockId(UniqueDbLockCounter.get()) + val lock = createLock(id, DbLockMode.Exclusive, f.db, f.clock) + for { + _ <- lock.acquire().valueOrFail(s"acquire lock with lock id: $id") + _ <- lock.release().valueOrFail(s"release lock with lock id: $id") + } yield succeed + } + + } + +} + +class DbLockTestPostgres extends DbLockTest { + + override protected lazy val setup: PostgresDbStorageSetup = + DbStorageSetup.postgres(loggerFactory)(parallelExecutionContext) + + override def createLock( + lockId: DbLockId, + lockMode: DbLockMode, + db: Database, + clock: SimClock, + ): DbLock = + setup.storage.profile match { + case profile: DbStorage.Profile.Postgres => + new DbLockPostgres( + profile, + db, + lockId, + lockMode, + lockConfig, + DefaultProcessingTimeouts.testing, + clock, + loggerFactory, + Eval.now(false), + )(executorService) + case _ => fail("Database profile must be a Postgres profile") + } + + override protected def allocateLockId(counter: DbLockCounter): DbLockId = + PostgresDbLock + .allocateLockId(setup.config, counter)(loggerFactory) + .valueOrFail(s"allocate lock id for counter: $counter") +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/resource/DbLockedConnectionPoolTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/resource/DbLockedConnectionPoolTest.scala new file mode 100644 index 0000000000..f9b399604a --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/resource/DbLockedConnectionPoolTest.scala @@ -0,0 +1,312 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import cats.syntax.either.* +import com.digitalasset.canton.concurrent.Threading +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.config.{DbLockedConnectionPoolConfig, DefaultProcessingTimeouts} +import com.digitalasset.canton.logging.{NamedLoggerFactory, SuppressionRule} +import com.digitalasset.canton.resource.DbLockedConnection.State +import com.digitalasset.canton.store.db.* +import com.digitalasset.canton.time.{Clock, SimClock, WallClock} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.{BaseTest, CloseableTest, HasExecutorService} +import org.scalatest.wordspec.FixtureAsyncWordSpec +import org.scalatest.{Assertion, FutureOutcome} +import org.slf4j.event.Level +import slick.util.{AsyncExecutorWithMetrics, AsyncExecutorWithShutdown} + +import java.sql.SQLTransientException +import java.util.concurrent.Executors +import scala.concurrent.duration.* +import scala.concurrent.{ExecutionContext, Future, Promise} + +trait DbLockedConnectionPoolTest + extends FixtureAsyncWordSpec + with BaseTest + with HasExecutorService + with CloseableTest { + + protected def setup: DbStorageSetup + + private lazy val executor = AsyncExecutorWithMetrics.createSingleThreaded( + "DbLockedConnectionPoolTest", + noTracingLogger, + ) + + case class FixtureParam( + pool: DbLockedConnectionPool, + config: DbLockedConnectionPoolConfig, + mainLockCounter: DbLockCounter, + poolLockCounter: DbLockCounter, + poolSize: PositiveInt, + clock: SimClock, + loggerFactory: NamedLoggerFactory, + ) + + protected def createConnectionPool( + config: DbLockedConnectionPoolConfig, + poolSize: PositiveInt, + clock: Clock, + poolId: String, + mainLockCounter: DbLockCounter, + poolLockCounter: DbLockCounter, + loggerFactory: NamedLoggerFactory, + writeExecutor: Option[AsyncExecutorWithShutdown] = None, + ): DbLockedConnectionPool = + DbLockedConnectionPool + .create( + DbLock + .isSupported(setup.storage.profile) + .valueOrFail(s"Storage profile `${setup.storage.profile}` does not support DB locks"), + setup.storage.dbConfig, + config, + poolSize, + mainLockCounter, + poolLockCounter, + clock, + DefaultProcessingTimeouts.testing, + exitOnFatalFailures = true, + futureSupervisor, + loggerFactory.append("poolId", poolId), + writeExecutor.getOrElse(executor), + )(executorService, TraceContext.empty, closeContext) + .valueOrFail("Failed to create connection pool") + + protected def awaitActive( + pool: DbLockedConnectionPool, + config: DbLockedConnectionPoolConfig, + clock: SimClock, + ): Assertion = + eventually(timeUntilSuccess = 60.seconds) { + clock.advance(config.healthCheckPeriod.asJava) + val active = pool.isActive + logger.debug(s"Checking if pool is active... 
$active") + assert(active) + } + + protected def awaitActive(f: FixtureParam): Assertion = awaitActive(f.pool, f.config, f.clock) + + def withFixture(test: OneArgAsyncTest): FutureOutcome = { + val fixtureLoggerFactory = + loggerFactory.append("case", test.pos.map(_.lineNumber.toString).getOrElse("unknown")) + val clock = new SimClock(loggerFactory = fixtureLoggerFactory) + val config = DbLockedConnectionPoolConfig() + val poolSize = + PositiveInt.tryCreate(Math.max(Threading.detectNumberOfThreads(noTracingLogger).value / 4, 1)) + val mainLockCounter = UniqueDbLockCounter.get() + val poolLockCounter = UniqueDbLockCounter.get() + val pool = createConnectionPool( + config, + poolSize, + clock, + "fixture", + mainLockCounter, + poolLockCounter, + fixtureLoggerFactory, + ) + complete { + withFixture( + test.toNoArgAsyncTest( + FixtureParam( + pool, + config, + mainLockCounter, + poolLockCounter, + poolSize, + clock, + fixtureLoggerFactory, + ) + ) + ) + } lastly { + pool.close() + clock.close() + } + } + + override def afterAll(): Unit = + try { + executor.close() + setup.close() + } finally super.afterAll() + + "DbLockedConnectionPool" can { + + "create a connection pool and become active" in { f => + awaitActive(f) + } + + "create two connection pools and one becomes active" in { f => + awaitActive(f) + + logger.debug(s"Creating a second connection pool") + val pool2 = createConnectionPool( + f.config, + f.poolSize, + f.clock, + "pool2", + f.mainLockCounter, + f.poolLockCounter, + f.loggerFactory, + ) + assert(!pool2.isActive) + + logger.debug(s"Closing first (active) connection pool") + f.pool.close() + logger.debug(s"First (active) connection pool is being closed") + + awaitActive(pool2, f.config, f.clock) + pool2.close() + + succeed + } + + "when provided, use write executor to schedule queries" in { f => + // Close the fixture pool and create a new one with a non empty write executor + awaitActive(f) + logger.debug(s"Closing active connection pool") + f.pool.close() + val executor = Executors.newSingleThreadExecutor() + val lockExecutionContext = ExecutionContext.fromExecutor(executor) + val asyncExecutor = new AsyncExecutorWithShutdown { + override def executionContext: ExecutionContext = lockExecutionContext + override def close(): Unit = executor.shutdown() + override def isShuttingDown: Boolean = false + } + + val pool2 = createConnectionPool( + f.config, + f.poolSize, + f.clock, + "pool2", + f.mainLockCounter, + f.poolLockCounter, + f.loggerFactory, + Some(asyncExecutor), + ) + awaitActive(pool2, f.config, f.clock) + + val connections = pool2.getPool.value + + val p = Promise[Unit]() + val mockFuture = Future(timeouts.default.await("Mock query")(p.future))(lockExecutionContext) + val hasLockFuture = connections.head.state match { + case State.Connected(_, lock) => lock.hasLock.value + case _ => fail("Expected connected connection") + } + + // The lock check can't complete because the mock query is running on the single threaded write executor + always()(hasLockFuture.isCompleted shouldBe false) + p.success(()) + + timeouts.default.await("Wait for mock query")(mockFuture) + timeouts.default.await("Wait for lock")(hasLockFuture) + + pool2.close() + + succeed + } + + "fail-over on main connection close" in { f => + awaitActive(f) + + val clock2 = new WallClock(DefaultProcessingTimeouts.testing, loggerFactory) + val pool2 = createConnectionPool( + f.config, + f.poolSize, + clock2, + "pool2", + f.mainLockCounter, + f.poolLockCounter, + f.loggerFactory, + ) + assert(!pool2.isActive) + + 
val mainConnection =
+ f.pool.mainConnection.get.valueOrFail("Cannot get main connection of active pool")
+
+ logger.debug("Close main connection of active pool")
+ mainConnection
+ .closeUnderlying(Level.WARN)
+
+ logger.debug("Wait until passive pool acquires main connection lock")
+ eventually() {
+ assert(pool2.mainConnection.isActive)
+ }
+
+ def expectedWarns(lockId: DbLockId) = Seq(
+ s"Failed to check database lock status for $lockId, assuming lost",
+ s"Lock $lockId was lost",
+ "Locked connection was lost, trying to rebuild",
+ )
+
+ logger.debug("Wait until former active pool becomes passive")
+ loggerFactory.assertLogsSeqString(
+ SuppressionRule.LevelAndAbove(Level.WARN),
+ expectedWarns(f.pool.mainConnection.lockId),
+ ) {
+ eventually() {
+ // Continuously advance time to make sure that the lock, connection, and pool checks have run and picked up the updated health from sub-resources
+ f.clock.advance(f.config.healthCheckPeriod.asJava)
+
+ // Ensure the pool went passive
+ assert(f.pool.isPassive)
+
+ // Ensure the main connection got replaced
+ f.pool.mainConnection.state match {
+ case State.Connected(connection, lock)
+ if connection != mainConnection && !lock.isAcquired =>
+ succeed
+ case _ => fail("connection is not yet passive")
+ }
+ }
+ }
+
+ logger.debug("Wait until new pool becomes active")
+ eventually() {
+ // Eventually the second pool is active
+ assert(pool2.isActive)
+ }
+
+ pool2.close()
+ clock2.close()
+
+ succeed
+ }
+
+ "connection pool must mark connections as in-use" in { f =>
+ awaitActive(f)
+
+ val conns = Range(0, f.poolSize.unwrap).map { _ =>
+ // Not all connections in the pool might be ready yet, so retry here
+ eventually() {
+ Either
+ .catchOnly[SQLTransientException](f.pool.createConnection())
+ .valueOrFail("Could not create connection")
+ }
+ }
+
+ // We are out of connections
+ assertThrows[SQLTransientException] {
+ f.pool.createConnection()
+ }
+
+ // Free one of the initial connections
+ conns.headOption.foreach(_.close())
+
+ // Creating a new connection should succeed now
+ f.pool.createConnection()
+
+ succeed
+ }
+ }
+}
+
+class DbLockedConnectionPoolTestPostgres extends DbLockedConnectionPoolTest {
+
+ override protected lazy val setup: PostgresDbStorageSetup =
+ DbStorageSetup.postgres(loggerFactory)(executorService)
+}
diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/resource/DbLockedConnectionTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/resource/DbLockedConnectionTest.scala
new file mode 100644
index 0000000000..3705b89fad
--- /dev/null
+++ b/canton/community/common/src/test/scala/com/digitalasset/canton/resource/DbLockedConnectionTest.scala
@@ -0,0 +1,268 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import com.digitalasset.canton.config.{DbLockedConnectionConfig, DefaultProcessingTimeouts} +import com.digitalasset.canton.logging.SuppressionRule +import com.digitalasset.canton.store.db.* +import com.digitalasset.canton.time.{Clock, PositiveFiniteDuration, SimClock} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.{BaseTest, CloseableTest, HasExecutorService} +import org.scalatest.FutureOutcome +import org.scalatest.wordspec.FixtureAsyncWordSpec +import org.slf4j.event.Level +import slick.util.AsyncExecutorWithMetrics + +trait DbLockedConnectionTest + extends FixtureAsyncWordSpec + with BaseTest + with HasExecutorService + with CloseableTest { + + protected def setup: DbStorageSetup + + private lazy val ds = + DbLockedConnection + .createDataSource( + setup.storage.dbConfig.config, + poolSize = 1, + connectionTimeout = PositiveFiniteDuration.tryOfSeconds(10), + ) + .valueOrFail("Failed to create datasource") + + private lazy val executor = AsyncExecutorWithMetrics.createSingleThreaded( + "DbLockedConnectionTest", + noTracingLogger, + ) + + case class FixtureParam( + connection: DbLockedConnection, + lockId: DbLockId, + clock: SimClock, + config: DbLockedConnectionConfig, + ) + + // Throws an exception if the connection is not healthy/available + protected def checkHealth(connection: DbLockedConnection): Unit = + connection.get.valueOrFail(s"Connection $connection not available") + + protected def awaitHealthy(connection: DbLockedConnection): Unit = + eventually() { + checkHealth(connection) + } + + protected def createConnection( + connId: String, + lockId: DbLockId, + lockMode: DbLockMode, + clock: Clock, + config: DbLockedConnectionConfig, + ): DbLockedConnection = + DbLockedConnection + .create( + DbLock + .isSupported(setup.storage.profile) + .valueOrFail("Storage profile does not support DB locks"), + ds, + lockId, + lockMode, + config.copy(passiveCheckPeriod = PositiveFiniteDuration.tryOfSeconds(1).toConfig), + isMainConnection = true, + DefaultProcessingTimeouts.testing, + exitOnFatalFailures = false, + clock, + loggerFactory.append("connId", connId), + futureSupervisor, + executor, + logLockOwnersOnLockAcquisitionAttempt = true, + )(executorService, TraceContext.empty) + + protected def allocateLockId(lockCounter: DbLockCounter): DbLockId + + protected def freshLockId(): DbLockId = allocateLockId(UniqueDbLockCounter.get()) + + def withFixture(test: OneArgAsyncTest): FutureOutcome = { + val clock = new SimClock(loggerFactory = loggerFactory) + val lockId = allocateLockId(UniqueDbLockCounter.get()) + val config = DbLockedConnectionConfig() + val lockedConnection = createConnection("fixture", lockId, DbLockMode.Exclusive, clock, config) + complete { + withFixture(test.toNoArgAsyncTest(FixtureParam(lockedConnection, lockId, clock, config))) + } lastly { + lockedConnection.close() + } + } + + override def afterAll(): Unit = + try { + ds.close() + executor.close() + setup.close() + } finally super.afterAll() + + "DbLockedConnection" can { + + "create a connection and acquire an exclusive lock" in { f => + awaitHealthy(f.connection) + succeed + } + + "create multiple connections with a shared lock" in { f => + val lockId = freshLockId() + + val connections = Range(0, 10).map { idx => + createConnection(s"pool-$idx", lockId, DbLockMode.Shared, f.clock, f.config) + } + + // Eventually all connections should become healthy + connections.foreach(awaitHealthy) + + 
connections.foreach(_.close()) + + succeed + } + + "create multiple connections for an exclusive lock id, only one is active" in { f => + awaitHealthy(f.connection) + + // Create a second connection for the same lock id and exclusive mode + val conn = createConnection(s"conflict", f.lockId, DbLockMode.Exclusive, f.clock, f.config) + + DbLockedConnection.awaitInitialized(conn, 50, 200, logger) + assert(!conn.isActive) + + // Trigger a health check and ensure the initial connection is active, the second is still passive + f.clock.advance(f.config.healthCheckPeriod.asJava) + assert(f.connection.isActive) + assert(!conn.isActive) + + // Close the active connection + f.connection.close() + f.clock.advance( + (f.config.healthCheckPeriod * 2).asJava + ) + // Observe the second connection becoming active instead + eventually() { + assert(conn.isActive) + } + + // Create a third connection + val conn2 = createConnection(s"conflict", f.lockId, DbLockMode.Exclusive, f.clock, f.config) + DbLockedConnection.awaitInitialized(conn2, 50, 200, logger) + assert(!conn2.isActive) + + // Close the second connection + conn.close() + f.clock.advance( + (f.config.healthCheckPeriod * 2).asJava + ) + + // Observe the third becomes active + eventually() { + assert(conn2.isActive) + } + + conn2.close() + succeed + } + + "have a lock owner" in { f => + var pid: Option[Long] = None + + awaitHealthy(f.connection) + + val conn = loggerFactory.assertEventuallyLogsSeq(SuppressionRule.Level(Level.DEBUG))( + { + val conn = + createConnection(s"lock-owners", f.lockId, DbLockMode.Exclusive, f.clock, f.config) + DbLockedConnection.awaitInitialized(conn, 50, 200, logger) + conn + }, + entries => { + val lockAcquiredByEntry = + entries.find(_.debugMessage.contains("Failed to acquire lock")) + pid = lockAcquiredByEntry.flatMap { logEntry => + """.*own it: ([0-9]*)""".r + .findFirstMatchIn(logEntry.debugMessage) + .map(_.group(1).toLong) + } + pid shouldBe defined + }, + ) + + var newPid: Option[Long] = None + loggerFactory.assertEventuallyLogsSeq(SuppressionRule.Level(Level.DEBUG))( + { + // Now we close the fixture connection, and we should see `conn` logging that it acquired the lock + // along with the pid it got assigned + f.connection.close() + eventually() { + assert(conn.isActive) + } + }, + entries => { + val lockAcquiredByEntry = + entries.find(_.debugMessage.contains("Lock successfully acquired")) + newPid = lockAcquiredByEntry.flatMap { logEntry => + """.*own it: ([0-9]*)""".r + .findFirstMatchIn(logEntry.debugMessage) + .map(_.group(1).toLong) + } + newPid shouldBe defined + }, + ) + conn.close() + succeed + } + + "recover when connection is closed" in { f => + awaitHealthy(f.connection) + + f.connection.get.valueOrFail("Connection not available") match { + case keepAliveConnection: KeepAliveConnection => + logger.debug(s"Cutting DB connection $keepAliveConnection") + keepAliveConnection.underlying.close() + case _ => fail("Connection not of type KeepAliveConnection") + } + + def expectedWarns(lockId: DbLockId) = Seq( + s"Failed to check database lock status for $lockId, assuming lost", + s"Lock $lockId was lost", + "Locked connection was lost, trying to rebuild", + ) + + val prevState = f.connection.state + val prevConnectedState = prevState match { + case connected: DbLockedConnection.State.Connected => connected + case s => fail(s"Locked connection must be connected, but state is $s") + } + + loggerFactory.assertLogsSeqString( + SuppressionRule.LevelAndAbove(Level.WARN), + 
expectedWarns(prevConnectedState.lock.lockId),
+ ) {
+ eventually() {
+ // Add 1 second to the health check period to account for schedule jitter
+ // Continuously advance time to make sure that both the DB lock check and a follow-up connection check have run
+ f.clock.advance(f.config.healthCheckPeriod.plusSeconds(1).asJava)
+ checkHealth(f.connection)
+ f.connection.state shouldNot be(prevState)
+ }
+ }
+
+ }
+ }
+
+}
+
+class DbLockedConnectionTestPostgres extends DbLockedConnectionTest {
+
+ override protected lazy val setup: PostgresDbStorageSetup =
+ DbStorageSetup.postgres(loggerFactory)(executorService)
+
+ override protected def allocateLockId(lockCounter: DbLockCounter): DbLockId =
+ PostgresDbLock
+ .allocateLockId(setup.config, lockCounter)(loggerFactory)
+ .valueOrFail(s"Failed to allocate lock id for $lockCounter")
+}
diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/resource/DbStorageMultiTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/resource/DbStorageMultiTest.scala
new file mode 100644
index 0000000000..c15e649b44
--- /dev/null
+++ b/canton/community/common/src/test/scala/com/digitalasset/canton/resource/DbStorageMultiTest.scala
@@ -0,0 +1,403 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.resource
+
+import com.daml.nameof.NameOf.functionFullName
+import com.digitalasset.canton.config.RequireTypes.PositiveInt
+import com.digitalasset.canton.config.{
+ DbLockedConnectionConfig,
+ DbLockedConnectionPoolConfig,
+ DefaultProcessingTimeouts,
+ PositiveFiniteDuration,
+}
+import com.digitalasset.canton.lifecycle.{
+ CloseContext,
+ FlagCloseable,
+ FutureUnlessShutdown,
+ HasCloseContext,
+}
+import com.digitalasset.canton.logging.SuppressionRule
+import com.digitalasset.canton.metrics.CommonMockMetrics
+import com.digitalasset.canton.resource.DbLockedConnection.State
+import com.digitalasset.canton.resource.DbStorage.PassiveInstanceException
+import com.digitalasset.canton.store.db.*
+import com.digitalasset.canton.time.{Clock, SimClock}
+import com.digitalasset.canton.util.FutureUtil
+import com.digitalasset.canton.util.Thereafter.syntax.*
+import com.digitalasset.canton.{BaseTest, HasExecutionContext}
+import org.scalatest.wordspec.FixtureAsyncWordSpec
+import org.scalatest.{Assertion, BeforeAndAfterAll, FutureOutcome, Suite}
+import org.slf4j.event.Level
+import slick.dbio.{Effect, NoStream}
+import slick.sql.SqlAction
+
+import java.util.concurrent.atomic.AtomicBoolean
+import scala.concurrent.duration.*
+
+trait DbStorageMultiTestBase extends FlagCloseable with HasCloseContext with BeforeAndAfterAll {
+ self: Suite with BaseTest with HasExecutionContext =>
+ protected def setup: DbStorageSetup
+
+ trait Fixture {
+ def storage: DbStorage
+ }
+
+ protected val tableName: String
+
+ protected val storageTimeout = 60.seconds
+
+ def createTable(f: Fixture): FutureUnlessShutdown[Unit] = createTable(f.storage)
+
+ def createTable(storage: DbStorage): FutureUnlessShutdown[Unit] = {
+ import storage.api.*
+ storage.update_(sqlu"create table #$tableName (foo varchar)", functionFullName)
+ }
+
+ def dropTable(f: Fixture): FutureUnlessShutdown[Unit] = dropTable(f.storage)
+
+ def dropTable(storage: DbStorage): FutureUnlessShutdown[Unit] = {
+ import storage.api.*
+ storage.update_(sqlu"drop table #$tableName", functionFullName)
+ }
+
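+ // Helpers for writing to the test table and reading the row back through a given storage instance
+ def writeToTable(f: Fixture, str: String):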
FutureUnlessShutdown[Unit] = + writeToTable(f.storage, str) + + def writeToTableQuery(storage: DbStorage, str: String): SqlAction[Int, NoStream, Effect.Write] = { + import storage.api.* + sqlu"insert into #$tableName(foo) values ($str)" + } + + def writeToTable(storage: DbStorage, str: String): FutureUnlessShutdown[Unit] = + storage.update_(writeToTableQuery(storage, str), functionFullName) + + def writeAndVerify(f: Fixture): FutureUnlessShutdown[Assertion] = writeAndVerify(f.storage) + + def writeAndVerify(storage: DbStorage): FutureUnlessShutdown[Assertion] = { + import storage.api.* + + val testString = "foobar" + for { + _ <- writeToTable(storage, testString) + result <- storage.query( + sql"select foo from #$tableName".as[String].headOption, + functionFullName, + ) + } yield result.value shouldEqual testString + } + + def verifyActiveStorage(storage: DbStorage): FutureUnlessShutdown[Assertion] = + (for { + _ <- createTable(storage) + result <- writeAndVerify(storage) + _ <- dropTable(storage) + } yield result) + + override def afterAll(): Unit = { + super.afterAll() + close() + } +} + +trait DbStorageMultiTest + extends FixtureAsyncWordSpec + with BaseTest + with HasExecutionContext + with DbStorageMultiTestBase { + + override protected lazy val tableName = "db_storage_multi_pooled_test" + + case class FixtureParam( + storage: DbStorageMulti, + clock: SimClock, + onPassiveCalled: AtomicBoolean, + mainLockCounter: DbLockCounter, + poolLockCounter: DbLockCounter, + ) extends Fixture + + protected def setup: DbStorageSetup + + protected lazy val connectionConfig: DbLockedConnectionConfig = DbLockedConnectionConfig( + healthCheckPeriod = PositiveFiniteDuration.ofSeconds(1), + passiveCheckPeriod = PositiveFiniteDuration.ofSeconds(5), + ) + + protected lazy val connectionPoolConfig: DbLockedConnectionPoolConfig = + DbLockedConnectionPoolConfig( + connection = connectionConfig, + healthCheckPeriod = PositiveFiniteDuration.ofSeconds(1), + ) + + protected def createStorage( + customClock: Option[Clock] = None, + onActive: () => FutureUnlessShutdown[Unit] = () => FutureUnlessShutdown.unit, + onPassive: () => FutureUnlessShutdown[Option[CloseContext]] = () => + FutureUnlessShutdown.pure(None), + name: String, + mainLockCounter: DbLockCounter, + poolLockCounter: DbLockCounter, + ): DbStorageMulti = { + val writePoolSize = PositiveInt.tryCreate(8) + val readPoolSize = setup.config.numReadConnectionsCanton( + forParticipant = false, + withWriteConnectionPool = true, + withMainConnection = false, + ) + DbStorageMulti + .create( + setup.config, + connectionPoolConfig, + readPoolSize, + writePoolSize, + mainLockCounter, + poolLockCounter, + onActive, + onPassive, + CommonMockMetrics.dbStorage, + None, + customClock, + None, + DefaultProcessingTimeouts.testing, + exitOnFatalFailures = true, + futureSupervisor, + loggerFactory.append("storageId", name), + ) + .valueOrFailShutdown("create DB storage") + } + + protected def setBoolean(ref: AtomicBoolean, active: Boolean): () => FutureUnlessShutdown[Unit] = + () => FutureUnlessShutdown.pure(ref.set(active)) + + def withFixture(test: OneArgAsyncTest): FutureOutcome = { + val clock = new SimClock(loggerFactory = loggerFactory) + val onPassiveCalled = new AtomicBoolean(false) + val mainLockCounter = UniqueDbLockCounter.get() + val poolLockCounter = UniqueDbLockCounter.get() + val storage = + createStorage( + name = "fixture", + onPassive = () => { + setBoolean(onPassiveCalled, active = true)().map(_ => None) + }, + customClock = Some(clock), + mainLockCounter 
= mainLockCounter, + poolLockCounter = poolLockCounter, + ) + val fixture = FixtureParam(storage, clock, onPassiveCalled, mainLockCounter, poolLockCounter) + + complete { + withFixture(test.toNoArgAsyncTest(fixture)) + } lastly { + storage.close() + } + } + + def terminateConnection(storage: DbStorageMulti): Unit = { + val conn = storage.writeConnectionPool.mainConnection.get.valueOrFail("Storage not connected") + logger.debug(s"Terminate connection $conn") + // Attempt to close the DB connection of the active replica to trigger a fail-over + conn.underlying.close() + } + + def triggerDisconnectCheck(f: FixtureParam): Unit = { + + // When the connection is cut, the db lock, single connection db, and multi storage will each warn + def expectedWarns(lockId: DbLockId): List[String] = List( + s"Failed to check database lock status for $lockId, assuming lost", + s"Lock $lockId was lost", + "Locked connection was lost, trying to rebuild", + ) + + def getMainLockOrFail: DbLock = f.storage.writeConnectionPool.mainConnection.state match { + case State.Connected(_connection, lock) => lock + case s => fail(s"Connection pool not connected: $s") + } + + val previousLock = getMainLockOrFail + + loggerFactory.assertLogsSeqString( + SuppressionRule.LevelAndAbove(Level.WARN), + expectedWarns(previousLock.lockId), + ) { + eventually(storageTimeout) { + // Advance the time to trigger a connection check + f.clock.advance(connectionPoolConfig.healthCheckPeriod.asJava) + + // Wait until the lock has been replaced with a new lock or set to None + val lock = getMainLockOrFail + assert(lock != previousLock) + + assert(!f.storage.isActive) + } + } + } + + protected def createStorageForFailover( + replicaActive: AtomicBoolean, + mainLockCounter: DbLockCounter, + poolLockCounter: DbLockCounter, + ): DbStorageMulti = { + val storage = createStorage( + onActive = setBoolean(replicaActive, active = true), + onPassive = () => { + setBoolean(replicaActive, active = false)().map(_ => None) + }, + name = "failover", + mainLockCounter = mainLockCounter, + poolLockCounter = poolLockCounter, + ) + + storage + } + + protected def withNewStorage[T]( + newStorage: DbStorageMulti + )(fn: DbStorageMulti => FutureUnlessShutdown[T]): FutureUnlessShutdown[T] = + FutureUtil + .logOnFailureUS(fn(newStorage), "failed to run function with new storage") + .thereafter(_ => newStorage.close()) + + protected def awaitActive(f: FixtureParam): Assertion = + eventually(storageTimeout) { + f.clock.advance(connectionPoolConfig.healthCheckPeriod.asJava) + assert(f.storage.isActive) + } + + "DbStorageMulti" can { + + "run write queries" in { f => + awaitActive(f) + verifyActiveStorage(f.storage).failOnShutdown + } + + "synchronize two instances" in { f => + awaitActive(f) + + val storage1 = f.storage + import storage1.api.* + + withNewStorage( + createStorage( + name = "storage2", + mainLockCounter = f.mainLockCounter, + poolLockCounter = f.poolLockCounter, + ) + ) { storage2 => + for { + _ <- createTable(f) + testString1 = "foobar" + testString2 = "barfoo" + _ <- writeToTable(f, testString1) + // The insertion with the second storage should fail as it is not active + failedUpdate = storage2.update( + writeToTableQuery(storage2, testString2), + "failing insert", + maxRetries = 1, + ) + result1 <- storage1.query(sql"select foo from #$tableName".as[String], "select foo") + result2 <- storage2.query(sql"select foo from #$tableName".as[String], "select foo") + _ <- dropTable(f) + } yield { + failedUpdate.failOnShutdown.failed.futureValue shouldBe 
a[PassiveInstanceException]
+
+ // We do not expect testString2 to have been inserted
+ result1 shouldEqual Vector(testString1)
+ result2 shouldEqual Vector(testString1)
+ }
+ }.failOnShutdown
+ }
+
+ "graceful fail-over with set passive" in { f =>
+ awaitActive(f)
+
+ val replicaActive2 = new AtomicBoolean(false)
+
+ withNewStorage(
+ createStorageForFailover(replicaActive2, f.mainLockCounter, f.poolLockCounter)
+ ) { storage2 =>
+ assert(!storage2.isActive, "New storage must not be active")
+
+ // Initiate the graceful fail-over by setting the active replica to passive
+ val setPassiveF = f.storage.setPassive()
+
+ eventually(storageTimeout) {
+ // Advance time to trigger health checks on storage1
+ f.clock.advance(connectionPoolConfig.healthCheckPeriod.asJava)
+
+ // Eventually the passive replica must have taken over
+ assert(storage2.isActive)
+ }
+
+ setPassiveF.value.map { _ =>
+ // Once the command has been completed, the formerly active replica must be passive
+ eventually(storageTimeout) {
+ assert(!f.storage.isActive)
+ }
+ }
+
+ }.failOnShutdown
+ }
+
+ "set passive with single replica should fail" in { f =>
+ awaitActive(f)
+
+ // Command succeeds but there is no other replica active to take over
+ f.storage.setPassive().valueOrFailShutdown("set passive").futureValue
+
+ // Storage must still be active afterwards
+ awaitActive(f)
+ }
+
+ "fail-over to passive replica" in { f =>
+ val storage1 = f.storage
+
+ val replicaActive2 = new AtomicBoolean(false)
+
+ // Wait until storage1 has acquired the lock and is considered active
+ awaitActive(f)
+
+ withNewStorage(
+ createStorageForFailover(replicaActive2, f.mainLockCounter, f.poolLockCounter)
+ ) { storage2 =>
+ assert(!storage2.isActive, "New storage must not be active")
+
+ // Terminate the connection of the active storage to the DB
+ terminateConnection(storage1)
+
+ // Wait until storage2's main connection is active
+ eventually(storageTimeout) {
+ assert(storage2.writeConnectionPool.mainConnection.isActive)
+ }
+
+ // Trigger the disconnect check for the previously active storage
+ triggerDisconnectCheck(f)
+
+ eventually(storageTimeout) {
+ // Advance time to make sure we trigger the connection and lock checks
+ f.clock.advance(connectionPoolConfig.healthCheckPeriod.asJava)
+
+ // Eventually the passive replica is active
+ assert(storage2.isActive)
+
+ // The onActive for storage2 must have been called
+ assert(replicaActive2.get())
+
+ // Eventually the formerly active replica must be marked passive
+ assert(!storage1.isActive)
+
+ // The onPassive for storage1 must have been called
+ assert(f.onPassiveCalled.get())
+ }
+
+ // We can now write using storage2
+ verifyActiveStorage(storage2)
+ }.failOnShutdown
+ }
+ }
+}
+
+class DbStorageMultiTestPostgres extends DbStorageMultiTest {
+
+ override protected lazy val setup: PostgresDbStorageSetup = DbStorageSetup.postgres(loggerFactory)
+}
diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/resource/DbStorageSingleTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/resource/DbStorageSingleTest.scala
index 3577bf6753..21a863a448 100644
--- a/canton/community/common/src/test/scala/com/digitalasset/canton/resource/DbStorageSingleTest.scala
+++ b/canton/community/common/src/test/scala/com/digitalasset/canton/resource/DbStorageSingleTest.scala
@@ -69,7 +69,7 @@ trait DbStorageSingleTest
 "fail on invalid database" in {
 val config = modifyDatabaseName("foobar")
- loggerFactory.suppressWarningsAndErrors {
+ loggerFactory.assertLogs(
 DbStorageSingle
 .create(
config, @@ -81,8 +81,9 @@ trait DbStorageSingleTest DefaultProcessingTimeouts.testing, loggerFactory, ) - .leftOrFailShutdown("storage create") shouldBe a[String] - } + .leftOrFailShutdown("storage create") shouldBe a[String], + _.message should include("""database "foobar" does not exist"""), + ) } "fail on invalid port" in { diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/resource/WithDbLockTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/resource/WithDbLockTest.scala new file mode 100644 index 0000000000..2f8efad9be --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/resource/WithDbLockTest.scala @@ -0,0 +1,154 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import cats.data.EitherT +import cats.syntax.either.* +import cats.syntax.parallel.* +import com.digitalasset.canton.config.{DbLockedConnectionConfig, ProcessingTimeout} +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyInstances} +import com.digitalasset.canton.resource.WithDbLock.{WithDbLockError, withDbLock} +import com.digitalasset.canton.store.db.{DbTest, PostgresTest} +import com.digitalasset.canton.time.WallClock +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.FutureInstances.* +import com.digitalasset.canton.util.{EitherTUtil, FutureUtil} +import com.digitalasset.canton.{BaseTest, HasExecutionContext} +import org.scalatest.FutureOutcome +import org.scalatest.wordspec.FixtureAsyncWordSpec + +import java.time.Duration.ofMillis +import java.util.concurrent.atomic.AtomicInteger +import scala.concurrent.{Future, Promise} + +trait WithDbLockTest extends FixtureAsyncWordSpec with BaseTest with HasExecutionContext { + this: DbTest => + import WithDbLockTest.* + + override def cleanDb(storage: DbStorage)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Unit] = + // we're not actually storing anything so there's nothing to clean + FutureUnlessShutdown.unit + + implicit val prettyString: Pretty[String] = PrettyInstances.prettyString + + class Env extends AutoCloseable { + val processingTimeout = ProcessingTimeout() + val clock = new WallClock(processingTimeout, loggerFactory) + + def withLock[A: Pretty, B](lockCounter: DbLockCounter = DefaultLockCounter)( + fn: => EitherT[Future, A, B] + ): EitherT[Future, WithDbLockError, B] = + withDbLock( + s"${getClass.getSimpleName}:$lockCounter", + lockCounter, + ProcessingTimeout(), + storage.dbConfig, + DbLockedConnectionConfig(), + storage.profile, + futureSupervisor, + clock, + loggerFactory, + logLockOwnersOnLockAcquisitionAttempt = false, + )(fn.mapK(FutureUnlessShutdown.outcomeK)).onShutdown(fail("shutdown")) + + override def close(): Unit = clock.close() + } + + override type FixtureParam = Env + + override def withFixture(fixture: OneArgAsyncTest): FutureOutcome = { + val env = new Env() + + complete { + withFixture(fixture.toNoArgAsyncTest(env)) + } lastly env.close() + } + + "prevents concurrent runs" in { env => + import env.* + val latch = new AtomicInteger(0) + def run(): EitherT[Future, String, Unit] = { + if (!latch.compareAndSet(0, 1)) + fail("Latch was in incorrect state at start of call") + val promise = Promise[Unit]() + + // just delay long enough so un-synchronized calls would likely be noticed + FutureUtil.doNotAwait( + 
clock.scheduleAfter(_ => promise.success(()), ofMillis(50)).failOnShutdown("delay"), + "delay", + ) + + for { + _ <- EitherT.right[String](promise.future) + } yield { + if (!latch.compareAndSet(1, 0)) { + fail("Latch was in incorrect state at end of call") + } + } + } + + for { + // run a handful of functions that would blow up if not synchronized with a db lock + result <- (0 to 3).toList.parTraverse(_ => env.withLock()(run())).value.map(_ => ()) + } yield result shouldBe () + } + + "when block fails" should { + "unlock when an error is returned" in { env => + import env.* + for { + error <- withLock()(EitherT.leftT[Future, Unit]("BOOM")).value + // if the above doesn't unlock this block won't run + success <- withLock()(EitherTUtil.unit[String]).value + } yield { + error shouldBe Left(WithDbLockError.OperationError("BOOM")) + success shouldBe Either.unit + } + } + + "unlock when an exception is thrown" in { env => + import env.* + val ex = new RuntimeException("BOOM") + for { + returnedException <- withLock()(EitherT.right[String](Future.failed[Unit](ex))).value.failed + // if the above doesn't unlock this block won't run + success <- withLock()(EitherTUtil.unit[String]).value + } yield { + returnedException shouldBe ex + success shouldBe Either.unit + } + } + } + + "distinct locks can be taken at once" in { env => + import env.* + val l1P = Promise[Unit]() + val l2P = Promise[Unit]() + + // take two locks with separate counters and check they don't deadlock + val l1F = withLock(lockCounter = DefaultLockCounter) { + l1P.success(()) + EitherT.right[String](l2P.future) + } + val l2F = withLock(lockCounter = AnotherLockCounter) { + l2P.success(()) + EitherT.right[String](l1P.future) + } + + for { + _ <- List(l1F, l2F).parSequence.value + } yield succeed + } + +} + +object WithDbLockTest { + val DefaultLockCounter = UniqueDbLockCounter.get() + val AnotherLockCounter = UniqueDbLockCounter.get() +} + +class WithDbLockTestPostgres extends WithDbLockTest with PostgresTest diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/scheduler/JobScheduleTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/scheduler/JobScheduleTest.scala new file mode 100644 index 0000000000..839d570cc9 --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/scheduler/JobScheduleTest.scala @@ -0,0 +1,232 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.scheduler + +import cats.syntax.option.* +import com.daml.nonempty.NonEmptyUtil +import com.digitalasset.canton.BaseTestWordSpec +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.scheduler.JobSchedule.NextRun +import com.digitalasset.canton.scheduler.JobScheduler.* +import com.digitalasset.canton.time.{NonNegativeFiniteDuration, PositiveSeconds, SimClock} + +import java.time.Instant + +class JobScheduleTest extends BaseTestWordSpec { + private val clock = new SimClock(CantonTimestamp.Epoch, loggerFactory) + private val dummyRetention = PositiveSeconds.tryOfSeconds(7L) + private val hourlyScheduleForTenMinutes = new PruningCronSchedule( + Cron.tryCreate(s"0 0 /1 * * ? 
*"), + PositiveSeconds.tryOfMinutes(10L), + dummyRetention, + clock, + logger, + ) + + "An interval schedule" when { + + "schedule according to interval" in { + val interval = PositiveSeconds.tryOfHours(2L) + val schedule = + JobSchedule.fromPruningSchedule(None, interval.some, clock, logger).value + val NextRun(wait, sameSchedule) = schedule.determineNextRun(Done).value + + wait.duration shouldBe interval.duration + sameSchedule shouldBe schedule + } + + "not schedule when interval is infinite" in { + val schedule = JobSchedule + .fromPruningSchedule(None, None, clock, logger) + schedule shouldBe None + } + } + + "A cron schedule" when { + // See additional coverage in CronTest + "schedule according to cron" in { + val hoursFromMidnight = 7L + val schedule = + new PruningCronSchedule( + Cron.tryCreate(s"0 0 $hoursFromMidnight * * ? *"), + PositiveSeconds.tryOfSeconds(5L), + dummyRetention, + clock, + logger, + ) + + val NextRun(wait, sameSchedule) = schedule.determineNextRun(Done).value + wait shouldBe NonNegativeFiniteDuration.tryOfHours(hoursFromMidnight) + sameSchedule shouldBe schedule + } + + def durationUntilNextRun( + resultFromPreviousRun: ScheduledRunResult, + datetime: String, + ): Option[NonNegativeFiniteDuration] = + hourlyScheduleForTenMinutes.waitDurationUntilNextRun( + resultFromPreviousRun, + CantonTimestamp.assertFromInstant(Instant.parse(datetime)), + logger, + ) + + "schedule work at the beginning of next window if work in current window is done" in { + durationUntilNextRun( + Done, + "2024-05-17T10:05:00.00Z", + ) shouldBe NonNegativeFiniteDuration.tryOfMinutes(55L).some + } + + "schedule work immediately if current window is still active" in { + durationUntilNextRun( + MoreWorkToPerform, + "2024-05-17T10:09:56.00Z", + ) shouldBe NonNegativeFiniteDuration.Zero.some + } + + "schedule work at the beginning of next window if current window no longer active" in { + durationUntilNextRun( + MoreWorkToPerform, + "2024-05-17T10:10:00.00Z", + ) shouldBe NonNegativeFiniteDuration.tryOfMinutes(50L).some + } + + "schedule retry after backoff if current window is still active after backoff" in { + durationUntilNextRun( + Error( + "enough time to retry in current window", + backoff = NonNegativeFiniteDuration.tryOfSeconds(3L), + logAsInfo = true, + ), + "2024-05-17T10:09:56.00Z", + ) shouldBe NonNegativeFiniteDuration.tryOfSeconds(3L).some + } + + "schedule retry in next window if current window no longer active after backoff" in { + durationUntilNextRun( + Error( + "cannot retry in current window", + backoff = NonNegativeFiniteDuration.tryOfMinutes(1L), + logAsInfo = true, + ), + "2024-05-17T10:09:56.00Z", + ) shouldBe (NonNegativeFiniteDuration.tryOfMinutes(50L) + NonNegativeFiniteDuration + .tryOfSeconds( + 4L + )).some + } + + "schedule retry in next window even if backoff is larger than gaps between windows" in { + durationUntilNextRun( + Error( + "enough time to retry in current window", + backoff = NonNegativeFiniteDuration.tryOfHours(6L), + logAsInfo = true, + ), + "2024-05-17T10:09:56.00Z", + ) shouldBe (NonNegativeFiniteDuration.tryOfMinutes(50L) + NonNegativeFiniteDuration + .tryOfSeconds( + 4L + )).some + } + + "schedule work in next window if current time is outside of any window on any result" in { + Seq[ScheduledRunResult]( + Done, + MoreWorkToPerform, + Error("outside of any window", backoff = NonNegativeFiniteDuration.Zero, logAsInfo = true), + ).foreach { result => + clue(s"Trying result $result") { + durationUntilNextRun( + result, + "2024-05-17T10:20:00.00Z", + ) 
shouldBe NonNegativeFiniteDuration.tryOfMinutes(40L).some
+ }
+ }
+ }
+
+ "schedule infinite wait on expired cron" in {
+ val schedule =
+ new PruningCronSchedule(
+ Cron.tryCreate(s"0 0 * * * ? 1969"), // before epoch
+ PositiveSeconds.tryOfSeconds(5L),
+ dummyRetention,
+ clock,
+ logger,
+ )
+ val nextRun = schedule.determineNextRun(Done)
+ nextRun shouldBe None
+ }
+ }
+
+ "A compound schedule" when {
+
+ "pick the earlier cron schedule" in {
+ checkCompoundSchedule(7L, 8L)
+ }
+
+ "pick the earlier interval schedule" in {
+ checkCompoundSchedule(8L, 6L)
+ }
+
+ "pick the earliest cron schedule" in {
+ val fiveHoursFromNow = hoursFromNowCronSchedule(5L)
+ val compoundSchedule =
+ JobSchedule(
+ List(hoursFromNowCronSchedule(6L), fiveHoursFromNow, hoursFromNowCronSchedule(7L))
+ ).value
+ val NextRun(wait, firstSchedule) = compoundSchedule.determineNextRun(Done).value
+
+ wait shouldBe NonNegativeFiniteDuration.tryOfHours(5L)
+ firstSchedule shouldBe fiveHoursFromNow
+ }
+
+ "pick no next run time on all expired schedules" in {
+ val compoundSchedule =
+ JobSchedule(
+ List(neverAnymoreSchedule(1967), neverAnymoreSchedule(1968), neverAnymoreSchedule(1969))
+ ).value
+ val nextRun = compoundSchedule.determineNextRun(Done)
+ nextRun shouldBe None
+ }
+ }
+
+ // With the SimClock starting at the epoch (midnight), a daily cron at hour `hoursUntilCron` fires that many hours from now
+ private def hoursFromNowCronSchedule(hoursUntilCron: Long): IndividualSchedule =
+ new PruningCronSchedule(
+ Cron.tryCreate(s"0 0 $hoursUntilCron * * ? *"),
+ PositiveSeconds.tryOfSeconds(5L),
+ dummyRetention,
+ clock,
+ logger,
+ )
+
+ // A cron restricted to a year before 1970 can never fire again after the epoch
+ private def neverAnymoreSchedule(previousYear: Long): IndividualSchedule = {
+ require(previousYear < 1970)
+ new PruningCronSchedule(
+ Cron.tryCreate(s"0 0 0 * * ? $previousYear"),
+ PositiveSeconds.tryOfSeconds(5L),
+ dummyRetention,
+ clock,
+ logger,
+ )
+ }
+
+ private def checkCompoundSchedule(hoursUntilCron: Long, hoursInterval: Long) = {
+ val cronSchedule = hoursFromNowCronSchedule(hoursUntilCron)
+ val intervalSchedule = new IntervalSchedule(PositiveSeconds.tryOfHours(hoursInterval))
+
+ val compoundSchedule =
+ new CompoundSchedule(NonEmptyUtil.fromUnsafe(Set(cronSchedule, intervalSchedule)))
+
+ val NextRun(wait, firstSchedule) = compoundSchedule.determineNextRun(Done).value
+
+ // Expect the earlier time to be returned.
+ wait shouldBe NonNegativeFiniteDuration.tryOfHours(
+ Math.min(hoursUntilCron, hoursInterval)
+ )
+
+ // Expect the schedule with the earlier time to be returned.
+ firstSchedule shouldBe (if (hoursUntilCron < hoursInterval) cronSchedule else intervalSchedule)
+ }
+}
diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/scheduler/JobSchedulerTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/scheduler/JobSchedulerTest.scala
new file mode 100644
index 0000000000..7a7950ce8a
--- /dev/null
+++ b/canton/community/common/src/test/scala/com/digitalasset/canton/scheduler/JobSchedulerTest.scala
@@ -0,0 +1,154 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.scheduler
+
+import com.digitalasset.canton.concurrent.Threading
+import com.digitalasset.canton.time.{PositiveSeconds, WallClock}
+import com.digitalasset.canton.{BaseTestWordSpec, HasExecutionContext, config}
+
+import scala.collection.mutable.ListBuffer
+import scala.concurrent.Future
+
+/** Test two things:
+ * 1. behavior of the scheduler in response to ScheduledRunResult values
+ * 1.
resilience of the scheduler to various job exceptions
+ */
+class JobSchedulerTest
+ extends BaseTestWordSpec
+ with HasExecutionContext
+ with IgnoresTransientSchedulerErrors {
+
+ private val clock = new WallClock(timeouts, loggerFactory)
+
+ private val everyThreeSeconds = new PruningCronSchedule(
+ Cron.create("*/3 * * * * ? *").getOrElse(fail("bad cron")),
+ PositiveSeconds.tryOfSeconds(1L),
+ PositiveSeconds.tryOfSeconds(8L), // retention not used in non-pruning test
+ clock,
+ logger,
+ )
+
+ def runScheduler[T](schedule: PruningCronSchedule, duration: config.NonNegativeDuration)(
+ job: IndividualSchedule => Future[
+ (JobScheduler.ScheduledRunResult, T)
+ ]
+ ): Seq[(JobScheduler.ScheduledRunResult, T)] = {
+ val runs = ListBuffer.empty[(JobScheduler.ScheduledRunResult, T)]
+ val scheduler = new JobTestScheduler(
+ job(_).map { case tuple @ (result, _) =>
+ runs.append(tuple)
+ result
+ },
+ clock,
+ timeouts,
+ loggerFactory,
+ )
+
+ timeouts.unbounded
+ .await[List[(JobScheduler.ScheduledRunResult, T)]]("letting scheduler run")((for {
+ _configured <- scheduler.setSchedule(schedule)
+ _started <- scheduler.start()
+ _sleptLettingSchedulerRun = Threading.sleep(duration.duration.toMillis)
+ } yield runs.result()).transform { t =>
+ scheduler.stop()
+ t
+ })
+ }
+
+ private def runTask(result: JobScheduler.ScheduledRunResult, waitInMillis: Int) = {
+ val startedAt = clock.now
+ Threading.sleep(waitInMillis.toLong)
+ logger.info(s"Returning $result started at $startedAt")
+ Future.successful(result -> startedAt)
+ }
+
+ "A scheduler" when {
+
+ "only schedule done tasks once per window" in {
+ val events = runScheduler(
+ everyThreeSeconds,
+ config.NonNegativeDuration.ofSeconds(10L),
+ )(_ => runTask(JobScheduler.Done, 200))
+
+ // 3 or 4 runs spaced 3 seconds apart are expected in a 10-second period
+ events.size should be >= 3
+ events.size should be <= 4
+ }
+
+ "repeatedly schedule tasks when more work to perform" in {
+ val events = runScheduler(
+ everyThreeSeconds,
+ config.NonNegativeDuration.ofSeconds(10L),
+ )(_ => runTask(JobScheduler.MoreWorkToPerform, 100))
+
+ // Should see at least 12 runs; the bound is chosen somewhat conservatively to minimize flakiness
+ events.size should be >= 12
+ }
+
+ "repeatedly schedule tasks when errors" in {
+ val events = ignoreTransientSchedulerErrors("JobSchedulerTest") {
+ runScheduler(
+ everyThreeSeconds,
+ config.NonNegativeDuration.ofSeconds(10L),
+ )(_ => runTask(JobScheduler.Error("caught by scheduler job task"), 100))
+ }
+
+ // Should see at least 3 runs showing that returned errors don't interrupt the scheduler
+ events.size should be >= 3
+
+ // The backoff after an error is expected to push out the next execution to the subsequent window
+ events.size should be <= 4
+ }
+
+ "reschedule in spite of non-fatal exceptions" in {
+ var throwOrNot = false
+ val events = ignoreTransientSchedulerErrors("JobSchedulerTest") {
+ runScheduler(
+ everyThreeSeconds,
+ config.NonNegativeDuration.ofSeconds(10L),
+ )(_ =>
+ Future {
+ val result = JobScheduler.MoreWorkToPerform
+ val startedAt = clock.now
+ Threading.sleep(100)
+ if (throwOrNot) {
+ throwOrNot = false
+ throw new IllegalStateException("Thrown non-fatal exception")
+ } else {
+ throwOrNot = true
+ logger.info(s"Returning $result started at $startedAt")
+ }
+ result -> startedAt
+ }
+ )
+ }
+
+ // Should see at least 3 runs showing that thrown exceptions don't interrupt the scheduler
+ events.size should be >= 3
+
+ // The backoff after an exception is expected to push out the next execution to the
subsequent window + events.size should be <= 4 + } + + "tolerate extremely long running tasks" in { + var sleep = 1000L * 15L + val events = runScheduler( + everyThreeSeconds, + config.NonNegativeDuration.ofSeconds(20L), + )(_ => + Future { + val result = JobScheduler.Done + val startedAt = clock.now + Threading.sleep(sleep) + sleep = 200L + logger.info(s"Returning $result started at $startedAt") + result -> startedAt + } + ) + + events.size should be >= 1 + } + + } +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/scheduler/JobTestScheduler.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/scheduler/JobTestScheduler.scala new file mode 100644 index 0000000000..3229e190ac --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/scheduler/JobTestScheduler.scala @@ -0,0 +1,81 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.scheduler + +import cats.data.EitherT +import com.digitalasset.canton.concurrent.ExecutionContextIdlenessExecutorService +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.time.{Clock, PositiveSeconds} +import com.digitalasset.canton.tracing.TraceContext + +import java.util.concurrent.atomic.AtomicReference +import scala.concurrent.Future + +class JobTestScheduler( + job: IndividualSchedule => Future[ + JobScheduler.ScheduledRunResult + ], + clock: Clock, + processingTimeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, +)(implicit ec: ExecutionContextIdlenessExecutorService) + extends JobScheduler( + "test-scheduler", + processingTimeouts, + loggerFactory, + ) { + private val schedule = new AtomicReference[Option[PruningCronSchedule]](None) + + override def schedulerJob(schedule: IndividualSchedule)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[JobScheduler.ScheduledRunResult] = + FutureUnlessShutdown.outcomeF(job(schedule)) + + override def initializeSchedule()(implicit + traceContext: TraceContext + ): Future[Option[JobSchedule]] = Future.successful(schedule.get) + + override def close(): Unit = stop()(TraceContext.todo) + + def setSchedule(newSchedule: PruningCronSchedule)(implicit + traceContext: TraceContext + ): Future[Unit] = + updateAndRestart(schedule.set(Some(newSchedule))) + + override def clearSchedule()(implicit traceContext: TraceContext): Future[Unit] = + updateAndRestart(schedule.set(None)) + + override def updateCron(cron: Cron)(implicit + traceContext: TraceContext + ): EitherT[Future, String, Unit] = + updateAndRestartET( + schedule.updateAndGet( + _.map(old => new PruningCronSchedule(cron, old.maxDuration, old.retention, clock, logger)) + ) + ) + + override def updateMaxDuration(maxDuration: PositiveSeconds)(implicit + traceContext: TraceContext + ): EitherT[Future, String, Unit] = updateAndRestartET( + schedule.updateAndGet( + _.map(old => new PruningCronSchedule(old.cron, maxDuration, old.retention, clock, logger)) + ) + ) + + private def updateAndRestart[T]( + update: => T + )(implicit traceContext: TraceContext): Future[Unit] = { + update + reactivateSchedulerIfActive() + } + + private def updateAndRestartET[T]( + update: => T + )(implicit traceContext: TraceContext): EitherT[Future, String, Unit] = { + update + reactivateSchedulerIfActiveET() + } +} diff 
--git a/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/ConnectionPoolTestHelpers.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/ConnectionPoolTestHelpers.scala index 3d69956cfc..d5443e596a 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/ConnectionPoolTestHelpers.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/ConnectionPoolTestHelpers.scala @@ -73,6 +73,9 @@ trait ConnectionPoolTestHelpers { protected lazy val authConfig: AuthenticationTokenManagerConfig = AuthenticationTokenManagerConfig() + protected lazy val sequencerConnectionPoolDelays: SequencerConnectionPoolDelays = + SequencerConnectionPoolDelays.default + protected lazy val testCrypto: SynchronizerCrypto = SynchronizerCrypto( SymbolicCrypto @@ -154,6 +157,8 @@ trait ConnectionPoolTestHelpers { SequencerConnectionXPoolConfig( connections = configs, trustThreshold = trustThreshold, + minRestartConnectionDelay = sequencerConnectionPoolDelays.minRestartDelay, + maxRestartConnectionDelay = sequencerConnectionPoolDelays.maxRestartDelay, expectedPSIdO = expectedSynchronizerIdO, ) } @@ -162,6 +167,7 @@ trait ConnectionPoolTestHelpers { nbConnections: PositiveInt, trustThreshold: PositiveInt, attributesForConnection: Int => ConnectionAttributes, + responsesForConnection: PartialFunction[Int, TestResponses] = Map(), expectedSynchronizerIdO: Option[PhysicalSynchronizerId] = None, testTimeouts: ProcessingTimeout = timeouts, blockValidation: Int => Boolean = _ => false, @@ -172,6 +178,7 @@ trait ConnectionPoolTestHelpers { val poolFactory = new TestSequencerConnectionXPoolFactory( attributesForConnection, + responsesForConnection, validationBlocker, authConfig, testMember, @@ -182,7 +189,7 @@ trait ConnectionPoolTestHelpers { testTimeouts, loggerFactory, ) - val pool = poolFactory.create(config).valueOrFail("create connection pool") + val pool = poolFactory.create(config, name = "test").valueOrFail("create connection pool") val listener = new TestHealthListener(pool.health) pool.health.registerOnHealthChange(listener) @@ -195,22 +202,22 @@ trait ConnectionPoolTestHelpers { } protected def mkSubscriptionPoolConfig( - trustThreshold: PositiveInt, - reserve: NonNegativeInt, + livenessMargin: NonNegativeInt ): SequencerSubscriptionPoolConfig = - SequencerSubscriptionPoolConfig(trustThreshold, reserve) + SequencerSubscriptionPoolConfig( + livenessMargin = livenessMargin, + subscriptionRequestDelay = sequencerConnectionPoolDelays.subscriptionRequestDelay, + ) protected def withSubscriptionPool[V]( - trustThreshold: PositiveInt, livenessMargin: NonNegativeInt, connectionPool: SequencerConnectionXPool, )(f: (SequencerSubscriptionPool, TestHealthListener) => V): V = { - val config = mkSubscriptionPoolConfig(trustThreshold, livenessMargin) + val config = mkSubscriptionPoolConfig(livenessMargin) val subscriptionPoolFactory = new SequencerSubscriptionPoolFactoryImpl( sequencerSubscriptionFactory = new TestSequencerSubscriptionXFactory(timeouts, loggerFactory), subscriptionHandlerFactory = TestSubscriptionHandlerXFactory, - clock = wallClock, timeouts = timeouts, loggerFactory = loggerFactory, ) @@ -232,6 +239,7 @@ trait ConnectionPoolTestHelpers { nbConnections: PositiveInt, trustThreshold: PositiveInt, attributesForConnection: Int => ConnectionAttributes, + responsesForConnection: PartialFunction[Int, TestResponses] = Map(), expectedSynchronizerIdO: Option[PhysicalSynchronizerId] = None, livenessMargin: 
NonNegativeInt, )(f: (SequencerSubscriptionPool, TestHealthListener) => V): V = @@ -239,11 +247,12 @@ trait ConnectionPoolTestHelpers { nbConnections, trustThreshold, attributesForConnection, + responsesForConnection, expectedSynchronizerIdO, ) { (connectionPool, _, _, _) => connectionPool.start().futureValueUS.valueOrFail("initialization") - withSubscriptionPool(trustThreshold, livenessMargin, connectionPool) { + withSubscriptionPool(livenessMargin, connectionPool) { (subscriptionPool, subscriptionPoolListener) => f(subscriptionPool, subscriptionPoolListener) } @@ -251,7 +260,7 @@ } -private object ConnectionPoolTestHelpers { +private[canton] object ConnectionPoolTestHelpers { import BaseTest.* lazy val failureUnavailable: Either[Exception, Nothing] = @@ -366,6 +375,7 @@ private object ConnectionPoolTestHelpers { private class TestSequencerConnectionXPoolFactory( attributesForConnection: Int => ConnectionAttributes, + responsesForConnection: PartialFunction[Int, TestResponses], validationBlocker: TestValidationBlocker, authConfig: AuthenticationTokenManagerConfig, member: Member, @@ -381,6 +391,7 @@ private object ConnectionPoolTestHelpers { private val connectionFactory = new TestInternalSequencerConnectionXFactory( attributesForConnection, + responsesForConnection, validationBlocker, futureSupervisor, timeouts, @@ -390,7 +401,8 @@ private object ConnectionPoolTestHelpers { val createdConnections: CreatedConnections = connectionFactory.createdConnections override def create( - initialConfig: SequencerConnectionXPoolConfig + initialConfig: SequencerConnectionXPoolConfig, + name: String, )(implicit ec: ExecutionContextExecutor, esf: ExecutionSequencerFactory, @@ -417,6 +429,7 @@ private object ConnectionPoolTestHelpers { sequencerConnections: SequencerConnections, expectedPSIdO: Option[PhysicalSynchronizerId], tracingConfig: TracingConfig, + name: String, )(implicit ec: ExecutionContextExecutor, esf: ExecutionSequencerFactory, @@ -427,6 +440,7 @@ private object ConnectionPoolTestHelpers { protected class TestInternalSequencerConnectionXFactory( attributesForConnection: Int => ConnectionAttributes, + responsesForConnection: PartialFunction[Int, TestResponses], validationBlocker: TestValidationBlocker, futureSupervisor: FutureSupervisor, timeouts: ProcessingTimeout, @@ -450,13 +464,17 @@ private object ConnectionPoolTestHelpers { ) ) - val responses = new TestResponses( - apiResponses = Iterator.continually(correctApiResponse), - handshakeResponses = Iterator.continually(successfulHandshake), - synchronizerAndSeqIdResponses = Iterator.continually(correctSynchronizerIdResponse), - staticParametersResponses = Iterator.continually(correctStaticParametersResponse), - acknowledgeResponses = Iterator.continually(positiveAcknowledgeResponse), - validationBlocker.delayF(index), + val responses = responsesForConnection.applyOrElse( + index, + (_: Int) => + new TestResponses( + apiResponses = Iterator.continually(correctApiResponse), + handshakeResponses = Iterator.continually(successfulHandshake), + synchronizerAndSeqIdResponses = Iterator.continually(correctSynchronizerIdResponse), + staticParametersResponses = Iterator.continually(correctStaticParametersResponse), + acknowledgeResponses = Iterator.continually(positiveAcknowledgeResponse), + validationBlocker.delayF(index), + ), ) val stubFactory = new TestSequencerConnectionXStubFactory(responses, loggerFactory) diff --git
a/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/GeneratorsSequencing.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/GeneratorsSequencing.scala index 991db59ca2..381268e6ad 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/GeneratorsSequencing.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/GeneratorsSequencing.scala @@ -35,6 +35,8 @@ final class GeneratorsSequencing(generatorsTopology: GeneratorsTopology) { implicit val sequencerConnectionArb: Arbitrary[SequencerConnection] = genArbitrary implicit val submissionRequestAmplificationArb: Arbitrary[SubmissionRequestAmplification] = genArbitrary + implicit val sequencerConnectionPoolDelaysArb: Arbitrary[SequencerConnectionPoolDelays] = + genArbitrary implicit val sequencerConnectionsArb: Arbitrary[SequencerConnections] = Arbitrary( for { @@ -47,11 +49,13 @@ final class GeneratorsSequencing(generatorsTopology: GeneratorsTopology) { .choose(0, connections.size - sequencerTrustThreshold.unwrap) .map(NonNegativeInt.tryCreate) submissionRequestAmplification <- submissionRequestAmplificationArb.arbitrary + sequencerConnectionPoolDelays <- sequencerConnectionPoolDelaysArb.arbitrary } yield SequencerConnections.tryMany( connections, sequencerTrustThreshold, sequencerLivenessMargin, submissionRequestAmplification, + sequencerConnectionPoolDelays, ) ) } diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/GrpcInternalSequencerConnectionXTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/GrpcInternalSequencerConnectionXTest.scala index 8d77b05b6c..27fe22ca04 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/GrpcInternalSequencerConnectionXTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/GrpcInternalSequencerConnectionXTest.scala @@ -3,14 +3,13 @@ package com.digitalasset.canton.sequencing -import com.digitalasset.canton.logging.{LogEntry, SuppressionRule} +import com.digitalasset.canton.logging.LogEntry import com.digitalasset.canton.sequencing.InternalSequencerConnectionX.{ SequencerConnectionXError, SequencerConnectionXState, } import com.digitalasset.canton.{BaseTest, FailOnShutdown, HasExecutionContext} import org.scalatest.wordspec.AnyWordSpec -import org.slf4j.event.Level.INFO class GrpcInternalSequencerConnectionXTest extends AnyWordSpec @@ -173,29 +172,6 @@ class GrpcInternalSequencerConnectionXTest } } - "retry if the server is unavailable during any request" in { - val responses = TestResponses( - apiResponses = Seq(failureUnavailable, correctApiResponse), - handshakeResponses = Seq(failureUnavailable, successfulHandshake), - synchronizerAndSeqIdResponses = Seq(failureUnavailable, correctSynchronizerIdResponse1), - staticParametersResponses = Seq(failureUnavailable, correctStaticParametersResponse), - ) - withConnection(responses) { (connection, listener) => - loggerFactory.assertLogsSeq(SuppressionRule.LevelAndAbove(INFO))( - { - connection.start().valueOrFail("start connection") - listener.shouldStabilizeOn(SequencerConnectionXState.Validated) - connection.attributes shouldBe Some(correctConnectionAttributes) - }, - forExactly(4, _) { - _.infoMessage should include("Waiting for 1ms before retrying...") - }, - ) - - responses.assertAllResponsesSent() - } - } - "validate the connection attributes after restart" in { val responses = TestResponses( apiResponses = 
Seq.fill(2)(correctApiResponse), diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/SequencerAggregatorPekkoTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/SequencerAggregatorPekkoTest.scala index 5f0ca6b667..54e6d59462 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/SequencerAggregatorPekkoTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/SequencerAggregatorPekkoTest.scala @@ -30,7 +30,6 @@ import com.digitalasset.canton.util.OrderedBucketMergeHub.{ NewConfiguration, } import com.digitalasset.canton.util.{EitherTUtil, OrderedBucketMergeConfig, ResourceUtil} -import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.canton.{ BaseTest, HasExecutionContext, @@ -335,7 +334,6 @@ class SequencerAggregatorPekkoTest priorEventO: Option[ProcessingSerializedEvent], event: SequencedSerializedEvent, sequencerId: SequencerId, - protocolVersion: ProtocolVersion, ): EitherT[FutureUnlessShutdown, SequencedEventValidationError[Nothing], Unit] = EitherTUtil.unitUS } diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPoolImplTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPoolImplTest.scala index 3d7e19d65e..252e7135d7 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPoolImplTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPoolImplTest.scala @@ -13,7 +13,7 @@ import com.digitalasset.canton.util.LoggerUtil import com.digitalasset.canton.{BaseTest, FailOnShutdown, HasExecutionContext, config} import org.scalatest.Assertion import org.scalatest.wordspec.AnyWordSpec -import org.slf4j.event.Level.WARN +import org.slf4j.event.Level.{INFO, WARN} import scala.concurrent.duration.* @@ -135,6 +135,79 @@ class SequencerConnectionXPoolImplTest } } + "retry a connection that fails to validate" in { + // Test the following scenario involving restarts: + // + // - start + // - getApi -> KO + // - restart + // - getApi -> OK, performHandshake -> KO + // - restart + // - getApi -> OK, performHandshake -> OK, getSynchronizerAndSequencerIds -> KO + // - restart + // - getApi -> OK, performHandshake -> OK, getSynchronizerAndSequencerIds -> OK, getStaticSynchronizerParameters -> KO + // - restart + // - getApi -> OK, performHandshake -> OK, getSynchronizerAndSequencerIds -> OK, getStaticSynchronizerParameters -> OK + // - failure, triggering a restart + // - getApi -> OK, performHandshake -> OK, getSynchronizerAndSequencerIds -> OK, getStaticSynchronizerParameters -> OK + val testResponses = TestResponses( + apiResponses = failureUnavailable +: Seq.fill(5)(correctApiResponse), + handshakeResponses = failureUnavailable +: Seq.fill(4)(successfulHandshake), + synchronizerAndSeqIdResponses = + failureUnavailable +: Seq.fill(3)(correctSynchronizerIdResponse1), + staticParametersResponses = + failureUnavailable +: Seq.fill(2)(correctStaticParametersResponse), + ) + + withConnectionPool( + nbConnections = PositiveInt.one, + trustThreshold = PositiveInt.one, + attributesForConnection = + index => mkConnectionAttributes(synchronizerIndex = 1, sequencerIndex = index), + responsesForConnection = { case 0 => testResponses }, + ) { (pool, createdConnections, listener, _) => + val minRestartConnectionDelay = + 
sequencerConnectionPoolDelays.minRestartDelay.duration.toMillis + val exponentialDelays = (0 until 4).map(minRestartConnectionDelay << _) + + def retryLogEntry(delay: Long) = + s"Scheduling restart after $delay millisecond" + (if (delay > 1) "s" else "") + + loggerFactory.assertLogsSeq( + SuppressionRule.LevelAndAbove(INFO) && SuppressionRule.LoggerNameContains( + "ConnectionHandler" + ) + )( + { + pool.start().futureValueUS.valueOrFail("initialization") + listener.shouldStabilizeOn(ComponentHealthState.Ok()) + }, + // 4 retries, due to one failure at each call + // The retry delay is exponential + _.map(_.message).filter(_.contains("Scheduling restart after")) + shouldBe exponentialDelays.map(retryLogEntry), + ) + + loggerFactory.assertLogsSeq( + SuppressionRule.LevelAndAbove(INFO) && SuppressionRule.LoggerNameContains( + "ConnectionHandler" + ) + )( + { + createdConnections(0).fail(reason = "test") + listener.shouldStabilizeOn(ComponentHealthState.Ok()) + }, + // ... but is reset after the connection has been validated + _.map(_.message).filter(_.contains("Scheduling restart after")).loneElement + shouldBe retryLogEntry(minRestartConnectionDelay), + ) + + // Ensure all responses were used, confirming that the proper retries took place. + // The test would fail if it were to consume more responses, because they default to failures. + testResponses.assertAllResponsesSent() + } + } + "take into account an expected physical synchronizer ID" in { withConnectionPool( nbConnections = PositiveInt.tryCreate(10), diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencedEventTestFixture.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencedEventTestFixture.scala index 71b8580c4b..b745a2c78c 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencedEventTestFixture.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencedEventTestFixture.scala @@ -117,8 +117,8 @@ class SequencedEventTestFixture( ).onShutdown(throw new RuntimeException("failed to create carlos event")).futureValue ) - // TODO(i26481): adjust when the new connection pool is stable - private val useNewConnectionPool = testedProtocolVersion >= ProtocolVersion.dev + // TODO(i27260): cleanup when the new connection pool is stable + private val useNewConnectionPool = true def mkAggregator( config: MessageAggregationConfig = MessageAggregationConfig( diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencedEventValidatorTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencedEventValidatorTest.scala index b8c102725f..07c5adb1e4 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencedEventValidatorTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencedEventValidatorTest.scala @@ -639,7 +639,6 @@ class SequencedEventValidatorTest syncCryptoApi, deliver4.timestamp, Some(deliver3.timestamp), - testedProtocolVersion, warnIfApproximate = false, ) .failOnShutdown diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala index 354213456d..2a18e8910c 100644 --- 
a/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala @@ -7,10 +7,17 @@ import cats.data.EitherT import cats.syntax.either.* import cats.syntax.foldable.* import com.daml.metrics.api.MetricsContext +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.* import com.digitalasset.canton.concurrent.Threading +import com.digitalasset.canton.config as cantonConfig import com.digitalasset.canton.config.* -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, NonNegativeLong, PositiveInt} +import com.digitalasset.canton.config.RequireTypes.{ + NonNegativeInt, + NonNegativeLong, + Port, + PositiveInt, +} import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto import com.digitalasset.canton.crypto.{ Fingerprint, @@ -25,6 +32,7 @@ import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown, Un import com.digitalasset.canton.logging.pretty.{Pretty, PrettyInstances} import com.digitalasset.canton.logging.{LogEntry, NamedLoggerFactory, NamedLogging, TracedLogger} import com.digitalasset.canton.metrics.{CommonMockMetrics, TrafficConsumptionMetrics} +import com.digitalasset.canton.networking.Endpoint import com.digitalasset.canton.protocol.messages.{DefaultOpenEnvelope, UnsignedProtocolMessage} import com.digitalasset.canton.protocol.{ DynamicSynchronizerParametersLookup, @@ -39,7 +47,10 @@ import com.digitalasset.canton.sequencing.InternalSequencerConnectionX.{ ConnectionAttributes, SequencerConnectionXHealth, } -import com.digitalasset.canton.sequencing.SequencerConnectionXPool.SequencerConnectionXPoolError +import com.digitalasset.canton.sequencing.SequencerConnectionXPool.{ + SequencerConnectionXPoolConfig, + SequencerConnectionXPoolError, +} import com.digitalasset.canton.sequencing.client.SendAsyncClientError.SendAsyncClientResponseError import com.digitalasset.canton.sequencing.client.SequencedEventValidationError.PreviousTimestampMismatch import com.digitalasset.canton.sequencing.client.SequencerClient.CloseReason.{ @@ -87,7 +98,7 @@ import com.digitalasset.canton.topology.DefaultTestIdentities.{ participant1, } import com.digitalasset.canton.topology.client.{SynchronizerTopologyClient, TopologySnapshot} -import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.tracing.{TraceContext, TracingConfig} import com.digitalasset.canton.util.EitherTUtil import com.digitalasset.canton.util.PekkoUtil.syntax.* import com.digitalasset.canton.version.{ @@ -785,8 +796,9 @@ final class SequencerClientTest } else Set(upgradeTime.immediatePredecessor, upgradeTime, upgradeTime.immediateSuccessor) - acknowledgedTimestamps = env.transport.acknowledgedTimestamps - .get() ++ env.pool.acknowledgedTimestamps.get() + acknowledgedTimestamps = + if (env.useNewConnectionPool) env.pool.acknowledgedTimestamps.get + else env.transport.acknowledgedTimestamps.get _ = acknowledgedTimestamps shouldBe expectedAcknowledgedTimestamps @@ -1075,7 +1087,7 @@ final class SequencerClientTest val env = RichEnvFactory.create(useNewConnectionPoolO = Some(false)) val testF = for { _ <- env.subscribeAfter() - _ <- env.changeTransport(secondTransport) + _ <- env.changeTransport(secondTransport, None) } yield { val originalSubscriber = env.transport.subscriber.value originalSubscriber.request.timestamp shouldBe None @@ -1109,7 +1121,7 @@ final class SequencerClientTest _ <- 
env.transport.subscriber.value.sendToHandler(nextDeliver) _ <- env.client.flushClean() - _ <- env.changeTransport(secondTransport) + _ <- env.changeTransport(secondTransport, None) } yield { val originalSubscriber = env.transport.subscriber.value originalSubscriber.request.timestamp shouldBe None @@ -1129,7 +1141,7 @@ final class SequencerClientTest // TODO(i26481): Enable new connection pool (test uses changeTransport()) val env = RichEnvFactory.create(useNewConnectionPoolO = Some(false)) val testF = for { - _ <- env.changeTransport(secondTransport) + _ <- env.changeTransport(secondTransport, None) _ <- env.sendAsync(Batch.empty(testedProtocolVersion)).value } yield { env.transport.lastSend.get() shouldBe None @@ -1149,7 +1161,7 @@ final class SequencerClientTest // TODO(i26481): Enable new connection pool (test uses changeTransport()) val env = RichEnvFactory.create(useNewConnectionPoolO = Some(false)) val testF = for { - _ <- env.changeTransport(secondTransport) + _ <- env.changeTransport(secondTransport, None) _ <- env.logout().value } yield { env.transport.logoutCalled shouldBe false @@ -1167,7 +1179,7 @@ final class SequencerClientTest val env = RichEnvFactory.create(useNewConnectionPoolO = Some(false)) val testF = for { _ <- env.subscribeAfter() - _ <- env.changeTransport(secondTransport) + _ <- env.changeTransport(secondTransport, None) _ <- env.sendAsync(Batch.empty(testedProtocolVersion)).value } yield { env.transport.lastSend.get() shouldBe None @@ -1190,7 +1202,8 @@ final class SequencerClientTest SequencerAlias.tryCreate("somethingElse"), daSequencerId, secondTransport, - ) + ), + None, ) _ <- env.sendAsync(Batch.empty(testedProtocolVersion)).value } yield { @@ -1209,26 +1222,31 @@ final class SequencerClientTest ) val env = RichEnvFactory.create() - val testF = for { - _ <- env.subscribeAfter() - error <- loggerFactory - .assertLogs( - env - .changeTransport( - SequencerTransports.default( - secondSequencerId, - secondTransport, - ) - ), - _.errorMessage shouldBe "Adding or removing sequencer subscriptions is not supported at the moment", - ) - .failed - } yield { - error - } - testF.futureValueUS shouldBe an[IllegalArgumentException] - testF.futureValueUS.getMessage shouldBe "Adding or removing sequencer subscriptions is not supported at the moment" + // When using the connection pool, this test does not make sense + if (!env.useNewConnectionPool) { + val testF = for { + _ <- env.subscribeAfter() + error <- loggerFactory + .assertLogs( + env + .changeTransport( + SequencerTransports.default( + secondSequencerId, + secondTransport, + ), + None, + ), + _.errorMessage shouldBe "Adding or removing sequencer subscriptions is not supported at the moment", + ) + .failed + } yield { + error + } + + testF.futureValueUS shouldBe an[IllegalArgumentException] + testF.futureValueUS.getMessage shouldBe "Adding or removing sequencer subscriptions is not supported at the moment" + } env.client.close() } } @@ -1316,16 +1334,23 @@ final class SequencerClientTest ) def changeTransport( - newTransport: SequencerClientTransport & SequencerClientTransportPekko + newTransport: SequencerClientTransport & SequencerClientTransportPekko, + newConnectionPoolConfigO: Option[SequencerConnectionXPoolConfig], )(implicit ev: Client <:< RichSequencerClient): FutureUnlessShutdown[Unit] = changeTransport( - SequencerTransports.default(daSequencerId, newTransport) + SequencerTransports.default(daSequencerId, newTransport), + newConnectionPoolConfigO, ) - def changeTransport(sequencerTransports: 
SequencerTransports[?])(implicit + def changeTransport( + sequencerTransports: SequencerTransports[?], + newConnectionPoolConfigO: Option[SequencerConnectionXPoolConfig], + )(implicit ev: Client <:< RichSequencerClient ): FutureUnlessShutdown[Unit] = - ev(client).changeTransport(sequencerTransports) + ev(client) + .changeTransport(sequencerTransports, newConnectionPoolConfigO) + .valueOrFail("changeTransport") def sendAsync( batch: Batch[DefaultOpenEnvelope], @@ -1369,6 +1394,11 @@ final class SequencerClientTest Set.empty ) + override def getTime(timeout: Duration)(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, String, Option[CantonTimestamp]] = + EitherT.rightT(None) + override def logout()(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, Status, Unit] = { @@ -1509,7 +1539,14 @@ final class SequencerClientTest override val health: SequencerConnectionXHealth = new SequencerConnectionXHealth.AlwaysValidated(s"$name-health", logger) - override def config: ConnectionXConfig = ??? + override def config: ConnectionXConfig = ConnectionXConfig( + name = name, + endpoint = Endpoint("dummy-endpoint", Port.tryCreate(0)), + transportSecurity = false, + customTrustCertificates = None, + expectedSequencerIdO = None, + tracePropagation = TracingConfig.Propagation.Disabled, + ) override def attributes: ConnectionAttributes = ConnectionAttributes( @@ -1551,6 +1588,10 @@ final class SequencerClientTest traceContext: TraceContext ): EitherT[FutureUnlessShutdown, Status, Unit] = ??? + override def getTime(timeout: Duration)(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, String, Option[CantonTimestamp]] = ??? + override def downloadTopologyStateForInit( request: TopologyStateForInitRequest, timeout: Duration, @@ -1622,7 +1663,13 @@ final class SequencerClientTest traceContext: TraceContext ): EitherT[FutureUnlessShutdown, SequencerConnectionXPoolError, Unit] = ??? - override def config: SequencerConnectionXPool.SequencerConnectionXPoolConfig = ??? 
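+    // A concrete, minimal pool config (the single mock connection, trust
+    // threshold 1, zero restart delays) replaces the former `???` stub, so
+    // test code that reads `pool.config` no longer throws.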
+ override def config: SequencerConnectionXPool.SequencerConnectionXPoolConfig = + SequencerConnectionXPoolConfig( + connections = NonEmpty(Seq, connection.config), + trustThreshold = PositiveInt.one, + minRestartConnectionDelay = cantonConfig.NonNegativeFiniteDuration.Zero, + maxRestartConnectionDelay = cantonConfig.NonNegativeFiniteDuration.Zero, + ) override def updateConfig(newConfig: SequencerConnectionXPool.SequencerConnectionXPoolConfig)( implicit traceContext: TraceContext @@ -1838,9 +1885,8 @@ final class SequencerClientTest val connectionPool = MockPool() - // TODO(i26481): adjust when the new connection pool is stable - val useNewConnectionPool = - useNewConnectionPoolO.getOrElse(testedProtocolVersion >= ProtocolVersion.dev) + // TODO(i26481): adjust when everything in this test can be enabled for the connection pool + val useNewConnectionPool = useNewConnectionPoolO.getOrElse(true) val client = new RichSequencerClientImpl( psid, synchronizerPredecessor = synchronizerPredecessor, diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/time/fetcher/ExpiringInMemorySequencingTimeReadingsTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/time/fetcher/ExpiringInMemorySequencingTimeReadingsTest.scala new file mode 100644 index 0000000000..dccf96cb56 --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/time/fetcher/ExpiringInMemorySequencingTimeReadingsTest.scala @@ -0,0 +1,143 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client.time.fetcher + +import com.digitalasset.canton.BaseTest +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.sequencing.client.time.fetcher.SequencingTimeFetcherTest.{ + sequencerIds, + ts, +} +import com.digitalasset.canton.sequencing.client.time.fetcher.SequencingTimeReadings.TimeReading +import com.digitalasset.canton.time.{ + Clock, + NonNegativeFiniteDuration, + PositiveFiniteDuration, + SimClock, +} +import com.digitalasset.canton.topology.SequencerId +import org.scalatest.wordspec.AnyWordSpec + +import java.util.concurrent.atomic.AtomicReference + +class ExpiringInMemorySequencingTimeReadingsTest extends AnyWordSpec with BaseTest { + + "Getting time readings" should { + "expire old time readings and filter time readings as requested" in { + val t = CantonTimestamp.MaxValue + val timesRef = + new AtomicReference( + Map( + sequencerIds(0) -> TimeReading( + reading = Some(t), + receivedAt = CantonTimestamp.Epoch, + ), + sequencerIds(1) -> TimeReading( + reading = Some(t), + receivedAt = ts(1), + ), + sequencerIds(2) -> TimeReading( + reading = Some(t), + receivedAt = ts(2), + ), + ) + ) + val readings = + newTimeReadings(localClock = new SimClock(ts(2), loggerFactory), timesRef = timesRef) + + val expectedUnexpiredTimes = + Map( + sequencerIds(1) -> TimeReading( + reading = Some(t), + receivedAt = ts(1), + ), + sequencerIds(2) -> TimeReading( + reading = Some(t), + receivedAt = ts(2), + ), + ) + + readings.getTimeReadings(maxTimeReadingsAge = None) shouldBe expectedUnexpiredTimes + timesRef.get() shouldBe expectedUnexpiredTimes + + readings.getTimeReadings(maxTimeReadingsAge = + Some(NonNegativeFiniteDuration.tryOfSeconds(1)) + ) shouldBe + Map( + sequencerIds(2) -> TimeReading( + reading = 
Some(t), + receivedAt = ts(2), + ) + ) + timesRef.get() shouldBe expectedUnexpiredTimes + + readings + .getTimeReadings(maxTimeReadingsAge = + Some(NonNegativeFiniteDuration.tryOfSeconds(0)) + ) shouldBe Map.empty + } + } + + "Computing the valid sequencing time interval" should { + "work correctly" in { + val readings = newTimeReadings() + Table( + ("number of timestamps", "trust threshold", "expected result"), + (0, PositiveInt.tryCreate(1), None), + (1, PositiveInt.tryCreate(1), Some(ts(0) -> ts(0))), + (2, PositiveInt.tryCreate(1), Some(ts(0) -> ts(1))), + (2, PositiveInt.tryCreate(2), None), + (3, PositiveInt.tryCreate(2), Some(ts(1) -> ts(1))), + (4, PositiveInt.tryCreate(2), Some(ts(1) -> ts(2))), + (4, PositiveInt.tryCreate(3), None), + (5, PositiveInt.tryCreate(3), Some(ts(2) -> ts(2))), + (6, PositiveInt.tryCreate(3), Some(ts(2) -> ts(3))), + ).forEvery { case (numTimestamps, trustThreshold, expectedResult) => + val timestamps = (0 until numTimestamps).map(i => ts(i)).toVector + readings.validTimeInterval( + timestamps, + trustThreshold, + ) shouldBe expectedResult + } + } + } + + "Recording a reading" should { + "keep the latest reading and its reception time" in { + val timesRef = + new AtomicReference( + Map( + sequencerIds(0) -> TimeReading(Some(ts(0)), ts(0)), + sequencerIds(1) -> TimeReading(Some(ts(1)), ts(0)), + sequencerIds(2) -> TimeReading(Some(ts(1)), ts(0)), + ) + ) + val readings = newTimeReadings(timesRef = timesRef) + + readings.recordReading(sequencerIds(0), Some(ts(0)), ts(1)) + readings.recordReading(sequencerIds(1), Some(ts(0)), ts(1)) + readings.recordReading(sequencerIds(2), Some(ts(2)), ts(2)) + readings.recordReading(sequencerIds(3), Some(ts(3)), ts(3)) + + timesRef.get() shouldBe Map( + sequencerIds(0) -> TimeReading(Some(ts(0)), ts(1)), + sequencerIds(1) -> TimeReading(Some(ts(1)), ts(0)), + sequencerIds(2) -> TimeReading(Some(ts(2)), ts(2)), + sequencerIds(3) -> TimeReading(Some(ts(3)), ts(3)), + ) + } + } + + private def newTimeReadings( + localClock: Clock = wallClock, + timesRef: AtomicReference[Map[SequencerId, TimeReading]] = new AtomicReference(), + ) = + new ExpiringInMemorySequencingTimeReadings( + localClock, + timeReadingsRetention = PositiveFiniteDuration.tryOfSeconds(2), + loggerFactory = loggerFactory, + timesRef, + ) +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/time/fetcher/OneCallAtATimeSourcesAccessorTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/time/fetcher/OneCallAtATimeSourcesAccessorTest.scala new file mode 100644 index 0000000000..8f4eb54aa8 --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/time/fetcher/OneCallAtATimeSourcesAccessorTest.scala @@ -0,0 +1,257 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
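Aside: the expiry rule exercised by the test above can be stated compactly. A minimal, self-contained sketch under simplified types (the production code uses CantonTimestamp and PositiveFiniteDuration; `pruneExpired` is a hypothetical name):

    final case class Reading(receivedAtMicros: Long)

    // Keep a reading only while its age is strictly below the retention window,
    // matching the test: with a 2s retention and the clock at ts(2), the reading
    // received at the epoch (age exactly 2s) is dropped.
    def pruneExpired(
        readings: Map[String, Reading],
        nowMicros: Long,
        retentionMicros: Long,
    ): Map[String, Reading] =
      readings.filter { case (_, r) => nowMicros - r.receivedAtMicros < retentionMicros }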
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client.time.fetcher + +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, PromiseUnlessShutdown} +import com.digitalasset.canton.sequencing.client.time.fetcher.OneCallAtATimeSourcesAccessor.QueryTimeSourcesRunningTask +import com.digitalasset.canton.sequencing.client.time.fetcher.SequencingTimeFetcherTest.{ + TestTimeSource, + TestTimeSourcesPool, + aTimeout, + sequencerIds, + ts, +} +import com.digitalasset.canton.time.{Clock, SimClock} +import com.digitalasset.canton.{BaseTest, HasExecutionContext} +import org.scalatest.wordspec.AsyncWordSpec + +import java.util.concurrent.atomic.AtomicReference + +class OneCallAtATimeSourcesAccessorTest + extends AsyncWordSpec + with BaseTest + with HasExecutionContext { + + "Querying time sources" when { + + "no other concurrent call is running, regardless of 'concurrent' being true" should { + "invoke the time sources with the passed timeout and return their results' count" in { + val sequencingTimeFutures = + Vector( + Seq.fill(2)(FutureUnlessShutdown.pure(Some(ts(0)))), + Seq.fill(2)(FutureUnlessShutdown.pure(None)), + ) + val timeSources = sequencingTimeFutures.map(TestTimeSource(_*)) + val timeSourcesPool = new TestTimeSourcesPool(timeSources) + val readings = mock[SequencingTimeReadings] + val timeSourcesAccessor = newTimeSourcesAccessor(readings) + + def invokeSources(concurrent: Boolean) = + timeSourcesAccessor + .queryTimeSources( + timeSourcesPool.timeSources(PositiveInt.tryCreate(2), exclusions = Set.empty).toMap, + aTimeout, + concurrent, + ) + + for { + r1 <- invokeSources(true) + r2 <- invokeSources(false) + } yield { + verify(readings, times(2)).recordReading( + eqTo(sequencerIds(0)), + eqTo(Some(ts(0))), + any[CantonTimestamp], + ) + verify(readings, times(2)).recordReading( + eqTo(sequencerIds(1)), + eqTo(None), + any[CantonTimestamp], + ) + r1 shouldBe Map(sequencerIds(0) -> Some(ts(0)), sequencerIds(1) -> None) + r2 shouldBe Map(sequencerIds(0) -> Some(ts(0)), sequencerIds(1) -> None) + timeSources.map(_.invocationsCountRef.get()) should contain only 2 + timeSources.flatMap(_.timeoutsRef.get()) should contain theSameElementsAs + Seq.fill(4)(aTimeout) + } + }.failOnShutdown + } + + "another call is running and 'concurrent' is true" should { + "run the invocations concurrently" in { + val sequencingTimePromises = + Vector.fill(4)( + PromiseUnlessShutdown.unsupervised[Option[CantonTimestamp]]() + ) + val timeSources = sequencingTimePromises.map(p => TestTimeSource(p.futureUS)) + val timeSourcesPool = new TestTimeSourcesPool(timeSources) + val readings = mock[SequencingTimeReadings] + val runningTaskRef = new AtomicReference(Option.empty[QueryTimeSourcesRunningTask]) + val timeSourcesAccessor = + newTimeSourcesAccessor(readings, runningTaskRef = runningTaskRef) + + val f1 = + timeSourcesAccessor + .queryTimeSources( + timeSourcesPool.timeSources(PositiveInt.tryCreate(2), exclusions = Set.empty).toMap, + aTimeout, + ) + eventually() { + runningTaskRef.get().isDefined shouldBe true + } + val f2 = + timeSourcesAccessor + .queryTimeSources( + timeSourcesPool + .timeSources( + PositiveInt.tryCreate(2), + exclusions = sequencerIds.slice(0, 2).toSet, + ) + .toMap, + aTimeout, + concurrent = true, + ) + + sequencingTimePromises(2).outcome_(Some(ts(0))) + 
sequencingTimePromises(3).outcome_(Some(ts(0))) + for { + r2 <- f2 + _ = sequencingTimePromises(0).outcome_(Some(ts(0))) + _ = sequencingTimePromises(1).outcome_(Some(ts(0))) + r1 <- f1 + } yield { + for (i <- 0 to 3) + verify(readings, times(1)).recordReading( + eqTo(sequencerIds(i)), + eqTo(Some(ts(0))), + any[CantonTimestamp], + ) + r1 shouldBe Map(sequencerIds(0) -> Some(ts(0)), sequencerIds(1) -> Some(ts(0))) + r2 shouldBe Map(sequencerIds(2) -> Some(ts(0)), sequencerIds(3) -> Some(ts(0))) + timeSources.map(_.invocationsCountRef.get()) should contain only 1 + timeSources.flatMap(_.timeoutsRef.get()) should contain theSameElementsAs + Seq.fill(4)(aTimeout) + } + }.failOnShutdown + } + + "another call is running and 'concurrent' is false" when { + "the timeout does not expire" should { + "call every source only once and return the aggregated result" in { + val sequencingTimePromises = + Vector.fill(3)( + PromiseUnlessShutdown.unsupervised[Option[CantonTimestamp]]() + ) + val timeSources = sequencingTimePromises.map(p => TestTimeSource(p.futureUS)) + val timeSourcesPool = new TestTimeSourcesPool(timeSources) + val readings = mock[SequencingTimeReadings] + val runningTaskRef = new AtomicReference(Option.empty[QueryTimeSourcesRunningTask]) + val timeSourcesAccessor = + newTimeSourcesAccessor(readings, runningTaskRef = runningTaskRef) + + val f1 = + timeSourcesAccessor + .queryTimeSources( + timeSourcesPool.timeSources(PositiveInt.tryCreate(2), exclusions = Set.empty).toMap, + aTimeout, + ) + eventually() { + runningTaskRef.get().isDefined shouldBe true + } + val f2 = + timeSourcesAccessor + .queryTimeSources( + timeSourcesPool + .timeSources( + PositiveInt.tryCreate(2), + exclusions = Set(sequencerIds(0)), // Overlap == sequencerIds(1) + ) + .toMap, + aTimeout, + ) + + sequencingTimePromises(0).outcome_(Some(ts(0))) + sequencingTimePromises(1).outcome_(Some(ts(0))) + for { + r1 <- f1 + _ = sequencingTimePromises(2).outcome_(Some(ts(0))) + r2 <- f2 + } yield { + for (i <- 0 to 2) + verify(readings, times(1)).recordReading( + eqTo(sequencerIds(i)), + eqTo(Some(ts(0))), + any[CantonTimestamp], + ) + r1 shouldBe Map(sequencerIds(0) -> Some(ts(0)), sequencerIds(1) -> Some(ts(0))) + r2 shouldBe Map( + sequencerIds(0) -> Some(ts(0)), + sequencerIds(1) -> Some(ts(0)), + sequencerIds(2) -> Some(ts(0)), + ) + timeSources.map(_.invocationsCountRef.get()) should contain only 1 + timeSources.flatMap(_.timeoutsRef.get()) should contain theSameElementsAs + Seq.fill(3)(aTimeout) + } + }.failOnShutdown + } + + "the timeout expires" should { + "call every source only once and return the new result only" in { + val sequencingTimePromises = + Vector.fill(3)( + PromiseUnlessShutdown.unsupervised[Option[CantonTimestamp]]() + ) + val timeSources = sequencingTimePromises.map(p => TestTimeSource(p.futureUS)) + val timeSourcesPool = new TestTimeSourcesPool(timeSources) + val simClock = new SimClock(CantonTimestamp.Epoch, loggerFactory) + val readings = mock[SequencingTimeReadings] + val runningTaskRef = new AtomicReference(Option.empty[QueryTimeSourcesRunningTask]) + val timeSourcesAccessor = newTimeSourcesAccessor(readings, simClock, runningTaskRef) + + timeSourcesAccessor + .queryTimeSources( + timeSourcesPool.timeSources(PositiveInt.tryCreate(2), exclusions = Set.empty).toMap, + aTimeout, + ) + .discard + eventually() { + runningTaskRef.get().isDefined shouldBe true + } + val f2 = + timeSourcesAccessor + .queryTimeSources( + timeSourcesPool + .timeSources( + PositiveInt.tryCreate(2), + exclusions = 
Set(sequencerIds(0)), // Overlap == sequencerIds(1) + ) + .toMap, + aTimeout, + ) + + sequencingTimePromises(1).outcome_(Some(ts(0))) + sequencingTimePromises(2).outcome_(Some(ts(0))) + simClock.advance(aTimeout.duration) + f2.map { result => + for (i <- 1 to 2) + verify(readings, times(1)).recordReading( + eqTo(sequencerIds(i)), + eqTo(Some(ts(0))), + any[CantonTimestamp], + ) + result shouldBe Map(sequencerIds(2) -> Some(ts(0))) + eventually() { + timeSources.map(_.invocationsCountRef.get()) should contain only 1 + timeSources.flatMap(_.timeoutsRef.get()) should contain theSameElementsAs + Seq.fill(3)(aTimeout) + } + } + }.failOnShutdown + } + } + } + + private def newTimeSourcesAccessor( + readings: SequencingTimeReadings, + localClock: Clock = wallClock, + runningTaskRef: AtomicReference[Option[QueryTimeSourcesRunningTask]] = new AtomicReference( + Option.empty[QueryTimeSourcesRunningTask] + ), + ) = + new OneCallAtATimeSourcesAccessor(localClock, readings, loggerFactory, runningTaskRef) +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/time/fetcher/SequencingTimeFetcherTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/time/fetcher/SequencingTimeFetcherTest.scala new file mode 100644 index 0000000000..3432d3bfe2 --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/time/fetcher/SequencingTimeFetcherTest.scala @@ -0,0 +1,393 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client.time.fetcher + +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.sequencing.client.time.fetcher.SequencingTimeFetcher.* +import com.digitalasset.canton.sequencing.client.time.fetcher.SequencingTimeReadings.TimeReading +import com.digitalasset.canton.sequencing.client.time.fetcher.TimeSourcesAccessor.TimeSources +import com.digitalasset.canton.time.{ + Clock, + NonNegativeFiniteDuration, + PositiveFiniteDuration, + SimClock, +} +import com.digitalasset.canton.topology.{SequencerId, UniqueIdentifier} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.{BaseTest, HasExecutionContext} +import org.scalatest.Assertions.fail +import org.scalatest.wordspec.AsyncWordSpec + +import java.util.concurrent.atomic.{AtomicInteger, AtomicReference} +import scala.collection.immutable.Queue + +class SequencingTimeFetcherTest extends AsyncWordSpec with BaseTest with HasExecutionContext { + + import SequencingTimeFetcherTest.* + + "Asking the current sequencing time info" should { + "get the time readings and produce a result" in { + val timeReadingsMock = mock[SequencingTimeReadings] + when(timeReadingsMock.getTimeReadings(any[Option[NonNegativeFiniteDuration]])) + .thenReturn(Map.empty) + when( + timeReadingsMock.validTimeInterval(any[Vector[CantonTimestamp]], any[PositiveInt])( + any[TraceContext] + ) + ) + .thenReturn(None) + val timeSourcesAccessorMock = mock[TimeSourcesAccessor] + when(timeSourcesAccessorMock.timeReadings).thenReturn(timeReadingsMock) + val fetcher = newTimeFetcher(timeSourcesAccessorMock) + + fetcher.currentSequencingTimeInfo(None) shouldBe + aSequencingTimeInfo + verify(timeReadingsMock, times(1)).getTimeReadings(eqTo(None)) + 
verify(timeReadingsMock, times(1)) + .validTimeInterval(eqTo(Vector.empty), eqTo(PositiveInt.tryCreate(2)))(any[TraceContext]) + succeed + } + } + + "Checking whether the synchronizer has reached a sequencing time" when { + + "there are enough positives" should { + "return true" in { + val t = CantonTimestamp.MaxValue + val timeReadingsMock = mock[SequencingTimeReadings] + when(timeReadingsMock.getTimeReadings(any[Option[NonNegativeFiniteDuration]])).thenReturn( + Map( + sequencerIds(0) -> TimeReading(Some(t), ts(0)), + sequencerIds(1) -> TimeReading(Some(t), ts(0)), + ) + ) + val timeSourcesAccessorMock = mock[TimeSourcesAccessor] + when(timeSourcesAccessorMock.timeReadings).thenReturn(timeReadingsMock) + val fetcher = newTimeFetcher(timeSourcesAccessorMock) + + fetcher.hasReached(t, aTimeout).map { result => + verify(timeReadingsMock, times(1)).getTimeReadings(eqTo(None)) + result shouldBe true + } + }.failOnShutdown + } + + "there aren't enough positives" when { + + "there aren't enough time sources" should { + "return false immediately" in { + val timeReadingsMock = mock[SequencingTimeReadings] + when(timeReadingsMock.getTimeReadings(any[Option[NonNegativeFiniteDuration]])).thenReturn( + Map(sequencerIds(0) -> TimeReading(Some(CantonTimestamp.MaxValue), ts(0))) + ) + val timeSourcesAccessorMock = mock[TimeSourcesAccessor] + when(timeSourcesAccessorMock.timeReadings).thenReturn(timeReadingsMock) + val timeSourcesPool = + spy( + new TestTimeSourcesPool( + sources = Seq.empty, + trustThreshold = PositiveInt.tryCreate(2), + ) + ) + + val fetcher = newTimeFetcher(timeSourcesAccessorMock, timeSourcesPool) + + fetcher.hasReached(CantonTimestamp.MaxValue, aTimeout).map { result => + verify(timeSourcesAccessorMock, times(1)).timeReadings + verify(timeReadingsMock, times(1)).getTimeReadings(eqTo(None)) + verify(timeSourcesPool, times(1)) + .timeSources(eqTo(PositiveInt.tryCreate(1)), eqTo(Set(sequencerIds(0))))( + any[TraceContext] + ) + result shouldBe false + } + }.failOnShutdown + } + + "there are enough time sources" when { + + "querying the time sources and receiving enough positives" should { + "return true" in { + val timeReadingsMock = mock[SequencingTimeReadings] + when(timeReadingsMock.getTimeReadings(any[Option[NonNegativeFiniteDuration]])) + .thenReturn( + Map(sequencerIds(0) -> TimeReading(Some(CantonTimestamp.MaxValue), ts(0))), + Map( + sequencerIds(0) -> TimeReading(Some(CantonTimestamp.MaxValue), ts(0)), + sequencerIds(1) -> TimeReading(Some(CantonTimestamp.MaxValue), ts(0)), + ), + ) + val timeSourcesAccessorMock = mock[TimeSourcesAccessor] + when(timeSourcesAccessorMock.timeReadings).thenReturn(timeReadingsMock) + when( + timeSourcesAccessorMock.queryTimeSources( + any[TimeSources], + any[PositiveFiniteDuration], + any[Boolean], + )(any[TraceContext]) + ).thenReturn( + FutureUnlessShutdown.pure( + Map( + sequencerIds(1) -> Some(CantonTimestamp.MaxValue) + ) + ) + ) + val timeSourcesPool = + spy( + new TestTimeSourcesPool( + sources = Seq.fill(2)( + TestTimeSource( + FutureUnlessShutdown.pure(Some(CantonTimestamp.MaxValue)) + ) + ), + trustThreshold = PositiveInt.tryCreate(2), + ) + ) + + val fetcher = newTimeFetcher(timeSourcesAccessorMock, timeSourcesPool) + + fetcher + .hasReached( + CantonTimestamp.MaxValue, + aTimeout, + maxTimeReadingsAge = Some(NonNegativeFiniteDuration.Zero), + ) + .map { result => + verify(timeReadingsMock, times(1)).getTimeReadings( + eqTo(Some(NonNegativeFiniteDuration.Zero)) + ) + verify(timeReadingsMock, times(1)).getTimeReadings(eqTo(None)) + 
verify(timeSourcesAccessorMock, times(1)).queryTimeSources( + argThat[TimeSources](_.keySet == Set(sequencerIds(1))), + eqTo(aTimeout), + eqTo(false), + )(any[TraceContext]) + verify(timeSourcesPool, times(1)) + .timeSources(eqTo(PositiveInt.tryCreate(1)), eqTo(Set(sequencerIds(0))))( + any[TraceContext] + ) + result shouldBe true + } + }.failOnShutdown + } + + "querying the time sources and not receiving enough positives" when { + + "there is no time left" should { + "return false" in { + val timeReadingsMock = mock[SequencingTimeReadings] + when(timeReadingsMock.getTimeReadings(any[Option[NonNegativeFiniteDuration]])) + .thenReturn( + Map(sequencerIds(0) -> TimeReading(Some(CantonTimestamp.MaxValue), ts(0))), + Map( + sequencerIds(0) -> TimeReading(Some(CantonTimestamp.MaxValue), ts(0)), + sequencerIds(1) -> TimeReading(None, ts(0)), + ), + ) + val timeSourcesPool = + spy( + new TestTimeSourcesPool( + sources = Seq.fill(2)( + TestTimeSource( + FutureUnlessShutdown.pure(Some(CantonTimestamp.MaxValue)) + ) + ), + trustThreshold = PositiveInt.tryCreate(2), + ) + ) + val simClock = new SimClock(CantonTimestamp.Epoch, loggerFactory) + val timeSourcesAccessorSpiedFake = + spy( + new TestTimeSourcesAccessor( + timeReadingsMock, + onQuery = () => simClock.advance(aTimeout.duration), + ) + ) + + val fetcher = newTimeFetcher(timeSourcesAccessorSpiedFake, timeSourcesPool, simClock) + + fetcher.hasReached(CantonTimestamp.MaxValue, aTimeout).map { result => + verify(timeReadingsMock, times(2)).getTimeReadings(eqTo(None)) + verify(timeSourcesAccessorSpiedFake, times(1)).queryTimeSources( + argThat[TimeSources]( + _.keySet == Set(sequencerIds(1)) + ), + eqTo(aTimeout), + eqTo(false), + )(any[TraceContext]) + verify(timeSourcesPool, times(1)) + .timeSources(eqTo(PositiveInt.tryCreate(1)), eqTo(Set(sequencerIds(0))))( + any[TraceContext] + ) + result shouldBe false + } + }.failOnShutdown + } + + "there is time left" should { + "recurse" in { + val timeReadingsMock = mock[SequencingTimeReadings] + when(timeReadingsMock.getTimeReadings(any[Option[NonNegativeFiniteDuration]])) + .thenReturn( + Map(sequencerIds(0) -> TimeReading(Some(CantonTimestamp.MaxValue), ts(0))), + Map( + sequencerIds(0) -> TimeReading(Some(CantonTimestamp.MaxValue), ts(0)), + sequencerIds(1) -> TimeReading(None, ts(0)), + ), + ) + val timeSourcesPool = + spy( + new TestTimeSourcesPool( + sources = Seq.fill(3)( + TestTimeSource( + FutureUnlessShutdown.pure(Some(CantonTimestamp.MaxValue)) + ) + ), + trustThreshold = PositiveInt.tryCreate(2), + ) + ) + val simClock = new SimClock(CantonTimestamp.Epoch, loggerFactory) + val timeSourcesAccessorSpiedFake = + spy( + new TestTimeSourcesAccessor( + timeReadingsMock, + onQuery = () => simClock.advance(aTimeout.duration.dividedBy(2L)), + ) + ) + + val fetcher = newTimeFetcher(timeSourcesAccessorSpiedFake, timeSourcesPool, simClock) + + fetcher + .hasReached( + CantonTimestamp.MaxValue, + aTimeout, + aMaxTimeReadingsAge, + ) + .map { result => + verify(timeReadingsMock, times(1)).getTimeReadings(eqTo(aMaxTimeReadingsAge)) + verify(timeReadingsMock, times(3)).getTimeReadings(eqTo(None)) + verify(timeSourcesAccessorSpiedFake, times(1)).queryTimeSources( + argThat[TimeSources]( + _.keySet == Set(sequencerIds(1)) + ), + eqTo(aTimeout), + eqTo(false), + )(any[TraceContext]) + verify(timeSourcesAccessorSpiedFake, times(1)).queryTimeSources( + argThat[TimeSources]( + _.keySet == Set(sequencerIds(2)) + ), + eqTo(PositiveFiniteDuration.tryCreate(aTimeout.duration.dividedBy(2L))), + eqTo(false), + 
)(any[TraceContext]) + verify(timeSourcesPool, times(1)) + .timeSources(eqTo(PositiveInt.tryCreate(1)), eqTo(Set(sequencerIds(0))))( + any[TraceContext] + ) + verify(timeSourcesPool, times(1)) + .timeSources( + eqTo(PositiveInt.tryCreate(1)), + eqTo(sequencerIds.slice(0, 2).toSet), + )( + any[TraceContext] + ) + result shouldBe false + } + }.failOnShutdown + } + } + } + } + } + + private def newTimeFetcher( + timeSourcesAccessor: TimeSourcesAccessor, + timeSourcesPool: TimeSourcesPool = new TestTimeSourcesPool(), + localClock: Clock = wallClock, + ) = + new SequencingTimeFetcher( + timeSourcesPool, + timeSourcesAccessor, + localClock, + loggerFactory, + ) +} + +object SequencingTimeFetcherTest { + + private[fetcher] class TestTimeSourcesPool( + sources: Seq[TestTimeSource] = Seq.empty, + trustThreshold: PositiveInt = PositiveInt.tryCreate(2), + ) extends SequencingTimeFetcher.TimeSourcesPool { + + override def readTrustThreshold(): PositiveInt = + trustThreshold + + override def timeSources(count: PositiveInt, exclusions: Set[SequencerId])(implicit + traceContext: TraceContext + ): Seq[ + (SequencerId, PositiveFiniteDuration => FutureUnlessShutdown[Option[CantonTimestamp]]) + ] = + sources.zipWithIndex + .map { case (source, i) => + sequencerIds(i) -> ((timeout: PositiveFiniteDuration) => source.fetchTime(timeout)) + } + .filterNot { case (sequencerId, _) => exclusions.contains(sequencerId) } + .take(count.unwrap) + } + + private[fetcher] final case class TestTimeSource( + results: FutureUnlessShutdown[Option[CantonTimestamp]]* + ) { + val invocationsCountRef = new AtomicInteger(0) + val timeoutsRef: AtomicReference[Seq[PositiveFiniteDuration]] = + new AtomicReference(Queue.empty[PositiveFiniteDuration]) + + def fetchTime( + timeout: PositiveFiniteDuration + ): FutureUnlessShutdown[Option[CantonTimestamp]] = { + val idx = invocationsCountRef.getAndIncrement() + timeoutsRef.updateAndGet(_ :+ timeout) + Option + .when(results.isDefinedAt(idx))(results(idx)) + .getOrElse(fail(s"Time source invoked more than ${results.size} times")) + } + } + + private class TestTimeSourcesAccessor( + readings: SequencingTimeReadings, + onQuery: () => Unit, + ) extends TimeSourcesAccessor { + override def timeReadings: SequencingTimeReadings = readings + + override def queryTimeSources( + timeSources: TimeSources, + timeout: PositiveFiniteDuration, + concurrent: Boolean, + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Map[SequencerId, Option[CantonTimestamp]]] = { + onQuery() + FutureUnlessShutdown.pure(Map.empty) + } + } + + private[fetcher] def ts(seconds: Int) = CantonTimestamp.Epoch.plusSeconds(seconds.toLong) + + private[fetcher] val sequencerIds: Seq[SequencerId] = + (0 to 3).map(n => SequencerId(UniqueIdentifier.tryCreate("ns", s"$n"))) + + private[fetcher] val aTimeout = PositiveFiniteDuration.tryOfSeconds(1) + + private[fetcher] val aMaxTimeReadingsAge = Some(NonNegativeFiniteDuration.tryOfDays(1)) + + private[fetcher] val aSequencingTimeInfo = + SequencingTimeInfo( + validTimeInterval = None, + forTrustThreshold = PositiveInt.tryCreate(2), + basedOnTimeReadings = Map.empty, + ) +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/store/PendingOperationStoreTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/store/PendingOperationStoreTest.scala new file mode 100644 index 0000000000..32ef040101 --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/store/PendingOperationStoreTest.scala @@ -0,0 +1,311 @@ 
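Aside: the `hasReached` scenarios above all reduce to a counting rule. A minimal sketch under assumed names (`hasReachedSketch` is illustrative only; the real logic lives in SequencingTimeFetcher and additionally queries further time sources before giving up):

    // The synchronizer counts as having reached `target` once at least
    // `trustThreshold` sequencers report a time at or beyond it; sequencers
    // without a reading (None) do not count.
    def hasReachedSketch(
        readings: Seq[Option[Long]], // latest known reading per sequencer
        target: Long,
        trustThreshold: Int,
    ): Boolean =
      readings.count(_.exists(_ >= target)) >= trustThreshold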
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.store + +import cats.implicits.catsSyntaxEitherId +import com.digitalasset.canton.config.CantonRequireTypes.NonEmptyString +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.protobuf.VersionedMessageV0 +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.store.PendingOperation.{ + ConflictingPendingOperationError, + PendingOperationTriggerType, +} +import com.digitalasset.canton.store.PendingOperationStoreTest.TestPendingOperationMessage.createMessage +import com.digitalasset.canton.store.PendingOperationStoreTest.{ + TestPendingOperationMessage, + createOp, + op1, + op1Modified, + op2, + op3, +} +import com.digitalasset.canton.store.db.DbDeserializationException +import com.digitalasset.canton.topology.DefaultTestIdentities +import com.digitalasset.canton.version.* +import com.digitalasset.canton.{BaseTest, FailOnShutdown, HasExecutionContext} +import com.google.protobuf.ByteString +import org.scalatest.Assertion +import org.scalatest.wordspec.{AnyWordSpec, AsyncWordSpec} + +import scala.concurrent.Future + +trait PendingOperationStoreTest[Op <: HasProtocolVersionedWrapper[Op]] + extends AsyncWordSpec + with BaseTest + with HasExecutionContext + with FailOnShutdown { + + /** A "backdoor" method for tests to insert a malformed operation bypassing the validation in the + * public `insert` method. + */ + protected def insertCorruptedData( + op: PendingOperation[TestPendingOperationMessage], + store: Option[PendingOperationStore[TestPendingOperationMessage]] = None, + corruptOperationBytes: Option[ByteString] = None, + ): Future[Unit] + + def pendingOperationsStore(mk: () => PendingOperationStore[TestPendingOperationMessage]): Unit = { + + def withStore( + testCode: PendingOperationStore[TestPendingOperationMessage] => Future[Assertion] + ): Future[Assertion] = + testCode(mk()) + + "insert and retrieve an operation" in withStore { store => + for { + _ <- store.insert(op1).value + retrieved <- store.get(op1.synchronizerId, op1.key, op1.name).value + } yield retrieved shouldBe Some(op1) + } + + "return None for a non-existent operation" in withStore { store => + store + .get( + DefaultTestIdentities.synchronizerId, + "non-existent-key", + NonEmptyString.tryCreate("non-existent-name"), + ) + .value + .map(_ shouldBe None) + } + + "succeed when inserting an identical operation twice" in withStore { store => + for { + _ <- store.insert(op1).value + _ <- store.insert(op1).value + retrieved <- store.get(op1.synchronizerId, op1.key, op1.name).value + } yield retrieved shouldBe Some(op1) + } + + "fail when inserting a conflicting operation" in withStore { store => + val conflictingInsertResult = for { + _ <- store.insert(op1) + _ <- store.insert(op1Modified) // Attempt to insert with same key but with different data + } yield () + + conflictingInsertResult.value.map { result => + val expectedError = ConflictingPendingOperationError( + synchronizerId = op1.synchronizerId, + key = op1.key, + name = op1.name, + ) + result shouldBe Left(expectedError) + } + } + + "insert and delete an operation" in withStore { store => + val testF = for { + insertResult <- store.insert(op2).value + _ = insertResult shouldBe Right(()) + retrievedBefore <- store.get(op2.synchronizerId, op2.key, op2.name).value + _ = retrievedBefore shouldBe Some(op2) + _ 
<- store.delete(op2.synchronizerId, op2.key, op2.name) + retrievedAfter <- store.get(op2.synchronizerId, op2.key, op2.name).value + } yield { + retrievedAfter shouldBe None + } + testF + } + + "succeed when deleting a non-existent operation" in withStore { store => + store + .delete(DefaultTestIdentities.synchronizerId, "key", NonEmptyString.tryCreate("name")) + .map(_ => succeed) + } + + "succeed in inserting an operation with an empty key" in withStore { store => + val opWithEmptyKey = createOp(name = "opName1", key = "", data = "data") + for { + _ <- store.insert(opWithEmptyKey).value + retrieved <- store + .get(opWithEmptyKey.synchronizerId, opWithEmptyKey.key, opWithEmptyKey.name) + .value + } yield retrieved shouldBe Some(opWithEmptyKey) + } + + "fail with an exception when getting a corrupt operation" in withStore { store => + val testFlow = for { + _ <- FutureUnlessShutdown.outcomeF( + insertCorruptedData( + op3, + Some(store), + corruptOperationBytes = Some(ByteString.copyFromUtf8("unparseable garbage")), + ) + ) + _ <- store.get(op3.synchronizerId, op3.key, op3.name).value + } yield () + + whenReady(testFlow.unwrap.failed) { e => + e shouldBe a[DbDeserializationException] + e.getMessage should include("Failed to deserialize pending operation byte string") + } + } + + "insert atomically when called concurrently" in withStore { store => + // Create a list of operations: one original and 10 conflicting ops: + val conflictingOps = List.fill(10)(op1Modified) + val allOps = op1 :: conflictingOps + + // Sanity-checks: + // - Assert that all 10 conflicting operations are identical + conflictingOps.foreach(_ shouldBe op1Modified) + // - Assert that the original operation is indeed different from the modified one + op1 should not be op1Modified + // - Assert that their composite keys are identical, which is why they conflict + op1.compositeKey shouldBe op1Modified.compositeKey + + // Shuffle to ensure the "winner" of the race is not deterministic :D + val randomizedOps = scala.util.Random.shuffle(allOps) + + // Concurrently execute an insert for every operation + val insertFutures = randomizedOps.map(op => store.insert(op).value) + val allInsertsF = FutureUnlessShutdown.sequence(insertFutures) + + for { + results <- allInsertsF // Wait for all concurrent inserts to complete + actuallyStoredOp <- store.get(op1.synchronizerId, op1.key, op1.name).value + } yield { + val (successfulInserts, failedInserts) = results.partition(_.isRight) + + actuallyStoredOp should be(defined) + val winner = actuallyStoredOp.value + winner should (be(op1) or be(op1Modified)) + + // Asserts that the number of successful and failed inserts is consistent + // with whichever operation won the race to insert. 
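+        // Concretely: if op1 won, its one insert succeeded and all 10
+        // op1Modified inserts conflicted; if op1Modified won, its 10 identical
+        // (hence idempotent) inserts succeeded and only op1 conflicted.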
+ winner match { + case op if op == op1 => + withClue("If op1 won the race, all op1Modified inserts should have failed:") { + successfulInserts should have length 1 + failedInserts should have length 10 + } + + case op if op == op1Modified => + withClue("If op1Modified won the race, only the op1 inserts should have failed:") { + successfulInserts should have length 10 + failedInserts should have length 1 + } + + // To satisfy the compiler, make the match exhaustive and robust + case _ => + fail(s"The winner of the race was an unexpected operation: $winner") + } + + } + } + + } +} + +object PendingOperationStoreTest { + + private def createOp( + name: String, + key: String, + data: String, + ): PendingOperation[TestPendingOperationMessage] = + PendingOperation + .tryCreate( + trigger = PendingOperationTriggerType.SynchronizerReconnect.asString, + name = name, + key = key, + operationBytes = createMessage(data).toByteString, + operationDeserializer = TestPendingOperationMessage.fromTrustedByteString, + synchronizerId = DefaultTestIdentities.synchronizerId.toProtoPrimitive, + ) + + def createInvalid( + trigger: String = PendingOperationTriggerType.SynchronizerReconnect.asString, + name: String = "valid-name", + key: String = "valid-key", + operationBytes: ByteString = createMessage("valid-data").toByteString, + operationDeserializer: ByteString => ParsingResult[TestPendingOperationMessage] = + TestPendingOperationMessage.fromTrustedByteString, + synchronizerId: String = DefaultTestIdentities.synchronizerId.toProtoPrimitive, + ) = + PendingOperation.create( + trigger, + name, + key, + operationBytes, + operationDeserializer, + synchronizerId, + ) + + protected val op1: PendingOperation[TestPendingOperationMessage] = + createOp("opName1", "opKey1", "operation-1-data") + protected val op2: PendingOperation[TestPendingOperationMessage] = + createOp("opName2", "opKey2", "operation-2-data") + protected val op3: PendingOperation[TestPendingOperationMessage] = + createOp("opName3", "opKey3", "operation-3-data") + protected val op1Modified: PendingOperation[TestPendingOperationMessage] = + op1.copy(operation = createMessage("modified-data")) + + private def protocolVersionRepresentative( + pv: ProtocolVersion + ): RepresentativeProtocolVersion[TestPendingOperationMessage.type] = + TestPendingOperationMessage.protocolVersionRepresentativeFor(pv) + + final case class TestPendingOperationMessage(data: String)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + TestPendingOperationMessage.type + ] + ) extends HasProtocolVersionedWrapper[TestPendingOperationMessage] { + @transient override protected lazy val companionObj: TestPendingOperationMessage.type = + TestPendingOperationMessage + + def toProtoV0: VersionedMessageV0 = VersionedMessageV0(data) + } + + object TestPendingOperationMessage + extends VersioningCompanion[TestPendingOperationMessage] + with IgnoreInSerializationTestExhaustivenessCheck { + + def name: String = "TestPendingOperationMessage" + + override val versioningTable: VersioningTable = VersioningTable( + ProtoVersion(30) -> VersionedProtoCodec(ProtocolVersion.v34)(VersionedMessageV0)( + supportedProtoVersion(_)(fromProtoV0), + _.toProtoV0, + ) + ) + + def fromProtoV0(message: VersionedMessageV0): ParsingResult[TestPendingOperationMessage] = + createMessage(message.msg).asRight + + def createMessage(data: String): TestPendingOperationMessage = + TestPendingOperationMessage(data)(protocolVersionRepresentative(ProtocolVersion.minimum)) + } +} + +final class 
PendingOperationTest extends AnyWordSpec with BaseTest { + + import com.digitalasset.canton.store.PendingOperationStoreTest.createInvalid + + "PendingOperation" should { + + "fail to create an operation with an unknown trigger" in { + val result = createInvalid(trigger = "unknown-trigger") + result shouldBe Left("Invalid pending_operation_trigger_type in database: unknown-trigger") + } + + "fail to create an operation with an empty name" in { + val result = createInvalid(name = "") + result shouldBe Left("Missing pending operation name (blank): ") + } + + "fail to create an operation with invalid operation bytes" in { + val result = createInvalid(operationBytes = ByteString.copyFromUtf8("some-data")) + result.left.value should include("Failed to deserialize pending operation byte string") + } + + "fail to create an operation with invalid synchronizer ID" in { + val result = createInvalid(synchronizerId = "invalid-sync-id") + result.left.value should include("Failed to parse a unique ID invalid-sync-id") + } + + } + +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/store/db/DbPendingOperationsStoreTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/store/db/DbPendingOperationsStoreTest.scala new file mode 100644 index 0000000000..d88e9a8c26 --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/store/db/DbPendingOperationsStoreTest.scala @@ -0,0 +1,115 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.store.db + +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.resource.DbStorage +import com.digitalasset.canton.store.PendingOperation.PendingOperationTriggerType +import com.digitalasset.canton.store.PendingOperationStoreTest.TestPendingOperationMessage +import com.digitalasset.canton.store.{ + PendingOperation, + PendingOperationStore, + PendingOperationStoreTest, +} +import com.digitalasset.canton.topology.DefaultTestIdentities +import com.digitalasset.canton.tracing.TraceContext +import com.google.protobuf.ByteString +import org.h2.jdbc.JdbcSQLDataException +import org.postgresql.util.PSQLException +import org.scalatest.BeforeAndAfterAll +import slick.jdbc.SetParameter + +import scala.annotation.unused +import scala.concurrent.Future + +sealed trait DbPendingOperationsStoreTest + extends PendingOperationStoreTest[TestPendingOperationMessage] + with BeforeAndAfterAll { + this: DbTest => + + override def cleanDb( + storage: DbStorage + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { + import storage.api.* + storage.update_( + sqlu"truncate table common_pending_operations restart identity", + functionFullName, + ) + } + + override protected def insertCorruptedData( + op: PendingOperation[TestPendingOperationMessage], + store: Option[PendingOperationStore[TestPendingOperationMessage]] = None, + corruptOperationBytes: Option[ByteString] = None, + ): Future[Unit] = { + import DbStorage.Implicits.setParameterByteString + import storage.api.* + @unused + implicit val setParameter: SetParameter[TestPendingOperationMessage] = + (v: TestPendingOperationMessage, pp) => pp >> v.toByteString + @unused + implicit val setOperationTriggerType: SetParameter[PendingOperationTriggerType] = + DbPendingOperationsStore.setOperationTriggerType(storage) + + val operationBytes = 
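The four failure cases in PendingOperationTest above pin down the validation chain behind PendingOperation.create. A hedged sketch of that smart-constructor shape, with stand-in types and the error strings taken from the assertions (the trigger spelling and the unique-ID shape check are assumptions, not the real implementation):

// Hypothetical stand-in for PendingOperation.create's validation chain.
final case class DemoOperation(trigger: String, name: String, data: String, syncId: String)

object DemoOperation {
  private val knownTriggers = Set("synchronizer-reconnect") // assumed spelling

  def create(
      trigger: String,
      name: String,
      bytes: Array[Byte],
      deserialize: Array[Byte] => Either[String, String],
      syncId: String,
  ): Either[String, DemoOperation] =
    for {
      _ <- Either.cond(knownTriggers(trigger), (),
        s"Invalid pending_operation_trigger_type in database: $trigger")
      _ <- Either.cond(name.nonEmpty, (),
        s"Missing pending operation name (blank): $name")
      data <- deserialize(bytes).left
        .map(err => s"Failed to deserialize pending operation byte string: $err")
      _ <- Either.cond(syncId.contains("::"), (), // unique IDs are "identifier::namespace"-shaped
        s"Failed to parse a unique ID $syncId")
    } yield DemoOperation(trigger, name, data, syncId)
}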
corruptOperationBytes.getOrElse(ByteString.empty()) + + val upsertCorruptAction = storage.profile match { + case _: DbStorage.Profile.Postgres => + sqlu""" + insert into common_pending_operations + (operation_trigger, operation_name, operation_key, operation, synchronizer_id) + values + (${op.trigger}, ${op.name.unwrap}, ${op.key}, $operationBytes, ${op.synchronizerId}) + on conflict (synchronizer_id, operation_key, operation_name) do update + set operation = $operationBytes + """ + case _: DbStorage.Profile.H2 => + sqlu""" + merge into common_pending_operations + (operation_trigger, operation_name, operation_key, operation, synchronizer_id) + key (synchronizer_id, operation_key, operation_name) + values + (${op.trigger}, ${op.name.unwrap}, ${op.key}, $operationBytes, ${op.synchronizerId}) + """ + } + storage.update_(upsertCorruptAction, functionFullName).failOnShutdown + + } + + "DbPendingOperationsStore" should { + behave like pendingOperationsStore(() => + new DbPendingOperationsStore(storage, timeouts, loggerFactory, TestPendingOperationMessage) + ) + + "fail on write when inserting an invalid trigger type" in { + import DbStorage.Implicits.setParameterByteString + import storage.api.* + + val insertInvalidTriggerType = + sqlu""" + insert into common_pending_operations(operation_trigger, operation_name, operation_key, operation, synchronizer_id) + values ('invalid_trigger_type', 'valid-name', 'valid-key', ${ByteString.empty}, ${DefaultTestIdentities.synchronizerId}) + """ + + val resultF: Future[Unit] = storage + .update_( + insertInvalidTriggerType, + "fail on write when inserting an invalid trigger type", + ) + .failOnShutdown + + storage.profile match { + case _: DbStorage.Profile.Postgres => + recoverToSucceededIf[PSQLException](resultF) + case _: DbStorage.Profile.H2 => + recoverToSucceededIf[JdbcSQLDataException](resultF) + } + } + } +} + +class PendingOperationsStoreTestH2 extends DbPendingOperationsStoreTest with H2Test + +class PendingOperationsStorePostgres extends DbPendingOperationsStoreTest with PostgresTest diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/store/memory/InMemoryPendingOperationStoreTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/store/memory/InMemoryPendingOperationStoreTest.scala new file mode 100644 index 0000000000..51f95c663b --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/store/memory/InMemoryPendingOperationStoreTest.scala @@ -0,0 +1,47 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
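The backdoor upsert in DbPendingOperationsStoreTest needs two dialects because only the write statement differs per profile. Stripped of the Slick plumbing, the dispatch reduces to the following sketch (table and column names from the patch, bind parameters elided to placeholders):

// Sketch of the profile dispatch: same logical upsert, two SQL dialects.
sealed trait Profile
case object Postgres extends Profile
case object H2 extends Profile

def upsertSql(profile: Profile): String = profile match {
  case Postgres =>
    // native upsert: insert, and on composite-key conflict overwrite the payload
    """insert into common_pending_operations
      |  (operation_trigger, operation_name, operation_key, operation, synchronizer_id)
      |values (?, ?, ?, ?, ?)
      |on conflict (synchronizer_id, operation_key, operation_name)
      |do update set operation = ?""".stripMargin
  case H2 =>
    // H2 lacks ON CONFLICT; MERGE ... KEY (...) yields the same upsert semantics
    """merge into common_pending_operations
      |  (operation_trigger, operation_name, operation_key, operation, synchronizer_id)
      |key (synchronizer_id, operation_key, operation_name)
      |values (?, ?, ?, ?, ?)""".stripMargin
}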
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.store.memory + +import com.digitalasset.canton.store.PendingOperationStoreTest.TestPendingOperationMessage +import com.digitalasset.canton.store.{ + PendingOperation, + PendingOperationStore, + PendingOperationStoreTest, +} +import com.google.protobuf.ByteString + +import scala.concurrent.Future + +class InMemoryPendingOperationStoreTest + extends PendingOperationStoreTest[TestPendingOperationMessage] { + + override protected def insertCorruptedData( + op: PendingOperation[TestPendingOperationMessage], + store: Option[PendingOperationStore[TestPendingOperationMessage]], + corruptOperationBytes: Option[ByteString] = None, + ): Future[Unit] = { + // Cast the store to its concrete type to access its internal state + val inMemoryStore = + store.value.asInstanceOf[InMemoryPendingOperationStore[TestPendingOperationMessage]] + + val corruptStoredOp = InMemoryPendingOperationStore.StoredPendingOperation( + trigger = op.trigger.asString, + serializedSynchronizerId = op.synchronizerId.toProtoPrimitive, + key = op.key, + name = op.name.unwrap, + serializedOperation = corruptOperationBytes.getOrElse(op.operation.toByteString), + ) + + // Insert the corrupt data into the storage (the internal map) + inMemoryStore.store.put(op.compositeKey, corruptStoredOp) + + Future.successful(()) + } + + "InMemoryPendingOperationStore" should { + behave like pendingOperationsStore(() => + new InMemoryPendingOperationStore(TestPendingOperationMessage) + ) + } +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/time/TimeProofTestUtil.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/time/TimeProofTestUtil.scala deleted file mode 100644 index e7e3751f7b..0000000000 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/time/TimeProofTestUtil.scala +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
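The in-memory variant keeps rows in serialized form, which is what makes the corruption backdoor work: unparseable bytes only surface on the next read. A self-contained sketch of that layout, all types stand-ins:

import scala.collection.concurrent.TrieMap

final case class CompositeKey(synchronizerId: String, key: String, name: String)
// Rows are stored serialized, like StoredPendingOperation above.
final case class StoredRow(trigger: String, serializedOperation: Array[Byte])

final class DemoInMemoryStore(decode: Array[Byte] => Either[String, String]) {
  val store: TrieMap[CompositeKey, StoredRow] = TrieMap.empty

  // Deserialization happens on read, so corrupt bytes surface as a get failure.
  def get(k: CompositeKey): Either[String, Option[String]] =
    store.get(k) match {
      case None      => Right(None)
      case Some(row) => decode(row.serializedOperation).map(Some(_))
    }
}

A test can then put a row with garbage bytes directly into the map and assert that the subsequent get fails, which is exactly what insertCorruptedData does above.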
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.time - -import cats.syntax.option.* -import com.digitalasset.canton.SequencerCounter -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.sequencing.protocol.{Batch, Deliver, SignedContent, TimeProof} -import com.digitalasset.canton.sequencing.traffic.TrafficReceipt -import com.digitalasset.canton.store.SequencedEventStore.OrdinarySequencedEvent -import com.digitalasset.canton.topology.{DefaultTestIdentities, PhysicalSynchronizerId} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.ReassignmentTag.Target - -object TimeProofTestUtil { - def mkTimeProof( - timestamp: CantonTimestamp, - previousEventTimestamp: Option[CantonTimestamp] = None, - counter: Long = 0L, - targetSynchronizer: Target[PhysicalSynchronizerId] = Target( - DefaultTestIdentities.physicalSynchronizerId - ), - ): TimeProof = { - val deliver = Deliver.create( - previousEventTimestamp, - timestamp, - targetSynchronizer.unwrap, - TimeProof.mkTimeProofRequestMessageId.some, - Batch.empty(targetSynchronizer.unwrap.protocolVersion), - None, - Option.empty[TrafficReceipt], - ) - val signedContent = - SignedContent( - deliver, - SymbolicCrypto.emptySignature, - None, - targetSynchronizer.unwrap.protocolVersion, - ) - val event = OrdinarySequencedEvent(SequencerCounter(counter), signedContent)(TraceContext.empty) - TimeProof - .fromEvent(event) - .fold(err => sys.error(s"Failed to create time proof: $err"), identity) - } -} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/GeneratorsTopology.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/GeneratorsTopology.scala index b782ef8fea..c0be0da679 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/GeneratorsTopology.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/GeneratorsTopology.scala @@ -5,14 +5,18 @@ package com.digitalasset.canton.topology import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.crypto.Fingerprint +import com.digitalasset.canton.topology.transaction.SynchronizerTrustCertificate.ParticipantTopologyFeatureFlag import com.digitalasset.canton.version.ProtocolVersion import magnolify.scalacheck.auto.* -import org.scalacheck.Arbitrary +import org.scalacheck.{Arbitrary, Gen} final class GeneratorsTopology(protocolVersion: ProtocolVersion) { import com.digitalasset.canton.config.GeneratorsConfig.* import com.digitalasset.canton.Generators.* + implicit val unrecognizedFeatureFlagArb: Arbitrary[ParticipantTopologyFeatureFlag] = Arbitrary { + Gen.oneOf(ParticipantTopologyFeatureFlag.knownTopologyFeatureFlags) + } implicit val fingerprintArb: Arbitrary[Fingerprint] = Arbitrary( string68Arb.arbitrary.map(Fingerprint.tryFromString) ) diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/TopologyManagerTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/TopologyManagerTest.scala index 706221c69e..0fbf02a3af 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/TopologyManagerTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/TopologyManagerTest.scala @@ -149,6 +149,7 @@ class TopologyManagerTest extends AnyWordSpec with BaseTest with HasExecutionCon timeouts, ), 
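The new Arbitrary in GeneratorsTopology above confines generated feature flags to the known set instead of deriving instances. The pattern in isolation, as a scalacheck sketch over a stand-in enumeration:

import org.scalacheck.{Arbitrary, Gen}

object GeneratorsDemo {
  sealed trait DemoFeatureFlag
  case object FlagA extends DemoFeatureFlag
  case object FlagB extends DemoFeatureFlag
  val knownFlags: Seq[DemoFeatureFlag] = Seq(FlagA, FlagB)

  // Restrict generation to the known enumeration so property tests
  // never see an unrecognized flag.
  implicit val demoFlagArb: Arbitrary[DemoFeatureFlag] =
    Arbitrary(Gen.oneOf(knownFlags))
}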
new SynchronizerOutboxQueue(loggerFactory), + disableOptionalTopologyChecks = false, exitOnFatalFailures = exitOnFatal, timeouts = timeouts, futureSupervisor = futureSupervisor, diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/client/DefaultHeadStateInitializerTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/client/DefaultHeadStateInitializerTest.scala index e76b4de6b5..33296fedef 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/client/DefaultHeadStateInitializerTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/client/DefaultHeadStateInitializerTest.scala @@ -36,7 +36,11 @@ class DefaultHeadStateInitializerTest ) initializer - .initialize(topologyClientMock, synchronizerPredecessor = None) + .initialize( + topologyClientMock, + synchronizerPredecessor = None, + defaultStaticSynchronizerParameters, + ) .map { _ => verify(topologyClientMock).updateHead( SequencedTime(maxSequencedTimestamp), @@ -56,7 +60,11 @@ class DefaultHeadStateInitializerTest val initializer = new DefaultHeadStateInitializer(topologyStoreMock) initializer - .initialize(topologyClientMock, synchronizerPredecessor = None) + .initialize( + topologyClientMock, + synchronizerPredecessor = None, + defaultStaticSynchronizerParameters, + ) .map { _ => verify(topologyClientMock, never).updateHead( any[SequencedTime], diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/client/StoreBasedSynchronizerTopologyClientTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/client/StoreBasedSynchronizerTopologyClientTest.scala index f941ceb6f5..a565751dd3 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/client/StoreBasedSynchronizerTopologyClientTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/client/StoreBasedSynchronizerTopologyClientTest.scala @@ -9,9 +9,11 @@ import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.crypto.{SigningKeyUsage, SigningPublicKey} import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.SuppressionRule import com.digitalasset.canton.store.db.{DbTest, H2Test, PostgresTest} -import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.time.{Clock, SynchronizerTimeTracker} import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.client.PartyTopologySnapshotClient.PartyInfo import com.digitalasset.canton.topology.processing.{ApproximateTime, EffectiveTime, SequencedTime} import com.digitalasset.canton.topology.store.db.DbTopologyStoreHelper import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStore @@ -22,8 +24,16 @@ import com.digitalasset.canton.topology.store.{ } import com.digitalasset.canton.topology.transaction.* import com.digitalasset.canton.topology.transaction.ParticipantPermission.* -import com.digitalasset.canton.{BaseTest, FailOnShutdown, HasExecutionContext, SequencerCounter} +import com.digitalasset.canton.util.MonadUtil +import com.digitalasset.canton.{ + BaseTest, + FailOnShutdown, + HasExecutionContext, + LfPartyId, + SequencerCounter, +} import org.scalatest.wordspec.AsyncWordSpec +import org.slf4j.event.Level @SuppressWarnings(Array("org.wartremover.warts.Product", "org.wartremover.warts.Serializable")) trait 
StoreBasedTopologySnapshotTest @@ -70,6 +80,7 @@ trait StoreBasedTopologySnapshotTest val client = new StoreBasedSynchronizerTopologyClient( mock[Clock], + defaultStaticSynchronizerParameters, store, StoreBasedSynchronizerTopologyClient.NoPackageDependencies, DefaultProcessingTimeouts.testing, @@ -161,6 +172,23 @@ trait StoreBasedTopologySnapshotTest awaitSequencedTimestampF.isCompleted shouldBe true } + "await tick when effective time is in the future" in { + val fixture = new Fixture() + import fixture.* + + // given + val timeTracker = mock[SynchronizerTimeTracker] + when(timeTracker.awaitTick(ts2)).thenReturn(None) + client.setSynchronizerTimeTracker(timeTracker) + + // when + observed(SequencedTime(ts1), EffectiveTime(ts2)) + + // then + verify(timeTracker).awaitTick(ts2) + succeed + } + "correctly get notified on updateHead" in { Table("potential topology change", true, false).forEvery { potentialTopologyChange => val fixture = new Fixture() @@ -349,6 +377,251 @@ trait StoreBasedTopologySnapshotTest compareMappings(admin1b, Map(participant1 -> ParticipantPermission.Observation)) } } + + "filter out participants without otk or stc" in { + val fixture = new Fixture() + val party1participant1 = mkAdd( + PartyToParticipant.tryCreate( + party1, + PositiveInt.one, + Seq( + HostingParticipant(participant1, Submission) + ), + ) + ) + val party2participant2 = mkAdd( + PartyToParticipant.tryCreate( + party2, + PositiveInt.one, + Seq( + HostingParticipant(participant2, Submission) + ), + ) + ) + val party3participant1_2_3 = mkAdd( + PartyToParticipant.tryCreate( + party3, + PositiveInt.one, + Seq( + HostingParticipant(participant1, Submission), + HostingParticipant(participant2, Submission), + HostingParticipant(participant3, Submission), + ), + ) + ) + val lfParty1 = party1.toLf + val lfParty2 = party2.toLf + val lfParty3 = party3.toLf + val allParticipants = Seq(participant1, participant2, participant3) + val allParties = Seq(party1, party2, party3).map(_.toLf) + val allPartiesAndParticipants = + Seq((party1, participant1), (party2, participant2), (party3, participant3)) + loggerFactory.assertLogsSeq(SuppressionRule.LevelAndAbove(Level.WARN))( + { + for { + _ <- fixture.add( + ts, + Seq( + dpc1, + p1_otk, // we have OTK for P1 + p2_dtc, // we have DTC for P2, + p3_otk, + p3_dtc, + party1participant1, + party2participant2, + party3participant1_2_3, + ), + ) + _ = fixture.client.observed( + ts.immediateSuccessor, + ts.immediateSuccessor, + SequencerCounter(0), + Seq(), + ) + snapshot <- fixture.client.snapshot(ts.immediateSuccessor) + // PartyKeyTopologySnapshotClient + activeParticipantsOfParties <- snapshot.activeParticipantsOfParties(allParties) + activeParticipantsOfPartiesWithInfo <- snapshot.activeParticipantsOfPartiesWithInfo( + allParties + ) + activeParticipantsOf <- MonadUtil + .sequentialTraverse(allParties)(p => + snapshot.activeParticipantsOf(p).map(r => (p, r)) + ) + .map(_.toMap) + allHaveActiveParticipants <- MonadUtil + .sequentialTraverse(allParties)(p => + snapshot.allHaveActiveParticipants(Set(p)).value.map((p, _)) + ) + .map(_.toMap) + isHostedByAtLeastOneParticipantF <- MonadUtil + .sequentialTraverse(allParties)(p => + snapshot + .isHostedByAtLeastOneParticipantF(Set(p), { case (_, _) => true }) + .map((p, _)) + ) + .map(_.toMap) + hostedOn <- MonadUtil + .sequentialTraverse(Seq(participant1, participant2, participant3))(par => + snapshot.hostedOn(allParties.toSet, par).map((par, _)) + ) + .map(_.toMap) + allHostedOn <- MonadUtil + .sequentialTraverse( + 
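The new "await tick" case above is a classic stub-then-verify interaction test: stub awaitTick to report no outstanding tick, feed an event whose effective time lies ahead, then verify the tracker was asked for a tick. The same shape, self-contained with a stand-in trait:

import org.mockito.Mockito.{mock, verify, when}

trait DemoTimeTracker {
  def awaitTick(effectiveTimeMicros: Long): Option[Unit]
}

object AwaitTickDemo extends App {
  val tracker = mock(classOf[DemoTimeTracker])
  when(tracker.awaitTick(2L)).thenReturn(None) // stub: no tick outstanding

  // ... the code under test observes sequenced=1, effective=2 and, since the
  // effective time is in the future, requests a tick at 2 ...
  tracker.awaitTick(2L)

  verify(tracker).awaitTick(2L) // assert the interaction happened exactly once
}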
allPartiesAndParticipants + ) { case (party, participant) => + snapshot.allHostedOn(Set(party.toLf), participant).map((party, _)) + } + .map(_.toMap) + canConfirm <- MonadUtil + .sequentialTraverse(allPartiesAndParticipants) { case (party, participant) => + snapshot.canConfirm(participant, Set(party.toLf)).map((party, _)) + } + .map(_.toMap) + hasNoConfirmer <- MonadUtil + .sequentialTraverse(allParties)(p => + snapshot + .hasNoConfirmer(Set(p)) + .map((p, _)) + ) + .map(_.toMap) + canNotSubmit <- MonadUtil + .sequentialTraverse(allPartiesAndParticipants) { case (party, participant) => + snapshot.canNotSubmit(participant, Seq(party.toLf)).map(c => (party, c.toSeq)) + } + .map(_.toMap) + activeParticipantsOfAll <- MonadUtil + .sequentialTraverse(allParties)(p => + snapshot.activeParticipantsOfAll(List(p)).value.map((p, _)) + ) + .map(_.toMap) + knownParties <- snapshot.inspectKnownParties(filterParty = "", filterParticipant = "") + // KeyTopologySnapshotClient + signingKeys <- MonadUtil + .sequentialTraverse(allParticipants)(p => + snapshot + .signingKeys( + p, + filterUsage = SigningKeyUsage.ProtocolWithProofOfOwnership, + ) + .map((p, _)) + ) + .map(_.toMap) + encryptionKeys <- MonadUtil + .sequentialTraverse(allParticipants)(p => snapshot.encryptionKeys(p).map((p, _))) + .map(_.toMap) + // ParticipantTopologySnapshotClient + isParticipantActive <- MonadUtil + .sequentialTraverse(allParticipants)(p => snapshot.isParticipantActive(p).map((p, _))) + .map(_.toMap) + isParticipantActiveAndCanLoginAt <- MonadUtil + .sequentialTraverse(allParticipants)(p => + snapshot.isParticipantActiveAndCanLoginAt(p, ts.plusSeconds(1)).map((p, _)) + ) + .map(_.toMap) + // MembersTopologySnapshotClient + allMembers <- snapshot.allMembers() + isMemberKnown <- MonadUtil + .sequentialTraverse(allParticipants)(p => snapshot.isMemberKnown(p).map((p, _))) + .map(_.toMap) + } yield { + activeParticipantsOfParties shouldBe Map( + lfParty1 -> Set.empty, + lfParty2 -> Set.empty, + lfParty3 -> Set(participant3), + ) + activeParticipantsOfPartiesWithInfo shouldBe Map( + lfParty1 -> PartyInfo(threshold = PositiveInt.one, participants = Map()), + lfParty2 -> PartyInfo(threshold = PositiveInt.one, participants = Map()), + lfParty3 -> PartyInfo( + threshold = PositiveInt.one, + participants = + Map(participant3 -> ParticipantAttributes(ParticipantPermission.Submission)), + ), + ) + activeParticipantsOf shouldBe Map( + lfParty1 -> Map(), + lfParty2 -> Map(), + lfParty3 -> Map( + participant3 -> ParticipantAttributes(ParticipantPermission.Submission) + ), + ) + allHaveActiveParticipants shouldBe Map( + lfParty1 -> Left(Set(lfParty1)), + lfParty2 -> Left(Set(lfParty2)), + lfParty3 -> Right(()), + ) + isHostedByAtLeastOneParticipantF shouldBe Map( + lfParty1 -> Set.empty[LfPartyId], + lfParty2 -> Set.empty[LfPartyId], + lfParty3 -> Set(lfParty3), + ) + hostedOn shouldBe Map( + participant1 -> Map(), + participant2 -> Map(), + participant3 -> Map( + lfParty3 -> ParticipantAttributes(ParticipantPermission.Submission) + ), + ) + allHostedOn shouldBe Map(party1 -> false, party2 -> false, party3 -> true) + canConfirm shouldBe Map( + party1 -> Set.empty, + party2 -> Set.empty, + party3 -> Set(lfParty3), + ) + hasNoConfirmer shouldBe Map( + lfParty1 -> Set(lfParty1), + lfParty2 -> Set(lfParty2), + lfParty3 -> Set.empty, + ) + canNotSubmit shouldBe Map( + party1 -> List(lfParty1), + party2 -> List(lfParty2), + party3 -> List.empty, + ) + activeParticipantsOfAll shouldBe Map( + lfParty1 -> Left(Set(lfParty1)), + lfParty2 -> 
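Nearly every query in this test follows one pattern: traverse the parties or participants sequentially, pair each input with its result, and collect a Map for the assertions. A plain-Future approximation of that helper (MonadUtil.sequentialTraverse in canton is richer):

import scala.concurrent.{ExecutionContext, Future}

object TraverseDemo {
  // Runs f one element at a time (no parallelism), preserving input order.
  def sequentialTraverse[A, B](as: Seq[A])(f: A => Future[B])(
      implicit ec: ExecutionContext
  ): Future[Seq[B]] =
    as.foldLeft(Future.successful(Vector.empty[B])) { (accF, a) =>
      accF.flatMap(acc => f(a).map(acc :+ _))
    }

  // Usage mirroring the test: one query per party, keyed results.
  def activeParticipantsOf(party: String): Future[Set[String]] =
    Future.successful(Set.empty)

  implicit val ec: ExecutionContext = ExecutionContext.global
  val perParty: Future[Map[String, Set[String]]] =
    sequentialTraverse(Seq("party1", "party2"))(p => activeParticipantsOf(p).map(p -> _))
      .map(_.toMap)
}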
Left(Set(lfParty2)), + lfParty3 -> Right(Set(participant3)), + ) + knownParties shouldBe Set(participant3.adminParty, party3) + signingKeys(participant1) should not be empty + signingKeys(participant2) shouldBe empty + signingKeys(participant3) should not be empty + encryptionKeys(participant1) should not be empty + encryptionKeys(participant2) shouldBe empty + encryptionKeys(participant3) should not be empty + isParticipantActive shouldBe Map( + participant1 -> false, + participant2 -> false, + participant3 -> true, + ) + isParticipantActiveAndCanLoginAt shouldBe Map( + participant1 -> false, + participant2 -> false, + participant3 -> true, + ) + allMembers should contain(participant3) + allMembers should not contain (participant1) + allMembers should not contain (participant2) + isMemberKnown shouldBe Map( + participant1 -> false, + participant2 -> false, + participant3 -> true, + ) + succeed + } + }, + seq => { + forAll(seq) { msg => + msg.warningMessage should include( + "has a synchronizer trust certificate, but no keys on synchronizer" + ) + } + }, + ) + } + } } diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/InitialTopologySnapshotValidatorTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/InitialTopologySnapshotValidatorTest.scala index fbe88ab90c..41cee8d329 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/InitialTopologySnapshotValidatorTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/InitialTopologySnapshotValidatorTest.scala @@ -4,11 +4,10 @@ package com.digitalasset.canton.topology.processing import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.FailOnShutdown import com.digitalasset.canton.config.CantonRequireTypes.String300 -import com.digitalasset.canton.config.DefaultProcessingTimeouts import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.crypto.{SigningKeyUsage, SynchronizerCryptoPureApi} +import com.digitalasset.canton.protocol.StaticSynchronizerParameters import com.digitalasset.canton.store.db.{DbTest, PostgresTest} import com.digitalasset.canton.topology.PhysicalSynchronizerId import com.digitalasset.canton.topology.store.db.DbTopologyStoreHelper @@ -25,9 +24,11 @@ import com.digitalasset.canton.topology.transaction.{ SignedTopologyTransaction, } import com.digitalasset.canton.version.ProtocolVersionValidation +import com.digitalasset.canton.{FailOnShutdown, HasActorSystem} abstract class InitialTopologySnapshotValidatorTest extends TopologyTransactionHandlingBase + with HasActorSystem with FailOnShutdown { import Factory.* @@ -41,7 +42,7 @@ abstract class InitialTopologySnapshotValidatorTest val validator = new InitialTopologySnapshotValidator( new SynchronizerCryptoPureApi(defaultStaticSynchronizerParameters, crypto), store, - DefaultProcessingTimeouts.testing, + validateInitialSnapshot = true, loggerFactory, ) (validator, store) @@ -227,7 +228,9 @@ abstract class InitialTopologySnapshotValidatorTest ) ) :+ StoredTopologyTransaction( SequencedTime(ts(1)), - EffectiveTime(ts(1).plus((dmp1_k1.mapping.parameters.topologyChangeDelay.duration))), + EffectiveTime( + ts(1).plus((StaticSynchronizerParameters.defaultTopologyChangeDelay.unwrap)) + ), validUntil = None, okmS1k7_without_k7_signature, None, diff --git 
a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTimestampPlusEpsilonTrackerTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTimestampPlusEpsilonTrackerTest.scala deleted file mode 100644 index be74ade2de..0000000000 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTimestampPlusEpsilonTrackerTest.scala +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.processing - -import com.digitalasset.canton.config.DefaultProcessingTimeouts -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.protocol.DynamicSynchronizerParameters -import com.digitalasset.canton.time.NonNegativeFiniteDuration -import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStore -import com.digitalasset.canton.topology.store.{ - TopologyStoreId, - TopologyTransactionRejection, - ValidatedTopologyTransaction, -} -import com.digitalasset.canton.topology.transaction.* -import com.digitalasset.canton.topology.{DefaultTestIdentities, TestingOwnerWithKeys} -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalactic.source -import org.scalatest.wordspec.FixtureAnyWordSpec -import org.scalatest.{Assertion, Outcome} - -class TopologyTimestampPlusEpsilonTrackerTest - extends FixtureAnyWordSpec - with BaseTest - with HasExecutionContext { - - protected class Fixture { - val crypto = new TestingOwnerWithKeys( - DefaultTestIdentities.sequencerId, - loggerFactory, - parallelExecutionContext, - ) - val store = new InMemoryTopologyStore( - TopologyStoreId.SynchronizerStore(DefaultTestIdentities.physicalSynchronizerId), - testedProtocolVersion, - loggerFactory, - timeouts, - ) - - var tracker: TopologyTimestampPlusEpsilonTracker = _ - reInit() - - def reInit(): Unit = - tracker = new TopologyTimestampPlusEpsilonTracker( - store, - DefaultProcessingTimeouts.testing, - loggerFactory, - ) - - def commitChangeDelay(sequenced: Long, effective: Long, topologyChangeDelay: Long): Unit = { - val sequencedTimeTyped = SequencedTime(CantonTimestamp.ofEpochMicro(sequenced)) - val effectiveTimeTyped = EffectiveTime(CantonTimestamp.ofEpochMicro(effective)) - val topologyChangeDelayTyped = NonNegativeFiniteDuration.tryOfMicros(topologyChangeDelay) - - tracker.adjustTopologyChangeDelay( - effectiveTimeTyped, - topologyChangeDelayTyped, - ) - storeChangeDelay( - sequencedTimeTyped, - effectiveTimeTyped, - topologyChangeDelayTyped, - ) - } - - def storeChangeDelay( - sequenced: SequencedTime, - effective: EffectiveTime, - topologyChangeDelay: NonNegativeFiniteDuration, - ): Unit = { - val tx = crypto.mkAdd( - SynchronizerParametersState( - DefaultTestIdentities.synchronizerId, - DynamicSynchronizerParameters.initialValues( - topologyChangeDelay, - testedProtocolVersion, - ), - ), - crypto.SigningKeys.key1, - ) - store - .update( - sequenced, - effective, - removeMapping = Map(tx.mapping.uniqueKey -> PositiveInt.one), - removeTxs = Set.empty, - List(ValidatedTopologyTransaction(tx, None)), - ) - .futureValueUS - } - - def storeRejection(sequenced: Long, effective: Long): Unit = { - - val tx = ValidatedTopologyTransaction( - crypto.TestingTransactions.p1p1, - Some(TopologyTransactionRejection.NotAuthorized), - ) - - store - 
.update( - SequencedTime(CantonTimestamp.ofEpochMicro(sequenced)), - EffectiveTime(CantonTimestamp.ofEpochMicro(effective)), - removeMapping = Map.empty, - removeTxs = Set.empty, - Seq(tx), - ) - .futureValueUS - } - - def assertEffectiveTime( - sequenced: Long, - strictMonotonicity: Boolean, - expectedEffective: Long, - )(implicit pos: source.Position): Assertion = - tracker - .trackAndComputeEffectiveTime( - SequencedTime(CantonTimestamp.ofEpochMicro(sequenced)), - strictMonotonicity, - ) - .futureValueUS - .value shouldBe CantonTimestamp.ofEpochMicro(expectedEffective) - } - - type FixtureParam = Fixture - - override protected def withFixture(test: OneArgTest): Outcome = test(new Fixture) - - "The tracker" should { - - "correctly compute effective times with constant topologyChangeDelay" in { f => - import f.* - commitChangeDelay(-1, -1, 250) - - assertEffectiveTime(0, strictMonotonicity = true, 250) - assertEffectiveTime(5, strictMonotonicity = true, 255) - assertEffectiveTime(5, strictMonotonicity = false, 255) - } - - "correctly compute effective times when the topologyChangeDelay increases" in { f => - import f.* - // initialize delay - commitChangeDelay(-1, -1, 250) - - // increase delay - assertEffectiveTime(0, strictMonotonicity = true, 250) - commitChangeDelay(0, 250, 1000) - - // until 250, we should get the old delay - assertEffectiveTime(1, strictMonotonicity = true, 251) - assertEffectiveTime(100, strictMonotonicity = true, 350) - assertEffectiveTime(250, strictMonotonicity = true, 500) - - // after 250, we should get the new delay - assertEffectiveTime(251, strictMonotonicity = true, 1251) - assertEffectiveTime(260, strictMonotonicity = true, 1260) - assertEffectiveTime(350, strictMonotonicity = true, 1350) - assertEffectiveTime(500, strictMonotonicity = true, 1500) - } - - "correctly compute effective times when the topologyChangeDelay decreases" in { f => - import f.* - - // initialize delay - commitChangeDelay(-1, -1, 250) - - // increase delay - assertEffectiveTime(0, strictMonotonicity = true, 250) - commitChangeDelay(0, 250, 100) - - // until 250, we should get the old delay - assertEffectiveTime(1, strictMonotonicity = false, 251) - assertEffectiveTime(100, strictMonotonicity = false, 350) - assertEffectiveTime(250, strictMonotonicity = false, 500) - - // after 250, we should get the new delay, but with corrections to guarantee monotonicity - assertEffectiveTime(251, strictMonotonicity = false, 500) - assertEffectiveTime(252, strictMonotonicity = false, 500) - assertEffectiveTime(253, strictMonotonicity = true, 501) - assertEffectiveTime(254, strictMonotonicity = false, 501) - assertEffectiveTime(300, strictMonotonicity = false, 501) - assertEffectiveTime(300, strictMonotonicity = true, 502) - - // after 402, we should get the new delay without corrections - assertEffectiveTime(403, strictMonotonicity = true, 503) - assertEffectiveTime(404, strictMonotonicity = false, 504) - assertEffectiveTime(410, strictMonotonicity = true, 510) - assertEffectiveTime(500, strictMonotonicity = false, 600) - assertEffectiveTime(600, strictMonotonicity = true, 700) - } - - "initialization should load upcoming epsilon changes" in { f => - import f.* - - // Commit a series of changes and check effective times. 
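The removed tracker tests all revolve around one rule: a transaction sequenced at t becomes effective at t plus the topologyChangeDelay in force at t, with corrections only to keep effective times monotone when the delay shrinks. The base rule as a toy check in epoch micros, matching the constant-delay case above:

object EffectiveTimeDemo extends App {
  // effective = sequenced + delay currently in force (no monotonicity correction)
  def effectiveTime(sequencedMicros: Long, delayMicros: Long): Long =
    sequencedMicros + delayMicros

  assert(effectiveTime(0, 250) == 250) // assertEffectiveTime(0, _, 250)
  assert(effectiveTime(5, 250) == 255) // assertEffectiveTime(5, _, 255)
}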
- assertEffectiveTime(0, strictMonotonicity = true, 0) - commitChangeDelay(0, 0, 100) // delay1 - assertEffectiveTime(10, strictMonotonicity = true, 110) - commitChangeDelay(10, 110, 110) // delay2 - assertEffectiveTime(100, strictMonotonicity = false, 200) - assertEffectiveTime(111, strictMonotonicity = true, 221) - storeRejection(111, 221) - assertEffectiveTime(120, strictMonotonicity = true, 230) - commitChangeDelay(120, 230, 120) // delay3 - assertEffectiveTime(231, strictMonotonicity = false, 351) - - // Now re-initialize tracker and check if up-coming changes are loaded from store - reInit() - // This will initialize the tracker to sequencedTime = 100, i.e. delay1 is effective, delay2 is upcoming, and delay3 not yet processed. - // delay1 should be loaded from the store, as it is effective - assertEffectiveTime(100, strictMonotonicity = false, 200) - // delay2 should be loaded from the store, as it has been upcoming during initialization - assertEffectiveTime(111, strictMonotonicity = true, 221) - storeRejection(111, 221) - assertEffectiveTime(120, strictMonotonicity = true, 230) - // delay3 needs to be replayed as its sequencing time is after the init time of 100 - commitChangeDelay(120, 230, 120) - assertEffectiveTime(231, strictMonotonicity = false, 351) - } - - "initialization should load upcoming transactions (including rejections)" in { f => - import f.* - - assertEffectiveTime(0, strictMonotonicity = true, 0) - commitChangeDelay(0, 0, 100) // set initial delay1 - assertEffectiveTime(10, strictMonotonicity = true, 110) - commitChangeDelay(10, 110, 50) // decrease delay to delay2 - - // delay1 is still effective - assertEffectiveTime(110, strictMonotonicity = true, 210) - storeRejection(110, 210) - - // delay2 is now effective, but the effective time is corrected - assertEffectiveTime(111, strictMonotonicity = true, 211) - storeRejection(111, 211) - assertEffectiveTime(120, strictMonotonicity = true, 212) - storeRejection(120, 212) - assertEffectiveTime(130, strictMonotonicity = false, 212) - // delay2 is now effective without any correction - assertEffectiveTime(164, strictMonotonicity = true, 214) - storeRejection(164, 214) - - // Now re-initialize and check if previous transactions are reloaded - reInit() - // delay2 is already effective, but the effective time is corrected - assertEffectiveTime(130, strictMonotonicity = false, 212) - // delay2 is now effective without any correction - assertEffectiveTime(164, strictMonotonicity = true, 214) - storeRejection(164, 214) - } - - "initialization should load expired synchronizerParametersChanges" in { f => - import f.* - - assertEffectiveTime(0, strictMonotonicity = true, 0) - commitChangeDelay(0, 0, 100) // set initial delay1 - assertEffectiveTime(10, strictMonotonicity = true, 110) - commitChangeDelay(10, 110, 50) // decrease delay to delay2 - - // delay1 is still effective - assertEffectiveTime(110, strictMonotonicity = false, 210) - // delay2 is now effective, but the effective time is corrected - assertEffectiveTime(120, strictMonotonicity = false, 210) - // delay2 is now effective without any correction - assertEffectiveTime(162, strictMonotonicity = false, 212) - - // Now re-initialize and check if the expiry of delay1 is reloaded - reInit() - assertEffectiveTime(120, strictMonotonicity = false, 210) - // delay2 is now effective without any correction - assertEffectiveTime(162, strictMonotonicity = false, 212) - } - } - -} diff --git 
a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionAuthorizationValidatorTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionAuthorizationValidatorTest.scala index a87aed0f98..aae0f519ac 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionAuthorizationValidatorTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionAuthorizationValidatorTest.scala @@ -16,7 +16,7 @@ import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.DefaultTestIdentities.participant2 import com.digitalasset.canton.topology.store.* import com.digitalasset.canton.topology.store.TopologyStoreId.SynchronizerStore -import com.digitalasset.canton.topology.store.TopologyTransactionRejection.{ +import com.digitalasset.canton.topology.store.TopologyTransactionRejection.Authorization.{ MultiTransactionHashMismatch, NoDelegationFoundForKeys, NotAuthorized, @@ -157,7 +157,7 @@ abstract class TopologyTransactionAuthorizationValidatorTest(multiTransactionHas Seq( None, Some { - case TopologyTransactionRejection.SignatureCheckFailed(_) => true + case TopologyTransactionRejection.Authorization.SignatureCheckFailed(_) => true case _ => false }, ), @@ -181,7 +181,7 @@ abstract class TopologyTransactionAuthorizationValidatorTest(multiTransactionHas validatedTopologyTransactions, Seq( Some { - case TopologyTransactionRejection.SignatureCheckFailed( + case TopologyTransactionRejection.Authorization.SignatureCheckFailed( UnsupportedKeySpec( Factory.SigningKeys.key1_unsupportedSpec.keySpec, defaultStaticSynchronizerParameters.requiredSigningSpecs.keys, @@ -367,7 +367,7 @@ abstract class TopologyTransactionAuthorizationValidatorTest(multiTransactionHas Seq( None, Some { - case TopologyTransactionRejection.InvalidSynchronizer(_) => true + case TopologyTransactionRejection.Authorization.InvalidSynchronizer(_) => true case _ => false }, ), @@ -687,7 +687,7 @@ abstract class TopologyTransactionAuthorizationValidatorTest(multiTransactionHas res, Seq( Some { - case TopologyTransactionRejection.SignatureCheckFailed( + case TopologyTransactionRejection.Authorization.SignatureCheckFailed( InvalidSignature(`sig_k1_emptySignature`, _, _) ) => true @@ -1335,7 +1335,7 @@ abstract class TopologyTransactionAuthorizationValidatorTest(multiTransactionHas ) resultOnlySuperfluousSignatures.loneElement.rejectionReason shouldBe Some( - TopologyTransactionRejection.NoDelegationFoundForKeys(Set(key3.id, key5.id)) + TopologyTransactionRejection.Authorization.NoDelegationFoundForKeys(Set(key3.id, key5.id)) ) } } diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessorTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessorTest.scala index b622f793d3..490920648b 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessorTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessorTest.scala @@ -10,15 +10,7 @@ import com.digitalasset.canton.crypto.{SigningPublicKey, SynchronizerCryptoPureA import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import 
com.digitalasset.canton.protocol.DynamicSynchronizerParameters -import com.digitalasset.canton.protocol.messages.TopologyTransactionsBroadcast -import com.digitalasset.canton.sequencing.SubscriptionStart.FreshSubscription -import com.digitalasset.canton.sequencing.protocol.{ - AllMembersOfSynchronizer, - OpenEnvelope, - Recipients, -} import com.digitalasset.canton.store.db.{DbTest, PostgresTest} -import com.digitalasset.canton.time.{NonNegativeFiniteDuration, SynchronizerTimeTracker} import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.store.db.DbTopologyStoreHelper import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStore @@ -29,7 +21,7 @@ import com.digitalasset.canton.topology.store.{ } import com.digitalasset.canton.topology.transaction.* import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction -import com.digitalasset.canton.tracing.{TraceContext, Traced} +import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.{FailOnShutdown, SequencerCounter} abstract class TopologyTransactionProcessorTest @@ -604,164 +596,6 @@ abstract class TopologyTransactionProcessorTest .value .value shouldBe ts(3) } - - /** this test checks that only fully authorized synchronizer parameter changes are used to - * update the topology change delay for adjusting the effective time - * - * 1. initialize the topology store with a decentralized namespace with 2 owners and - * default synchronizer parameters (topologyChangeDelay=250ms) - * 1. process a proposal to update the topology change delay - * 1. process the fully authorized update to the topology change delay - * 1. process some other topology change delay - * - * only in step 4. should the updated topology change delay be used to compute the effective - * time - */ - "only track fully authorized synchronizer parameter state changes" in { - import SigningKeys.{ec as _, *} - val dnsNamespace = - DecentralizedNamespaceDefinition.computeNamespace(Set(ns1, ns2)) - val synchronizerId = - SynchronizerId(UniqueIdentifier.tryCreate("test-synchronizer", dnsNamespace)).toPhysical - - val dns = mkAddMultiKey( - DecentralizedNamespaceDefinition - .create( - dnsNamespace, - PositiveInt.two, - NonEmpty(Set, ns1, ns2), - ) - .value, - signingKeys = NonEmpty(Set, key1, key2), - ) - val initialSynchronizerParameters = mkAddMultiKey( - SynchronizerParametersState( - synchronizerId.logical, - DynamicSynchronizerParameters.defaultValues(testedProtocolVersion), - ), - signingKeys = NonEmpty(Set, key1, key2), - ) - - val initialTopologyChangeDelay = - initialSynchronizerParameters.mapping.parameters.topologyChangeDelay.duration - val updatedTopologyChangeDelay = initialTopologyChangeDelay.plusMillis(50) - - val updatedSynchronizerParams = SynchronizerParametersState( - synchronizerId.logical, - DynamicSynchronizerParameters.initialValues( - topologyChangeDelay = NonNegativeFiniteDuration.tryCreate(updatedTopologyChangeDelay), - testedProtocolVersion, - ), - ) - val synchronizerParameters_k1 = mkAdd( - updatedSynchronizerParams, - signingKey = key1, - serial = PositiveInt.two, - isProposal = true, - ) - val synchronizerParameters_k2 = mkAdd( - updatedSynchronizerParams, - signingKey = key2, - serial = PositiveInt.two, - isProposal = true, - ) - - val initialTopologyState = List(ns1k1_k1, ns2k2_k2, dns, initialSynchronizerParameters) - .map(ValidatedTopologyTransaction(_)) - - def mkEnvelope(transaction: GenericSignedTopologyTransaction) = - Traced( - 
List( - OpenEnvelope( - TopologyTransactionsBroadcast( - synchronizerId, - List(transaction), - ), - recipients = Recipients.cc(AllMembersOfSynchronizer), - )(testedProtocolVersion) - ) - ) - - // in block1 we propose a new topology change delay. the transaction itself will be - // stored with the default topology change delay of 250ms and should NOT trigger a change - // in topology change delay, because it's only a proposal - val block1 = mkEnvelope(synchronizerParameters_k1) - // in block2 we fully authorize the update to synchronizer parameters - val block2 = mkEnvelope(synchronizerParameters_k2) - // in block3 we should see the new topology change delay being used to compute the effective time - val block3 = mkEnvelope(ns3k3_k3) - - val store = mkStore(synchronizerId) - - store - .update( - sequenced = SequencedTime(CantonTimestamp.MinValue.immediateSuccessor), - effective = EffectiveTime(CantonTimestamp.MinValue.immediateSuccessor), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = initialTopologyState, - ) - .futureValueUS - - val (proc, _) = mk(store) - - val synchronizerTimeTrackerMock = mock[SynchronizerTimeTracker] - when(synchronizerTimeTrackerMock.awaitTick(any[CantonTimestamp])(anyTraceContext)) - .thenAnswer(None) - - proc.subscriptionStartsAt(FreshSubscription, synchronizerTimeTrackerMock).futureValueUS - - // ================== - // process the blocks - - // block1: first proposal to update topology change delay - // use proc.processEnvelopes directly so that the effective time is properly computed from topology change delays - proc - .processEnvelopes(SequencerCounter(0), SequencedTime(ts(0)), None, block1) - .flatMap(_.unwrap) - .futureValueUS - - // block2: second proposal to update the topology change delay, making it fully authorized - proc - .processEnvelopes(SequencerCounter(1), SequencedTime(ts(1)), None, block2) - .flatMap(_.unwrap) - .futureValueUS - - // block3: any topology transaction is now processed with the updated topology change delay - proc - .processEnvelopes(SequencerCounter(2), SequencedTime(ts(2)), None, block3) - .flatMap(_.unwrap) - .futureValueUS - - // ======================================== - // check the applied topology change delays - - // 1. fetch the proposal from block1 at a time when it has become effective - val storedSynchronizerParametersProposal = - fetchTx(store, ts(0).plusSeconds(1), isProposal = true) - .collectOfMapping[SynchronizerParametersState] - .result - .loneElement - // the proposal itself should be processed with the default topology change delay - storedSynchronizerParametersProposal.validFrom.value - storedSynchronizerParametersProposal.sequenced.value shouldBe initialTopologyChangeDelay - - // 2. fetch the latest fully authorized synchronizer parameters transaction from block2 at a time when it has become effective - val storedSynchronizerParametersUpdate = fetchTx(store, ts(1).plusSeconds(1)) - .collectOfMapping[SynchronizerParametersState] - .result - .loneElement - // the transaction to change the topology change delay itself should still be processed with the default topology change delay - storedSynchronizerParametersUpdate.validFrom.value - storedSynchronizerParametersUpdate.sequenced.value shouldBe initialTopologyChangeDelay - - // 3. 
fetch the topology transaction from block3 at a time when it has become effective - val storedNSD3 = fetchTx(store, ts(2).plusSeconds(1)) - .collectOfMapping[NamespaceDelegation] - .filter(_.mapping.namespace == ns3) - .result - .loneElement - // the transaction should be processed with the updated topology change delay - storedNSD3.validFrom.value - storedNSD3.sequenced.value shouldBe updatedTopologyChangeDelay - } } } } diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/store/DownloadTopologyStateForInitializationServiceTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/store/DownloadTopologyStateForInitializationServiceTest.scala index 3908552320..bb2d302872 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/store/DownloadTopologyStateForInitializationServiceTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/store/DownloadTopologyStateForInitializationServiceTest.scala @@ -4,7 +4,6 @@ package com.digitalasset.canton.topology.store import cats.syntax.option.* -import com.digitalasset.canton.FailOnShutdown import com.digitalasset.canton.config.CantonRequireTypes.String300 import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.FutureUnlessShutdown @@ -13,12 +12,16 @@ import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime import com.digitalasset.canton.topology.store.StoredTopologyTransactions.GenericStoredTopologyTransactions import com.digitalasset.canton.topology.store.TopologyStoreId.SynchronizerStore import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction +import com.digitalasset.canton.{FailOnShutdown, HasActorSystem} +import org.apache.pekko.NotUsed +import org.apache.pekko.stream.scaladsl.{Sink, Source} import org.scalatest.wordspec.AsyncWordSpec trait DownloadTopologyStateForInitializationServiceTest extends AsyncWordSpec with TopologyStoreTestBase - with FailOnShutdown { + with FailOnShutdown + with HasActorSystem { protected def mkStore( synchronizerId: PhysicalSynchronizerId @@ -91,6 +94,9 @@ trait DownloadTopologyStateForInitializationServiceTest .map(_ => store) } + private def toFuture[A](source: Source[A, NotUsed]): FutureUnlessShutdown[Seq[A]] = + FutureUnlessShutdown.outcomeF(source.runWith(Sink.seq[A])) + "DownloadTopologyStateForInitializationService" should { "return a valid topology state" when { "there's only one SynchronizerTrustCertificate" in { @@ -101,9 +107,8 @@ trait DownloadTopologyStateForInitializationServiceTest sequencingTimeLowerBoundExclusive = None, loggerFactory, ) - storedTxs <- service.initialSnapshot(dtc_p2_synchronizer1.mapping.participantId) + result <- toFuture(service.initialSnapshot(dtc_p2_synchronizer1.mapping.participantId)) } yield { - import storedTxs.result // all transactions should be valid and not expired result.foreach(_.validUntil shouldBe empty) result.map(_.transaction) shouldBe Seq(dnd_p1seq, ptp_fred_p1, dtc_p2_synchronizer1) @@ -117,9 +122,8 @@ trait DownloadTopologyStateForInitializationServiceTest sequencingTimeLowerBoundExclusive = None, loggerFactory, ) - storedTxs <- service.initialSnapshot(dtc_p2_synchronizer1.mapping.participantId) + result <- toFuture(service.initialSnapshot(dtc_p2_synchronizer1.mapping.participantId)) } yield { - import storedTxs.result // all transactions should be valid and not expired result.foreach(_.validUntil shouldBe empty) 
result.map(_.transaction) shouldBe Seq(dnd_p1seq, ptp_fred_p1, dtc_p2_synchronizer1) @@ -135,9 +139,8 @@ trait DownloadTopologyStateForInitializationServiceTest sequencingTimeLowerBoundExclusive = Some(ts6), loggerFactory, ) - storedTxs <- service.initialSnapshot(dtc_p2_synchronizer1.mapping.participantId) + result <- toFuture(service.initialSnapshot(dtc_p2_synchronizer1.mapping.participantId)) } yield { - import storedTxs.result // all transactions should have a validUntil <= ts6 forAll(result)(_.validUntil.map(_.value) should be <= Option(ts6)) result shouldBe bootstrapTransactionsWithUpdates @@ -155,9 +158,8 @@ trait DownloadTopologyStateForInitializationServiceTest sequencingTimeLowerBoundExclusive = None, loggerFactory, ) - storedTxs <- service.initialSnapshot(med1Id) + result <- toFuture(service.initialSnapshot(med1Id)) } yield { - import storedTxs.result // all transactions should be valid and not expired result.foreach(_.validUntil shouldBe empty) result.map(_.transaction) shouldBe Seq( @@ -177,9 +179,8 @@ trait DownloadTopologyStateForInitializationServiceTest sequencingTimeLowerBoundExclusive = None, loggerFactory, ) - storedTxs <- service.initialSnapshot(med1Id) + result <- toFuture(service.initialSnapshot(med1Id)) } yield { - import storedTxs.result // all transactions should be valid and not validUntil capped at ts6 result.foreach(_.validUntil.foreach(_.value should be <= ts6)) result.map(_.transaction) shouldBe Seq( @@ -225,9 +226,8 @@ trait DownloadTopologyStateForInitializationServiceTest sequencingTimeLowerBoundExclusive = None, loggerFactory, ) - storedTxs <- service.initialSnapshot(p2Id) + result <- toFuture(service.initialSnapshot(p2Id)) } yield { - import storedTxs.result // all transactions should be valid and not expired result.foreach(_.validUntil.foreach(_.value should be < ts6)) result diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTest.scala index 70f2357ed7..1b19420d6e 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTest.scala @@ -5,10 +5,10 @@ package com.digitalasset.canton.topology.store import cats.syntax.option.* import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.FailOnShutdown import com.digitalasset.canton.config.CantonRequireTypes.{String255, String300} import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.topology.processing.{ EffectiveTime, InitialTopologySnapshotValidator, @@ -16,7 +16,6 @@ import com.digitalasset.canton.topology.processing.{ } import com.digitalasset.canton.topology.store.StoredTopologyTransactions.PositiveStoredTopologyTransactions import com.digitalasset.canton.topology.store.TopologyStore.EffectiveStateChange -import com.digitalasset.canton.topology.store.TopologyTransactionRejection.InvalidTopologyMapping import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction import com.digitalasset.canton.topology.transaction.TopologyMapping.Code import com.digitalasset.canton.topology.transaction.{TopologyMapping, *} @@ -28,10 +27,16 @@ import com.digitalasset.canton.topology.{ } import 
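initialSnapshot now hands back a Source instead of materialized StoredTopologyTransactions, so every case here drains it through the toFuture helper defined above. The core of that adaptation, self-contained:

import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.stream.scaladsl.{Sink, Source}
import scala.concurrent.Future

object StreamCollectDemo extends App {
  // An ActorSystem in implicit scope provides the stream materializer,
  // which is what mixing in HasActorSystem gives the tests.
  implicit val system: ActorSystem = ActorSystem("demo")

  val source: Source[Int, _] = Source(1 to 3)
  // Drain the stream into an in-memory Seq, as toFuture does with Sink.seq.
  val collected: Future[Seq[Int]] = source.runWith(Sink.seq)

  collected.onComplete(r => { println(r); system.terminate() })(system.dispatcher)
}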
com.digitalasset.canton.util.MonadUtil import com.digitalasset.canton.version.ProtocolVersion +import com.digitalasset.canton.{FailOnShutdown, HasActorSystem} +import org.apache.pekko.stream.scaladsl.Sink import org.scalatest.Assertion import org.scalatest.wordspec.AsyncWordSpec -trait TopologyStoreTest extends AsyncWordSpec with TopologyStoreTestBase with FailOnShutdown { +trait TopologyStoreTest + extends AsyncWordSpec + with TopologyStoreTestBase + with FailOnShutdown + with HasActorSystem { val testData = new TopologyStoreTestData(testedProtocolVersion, loggerFactory, executionContext) import testData.* @@ -196,7 +201,8 @@ trait TopologyStoreTest extends AsyncWordSpec with TopologyStoreTestBase with Fa ts5 -> (ptp_fred_p1, None, None), ts5 -> (dtc_p2_synchronizer1, ts6.some, None), ts6 -> (dtc_p2_synchronizer1_update, None, None), - ts6 -> (mds_med1_synchronizer1_invalid, ts6.some, s"No delegation found for keys ${seqKey.fingerprint}".some), + ts6 -> (mds_med1_synchronizer1_invalid, ts6.some, + s"No delegation found for keys ${seqKey.fingerprint}".some), ).map { case (from, (tx, until, rejection)) => StoredTopologyTransaction( SequencedTime(from), @@ -468,12 +474,11 @@ trait TopologyStoreTest extends AsyncWordSpec with TopologyStoreTestBase with Fa "able to inspect" in { val store = mk(synchronizer1_p1p2_physicalSynchronizerId) - for { _ <- new InitialTopologySnapshotValidator( pureCrypto = testData.factory.syncCryptoClient.crypto.pureCrypto, store = store, - timeouts = timeouts, + validateInitialSnapshot = true, loggerFactory = loggerFactory, ).validateAndApplyInitialTopologySnapshot(bootstrapTransactions) .valueOrFail("topology bootstrap") @@ -574,11 +579,11 @@ trait TopologyStoreTest extends AsyncWordSpec with TopologyStoreTestBase with Fa allParties shouldBe Set( dtc_p1_synchronizer1.mapping.participantId.adminParty, ptp_fred_p1.mapping.partyId, - dtc_p2_synchronizer1.mapping.participantId.adminParty, + // p2 cannot appear here as OTKP2 is only a proposal ) onlyFred shouldBe Set(ptp_fred_p1.mapping.partyId) fredFullySpecified shouldBe Set(ptp_fred_p1.mapping.partyId) - onlyParticipant2 shouldBe Set(dtc_p2_synchronizer1.mapping.participantId.adminParty) + onlyParticipant2 shouldBe Set() // p2 cannot appear as OTKP2 is only a proposal neitherParty shouldBe Set.empty } } @@ -613,7 +618,7 @@ trait TopologyStoreTest extends AsyncWordSpec with TopologyStoreTestBase with Fa _ <- new InitialTopologySnapshotValidator( factory.syncCryptoClient.crypto.pureCrypto, store, - timeouts, + validateInitialSnapshot = true, loggerFactory, ).validateAndApplyInitialTopologySnapshot(bootstrapTransactions) .valueOrFail("topology bootstrap") @@ -634,13 +639,17 @@ trait TopologyStoreTest extends AsyncWordSpec with TopologyStoreTestBase with Fa _ <- update(store, ts5, add = Seq(dtc_p2_synchronizer1)) _ <- update(store, ts6, add = Seq(mds_med1_synchronizer1)) - transactionsAtTs6 <- store.findEssentialStateAtSequencedTime( - asOfInclusive = SequencedTime(ts6), - includeRejected = true, + transactionsAtTs6 <- FutureUnlessShutdown.outcomeF( + store + .findEssentialStateAtSequencedTime( + asOfInclusive = SequencedTime(ts6), + includeRejected = true, + ) + .runWith(Sink.seq) ) } yield { expectTransactions( - transactionsAtTs6, + StoredTopologyTransactions(transactionsAtTs6), Seq( otk_p1, dtc_p2_synchronizer1, @@ -657,7 +666,7 @@ trait TopologyStoreTest extends AsyncWordSpec with TopologyStoreTestBase with Fa _ <- new InitialTopologySnapshotValidator( factory.syncCryptoClient.crypto.pureCrypto, store, - timeouts, + 
validateInitialSnapshot = true, loggerFactory, ).validateAndApplyInitialTopologySnapshot(bootstrapTransactions) .valueOrFail("topology bootstrap") @@ -700,14 +709,22 @@ trait TopologyStoreTest extends AsyncWordSpec with TopologyStoreTestBase with Fa ), ) - essentialStateTransactions <- store.findEssentialStateAtSequencedTime( - SequencedTime(ts6), - includeRejected = false, + essentialStateTransactions <- FutureUnlessShutdown.outcomeF( + store + .findEssentialStateAtSequencedTime( + SequencedTime(ts6), + includeRejected = false, + ) + .runWith(Sink.seq) ) - essentialStateTransactionsWithRejections <- store.findEssentialStateAtSequencedTime( - SequencedTime(ts6), - includeRejected = true, + essentialStateTransactionsWithRejections <- FutureUnlessShutdown.outcomeF( + store + .findEssentialStateAtSequencedTime( + SequencedTime(ts6), + includeRejected = true, + ) + .runWith(Sink.seq) ) upcomingTransactions <- store.findUpcomingEffectiveChanges(asOfInclusive = ts4) @@ -767,7 +784,7 @@ trait TopologyStoreTest extends AsyncWordSpec with TopologyStoreTestBase with Fa // Essential state currently encompasses all transactions at the specified time expectTransactions( - essentialStateTransactions, + StoredTopologyTransactions(essentialStateTransactions), bootstrapTransactions.result .filter(tx => tx.validFrom.value <= ts6 && tx.rejectionReason.isEmpty) .map(_.transaction), @@ -775,7 +792,7 @@ trait TopologyStoreTest extends AsyncWordSpec with TopologyStoreTestBase with Fa // Essential state with rejection currently encompasses all transactions at the specified time expectTransactions( - essentialStateTransactionsWithRejections, + StoredTopologyTransactions(essentialStateTransactionsWithRejections), bootstrapTransactions.result.map(_.transaction), ) @@ -842,7 +859,11 @@ trait TopologyStoreTest extends AsyncWordSpec with TopologyStoreTestBase with Fa additions = Seq( ValidatedTopologyTransaction( bad_otk, - Some(TopologyTransactionRejection.InvalidTopologyMapping("bad signature")), + Some( + TopologyTransactionRejection.RequiredMapping.InvalidTopologyMapping( + "bad signature" + ) + ), ), ValidatedTopologyTransaction(good_otk), ), @@ -952,7 +973,8 @@ trait TopologyStoreTest extends AsyncWordSpec with TopologyStoreTestBase with Fa ), ValidatedTopologyTransaction( transaction = rejectedPartyToParticipant, - rejectionReason = Some(InvalidTopologyMapping("sad")), + rejectionReason = + Some(TopologyTransactionRejection.RequiredMapping.InvalidTopologyMapping("sad")), ), ValidatedTopologyTransaction( transaction = proposedPartyToParticipant, diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTestData.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTestData.scala index 131448409c..3f440c5d22 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTestData.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTestData.scala @@ -150,7 +150,6 @@ class TopologyStoreTestData( synchronizer1_p1p2_synchronizerId, DynamicSynchronizerParameters .initialValues( - topologyChangeDelay = NonNegativeFiniteDuration.Zero, protocolVersion = testedProtocolVersion, mediatorReactionTimeout = NonNegativeFiniteDuration.Zero, ), @@ -163,8 +162,7 @@ class TopologyStoreTestData( synchronizer1_p1p2_synchronizerId, DynamicSynchronizerParameters .initialValues( - topologyChangeDelay = NonNegativeFiniteDuration.Zero, - protocolVersion = 
testedProtocolVersion, + protocolVersion = testedProtocolVersion ), ) )(dnd_p1p2_keys*) diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/store/db/DbTopologyStoreTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/store/db/DbTopologyStoreTest.scala index 02d5430266..49c875b57c 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/store/db/DbTopologyStoreTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/store/db/DbTopologyStoreTest.scala @@ -69,7 +69,7 @@ trait DbTopologyStoreTest extends TopologyStoreTest with DbTopologyStoreHelper { _ <- new InitialTopologySnapshotValidator( testData.factory.syncCryptoClient.crypto.pureCrypto, store, - timeouts, + validateInitialSnapshot = true, loggerFactory, ).validateAndApplyInitialTopologySnapshot(topologySnapshot) .valueOrFail("topology bootstrap") diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/OptionalTopologyMappingChecksTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/OptionalTopologyMappingChecksTest.scala new file mode 100644 index 0000000000..783d1ff5b0 --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/OptionalTopologyMappingChecksTest.scala @@ -0,0 +1,155 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.transaction + +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} +import com.digitalasset.canton.topology.store.TopologyStoreId.SynchronizerStore +import com.digitalasset.canton.topology.store.{ + TopologyStore, + TopologyTransactionRejection, + ValidatedTopologyTransaction, +} +import com.digitalasset.canton.topology.transaction.checks.OptionalTopologyMappingChecks +import com.digitalasset.canton.topology.{DefaultTestIdentities, MediatorId, SequencerId} + +import scala.annotation.nowarn + +@nowarn("msg=match may not be exhaustive") +class OptionalTopologyMappingChecksTest + extends BaseTopologyMappingChecksTest[OptionalTopologyMappingChecks] { + + override protected def mkChecks( + store: TopologyStore[SynchronizerStore] + ): OptionalTopologyMappingChecks = + new OptionalTopologyMappingChecks( + store, + loggerFactory, + ) + + "OptionalTopologyMappingChecks" when { + import DefaultTestIdentities.{synchronizerId, participant1, party1} + import factory.TestingTransactions.* + + "validating SynchronizerTrustCertificate" should { + "reject a removal when the participant still hosts a party" in { + val (checks, store) = mk() + val ptp = factory.mkAdd( + PartyToParticipant.tryCreate( + party1, + PositiveInt.one, + Seq(participant1 -> ParticipantPermission.Submission), + ) + ) + addToStore( + store, + ptp, + ) + val prior = factory.mkAdd(SynchronizerTrustCertificate(participant1, synchronizerId)) + + val dtc = + factory.mkRemove(SynchronizerTrustCertificate(participant1, synchronizerId)) + checkTransaction(checks, dtc, Some(prior)) shouldBe Left( + TopologyTransactionRejection.OptionalMapping + .ParticipantStillHostsParties(participant1, Seq(party1)) + ) + } + } + "validating MediatorSynchronizerState" should { + + "report MembersCannotRejoinSynchronizer for mediators that are being re-onboarded" in { + val (checks, 
store) = mk() + val (Seq(med1, med2, med3), transactions) = generateMemberIdentities(3, MediatorId(_)) + + val Seq(group0, group1) = mkMediatorGroups( + PositiveInt.one, + NonNegativeInt.zero -> Seq(med1, med3), + NonNegativeInt.one -> Seq(med2, med3), + ) + + addToStore(store, (transactions :+ group0 :+ group1)*) + + val Seq(group0RemoveMed1, group1RemoveMed2) = mkMediatorGroups( + PositiveInt.two, + NonNegativeInt.zero -> Seq(med3), + NonNegativeInt.one -> Seq(med3), + ) + + store + .update( + SequencedTime(ts1), + EffectiveTime(ts1), + removeMapping = Map( + group0.mapping.uniqueKey -> PositiveInt.one, + group1.mapping.uniqueKey -> PositiveInt.one, + ), + removeTxs = Set.empty, + additions = Seq( + ValidatedTopologyTransaction(group0RemoveMed1), + ValidatedTopologyTransaction(group1RemoveMed2), + ), + ) + .futureValueUS + + val Seq(med1RejoinsGroup0, med2RejoinsGroup0) = mkMediatorGroups( + PositiveInt.three, + // try joining the same group + NonNegativeInt.zero -> Seq(med1, med3), + // try joining another group + NonNegativeInt.zero -> Seq(med2, med3), + ) + + checkTransaction(checks, med1RejoinsGroup0, Some(group0RemoveMed1)) shouldBe Left( + TopologyTransactionRejection.OptionalMapping.MembersCannotRejoinSynchronizer(Seq(med1)) + ) + + checkTransaction(checks, med2RejoinsGroup0, Some(group0RemoveMed1)) shouldBe Left( + TopologyTransactionRejection.OptionalMapping.MembersCannotRejoinSynchronizer(Seq(med2)) + ) + } + + } + + "validating SequencerSynchronizerState" should { + + "report MembersCannotRejoinSynchronizer for sequencers that are being re-onboarded" in { + val (checks, store) = mk() + val (Seq(seq1, seq2), transactions) = generateMemberIdentities(2, SequencerId(_)) + + val sds_S1_S2 = makeSynchronizerState( + PositiveInt.one, + seq1, + seq2, + ) + + addToStore(store, (transactions :+ sds_S1_S2)*) + + val sds_S1 = makeSynchronizerState(PositiveInt.two, seq1) + + store + .update( + SequencedTime(ts1), + EffectiveTime(ts1), + removeMapping = Map( + sds_S1.mapping.uniqueKey -> PositiveInt.one + ), + removeTxs = Set.empty, + additions = Seq( + ValidatedTopologyTransaction(sds_S1) + ), + ) + .futureValueUS + + val sds_S1_rejoining_S2 = makeSynchronizerState(PositiveInt.three, seq1, seq2) + + checkTransaction(checks, sds_S1_rejoining_S2, Some(sds_S1)) shouldBe Left( + TopologyTransactionRejection.OptionalMapping.MembersCannotRejoinSynchronizer(Seq(seq2)) + ) + } + + } + + } + +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/RequiredTopologyMappingChecksTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/RequiredTopologyMappingChecksTest.scala new file mode 100644 index 0000000000..f44fc311ba --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/RequiredTopologyMappingChecksTest.scala @@ -0,0 +1,1150 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
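// Context for the new file below: together with the
// OptionalTopologyMappingChecksTest added above, it replaces the monolithic
// ValidatingTopologyMappingChecksTest deleted at the end of this diff. The
// checks are split into RequiredTopologyMappingChecks and
// OptionalTopologyMappingChecks, and the rejection constructors move under
// TopologyTransactionRejection.RequiredMapping and
// TopologyTransactionRejection.OptionalMapping. A hedged sketch of how a
// caller adapts to the new nesting (only constructors visible in this diff
// are shown; the full set of members is assumed):
//
//   rejection match {
//     case TopologyTransactionRejection.RequiredMapping.InvalidTopologyMapping(err) =>
//       logger.warn(s"required-mapping check failed: $err")
//     case TopologyTransactionRejection.OptionalMapping.MembersCannotRejoinSynchronizer(members) =>
//       logger.warn(s"members cannot rejoin: $members")
//     case other =>
//       logger.warn(s"transaction rejected: $other")
//   }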
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.transaction + +import cats.instances.order.* +import cats.syntax.either.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.crypto.{Fingerprint, SigningPublicKey} +import com.digitalasset.canton.protocol.{DynamicSynchronizerParameters, OnboardingRestriction} +import com.digitalasset.canton.time.PositiveSeconds +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.DefaultTestIdentities.{ + mediatorId, + sequencerId, + synchronizerId, +} +import com.digitalasset.canton.topology.TopologyStateProcessor.MaybePending +import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} +import com.digitalasset.canton.topology.store.* +import com.digitalasset.canton.topology.store.TopologyStoreId.SynchronizerStore +import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStore +import com.digitalasset.canton.topology.transaction.DelegationRestriction.{ + CanSignAllButNamespaceDelegations, + CanSignAllMappings, +} +import com.digitalasset.canton.topology.transaction.ParticipantPermission.{ + Confirmation, + Observation, + Submission, +} +import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction +import com.digitalasset.canton.topology.transaction.TopologyChangeOp.Replace +import com.digitalasset.canton.topology.transaction.TopologyMapping.Code +import com.digitalasset.canton.topology.transaction.checks.TopologyMappingChecks.PendingChangesLookup +import com.digitalasset.canton.topology.transaction.checks.{ + RequiredTopologyMappingChecks, + TopologyMappingChecks, +} +import com.digitalasset.canton.{ + BaseTest, + FailOnShutdown, + HasExecutionContext, + ProtocolVersionChecksAnyWordSpec, +} +import org.scalatest.wordspec.AnyWordSpec + +import java.time +import scala.annotation.nowarn +import scala.language.implicitConversions + +private[transaction] abstract class BaseTopologyMappingChecksTest[T <: TopologyMappingChecks] + extends AnyWordSpec + with BaseTest + with HasExecutionContext + with ProtocolVersionChecksAnyWordSpec + with FailOnShutdown { + + protected def mkChecks(store: TopologyStore[SynchronizerStore]): T + + protected lazy val factory = new TestingOwnerWithKeys( + DefaultTestIdentities.mediatorId, + loggerFactory, + initEc = parallelExecutionContext, + ) + + protected def mk() = { + val store = + new InMemoryTopologyStore( + SynchronizerStore(DefaultTestIdentities.physicalSynchronizerId), + testedProtocolVersion, + loggerFactory, + timeouts, + ) + val check = mkChecks(store) + (check, store) + } + protected def addToStore( + store: TopologyStore[SynchronizerStore], + transactions: GenericSignedTopologyTransaction* + ): Unit = + store + .update( + sequenced = SequencedTime.MinValue, + effective = EffectiveTime.MinValue, + removeMapping = Map.empty, + removeTxs = Set.empty, + additions = transactions.map(ValidatedTopologyTransaction(_)), + ) + .futureValueUS + + protected def generateMemberIdentities[M <: Member]( + numMembers: Int, + uidToMember: UniqueIdentifier => M, + ): (Seq[M], Seq[GenericSignedTopologyTransaction]) = { + val allKeys = { + import factory.SigningKeys.* + Seq(key1, key2, key3, key4, key5, key6) + } + val (memberIds, identityTransactions) = (1 to numMembers).map { idx => + val key = allKeys(idx) + val member = + uidToMember(UniqueIdentifier.tryCreate(s"member$idx", 
Namespace(key.fingerprint))) + member -> List( + factory.mkAdd( + NamespaceDelegation.tryCreate(member.namespace, key, CanSignAllMappings), + key, + ), + factory.mkAdd(OwnerToKeyMapping.tryCreate(member, NonEmpty(Seq, key)), key), + ) + }.unzip + + memberIds -> identityTransactions.flatten + } + + protected def mkMediatorGroups( + serial: PositiveInt, + groupSetup: (NonNegativeInt, Seq[MediatorId])* + ): Seq[SignedTopologyTransaction[TopologyChangeOp.Replace, MediatorSynchronizerState]] = + groupSetup.map { case (group, mediators) => + factory.mkAdd( + MediatorSynchronizerState + .create( + synchronizerId, + group, + PositiveInt.one, + active = mediators, + Seq.empty, + ) + .value, + // the signing key is not relevant for the test + signingKey = factory.SigningKeys.key1, + serial = serial, + ) + } + + protected def makeSynchronizerState( + serial: PositiveInt, + sequencers: SequencerId* + ): SignedTopologyTransaction[TopologyChangeOp.Replace, SequencerSynchronizerState] = + factory.mkAdd( + SequencerSynchronizerState + .create( + synchronizerId, + PositiveInt.one, + active = sequencers, + Seq.empty, + ) + .value, + // the signing key is not relevant for the test + signingKey = factory.SigningKeys.key1, + serial = serial, + ) + def checkTransaction( + checks: TopologyMappingChecks, + toValidate: GenericSignedTopologyTransaction, + inStore: Option[GenericSignedTopologyTransaction] = None, + pendingChanges: PendingChangesLookup = Map.empty, + ): Either[TopologyTransactionRejection, Unit] = + checks + .checkTransaction(EffectiveTime.MaxValue, toValidate, inStore, pendingChanges) + .value + .futureValueUS + + implicit def toHostingParticipant( + participantToPermission: (ParticipantId, ParticipantPermission) + ): HostingParticipant = + HostingParticipant(participantToPermission._1, participantToPermission._2) + +} + +@nowarn("msg=match may not be exhaustive") +class RequiredTopologyMappingChecksTest + extends BaseTopologyMappingChecksTest[RequiredTopologyMappingChecks] { + + override protected def mkChecks( + store: TopologyStore[SynchronizerStore] + ): RequiredTopologyMappingChecks = + new RequiredTopologyMappingChecks( + store, + loggerFactory, + ) + + "RequiredTopologyMappingChecks" when { + import DefaultTestIdentities.{synchronizerId, participant1, participant2, participant3, party1} + import factory.TestingTransactions.* + + def checkTransaction( + checks: TopologyMappingChecks, + toValidate: GenericSignedTopologyTransaction, + inStore: Option[GenericSignedTopologyTransaction] = None, + pendingChanges: PendingChangesLookup = Map.empty, + ): Either[TopologyTransactionRejection, Unit] = + checks + .checkTransaction(EffectiveTime.MaxValue, toValidate, inStore, pendingChanges) + .value + .futureValueUS + + implicit def toHostingParticipant( + participantToPermission: (ParticipantId, ParticipantPermission) + ): HostingParticipant = + HostingParticipant(participantToPermission._1, participantToPermission._2) + + "validating any Mapping" should { + "reject removal of non-existent mappings" in { + import factory.SigningKeys.key1 + val (checks, _) = mk() + + val removeNsdSerial1 = factory.mkRemove( + NamespaceDelegation.tryCreate(Namespace(key1.fingerprint), key1, CanSignAllMappings), + serial = PositiveInt.one, + ) + // also check that for serial > 1 + val removeNsdSerial3 = factory.mkRemove( + NamespaceDelegation.tryCreate(Namespace(key1.fingerprint), key1, CanSignAllMappings), + serial = PositiveInt.three, + ) + checkTransaction(checks, removeNsdSerial1) shouldBe Left( + 
TopologyTransactionRejection.RequiredMapping.NoCorrespondingActiveTxToRevoke( + removeNsdSerial1.mapping + ) + ) + checkTransaction(checks, removeNsdSerial3) shouldBe Left( + TopologyTransactionRejection.RequiredMapping.NoCorrespondingActiveTxToRevoke( + removeNsdSerial3.mapping + ) + ) + } + + "reject only REPLACE transactions with the highest possible serial" in { + import factory.SigningKeys.key1 + val (checks, _) = mk() + + val maxSerialReplace = factory.mkAdd( + NamespaceDelegation.tryCreate(Namespace(key1.fingerprint), key1, CanSignAllMappings), + serial = PositiveInt.MaxValue, + ) + checkTransaction(checks, maxSerialReplace) shouldBe Left( + TopologyTransactionRejection.RequiredMapping.InvalidTopologyMapping( + s"The serial for a REPLACE must be less than ${PositiveInt.MaxValue}." + ) + ) + + val maxSerialMinsOneReplace = factory.mkAdd( + NamespaceDelegation.tryCreate(Namespace(key1.fingerprint), key1, CanSignAllMappings), + serial = PositiveInt.tryCreate(PositiveInt.MaxValue.value - 1), + ) + val maxSerialRemove = factory.mkRemove( + NamespaceDelegation.tryCreate(Namespace(key1.fingerprint), key1, CanSignAllMappings), + serial = PositiveInt.MaxValue, + ) + + checkTransaction(checks, toValidate = maxSerialMinsOneReplace) shouldBe Right(()) + checkTransaction( + checks, + toValidate = maxSerialRemove, + inStore = Some(maxSerialMinsOneReplace), + ) shouldBe Right(()) + } + + "reject if removal also changes the content" in { + import factory.SigningKeys.{key1, key2} + val (checks, _) = mk() + + val removeNs1k2 = factory.mkRemove( + NamespaceDelegation + .tryCreate( + Namespace(key1.fingerprint), + key2, + // changing the mapping compared to ns1k2 by setting CanSignAllMappings + CanSignAllMappings, + ), + serial = PositiveInt.two, + ) + checkTransaction(checks, removeNs1k2, Some(ns1k2)) shouldBe Left( + TopologyTransactionRejection.RequiredMapping.RemoveMustNotChangeMapping( + removeNs1k2.mapping, + ns1k2.mapping, + ) + ) + } + + "respect pending changes when loading additional data for validations" in { + import factory.SigningKeys.{key1, key2, key3} + val (checks, store) = mk() + val ns1 = Namespace(key1.fingerprint) + val ns2 = Namespace(key2.fingerprint) + val ns3 = Namespace(key3.fingerprint) + + val nsd1Replace_1 = + factory.mkAdd(NamespaceDelegation.tryCreate(ns1, key1, CanSignAllMappings)) + val nsd1Remove_2 = factory.mkRemove( + NamespaceDelegation.tryCreate(ns1, key1, CanSignAllMappings), + serial = PositiveInt.two, + ) + val nsd1ReplaceProposal_3 = factory.mkAdd( + NamespaceDelegation.tryCreate(ns1, key1, CanSignAllMappings), + serial = PositiveInt.three, + isProposal = true, + ) + + val nsd2Replace_1 = + factory.mkAdd(NamespaceDelegation.tryCreate(ns2, key2, CanSignAllMappings)) + val nsd2Remove_2 = factory.mkRemove( + NamespaceDelegation.tryCreate(ns2, key2, CanSignAllMappings), + serial = PositiveInt.two, + ) + + val nsd3Replace_1 = + factory.mkAdd(NamespaceDelegation.tryCreate(ns3, key3, CanSignAllMappings)) + + store + .update( + SequencedTime(ts), + EffectiveTime(ts), + removeMapping = Map.empty, + removeTxs = Set.empty, + additions = Seq(nsd1Replace_1, nsd2Replace_1).map(ValidatedTopologyTransaction(_)), + ) + .futureValueUS + + store + .update( + SequencedTime(ts + seconds(1)), + EffectiveTime(ts + seconds(1)), + removeMapping = Map(nsd1Remove_2.mapping.uniqueKey -> nsd1Remove_2.serial), + removeTxs = Set.empty, + additions = Seq(ValidatedTopologyTransaction(nsd1Remove_2)), + ) + .futureValueUS + + store + .update( + SequencedTime(ts + seconds(2)), + EffectiveTime(ts 
+ seconds(2)), + removeMapping = Map.empty, + removeTxs = Set.empty, + additions = Seq(ValidatedTopologyTransaction(nsd1ReplaceProposal_3)), + ) + .futureValueUS + + /* + * The store contains the following transactions: + * TS0: Replace NSD1, Replace NSD2 + * TS1: Remove NSD1 + * TS2: Replace Proposal NSD1 + */ + + // TS0: load without pending changes + checks + .loadFromStore( + EffectiveTime(ts.immediateSuccessor), + codes = Set(Code.NamespaceDelegation), + pendingChanges = Seq.empty, + ) + .futureValueUS + .value should contain theSameElementsAs Seq(nsd1Replace_1, nsd2Replace_1) + + // TS0: load with Removal NS2 as pending change + checks + .loadFromStore( + EffectiveTime(ts.immediateSuccessor), + codes = Set(Code.NamespaceDelegation), + pendingChanges = Seq(MaybePending(nsd2Remove_2)), + ) + .futureValueUS + .value shouldBe Seq(nsd1Replace_1) + + // TS0: load with Replace NS3 as pending change without prior transactions in the store + checks + .loadFromStore( + EffectiveTime(ts.immediateSuccessor), + codes = Set(Code.NamespaceDelegation), + pendingChanges = Seq(MaybePending(nsd3Replace_1)), + ) + .futureValueUS + .value should contain theSameElementsAs Seq( + nsd1Replace_1, + nsd2Replace_1, + nsd3Replace_1, + ) + + // TS0: load with Replace NS3 as pending change without prior transactions in the store and also matching a + // namespace filter + checks + .loadFromStore( + EffectiveTime(ts.immediateSuccessor), + codes = Set(Code.NamespaceDelegation), + pendingChanges = Seq(MaybePending(nsd3Replace_1)), + filterNamespace = Some(NonEmpty(Seq, ns2, ns3)), + ) + .futureValueUS + .value should contain theSameElementsAs Seq(nsd2Replace_1, nsd3Replace_1) + + // TS1: don't load Remove NS1 from the store + checks + .loadFromStore( + EffectiveTime(ts.immediateSuccessor + seconds(1)), + codes = Set(Code.NamespaceDelegation), + Seq.empty, + ) + .futureValueUS + .value shouldBe Seq(nsd2Replace_1) + + // TS1: don't load Remove NS1 from the store mixed with Remove NS2 as pending change + checks + .loadFromStore( + EffectiveTime(ts.immediateSuccessor + seconds(1)), + codes = Set(Code.NamespaceDelegation), + Seq(MaybePending(nsd2Remove_2)), + ) + .futureValueUS + .value shouldBe Seq.empty + + // TS2: don't load proposals + checks + .loadFromStore( + EffectiveTime(ts.immediateSuccessor + seconds(2)), + codes = Set(Code.NamespaceDelegation), + Seq.empty, + ) + .futureValueUS + .value shouldBe Seq(nsd2Replace_1) + + } + } + + "validating DecentralizedNamespaceDefinition" should { + "reject namespaces not derived from their owners' namespaces" in { + val (checks, store) = mk() + val (keys, namespaces, rootCerts) = setUpRootCerts( + factory.SigningKeys.key1, + factory.SigningKeys.key2, + factory.SigningKeys.key3, + ) + + addToStore(store, rootCerts*) + + val dns = factory.mkAddMultiKey( + DecentralizedNamespaceDefinition + .create( + Namespace(Fingerprint.tryFromString("bogusNamespace")), + PositiveInt.one, + NonEmpty.from(namespaces).value.toSet, + ) + .value, + signingKeys = keys.toSet, + // using serial=2 here to test that we don't special case serial=1 + serial = PositiveInt.two, + ) + + checkTransaction(checks, dns, None) should matchPattern { + case Left(TopologyTransactionRejection.RequiredMapping.InvalidTopologyMapping(err)) + if err.contains("not derived from the owners") => + } + } + + "reject if a namespace delegation with the same namespace already exists" in { + val (checks, store) = mk() + val (keys, namespaces, rootCerts) = setUpRootCerts( + factory.SigningKeys.key1, + factory.SigningKeys.key2, 
+ factory.SigningKeys.key3, + ) + + val dnd_namespace = DecentralizedNamespaceDefinition.computeNamespace(namespaces.toSet) + + // we are creating namespace delegation with the same namespace as the decentralized namespace. + // this nsd however is not actually fully authorized, but for the purpose of this test, we want to see + // that the decentralized namespace definition gets rejected. + val conflicting_nsd = factory.mkAdd( + NamespaceDelegation + .tryCreate(dnd_namespace, factory.SigningKeys.key8, CanSignAllButNamespaceDelegations), + factory.SigningKeys.key8, + ) + addToStore(store, (rootCerts :+ conflicting_nsd)*) + + val dnd = factory.mkAddMultiKey( + DecentralizedNamespaceDefinition + .create( + dnd_namespace, + PositiveInt.one, + NonEmpty.from(namespaces).value.toSet, + ) + .value, + signingKeys = keys.toSet, + serial = PositiveInt.one, + ) + + checkTransaction(checks, dnd, None) shouldBe Left( + TopologyTransactionRejection.RequiredMapping.NamespaceAlreadyInUse(`dnd_namespace`) + ) + } + + "reject if an owning namespace does not have a root certificate" in { + val (checks, store) = mk() + val (keys, namespaces, rootCerts) = setUpRootCerts( + factory.SigningKeys.key1, + factory.SigningKeys.key2, + factory.SigningKeys.key3, + ) + + def createDND(owners: Seq[Namespace], keys: Seq[SigningPublicKey]) = + factory.mkAddMultiKey( + DecentralizedNamespaceDefinition + .create( + DecentralizedNamespaceDefinition.computeNamespace(owners.toSet), + PositiveInt.one, + NonEmpty.from(owners).value.toSet, + ) + .value, + signingKeys = NonEmpty.from(keys).value.toSet, + serial = PositiveInt.one, + ) + + val dnd_k1k2 = createDND(namespaces.take(2), keys.take(2)) + + addToStore(store, (rootCerts :+ dnd_k1k2)*) + + val ns4 = Namespace(factory.SigningKeys.key4.fingerprint) + + val dnd_invalid = createDND( + namespaces.takeRight(2) ++ Seq(ns4, dnd_k1k2.mapping.namespace), + // we don't have to provide all keys for this transaction to be fully authorized, + // because the test doesn't check authorization, just semantic validity. + keys.takeRight(2), + ) + checkTransaction(checks, dnd_invalid, None) should matchPattern { + case Left(TopologyTransactionRejection.RequiredMapping.InvalidTopologyMapping(err)) + if err.contains( + s"No root certificate found for ${Seq(ns4, dnd_k1k2.mapping.namespace).sorted.mkString(", ")}" + ) => + } + } + } + + "validating NamespaceDelegation" should { + "reject a namespace delegation if a decentralized namespace with the same namespace already exists" in { + val (checks, store) = mk() + val (rootKeys, namespaces, rootCerts) = setUpRootCerts( + factory.SigningKeys.key1, + factory.SigningKeys.key2, + factory.SigningKeys.key3, + ) + + val dnd_namespace = DecentralizedNamespaceDefinition.computeNamespace(namespaces.toSet) + + val dnd = factory.mkAddMultiKey( + DecentralizedNamespaceDefinition + .create( + dnd_namespace, + PositiveInt.one, + NonEmpty.from(namespaces).value.toSet, + ) + .value, + signingKeys = rootKeys.toSet, + serial = PositiveInt.one, + ) + + addToStore(store, (rootCerts :+ dnd)*) + + // we are creating namespace delegation with the same namespace as the decentralized namespace. + // even if it is signed by enough owners of the decentralized namespace, we don't allow namespace delegations + // for a decentralized namespace, because + // 1. it goes against the very purpose of a decentralized namespace + // 2. 
the authorization machinery is actually not prepared to deal with it + // A similar effect can be achieved by setting the threshold of the DND to 1 + val conflicting_nsd = factory.mkAddMultiKey( + NamespaceDelegation + .tryCreate(dnd_namespace, factory.SigningKeys.key8, CanSignAllButNamespaceDelegations), + rootKeys.toSet, + ) + + checkTransaction(checks, conflicting_nsd, None) shouldBe Left( + TopologyTransactionRejection.RequiredMapping.NamespaceAlreadyInUse(`dnd_namespace`) + ) + } + } + + "validating PartyToParticipant" should { + + "reject when participants don't have a DTC" in { + val (checks, store) = mk() + addToStore(store, p2_dtc) + + val failureCases = Seq(Seq(participant1), Seq(participant1, participant2)) + + failureCases.foreach { participants => + val ptp = factory.mkAdd( + PartyToParticipant.tryCreate( + party1, + PositiveInt.one, + participants.map[HostingParticipant](_ -> Submission), + ) + ) + checkTransaction(checks, ptp) shouldBe Left( + TopologyTransactionRejection.RequiredMapping.UnknownMembers(Seq(participant1)) + ) + } + } + + "reject when participants don't have an OTK" in { + val (checks, store) = mk() + + addToStore(store, p1_dtc, p2_dtc, p3_dtc) + + val missingKeyCases = Seq(participant1) + + missingKeyCases.foreach { participant => + val ptp = factory.mkAdd( + PartyToParticipant.tryCreate( + party1, + PositiveInt.one, + Seq(participant -> Submission), + ) + ) + checkTransaction(checks, ptp) shouldBe Left( + TopologyTransactionRejection.RequiredMapping.InsufficientKeys(Seq(participant)) + ) + } + } + + "handle conflicts between partyId and existing admin parties from synchronizer trust certificates" in { + // the defaults below are a valid explicit admin party allocation for participant1.adminParty + def mkPTP( + partyId: PartyId = participant1.adminParty, + participants: Seq[HostingParticipant] = + Seq(HostingParticipant(participant1, Submission)), + ) = factory.mkAdd( + PartyToParticipant + .create( + partyId = partyId, + threshold = PositiveInt.one, + participants = participants, + ) + .value + ) + + val (checks, store) = mk() + addToStore(store, p1_otk, p1_dtc, p2_otk, p2_dtc) + + // handle the happy case + checkTransaction(checks, mkPTP()) shouldBe Either.unit + + // unhappy scenarios + val invalidParticipantPermission = Seq( + mkPTP(participants = Seq(HostingParticipant(participant1, Confirmation))), + mkPTP(participants = Seq(HostingParticipant(participant1, Observation))), + ) + + val invalidNumberOfHostingParticipants = mkPTP(participants = + Seq( + HostingParticipant(participant1, Submission), + HostingParticipant(participant2, Submission), + ) + ) + + val foreignParticipant = + mkPTP(participants = Seq(HostingParticipant(participant2, Submission))) + + // we don't need to explicitly check threshold > 1, because we already reject the PTP if participants.size > 1 + // and the threshold can never be higher than the number of participants + + val unhappyCases = invalidParticipantPermission ++ Seq( + foreignParticipant, + invalidNumberOfHostingParticipants, + ) + + forAll(unhappyCases)(ptp => + checkTransaction(checks, ptp) shouldBe Left( + TopologyTransactionRejection.RequiredMapping.PartyIdConflictWithAdminParty( + ptp.mapping.partyId + ) + ) + ) + } + + "report no errors for valid mappings" in { + val (checks, store) = mk() + addToStore(store, p1_otk, p1_dtc, p2_otk, p2_dtc, p3_otk, p3_dtc) + + val validCases = Seq[(PositiveInt, Seq[HostingParticipant])]( + PositiveInt.one -> Seq(participant1 -> Confirmation), + PositiveInt.one -> Seq(participant1 -> 
Submission), + PositiveInt.one -> Seq(participant1 -> Observation, participant2 -> Confirmation), + PositiveInt.two -> Seq(participant1 -> Confirmation, participant2 -> Submission), + PositiveInt.two -> Seq( + participant1 -> Observation, + participant2 -> Submission, + participant3 -> Submission, + ), + ) + + validCases.foreach { case (threshold, participants) => + val ptp = factory.mkAdd( + PartyToParticipant.tryCreate( + party1, + threshold, + participants, + ) + ) + checkTransaction(checks, ptp) shouldBe Either.unit + } + } + + } + + "validating SynchronizerTrustCertificate" should { + + "handle conflicts with existing party allocations" in { + val explicitAdminPartyParticipant1 = factory.mkAdd( + PartyToParticipant + .create( + partyId = participant1.adminParty, + threshold = PositiveInt.one, + participants = Seq(HostingParticipant(participant1, Submission)), + ) + .value + ) + + // we allocate a party with participant2's UID on participant1. + // this is not an explicit admin party allocation, the party just so happens to use the same UID as participant2. + val partyWithParticipant2Uid = factory.mkAdd( + PartyToParticipant + .create( + partyId = participant2.adminParty, + threshold = PositiveInt.one, + participants = Seq(HostingParticipant(participant1, Submission)), + ) + .value + ) + + val dop = factory.mkAdd( + SynchronizerParametersState( + synchronizerId, + DynamicSynchronizerParameters.defaultValues(testedProtocolVersion), + ) + ) + + val (checks, store) = mk() + + // normally it's not possible to have a valid PTP without an already existing DTC of the hosting participants. + // but let's pretend for this check. + addToStore(store, dop, explicitAdminPartyParticipant1, partyWithParticipant2Uid) + + // happy case: we allow the DTC (either a creation or modifying an existing one) + // if there is a valid explicit admin party allocation + checkTransaction(checks, p1_dtc, None) shouldBe Either.unit + + // unhappy case: there already exists a normal party allocation with the same UID + checkTransaction(checks, p2_dtc, None) shouldBe Left( + TopologyTransactionRejection.RequiredMapping.ParticipantIdConflictWithPartyId( + participant2, + partyWithParticipant2Uid.mapping.partyId, + ) + ) + } + + "reject the addition if the synchronizer is locked" in { + Seq(OnboardingRestriction.RestrictedLocked, OnboardingRestriction.UnrestrictedLocked) + .foreach { restriction => + val (checks, store) = mk() + val dop = factory.mkAdd( + SynchronizerParametersState( + synchronizerId, + DynamicSynchronizerParameters + .defaultValues(testedProtocolVersion) + .tryUpdate(onboardingRestriction = restriction), + ) + ) + addToStore(store, dop) + + val dtc = + factory.mkAdd(SynchronizerTrustCertificate(participant1, synchronizerId)) + + checkTransaction(checks, dtc) shouldBe Left( + TopologyTransactionRejection.RequiredMapping.OnboardingRestrictionInPlace( + participant1, + restriction, + None, + ) + ) + } + } + + "reject the addition if the synchronizer is restricted" in { + val (checks, store) = mk() + val dop = factory.mkAdd( + SynchronizerParametersState( + synchronizerId, + DynamicSynchronizerParameters + .defaultValues(testedProtocolVersion) + .tryUpdate(onboardingRestriction = OnboardingRestriction.RestrictedOpen), + ) + ) + addToStore( + store, + dop, + factory.mkAdd( + ParticipantSynchronizerPermission( + synchronizerId, + participant1, + ParticipantPermission.Submission, + None, + None, + ) + ), + ) + + // participant2 does not have permission from the synchronizer to join + checkTransaction( + 
checks, + factory.mkAdd(SynchronizerTrustCertificate(participant2, synchronizerId)), + ) shouldBe Left( + TopologyTransactionRejection.RequiredMapping.OnboardingRestrictionInPlace( + participant2, + OnboardingRestriction.RestrictedOpen, + None, + ) + ) + + // participant1 has been permissioned by the synchronizer + checkTransaction( + checks, + factory.mkAdd(SynchronizerTrustCertificate(participant1, synchronizerId)), + None, + ) shouldBe Either.unit + } + + "reject a rejoining participant" in { + val (checks, store) = mk() + val dtcRemoval = factory.mkRemove( + SynchronizerTrustCertificate( + participant1, + synchronizerId, + ) + ) + addToStore( + store, + dtcRemoval, + ) + val rejoin = + factory.mkAdd( + SynchronizerTrustCertificate(participant1, synchronizerId), + serial = PositiveInt.two, + ) + + checkTransaction(checks, rejoin, Some(dtcRemoval)) shouldBe Left( + TopologyTransactionRejection.RequiredMapping.ParticipantCannotRejoinSynchronizer( + participant1 + ) + ) + } + + } + + "validating MediatorSynchronizerState" should { + + "report no errors for valid mappings" in { + val (checks, store) = mk() + val (Seq(med1, med2), transactions) = generateMemberIdentities(2, MediatorId(_)) + addToStore(store, transactions*) + + val Seq(mds1) = mkMediatorGroups(PositiveInt.one, (NonNegativeInt.zero -> Seq(med1))) + val Seq(mds2) = mkMediatorGroups(PositiveInt.two, (NonNegativeInt.zero -> Seq(med1, med2))) + + checkTransaction(checks, mds1) shouldBe Either.unit + checkTransaction(checks, mds2, Some(mds1)) shouldBe Either.unit + } + + "bail if mediator has no keys" in { + import DefaultTestIdentities.mediatorId + val (checks, store) = mk() + val mds1 = factory.mkAdd( + MediatorSynchronizerState + .create( + synchronizerId, + NonNegativeInt.zero, + PositiveInt.one, + active = Seq(mediatorId), + Seq.empty, + ) + .value, + // the signing key is not relevant for the test + signingKey = factory.SigningKeys.key1, + serial = PositiveInt.one, + ) + checkTransaction(checks, mds1) shouldBe Left( + TopologyTransactionRejection.RequiredMapping.InsufficientKeys( + Seq(mediatorId) + ) + ) + + } + + "report MediatorsAlreadyAssignedToGroups for duplicate mediator assignments" in { + val (checks, store) = mk() + val (Seq(med1, med2, med3), transactions) = generateMemberIdentities(3, MediatorId(_)) + + val Seq(group0, group1, group2) = mkMediatorGroups( + PositiveInt.one, + NonNegativeInt.zero -> Seq(med1), + NonNegativeInt.one -> Seq(med2), + NonNegativeInt.two -> Seq(med1, med2, med3), + ) + + addToStore(store, (transactions :+ group0 :+ group1)*) + + checkTransaction(checks, group2, None) shouldBe Left( + TopologyTransactionRejection.RequiredMapping.MediatorsAlreadyInOtherGroups( + NonNegativeInt.two, + Map(med1 -> NonNegativeInt.zero, med2 -> NonNegativeInt.one), + ) + ) + } + + "report mediators defined both as active and observers" in { + val (Seq(med1, med2), _transactions) = generateMemberIdentities(2, MediatorId(_)) + + MediatorSynchronizerState + .create( + synchronizerId, + NonNegativeInt.zero, + PositiveInt.one, + active = Seq(med1, med2), + observers = Seq(med1), + ) shouldBe Left( + s"the following mediators were defined both as active and observer: $med1" + ) + } + + "handle validation of proposal with a concurrent update of the store" in { + val (checks, store) = mk() + val (Seq(med1, med2), transactions) = generateMemberIdentities(2, MediatorId(_)) + + val Seq(group0_add_med1) = + mkMediatorGroups(PositiveInt.one, NonNegativeInt.zero -> Seq(med1)) + + addToStore(store, (transactions :+ 
group0_add_med1)*) + + val Seq(group0_add_med2) = mkMediatorGroups( + PositiveInt.two, + NonNegativeInt.zero -> Seq(med1, med2), + ) + + // let's pretend that group0_add_med2 was broadcast by other synchronizerOwners + // and became fully authorized (not necessarily effective yet though) and stored + // between determining the previous effective transaction (group0_add_med1) at the start of + // the processing of group0_add_med2 and validating group0_add_med2 + store + .update( + SequencedTime(ts1), + EffectiveTime(ts1), + removeMapping = Map( + group0_add_med1.mapping.uniqueKey -> PositiveInt.one + ), + removeTxs = Set.empty, + additions = Seq( + ValidatedTopologyTransaction(group0_add_med2) + ), + ) + .futureValueUS + checkTransaction(checks, group0_add_med2, Some(group0_add_med1)) shouldBe Right(()) + } + } + + "validating SequencerSynchronizerState" should { + + "report no errors for valid mappings" in { + val (checks, store) = mk() + val (Seq(seq1, seq2), transactions) = generateMemberIdentities(2, SequencerId(_)) + addToStore(store, transactions*) + + val sds1 = makeSynchronizerState(PositiveInt.one, seq1) + val sds2 = makeSynchronizerState(PositiveInt.two, seq1, seq2) + + checkTransaction(checks, sds1) shouldBe Either.unit + checkTransaction(checks, sds2, Some(sds1)) shouldBe Either.unit + } + + "bail if sequencer has no keys" in { + val (checks, store) = mk() + val sss = factory.mkAdd( + SequencerSynchronizerState + .create( + synchronizerId, + PositiveInt.one, + active = Seq(sequencerId), + Seq.empty, + ) + .value, + // the signing key is not relevant for the test + signingKey = factory.SigningKeys.key1, + serial = PositiveInt.one, + ) + checkTransaction(checks, sss) shouldBe Left( + TopologyTransactionRejection.RequiredMapping.InsufficientKeys( + Seq(sequencerId) + ) + ) + + } + + "report sequencers defined both as active and observers" in { + val (Seq(seq1, seq2), _transactions) = generateMemberIdentities(2, SequencerId(_)) + + SequencerSynchronizerState + .create( + synchronizerId, + PositiveInt.one, + active = Seq(seq1, seq2), + observers = Seq(seq1), + ) shouldBe Left( + s"the following sequencers were defined both as active and observer: $seq1" + ) + } + + "handle validation of a proposal with a concurrent update in the store" in { + val (checks, store) = mk() + val (Seq(seq1, seq2), transactions) = generateMemberIdentities(2, SequencerId(_)) + + val sds_add_seq1 = makeSynchronizerState(PositiveInt.one, seq1) + + addToStore(store, (transactions :+ sds_add_seq1)*) + + val sds_add_seq2 = makeSynchronizerState(PositiveInt.two, seq1, seq2) + + // let's pretend that sds_add_seq2 was broadcast by other synchronizerOwners + // and became fully authorized (not necessarily effective yet though) and stored + // between determining the previous effective transaction (sds_add_seq1) at the start of + // the processing of sds_add_seq2 and validating sds_add_seq2. 
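// The comment above describes a read-validate race: the proposal is validated
// against an inStore snapshot that is one fully-authorized update behind the
// store. Below is a self-contained toy model of why the check must tolerate
// finding the very transaction it is validating already stored; the semantics
// here are a simplified assumption, not Canton's actual validation logic.
object ConcurrentUpdateToy extends App {
  final case class Tx(key: String, serial: Int)

  final class Store {
    private var active = Map.empty[String, Tx]
    def put(tx: Tx): Unit = synchronized { active += tx.key -> tx }
    def latest(key: String): Option[Tx] = synchronized(active.get(key))
  }

  // Accept unless the store holds something strictly newer; in particular,
  // accept when the store already contains toValidate itself.
  def validate(toValidate: Tx, inStore: Option[Tx], store: Store): Either[String, Unit] =
    store.latest(toValidate.key) match {
      case Some(latest) if latest.serial > toValidate.serial =>
        Left(s"stale proposal: store already at serial ${latest.serial}")
      case _ => Right(())
    }

  val store = new Store
  val v1 = Tx("sds", 1)
  val v2 = Tx("sds", 2)
  store.put(v1)
  val staleSnapshot = Some(v1) // snapshot taken before v2 arrived
  store.put(v2)                // concurrent, fully-authorized update
  println(validate(v2, staleSnapshot, store)) // Right(()): must not be rejected
}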
+ store + .update( + SequencedTime(ts1), + EffectiveTime(ts1), + removeMapping = Map( + sds_add_seq1.mapping.uniqueKey -> PositiveInt.one + ), + removeTxs = Set.empty, + additions = Seq( + ValidatedTopologyTransaction(sds_add_seq2) + ), + ) + .futureValueUS + checkTransaction(checks, sds_add_seq2, Some(sds_add_seq1)) shouldBe Right(()) + } + + } + + "validating OwnerToKeyMapping" should { + "report no errors for valid mappings" in { + val (checks, _) = mk() + val okm_sequencer = factory.mkAddMultiKey( + OwnerToKeyMapping.tryCreate(sequencerId, NonEmpty(Seq, factory.SigningKeys.key1)), + NonEmpty(Set, factory.SigningKeys.key1), + ) + val okm_mediator = factory.mkAddMultiKey( + OwnerToKeyMapping.tryCreate(mediatorId, NonEmpty(Seq, factory.SigningKeys.key1)), + NonEmpty(Set, factory.SigningKeys.key1), + ) + val okm_participant = factory.mkAddMultiKey( + OwnerToKeyMapping.tryCreate( + participant1, + NonEmpty(Seq, factory.EncryptionKeys.key1, factory.SigningKeys.key1), + ), + NonEmpty(Set, factory.SigningKeys.key1), + ) + + checkTransaction(checks, okm_sequencer) shouldBe Either.unit + checkTransaction(checks, okm_mediator) shouldBe Either.unit + checkTransaction(checks, okm_participant) shouldBe Either.unit + } + "reject minimum key violations" in { + val (checks, _) = mk() + val okm_sequencerNoSigningKey = factory.mkAddMultiKey( + OwnerToKeyMapping.tryCreate(sequencerId, NonEmpty(Seq, factory.EncryptionKeys.key1)), + NonEmpty(Set, factory.SigningKeys.key1), + ) + val okm_mediatorNoSigningKey = factory.mkAddMultiKey( + OwnerToKeyMapping.tryCreate(mediatorId, NonEmpty(Seq, factory.EncryptionKeys.key1)), + NonEmpty(Set, factory.SigningKeys.key1), + ) + val okm_participantNoSigningKey = factory.mkAddMultiKey( + OwnerToKeyMapping.tryCreate(participant1, NonEmpty(Seq, factory.EncryptionKeys.key1)), + NonEmpty(Set, factory.SigningKeys.key1), + ) + val okm_participantNoEncryptionKey = factory.mkAddMultiKey( + OwnerToKeyMapping.tryCreate(participant1, NonEmpty(Seq, factory.SigningKeys.key1)), + NonEmpty(Set, factory.SigningKeys.key1), + ) + + Seq(okm_sequencerNoSigningKey, okm_mediatorNoSigningKey, okm_participantNoSigningKey) + .foreach(tx => + checkTransaction(checks, tx) shouldBe Left( + TopologyTransactionRejection.RequiredMapping.InvalidTopologyMapping( + "OwnerToKeyMapping must contain at least 1 signing key." + ) + ) + ) + checkTransaction(checks, okm_participantNoEncryptionKey) shouldBe Left( + TopologyTransactionRejection.RequiredMapping.InvalidTopologyMapping( + "OwnerToKeyMapping for participants must contain at least 1 encryption key." 
+ ) + ) + } + "reject re-adding of keys" in { + val (checks, _) = mk() + + def mkAdd(serial: PositiveInt) = factory.mkAddMultiKey( + OwnerToKeyMapping.tryCreate(sequencerId, NonEmpty(Seq, factory.SigningKeys.key1)), + NonEmpty(Set, factory.SigningKeys.key1), + serial = serial, + ) + + val okm1 = mkAdd(PositiveInt.one) + val okm2 = factory.mkRemoveTx(okm1) + val okm3 = mkAdd(PositiveInt.three) + checkTransaction(checks, okm3, inStore = Some(okm2)) shouldBe Left( + TopologyTransactionRejection.RequiredMapping.CannotReregisterKeys(sequencerId) + ) + + } + } + } + + private def setUpRootCerts(keys: SigningPublicKey*): ( + NonEmpty[Seq[SigningPublicKey]], + Seq[Namespace], + Seq[SignedTopologyTransaction[Replace, NamespaceDelegation]], + ) = { + val (namespaces, rootCerts) = + keys.map { key => + val namespace = Namespace(key.fingerprint) + namespace -> factory.mkAdd( + NamespaceDelegation.tryCreate( + namespace, + key, + CanSignAllMappings, + ), + signingKey = key, + ) + }.unzip + val keysNE = NonEmpty.from(keys).value + (keysNE, namespaces, rootCerts) + } + + private def seconds(s: Int) = PositiveSeconds.tryCreate(time.Duration.ofSeconds(s.toLong)) + +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/ValidatingTopologyMappingChecksTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/ValidatingTopologyMappingChecksTest.scala deleted file mode 100644 index f05e1ecc01..0000000000 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/ValidatingTopologyMappingChecksTest.scala +++ /dev/null @@ -1,1150 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.transaction - -import cats.instances.order.* -import cats.syntax.either.* -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} -import com.digitalasset.canton.crypto.{Fingerprint, SigningPublicKey} -import com.digitalasset.canton.protocol.{DynamicSynchronizerParameters, OnboardingRestriction} -import com.digitalasset.canton.time.PositiveSeconds -import com.digitalasset.canton.topology.* -import com.digitalasset.canton.topology.DefaultTestIdentities.{mediatorId, sequencerId} -import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} -import com.digitalasset.canton.topology.store.* -import com.digitalasset.canton.topology.store.TopologyStoreId.SynchronizerStore -import com.digitalasset.canton.topology.store.TopologyTransactionRejection.InvalidTopologyMapping -import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStore -import com.digitalasset.canton.topology.transaction.DelegationRestriction.{ - CanSignAllButNamespaceDelegations, - CanSignAllMappings, -} -import com.digitalasset.canton.topology.transaction.ParticipantPermission.{ - Confirmation, - Observation, - Submission, -} -import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction -import com.digitalasset.canton.topology.transaction.TopologyChangeOp.Replace -import com.digitalasset.canton.topology.transaction.TopologyMapping.Code -import com.digitalasset.canton.topology.transaction.TopologyMappingChecks.PendingChangesLookup -import com.digitalasset.canton.{ - BaseTest, - FailOnShutdown, - HasExecutionContext, - ProtocolVersionChecksAnyWordSpec, -} -import 
org.scalatest.wordspec.AnyWordSpec - -import java.time -import scala.annotation.nowarn -import scala.language.implicitConversions - -@nowarn("msg=match may not be exhaustive") -class ValidatingTopologyMappingChecksTest - extends AnyWordSpec - with BaseTest - with HasExecutionContext - with ProtocolVersionChecksAnyWordSpec - with FailOnShutdown { - - private lazy val factory = new TestingOwnerWithKeys( - DefaultTestIdentities.mediatorId, - loggerFactory, - initEc = parallelExecutionContext, - ) - - def mk() = { - val store = - new InMemoryTopologyStore( - SynchronizerStore(DefaultTestIdentities.physicalSynchronizerId), - testedProtocolVersion, - loggerFactory, - timeouts, - ) - val check = new ValidatingTopologyMappingChecks(store, loggerFactory) - (check, store) - } - - "TopologyMappingChecks" when { - import DefaultTestIdentities.{synchronizerId, participant1, participant2, participant3, party1} - import factory.TestingTransactions.* - - def checkTransaction( - checks: TopologyMappingChecks, - toValidate: GenericSignedTopologyTransaction, - inStore: Option[GenericSignedTopologyTransaction] = None, - pendingChangesLookup: PendingChangesLookup = Map.empty, - ): Either[TopologyTransactionRejection, Unit] = - checks - .checkTransaction(EffectiveTime.MaxValue, toValidate, inStore, pendingChangesLookup) - .value - .futureValueUS - - implicit def toHostingParticipant( - participantToPermission: (ParticipantId, ParticipantPermission) - ): HostingParticipant = - HostingParticipant(participantToPermission._1, participantToPermission._2) - - "validating any Mapping" should { - "reject removal of non-existent mappings" in { - import factory.SigningKeys.key1 - val (checks, _) = mk() - - val removeNsdSerial1 = factory.mkRemove( - NamespaceDelegation.tryCreate(Namespace(key1.fingerprint), key1, CanSignAllMappings), - serial = PositiveInt.one, - ) - // also check that for serial > 1 - val removeNsdSerial3 = factory.mkRemove( - NamespaceDelegation.tryCreate(Namespace(key1.fingerprint), key1, CanSignAllMappings), - serial = PositiveInt.three, - ) - checkTransaction(checks, removeNsdSerial1) shouldBe Left( - TopologyTransactionRejection.NoCorrespondingActiveTxToRevoke(removeNsdSerial1.mapping) - ) - checkTransaction(checks, removeNsdSerial3) shouldBe Left( - TopologyTransactionRejection.NoCorrespondingActiveTxToRevoke(removeNsdSerial3.mapping) - ) - } - - "reject only REPLACE transactions with the highest possible serial" in { - import factory.SigningKeys.key1 - val (checks, _) = mk() - - val maxSerialReplace = factory.mkAdd( - NamespaceDelegation.tryCreate(Namespace(key1.fingerprint), key1, CanSignAllMappings), - serial = PositiveInt.MaxValue, - ) - checkTransaction(checks, maxSerialReplace) shouldBe Left( - TopologyTransactionRejection.InvalidTopologyMapping( - s"The serial for a REPLACE must be less than ${PositiveInt.MaxValue}." 
- ) - ) - - val maxSerialMinsOneReplace = factory.mkAdd( - NamespaceDelegation.tryCreate(Namespace(key1.fingerprint), key1, CanSignAllMappings), - serial = PositiveInt.tryCreate(PositiveInt.MaxValue.value - 1), - ) - val maxSerialRemove = factory.mkRemove( - NamespaceDelegation.tryCreate(Namespace(key1.fingerprint), key1, CanSignAllMappings), - serial = PositiveInt.MaxValue, - ) - - checkTransaction(checks, toValidate = maxSerialMinsOneReplace) shouldBe Right(()) - checkTransaction( - checks, - toValidate = maxSerialRemove, - inStore = Some(maxSerialMinsOneReplace), - ) shouldBe Right(()) - } - - "reject if removal also changes the content" in { - import factory.SigningKeys.{key1, key2} - val (checks, _) = mk() - - val removeNs1k2 = factory.mkRemove( - NamespaceDelegation - .tryCreate( - Namespace(key1.fingerprint), - key2, - // changing the mapping compared to ns1k2 by setting CanSignAllMappings - CanSignAllMappings, - ), - serial = PositiveInt.two, - ) - checkTransaction(checks, removeNs1k2, Some(ns1k2)) shouldBe Left( - TopologyTransactionRejection.RemoveMustNotChangeMapping( - removeNs1k2.mapping, - ns1k2.mapping, - ) - ) - } - - "respect pending changes when loading additional data for validations" in { - import factory.SigningKeys.{key1, key2, key3} - val (checks, store) = mk() - val ns1 = Namespace(key1.fingerprint) - val ns2 = Namespace(key2.fingerprint) - val ns3 = Namespace(key3.fingerprint) - - val nsd1Replace_1 = - factory.mkAdd(NamespaceDelegation.tryCreate(ns1, key1, CanSignAllMappings)) - val nsd1Remove_2 = factory.mkRemove( - NamespaceDelegation.tryCreate(ns1, key1, CanSignAllMappings), - serial = PositiveInt.two, - ) - val nsd1ReplaceProposal_3 = factory.mkAdd( - NamespaceDelegation.tryCreate(ns1, key1, CanSignAllMappings), - serial = PositiveInt.three, - isProposal = true, - ) - - val nsd2Replace_1 = - factory.mkAdd(NamespaceDelegation.tryCreate(ns2, key2, CanSignAllMappings)) - val nsd2Remove_2 = factory.mkRemove( - NamespaceDelegation.tryCreate(ns2, key2, CanSignAllMappings), - serial = PositiveInt.two, - ) - - val nsd3Replace_1 = - factory.mkAdd(NamespaceDelegation.tryCreate(ns3, key3, CanSignAllMappings)) - - store - .update( - SequencedTime(ts), - EffectiveTime(ts), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = Seq(nsd1Replace_1, nsd2Replace_1).map(ValidatedTopologyTransaction(_)), - ) - .futureValueUS - - store - .update( - SequencedTime(ts + seconds(1)), - EffectiveTime(ts + seconds(1)), - removeMapping = Map(nsd1Remove_2.mapping.uniqueKey -> nsd1Remove_2.serial), - removeTxs = Set.empty, - additions = Seq(ValidatedTopologyTransaction(nsd1Remove_2)), - ) - .futureValueUS - - store - .update( - SequencedTime(ts + seconds(2)), - EffectiveTime(ts + seconds(2)), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = Seq(ValidatedTopologyTransaction(nsd1ReplaceProposal_3)), - ) - .futureValueUS - - /* - * The store contains the following transactions: - * TS0: Replace NSD1, Replace NSD2 - * TS1: Remove NSD1 - * TS2: Replace Proposal NSD1 - */ - - // TS0: load without pending changes - checks - .loadFromStore( - EffectiveTime(ts.immediateSuccessor), - codes = Set(Code.NamespaceDelegation), - pendingChangesLookup = Map.empty, - ) - .futureValueUS - .value should contain theSameElementsAs Seq(nsd1Replace_1, nsd2Replace_1) - - // TS0: load with Removal NS2 as pending change - checks - .loadFromStore( - EffectiveTime(ts.immediateSuccessor), - codes = Set(Code.NamespaceDelegation), - Map(nsd2Remove_2.mapping.uniqueKey -> nsd2Remove_2), - ) - 
.futureValueUS - .value shouldBe Seq(nsd1Replace_1) - - // TS0: load with Replace NS3 as pending change without prior transactions in the store - checks - .loadFromStore( - EffectiveTime(ts.immediateSuccessor), - codes = Set(Code.NamespaceDelegation), - pendingChangesLookup = Map(nsd3Replace_1.mapping.uniqueKey -> nsd3Replace_1), - ) - .futureValueUS - .value should contain theSameElementsAs Seq( - nsd1Replace_1, - nsd2Replace_1, - nsd3Replace_1, - ) - - // TS0: load with Replace NS3 as pending change without prior transactions in the store and also matching a - // namespace filter - checks - .loadFromStore( - EffectiveTime(ts.immediateSuccessor), - codes = Set(Code.NamespaceDelegation), - pendingChangesLookup = Map(nsd3Replace_1.mapping.uniqueKey -> nsd3Replace_1), - filterNamespace = Some(NonEmpty(Seq, ns2, ns3)), - ) - .futureValueUS - .value should contain theSameElementsAs Seq(nsd2Replace_1, nsd3Replace_1) - - // TS1: don't load Remove NS1 from the store - checks - .loadFromStore( - EffectiveTime(ts.immediateSuccessor + seconds(1)), - codes = Set(Code.NamespaceDelegation), - Map.empty, - ) - .futureValueUS - .value shouldBe Seq(nsd2Replace_1) - - // TS1: don't load Remove NS1 from the store mixed with Remove NS2 as pending change - checks - .loadFromStore( - EffectiveTime(ts.immediateSuccessor + seconds(1)), - codes = Set(Code.NamespaceDelegation), - Map(nsd2Remove_2.mapping.uniqueKey -> nsd2Remove_2), - ) - .futureValueUS - .value shouldBe Seq.empty - - // TS2: don't load proposals - checks - .loadFromStore( - EffectiveTime(ts.immediateSuccessor + seconds(2)), - codes = Set(Code.NamespaceDelegation), - Map.empty, - ) - .futureValueUS - .value shouldBe Seq(nsd2Replace_1) - - } - } - - "validating DecentralizedNamespaceDefinition" should { - "reject namespaces not derived from their owners' namespaces" in { - val (checks, store) = mk() - val (keys, namespaces, rootCerts) = setUpRootCerts( - factory.SigningKeys.key1, - factory.SigningKeys.key2, - factory.SigningKeys.key3, - ) - - addToStore(store, rootCerts*) - - val dns = factory.mkAddMultiKey( - DecentralizedNamespaceDefinition - .create( - Namespace(Fingerprint.tryFromString("bogusNamespace")), - PositiveInt.one, - NonEmpty.from(namespaces).value.toSet, - ) - .value, - signingKeys = keys.toSet, - // using serial=2 here to test that we don't special case serial=1 - serial = PositiveInt.two, - ) - - checkTransaction(checks, dns, None) should matchPattern { - case Left(TopologyTransactionRejection.InvalidTopologyMapping(err)) - if err.contains("not derived from the owners") => - } - } - - "reject if a namespace delegation with the same namespace already exists" in { - val (checks, store) = mk() - val (keys, namespaces, rootCerts) = setUpRootCerts( - factory.SigningKeys.key1, - factory.SigningKeys.key2, - factory.SigningKeys.key3, - ) - - val dnd_namespace = DecentralizedNamespaceDefinition.computeNamespace(namespaces.toSet) - - // we are creating namespace delegation with the same namespace as the decentralized namespace. - // this nsd however is not actually fully authorized, but for the purpose of this test, we want to see - // that the decentralized namespace definition gets rejected. 
- val conflicting_nsd = factory.mkAdd( - NamespaceDelegation - .tryCreate(dnd_namespace, factory.SigningKeys.key8, CanSignAllButNamespaceDelegations), - factory.SigningKeys.key8, - ) - addToStore(store, (rootCerts :+ conflicting_nsd)*) - - val dnd = factory.mkAddMultiKey( - DecentralizedNamespaceDefinition - .create( - dnd_namespace, - PositiveInt.one, - NonEmpty.from(namespaces).value.toSet, - ) - .value, - signingKeys = keys.toSet, - serial = PositiveInt.one, - ) - - checkTransaction(checks, dnd, None) shouldBe Left( - TopologyTransactionRejection.NamespaceAlreadyInUse(`dnd_namespace`) - ) - } - - "reject if an owning namespace does not have a root certificate" in { - val (checks, store) = mk() - val (keys, namespaces, rootCerts) = setUpRootCerts( - factory.SigningKeys.key1, - factory.SigningKeys.key2, - factory.SigningKeys.key3, - ) - - def createDND(owners: Seq[Namespace], keys: Seq[SigningPublicKey]) = - factory.mkAddMultiKey( - DecentralizedNamespaceDefinition - .create( - DecentralizedNamespaceDefinition.computeNamespace(owners.toSet), - PositiveInt.one, - NonEmpty.from(owners).value.toSet, - ) - .value, - signingKeys = NonEmpty.from(keys).value.toSet, - serial = PositiveInt.one, - ) - - val dnd_k1k2 = createDND(namespaces.take(2), keys.take(2)) - - addToStore(store, (rootCerts :+ dnd_k1k2)*) - - val ns4 = Namespace(factory.SigningKeys.key4.fingerprint) - - val dnd_invalid = createDND( - namespaces.takeRight(2) ++ Seq(ns4, dnd_k1k2.mapping.namespace), - // we don't have to provide all keys for this transaction to be fully authorized, - // because the test doesn't check authorization, just semantic validity. - keys.takeRight(2), - ) - checkTransaction(checks, dnd_invalid, None) should matchPattern { - case Left(TopologyTransactionRejection.InvalidTopologyMapping(err)) - if err.contains( - s"No root certificate found for ${Seq(ns4, dnd_k1k2.mapping.namespace).sorted.mkString(", ")}" - ) => - } - } - } - - "validating NamespaceDelegation" should { - "reject a namespace delegation if a decentralized namespace with the same namespace already exists" in { - val (checks, store) = mk() - val (rootKeys, namespaces, rootCerts) = setUpRootCerts( - factory.SigningKeys.key1, - factory.SigningKeys.key2, - factory.SigningKeys.key3, - ) - - val dnd_namespace = DecentralizedNamespaceDefinition.computeNamespace(namespaces.toSet) - - val dnd = factory.mkAddMultiKey( - DecentralizedNamespaceDefinition - .create( - dnd_namespace, - PositiveInt.one, - NonEmpty.from(namespaces).value.toSet, - ) - .value, - signingKeys = rootKeys.toSet, - serial = PositiveInt.one, - ) - - addToStore(store, (rootCerts :+ dnd)*) - - // we are creating namespace delegation with the same namespace as the decentralized namespace. - // even if it is signed by enough owners of the decentralized namespace, we don't allow namespace delegations - // for a decentralized namespace, because - // 1. it goes against the very purpose of a decentralized namespace - // 2. 
the authorization machinery is actually not prepared to deal with it - // A similar effect can be achieved by setting the threshold of the DND to 1 - val conflicting_nsd = factory.mkAddMultiKey( - NamespaceDelegation - .tryCreate(dnd_namespace, factory.SigningKeys.key8, CanSignAllButNamespaceDelegations), - rootKeys.toSet, - ) - - checkTransaction(checks, conflicting_nsd, None) shouldBe Left( - TopologyTransactionRejection.NamespaceAlreadyInUse(`dnd_namespace`) - ) - } - } - - "validating PartyToParticipant" should { - - "reject when participants don't have a DTC" in { - val (checks, store) = mk() - addToStore(store, p2_dtc) - - val failureCases = Seq(Seq(participant1), Seq(participant1, participant2)) - - failureCases.foreach { participants => - val ptp = factory.mkAdd( - PartyToParticipant.tryCreate( - party1, - PositiveInt.one, - participants.map[HostingParticipant](_ -> Submission), - ) - ) - checkTransaction(checks, ptp) shouldBe Left( - TopologyTransactionRejection.UnknownMembers(Seq(participant1)) - ) - } - } - - "reject when participants don't have a valid encryption or signing key" in { - val (checks, store) = mk() - val p2MissingEncKey = factory.mkAdd( - OwnerToKeyMapping.tryCreate(participant2, NonEmpty(Seq, factory.SigningKeys.key1)) - ) - val p3MissingSigningKey = factory.mkAdd( - OwnerToKeyMapping.tryCreate(participant3, NonEmpty(Seq, factory.EncryptionKeys.key1)) - ) - - addToStore(store, p1_dtc, p2_dtc, p3_dtc, p2MissingEncKey, p3MissingSigningKey) - - val missingKeyCases = Seq(participant1, participant2, participant3) - - missingKeyCases.foreach { participant => - val ptp = factory.mkAdd( - PartyToParticipant.tryCreate( - party1, - PositiveInt.one, - Seq(participant -> Submission), - ) - ) - checkTransaction(checks, ptp) shouldBe Left( - TopologyTransactionRejection.InsufficientKeys(Seq(participant)) - ) - } - } - - "handle conflicts between partyId and existing admin parties from synchronizer trust certificates" in { - // the defaults below are a valid explicit admin party allocation for participant1.adminParty - def mkPTP( - partyId: PartyId = participant1.adminParty, - participants: Seq[HostingParticipant] = - Seq(HostingParticipant(participant1, Submission)), - ) = factory.mkAdd( - PartyToParticipant - .create( - partyId = partyId, - threshold = PositiveInt.one, - participants = participants, - ) - .value - ) - - val (checks, store) = mk() - addToStore(store, p1_otk, p1_dtc, p2_otk, p2_dtc) - - // handle the happy case - checkTransaction(checks, mkPTP()) shouldBe Either.unit - - // unhappy scenarios - val invalidParticipantPermission = Seq( - mkPTP(participants = Seq(HostingParticipant(participant1, Confirmation))), - mkPTP(participants = Seq(HostingParticipant(participant1, Observation))), - ) - - val invalidNumberOfHostingParticipants = mkPTP(participants = - Seq( - HostingParticipant(participant1, Submission), - HostingParticipant(participant2, Submission), - ) - ) - - val foreignParticipant = - mkPTP(participants = Seq(HostingParticipant(participant2, Submission))) - - // we don't need to explicitly check threshold > 1, because we already reject the PTP if participants.size > 1 - // and the threshold can never be higher than the number of participants - - val unhappyCases = invalidParticipantPermission ++ Seq( - foreignParticipant, - invalidNumberOfHostingParticipants, - ) - - forAll(unhappyCases)(ptp => - checkTransaction(checks, ptp) shouldBe Left( - TopologyTransactionRejection.PartyIdConflictWithAdminParty(ptp.mapping.partyId) - ) - ) - } - - "report no errors 
for valid mappings" in { - val (checks, store) = mk() - addToStore(store, p1_otk, p1_dtc, p2_otk, p2_dtc, p3_otk, p3_dtc) - - val validCases = Seq[(PositiveInt, Seq[HostingParticipant])]( - PositiveInt.one -> Seq(participant1 -> Confirmation), - PositiveInt.one -> Seq(participant1 -> Submission), - PositiveInt.one -> Seq(participant1 -> Observation, participant2 -> Confirmation), - PositiveInt.two -> Seq(participant1 -> Confirmation, participant2 -> Submission), - PositiveInt.two -> Seq( - participant1 -> Observation, - participant2 -> Submission, - participant3 -> Submission, - ), - ) - - validCases.foreach { case (threshold, participants) => - val ptp = factory.mkAdd( - PartyToParticipant.tryCreate( - party1, - threshold, - participants, - ) - ) - checkTransaction(checks, ptp) shouldBe Either.unit - } - } - - } - - "validating SynchronizerTrustCertificate" should { - "reject a removal when the participant still hosts a party" in { - val (checks, store) = mk() - val ptp = factory.mkAdd( - PartyToParticipant.tryCreate( - party1, - PositiveInt.one, - Seq(participant1 -> Submission), - ) - ) - addToStore( - store, - ptp, - ) - val prior = factory.mkAdd(SynchronizerTrustCertificate(participant1, synchronizerId)) - - val dtc = - factory.mkRemove(SynchronizerTrustCertificate(participant1, synchronizerId)) - checkTransaction(checks, dtc, Some(prior)) shouldBe Left( - TopologyTransactionRejection.ParticipantStillHostsParties(participant1, Seq(party1)) - ) - } - - "handle conflicts with existing party allocations" in { - val explicitAdminPartyParticipant1 = factory.mkAdd( - PartyToParticipant - .create( - partyId = participant1.adminParty, - threshold = PositiveInt.one, - participants = Seq(HostingParticipant(participant1, Submission)), - ) - .value - ) - - // we allocate a party with participant2's UID on participant1. - // this is not an explicit admin party allocation, the party just so happens to use the same UID as participant2. - val partyWithParticipant2Uid = factory.mkAdd( - PartyToParticipant - .create( - partyId = participant2.adminParty, - threshold = PositiveInt.one, - participants = Seq(HostingParticipant(participant1, Submission)), - ) - .value - ) - - val dop = factory.mkAdd( - SynchronizerParametersState( - synchronizerId, - DynamicSynchronizerParameters.defaultValues(testedProtocolVersion), - ) - ) - - val (checks, store) = mk() - - // normally it's not possible to have a valid PTP without an already existing DTC of the hosting participants. - // but let's pretend for this check. 
- addToStore(store, dop, explicitAdminPartyParticipant1, partyWithParticipant2Uid) - - // happy case: we allow the DTC (either a creation or modifying an existing one) - // if there is a valid explicit admin party allocation - checkTransaction(checks, p1_dtc, None) shouldBe Either.unit - - // unhappy case: there already exists a normal party allocation with the same UID - checkTransaction(checks, p2_dtc, None) shouldBe Left( - TopologyTransactionRejection.ParticipantIdConflictWithPartyId( - participant2, - partyWithParticipant2Uid.mapping.partyId, - ) - ) - } - - "reject the addition if the synchronizer is locked" in { - Seq(OnboardingRestriction.RestrictedLocked, OnboardingRestriction.UnrestrictedLocked) - .foreach { restriction => - val (checks, store) = mk() - val dop = factory.mkAdd( - SynchronizerParametersState( - synchronizerId, - DynamicSynchronizerParameters - .defaultValues(testedProtocolVersion) - .tryUpdate(onboardingRestriction = restriction), - ) - ) - addToStore(store, dop) - - val dtc = - factory.mkAdd(SynchronizerTrustCertificate(participant1, synchronizerId)) - - checkTransaction(checks, dtc) shouldBe Left( - TopologyTransactionRejection.OnboardingRestrictionInPlace( - participant1, - restriction, - None, - ) - ) - } - } - - "reject the addition if the synchronizer is restricted" in { - val (checks, store) = mk() - val dop = factory.mkAdd( - SynchronizerParametersState( - synchronizerId, - DynamicSynchronizerParameters - .defaultValues(testedProtocolVersion) - .tryUpdate(onboardingRestriction = OnboardingRestriction.RestrictedOpen), - ) - ) - addToStore( - store, - dop, - factory.mkAdd( - ParticipantSynchronizerPermission( - synchronizerId, - participant1, - ParticipantPermission.Submission, - None, - None, - ) - ), - ) - - // participant2 does not have permission from the synchronizer to join - checkTransaction( - checks, - factory.mkAdd(SynchronizerTrustCertificate(participant2, synchronizerId)), - ) shouldBe Left( - TopologyTransactionRejection.OnboardingRestrictionInPlace( - participant2, - OnboardingRestriction.RestrictedOpen, - None, - ) - ) - - // participant1 has been permissioned by the synchronizer - checkTransaction( - checks, - factory.mkAdd(SynchronizerTrustCertificate(participant1, synchronizerId)), - None, - ) shouldBe Either.unit - } - - "reject a rejoining participant" in { - val (checks, store) = mk() - val dtcRemoval = factory.mkRemove( - SynchronizerTrustCertificate( - participant1, - synchronizerId, - ) - ) - addToStore( - store, - dtcRemoval, - ) - val rejoin = - factory.mkAdd( - SynchronizerTrustCertificate(participant1, synchronizerId), - serial = PositiveInt.two, - ) - - checkTransaction(checks, rejoin, Some(dtcRemoval)) shouldBe Left( - TopologyTransactionRejection.MembersCannotRejoinSynchronizer(Seq(participant1)) - ) - } - - } - - "validating MediatorSynchronizerState" should { - def mkGroups( - serial: PositiveInt, - groupSetup: (NonNegativeInt, Seq[MediatorId])* - ): Seq[SignedTopologyTransaction[TopologyChangeOp.Replace, MediatorSynchronizerState]] = - groupSetup.map { case (group, mediators) => - factory.mkAdd( - MediatorSynchronizerState - .create( - synchronizerId, - group, - PositiveInt.one, - active = mediators, - Seq.empty, - ) - .value, - // the signing key is not relevant for the test - signingKey = factory.SigningKeys.key1, - serial = serial, - ) - } - - "report no errors for valid mappings" in { - val (checks, store) = mk() - val (Seq(med1, med2), transactions) = generateMemberIdentities(2, MediatorId(_)) - addToStore(store, 
transactions*) - - val Seq(mds1) = mkGroups(PositiveInt.one, (NonNegativeInt.zero -> Seq(med1))) - val Seq(mds2) = mkGroups(PositiveInt.two, (NonNegativeInt.zero -> Seq(med1, med2))) - - checkTransaction(checks, mds1) shouldBe Either.unit - checkTransaction(checks, mds2, Some(mds1)) shouldBe Either.unit - } - - "report MediatorsAlreadyAssignedToGroups for duplicate mediator assignments" in { - val (checks, store) = mk() - val (Seq(med1, med2, med3), transactions) = generateMemberIdentities(3, MediatorId(_)) - - val Seq(group0, group1, group2) = mkGroups( - PositiveInt.one, - NonNegativeInt.zero -> Seq(med1), - NonNegativeInt.one -> Seq(med2), - NonNegativeInt.two -> Seq(med1, med2, med3), - ) - - addToStore(store, (transactions :+ group0 :+ group1)*) - - checkTransaction(checks, group2, None) shouldBe Left( - TopologyTransactionRejection.MediatorsAlreadyInOtherGroups( - NonNegativeInt.two, - Map(med1 -> NonNegativeInt.zero, med2 -> NonNegativeInt.one), - ) - ) - } - - "report mediators defined both as active and observers" in { - val (Seq(med1, med2), _transactions) = generateMemberIdentities(2, MediatorId(_)) - - MediatorSynchronizerState - .create( - synchronizerId, - NonNegativeInt.zero, - PositiveInt.one, - active = Seq(med1, med2), - observers = Seq(med1), - ) shouldBe Left( - s"the following mediators were defined both as active and observer: $med1" - ) - } - - "report MembersCannotRejoinSynchronizer for mediators that are being re-onboarded" in { - val (checks, store) = mk() - val (Seq(med1, med2, med3), transactions) = generateMemberIdentities(3, MediatorId(_)) - - val Seq(group0, group1) = mkGroups( - PositiveInt.one, - NonNegativeInt.zero -> Seq(med1, med3), - NonNegativeInt.one -> Seq(med2, med3), - ) - - addToStore(store, (transactions :+ group0 :+ group1)*) - - val Seq(group0RemoveMed1, group1RemoveMed2) = mkGroups( - PositiveInt.two, - NonNegativeInt.zero -> Seq(med3), - NonNegativeInt.one -> Seq(med3), - ) - - store - .update( - SequencedTime(ts1), - EffectiveTime(ts1), - removeMapping = Map( - group0.mapping.uniqueKey -> PositiveInt.one, - group1.mapping.uniqueKey -> PositiveInt.one, - ), - removeTxs = Set.empty, - additions = Seq( - ValidatedTopologyTransaction(group0RemoveMed1), - ValidatedTopologyTransaction(group1RemoveMed2), - ), - ) - .futureValueUS - - val Seq(med1RejoinsGroup0, med2RejoinsGroup0) = mkGroups( - PositiveInt.three, - // try joining the same group - NonNegativeInt.zero -> Seq(med1, med3), - // try joining another group - NonNegativeInt.zero -> Seq(med2, med3), - ) - - checkTransaction(checks, med1RejoinsGroup0, Some(group0RemoveMed1)) shouldBe Left( - TopologyTransactionRejection.MembersCannotRejoinSynchronizer(Seq(med1)) - ) - - checkTransaction(checks, med2RejoinsGroup0, Some(group0RemoveMed1)) shouldBe Left( - TopologyTransactionRejection.MembersCannotRejoinSynchronizer(Seq(med2)) - ) - } - - "handle validation of proposal with a concurrent update of the store" in { - val (checks, store) = mk() - val (Seq(med1, med2), transactions) = generateMemberIdentities(2, MediatorId(_)) - - val Seq(group0_add_med1) = mkGroups(PositiveInt.one, NonNegativeInt.zero -> Seq(med1)) - - addToStore(store, (transactions :+ group0_add_med1)*) - - val Seq(group0_add_med2) = mkGroups( - PositiveInt.two, - NonNegativeInt.zero -> Seq(med1, med2), - ) - - // let's pretend that group0_add_med2 was broadcast by other synchronizerOwners - // and became fully authorized (not necessarily effective yet though) and stored - // between determining the previous effective transaction 
(group0_add_med1) at the start of - // the processing of group0_add_med2 and validating group0_add_med2 - store - .update( - SequencedTime(ts1), - EffectiveTime(ts1), - removeMapping = Map( - group0_add_med1.mapping.uniqueKey -> PositiveInt.one - ), - removeTxs = Set.empty, - additions = Seq( - ValidatedTopologyTransaction(group0_add_med2) - ), - ) - .futureValueUS - checkTransaction(checks, group0_add_med2, Some(group0_add_med1)) shouldBe Right(()) - } - } - - "validating SequencerSynchronizerState" should { - - def mkSDS( - serial: PositiveInt, - sequencers: SequencerId* - ): SignedTopologyTransaction[TopologyChangeOp.Replace, SequencerSynchronizerState] = - factory.mkAdd( - SequencerSynchronizerState - .create( - synchronizerId, - PositiveInt.one, - active = sequencers, - Seq.empty, - ) - .value, - // the signing key is not relevant for the test - signingKey = factory.SigningKeys.key1, - serial = serial, - ) - - "report no errors for valid mappings" in { - val (checks, store) = mk() - val (Seq(seq1, seq2), transactions) = generateMemberIdentities(2, SequencerId(_)) - addToStore(store, transactions*) - - val sds1 = mkSDS(PositiveInt.one, seq1) - val sds2 = mkSDS(PositiveInt.two, seq1, seq2) - - checkTransaction(checks, sds1) shouldBe Either.unit - checkTransaction(checks, sds2, Some(sds1)) shouldBe Either.unit - } - "report sequencers defined both as active and observers" in { - val (Seq(seq1, seq2), _transactions) = generateMemberIdentities(2, SequencerId(_)) - - SequencerSynchronizerState - .create( - synchronizerId, - PositiveInt.one, - active = Seq(seq1, seq2), - observers = Seq(seq1), - ) shouldBe Left( - s"the following sequencers were defined both as active and observer: $seq1" - ) - } - - "report MembersCannotRejoinSynchronizer for sequencers that are being re-onboarded" in { - val (checks, store) = mk() - val (Seq(seq1, seq2), transactions) = generateMemberIdentities(2, SequencerId(_)) - - val sds_S1_S2 = mkSDS( - PositiveInt.one, - seq1, - seq2, - ) - - addToStore(store, (transactions :+ sds_S1_S2)*) - - val sds_S1 = mkSDS(PositiveInt.two, seq1) - - store - .update( - SequencedTime(ts1), - EffectiveTime(ts1), - removeMapping = Map( - sds_S1.mapping.uniqueKey -> PositiveInt.one - ), - removeTxs = Set.empty, - additions = Seq( - ValidatedTopologyTransaction(sds_S1) - ), - ) - .futureValueUS - - val sds_S1_rejoining_S2 = mkSDS(PositiveInt.three, seq1, seq2) - - checkTransaction(checks, sds_S1_rejoining_S2, Some(sds_S1)) shouldBe Left( - TopologyTransactionRejection.MembersCannotRejoinSynchronizer(Seq(seq2)) - ) - } - - "handle validation of a proposal with a concurrent update in the store" in { - val (checks, store) = mk() - val (Seq(seq1, seq2), transactions) = generateMemberIdentities(2, SequencerId(_)) - - val sds_add_seq1 = mkSDS(PositiveInt.one, seq1) - - addToStore(store, (transactions :+ sds_add_seq1)*) - - val sds_add_seq2 = mkSDS(PositiveInt.two, seq1, seq2) - - // let's pretend that sds_add_seq2 was broadcast by other synchronizerOwners - // and became fully authorized (not necessarily effective yet though) and stored - // between determining the previous effective transaction (sds_add_seq1) at the start of - // the processing of sds_add_seq2 and validating sds_add_seq2. 
- store - .update( - SequencedTime(ts1), - EffectiveTime(ts1), - removeMapping = Map( - sds_add_seq1.mapping.uniqueKey -> PositiveInt.one - ), - removeTxs = Set.empty, - additions = Seq( - ValidatedTopologyTransaction(sds_add_seq2) - ), - ) - .futureValueUS - checkTransaction(checks, sds_add_seq2, Some(sds_add_seq1)) shouldBe Right(()) - } - - } - - "validating OwnerToKeyMapping" should { - "report no errors for valid mappings" in { - val (checks, _) = mk() - val okm_sequencer = factory.mkAddMultiKey( - OwnerToKeyMapping.tryCreate(sequencerId, NonEmpty(Seq, factory.SigningKeys.key1)), - NonEmpty(Set, factory.SigningKeys.key1), - ) - val okm_mediator = factory.mkAddMultiKey( - OwnerToKeyMapping.tryCreate(mediatorId, NonEmpty(Seq, factory.SigningKeys.key1)), - NonEmpty(Set, factory.SigningKeys.key1), - ) - val okm_participant = factory.mkAddMultiKey( - OwnerToKeyMapping.tryCreate( - participant1, - NonEmpty(Seq, factory.EncryptionKeys.key1, factory.SigningKeys.key1), - ), - NonEmpty(Set, factory.SigningKeys.key1), - ) - - checkTransaction(checks, okm_sequencer) shouldBe Either.unit - checkTransaction(checks, okm_mediator) shouldBe Either.unit - checkTransaction(checks, okm_participant) shouldBe Either.unit - } - "reject minimum key violations" in { - val (checks, _) = mk() - val okm_sequencerNoSigningKey = factory.mkAddMultiKey( - OwnerToKeyMapping.tryCreate(sequencerId, NonEmpty(Seq, factory.EncryptionKeys.key1)), - NonEmpty(Set, factory.SigningKeys.key1), - ) - val okm_mediatorNoSigningKey = factory.mkAddMultiKey( - OwnerToKeyMapping.tryCreate(mediatorId, NonEmpty(Seq, factory.EncryptionKeys.key1)), - NonEmpty(Set, factory.SigningKeys.key1), - ) - val okm_participantNoSigningKey = factory.mkAddMultiKey( - OwnerToKeyMapping.tryCreate(participant1, NonEmpty(Seq, factory.EncryptionKeys.key1)), - NonEmpty(Set, factory.SigningKeys.key1), - ) - val okm_participantNoEncryptionKey = factory.mkAddMultiKey( - OwnerToKeyMapping.tryCreate(participant1, NonEmpty(Seq, factory.SigningKeys.key1)), - NonEmpty(Set, factory.SigningKeys.key1), - ) - - Seq(okm_sequencerNoSigningKey, okm_mediatorNoSigningKey, okm_participantNoSigningKey) - .foreach(tx => - checkTransaction(checks, tx) shouldBe Left( - InvalidTopologyMapping( - "OwnerToKeyMapping must contain at least 1 signing key." - ) - ) - ) - checkTransaction(checks, okm_participantNoEncryptionKey) shouldBe Left( - InvalidTopologyMapping( - "OwnerToKeyMapping for participants must contain at least 1 encryption key." 
- ) - ) - } - } - } - - private def generateMemberIdentities[M <: Member]( - numMembers: Int, - uidToMember: UniqueIdentifier => M, - ): (Seq[M], Seq[GenericSignedTopologyTransaction]) = { - val allKeys = { - import factory.SigningKeys.* - Seq(key1, key2, key3, key4, key5, key6) - } - val (memberIds, identityTransactions) = (1 to numMembers).map { idx => - val key = allKeys(idx) - val member = - uidToMember(UniqueIdentifier.tryCreate(s"member$idx", Namespace(key.fingerprint))) - member -> List( - factory.mkAdd( - NamespaceDelegation.tryCreate(member.namespace, key, CanSignAllMappings), - key, - ), - factory.mkAdd(OwnerToKeyMapping.tryCreate(member, NonEmpty(Seq, key)), key), - ) - }.unzip - - memberIds -> identityTransactions.flatten - } - - private def addToStore( - store: TopologyStore[SynchronizerStore], - transactions: GenericSignedTopologyTransaction* - ): Unit = - store - .update( - sequenced = SequencedTime.MinValue, - effective = EffectiveTime.MinValue, - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = transactions.map(ValidatedTopologyTransaction(_)), - ) - .futureValueUS - - private def setUpRootCerts(keys: SigningPublicKey*): ( - NonEmpty[Seq[SigningPublicKey]], - Seq[Namespace], - Seq[SignedTopologyTransaction[Replace, NamespaceDelegation]], - ) = { - val (namespaces, rootCerts) = - keys.map { key => - val namespace = Namespace(key.fingerprint) - namespace -> factory.mkAdd( - NamespaceDelegation.tryCreate( - namespace, - key, - CanSignAllMappings, - ), - signingKey = key, - ) - }.unzip - val keysNE = NonEmpty.from(keys).value - (keysNE, namespaces, rootCerts) - } - - private def seconds(s: Int) = PositiveSeconds.tryCreate(time.Duration.ofSeconds(s.toLong)) - -} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/util/ContractAuthenticatorTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/util/ContractAuthenticatorTest.scala deleted file mode 100644 index 49f19a98d8..0000000000 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/util/ContractAuthenticatorTest.scala +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import cats.syntax.either.* -import com.digitalasset.canton.crypto.TestSalt -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto -import com.digitalasset.canton.protocol.* -import com.digitalasset.canton.{BaseTest, LfPackageName, LfPartyId} -import com.digitalasset.daml.lf.data.ImmArray -import com.digitalasset.daml.lf.data.Ref.IdString -import com.digitalasset.daml.lf.transaction.CreationTime.CreatedAt -import com.digitalasset.daml.lf.transaction.{FatContractInstance, Versioned} -import com.digitalasset.daml.lf.value.Value -import com.digitalasset.daml.lf.value.Value.ValueText -import org.scalatest.wordspec.AnyWordSpec - -import java.time.Duration - -class ContractAuthenticatorTest extends AnyWordSpec with BaseTest { - - forEvery(Seq(AuthenticatedContractIdVersionV10, AuthenticatedContractIdVersionV11)) { - authContractIdVersion => - s"ContractAuthenticatorImpl with $authContractIdVersion" when { - "using a valid contract id" should { - "correctly authenticate the contract" in new WithContractAuthenticator( - authContractIdVersion - ) { - contractAuthenticator.legacyAuthenticate(fatContractInstance) shouldBe Either.unit - contractAuthenticator.authenticate( - fatContractInstance, - LegacyContractHash.tryFatContractHash( - fatContractInstance, - contractIdVersion.useUpgradeFriendlyHashing, - ), - ) shouldBe Either.unit - } - } - - "using a normalized values" should { - if (authContractIdVersion >= AuthenticatedContractIdVersionV11) { - - val unNormalizedArg = Value.ValueRecord( - None, - ImmArray( - (None, Value.ValueOptional(Some(Value.ValueTrue))), - (None, Value.ValueOptional(None)), - ), - ) - - val normalizedArg = Value.ValueRecord( - None, - ImmArray( - (None, Value.ValueOptional(Some(Value.ValueTrue))) - ), - ) - - val unNormalizedContract = ExampleContractFactory.build(argument = unNormalizedArg) - val normalizedContract = - ExampleContractFactory.modify(unNormalizedContract, arg = Some(normalizedArg)) - - "correctly authenticate the contract" in new WithContractAuthenticator( - authContractIdVersion - ) { - contractAuthenticator.legacyAuthenticate( - unNormalizedContract.inst - ) shouldBe Either.unit - contractAuthenticator.legacyAuthenticate(normalizedContract.inst) shouldBe Either.unit - } - } - } - - "using an invalid contract id" should { - "fail authentication" in new WithContractAuthenticator(authContractIdVersion) { - private val invalidContractId = ExampleContractFactory.buildContractId() - private val invalid: FatContractInstance = ExampleContractFactory - .modify[CreatedAt](contractInstance, contractId = Some(invalidContractId)) - .inst - shouldFailAuthentication(invalid) - } - } - - "using an invalid hash" should { - "fail authentication" in new WithContractAuthenticator(authContractIdVersion) { - val invalidHash: LfHash = ExampleTransactionFactory.lfHash(7) - shouldFailAuthentication(fatContractInstance, Some(invalidHash)) - } - } - - "using a changed salt/authentication data" should { - "fail authentication" in new WithContractAuthenticator(authContractIdVersion) { - - val authenticationData = - ContractAuthenticationDataV1(TestSalt.generateSalt(42))(contractIdVersion).toLfBytes - private val invalid: FatContractInstance = ExampleContractFactory - .modify[CreatedAt](contractInstance, authenticationData = Some(authenticationData)) - .inst - shouldFailAuthentication(invalid) - } - } - - "using a changed ledger time" should { - "fail authentication" in new 
WithContractAuthenticator(authContractIdVersion) { - private val changedTime = - CreatedAt(contractInstance.inst.createdAt.time.add(Duration.ofDays(1L))) - private val invalid: FatContractInstance = ExampleContractFactory - .modify[CreatedAt](contractInstance, createdAt = Some(changedTime)) - .inst - shouldFailAuthentication(invalid) - } - } - - "using a changed contract argument" should { - "fail authentication" in new WithContractAuthenticator(authContractIdVersion) { - private val invalid: FatContractInstance = ExampleContractFactory - .modify[CreatedAt](contractInstance, arg = Some(ValueText("changed"))) - .inst - shouldFailAuthentication(invalid) - } - } - - "using a changed template-id" should { - "fail authentication" in new WithContractAuthenticator(authContractIdVersion) { - private val invalid: FatContractInstance = ExampleContractFactory - .modify[CreatedAt]( - contractInstance, - templateId = Some(LfTemplateId.assertFromString("definitely:changed:template")), - ) - .inst - shouldFailAuthentication(invalid) - } - } - - "using a changed package-name" should { - "fail authentication" in new WithContractAuthenticator(authContractIdVersion) { - private val invalid: FatContractInstance = ExampleContractFactory - .modify[CreatedAt]( - contractInstance, - packageName = - Some(LfPackageName.assertFromString("definitely-changed-package-name")), - ) - .inst - shouldFailAuthentication(invalid) - } - } - - "using changed signatories" should { - "fail authentication" in new WithContractAuthenticator(authContractIdVersion) { - private val changedSignatory: IdString.Party = - LfPartyId.assertFromString("changed::signatory") - private val invalid: FatContractInstance = ExampleContractFactory - .modify[CreatedAt]( - contractInstance, - metadata = Some( - ContractMetadata.tryCreate( - signatories = contractInstance.metadata.signatories + changedSignatory, - stakeholders = contractInstance.metadata.stakeholders + changedSignatory, - maybeKeyWithMaintainersVersioned = - contractInstance.metadata.maybeKeyWithMaintainersVersioned, - ) - ), - ) - .inst - shouldFailAuthentication(invalid) - } - } - - "using changed observers" should { - "fail authentication" in new WithContractAuthenticator(authContractIdVersion) { - private val changedObserver: IdString.Party = - LfPartyId.assertFromString("changed::observer") - private val invalid: FatContractInstance = ExampleContractFactory - .modify[CreatedAt]( - contractInstance, - metadata = Some( - ContractMetadata.tryCreate( - signatories = contractInstance.metadata.signatories, - stakeholders = contractInstance.metadata.stakeholders + changedObserver, - maybeKeyWithMaintainersVersioned = - contractInstance.metadata.maybeKeyWithMaintainersVersioned, - ) - ), - ) - .inst - shouldFailAuthentication(invalid) - } - } - - "using a changed key value" should { - "fail authentication" in new WithContractAuthenticator(authContractIdVersion) { - private val changeKey = keyWithMaintainers.copy(globalKey = - LfGlobalKey.assertBuild( - contractInstance.templateId, - ValueText("changed"), - contractInstance.inst.packageName, - ) - ) - private val invalid: FatContractInstance = ExampleContractFactory - .modify[CreatedAt]( - contractInstanceWithKey, - metadata = Some( - ContractMetadata.tryCreate( - signatories = contractInstanceWithKey.metadata.signatories, - stakeholders = contractInstanceWithKey.metadata.stakeholders, - maybeKeyWithMaintainersVersioned = - Some(Versioned(contractInstanceWithKey.inst.version, changeKey)), - ) - ), - ) - .inst - 
shouldFailAuthentication(invalid) - } - } - - "using a changed key maintainers" should { - "fail authentication" in new WithContractAuthenticator(authContractIdVersion) { - private val changeKey = keyWithMaintainers.copy(maintainers = Set.empty) - private val invalid: FatContractInstance = ExampleContractFactory - .modify[CreatedAt]( - contractInstanceWithKey, - metadata = Some( - ContractMetadata.tryCreate( - signatories = contractInstanceWithKey.metadata.signatories, - stakeholders = contractInstanceWithKey.metadata.stakeholders, - maybeKeyWithMaintainersVersioned = - Some(Versioned(contractInstanceWithKey.inst.version, changeKey)), - ) - ), - ) - .inst - shouldFailAuthentication(invalid) - } - } - } - } -} - -class WithContractAuthenticator(protected val contractIdVersion: CantonContractIdV1Version) - extends BaseTest { - - def shouldFailAuthentication( - invalid: FatContractInstance, - contractHashO: Option[LfHash] = None, - ): Unit = { - val contractHash = contractHashO.getOrElse(LegacyContractHash.fatContractHash(invalid).value) - - inside(contractAuthenticator.authenticate(invalid, contractHash)) { case Left(error) => - error should startWith("Mismatching contract id suffixes.") - } - } - - protected val pureCrypto = new SymbolicPureCrypto() - protected val contractIdSuffixer = new ContractIdSuffixer(pureCrypto, contractIdVersion) - protected val unicumGenerator = new UnicumGenerator(pureCrypto) - protected val contractAuthenticator = new ContractAuthenticatorImpl(unicumGenerator) - protected val contractInstance = - ExampleContractFactory.build[CreatedAt](cantonContractIdVersion = contractIdVersion) - protected val keyWithMaintainers = ExampleContractFactory.buildKeyWithMaintainers() - protected val contractInstanceWithKey = ExampleContractFactory.build[CreatedAt]( - cantonContractIdVersion = contractIdVersion, - keyOpt = Some(keyWithMaintainers), - ) - protected val fatContractInstance: FatContractInstance = contractInstance.inst -} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/util/ContractValidatorTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/util/ContractValidatorTest.scala deleted file mode 100644 index 079e047d83..0000000000 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/util/ContractValidatorTest.scala +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import cats.syntax.either.* -import com.digitalasset.canton.crypto.TestSalt -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.protocol.* -import com.digitalasset.canton.util.PackageConsumer.PackageResolver -import com.digitalasset.canton.{ - BaseTest, - FailOnShutdown, - HasExecutionContext, - LfPackageName, - LfPartyId, -} -import com.digitalasset.daml.lf.data.ImmArray -import com.digitalasset.daml.lf.engine.{Engine, EngineConfig} -import com.digitalasset.daml.lf.language.LanguageVersion -import com.digitalasset.daml.lf.transaction.CreationTime.CreatedAt -import com.digitalasset.daml.lf.transaction.{FatContractInstance, Versioned} -import com.digitalasset.daml.lf.value.Value -import com.digitalasset.daml.lf.value.Value.ValueText -import org.scalatest.Assertion -import org.scalatest.wordspec.AsyncWordSpec - -import java.time.Duration -import scala.concurrent.Future - -class ContractValidatorTest - extends AsyncWordSpec - with BaseTest - with HasExecutionContext - with FailOnShutdown { - - private val engine = new Engine( - EngineConfig(LanguageVersion.StableVersions(LanguageVersion.Major.V2)) - ) - private val packageResolver: PackageResolver = - _ => _ => FutureUnlessShutdown.pure(None) - - private val pureCrypto = new SymbolicPureCrypto() - private val underTest = ContractValidator(pureCrypto, engine, packageResolver) - - private def shouldFailAuthentication(invalid: FatContractInstance): Future[Assertion] = - underTest - .authenticate(invalid, invalid.templateId.packageId) - .value - .map(e => - inside(e) { case Left(error) => - error should startWith("Contract did not validate") - } - ) - - forEvery(Seq(AuthenticatedContractIdVersionV10, AuthenticatedContractIdVersionV11)) { - authContractIdVersion => - s"ContractAuthenticatorImpl with $authContractIdVersion" when { - - val contractInstance = - ExampleContractFactory.build[CreatedAt](cantonContractIdVersion = authContractIdVersion) - val fatContractInstance: FatContractInstance = contractInstance.inst - val targetPackageId = contractInstance.templateId.packageId - - val keyWithMaintainers = ExampleContractFactory.buildKeyWithMaintainers() - val contractInstanceWithKey = ExampleContractFactory.build[CreatedAt]( - cantonContractIdVersion = authContractIdVersion, - keyOpt = Some(keyWithMaintainers), - ) - - "using a valid contract id" should { - "correctly authenticate the contract" in { - underTest - .authenticate(fatContractInstance, targetPackageId) - .value - .map(_ shouldBe Either.unit) - } - } - - "using a normalized values" should { - if (authContractIdVersion >= AuthenticatedContractIdVersionV11) { - - val unNormalizedArg = Value.ValueRecord( - None, - ImmArray( - (None, Value.ValueOptional(Some(Value.ValueTrue))), - (None, Value.ValueOptional(None)), - ), - ) - - val normalizedArg = Value.ValueRecord( - None, - ImmArray( - (None, Value.ValueOptional(Some(Value.ValueTrue))) - ), - ) - - val unNormalizedContract = ExampleContractFactory.build(argument = unNormalizedArg) - val normalizedContract = - ExampleContractFactory.modify(unNormalizedContract, arg = Some(normalizedArg)) - - "correctly authenticate the unNormalizedContract" in { - underTest - .authenticate(unNormalizedContract.inst, unNormalizedContract.templateId.packageId) - .value - .map(_ shouldBe Either.unit) - } - "correctly authenticate the normalizedContract" in { - 
underTest - .authenticate(normalizedContract.inst, normalizedContract.templateId.packageId) - .value - .map(_ shouldBe Either.unit) - } - - } - } - - "using an invalid contract id" should { - "fail authentication" in { - val invalidContractId = ExampleContractFactory.buildContractId() - val invalid: FatContractInstance = ExampleContractFactory - .modify[CreatedAt](contractInstance, contractId = Some(invalidContractId)) - .inst - shouldFailAuthentication(invalid) - } - } - - "using a changed salt/authentication data" should { - "fail authentication" in { - val authenticationData = ContractAuthenticationDataV1(TestSalt.generateSalt(42))( - authContractIdVersion - ).toLfBytes - val invalid: FatContractInstance = ExampleContractFactory - .modify[CreatedAt](contractInstance, authenticationData = Some(authenticationData)) - .inst - shouldFailAuthentication(invalid) - } - } - - "using a changed ledger time" should { - "fail authentication" in { - val changedTime = - CreatedAt(contractInstance.inst.createdAt.time.add(Duration.ofDays(1L))) - val invalid: FatContractInstance = ExampleContractFactory - .modify[CreatedAt](contractInstance, createdAt = Some(changedTime)) - .inst - shouldFailAuthentication(invalid) - } - } - - "using a changed contract argument" should { - "fail authentication" in { - val invalid: FatContractInstance = ExampleContractFactory - .modify[CreatedAt](contractInstance, arg = Some(ValueText("changed"))) - .inst - shouldFailAuthentication(invalid) - } - } - - "using a changed template-id" should { - "fail authentication" in { - val invalid: FatContractInstance = ExampleContractFactory - .modify[CreatedAt]( - contractInstance, - templateId = Some(LfTemplateId.assertFromString("definitely:changed:template")), - ) - .inst - shouldFailAuthentication(invalid) - } - } - - "using a changed package-name" should { - "fail authentication" in { - val invalid: FatContractInstance = ExampleContractFactory - .modify[CreatedAt]( - contractInstance, - packageName = - Some(LfPackageName.assertFromString("definitely-changed-package-name")), - ) - .inst - shouldFailAuthentication(invalid) - } - } - - "using changed signatories" should { - "fail authentication" in { - val changedSignatory: LfPartyId = - LfPartyId.assertFromString("changed::signatory") - val invalid: FatContractInstance = ExampleContractFactory - .modify[CreatedAt]( - contractInstance, - metadata = Some( - ContractMetadata.tryCreate( - signatories = contractInstance.metadata.signatories + changedSignatory, - stakeholders = contractInstance.metadata.stakeholders + changedSignatory, - maybeKeyWithMaintainersVersioned = - contractInstance.metadata.maybeKeyWithMaintainersVersioned, - ) - ), - ) - .inst - shouldFailAuthentication(invalid) - } - } - - "using changed observers" should { - "fail authentication" in { - val changedObserver: LfPartyId = - LfPartyId.assertFromString("changed::observer") - val invalid: FatContractInstance = ExampleContractFactory - .modify[CreatedAt]( - contractInstance, - metadata = Some( - ContractMetadata.tryCreate( - signatories = contractInstance.metadata.signatories, - stakeholders = contractInstance.metadata.stakeholders + changedObserver, - maybeKeyWithMaintainersVersioned = - contractInstance.metadata.maybeKeyWithMaintainersVersioned, - ) - ), - ) - .inst - shouldFailAuthentication(invalid) - } - } - - "using a changed key value" should { - "fail authentication" in { - val changeKey = keyWithMaintainers.copy(globalKey = - LfGlobalKey.assertBuild( - contractInstance.templateId, - ValueText("changed"), - 
contractInstance.inst.packageName, - ) - ) - val invalid: FatContractInstance = ExampleContractFactory - .modify[CreatedAt]( - contractInstanceWithKey, - metadata = Some( - ContractMetadata.tryCreate( - signatories = contractInstanceWithKey.metadata.signatories, - stakeholders = contractInstanceWithKey.metadata.stakeholders, - maybeKeyWithMaintainersVersioned = - Some(Versioned(contractInstanceWithKey.inst.version, changeKey)), - ) - ), - ) - .inst - shouldFailAuthentication(invalid) - } - } - - "using a changed key maintainers" should { - "fail authentication" in { - val changeKey = keyWithMaintainers.copy(maintainers = Set.empty) - val invalid: FatContractInstance = ExampleContractFactory - .modify[CreatedAt]( - contractInstanceWithKey, - metadata = Some( - ContractMetadata.tryCreate( - signatories = contractInstanceWithKey.metadata.signatories, - stakeholders = contractInstanceWithKey.metadata.stakeholders, - maybeKeyWithMaintainersVersioned = - Some(Versioned(contractInstanceWithKey.inst.version, changeKey)), - ) - ), - ) - .inst - shouldFailAuthentication(invalid) - } - } - } - } - -} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/util/LfTransactionBuilder.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/util/LfTransactionBuilder.scala index 40e93786e4..1db4252568 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/util/LfTransactionBuilder.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/util/LfTransactionBuilder.scala @@ -22,7 +22,9 @@ object LfTransactionBuilder { // Helper methods for Daml-LF types val defaultLanguageVersion: LanguageVersion = LanguageVersion.default - val defaultTransactionVersion: LfLanguageVersion = LfLanguageVersion.AllV2.min + val defaultSerializationVersion: LfSerializationVersion = + LfSerializationVersion.V1 +// LfSerializationVersion.assign(LfLanguageVersion.AllV2.min) val defaultPackageId: LfPackageId = LfPackageId.assertFromString("pkg") val defaultTemplateId: Ref.Identifier = diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/util/ResourceUtilTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/util/ResourceUtilTest.scala index a3b6bd6217..ce39a5f0c8 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/util/ResourceUtilTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/util/ResourceUtilTest.scala @@ -63,6 +63,18 @@ class ResourceUtilTest extends AnyWordSpec with BaseTest with HasExecutionContex exception.getSuppressed shouldBe empty } + "deal with an InterruptedException" in { + val resource = new DoNothingResource + val ex = new InterruptedException("Interrupted") + val exception = intercept[InterruptedException]( + ResourceUtil.withResource(resource)(_ => throw ex) + ) + + resource.closeCount shouldBe 1 + exception shouldBe ex + exception.getSuppressed shouldBe empty + } + "rethrow exception from closing" in { val msg = "rethrow-exception message" val resource = new ThrowOnCloseResource(msg) @@ -116,7 +128,7 @@ class ResourceUtilTest extends AnyWordSpec with BaseTest with HasExecutionContex } "withResourceEither" should { - "have the same behavior as withResource but return an Either with the result or exception" in { + "have the same behavior as withResource but return an Either with the result or non-fatal exception" in { ResourceUtil.withResourceEither(new DoNothingResource)(_ => "good") shouldBe Right("good") val msg = "Something happened" @@ -136,6 
+148,13 @@ class ResourceUtilTest extends AnyWordSpec with BaseTest with HasExecutionContex } } + "rethrow fatal exceptions" in { + val interrupted = new InterruptedException("Interrupted") + TryUtil.tryCatchInterrupted( + ResourceUtil.withResourceEither(new DoNothingResource)(_ => throw interrupted) + ) shouldBe Failure(interrupted) + } + "catch exceptions during resource construction" in { val ex = TestException("Resource construction failed") val exception = ResourceUtil.withResourceEither((throw ex): AutoCloseable)(_ => ()) diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/util/RoseTreeTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/util/RoseTreeTest.scala new file mode 100644 index 0000000000..b68cacb6bb --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/util/RoseTreeTest.scala @@ -0,0 +1,267 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import cats.Eq +import cats.laws.discipline.ApplyTests +import com.digitalasset.canton.BaseTestWordSpec +import org.scalacheck.{Arbitrary, Gen} +import org.scalatest.wordspec.AnyWordSpec + +import scala.collection.mutable +import scala.util.{Success, Try} + +class RoseTreeTest extends AnyWordSpec with BaseTestWordSpec { + + private def recursiveMap[A, B](tree: RoseTree[A])(f: A => B): RoseTree[B] = + RoseTree(f(tree.root), tree.children.map(recursiveMap(_)(f))*) + + private def recursivePreorder[A](tree: RoseTree[A]): Seq[A] = + tree.root +: tree.children.flatMap(recursivePreorder) + + private def recursiveHashcode[A](tree: RoseTree[A]): Int = + scala.util.hashing.MurmurHash3.productHash(tree) + + private def recursiveSize[A](tree: RoseTree[A]): Int = 1 + tree.children.map(recursiveSize).sum + + private def recursiveZipWith[A, B, C](left: RoseTree[A], right: RoseTree[B])( + f: (A, B) => C + ): RoseTree[C] = + RoseTree( + f(left.root, right.root), + left.children.zip(right.children).map { case (l, r) => recursiveZipWith(l, r)(f) }* + ) + + private def recursiveFoldl[A, State, Result](tree: RoseTree[A])( + init: RoseTree[A] => State + )(finish: State => Result)(update: (State, Result) => State): Result = + finish( + tree.children.foldLeft(init(tree))((acc, child) => + update(acc, recursiveFoldl(child)(init)(finish)(update)) + ) + ) + + private val width = 15000 + private def wideTree(multiplier: Int): RoseTree[Int] = + RoseTree(0, (1 to width).map(i => RoseTree(i * multiplier))*) + + private val depth = 15000 + def deepTree(multiplier: Int): RoseTree[Int] = + (1 to depth).foldRight(RoseTree(0))((i, acc) => RoseTree(i * multiplier, acc)) + + val trees = Table( + "trees", + RoseTree(1), + RoseTree(1, RoseTree(2), RoseTree(3)), + RoseTree(1, RoseTree(2, RoseTree(4), RoseTree(5)), RoseTree(3)), + RoseTree(1, RoseTree(2, RoseTree(4, RoseTree(6), RoseTree(7)), RoseTree(5)), RoseTree(3)), + ) + + "size" should { + "correctly count the size" in { + forEvery(trees) { tree => + tree.size shouldEqual recursiveSize(tree) + } + } + + "be stack safe for wide trees" in { + wideTree(1).size shouldBe (width + 1) + } + + "be stack safe for deep trees" in { + deepTree(1).size shouldBe (depth + 1) + } + } + + "equals" should { + "be correct" in { + forEvery(trees.zipWithIndex) { case (first, i) => + forEvery(trees.zipWithIndex) { case (second, j) => + (first == second) shouldEqual (i == j) + } + } + + RoseTree(1) shouldEqual RoseTree(1) + } + + "be 
stack safe for wide trees" in { + (wideTree(1) == wideTree(2)) shouldBe false + (wideTree(1) == wideTree(1)) shouldBe true + } + + "be stack safe for deep trees" in { + (deepTree(1) == deepTree(2)) shouldBe false + (deepTree(1) == deepTree(1)) shouldBe true + } + } + + "hashcode" should { + "produce different hashes for different trees" in { + val hashes = trees.map(_.hashCode()) + hashes.distinct shouldBe hashes + } + + "implement the specification" in { + trees(0).hashCode() shouldBe recursiveHashcode(trees(0)) + + trees.map(_.hashCode()) shouldBe trees.map(recursiveHashcode) + } + + "be stack safe for wide trees" in { + Try(wideTree(1).hashCode()) shouldBe a[Success[_]] + } + + "be stack safe for deep trees" in { + Try(deepTree(1).hashCode()) shouldBe a[Success[_]] + } + } + + "preorder" should { + "be correct" in { + forEvery(trees) { tree => + tree.preorder.toSeq shouldEqual recursivePreorder(tree) + } + } + + "be stack safe for wide trees" in { + wideTree(1).preorder.toSeq shouldEqual (0 to width) + } + + "be stack safe for deep trees" in { + deepTree(1).preorder.toSeq shouldEqual ((1 to depth) :+ 0) + } + } + + "toString" should { + "be reasonable" in { + RoseTree(1).toString shouldBe "RoseTree(1)" + RoseTree( + 1, + RoseTree(2), + RoseTree(3), + ).toString shouldBe "RoseTree(1, RoseTree(2), RoseTree(3))" + } + + "be stack safe for wide trees" in { + wideTree(1).toString should startWith("RoseTree(0, RoseTree(1), RoseTree(2), RoseTree(3)") + } + + "be stack safe for deep trees" in { + deepTree(1).toString should startWith("RoseTree(1, RoseTree(2, RoseTree(3, RoseTree(4") + } + } + + "foldl" should { + "be correct" in { + forEvery(trees) { tree => + def init(builder: mutable.Builder[String, Seq[String]])(t: RoseTree[Int]): Int = { + builder += s"Init($t)" + 1 + } + def finish(builder: mutable.Builder[String, Seq[String]])(i: Int): Int = { + builder += s"Fisnish($i)" + i + 1 + } + def update(builder: mutable.Builder[String, Seq[String]])(acc: Int, res: Int): Int = { + builder += s"Update($acc, $res)" + acc + res + } + + val callsB = Seq.newBuilder[String] + val specsB = Seq.newBuilder[String] + + val folded = tree.foldl(init(callsB))(finish(callsB))(update(callsB)) + val calls = callsB.result() + + val speced = recursiveFoldl(tree)(init(specsB))(finish(specsB))(update(specsB)) + val specs = specsB.result() + + folded shouldEqual speced + calls shouldEqual specs + } + } + + "be stack safe for wide trees" in { + wideTree(1).foldl(_ => 1)(Predef.identity)(_ + _) shouldEqual width + 1 + } + + "be stack safe for deep trees" in { + deepTree(1).foldl(_ => 1)(Predef.identity)(_ + _) shouldEqual depth + 1 + } + } + + "map" should { + "be correct" in { + forEvery(trees) { tree => + val mapped = tree.map(_ * 2) + mapped shouldEqual recursiveMap(tree)(_ * 2) + mapped.size shouldEqual tree.size + } + } + + "visit the nodes in preorder" in { + forEvery(trees) { tree => + val visited = Seq.newBuilder[Int] + def visit(i: Int): Int = { + visited += i + i + } + tree.map(visit) shouldBe tree + visited.result() shouldBe tree.preorder.toSeq + } + } + + "be stack safe for wide trees" in { + wideTree(1).map(_ * 2) shouldEqual wideTree(2) + } + + "be stack safe for deep trees" in { + deepTree(1).map(_ * 2) shouldEqual deepTree(2) + } + } + + "zipWith" should { + "be correct" in { + forEvery(trees) { left => + forEvery(trees) { right => + val zipped = left.zipWith(right)(_ + _) + val spec = recursiveZipWith(left, right)(_ + _) + zipped shouldEqual spec + zipped.size shouldEqual spec.size + } + } + } + + "be 
stack safe for wide trees" in { + wideTree(1).zipWith(wideTree(2))(_ + _) shouldEqual wideTree(3) + } + + "be stack safe for deep trees" in { + deepTree(1).zipWith(deepTree(2))(_ + _) shouldEqual deepTree(3) + } + } + + "Apply" should { + import RoseTreeTest.* + checkAllLaws("Apply", ApplyTests[RoseTree].apply[Int, Int, String]) + } + +} + +object RoseTreeTest { + implicit def eqRoseTree[A]: Eq[RoseTree[A]] = Eq.fromUniversalEquals + + private def arbitraryBoundedRoseTree[A: Arbitrary]( + depth: Int, + width: Int, + ): Arbitrary[RoseTree[A]] = Arbitrary { + for { + root <- Arbitrary.arbitrary[A] + children <- Gen.listOfN(width, arbitraryBoundedRoseTree[A](depth - 1, width - 1).arbitrary) + } yield RoseTree(root, children*) + } + + implicit def arbitraryRoseTree[A: Arbitrary]: Arbitrary[RoseTree[A]] = + arbitraryBoundedRoseTree(3, 3) +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/util/TestContractHasher.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/util/TestContractHasher.scala new file mode 100644 index 0000000000..133335c108 --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/util/TestContractHasher.scala @@ -0,0 +1,61 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import cats.data.EitherT +import com.daml.crypto.MessageDigestPrototype +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.protocol.{LfHash, LfNodeCreate} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.daml.lf.crypto.Hash +import com.digitalasset.daml.lf.crypto.Hash.HashingMethod + +import scala.concurrent.ExecutionContext + +object TestContractHasher { + + object Async extends ContractHasher { + override def hash(create: LfNodeCreate, hashingMethod: HashingMethod)(implicit + ec: ExecutionContext, + traceContext: TraceContext, + ): EitherT[FutureUnlessShutdown, String, LfHash] = + EitherT.pure[FutureUnlessShutdown, String](hashInternal(create, hashingMethod)) + } + + object Sync { + def hash(create: LfNodeCreate, hashingMethod: HashingMethod): LfHash = + hashInternal(create, hashingMethod) + } + + private def hashCreate( + create: LfNodeCreate, + upgradeFriendly: Boolean, + ): LfHash = + LfHash.assertHashContractInstance( + create.templateId, + create.arg, + create.packageName, + upgradeFriendly, + ) + + private def hashInternal(create: LfNodeCreate, hashingMethod: HashingMethod): Hash = + hashingMethod match { + + case HashingMethod.Legacy => + hashCreate(create, upgradeFriendly = false) + + case HashingMethod.UpgradeFriendly => + hashCreate(create, upgradeFriendly = true) + + /** To calculate the TypeNormal hash requires that the template package is available + */ + case HashingMethod.TypedNormalForm => + val md = MessageDigestPrototype.Sha256.newDigest + md.update(hashCreate(create, upgradeFriendly = true).bytes.toByteArray) + md.update("TypedNormalForm".getBytes) + LfHash.assertFromByteArray(md.digest()) + + } + +} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/util/ThereafterTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/util/ThereafterTest.scala index 8ec6f1d592..0bb6912930 100644 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/util/ThereafterTest.scala +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/util/ThereafterTest.scala 
@@ -52,6 +52,24 @@ trait ThereafterTest extends AnyWordSpec with BaseTest { Try(fixture.theContent(y)) shouldBe Failure(ex) } + "run the body after an InterruptedException" in { + // This is an example of an exception that's not considered `NonFatal`. + // Such exceptions are not caught by `Try.apply` and get boxed sometimes. + val interrupt = new InterruptedException("INTERRUPTED") + val x = fixture.fromTry(Failure[Unit](interrupt)) + val res = sut.thereafter(x) { content => + fixture.isCompleted(x) shouldBe true + TryUtil.unwrapExecutionException( + TryUtil.tryCatchInterrupted(fixture.theContent(content)) + ) shouldBe Failure(interrupt) + () + } + val y = fixture.await(res) + TryUtil.unwrapExecutionException( + TryUtil.tryCatchInterrupted(fixture.theContent(y)) + ) shouldBe Failure(interrupt) + } + "propagate an exception in the body" in { val ex = new RuntimeException("BODY FAILURE") val x = fixture.fromTry(Success(())) diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/version/DamlLfVersionToProtocolVersionsTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/version/DamlLfVersionToProtocolVersionsTest.scala deleted file mode 100644 index 5b0dd2a18e..0000000000 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/version/DamlLfVersionToProtocolVersionsTest.scala +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.version - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.protocol.LfLanguageVersion -import org.scalatest.wordspec.AnyWordSpec - -import scala.math.Ordered.orderingToOrdered -import scala.util.Try - -class DamlLfVersionToProtocolVersionsTest extends AnyWordSpec with BaseTest { - - val supportedTransactionVersions = LfLanguageVersion.AllV2.filter(_ >= LfLanguageVersion.v2_1) - - "DamlLFVersionToProtocolVersions" should { - supportedTransactionVersions.foreach { version => - s"find the minimum protocol version for $version" in { - assert( - Try( - DamlLfVersionToProtocolVersions.getMinimumSupportedProtocolVersion(version) - ).isSuccess, - s"Add $version to damlLfVersionToProtocolVersions Map", - ) - - } - } - } -} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/version/LfSerializationVersionToProtocolVersionsTest.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/version/LfSerializationVersionToProtocolVersionsTest.scala new file mode 100644 index 0000000000..f4ff9baebf --- /dev/null +++ b/canton/community/common/src/test/scala/com/digitalasset/canton/version/LfSerializationVersionToProtocolVersionsTest.scala @@ -0,0 +1,30 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.version + +import com.digitalasset.canton.BaseTest +import com.digitalasset.canton.protocol.LfSerializationVersion +import org.scalatest.wordspec.AnyWordSpec + +import scala.util.Try + +class LfSerializationVersionToProtocolVersionsTest extends AnyWordSpec with BaseTest { + + val supportedSerializationVersions = + List(LfSerializationVersion.V1, LfSerializationVersion.VDev) + + "LfSerializationVersionToProtocolVersions" should { + supportedSerializationVersions.foreach { version => + s"find the minimum protocol version for $version" in { + assert( + Try( + LfSerializationVersionToProtocolVersions.getMinimumSupportedProtocolVersion(version) + ).isSuccess, + s"Add $version to LfSerializationVersionToProtocolVersions Map", + ) + + } + } + } +} diff --git a/canton/community/common/src/test/test/java/com/digitalasset/canton/annotations/UnstableTest.java b/canton/community/common/src/test/test/java/com/digitalasset/canton/annotations/UnstableTest.java deleted file mode 100644 index f21acb89a2..0000000000 --- a/canton/community/common/src/test/test/java/com/digitalasset/canton/annotations/UnstableTest.java +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.annotations; - -import java.lang.annotation.Inherited; -import java.lang.annotation.Retention; -import java.lang.annotation.Target; - -import static java.lang.annotation.ElementType.METHOD; -import static java.lang.annotation.ElementType.TYPE; -import static java.lang.annotation.RetentionPolicy.RUNTIME; - -/** - * Annotation for tagging whole test suites as unstable. Unstable tests will only run as part of - * unstable_test jobs. Unstable tests are still periodically executed and failures are reported to - * DataDog. But pull requests can still be merged, even if unstable tests fail. - */ -@org.scalatest.TagAnnotation -@Inherited -@Retention(RUNTIME) -@Target({METHOD, TYPE}) -public @interface UnstableTest {} diff --git a/canton/community/common/src/test/test/java/com/digitalasset/canton/sequencing/protocol/RecipientTest.scala b/canton/community/common/src/test/test/java/com/digitalasset/canton/sequencing/protocol/RecipientTest.scala deleted file mode 100644 index 987210ed41..0000000000 --- a/canton/community/common/src/test/test/java/com/digitalasset/canton/sequencing/protocol/RecipientTest.scala +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing.protocol - -import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex -import com.digitalasset.canton.topology.{ParticipantId, PartyId, UniqueIdentifier} -import com.digitalasset.canton.{BaseTest, ProtoDeserializationError} -import org.scalatest.wordspec.AnyWordSpec - -class RecipientTest extends AnyWordSpec with BaseTest { - val alice = PartyId(UniqueIdentifier.tryFromProtoPrimitive(s"alice::party")) - - val memberRecipient = MemberRecipient(ParticipantId("participant1")) - val sequencersOfSynchronizer = SequencersOfSynchronizer - val mediatorGroupRecipient = MediatorGroupRecipient(MediatorGroupIndex.tryCreate(99312312)) - val allRecipients = AllMembersOfSynchronizer - - "recipient test serialization" should { - "be able to convert back and forth" in { - Recipient.fromProtoPrimitive( - memberRecipient.toProtoPrimitive, - "recipient", - ) shouldBe Right(memberRecipient) - - Recipient.fromProtoPrimitive( - sequencersOfSynchronizer.toProtoPrimitive, - "recipient", - ) shouldBe Right(sequencersOfSynchronizer) - - Recipient.fromProtoPrimitive( - mediatorGroupRecipient.toProtoPrimitive, - "recipient", - ) shouldBe Right(mediatorGroupRecipient) - - Recipient.fromProtoPrimitive( - allRecipients.toProtoPrimitive, - "recipient", - ) shouldBe Right(allRecipients) - } - - "act sanely on invalid inputs" in { - forAll( - Seq( - "nothing valid", - "", - "::", - "INV::invalid", - "POP", - "POP::incomplete", - "POP::incomplete::", - "POP,,alice::party", - "MOD::99312312::", - "MOD::99312312::gibberish", - "MOD::not-a-number", - "MOD::99312312993123129931231299312312", - ) - ) { str => - Recipient - .fromProtoPrimitive(str, "recipient") - .left - .value shouldBe a[ProtoDeserializationError] - } - } - } -} diff --git a/canton/community/common/src/test/test/java/com/digitalasset/canton/sequencing/protocol/RecipientsTest.scala b/canton/community/common/src/test/test/java/com/digitalasset/canton/sequencing/protocol/RecipientsTest.scala deleted file mode 100644 index 06eb1f567b..0000000000 --- a/canton/community/common/src/test/test/java/com/digitalasset/canton/sequencing/protocol/RecipientsTest.scala +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing.protocol - -import cats.syntax.option.* -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.sequencing.protocol.Recipients.cc -import com.digitalasset.canton.sequencing.protocol.RecipientsTest.* -import com.digitalasset.canton.topology.ParticipantId -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.wordspec.AnyWordSpec - -class RecipientsTest extends AnyWordSpec with BaseTest with HasExecutionContext { - - lazy val recipients: Recipients = Recipients(NonEmpty(Seq, t5, t2, t3, t5, t6)) - - "Recipients" should { - - "filter for a member that doesn't occur" in { - recipients.forMember(p7, Set.empty) shouldBe None - } - - "filter for a member that appears in one tree" in { - recipients.forMember(p6, Set.empty) shouldBe Some(Recipients(NonEmpty(Seq, t6))) - } - - "filter for a member that appears in several trees" in { - recipients.forMember(p3, Set.empty) shouldBe Some(Recipients(NonEmpty(Seq, t3, t3, t3))) - } - - "be preserved through serialization / deserialization" in { - val proto = recipients.toProtoV30 - val fromProto = Recipients.fromProtoV30(proto) - fromProto shouldBe Right(recipients) - } - - "store all recipients" in { - val all = recipients.allRecipients - all shouldBe Set(recP1, recP2, recP3, recP4, recP5, recP6) - } - - "test for a single group when present" in { - val recipients = - Recipients(NonEmpty(Seq, RecipientsTree.leaf(NonEmpty.mk(Set, p2, p1, p3)))) - recipients.asSingleGroup shouldBe NonEmpty - .mk(Set, MemberRecipient(p3), MemberRecipient(p2), MemberRecipient(p1)) - .some - } - - "test for a single group when not present" in { - - // Multiple trees - val case1 = - Recipients( - NonEmpty( - List, - RecipientsTree.leaf(NonEmpty.mk(Set, p2, p1, p3)), - RecipientsTree.leaf(NonEmpty.mk(Set, p2)), - ) - ) - case1.asSingleGroup shouldBe None - - // Tree with height > 1 - val case2 = - Recipients( - NonEmpty( - List, - RecipientsTree( - NonEmpty.mk(Set, recP2, recP1, recP3), - Seq(RecipientsTree.leaf(NonEmpty.mk(Set, p1))), - ), - ) - ) - case2.asSingleGroup shouldBe None - } - - "correctly compute leaf members" in { - val recipients = Recipients( - NonEmpty( - List, - RecipientsTree( - NonEmpty.mk(Set, participant(1), participant(2)), - Seq( - RecipientsTree.recipientsLeaf(NonEmpty.mk(Set, participant(3))), - RecipientsTree.recipientsLeaf(NonEmpty.mk(Set, participant(4))), - RecipientsTree( - NonEmpty.mk(Set, participant(5)), - Seq( - RecipientsTree.recipientsLeaf(NonEmpty.mk(Set, participant(6), participant(2))) - ), - ), - ), - ), - ) - ) - recipients.leafRecipients shouldBe - NonEmpty.mk(Set, recP2, recP3, recP4, recP6) - } - } -} - -object RecipientsTest { - - def participantRecipient(participant: ParticipantId) = MemberRecipient(participant) - - lazy val p1 = ParticipantId("participant1") - lazy val p2 = ParticipantId("participant2") - lazy val p3 = ParticipantId("participant3") - lazy val p4 = ParticipantId("participant4") - lazy val p5 = ParticipantId("participant5") - lazy val p6 = ParticipantId("participant6") - lazy val p7 = ParticipantId("participant7") - lazy val p8 = ParticipantId("participant8") - lazy val p9 = ParticipantId("participant9") - lazy val p10 = ParticipantId("participant10") - lazy val p11 = ParticipantId("participant11") - lazy val p12 = ParticipantId("participant12") - lazy val p13 = ParticipantId("participant13") - lazy val p14 = ParticipantId("participant14") - lazy val p15 = ParticipantId("participant15") - 
lazy val p16 = ParticipantId("participant16") - lazy val p17 = ParticipantId("participant17") - lazy val p18 = ParticipantId("participant18") - lazy val p19 = ParticipantId("participant19") - - lazy val recP1 = participantRecipient(p1) - lazy val recP2 = participantRecipient(p2) - lazy val recP3 = participantRecipient(p3) - lazy val recP4 = participantRecipient(p4) - lazy val recP5 = participantRecipient(p5) - lazy val recP6 = participantRecipient(p6) - lazy val recP7 = participantRecipient(p7) - lazy val recP8 = participantRecipient(p8) - - lazy val t1 = RecipientsTree.recipientsLeaf(NonEmpty.mk(Set, recP1)) - lazy val t2 = RecipientsTree.recipientsLeaf(NonEmpty.mk(Set, recP2)) - - lazy val t3 = RecipientsTree(NonEmpty.mk(Set, recP3), Seq(t1, t2)) - lazy val t4 = RecipientsTree.recipientsLeaf(NonEmpty.mk(Set, recP4)) - - lazy val t5 = RecipientsTree(NonEmpty.mk(Set, recP5), Seq(t3, t4)) - - lazy val t6 = RecipientsTree.recipientsLeaf(NonEmpty.mk(Set, recP6)) - - def testInstance: Recipients = { - val dummyMember = ParticipantId("dummyParticipant") - cc(dummyMember) - } - - def participant(i: Int): MemberRecipient = participantRecipient( - ParticipantId(s"participant$i") - ) - -} diff --git a/canton/community/common/src/test/test/java/com/digitalasset/canton/sequencing/protocol/RecipientsTreeTest.scala b/canton/community/common/src/test/test/java/com/digitalasset/canton/sequencing/protocol/RecipientsTreeTest.scala deleted file mode 100644 index feb9def2b9..0000000000 --- a/canton/community/common/src/test/test/java/com/digitalasset/canton/sequencing/protocol/RecipientsTreeTest.scala +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing.protocol - -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.config.RequireTypes.NonNegativeInt -import com.digitalasset.canton.topology.{Member, ParticipantId} -import org.scalatest.wordspec.AnyWordSpec - -class RecipientsTreeTest extends AnyWordSpec with BaseTest { - def rec(member: Member): Recipient = MemberRecipient(member) - - private lazy val p1: Member = ParticipantId("participant1") - private lazy val p2: Member = ParticipantId("participant2") - private lazy val p3: Member = ParticipantId("participant3") - private lazy val p4: Member = ParticipantId("participant4") - private lazy val p5: Member = ParticipantId("participant5") - private lazy val p6: Member = ParticipantId("participant6") - - private lazy val mod1: MediatorGroupRecipient = MediatorGroupRecipient(NonNegativeInt.zero) - private lazy val mod2: MediatorGroupRecipient = MediatorGroupRecipient(NonNegativeInt.one) - - private lazy val t1 = RecipientsTree.leaf(NonEmpty(Set, p1, p5)) - private lazy val t2 = RecipientsTree.leaf(NonEmpty(Set, p3)) - - private lazy val t3 = RecipientsTree(NonEmpty(Set, rec(p4), rec(p2), mod2), Seq(t1, t2)) - - private lazy val t4 = RecipientsTree.recipientsLeaf(NonEmpty(Set, rec(p2), rec(p6), mod2)) - - private lazy val t5 = RecipientsTree(NonEmpty(Set, rec(p1), mod1), Seq(t3, t4)) - - "RecipientsTree" when { - "allRecipients" should { - "give all recipients" in { - t5.allRecipients shouldBe Set(p1, p2, p3, p4, p5, p6).map(rec) ++ Set(mod1, mod2) - } - } - - "forMember" should { - "give all subtrees containing the member" in { - t5.forMember(p2, Set.empty).toSet shouldBe Set(t4, t3) - t5.forMember(p5, Set(mod2)).toSet shouldBe Set(t4, t3) - } - - 
// If a member appears in both the root of a subtree and in the root of a sub-subtree, it receives only the top-level subtree. - "give only the top-level subtree when there is a subtree and a sub-subtree" in { - t5.forMember(p1, Set.empty) shouldBe List(t5) - t5.forMember(p5, Set(mod1)) shouldBe List(t5) - } - } - - "allPaths" should { - "give all paths within the tree" in { - t5.allPaths shouldBe Seq( - Seq(Set(rec(p1), mod1), Set(rec(p4), rec(p2), mod2), Set(rec(p1), rec(p5))), - Seq(Set(rec(p1), mod1), Set(rec(p4), rec(p2), mod2), Set(rec(p3))), - Seq(Set(rec(p1), mod1), Set(rec(p2), rec(p6), mod2)), - ) - } - } - } - - "serialization and deserialization" should { - "preserve the same thing" in { - - val serialized = t5.toProtoV30 - val deserialized = RecipientsTree.fromProtoV30(serialized) - - deserialized shouldBe Right(t5) - } - } -} diff --git a/canton/community/common/src/test/test/protobuf/com/digitalasset/canton/test/hello.proto b/canton/community/common/src/test/test/protobuf/com/digitalasset/canton/test/hello.proto deleted file mode 100644 index 90ce523f62..0000000000 --- a/canton/community/common/src/test/test/protobuf/com/digitalasset/canton/test/hello.proto +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -syntax = "proto3"; - -package com.digitalasset.canton.protobuf; - -// Just used in tests -service HelloService { - rpc Hello(Hello.Request) returns (Hello.Response) {} - rpc HelloStreamed(Hello.Request) returns (stream Hello.Response) {} -} - -message Hello { - message Request { - string msg = 1; - } - message Response { - string msg = 1; - } -} diff --git a/canton/community/common/src/test/test/protobuf/com/digitalasset/canton/test/parsing-attack.proto b/canton/community/common/src/test/test/protobuf/com/digitalasset/canton/test/parsing-attack.proto deleted file mode 100644 index 3805343701..0000000000 --- a/canton/community/common/src/test/test/protobuf/com/digitalasset/canton/test/parsing-attack.proto +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -syntax = "proto3"; - -package com.digitalasset.canton.protobuf; - -message Base { - oneof sum { - string one = 1; - } -} - -// Same as Base, but with another field -message AddField { - oneof sum { - string one = 1; - } - DummyMessage three = 3; -} - -message DummyMessage { - string content = 1; -} - -message AttackAddField { - string one = 1; - int32 three = 3; -} - -message AttackAddFieldSameType { - string one = 1; - bytes three = 3; -} - -// Same as Base, but with another variant -message AddVariant { - oneof sum { - string one = 1; - DummyMessage two = 2; - } -} - -message AttackAddVariant { - string one = 1; - bytes two = 2; -} - -message VersionedDummyMessage { - oneof version { - DummyMessage v0 = 1; - } -} - -// Switching to repeated -message Single { - string one = 1; - DummyMessage two = 2; -} - -message Repeated { - repeated string one = 1; - repeated DummyMessage two = 2; -} - -message AttackRepeated { - repeated string one = 1; - repeated bytes two = 2; -} - -message UnsignedInt { - uint32 unsigned = 1; -} -message SignedInt { - int32 signed = 1; -} -message UnsignedLong { - uint64 unsigned = 1; -} diff --git a/canton/community/common/src/test/test/protobuf/com/digitalasset/canton/test/scalapb/package.proto b/canton/community/common/src/test/test/protobuf/com/digitalasset/canton/test/scalapb/package.proto deleted file mode 100644 index a54d62822b..0000000000 --- a/canton/community/common/src/test/test/protobuf/com/digitalasset/canton/test/scalapb/package.proto +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -syntax = "proto3"; - -package com.digitalasset.canton; - -import "scalapb/scalapb.proto"; - -option (scalapb.options) = { - scope: PACKAGE - preserve_unknown_fields: false - no_default_values_in_constructor: true -}; diff --git a/canton/community/common/src/test/test/protobuf/com/digitalasset/canton/test/versioned-messages.proto b/canton/community/common/src/test/test/protobuf/com/digitalasset/canton/test/versioned-messages.proto deleted file mode 100644 index 12ace28e65..0000000000 --- a/canton/community/common/src/test/test/protobuf/com/digitalasset/canton/test/versioned-messages.proto +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -syntax = "proto3"; - -package com.digitalasset.canton.protobuf; - -import "scalapb/scalapb.proto"; - -message VersionedMessageV0 { - option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; - string msg = 1; -} - -message VersionedMessageV1 { - option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; - string msg = 1; - int32 value = 2; -} - -message VersionedMessageV2 { - option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion"; - string msg = 1; - int32 iValue = 2; - double dValue = 3; -} diff --git a/canton/community/common/src/test/test/resources/blake2xb-golden-tests.txt b/canton/community/common/src/test/test/resources/blake2xb-golden-tests.txt deleted file mode 100644 index 760a368bb6..0000000000 --- a/canton/community/common/src/test/test/resources/blake2xb-golden-tests.txt +++ /dev/null @@ -1,256 +0,0 @@ - -f0 -b5aa -bc38f1 -57624fb2 -ea9d54f5f2 -2bcb84c09d35 -2df3b0c53f2967 -26de76fed412b6f1 -b91f740750ffdb2aa9 -e161ee158218cfd98f91 -6bdaf88b0922b637274001 -eff9f92cbd769c81a64c20f2 -313f0d863f80e115d342afc286 -9ac7c942487e8f48b1bec271562d -221d56ed5ac8b0936111c773f9a744 -d0a446adfd1fe0cc61e42e70772584c2 -a7248218b83af8ca2728d9aca773438100 -7ada5a3cd09be192f152e36eb77a49228bd5 -8969cf3e34b108498786f3c9807a54da29c5df -d6981306dc3865bebe4ee085ae81a11d43601bba -e1f7cd444fc960831929a1ba4c81316537e315dab1 -dd675ed39403b1500a2a619889c2d9d91791fe15d177 -d503e3a0261176767e318f079e9c4941cee791d3db3e03 -4bcb663054528ad4cc568d12ee1ac0e33790e0635189e53e -ee8ad528ce57395cfc101654a2e205deb62429640755a6067c -8435fb101e96b9dbca75a444212a99211ccb35173e9f1c2b01c9 -bb849abcb58a9e8a3a2becdf0550774cff0093d5ee5dab5e9db38a -0a74d45327c814c45abc41713680021f487be9a133455a7550fad2e2 -f662e426340a253766ad9a1a13b7f60db4bdf953a04456789b5a261543 -1a03271192bdc3981f166cb43945d4d78878cd2a2ab620f56337f4cf1206 -e8cdc5a0f15392219c8ad35abf0c1a976fb430debe801887ac8000a7968fd5 -b5d259e2e3a86c77cbf6d53f9dc78daddc2afd84dbb4ba7e9891227fec079d5a -d3818948294fdecac8411f860f7cad50469df5d1485524e059d4dd8cfb69c32bbd -8e3ee191e4d30346f19ab6904b6e810d416a87a1da3c7f78445db72fe49f615705a6 -c2622f680f4350aadb5dc35300e08197b1e968a1df8b3091cadc3abfe261a269605319 -7030a487b0c270d224a0f2eba284b876ac44576a119546af47627417ddce0f4650bb8b56 -1c09ce4bedf8ee67d19430f8a4708d73f0be22e19c55fd397471e7705ff9958603911f6a38 -84f7f7502bd140a07b57e0f69863874a635403c111836b3fdfd27a030582e6d8e46162b62cf7 -63971b351ba119eff6342fdfda6bd558edada5c56e65ec0648ba3455fca1a3d51b603c028ead8f -64e5a1f06e4d5cb5859ff01f3af1dcecdb89729d97aad3d4c8cd96eed4bd10487f918ea0ec6c96c0 -d3cbcee509e8efa84f4c54f6eb097f17ba98c23024097f8ae5498d364d45afecb6ec1654e2e3c073c1 -c35631a7735d09551b02c64eb3edfcfdd515c12646bb51695ac51681b33841197e92f6c3a0a2691cfbd9 -4835394320ff955c272fce2a6eefc8279aaf63492610912ccb525a9f3c78870397c6dd35119900e88385d8 -66c4227b1a29889d5cc7e2025fa4649c365e56d153eb4c4e1790b3f0a26a3553c51f198b04851401af1c0acb -a1550d81d0e465b8e1bd228e0d3b71c29a23f8cf58d9c43361ecac7eff8698cd68bca923c25d08be3c92bb9926 -3ba25c21d16bb7c93ce16fc914b1bdb5b7ec249ba7dda6be1533c76e8dc20b704913cbe53201086b0e14e5901042 -0c4c0363a32aa180de2d3aebad786dfa1612141ae77f2eefedda3fc6366f34ef8d64e496a30e972ead3760f13553f5 -60fea990c5efb33b64005694d1ec92c90ea86f434ffc603cc26393a6bcdaaf99c8993f6f2fc5a3080a5cca5532697a08 -67c9623f20ada31853c15e3d973b8a9c69643c3e328908bd138fa7d74789b4e5408da66ca04f0a286cda823c738958665c 
-dcda6413fdbd760d59bcdf5d28000c099818c9237511acdc6e0ad40819d47c40f0f883bb0b98d3caefb7fbd281db805d3aa8 -120c123171c486c9a11af8d7a1aeef60f78d071c8edb55fd97959261e4c708ac06eacee87e657b84a5072a7989b101c98b0415 -434cb92246e474ce066de67de1fce06ab17416438598d3cff730faf1deb4574812e877f6a3f2dbc30a3e48a2cf4e441da32c4ee9 -dd4cde9f87ecd959a41c6454869e55342ce0e5d2ff306b3d4dd2263365e192ee6781fe463175280d4682b397b8d6020699ed3a9611 -df8471721ee7aae06d5aab20ac9ab5624797cd0311ee38116eec76748a42aa5ad23d1e3dcadbfba6c296aceaa05512cf1f2a2b415c14 -d226e59df92c995bd5fac8b9cdbd4bce4e11d6b2a2cc382b253a5188e7f4164063d1daff2254b4cfc7ecca462b7c1e11080c1ae51dd908 -0316b9cf375915d70a2c0a0f6560a609b3fd43bc8b26b8489caaece3c8cab25beecc3bb86d3860d6f2ca9297625fa2f2d5fcca5f6f0a32f6 -828bdf033346520f262ac1383a5e7091640fb9df39c51f82de52bc61b7284f4d7cfb1e90fa19d0ffe3f38dfd60fd05136d66c190cc47639634 -9388577eb0bacab40bfc38e2333f82fe72b575392db914da2706bf2a30787d638b6b31343e8245e4dbb2c962cf3f940d8ed0945d2db04b902b0c -5a79d4de91c298c87bfba338f3f85efe00275f6e5463419af83c34129586f30a3d36f57bdf68c9b5e16373c9f9921866c302bc75722c314fc57cf4 -73068c5b623b14802a9eb899f6285ebb7e601c4d916b0255762b7cccc2161417695818a605fed681fd4016e4cb1ad3a42cd88fe1a3f73367ae0aaff6 -668d87bfd70619d877ee8d8f86b5d5ecc4df2aacbad00f2dcf7c80741be0b890bf404bf62c1b4c8d1b0201ffb92d85fdc45149fa58f557a9c6a0190e7d -cbaab54fe4bfd9ed5d9b7fc97d8a4105af147e13f009514ddb083402ee941ecc7dc3c286308d9555bee67cd73505142758db79fd819ed399e490b801623c -d9942e996573688a348aa0fd1a2951b11d7732103acc23f31f27b222d5103879b9d3837f2571a7aebffd170ad03cfd89281f48fa70edb7c9f4103b5b8bb791 -571be91037c15145e2ab4894a7bb8d8a3cab75e6e64ef296e760c15cf8f3f3acfa5c894ee56cb6ac2db9b32c39a1cc39f96c50dd333f1059230482f3ed2d9246 -c6f0b1b66f22726cef3e4fca2325d2bb4e922b39f9df5ef548d321419c07391fc311904407f98db7d7462db1e8576138baeac2a76400b2a2f72b4497c19e239430 -43cb5507bcb6d7162f7b1d6b81a958b1e21ed2db9ae907a663819ae0d613ebb43c4b39359ff859ce90a9f65c369c6d30b83aa56b0107a5193a6fadced2d6c0ec5be9 -d412ca3d1b8fa9adcda589c5c422c295c3fe953ffd896e7a98f9262689bd10670047d9224b685c449b6daa5ff5d660c3ecbe5b3865652932d35cf15176de70e4356ed9 -54955d39b0806ec95897e117064c4b173f20fb248596ac8b00ce57de2d88c01a12f42d3a6f0de1d1af8c41c0b2f8dcd613532314f773bb3ad0f470500466f0d8ae599164 -713b87f7f4f2edb42d279e9ca3bef61e8ceaad72e6ea90cfcae4f9638798e00e8d4b464bf49ff26231dfca5a4dc8b3d47afb36708f494ea2c20cb9bd3d344901a42a95ff82 -c9421a7a80df6882010c6c4aff7ddf920924fc77246d1870da90f183c14dc3264faeace4c76426020d8383efaca2abfbb0957f1cc8249a212699019d36afae811253e8bb3b26 -2f28d45cdb35951f9e043335c0df22d53e238a7b2df3bfd74d5656bb7e65f24d12c35fe0254669622edb9f76fe2672a7978dff201aecfd2605b2b326a73a43fd470dff9d8d98bb -93694e4b1a7b15ac94963b9111f86a29266f4beabf1fce740ee44fd264ff44eadd8d0df5aafba8b8b65f48513a5920bcccd2e4d9c3a90b71fe51e11e2857df2e0379debecb4ade7f -48887c63b6a5b7351632689a03b53cfba034c653cb65ba6756e0f816eb630663b263ea897025b65703cac600e1a450d71c945f7063d1606f0950da744f47ce0021d7a180e943ee9aef -44c7a8ed8751f54d0e5428d1ae063f1ec081d93acca64542d28d8c11aa0011caec398a2897b3c3a15ed382610b23620e833ab295d9a0eb61afd2948b4093d9e5df08d01d03dd6834742b -d625de2d8ee2ad2ef5a207af5092eb7965c4df09ac6b55ebe2bdaae799162a32e576925129f32f02c00e42bb2ce5afd73e0c64b9fa8298fe1495f0c8f201ad5c6780b83d58787cb2b4d8f2 -36370178c82981799e8622265c63ecf2329875efec250e995a8de5064aa5f1dd80d2854adb1d806f6bf2360c567c34e802d58fdd0ea15008b20492e09a6e11ff340de57dd8b03aa319d61c41 
-554918ff5b98e3df9c43ab9559a75e8eeb8f2bcd5c4bc87b8a9c2329df8fc35c241bbf9ad354fba00c318a2057e9eea6184260fb11072f57c6a587add3043c9f2bbc162abc6e50d06c7c673c9f -72715c4ad8a633712074b9f133fea34cad5f7c44aa12ae7f1027b03500a9a132a7ecc477c0d18cf4a1a794e064eeca6743534ce07dba5ca211251c903a6d2729d02728161fb8812ff511e7e49c16 -98a842b0fbce066ec6778cc378ca4b90c69ef7570247cf789dd5c9a502f5f7dcb3ab02c32f06375c2e153237babe51d0cf3fee0cd75bf1e34095a98ba712a2e11c1017500abc9238dc1494c527c4d5 -2e548c4d06fe63198b95a9629e5c68372f8c51b0f2689a2a0b7994a204dee4a5669525786c5709e68ff35faa3d29be6f8902ff1bd742c4f1534fe3d6b5cf0b4f6f1c0f9415bd9801ca0c33c8e11940d2 -9ea0b2e651b1d1755a7f67e78d2333b658bd4dd49157795657a7ad8bbce158dc0d4a002e5e737f52ab1e55abd0cfadac928a977949b264946af48045920b46da19f63649dc116217f6ac67355edb94c46f -740fe0d1f4db6f6337d300089af7a166ce44da3c6e71e80ad6e92d604f80ac067eb077f2ac2ad76bc0604a8089ddbd22ed3373438e5ee0f25dc34ed466ff4b420d77db7e1e9d88a4ac919fc421116b62e673 -c435ab1e8d4697596abec90384c47634689ec73e87fb2d360f30bc5b4b47268bf3a1f31b271b800132f8a45dc85f82ee7620d50db5bdd400d36a36e1c87cb2f637c30afbd07ae34417e77ef2dd24e017deb7b9 -7f32551c0b81ba6233553c8ce988da296fa2e345262950188cf6372aca5bcb8cedbbec424978310adddec426551681f93a9b4cf6e15a06ac70650dd211386498d45dbd6b70d66b843f73f07fdec611bbc5ee0440 -d8e9839dd767f514a33125051022d7e50c05d6521d852fbaa635fed502e59554bc9b8a1a31753f4fe90d2f270b27e73d65edcdecc18055d53fe1859744ca3d5f39bfa6b23a4cbdb9c326d7b3be831ebf7c0abbe676 -a1d587c939232585840b8f9ff27503efcbc1f59bca47f2dbae3fcf8ce26743debf6d67936f3d45bf2cf7474eb8f69b0765f362867be29a7ccfe41710e2c3c9fb5ab8a0a860612461e7f4b52ef28c73a087ef0852116e -63dad275e477674191cf03436e979fbf16b1220e81ea8965fe53828e46f06a1dbd1a6bb03cabfbca70261f63d5cf491e54e31c024e87394a3cbaa9ea1cab3a2f3a6f5a015888c01150286460dabadc8d0af900bced9a64 -bbe011c112d53f842c0cffe98d96855b8d775c8c1572d29ffcf3feb0bae18de317e1db03f847fcba90ed941095ce0b2b96c8b1c7d9dc2afda7f08d16ca6c0f32d3a5cafca2fac92487c5a177af200c9adc866112629beb26 -ddafad4d5b5fcbc07f2222f9765f750f7a526e5894a165bbc9ddd6de71e23775246601393f488b61ca9f26aba2d3847de759f4082999e472e40c829e6923282b4f6c1d3702071457c7fb2dbd0db54c5c6072159d1fdcdf90b0 -275cd52846616d516fe77a6c7a4069ce46f120b66fe8043be79dd70906abb006ce5ced75dee096277b8c26899323cb8567a8c389578fa0ddb0b0988ded7d96ca69d8b78abd52663fd20e66a0ef4660e1f38460db06304479e421 -39e75539fdefb2bd552b0009e7b2fa583afc8a2b011b505de62aba44c545e13b86590c731fc2fd848a219a510c3f1184ba0149283668ba93dd5a056cf5b752200659991351a7db19f04fcc7f96b3d25de5b4a726c7dad1b7ec7768 -cdb61379ab20f7975a61b52dfc3f218d5f803f08e8286881aecc94b92f46923973f227b7a1637c1269cc87b9634bce578858f4e9b04fa60ee0516899d71573c8e7560886dfea6d08b744010c0a9c236f3caabf523cf3a3d7a075e23b -a1b3588cc3742b70cf826f7af7c45ed5b4cba2559541e34cdd3562a216afdaa2a4e39436183ece09c222bc77ed5cf7b806b7f67c703f5c273a7d5879a630029224140f9b33bf2f4243372e8f781851f7db7d3dd8c795d161605257aaf4 -3443227e56b16df3f7e15deeb5c8c0713300348703503a82474f964612ae13a047925c3f5b6b364af3f5f89f3b8fbfd1814a42856ed777b90e702256d241938a60b16d00a65143762ca29f577405301979aef51ac5c666247dc2f932ffe0 -e0cde88552c1f5e0f9d3e7a97ccb49996aedc8d38093edd3930094002306f729b8f55d1fdd54db364173ce2abfdb65a35e698f78ffa02686119217597b26216af81ce7c7701e9ecc74dda65feba3d63e3e7dfc1f0a2c7ca13c552fe16c2830 -610a5ed42281e3b93a210848ebb8203b8eacafc2a19a502d8baaa2e604a573bac3acd16f265ea4befde07c5de8c0c5cd019877d90de6c3e93df1afb6930ae311bd52b7c6e7c677aac72df9edd2657264d145755dc936193e1ecc44edb1246dfe 
-99f96987a955b931534b13d38fb49f383078112195ac492fd6cc44a6efb2e161ab87caa50a594fc7f86328f374d4b21c7802a84f99fa498c22d62c461c2945347e6abf1749afe2e22c0aae2f053b6a6bc7854f56503f3ea6d70193287bf9b82f23 -3ddc943c2a5e5d0786d8671083dd8893906b02a610db62bfe64c7e6ab086646474483193062f08f903866d6050b50ef55213e3935aaeccbc385f90ebd040f7e7efca8be101824770d1e5e7ef92fd65d148e63fa627469c3f2b5fd5e5e4476159665c -3df18ae1f63e037219787a95ad960e967671a8389c1ef07c17be3632d5bfb30ddd86ccd7b53f191baa81dba189665407df6d3b1931c7a94c2ad62bb6ed9b7da1dc9b2a5b98cc069abb2c7e58648ed4436d359eb60fe5425c16103d20c793ca66bf847d -726ead16f67729ba596654a551eb126e99457962286fc54bb6baf50d93c283409694db0142264b697e6d9be81bd7f63e4965c784ef0af12529294ef7795e9d64c371b15c1a5701c48dae9e2a3d908602c4a82bbeddb9a20eca30b591140f76fbc11a3df2 -bb100a9b0c720ffb4e57422af4017c7eee0396f9c1b1174e0248298d521ad171ffac53c622b68f45c9482b3e520170f44b9ac4855f25874674afec56ba9f608c6c7a6e8bc9b77dc7f5f48a148052e649ff31004a47dc1b3f15bc668b060862684bae6cc402 -cde6c07be9135468c33b09e648fa0551578d8b7317fffa60add7e430838633f0fc1efe2783468c96c74e2208ac947d726b139b5f5b682bf615910ea9a911195f71ed8ed899f7b8ed126d6452cbcdbf6aee558662de0f689d766e69ab2244f5ca70e6bbe9cc4d -591fbc984e4b79a372b2dc951afbc269dc51afcca0f131840335fd93275b40b32be13db09b36a8bbbe7644470093210e04ed4832e6aea478fb5028320ca8ef513f27d0b3d1c018fd7d2fe1bb1b8da6fab196dbebc33043b13469a114153267a3d4668d062d109f -b14803314977b0e29e3ac469b17206b527b6e95e3a47f537bd7e5e18ce69e0e783737f8b1d993c48b0dc2078b0bacf2f752ade8a0a709f8b27bb5efac90177d6bad0d946223992cab2bedd83f8e874f13839578bfbb1e283423616b9cc9c5a7793ae921664338d4e -5b3f9b47a210b68bb40b96396fdfdc4b130cf0c4f7708277a6bce760837dd8508f4e321e09cd36cde6fb6125ede04599064fd9a7675c0508b240b8352e71168668681eb6a0aac08dd9145439ae2ceb9c7b0e575230c51e1f89a08fdd4590c3c1bc2c397b64d098302c -dd31b6a184c00932da12262e6030c8045d45433e15b975eaad70144143c8b9ede3c71f4bc0324c04617ffd377362caed64ee57e40cbd952b559b54b59fc86832b687d08931403f854f26297251e606c75f41717228a3a6eff683fad7528f252960a286e15edac01a5a59 -872cba270f1ac791d7444b1222ba36706735ee0a2794cbb33eb0e7f1e091f06124f61c1a1332e78f71290c8e9bd3f8f7b73d7619958a2d9a8ec9e7345e43c2b49868264ce15785577f4b7542b4dacbea045850d38c006e40f61a710b660ffb1be2a9697d0c50802c19fe2f -190acd9ec74d1e20da6d30b7c8ba4a8477d87cc700569017e74dbd1fde1e66fb746d43cd115e7d4e4e960cf23a762325c2fe0a36fe5f9b1f5b3d100cd0427c9747b4ed2fb8e4c1f8e86da805884c55333f5d8b29db7317699919f927b235aed26014b4bb0ccf02b6b3ee4ea6 -d7b9dc5c89dc7a2f6caa59d66faf48c7d3cd85b40241ed5f839f7693a637e2f995300cdbb942dda736929ec84cbb4113a982666b9f49f7758b1dd8cfe1edf2049f8f822afeb9d7b469839325e1a854a0a48fed747ccdecd01f1dec302899578a27947eac0ecf4f07742b311053 -1c8f939474416a28984e8e5edd261c73757210a84a070b8feff99a3395e5f61f4eb5fc97b4a10015d5adc35fdc79be330112cb1133c9ec8362872029cff48c1aed3a4734d343208a809ecb1280e442ff80cdd793ffe3a0feb207de7ffdf685f595633b758f80f0e932464935e79d -bf442201603db1da2d8e8dbebad06c0856aa36008825fff03295f3e81219f0708983414a8e584c2e40bc897a777a03923b3be75dd66b764863c67b7dcb18fa781e2543f8c1a901e9e7a50c125f7ed0202f5fe5ffa4e2ebb3242e36e2ffa25ac5fa6d86ee556310a7cecc84a023b16a -33166d74ead94fd2ea667981edbe87ff5a7418098953bd4a293ceb01954e83991ac116ad990bd176a885ebec291a3b2385d78e7b2c1034849b413a66bfea98910e5aaf3c3a83b726ec63c94b8f36832235f5986eefa495e7e9e1320ad00ff57b7898284a0f1550986cfb5ad938bc8e35 
-eca675d47a8c160371cfde83919fb31b65653792432281718d113780c0d0c1d8eed4f5e0238606d66fa0a3b515716ba58535c7d36a2a3835f7599ed6601a7e1467adc1720514d78946a1658139482d3ec38cf5d6aeb58f79ec51780b780a58df316a05784764d791e3a8f37368137e8ce3 -923bbbb27c5a11d4d5305a35646543efc0ff2c38b8602f306024b2b5954d940039720677ec0e873c8e0e83f9581a045867e9b2c02edb359249d9e006dcc6c79f75c9cc5dee9c5f04ae43268d5a4a1da37122904b2750aa8aa43800b7ff90e07041b9752cc7001928d9fca5e73874e4fd78eb -16353325821cbee3c476b1f872fc6822a902426f812affdffb50b5cb7c8b5c550133c9135e0ac6068c3f8f0709f1a720717da2833c3a83dd9e6faddc45502950c33ac14d35dd05a96cc7a41158fbfbada5e5775668c6d0724a454446655f25e7a212e6d6b6335df1c86d0db17332fb4d12698f -04a979bac8684f95894f1c4db8009a33bcc0c054858c7e8ec40f0820d9c98e09758be2b492426333cdbb0606fa7981033aca5afe0d13c89bb51b8c3f5b65593183c91eb23165a141b4ed8b9064469c71301541b8f7d087d5bdbf192b99c8f5cc440f01c3e29631c5d10c88f9f3ef9236fb42a1e5 -19352454e048610cb26842f57414f8f62285eac8944f6c448a09b2706c8b853267ef45d93a5056a89b2f28dece8475b4232f6206ca0c9090ce731b0dffa5130383eaed7a81f06c457a4684e5ee1783d4792e0d47681b7262757ed3446f037e6a9808972585cb2ef0074c07994d30caceecf9a9d66a -965ac58510f8f8f446dc09b91d3eda85b3a2de1350a4ede9aa95391bd116898fd4e70c0311df9353e602b1d8f1060d69b8dd8672fcd6d1dd7249c804c5b4031d22896019809434483beb3eceeca78f11415a71e101df3fe5eef09afa97a1d1c66ee3f3efc08cc35a5268e06521b1f0742a45d0fb4053 -5f2215f843ba3949f5c68cdf5cac13ecc64a589ba0d752ef877a4928dc462d918395c83bd1c1f5dc3036621e75a51038f9467a8023800d6545b970abda4029ce1fabf0887ae3721f3494dc15a6eaab704969a5b4670c9339f181ea91eb7085be064154f6a359f12b6715e6a1190fe9fa2aac0b1a082f91 -8e6a13e3d41197e3f8b897761594dd9ba97ea1166a281fa01e2804e92597dd596d25726df67d9d7fcef9607a6fd248ae1502ee743f6d0eb3a1a8efc621f86bcacde2c53e091f6778eed63bcf5092ea732ed2ef7f71090f4c41d0b6567a4d7fa62c40ce14d7321f5fc18261a7c86fc06c764e94eb6b72f63f -13fe6282f3c1379432cba49fa10fa325d165fe17e7923620c4172759bc8989395d16715cffd3bc719d72558a19cda78fca79477ae6342da459aec809692976ef227fea180e4df795766883c4260320acfa8e8128c6bd616ded9714e9d5badb3a22e93ee69fdf5496d9ca6c5c3a93dc524bc519861d80dea323 -6812a934dde83d1b997082e980f7c4b01f12e354cd064131de1380d10627549fcbd13db3405bef9fdc9bea482e72a29e727a233b0a5df6bafbf5512e30d58cee5cb21cf351199251f5dd8d45bce9c868d562f6eb6898952a82082eb5d334c69fc85543491d04c5bdfbf8b50337bff27a503563d8d7baedde9207 -f8c6b86a31408a2278b8b6ceb60eab1254987587eadf7655ffe26389e3119319ead76d4c1086ba5ca8c42aad07e607de1205594483184401ebd3fa5ac8bfdc3276c84f78b9a2c3d52580c6e7ee439168c30720fde06738753140c64206902bb597a794bc3e359053716c7cf1ccfaf3916f79902358501b13f81498 -81672d4663cb2ffb96f8cf0646a522b58dd0ceb087da631e24b57446345f7a2ed9b684e1f22bc6f20a51004b58fc4cd3f575af5ac846aa777f9be473362fccdb8155d24ea889cbfb418f774b96c8ad1c6e5b5da2af8722f74661691b56662ad5fdba5022385717151d33e2d1f4d373c8260778881fcaf9efdf676a12 -f7abea1397adedb382ef9efe62949b6b3a4358470937ec54c5e7df6d30ae6db38082bb2d56f56eceef44bc13e4372a2d6af84a671fb7fe007513e9d5f1161774ebda4fd832184118cb7ac265c043be9c65f63c418ccd27a4c9da085b12e6c6533db311755bd1e678d3934581af794c0587c8203822dbbe865653b2aef8 -9c9b67c43ccff3b84ee5b83d17c2d8ae44dd079821967b2176336a1667c7249099ece48abc047351fa6bf730c55c10823442350e164116fa4e0b290ea378bcde454ac8ec4d6962462d63917321a5d509fd2bcdccd47ddf5302c5696815fb1ceaca869dfa07285b1b43f19874c53793583f689bc3952f34272bb7da273c24 
-95edd838e7fe5a3916372ba59f6b58222f66552b6321066ac66159efa14cd7e06365c3430d325e9a8bc8945e595a0569de98ed571d340fa63f8ee506d9aa8070f9b70757a8d31fa5d677cedb5909fdaf12cac56b4e138d1e072ffdb126dbc850159bc581c98f3c26e27b8c79ca50d77dd622eeffe10a95882ab2d93d0c9a19 -926f571626650610f95622628f738040814e59315fe7af85a8e346d18c28cfc6f3cab985db9947917d0fc128b138af2ecb02fd840ed91c363f8d52608ea405e37e2a522d0f1bf185cf2c3199fd9f1957f7216f6f2e6ea661c6a3196e77608402373dc9c36e35b2eff1fe17ae8f269e5241956088130f8e7b94cf042391482329 -afc3dc4a953e845bc367f2930acf37a902e0b2fc61563119f41260c5d50bfed64951b127611789bab0e9679325a24c4642e0e80ff392c42c340e2bbb6d208c7e28e833a0d8adee30f907afca672835acb7b41063d804cef1e8df7e2688d9803d4d34b31200a4e2ef25280bace4e11266a1250653e89b2e9b350616dcc09bda9241 -0323b5622248d8ef0fc718e54c0296c99176043504f4f8739caf6078d17ddfb8f738e35e8a2469e62c57fde5b3678b66dc3ffa8291251ed099340a6bb07987bb47bb2bca76f58346d3ac254442ff6ed32712a80ad20b622c1e2a7e010b2a30915fcce91ad88c3eb6137c347cb2943970b2eb72b463209703c034c82bd22a302c5527 -6db9fb1727b40f3736250d908386e19f2329afd69389826073c2ad5eef09eb57f2e3b7bc746b4b7d346dbbadfa4c3e368300f6c21535eb3f3b5cf400fdef2084d38d1a042e3093cac8074a915ab7c8593f171ce6eaab28abb1b83786f0095be1757c7a71a38fac667d16f9f7c4ed2629f1465fafe635f624ee946f8d08e0587b62349b -0733913340ec863e87e9c0c29882a73aa820fd764130cbbeeca52c70b20b65a4437af34cfcd220b22ffc1d7f7dd6c143653177035cd29dcf5a68834de1a6d1e517b381ad173a9dd31aa93c7bd57ebc58214c8106910df2b3879377686ca7aaa9e39e8ee7fe65dc1c87749b475a24edb68b423135aa47c7f423034b4be5fa3eb06b1f67ec -d9e86e137b90bdfe911a9fe8181f733d6a1a1bf1bef0a6e8e21ecbc2b52cbdb33b00097d3a2329eea102266fc9a5828f20d8f79b0b38e6e46f832c4dd09f2022eeb4de8a063cee2777b18f57e9184bcea014511c793f6ec65b2cb5b829cf02e32089663a7807f7f5f292fe2bf07a2a2efddcdbf0998e7511e0fb92ca96d2851de61ac1d92f -a802b116e08094afd366f0884b21917f20cffa2bbbc962f0338b75d0374ab0957c42c4abef8fa2a0bd9f208b54ccd39b0dfbadd13a4f9a2e6b699ab8938112e3fdb907de7dd3105388b137f998ceb943132aa97fc5b616d2a2f038e3eb8ca8b85abf0d74b70a5c64d8d39c5d01d6f653431f73e5ee74dbd12b770f87ede864d67a30942efbfe -f69081e0dcace4ce289806fb4fdeb0b48599dffb1ddf7f5f558e101fd1a0528d534a5286db0f1e18cae824849ddd440a735801a24c84fff16ab92c4a09e091c3316d72677c3dcec71a9bb412b8763858dd649f28150642b850e642a17923632be4bde995d01d43225f72d3ac91d7fb55d8bed4e8deb4a8e88ed71811933e6e4a126e1a1e275633 -eefbad10e1a20fb3a4747860dd0a5d1ae60a5dc9da7919f21b3aff8cf40f465221f1ded0584e73d02f1f3d598ccc1259b1a39f173ad03c4e3573528bb1e4aa410e5ac0702f16c53f71b041e06a631195066ddfb5c97ca6c6955ebfd9aa24f5ba650f2a9fbb574e30a0b19ae4bb485b422e3a47fde01fd22fd72633c11e397bffe55af45bef687673 -97e9262370583e5abe378e45febbfb8738691395eab70550021a94c31a82069c22beff6edbb9c65341c6eee246cef57f25ff864ad0eff66cc3b9a41fd3d82287528dfaa12452f9bea39997c00552d45fea39a460ecc2f23d7a58673f93acc4bc48513c0d01298af2195a2d0b692d5ce0b4ccc85c82b45a9a43f70a6e91800dacfc022b27d535cbc147 -ea995f5f197d938e1958a041708710e6632bf48f92a1ef5b1ba1fed9b0566e9cd6b1fefbb77b2009e98fdf14d0f0e6d14dd33fd7ae1bc4d4de7ab1c614ecdbf5651707f1386a6120651cfd2a561a31019f80b50b330d0e5d052a434d053b76594f93bba3ad7b2e048d2dba4fa7c3498fe8f310c0ceaae5c12e26d74aff0a6717aa16850aa2b07115fc9f -d1cc5d7ec1035fcc4160d5cd7ff1a3c89194697ccb0a00cc3ed4b3d48ee71eb5fcf228746e20d4b3e93dabc12427c15bbc3147f00b124d812437d19eb6f9ea5236f87052a5fbb379e27091ad829199365115275061c79f20521053a88fca71cd7b0afc377fe4fe34d9d56d21816d88a374f7df5de258123f35ee1ebbf9cb20f1ae94705581f67f24f626f6 
-2fe0f366c5051ece560570f2c2783604c1bac4c84c2156c916fa5ef7839ba296343eb2e26c9dc446441897c62a9fa56fcce2fb92af4db0ca6d16999514a1b63bee0f0b949cba08fa3e5aad137df5cf5656e7fc3b09ed8c69873861871911091303f855d79e678f674fb74830b263e22be7ac7b89434fed87e0df401ade983a672ad919565cef1ed9403a41b7 -e234ec499d037ff0ad5e3698ecdb7dae1e10dad50e4d5507545395913fb51831e4f767e578a7e17bbbb77f57d1abf76bc1d419e6f38383b26fb639a5ae6e14a910a2b22ed2a41aa18437862ce6c2fe8d1206f21900d50f26b1f24024c8ff36e9b662b3c4c0687364921d2fa6f6d0cba9e76d4b2b4b2a74f14dd8c2e1a752e99bf1e5154ef64b095197b0e7da71 -c5b8214c2baa3953871edac53f0513cfae89f14c99eb9119075430c8882f71f2efdde2aee59b8395dd84db4dbd0c0dc0d2f248159d9e994799491aa75b02093cff37fc9a4e06a09ae5b6d2bb80bb46c21eebcc2a03ad0bfb1cdf86197af8b5cba960ba137fb9c3ff8656c4b38dba954944f05a921f98e19a19d89aad62db2ae7c12804e0947970cdf30fbdc056bc -7de7a86206aed65ca62e28e8021707156d74cdf87e0de02acfdfb0fdb46de5a70a06b4907e3d90ce9aa016723adcab4186fba4dd054c10f715ddb95991b10a18afedeb83746d17d3d3287645c00b4b9cdec703fdb4a802bf919514c605957865b27c1b601d2a1a0010f9e5de3839a325b99e6b8bf6691b4c298221297250488fc406878fecd9c6cd7319cc1bc8f869 -6c441cb15bb438db10c972797d08b719aba3987c056800016fb542a3daa944c4226b9b3c41260c8013721158b36f6aa3f3118524bc91b68b35def994a010d05e35cb29a3c784968eb8ce322edd3c3d5f1fbe89970a1817d7d5b7359342c964e2d4adc992cc27ac5322ba43c352ceebd88e08aeadc090a7a62983fdffa66002a862d24be79f20a408fdf051d302972e81 -5e6e5eb22f30350dd71c5f3253d403d85471d2130967e049ed4294f7e137743bb60c0b11b0f5818c0224bdb4ae1295458a98857b6a32ffbfd1f7d2863acce5c844e044bb314e34df2222721614d0d51e5bb2c04548228a1693d90783dd985818d25bcc6c61ff875dc4b6fc0eaf6af89e58d981904b522a589ddb0178d6b3a1d1c395922584b62c67e965e840589f658c63 -e0cc757725180643bf8d08d9256aa7acea53a56dad9b49ff86e73792e721c96aa5c496d2922665ce3ab27fcaaf596d2aeac7ccba1e5f56c1bb3aae070dd01a702dce11ba34fbe71b102c35df3420928e90e84671640279ede57748346a3bb8643a37cffc092490760406146e7922e45680f6520b694f8e599b857074981be25e89bbdf82f9b1af169936a2ac1b2eb1fb7513 -f992da612169ab7b8184e28fc2fbbcb5006b3e92f084052dfbd89a74cc65dd0c361a0e0c764a315f58ee5123ef8d48cd6c5d8421e8bcecf0fa1bc2933671d856fe30dbd9e9492c4c3970804297df06f08336b05e5f5227b568b7d99570d9b7ee54aef3a8bb236a736605403fe0945fd85cccb0ba083f20034d6c625bf5a75e090f42af954f444aad730ba13489e972bfcf0a15 -762a04d1740f3a31150b0763b5b3b91d3e1203b9939e3d45a6bf21e96ba6c82214f1b7481137084c234445406aebf30d7b2148afedb78c19e308ef49debdef5dca50926bf123d9be9f0a39d0f59e2de55f512075c2ff4d5b426168f31284e1aa5385127dcd054ab144c26c351f5a70d9ffb7735c43b10a83e790df8da1a8311c7175dc8e2a79f4bc7b47cba13a1d8af0440ef70b -f5b7e6dfe2febe4e8280667743680cb85ffa1c520ca8651046dadeb10d38e6a0cbdd2abc9dbdf4e5c7f0d81497acdf291fa41848c30a6bee17330ec49bc440ba92b4b5bf3515cb02e5675f7f09856041560fa38e4f26c6309f2c4be814138839ed8ef64be1cc13d322bc9eac111090a24e0a7ac29fb7c9b9bc8f864f2dc96f862598026352530ab7d3120dffdcaca1560b7b52bbe8 -ac6436d8b5bc121875027945b6ec42ac48bc7d37c81ab624851121e6f8938a67f49efc5223205478e25ce51c6a802773be807b2e61a448a7656b7c9f22622e8e9101486c8c6ea443ad17402f2f373123236137925cfbc5d8a154a55b9e7295f0b0dc3e58c91dfef8eff278e770c9007d5247f481dbce8ec0c129e49a95fe4ae2ebe9ec6a75dbe7c9c44d29218e1a69389da9783933ae -47a8cca3b77e63f270d2448200d9f3606374f7e708d3e60669896f159e2e80192c141210cbce44c06369f339d93f97c1107affef1722cd2238546dd69505bf7a2f894bae87f13209d03fcf372413aedef8fef4583270c6bd787a452647e3534ce8cfde89d03e3a4bf8100e4b57c04d6844492af0eaac44e1482814e038039d37d41d7df47d7098254ae1fac3bc3b2af97b46eb2af9b8ca 
-a7f309a4215057b16b9084a95cae92e7b91526786b63acd8f8c5c13d7ed0ed696994f07b96d9cd2c416909529ac914a128634ccd9979edfe256205998569b395a06095de53699bd1e9ffef2638432a4cbd4d02b53b600fd34e04d2032555d7ccee0a217e6d96c67c76467b62bd4cf4099210b8155f8ec0ebcf4336047c45d925622e328be20b4966aa8706bc36fd222def584579decd3f59 -39e0a9b4109ac94bc86adcc6f3b13e5b6bd12980f6b6203a6de641804791164fddcfc888db5cd5d26d9e7bdb8e2d1467f5870031a93b55b4b8e872adb1886c98e698dbc19d6eac9c767ab2b562d3e4a726f2c8782db54b27b0ace7836dbf86ea5dddc3ca95447c17b90f97a6d925c913b0df825135b93f32e7c845a0c40ec7abb07970c928b6e2153de1f5f927a872624a1a6329e3d675cdbc -34c075d9d6d050229c6f9575cd477ef976a83026b7979776c1a255382de751894a47e9905c16a596a6fdbb557825cfe194cce09d520009ec70b4d3e591c96130c882a282334b9def2b0ec09714380a3437e8f0f568a00b91e5ec6617eb64db9a0e5a631e089ba4cc3030b918def43d5e2d745362ec7caf4302dea3741686f423df8904a03732968a16528a36b26acd4c6c677a724cc19181f040 -b43e4a514c52415dfaa0e9d69e7a329520093e5760a1d79116d756c177518245757f603d3f859a48ad27f7ef25c210eb6660a37fd27f8dd4dc29f16b9717507f3cef8ee8c49b0cb44ca0cbe2cb2762d91ea3f49db133271212d7dcfdd6afddabfa34c5bd3f6c5f57e12b6d4d13e1eabd96baa27da286b139e2fad4896ffb7701d6bf57df16d2779b6b46aebf4d498d991d6387e5ed9cd23fd1c847 -f132c18f218b14ab6add0c359f2c81638f9df0d11a951236818e81fd7d436b97e18c45abd3307ccbc3bc93e0b17c1c66bd65d089d16e78236f557cefb1e6219586d223c284144199e3fbd715c6d5adb5f5dffed926c8cb9fc825602b3f206b91d4aaab5b868b6610bbabbfcb8b3c96400c4045e47951ccdaacd2d72a3c8f8bc265db7553eca4f53a7e816628ca70f1ed5943d33fefc7c4462dbe4c5a -5dff03f0b320ab343c4b63733b193bc2ac369c015ed55ed7217207b0cc86582758cc59620e02abafd241c892f237130178186f97e50a90154a3f020a2bec33e49d5d06b17e13bc3ddcbbcfb6503c9eb14e64a10a9b1bde1aca7fa6f1af33c182795c00c283d033b5f7420265ac8194e79327aa4817ef04d4e9991035e7fb5efbbfe7426098392c3d5a33908ab6cdf7bca5354880e138d341853e5401ec -89e2b44833586822f237d1cf49e47479aea3a909bd703f2623faa889544032e52e7084670456375a58297f04e73cb6bb05a2af8e7d6f295972192f143001caee5dcb15d93bf02133cb5056b94dfe3f64283f2f1c59ef9f8cf7732563d088a67447fb92d13159b0950de9c4efee5cd4da5847830f62144b553803601e695960ad04e3d37232056dd1cb8a90ff304b172dfb035226d29cbd0b59e9d5b21c3e -7bef5d050056bf05c4c735ca53f59a5db8ba571a09a16012c469552c5a2231a0d8b52452051ccb79b11153f2afd3c9e5f2a491bc8d49a81f320b1dda084b3010f86eaa22dc5cab4b179b2189556f09467013258e0f1afba6264af97bbcbc844d7136103f403f56db79cdb997b7d0a20240852025648f7507101b81a6883ebfa49255ed6cc0318082fa48a3927a91ee6d73160bc7665faa67a5716005e4931b -750212ce39be39e573bf4a4787e969103a41dd9e2d0e9025026c5ff30c6a66e3f42378e1ebfbcb193cc8b695ef90d94b1dd6b785fbc3010d95e9f4a91108d3fcf97ab46ed7059839adec545599369703756a3939c23979e254671a1b3840953f7a7b089cc33089e3da314a8bb1899d70efa47e9320b81ffaa3364c7e403351e26ab49d9a7e6f288cca67ed5c1120fb9c8f1d58557036dbecab75b0f40a9d9647 -7d927ba14c4d09e95ced48ab6aa295b68262ec0ad054025de80da8cd73e4a38cede35ab2abfbc29bda89dc6e5185b313d9de1f21cd8020c1b45bfefca13725966603d3b0a19d906e76a1599eb6612edbcd98abec8278d1147f1cff473a626636f75e0c2f691146ace47b4bea98e78b34c3aa0f2ea3df7f57a10d4cae3aba3f2323fc44c0eb8db6c1b3fe0562328461eed1c3da8c2543150e0b535faa8727397395 -bf24edc1bbb0ba5f27a8bcb2c6c10fe342e7e3f05b47990dc118aa4afb459842c91faca491e57c32a73b09ef42fbd00e1cab092a616523ce8392a8d65537c4dbca23928d7c85df694d7cd7353adea0ba1f5b944d5396660003f394f9db0b75e7f4188dfd1e4ed6bc0d6e651d3e0b51a576913c7bcd6b2e585f80f9b2c23f76d3a756f2d905bcbc52290e73d29a1453b7555419cff091679d0accb3a0d687ad115020 
-f633b297ac617d6e4885ece567e1d25979f305be0a2f8d8f35cd48def39b96848d26419832cd6871126d862c7b00870116e23aac91d3ac7d428b61521f7dfd676459261e47b47b2e389960cf2925050266bfd09de6df95097c2978334d85779036b82c4a934e29646bb076a9f9762d56fa18cb59f37c026267461e8ebf18bedb565520f7b1f2dda53c026539f31b63e5b09166595cddf7f1a0812f23fdffffc63c169c -1abb663429f560454807260b09a5b7291f483127d168259872e964f0de5f885a2280cd3f75ecbb7afe1fa4bf5edf058a3f591a37315fa132d3d18ca52c5ded5048370f9717cd64e42a964a5d708a492f2bf7fed270e570fa493152d3b794ae440259fa0dfb56dafe068f40785272854b06d4bc022ef1815846f5389ffc3a48b15e40e69875586824f6efbc44669f0457afd3e69ab8437c0e594206430a8ca8f81d787ac0 -5f81f7efaf96c3d6f2586b7ea870c287b8b4d9e3f785867ae56a8a93307c13695dd1300b423b5004f0a03b0ff3a84b012e47086da6a7700b1ace111c753de88844af71217bbe4d0b8d905cca16a163999baa30e514d402e22b265ee33032e6e8e69b7aa871130f779d40bd8d89f47c72623421f54c0de9138817a436ce2b3d8645994427524dd26348b6caba28768e924b3faa468c4abf68b8a39da2b39aa8431af99997d8 -9f7fe6ca671658925daa4ab04f5cd68b9ab5e41b504f4f85a504affd2e3b8caad9d7a735640b348dd657a30fac592708803e31fc675e0dec7e344f4c55ffd707b67f1c5f80af611ba923b9c2abd71294c2a29f75f3d686948abb2b5aba5c324af2ca5711342f7eee49be3e19e97fc59cf4a5edf82f7bc01a49ea90c94f3d549a45ce01ab785f2174a0ba35e2bbb3738ed4bf4b8b708d94163e74faa108034fb8defd5c506c62 -e07157420910a5ffe21df9ee78671ce38984c83a89a3219a6e8873c569f378a2afe4132e9b768a6a5391a6733897e642aac6f9b7020b2750ee9abe3d13ffd24be62b62f943420c503a68ab8cb6830762a59e42039f723b06667b6cc483dda77105b65ee205de8b9452e8fb7c5009be1107d255b79a5bc5f2ed9bf8e6e92aa0f7b5e70d676dd66fd445bd2583f225b5cec24e8c8d725b27b1ec218abb485490a696318ca6da50f6 -50fbfaef285279077944e04de5c0ceb42d7fa25b9e40f4efb2730c605e9b868e5fa3de3e5dc39a838eecc31bdfccc0e67587fbf9b2bfa8bb96f77a9ec3a0baf84014dbbedc288c4307c8648a97051b39bf30825766fab4974ebe3396dc4b9209d6de68640cea6548d2e660d5cf375cdc2519ddc396769ee5aadf4cba872610fd1f4322b3adf0b02f9437b28ed007beab1212e15fc5a854f9bb7d8b78d7f760f89f854675ee0e8b70 -f650845cc5512c33490a9eddf7d940dabd432789a736a105e44737b1ec4459706cedfddb4a1e6774a5c113d4195cc5073bf2b9e4e403bbbe349b687c5d9b938502568231b294a445c6e0cec07f4010ef5e88d700ad796b5488c9f26735e82fc556cf196759a6346130b6a103ecd89134c2b9a8763b5afaeac942c69cbd5e0f0b05caf8460ac7adbb0af868e943874320888d2687299b0ece196e93fdad44f6b355264c6cbe233c4709 -faeb1f08b867088b601a8d773405fba4fa28ada560c2e9e46a34eccde500b7080a35bbbe108bfcdb0f28cfb0a6fa0ac50b80fa0917b65868439129707bb26eb13290fd2ad8c60061c20b3b75668d0d2ed539f1dd99076e58513b302004977f92069c077c4e6332ee044c14d0cbb71d480a344080cd12f7f31e17245a55165cbc6727053443a1264361f41a7784f6043d93cd8bf0fc0f2141ad1cdbf366f612e16d07f49ee8398142f1b9 -0b703c4014d626e29fa067f138861ec42f3b71fce5cd19110f0dfcef1f50b3d07880cd07d2db8f6c2f4975bd674dd6b62c0b98bf97ea54eb541ffc66f73d6b2c16d1fb08e208163289be8a8f05c423e05e68523f75baac9d61fa3b0f6e1bfb4a7faad3007197c37ec3b0e34ceebcec9592501faabf812fff49ca8c2c5373bc7f4d75de7b1a2e5e6bd32cb77c6c2d6fd58cd56326f33cd61ba0940b4c1086e606de79ddb50f7cb182cc5742 -a992d3932a5d000119aeb6933b815ede519b52c7176c6d62ae98a39b70bd52c602075ed884fd82bd0b2380df2f8f244bf759fbfc5cf99954ac5cb9adb70317b4d52e1f982293e0d5a377753740f8f744ae4aa025fc66ed382002ba7f405a2f0cbce92ae70b20660da3b3ac10abf4f179e02553c2520b8b7c997ed51233fce90918547a6004a4f729711df06c8d2b29f65f24024459ea040a6bad1cc27fb1c0d8ff3d756a6c9bc74dc0a9703f 
-152718090ecce8f70756546834cb6591fe853759c6eff8771c36d81e08c4458b080041d2f3d3a2f5fdc5efade8144dca0176d68c61909ef985060b522cec9f8ec6d54ee2453f1d670a75ebe7ab12c7de5a30d65c28fdf561599dc19c72c8f75e54eaa2de391909a948aa47c9a76358ef46554791bc18c289f8535bb9d30101bbc6d840347903c2b4f61cad5c2f6f04227df38108236a7a2f2bfef15ddcbc597257b48e8a5718c668d61872641f -63d02fc7a14e2881eb47db6c79104f866a15d26f9f84d2c55adaf26b3c010a69ba973d586de5b12ad51e89a899c9b4743a60017dbf356b7c5a485da33047c028d580bcfe8d1408a1dbce0194af2a84011a6ea16dd5efddd7073e8a0c024c5f5dc4e71f36dea8229976962de385162896d0eedebb22ba35d7275b9ba8a5aede12c78843ad540a28838728bc1d4ad24e53c91f9d025371cbc230032a836212ca45aec4b611ebc14b5d353c54e06e6c -cad989474092fba2670873c9cebc67dec86eb823dde7b0b99f1178d298be05828e4aa3eb1dc369fe7c6058b8372184156600adb5624da2ad769c689a7cbcb5e5c38e259a45d4b83ff0e93011e3fff285601fd209db19134883c1fd97e5979f97f7da4df2f3ac489290494dfc6748008f96b98e92637d4eaca6953c2cae677dd6395d2884ad59e632592c15df904cd7c9c8e481228e23667860dc3f5d2e6c4ea1ce0c0a73076e6a747aee3cf3c3647e -36b5ba6d0fcb50ee37aaf66544d34b4106ff8f865c24b9c8ea769d6b16894ca0592dd3d709124f18997a98aa2c88e0a45af0a5fcbc4cbfa7baf15b246c74a26ec0e672bef688a9b619b081b63e7a30e09c0c8442de9fb071e73909f5d50b6c1d0692004242d3750d793f8a767d28fbb8b4bd40b6fe7fefdace8ee530aca73f75f5b0e000e242e1c6d31e3a3adde861668721439bf952edcdaab40560e30795c9578436f0373f6316a66dee75f2a13fd7 -b9d5f19e82dcd525aeca9808a2d76174a04574e48265396a5aef082e66c867a0551bc30f9d1f044009db3c2d0d698678a9734ddeaed08d96df5e6efffb40c758c81ea7e5924f5530d60efe3a983351f54388683b21fb08cbbcd95aab9306454dd9104cd7d0b6b1cea85d7630d38b818082badf854af8104fdaa76e4c186b77fe0047f3e3566cea7db732d893b3453ff52cef9d0e7cbc58a5417c547454a353cd90dbcef06dda6a2643ee50f00dcdb9019a -742d6d0d638dada3fa15074f7e6ca29f861131e7784a46720687b3d4534db7090d1312d1a215cbc5adad439e741f938e3cc31d2b92561e9302bc54ab4588ba4e89d0d538437e11960a83a11a1e52a30dce185cf3bc3ea671b0e24d54f6561e502f6d987b6de7a49e057b38123acc7125fd68ebf3e8fda86b64baf026fa8ad53c2ec32f0af41cd37c56d624f83611e0f10861b11f78b9999680f4aab8ec35298897c206522e554cc032c8a1847d4112b40dee -aea024c695d12c1e8e5b8d181ea49771fa6941188fbe128216b65f20849e11d61855ed132c2458524ff7f7bf4bfbdb31a09a5e3f1257243553e35c8f78b64803ac2c10db6dba6662caac0049aafdc627d65d040bdef334fcf5bdcb4e4aa25629cfe86faec497d1bb7bb9c9c581fb89fb91f7898ff9f2f3ac3db4c8b58fcfc1fe741a5ac6fd34c49cd058b48f39432345da0699bec367b04f4b5591a30097a451a593d0df658e9a9e15e1f5481e23d137104f1d -d28db81040d09a6b5303b588f280411511fc5f8c32a8fd6739f38f5b633d0175391c12e7f429cd387eec7b2bda428e56b93877da802354f5622a67ae458c37ac9676d7ad065e2764adfcdf8082001f9a2b86f0f46162f4a5cb8007122fec5d3838806a9758a6440433e808c8392f55e27c295f517ede674126739f7d32d923c6c09003cdb701ddb53e2cb48545cf184a142f6916694c9d823366ea900b49bb20fde261d55790160a41ff42f2b0a199c6272a6bd9 -ace60c7ac074b101965aee3c36049cdf4b8a409f81e713771d519294819bcdd1b36e2bc76c4b7559830ebd7dc838696def0e1aae649e0ceb583eeddd0b94a239dfbf18b5dba8800898187b1c4c7eac811f43b8d8e6d9d250a35810c7171ecc79b4967bcf73c016cbfe8aa7cbebbaff236abfde7135bc6e29fa9e2af007eb5e524c15a1008d5535309ef3209276f14f27d05955d92e0d7d3eb05e4bbd4301625964129893abadb60f6f8b7d6c3c015b8570cbb4522c 
-5b5c19a7fb153284634acd3a98da6a66e31f66ffb581d71befe94f958105d814aa2b370c245134b25f3547abd0101abde238110b7d7f25206cb8aa57a4e1415b205c6cf3b46af23981d1cf48b6d6159040b279ba60ab78a14d08f6a3377b28892b5bb3d0e44f980290cceec226f90d5f4457a5bebe8d1a39e2e98c3b4e2010ef9eb24438a23ae73d0386bc5c9f56b581ca358b164ac7c051933e2ca54648456af3bfde933fd090aa0a3d57c5cbc3b3df57ea4a31b5a8 -dd7b1c4ad1d97d73a7f5b00c0f45c1cd33be706a31aa44b36ddf6704796da1eb23d2195dd92740221b97bfc11a11fd0c5a1f8717ffd84bb5401e965a3e987e4a6c91a5163a0d2860e3c96f0acdc30ae389048f5eead04606f8f2d313b78623962d55f5c81aafa9f4e6c754f9525b1fef34403ca08d2c0e20d0cd61f6957b2b096471130e4d1d714e4e270e4fc29d45c536c035642afad9bf17e893c4e37c13935d9055a926a9ff0d5460eb3f809646e18222fef84d28ae -195e390bc6f727cbc247c31f58dba36117921596afae5be4fa0f33d1a8d454ff417bc95f03fdae775325ff64dc6918354adbb586844d66490814ee513700fe893d7640e81e24ab461ee79221308b245d5e54d99d1f7472a4262ee2ba759963a5970c46153add4bc04328fda5983ebfe903e2b47e076b48d517f7f0a6cff9ada7d9bb07d787c0acc11a2ebe22fc352f3517640e9dc5395b92ae769d00251dbae88a809d0673f08525494ee3ff7fb9956a23a6ab37dfe2b13b -2b18ec0134bd03907c3a81f39186adc4b025043d58deb0c327673d73a4d79b1720d843fb4f7bab22fb4126f4378a801b9fdde70051a48c59a4dbfd094cfc8bd658855ce16af0e563750c5f7909c273a78815a55b30e019a5ee26752a0a25db5032d1735f0df1c03c078a43ad190944cf2b6c89933466b49abc32f2e5242077e13b48c92d22e232e53a52c4bacee3b1e03d61c7fb8578cb8c58605fab06f86c010f5722f7dec13cba3931143f979269a4d7031068771cd7131a -2f9268871cd9a46480466df658d8ab1513de8ae18aea3175b00ebc92af48e363384b24723c780371e1c6a45444dcf17182c0a66c315c73de24f430a49aa8f2f8947ebc7bb8cc10fbf85fc8fbe134d2f6e9e11eea9dc79d0de6bc227ed831567d55a939f388cd4b2ec2c057e5ed8eb583b4addc14f0f2a5842e974556426e6d4510b56fb2bb0cb8518ce3a4e14dcdd3377329280364b0b1f602e72ba15e27e0991255801983211917f26c196bef06c3cdd90291def8c677a257e5 -d567627598873f4dec3f6236240abe5b6943ac8dfbf2774dd7f40efacebc50b07fe52e6b89595553ecd93bf9065db058163ae63552d2fe4f3d19a614715981b7a503c6052c3e9e2747f6018b5275ffe078216c46f3820d964a2d11e85eb031f6f314602f462dd3c3aecc8a4f77d4e73bc44505201fd3e8e580d2b04c3f4c885f0d13e52b505067f3f605e9b637b5ad81d3d2cffb07f88f12ace18da209c10d0f4d9aa38c5a17995c92c8fa28d55fc731ecdcafa65d956dd65ada03 -e6f01a669f9e61cec57e3256c7a7c23b840b749fcc849b9e46d66f5903f770c7bddde56e969a46228dd2d69a8e5bceb5bb06a0555375178e15cb9c5957b2f52568a41778659a0841fa62cce468ba409bbc30e1a70facb45e0c748f08ad36ce113612f1217281f822546e29ac37466e32fbbf9fc878a12a75c5849c7efb6ccd2c3163bd2fd9ca8349dfbbd234c15da524256ce20d150e54086cdb6a83d3ae83a0b9c4a49cb5cd67ad91719dabc6179df90012b5193c120179c0b69987 -f3d1c40217c3ed135e5e6afb91770819b1596034a0a183ceb9ba5a1050f4cdacea0c8ce35111abecda4a09615fcc0ca476531b24d67e94f11b30b15fdae2c31d09995a2ee9f8db40667656dc197dc35dff1416d968a572424c7fea2de1f4c23bf6ead4345c881cbcf22c4a98ba1d3d3c6100e4e4a21e9197d3d54634a5d3c18dafcb9a8270f4550cfdd17cf77e06e1e72a6181d9342dbdbd1b656eaf735a07afc9ca4e883ca545e041f6aabadff6b1ece06870a534aebd638db701ceb9 -4e80407aad5316ba80492fde6cd6caa97b1eb853111cdf4909bb0ef9ca3828cf94d059349f363e1c5afa16aa1f18c95e9b0b44b2ff348bcca79877e294beb7405c88b05dec34b775947d0fae8ec1da26c02bd5035788d27305707181fa60327c5825e2fc50e175e2922753307b994d27f902f0cc72b5f2e3b78ac3ea66973400b8faff4e346e48405eb2bedf96f70fbbda6ab905dad86e766dc3db774a358f16a1d416cdc0bc8a0d99a90fe23780c2da3ea7774aa976025cf784e46eda77 
-a01a2a35365e7f0b3349529bdafc41cf031feac97e6254182bbc6f78ccc97b918dd51ba1279c24f0ee5a257b8dfb3e838567da4fde3fa4b2b49d108b5e843f8ea2453e2a5ba4cee6bfcb9e224d172369d7d8fa3e8fdac85aa257498b28b0af88559213cb147b6116ec0f7fc872dd6a84f246ca1f41b10ca43fc19c8f20ea5d63c4c39bc2c257ca5aaf7a89f2e50aba5eb6b069c200f733d7f68f2f11f4c430b932d40e7e62e84c22b75952cfd941dc505085f12869bc520dc645b00d0cdaa0 -03c4b34be5d2a1891b10a0a74e4cccd5a0be17ae1f2388a972ad699db8c247c4ec013ac22fe6c6c1a75751834101a17c930c90dd3805963235aa8909edd60211cd97f2896332f606164a3ddb1aa9465fa8c994aab818768166828e3d7a81b9aeb5dedf93555fc351782663167e2e36b618fb16abcb6d64de99971082ca76ed6ec17d5d0cd8b45e0336ff3061a5e06c54793b8eb10a1b772c8cfe390e5d32ccf61c05a618f5130af24b33068ce35dde6e3a9acf7550797078294e69a9b6c10be1 -52f245b0d61ee4f1b173511bd008d3970a25b5022250ec2b9f9a28b68b3b0c8d274ead30fb9fc1f9b3b5f2c3e7125c4fad241dd3f5f4d0c186f64ebe09d879922a682f638c73c0419e7a729329809a7325a76851b1df2eb4cdb4eca2204779b8acc052c62551e274b9137b1c50d822cca8d4cd0b8eb7554ba448b7ac6409eaa38093281c5017260ce2bba9bce09b3467178cba5bfa899101ea3d073cf778944afe12651ab713743218c28092e6d37b41721f191e006f29b5ac33f973d671e943d9 -24b7bb806303fb0581f5baaa960cbea9b2eaf6ad927d073237e4d77cc52c306a407a4b1094c668061ecb445eb3de6f1880bd72db303bc05af8a5b72ab54014a032c28af1d71a62fed15f95b468557a28fbf06eb22caad469b20702b3e067e96ebe06ec31a61ffc2cd4edcb19c11abaeb5e303860869ec7ce19061bef3522a6c3b0c64e11c7226bab5547ccf4042bf59b1bc0c2c41dd1a7db42418e835e7871bf121bc9b1aa037c3796214e31b682f8393a1531d1734e2bf0237be24002f8c2a8a7ca -21535b47e5d30e131aaa9572e94390d6466ea90f4daaa27b2211a9725ef1715be8805ca5dd95e01a649d23984d5e1dbd461ca6c6d9c9c4d62779bcd3c286103e6d3a86d289a86c58cf84941e74d022cc75942d41af9da94602361e1839a4d8232c3d0ad09f8db42d13e66f79bc22bf52950abad83a84fe6c071aabd718c243ce9f11d84a266b172c08f0b17bb07d0032cc27d60fe21f29479474f52563b9eb42e40a7c2188404019e02ecda1c588a3b9684191b19dd33bbde2fb3e9d5ecd1317594127 -f64e8a480d548be1e8dfbfc1a6c494b81e9c630d05c9e1c843d35c62109496e03c954da403b57249e6c3863f3f7289c47bd97bbfc927de8edd896c2dc4dd02971bec98624cfaa7244543c4bdc02c0ba6edcbe543cfe80a34245d5fc4abbb5a60588df8a1783d655c65606d4fb3a3568b1b44c1ab7397ad8117c5d6d9033890e2558ac2e2b9c8e262191cb35b2c7f77d4ab0c459473beea90eb8129a4cb4008febac2bf51997ec1074acdb75b8c446803b8f0d4cdd24d411c7cdd58f21e587a98a79a8562 -ea65942ff43fa6092e4056100586228f2d44cd8f7020d7c9a0927af28fc4cfda7d7f8202b1dec3ac153d186b97729508f8875bc46c5213bb3254717facf81fb1b750f56b0e25923d428aee8f06ffa9f55bb9d06b7144c98926f9dc82cb7de678d0d217816d73821b34e60ec41a64e4b9cbabfa8a88ba9559ded2ad1c2e5c3b54654af840715d7de483c1844ed17e8d515d13016ad5dbb83e09d1eab459b68720672ffe1d8ac982fb5ffebaf08b7b94fcdd9481ce3bc07df4d4aacdf06b4f145871133b8296 -2cd24ad3e4a9f3b145eb0c899f4e9622724c3ee8afe865f8f1aa10003c584cc6eaf3639154ba7ae2ceb4c4daad3b2e9712bdd50fcb8bb844a080ae9ae2565a562333b098ae9f56fcad5219cf37bd7a093191eee913cd46231ca9290ca858e8c057a4862700c701178a908795932a16d95d17e4000d71911ac1048d82cfaf6c8007f3c50ba8b1eb87d07d66d62a19ed638079d4a5e813de2863362b2237b9c6940708373ebf162fe5365cae6f43a535a73e6f49d6ca51e8ef3811bd395cb84fcb7387db81d7fd -35d3281fcd49033ff7255c49ee4b084e90a34cabbba2984fb4ce4f66a62b514977b328050f0af3b9ec9b2907abca5413de2ca1aa05edeadd440d5a261c861cb3e726488913917cc07e2c4763024aaad13d37158f1606bcda253d1332811f0fde69d411bf8296d00b45830d300567dbaefa79ae5f152a7a6212f0c481838a9319d042404dd3e64892b592fefd3b1127c300cb541388867dae011b749672008958764dad93c13898a4b612e6a137bdfa4ccf0da58aa0c25c096ba79cfa49ec9af689e761855fd712 
-d055196d7bf4fbe53b8fac09d12e55f2401fe2dfdb423fc25c6e787a10ba2c192885c2ee5fedaa4d2cd1c880833bc32e2095246311d47f464629ad53c82cd0eca24de0801cc5d5f72c5f0d37733ca62b9dd47dfbbfb1f66ecbb1b710e342afbee3ba971c1fc735c9441e910ea7fd9669dd78d1fd4053dd06856744a122be93e5f73ecf04606af47d49403e3e658849c3a76d38833d96271ed76b0ad924b5aea8ee680b1da889991d52da6a4b7ea12c848e134fdbb1305e27c2fbce7233280c3b3bea6a1219fcc3bc -5d3e88955c388dcf6177185f894fa7901bc5874a9e73d9596da159dd88b77fcccb3ad5fed768ee6d69c05d6e38df5a679eb433e0161b3464b4b8157cffec2c450a28eab12c11b18ccbb68f3ae14c71a233e114c4868ccbd1e9eca1a2b6ca4a63779508099080d3de3396649344423a8b445d34e5902725627608e9b5ec920a8202d82a5eefbb3b3360d5eacbec5d9817a64d111052e5f030622ffca610e1af69beb2296825f2409a1042e4012daab54d649f5ae284ccfa665e6fe80fd910f39cfe3860f3adee2912c6 -392bfcad38f2938b8d7880f70e33c14070fe54843ce8622ebbd9c5fd9d7cca22156dc73c1dd616449826ce33e4bfeb4d536c8b3a72aa23cdd512bd16a7c7ed5febe356c8869c5db31d67b4fa34ceec7015a19391c4b5d8ff95dcf414aeac3e80d261689275be9b70e336cb13d9255d05084c367f42d92c18939e89018e0b5e3ab9a51bd9eaef72829e964e65c8d70e47ee0668af16d27a0307da66a9c4297da67963ac1bff76083e3a87ff12aa25aa5d042a744bc01157102cebe6521d7b2e5932e81fe2a23341534823 -97d63a07164ce86d6b413acfe23156e919e1c40e320ee6419b9aea50271506d739aafaa4829862b611786a772e7aeced9007e09bd7524309095f1643ac8d18af3d3a95f9864b18d2e89df43a3a4597b0801f2ce05811ccfaa88c8e94373378bf325fa7fb6f05cdd0c8ec6cbe8db438ae131f5097353eba012e18f5d1499e735ff4bc951986390530998726e7a90b0ed71d16e8986074dde9d3770005a748fdcf411ddf0b03615896d2e0cabeddb07c57d74ef262e1778016c8246625c237be901bb8a6c05cdb1ec2f3f4b7 -5d14d28542ed0c9c21aa82de98c45157b83675341370700d01a9cdf62c3254ec8e44bb1346f503b561ddcda6f1176816449993f99f870d774bf6610af93cf00c5d36e08a6e006c4dc78c6605345c8abad4a8405f575cf40744b1c789f987cba44c31a022d98d20e79d214659653dc1d9812c7b7f82ed38b469e8c718a8f4a281f71911929ed1b5d4e618c4250dcd6980bdc64cb34f57d0d4778511c38456c40300ee6b0b2f50f64542a44a8c9b3b41d4c14bc06b4e166200c1a22bf0f11d51f07dd130ed482f6a5804c6ea11 -b606c4c803672e40423f7b2017825cc6d87f7db31cb155458427d40824f4d8ef0e77b8f2aa152a3938e1acdc8db298728ded23dd2eab091f91273c284b8f644328d16d7568c112f4f0d1209a857a6fcd9ed00fda2d8bf2409a01fe2cb771006fae826ae58d7f5d4af94415569395bddf575a116d6daebbca841469f06ca234edd6348e078506d5f3699e8fa74fbeb65e6e182e40af3b129bbfab140a287d95bced6a4ddf4bc942eeccbb875c60aff88987642b499d6d50f2d37beb1b54d9a27dc25350b324e13b4dbad157d18d -29606ee5ab59bb463bdb766a319af2085a36d5d5d92b83e60092c0f568ebc8bd2c7139cc0042f7e248c0c8a89936a39f4655a78b66e668451562fc7c7f9127a7254f4fa39fdb21528f21aacc04d86ca7d985056db91d70cf46ddd89a54a78cb2f133ae1310ab830813637fddaad4d70118b68f50919476e81bae14010d8b5dbfc88b2f92b048476139e7d47f6501ef8b0e8b52e1626924d6f90fa32a7ca62e1fceebd50dab14baae9e21a68a64af88962b6d8c55e0e7e6cc22233405e7a1d29360058bfff24051db40ebed223271 -63b44af02cda636a386591f0f2698336e2110a2552e43783ad6da06ded94e314fd50adf6661f1caef42c42f60d9c4e50261a5eb45267fbb457deb03ad0317c4c9ece21c6595d17c7c854a3589aa6e75e04a9865f821d3b8552acba9bac49c959188de5fdf7f81a26e4f634ecfcf46ab5acac7233b697ef91b79a04dca30fc2959bae72c0a9806c74a59c53f6eb322e00301b8c4858f6d554a43a4e2f2486306704ae96b0b815802caaa96f4078b27e5bb7968da16b5a6c5b0168be405c95647bd21b3055e6c849d65f0510d458ee25 
-d33b0bb08e56a3ba99a7bc02d236c884110bd637c293804ba2dc254473461ebc307a311658232ebdd608177e1be7f9fb66d912a433ae6bd1500822d7becbe453f01e1df8b7b3903d2b8dcffe1ac23e42b33d8d5b446e22f5dd220ab3f22217d1c992b30d656cb56bd3c1199e8672328a8598599c6099bfe3d452c942a285f35f96a85584e11e9e4586f726b721098294fd27e3b4ca3ecd127989e1202eeb218fa5d74aa66fd5533a22b25b213eafc8dffbabef6e17362b9c1888e82b00108cbf8ce096348bab79d7d53ce997a1b182e1 -623b1417140ab573ca90ded7bd86f7fe01292df33d285d2a2ded9fc6ad13060769d18cf5aff2e276231a172a9ff46800434ef60f8feed67e10058a6d32dbb111aa286db0a8f0980a5e55c6498f4e380bf31b1a4af1332dbea6cc0add86f563f1ba70df596b29eb9fc694201590a63e817cf455bdaf49ca1e5a4ee4250643e8f30389eca76e03251b41ef211ff1d17250ff7bf7a72993687f6cbd1e73015d42485ca36c995352e77b966c2f77a201ef57d5d3d8272bb87931077df73ea3937195b4bc6c95cc7d975053c150c6f354a5cb6c -debdf7e34d1927d34002aeda057f7c56a5d2fc56ee130c91007432860e1da1a940a71293f371b2da670ecc5a7e3fbfe8779c1546cf4939a6f36dca6aec54018770ec3c9945cba91a83edb3fd32ca6182c01d0e1b74c1d80a4e5f5537a17c2200fbe0659dedbd4b3200ead90ed34a8549759eb3a21eaf6f8f9bb1b9525f11bb4e10ea55b04174dec2a7fb6b5ba2dc212d4f4e45e6b948ab3d6600f51767ade1339c26277cdf0b3627df43e227aff9a38800fc496f6c4b3cda3dcb5bb1c3dd03ff916266d5f6f4bf1df0ed4024afe84ad1edc5 -dcdc862adacdbdbb9b1d43ba399136029cd9901fd16f443311ce1009a17b2bbd118a92db41f60bd9640be21488c671c8267b7ef10d94f001d94bc43cc783351eb05a419c183a6abec9af39d91edfca281f0c53db8bba509140924327739f394af61b77352543530b1364fee4dec9a04bfcc3aa51373692087b4d3115a7295e549736abebaeb87c64066d3e1d5752988395bfe67c9b5fe9598e313a39766486fca2bc053c4ed09b5dee30b182cabda9395ab140809fae76ccd5851ca625c8ef0dc8eed9308248aba77a06fe6d581aa103b43e00 -2a94dc0ec9592004cb301aa3adc90da3d326794934c8c05e17915d31d3912b133b0508d16d47c77c03cc7097d68f1879a39139260d39a10ec407db9680048e8ed06f3cb4ee9e53afd01ae78da4f18d0e7e298bdc882239b22c4e0790863cd88fd3485b633adf21d279811c4eaee6f61a3b0b6146be2075c08a9c97883062b4ca2a16c4f309406a3782fdb3646e51b8d52d25a15c7af7edbba28693906dc8497b9240518b2457003a9c55c7a41601329ba7eb46e82af1db23d1ddbe1a67dd358a9cfddd3497bd91cf0d4e41edaae8d2232e3acbf5 -b390805562b9503677a1a5adefa7eb55ebb22acc2c7fd7032326c3f87900a6d429eda3663b77eed2a5df929e1b43763325bde8ed901092e4099aa96f89b42c2620a1d8a2f20f772187c4b30d62dc49f12fa929396249c41936e2bc379474c8d8ae0d71fef5307644893eaa14b46ebeb581bb139956e1ff4064301d03862cd358eb156c9dfce5712b35b149e42b53be3097e813305b8a689d4a211259d73ed116fed8fd6ed457f58665289c73799137aa57925037d2a4898e19609a576609a539d640a6a0898d76e7d1170de06e98a87c0aecce67e7 -d527df309ff5112d906def8c461e300942e10324f9ea54fc47ac4e81bc7f0e83f9eb3c7f347ec1877340c78346f4618f52681eec7828b38583719def723ef4b405553373e280668c33d846ad90ce5e074f31cf6ea49b08e86cfe2ba6039a7259ef9d73297d310c6db2e17491f524811edeff14383280dcd3a6ac23cf170bcae854b7bfd476195b3ff0762f1ef4bd0d5c1727968fb79c3dd15b256d6cd53ddd0ddf4e29eadf3f75013d9099a351c53e9c4e604516f050dc6b2883d07a28e691798aab696cabf607bdcb6f59fc32e1079d20424995d13c -58ced7f7d6ecfaeddf35b67823815da814b008028d25af3b79364d93ac4aa8c120a27745598f742a52a4dadc2298df0d7d0fbc23c250cd097a0076b89017c8707963e0b90f06161dbb4df4822bfcd2656870aceb9a5adae5cae7de37c3df6abaf2ec751cd163f03613e60409ddf579dd9b732ba3c429271f3200251c560b4010e9310233426904f8e2418798373ece661646e8e511a75b0df17eadaedcc64259cf8c4fea77d754eb09f378edc79259325ba9414865385e6347efd0f41de3c52c6f27d6c8b92d97a29c1e06d37874e0c58c3d940f0f996c 
-6a1d428d191bb36d060f1263573118da568af27ed52b96c71dcfd8e4a61274c64bd3627ccc59825ac8f2325b2a7cd46be2fcd5c22f3ea1b7a8920ee8d150542f08e3595b225404a125a96ba66f9ce1fd36d57f12bef1c66fbea22144d1353d65a072d506d0187e2e8aaaa25d1c7c8695e3293f01fbddfd44307f687f6389c34a2969ccdbdfc6237b382063f6f6a9aaca24e370e88ccec8e74972fcb6934c08dcded213830f6430b37a82b05f408c8209f95ea2bce17b712e73ec83acbf3bc51a2b6881e3f3bdf02684b6b752e7abe723679191e26abe2cc7 -acd7222469ae8767f7c949610852bb7f120a51bc6561fbf66cc7396b38dfbdf33049302b4f26caa93b2844c6c4d46b6ec0f5384c9767358751b7c148830d957e68c08e11ef9a0fd7f381aaca2238c773f4d2f885fafa151d17a12746c7c28a57b2ec7c575d88b9d98652ff9140c1a4c50f31ee4491e53572bf16a10b29efa94a2c079046604c0715ff4fa1c4ea8fda3cf30fa8ce37e53740274e83f6dcc4a63d24d34b3ed9393b671d3b9915dde6fdeda18ca5d670277c434d793090bed30966dbaab252966afba1d426ae2d19b5c74b16d3bd36528cb42b4d -97ef05ca9a81c3ccb8e993d10943be011b8ca3e8307ff65b1ca259d70718f22bed4fe50de5e46d6abdfb3da2bf9669c6ade7746d44a40ae0655e9e8b4dec1f21c41a9e907fb0b4beafe49ede427c7da456d9c9139530875ddcd9e6e1602480e63ab8426fcafa6eaa3f4a68e3e04d53b64312e25e3339d0084a987b53c11dae4cab7091141018f9f1780753e87aee6317b9e249135ca32d26289783ca2af99a2d29ef35b92d4f6541e5e337b85716266441867d89f0af4b855ce0db3fcd0b7b71d8491d43023ef06586070e167d2dcd90ce9aee71f9043913f459 -faab0206e2bd10ec36f111531e9114f4ee7fa43deb60b268254c0e3d29a4cdf928497a097791816a8ee8220d8bcd6e5ae6d403ce64c7bac17104dfed8f56870f067bbb210aad4b042654fdc7d5e7c1598eef1f307fe871d00e6d69d68067dd98a7d91abb8040393455f606da8349beb2faa52bccec14c4f1f4d9609b3b23dc24b031c65e7eb67ed4faf8e096511403c871a9f64e4b8dc3e557e9bb5d6716d158924bc4e5b53d81138b2643c253fe9276110956e553790e0ea89a79366934198c21f9532b43e3675552dad56b447f4bab67ce04d53101b7734a50b7 -48d6d638ea797322909ec196c41ecd64717c3ef45a9c7cc24820524b1e20e430c59a9fe3efd044c692a2b7fa0d476d546b35cb09e8144877c62ade19bfeaf598d8087a97acb68f8add97862c1db10f0fc032a74ba3c8fe4fbd07a28bb9a3c107ad0a2a0e0da23eb74ab55f8a2b7b511e1bdae340b1d8803b46edbcef3f537c8a6ec2806b89dac13989b89186587792f42e5cc2f8d08f9bb989f00b770e4c4a29e1c0689809b950c04dd34e7e7f74823b1bfcc4f855bc955ec7fa53d9a6d582a5186ca1c282f030869fe5d7caee534b98ca7748c37476c6c69a277051 -2894313e0e99da4a8d543ab6dd9803eeb369cd4b3a7c590e2e56b9f99487c16bef7eb190ff51fd2aa6b93723e712717cf721106115f10b2951141deb33b18ef7ef1e7145ed9b5eff22fa30780f05948adc7195118e8411f853b3a26caf220e81d241121dd431716a58994b7d97bf76b4acec5424818e545c4c334586efb63907dd436e92bd04aee200bd7dcb7cc1ca5f39e67e355b9e1fce7ddf882e324bcf957500212461df00303eba46f538c6de2a1681d08432e3e28ed69767b1538b09eef75637da24d100ca8acbe418760edfa21a1125a8dcdb30762544405405 -0a760b5a7b059f5f97b19552306d18463598a21ce5d01c9d228bdf61bc5eb4a6820cefc9e3d59018f775e945e20518f9520ac91a7469b612d738a900c94e0ac42431aeae194040c02b6d628f1815e5270edd3bf616221b0238e779cfca37c3034a0a747a0c0b25a60d9fc67abd1fbee5498355cde9821814edc8785b2f965d29eccb4aa1b6c5c417150afe9e2537bad0b696228e073d73b0e6753fd165831b479c95adeeb2dea1466ab405ec85bf72a436a0764bda5581369fab7dc094cb0e8556e3336bf1c6380c1f35cec4f38cb2e2ab03969ae62c7fa81b3a43869cdd -6f268254d6fcea73605bd2ce4d85df1c319e2ec84dcb204d46037e25d3acc81051f9a32be04f687b642a6a18d506b26b0c6c8f2c00a6bf1726c628113069beede1020cfc255391be45cdf3ebda30042bb300c3053608716ecf5f8c435bb10d4f5da66a8695788b034c28956d2fc6fe5dcf4b3285fab8fb650d3c4c6ee0ecaffa47f8177eab9ebec5f8adc5a8cfaa9c3adbc94b91629413e6da4781a86525a3b27597c78b0642cce5f12e5bcb844d2439bf901c3934d66e17f2414b1b8a62b53447203cdbb9299f928799a0701c58cd816afc73f0001f58b4097cad8e1412e5 
-bbdf17fb1bb249561642899623e7c7f5cd41a403171b675bbe59e5ede54a76ebe2cddfe9eb77a4a66494a09748f25e1fc3f0bd744bc685ea2199196e0859d6a4b6733f866b7b2df0ed69eb5c5ff6223a520c9ea99840c9c5ff0795d9ba45118d491d4fd6ed8413dc22e0f1ecd64e64a01c7b93ef9a9ee7dba83bae239d116637ccef80f25cca04acfa82eed665c46c98a9bc04121f70d781c73ab892f7982d0e772ab37dfdc3b84d2f357efbd60542ade377ba416d9d5a595c96d17ed8dd5c8a32f114ec99512dc2001227013eba20356120f0f712291c8da6df5681e2197ef4 -439943e702aed09907d07e36c81f1fba89772351f4b60fd69e3058e88330e242470c0bba6e42a3c16e1b89115eeb4226c2d9d2e49ffba7038b3bca20e08028947b166957ff2bd91d21bcc6377f105b3d49a91cae8eb6b9b701de96a423445dde2472ba3eb001261c17ca50a955c0daf9832c7fe86f9434f88d2411d7a030389e7d93f14b6568b300aab8f54865343ae1863852827c9f72e7102e92a1f6d67c55ddc6a2b216241893d010bbe104d2229acb0282263979d5b0b86e2768ad7a59ed51935d29bdb7989bc3b9900c6e7e2ca65d27b9673d2c8def797c3fa554a032b8c9 -4f66b96ecfb7dd7f1fe069e77f9a40ea372bac1f13c0c8b29e03a4384a928ddcf6d0c7b29e429991d43a1d835878f4d597b59da447b448209788dc3cae8f7b3f110490e1bd0e7d096d1d4b433b2acc70031b74daafee42f3ea8cfb12aa2a72bf12217457e3ccd4660a9ce8c6b1adc002dd5e50faa748546920b61e27f1e6ae0fcb4eda0336381d81833321eb8edef96ed046bb88416c95cfed95d30321ba53952c9b738ea3a6c8650ae31bcd1342016ec070e4527ac9509b4542d9983ad63ca226528448d46ffd6417f70c78dbc5160f546d92a4ab0854aa6abe37481824ea956792 -e71efa0eae7d17b57212f0a1b9e9ce30e9442bcbe26312fe8dc1dc0e2b0b1ef028e0e98ac816aca2af4a725a0abce96c0907cca5c07c612707dc785eff79e759393258f90b981d7f4d89833629d32507aeab8348d628484e67b4783c0bce6d810ccbffdc77ee2796553c9182f5ef9ef6d84774518c05374ea6cac33f720767d7a8ed29c3c422a3667a692e0bb8cf9439d879ef90659636442bbe07438dcc1bba764c6497433fc000a09b7eb5518b2c179364e829f7a1128c7504935503ebc7d1d59166a843ce018f721e4d554fd27b731570ddda8482e67f03e6669ed4ef2511aa7bd9 -fc646e856c320f2b9caa33bd90bf08231db8740d7fd3ced036411aa80b7650b58ae100bc07195e88d8ccc460aa58557482a794f15204a51ee45adb7986bff62003a32083e5bab62d66ac406dd74bfaa09cbfd21f2467457a51c3cd4988d40628d65b6363e186f7be7195d110d772f3ae0a8c24be2b0d28ffbbe00b133cce4ecb51651f0d8f6ed63ef5ed012c93bf58c221ee7837c6c7ea0c09302570cbf2316e76474cf264633c5b28e71988ebf9bdc055f127e19b49a46d892291b76f70ac290f87c8534292d76c4c7bac67a2dc498a81c108e52b8c0db290628121882a067cffe235a2 -c6f6b3eca36be502cfe65b1d4803854336969b65febede26d9513e83c6d55a38948a85c54997c99f206fbef972f473a8aee5ab44d32eb75f38f03ecaa31223cbc4bff215772061afb48a80705e1511d0cdd4ddf00a365a09d7e1e8daf0f32629bde8576e2055e5fee04053f661224f96e28c3c3b56c8bcc6bfe14c7a224242dcf0e3e7f002192655846037017acaf069c63a44b72a343a14cfed90ced833822de6118a5b5b257bbce56d24ae81bc731e0b4a318e45a84310bbcb569833dde17b396f76b4b0f72f4e59239ab3738d028319765e3e79dc752f2aecf2a3ab5c51923d8d6bc58d -d11ae03c75a7b0bc1723d301b4bd2775085801d01ae5cccb9dec444e46e44f0f413ab0ac34a005a4b7877cfcbc6d7db3b46071c0e73b90a430f4cd3a2a4576763926df0894dfdaf47ecf18d2d4a9844e818ade7c11a993d11349e04a6b3da2090889e0ac67fbb0b86817215505a728bacd2e3dd7be9f80ec92c591037d16fd1b8f706c95c097b18f01aa4577437bb2a38c569a64fe262192fe00921df4a9d95f3e481fcf422d7d35fccdfab474f633e17dc041285d6fd59831056846166cf8f95e56a6204239794125b1502376f1934ff62b35a2dcf1f51b53720a96f191d72032138035cff2 
-6d243cfc8bc00b2def28def7543a0ca2b0d531c4be9cd1cef41d53bb2b84da4f3e1f58c2fe89a49658dc0ff614beaec3949dbc673a45fce18e7bfaf7953e16b8298c406e5013949e268aeed343a2abb4ffc1e740937f40fc5c99313209688929a6fab1223ce62e924ec290c21702acc2627a1862098cf3eaed6ab08004eca7108b1b02fd6188e04353012a5eac7bf17547ffa761cb7430fec5d21d576bafa3aee71be6787d6d210a72cda07bf8fefbbd49c3326826836698ba003f3482005907d5fd7f4fc8d31ed92802b6ad28df0c174cdb525238dfe82cc324b628f3359ccb57f4024c06c17e -579efb8aa51c50b13766d79a95712358ad522c2a1baa33b10df4b6817f8909e3d855b037f9f382a18aed61fa776ceb53dcb9bd2adfddb7b69e417e3ec6740b363852625ad0182e686274b3556c1fd71b3cb5df25c64ed23ce194f247022ac398408e804de1fe525046f6455c41122a3818f24b312c5db11714537f75d0f96d3c6ce02e379046a7878514157398153f9187dc5ef160e9f3572dd7abe016fc710ef0ab7670610305ec612f084026771e93274bed74cadeb5a6522076af6db38fd184c07c3721f281754119221cb49e1c35ad07838565f10f234e05bae1d88f66d8e9ab5e51d838a151 -a857f1dae5f4e7fc5b8035e82a3df1735dc0eb001c70da569d93e85efcb3ee64bb58c553770ae07c9fd6bbc5cbc8b6743b7587f1a75d277eed7599f946d94944aa668568dcb42fec6a3a7144f52c89731996207664ec0bd7aa0aae2dec262bbb3a3f4edc902619e5e5e24656f98d5dec3b9ac6937b3a27a913e43782dddfa351dc863b9b72465f653f59e1cc2cf32e04ead53cd231ec6f00603517b191bdc3434b989ff9d8e83f4ecd0bd1a145593e245b8fff15bdbfdcbbd7e1696d28df5ac6d285bff0eac38bb5342dd7ceb630e4f238019ca1235e13b8cef8f03b0945a3b1f777cef905b15a1087 -e0bdc65893480aab82ac4665e5e732a634619d7cb68fe5cfc25a4726c15ca1fd604d45aff79387153e8466f724c902c2a1ada5c53d61daca9320722c47342fef394645b5b8631dbf45046afd67b682ffca139ccf97f1f94dc0ee90155c4eed46dc42e658b105d592d0a70eb43a68a0dd9f3b8eb6609355c8169cfa483956afa46ff9ea55eaf0e66a7c36ca0d19d6986175c034d4105976580ff9d9d4959d00025b5978ae7c76fde710f7d8c9161befb62f40179be1d072f43610709af18f472798e96586a11dea0b1e37ecb4254d9b0b376916ec412f5668e93f332f8a1ef883f57f2fec44ada795286a -bc9fb80a5685dde8244f6f552a32794a8fe86ac9a3822fcc753484726513c7e129c5794b1055e1202f1cd91ebc5ee789d131c532c9efd2248beeea52cbe0eb96287a6e3a4a8b763afb37f3176e11e2c4fd9c593c3246f51bb5092f94e7d6d63b5ba5942dda975c01c3b4990a11a8571ce3494809584605d4b9d06b45d1a9604616b10477caa617542c6a89f1e8a155a1ba4b0e31c63497a8fd48ed62b47ea098f4850b9d386a2a0de0a1d793d20e720c4e1d63ab2e19133bcb2a379ca830bea32ac8103eb9105207eb10c812c0fe3dee657a357ecb13e405cb23bfbad572bee5ca80fb5bc4b315c3821b28 -f9ae35ffbbb49c533eb324cd02252de0aedaa3748c4c8884c389ca6abae2e953e405212dd687237efc676f7a000235fb13d604e0481617839493bd10a2ccac9c7d8d11186dd33134a41da716ee7a4a7e5085e48fea22b9b753709b9d86d264a521978955b2e4836573859f7124d6c9d89107f55914f33cd009fef23fd8f28c85fc53d6a7ff331ab2df6899ea0565ae4fe2f0168830ff1c20f39f994f37a857d502002b1239f7809b117856bfb92eaff2e4d8c05c718fde83825431003c5c11e661ae40b516289e3e347957669a7f20ddc665dc3bcab5bd42f2e03bca3511d83519f4a6cdb8c67e0f33b12dfd -664950c1caf4e5737671bb02158b45a938ba5aca3f7c153b64ef531c1d7e9e79bf78678abc9480046286cbf03bcea3db6de2cc5663193198e8dfa9907f7712892fc522ba644d46bd956bd8a8ce8a2d35266ef237e6c1a9fd0ec6e5c5ceccd7f726e4dad639eaa21cb475e9765671cf041f45b88840d60b22c1537112c72471f4d2430b6ace85ae80eaf4da52fb2ae1ad15ba2c5e7754da94de5b00f6aab061c1d96a7a524ffbc1ea526d3d1744d4682985e8a1427c72f90aee27505c73ae7e378e371c6000c4602007c2fc3be33936b15483a60e0838aea4834cf8d38325ad3430a1614d56ddb0370e8c8ef748 
-3deef64b2abd079784b7dbaabe360aa3f6cb20763520eff7069ec1704c9789beea0fe3390ba5af842abde876b371d43a94771b513a081099e136d32f4f8a88f4c9630db04f05ae6019b814489a5ecb7ace0c25476ae1decd59c6dda06de38e3e06347cd2294aeaaf941f0e030a895c2f2b2bc88e2ca698dcf6b6f18f24479e383a36caa47224719e581a20002bf2a21d8650f031f7dd1870c3153693b624608069f30a0ba6cf5a9a1eb712d92bb97ad3a3327a41069e23a7445c02d6de1e46b35b4a8a44134ee19886afbef0a4834f7a7fda53c1f784aee2ffaeecd86e7df02be15b62ea204aa3a082637c4ea34a -fa80ca58dee32b10b4282f576ac3f88ea89530aa712ca01a708761cfbe2a14de2fb4d5ffcc486ffab600ef97e79e4d734337b637947d04f1aa87e60020be8a26937d0e701b39c2ef09b54cc1fc784931bcc5d6b58b01bf8f636c6d40545ef5a7a5aff122f21d72e40fa1b3bea67c5a6c27127c55ccf61b601f4d59438a453c6e8ef9f1904e5c209556c085393c4ea7152412090961dc0f406dd7c008d00c8bac435b6f77ce8f26240d3ca3653d86a542240b34209ae9ab87086a539a10f9fa5551b9d13ed9501877faa3708219a2b0b2678ec57bb1ad31a8d0462ee7b2cc38f2644969b742c0da8aefe33c5185e088 -d87dc4024a0266375b6e4ba966765b1f98a02c0b14ae969d3a00bcbcc2c1b741dc96035fddc310d2b2f801e019252489084363589be8242dd4c5454cfec5cd68858d519d9f1a2660d522a399638a3dce554fbb3b9c5956f8046f7e2c488739f6fc399c208c8abb94bef1e057a4e64b8a2b4a1e71903a4ebb5540934919828696f09fe0a2bf4560d9206f7bf7d5e78ac1ccf8e4650d05cffb71b20725249f82b62f94730e854c2e50cdab1bda0888ca1137b4bc32a7b469191ea7ee33a329fc5cef8c096934ccd6142f109163b4efb93f12e85307da35eb6562ef110d4eedd0baa1ed720aa77c2dafccb1a33c6f5d8a23 -c0ce6af7494dff967497d120cf99bc0fdabdd04831ba57bd6fa5d7f5d1378b1fad4aa0c5638b3aeb34730aa782515e9a720ba112933adeeed13f5407959bad9715057001402327698a8512af562b75bb70e0f9883df3726407edf3a6cfdd410718ed739969ddafa6b4e186b3ee77dbf47cad4ed5e7a458bb927b8efccdd63a5b2399e49926e68c6d4dafaa639354e0ba349187a0cf4f3e92774a33bf95878ac585fd72b5544ae54295a3a0d8fd0d063b0e6e77feb7deb3e617e263de65531d60d138eb2e54de5d50b12c47c23ba4bc91bc477556ac56b0706629a2a89657253ccd36746918be8d0b57b9e97c6146466554 -6df5132e38e7c63b5e09d42239bf16f6a53187733ba07287b51f2362196dcd9347cf74d9f6be301ed993b63ceff5192e6f68966dee3277587bf4845bb345af7907217cfd0f3c99c34a0d8b723f8c70c5648b998e22ad0c4612b778235f757755b5fdc4f00684d5aed5c135fcf487f06cedf9f11934715b66589d6af2188a3b4e885d28e6f223f60d98817415a2d47607ed5d5b43a7559cb2bde1021f168a9d4a89d1cda0801e2c876e03208a841ed48ce86965b822039e99d56fa82d62bcd9f50deb810420e456e80f535be7baa5c1d3087f5145690a4dcf284a106ba6f5903fc0f1ecb57a7b81485710c82edf7090cf382a -925437b13c121ef97e09a3ab9a90ccba96896302f81c52697109dfc0987eeef28b9f1062981c61076b7b2d028bb6547f50401c1268d192570bcd05d5003c9bcf3845995f195339696e010981afa1adbc79857df2f72757eb4c72dd61944b68cfd1805bc248ccef6a20874add8029a8b9768d632e74fd03698d959b71b3e9e801280c022ba6d1b193cad60227a22fada2a0ff5f00b673e866127cc2da1c355cf58093fcc65580d2f1795c2ecae21ac5f0bb5737d748dbe3f83d26bc5194b00a50250367fc687d813acb857acdd580aeef2637fa78c2a7ee2dd7543d4a40d37e49673aa073932dfb75e9d79c087fe757db4414d6 -a840502c7c8a93e3a9722b1baa51b553df591b2091a842678e4c68e34c92afcb1099b3d3334e247aa2acc24e03a32b438dec4ffa644f114ff50e3683d695562754134c73ffe785f1a2c87591a50239402d6302c30c8365dd8f50dfeb5c2479f760eb119f31686e29ae973b46ce646463e1e56c0f8a6252b85d83bfd17fa22ce98a9dc2880db8fe277d6f92cd4cf7cf73cd930c9e33cf61395a36548b31ca1f8b27dd43100fe9df2884a7b384f14bf7ccb69e8a8b21884012058c11e3e1078727e452dbbc49c26db558c3d00032dffb21ae2841a186fc66d5bc5243ddad577727fbee6bf8c2d0af778773a1b5250e875483bca9c0 
-8e474a9a84ca66665afbe283ea1dcc50e9a4e962a8c4a57aae5531047a062852db6b2a0622fb46cd62be1be9136a41834ce55ea5676142415b7c3ad60901a365df3197a375d9b2d78eaab078eaa1df2e0bce6e5f6c983a73f15d8275ebc31867a1b85abad097742e6213841ea0f2c96ca9860d73a4908b8544de88c82e12a32f38c8af1434c720a82dac08152ec7b3acae5482664a68ef92a5eb8e7c27a45f2750c0b4e7f057d6fb3bc36b07ea16735e12c14d0c1ba4dc5f6788428f036b4e5e4fe59766e80f864d11962f02805c0ddb7f9705faa0e2eae4d3c7f1b44af42baebe8b079bc063bfe14638a126926c9984210a2b932d -c39669d1c430c3e2c1724f007eabc83ab5965414fbcb96c5496285529885cdc7fe1e499d7a10f697b7d6b1d96720481ef33758100b1237aab8d204cdfecfc3324ec5232c18c95e427a16ccfdd2755850f142e67f61b5cde4a4b17b2427ce216dd0021edb094c78321a6e73a120da59e11188064db3432b30942b5caf3d8692d4762fb64b0a725c097d747366cba193de4651e92de640911838c351a43e85a391d85638b38a85c7083ee02e41bded091399a77851ddd026ac2d8cf11f8b07883d238f7e1e19acb2ef215e1d4a033cca51d7d7ff132bb89cfcde2693b3a41efd5123a0f17a64d7e0a6d2e5b77283e99ab1c69fcc6d20e0 -7dbeac7fa7003c93db93ad5c10e1c5a7d1f3d25df52edc39192ad115a9aa142986803a35912edf5568ff4d35a8a68a2db44d5c2ae93c7239198c642dc0732e28f703dbdf4b586a3ad2db363fe17c27c77e08344cd8fa36db95665ca974b5a0613f3eb584eb6b371ef1432d39edbed3ef88104d0664a006b2b08ade648f90da57661b267ec637c147bdfec665bb05e01e4d607070b8eeddbfce52ab461b4a54d4c3c3eb33c6213eeb5581c7d752669d70ba1542c9f83a3e8e5445afc468306180268083aa7c0c471929dc70150d3886e2fdd8ffa1821f956b3eb1cb5d8870c36910bed17f32872a8c36e6df6a77d2b8ba67d0e367b71137 -9e75cfd15638c15d60ea531b2806b51d3ae590e64d6aac611812992e870fba84f76c367f78c8b26de7033f87896468edc89d88a5ee582429548b620ec67388ee80888be6f513009777d9243dc6d71f3b3251418ff9d2aef57287d7e9d1a62437f54d39dec07aa36bb28ee45d3c7f050b8f9a3e37e233e3aa91711287510dd5111616c0fe19ce08390f6033408dfcc5ad37bb6af02e8ccf794e5609d5e16e971aa36e21304dfcdb4368131db4acf38f7c911368e4df2b42fb02068509e3a15b9d59b87292d684966e7492a1f46e2923a9a40324b0bec5f7d1751b41feb97def10447a278a062150bba4129e6ba7206bad86d4b6d7d98b06b7 -e6cd6a12f97317f3c1fc588b7a4f1afa8abde43821301514f2970b224af94a9032efa0c6f97a8434dd37bba19471faeaff3b8a8f9cb5a9acc71f00917563a8c835bb97e4fda77aa709a4e88937b852e957b01f0f2385b82db6327185a131efeca048f2a853ffe5b1cfc5310513efaef893f95360447acffc38cc409f7ab0485722aab359a37918c52019c86689dbb9a4f0f38c9917d76b22910ed656ffee07acefb88ec7f0809e7f0203fd3cd4a1e7527cce0c029b7c80852b86455cb9d87a6f0878f08b0d001afba2768f33334d81be572503b1cc3a0af7807ec41a4dfaad3a50fc96476af744cd7c49d919454c187d156799e583a8c74d03 -02608e76c626822f416e6deb6056eba09a6898fd696174e39620d960e47b78de1fc006d8130521843c8f3e610a6295fe15950c8974b2f7b18f3850a257eae17269a0268ce18b321f480d96e2750e923bd32d6c05fafb4ed3eb49d45c02f2c5358baa0411743c96285bee23da543dd8e21ac15326f9d9eabd3c3feb98d91cc99c0322d52622321946a688e28180c1212e75461d205eaa0080ca2667c670747f8bb5b18ceaadcb4fbcf5ad8be2878030b510c6fcd564c848bb08b5b877da740e684d9d52654324067c8a32f90c8ef40a9ad0067b183d1c18f93d5437f08bf03e4a04cebdbf8a075e88ce8b95669b71dff7e40d384d20d1c06a31af -2f4119c21c013098b20b8dbd84b47fd5011c72df62b939746b7c8d496ab4173cdd2d8ff9952619fbcc86d1ef2777f17638de90c1644b17e27d7ed97da0074a2f530b2441eb6d0eb56eb46ce882105bf2ae3d956c4d7e5be803c5dab0ce7c55548306cb9105ab5d098288d8aeedc03cca581721ef1cab2e04e315cd7f8bddae7c9ac4afa865a15bbf558b8f4205a6fdf405d021b67a0326efda528149b1729c26b3b4d3869425f324b5f0865a6be0ed9dd04893f1fa2da06b0e5665fc317e89b47cb71fe6e673878ae4839fbbdb26fae94cf37583985b642186afafa3c896c55e9284ee2b7e5fde9596c42d5136a5024ce6f0c6ba5fed11928ef0ad 
-a270cd89ec091f4862974d10dca5a283b8c332f7b4a99527ad63fb86e1d4b64edc1281e5278d000c9c6f1bb5fca1d687f689ec64ab32d61b3f47a23c98ef70718cc1510b4785c2b56e3b619b3e5c184628e0c96255257b345a6c42a589fa245e2fdbd7819b8f0460fc371d683c37a468a5eb61bfd5338fdedb66d70ac110949a19b9e417b60d6fdf511eb41737c35ae15975f5a98125198f53214375ae8361f2d1a4d9df67c21067a676301a040e2ff99b7f9f4b7f27a5a2db82c56f8fdb366aeb3deaeff45d163c859ee2d60f11a16193a3b81f51ab9c268d53883c166fbf2af91f34735b170278a8d594c4489ef6fc530e2faa10e78c90274084b5 -3e4cc5a816a4eb2e2c4a7fa626ed70a7dd08bc3d8b3fe70ed007c76db3fc62be345d00107519a2f16c31479b9ab74553169b8a6a54c3e1bf5c142a946cba2d1cff48bfb4c4896209514a349c6367df2ade1d6b5848a4aad085db2e48ca933f9217a11ffd55f1addc12f20abfbd71382836df2283e739bd003031acafb7331fbe4baef9a166f45f504f6aae650e29733a3b8f15cf39c99506cfd1bf2bd7a70ad600fd27bf34a18a8b94be6e7ccd0d92fc004de9d3f06268878ff7af6c796d350388d28760e9930a8de562d4a99f5c7446520a186337389f3763305209212571f573d0cb26ab0cbddb0b09eec2112feffcde44dcc641d2396dbd1a31d965 -f34675a5f4c344c1616dfffbe5ed963b6308a5409b7d0106a2a733117f9ad88923d33478d4d0f52058f03bf7c2da3f26221cc0495fe9edb16bd32682965f992d7e9a14daae5cd44f29d4dd92d0b4f1a893394c659c2a755231ab20e59a31aec6451d3b301d6e7a41027fb8a2520177094b7422575803e72e647de294a04c4f34f8e487036de84679f3c5f915608cfd15d565e24b8ae27acefcbf54b033a83882745f6418a217ccd0f8ae4e10ff04e67f57b36d94dcd5b442f6e36e452ffbf6ec7ba6490e079419252d54ab64c5afdde196d0b5c352ad70ce39b16791cccbb33d498d5a7ffd2ae2174b34b23f78e8972a5fa04f7ebe66203d681bb163aa18 -65caea398636380c6955c7549491c91157776fa1a6514355837e51fc6bbc35b7bb8b44fe019c1be93ce474e810305e36e5cd445b417001cb2b8bba78af6fdc1c12b83e326a5d323752930c5fe879629d5f5772f872b3db4ddb1cbf43ef3115e344327b3dcba6a7d8c82511c74a70b12b405481e66dbd1b8a7a9cdab1d52bdcde972aba064915ceee02e7901e757d1470fabc32f9ab873508c6e243b956cac2d63aeb32b179f2cfab3cb4c2345dfb6a18c05b97f9e659c0020de22f85b5ceef470a5ad6e8597c8570a85be25d48d60151577f9a4fbe2c09862dd57ff734e156f66fd7107ccfe0e46193d2272ce6d6c0dfc0a81cef52cbd61d2964aea53922bb diff --git a/canton/community/common/src/test/test/resources/tls/participant.pem b/canton/community/common/src/test/test/resources/tls/participant.pem deleted file mode 100644 index 4a0e3af984..0000000000 --- a/canton/community/common/src/test/test/resources/tls/participant.pem +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQC4iQ6l9+JpqJ6R -DrqJuyVmjvBK7ibL0VjJNs4taOyNYDzbm3oYqlc69A+Uqjf7cOclzHgW/NtuEKZE -F71e7iHbgexiOoDAbl3HFa/2LN7fGnp6q1kgXOKeQ/wJfPsM20pMoJTq6+ug+4L/ -q/qSEOGK9DDi/5Jd1QYtFLiukqL5/ElfzEHWPO+5Tzrj81buKGlJMsjDZ6IGTzsZ -AgfQi+r3zzL0ICoi239vXIKeGoSz62Fm36wa+RlG3MB4QpBelfFMryBx9BVs/avY -sPe9VvBLqtwVppMDUTSfXTaydNDnkjrTwpdbJwMNHoR/Yd4QHNHqGDiUa6aF58H3 -t2aAbQVjpkKB2epLl81o0hkv4cXhCQZciwxlvNN00Zl631WHoOp5buTLy6wzQAC6 -TOnJW8Y9ZBcvoKMOtLATWYHGF7sJhnr7c9MKo6hu2uccAlISyOvB9uVOk9ygO6k3 -DWM4IxJSOhCXVX+EOiy8RGg1tEPbVn2HBhm2iV6rgniURD0FoujDkNFJXBkJmTAW -GBbu8tzjTQdxlnnpw8J4LDcowyV/tWjx9f4eIsbqoyhBAircxCReUJYpwo5/QC3f -FYwXH8F6OlSEi4FdY3ExYrE1p5W37t8yQ+fSOAD8aWIg9K4HbIdYcy+wmLQojlSR -F+EO0Se/boQjiDywxONUuS907/sjqwIDAQABAoICAQCY12QbkQGlc7TJ/oIAG9Jx -bIiX90KI4/7+wXbpY5Mx0Xj2h4MMNOIFASp6frXZe3zn8L+ygoIYWo/rrCH8I45F -agnonf4P5BmmEE9qvWVnMl4phgxO4sAMR9DT5wFkd6jFaOeO4BKMhmTM8JucKYE1 -fuWSzg1jzFHOOvBwHyH/nbcEMoCBOn1vZbEoJl9k6HGz3+0q84XeZfdT0ju4vnrB -/4p7arybnGOjT+MbomkxQA6vjyjUOmd2Ktb7hF5hCR4P3qkn2Pt6B20zD/hRiVWD -xqN9BzeSssq8CA+KpXh1pMc2p2CHbft3/UPO4bd0bvpqQt9yBuuo0vjz/KJYUnP3 
-2UaG6B/eolYTSHYtBx6RRH2TZ1p1mB4BR7bAuB4sfWgj3qELJDKXpNHzjwqbHyvK -jNg/ZSAuIURQLI20ndOP+goLMWyJX3s3LpgDEJrhtiUkeKRo7NXjEk+3RyuRVtOA -p+wNtmdshVNJ0wri/RCW3Cvn39bEBKkQA8RQTrqBgVNG9FEPZJk3trWWdHxuZ1Vr -KAAHVJ0GIQAkMKhDW8JtKGFi6ZWYGLoLqmHuUzivvlo6v0iGxz89bdbxLMhOrSc0 -ro9qWds+B+/6pFvExtf6ABc/OoRJ0e/nMFVtKeaK0Q2KTBqAO4a7pL9pLOvICPfs -MorYU6h161jTXZp5hA7lcQKCAQEA6UAkiEjCU+S6vFFRICRY8lWM4tSs6GQaZ6SH -i0iMnJZSm9XrxvGgx5/edyy9g9NKSbxDsQQdc2fQ5oaJ+ChAEC2c12PAIlUC17/6 -dqqNrqNgVT2651z5xLLNEkIqX+16fqVWRt+u3tq53zZ95eQi6PhkIyBsfarqMgRx -J4qfqs0o141RXUPRaJX+XYs5/UnZVwUXtHPUhikIIGzKzFHUPLs5uNEwUbQdUWdf -600ANu5yRY/Jy6XqMfGqZniOS4EeMT7v+CofW+XTpGzztxpnxHDgZONzl77TMymE -O1Wve18DvjJtBjK06f7+HEASl/z2DDDnlbyecrnR65gB6u6J+QKCAQEAyoiU/F4u -Oj34NQEpBCjbPCW/89Z9BV9E4YzQla6MyE6xlxsveJydmAMGAKLgFAncuyQJwAR+ -v5Dam8bucLguwPlE63OCHlbp2wWU5DOGBa7dvnT8/EKHXEUWOzMMYh9+BMCkg8/T -aP0hl2URXL5poXS5Q4onFlUEhVqNKkG+/2Zs8gn+bhD3sQNfIuWaVS1iNpZUaLnm -Ea+A/gf9I/dygV40BAgaB5BRLXM1pTTJNKBbVCv+5ZElO/FdRwtLCj41rzisjSob -2uQxbGt2c4z9fY639YjTxBHhcvGrNeuPnmCL3CJ8wY38kKh0/bV6MunlTAziHFke -JTaYVJzHLYMjwwKCAQAiuc72j0aHMO1wLfcS/vZTFdOEzb70VjrsJT2cye9NPMAN -vVp94ulZ7v4y5fFNF/7eXW4Cg6dS0Cj0uSD/Vha7kd7g3lPziI6EymsikZ/IWUB1 -UFxsJ5Zz517Kkhk28Ockxb7DjHe/a/byOW2i9UURRDG8XQ5p4zE8wxaJmYTN6/9h -oAN2DCu4E2SsTpcSk5UM9JAj6H7CfodcNHY41xHu/LzKPDKiZ1taDDbmlC8s/nD6 -1D5p2Ei2qNECsn4U1aNGF6WoX7UNfHj26NWb9A1w3JE70/UYqWtOGac7IjWe+3VW -MrGyk4gFslXopM1f3WRQSlOgIlXApNOQ7K+UZ3EZAoIBAGLvYUf+gbp1tx+9V1nu -01mIX0IvzpanOTCs5t+XirH2rw3Pe9sBNoKhR3dcGPXw82B/dDvWdkwB6Kgb/zIU -5tHhcmdJ35AOiOIy0c6/1IJGL4/v+f0ISrMqFHgKscvk8lxD9pllFjK9JIGeH2Mc -qODz8eQqQnnIQhMabsbrou+EC0gY1a7SnwxbR0f2vPNK3iUoPkeCN/7/qpVtNZ0y -LT9A00v84So8t0rqcpQnOvp4MuTlMPjXdEbZ2uTvgVKZnQPhPXkfzseYnQQHYq+D -efixlO9eTYQD8AuEPkxyq4+EPg3k0uiCAZRwleWxluX0F8yUpoeOiQlIqBT9GNcE -rp0CggEAbyLH7THIKvDMDq49z4JKvHeF6FnwfqbnSGonbwlPXpJADSiHwKbgJuBy -c4PtWsKLAR/iiUNcmC8M3GTeNtoHmp+d8lvj9jjUpXvUt/oWwg5k4VtMZStdZ1Wl -KUnJuH0/8t3UtHzvHffhzpt61hjZMzT0ThxLkG2OYd0Xwg10tpQop/KXOol1+I1k -rpvP8v300LDHCwU0Ug2xiFxRw+7sZtTA4/8Nl7Nj1+KzQJUFpmV//E1XxU2/Kmhp -33zcG41hHx5SKW3FmOHw/omFRiNX+sGoUaPs0Uz3+dnquiADyZZci1Rv0w9CGvhr -IdiRR2cexNXF+WLvgrQxSI6HosPnxg== ------END PRIVATE KEY----- diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/CheckedTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/CheckedTest.scala deleted file mode 100644 index 759dcc691c..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/CheckedTest.scala +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton - -import org.scalatest.wordspec.AnyWordSpec - -class CheckedTest extends AnyWordSpec with BaseTest { - - "checked" must { - "be the identity operation" in { - checked[Int](5) shouldBe 5 - checked[String]("abc") shouldBe "abc" - } - } - - class TestException extends RuntimeException - - "checked" must { - "show up in the stack trace" in { - def throwException: TestException = throw new TestException - - val ex: TestException = - try { checked(throwException) } - catch { case ex: TestException => ex } - assert( - ex.getStackTrace.exists(ste => - ste.getMethodName == "checked" && ste.getClassName == "com.digitalasset.canton.package$" - ), - ex.getStackTrace.mkString(", "), - ) - } - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/ComparesLfTransactions.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/ComparesLfTransactions.scala deleted file mode 100644 index eaad319f90..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/ComparesLfTransactions.scala +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton - -import com.daml.ledger.javaapi.data.Identifier -import com.digitalasset.canton.ComparesLfTransactions.TxTree -import com.digitalasset.canton.logging.pretty.PrettyTestInstances.* -import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} -import com.digitalasset.canton.protocol.* -import com.digitalasset.daml.lf.data.{FrontStack, ImmArray} -import com.digitalasset.daml.lf.transaction.NodeId -import com.digitalasset.daml.lf.transaction.test.NodeIdTransactionBuilder -import com.digitalasset.daml.lf.transaction.test.TransactionBuilder.Implicits.{ - toIdentifier, - toPackageId, -} -import org.scalatest.{Assertion, Suite} - -/** Test utility to compare actual and expected lf transactions using a human-readable, hierarchical - * serialization of lf nodes. - */ -trait ComparesLfTransactions { - - this: Suite => - - /** Main compare entry point of two lf transactions that asserts that the "nested" representations - * of lf transactions match relying on pretty-printing to produce a human-readable, multiline and - * hierarchical serialization. - */ - def assertTransactionsMatch( - expectedTx: LfVersionedTransaction, - actualTx: LfVersionedTransaction, - ): Assertion = { - - // Nest transaction nodes and eliminate redundant node-id child references. - def nestedLfNodeFromFlat(tx: LfVersionedTransaction): Seq[TxTree] = { - def go(nid: LfNodeId): TxTree = tx.nodes(nid) match { - case en: LfNodeExercises => - TxTree(en.copy(children = ImmArray.empty), en.children.toSeq.map(go)*) - case rn: LfNodeRollback => - TxTree(rn.copy(children = ImmArray.empty), rn.children.toSeq.map(go)*) - case leafNode: LfLeafOnlyActionNode => TxTree(leafNode) - } - tx.roots.toSeq.map(go) - } - - // Compare the nested transaction structure, easier to reason about in case of diffs - also not sensitive to node-ids. - val expectedNested = nestedLfNodeFromFlat(expectedTx) - val actualNested = nestedLfNodeFromFlat(actualTx) - - assert(actualNested == expectedNested) - } - - // Various helpers that help "hide" lf value boilerplate from transaction representations for improved readability. 
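CheckedTest, whose deleted body closes above, pins down two properties of Canton's `checked` marker: it behaves as the identity function, and its frame shows up in stack traces of exceptions thrown while its argument is evaluated. A minimal sketch of such a marker, assuming a by-name parameter in a package object (an illustration of the pattern, not the actual Canton definition):

package object sketch {
  // Identity marker documenting that the call site's invariant was verified
  // manually. The by-name argument is evaluated inside this frame, so a
  // `checked` entry appears in stack traces, matching what the test asserts.
  def checked[A](x: => A): A = x
}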
- def args(values: LfValue*): LfValue.ValueRecord = - LfValue.ValueRecord(None, values.map(None -> _).to(ImmArray)) - - def seq(values: LfValue*): LfValue.ValueList = valueList(values) - - def valueList(values: IterableOnce[LfValue]): LfValue.ValueList = - LfValue.ValueList(FrontStack.from(values)) - - val notUsed: LfValue = LfValue.ValueUnit - - protected def templateIdFromIdentifier( - identifier: Identifier - ): LfInterfaceId = - toIdentifier(s"${identifier.getModuleName}:${identifier.getEntityName}")( - toPackageId(identifier.getPackageId) - ) - -} - -object ComparesLfTransactions { - - /** The TxTree class adds the ability to arrange LfTransaction nodes in a tree structure rather - * than the flat node-id-based arrangement. - */ - final case class TxTree(lfNode: LfNode, childNodes: TxTree*) extends PrettyPrinting { - override lazy val pretty: Pretty[TxTree] = prettyOfClass( - unnamedParam(_.lfNode), - unnamedParamIfNonEmpty(_.childNodes), - ) - - def lfTransaction: LfVersionedTransaction = - buildLfTransaction(this) - } - - def buildLfTransaction(trees: TxTree*): LfVersionedTransaction = { - val builder = new NodeIdTransactionBuilder - - def addChild(parentNid: NodeId)(child: TxTree): Unit = { - val nid = builder.add(child.lfNode, parentNid) - child.childNodes.foreach(addChild(nid)) - } - - trees.foreach { tree => - val nid = builder.add(tree.lfNode) - tree.childNodes.foreach(addChild(nid)) - } - - builder.build() - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/DefaultDamlValues.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/DefaultDamlValues.scala deleted file mode 100644 index 1ded9f1b07..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/DefaultDamlValues.scala +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
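The TxTree companion above completes ComparesLfTransactions. A hypothetical usage sketch: build the expected transaction as a tree and compare shapes without being sensitive to node ids. The fixtures `rootExercise`, `childCreate`, and `actualTx` are placeholders (and the import paths are assumed), not real Canton helpers:

import com.digitalasset.canton.ComparesLfTransactions
import com.digitalasset.canton.ComparesLfTransactions.TxTree
import com.digitalasset.canton.protocol.{LfNode, LfNodeExercises, LfVersionedTransaction}
import org.scalatest.wordspec.AnyWordSpec

class MyWorkflowSpec extends AnyWordSpec with ComparesLfTransactions {
  // Placeholder fixtures; a real suite would build these from test factories.
  def rootExercise: LfNodeExercises = ???
  def childCreate: LfNode = ???
  def actualTx: LfVersionedTransaction = ???

  "my workflow" should {
    "produce the expected transaction shape" in {
      val expected = TxTree(rootExercise, TxTree(childCreate))
      assertTransactionsMatch(expected.lfTransaction, actualTx)
    }
  }
}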
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton - -import cats.Id -import com.digitalasset.canton.data.DeduplicationPeriod.DeduplicationDuration -import com.digitalasset.canton.protocol.{ - LfCommittedTransaction, - LfHash, - LfLanguageVersion, - LfTransaction, - LfVersionedTransaction, -} -import com.digitalasset.daml.lf.data.{ImmArray, Ref} - -/** Default values for objects from the Daml repo for unit testing */ -object DefaultDamlValues { - def lfUserId(index: Int = 0): Ref.UserId = - Ref.UserId.assertFromString(s"user-id-$index") - def userId(index: Int = 0): UserId = UserId(lfUserId(index)) - - def lfCommandId(index: Int = 0): Ref.CommandId = - Ref.CommandId.assertFromString(s"command-id-$index") - def commandId(index: Int = 0): CommandId = CommandId(lfCommandId(index)) - - def submissionId(index: Int = 0): LedgerSubmissionId = - LedgerSubmissionId.assertFromString(s"submission-id-$index") - - lazy val deduplicationDuration: DeduplicationDuration = DeduplicationDuration( - java.time.Duration.ofSeconds(100) - ) - - def lfTransactionId(index: Int): Ref.TransactionId = - Ref.TransactionId.assertFromString(s"lf-transaction-id-$index") - - def lfhash(index: Int = 0): LfHash = { - val bytes = new Array[Byte](32) - for (i <- 0 to 3) { - bytes(i) = (index >>> (24 - i * 8)).toByte - } - LfHash.assertFromByteArray(bytes) - } - - lazy val emptyTransaction: LfTransaction = - LfTransaction(nodes = Map.empty, roots = ImmArray.empty) - lazy val emptyVersionedTransaction: LfVersionedTransaction = - LfVersionedTransaction(LfLanguageVersion.v2_dev, Map.empty, ImmArray.empty) - lazy val emptyCommittedTransaction: LfCommittedTransaction = - LfCommittedTransaction.subst[Id](emptyVersionedTransaction) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/Generators.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/Generators.scala deleted file mode 100644 index bedc5213f6..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/Generators.scala +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
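The `lfhash` helper in DefaultDamlValues above derives a deterministic 32-byte test hash by writing the index big-endian into the leading four bytes. The same bit arithmetic as a standalone snippet, shown only to make the packing concrete:

def packIndex(index: Int): Array[Byte] = {
  val bytes = new Array[Byte](32)
  // Big-endian: the highest-order byte of the index comes first.
  for (i <- 0 to 3) bytes(i) = (index >>> (24 - i * 8)).toByte
  bytes
}
// packIndex(0x01020304) starts with the bytes 0x01, 0x02, 0x03, 0x04 and is
// zero elsewhere, so distinct indices yield distinct test-only hashes.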
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton - -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.config.CantonRequireTypes.{ - AbstractLengthLimitedString, - LengthLimitedStringCompanion, -} -import com.google.protobuf.ByteString -import org.scalacheck.{Arbitrary, Gen} - -object Generators { - private val nonEmptyMaxSize: Int = 4 - - implicit val byteStringArb: Arbitrary[ByteString] = Arbitrary( - Gen.stringOfN(256, Gen.alphaNumChar).map(ByteString.copyFromUtf8) - ) - - implicit val userIdArb: Arbitrary[UserId] = Arbitrary( - Gen.stringOfN(32, Gen.alphaNumChar).map(UserId.assertFromString) - ) - implicit val commandIdArb: Arbitrary[CommandId] = Arbitrary( - Gen.stringOfN(32, Gen.alphaNumChar).map(CommandId.assertFromString) - ) - implicit val ledgerSubmissionIdArb: Arbitrary[LedgerSubmissionId] = Arbitrary( - Gen.stringOfN(32, Gen.alphaNumChar).map(LedgerSubmissionId.assertFromString) - ) - implicit val workflowIdArb: Arbitrary[WorkflowId] = Arbitrary( - Gen.stringOfN(32, Gen.alphaNumChar).map(WorkflowId.assertFromString) - ) - - def reassignmentCounterGen: Gen[ReassignmentCounter] = - Gen.choose(0, Long.MaxValue).map(i => ReassignmentCounter(i)) - - def lengthLimitedStringGen[A <: AbstractLengthLimitedString]( - companion: LengthLimitedStringCompanion[A] - ): Gen[A] = for { - length <- Gen.choose(1, companion.maxLength.unwrap) - str <- Gen.stringOfN(length, Gen.alphaNumChar) - } yield companion.tryCreate(str) - - def nonEmptyListGen[T](implicit arb: Arbitrary[T]): Gen[NonEmpty[List[T]]] = for { - size <- Gen.choose(1, nonEmptyMaxSize - 1) - element <- arb.arbitrary - elements <- Gen.containerOfN[List, T](size, arb.arbitrary) - } yield NonEmpty(List, element, elements*) - - def nonEmptySetGen[T](implicit arb: Arbitrary[T]): Gen[NonEmpty[Set[T]]] = - nonEmptyListGen[T].map(_.toSet) - def nonEmptySet[T](implicit arb: Arbitrary[T]): Arbitrary[NonEmpty[Set[T]]] = - Arbitrary(nonEmptyListGen[T].map(_.toSet)) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/GeneratorsLf.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/GeneratorsLf.scala deleted file mode 100644 index 4d6cb32af3..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/GeneratorsLf.scala +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
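A short sketch of driving the deleted Generators helpers from ScalaCheck. It assumes scalacheck is on the test classpath and that NonEmpty[List[Int]] widens to List[Int] for the `size` call; by construction, `nonEmptyListGen` combines one mandatory element with up to nonEmptyMaxSize - 1 extras, so the property never sees an empty list:

import com.digitalasset.canton.Generators
import org.scalacheck.Prop.forAll

object GeneratorsProps {
  // Holds by construction of nonEmptyListGen.
  val nonEmptyProp = forAll(Generators.nonEmptyListGen[Int]) { ne =>
    ne.size >= 1
  }
}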
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton - -import com.digitalasset.canton.crypto.{Hash, HashAlgorithm, TestHash} -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.protocol.{ - AuthenticatedContractIdVersionV11, - ExampleTransactionFactory, - LfContractId, - LfGlobalKey, - LfHash, - LfLanguageVersion, - LfTemplateId, - Unicum, -} -import com.digitalasset.canton.topology.{GeneratorsTopology, PartyId} -import com.digitalasset.daml.lf.transaction.Versioned -import com.digitalasset.daml.lf.value.Value.ValueInt64 -import org.scalacheck.{Arbitrary, Gen} - -final class GeneratorsLf(generatorsTopology: GeneratorsTopology) { - import com.digitalasset.canton.data.GeneratorsDataTime.* - import generatorsTopology.* - - implicit val lfPartyIdArb: Arbitrary[LfPartyId] = Arbitrary( - Arbitrary.arbitrary[PartyId].map(_.toLf) - ) - - implicit val lfTimestampArb: Arbitrary[LfTimestamp] = Arbitrary( - Arbitrary.arbitrary[CantonTimestamp].map(_.underlying) - ) - - implicit val LedgerUserIdArb: Arbitrary[LedgerUserId] = Arbitrary( - Gen.stringOfN(8, Gen.alphaChar).map(LedgerUserId.assertFromString) - ) - - implicit val lfCommandIdArb: Arbitrary[LfCommandId] = Arbitrary( - Gen.stringOfN(8, Gen.alphaChar).map(LfCommandId.assertFromString) - ) - - implicit val lfSubmissionIdArb: Arbitrary[LfSubmissionId] = Arbitrary( - Gen.stringOfN(8, Gen.alphaChar).map(LfSubmissionId.assertFromString) - ) - - implicit val lfWorkflowIdArb: Arbitrary[LfWorkflowId] = Arbitrary( - Gen.stringOfN(8, Gen.alphaChar).map(LfWorkflowId.assertFromString) - ) - - implicit val lfContractIdArb: Arbitrary[LfContractId] = Arbitrary( - for { - index <- Gen.posNum[Int] - contractIdDiscriminator = ExampleTransactionFactory.lfHash(index) - - suffix <- Gen.posNum[Int] - contractIdSuffix = Unicum( - Hash.build(TestHash.testHashPurpose, HashAlgorithm.Sha256).add(suffix).finish() - ) - } yield AuthenticatedContractIdVersionV11.fromDiscriminator( - contractIdDiscriminator, - contractIdSuffix, - ) - ) - - implicit val lfHashArb: Arbitrary[LfHash] = Arbitrary( - Gen.posNum[Int].map(ExampleTransactionFactory.lfHash) - ) - - implicit val lfChoiceNameArb: Arbitrary[LfChoiceName] = Arbitrary( - Gen.stringOfN(8, Gen.alphaChar).map(LfChoiceName.assertFromString) - ) - - implicit val lfPackageId: Arbitrary[LfPackageId] = Arbitrary( - Gen.stringOfN(64, Gen.alphaChar).map(LfPackageId.assertFromString) - ) - - implicit val lfTemplateIdArb: Arbitrary[LfTemplateId] = Arbitrary(for { - packageName <- Gen.stringOfN(8, Gen.alphaChar) - moduleName <- Gen.stringOfN(8, Gen.alphaChar) - scriptName <- Gen.stringOfN(8, Gen.alphaChar) - } yield LfTemplateId.assertFromString(s"$packageName:$moduleName:$scriptName")) - - private val lfVersionedGlobalKeyGen: Gen[Versioned[LfGlobalKey]] = for { - templateId <- Arbitrary.arbitrary[LfTemplateId] - // We consider only this specific value because the goal is not exhaustive testing of LF (de)serialization - value <- Gen.long.map(ValueInt64.apply) - } yield ExampleTransactionFactory.globalKey(templateId, value) - - implicit val lfGlobalKeyArb: Arbitrary[LfGlobalKey] = Arbitrary( - lfVersionedGlobalKeyGen.map(_.unversioned) - ) - - implicit val lfVersionedGlobalKeyArb: Arbitrary[Versioned[LfGlobalKey]] = Arbitrary( - lfVersionedGlobalKeyGen - ) - - implicit val LfLanguageVersionArb: Arbitrary[LfLanguageVersion] = - Arbitrary(Gen.oneOf(LfLanguageVersion.AllV2)) - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/HasActorSystem.scala 
b/canton/community/common/src/test/test/scala/com/digitalasset/canton/HasActorSystem.scala deleted file mode 100644 index 8bcccf5e66..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/HasActorSystem.scala +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton - -import com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.lifecycle.LifeCycle -import com.digitalasset.canton.logging.NamedLogging -import com.digitalasset.canton.util.PekkoUtil -import org.apache.pekko.actor.ActorSystem -import org.scalatest.{BeforeAndAfterAll, Suite} - -/** Mixin to provide an implicit [[org.apache.pekko.actor.ActorSystem]] to a test suite */ -trait HasActorSystem extends BeforeAndAfterAll { - this: Suite with HasExecutionContext with NamedLogging => - - protected implicit lazy val actorSystem: ActorSystem = - PekkoUtil.createActorSystem(getClass.getSimpleName) - - protected def timeouts: ProcessingTimeout - - override def afterAll(): Unit = - try LifeCycle.close(LifeCycle.toCloseableActorSystem(actorSystem, logger, timeouts))(logger) - finally super.afterAll() -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/NeedsNewLfContractIds.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/NeedsNewLfContractIds.scala deleted file mode 100644 index db11153b06..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/NeedsNewLfContractIds.scala +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton - -import com.digitalasset.canton.protocol.{LfContractId, LfHash} - -/** mixin for tests that need new, unique LfContractIds (V1-based) */ -trait NeedsNewLfContractIds { - this: BaseTest => - - val hasher: () => LfHash = LfHash.secureRandom(LfHash.hashPrivateKey(loggerFactory.name)) - - def newLfContractId(): LfContractId = LfContractId.V1(hasher(), hasher().bytes) - - def newLfContractIdUnsuffixed(): LfContractId = LfContractId.V1(hasher()) - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/PrivateConstructorTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/PrivateConstructorTest.scala deleted file mode 100644 index 66a789f793..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/PrivateConstructorTest.scala +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
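A hedged sketch of mixing the deleted HasActorSystem into a suite. It assumes BaseTest supplies the NamedLogging, Matchers, and ProcessingTimeout plumbing that the self-type and the abstract `timeouts` member require, as it does for the surrounding suites:

import com.digitalasset.canton.{BaseTest, HasActorSystem, HasExecutionContext}
import org.scalatest.wordspec.AnyWordSpec

class MyStreamingSpec
    extends AnyWordSpec
    with BaseTest
    with HasExecutionContext
    with HasActorSystem {

  // The actor system is created lazily on first use and closed in afterAll.
  "the suite" should {
    "have an ActorSystem available" in {
      actorSystem.name should not be empty
    }
  }
}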
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton - -import org.scalatest.wordspec.AnyWordSpec - -class PrivateConstructorTest extends AnyWordSpec with BaseTest { - "private constructor" must { - "be taken into account" in { - assertDoesNotCompile( - """ - |final case class MyClass private (i: Int) - | - |object Other { - | val instance = MyClass(42) - | println(instance) - |} - |""".stripMargin - ) - } - } -}
diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/SequentialTestByKey.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/SequentialTestByKey.scala deleted file mode 100644 index fae2ccece3..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/SequentialTestByKey.scala +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton - -import org.scalatest.{BeforeAndAfterAll, Suite} - -import java.util.concurrent.Semaphore -import scala.collection.concurrent.TrieMap - -/** Our tests are typically run in parallel, but sometimes we want to make sure that specific tests - * do not run in parallel. One example of this is tests that change the same DB table, where it doesn't - * make sense to change the table just to enable parallel execution. By adding this - * mixin, a per-key semaphore ensures that only one test with the same semaphoreKey runs at a time. - */ -trait SequentialTestByKey extends BeforeAndAfterAll { - self: Suite => - - protected val semaphoreKey: Option[String] - - override def beforeAll(): Unit = { - semaphoreKey.foreach(TestSemaphoreUtil.acquire) - super.beforeAll() - } - - override def afterAll(): Unit = { - super.afterAll() - semaphoreKey.foreach(TestSemaphoreUtil.release) - } -} - -object TestSemaphoreUtil { - private val semaphoreMap = TrieMap[String, Semaphore]() - - def acquire(key: String): Unit = { - val sem = semaphoreMap.getOrElseUpdate(key, new Semaphore(1)) - sem.acquire() - } - - def release(key: String): Unit = - semaphoreMap.get(key).foreach(_.release()) - - // pre-defined semaphore keys here - val SEQUENCER_DB_H2 = Some("sequencer-db-h2") - val SEQUENCER_DB_PG = Some("sequencer-db-pg") -}
diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/UniqueBoundedCounterTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/UniqueBoundedCounterTest.scala deleted file mode 100644 index 3eaa48ad5a..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/UniqueBoundedCounterTest.scala +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
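Usage of the SequentialTestByKey mixin deleted above, as a hypothetical pair of suites that must not touch the H2 sequencer tables concurrently (suite bodies elided):

import com.digitalasset.canton.{SequentialTestByKey, TestSemaphoreUtil}
import org.scalatest.wordspec.AnyWordSpec

// Both suites declare the same key, so one suite's beforeAll blocks until the
// other suite's afterAll releases the shared semaphore.
class SequencerStoreTestA extends AnyWordSpec with SequentialTestByKey {
  override protected val semaphoreKey: Option[String] = TestSemaphoreUtil.SEQUENCER_DB_H2
}

class SequencerStoreTestB extends AnyWordSpec with SequentialTestByKey {
  override protected val semaphoreKey: Option[String] = TestSemaphoreUtil.SEQUENCER_DB_H2
}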
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton - -import better.files.File -import com.digitalasset.canton.concurrent.Threading -import org.scalatest.BeforeAndAfterEach -import org.scalatest.flatspec.AnyFlatSpec - -import java.util.concurrent.ConcurrentHashMap -import scala.concurrent.duration.* -import scala.concurrent.{Await, Future} -import scala.util.{Failure, Success, Try} - -class UniqueBoundedCounterTest - extends AnyFlatSpec - with BaseTest - with BeforeAndAfterEach - with HasExecutionContext { - - private var dataFile: File = _ - private var lockFile: File = _ - private val testLogger = logger.underlying - - override def beforeEach(): Unit = { - dataFile = File.newTemporaryFile("unique_bounded_counter_test", ".dat") - lockFile = File(dataFile.pathAsString + ".lock") - super.beforeEach() - } - - override def afterEach(): Unit = { - dataFile.delete(swallowIOExceptions = true) - lockFile.delete(swallowIOExceptions = true) - super.afterEach() - } - - behavior of "UniqueBoundedCounter" - - it should "initialize counter to the start value if the file is new" in { - val initial = 50 - val counter = new UniqueBoundedCounter( - dataFile, - startValue = initial, - maxValue = 100, - )(testLogger) - - val result = counter.get() - result.success.value should be(initial) - } - - it should "use the existing value if the file already exists" in { - val initial = 123 - val maxValue = 200 - // Create and write an initial value manually first (simulate existing file) - val counter1 = - new UniqueBoundedCounter(dataFile, startValue = initial, maxValue = maxValue)( - testLogger - ) - counter1.incrementAndGet() - - // Create a new instance pointing to the same file, with a different initialValue - val counter2 = - new UniqueBoundedCounter(dataFile, startValue = 166, maxValue = maxValue)( - testLogger - ) - - // Get should return the value written previously, ignoring the new start value - counter2.get().success.value should be(initial + 1) - } - - it should "increment correctly" in { - val initial = 10 - val counter = new UniqueBoundedCounter( - dataFile, - startValue = initial, - maxValue = 100, - )(testLogger) - counter.incrementAndGet().success.value should be(initial + 1) - counter.incrementAndGet().success.value should be(initial + 2) - counter.get().success.value should be(initial + 2) - } - - it should "wrap around correctly when maximum value is reached" in { - val initial = 2 - val maxVal = 5 - val counter = new UniqueBoundedCounter( - dataFile, - startValue = initial, - maxValue = maxVal, - )(testLogger) - - counter.get().success.value should be(initial) - counter.incrementAndGet().success.value should be(initial + 1) - counter.incrementAndGet().success.value should be(initial + 2) - counter.incrementAndGet().success.value should be(maxVal) - counter.incrementAndGet().success.value should be(initial) - } - - it should "generate unique counters concurrently without lock exceptions" in { - val numThreads = Threading.detectNumberOfThreads(noTracingLogger).unwrap - val incrementsPerThread = 10000 - val totalIncrements = numThreads * incrementsPerThread - val startValue = 1000 - // Use Int.MaxValue to effectively prevent wrap-around during this concurrency test - val counter = new UniqueBoundedCounter(dataFile, startValue, Int.MaxValue)(testLogger) - - val obtainedValues = ConcurrentHashMap.newKeySet[Int]() - - val futures: Seq[Future[Unit]] = (1 to numThreads).map { threadId => - Future { - // Set a descriptive thread name for logging/debugging - val currentThread = 
Thread.currentThread() - currentThread.setName(s"${getClass.getSimpleName}_lock-contention-test_worker-$threadId") - for (i <- 1 to incrementsPerThread) { - val result: Try[Int] = counter.incrementAndGet() - result match { - case Success(value) => - // Attempt to add the obtained value. add() returns false if it was already present - if (!obtainedValues.add(value)) { - fail( - s"Duplicate value detected: $value by thread ${currentThread.getName} increment $i" - ) - } - case Failure(e) => - // If any increment fails (e.g., lock timeout after retries), fail the future/test - fail( - s"Concurrent increment failed for thread ${currentThread.getName} increment $i: ${e.getMessage}", - e, - ) - } - } - } - } - - // Wait long enough; may run slower when run with other tests concurrently - Await.result(Future.sequence(futures), 3.minutes) - - val finalValue = counter.get() - finalValue.success.value should be(startValue + totalIncrements) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/UnstableTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/UnstableTest.scala deleted file mode 100644 index 47a4a777a1..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/UnstableTest.scala +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton - -import org.scalatest.Tag - -/** Tag used to mark individual test cases as unstable. - * - * See [[annotations.UnstableTest]] for more information. - */ -object UnstableTest extends Tag("UnstableTest") diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/ConcurrentCacheBehaviorSpecBase.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/ConcurrentCacheBehaviorSpecBase.scala deleted file mode 100644 index fa3676a06c..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/ConcurrentCacheBehaviorSpecBase.scala +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
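The behaviour UniqueBoundedCounterTest pins down above reduces to a pure step function; the semantics below are inferred from the test assertions (the real class additionally persists the value in a file guarded by an inter-process lock):

// Values run from startValue up to maxValue and then wrap back to startValue:
// with startValue = 2 and maxValue = 5 the sequence is 3, 4, 5, 2, 3, ...
def next(current: Int, startValue: Int, maxValue: Int): Int =
  if (current >= maxValue) startValue else current + 1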
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.caching - -import com.digitalasset.canton.HasExecutionContext -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.AnyWordSpecLike - -trait ConcurrentCacheBehaviorSpecBase - extends ConcurrentCacheSpecBase - with AnyWordSpecLike - with Matchers - with HasExecutionContext - with NamedLogging { - - override protected def loggerFactory: NamedLoggerFactory = NamedLoggerFactory.root - - name should { - "compute the correct results" in { - val cache = newCache() - - cache.getOrAcquire(1, _.toString) should be("1") - cache.getOrAcquire(2, _.toString) should be("2") - cache.getOrAcquire(3, _.toString) should be("3") - cache.getOrAcquire(2, _.toString) should be("2") - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/ConcurrentCacheCachingSpecBase.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/ConcurrentCacheCachingSpecBase.scala deleted file mode 100644 index 9347ba953d..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/ConcurrentCacheCachingSpecBase.scala +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.caching - -import com.digitalasset.canton.HasExecutionContext -import com.digitalasset.canton.logging.NamedLogging -import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.AnyWordSpecLike - -import java.util.concurrent.atomic.AtomicInteger - -trait ConcurrentCacheCachingSpecBase - extends ConcurrentCacheSpecBase - with AnyWordSpecLike - with Matchers - with HasExecutionContext - with NamedLogging { - name should { - "compute once, and cache" in { - val cache = newCache() - val counter = new AtomicInteger(0) - - def compute(value: Integer): String = { - counter.incrementAndGet() - value.toString - } - - cache.getOrAcquire(1, compute) - cache.getOrAcquire(1, compute) - cache.getOrAcquire(1, compute) - cache.getOrAcquire(2, compute) - - counter.get() should be(2) - } - - "return `None` on `getIfPresent` if the value is not present" in { - val cache = newCache() - - cache.getIfPresent(7) should be(None) - } - - "return the value on `getIfPresent` if the value is present" in { - val cache = newCache() - - cache.getOrAcquire(7, _.toString) should be("7") - cache.getIfPresent(7) should be(Some("7")) - } - - "`put` values" in { - val cache = newCache() - - cache.put(7, "7") - cache.getIfPresent(7) should be(Some("7")) - - val counter = new AtomicInteger(0) - - def compute(value: Integer): String = { - counter.incrementAndGet() - value.toString - } - - cache.getOrAcquire(7, compute) should be("7") - counter.get() should be(0) - } - } - - "`putAll` values" in { - val cache = newCache() - - cache.putAll(Map(Int.box(7) -> "7", Int.box(8) -> "8")) - cache.getIfPresent(7) should be(Some("7")) - cache.getIfPresent(8) should be(Some("8")) - - val counter = new AtomicInteger(0) - - def compute(value: Integer): String = { - counter.incrementAndGet() - value.toString - } - - cache.getOrAcquire(7, compute) should be("7") - cache.getOrAcquire(8, compute) should be("8") - - counter.get() should be(0) - } - - "`invalidateAll` values" in { - val cache = newCache() - - cache.putAll(Map(Int.box(7) -> "7", Int.box(8) -> "8")) - cache.getIfPresent(7) 
should be(Some("7")) - cache.getIfPresent(8) should be(Some("8")) - - cache.invalidateAll() - - cache.getIfPresent(7) should be(None) - cache.getIfPresent(8) should be(None) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/ConcurrentCacheEvictionSpecBase.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/ConcurrentCacheEvictionSpecBase.scala deleted file mode 100644 index c9db67f00d..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/ConcurrentCacheEvictionSpecBase.scala +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.caching - -import org.scalatest.concurrent.Eventually -import org.scalatest.matchers.should.Matchers -import org.scalatest.time.{Second, Span} -import org.scalatest.wordspec.AnyWordSpecLike - -import scala.concurrent.ExecutionContext -import scala.util.Random - -trait ConcurrentCacheEvictionSpecBase - extends ConcurrentCacheBehaviorSpecBase - with AnyWordSpecLike - with Matchers - with Eventually { - - override implicit def patienceConfig: PatienceConfig = PatienceConfig(scaled(Span(1, Second))) - - protected def newLargeCache()(implicit - executionContext: ExecutionContext - ): ConcurrentCache[Integer, String] - - name should { - "evict values eventually, once the limit has been reached" in { - val cache = newLargeCache() - val values = Iterator.continually[Integer](Random.nextInt()).take(1000).toSet.toVector - - values.foreach { value => - cache.getOrAcquire(value, _.toString) - } - - // The cache may not evict straight away. We should keep trying. - eventually { - val cachedValues = values.map(cache.getIfPresent).filter(_.isDefined) - // It may evict more than expected, and it might grow past the bounds again before we check. - cachedValues.length should (be > 16 and be < 500) - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/ConcurrentCacheSpecBase.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/ConcurrentCacheSpecBase.scala deleted file mode 100644 index 3e12492a04..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/ConcurrentCacheSpecBase.scala +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.caching - -import scala.concurrent.ExecutionContext - -trait ConcurrentCacheSpecBase { - protected def name: String - - protected def newCache()(implicit - executionContext: ExecutionContext - ): ConcurrentCache[Integer, String] -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/MapBackedCacheForTesting.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/MapBackedCacheForTesting.scala deleted file mode 100644 index 211629ef14..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/MapBackedCacheForTesting.scala +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
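The eviction spec above deliberately asserts a size range inside eventually rather than an exact size: size-bounded caches of this kind run eviction as asynchronous maintenance, so the observable size can transiently exceed the bound. The same point in isolation, as a sketch against a plain Caffeine cache (Caffeine is assumed here to back the sized caches under test; cleanUp() merely forces the pending maintenance for the demo):

import com.github.benmanes.caffeine.cache.Caffeine

object EvictionIsEventual extends App {
  val cache = Caffeine.newBuilder().maximumSize(16).build[Integer, String]()
  (1 to 1000).foreach(i => cache.put(i, i.toString))
  // The bound is enforced lazily, so the size may still be far above 16 here.
  cache.cleanUp()
  println(s"entries after maintenance: ${cache.estimatedSize()}") // close to 16
}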
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.caching - -import java.util.concurrent.ConcurrentMap -import scala.jdk.CollectionConverters.* - -final class MapBackedCacheForTesting[Key, Value](store: ConcurrentMap[Key, Value]) - extends ConcurrentCache[Key, Value] { - override def put(key: Key, value: Value): Unit = { - store.put(key, value) - () - } - - override def putAll(mappings: Map[Key, Value]): Unit = - store.putAll(mappings.asJava) - - override def getIfPresent(key: Key): Option[Value] = - Option(store.get(key)) - - override def getOrAcquire(key: Key, acquire: Key => Value): Value = - store.computeIfAbsent(key, acquire(_)) - - override def invalidateAll(): Unit = store.clear() -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/MapBackedCacheForTestingSpec.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/MapBackedCacheForTestingSpec.scala deleted file mode 100644 index d067e52edb..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/MapBackedCacheForTestingSpec.scala +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.caching - -import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.AnyWordSpec - -import java.util.concurrent.ConcurrentHashMap - -final class MapBackedCacheForTestingSpec - extends AnyWordSpec - with Matchers - with ConcurrentCacheBehaviorSpecBase - with ConcurrentCacheCachingSpecBase { - override def name: String = "map-backed cache" - - override protected def newCache()(implicit - executionContext: scala.concurrent.ExecutionContext - ): ConcurrentCache[Integer, String] = - new MapBackedCacheForTesting(new ConcurrentHashMap) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/MappedCacheSpec.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/MappedCacheSpec.scala deleted file mode 100644 index 52a503b403..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/MappedCacheSpec.scala +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
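MapBackedCacheForTesting above leans on ConcurrentMap.computeIfAbsent for getOrAcquire, which is what lets even this trivial adapter honor the "compute once per key" expectation of the caching specs. That guarantee in isolation, using only the JDK:

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicInteger

object ComputeIfAbsentOnce extends App {
  val store = new ConcurrentHashMap[Integer, String]()
  val computations = new AtomicInteger(0)

  val threads = (1 to 8).map { _ =>
    new Thread(() => {
      // ConcurrentHashMap applies the mapping function at most once per absent key,
      // even under contention.
      store.computeIfAbsent(7, k => { computations.incrementAndGet(); k.toString })
      ()
    })
  }
  threads.foreach(_.start())
  threads.foreach(_.join())
  assert(store.get(7) == "7" && computations.get() == 1)
}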
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.caching - -import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.AnyWordSpec - -import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap} - -final class MappedCacheSpec - extends AnyWordSpec - with Matchers - with ConcurrentCacheBehaviorSpecBase - with ConcurrentCacheCachingSpecBase { - override def name: String = "mapped cache" - - override protected def newCache()(implicit - executionContext: scala.concurrent.ExecutionContext - ): ConcurrentCache[Integer, String] = - new MapBackedCacheForTesting(new ConcurrentHashMap) - - name should { - "transform the values into and out of the cache" in { - val store: ConcurrentMap[Int, String] = new ConcurrentHashMap - val cache: ConcurrentCache[Int, String] = new MapBackedCacheForTesting(store) - val mappedCache = cache.mapValues[String]( - mapAfterReading = value => value.substring(6), - mapBeforeWriting = value => Some("value " + value), - ) - - mappedCache.put(7, "seven") - Option(store.get(7)) should be(Some("value seven")) - mappedCache.getIfPresent(7) should be(Some("seven")) - } - - "allow the mapping to change type" in { - val store: ConcurrentMap[Int, String] = new ConcurrentHashMap - val cache: ConcurrentCache[Int, String] = new MapBackedCacheForTesting(store) - val mappedCache = cache.mapValues[Double]( - mapAfterReading = value => java.lang.Double.parseDouble(value), - mapBeforeWriting = value => Some(value.toString), - ) - - mappedCache.put(7, 789.5) - Option(store.get(7)) should be(Some("789.5")) - mappedCache.getIfPresent(7) should be(Some(789.5)) - } - - "not write if the mapping is lossy" in { - val store: ConcurrentMap[Int, Int] = new ConcurrentHashMap - val cache: ConcurrentCache[Int, Int] = new MapBackedCacheForTesting(store) - val mappedCache = cache.mapValues[String]( - mapAfterReading = value => value.toString, - mapBeforeWriting = value => - try { - Some(Integer.parseInt(value)) - } catch { - case _: NumberFormatException => None - }, - ) - - mappedCache.put(1, "one two three") - mappedCache.put(7, "789") - - Option(store.get(1)) should be(None) - Option(store.get(7)) should be(Some(789)) - - mappedCache.getIfPresent(1) should be(None) - mappedCache.getIfPresent(7) should be(Some("789")) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/NoCacheSpec.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/NoCacheSpec.scala deleted file mode 100644 index b9622d329e..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/NoCacheSpec.scala +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
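The lossy-write behaviour tested above has a compact statement independent of any cache: a write goes through mapBeforeWriting and is skipped entirely when that mapping returns None, while reads still go through mapAfterReading. A toy model of that write path (illustrative names, not canton API):

import scala.collection.mutable

object LossyWriteModel extends App {
  val store = mutable.Map.empty[Int, Int]
  val mapBeforeWriting: String => Option[Int] = _.toIntOption

  def put(key: Int, value: String): Unit =
    mapBeforeWriting(value).foreach(store.update(key, _)) // None => skip the store

  put(1, "one two three") // lossy: dropped
  put(7, "789")           // stored as 789
  assert(store.get(1).isEmpty && store.get(7).contains(789))
}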
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.caching - -import org.scalatest.wordspec.AnyWordSpec - -import java.util.concurrent.atomic.AtomicInteger - -class NoCacheSpec extends AnyWordSpec with ConcurrentCacheBehaviorSpecBase { - override protected lazy val name: String = "a non-existent cache" - - override protected def newCache()(implicit - executionContext: scala.concurrent.ExecutionContext - ): ConcurrentCache[Integer, String] = - Cache.none - - "a non-existent cache" should { - "compute every time" in { - val cache = newCache() - val counter = new AtomicInteger(0) - - def compute(value: Integer): String = { - counter.incrementAndGet() - value.toString - } - - cache.getOrAcquire(1, compute) - cache.getOrAcquire(1, compute) - cache.getOrAcquire(1, compute) - cache.getOrAcquire(2, compute) - - counter.get() should be(4) - } - - "always return `None` on `getIfPresent`" in { - val cache = Cache.none[Integer, String] - - cache.getIfPresent(7) should be(None) - cache.getOrAcquire(7, _.toString) should be("7") - cache.getIfPresent(7) should be(None) - } - - "do nothing on `put`" in { - val cache = Cache.none[Integer, String] - - cache.put(7, "7") - cache.getIfPresent(7) should be(None) - - val counter = new AtomicInteger(0) - - def compute(value: Integer): String = { - counter.incrementAndGet() - value.toString - } - - cache.getOrAcquire(7, compute) should be("7") - counter.get() should be(1) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/ScaffeineCacheTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/ScaffeineCacheTest.scala deleted file mode 100644 index b443b9362b..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/ScaffeineCacheTest.scala +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
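NoCacheSpec's expectations amount to a pass-through: getOrAcquire always delegates, and nothing is ever retained. A sketch of the smallest structure satisfying that contract (how canton's Cache.none behaves, not necessarily how it is written):

// Pass-through "cache": recomputes on every getOrAcquire, stores nothing.
final class PassThrough[K, V] {
  def put(key: K, value: V): Unit = ()
  def getIfPresent(key: K): Option[V] = None
  def getOrAcquire(key: K, acquire: K => V): V = acquire(key)
}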
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.caching - -import com.digitalasset.canton.config.CachingConfigs -import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, UnlessShutdown} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.{BaseTest, FailOnShutdown} -import org.scalatest.wordspec.AsyncWordSpec - -import java.util.concurrent.atomic.{AtomicInteger, AtomicReference} - -class ScaffeineCacheTest extends AsyncWordSpec with BaseTest with FailOnShutdown { - - private def getValueBroken: Int => FutureUnlessShutdown[Nothing] = (_: Int) => - FutureUnlessShutdown.abortedDueToShutdown - private def getValue(input: Int): FutureUnlessShutdown[Int] = FutureUnlessShutdown.pure(input) - - "buildAsync" should { - "Get a value when not shutting down" in { - val keysCache = - ScaffeineCache.buildAsync[FutureUnlessShutdown, Int, Int]( - cache = CachingConfigs.testing.keyCache.buildScaffeine(), - loader = getValue, - )(logger, "") - for { - result <- keysCache.get(10) - } yield { - result shouldBe 10 - } - } - - "Handle AbortedDueToShutdown in get" in { - val keysCache = - ScaffeineCache.buildAsync[FutureUnlessShutdown, Int, Int]( - cache = CachingConfigs.testing.keyCache.buildScaffeine(), - loader = getValueBroken, - )(logger, "") - - for { - result <- keysCache.get(10).unwrap - } yield { - result shouldBe UnlessShutdown.AbortedDueToShutdown - } - } - - // Note that Scaffeine.getAll returns a failed future that wraps the underlying exception - // in java.util.concurrent.CompletionException. - "Handle AbortedDueToShutdown in getAll" in { - val keysCache = - ScaffeineCache.buildAsync[FutureUnlessShutdown, Int, Int]( - cache = CachingConfigs.testing.keyCache.buildScaffeine(), - loader = getValueBroken, - allLoader = Some(_ => FutureUnlessShutdown.abortedDueToShutdown), - )(logger, "") - - for { - result <- keysCache.getAll(Set(10)).unwrap - } yield { - result shouldBe UnlessShutdown.AbortedDueToShutdown - } - } - - "Handle AbortedDueToShutdown in compute" in { - val keysCache = - ScaffeineCache.buildAsync[FutureUnlessShutdown, Int, Int]( - cache = CachingConfigs.testing.keyCache.buildScaffeine(), - loader = getValue, - )(logger, "") - - for { - result <- keysCache.compute(10, (_, _) => FutureUnlessShutdown.abortedDueToShutdown).unwrap - } yield { - result shouldBe UnlessShutdown.AbortedDueToShutdown - } - } - - "Pass cached value to compute" in { - val keysCache = - ScaffeineCache.buildAsync[FutureUnlessShutdown, Int, Int]( - cache = CachingConfigs.testing.keyCache.buildScaffeine(), - loader = getValue, - )(logger, "") - val previousValue = new AtomicReference[Option[Int]]() - for { - _ <- keysCache.get(10) - newValue <- keysCache.compute( - 10, - (_, previousO) => { - previousValue.set(previousO) - FutureUnlessShutdown.pure(20) - }, - ) - } yield { - newValue shouldBe 20 - previousValue.get should contain(10) - } - } - - "Allow entries to be cleared" in { - val loads = new AtomicInteger(0) - def getValueCount(input: Int): FutureUnlessShutdown[Int] = { - loads.incrementAndGet() - FutureUnlessShutdown.pure(input) - } - - val keysCache = - ScaffeineCache.buildAsync[FutureUnlessShutdown, Int, Int]( - cache = CachingConfigs.testing.keyCache.buildScaffeine(), - loader = getValueCount, - )(logger, "") - - for { - _ <- keysCache.get(2) - _ <- keysCache.get(3) - _ <- keysCache.getAll(Seq(2, 3)) - _ = keysCache.clear((i, _) => i == 2) - _ <- keysCache.get(2) - _ <- keysCache.get(3) - } yield { - loads.get() shouldBe 3 // Initial 2 + 1
reload - } - } - } - - "buildTracedAsync" should { - "ignore the trace context stored with a key" in { - val counter = new AtomicInteger() - val keysCache = ScaffeineCache.buildTracedAsync[FutureUnlessShutdown, Int, Int]( - cache = CachingConfigs.testing.keyCache.buildScaffeine(), - loader = _ => input => getValue(counter.incrementAndGet() + input), - )(logger, "") - for { - result1 <- keysCache.get(10)(TraceContext.empty) - result2 <- keysCache.get(10)(TraceContext.createNew("test")) - } yield { - result1 shouldBe result2 - } - }.failOnShutdown - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/SizedCacheSpec.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/SizedCacheSpec.scala deleted file mode 100644 index 4bd9a77b2d..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/caching/SizedCacheSpec.scala +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.caching - -import org.scalatest.wordspec.AnyWordSpec - -import scala.concurrent.ExecutionContext - -class SizedCacheSpec - extends AnyWordSpec - with ConcurrentCacheBehaviorSpecBase - with ConcurrentCacheCachingSpecBase - with ConcurrentCacheEvictionSpecBase { - override protected lazy val name: String = "a sized cache" - - override protected def newCache()(implicit - executionContext: ExecutionContext - ): ConcurrentCache[Integer, String] = - SizedCache.from[Integer, String](SizedCache.Configuration(maximumSize = 16)) - - override protected def newLargeCache()(implicit - executionContext: ExecutionContext - ): ConcurrentCache[Integer, String] = - SizedCache.from[Integer, String](SizedCache.Configuration(maximumSize = 128)) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/common/sequencer/grpc/SequencerInfoLoaderTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/common/sequencer/grpc/SequencerInfoLoaderTest.scala deleted file mode 100644 index 31aa791564..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/common/sequencer/grpc/SequencerInfoLoaderTest.scala +++ /dev/null @@ -1,515 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
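Per the note in ScaffeineCacheTest, failures surfacing from Scaffeine's getAll arrive wrapped in java.util.concurrent.CompletionException, so any code mapping such failures back to domain outcomes (here, AbortedDueToShutdown) has to unwrap the cause first. A generic sketch of that unwrapping step, using only standard-library API:

import java.util.concurrent.CompletionException
import scala.concurrent.{ExecutionContext, Future}

def unwrapCompletion[A](f: Future[A])(implicit ec: ExecutionContext): Future[A] =
  f.recoverWith {
    // Surface the original failure instead of the CompletionException wrapper.
    case e: CompletionException if e.getCause != null => Future.failed(e.getCause)
  }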
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.common.sequencer.grpc - -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.common.sequencer.SequencerConnectClient.SynchronizerClientBootstrapInfo -import com.digitalasset.canton.common.sequencer.grpc.SequencerInfoLoader.{ - LoadSequencerEndpointInformationResult, - SequencerInfoLoaderError, -} -import com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, Port, PositiveInt} -import com.digitalasset.canton.lifecycle.UnlessShutdown.Outcome -import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, UnlessShutdown} -import com.digitalasset.canton.networking.Endpoint -import com.digitalasset.canton.sequencing.{ - GrpcSequencerConnection, - SequencerConnection, - SequencerConnectionValidation, - SubmissionRequestAmplification, -} -import com.digitalasset.canton.topology.{ - PhysicalSynchronizerId, - SequencerId, - SynchronizerId, - UniqueIdentifier, -} -import com.digitalasset.canton.tracing.TracingConfig -import com.digitalasset.canton.version.{ProtocolVersionCompatibility, ReleaseVersion} -import com.digitalasset.canton.{ - BaseTest, - BaseTestWordSpec, - HasExecutionContext, - SequencerAlias, - SynchronizerAlias, -} -import org.apache.pekko.actor.ActorSystem -import org.apache.pekko.stream.Materializer -import org.scalatest.Assertion - -import scala.concurrent.Promise - -class SequencerInfoLoaderTest extends BaseTestWordSpec with HasExecutionContext { - - private lazy val sequencer1 = SequencerId( - UniqueIdentifier.tryFromProtoPrimitive("sequencer1::namespace") - ) - private lazy val sequencer2 = SequencerId( - UniqueIdentifier.tryFromProtoPrimitive("sequencer2::namespace") - ) - private lazy val sequencerAlias1 = SequencerAlias.tryCreate("sequencer1") - private lazy val sequencerAlias2 = SequencerAlias.tryCreate("sequencer2") - private lazy val sequencerAlias3 = SequencerAlias.tryCreate("sequencer3") - private lazy val synchronizerId1 = SynchronizerId.tryFromString("first::namespace").toPhysical - private lazy val synchronizerId2 = SynchronizerId.tryFromString("second::namespace").toPhysical - private lazy val endpoint1 = Endpoint("localhost", Port.tryCreate(1001)) - private lazy val endpoint2 = Endpoint("localhost", Port.tryCreate(1002)) - private lazy val endpoint3 = Endpoint("localhost", Port.tryCreate(1003)) - private lazy val staticSynchronizerParameters = BaseTest.defaultStaticSynchronizerParametersWith() - private lazy val synchronizerAlias = SynchronizerAlias.tryCreate("synchronizer1") - - private def mapArgs( - args: List[ - ( - SequencerAlias, - Endpoint, - Either[SequencerInfoLoaderError, SynchronizerClientBootstrapInfo], - ) - ] - ): List[LoadSequencerEndpointInformationResult] = - args - .map { case (alias, endpoint, result) => - ( - GrpcSequencerConnection( - NonEmpty.mk(Seq, endpoint), - transportSecurity = false, - None, - alias, - None, - ), - result, - ) - } - .map { - case (conn, Right(result)) => - LoadSequencerEndpointInformationResult.Valid( - conn, - result, - staticSynchronizerParameters, - ) - case (conn, Left(result)) => - LoadSequencerEndpointInformationResult.NotValid(conn, result) - } - - private def run( - expectSynchronizerId: Option[PhysicalSynchronizerId], - args: List[ - ( - SequencerAlias, - Endpoint, - Either[SequencerInfoLoaderError, SynchronizerClientBootstrapInfo], - ) - ], - validation: SequencerConnectionValidation = SequencerConnectionValidation.All, - threshold: PositiveInt = 
PositiveInt.one, - ): Either[Seq[LoadSequencerEndpointInformationResult.NotValid], Unit] = SequencerInfoLoader - .validateNewSequencerConnectionResults( - expectSynchronizerId, - validation, - threshold, - logger, - )(mapArgs(args)) - - private def hasError( - expectSynchronizerId: Option[PhysicalSynchronizerId], - args: List[ - ( - SequencerAlias, - Endpoint, - Either[SequencerInfoLoaderError, SynchronizerClientBootstrapInfo], - ) - ], - validation: SequencerConnectionValidation = SequencerConnectionValidation.All, - threshold: PositiveInt = PositiveInt.one, - )(check: String => Assertion): Assertion = { - val result = run(expectSynchronizerId, args, validation, threshold) - result.left.value should have length (1) - result.left.value.foreach(x => check(x.error.cause)) - succeed - } - - "endpoint result validation" should { - "left is returned as left" in { - hasError( - None, - List( - ( - sequencerAlias1, - endpoint1, - Right(SynchronizerClientBootstrapInfo(synchronizerId1, sequencer1)), - ), - ( - sequencerAlias2, - endpoint2, - Left(SequencerInfoLoaderError.InvalidState("booh")), - ), - ), - )(_ should include("booh")) - } - "detect mismatches in synchronizer id" in { - hasError( - None, - List( - ( - sequencerAlias1, - endpoint1, - Right(SynchronizerClientBootstrapInfo(synchronizerId1, sequencer1)), - ), - ( - sequencerAlias2, - endpoint2, - Right(SynchronizerClientBootstrapInfo(synchronizerId2, sequencer2)), - ), - ), - )(_ should include("Synchronizer id mismatch")) - } - "detect if synchronizer id does not match expected one" in { - hasError( - Some(synchronizerId2), - List( - ( - sequencerAlias1, - endpoint1, - Right(SynchronizerClientBootstrapInfo(synchronizerId1, sequencer1)), - ) - ), - )(_ should include("does not match expected")) - } - "detect mismatches in sequencer-id between an alias" in { - hasError( - None, - List( - ( - sequencerAlias1, - endpoint1, - Right(SynchronizerClientBootstrapInfo(synchronizerId1, sequencer1)), - ), - ( - sequencerAlias1, - endpoint2, - Right(SynchronizerClientBootstrapInfo(synchronizerId1, sequencer2)), - ), - ), - )(_ should include("sequencer-id mismatch")) - } - "detect the same sequencer-id among different sequencer aliases" in { - hasError( - None, - List( - ( - sequencerAlias1, - endpoint1, - Right(SynchronizerClientBootstrapInfo(synchronizerId1, sequencer1)), - ), - ( - sequencerAlias2, - endpoint2, - Right(SynchronizerClientBootstrapInfo(synchronizerId1, sequencer1)), - ), - ), - )(_ should include("same sequencer-id reported by different alias")) - } - "accept if everything is fine" in { - run( - None, - List( - ( - sequencerAlias1, - endpoint1, - Right(SynchronizerClientBootstrapInfo(synchronizerId1, sequencer1)), - ), - ( - sequencerAlias2, - endpoint2, - Right(SynchronizerClientBootstrapInfo(synchronizerId1, sequencer2)), - ), - ), - ).value shouldBe (()) - } - "tolerate errors if threshold can be reached" in { - forAll( - Seq(SequencerConnectionValidation.Active, SequencerConnectionValidation.ThresholdActive) - ) { validation => - run( - None, - List( - ( - sequencerAlias1, - endpoint1, - Right(SynchronizerClientBootstrapInfo(synchronizerId1, sequencer1)), - ), - (sequencerAlias2, endpoint2, Left(SequencerInfoLoaderError.InvalidState("booh"))), - ( - sequencerAlias3, - endpoint3, - Right(SynchronizerClientBootstrapInfo(synchronizerId1, sequencer2)), - ), - ), - validation, - threshold = PositiveInt.tryCreate(2), - ).value shouldBe (()) - } - } - "tolerate errors for Active if threshold can not be reached" in { - run( - None, - 
List( - ( - sequencerAlias1, - endpoint1, - Right(SynchronizerClientBootstrapInfo(synchronizerId1, sequencer1)), - ), - (sequencerAlias2, endpoint2, Left(SequencerInfoLoaderError.InvalidState("booh2"))), - (sequencerAlias3, endpoint3, Left(SequencerInfoLoaderError.InvalidState("booh3"))), - ), - SequencerConnectionValidation.Active, - threshold = PositiveInt.tryCreate(2), - ).value shouldBe (()) - } - "complain about errors for ThresholdActive if threshold can not be reached" in { - val result = run( - None, - List( - ( - sequencerAlias1, - endpoint1, - Right(SynchronizerClientBootstrapInfo(synchronizerId1, sequencer1)), - ), - (sequencerAlias2, endpoint2, Left(SequencerInfoLoaderError.InvalidState("booh2"))), - (sequencerAlias3, endpoint3, Left(SequencerInfoLoaderError.InvalidState("booh3"))), - ), - SequencerConnectionValidation.ThresholdActive, - threshold = PositiveInt.tryCreate(2), - ) - result.left.value should have length 2 - forAll(result.left.value)(_.error.cause should (include("booh2") or include("booh3"))) - } - } - - "aggregation" should { - - def aggregate( - args: List[ - ( - SequencerAlias, - Endpoint, - Either[SequencerInfoLoaderError, SynchronizerClientBootstrapInfo], - ) - ] - ): Either[SequencerInfoLoaderError, SequencerInfoLoader.SequencerAggregatedInfo] = - SequencerInfoLoader.aggregateBootstrapInfo( - logger, - sequencerTrustThreshold = PositiveInt.tryCreate(2), - SubmissionRequestAmplification.NoAmplification, - SequencerConnectionValidation.All, - None, - )(mapArgs(args)) - - "accept if everything is fine" in { - aggregate( - List( - ( - sequencerAlias1, - endpoint1, - Right(SynchronizerClientBootstrapInfo(synchronizerId1, sequencer1)), - ), - ( - sequencerAlias2, - endpoint2, - Right(SynchronizerClientBootstrapInfo(synchronizerId1, sequencer2)), - ), - ) - ) match { - case Right(_) => succeed - case Left(value) => fail(value.toString) - } - } - - "reject if we don't have enough sequencers" in { - aggregate( - List( - ( - sequencerAlias1, - endpoint1, - Right(SynchronizerClientBootstrapInfo(synchronizerId1, sequencer1)), - ), - (sequencerAlias2, endpoint2, Left(SequencerInfoLoaderError.InvalidState("booh"))), - ) - ) match { - case Right(_) => fail("should not succeed") - case Left(_) => succeed - } - } - - } - - "sequencer info loading" should { - val actorSystem = ActorSystem() - implicit val materializer: Materializer = Materializer(actorSystem) - val sequencerInfoLoader = new SequencerInfoLoader( - ProcessingTimeout(), - TracingConfig.Propagation.Disabled, - clientProtocolVersions = ProtocolVersionCompatibility.supportedProtocols( - includeAlphaVersions = true, - includeBetaVersions = true, - release = ReleaseVersion.current, - ), - minimumProtocolVersion = Some(testedProtocolVersion), - dontWarnOnDeprecatedPV = false, - loggerFactory = loggerFactory, - ) - // Futures in loadSequencerInfoAsync can race such that more than the expected tolerance can be returned.
- val toleranceForRaciness = 3 - - "return complete results when requested" in { - val scs = sequencerConnections(PositiveInt.tryCreate(10)) - val res = sequencerInfoLoader - .loadSequencerEndpointsParallel( - synchronizerAlias, - scs, - parallelism = NonNegativeInt.tryCreate(3), - maybeThreshold = None, - )(loadSequencerInfoFactory(Map(2 -> nonValidResultF(scs(1))))) - .futureValueUS - res.size shouldBe scs.size - val (valid, invalid) = splitValidAndInvalid(res) - valid shouldBe (1 to 10).filterNot(_ == 2) - invalid shouldBe Seq(2) - } - - "return partial results when threshold specified" in { - val threshold = PositiveInt.tryCreate(3) - val scs = sequencerConnections(PositiveInt.tryCreate(10)) - val invalidSequencerConnections = Map( - 2 -> nonValidResultF(scs(1)), - 3 -> nonValidResultF(scs(2)), - ) - val res = sequencerInfoLoader - .loadSequencerEndpointsParallel( - synchronizerAlias, - scs, - parallelism = NonNegativeInt.tryCreate(3), - maybeThreshold = Some(threshold), - )(loadSequencerInfoFactory(invalidSequencerConnections)) - .futureValueUS - val (valid, _) = splitValidAndInvalid(res) - valid.intersect(Seq(2, 3)) shouldBe Seq.empty - // note that we can't say anything about invalid due to raciness, e.g. - // if 1, 4, and 5 are loaded first as actually happened on CI. - // So we can't even assert: invalid shouldBe Seq(2, 3) - assertBetween( - "partial result size", - valid.size, - threshold.unwrap, - toleranceForRaciness, - ) - } - - "not get stuck on hung sequencers" in { - val threshold = PositiveInt.tryCreate(5) - val delayedPromises = - Seq(1, 3, 4) - .map(_ -> Promise[UnlessShutdown[LoadSequencerEndpointInformationResult]]()) - .toMap - val scs = sequencerConnections(PositiveInt.tryCreate(10)) - val res = sequencerInfoLoader - .loadSequencerEndpointsParallel( - synchronizerAlias, - scs, - parallelism = NonNegativeInt.tryCreate(4), - maybeThreshold = Some(threshold), - )(loadSequencerInfoFactory(delayedPromises.map { case (k, v) => - k -> FutureUnlessShutdown(v.future) - })) - .futureValueUS - val (valid, invalid) = splitValidAndInvalid(res) - valid.intersect(Seq(1, 3, 4)) shouldBe Seq.empty - invalid shouldBe Seq.empty - res.size shouldBe threshold.unwrap - assertBetween("hung sequencers result size", res.size, threshold.unwrap, toleranceForRaciness) - logger.info("Before exiting test, complete futures") - delayedPromises.foreach { case (i, p) => p.success(Outcome(validResult(scs(i - 1)))) } - } - - "return early in case of a failed Future" in { - val scs = sequencerConnections(PositiveInt.tryCreate(10)) - val invalidSequencerConnections = Map( - 5 -> FutureUnlessShutdown.failed(new RuntimeException("booh")) - ) - loggerFactory - .assertThrowsAndLogsAsync[RuntimeException]( - sequencerInfoLoader - .loadSequencerEndpointsParallel( - synchronizerAlias, - scs, - parallelism = NonNegativeInt.tryCreate(3), - maybeThreshold = None, - )(loadSequencerInfoFactory(invalidSequencerConnections)) - .failOnShutdown, - assertion = _.getMessage should include("booh"), - _.errorMessage should include( - "Exception loading sequencer Sequencer 'sequencer5' info in synchronizer Synchronizer 'synchronizer1'" - ), - ) - .futureValue - } - } - - private def sequencerConnections(n: PositiveInt): NonEmpty[Seq[SequencerConnection]] = - NonEmpty - .from( - (1 to n.value) - .map(i => - GrpcSequencerConnection( - NonEmpty.mk(Seq, endpoint1), - transportSecurity = false, - None, - SequencerAlias.tryCreate(s"sequencer$i"), - None, - ) - ) - ) - .value - - private def loadSequencerInfoFactory( - m: Map[Int, 
FutureUnlessShutdown[LoadSequencerEndpointInformationResult]] - ): SequencerConnection => FutureUnlessShutdown[LoadSequencerEndpointInformationResult] = sc => - m.getOrElse( - sc.sequencerAlias.unwrap.drop("sequencer".length).toInt, - FutureUnlessShutdown.pure(validResult(sc)), - ) - - private def validResult(sc: SequencerConnection): LoadSequencerEndpointInformationResult = - LoadSequencerEndpointInformationResult.Valid( - sc, - SynchronizerClientBootstrapInfo(synchronizerId1, sequencer1), - staticSynchronizerParameters, - ) - - private def nonValidResultF( - sc: SequencerConnection - ): FutureUnlessShutdown[LoadSequencerEndpointInformationResult] = - FutureUnlessShutdown.pure( - LoadSequencerEndpointInformationResult - .NotValid(sc, SequencerInfoLoaderError.InvalidState("booh")) - ) - - private def assertBetween(check: String, value: Int, expected: Int, tolerance: Int): Assertion = - clue(check) { - value should (be >= expected and be <= (expected + tolerance)) - } - - private def splitValidAndInvalid( - results: Seq[LoadSequencerEndpointInformationResult] - ): (Seq[Int], Seq[Int]) = { - val valid = results.collect { case LoadSequencerEndpointInformationResult.Valid(c, _, _) => - c.sequencerAlias.unwrap.drop("sequencer".length).toInt - }.sorted - val notValid = results.collect { case LoadSequencerEndpointInformationResult.NotValid(c, _) => - c.sequencerAlias.unwrap.drop("sequencer".length).toInt - }.sorted - (valid, notValid) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/concurrent/DirectExecutionContextTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/concurrent/DirectExecutionContextTest.scala deleted file mode 100644 index 6efb737efb..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/concurrent/DirectExecutionContextTest.scala +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
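The threshold behaviour exercised throughout SequencerInfoLoaderTest reduces to a simple acceptance rule: a batch of per-endpoint results is good enough once the valid ones reach the threshold, and only then may the failures be ignored. A pure model of that rule (illustrative, not the loader's actual signature):

def meetsThreshold[E, A](results: Seq[Either[E, A]], threshold: Int): Either[Seq[E], Seq[A]] = {
  val (errors, valid) = results.partitionMap(identity)
  // Tolerate errors only when enough endpoints validated successfully.
  if (valid.sizeIs >= threshold) Right(valid) else Left(errors)
}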
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.concurrent - -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.Assertion -import org.scalatest.wordspec.AnyWordSpec - -import java.util.concurrent.Semaphore -import scala.concurrent.Future -import scala.util.{Failure, Success} - -class DirectExecutionContextTest extends AnyWordSpec with BaseTest with HasExecutionContext { - - val testException = new RuntimeException("test exception") - - val testThread: Thread = Thread.currentThread() - def assertRunsInTestThread(): Assertion = Thread.currentThread() shouldBe testThread - def assertRunsNotInTestThread(): Assertion = Thread.currentThread() should not be testThread - - "A direct execution context" should { - "execute new futures synchronously in the calling thread" in { - val future = Future { - assertRunsInTestThread() - 1 + 1 - }(directExecutionContext) - - future.value shouldEqual Some(Success(2)) - } - - "embed exceptions from synchronous computations into the future" in { - val future = Future { - throw testException - }(directExecutionContext) - - future.value shouldEqual Some(Failure(testException)) - } - - "log exceptions from asynchronous computations" in { - loggerFactory.assertLogs( - { - val blocker = new Semaphore(0) - - val longRunningFuture = Future { - assertRunsNotInTestThread() - blocker.acquire() - 1 + 1 - }(parallelExecutionContext) - - longRunningFuture.onComplete { _ => - assertRunsNotInTestThread() - throw testException - }(directExecutionContext) - - blocker.release() - - longRunningFuture.futureValue shouldEqual 2 - }, - { err => - err.errorMessage shouldBe "A fatal error has occurred in DirectExecutionContext. Terminating thread." - err.throwable shouldBe Some(testException) - }, - ) - } - - "be stack-safe in general" in { - logger.debug("Entering 'be stack-safe in general'...") - def rec(n: Int): Future[Int] = - Future - .successful(n) - .flatMap(i => if (i > 0) rec(i - 1) else Future.successful(0))(directExecutionContext) - - rec(100000).futureValue - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/concurrent/ExecutionContextMonitorTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/concurrent/ExecutionContextMonitorTest.scala deleted file mode 100644 index 7bd32c72b1..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/concurrent/ExecutionContextMonitorTest.scala +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
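The two properties DirectExecutionContextTest checks, running on the calling thread and stack safety, pull in opposite directions: the naive executor below has the first property but overflows the stack on deeply nested flatMaps like the rec(100000) chain above, which is why a production direct executor needs trampolining or a recursion bound. A sketch of the naive version, for contrast only:

import scala.concurrent.ExecutionContext

// Runs tasks synchronously on the caller's thread; NOT stack-safe.
object NaiveDirectEc extends ExecutionContext {
  override def execute(runnable: Runnable): Unit = runnable.run()
  override def reportFailure(cause: Throwable): Unit = cause.printStackTrace()
}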
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.concurrent - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.config.DefaultProcessingTimeouts -import com.digitalasset.canton.logging.{LogEntry, SuppressingLogger} -import com.digitalasset.canton.time.NonNegativeFiniteDuration -import org.scalatest.Assertion -import org.scalatest.wordspec.AnyWordSpec - -import java.util.concurrent.ScheduledExecutorService -import scala.concurrent.duration.* -import scala.concurrent.{Await, Future} - -@SuppressWarnings(Array("com.digitalasset.canton.RequireBlocking")) -class ExecutionContextMonitorTest extends AnyWordSpec with BaseTest { - - def runAndCheck(loggerFactory: SuppressingLogger, check: Seq[LogEntry] => Assertion): Unit = { - implicit val scheduler: ScheduledExecutorService = - Threading.singleThreadScheduledExecutor( - loggerFactory.threadName + "-test-scheduler", - noTracingLogger, - ) - - val ecName = loggerFactory.threadName + "test-my-ec" - implicit val ec = Threading.newExecutionContext(ecName, noTracingLogger) - val monitor = - new ExecutionContextMonitor( - loggerFactory, - NonNegativeFiniteDuration.tryOfSeconds(1), - NonNegativeFiniteDuration.tryOfSeconds(2), - DefaultProcessingTimeouts.testing, - ) - monitor.monitor(ec) - - // As we are setting min num threads in fork join pool, we also need to - // set this here as otherwise this test becomes flaky when running in the - // sequential test - val numThreads = Threading.minParallelismForForkJoinPool - .max(Threading.detectNumberOfThreads(noTracingLogger)) - .value - - loggerFactory.assertLoggedWarningsAndErrorsSeq( - { - for (_ <- 1 to 2) { - val futs = (1 to (numThreads * 2)).map(_ => - Future { - logger.debug("Starting to block") - // Do not use `blocking` because we do not want to spawn a new thread in this test - Thread.sleep(2000) - logger.debug("Stopping to block") - } - ) - Await.result(Future.sequence(futs), 120.seconds) - } - monitor.close() - scheduler.shutdown() - ec.shutdown() - }, - check, - ) - } - - "execution context monitor" should { - - "report nicely if futures are stuck" in { - - // Separate suppression logger that does not skip the warnings coming from the execution context monitor - val loggerFactory = SuppressingLogger(getClass, skipLogEntry = _ => false) - runAndCheck( - loggerFactory, - { seq => - seq.foreach { entry => - assert( - entry.warningMessage.contains("is stuck or overloaded") || - entry.warningMessage.contains("is still stuck or overloaded") || - entry.warningMessage.contains("is just overloaded"), - s"did not match expected warning messages: ${entry.toString}", - ) - } - seq should not be empty - }, - ) - } - - "default SuppressingLogger skips its warning" in { - runAndCheck(loggerFactory, seq => seq shouldBe empty) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/concurrent/FutureSupervisorTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/concurrent/FutureSupervisorTest.scala deleted file mode 100644 index d72b5f3bdb..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/concurrent/FutureSupervisorTest.scala +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
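The monitor's "stuck or overloaded" reports follow from a probe pattern: submit a trivial task and check whether the pool schedules it before a deadline. A stripped-down sketch of that probe, showing the general technique rather than canton's implementation:

import java.util.concurrent.TimeoutException
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Await, ExecutionContext, Promise}

def poolIsResponsive(ec: ExecutionContext, deadline: FiniteDuration): Boolean = {
  val probe = Promise[Unit]()
  ec.execute(() => { probe.trySuccess(()); () })
  // A `false` here is what a "stuck or overloaded" warning reports.
  try { Await.ready(probe.future, deadline); true }
  catch { case _: TimeoutException => false }
}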
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.concurrent - -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.concurrent.FutureSupervisorImplTest.ScaleTestFutureTimeout -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.discard.Implicits.* -import com.digitalasset.canton.{BaseTest, HasExecutionContext, config} -import org.scalatest.wordspec.AnyWordSpec - -import scala.concurrent.duration.{DurationInt, DurationLong} -import scala.concurrent.{Future, Promise} -import scala.util.Success - -trait FutureSupervisorTest extends AnyWordSpec with BaseTest with HasExecutionContext { - - def futureSupervisor(supervisor: FutureSupervisor): Unit = { - - "support a lot of concurrent supervisions" in { - // Ensure that there is no quadratic algorithm anywhere in the supervision code - val count = 1000000 - - val supervisedCompleted = (1 to count).toList.map { i => - futureSupervisor.supervised(s"test-$i-completed")(Future.successful(i)) - } - Future.sequence(supervisedCompleted).futureValue - - val promise = Promise[Unit]() - val supervisedIncomplete = (1 to count).toList.map { i => - futureSupervisor.supervised(s"test-$i-incomplete")(promise.future) - } - promise.success(()) - Future.sequence(supervisedIncomplete).futureValue - } - - "supervising a completed promise's future is a no-op" in { - val promise = new SupervisedPromise[Unit]("to be completed immediately", supervisor) - - promise.trySuccess(()) - promise.future.value shouldBe Some(Success(())) - } - - "repeated calls to supervised promises are cached" in { - val promise = new SupervisedPromise[CantonTimestamp]("future is cached", supervisor) - - val fut1 = promise.future - val fut2 = promise.future - - fut1 shouldBe fut2 - - val timestamp = CantonTimestamp.now() - promise.trySuccess(timestamp) - - val fut3 = promise.future - - fut3 shouldBe fut1 - fut3.value shouldBe Some(Success(timestamp)) - } - } -} - -class NoOpFutureSupervisorTest extends FutureSupervisorTest { - "NoOpFutureSupervisor" should { - behave like futureSupervisor(FutureSupervisor.Noop) - } -} - -class FutureSupervisorImplTest extends FutureSupervisorTest { - - "FutureSupervisorImpl" should { - behave like futureSupervisor( - new FutureSupervisor.Impl(config.NonNegativeDuration.ofSeconds(10))(scheduledExecutor()) - ) - - "complain about a slow future" in { - val supervisor = - new FutureSupervisor.Impl(config.NonNegativeDuration.ofMillis(10))(scheduledExecutor()) - val promise = Promise[Unit]() - val fut = loggerFactory.assertLoggedWarningsAndErrorsSeq( - { - val fut = supervisor.supervised("slow-future")(promise.future) - eventually() { - loggerFactory.fetchRecordedLogEntries should not be empty - } - fut - }, - { - // If the test is running slow we might get the warning multiple times - logEntries => - logEntries should not be empty - forAll(logEntries) { - _.warningMessage should include("slow-future has not completed after") - } - }, - ) - promise.success(()) - fut.futureValue - } - - "stop supervision if the future is discarded" in { - val supervisor = - new FutureSupervisor.Impl(config.NonNegativeDuration.ofMillis(10))(scheduledExecutor()) - loggerFactory.assertLoggedWarningsAndErrorsSeq( - { - supervisor - .supervised("discarded-future") { - // Do not assign this to a val so that no reference to the future is kept anywhere and it can be GC'ed. - Promise[Unit]().future - } - .discard[Future[Unit]] - // Normally, the future should still be scheduled. 
However, it is possible that the GC has already run - // and so there's nothing to do to simulate - NonEmpty.from(supervisor.inspectScheduled) match { - case None => - logger.debug("No scheduled future found: Was probably already GC'ed.") - case Some(scheduled) => - logger.debug("Simulating garbage collection on the weak reference.") - scheduled.head1.fut.underlying.clear() - } - // Wait until the schedule is removed - eventually() { - supervisor.inspectScheduled shouldBe Seq.empty - } - }, - // We don't know how long it takes for the GC simulation to run. So there may be any number of warnings about the slow future. - forAll(_) { - _.warningMessage should include("discarded-future has not completed after") - }, - ) - } - - "scale for many supervised futures" in { - // Another (stricter) test ensuring that there is no quadratic algorithm anywhere in the supervision code - val count = 100000 - val supervisor = - new FutureSupervisor.Impl( - config.NonNegativeDuration.ofSeconds(ScaleTestFutureTimeout.toSeconds) - )(scheduledExecutor()) - val promise = Promise[Unit]() - val startTime = System.nanoTime - - val supervisedFutures = (1 to count).toList.map { i => - supervisor.supervised(s"test-$i")(promise.future) - } - supervisor.inspectScheduled.size shouldBe count - promise.success(()) - Future.sequence(supervisedFutures).futureValue - - val totalTime = (System.nanoTime - startTime).nanos - if (totalTime > ScaleTestFutureTimeout) { - fail( - s"Sequenced future took longer (${totalTime.toMillis} milliseconds) to complete than " + - s"the scale test timeout of $ScaleTestFutureTimeout" - ) - } - - eventually() { - supervisor.inspectScheduled shouldBe Seq.empty - } - } - } -} - -object FutureSupervisorImplTest { - private val ScaleTestFutureTimeout = 1.second -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/concurrent/IdlenessExecutorServiceTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/concurrent/IdlenessExecutorServiceTest.scala deleted file mode 100644 index 5970ddb3ad..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/concurrent/IdlenessExecutorServiceTest.scala +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
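The "discarded future" test above depends on supervision entries referencing their future only weakly, so that a future nobody else retains can be garbage-collected and its entry swept, which is exactly what the test simulates by clearing the weak reference by hand. A minimal sketch of such an entry (illustrative bookkeeping, not canton's data structure):

import java.lang.ref.WeakReference
import scala.concurrent.Future

// Holds the supervised future weakly; a sweep drops entries whose future was GC'ed.
final case class SupervisedEntry[A](description: String, fut: WeakReference[Future[A]]) {
  def collected: Boolean = fut.get() == null
}

def sweep[A](entries: List[SupervisedEntry[A]]): List[SupervisedEntry[A]] =
  entries.filterNot(_.collected)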
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.concurrent - -import cats.Monad -import cats.syntax.either.* -import com.digitalasset.canton.config.DefaultProcessingTimeouts -import com.digitalasset.canton.{BaseTest, BaseTestWordSpec} -import org.scalatest.wordspec.AnyWordSpec - -import java.util.concurrent.atomic.AtomicBoolean -import java.util.concurrent.{ForkJoinPool, LinkedBlockingQueue, ThreadPoolExecutor, TimeUnit} -import scala.annotation.tailrec -import scala.concurrent.duration.* -import scala.concurrent.{Future, Promise} - -trait IdlenessExecutorServiceTest extends BaseTest { this: AnyWordSpec => - - def awaitIdleness(mk: () => ExecutionContextIdlenessExecutorService): Unit = { - "return after the futures are completed" in { - import IdlenessExecutorServiceTest.busyWait - - implicit val ec: ExecutionContextIdlenessExecutorService = mk() - - val waitMillis = 100.milliseconds - - val promise = Promise[Unit]() - val future = for { - _ <- promise.future - _ <- Future(busyWait(waitMillis)) - } yield () - promise.completeWith(Future { - busyWait(waitMillis) - }) - - val start = Deadline.now - val terminated = ec.awaitIdleness(waitMillis * 100) - val end = Deadline.now - assume( - future.isCompleted, - "The future has failed to complete", - ) // Cancel test if future fails to complete for some reason. - assert(terminated, "awaitIdleness failed to detect that the execution context is idle.") - withClue("awaitIdleness has returned too late") { - end - start should be < waitMillis * 20 - } - - loggerFactory.suppressWarningsAndErrors { - ec.shutdown() - } - } - - "return after the given timeout" in { - - import IdlenessExecutorServiceTest.busyWait - - implicit val ec: ExecutionContextIdlenessExecutorService = mk() - - val running = new AtomicBoolean(true) - - // Periodically schedule new tasks - val waitMillis = 1.millisecond - val future = Monad[Future].tailRecM(()) { _ => - Future[Either[Unit, Unit]] { - busyWait(waitMillis) - if (Thread.interrupted) { - Thread.currentThread.interrupt() - Either.unit - } else if (!running.get()) { - Either.unit - } else { - Left(()) - } - } - } - - val start = Deadline.now - val terminated = ec.awaitIdleness(1.millisecond) - val end = Deadline.now - assert(!future.isCompleted, "The future has terminated unexpectedly") - assert(!terminated, "awaitIdleness incorrectly indicates that the execution context is idle") - withClue("awaitIdleness has returned too late") { - end - start should be < 2.seconds - } - - running.set(false) - loggerFactory.suppressWarningsAndErrors { - ExecutorServiceExtensions(ec)(logger, DefaultProcessingTimeouts.testing).close() - } - - } - - } -} - -object IdlenessExecutorServiceTest { - - def busyWait(duration: FiniteDuration): Unit = { - val deadline = duration.fromNow - - @tailrec def go(): Unit = - if (deadline.isOverdue()) () - else if (Thread.interrupted()) { - Thread.currentThread.interrupt() - () - } else go() - go() - } -} - -class ForkJoinIdlenessExecutorServiceTest - extends AnyWordSpec - with BaseTestWordSpec - with IdlenessExecutorServiceTest { - - "awaitIdleness" should { - behave like awaitIdleness { () => - val pool = new ForkJoinPool() - new ForkJoinIdlenessExecutorService( - pool, - pool, - throwable => logger.error(s"Error: $throwable"), - loggerFactory.threadName + "-fork-join-pool", - ) - } - } - -} - -class ThreadPoolIdlenessExecutorServiceTest - extends AnyWordSpec - with BaseTestWordSpec - with IdlenessExecutorServiceTest { - - "awaitIdleness" should { - behave like awaitIdleness { 
() => - val pool = - new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue[Runnable]()) - new ThreadPoolIdlenessExecutorService( - pool, - throwable => logger.error(s"Error: $throwable"), - loggerFactory.threadName + "-thread-pool-executor", - ) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/concurrent/ThreadingTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/concurrent/ThreadingTest.scala deleted file mode 100644 index c74308c7db..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/concurrent/ThreadingTest.scala +++ /dev/null @@ -1,353 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.concurrent - -import cats.syntax.parallel.* -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.config.DefaultProcessingTimeouts -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.util.FutureInstances.* -import com.digitalasset.canton.util.{LazyValWithContext, ResourceUtil} -import org.scalatest.wordspec.AnyWordSpec - -import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference} -import java.util.concurrent.{Semaphore, TimeUnit} -import scala.concurrent.{ExecutionContext, Future, blocking} - -@SuppressWarnings(Array("com.digitalasset.canton.GlobalExecutionContext")) -class ThreadingTest extends AnyWordSpec with BaseTest { - - private lazy val configuredNumberOfThreads: PositiveInt = - Threading.detectNumberOfThreads(noTracingLogger) - private lazy val expectedNumberOfParallelTasks: PositiveInt = - configuredNumberOfThreads.max(Threading.minParallelismForForkJoinPool) - private val expectedNumberOfParallelTasksWrappedInBlocking: PositiveInt = - PositiveInt.tryCreate(200) - private val numberOfTasksToMakeExecutionContextBusy: PositiveInt = PositiveInt.tryCreate(200) - - private val numberOfExtraTasks: Int = 20 - - "A new execution context" when { - - "nothing else is happening" must { - s"provide at least $configuredNumberOfThreads threads" in { - withTaskRunnerOnNewEc(configuredNumberOfThreads, wrapInBlocking = false) { taskRunner => - taskRunner.startTasks() - taskRunner.assertTasksRunning() - } - } - - s"provide at least $expectedNumberOfParallelTasksWrappedInBlocking threads for blocking calls" in { - withTaskRunnerOnNewEc( - expectedNumberOfParallelTasksWrappedInBlocking, - wrapInBlocking = true, - ) { taskRunner => - taskRunner.startTasks() - taskRunner.assertTasksRunning() - } - } - } - - "global execution context is busy" must { - def withGlobalEcBusy(body: => Unit): Unit = - withTaskRunner( - s"global-$numberOfTasksToMakeExecutionContextBusy-blocking", - numberOfTasksToMakeExecutionContextBusy, - wrapInBlocking = true, - ExecutionContext.global, - ) { taskRunner => - taskRunner.startTasks() - taskRunner.assertTasksRunning() - body - } - - s"provide at least $configuredNumberOfThreads threads" in { - withGlobalEcBusy { - - withTaskRunnerOnNewEc(configuredNumberOfThreads, wrapInBlocking = false) { taskRunner => - taskRunner.startTasks() - - taskRunner.assertTasksRunning() - } - } - } - - s"provide at least $expectedNumberOfParallelTasksWrappedInBlocking threads for blocking calls" in { - withGlobalEcBusy { - - withTaskRunnerOnNewEc( - expectedNumberOfParallelTasksWrappedInBlocking, - wrapInBlocking = true, - ) { taskRunner => - taskRunner.startTasks() - - 
taskRunner.assertTasksRunning() - } - } - } - } - - "another new execution context is busy" must { - - s"provide at least $configuredNumberOfThreads threads" in { - withTaskRunnerOnNewEc(numberOfTasksToMakeExecutionContextBusy, wrapInBlocking = true) { - taskRunner => - taskRunner.startTasks() - taskRunner.assertTasksRunning() - - withTaskRunnerOnNewEc(configuredNumberOfThreads, wrapInBlocking = false) { taskRunner => - taskRunner.startTasks() - - taskRunner.assertTasksRunning() - } - } - } - - s"provide at least $expectedNumberOfParallelTasksWrappedInBlocking threads for blocking calls" in { - withTaskRunnerOnNewEc(numberOfTasksToMakeExecutionContextBusy, wrapInBlocking = true) { - taskRunner => - taskRunner.startTasks() - taskRunner.assertTasksRunning() - - withTaskRunnerOnNewEc( - expectedNumberOfParallelTasksWrappedInBlocking, - wrapInBlocking = true, - ) { taskRunner => - taskRunner.startTasks() - - taskRunner.assertTasksRunning() - } - } - } - } - - def withTaskRunnerOnNewEc(numberOfTasksToRun: PositiveInt, wrapInBlocking: Boolean)( - body: TaskRunner => Unit - ): Unit = - withNewExecutionContext { ec => - val description = - if (wrapInBlocking) s"ec-$numberOfTasksToRun-blocking" else s"ec-$numberOfTasksToRun" - withTaskRunner(description, numberOfTasksToRun, wrapInBlocking, ec)(body) - } - - def withTaskRunner( - description: String, - numberOfTasksToRun: PositiveInt, - wrapInBlocking: Boolean, - ec: ExecutionContext, - )( - body: TaskRunner => Unit - ): Unit = ResourceUtil.withResource( - new TaskRunner(description, numberOfTasksToRun.value, wrapInBlocking)(ec) - )(body) - - class TaskRunner( - val description: String, - val numberOfTasksToRun: Int, - val wrapInBlocking: Boolean, - )(implicit - val ec: ExecutionContext - ) extends AutoCloseable { - - private val running = new Semaphore(0) - private val blocker = new Semaphore(0) - private val closed = new AtomicBoolean(false) - - private val taskFuture: AtomicReference[Option[Future[Unit]]] = new AtomicReference(None) - - def startTasks(): Unit = { - // Reset semaphores to be on the safe side - blocker.drainPermits() - running.drainPermits() - - // Start computation - val idle = taskFuture.compareAndSet( - None, { - val blockingTasks = ((0 until numberOfTasksToRun): Seq[Int]).parTraverse_ { i => - Future { - logger.debug(s"$description: Starting task $i...") - if (closed.get()) { - logger.warn(s"$description: Task $i started after closing. Aborting...") - } else { - // Only do this if the runner has not been closed, - // so that tasks running after close are not counted. - running.release() - - logger.info( - s"$description: Started task $i. 
(Total: ${running.availablePermits()})\n$ec" - ) - - if (wrapInBlocking) - blocking { - blocker.acquire() - } - else - blocker.acquire() - - logger.debug(s"$description: Terminated task $i") - } - } - } - - logger.info(s"$description: Starting $numberOfExtraTasks extra tasks...") - - val extraTasks = submitExtraTasks(description) - - Some(for { - r <- blockingTasks - _ <- extraTasks - } yield r) - }, - ) - - // Fail test, if some computation has already been running - withClue(s"No tasks running by this task runner:") { - idle shouldEqual true - } - } - - def assertTasksRunning(): Unit = { - val runningTasks = - if (running.tryAcquire(numberOfTasksToRun, 10, TimeUnit.SECONDS)) numberOfTasksToRun - else running.availablePermits() - - logger.info(s"$description: Found $runningTasks running tasks.\n$ec") - - withClue(s"Number of tasks running in parallel:") { - runningTasks shouldEqual numberOfTasksToRun - } - } - - override def close(): Unit = { - logger.info(s"$description: Initiating shutdown...") - closed.set(true) - blocker.release(numberOfTasksToRun) - withClue(s"Tasks properly terminating") { - taskFuture.get().map(_.futureValue) - } - taskFuture.set(None) - } - } - } - - "The parallel ExecutionContext" must { - "be stack-safe in general" in { - logger.debug("Entering 'the parallel ExecutionContext should be stack-safe in general'...") - - val parallelExecutionContext = - Threading.newExecutionContext( - "threading-test-execution-context", - noTracingLogger, - ) - - def rec(n: Int): Future[Int] = - Future - .successful(n) - .flatMap(i => if (i > 0) rec(i - 1) else Future.successful(0))(parallelExecutionContext) - - try { - rec(100000).futureValue - } finally { - parallelExecutionContext.shutdown() - } - } - } - - "lazy val initialization" can { - class LazyValTest(semaphore: Semaphore) { - lazy val blocker: Int = { - // The `blocking` here does not suffice because the Scala compiler inserts a `this.synchronized` around - // this initialization block without wrapping it in a `blocking` call itself. - blocking(semaphore.acquire()) - semaphore.release() - 1 - } - - def blockerWithContext: Int = _blockerWithContext.get(()) - private[this] val _blockerWithContext = new LazyValWithContext[Int, Unit]({ _ => - blocking(semaphore.acquire()) - semaphore.release() - 1 - }) - } - - "deplete the threads in a fork-join pool" in { - withNewExecutionContext { implicit ec => - val semaphore = new Semaphore(1) - blocking(semaphore.acquire()) - val lvt = new LazyValTest(semaphore) - - // Use a few more threads to avoid flakes - val concurrentInitializationThreads = expectedNumberOfParallelTasks.unwrap + 1 - val futures = ((1 to (concurrentInitializationThreads)): Seq[Int]).parTraverse_ { _ => - Future(lvt.blocker) - } - - // Sleep a bit to make sure that all futures are blocked - Threading.sleep(500) - - /* Now submit another future that unblocks the first initializer - * Unfortunately, this will not execute because there are already `expectedNumberOfParallelTasks` - * many threads blocked by the `synchronized` call in the `blocker` initializer. 
- */ - val unblockF = Future(semaphore.release()) - val extraTasks = submitExtraTasks("lazy-val test") - - always() { - futures.isCompleted shouldBe false - } - - // To make the test terminate, manually unblock the initializer - semaphore.release() - futures.futureValue - unblockF.futureValue - extraTasks.futureValue - } - } - - "not deplete the threads in a fork-join pool when using LazyValWithContext" in { - withNewExecutionContext { implicit ec => - val semaphore = new Semaphore(1) - blocking(semaphore.acquire()) - val lvt = new LazyValTest(semaphore) - - // Use a few more threads to avoid flakes - val concurrentInitializationThreads = expectedNumberOfParallelTasks.unwrap + 1 - val futures = ((1 to (concurrentInitializationThreads)): Seq[Int]).parTraverse_ { _ => - Future(lvt.blockerWithContext) - } - - // Sleep a bit to make sure that all futures are blocked - Threading.sleep(500) - - // Now submit another future that unblocks the first initializer. - val unblockF = Future(semaphore.release()) - futures.futureValue - unblockF.futureValue - } - } - } - - def submitExtraTasks(description: String)(implicit ec: ExecutionContext): Future[Unit] = - // Run some extra tasks to keep submitting to the fork join pool. - // This is necessary, because the fork join pool occasionally fails to create a worker thread. - // It is ok to do so in this test, because there are plenty of extra tasks in production. - ((0 until numberOfExtraTasks): Seq[Int]).parTraverse_ { i => - Future { - logger.debug(s"$description: Running extra task $i...") - } - } - - def withNewExecutionContext(body: ExecutionContext => Unit): Unit = - ResourceUtil.withResource( - ExecutorServiceExtensions( - Threading.newExecutionContext( - "threading-test-execution-context", - noTracingLogger, - ) - )(logger, DefaultProcessingTimeouts.testing) - ) { case ExecutorServiceExtensions(ec) => - body(ec) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/config/DbConfigTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/config/DbConfigTest.scala deleted file mode 100644 index 32b1ebcf08..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/config/DbConfigTest.scala +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
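
The ThreadingTest removed above hinges on `scala.concurrent.blocking`: wrapping a blocking call lets the fork-join pool spawn compensation threads, so far more blocked tasks than cores can still make progress. A minimal, self-contained sketch of that mechanism (illustrative only, not part of the deleted file):

import java.util.concurrent.Semaphore
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.DurationInt
import scala.concurrent.{Await, Future, blocking}

object BlockingSketch extends App {
  val gate = new Semaphore(0)
  // Far more tasks than cores: without `blocking`, these acquires could pin
  // every worker thread, and the releasing task below might never run.
  val blocked = Future.traverse((1 to 64).toList) { _ =>
    Future(blocking(gate.acquire()))
  }
  Future(gate.release(64)) // can run because the pool compensates for the blockers
  Await.result(blocked, 30.seconds)
}
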
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.config - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.concurrent.Threading -import com.digitalasset.canton.config.DbConfig.* -import com.typesafe.config.{Config, ConfigFactory} -import org.scalatest.wordspec.AnyWordSpec - -class DbConfigTest extends AnyWordSpec with BaseTest { - private def pgConfig(url: String = "jdbc:postgresql://0.0.0.0:0/dbName") = - Postgres(ConfigFactory.parseString(s""" - |{ - | url = "$url" - | user = "user" - | password = "pass" - | driver = org.postgresql.Driver - |} - """.stripMargin)) - - private lazy val h2ConfigWithoutRequiredOptions = - H2(ConfigFactory.parseString(s""" - |{ - | url = "jdbc:h2:mem:dbName" - | user = "user" - | password = "pass" - |} - """.stripMargin)) - - private lazy val h2ConfigWithRequiredOptions = - H2(ConfigFactory.parseString(s""" - |{ - | url = "jdbc:h2:mem:dbName;MODE=PostgreSQL;DB_CLOSE_DELAY=-1" - | user = "user" - | password = "pass" - |} - """.stripMargin)) - - private lazy val numCores = Threading.detectNumberOfThreads(noTracingLogger).unwrap - private lazy val numThreads = Math.max(1, numCores / 2) - private lazy val connTimeout = DbConfig.defaultConnectionTimeout.unwrap.toMillis - - private def pgExpectedConfig(expectedNumThreads: Int): Config = - ConfigFactory.parseString(s"""{ - | driver = org.postgresql.Driver - | numThreads = $expectedNumThreads - | password = "pass" - | poolName = "poolName" - | url = "jdbc:postgresql://0.0.0.0:0/dbName" - | user = "user" - | connectionTimeout = $connTimeout - | initializationFailTimeout = 1 - | properties = { - | tcpKeepAlive = true - | } - |} - """.stripMargin) - - "DbConfig.configWithFallback" should { - val config = pgConfig() - "Add default values to postgres config" in { - DbConfig - .configWithFallback(config)( - config.numReadConnectionsCanton( - forParticipant = true, - withWriteConnectionPool = false, - withMainConnection = false, - ), - "poolName", - logger, - ) shouldBe pgExpectedConfig(numThreads) - } - - "Adjust the number of threads for replicated participants" in { - DbConfig.configWithFallback(config)( - config.numReadConnectionsCanton( - forParticipant = true, - withWriteConnectionPool = true, - withMainConnection = false, - ), - "poolName", - logger, - ) shouldBe pgExpectedConfig(Math.max(1, numThreads / 2)) - } - - "Adjust the number of threads for replicated participants write connection pool" in { - DbConfig.configWithFallback(config)( - config.numWriteConnectionsCanton( - forParticipant = true, - withWriteConnectionPool = true, - withMainConnection = true, - ), - "poolName", - logger, - ) shouldBe pgExpectedConfig(Math.max(1, numThreads / 2 - 1)) - } - - "Add default values to H2 config and enforced options" in { - DbConfig.configWithFallback(h2ConfigWithRequiredOptions)( - config.numReadConnectionsCanton( - forParticipant = true, - withWriteConnectionPool = false, - withMainConnection = false, - ), - "poolName", - logger, - ) shouldBe - ConfigFactory.parseString(s"""{ - | driver = org.h2.Driver - | numThreads = 1 - | password = "pass" - | poolName = "poolName" - | url = "jdbc:h2:mem:dbName;MODE=PostgreSQL;DB_CLOSE_DELAY=-1" - | user = "user" - | connectionTimeout = $connTimeout - | initializationFailTimeout = 1 - |} - """.stripMargin) - - loggerFactory.suppressWarningsAndErrors( - DbConfig.configWithFallback(h2ConfigWithoutRequiredOptions)( - config.numReadConnectionsCanton( - forParticipant = true, - withWriteConnectionPool = false, - withMainConnection = false, 
- ), - "poolName", - logger, - ) shouldBe - ConfigFactory.parseString(s"""{ - | driver = org.h2.Driver - | numThreads = 1 - | password = "pass" - | poolName = "poolName" - | properties:{DB_CLOSE_DELAY:"-1",MODE:"PostgreSQL"} - | url = "jdbc:h2:mem:dbName" - | user = "user" - | connectionTimeout = $connTimeout - | initializationFailTimeout = 1 - |} - """.stripMargin) - ) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/config/DeprecatedConfigUtilsTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/config/DeprecatedConfigUtilsTest.scala deleted file mode 100644 index 63b8734062..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/config/DeprecatedConfigUtilsTest.scala +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.config - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.config.DeprecatedConfigUtils.* -import com.digitalasset.canton.logging.{LogEntry, SuppressionRule} -import com.typesafe.config.ConfigFactory -import org.scalatest.wordspec.AnyWordSpec -import org.slf4j.event.Level.INFO -import pureconfig.ConfigReader -import pureconfig.generic.semiauto.* - -class DeprecatedConfigUtilsTest extends AnyWordSpec with BaseTest { - case class TestConfig(s: Option[String], i: Option[Int], nested: NestedTestConfig) - case class NestedTestConfig(newS: String = "bye", newI: Int = 31, newJ: Int = 34) - - private implicit val deprecations: DeprecatedFieldsFor[TestConfig] = - new DeprecatedFieldsFor[TestConfig] { - override val movedFields: List[MovedConfigPath] = List( - MovedConfigPath("i", "nested.new-i", "nested.new-j"), - MovedConfigPath("s", "nested.new-s"), - ) - } - - implicit val nestedTestConfigReader: ConfigReader[NestedTestConfig] = - deriveReader[NestedTestConfig] - implicit val testConfigReader: ConfigReader[TestConfig] = - deriveReader[TestConfig].applyDeprecations - - private val expectedLogs = LogEntry.assertLogSeq( - Seq( - ( - _.message shouldBe "Config field at s is deprecated. Please use the following path(s) instead: nested.new-s.", - "deprecated field not logged", - ), - ( - _.message shouldBe "Config field at i is deprecated. 
Please use the following path(s) instead: nested.new-i, nested.new-j.", - "deprecated field not logged", - ), - ), - Seq.empty, - ) _ - - "DeprecatedConfigUtils" should { - "use deprecated values as fallback" in { - val config = ConfigFactory.parseString(""" - |{ - | s = "hello" - | i = 5 - |} - |""".stripMargin) - - loggerFactory.assertLogsSeq(SuppressionRule.Level(INFO))( - { - val testConfig = pureconfig.ConfigSource - .fromConfig(config) - .loadOrThrow[TestConfig] - - testConfig.s shouldBe None - testConfig.i shouldBe None - testConfig.nested.newS shouldBe "hello" // Uses "hello", despite newS having a default value, because newS was not set - testConfig.nested.newI shouldBe 5 // Uses 5 from the deprecated field i because new-i was not set - testConfig.nested.newJ shouldBe 5 // Uses 5 from the deprecated field i because new-j was not set - }, - expectedLogs, - ) - } - - "not overwrite new values" in { - val config = ConfigFactory.parseString(""" - |{ - | s = "hello" - | i = 5 - | nested { - | new-i = 10 - | new-j = 11 - | new-s = "bonjour" - | } - |} - |""".stripMargin) - - loggerFactory.assertLogsSeq(SuppressionRule.Level(INFO))( - { - val testConfig = pureconfig.ConfigSource - .fromConfig(config) - .loadOrThrow[TestConfig] - - testConfig.s shouldBe None - testConfig.i shouldBe None - testConfig.nested.newS shouldBe "bonjour" // Uses "bonjour" because newS was set - testConfig.nested.newI shouldBe 10 // Uses 10 because newI was set - testConfig.nested.newJ shouldBe 11 // Uses 11 because newJ was set - }, - expectedLogs, - ) - } - - "use new default values if nothing is set" in { - val config = ConfigFactory.parseString(""" - |{ - | nested {} - |} - |""".stripMargin) - - val testConfig = pureconfig.ConfigSource - .fromConfig(config) - .loadOrThrow[TestConfig] - - testConfig.s shouldBe empty - testConfig.i shouldBe empty - testConfig.nested.newS shouldBe "bye" // Uses "bye" because nothing is set and it's the default value - testConfig.nested.newI shouldBe 31 // Uses 31 because nothing is set and it's the default value - testConfig.nested.newJ shouldBe 34 // Uses 34 because nothing is set and it's the default value - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/config/GeneratorsConfig.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/config/GeneratorsConfig.scala deleted file mode 100644 index eef886fa9b..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/config/GeneratorsConfig.scala +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
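
The moved-field mechanics exercised by DeprecatedConfigUtilsTest above can be pictured with plain Typesafe Config. The sketch below is hypothetical: the path names are illustrative, and Canton's DeprecatedConfigUtils adds logging and pureconfig integration on top.

import com.typesafe.config.ConfigFactory

object MovedFieldSketch extends App {
  // A value at the deprecated path `i` feeds the new path `nested.new-i`,
  // but only when the new path was not already set by the user.
  val user = ConfigFactory.parseString("i = 5")
  val migrated =
    if (user.hasPath("nested.new-i")) user.withoutPath("i")
    else user.withValue("nested.new-i", user.getValue("i")).withoutPath("i")
  assert(migrated.getInt("nested.new-i") == 5)
}
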
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.config - -import com.digitalasset.canton.config.CantonRequireTypes.{String185, String255, String68} -import com.digitalasset.canton.config.RequireTypes.* -import com.digitalasset.canton.{Generators, config} -import org.scalacheck.{Arbitrary, Gen} - -import scala.concurrent.duration - -object GeneratorsConfig { - import org.scalatest.EitherValues.* - - // Refined Int - implicit val nonNegativeIntArb: Arbitrary[NonNegativeInt] = Arbitrary( - Gen.choose(0, Int.MaxValue).map(NonNegativeInt.tryCreate) - ) - implicit val positiveIntArb: Arbitrary[PositiveInt] = Arbitrary( - Gen.choose(1, Int.MaxValue).map(PositiveInt.tryCreate) - ) - - // Refined Long - implicit val nonNegativeLongArb: Arbitrary[NonNegativeLong] = Arbitrary( - Gen.choose(0, Long.MaxValue).map(NonNegativeLong.tryCreate) - ) - implicit val positiveLongArb: Arbitrary[PositiveLong] = Arbitrary( - Gen.choose(1, Long.MaxValue).map(PositiveLong.tryCreate) - ) - - implicit val string68Arb: Arbitrary[String68] = Arbitrary( - Generators.lengthLimitedStringGen(String68) - ) - implicit val string185Arb: Arbitrary[String185] = Arbitrary( - Generators.lengthLimitedStringGen(String185) - ) - implicit val string255Arb: Arbitrary[String255] = Arbitrary( - Generators.lengthLimitedStringGen(String255) - ) - - implicit val configNonNegativeFiniteDurationArb: Arbitrary[config.NonNegativeFiniteDuration] = - Arbitrary( - Arbitrary - .arbitrary[NonNegativeLong] - .map(i => scala.concurrent.duration.FiniteDuration(i.unwrap, duration.NANOSECONDS)) - .map(d => config.NonNegativeFiniteDuration.fromDuration(d).value) - ) - - implicit val portArb: Arbitrary[Port] = Arbitrary( - Gen.choose(Port.minValidPort, Port.maxValidPort).map(Port.tryCreate) - ) - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/config/LengthLimitedStringTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/config/LengthLimitedStringTest.scala deleted file mode 100644 index 375c676570..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/config/LengthLimitedStringTest.scala +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
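
GeneratorsConfig above follows a standard ScalaCheck pattern: one implicit Arbitrary per refined type, built from a bounded Gen so that every sampled value satisfies the type's invariant. A self-contained sketch of the same pattern, with a plain wrapper standing in for Canton's refined types (names are illustrative):

import org.scalacheck.{Arbitrary, Gen}

final case class Positive(value: Int) {
  require(value > 0, "value must be positive")
}

object PositiveGenerators {
  // Generate only values the constructor accepts, so every sampled
  // Positive is valid by construction.
  implicit val positiveArb: Arbitrary[Positive] =
    Arbitrary(Gen.choose(1, Int.MaxValue).map(Positive(_)))
}
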
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.config - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.config.CantonRequireTypes.{String255, String3, String36} -import org.scalatest.wordspec.AnyWordSpec - -class LengthLimitedStringTest extends AnyWordSpec with BaseTest { - "LengthLimitedString" should { - "have a correctly working .create" in { - val ok = String255.create("123") - val ok2 = String255.create("") - val ok3 = String255.create("a" * 255) - val not_ok = String255.create("a" * 256, Some("Incantation")) - - ok.value.unwrap shouldBe "123" - ok2.value.unwrap shouldBe "" - ok3.value.unwrap shouldBe "a" * 255 - not_ok.left.value shouldBe a[String] - not_ok.left.value should (include("maximum length of 255") and include("Incantation")) - } - - "have a correctly working .tryCreate" in { - val ok = String255.tryCreate("123") - val ok2 = String255.tryCreate("") - val ok3 = String255.tryCreate("a" * 255) - - ok.unwrap shouldBe "123" - ok2.unwrap shouldBe "" - ok3.unwrap shouldBe "a" * 255 - a[IllegalArgumentException] should be thrownBy String255.tryCreate("a" * 256) - } - - "have symmetric equality with strings" in { - // TODO(#23301) This test does not really test symmetry and symmetry in fact does not hold. - val s = "s" - val s255 = String255.tryCreate("s") - (s255 == s) shouldBe true - (s255 == s255) shouldBe true - // TODO(i23301): uncomment this line once fixed. -// (s255 == "bar") shouldBe ("bar" == s255) - } - - "respect supplementary pairs upon truncation" in { - // String with a supplementary pair for 😂 at truncation point - val s = "ab\uD83D\uDE02" - - val s3 = String3.createWithTruncation(s) - s3 shouldBe "ab" // The supplementary pair is removed in full - - val s36 = String36.createWithTruncation(s) - s36 shouldBe s - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/config/LengthLimitedStringWrapperTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/config/LengthLimitedStringWrapperTest.scala deleted file mode 100644 index a1b3cb9e2f..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/config/LengthLimitedStringWrapperTest.scala +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
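
The truncation behaviour checked in LengthLimitedStringTest above (never cutting between the two halves of a surrogate pair) can be sketched in a few lines; this illustrates the rule only and is not Canton's actual createWithTruncation implementation:

object TruncationSketch extends App {
  def truncateSafely(s: String, max: Int): String =
    if (s.length <= max) s
    // Cutting right after a high surrogate would leave half a character,
    // so drop the whole pair instead.
    else if (Character.isHighSurrogate(s.charAt(max - 1))) s.substring(0, max - 1)
    else s.substring(0, max)

  assert(truncateSafely("ab\uD83D\uDE02", 3) == "ab")
  assert(truncateSafely("ab\uD83D\uDE02", 4) == "ab\uD83D\uDE02")
}
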
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.config - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.config.CantonRequireTypes.{ - LengthLimitedStringWrapper, - LengthLimitedStringWrapperCompanion, - String255, - String300, -} -import org.scalatest.wordspec.AnyWordSpec - -final case class TestWrapper(override val str: String255) extends LengthLimitedStringWrapper -object TestWrapper extends LengthLimitedStringWrapperCompanion[String255, TestWrapper] { - override def instanceName: String = "TestWrapper" - override protected def companion: String255.type = String255 - override protected def factoryMethodWrapper(str: String255): TestWrapper = TestWrapper(str) -} - -final case class TestWrapper2(override val str: String255) extends LengthLimitedStringWrapper -object TestWrapper2 extends LengthLimitedStringWrapperCompanion[String255, TestWrapper2] { - override def instanceName: String = "TestWrapper2" - override protected def companion: String255.type = String255 - override protected def factoryMethodWrapper(str: String255): TestWrapper2 = TestWrapper2(str) -} - -class LengthLimitedStringWrapperTest extends AnyWordSpec with BaseTest { - "TestWrapper" should { - "have a correctly working .create" in { - val ok = TestWrapper.create("123") - val ok2 = TestWrapper.create("") - val ok3 = TestWrapper.create("a" * 255) - val not_ok = TestWrapper.create("a" * 256) - - ok.value.unwrap shouldBe "123" - ok2.value.unwrap shouldBe "" - ok3.value.unwrap shouldBe "a" * 255 - not_ok.left.value shouldBe a[String] - not_ok.left.value should (include("maximum length of 255") and include("TestWrapper")) - } - - "have a correctly working .tryCreate" in { - val ok = TestWrapper.tryCreate("123") - val ok2 = TestWrapper.tryCreate("") - val ok3 = TestWrapper.tryCreate("a" * 255) - - ok.unwrap shouldBe "123" - ok2.unwrap shouldBe "" - ok3.unwrap shouldBe "a" * 255 - a[IllegalArgumentException] should be thrownBy TestWrapper.tryCreate("a" * 256) - } - - "have equals and hashcode functions that work like we expect them to" in { - val string = "123" - val string_124 = "124" - val limited = String255.tryCreate("123") - val limited_300 = String300.tryCreate("123") - val wrapper = TestWrapper.tryCreate("123") - val wrapper2 = TestWrapper2.tryCreate("123") - // comparisons between String and LengthLimitedString/LengthLimitedStringWrapper - limited == string shouldBe true - wrapper == string shouldBe true - wrapper2 == string shouldBe true - limited.hashCode() == string.hashCode() shouldBe true - wrapper.hashCode() == string.hashCode() shouldBe true - wrapper2.hashCode() == string.hashCode() shouldBe true - - limited == string_124 shouldBe false - wrapper == string_124 shouldBe false - wrapper2 == string_124 shouldBe false - - // TODO(i23301): uncomment this line once fixed. 
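// (Symmetry cannot hold as things stand: the wrappers unwrap themselves before
// comparing, while java.lang.String#equals knows nothing about the wrapper
// types, so the reversed comparisons below always evaluate to false.)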
-// string == limited shouldBe false -// string == wrapper shouldBe false -// string == wrapper2 shouldBe false - - // sanity checks that we don't have otherwise unintended behaviour - // comparisons between LengthLimitedString and LengthLimitedStringWrapper - limited == wrapper shouldBe false - wrapper == limited shouldBe false - - // comparisons between different LengthLimitedStringWrapper - wrapper == wrapper2 shouldBe false - limited == limited_300 shouldBe false - val wrapper_same = TestWrapper.tryCreate("123") - wrapper == wrapper_same shouldBe true - - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/config/RefinedNonNegativeDurationTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/config/RefinedNonNegativeDurationTest.scala deleted file mode 100644 index c3963b785d..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/config/RefinedNonNegativeDurationTest.scala +++ /dev/null @@ -1,313 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.config - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.config.RefinedNonNegativeDuration.noisyAwaitResultForTesting -import org.scalatest.wordspec.AnyWordSpec -import org.slf4j.event.Level - -import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger} -import scala.annotation.unused -import scala.collection.mutable -import scala.concurrent.duration.* -import scala.concurrent.{Future, TimeoutException} -import scala.util.{Failure, Success, Try} - -class RefinedNonNegativeDurationTest extends AnyWordSpec with BaseTest { - - "noisyAwaitResult" should { - - lazy val timeout = 27.seconds - lazy val warnAfter = 8.seconds - lazy val task = "test task" - lazy val noKillSwitch: Unit => Boolean = _ => false - def noStackTracesInUnitTest: Thread => Boolean = _ => false - - "Keep retrying until the timeout has expired" in { - - val (logs, logF, numAwaits) = state - val expectedAwaits = List(8.seconds, 4.seconds, 4.seconds, 4.seconds, 4.seconds, 3.seconds) - val ready = neverSucceed(expectedAwaits, numAwaits)(_, _) - - val res = noisyAwaitResultForTesting( - Future.unit, - task, - timeout, - logF, - () => nanoTime(expectedAwaits, numAwaits)(), - warnAfter, - noKillSwitch, - noStackTracesInUnitTest, - )(ready) - - logs.toList should be(loggedMessages(task, expectedAwaits, failure = true)) - - res should matchPattern { - case Failure(exn: TimeoutException) - if exn.getMessage == s"Task $task did not complete within $timeout." 
=> - } - } - - "Never wait longer than max retry interval of 10 seconds" in { - val (logs, logF, numAwaits) = state - val largeTimeout = 42.seconds - val largeWarnAfter = 22.seconds // large enough such that half is still larger than 10 seconds - val expectedAwaits = List( - 10.seconds, - 10.seconds, - 2.seconds, // Lowered to 2 seconds to hit 22 second warning cut-off - 10.seconds, - 10.seconds, - ) - val ready = neverSucceed(expectedAwaits, numAwaits)(_, _) - - val res = noisyAwaitResultForTesting( - Future.unit, - task, - largeTimeout, - logF, - () => nanoTime(expectedAwaits, numAwaits)(), - largeWarnAfter, - noKillSwitch, - noStackTracesInUnitTest, - )(ready) - - logs.toList should be( - loggedMessages( - task, - expectedAwaits, - failure = true, - expectedDebugLogs = 2, // first two entries are debug as they happen before largeWarnAfter - ) - ) - - res should matchPattern { - case Failure(exn: TimeoutException) - if exn.getMessage == s"Task $task did not complete within $largeTimeout." => - } - } - - "Stop as soon as the Future completes, with a finite timeout" in { - - val (logs, logF, numAwaits) = state - val expectedAwaits = List(8.seconds, 4.seconds) - - val ready = succeedAfter(2, Future.unit, numAwaits, expectedAwaits)(_, _) - - val res = noisyAwaitResultForTesting( - Future.unit, - task, - timeout, - logF, - () => nanoTime(expectedAwaits, numAwaits)(), - warnAfter, - noKillSwitch, - noStackTracesInUnitTest, - )(ready) - - logs.toList should be(loggedMessages(task, expectedAwaits)) - - res should be(Success(())) - } - - "Stop as soon as the Future completes, with an infinite timeout" in { - - val (logs, logF, numAwaits) = state - val expectedAwaits = List(8.seconds, 4.seconds) - - val ready = succeedAfter(2, Future.unit, numAwaits, expectedAwaits)(_, _) - - val res = noisyAwaitResultForTesting( - Future.unit, - task, - Duration.Inf, - logF, - () => nanoTime(expectedAwaits, numAwaits)(), - warnAfter, - noKillSwitch, - noStackTracesInUnitTest, - )(ready) - - logs.toList should be(loggedMessages(task, expectedAwaits)) - - res should be(Success(())) - } - - "Not get confused when the future throws a Timeout exception" in { - - val (logs, logF, numAwaits) = state - val expectedAwaits = List(8.seconds, 4.seconds) - - val ready = succeedAfter( - 2, - Future.failed { - new TimeoutException(s"This is a different timeout exception") - }, - numAwaits, - expectedAwaits, - )(_, _) - - try { - noisyAwaitResultForTesting( - Future.unit, - task, - Duration.Inf, - logF, - () => nanoTime(expectedAwaits, numAwaits)(), - warnAfter, - noKillSwitch, - noStackTracesInUnitTest, - )(ready) - } catch { - case exn: TimeoutException => - exn.getMessage shouldBe s"This is a different timeout exception" - - logs.toList should be(loggedMessages(task, expectedAwaits)) - - case _: Throwable => - fail( - s"Noisy await result should return the exception thrown by the future being blocked on." - ) - } - - } - - "Not block on the future when the timeout is zero" in { - - val (logs, logF, numAwaits) = state - val expectedAwaits = List() - val ready = neverSucceed(expectedAwaits, numAwaits)(_, _) - - val res = noisyAwaitResultForTesting( - Future.unit, - task, - 0.seconds, - logF, - () => nanoTime(expectedAwaits, numAwaits)(), - warnAfter, - noKillSwitch, - noStackTracesInUnitTest, - )(ready) - - logs.toList should be(loggedMessages(task, expectedAwaits, failure = true)) - - res should matchPattern { - case Failure(exn: TimeoutException) - if exn.getMessage == s"Task $task did not complete within 0 seconds." 
=> - } - } - - "Cancel the await when the kill-switch is triggered" in { - - val (_logs, logF, numAwaits) = state - val expectedAwaits = - List(8.seconds, 4.seconds, 4.seconds, 4.seconds, 4.seconds, 3.seconds, 3.seconds, 3.seconds) - val killSwitch: AtomicBoolean = new AtomicBoolean(false) - val ready = killswitchAfter(expectedAwaits, numAwaits, killSwitch)(_, _) - - try { - noisyAwaitResultForTesting( - Future.unit, - task, - timeout, - logF, - () => nanoTime(expectedAwaits, numAwaits)(), - warnAfter, - _ => killSwitch.get(), - noStackTracesInUnitTest, - )(ready) - fail(s"Noisy wait result was expected to throw") - } catch { - case exn: TimeoutException => - exn.getMessage should include("Noisy await result test task cancelled with kill-switch") - } - - numAwaits.get() shouldBe 4 - } - - } - - private def loggedMessages( - task: => String, - awaits: List[FiniteDuration], - failure: Boolean = false, - expectedDebugLogs: Int = 0, - ) = { - val cumulativeAwaits = cumulative(awaits) - val infos = cumulativeAwaits - .zip((1 to awaits.length).map(i => if (i <= expectedDebugLogs) Level.DEBUG else Level.INFO)) - .map { case (duration, level) => - level -> s"Task $task still not completed after $duration. Continue waiting..." - } - if (failure) - infos :+ Level.WARN -> s"Task $task did not complete within ${cumulativeAwaits.lastOption - .getOrElse(0.seconds)}. Stack traces:\n" - else infos - } - - private def cumulative(expectedAwaits: List[FiniteDuration]) = - expectedAwaits - .foldLeft(List.empty[Duration]) { case (acc, x) => - val total = acc.headOption.getOrElse(0.seconds) - x.plus(total) :: acc - } - .reverse - - private def state - : (mutable.ArrayDeque[(Level, String)], (Level, String) => Unit, AtomicInteger) = { - val logs = mutable.ArrayDeque[(Level, String)]() - def logF(l: Level, s: String): Unit = { - val tuple = (l, s) - logs += tuple - () - } - (logs, logF, new AtomicInteger()) - } - - private def succeedAfter( - @unused _n: Int, - success: => Future[Unit], - counter: AtomicInteger, - expectedAwaits: List[Duration], - )(@unused _f: Future[Unit], d: Duration): Try[Future[Unit]] = { - val i = counter.get() - if (i >= 2) Success(success) - else { - d should be(expectedAwaits(i)) - counter.incrementAndGet() - Failure(new TimeoutException) - } - } - - def neverSucceed( - expectedAwaits: List[Duration], - counter: AtomicInteger, - )(@unused _f: Future[Unit], d: Duration): Try[Future[Unit]] = { - d should be(expectedAwaits(counter.get())) - counter.incrementAndGet() - Failure(new TimeoutException) - } - - def killswitchAfter( - expectedAwaits: List[Duration], - counter: AtomicInteger, - killSwitch: AtomicBoolean, - )(@unused _f: Future[Unit], d: Duration): Try[Future[Unit]] = { - d should be(expectedAwaits(counter.get())) - val c = counter.incrementAndGet() - if (c > 3) killSwitch.set(true) - Failure(new TimeoutException) - } - - /** @param counter - * A counter that is **externally** incremented for every iteration of the retry loop - */ - def nanoTime(expectedAwaits: List[Duration], counter: AtomicInteger)(): Long = { - val soFar = expectedAwaits.take(counter.get).foldLeft(0.seconds: Duration) { case (acc, next) => - acc.plus(next) - } - soFar.toNanos - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/Blake2xbTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/Blake2xbTest.scala deleted file mode 100644 index 8de51d9aa9..0000000000 --- 
a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/Blake2xbTest.scala +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto - -import com.digitalasset.canton.BaseTest -import org.scalatest.wordspec.AnyWordSpec - -import scala.io.Source - -class Blake2xbTest extends AnyWordSpec with BaseTest { - - import TestVectors.* - - def hexString(buf: Array[Byte]): String = buf.map("%02X" format _).mkString.toLowerCase() - - "Blake2xb" should { - // Golden test taken from - // https://github.com/facebook/folly/blob/993de57926e7b17306ac9c5c46781a15d1b04414/folly/experimental/crypto/test/Blake2xbTest.cpp - "pass the golden tests" in { - val input = new Array[Byte](256) - - for (i <- 0.until(256)) { - input(i) = i.toByte - } - - forAll(hexVectors) { v => - val len = v.length / 2 - val out = hexString(Blake2xb.digest(input, len)) - assert( - out == v, - s"""Blake2xb digest of length $len should be - |$v - |but was - |$out""".stripMargin, - ) - } - } - } -} - -object TestVectors { - - val resourceName = "blake2xb-golden-tests.txt" - val hexVectors: List[String] = Source.fromResource(resourceName).getLines().toList - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/CryptoPureApiCantonCompatibilityTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/CryptoPureApiCantonCompatibilityTest.scala deleted file mode 100644 index f84a119ee3..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/CryptoPureApiCantonCompatibilityTest.scala +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.protocol.{ - AuthenticatedContractIdVersionV11, - ExampleTransactionFactory, - Unicum, -} -import com.google.protobuf.ByteString -import org.scalatest.wordspec.AnyWordSpec - -/** Daml lf and daml-on-x impose a maximum limit on contractIds for which Canton uses hashes. Test - * that (hex) string versions of all hash variants are below the limit. 
- */ -class CryptoPureApiCantonCompatibilityTest extends AnyWordSpec with BaseTest { - - private val cantonContractIdVersion = AuthenticatedContractIdVersionV11 - private val longString = - (('a' to 'z').mkString + ('A' to 'Z').mkString + ('0' to '9').mkString + ":") * 10 - - forAll(HashAlgorithm.algorithms.values) { algorithm => - s"${algorithm.name}" should { - val hash = - Hash.digest(TestHash.testHashPurpose, ByteString.copyFromUtf8(longString), algorithm) - val maxHashSize = 46 - - s"produce binary hashes of length less than $maxHashSize bytes" in { - hash.getCryptographicEvidence.size() should be <= maxHashSize - } - - s"produce string hashes of length ${maxHashSize * 2} chars" in { - hash.toHexString.length should be <= (maxHashSize * 2) - } - - "be able to build a ContractId" in { - cantonContractIdVersion.fromDiscriminator( - ExampleTransactionFactory.submissionSeed, - Unicum(hash), - ) - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/EncodableString.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/EncodableString.scala deleted file mode 100644 index db583c8e36..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/EncodableString.scala +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto - -import com.digitalasset.canton.serialization.{DeterministicEncoding, HasCryptographicEvidence} -import com.google.protobuf.ByteString - -final case class EncodableString(string: String) extends HasCryptographicEvidence { - def encodeDeterministically: ByteString = DeterministicEncoding.encodeString(string) - override def getCryptographicEvidence: ByteString = encodeDeterministically -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/GeneratorsCrypto.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/GeneratorsCrypto.scala deleted file mode 100644 index 9f0d0a7e0c..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/GeneratorsCrypto.scala +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
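
EncodableString above delegates to DeterministicEncoding.encodeString. One common way to make a string encoding deterministic and unambiguous is a fixed-width length prefix over the UTF-8 bytes; the sketch below illustrates that idea only and may differ from Canton's actual DeterministicEncoding:

import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets

import com.google.protobuf.ByteString

object DeterministicEncodingSketch extends App {
  def encodeString(s: String): ByteString = {
    val utf8 = s.getBytes(StandardCharsets.UTF_8)
    // A 4-byte big-endian length prefix removes any ambiguity when several
    // encoded values are concatenated.
    val len = ByteBuffer.allocate(4).putInt(utf8.length).array()
    ByteString.copyFrom(len).concat(ByteString.copyFrom(utf8))
  }

  assert(encodeString("abc") == encodeString("abc")) // same input, same bytes
}
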
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto - -import com.daml.nonempty.{NonEmpty, NonEmptyUtil} -import com.digitalasset.canton.config.CantonRequireTypes.String68 -import com.digitalasset.canton.config.{DefaultProcessingTimeouts, PositiveFiniteDuration} -import com.digitalasset.canton.crypto.provider.jce.JcePrivateCrypto -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.{BaseTest, Generators} -import com.google.protobuf.ByteString -import magnolify.scalacheck.auto.* -import org.scalacheck.* - -import scala.annotation.nowarn - -object GeneratorsCrypto { - import Generators.* - import com.digitalasset.canton.data.GeneratorsDataTime.* - import org.scalatest.EitherValues.* - - implicit val signingKeyUsageArb: Arbitrary[SigningKeyUsage] = genArbitrary - implicit val signingAlgorithmSpecArb: Arbitrary[SigningAlgorithmSpec] = genArbitrary - implicit val signingKeySpecArb: Arbitrary[SigningKeySpec] = genArbitrary - implicit val symmetricKeySchemeArb: Arbitrary[SymmetricKeyScheme] = genArbitrary - implicit val encryptionAlgorithmSpecArb: Arbitrary[EncryptionAlgorithmSpec] = genArbitrary - implicit val encryptionKeySpecArb: Arbitrary[EncryptionKeySpec] = genArbitrary - implicit val hashAlgorithmArb: Arbitrary[HashAlgorithm] = genArbitrary - implicit val saltAlgorithmArb: Arbitrary[SaltAlgorithm] = genArbitrary - @nowarn("msg=Der in object CryptoKeyFormat is deprecated") - implicit val cryptoKeyFormatArb: Arbitrary[CryptoKeyFormat] = genArbitrary - - implicit val signingKeySpecsNESArb: Arbitrary[NonEmpty[Set[SigningKeySpec]]] = - Generators.nonEmptySet[SigningKeySpec] - implicit val encryptionKeySpecsNESArb: Arbitrary[NonEmpty[Set[EncryptionKeySpec]]] = - Generators.nonEmptySet[EncryptionKeySpec] - implicit val symmetricKeySchemeNESArb: Arbitrary[NonEmpty[Set[SymmetricKeyScheme]]] = - Generators.nonEmptySet[SymmetricKeyScheme] - implicit val hashAlgorithmNESArb: Arbitrary[NonEmpty[Set[HashAlgorithm]]] = - Generators.nonEmptySet[HashAlgorithm] - implicit val cryptoKeyFormatNESArb: Arbitrary[NonEmpty[Set[CryptoKeyFormat]]] = - Generators.nonEmptySet[CryptoKeyFormat] - - implicit val fingerprintArb: Arbitrary[Fingerprint] = Arbitrary( - Generators.lengthLimitedStringGen(String68).map(s => Fingerprint.tryFromString(s.str)) - ) - - val validUsageGen: Gen[Set[SigningKeyUsage]] = for { - usages <- Gen.someOf(SigningKeyUsage.All) - if SigningKeyUsage.isUsageValid(NonEmptyUtil.fromUnsafe(usages.toSet)) - } yield usages.toSet - - // TODO(#15813): Change arbitrary signing keys to match real keys - implicit val signingPublicKeyArb: Arbitrary[SigningPublicKey] = Arbitrary(for { - key <- Arbitrary.arbitrary[ByteString] - keySpec <- Arbitrary.arbitrary[SigningKeySpec] - format = CryptoKeyFormat.Symbolic - usage <- Gen - .nonEmptyListOf(Gen.oneOf(SigningKeyUsage.All.toList)) - .map(usages => NonEmptyUtil.fromUnsafe(usages.toSet)) - .suchThat(usagesNE => SigningKeyUsage.isUsageValid(usagesNE)) - } yield SigningPublicKey.create(format, key, keySpec, usage).value) - - @nowarn("msg=Raw in object SignatureFormat is deprecated") - implicit val signatureFormatArb: Arbitrary[SignatureFormat] = genArbitrary - - implicit val signatureDelegationArb: Arbitrary[SignatureDelegation] = Arbitrary( - for { - periodFrom <- Arbitrary.arbitrary[CantonTimestamp] - periodDuration <- Gen.choose(1, 
86400L).map(PositiveFiniteDuration.ofSeconds) - period = SignatureDelegationValidityPeriod(periodFrom, periodDuration) - - signingKeySpec <- Arbitrary - .arbitrary[SigningKeySpec] - - /** The session signing keys inside the signature delegation are a special type of signing key - * where the format is fixed (i.e. DerX509Spki) and their scheme is identified by the - * 'sessionKeySpec' protobuf field. Therefore, we cannot use the usual - * Arbitrary.arbitrary[SigningKey] because it produces keys in a Symbolic format. - */ - sessionKey = JcePrivateCrypto - .generateSigningKeypair( - signingKeySpec, - SigningKeyUsage.ProtocolOnly, - ) - .value - - format <- Arbitrary.arbitrary[SignatureFormat] - signature <- Arbitrary.arbitrary[ByteString] - algorithmO <- Arbitrary.arbitrary[Option[SigningAlgorithmSpec]] - - } yield SignatureDelegation - .create( - sessionKey.publicKey, - period, - Signature.create( - format = format, - signature = signature, - signedBy = sessionKey.id, - signingAlgorithmSpec = algorithmO, - signatureDelegation = None, - ), - ) - .value - ) - - // Needed to ensure we go via `create()`, which migrates `Raw` - implicit val signatureArb: Arbitrary[Signature] = Arbitrary( - for { - signature <- Arbitrary.arbitrary[ByteString] - longTermKey <- Arbitrary.arbitrary[Fingerprint] - signingAlgorithmSpec <- Arbitrary.arbitrary[Option[SigningAlgorithmSpec]] - format <- Arbitrary.arbitrary[SignatureFormat] - signatureDelegation <- Arbitrary.arbitrary[Option[SignatureDelegation]] - signedBy = signatureDelegation.map(_.sessionKey.id).getOrElse(longTermKey) - } yield Signature.create(format, signature, signedBy, signingAlgorithmSpec, signatureDelegation) - ) - - implicit val hashArb: Arbitrary[Hash] = Arbitrary( - for { - hashAlgorithm <- hashAlgorithmArb.arbitrary - hash <- Gen - .stringOfN(hashAlgorithm.length.toInt, Gen.alphaNumChar) - .map(ByteString.copyFromUtf8) - } yield Hash.tryCreate(hash, hashAlgorithm) - ) - - implicit val saltArb: Arbitrary[Salt] = Arbitrary( - for { - saltAlgorithm <- saltAlgorithmArb.arbitrary - salt <- Gen - .stringOfN(saltAlgorithm.length.toInt, Gen.alphaNumChar) - .map(ByteString.copyFromUtf8) - } yield Salt.create(salt, saltAlgorithm).value - ) - - private lazy val loggerFactoryNotUsed = - NamedLoggerFactory.unnamedKey("test", "NotUsed-GeneratorsCrypto") - - private lazy val crypto = SymbolicCrypto.create( - BaseTest.testedReleaseProtocolVersion, - DefaultProcessingTimeouts.testing, - loggerFactoryNotUsed, - ) - - // TODO(#15813): Change arbitrary encryption keys to match real keys - implicit val encryptionPublicKeyArb: Arbitrary[EncryptionPublicKey] = Arbitrary(for { - key <- Arbitrary.arbitrary[ByteString] - keySpec <- Arbitrary.arbitrary[EncryptionKeySpec] - format = CryptoKeyFormat.Symbolic - } yield EncryptionPublicKey.create(format, key, keySpec).value) - - // TODO(#14515) Check that the generator is exhaustive - implicit val publicKeyArb: Arbitrary[PublicKey] = Arbitrary( - Gen.oneOf(Arbitrary.arbitrary[SigningPublicKey], Arbitrary.arbitrary[EncryptionPublicKey]) - ) - - // Test key intended for signing an unassignment result message. 
- lazy val testSigningKey: SigningPublicKey = - crypto.generateSymbolicSigningKey(usage = SigningKeyUsage.ProtocolOnly) - - def sign( - signingKeyId: Fingerprint, - strToSign: String, - purpose: HashPurpose, - usage: NonEmpty[Set[SigningKeyUsage]], - ): Signature = { - val hash = crypto.pureCrypto.build(purpose).addWithoutLengthPrefix(strToSign).finish() - crypto.sign(hash, signingKeyId, usage) - } - - implicit val symmetricKeyArb: Arbitrary[SymmetricKey] = - Arbitrary( - for { - format <- Arbitrary.arbitrary[CryptoKeyFormat] - key <- Arbitrary.arbitrary[ByteString] - scheme <- Arbitrary.arbitrary[SymmetricKeyScheme] - } yield new SymmetricKey(format, key, scheme) - ) - - implicit def asymmetricEncryptedArb[T]: Arbitrary[AsymmetricEncrypted[T]] = - Arbitrary( - for { - ciphertext <- Arbitrary.arbitrary[ByteString] - encryptionAlgorithmSpec <- Arbitrary.arbitrary[EncryptionAlgorithmSpec] - fingerprint <- Gen - .stringOfN(68, Gen.alphaChar) - .map(str => Fingerprint.tryFromString(String68.tryCreate(str))) - } yield AsymmetricEncrypted.apply(ciphertext, encryptionAlgorithmSpec, fingerprint) - ) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/HashBuilderTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/HashBuilderTest.scala deleted file mode 100644 index d737c6e7aa..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/HashBuilderTest.scala +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto - -import com.digitalasset.canton.BaseTest -import com.google.protobuf.ByteString -import org.scalatest.wordspec.AnyWordSpec - -class HashBuilderTest extends AnyWordSpec with BaseTest { - - def testHashBuilder(sutName: String, sut: HashPurpose => HashBuilder): Unit = { - def defaultBuilder: HashBuilder = sut(HashPurpose.MerkleTreeInnerNode) - - sutName should { - val builder1 = defaultBuilder - val hashEmpty = builder1.finish() - - "addWithoutLengthPrefix fail after finish" in { - assertThrows[IllegalStateException](builder1.addWithoutLengthPrefix(ByteString.EMPTY)) - } - "finish fail after finish" in { - assertThrows[IllegalStateException](builder1.finish()) - } - - val hashArrayL1 = - defaultBuilder.addWithoutLengthPrefix(ByteString.copyFrom(new Array[Byte](1))).finish() - "yield different hashes for empty and non-empty inputs" in { - assert(hashArrayL1.getCryptographicEvidence != hashEmpty.getCryptographicEvidence) - } - - val hashArrayL2 = - defaultBuilder.addWithoutLengthPrefix(ByteString.copyFrom(new Array[Byte](2))).finish() - "yield different hashes for longer inputs" in { - assert(hashArrayL2.getCryptographicEvidence != hashArrayL1.getCryptographicEvidence) - } - - val hashArrayL11 = - defaultBuilder - .addWithoutLengthPrefix(new Array[Byte](1)) - .addWithoutLengthPrefix(new Array[Byte](1)) - .finish() - "yield the same hash for the same concatenation of chunks" in { - assert(hashArrayL2.getCryptographicEvidence == hashArrayL11.getCryptographicEvidence) - } - - val hashEmpty2 = sut(HashPurpose.SequencedEventSignature).finish() - "yield different hashes for different purposes" in { - assert(hashEmpty.getCryptographicEvidence != hashEmpty2.getCryptographicEvidence) - } - - val builder2 = defaultBuilder - val builder3 = builder2.addWithoutLengthPrefix(new Array[Byte](1)) - "addWithoutLengthPrefix returns the modified builder" 
in { - assert(builder2 eq builder3) - } - } - } - - testHashBuilder( - "HashBuilderFromMessageDigest", - purpose => HashBuilderFromMessageDigest(HashAlgorithm.Sha256, purpose), - ) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/HashPurposeTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/HashPurposeTest.scala deleted file mode 100644 index d0076784b0..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/HashPurposeTest.scala +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.discard.Implicits.DiscardOps -import org.scalatest.wordspec.AnyWordSpec - -class HashPurposeTest extends AnyWordSpec with BaseTest { - "HashPurpose" should { - "allow purposes with different IDs" in { - HashPurpose(java.lang.Integer.MAX_VALUE - 1, "MAX-1").discard - HashPurpose(java.lang.Integer.MAX_VALUE, "MAX").discard - } - - "fail when two HashPurposes have the same ID" in { - assertThrows[IllegalArgumentException]( - HashPurpose(HashPurpose.MerkleTreeInnerNode.id, "duplicate") - ) - } - } -} - -object HashPurposeTest { - - /** Mockito argument matcher that matches any [[HashPurpose]]. */ - def anyHashPurpose: HashPurpose = { - org.mockito.ArgumentMatchers.any[Int] - TestHash.testHashPurpose - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/HashTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/HashTest.scala deleted file mode 100644 index aadc2ba2c5..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/HashTest.scala +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto - -import com.digitalasset.canton.BaseTest -import com.google.protobuf.ByteString -import org.scalatest.wordspec.AnyWordSpec - -class HashTest extends AnyWordSpec with BaseTest { - - private val longString = - (('a' to 'z').mkString + ('A' to 'Z').mkString + ('0' to '9').mkString + ":") * 10 - - forAll(HashAlgorithm.algorithms.values) { algorithm => - s"${algorithm.name}" should { - val hash = - Hash.digest(TestHash.testHashPurpose, ByteString.copyFromUtf8(longString), algorithm) - - "serialize and deserialize via bytestring" in { - Hash.fromByteString(hash.getCryptographicEvidence) shouldBe Right(hash) - } - - "serialize and deserialize via hexstring" in { - Hash.fromHexString(hash.toHexString) shouldBe Right(hash) - } - - } - - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/HmacTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/HmacTest.scala deleted file mode 100644 index 30c741c2ef..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/HmacTest.scala +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
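
HmacTest below round-trips an HMAC through its protobuf encoding; the underlying computation is standard. A plain-JCA sketch of HMAC-SHA-256 (Canton's Hmac/HmacSecret wrappers add scheme selection and serialization on top):

import javax.crypto.Mac
import javax.crypto.spec.SecretKeySpec

object HmacSketch extends App {
  def hmacSha256(secret: Array[Byte], message: Array[Byte]): Array[Byte] = {
    val mac = Mac.getInstance("HmacSHA256")
    mac.init(new SecretKeySpec(secret, "HmacSHA256"))
    mac.doFinal(message)
  }

  val tag = hmacSha256("key".getBytes, "message".getBytes)
  assert(tag.length == 32) // SHA-256 output size
}
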
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto - -import cats.syntax.either.* -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto -import com.google.protobuf.ByteString -import org.scalatest.wordspec.AnyWordSpec - -class HmacTest extends AnyWordSpec with BaseTest { - - private lazy val longString = PseudoRandom.randomAlphaNumericString(256) - private lazy val crypto = new SymbolicPureCrypto - - forAll(HmacAlgorithm.algorithms) { algorithm => - s"HMAC ${algorithm.name}" should { - - "serializing and deserializing via protobuf" in { - val secret = HmacSecret.generate(crypto) - val hmac = - Hmac - .compute(secret, ByteString.copyFromUtf8(longString), algorithm) - .valueOr(err => fail(err.toString)) - val hmacP = hmac.toProtoV30 - Hmac.fromProtoV30(hmacP).value shouldBe (hmac) - } - - } - - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/LtHash16Test.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/LtHash16Test.scala deleted file mode 100644 index 6e761bf64e..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/LtHash16Test.scala +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto - -import com.digitalasset.canton.BaseTest -import org.scalatest.wordspec.AnyWordSpec - -class LtHash16Test extends AnyWordSpec with BaseTest { - - "LtHash16" should { - // Golden test taken from - // https://github.com/facebook/folly/blob/993de57926e7b17306ac9c5c46781a15d1b04414/folly/experimental/crypto/test/LtHashTest.cpp - "pass the golden test" in { - val h = LtHash16() - h.add("a".getBytes) - h.add("b".getBytes) - h.add("hello".getBytes) - h.remove("b".getBytes) - val computed = h.hexString() - val expected = - 
"353cae6169e519eb9cf80edd2c5b33810276227e77a09030e3ac3b00299c9716c6b592b262b2b05ad82db539f23fc03baa1ffacc9704fe078219307c02c0f501c810895c19a771934855d091e30db8eba564596f071400fcca93b69115055c55e0b333b5583ec0068a219289b557be5b24cfa679ae8e20b9084c77eadab966e4f94239d5f671371aa17c41f0510aaaeb6e28fed0eb37b57c5ff8f6c64a0395ddb32d2948abee9ae84930ee0d43d015b2f577cadb558eef33e715f349114c1937817ff26b606f1f33a1f3b4a72eaa3b24573a78d06b315857a8295675ec2bfc9897b644f60d401c4315bea8a6ad410f77e3969aaa032d31526df0c271665647c98f1e4d3946b659e47f45480c3eac9b0e0b742501595b24d5362d3f6f4ba8a4fcda7d87951ade9ec184a45c2fd5bff5282835c29071551e96d940c3ed19bb3124c3b37080dc3c80bc22f61b431195b9489bed3244e0e522bf8f8c752145b01ee47701085ffa1238f3a1d5e778052b393330fff8b586d9399cced75d4d15697f9015174d3302d97b1cc55ae20cdb573d4061d2940b213a35808122e7d55bd53e2c9ba1779c8a19532ff1e65a440e871f96e086dce6693efba86e033f7e3b04069f9eeccc0f5c5947af0b04f5528be1b57bba0912eeb52fdd11f0cac0e40ae641bbc40207188adbfe13463c880e84016476facae56f7f6de26e7f508a277a409988aabec7f9bb552000e3f7a44f51ec5c7c98979a227403464797a06fae0d7aa951bb429cb9df4ed65a430a98e0c88f7d4e47e1256f17c4b126f05b885154507b3b80a2a1b6e1f43eea48b4b93cab0622bd002a25dd5d1b69fe05c11619837eba6edfac493d663409f5ce82762584205fe49e8f718fbc5a92823cd9a17c1c9a07ce9f2535c918c6ee0f0729b67eb0be8b5e0edc990260679fdf5a9991a6d62ec1d72f5e5a478dbf0e5cbd1703daf5f170411d0d7aca4921cb644ec1d86e02711d09359b0f2b45a5b9fe57e122add8b5ae27aaeb44aa77a9fe187a67ea7447b27d02b4bf41fc5024350dc8838fb8f977535ba4481569a74d90306e0c9979a9d149be950223d1ca5d425b9ec281ee3884d8e8a1ad0d00504f0f57ff35e0ee33d184f35fd28dfa348686fb926da95597fb947acc509a5cb7cfe1eeb33dfdf9b4b384346c862cfb198f6948a6f6d53a74848043c8b647076b0a90151bd40c58d32434ebf549aa92f4a5b7581b7ec6821ca3485cf8e2a6ce0f5e204ed5a92c84618c2828e5c6f222ec3c48e37dcada7ce28bba5c09740170d32aa004b43cde46d45f9912528a3a7a7f30fb6019548dd174b4d7b0bafb232920b972362db4d863a5e0a9e30a041ecb874a7acbd378ccb11ffbffcd086ad797be5b4de07859d0b1fb3e4835a84ea224940482a3849cf392528dfcf8920d4b4bfc40606e852d85b7bfd1f2723214969dab6adfb8c26dc5f51b1b043b8a25df1eadd90d1a23240b735943841ae4e13564ddb6f0f7dcac1db82a34ab9ca042f8c4690727c7a0fac98c10dac065a57dff8010e9d49ba3b801622e8b786bb44079ceecf61ff7cc07be8672c647b525ffea7c3fab95d40d9d36e220bb3a5292880faf05a8dd94e60a4ff0ccfc124d2dca03a85d0864bfa28cddb7bdcc83ff717239dae979596691b6e3062068e6ea442ebd354bc653b0e5b750bcbfaec275c77ab82bd3452e4776734df686d6bae946855a4659dd3566f48d0879a00c06a7ce81c0f234e0203ce68ffc9434f3f10281d76110887a4b460514f761b517f1d151d88724160fbeff7f69a5a23eae2bf48916ad55c084b908d955519a67096b94638fa10d8d153a60d0c44f2d9148ad549fb1e64ac423aac1fdc754bc44a69573578c6b881bca177698e68d6ffdc2d7d89469f2e1039e8e3b955581a56c15519590b65bd9bcc3b3b1a95d1d484c2585ebdfd8a15c737b436456934d9b8439d92d1212bf8799028780d9f35d208c093ba6506aff74979faa10fa807398e8fb769be070318caee6b4f5091d8d9254656d0a1e838ba73ed0f0c8e8d4a0d19f9e91340578baa7ce5aec9f73f8e26db9273c544f11d6b8e5e142f4a8ad70a9e21c3dc2c7b4403073c4722e0af775f98c37ae0645e1829dec574de3108f62965a5354aaa7695c1e4feab1fc8ccf9a5e2a7ed0758e411ea9ea25f4f659a36cc5aa0bed2a9ce4518cd1aa1b4ed94c2d62596059d20cd948a058b78ef9ad3c9e7c7c9fd433c42701aad7aff74fb14ea39812c3e68b6ca8585432ecd53a7dfeece8e6a73b0ecddabc8c9da37b140adee6308c540bedbcf77d49762e7efaeededf51966503315fb287b69a08854ec58fd41c2f214c3273cc48bc71718b801c27936c7fd339b7f78c2eda6835e7d532ef6496cbbc7b018cc48ed49e33c16e16d2bdc98f47f376208770b5d5b6e789b30ca55ad4e8cd09b6bd90b66e8d4abfd0fbc3e98fc28f3913e476161d0f0f7477d3ca066adce7567d1af90dc3415970199ada286e22ea9089
2da107c34d3745c5d5d3f290fbd2d0b64942117955e94b343517d76959f0764216ce27bc33e772fefe4a4820315d67b43302f73e8002cc6144c3f3ad66c307eb2f9e0192a00da9ddf262b600dd0a49721da25ca71997d17c3441cd9588c5469f6927966d06676f89243387f01ebd2094beac0716a2e486d85524c51ba2898abb8b8df3a169f93fe6333f4f6a868738969905ba55176f1b8055d19749c0c5122ab1eaf34b0eb458fc10a656811fe4a7bb588eac3450b6d61c7f634588996ef2d903478cde206f58c1a93069c7df80a28394d05e9a8ed99b5312e9cbec0a2dba2e3e3e24e854798e9dc8c09922fadb987e945a765e2f614993f2b56055481e3702371d4eb86c872ca65269125be61d86" - computed shouldBe expected - } - - "be commutative" in { - val h1 = LtHash16() - val h2 = LtHash16() - h1.add("a".getBytes) - h1.add("b".getBytes) - h2.add("b".getBytes) - h2.add("a".getBytes) - h1.get() shouldBe h2.get() - } - - "removal should be the inverse of addition" in { - val h = LtHash16() - val emptyBytes = h.getByteString() - val abc = "abc".getBytes() - h.add(abc) - val abcBytes = h.getByteString() - val defg = "defg".getBytes() - h.add(defg) - h.remove(defg) - h.getByteString() shouldBe abcBytes - h.remove(abc) - h.getByteString() shouldBe emptyBytes - } - - "correctly determine empty commitment bytes" in { - val h = LtHash16() - LtHash16.isNonEmptyCommitment(h.getByteString()) shouldBe false - } - - "correctly determine non-empty commitment bytes" in { - val h = LtHash16() - h.add("123".getBytes()) - LtHash16.isNonEmptyCommitment(h.getByteString()) shouldBe true - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/PasswordBasedEncryptionTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/PasswordBasedEncryptionTest.scala deleted file mode 100644 index 0e4ac6014a..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/PasswordBasedEncryptionTest.scala +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
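
PasswordBasedEncryptionTest below asserts that deriving a key twice from the same password and salt yields the same key. That determinism is what any PBKDF provides; here is a standard-library sketch (Canton's PbkdfScheme abstraction and its parameters are internal and may differ):

import javax.crypto.SecretKeyFactory
import javax.crypto.spec.PBEKeySpec

object PbkdfSketch extends App {
  def deriveKey(password: String, salt: Array[Byte], lengthBytes: Int): Array[Byte] = {
    // The iteration count is an illustrative choice; real deployments tune it.
    val spec = new PBEKeySpec(password.toCharArray, salt, 100000, lengthBytes * 8)
    SecretKeyFactory.getInstance("PBKDF2WithHmacSHA256").generateSecret(spec).getEncoded
  }

  val salt = Array.fill[Byte](16)(7)
  val k1 = deriveKey("hello world", salt, 32)
  val k2 = deriveKey("hello world", salt, 32)
  assert(k1.sameElements(k2)) // deterministic given the same password and salt
}
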
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto - -import com.digitalasset.canton.crypto.CryptoTestHelper.TestMessage -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.{BaseTest, FailOnShutdown, HasExecutionContext} -import com.google.protobuf.ByteString -import org.scalatest.wordspec.AsyncWordSpec - -trait PasswordBasedEncryptionTest extends FailOnShutdown { - this: AsyncWordSpec & BaseTest & HasExecutionContext => - - def pbeProvider( - supportedPbkdfSchemes: Set[PbkdfScheme], - supportedSymmetricKeySchemes: Set[SymmetricKeyScheme], - newCrypto: => FutureUnlessShutdown[PasswordBasedEncryptionOps & EncryptionOps], - ): Unit = - s"encrypt with passwords" should { - - forAll(supportedPbkdfSchemes) { pbkdfScheme => - forAll(supportedSymmetricKeySchemes) { symmetricKeyScheme => - s"generate symmetric key for $symmetricKeyScheme from password using $pbkdfScheme" in { - newCrypto.map { crypto => - val pbkey = crypto - .deriveSymmetricKey("hello world", symmetricKeyScheme, pbkdfScheme, saltO = None) - .valueOrFail("Failed to derive key from password") - - pbkey.salt.unwrap.size() shouldEqual pbkdfScheme.defaultSaltLengthInBytes - pbkey.key.key.size() shouldEqual symmetricKeyScheme.keySizeInBytes - } - } - - s"generate the same symmetric key in $symmetricKeyScheme for the same password when given the same salt using $pbkdfScheme" in { - newCrypto.map { crypto => - val pbkey1 = crypto - .deriveSymmetricKey("hello world", symmetricKeyScheme, pbkdfScheme, saltO = None) - .valueOrFail("Failed to derive key from password") - - val pbkey2 = crypto - .deriveSymmetricKey( - "hello world", - symmetricKeyScheme, - pbkdfScheme, - saltO = Some(pbkey1.salt), - ) - .valueOrFail("Failed to derive key from password") - - pbkey1.salt.unwrap shouldEqual pbkey2.salt.unwrap - pbkey1.key shouldEqual pbkey2.key - } - } - - s"encrypt and decrypt using a password with $symmetricKeyScheme and $pbkdfScheme" in { - newCrypto.map { crypto => - val message = TestMessage(ByteString.copyFromUtf8("foobar")) - val password = "hello world" - val encrypted = crypto - .encryptWithPassword( - message.toByteString, - password, - symmetricKeyScheme, - pbkdfScheme, - ) - .valueOrFail("Failed to encrypt with password") - - val decrypted = crypto - .decryptWithPassword(encrypted, password)(TestMessage.fromByteString) - .valueOrFail("Failed to decrypt") - - decrypted shouldEqual message - } - } - - s"encrypt with one password and fail to decrypt with another password using $symmetricKeyScheme and $pbkdfScheme" in { - newCrypto.map { crypto => - val message = TestMessage(ByteString.copyFromUtf8("foobar")) - val encrypted = crypto - .encryptWithPassword( - message.toByteString, - "hello world", - symmetricKeyScheme, - pbkdfScheme, - ) - .valueOrFail("Failed to encrypt with password") - - val decryptedE = crypto - .decryptWithPassword(encrypted, "hallo welt")(TestMessage.fromByteString) - - decryptedE.left.value shouldBe a[PasswordBasedEncryptionError.DecryptError] - } - } - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/PublicKeyValidationTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/PublicKeyValidationTest.scala deleted file mode 100644 index 185fbc30b2..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/PublicKeyValidationTest.scala +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or 
its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto - -import cats.syntax.either.* -import com.digitalasset.canton.BaseTest -import org.scalatest.wordspec.AsyncWordSpec - -import scala.concurrent.Future - -trait PublicKeyValidationTest extends BaseTest with CryptoTestHelper { this: AsyncWordSpec => - - private def modifyPublicKey( - publicKey: PublicKey, - newFormat: CryptoKeyFormat, - ): PublicKey = - publicKey match { - case epk: EncryptionPublicKey => - epk.replaceFormat(newFormat) - case spk: SigningPublicKey => - spk.replaceFormat(newFormat) - case _ => fail(s"unsupported key type") - } - - private def keyValidationTest[K <: PublicKey]( - supportedCryptoKeyFormats: Set[CryptoKeyFormat], - name: String, - newCrypto: => Future[Crypto], - newPublicKey: Crypto => Future[PublicKey], - ): Unit = - // change format - forAll(supportedCryptoKeyFormats) { format => - s"Validate $name public key with format \"$format\"" in { - for { - crypto <- newCrypto - publicKey <- newPublicKey(crypto) - newPublicKeyWithTargetFormat = modifyPublicKey(publicKey, format) - validationRes = CryptoKeyValidation.parseAndValidatePublicKey( - newPublicKeyWithTargetFormat, - errString => errString, - ) - } yield - if (format == publicKey.format || format == CryptoKeyFormat.Symbolic) - validationRes shouldEqual Either.unit - else - validationRes.left.value should include( - s"Failed to deserialize $format public key: KeyParseAndValidateError" - ) - } - } - - /** Test public key validation - */ - def publicKeyValidationProvider( - supportedSigningKeySpecs: Set[SigningKeySpec], - supportedEncryptionKeySpecs: Set[EncryptionKeySpec], - supportedCryptoKeyFormats: Set[CryptoKeyFormat], - newCrypto: => Future[Crypto], - ): Unit = - "Validate public keys" should { - forAll(supportedSigningKeySpecs) { signingKeySpec => - keyValidationTest[SigningPublicKey]( - supportedCryptoKeyFormats, - if (signingKeySpec.toString == "EC-P256") "EC-P256-Signing" else signingKeySpec.toString, - newCrypto, - crypto => - getSigningPublicKey( - crypto, - SigningKeyUsage.ProtocolOnly, - signingKeySpec, - ).failOnShutdown, - ) - } - - forAll(supportedEncryptionKeySpecs) { encryptionKeySpec => - keyValidationTest[EncryptionPublicKey]( - supportedCryptoKeyFormats, - if (encryptionKeySpec.toString == "EC-P256") "EC-P256-Encryption" - else encryptionKeySpec.toString, - newCrypto, - crypto => getEncryptionPublicKey(crypto, encryptionKeySpec).failOnShutdown, - ) - } - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/SaltTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/SaltTest.scala deleted file mode 100644 index ad4476c08d..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/SaltTest.scala +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.crypto.provider.symbolic.{SymbolicCrypto, SymbolicPureCrypto} -import org.scalatest.wordspec.AnyWordSpec - -class SaltTest extends AnyWordSpec with BaseTest { - - "Salt" should { - - "serialize and deserialize via protobuf" in { - val salt = TestSalt.generateSalt(0) - val saltP = salt.toProtoV30 - Salt.fromProtoV30(saltP).value shouldBe salt - } - - "generate fresh salt seeds" in { - val crypto = SymbolicCrypto.create( - testedReleaseProtocolVersion, - timeouts, - loggerFactory, - ) - val salt1 = SaltSeed.generate()(crypto.pureCrypto) - val salt2 = SaltSeed.generate()(crypto.pureCrypto) - - salt1 shouldBe a[SaltSeed] - salt1 should not equal salt2 - } - - "derive a salt" in { - val hmacOps = new SymbolicPureCrypto - val seedSalt = TestSalt.generateSeed(0) - val salt = Salt.deriveSalt(seedSalt, 0, hmacOps) - - // The derived salt must be different from the seed salt value - salt.value.unwrap should not equal seedSalt.unwrap - } - - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/TestFingerprint.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/TestFingerprint.scala deleted file mode 100644 index b7e92dcf0d..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/TestFingerprint.scala +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto - -import com.google.protobuf.ByteString - -object TestFingerprint { - - def generateFingerprint(id: String): Fingerprint = { - val hash = Hash.digest( - HashPurpose.PublicKeyFingerprint, - ByteString.copyFrom(id.getBytes), - HashAlgorithm.Sha256, - ) - Fingerprint.tryFromString(hash.toLengthLimitedHexString) - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/TestSalt.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/TestSalt.scala deleted file mode 100644 index 9665ba4c6d..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/TestSalt.scala +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto - -import cats.syntax.either.* - -object TestSalt { - - def generateSeed(index: Int): SaltSeed = - SaltSeed(TestHash.digest(index).unwrap) - - // Generates a deterministic salt for hashing based on the provided index - // Assumes TestHash uses SHA-256 - def generateSalt(index: Int): Salt = - Salt - .create(TestHash.digest(index).unwrap, SaltAlgorithm.Hmac(HmacAlgorithm.HmacSha256)) - .valueOr(err => throw new IllegalStateException(err.toString)) - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/deterministic/encryption/SP800HashDRBGSecureRandomTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/deterministic/encryption/SP800HashDRBGSecureRandomTest.scala deleted file mode 100644 index bd8c64f918..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/deterministic/encryption/SP800HashDRBGSecureRandomTest.scala +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto.deterministic.encryption - -import com.digitalasset.canton.BaseTest -import org.scalatest.wordspec.AnyWordSpec - -class SP800HashDRBGSecureRandomTest extends AnyWordSpec with BaseTest { - - "SP800HashDRBGSecureRandom" should { - - "generate the same bytes if the 'seed' used is the same" in { - val message = "test" - val numBytes = 1024 - - val newRandom1 = SP800HashDRBGSecureRandom(message.getBytes(), loggerFactory) - val newRandom2 = SP800HashDRBGSecureRandom(message.getBytes(), loggerFactory) - - val arrRandom1FirstBytes = new Array[Byte](numBytes) - val arrRandom1SecondBytes = new Array[Byte](numBytes) - val arrRandom2FirstBytes = new Array[Byte](numBytes) - val arrRandom2SecondBytes = new Array[Byte](numBytes) - - newRandom1.nextBytes(arrRandom1FirstBytes) - newRandom1.nextBytes(arrRandom1SecondBytes) - newRandom2.nextBytes(arrRandom2FirstBytes) - newRandom2.nextBytes(arrRandom2SecondBytes) - - arrRandom1FirstBytes shouldBe arrRandom2FirstBytes - arrRandom1SecondBytes shouldBe arrRandom2SecondBytes - arrRandom1FirstBytes should not be arrRandom1SecondBytes - } - - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/provider/jce/JceCryptoTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/provider/jce/JceCryptoTest.scala deleted file mode 100644 index a795f810a6..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/provider/jce/JceCryptoTest.scala +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
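A pattern worth noting in the deleted `TestSalt` and `SP800HashDRBGSecureRandomTest` sources above is deterministic test fixtures: all "random" inputs are derived from a small index or seed, so failures reproduce exactly. A self-contained sketch of the same idea (the `DeterministicFixture` object is hypothetical, not part of the codebase):

import java.security.MessageDigest

object DeterministicFixture {
  // Derive reproducible pseudo-random bytes from an index, mirroring how
  // TestSalt builds salts from a digest of the index rather than from a
  // nondeterministic RNG.
  def bytesFor(index: Int): Array[Byte] =
    MessageDigest.getInstance("SHA-256").digest(BigInt(index).toByteArray)

  def main(args: Array[String]): Unit = {
    assert(bytesFor(0).sameElements(bytesFor(0))) // same index, same bytes
    assert(!bytesFor(0).sameElements(bytesFor(1))) // distinct indices diverge
  }
}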
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto.provider.jce - -import com.digitalasset.canton.config.CryptoConfig -import com.digitalasset.canton.config.CryptoProvider.Jce -import com.digitalasset.canton.crypto.* -import com.digitalasset.canton.crypto.CryptoTestHelper.TestMessage -import com.digitalasset.canton.crypto.SigningKeySpec.EcSecp256k1 -import com.digitalasset.canton.crypto.kms.CommunityKmsFactory -import com.digitalasset.canton.crypto.store.CryptoPrivateStoreFactory -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.resource.MemoryStorage -import com.digitalasset.canton.tracing.NoReportingTracerProvider -import com.google.protobuf.ByteString -import org.scalatest.wordspec.AsyncWordSpec - -class JceCryptoTest - extends AsyncWordSpec - with SigningTest - with EncryptionTest - with PrivateKeySerializationTest - with PasswordBasedEncryptionTest - with RandomTest - with PublicKeyValidationTest - with CryptoKeyFormatMigrationTest { - - "JceCrypto" can { - - def jceCrypto(): FutureUnlessShutdown[Crypto] = - Crypto - .create( - CryptoConfig(provider = Jce), - new MemoryStorage(loggerFactory, timeouts), - CryptoPrivateStoreFactory.withoutKms(wallClock, parallelExecutionContext), - CommunityKmsFactory, // Does not matter for the test as we do not use KMS - testedReleaseProtocolVersion, - nonStandardConfig = false, - futureSupervisor, - wallClock, - executionContext, - timeouts, - loggerFactory, - NoReportingTracerProvider, - ) - .valueOrFail("failed to create crypto") - - behave like migrationTest( - // No legacy keys for secp256k1 - Jce.signingKeys.supported.filterNot(_ == EcSecp256k1), - Jce.encryptionKeys.supported, - jceCrypto(), - ) - - behave like signingProvider( - Jce.signingKeys.supported, - Jce.signingAlgorithms.supported, - Jce.supportedSignatureFormats, - jceCrypto(), - ) - behave like encryptionProvider( - Jce.encryptionAlgorithms.supported, - Jce.symmetric.supported, - jceCrypto(), - ) - behave like privateKeySerializerProvider( - Jce.signingKeys.supported, - Jce.encryptionKeys.supported, - jceCrypto(), - ) - - forAll( - Jce.encryptionAlgorithms.supported.filter(_.supportDeterministicEncryption) - ) { encryptionAlgorithmSpec => - forAll(encryptionAlgorithmSpec.supportedEncryptionKeySpecs.forgetNE) { keySpec => - s"Deterministic hybrid encrypt " + - s"with $encryptionAlgorithmSpec and a $keySpec key" should { - - val newCrypto = jceCrypto() - - behave like hybridEncrypt( - keySpec, - (message, publicKey) => - newCrypto.map(crypto => - crypto.pureCrypto.encryptDeterministicWith( - message, - publicKey, - encryptionAlgorithmSpec, - ) - ), - newCrypto, - ) - - "yield the same ciphertext for the same encryption" in { - val message = TestMessage(ByteString.copyFromUtf8("foobar")) - for { - crypto <- jceCrypto() - publicKey <- getEncryptionPublicKey(crypto, keySpec) - encrypted1 = crypto.pureCrypto - .encryptDeterministicWith( - message, - publicKey, - encryptionAlgorithmSpec, - ) - .valueOrFail("encrypt") - _ = assert(message.bytes != encrypted1.ciphertext) - encrypted2 = crypto.pureCrypto - .encryptDeterministicWith( - message, - publicKey, - encryptionAlgorithmSpec, - ) - .valueOrFail("encrypt") - _ = assert(message.bytes != encrypted2.ciphertext) - } yield encrypted1.ciphertext shouldEqual encrypted2.ciphertext - } - } - } - } - - behave like randomnessProvider(jceCrypto().map(_.pureCrypto)) - - behave like pbeProvider( - Jce.pbkdf.valueOrFail("no PBKDF schemes configured").supported, - 
Jce.symmetric.supported, - jceCrypto().map(_.pureCrypto), - ) - - behave like publicKeyValidationProvider( - Jce.signingKeys.supported, - Jce.encryptionKeys.supported, - Jce.supportedCryptoKeyFormats, - jceCrypto().failOnShutdown, - ) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/provider/symbolic/SymbolicCryptoTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/provider/symbolic/SymbolicCryptoTest.scala deleted file mode 100644 index 1f4d8d4b25..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/provider/symbolic/SymbolicCryptoTest.scala +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto.provider.symbolic - -import com.digitalasset.canton.crypto.{ - EncryptionTest, - PasswordBasedEncryptionTest, - PrivateKeySerializationTest, - RandomTest, - SigningTest, -} -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import org.scalatest.wordspec.AsyncWordSpec - -class SymbolicCryptoTest - extends AsyncWordSpec - with SigningTest - with EncryptionTest - with PrivateKeySerializationTest - with PasswordBasedEncryptionTest - with RandomTest { - - "SymbolicCrypto" can { - - def symbolicCrypto(): FutureUnlessShutdown[SymbolicCrypto] = - FutureUnlessShutdown.pure( - SymbolicCrypto.create( - testedReleaseProtocolVersion, - timeouts, - loggerFactory, - ) - ) - - behave like signingProvider( - SymbolicCryptoProvider.supportedSigningSpecs.keys.forgetNE, - SymbolicCryptoProvider.supportedSigningSpecs.algorithms.forgetNE, - SymbolicCryptoProvider.supportedSignatureFormats, - symbolicCrypto(), - ) - behave like encryptionProvider( - SymbolicCryptoProvider.supportedEncryptionSpecs.algorithms.forgetNE, - SymbolicCryptoProvider.supportedSymmetricKeySchemes, - symbolicCrypto(), - ) - behave like privateKeySerializerProvider( - SymbolicCryptoProvider.supportedSigningSpecs.keys.forgetNE, - SymbolicCryptoProvider.supportedEncryptionSpecs.keys.forgetNE, - symbolicCrypto(), - ) - behave like randomnessProvider(symbolicCrypto().map(_.pureCrypto)) - - behave like pbeProvider( - SymbolicCryptoProvider.supportedPbkdfSchemes, - SymbolicCryptoProvider.supportedSymmetricKeySchemes, - symbolicCrypto().map(_.pureCrypto), - ) - - // Symbolic crypto does not support Java key conversion, thus not tested - - // Symbolic crypto does not support public key validation, thus not tested - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStoreExtendedTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStoreExtendedTest.scala deleted file mode 100644 index 0384f59491..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStoreExtendedTest.scala +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
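Both `JceCryptoTest` and `SymbolicCryptoTest` above are thin shells over shared behaviour traits (`signingProvider`, `encryptionProvider`, `pbeProvider`, and so on), wired in with ScalaTest's `behave like`. For readers unfamiliar with the idiom, a minimal self-contained example follows; the trait and suite names are invented for illustration:

import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

// Behaviours are written once, against a by-name factory for the subject
// under test, so every provider implementation runs the same assertions.
trait ProviderBehaviours { this: AnyWordSpec with Matchers =>
  def lengthProvider(newInstance: => String): Unit =
    "report a non-negative length" in {
      newInstance.length should be >= 0
    }
}

// Each concrete suite picks only the behaviours that apply to its provider.
class ExampleProviderTest extends AnyWordSpec with Matchers with ProviderBehaviours {
  "ExampleProvider" can {
    behave like lengthProvider("example")
  }
}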
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto.store - -import cats.data.EitherT -import com.digitalasset.canton.crypto.KeyPurpose.{Encryption, Signing} -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto -import com.digitalasset.canton.crypto.{ - EncryptionPrivateKey, - Fingerprint, - KeyName, - PrivateKey, - SigningKeyUsage, - SigningPrivateKey, -} -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import org.scalatest.wordspec.AsyncWordSpec - -trait CryptoPrivateStoreExtendedTest extends CryptoPrivateStoreTest { - this: AsyncWordSpec => - - def cryptoPrivateStoreExtended( - newStore: => CryptoPrivateStoreExtended, - encrypted: Boolean, - ): Unit = { - - val crypto = SymbolicCrypto.create(testedReleaseProtocolVersion, timeouts, loggerFactory) - crypto.setRandomKeysFlag(true) - - val sigKey1Name: String = uniqueKeyName("sigKey1_") - val encKey1Name: String = uniqueKeyName("encKey1_") - - val sigKey1: SigningPrivateKey = - crypto.newSymbolicSigningKeyPair(SigningKeyUsage.ProtocolOnly).privateKey - val sigKey1WithName: SigningPrivateKeyWithName = - SigningPrivateKeyWithName(sigKey1, Some(KeyName.tryCreate(sigKey1Name))) - val sigKey1BytesWithName = - (sigKey1.toByteString(testedReleaseProtocolVersion.v), sigKey1WithName.name) - - val sigKey2: SigningPrivateKey = - crypto.newSymbolicSigningKeyPair(SigningKeyUsage.ProtocolOnly).privateKey - val sigKey2WithName: SigningPrivateKeyWithName = SigningPrivateKeyWithName(sigKey2, None) - val sigKey2BytesWithName = - (sigKey2.toByteString(testedReleaseProtocolVersion.v), sigKey2WithName.name) - - val encKey1: EncryptionPrivateKey = crypto.newSymbolicEncryptionKeyPair().privateKey - val encKey1WithName: EncryptionPrivateKeyWithName = - EncryptionPrivateKeyWithName(encKey1, Some(KeyName.tryCreate(encKey1Name))) - val encKey1BytesWithName = - (encKey1.toByteString(testedReleaseProtocolVersion.v), encKey1WithName.name) - - val encKey2: EncryptionPrivateKey = crypto.newSymbolicEncryptionKeyPair().privateKey - val encKey2WithName: EncryptionPrivateKeyWithName = EncryptionPrivateKeyWithName(encKey2, None) - val encKey2BytesWithName = - (encKey2.toByteString(testedReleaseProtocolVersion.v), encKey2WithName.name) - - def storePrivateKey( - store: CryptoPrivateStore, - privateKey: PrivateKey, - id: Fingerprint, - name: Option[KeyName], - ): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Unit] = - store match { - case extended: CryptoPrivateStoreExtended => - extended.storePrivateKey(privateKey, name) - case _ => - EitherT.leftT[FutureUnlessShutdown, Unit]( - CryptoPrivateStoreError.FailedToInsertKey( - id, - "crypto private store does not implement the necessary method to store a private key", - ) - ) - } - - behave like cryptoPrivateStore( - newStore, - storePrivateKey, - ) - - "store encryption keys correctly when added incrementally" in { - val store = newStore - for { - _ <- store - .storeDecryptionKey(encKey1, encKey1WithName.name) - .valueOrFailShutdown("store key 1") - _ <- store.storeDecryptionKey(encKey2, None).valueOrFailShutdown("store key 2") - result <- store.listPrivateKeys(Encryption, encrypted).valueOrFailShutdown("list keys") - } yield { - result.map(storedKey => (storedKey.data, storedKey.name)) shouldEqual Set( - encKey1BytesWithName, - encKey2BytesWithName, - ) - } - } - - "store signing keys correctly when added incrementally" in { - val store = newStore - for { - _ <- store.storeSigningKey(sigKey1, sigKey1WithName.name).valueOrFailShutdown("store key 1") - _ <- 
store.storeSigningKey(sigKey2, None).valueOrFailShutdown("store key 2") - result <- store.listPrivateKeys(Signing, encrypted).valueOrFailShutdown("list keys") - } yield { - result.map(storedKey => (storedKey.data, storedKey.name)) shouldEqual Set( - sigKey1BytesWithName, - sigKey2BytesWithName, - ) - } - } - - "idempotent store of encryption keys" in { - val store = newStore - for { - _ <- store - .storeDecryptionKey(encKey1, encKey1WithName.name) - .valueOrFailShutdown("store key 1 with name") - - // Should succeed - _ <- store - .storeDecryptionKey(encKey1, encKey1WithName.name) - .valueOrFailShutdown("store key 1 with name again") - - // Should fail due to different name - failedInsert <- store.storeDecryptionKey(encKey1, None).failOnShutdown.value - - result <- store - .listPrivateKeys(Encryption, encrypted) - .valueOrFailShutdown("list private keys") - } yield { - failedInsert.left.value shouldBe a[CryptoPrivateStoreError] - result.map(storedKey => (storedKey.data, storedKey.name)) shouldEqual Set( - encKey1BytesWithName - ) - } - } - - "idempotent store of signing keys" in { - val store = newStore - for { - _ <- store - .storeSigningKey(sigKey1, sigKey1WithName.name) - .valueOrFailShutdown("store key 1 with name") - - // Should succeed - _ <- store - .storeSigningKey(sigKey1, sigKey1WithName.name) - .valueOrFailShutdown("store key 1 with name again") - - // Should fail due to different name - failedInsert <- store.storeSigningKey(sigKey1, None).failOnShutdown.value - - result <- store.listPrivateKeys(Signing, encrypted).valueOrFailShutdown("list private keys") - } yield { - failedInsert.left.value shouldBe a[CryptoPrivateStoreError] - result.map(storedKey => (storedKey.data, storedKey.name)) shouldEqual Set( - sigKey1BytesWithName - ) - } - } - - "check if private key is encrypted" in { - val store = newStore - for { - _ <- store - .storeDecryptionKey(encKey1, encKey1WithName.name) - .valueOrFailShutdown("store key 1") - encryptedRes <- store.encrypted(encKey1.id).valueOrFailShutdown("encrypted") - } yield encryptedRes.isDefined shouldBe encrypted - } - - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStoreTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStoreTest.scala deleted file mode 100644 index 6dcc5d9388..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStoreTest.scala +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto.store - -import cats.data.EitherT -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.crypto.* -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import org.scalatest.wordspec.AsyncWordSpec - -trait CryptoPrivateStoreTest extends BaseTest { this: AsyncWordSpec => - - def uniqueKeyName(name: String): String = name + getClass.getSimpleName - - lazy val crypto: SymbolicCrypto = - SymbolicCrypto.create(testedReleaseProtocolVersion, timeouts, loggerFactory) - crypto.setRandomKeysFlag(true) - - lazy val sigKey1Name: String = uniqueKeyName("sigKey1_") - lazy val encKey1Name: String = uniqueKeyName("encKey1_") - - lazy val sigKey1: SigningPrivateKey = - crypto.newSymbolicSigningKeyPair(SigningKeyUsage.ProtocolOnly).privateKey - lazy val sigKey1WithName: SigningPrivateKeyWithName = - SigningPrivateKeyWithName(sigKey1, Some(KeyName.tryCreate(sigKey1Name))) - - lazy val encKey1: EncryptionPrivateKey = crypto.newSymbolicEncryptionKeyPair().privateKey - lazy val encKey1WithName: EncryptionPrivateKeyWithName = - EncryptionPrivateKeyWithName(encKey1, Some(KeyName.tryCreate(encKey1Name))) - - def cryptoPrivateStore( - newStore: => CryptoPrivateStore, - storePrivateKey: ( - CryptoPrivateStore, - PrivateKey, - Fingerprint, - Option[KeyName], - ) => EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Unit], - ): Unit = { - - "check existence of private key" in { - val store = newStore - for { - _ <- storePrivateKey(store, sigKey1, sigKey1.id, sigKey1WithName.name).failOnShutdown - _ <- storePrivateKey(store, encKey1, encKey1.id, encKey1WithName.name).failOnShutdown - signRes <- store.existsSigningKey(sigKey1.id).failOnShutdown - encRes <- store.existsDecryptionKey(encKey1.id).failOnShutdown - } yield { - signRes shouldBe true - encRes shouldBe true - } - } - - "filter signing keys based on usage" in { - val store = newStore - - val sigKey2: SigningPrivateKey = - crypto - .newSymbolicSigningKeyPair(SigningKeyUsage.NamespaceOnly) - .privateKey - val sigKey2WithName: SigningPrivateKeyWithName = - SigningPrivateKeyWithName(sigKey2, Some(KeyName.tryCreate(uniqueKeyName("sigKey2_")))) - - val sigKeyAllUsage: SigningPrivateKey = - crypto.newSymbolicSigningKeyPair(SigningKeyUsage.All).privateKey - val sigKeyAllUsageWithName: SigningPrivateKeyWithName = - SigningPrivateKeyWithName( - sigKeyAllUsage, - Some(KeyName.tryCreate(uniqueKeyName("sigKeyAllUsage_"))), - ) - - for { - _ <- storePrivateKey(store, sigKey1, sigKey1.id, sigKey1WithName.name).failOnShutdown - _ <- storePrivateKey(store, sigKey2, sigKey2.id, sigKey2WithName.name).failOnShutdown - protocolKeys <- store - .filterSigningKeys( - NonEmpty.mk(Seq, sigKey1.id, sigKey2.id), - SigningKeyUsage.ProtocolOnly, - ) - .failOnShutdown - authenticationKeys <- store - .filterSigningKeys( - NonEmpty.mk(Seq, sigKey1.id, sigKey2.id), - SigningKeyUsage.SequencerAuthenticationOnly, - ) - .failOnShutdown - - _ <- storePrivateKey( - store, - sigKeyAllUsage, - sigKeyAllUsage.id, - sigKeyAllUsageWithName.name, - ).failOnShutdown - filterProtocolWithOldAndNewKeys <- store - .filterSigningKeys( - NonEmpty.mk(Seq, sigKey1.id, sigKey2.id, sigKeyAllUsage.id), - SigningKeyUsage.ProtocolOnly, - ) - .failOnShutdown - filterAuthenticationWithOldAndNewKeys <- store - .filterSigningKeys( - NonEmpty.mk(Seq, sigKey1.id, sigKey2.id, sigKeyAllUsage.id), - 
SigningKeyUsage.NamespaceOnly, - ) - .failOnShutdown - - // remove the keys with usage - _ <- store.removePrivateKey(sigKey1.id).failOnShutdown - _ <- store.removePrivateKey(sigKey2.id).failOnShutdown - filterWithOldKeys <- store - .filterSigningKeys(NonEmpty.mk(Seq, sigKeyAllUsage.id), SigningKeyUsage.ProtocolOnly) - .failOnShutdown - } yield { - protocolKeys.loneElement shouldBe sigKey1.id - authenticationKeys shouldBe Seq.empty - filterProtocolWithOldAndNewKeys shouldBe Seq(sigKey1.id, sigKeyAllUsage.id) - filterAuthenticationWithOldAndNewKeys shouldBe Seq(sigKey2.id, sigKeyAllUsage.id) - filterWithOldKeys.loneElement shouldBe sigKeyAllUsage.id - } - } - - "delete key successfully" in { - val store = newStore - for { - _ <- storePrivateKey(store, sigKey1, sigKey1.id, sigKey1WithName.name).failOnShutdown - _ <- store.removePrivateKey(sigKey1.id).failOnShutdown - res <- store.existsSigningKey(sigKey1.id).failOnShutdown - } yield res shouldBe false - } - - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/store/CryptoPublicStoreTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/store/CryptoPublicStoreTest.scala deleted file mode 100644 index e411e87686..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/store/CryptoPublicStoreTest.scala +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto.store - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.crypto.* -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto -import org.scalatest.wordspec.AsyncWordSpec - -trait CryptoPublicStoreTest extends BaseTest { this: AsyncWordSpec => - - def cryptoPublicStore(newStore: => CryptoPublicStore, backedByDatabase: Boolean): Unit = { - - val crypto = SymbolicCrypto.create(testedReleaseProtocolVersion, timeouts, loggerFactory) - - val sigKey1: SigningPublicKey = - crypto.generateSymbolicSigningKey(Some("sigKey1"), SigningKeyUsage.ProtocolOnly) - val sigKey1WithName: SigningPublicKeyWithName = - SigningPublicKeyWithName(sigKey1, Some(KeyName.tryCreate("sigKey1"))) - val sigKey2: SigningPublicKey = - crypto.generateSymbolicSigningKey(Some("sigKey2"), SigningKeyUsage.ProtocolOnly) - val sigKey2WithName: SigningPublicKeyWithName = SigningPublicKeyWithName(sigKey2, None) - - val encKey1: EncryptionPublicKey = crypto.generateSymbolicEncryptionKey(Some("encKey1")) - val encKey1WithName: EncryptionPublicKeyWithName = - EncryptionPublicKeyWithName(encKey1, Some(KeyName.tryCreate("encKey1"))) - val encKey2: EncryptionPublicKey = crypto.generateSymbolicEncryptionKey(Some("encKey2")) - val encKey2WithName: EncryptionPublicKeyWithName = EncryptionPublicKeyWithName(encKey2, None) - - "save encryption keys correctly when added incrementally" in { - val store = newStore - for { - _ <- store.storeEncryptionKey(encKey1, encKey1WithName.name) - _ <- store.storeEncryptionKey(encKey2, None) - result <- store.encryptionKeys - result2 <- store.listEncryptionKeys - } yield { - result shouldEqual Set(encKey1, encKey2) - result2 shouldEqual Set(encKey1WithName, encKey2WithName) - } - }.failOnShutdown - - if (backedByDatabase) { - "not rely solely on cache" in { - val store = newStore - val separateStore = newStore - for { - _ <- store.storeEncryptionKey(encKey1, encKey1WithName.name) - _ <- 
store.storeEncryptionKey(encKey2, None) - result1 <- separateStore.encryptionKey(encKey1.fingerprint).value - result2 <- separateStore.encryptionKey(encKey2.fingerprint).value - - _ <- store.storeSigningKey(sigKey1, sigKey1WithName.name) - _ <- store.storeSigningKey(sigKey2, None) - result3 <- separateStore.signingKey(sigKey1.fingerprint).value - result4 <- separateStore.signingKey(sigKey2.fingerprint).value - } yield { - result1 shouldEqual Some(encKey1) - result2 shouldEqual Some(encKey2) - - result3 shouldEqual Some(sigKey1) - result4 shouldEqual Some(sigKey2) - } - }.failOnShutdown - } - - "save signing keys correctly when added incrementally" in { - val store = newStore - for { - _ <- store.storeSigningKey(sigKey1, sigKey1WithName.name) - _ <- store.storeSigningKey(sigKey2, None) - result <- store.signingKeys - result2 <- store.listSigningKeys - } yield { - result shouldEqual Set(sigKey1, sigKey2) - result2 shouldEqual Set(sigKey1WithName, sigKey2WithName) - } - }.failOnShutdown - - "delete public keys" in { - val store = newStore - for { - _ <- store.storeSigningKey(sigKey1, sigKey1WithName.name) - result1 <- store.signingKeys - _ <- store.deleteKey(sigKey1.id) - result2 <- store.signingKeys - _ <- store.storeSigningKey(sigKey1, None) - } yield { - result1 shouldEqual Set(sigKey1) - result2 shouldEqual Set() - } - }.failOnShutdown - - "idempotent store of encryption keys" in { - val store = newStore - for { - _ <- store.storeEncryptionKey(encKey1, encKey1WithName.name) - - // Should succeed - _ <- store.storeEncryptionKey(encKey1, encKey1WithName.name) - - // Should fail due to different name - _failedInsert <- loggerFactory.assertInternalErrorAsyncUS[IllegalStateException]( - store.storeEncryptionKey(encKey1, None), - _.getMessage shouldBe s"Existing public key for ${encKey1.id} is different than inserted key", - ) - - result <- store.listEncryptionKeys - } yield { - result shouldEqual Set(encKey1WithName) - } - }.failOnShutdown - - "idempotent store of signing keys" in { - val store = newStore - for { - _ <- store - .storeSigningKey(sigKey1, sigKey1WithName.name) - - // Should succeed - _ <- store - .storeSigningKey(sigKey1, sigKey1WithName.name) - - // Should fail due to different name - _failedInsert <- loggerFactory.assertInternalErrorAsyncUS[IllegalStateException]( - store.storeSigningKey(sigKey1, None), - _.getMessage should startWith( - s"Existing public key for ${sigKey1.id} is different than inserted key" - ), - ) - - result <- store.listSigningKeys - } yield { - result shouldEqual Set(sigKey1WithName) - } - }.failOnShutdown - - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPrivateStoreTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPrivateStoreTest.scala deleted file mode 100644 index 2dcb47fc8d..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPrivateStoreTest.scala +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto.store.db - -import com.digitalasset.canton.crypto.store.CryptoPrivateStoreExtendedTest -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.resource.DbStorage -import com.digitalasset.canton.store.db.{DbTest, H2Test, PostgresTest} -import com.digitalasset.canton.tracing.TraceContext -import org.scalatest.wordspec.AsyncWordSpec - -trait DbCryptoPrivateStoreTest extends AsyncWordSpec with CryptoPrivateStoreExtendedTest { - this: DbTest => - - override def cleanDb( - storage: DbStorage - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - import storage.api.* - - /* We delete all private keys that ARE NOT encrypted (wrapper_key_id == NULL). - This conditional delete is to avoid conflicts with the encrypted crypto private store tests. */ - storage.update( - DBIO.seq( - sqlu"delete from common_crypto_private_keys where wrapper_key_id IS NULL" - ), - operationName = s"${this.getClass}: Delete from private crypto table", - ) - } - - "DbCryptoPrivateStore" can { - behave like cryptoPrivateStoreExtended( - new DbCryptoPrivateStore(storage, testedReleaseProtocolVersion, timeouts, loggerFactory), - encrypted = false, - ) - } -} - -class CryptoPrivateStoreTestH2 extends DbCryptoPrivateStoreTest with H2Test - -class CryptoPrivateStoreTestPostgres extends DbCryptoPrivateStoreTest with PostgresTest diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPublicStoreTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPublicStoreTest.scala deleted file mode 100644 index 622ad347b0..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPublicStoreTest.scala +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto.store.db - -import com.digitalasset.canton.crypto.store.CryptoPublicStoreTest -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.resource.DbStorage -import com.digitalasset.canton.store.db.{DbTest, H2Test, PostgresTest} -import com.digitalasset.canton.tracing.TraceContext -import org.scalatest.wordspec.AsyncWordSpec - -trait DbCryptoPublicStoreTest extends AsyncWordSpec with CryptoPublicStoreTest { - this: DbTest => - - override def cleanDb( - storage: DbStorage - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - import storage.api.* - - storage.update( - DBIO.seq( - sqlu"truncate table common_crypto_public_keys" - ), - operationName = s"${this.getClass}: Truncate public crypto tables", - ) - } - - "DbCryptoPublicStore" can { - behave like cryptoPublicStore( - new DbCryptoPublicStore(storage, testedReleaseProtocolVersion, timeouts, loggerFactory), - backedByDatabase = true, - ) - } -} - -class CryptoPublicStoreTestH2 extends DbCryptoPublicStoreTest with H2Test - -class CryptoPublicStoreTestPostgres extends DbCryptoPublicStoreTest with PostgresTest diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/store/memory/CryptoPrivateStoreTestInMemory.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/store/memory/CryptoPrivateStoreTestInMemory.scala deleted file mode 100644 index 28dc9c36a8..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/store/memory/CryptoPrivateStoreTestInMemory.scala +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto.store.memory - -import com.digitalasset.canton.crypto.store.CryptoPrivateStoreExtendedTest -import org.scalatest.wordspec.AsyncWordSpec - -class CryptoPrivateStoreTestInMemory extends AsyncWordSpec with CryptoPrivateStoreExtendedTest { - "InMemoryCryptoPrivateStore" should { - behave like cryptoPrivateStoreExtended( - new InMemoryCryptoPrivateStore(testedReleaseProtocolVersion, loggerFactory), - encrypted = false, - ) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/store/memory/CryptoPublicStoreTestInMemory.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/store/memory/CryptoPublicStoreTestInMemory.scala deleted file mode 100644 index e8fc7feed8..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/store/memory/CryptoPublicStoreTestInMemory.scala +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto.store.memory - -import com.digitalasset.canton.crypto.store.CryptoPublicStoreTest -import org.scalatest.wordspec.AsyncWordSpec - -class CryptoPublicStoreTestInMemory extends AsyncWordSpec with CryptoPublicStoreTest { - "InMemoryCryptoPublicStore" should { - behave like cryptoPublicStore( - new InMemoryCryptoPublicStore(loggerFactory), - backedByDatabase = false, - ) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/sync/SyncCryptoTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/sync/SyncCryptoTest.scala deleted file mode 100644 index 7c7abb5528..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/sync/SyncCryptoTest.scala +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto.sync - -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.config.KmsConfig.Driver -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} -import com.digitalasset.canton.config.{CryptoConfig, CryptoProvider, SessionSigningKeysConfig} -import com.digitalasset.canton.crypto.kms.CommunityKmsFactory -import com.digitalasset.canton.crypto.signer.SyncCryptoSigner -import com.digitalasset.canton.crypto.store.CryptoPrivateStoreFactory -import com.digitalasset.canton.crypto.verifier.SyncCryptoVerifier -import com.digitalasset.canton.crypto.{ - Crypto, - Hash, - RequiredEncryptionSpecs, - RequiredSigningSpecs, - SigningKeyUsage, - SynchronizerCryptoClient, - TestHash, -} -import com.digitalasset.canton.protocol.StaticSynchronizerParameters -import com.digitalasset.canton.resource.{MemoryStorage, Storage} -import com.digitalasset.canton.topology.DefaultTestIdentities.{participant1, participant2} -import com.digitalasset.canton.topology.client.TopologySnapshot -import com.digitalasset.canton.topology.{ - DefaultTestIdentities, - PhysicalSynchronizerId, - SynchronizerId, - TestingIdentityFactory, - TestingTopology, - UniqueIdentifier, -} -import com.digitalasset.canton.tracing.NoReportingTracerProvider -import com.digitalasset.canton.version.ProtocolVersion -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import com.typesafe.config.ConfigValueFactory -import monocle.Monocle.toAppliedFocusOps -import org.scalatest.wordspec.AnyWordSpec - -trait SyncCryptoTest extends AnyWordSpec with BaseTest with HasExecutionContext { - protected val sessionSigningKeysConfig: SessionSigningKeysConfig - - // Use JceCrypto for the configured crypto schemes - private lazy val jceStaticSynchronizerParameters: StaticSynchronizerParameters = - jceStaticSynchronizerParametersWith() - - private def jceStaticSynchronizerParametersWith( - protocolVersion: ProtocolVersion = testedProtocolVersion - ): StaticSynchronizerParameters = StaticSynchronizerParameters( - requiredSigningSpecs = RequiredSigningSpecs( - CryptoProvider.Jce.signingAlgorithms.supported, - CryptoProvider.Jce.signingKeys.supported, - ), - requiredEncryptionSpecs = RequiredEncryptionSpecs( - CryptoProvider.Jce.encryptionAlgorithms.supported, - CryptoProvider.Jce.encryptionKeys.supported, - ), - requiredSymmetricKeySchemes = CryptoProvider.Jce.symmetric.supported, - requiredHashAlgorithms = CryptoProvider.Jce.hash.supported, - requiredCryptoKeyFormats = 
CryptoProvider.Jce.supportedCryptoKeyFormats, - requiredSignatureFormats = CryptoProvider.Jce.supportedSignatureFormats, - protocolVersion = protocolVersion, - serial = NonNegativeInt.zero, - ) - - protected lazy val otherSynchronizerId: PhysicalSynchronizerId = PhysicalSynchronizerId( - SynchronizerId( - UniqueIdentifier.tryFromProtoPrimitive("other::default") - ), - jceStaticSynchronizerParameters, - ) - - protected lazy val testingTopology: TestingIdentityFactory = - TestingTopology() - .withSynchronizers( - synchronizers = DefaultTestIdentities.physicalSynchronizerId, - otherSynchronizerId, - ) - .withSimpleParticipants(participant1, participant2) - .withStaticSynchronizerParams(jceStaticSynchronizerParameters) - .withCryptoConfig(cryptoConfigWithSessionSigningKeysConfig(sessionSigningKeysConfig)) - .build(crypto, loggerFactory) - - protected lazy val defaultUsage: NonEmpty[Set[SigningKeyUsage]] = SigningKeyUsage.ProtocolOnly - protected lazy val storage: Storage = new MemoryStorage(loggerFactory, timeouts) - - private val cryptoConfig: CryptoConfig = CryptoConfig() - - // we define a "fake" [[CryptoConfig]] with a session signing keys configuration to control whether - // the testing environment uses session signing keys or not. Although the actual `crypto` implementation - // used in the tests is a JCE provider (and not a real KMS-backed environment), this "fake" configuration - // allows us to simulate a KMS-like setup. By enabling session signing keys within this config, we can trick - // the system into behaving as if it's running in a KMS environment, which is useful for testing code paths - // that depend on the presence of session signing keys without needing a real KMS infrastructure. - protected def cryptoConfigWithSessionSigningKeysConfig( - sessionSigningKeys: SessionSigningKeysConfig - ): CryptoConfig = - cryptoConfig - .focus(_.kms) - .replace( - Some( - Driver( - "mock", - ConfigValueFactory.fromAnyRef(0), - sessionSigningKeys = sessionSigningKeys, - ) - ) - ) - .focus(_.provider) - .replace(CryptoProvider.Kms) - - protected lazy val crypto: Crypto = Crypto - .create( - cryptoConfig, - storage, - CryptoPrivateStoreFactory.withoutKms(wallClock, parallelExecutionContext), - CommunityKmsFactory, - testedReleaseProtocolVersion, - nonStandardConfig = false, - futureSupervisor, - wallClock, - executorService, - timeouts, - loggerFactory, - NoReportingTracerProvider, - ) - .valueOrFailShutdown("Failed to create crypto object") - .futureValue - - protected lazy val testSnapshot: TopologySnapshot = testingTopology.topologySnapshot() - - protected lazy val hash: Hash = TestHash.digest(0) - - protected lazy val p1: SynchronizerCryptoClient = - testingTopology.forOwnerAndSynchronizer(participant1) - protected lazy val p2: SynchronizerCryptoClient = - testingTopology.forOwnerAndSynchronizer(participant2) - - protected lazy val syncCryptoSignerP1: SyncCryptoSigner = p1.syncCryptoSigner - protected lazy val syncCryptoSignerP2: SyncCryptoSigner = p2.syncCryptoSigner - - protected lazy val syncCryptoVerifierP1: SyncCryptoVerifier = p1.syncCryptoVerifier - protected lazy val syncCryptoVerifierP2: SyncCryptoVerifier = p2.syncCryptoVerifier - - def syncCryptoSignerTest(): Unit = { - "correctly sign and verify a message" in { - val signature = syncCryptoSignerP1 - .sign( - testSnapshot, - hash, - defaultUsage, - ) - .valueOrFail("sign failed") - .futureValueUS - - syncCryptoVerifierP1 - .verifySignature( - testSnapshot, - hash, - participant1.member, - signature, - defaultUsage, - ) - 
.futureValueUS - .valueOrFail("verification failed") - } - - "correctly sign and verify message from distinct participants" in { - val signature = syncCryptoSignerP1 - .sign( - testSnapshot, - hash, - defaultUsage, - ) - .valueOrFail("sign failed") - .futureValueUS - - syncCryptoVerifierP2 - .verifySignature( - testSnapshot, - hash, - participant1.member, - signature, - defaultUsage, - ) - .valueOrFail("verification failed") - .futureValueUS - } - - "correctly sign and verify multiple messages" in { - - val signature_1 = syncCryptoSignerP1 - .sign( - testSnapshot, - hash, - defaultUsage, - ) - .valueOrFail("sign failed") - .futureValueUS - - val signature_2 = syncCryptoSignerP1 - .sign( - testSnapshot, - hash, - defaultUsage, - ) - .valueOrFail("sign failed") - .futureValueUS - - syncCryptoVerifierP1 - .verifySignatures( - testSnapshot, - hash, - p1.member, - NonEmpty.mk(Seq, signature_1, signature_2), - defaultUsage, - ) - .valueOrFail("verification failed") - .futureValueUS - - } - - "correctly sign and verify group signatures" in { - - val signature1 = syncCryptoSignerP1 - .sign( - testSnapshot, - hash, - defaultUsage, - ) - .valueOrFail("sign failed") - .futureValueUS - - val signature2 = syncCryptoSignerP2 - .sign( - testSnapshot, - hash, - defaultUsage, - ) - .valueOrFail("sign failed") - .futureValueUS - - syncCryptoVerifierP1 - .verifyGroupSignatures( - testSnapshot, - hash, - Seq(participant1.member, participant2.member), - PositiveInt.two, - "group", - NonEmpty.mk(Seq, signature1, signature2), - defaultUsage, - ) - .valueOrFail("verification failed") - .futureValueUS - - } - - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/sync/SyncCryptoWithLongTermKeysTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/sync/SyncCryptoWithLongTermKeysTest.scala deleted file mode 100644 index 2ab3ea330a..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/sync/SyncCryptoWithLongTermKeysTest.scala +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto.sync - -import com.digitalasset.canton.config.SessionSigningKeysConfig -import com.digitalasset.canton.crypto.signer.SyncCryptoSignerWithLongTermKeys -import com.digitalasset.canton.topology.DefaultTestIdentities.participant1 -import com.digitalasset.canton.topology.TestingTopology -import org.scalatest.wordspec.AnyWordSpec - -class SyncCryptoWithLongTermKeysTest extends AnyWordSpec with SyncCryptoTest { - // we explicitly disable any use of session signing keys - override protected lazy val sessionSigningKeysConfig: SessionSigningKeysConfig = - SessionSigningKeysConfig.disabled - - "A SyncCrypto with long-term keys" must { - - behave like syncCryptoSignerTest() - - "use correct sync crypto with long term keys" in { - syncCryptoSignerP1 shouldBe a[SyncCryptoSignerWithLongTermKeys] - } - - /* This test checks whether a node that does not use session keys can verify a signature - * sent by a node that uses session keys (with a signature delegation defined). 
- */ - "correctly verify signature that contains a delegation for a session key" in { - - // enable session keys just for signing - val testingTopologyWithSessionKeys = - TestingTopology() - .withSimpleParticipants(participant1) - .withCryptoConfig( - cryptoConfigWithSessionSigningKeysConfig(SessionSigningKeysConfig.default) - ) - .build(crypto, loggerFactory) - - val p1WithSessionKey = testingTopologyWithSessionKeys.forOwnerAndSynchronizer(participant1) - - val signature = p1WithSessionKey.syncCryptoSigner - .sign( - testingTopologyWithSessionKeys.topologySnapshot(), - hash, - defaultUsage, - ) - .valueOrFail("sign failed") - .futureValueUS - - signature.signatureDelegation should not be empty - - syncCryptoVerifierP1 - .verifySignature( - testSnapshot, - hash, - participant1.member, - signature, - defaultUsage, - ) - .valueOrFail("verification failed") - .futureValueUS - - } - - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/sync/SyncCryptoWithSessionKeysTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/sync/SyncCryptoWithSessionKeysTest.scala deleted file mode 100644 index 1ef88cd4e9..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/crypto/sync/SyncCryptoWithSessionKeysTest.scala +++ /dev/null @@ -1,344 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.crypto.sync - -import com.digitalasset.canton.concurrent.Threading -import com.digitalasset.canton.config.{PositiveFiniteDuration, SessionSigningKeysConfig} -import com.digitalasset.canton.crypto.signer.SyncCryptoSignerWithSessionKeys -import com.digitalasset.canton.crypto.{ - Signature, - SignatureDelegation, - SignatureDelegationValidityPeriod, - SynchronizerCryptoClient, -} -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.time.PositiveSeconds -import com.digitalasset.canton.topology.DefaultTestIdentities.participant1 -import com.digitalasset.canton.topology.client.TopologySnapshot -import org.scalatest.wordspec.AnyWordSpec - -import scala.concurrent.duration.FiniteDuration - -class SyncCryptoWithSessionKeysTest extends AnyWordSpec with SyncCryptoTest { - override protected lazy val sessionSigningKeysConfig: SessionSigningKeysConfig = - SessionSigningKeysConfig.default - - private lazy val validityDuration = sessionSigningKeysConfig.keyValidityDuration - - private def sessionKeysCache(p: SynchronizerCryptoClient) = - p.syncCryptoSigner - .asInstanceOf[SyncCryptoSignerWithSessionKeys] - .sessionKeysSigningCache - .asMap() - - private def sessionKeysVerificationCache(p: SynchronizerCryptoClient) = - p.syncCryptoVerifier.sessionKeysVerificationCache.asMap().map { case (id, (sD, _)) => (id, sD) } - - private def cleanUpSessionKeysCache(p: SynchronizerCryptoClient): Unit = { - p.syncCryptoSigner - .asInstanceOf[SyncCryptoSignerWithSessionKeys] - .sessionKeysSigningCache - .invalidateAll() - p.syncCryptoSigner.asInstanceOf[SyncCryptoSignerWithSessionKeys].pendingRequests.clear() - } - - private def cleanUpSessionKeysVerificationCache(p: SynchronizerCryptoClient): Unit = - p.syncCryptoVerifier.sessionKeysVerificationCache - .invalidateAll() - - private def cutOffDuration(p: SynchronizerCryptoClient) = - p.syncCryptoSigner.asInstanceOf[SyncCryptoSignerWithSessionKeys].cutOffDuration - - private def setSessionKeyEvictionPeriod( - p: SynchronizerCryptoClient, - 
newPeriod: FiniteDuration, - ): Unit = - p.syncCryptoSigner - .asInstanceOf[SyncCryptoSignerWithSessionKeys] - .sessionKeyEvictionPeriod - .set(newPeriod) - - /* Verify that a signature delegation is currently stored in the cache and contains the correct - * information: (1) it is signed by a long-term key, (2) the enclosing signature is correctly listed - * as being signed by a session key, and (3) the validity period is correct. - */ - private def checkSignatureDelegation( - topologySnapshot: TopologySnapshot, - signature: Signature, - p: SynchronizerCryptoClient = p1, - validityPeriodLength: PositiveFiniteDuration = - PositiveFiniteDuration.ofSeconds(validityDuration.underlying.toSeconds), - ): SignatureDelegation = { - - val cache = sessionKeysCache(p) - val (_, sessionKeyAndDelegation) = cache - .find { case (_, skD) => - signature.signatureDelegation.contains(skD.signatureDelegation) - } - .valueOrFail("no signature delegation") - - topologySnapshot - .signingKeys(p.member, defaultUsage) - .futureValueUS - .map(_.id) should contain( - sessionKeyAndDelegation.signatureDelegation.signature.authorizingLongTermKey - ) - - val sessionKeyId = sessionKeyAndDelegation.signatureDelegation.sessionKey.id - val validityPeriod = sessionKeyAndDelegation.signatureDelegation.validityPeriod - - // The signature contains the session key in the 'signedBy' field. - signature.signedBy shouldBe sessionKeyId - - // Verify it has the correct validity period - val margin = cutOffDuration(p).asJava.dividedBy(2) - validityPeriod shouldBe - SignatureDelegationValidityPeriod( - topologySnapshot.timestamp.minus(margin), - validityPeriodLength, - ) - - sessionKeyAndDelegation.signatureDelegation - } - - private def cleanCache(p: SynchronizerCryptoClient) = { - // make sure we start from a clean state - cleanUpSessionKeysCache(p) - cleanUpSessionKeysVerificationCache(p) - - sessionKeysCache(p) shouldBe empty - sessionKeysVerificationCache(p) shouldBe empty - } - - "A SyncCrypto with session keys" must { - - behave like syncCryptoSignerTest() - - "use correct sync crypto signer with session keys" in { - syncCryptoSignerP1 shouldBe a[SyncCryptoSignerWithSessionKeys] - cleanCache(p1) - } - - "correctly produce a signature delegation when signing a single message" in { - - val signature = syncCryptoSignerP1 - .sign( - testSnapshot, - hash, - defaultUsage, - ) - .valueOrFail("sign failed") - .futureValueUS - - val signatureDelegation = checkSignatureDelegation(testSnapshot, signature) - - syncCryptoVerifierP1 - .verifySignature( - testSnapshot, - hash, - participant1.member, - signature, - defaultUsage, - ) - .valueOrFail("verification failed") - .futureValueUS - - val (_, sDSigningCached) = sessionKeysCache(p1).loneElement - val (_, sDVerificationCached) = sessionKeysVerificationCache(p1).loneElement - - // make sure that nothing changed with the session key and signature delegation - sDSigningCached.signatureDelegation shouldBe signatureDelegation - sDSigningCached.signatureDelegation shouldBe sDVerificationCached - - } - - "use different session keys when signing and verifying messages on different synchronizers" in { - val p1OtherSynchronizer = - testingTopology.forOwnerAndSynchronizer( - owner = participant1, - synchronizerId = otherSynchronizerId, - ) - val syncCryptoSignerP1Other = p1OtherSynchronizer.syncCryptoSigner - - val signature = syncCryptoSignerP1Other - .sign( - testSnapshot, - hash, - defaultUsage, - ) - .valueOrFail("sign failed") - .futureValueUS - - val signatureDelegationOther = -
checkSignatureDelegation(testSnapshot, signature, p1OtherSynchronizer) - - sessionKeysCache(p1OtherSynchronizer).loneElement - - // it's different from the signature delegation for the other synchronizer - val (_, signatureDelegation) = sessionKeysCache(p1).loneElement - signatureDelegationOther.validityPeriod shouldBe signatureDelegation.signatureDelegation.validityPeriod - signatureDelegationOther.sessionKey should not be signatureDelegation.signatureDelegation.sessionKey - signatureDelegationOther.signature should not be signatureDelegation.signatureDelegation.signature - } - - "use a new session signing key when the cut-off period has elapsed" in { - - val (_, currentSessionKey) = sessionKeysCache(p1).loneElement - // select a timestamp that is after the cut-off period - val cutOffTimestamp = - currentSessionKey.signatureDelegation.validityPeriod - .computeCutOffTimestamp(cutOffDuration(p1).asJava) - - val afterCutOffSnapshot = - testingTopology.topologySnapshot(timestampOfSnapshot = cutOffTimestamp) - - val signature = syncCryptoSignerP1 - .sign( - afterCutOffSnapshot, - hash, - defaultUsage, - ) - .valueOrFail("sign failed") - .futureValueUS - - checkSignatureDelegation(afterCutOffSnapshot, signature) - - // There must be a second key in the cache because we used a different session key for the latest sign call. - // The previous key, although still valid, has exceeded its cutoff period. - sessionKeysCache(p1).size shouldBe 2 - - } - - "use a new session key if the long-term key is no longer active" in { - - val oldLongTermKeyId = testSnapshot - .signingKeys(participant1.member, defaultUsage) - .futureValueUS - .loneElement - .id - - testingTopology.getTopology().freshKeys.set(true) - - val newSnapshotWithFreshKeys = testingTopology.topologySnapshot() - val newLongTermKeyId = - newSnapshotWithFreshKeys - .signingKeys(participant1.member, defaultUsage) - .futureValueUS - .loneElement - .id - - newLongTermKeyId should not be oldLongTermKeyId - - val signature = syncCryptoSignerP1 - .sign( - newSnapshotWithFreshKeys, - hash, - defaultUsage, - ) - .valueOrFail("sign failed") - .futureValueUS - - checkSignatureDelegation(newSnapshotWithFreshKeys, signature) - - signature.signatureDelegation - .valueOrFail("no signature delegation") - .delegatingKeyId shouldBe newLongTermKeyId - - } - - "session signing key is removed from the cache after the eviction period" in { - cleanUpSessionKeysCache(p1) - - val newEvictionPeriod = PositiveSeconds.tryOfSeconds(5).toFiniteDuration - - setSessionKeyEvictionPeriod(p1, newEvictionPeriod) - sessionKeysCache(p1) shouldBe empty - - val signature = syncCryptoSignerP1 - .sign( - testSnapshot, - hash, - defaultUsage, - ) - .valueOrFail("sign failed") - .futureValueUS - - checkSignatureDelegation(testSnapshot, signature) - - Threading.sleep(newEvictionPeriod.toMillis + 100L) - - eventually() { - sessionKeysCache(p1).toSeq shouldBe empty - } - } - - "with decreasing timestamps we still use one session signing key" in { - - cleanCache(p1) - testingTopology.getTopology().freshKeys.set(false) - - /* This covers a test scenario where we receive a decreasing timestamp order of signing requests - and verifies that since the validity period of a key is set as ts-cutOff/2 to ts+x+cutOff/2 we - can still use the same session key created for the first request. 
- */ - - val signatureDelegation3 = syncCryptoSignerP1 - .sign( - testingTopology.topologySnapshot(timestampOfSnapshot = CantonTimestamp.ofEpochSecond(3)), - hash, - defaultUsage, - ) - .valueOrFail("sign failed") - .futureValueUS - .signatureDelegation - .valueOrFail("no signature delegation") - - val signatureDelegation2 = syncCryptoSignerP1 - .sign( - testingTopology.topologySnapshot(timestampOfSnapshot = CantonTimestamp.ofEpochSecond(2)), - hash, - defaultUsage, - ) - .valueOrFail("sign failed") - .futureValueUS - .signatureDelegation - .valueOrFail("no signature delegation") - - val signatureDelegation1 = syncCryptoSignerP1 - .sign( - testingTopology.topologySnapshot(timestampOfSnapshot = CantonTimestamp.ofEpochSecond(1)), - hash, - defaultUsage, - ) - .valueOrFail("sign failed") - .futureValueUS - .signatureDelegation - .valueOrFail("no signature delegation") - - signatureDelegation1 should equal(signatureDelegation2) - signatureDelegation2 should equal(signatureDelegation3) - - // enough time has elapsed and the session key is no longer valid - - val signatureDelegationNew = syncCryptoSignerP1 - .sign( - testingTopology.topologySnapshot(timestampOfSnapshot = - CantonTimestamp.Epoch - .addMicros(validityDuration.unwrap.toMicros) - .add(cutOffDuration(p1).asJava) - ), - hash, - defaultUsage, - ) - .valueOrFail("sign failed") - .futureValueUS - .signatureDelegation - .valueOrFail("no signature delegation") - - signatureDelegationNew should not equal signatureDelegation1 - - } - - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/ActionDescriptionTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/ActionDescriptionTest.scala deleted file mode 100644 index 2c0e7da8f1..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/ActionDescriptionTest.scala +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
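The decreasing-timestamps scenario above hinges on one piece of arithmetic: a session key minted for a request at timestamp ts is backdated by half the cut-off duration, so its validity window is [ts - cutOff/2, ts - cutOff/2 + keyValidity], and slightly earlier or reordered requests still land on the same key. A minimal sketch of that window, under assumed names (ValidityWindow, forRequest) standing in for SignatureDelegationValidityPeriod:

```scala
import java.time.{Duration, Instant}

// Hypothetical, simplified model of the signature-delegation validity window:
// a session key minted for a request at `ts` is backdated by half the cut-off
// duration, so slightly earlier or reordered requests still hit the same key.
final case class ValidityWindow(from: Instant, length: Duration) {
  def to: Instant = from.plus(length)

  def covers(ts: Instant): Boolean = !ts.isBefore(from) && !ts.isAfter(to)

  // Past this point the signer mints a fresh key although the old one is still valid.
  def cutOffTimestamp(cutOff: Duration): Instant = to.minus(cutOff.dividedBy(2))
}

object ValidityWindow {
  def forRequest(ts: Instant, keyValidity: Duration, cutOff: Duration): ValidityWindow =
    ValidityWindow(ts.minus(cutOff.dividedBy(2)), keyValidity)
}

object ValidityWindowExample extends App {
  val window = ValidityWindow.forRequest(
    ts = Instant.ofEpochSecond(3),
    keyValidity = Duration.ofMinutes(5),
    cutOff = Duration.ofSeconds(30),
  )
  // Requests at seconds 2 and 1 reuse the key minted for the request at second 3.
  println(window.covers(Instant.ofEpochSecond(2))) // true
  println(window.covers(Instant.ofEpochSecond(1))) // true
}
```

Under these assumptions, requests at epoch seconds 3, 2 and 1 all fall inside the window of the key minted at second 3, which is exactly why the three signatures in the test share one delegation.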
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.data - -import com.digitalasset.canton.data.ActionDescription.* -import com.digitalasset.canton.protocol.* -import com.digitalasset.canton.util.LfTransactionBuilder -import com.digitalasset.canton.util.LfTransactionBuilder.{defaultPackageId, defaultTemplateId} -import com.digitalasset.canton.version.RepresentativeProtocolVersion -import com.digitalasset.canton.{BaseTest, LfPackageName, LfPartyId, LfVersioned} -import com.digitalasset.daml.lf.data.Ref -import com.digitalasset.daml.lf.value.Value -import org.scalatest.wordspec.AnyWordSpec - -class ActionDescriptionTest extends AnyWordSpec with BaseTest { - - private val suffixedId: LfContractId = ExampleTransactionFactory.suffixedId(0, 0) - private val seed: LfHash = ExampleTransactionFactory.lfHash(5) - private val globalKey: LfGlobalKey = - LfGlobalKey - .build( - LfTransactionBuilder.defaultTemplateId, - Value.ValueInt64(10L), - LfPackageName.assertFromString("package-name"), - ) - .value - private val choiceName: LfChoiceName = LfChoiceName.assertFromString("choice") - - private val representativePV: RepresentativeProtocolVersion[ActionDescription.type] = - ActionDescription.protocolVersionRepresentativeFor(testedProtocolVersion) - - "An action description" should { - - "accept creation" when { - - "a valid fetch node is presented" in { - - val targetTemplateId = - Ref.Identifier(defaultPackageId, defaultTemplateId.qualifiedName) - - val actingParties = Set(LfPartyId.assertFromString("acting")) - - val node = ExampleTransactionFactory.fetchNode( - cid = suffixedId, - templateId = targetTemplateId, - actingParties = Set(LfPartyId.assertFromString("acting")), - ) - - val expected = FetchActionDescription( - inputContractId = suffixedId, - actors = actingParties, - byKey = false, - templateId = targetTemplateId, - interfaceId = None, - )(protocolVersionRepresentativeFor(testedProtocolVersion)) - - ActionDescription.fromLfActionNode( - node, - None, - Set.empty, - testedProtocolVersion, - ) shouldBe - Right(expected) - } - - } - - "reject creation" when { - "the choice argument cannot be serialized" in { - ExerciseActionDescription.create( - suffixedId, - templateId = defaultTemplateId, - choiceName, - None, - Set.empty, - ExampleTransactionFactory.veryDeepVersionedValue, - Set(ExampleTransactionFactory.submitter), - byKey = true, - seed, - failed = false, - representativePV, - ) shouldBe Left( - InvalidActionDescription( - "Failed to serialize chosen value: Provided Daml-LF value to encode exceeds maximum nesting level of 100" - ) - ) - } - - "the key value cannot be serialized" in { - LookupByKeyActionDescription.create( - LfVersioned( - ExampleTransactionFactory.transactionVersion, - LfGlobalKey - .build( - LfTransactionBuilder.defaultTemplateId, - ExampleTransactionFactory.veryDeepValue, - ExampleTransactionFactory.packageName, - ) - .value, - ), - representativePV, - ) shouldBe Left( - InvalidActionDescription( - "Failed to serialize key: Provided Daml-LF value to encode exceeds maximum nesting level of 100" - ) - ) - } - - "no seed is given when the node expects a seed" in { - ActionDescription.fromLfActionNode( - ExampleTransactionFactory.createNode(suffixedId), - None, - Set.empty, - testedProtocolVersion, - ) shouldBe - Left(InvalidActionDescription("No seed for a Create node given")) - - ActionDescription.fromLfActionNode( - ExampleTransactionFactory.exerciseNodeWithoutChildren(suffixedId), - None, - Set.empty, - testedProtocolVersion, - ) shouldBe - 
Left(InvalidActionDescription("No seed for an Exercise node given")) - } - - "a seed is given when the node does not expect one" in { - ActionDescription.fromLfActionNode( - ExampleTransactionFactory.fetchNode(suffixedId), - Some(seed), - Set.empty, - testedProtocolVersion, - ) shouldBe - Left(InvalidActionDescription("No seed should be given for a Fetch node")) - - ActionDescription.fromLfActionNode( - ExampleTransactionFactory - .lookupByKeyNode(globalKey, maintainers = Set(ExampleTransactionFactory.observer)), - Some(seed), - Set.empty, - testedProtocolVersion, - ) shouldBe Left(InvalidActionDescription("No seed should be given for a LookupByKey node")) - } - - "actors are not declared for a Fetch node" in { - ActionDescription.fromLfActionNode( - ExampleTransactionFactory.fetchNode(suffixedId, actingParties = Set.empty), - None, - Set.empty, - testedProtocolVersion, - ) shouldBe Left(InvalidActionDescription("Fetch node without acting parties")) - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/CantonTimestampSecondTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/CantonTimestampSecondTest.scala deleted file mode 100644 index 46739f3b4c..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/CantonTimestampSecondTest.scala +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.data - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.time.NonNegativeFiniteDuration -import org.scalatest.wordspec.AnyWordSpec - -import java.time.Instant - -class CantonTimestampSecondTest extends AnyWordSpec with BaseTest { - - "CantonTimestampSecond" should { - - "have factory method to build from Instant" in { - val i = Instant.parse("2022-12-12T12:00:00Z") - CantonTimestampSecond.fromInstant(i).value.underlying.toInstant shouldBe i - - CantonTimestampSecond - .fromInstant(Instant.parse("2022-12-12T12:00:00.500Z")) - .left - .value shouldBe a[String] - } - - "have factory method to build from CantonTimestamp" in { - val tsRounded = CantonTimestamp.fromInstant(Instant.parse("2022-12-12T12:00:00Z")).value - val tsNotRounded = tsRounded + NonNegativeFiniteDuration.tryOfMillis(500) - - CantonTimestampSecond - .fromCantonTimestamp(tsRounded) - .value - .underlying - .toInstant shouldBe tsRounded.toInstant - - CantonTimestampSecond - .fromCantonTimestamp(tsNotRounded) - .left - .value shouldBe a[String] - } - - "have floor factory method" in { - val instant0 = Instant.parse("1500-06-12T12:00:00Z") - val instant1 = Instant.parse("1500-06-12T12:00:00.200Z") - val instant2 = Instant.parse("1500-06-12T12:00:00.500Z") - val instant3 = Instant.parse("1500-06-12T12:00:00Z") - - CantonTimestampSecond - .floor(CantonTimestamp.fromInstant(instant0).value) - .toInstant shouldBe instant0 - - CantonTimestampSecond - .floor(CantonTimestamp.fromInstant(instant1).value) - .toInstant shouldBe instant0 - - CantonTimestampSecond - .floor(CantonTimestamp.fromInstant(instant2).value) - .toInstant shouldBe instant0 - - CantonTimestampSecond - .floor(CantonTimestamp.fromInstant(instant3).value) - .toInstant shouldBe instant3 - } - - "have ceil factory method" in { - val instant0 = Instant.parse("1500-06-12T12:00:00Z") - val instant1 = Instant.parse("1500-06-12T12:00:00.200Z") - val instant2 = Instant.parse("1500-06-12T12:00:00.500Z") - val 
instant3 = Instant.parse("1500-06-12T12:00:01Z")
-
-      CantonTimestampSecond
-        .ceil(CantonTimestamp.fromInstant(instant0).value)
-        .toInstant shouldBe instant0
-
-      CantonTimestampSecond
-        .ceil(CantonTimestamp.fromInstant(instant1).value)
-        .toInstant shouldBe instant3
-
-      CantonTimestampSecond
-        .ceil(CantonTimestamp.fromInstant(instant2).value)
-        .toInstant shouldBe instant3
-
-      CantonTimestampSecond
-        .ceil(CantonTimestamp.fromInstant(instant3).value)
-        .toInstant shouldBe instant3
-    }
-  }
-}
diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/CantonTimestampTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/CantonTimestampTest.scala
deleted file mode 100644
index b00ce52efc..0000000000
--- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/CantonTimestampTest.scala
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-package com.digitalasset.canton.data
-
-import com.digitalasset.canton.BaseTest
-import org.scalatest.wordspec.AnyWordSpec
-
-import java.time.Instant
-
-class CantonTimestampTest extends AnyWordSpec with BaseTest {
-
-  "assertFromInstant" should {
-
-    "not fail when the instant must lose precision" in {
-
-      val instantWithNanos = Instant.EPOCH.plusNanos(300L)
-      val cantonTimestamp = CantonTimestamp.assertFromInstant(instantWithNanos)
-      cantonTimestamp shouldEqual CantonTimestamp.Epoch
-    }
-  }
-
-  "out of bounds CantonTimestamp" should {
-
-    "throw exception for underflow" in {
-      assertThrows[IllegalArgumentException]({
-        val tooLow = CantonTimestamp.MinValue.getEpochSecond - 1
-        CantonTimestamp.ofEpochSecond(tooLow)
-      })
-    }
-
-    "throw exception for overflow" in {
-      assertThrows[IllegalArgumentException]({
-        val tooLarge = CantonTimestamp.MaxValue.getEpochSecond + 1
-        CantonTimestamp.ofEpochSecond(tooLarge)
-      })
-    }
-  }
-}
diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/ConcurrentHMapTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/ConcurrentHMapTest.scala
deleted file mode 100644
index 199438efed..0000000000
--- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/ConcurrentHMapTest.scala
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
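The two timestamp test suites above pin down the rounding rules: nanosecond precision is truncated to microseconds, and the whole-second variant converts via floor or ceil. A self-contained sketch of those semantics on plain java.time.Instant (behaviour inferred from the assertions, not taken from the production code):

```scala
import java.time.Instant
import java.time.temporal.ChronoUnit

object TimestampRounding extends App {
  // Assumed semantics, mirroring the tests above: the second-granularity type
  // rounds down (floor) or up (ceil) to the nearest whole second.
  def floorToSecond(i: Instant): Instant = i.truncatedTo(ChronoUnit.SECONDS)

  def ceilToSecond(i: Instant): Instant = {
    val floored = floorToSecond(i)
    if (floored == i) floored else floored.plusSeconds(1)
  }

  val half = Instant.parse("1500-06-12T12:00:00.500Z")
  println(floorToSecond(half)) // 1500-06-12T12:00:00Z
  println(ceilToSecond(half))  // 1500-06-12T12:00:01Z
}
```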
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.data - -import com.digitalasset.canton.BaseTest -import org.scalatest.wordspec.AnyWordSpec - -class ConcurrentHMapTest extends AnyWordSpec with BaseTest { - "ConcurrentHMap" should { - class HMapRelation[K, V] - implicit val stringToInt = new HMapRelation[String, Int] - implicit val intToBoolean = new HMapRelation[Int, Boolean] - - "behave like a mutable map" in { - val map = ConcurrentHMap[HMapRelation]("zero" -> 0) - - // get - map.get("zero") shouldBe Some(0) - map.get("zerro") shouldBe None - - // putIfAbsent - map.putIfAbsent(42, true) shouldBe None - map.putIfAbsent(42, false) shouldBe Some(true) - - // getOrElseUpdate - map.getOrElseUpdate("key", 1) shouldBe 1 - map.getOrElseUpdate("key", 2) shouldBe 1 - - // remove - map.remove(42) shouldBe Some(true) - map.get(42) shouldBe None - map.remove(42) shouldBe None - } - - "be type safe" in { - val map = ConcurrentHMap.empty[HMapRelation] - map.putIfAbsent(0, true) - - // Building - "ConcurrentHMap[HMapRelation](0 -> true)" should compile - "ConcurrentHMap[HMapRelation](0 -> 1)" shouldNot typeCheck - "ConcurrentHMap[HMapRelation](0.0 -> true)" shouldNot typeCheck - - // get - "map.get(0)" should compile - "map.get(0.0)" shouldNot typeCheck - - // getOrElseUpdate - "map.getOrElseUpdate(0, false)" should compile - "map.getOrElseUpdate(0, 0)" shouldNot typeCheck - "map.getOrElseUpdate(0.0, false)" shouldNot typeCheck - - // putIfAbsent - "map.putIfAbsent(0, false)" should compile - "map.putIfAbsent(0, 0)" shouldNot typeCheck - "map.putIfAbsent(0.0, false)" shouldNot typeCheck - - // remove - "map.remove(0)" should compile - "map.remove(0.0)" shouldNot typeCheck - } - - } - - "ConcurrentHMap limitations" should { - "not be typesafe if the relation is not a function" in { - /* - We use two evidences: - String -> String - String -> Double - */ - - class HMapRelation[K, V] - implicit val stringToString = new HMapRelation[String, String] - - val map = ConcurrentHMap.empty[HMapRelation] - - { - implicit val stringToDouble = new HMapRelation[String, Double] - map.putIfAbsent("42", 42.0) - } - - an[ClassCastException] should be thrownBy map.get("42").map(_.toLowerCase()) - } - - "not be typesafe if different keys are comparable" in { - final case class Key[A](x: Int) - - class HMapRelation[K, V] - val map = ConcurrentHMap.empty[HMapRelation] - - implicit val ev1 = new HMapRelation[Key[Int], Int] - implicit val ev2 = new HMapRelation[Key[String], String] - - // Key[Int](42) == Key[String](42) - map.putIfAbsent(Key[Int](42), 1) - - an[ClassCastException] should be thrownBy map.get(Key[String](42)).map(_.toLowerCase()) - } - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/ContractsReassignmentBatchTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/ContractsReassignmentBatchTest.scala deleted file mode 100644 index a8f01ccd97..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/ContractsReassignmentBatchTest.scala +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
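The "limitations" section of ConcurrentHMapTest above is effectively a specification: the map is untyped underneath, and type safety rests entirely on the implicit relation being a function from key type to value type, with no two key types comparing equal. A stripped-down sketch of the evidence pattern, using hypothetical Rel and HMap names:

```scala
import java.util.concurrent.ConcurrentHashMap

// Stripped-down sketch of an evidence-based heterogeneous map. The underlying
// store is untyped; the implicit `R[K, V]` is what ties each key type to a
// single value type, so the casts are sound only while that relation is a
// function and distinct key types never compare equal.
final class Rel[K, V]

final class HMap[R[_, _]] private (underlying: ConcurrentHashMap[Any, Any]) {
  def get[K, V](k: K)(implicit ev: R[K, V]): Option[V] =
    Option(underlying.get(k)).map(_.asInstanceOf[V])

  def putIfAbsent[K, V](k: K, v: V)(implicit ev: R[K, V]): Option[V] =
    Option(underlying.putIfAbsent(k, v)).map(_.asInstanceOf[V])

  def remove[K, V](k: K)(implicit ev: R[K, V]): Option[V] =
    Option(underlying.remove(k)).map(_.asInstanceOf[V])
}

object HMap {
  def empty[R[_, _]]: HMap[R] = new HMap(new ConcurrentHashMap[Any, Any]())
}

object HMapExample extends App {
  implicit val stringToInt: Rel[String, Int] = new Rel
  val map = HMap.empty[Rel]
  map.putIfAbsent("zero", 0)
  println(map.get("zero")) // Some(0)
  // map.get(0.0) does not compile: no Rel[Double, V] evidence in scope
}
```

Both ClassCastException cases above follow directly: a second evidence for the same key type, or two key types whose values compare equal, breaks the functional relation and makes the internal cast unsound.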
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.data - -import com.digitalasset.canton.protocol.{ - ContractMetadata, - ExampleTransactionFactory, - LfTemplateId, - SerializableContract, - Stakeholders, -} -import com.digitalasset.canton.{LfPackageName, LfPartyId, ReassignmentCounter} -import org.scalatest.EitherValues.* -import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.AnyWordSpec - -class ContractsReassignmentBatchTest extends AnyWordSpec with Matchers { - private val contract1 = ExampleTransactionFactory.asSerializable(contractId = - ExampleTransactionFactory.suffixedId(-1, 0) - ) - private val contract2 = contract1.copy(contractId = ExampleTransactionFactory.suffixedId(-1, 1)) - private val contract3 = contract1.copy(contractId = ExampleTransactionFactory.suffixedId(-1, 2)) - - private val templateId = contract1.rawContractInstance.contractInstance.unversioned.template - private val packageName = contract1.rawContractInstance.contractInstance.unversioned.packageName - private val stakeholders = Stakeholders(contract1.metadata) - private val counter = ReassignmentCounter(1) - - "ContractsReassignmentBatch.apply" in { - val batch = ContractsReassignmentBatch(contract1, counter) - batch.contractIds.toList shouldBe List(contract1.contractId) - batch.contracts.map(_.templateId) shouldBe Seq(templateId) - batch.contracts.map(_.packageName) shouldBe Seq(packageName) - batch.stakeholders shouldBe stakeholders - } - - "ContractsReassignmentBatch.create" when { - - "no contracts" in { - ContractsReassignmentBatch.create(Seq.empty) shouldBe Left( - ContractsReassignmentBatch.EmptyBatch - ) - } - - "just one contract" in { - val batch = ContractsReassignmentBatch - .create( - Seq( - (contract1, counter) - ) - ) - .value - batch.contractIds.toList shouldBe List(contract1.contractId) - batch.contracts.map(_.templateId) shouldBe Seq(templateId) - batch.contracts.map(_.packageName) shouldBe Seq(packageName) - batch.stakeholders shouldBe stakeholders - } - - "multiple homogenous contracts" in { - val batch = ContractsReassignmentBatch - .create( - Seq( - (contract1, counter), - (contract2, counter), - (contract3, counter), - ) - ) - .value - batch.contractIds.toList shouldBe List( - contract1.contractId, - contract2.contractId, - contract3.contractId, - ) - batch.stakeholders shouldBe stakeholders - } - - "contracts with different template ids" in { - val newTemplateId = LfTemplateId.assertFromString(templateId.toString + "_but_different") - val batch = ContractsReassignmentBatch - .create( - Seq( - (contract1, counter), - (setTemplateId(contract2, newTemplateId), counter), - ) - ) - .value - - batch.contracts.map(_.templateId) shouldBe Seq(templateId, newTemplateId) - } - - "contracts with different package names" in { - val newPackageName = LfPackageName.assertFromString("new_package_name") - val batch = ContractsReassignmentBatch - .create( - Seq( - (contract1, counter), - (setPackageName(contract2, newPackageName), counter), - ) - ) - .value - - batch.contracts.map(_.packageName) shouldBe Seq(packageName, newPackageName) - } - - "contracts with different stakeholders" in { - val newStakeholders = Stakeholders.tryCreate( - stakeholders = stakeholders.all ++ Set(LfPartyId.assertFromString("extra_party")), - signatories = stakeholders.signatories, - ) - ContractsReassignmentBatch.create( - Seq( - (contract1, counter), - (setStakeholders(contract2, newStakeholders), counter), - ) - ) shouldBe Left( - 
ContractsReassignmentBatch.DifferingStakeholders(Seq(stakeholders, newStakeholders)) - ) - } - } - - "ContractsReassignmentBatch.partition" when { - - "no contracts" in { - ContractsReassignmentBatch.partition(Seq.empty) shouldBe Seq.empty - } - - "just one contract" in { - val Seq(batch) = ContractsReassignmentBatch.partition( - Seq( - (contract1, counter) - ) - ): @unchecked - - batch.contractIds.toList shouldBe List(contract1.contractId) - batch.stakeholders shouldBe stakeholders - } - - "multiple homogenous contracts" in { - val Seq(batch) = ContractsReassignmentBatch - .partition( - Seq( - (contract1, counter), - (contract2, counter), - (contract3, counter), - ) - ): @unchecked - - batch.contractIds.toList shouldBe List( - contract1.contractId, - contract2.contractId, - contract3.contractId, - ) - batch.stakeholders shouldBe stakeholders - } - - "contracts with different template ids" in { - val newTemplateId = LfTemplateId.assertFromString(templateId.toString + "_but_different") - val Seq(batch) = ContractsReassignmentBatch - .partition( - Seq( - (contract1, counter), - (setTemplateId(contract2, newTemplateId), counter), - ) - ): @unchecked - - batch.contractIds.toList shouldBe List(contract1.contractId, contract2.contractId) - batch.stakeholders shouldBe stakeholders - batch.contracts.map(_.templateId).toSet shouldBe Set(templateId, newTemplateId) - batch.contracts.map(_.packageName).toSet shouldBe Set(packageName) - } - - "contracts with different package names" in { - val newPackageName = LfPackageName.assertFromString("z_new_package_name") - val Seq(batch) = ContractsReassignmentBatch - .partition( - Seq( - (contract1, counter), - (setPackageName(contract2, newPackageName), counter), - ) - ): @unchecked - - batch.contractIds.toList shouldBe List(contract1.contractId, contract2.contractId) - batch.contracts.map(_.templateId).toSet shouldBe Set(templateId) - batch.contracts.map(_.packageName).toSet shouldBe Set(packageName, newPackageName) - batch.stakeholders shouldBe stakeholders - } - - "contracts with different stakeholders" in { - val newStakeholders = Stakeholders.tryCreate( - stakeholders = stakeholders.all ++ Set(LfPartyId.assertFromString("extra_party")), - signatories = stakeholders.signatories, - ) - val Seq(batch1, batch2) = ContractsReassignmentBatch - .partition( - Seq( - (contract1, counter), - (setStakeholders(contract2, newStakeholders), counter), - ) - ) - .sortBy(_.stakeholders.all.size): @unchecked - - batch1.contractIds.toList shouldBe List(contract1.contractId) - batch1.stakeholders shouldBe stakeholders - - batch2.contractIds.toList shouldBe List(contract2.contractId) - batch2.stakeholders shouldBe newStakeholders - } - } - - private def setTemplateId( - contract: SerializableContract, - templateId: LfTemplateId, - ): SerializableContract = - ExampleTransactionFactory.asSerializable( - contractId = contract.contractId, - contractInstance = ExampleTransactionFactory.contractInstance(templateId = templateId), - metadata = contract.metadata, - ) - - private def setPackageName( - contract: SerializableContract, - packageName: LfPackageName, - ): SerializableContract = - ExampleTransactionFactory.asSerializable( - contractId = contract.contractId, - contractInstance = ExampleTransactionFactory.contractInstance(packageName = packageName), - metadata = contract.metadata, - ) - - private def setStakeholders( - contract: SerializableContract, - stakeholders: Stakeholders, - ): SerializableContract = - ExampleTransactionFactory.asSerializable( - contractId = 
contract.contractId, - contractInstance = contract.contractInstance, - metadata = ContractMetadata.tryCreate( - stakeholders.signatories, - stakeholders.all, - contract.metadata.maybeKeyWithMaintainersVersioned, - ), - ) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/DeduplicationPeriodSpec.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/DeduplicationPeriodSpec.scala deleted file mode 100644 index 9f0847abb6..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/DeduplicationPeriodSpec.scala +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.data - -import com.digitalasset.daml.lf.data.Time.Timestamp -import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.AnyWordSpec - -import java.time.Duration -import scala.util.Success - -class DeduplicationPeriodSpec extends AnyWordSpec with Matchers { - "calculating deduplication until" should { - val time = Timestamp.assertFromLong(100 * 1000 * 1000) - - "return expected result when sending duration" in { - val deduplicateUntil = DeduplicationPeriod.deduplicateUntil( - time, - DeduplicationPeriod.DeduplicationDuration(Duration.ofSeconds(3)), - ) - deduplicateUntil shouldEqual Success(time.add(Duration.ofSeconds(3))) - } - - "accept long durations" in { - noException should be thrownBy DeduplicationPeriod.DeduplicationDuration( - Duration.ofDays(365 * 10000) - ) - } - - "accept zero durations" in { - noException should be thrownBy DeduplicationPeriod.DeduplicationDuration( - Duration.ZERO - ) - } - - "not accept negative durations" in { - an[IllegalArgumentException] should be thrownBy DeduplicationPeriod.DeduplicationDuration( - Duration.ofSeconds(-1) - ) - } - - "accept microsecond durations" in { - noException should be thrownBy DeduplicationPeriod.DeduplicationDuration( - Duration.ofNanos(1000) - ) - } - - "not accept nanosecond durations" in { - an[IllegalArgumentException] should be thrownBy DeduplicationPeriod.DeduplicationDuration( - Duration.ofNanos(1001) - ) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/GenTransactionTreeTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/GenTransactionTreeTest.scala deleted file mode 100644 index db2373985a..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/GenTransactionTreeTest.scala +++ /dev/null @@ -1,669 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
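DeduplicationPeriodSpec above encodes a granularity rule that is easy to miss: since the underlying timestamps are microsecond-based, a deduplication duration must be non-negative and must not carry nanosecond remainders. A sketch of that validation as a standalone check (hypothetical helper; the real guard sits in the DeduplicationDuration constructor):

```scala
import java.time.Duration

object DeduplicationDurationCheck extends App {
  // Assumed rule, inferred from the tests above: durations must be
  // non-negative and at most microsecond-grained, because the underlying
  // timestamps have microsecond resolution.
  def validate(d: Duration): Either[String, Duration] =
    if (d.isNegative) Left(s"negative duration: $d")
    else if (d.getNano % 1000 != 0) Left(s"sub-microsecond precision: $d")
    else Right(d)

  println(validate(Duration.ofNanos(1000))) // Right(...): exactly one microsecond
  println(validate(Duration.ofNanos(1001))) // Left(...): stray nanosecond
  println(validate(Duration.ZERO))          // Right(...): zero is allowed
}
```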
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.data - -import cats.syntax.traverse.* -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.crypto.{CryptoPureApi, HashPurpose} -import com.digitalasset.canton.data.GenTransactionTree.ViewWithWitnessesAndRecipients -import com.digitalasset.canton.data.LightTransactionViewTree.InvalidLightTransactionViewTree -import com.digitalasset.canton.data.MerkleTree.{BlindSubtree, RevealIfNeedBe, RevealSubtree} -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.protocol.* -import com.digitalasset.canton.protocol.messages.EncryptedViewMessage -import com.digitalasset.canton.protocol.messages.EncryptedViewMessage.computeRandomnessLength -import com.digitalasset.canton.sequencing.protocol.{MemberRecipient, Recipients, RecipientsTree} -import com.digitalasset.canton.topology.ParticipantId -import com.digitalasset.canton.topology.client.PartyTopologySnapshotClient -import com.digitalasset.canton.topology.transaction.ParticipantPermission -import com.digitalasset.canton.{ - BaseTestWordSpec, - FailOnShutdown, - HasExecutionContext, - LfPartyId, - ProtocolVersionChecksAnyWordSpec, -} -import monocle.PIso - -import scala.annotation.nowarn - -@nowarn("msg=match may not be exhaustive") -class GenTransactionTreeTest - extends BaseTestWordSpec - with HasExecutionContext - with ProtocolVersionChecksAnyWordSpec - with FailOnShutdown { - - val factory: ExampleTransactionFactory = new ExampleTransactionFactory()() - - private def generateRandomKeysForSubviewHashes( - subviewHashes: Seq[ViewHash], - pureCrypto: CryptoPureApi, - ): Seq[ViewHashAndKey] = - subviewHashes.map(subviewHash => - ViewHashAndKey( - subviewHash, - pureCrypto.generateSecureRandomness( - EncryptedViewMessage.computeRandomnessLength(pureCrypto) - ), - ) - ) - - private def lightTransactionViewTreeWithRandomKeys( - tvt: FullTransactionViewTree, - pureCrypto: CryptoPureApi, - ): Either[String, LightTransactionViewTree] = - LightTransactionViewTree.fromTransactionViewTree( - tvt, - // we are not interested in the correctness of the subtree keys - generateRandomKeysForSubviewHashes(tvt.subviewHashes, pureCrypto) - .map(_.viewEncryptionKeyRandomness), - testedProtocolVersion, - ) - - private def allLightTransactionViewTreesWithRandomKeys( - allTransactionViewTrees: Seq[FullTransactionViewTree] - ): Seq[LightTransactionViewTree] = { - val pureCrypto = ExampleTransactionFactory.pureCrypto - allTransactionViewTrees - .traverse(lightTransactionViewTreeWithRandomKeys(_, pureCrypto)) - .valueOrFail("fail to create light view tree") - } - - forEvery(factory.standardHappyCases) { example => - s"$example" can { - val transactionTree = example.transactionTree - - "compute the correct sequence of transaction view trees" in { - transactionTree.allTransactionViewTrees shouldEqual example.transactionViewTrees - } - - forEvery(example.transactionViewTrees.zip(example.viewWithSubviews).zipWithIndex) { - case ((expectedTransactionViewTree, (expectedView, _)), index) => - s"blind the transaction tree to the $index-th transaction view tree" in { - transactionTree.transactionViewTree( - expectedTransactionViewTree.viewHash.toRootHash - ) shouldEqual expectedTransactionViewTree - } - - s"yield the correct view for the $index-th transaction view tree" in { - expectedTransactionViewTree.view shouldEqual expectedView - } - - val topLevelExpected = - example.rootTransactionViewTrees.contains(expectedTransactionViewTree) - s"yield that the 
$index-th transaction view tree has isTopLevel=$topLevelExpected" in { - expectedTransactionViewTree.isTopLevel shouldEqual topLevelExpected - } - } - - val fullInformeeTree = transactionTree.tryFullInformeeTree(testedProtocolVersion) - - val expectedInformeesAndThresholdByView = example.transactionViewTrees.map { viewTree => - val viewCommonData = viewTree.view.viewCommonData.tryUnwrap - viewTree.viewPosition -> viewCommonData.viewConfirmationParameters - }.toMap - - "compute the set of informees" in { - example.fullInformeeTree.allInformees shouldEqual example.allInformees - } - - "compute the full informee tree" in { - fullInformeeTree should equal(example.fullInformeeTree) - - fullInformeeTree.informeesAndThresholdByViewPosition shouldEqual expectedInformeesAndThresholdByView - } - - "be serialized and deserialized" in { - val fullInformeeTree = example.fullInformeeTree - FullInformeeTree.fromByteString(factory.cryptoOps, testedProtocolVersion)( - fullInformeeTree.toByteString - ) shouldEqual Right(fullInformeeTree) - - val randomnessLength = computeRandomnessLength(ExampleTransactionFactory.pureCrypto) - forAll( - allLightTransactionViewTreesWithRandomKeys( - example.transactionTree.allTransactionViewTrees - ) - ) { lt => - LightTransactionViewTree.fromTrustedByteString( - ((example.cryptoOps, randomnessLength), testedProtocolVersion) - )( - lt.toByteString - ) shouldBe Right(lt) - } - } - - "correctly reconstruct the full transaction view trees from the lightweight ones" in { - val allLightTrees = - allLightTransactionViewTreesWithRandomKeys( - example.transactionTree.allTransactionViewTrees - ) - val allTrees = example.transactionTree.allTransactionViewTrees - LightTransactionViewTree - .toFullViewTrees(PIso.id, testedProtocolVersion, factory.cryptoOps, topLevelOnly = false)( - allLightTrees - ) shouldBe (allTrees, Seq.empty, Seq.empty) - } - - "correctly reconstruct the top-level transaction view trees from the lightweight ones" in { - val allLightTrees = - allLightTransactionViewTreesWithRandomKeys( - example.transactionTree.allTransactionViewTrees - ) - val allTrees = example.transactionTree.allTransactionViewTrees.filter(_.isTopLevel) - - LightTransactionViewTree - .toFullViewTrees(PIso.id, testedProtocolVersion, factory.cryptoOps, topLevelOnly = true)( - allLightTrees - ) shouldBe (allTrees, Seq.empty, Seq.empty) - } - - "correctly reconstruct the top-level transaction view trees from the lightweight ones for each informee" in { - val topology = ExampleTransactionFactory.defaultTopologySnapshot - val pureCrypto = ExampleTransactionFactory.pureCrypto - - val allLightTrees = example.transactionTree - .allTransactionViewTreesWithRecipients(topology) - .valueOrFail("fail set up recipients for transaction view tree") - .futureValueUS - .map { case ViewWithWitnessesAndRecipients(tvt, witnesses, _, _) => - lightTransactionViewTreeWithRandomKeys(tvt, pureCrypto) - .valueOrFail("fail to create light transaction trees") -> witnesses - } - val allTrees = example.transactionTree.allTransactionViewTrees.toList - val allInformees = allLightTrees.map(_._1.informees).fold(Set.empty)(_.union(_)) - - forAll(allInformees) { inf => - val topLevelHashesForInf = allLightTrees - .filter(lts => - lts._2.unwrap.headOption.value.contains(inf) && lts._2.unwrap - .drop(1) - .forall(!_.contains(inf)) - ) - .map(_._1.viewHash) - .toSet - val topLevelForInf = allTrees.filter(t => topLevelHashesForInf.contains(t.viewHash)) - val allLightWeightForInf = - 
allLightTrees.filter(_._2.flatten.contains(inf)).map(_._1).toList - LightTransactionViewTree - .toFullViewTrees( - PIso.id, - testedProtocolVersion, - factory.cryptoOps, - topLevelOnly = true, - )( - allLightWeightForInf - ) shouldBe (topLevelForInf, Seq.empty, Seq.empty) - } - } - - "correctly report missing subviews" in { - val allLightTrees = - allLightTransactionViewTreesWithRandomKeys( - example.transactionTree.allTransactionViewTrees - ) - val removedLightTreeO = allLightTrees.find(_.viewPosition.position.sizeIs > 1) - val inputLightTrees = allLightTrees.filterNot(removedLightTreeO.contains) - val badLightTrees = inputLightTrees.filter(tree => - ViewPosition.isDescendant( - removedLightTreeO.fold(ViewPosition.root)(_.viewPosition), - tree.viewPosition, - ) - ) - - val allFullTrees = example.transactionTree.allTransactionViewTrees - val expectedFullTrees = allFullTrees.filter(tree => - !ViewPosition.isDescendant( - removedLightTreeO.fold(ViewPosition.root)(_.viewPosition), - tree.viewPosition, - ) - ) - - LightTransactionViewTree - .toFullViewTrees(PIso.id, testedProtocolVersion, factory.cryptoOps, topLevelOnly = false)( - inputLightTrees - ) shouldBe (expectedFullTrees, badLightTrees, Seq.empty) - } - - "correctly process duplicate views" in { - val allLightTrees = allLightTransactionViewTreesWithRandomKeys( - example.transactionTree.allTransactionViewTrees - ) - val allFullTrees = example.transactionTree.allTransactionViewTrees - - val inputLightTrees1 = allLightTrees.flatMap(tree => Seq(tree, tree)) - LightTransactionViewTree - .toFullViewTrees(PIso.id, testedProtocolVersion, factory.cryptoOps, topLevelOnly = false)( - inputLightTrees1 - ) shouldBe (allFullTrees, Seq.empty, allLightTrees) - - val inputLightTrees2 = allLightTrees ++ allLightTrees - LightTransactionViewTree - .toFullViewTrees(PIso.id, testedProtocolVersion, factory.cryptoOps, topLevelOnly = false)( - inputLightTrees2 - ) shouldBe (allFullTrees, Seq.empty, allLightTrees) - } - - "correctly process views in an unusual order" in { - val allLightTrees = allLightTransactionViewTreesWithRandomKeys( - example.transactionTree.allTransactionViewTrees - ) - val inputLightTrees = allLightTrees.sortBy(_.viewPosition.position.size) - val allFullTrees = example.transactionTree.allTransactionViewTrees - LightTransactionViewTree - .toFullViewTrees(PIso.id, testedProtocolVersion, factory.cryptoOps, topLevelOnly = false)( - inputLightTrees - ) shouldBe (allFullTrees, Seq.empty, Seq.empty) - } - } - } - - "A transaction tree" when { - - val singleCreateView = - factory.SingleCreate(ExampleTransactionFactory.lfHash(0)).rootViews.headOption.value - - // First check that the normal thing does not throw an exception. - GenTransactionTree.tryCreate(factory.cryptoOps)( - factory.submitterMetadata, - factory.commonMetadata, - factory.participantMetadata, - MerkleSeq.fromSeq(factory.cryptoOps, testedProtocolVersion)(Seq(singleCreateView)), - ) - - "several root views have the same hash" must { - "prevent creation" in { - GenTransactionTree.create(factory.cryptoOps)( - factory.submitterMetadata, - factory.commonMetadata, - factory.participantMetadata, - MerkleSeq.fromSeq(factory.cryptoOps, testedProtocolVersion)( - Seq(singleCreateView, singleCreateView) - ), - ) should matchPattern { - case Left(message: String) - if message.matches( - "A transaction tree must contain a hash at most once\\. " + - "Found the hash .* twice\\." 
- ) => - } - } - } - - "a view and a subview have the same hash" must { - "prevent creation" in { - val childViewCommonData = - singleCreateView.viewCommonData.tryUnwrap.copy(salt = factory.commonDataSalt(1)) - val childView = singleCreateView.tryCopy(viewCommonData = childViewCommonData) - val subviews = TransactionSubviews(Seq(childView))(testedProtocolVersion, factory.cryptoOps) - val parentView = singleCreateView.tryCopy(subviews = subviews) - - GenTransactionTree.create(factory.cryptoOps)( - factory.submitterMetadata, - factory.commonMetadata, - factory.participantMetadata, - MerkleSeq.fromSeq(factory.cryptoOps, testedProtocolVersion)(Seq(parentView)), - ) should matchPattern { - case Left(message: String) - if message.matches( - "A transaction tree must contain a hash at most once\\. " + - "Found the hash .* twice\\." - ) => - } - } - } - } - - "A transaction view tree" when { - - val example = factory.MultipleRootsAndViewNestings - - val rootViewTree = example.rootTransactionViewTrees(1) - val nonRootViewTree = example.transactionViewTrees(2) - - "everything is ok" must { - "pass sanity tests" in { - assert(rootViewTree.isTopLevel) - assert(!nonRootViewTree.isTopLevel) - } - } - - "fully blinded" must { - "reject creation" in { - val fullyBlindedTree = example.transactionTree.blind { - case _: GenTransactionTree => MerkleTree.RevealIfNeedBe - case _: CommonMetadata => MerkleTree.RevealSubtree - case _: ParticipantMetadata => MerkleTree.RevealSubtree - case _ => MerkleTree.BlindSubtree - }.tryUnwrap - - FullTransactionViewTree.create(fullyBlindedTree) shouldEqual Left( - "A transaction view tree must contain an unblinded view." - ) - } - } - - "fully unblinded" must { - "reject creation" in { - FullTransactionViewTree.create(example.transactionTree).left.value should startWith( - "A transaction view tree must not contain several unblinded views: " - ) - } - } - - "a subview of the represented view is blinded" must { - "reject creation" in { - val onlyView1Unblinded = rootViewTree.tree.blind { - case _: GenTransactionTree => RevealIfNeedBe - case v: TransactionView => - if (v == rootViewTree.view) MerkleTree.RevealIfNeedBe else MerkleTree.BlindSubtree - case _: MerkleTreeLeaf[_] => MerkleTree.RevealSubtree - }.tryUnwrap - - FullTransactionViewTree.create(onlyView1Unblinded).left.value should startWith( - "A transaction view tree must contain a fully unblinded view:" - ) - } - } - - "the submitter metadata is blinded, although view is top level" must { - "reject creation" in { - val submitterMetadataBlinded = rootViewTree.tree.blind { - case _: GenTransactionTree => RevealIfNeedBe - case _: SubmitterMetadata => MerkleTree.BlindSubtree - case _: TransactionView => MerkleTree.RevealSubtree - case _: MerkleTreeLeaf[_] => MerkleTree.RevealSubtree - }.tryUnwrap - - FullTransactionViewTree - .create(submitterMetadataBlinded) shouldEqual Left( - "The submitter metadata must be unblinded if and only if the represented view is top-level. " + - "Submitter metadata: blinded, isTopLevel: true" - ) - } - } - - "the submitter metadata is unblinded, although view is not top level" must { - "reject creation" in { - val submitterMetadata = example.transactionTree.submitterMetadata - - val submitterMetadataUnblinded = - nonRootViewTree.tree.copy(submitterMetadata = submitterMetadata) - - FullTransactionViewTree.create(submitterMetadataUnblinded) shouldEqual Left( - "The submitter metadata must be unblinded if and only if the represented view is top-level. 
" + - "Submitter metadata: unblinded, isTopLevel: false" - ) - } - } - - "the common metadata is blinded" must { - "reject creation" in { - val commonMetadataBlinded = rootViewTree.tree.blind { - case _: GenTransactionTree => RevealIfNeedBe - case _: CommonMetadata => MerkleTree.BlindSubtree - case _ => MerkleTree.RevealSubtree - }.tryUnwrap - - FullTransactionViewTree.create(commonMetadataBlinded) shouldEqual Left( - "The common metadata of a transaction view tree must be unblinded." - ) - } - } - - "the participant metadata is blinded" must { - "reject creation" in { - val participantMetadataBlinded = rootViewTree.tree.blind { - case _: GenTransactionTree => RevealIfNeedBe - case _: ParticipantMetadata => MerkleTree.BlindSubtree - case _ => MerkleTree.RevealSubtree - }.tryUnwrap - - FullTransactionViewTree.create(participantMetadataBlinded) shouldEqual Left( - "The participant metadata of a transaction view tree must be unblinded." - ) - } - } - } - - // Before v3, the subview hashes do not need to be passed at construction - "A light transaction view tree" when { - val example = factory.ViewInterleavings - - forEvery(example.transactionViewTrees.zipWithIndex) { case (tvt, index) => - val viewWithBlindedSubviews = tvt.view.tryCopy(subviews = tvt.view.subviews.blindFully) - val genTransactionTree = - tvt.tree.mapUnblindedRootViews(_.replace(tvt.viewHash, viewWithBlindedSubviews)) - - val dummyViewHash = ViewHash( - factory.cryptoOps.build(HashPurpose.MerkleTreeInnerNode).add("hummous").finish() - ) - val mangledSubviewHashes = - if (tvt.subviewHashes.isEmpty) Seq(dummyViewHash) - else tvt.subviewHashes.updated(0, dummyViewHash) - - "given consistent subview hashes" must { - s"pass sanity tests at creation (for the $index-th transaction view tree)" in { - val pureCrypto = ExampleTransactionFactory.pureCrypto - noException should be thrownBy LightTransactionViewTree - .tryCreate( - genTransactionTree, - generateRandomKeysForSubviewHashes(tvt.subviewHashes, pureCrypto), - testedProtocolVersion, - ) - } - } - - "given inconsistent subview hashes" must { - s"reject creation (for the $index-th transaction view tree)" in { - val pureCrypto = ExampleTransactionFactory.pureCrypto - an[InvalidLightTransactionViewTree] should be thrownBy LightTransactionViewTree - .tryCreate( - genTransactionTree, - generateRandomKeysForSubviewHashes(mangledSubviewHashes, pureCrypto), - testedProtocolVersion, - ) - - if (tvt.subviewHashes.nonEmpty) - an[InvalidLightTransactionViewTree] should be thrownBy LightTransactionViewTree - .tryCreate(genTransactionTree, Seq.empty, testedProtocolVersion) - } - } - } - } - - "A full informee tree" when { - - val example = factory.MultipleRootsAndViewNestings - - "global metadata is incorrectly blinded" must { - "reject creation" in { - def corruptGlobalMetadataBlinding(informeeTree: GenTransactionTree): GenTransactionTree = - informeeTree.copy( - submitterMetadata = ExampleTransactionFactory.blinded(factory.submitterMetadata), - commonMetadata = ExampleTransactionFactory.blinded(factory.commonMetadata), - participantMetadata = factory.participantMetadata, - ) - - val corruptedGlobalMetadataMessage = Left( - "The submitter metadata of a full informee tree must be unblinded. " + - "The common metadata of an informee tree must be unblinded. " + - "The participant metadata of an informee tree must be blinded." 
- ) - - val globalMetadataIncorrectlyBlinded1 = - corruptGlobalMetadataBlinding(example.fullInformeeTree.tree) - FullInformeeTree.create( - globalMetadataIncorrectlyBlinded1, - testedProtocolVersion, - ) shouldEqual corruptedGlobalMetadataMessage - - val globalMetadataIncorrectlyBlinded2 = - corruptGlobalMetadataBlinding(example.fullInformeeTree.tree) - FullInformeeTree.create( - globalMetadataIncorrectlyBlinded2, - testedProtocolVersion, - ) shouldEqual corruptedGlobalMetadataMessage - } - } - - "view metadata is incorrectly unblinded" must { - "reject creation" in { - val Seq(_, view1Unblinded) = example.transactionTree.rootViews.unblindedElements - val informeeTree = example.fullInformeeTree.tree - val Seq(_, view1) = informeeTree.rootViews.unblindedElements - - val view1WithParticipantDataUnblinded = - view1.tryCopy(viewParticipantData = view1Unblinded.viewParticipantData) - val rootViews = MerkleSeq.fromSeq(factory.cryptoOps, testedProtocolVersion)( - Seq(view1WithParticipantDataUnblinded) - ) - - val treeWithViewMetadataUnblinded = - informeeTree.copy(rootViews = rootViews) - - val corruptedViewMetadataMessage = "(?s)" + - "The view participant data in an informee tree must be blinded\\. Found .*\\." - - FullInformeeTree - .create(treeWithViewMetadataUnblinded, testedProtocolVersion) - .left - .value should fullyMatch regex corruptedViewMetadataMessage - - FullInformeeTree - .create(treeWithViewMetadataUnblinded, testedProtocolVersion) - .left - .value should fullyMatch regex corruptedViewMetadataMessage - } - } - - "a view is blinded" should { - "reject creation" in { - // Keep metadata of view0 and view1 unblinded, blind every other view - val hashesOfUnblindedViews = Set(example.view0.viewHash, example.view1.viewHash) - - val partiallyBlindedTree = - example.fullInformeeTree.tree.blind { - case _: GenTransactionTree => RevealIfNeedBe - case _: CommonMetadata => RevealSubtree - case _: SubmitterMetadata => RevealSubtree - - case v: TransactionView => - if (hashesOfUnblindedViews.contains(v.viewHash)) - RevealIfNeedBe // Necessary to reveal view0 and view1 - else BlindSubtree // This will blind every other view - case _: ViewCommonData => RevealSubtree // Necessary to reveal view0 and view1 - }.tryUnwrap - - FullInformeeTree - .create(partiallyBlindedTree, testedProtocolVersion) - .left - .value should fullyMatch regex "(?s)All views in a full informee tree must be unblinded\\. Found .*\\." - } - } - - "a view common data is blinded" should { - "reject creation" in { - val fullInformeeTree = example.fullInformeeTree.tree - val rootViews = fullInformeeTree.rootViews.unblindedElements - - val rootViewsWithCommonDataBlinded = - rootViews.map(view => - view.tryCopy(viewCommonData = ExampleTransactionFactory.blinded(view.viewCommonData)) - ) - - val viewCommonDataBlinded = - fullInformeeTree.copy(rootViews = - MerkleSeq.fromSeq(factory.cryptoOps, testedProtocolVersion)( - rootViewsWithCommonDataBlinded - ) - ) - - FullInformeeTree - .create(viewCommonDataBlinded, testedProtocolVersion) - .left - .value should fullyMatch regex "(?s)The view common data in a full informee tree must be unblinded\\. Found .*\\.\n" + - "The view common data in a full informee tree must be unblinded\\. Found .*\\." 
- } - } - } - - "Witnesses" must { - import GenTransactionTreeTest.* - - "correctly compute recipients from witnesses" in { - def mkWitnesses(setup: NonEmpty[Seq[Set[Int]]]): Witnesses = - Witnesses(setup.map(_.map(informee))) - - // Maps parties to participants; parties have IDs that start at 1, participants have IDs that start at 11 - val topologyMap = Map( - 1 -> Set(11), - 2 -> Set(12), - 3 -> Set(13), - 4 -> Set(14), - 5 -> Set(11, 12, 13, 15), - 6 -> Set(16), - ).map { case (partyId, participantIds) => - party(partyId) -> participantIds - .map(id => participant(id) -> ParticipantPermission.Submission) - .toMap - } - - val topology = mock[PartyTopologySnapshotClient] - when(topology.activeParticipantsOfParties(any[List[LfPartyId]])(anyTraceContext)) - .thenAnswer[Seq[LfPartyId]] { parties => - FutureUnlessShutdown.pure(topologyMap.collect { - case (party, map) if parties.contains(party) => (party, map.keySet) - }) - } - - val witnesses = mkWitnesses( - NonEmpty(Seq, Set(1, 2), Set(1, 3), Set(2, 4), Set(1, 2, 5), Set(6)) - ) - - witnesses - .toRecipients(topology) - .valueOr(err => fail(err.message)) - .futureValueUS shouldBe Recipients( - NonEmpty( - Seq, - RecipientsTree.ofRecipients( - NonEmpty.mk(Set, MemberRecipient(participant(16))), - Seq( - RecipientsTree.ofMembers( - NonEmpty(Set, 11, 12, 13, 15).map(participant), - Seq( - RecipientsTree.ofMembers( - NonEmpty.mk(Set, participant(12), participant(14)), - Seq( - RecipientsTree.ofRecipients( - NonEmpty.mk( - Set, - MemberRecipient(participant(11)), - MemberRecipient(participant(13)), - ), - Seq( - RecipientsTree.leaf(NonEmpty.mk(Set, participant(11), participant(12))) - ), - ) - ), - ) - ), - ) - ), - ), - ) - ) - } - } -} - -object GenTransactionTreeTest { - private[data] def party(i: Int): LfPartyId = LfPartyId.assertFromString(s"party$i::1") - - private[data] def informee(i: Int): LfPartyId = party(i) - - private[data] def participant(i: Int): ParticipantId = ParticipantId(s"participant$i") -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/GeneratorsData.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/GeneratorsData.scala deleted file mode 100644 index f183fe5a85..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/GeneratorsData.scala +++ /dev/null @@ -1,721 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
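The Witnesses test above builds a nested recipients tree: each witness layer is resolved to the participants hosting those parties, and the node for an outer layer wraps the tree built for the layers below it. A simplified, pure sketch of that fold, with hypothetical RecipientsNode and toRecipients stand-ins (the real Witnesses.toRecipients is effectful and fails on parties without active participants):

```scala
// Layers are ordered innermost view first; each fold step wraps the tree
// built so far inside the node for the next (outer) layer, so the root of the
// result corresponds to the outermost witness layer.
final case class RecipientsNode(members: Set[String], children: List[RecipientsNode])

def toRecipients(
    witnessLayers: List[Set[String]],      // party names, innermost layer first
    participantsOf: String => Set[String], // party -> hosting participants
): Option[RecipientsNode] =
  witnessLayers.foldLeft(Option.empty[RecipientsNode]) { (inner, parties) =>
    Some(RecipientsNode(parties.flatMap(participantsOf), inner.toList))
  }
```

With the test's topology, the layer {party1, party2, party5} resolves to participants 11, 12, 13 and 15 because party 5 is multi-hosted, and the root of the resulting tree is the outermost layer {party6}, i.e. participant 16.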
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.data - -import cats.syntax.functor.* -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} -import com.digitalasset.canton.crypto.{Salt, TestHash} -import com.digitalasset.canton.data.ActionDescription.{ - CreateActionDescription, - ExerciseActionDescription, - FetchActionDescription, - LookupByKeyActionDescription, -} -import com.digitalasset.canton.data.MerkleTree.VersionedMerkleTree -import com.digitalasset.canton.data.ViewPosition.{MerklePathElement, MerkleSeqIndex} -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.protocol.* -import com.digitalasset.canton.sequencing.protocol.{MediatorGroupRecipient, TimeProof} -import com.digitalasset.canton.time.TimeProofTestUtil -import com.digitalasset.canton.topology.{GeneratorsTopology, ParticipantId, PhysicalSynchronizerId} -import com.digitalasset.canton.util.ReassignmentTag.{Source, Target} -import com.digitalasset.canton.util.collection.SeqUtil -import com.digitalasset.canton.version.{ProtocolVersion, RepresentativeProtocolVersion} -import com.digitalasset.canton.{GeneratorsLf, LfInterfaceId, LfPackageId, LfPartyId, LfVersioned} -import com.digitalasset.daml.lf.value.Value.ValueInt64 -import magnolify.scalacheck.auto.* -import org.scalacheck.{Arbitrary, Gen} -import org.scalatest.EitherValues.* - -import scala.util.Random - -final class GeneratorsData( - protocolVersion: ProtocolVersion, - generatorsLf: GeneratorsLf, - generatorsProtocol: GeneratorsProtocol, - generatorsTopology: GeneratorsTopology, -) { - import com.digitalasset.canton.Generators.* - import generatorsLf.* - import com.digitalasset.canton.config.GeneratorsConfig.* - import com.digitalasset.canton.crypto.GeneratorsCrypto.* - import com.digitalasset.canton.data.GeneratorsDataTime.* - import com.digitalasset.canton.ledger.api.GeneratorsApi.* - import generatorsTopology.* - import generatorsProtocol.* - import org.scalatest.OptionValues.* - - // If this pattern match is not exhaustive anymore, update the generator below - { - ((_: MerklePathElement) match { - case _: ViewPosition.MerkleSeqIndex => () - case _: ViewPosition.MerkleSeqIndexFromRoot => - () // This one is excluded because it is not made to be serialized - }).discard - } - implicit val merklePathElementArg: Arbitrary[MerklePathElement] = Arbitrary( - Arbitrary.arbitrary[MerkleSeqIndex] - ) - - implicit val viewPositionArb: Arbitrary[ViewPosition] = Arbitrary( - Gen.listOf(merklePathElementArg.arbitrary).map(ViewPosition(_)) - ) - - implicit val commonMetadataArb: Arbitrary[CommonMetadata] = Arbitrary( - for { - psid <- Arbitrary.arbitrary[PhysicalSynchronizerId] - - mediator <- Arbitrary.arbitrary[MediatorGroupRecipient] - - salt <- Arbitrary.arbitrary[Salt] - uuid <- Gen.uuid - - hashOps = TestHash // Not used for serialization - } yield CommonMetadata - .create(hashOps)( - psid, - mediator, - salt, - uuid, - ) - ) - - implicit val participantMetadataArb: Arbitrary[ParticipantMetadata] = Arbitrary( - for { - ledgerTime <- Arbitrary.arbitrary[CantonTimestamp] - preparationTime <- Arbitrary.arbitrary[CantonTimestamp] - workflowIdO <- Gen.option(workflowIdArb.arbitrary) - salt <- Arbitrary.arbitrary[Salt] - - hashOps = TestHash // Not used for serialization - } yield ParticipantMetadata(hashOps)( - ledgerTime, - preparationTime, - workflowIdO, - salt, - protocolVersion, - ) - ) - - implicit val submitterMetadataArb: Arbitrary[SubmitterMetadata] = Arbitrary( - for { - actAs <- 
nonEmptySet(lfPartyIdArb).arbitrary - userId <- userIdArb.arbitrary - commandId <- commandIdArb.arbitrary - submittingParticipant <- Arbitrary.arbitrary[ParticipantId] - salt <- Arbitrary.arbitrary[Salt] - submissionId <- Gen.option(ledgerSubmissionIdArb.arbitrary) - dedupPeriod <- Arbitrary.arbitrary[DeduplicationPeriod] - maxSequencingTime <- Arbitrary.arbitrary[CantonTimestamp] - externalAuthorization <- Gen.option(Arbitrary.arbitrary[ExternalAuthorization]) - } yield SubmitterMetadata( - actAs, - userId, - commandId, - submittingParticipant, - salt, - submissionId, - dedupPeriod, - maxSequencingTime, - externalAuthorization, - hashOps = TestHash, // Not used for serialization - protocolVersion, - ) - ) - - implicit val viewConfirmationParametersArb: Arbitrary[ViewConfirmationParameters] = Arbitrary( - for { - informees <- Gen.containerOf[Set, LfPartyId](Arbitrary.arbitrary[LfPartyId]) - viewConfirmationParameters <- - Gen - .containerOf[Seq, Quorum](Arbitrary.arbitrary[Quorum](quorumArb(informees.toSeq))) - .map(ViewConfirmationParameters.tryCreate(informees, _)) - } yield viewConfirmationParameters - ) - - def quorumArb(informees: Seq[LfPartyId]): Arbitrary[Quorum] = Arbitrary( - for { - confirmersWeights <- Gen - .containerOfN[Seq, PositiveInt](informees.size, Arbitrary.arbitrary[PositiveInt]) - - random = new Random() - shuffledInformees = SeqUtil.randomSubsetShuffle( - informees.toIndexedSeq, - informees.size, - random, - ) - - confirmers = shuffledInformees.zip(confirmersWeights).toMap - threshold <- Arbitrary.arbitrary[NonNegativeInt] - } yield Quorum(confirmers, threshold) - ) - - implicit val viewCommonDataArb: Arbitrary[ViewCommonData] = Arbitrary( - for { - viewConfirmationParameters <- Arbitrary.arbitrary[ViewConfirmationParameters] - salt <- Arbitrary.arbitrary[Salt] - hashOps = TestHash // Not used for serialization - } yield ViewCommonData.tryCreate(hashOps)( - viewConfirmationParameters, - salt, - protocolVersion, - ) - ) - - private def createActionDescriptionGenFor( - rpv: RepresentativeProtocolVersion[ActionDescription.type] - ): Gen[CreateActionDescription] = - for { - contractId <- Arbitrary.arbitrary[LfContractId] - seed <- Arbitrary.arbitrary[LfHash] - } yield CreateActionDescription(contractId, seed)(rpv) - - private def exerciseActionDescriptionGenFor( - rpv: RepresentativeProtocolVersion[ActionDescription.type] - ): Gen[ExerciseActionDescription] = - for { - inputContractId <- Arbitrary.arbitrary[LfContractId] - - templateId <- Arbitrary.arbitrary[LfTemplateId] - - choice <- Arbitrary.arbitrary[LfChoiceName] - - interfaceId <- Gen.option(Arbitrary.arbitrary[LfInterfaceId]) - - packagePreference <- Gen.containerOf[Set, LfPackageId](Arbitrary.arbitrary[LfPackageId]) - - // We consider only this specific value because the goal is not exhaustive testing of LF (de)serialization - chosenValue <- Gen.long.map(ValueInt64.apply) - version <- Arbitrary.arbitrary[LfLanguageVersion] - - actors <- Gen.containerOf[Set, LfPartyId](Arbitrary.arbitrary[LfPartyId]) - seed <- Arbitrary.arbitrary[LfHash] - byKey <- Gen.oneOf(true, false) - failed <- Gen.oneOf(true, false) - - } yield ExerciseActionDescription.tryCreate( - inputContractId, - templateId, - choice, - interfaceId, - packagePreference, - LfVersioned(version, chosenValue), - actors, - byKey, - seed, - failed, - rpv, - ) - - private def fetchActionDescriptionGenFor( - rpv: RepresentativeProtocolVersion[ActionDescription.type] - ): Gen[FetchActionDescription] = - for { - inputContractId <- 
Arbitrary.arbitrary[LfContractId] - actors <- Gen.containerOf[Set, LfPartyId](Arbitrary.arbitrary[LfPartyId]) - byKey <- Gen.oneOf(true, false) - templateId <- Arbitrary.arbitrary[LfTemplateId] - interfaceId <- Gen.option(Arbitrary.arbitrary[LfInterfaceId]) - } yield FetchActionDescription(inputContractId, actors, byKey, templateId, interfaceId)(rpv) - - private def lookupByKeyActionDescriptionGenFor( - rpv: RepresentativeProtocolVersion[ActionDescription.type] - ): Gen[LookupByKeyActionDescription] = - for { - key <- Arbitrary.arbitrary[LfVersioned[LfGlobalKey]] - } yield LookupByKeyActionDescription.tryCreate(key, rpv) - - // If this pattern match is not exhaustive anymore, update the method below - { - ((_: ActionDescription) match { - case _: CreateActionDescription => () - case _: ExerciseActionDescription => () - case _: FetchActionDescription => () - case _: LookupByKeyActionDescription => () - }).discard - } - - implicit val actionDescriptionArb: Arbitrary[ActionDescription] = Arbitrary { - val rpv = ActionDescription.protocolVersionRepresentativeFor(protocolVersion) - - Gen.oneOf( - createActionDescriptionGenFor(rpv), - exerciseActionDescriptionGenFor(rpv), - fetchActionDescriptionGenFor(rpv), - lookupByKeyActionDescriptionGenFor(rpv), - ) - } - - private implicit val freeKeyArb: Arbitrary[FreeKey] = Arbitrary(for { - maintainers <- Gen.containerOf[Set, LfPartyId](Arbitrary.arbitrary[LfPartyId]) - } yield FreeKey(maintainers)) - - implicit val viewParticipantDataArb: Arbitrary[ViewParticipantData] = Arbitrary( - for { - actionDescription <- actionDescriptionArb.arbitrary - - coreInputs <- actionDescription match { - case ex: ExerciseActionDescription => - for { - c <- Gen - .zip( - generatorsProtocol - .serializableContractArb(canHaveEmptyKey = false) - .arbitrary - .map(_.copy(contractId = ex.inputContractId)), - Gen.oneOf(true, false), - ) - .map(InputContract.apply tupled) - - others <- Gen - .listOf( - Gen.zip( - generatorsProtocol.serializableContractArb(canHaveEmptyKey = false).arbitrary, - Gen.oneOf(true, false), - ) - ) - .map(_.map(InputContract.apply tupled)) - } yield (c +: others).groupBy(_.contractId).flatMap { case (_, contracts) => - contracts.headOption - } - - case fetch: FetchActionDescription => - generatorsProtocol - .serializableContractArb(canHaveEmptyKey = false) - .arbitrary - .map(c => - List(InputContract(c.copy(contractId = fetch.inputContractId), consumed = false)) - ) - case _: CreateActionDescription | _: LookupByKeyActionDescription => Gen.const(List.empty) - } - - createdCore <- actionDescription match { - case created: CreateActionDescription => - Gen - .zip( - generatorsProtocol - .serializableContractArb(canHaveEmptyKey = false) - .arbitrary, - Gen.oneOf(true, false), - ) - .map { case (c, rolledBack) => - List( - CreatedContract.tryCreate( - c.copy(contractId = created.contractId), - consumedInCore = false, - rolledBack = rolledBack, - ) - ) - } - - case _: ExerciseActionDescription => - Gen - .listOf( - Gen.zip( - generatorsProtocol.serializableContractArb(canHaveEmptyKey = false).arbitrary, - Gen.oneOf(true, false), - Gen.oneOf(true, false), - ) - ) - .map(_.map(CreatedContract.tryCreate tupled)) - // Deduplicating on contract id - .map( - _.groupBy(_.contract.contractId).flatMap { case (_, contracts) => - contracts.headOption - } - ) - - case _: LookupByKeyActionDescription | _: FetchActionDescription => Gen.const(List.empty) - } - - notTransient = (createdCore.map(_.contract.contractId) ++ coreInputs.map(_.contractId)).toSet - - 
createdInSubviewArchivedInCore <- Gen - .containerOf[Set, LfContractId]( - Arbitrary.arbitrary[LfContractId] - ) - // createdInSubviewArchivedInCore and notTransient should be disjoint - .map(_ -- notTransient) - - /* - Resolved keys - AssignedKey must correspond to a contract in core input - */ - coreInputWithResolvedKeys <- Gen.someOf(coreInputs) - assignedResolvedKeys <- Gen.sequence[List[ - (LfGlobalKey, LfVersioned[SerializableKeyResolution]) - ], (LfGlobalKey, LfVersioned[SerializableKeyResolution])](coreInputWithResolvedKeys.map { - contract => - // Unsafe .value is fine because we force the key to be defined with the generator above - val key = contract.contract.metadata.maybeKeyWithMaintainersVersioned.value - Gen - .zip(key, AssignedKey(contract.contractId)) - .map { case (LfVersioned(v, k), r) => (k.globalKey, LfVersioned(v, r)) } - }) - freeResolvedKeys <- actionDescription match { - case _: CreateActionDescription | _: FetchActionDescription => Gen.const(List.empty) - - case _: ExerciseActionDescription => - Gen.listOf( - Gen - .zip(Arbitrary.arbitrary[LfGlobalKey], Arbitrary.arbitrary[LfVersioned[FreeKey]]) - ) - - case LookupByKeyActionDescription(key) => - Arbitrary.arbitrary[LfVersioned[FreeKey]].map(res => List(key.unversioned -> res)) - } - - resolvedKeys = assignedResolvedKeys ++ freeResolvedKeys - rollbackContext <- Arbitrary.arbitrary[RollbackContext] - salt <- Arbitrary.arbitrary[Salt] - - hashOps = TestHash // Not used for serialization - } yield ViewParticipantData.tryCreate(hashOps)( - coreInputs.map(contract => (contract.contractId, contract)).toMap, - createdCore.toSeq, - createdInSubviewArchivedInCore, - resolvedKeys.toMap, - actionDescription, - rollbackContext, - salt, - protocolVersion, - ) - ) - - // If this pattern match is not exhaustive anymore, update the generator below - { - ((_: ViewType) match { - case ViewType.TransactionViewType => () - case _: ViewType.ReassignmentViewType => () - case _: ViewTypeTest => () // Only for tests, so we don't use it in the generator - }).discard - } - implicit val viewTypeArb: Arbitrary[ViewType] = Arbitrary( - Gen.oneOf[ViewType]( - ViewType.TransactionViewType, - ViewType.AssignmentViewType, - ViewType.UnassignmentViewType, - ) - ) - - private val transactionViewWithEmptyTransactionSubviewArb: Arbitrary[TransactionView] = Arbitrary( - for { - viewCommonData <- viewCommonDataArb.arbitrary - viewParticipantData <- viewParticipantDataArb.arbitrary - hashOps = TestHash - emptySubviews = TransactionSubviews.empty( - protocolVersion, - hashOps, - ) // empty TransactionSubviews - } yield TransactionView.tryCreate(hashOps)( - viewCommonData = viewCommonData, - viewParticipantData = - viewParticipantData.blindFully, // The view participant data in an informee tree must be blinded - subviews = emptySubviews, - protocolVersion, - ) - ) - - implicit val transactionViewArb: Arbitrary[TransactionView] = Arbitrary( - for { - viewCommonData <- viewCommonDataArb.arbitrary - viewParticipantData <- viewParticipantDataArb.arbitrary - hashOps = TestHash - transactionViewWithEmptySubview <- - transactionViewWithEmptyTransactionSubviewArb.arbitrary - subviews = TransactionSubviews - .apply(Seq(transactionViewWithEmptySubview))(protocolVersion, hashOps) - } yield TransactionView.tryCreate(hashOps)( - viewCommonData = viewCommonData, - viewParticipantData = viewParticipantData, - subviews = subviews, - protocolVersion, - ) - ) - - private val transactionViewForInformeeTreeArb: Arbitrary[TransactionView] = Arbitrary( - for { - viewCommonData 
<- viewCommonDataArb.arbitrary - viewParticipantData <- viewParticipantDataArb.arbitrary - hashOps = TestHash - transactionViewWithEmptySubview <- - transactionViewWithEmptyTransactionSubviewArb.arbitrary - subviews = TransactionSubviews - .apply(Seq(transactionViewWithEmptySubview))(protocolVersion, hashOps) - } yield TransactionView.tryCreate(hashOps)( - viewCommonData = viewCommonData, - viewParticipantData = - viewParticipantData.blindFully, // The view participant data in an informee tree must be blinded - subviews = subviews, - protocolVersion, - ) - ) - - implicit val fullInformeeTreeArb: Arbitrary[FullInformeeTree] = Arbitrary( - for { - submitterMetadata <- submitterMetadataArb.arbitrary - commonData <- commonMetadataArb.arbitrary - participantData <- participantMetadataArb.arbitrary - rootViews <- transactionViewForInformeeTreeArb.arbitrary - hashOps = TestHash - rootViewsMerkleSeq = MerkleSeq.fromSeq(hashOps, protocolVersion)(Seq(rootViews)) - genTransactionTree = GenTransactionTree - .tryCreate(hashOps)( - submitterMetadata, - commonData, - participantData.blindFully, // The view participant data in an informee tree must be blinded - rootViews = rootViewsMerkleSeq, - ) - } yield FullInformeeTree.tryCreate(tree = genTransactionTree, protocolVersion) - ) - - // here we want to test the (de)serialization of the MerkleSeq and we use SubmitterMetadata as the VersionedMerkleTree. - // other VersionedMerkleTree types are tested in their respective tests - implicit val merkleSeqArb: Arbitrary[MerkleSeq[VersionedMerkleTree[?]]] = - Arbitrary( - for { - submitterMetadataSeq <- Gen.listOf(submitterMetadataArb.arbitrary) - } yield MerkleSeq.fromSeq(TestHash, protocolVersion)(submitterMetadataSeq) - ) - - private val sourceProtocolVersion = Source(protocolVersion) - private val targetProtocolVersion = Target(protocolVersion) - - implicit val reassignmentIdArb: Arbitrary[ReassignmentId] = Arbitrary { - val hexChars: Seq[Char] = "0123456789abcdefABCDEF".toIndexedSeq - Gen.stringOfN(32, Gen.oneOf(hexChars)).map(ReassignmentId.tryCreate) - } - - implicit val reassignmentSubmitterMetadataArb: Arbitrary[ReassignmentSubmitterMetadata] = - Arbitrary( - for { - submitter <- Arbitrary.arbitrary[LfPartyId] - userId <- userIdArb.arbitrary.map(_.unwrap) - submittingParticipant <- Arbitrary.arbitrary[ParticipantId] - commandId <- commandIdArb.arbitrary.map(_.unwrap) - submissionId <- Gen.option(ledgerSubmissionIdArb.arbitrary) - workflowId <- Gen.option(workflowIdArb.arbitrary.map(_.unwrap)) - - } yield ReassignmentSubmitterMetadata( - submitter, - submittingParticipant, - commandId, - submissionId, - userId, - workflowId, - ) - ) - - implicit val assignmentCommonDataArb: Arbitrary[AssignmentCommonData] = Arbitrary( - for { - salt <- Arbitrary.arbitrary[Salt] - sourcePSId <- Arbitrary.arbitrary[Source[PhysicalSynchronizerId]] - targetPSId <- Arbitrary.arbitrary[Target[PhysicalSynchronizerId]] - - targetMediator <- Arbitrary.arbitrary[MediatorGroupRecipient] - - stakeholders <- Arbitrary.arbitrary[Stakeholders] - - uuid <- Gen.uuid - - submitterMetadata <- Arbitrary.arbitrary[ReassignmentSubmitterMetadata] - reassigningParticipants <- Arbitrary.arbitrary[Set[ParticipantId]] - - hashOps = TestHash // Not used for serialization - - } yield AssignmentCommonData - .create(hashOps)( - salt, - sourcePSId, - targetPSId, - targetMediator, - stakeholders, - uuid, - submitterMetadata, - reassigningParticipants, - ) - ) - - implicit val unassignmentCommonData: Arbitrary[UnassignmentCommonData] = Arbitrary( - for { - 
salt <- Arbitrary.arbitrary[Salt] - sourceSynchronizerId <- Arbitrary.arbitrary[Source[PhysicalSynchronizerId]] - - sourceMediator <- Arbitrary.arbitrary[MediatorGroupRecipient] - - stakeholders <- Arbitrary.arbitrary[Stakeholders] - reassigningParticipants <- Arbitrary.arbitrary[Set[ParticipantId]] - - uuid <- Gen.uuid - - submitterMetadata <- Arbitrary.arbitrary[ReassignmentSubmitterMetadata] - - hashOps = TestHash // Not used for serialization - - } yield UnassignmentCommonData - .create(hashOps)( - salt, - sourceSynchronizerId, - sourceMediator, - stakeholders, - reassigningParticipants, - uuid, - submitterMetadata, - sourceProtocolVersion, - ) - ) - - implicit val assignmentViewArb: Arbitrary[AssignmentView] = Arbitrary( - for { - salt <- Arbitrary.arbitrary[Salt] - contracts <- Arbitrary.arbitrary[ContractsReassignmentBatch] - reassignmentId <- Arbitrary.arbitrary[ReassignmentId] - hashOps = TestHash // Not used for serialization - - } yield AssignmentView - .create(hashOps)( - salt, - reassignmentId, - contracts, - targetProtocolVersion, - ) - .value - ) - - private def timeProofArb(protocolVersion: ProtocolVersion): Arbitrary[TimeProof] = Arbitrary( - for { - timestamp <- Arbitrary.arbitrary[CantonTimestamp] - previousEventTimestamp <- Arbitrary.arbitrary[Option[CantonTimestamp]] - counter <- nonNegativeLongArb.arbitrary.map(_.unwrap) - targetSynchronizerId <- Arbitrary.arbitrary[Target[PhysicalSynchronizerId]] - } yield TimeProofTestUtil.mkTimeProof( - timestamp, - previousEventTimestamp, - counter, - targetSynchronizerId, - protocolVersion, - ) - ) - - implicit val unassignmentViewArb: Arbitrary[UnassignmentView] = Arbitrary( - for { - salt <- Arbitrary.arbitrary[Salt] - - contracts <- Arbitrary.arbitrary[ContractsReassignmentBatch] - - targetSynchronizerId <- Arbitrary - .arbitrary[Target[PhysicalSynchronizerId]] - .map(_.map(_.copy(protocolVersion = protocolVersion))) - timeProof <- timeProofArb(protocolVersion).arbitrary - - hashOps = TestHash // Not used for serialization - - } yield UnassignmentView - .create(hashOps)( - salt, - contracts, - targetSynchronizerId, - timeProof, - sourceProtocolVersion, - ) - ) - - implicit val assignViewTreeArb: Arbitrary[AssignmentViewTree] = Arbitrary( - for { - commonData <- assignmentCommonDataArb.arbitrary - assignmentView <- assignmentViewArb.arbitrary - hash = TestHash - } yield AssignmentViewTree( - commonData, - assignmentView.blindFully, - Target(protocolVersion), - hash, - ) - ) - - implicit val unassignmentViewTreeArb: Arbitrary[UnassignmentViewTree] = Arbitrary( - for { - commonData <- unassignmentCommonData.arbitrary - unassignmentView <- unassignmentViewArb.arbitrary - hash = TestHash - } yield UnassignmentViewTree( - commonData, - unassignmentView.blindFully, - Source(protocolVersion), - hash, - ) - ) - - private val fullyBlindedTransactionViewWithEmptyTransactionSubviewArb - : Arbitrary[TransactionView] = Arbitrary( - for { - viewCommonData <- viewCommonDataArb.arbitrary - viewParticipantData <- viewParticipantDataArb.arbitrary - hashOps = TestHash - emptySubviews = TransactionSubviews.empty( - protocolVersion, - hashOps, - ) // empty TransactionSubviews - } yield TransactionView.tryCreate(hashOps)( - viewCommonData = viewCommonData.blindFully, - viewParticipantData = viewParticipantData.blindFully, - subviews = emptySubviews.blindFully, - protocolVersion, - ) - ) - - private var unblindedSubviewHashesForLightTransactionTree: Seq[ViewHashAndKey] = _ - - private val transactionViewForLightTransactionTreeArb: 
Arbitrary[TransactionView] = Arbitrary(
-    for {
-      viewCommonData <- viewCommonDataArb.arbitrary
-      viewParticipantData <- viewParticipantDataArb.arbitrary
-      hashOps = TestHash
-      transactionViewWithEmptySubview <-
-        fullyBlindedTransactionViewWithEmptyTransactionSubviewArb.arbitrary
-      subviews = TransactionSubviews
-        .apply(Seq(transactionViewWithEmptySubview))(protocolVersion, hashOps)
-      subviewHashes = subviews.trySubviewHashes
-      pureCrypto = ExampleTransactionFactory.pureCrypto
-      subviewHashesAndKeys = subviewHashes.map { hash =>
-        ViewHashAndKey(
-          hash,
-          pureCrypto.generateSecureRandomness(pureCrypto.defaultSymmetricKeyScheme.keySizeInBytes),
-        )
-      }
-    } yield {
-      unblindedSubviewHashesForLightTransactionTree = subviewHashesAndKeys
-      TransactionView.tryCreate(hashOps)(
-        viewCommonData = viewCommonData,
-        viewParticipantData = viewParticipantData,
-        subviews =
-          subviews.blindFully, // only a single view in a LightTransactionTree can be unblinded
-        protocolVersion,
-      )
-    }
-  )
-
-  implicit val lightTransactionViewTreeArb: Arbitrary[LightTransactionViewTree] = Arbitrary(
-    for {
-      submitterMetadata <- submitterMetadataArb.arbitrary
-      commonData <- commonMetadataArb.arbitrary
-      participantData <- participantMetadataArb.arbitrary
-      rootViews <- transactionViewForLightTransactionTreeArb.arbitrary
-      hashOps = TestHash
-      rootViewsMerkleSeq = MerkleSeq.fromSeq(hashOps, protocolVersion)(Seq(rootViews))
-      genTransactionTree = GenTransactionTree
-        .tryCreate(hashOps)(
-          submitterMetadata,
-          commonData,
-          participantData,
-          rootViews = rootViewsMerkleSeq,
-        )
-    } yield LightTransactionViewTree.tryCreate(
-      tree = genTransactionTree,
-      unblindedSubviewHashesForLightTransactionTree,
-      protocolVersion,
-    )
-  )
-
-}
diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/GeneratorsDataTime.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/GeneratorsDataTime.scala
deleted file mode 100644
index 5f0c22394c..0000000000
--- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/GeneratorsDataTime.scala
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-package com.digitalasset.canton.data
-
-import org.scalacheck.{Arbitrary, Gen}
-
-import java.time.Duration
-
-object GeneratorsDataTime {
-  private val tenYears: Duration = Duration.ofDays(365 * 10)
-
-  implicit val cantonTimestampArb: Arbitrary[CantonTimestamp] = Arbitrary(
-    Gen.choose(0, tenYears.getSeconds * 1000 * 1000).map(CantonTimestamp.ofEpochMicro)
-  )
-  implicit val cantonTimestampSecondArb: Arbitrary[CantonTimestampSecond] = Arbitrary(
-    Gen.choose(0, tenYears.getSeconds).map(CantonTimestampSecond.ofEpochSecond)
-  )
-}
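The deleted GeneratorsDataTime above bounds its timestamp generators to a ten-year window instead of drawing from the full Long range, so every generated value stays within what the wire format can represent and round-trip tests fail only for genuine codec bugs, not overflow artifacts. A minimal sketch of the same pattern applied to a plain java.time.Instant (illustrative names only, not Canton APIs):

    import java.time.{Duration, Instant}
    import org.scalacheck.{Arbitrary, Gen}

    object BoundedTimeGenerators {
      private val tenYears: Duration = Duration.ofDays(365 * 10)

      // Restrict the range so every generated value survives an
      // epoch-seconds round trip without overflow or truncation.
      implicit val instantArb: Arbitrary[Instant] = Arbitrary(
        Gen.choose(0L, tenYears.getSeconds).map(Instant.ofEpochSecond)
      )
    }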
diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/GeneratorsTrafficData.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/GeneratorsTrafficData.scala
deleted file mode 100644
index 5764c18ab2..0000000000
--- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/GeneratorsTrafficData.scala
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-package com.digitalasset.canton.data
-
-import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt}
-import com.digitalasset.canton.protocol.messages.SetTrafficPurchasedMessage
-import com.digitalasset.canton.sequencing.protocol.{
-  GetTrafficStateForMemberRequest,
-  GetTrafficStateForMemberResponse,
-  TrafficState,
-}
-import com.digitalasset.canton.topology.{GeneratorsTopology, Member, PhysicalSynchronizerId}
-import com.digitalasset.canton.version.ProtocolVersion
-import org.scalacheck.Arbitrary
-
-final class GeneratorsTrafficData(
-    protocolVersion: ProtocolVersion,
-    generatorsTopology: GeneratorsTopology,
-) {
-  import com.digitalasset.canton.config.GeneratorsConfig.*
-  import generatorsTopology.*
-  import GeneratorsDataTime.*
-
-  implicit val setTrafficPurchasedArb: Arbitrary[SetTrafficPurchasedMessage] = Arbitrary(
-    for {
-      member <- Arbitrary.arbitrary[Member]
-      serial <- Arbitrary.arbitrary[PositiveInt]
-      trafficPurchased <- Arbitrary.arbitrary[NonNegativeLong]
-      psid <- Arbitrary.arbitrary[PhysicalSynchronizerId]
-    } yield SetTrafficPurchasedMessage.apply(
-      member,
-      serial,
-      trafficPurchased,
-      psid,
-    )
-  )
-
-  implicit val getTrafficStateForMemberRequestArb: Arbitrary[GetTrafficStateForMemberRequest] =
-    Arbitrary(
-      for {
-        member <- Arbitrary.arbitrary[Member]
-        timestamp <- Arbitrary.arbitrary[CantonTimestamp]
-      } yield GetTrafficStateForMemberRequest.apply(
-        member,
-        timestamp,
-        protocolVersion,
-      )
-    )
-
-  implicit val trafficStateArb: Arbitrary[TrafficState] = Arbitrary(
-    for {
-      extraTrafficLimit <- Arbitrary.arbitrary[NonNegativeLong]
-      extraTrafficConsumed <- Arbitrary.arbitrary[NonNegativeLong]
-      baseTrafficRemainder <- Arbitrary.arbitrary[NonNegativeLong]
-      lastConsumedCost <- Arbitrary.arbitrary[NonNegativeLong]
-      timestamp <- Arbitrary.arbitrary[CantonTimestamp]
-      serial <- Arbitrary.arbitrary[Option[PositiveInt]]
-    } yield TrafficState(
-      extraTrafficLimit,
-      extraTrafficConsumed,
-      baseTrafficRemainder,
-      lastConsumedCost,
-      timestamp,
-      serial,
-    )
-  )
-
-  implicit val getTrafficStateForMemberResponseArb: Arbitrary[GetTrafficStateForMemberResponse] =
-    Arbitrary(
-      for {
-        trafficState <- Arbitrary.arbOption[TrafficState].arbitrary
-      } yield GetTrafficStateForMemberResponse.apply(
-        trafficState,
-        protocolVersion,
-      )
-    )
-}
diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/MerkleSeqTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/MerkleSeqTest.scala
deleted file mode 100644
index 26cabf9c7c..0000000000
--- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/MerkleSeqTest.scala
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
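Generator classes like GeneratorsTrafficData above exist to feed Arbitrary instances into serialization round-trip suites. A minimal sketch of how such a property is typically wired up, assuming hypothetical encode/decode functions that stand in for the real versioned codecs (not the actual Canton test harness):

    import org.scalacheck.Prop.forAll
    import org.scalacheck.{Arbitrary, Prop}

    // Generic lossless-serialization property: decode(encode(a)) == a.
    def roundTrip[A: Arbitrary](
        encode: A => Array[Byte],
        decode: Array[Byte] => Either[String, A],
    ): Prop =
      forAll { (a: A) => decode(encode(a)) == Right(a) }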
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.data - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.crypto.HashOps -import com.digitalasset.canton.data.MerkleSeq.{Branch, MerkleSeqElement, Singleton} -import com.digitalasset.canton.data.MerkleTree.{ - BlindSubtree, - BlindingCommand, - RevealIfNeedBe, - RevealSubtree, -} -import com.digitalasset.canton.data.MerkleTreeTest.{AbstractLeaf, Leaf1} -import com.digitalasset.canton.data.ViewPosition.MerklePathElement -import com.digitalasset.canton.protocol.RootHash -import com.google.protobuf.ByteString -import org.scalatest.prop.TableFor4 -import org.scalatest.wordspec.AnyWordSpec - -class MerkleSeqTest extends AnyWordSpec with BaseTest { - - import com.digitalasset.canton.protocol.ExampleTransactionFactory.* - - private val hashOps: HashOps = MerkleTreeTest.hashOps - - private def leaf(index: Int): Leaf1 = - Leaf1(index)(AbstractLeaf.protocolVersionRepresentativeFor(testedProtocolVersion)) - - private def singleton(index: Int): Singleton[Leaf1] = - Singleton(leaf(index), testedProtocolVersion)(hashOps) - - private def branch( - first: MerkleTree[MerkleSeqElement[Leaf1]], - second: MerkleTree[MerkleSeqElement[Leaf1]], - ): Branch[Leaf1] = - Branch(first, second, testedProtocolVersion)(hashOps) - - private val Empty: MerkleSeq[Nothing] = MerkleSeq(None, testedProtocolVersion)(hashOps) - - private val OneUnblindedElement: MerkleSeq[Leaf1] = - MerkleSeq(Some(singleton(0)), testedProtocolVersion)(hashOps) - - private val OneBlindedElement: MerkleSeq[Leaf1] = - MerkleSeq( - Some( - Singleton(blinded(leaf(0)), testedProtocolVersion)(hashOps) - ), - testedProtocolVersion, - )(hashOps) - - private val OneElementFullyBlinded: MerkleSeq[Leaf1] = - MerkleSeq(Some(blinded(singleton(0))), testedProtocolVersion)(hashOps) - - private val TwoUnblindedElements: MerkleSeq[Leaf1] = - MerkleSeq(Some(branch(singleton(0), singleton(1))), testedProtocolVersion)(hashOps) - - private val TwoBlindedElements: MerkleSeq[Leaf1] = - MerkleSeq(Some(branch(blinded(singleton(0)), blinded(singleton(1)))), testedProtocolVersion)( - hashOps - ) - - private val OneBlindedOneUnblinded: MerkleSeq[Leaf1] = - MerkleSeq(Some(branch(blinded(singleton(0)), singleton(1))), testedProtocolVersion)(hashOps) - - private val TwoElementsRootHash: RootHash = - TwoUnblindedElements.rootOrEmpty - .getOrElse(throw new IllegalStateException("Missing root element")) - .rootHash - - private val TwoElementsFullyBlinded: MerkleSeq[Leaf1] = - MerkleSeq(Some(BlindedNode(TwoElementsRootHash)), testedProtocolVersion)(hashOps) - - private val SevenElementsLeft: Branch[Leaf1] = - branch(branch(singleton(0), singleton(1)), branch(singleton(2), singleton(3))) - private val SevenElementsRight: Branch[Leaf1] = - branch(branch(singleton(4), singleton(5)), singleton(6)) - private val SevenElements: MerkleSeq[Leaf1] = - MerkleSeq(Some(branch(SevenElementsLeft, SevenElementsRight)), testedProtocolVersion)(hashOps) - private val SevenElementsRootUnblinded: MerkleSeq[Leaf1] = - MerkleSeq( - Some(branch(blinded(SevenElementsLeft), blinded(SevenElementsRight))), - testedProtocolVersion, - )(hashOps) - - private val testCases - : TableFor4[String, Seq[MerkleTree[Leaf1]], MerkleSeq[Leaf1], MerkleSeq[Leaf1]] = - Table[String, Seq[MerkleTree[Leaf1]], MerkleSeq[Leaf1], MerkleSeq[Leaf1]]( - ("name", "elements", "Merkle seq", "Merkle seq with root unblinded"), - ("no elements", Seq.empty, Empty, Empty), - ("one unblinded element", Seq(leaf(0)), OneUnblindedElement, 
OneBlindedElement), - ( - "one blinded element", - Seq(blinded(leaf(0))), - OneElementFullyBlinded, - OneElementFullyBlinded, - ), - ("two unblinded elements", Seq(leaf(0), leaf(1)), TwoUnblindedElements, TwoBlindedElements), - ( - "one blinded and one unblinded element", - Seq(blinded(leaf(0)), leaf(1)), - OneBlindedOneUnblinded, - TwoBlindedElements, - ), - ( - "two blinded elements", - Seq(blinded(leaf(0)), blinded(leaf(1))), - TwoElementsFullyBlinded, - TwoElementsFullyBlinded, - ), - ("seven elements", (0 until 7).map(leaf), SevenElements, SevenElementsRootUnblinded), - ) - - testCases.forEvery { (name, elements, merkleSeq, merkleSeqWithRootUnblinded) => - s"A MerkleSeq with $name" can { - "be constructed" in { - MerkleSeq.fromSeq(hashOps, testedProtocolVersion)(elements) shouldEqual merkleSeq - } - - "be serialized" in { - val merkleSeqP = merkleSeq.toByteString - val merkleSeqDeserialized = - MerkleSeq - .fromByteString( - ( - hashOps, - (bytes: ByteString) => AbstractLeaf.fromByteString(testedProtocolVersion, bytes), - ), - testedProtocolVersion, - )(merkleSeqP) - .value - - merkleSeqDeserialized shouldEqual merkleSeq - } - - val rootHash = merkleSeq.rootOrEmpty.map(_.rootHash) - - "blind the root" in { - val policy = rootHash.toList.map(_ -> BlindSubtree).toMap - val expectedBlindedSeq = - MerkleSeq(rootHash.map(BlindedNode(_)), testedProtocolVersion)(hashOps) - - merkleSeq.doBlind(policy) shouldEqual expectedBlindedSeq - } - - "blind all except the root" in { - val policy: PartialFunction[RootHash, BlindingCommand] = { - case hash if rootHash.contains(hash) => RevealIfNeedBe - case _ => BlindSubtree - } - - merkleSeq.doBlind(policy) shouldEqual merkleSeqWithRootUnblinded - } - - "blind nothing" in { - val policy = rootHash.toList.map(_ -> RevealSubtree).toMap - - merkleSeq.doBlind(policy) shouldEqual merkleSeq - } - - "compute the right indices" in { - val merkleSeq = MerkleSeq.fromSeq(hashOps, testedProtocolVersion)(elements) - val indices: Seq[MerklePathElement] = MerkleSeq.indicesFromSeq(elements.size) - - assert(indices.sizeIs == elements.size) - assert(indices.distinct == indices, "indices are distinct") - val encodedIndices = indices.map(_.encodeDeterministically) - assert(encodedIndices.distinct == encodedIndices, "encoded indices are distinct") - - val indexToElement = indices.zip(elements).toMap - - merkleSeq.unblindedElementsWithIndex.foreach { case (unblinded, index) => - val assigned = indexToElement(index) - assert(assigned == unblinded, s"at index $index") - } - } - - "remain unchanged when mapped over with the identity function" in { - merkleSeq.mapM(identity) shouldBe merkleSeq - } - } - } - - "Mapping changes the tree as expected" in { - val inc: Leaf1 => Leaf1 = { case Leaf1(i) => - Leaf1(i + 1)(AbstractLeaf.protocolVersionRepresentativeFor(testedProtocolVersion)) - } - OneBlindedElement.mapM(inc) shouldBe OneBlindedElement - OneElementFullyBlinded.mapM(inc) shouldBe OneElementFullyBlinded - OneUnblindedElement.mapM(inc) shouldBe MerkleSeq(Some(singleton(1)), testedProtocolVersion)( - hashOps - ) - TwoUnblindedElements - .mapM(inc) shouldBe MerkleSeq( - Some(branch(singleton(1), singleton(2))), - testedProtocolVersion, - )(hashOps) - SevenElements.mapM(inc.compose(inc)) shouldBe SevenElements.mapM(inc).mapM(inc) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/MerkleTreeTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/MerkleTreeTest.scala deleted file mode 100644 index 
c2a92da8bc..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/MerkleTreeTest.scala +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.data - -import cats.syntax.either.* -import com.digitalasset.canton.crypto.* -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto -import com.digitalasset.canton.data.MerkleTree.{ - BlindSubtree, - RevealIfNeedBe, - RevealSubtree, - VersionedMerkleTree, -} -import com.digitalasset.canton.data.MerkleTreeTest.* -import com.digitalasset.canton.logging.pretty.Pretty -import com.digitalasset.canton.protocol.RootHash -import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult -import com.digitalasset.canton.serialization.{ - DefaultDeserializationError, - DeserializationError, - DeterministicEncoding, - HasCryptographicEvidence, -} -import com.digitalasset.canton.version.{ - HasProtocolVersionedWrapper, - ProtoVersion, - ProtocolVersion, - RepresentativeProtocolVersion, - VersionedProtoCodec, - VersioningCompanion, -} -import com.digitalasset.canton.{BaseTest, ProtoDeserializationError} -import com.google.protobuf.ByteString -import org.scalatest.wordspec.AnyWordSpec - -@SuppressWarnings(Array("org.wartremover.warts.Any")) -class MerkleTreeTest extends AnyWordSpec with BaseTest { - - private val fullyBlindedTreeHash: RootHash = RootHash(TestHash.digest("test")) - private val fullyBlindedTree: BlindedNode[Nothing] = BlindedNode(fullyBlindedTreeHash) - - private val singletonLeaf1: Leaf1 = - Leaf1(1)(AbstractLeaf.protocolVersionRepresentativeFor(testedProtocolVersion)) - private val singletonLeaf2: Leaf2 = - Leaf2(2)(AbstractLeaf.protocolVersionRepresentativeFor(testedProtocolVersion)) - private val singletonLeaf3: Leaf3 = - Leaf3(3)(AbstractLeaf.protocolVersionRepresentativeFor(testedProtocolVersion)) - - private def singletonLeafHash(index: Int): RootHash = RootHash { - val salt = TestSalt.generateSalt(index) - val data = DeterministicEncoding.encodeInt(index) - val hashBuilder = TestHash.build - hashBuilder - .add(salt.forHashing) - .add(data) - .finish() - } - - private val singletonInnerNode: InnerNode1 = InnerNode1() - private val singletonInnerNodeHash: RootHash = RootHash { - val hashBuilder = TestHash.build(HashPurpose.MerkleTreeInnerNode) - hashBuilder.add(0).finish() - } - - private val innerNodeWithSingleChild: InnerNode1 = InnerNode1(singletonLeaf1) - private val innerNodeWithSingleChildHash: RootHash = RootHash { - val hashBuilder = TestHash.build(HashPurpose.MerkleTreeInnerNode) - hashBuilder - .add(1) - .addWithoutLengthPrefix(singletonLeafHash(1).getCryptographicEvidence) - .finish() - } - - private val innerNodeWithTwoChildren: InnerNode2 = InnerNode2(singletonLeaf2, singletonLeaf3) - private val innerNodeWithTwoChildrenHash: RootHash = RootHash { - val hashBuilder = TestHash.build(HashPurpose.MerkleTreeInnerNode) - hashBuilder - .add(2) - .addWithoutLengthPrefix(singletonLeafHash(2).getCryptographicEvidence) - .addWithoutLengthPrefix(singletonLeafHash(3).getCryptographicEvidence) - .finish() - } - - private val threeLevelTree: InnerNode1 = InnerNode1(singletonLeaf1, innerNodeWithTwoChildren) - private val threeLevelTreeHash: RootHash = RootHash { - val hashBuilder = TestHash.build(HashPurpose.MerkleTreeInnerNode) - hashBuilder - .add(2) - .addWithoutLengthPrefix(singletonLeafHash(1).getCryptographicEvidence) - 
.addWithoutLengthPrefix(innerNodeWithTwoChildrenHash.getCryptographicEvidence) - .finish() - } - - private val threeLevelTreePartiallyBlinded: InnerNode1 = - InnerNode1(BlindedNode(singletonLeafHash(1)), innerNodeWithTwoChildren) - - "Every Merkle tree" must { - - val testCases = - Table[MerkleTree[_], RootHash, Boolean, Boolean]( - ("Merkle tree", "Expected root hash", "Is fully unblinded", "Has all leaves blinded"), - (fullyBlindedTree, fullyBlindedTreeHash, false, false), - (singletonLeaf1, singletonLeafHash(1), true, true), - (singletonInnerNode, singletonInnerNodeHash, true, true), - (innerNodeWithSingleChild, innerNodeWithSingleChildHash, true, false), - (innerNodeWithTwoChildren, innerNodeWithTwoChildrenHash, true, false), - (threeLevelTree, threeLevelTreeHash, true, false), - (threeLevelTreePartiallyBlinded, threeLevelTreeHash, false, true), - ) - - "have the expected root hash" in { - forEvery(testCases)((merkleTree, expectedRootHash, _, _) => - merkleTree.rootHash should equal(expectedRootHash) - ) - } - - "wrap something (unless it is blinded)" in { - forEvery(testCases) { (merkleTree, _, _, _) => - merkleTree match { - case BlindedNode(rootHash) => - merkleTree.unwrap should equal(Left(rootHash)) - an[UnsupportedOperationException] should be thrownBy merkleTree.tryUnwrap - case _ => - merkleTree.unwrap should equal(Right(merkleTree)) - merkleTree.tryUnwrap should equal(merkleTree) - } - } - } - - "correctly say whether it is fully unblinded" in { - forEvery(testCases) { case (merkleTree, _, fullyUnblinded, _) => - merkleTree.isFullyUnblinded shouldEqual fullyUnblinded - } - } - - "correctly say whether all leaves are blinded" in { - forEvery(testCases) { case (merkleTree, _, _, leavesBlinded) => - merkleTree.hasAllLeavesBlinded shouldEqual leavesBlinded - } - } - - "be blindable" in { - forEvery(testCases) { (merkleTree, _, _, _) => - val blindedTree = BlindedNode(merkleTree.rootHash) - merkleTree.blind { case t if t eq merkleTree => BlindSubtree } should equal(blindedTree) - merkleTree.blind { case _ => RevealIfNeedBe } should equal(blindedTree) - } - } - - "remain unchanged on Reveal" in { - forEvery(testCases) { (merkleTree, _, _, _) => - merkleTree.blind { case t if t eq merkleTree => RevealSubtree } should equal(merkleTree) - } - } - - "escalate a malformed blinding policy" in { - forEvery(testCases) { (merkleTree, _, _, _) => - merkleTree match { - case BlindedNode(_) => // skip test, as the blinding policy cannot be malformed - case _ => - an[IllegalArgumentException] should be thrownBy merkleTree.blind(PartialFunction.empty) - } - } - } - } - - "A deep Merkle tree" can { - "be partially blinded" in { - // Blind threeLevelTree to threeLevelTreePartiallyBlinded. - threeLevelTree.blind { - case InnerNode1(_*) => RevealIfNeedBe - case Leaf1(_) => BlindSubtree - case InnerNode2(_*) => RevealSubtree - } should equal(threeLevelTreePartiallyBlinded) - - // Use RevealIfNeedBe while blinding one child and revealing the other child. - innerNodeWithTwoChildren.blind { - case InnerNode2(_*) => RevealIfNeedBe - case Leaf2(_) => BlindSubtree - case Leaf3(_) => RevealSubtree - } should equal(InnerNode2(BlindedNode(singletonLeafHash(2)), singletonLeaf3)) - - // Use RevealIfNeedBe while blinding all children. - innerNodeWithTwoChildren.blind { - case InnerNode2(_*) => RevealIfNeedBe - case Leaf2(_) => BlindSubtree - case Leaf3(_) => BlindSubtree - } should equal(BlindedNode(innerNodeWithTwoChildrenHash)) - - // Use RevealIfNeedBe while blinding all children. 
- // Make sure that grandchildren are blinded, even though the policy assigns Reveal to them. - threeLevelTree.blind { - case InnerNode1(_*) => RevealIfNeedBe - case Leaf1(_) => BlindSubtree - case InnerNode2(_*) => BlindSubtree - case _ => RevealSubtree - } should equal(BlindedNode(threeLevelTreeHash)) - } - } -} - -@SuppressWarnings(Array("org.wartremover.warts.Any")) -object MerkleTreeTest { - type VersionedAbstractLeaf = AbstractLeaf[_ <: VersionedMerkleTree[_]] - val hashOps = new SymbolicPureCrypto - - object AbstractLeaf extends VersioningCompanion[VersionedAbstractLeaf] { - override def name: String = "AbstractLeaf" - override def versioningTable: VersioningTable = VersioningTable( - ProtoVersion(30) -> VersionedProtoCodec.raw( - ProtocolVersion.v34, - (_, _, bytes) => fromProto(30)(bytes), - _.getCryptographicEvidence, - ) - ) - - def fromProto(protoVersion: Int)(bytes: ByteString): ParsingResult[Leaf1] = - protocolVersionRepresentativeFor(ProtoVersion(protoVersion)).flatMap { rpv => - leafFromByteString(i => Leaf1(i)(rpv))(bytes).leftMap(e => - ProtoDeserializationError.OtherError(e.message) - ) - } - - } - - abstract class AbstractLeaf[ - A <: MerkleTree[_] with HasCryptographicEvidence with HasProtocolVersionedWrapper[_] - ]( - val index: Int - ) extends MerkleTreeLeaf[A](MerkleTreeTest.hashOps) - with HasCryptographicEvidence - with HasProtocolVersionedWrapper[VersionedAbstractLeaf] { - this: A => - - override def salt: Salt = TestSalt.generateSalt(index) - - override def getCryptographicEvidence: ByteString = DeterministicEncoding.encodeInt(index) - - override val hashPurpose: HashPurpose = TestHash.testHashPurpose - - override protected def pretty: Pretty[AbstractLeaf[A]] = prettyOfClass(unnamedParam(_.index)) - } - - def leafFromByteString[L <: AbstractLeaf[_]]( - mkLeaf: Int => L - )(bytes: ByteString): Either[DeserializationError, L] = - for { - indexAndRemainder <- DeterministicEncoding.decodeInt(bytes) - (index, remainder) = indexAndRemainder - _ <- Either.cond( - remainder.isEmpty, - (), - DefaultDeserializationError( - "Unable to deserialize Int from ByteString. 
Remaining bytes: " - ), - ) - } yield mkLeaf(index) - - final case class Leaf1(override val index: Int)( - override val representativeProtocolVersion: RepresentativeProtocolVersion[AbstractLeaf.type] - ) extends AbstractLeaf[Leaf1](index) { - override protected lazy val companionObj: AbstractLeaf.type = - AbstractLeaf - } - - final case class Leaf2(override val index: Int)( - override val representativeProtocolVersion: RepresentativeProtocolVersion[AbstractLeaf.type] - ) extends AbstractLeaf[Leaf2](index) { - override protected lazy val companionObj: AbstractLeaf.type = - AbstractLeaf - } - - final case class Leaf3(override val index: Int)( - override val representativeProtocolVersion: RepresentativeProtocolVersion[AbstractLeaf.type] - ) extends AbstractLeaf[Leaf3](index) { - override protected lazy val companionObj: AbstractLeaf.type = - AbstractLeaf - } - - abstract class AbstractInnerNode[A]( - val create: Seq[MerkleTree[_]] => MerkleTree[A], - val subtrees: MerkleTree[_]* - ) extends MerkleTreeInnerNode[A](MerkleTreeTest.hashOps) { - this: A => - - override private[data] def withBlindedSubtrees( - blindingCommandPerNode: PartialFunction[RootHash, MerkleTree.BlindingCommand] - ): MerkleTree[A] = - create(subtrees.map(_.doBlind(blindingCommandPerNode))) - - override protected def pretty: Pretty[AbstractInnerNode[A]] = prettyOfClass( - param("subtrees", _.subtrees) - ) - } - - final case class InnerNode1(override val subtrees: MerkleTree[_]*) - extends AbstractInnerNode[InnerNode1](InnerNode1.apply, subtrees*) {} - - final case class InnerNode2(override val subtrees: MerkleTree[_]*) - extends AbstractInnerNode[InnerNode2](InnerNode2.apply, subtrees*) {} -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/PeanoQueueTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/PeanoQueueTest.scala deleted file mode 100644 index 773023747f..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/PeanoQueueTest.scala +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
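The blinding cases in MerkleTreeTest above hinge on three commands: BlindSubtree replaces a node by its root hash, RevealSubtree keeps it as-is, and RevealIfNeedBe keeps an inner node only while at least one descendant remains revealed. A simplified model of that decision logic on a toy tree (illustrative only, not Canton's MerkleTree implementation; hash is a stand-in for a real Merkle hash):

    sealed trait Node
    final case class Leaf(label: String) extends Node
    final case class Inner(children: List[Node]) extends Node
    final case class Blinded(hash: Int) extends Node

    sealed trait Command
    case object BlindSubtree extends Command
    case object RevealSubtree extends Command
    case object RevealIfNeedBe extends Command

    def hash(n: Node): Int = n.hashCode() // stand-in for a real Merkle hash

    def blind(n: Node)(policy: Node => Command): Node = policy(n) match {
      case BlindSubtree  => Blinded(hash(n))
      case RevealSubtree => n
      case RevealIfNeedBe =>
        n match {
          case Inner(children) =>
            val blindedChildren = children.map(blind(_)(policy))
            // A node whose children all ended up blinded is blinded itself,
            // mirroring the "blinding all children" cases in the test above.
            if (blindedChildren.forall(_.isInstanceOf[Blinded])) Blinded(hash(n))
            else Inner(blindedChildren)
          case leafOrBlinded => leafOrBlinded
        }
    }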
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.data - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.data.PeanoQueue.{BeforeHead, InsertedValue, NotInserted} -import org.scalatest.wordspec.AnyWordSpec - -trait PeanoQueueTest extends BaseTest { this: AnyWordSpec => - - def peanoQueue[Discr](mk: Counter[Discr] => PeanoQueue[Counter[Discr], String]): Unit = { - import scala.language.implicitConversions - implicit def toCounter(i: Long): Counter[Discr] = Counter[Discr](i) - - val testCases = - Table[String, Counter[Discr], Seq[ - (Seq[(Counter[Discr], String)], Counter[Discr], Seq[String]) - ]]( - ("name", "initial head", "inserts, expected front, expected polled values"), - ("empty", 0, Seq.empty), - ("start with 5", 5, Seq.empty), - ("start with MinValue", Counter.MinValue, Seq.empty), - ( - "insert", - 0, - Seq( - (Seq((1, "one"), (0, "zero"), (3, "three")), 2, Seq("zero", "one")), - (Seq((2, "two")), 4, Seq("two", "three")), - ), - ), - ( - "complex", - 2, - Seq( - (Seq((10, "ten"), (12, "twelve")), 2, Seq.empty), - (Seq((2, "two"), (5, "five"), (3, "three"), (4, "four")), 6, Seq.empty), - (Seq((8, "eight"), (7, "seven"), (6, "six")), 9, Seq("two")), - ( - Seq((9, "nine")), - 11, - Seq("three", "four", "five", "six", "seven", "eight", "nine", "ten"), - ), - (Seq.empty, 11, Seq.empty), - (Seq((11, "eleven")), 13, Seq("eleven", "twelve")), - ), - ), - ( - "idempotent insert", - 0, - Seq( - ( - Seq((1, "one"), (2, "two"), (0, "zero"), (1, "one"), (4, "four")), - 3, - Seq("zero", "one", "two"), - ), - ( - Seq((-10, "negative ten"), (4, "four"), (3, "three"), (5, "five")), - 6, - Seq("three", "four", "five"), - ), - ), - ), - ) - - forEvery(testCases) { (name, initHead, insertsPolls) => - name should { - - val pq = mk(initHead) - - assert(pq.head === initHead, "have head set to the initial value") - assert(pq.front === initHead, "have front set to the initial value") - assert(pq.poll().isEmpty, "not allow to poll anything") - - @SuppressWarnings(Array("org.wartremover.warts.Var")) - var polls: Long = 0 - - insertsPolls.zipWithIndex foreach { case ((inserts, expFront, expVals), i) => - inserts.zipWithIndex foreach { case ((k, v), index) => - if (k < initHead) { - assert( - pq.alreadyInserted(k), - s"$name: $k below initial head $initHead is always considered to have been inserted", - ) - } else { - val before = inserts.take(index) ++ insertsPolls.take(i).flatMap(_._1) - if (!before.exists(_._1 == k)) - assert(!pq.alreadyInserted(k), s"$name: $k has not been inserted") - } - - pq.insert(k, v) - assert(pq.alreadyInserted(k), s"$name: $k has been inserted") - } - - assert(pq.head == initHead + polls, "have head set to the previous/initial value") - assert(pq.front == expFront, "have front set to the first missing key") - - expVals foreach { v => - assert(pq.poll() === Some((initHead + polls, v)), "return the values polled") - polls = polls + 1 - if (Long.MaxValue - polls >= initHead) - // we have not yet reached Long.MaxValue and polled everything - assert(pq.head === initHead + polls, "increment the head") - } - - assert(pq.front === expFront, "leave the front unchanged") - if (expFront == initHead + polls) - assert(pq.poll().isEmpty, "stop returning values at the front") - } - } - } - - "inserting MaxValue" should { - val pq = mk(Long.MaxValue - 1) - "fail" in { - assertThrows[IllegalArgumentException](pq.insert(Long.MaxValue, "MAX")) - } - } - - "dropUntilFront" should { - "drop all elements up to the front" in { - val pq = mk(0) - 
pq.insert(0, "zero") - pq.insert(1, "one") - pq.insert(3, "three") - - val last = pq.dropUntilFront() - assert(pq.poll().isEmpty) - assert(pq.head == pq.front) - assert(last.contains(Counter(1) -> "one")) - } - - "return None if nothing moves" in { - val pq = mk(0) - pq.insert(1, "one") - assert(pq.dropUntilFront().isEmpty) - assert(pq.head == Counter(0)) - } - } - - "double inserts" should { - val pq = mk(0L) - - "correctly report inserts or throw exceptions" in { - assert(pq.insert(1L, "one")) - assert(pq.insert(2L, "two")) - assert(pq.insert(4L, "four")) - assert(!pq.insert(-1L, "minus 1")) - assert(pq.insert(1L, "one")) - assert(pq.insert(4L, "four")) - assertThrows[IllegalArgumentException](pq.insert(4L, "FOUR")) - assert(pq.insert(0L, "zero")) - assertThrows[IllegalArgumentException](pq.insert(0L, "ZERO")) - assert(pq.insert(0L, "zero")) - assert(pq.poll().contains((Counter(0), "zero")), "polling 0") - assert(!pq.insert(0L, "zero")) - assert(!pq.insert(0L, "Zero")) - assert(pq.poll().contains((Counter(1), "one")), "polling 1") - assert(pq.poll().contains((Counter(2), "two")), "polling 2") - assert(pq.poll().isEmpty, "cannot poll 3") - assert(pq.insert(3L, "three")) - } - } - - "get" should { - val pq = mk(0L) - - Seq((0L, "zero"), (1L, "one"), (3L, "three")).foreach { case (k, v) => pq.insert(k, v) } - pq.poll() - - val tests = Table( - ("key", "expected value"), - (-1L, BeforeHead), - (0L, BeforeHead), - (1L, InsertedValue("one")), - (2L, NotInserted(Some("one"), Some("three"))), - (3L, InsertedValue("three")), - (Long.MaxValue, NotInserted(Some("three"), None)), - ) - tests.foreach { case (key, expected) => - assert(pq.get(key) == expected, s"Get($key) should yield $expected") - } - } - - "isEmpty" should { - "tell whether the queue is empty or not" in { - val pq = mk(0L) - - pq.isEmpty shouldBe true - - pq.insert(0L, "zero") - pq.isEmpty shouldBe false - - pq.poll() - pq.isEmpty shouldBe true - } - } - } -} - -class PeanoTreeQueueTest extends AnyWordSpec with PeanoQueueTest with BaseTest { - - "PeanoTreeQueue" should { - behave like peanoQueue(PeanoTreeQueue.apply[PeanoTreeQueueTest.Discriminator, String]) - - "maintain the invariant" must { - behave like peanoQueue[PeanoTreeQueueTest.Discriminator](init => - new PeanoTreeQueueTest.PeanoTreeQueueHelper[String](init) - ) - } - } -} - -object PeanoTreeQueueTest { - case object Discriminator - type Discriminator = Discriminator.type - type LocalCounter = Counter[Discriminator] - - class PeanoTreeQueueHelper[V](initHead: LocalCounter) - extends PeanoTreeQueue[Discriminator, V](initHead) { - - assertInvariant("initialization") - - override def insert(key: LocalCounter, value: V): Boolean = { - val added = super.insert(key, value) - - assertInvariant(s"insert ($key -> $value)") - added - } - - override def poll(): Option[(LocalCounter, V)] = { - val result = super.poll() - - assertInvariant(s"poll with result $result") - - result - } - - private def assertInvariant(msg: String): Unit = - assert(invariant, s"PeanoPriorityQueue invariant violated after $msg") - } -} - -class SynchronizedPeanoTreeQueueTest extends AnyWordSpec with PeanoQueueTest with BaseTest { - "SynchronizedPeanoTreeQueue" should { - behave like peanoQueue[PeanoTreeQueueTest.Discriminator](initHead => - new SynchronizedPeanoTreeQueue[PeanoTreeQueueTest.Discriminator, String](initHead) - ) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/TaskSchedulerTest.scala 
b/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/TaskSchedulerTest.scala deleted file mode 100644 index 227ae052ce..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/TaskSchedulerTest.scala +++ /dev/null @@ -1,599 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.data - -import cats.syntax.parallel.* -import com.daml.metrics -import com.daml.metrics.api.MetricHandle.Gauge -import com.daml.metrics.api.MetricHandle.Gauge.SimpleCloseableGauge -import com.daml.metrics.api.noop.NoOpCounter -import com.daml.metrics.api.{MetricInfo, MetricName, MetricQualification} -import com.digitalasset.canton.concurrent.Threading -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.logging.pretty.Pretty -import com.digitalasset.canton.logging.{NamedEventCapturingLogger, NamedLoggerFactory} -import com.digitalasset.canton.time.SimClock -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.FutureInstances.* -import com.digitalasset.canton.util.MonadUtil -import com.digitalasset.canton.{BaseTest, SequencerCounter} -import org.scalatest.Assertion -import org.scalatest.wordspec.AsyncWordSpec -import org.slf4j.event.Level - -import java.time.Duration as JDuration -import scala.annotation.tailrec -import scala.collection.mutable -import scala.concurrent.{ExecutionContext, Future, Promise} -import scala.util.Random - -class TaskSchedulerTest extends AsyncWordSpec with BaseTest { - import TaskSchedulerTest.* - import com.digitalasset.canton.data.CantonTimestamp.ofEpochMilli - - private lazy val metrics = new MockTaskSchedulerMetrics() - - private lazy val clock: SimClock = new SimClock(loggerFactory = loggerFactory) - - private val alertAfter: JDuration = JDuration.ofSeconds(10) - - private val alertEvery: JDuration = JDuration.ofSeconds(2) - - private def mkTaskScheduler( - initSc: SequencerCounter = SequencerCounter(0), - initTs: CantonTimestamp = CantonTimestamp.Epoch, - loggerFactory: NamedLoggerFactory = loggerFactory, - ): TaskScheduler[TestTask] = new TaskScheduler( - initSc, - initTs, - alertAfter, - alertEvery, - TestTaskOrdering, - metrics, - exitOnFatalFailures = true, - timeouts, - loggerFactory, - futureSupervisor, - clock, - ) - - "TaskScheduler" should { - "correctly order tasks and barriers" in { - final case class TaskData( - timestamp: CantonTimestamp, - sequencerCounter: SequencerCounter, - kind: Int, - ) - sealed trait TickOrTask extends Product with Serializable { - def sequencerCounter: SequencerCounter - } - final case class Tick(sequencerCounter: SequencerCounter, timestamp: CantonTimestamp) - extends TickOrTask - object Tick { - def apply(args: (SequencerCounter, CantonTimestamp)): Tick = Tick(args._1, args._2) - } - final case class Task(data: TaskData, taskIndex: Int) extends TickOrTask { - override def sequencerCounter: SequencerCounter = data.sequencerCounter - } - object Task { - def apply(args: (TaskData, Int)): Task = Task(args._1, args._2) - } - - val tasksInExecutionOrder: Seq[TaskData] = - Seq( - TaskData(ofEpochMilli(3), SequencerCounter(0), Finalization), - TaskData(ofEpochMilli(3), SequencerCounter(1), Finalization), - TaskData(ofEpochMilli(3), SequencerCounter(1), Timeout), - TaskData(ofEpochMilli(3), SequencerCounter(1), Activeness), - TaskData(ofEpochMilli(3), SequencerCounter(3), Activeness), - 
TaskData(ofEpochMilli(4), SequencerCounter(0), Finalization), - TaskData(ofEpochMilli(4), SequencerCounter(0), Timeout), - TaskData(ofEpochMilli(4), SequencerCounter(0), Activeness), - ) - val ticksWithTasks: Map[SequencerCounter, CantonTimestamp] = Map( - SequencerCounter(0) -> ofEpochMilli(0), - SequencerCounter(1) -> ofEpochMilli(1), - SequencerCounter(3) -> ofEpochMilli(3), - ) - val ticksWithoutTasks: Map[SequencerCounter, CantonTimestamp] = Map( - SequencerCounter(2) -> ofEpochMilli(2), - SequencerCounter(4) -> ofEpochMilli(5), - ) - - val allTicks = ticksWithTasks ++ ticksWithoutTasks - val barriers: Seq[CantonTimestamp] = Seq(ofEpochMilli(2), ofEpochMilli(4), ofEpochMilli(5)) - - val indexedChanges = ticksWithoutTasks.toSeq.map(Tick.apply) ++ - tasksInExecutionOrder.zipWithIndex.map(Task.apply) - - val rand = new Random(1234567890L) - - // test a random selection of 1/720 of all permutations (the same permutation may be picked several times) - val repetitions = (7 until indexedChanges.size).product - (0 until repetitions).toList - .parTraverse_ { _ => - val shuffled = rand.shuffle(indexedChanges) - val taskScheduler = mkTaskScheduler( - initTs = CantonTimestamp.MinValue - ) - val executionOrder = mutable.Queue.empty[Int] - - val barrierFutures = barriers.map(timestamp => taskScheduler.scheduleBarrierUS(timestamp)) - - val barriersWithFutures = - barriers.zip(barrierFutures).map { case (timestamp, optFuture) => - assert(optFuture.isDefined, s"Barrier future at $timestamp was None") - (timestamp, optFuture.value) - } - - val ticksAdded = mutable.Set[SequencerCounter]() - - @tailrec def firstGapFrom(base: SequencerCounter): SequencerCounter = - if (ticksAdded.contains(base)) firstGapFrom(base + 1) else base - - def checkBarriers(): Future[Unit] = { - val missingTick = firstGapFrom(SequencerCounter(0)) - if (missingTick > SequencerCounter(0)) { - val timestamp = allTicks(missingTick - 1) - barriersWithFutures.parTraverse_ { case (barrierTimestamp, barrierCompletion) => - if (barrierTimestamp <= timestamp) { - barrierCompletion.unwrap - } else { - assert( - !barrierCompletion.isCompleted, - s"Barrier $barrierTimestamp is not completed at $timestamp", - ) - } - } - } else Future.unit - } - - val tasks = List.newBuilder[TestTask] - - for { - _ <- MonadUtil.sequentialTraverse_(shuffled.zipWithIndex) { - case (Task(TaskData(ts, sc, kind), taskCounter), idx) => - val task = TestTask(ts, sc, executionOrder, taskCounter, kind) - tasks += task - taskScheduler.scheduleTask(task) - - // If this was the final task for the sequencer counter, then add the sequencer counter's time as a tick. 
- if (shuffled.indexWhere(_.sequencerCounter == sc, idx + 1) == -1) { - val timestamp = ticksWithTasks(sc) - taskScheduler.addTick(sc, timestamp) - ticksAdded += sc - checkBarriers() - } else Future.unit - case (Tick(sc, ts), _) => - taskScheduler.addTick(sc, ts) - ticksAdded += sc - checkBarriers() - } - _ <- tasks.result().parTraverse_(_.done()) - } yield { - assert( - executionOrder.toSeq == tasksInExecutionOrder.indices, - s"shuffling ${shuffled.map(_.sequencerCounter)}", - ) - } - } - .map(_ => succeed) - } - - "process tasks and complete barriers when they are ready" in { - val taskScheduler = mkTaskScheduler() - val executionOrder = mutable.Queue.empty[Int] - val waitPromise = Promise[Unit]() - val task0 = TestTask(ofEpochMilli(1), SequencerCounter(0), executionOrder) - val task1 = - TestTask( - ofEpochMilli(2), - SequencerCounter(0), - executionOrder, - seqNo = 1, - waitFor = waitPromise.future, - ) - taskScheduler.scheduleTask(task1) - taskScheduler.scheduleTask(task0) - val barrier1 = taskScheduler.scheduleBarrierUS(ofEpochMilli(1)).map(_.unwrap) - val barrier2 = taskScheduler.scheduleBarrierUS(ofEpochMilli(2)).map(_.unwrap) - val barrier3 = taskScheduler.scheduleBarrierUS(ofEpochMilli(3)).map(_.unwrap) - taskScheduler.addTick(SequencerCounter(1), ofEpochMilli(2)) - taskScheduler.addTick(SequencerCounter(0), ofEpochMilli(1)) - val barrier0 = taskScheduler.scheduleBarrierUS(ofEpochMilli(1)).map(_.unwrap) - assert(barrier0.isEmpty, s"Barrier is before observed time of the task scheduler") - for { - _ <- task0.done() - _ = assert(executionOrder.toSeq == Seq(0), "only the first task has run") - _ <- barrier1.value - _ <- barrier2.value // complete the barrier even if we can't execute the task - _ = waitPromise.success(()) - _ <- task1.done() - _ = assert(executionOrder.toSeq == Seq(0, 1), "the second task has run") - _ = assert(barrier3.exists(!_.isCompleted), "The third barrier is not reached") - } yield succeed - } - - "complain about timestamps before head" in { - val taskScheduler = mkTaskScheduler() - loggerFactory.assertInternalError[IllegalArgumentException]( - taskScheduler.addTick(SequencerCounter(1), ofEpochMilli(-1)), - _.getMessage shouldBe "Timestamp 1969-12-31T23:59:59.999Z for sequencer counter 1 is not after current time 1970-01-01T00:00:00Z.", - ) - } - - "complain about non-increasing timestamps on ticks" in { - val taskScheduler = mkTaskScheduler() - - taskScheduler.addTick(SequencerCounter(1), ofEpochMilli(2)) - taskScheduler.addTick(SequencerCounter(7), ofEpochMilli(4)) - loggerFactory.assertInternalError[IllegalArgumentException]( - taskScheduler.addTick(SequencerCounter(3), ofEpochMilli(1)), - _.getMessage shouldBe "Timestamp 1970-01-01T00:00:00.001Z for sequencer counter 3 is not after timestamp 1970-01-01T00:00:00.002Z of an earlier sequencer counter.", - ) // before previous counter - loggerFactory.assertInternalError[IllegalArgumentException]( - taskScheduler.addTick(SequencerCounter(4), ofEpochMilli(2)), - _.getMessage shouldBe "Timestamp 1970-01-01T00:00:00.002Z for sequencer counter 4 is not after timestamp 1970-01-01T00:00:00.002Z of an earlier sequencer counter.", - ) // same time as previous counter - loggerFactory.assertInternalError[IllegalArgumentException]( - taskScheduler.addTick(SequencerCounter(5), ofEpochMilli(4)), - _.getMessage shouldBe "Timestamp 1970-01-01T00:00:00.004Z for sequencer counter 5 is not before timestamp 1970-01-01T00:00:00.004Z of a later sequencer counter.", - ) // same as next counter - 
loggerFactory.assertInternalError[IllegalArgumentException]( - taskScheduler.addTick(SequencerCounter(6), ofEpochMilli(5)), - _.getMessage shouldBe "Timestamp 1970-01-01T00:00:00.005Z for sequencer counter 6 is not before timestamp 1970-01-01T00:00:00.004Z of a later sequencer counter.", - ) // after next counter - - taskScheduler.addTick(SequencerCounter(0), ofEpochMilli(1)) - loggerFactory.assertInternalError[IllegalArgumentException]( - taskScheduler.addTick(SequencerCounter(2), ofEpochMilli(1).addMicros(1L)), - _.getMessage shouldBe "Timestamp 1970-01-01T00:00:00.001001Z for sequencer counter 2 is not after current time 1970-01-01T00:00:00.002Z.", - ) - loggerFactory.assertInternalError[IllegalArgumentException]( - taskScheduler.addTick(SequencerCounter(0), ofEpochMilli(3)), - _.getMessage shouldBe "Timestamp 1970-01-01T00:00:00.003Z for outdated sequencer counter 0 is after current time 1970-01-01T00:00:00.002Z.", - ) // before head, but after latest observed time - } - - "ignore signals before head" in { - val taskScheduler = mkTaskScheduler() - - taskScheduler.addTick(SequencerCounter(0), ofEpochMilli(2)) - taskScheduler.addTick(SequencerCounter(1), ofEpochMilli(3)) - taskScheduler.addTick(SequencerCounter(0), ofEpochMilli(2)) - taskScheduler.addTick( - SequencerCounter(0), - ofEpochMilli(1), - ) // don't throw even if we signal a different time - succeed - } - - "complain about adding a sequencer counter twice with different times" in { - val taskScheduler = mkTaskScheduler() - - taskScheduler.addTick(SequencerCounter(1), ofEpochMilli(10)) - loggerFactory.assertInternalError[IllegalArgumentException]( - taskScheduler.addTick(SequencerCounter(1), ofEpochMilli(20)), - _.getMessage shouldBe "Timestamp 1970-01-01T00:00:00.020Z for sequencer counter 1 differs from timestamp 1970-01-01T00:00:00.010Z that was signalled before.", - ) - taskScheduler.addTick(SequencerCounter(1), ofEpochMilli(10)) - succeed - } - - "complain about Long.MaxValue as a sequencer counter" in { - val taskScheduler = mkTaskScheduler() - loggerFactory.assertInternalError[IllegalArgumentException]( - taskScheduler.addTick(SequencerCounter.MaxValue, CantonTimestamp.MaxValue), - _.getMessage shouldBe "Sequencer counter Long.MaxValue signalled to task scheduler.", - ) - } - - "scheduled tasks must be after current time" in { - val taskScheduler = mkTaskScheduler( - SequencerCounter(10), - initTs = ofEpochMilli(3), - ) - val queue = mutable.Queue.empty[Int] - - loggerFactory.assertInternalError[IllegalArgumentException]( - taskScheduler.scheduleTask(TestTask(ofEpochMilli(-1), SequencerCounter(10), queue, 1)), - _.getMessage should fullyMatch regex "Timestamp .* of new task TestTask.* is not later than current time .*\\.", - ) - loggerFactory.assertInternalError[IllegalArgumentException]( - taskScheduler.scheduleTask(TestTask(ofEpochMilli(2), SequencerCounter(10), queue, 1)), - _.getMessage should fullyMatch regex "Timestamp .* of new task TestTask.* is not later than current time .*\\.", - ) - loggerFactory.assertInternalError[IllegalArgumentException]( - taskScheduler.scheduleTask(TestTask(ofEpochMilli(3), SequencerCounter(10), queue, 1)), - _.getMessage should fullyMatch regex "Timestamp .* of new task TestTask.* is not later than current time .*\\.", - ) - - taskScheduler.scheduleTask( - TestTask( - ofEpochMilli(4), - SequencerCounter(10), - queue, - 2, - ) - ) - loggerFactory.assertInternalError[IllegalArgumentException]( - taskScheduler.addTick(SequencerCounter(10), ofEpochMilli(1)), - _.getMessage shouldBe 
"Timestamp 1970-01-01T00:00:00.001Z for sequencer counter 10 is not after current time 1970-01-01T00:00:00.003Z.", - ) - taskScheduler.addTick(SequencerCounter(11), ofEpochMilli(10)) - taskScheduler.addTick(SequencerCounter(10), ofEpochMilli(5)) - // Time advances even if a task cannot be processed yet - loggerFactory.assertInternalError[IllegalArgumentException]( - taskScheduler.scheduleTask(TestTask(ofEpochMilli(8), SequencerCounter(10), queue, 3)), - _.getMessage should fullyMatch regex "Timestamp .* of new task TestTask.* is not later than current time .*\\.", - ) - loggerFactory.assertInternalError[IllegalArgumentException]( - taskScheduler.scheduleTask(TestTask(ofEpochMilli(5), SequencerCounter(10), queue, 3)), - _.getMessage should fullyMatch regex "Timestamp .* of new task TestTask.* is not later than current time .*\\.", - ) - loggerFactory.assertInternalError[IllegalArgumentException]( - taskScheduler.scheduleTask(TestTask(ofEpochMilli(4), SequencerCounter(10), queue, 3)), - _.getMessage should fullyMatch regex "Timestamp .* of new task TestTask.* is not later than current time .*\\.", - ) - } - - "scheduleIfLater should work properly" in { - val taskScheduler = mkTaskScheduler( - SequencerCounter(10), - ofEpochMilli(2), - ) - val queue = mutable.Queue.empty[Int] - - taskScheduler.scheduleTaskIfLater( - ofEpochMilli(1), - _ => fail(), - ) shouldBe Left(ofEpochMilli(2)) - taskScheduler.scheduleTaskIfLater( - ofEpochMilli(2), - _ => fail(), - ) shouldBe Left(ofEpochMilli(2)) - val task1 = taskScheduler - .scheduleTaskIfLater( - ofEpochMilli(3), - TestTask( - _, - SequencerCounter(10), - queue, - 1, - ), - ) - .value - task1.timestamp shouldBe ofEpochMilli(3) - - val task2 = taskScheduler - .scheduleTaskIfLater( - ofEpochMilli(7), - TestTask( - _, - SequencerCounter(10), - queue, - 2, - ), - ) - .value - task2.timestamp shouldBe ofEpochMilli(7) - - val task3 = taskScheduler - .scheduleTaskIfLater( - ofEpochMilli(15), - TestTask( - _, - SequencerCounter(10), - queue, - 3, - ), - ) - .value - task3.timestamp shouldBe ofEpochMilli(15) - - queue.toList shouldBe Nil - taskScheduler.addTick(SequencerCounter(11), ofEpochMilli(10)) - queue.toList shouldBe Nil - taskScheduler.addTick(SequencerCounter(10), ofEpochMilli(5)) - for { - _ <- task1.done() - _ <- task2.done() - } yield { - queue.toList shouldBe List(1, 2) - } - } - - "scheduleImmediately should work properly" in { - val taskScheduler = mkTaskScheduler( - SequencerCounter(10), - ofEpochMilli(2), - ) - val queue = mutable.Queue.empty[Int] - - val task1 = taskScheduler - .scheduleTaskIfLater( - ofEpochMilli(3), - TestTask( - _, - SequencerCounter(10), - queue, - 2, - ), - ) - .value - task1.timestamp shouldBe ofEpochMilli(3) - - val task2Waiting = Promise[Unit]() - val task2 = taskScheduler - .scheduleTaskIfLater( - ofEpochMilli(7), - TestTask( - _, - SequencerCounter(10), - queue, - 3, - waitFor = task2Waiting.future, - ), - ) - .value - task2.timestamp shouldBe ofEpochMilli(7) - - val immediate1Executed = Promise[Unit]() - taskScheduler.scheduleTaskImmediately( - ts => { - queue.enqueue(1) - immediate1Executed.trySuccess(()) - ts shouldBe ofEpochMilli(2) - FutureUnlessShutdown.pure(()) - }, - implicitly, - ) shouldBe ofEpochMilli(2) - - for { - _ <- immediate1Executed.future - immediate2Executed = Promise[Unit]() - _ = { - queue.toList shouldBe List(1) - taskScheduler.addTick(SequencerCounter(11), ofEpochMilli(10)) - taskScheduler.addTick(SequencerCounter(10), ofEpochMilli(5)) - taskScheduler.scheduleTaskImmediately( - ts => { - 
queue.enqueue(4) - immediate2Executed.trySuccess(()) - ts shouldBe ofEpochMilli(10) - FutureUnlessShutdown.pure(()) - }, - implicitly, - ) shouldBe ofEpochMilli(10) - } - _ <- task1.done() - _ = { - queue.toList shouldBe List(1, 2) - Threading.sleep(50) - task2.done().isCompleted shouldBe false - immediate2Executed.future.isCompleted shouldBe false - task2Waiting.trySuccess(()) - } - _ <- task2.done() - _ <- immediate2Executed.future - } yield { - queue.toList shouldBe List(1, 2, 3, 4) - } - } - - "log INFO in case of missing ticks" in { - val timeoutMillis = 10L - - val capturingLoggerFactory = - new NamedEventCapturingLogger( - classOf[TaskSchedulerTest].getSimpleName, - // Skip everything below INFO level as this is not relevant here - skip = _.level.toInt < Level.INFO.toInt, - ) - val taskScheduler = mkTaskScheduler( - initTs = CantonTimestamp.ofEpochSecond(-1), - loggerFactory = capturingLoggerFactory, - ) - - def assertInfoLogged( - waitFor: Int, - lastSc: Int, - lastTimestamp: CantonTimestamp, - traceContext: TraceContext, - ): Assertion = { - capturingLoggerFactory.assertNextMessageIs( - s"Task scheduler waits for tick of sc=$waitFor. The tick with sc=$lastSc occurred at $lastTimestamp. Blocked trace ids: ${traceContext.traceId.value}", - Level.INFO, - ) - capturingLoggerFactory.assertNoMoreEvents(timeoutMillis) - } - - // An idle scheduler should not log a problem. - clock.advance(alertAfter) - capturingLoggerFactory.assertNoMoreEvents(timeoutMillis) - - // Schedule a task. The scheduler should not log a problem as the task is above the high watermark. - val task = TestTask(CantonTimestamp.ofEpochSecond(1), SequencerCounter(1))( - ec = implicitly, - traceContext = nonEmptyTraceContext1, - ) - taskScheduler.scheduleTask(task) - clock.advance(alertEvery) - capturingLoggerFactory.assertNoMoreEvents(timeoutMillis) - - // Tick the task and check that a log line is emitted - taskScheduler.addTick(SequencerCounter(1), CantonTimestamp.ofEpochSecond(1)) - clock.advance(alertEvery) - assertInfoLogged(0, -1, CantonTimestamp.Epoch, nonEmptyTraceContext1) - - // After alertEvery, the log line should be emitted again. - clock.advance(alertEvery) - assertInfoLogged(0, -1, CantonTimestamp.Epoch, nonEmptyTraceContext1) - - // Add the missing ticks, wait and check that nothing is logged. - taskScheduler.addTick(SequencerCounter(0), CantonTimestamp.ofEpochSecond(0)) - val tsOf1 = clock.now - clock.advance(alertAfter) - capturingLoggerFactory.assertNoMoreEvents(timeoutMillis) - - // Schedule a blocked barrier and check that a log line is emitted. - taskScheduler.scheduleBarrierUS(CantonTimestamp.ofEpochSecond(3))(nonEmptyTraceContext2) - taskScheduler.addTick(SequencerCounter(3), CantonTimestamp.ofEpochSecond(3)) - clock.advance(alertEvery) - assertInfoLogged(2, 1, tsOf1, nonEmptyTraceContext2) - - // Add the missing tick, wait and check that nothing is logged. 
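- // (The tick for sc=2 closes the gap: time can advance to epoch second 3, the barrier completes, and the alert stops repeating.)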
- taskScheduler.addTick(SequencerCounter(2), CantonTimestamp.ofEpochSecond(2)) - clock.advance(alertAfter) - capturingLoggerFactory.assertNoMoreEvents(timeoutMillis) - } - } -} - -object TaskSchedulerTest { - - class MockTaskSchedulerMetrics extends TaskSchedulerMetrics { - val prefix: MetricName = MetricName("test") - override val sequencerCounterQueue: metrics.api.MetricHandle.Counter = NoOpCounter( - MetricInfo(prefix :+ "counter", "", MetricQualification.Debug) - ) - - override def taskQueue(size: () => Int): Gauge.CloseableGauge = - SimpleCloseableGauge(MetricInfo(MetricName("test"), "", MetricQualification.Debug), () => ()) - } - - val Finalization: Int = 0 - val Timeout: Int = 1 - val Activeness: Int = 2 - - private final case class TestTask( - override val timestamp: CantonTimestamp, - override val sequencerCounter: SequencerCounter, - queue: mutable.Queue[Int] = mutable.Queue.empty, - seqNo: Int = 0, - kind: Int = Activeness, - waitFor: Future[Unit] = Future.unit, - )(implicit val ec: ExecutionContext, val traceContext: TraceContext) - extends TaskScheduler.TimedTaskWithSequencerCounter { - - private val donePromise: Promise[Unit] = Promise[Unit]() - - def done(): Future[Unit] = donePromise.future - - override def perform(): FutureUnlessShutdown[Unit] = FutureUnlessShutdown.outcomeF { - waitFor.map { _ => - queue.enqueue(seqNo) - donePromise.success(()) - () - } - } - - override protected def pretty: Pretty[this.type] = adHocPrettyInstance - - override def close(): Unit = () - } - - private val TestTaskOrdering: Ordering[TestTask] = - Ordering.by[TestTask, (Int, SequencerCounter)](task => (task.kind, task.sequencerCounter)) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/TransactionViewDecompositionTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/TransactionViewDecompositionTest.scala deleted file mode 100644 index 6cb8bf450b..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/TransactionViewDecompositionTest.scala +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.data - -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} -import com.digitalasset.canton.data.TransactionViewDecomposition.* -import com.digitalasset.canton.protocol.* -import com.digitalasset.canton.protocol.RollbackContext.{RollbackScope, RollbackSibling} -import com.digitalasset.canton.protocol.WellFormedTransaction.WithoutSuffixes -import com.digitalasset.canton.util.LfTransactionUtil -import com.digitalasset.canton.{ - BaseTest, - ComparesLfTransactions, - HasExecutionContext, - LfPartyId, - LfValue, - NeedsNewLfContractIds, -} -import com.digitalasset.daml.lf.transaction.test.TestNodeBuilder.CreateKey -import com.digitalasset.daml.lf.transaction.test.TreeTransactionBuilder.NodeWrapper -import com.digitalasset.daml.lf.transaction.test.{ - TestIdFactory, - TestNodeBuilder, - TreeTransactionBuilder, -} -import org.scalatest.wordspec.AnyWordSpec - -class TransactionViewDecompositionTest - extends AnyWordSpec - with BaseTest - with HasExecutionContext - with ComparesLfTransactions - with NeedsNewLfContractIds { - - lazy val factory: TransactionViewDecompositionFactory.type = TransactionViewDecompositionFactory - s"With factory ${factory.getClass.getSimpleName}" when { - - val exampleTransactionFactory = new ExampleTransactionFactory()() - - val examples = - exampleTransactionFactory.standardHappyCases - examples foreach { example => - s"decomposing $example into views" must { - "yield the correct views" in { - factory - .fromTransaction( - exampleTransactionFactory.topologySnapshot, - example.wellFormedUnsuffixedTransaction, - RollbackContext.empty, - Some(ExampleTransactionFactory.submitter), - ) - .futureValueUS - .toList shouldEqual example.rootViewDecompositions.toList - } - } - } - } - - "A view decomposition" when { - import ExampleTransactionFactory.* - - "there are lots of top-level nodes" can { - "be constructed without stack overflow" in { - val flatTransactionSize = 10000 - - val decomposition = timeouts.default.await("Decomposing test transaction")( - TransactionViewDecompositionFactory - .fromTransaction( - defaultTopologySnapshot, - wftWithCreateNodes(flatTransactionSize, signatory, observer), - RollbackContext.empty, - None, - ) - .failOnShutdown - ) - - decomposition.size shouldBe flatTransactionSize - } - } - - "a transaction with nested rollbacks" can { - - import RollbackDecomposition.* - import com.digitalasset.daml.lf.transaction.test.TreeTransactionBuilder.* - - object tif extends TestIdFactory - - val alice: LfPartyId = signatory - val bob: LfPartyId = observer - val carol: LfPartyId = extra - - val embeddedRollbackExample: LfVersionedTransaction = toVersionedTransaction( - exerciseNode(tif.newCid, signatories = Set(alice)).withChildren( - exerciseNode(tif.newCid, signatories = Set(alice)).withChildren( - TestNodeBuilder - .rollback() - .withChildren( - exerciseNode(tif.newCid, signatories = Set(alice), observers = Set(carol)) - ) - ), - exerciseNode(tif.newCid, signatories = Set(alice), observers = Set(bob)), - ) - ) - - val expected = List( - RbNewTree( - rbScope(PositiveInt.one), - Set(alice), - List[RollbackDecomposition]( - RbSameTree(rbScope(PositiveInt.one)), - RbNewTree(rbScope(PositiveInt.one, PositiveInt.one), Set(alice, carol)), - RbNewTree(rbScope(PositiveInt.two), Set(alice, bob)), - ), - ) - ) - - "does not re-use rollback contexts" in { - - val decomposition = TransactionViewDecompositionFactory - .fromTransaction( - defaultTopologySnapshot, -
toWellFormedUnsuffixedTransaction(embeddedRollbackExample), - RollbackContext.empty, - None, - ) - .futureValueUS - - val actual = RollbackDecomposition.rollbackDecomposition(decomposition) - - actual shouldBe expected - } - } - - "new view counting" can { - object tif extends TestIdFactory - val node = exerciseNode(tif.newCid, signatories = Set.empty) - val sameView = SameView(node, LfNodeId(0), RollbackContext.empty) - var nextThreshold: NonNegativeInt = NonNegativeInt.zero - def newView(children: TransactionViewDecomposition*): NewView = { - // Trick: Use unique thresholds to get around NewView nesting check - // that requires informees or thresholds to differ. - nextThreshold = nextThreshold + NonNegativeInt.one - NewView( - node, - ViewConfirmationParameters.tryCreate( - Set.empty, - Seq(Quorum(Map.empty, nextThreshold)), - ), - None, - LfNodeId(0), - children, - RollbackContext.empty, - ) - } - - "deal with empty transactions" in { - TransactionViewDecomposition.countNestedViews(Seq.empty) shouldBe 0 - } - - "count single view" in { - TransactionViewDecomposition.countNestedViews(Seq(newView())) shouldBe 1 - } - - "not count same view" in { - TransactionViewDecomposition.countNestedViews(Seq(newView(sameView))) shouldBe 1 - } - - "count multiple sibling views" in { - TransactionViewDecomposition.countNestedViews( - Seq(newView(newView(), sameView, newView(), sameView, newView())) - ) shouldBe 4 - } - - "count nested views" in { - TransactionViewDecomposition.countNestedViews( - Seq(newView(newView(newView(newView())), sameView, newView(newView(), newView()))) - ) shouldBe 7 - } - - } - - } - - private def wftWithCreateNodes( - size: Int, - signatory: LfPartyId, - observer: LfPartyId, - ): WellFormedTransaction[WithoutSuffixes] = { - val alice = signatory - val bob = observer - - val tx = TreeTransactionBuilder.toVersionedTransaction( - (0 until size) - .map[NodeWrapper] { _ => - TestNodeBuilder.create( - id = newLfContractIdUnsuffixed(), - templateId = ExampleTransactionFactory.templateId, - argument = args( - LfValue.ValueParty(alice), - LfValue.ValueParty(bob), - args(notUsed), - seq(LfValue.ValueParty(bob)), - ), - signatories = Set(alice), - observers = Set(bob), - key = CreateKey.NoKey, - ) - }* - ) - - toWellFormedUnsuffixedTransaction(tx) - - } - - private def toWellFormedUnsuffixedTransaction( - tx: LfVersionedTransaction - ): WellFormedTransaction[WithoutSuffixes] = - WellFormedTransaction - .normalizeAndCheck( - tx, - TransactionMetadata( - CantonTimestamp.Epoch, - CantonTimestamp.Epoch, - tx.nodes.collect { - case (nid, node) if LfTransactionUtil.nodeHasSeed(node) => nid -> hasher() - }, - ), - WithoutSuffixes, - ) - .value - -} - -sealed trait RollbackDecomposition -object RollbackDecomposition { - - final case class RbNewTree( - rb: RollbackScope, - informees: Set[LfPartyId], - children: Seq[RollbackDecomposition] = Seq.empty, - ) extends RollbackDecomposition - - final case class RbSameTree(rb: RollbackScope) extends RollbackDecomposition - - /** The purpose of this method is to map a tree [[TransactionViewDecomposition]] onto a - * [[RollbackDecomposition]] hierarchy to aid comparison. The [[RollbackContext.nextChild]] value is - * significant but is not available for inspection or construction. For this reason we use the trick - * of entering a rollback context and then converting to a rollback scope that has as its last - * sibling the nextChild value. 
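- * For example (mirroring the expected value in the "nested rollbacks" test above), the root NewView - * with an empty rollback context maps to RbNewTree(rbScope(PositiveInt.one), Set(alice), children), - * and a SameView child at the same context maps to RbSameTree(rbScope(PositiveInt.one)).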
- */ - def rollbackDecomposition( - decompositions: Seq[TransactionViewDecomposition] - ): List[RollbackDecomposition] = - decompositions - .map[RollbackDecomposition] { - case view: NewView => - RbNewTree( - view.rbContext.enterRollback.rollbackScope.toList, - view.viewConfirmationParameters.informees, - rollbackDecomposition(view.tailNodes), - ) - case view: SameView => - RbSameTree(view.rbContext.enterRollback.rollbackScope.toList) - } - .toList - - def rbScope(rollbackScope: RollbackSibling*): RollbackScope = rollbackScope.toList - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/TransactionViewTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/TransactionViewTest.scala deleted file mode 100644 index 738f13f967..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/TransactionViewTest.scala +++ /dev/null @@ -1,339 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.data - -import cats.syntax.either.* -import com.digitalasset.canton.crypto.{HashOps, Salt, TestSalt} -import com.digitalasset.canton.data.ViewParticipantData.InvalidViewParticipantData -import com.digitalasset.canton.protocol.* -import com.digitalasset.canton.protocol.v30.ActionDescription.FetchActionDescription -import com.digitalasset.canton.util.LfTransactionBuilder -import com.digitalasset.canton.util.ShowUtil.* -import com.digitalasset.canton.{BaseTest, HasExecutionContext, LfPackageId, LfVersioned} -import com.digitalasset.daml.lf.value.Value -import org.scalatest.wordspec.AnyWordSpec - -class TransactionViewTest extends AnyWordSpec with BaseTest with HasExecutionContext { - - private val factory = new ExampleTransactionFactory()() - - private val hashOps: HashOps = factory.cryptoOps - - private val contractInst: LfThinContractInst = ExampleTransactionFactory.contractInstance() - - private val cantonContractIdVersion: CantonContractIdVersion = AuthenticatedContractIdVersionV11 - private val createdId: LfContractId = - cantonContractIdVersion.fromDiscriminator( - ExampleTransactionFactory.lfHash(3), - ExampleTransactionFactory.unicum(0), - ) - private val absoluteId: LfContractId = ExampleTransactionFactory.suffixedId(0, 0) - private val otherAbsoluteId: LfContractId = ExampleTransactionFactory.suffixedId(1, 1) - private val salt: Salt = factory.transactionSalt - private val nodeSeed: LfHash = ExampleTransactionFactory.lfHash(1) - private val globalKey: LfGlobalKey = - LfGlobalKey - .build( - LfTransactionBuilder.defaultTemplateId, - Value.ValueInt64(100L), - LfTransactionBuilder.defaultPackageName, - ) - .value - - private val defaultPackagePreference = Set(ExampleTransactionFactory.packageId) - - private val defaultActionDescription: ActionDescription = - ActionDescription.tryFromLfActionNode( - ExampleTransactionFactory.createNode(createdId, contractInst), - Some(ExampleTransactionFactory.lfHash(5)), - defaultPackagePreference, - testedProtocolVersion, - ) - - forEvery(factory.standardHappyCases) { example => - s"The views of $example" when { - - forEvery(example.viewWithSubviews.zipWithIndex) { case ((view, subviews), index) => - s"processing $index-th view" can { - "be folded" in { - val foldedSubviews = - view.foldLeft(Seq.newBuilder[TransactionView])((acc, v) => acc += v) - - foldedSubviews.result() should equal(subviews) - } - - "be flattened" in { - view.flatten should 
equal(subviews) - } - } - } - } - } - - "A view" when { - val firstSubviewIndex = TransactionSubviews.indices(1).head.toString - - "a child view has the same view common data" must { - val view = factory.SingleCreate(seed = ExampleTransactionFactory.lfHash(3)).view0 - val subViews = TransactionSubviews(Seq(view))(testedProtocolVersion, factory.cryptoOps) - "reject creation" in { - TransactionView.create(hashOps)( - view.viewCommonData, - view.viewParticipantData, - subViews, - testedProtocolVersion, - ) shouldEqual Left( - s"The subview with index $firstSubviewIndex has equal viewCommonData to a parent." - ) - } - } - - "a child view has package preferences not in the parent" must { - - val unexpectedPackage = LfPackageId.assertFromString("u1") - val view = factory.SingleExercise(seed = ExampleTransactionFactory.lfHash(3)).view0 - - "reject creation if child exercise based view is different from its parent" in { - - val subview = - TransactionView.viewParticipantDataUnsafe - .modify { vpd => - val actionDescription = vpd.tryUnwrap.actionDescription.toProtoV30 - actionDescription.getExercise.withPackagePreference(Seq(unexpectedPackage)) - val exercise = actionDescription.withExercise( - actionDescription.getExercise.withPackagePreference(Seq(unexpectedPackage)) - ) - vpd.tryUnwrap.copy(actionDescription = ActionDescription.fromProtoV30(exercise).value) - }(view) - - val subViews = TransactionSubviews(Seq(subview))(testedProtocolVersion, factory.cryptoOps) - - TransactionView - .create(hashOps)( - view.viewCommonData, - view.viewParticipantData, - subViews, - testedProtocolVersion, - ) - .left - .value shouldBe s"Detected unexpected exercise package preference: $unexpectedPackage at $firstSubviewIndex" - } - - "reject creation if child fetch based view is different from its parent" in { - - val subview = - TransactionView.viewParticipantDataUnsafe - .modify { vpd => - val actionDescription = vpd.tryUnwrap.actionDescription.toProtoV30 - val ex = actionDescription.getExercise - val fetch = actionDescription.withFetch( - FetchActionDescription( - inputContractId = ex.inputContractId, - actors = ex.actors, - byKey = false, - templateId = s"$unexpectedPackage:module:template", - interfaceId = Some("ifPkg:module:template"), - ) - ) - vpd.tryUnwrap.copy(actionDescription = ActionDescription.fromProtoV30(fetch).value) - }(view) - - val subViews = TransactionSubviews(Seq(subview))(testedProtocolVersion, factory.cryptoOps) - - TransactionView - .create(hashOps)( - view.viewCommonData, - view.viewParticipantData, - subViews, - testedProtocolVersion, - ) - .left - .value shouldBe s"Detected unexpected fetch package preference: $unexpectedPackage at $firstSubviewIndex" - } - - } - } - - "A view participant data" when { - - def create( - actionDescription: ActionDescription = defaultActionDescription, - consumed: Set[LfContractId] = Set.empty, - coreInputs: Map[LfContractId, SerializableContract] = Map.empty, - createdIds: Seq[LfContractId] = Seq(createdId), - archivedInSubviews: Set[LfContractId] = Set.empty, - resolvedKeys: Map[LfGlobalKey, LfVersioned[SerializableKeyResolution]] = Map.empty, - ): Either[String, ViewParticipantData] = { - - val created = createdIds.map { id => - val serializable = ExampleTransactionFactory.asSerializable( - id, - contractInstance = ExampleTransactionFactory.contractInstance(), - metadata = ContractMetadata.empty, - salt = TestSalt.generateSalt(1), - ) - CreatedContract.tryCreate(serializable, consumed.contains(id), rolledBack = false) - } - val coreInputs2 = 
coreInputs.transform { (id, contract) => - InputContract(contract, consumed.contains(id)) - } - - ViewParticipantData - .create(hashOps)( - coreInputs2, - created, - archivedInSubviews, - resolvedKeys, - actionDescription, - RollbackContext.empty, - salt, - testedProtocolVersion, - ) - .flatMap { data => - // Return error message if root action is not valid - Either - .catchOnly[InvalidViewParticipantData](data.rootAction) - .bimap(ex => ex.message, _ => data) - } - } - - "a contract is created twice" must { - "reject creation" in { - create(createdIds = Seq(createdId, createdId)).left.value should - startWith regex "createdCore contains the contract id .* multiple times at indices 0, 1" - } - } - "a used contract has an inconsistent id" must { - "reject creation" in { - val usedContract = - ExampleTransactionFactory.asSerializable( - otherAbsoluteId, - metadata = ContractMetadata.empty, - ) - - create(coreInputs = Map(absoluteId -> usedContract)).left.value should startWith( - "Inconsistent ids for used contract: " - ) - } - } - "an overlap between archivedInSubview and coreCreated" must { - "reject creation" in { - create( - createdIds = Seq(createdId), - archivedInSubviews = Set(createdId), - ).left.value should startWith( - "Contract created in a subview are also created in the core: " - ) - } - } - "an overlap between archivedInSubview and coreInputs" must { - "reject creation" in { - val usedContract = - ExampleTransactionFactory.asSerializable(absoluteId, metadata = ContractMetadata.empty) - - create( - coreInputs = Map(absoluteId -> usedContract), - archivedInSubviews = Set(absoluteId), - ).left.value should startWith("Contracts created in a subview overlap with core inputs: ") - } - } - "the created contract of the root action is not declared first" must { - "reject creation" in { - create(createdIds = Seq.empty).left.value should startWith( - "No created core contracts declared for a view that creates contract" - ) - } - "reject creation with other contract ids" in { - val otherCantonId = - cantonContractIdVersion.fromDiscriminator( - ExampleTransactionFactory.lfHash(3), - ExampleTransactionFactory.unicum(1), - ) - create(createdIds = Seq(otherCantonId, createdId)).left.value should startWith( - show"View with root action Create $createdId declares $otherCantonId as first created core contract." - ) - } - } - "the used contract of the root action is not declared" must { - - "reject creation with exercise action" in { - create( - actionDescription = ActionDescription.tryFromLfActionNode( - ExampleTransactionFactory.exerciseNodeWithoutChildren(absoluteId), - Some(nodeSeed), - defaultPackagePreference, - testedProtocolVersion, - ) - ).left.value should startWith( - show"Input contract $absoluteId of the Exercise root action is not declared as core input." - ) - } - - "reject creation with fetch action" in { - - create( - actionDescription = ActionDescription.tryFromLfActionNode( - ExampleTransactionFactory.fetchNode( - absoluteId, - Set(ExampleTransactionFactory.submitter), - ), - None, - defaultPackagePreference, - testedProtocolVersion, - ) - ).left.value should startWith( - show"Input contract $absoluteId of the Fetch root action is not declared as core input." 
- ) - } - - "reject creation with lookup action" in { - create( - actionDescription = ActionDescription.tryFromLfActionNode( - ExampleTransactionFactory.lookupByKeyNode( - globalKey, - maintainers = Set(ExampleTransactionFactory.submitter), - ), - None, - defaultPackagePreference, - testedProtocolVersion, - ) - ).left.value should startWith( - show"Key $globalKey of LookupByKey root action is not resolved." - ) - - } - } - - "deserialized" must { - "reconstruct the original view participant data" in { - val usedContract = - ExampleTransactionFactory.asSerializable( - absoluteId, - metadata = ContractMetadata.tryCreate( - Set.empty, - Set.empty, - Some(ExampleTransactionFactory.globalKeyWithMaintainers()), - ), - ) - - val vpd = create( - consumed = Set(absoluteId), - createdIds = Seq(createdId), - coreInputs = Map(absoluteId -> usedContract), - archivedInSubviews = Set(otherAbsoluteId), - resolvedKeys = Map( - ExampleTransactionFactory.defaultGlobalKey -> - LfVersioned(ExampleTransactionFactory.transactionVersion, AssignedKey(absoluteId)) - ), - ).value - - ViewParticipantData - .fromByteString(testedProtocolVersion, hashOps)( - vpd.getCryptographicEvidence - ) - .map(_.unwrap) shouldBe Right(Right(vpd)) - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/ViewPositionTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/ViewPositionTest.scala deleted file mode 100644 index 3a5bd49dfa..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/data/ViewPositionTest.scala +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.data - -import cats.Order -import cats.instances.order.* -import com.digitalasset.canton.BaseTestWordSpec -import com.digitalasset.canton.data.ViewPosition.MerkleSeqIndex.Direction -import com.digitalasset.canton.data.ViewPosition.{ - MerklePathElement, - MerkleSeqIndex, - MerkleSeqIndexFromRoot, -} - -class ViewPositionTest extends BaseTestWordSpec { - - def mkIndex(i: Int): MerkleSeqIndex = MerkleSeqIndex(List.fill(i)(MerkleSeqIndex.Direction.Left)) - - "Correctly determine descendants" in { - val p1 = ViewPosition(List(mkIndex(1))) - val p2 = ViewPosition(List(mkIndex(2), mkIndex(1))) - val p3 = ViewPosition(List(mkIndex(3), mkIndex(1))) - val p4 = ViewPosition(List(mkIndex(4), mkIndex(3), mkIndex(1))) - - ViewPosition.isDescendant(p1, p1) shouldBe true - ViewPosition.isDescendant(p2, p1) shouldBe true - ViewPosition.isDescendant(p3, p1) shouldBe true - ViewPosition.isDescendant(p4, p1) shouldBe true - - ViewPosition.isDescendant(p1, p2) shouldBe false - ViewPosition.isDescendant(p3, p2) shouldBe false - ViewPosition.isDescendant(p4, p2) shouldBe false - - ViewPosition.isDescendant(p1, p3) shouldBe false - ViewPosition.isDescendant(p2, p3) shouldBe false - ViewPosition.isDescendant(p4, p3) shouldBe true - - ViewPosition.isDescendant(p1, p4) shouldBe false - ViewPosition.isDescendant(p2, p4) shouldBe false - ViewPosition.isDescendant(p3, p4) shouldBe false - } - - "Correctly order positions" in { - import Direction.* - - implicit val orderViewPosition: Order[ViewPosition] = ViewPosition.orderViewPosition - implicit val orderMerklePathElement: Order[MerklePathElement] = - ViewPosition.MerklePathElement.orderMerklePathElement - - Left should be < (Right: Direction) - - MerkleSeqIndex(List.empty) should be < 
(MerkleSeqIndex(List(Left)): MerklePathElement) - MerkleSeqIndex(List(Left)) should be < (MerkleSeqIndex(List(Left, Left)): MerklePathElement) - MerkleSeqIndex(List(Left, Left)) should be < (MerkleSeqIndex( - List(Right, Left) - ): MerklePathElement) - MerkleSeqIndex(List(Right, Left)) should be < (MerkleSeqIndex( - List(Left, Right) - ): MerklePathElement) - MerkleSeqIndex(List(Left, Right)) should be < (MerkleSeqIndex( - List(Right, Right) - ): MerklePathElement) - - MerklePathElement.orderMerklePathElement.compare( - MerkleSeqIndexFromRoot(List(Left, Right)), - MerkleSeqIndex(List(Right, Left)), - ) shouldBe 0 - - ViewPosition( - List(MerkleSeqIndex(List(Right)), MerkleSeqIndex(List(Left))) - ) should be < ViewPosition(List(MerkleSeqIndex(List(Left)), MerkleSeqIndex(List(Left, Left)))) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/environment/BootstrapStageTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/environment/BootstrapStageTest.scala deleted file mode 100644 index 3f9a0bea15..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/environment/BootstrapStageTest.scala +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.environment - -import com.digitalasset.canton.BaseTestWordSpec - -class BootstrapStageTest extends BaseTestWordSpec { - - "implement me" in { - // TODO(#12941) implement me - succeed - } - - "active node" should { - "perform auto-init with multiple stages" in { - // TODO(#12941) implement me - succeed - } - "wait for a manual input stage for startup" in { - // TODO(#12941) implement me - succeed - } - "recover from a crash during initialisation" in { - // TODO(#12941) implement me - succeed - } - "restart with multiple stages without rerunning init" in { - // TODO(#12941) implement me - succeed - } - "cleanly abort initialisation if one stage fails" in { - // TODO(#12941) implement me (test that auto-close happens on all places) - succeed - } - "ensure user interaction does not race" in { - // TODO(#12941) implement me - succeed - } - } - "passive node" should { - "wait for active replica to complete each init step" in { - // TODO(#12941) implement me - succeed - } - "pick up manual init steps from active replica" in { - // TODO(#12941) implement me - succeed - } - "take over initialisation if active node became passive" in { - // TODO(#12941) implement me - succeed - } - "be graceful if it became passive during init" in { - // TODO(#12941) implement me: if the node becomes passive during init, we should - // gracefully wait for active node to finish the init - succeed - } - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/error/CantonErrorTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/error/CantonErrorTest.scala deleted file mode 100644 index 90e607e427..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/error/CantonErrorTest.scala +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.error - -import com.digitalasset.base.error.utils.DecodedCantonError - -import com.digitalasset.base.error.{ - Alarm, - AlarmErrorCode, - ErrorCategory, - ErrorClass, - ErrorCode, - ErrorGroup, -} -import com.digitalasset.canton.BaseTestWordSpec -import com.digitalasset.canton.error.TestGroup.NestedGroup.MyCode.MyError -import com.digitalasset.canton.error.TestGroup.NestedGroup.{MyCode, TestAlarmErrorCode} -import com.digitalasset.canton.logging.{ErrorLoggingContext, LogEntry} -import com.digitalasset.canton.tracing.TraceContext -import io.grpc.Status.Code -import org.slf4j.event.Level - -object TestGroup extends ErrorGroup()(ErrorClass.root()) { - object NestedGroup extends ErrorGroup() { - object MyCode extends ErrorCode(id = "NESTED_CODE", ErrorCategory.ContentionOnSharedResources) { - override def logLevel: Level = Level.ERROR - final case class MyError(arg: String)(implicit val loggingContext: ErrorLoggingContext) - extends CantonError.Impl(cause = "this is my error") - } - - object TestAlarmErrorCode extends AlarmErrorCode(id = "TEST_MALICIOUS_BEHAVIOR") { - val exception = new RuntimeException("TestAlarmErrorCode exception") - final case class MyAlarm() - extends Alarm(cause = "My alarm cause", throwableO = Some(exception)) {} - } - } -} - -class CantonErrorTest extends BaseTestWordSpec { - - "canton errors" should { - "log proper error messages and provide a nice string" in { - loggerFactory.assertLogs( - MyError("testArg"), - x => { - x.errorMessage should include(MyCode.id) - x.errorMessage should include("this is my error") - x.mdc should contain(("arg" -> "testArg")) - val loc = x.mdc.get("location") - loc should not be empty - loc.exists(_.startsWith("CantonErrorTest")) shouldBe true - }, - ) - } - - "ship the context as part of the status and support decoding from exceptions" in { - val err = loggerFactory.suppressErrors(MyError("testArg")) - - val status = DecodedCantonError.fromStatusRuntimeException(err.asGrpcError).value - - status.retryIn should not be empty - status.context("arg") shouldBe "testArg" - status.code.id shouldBe MyCode.id - status.code.category shouldBe MyCode.category - } - } - - "canton error codes" should { - "allow recovering the error category from the string" in { - implicit val klass = new ErrorClass(Nil) - val code = new ErrorCode(id = "TEST_ERROR", ErrorCategory.ContentionOnSharedResources) {} - ErrorCodeUtils.errorCategoryFromString( - code.toMsg("bla bla", None, limit = None) - ) should contain( - ErrorCategory.ContentionOnSharedResources - ) - } - - "extract the error category from multi-line strings" in { - ErrorCodeUtils.errorCategoryFromString( - "DB_CONNECTION_LOST(13,0): It was not possible to establish a valid connection\nFull Error\n" - ) should contain( - ErrorCategory.BackgroundProcessDegradationWarning - ) - } - - } - - "An alarm" should { - "log with level WARN including the error category and details" in { - implicit val traceContext: TraceContext = nonEmptyTraceContext1 - val traceId = traceContext.traceId.value - val errorCodeStr = s"TEST_MALICIOUS_BEHAVIOR(5,${traceId.take(8)})" - - loggerFactory.assertLogs( - TestAlarmErrorCode.MyAlarm().report(), - entry => { - entry.warningMessage shouldBe s"$errorCodeStr: My alarm cause" - - withClue(entry.mdc) { - entry.mdc.get("location").value should startWith("CantonErrorTest.scala:") - entry.mdc - .get("err-context") - .value should fullyMatch regex "\\{location=CantonErrorTest\\.scala:.*\\}" - entry.mdc.get("error-code").value 
shouldBe errorCodeStr - entry.mdc.get("trace-id").value shouldBe traceId - entry.mdc.get("span-id").value shouldBe traceContext.spanId.value - entry.mdc.get("test").value shouldBe "CantonErrorTest" - } - entry.mdc should have size 6 - - entry.throwable shouldBe Some(TestAlarmErrorCode.exception) - }, - ) - } - - "not expose any details through GRPC" in { - val myAlarm = TestAlarmErrorCode.MyAlarm() - - val sre = myAlarm.asGrpcError - sre.getMessage shouldBe s"INVALID_ARGUMENT: ${LogEntry.SECURITY_SENSITIVE_MESSAGE_ON_API} with tid " - - val status = sre.getStatus - status.getDescription shouldBe s"${LogEntry.SECURITY_SENSITIVE_MESSAGE_ON_API} with tid " - status.getCode shouldBe Code.INVALID_ARGUMENT - - val deserializedCantonError = DecodedCantonError.fromStatusRuntimeException(sre).value - - deserializedCantonError.resources shouldBe empty - deserializedCantonError.code.category.redactDetails shouldBe true - deserializedCantonError.code.id shouldBe "NA" - deserializedCantonError.context shouldBe empty - deserializedCantonError.correlationId shouldBe empty - deserializedCantonError.traceId shouldBe empty - Option(status.getCause) shouldBe None - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/error/ErrorLoggingContextSpec.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/error/ErrorLoggingContextSpec.scala deleted file mode 100644 index 957c1153e1..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/error/ErrorLoggingContextSpec.scala +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.error - -import com.digitalasset.base.error.{BaseError, ErrorCategory, ErrorClass, ErrorCode} -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.error.testpackage.SeriousError -import com.digitalasset.canton.logging.ErrorLoggingContext -import com.digitalasset.canton.tracing.TraceContext -import org.scalatest.freespec.AnyFreeSpec - -class ErrorLoggingContextSpec extends AnyFreeSpec with BaseTest { - - object FooErrorCodeSecuritySensitive - extends ErrorCode( - "FOO_ERROR_CODE_SECURITY_SENSITIVE", - ErrorCategory.SystemInternalAssumptionViolated, - )(ErrorClass.root()) - - classOf[ErrorLoggingContext].getSimpleName - { - - "log information pertaining to a security sensitive error" - { - class FooErrorBig(override val code: ErrorCode) extends BaseError { - override val cause: String = "cause123" - - override def context: Map[String, String] = - super.context ++ Map( - "contextKey1" -> "contextValue1", - "key????" 
-> "keyWithInvalidCharacters", - ) - - override def throwableO: Option[Throwable] = - Some(new RuntimeException("runtimeException123")) - } - - val traceContext: TraceContext = nonEmptyTraceContext1 - - val errorLoggerBig = ErrorLoggingContext.forClass( - loggerFactory, - getClass, - Map( - "propertyKey" -> "propertyValue" - ), - traceContext, - ) - val correlationId = - errorLoggerBig.correlationId.valueOrFail("correlationId was not set") - val correlationIdTruncated = correlationId.take(8) - - val testedError = new FooErrorBig(FooErrorCodeSecuritySensitive) - - loggerFactory.assertSingleErrorLogEntry( - within = testedError.logWithContext()(errorLoggerBig), - expectedMsg = s"FOO_ERROR_CODE_SECURITY_SENSITIVE(4,$correlationIdTruncated): cause123", - expectedMDC = Map( - "propertyKey" -> "propertyValue", - "err-context" -> s"{contextKey1=contextValue1, key????=keyWithInvalidCharacters, location=$this.scala:}", - ), - expectedThrowable = Some( - new java.lang.RuntimeException("runtimeException123") - ), - ) - - } - - "log the error message with the correct mdc" in { - val error = SeriousError.Error( - "the error argument", - context = Map("extra-context-key" -> "extra-context-value"), - ) - - val traceContext: TraceContext = nonEmptyTraceContext1 - - val errorLogger = ErrorLoggingContext.forClass( - loggerFactory, - getClass, - Map(), - traceContext, - ) - - val correlationId = errorLogger.correlationId.valueOrFail("correlationId was not set") - val correlationIdTruncated = correlationId.take(8) - - loggerFactory.assertLogs( - within = error.logWithContext()(errorLogger), - assertions = logEntry => { - logEntry.errorMessage shouldBe s"BLUE_SCREEN(4,$correlationIdTruncated): the error argument" - - val mdc = logEntry.mdc.map { case (k, v) => - (k, v.replaceAll("\\.scala:\\d+", ".scala:")) - } - mdc should contain( - "err-context" -> s"{extra-context-key=extra-context-value, location=$this.scala:}" - ) - }, - ) - } - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/error/GeneratorsError.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/error/GeneratorsError.scala deleted file mode 100644 index d8df5b4ddf..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/error/GeneratorsError.scala +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.error - -import com.digitalasset.base.error.ErrorCategory -import magnolify.scalacheck.auto.* -import org.scalacheck.Arbitrary - -object GeneratorsError { - implicit val damlErrorCategoryArb: Arbitrary[ErrorCategory] = genArbitrary -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/health/ComponentStatusTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/health/ComponentStatusTest.scala deleted file mode 100644 index df676292d4..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/health/ComponentStatusTest.scala +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.health - -import com.digitalasset.canton.BaseTestWordSpec -import com.digitalasset.canton.error.TestGroup -import com.digitalasset.canton.health.ComponentHealthState.UnhealthyState -import com.digitalasset.canton.health.ComponentStatus -import com.digitalasset.canton.logging.pretty.PrettyUtil - -class ComponentStatusTest extends BaseTestWordSpec with PrettyUtil { - "ComponentHealthState" should { - "pretty print Ok" in { - ComponentStatus( - "component", - ComponentHealthState.Ok(), - ).toString shouldBe "component : Ok()" - } - - "pretty print Ok w/ details" in { - ComponentStatus( - "component", - ComponentHealthState.Ok(Some("good stuff")), - ).toString shouldBe "component : Ok(good stuff)" - } - - "pretty print Failed" in { - ComponentStatus( - "component", - ComponentHealthState.failed("broken"), - ).toString shouldBe "component : Failed(broken)" - } - - "pretty print Failed w/ error" in { - loggerFactory.suppressErrors( - ComponentStatus( - "component", - ComponentHealthState.Failed( - UnhealthyState(Some("broken"), Some(TestGroup.NestedGroup.MyCode.MyError("bad"))) - ), - ).toString shouldBe s"component : Failed(broken, error = NESTED_CODE(2,0): this is my error)" - ) - } - - "pretty print Degraded" in { - ComponentStatus( - "component", - ComponentHealthState.degraded("broken"), - ).toString shouldBe "component : Degraded(broken)" - } - - "pretty print Degraded w/ error" in { - loggerFactory.suppressErrors( - ComponentStatus( - "component", - ComponentHealthState.Degraded( - UnhealthyState(Some("broken"), Some(TestGroup.NestedGroup.MyCode.MyError("bad"))) - ), - ).toString shouldBe s"component : Degraded(broken, error = NESTED_CODE(2,0): this is my error)" - ) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/integration/tests/benchmarks/LtHash16Benchmark.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/integration/tests/benchmarks/LtHash16Benchmark.scala deleted file mode 100644 index 5f14014cdb..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/integration/tests/benchmarks/LtHash16Benchmark.scala +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.integration.tests.benchmarks - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.crypto.LtHash16 -import org.scalatest.wordspec.AnyWordSpec - -class LtHash16Benchmark extends AnyWordSpec with BaseTest { - - import org.bouncycastle.crypto.digests.SHA256Digest - - def sha256(bytes: Array[Byte]): Array[Byte] = { - val digest = new SHA256Digest() - digest.update(bytes, 0, bytes.length) - val out = new Array[Byte](digest.getDigestSize) - digest.doFinal(out, 0) - out - } - - "LtHash16" should { - - // A simple benchmark to ensure that we can hash a batch of contract IDs at a rate of at least 0.5 per ms - "be fast" in { - def testMillis(numContractIds: Int): Long = { - val muchoCids = - for (i <- 1.to(numContractIds)) - yield sha256(s"disc$i".getBytes) ++ sha256(s"unic$i".getBytes) - val h = LtHash16() - val start = System.currentTimeMillis() - muchoCids.foreach(v => h.add(v)) - System.currentTimeMillis() - start - } - val numContracts = 1000 - val testTime = testMillis(numContracts).toInt - testTime should be < numContracts * 2 - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/ledger/api/GeneratorsApi.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/ledger/api/GeneratorsApi.scala deleted file mode 100644 index b93923a64e..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/ledger/api/GeneratorsApi.scala +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.ledger.api - -import com.digitalasset.canton.data.DeduplicationPeriod -import com.digitalasset.canton.data.DeduplicationPeriod.DeduplicationDuration -import magnolify.scalacheck.auto.* -import org.scalacheck.{Arbitrary, Gen} - -import java.time.Duration - -object GeneratorsApi { - import com.digitalasset.canton.ledger.offset.GeneratorsOffset.* - - implicit val deduplicationDurationArb: Arbitrary[DeduplicationDuration] = Arbitrary( - Gen.posNum[Long].map(Duration.ofMillis).map(DeduplicationDuration.apply) - ) - implicit val deduplicationPeriodArb: Arbitrary[DeduplicationPeriod] = genArbitrary -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/ledger/offset/GeneratorsOffset.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/ledger/offset/GeneratorsOffset.scala deleted file mode 100644 index d71272623f..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/ledger/offset/GeneratorsOffset.scala +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.ledger.offset - -import com.digitalasset.canton.data.Offset -import org.scalacheck.{Arbitrary, Gen} - -object GeneratorsOffset { - - implicit val offsetArb: Arbitrary[Offset] = Arbitrary( - for { - l <- Gen.posNum[Long] - } yield Offset.tryFromLong(l) - ) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/CanAbortDueToShutdownTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/CanAbortDueToShutdownTest.scala deleted file mode 100644 index 33fff11b44..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/CanAbortDueToShutdownTest.scala +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.lifecycle - -import cats.data.{EitherT, Nested, OptionT} -import com.digitalasset.canton.lifecycle.UnlessShutdown.{AbortedDueToShutdown, Outcome} -import com.digitalasset.canton.util.{Checked, CheckedT} -import com.digitalasset.canton.{BaseTest, HasExecutionContext, ScalaFuturesWithPatience, lifecycle} -import org.scalatest.wordspec.AnyWordSpec -import org.scalatest.{EitherValues, OptionValues} - -import scala.concurrent.Future - -trait CanAbortDueToShutdownTest extends AnyWordSpec with BaseTest { - - def canAbortDueToShutdown[F[_]]( - F: CanAbortDueToShutdown[F], - fixture: CanAbortDueToShutdownTest.Fixture[F], - ): Unit = { - "abort" should { - "return AbortedDueToShutdown" in { - fixture.value(F.abort) shouldBe AbortedDueToShutdown - } - } - - "absorbOuter" should { - "embed AbortedDueToShutdown" in { - fixture.value(F.absorbOuter(AbortedDueToShutdown)) shouldBe AbortedDueToShutdown - } - - "embed Outcomes" in { - val fa = fixture.pure(42) - fixture.value(F.absorbOuter(Outcome(fa))) shouldBe Outcome(42) - } - } - } -} - -object CanAbortDueToShutdownTest { - trait Fixture[F[_]] { - def pure[A](a: A): F[A] - def value[A](fa: F[A]): UnlessShutdown[A] - } - - private[lifecycle] object FixtureFuture - extends CanAbortDueToShutdownTest.Fixture[Future] - with ScalaFuturesWithPatience { - override def pure[A](a: A): Future[A] = Future.successful(a) - - override def value[A](fa: Future[A]): UnlessShutdown[A] = Outcome(fa.futureValue) - } -} - -class UnlessShutdownCanAbortDueToShutdownTest extends CanAbortDueToShutdownTest { - "UnlessShutdown" should { - - behave like canAbortDueToShutdown( - CanAbortDueToShutdown[UnlessShutdown], - UnlessShutdownCanAbortDueToShutdownTest.Fixture, - ) - } -} - -object UnlessShutdownCanAbortDueToShutdownTest { - private[lifecycle] object Fixture extends CanAbortDueToShutdownTest.Fixture[UnlessShutdown] { - override def pure[A](a: A): UnlessShutdown[A] = Outcome(a) - - override def value[A](fa: UnlessShutdown[A]): UnlessShutdown[A] = fa - } -} - -class FutureUnlessShutdownCanAbortDueToShutdownTest - extends CanAbortDueToShutdownTest - with HasExecutionContext { - - "FutureUnlessShutdown" should { - - FutureUnlessShutdown.unit.unwrap.futureValue - - behave like canAbortDueToShutdown( - CanAbortDueToShutdown[FutureUnlessShutdown], - FutureUnlessShutdownCanAbortDueToShutdownTest.Fixture, - ) - } -} - -object FutureUnlessShutdownCanAbortDueToShutdownTest { - private[lifecycle] object Fixture - extends CanAbortDueToShutdownTest.Fixture[FutureUnlessShutdown] - with ScalaFuturesWithPatience { - override def pure[A](a: A): 
FutureUnlessShutdown[A] = FutureUnlessShutdown.pure(a) - - override def value[A](fa: FutureUnlessShutdown[A]): UnlessShutdown[A] = fa.unwrap.futureValue - } -} - -class EitherTCanAbortDueToShutdownTest extends CanAbortDueToShutdownTest with HasExecutionContext { - "EitherT" when { - "applied to FutureUnlessShutdown" should { - val fixture = new EitherTCanAbortDueToShutdownTest.Fixture[FutureUnlessShutdown, String]( - FutureUnlessShutdownCanAbortDueToShutdownTest.Fixture - ) - - behave like canAbortDueToShutdown( - CanAbortDueToShutdown[EitherT[FutureUnlessShutdown, String, *]], - fixture, - ) - } - - "applied to UnlessShutdown" should { - val fixture = new EitherTCanAbortDueToShutdownTest.Fixture[UnlessShutdown, String]( - UnlessShutdownCanAbortDueToShutdownTest.Fixture - ) - behave like canAbortDueToShutdown( - CanAbortDueToShutdown[EitherT[UnlessShutdown, String, *]], - fixture, - ) - } - } -} - -object EitherTCanAbortDueToShutdownTest { - private[lifecycle] class Fixture[F[_], L](fixture: CanAbortDueToShutdownTest.Fixture[F]) - extends CanAbortDueToShutdownTest.Fixture[EitherT[F, L, *]] - with EitherValues { - override def pure[A](a: A): EitherT[F, L, A] = EitherT(fixture.pure(Right(a))) - - override def value[A](fa: EitherT[F, L, A]): UnlessShutdown[A] = - fixture.value(fa.value).map(_.value) - } -} - -class OptionTCanAbortDueToShutdownTest extends CanAbortDueToShutdownTest with HasExecutionContext { - "OptionT" when { - "applied to FutureUnlessShutdown" should { - val fixture = new OptionTCanAbortDueToShutdownTest.Fixture( - FutureUnlessShutdownCanAbortDueToShutdownTest.Fixture - ) - - behave like canAbortDueToShutdown( - CanAbortDueToShutdown[OptionT[FutureUnlessShutdown, *]], - fixture, - ) - } - - "applied to UnlessShutdown" should { - val fixture = new OptionTCanAbortDueToShutdownTest.Fixture( - UnlessShutdownCanAbortDueToShutdownTest.Fixture - ) - behave like canAbortDueToShutdown( - CanAbortDueToShutdown[OptionT[UnlessShutdown, *]], - fixture, - ) - } - } -} - -object OptionTCanAbortDueToShutdownTest { - private[lifecycle] class Fixture[F[_]](fixture: CanAbortDueToShutdownTest.Fixture[F]) - extends lifecycle.CanAbortDueToShutdownTest.Fixture[OptionT[F, *]] - with OptionValues { - override def pure[A](a: A): OptionT[F, A] = OptionT(fixture.pure(Some(a))) - - override def value[A](fa: OptionT[F, A]): UnlessShutdown[A] = - fixture.value(fa.value).map(_.value) - } -} - -class CheckedTCanAbortDueToShutdownTest extends CanAbortDueToShutdownTest with HasExecutionContext { - "CheckedT" when { - "applied to FutureUnlessShutdown" should { - val fixture = new CheckedTCanAbortDueToShutdownTest.Fixture[FutureUnlessShutdown, Unit, Unit]( - FutureUnlessShutdownCanAbortDueToShutdownTest.Fixture - ) - - behave like canAbortDueToShutdown( - CanAbortDueToShutdown[CheckedT[FutureUnlessShutdown, Unit, Unit, *]], - fixture, - ) - } - - "applied to UnlessShutdown" should { - val fixture = new CheckedTCanAbortDueToShutdownTest.Fixture[UnlessShutdown, Unit, Unit]( - UnlessShutdownCanAbortDueToShutdownTest.Fixture - ) - behave like canAbortDueToShutdown( - CanAbortDueToShutdown[CheckedT[UnlessShutdown, Unit, Unit, *]], - fixture, - ) - } - } -} - -object CheckedTCanAbortDueToShutdownTest { - private[lifecycle] class Fixture[F[_], A, N](fixture: CanAbortDueToShutdownTest.Fixture[F]) - extends lifecycle.CanAbortDueToShutdownTest.Fixture[CheckedT[F, A, N, *]] - with OptionValues { - override def pure[X](a: X): CheckedT[F, A, N, X] = CheckedT(fixture.pure(Checked.result(a))) - - override def value[X](fa: 
CheckedT[F, A, N, X]): UnlessShutdown[X] = - fixture.value(fa.value).map(_.getResult.value) - } -} - -class NestedCanAbortDueToShutdownTest extends CanAbortDueToShutdownTest with HasExecutionContext { - "Nested" when { - "applied to FutureUnlessShutdown and Future" should { - val fixture = new NestedCanAbortDueToShutdownTest.Fixture( - FutureUnlessShutdownCanAbortDueToShutdownTest.Fixture, - CanAbortDueToShutdownTest.FixtureFuture, - ) - - behave like canAbortDueToShutdown( - CanAbortDueToShutdown[Nested[FutureUnlessShutdown, Future, *]], - fixture, - ) - } - - "applied to Future and FutureUnlessShutdown" should { - val fixture = new NestedCanAbortDueToShutdownTest.Fixture( - CanAbortDueToShutdownTest.FixtureFuture, - FutureUnlessShutdownCanAbortDueToShutdownTest.Fixture, - ) - behave like canAbortDueToShutdown( - CanAbortDueToShutdown[Nested[Future, FutureUnlessShutdown, *]], - fixture, - ) - } - } -} - -object NestedCanAbortDueToShutdownTest { - private[lifecycle] class Fixture[F[_], G[_]]( - F: CanAbortDueToShutdownTest.Fixture[F], - G: CanAbortDueToShutdownTest.Fixture[G], - ) extends lifecycle.CanAbortDueToShutdownTest.Fixture[Nested[F, G, *]] { - override def pure[A](a: A): Nested[F, G, A] = Nested(F.pure(G.pure(a))) - - override def value[A](fa: Nested[F, G, A]): UnlessShutdown[A] = - F.value(fa.value).flatMap(G.value) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/FutureUnlessShutdownTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/FutureUnlessShutdownTest.scala deleted file mode 100644 index 153964511a..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/FutureUnlessShutdownTest.scala +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.lifecycle - -import cats.data.EitherT -import cats.syntax.either.* -import com.digitalasset.canton.lifecycle.UnlessShutdown.AbortedDueToShutdown -import com.digitalasset.canton.{BaseTest, DiscardedFuture, DiscardedFutureTest, HasExecutionContext} -import org.scalatest.wordspec.AsyncWordSpec -import org.wartremover.test.WartTestTraverser - -import scala.concurrent.Future - -class FutureUnlessShutdownTest extends AsyncWordSpec with BaseTest with HasExecutionContext { - "DiscardedFuture" should { - "detect discarded FutureUnlessShutdown" in { - val result = WartTestTraverser(DiscardedFuture) { - FutureUnlessShutdown.pure(()) - () - } - DiscardedFutureTest.assertErrors(result, 1) - } - - "detect discarded FutureUnlessShutdown when wrapped" in { - val result = WartTestTraverser(DiscardedFuture) { - EitherT(FutureUnlessShutdown.pure(Either.unit)) - () - } - DiscardedFutureTest.assertErrors(result, 1) - } - } - - "failOnShutdownTo" should { - "fail to a Throwable on shutdown" in { - val fus = FutureUnlessShutdown.abortedDueToShutdown.failOnShutdownTo( - new RuntimeException("boom") - ) - - a[RuntimeException] shouldBe thrownBy { - fus.futureValue - } - } - - "not evaluate the Throwable if the result is an outcome" in { - var wasEvaluated = false - val fus = FutureUnlessShutdown.unit.failOnShutdownTo { - wasEvaluated = true - new RuntimeException("boom") - } - - fus.futureValue - wasEvaluated shouldBe false - } - } - - "transformAbortedF" should { - "restore aborted FutureUnlessShutdown" in { - val f = - FutureUnlessShutdown(Future(AbortedDueToShutdown)).failOnShutdownToAbortException("test") - FutureUnlessShutdown - .recoverFromAbortException(f) - .unwrap - .map(_ shouldBe UnlessShutdown.AbortedDueToShutdown) - } - "restore successful FutureUnlessShutdown" in { - val expected = UnlessShutdown.Outcome("expected") - val f = - FutureUnlessShutdown(Future.successful(expected)).failOnShutdownToAbortException("test") - FutureUnlessShutdown.recoverFromAbortException(f).unwrap.map(_ shouldBe expected) - } - "restore failed FutureUnlessShutdown" in { - val expected = new IllegalArgumentException("test") - val f = FutureUnlessShutdown(Future.failed(expected)).failOnShutdownToAbortException("test") - FutureUnlessShutdown.recoverFromAbortException(f).unwrap.failed.map(_ shouldBe expected) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/LifeCycleManagerTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/LifeCycleManagerTest.scala deleted file mode 100644 index 2372874ee7..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/LifeCycleManagerTest.scala +++ /dev/null @@ -1,703 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.lifecycle - -import cats.syntax.parallel.* -import com.digitalasset.canton.concurrent.Threading -import com.digitalasset.canton.discard.Implicits.* -import com.digitalasset.canton.lifecycle.LifeCycleManager.ManagedResource -import com.digitalasset.canton.lifecycle.LifeCycleManagerTest.{ - LifeCycleManagerObservationResult, - TestManagedResource, - TestRunOnClosing, -} -import com.digitalasset.canton.lifecycle.UnlessShutdown.AbortedDueToShutdown -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.FutureInstances.* -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.wordspec.AnyWordSpec - -import java.util.concurrent.atomic.{AtomicInteger, AtomicReference} -import scala.concurrent.duration.DurationInt -import scala.concurrent.{Future, Promise} -import scala.util.{Failure, Try} - -class LifeCycleManagerTest extends AnyWordSpec with BaseTest with HasExecutionContext { - - "LifeCycleManager" when { - "at the root" should { - "manage ManagedResources" in { - val lcm = LifeCycleManager.root("simple", 1.second, loggerFactory) - - val resources = (1 to 3).map(i => new TestManagedResource(s"resource$i")) - val handles = resources.map(registerOrFail(lcm, _)) - - handles(1).cancel() - - lcm.closeAsync().futureValue - - resources(0).releaseCount shouldBe 1 - resources(1).releaseCount shouldBe 0 - resources(2).releaseCount shouldBe 1 - } - - "release resources in priority order" in { - val lcm = LifeCycleManager.root("priority", 1.second, loggerFactory) - - val releaseOrder = new AtomicReference[Seq[Int]](Seq.empty) - - val resources = (1 to 3).map(i => - new TestManagedResource( - s"resource$i", - _ => { - releaseOrder.getAndUpdate(_ :+ i) - Future.unit - }, - ) - ) - - resources.zipWithIndex.map { case (resource, index) => - registerOrFail(lcm, resource, priority = (-index).toShort) - } - - lcm.closeAsync().futureValue - - releaseOrder.get() shouldBe Seq(3, 2, 1) - } - - "reject registering resources after closing" in { - val lcm = LifeCycleManager.root("too late", 1.second, loggerFactory) - lcm.closeAsync().futureValue - lcm.registerManaged(new TestManagedResource("after-closing")) shouldBe AbortedDueToShutdown - } - - "register a resource multiple times" in { - val lcm = LifeCycleManager.root("multiple times", 1.second, loggerFactory) - val resource = new TestManagedResource("multi-registration resource") - val handle1 = registerOrFail(lcm, resource) - val handle2 = registerOrFail(lcm, resource) - val handle3 = registerOrFail(lcm, resource) - handle2.cancel() shouldBe true - handle1.isScheduled shouldBe true - handle3.isScheduled shouldBe true - lcm.closeAsync().futureValue - - resource.releaseCount shouldBe 2 - } - - "release resources despite failure" in { - val lcmName = "release failure" - val lcm = LifeCycleManager.root(lcmName, 1.second, loggerFactory) - val ex = new RuntimeException("Release failed") - val resourceName = "failed resource" - val resource1 = new TestManagedResource(resourceName, _ => Future.failed(ex)) - registerOrFail(lcm, resource1, priority = Short.MinValue) - val resource2 = new TestManagedResource("good resource") - registerOrFail(lcm, resource2, priority = Short.MaxValue) - - val errorF = loggerFactory.assertLogs( - lcm.closeAsync(), - _.warningMessage should include( - s"Releasing managed resource '$resourceName' from manager '$lcmName' failed" - ), - ) - val error = errorF.failed.futureValue - error shouldBe 
a[ShutdownFailedException] - error.getMessage should include(s"Unable to close 'LifeCycleManager($lcmName)'") - error.getSuppressed.toSeq shouldBe Seq(ex) - resource2.releaseCount shouldBe 1 - } - - "report all release failures" in { - val lcmName = "all release failure" - val lcm = LifeCycleManager.root(lcmName, 1.second, loggerFactory) - val ex1 = new RuntimeException("Release1 failed") - val ex2 = new RuntimeException("Release2 failed") - val resourceName1 = "async failed resource" - val resource1 = new TestManagedResource(resourceName1, _ => Future.failed(ex1)) - registerOrFail(lcm, resource1) - val resourceName2 = "sync failed resource" - val resource2 = new TestManagedResource(resourceName2, _ => throw ex2) - registerOrFail(lcm, resource2) - - val errorF = loggerFactory.assertLogs( - lcm.closeAsync(), - _.warningMessage should include( - s"Releasing managed resource '$resourceName2' from manager '$lcmName' failed" - ), - _.warningMessage should include( - s"Releasing managed resource '$resourceName1' from manager '$lcmName' failed" - ), - ) - val error = errorF.failed.futureValue - error shouldBe a[ShutdownFailedException] - error.getMessage should include(s"Unable to close 'LifeCycleManager($lcmName)'") - error.getSuppressed.toSet shouldBe Set(ex1, ex2) - } - - "release resources in parallel" in { - val lcm = LifeCycleManager.root("parallel release", 1.second, loggerFactory) - - val count = 3 - val promises = (0 until count).map(_ => Promise[Unit]()) - - def mkResource(index: Int): TestManagedResource = new TestManagedResource( - s"resource$index", - _ => { - val rotated: Seq[Promise[Unit]] = (promises ++ promises).slice(index, index + count) - rotated(0).success(()) - rotated.drop(1).parTraverse_(_.future).futureValue - Future.unit - }, - ) - - val resources = (0 until count).map(mkResource) - resources.foreach(lcm.registerManaged(_).failOnShutdown) - - lcm.closeAsync().futureValue - } - - "closing is idempotent" in { - val lcm = LifeCycleManager.root("idempotent", 1.second, loggerFactory) - - val promise = Promise[Unit]() - val compF = lcm.synchronizeWithClosingUS("promise")(promise.future).failOnShutdown - - val closeF1 = Future.unit.flatMap(_ => lcm.closeAsync()) - val closeF2 = lcm.closeAsync() - - closeF1.isCompleted shouldBe false - closeF2.isCompleted shouldBe false - - promise.success(()) - - compF.futureValue - closeF1.futureValue - closeF2.futureValue - } - - "synchronize with computation before releasing" in { - val lcm = LifeCycleManager.root("synchronizeWithClosing", 1.second, loggerFactory) - val closeOrder = new AtomicReference[Seq[String]](Seq.empty) - val resource = new TestManagedResource( - "res", - _ => { - closeOrder.getAndUpdate(_ :+ "resource") - Future.unit - }, - ) - registerOrFail(lcm, resource) - - val closeF = lcm - .synchronizeWithClosingSync("initiate closing from synchronization block") { - val closeF = Future.unit.flatMap(_ => lcm.closeAsync()) - Threading.sleep(100) - closeOrder.getAndUpdate(_ :+ "computation") - closeF - } - .failOnShutdown - - closeF.futureValue - closeOrder.get shouldBe Seq("computation", "resource") - } - - "synchronize with asynchronous computation before releasing" in { - val lcm = LifeCycleManager.root("synchronizeWithClosingF", 1.second, loggerFactory) - val closeOrder = new AtomicReference[Seq[String]](Seq.empty) - val resource = new TestManagedResource( - "res", - _ => { - closeOrder.getAndUpdate(_ :+ "resource") - Future.unit - }, - ) - registerOrFail(lcm, resource) - - val promise = Promise[Unit]() - val compF = lcm - 
.synchronizeWithClosingUS("initiate closing from asynchronous synchronization block") { - promise.future.map { _ => - closeOrder.getAndUpdate(_ :+ "computation") - () - } - } - .failOnShutdown - - val closeF = lcm.closeAsync() - Threading.sleep(100) - promise.success(()) - closeF.futureValue - compF.futureValue - closeOrder.get shouldBe Seq("computation", "resource") - } - - "short-circuits synchronize after closing" in { - val lcm = LifeCycleManager.root("synchronize too late", 1.second, loggerFactory) - lcm.closeAsync().futureValue - lcm.synchronizeWithClosingSync("short-circuit")(()) shouldBe AbortedDueToShutdown - } - - "short-circuits synchronize if closing is in progress" in { - val lcm = LifeCycleManager.root("synchronize concurrent", 1.second, loggerFactory) - val promise = Promise[Unit]() - val delayedClosing = - lcm.synchronizeWithClosingUS("short-circuit")(promise.future).failOnShutdown - val closeF = lcm.closeAsync() - lcm.synchronizeWithClosingSync("short-circuit")(()) shouldBe AbortedDueToShutdown - promise.success(()) - closeF.futureValue - delayedClosing.futureValue - } - - "synchronize with failing computations" in { - val lcm = LifeCycleManager.root("synchronizeWithClosing failure", 1.second, loggerFactory) - - val closeRef = new AtomicReference[Future[Unit]]() - val ex = new RuntimeException("failing computation") - - val compT = - Try( - lcm.synchronizeWithClosingSync("initiate closing from failing synchronization block") { - val closeF = lcm.closeAsync() - closeRef.set(closeF) - throw ex - } - ) - - compT shouldBe Failure(ex) - closeRef.get.futureValue - } - - "synchronize with asynchronous failing computations" in { - val lcm = LifeCycleManager.root("synchronizeWithClosingF failure", 1.second, loggerFactory) - - val closeOrder = new AtomicReference[Seq[String]](Seq.empty) - val resource = new TestManagedResource( - "res", - _ => { - closeOrder.getAndUpdate(_ :+ "resource") - Future.unit - }, - ) - registerOrFail(lcm, resource) - - val ex = new RuntimeException("failing computation") - val promise = Promise[Unit]() - val compF1 = lcm - .synchronizeWithClosingUS( - "initiate closing from failing asynchronous synchronization block" - ) { - promise.future.map { _ => - closeOrder.getAndUpdate(_ :+ "computation") - throw ex - } - } - .failOnShutdown - - val closeF = lcm.closeAsync() - Threading.sleep(100) - promise.success(()) - closeF.futureValue - compF1.failed.futureValue shouldBe ex - closeOrder.get shouldBe Seq("computation", "resource") - } - - "synchronize with failing asynchronous computation" in { - val lcm = LifeCycleManager.root( - "synchronizeWithClosingF failure synchronous", - 1.second, - loggerFactory, - ) - - val closeRef = new AtomicReference[Future[Unit]]() - val ex = new RuntimeException("failing computation") - - val compT = Try( - lcm.synchronizeWithClosingUS( - "initiate closing from failing asynchronous synchronization block" - ) { - val closeF = lcm.closeAsync() - closeRef.set(closeF) - (throw ex): Future[Unit] - } - ) - - compT shouldBe Failure(ex) - closeRef.get.futureValue - } - - "time-box synchronization" in { - val timeout = 100.milliseconds - val lcm = - LifeCycleManager.root("synchronizeWithClosingF non-termination", timeout, loggerFactory) - - (1 to 10).foreach { i => - lcm.synchronizeWithClosingUS(s"computation $i")(Future.never).failOnShutdown.discard - } - - val closeF = loggerFactory.assertLogs( - lcm.closeAsync(), - _.warningMessage should include( - s"Timeout $timeout expired, but tasks still running. Shutting down forcibly."
- ), - ) - - closeF.failed.futureValue shouldBe a[ShutdownFailedException] - } - - "allow late completion of synchronization" in { - val timeout = 100.milliseconds - val lcm = - LifeCycleManager.root("synchronizeWithClosingF slow", timeout, loggerFactory) - - val promise = Promise[Unit]() - val compF = lcm.synchronizeWithClosingUS("slow computation")(promise.future).failOnShutdown - - val resource = new TestManagedResource( - "res", - _ => { - logger.debug("resource release called: slow computation finishes now") - promise.success(()) - compF - }, - ) - lcm.registerManaged(resource).failOnShutdown - - val closeF = loggerFactory.assertLogs( - lcm.closeAsync(), - _.warningMessage should include( - s"Timeout $timeout expired, but readers are still active. Shutting down forcibly." - ), - ) - - closeF.futureValue - } - - "perform runOnClose before synchronization" in { - val lcm = LifeCycleManager.root("runOnClose before synchronize", 1.second, loggerFactory) - - val promise = Promise[Unit]() - - val compF = lcm - .synchronizeWithClosingUS("computation completes via runOnClose")(promise.future) - .failOnShutdown - - val runOnClose = - new TestRunOnClosing("runOnClose before synchronize", () => promise.success(())) - lcm.runOnClose(runOnClose).failOnShutdown - - val closeF = lcm.closeAsync() - closeF.futureValue - compF.futureValue - runOnClose.runCount shouldBe 1 - } - - "log and swallow runOnClose errors" in { - val lcm = LifeCycleManager.root("runOnClose before synchronize", 1.second, loggerFactory) - - val ex = new RuntimeException("runOnClose failure") - val name = "runOnClose error" - lcm.runOnClose(new TestRunOnClosing(name, () => throw ex)).failOnShutdown - - val closeF = loggerFactory.assertLogs( - lcm.closeAsync(), - entry => { - entry.warningMessage should include(s"Task '$name' failed on closing!") - entry.throwable should contain(ex) - }, - ) - - closeF.futureValue - } - - "reject runOnClosing during closing" in { - val lcm = LifeCycleManager.root("runOnClose too late", 1.second, loggerFactory) - lcm.closeAsync().futureValue - lcm.runOnClose(new TestRunOnClosing("tooLate")) shouldBe AbortedDueToShutdown - } - } - - "in a hierarchy" should { - "be closed by the parent" in { - val root = LifeCycleManager.root("root", 1.second, loggerFactory) - val child1 = LifeCycleManager.dependent("child1", root, 1.second, loggerFactory) - val child2 = LifeCycleManager.dependent("child2", root, 1.second, loggerFactory) - - val resource1 = new TestManagedResource("resource1") - val resource2 = new TestManagedResource("resource2") - - child1.registerManaged(resource1).failOnShutdown - child2.registerManaged(resource2).failOnShutdown - - root.closeAsync().futureValue - resource1.releaseCount shouldBe 1 - resource2.releaseCount shouldBe 1 - } - - "respect priority order of children" in { - val root = LifeCycleManager.root("root", 1.second, loggerFactory) - val child1 = LifeCycleManager.dependent( - "child1", - root, - 1.second, - loggerFactory, - parentPriority = Short.MinValue, - ) - val child2 = LifeCycleManager.dependent( - "child2", - root, - 1.second, - loggerFactory, - parentPriority = Short.MaxValue, - ) - - val closeOrder = new AtomicReference[Seq[Int]](Seq.empty) - - val resources = (0 until 3).map { i => - new TestManagedResource( - s"resource$i", - _ => { - closeOrder.getAndUpdate(_ :+ i) - Future.unit - }, - ) - } - root.registerManaged(resources(0)).failOnShutdown - child2.registerManaged(resources(2)).failOnShutdown - child1.registerManaged(resources(1)).failOnShutdown - - 
root.closeAsync().futureValue - - closeOrder.get() shouldBe Seq(1, 0, 2) - } - - "propagate the close signal immediately" in { - val root = LifeCycleManager.root("root", 1.second, loggerFactory) - val child = LifeCycleManager.dependent("child", root, 1.second, loggerFactory) - val grandchild = LifeCycleManager.dependent("grandchild", child, 1.second, loggerFactory) - - val closingStateObservations = - new AtomicReference[Seq[LifeCycleManagerObservationResult[Boolean]]](Seq.empty) - - val runRoot = new TestRunOnClosing( - "check child close status", - () => { - val childClosing = child.isClosing - val grandchildClosing = grandchild.isClosing - closingStateObservations.getAndUpdate( - _ :+ LifeCycleManagerObservationResult( - root, - child, - childClosing, - ) :+ LifeCycleManagerObservationResult( - root, - grandchild, - grandchildClosing, - ) - ) - }, - ) - root.runOnClose(runRoot).failOnShutdown - - val runChild = new TestRunOnClosing( - "check grandchild close status", - () => { - val grandChildClosing = grandchild.isClosing - closingStateObservations.getAndUpdate( - _ :+ LifeCycleManagerObservationResult(child, grandchild, grandChildClosing) - ) - }, - ) - child.runOnClose(runChild).failOnShutdown - - root.closeAsync().futureValue - - closingStateObservations.get().toSet shouldBe Set( - LifeCycleManagerObservationResult(root, child, true), - LifeCycleManagerObservationResult(root, grandchild, true), - LifeCycleManagerObservationResult(child, grandchild, true), - ) - } - - "have the parent wait for children's closing" in { - val root = LifeCycleManager.root("root", 1.second, loggerFactory) - val child = LifeCycleManager.dependent("child", root, 2.second, loggerFactory) - - val promise = Promise[Unit]() - val compF = - child.synchronizeWithClosingUS("child computation")(promise.future).failOnShutdown - - val closeF = root.closeAsync() - always(durationOfSuccess = 500.milliseconds) { - closeF.isCompleted shouldBe false - } - promise.success(()) - closeF.futureValue - compF.futureValue - } - - "close the child independently from the parent" in { - val root = LifeCycleManager.root("root", 1.second, loggerFactory) - val child = LifeCycleManager.dependent("child", root, 2.second, loggerFactory) - - val promise = Promise[Unit]() - val compF = - child.synchronizeWithClosingUS("child computation")(promise.future).failOnShutdown - - val closeChildF = child.closeAsync() - Threading.sleep(10) - val closeRootF = root.closeAsync() - // Make sure that the parent's close method does not complete before the child's closing has finished - always(durationOfSuccess = 500.milliseconds) { - closeRootF.isCompleted shouldBe false - } - promise.success(()) - closeChildF.futureValue - closeRootF.futureValue - compF.futureValue - } - - "synchronize with closing happens independently across managers" in { - val root = LifeCycleManager.root("root", 1.second, loggerFactory) - val child = LifeCycleManager.dependent("child", root, 2.second, loggerFactory) - - val promise = Promise[Unit]() - val compF = - child.synchronizeWithClosingUS("child computation")(promise.future).failOnShutdown - - val resource = new TestManagedResource( - "unblock synchronize with closing of child", - _ => { - promise.success(()) - Future.unit - }, - ) - root.registerManaged(resource) - - val closeF = root.closeAsync() - compF.futureValue - closeF.futureValue - } - - "runOnClose happens everywhere before synchronization" in { - val root = LifeCycleManager.root("root", 1.second, loggerFactory) - val child = LifeCycleManager.dependent("child", 
root, 2.second, loggerFactory) - - val promise = Promise[Unit]() - val rootF = - root.synchronizeWithClosingUS("root computation")(promise.future).failOnShutdown - val childF = - child.synchronizeWithClosingUS("child computation")(promise.future).failOnShutdown - - val rootHandle = root.runOnClose(new TestRunOnClosing("at root")).failOnShutdown - val childHandle = child.runOnClose(new TestRunOnClosing("at child")).failOnShutdown - - val closeF = root.closeAsync() - eventually() { - rootHandle.isScheduled shouldBe false - childHandle.isScheduled shouldBe false - } - promise.success(()) - - rootF.futureValue - childF.futureValue - closeF.futureValue - } - - "close immediately if parent is closed" in { - val root = LifeCycleManager.root("root", 1.second, loggerFactory) - root.closeAsync().futureValue - - val child = LifeCycleManager.dependent("child", root, 2.second, loggerFactory) - child.isClosing shouldBe true - child.closeAsync().isCompleted shouldBe true - } - - "close immediately if parent is closing concurrently" in { - - // While the child registers with the parent manager, the parent manager will try to clean up obsolete - // runOnClose tasks. This test exploits this behavior to close the parent in the middle - // of the registration process. - - def triggerCloseAfter(threshold: Int): Unit = - withClue(s"attempting close after $threshold") { - val root = LifeCycleManager.root("root", 1.second, loggerFactory) - - val doneInvocationCount = new AtomicInteger() - - val registrationMonitor = new RunOnClosing { - override def name: String = "monitor" - - override def done: Boolean = { - val invocationCount = doneInvocationCount.incrementAndGet() - if (invocationCount >= threshold) { root.closeAsync().discard } - false - } - - override def run()(implicit traceContext: TraceContext): Unit = () - } - root.runOnClose(registrationMonitor).failOnShutdown - - val child = LifeCycleManager.dependent("child", root, 2.second, loggerFactory) - child.isClosing shouldBe true - root.closeAsync().futureValue - } - - // The child manager will register three tasks. So there is a separate - // test case that closes after each of them.
- triggerCloseAfter(1) - triggerCloseAfter(2) - triggerCloseAfter(3) - } - } - } - - private def registerOrFail( - lcm: LifeCycleManager, - resource: TestManagedResource, - priority: Short = 0, - ): LifeCycleRegistrationHandle = - lcm - .registerManaged(resource, priority) - .failOnShutdown(s"Registering resource ${resource.name} failed due to shutdown") - -} - -object LifeCycleManagerTest { - private[lifecycle] class TestManagedResource( - override val name: String, - onRelease: TraceContext => Future[Unit] = _ => Future.unit, - ) extends ManagedResource { - private val releaseCounter: AtomicInteger = new AtomicInteger() - - def releaseCount: Int = releaseCounter.get - - override protected def releaseByManager()(implicit traceContext: TraceContext): Future[Unit] = { - releaseCounter.incrementAndGet().discard - onRelease(traceContext) - } - } - - private[lifecycle] class TestRunOnClosing( - override val name: String, - onRun: () => Unit = () => (), - ) extends RunOnClosing { - private val runCounter: AtomicInteger = new AtomicInteger() - - def runCount: Int = runCounter.get - - override def done: Boolean = false - override def run()(implicit traceContext: TraceContext): Unit = { - runCounter.incrementAndGet().discard - onRun() - } - } - - private final case class LifeCycleManagerObservationResult[+A]( - executingManager: LifeCycleManager, - checkedManager: LifeCycleManager, - result: A, - ) - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/LifeCycleScopeImplTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/LifeCycleScopeImplTest.scala deleted file mode 100644 index ded7c639e0..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/LifeCycleScopeImplTest.scala +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
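(Editor's note: a minimal sketch, not part of the diff, of the core life cycle flow covered by the LifeCycleManagerTest deleted above; TestManagedResource and loggerFactory are the helpers from that test.)

    val root = LifeCycleManager.root("sketch-root", 1.second, loggerFactory)
    val child = LifeCycleManager.dependent("sketch-child", root, 1.second, loggerFactory)
    val resource = new TestManagedResource("sketch-resource")
    child.registerManaged(resource).failOnShutdown
    // Closing the root propagates to the dependent child, which releases its resources.
    root.closeAsync().futureValue
    resource.releaseCount shouldBe 1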
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.lifecycle - -import com.digitalasset.canton.concurrent.Threading -import com.digitalasset.canton.lifecycle.LifeCycleManagerTest.{ - TestManagedResource, - TestRunOnClosing, -} -import com.digitalasset.canton.lifecycle.LifeCycleScopeImpl.ThereafterTryUnlessShutdownFContent -import com.digitalasset.canton.lifecycle.LifeCycleScopeImplTest.TryUnlessShutdownFFixture -import com.digitalasset.canton.lifecycle.UnlessShutdown.{AbortedDueToShutdown, Outcome} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.{FutureThereafterTest, ThereafterTest, TryThereafterTest} -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.wordspec.AnyWordSpec - -import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference} -import scala.concurrent.duration.DurationInt -import scala.concurrent.{Future, Promise} -import scala.util.{Failure, Success, Try} - -class LifeCycleScopeImplTest - extends AnyWordSpec - with BaseTest - with HasExecutionContext - with ThereafterTest { - - "The empty scope" should { - "never close" in { - LifeCycleScopeImpl.empty.isClosing shouldBe false - } - - "always accept any task" in { - val scope = LifeCycleScopeImpl.empty - val handle = scope.runOnClose(new TestRunOnClosing("empty scope task")).failOnShutdown - handle.cancel() shouldBe true - } - - "always run synchronize-with-closing tasks" in { - val scope = LifeCycleScopeImpl.empty - val hasRun = new AtomicBoolean() - scope.synchronizeWithClosingSync("empty synchronize")(hasRun.set(true)).failOnShutdown - hasRun.get() shouldBe true - } - - "always run asynchronous synchronize-with-closing tasks" in { - val scope = LifeCycleScopeImpl.empty - val hasRun = new AtomicBoolean() - scope - .synchronizeWithClosingUS("empty synchronize")(Future(hasRun.set(true))) - .failOnShutdown - .futureValue - hasRun.get() shouldBe true - } - } - - "A scope with a single manager" should { - - "refuse tasks and synchronization when closing is in progress" in { - val manager = LifeCycleManager.root("single manager", 1.second, loggerFactory) - val promise = Promise[Unit]() - val compF = manager.synchronizeWithClosingUS("delay closing")(promise.future).failOnShutdown - - val scope = new LifeCycleScopeImpl(Set(manager)) - val closeF = manager.closeAsync() - - scope.runOnClose(new TestRunOnClosing("task during closing")) shouldBe AbortedDueToShutdown - scope.synchronizeWithClosingSync("synchronize during closing") { - fail("This should not run") - } shouldBe AbortedDueToShutdown - scope.synchronizeWithClosingUS("async synchronize during closing") { - fail("This should not run"): Unit - Future.unit - } shouldBe AbortedDueToShutdown - - promise.success(()) - compF.futureValue - closeF.futureValue - - scope.runOnClose(new TestRunOnClosing("task after closing")) shouldBe AbortedDueToShutdown - scope.synchronizeWithClosingSync("synchronize after closing") { - fail("This should not run") - } shouldBe AbortedDueToShutdown - scope.synchronizeWithClosingUS("async synchronize after closing") { - fail("This should not run"): Unit - Future.unit - } shouldBe AbortedDueToShutdown - } - - "support cancellation of tasks that have not yet run during closing" in { - // This test relies on RunOnClosing tasks being executed sequentially. - // If we change this so that they can execute in parallel, we will need a new strategy for testing this behavior. 
- - val manager = LifeCycleManager.root("single manager", 1.second, loggerFactory) - val scope = new LifeCycleScopeImpl(Set(manager)) - val handle2Ref = new AtomicReference[LifeCycleRegistrationHandle]() - - val handle1Cancelled = new AtomicBoolean() - val handle2Cancelled = new AtomicBoolean() - - val task1 = new TestRunOnClosing( - "task1 cancellation during closing", - () => handle2Cancelled.set(handle2Ref.get().cancel()), - ) - val handle1 = scope.runOnClose(task1).failOnShutdown - - val task2 = new TestRunOnClosing( - "task2 cancellation during closing", - () => handle1Cancelled.set(handle1.cancel()), - ) - val handle2 = scope.runOnClose(task2).failOnShutdown - handle2Ref.set(handle2) - - manager.closeAsync().futureValue - - // As the tasks don't execute concurrently, the one that went first should have cancelled the other. - handle1Cancelled.get() should not be handle2Cancelled.get() - - task1.runCount shouldBe (if (handle1Cancelled.get()) 0 else 1) - task2.runCount shouldBe (if (handle2Cancelled.get()) 0 else 1) - } - } - - def mkTestResource(order: AtomicReference[Seq[String]], index: Int): TestManagedResource = - new TestManagedResource( - s"test resource$index", - _ => { - order.getAndUpdate(_ :+ "test resource") - Future.unit - }, - ) - - "A scope with multiple managers" should { - "close as soon as the first manager starts closing" in { - val manager1 = LifeCycleManager.root("manager1", 1.second, loggerFactory) - val manager2 = LifeCycleManager.root("manager2", 1.second, loggerFactory) - val promise = Promise[Unit]() - val compF = manager1.synchronizeWithClosingUS("delay closing")(promise.future).failOnShutdown - - val scope = new LifeCycleScopeImpl(Set(manager1, manager2)) - scope.isClosing shouldBe false - - val closeF = manager1.closeAsync() - scope.isClosing shouldBe true - - promise.success(()) - compF.futureValue - closeF.futureValue - scope.isClosing shouldBe true - } - - "run tasks while the first manager is closing" in { - val manager1 = LifeCycleManager.root("manager1", 1.second, loggerFactory) - val manager2 = LifeCycleManager.root("manager2", 1.second, loggerFactory) - val promise = Promise[Unit]() - val compF = manager1.synchronizeWithClosingUS("delay closing")(promise.future).failOnShutdown - - val scope = new LifeCycleScopeImpl(Set(manager1, manager2)) - val task = new TestRunOnClosing("scope task") - val handle = scope.runOnClose(task).failOnShutdown - - handle.isScheduled shouldBe true - - val closeF = manager1.closeAsync() - eventually() { - task.runCount shouldBe 1 - } - handle.isScheduled shouldBe false - handle.cancel() shouldBe false - - promise.success(()) - compF.futureValue - closeF.futureValue - } - - "synchronize-with-closing tasks synchronize with all managers" in { - val manager1 = LifeCycleManager.root("manager1", 1.second, loggerFactory) - val manager2 = LifeCycleManager.root("manager2", 1.second, loggerFactory) - val scope = new LifeCycleScopeImpl(Set(manager1, manager2)) - - val order = new AtomicReference[Seq[String]](Seq.empty) - manager1.registerManaged(mkTestResource(order, 1)).failOnShutdown - manager2.registerManaged(mkTestResource(order, 2)).failOnShutdown - - val closeRef1 = new AtomicReference[Future[Unit]]() - val closeRef2 = new AtomicReference[Future[Unit]]() - scope.synchronizeWithClosingSync("close manager from within") { - order.getAndUpdate(_ :+ "start") - closeRef1.set(manager1.closeAsync()) - logger.debug("Give the first manager a bit of time to progress on its closing") - Threading.sleep(10) - 
closeRef2.set(manager2.closeAsync()) - logger.debug("Give the second manager a bit of time to progress on its closing") - Threading.sleep(10) - order.getAndUpdate(_ :+ "end") - } - - closeRef1.get.futureValue - closeRef2.get.futureValue - order.get shouldBe Seq("start", "end", "test resource", "test resource") - } - - "asynchronous synchronize-with-closing tasks synchronize with all managers" in { - val manager1 = LifeCycleManager.root("manager1", 1.second, loggerFactory) - val manager2 = LifeCycleManager.root("manager2", 1.second, loggerFactory) - val scope = new LifeCycleScopeImpl(Set(manager1, manager2)) - - val order = new AtomicReference[Seq[String]](Seq.empty) - manager1.registerManaged(mkTestResource(order, 1)).failOnShutdown - manager2.registerManaged(mkTestResource(order, 2)).failOnShutdown - - val promise = Promise[Unit]() - - val compF = scope - .synchronizeWithClosingUS("async computation") { - order.getAndUpdate(_ :+ "start") - promise.future.map { _ => - order.getAndUpdate(_ :+ "end") - () - } - } - .failOnShutdown - - val closeF1 = manager1.closeAsync() - always(durationOfSuccess = 100.milliseconds) { - closeF1.isCompleted shouldBe false - } - val closeF2 = manager2.closeAsync() - always(durationOfSuccess = 100.milliseconds) { - closeF2.isCompleted shouldBe false - } - promise.success(()) - compF.futureValue - closeF1.futureValue - closeF2.futureValue - - order.get shouldBe Seq("start", "end", "test resource", "test resource") - } - - "log exceptions thrown in run-on-close tasks exactly once" in { - val manager1 = LifeCycleManager.root("manager1", 1.second, loggerFactory) - val manager2 = LifeCycleManager.root("manager2", 1.second, loggerFactory) - val scope = new LifeCycleScopeImpl(Set(manager1, manager2)) - - val ex = new RuntimeException("Task failure") - val taskName = "failing task" - val closeRef2 = new AtomicReference[Future[Unit]]() - scope - .runOnClose( - new TestRunOnClosing( - taskName, - () => { - closeRef2.set(manager2.closeAsync()) - Threading.sleep(10) - throw ex - }, - ) - ) - .failOnShutdown - - loggerFactory.assertLogs( - manager1.closeAsync().futureValue, - // Only one log entry even though probably both managers invoke the registered task. 
- logEntry => { - logEntry.warningMessage should include(s"Task '$taskName' failed on closing!") - logEntry.throwable should contain(ex) - }, - ) - closeRef2.get.futureValue - } - - "delay closing of all managers until RunOnClosing tasks have finished" in { - val manager1 = LifeCycleManager.root("manager1", 1.second, loggerFactory) - val manager2 = LifeCycleManager.root("manager2", 1.second, loggerFactory) - val scope = new LifeCycleScopeImpl(Set(manager1, manager2)) - - val order = new AtomicReference[Seq[String]](Seq.empty) - manager1.registerManaged(mkTestResource(order, 1)).failOnShutdown - manager2.registerManaged(mkTestResource(order, 2)).failOnShutdown - - val closeRef2 = new AtomicReference[Future[Unit]]() - - val task = new RunOnClosing { - override def name: String = "sync with multiple managers" - - private val invoked = new AtomicBoolean() - - override def done: Boolean = invoked.get() - - override def run()(implicit traceContext: TraceContext): Unit = { - logger.debug("Mark this task as done") - invoked.set(true) - order.getAndUpdate(_ :+ "start") - logger.debug("Poke the second manager to remove obsolete tasks") - manager2 - .runOnClose(new TestRunOnClosing("trigger removal of obsolete tasks")) - .failOnShutdown - // If `closeAsync` executed the RunOnClosing tasks synchronously, - // the lazy val memoization would fail due to re-entrancy and we'd execute this run method again - closeRef2.set(manager2.closeAsync()) - logger.debug("Give the second manager a bit of time to progress on its closing") - Threading.sleep(10) - order.getAndUpdate(_ :+ "end") - } - } - scope.runOnClose(task).failOnShutdown - - manager1.closeAsync().futureValue - closeRef2.get.futureValue - - order.get shouldBe Seq("start", "end", "test resource", "test resource") - } - } - - "ThereafterTryUnlessShutdownF" when { - "used with Try" should { - behave like thereafter( - LifeCycleScopeImpl.ThereafterTryUnlessShutdownF.instance[Try], - new TryUnlessShutdownFFixture(TryThereafterTest.fixture), - ) - } - "used with Future" should { - behave like thereafter( - LifeCycleScopeImpl.ThereafterTryUnlessShutdownF.instance[Future], - new TryUnlessShutdownFFixture(FutureThereafterTest.fixture), - ) - } - } -} - -object LifeCycleScopeImplTest { - private class TryUnlessShutdownFFixture[F[_], Content[_], Shape]( - val base: ThereafterTest.Fixture[F, Content, Shape] - ) extends ThereafterTest.Fixture[ - Lambda[a => Try[UnlessShutdown[F[a]]]], - ThereafterTryUnlessShutdownFContent[Content, *], - Shape, - ] { - override type X = base.X - type FF[A] = Try[UnlessShutdown[F[A]]] - override def fromTry[A](x: Try[A]): FF[A] = Success(Outcome(base.fromTry(x))) - override def fromContent[A](content: Try[UnlessShutdown[Content[A]]]): FF[A] = - content.map(_.map(base.fromContent)) - override def isCompleted[A](x: FF[A]): Boolean = x match { - case Success(Outcome(fa)) => base.isCompleted(fa) - case _ => true - } - override def await[A](x: FF[A]): ThereafterTryUnlessShutdownFContent[Content, A] = - x.map(_.map(base.await)) - - override def contents: Seq[ThereafterTryUnlessShutdownFContent[Content, X]] = - base.contents.map(c => Success(Outcome(c))) ++ - Seq(Success(AbortedDueToShutdown), Failure(new RuntimeException("test"))) - - @SuppressWarnings(Array("org.wartremover.warts.TryPartial")) - override def theContent[A](content: ThereafterTryUnlessShutdownFContent[Content, A]): A = - base.theContent( - content.get.onShutdown(throw new NoSuchElementException(("AbortedDueToShutdown"))) - ) - - override def splitContent[A]( - content: 
ThereafterTryUnlessShutdownFContent[Content, A] - ): Option[(Shape, A)] = content match { - case Success(UnlessShutdown.Outcome(x)) => base.splitContent(x) - case _ => None - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/LifeCycleScopeTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/LifeCycleScopeTest.scala deleted file mode 100644 index e93fe4e1ea..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/LifeCycleScopeTest.scala +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.lifecycle - -import com.digitalasset.canton.discard.Implicits.* -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.wordspec.AnyWordSpec -import org.scalatest.{Assertion, AssertionsUtil} - -import scala.concurrent.duration.DurationInt - -class LifeCycleScopeTest - extends AnyWordSpec - with BaseTest - with HasExecutionContext - with UnmanagedLifeCycle { - import LifeCycleScopeTest.* - - "LifeCycleScope" should { - "accumulate managers" in { - val managerA = LifeCycleManager.root("managerA", 1.second, loggerFactory) - val managerB = LifeCycleManager.root("managerB", 1.second, loggerFactory) - val a = new ManagedA(managerA) - val b = new ManagedB(managerB) - val unmanaged = new Unmanaged - - implicit val scope: this.ContextLifeCycleScope = freshLifeCycleScope - - a.call(_.managers) shouldBe Set(managerA) - b.call(_.managers) shouldBe Set(managerB) - - a.callB(b, _.managers) shouldBe Set(managerA, managerB) - - a.callBIndirect(b, _.managers) shouldBe Set(managerA, managerB) - - b.call { bScope => - implicit val scope: this.ContextLifeCycleScope = - bScope.coerce[ContextLifeCycleScopeDiscriminator] - a.call(_.managers) - } shouldBe Set(managerA, managerB) - - unmanaged.call(_.managers) shouldBe Set.empty - unmanaged.callB(b, _.managers) shouldBe Set(managerB) - unmanaged.callAB(a, b, _.managers) shouldBe Set(managerA, managerB) - - a.callBViaUnmanaged(unmanaged, b, _.managers) shouldBe Set(managerA, managerB) - } - - "include own manager for checks around closing" in { - val managerA = LifeCycleManager.root("managerA", 1.second, loggerFactory) - val managerB = LifeCycleManager.root("managerB", 1.second, loggerFactory) - val a = new ManagedA(managerA) - val b = new ManagedB(managerB) - managerA.closeAsync().futureValue - - implicit val scope: this.ContextLifeCycleScope = freshLifeCycleScope - - a.callIsClosing shouldBe true - b.callIsClosing shouldBe false - - a.call { aScope => - implicit val scope: this.ContextLifeCycleScope = - aScope.coerce[ContextLifeCycleScopeDiscriminator] - b.callIsClosing - } shouldBe true - } - - "forbid implicit coercions to own life cycle scope of another object inside UnmanagedLifeCycle" in { - val manager = LifeCycleManager.root("managerA", 1.second, loggerFactory) - val a = new ManagedA(manager) - implicit val scope: this.ContextLifeCycleScope = freshLifeCycleScope - - a.discard - scope.discard - - assertCompiles("implicitly[a.ContextLifeCycleScope]") - AssertionsUtil.assertOnTypeError("implicitly[a.OwnLifeCycleScope]")( - assertImplicitLifeCycleScopeError("a.OwnLifeCycleScopeDiscriminator") - ) - } - - "forbid implicit coercions to own life cycle scope of another object inside ManagedLifeCycle" in { - val managerA = LifeCycleManager.root("managerA", 1.second, 
loggerFactory) - val a = new ManagedA(managerA) - a.discard - - object test extends ManagedLifeCycle { - override protected def manager: LifeCycleManager = managerA - - def test()(implicit scope: ContextLifeCycleScope): Unit = { - assertCompiles("implicitly[a.ContextLifeCycleScope]") - AssertionsUtil.assertOnTypeError("implicitly[a.OwnLifeCycleScope]")( - assertImplicitLifeCycleScopeError("a.OwnLifeCycleScopeDiscriminator") - ) - - testOwn() - } - - def testOwn()(implicit scope: OwnLifeCycleScope): Unit = { - scope.discard - assertCompiles("implicitly[a.ContextLifeCycleScope]") - AssertionsUtil.assertOnTypeError("implicitly[a.OwnLifeCycleScope]")( - assertImplicitLifeCycleScopeError("a.OwnLifeCycleScopeDiscriminator") - ) - } - } - - implicit val scope: this.ContextLifeCycleScope = freshLifeCycleScope - test.test() - } - - "not allow calling a method from the outside that expects its own scope" in { - val managerB = LifeCycleManager.root("managerB", 1.second, loggerFactory) - val b = new ManagedB(managerB) - b.discard - - AssertionsUtil.assertOnTypeError("b.expectOwnScope")( - assertImplicitLifeCycleScopeError("b.OwnLifeCycleScopeDiscriminator") - ) - } - } - - private def assertImplicitLifeCycleScopeError( - expectedDiscriminator: String - )(typeError: String): Assertion = - typeError should include( - s"Could not find a suitable LifeCycleScope for discriminator $expectedDiscriminator" - ) -} - -object LifeCycleScopeTest { - - private final class ManagedA(override protected val manager: LifeCycleManager) - extends ManagedLifeCycle { - - /** Public methods should take a [[ContextLifeCycleScope]]. */ - def call[A](k: OwnLifeCycleScope => A)(implicit scope: ContextLifeCycleScope): A = - k(implicitly[OwnLifeCycleScope]) - - def callB[A](b: ManagedB, k: LifeCycleScope[?] => A)(implicit - scope: ContextLifeCycleScope - ): A = - b.call(k) - - def callBIndirect[A](b: ManagedB, k: LifeCycleScope[?] => A)(implicit - scope: ContextLifeCycleScope - ): A = - // Transforms the context life cycle scope into the own life cycle scope. - callBIndirectInternal(b, k) - - /** Private methods should take a [[OwnLifeCycleScope]]. */ - private def callBIndirectInternal[A](b: ManagedB, k: LifeCycleScope[?] => A)(implicit - scope: OwnLifeCycleScope - ): A = - // Coerces the own life cycle scope into b's context lifecycle scope - b.call(k) - - def callBViaUnmanaged[A](unmanaged: Unmanaged, b: ManagedB, k: LifeCycleScope[?] => A)(implicit - scope: ContextLifeCycleScope - ): A = - unmanaged.callB(b, k) - - def callIsClosing(implicit scope: ContextLifeCycleScope): Boolean = { - // Unnecessary unit statement to prevent our Scala formatter from removing the block braces. - // Without block braces, IntelliJ can't find the implicit for `ownScope`, as of March 2025. - () - ownScope.isClosing - } - } - - private final class ManagedB(override protected val manager: LifeCycleManager) - extends ManagedLifeCycle { - def call[A](k: OwnLifeCycleScope => A)(implicit scope: ContextLifeCycleScope): A = - k(implicitly[OwnLifeCycleScope]) - - def callIsClosing(implicit scope: ContextLifeCycleScope): Boolean = { - // Unnecessary unit statement to prevent our Scala formatter from removing the block braces. 
- // Without block braces, IntelliJ can't find the implicit for `ownScope`, as of March 2025 - () - ownScope.isClosing - } - - def expectOwnScope(implicit scope: OwnLifeCycleScope): Unit = - scope.discard - } - - private final class Unmanaged extends UnmanagedLifeCycle { - def call[A](k: OwnLifeCycleScope => A)(implicit scope: ContextLifeCycleScope): A = - k(implicitly[OwnLifeCycleScope]) - - def callB[A](b: ManagedB, k: LifeCycleScope[?] => A)(implicit scope: ContextLifeCycleScope): A = - b.call(k) - - def callAB[A](a: ManagedA, b: ManagedB, k: LifeCycleScope[?] => A)(implicit - scope: ContextLifeCycleScope - ): A = - a.callB(b, k) - - def expectOwnScope(implicit scope: OwnLifeCycleScope): Unit = - scope.discard - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/LifeCycleTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/LifeCycleTest.scala deleted file mode 100644 index 0205ea0a9e..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/LifeCycleTest.scala +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.lifecycle - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.lifecycle.LifeCycle.close -import com.digitalasset.canton.logging.LogEntry -import org.scalatest.Assertion -import org.scalatest.wordspec.AnyWordSpec - -import scala.collection.mutable - -class LifeCycleTest extends AnyWordSpec with BaseTest { - - case class FailingAutoCloseable(t: Throwable, description: String) extends AutoCloseable { - override def close(): Unit = throw t - override def toString: String = description - } - - "close" should { - "return happily if everything closes fine" in { - close(() => ())(logger) - } - "close all items in order" in { - val closed = mutable.Buffer[Int]() - - close( - () => closed.append(1), - () => closed.append(2), - () => closed.append(3), - )(logger) - - closed shouldBe Seq(1, 2, 3) - } - "throw a shutdown exception if something throws" in { - val underlyingException = new RuntimeException("😱") - - val thrown = loggerFactory.assertLogs( - the[ShutdownFailedException] thrownBy close( - FailingAutoCloseable(underlyingException, "kebab") - )(logger), - entry => { - entry.warningMessage shouldBe "Closing 'kebab' failed! Reason:" - entry.throwable shouldBe Some(underlyingException) - }, - ) - thrown.getMessage shouldBe "Unable to close 'kebab'." - } - "when multiple items throw an exception" should { - val closeables = - (1 to 3).map(i => FailingAutoCloseable(new RuntimeException(s"error-$i"), s"component-$i")) - - "throw and log" in { - val thrown = loggerFactory.assertLogs( - the[ShutdownFailedException] thrownBy close(closeables*)(logger), - closeables.map[LogEntry => Assertion](closeable => - entry => { - entry.warningMessage shouldBe s"Closing '${closeable.description}' failed! Reason:" - entry.throwable shouldBe Some(closeable.t) - } - )* - ) - thrown.getMessage shouldBe "Unable to close Seq('component-1', 'component-2', 'component-3')." 
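// (Editor's note: as the two assertions above show, LifeCycle.close logs each close failure as its own warning but surfaces a single ShutdownFailedException naming every component that failed to close.)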
- } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/OnShutdownRunnerTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/OnShutdownRunnerTest.scala deleted file mode 100644 index 1ffae8d225..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/OnShutdownRunnerTest.scala +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.lifecycle - -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.tracing.{NoTracing, TraceContext} -import org.scalatest.concurrent.Eventually -import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.* - -import java.util.concurrent.ConcurrentHashMap -import scala.jdk.CollectionConverters.* - -object OnShutdownRunnerTest { - private class TestResource() extends AutoCloseable with OnShutdownRunner with NamedLogging { - override protected def onFirstClose(): Unit = () - override val loggerFactory = NamedLoggerFactory.root - override def close(): Unit = super.close() - } -} - -class OnShutdownRunnerTest extends AnyWordSpec with Matchers with NoTracing with Eventually { - import OnShutdownRunnerTest.* - - "OnShutdownRunner" should { - "run all shutdown tasks" in { - - var shutdownTasks: Seq[String] = Seq.empty - - val closeable = new TestResource() - closeable.runOnOrAfterClose_(new RunOnClosing { - override val name = "first" - override val done = false - - override def run()(implicit traceContext: TraceContext): Unit = - shutdownTasks = shutdownTasks :+ "first" - }) - closeable.runOnOrAfterClose_(new RunOnClosing { - override val name = "second" - override val done = false - - override def run()(implicit traceContext: TraceContext): Unit = - shutdownTasks = shutdownTasks :+ "second" - }) - closeable.close() - - shutdownTasks.toSet shouldBe Set("first", "second") - } - - "behave correctly if races occur during shutdown" in { - val shutdownTasks = new ConcurrentHashMap[Int, Unit]() - val closeable = new TestResource() - val total = 100 - - // Start by adding some shutdown tasks - (0 to total / 2).foreach { i => - closeable.runOnOrAfterClose_(new ConcurrentRunOnClosingHelperClass(shutdownTasks, i)) - } - - // Then add another chunk, each in its own thread, and partway through, close the closeable - val threads = (total / 2 + 1 to total).map { i => - val t = new Thread(() => { - closeable.runOnOrAfterClose_(new ConcurrentRunOnClosingHelperClass(shutdownTasks, i)) - }) - t.start() - - // Three quarters of the way through, close the closeable - if (i == (total * 0.75).toInt) closeable.close() - - t - } - - eventually { - // We should run all the tasks once and only once - shutdownTasks.keySet().asScala should contain theSameElementsAs (0 to total) - } - // Make sure all threads complete - threads.foreach(_.join()) - } - - "allow cancelling shutdown tasks" in { - var shutdownTasks: Seq[String] = Seq.empty - - val closeable = new TestResource() - closeable.runOnOrAfterClose_(new RunOnClosing { - override val name = "first" - override val done = false - - override def run()(implicit traceContext: TraceContext): Unit = - shutdownTasks = shutdownTasks :+ "first" - }) - val token = closeable.runOnOrAfterClose(new RunOnClosing { - override val name = "second" - override val done = false - - override def run()(implicit traceContext: TraceContext): Unit =
shutdownTasks = shutdownTasks :+ "second" - }) - closeable.runOnOrAfterClose_(new RunOnClosing { - override val name = "third" - override val done = false - - override def run()(implicit traceContext: TraceContext): Unit = - shutdownTasks = shutdownTasks :+ "third" - }) - token.cancel() - - closeable.close() - - shutdownTasks.toSet shouldBe Set("first", "third") - } - } - - private class ConcurrentRunOnClosingHelperClass( - shutdownTasks: ConcurrentHashMap[Int, Unit], - i: Int, - ) extends RunOnClosing { - override val name = i.toString - override val done = shutdownTasks.contains(i) - override def run()(implicit traceContext: TraceContext): Unit = - shutdownTasks.put(i, ()) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/PromiseUnlessShutdownTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/PromiseUnlessShutdownTest.scala deleted file mode 100644 index c4b4bebdf7..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/lifecycle/PromiseUnlessShutdownTest.scala +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.lifecycle - -import com.digitalasset.canton.concurrent.{FutureSupervisor, Threading} -import com.digitalasset.canton.discard.Implicits.* -import com.digitalasset.canton.lifecycle.OnShutdownRunner.PureOnShutdownRunner -import com.digitalasset.canton.lifecycle.UnlessShutdown.AbortedDueToShutdown -import com.digitalasset.canton.logging.{SuppressionRule, TracedLogger} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.{BaseTest, HasExecutionContext, config} -import org.scalatest.wordspec.AsyncWordSpec -import org.slf4j.event.Level -import org.slf4j.event.Level.WARN - -import java.util.concurrent.ScheduledExecutorService -import scala.concurrent.duration.* - -class PromiseUnlessShutdownTest extends AsyncWordSpec with BaseTest with HasExecutionContext { - - "PromiseUnlessShutdown" should { - - "complete a promise with an outcome" in { - val p = PromiseUnlessShutdown.unsupervised[Int]() - p.outcome_(42) - - // Ignore second outcome - p.outcome_(23) - - p.future.futureValue shouldBe UnlessShutdown.Outcome(42) - } - - "complete a promise due to shutdown" in { - val p = PromiseUnlessShutdown.unsupervised[Int]() - p.shutdown_() - p.future.futureValue shouldBe UnlessShutdown.AbortedDueToShutdown - } - - "detect if a promise is not completed in time" in { - implicit val scheduler: ScheduledExecutorService = scheduledExecutor() - - loggerFactory.assertLogsSeq(SuppressionRule.LevelAndAbove(WARN))( - { - val p = PromiseUnlessShutdown.supervised[Int]( - "supervised-promise-out-of-time", - new FutureSupervisor.Impl(config.NonNegativeDuration(5.second)), - 1.second, - Level.WARN, - ) - val f = p.future - - // Wait longer than the future supervisor warn duration - Threading.sleep(3.second.toMillis) - - // Eventually complete the promise - p.outcome_(42) - f.futureValue shouldBe UnlessShutdown.Outcome(42) - }, - entries => { - assert(entries.nonEmpty) - forEvery(entries)( - _.warningMessage should include( - "supervised-promise-out-of-time has not completed after" - ) - ) - }, - ) - } - - "supervision should start only once the future is accessed" in { - implicit val scheduler: ScheduledExecutorService = scheduledExecutor() - - val promise = 
loggerFactory.assertLogs(SuppressionRule.LevelAndAbove(WARN))( - { - val p = PromiseUnlessShutdown.supervised[Int]( - "supervised-promise-only-on-access", - new FutureSupervisor.Impl(config.NonNegativeDuration(5.second)), - 1.second, - Level.WARN, - ) - - // Wait longer than the future supervisor warn duration - always(durationOfSuccess = 3.seconds) { - // Account for possible interference from previous test case whose log message escapes the test case - forEvery(loggerFactory.fetchRecordedLogEntries) { - _.warningMessage should include( - "supervised-promise-out-of-time has not completed after" - ) - } - } - p - } - ) - promise.outcome_(1) - promise.futureUS.futureValueUS shouldBe 1 - } - - "abort on shutdown" in { - val onShutdownRunner = new PureOnShutdownRunner(logger) - val promise = PromiseUnlessShutdown.abortOnShutdown( - "aborted-promise", - onShutdownRunner, - FutureSupervisor.Noop, - ) - onShutdownRunner.close() - promise.future.futureValue shouldBe AbortedDueToShutdown - } - - "discarded promises do not leak memory" in { - - object RecordingOnShutdownRunner extends AutoCloseable with OnShutdownRunner { - var tasks = Seq.empty[(LifeCycleRegistrationHandle, RunOnClosing)] - - // Intercept all the tasks and add them to tasks list - override def runOnOrAfterClose(task: RunOnClosing)(implicit - traceContext: TraceContext - ): LifeCycleRegistrationHandle = { - val token = super.runOnOrAfterClose(task) - tasks = tasks :+ (token -> task) - token - } - - override protected def logger: TracedLogger = PromiseUnlessShutdownTest.this.logger - override protected def onFirstClose(): Unit = () - override def close(): Unit = super.close() - } - val promise = PromiseUnlessShutdown.abortOnShutdown[Int]( - "aborted-promise", - RecordingOnShutdownRunner, - FutureSupervisor.Noop, - ) - promise.discard - - RecordingOnShutdownRunner.tasks.size shouldBe 1 - val (token, task) = RecordingOnShutdownRunner.tasks.collectFirst { - case (tok, x: PromiseUnlessShutdown.AbortPromiseOnShutdown) => tok -> x - }.value - // Simulate the promise being GCed by clearing the weak reference - task.promiseRef.clear() - - // Register a new task to get the old task cleared: - RecordingOnShutdownRunner.runOnOrAfterClose_(new RunOnClosing { - override def name: String = "dummy" - override def done: Boolean = false - override def run()(implicit traceContext: TraceContext): Unit = () - }) - - token.isScheduled shouldBe false - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/logging/LogEntryTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/logging/LogEntryTest.scala deleted file mode 100644 index 819537e6d9..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/logging/LogEntryTest.scala +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
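(Editor's note: a minimal sketch, not part of the diff, of the completion semantics covered by the PromiseUnlessShutdownTest deleted above.)

    val p = PromiseUnlessShutdown.unsupervised[Int]()
    p.outcome_(42) // the first completion wins; the test shows later outcomes are ignored
    p.future.futureValue shouldBe UnlessShutdown.Outcome(42)

    val q = PromiseUnlessShutdown.unsupervised[Int]()
    q.shutdown_() // completes the promise with AbortedDueToShutdown instead
    q.future.futureValue shouldBe UnlessShutdown.AbortedDueToShutdown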
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.logging - -import com.digitalasset.canton.BaseTest -import org.scalatest.exceptions.TestFailedException -import org.scalatest.wordspec.AnyWordSpec -import org.slf4j.event.Level - -class LogEntryTest extends AnyWordSpec with BaseTest { - - val ex = new RuntimeException("test exception") - - "LogEntry" should { - "pretty print nicely" in { - val mdc = Map(CanLogTraceContext.traceIdMdcKey -> "mytraceid", "otherKey" -> "otherValue") - - val entry = - LogEntry( - Level.WARN, - "com.digitalasset.canton.MyClass:MyNode", - "message line 1\nmessage line 2", - Some(ex), - mdc, - ) - - entry.toString should startWith( - """## WARN c.d.canton.MyClass:MyNode tid:mytraceid - message line 1 - |## message line 2 - |## MDC: otherKey -> otherValue - |## java.lang.RuntimeException: test exception - |## at com.digitalasset.canton.logging.LogEntryTest.""".stripMargin - ) - } - - "mention incorrect log level" in { - val entry = LogEntry(Level.WARN, "", "test") - - val failure = the[TestFailedException] thrownBy entry.errorMessage - failure.message.value shouldBe - """Incorrect log level WARN. Expected: ERROR - |## WARN - test""".stripMargin - } - - "mention incorrect log level and logger" in { - val entry = LogEntry(Level.WARN, "MyLogger", "test") - - val failure = the[TestFailedException] thrownBy entry.commandFailureMessage - failure.message.value shouldBe - """Incorrect log level WARN. Expected: ERROR - |Incorrect logger name MyLogger. Expected one of: - | com.digitalasset.canton.integration.EnvironmentDefinition, com.digitalasset.canton.integration.EnvironmentDefinition - |## WARN MyLogger - test""".stripMargin - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/logging/NamedEventCapturingLogger.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/logging/NamedEventCapturingLogger.scala deleted file mode 100644 index e4f0e58e0b..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/logging/NamedEventCapturingLogger.scala +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
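(Editor's note: a minimal sketch, not part of the diff, of the pretty-printing contract covered by the LogEntryTest deleted above.)

    val entry = LogEntry(Level.WARN, "com.digitalasset.canton.MyClass:MyNode", "message line 1\nmessage line 2")
    // The test expects toString to abbreviate the logger name and prefix every output
    // line with "##", e.g. "## WARN c.d.canton.MyClass:MyNode - message line 1"
    // followed by "## message line 2".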
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.logging - -import com.typesafe.scalalogging.Logger -import org.scalactic.source -import org.scalatest.Assertion -import org.scalatest.matchers.should.Matchers.* -import org.slf4j.event.Level - -import java.util.concurrent.TimeUnit -import scala.collection.immutable.ListMap -import scala.jdk.CollectionConverters.* - -/** Test logger that just writes the events into a queue for inspection - */ -@SuppressWarnings(Array("org.wartremover.warts.Null")) -class NamedEventCapturingLogger( - val name: String, - val properties: ListMap[String, String] = ListMap.empty, - outputLogger: Option[Logger] = None, - skip: LogEntry => Boolean = _ => false, -) extends NamedLoggerFactory { - - val eventQueue: java.util.concurrent.BlockingQueue[LogEntry] = - new java.util.concurrent.LinkedBlockingQueue[LogEntry]() - - private def pollEventQueue(ts: Option[Long]): LogEntry = { - val event = ts match { - case None => eventQueue.poll() - case Some(millis) => eventQueue.poll(millis, TimeUnit.MILLISECONDS) - } - if (event != null) { - outputLogger.foreach( - _.debug(s"Captured ${event.loggerName} ${event.level} ${event.message}") - ) - } - event - } - - val eventSeq: Seq[LogEntry] = eventQueue.asScala.toSeq - - private val logger = new BufferingLogger(eventQueue, name, skip(_)) - - override def appendUnnamedKey(key: String, value: String): NamedLoggerFactory = this - override def append(key: String, value: String): NamedLoggerFactory = this - - override private[logging] def getLogger(fullName: String): Logger = Logger(logger) - - def tryToPollMessage( - expectedMessage: String, - expectedLevel: Level, - expectedThrowable: Throwable = null, - ): Boolean = { - val event = eventQueue.peek() - if (event != null && eventMatches(event, expectedMessage, expectedLevel, expectedThrowable)) { - pollEventQueue(None) - true - } else { - false - } - } - - def eventMatches( - event: LogEntry, - expectedMessage: String, - expectedLevel: Level, - expectedThrowable: Throwable = null, - ): Boolean = - event.message == expectedMessage && event.level == expectedLevel && event.throwable == Option( - expectedThrowable - ) - - def assertNextMessageIs( - expectedMessage: String, - expectedLevel: Level, - expectedThrowable: Throwable = null, - timeoutMillis: Long = 2000, - )(implicit pos: source.Position): Assertion = - assertNextMessage(_ shouldBe expectedMessage, expectedLevel, expectedThrowable, timeoutMillis) - - def assertNextMessage( - messageAssertion: String => Assertion, - expectedLevel: Level, - expectedThrowable: Throwable = null, - timeoutMillis: Long = 2000, - )(implicit pos: source.Position): Assertion = - assertNextEvent( - { event => - withClue("Unexpected log message: ") { - messageAssertion(event.message) - } - withClue("Unexpected log level: ") { - event.level shouldBe expectedLevel - } - withClue("Unexpected throwable: ") { - event.throwable shouldBe Option(expectedThrowable) - } - }, - timeoutMillis, - ) - - def assertNextEvent(assertion: LogEntry => Assertion, timeoutMillis: Long = 2000)(implicit - pos: source.Position - ): Assertion = - Option(pollEventQueue(Some(timeoutMillis))) match { - case None => fail("Missing log event.") - case Some(event) => assertion(event) - } - - def assertNoMoreEvents(timeoutMillis: Long = 0)(implicit pos: source.Position): Assertion = - Option(eventQueue.poll(timeoutMillis, TimeUnit.MILLISECONDS)) match { - case None => succeed - case Some(event) => fail(s"Unexpected log event: $event") - } -} diff --git 
a/canton/community/common/src/test/test/scala/com/digitalasset/canton/logging/NamedLoggingTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/logging/NamedLoggingTest.scala deleted file mode 100644 index a3014780e1..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/logging/NamedLoggingTest.scala +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.logging - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.typesafe.scalalogging.Logger -import org.scalatest.wordspec.AnyWordSpec - -import scala.collection.immutable.ListMap - -class NamedLoggingTest extends AnyWordSpec with BaseTest { - "NamedLoggerFactory" can { - "append from root" should { - "not add separator " in { - NamedLoggerFactory.root.appendUnnamedKey("ignored key", "abc").name shouldBe "abc" - } - } - "append from parent with name" should { - "add a separator" in { - NamedLoggerFactory - .unnamedKey("ignored", "parent") - .appendUnnamedKey("unnamed", "abc") - .name shouldBe "parent/abc" - } - } - "name simple key value pair" should { - "add a key value separator" in { - NamedLoggerFactory("key", "value").name shouldBe "key=value" - } - } - "append structured property from parent with name" should { - "add separators" in { - NamedLoggerFactory("component", "abc") - .appendUnnamedKey("ignored descendant", "child") - .name shouldBe "component=abc/child" - } - } - "append structured property from parent with property" should { - "add second property" in { - NamedLoggerFactory("parent", "parentId").append("component", "abc").properties shouldBe Map( - "component" -> "abc", - "parent" -> "parentId", - ) - } - } - "append of duplicate key" should { - "fail in regular append" in { - assertThrows[IllegalArgumentException]( - NamedLoggerFactory("duplicate-key", "a").append("duplicate-key", "b") - ) - } - "fail in unnamed append" in { - assertThrows[IllegalArgumentException]( - NamedLoggerFactory("duplicate-key", "a").appendUnnamedKey("duplicate-key", "b") - ) - } - } - } - - "NamedLogging" can { - "create logger" should { - "use name if available" in { - val sut = InstanceWithNamedLogging("abc") - sut.loggerFactory.createLoggerFullName.value should endWith("NamedLogging$1:abc") - } - "not add name if unavailable" in { - val sut = InstanceWithNamedLogging("") - sut.loggerFactory.createLoggerFullName.value should endWith("NamedLogging$1") - } - } - - class InstanceWithNamedLogging(val loggerFactory: MockNamedLoggerFactory) extends NamedLogging { - // eagerly create a logger instance - logger.discard - } - - object InstanceWithNamedLogging { - def apply(name: String) = - new InstanceWithNamedLogging(new MockNamedLoggerFactory(name, ListMap.empty)) - } - - class MockNamedLoggerFactory(val name: String, val properties: ListMap[String, String]) - extends NamedLoggerFactory { - @SuppressWarnings(Array("org.wartremover.warts.Var")) - var createLoggerFullName: Option[String] = None - - override def appendUnnamedKey(key: String, value: String): NamedLoggerFactory = ??? - override def append(key: String, value: String): NamedLoggerFactory = ??? 
- override private[logging] def getLogger(fullName: String): Logger = { - createLoggerFullName = Some(fullName) - Logger(mock[org.slf4j.Logger]) - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/logging/SuppressingLoggerTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/logging/SuppressingLoggerTest.scala deleted file mode 100644 index 59e9e62765..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/logging/SuppressingLoggerTest.scala +++ /dev/null @@ -1,472 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.logging - -import com.digitalasset.canton.concurrent.Threading -import com.digitalasset.canton.util.{ErrorUtil, FutureUtil} -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import com.typesafe.scalalogging.Logger -import org.scalatest.exceptions.TestFailedException -import org.scalatest.wordspec.AnyWordSpec -import org.slf4j -import org.slf4j.event.Level - -import scala.collection.immutable.ListMap -import scala.concurrent.duration.* -import scala.concurrent.{Future, Promise} - -class SuppressingLoggerTest extends AnyWordSpec with BaseTest with HasExecutionContext { - - "suppress" should { - - "works normally without suppressions" in new LoggingTester { - logger.error("Test") - - verify(underlyingLogger).error("Test") - } - - "suppress intended log messages" in new LoggingTester { - loggerFactory.assertLogs( - logger.error("Test"), - _.errorMessage shouldBe "Test", - ) - - verify(underlyingLogger, never).error(any[String]) - verify(underlyingLogger).info("Suppressed ERROR: Test") - - // check it's no longer suppressed - logger.error("TestAgain") - - verify(underlyingLogger).error("TestAgain") - verify(underlyingLogger, atMost(1)).info(any[String]) - loggerFactory.recordedLogEntries shouldBe empty - } - - "propagate exceptions" in new LoggingTester { - val ex = new RuntimeException("Test exception") - - // An exception gets propagated - the[RuntimeException] thrownBy loggerFactory.assertLogs( - { - logger.error("Test") - throw ex - }, - _ => fail("Log messages should not be checked on exception."), - ) shouldBe ex - - // Errors get suppressed anyway - verify(underlyingLogger, never).error(any[String]) - verify(underlyingLogger).info("Suppressed ERROR: Test") - - // ... and can be checked - loggerFactory.recordedLogEntries.loneElement.errorMessage shouldBe "Test" - - // check it's no longer suppressed - logger.error("TestAgain") - - verify(underlyingLogger).error("TestAgain") - verify(underlyingLogger, atMost(1)).info(any[String]) - - // Errors will be deleted before suppressing again. 
-      loggerFactory.assertLoggedWarningsAndErrorsSeq({}, _ shouldBe empty)
-    }
-
-    "point out first failure and remaining errors" in new LoggingTester() {
-      val ex: TestFailedException = the[TestFailedException] thrownBy loggerFactory.assertLogs(
-        {
-          logger.error("Test1")
-          logger.error("foo")
-          logger.error("Test3")
-          logger.error("Test4")
-        },
-        _.errorMessage shouldBe "Test1",
-        _.errorMessage shouldBe "Test2",
-      )
-
-      ex.getMessage() should startWith regex
-        // The ( ) here are pointless groups in the regex that ensure that the trailing whitespace is not removed automatically by the setting in editorconfig
-        """forEvery failed, because:( )
-          | at index 1, "\[foo\]" was not equal to "\[Test2\]"
-          |( )
-          | Remaining log entries:
-          | ## ERROR c\.d\.c\.l\.SuppressingLoggerTest.*:TestLogger - Test3
-          | ## ERROR c\.d\.c\.l\.SuppressingLoggerTest.*:TestLogger - Test4
-          |""".stripMargin
-    }
-
-    "fail gracefully on unexpected errors" in new LoggingTester {
-      val ex: TestFailedException = the[TestFailedException] thrownBy loggerFactory.assertLogs(
-        {
-          logger.error("Test1")
-          logger.error("Test2")
-        },
-        _.errorMessage shouldBe "Test1",
-      )
-
-      ex.getMessage() should fullyMatch regex
-        """Found unexpected log messages:
-          | ## ERROR c\.d\.c\.l\.SuppressingLoggerTest.*:TestLogger - Test2
-          |""".stripMargin
-    }
-
-    "suppress intended log messages during asynchronous operation" in new LoggingTester {
-      val promise: Promise[Unit] = Promise[Unit]()
-
-      val fut = loggerFactory.assertLogs(promise.future, _.errorMessage shouldBe "Test")
-
-      logger.error("Test")
-
-      promise.success(())
-      fut.futureValue
-
-      verify(underlyingLogger, never).error(any[String])
-      verify(underlyingLogger).info("Suppressed ERROR: Test")
-
-      // check it's no longer suppressed
-      logger.error("TestAgain")
-
-      verify(underlyingLogger).error("TestAgain")
-      verify(underlyingLogger, atMost(1)).info(any[String])
-      loggerFactory.recordedLogEntries shouldBe empty
-    }
-
-    "propagate async exceptions" in new LoggingTester {
-      val ex: RuntimeException = new RuntimeException("Test exception")
-      val promise: Promise[Unit] = Promise[Unit]()
-
-      // An exception gets propagated
-      val future: Future[Unit] =
-        loggerFactory.assertLogs(
-          promise.future,
-          _ => fail("Log messages should not be checked on exception."),
-        )
-
-      logger.error("Test")
-      promise.failure(ex)
-
-      future.failed.futureValue shouldBe ex
-
-      // Errors get suppressed anyway
-      verify(underlyingLogger, never).error(any[String])
-      verify(underlyingLogger).info("Suppressed ERROR: Test")
-
-      // ... and can be checked
-      loggerFactory.recordedLogEntries.loneElement.errorMessage shouldBe "Test"
-
-      // check it's no longer suppressed
-      logger.error("TestAgain")
-
-      verify(underlyingLogger).error("TestAgain")
-      verify(underlyingLogger, atMost(1)).info(any[String])
-
-      // Errors will be deleted before suppressing again.
- loggerFactory.assertLoggedWarningsAndErrorsSeq({}, _ shouldBe empty) - } - - "save messages only from the last suppression block" in new LoggingTester { - loggerFactory.assertLogs( - logger.error("First"), - _.errorMessage shouldBe "First", - ) - - loggerFactory.assertLogs( - logger.error("Second"), - _.errorMessage shouldBe "Second", - ) - } - - "nicely format messages" in new LoggingTester { - loggerFactory.assertLogs( - logger.error(s"abc ${2 + 3} def ${1 + 2}"), - _.errorMessage shouldBe "abc 5 def 3", - ) - } - - "ignore order if told so" in new LoggingTester { - loggerFactory.assertLogsUnordered( - { - logger.error("First") - logger.error("Second") - logger.error("Third") - }, - _.errorMessage shouldBe "Third", - _.errorMessage shouldBe "First", - _.errorMessage shouldBe "Second", - ) - } - - "match assertions sequentially" in new LoggingTester { - loggerFactory.assertLogsUnordered( - { - logger.error("Message") - logger.error("LongMessage") - }, - _.errorMessage should include("LongMessage"), - _.errorMessage should include("Message"), - ) - } - - "match the repeated assertions repeatedly" in new LoggingTester { - loggerFactory.assertLogsUnordered( - { - logger.error("First message") - logger.error("Second message") - }, - _.errorMessage should include("message"), - _.errorMessage should include("message"), - ) - } - - "fail gracefully on unmatched unordered messages" in new LoggingTester { - val ex: TestFailedException = - the[TestFailedException] thrownBy loggerFactory.assertLogsUnordered( - logger.error("Second message"), - _.errorMessage shouldBe "First message", - _.errorMessage shouldBe "Second message", - ) - - ex.getMessage should fullyMatch regex - """No log message has matched the assertions with index 0. - | - |Matched log messages: - |1: ## ERROR c.d.c.l.SuppressingLoggerTest.*:TestLogger - Second message - |""".stripMargin - } - - "fail gracefully on unexpected unordered messages" in new LoggingTester { - val ex: TestFailedException = - the[TestFailedException] thrownBy loggerFactory.assertLogsUnordered( - { - logger.error("First message") - logger.error("Second message") - }, - _.errorMessage shouldBe "Second message", - ) - - ex.getMessage should fullyMatch regex - """Found unexpected log messages: - | ## ERROR c.d.c.l.SuppressingLoggerTest.*:TestLogger - First message - | - |Matched log messages: - |0: ## ERROR c.d.c.l.SuppressingLoggerTest.*:TestLogger - Second message - |""".stripMargin - } - - "fail gracefully on unmatched and unexpected unordered messages" in new LoggingTester { - val ex: TestFailedException = - the[TestFailedException] thrownBy loggerFactory.assertLogsUnordered( - { - logger.error("Unmatched message") - logger.error("Second message") - logger.error("Third message") - }, - _.errorMessage shouldBe "First message", - _.errorMessage shouldBe "Second message", - ) - - ex.getMessage should fullyMatch regex - """Found unexpected log messages: - | ## ERROR c.d.c.l.SuppressingLoggerTest.*:TestLogger - Unmatched message - | ## ERROR c.d.c.l.SuppressingLoggerTest.*:TestLogger - Third message - | - |No log message has matched the assertions with index 0. 
- | - |Matched log messages: - |1: ## ERROR c.d.c.l.SuppressingLoggerTest.*:TestLogger - Second message - |""".stripMargin - } - - "ignore missing optional unordered errors" in new LoggingTester { - import com.digitalasset.canton.logging.SuppressingLogger.LogEntryOptionality.* - loggerFactory.assertLogsUnorderedOptional( - { - logger.error("First") - logger.error("Second") - logger.error("Third") - }, - Optional -> (_.errorMessage shouldBe "Fourth"), - Required -> (_.errorMessage shouldBe "Third"), - Optional -> (_.errorMessage shouldBe "First"), - Required -> (_.errorMessage shouldBe "Second"), - ) - } - - "skip errors that are to be skipped" in new LoggingTester { - override def skipLogEntry(logEntry: LogEntry): Boolean = - logEntry.level == slf4j.event.Level.ERROR && - logEntry.loggerName.startsWith(classOf[SuppressingLogger].getName) && - logEntry.message == "message" - - loggerFactory.assertLogs( - { - logger.error("message") - logger.error("another message") - logger.error("message") - logger.error("yet another message") - }, - _.errorMessage shouldBe "another message", - _.errorMessage shouldBe "yet another message", - ) - verify(underlyingLogger, times(2)).info(s"Suppressed ERROR: message") - } - - "check sequence of log entries" in new LoggingTester { - loggerFactory.assertLogsSeq(SuppressionRule.LevelAndAbove(Level.WARN))( - { - logger.error("Test1") - logger.error("Test2") - logger.error("Test3") - logger.error("Test4") - }, - entries => - forAtLeast(1, entries)( - _.errorMessage shouldBe ("Test2") - ), - ) - } - - "point out failed assertion against sequence of log entries" in new LoggingTester { - the[TestFailedException] thrownBy loggerFactory.assertLogsSeq( - SuppressionRule.LevelAndAbove(Level.WARN) - )( - { - logger.error("Test1") - logger.error("Test2") - logger.error("Test3") - logger.error("Test4") - }, - entries => - forEvery(entries)( - _.errorMessage shouldBe "Test2" - ), - ) - } - - "check sequence of log entries eventually" in new LoggingTester { - loggerFactory.assertEventuallyLogsSeq(SuppressionRule.LevelAndAbove(Level.WARN))( - { - logger.error("Test1") - logger.error("Test2") - logger.error("Test3") - logger.error("Test4") - }, - entries => - forAtLeast(1, entries)( - _.errorMessage shouldBe ("Test2") - ), - ) - FutureUtil.doNotAwait( - Future { - Threading.sleep(1.seconds.toMillis) - logger.error("Test3") - logger.error("Test4") - }, - "unexpected error", - ) - - loggerFactory.assertEventuallyLogsSeq( - SuppressionRule.LevelAndAbove(Level.WARN) - )( - {}, - entries => - forAtLeast(1, entries)( - _.errorMessage shouldBe "Test4" - ), - ) - - val async = Future { - logger.error("Test1") - logger.error("Test2") - Threading.sleep(1.seconds.toMillis) - logger.error("Test3") - logger.error("Test4") - } - - loggerFactory - .assertEventuallyLogsSeq( - SuppressionRule.LevelAndAbove(Level.WARN) - )( - async, - entries => - forAtLeast(1, entries)( - _.errorMessage shouldBe "Test4" - ), - ) - .futureValue - } - - "point out failed assertion against sequence of log entries eventually" in new LoggingTester { - the[TestFailedException] thrownBy loggerFactory.assertEventuallyLogsSeq( - SuppressionRule.LevelAndAbove(Level.WARN) - )( - { - logger.error("Test1") - logger.error("Test2") - logger.error("Test3") - logger.error("Test4") - }, - entries => - forEvery(entries)( - _.errorMessage shouldBe "Test2" - ), - timeUntilSuccess = 1.seconds, - ) - - the[TestFailedException] thrownBy loggerFactory - .assertEventuallyLogsSeq( - SuppressionRule.LevelAndAbove(Level.WARN) - )( - Future 
{
-          Threading.sleep(1.seconds.toMillis)
-          logger.error("Test1")
-          logger.error("Test2")
-          logger.error("Test3")
-          logger.error("Test4")
-        },
-        entries =>
-          forAtLeast(1, entries)(
-            _.errorMessage shouldBe "Never happen"
-          ),
-        timeUntilSuccess = 2.seconds,
-      )
-      .futureValue
-    }
-  }
-
-  "Throwable.addSuppressed" should {
-    "also log the suppressed exception" in new LoggingTester {
-      val ex1 = new RuntimeException("ONE")
-      val ex2 = new RuntimeException("TWO")
-      ex1.addSuppressed(ex2)
-
-      loggerFactory.assertLogs(
-        logger.error("Test", ex1),
-        entry => {
-          entry.errorMessage should include("Test")
-          entry.throwable.value.getMessage shouldBe "ONE"
-          entry.throwable.value.getSuppressed should contain(ex2)
-          ErrorUtil.messageWithStacktrace(entry.throwable.value) should include("TWO")
-        },
-      )
-    }
-  }
-
-  class LoggingTester extends NamedLogging {
-    val underlyingNamedLogger = new TestNamedLogger
-    def skipLogEntry(_logEntry: LogEntry): Boolean = false
-    val loggerFactory: SuppressingLogger =
-      new SuppressingLogger(underlyingNamedLogger, pollTimeout = 10.millis, skipLogEntry)
-    val underlyingLogger: slf4j.Logger = underlyingNamedLogger.logger
-  }
-
-  class TestNamedLogger extends NamedLoggerFactory {
-    val logger: slf4j.Logger = mock[slf4j.Logger]
-    when(logger.isErrorEnabled).thenReturn(true)
-    override val name: String = "TestLogger"
-    override val properties: ListMap[String, String] = ListMap.empty
-    override def appendUnnamedKey(key: String, value: String): NamedLoggerFactory = this
-    override def append(key: String, value: String): NamedLoggerFactory = this
-    override private[logging] def getLogger(fullName: String): Logger = Logger(logger)
-  }
-}
diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/logging/pretty/PrettyPrintingImplicitResolutionTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/logging/pretty/PrettyPrintingImplicitResolutionTest.scala
deleted file mode 100644
index d3ecb3cd68..0000000000
--- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/logging/pretty/PrettyPrintingImplicitResolutionTest.scala
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-package com.digitalasset.canton.logging.pretty
-
-// lives in a separate class so that we can test a situation without imports
-class PrettyPrintingImplicitResolutionTest {
-  def testImplicitResolution[A: Pretty](x: A): Unit = ()
-
-  def testImplicitResolution2[A <: PrettyPrinting](x: A): Unit = testImplicitResolution(x)
-}
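Both pretty-printing test files in this range exercise the same Pretty typeclass. As a reminder of the pattern they cover, a minimal sketch using only DSL calls visible in the deleted tests (prettyOfClass, param); Window is a hypothetical type:

    import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}

    final case class Window(width: Int, height: Int) extends PrettyPrinting {
      // toString and show are both derived from this single definition
      override protected def pretty: Pretty[Window] =
        prettyOfClass(param("width", _.width), param("height", _.height))
    }
    // Window(800, 600).toString == "Window(width = 800, height = 600)"

diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/logging/pretty/PrettyPrintingTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/logging/pretty/PrettyPrintingTest.scala
deleted file mode 100644
index d756a3e68b..0000000000
--- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/logging/pretty/PrettyPrintingTest.scala
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.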
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.logging.pretty - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.config.ApiLoggingConfig -import com.digitalasset.canton.util.ShowUtil.* -import org.mockito.exceptions.verification.SmartNullPointerException -import org.scalatest.wordspec.AnyWordSpec - -class PrettyPrintingTest extends AnyWordSpec with BaseTest { - - private case object ExampleSingleton extends PrettyPrinting { - override protected def pretty: Pretty[ExampleSingleton.type] = - prettyOfObject[ExampleSingleton.type] - } - - private val singletonInst: ExampleSingleton.type = ExampleSingleton - private val singletonStr: String = "ExampleSingleton" - - /** Example of a class where pretty printing needs to be implemented separately. - */ - private case class ExampleAlienClass(p1: String, p2: String) - - /** Enable pretty printing for [[ExampleAlienClass]]. - */ - private implicit val prettyAlien: Pretty[ExampleAlienClass] = { - import Pretty.* - prettyOfClass( - param("p1", _.p1.doubleQuoted), - unnamedParam(_.p2.doubleQuoted), - customParam(inst => - show"allParams: {${Seq(inst.p1.singleQuoted, inst.p2.singleQuoted).mkShow()}}" - ), - paramWithoutValue("confidential"), - ) - } - - private val alienInst: ExampleAlienClass = ExampleAlienClass("p1Val", "p2Val") - private val alienStr: String = - """ExampleAlienClass(p1 = "p1Val", "p2Val", allParams: {'p1Val', 'p2Val'}, confidential = ...)""" - - /** Example of a class that extends [[PrettyPrinting]]. - */ - private case class ExampleCaseClass(alien: ExampleAlienClass, singleton: ExampleSingleton.type) - extends PrettyPrinting { - override protected def pretty: Pretty[ExampleCaseClass] = - prettyOfClass(param("alien", _.alien), param("singleton", _.singleton)) - } - - private val caseClassInst: ExampleCaseClass = ExampleCaseClass(alienInst, ExampleSingleton) - private val caseClassStr: String = - s"ExampleCaseClass(alien = $alienStr, singleton = $singletonStr)" - - /** Example of a class that uses ad hoc pretty printing. 
- */ - private case class ExampleAdHocCaseClass(alien: ExampleAlienClass, caseClass: ExampleCaseClass) - extends PrettyPrinting { - override protected def pretty: Pretty[ExampleAdHocCaseClass] = adHocPrettyInstance - } - - private val adHocCaseClassInst: ExampleAdHocCaseClass = - ExampleAdHocCaseClass(alienInst, caseClassInst) - private val adHocCaseClassStr: String = - s"""ExampleAdHocCaseClass( - | ExampleAlienClass("p1Val", "p2Val"), - | $caseClassStr - |)""".stripMargin - - private case object ExampleAdHocObject extends PrettyPrinting { - override protected def pretty: Pretty[this.type] = adHocPrettyInstance - } - - private val adHocObjectInst: ExampleAdHocObject.type = ExampleAdHocObject - private val adHocObjectStr: String = "ExampleAdHocObject" - - private case class ExampleAbstractCaseClass(content: Int) extends PrettyPrinting { - override protected def pretty: Pretty[ExampleAbstractCaseClass] = prettyOfClass( - param("content", _.content) - ) - } - - private val abstractCaseClass: ExampleAbstractCaseClass = ExampleAbstractCaseClass(42) - private val abstractCaseClassStr: String = "ExampleAbstractCaseClass(content = 42)" - - private case class ExampleInfix(first: Int, second: Boolean) extends PrettyPrinting { - override protected def pretty: Pretty[ExampleInfix] = prettyInfix(_.first, "~>", _.second) - } - private val exampleInfix: ExampleInfix = ExampleInfix(1, second = true) - private val exampleInfixStr: String = "1 ~> true" - - "show is pretty" in { - singletonInst.show shouldBe singletonStr - alienInst.show shouldBe alienStr - caseClassInst.show shouldBe caseClassStr - adHocCaseClassInst.show shouldBe adHocCaseClassStr - adHocObjectInst.show shouldBe adHocObjectStr - abstractCaseClass.show shouldBe abstractCaseClassStr - exampleInfix.show shouldBe exampleInfixStr - } - - "show interpolator is pretty" in { - show"Showing $singletonInst" shouldBe s"Showing $singletonStr" - show"Showing $alienInst" shouldBe s"Showing $alienStr" - show"Showing $caseClassInst" shouldBe s"Showing $caseClassStr" - show"Showing $adHocCaseClassInst" shouldBe s"Showing $adHocCaseClassStr" - show"Showing $adHocObjectInst" shouldBe s"Showing $adHocObjectStr" - show"Showing $abstractCaseClass" shouldBe s"Showing $abstractCaseClassStr" - show"Showing $exampleInfix" shouldBe s"Showing $exampleInfixStr" - } - - "toString is pretty" in { - singletonInst.toString shouldBe singletonStr - caseClassInst.toString shouldBe caseClassStr - adHocCaseClassInst.toString shouldBe adHocCaseClassStr - adHocObjectInst.toString shouldBe adHocObjectStr - abstractCaseClass.toString shouldBe abstractCaseClassStr - exampleInfix.toString shouldBe exampleInfixStr - } - - "toString is not pretty" in { - alienInst.toString shouldBe "ExampleAlienClass(p1Val,p2Val)" - } - - "fail gracefully on a mock" in { - val mockedInst = mock[ExampleCaseClass] - - (the[SmartNullPointerException] thrownBy mockedInst.toString).getMessage should - endWith("exampleCaseClass.pretty();\n") - (the[SmartNullPointerException] thrownBy mockedInst.show).getMessage should - endWith("exampleCaseClass.pretty();\n") - import Pretty.PrettyOps - (the[SmartNullPointerException] thrownBy mockedInst.toPrettyString()).getMessage should - endWith("exampleCaseClass.pretty();\n") - } - - "print null values gracefully" in { - val nullValue: ExampleCaseClass = null - show"$nullValue" shouldBe "null" - - val nestedNullValue = ExampleCaseClass(null, null) - nestedNullValue.toString shouldBe "ExampleCaseClass(alien = null, singleton = null)" - - val nullObject: 
ExampleSingleton.type = null - show"$nullObject" shouldBe "null" - - val nullAdhoc: ExampleAdHocCaseClass = null - show"$nullAdhoc" shouldBe "null" - - val nullAdhocObject: ExampleAdHocObject.type = null - show"$nullAdhocObject" shouldBe "null" - - val nullThrowable: Throwable = null - show"$nullThrowable" shouldBe "null" - - val nullInfix: ExampleInfix = null - show"$nullInfix" shouldBe "null" - } - - "catch exception when pretty printing invalid control-chars" ifCrashOnPrettyPrintingErrors { - final case class Invalid(str: String) extends PrettyPrinting { - override protected[pretty] def pretty: Pretty[Invalid] = prettyOfString(_.str) - } - - final case class Invalid2(str: String) - - val invalidAnsi = "\u001b[0;31m" - val errorStr = - "Unknown ansi-escape [0;31m at index 0 inside string cannot be parsed into an fansi.Str" - - val invalidAnsi2 = "\u009bNormal string" - - val invalid = Invalid(invalidAnsi) - intercept[IllegalArgumentException]( - show"$invalid" - ).getMessage should include(errorStr) - - intercept[IllegalArgumentException]( - invalid.toString - ).getMessage should ( - include(errorStr) and include( - "The offending ANSI escape characters were replaced by a star" - ) and include("*") - ) - - intercept[IllegalArgumentException]( - Invalid(invalidAnsi2).toString - ).getMessage should ( - include("Unknown ansi-escape") and include( - "The offending ANSI escape characters were replaced by a star" - ) and include("*") - ) - - val invalid2 = Invalid2(invalidAnsi) - val config = ApiLoggingConfig() - val pprinter = new CantonPrettyPrinter(config.maxStringLength, config.maxMessageLines) - pprinter.printAdHoc(invalid2) should include(errorStr) - } - - "prettyOfClass" should { - "work for primitive classes" in { - Pretty - .prettyOfClass[Long](Pretty.unnamedParam(Predef.identity)) - .treeOf(13L) - .show shouldBe "Long(13)" - } - - "work for Null" in { - val nulll = Pretty.prettyOfClass[Null]().treeOf(null) - nulll.show shouldBe "null" - } - - "work for AnyRef" in { - Pretty.prettyOfClass[AnyRef]().treeOf(new Object).show shouldBe "Object()" - } - - "work for Java interfaces" in { - Pretty - .prettyOfClass[Runnable]() - .treeOf(new Runnable() { - override def run(): Unit = ??? - }) - .show shouldBe "Object()" - } - } - - implicit class TestSelectionSupport(label: String) { - def ifCrashOnPrettyPrintingErrors(test: => org.scalatest.compatible.Assertion) = - if (Pretty.crashOnPrettyPrintingErrors) label in test - else () - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/logging/pretty/PrettyTestInstances.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/logging/pretty/PrettyTestInstances.scala deleted file mode 100644 index a27e050d90..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/logging/pretty/PrettyTestInstances.scala +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.logging.pretty - -import com.digitalasset.canton.protocol.{ - LfCommittedTransaction, - LfGlobalKeyWithMaintainers, - LfNode, - LfNodeCreate, - LfNodeExercises, - LfNodeFetch, - LfNodeLookupByKey, - LfNodeRollback, - LfVersionedTransaction, -} - -/** Pretty printing implicits for use by tests only. These enable showing readable multiline diffs - * when expected and actual transactions differ unexpectedly. 
- */
-trait PrettyTestInstances {
-  import Pretty.*
-
-  implicit lazy val prettyLfCommittedTransaction: Pretty[LfCommittedTransaction] = prettyOfClass(
-    param("nodes", _.nodes),
-    param("roots", _.roots.toList),
-  )
-
-  implicit lazy val prettyLfVersionedTransaction: Pretty[LfVersionedTransaction] = prettyOfClass(
-    param("nodes", _.nodes),
-    param("roots", _.roots.toList),
-    param("version", _.version),
-  )
-
-  implicit lazy val prettyLfNode: Pretty[LfNode] = {
-    case n: LfNodeCreate => prettyLfNodeCreate.treeOf(n)
-    case n: LfNodeExercises => prettyLfNodeExercises.treeOf(n)
-    case n: LfNodeFetch => prettyLfNodeFetch.treeOf(n)
-    case n: LfNodeLookupByKey => prettyLfNodeLookupByKey.treeOf(n)
-    case n: LfNodeRollback => prettyLfNodeRollback.treeOf(n)
-  }
-
-  implicit lazy val prettyLfNodeCreate: Pretty[LfNodeCreate] = prettyOfClass(
-    param("coid", _.coid),
-    param("signatories", _.signatories),
-    param("stakeholders", _.stakeholders),
-    param("templateId", _.templateId),
-    param("version", _.version),
-    paramIfDefined("key", _.keyOpt),
-    param("arguments", _.arg),
-  )
-
-  implicit lazy val prettyLfNodeExercises: Pretty[LfNodeExercises] = prettyOfClass(
-    param("targetCoid", _.targetCoid),
-    param("actingParties", _.actingParties),
-    param("signatories", _.signatories),
-    param("stakeholders", _.stakeholders),
-    paramIfNonEmpty("choiceObservers", _.choiceObservers),
-    paramIfNonEmpty("children", _.children.toList),
-    param("choiceId", _.choiceId.singleQuoted),
-    param("chosenValue", _.chosenValue),
-    paramIfTrue("consuming", _.consuming),
-    param("exerciseResult", _.exerciseResult.showValueOrNone),
-    param("templateId", _.templateId),
-    param("version", _.version),
-    paramIfTrue("byKey", _.byKey),
-    paramIfDefined("key", _.keyOpt),
-  )
-
-  implicit lazy val prettyLfNodeFetch: Pretty[LfNodeFetch] = prettyOfClass(
-    param("coid", _.coid),
-    param("signatories", _.signatories),
-    param("stakeholders", _.stakeholders),
-    param("actingParties", _.actingParties),
-    param("templateId", _.templateId),
-    param("version", _.version),
-    paramIfTrue("byKey", _.byKey),
-    paramIfDefined("key", _.keyOpt),
-  )
-
-  implicit lazy val prettyLfNodeLookupByKey: Pretty[LfNodeLookupByKey] = prettyOfClass(
-    param("result", _.result.showValueOrNone),
-    param("templateId", _.templateId),
-    param("version", _.version),
-    param("key", _.key),
-  )
-
-  implicit lazy val prettyLfNodeRollback: Pretty[LfNodeRollback] = prettyOfClass(
-    paramIfNonEmpty("children", _.children.toList)
-  )
-
-  implicit lazy val prettyLfGlobalKeyWithMaintainers: Pretty[LfGlobalKeyWithMaintainers] =
-    prettyOfClass(
-      param("key", _.value),
-      param("maintainers", _.maintainers),
-    )
-
-}
-
-object PrettyTestInstances extends PrettyTestInstances
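PrettyTestInstances above follows the standard recipe for pretty-printing types the test code does not own: declare implicit Pretty values alongside the tests. A minimal sketch of the same recipe for an arbitrary third-party type (java.util.UUID is a hypothetical example):

    import java.util.UUID

    import com.digitalasset.canton.logging.pretty.Pretty
    import com.digitalasset.canton.logging.pretty.Pretty.*

    object PrettyUuidInstances {
      // prettyOfString is the same helper the deleted PrettyPrintingTest uses
      implicit val prettyUuid: Pretty[UUID] = prettyOfString(_.toString)
    }

diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/metrics/MetricsUtils.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/metrics/MetricsUtils.scala
deleted file mode 100644
index 7ad00cd47f..0000000000
--- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/metrics/MetricsUtils.scala
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.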
-// SPDX-License-Identifier: Apache-2.0
-
-package com.digitalasset.canton.metrics
-
-import com.daml.metrics.api.HistogramInventory
-import com.daml.metrics.api.opentelemetry.OpenTelemetryMetricsFactory
-import com.digitalasset.canton.BaseTest
-import com.digitalasset.canton.topology.Member
-import org.scalatest.Assertion
-
-import scala.reflect.ClassTag
-
-/** Utility methods to assert state of metrics.
- */
-trait MetricsUtils { this: BaseTest =>
-
-  protected val onDemandMetricsReader: OpenTelemetryOnDemandMetricsReader =
-    new OpenTelemetryOnDemandMetricsReader()
-
-  // def instead of val because some test suites might require a new metrics factory for each test
-  // If that's not the case, fix the factory once in the test
-  protected def metricsFactory(
-      histogramInventory: HistogramInventory
-  ): OpenTelemetryMetricsFactory = testableMetricsFactory(
-    this.getClass.getSimpleName,
-    onDemandMetricsReader,
-    histogramInventory.registered().map(_.name.toString()).toSet,
-  )
-
-  def getMetricValues[TargetType <: MetricValue](name: String)(implicit
-      M: ClassTag[TargetType]
-  ): Seq[TargetType] =
-    MetricValue
-      .fromMetricData(
-        onDemandMetricsReader
-          .read()
-          .find(_.getName.endsWith(name))
-          .value
-      )
-      .flatMap { metricData =>
-        metricData.select[TargetType]
-      }
-
-  def assertInContext(name: String, key: String, value: String): Assertion =
-    clue(s"metric $name has value $value for key $key in context") {
-      getMetricValues[MetricValue.LongPoint](name).headOption
-        .flatMap(_.attributes.get(key)) shouldBe Some(value)
-    }
-
-  def assertNotInContext(name: String, key: String): Assertion =
-    clue(s"metric $name has no value for key $key in context") {
-      getMetricValues[MetricValue.LongPoint](name).headOption
-        .flatMap(_.attributes.get(key)) shouldBe empty
-    }
-
-  def assertMemberIsInContext(name: String, member: Member): Assertion =
-    assertInContext(name, "member", member.toString)
-
-  def assertLongValue(name: String, expected: Long): Assertion =
-    clue(s"metric $name has value $expected") {
-      getMetricValues[MetricValue.LongPoint](name).loneElement.value shouldBe expected
-    }
-
-  def assertNoValue(name: String): Assertion =
-    clue(s"metric $name has no value") {
-      onDemandMetricsReader
-        .read()
-        .exists(_.getName.endsWith(name)) shouldBe false
-    }
-}
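A sketch of how a suite would call the helpers above, assuming it extends BaseTest with MetricsUtils; the metric name, attribute keys, and values are hypothetical:

    // inside a test body of such a suite:
    assertLongValue("example.counter", expected = 3L)
    assertInContext("example.counter", "member", "participant1")
    assertNotInContext("example.counter", "tenant")
    assertNoValue("example.gauge")

diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/networking/UrlValidatorTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/networking/UrlValidatorTest.scala
deleted file mode 100644
index bc7233f864..0000000000
--- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/networking/UrlValidatorTest.scala
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.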
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.networking - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.networking.UrlValidator.InvalidScheme -import org.scalatest.wordspec.AnyWordSpec - -class UrlValidatorTest extends AnyWordSpec with BaseTest { - - "the url validator" should { - - "accept valid http urls" in { - forAll( - Table( - "url", - "http://example.com", - "http://example.com/url", - "http://example.com:80/url", - ) - ) { url => - UrlValidator.validate(url).value.toString shouldBe url - } - } - - "accept valid https urls" in { - forAll( - Table( - "url", - "https://example.com", - "https://example.com/url", - "https://example.com:443/url", - ) - ) { url => - UrlValidator.validate(url).value.toString shouldBe url - } - } - - "reject urls without a scheme" in { - UrlValidator.validate("example.com").left.value shouldBe InvalidScheme(null) - } - - "reject invalid urls" in { - forAll( - Table( - "url", - "http://", - "https://", - "http:/example.com", - "https:/example.com", - "https://:443", - "http://:80", - ":443", - ) - ) { url => - leftOrFail(UrlValidator.validate(url))(s"expected invalid url $url") - } - } - - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/networking/grpc/ApiRequestLoggerTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/networking/grpc/ApiRequestLoggerTest.scala deleted file mode 100644 index b7c3d1177d..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/networking/grpc/ApiRequestLoggerTest.scala +++ /dev/null @@ -1,903 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.networking.grpc - -import com.digitalasset.canton.config.ApiLoggingConfig -import com.digitalasset.canton.logging.{NamedEventCapturingLogger, TracedLogger} -import com.digitalasset.canton.protobuf.HelloServiceGrpc.HelloService -import com.digitalasset.canton.protobuf.{Hello, HelloServiceGrpc} -import com.digitalasset.canton.sequencing.authentication.grpc.Constant -import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import com.typesafe.scalalogging.Logger -import io.grpc.* -import io.grpc.inprocess.{InProcessChannelBuilder, InProcessServerBuilder} -import io.grpc.stub.{ServerCallStreamObserver, StreamObserver} -import org.scalactic.Equality -import org.scalatest.Assertion -import org.scalatest.prop.{TableFor2, TableFor5} -import org.scalatest.wordspec.AnyWordSpec -import org.slf4j.event.Level -import org.slf4j.event.Level.* - -import java.util.concurrent.TimeUnit -import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference} -import scala.annotation.nowarn -import scala.concurrent.{Future, Promise} -import scala.util.control.NonFatal - -@SuppressWarnings(Array("org.wartremover.warts.Null")) -@nowarn("msg=match may not be exhaustive") -final class ApiRequestLoggerTest extends AnyWordSpec with BaseTest with HasExecutionContext { - - private val ChannelName: String = "testSender" - - private val Request: Hello.Request = Hello.Request("Hello server") - private val Response: Hello.Response = Hello.Response("Hello client") - - // Exception messages are carefully chosen such that errors logged by SerializingExecutor will be suppressed. 
- private val Exception: RuntimeException = new RuntimeException( - "test exception (runtime exception)" - ) - private val CheckedException: Exception = new Exception("test exception (checked exception)") - private val Error: UnknownError = new java.lang.UnknownError("test exception (error)") - - override protected def exitOnFatal = - false // As we are testing with fatal errors, switch off call to system.exit - - private val StatusDescription: String = "test status description" - - private val Trailers: Metadata = { - val m = new Metadata() - m.put(Constant.MEMBER_ID_METADATA_KEY, "testValue") - m - } - - private val InvalidArgumentStatus: Status = - Status.INVALID_ARGUMENT.withDescription(StatusDescription) - private val AbortedStatus: Status = Status.ABORTED - private val InternalStatus: Status = - Status.INTERNAL.withDescription(StatusDescription).withCause(Exception) - private val UnknownStatus: Status = Status.UNKNOWN.withCause(Error) - private val UnauthenticatedStatus: Status = - Status.UNAUTHENTICATED.withDescription(StatusDescription) - - private val failureCases: TableFor5[Status, Metadata, String, Level, String] = Table( - ("Status", "Trailers", "Expected description", "Expected log level", "Expected log message"), - ( - InvalidArgumentStatus, - null, - StatusDescription, - INFO, - s"failed with INVALID_ARGUMENT/$StatusDescription", - ), - (AbortedStatus, Trailers, null, INFO, s"failed with ABORTED\n Trailers: $Trailers"), - (InternalStatus, null, StatusDescription, ERROR, s"failed with INTERNAL/$StatusDescription"), - ( - UnknownStatus, - Trailers, - Error.getMessage, - ERROR, - s"failed with UNKNOWN/${Error.getMessage}\n Trailers: $Trailers", - ), - ( - UnauthenticatedStatus, - null, - StatusDescription, - DEBUG, - "failed with UNAUTHENTICATED/test status description", - ), - ) - - private val throwableCases: TableFor2[String, Throwable] = Table( - ("Description", "Throwable"), - ("RuntimeException", Exception), - ("Exception", CheckedException), - ("Error", Error), - ) - - private val ClientCancelsStatus: Status = Status.CANCELLED.withDescription("Context cancelled") - private val ServerCancelsStatus: Status = - Status.CANCELLED.withDescription("cancelling due to cancellation by client") - - private val grpcClientCancelledStreamed: String = "failed with CANCELLED/call already cancelled" - - private val cancelCases: TableFor5[String, Any, Level, String, Throwable] = Table( - ( - "Description", - "Action after cancellation", - "Expected log level", - "Expected log message", - "Expected exception", - ), - ("server responding anyway", Response, INFO, grpcClientCancelledStreamed, null), - ("server completing", (), INFO, null, null), - ( - "server cancelling", - ServerCancelsStatus, - INFO, - s"failed with CANCELLED/${ServerCancelsStatus.getDescription}", - null, - ), - ( - "server failing", - InternalStatus, - ERROR, - s"failed with INTERNAL/${InternalStatus.getDescription}", - InternalStatus.getCause, - ), - ) - - private implicit val eqMetadata: Equality[Metadata] = (a: Metadata, b: Any) => { - val first = Option(a).getOrElse(new Metadata()) - val secondAny = Option(b).getOrElse(new Metadata()) - - secondAny match { - case second: Metadata => - first.toString == second.toString - case _ => false - } - } - - private def assertClientFailure( - clientCompletion: Future[_], - serverStatus: Status, - serverTrailers: Metadata = new Metadata(), - clientCause: Throwable = null, - ): Assertion = - inside(clientCompletion.failed.futureValue) { case sre: StatusRuntimeException => - 
sre.getStatus.getCode shouldBe serverStatus.getCode - sre.getStatus.getDescription shouldBe serverStatus.getDescription - sre.getCause shouldBe clientCause - sre.getTrailers shouldEqual serverTrailers - } - - val requestTraceContext: TraceContext = - TraceContext.withNewTraceContext("request_trace")(tc => tc) - - private val progressLogger = loggerFactory.getLogger(classOf[Env]) - - val capturingLogger: NamedEventCapturingLogger = - new NamedEventCapturingLogger( - classOf[ApiRequestLoggerTest].getSimpleName, - outputLogger = Some(progressLogger), - ) - - // need to override these loggers as we want the execution context to pick up the capturing logger - override protected val noTracingLogger: Logger = - capturingLogger.getLogger(classOf[ApiRequestLoggerTest]) - override protected val logger: TracedLogger = TracedLogger(noTracingLogger) - - private def captureSpuriousMessageAfterErrorThrow( - throwable: Throwable, - createExpectedLogMessage: (String, Boolean) => String, - ): Unit = { - // since our latest gRPC upgrade (https://github.com/DACH-NY/canton/pull/15304), - // the client might log one additional "completed" message before or after the - // fatal error being logged by gRPC - val capturedComplete = new AtomicBoolean(false) - capturedComplete.set( - capturingLogger.tryToPollMessage(createExpectedLogMessage("completed", false), DEBUG) - ) - capturingLogger.assertNextMessageIs( - s"A fatal error has occurred in $executionContextName. Terminating thread.", - ERROR, - throwable, - ) - if (!capturedComplete.get()) { - // The last "completed" message can appear spuriously - // so wait a bit to ensure it's captured here and not in the subsequent assertNoMoreEvents - // - // It is fine to poll here since this test does not expect subsequent events - Option(capturingLogger.eventQueue.poll(100L, TimeUnit.MILLISECONDS)) - .foreach { - case event - if capturingLogger.eventMatches( - event, - createExpectedLogMessage("completed", false), - DEBUG, - ) => - case other => - fail(s"Unexpected event $other") - } - } - } - - class Env(logMessagePayloads: Boolean, maxStringLength: Int, maxMetadataSize: Int) { - val service: HelloService = mock[HelloService] - - val helloServiceDefinition: ServerServiceDefinition = - HelloServiceGrpc.bindService(service, parallelExecutionContext) - - val apiRequestLogger: ApiRequestLogger = - new ApiRequestLogger( - capturingLogger, - config = ApiLoggingConfig( - messagePayloads = logMessagePayloads, - maxStringLength = maxStringLength, - maxMetadataSize = maxMetadataSize, - ), - ) - - val server: Server = InProcessServerBuilder - .forName(ChannelName) - .executor(executorService) - .addService( - ServerInterceptors - .intercept(helloServiceDefinition, apiRequestLogger, TraceContextGrpc.serverInterceptor) - ) - .build() - - server.start() - - val channel: ManagedChannel = - InProcessChannelBuilder - .forName(ChannelName) - .intercept(TraceContextGrpc.clientInterceptor) - .build() - - val client: HelloServiceGrpc.HelloServiceStub = HelloServiceGrpc.stub(channel) - - // Remove events remaining from last test case. 
- capturingLogger.eventQueue.clear() - - def close(): Unit = { - channel.shutdown() - channel.awaitTermination(1, TimeUnit.SECONDS) - - server.shutdown() - server.awaitTermination(1, TimeUnit.SECONDS) - } - } - - private val testCounter = new AtomicInteger(0) - - def withEnv[T]( - logMessagePayloads: Boolean = true, - maxStringLength: Int = 50, - maxMetadataSize: Int = 200, - )(test: Env => T): T = { - val env = new Env(logMessagePayloads, maxStringLength, maxMetadataSize) - val cnt = testCounter.incrementAndGet() - val result = new AtomicReference[T] - try { - progressLogger.debug(s"Starting api-request-logger-test $cnt") - - result.set(TraceContextGrpc.withGrpcContext(requestTraceContext) { - test(env) - }) - - // Check this, unless test is failing. - capturingLogger.assertNoMoreEvents() - result.get - } catch { - case e: Throwable => - progressLogger.info("Test failed with exception", e) - capturingLogger.eventQueue.iterator().forEachRemaining { event => - progressLogger.info(s"Remaining log event: $event") - } - throw e - } finally { - progressLogger.debug(s"Finished api-request-logger-test $cnt with ${result.get}") - env.close() - } - } - - "On a unary call" when { - - def createExpectedLogMessage( - content: String, - includeRequestTraceContext: Boolean = false, - ): String = { - val mainMessage = s"Request c.d.c.p.HelloService/Hello by testSender: $content" - val traceContextMessage = s"\n Request ${requestTraceContext.showTraceId}" - if (includeRequestTraceContext) mainMessage + traceContextMessage else mainMessage - } - - def assertRequestLogged: Assertion = { - capturingLogger.assertNextMessage( - _ should startWith( - "Request c.d.c.p.HelloService/Hello by testSender: " + - s"received headers Metadata(" + - s"traceparent=${requestTraceContext.asW3CTraceContext.value.parent}," + - s"grpc-accept-encoding=gzip,user-agent=grpc-java-inprocess" - ), - TRACE, - ) - capturingLogger.assertNextMessageIs( - createExpectedLogMessage( - "received a message Request(Hello server)", - includeRequestTraceContext = true, - ), - DEBUG, - ) - capturingLogger.assertNextMessageIs( - createExpectedLogMessage("finished receiving messages"), - TRACE, - ) - } - - "intercepting a successful request" must { - "log progress" in withEnv() { implicit env => - import env.* - - when(service.hello(Request)).thenReturn(Future.successful(Response)) - - client.hello(Request).futureValue shouldBe Response - - assertRequestLogged - capturingLogger.assertNextMessageIs( - createExpectedLogMessage("sending response headers Metadata()"), - TRACE, - ) - capturingLogger.assertNextMessageIs( - createExpectedLogMessage( - "sending response Response(Hello client)", - includeRequestTraceContext = true, - ), - DEBUG, - ) - capturingLogger.assertNextMessageIs(createExpectedLogMessage("succeeded(OK)"), DEBUG) - capturingLogger.assertNextMessageIs(createExpectedLogMessage("completed"), DEBUG) - } - } - - failureCases.forEvery { - case (status, trailers, expectedDescription, expectedLogLevel, expectedMessage) => - s"intercepting a failed request (${status.getCode})" must { - "log the failure" in withEnv() { implicit env => - import env.* - - when(service.hello(Request)) - .thenReturn(Future.failed(status.asRuntimeException(trailers))) - - assertClientFailure( - client.hello(Request), - status.withDescription(expectedDescription), - trailers, - ) - - assertRequestLogged - capturingLogger.assertNextMessageIs( - createExpectedLogMessage(expectedMessage), - expectedLogLevel, - status.getCause, - ) - 
capturingLogger.assertNextMessageIs(createExpectedLogMessage("completed"), DEBUG) - } - } - } - - forEvery( - throwableCases - .collect { - // Exclude non-exceptions, as they would fall to the underlying execution context and therefore need not be - // handled by GRPC. - case (description, exception: Exception) => (description, exception) - } - ) { case (description, exception) => - s"intercepting an unexpected async $description" must { - "log progress and the throwable" in withEnv() { implicit env => - import env.* - - when(service.hello(Request)).thenAnswer(Future.failed(exception)) - - assertClientFailure( - client.hello(Request), - Status.INTERNAL.withDescription(exception.getMessage), - ) - - assertRequestLogged - capturingLogger.assertNextMessageIs( - createExpectedLogMessage(s"failed with INTERNAL/${exception.getMessage}"), - ERROR, - exception, - ) - capturingLogger.assertNextMessageIs(createExpectedLogMessage("completed"), DEBUG) - } - } - } - - throwableCases.forEvery { case (description, throwable) => - s"intercepting an unexpected sync $description" must { - "log progress and the error" in withEnv() { implicit env => - import env.* - - when(service.hello(Request)).thenThrow(throwable) - - assertClientFailure( - client.hello(Request), - Status.UNKNOWN.withDescription("Application error processing RPC"), - ) - - assertRequestLogged - capturingLogger.assertNextMessageIs( - createExpectedLogMessage("failed with an unexpected throwable"), - ERROR, - throwable, - ) - - throwable match { - case NonFatal(_) => - capturingLogger.assertNextMessageIs(createExpectedLogMessage("completed"), DEBUG) - case _: Throwable => - captureSpuriousMessageAfterErrorThrow(throwable, createExpectedLogMessage) - } - } - } - } - - forEvery( - cancelCases - .collect { - case c @ (_, _: Status, _, _, _) => c - case c @ (_, _: Hello.Response, _, err, _) - // With grpc 1.35, io.grpc.stub.ServerCalls.ServerCallStreamObserverImpl.onNext only honors - // client-cancellations of streaming calls, so don't expect the following error in unary calls: - if err != grpcClientCancelledStreamed => - c - } - ) { - case ( - description, - afterCancelAction, - expectedLogLevel, - expectedLogMessage, - expectedThrowable, - ) => - s"intercepting a cancellation and $description" must { - "log the cancellation" in withEnv() { implicit env => - import env.* - - val receivedRequestP = Promise[Unit]() - val sendResponseP = Promise[Unit]() - - when(service.hello(Request)).thenAnswer[Hello.Request] { _ => - receivedRequestP.success(()) - sendResponseP.future.map(_ => - afterCancelAction match { - case status: Status => throw status.asRuntimeException - case response: Hello.Response => response - } - ) - } - - val context = Context.current().withCancellation() - context.run { () => - val requestF = client.hello(Request) - - receivedRequestP.future.futureValue - context.cancel(Exception) - - assertClientFailure(requestF, ClientCancelsStatus, clientCause = Exception) - } - - assertRequestLogged - capturingLogger.assertNextMessageIs(createExpectedLogMessage("cancelled"), INFO) - - // Wait until the cancellation has arrived at the server. - eventually() { - apiRequestLogger.cancelled.get() shouldBe true - } - capturingLogger.assertNoMoreEvents() - - // Server still sends a response. 
- sendResponseP.success(()) - - capturingLogger.assertNextMessageIs( - createExpectedLogMessage(expectedLogMessage), - expectedLogLevel, - expectedThrowable, - ) - } - } - } - } - - def setupStreamedService( - action: StreamObserver[Hello.Response] => Unit - )(implicit env: Env): Unit = { - import env.* - when(service.helloStreamed(refEq(Request), any[StreamObserver[Hello.Response]])) - .thenAnswer[Hello.Request, StreamObserver[Hello.Response]] { - case (_, observer: ServerCallStreamObserver[Hello.Response]) => - // Setting on cancel handler, because otherwise GRPC will throw if onNext/onComplete/onError is called after cancellation. - // (In production, we do the same.) - observer.setOnCancelHandler(() => ()) - - observer.onNext(Response) - observer.onNext(Response) - action(observer) - case (_, observer) => - logger.error(s"Invalid observer type: ${observer.getClass.getSimpleName}") - } - } - - def callStreamedServiceAndCheckClientFailure( - serverStatus: Status, - serverTrailers: Metadata = new Metadata, - clientCause: Throwable = null, - checkResponses: Boolean = true, - )(implicit env: Env): Assertion = { - import env.* - - val observer = new RecordingStreamObserver[Hello.Response] - client.helloStreamed(Request, observer) - assertClientFailure(observer.result, serverStatus, serverTrailers, clientCause) - if (checkResponses) { - observer.responses should have size 2 - } - succeed - } - - "On a streamed call" when { - - def createExpectedLogMessage( - content: String, - includeRequestTraceContext: Boolean = false, - ): String = { - val mainMessage = s"Request c.d.c.p.HelloService/HelloStreamed by testSender: $content" - val traceContextMessage = s"\n Request ${requestTraceContext.showTraceId}" - if (includeRequestTraceContext) mainMessage + traceContextMessage else mainMessage - } - - def assertRequestAndResponsesLogged: Assertion = { - capturingLogger.assertNextMessage( - _ should startWith( - "Request c.d.c.p.HelloService/HelloStreamed by testSender: " + - s"received headers Metadata(" + - s"traceparent=${requestTraceContext.asW3CTraceContext.value.parent}," + - s"grpc-accept-encoding=gzip,user-agent=grpc-java-inprocess" - ), - TRACE, - ) - capturingLogger.assertNextMessageIs( - createExpectedLogMessage( - "received a message Request(Hello server)", - includeRequestTraceContext = true, - ), - DEBUG, - ) - capturingLogger.assertNextMessageIs( - createExpectedLogMessage("finished receiving messages"), - TRACE, - ) - capturingLogger.assertNextMessageIs( - createExpectedLogMessage("sending response headers Metadata()"), - TRACE, - ) - capturingLogger.assertNextMessageIs( - createExpectedLogMessage( - "sending response Response(Hello client)", - includeRequestTraceContext = true, - ), - DEBUG, - ) - capturingLogger.assertNextMessageIs( - createExpectedLogMessage( - "sending response Response(Hello client)", - includeRequestTraceContext = true, - ), - DEBUG, - ) - } - - "intercepting a successful request" must { - "log progress" in withEnv() { implicit env => - import env.* - - setupStreamedService(_.onCompleted()) - - val observer = new RecordingStreamObserver[Hello.Response] - client.helloStreamed(Request, observer) - observer.result.futureValue - observer.responses should have size 2 - - assertRequestAndResponsesLogged - capturingLogger.assertNextMessageIs(createExpectedLogMessage("succeeded(OK)"), DEBUG) - capturingLogger.assertNextMessageIs(createExpectedLogMessage("completed"), DEBUG) - } - } - - failureCases.forEvery { - case (status, trailers, expectedDescription, expectedLogLevel, 
expectedLogMessage) => - s"intercepting a failure (${status.getCode})" must { - "log the failure" in withEnv() { implicit env => - setupStreamedService(_.onError(status.asRuntimeException(trailers))) - - callStreamedServiceAndCheckClientFailure( - status.withDescription(expectedDescription), - trailers, - ) - - assertRequestAndResponsesLogged - capturingLogger.assertNextMessageIs( - createExpectedLogMessage(expectedLogMessage), - expectedLogLevel, - status.getCause, - ) - capturingLogger.assertNextMessageIs(createExpectedLogMessage("completed"), DEBUG) - } - } - } - - throwableCases.forEvery { case (description, throwable) => - s"intercepting an unexpected async $description" must { - "log progress and the throwable" in withEnv() { implicit env => - setupStreamedService(_.onError(throwable)) - - callStreamedServiceAndCheckClientFailure( - Status.UNKNOWN.withDescription(throwable.getMessage) - ) - - assertRequestAndResponsesLogged - capturingLogger.assertNextMessageIs( - createExpectedLogMessage(s"failed with UNKNOWN/${throwable.getMessage}"), - ERROR, - throwable, - ) - capturingLogger.assertNextMessageIs(createExpectedLogMessage("completed"), DEBUG) - } - } - } - - throwableCases.forEvery { case (description, throwable) => - s"intercepting an unexpected sync $description" must { - "log progress and the error" in withEnv() { implicit env => - setupStreamedService(_ => throw throwable) - - callStreamedServiceAndCheckClientFailure( - Status.UNKNOWN.withDescription("Application error processing RPC") - ) - - assertRequestAndResponsesLogged - capturingLogger.assertNextMessageIs( - createExpectedLogMessage("failed with an unexpected throwable"), - ERROR, - throwable, - ) - - throwable match { - case NonFatal(_) => - capturingLogger.assertNextMessageIs(createExpectedLogMessage("completed"), DEBUG) - case _: Throwable => - captureSpuriousMessageAfterErrorThrow(throwable, createExpectedLogMessage) - } - } - } - } - - cancelCases.forEvery { - case ( - description, - afterCancelAction, - expectedLogLevel, - expectedLogMessage, - expectedThrowable, - ) => - s"intercepting a cancellation and $description" must { - "log the cancellation" in withEnv() { implicit env => - import env.* - - val receivedRequestP = Promise[Unit]() - val sendSecondResponseP = Promise[Unit]() - - setupStreamedService { observer => - receivedRequestP.success(()) - // Send second response despite cancellation. - // This cannot be prevented, as the sending raises with the on cancel handler. - sendSecondResponseP.future.onComplete(_ => - afterCancelAction match { - case () => observer.onCompleted() - case status: Status => observer.onError(status.asRuntimeException()) - case response: Hello.Response => observer.onNext(response) - } - ) - } - - val context = Context.current().withCancellation() - context.run { () => - receivedRequestP.future.onComplete(_ => context.cancel(Exception)) - callStreamedServiceAndCheckClientFailure( - ClientCancelsStatus, - clientCause = Exception, - checkResponses = false, // Some responses may get discarded due to cancellation. - ) - } - - // since our latest gRPC upgrade (https://github.com/DACH-NY/canton/pull/15304), - // the client might log one additional "completed" message before the cancellation. 
- capturingLogger.tryToPollMessage(createExpectedLogMessage("completed"), DEBUG) - - assertRequestAndResponsesLogged - capturingLogger.assertNextMessageIs(createExpectedLogMessage("cancelled"), INFO) - - // Wait until the server has received the cancellation - eventually() { - apiRequestLogger.cancelled.get() shouldBe true - } - capturingLogger.assertNoMoreEvents() - - sendSecondResponseP.success(()) - - afterCancelAction match { - case _: Status => - capturingLogger.assertNextMessageIs( - createExpectedLogMessage(expectedLogMessage), - expectedLogLevel, - expectedThrowable, - ) - case _: Hello.Response => - capturingLogger.assertNextMessage( - _ should include("sending response Response(Hello client)"), - DEBUG, - ) - case () => - capturingLogger.assertNextMessage( - _ should include("HelloService/HelloStreamed by testSender: succeeded(OK)"), - DEBUG, - ) - } - } - } - } - } - - "On a unary call with payload logging suppressed" when { - def createExpectedLogMessage( - content: String, - includeRequestTraceContext: Boolean = false, - ): String = { - val mainMessage = s"Request c.d.c.p.HelloService/Hello by testSender: $content" - val traceContextMessage = s"\n Request ${requestTraceContext.showTraceId}" - if (includeRequestTraceContext) mainMessage + traceContextMessage else mainMessage - } - - "intercepting a successful request" must { - "not log any messages" in withEnv(logMessagePayloads = false) { implicit env => - import env.* - - when(service.hello(Request)).thenReturn(Future.successful(Response)) - - client.hello(Request).futureValue shouldBe Response - - capturingLogger.assertNextMessageIs(createExpectedLogMessage("received headers "), TRACE) - capturingLogger.assertNextMessageIs( - createExpectedLogMessage("received a message ", includeRequestTraceContext = true), - DEBUG, - ) - capturingLogger.assertNextMessageIs( - createExpectedLogMessage("finished receiving messages"), - TRACE, - ) - capturingLogger.assertNextMessageIs( - createExpectedLogMessage("sending response headers "), - TRACE, - ) - capturingLogger.assertNextMessageIs( - createExpectedLogMessage("sending response ", includeRequestTraceContext = true), - DEBUG, - ) - capturingLogger.assertNextMessageIs(createExpectedLogMessage("succeeded(OK)"), DEBUG) - capturingLogger.assertNextMessageIs(createExpectedLogMessage("completed"), DEBUG) - } - } - - "intercepting a failure" must { - "not log any metadata" in withEnv(logMessagePayloads = false) { implicit env => - import env.* - - val status = InvalidArgumentStatus - val expectedLogMessage = s"failed with INVALID_ARGUMENT/${status.getDescription}" - - when(service.hello(Request)).thenReturn(Future.failed(status.asRuntimeException(Trailers))) - - assertClientFailure(client.hello(Request), status, Trailers) - - capturingLogger.assertNextMessageIs(createExpectedLogMessage("received headers "), TRACE) - capturingLogger.assertNextMessageIs( - createExpectedLogMessage("received a message ", includeRequestTraceContext = true), - DEBUG, - ) - capturingLogger.assertNextMessageIs( - createExpectedLogMessage("finished receiving messages"), - TRACE, - ) - capturingLogger.assertNextMessageIs( - createExpectedLogMessage(expectedLogMessage), - INFO, - status.getCause, - ) - capturingLogger.assertNextMessageIs(createExpectedLogMessage("completed"), DEBUG) - } - } - } - - "On a streamed call with very short message limit" when { - def createExpectedLogMessage( - content: String, - includeRequestTraceContext: Boolean = false, - ): String = { - val mainMessage = s"Request 
c.d.c.p.HelloService/HelloStreamed by testSender: $content" - val traceContextMessage = s"\n Request ${requestTraceContext.showTraceId}" - if (includeRequestTraceContext) mainMessage + traceContextMessage else mainMessage - } - - def assertRequestAndResponsesLogged: Assertion = { - capturingLogger.assertNextMessageIs( - "Request c.d.c.p.HelloService/HelloStreamed by testSender: " + - "received headers Met...", - TRACE, - ) - capturingLogger.assertNextMessageIs( - createExpectedLogMessage( - "received a message Request(Hel...)", - includeRequestTraceContext = true, - ), - DEBUG, - ) - capturingLogger.assertNextMessageIs( - createExpectedLogMessage("finished receiving messages"), - TRACE, - ) - capturingLogger.assertNextMessageIs( - createExpectedLogMessage("sending response headers Met..."), - TRACE, - ) - capturingLogger.assertNextMessageIs( - createExpectedLogMessage( - "sending response Response(Hel...)", - includeRequestTraceContext = true, - ), - DEBUG, - ) - capturingLogger.assertNextMessageIs( - createExpectedLogMessage( - "sending response Response(Hel...)", - includeRequestTraceContext = true, - ), - DEBUG, - ) - } - - "intercepting a successful request" must { - "log a short version of messages" in withEnv(maxStringLength = 3, maxMetadataSize = 3) { - implicit env => - import env.* - - setupStreamedService(_.onCompleted()) - - val observer = new RecordingStreamObserver[Hello.Response] - client.helloStreamed(Request, observer) - observer.result.futureValue - observer.responses should have size 2 - - assertRequestAndResponsesLogged - capturingLogger.assertNextMessageIs(createExpectedLogMessage("succeeded(OK)"), DEBUG) - capturingLogger.assertNextMessageIs(createExpectedLogMessage("completed"), DEBUG) - } - } - - "intercepting a failure" must { - "log a short version of the metadata" in withEnv(maxStringLength = 3, maxMetadataSize = 3) { - implicit env => - val status = InvalidArgumentStatus - val expectedLogMessage = - s"failed with INVALID_ARGUMENT/${status.getDescription}\n Trailers: Met..." - - setupStreamedService(_.onError(status.asRuntimeException(Trailers))) - - callStreamedServiceAndCheckClientFailure(status, Trailers) - - assertRequestAndResponsesLogged - capturingLogger.assertNextMessageIs( - createExpectedLogMessage(expectedLogMessage), - INFO, - status.getCause, - ) - capturingLogger.assertNextMessageIs(createExpectedLogMessage("completed"), DEBUG) - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/networking/grpc/CantonGrpcUtilTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/networking/grpc/CantonGrpcUtilTest.scala deleted file mode 100644 index 944fdba857..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/networking/grpc/CantonGrpcUtilTest.scala +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.networking.grpc - -import cats.data.EitherT -import com.digitalasset.canton.connection.GrpcApiInfoService -import com.digitalasset.canton.connection.v30.ApiInfoServiceGrpc -import com.digitalasset.canton.lifecycle.OnShutdownRunner.PureOnShutdownRunner -import com.digitalasset.canton.lifecycle.UnlessShutdown.AbortedDueToShutdown -import com.digitalasset.canton.logging.TracedLogger -import com.digitalasset.canton.networking.grpc.GrpcError.* -import com.digitalasset.canton.protobuf.HelloServiceGrpc.{HelloService, HelloServiceStub} -import com.digitalasset.canton.protobuf.{Hello, HelloServiceGrpc} -import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import io.grpc.* -import io.grpc.ServerInterceptors.intercept -import io.grpc.Status.Code.* -import io.grpc.inprocess.{InProcessChannelBuilder, InProcessServerBuilder} -import io.grpc.util.MutableHandlerRegistry -import org.scalatest.Outcome -import org.scalatest.wordspec.FixtureAnyWordSpec - -import java.util.concurrent.TimeUnit -import java.util.concurrent.atomic.AtomicBoolean -import scala.concurrent.duration.{Duration, DurationInt} -import scala.concurrent.{Await, ExecutionContext, Future, Promise} - -object CantonGrpcUtilTest { - val request: Hello.Request = Hello.Request("Hello server") - val response: Hello.Response = Hello.Response("Hello client") - - class Env(val service: HelloService, logger: TracedLogger)(ec: ExecutionContext) { - val channelName: String = InProcessServerBuilder.generateName() - - val registry: MutableHandlerRegistry = new MutableHandlerRegistry - - val server: Server = InProcessServerBuilder - .forName(channelName) - .fallbackHandlerRegistry(registry) - .build() - - val helloServiceDefinition: ServerServiceDefinition = - intercept(HelloServiceGrpc.bindService(service, ec), TraceContextGrpc.serverInterceptor) - val apiInfoServiceDefinition: ServerServiceDefinition = - ApiInfoServiceGrpc.bindService( - new GrpcApiInfoService("correct-api"), - ec, - ) - - registry.addService(helloServiceDefinition) - registry.addService(apiInfoServiceDefinition) - - val onShutdownRunner = new PureOnShutdownRunner(logger) - val channelBuilder: ManagedChannelBuilderProxy = ManagedChannelBuilderProxy( - InProcessChannelBuilder - .forName(channelName) - .intercept(TraceContextGrpc.clientInterceptor) - ) - val managedChannel: GrpcManagedChannel = - GrpcManagedChannel( - "channel-to-broken-client", - channelBuilder.build(), - onShutdownRunner, - logger, - ) - val client: GrpcClient[HelloServiceStub] = - GrpcClient.create(managedChannel, HelloServiceGrpc.stub) - - def sendRequest( - timeoutMs: Long = 2000 - )(implicit - traceContext: TraceContext, - ec: ExecutionContext, - ): EitherT[Future, GrpcError, Hello.Response] = - CantonGrpcUtil - .sendGrpcRequest(client, "serverName")( - _.hello(request), - "command", - Duration(timeoutMs, TimeUnit.MILLISECONDS), - logger, - ) - .onShutdown(throw new IllegalStateException("Unexpected shutdown")) - - def close(): Unit = { - managedChannel.close() - - server.shutdown() - server.awaitTermination() - } - } - -} - -@SuppressWarnings(Array("org.wartremover.warts.Null")) -class CantonGrpcUtilTest extends FixtureAnyWordSpec with BaseTest with HasExecutionContext { - import CantonGrpcUtilTest.* - - override type FixtureParam = Env - - override def withFixture(test: OneArgTest): Outcome = { - val env = - new Env(mock[HelloService], 
loggerFactory.append(test.name, "").getTracedLogger(getClass))( - parallelExecutionContext - ) - try { - withFixture(test.toNoArgTest(env)) - } finally { - env.close() - } - } - - "A service" when { - - "the client sends a request" must { - "send a response" in { env => - import env.* - - server.start() - - when(service.hello(request)).thenReturn(Future.successful(response)) - - sendRequest().futureValue shouldBe response - } - } - - "the client cancels a request" must { - "abort the request with CANCELLED and not log a warning" in { env => - import env.* - - server.start() - - val promise = Promise() - when(service.hello(request)).thenReturn(promise.future) - - val context = Context.ROOT.withCancellation() - context.run { () => - val requestF = sendRequest().value - context.close() - - val err = requestF.futureValue.left.value - err shouldBe a[GrpcClientGaveUp] - err.status.getCode shouldBe CANCELLED - Option(err.status.getCause) shouldBe None - } - } - } - - "the client cancels a request with a specific exception" must { - "abort the request with CANCELLED and log a warning" in { env => - import env.* - - val ex = new IllegalStateException("test description") - - server.start() - - val promise = Promise() - when(service.hello(request)).thenReturn(promise.future) - - val context = Context.ROOT.withCancellation() - context.run { () => - val requestF = - loggerFactory.assertLogs( - sendRequest().value, - _.warningMessage shouldBe - """Request failed for serverName. - | GrpcClientGaveUp: CANCELLED/Context cancelled - | Request: command - | Causes: test description""".stripMargin, - ) - context.cancel(ex) - - val err = requestF.futureValue.left.value - err shouldBe a[GrpcClientGaveUp] - err.status.getCode shouldBe CANCELLED - err.status.getCause shouldBe ex - } - } - } - - "the request deadline exceeds" must { - "abort the request with DEADLINE_EXCEEDED" in { env => - import env.* - - server.start() - - val promise = Promise() - when(service.hello(request)).thenReturn(promise.future) - - val requestF = loggerFactory.assertLogs( - sendRequest(200).value, - _.warningMessage should - startWith("""Request failed for serverName. - | GrpcClientGaveUp: DEADLINE_EXCEEDED/""".stripMargin), - ) - - val err = requestF.futureValue.left.value - err shouldBe a[GrpcClientGaveUp] - err.status.getCode shouldBe DEADLINE_EXCEEDED - } - } - - "the request timeout is negative" must { - "abort the request with DEADLINE_EXCEEDED" in { env => - import env.* - - val requestF = loggerFactory.assertLogs( - sendRequest(-100).value, - _.warningMessage should - startWith("""Request failed for serverName. - | GrpcClientGaveUp: DEADLINE_EXCEEDED/""".stripMargin), - ) - - val err = requestF.futureValue.left.value - err shouldBe a[GrpcClientGaveUp] - err.status.getCode shouldBe DEADLINE_EXCEEDED - } - } - - "the server is permanently unavailable" must { - "give up" in { env => - import env.* - - val requestF = loggerFactory.assertLoggedWarningsAndErrorsSeq( - sendRequest( - 4_000 // Use a longer gRPC deadline to account for retries and scheduling delays - ).value, - logEntries => { - logEntries should not be empty - val (unavailableEntries, giveUpEntry) = logEntries.splitAt(logEntries.size - 1) - forEvery(unavailableEntries) { logEntry => - logEntry.warningMessage shouldBe - s"""Request failed for serverName. Is the server running? Did you configure the server address as 0.0.0.0? Are you using the right TLS settings? 
(details logged as DEBUG) - | GrpcServiceUnavailable: UNAVAILABLE/Could not find server: $channelName - | Request: command""".stripMargin - } - - giveUpEntry.loneElement.warningMessage shouldBe "Retry timeout has elapsed, giving up." - }, - ) - - val err = Await.result(requestF, 5.seconds).left.value - err shouldBe a[GrpcServiceUnavailable] - err.status.getCode shouldBe UNAVAILABLE - managedChannel.channel.getState(false) shouldBe ConnectivityState.TRANSIENT_FAILURE - } - } - - "the server is temporarily unavailable" must { - "wait for the server" in { env => - import env.* - - when(service.hello(request)).thenReturn(Future.successful(response)) - - val requestF = loggerFactory.assertLoggedWarningsAndErrorsSeq( - sendRequest(10000).value, - logEntries => { - logEntries should not be empty - forEvery(logEntries) { logEntry => - logEntry.warningMessage shouldBe - s"""Request failed for serverName. Is the server running? Did you configure the server address as 0.0.0.0? Are you using the right TLS settings? (details logged as DEBUG) - | GrpcServiceUnavailable: UNAVAILABLE/Could not find server: $channelName - | Request: command""".stripMargin - } - }, - ) - server.start() - - requestF.futureValue shouldBe Right(response) - } - } - - "the service is unavailable" must { - "give up" in { env => - import env.* - - server.start() - registry.removeService(helloServiceDefinition) shouldBe true - - val requestF = loggerFactory.assertLoggedWarningsAndErrorsSeq( - sendRequest().value, - logEntries => { - logEntries should not be empty - val (unavailableEntries, giveUpEntry) = logEntries.splitAt(logEntries.size - 1) - - forEvery(unavailableEntries) { logEntry => - logEntry.warningMessage shouldBe - s"""Request failed for serverName. Is the server initialized or is the server incompatible? - | GrpcServiceUnavailable: UNIMPLEMENTED/Method not found: com.digitalasset.canton.protobuf.HelloService/Hello - | Request: command""".stripMargin - } - - giveUpEntry.loneElement.warningMessage shouldBe "Retry timeout has elapsed, giving up." - }, - ) - - val err = Await.result(requestF, 5.seconds).left.value - err shouldBe a[GrpcServiceUnavailable] - err.status.getCode shouldBe UNIMPLEMENTED - } - } - - "the server fails with status INVALID_ARGUMENT" must { - "report invalid argument" in { env => - import env.* - - val status = Status.INVALID_ARGUMENT - .withCause(new NullPointerException) - .withDescription("test description") - - server.start() - when(service.hello(request)).thenReturn(Future.failed(status.asRuntimeException())) - - val requestF = loggerFactory.assertLogs( - sendRequest().value, - _.errorMessage shouldBe - """Request failed for serverName. - | GrpcClientError: INVALID_ARGUMENT/test description - | Request: command""".stripMargin, - ) - - val err = Await.result(requestF, 2.seconds).left.value - err shouldBe a[GrpcClientError] - err.status.getCode shouldBe INVALID_ARGUMENT - err.status.getDescription shouldBe "test description" - err.status.getCause shouldBe null - } - } - - "the server fails with an async IllegalArgumentException" must { - "report internal" in { env => - import env.* - - server.start() - when(service.hello(request)) - .thenReturn(Future.failed(new IllegalArgumentException("test description"))) - - val requestF = loggerFactory.assertLogs( - sendRequest().value, - _.errorMessage shouldBe - """Request failed for serverName. 
- | GrpcServerError: INTERNAL/test description - | Request: command""".stripMargin, - ) - - val err = Await.result(requestF, 2.seconds).left.value - err shouldBe a[GrpcServerError] - err.status.getCode shouldBe INTERNAL - err.status.getDescription shouldBe "test description" - err.status.getCause shouldBe null - } - } - - "the server fails with a sync IllegalArgumentException" must { - "report unknown" in { env => - import env.* - - server.start() - when(service.hello(request)).thenThrow(new IllegalArgumentException("test exception")) - - val requestF = loggerFactory.assertLogs( - sendRequest().value, - err => { - err.errorMessage shouldBe - """Request failed for serverName. - | GrpcServerError: UNKNOWN/Application error processing RPC - | Request: command""".stripMargin - }, - ) - - val err = Await.result(requestF, 2.seconds).left.value - err shouldBe a[GrpcServerError] - - // Note that reporting UNKNOWN to the client is not particularly helpful, but this is what GRPC does. - // We usually have the ApiRequestLogger to ensure that this case is mapped to INTERNAL. - - err.status.getCode shouldBe UNKNOWN - err.status.getDescription shouldBe "Application error processing RPC" - err.status.getCause shouldBe null - } - } - - "the client is broken" must { - "fail" in { env => - import env.* - - // Create a mocked client - val brokenClient = mock[HelloServiceStub]( - withSettings.useConstructor(managedChannel.channel, CallOptions.DEFAULT) - ) - when(brokenClient.build(*[Channel], *[CallOptions])).thenReturn(brokenClient) - - // Make the client fail with an embedded cause - val cause = new RuntimeException("test exception") - val status = Status.INTERNAL.withDescription("test description").withCause(cause) - when(brokenClient.hello(request)).thenReturn(Future.failed(status.asRuntimeException())) - - val brokenGrpcClient = GrpcClient.create(managedChannel, _ => brokenClient) - - // Send the request - val requestF = loggerFactory.assertLogs( - CantonGrpcUtil - .sendGrpcRequest(brokenGrpcClient, "serverName")( - _.hello(request), - "command", - Duration(2000, TimeUnit.MILLISECONDS), - logger, - ) - .value - .failOnShutdown, - logEntry => { - logEntry.errorMessage shouldBe - """Request failed for serverName. 
- | GrpcServerError: INTERNAL/test description - | Request: command - | Causes: test exception""".stripMargin - logEntry.throwable shouldBe Some(cause) - }, - ) - - // Check the response - val err = Await.result(requestF, 2.seconds).left.value - err shouldBe a[GrpcServerError] - - err.status.getCode shouldBe INTERNAL - err.status.getDescription shouldBe "test description" - err.status.getCause shouldBe cause - } - } - - def fastClosing(triggerClose: () => Unit)(env: Env): Unit = { - import env.* - - val requestMade = new AtomicBoolean(false) - - server.start() - when(service.hello(request)).thenAnswer { (_: Hello.Request) => - requestMade.set(true) - Future.never - } - - val longTimeout = 1.hour - - val start = System.nanoTime() - - val requestF = - CantonGrpcUtil.sendGrpcRequest(client, "serverName")( - _.hello(request), - "command", - longTimeout, - logger, - ) - eventually() { - requestMade.get() shouldBe true - } - triggerClose() - requestF.value.unwrap.futureValue shouldBe AbortedDueToShutdown - - val stop = System.nanoTime() - val duration = Duration.fromNanos(stop - start) - duration shouldBe <(2.seconds) // shorter than the normal shutdown timeouts - } - - "the client closes the channel" must { - "terminate immediately" in { env => - import env.* - fastClosing(() => client.channel.close())(env) - } - } - - "the client is closed" must { - "terminate immediately" in { env => - import env.* - fastClosing(() => onShutdownRunner.close())(env) - } - } - - "checking for correct API" must { - "succeed if API is correct" in { env => - import env.* - - server.start() - val resultET = CantonGrpcUtil - .checkCantonApiInfo( - "server-name", - "correct-api", - channelBuilder, - logger, - timeouts.network, - onShutdownRunner, - None, - ) - .failOnShutdown - resultET.futureValue shouldBe () - } - - "fail if API is incorrect" in { env => - import env.* - - server.start() - val requestET = CantonGrpcUtil - .checkCantonApiInfo( - "server-name", - "other-api", - channelBuilder, - logger, - timeouts.network, - onShutdownRunner, - None, - ) - .failOnShutdown - - val resultE = requestET.value.futureValue - inside(resultE) { case Left(message) => - message should include regex """Endpoint '.*' provides 'correct-api', expected 'other-api'\. This message indicates a possible mistake in configuration, please check node connection settings for 'server-name'\.""" - } - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/pekkostreams/FutureTimeouts.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/pekkostreams/FutureTimeouts.scala deleted file mode 100644 index f26fb0996d..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/pekkostreams/FutureTimeouts.scala +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.pekkostreams - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.concurrent.DirectExecutionContext -import org.apache.pekko.actor.ActorSystem -import org.scalatest.Assertion -import org.scalatest.wordspec.AsyncWordSpec - -import scala.concurrent.duration.FiniteDuration -import scala.concurrent.{Future, Promise, TimeoutException} -import scala.util.Try -import scala.util.control.NoStackTrace - -trait FutureTimeouts { self: AsyncWordSpec & BaseTest => - - protected def system: ActorSystem - - protected def expectTimeout(f: Future[Any], duration: FiniteDuration): Future[Assertion] = { - val promise: Promise[Any] = Promise[Any]() - - val runnable: Runnable = () => - promise.failure( - new TimeoutException(s"Future timed out after $duration as expected.") with NoStackTrace - ) - val cancellable = system.scheduler.scheduleOnce( - duration, - runnable, - )(system.dispatcher) - - f.onComplete((_: Try[Any]) => cancellable.cancel())(DirectExecutionContext(noTracingLogger)) - - recoverToSucceededIf[TimeoutException]( - Future.firstCompletedOf[Any](List[Future[Any]](f, promise.future))( - DirectExecutionContext(noTracingLogger) - ) - ) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/pekkostreams/dispatcher/DispatcherRaceSpec.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/pekkostreams/dispatcher/DispatcherRaceSpec.scala deleted file mode 100644 index a1714a9694..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/pekkostreams/dispatcher/DispatcherRaceSpec.scala +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.pekkostreams.dispatcher - -import com.daml.ledger.api.testing.utils.PekkoBeforeAndAfterAll -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.concurrent.{DirectExecutionContext, Threading} -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.pekkostreams.dispatcher.DispatcherImpl.Incrementable -import com.digitalasset.canton.pekkostreams.dispatcher.SubSource.RangeSource -import org.apache.pekko.NotUsed -import org.apache.pekko.stream.scaladsl.{Keep, Sink, Source} -import org.scalatest.concurrent.ScalaFutures -import org.scalatest.matchers.should.Matchers -import org.scalatest.time.{Milliseconds, Seconds, Span} -import org.scalatest.wordspec.AnyWordSpec - -import java.util.concurrent.atomic.AtomicReference -import scala.concurrent.{ExecutionContextExecutor, Future, blocking} - -// Consider merging/reviewing the tests we have around the Dispatcher! -class DispatcherRaceSpec - extends AnyWordSpec - with PekkoBeforeAndAfterAll - with Matchers - with ScalaFutures - with BaseTest { - - case class Index(value: Int) extends Incrementable[Index] with Ordered[Index] { - def increment: Index = Index(value + 1) - def compare(that: Index): Int = value.compare(that.value) - } - - override implicit def patienceConfig: PatienceConfig = - PatienceConfig(scaled(Span(10, Seconds)), scaled(Span(250, Milliseconds))) - - "A Dispatcher" should { - "not race when creating new subscriptions" in { - // The test setup here is a little different from the above tests, - // because we wanted to be specific about emitted pairs and use of Thread.sleep. 
- - implicit val ec: ExecutionContextExecutor = materializer.executionContext - - val elements = new AtomicReference(Map.empty[Int, Int]) - def readElement(i: Index): Future[Index] = Future { - blocking( - Threading.sleep(10) - ) // In a previous version of Dispatcher, this sleep caused a race condition. - Index(elements.get()(i.value)) - } - def readSuccessor(i: Index): Index = i.increment - - // compromise between catching flakes and not taking too long - 0 until 25 foreach { _ => - val d: Dispatcher[Index] = - Dispatcher(name = "test", firstIndex = Index(1), headAtInitialization = None) - - // Verify that the results are what we expected - val subscriptions = 1 until 10 map { i => - elements.updateAndGet(m => m + (i -> i)) - d.signalNewHead(Index(i)) - d.startingAt( - startExclusive = Option.unless(i == 1)(Index(i - 1)), - subSource = RangeSource((startInclusive, endInclusive) => - Source - .unfoldAsync(startInclusive) { index => - if (index > endInclusive) Future.successful(None) - else { - readElement(index).map { t => - val nextIndex = readSuccessor(index) - Some((nextIndex, (index, t))) - }(DirectExecutionContext(noTracingLogger)) - } - } - ), - ).toMat(Sink.seq)(Keep.right[NotUsed, Future[Seq[(Index, Index)]]]) - .run() - } - - d.shutdown().discard - - subscriptions.zip(1 until 10) foreach { case (f, i) => - whenReady(f) { vals => - vals.map(_._1.value) should contain theSameElementsAs (i to 9) - vals.map(_._2.value) should contain theSameElementsAs (i until 10) - } - } - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/pekkostreams/dispatcher/DispatcherSpec.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/pekkostreams/dispatcher/DispatcherSpec.scala deleted file mode 100644 index 0d93cc543f..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/pekkostreams/dispatcher/DispatcherSpec.scala +++ /dev/null @@ -1,401 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.pekkostreams.dispatcher - -import com.daml.ledger.api.testing.utils.PekkoBeforeAndAfterAll -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.concurrent.Threading -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.pekkostreams.FutureTimeouts -import com.digitalasset.canton.pekkostreams.dispatcher.DispatcherImpl.Incrementable -import com.digitalasset.canton.pekkostreams.dispatcher.SubSource.RangeSource -import com.digitalasset.canton.util.Thereafter.syntax.* -import com.digitalasset.canton.util.TryUtil -import org.apache.pekko.stream.DelayOverflowStrategy -import org.apache.pekko.stream.scaladsl.{Sink, Source} -import org.scalatest.concurrent.{AsyncTimeLimitedTests, ScaledTimeSpans} -import org.scalatest.matchers.should.Matchers -import org.scalatest.time.Span -import org.scalatest.time.SpanSugar.* -import org.scalatest.wordspec.AsyncWordSpec -import org.scalatest.{Assertion, BeforeAndAfter} - -import java.util.Random -import java.util.concurrent.atomic.AtomicReference -import java.util.concurrent.{Executors, TimeUnit} -import scala.collection.immutable -import scala.collection.immutable.TreeMap -import scala.concurrent.duration.Duration -import scala.concurrent.{ExecutionContext, Future, blocking} -import scala.util.{Failure, Success} - -class DispatcherSpec - extends AsyncWordSpec - with PekkoBeforeAndAfterAll - with BeforeAndAfter - with Matchers - with FutureTimeouts - with ScaledTimeSpans - with AsyncTimeLimitedTests - with BaseTest { - - // Newtype wrappers to avoid type mistakes - case class Value(v: Int) - - case class Index(i: Int) extends Ordered[Index] with Incrementable[Index] { - def increment: Index = Index(i + 1) - def compare(that: Index): Int = i.compare(that.i) - } - - /* - The values are stored indexed by Index. - The Indices form a linked list, indexed by successorStore. - */ - val r = new Random() - private val store = new AtomicReference(TreeMap.empty[Index, Value]) - private val firstIndex = Index(1) - private val nextIndex = new AtomicReference(firstIndex) - - implicit val ec: ExecutionContext = - ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(128)) - - after { - clearUp() - } - - def clearUp() = { - store.set(TreeMap.empty) - nextIndex.set(firstIndex) - } - - def valueGenerator: Index => Value = i => Value(i.i) - - def gen( - count: Int, - publishTo: Option[Dispatcher[Index]] = None, - meanDelayMs: Int = 0, - ): IndexedSeq[(Index, Value)] = { - def genManyHelper(i: Index, count: Int): LazyList[(Index, Value)] = - if (count == 0) { - LazyList.empty - } else { - val next = LazyList - .iterate(i)(i => i.increment) - .filter(i => i != nextIndex.get() && !store.get().contains(i)) - .head - val v = valueGenerator(i) - store.updateAndGet(_ + (i -> v)) - nextIndex.set(next) - publishTo foreach { d => - d.signalNewHead(i) - } - blocking(Threading.sleep(r.nextInt(meanDelayMs + 1).toLong * 2)) - LazyList.cons((i, v), genManyHelper(next, count - 1)) - } - - genManyHelper(nextIndex.get(), count).toIndexedSeq.map { case (i, v) => (i, v) } - } - - def publish(head: Index, dispatcher: Dispatcher[Index], meanDelayMs: Int = 0): Unit = { - dispatcher.signalNewHead(head) - blocking( - Threading.sleep(r.nextInt(meanDelayMs + 1).toLong * 2) - ) - } - - /** Collect the actual results between start (exclusive) and stop (inclusive) from the given - * Dispatcher, then cancels the obtained stream. 
- */ - private def collect( - start: Option[Index], - stop: Index, - src: Dispatcher[Index], - subSrc: SubSource[Index, Value], - delayMs: Int = 0, - ): Future[immutable.IndexedSeq[(Index, Value)]] = - if (delayMs > 0) { - src - .startingAt(startExclusive = start, subSource = subSrc, endInclusive = Some(stop)) - .delay(Duration(delayMs.toLong, TimeUnit.MILLISECONDS), DelayOverflowStrategy.backpressure) - .runWith(Sink.collection) - } else { - src - .startingAt(start, subSrc, Some(stop)) - .runWith(Sink.collection) - } - - private val rangeQuerySteppingMode = RangeSource[Index, Value]((startInclusive, endInclusive) => - Source( - store - .get() - .rangeFrom(startInclusive) - .rangeTo(endInclusive) - ) - ) - - private def slowRangeQuerySteppingMode(delayMs: Int) = - RangeSource[Index, Value]((startInclusive, endInclusive) => - Source( - store - .get() - .rangeFrom(startInclusive) - .rangeTo(endInclusive) - ) - .throttle(1, delayMs.milliseconds * 2) - ) - - def newDispatcher( - begin: Index = firstIndex, - end: Option[Index] = None, - ): Dispatcher[Index] = - Dispatcher[Index](name = "test", firstIndex = begin, headAtInitialization = end) - - private def forAllSteppingModes( - rangeQuery: RangeSource[Index, Value] = rangeQuerySteppingMode - )(f: SubSource[Index, Value] => Future[Assertion]): Future[Assertion] = - for { - _ <- f(rangeQuery) - } yield succeed - - "A Dispatcher" should { - - "fail to initialize if end index < begin index" in { - forAllSteppingModes() { _ => - recoverToSucceededIf[IllegalArgumentException]( - Future(newDispatcher(firstIndex, Some(Index(-1)))) - ) - } - } - - "return errors after being started and stopped" in { - forAllSteppingModes() { subSrc => - val dispatcher = newDispatcher() - - dispatcher.shutdown().discard - - dispatcher.signalNewHead(Index(1)) // should not throw - dispatcher - .startingAt(startExclusive = None, subSource = subSrc) - .runWith(Sink.ignore) - .failed - .map(_ shouldBe a[IllegalStateException]) - } - } - - "work with one outlet" in { - forAllSteppingModes() { subSrc => - val dispatcher = newDispatcher() - val pairs = gen(100) - val out = collect(None, pairs.last._1, dispatcher, subSrc) - publish(pairs.last._1, dispatcher) - out.map(_ shouldEqual pairs) - } - } - - "complete when the dispatcher completes" in { - forAllSteppingModes() { subSrc => - val dispatcher = newDispatcher() - val pairs50 = gen(50) - val pairs100 = gen(50) - val i50 = pairs50.last._1 - val i100 = pairs100.last._1 - - publish(i50, dispatcher) - val out = collect(Some(i50), i100, dispatcher, subSrc) - publish(i100, dispatcher) - - dispatcher.shutdown().discard - - out.map(_ shouldEqual pairs100) - } - } - - "fail when the dispatcher fails" in { - forAllSteppingModes() { subSrc => - val dispatcher = newDispatcher() - val pairs50 = gen(50) - val i50 = pairs50.last._1 - val pairs100 = gen(50) - val i100 = pairs100.last._1 - - publish(i50, dispatcher) - val out = collect(None, i100, dispatcher, subSrc) - - val expectedException = new RuntimeException("some exception") - - val newException = () => expectedException - - for { - _ <- dispatcher.cancel(newException) - _ = publish(i100, dispatcher) - - _ <- out.transform { - case Failure(`expectedException`) => TryUtil.unit - case Failure(other) => - fail(s"Expected stream failed with $expectedException but got $other") - case Success(_) => fail("Expected stream failed") - } - } yield succeed - } - } - - "work with mid-stream subscriptions" in { - forAllSteppingModes() { subSrc => - val dispatcher = newDispatcher() - - val pairs50 
= gen(50) - val pairs100 = gen(50) - val i50 = pairs50.last._1 - val i100 = pairs100.last._1 - - publish(i50, dispatcher) - val out = collect(Some(i50), i100, dispatcher, subSrc) - publish(i100, dispatcher) - - out.map(_ shouldEqual pairs100) - } - } - - "work with mid-stream cancellation" in { - forAllSteppingModes() { subSrc => - val dispatcher = newDispatcher() - - val pairs50 = gen(50) - val i50 = pairs50.last._1 - // the below cancels the stream after reaching element 50 - val out = collect(None, i50, dispatcher, subSrc) - gen(50, publishTo = Some(dispatcher)) - - out.map(_ shouldEqual pairs50) - } - } - - "work with many outlets at different start/end indices" in { - forAllSteppingModes() { subSrc => - val dispatcher = newDispatcher() - - val pairs25 = gen(25) - val pairs50 = gen(25) - val pairs75 = gen(25) - val pairs100 = gen(25) - val i25 = pairs25.last._1 - val i50 = pairs50.last._1 - val i75 = pairs75.last._1 - val i100 = pairs100.last._1 - - val outF = collect(None, i50, dispatcher, subSrc) - publish(i25, dispatcher) - val out25F = collect(Some(i25), i75, dispatcher, subSrc) - publish(i50, dispatcher) - val out50F = collect(Some(i50), i100, dispatcher, subSrc) - publish(i75, dispatcher) - val out75F = collect(Some(i75), i100, dispatcher, subSrc) - publish(i100, dispatcher) - - dispatcher.shutdown().discard - - validate4Sections(pairs25, pairs50, pairs75, pairs100, outF, out25F, out50F, out75F) - } - } - - "work with slow producers and consumers" in { - forAllSteppingModes(slowRangeQuerySteppingMode(10)) { subSrc => - val dispatcher = newDispatcher() - - val pairs25 = gen(25) - val pairs50 = gen(25) - val pairs75 = gen(25) - val pairs100 = gen(25) - val i25 = pairs25.last._1 - val i50 = pairs50.last._1 - val i75 = pairs75.last._1 - val i100 = pairs100.last._1 - - val outF = collect(None, i50, dispatcher, subSrc, delayMs = 10) - publish(i25, dispatcher) - val out25F = collect(Some(i25), i75, dispatcher, subSrc, delayMs = 10) - publish(i50, dispatcher) - val out50F = collect(Some(i50), i100, dispatcher, subSrc, delayMs = 10) - publish(i75, dispatcher) - val out75F = collect(Some(i75), i100, dispatcher, subSrc, delayMs = 10) - publish(i100, dispatcher) - - dispatcher.shutdown().discard - - validate4Sections(pairs25, pairs50, pairs75, pairs100, outF, out25F, out50F, out75F) - } - } - - "handle subscriptions for future elements by waiting for the ledger end to reach them" in { - forAllSteppingModes() { subSrc => - val dispatcher = newDispatcher() - - val startIndex = 10 - val pairs25 = gen(25).drop(startIndex) - val i25 = pairs25.last._1 - - val resultsF = collect(Some(Index(startIndex)), i25, dispatcher, subSrc) - publish(i25, dispatcher) - for { - results <- resultsF - } yield { - dispatcher.shutdown().discard - results shouldEqual pairs25 - } - } - } - - "stall subscriptions for future elements until the ledger end reaches the start index" in { - val dispatcher = newDispatcher() - - val startIndex = 10 - val pairs25 = gen(25).drop(startIndex) - val i25 = pairs25.last._1 - - expectTimeout( - collect(Some(Index(startIndex)), i25, dispatcher, rangeQuerySteppingMode), - 1.second, - ).thereafterF { _ => - dispatcher.shutdown() - } - } - - "tolerate non-monotonic Head updates" in { - val dispatcher = newDispatcher() - val pairs = gen(100) - val out = collect(None, pairs.last._1, dispatcher, rangeQuerySteppingMode) - val updateCount = 10 - val random = new Random() - 1.to(updateCount).foreach(_ => dispatcher.signalNewHead(Index(random.nextInt(100)))) - 
dispatcher.signalNewHead(Index(100)) - out.map(_ shouldEqual pairs).thereafterF { _ => - dispatcher.shutdown() - } - } - } - - private def validate4Sections( - pairs25: IndexedSeq[(Index, Value)], - pairs50: IndexedSeq[(Index, Value)], - pairs75: IndexedSeq[(Index, Value)], - pairs100: IndexedSeq[(Index, Value)], - outF: Future[immutable.IndexedSeq[(Index, Value)]], - out25F: Future[immutable.IndexedSeq[(Index, Value)]], - out50F: Future[immutable.IndexedSeq[(Index, Value)]], - out75F: Future[immutable.IndexedSeq[(Index, Value)]], - ) = - for { - out <- outF - out25 <- out25F - out50 <- out50F - out75 <- out75F - } yield { - out shouldEqual pairs25 ++ pairs50 - out25 shouldEqual pairs50 ++ pairs75 - out50 shouldEqual pairs75 ++ pairs100 - out75 shouldEqual pairs100 - } - - override def timeLimit: Span = scaled(30.seconds) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/pekkostreams/dispatcher/SignalDispatcherTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/pekkostreams/dispatcher/SignalDispatcherTest.scala deleted file mode 100644 index 65a5e575d8..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/pekkostreams/dispatcher/SignalDispatcherTest.scala +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.pekkostreams.dispatcher - -import com.daml.ledger.api.testing.utils.PekkoBeforeAndAfterAll -import com.digitalasset.canton.discard.Implicits.DiscardOps -import org.apache.pekko.stream.scaladsl.Sink -import org.apache.pekko.stream.testkit.scaladsl.TestSink -import org.awaitility.Awaitility.await -import org.awaitility.Durations -import org.scalatest.FutureOutcome -import org.scalatest.concurrent.{AsyncTimeLimitedTests, ScaledTimeSpans} -import org.scalatest.matchers.should.Matchers -import org.scalatest.time.Span -import org.scalatest.time.SpanSugar.* -import org.scalatest.wordspec.FixtureAsyncWordSpec - -import java.lang -import scala.concurrent.Await - -class SignalDispatcherTest - extends FixtureAsyncWordSpec - with Matchers - with PekkoBeforeAndAfterAll - with ScaledTimeSpans - with AsyncTimeLimitedTests { - - "SignalDispatcher" should { - - "send a signal on subscription if requested" in { sut => - sut.subscribe(true).runWith(Sink.head).map(_ => succeed) - } - - "not send a signal on subscription if not requested" in { sut => - val s = sut.subscribe(false).runWith(TestSink.probe[SignalDispatcher.Signal]) - s.request(1L) - s.expectNoMessage(1.second) - succeed - } - - "output a signal when it arrives" in { sut => - val result = sut.subscribe(false).runWith(Sink.head).map(_ => succeed) - sut.signal() - result - } - - "output multiple signals when they arrive" in { sut => - val count = 10 - val result = sut.subscribe(false).take(count.toLong).runWith(Sink.seq).map(_ => succeed) - 1.to(count).foreach(_ => sut.signal()) - result - } - - "remove queues from its state when the stream terminates behind them" in { sut => - val s = sut.subscribe(true).runWith(TestSink.probe[SignalDispatcher.Signal]) - s.request(1L) - s.expectNext(SignalDispatcher.Signal) - sut.getRunningState should have size 1L - s.cancel() - await("Cancellation handling") - .atMost(Durations.TEN_SECONDS) - .until(() => lang.Boolean.valueOf(sut.getRunningState.isEmpty)) - sut.getRunningState shouldBe empty - } - - "remove queues from its state when shutdown" in { sut => - val s = 
sut.subscribe(true).runWith(TestSink.probe[SignalDispatcher.Signal]) - s.request(1L) - s.expectNext(SignalDispatcher.Signal) - sut.getRunningState should have size 1L - sut.shutdown().discard - assertThrows[IllegalStateException](sut.getRunningState) - assertThrows[IllegalStateException](sut.signal()) - s.expectComplete() - succeed - } - - "remove queues from its state when failed" in { sut => - val s = sut.subscribe(true).runWith(TestSink.probe[SignalDispatcher.Signal]) - - s.request(1L) - s.expectNext(SignalDispatcher.Signal) - sut.getRunningState should have size 1L - - val failure = new RuntimeException("Some failure") - - // Check fail does not return a failed future - Await.result(sut.fail(() => failure), 10.seconds) - - assertThrows[IllegalStateException](sut.getRunningState) - assertThrows[IllegalStateException](sut.signal()) - s.expectError(failure) - succeed - } - - "fail sources with distinct throwables on fail" in { sut => - val s1 = sut.subscribe(true).runWith(TestSink.probe[SignalDispatcher.Signal]) - val s2 = sut.subscribe(true).runWith(TestSink.probe[SignalDispatcher.Signal]) - val s3 = sut.subscribe(true).runWith(TestSink.probe[SignalDispatcher.Signal]) - - s1.request(1L) - s2.request(1L) - s3.request(1L) - - s1.expectNext(SignalDispatcher.Signal) - s2.expectNext(SignalDispatcher.Signal) - s3.expectNext(SignalDispatcher.Signal) - - sut.getRunningState should have size 3L - - val errMessage = "Some failure" - val newFailure = () => new RuntimeException(errMessage) - - // Check fail does not return a failed future - Await.result(sut.fail(newFailure), 10.seconds) - - assertThrows[IllegalStateException](sut.getRunningState) - assertThrows[IllegalStateException](sut.signal()) - val capturedErrors = Set(s1.expectError(), s2.expectError(), s3.expectError()) - - // Expected set size confirms errors are distinct - capturedErrors.size shouldBe 3 - capturedErrors.foldLeft(succeed) { - case (`succeed`, err: RuntimeException) if err.getMessage == errMessage => succeed - case (`succeed`, otherErr) => fail(s"Unexpected error $otherErr") - case (failed, _) => failed - } - } - } - - override def withFixture(test: OneArgAsyncTest): FutureOutcome = - test.apply(SignalDispatcher()) - override type FixtureParam = SignalDispatcher - override def timeLimit: Span = scaled(10.seconds) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protobuf/ProtobufParsingAttackTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/protobuf/ProtobufParsingAttackTest.scala deleted file mode 100644 index 8b271fa3ef..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protobuf/ProtobufParsingAttackTest.scala +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protobuf - -import cats.syntax.option.* -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.ProtoDeserializationError.BufferException -import com.digitalasset.canton.serialization.ProtoConverter -import com.google.protobuf.ByteString -import org.scalatest.wordspec.AnyWordSpec - -class ProtobufParsingAttackTest extends AnyWordSpec with BaseTest { - - "adding a field to a message definition" should { - "ignore wrong field types" in { - - val attackAddField = AttackAddField("foo", 123).toByteString - - // Base parser can parse this - ProtoConverter.protoParser(Base.parseFrom)(attackAddField) shouldBe - Right(Base(Base.Sum.One("foo"))) - - // Parser for the message after the field has been added will ignore the additional field - ProtoConverter.protoParser(AddField.parseFrom)(attackAddField) shouldBe - Right(AddField(AddField.Sum.One("foo"), None)) - } - - "explode when the field deserialization fails" in { - val attackAddField = - AttackAddFieldSameType("foo", ByteString.copyFromUtf8("BYTESTRING")).toByteString - - // Base parser can parse this - ProtoConverter.protoParser(Base.parseFrom)(attackAddField) shouldBe - Right(Base(Base.Sum.One("foo"))) - - // Parser for the message after the field has been added will explode - ProtoConverter - .protoParser(AddField.parseFrom)(attackAddField) - .left - .value shouldBe a[BufferException] - } - } - - "adding an alternative to a one-of" should { - "produce different parsing results" in { - - val dummyMessage = DummyMessage("dummy") - val attackAddVariant = AttackAddVariant("bar", dummyMessage.toByteString).toByteString - - ProtoConverter.protoParser(Base.parseFrom)(attackAddVariant) shouldBe - Right(Base(Base.Sum.One("bar"))) - - ProtoConverter.protoParser(AddVariant.parseFrom)(attackAddVariant) shouldBe - Right(AddVariant(AddVariant.Sum.Two(dummyMessage))) - } - - "explode when given bad alternatives" in { - val attackAddVariant = - AttackAddVariant("bar", ByteString.copyFromUtf8("BYTESTRING")).toByteString - - ProtoConverter.protoParser(Base.parseFrom)(attackAddVariant) shouldBe - Right(Base(Base.Sum.One("bar"))) - - ProtoConverter - .protoParser(AddVariant.parseFrom)(attackAddVariant) - .left - .value shouldBe a[BufferException] - } - } - - // This test shows that it should actually be fine to switch between `optional` and `repeated` - // for fields in protobuf definitions. Our buf checks nevertheless forbid this because - // buf doesn't offer a dedicated config option; FIELD_SAME_LABEL is needed for WIRE compatibility - // and also complains about repeated, as explained in the docs. 
- "repeating a structured field" should { - "retain the last message" in { - val dummyMessage1 = DummyMessage("dummy1") - val dummyMessage2 = DummyMessage("dummy2") - val repeated = Repeated(Seq("a", "b"), Seq(dummyMessage1, dummyMessage2)).toByteString - - ProtoConverter.protoParser(Single.parseFrom)(repeated) shouldBe - Right(Single("b", dummyMessage2.some)) - } - - "explode when giving unparseable structured message" in { - val dummyMessage = DummyMessage("dummy") - val attackRepeated = AttackRepeated( - Seq("a", "b"), - Seq(ByteString.copyFromUtf8("NOT-A-DUMMY-MESSAGE"), dummyMessage.toByteString), - ).toByteString - - ProtoConverter - .protoParser(Single.parseFrom)(attackRepeated) - .left - .value shouldBe a[BufferException] - - ProtoConverter - .protoParser(Repeated.parseFrom)(attackRepeated) - .left - .value shouldBe a[BufferException] - } - } - - // The JVM does not have unsigned ints (except for char), so protobuf uintX are mapped to signed numbers - // in the generated Scala classes. This test here makes sure that we detect if the generated parsers - // change this behavior. - "Protobuf does not distinguish between signed and unsigned varints" in { - val signedInt = SignedInt(-1) - ProtoConverter.protoParser(UnsignedInt.parseFrom)(signedInt.toByteString) shouldBe - Right(UnsignedInt(-1)) - } - - // This test demonstrates that it is not safe to change the range of a varint in a protobuf message - "silently truncate numbers to smaller range" in { - val unsignedLong = UnsignedLong(0x7fffffffffffffffL) - ProtoConverter.protoParser(UnsignedInt.parseFrom)(unsignedLong.toByteString) shouldBe - Right(UnsignedInt(-1)) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protobuf/UntypedVersionedMessageTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/protobuf/UntypedVersionedMessageTest.scala deleted file mode 100644 index a01dc15fdb..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protobuf/UntypedVersionedMessageTest.scala +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protobuf - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.ProtoDeserializationError.FieldNotSet -import com.digitalasset.canton.protobuf.UntypedVersionedMessageTest.{Message, parseNew, parseOld} -import com.digitalasset.canton.serialization.ProtoConverter -import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult -import com.digitalasset.canton.version.{ - HasVersionedMessageCompanion, - HasVersionedWrapper, - ProtoVersion, - ProtocolVersion, -} -import com.google.protobuf.ByteString -import org.scalatest.Assertion -import org.scalatest.wordspec.AnyWordSpec - -class UntypedVersionedMessageTest extends AnyWordSpec with BaseTest { - - "UntypedVersionedMessage" should { - "be compatible with old Versioned messages" in { - /* - In this test, we refer to the Versioned* messages as "old" and the - UntypedVersionedMessage as new. - We check that old bytestring can be read with new parser and vice versa. 
- */ - - def test(content: String): Assertion = { - val message: Message = Message(content) - - val newByteString = message.toProtoVersioned(testedProtocolVersion).toByteString - val oldByteString = VersionedDummyMessage( - VersionedDummyMessage.Version.V0(DummyMessage(content)) - ).toByteString - - parseNew(newByteString).value shouldBe message - parseNew(oldByteString).value shouldBe message - parseOld(oldByteString).value shouldBe message - parseOld(newByteString).value shouldBe message - } - - test("Hello world!") - - // Because protobuf skips values that are equal to the default value, we also test the empty string - test("") - } - } -} - -object UntypedVersionedMessageTest { - def parseNew(bytes: ByteString): ParsingResult[Message] = Message.fromTrustedByteString(bytes) - def parseOld(bytes: ByteString): ParsingResult[Message] = { - def fromProtoVersioned(dummyMessageP: VersionedDummyMessage): Either[FieldNotSet, Message] = - dummyMessageP.version match { - case VersionedDummyMessage.Version.Empty => - Left(FieldNotSet("VersionedDummyMessage.version")) - case VersionedDummyMessage.Version.V0(parameters) => fromProtoV30(parameters) - } - - def fromProtoV30(dummyMessageP: DummyMessage): Right[Nothing, Message] = - Right(Message(dummyMessageP.content)) - - ProtoConverter - .protoParser(VersionedDummyMessage.parseFrom)(bytes) - .flatMap(fromProtoVersioned) - } - - final case class Message(content: String) extends HasVersionedWrapper[Message] { - - override protected def companionObj = Message - - def toProtoV30: DummyMessage = DummyMessage(content) - } - - object Message extends HasVersionedMessageCompanion[Message] { - val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( - ProtoVersion(30) -> ProtoCodec( - ProtocolVersion.v34, - supportedProtoVersion(DummyMessage)(fromProtoV30), - _.toProtoV30, - ) - ) - - val name: String = "Message" - - def fromProtoV30(messageP: DummyMessage): ParsingResult[Message] = Right( - Message(messageP.content) - ) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/CantonContractIdVersionTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/CantonContractIdVersionTest.scala deleted file mode 100644 index 63c6d518bd..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/CantonContractIdVersionTest.scala +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protocol - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.crypto.* -import org.scalatest.wordspec.AnyWordSpec - -class CantonContractIdVersionTest extends AnyWordSpec with BaseTest { - - forEvery(Seq(AuthenticatedContractIdVersionV10, AuthenticatedContractIdVersionV11)) { underTest => - s"$underTest" when { - val discriminator = ExampleTransactionFactory.lfHash(1) - val hash = - Hash.build(TestHash.testHashPurpose, HashAlgorithm.Sha256).add(0).finish() - - val unicum = Unicum(hash) - - val cid = underTest.fromDiscriminator(discriminator, unicum) - - "creating a contract ID from discriminator and unicum" should { - "succeed" in { - cid.coid shouldBe ( - LfContractId.V1.prefix.toHexString + - discriminator.bytes.toHexString + - underTest.versionPrefixBytes.toHexString + - unicum.unwrap.toHexString - ) - } - } - - "extracting a canton-contract-id-version" should { - "succeed" in { - CantonContractIdVersion.extractCantonContractIdVersion(cid) shouldBe Right( - underTest - ) - } - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/ContractMetadataTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/ContractMetadataTest.scala deleted file mode 100644 index 52e7b068cd..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/ContractMetadataTest.scala +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protocol - -import com.digitalasset.canton.BaseTest -import org.scalatest.wordspec.AnyWordSpec - -class ContractMetadataTest extends AnyWordSpec with BaseTest { - - "ContractMetadata" must { - "reject creation" when { - "maintainers are not signatories" in { - val err = leftOrFail( - ContractMetadata.create( - Set.empty, - Set(ExampleTransactionFactory.submitter), - Some( - ExampleTransactionFactory.globalKeyWithMaintainers( - maintainers = Set(ExampleTransactionFactory.submitter) - ) - ), - ) - )("non-signatory maintainer") - err should include("Maintainers are not signatories: ") - } - - "signatories are not stakeholders" in { - val err = leftOrFail( - ContractMetadata.create(Set(ExampleTransactionFactory.submitter), Set.empty, None) - )("non-stakeholder signatory") - err should include("Signatories are not stakeholders: ") - } - } - - "deserialize to what it was serialized from" in { - val metadata = ContractMetadata.tryCreate( - Set(ExampleTransactionFactory.submitter, ExampleTransactionFactory.signatory), - Set( - ExampleTransactionFactory.submitter, - ExampleTransactionFactory.signatory, - ExampleTransactionFactory.observer, - ), - Some( - ExampleTransactionFactory.globalKeyWithMaintainers( - maintainers = Set(ExampleTransactionFactory.submitter) - ) - ), - ) - val serialization = metadata.toProtoVersioned(testedProtocolVersion) - - ContractMetadata.fromProtoVersioned(serialization) shouldBe Right(metadata) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/ExampleContractFactory.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/ExampleContractFactory.scala deleted file mode 100644 index 5873495ed5..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/ExampleContractFactory.scala +++ /dev/null 
@@ -1,95 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protocol - -import com.digitalasset.canton.LfPartyId -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto -import com.digitalasset.canton.crypto.{Salt, TestHash, TestSalt} -import com.digitalasset.canton.util.LfTransactionBuilder -import com.digitalasset.daml.lf.data.Ref.PackageName -import com.digitalasset.daml.lf.data.{Bytes, Ref, Time} -import com.digitalasset.daml.lf.language.LanguageVersion -import com.digitalasset.daml.lf.transaction.{ - CreationTime, - FatContractInstance, - GlobalKeyWithMaintainers, - Node, -} -import com.digitalasset.daml.lf.value.Value -import com.digitalasset.daml.lf.value.Value.{ContractId, ValueInt64} -import org.scalatest.EitherValues - -import scala.util.Random - -object ExampleContractFactory extends EitherValues { - - private val random = new Random(0) - private val unicumGenerator = new UnicumGenerator(new SymbolicPureCrypto()) - - def lfHash(index: Int = random.nextInt()): LfHash = - LfHash.assertFromBytes( - Bytes.assertFromString(f"$index%04x".padTo(LfHash.underlyingHashLength * 2, '0')) - ) - - val signatory: LfPartyId = LfPartyId.assertFromString("signatory::default") - val observer: LfPartyId = LfPartyId.assertFromString("observer::default") - val extra: LfPartyId = LfPartyId.assertFromString("extra::default") - - val templateId: LfTemplateId = LfTransactionBuilder.defaultTemplateId - val packageName: PackageName = LfTransactionBuilder.defaultPackageName - - def build( - templateId: Ref.Identifier = templateId, - packageName: Ref.PackageName = packageName, - argument: Value = ValueInt64(random.nextLong()), - createdAt: Time.Timestamp = Time.Timestamp.now(), - salt: Salt = TestSalt.generateSalt(random.nextInt()), - signatories: Set[Ref.Party] = Set(signatory), - stakeholders: Set[Ref.Party] = Set(signatory, observer, extra), - keyOpt: Option[GlobalKeyWithMaintainers] = None, - version: LanguageVersion = LanguageVersion.default, - cantonContractIdVersion: CantonContractIdVersion = AuthenticatedContractIdVersionV11, - overrideContractId: Option[ContractId] = None, - ): ContractInstance = { - - val discriminator = lfHash() - - val create = Node.Create( - coid = LfContractId.V1(discriminator), - templateId = templateId, - packageName = packageName, - arg = argument, - signatories = signatories, - stakeholders = stakeholders, - keyOpt = keyOpt, - version = version, - ) - val unsuffixed = FatContractInstance.fromCreateNode( - create, - CreationTime.CreatedAt(createdAt), - DriverContractMetadata(salt).toLfBytes(cantonContractIdVersion), - ) - - val unicum = unicumGenerator.recomputeUnicum(unsuffixed, cantonContractIdVersion).value - - val contractId = - overrideContractId.getOrElse(cantonContractIdVersion.fromDiscriminator(discriminator, unicum)) - - val inst = FatContractInstance.fromCreateNode( - create.copy(coid = contractId), - CreationTime.CreatedAt(createdAt), - DriverContractMetadata(salt).toLfBytes(cantonContractIdVersion), - ) - - ContractInstance(inst).value - - } - - def buildContractId( - index: Int = random.nextInt(), - cantonContractIdVersion: CantonContractIdVersion = AuthenticatedContractIdVersionV11, - ): ContractId = - cantonContractIdVersion.fromDiscriminator(lfHash(index), Unicum(TestHash.digest(index))) - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/ExampleTransaction.scala 
b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleTransaction.scala deleted file mode 100644 index eb8bbb6a63..0000000000 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleTransaction.scala +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protocol - -import cats.syntax.functor.* -import com.digitalasset.canton.crypto.{HashOps, RandomOps} -import com.digitalasset.canton.data.* -import com.digitalasset.canton.protocol.WellFormedTransaction.{WithSuffixes, WithoutSuffixes} -import com.digitalasset.canton.{LfKeyResolver, LfPartyId} - -/** Encapsulates the most widely used representations of a transaction. - */ -trait ExampleTransaction { - - def cryptoOps: HashOps with RandomOps - - /** Set of parties who are informees of an action (root or not) in the transaction */ - def allInformees: Set[LfPartyId] = fullInformeeTree.allInformees - - /** The transaction with unsuffixed contract IDs and the transaction version */ - def versionedUnsuffixedTransaction: LfVersionedTransaction - - /** Map from the nodes of the transaction to their seed if they need a seed */ - def seeds: Map[LfNodeId, LfHash] = metadata.seeds - - /** Metadata for the transaction as a whole */ - def metadata: TransactionMetadata - - /** @throws IllegalArgumentException - * if [[versionedUnsuffixedTransaction]] is malformed - */ - def wellFormedUnsuffixedTransaction: WellFormedTransaction[WithoutSuffixes] = - WellFormedTransaction.normalizeAndAssert( - versionedUnsuffixedTransaction, - metadata, - WithoutSuffixes, - ) - - /** The key resolver to be used for iterating over the transaction nodes */ - def keyResolver: LfKeyResolver - - /** The decompositions of the root views of the transaction, in execution order */ - def rootViewDecompositions: Seq[TransactionViewDecomposition.NewView] - - /** The root views of the transaction in execution order */ - def rootViews: Seq[TransactionView] - - /** Associates all views (root or not) with their (direct and indirect) subviews (in execution - * order). Recall that every view is also a subview of itself.
- */ - def viewWithSubviews: Seq[(TransactionView, Seq[TransactionView])] - - def inputContracts: Map[LfContractId, SerializableContract] = - transactionViewTrees.flatMap(_.viewParticipantData.coreInputs).toMap.fmap(_.contract) - - def transactionTree: GenTransactionTree - - def transactionId: TransactionId = transactionTree.transactionId - - def fullInformeeTree: FullInformeeTree - - /** The sequence of reinterpreted action descriptions for all views in execution order, with their - * witnesses - */ - def reinterpretedSubtransactions: Seq[ - ( - FullTransactionViewTree, - (LfVersionedTransaction, TransactionMetadata, LfKeyResolver), - Witnesses, - ) - ] - - /** All transaction view trees, including those corresponding to non-root views, in execution - * order - */ - def transactionViewTrees: Seq[FullTransactionViewTree] = reinterpretedSubtransactions.map(_._1) - - /** All transaction view trees, including those corresponding to non-root views, in execution - * order - */ - def transactionViewTreesWithWitnesses: Seq[(FullTransactionViewTree, Witnesses)] = - reinterpretedSubtransactions.map(r => r._1 -> r._3) - - /** Transaction view trees for root views, in execution order */ - def rootTransactionViewTrees: Seq[FullTransactionViewTree] - - /** The transaction with suffixed contract ids and the transaction version. */ - def versionedSuffixedTransaction: LfVersionedTransaction - - /** @throws IllegalArgumentException - * if [[versionedSuffixedTransaction]] is malformed - */ - def wellFormedSuffixedTransaction: WellFormedTransaction[WithSuffixes] = - WellFormedTransaction.normalizeAndAssert(versionedSuffixedTransaction, metadata, WithSuffixes) - - /** Yields a brief description of this example, which must be suitable for naming test cases and - * usable to identify the example. - * - * Implementing classes must overwrite this method. - */ - override def toString: String = throw new UnsupportedOperationException( - "Please overwrite the toString method." - ) -} diff --git a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleTransactionFactory.scala b/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleTransactionFactory.scala deleted file mode 100644 index e48afa2b30..0000000000 --- a/canton/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleTransactionFactory.scala +++ /dev/null @@ -1,3258 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protocol - -import cats.syntax.functor.* -import cats.syntax.functorFilter.* -import cats.syntax.option.* -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton -import com.digitalasset.canton.* -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} -import com.digitalasset.canton.crypto.* -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto -import com.digitalasset.canton.data.* -import com.digitalasset.canton.data.DeduplicationPeriod.DeduplicationDuration -import com.digitalasset.canton.data.TransactionViewDecomposition.{NewView, SameView} -import com.digitalasset.canton.data.ViewPosition.MerklePathElement -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.protocol.ExampleTransactionFactory.* -import com.digitalasset.canton.sequencing.protocol.MediatorGroupRecipient -import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex -import com.digitalasset.canton.topology.client.TopologySnapshot -import com.digitalasset.canton.topology.transaction.ParticipantPermission.{ - Confirmation, - Observation, - Submission, -} -import com.digitalasset.canton.topology.transaction.{ParticipantAttributes, VettedPackage} -import com.digitalasset.canton.topology.{ - ParticipantId, - PhysicalSynchronizerId, - SynchronizerId, - TestingIdentityFactory, - TestingTopology, - UniqueIdentifier, -} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.LfTransactionUtil.{ - metadataFromCreate, - metadataFromExercise, - metadataFromFetch, -} -import com.digitalasset.canton.util.{LfTransactionBuilder, LfTransactionUtil} -import com.digitalasset.canton.version.ProtocolVersion -import com.digitalasset.daml.lf.data.Ref.PackageName -import com.digitalasset.daml.lf.data.{Bytes, ImmArray} -import com.digitalasset.daml.lf.language.LanguageVersion -import com.digitalasset.daml.lf.transaction.{CreationTime, Versioned} -import com.digitalasset.daml.lf.value.Value -import com.digitalasset.daml.lf.value.Value.* -import org.scalatest.EitherValues - -import java.time.Duration as JDuration -import java.util.UUID -import scala.collection.immutable.HashMap -import scala.concurrent.duration.* -import scala.concurrent.{Await, ExecutionContext} -import scala.util.Random - -import BaseTest.* - -/** Provides convenience methods for creating [[ExampleTransaction]]s and parts thereof. 
- */ -object ExampleTransactionFactory { - import EitherValues.* - - val pureCrypto: CryptoPureApi = new SymbolicPureCrypto() - // Helper methods for Daml-LF types - val languageVersion: LanguageVersion = LfTransactionBuilder.defaultLanguageVersion - val packageId: LfPackageId = LfTransactionBuilder.defaultPackageId - val upgradePackageId: LfPackageId = LfPackageId.assertFromString("upgraded-pkg-id") - val templateId: LfTemplateId = LfTransactionBuilder.defaultTemplateId - val packageName: PackageName = LfTransactionBuilder.defaultPackageName - val someOptUsedPackages: Option[Set[LfPackageId]] = Some(Set(packageId)) - val defaultGlobalKey: LfGlobalKey = LfTransactionBuilder.defaultGlobalKey - val transactionVersion: LfLanguageVersion = LfTransactionBuilder.defaultTransactionVersion - - private val random = new Random(0) - - private def valueCapturing(coid: List[LfContractId]): Value = { - val captives = coid.map(c => (None, ValueContractId(c))) - ValueRecord(None, captives.to(ImmArray)) - } - - private def versionedValueCapturing(coid: List[LfContractId]): Value.VersionedValue = - LfVersioned(transactionVersion, valueCapturing(coid)) - - def contractInstance( - capturedIds: Seq[LfContractId] = Seq.empty, - templateId: LfTemplateId = templateId, - packageName: LfPackageName = packageName, - ): LfThinContractInst = - LfThinContractInst( - packageName = packageName, - template = templateId, - arg = versionedValueCapturing(capturedIds.toList), - ) - - def authenticatedSerializableContract( - metadata: ContractMetadata, - instance: LfThinContractInst = ExampleTransactionFactory.contractInstance(), - ledgerTime: CantonTimestamp = CantonTimestamp.Epoch, - ): SerializableContract = { - val unicumGenerator = new UnicumGenerator(new SymbolicPureCrypto()) - val contractIdVersion = - CantonContractIdVersion.maximumSupportedVersion(BaseTest.testedProtocolVersion).value - - val (contractSalt, unicum) = unicumGenerator.generateSaltAndUnicum( - psid = SynchronizerId(UniqueIdentifier.tryFromProtoPrimitive("synchronizer::da")).toPhysical, - mediator = MediatorGroupRecipient(MediatorGroupIndex.one), - transactionUuid = new UUID(1L, 1L), - viewPosition = ViewPosition(List.empty), - viewParticipantDataSalt = TestSalt.generateSalt(1), - createIndex = 0, - ledgerCreateTime = CreationTime.CreatedAt(ledgerTime.toLf), - metadata = metadata, - suffixedContractInstance = ExampleTransactionFactory.asSerializableRaw(instance), - cantonContractIdVersion = contractIdVersion, - ) - - val contractId = contractIdVersion.fromDiscriminator( - ExampleTransactionFactory.lfHash(1337), - unicum, - ) - - SerializableContract( - contractId = contractId, - contractInstance = instance, - metadata = metadata, - ledgerTime = ledgerTime, - contractSalt = contractSalt.unwrap, - ).value - } - - val veryDeepValue: Value = { - def deepValue(depth: Int): Value = - if (depth <= 0) ValueUnit else ValueOptional(Some(deepValue(depth - 1))) - - deepValue(Value.MAXIMUM_NESTING + 10) - } - val veryDeepVersionedValue: VersionedValue = - LfVersioned(transactionVersion, veryDeepValue) - - val veryDeepContractInstance: LfThinContractInst = - LfThinContractInst( - packageName = packageName, - template = templateId, - arg = veryDeepVersionedValue, - ) - - def globalKey( - templateId: LfTemplateId, - value: LfValue, - packageName: LfPackageName = packageName, - ): Versioned[LfGlobalKey] = - LfVersioned( - transactionVersion, - LfGlobalKey.assertBuild(templateId, value, packageName), - ) - - def globalKeyWithMaintainers( - key: LfGlobalKey = 
defaultGlobalKey, - maintainers: Set[LfPartyId] = Set.empty, - ): Versioned[LfGlobalKeyWithMaintainers] = - LfVersioned(transactionVersion, LfGlobalKeyWithMaintainers(key, maintainers)) - - def fetchNode( - cid: LfContractId, - actingParties: Set[LfPartyId] = Set.empty, - signatories: Set[LfPartyId] = Set.empty, - observers: Set[LfPartyId] = Set.empty, - key: Option[LfGlobalKeyWithMaintainers] = None, - byKey: Boolean = false, - version: LfLanguageVersion = transactionVersion, - templateId: LfTemplateId = templateId, - interfaceId: Option[LfTemplateId] = None, - ): LfNodeFetch = - LfNodeFetch( - coid = cid, - packageName = packageName, - templateId = templateId, - actingParties = actingParties, - signatories = signatories, - stakeholders = signatories ++ observers, - keyOpt = key, - byKey = byKey, - version = version, - interfaceId = interfaceId, - ) - - def createNode( - cid: LfContractId, - contractInstance: LfThinContractInst = this.contractInstance(), - signatories: Set[LfPartyId] = Set.empty, - observers: Set[LfPartyId] = Set.empty, - key: Option[LfGlobalKeyWithMaintainers] = None, - ): LfNodeCreate = { - val unversionedContractInst = contractInstance.unversioned - LfNodeCreate( - coid = cid, - packageName = unversionedContractInst.packageName, - templateId = unversionedContractInst.template, - arg = unversionedContractInst.arg, - signatories = signatories, - stakeholders = signatories ++ observers, - keyOpt = key, - version = transactionVersion, - ) - } - - def exerciseNode( - targetCoid: LfContractId, - consuming: Boolean = true, - args: List[LfContractId] = Nil, - children: List[LfNodeId] = Nil, - signatories: Set[LfPartyId] = Set.empty, - observers: Set[LfPartyId] = Set.empty, - choiceObservers: Set[LfPartyId] = Set.empty, - actingParties: Set[LfPartyId] = Set.empty, - exerciseResult: Option[Value] = Some(Value.ValueNone), - key: Option[LfGlobalKeyWithMaintainers] = None, - byKey: Boolean = false, - templateId: LfTemplateId = templateId, - packageName: LfPackageName = packageName, - ): LfNodeExercises = - LfNodeExercises( - targetCoid = targetCoid, - packageName = packageName, - templateId = templateId, - interfaceId = None, - choiceId = LfChoiceName.assertFromString("choice"), - consuming = consuming, - actingParties = actingParties, - chosenValue = valueCapturing(args), - stakeholders = signatories ++ observers, - signatories = signatories, - choiceObservers = choiceObservers, - choiceAuthorizers = None, - children = children.to(ImmArray), - exerciseResult = exerciseResult, - keyOpt = key, - byKey = byKey, - version = transactionVersion, - ) - - def exerciseNodeWithoutChildren( - targetCoid: LfContractId, - consuming: Boolean = true, - args: List[LfContractId] = Nil, - signatories: Set[LfPartyId] = Set.empty, - observers: Set[LfPartyId] = Set.empty, - actingParties: Set[LfPartyId] = Set.empty, - exerciseResult: Option[Value] = Some(Value.ValueNone), - ): LfNodeExercises = - exerciseNode( - targetCoid = targetCoid, - consuming = consuming, - args = args, - children = Nil, - signatories = signatories, - observers = observers, - actingParties = actingParties, - exerciseResult = exerciseResult, - ).copy(children = ImmArray.empty) - - def lookupByKeyNode( - key: LfGlobalKey, - maintainers: Set[LfPartyId] = Set.empty, - resolution: Option[LfContractId] = None, - ): LfNodeLookupByKey = - LfNodeLookupByKey( - templateId = key.templateId, - packageName = key.packageName, - key = LfGlobalKeyWithMaintainers(key, maintainers), - result = resolution, - version = transactionVersion, - ) - - 
def nodeId(index: Int): LfNodeId = LfNodeId(index) - - val submissionSeed: LfHash = LfHash.secureRandom( - LfHash.hashPrivateKey("example transaction factory tests") - )() // avoiding dependency on SeedService.staticRandom after move to ledger api server - - def transaction(rootIndices: Seq[Int], nodes: LfNode*): LfVersionedTransaction = - transactionFrom(rootIndices, 0, nodes*) - - def transactionFrom( - rootIndices: Seq[Int], - startIndex: Int, - nodes: LfNode* - ): LfVersionedTransaction = { - val roots = rootIndices.map(nodeId).to(ImmArray) - - val nodesMap = HashMap(nodes.zipWithIndex.map { case (node, index) => - (nodeId(index + startIndex), node) - }*) - - val version = protocol.maxTransactionVersion( - NonEmpty - .from(nodesMap.values.toSeq.mapFilter(_.optVersion)) - .getOrElse(NonEmpty(Seq, transactionVersion)) - ) - - LfVersionedTransaction(version, nodesMap, roots) - } - - def inventSeeds(tx: LfVersionedTransaction): Map[LfNodeId, LfHash] = - tx.nodes.collect { - case (nodeId, node) if LfTransactionUtil.nodeHasSeed(node) => nodeId -> lfHash(nodeId.index) - } - - val malformedLfTransaction: LfVersionedTransaction = transaction(Seq(0)) - - // Helper methods for contract ids and transaction ids - def transactionId(index: Int): TransactionId = TransactionId( - TestHash.digest(s"transactionId$index") - ) - - def unicum(index: Int): Unicum = Unicum(TestHash.digest(s"unicum$index")) - - def lfHash(index: Int): LfHash = - LfHash.assertFromBytes( - Bytes.assertFromString(f"$index%04x".padTo(LfHash.underlyingHashLength * 2, '0')) - ) - - def suffixedId( - discriminator: Int, - suffix: Int, - contractIdVersion: CantonContractIdVersion = AuthenticatedContractIdVersionV11, - ): LfContractId = - LfContractId.V1( - discriminator = lfHash(discriminator), - suffix = contractIdVersion.versionPrefixBytes ++ Bytes.fromByteString( - TestHash.digest(f"$suffix%04x").getCryptographicEvidence - ), - ) - - def unsuffixedId(index: Int): LfContractId.V1 = LfContractId.V1(lfHash(index)) - - def rootViewPosition(index: Int, total: Int): ViewPosition = - ViewPosition(List(MerkleSeq.indicesFromSeq(total)(index))) - - def asSerializableRaw(contractInstance: LfThinContractInst): SerializableRawContractInstance = - SerializableRawContractInstance - .create(contractInstance) - .fold(err => throw new IllegalArgumentException(err.toString), Predef.identity) - - def asSerializable( - contractId: LfContractId, - contractInstance: LfThinContractInst = this.contractInstance(), - metadata: ContractMetadata = ContractMetadata.tryCreate(Set.empty, Set(this.signatory), None), - ledgerTime: CantonTimestamp = CantonTimestamp.Epoch, - salt: Salt = TestSalt.generateSalt(random.nextInt()), - ): SerializableContract = - SerializableContract( - contractId, - asSerializableRaw(contractInstance), - metadata, - CreationTime.CreatedAt(ledgerTime.toLf), - salt, - ) - - private def serializableFromCreate( - node: LfNodeCreate, - salt: Salt, - ): SerializableContract = - asSerializable( - node.coid, - node.versionedCoinst, - metadataFromCreate(node), - salt = salt, - ) - - // Parties and participants - - val submittingParticipant: ParticipantId = ParticipantId("submittingParticipant") - val signatoryParticipant: ParticipantId = ParticipantId("signatoryParticipant") - val observerParticipant: ParticipantId = ParticipantId("observerParticipant") - val extraParticipant: ParticipantId = ParticipantId("extraParticipant") - val signatory: LfPartyId = LfPartyId.assertFromString("signatory::default") - val signatoryReplica: LfPartyId = 
LfPartyId.assertFromString("signatoryReplica::default") - val observer: LfPartyId = LfPartyId.assertFromString("observer::default") - val extra: LfPartyId = LfPartyId.assertFromString("extra::default") - val submitter: LfPartyId = submittingParticipant.adminParty.toLf - val submitters: List[LfPartyId] = List(submitter) - - // Request metadata - - val userId: UserId = DefaultDamlValues.userId() - val commandId: CommandId = DefaultDamlValues.commandId() - val workflowId: WorkflowId = WorkflowId.assertFromString("testWorkflowId") - - val defaultTestingTopology: TestingTopology = - TestingTopology.from( - topology = Map( - submitter -> Map(submittingParticipant -> Submission), - signatory -> Map( - signatoryParticipant -> Confirmation - ), - signatoryReplica -> Map( - signatoryParticipant -> Confirmation - ), - observer -> Map( - observerParticipant -> Observation - ), - extra -> Map( - extraParticipant -> Observation - ), - ), - participants = Map(submittingParticipant -> ParticipantAttributes(Submission)), - packages = - Seq(submittingParticipant, signatoryParticipant, observerParticipant, extraParticipant) - .map( - _ -> VettedPackage.unbounded(Seq(ExampleTransactionFactory.packageId, upgradePackageId)) - ) - .toMap, - ) - - def defaultTestingIdentityFactory: TestingIdentityFactory = - defaultTestingTopology.build() - - // Topology - def defaultTopologySnapshot: TopologySnapshot = - defaultTestingIdentityFactory.topologySnapshot() - - // Merkle trees - def blinded[A](tree: MerkleTree[A]): MerkleTree[A] = BlindedNode(tree.rootHash) - -} - -/** Factory for [[ExampleTransaction]]. Also contains a number of predefined example transactions. - * Also provides convenience methods for creating [[ExampleTransaction]]s and parts thereof. - */ -class ExampleTransactionFactory( - val cryptoOps: HashOps with HmacOps with RandomOps = new SymbolicPureCrypto, - versionOverride: Option[ProtocolVersion] = None, -)( - val transactionSalt: Salt = TestSalt.generateSalt(0), - val transactionSeed: SaltSeed = TestSalt.generateSeed(0), - val transactionUuid: UUID = UUID.fromString("11111111-2222-3333-4444-555555555555"), - val psid: PhysicalSynchronizerId = SynchronizerId( - UniqueIdentifier.tryFromProtoPrimitive("example::default") - ).toPhysical, - val mediatorGroup: MediatorGroupRecipient = MediatorGroupRecipient(MediatorGroupIndex.zero), - val ledgerTime: CantonTimestamp = CantonTimestamp.Epoch, - val ledgerTimeUsed: CantonTimestamp = CantonTimestamp.Epoch.minusSeconds(1), - val preparationTime: CantonTimestamp = CantonTimestamp.Epoch.minusMillis(9), - val topologySnapshot: TopologySnapshot = defaultTopologySnapshot, -)(implicit ec: ExecutionContext, tc: TraceContext) - extends EitherValues { - - private val protocolVersion = versionOverride.getOrElse(BaseTest.testedProtocolVersion) - private val cantonContractIdVersion = AuthenticatedContractIdVersionV11 - private val random = new Random(0) - - private def createNewView( - rootNode: LfActionNode, - rootSeed: Option[LfHash], - rootNodeId: LfNodeId, - tailNodes: Seq[TransactionViewDecomposition], - isRoot: Boolean, - ): FutureUnlessShutdown[NewView] = { - - val rootRbContext = RollbackContext.empty - - val submittingAdminPartyO = - Option.when(isRoot)(submitterMetadata.submittingParticipant.adminParty.toLf) - TransactionViewDecompositionFactory - .informeesParticipantsAndThreshold(rootNode, topologySnapshot, submittingAdminPartyO) - .map { case (viewInformeesWithParticipantData, viewThreshold) => - val viewInformees = viewInformeesWithParticipantData.fmap(_._2) 
- NewView( - rootNode, - ViewConfirmationParameters.create(viewInformees, viewThreshold), - rootSeed, - rootNodeId, - tailNodes, - rootRbContext, - ) - } - } - - private def awaitCreateNewView( - rootNode: LfActionNode, - rootSeed: Option[LfHash], - rootNodeId: LfNodeId, - tailNodes: Seq[TransactionViewDecomposition], - isRoot: Boolean, - ): NewView = - Await - .result( - createNewView( - rootNode, - rootSeed, - rootNodeId, - tailNodes, - isRoot, - ), - 10.seconds, - ) - .onShutdown(throw new RuntimeException("Aborted due to shutdown")) - - /** Yields standard test cases that the sync-protocol must be able to handle. Yields only "happy" - * cases, i.e., the sync-protocol must not emit an error. - */ - lazy val standardHappyCases: Seq[ExampleTransaction] = - Seq[ExampleTransaction]( - EmptyTransaction, - SingleCreate(seed = deriveNodeSeed(0)), - SingleCreate( - seed = deriveNodeSeed(0), - capturedContractIds = Seq(suffixedId(-1, 0), suffixedId(-1, 1)), - unsuffixedCapturedContractIds = Seq(suffixedId(-1, 0), suffixedId(-1, 1)), - ), - SingleFetch(version = LfLanguageVersion.v2_dev), - SingleExercise(seed = deriveNodeSeed(0)), - SingleExerciseWithNonstakeholderActor(seed = deriveNodeSeed(0)), - MultipleRoots, - MultipleRootsAndSimpleViewNesting, - MultipleRootsAndViewNestings, - ViewInterleavings, - TransientContracts, - ) - - // Helpers for GenTransactions - - private val numberOfLeavesPerView: Int = 2 - private val numberOfLeavesAtTransactionRoot: Int = 3 - - def commonDataSalt(viewIndex: Int): Salt = - Salt.tryDeriveSalt( - transactionSeed, - viewIndex * numberOfLeavesPerView + numberOfLeavesAtTransactionRoot + 0, - cryptoOps, - ) - def participantDataSalt(viewIndex: Int): Salt = - Salt.tryDeriveSalt( - transactionSeed, - viewIndex * numberOfLeavesPerView + numberOfLeavesAtTransactionRoot + 1, - cryptoOps, - ) - - val lfTransactionSeed: LfHash = LfHash.deriveTransactionSeed( - ExampleTransactionFactory.submissionSeed, - ExampleTransactionFactory.submittingParticipant.toLf, - preparationTime.toLf, - ) - - def deriveNodeSeed(path: Int*): LfHash = - path.foldLeft(lfTransactionSeed)((seed, i) => LfHash.deriveNodeSeed(seed, i)) - - def discriminator(nodeSeed: LfHash, stakeholders: Set[LfPartyId]): LfHash = - LfHash.deriveContractDiscriminator(nodeSeed, preparationTime.toLf, stakeholders) - - val unicumGenerator = new UnicumGenerator(cryptoOps) - - def saltAndUnicum( - viewPosition: ViewPosition, - viewIndex: Int, - createIndex: Int, - suffixedContractInstance: LfThinContractInst, - metadata: ContractMetadata, - ): (Salt, Unicum) = { - val viewParticipantDataSalt = participantDataSalt(viewIndex) - val (contractSalt, unicum) = unicumGenerator - .generateSaltAndUnicum( - psid, - mediatorGroup, - transactionUuid, - viewPosition, - viewParticipantDataSalt, - createIndex, - CreationTime.CreatedAt(ledgerTime.toLf), - metadata, - asSerializableRaw(suffixedContractInstance), - cantonContractIdVersion, - ) - - contractSalt.unwrap -> unicum - } - - def fromDiscriminator( - viewPosition: ViewPosition, - viewIndex: Int, - createIndex: Int, - suffixedContractInstance: LfThinContractInst, - discriminator: LfHash, - signatories: Set[LfPartyId] = Set.empty, - observers: Set[LfPartyId] = Set.empty, - maybeKeyWithMaintainers: Option[protocol.LfGlobalKeyWithMaintainers] = None, - ): (Salt, LfContractId) = { - val metadata = ContractMetadata.tryCreate( - signatories, - signatories ++ observers, - maybeKeyWithMaintainers.map(LfVersioned(transactionVersion, _)), - ) - val (salt, unicum) = - saltAndUnicum( - 
viewPosition, - viewIndex, - createIndex, - suffixedContractInstance, - metadata, - ) - salt -> cantonContractIdVersion.fromDiscriminator(discriminator, unicum) - } - - def rootViewPosition(index: Int, total: Int): ViewPosition = - ViewPosition(List(MerkleSeq.indicesFromSeq(total)(index))) - - def subViewIndex(index: Int, total: Int): MerklePathElement = - TransactionSubviews.indices(total)(index) - - private def viewInternal( - node: LfActionNode, - viewConfirmationParameters: ViewConfirmationParameters, - viewIndex: Int, - consumed: Set[LfContractId], - coreInputs: Seq[SerializableContract], - created: Seq[SerializableContract], - resolvedKeys: Map[LfGlobalKey, SerializableKeyResolution], - seed: Option[LfHash], - packagePreference: Set[LfPackageId], - subviews: Seq[TransactionView], - ): TransactionView = { - val viewCommonData = - ViewCommonData.tryCreate(cryptoOps)( - viewConfirmationParameters, - commonDataSalt(viewIndex), - protocolVersion, - ) - - val createWithSerialization = created.map { contract => - val coid = contract.contractId - CreatedContract.tryCreate(contract, consumed.contains(coid), rolledBack = false) - } - - val coreInputContracts = coreInputs.map { contract => - val coid = contract.contractId - coid -> InputContract(contract, consumed.contains(coid)) - }.toMap - - val createdInSubviews = (for { - childView <- subviews - subView <- childView.flatten - createdContract <- subView.viewParticipantData.tryUnwrap.createdCore - } yield createdContract.contract.contractId).toSet - - val createdInSubviewArchivedInCore = consumed.intersect(createdInSubviews) - - val actionDescription = - ActionDescription.tryFromLfActionNode( - LfTransactionUtil.lightWeight(node), - seed, - packagePreference = packagePreference, - protocolVersion, - ) - - val viewParticipantData = ViewParticipantData.tryCreate(cryptoOps)( - coreInputContracts, - createWithSerialization, - createdInSubviewArchivedInCore, - resolvedKeys.fmap(LfVersioned(transactionVersion, _)), - actionDescription, - RollbackContext.empty, - participantDataSalt(viewIndex), - protocolVersion, - ) - - val subViews = TransactionSubviews(subviews)(protocolVersion, cryptoOps) - TransactionView.tryCreate(cryptoOps)( - viewCommonData, - viewParticipantData, - subviews = subViews, - protocolVersion, - ) - } - - def view( - node: LfActionNode, - viewIndex: Int, - consumed: Set[LfContractId], - coreInputs: Seq[SerializableContract], - created: Seq[SerializableContract], - resolvedKeys: Map[LfGlobalKey, SerializableKeyResolution], - seed: Option[LfHash], - isRoot: Boolean, - packagePreference: Set[LfPackageId], - subviews: TransactionView* - ): TransactionView = { - - val submittingAdminPartyO = - Option.when(isRoot)(submitterMetadata.submittingParticipant.adminParty.toLf) - val (rawInformeesWithParticipantData, rawThreshold) = - Await.result( - TransactionViewDecompositionFactory - .informeesParticipantsAndThreshold(node, topologySnapshot, submittingAdminPartyO) - .failOnShutdownTo(new Exception("Aborted due to shutdown")), - 10.seconds, - ) - val rawInformees = rawInformeesWithParticipantData.fmap { case (_, weight) => weight } - val viewConfirmationParameters = ViewConfirmationParameters.create(rawInformees, rawThreshold) - - viewInternal( - node, - viewConfirmationParameters, - viewIndex, - consumed, - coreInputs, - created, - resolvedKeys, - seed, - packagePreference, - subviews, - ) - } - - def viewWithInformeesMerge( - node: LfActionNode, - nodesToMerge: Seq[LfActionNode], - viewIndex: Int, - consumed: Set[LfContractId], - 
coreInputs: Seq[SerializableContract], - created: Seq[SerializableContract], - resolvedKeys: Map[LfGlobalKey, SerializableKeyResolution], - seed: Option[LfHash], - isRoot: Boolean, - packagePreference: Set[LfPackageId], - subviews: TransactionView* - ): TransactionView = { - - val viewConfirmationParametersToMerge = (node +: nodesToMerge).map { nodeToMerge => - val (rawInformeesWithParticipantData, rawThreshold) = - Await.result( - TransactionViewDecompositionFactory - .informeesParticipantsAndThreshold( - nodeToMerge, - topologySnapshot, - Option.when(isRoot && nodeToMerge == node)( - submitterMetadata.submittingParticipant.adminParty.toLf - ), - ) - .failOnShutdownTo(new Exception("Aborted due to shutdown")), - 10.seconds, - ) - val rawInformees = rawInformeesWithParticipantData.fmap { case (_, weight) => weight } - ViewConfirmationParameters.create(rawInformees, rawThreshold) - } - - val viewConfirmationParameters = - ViewConfirmationParameters.tryCreate( - viewConfirmationParametersToMerge - .flatMap(_.informees) - .toSet, - viewConfirmationParametersToMerge - .flatMap( - _.quorums - ) - .distinct, - ) - - viewInternal( - node, - viewConfirmationParameters, - viewIndex, - consumed, - coreInputs, - created, - resolvedKeys, - seed, - packagePreference, - subviews, - ) - } - - def mkMetadata(seeds: Map[LfNodeId, LfHash] = Map.empty): TransactionMetadata = - TransactionMetadata(ledgerTime, preparationTime, seeds) - - def versionedTransactionWithSeeds( - rootIndices: Seq[Int], - nodes: LfNode* - ): (LfVersionedTransaction, TransactionMetadata) = { - val tx = transaction(rootIndices, nodes*) - val seeds = inventSeeds(tx) - (tx, mkMetadata(seeds)) - } - - val submitterMetadata: SubmitterMetadata = - SubmitterMetadata( - NonEmpty(Set, submitter), - userId, - commandId, - submittingParticipant, - Salt.tryDeriveSalt(transactionSeed, 0, cryptoOps), - DefaultDamlValues.submissionId().some, - DeduplicationDuration(JDuration.ofSeconds(100)), - ledgerTime.plusSeconds(100), - None, - cryptoOps, - protocolVersion, - ) - - val commonMetadata: CommonMetadata = - CommonMetadata - .create(cryptoOps)( - psid, - mediatorGroup, - Salt.tryDeriveSalt(transactionSeed, 1, cryptoOps), - transactionUuid, - ) - - val participantMetadata: ParticipantMetadata = - ParticipantMetadata(cryptoOps)( - ledgerTime, - preparationTime, - Some(workflowId), - Salt.tryDeriveSalt(transactionSeed, 2, cryptoOps), - protocolVersion, - ) - - def genTransactionTree(rootViews: TransactionView*): GenTransactionTree = - GenTransactionTree.tryCreate(cryptoOps)( - submitterMetadata, - commonMetadata, - participantMetadata, - MerkleSeq.fromSeq(cryptoOps, protocolVersion)(rootViews), - ) - - def blindedForInformeeTree( - view: TransactionView, - subviews: MerkleTree[TransactionView]* - ): TransactionView = - view match { - case TransactionView(viewCommonData, viewParticipantData, _) => - val subViews = - TransactionSubviews(subviews)( - protocolVersion, - cryptoOps, - ) - TransactionView.tryCreate(cryptoOps)( - viewCommonData, - blinded(viewParticipantData), - subviews = subViews, - protocolVersion, - ) - } - - def mkFullInformeeTree(rootViews: MerkleTree[TransactionView]*): FullInformeeTree = - FullInformeeTree.tryCreate( - GenTransactionTree.tryCreate(cryptoOps)( - submitterMetadata, - commonMetadata, - blinded(participantMetadata), - MerkleSeq.fromSeq(cryptoOps, protocolVersion)(rootViews), - ), - protocolVersion, - ) - - def rootTransactionViewTree(rootViews: MerkleTree[TransactionView]*): FullTransactionViewTree = - 
FullTransactionViewTree.tryCreate( - GenTransactionTree.tryCreate(cryptoOps)( - submitterMetadata, - commonMetadata, - participantMetadata, - MerkleSeq.fromSeq(cryptoOps, protocolVersion)(rootViews), - ) - ) - - def leafsBlinded(view: TransactionView, subviews: MerkleTree[TransactionView]*): TransactionView = - view match { - case TransactionView(viewCommonData, viewParticipantData, _) => - val subViews = - TransactionSubviews(subviews)( - protocolVersion, - cryptoOps, - ) - TransactionView.tryCreate(cryptoOps)( - blinded(viewCommonData), - blinded(viewParticipantData), - subviews = subViews, - protocolVersion, - ) - } - - def nonRootTransactionViewTree(rootViews: MerkleTree[TransactionView]*): FullTransactionViewTree = - FullTransactionViewTree.tryCreate( - GenTransactionTree.tryCreate(cryptoOps)( - blinded(submitterMetadata), - commonMetadata, - participantMetadata, - MerkleSeq.fromSeq(cryptoOps, protocolVersion)(rootViews), - ) - ) - - // ExampleTransactions - - case object EmptyTransaction extends ExampleTransaction { - - override def keyResolver: LfKeyResolver = Map.empty - - override def cryptoOps: HashOps with RandomOps = ExampleTransactionFactory.this.cryptoOps - - override def toString: String = "empty transaction" - - override def versionedUnsuffixedTransaction: LfVersionedTransaction = transaction(Seq.empty) - - override def rootViewDecompositions: Seq[NewView] = Seq.empty - - override def rootViews: Seq[TransactionView] = Seq.empty - - override def viewWithSubviews: Seq[(TransactionView, Seq[TransactionView])] = Seq.empty - - override def transactionTree: GenTransactionTree = genTransactionTree() - - override def fullInformeeTree: FullInformeeTree = mkFullInformeeTree() - - override def reinterpretedSubtransactions: Seq[ - ( - FullTransactionViewTree, - (LfVersionedTransaction, TransactionMetadata, LfKeyResolver), - Witnesses, - ) - ] = - Seq.empty - - override def rootTransactionViewTrees: Seq[FullTransactionViewTree] = Seq.empty - - override def versionedSuffixedTransaction: LfVersionedTransaction = - LfVersionedTransaction( - version = transactionVersion, - roots = ImmArray.empty, - nodes = HashMap.empty, - ) - - override def metadata: TransactionMetadata = mkMetadata() - } - - abstract class SingleNode(val nodeSeed: Option[LfHash]) extends ExampleTransaction { - override def cryptoOps: HashOps & RandomOps = ExampleTransactionFactory.this.cryptoOps - - def lfContractId: LfContractId - - def contractId: LfContractId - - def salt: Salt - - def nodeId: LfNodeId - - protected def contractInstance: LfThinContractInst - - def lfNode: LfActionNode - - def node: LfActionNode - - def reinterpretedNode: LfActionNode - - def consuming: Boolean - - def created: Seq[SerializableContract] = node match { - case n: LfNodeCreate => - Seq( - asSerializable( - n.coid, - contractInstance, - metadataFromCreate(n), - salt = salt, - ) - ) - case _ => Seq.empty - } - - def used: Seq[SerializableContract] = node match { - case n: LfNodeExercises => - Seq( - asSerializable( - n.targetCoid, - contractInstance, - metadataFromExercise(n), - salt = salt, - ) - ) - case n: LfNodeFetch => - Seq( - asSerializable( - n.coid, - contractInstance, - metadataFromFetch(n), - salt = salt, - ) - ) - case _ => Seq.empty - } - - def consumed: Set[LfContractId] = if (consuming) used.map(_.contractId).toSet else Set.empty - - def metadata: TransactionMetadata = - mkMetadata(nodeSeed.fold(Map.empty[LfNodeId, LfHash])(seed => Map(nodeId -> seed))) - - override def keyResolver: LfKeyResolver = - 
node.gkeyOpt.fold(Map.empty: LfKeyResolver)(k => Map(k -> LfTransactionUtil.contractId(node))) - - override lazy val versionedUnsuffixedTransaction: LfVersionedTransaction = - transaction(Seq(0), lfNode) - - override lazy val rootViewDecompositions: Seq[NewView] = - Seq( - awaitCreateNewView( - lfNode, - nodeSeed, - nodeId, - Seq.empty, - isRoot = true, - ) - ) - - lazy val view0: TransactionView = - view(node, 0, consumed, used, created, Map.empty, nodeSeed, isRoot = true, Set.empty) - - override lazy val rootViews: Seq[TransactionView] = Seq(view0) - - override lazy val viewWithSubviews: Seq[(TransactionView, Seq[TransactionView])] = Seq( - view0 -> Seq(view0) - ) - - override lazy val transactionTree: GenTransactionTree = genTransactionTree(view0) - - override lazy val fullInformeeTree: FullInformeeTree = mkFullInformeeTree( - blindedForInformeeTree(view0) - ) - - override lazy val rootTransactionViewTrees: Seq[FullTransactionViewTree] = transactionViewTrees - - override lazy val versionedSuffixedTransaction: LfVersionedTransaction = - transaction(Seq(0), node) - - override lazy val reinterpretedSubtransactions: Seq[ - ( - FullTransactionViewTree, - (LfVersionedTransaction, TransactionMetadata, LfKeyResolver), - Witnesses, - ) - ] = - Seq( - ( - rootTransactionViewTree(view0), - (transaction(Seq(0), reinterpretedNode), metadata, keyResolver), - Witnesses( - NonEmpty( - List, - view0.viewCommonData.tryUnwrap.viewConfirmationParameters.informees, - ) - ), - ) - ) - } - - /** Single create. By default, [[submitter]] is the only signatory and [[observer]] the only - * observer. - * - * @param seed - * the node seed for the create node, used to derive the contract id - * @param capturedContractIds - * contract ids captured by the contract instance - * @throws IllegalArgumentException - * if [[unsuffixedCapturedContractIds]] and [[capturedContractIds]] have different sizes - */ - case class SingleCreate( - seed: LfHash, - override val nodeId: LfNodeId = LfNodeId(0), - viewPosition: ViewPosition = rootViewPosition(0, 1), - viewIndex: Int = 0, - capturedContractIds: Seq[LfContractId] = Seq.empty, - unsuffixedCapturedContractIds: Seq[LfContractId] = Seq.empty, - signatories: Set[LfPartyId] = Set(submitter), - observers: Set[LfPartyId] = Set(observer), - key: Option[LfGlobalKeyWithMaintainers] = None, - ) extends SingleNode(Some(seed)) { - - require( - capturedContractIds.lengthCompare(unsuffixedCapturedContractIds) == 0, - "captured contract IDs must have the same length", - ) - - override val contractInstance: LfThinContractInst = - ExampleTransactionFactory.contractInstance(capturedContractIds) - - val serializableContractInstance: SerializableRawContractInstance = asSerializableRaw( - contractInstance - ) - - val lfContractId: LfContractId = LfContractId.V1(discriminator, Bytes.Empty) - - val (salt, contractId) = - fromDiscriminator( - viewPosition, - viewIndex, - 0, - contractInstance, - discriminator, - signatories, - observers, - key, - ) - - private def discriminator: LfHash = - ExampleTransactionFactory.this.discriminator(seed, signatories union observers) - - override def toString: String = { - val captured = - if (capturedContractIds.nonEmpty) s", capturing ${capturedContractIds.size} ids" else "" - - s"single create$captured" - } - - override def lfNode: LfActionNode = - createNode( - lfContractId, - ExampleTransactionFactory.contractInstance(unsuffixedCapturedContractIds), - signatories, - observers, - key, - ) - - override def node: LfActionNode = - createNode(contractId, 
contractInstance, signatories, observers, key) - - override def reinterpretedNode: LfActionNode = - createNode(lfContractId, contractInstance, signatories, observers, key) - - override def consuming: Boolean = false - - } - - /** Single fetch with [[submitter]] as signatory and [[observer]] as observer and acting party. - * - * @param lfContractId - * id of the fetched contract - * @param contractId - * id of the fetched contract - * @param fetchedContractInstance - * instance of the used contract. - */ - @SuppressWarnings(Array("org.wartremover.warts.IsInstanceOf")) - case class SingleFetch( - override val nodeId: LfNodeId = LfNodeId(0), - lfContractId: LfContractId = suffixedId(-1, 0), - contractId: LfContractId = suffixedId(-1, 0), - fetchedContractInstance: LfThinContractInst = contractInstance(), - version: LfLanguageVersion = transactionVersion, - salt: Salt = TestSalt.generateSalt(random.nextInt()), - ) extends SingleNode(None) { - override def created: Seq[SerializableContract] = Seq.empty - - override val contractInstance: LfThinContractInst = fetchedContractInstance - - override def toString: String = "single fetch" - - private def genNode(id: LfContractId) = - fetchNode( - id, - actingParties = Set(observer), - signatories = Set(submitter), - observers = Set(observer), - version = version, - ) - - override def node: LfActionNode = genNode(contractId) - override def lfNode: LfActionNode = genNode(lfContractId) - override def reinterpretedNode: LfActionNode = node - - override def consuming: Boolean = false - } - - /** Single consuming exercise without children with [[submitter]] as signatory, acting party and - * controller, and [[observer]] as observer. - * - * @param lfContractId - * id of the exercised contract - * @param contractId - * id of the exercised contract - * @param inputContractInstance - * instance of the used contract. - */ - @SuppressWarnings(Array("org.wartremover.warts.IsInstanceOf")) - case class SingleExercise( - seed: LfHash, - override val nodeId: LfNodeId = LfNodeId(0), - lfContractId: LfContractId = suffixedId(-1, 0), - contractId: LfContractId = suffixedId(-1, 0), - inputContractInstance: LfThinContractInst = contractInstance(), - salt: Salt = TestSalt.generateSalt(random.nextInt()), - ) extends SingleNode(Some(seed)) { - override def toString: String = "single exercise" - - override val contractInstance: LfThinContractInst = inputContractInstance - - private def genNode(id: LfContractId): LfNodeExercises = - exerciseNodeWithoutChildren( - targetCoid = id, - actingParties = Set(submitter), - signatories = Set(submitter), - observers = Set(observer), - ) - - override def node: LfNodeExercises = genNode(contractId) - override def lfNode: LfNodeExercises = genNode(lfContractId) - override def reinterpretedNode: LfNodeExercises = node - - override def consuming: Boolean = true - } - - /** Single consuming exercise without children without any acting party or signatory, and - * [[observer]] as observer. - * - * @param lfContractId - * id of the exercised contract - * @param contractId - * id of the exercised contract - * @param inputContractInstance - * instance of the used contract. 
- */ - @SuppressWarnings(Array("org.wartremover.warts.IsInstanceOf")) - case class SingleExerciseWithoutConfirmingParties( - seed: LfHash, - override val nodeId: LfNodeId = LfNodeId(0), - lfContractId: LfContractId = suffixedId(-1, 0), - contractId: LfContractId = suffixedId(-1, 0), - inputContractInstance: LfThinContractInst = contractInstance(), - salt: Salt = TestSalt.generateSalt(random.nextInt()), - ) extends SingleNode(Some(seed)) { - override def toString: String = "single exercise without confirming parties" - - override val contractInstance: LfThinContractInst = inputContractInstance - - private def genNode(id: LfContractId): LfNodeExercises = - exerciseNodeWithoutChildren( - targetCoid = id, - actingParties = Set.empty, - signatories = Set.empty, - observers = Set(observer), - ) - - override def node: LfNodeExercises = genNode(contractId) - override def lfNode: LfNodeExercises = genNode(lfContractId) - override def reinterpretedNode: LfNodeExercises = node - - override def consuming: Boolean = true - } - - @SuppressWarnings(Array("org.wartremover.warts.IsInstanceOf")) - case class UpgradedSingleExercise( - seed: LfHash, - nodeId: LfNodeId = LfNodeId(0), - lfContractId: LfContractId = suffixedId(-1, 0), - contractId: LfContractId = suffixedId(-1, 0), - contractInstance: LfThinContractInst = ExampleTransactionFactory.contractInstance(), - salt: Salt = TestSalt.generateSalt(random.nextInt()), - consuming: Boolean = true, - ) extends SingleNode(Some(seed)) { - val upgradedTemplateId: canton.protocol.LfTemplateId = - templateId.copy(pkg = upgradePackageId) - private def genNode(id: LfContractId): LfNodeExercises = - exerciseNode(targetCoid = id, templateId = upgradedTemplateId, signatories = Set(submitter)) - override def node: LfNodeExercises = genNode(contractId) - override def lfNode: LfNodeExercises = genNode(lfContractId) - override def reinterpretedNode: LfNodeExercises = node - } - - @SuppressWarnings(Array("org.wartremover.warts.IsInstanceOf")) - case class SingleExerciseWithNonstakeholderActor( - seed: LfHash, - override val nodeId: LfNodeId = LfNodeId(0), - lfContractId: LfContractId = suffixedId(-1, 0), - contractId: LfContractId = suffixedId(-1, 0), - inputContractInstance: LfThinContractInst = contractInstance(), - salt: Salt = TestSalt.generateSalt(random.nextInt()), - ) extends SingleNode(Some(seed)) { - - override val contractInstance: LfThinContractInst = inputContractInstance - - private def genNode(id: LfContractId): LfActionNode = - exerciseNodeWithoutChildren( - id, - actingParties = Set(submitter), - signatories = Set(signatory), - observers = Set(observer), - ) - override def node: LfActionNode = genNode(contractId) - override def lfNode: LfActionNode = genNode(lfContractId) - override def reinterpretedNode: LfActionNode = node - - override def consuming: Boolean = true - - override def toString: String = "single exercise with a non-stakeholder actor" - - } - - /** Transaction structure: 0. create - * 1. create capturing 0. 2. fetch 3. fetch 0. 4. exercise 5. exercise 1.
- */ - case object MultipleRoots extends ExampleTransaction { - - override def cryptoOps: HashOps with RandomOps = ExampleTransactionFactory.this.cryptoOps - - override def toString: String = "multiple roots" - - private val rootViewCount: Int = 6 - - private val create0: SingleCreate = - SingleCreate( - seed = deriveNodeSeed(0), - nodeId = LfNodeId(0), - viewPosition = rootViewPosition(0, rootViewCount), - ) - private val create1: SingleCreate = SingleCreate( - seed = deriveNodeSeed(1), - nodeId = LfNodeId(1), - viewIndex = 1, - viewPosition = rootViewPosition(1, rootViewCount), - capturedContractIds = Seq(suffixedId(-1, 1), create0.contractId), - unsuffixedCapturedContractIds = Seq(suffixedId(-1, 1), create0.lfContractId), - ) - private val fetch2: SingleFetch = SingleFetch(LfNodeId(2), suffixedId(-1, 2), suffixedId(-1, 2)) - private val fetch3: SingleFetch = - SingleFetch( - nodeId = LfNodeId(3), - lfContractId = create0.lfContractId, - contractId = create0.contractId, - fetchedContractInstance = create0.contractInstance, - version = - LfLanguageVersion.v2_dev, // ensure we test merging transactions with different versions - salt = create0.salt, - ) - private val exercise4: SingleExercise = - SingleExercise(deriveNodeSeed(4), LfNodeId(4), suffixedId(-1, 4), suffixedId(-1, 4)) - private val exercise5: SingleExercise = SingleExercise( - seed = deriveNodeSeed(5), - nodeId = LfNodeId(5), - lfContractId = create1.lfContractId, - contractId = create1.contractId, - inputContractInstance = create1.contractInstance, - salt = create1.salt, - ) - - private val examples: List[SingleNode] = - List[SingleNode](create0, create1, fetch2, fetch3, exercise4, exercise5) - require(examples.sizeIs == rootViewCount) - - override def metadata: TransactionMetadata = mkMetadata( - examples.zipWithIndex.mapFilter { case (node, index) => - node.nodeSeed.map(seed => LfNodeId(index) -> seed) - }.toMap - ) - - override def versionedUnsuffixedTransaction: LfVersionedTransaction = - transaction(examples.map(_.nodeId.index), examples.map(_.lfNode)*) - - override def keyResolver: LfKeyResolver = Map.empty // No keys involved here - - override def rootViewDecompositions: Seq[NewView] = - examples.flatMap(_.rootViewDecompositions) - - override lazy val rootViews: Seq[TransactionView] = examples.zipWithIndex.map { - case (ex, index) => - view( - ex.node, - index, - ex.consumed, - ex.used, - ex.created, - Map.empty, - ex.nodeSeed, - isRoot = true, - Set.empty, - ) - } - - override def viewWithSubviews: Seq[(TransactionView, Seq[TransactionView])] = - rootViews.map(view => view -> Seq(view)) - - override def transactionTree: GenTransactionTree = genTransactionTree(rootViews*) - - override def fullInformeeTree: FullInformeeTree = - mkFullInformeeTree(rootViews.map(blindedForInformeeTree(_))*) - - override def reinterpretedSubtransactions: Seq[ - ( - FullTransactionViewTree, - (LfVersionedTransaction, TransactionMetadata, LfKeyResolver), - Witnesses, - ) - ] = { - val blindedRootViews = rootViews.map(blinded) - examples.zipWithIndex.map { case (example, i) => - val rootViewsWithOneViewUnblinded = blindedRootViews.updated(i, rootViews(i)) - ( - rootTransactionViewTree(rootViewsWithOneViewUnblinded*), - (transactionFrom(Seq(i), i, example.reinterpretedNode), example.metadata, Map.empty), - Witnesses( - NonEmpty( - List, - example.view0.viewCommonData.tryUnwrap.viewConfirmationParameters.informees, - ) - ), - ) - } - } - - override def rootTransactionViewTrees: Seq[FullTransactionViewTree] = transactionViewTrees - - override def 
versionedSuffixedTransaction: LfVersionedTransaction = - transaction(0 until rootViewCount, examples.map(_.node)*) - } - - /** Transaction structure: 0. create - * 1. exercise absolute - * 1.0. create - * 1.1. fetch 1.0. - * 1.2. create - * 1.3. exercise 1.2. - * - * In this specific scenario we make sure informees and quorums for action nodes 1.0, 1.1. and - * 1.3 are correctly merged to the parent view (v1): 0. View0 - * 1. View1 - * 1.2 View10 - */ - case object MultipleRootsAndSimpleViewNesting extends ExampleTransaction { - override def cryptoOps: HashOps & RandomOps = ExampleTransactionFactory.this.cryptoOps - - override def toString: String = "transaction with multiple roots and a simple view nesting" - - val create0Agreement = "create0" - def create0Inst: LfThinContractInst = contractInstance() - val create0seed: LfHash = deriveNodeSeed(0) - val create0disc: LfHash = discriminator(create0seed, Set(submitter, observer)) - def genCreate0(cid: LfContractId): LfNodeCreate = - createNode( - cid, - contractInstance = create0Inst, - signatories = Set(submitter), - observers = Set(observer), - ) - val lfCreate0: LfNodeCreate = genCreate0(LfContractId.V1(create0disc)) - - def genExercise1(cid: LfContractId): LfNodeExercises = - exerciseNode( - cid, - children = List(2, 3, 4, 5).map(nodeId), - actingParties = Set(submitter), - signatories = Set(signatory), - observers = Set(submitter), - ) - - val lfExercise1Id: LfContractId = suffixedId(-1, 0) - val lfExercise1: LfNodeExercises = genExercise1(lfExercise1Id) - - def create10Inst: LfThinContractInst = contractInstance() - def create12Inst: LfThinContractInst = contractInstance() - def genCreate10( - cid: LfContractId, - contractInstance: LfThinContractInst, - ): LfNodeCreate = - createNode( - cid, - contractInstance = contractInstance, - signatories = Set(submitter, signatory, signatoryReplica), - ) - - def genCreate12( - cid: LfContractId, - contractInstance: LfThinContractInst, - ): LfNodeCreate = - createNode( - cid, - contractInstance = contractInstance, - signatories = Set(submitter, signatory, extra), - ) - - val create10Agreement = "create10" - val create10seed: LfHash = deriveNodeSeed(1, 0) - val create10disc: LfHash = - discriminator(create10seed, Set(submitter, signatory, signatoryReplica)) - - val lfCreate10: LfNodeCreate = - genCreate10(LfContractId.V1(create10disc), create10Inst) - val create12Agreement = "create12" - val create12seed: LfHash = deriveNodeSeed(1, 2) - val create12disc: LfHash = discriminator(create12seed, Set(submitter, signatory, extra)) - val lfCreate12: LfNodeCreate = - genCreate12(LfContractId.V1(create12disc), create12Inst) - - def genFetch11(cid: LfContractId): LfNodeFetch = - fetchNode( - cid, - actingParties = Set(submitter), - signatories = Set(signatory), - observers = Set(submitter), - ) - val lfFetch11: LfNodeFetch = genFetch11(lfCreate10.coid) - - def genExercise13(cid: LfContractId): LfNodeExercises = - exerciseNode( - cid, - actingParties = Set(submitter), - signatories = Set(signatory), - observers = Set(submitter), - ) - - val lfExercise13Id: LfContractId = suffixedId(-1, 0) - val lfExercise13: LfNodeExercises = genExercise13(lfExercise13Id) - - override lazy val versionedUnsuffixedTransaction: LfVersionedTransaction = - transaction( - Seq(0, 1), - lfCreate0, - lfExercise1, - lfCreate10, - lfFetch11, - lfCreate12, - lfExercise13, - ) - - val exercise1seed: LfHash = deriveNodeSeed(1) - val exercise13seed: LfHash = deriveNodeSeed(1, 3) - - override lazy val metadata: TransactionMetadata = 
mkMetadata( - Map( - LfNodeId(0) -> create0seed, - LfNodeId(1) -> exercise1seed, - LfNodeId(2) -> create10seed, - LfNodeId(4) -> create12seed, - LfNodeId(5) -> exercise13seed, - ) - ) - - override def keyResolver: LfKeyResolver = Map.empty // No keys involved here - - override lazy val rootViewDecompositions: Seq[NewView] = { - val v0 = awaitCreateNewView( - lfCreate0, - Some(create0seed), - LfNodeId(0), - Seq.empty, - isRoot = true, - ) - - val v10 = awaitCreateNewView( - lfCreate12, - Some(create12seed), - LfNodeId(4), - Seq.empty, - isRoot = false, - ) - - /* if running [[com.digitalasset.canton.version.ProtocolVersion.v6]] the create action 1.2 - * spawns a new view because the child's informee participants are not a subset of the parents' - * informee participants (i.e. party <> is hosted in the <>) - */ - val v1TailNodes = Seq( - SameView(lfCreate10, LfNodeId(2), RollbackContext.empty), - SameView(lfFetch11, LfNodeId(3), RollbackContext.empty), - v10, - SameView(LfTransactionUtil.lightWeight(lfExercise13), LfNodeId(5), RollbackContext.empty), - ) - - val v1Pre = - awaitCreateNewView( - LfTransactionUtil.lightWeight(lfExercise1), - Some(exercise1seed), - LfNodeId(1), - v1TailNodes, - isRoot = true, - ) - - /* if running [[com.digitalasset.canton.version.ProtocolVersion.v6]] the merged action nodes' - * informees and quorums for v1 must be added to the parent's informees and quorums - */ - val (v1Informees, v1Quorums) = { - val nodesNotChildren = v1Pre.childViews.flatMap(_.tailNodes.map(_.nodeId)) - - val informeesAux = v1TailNodes.flatMap { - case SameView(lfNode, nodeId, _) if !nodesNotChildren.contains(nodeId) => - lfNode.informeesOfNode - case _ => Set.empty - }.toSet ++ v1Pre.viewConfirmationParameters.informees - - val quorumsAux = - (v1Pre.viewConfirmationParameters.quorums ++ v1TailNodes.mapFilter { - case SameView(lfNode, nodeId, _) if !nodesNotChildren.contains(nodeId) => - val confirmingParties = - LfTransactionUtil.signatoriesOrMaintainers(lfNode) | LfTransactionUtil - .actingParties(lfNode) - Some( - Quorum( - confirmers = confirmingParties.map(pId => pId -> PositiveInt.one).toMap, - threshold = NonNegativeInt.tryCreate(confirmingParties.size), - ) - ) - case _ => None - }).distinct - - (informeesAux, quorumsAux) - } - - val v1 = v1Pre.copy(viewConfirmationParameters = - ViewConfirmationParameters.tryCreate( - v1Informees, - v1Quorums, - ) - ) - - Seq(v0, v1) - } - - // Nodes with translated contract ids - val (salt0Id, create0Id): (Salt, LfContractId) = - fromDiscriminator( - rootViewPosition(0, 2), - 0, - 0, - create0Inst, - create0disc, - signatories = Set(submitter), - observers = Set(observer), - ) - val create0: LfNodeCreate = genCreate0(create0Id) - - val exercise1Agreement = "exercise1" - val exercise1Id: LfContractId = suffixedId(-1, 0) - val exercise1: LfNodeExercises = genExercise1(exercise1Id) - val exercise1Instance: LfThinContractInst = contractInstance() - - val create10SerInst: SerializableRawContractInstance = - asSerializableRaw(create10Inst) - val (salt10Id, create10Id): (Salt, LfContractId) = - fromDiscriminator( - rootViewPosition(1, 2), - 1, - 0, - create10Inst, - create10disc, - signatories = Set(submitter, signatory, signatoryReplica), - ) - val create10: LfNodeCreate = genCreate10(create10Id, create10Inst) - - val fetch11: LfNodeFetch = lfFetch11 - - val (salt12Id, create12Id): (Salt, LfContractId) = - fromDiscriminator( - subViewIndex(0, 1) +: rootViewPosition(1, 2), - 2, - 0, - create12Inst, - create12disc, - signatories = Set(submitter, 
signatory, extra), - ) - val create12: LfNodeCreate = genCreate12(create12Id, create12Inst) - - val exercise13Id: LfContractId = suffixedId(-1, 0) - val exercise13: LfNodeExercises = genExercise13(exercise13Id) - - // Views - val view0: TransactionView = - view( - create0, - 0, - Set.empty, - Seq.empty, - Seq(serializableFromCreate(create0, salt0Id)), - Map.empty, - Some(create0seed), - isRoot = true, - Set.empty, - ) - - val view10: TransactionView = - view( - create12, - 2, - Set.empty, - Seq.empty, - Seq(serializableFromCreate(create12, salt12Id)), - Map.empty, - Some(create12seed), - isRoot = false, - Set.empty, - ) - - val view1: TransactionView = - viewWithInformeesMerge( - exercise1, - Seq[LfActionNode](create10, fetch11, exercise13), - 1, - Set(exercise1Id, exercise13Id), - Seq( - asSerializable( - exercise1Id, - exercise1Instance, - metadataFromExercise(exercise1), - ledgerTime, - ) - ), - Seq(serializableFromCreate(create10, salt10Id)), - Map.empty, - Some(deriveNodeSeed(1)), - isRoot = true, - Set.empty, - view10, - ) - - override lazy val rootViews: Seq[TransactionView] = Seq(view0, view1) - - override lazy val viewWithSubviews: Seq[(TransactionView, Seq[TransactionView])] = - Seq( - view0 -> Seq(view0), - view1 -> Seq(view1, view10), - view10 -> Seq(view10), - ) - - override lazy val transactionTree: GenTransactionTree = genTransactionTree(view0, view1) - - override lazy val fullInformeeTree: FullInformeeTree = - mkFullInformeeTree( - blindedForInformeeTree(view0), - blindedForInformeeTree( - view1, - blindedForInformeeTree(view10), - ), - ) - - val transactionViewTree0: FullTransactionViewTree = - rootTransactionViewTree(view0, blinded(view1)) - - val transactionViewTree1: FullTransactionViewTree = - rootTransactionViewTree(blinded(view0), view1) - - val transactionViewTree10: FullTransactionViewTree = - nonRootTransactionViewTree(blinded(view0), leafsBlinded(view1, view10)) - - val fetch11Abs: LfNodeFetch = genFetch11(create10.coid) - - override lazy val reinterpretedSubtransactions: Seq[ - ( - FullTransactionViewTree, - (LfVersionedTransaction, TransactionMetadata, LfKeyResolver), - Witnesses, - ) - ] = - Seq( - ( - transactionViewTree0, - ( - transaction(Seq(0), lfCreate0), - mkMetadata(seeds.filter(_._1 == LfNodeId(0))), - Map.empty, - ), - Witnesses(NonEmpty(List, transactionViewTree0.informees)), - ), - ( - transactionViewTree1, - ( - transactionFrom( - Seq(1), - 1, - exercise1, - lfCreate10, - lfFetch11, - lfCreate12, - lfExercise13, - ), - mkMetadata( - seeds.filter(seed => Seq(1, 2, 3, 4, 5).map(LfNodeId.apply).contains(seed._1)) - ), - Map.empty, - ), - Witnesses(NonEmpty(List, transactionViewTree1.informees)), - ), - ( - transactionViewTree10, - ( - transactionFrom(Seq(4), 4, lfCreate12), - mkMetadata(seeds.filter(_._1 == LfNodeId(4))), - Map.empty, - ), - Witnesses(NonEmpty(List, transactionViewTree10.informees, transactionViewTree1.informees)), - ), - ) - - override lazy val rootTransactionViewTrees: Seq[FullTransactionViewTree] = - Seq(transactionViewTree0, transactionViewTree1) - - override lazy val versionedSuffixedTransaction: LfVersionedTransaction = - transaction( - Seq(0, 1), - create0, - exercise1, - create10, - fetch11Abs, - create12, - exercise13, - ) - - } - - /** Transaction structure: 0. create - * 1. exercise absolute - * 1.0. create - * 1.1. fetch 1.0. - * 1.2. create - * 1.3. exercise 1.2. - * 1.3.0. create - * 1.3.1. exercise absolute - * 1.3.1.0 create - * - * View structure: 0. View0 - * 1. View1 - * 1.3.0. View10 - * 1.3.1. 
View11 - * 1.3.1.0 View110 - */ - case object MultipleRootsAndViewNestings extends ExampleTransaction { - - override def cryptoOps: HashOps with RandomOps = ExampleTransactionFactory.this.cryptoOps - - override def toString: String = "transaction with multiple roots and view nestings" - - def create0Inst: LfThinContractInst = contractInstance() - val create0seed: LfHash = deriveNodeSeed(0) - val create0disc: LfHash = discriminator(deriveNodeSeed(0), Set(submitter, observer)) - def genCreate0(cid: LfContractId): LfNodeCreate = - createNode( - cid, - contractInstance = create0Inst, - signatories = Set(submitter), - observers = Set(observer), - ) - val lfCreate0: LfNodeCreate = genCreate0(LfContractId.V1(create0disc)) - - def genExercise1(cid: LfContractId): LfNodeExercises = - exerciseNode( - cid, - children = List(2, 3, 4, 5).map(nodeId), - actingParties = Set(submitter), - signatories = Set(signatory), - observers = Set(submitter), - ) - - val lfExercise1Id: LfContractId = suffixedId(-1, 0) - val lfExercise1: LfNodeExercises = genExercise1(lfExercise1Id) - - def create10Inst: LfThinContractInst = contractInstance() - def create12Inst: LfThinContractInst = contractInstance() - def genCreate1x(cid: LfContractId, contractInstance: LfThinContractInst): LfNodeCreate = - createNode( - cid, - contractInstance = contractInstance, - signatories = Set(submitter, signatory), - ) - - val create10seed: LfHash = deriveNodeSeed(1, 0) - val create10disc: LfHash = discriminator(create10seed, Set(submitter, signatory)) - val lfCreate10: LfNodeCreate = genCreate1x(LfContractId.V1(create10disc), create10Inst) - val create12seed: LfHash = deriveNodeSeed(1, 2) - val create12disc: LfHash = discriminator(create12seed, Set(submitter, signatory)) - val lfCreate12: LfNodeCreate = genCreate1x(LfContractId.V1(create12disc), create12Inst) - - def genFetch11(cid: LfContractId): LfNodeFetch = - fetchNode( - cid, - actingParties = Set(submitter), - signatories = Set(signatory), - observers = Set(submitter), - ) - val lfFetch11: LfNodeFetch = genFetch11(lfCreate10.coid) - - def genExercise13(cid: LfContractId): LfNodeExercises = - exerciseNode( - cid, - children = List(nodeId(6), nodeId(7)), - actingParties = Set(submitter), - signatories = Set(signatory), - observers = Set(submitter), - ) - val lfExercise13: LfNodeExercises = genExercise13(lfCreate12.coid) - - def create130Inst: LfThinContractInst = contractInstance() - val create130seed: LfHash = deriveNodeSeed(1, 3, 0) - def genCreate130(cid: LfContractId): LfNodeCreate = - createNode( - cid, - contractInstance = create130Inst, - signatories = Set(signatory), - observers = Set(extra), - ) - val create130disc: LfHash = discriminator(create130seed, Set(signatory, extra)) - val lfCreate130: LfNodeCreate = genCreate130(LfContractId.V1(create130disc)) - - def genExercise131(cid: LfContractId): LfNodeExercises = - exerciseNode( - cid, - children = List(nodeId(8)), - actingParties = Set(signatory), - signatories = Set(submitter), - observers = Set(observer), - ) - - val lfExercise131Id: LfContractId = suffixedId(-1, 1) - val lfExercise131: LfNodeExercises = genExercise131(lfExercise131Id) - - def create1310Inst: LfThinContractInst = contractInstance() - val create1310seed: LfHash = deriveNodeSeed(1, 3, 1, 0) - def genCreate1310(cid: LfContractId): LfNodeCreate = - createNode( - cid, - contractInstance = create1310Inst, - signatories = Set(submitter), - observers = Set(extra), - ) - val create1310disc: LfHash = discriminator(create1310seed, Set(submitter, extra)) - val 
lfCreate1310: LfNodeCreate = genCreate1310(LfContractId.V1(create1310disc)) - - override lazy val versionedUnsuffixedTransaction: LfVersionedTransaction = - transaction( - Seq(0, 1), - lfCreate0, - lfExercise1, - lfCreate10, - lfFetch11, - lfCreate12, - lfExercise13, - lfCreate130, - lfExercise131, - lfCreate1310, - ) - - val exercise1seed: LfHash = deriveNodeSeed(1) - val exercise13seed: LfHash = deriveNodeSeed(1, 3) - val exercise131seed: LfHash = deriveNodeSeed(1, 3, 1) - - override lazy val metadata: TransactionMetadata = mkMetadata( - Map( - LfNodeId(0) -> create0seed, - LfNodeId(1) -> exercise1seed, - LfNodeId(2) -> create10seed, - LfNodeId(4) -> create12seed, - LfNodeId(5) -> exercise13seed, - LfNodeId(6) -> create130seed, - LfNodeId(7) -> exercise131seed, - LfNodeId(8) -> create1310seed, - ) - ) - - override def keyResolver: LfKeyResolver = Map.empty // No keys involved here - - override lazy val rootViewDecompositions: Seq[NewView] = { - val v0 = awaitCreateNewView( - lfCreate0, - Some(create0seed), - LfNodeId(0), - Seq.empty, - isRoot = true, - ) - - val v10 = awaitCreateNewView( - lfCreate130, - Some(create130seed), - LfNodeId(6), - Seq.empty, - isRoot = false, - ) - - val v110 = awaitCreateNewView( - lfCreate1310, - Some(create1310seed), - LfNodeId(8), - Seq.empty, - isRoot = false, - ) - - val v11 = awaitCreateNewView( - LfTransactionUtil.lightWeight(lfExercise131), - Some(exercise131seed), - LfNodeId(7), - Seq(v110), - isRoot = false, - ) - - val v1TailNodes = Seq( - SameView(lfCreate10, LfNodeId(2), RollbackContext.empty), - SameView(lfFetch11, LfNodeId(3), RollbackContext.empty), - SameView(lfCreate12, LfNodeId(4), RollbackContext.empty), - SameView(LfTransactionUtil.lightWeight(lfExercise13), LfNodeId(5), RollbackContext.empty), - v10, - v11, - ) - val v1 = - awaitCreateNewView( - LfTransactionUtil.lightWeight(lfExercise1), - Some(exercise1seed), - LfNodeId(1), - v1TailNodes, - isRoot = true, - ) - - Seq(v0, v1) - } - - // Nodes with translated contract ids - val create0SerInst: SerializableRawContractInstance = - asSerializableRaw(create0Inst) - val (salt0Id, create0Id): (Salt, LfContractId) = - fromDiscriminator( - rootViewPosition(0, 2), - 0, - 0, - create0Inst, - create0disc, - signatories = Set(submitter), - observers = Set(observer), - ) - val create0: LfNodeCreate = genCreate0(create0Id) - - val exercise1Id: LfContractId = suffixedId(-1, 0) - val exercise1: LfNodeExercises = genExercise1(exercise1Id) - val exercise1Instance: LfThinContractInst = contractInstance() - - val create10SerInst: SerializableRawContractInstance = asSerializableRaw(create10Inst) - val (salt10Id, create10Id): (Salt, LfContractId) = - fromDiscriminator( - rootViewPosition(1, 2), - 1, - 0, - create10Inst, - create10disc, - signatories = Set(submitter, signatory), - ) - val create10: LfNodeCreate = genCreate1x(create10Id, create10Inst) - - val fetch11: LfNodeFetch = lfFetch11 - - val create12SerInst: SerializableRawContractInstance = - asSerializableRaw(create12Inst) - val (salt12Id, create12Id): (Salt, LfContractId) = - fromDiscriminator( - rootViewPosition(1, 2), - 1, - 1, - create12Inst, - create12disc, - signatories = Set(submitter, signatory), - ) - val create12: LfNodeCreate = genCreate1x(create12Id, create12Inst) - - val create130SerInst: SerializableRawContractInstance = asSerializableRaw(create130Inst) - val (salt130Id, create130Id): (Salt, LfContractId) = - fromDiscriminator( - subViewIndex(0, 2) +: rootViewPosition(1, 2), - 2, - 0, - create130Inst, - create130disc, - signatories = 
Set(signatory), - observers = Set(extra), - ) - val create130: LfNodeCreate = genCreate130(create130Id) - - val exercise131Id: LfContractId = suffixedId(-1, 1) - val exercise131: LfNodeExercises = genExercise131(exercise131Id) - val exercise131Instance: LfThinContractInst = contractInstance() - - val create1310SerInst: SerializableRawContractInstance = asSerializableRaw(create1310Inst) - val (salt1310Id, create1310Id): (Salt, LfContractId) = - fromDiscriminator( - subViewIndex(0, 1) +: subViewIndex(1, 2) +: rootViewPosition(1, 2), - 4, - 0, - create1310Inst, - create1310disc, - signatories = Set(submitter), - observers = Set(extra), - ) - val create1310: LfNodeCreate = genCreate1310(create1310Id) - - // Views - val view0: TransactionView = - view( - create0, - 0, - Set.empty, - Seq.empty, - Seq(serializableFromCreate(create0, salt0Id)), - Map.empty, - Some(create0seed), - isRoot = true, - Set.empty, - ) - val view10: TransactionView = - view( - create130, - 2, - Set.empty, - Seq.empty, - Seq(serializableFromCreate(create130, salt130Id)), - Map.empty, - Some(create130seed), - isRoot = false, - Set.empty, - ) - val view110: TransactionView = - view( - create1310, - 4, - Set.empty, - Seq.empty, - Seq(serializableFromCreate(create1310, salt1310Id)), - Map.empty, - Some(create1310seed), - isRoot = false, - Set.empty, - ) - - val view11: TransactionView = - view( - exercise131, - 3, - Set(exercise131Id), - Seq( - asSerializable( - contractId = exercise131Id, - contractInstance = exercise131Instance, - metadata = metadataFromExercise(exercise131), - ledgerTime = ledgerTime, - ) - ), - Seq.empty, - Map.empty, - Some(deriveNodeSeed(1, 3, 1)), - isRoot = false, - Set.empty, - view110, - ) - - val view1: TransactionView = - view( - exercise1, - 1, - Set(exercise1Id, create12Id), - Seq( - asSerializable( - exercise1Id, - exercise1Instance, - metadataFromExercise(exercise1), - ledgerTime, - ) - ), - Seq( - serializableFromCreate(create10, salt10Id), - serializableFromCreate(create12, salt12Id), - ), - Map.empty, - Some(deriveNodeSeed(1)), - isRoot = true, - Set.empty, - view10, - view11, - ) - - override lazy val rootViews: Seq[TransactionView] = Seq(view0, view1) - - override lazy val viewWithSubviews: Seq[(TransactionView, Seq[TransactionView])] = - Seq( - view0 -> Seq(view0), - view1 -> Seq(view1, view10, view11, view110), - view10 -> Seq(view10), - view11 -> Seq(view11, view110), - view110 -> Seq(view110), - ) - - override lazy val transactionTree: GenTransactionTree = genTransactionTree(view0, view1) - - override lazy val fullInformeeTree: FullInformeeTree = - mkFullInformeeTree( - blindedForInformeeTree(view0), - blindedForInformeeTree( - view1, - blindedForInformeeTree(view10), - blindedForInformeeTree(view11, blindedForInformeeTree(view110)), - ), - ) - - val transactionViewTree0: FullTransactionViewTree = - rootTransactionViewTree(view0, blinded(view1)) - - val transactionViewTree1: FullTransactionViewTree = - rootTransactionViewTree(blinded(view0), view1) - - val transactionViewTree10: FullTransactionViewTree = - nonRootTransactionViewTree(blinded(view0), leafsBlinded(view1, view10, blinded(view11))) - - val transactionViewTree11: FullTransactionViewTree = - nonRootTransactionViewTree(blinded(view0), leafsBlinded(view1, blinded(view10), view11)) - - val transactionViewTree110: FullTransactionViewTree = - nonRootTransactionViewTree( - blinded(view0), - leafsBlinded(view1, blinded(view10), leafsBlinded(view11, view110)), - ) - - val fetch11Abs: LfNodeFetch = genFetch11(create10.coid) - val 
exercise13Abs: LfNodeExercises = genExercise13(create12.coid) - - override lazy val reinterpretedSubtransactions: Seq[ - ( - FullTransactionViewTree, - (LfVersionedTransaction, TransactionMetadata, LfKeyResolver), - Witnesses, - ) - ] = - Seq( - ( - transactionViewTree0, - ( - transaction(Seq(0), lfCreate0), - mkMetadata(seeds.filter(_._1 == LfNodeId(0))), - Map.empty, - ), - Witnesses(NonEmpty(List, transactionViewTree0.informees)), - ), - ( - transactionViewTree1, - ( - transactionFrom( - Seq(1), - 1, - exercise1, - lfCreate10, - lfFetch11, - lfCreate12, - lfExercise13, - lfCreate130, - lfExercise131, - lfCreate1310, - ), - mkMetadata( - seeds.filter(seed => - Seq(1, 2, 3, 4, 5, 6, 7, 8).map(LfNodeId.apply).contains(seed._1) - ) - ), - Map.empty, - ), - Witnesses(NonEmpty(List, transactionViewTree1.informees)), - ), - ( - transactionViewTree10, - ( - transactionFrom(Seq(6), 6, lfCreate130), - mkMetadata(seeds.filter(_._1 == LfNodeId(6))), - Map.empty, - ), - Witnesses(NonEmpty(List, transactionViewTree10.informees, transactionViewTree1.informees)), - ), - ( - transactionViewTree11, - ( - transactionFrom(Seq(7), 7, lfExercise131, lfCreate1310), - mkMetadata(seeds.filter(seed => Seq(7, 8).map(LfNodeId.apply).contains(seed._1))), - Map.empty, - ), - Witnesses(NonEmpty(List, transactionViewTree11.informees, transactionViewTree1.informees)), - ), - ( - transactionViewTree110, - ( - transactionFrom(Seq(8), 8, lfCreate1310), - mkMetadata(seeds.filter(_._1 == LfNodeId(8))), - Map.empty, - ), - Witnesses( - NonEmpty( - List, - transactionViewTree110.informees, - transactionViewTree11.informees, - transactionViewTree1.informees, - ) - ), - ), - ) - - override lazy val rootTransactionViewTrees: Seq[FullTransactionViewTree] = - Seq(transactionViewTree0, transactionViewTree1) - - override lazy val versionedSuffixedTransaction: LfVersionedTransaction = - transaction( - Seq(0, 1), - create0, - exercise1, - create10, - fetch11Abs, - create12, - exercise13Abs, - create130, - exercise131, - create1310, - ) - - } - - /** Transaction structure: 0. create - * 1. exerciseN - * 1.0. exercise - * 1.0.0. create - * 1.1. create(capturing 1.0.0) - * 1.2. exercise - * 1.2.0. create(capturing 1.0.0) - * 1.3. create(capturing 1.2.0) 2. create - * - * View structure: 0. View0 - * 1. View1 - * 1.0. View10 - * 1.0.0. View100 - * 1.2. View11 - * 1.2.0. View110 2. 
View2 - */ - case object ViewInterleavings extends ExampleTransaction { - - override def cryptoOps: HashOps with RandomOps = ExampleTransactionFactory.this.cryptoOps - - override def toString: String = "transaction with subviews and core nodes interleaved" - - def stakeholdersX: Set[LfPartyId] = Set(submitter, observer) - def genCreateX( - cid: LfContractId, - contractInst: LfThinContractInst, - ): LfNodeCreate = - createNode( - cid, - contractInstance = contractInst, - signatories = Set(submitter), - observers = Set(observer), - ) - - val create0Inst: LfThinContractInst = contractInstance() - val create0seed: LfHash = deriveNodeSeed(0) - val create0disc: LfHash = discriminator(create0seed, stakeholdersX) - val lfCreate0: LfNodeCreate = - genCreateX(LfContractId.V1(create0disc), create0Inst) - - def genExercise1(cid: LfContractId): LfNodeExercises = - exerciseNode( - cid, - children = List(nodeId(2), nodeId(4), nodeId(5), nodeId(7)), - signatories = Set(signatory), - observers = Set( - observer, - submitter, - ), // note the observer is not an informee, as the exercise is non-consuming - actingParties = Set(submitter), - consuming = false, - ) - val lfExercise1: LfNodeExercises = genExercise1(suffixedId(-1, 1)) - - def genExercise1X(cid: LfContractId, childIndex: Int): LfNodeExercises = - exerciseNode( - cid, - children = List(nodeId(childIndex)), - signatories = Set(signatory), - observers = Set(extra), - actingParties = Set(signatory), - ) - - val lfExercise10: LfNodeExercises = genExercise1X(suffixedId(-1, 10), 3) - - def stakeholders3X: Set[LfPartyId] = Set(signatory, observer) - def genCreate3X( - cid: LfContractId, - contractInst: LfThinContractInst, - ): LfNodeCreate = - createNode( - cid, - contractInstance = contractInst, - signatories = Set(signatory), - observers = Set(observer), - ) - - val create100Inst: LfThinContractInst = contractInstance() - val create100seed: LfHash = deriveNodeSeed(1, 0, 0) - val create100disc: LfHash = discriminator(create100seed, stakeholders3X) - val lfCreate100Id: LfContractId = LfContractId.V1(create100disc) - val lfCreate100: LfNodeCreate = genCreate3X(lfCreate100Id, create100Inst) - - def stakeholdersXX: Set[LfPartyId] = Set(signatory, submitter) - def genCreateXX( - cid: LfContractId, - contractInst: LfThinContractInst, - ): LfNodeCreate = - createNode( - cid, - contractInstance = contractInst, - signatories = stakeholdersXX, - observers = Set.empty, - ) - - def genCreate11Inst(capturedId: LfContractId): LfThinContractInst = contractInstance( - Seq(capturedId) - ) - val create11seed: LfHash = deriveNodeSeed(1, 1) - val create11disc: LfHash = discriminator(create11seed, stakeholdersXX) - val lfCreate11: LfNodeCreate = - genCreateXX(LfContractId.V1(create11disc), genCreate11Inst(lfCreate100Id)) - - val lfExercise12: LfNodeExercises = genExercise1X(suffixedId(-1, 12), 6) - - def genCreate120Inst(capturedId: LfContractId): LfThinContractInst = contractInstance( - Seq(capturedId) - ) - val lfCreate120Inst: LfThinContractInst = genCreate120Inst(lfCreate100Id) - val create120seed: LfHash = deriveNodeSeed(1, 2, 0) - val create120disc: LfHash = discriminator(create120seed, stakeholders3X) - val lfCreate120Id: LfContractId = LfContractId.V1(create120disc) - val lfCreate120: LfNodeCreate = genCreate3X(lfCreate120Id, lfCreate120Inst) - - def genCreate13Inst(capturedId: LfContractId): LfThinContractInst = contractInstance( - Seq(capturedId) - ) - val create13seed: LfHash = deriveNodeSeed(1, 3) - val create13disc: LfHash = discriminator(create13seed, 
stakeholdersXX) - val lfCreate13Id: LfContractId = LfContractId.V1(create13disc) - val lfCreate13: LfNodeCreate = - genCreateXX(lfCreate13Id, genCreate13Inst(lfCreate120Id)) - - val create2Inst: LfThinContractInst = contractInstance() - val create2seed: LfHash = deriveNodeSeed(2) - val create2disc: LfHash = discriminator(create2seed, stakeholdersX) - val lfCreate2: LfNodeCreate = - genCreateX(LfContractId.V1(create2disc), create2Inst) - - override lazy val versionedUnsuffixedTransaction: LfVersionedTransaction = - transaction( - Seq(0, 1, 8), - lfCreate0, - lfExercise1, - lfExercise10, - lfCreate100, - lfCreate11, - lfExercise12, - lfCreate120, - lfCreate13, - lfCreate2, - ) - - val exercise1seed = deriveNodeSeed(1) - val exercise10seed = deriveNodeSeed(1, 0) - val exercise12seed = deriveNodeSeed(1, 2) - - override lazy val metadata: TransactionMetadata = mkMetadata( - Map( - LfNodeId(0) -> create0seed, - LfNodeId(1) -> exercise1seed, - LfNodeId(2) -> exercise10seed, - LfNodeId(3) -> create100seed, - LfNodeId(4) -> create11seed, - LfNodeId(5) -> exercise12seed, - LfNodeId(6) -> create120seed, - LfNodeId(7) -> create13seed, - LfNodeId(8) -> create2seed, - ) - ) - - override def keyResolver: LfKeyResolver = Map.empty // No keys involved here - - override lazy val rootViewDecompositions: Seq[NewView] = { - val v0 = awaitCreateNewView( - lfCreate0, - Some(create0seed), - LfNodeId(0), - Seq.empty, - isRoot = true, - ) - - val v100 = awaitCreateNewView( - lfCreate100, - Some(create100seed), - LfNodeId(3), - Seq.empty, - isRoot = false, - ) - - val v10 = awaitCreateNewView( - LfTransactionUtil.lightWeight(lfExercise10), - Some(exercise10seed), - LfNodeId(2), - Seq(v100), - isRoot = false, - ) - - val v110 = awaitCreateNewView( - lfCreate120, - Some(create120seed), - LfNodeId(6), - Seq.empty, - isRoot = false, - ) - - val v11 = awaitCreateNewView( - LfTransactionUtil.lightWeight(lfExercise12), - Some(exercise12seed), - LfNodeId(5), - Seq(v110), - isRoot = false, - ) - - val v1 = awaitCreateNewView( - LfTransactionUtil.lightWeight(lfExercise1), - Some(exercise1seed), - LfNodeId(1), - Seq( - v10, - SameView(lfCreate11, LfNodeId(4), RollbackContext.empty), - v11, - SameView(lfCreate13, LfNodeId(7), RollbackContext.empty), - ), - isRoot = true, - ) - - val v2 = awaitCreateNewView( - lfCreate2, - Some(create2seed), - LfNodeId(8), - Seq.empty, - isRoot = true, - ) - - Seq(v0, v1, v2) - } - - val create0SerInst: SerializableRawContractInstance = - asSerializableRaw(create0Inst) - val (salt0Id, create0Id): (Salt, LfContractId) = - fromDiscriminator( - rootViewPosition(0, 3), - 0, - 0, - create0Inst, - create0disc, - signatories = Set(submitter), - observers = Set(observer), - ) - val create0: LfNodeCreate = genCreateX(create0Id, create0Inst) - - val exercise1Id: LfContractId = suffixedId(-1, 1) - val exercise1: LfNodeExercises = genExercise1(exercise1Id) - val exercise1Instance: LfThinContractInst = contractInstance() - - val exercise10Id: LfContractId = suffixedId(-1, 10) - val exercise10: LfNodeExercises = genExercise1X(exercise10Id, 3) - val exercise10Instance: LfThinContractInst = contractInstance() - - val create100SerInst: SerializableRawContractInstance = asSerializableRaw(create100Inst) - val (salt100Id, create100Id): (Salt, LfContractId) = - fromDiscriminator( - subViewIndex(0, 1) +: subViewIndex(0, 2) +: rootViewPosition(1, 3), - 3, - 0, - create100Inst, - create100disc, - signatories = Set(signatory), - observers = Set(observer), - ) - val create100: LfNodeCreate = genCreate3X(create100Id, 
create100Inst) - - val create11Inst: LfThinContractInst = genCreate11Inst(create100Id) - val create11SerInst: SerializableRawContractInstance = - asSerializableRaw(create11Inst) - val (salt11Id, create11Id): (Salt, LfContractId) = - fromDiscriminator( - rootViewPosition(1, 3), - 1, - 0, - create11Inst, - create11disc, - signatories = stakeholdersXX, - ) - val create11: LfNodeCreate = genCreateXX(create11Id, create11Inst) - - val exercise12Id: LfContractId = suffixedId(-1, 12) - val exercise12: LfNodeExercises = genExercise1X(exercise12Id, 6) - val exercise12Instance: LfThinContractInst = contractInstance() - - val create120Inst: LfThinContractInst = genCreate120Inst(create100Id) - val create120SerInst: SerializableRawContractInstance = asSerializableRaw(create120Inst) - val (salt120Id, create120Id): (Salt, LfContractId) = - fromDiscriminator( - subViewIndex(0, 1) +: subViewIndex(1, 2) +: rootViewPosition(1, 3), - 5, - 0, - create120Inst, - create120disc, - signatories = Set(signatory), - observers = Set(observer), - ) - val create120: LfNodeCreate = genCreate3X(create120Id, create120Inst) - - val create13Inst: LfThinContractInst = genCreate13Inst(create120Id) - val create13SerInst: SerializableRawContractInstance = asSerializableRaw(create13Inst) - val (salt13Id, create13Id): (Salt, LfContractId) = - fromDiscriminator( - rootViewPosition(1, 3), - 1, - 1, - create13Inst, - create13disc, - signatories = stakeholdersXX, - ) - val create13: LfNodeCreate = genCreateXX(create13Id, create13Inst) - - val create2SerInst: SerializableRawContractInstance = asSerializableRaw(create2Inst) - val (salt2Id, create2Id): (Salt, LfContractId) = - fromDiscriminator( - rootViewPosition(2, 3), - 6, - 0, - create2Inst, - create2disc, - signatories = Set(submitter), - observers = Set(observer), - ) - val create2: LfNodeCreate = genCreateX(create2Id, create2Inst) - - val view0: TransactionView = - view( - create0, - 0, - Set.empty, - Seq.empty, - Seq(serializableFromCreate(create0, salt0Id)), - Map.empty, - Some(create0seed), - isRoot = true, - Set.empty, - ) - - val view100: TransactionView = - view( - create100, - 3, - Set.empty, - Seq.empty, - Seq(serializableFromCreate(create100, salt100Id)), - Map.empty, - Some(create100seed), - isRoot = false, - Set.empty, - ) - - val view10: TransactionView = view( - exercise10, - 2, - Set(exercise10Id), - Seq( - asSerializable( - exercise10Id, - exercise10Instance, - metadataFromExercise(exercise10), - ledgerTime, - ) - ), - Seq.empty, - Map.empty, - Some(deriveNodeSeed(1, 0)), - isRoot = false, - Set.empty, - view100, - ) - - val view110: TransactionView = - view( - create120, - 5, - Set.empty, - Seq.empty, - Seq(serializableFromCreate(create120, salt120Id)), - Map.empty, - Some(create120seed), - isRoot = false, - Set.empty, - ) - - val view11: TransactionView = - view( - exercise12, - 4, - Set(exercise12Id), - Seq( - asSerializable( - exercise12Id, - exercise12Instance, - metadataFromExercise(exercise12), - ledgerTime, - ) - ), - Seq.empty, - Map.empty, - Some(deriveNodeSeed(1, 2)), - isRoot = false, - Set.empty, - view110, - ) - - val view1: TransactionView = - view( - exercise1, - 1, - Set.empty, - Seq( - asSerializable( - exercise1Id, - exercise1Instance, - metadataFromExercise(exercise1), - ledgerTime, - ) - ), - Seq( - serializableFromCreate(create11, salt11Id), - serializableFromCreate(create13, salt13Id), - ), - Map.empty, - Some(deriveNodeSeed(1)), - isRoot = true, - Set.empty, - view10, - view11, - ) - - val view2: TransactionView = - view( - create2, - 6, - 
Set.empty, - Seq.empty, - Seq(serializableFromCreate(create2, salt2Id)), - Map.empty, - Some(create2seed), - isRoot = true, - Set.empty, - ) - - override lazy val rootViews: Seq[TransactionView] = Seq(view0, view1, view2) - - override lazy val viewWithSubviews: Seq[(TransactionView, Seq[TransactionView])] = - Seq( - view0 -> Seq(view0), - view1 -> Seq(view1, view10, view100, view11, view110), - view10 -> Seq(view10, view100), - view100 -> Seq(view100), - view11 -> Seq(view11, view110), - view110 -> Seq(view110), - view2 -> Seq(view2), - ) - - override lazy val transactionTree: GenTransactionTree = genTransactionTree(view0, view1, view2) - - override lazy val fullInformeeTree: FullInformeeTree = - mkFullInformeeTree( - blindedForInformeeTree(view0), - blindedForInformeeTree( - view1, - blindedForInformeeTree(view10, blindedForInformeeTree(view100)), - blindedForInformeeTree(view11, blindedForInformeeTree(view110)), - ), - blindedForInformeeTree(view2), - ) - - val transactionViewTree0: FullTransactionViewTree = - rootTransactionViewTree(view0, blinded(view1), blinded(view2)) - - val transactionViewTree1: FullTransactionViewTree = - rootTransactionViewTree(blinded(view0), view1, blinded(view2)) - - val transactionViewTree10: FullTransactionViewTree = - nonRootTransactionViewTree( - blinded(view0), - leafsBlinded(view1, view10, blinded(view11)), - blinded(view2), - ) - - val transactionViewTree100: FullTransactionViewTree = nonRootTransactionViewTree( - blinded(view0), - leafsBlinded(view1, leafsBlinded(view10, view100), blinded(view11)), - blinded(view2), - ) - - val transactionViewTree11: FullTransactionViewTree = - nonRootTransactionViewTree( - blinded(view0), - leafsBlinded(view1, blinded(view10), view11), - blinded(view2), - ) - - val transactionViewTree110: FullTransactionViewTree = nonRootTransactionViewTree( - blinded(view0), - leafsBlinded(view1, blinded(view10), leafsBlinded(view11, view110)), - blinded(view2), - ) - - val transactionViewTree2: FullTransactionViewTree = - rootTransactionViewTree(blinded(view0), blinded(view1), view2) - - val create120reinterpret: LfNodeCreate = - genCreate3X(lfCreate120Id, genCreate120Inst(create100Id)) - - override lazy val reinterpretedSubtransactions: Seq[ - ( - FullTransactionViewTree, - (LfVersionedTransaction, TransactionMetadata, LfKeyResolver), - Witnesses, - ) - ] = - Seq( - ( - transactionViewTree0, - ( - transaction(Seq(0), lfCreate0), - mkMetadata(seeds.filter(_._1 == LfNodeId(0))), - Map.empty, - ), - Witnesses(NonEmpty(List, transactionViewTree0.informees)), - ), - ( - transactionViewTree1, - ( - transactionFrom( - Seq(1), - 1, - exercise1, - exercise10, - lfCreate100, - lfCreate11, - exercise12, - lfCreate120, - lfCreate13, - ), - mkMetadata( - seeds.filter(seed => Seq(1, 2, 3, 4, 5, 6, 7).map(LfNodeId.apply).contains(seed._1)) - ), - Map.empty, - ), - Witnesses(NonEmpty(List, transactionViewTree1.informees)), - ), - ( - transactionViewTree10, - ( - transactionFrom(Seq(2), 2, exercise10, lfCreate100), - mkMetadata(seeds.filter(seed => Seq(2, 3).map(LfNodeId.apply).contains(seed._1))), - Map.empty, - ), - Witnesses(NonEmpty(List, transactionViewTree10.informees, transactionViewTree1.informees)), - ), - ( - transactionViewTree100, - ( - transaction(Seq(0), lfCreate100), - mkMetadata(Map(LfNodeId(0) -> create100seed)), - Map.empty, - ), - Witnesses( - NonEmpty( - List, - transactionViewTree100.informees, - transactionViewTree10.informees, - transactionViewTree1.informees, - ) - ), - ), - ( - transactionViewTree11, - ( - 
transactionFrom(Seq(5), 5, exercise12, create120reinterpret), - mkMetadata(seeds.filter(seed => Seq(5, 6).map(LfNodeId.apply).contains(seed._1))), - Map.empty, - ), - Witnesses(NonEmpty(List, transactionViewTree11.informees, transactionViewTree1.informees)), - ), - ( - transactionViewTree110, - ( - transaction(Seq(0), create120reinterpret), - mkMetadata(Map(LfNodeId(0) -> create120seed)), - Map.empty, - ), - Witnesses( - NonEmpty( - List, - transactionViewTree110.informees, - transactionViewTree11.informees, - transactionViewTree1.informees, - ) - ), - ), - ( - transactionViewTree2, - (transaction(Seq(0), lfCreate2), mkMetadata(Map(LfNodeId(0) -> create2seed)), Map.empty), - Witnesses(NonEmpty(List, transactionViewTree2.informees)), - ), - ) - - override lazy val rootTransactionViewTrees: Seq[FullTransactionViewTree] = - Seq(transactionViewTree0, transactionViewTree1, transactionViewTree2) - - override lazy val versionedSuffixedTransaction: LfVersionedTransaction = - transaction( - Seq(0, 1, 8), - create0, - exercise1, - exercise10, - create100, - create11, - exercise12, - create120, - create13, - create2, - ) - } - - /** Transaction structure: 0. create - * 1. exercise(0) - * 1.0. create - * 1.1. exerciseN(1.0) - * 1.1.0. create - * 1.2. exercise(1.1.0) - * 1.3. exercise(1.0) - * - * View structure: 0. view0 - * 1. view1 - * 1.1. view10 - */ - case object TransientContracts extends ExampleTransaction { - - override def cryptoOps: HashOps with RandomOps = ExampleTransactionFactory.this.cryptoOps - - override def toString: String = "transaction with transient contracts" - - def stakeholders: Set[LfPartyId] = Set(submitter, observer) - def genCreate( - cid: LfContractId, - contractInst: LfThinContractInst, - ): LfNodeCreate = - createNode( - cid, - contractInstance = contractInst, - signatories = Set(submitter), - observers = Set(observer), - ) - - val create0Inst: LfThinContractInst = contractInstance() - val create0seed: LfHash = deriveNodeSeed(0) - val create0disc: LfHash = discriminator(create0seed, stakeholders) - val lfCreate0Id: LfContractId = LfContractId.V1(create0disc) - val lfCreate0: LfNodeCreate = genCreate(lfCreate0Id, create0Inst) - - def genExercise(cid: LfContractId, childIndices: List[Int]): LfNodeExercises = - exerciseNode( - cid, - actingParties = Set(submitter), - signatories = Set(submitter), - observers = Set(observer), - children = childIndices.map(nodeId), - ) - val lfExercise1: LfNodeExercises = genExercise(lfCreate0Id, List(2, 3, 5, 6)) - - val create10Inst: LfThinContractInst = contractInstance() - val create10seed: LfHash = deriveNodeSeed(1, 0) - val create10disc: LfHash = discriminator(create10seed, stakeholders) - val lfCreate10Id: LfContractId = LfContractId.V1(create10disc) - val lfCreate10: LfNodeCreate = genCreate(lfCreate10Id, create10Inst) - - def genExerciseN(cid: LfContractId, childIndex: Int): LfNodeExercises = - exerciseNode( - cid, - consuming = false, - actingParties = Set(submitter), - signatories = Set(submitter, signatory), - observers = Set(observer), - children = List(nodeId(childIndex)), - ) - val lfExercise11: LfNodeExercises = genExerciseN(lfCreate10Id, 4) - - val create110seed: LfHash = deriveNodeSeed(1, 1, 0) - val create110disc: LfHash = discriminator(create110seed, Set(submitter, signatory)) - def genCreate110( - cid: LfContractId, - contractInst: LfThinContractInst, - ): LfNodeCreate = - createNode( - cid, - contractInstance = contractInst, - signatories = Set(submitter, signatory), - observers = Set.empty, - ) - - val create110Inst: 
LfThinContractInst = contractInstance() - val lfCreate110Id: LfContractId = LfContractId.V1(create110disc) - val lfCreate110: LfNodeCreate = genCreate110(lfCreate110Id, create110Inst) - - val lfExercise12: LfNodeExercises = genExercise(lfCreate110Id, List.empty) - - val lfExercise13: LfNodeExercises = genExercise(lfCreate10Id, List.empty) - - override def versionedUnsuffixedTransaction: LfVersionedTransaction = - transaction( - Seq(0, 1), - lfCreate0, - lfExercise1, - lfCreate10, - lfExercise11, - lfCreate110, - lfExercise12, - lfExercise13, - ) - - val exercise1seed: LfHash = deriveNodeSeed(1) - val exercise11seed: LfHash = deriveNodeSeed(1, 1) - val exercise12seed: LfHash = deriveNodeSeed(1, 2) - val exercise13seed: LfHash = deriveNodeSeed(1, 3) - - override lazy val metadata: TransactionMetadata = mkMetadata( - Map( - LfNodeId(0) -> create0seed, - LfNodeId(1) -> exercise1seed, - LfNodeId(2) -> create10seed, - LfNodeId(3) -> exercise11seed, - LfNodeId(4) -> create110seed, - LfNodeId(5) -> exercise12seed, - LfNodeId(6) -> exercise13seed, - ) - ) - - override def keyResolver: LfKeyResolver = Map.empty // No keys involved here - - override def rootViewDecompositions: Seq[TransactionViewDecomposition.NewView] = { - val v0 = awaitCreateNewView( - lfCreate0, - Some(create0seed), - LfNodeId(0), - Seq.empty, - isRoot = true, - ) - - val v10 = awaitCreateNewView( - LfTransactionUtil.lightWeight(lfExercise11), - Some(exercise11seed), - LfNodeId(3), - Seq(SameView(lfCreate110, LfNodeId(4), RollbackContext.empty)), - isRoot = false, - ) - - val v1 = awaitCreateNewView( - LfTransactionUtil.lightWeight(lfExercise1), - Some(exercise1seed), - LfNodeId(1), - Seq( - SameView(lfCreate10, LfNodeId(2), RollbackContext.empty), - v10, - SameView(LfTransactionUtil.lightWeight(lfExercise12), LfNodeId(5), RollbackContext.empty), - SameView(LfTransactionUtil.lightWeight(lfExercise13), LfNodeId(6), RollbackContext.empty), - ), - isRoot = true, - ) - - Seq(v0, v1) - } - - val create0SerInst: SerializableRawContractInstance = - asSerializableRaw(create0Inst) - val (salt0Id, create0Id): (Salt, LfContractId) = - fromDiscriminator( - rootViewPosition(0, 2), - 0, - 0, - create0Inst, - create0disc, - signatories = Set(submitter), - observers = Set(observer), - ) - val create0: LfNodeCreate = genCreate(create0Id, create0Inst) - - val exercise1: LfNodeExercises = genExercise(create0Id, List(2, 3, 5, 6)) - - val create10SerInst: SerializableRawContractInstance = - asSerializableRaw(create10Inst) - val (salt10Id, create10Id): (Salt, LfContractId) = - fromDiscriminator( - rootViewPosition(1, 2), - 1, - 0, - create10Inst, - create10disc, - signatories = Set(submitter), - observers = Set(observer), - ) - val create10: LfNodeCreate = genCreate(create10Id, create10Inst) - - val exercise11: LfNodeExercises = genExerciseN(create10Id, 4) - - val create110SerInst: SerializableRawContractInstance = asSerializableRaw(create110Inst) - val (salt110Id, create110Id): (Salt, LfContractId) = - fromDiscriminator( - subViewIndex(0, 1) +: rootViewPosition(1, 2), - 2, - 0, - create110Inst, - create110disc, - signatories = Set(submitter, signatory), - ) - val create110: LfNodeCreate = genCreate110(create110Id, create110Inst) - - val exercise12: LfNodeExercises = genExercise(create110Id, List.empty) - - val exercise13: LfNodeExercises = genExercise(create10Id, List.empty) - - val view0: TransactionView = - view( - create0, - 0, - Set.empty, - Seq.empty, - Seq(serializableFromCreate(create0, salt0Id)), - Map.empty, - Some(create0seed), - isRoot = 
true, - Set.empty, - ) - - val view10: TransactionView = view( - exercise11, - 2, - Set.empty, - Seq( - asSerializable( - create10Id, - create10Inst, - ContractMetadata.tryCreate(create10.signatories, create10.stakeholders, None), - salt = salt10Id, - ) - ), - Seq(serializableFromCreate(create110, salt110Id)), - Map.empty, - Some(deriveNodeSeed(1, 1)), - isRoot = false, - Set.empty, - ) - - val view1: TransactionView = view( - exercise1, - 1, - Set(create0Id, create10Id, create110Id), - Seq( - asSerializable( - create0Id, - create0Inst, - ContractMetadata.tryCreate(create0.signatories, create0.stakeholders, None), - salt = salt0Id, - ) - ), - Seq(serializableFromCreate(create10, salt10Id)), - Map.empty, - Some(deriveNodeSeed(1)), - isRoot = true, - Set.empty, - view10, - ) - - override def rootViews: Seq[TransactionView] = Seq(view0, view1) - - override def viewWithSubviews: Seq[(TransactionView, Seq[TransactionView])] = - Seq(view0 -> Seq(view0), view1 -> Seq(view1, view10), view10 -> Seq(view10)) - - override def transactionTree: GenTransactionTree = - genTransactionTree(view0, view1) - - override def fullInformeeTree: FullInformeeTree = - mkFullInformeeTree( - blindedForInformeeTree(view0), - blindedForInformeeTree(view1, blindedForInformeeTree(view10)), - ) - - val transactionViewTree0: FullTransactionViewTree = - rootTransactionViewTree(view0, blinded(view1)) - val transactionViewTree1: FullTransactionViewTree = - rootTransactionViewTree(blinded(view0), view1) - val transactionViewTree10: FullTransactionViewTree = - nonRootTransactionViewTree(blinded(view0), leafsBlinded(view1, view10)) - - override def reinterpretedSubtransactions: Seq[ - ( - FullTransactionViewTree, - (LfVersionedTransaction, TransactionMetadata, LfKeyResolver), - Witnesses, - ) - ] = - Seq( - ( - transactionViewTree0, - ( - transaction(Seq(0), lfCreate0), - mkMetadata(seeds.filter(_._1 == LfNodeId(0))), - Map.empty, - ), - Witnesses(NonEmpty(List, transactionViewTree0.informees)), - ), - ( - transactionViewTree1, - ( - transactionFrom( - Seq(1), - 1, - exercise1, - lfCreate10, - lfExercise11, - lfCreate110, - lfExercise12, - lfExercise13, - ), - mkMetadata( - seeds.filter(seed => Seq(1, 2, 3, 4, 5, 6).map(LfNodeId.apply).contains(seed._1)) - ), - Map.empty, - ), - Witnesses(NonEmpty(List, transactionViewTree1.informees)), - ), - ( - transactionViewTree10, - ( - transactionFrom(Seq(3), 3, exercise11, lfCreate110), - mkMetadata(seeds.filter(seed => Seq(3, 4).map(LfNodeId.apply).contains(seed._1))), - Map.empty, - ), - Witnesses(NonEmpty(List, transactionViewTree10.informees, transactionViewTree1.informees)), - ), - ) - - override def rootTransactionViewTrees: Seq[FullTransactionViewTree] = - Seq(transactionViewTree0, transactionViewTree1) - - override def versionedSuffixedTransaction: LfVersionedTransaction = - transaction( - Seq(0, 1), - create0, - exercise1, - create10, - exercise11, - create110, - exercise12, - exercise13, - ) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/ExampleTransactionFactoryTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/ExampleTransactionFactoryTest.scala deleted file mode 100644 index f2fe55ffcb..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/ExampleTransactionFactoryTest.scala +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
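
For orientation, the deleted ExampleTransactionFactory examples above all follow one scheme: every node gets a seed derived from the transaction's root seed along its node path (deriveNodeSeed(1, 3, 0) etc.), each create's unsuffixed contract id is LfContractId.V1 of a discriminator computed from that seed plus the stakeholders, and the ids are later suffixed per view position (the fromDiscriminator calls with salts and unicums). The sketch below shows only the shape of that scheme with hypothetical SHA-256 helpers; Canton's real derivation lives in LfHash and uses a different byte-level format.

import java.security.MessageDigest

// Editor's sketch, NOT Canton's implementation: hypothetical stand-ins that
// mirror the structure of the factory's seed scheme.
object SeedDerivationSketch {
  private def sha256(chunks: Array[Byte]*): Array[Byte] = {
    val md = MessageDigest.getInstance("SHA-256")
    chunks.foreach(c => md.update(c))
    md.digest()
  }

  /** deriveNodeSeed(root, 1, 3, 0): fold the child indices along the node path
    * into the root seed, so sibling nodes get independent seeds.
    */
  def deriveNodeSeed(rootSeed: Array[Byte], path: Int*): Array[Byte] =
    path.foldLeft(rootSeed)((seed, childIdx) => sha256(seed, Array(childIdx.toByte)))

  /** discriminator(seed, stakeholders): binds the unsuffixed contract id to the
    * node seed and the contract's stakeholders, as in the lfCreateXX values above.
    */
  def discriminator(seed: Array[Byte], stakeholders: Set[String]): Array[Byte] =
    sha256((seed +: stakeholders.toSeq.sorted.map(_.getBytes("UTF-8"))): _*)
}
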
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protocol - -import com.digitalasset.canton.protocol.WellFormedTransaction.WithoutSuffixes -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.wordspec.AnyWordSpec - -/** Tests that all examples provided can be created successfully. - */ -class ExampleTransactionFactoryTest extends AnyWordSpec with BaseTest with HasExecutionContext { - val factory = new ExampleTransactionFactory()() - - forEvery(factory.standardHappyCases) { example => - s"Example transaction $example" can { - - "determine the informees" in { - noException should be thrownBy example.allInformees - } - "create the lfTransaction" in { - noException should be thrownBy example.versionedUnsuffixedTransaction - } - "create a well-formed lfTransaction" in { - noException should be thrownBy example.wellFormedUnsuffixedTransaction - } - "create the view decompositions" in { - noException should be thrownBy example.rootViewDecompositions - } - "create the root views" in { - noException should be thrownBy example.rootViews - } - "create views with subviews" in { - noException should be thrownBy example.viewWithSubviews - } - "create divulged contracts" in { - noException should be thrownBy example.inputContracts - } - "create the transaction tree" in { - noException should be thrownBy example.transactionTree - } - "create the transaction id" in { - example.transactionId should equal(example.transactionTree.transactionId) - } - "create the full informee tree" in { - noException should be thrownBy example.fullInformeeTree - } - "create the transaction view trees with their reinterpreted subaction" in { - noException should be thrownBy example.reinterpretedSubtransactions - } - "create the root transaction view trees" in { - noException should be thrownBy example.rootTransactionViewTrees - } - "create the absolute transaction" in { - noException should be thrownBy example.versionedSuffixedTransaction - } - "create a well-formed absolute transaction" in { - noException should be thrownBy example.wellFormedSuffixedTransaction - } - "with a consistent number of roots" in { - val roots = example.versionedUnsuffixedTransaction.roots.length - - example.wellFormedUnsuffixedTransaction.unwrap.roots.length shouldBe roots - example.rootViewDecompositions.length shouldBe roots - example.rootViews.length shouldBe roots - example.transactionTree.rootViews.unblindedElements.length shouldBe roots - example.fullInformeeTree.tree.rootViews.unblindedElements.length shouldBe roots - example.rootTransactionViewTrees.length shouldBe roots - example.versionedSuffixedTransaction.roots.length shouldBe roots - example.wellFormedSuffixedTransaction.unwrap.roots.length shouldBe roots - } - "with a consistent number of nodes" in { - val nodes = example.versionedUnsuffixedTransaction.nodes.size - - example.wellFormedUnsuffixedTransaction.unwrap.nodes.size shouldBe nodes - example.versionedSuffixedTransaction.nodes.size shouldBe nodes - example.wellFormedSuffixedTransaction.unwrap.nodes.size shouldBe nodes - } - "with a consistent number of views" in { - val views = example.viewWithSubviews.length - - example.transactionViewTrees.length shouldBe views - } - "with consistent transaction root hashes" in { - example.transactionTree.transactionId shouldEqual example.transactionId - example.fullInformeeTree.transactionId shouldEqual example.transactionId - forEvery(example.transactionViewTrees) { - _.transactionId shouldEqual example.transactionId - } - } - 
"reinterpretations are well-formed" must { - example.reinterpretedSubtransactions.zipWithIndex.foreach { - case ((_, (reinterpretedTx, metadata, _keyResolver), _), i) => - s"subtransaction $i" in { - WellFormedTransaction.normalizeAndAssert( - reinterpretedTx, - metadata, - WithoutSuffixes, - ) - } - } - } - } - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/GeneratorsProtocol.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/GeneratorsProtocol.scala deleted file mode 100644 index 1345a42fe5..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/GeneratorsProtocol.scala +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protocol - -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, NonNegativeLong, PositiveInt} -import com.digitalasset.canton.crypto.* -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto -import com.digitalasset.canton.data.{CantonTimestamp, ContractsReassignmentBatch, ViewPosition} -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.protocol.SynchronizerParameters.MaxRequestSize -import com.digitalasset.canton.pruning.CounterParticipantIntervalsBehind -import com.digitalasset.canton.sequencing.TrafficControlParameters -import com.digitalasset.canton.sequencing.protocol.MediatorGroupRecipient -import com.digitalasset.canton.time.{NonNegativeFiniteDuration, PositiveSeconds} -import com.digitalasset.canton.topology.transaction.ParticipantSynchronizerLimits -import com.digitalasset.canton.topology.{ - GeneratorsTopology, - ParticipantId, - PartyId, - PhysicalSynchronizerId, - SynchronizerId, -} -import com.digitalasset.canton.version.{HashingSchemeVersion, ProtocolVersion} -import com.digitalasset.canton.{GeneratorsLf, LfPartyId, ReassignmentCounter} -import com.digitalasset.daml.lf.transaction.{CreationTime, Versioned} -import com.google.protobuf.ByteString -import magnolify.scalacheck.auto.* -import org.scalacheck.{Arbitrary, Gen} - -final class GeneratorsProtocol( - protocolVersion: ProtocolVersion, - generatorsLf: GeneratorsLf, - generatorsTopology: GeneratorsTopology, -) { - import com.digitalasset.canton.Generators.* - import generatorsLf.* - import com.digitalasset.canton.config.GeneratorsConfig.* - import com.digitalasset.canton.crypto.GeneratorsCrypto.* - import com.digitalasset.canton.time.GeneratorsTime.* - import generatorsTopology.* - import org.scalatest.EitherValues.* - - implicit val staticSynchronizerParametersArb: Arbitrary[StaticSynchronizerParameters] = - Arbitrary(for { - requiredSigningAlgorithmSpecs <- nonEmptySetGen[SigningAlgorithmSpec] - requiredSigningKeySpecs <- nonEmptySetGen[SigningKeySpec] - requiredEncryptionAlgorithmSpecs <- nonEmptySetGen[EncryptionAlgorithmSpec] - requiredEncryptionKeySpecs <- nonEmptySetGen[EncryptionKeySpec] - requiredSymmetricKeySchemes <- nonEmptySetGen[SymmetricKeyScheme] - requiredHashAlgorithms <- nonEmptySetGen[HashAlgorithm] - requiredCryptoKeyFormats <- nonEmptySetGen[CryptoKeyFormat] - requiredSignatureFormats <- nonEmptySetGen[SignatureFormat] - serial <- Arbitrary.arbitrary[NonNegativeInt] - - parameters = StaticSynchronizerParameters( - RequiredSigningSpecs(requiredSigningAlgorithmSpecs, requiredSigningKeySpecs), - 
RequiredEncryptionSpecs(requiredEncryptionAlgorithmSpecs, requiredEncryptionKeySpecs), - requiredSymmetricKeySchemes, - requiredHashAlgorithms, - requiredCryptoKeyFormats, - requiredSignatureFormats, - protocolVersion, - serial, - ) - - } yield parameters) - - implicit val dynamicSynchronizerParametersArb: Arbitrary[DynamicSynchronizerParameters] = - Arbitrary( - for { - confirmationResponseTimeout <- Arbitrary.arbitrary[NonNegativeFiniteDuration] - mediatorReactionTimeout <- Arbitrary.arbitrary[NonNegativeFiniteDuration] - assignmentExclusivityTimeout <- Arbitrary.arbitrary[NonNegativeFiniteDuration] - topologyChangeDelay <- Arbitrary.arbitrary[NonNegativeFiniteDuration] - - mediatorDeduplicationMargin <- Arbitrary.arbitrary[NonNegativeFiniteDuration] - // Because of the potential multiplication by 2 below, we want a reasonably small value - ledgerTimeRecordTimeTolerance <- Gen - .choose(0L, 10000L) - .map(NonNegativeFiniteDuration.tryOfMicros) - - representativePV = DynamicSynchronizerParameters.protocolVersionRepresentativeFor( - protocolVersion - ) - - reconciliationInterval <- Arbitrary.arbitrary[PositiveSeconds] - maxRequestSize <- Arbitrary.arbitrary[MaxRequestSize] - - trafficControlConfig <- Gen.option(Arbitrary.arbitrary[TrafficControlParameters]) - - updatedMediatorDeduplicationTimeout = ledgerTimeRecordTimeTolerance * NonNegativeInt - .tryCreate(2) + mediatorDeduplicationMargin - - sequencerAggregateSubmissionTimeout <- Arbitrary.arbitrary[NonNegativeFiniteDuration] - onboardingRestriction <- Arbitrary.arbitrary[OnboardingRestriction] - - participantSynchronizerLimits <- Arbitrary.arbitrary[ParticipantSynchronizerLimits] - - acsCommitmentsCatchupConfig <- - for { - isNone <- Gen.oneOf(true, false) - skip <- Gen.choose(1, Math.sqrt(PositiveInt.MaxValue.value.toDouble).intValue) - trigger <- Gen.choose(1, Math.sqrt(PositiveInt.MaxValue.value.toDouble).intValue) - } yield { - if (!isNone) - Some( - new AcsCommitmentsCatchUpParameters( - PositiveInt.tryCreate(skip), - PositiveInt.tryCreate(trigger), - ) - ) - else None - } - - // Because of the potential multiplication by 2 below, we want a reasonably small value - preparationTimeRecordTimeTolerance <- Gen - .choose(0L, 10000L) - .map(NonNegativeFiniteDuration.tryOfMicros) - - dynamicSynchronizerParameters = DynamicSynchronizerParameters.tryCreate( - confirmationResponseTimeout, - mediatorReactionTimeout, - assignmentExclusivityTimeout, - topologyChangeDelay, - ledgerTimeRecordTimeTolerance, - updatedMediatorDeduplicationTimeout, - reconciliationInterval, - maxRequestSize, - sequencerAggregateSubmissionTimeout, - trafficControlConfig, - onboardingRestriction, - acsCommitmentsCatchupConfig, - participantSynchronizerLimits, - preparationTimeRecordTimeTolerance, - )(representativePV) - - } yield dynamicSynchronizerParameters - ) - - implicit val counterParticipantIntervalsBehindArb: Arbitrary[CounterParticipantIntervalsBehind] = - Arbitrary( - for { - synchronizerId <- Arbitrary.arbitrary[SynchronizerId] - participantId <- Arbitrary.arbitrary[ParticipantId] - intervalsBehind <- Arbitrary.arbitrary[NonNegativeLong] - timeBehind <- Arbitrary.arbitrary[NonNegativeFiniteDuration] - asOfSequencingTime <- Arbitrary.arbitrary[CantonTimestamp] - } yield CounterParticipantIntervalsBehind( - synchronizerId, - participantId, - intervalsBehind, - timeBehind, - asOfSequencingTime, - ) - ) - - implicit val dynamicSequencingParametersArb: Arbitrary[DynamicSequencingParameters] = Arbitrary( - for { - payload <- Arbitrary.arbitrary[Option[ByteString]] 
- representativePV = DynamicSequencingParameters.protocolVersionRepresentativeFor( - protocolVersion - ) - dynamicSequencingParameters = DynamicSequencingParameters(payload)(representativePV) - } yield dynamicSequencingParameters - ) - - implicit val rootHashArb: Arbitrary[RootHash] = Arbitrary( - Arbitrary.arbitrary[Hash].map(RootHash(_)) - ) - implicit val viewHashArb: Arbitrary[ViewHash] = Arbitrary( - Arbitrary.arbitrary[Hash].map(ViewHash(_)) - ) - - implicit val serializableRawContractInstanceArb: Arbitrary[SerializableRawContractInstance] = { - val contractInstance = ExampleTransactionFactory.contractInstance() - Arbitrary(SerializableRawContractInstance.create(contractInstance).value) - } - - private lazy val unicumGenerator: UnicumGenerator = new UnicumGenerator(new SymbolicPureCrypto()) - - { - // If this pattern match is not exhaustive anymore, update the method below - ((_: CantonContractIdVersion) match { - case AuthenticatedContractIdVersionV10 => () - case AuthenticatedContractIdVersionV11 => () - }).discard - } - def serializableContractArb( - metadata: ContractMetadata - ): Arbitrary[SerializableContract] = { - val contractIdVersion = AuthenticatedContractIdVersionV11 - - Arbitrary( - for { - rawContractInstance <- Arbitrary.arbitrary[SerializableRawContractInstance] - ledgerCreateTime <- Arbitrary.arbitrary[CreationTime.CreatedAt] - - psid <- Arbitrary.arbitrary[PhysicalSynchronizerId] - mediatorGroup <- Arbitrary.arbitrary[MediatorGroupRecipient] - - saltIndex <- Gen.choose(Int.MinValue, Int.MaxValue) - transactionUUID <- Gen.uuid - - (computedSalt, unicum) = unicumGenerator.generateSaltAndUnicum( - psid = psid, - mediator = mediatorGroup, - transactionUuid = transactionUUID, - viewPosition = ViewPosition(List.empty), - viewParticipantDataSalt = TestSalt.generateSalt(saltIndex), - createIndex = 0, - ledgerCreateTime = ledgerCreateTime, - metadata = metadata, - suffixedContractInstance = rawContractInstance, - cantonContractIdVersion = contractIdVersion, - ) - - index <- Gen.posNum[Int] - contractIdDiscriminator = ExampleTransactionFactory.lfHash(index) - - contractId = contractIdVersion.fromDiscriminator( - contractIdDiscriminator, - unicum, - ) - } yield SerializableContract( - contractId, - rawContractInstance, - metadata, - ledgerCreateTime, - contractSalt = computedSalt.unwrap, - ) - ) - } - def serializableContractArb( - canHaveEmptyKey: Boolean - ): Arbitrary[SerializableContract] = Arbitrary( - for { - metadata <- contractMetadataArb(canHaveEmptyKey).arbitrary - contract <- serializableContractArb(metadata).arbitrary - } yield contract - ) - - implicit val globalKeyWithMaintainersArb: Arbitrary[Versioned[LfGlobalKeyWithMaintainers]] = - Arbitrary( - for { - maintainers <- Gen.containerOf[Set, LfPartyId](Arbitrary.arbitrary[LfPartyId]) - key <- Arbitrary.arbitrary[LfGlobalKey] - } yield ExampleTransactionFactory.globalKeyWithMaintainers( - key, - maintainers, - ) - ) - - def contractMetadataArb(canHaveEmptyKey: Boolean): Arbitrary[ContractMetadata] = Arbitrary( - for { - maybeKeyWithMaintainers <- - if (canHaveEmptyKey) Gen.option(globalKeyWithMaintainersArb.arbitrary) - else Gen.some(globalKeyWithMaintainersArb.arbitrary) - maintainers = maybeKeyWithMaintainers.fold(Set.empty[LfPartyId])(_.unversioned.maintainers) - - signatories <- Gen.containerOf[Set, LfPartyId](Arbitrary.arbitrary[LfPartyId]) - observers <- Gen.containerOf[Set, LfPartyId](Arbitrary.arbitrary[LfPartyId]) - - allSignatories = maintainers ++ signatories - allStakeholders = allSignatories ++ 
observers - - // Required invariant: maintainers \subset signatories \subset stakeholders - } yield ContractMetadata.tryCreate( - signatories = allSignatories, - stakeholders = allStakeholders, - maybeKeyWithMaintainers, - ) - ) - - implicit val stakeholdersArb: Arbitrary[Stakeholders] = Arbitrary( - for { - signatories <- Gen.containerOf[Set, LfPartyId](Arbitrary.arbitrary[LfPartyId]) - observers <- Gen.containerOf[Set, LfPartyId](Arbitrary.arbitrary[LfPartyId]) - } yield Stakeholders.withSignatoriesAndObservers( - signatories = signatories, - observers = observers, - ) - ) - - implicit val requestIdArb: Arbitrary[RequestId] = genArbitrary - - implicit val rollbackContextArb: Arbitrary[RollbackContext] = - Arbitrary(Gen.listOf(Arbitrary.arbitrary[PositiveInt]).map(RollbackContext.apply)) - - implicit val createdContractArb: Arbitrary[CreatedContract] = Arbitrary( - for { - contract <- serializableContractArb(canHaveEmptyKey = true).arbitrary - consumedInCore <- Gen.oneOf(true, false) - rolledBack <- Gen.oneOf(true, false) - } yield CreatedContract - .create( - contract, - consumedInCore, - rolledBack, - ) - .value - ) - - implicit val contractReassignmentBatch: Arbitrary[ContractsReassignmentBatch] = Arbitrary( - for { - metadata <- contractMetadataArb(canHaveEmptyKey = true).arbitrary - contracts <- Gen.nonEmptyContainerOf[Seq, SerializableContract]( - serializableContractArb(metadata).arbitrary - ) - reassignmentCounters <- Gen - .containerOfN[Seq, ReassignmentCounter](contracts.length, reassignmentCounterGen) - contractCounters = contracts.zip(reassignmentCounters) - } yield ContractsReassignmentBatch.create(contractCounters).value - ) - - implicit val externalAuthorizationArb: Arbitrary[ExternalAuthorization] = Arbitrary( - for { - signatures <- Arbitrary.arbitrary[Map[PartyId, Seq[Signature]]] - hashingSchemeVersion <- Arbitrary.arbitrary[HashingSchemeVersion] - } yield ExternalAuthorization.create(signatures, hashingSchemeVersion, protocolVersion) - ) - - implicit val protocolSymmetricKeyArb: Arbitrary[ProtocolSymmetricKey] = - Arbitrary( - for { - key <- Arbitrary.arbitrary[SymmetricKey] - } yield ProtocolSymmetricKey(key, protocolVersion) - ) - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/SerializableContractTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/SerializableContractTest.scala deleted file mode 100644 index 5ff7139747..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/SerializableContractTest.scala +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
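
The contractMetadataArb generator deleted just above enforces its invariant (maintainers \subset signatories \subset stakeholders) by widening the freely drawn sets rather than filtering them, which keeps the generator total. A minimal sketch of that construction, using plain String parties as a hypothetical stand-in for LfPartyId and omitting the LfGlobalKeyWithMaintainers plumbing:

import org.scalacheck.Gen

// Editor's sketch of the invariant-preserving generator shape.
object MetadataGenSketch {
  final case class Metadata(signatories: Set[String], stakeholders: Set[String])

  def metadataGen(party: Gen[String]): Gen[Metadata] =
    for {
      maintainers <- Gen.containerOf[Set, String](party)
      signatories <- Gen.containerOf[Set, String](party)
      observers   <- Gen.containerOf[Set, String](party)
      allSignatories  = maintainers ++ signatories    // maintainers \subset signatories
      allStakeholders = allSignatories ++ observers   // signatories \subset stakeholders
    } yield Metadata(allSignatories, allStakeholders)
}
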
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protocol - -import com.digitalasset.canton.crypto.{Hash, HashAlgorithm, TestHash, TestSalt} -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.{ - BaseTest, - LfPackageName, - LfPartyId, - LfTimestamp, - LfValue, - LfVersioned, -} -import com.digitalasset.daml.lf.data.{Bytes, Ref} -import com.digitalasset.daml.lf.transaction.{CreationTime, FatContractInstance, Node} -import com.digitalasset.daml.lf.value.Value -import org.scalatest.wordspec.AnyWordSpec - -class SerializableContractTest extends AnyWordSpec with BaseTest { - - private val alice = LfPartyId.assertFromString("Alice") - private val bob = LfPartyId.assertFromString("Bob") - - private val templateId = ExampleTransactionFactory.templateId - - "SerializableContractInstance" should { - - forEvery(Seq(AuthenticatedContractIdVersionV10, AuthenticatedContractIdVersionV11)) { - contractIdVersion => - s"deserialize $contractIdVersion correctly" in { - val someContractSalt = TestSalt.generateSalt(0) - - val contractId = ExampleTransactionFactory.suffixedId(0, 0, contractIdVersion) - - val metadata = ContractMetadata.tryCreate( - signatories = Set(alice), - stakeholders = Set(alice, bob), - maybeKeyWithMaintainersVersioned = Some( - ExampleTransactionFactory.globalKeyWithMaintainers( - LfGlobalKey - .build( - templateId, - Value.ValueUnit, - LfPackageName.assertFromString("package-name"), - ) - .value, - Set(alice), - ) - ), - ) - - val sci = ExampleTransactionFactory.asSerializable( - contractId, - ExampleTransactionFactory.contractInstance(Seq(contractId)), - metadata, - CantonTimestamp.now(), - someContractSalt, - ) - SerializableContract.fromProtoVersioned( - sci.toProtoVersioned(testedProtocolVersion) - ) shouldEqual Right(sci) - } - } - } - - "SerializableContract.fromFatContract" when { - val transactionVersion = LfLanguageVersion.v2_dev - - val createdAt = LfTimestamp.Epoch - val contractSalt = TestSalt.generateSalt(0) - val driverMetadata = - DriverContractMetadata(contractSalt).toLfBytes(AuthenticatedContractIdVersionV11) - - val contractIdDiscriminator = ExampleTransactionFactory.lfHash(0) - val contractIdSuffix = - Unicum(Hash.build(TestHash.testHashPurpose, HashAlgorithm.Sha256).add(0).finish()) - - val invalidFormatContractId = LfContractId.assertFromString("00" * 34) - - val authenticatedContractId = - AuthenticatedContractIdVersionV11.fromDiscriminator(contractIdDiscriminator, contractIdSuffix) - - val pkgName = Ref.PackageName.assertFromString("pkgName") - - val createNode = Node.Create( - templateId = templateId, - packageName = pkgName, - coid = authenticatedContractId, - arg = LfValue.ValueInt64(123L), - signatories = Set(alice), - stakeholders = Set(alice), - keyOpt = None, - version = transactionVersion, - ) - - val disclosedContract = - FatContractInstance.fromCreateNode( - createNode, - CreationTime.CreatedAt(createdAt), - driverMetadata, - ) - - "provided a valid disclosed contract" should { - "succeed" in { - val actual = SerializableContract - .fromFatContract(disclosedContract) - .value - - actual shouldBe SerializableContract( - contractId = authenticatedContractId, - rawContractInstance = SerializableRawContractInstance - .create( - LfVersioned( - transactionVersion, - LfValue.ThinContractInstance( - packageName = pkgName, - template = templateId, - arg = LfValue.ValueInt64(123L), - ), - ) - ) - .value, - metadata = ContractMetadata.tryCreate(Set(alice), Set(alice), None), - ledgerCreateTime = 
CreationTime.CreatedAt(createdAt), - contractSalt = contractSalt, - ) - } - } - - "provided a disclosed contract with unknown contract id format" should { - "fail" in { - SerializableContract - .fromFatContract( - FatContractInstance.fromCreateNode( - createNode.mapCid(_ => invalidFormatContractId), - CreationTime.CreatedAt(createdAt), - driverMetadata, - ) - ) - .left - .value shouldBe s"Invalid disclosed contract id: malformed contract id '${invalidFormatContractId.toString}'. Suffix 00 is not a supported contract-id prefix" - } - } - - "provided a disclosed contract with missing driver contract metadata" should { - "fail" in { - SerializableContract - .fromFatContract( - FatContractInstance.fromCreateNode( - createNode, - CreationTime.CreatedAt(createdAt), - cantonData = Bytes.Empty, - ) - ) - .left - .value shouldBe "Missing driver contract metadata in provided disclosed contract" - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/SerializableRawContractInstanceTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/SerializableRawContractInstanceTest.scala deleted file mode 100644 index 5fa3799482..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/SerializableRawContractInstanceTest.scala +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protocol - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.protocol.ExampleTransactionFactory.transactionVersion -import com.digitalasset.canton.serialization.HasCryptographicEvidenceTest -import com.digitalasset.daml.lf.data.ImmArray -import com.digitalasset.daml.lf.transaction.Versioned -import com.digitalasset.daml.lf.value.{Value, ValueCoder} -import com.google.protobuf.ByteString -import org.scalatest.prop.TableFor3 -import org.scalatest.wordspec.AnyWordSpec - -class SerializableRawContractInstanceTest - extends AnyWordSpec - with HasCryptographicEvidenceTest - with BaseTest { - import ExampleTransactionFactory.suffixedId - - "SerializableContractInstance" should { - val absContractId1 = suffixedId(0, 1) - val absContractId2 = suffixedId(0, 2) - val absContractId3 = suffixedId(3, 4) - - val contractInst1 = ExampleTransactionFactory.contractInstance(List(absContractId1)) - val contractInst2 = ExampleTransactionFactory.contractInstance(List(absContractId2)) - val contractInst3 = ExampleTransactionFactory.contractInstance(List(absContractId3)) - val contractInst12 = - ExampleTransactionFactory.contractInstance(List(absContractId1, absContractId2)) - - val scenarios = - new TableFor3[String, SerializableRawContractInstance, SerializableRawContractInstance]( - ("test description", "first contract instance", "second contract instance"), - ( - "same transaction ID", - SerializableRawContractInstance.create(contractInst1).value, - SerializableRawContractInstance.create(contractInst2).value, - ), - ( - "different transaction ID", - SerializableRawContractInstance.create(contractInst2).value, - SerializableRawContractInstance.create(contractInst3).value, - ), - ( - "same contract ID, but different capture", - SerializableRawContractInstance.create(contractInst1).value, - SerializableRawContractInstance.create(contractInst12).value, - ), - ) - - scenarios.forEvery { case (name, coinst1, coinst2) => - name should { behave like 
hasCryptographicEvidenceSerialization(coinst1, coinst2) } - } - - "for a non-serializable instance" should { - val nonSerializableContractInst = ExampleTransactionFactory.veryDeepContractInstance - - "fail if no serialization is given" in { - SerializableRawContractInstance - .create(nonSerializableContractInst) - .left - .value shouldBe a[ValueCoder.EncodeError] - } - - "not attempt serialization if the serialization is provided" in { - SerializableRawContractInstance.createWithSerialization(nonSerializableContractInst)( - ByteString.EMPTY - ) - } - } - - "hashing" should { - - def inst(arg: Value): SerializableRawContractInstance = - SerializableRawContractInstance.createWithSerialization( - LfThinContractInst( - packageName = ExampleTransactionFactory.packageName, - template = ExampleTransactionFactory.templateId, - arg = Versioned(transactionVersion, arg), - ) - )(ByteString.EMPTY) - - val unNormalizedArg = Value.ValueRecord( - None, - ImmArray( - (None, Value.ValueOptional(Some(Value.ValueTrue))), - (None, Value.ValueOptional(None)), - ), - ) - - val normalizedArg = Value.ValueRecord( - None, - ImmArray( - (None, Value.ValueOptional(Some(Value.ValueTrue))) - ), - ) - - val unNormalizedInst = inst(unNormalizedArg) - val normalizedInst = inst(normalizedArg) - - "create different hashes for normalized and un-normalized values when not using upgrade friendly hashing" in { - unNormalizedInst.contractHash(false) should !==(normalizedInst.contractHash(false)) - } - - "create identical hashes for normalized and un-normalized values when using upgrade friendly hashing" in { - unNormalizedInst.contractHash(true) shouldBe normalizedInst.contractHash(true) - } - - } - - } -} - -object SerializableRawContractInstanceTest { - def toHexString(byte: Byte): String = { - val s = byte.toInt.toHexString - if (s.length < 2) "0" + s else s - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/WellFormedTransactionMergeTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/WellFormedTransactionMergeTest.scala deleted file mode 100644 index cd328facba..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/WellFormedTransactionMergeTest.scala +++ /dev/null @@ -1,348 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protocol - -import com.daml.ledger.javaapi.data.Identifier -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.ComparesLfTransactions.{TxTree, buildLfTransaction} -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.examples.java.iou -import com.digitalasset.canton.protocol.RollbackContext.RollbackScope -import com.digitalasset.canton.protocol.WellFormedTransaction.WithSuffixes -import com.digitalasset.canton.topology.{PartyId, UniqueIdentifier} -import com.digitalasset.canton.util.LfTransactionUtil -import com.digitalasset.canton.{ - BaseTest, - ComparesLfTransactions, - HasExecutionContext, - LfValue, - NeedsNewLfContractIds, -} -import com.digitalasset.daml.lf.transaction.test.TestNodeBuilder.CreateKey -import com.digitalasset.daml.lf.transaction.test.{TestNodeBuilder, TransactionBuilder} -import org.scalatest.wordspec.AnyWordSpec - -/** Tests WellFormedTransaction.merge particularly with respect to handling of top-level rollback - * nodes.
- */ -class WellFormedTransactionMergeTest - extends AnyWordSpec - with BaseTest - with HasExecutionContext - with ComparesLfTransactions - with NeedsNewLfContractIds { - - private val alice = PartyId(UniqueIdentifier.tryFromProtoPrimitive(s"alice::party")) - private val bob = PartyId(UniqueIdentifier.tryFromProtoPrimitive(s"bob::party")) - private val carol = PartyId(UniqueIdentifier.tryFromProtoPrimitive(s"carol::party")) - - // Top-level lf transaction builder for "state-less" lf node creations. - private implicit val tb: TestNodeBuilder = TestNodeBuilder - - import TransactionBuilder.Implicits.* - - private val subTxTree0 = TxTree( - tb.fetch( - create(newLfContractId(), iou.Iou.TEMPLATE_ID_WITH_PACKAGE_ID, alice, bob), - byKey = false, - ) - ) - private val subTxTree1 = TxTree( - create(newLfContractId(), iou.Iou.TEMPLATE_ID_WITH_PACKAGE_ID, alice, bob) - ) - private val contractCreate = - create(newLfContractId(), iou.Iou.TEMPLATE_ID_WITH_PACKAGE_ID, alice, alice) - private val subTxTree2 = Seq( - TxTree(contractCreate), - TxTree(tb.fetch(contractCreate, byKey = false)), - TxTree( - tb.exercise( - contract = contractCreate, - choice = "Call", - consuming = true, - actingParties = Set(alice.toLf), - argument = LfValue.ValueUnit, - byKey = false, - ), - TxTree( - tb.rollback(), - TxTree( - create( - newLfContractId(), - iou.GetCash.TEMPLATE_ID_WITH_PACKAGE_ID, - alice, - alice, - arg = args( - LfValue.ValueParty(alice.toLf), - LfValue.ValueParty(alice.toLf), - args( - LfValue.ValueNumeric(com.digitalasset.daml.lf.data.Numeric.assertFromString("0.0")) - ), - ), - ) - ), - ), - ), - ) - private val subTxTree3 = TxTree( - create(newLfContractId(), iou.Iou.TEMPLATE_ID_WITH_PACKAGE_ID, carol, alice, Seq(bob)) - ) - private val subTxTree4 = TxTree( - tb.exercise( - contract = create(newLfContractId(), iou.Iou.TEMPLATE_ID_WITH_PACKAGE_ID, bob, bob), - choice = "Archive", - consuming = true, - actingParties = Set(bob.toLf), - argument = LfValue.ValueUnit, - byKey = false, - ) - ) - - "WellFormedTransaction.merge" should { - import scala.language.implicitConversions - implicit def toPositiveInt(i: Int): PositiveInt = PositiveInt.tryCreate(i) - - "wrap transactions under common rollback" when { - "single transaction has multiple roots" in { - val actual = merge(inputTransaction(Seq(1), Seq(subTxTree1) ++ subTxTree2: _*)) - val expected = expectedTransaction(TxTree(tb.rollback(), Seq(subTxTree1) ++ subTxTree2: _*)) - - assertTransactionsMatch(expected, actual) - } - - "two single-root transactions" in { - val actual = merge( - inputTransaction(Seq(1), subTxTree1), - inputTransaction(Seq(1), subTxTree2*), - ) - val expected = expectedTransaction(TxTree(tb.rollback(), Seq(subTxTree1) ++ subTxTree2: _*)) - - assertTransactionsMatch(expected, actual) - } - - "multilevel rollback scopes match" in { - val levels = 5 - - val actual = merge( - inputTransaction((1 to levels).map(PositiveInt.tryCreate), subTxTree1), - inputTransaction((1 to levels).map(PositiveInt.tryCreate), subTxTree2*), - ) - val expected = expectedTransaction( - (2 to levels).foldLeft(TxTree(tb.rollback(), Seq(subTxTree1) ++ subTxTree2: _*)) { - case (a, _) => TxTree(tb.rollback(), a) - } - ) - - assertTransactionsMatch(expected, actual) - - } - } - - "not wrap transactions with differing rollbacks" when { - "rollback followed by non-rollback" in { - val actual = merge( - inputTransaction(Seq(1), subTxTree1), - inputTransaction(Seq.empty, subTxTree2*), - ) - val expected = expectedTransaction(Seq(TxTree(tb.rollback(), subTxTree1)) ++ 
subTxTree2: _*) - - assertTransactionsMatch(expected, actual) - } - - "non-rollback followed by rollback" in { - val actual = merge( - inputTransaction(Seq.empty, subTxTree1), - inputTransaction(Seq(1), subTxTree2*), - ) - val expected = expectedTransaction(subTxTree1, TxTree(tb.rollback(), subTxTree2*)) - - assertTransactionsMatch(expected, actual) - } - - "rollbacks separated by non-rollback" in { - val actual = merge( - inputTransaction(Seq(1), subTxTree1), - inputTransaction(Seq.empty, subTxTree2*), - inputTransaction(Seq(1), subTxTree3), - ) - val expected = expectedTransaction( - Seq(TxTree(tb.rollback(), subTxTree1)) ++ - subTxTree2 :+ - TxTree(tb.rollback(), subTxTree3): _* - ) - - assertTransactionsMatch(expected, actual) - } - - "rollbacks have no common scope prefix" in { - val actual = merge( - inputTransaction(Seq(1), subTxTree1), - inputTransaction(Seq(2), subTxTree2*), - inputTransaction(Seq(3), subTxTree3), - ) - val expected = expectedTransaction( - TxTree(tb.rollback(), subTxTree1), - TxTree(tb.rollback(), subTxTree2*), - TxTree(tb.rollback(), subTxTree3), - ) - - assertTransactionsMatch(expected, actual) - } - } - - "partially wrap transactions with shared rollback scope prefix" when { - "rollback nesting level increases" in { - val actual = merge( - inputTransaction(Seq.empty, subTxTree0), - inputTransaction(Seq(1), subTxTree1), - inputTransaction(Seq(1, 2), subTxTree2*), - inputTransaction(Seq(1, 2, 3, 4), subTxTree3), - inputTransaction(Seq.empty, subTxTree4), - ) - val expected = expectedTransaction( - subTxTree0, - TxTree( - tb.rollback(), - subTxTree1, - TxTree( - tb.rollback(), - subTxTree2 :+ TxTree(tb.rollback(), TxTree(tb.rollback(), subTxTree3)): _* - ), - ), - subTxTree4, - ) - - assertTransactionsMatch(expected, actual) - } - - "rollback nesting level decreases" in { - val actual = merge( - inputTransaction(Seq.empty, subTxTree0), - inputTransaction(Seq(1, 2, 3, 4), subTxTree1), - inputTransaction(Seq(1, 2, 3), subTxTree2*), - inputTransaction(Seq(1), subTxTree3), - inputTransaction(Seq.empty, subTxTree4), - ) - val expected = expectedTransaction( - subTxTree0, - TxTree( - tb.rollback(), - TxTree( - tb.rollback(), - TxTree(tb.rollback(), Seq(TxTree(tb.rollback(), subTxTree1)) ++ subTxTree2: _*), - ), - subTxTree3, - ), - subTxTree4, - ) - - assertTransactionsMatch(expected, actual) - } - - "rollback scope interrupted" in { - val actual = merge( - inputTransaction(Seq.empty, subTxTree0), - inputTransaction(Seq(1, 2, 3), subTxTree1), - // Interrupting a rollback context should never happen (given the pre-order traversal), but - // this test documents how the current implementation would "reset" the tracking of rollback scopes.
- inputTransaction( - Seq.empty, - subTxTree2* - ), - inputTransaction(Seq(1, 2, 3), subTxTree3), - inputTransaction(Seq.empty, subTxTree4), - ) - val expected = expectedTransaction( - Seq(subTxTree0) :+ - TxTree(tb.rollback(), TxTree(tb.rollback(), TxTree(tb.rollback(), subTxTree1))) :++ - subTxTree2 :+ - TxTree(tb.rollback(), TxTree(tb.rollback(), TxTree(tb.rollback(), subTxTree3))) :+ - subTxTree4: _* - ) - - assertTransactionsMatch(expected, actual) - } - } - } - - private def transactionHelper[T](txTrees: TxTree*)(f: LfVersionedTransaction => T): T = f( - buildLfTransaction(txTrees*) - ) - - private def inputTransaction( - rbScope: RollbackScope, - txTrees: TxTree* - ): WithRollbackScope[WellFormedTransaction[WithSuffixes]] = - transactionHelper(txTrees*)(lfTx => - WithRollbackScope( - rbScope, - WellFormedTransaction.normalizeAndAssert( - lfTx, - TransactionMetadata( - CantonTimestamp.Epoch, - CantonTimestamp.Epoch, - lfTx.nodes.collect { - case (nid, node) if LfTransactionUtil.nodeHasSeed(node) => nid -> hasher() - }, - ), - WithSuffixes, - ), - ) - ) - - private def expectedTransaction(txTrees: TxTree*): LfVersionedTransaction = buildLfTransaction( - txTrees* - ) - - private def merge( - transactions: WithRollbackScope[WellFormedTransaction[WithSuffixes]]* - ) = - valueOrFail( - WellFormedTransaction.merge( - NonEmpty.from(transactions).valueOrFail("Cannot merge empty list of transactions") - ) - )("unexpectedly failed to merge").unwrap - - private def create[T]( - cid: LfContractId, - template: Identifier, - payer: PartyId, - owner: PartyId, - viewers: Seq[PartyId] = Seq.empty, - arg: LfValue = notUsed, - )(implicit tb: TestNodeBuilder) = { - val lfPayer = payer.toLf - val lfOwner = owner.toLf - val lfViewers = viewers.map(_.toLf) - val lfObservers = Set(lfOwner) ++ lfViewers.toSet - - val lfTemplateId = templateIdFromIdentifier(template) - - tb.create( - id = cid, - templateId = lfTemplateId, - argument = template match { - case iou.Iou.TEMPLATE_ID_WITH_PACKAGE_ID => - require( - arg == notUsed, - "For IOUs, this function figures out the sig and obs parameters by itself", - ) - args( - LfValue.ValueParty(lfPayer), - LfValue.ValueParty(lfOwner), - args( - LfValue.ValueNumeric(com.digitalasset.daml.lf.data.Numeric.assertFromString("0.0")) - ), - valueList(lfObservers.map(LfValue.ValueParty.apply)), - ) - case _ => arg - }, - signatories = Set(lfPayer), - observers = lfObservers, - key = CreateKey.NoKey, - ) - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/WellFormedTransactionTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/WellFormedTransactionTest.scala deleted file mode 100644 index d438420ba9..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/WellFormedTransactionTest.scala +++ /dev/null @@ -1,346 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
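// Illustrative sketch of the idea the merge cases above exercise: adjacent subtransactions are
// wrapped under common Rollback nodes exactly as far as their rollback scopes share a prefix
// (a RollbackScope is a sequence of PositiveInt in these tests). The helper below is hypothetical;
// the real logic lives in WellFormedTransaction.merge.

def commonRollbackPrefix[A](left: Seq[A], right: Seq[A]): Seq[A] =
  left.zip(right).takeWhile { case (l, r) => l == r }.map(_._1)

// e.g. commonRollbackPrefix(Seq(1, 2, 3, 4), Seq(1, 2, 5)) == Seq(1, 2):
// two shared enclosing rollbacks, with divergent nesting below them.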
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protocol - -import com.digitalasset.canton.protocol.ExampleTransactionFactory.* -import com.digitalasset.canton.protocol.WellFormedTransaction.{State, WithSuffixes, WithoutSuffixes} -import com.digitalasset.canton.{BaseTest, HasExecutionContext, LfPackageName, LfPartyId} -import com.digitalasset.daml.lf.data.ImmArray -import com.digitalasset.daml.lf.value.Value -import org.scalatest.prop.{TableFor3, TableFor4} -import org.scalatest.wordspec.AnyWordSpec - -class WellFormedTransactionTest extends AnyWordSpec with BaseTest with HasExecutionContext { - - val factory: ExampleTransactionFactory = new ExampleTransactionFactory()() - - val lfAbs: LfContractId = suffixedId(0, 0) - - val contractInst = contractInstance() - val serContractInst = asSerializableRaw(contractInst) - - def createNode( - cid: LfContractId, - contractInstance: LfThinContractInst = ExampleTransactionFactory.contractInstance(), - signatories: Set[LfPartyId] = Set(signatory), - key: Option[LfGlobalKeyWithMaintainers] = None, - ): LfNodeCreate = - ExampleTransactionFactory.createNode( - cid, - signatories = signatories, - contractInstance = contractInstance, - key = key, - ) - - def fetchNode(cid: LfContractId): LfNodeFetch = - ExampleTransactionFactory.fetchNode( - cid, - actingParties = Set(submitter), - signatories = Set(signatory), - ) - - def exerciseNode(cid: LfContractId, child: Int): LfNodeExercises = - ExampleTransactionFactory.exerciseNode( - cid, - children = List(nodeId(child)), - signatories = Set(signatory), - ) - - private def nid(i: Int): String = """NodeId\(""" + i.toString + """.*\)""" - - val malformedExamples - : TableFor4[String, (LfVersionedTransaction, TransactionMetadata), State, String] = - Table( - ("Description", "Transaction and seeds", "Suffixing", "Expected Error Message"), - ( - "Orphaned node", - factory.versionedTransactionWithSeeds(Seq.empty, fetchNode(lfAbs)), - WithSuffixes, - "OrphanedNode: 0", - ), - ( - "Dangling root node", - factory.versionedTransactionWithSeeds(Seq(0)), - WithSuffixes, - "DanglingNodeId: 0", - ), - ( - "Dangling exercise child", - factory.versionedTransactionWithSeeds(Seq(0), exerciseNode(lfAbs, child = 1)), - WithSuffixes, - "DanglingNodeId: 1", - ), - ( - "Cycle", - factory.versionedTransactionWithSeeds(Seq(0), exerciseNode(lfAbs, child = 0)), - WithSuffixes, - "AliasedNode: 0", - ), - ( - "Two parents", - factory.versionedTransactionWithSeeds(Seq(0, 0), fetchNode(lfAbs)), - WithSuffixes, - "AliasedNode: 0", - ), - ( - "Negative node ID", - transactionFrom(Seq(-1), -1, fetchNode(lfAbs)) -> factory.mkMetadata(), - WithoutSuffixes, - "Negative node IDs: -1", - ), - ( - "byKey node with no key", - factory.versionedTransactionWithSeeds( - Seq(0), - ExampleTransactionFactory.fetchNode( - unsuffixedId(0), - actingParties = Set(submitter), - signatories = Set(signatory), - byKey = true, - ), - ), - WithoutSuffixes, - "byKey nodes without a key: 0", - ), - ( - "Missing seed for create", - (transaction(Seq(0), createNode(unsuffixedId(0))), factory.mkMetadata()), - WithoutSuffixes, - "Nodes without seeds: 0", - ), - ( - "Missing seed for exercise", - ( - transaction( - Seq(0), - ExampleTransactionFactory.exerciseNode(lfAbs, signatories = Set(signatory)), - ), - factory.mkMetadata(), - ), - WithoutSuffixes, - "Nodes without seeds: 0", - ), - ( - "Superfluous seed for fetch", - ( - transaction(Seq(0), fetchNode(lfAbs)), - factory.mkMetadata(Map(LfNodeId(0) -> ExampleTransactionFactory.lfHash(0))), - ), - 
WithSuffixes, - "Nodes with superfluous seeds: 0", - ), - ( - "Duplicate create", - factory.versionedTransactionWithSeeds( - Seq(0, 1), - createNode(unsuffixedId(0)), - createNode(unsuffixedId(0)), - ), - WithoutSuffixes, - s"Contract id ${unsuffixedId(0).coid} is created in nodes ${nid(0)} and ${nid(1)}", - ), - ( - "Create shadows previously referenced id", - factory.versionedTransactionWithSeeds(Seq(0, 1), fetchNode(lfAbs), createNode(lfAbs)), - WithSuffixes, - s"Contract id ${lfAbs.coid} created in node ${nid(1)} is referenced before in ${nid(0)}", - ), - ( - "Unsuffixed discriminator appears with suffix in value", - factory.versionedTransactionWithSeeds( - Seq(0), - createNode( - unsuffixedId(0), - contractInstance = contractInstance(capturedIds = Seq(suffixedId(0, 1))), - ), - ), - WithoutSuffixes, - s"Contract discriminator 0000000000000000000000000000000000000000000000000000000000000000 created in ${nid(0)} is not fresh due to ${nid(0)}", - ), - ( - "Unsuffixed discriminator is used earlier with suffix", - factory.versionedTransactionWithSeeds( - Seq(0, 1), - fetchNode(suffixedId(0, 1)), - createNode(unsuffixedId(0)), - ), - WithoutSuffixes, - s"Contract discriminator 0000000000000000000000000000000000000000000000000000000000000000 created in ${nid(1)} is not fresh due to ${nid(0)}", - ), - ( - "Unsuffixed discriminator is used with suffix in later node", - factory.versionedTransactionWithSeeds( - Seq(0, 1), - createNode(unsuffixedId(1)), - exerciseNode(suffixedId(0, 1), 2), - fetchNode(suffixedId(1, -1)), - ), - WithoutSuffixes, - s"Contract discriminator 0001000000000000000000000000000000000000000000000000000000000000 created in ${nid( - 0 - )} is not fresh due to contract Id ${suffixedId(1, -1).coid} in ${nid(2)}", - ), - ( - "Unsuffixed discriminator is referenced with suffix in later node", - factory.versionedTransactionWithSeeds( - Seq(0, 1), - createNode(unsuffixedId(1)), - ExampleTransactionFactory.exerciseNode( - suffixedId(0, 0), - signatories = Set(signatory), - actingParties = Set(signatory), - exerciseResult = - Some(contractInstance(capturedIds = Seq(suffixedId(1, -1))).unversioned.arg), - ), - ), - WithoutSuffixes, - s"Contract discriminator 0001000000000000000000000000000000000000000000000000000000000000 created in ${nid( - 0 - )} is not fresh due to contract Id ${suffixedId(1, -1).coid} in ${nid(1)}", - ), - ( - "Missing signatory", - factory.versionedTransactionWithSeeds( - Seq(0), - createNode(unsuffixedId(0)).copy(signatories = Set.empty), - ), - WithoutSuffixes, - "neither signatories nor maintainers present at nodes 0", - ), - ( - "Signatory not declared as informee", - factory.versionedTransactionWithSeeds( - Seq(0), - createNode(unsuffixedId(0)).copy(stakeholders = Set.empty), - ), - WithoutSuffixes, - "signatory or maintainer not declared as informee: signatory::default at node 0", - ), - ( - "Missing fetch actors", - factory - .versionedTransactionWithSeeds(Seq(0), fetchNode(lfAbs).copy(actingParties = Set.empty)), - WithSuffixes, - "fetch nodes with unspecified acting parties at nodes 0", - ), - ( - "Failure to serialize - depth limit exceeded", - factory.versionedTransactionWithSeeds( - Seq(0, 1), - createNode(unsuffixedId(0), contractInstance = veryDeepContractInstance), - LfNodeExercises( - targetCoid = suffixedId(2, -1), - packageName = packageName, - templateId = templateId, - interfaceId = None, - choiceId = LfChoiceName.assertFromString("choice"), - consuming = false, - actingParties = Set(ExampleTransactionFactory.submitter), - chosenValue = 
ExampleTransactionFactory.veryDeepValue, - stakeholders = Set(ExampleTransactionFactory.submitter), - signatories = Set(ExampleTransactionFactory.submitter), - choiceObservers = Set.empty, - choiceAuthorizers = None, - children = ImmArray.empty, - exerciseResult = None, - keyOpt = None, - byKey = false, - version = ExampleTransactionFactory.transactionVersion, - ), - ), - WithoutSuffixes, - List( - """unable to serialize contract instance in node 0: """ + - s"""Provided Daml-LF value to encode exceeds maximum nesting level of ${Value.MAXIMUM_NESTING}""", - """unable to serialize chosen value in node 1: """ + - s"""Provided Daml-LF value to encode exceeds maximum nesting level of ${Value.MAXIMUM_NESTING}""", - ).sorted.mkString(", "), - ), - ( - "Failure to parse party id", - factory.versionedTransactionWithSeeds( - Seq(0), - createNode(unsuffixedId(0), signatories = Set(LfPartyId.assertFromString("bubu"))), - ), - WithoutSuffixes, - """Unable to parse party: Invalid unique identifier `bubu` .*""", - ), - ( - "Empty maintainers", - factory.versionedTransactionWithSeeds( - Seq(0, 1), - createNode( - unsuffixedId(1), - signatories = Set(signatory), - key = Some( - LfGlobalKeyWithMaintainers - .assertBuild( - templateId, - contractInst.unversioned.arg, - Set.empty, - LfPackageName.assertFromString("package-name"), - ) - ), - ), - ExampleTransactionFactory.exerciseNode( - lfAbs, - signatories = Set(signatory), - actingParties = Set(signatory), - key = Some( - LfGlobalKeyWithMaintainers.assertBuild( - templateId, - contractInst.unversioned.arg, - Set.empty, - packageName, - ) - ), - ), - ), - WithoutSuffixes, - s"Key of node 0 has no maintainer, Key of node 1 has no maintainer", - ), - ) - - // Well-formed transactions are mostly covered by ExampleTransactionFactoryTest. So we test only a few special cases here. - val wellformedExamples: TableFor3[String, (LfVersionedTransaction, TransactionMetadata), State] = - Table( - ("Description", "Transaction and seeds", "Suffixing"), - ( - "Suffixed discriminators need not be fresh", - factory.versionedTransactionWithSeeds( - Seq(0, 1, 2), - fetchNode(suffixedId(0, -1)), - createNode(suffixedId(0, 0)), - fetchNode(unsuffixedId(0)), - ), - WithSuffixes, - ), - ) - - "A transaction" when { - malformedExamples.forEvery { - case (description, (transaction, metadata), state, expectedError) => - description must { - "be reported as malformed" in { - WellFormedTransaction - .normalizeAndCheck(transaction, metadata, state) - .left - .value should fullyMatch regex expectedError - an[IllegalArgumentException] must be thrownBy - WellFormedTransaction.normalizeAndAssert(transaction, metadata, WithoutSuffixes) - } - } - } - - wellformedExamples.forEvery { case (description, (transaction, metadata), state) => - description must { - "be accepted as well-formed" in { - WellFormedTransaction - .normalizeAndCheck(transaction, metadata, state) - .value shouldBe a[WellFormedTransaction[_]] - } - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/hash/HashUtils.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/hash/HashUtils.scala deleted file mode 100644 index ef2f30f9ab..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/hash/HashUtils.scala +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
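// Illustrative sketch of the table-driven negative-test pattern used in WellFormedTransactionTest
// above (the validator and all names below are hypothetical; only the scalatest traits are real):

import org.scalatest.EitherValues
import org.scalatest.matchers.should.Matchers
import org.scalatest.prop.TableDrivenPropertyChecks
import org.scalatest.wordspec.AnyWordSpec

class TableDrivenSketch
    extends AnyWordSpec
    with Matchers
    with EitherValues
    with TableDrivenPropertyChecks {
  // Toy validator standing in for WellFormedTransaction.normalizeAndCheck.
  private def validate(n: Int): Either[String, Int] =
    if (n < 0) Left(s"Negative node IDs: $n") else Right(n)

  "a validator" should {
    "report each malformed input with the expected error" in {
      val malformed = Table(
        ("description", "input", "expected error regex"),
        ("negative id", -1, "Negative node IDs: -1"),
        ("very negative id", -42, "Negative node IDs: -42"),
      )
      forEvery(malformed) { (_, input, expectedError) =>
        validate(input).left.value should fullyMatch regex expectedError
      }
    }
  }
}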
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protocol.hash - -import com.daml.crypto.MessageDigestPrototype -import com.digitalasset.canton.LfTimestamp -import com.digitalasset.canton.crypto.Hash -import com.digitalasset.canton.crypto.HashAlgorithm.Sha256 -import com.digitalasset.canton.data.LedgerTimeBoundaries -import com.digitalasset.canton.protocol.LfHash -import com.digitalasset.canton.protocol.hash.HashTracer.StringHashTracer -import com.digitalasset.daml.lf.data.Ref.IdString -import com.digitalasset.daml.lf.data.{Bytes, Ref, Time} -import com.digitalasset.daml.lf.transaction.{ - CreationTime, - FatContractInstance, - Node, - TransactionVersion, -} -import com.digitalasset.daml.lf.value.{Value, Value as V} -import com.google.protobuf.ByteString -import org.scalatest.matchers.should.Matchers - -import java.time.Duration -import java.util.UUID -import scala.collection.immutable.{SortedMap, SortedSet} - -trait HashUtilsTest { this: Matchers => - val packageId0: IdString.PackageId = Ref.PackageId.assertFromString("package") - val packageName0: IdString.PackageName = Ref.PackageName.assertFromString("package-name-0") - - implicit val contractIdOrdering: Ordering[Value.ContractId] = Ordering.by(_.coid) - val transactionUUID = UUID.fromString("4c6471d3-4e09-49dd-addf-6cd90e19c583") - val cid1 = Value.ContractId.assertFromString( - "0007e7b5534931dfca8e1b485c105bae4e10808bd13ddc8e897f258015f9d921c5" - ) - val cid2 = Value.ContractId.assertFromString( - "0059b59ad7a6b6066e77b91ced54b8282f0e24e7089944685cb8f22f32fcbc4e1b" - ) - val alice = Ref.Party.assertFromString("alice") - val bob = Ref.Party.assertFromString("bob") - val node1 = FatContractInstance.fromCreateNode( - dummyCreateNode(cid1, Set(alice), Set(alice)), - CreationTime.CreatedAt(LfTimestamp.Epoch.add(Duration.ofDays(10))), - Bytes.assertFromString("0010"), - ) - val node2 = FatContractInstance.fromCreateNode( - dummyCreateNode(cid2, Set(bob), Set(bob)), - CreationTime.CreatedAt(LfTimestamp.Epoch.add(Duration.ofDays(20))), - Bytes.assertFromString("0050"), - ) - val metadata = TransactionMetadataHashBuilder.MetadataV1( - actAs = SortedSet(alice, bob), - commandId = Ref.CommandId.assertFromString("command-id"), - transactionUUID = transactionUUID, - mediatorGroup = 0, - synchronizerId = "synchronizerId", - timeBoundaries = LedgerTimeBoundaries( - Time.Range( - Time.Timestamp.assertFromLong(0xaaaa), - Time.Timestamp.assertFromLong(0xbbbb), - ) - ), - preparationTime = Time.Timestamp.Epoch, - disclosedContracts = SortedMap( - cid1 -> node1, - cid2 -> node2, - ), - ) - - def assertStringTracer(stringHashTracer: StringHashTracer, hash: Hash) = { - val messageDigest = MessageDigestPrototype.Sha256.newDigest - messageDigest.update(stringHashTracer.asByteArray) - Hash.tryFromByteStringRaw(ByteString.copyFrom(messageDigest.digest()), Sha256) shouldBe hash - } - - def cid(s: String): V.ContractId = V.ContractId.V1(LfHash.hashPrivateKey(s)) - - def dummyCreateNode( - createCid: V.ContractId, - signatories: Set[Ref.Party] = Set.empty, - stakeholders: Set[Ref.Party] = Set.empty, - ): Node.Create = - Node.Create( - coid = createCid, - packageName = Ref.PackageName.assertFromString("PkgName"), - templateId = Ref.Identifier.assertFromString("-dummyPkg-:DummyModule:dummyName"), - arg = V.ValueContractId(cid("#dummyCid")), - signatories = signatories, - stakeholders = stakeholders, - keyOpt = None, - version = TransactionVersion.minVersion, - ) - - def defRef(module: String, name: String): Ref.Identifier = - 
Ref.Identifier( - packageId0, - Ref.QualifiedName( - Ref.DottedName.assertFromString(module), - Ref.DottedName.assertFromString(name), - ), - ) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/hash/MetadataHashV1Test.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/hash/MetadataHashV1Test.scala deleted file mode 100644 index 594e2e051a..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/hash/MetadataHashV1Test.scala +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protocol.hash - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.crypto.Hash -import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.AnyWordSpecLike - -class MetadataHashV1Test extends BaseTest with AnyWordSpecLike with Matchers with HashUtilsTest { - - "Metadata Encoding" should { - val expectedMetadataHash = Hash - .fromHexStringRaw("2a0690693367f70fbe83e5e99df6930dbd2336618a3a0721bb6fa3bcc88d5a53") - .getOrElse(fail("Invalid hash")) - - "be stable" in { - TransactionMetadataHashBuilder - .hashTransactionMetadataV1(metadata) - .toHexString shouldBe expectedMetadataHash.toHexString - } - - "explain encoding" in { - val hashTracer = HashTracer.StringHashTracer(true) - - val actualMetadataHash = TransactionMetadataHashBuilder.hashTransactionMetadataV1( - metadata, - hashTracer, - ) - - hashTracer.result shouldBe """'00000030' # Hash Purpose - |'01' # 01 (Metadata Encoding Version) - |# Act As Parties - |'00000002' # 2 (int) - |'00000005' # 5 (int) - |'616c696365' # alice (string) - |'00000003' # 3 (int) - |'626f62' # bob (string) - |# Command Id - |'0000000a' # 10 (int) - |'636f6d6d616e642d6964' # command-id (string) - |# Transaction UUID - |'00000024' # 36 (int) - |'34633634373164332d346530392d343964642d616464662d366364393065313963353833' # 4c6471d3-4e09-49dd-addf-6cd90e19c583 (string) - |# Mediator Group - |'00000000' # 0 (int) - |# Synchronizer Id - |'0000000e' # 14 (int) - |'73796e6368726f6e697a65724964' # synchronizerId (string) - |# Min Time Boundary - |'01' # Some - |'000000000000aaaa' # 43690 (long) - |# Max Time Boundary - |'01' # Some - |'000000000000bbbb' # 48059 (long) - |# Preparation Time - |'0000000000000000' # 0 (long) - |# Disclosed Contracts - |'00000002' # 2 (int) - |# Created At - |'000000c92a69c000' # 864000000000 (long) - |# Create Contract - | '01' # 01 (Node Encoding Version) - | # Create Node - | # Node Version - | '00000003' # 3 (int) - | '322e31' # 2.1 (string) - | '00' # Create Node Tag - | # Node Seed - | '00' # None - | # Contract Id - | '00000021' # 33 (int) - | '0007e7b5534931dfca8e1b485c105bae4e10808bd13ddc8e897f258015f9d921c5' # 0007e7b5534931dfca8e1b485c105bae4e10808bd13ddc8e897f258015f9d921c5 (contractId) - | # Package Name - | '00000007' # 7 (int) - | '506b674e616d65' # PkgName (string) - | # Template Id - | '0000000a' # 10 (int) - | '2d64756d6d79506b672d' # -dummyPkg- (string) - | '00000001' # 1 (int) - | '0000000b' # 11 (int) - | '44756d6d794d6f64756c65' # DummyModule (string) - | '00000001' # 1 (int) - | '00000009' # 9 (int) - | '64756d6d794e616d65' # dummyName (string) - | # Arg - | '08' # ContractId Type Tag - | '00000021' # 33 (int) - | '0097a092402108f5593bac7fb3c909cd316910197dd98d603042a45ab85c81e0fd' # 
0097a092402108f5593bac7fb3c909cd316910197dd98d603042a45ab85c81e0fd (contractId) - | # Signatories - | '00000001' # 1 (int) - | '00000005' # 5 (int) - | '616c696365' # alice (string) - | # Stakeholders - | '00000001' # 1 (int) - | '00000005' # 5 (int) - | '616c696365' # alice (string) - |'f6bb13796130c23e43c952b86b1583032be325dc40072c02a0279069fc3656c1' # Disclosed Contract - |# Created At - |'0000019254d38000' # 1728000000000 (long) - |# Create Contract - | '01' # 01 (Node Encoding Version) - | # Create Node - | # Node Version - | '00000003' # 3 (int) - | '322e31' # 2.1 (string) - | '00' # Create Node Tag - | # Node Seed - | '00' # None - | # Contract Id - | '00000021' # 33 (int) - | '0059b59ad7a6b6066e77b91ced54b8282f0e24e7089944685cb8f22f32fcbc4e1b' # 0059b59ad7a6b6066e77b91ced54b8282f0e24e7089944685cb8f22f32fcbc4e1b (contractId) - | # Package Name - | '00000007' # 7 (int) - | '506b674e616d65' # PkgName (string) - | # Template Id - | '0000000a' # 10 (int) - | '2d64756d6d79506b672d' # -dummyPkg- (string) - | '00000001' # 1 (int) - | '0000000b' # 11 (int) - | '44756d6d794d6f64756c65' # DummyModule (string) - | '00000001' # 1 (int) - | '00000009' # 9 (int) - | '64756d6d794e616d65' # dummyName (string) - | # Arg - | '08' # ContractId Type Tag - | '00000021' # 33 (int) - | '0097a092402108f5593bac7fb3c909cd316910197dd98d603042a45ab85c81e0fd' # 0097a092402108f5593bac7fb3c909cd316910197dd98d603042a45ab85c81e0fd (contractId) - | # Signatories - | '00000001' # 1 (int) - | '00000003' # 3 (int) - | '626f62' # bob (string) - | # Stakeholders - | '00000001' # 1 (int) - | '00000003' # 3 (int) - | '626f62' # bob (string) - |'010ee30a2b17bd729bc5ccada01a62bfb7283641610feb5913fb46b2972368a4' # Disclosed Contract - |""".stripMargin - - actualMetadataHash shouldBe expectedMetadataHash - - assertStringTracer(hashTracer, expectedMetadataHash) - } - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/hash/NodeHashV1Test.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/hash/NodeHashV1Test.scala deleted file mode 100644 index b8a6af3f3c..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/hash/NodeHashV1Test.scala +++ /dev/null @@ -1,847 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
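// Illustrative decoding of one convention visible in the trace above: strings are written as a
// 4-byte big-endian length followed by their UTF-8 bytes (e.g. '00000005' then '616c696365' for
// "alice"). A standalone encoder for just that framing (assumption: this mirrors what the traced
// builder does; it is not the production code):

import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets

def lengthPrefixedUtf8(s: String): Array[Byte] = {
  val utf8 = s.getBytes(StandardCharsets.UTF_8)
  // ByteBuffer defaults to big-endian byte order, matching the traces.
  ByteBuffer.allocate(4 + utf8.length).putInt(utf8.length).put(utf8).array()
}

// lengthPrefixedUtf8("alice").map("%02x".format(_)).mkString == "00000005" + "616c696365"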
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protocol.hash - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.crypto.Hash -import com.digitalasset.canton.protocol.LfHash -import com.digitalasset.canton.protocol.hash.TransactionHash.NodeHashingError -import com.digitalasset.canton.protocol.hash.TransactionHash.NodeHashingError.IncompleteTransactionTree -import com.digitalasset.daml.lf.data.* -import com.digitalasset.daml.lf.data.Ref.{ChoiceName, PackageName, Party} -import com.digitalasset.daml.lf.language.LanguageVersion -import com.digitalasset.daml.lf.transaction.* -import com.digitalasset.daml.lf.value.Value.ContractId -import com.digitalasset.daml.lf.value.test.TypedValueGenerators.ValueAddend as VA -import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.AnyWordSpecLike - -class NodeHashV1Test extends BaseTest with AnyWordSpecLike with Matchers with HashUtilsTest { - - private val globalKey = GlobalKeyWithMaintainers( - GlobalKey.assertBuild( - defRef("module_key", "name"), - VA.text.inj("hello"), - PackageName.assertFromString("package_name_key"), - ), - Set[Party](Ref.Party.assertFromString("david")), - ) - - private val globalKey2 = GlobalKeyWithMaintainers( - GlobalKey.assertBuild( - defRef("module_key", "name"), - VA.text.inj("bye"), - PackageName.assertFromString("package_name_key"), - ), - Set[Party](Ref.Party.assertFromString("david")), - ) - - private val contractId1 = "0007e7b5534931dfca8e1b485c105bae4e10808bd13ddc8e897f258015f9d921c5" - private val contractId2 = "0059b59ad7a6b6066e77b91ced54b8282f0e24e7089944685cb8f22f32fcbc4e1b" - - private val createNode = Node.Create( - coid = ContractId.V1.assertFromString(contractId1), - packageName = packageName0, - templateId = defRef("module", "name"), - arg = VA.text.inj("hello"), - signatories = - Set[Party](Ref.Party.assertFromString("alice"), Ref.Party.assertFromString("bob")), - stakeholders = - Set[Party](Ref.Party.assertFromString("alice"), Ref.Party.assertFromString("charlie")), - keyOpt = None, - version = LanguageVersion.v2_1, - ) - - private val createNodeEncoding = """'01' # 01 (Node Encoding Version) - |# Create Node - |# Node Version - |'00000003' # 3 (int) - |'322e31' # 2.1 (string) - |'00' # Create Node Tag - |# Node Seed - |'01' # Some - |'926bbb6f341bc0092ae65d06c6e284024907148cc29543ef6bff0930f5d52c19' # node seed - |# Contract Id - |'00000021' # 33 (int) - |'0007e7b5534931dfca8e1b485c105bae4e10808bd13ddc8e897f258015f9d921c5' # 0007e7b5534931dfca8e1b485c105bae4e10808bd13ddc8e897f258015f9d921c5 (contractId) - |# Package Name - |'0000000e' # 14 (int) - |'7061636b6167652d6e616d652d30' # package-name-0 (string) - |# Template Id - |'00000007' # 7 (int) - |'7061636b616765' # package (string) - |'00000001' # 1 (int) - |'00000006' # 6 (int) - |'6d6f64756c65' # module (string) - |'00000001' # 1 (int) - |'00000004' # 4 (int) - |'6e616d65' # name (string) - |# Arg - |'07' # Text Type Tag - |'00000005' # 5 (int) - |'68656c6c6f' # hello (string) - |# Signatories - |'00000002' # 2 (int) - |'00000005' # 5 (int) - |'616c696365' # alice (string) - |'00000003' # 3 (int) - |'626f62' # bob (string) - |# Stakeholders - |'00000002' # 2 (int) - |'00000005' # 5 (int) - |'616c696365' # alice (string) - |'00000007' # 7 (int) - |'636861726c6965' # charlie (string)""".stripMargin - - private val createNodeHash = "6d2cfe58c2294000592034f4bdfe397fe246901bb8b63e3b9e041bb478e174b7" - private val createNode2 = createNode.copy( - coid = 
ContractId.V1.assertFromString(contractId2) - ) - - private val fetchNode = Node.Fetch( - coid = ContractId.V1.assertFromString(contractId1), - packageName = packageName0, - templateId = defRef("module", "name"), - actingParties = - Set[Party](Ref.Party.assertFromString("alice"), Ref.Party.assertFromString("bob")), - signatories = Set[Party](Ref.Party.assertFromString("alice")), - stakeholders = Set[Party](Ref.Party.assertFromString("charlie")), - keyOpt = None, - byKey = false, - interfaceId = None, - version = LanguageVersion.v2_1, - ) - - private val fetchNodeEncoding = """'01' # 01 (Node Encoding Version) - |# Fetch Node - |# Node Version - |'00000003' # 3 (int) - |'322e31' # 2.1 (string) - |'02' # Fetch Node Tag - |# Contract Id - |'00000021' # 33 (int) - |'0007e7b5534931dfca8e1b485c105bae4e10808bd13ddc8e897f258015f9d921c5' # 0007e7b5534931dfca8e1b485c105bae4e10808bd13ddc8e897f258015f9d921c5 (contractId) - |# Package Name - |'0000000e' # 14 (int) - |'7061636b6167652d6e616d652d30' # package-name-0 (string) - |# Template Id - |'00000007' # 7 (int) - |'7061636b616765' # package (string) - |'00000001' # 1 (int) - |'00000006' # 6 (int) - |'6d6f64756c65' # module (string) - |'00000001' # 1 (int) - |'00000004' # 4 (int) - |'6e616d65' # name (string) - |# Signatories - |'00000001' # 1 (int) - |'00000005' # 5 (int) - |'616c696365' # alice (string) - |# Stakeholders - |'00000001' # 1 (int) - |'00000007' # 7 (int) - |'636861726c6965' # charlie (string) - |# Interface Id - |'00' # None - |# Acting Parties - |'00000002' # 2 (int) - |'00000005' # 5 (int) - |'616c696365' # alice (string) - |'00000003' # 3 (int) - |'626f62' # bob (string)""".stripMargin - - private val fetchNodeHash = "c962c6098394f3d11cd6f0c795de9517d32a8e3e1979cec76cd2f66254efc610" - private val fetchNode2 = fetchNode.copy( - coid = ContractId.V1.assertFromString(contractId2) - ) - - private val exerciseNode = Node.Exercise( - targetCoid = ContractId.V1.assertFromString( - "0007e7b5534931dfca8e1b485c105bae4e10808bd13ddc8e897f258015f9d921c5" - ), - packageName = packageName0, - templateId = defRef("module", "name"), - interfaceId = Some(defRef("interface_module", "interface_name")), - choiceId = ChoiceName.assertFromString("choice"), - consuming = true, - actingParties = - Set[Party](Ref.Party.assertFromString("alice"), Ref.Party.assertFromString("bob")), - chosenValue = VA.int64.inj(31380L), - stakeholders = Set[Party](Ref.Party.assertFromString("charlie")), - signatories = Set[Party](Ref.Party.assertFromString("alice")), - choiceObservers = Set[Party](Ref.Party.assertFromString("david")), - choiceAuthorizers = None, - children = ImmArray(NodeId(0), NodeId(2)), // Create and Fetch - exerciseResult = Some(VA.text.inj("result")), - keyOpt = None, - byKey = false, - version = LanguageVersion.v2_1, - ) - - private val exerciseNodeHash = "070970eb4b2de72561dafb67017ca33850650a8103e5134e16044ba78991f48c" - - private val lookupNode = Node.LookupByKey( - packageName = packageName0, - templateId = defRef("module", "name"), - key = globalKey, - result = Some( - ContractId.V1.assertFromString(contractId1) - ), - version = LanguageVersion.v2_1, - ) - - private val rollbackNode = Node.Rollback( - children = ImmArray(NodeId(2), NodeId(4)) // Fetch2 and Exercise - ) - - private val rollbackNodeHash = "7264d5da2fd714427453bedc0d1cdb21f52ac7aec8d4bb5ac0598d25c5fcaed9" - - private val nodeSeedCreate = - LfHash.assertFromString("926bbb6f341bc0092ae65d06c6e284024907148cc29543ef6bff0930f5d52c19") - private val nodeSeedFetch = - 
LfHash.assertFromString("4d2a522e9ee44e31b9bef2e3c8a07d43475db87463c6a13c4ea92f898ac8a930") - private val nodeSeedExercise = - LfHash.assertFromString("a867edafa1277f46f879ab92c373a15c2d75c5d86fec741705cee1eb01ef8c9e") - private val nodeSeedRollback = - LfHash.assertFromString("5483d5df9b245e662c0e4368b8062e8a0fd24c17ce4ded1a0e452e4ee879dd81") - private val nodeSeedCreate2 = - LfHash.assertFromString("e0c69eae8afb38872fa425c2cdba794176f3b9d97e8eefb7b0e7c831f566458f") - private val nodeSeedFetch2 = - LfHash.assertFromString("b4f0534e651ac8d5e10d95ddfcafdb123550a5e3185e3fe61ec1746a7222a88e") - - private val defaultNodeSeedsMap = Map( - NodeId(0) -> nodeSeedCreate, - NodeId(1) -> nodeSeedCreate2, - NodeId(2) -> nodeSeedFetch, - NodeId(3) -> nodeSeedFetch2, - NodeId(4) -> nodeSeedExercise, - NodeId(5) -> nodeSeedRollback, - ) - - private val subNodesMap = Map( - NodeId(0) -> createNode, - NodeId(1) -> createNode2, - NodeId(2) -> fetchNode, - NodeId(3) -> fetchNode2, - NodeId(4) -> exerciseNode, - NodeId(5) -> rollbackNode, - ) - - private def hashNodeV1( - node: Node, - nodeSeed: Option[LfHash], - subNodes: Map[NodeId, Node] = subNodesMap, - hashTracer: HashTracer, - ) = TransactionHash.tryHashNodeV1( - node, - defaultNodeSeedsMap, - nodeSeed, - subNodes, - hashTracer, - ) - - private def shiftNodeIds(array: ImmArray[NodeId]): ImmArray[NodeId] = array.map { - case NodeId(i) => NodeId(i + 1) - } - - private def shiftNodeIdsSeeds(map: Map[NodeId, LfHash]): Map[NodeId, LfHash] = map.map { - case (NodeId(i), value) => NodeId(i + 1) -> value - } - - private def shiftNodeIds(map: Map[NodeId, Node]): Map[NodeId, Node] = map.map { - case (NodeId(i), exercise: Node.Exercise) => - NodeId(i + 1) -> exercise.copy(children = shiftNodeIds(exercise.children)) - case (NodeId(i), rollback: Node.Rollback) => - NodeId(i + 1) -> rollback.copy(children = shiftNodeIds(rollback.children)) - case (NodeId(i), node) => NodeId(i + 1) -> node - } - - "V1Encoding" should { - "not encode lookup nodes" in { - a[NodeHashingError.UnsupportedFeature] shouldBe thrownBy { - TransactionHash.tryHashNodeV1(lookupNode) - } - } - - "not encode create nodes without node seed" in { - a[NodeHashingError.MissingNodeSeed] shouldBe thrownBy { - TransactionHash.tryHashNodeV1(createNode, enforceNodeSeedForCreateNodes = true) - } - } - - "not encode exercise nodes without node seed" in { - a[NodeHashingError.MissingNodeSeed] shouldBe thrownBy { - TransactionHash.tryHashNodeV1(exerciseNode, enforceNodeSeedForCreateNodes = true) - } - } - - "encode create nodes without node seed if explicitly allowed" in { - scala.util - .Try(TransactionHash.tryHashNodeV1(createNode, enforceNodeSeedForCreateNodes = false)) - .isSuccess shouldBe true - } - } - - "CreateNodeBuilder V1" should { - val defaultHash = Hash - .fromHexStringRaw(createNodeHash) - .getOrElse(fail("Invalid hash")) - - def hashCreateNode(node: Node.Create, hashTracer: HashTracer = HashTracer.NoOp) = - hashNodeV1(node, Some(nodeSeedCreate), hashTracer = hashTracer) - - "be stable" in { - hashCreateNode(createNode).toHexString shouldBe defaultHash.toHexString - } - - "fail on global keys" in { - a[NodeHashingError.UnsupportedFeature] shouldBe thrownBy( - hashCreateNode( - createNode.copy(keyOpt = Some(globalKey2)) - ) - ) - } - - "not produce collision in contractId" in { - hashCreateNode( - createNode.copy( - coid = ContractId.V1.assertFromString( - "0059b59ad7a6b6066e77b91ced54b8282f0e24e7089944685cb8f22f32fcbc4e1b" - ) - ) - ) should !==(defaultHash) - } - - "not produce collision in 
package name" in { - hashCreateNode( - createNode.copy( - packageName = PackageName.assertFromString("another_package_name") - ) - ) should !==(defaultHash) - } - - "not produce collision in template ID" in { - hashCreateNode( - createNode.copy( - templateId = defRef("othermodule", "othername") - ) - ) should !==(defaultHash) - } - - "not produce collision in arg" in { - hashCreateNode( - createNode.copy( - arg = VA.bool.inj(true) - ) - ) should !==(defaultHash) - } - - "not produce collision in signatories" in { - hashCreateNode( - createNode.copy( - signatories = Set[Party](Ref.Party.assertFromString("alice")) - ) - ) should !==(defaultHash) - } - - "not produce collision in stakeholders" in { - hashCreateNode( - createNode.copy( - stakeholders = Set[Party](Ref.Party.assertFromString("alice")) - ) - ) should !==(defaultHash) - } - - "explain encoding" in { - val hashTracer = HashTracer.StringHashTracer() - val hash = hashCreateNode(createNode, hashTracer = hashTracer) - hash shouldBe defaultHash - hashTracer.result shouldBe - s"""$createNodeEncoding - |""".stripMargin - assertStringTracer(hashTracer, hash) - } - } - - "FetchNodeBuilder V1" should { - val defaultHash = Hash - .fromHexStringRaw(fetchNodeHash) - .getOrElse(fail("Invalid hash")) - - def hashFetchNode(node: Node.Fetch, hashTracer: HashTracer = HashTracer.NoOp) = - hashNodeV1(node, nodeSeed = Some(nodeSeedFetch), hashTracer = hashTracer) - - "be stable" in { - hashFetchNode(fetchNode).toHexString shouldBe defaultHash.toHexString - } - - "fail if node includes global keys" in { - a[NodeHashingError.UnsupportedFeature] shouldBe thrownBy( - hashFetchNode(fetchNode.copy(keyOpt = Some(globalKey2))) - ) - } - - "fail if node includes byKey" in { - a[NodeHashingError.UnsupportedFeature] shouldBe thrownBy( - hashFetchNode(fetchNode.copy(byKey = true)) - ) - } - - "not produce collision in contractId" in { - hashFetchNode( - fetchNode.copy( - coid = ContractId.V1.assertFromString( - "0059b59ad7a6b6066e77b91ced54b8282f0e24e7089944685cb8f22f32fcbc4e1b" - ) - ) - ) should !==(defaultHash) - } - - "not produce collision in package name" in { - hashFetchNode( - fetchNode.copy( - packageName = PackageName.assertFromString("another_package_name") - ) - ) should !==(defaultHash) - } - - "not produce collision in template ID" in { - hashFetchNode( - fetchNode.copy( - templateId = defRef("othermodule", "othername") - ) - ) should !==(defaultHash) - } - - "not produce collision in actingParties" in { - hashFetchNode( - fetchNode.copy( - actingParties = Set[Party](Ref.Party.assertFromString("charlie")) - ) - ) should !==(defaultHash) - } - - "not produce collision in signatories" in { - hashFetchNode( - fetchNode.copy( - signatories = Set[Party](Ref.Party.assertFromString("bob")) - ) - ) should !==(defaultHash) - } - - "not produce collision in stakeholders" in { - hashFetchNode( - fetchNode.copy( - stakeholders = Set[Party](Ref.Party.assertFromString("alice")) - ) - ) should !==(defaultHash) - } - - "explain encoding" in { - val hashTracer = HashTracer.StringHashTracer() - val hash = hashFetchNode(fetchNode, hashTracer = hashTracer) - hash shouldBe defaultHash - hashTracer.result shouldBe s"""$fetchNodeEncoding - |""".stripMargin - - assertStringTracer(hashTracer, hash) - } - } - - "ExerciseNodeBuilder V1" should { - val defaultHash = Hash - .fromHexStringRaw(exerciseNodeHash) - .getOrElse(fail("Invalid hash")) - - def hashExerciseNode( - node: Node.Exercise, - subNodes: Map[NodeId, Node] = subNodesMap, - hashTracer: HashTracer = HashTracer.NoOp, 
- ) = - hashNodeV1( - node, - nodeSeed = Some(nodeSeedExercise), - subNodes = subNodes, - hashTracer = hashTracer, - ) - - "be stable" in { - hashExerciseNode(exerciseNode).toHexString shouldBe defaultHash.toHexString - } - - "not include global keys" in { - a[NodeHashingError.UnsupportedFeature] shouldBe thrownBy( - hashExerciseNode( - exerciseNode.copy(keyOpt = Some(globalKey2)) - ) - ) - } - - "not include choiceAuthorizers" in { - a[NodeHashingError.UnsupportedFeature] shouldBe thrownBy( - hashExerciseNode( - exerciseNode.copy(choiceAuthorizers = - Some(Set[Party](Ref.Party.assertFromString("alice"))) - ) - ) - ) - } - - "not include byKey" in { - hashExerciseNode( - exerciseNode.copy( - byKey = false - ) - ) shouldBe defaultHash - } - - "throw if some nodes are missing" in { - an[IncompleteTransactionTree] shouldBe thrownBy { - hashExerciseNode(exerciseNode, subNodes = Map.empty) - } - } - - "not hash NodeIds" in { - TransactionHash.tryHashNodeV1( - exerciseNode - // Shift all node ids by one and expect it to have no impact - .copy(children = shiftNodeIds(exerciseNode.children)), - subNodes = shiftNodeIds(subNodesMap), - nodeSeed = Some(nodeSeedExercise), - nodeSeeds = shiftNodeIdsSeeds(defaultNodeSeedsMap), - ) shouldBe defaultHash - } - - "not produce collision in contractId" in { - hashExerciseNode( - exerciseNode.copy( - targetCoid = ContractId.V1.assertFromString( - "0059b59ad7a6b6066e77b91ced54b8282f0e24e7089944685cb8f22f32fcbc4e1b" - ) - ) - ) should !==(defaultHash) - } - - "not produce collision in package name" in { - hashExerciseNode( - exerciseNode.copy( - packageName = PackageName.assertFromString("another_package_name") - ) - ) should !==(defaultHash) - } - - "not produce collision in template ID" in { - hashExerciseNode( - exerciseNode.copy( - templateId = defRef("othermodule", "othername") - ) - ) should !==(defaultHash) - } - - "not produce collision in actingParties" in { - hashExerciseNode( - exerciseNode.copy( - actingParties = Set[Party](Ref.Party.assertFromString("charlie")) - ) - ) should !==(defaultHash) - } - - "not produce collision in signatories" in { - hashExerciseNode( - exerciseNode.copy( - signatories = Set[Party](Ref.Party.assertFromString("bob")) - ) - ) should !==(defaultHash) - } - - "not produce collision in stakeholders" in { - hashExerciseNode( - exerciseNode.copy( - stakeholders = Set[Party](Ref.Party.assertFromString("alice")) - ) - ) should !==(defaultHash) - } - - "not produce collision in choiceObservers" in { - hashExerciseNode( - exerciseNode.copy( - choiceObservers = Set[Party](Ref.Party.assertFromString("alice")) - ) - ) should !==(defaultHash) - } - - "not produce collision in children" in { - hashExerciseNode( - exerciseNode.copy( - children = exerciseNode.children.reverse - ) - ) should !==(defaultHash) - } - - "not produce collision in interface Id" in { - hashExerciseNode( - exerciseNode.copy( - interfaceId = None - ) - ) should !==(defaultHash) - } - - "not produce collision in exercise result" in { - hashExerciseNode( - exerciseNode.copy( - exerciseResult = None - ) - ) should !==(defaultHash) - } - - "explain encoding" in { - val hashTracer = HashTracer.StringHashTracer() - val hash = hashExerciseNode(exerciseNode, hashTracer = hashTracer) - hash shouldBe defaultHash - hashTracer.result shouldBe s"""'01' # 01 (Node Encoding Version) - |# Exercise Node - |# Node Version - |'00000003' # 3 (int) - |'322e31' # 2.1 (string) - |'01' # Exercise Node Tag - |# Node Seed - |'a867edafa1277f46f879ab92c373a15c2d75c5d86fec741705cee1eb01ef8c9e' 
# seed - |# Contract Id - |'00000021' # 33 (int) - |'0007e7b5534931dfca8e1b485c105bae4e10808bd13ddc8e897f258015f9d921c5' # 0007e7b5534931dfca8e1b485c105bae4e10808bd13ddc8e897f258015f9d921c5 (contractId) - |# Package Name - |'0000000e' # 14 (int) - |'7061636b6167652d6e616d652d30' # package-name-0 (string) - |# Template Id - |'00000007' # 7 (int) - |'7061636b616765' # package (string) - |'00000001' # 1 (int) - |'00000006' # 6 (int) - |'6d6f64756c65' # module (string) - |'00000001' # 1 (int) - |'00000004' # 4 (int) - |'6e616d65' # name (string) - |# Signatories - |'00000001' # 1 (int) - |'00000005' # 5 (int) - |'616c696365' # alice (string) - |# Stakeholders - |'00000001' # 1 (int) - |'00000007' # 7 (int) - |'636861726c6965' # charlie (string) - |# Acting Parties - |'00000002' # 2 (int) - |'00000005' # 5 (int) - |'616c696365' # alice (string) - |'00000003' # 3 (int) - |'626f62' # bob (string) - |# Interface Id - |'01' # Some - |'00000007' # 7 (int) - |'7061636b616765' # package (string) - |'00000001' # 1 (int) - |'00000010' # 16 (int) - |'696e746572666163655f6d6f64756c65' # interface_module (string) - |'00000001' # 1 (int) - |'0000000e' # 14 (int) - |'696e746572666163655f6e616d65' # interface_name (string) - |# Choice Id - |'00000006' # 6 (int) - |'63686f696365' # choice (string) - |# Chosen Value - |'02' # Int64 Type Tag - |'0000000000007a94' # 31380 (long) - |# Consuming - |'01' # true (bool) - |# Exercise Result - |'01' # Some - |'07' # Text Type Tag - |'00000006' # 6 (int) - |'726573756c74' # result (string) - |# Choice Observers - |'00000001' # 1 (int) - |'00000005' # 5 (int) - |'6461766964' # david (string) - |# Children - |'00000002' # 2 (int) - |'$createNodeHash' # (Hashed Inner Node) - |'$fetchNodeHash' # (Hashed Inner Node) - |""".stripMargin - - assertStringTracer(hashTracer, hash) - } - } - - "RollbackNode Builder V1" should { - val defaultHash = Hash - .fromHexStringRaw(rollbackNodeHash) - .getOrElse(fail("Invalid hash")) - - def hashRollbackNode( - node: Node.Rollback, - subNodes: Map[NodeId, Node] = subNodesMap, - hashTracer: HashTracer = HashTracer.NoOp, - ) = - TransactionHash.tryHashNodeV1( - node, - nodeSeed = Some(nodeSeedRollback), - nodeSeeds = defaultNodeSeedsMap, - subNodes = subNodes, - hashTracer = hashTracer, - ) - - "be stable" in { - hashRollbackNode(rollbackNode).toHexString shouldBe defaultHash.toHexString - } - - "throw if some nodes are missing" in { - an[IncompleteTransactionTree] shouldBe thrownBy { - hashRollbackNode(rollbackNode, subNodes = Map.empty) - } - } - - "not hash NodeIds" in { - TransactionHash.tryHashNodeV1( - rollbackNode - // Change the node Ids values but not the nodes - .copy(children = shiftNodeIds(rollbackNode.children)), - subNodes = shiftNodeIds(subNodesMap), - nodeSeed = Some(nodeSeedRollback), - nodeSeeds = shiftNodeIdsSeeds(defaultNodeSeedsMap), - ) shouldBe defaultHash - } - - "not produce collision in children" in { - hashRollbackNode( - rollbackNode.copy( - children = rollbackNode.children.reverse - ) - ) should !==(defaultHash) - } - - "explain encoding" in { - val hashTracer = HashTracer.StringHashTracer() - val hash = hashRollbackNode(rollbackNode, hashTracer = hashTracer) - hash shouldBe defaultHash - hashTracer.result shouldBe s"""'01' # 01 (Node Encoding Version) - |# Rollback Node - |'03' # Rollback Node Tag - |# Children - |'00000002' # 2 (int) - |'$fetchNodeHash' # (Hashed Inner Node) - |'$exerciseNodeHash' # (Hashed Inner Node) - |""".stripMargin - - assertStringTracer(hashTracer, hash) - } - } - - "TransactionBuilder" 
should { - val roots = ImmArray(NodeId(0), NodeId(5)) - val transaction = VersionedTransaction( - version = LanguageVersion.v2_1, - roots = roots, - nodes = subNodesMap, - ) - - val defaultHash = Hash - .fromHexStringRaw("154f334d24a8a5e4d0ce51ac87d93821b3256f885f21d3f779a1640abf481983") - .getOrElse(fail("Invalid hash")) - - "be stable" in { - TransactionHash - .tryHashTransactionV1(transaction, defaultNodeSeedsMap) - .toHexString shouldBe defaultHash.toHexString - } - - "throw if some nodes are missing" in { - an[IncompleteTransactionTree] shouldBe thrownBy { - TransactionHash.tryHashTransactionV1( - VersionedTransaction( - version = LanguageVersion.v2_1, - roots = roots, - nodes = Map.empty, - ), - defaultNodeSeedsMap, - ) - } - } - - "not hash NodeIds" in { - TransactionHash.tryHashTransactionV1( - VersionedTransaction( - version = LanguageVersion.v2_1, - roots = shiftNodeIds(roots), - nodes = shiftNodeIds(subNodesMap), - ), - shiftNodeIdsSeeds(defaultNodeSeedsMap), - ) shouldBe defaultHash - } - - "not produce collision in children" in { - TransactionHash.tryHashTransactionV1( - VersionedTransaction( - version = LanguageVersion.v2_1, - roots = roots.reverse, - nodes = subNodesMap, - ), - defaultNodeSeedsMap, - ) should !==(defaultHash) - } - - "explain encoding" in { - val hashTracer = HashTracer.StringHashTracer() - val hash = TransactionHash.tryHashTransactionV1( - transaction, - defaultNodeSeedsMap, - hashTracer = hashTracer, - ) - hashTracer.result shouldBe s"""'00000030' # Hash Purpose - |# Transaction Version - |'00000003' # 3 (int) - |'322e31' # 2.1 (string) - |# Root Nodes - |'00000002' # 2 (int) - |'$createNodeHash' # (Hashed Inner Node) - |'$rollbackNodeHash' # (Hashed Inner Node) - |""".stripMargin - assertStringTracer(hashTracer, hash) - } - } - - "Full Transaction Hash" should { - val roots = ImmArray(NodeId(0), NodeId(5)) - val transaction = VersionedTransaction( - version = LanguageVersion.v2_1, - roots = roots, - nodes = subNodesMap, - ) - - val defaultHash = Hash - .fromHexStringRaw("adcef14ae479ab5b9424a89d5bf28ee666f336a5463f65b097d587120123d019") - .getOrElse(fail("Invalid hash")) - - "be stable" in { - TransactionHash - .tryHashTransactionWithMetadataV1(transaction, defaultNodeSeedsMap, metadata) - .toHexString shouldBe defaultHash.toHexString - } - - "explain encoding" in { - val hashTracer = HashTracer.StringHashTracer() - val hash = TransactionHash.tryHashTransactionWithMetadataV1( - transaction, - defaultNodeSeedsMap, - metadata, - hashTracer = hashTracer, - ) - hashTracer.result shouldBe s"""'00000030' # Hash Purpose - |'02' # 02 (Hashing Scheme Version) - |'154f334d24a8a5e4d0ce51ac87d93821b3256f885f21d3f779a1640abf481983' # Transaction - |'2a0690693367f70fbe83e5e99df6930dbd2336618a3a0721bb6fa3bcc88d5a53' # Metadata - |""".stripMargin - assertStringTracer(hashTracer, hash) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/hash/ValueHashTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/hash/ValueHashTest.scala deleted file mode 100644 index baf958a0d0..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/hash/ValueHashTest.scala +++ /dev/null @@ -1,320 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
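The "not hash NodeIds" assertions above hinge on one design point: node ids are only keys into the `nodes` and `nodeSeeds` maps, and each child is folded into its parent's digest as a hashed inner node, so renumbering the id space cannot affect the result. A minimal, self-contained sketch of that property in plain Scala (a toy encoding without the length prefixes the real scheme uses; all names are illustrative, not Canton's):

```scala
import java.nio.charset.StandardCharsets
import java.security.MessageDigest

sealed trait NodeSketch
final case class LeafSketch(payload: String) extends NodeSketch
final case class BranchSketch(payload: String, children: Seq[Int]) extends NodeSketch

object NodeIdIndependenceDemo extends App {
  // Children contribute via their own hashes, looked up through the id map;
  // the ids themselves are never fed to the digest.
  def hashNode(id: Int, nodes: Map[Int, NodeSketch]): Array[Byte] = {
    val md = MessageDigest.getInstance("SHA-256")
    nodes(id) match {
      case LeafSketch(p) =>
        md.update(0x00: Byte)
        md.update(p.getBytes(StandardCharsets.UTF_8))
      case BranchSketch(p, children) =>
        md.update(0x01: Byte)
        md.update(p.getBytes(StandardCharsets.UTF_8))
        children.foreach(child => md.update(hashNode(child, nodes)))
    }
    md.digest()
  }

  val nodes: Map[Int, NodeSketch] = Map(
    0 -> BranchSketch("exercise", Seq(1, 2)),
    1 -> LeafSketch("create"),
    2 -> LeafSketch("fetch"),
  )

  // Shift every node id by one, in the spirit of shiftNodeIds in the tests.
  val shifted: Map[Int, NodeSketch] = nodes.map {
    case (id, BranchSketch(p, cs)) => (id + 1) -> BranchSketch(p, cs.map(_ + 1))
    case (id, leaf)                => (id + 1) -> leaf
  }

  val original   = hashNode(0, nodes).map("%02x".format(_)).mkString
  val renumbered = hashNode(1, shifted).map("%02x".format(_)).mkString
  assert(original == renumbered) // renumbering must not change the hash
}
```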
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protocol.hash - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.crypto.Hash -import com.digitalasset.daml.lf.data -import com.digitalasset.daml.lf.data.{FrontStack, ImmArray, Ref, SortedLookupList, Time} -import com.digitalasset.daml.lf.value.Value -import org.scalatest.Assertion -import org.scalatest.wordspec.AnyWordSpecLike - -import java.time.Instant - -class ValueHashTest extends BaseTest with AnyWordSpecLike with HashUtilsTest { - "ValueBuilder" should { - def withValueBuilder(f: (LfValueBuilder, HashTracer.StringHashTracer) => Assertion) = { - val hashTracer = HashTracer.StringHashTracer() - val builder = LfValueHashBuilder.valueBuilderForV1Node(hashTracer) - f(builder, hashTracer) - } - - def assertEncode(value: Value, expectedHash: String, expectedDebugEncoding: String) = - withValueBuilder { case (builder, hashTracer) => - val hash = builder.addTypedValue(value).finish() - hash.toHexString shouldBe Hash.fromHexStringRaw(expectedHash).value.toHexString - hashTracer.result shouldBe expectedDebugEncoding - assertStringTracer(hashTracer, hash) - } - - "encode unit value" in { - assertEncode( - Value.ValueUnit, - "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d", - """'00' # Unit Type Tag - |""".stripMargin, - ) - } - - "encode true value" in { - assertEncode( - Value.ValueBool(true), - "9dcf97a184f32623d11a73124ceb99a5709b083721e878a16d78f596718ba7b2", - """'01' # Bool Type Tag - |'01' # true (bool) - |""".stripMargin, - ) - } - - "encode false value" in { - assertEncode( - Value.ValueBool(false), - "47dc540c94ceb704a23875c11273e16bb0b8a87aed84de911f2133568115f254", - """'01' # Bool Type Tag - |'00' # false (bool) - |""".stripMargin, - ) - } - - "encode text value" in { - assertEncode( - Value.ValueText("hello world!"), - "a24821b4741b3616920a37dfccf3b1e271184c82fc89377ab22e5d96d9330e5c", - """'07' # Text Type Tag - |'0000000c' # 12 (int) - |'68656c6c6f20776f726c6421' # hello world! 
(string) - |""".stripMargin, - ) - } - - "encode numeric value" in { - // Numerics are encoded from their string representation - assertEncode( - Value.ValueNumeric(data.Numeric.assertFromString("125.1002")), - "fd23ea3b05b8b0e1d15902cccf7c6c0e6e292f0ae3e96513c8d8cec9d4f9bda9", - """'03' # Numeric Type Tag - |'00000008' # 8 (int) - |'3132352e31303032' # 125.1002 (numeric) - |""".stripMargin, - ) - } - - "encode contract id value" in { - assertEncode( - Value.ValueContractId( - Value.ContractId.V1 - .assertFromString("0059b59ad7a6b6066e77b91ced54b8282f0e24e7089944685cb8f22f32fcbc4e1b") - ), - "6a3e241cebc1dc5e7574be0cf122edc9cb65ffc3fa59fb069c5ab3bbb4598414", - """'08' # ContractId Type Tag - |'00000021' # 33 (int) - |'0059b59ad7a6b6066e77b91ced54b8282f0e24e7089944685cb8f22f32fcbc4e1b' # 0059b59ad7a6b6066e77b91ced54b8282f0e24e7089944685cb8f22f32fcbc4e1b (contractId) - |""".stripMargin, - ) - } - - "encode enum value" in { - assertEncode( - Value.ValueEnum(Some(defRef("module", "name")), Ref.Name.assertFromString("ENUM")), - "9c8f627c22d6871e111e1f5d1980a6a24bcc6311bbc73fd3932a4d79fa9c09f2", - """'0e' # Enum Type Tag - |'01' # Some - |'00000007' # 7 (int) - |'7061636b616765' # package (string) - |'00000001' # 1 (int) - |'00000006' # 6 (int) - |'6d6f64756c65' # module (string) - |'00000001' # 1 (int) - |'00000004' # 4 (int) - |'6e616d65' # name (string) - |'00000004' # 4 (int) - |'454e554d' # ENUM (string) - |""".stripMargin, - ) - } - - "encode int64 value" in { - assertEncode( - Value.ValueInt64(10L), - "a74730eb88baf5118934d3675ccf50eac1e4e873000ecd75b5917c78ee30ef26", - """'02' # Int64 Type Tag - |'000000000000000a' # 10 (long) - |""".stripMargin, - ) - } - - "encode variant value" in { - assertEncode( - Value.ValueVariant( - Some(defRef("module", "name")), - Ref.Name.assertFromString("ENUM"), - Value.ValueTrue, - ), - "bceee77c48d80db35f237c3877c6711db19a8c135d2d0323c21f01afc2eb7612", - """'0d' # Variant Type Tag - |'01' # Some - |'00000007' # 7 (int) - |'7061636b616765' # package (string) - |'00000001' # 1 (int) - |'00000006' # 6 (int) - |'6d6f64756c65' # module (string) - |'00000001' # 1 (int) - |'00000004' # 4 (int) - |'6e616d65' # name (string) - |'00000004' # 4 (int) - |'454e554d' # ENUM (string) - |'01' # Bool Type Tag - |'01' # true (bool) - |""".stripMargin, - ) - } - - "encode list value" in { - assertEncode( - Value.ValueList( - FrontStack.from( - List( - Value.ValueText("five"), - Value.ValueInt64(5L), - Value.ValueTrue, - ) - ) - ), - "6049408351d7be2c9d6ae7a1beecf08676fa72955c376c89571a1c5f04f272bd", - """'0a' # List Type Tag - |'00000003' # 3 (int) - |'07' # Text Type Tag - |'00000004' # 4 (int) - |'66697665' # five (string) - |'02' # Int64 Type Tag - |'0000000000000005' # 5 (long) - |'01' # Bool Type Tag - |'01' # true (bool) - |""".stripMargin, - ) - } - - "encode text map value" in { - assertEncode( - Value.ValueTextMap( - SortedLookupList( - Map( - "foo" -> Value.ValueNumeric(data.Numeric.assertFromString("31380.0")), - "bar" -> Value.ValueText("1284"), - ) - ) - ), - "b318100aac8cde766598a0a5bc05451feaec7c90400f75de93100d0c87ec24cf", - """'0b' # TextMap Type Tag - |'00000002' # 2 (int) - |'00000003' # 3 (int) - |'626172' # bar (string) - |'07' # Text Type Tag - |'00000004' # 4 (int) - |'31323834' # 1284 (string) - |'00000003' # 3 (int) - |'666f6f' # foo (string) - |'03' # Numeric Type Tag - |'00000007' # 7 (int) - |'33313338302e30' # 31380.0 (numeric) - |""".stripMargin, - ) - } - - "encode gen map value" in { - assertEncode( - Value.ValueGenMap( - ImmArray( - 
(Value.ValueInt64(5L), Value.ValueText("five")), - (Value.ValueInt64(10L), Value.ValueText("ten")), - ) - ), - "523c5247ed42efa40dd6b0165a01c1b14dcede7b39a1f33a3590fc7defc4a610", - """'0f' # GenMap Type Tag - |'00000002' # 2 (int) - |'02' # Int64 Type Tag - |'0000000000000005' # 5 (long) - |'07' # Text Type Tag - |'00000004' # 4 (int) - |'66697665' # five (string) - |'02' # Int64 Type Tag - |'000000000000000a' # 10 (long) - |'07' # Text Type Tag - |'00000003' # 3 (int) - |'74656e' # ten (string) - |""".stripMargin, - ) - } - - "encode optional empty value" in { - assertEncode( - Value.ValueOptional(None), - "a2c4aed1cf757cd9a509734a267ffc7b1166b55f4c8f9c3e3550c56e743328fc", - """'09' # Optional Type Tag - |'00' # None - |""".stripMargin, - ) - } - - "encode optional defined value" in { - assertEncode( - Value.ValueOptional(Some(Value.ValueText("hello"))), - "dfba7295cf094b6b3ffdf3e44411793264882b38c810f9dff63148890a466171", - """'09' # Optional Type Tag - |'01' # Some - |'07' # Text Type Tag - |'00000005' # 5 (int) - |'68656c6c6f' # hello (string) - |""".stripMargin, - ) - } - - "encode timestamp value" in { - assertEncode( - // Thursday, 24 October 2024 16:43:46 - Value.ValueTimestamp( - Time.Timestamp.assertFromInstant(Instant.ofEpochMilli(1729788226000L)) - ), - "18efa9ad9adfa83fc3e84cdbe9c4b9f0e7305376e858e5bd909117209111c9d5", - """'04' # Timestamp Type Tag - |'0006253bb4bf5480' # 1729788226000000 (long) - |""".stripMargin, - ) - } - - "encode date value" in { - assertEncode( - // Thursday, 24 October 2024 - Value.ValueDate(Time.Date.assertFromDaysSinceEpoch(20020)), - "6892b74f329df4e0fd850c0e4d35384d3a4aef0a2f0f32070172c1f121ff632f", - """'05' # Date Type Tag - |'00004e34' # 20020 (int) - |""".stripMargin, - ) - } - - "encode record value" in { - assertEncode( - Value.ValueRecord( - Some(defRef("module", "name")), - ImmArray( - ( - Some(Ref.Name.assertFromString("field1")), - Value.ValueTrue, - ), - ( - Some(Ref.Name.assertFromString("field2")), - Value.ValueText("hello"), - ), - ), - ), - "ae594f7684700c299af9bc0b83758f2d71836ae6d9ea628d310f0305698df6f2", - """'0c' # Record Type Tag - |'01' # Some - |'00000007' # 7 (int) - |'7061636b616765' # package (string) - |'00000001' # 1 (int) - |'00000006' # 6 (int) - |'6d6f64756c65' # module (string) - |'00000001' # 1 (int) - |'00000004' # 4 (int) - |'6e616d65' # name (string) - |'00000002' # 2 (int) - |'01' # Some - |'00000006' # 6 (int) - |'6669656c6431' # field1 (string) - |'01' # Bool Type Tag - |'01' # true (bool) - |'01' # Some - |'00000006' # 6 (int) - |'6669656c6432' # field2 (string) - |'07' # Text Type Tag - |'00000005' # 5 (int) - |'68656c6c6f' # hello (string) - |""".stripMargin, - ) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/messages/AcsCommitmentTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/messages/AcsCommitmentTest.scala deleted file mode 100644 index 176a2b8097..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/messages/AcsCommitmentTest.scala +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
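The traces in ValueHashTest above all follow one convention: a one-byte type tag, a 4-byte big-endian length for variable-width data, then the raw bytes, with the whole stream fed to SHA-256 (the unit trace is the single byte '00', and its hash 6e340b9c… is SHA-256 of exactly that byte). A minimal sketch of that convention in plain Scala — an illustration, not Canton's LfValueHashBuilder; if the byte stream matches the trace exactly, the demo should print the same digest as the "encode text value" case:

```scala
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.security.MessageDigest

// Tagged, length-prefixed encoding in the style of the traces above,
// accumulated directly into a SHA-256 digest.
final class ValueHashSketch {
  private val md = MessageDigest.getInstance("SHA-256")

  private def byte(b: Int): Unit = md.update(b.toByte)
  private def int(i: Int): Unit = md.update(ByteBuffer.allocate(4).putInt(i).array())

  def addUnit(): ValueHashSketch = { byte(0x00); this } // '00' Unit Type Tag
  def addBool(b: Boolean): ValueHashSketch = {
    byte(0x01); byte(if (b) 0x01 else 0x00) // '01' Bool Type Tag, then the value byte
    this
  }
  def addInt64(l: Long): ValueHashSketch = {
    byte(0x02); md.update(ByteBuffer.allocate(8).putLong(l).array()) // '02' tag + big-endian long
    this
  }
  def addText(s: String): ValueHashSketch = {
    val utf8 = s.getBytes(StandardCharsets.UTF_8)
    byte(0x07); int(utf8.length); md.update(utf8) // '07' tag + length + UTF-8 bytes
    this
  }
  def finish(): String = md.digest().map("%02x".format(_)).mkString
}

object ValueHashSketchDemo extends App {
  // Mirrors the "encode text value" trace: '07', '0000000c' (12), then the bytes.
  println(new ValueHashSketch().addText("hello world!").finish())
  // SHA-256 of the single byte '00' — the unit-value hash from the test above.
  println(new ValueHashSketch().addUnit().finish())
}
```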
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protocol.messages - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.crypto.LtHash16 -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.serialization.HasCryptographicEvidenceTest -import com.digitalasset.canton.time.PositiveSeconds -import com.digitalasset.canton.topology.{ParticipantId, SynchronizerId, UniqueIdentifier} -import com.google.protobuf.ByteString -import org.scalatest.wordspec.AnyWordSpec - -class AcsCommitmentTest extends AnyWordSpec with BaseTest with HasCryptographicEvidenceTest { - private val synchronizerId = SynchronizerId( - UniqueIdentifier.tryFromProtoPrimitive("synchronizer::da") - ) - private val sender = ParticipantId(UniqueIdentifier.tryFromProtoPrimitive("participant::da")) - private val counterParticipant = ParticipantId( - UniqueIdentifier.tryFromProtoPrimitive("participant2::da") - ) - private val interval = PositiveSeconds.tryOfSeconds(1) - private val period1 = CommitmentPeriod - .create( - CantonTimestamp.Epoch, - CantonTimestamp.Epoch.plusSeconds(2), - interval, - ) - .value - private val period2 = CommitmentPeriod - .create( - CantonTimestamp.Epoch.plusSeconds(2), - CantonTimestamp.Epoch.plusSeconds(4), - interval, - ) - .value - - private val h = LtHash16() - h.add("abc".getBytes()) - private val cmt = h.getByteString() - - private val commitment1 = AcsCommitment - .create( - synchronizerId.toPhysical, - sender, - counterParticipant, - period1, - cmt, - testedProtocolVersion, - ) - - private val commitment2 = AcsCommitment - .create( - synchronizerId.toPhysical, - sender, - counterParticipant, - period2, - cmt, - testedProtocolVersion, - ) - - private def fromByteString(bytes: ByteString): AcsCommitment = - AcsCommitment.fromByteString(testedProtocolVersion, bytes) match { - case Left(x) => fail(x.toString) - case Right(x) => x - } - - "AcsCommitment" should { - behave like hasCryptographicEvidenceSerialization(commitment1, commitment2) - behave like hasCryptographicEvidenceDeserialization( - commitment1, - commitment1.getCryptographicEvidence, - "commitment1", - )(fromByteString) - behave like hasCryptographicEvidenceDeserialization( - commitment2, - commitment2.getCryptographicEvidence, - "commitment2", - )(fromByteString) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/messages/ConfirmationResponseTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/messages/ConfirmationResponseTest.scala deleted file mode 100644 index 96062db5ac..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/messages/ConfirmationResponseTest.scala +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
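AcsCommitmentTest above builds its commitment payload with LtHash16, whose key property is that the hash of a multiset can be updated incrementally as elements are added and removed, independently of order. A toy commutative multiset hash conveying the idea — componentwise sums of SHA-256-derived 16-bit words — purely illustrative, not Canton's LtHash16:

```scala
import java.nio.charset.StandardCharsets
import java.security.MessageDigest

// Each element contributes a vector of 16-bit words derived from SHA-256;
// the commitment is the componentwise sum mod 2^16. Adding elements in any
// order yields the same commitment, and removal is subtraction.
final case class MultisetHashSketch(words: Vector[Int]) {
  private def combine(that: Vector[Int], sign: Int): MultisetHashSketch =
    MultisetHashSketch(words.zip(that).map { case (a, b) => (a + sign * b) & 0xffff })
  def add(element: String): MultisetHashSketch = combine(MultisetHashSketch.vector(element), 1)
  def remove(element: String): MultisetHashSketch = combine(MultisetHashSketch.vector(element), -1)
}

object MultisetHashSketch {
  val empty: MultisetHashSketch = MultisetHashSketch(Vector.fill(16)(0))
  private def vector(element: String): Vector[Int] = {
    val d = MessageDigest.getInstance("SHA-256").digest(element.getBytes(StandardCharsets.UTF_8))
    d.grouped(2).map(pair => ((pair(0) & 0xff) << 8) | (pair(1) & 0xff)).toVector
  }
}

object MultisetHashDemo extends App {
  val a = MultisetHashSketch.empty.add("contract-1").add("contract-2")
  val b = MultisetHashSketch.empty.add("contract-2").add("contract-1")
  assert(a == b) // order-independent
  assert(a.remove("contract-2") == MultisetHashSketch.empty.add("contract-1")) // removable
}
```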
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protocol.messages - -import cats.syntax.either.* -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.crypto.TestHash -import com.digitalasset.canton.data.{CantonTimestamp, ViewPosition} -import com.digitalasset.canton.protocol.{LocalRejectError, RequestId, RootHash} -import com.digitalasset.canton.serialization.HasCryptographicEvidenceTest -import com.digitalasset.canton.topology.{SynchronizerId, UniqueIdentifier} -import com.digitalasset.canton.{BaseTest, LfPartyId, topology} -import com.google.protobuf.ByteString -import org.scalatest.wordspec.AnyWordSpec - -class ConfirmationResponseTest extends AnyWordSpec with BaseTest with HasCryptographicEvidenceTest { - - private lazy val response1: ConfirmationResponses = - ConfirmationResponses.tryCreate( - RequestId(CantonTimestamp.now()), - RootHash(TestHash.digest("txid1")), - SynchronizerId(UniqueIdentifier.tryFromProtoPrimitive("da::default")).toPhysical, - topology.ParticipantId(UniqueIdentifier.tryFromProtoPrimitive("da::p1")), - NonEmpty.mk( - Seq, - ConfirmationResponse.tryCreate( - Some(ViewPosition.root), - LocalApprove(testedProtocolVersion), - Set(LfPartyId.assertFromString("p1"), LfPartyId.assertFromString("p2")), - ), - ), - testedProtocolVersion, - ) - private lazy val response2: ConfirmationResponses = - ConfirmationResponses.tryCreate( - RequestId(CantonTimestamp.now()), - RootHash(TestHash.digest("txid3")), - SynchronizerId(UniqueIdentifier.tryFromProtoPrimitive("da::default")).toPhysical, - topology.ParticipantId(UniqueIdentifier.tryFromProtoPrimitive("da::p1")), - NonEmpty.mk( - Seq, - ConfirmationResponse.tryCreate( - None, - LocalRejectError.MalformedRejects.Payloads - .Reject("test message") - .toLocalReject(testedProtocolVersion), - Set.empty, - ), - ), - testedProtocolVersion, - ) - - def fromByteString(bytes: ByteString): ConfirmationResponses = - ConfirmationResponses - .fromByteString(testedProtocolVersion, bytes) - .valueOr(err => fail(err.toString)) - - "ConfirmationResponses" should { - behave like hasCryptographicEvidenceSerialization(response1, response2) - behave like hasCryptographicEvidenceDeserialization( - response1, - response1.getCryptographicEvidence, - "response1", - )(fromByteString) - behave like hasCryptographicEvidenceDeserialization( - response2, - response2.getCryptographicEvidence, - "response2", - )(fromByteString) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/messages/GeneratorsLocalVerdict.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/messages/GeneratorsLocalVerdict.scala deleted file mode 100644 index a5b784bdf5..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/messages/GeneratorsLocalVerdict.scala +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
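The hasCryptographicEvidence checks above pin down more than a round trip: re-serializing a deserialized message must reproduce the original bytes exactly, because signatures are computed over the byte representation rather than the reconstructed object. A generic distillation of that property, with hypothetical `serialize`/`deserialize` stand-ins for the real codecs:

```scala
import java.util.Arrays

object RoundTripSketch {
  // Returns Right(true) iff decoding succeeds and re-encoding is
  // byte-for-byte identical to the original serialization.
  def roundTripsByteForByte[A](
      value: A,
      serialize: A => Array[Byte],
      deserialize: Array[Byte] => Either[String, A],
  ): Either[String, Boolean] = {
    val bytes = serialize(value)
    deserialize(bytes).map(decoded => Arrays.equals(serialize(decoded), bytes))
  }
}

object RoundTripDemo extends App {
  // Trivial codec to exercise the checker; real message codecs go here.
  val encode: String => Array[Byte] = _.getBytes("UTF-8")
  val decode: Array[Byte] => Either[String, String] = bs => Right(new String(bs, "UTF-8"))
  assert(RoundTripSketch.roundTripsByteForByte("commitment1", encode, decode) == Right(true))
}
```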
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protocol.messages - -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.protocol.LocalRejectError.AssignmentRejects.AlreadyCompleted -import com.digitalasset.canton.protocol.LocalRejectError.ConsistencyRejections.{ - InactiveContracts, - LockedContracts, -} -import com.digitalasset.canton.protocol.LocalRejectError.MalformedRejects.{ - BadRootHashMessages, - CreatesExistingContracts, - MalformedRequest, - ModelConformance, - Payloads, -} -import com.digitalasset.canton.protocol.LocalRejectError.ReassignmentRejects -import com.digitalasset.canton.protocol.LocalRejectError.TimeRejects.{ - LedgerTime, - LocalTimeout, - PreparationTime, -} -import com.digitalasset.canton.protocol.LocalRejectError.UnassignmentRejects.ActivenessCheckFailed -import com.digitalasset.canton.protocol.{LocalRejectErrorImpl, Malformed} -import com.digitalasset.canton.version.ProtocolVersion -import com.digitalasset.canton.{GeneratorsLf, LfPartyId} -import org.scalacheck.{Arbitrary, Gen} - -final case class GeneratorsLocalVerdict( - protocolVersion: ProtocolVersion, - generatorsLf: GeneratorsLf, -) { - - import generatorsLf.* - - // TODO(#14515) Check that the generator is exhaustive - private def localVerdictRejectGen: Gen[LocalReject] = { - val resources = List("resource1", "resource2") - val details = "details" - - val builders = Seq[LocalRejectErrorImpl]( - LockedContracts.Reject(resources), - InactiveContracts.Reject(resources), - LedgerTime.Reject(details), - PreparationTime.Reject(details), - LocalTimeout.Reject(), - ActivenessCheckFailed.Reject(details), - ReassignmentRejects.ValidationFailed.Reject(details), - AlreadyCompleted.Reject(details), - ) - - Gen - .oneOf(builders) - .map(_.toLocalReject(protocolVersion)) - } - - // TODO(#14515) Check that the generator is exhaustive - private def localVerdictMalformedGen: Gen[LocalReject] = { - val resources = List("resource1", "resource2") - val details = "details" - - val builders = Seq[Malformed]( - MalformedRequest.Reject(details), - Payloads.Reject(details), - ModelConformance.Reject(details), - BadRootHashMessages.Reject(details), - CreatesExistingContracts.Reject(resources), - ) - - Gen - .oneOf(builders) - .map(_.toLocalReject(protocolVersion)) - } - - // TODO(#14515) Check that the generator is exhaustive - private def localRejectGen: Gen[LocalReject] = - Gen.oneOf(localVerdictRejectGen, localVerdictMalformedGen) - - private def localApproveGen: Gen[LocalApprove] = - Gen.const(LocalApprove(protocolVersion)) - - // If this pattern match is not exhaustive anymore, update the generator below - { - ((_: LocalVerdict) match { - case _: LocalApprove => () - case _: LocalReject => () - }).discard - } - - implicit val localVerdictArb: Arbitrary[LocalVerdict] = Arbitrary( - Gen.oneOf(localApproveGen, localRejectGen) - ) - - implicit val participantRejectReasonArb: Arbitrary[(Set[LfPartyId], LocalReject)] = - Arbitrary( - for { - parties <- Gen.containerOf[Set, LfPartyId](Arbitrary.arbitrary[LfPartyId]) - reject <- localRejectGen - } yield (parties, reject) - ) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/messages/GeneratorsMessages.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/messages/GeneratorsMessages.scala deleted file mode 100644 index ea74b07f0c..0000000000 --- 
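GeneratorsLocalVerdict above guards its generators with a deliberately discarded pattern match: when a new LocalVerdict subtype appears, the match stops being exhaustive and the compiler warning points straight at the generator that needs extending. The same trick on a self-contained toy hierarchy (illustrative names, not Canton types):

```scala
import org.scalacheck.Gen

sealed trait VerdictSketch
final case class ApproveSketch() extends VerdictSketch
final case class RejectSketch(reason: String) extends VerdictSketch

object VerdictGenSketch {
  // Compile-time exhaustiveness guard: if VerdictSketch gains a new subtype,
  // this match triggers a non-exhaustive-match warning (an error under
  // -Xfatal-warnings), reminding the author to extend verdictGen below.
  private val exhaustivenessGuard: VerdictSketch => Unit = {
    case _: ApproveSketch => ()
    case _: RejectSketch  => ()
  }

  val verdictGen: Gen[VerdictSketch] =
    Gen.oneOf(Gen.const(ApproveSketch()), Gen.alphaStr.map(RejectSketch(_)))
}
```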
a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/messages/GeneratorsMessages.scala +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protocol.messages - -import com.daml.nonempty.NonEmptyUtil -import com.digitalasset.canton.crypto.{ - AsymmetricEncrypted, - Encrypted, - SecureRandomness, - Signature, - SymmetricKeyScheme, -} -import com.digitalasset.canton.data.{ - AssignmentViewTree, - CantonTimestamp, - CantonTimestampSecond, - FullInformeeTree, - GeneratorsData, - UnassignmentViewTree, - ViewPosition, - ViewType, -} -import com.digitalasset.canton.protocol.{GeneratorsProtocol, RequestId, RootHash, ViewHash} -import com.digitalasset.canton.time.PositiveSeconds -import com.digitalasset.canton.topology.transaction.{ - GeneratorsTransaction, - SignedTopologyTransaction, - TopologyChangeOp, - TopologyMapping, -} -import com.digitalasset.canton.topology.{GeneratorsTopology, ParticipantId, PhysicalSynchronizerId} -import com.digitalasset.canton.version.ProtocolVersion -import com.digitalasset.canton.{Generators, GeneratorsLf, LfPartyId} -import magnolify.scalacheck.auto.* -import org.scalacheck.{Arbitrary, Gen} - -final class GeneratorsMessages( - protocolVersion: ProtocolVersion, - generatorsData: GeneratorsData, - generatorsLf: GeneratorsLf, - generatorsProtocol: GeneratorsProtocol, - generatorsLocalVerdict: GeneratorsLocalVerdict, - generatorsVerdict: GeneratorsVerdict, - generatorsTopology: GeneratorsTopology, - generatorTransactions: GeneratorsTransaction, -) { - import com.digitalasset.canton.Generators.* - import generatorsLf.* - import com.digitalasset.canton.crypto.GeneratorsCrypto.* - import com.digitalasset.canton.data.GeneratorsDataTime.* - import generatorsTopology.* - import generatorsData.* - import generatorsLocalVerdict.* - import generatorsProtocol.* - import generatorsVerdict.* - import generatorTransactions.* - - implicit val acsCommitmentArb: Arbitrary[AcsCommitment] = Arbitrary( - for { - synchronizerId <- Arbitrary.arbitrary[PhysicalSynchronizerId] - sender <- Arbitrary.arbitrary[ParticipantId] - counterParticipant <- Arbitrary.arbitrary[ParticipantId] - - periodFrom <- Arbitrary.arbitrary[CantonTimestampSecond] - periodDuration <- Gen.choose(1, 86400L).map(PositiveSeconds.tryOfSeconds) - period = CommitmentPeriod(periodFrom, periodDuration) - - commitment <- byteStringArb.arbitrary - } yield AcsCommitment.create( - synchronizerId, - sender, - counterParticipant, - period, - commitment, - protocolVersion, - ) - ) - - implicit val confirmationResultMessageArb: Arbitrary[ConfirmationResultMessage] = Arbitrary( - for { - psid <- Arbitrary.arbitrary[PhysicalSynchronizerId] - viewType <- Arbitrary.arbitrary[ViewType] - requestId <- Arbitrary.arbitrary[RequestId] - rootHash <- Arbitrary.arbitrary[RootHash] - verdict <- verdictArb.arbitrary - - // TODO(#14515) Also generate instance that makes pv above cover all the values - } yield ConfirmationResultMessage.create( - psid, - viewType, - requestId, - rootHash, - verdict, - ) - ) - - implicit val confirmationResponseArb: Arbitrary[ConfirmationResponse] = Arbitrary( - for { - localVerdict <- localVerdictArb.arbitrary - confirmingParties <- - if (localVerdict.isMalformed) Gen.const(Set.empty[LfPartyId]) - else nonEmptySet(implicitly[Arbitrary[LfPartyId]]).arbitrary.map(_.forgetNE) - viewPositionO <- localVerdict match { - case _: LocalApprove | _: 
LocalReject => - Gen.some(Arbitrary.arbitrary[ViewPosition]) - case _ => Gen.option(Arbitrary.arbitrary[ViewPosition]) - } - } yield ConfirmationResponse.tryCreate( - viewPositionO, - localVerdict, - confirmingParties, - ) - ) - - implicit val confirmationResponsesArb: Arbitrary[ConfirmationResponses] = Arbitrary( - for { - requestId <- Arbitrary.arbitrary[RequestId] - rootHash <- Arbitrary.arbitrary[RootHash] - synchronizerId <- Arbitrary.arbitrary[PhysicalSynchronizerId] - sender <- Arbitrary.arbitrary[ParticipantId] - responses <- Gen.nonEmptyListOf(confirmationResponseArb.arbitrary) - responsesNE = NonEmptyUtil.fromUnsafe(responses) - confirmationResponses = ConfirmationResponses.tryCreate( - requestId, - rootHash, - synchronizerId, - sender, - responsesNE, - protocolVersion, - ) - } yield confirmationResponses - ) - - // TODO(#14515) Check that the generator is exhaustive - implicit val signedProtocolMessageContentArb: Arbitrary[SignedProtocolMessageContent] = Arbitrary( - Gen.oneOf[SignedProtocolMessageContent]( - Arbitrary.arbitrary[AcsCommitment], - Arbitrary.arbitrary[ConfirmationResponses], - Arbitrary.arbitrary[ConfirmationResultMessage], - ) - ) - - implicit val typedSignedProtocolMessageContent - : Arbitrary[TypedSignedProtocolMessageContent[SignedProtocolMessageContent]] = Arbitrary(for { - content <- Arbitrary.arbitrary[SignedProtocolMessageContent] - } yield TypedSignedProtocolMessageContent(content, protocolVersion)) - - implicit val signedProtocolMessageArb - : Arbitrary[SignedProtocolMessage[SignedProtocolMessageContent]] = Arbitrary( - for { - typedMessage <- Arbitrary - .arbitrary[TypedSignedProtocolMessageContent[SignedProtocolMessageContent]] - - signatures <- nonEmptyListGen(implicitly[Arbitrary[Signature]]) - } yield SignedProtocolMessage(typedMessage, signatures)( - SignedProtocolMessage.protocolVersionRepresentativeFor(protocolVersion) - ) - ) - - implicit val serializedRootHashMessagePayloadArb: Arbitrary[SerializedRootHashMessagePayload] = - Arbitrary( - for { - bytes <- byteStringArb.arbitrary - } yield SerializedRootHashMessagePayload(bytes) - ) - - implicit val rootHashMessagePayloadArb: Arbitrary[RootHashMessagePayload] = Arbitrary( - // Gen.oneOf( - Arbitrary.arbitrary[SerializedRootHashMessagePayload] - // TODO(#17020): Disabled EmptyRootHashMessagePayload for now - figure out how to properly compare objects - // e.g using: EmptyRootHashMessagePayload.emptyRootHashMessagePayloadCast - // , Gen.const[RootHashMessagePayload](EmptyRootHashMessagePayload) - // ) - ) - - val informeeMessageArb: Arbitrary[InformeeMessage] = Arbitrary( - for { - fullInformeeTree <- Arbitrary.arbitrary[FullInformeeTree] - submittingParticipantSignature <- Arbitrary.arbitrary[Signature] - } yield InformeeMessage(fullInformeeTree, submittingParticipantSignature)(protocolVersion) - ) - - implicit val asymmetricEncrypted: Arbitrary[AsymmetricEncrypted[SecureRandomness]] = Arbitrary( - for { - encrypted <- byteStringArb.arbitrary - encryptionAlgorithmSpec <- encryptionAlgorithmSpecArb.arbitrary - fingerprint <- generatorsTopology.fingerprintArb.arbitrary - } yield AsymmetricEncrypted(encrypted, encryptionAlgorithmSpec, fingerprint) - ) - - val encryptedViewMessage: Arbitrary[EncryptedViewMessage[ViewType]] = Arbitrary( - for { - signatureO <- Gen.option(Arbitrary.arbitrary[Signature]) - viewHash <- Arbitrary.arbitrary[ViewHash] - encryptedViewBytestring <- byteStringArb.arbitrary - sessionKey <- Generators.nonEmptyListGen[AsymmetricEncrypted[SecureRandomness]] - viewType <- 
viewTypeArb.arbitrary - encryptedView = EncryptedView(viewType)(Encrypted.fromByteString(encryptedViewBytestring)) - synchronizerId <- Arbitrary.arbitrary[PhysicalSynchronizerId] - viewEncryptionScheme <- genArbitrary[SymmetricKeyScheme].arbitrary - } yield EncryptedViewMessage.apply( - submittingParticipantSignature = signatureO, - viewHash = viewHash, - sessionKeys = sessionKey, - encryptedView = encryptedView, - synchronizerId = synchronizerId, - viewEncryptionScheme = viewEncryptionScheme, - protocolVersion = protocolVersion, - ) - ) - - private val assignmentMediatorMessageArb: Arbitrary[AssignmentMediatorMessage] = Arbitrary( - for { - tree <- Arbitrary.arbitrary[AssignmentViewTree] - submittingParticipantSignature <- Arbitrary.arbitrary[Signature] - } yield AssignmentMediatorMessage(tree, submittingParticipantSignature)( - AssignmentMediatorMessage.protocolVersionRepresentativeFor(protocolVersion) - ) - ) - - private val unassignmentMediatorMessageArb: Arbitrary[UnassignmentMediatorMessage] = Arbitrary( - for { - tree <- Arbitrary.arbitrary[UnassignmentViewTree] - submittingParticipantSignature <- Arbitrary.arbitrary[Signature] - rpv = UnassignmentMediatorMessage.protocolVersionRepresentativeFor(protocolVersion) - } yield UnassignmentMediatorMessage(tree, submittingParticipantSignature)(rpv) - ) - - implicit val rootHashMessageArb: Arbitrary[RootHashMessage[RootHashMessagePayload]] = - Arbitrary( - for { - rootHash <- Arbitrary.arbitrary[RootHash] - psid <- Arbitrary.arbitrary[PhysicalSynchronizerId] - viewType <- viewTypeArb.arbitrary - submissionTopologyTime <- Arbitrary.arbitrary[CantonTimestamp] - payload <- Arbitrary.arbitrary[RootHashMessagePayload] - } yield RootHashMessage.apply( - rootHash, - psid, - viewType, - submissionTopologyTime, - payload, - ) - ) - - implicit val topologyTransactionsBroadcast: Arbitrary[TopologyTransactionsBroadcast] = Arbitrary( - for { - psid <- Arbitrary.arbitrary[PhysicalSynchronizerId] - transactions <- Gen.listOf( - Arbitrary.arbitrary[SignedTopologyTransaction[TopologyChangeOp, TopologyMapping]] - ) - } yield TopologyTransactionsBroadcast(psid, transactions) - ) - - // TODO(#14515) Check that the generator is exhaustive - implicit val unsignedProtocolMessageArb: Arbitrary[UnsignedProtocolMessage] = - Arbitrary( - Gen.oneOf[UnsignedProtocolMessage]( - rootHashMessageArb.arbitrary, - informeeMessageArb.arbitrary, - encryptedViewMessage.arbitrary, - assignmentMediatorMessageArb.arbitrary, - unassignmentMediatorMessageArb.arbitrary, - topologyTransactionsBroadcast.arbitrary, - ) - ) - - // TODO(#14515) Check that the generator is exhaustive - implicit val protocolMessageArb: Arbitrary[ProtocolMessage] = - Arbitrary(unsignedProtocolMessageArb.arbitrary) - - // TODO(#14515) Check that the generator is exhaustive - implicit val envelopeContentArb: Arbitrary[EnvelopeContent] = Arbitrary(for { - protocolMessage <- protocolMessageArb.arbitrary - } yield EnvelopeContent.tryCreate(protocolMessage, protocolVersion)) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/messages/GeneratorsVerdict.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/messages/GeneratorsVerdict.scala deleted file mode 100644 index 6ccde99057..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/messages/GeneratorsVerdict.scala +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
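GeneratorsMessages above is built from one recurring pattern: every field of a message gets its own Arbitrary or Gen, and the message generator is a for-comprehension over them, so generators compose monadically. A self-contained ScalaCheck sketch of that pattern, with hypothetical stand-in types rather than Canton's:

```scala
import org.scalacheck.{Arbitrary, Gen}

final case class RequestIdSketch(ts: Long)
final case class ResponseSketch(requestId: RequestIdSketch, sender: String, approved: Boolean)

object ResponseGenerators {
  // Field-level generator, reusable wherever a RequestIdSketch is needed.
  implicit val requestIdArb: Arbitrary[RequestIdSketch] =
    Arbitrary(Gen.chooseNum(0L, Long.MaxValue).map(RequestIdSketch(_)))

  // Message-level generator: a for-comprehension over the field generators.
  implicit val responseArb: Arbitrary[ResponseSketch] = Arbitrary(
    for {
      requestId <- Arbitrary.arbitrary[RequestIdSketch]
      sender    <- Gen.identifier
      approved  <- Gen.oneOf(true, false)
    } yield ResponseSketch(requestId, sender, approved)
  )
}
```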
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protocol.messages - -import com.digitalasset.canton.Generators.nonEmptyListGen -import com.digitalasset.canton.LfPartyId -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.version.ProtocolVersion -import org.scalacheck.{Arbitrary, Gen} - -final case class GeneratorsVerdict( - protocolVersion: ProtocolVersion, - generatorsLocalVerdict: GeneratorsLocalVerdict, -) { - import generatorsLocalVerdict.* - - // TODO(#14515) Check that the generator is exhaustive - implicit val mediatorRejectArb: Arbitrary[Verdict.MediatorReject] = - Arbitrary( - // TODO(#14515): do we want randomness here? - Gen.const { - val status = com.google.rpc.status.Status(com.google.rpc.Code.CANCELLED_VALUE) - Verdict.MediatorReject.tryCreate(status, isMalformed = false, protocolVersion) - } - ) - - private val verdictApproveArb: Arbitrary[Verdict.Approve] = Arbitrary( - Gen.const(Verdict.protocolVersionRepresentativeFor(protocolVersion)).map(Verdict.Approve()) - ) - - private implicit val participantRejectArb: Arbitrary[Verdict.ParticipantReject] = Arbitrary( - nonEmptyListGen[(Set[LfPartyId], LocalReject)]( - participantRejectReasonArb - ).map { reasons => - Verdict.ParticipantReject(reasons)(Verdict.protocolVersionRepresentativeFor(protocolVersion)) - } - ) - - // If this pattern match is not exhaustive anymore, update the generator below - { - ((_: Verdict) match { - case _: Verdict.Approve => () - case _: Verdict.MediatorReject => () - case _: Verdict.ParticipantReject => () - }).discard - } - implicit val verdictArb: Arbitrary[Verdict] = Arbitrary( - Gen.oneOf( - verdictApproveArb.arbitrary, - mediatorRejectArb.arbitrary, - participantRejectArb.arbitrary, - ) - ) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/messages/TopologyTransactionTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/messages/TopologyTransactionTest.scala deleted file mode 100644 index 16a17bf050..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/protocol/messages/TopologyTransactionTest.scala +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.protocol.messages - -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} -import com.digitalasset.canton.crypto.SigningKeyUsage -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.protocol.{TestSynchronizerParameters, v30} -import com.digitalasset.canton.serialization.HasCryptographicEvidenceTest -import com.digitalasset.canton.topology.* -import com.digitalasset.canton.topology.transaction.* -import com.digitalasset.canton.topology.transaction.DelegationRestriction.{ - CanSignAllButNamespaceDelegations, - CanSignAllMappings, - CanSignSpecificMappings, -} -import com.digitalasset.canton.version.v1.UntypedVersionedMessage -import com.digitalasset.canton.{BaseTest, FailOnShutdown, HasExecutionContext, LfPackageId} -import com.google.protobuf.ByteString -import org.scalatest.exceptions.TestFailedException -import org.scalatest.wordspec.AnyWordSpec - -class TopologyTransactionTest - extends AnyWordSpec - with BaseTest - with HasCryptographicEvidenceTest - with FailOnShutdown - with HasExecutionContext { - - private val uid = DefaultTestIdentities.uid - private val uid2 = UniqueIdentifier.tryFromProtoPrimitive("da1::default1") - private val sequencerId = DefaultTestIdentities.daSequencerId - private val synchronizerId = DefaultTestIdentities.physicalSynchronizerId - private val crypto = - TestingTopology(sequencerGroup = - SequencerGroup( - active = Seq(SequencerId(synchronizerId.uid)), - passive = Seq.empty, - threshold = PositiveInt.one, - ) - ).build(loggerFactory).forOwnerAndSynchronizer(sequencerId, synchronizerId) - // TODO(#25072): Create keys with a single usage and change the tests accordingly - private val publicKey = - crypto.crypto.privateCrypto - .generateSigningKey(usage = SigningKeyUsage.All) - .valueOrFail("create public key") - .futureValueUS - private val defaultDynamicSynchronizerParameters = TestSynchronizerParameters.defaultDynamic - - private def mk[T <: TopologyMapping]( - mapping: T - ): TopologyTransaction[TopologyChangeOp.Replace, T] = - TopologyTransaction(TopologyChangeOp.Replace, PositiveInt.one, mapping, testedProtocolVersion) - - private val deserialize: ByteString => TopologyTransaction[TopologyChangeOp, TopologyMapping] = - bytes => - TopologyTransaction.fromByteString(testedProtocolVersionValidation, bytes) match { - case Left(err) => throw new TestFailedException(err.toString, 0) - case Right(msg) => msg - } - - private def runTest( - t1: TopologyTransaction[TopologyChangeOp, TopologyMapping], - t2: TopologyTransaction[TopologyChangeOp, TopologyMapping], - ): Unit = { - behave like hasCryptographicEvidenceSerialization(t1, t2) - behave like hasCryptographicEvidenceDeserialization(t1, t1.getCryptographicEvidence)( - deserialize - ) - } - - "synchronizer topology transactions" when { - - "namespace mappings" should { - val nsd = - mk(NamespaceDelegation.tryCreate(uid.namespace, publicKey, CanSignAllMappings)) - val nsd2 = - mk( - NamespaceDelegation.tryCreate( - uid2.namespace, - publicKey, - CanSignAllButNamespaceDelegations, - ) - ) - - runTest(nsd, nsd2) - } - - def mkProtoTransaction(nsd: v30.NamespaceDelegation) = v30.TopologyTransaction( - operation = v30.Enums.TopologyChangeOp.TOPOLOGY_CHANGE_OP_ADD_REPLACE, - serial = 1, - mapping = Some( - v30.TopologyMapping(v30.TopologyMapping.Mapping.NamespaceDelegation(nsd)) - ), - ) - - "read legacy namespace delegations" in { - // Test case: 
is_root_delegation=true, restriction=empty <=> CanSignAllMappings - val rootDelegationProto = v30.NamespaceDelegation( - uid.namespace.toProtoPrimitive, - Some(publicKey.toProtoV30), - isRootDelegation = true, - restriction = v30.NamespaceDelegation.Restriction.Empty, - ) - val rootFromScala = NamespaceDelegation - .create( - uid.namespace, - publicKey, - CanSignAllMappings, - ) - .value - // we don't need to check the inverse direction of serialization, because topology transactions are memoized, - // therefore we only need to be able to serialize to the new format - NamespaceDelegation.fromProtoV30(rootDelegationProto).value shouldBe rootFromScala - - val protoTx = mkProtoTransaction(rootDelegationProto) - val scalaTxFromBytes = TopologyTransaction - .fromTrustedByteString( - UntypedVersionedMessage( - UntypedVersionedMessage.Wrapper.Data(protoTx.toByteString), - 1, - ).toByteString - ) - .value - scalaTxFromBytes.toByteString - - // Test case: is_root_delegation=false, restriction=empty <=> CanSignAllButNamespaceDelegations - val nonRootDelegationProto = v30.NamespaceDelegation( - uid.namespace.toProtoPrimitive, - Some(publicKey.toProtoV30), - isRootDelegation = false, - restriction = v30.NamespaceDelegation.Restriction.Empty, - ) - val nonRootFromScala = NamespaceDelegation - .create( - uid.namespace, - publicKey, - CanSignAllButNamespaceDelegations, - ) - .value - // we don't need to check the inverse direction of serialization, because topology transactions are memoized, - // therefore we only need to be able to serialize to the new format - NamespaceDelegation.fromProtoV30(nonRootDelegationProto).value shouldBe nonRootFromScala - - // Test case: is_root_delegation=false, restriction=non-empty <=> CanSignSpecificMappings - Seq( - v30.NamespaceDelegation.Restriction - .CanSignAllMappings(v30.NamespaceDelegation.CanSignAllMappings()) -> CanSignAllMappings, - v30.NamespaceDelegation.Restriction.CanSignAllButNamespaceDelegations( - v30.NamespaceDelegation.CanSignAllButNamespaceDelegations() - ) -> CanSignAllButNamespaceDelegations, - v30.NamespaceDelegation.Restriction.CanSignSpecificMapings( - v30.NamespaceDelegation.CanSignSpecificMappings( - // all but UNSPECIFIED - (v30.Enums.TopologyMappingCode.values.toSet - v30.Enums.TopologyMappingCode.TOPOLOGY_MAPPING_CODE_UNSPECIFIED).toSeq - .sortBy(_.value) - ) - ) -> CanSignSpecificMappings(NonEmpty.from(TopologyMapping.Code.all).value.toSet), - ).foreach { case (protoRestriction, scalaRestriction) => - val restrictedDelegationProto = v30.NamespaceDelegation( - uid.namespace.toProtoPrimitive, - Some(publicKey.toProtoV30), - isRootDelegation = false, - restriction = protoRestriction, - ) - val restrictedFromScala = NamespaceDelegation - .create( - uid.namespace, - publicKey, - scalaRestriction, - ) - .value - NamespaceDelegation - .fromProtoV30(restrictedDelegationProto) - .value shouldBe restrictedFromScala - restrictedFromScala.toProto shouldBe restrictedDelegationProto - } - } - - "key to owner mappings" should { - val k1 = mk(OwnerToKeyMapping(sequencerId, NonEmpty(Seq, publicKey))) - val k2 = mk(OwnerToKeyMapping(sequencerId, NonEmpty(Seq, publicKey))) - runTest(k1, k2) - } - - "party to participant" should { - val p1 = - mk( - PartyToParticipant.tryCreate( - PartyId(uid), - PositiveInt.one, - Seq(HostingParticipant(ParticipantId(uid2), ParticipantPermission.Observation)), - ) - ) - - val p2 = - mk( - PartyToParticipant.tryCreate( - PartyId(uid), - PositiveInt.two, - Seq( - HostingParticipant(ParticipantId(uid2),
ParticipantPermission.Confirmation), - HostingParticipant(ParticipantId(uid), ParticipantPermission.Submission), - ), - ) - ) - - runTest(p1, p2) - } - - "participant state" should { - val ps1 = mk( - ParticipantSynchronizerPermission( - synchronizerId, - ParticipantId(uid), - ParticipantPermission.Submission, - limits = None, - loginAfter = None, - ) - ) - val ps2 = mk( - ParticipantSynchronizerPermission( - synchronizerId, - ParticipantId(uid), - ParticipantPermission.Observation, - limits = Some(ParticipantSynchronizerLimits(NonNegativeInt.tryCreate(13))), - loginAfter = Some(CantonTimestamp.MinValue.plusSeconds(17)), - ) - ) - - runTest(ps1, ps2) - - } - - "synchronizer parameters change" should { - val dmp1 = - mk(SynchronizerParametersState(SynchronizerId(uid), defaultDynamicSynchronizerParameters)) - val dmp2 = - mk(SynchronizerParametersState(SynchronizerId(uid), defaultDynamicSynchronizerParameters)) - runTest(dmp1, dmp2) - } - - } - - "authorized store topology transactions" when { - "package vetting" should { - "honor specified LET boundaries" in { - val validFrom = CantonTimestamp.ofEpochSecond(20) - val validUntil = CantonTimestamp.ofEpochSecond(30) - val vp = - VettedPackage(LfPackageId.assertFromString("pkg-id"), Some(validFrom), Some(validUntil)) - assert(!vp.validAt(validFrom.immediatePredecessor), "before valid-from invalid") - // see https://github.com/DACH-NY/canton-network-node/issues/18259 regarding valid-from inclusivity: - assert(vp.validAt(validFrom), "valid-from must be inclusive") - assert(vp.validAt(validFrom.immediateSuccessor), "between must be valid") - assert(!vp.validAt(validUntil), "valid-until must be exclusive") - } - - "honor open ended LET boundaries" in { - val validFrom = CantonTimestamp.ofEpochSecond(20) - val untilForever = - VettedPackage(LfPackageId.assertFromString("pkg-id"), Some(validFrom), None) - assert(untilForever.validAt(CantonTimestamp.MaxValue), "valid until forever") - - val validUntil = CantonTimestamp.ofEpochSecond(20) - val sinceForever = - VettedPackage(LfPackageId.assertFromString("pkg-id"), None, Some(validUntil)) - assert(sinceForever.validAt(CantonTimestamp.MinValue), "valid since forever") - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/resource/DbStorageSingleTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/resource/DbStorageSingleTest.scala deleted file mode 100644 index 3577bf6753..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/resource/DbStorageSingleTest.scala +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
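The VettedPackage tests above fix the vetting-window semantics precisely: valid-from is inclusive, valid-until is exclusive, and an absent bound is open-ended. A minimal stand-alone sketch of exactly those rules (a hypothetical VettedPackageSketch on java.time, not the real VettedPackage):

```scala
import java.time.Instant

// valid-from inclusive, valid-until exclusive, missing bounds open-ended.
final case class VettedPackageSketch(validFrom: Option[Instant], validUntil: Option[Instant]) {
  def validAt(t: Instant): Boolean =
    validFrom.forall(from => !t.isBefore(from)) && validUntil.forall(until => t.isBefore(until))
}

object VettingWindowDemo extends App {
  val from  = Instant.parse("2024-01-01T00:00:20Z")
  val until = Instant.parse("2024-01-01T00:00:30Z")
  val vp = VettedPackageSketch(Some(from), Some(until))

  assert(!vp.validAt(from.minusNanos(1)))                             // before valid-from: invalid
  assert(vp.validAt(from))                                            // valid-from is inclusive
  assert(!vp.validAt(until))                                          // valid-until is exclusive
  assert(VettedPackageSketch(Some(from), None).validAt(Instant.MAX))  // open-ended until forever
  assert(VettedPackageSketch(None, Some(until)).validAt(Instant.MIN)) // open-ended since forever
}
```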
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.resource - -import com.digitalasset.canton.config.DbConfig.Postgres -import com.digitalasset.canton.config.{DbConfig, DefaultProcessingTimeouts} -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.metrics.CommonMockMetrics -import com.digitalasset.canton.store.db.DbStorageSetup -import com.digitalasset.canton.store.db.DbStorageSetup.DbBasicConfig -import com.digitalasset.canton.time.SimClock -import com.digitalasset.canton.{BaseTest, CloseableTest, HasExecutionContext} -import org.scalatest.wordspec.AsyncWordSpec - -trait DbStorageSingleTest - extends AsyncWordSpec - with BaseTest - with HasExecutionContext - with CloseableTest { - - def baseConfig: DbConfig - def modifyUser(user: String): DbConfig - def modifyPassword(password: String): DbConfig - def modifyPort(port: Int): DbConfig - def modifyDatabaseName(dbName: String): DbConfig - val clock = new SimClock(CantonTimestamp.Epoch, loggerFactory) - - "DbStorage" should { - - "config should not leak confidential data" in { - val stinky = "VERYSTINKYPASSWORD" - (modifyPassword(stinky).toString) should not include (stinky) - } - - "connect on correct config" in { - val config = baseConfig - DbStorageSingle - .create( - config, - connectionPoolForParticipant = false, - None, - clock, - None, - CommonMockMetrics.dbStorage, - DefaultProcessingTimeouts.testing, - loggerFactory, - ) - .valueOrFailShutdown("storage create") shouldBe a[DbStorageSingle] - } - - "fail on invalid credentials" in { - val config = modifyUser("foobar") - loggerFactory.suppressWarningsAndErrors { - DbStorageSingle - .create( - config, - connectionPoolForParticipant = false, - None, - clock, - None, - CommonMockMetrics.dbStorage, - DefaultProcessingTimeouts.testing, - loggerFactory, - ) - .leftOrFailShutdown("storage create") shouldBe a[String] - } - } - - "fail on invalid database" in { - val config = modifyDatabaseName("foobar") - loggerFactory.suppressWarningsAndErrors { - DbStorageSingle - .create( - config, - connectionPoolForParticipant = false, - None, - clock, - None, - CommonMockMetrics.dbStorage, - DefaultProcessingTimeouts.testing, - loggerFactory, - ) - .leftOrFailShutdown("storage create") shouldBe a[String] - } - } - - "fail on invalid port" in { - val config = modifyPort(14001) - loggerFactory.suppressWarningsAndErrors { - DbStorageSingle - .create( - config, - connectionPoolForParticipant = false, - None, - clock, - None, - CommonMockMetrics.dbStorage, - DefaultProcessingTimeouts.testing, - loggerFactory, - ) - .leftOrFailShutdown("storage create") shouldBe a[String] - } - } - } - -} - -class DbStorageSingleTestPostgres extends DbStorageSingleTest { - - private lazy val setup = DbStorageSetup.postgres(loggerFactory) - - private def modifyConfig(config: DbBasicConfig): Postgres = - DbConfig.Postgres(config.toPostgresConfig) - - def baseConfig: Postgres = modifyConfig(setup.basicConfig) - - def modifyUser(userName: String): Postgres = - modifyConfig(setup.basicConfig.copy(username = userName)) - - def modifyPassword(password: String): Postgres = - modifyConfig(setup.basicConfig.copy(password = password)) - - def modifyPort(port: Int): Postgres = - modifyConfig(setup.basicConfig.copy(port = port)) - - def modifyDatabaseName(dbName: String): Postgres = - modifyConfig(setup.basicConfig.copy(dbName = dbName)) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/scheduler/CronTest.scala 
b/canton/community/common/src/test/test/scala/com/digitalasset/canton/scheduler/CronTest.scala deleted file mode 100644 index 662b0c6425..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/scheduler/CronTest.scala +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.scheduler - -import cats.syntax.either.* -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.data.CantonTimestamp -import org.scalatest.wordspec.AnyWordSpec - -import java.time.Instant - -class CronTest extends AnyWordSpec with BaseTest { - "Valid crons accepted" in { - Cron.tryCreate("/10 * * * * ? *") - Cron.tryCreate("10-20 0 * * * ? *") - Cron.tryCreate("* 0-10 7,19 * * ? *") - Cron.tryCreate("* * * * * ?") - - clue("Apparently no such thing as too many fields") { - Cron.create("* * * * * ? * foobar goo moo").map(_.toString) shouldBe Right( - "* * * * * ? * FOOBAR GOO MOO" - ) - } - } - - "Invalid crons rejected with reasonable error message" in { - Cron.create("obviously wrong") shouldBe Left( - "Invalid cron expression \"obviously wrong\": Illegal characters for this position: 'OBV'" - ) - - clue("too few fields") { - Cron.create("* * * * *") shouldBe Left( - "Invalid cron expression \"* * * * *\": Unexpected end of expression." - ) - } - - clue("last field messed up") { - Cron.create("* * * * * ? foo") shouldBe Left( - "Invalid cron expression \"* * * * * ? foo\": Illegal characters for this position: 'FOO'" - ) - } - - Cron.create("* * * * * * *") shouldBe Left( - "Invalid cron expression \"* * * * * * *\": Support for specifying both a day-of-week AND a day-of-month parameter is not implemented." - ) - - val tooLong = "1234567890" * 30 + "too long" - Cron.create(tooLong).leftOrFail("cron expression too long") should startWith - s"Invalid cron expression \"$tooLong\": requirement failed: The given string has a maximum length of 300 but a string of length 308" - - } - - "Valid crons produce reasonable future next valid times" in { - // Run every 10 minutes, on the hours of 8am/pm, in December 2022 - val cron = Cron.tryCreate("0 /10 8,20 * 12 ? 2022") - - // Another way to express the same schedule building a cartesian product of day, hour, and minute - val expectedScheduleTimes = - for { dayOfMonth <- 1 to 31; hour <- Seq(8, 20); minute <- Range(0, 60, 10) } yield date( - dayOfMonth, - hour, - minute, - ) - - // Zip with next time to juxtapose reference time with next expected schedule time - expectedScheduleTimes.zip(expectedScheduleTimes.drop(1)).foreach { - case (referenceTime, nextScheduledExpected) => - val scheduledActual = - cron - .getNextValidTimeAfter(referenceTime) - .valueOr(err => fail(s"expect valid date but got err: ${err.message}")) - scheduledActual shouldBe nextScheduledExpected - } - } - - "A valid cron that ends at a certain point yields no next valid time thereafter" in { - // Only run in 2021 and not in subsequent years - val cron = Cron.tryCreate("* * * * * ? 
2021") - - val dateAfterCronWindow = date(1, 12, 0) - val never = cron.getNextValidTimeAfter(dateAfterCronWindow) - never shouldBe Left(Cron.NoNextValidTimeAfter(dateAfterCronWindow)) - } - - private def date(dayOfMonth: Int, hour: Int, minute: Int): CantonTimestamp = { - def withLeadingZero(i: Int) = "%02d".format(i) - - CantonTimestamp - .fromInstant( - Instant.parse( - s"2022-12-${withLeadingZero(dayOfMonth)}T${withLeadingZero(hour)}:${withLeadingZero(minute)}:00Z" - ) - ) - .valueOrFail("valid date expected") - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/scheduler/IgnoresTransientSchedulerErrors.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/scheduler/IgnoresTransientSchedulerErrors.scala deleted file mode 100644 index abe388ab79..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/scheduler/IgnoresTransientSchedulerErrors.scala +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.scheduler - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.logging.LogEntry -import com.digitalasset.canton.logging.SuppressingLogger.LogEntryOptionality - -trait IgnoresTransientSchedulerErrors { - this: BaseTest => - - // Scheduled pruning can optionally produce warnings if nodes are temporarily not ready to be pruned. #14223 - protected def ignoreTransientSchedulerErrors[T]( - pruningScheduler: String - )(code: => T): T = - loggerFactory.assertLogsUnorderedOptional( - code, - ( - LogEntryOptionality.OptionalMany, - (entry: LogEntry) => { - entry.loggerName should include(pruningScheduler) - entry.warningMessage should include regex "Backing off .* or until next window after error" - }, - ), - ) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/AsyncResultTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/AsyncResultTest.scala deleted file mode 100644 index 2bd8ac17ee..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/AsyncResultTest.scala +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing - -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.{BaseTest, DiscardedFuture, DiscardedFutureTest} -import org.scalatest.wordspec.AnyWordSpec -import org.wartremover.test.WartTestTraverser - -class AsyncResultTest extends AnyWordSpec with BaseTest { - "DiscardedFuture" should { - "detect discarded AsyncResult" in { - val result = WartTestTraverser(DiscardedFuture) { - AsyncResult(FutureUnlessShutdown.pure(())) - () - } - DiscardedFutureTest.assertErrors(result, 1) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/ConnectionPoolTestHelpers.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/ConnectionPoolTestHelpers.scala deleted file mode 100644 index b14aac7a45..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/ConnectionPoolTestHelpers.scala +++ /dev/null @@ -1,619 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
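The schedule check in CronTest above works by materializing the expected firing times as a cartesian product of day, hour, and minute, then zipping the sequence with its own tail so that each reference time sits next to the firing time getNextValidTimeAfter should return. The same pattern stand-alone on java.time (illustrative only, no cron library involved):

```scala
import java.time.LocalDateTime

object CronScheduleDemo extends App {
  // Every 10 minutes during the 8am and 8pm hours, throughout December 2022 —
  // the schedule from the "0 /10 8,20 * 12 ? 2022" test, built explicitly.
  val expected: Seq[LocalDateTime] =
    for {
      day    <- 1 to 31
      hour   <- Seq(8, 20)
      minute <- 0 until 60 by 10
    } yield LocalDateTime.of(2022, 12, day, hour, minute)

  // Pair every reference time with the firing time expected right after it.
  expected.zip(expected.drop(1)).take(3).foreach { case (reference, next) =>
    println(s"after $reference the schedule should next fire at $next")
  }
}
```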
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing - -import com.daml.grpc.adapter.ExecutionSequencerFactory -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.concurrent.FutureSupervisor -import com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, Port, PositiveInt} -import com.digitalasset.canton.connection.v30 -import com.digitalasset.canton.connection.v30.ApiInfoServiceGrpc.ApiInfoServiceStub -import com.digitalasset.canton.connection.v30.GetApiInfoResponse -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto -import com.digitalasset.canton.crypto.{Crypto, Fingerprint, SynchronizerCrypto} -import com.digitalasset.canton.lifecycle.LifeCycle -import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.networking.Endpoint -import com.digitalasset.canton.networking.grpc.CantonGrpcUtil -import com.digitalasset.canton.sequencer.api.v30 as SequencerService -import com.digitalasset.canton.sequencer.api.v30.SequencerConnect -import com.digitalasset.canton.sequencer.api.v30.SequencerConnectServiceGrpc.SequencerConnectServiceStub -import com.digitalasset.canton.sequencer.api.v30.SequencerServiceGrpc.SequencerServiceStub -import com.digitalasset.canton.sequencing.ConnectionX.ConnectionXConfig -import com.digitalasset.canton.sequencing.InternalSequencerConnectionX.ConnectionAttributes -import com.digitalasset.canton.sequencing.SequencerConnectionXPool.SequencerConnectionXPoolConfig -import com.digitalasset.canton.sequencing.SequencerSubscriptionPool.SequencerSubscriptionPoolConfig -import com.digitalasset.canton.sequencing.authentication.AuthenticationTokenManagerConfig -import com.digitalasset.canton.sequencing.client.transports.GrpcSequencerClientAuth -import com.digitalasset.canton.time.Clock -import com.digitalasset.canton.topology.{ - Member, - Namespace, - ParticipantId, - PhysicalSynchronizerId, - SequencerId, - SynchronizerId, -} -import com.digitalasset.canton.tracing.{TraceContext, TracingConfig} -import com.digitalasset.canton.util.{PekkoUtil, ResourceUtil} -import com.digitalasset.canton.version.{ - ProtocolVersion, - ProtocolVersionCompatibility, - ReleaseVersion, -} -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import io.grpc.stub.StreamObserver -import io.grpc.{CallOptions, Channel, Status} -import org.apache.pekko.actor.ActorSystem -import org.apache.pekko.stream.Materializer -import org.scalatest.Assertion -import org.scalatest.matchers.should.Matchers - -import scala.collection.concurrent.TrieMap -import scala.concurrent.{ExecutionContextExecutor, Future, blocking} -import scala.util.Random - -trait ConnectionPoolTestHelpers { - this: BaseTest & HasExecutionContext => - import ConnectionPoolTestHelpers.* - - private lazy val seedForRandomness: Long = { - val seed = Random.nextLong() - logger.debug(s"Seed for randomness = $seed") - seed - } - - protected lazy val authConfig: AuthenticationTokenManagerConfig = - AuthenticationTokenManagerConfig() - - protected lazy val testCrypto: SynchronizerCrypto = - SynchronizerCrypto( - SymbolicCrypto - .create(testedReleaseProtocolVersion, timeouts, loggerFactory), - defaultStaticSynchronizerParameters, - ) - - private implicit val actorSystem: ActorSystem = - PekkoUtil.createActorSystem(loggerFactory.threadName) - - private implicit val executionSequencerFactory: ExecutionSequencerFactory = - PekkoUtil.createExecutionSequencerFactory(loggerFactory.threadName, 
noTracingLogger) - - override def afterAll(): Unit = - LifeCycle.close( - executionSequencerFactory, - LifeCycle.toCloseableActorSystem(actorSystem, logger, timeouts), - )(logger) - - protected lazy val testMember: Member = ParticipantId("test") - - protected def mkConnectionAttributes( - synchronizerIndex: Int, - sequencerIndex: Int, - ): ConnectionAttributes = - ConnectionAttributes( - testSynchronizerId(synchronizerIndex), - testSequencerId(sequencerIndex), - defaultStaticSynchronizerParameters, - ) - - protected def mkDummyConnectionConfig( - index: Int, - endpointIndexO: Option[Int] = None, - ): ConnectionXConfig = { - val endpoint = Endpoint(s"does-not-exist-${endpointIndexO.getOrElse(index)}", Port.tryCreate(0)) - ConnectionXConfig( - name = s"test-$index", - endpoint = endpoint, - transportSecurity = false, - customTrustCertificates = None, - tracePropagation = TracingConfig.Propagation.Disabled, - ) - } - - protected def withConnection[V]( - testResponses: TestResponses - )(f: (InternalSequencerConnectionX, TestHealthListener) => V): V = { - val stubFactory = new TestSequencerConnectionXStubFactory(testResponses, loggerFactory) - val config = mkDummyConnectionConfig(0) - - val connection = new GrpcInternalSequencerConnectionX( - config, - clientProtocolVersions, - minimumProtocolVersion, - stubFactory, - futureSupervisor, - timeouts, - loggerFactory.append("connection", config.name), - ) - - val listener = new TestHealthListener(connection.health) - connection.health.registerOnHealthChange(listener) - - ResourceUtil.withResource(connection)(f(_, listener)) - } - - protected def mkPoolConfig( - nbConnections: PositiveInt, - trustThreshold: PositiveInt, - expectedSynchronizerIdO: Option[PhysicalSynchronizerId] = None, - ): SequencerConnectionXPoolConfig = { - val configs = - NonEmpty.from((0 until nbConnections.unwrap).map(mkDummyConnectionConfig(_))).value - - SequencerConnectionXPoolConfig( - connections = configs, - trustThreshold = trustThreshold, - expectedPSIdO = expectedSynchronizerIdO, - ) - } - - protected def withConnectionPool[V]( - nbConnections: PositiveInt, - trustThreshold: PositiveInt, - attributesForConnection: Int => ConnectionAttributes, - expectedSynchronizerIdO: Option[PhysicalSynchronizerId] = None, - testTimeouts: ProcessingTimeout = timeouts, - )(f: (SequencerConnectionXPool, CreatedConnections, TestHealthListener) => V): V = { - val config = mkPoolConfig(nbConnections, trustThreshold, expectedSynchronizerIdO) - - val poolFactory = new TestSequencerConnectionXPoolFactory( - attributesForConnection, - authConfig, - testMember, - wallClock, - testCrypto.crypto, - Some(seedForRandomness), - futureSupervisor, - testTimeouts, - loggerFactory, - ) - val pool = poolFactory.create(config).valueOrFail("create connection pool") - - val listener = new TestHealthListener(pool.health) - pool.health.registerOnHealthChange(listener) - - ResourceUtil.withResource(pool)(f(_, poolFactory.createdConnections, listener)) - } - - protected def mkSubscriptionPoolConfig( - trustThreshold: PositiveInt, - reserve: NonNegativeInt, - ): SequencerSubscriptionPoolConfig = - SequencerSubscriptionPoolConfig(trustThreshold, reserve) - - protected def withSubscriptionPool[V]( - trustThreshold: PositiveInt, - livenessMargin: NonNegativeInt, - connectionPool: SequencerConnectionXPool, - )(f: (SequencerSubscriptionPool, TestHealthListener) => V): V = { - val config = mkSubscriptionPoolConfig(trustThreshold, livenessMargin) - - val subscriptionPool = SequencerSubscriptionPoolFactory.create( - 
initialConfig = config, - pool = connectionPool, - clock = wallClock, - timeouts = timeouts, - loggerFactory = loggerFactory, - ) - - val listener = new TestHealthListener(subscriptionPool.health) - subscriptionPool.health.registerOnHealthChange(listener) - - ResourceUtil.withResource(subscriptionPool)(f(_, listener)) - } - - protected def withConnectionAndSubscriptionPools[V]( - nbConnections: PositiveInt, - trustThreshold: PositiveInt, - attributesForConnection: Int => ConnectionAttributes, - expectedSynchronizerIdO: Option[PhysicalSynchronizerId] = None, - livenessMargin: NonNegativeInt, - )(f: (SequencerConnectionXPool, SequencerSubscriptionPool, TestHealthListener) => V): V = - withConnectionPool( - nbConnections, - trustThreshold, - attributesForConnection, - expectedSynchronizerIdO, - ) { case (connectionPool, _createdConnections, _connectionPoolListener) => - withSubscriptionPool(trustThreshold, livenessMargin, connectionPool) { - (subscriptionPool, subscriptionPoolListener) => - f(connectionPool, subscriptionPool, subscriptionPoolListener) - } - } - -} - -private object ConnectionPoolTestHelpers { - import BaseTest.* - - lazy val failureUnavailable: Either[Exception, Nothing] = - Left(Status.UNAVAILABLE.asRuntimeException()) - - lazy val correctApiResponse: Either[Exception, GetApiInfoResponse] = - Right(v30.GetApiInfoResponse(CantonGrpcUtil.ApiName.SequencerPublicApi)) - lazy val incorrectApiResponse: Either[Exception, GetApiInfoResponse] = - Right(v30.GetApiInfoResponse("this is not a valid API info")) - - lazy val successfulHandshake: Either[Exception, SequencerConnect.HandshakeResponse] = - Right( - SequencerConnect.HandshakeResponse( - testedProtocolVersion.toProtoPrimitive, - SequencerConnect.HandshakeResponse.Value - .Success(SequencerConnect.HandshakeResponse.Success()), - ) - ) - lazy val failedHandshake: Either[Exception, SequencerConnect.HandshakeResponse] = Right( - SequencerConnect.HandshakeResponse( - testedProtocolVersion.toProtoPrimitive, - SequencerConnect.HandshakeResponse.Value - .Failure(SequencerConnect.HandshakeResponse.Failure("bad handshake")), - ) - ) - - lazy val correctSynchronizerIdResponse1 - : Either[Exception, SequencerConnect.GetSynchronizerIdResponse] = Right( - SequencerConnect.GetSynchronizerIdResponse( - testSynchronizerId(1).toProtoPrimitive, - testSequencerId(1).uid.toProtoPrimitive, - ) - ) - lazy val correctSynchronizerIdResponse2 - : Either[Exception, SequencerConnect.GetSynchronizerIdResponse] = Right( - SequencerConnect.GetSynchronizerIdResponse( - testSynchronizerId(2).toProtoPrimitive, - testSequencerId(2).uid.toProtoPrimitive, - ) - ) - - lazy val correctStaticParametersResponse - : Either[Exception, SequencerConnect.GetSynchronizerParametersResponse] = Right( - SequencerConnect.GetSynchronizerParametersResponse( - SequencerConnect.GetSynchronizerParametersResponse.Parameters.ParametersV1( - defaultStaticSynchronizerParameters.toProtoV30 - ) - ) - ) - - lazy val positiveAcknowledgeResponse - : Either[Exception, SequencerService.AcknowledgeSignedResponse] = Right( - SequencerService.AcknowledgeSignedResponse() - ) - - lazy val correctConnectionAttributes: ConnectionAttributes = ConnectionAttributes( - testSynchronizerId(1), - testSequencerId(1), - defaultStaticSynchronizerParameters, - ) - - private lazy val clientProtocolVersions: NonEmpty[List[ProtocolVersion]] = - ProtocolVersionCompatibility.supportedProtocols( - includeAlphaVersions = true, - includeBetaVersions = true, - release = ReleaseVersion.current, - ) - - private lazy val 
minimumProtocolVersion: Option[ProtocolVersion] = Some(testedProtocolVersion) - - def testSynchronizerId(index: Int): PhysicalSynchronizerId = - SynchronizerId.tryFromString(s"test-synchronizer-$index::namespace").toPhysical - - def testSequencerId(index: Int): SequencerId = - SequencerId.tryCreate( - s"test-sequencer-$index", - Namespace(Fingerprint.tryFromString("namespace")), - ) - - private class TestSequencerConnectionXPoolFactory( - attributesForConnection: Int => ConnectionAttributes, - authConfig: AuthenticationTokenManagerConfig, - member: Member, - clock: Clock, - crypto: Crypto, - seedForRandomnessO: Option[Long], - futureSupervisor: FutureSupervisor, - timeouts: ProcessingTimeout, - loggerFactory: NamedLoggerFactory, - ) extends SequencerConnectionXPoolFactory { - - import SequencerConnectionXPool.{SequencerConnectionXPoolConfig, SequencerConnectionXPoolError} - - private val connectionFactory = new TestInternalSequencerConnectionXFactory( - attributesForConnection, - futureSupervisor, - timeouts, - loggerFactory, - ) - - val createdConnections: CreatedConnections = connectionFactory.createdConnections - - override def create( - initialConfig: SequencerConnectionXPoolConfig - )(implicit - ec: ExecutionContextExecutor, - esf: ExecutionSequencerFactory, - materializer: Materializer, - ): Either[SequencerConnectionXPoolError, SequencerConnectionXPool] = - for { - _ <- initialConfig.validate - } yield { - new SequencerConnectionXPoolImpl( - initialConfig, - connectionFactory, - clock, - authConfig, - member, - crypto, - seedForRandomnessO, - futureSupervisor, - timeouts, - loggerFactory, - ) - } - - override def createFromOldConfig( - sequencerConnections: SequencerConnections, - expectedPSIdO: Option[PhysicalSynchronizerId], - tracingConfig: TracingConfig, - )(implicit - ec: ExecutionContextExecutor, - esf: ExecutionSequencerFactory, - materializer: Materializer, - traceContext: TraceContext, - ): Either[SequencerConnectionXPoolError, SequencerConnectionXPool] = ??? 
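// The ??? above is Predef.???, which throws scala.NotImplementedError when
// invoked. In a test-only factory it acts as a deliberate tripwire: a test
// that unexpectedly exercises the legacy-config path fails loudly instead of
// silently misbehaving. A minimal sketch of the idiom (names illustrative):
//
//   trait Factory { def create(): Int; def createFromOldConfig(): Int }
//   val testOnly: Factory = new Factory {
//     def create(): Int = 42
//     def createFromOldConfig(): Int = ??? // loud failure if a test strays here
//   }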
- } - - protected class TestInternalSequencerConnectionXFactory( - attributesForConnection: Int => ConnectionAttributes, - futureSupervisor: FutureSupervisor, - timeouts: ProcessingTimeout, - loggerFactory: NamedLoggerFactory, - ) extends InternalSequencerConnectionXFactory { - val createdConnections = new CreatedConnections - - override def create(config: ConnectionXConfig)(implicit - ec: ExecutionContextExecutor, - esf: ExecutionSequencerFactory, - materializer: Materializer, - ): InternalSequencerConnectionX = { - val s"test-$indexStr" = config.name: @unchecked - val index = indexStr.toInt - - val attributes = attributesForConnection(index) - val correctSynchronizerIdResponse = Right( - SequencerConnect.GetSynchronizerIdResponse( - attributes.physicalSynchronizerId.toProtoPrimitive, - attributes.sequencerId.uid.toProtoPrimitive, - ) - ) - - val responses = new TestResponses( - apiResponses = Iterator.continually(correctApiResponse), - handshakeResponses = Iterator.continually(successfulHandshake), - synchronizerAndSeqIdResponses = Iterator.continually(correctSynchronizerIdResponse), - staticParametersResponses = Iterator.continually(correctStaticParametersResponse), - acknowledgeResponses = Iterator.continually(positiveAcknowledgeResponse), - ) - - val stubFactory = new TestSequencerConnectionXStubFactory(responses, loggerFactory) - - val connection = new GrpcInternalSequencerConnectionX( - config, - clientProtocolVersions, - minimumProtocolVersion, - stubFactory, - futureSupervisor, - timeouts, - loggerFactory.append("connection", config.name), - ) - - createdConnections.add(index, connection) - - connection - } - } - - protected class CreatedConnections { - private val connectionsMap = TrieMap[Int, InternalSequencerConnectionX]() - - def apply(index: Int): InternalSequencerConnectionX = connectionsMap.apply(index) - - def add(index: Int, connection: InternalSequencerConnectionX): Unit = - blocking { - synchronized { - connectionsMap.updateWith(index) { - case Some(_) => throw new IllegalStateException("Connection already exists") - case None => Some(connection) - } - } - } - - def snapshotAndClear(): Map[Int, InternalSequencerConnectionX] = blocking { - synchronized { - val snapshot = connectionsMap.readOnlySnapshot().toMap - connectionsMap.clear() - snapshot - } - } - - def size: Int = connectionsMap.size - } - - protected class TestResponses( - apiResponses: Iterator[Either[Exception, v30.GetApiInfoResponse]] = Iterator.empty, - handshakeResponses: Iterator[Either[Exception, SequencerConnect.HandshakeResponse]] = - Iterator.empty, - synchronizerAndSeqIdResponses: Iterator[ - Either[Exception, SequencerConnect.GetSynchronizerIdResponse] - ] = Iterator.empty, - staticParametersResponses: Iterator[ - Either[Exception, SequencerConnect.GetSynchronizerParametersResponse] - ] = Iterator.empty, - acknowledgeResponses: Iterator[ - Either[Exception, SequencerService.AcknowledgeSignedResponse] - ] = Iterator.empty, - ) extends Matchers { - private class TestApiInfoServiceStub( - channel: Channel, - options: CallOptions = CallOptions.DEFAULT, - ) extends ApiInfoServiceStub(channel, options) { - override def getApiInfo(request: v30.GetApiInfoRequest): Future[v30.GetApiInfoResponse] = { - withClue("call is not authenticated") { - options.getCredentials shouldBe null - } - nextResponse(apiResponses) - } - - override def build(channel: Channel, options: CallOptions): ApiInfoServiceStub = - new TestApiInfoServiceStub(channel, options) - } - - private class TestSequencerConnectServiceStub( - 
channel: Channel, - options: CallOptions = CallOptions.DEFAULT, - ) extends SequencerConnectServiceStub(channel, options) { - override def handshake( - request: SequencerConnect.HandshakeRequest - ): Future[SequencerConnect.HandshakeResponse] = - nextResponse(handshakeResponses) - - override def getSynchronizerId( - request: SequencerConnect.GetSynchronizerIdRequest - ): Future[SequencerConnect.GetSynchronizerIdResponse] = - nextResponse(synchronizerAndSeqIdResponses) - - override def getSynchronizerParameters( - request: SequencerConnect.GetSynchronizerParametersRequest - ): Future[SequencerConnect.GetSynchronizerParametersResponse] = - nextResponse(staticParametersResponses) - - override def build(channel: Channel, options: CallOptions): SequencerConnectServiceStub = - new TestSequencerConnectServiceStub(channel, options) - } - - private class TestSequencerServiceStub( - channel: Channel, - options: CallOptions = CallOptions.DEFAULT, - ) extends SequencerServiceStub(channel, options) { - override def sendAsync( - request: SequencerService.SendAsyncRequest - ): Future[SequencerService.SendAsyncResponse] = ??? - - override def subscribeV2( - request: SequencerService.SubscriptionRequestV2, - responseObserver: StreamObserver[SequencerService.SubscriptionResponse], - ): Unit = ??? - - override def acknowledgeSigned( - request: SequencerService.AcknowledgeSignedRequest - ): scala.concurrent.Future[SequencerService.AcknowledgeSignedResponse] = { - withClue("call is authenticated") { - Option(options.getCredentials) shouldBe defined - } - nextResponse(acknowledgeResponses) - } - - override def downloadTopologyStateForInit( - request: SequencerService.DownloadTopologyStateForInitRequest, - responseObserver: StreamObserver[ - SequencerService.DownloadTopologyStateForInitResponse - ], - ): Unit = ??? - - override def getTrafficStateForMember( - request: com.digitalasset.canton.sequencer.api.v30.GetTrafficStateForMemberRequest - ): scala.concurrent.Future[ - com.digitalasset.canton.sequencer.api.v30.GetTrafficStateForMemberResponse - ] = ??? 
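// Note on the credential assertions in these stubs: the API-info and
// sequencer-connect endpoints are called before authentication, so
// TestApiInfoServiceStub asserts that no CallCredentials are attached
// (options.getCredentials shouldBe null), whereas acknowledgeSigned above
// asserts the opposite. Together they let the tests check that the
// authentication hooks are wired onto exactly the calls that require them.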
- - override def build(channel: Channel, options: CallOptions): SequencerServiceStub = - new TestSequencerServiceStub(channel, options) - } - - def apiSvcFactory(channel: Channel): ApiInfoServiceStub = - new TestApiInfoServiceStub(channel) - - def sequencerConnectSvcFactory(channel: Channel): SequencerConnectServiceStub = - new TestSequencerConnectServiceStub(channel) - - def sequencerSvcFactory(channel: Channel): SequencerServiceStub = - new TestSequencerServiceStub(channel) - - private def nextResponse[T](responses: Iterator[Either[Exception, T]]): Future[T] = - if (responses.hasNext) responses.next().fold(Future.failed, Future.successful) - else Future.failed(Status.UNAVAILABLE.asRuntimeException()) - - def assertAllResponsesSent(): Assertion = { - withClue("API responses:")(apiResponses shouldBe empty) - withClue("Handshake responses:")(handshakeResponses shouldBe empty) - withClue("Synchronizer and sequencer ID responses:")( - synchronizerAndSeqIdResponses shouldBe empty - ) - withClue("Static synchronizer parameters responses:")( - staticParametersResponses shouldBe empty - ) - withClue("Acknowledge responses:")( - acknowledgeResponses shouldBe empty - ) - } - } - - object TestResponses { - def apply( - apiResponses: Seq[Either[Exception, v30.GetApiInfoResponse]] = Seq.empty, - handshakeResponses: Seq[Either[Exception, SequencerConnect.HandshakeResponse]] = Seq.empty, - synchronizerAndSeqIdResponses: Seq[ - Either[Exception, SequencerConnect.GetSynchronizerIdResponse] - ] = Seq.empty, - staticParametersResponses: Seq[ - Either[Exception, SequencerConnect.GetSynchronizerParametersResponse] - ] = Seq.empty, - acknowledgeResponses: Seq[ - Either[Exception, SequencerService.AcknowledgeSignedResponse] - ] = Seq.empty, - ): TestResponses = new TestResponses( - apiResponses.iterator, - handshakeResponses.iterator, - synchronizerAndSeqIdResponses.iterator, - staticParametersResponses.iterator, - acknowledgeResponses.iterator, - ) - } - - protected class TestSequencerConnectionXStubFactory( - testResponses: TestResponses, - loggerFactory: NamedLoggerFactory, - ) extends SequencerConnectionXStubFactory { - override def createStub(connection: ConnectionX)(implicit - ec: ExecutionContextExecutor - ): SequencerConnectionXStub = connection match { - case grpcConnection: GrpcConnectionX => - new GrpcSequencerConnectionXStub( - grpcConnection, - testResponses.apiSvcFactory, - testResponses.sequencerConnectSvcFactory, - ) - - case _ => throw new IllegalStateException(s"Connection type not supported: $connection") - } - - override def createUserStub(connection: ConnectionX, clientAuth: GrpcSequencerClientAuth)( - implicit - ec: ExecutionContextExecutor, - esf: ExecutionSequencerFactory, - materializer: Materializer, - ): UserSequencerConnectionXStub = - connection match { - case grpcConnection: GrpcConnectionX => - new GrpcUserSequencerConnectionXStub( - grpcConnection, - channel => clientAuth(testResponses.sequencerSvcFactory(channel)), - loggerFactory, - ) - - case _ => throw new IllegalStateException(s"Connection type not supported: $connection") - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/ConnectionValidationLimiterTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/ConnectionValidationLimiterTest.scala deleted file mode 100644 index ebe82c0e78..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/ConnectionValidationLimiterTest.scala +++ /dev/null @@ -1,124 
+0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing - -import cats.syntax.parallel.* -import com.digitalasset.canton.lifecycle.{ - FutureUnlessShutdown, - PromiseUnlessShutdown, - UnlessShutdown, -} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.wordspec.AnyWordSpec - -import java.util.concurrent.atomic.AtomicInteger - -class ConnectionValidationLimiterTest extends AnyWordSpec with BaseTest with HasExecutionContext { - "ConnectionValidationLimiter" should { - "limit to 2 validations in the presence of a burst" in { - val promises = Seq.fill(2)(PromiseUnlessShutdown.unsupervised[Unit]()) - val counter = new AtomicInteger(0) - - def mockValidate(traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - val myIndex = counter.getAndIncrement() - logger.debug(s"Running validation #$myIndex")(traceContext) - promises(myIndex).futureUS - } - - val validator = - new ConnectionValidationLimiter(mockValidate, futureSupervisor, loggerFactory) - - // Request a burst of validations - val fut = (1 to 42).toList.map(_ => validator.maybeValidate()(TraceContext.createNew("test"))) - - // Complete all validations - promises.foreach(_.outcome_(())) - - // Wait for all validation requests to complete - fut.parSequence.futureValueUS - - // We should have 2 validations - eventually()(counter.get shouldBe 2) - } - - "shut down the validations when shutdown" in { - val promises = Seq(PromiseUnlessShutdown.unsupervised[Unit]()) - val counter = new AtomicInteger(0) - - def mockValidate(traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - val myIndex = counter.getAndIncrement() - logger.debug(s"Running validation #$myIndex")(traceContext) - promises(myIndex).futureUS - } - - val validator = - new ConnectionValidationLimiter(mockValidate, futureSupervisor, loggerFactory) - - // Request two validations so one gets scheduled - val fut = (1 to 2).toList.map(_ => validator.maybeValidate()(TraceContext.createNew("test"))) - - // Shutdown the validator - validator.close() - - // All validation requests should be shutdown - forAll(fut.map(_.unwrap.futureValue))(_ shouldBe UnlessShutdown.AbortedDueToShutdown) - - // Only one validation has run - eventually()(counter.get shouldBe 1) - } - - "shut down a scheduled validation when the running validation is shutdown" in { - val promises = Seq(PromiseUnlessShutdown.unsupervised[Unit]()) - val counter = new AtomicInteger(0) - - def mockValidate(traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - val myIndex = counter.getAndIncrement() - logger.debug(s"Running validation #$myIndex")(traceContext) - promises(myIndex).futureUS - } - - val validator = - new ConnectionValidationLimiter(mockValidate, futureSupervisor, loggerFactory) - - // Request two validations so one gets scheduled - val fut = (1 to 2).toList.map(_ => validator.maybeValidate()(TraceContext.createNew("test"))) - - // Shutdown the first validation - promises(0).shutdown_() - - // All validation requests should be shutdown - forAll(fut.map(_.unwrap.futureValue))(_ shouldBe UnlessShutdown.AbortedDueToShutdown) - - // Only one validation has run - eventually()(counter.get shouldBe 1) - } - - "fails a scheduled validation when a validation throws" in { - val promises = Seq(PromiseUnlessShutdown.unsupervised[Unit]()) - val counter = new AtomicInteger(0) - - def 
mockValidate(traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - val myIndex = counter.getAndIncrement() - logger.debug(s"Running validation #$myIndex")(traceContext) - promises(myIndex).futureUS - } - - val validator = - new ConnectionValidationLimiter(mockValidate, futureSupervisor, loggerFactory) - - // Request two validations so one gets scheduled - val fut = (1 to 2).toList.map(_ => validator.maybeValidate()(TraceContext.createNew("test"))) - - // Fail the first validation - promises(0).failure(new Exception("boom")) - - // All validation requests should fail - forAll(fut.map(_.unwrap.failed.futureValue))(_.getMessage should include("boom")) - - // Only one validation has run - eventually()(counter.get shouldBe 1) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/DelayLoggerTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/DelayLoggerTest.scala deleted file mode 100644 index 88a70ffd92..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/DelayLoggerTest.scala +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing - -import com.daml.metrics.api.noop.NoOpGauge -import com.daml.metrics.api.{MetricInfo, MetricName, MetricQualification} -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.logging.LogEntry -import com.digitalasset.canton.logging.SuppressionRule.FullSuppression -import com.digitalasset.canton.time.{NonNegativeFiniteDuration, SimClock} -import org.scalatest.Assertion -import org.scalatest.wordspec.AnyWordSpec - -class DelayLoggerTest extends AnyWordSpec with BaseTest { - - "checkForDelay" should { - val clock = new SimClock(start = CantonTimestamp.now(), loggerFactory = loggerFactory) - val gauge = NoOpGauge[Long](MetricInfo(MetricName("test"), "", MetricQualification.Debug), 0L) - val delayLogger = - new DelayLogger(clock, logger, NonNegativeFiniteDuration.tryOfSeconds(1), gauge) - - def probe(delayMs: Long, assertion: Seq[LogEntry] => Assertion): Unit = { - val event = mock[PossiblyIgnoredProtocolEvent] - when(event.timestamp).thenReturn(clock.now.minusMillis(delayMs)) - loggerFactory.assertLogsSeq(FullSuppression)( - delayLogger.checkForDelay(event), - assertion, - ) - } - - "not log when we haven't caught up yet" in { - probe(2000, _ shouldBe empty) - } - "log success after we caught up" in { - probe(500, forEvery(_)(_.message should include("Caught up"))) - } - "log a warning if we are late again" in { - probe(2000, forEvery(_)(_.warningMessage should include("Late batch"))) - } - "don't log another warning if we are still late" in { - probe(2000, _ shouldBe empty) - } - "log a notification if the situation is resolved" in { - probe(500, forEvery(_)(_.message should include("Caught up"))) - } - "not log another notification" in { - probe(500, _ shouldBe empty) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/GeneratorsSequencing.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/GeneratorsSequencing.scala deleted file mode 100644 index 2a6ddbb7d9..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/GeneratorsSequencing.scala +++ /dev/null @@ -1,53 +0,0 @@ -// 
Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-package com.digitalasset.canton.sequencing
-
-import cats.syntax.either.*
-import com.daml.nonempty.NonEmpty
-import com.digitalasset.canton.config.RequireTypes.{Port, PositiveInt}
-import com.digitalasset.canton.networking.Endpoint
-import com.digitalasset.canton.topology.GeneratorsTopology
-import com.digitalasset.canton.{Generators, SequencerAlias}
-import magnolify.scalacheck.auto.genArbitrary
-import org.scalacheck.{Arbitrary, Gen}
-
-final class GeneratorsSequencing(generatorsTopology: GeneratorsTopology) {
-  import com.digitalasset.canton.config.GeneratorsConfig.*
-  import com.digitalasset.canton.Generators.*
-  import generatorsTopology.*
-
-  implicit val sequencerAliasArb: Arbitrary[SequencerAlias] = Arbitrary(
-    string255Arb.arbitrary.map(str =>
-      SequencerAlias.create(str.str).valueOr(err => throw new IllegalArgumentException(err))
-    )
-  )
-
-  implicit val endPointGen: Arbitrary[Endpoint] =
-    Arbitrary(for {
-      host <- Gen.alphaNumStr.filter(_.nonEmpty)
-      port <- Arbitrary.arbitrary[Port]
-    } yield Endpoint(host, port))
-
-  implicit val endPointsArb: Arbitrary[NonEmpty[Seq[Endpoint]]] =
-    Arbitrary(Generators.nonEmptySetGen[Endpoint].map(_.toSeq))
-
-  implicit val sequencerConnectionArb: Arbitrary[SequencerConnection] = genArbitrary
-  implicit val submissionRequestAmplificationArb: Arbitrary[SubmissionRequestAmplification] =
-    genArbitrary
-
-  implicit val sequencerConnectionsArb: Arbitrary[SequencerConnections] = Arbitrary(
-    for {
-      connections <- Generators
-        .nonEmptySetGen[SequencerConnection]
-        .map(_.toSeq)
-        .map(_.distinctBy(_.sequencerAlias))
-      sequencerTrustThreshold <- Gen.choose(1, connections.size).map(PositiveInt.tryCreate)
-      submissionRequestAmplification <- submissionRequestAmplificationArb.arbitrary
-    } yield SequencerConnections.tryMany(
-      connections,
-      sequencerTrustThreshold,
-      submissionRequestAmplification,
-    )
-  )
-}
diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/GrpcConnectionXTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/GrpcConnectionXTest.scala
deleted file mode 100644
index 6b5db0f93a..0000000000
--- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/GrpcConnectionXTest.scala
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
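// The sequencerConnectionsArb generator above draws the trust threshold only
// after the connections, from 1 to connections.size, so that
// SequencerConnections.tryMany never rejects a generated value. A
// self-contained sketch of this dependent-generator pattern, using simplified
// stand-in types instead of Canton's:

import org.scalacheck.{Arbitrary, Gen}

final case class Endpoint(host: String, port: Int)
final case class Connections(endpoints: List[Endpoint], trustThreshold: Int) {
  require(trustThreshold >= 1 && trustThreshold <= endpoints.size)
}

implicit val endpointArb: Arbitrary[Endpoint] = Arbitrary(
  for {
    host <- Gen.alphaNumStr.filter(_.nonEmpty)
    port <- Gen.choose(1, 65535)
  } yield Endpoint(host, port)
)

// Drawing the threshold from a range that depends on the already-generated
// list makes the invariant hold by construction.
implicit val connectionsArb: Arbitrary[Connections] = Arbitrary(
  for {
    endpoints <- Gen.nonEmptyListOf(Arbitrary.arbitrary[Endpoint]).map(_.distinctBy(_.host))
    threshold <- Gen.choose(1, endpoints.size)
  } yield Connections(endpoints, threshold)
)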
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing - -import com.digitalasset.canton.health.{HealthElement, HealthListener} -import com.digitalasset.canton.networking.grpc.GrpcError.GrpcServiceUnavailable -import com.digitalasset.canton.sequencing.ConnectionX.{ConnectionXError, ConnectionXState} -import com.digitalasset.canton.sequencing.SequencerConnectionXStub.SequencerConnectionXStubError -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.ResourceUtil -import com.digitalasset.canton.{BaseTest, FailOnShutdown, HasExecutionContext} -import io.grpc.Status -import org.scalatest.Assertion -import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.AnyWordSpec - -import scala.concurrent.blocking -import scala.concurrent.duration.DurationInt - -class GrpcConnectionXTest - extends AnyWordSpec - with BaseTest - with HasExecutionContext - with FailOnShutdown - with ConnectionPoolTestHelpers { - - "ConnectionX" should { - lazy val stubFactory = new SequencerConnectionXStubFactoryImpl(loggerFactory) - - "notify on state changes" in { - ResourceUtil.withResource(mkConnection()) { connection => - val listener = new TestHealthListener(connection.health) - connection.health.registerOnHealthChange(listener) - - connection.start() - listener.shouldStabilizeOn(ConnectionXState.Started) - - listener.clear() - connection.stop() - listener.shouldStabilizeOn(ConnectionXState.Stopped) - } - } - - "fail gRPC calls with invalid state if not started" in { - ResourceUtil.withResource(mkConnection()) { connection => - val stub = stubFactory.createStub(connection) - val result = stub.getApiName().futureValueUS - - inside(result) { - case Left( - SequencerConnectionXStubError.ConnectionError( - ConnectionXError.InvalidStateError(message) - ) - ) => - message should include("Connection is not started") - } - } - } - - "fail gRPC calls with gRPC error if there is no server" in { - ResourceUtil.withResource(mkConnection()) { connection => - connection.start() - - val stub = stubFactory.createStub(connection) - - val result = loggerFactory.assertLogs( - stub.getApiName().futureValueUS, - _.warningMessage should include("Request failed"), - ) - - inside(result) { - case Left( - SequencerConnectionXStubError.ConnectionError( - ConnectionXError.TransportError( - GrpcServiceUnavailable(_, _, status, _, _) - ) - ) - ) => - status.getCode shouldBe Status.Code.UNAVAILABLE - } - } - } - } - - private def mkConnection(): ConnectionX = { - val config = mkDummyConnectionConfig(0) - - GrpcConnectionX( - config, - timeouts, - loggerFactory, - ) - } -} - -class TestHealthListener(val element: HealthElement) extends HealthListener with Matchers { - import scala.collection.mutable - import BaseTest.eventuallyForever - - private val statesBuffer = mutable.ArrayBuffer[element.State]() - - def shouldStabilizeOn[T](state: T): Assertion = - // Check that we reach the given state, and remain on it - // The default 2 seconds is a bit short when machines are under heavy load - eventuallyForever(timeUntilSuccess = 10.seconds) { - statesBuffer.last shouldBe state - } - - def clear(): Unit = statesBuffer.clear() - - override def name: String = s"${element.name}-test-listener" - - override def poke()(implicit traceContext: TraceContext): Unit = blocking { - synchronized { - val state = element.getState - - statesBuffer += state - } - } -} diff --git 
a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/GrpcInternalSequencerConnectionXTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/GrpcInternalSequencerConnectionXTest.scala deleted file mode 100644 index c979fe7b6f..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/GrpcInternalSequencerConnectionXTest.scala +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing - -import com.digitalasset.canton.logging.{LogEntry, SuppressionRule} -import com.digitalasset.canton.sequencing.InternalSequencerConnectionX.{ - SequencerConnectionXError, - SequencerConnectionXState, -} -import com.digitalasset.canton.{BaseTest, FailOnShutdown, HasExecutionContext} -import org.scalatest.wordspec.AnyWordSpec -import org.slf4j.event.Level.INFO - -class GrpcInternalSequencerConnectionXTest - extends AnyWordSpec - with BaseTest - with HasExecutionContext - with FailOnShutdown - with ConnectionPoolTestHelpers { - - import ConnectionPoolTestHelpers.* - - "GrpcInternalSequencerConnectionX" should { - "be validated in the happy path" in { - val responses = TestResponses( - apiResponses = Seq(correctApiResponse), - handshakeResponses = Seq(successfulHandshake), - synchronizerAndSeqIdResponses = Seq(correctSynchronizerIdResponse1), - staticParametersResponses = Seq(correctStaticParametersResponse), - ) - withConnection(responses) { (connection, listener) => - connection.start().valueOrFail("start connection") - - listener.shouldStabilizeOn(SequencerConnectionXState.Validated) - connection.attributes shouldBe Some(correctConnectionAttributes) - - responses.assertAllResponsesSent() - } - } - - "refuse to start if it is in a fatal state" in { - val responses = TestResponses( - apiResponses = Seq(correctApiResponse), - handshakeResponses = Seq(failedHandshake), - ) - withConnection(responses) { (connection, listener) => - loggerFactory.assertLoggedWarningsAndErrorsSeq( - { - connection.start().valueOrFail("start connection") - - listener.shouldStabilizeOn(SequencerConnectionXState.Fatal) - }, - LogEntry.assertLogSeq( - Seq( - ( - _.warningMessage should include("Validation failure: Failed handshake"), - "Handshake fails", - ) - ) - ), - ) - - // Try to restart - inside(connection.start()) { - case Left(SequencerConnectionXError.InvalidStateError(message)) => - message shouldBe "The connection is in a fatal state and cannot be started" - } - - responses.assertAllResponsesSent() - } - } - - "fail validation if the returned API is not for a sequencer" in { - val responses = TestResponses( - apiResponses = Seq(incorrectApiResponse) - ) - withConnection(responses) { (connection, listener) => - loggerFactory.assertLoggedWarningsAndErrorsSeq( - { - connection.start().valueOrFail("start connection") - listener.shouldStabilizeOn(SequencerConnectionXState.Fatal) - connection.attributes shouldBe None - }, - LogEntry.assertLogSeq( - Seq( - ( - _.warningMessage should include("Validation failure: Bad API"), - "API response is invalid", - ) - ) - ), - ) - - responses.assertAllResponsesSent() - } - } - - "fail validation if the protocol handshake fails" in { - val responses = TestResponses( - apiResponses = Seq(correctApiResponse), - handshakeResponses = Seq(failedHandshake), - ) - withConnection(responses) { (connection, listener) => - 
loggerFactory.assertLoggedWarningsAndErrorsSeq( - { - connection.start().valueOrFail("start connection") - listener.shouldStabilizeOn(SequencerConnectionXState.Fatal) - connection.attributes shouldBe None - }, - LogEntry.assertLogSeq( - Seq( - ( - _.warningMessage should include("Validation failure: Failed handshake"), - "Protocol handshake fails", - ) - ) - ), - ) - - responses.assertAllResponsesSent() - } - } - - "retry if the server is unavailable during any request" in { - val responses = TestResponses( - apiResponses = Seq(failureUnavailable, correctApiResponse), - handshakeResponses = Seq(failureUnavailable, successfulHandshake), - synchronizerAndSeqIdResponses = Seq(failureUnavailable, correctSynchronizerIdResponse1), - staticParametersResponses = Seq(failureUnavailable, correctStaticParametersResponse), - ) - withConnection(responses) { (connection, listener) => - loggerFactory.assertLogsSeq(SuppressionRule.LevelAndAbove(INFO))( - { - connection.start().valueOrFail("start connection") - listener.shouldStabilizeOn(SequencerConnectionXState.Validated) - connection.attributes shouldBe Some(correctConnectionAttributes) - }, - forExactly(4, _) { - _.infoMessage should include("Waiting for 1ms before retrying...") - }, - ) - - responses.assertAllResponsesSent() - } - } - - "validate the connection attributes after restart" in { - val responses = TestResponses( - apiResponses = Seq.fill(2)(correctApiResponse), - handshakeResponses = Seq.fill(2)(successfulHandshake), - synchronizerAndSeqIdResponses = - Seq(correctSynchronizerIdResponse1, correctSynchronizerIdResponse2), - staticParametersResponses = Seq.fill(2)(correctStaticParametersResponse), - ) - withConnection(responses) { (connection, listener) => - connection.start().valueOrFail("start connection") - listener.shouldStabilizeOn(SequencerConnectionXState.Validated) - connection.attributes shouldBe Some(correctConnectionAttributes) - - listener.clear() - connection.fail("test") - listener.shouldStabilizeOn(SequencerConnectionXState.Stopped) - listener.clear() - - // A different identity triggers a warning and the connection never gets validated - loggerFactory.assertLoggedWarningsAndErrorsSeq( - { - connection.start().valueOrFail("start connection") - listener.shouldStabilizeOn(SequencerConnectionXState.Fatal) - // Synchronizer info does not change - connection.attributes shouldBe Some(correctConnectionAttributes) - }, - LogEntry.assertLogSeq( - Seq( - ( - _.warningMessage should include("Sequencer connection has changed attributes"), - "Different attributes after restart", - ) - ) - ), - ) - - responses.assertAllResponsesSent() - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/GrpcSequencerConnectionXTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/GrpcSequencerConnectionXTest.scala deleted file mode 100644 index 76ed1ddf0e..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/GrpcSequencerConnectionXTest.scala +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
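// The restart test above pins an invariant of the internal connection: once
// the connection attributes (synchronizer id, sequencer id, static
// parameters) have been validated, a reconnect that reports different
// attributes must drive the connection to a fatal state rather than silently
// adopting the new identity. A minimal sketch of that check, with
// hypothetical simplified types:

sealed trait ValidationOutcome
case object Validated extends ValidationOutcome
case object Fatal extends ValidationOutcome

final case class Attributes(synchronizerId: String, sequencerId: String)

final class AttributeGuard {
  private var cached: Option[Attributes] = None

  // The first validation caches the attributes; every later validation
  // compares against the cache and refuses to proceed on a mismatch.
  def validate(fresh: Attributes): ValidationOutcome = cached match {
    case None                        => cached = Some(fresh); Validated
    case Some(prev) if prev == fresh => Validated
    case Some(_)                     => Fatal // "connection has changed attributes"
  }
}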
-// SPDX-License-Identifier: Apache-2.0
-
-package com.digitalasset.canton.sequencing
-
-import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto
-import com.digitalasset.canton.sequencing.InternalSequencerConnectionX.SequencerConnectionXState
-import com.digitalasset.canton.sequencing.protocol.{AcknowledgeRequest, SignedContent}
-import com.digitalasset.canton.topology.ParticipantId
-import com.digitalasset.canton.{BaseTest, FailOnShutdown, HasExecutionContext}
-import org.scalatest.wordspec.AnyWordSpec
-
-class GrpcSequencerConnectionXTest
-    extends AnyWordSpec
-    with BaseTest
-    with HasExecutionContext
-    with FailOnShutdown
-    with ConnectionPoolTestHelpers {
-
-  import ConnectionPoolTestHelpers.*
-
-  "GrpcSequencerConnectionX" should {
-    "have authentication hooks" in {
-      val member = ParticipantId("test")
-
-      val responses = TestResponses(
-        apiResponses = Seq(correctApiResponse),
-        handshakeResponses = Seq(successfulHandshake),
-        synchronizerAndSeqIdResponses = Seq(correctSynchronizerIdResponse1),
-        staticParametersResponses = Seq(correctStaticParametersResponse),
-        acknowledgeResponses = Seq(positiveAcknowledgeResponse),
-      )
-      withConnection(responses) { case (internalConnection, listener) =>
-        internalConnection.start().valueOrFail("start connection")
-
-        listener.shouldStabilizeOn(SequencerConnectionXState.Validated)
-        internalConnection.attributes shouldBe Some(correctConnectionAttributes)
-
-        val connection =
-          internalConnection
-            .buildUserConnection(authConfig, testMember, testCrypto, wallClock)
-            .valueOrFail("make authenticated")
-
-        val acknowledgeRequest = AcknowledgeRequest(member, wallClock.now, testedProtocolVersion)
-        val signedAcknowledgeRequest = SignedContent(
-          acknowledgeRequest,
-          SymbolicCrypto.emptySignature,
-          None,
-          testedProtocolVersion,
-        )
-
-        // The test stub checks that the call carries the metadata indicating that the
-        // authentication hooks are properly set up. There is no better way to unit-test
-        // this, because the hooks are exercised only within the real gRPC implementation.
-        // It is a good-enough check: integration tests will quickly surface any issues.
-        connection
-          .acknowledgeSigned(signedAcknowledgeRequest, timeouts.network.unwrap)
-          .valueOrFail("acknowledge")
-          .futureValueUS shouldBe true
-
-        responses.assertAllResponsesSent()
-      }
-    }
-  }
-}
diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/SequencedEventMonotonicityCheckerTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/SequencedEventMonotonicityCheckerTest.scala
deleted file mode 100644
index 089ee1458e..0000000000
--- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/SequencedEventMonotonicityCheckerTest.scala
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
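// The checker exercised in the file below enforces two linked rules on a
// stream of sequenced events: timestamps must strictly increase, and each
// event's previousTimestamp must equal the timestamp actually observed before
// it. A compact sketch of the rule on plain values (simplified stand-in types):

final case class SeqEvent(timestamp: Long, previousTimestamp: Option[Long])

// Returns the first offending event, or None if the stream is monotonic and
// correctly chained to the optional starting timestamp.
def firstViolation(start: Option[Long], events: Seq[SeqEvent]): Option[SeqEvent] =
  events.foldLeft((start, Option.empty[SeqEvent])) {
    case ((prev, found @ Some(_)), _) => (prev, found)
    case ((prev, None), e) =>
      val chained    = e.previousTimestamp == prev
      val increasing = prev.forall(_ < e.timestamp)
      if (chained && increasing) (Some(e.timestamp), None) else (prev, Some(e))
  }._2

// Mirrors the test data below: two events with equal timestamps are rejected.
assert(
  firstViolation(
    Some(1L),
    Seq(SeqEvent(2L, Some(1L)), SeqEvent(2L, Some(2L))),
  ).contains(SeqEvent(2L, Some(2L)))
)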
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing - -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.sequencing.SequencedEventMonotonicityChecker.MonotonicityFailureException -import com.digitalasset.canton.sequencing.client.SequencedEventTestFixture -import com.digitalasset.canton.sequencing.protocol.ClosedEnvelope -import com.digitalasset.canton.time.SynchronizerTimeTracker -import com.digitalasset.canton.tracing.{TraceContext, Traced} -import com.digitalasset.canton.util.PekkoUtil.syntax.* -import com.digitalasset.canton.util.{ErrorUtil, ResourceUtil} -import com.digitalasset.canton.{ - BaseTest, - HasExecutionContext, - ProtocolVersionChecksFixtureAnyWordSpec, -} -import org.apache.pekko.stream.scaladsl.{Keep, Sink, Source} -import org.scalatest.Outcome -import org.scalatest.wordspec.FixtureAnyWordSpec - -import java.util.concurrent.atomic.AtomicReference - -class SequencedEventMonotonicityCheckerTest - extends FixtureAnyWordSpec - with BaseTest - with HasExecutionContext - with ProtocolVersionChecksFixtureAnyWordSpec { - import SequencedEventMonotonicityCheckerTest.* - - override protected type FixtureParam = SequencedEventTestFixture - - override protected def withFixture(test: OneArgTest): Outcome = - ResourceUtil.withResource( - new SequencedEventTestFixture( - loggerFactory, - testedProtocolVersion, - timeouts, - futureSupervisor, - ) - )(env => withFixture(test.toNoArgTest(env))) - - private def mkHandler(): CapturingApplicationHandler = new CapturingApplicationHandler - - "handler" should { - "pass through monotonically increasing events" in { env => - import env.* - - val checker = new SequencedEventMonotonicityChecker( - previousEventTimestamp = None, - loggerFactory, - ) - val handler = mkHandler() - val checkedHandler = checker.handler(handler) - val (batch1, batch2) = bobEvents.splitAt(2) - - checkedHandler(Traced(batch1)).futureValueUS.unwrap.futureValueUS - checkedHandler(Traced(batch2)).futureValueUS.unwrap.futureValueUS - handler.invocations.get.flatMap(_.value) shouldBe bobEvents - } - - "detect non-monotonic timestamps" in { env => - import env.* - - val event1 = createEvent( - timestamp = CantonTimestamp.ofEpochSecond(2), - previousTimestamp = Some(CantonTimestamp.ofEpochSecond(1)), - counter = 2L, - ).futureValueUS - val event2 = createEvent( - timestamp = CantonTimestamp.ofEpochSecond(2), - previousTimestamp = Some(CantonTimestamp.ofEpochSecond(2)), - counter = 3L, - ).futureValueUS - - val checker = new SequencedEventMonotonicityChecker( - previousEventTimestamp = Some(CantonTimestamp.ofEpochSecond(1)), - loggerFactory, - ) - val handler = mkHandler() - val checkedHandler = checker.handler(handler) - - checkedHandler(Traced(Seq(event1))).futureValueUS.unwrap.futureValueUS - loggerFactory.assertThrowsAndLogs[MonotonicityFailureException]( - checkedHandler(Traced(Seq(event2))).futureValueUS.unwrap.futureValueUS, - _.errorMessage should include(ErrorUtil.internalErrorMessage), - ) - } - } - - "flow" should { - "pass through monotonically increasing events" in { env => - import env.* - - val checker = new SequencedEventMonotonicityChecker( - previousEventTimestamp = None, - loggerFactory, - ) - val eventsF = Source(bobEvents) - .map(Right(_)) - .withUniqueKillSwitchMat()(Keep.left) - .via(checker.flow) - .toMat(Sink.seq)(Keep.right) - .run() - eventsF.futureValue.map(_.value) 
shouldBe bobEvents.map(Right(_)) - } - - "detect non-monotonic timestamps" in { env => - import env.* - - val event1 = createEvent( - timestamp = CantonTimestamp.ofEpochSecond(2), - previousTimestamp = Some(CantonTimestamp.ofEpochSecond(1)), - counter = 2L, - ).futureValueUS - val event2 = createEvent( - timestamp = CantonTimestamp.ofEpochSecond(2), - previousTimestamp = Some(CantonTimestamp.ofEpochSecond(2)), - counter = 3L, - ).futureValueUS - - val checker = new SequencedEventMonotonicityChecker( - previousEventTimestamp = Some(CantonTimestamp.ofEpochSecond(1)), - loggerFactory, - ) - val eventsF = loggerFactory.assertLogs( - Source(Seq(event1, event2)) - .map(Right(_)) - .withUniqueKillSwitchMat()(Keep.left) - .via(checker.flow) - .toMat(Sink.seq)(Keep.right) - .run(), - _.errorMessage should include( - "Timestamps do not increase monotonically or previous event timestamp does not match." - ), - ) - eventsF.futureValue.map(_.value) shouldBe Seq(Right(event1)) - } - } -} - -object SequencedEventMonotonicityCheckerTest { - class CapturingApplicationHandler() - extends ApplicationHandler[SequencedEnvelopeBox, ClosedEnvelope] { - val invocations = - new AtomicReference[Seq[BoxedEnvelope[SequencedEnvelopeBox, ClosedEnvelope]]](Seq.empty) - - override def name: String = "capturing-application-handler" - override def subscriptionStartsAt( - start: SubscriptionStart, - synchronizerTimeTracker: SynchronizerTimeTracker, - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = FutureUnlessShutdown.unit - - override def apply( - boxed: BoxedEnvelope[SequencedEnvelopeBox, ClosedEnvelope] - ): HandlerResult = { - invocations - .getAndUpdate(_ :+ boxed) - .discard[Seq[BoxedEnvelope[SequencedEnvelopeBox, ClosedEnvelope]]] - HandlerResult.done - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/SequencerAggregatorPekkoTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/SequencerAggregatorPekkoTest.scala deleted file mode 100644 index 33fd1960a4..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/SequencerAggregatorPekkoTest.scala +++ /dev/null @@ -1,717 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
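// The aggregator tests below revolve around a single rule: an event is
// emitted downstream only once `threshold` many distinct sequencers have
// delivered a matching copy, and the emitted event carries the signatures of
// all of them (which the tests normalize by sorting, since set ordering is
// unspecified). A sketch of that bucketing rule on plain data (simplified
// stand-in types):

final case class Delivery(sequencer: String, timestamp: Long, signature: String)

// Bucket deliveries by timestamp; a bucket is emitted once enough distinct
// sequencers contributed, with signatures sorted for deterministic comparison.
def emitted(deliveries: Seq[Delivery], threshold: Int): Seq[(Long, Seq[String])] =
  deliveries
    .groupBy(_.timestamp)
    .collect {
      case (ts, ds) if ds.map(_.sequencer).distinct.sizeIs >= threshold =>
        ts -> ds.map(_.signature).sorted
    }
    .toSeq
    .sortBy(_._1)

// With threshold 2: a timestamp seen by two sequencers is emitted, a
// timestamp seen by only one is withheld.
assert(
  emitted(
    Seq(
      Delivery("alice", 0L, "sigA"),
      Delivery("carlos", 0L, "sigC"),
      Delivery("carlos", 2L, "sigC"),
    ),
    threshold = 2,
  ) == Seq(0L -> Seq("sigA", "sigC"))
)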
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing - -import cats.data.EitherT -import cats.implicits.catsSyntaxOptionId -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto -import com.digitalasset.canton.crypto.{Fingerprint, Signature} -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.health.{AtomicHealthComponent, ComponentHealthState} -import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, HasRunOnClosing, OnShutdownRunner} -import com.digitalasset.canton.logging.TracedLogger -import com.digitalasset.canton.sequencing.SequencerAggregatorPekko.HasSequencerSubscriptionFactoryPekko -import com.digitalasset.canton.sequencing.SequencerAggregatorPekkoTest.Config -import com.digitalasset.canton.sequencing.client.* -import com.digitalasset.canton.sequencing.client.TestSequencerSubscriptionFactoryPekko.{ - Error, - Event, - Failure, -} -import com.digitalasset.canton.sequencing.client.TestSubscriptionError.UnretryableError -import com.digitalasset.canton.topology.{DefaultTestIdentities, SequencerId} -import com.digitalasset.canton.util.OrderedBucketMergeHub.{ - ActiveSourceTerminated, - DeadlockDetected, - DeadlockTrigger, - NewConfiguration, -} -import com.digitalasset.canton.util.{EitherTUtil, OrderedBucketMergeConfig, ResourceUtil} -import com.digitalasset.canton.version.ProtocolVersion -import com.digitalasset.canton.{ - BaseTest, - HasExecutionContext, - ProtocolVersionChecksFixtureAnyWordSpec, -} -import com.google.protobuf.ByteString -import org.apache.pekko.stream.scaladsl.{Keep, Source} -import org.apache.pekko.stream.testkit.scaladsl.TestSink -import org.apache.pekko.stream.{KillSwitches, QueueOfferResult} -import org.scalatest.Outcome -import org.scalatest.wordspec.FixtureAnyWordSpec - -import scala.concurrent.duration.DurationInt - -class SequencerAggregatorPekkoTest - extends FixtureAnyWordSpec - with BaseTest - with HasExecutionContext - with ProtocolVersionChecksFixtureAnyWordSpec { - - override protected type FixtureParam = SequencedEventTestFixture - - override protected def withFixture(test: OneArgTest): Outcome = - ResourceUtil.withResource( - new SequencedEventTestFixture( - loggerFactory, - testedProtocolVersion, - timeouts, - futureSupervisor, - ) - )(env => withFixture(test.toNoArgTest(env))) - - private val synchronizerId = DefaultTestIdentities.physicalSynchronizerId - - private def mkAggregatorPekko( - validator: SequencedEventValidator = SequencedEventValidator.noValidation( - DefaultTestIdentities.physicalSynchronizerId, - warn = false, - ) - )(implicit fixture: FixtureParam): SequencerAggregatorPekko = - new SequencerAggregatorPekko( - synchronizerId, - _ => validator, - PositiveInt.one, - fixture.subscriberCryptoApi.pureCrypto, - loggerFactory, - enableInvariantCheck = true, - ) - - private def fakeSignatureFor(name: String): Signature = - SymbolicCrypto.signature( - ByteString.EMPTY, - Fingerprint.tryFromString(name), - ) - - // Sort the signatures by the fingerprint of the key to get a deterministic ordering - private def normalize(event: SequencedSerializedEvent): SequencedSerializedEvent = - event.copy(signedEvent = - event.signedEvent.copy(signatures = - event.signedEvent.signatures.sortBy(_.authorizingLongTermKey.toProtoPrimitive) - ) - )(event.traceContext) - - private def mkEvents(startingTimestampO: Option[CantonTimestamp], amount: Long): Seq[Event] = { - val 
startTimestamp = startingTimestampO.getOrElse(CantonTimestamp.Epoch) - (0L until amount).map(i => Event(startTimestamp.addMicros(i))) - } - - private class TestAtomicHealthComponent(override val name: String) extends AtomicHealthComponent { - override protected def initialHealthState: ComponentHealthState = - ComponentHealthState.NotInitializedState - override protected def associatedHasRunOnClosing: HasRunOnClosing = - new OnShutdownRunner.PureOnShutdownRunner(logger) - override protected def logger: TracedLogger = SequencerAggregatorPekkoTest.this.logger - } - - "aggregator" should { - "pass through events from a single sequencer subscription" in { implicit fixture => - import fixture.* - - val aggregator = mkAggregatorPekko() - val factory = TestSequencerSubscriptionFactoryPekko(loggerFactory) - factory.add(mkEvents(startingTimestampO = None, 3)*) - - val config = OrderedBucketMergeConfig( - PositiveInt.one, - NonEmpty(Map, sequencerAlice -> Config("")(factory)), - ) - val configSource = - Source.single(config).concat(Source.never).viaMat(KillSwitches.single)(Keep.right) - - val ((killSwitch, (doneF, _health)), sink) = configSource - .viaMat(aggregator.aggregateFlow(Left(None)))(Keep.both) - .toMat(TestSink.probe)(Keep.both) - .run() - - sink.request(5) - sink.expectNext() shouldBe Left(NewConfiguration(config, None)) - sink.expectNext().value shouldBe - Event(CantonTimestamp.Epoch).asOrdinarySerializedEvent - sink.expectNext().value shouldBe - Event(CantonTimestamp.Epoch.addMicros(1L)).asOrdinarySerializedEvent - sink.expectNext().value shouldBe - Event(CantonTimestamp.Epoch.addMicros(2L)).asOrdinarySerializedEvent - sink.expectNoMessage() - killSwitch.shutdown() - sink.expectComplete() - doneF.futureValue - } - - "log the error when a subscription signals an error" in { implicit fixture => - import fixture.* - val aggregator = mkAggregatorPekko() - val factory = TestSequencerSubscriptionFactoryPekko(loggerFactory) - factory.add(Error(UnretryableError)) - val config = OrderedBucketMergeConfig( - PositiveInt.one, - NonEmpty(Map, sequencerAlice -> Config("")(factory)), - ) - val configSource = - Source.single(config).concat(Source.never).viaMat(KillSwitches.single)(Keep.right) - - val ((killSwitch, (doneF, _health)), sink) = loggerFactory.assertLogs( - { - val (handle, sink) = configSource - .viaMat(aggregator.aggregateFlow(Left(None)))(Keep.both) - .toMat(TestSink.probe)(Keep.both) - .run() - - sink.request(5) - sink.expectNext() shouldBe Left(NewConfiguration(config, None)) - sink.expectNext() shouldBe Left(ActiveSourceTerminated(sequencerAlice, None)) - sink.expectNext() shouldBe Left( - DeadlockDetected(Seq.empty, DeadlockTrigger.ActiveSourceTermination) - ) - - (handle, sink) - }, - _.warningMessage should include( - s"Sequencer subscription for $sequencerAlice failed with $UnretryableError" - ), - _.errorMessage should include( - s"Sequencer subscription for synchronizer $synchronizerId is now stuck. Needs operator intervention to reconfigure the sequencer connections." 
- ), - ) - killSwitch.shutdown() - sink.expectComplete() - doneF.futureValue - } - - "propagate the exception from a subscription" in { implicit fixture => - import fixture.* - val aggregator = mkAggregatorPekko() - val factory = TestSequencerSubscriptionFactoryPekko(loggerFactory) - val ex = new Exception("Alice subscription failure") - factory.add(Failure(ex)) - val config = OrderedBucketMergeConfig( - PositiveInt.one, - NonEmpty(Map, sequencerAlice -> Config("")(factory)), - ) - val configSource = - Source.single(config).concat(Source.never).viaMat(KillSwitches.single)(Keep.right) - - val ((killSwitch, (doneF, _health)), sink) = loggerFactory.assertLogs( - { - val (handle, sink) = configSource - .viaMat(aggregator.aggregateFlow(Left(None)))(Keep.both) - .toMat(TestSink.probe)(Keep.both) - .run() - - sink.request(5) - sink.expectNext() shouldBe Left(NewConfiguration(config, None)) - sink.expectNext() shouldBe Left(ActiveSourceTerminated(sequencerAlice, Some(ex))) - sink.expectNext() shouldBe Left( - DeadlockDetected(Seq.empty, DeadlockTrigger.ActiveSourceTermination) - ) - - (handle, sink) - }, - _.errorMessage should include(s"Sequencer subscription for $sequencerAlice failed"), - _.errorMessage should include( - s"Sequencer subscription for synchronizer $synchronizerId is now stuck. Needs operator intervention to reconfigure the sequencer connections." - ), - ) - killSwitch.shutdown() - sink.expectComplete() - doneF.futureValue - } - - "support reconfiguration for single sequencers" in { implicit fixture => - import fixture.* - - val aggregator = mkAggregatorPekko() - val ((source, (doneF, _health)), sink) = Source - .queue[OrderedBucketMergeConfig[SequencerId, Config]](1) - .viaMat(aggregator.aggregateFlow(Left(None)))(Keep.both) - .toMat(TestSink.probe)(Keep.both) - .run() - - val factory = TestSequencerSubscriptionFactoryPekko(loggerFactory) - factory.add(mkEvents(startingTimestampO = None, 3)*) - factory.add(mkEvents(startingTimestampO = Some(CantonTimestamp.Epoch.addMicros(2L)), 3)*) - val config1 = OrderedBucketMergeConfig( - PositiveInt.one, - NonEmpty(Map, sequencerAlice -> Config("V1")(factory)), - ) - val config2 = OrderedBucketMergeConfig( - PositiveInt.one, - NonEmpty(Map, sequencerAlice -> Config("V2")(factory)), - ) - source.offer(config1) shouldBe QueueOfferResult.Enqueued - sink.request(10) - sink.expectNext() shouldBe Left(NewConfiguration(config1, None)) - sink.expectNext().value shouldBe - Event(CantonTimestamp.Epoch).asOrdinarySerializedEvent - sink.expectNext().value shouldBe - Event(CantonTimestamp.Epoch.addMicros(1L)).asOrdinarySerializedEvent - sink.expectNext().value shouldBe - Event(CantonTimestamp.Epoch.addMicros(2)).asOrdinarySerializedEvent - sink.expectNoMessage() - source.offer(config2) shouldBe QueueOfferResult.Enqueued - sink.expectNext() shouldBe Left( - NewConfiguration(config2, CantonTimestamp.Epoch.addMicros(2L).some) - ) - sink.expectNext().value shouldBe - Event(CantonTimestamp.Epoch.addMicros(3)).asOrdinarySerializedEvent - sink.expectNext().value shouldBe - Event(CantonTimestamp.Epoch.addMicros(4)).asOrdinarySerializedEvent - sink.expectNoMessage() - source.complete() - sink.expectComplete() - doneF.futureValue - } - - "pass through the event only if sufficiently many sequencer IDs send it" in { - implicit fixture => - import fixture.* - - val aggregator = mkAggregatorPekko() - - val factoryAlice = TestSequencerSubscriptionFactoryPekko(loggerFactory) - val factoryBob = TestSequencerSubscriptionFactoryPekko(loggerFactory) - val factoryCarlos = 
TestSequencerSubscriptionFactoryPekko(loggerFactory) - - val signatureAlice = fakeSignatureFor("Alice") - val signatureBob = fakeSignatureFor("Bob") - val signatureCarlos = fakeSignatureFor("Carlos") - - val events = mkEvents(startingTimestampO = None, 3) - factoryAlice.add(events.take(1).map(_.copy(signatures = NonEmpty(Set, signatureAlice)))*) - factoryBob.add(events.slice(1, 2).map(_.copy(signatures = NonEmpty(Set, signatureBob)))*) - factoryCarlos.add(events.take(3).map(_.copy(signatures = NonEmpty(Set, signatureCarlos)))*) - - val config = OrderedBucketMergeConfig( - PositiveInt.tryCreate(2), - NonEmpty( - Map, - sequencerAlice -> Config("Alice")(factoryAlice), - sequencerBob -> Config("Bob")(factoryBob), - sequencerCarlos -> Config("Carlos")(factoryCarlos), - ), - ) - val configSource = - Source.single(config).concat(Source.never).viaMat(KillSwitches.single)(Keep.right) - - val ((killSwitch, (doneF, _health)), sink) = configSource - .viaMat(aggregator.aggregateFlow(Left(None)))(Keep.both) - .toMat(TestSink.probe)(Keep.both) - .run() - - sink.request(4) - sink.expectNext() shouldBe Left(NewConfiguration(config, None)) - normalize(sink.expectNext().value) shouldBe normalize( - Event( - timestamp = CantonTimestamp.Epoch, - NonEmpty(Set, signatureAlice, signatureCarlos), - ).asOrdinarySerializedEvent - ) - normalize(sink.expectNext().value) shouldBe normalize( - Event( - timestamp = CantonTimestamp.Epoch.addMicros(1L), - NonEmpty(Set, signatureBob, signatureCarlos), - ).asOrdinarySerializedEvent - ) - sink.expectNoMessage() - killSwitch.shutdown() - sink.expectComplete() - doneF.futureValue - } - - "support reconfiguring the threshold and sequencers" in { implicit fixture => - import fixture.* - - val validator = new SequencedEventValidatorImpl( - DefaultTestIdentities.physicalSynchronizerId, - testedProtocolVersion, - subscriberCryptoApi, - loggerFactory, - timeouts, - ) { - override protected def verifySignature( - priorEventO: Option[ProcessingSerializedEvent], - event: SequencedSerializedEvent, - sequencerId: SequencerId, - protocolVersion: ProtocolVersion, - ): EitherT[FutureUnlessShutdown, SequencedEventValidationError[Nothing], Unit] = - EitherTUtil.unitUS - } - - val initialTimestamp = CantonTimestamp.Epoch.addMicros(10L) - val aggregator = mkAggregatorPekko(validator) - val ((source, (doneF, health_)), sink) = Source - .queue[OrderedBucketMergeConfig[SequencerId, Config]](1) - .viaMat( - aggregator.aggregateFlow(Right(Event(initialTimestamp).asOrdinarySerializedEvent)) - )(Keep.both) - .toMat(TestSink.probe)(Keep.both) - .run() - - val factoryAlice = TestSequencerSubscriptionFactoryPekko(loggerFactory) - val factoryBob = TestSequencerSubscriptionFactoryPekko(loggerFactory) - val factoryCarlos = TestSequencerSubscriptionFactoryPekko(loggerFactory) - - val config1 = OrderedBucketMergeConfig( - PositiveInt.tryCreate(2), - NonEmpty( - Map, - sequencerAlice -> Config("Alice")(factoryAlice), - sequencerBob -> Config("BobV1")(factoryBob), - ), - ) - // Keep Alice, reconfigure Bob, add Carlos - val config2 = OrderedBucketMergeConfig( - PositiveInt.tryCreate(2), - NonEmpty( - Map, - sequencerAlice -> Config("Alice")(factoryAlice), - sequencerBob -> Config("BobV2")(factoryBob), - sequencerCarlos -> Config("Carlos")(factoryCarlos), - ), - ) - - val signatureAlice = fakeSignatureFor("Alice") - val signatureBob = fakeSignatureFor("Bob") - val signatureCarlos = fakeSignatureFor("Carlos") - - val events = mkEvents(Some(initialTimestamp), 4) - val events1 = events.take(2) - // alice reports 
events 10,11,12,13 - factoryAlice.add(events.map(_.copy(signatures = NonEmpty(Set, signatureAlice)))*) - // bob reports events 10,11 - factoryBob.add(events1.map(_.copy(signatures = NonEmpty(Set, signatureBob)))*) - - // events - val events2 = events.drop(1) - // bob reports events 12,13 - factoryBob.add(events2.drop(1).map(_.copy(signatures = NonEmpty(Set, signatureBob)))*) - // carlos reports events 11,12 - factoryCarlos.add( - events2.take(2).map(_.copy(signatures = NonEmpty(Set, signatureCarlos)))* - ) - - source.offer(config1) shouldBe QueueOfferResult.Enqueued - - sink.request(10) - sink.expectNext() shouldBe Left(NewConfiguration(config1, initialTimestamp.some)) - normalize(sink.expectNext().value) shouldBe normalize( - Event( - initialTimestamp.addMicros(1L), - NonEmpty(Set, signatureAlice, signatureBob), - ).asOrdinarySerializedEvent - ) - sink.expectNoMessage() - loggerFactory.assertLogs( - { - source.offer(config2) shouldBe QueueOfferResult.Enqueued - sink.expectNext() shouldBe Left( - NewConfiguration(config2, initialTimestamp.addMicros(1L).some) - ) - val outputs = - Set(sink.expectNext(), sink.expectNext()).map(_.map(normalize)) - val expected = Set( - Left(ActiveSourceTerminated(sequencerBob, None)), - Right( - Event(initialTimestamp.addMicros(2L), NonEmpty(Set, signatureAlice, signatureCarlos)) - ), - ).map(_.map(event => normalize(event.asOrdinarySerializedEvent))) - outputs shouldBe expected - }, - _.errorMessage should include(ResilientSequencerSubscription.ForkHappened.id), - _.warningMessage should include(s"Sequencer subscription for $sequencerBob failed with"), - ) - source.complete() - sink.expectComplete() - doneF.futureValue - } - - "forward health signal for a single sequencer" in { implicit fixture => - import fixture.* - - val aggregator = mkAggregatorPekko() - val health = new TestAtomicHealthComponent("forward-health-signal-test") - val factory = new TestSequencerSubscriptionFactoryPekko(health, loggerFactory) - - factory.add((0L to 2L).map(offset => Event(CantonTimestamp.Epoch.addMicros(offset)))*) - factory.add(Error(UnretryableError)) - val config = OrderedBucketMergeConfig( - PositiveInt.one, - NonEmpty(Map, sequencerAlice -> Config("")(factory)), - ) - val configSource = - Source.single(config).concat(Source.never).viaMat(KillSwitches.single)(Keep.right) - - val ((killSwitch, (doneF, reportedHealth)), sink) = configSource - .viaMat(aggregator.aggregateFlow(Left(None)))(Keep.both) - .toMat(TestSink.probe)(Keep.both) - .run() - - reportedHealth.getState shouldBe ComponentHealthState.NotInitializedState - - sink.request(10) - health.resolveUnhealthy() - sink.expectNext() shouldBe Left(NewConfiguration(config, None)) - sink.expectNext() shouldBe Right(Event(CantonTimestamp.Epoch).asOrdinarySerializedEvent) - - eventually() { - reportedHealth.getState shouldBe ComponentHealthState.Ok() - } - - health.degradationOccurred("some degradation") - eventually() { - reportedHealth.getState shouldBe ComponentHealthState.degraded("some degradation") - } - - health.failureOccurred("some failure") - eventually() { - reportedHealth.getState shouldBe ComponentHealthState.failed("some failure") - } - - sink.expectNext() shouldBe Right( - Event(CantonTimestamp.Epoch.addMicros(1L)).asOrdinarySerializedEvent - ) - - health.resolveUnhealthy() - eventually() { - reportedHealth.getState shouldBe ComponentHealthState.Ok() - } - - killSwitch.shutdown() - doneF.futureValue - } - - "aggregate health signal for multiple sequencers" in { implicit fixture => - import fixture.* - - val 
aggregator = mkAggregatorPekko() - val healthAlice = new TestAtomicHealthComponent("health-signal-alice") - val healthBob = new TestAtomicHealthComponent("health-signal-bob") - val healthCarlos = new TestAtomicHealthComponent("health-signal-carlos") - - val factoryAlice = new TestSequencerSubscriptionFactoryPekko(healthAlice, loggerFactory) - val factoryBob = new TestSequencerSubscriptionFactoryPekko(healthBob, loggerFactory) - val factoryCarlos = new TestSequencerSubscriptionFactoryPekko(healthCarlos, loggerFactory) - - factoryAlice.add((0L to 2L).map(offset => Event(CantonTimestamp.Epoch.addMicros(offset)))*) - factoryBob.add((0L to 2L).map(offset => Event(CantonTimestamp.Epoch.addMicros(offset)))*) - factoryCarlos.add((0L to 2L).map(offset => Event(CantonTimestamp.Epoch.addMicros(offset)))*) - - val config = OrderedBucketMergeConfig( - PositiveInt.tryCreate(2), - NonEmpty( - Map, - sequencerAlice -> Config("")(factoryAlice), - sequencerBob -> Config("")(factoryBob), - sequencerCarlos -> Config("")(factoryCarlos), - ), - ) - val configSource = - Source.single(config).concat(Source.never).viaMat(KillSwitches.single)(Keep.right) - - val ((killSwitch, (doneF, reportedHealth)), sink) = configSource - .viaMat(aggregator.aggregateFlow(Left(None)))(Keep.both) - .toMat(TestSink.probe)(Keep.both) - .run() - - reportedHealth.getState shouldBe ComponentHealthState.NotInitializedState - - sink.request(10) - Seq(healthAlice, healthBob, healthCarlos).foreach(_.resolveUnhealthy()) - - sink.expectNext() shouldBe Left(NewConfiguration(config, None)) - - eventually() { - reportedHealth.getState shouldBe ComponentHealthState.Ok() - } - - healthAlice.failureOccurred("Alice failed") - // We still have threshold many sequencers that are healthy, so the subscription is healthy overall - always(durationOfSuccess = 100.milliseconds) { - reportedHealth.getState shouldBe ComponentHealthState.Ok() - } - - healthBob.degradationOccurred("Bob degraded") - eventually() { - reportedHealth.getState shouldBe ComponentHealthState.degraded( - s"Failed sequencer subscriptions for [$sequencerAlice]. Degraded sequencer subscriptions for [$sequencerBob]." - ) - } - - healthAlice.resolveUnhealthy() - eventually() { - reportedHealth.getState shouldBe ComponentHealthState.Ok() - } - - healthAlice.degradationOccurred("Alice degraded") - eventually() { - reportedHealth.getState shouldBe ComponentHealthState.degraded( - s"Degraded sequencer subscriptions for [$sequencerBob, $sequencerAlice]." - ) - } - - healthBob.failureOccurred("Bob failed") - healthCarlos.failureOccurred("Carlos failed") - eventually() { - reportedHealth.getState shouldBe ComponentHealthState.failed( - s"Failed sequencer subscriptions for [$sequencerBob, $sequencerCarlos]. Degraded sequencer subscriptions for [$sequencerAlice]." - ) - } - - healthAlice.resolveUnhealthy() - eventually() { - reportedHealth.getState shouldBe ComponentHealthState.failed( - s"Failed sequencer subscriptions for [$sequencerBob, $sequencerCarlos]." 
- ) - } - - killSwitch.shutdown() - doneF.futureValue - - eventually() { - reportedHealth.getState shouldBe ComponentHealthState.failed( - s"Disconnected from synchronizer $synchronizerId" - ) - } - } - - "become unhealthy upon deadlock notifications" in { implicit fixture => - import fixture.* - - val aggregator = mkAggregatorPekko() - - val factoryAlice1 = - TestSequencerSubscriptionFactoryPekko(loggerFactory.append("factory", "alice-1")) - val factoryBob1 = - TestSequencerSubscriptionFactoryPekko(loggerFactory.append("factory", "bob-1")) - val factoryCarlos = - TestSequencerSubscriptionFactoryPekko(loggerFactory.append("factory", "carlos")) - - val signatureAlice = fakeSignatureFor("Alice") - val signatureBob = fakeSignatureFor("Bob") - val signatureCarlos = fakeSignatureFor("Carlos") - - factoryAlice1.add( - mkEvents(startingTimestampO = None, 1) - .map(_.copy(signatures = NonEmpty(Set, signatureAlice)))* - ) - factoryBob1.add( - mkEvents(startingTimestampO = None, 1) - .map(_.copy(signatures = NonEmpty(Set, signatureBob)))* - ) - factoryCarlos.add( - mkEvents(startingTimestampO = Some(CantonTimestamp.Epoch.addMicros(3L)), 1) - .map(_.copy(signatures = NonEmpty(Set, signatureCarlos)))* - ) - - val config1 = OrderedBucketMergeConfig( - PositiveInt.tryCreate(2), - NonEmpty( - Map, - sequencerAlice -> Config("1")(factoryAlice1), - sequencerBob -> Config("1")(factoryBob1), - sequencerCarlos -> Config("1")(factoryCarlos), - ), - ) - - val ((configSource, (doneF, reportedHealth)), sink) = Source - .queue[OrderedBucketMergeConfig[SequencerId, Config]](1) - .viaMat(aggregator.aggregateFlow(Left(None)))(Keep.both) - .toMat(TestSink.probe)(Keep.both) - .run() - configSource.offer(config1) shouldBe QueueOfferResult.Enqueued - - reportedHealth.getState shouldBe ComponentHealthState.NotInitializedState - - sink.request(10) - sink.expectNext() shouldBe Left(NewConfiguration(config1, None)) - normalize(sink.expectNext().value) shouldBe - normalize( - Event( - CantonTimestamp.Epoch, - NonEmpty(Set, signatureAlice, signatureBob), - ).asOrdinarySerializedEvent - ) - - eventually() { - reportedHealth.getState shouldBe ComponentHealthState.Ok() - } - - // Now reconfigure Alice and Bob to get into deadlock: each source provides a different next sequencer counter - clue("Create a deadlock") { - val factoryAlice2 = - TestSequencerSubscriptionFactoryPekko(loggerFactory.append("factory", "alice-2")) - val factoryBob2 = - TestSequencerSubscriptionFactoryPekko(loggerFactory.append("factory", "bob-2")) - - factoryAlice2.add( - mkEvents(startingTimestampO = Some(CantonTimestamp.Epoch.addMicros(1L)), 1)* - ) - factoryBob2.add( - mkEvents(startingTimestampO = Some(CantonTimestamp.Epoch.addMicros(2L)), 1)* - ) - val config2 = OrderedBucketMergeConfig( - PositiveInt.tryCreate(2), - NonEmpty( - Map, - sequencerAlice -> Config("2")(factoryAlice2), - sequencerBob -> Config("2")(factoryBob2), - sequencerCarlos -> Config("1")(factoryCarlos), - ), - ) - loggerFactory.assertLogs( - { - configSource.offer(config2) - sink.expectNext() shouldBe Left(NewConfiguration(config2, CantonTimestamp.Epoch.some)) - inside(sink.expectNext()) { - case Left(DeadlockDetected(elems, DeadlockTrigger.ElementBucketing)) => - elems should have size 3 - } - eventually() { - reportedHealth.getState shouldBe ComponentHealthState.failed( - s"Sequencer subscriptions have diverged and cannot reach the threshold 2 for synchronizer $synchronizerId any more." 
- ) - } - }, - _.errorMessage should include( - s"Sequencer subscriptions have diverged and cannot reach the threshold for synchronizer $synchronizerId any more." - ), - ) - } - - clue("Resolve deadlock through reconfiguration") { - val config3 = OrderedBucketMergeConfig( - PositiveInt.tryCreate(1), - NonEmpty( - Map, - sequencerCarlos -> Config("1")(factoryCarlos), - ), - ) - configSource.offer(config3) shouldBe QueueOfferResult.Enqueued - sink.expectNext() shouldBe Left(NewConfiguration(config3, CantonTimestamp.Epoch.some)) - sink.expectNext().value shouldBe - Event( - CantonTimestamp.Epoch.addMicros(3L), - NonEmpty(Set, signatureCarlos), - ).asOrdinarySerializedEvent - - eventually() { - reportedHealth.getState shouldBe ComponentHealthState.Ok() - } - } - - configSource.complete() - sink.expectComplete() - doneF.futureValue - - } - } -} - -object SequencerAggregatorPekkoTest { - final case class Config(id: String)( - override val subscriptionFactory: SequencerSubscriptionFactoryPekko[TestSubscriptionError] - ) extends HasSequencerSubscriptionFactoryPekko[TestSubscriptionError] -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/SequencerConnectionTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/SequencerConnectionTest.scala deleted file mode 100644 index 2b69cb17c7..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/SequencerConnectionTest.scala +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing - -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.config.RequireTypes.Port -import com.digitalasset.canton.networking.Endpoint -import com.digitalasset.canton.topology.DefaultTestIdentities -import com.digitalasset.canton.{BaseTest, SequencerAlias} -import com.google.protobuf.ByteString -import org.scalatest.wordspec.AnyWordSpec - -class SequencerConnectionTest extends AnyWordSpec with BaseTest { - import SequencerConnectionTest.* - - "SequencerConnection.merge" should { - "merge grpc connection endpoints" in { - SequencerConnection.merge(Seq(grpc1, grpc2)) shouldBe Right(grpcMerged) - } - "fail with empty connections" in { - SequencerConnection.merge(Seq.empty) shouldBe Left( - "There must be at least one sequencer connection defined" - ) - } - "grpc connection alone remains unchanged" in { - SequencerConnection.merge(Seq(grpc1)) shouldBe Right(grpc1) - } - } -} - -object SequencerConnectionTest { - def endpoint(n: Int) = Endpoint(s"host$n", Port.tryCreate(100 * n)) - - val grpc1 = GrpcSequencerConnection( - NonEmpty(Seq, endpoint(1), endpoint(2)), - transportSecurity = false, - Some(ByteString.copyFromUtf8("certificates")), - SequencerAlias.Default, - None, - ) - val grpc2 = GrpcSequencerConnection( - NonEmpty(Seq, endpoint(3), endpoint(4)), - transportSecurity = false, - None, - SequencerAlias.Default, - Some(DefaultTestIdentities.sequencerId), - ) - val grpcMerged = GrpcSequencerConnection( - NonEmpty(Seq, endpoint(1), endpoint(2), endpoint(3), endpoint(4)), - transportSecurity = false, - Some(ByteString.copyFromUtf8("certificates")), - SequencerAlias.Default, - Some(DefaultTestIdentities.sequencerId), - ) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPoolConfigTest.scala 
b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPoolConfigTest.scala deleted file mode 100644 index c7ecc892ec..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPoolConfigTest.scala +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing - -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.sequencing.SequencerConnectionXPool.SequencerConnectionXPoolError -import com.digitalasset.canton.{BaseTest, FailOnShutdown, HasExecutionContext} -import org.scalatest.wordspec.AnyWordSpec - -class SequencerConnectionXPoolConfigTest - extends AnyWordSpec - with BaseTest - with HasExecutionContext - with FailOnShutdown - with ConnectionPoolTestHelpers { - - "SequencerConnectionXPoolConfig" when { - "validating" should { - "check the trust threshold" in { - val config = mkPoolConfig( - nbConnections = PositiveInt.tryCreate(3), - trustThreshold = PositiveInt.tryCreate(5), - ) - - inside(config.validate) { - case Left(SequencerConnectionXPoolError.InvalidConfigurationError(message)) => - message shouldBe "Trust threshold (5) must not exceed the number of connections (3)" - - } - } - - "check uniqueness of connection names" in { - val config = mkPoolConfig( - nbConnections = PositiveInt.tryCreate(3), - trustThreshold = PositiveInt.tryCreate(3), - ) - val invalidConfig = config.copy(connections = - config.connections :+ mkDummyConnectionConfig(0, endpointIndexO = Some(100)) - ) - - inside(invalidConfig.validate) { - case Left(SequencerConnectionXPoolError.InvalidConfigurationError(message)) => - message shouldBe """Connection name "test-0" is used for more than one connection""" - } - } - - "check uniqueness of endpoints" in { - val config = mkPoolConfig( - nbConnections = PositiveInt.tryCreate(3), - trustThreshold = PositiveInt.tryCreate(3), - ) - val invalidConfig = config.copy(connections = - config.connections :+ mkDummyConnectionConfig(3, endpointIndexO = Some(1)) - ) - - inside(invalidConfig.validate) { - case Left(SequencerConnectionXPoolError.InvalidConfigurationError(message)) => - message shouldBe """Connection endpoint "does-not-exist-1:0" is used for more than one connection""" - } - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPoolImplTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPoolImplTest.scala deleted file mode 100644 index 44b441c1c0..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPoolImplTest.scala +++ /dev/null @@ -1,488 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing - -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} -import com.digitalasset.canton.health.ComponentHealthState -import com.digitalasset.canton.logging.{LogEntry, SuppressionRule} -import com.digitalasset.canton.sequencing.SequencerConnectionXPool.SequencerConnectionXPoolError -import com.digitalasset.canton.topology.SequencerId -import com.digitalasset.canton.util.LoggerUtil -import com.digitalasset.canton.{BaseTest, FailOnShutdown, HasExecutionContext, config} -import org.scalatest.Assertion -import org.scalatest.wordspec.AnyWordSpec -import org.slf4j.event.Level.WARN - -import scala.concurrent.duration.* - -class SequencerConnectionXPoolImplTest - extends AnyWordSpec - with BaseTest - with HasExecutionContext - with FailOnShutdown - with ConnectionPoolTestHelpers { - - import ConnectionPoolTestHelpers.* - - "SequencerConnectionXPool" should { - "initialize in the happy path" in { - withConnectionPool( - nbConnections = PositiveInt.tryCreate(3), - trustThreshold = PositiveInt.tryCreate(3), - index => mkConnectionAttributes(synchronizerIndex = 1, sequencerIndex = index), - ) { (pool, createdConnections, listener) => - val initializedF = pool.start() - - clue("Normal start") { - initializedF.futureValueUS.valueOrFail("initialization") - listener.shouldStabilizeOn(ComponentHealthState.Ok()) - pool.nbSequencers shouldBe NonNegativeInt.tryCreate(3) - pool.physicalSynchronizerId shouldBe Some(testSynchronizerId(1)) - } - - clue("Stop connections non-fatally") { - (0 to 1).foreach(createdConnections(_).fail(reason = "test")) - // Connections are restarted - listener.shouldStabilizeOn(ComponentHealthState.Ok()) - } - - clue("Stop connections fatally") { - (0 to 1).foreach(createdConnections(_).fatal(reason = "test")) - // Connections are not restarted - listener.shouldStabilizeOn( - ComponentHealthState.degraded( - "only 1 connection(s) to different sequencers available, trust threshold = 3" - ) - ) - pool.nbSequencers shouldBe NonNegativeInt.one - } - - createdConnections(2).fatal(reason = "test") - listener.shouldStabilizeOn(ComponentHealthState.failed("no connection available")) - } - } - - "signal when initialization times out" in { - val testTimeout = config.NonNegativeDuration.tryFromDuration(2.seconds) - - // TODO(i24790): update this test as it will no longer timeout when the check is implemented - - withConnectionPool( - nbConnections = PositiveInt.tryCreate(3), - trustThreshold = PositiveInt.tryCreate(2), - // 3 connections all on the same sequencer ID -- the trust threshold of 2 is unreachable - _ => mkConnectionAttributes(synchronizerIndex = 1, sequencerIndex = 1), - testTimeouts = timeouts.copy(sequencerInfo = testTimeout), - ) { case (pool, _createdConnections, listener) => - inside(pool.start().futureValueUS) { - case Left(SequencerConnectionXPoolError.TimeoutError(message)) => - message shouldBe s"Connection pool failed to initialize within ${LoggerUtil - .roundDurationForHumans(testTimeout.duration)}" - } - listener.shouldStabilizeOn(ComponentHealthState.failed("Component is closed")) - } - } - - "take into account an expected physical synchronizer ID" in { - withConnectionPool( - nbConnections = PositiveInt.tryCreate(10), - trustThreshold = PositiveInt.tryCreate(2), - { - case index if index < 8 => - mkConnectionAttributes(synchronizerIndex = 2, sequencerIndex = index) - case index => mkConnectionAttributes(synchronizerIndex = 
1, sequencerIndex = index) - - }, - expectedSynchronizerIdO = Some(testSynchronizerId(1)), - ) { case (pool, _createdConnections, _listener) => - loggerFactory.assertEventuallyLogsSeq(SuppressionRule.LevelAndAbove(WARN))( - { - pool.start().futureValueUS.valueOrFail("initialization") - pool.nbSequencers shouldBe NonNegativeInt.tryCreate(2) - }, - logEntries => { - // All 8 connections on the wrong synchronizer should be rejected either before or after - // the threshold is reached. - // There can be more than 1 warning for a given connection if it is poked more than once when validated. - - forAll(logEntries) { entry => - forExactly( - 1, - Seq( - // Connections validated before the threshold is reached - badSynchronizerAssertion(goodSynchronizerId = 1, badSynchronizerId = 2), - // Connections validated after the threshold is reached - badBootstrapAssertion(goodSynchronizerId = 1, badSynchronizerId = 2), - ), - )(assertion => assertion(entry)) - } - - // The 8 connections must be represented - val rx = raw"(?s).* internal-sequencer-connection-test-(\d+) .*".r - logEntries.collect { - _.warningMessage match { case rx(number) => number } - }.distinct should have size 8 - }, - ) - - pool.physicalSynchronizerId shouldBe Some(testSynchronizerId(1)) - - // Changing the expected synchronizer is not supported - inside( - pool - .updateConfig(pool.config.copy(expectedPSIdO = Some(testSynchronizerId(2)))) - ) { case Left(SequencerConnectionXPoolError.InvalidConfigurationError(error)) => - error shouldBe "The expected physical synchronizer ID can only be changed during a node restart." - } - } - } - - "handle configuration changes: add and remove connections" in { - withConnectionPool( - nbConnections = PositiveInt.tryCreate(5), - trustThreshold = PositiveInt.tryCreate(5), - index => mkConnectionAttributes(synchronizerIndex = 1, sequencerIndex = index), - ) { case (pool, createdConnections, _listener) => - pool.start().futureValueUS.valueOrFail("initialization") - - pool.nbSequencers shouldBe NonNegativeInt.tryCreate(5) - pool.physicalSynchronizerId shouldBe Some(testSynchronizerId(1)) - - val initialConnections = createdConnections.snapshotAndClear() - - val newConnectionConfigs = (11 to 13).map(mkDummyConnectionConfig(_)) - pool - .updateConfig( - pool.config.copy(connections = - NonEmpty.from(pool.config.connections.drop(2) ++ newConnectionConfigs).value - ) - ) - .valueOrFail("update config") - createdConnections should have size 3 - - eventually() { - contentsShouldEqual( - pool.contents, - Map( - testSequencerId(2) -> Set(initialConnections(2)), - testSequencerId(3) -> Set(initialConnections(3)), - testSequencerId(4) -> Set(initialConnections(4)), - testSequencerId(11) -> Set(createdConnections(11)), - testSequencerId(12) -> Set(createdConnections(12)), - testSequencerId(13) -> Set(createdConnections(13)), - ), - ) - } - } - } - - "handle configuration changes: change the endpoint of a connection" in { - withConnectionPool( - nbConnections = PositiveInt.tryCreate(3), - trustThreshold = PositiveInt.tryCreate(3), - index => mkConnectionAttributes(synchronizerIndex = 1, sequencerIndex = index), - ) { case (pool, createdConnections, _listener) => - pool.start().futureValueUS.valueOrFail("initialization") - - pool.nbSequencers shouldBe NonNegativeInt.tryCreate(3) - pool.physicalSynchronizerId shouldBe Some(testSynchronizerId(1)) - - val initialConnections = createdConnections.snapshotAndClear() - - val newConnectionConfig = mkDummyConnectionConfig(0, endpointIndexO = Some(4)) - pool - .updateConfig( 
- pool.config.copy(connections = - NonEmpty - .from(pool.config.connections.drop(1) :+ newConnectionConfig) - .value - ) - ) - .valueOrFail("update config") - createdConnections should have size 1 - createdConnections(0) should not be initialConnections(0) - createdConnections(0).config shouldBe newConnectionConfig - - eventually() { - contentsShouldEqual( - pool.contents, - Map( - testSequencerId(0) -> Set(createdConnections(0)), - testSequencerId(1) -> Set(initialConnections(1)), - testSequencerId(2) -> Set(initialConnections(2)), - ), - ) - } - } - } - - "handle configuration changes: change the trust threshold" in { - withConnectionPool( - nbConnections = PositiveInt.tryCreate(8), - trustThreshold = PositiveInt.tryCreate(4), - // 6 connections on a synchronizer, spread among 3 sequencers - // 2 connections on a different synchronizer, spread among 2 sequencers - { - case index if index < 6 => - mkConnectionAttributes(synchronizerIndex = 1, sequencerIndex = index / 2) - case index => mkConnectionAttributes(synchronizerIndex = 2, sequencerIndex = index) - }, - ) { case (pool, _createdConnections, _listener) => - val initializedF = pool.start() - - clue("Unreachable threshold") { - // A threshold of 4 cannot be reached - always() { - pool.nbSequencers shouldBe NonNegativeInt.zero - } - } - - clue("Increased threshold still unreachable") { - // Increasing the threshold does not help - pool - .updateConfig(pool.config.copy(trustThreshold = PositiveInt.tryCreate(5))) - .valueOrFail("update config") - pool.config.trustThreshold shouldBe PositiveInt.tryCreate(5) - - always() { - pool.nbSequencers shouldBe NonNegativeInt.zero - } - } - - clue("Reduced threshold ambiguous") { - inside( - pool - .updateConfig(pool.config.copy(trustThreshold = PositiveInt.tryCreate(1))) - ) { case Left(SequencerConnectionXPoolError.InvalidConfigurationError(error)) => - error should include("Please configure a higher trust threshold") - } - } - - clue("Reachable threshold") { - // A threshold of 3 can be reached with a single group, and the other connections generate warnings - loggerFactory.assertLoggedWarningsAndErrorsSeq( - { - pool - .updateConfig(pool.config.copy(trustThreshold = PositiveInt.tryCreate(3))) - .valueOrFail("update config") - pool.config.trustThreshold shouldBe PositiveInt.tryCreate(3) - - initializedF.futureValueUS.valueOrFail("initialization") - - pool.nbSequencers shouldBe NonNegativeInt.tryCreate(3) - pool.nbConnections shouldBe NonNegativeInt.tryCreate(6) - }, - LogEntry.assertLogSeq( - Seq( - ( - badBootstrapAssertion(goodSynchronizerId = 1, badSynchronizerId = 2), - "bad bootstrap", - ) - ) - ), - ) - } - } - } - - "handle configuration changes: removed connections are ignored when checking new threshold" in { - withConnectionPool( - nbConnections = PositiveInt.tryCreate(8), - trustThreshold = PositiveInt.tryCreate(6), - // 5 connections on synchronizer 1 - // 3 connections on synchronizer 2 - { - case index if index < 5 => - mkConnectionAttributes(synchronizerIndex = 1, sequencerIndex = index) - case index => mkConnectionAttributes(synchronizerIndex = 2, sequencerIndex = index) - }, - ) { case (pool, _createdConnections, _listener) => - val initializedF = pool.start() - - // A threshold of 6 cannot be reached - always() { - pool.nbSequencers shouldBe NonNegativeInt.zero - } - - // Remove 2 connections on synchronizer 1, add 2 connections on synchronizer 2, lower the threshold to 5 - // -> 3 connections on synchronizer 1 - // 5 connections on synchronizer 2 - 
loggerFactory.assertLoggedWarningsAndErrorsSeq( - { - val newConnectionConfigs = (8 to 9).map(mkDummyConnectionConfig(_)) - pool.updateConfig( - pool.config.copy( - trustThreshold = PositiveInt.tryCreate(5), - connections = NonEmpty - .from(pool.config.connections.drop(2) ++ newConnectionConfigs) - .value, - ) - ) - - initializedF.futureValueUS.valueOrFail("initialization") - - // Threshold should be reached on synchronizer 2 - pool.physicalSynchronizerId.value shouldBe testSynchronizerId(2) - pool.nbSequencers shouldBe NonNegativeInt.tryCreate(5) - }, - LogEntry.assertLogSeq( - Seq( - ( - badBootstrapAssertion(goodSynchronizerId = 2, badSynchronizerId = 1), - "bad bootstrap", - ) - ) - ), - ) - } - } - - "provide connections as requested" in { - withConnectionPool( - nbConnections = PositiveInt.tryCreate(6), - trustThreshold = PositiveInt.tryCreate(3), - { - case 0 | 1 | 2 => mkConnectionAttributes(synchronizerIndex = 1, sequencerIndex = 1) - case 3 | 4 => mkConnectionAttributes(synchronizerIndex = 1, sequencerIndex = 2) - case 5 => mkConnectionAttributes(synchronizerIndex = 1, sequencerIndex = 3) - }, - ) { case (pool, createdConnections, _listener) => - pool.start().futureValueUS.valueOrFail("initialization") - - pool.nbSequencers shouldBe NonNegativeInt.tryCreate(3) - pool.physicalSynchronizerId shouldBe Some(testSynchronizerId(1)) - eventually() { - pool.nbConnections shouldBe NonNegativeInt.tryCreate(6) - } - - val createdConfigs = (0 to 5).map(createdConnections.apply).map(_.config) - - clue("one connection per sequencer") { - val received = pool.getConnections(PositiveInt.three, exclusions = Set.empty) - - received.map(_.attributes.sequencerId) shouldBe Set( - testSequencerId(1), - testSequencerId(2), - testSequencerId(3), - ) - - val receivedConfigs = received.map(_.config) - - receivedConfigs.intersect(createdConfigs.slice(0, 3).toSet) should have size 1 - receivedConfigs.intersect(createdConfigs.slice(3, 5).toSet) should have size 1 - receivedConfigs.intersect(createdConfigs.slice(5, 6).toSet) should have size 1 - } - - clue("round robin") { - val exclusions = Set(testSequencerId(2), testSequencerId(3)) - val received1 = pool.getConnections(PositiveInt.one, exclusions) - val received2 = pool.getConnections(PositiveInt.one, exclusions) - val received3 = pool.getConnections(PositiveInt.one, exclusions) - - Set(received1, received2, received3).map(_.loneElement.config) shouldBe - Set(createdConfigs(0), createdConfigs(1), createdConfigs(2)) - - pool.getConnections(PositiveInt.one, exclusions) shouldBe received1 - } - - clue("request too many") { - val received = pool.getConnections(PositiveInt.tryCreate(5), exclusions = Set.empty) - received should have size 3 - } - - clue("stop and start") { - val exclusions = Set(testSequencerId(1), testSequencerId(3)) - - pool.getConnections(PositiveInt.three, exclusions) should have size 1 - - val connectionsOnSeq2 = Set(createdConnections(3), createdConnections(4)) - connectionsOnSeq2.foreach(_.fail(reason = "test")) - - eventually() { - // Both connections have been restarted and can be obtained - val received = - connectionsOnSeq2.map(_ => pool.getConnections(PositiveInt.three, exclusions)) - forAll(received)(_ should have size 1) - received.flatten.map(_.config) shouldBe connectionsOnSeq2.map(_.config) - } - - connectionsOnSeq2.foreach(_.fatal(reason = "test")) - eventuallyForever() { - // Connections don't get restarted - pool.getConnections(PositiveInt.three, exclusions) should have size 0 - } - } - } - } - - "initialize when there is a 
consensus on bootstrap info" in { - withConnectionPool( - nbConnections = PositiveInt.tryCreate(3), - trustThreshold = PositiveInt.tryCreate(2), - { - case 0 => mkConnectionAttributes(synchronizerIndex = 1, sequencerIndex = 1) - case 1 => mkConnectionAttributes(synchronizerIndex = 2, sequencerIndex = 1) - case 2 => mkConnectionAttributes(synchronizerIndex = 2, sequencerIndex = 2) - }, - ) { case (pool, _createdConnections, _listener) => - loggerFactory.assertLoggedWarningsAndErrorsSeq( - { - pool.start().futureValueUS.valueOrFail("initialization") - pool.nbSequencers shouldBe NonNegativeInt.tryCreate(2) - - eventually() { - // Wait until the bad bootstrap has been logged - loggerFactory.numberOfRecordedEntries shouldBe 1 - } - pool.physicalSynchronizerId shouldBe Some(testSynchronizerId(2)) - - pool.close() - eventually() { - pool.nbSequencers shouldBe NonNegativeInt.zero - } - }, - LogEntry.assertLogSeq( - Seq( - ( - badBootstrapAssertion(goodSynchronizerId = 2, badSynchronizerId = 1), - "bad bootstrap", - ) - ) - ), - ) - } - } - } - - private def badBootstrapAssertion( - goodSynchronizerId: Int, - badSynchronizerId: Int, - ): LogEntry => Assertion = - (logEntry: LogEntry) => - logEntry.warningMessage should fullyMatch regex - raw"(?s)Connection internal-sequencer-connection-test-\d+ has invalid bootstrap info:" + - raw" expected BootstrapInfo\(test-synchronizer-$goodSynchronizerId::namespace.*," + - raw" got BootstrapInfo\(test-synchronizer-$badSynchronizerId::namespace.*" - - private def badSynchronizerAssertion( - goodSynchronizerId: Int, - badSynchronizerId: Int, - ): LogEntry => Assertion = - (logEntry: LogEntry) => - logEntry.warningMessage should fullyMatch regex - raw"(?s)Connection internal-sequencer-connection-test-\d+ is not on expected synchronizer:" + - raw" expected Some\(test-synchronizer-$goodSynchronizerId::namespace::$testedProtocolVersion-0\)," + - raw" got test-synchronizer-$badSynchronizerId::namespace.*" - - private def contentsShouldEqual( - contents: Map[SequencerId, Set[SequencerConnectionX]], - created: Map[SequencerId, Set[InternalSequencerConnectionX]], - ): Assertion = { - // Compare based on connection configs - val first = contents.view.mapValues(_.map(_.config)).toMap - val second = created.view.mapValues(_.map(_.config)).toMap - first shouldBe second - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionPoolTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionPoolTest.scala deleted file mode 100644 index 71f92a7aa8..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionPoolTest.scala +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing - -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} -import com.digitalasset.canton.health.ComponentHealthState -import com.digitalasset.canton.{BaseTest, FailOnShutdown, HasExecutionContext} -import org.scalatest.wordspec.AnyWordSpec - -class SequencerSubscriptionPoolTest - extends AnyWordSpec - with BaseTest - with HasExecutionContext - with FailOnShutdown - with ConnectionPoolTestHelpers { - "SequencerSubscriptionPool" should { - "initialize in the happy path" in { - val nbConnections = PositiveInt.tryCreate(10) - val trustThreshold = PositiveInt.tryCreate(5) - val livenessMargin = NonNegativeInt.tryCreate(2) - - withConnectionAndSubscriptionPools( - nbConnections = nbConnections, - trustThreshold = trustThreshold, - index => mkConnectionAttributes(synchronizerIndex = 1, sequencerIndex = index), - livenessMargin = livenessMargin, - ) { (connectionPool, subscriptionPool, listener) => - connectionPool.start().futureValueUS.valueOrFail("initialization") - subscriptionPool.start() - - clue("normal start") { - listener.shouldStabilizeOn(ComponentHealthState.Ok()) - subscriptionPool.nbSubscriptions shouldBe (trustThreshold + livenessMargin).toNonNegative - } - val initialPool = subscriptionPool.subscriptions.toSeq - - clue("Fail connections") { - initialPool.foreach(_.connection.fail(reason = "test")) - - // Connections eventually come back - eventually() { - subscriptionPool.nbSubscriptions shouldBe (trustThreshold + livenessMargin).toNonNegative - } - } - } - } - - "handle configuration changes" in { - val nbConnections = PositiveInt.tryCreate(10) - val trustThreshold = PositiveInt.tryCreate(5) - val livenessMargin = NonNegativeInt.tryCreate(3) - - withConnectionAndSubscriptionPools( - nbConnections = nbConnections, - trustThreshold = trustThreshold, - index => mkConnectionAttributes(synchronizerIndex = 1, sequencerIndex = index), - livenessMargin = livenessMargin, - ) { (connectionPool, subscriptionPool, listener) => - connectionPool.start().futureValueUS.valueOrFail("initialization") - subscriptionPool.start() - - listener.shouldStabilizeOn(ComponentHealthState.Ok()) - subscriptionPool.nbSubscriptions shouldBe (trustThreshold + livenessMargin).toNonNegative - - clue("Increase liveness margin") { - val newLivenessMargin = NonNegativeInt.tryCreate(4) - val newConfig = - subscriptionPool.config.copy(livenessMargin = newLivenessMargin) - subscriptionPool.updateConfig(newConfig) - - eventually() { - subscriptionPool.nbSubscriptions shouldBe (trustThreshold + newLivenessMargin).toNonNegative - } - } - - clue("Decrease liveness margin") { - val newLivenessMargin = NonNegativeInt.tryCreate(1) - val newConfig = - subscriptionPool.config.copy(livenessMargin = newLivenessMargin) - subscriptionPool.updateConfig(newConfig) - - // eventuallyForever() to check we don't remove more than necessary - eventuallyForever() { - subscriptionPool.nbSubscriptions shouldBe (trustThreshold + newLivenessMargin).toNonNegative - } - } - } - } - - "request connections as needed" in { - val nbConnections = PositiveInt.tryCreate(10) - val trustThreshold = PositiveInt.tryCreate(5) - val livenessMargin = NonNegativeInt.tryCreate(2) - - withConnectionAndSubscriptionPools( - nbConnections = nbConnections, - trustThreshold = trustThreshold, - index => mkConnectionAttributes(synchronizerIndex = 1, sequencerIndex = index), - livenessMargin = livenessMargin, - ) { (connectionPool, subscriptionPool, listener) => - 
connectionPool.start().futureValueUS.valueOrFail("initialization") - subscriptionPool.start() - - clue("normal start") { - listener.shouldStabilizeOn(ComponentHealthState.Ok()) - subscriptionPool.nbSubscriptions shouldBe (trustThreshold + livenessMargin).toNonNegative - } - val initialPool = subscriptionPool.subscriptions.toSeq - val initialConnections = initialPool.map(_.connection.name).toSet - - clue("Gradually remove 3 active connections") { - initialPool.take(3).zipWithIndex.foreach { case (subscription, index) => - subscription.connection.fatal(reason = "test") - eventually() { - subscriptionPool.nbSubscriptions shouldBe (trustThreshold + livenessMargin).toNonNegative - // New connections have been obtained - val currentConnections = subscriptionPool.subscriptions.map(_.connection.name) - val delta = initialConnections.diff(currentConnections) - // The newly obtained connections are different - delta should have size (index.toLong + 1) - } - } - } - - clue("Remove 1 more to go below the active threshold") { - initialPool(3).connection.fatal(reason = "test") - - listener.shouldStabilizeOn( - ComponentHealthState.degraded( - "below liveness margin: 6 subscription(s) available, trust threshold = 5, liveness margin = 2" - ) - ) - subscriptionPool.nbSubscriptions shouldBe NonNegativeInt.tryCreate(6) - } - - clue("Remove 2 more to go below the trust threshold") { - initialPool.slice(4, 6).foreach(_.connection.fatal(reason = "test")) - - listener.shouldStabilizeOn( - ComponentHealthState.failed( - "only 4 subscription(s) available, trust threshold = 5" - ) - ) - subscriptionPool.nbSubscriptions shouldBe NonNegativeInt.tryCreate(4) - } - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/SequencerTestUtils.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/SequencerTestUtils.scala deleted file mode 100644 index 9bdebd66c1..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/SequencerTestUtils.scala +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.sequencing.protocol.SequencerErrors.SubmissionRequestRefused -import com.digitalasset.canton.sequencing.protocol.{ - Batch, - ClosedEnvelope, - Deliver, - DeliverError, - MessageId, - SequencedEvent, - SequencerDeliverError, - SignedContent, -} -import com.digitalasset.canton.sequencing.traffic.TrafficReceipt -import com.digitalasset.canton.serialization.ProtocolVersionedMemoizedEvidence -import com.digitalasset.canton.topology.{DefaultTestIdentities, PhysicalSynchronizerId} -import com.google.protobuf.ByteString - -object SequencerTestUtils extends BaseTest { - - object MockMessageContent { - private val bytes = ByteString.copyFromUtf8("serialized-mock-message") - def toByteString: ByteString = bytes - } - - def sign[M <: ProtocolVersionedMemoizedEvidence](content: M): SignedContent[M] = - SignedContent(content, SymbolicCrypto.emptySignature, None, testedProtocolVersion) - - @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) - def mockDeliverClosedEnvelope( - timestamp: CantonTimestamp = CantonTimestamp.Epoch, - synchronizerId: PhysicalSynchronizerId = DefaultTestIdentities.physicalSynchronizerId, - deserializedFrom: Option[ByteString] = None, - messageId: Option[MessageId] = Some(MessageId.tryCreate("mock-deliver")), - topologyTimestampO: Option[CantonTimestamp] = None, - previousTimestamp: Option[CantonTimestamp] = None, - ): Deliver[ClosedEnvelope] = { - val batch = Batch.empty(testedProtocolVersion) - - val deliver = Deliver.create[ClosedEnvelope]( - previousTimestamp, - timestamp, - synchronizerId, - messageId, - batch, - topologyTimestampO, - testedProtocolVersion, - Option.empty[TrafficReceipt], - ) - - deserializedFrom match { - case Some(bytes) => - // Somehow ugly way to tweak the `deserializedFrom` attribute of Deliver - SequencedEvent - .fromProtoV30(deliver.toProtoV30)(bytes) - .value - .asInstanceOf[Deliver[ClosedEnvelope]] - - case None => deliver - } - } - - def mockDeliver( - timestamp: CantonTimestamp = CantonTimestamp.Epoch, - previousTimestamp: Option[CantonTimestamp] = None, - synchronizerId: PhysicalSynchronizerId = DefaultTestIdentities.physicalSynchronizerId, - messageId: Option[MessageId] = Some(MessageId.tryCreate("mock-deliver")), - topologyTimestampO: Option[CantonTimestamp] = None, - trafficReceipt: Option[TrafficReceipt] = None, - ): Deliver[Nothing] = { - val batch = Batch.empty(testedProtocolVersion) - Deliver.create[Nothing]( - previousTimestamp, - timestamp, - synchronizerId, - messageId, - batch, - topologyTimestampO, - BaseTest.testedProtocolVersion, - trafficReceipt, - ) - } - - def mockDeliverError( - timestamp: CantonTimestamp = CantonTimestamp.Epoch, - synchronizerId: PhysicalSynchronizerId = DefaultTestIdentities.physicalSynchronizerId, - messageId: MessageId = MessageId.tryCreate("mock-deliver"), - sequencerError: SequencerDeliverError = SubmissionRequestRefused("mock-submission-refused"), - trafficReceipt: Option[TrafficReceipt] = None, - ): DeliverError = - DeliverError.create( - None, - timestamp, - synchronizerId, - messageId, - sequencerError, - BaseTest.testedProtocolVersion, - trafficReceipt, - ) - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationTokenTest.scala 
b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationTokenTest.scala deleted file mode 100644 index 2e0d729802..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationTokenTest.scala +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing.authentication - -import com.digitalasset.canton.BaseTestWordSpec -import com.google.protobuf.ByteString - -import java.nio.charset.Charset - -class AuthenticationTokenTest extends BaseTestWordSpec { - - "AuthenticationTokens" can { - "be compared to each other" in { - val byteString1 = ByteString.copyFrom("abcdabcdabcdabcdabcd", Charset.defaultCharset()) - val byteString1Bis = ByteString.copyFrom("abcdabcdabcdabcdabcd", Charset.defaultCharset()) - val byteString1Longer = - ByteString.copyFrom("abcdabcdabcdabcdabcdabcd", Charset.defaultCharset()) - val byteString2 = ByteString.copyFrom("defgdefgdefgdefgdefg", Charset.defaultCharset()) - - AuthenticationToken(byteString1) shouldBe AuthenticationToken(byteString1) - AuthenticationToken(byteString1) shouldBe AuthenticationToken(byteString1Bis) - AuthenticationToken(byteString1) should not be AuthenticationToken(byteString1Longer) - AuthenticationToken(byteString1) should not be AuthenticationToken(byteString2) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/authentication/grpc/AuthenticationTokenManagerTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/authentication/grpc/AuthenticationTokenManagerTest.scala deleted file mode 100644 index d810d4e4b5..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/authentication/grpc/AuthenticationTokenManagerTest.scala +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing.authentication.grpc - -import cats.data.EitherT -import cats.implicits.* -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.{ - FutureUnlessShutdown, - PromiseUnlessShutdown, - UnlessShutdown, -} -import com.digitalasset.canton.sequencing.authentication.{ - AuthenticationToken, - AuthenticationTokenManagerConfig, -} -import com.digitalasset.canton.time.Clock -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import io.grpc.Status -import org.mockito.ArgumentMatchersSugar -import org.scalatest.wordspec.AnyWordSpec - -import java.util.concurrent.atomic.{AtomicInteger, AtomicReference} - -object AuthenticationTokenManagerTest extends org.mockito.MockitoSugar with ArgumentMatchersSugar { - val mockClock: Clock = mock[Clock] - when(mockClock.scheduleAt(any[CantonTimestamp => Unit], any[CantonTimestamp])) - .thenReturn(FutureUnlessShutdown.unit) -} - -class AuthenticationTokenManagerTest extends AnyWordSpec with BaseTest with HasExecutionContext { - - private val crypto = new SymbolicPureCrypto - private val token1 = AuthenticationToken.generate(crypto) - private val token2 = AuthenticationToken.generate(crypto) - private val now = CantonTimestamp.Epoch - - "first call to getToken will obtain it" in { - val (tokenManager, mock, _) = setup() - - mock.succeed(token1) - - for { - _ <- tokenManager.getToken - } yield mock.callCount shouldBe 1 - }.failOnShutdown.futureValue - - "multiple calls to getToken before obtain has completed will return pending" in { - val (tokenManager, mock, _) = setup() - - val call1 = tokenManager.getToken - val call2 = tokenManager.getToken - - mock.succeed(token1) - - for { - result1 <- call1.value.map(_.value) - result2 <- call2.value.map(_.value) - } yield { - mock.callCount shouldBe 1 - result1 shouldEqual result2 - } - }.failOnShutdown.futureValue - - "getToken after error will cause refresh" in { - val (tokenManager, mock, _) = setup() - - for { - error1 <- loggerFactory.suppressWarningsAndErrors { - val call1 = tokenManager.getToken - mock.error("uh oh") - call1.value.map(_.left.value) - } - _ = error1.getDescription shouldBe "uh oh" - _ = mock.resetNextResult() - call2 = tokenManager.getToken - _ = mock.succeed(token1) - result2 <- call2.value.map(_.value) - } yield { - result2 shouldBe token1 - } - }.failOnShutdown.futureValue - - "invalidateToken will cause obtain to be called on next call" in { - val (tokenManager, mock, _) = setup() - - mock.succeed(token1) - - for { - result1 <- tokenManager.getToken - _ = { - mock.resetNextResult() - tokenManager.invalidateToken(result1) - mock.succeed(token2) - } - result2 <- tokenManager.getToken - } yield { - result1 shouldBe token1 - result2 shouldBe token2 - mock.callCount shouldBe 2 - } - }.failOnShutdown.futureValue - - "invalidateToken with a different token won't cause a refresh" in { - val (tokenManager, mock, _) = setup() - - mock.succeed(token1) - - for { - result1 <- tokenManager.getToken.value.map(_.value) - _ = { - tokenManager.invalidateToken(token2) - } - result2 <- tokenManager.getToken.value.map(_.value) - } yield { - result1 shouldBe result2 - mock.callCount shouldBe 1 // despite invalidation - } - }.failOnShutdown.futureValue - - "automatically renew token in due time" in { - val clockMock = mock[Clock] - val retryMe = new 
AtomicReference[Option[CantonTimestamp => Unit]](None) - when(clockMock.scheduleAt(any[CantonTimestamp => Unit], any[CantonTimestamp])) - .thenAnswer[CantonTimestamp => Unit, CantonTimestamp] { case (action, _) => - retryMe.getAndUpdate(_ => Some(action)) shouldBe empty - FutureUnlessShutdown.unit - } - - val (tokenManager, obtainMock, _) = setup(Some(clockMock)) - val call1 = clue("get token1") { - tokenManager.getToken - } - clue("succeed with token1")(obtainMock.succeed(token1)) - - for { - // wait for token to succeed - t1 <- call1 - _ = retryMe.get() should not be empty - _ = obtainMock.resetNextResult() - // now, invoke the scheduled renewal - _ = retryMe.get().value.apply(CantonTimestamp.Epoch) - // obtain intermediate result - t2f = tokenManager.getToken - // satisfy this request - _ = obtainMock.succeed(token2) - t3 <- tokenManager.getToken - t2 <- t2f - } yield { - t1 shouldBe token1 - t2 shouldBe token2 - t3 shouldBe token2 - } - }.failOnShutdown.futureValue - - "getToken after failure will cause refresh" in { - val (tokenManager, mock, _) = setup() - - for { - error1 <- loggerFactory.suppressWarningsAndErrors { - val call1 = tokenManager.getToken - mock.fail(new RuntimeException("uh oh")) - - call1.value.failed.map(_.getMessage) - } - _ = error1 shouldBe "uh oh" - _ = mock.resetNextResult() - call2 = tokenManager.getToken - _ = mock.succeed(token1) - result2 <- call2.value.map(_.value) - } yield result2 shouldBe token1 - }.failOnShutdown.futureValue - - private def setup( - clockO: Option[Clock] = None - ): (AuthenticationTokenManager, ObtainTokenMock, Clock) = { - val mck = new ObtainTokenMock - val clock = clockO.getOrElse(AuthenticationTokenManagerTest.mockClock) - val tokenManager = new AuthenticationTokenManager( - (_: TraceContext) => mck.obtain(), - false, - AuthenticationTokenManagerConfig(), - clock, - loggerFactory, - ) - (tokenManager, mck, clock) - } - - private class ObtainTokenMock { - private val callCounter = new AtomicInteger() - private val nextResult = - new AtomicReference[PromiseUnlessShutdown[Either[Status, AuthenticationToken]]]() - - resetNextResult() - - def callCount: Int = callCounter.get() - - def obtain(): EitherT[FutureUnlessShutdown, Status, AuthenticationTokenWithExpiry] = { - callCounter.incrementAndGet() - EitherT( - nextResult.get.futureUS.map( - _.map(token => AuthenticationTokenWithExpiry(token, now.plusSeconds(100))) - ) - ) - } - - def resetNextResult(): Unit = - nextResult.set( - PromiseUnlessShutdown.unsupervised[Either[Status, AuthenticationToken]]() - ) - - def succeed(token: AuthenticationToken): Unit = - nextResult.get().success(UnlessShutdown.Outcome(Right(token))) - - def error(message: String): Unit = - nextResult - .get() - .success(UnlessShutdown.Outcome(Left(Status.PERMISSION_DENIED.withDescription(message)))) - - def fail(throwable: Throwable): Unit = - nextResult.get().failure(throwable) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/authentication/grpc/SequencerClientAuthenticationTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/authentication/grpc/SequencerClientAuthenticationTest.scala deleted file mode 100644 index 8d7236eb2e..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/authentication/grpc/SequencerClientAuthenticationTest.scala +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing.authentication.grpc - -import cats.data.EitherT -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.config.RequireTypes.Port -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.lifecycle.LifeCycle.CloseableChannel -import com.digitalasset.canton.networking.Endpoint -import com.digitalasset.canton.protobuf.{Hello, HelloServiceGrpc} -import com.digitalasset.canton.sequencing.authentication.{ - AuthenticationToken, - AuthenticationTokenManagerConfig, -} -import com.digitalasset.canton.topology.{DefaultTestIdentities, SynchronizerId, UniqueIdentifier} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.{BaseTest, protobuf} -import io.grpc.* -import io.grpc.inprocess.{InProcessChannelBuilder, InProcessServerBuilder} -import io.grpc.stub.StreamObserver -import org.scalatest.FutureOutcome -import org.scalatest.wordspec.FixtureAsyncWordSpec - -import java.util.concurrent.atomic.AtomicReference -import scala.concurrent.{ExecutionContext, Future} - -class SequencerClientAuthenticationTest extends FixtureAsyncWordSpec with BaseTest { - - private val synchronizerId = SynchronizerId( - UniqueIdentifier.tryFromProtoPrimitive("test::synchronizer") - ).toPhysical - private val participantId = DefaultTestIdentities.participant1 - private val crypto = new SymbolicPureCrypto - private val token1 = AuthenticationToken.generate(crypto) - private val token2 = AuthenticationToken.generate(crypto) - - require(token1 != token2, "The generated tokens must be different") - - implicit val ec: ExecutionContext = executionContext - - type FixtureParam = Env - - override def withFixture(test: OneArgAsyncTest): FutureOutcome = { - val env = new Env - - complete { - withFixture(test.toNoArgAsyncTest(env)) - } lastly { - env.close() - } - } - - class Env extends AutoCloseable { - val channelName = InProcessServerBuilder.generateName() - val serverExpectedToken = new AtomicReference(token1) - val clientNextTokenRefresh = new AtomicReference(token1) - val authServerInterceptor = new AuthServerInterceptor(serverExpectedToken.get) - val tokenManager = - new AuthenticationTokenManager( - (_: TraceContext) => - EitherT.pure[FutureUnlessShutdown, Status]( - AuthenticationTokenWithExpiry(clientNextTokenRefresh.get(), CantonTimestamp.Epoch) - ), - false, - AuthenticationTokenManagerConfig(), - AuthenticationTokenManagerTest.mockClock, - loggerFactory, - ) - val service = InProcessServerBuilder - .forName(channelName) - .addService(HelloServiceGrpc.bindService(new GrpcHelloService, executionContext)) - .intercept(authServerInterceptor) - .build() - - service.start() - - val clientChannel = - new CloseableChannel( - InProcessChannelBuilder.forName(channelName).build(), - logger, - "auth-test-client-channel", - ) - val managers = NonEmpty.mk(Seq, Endpoint("localhost", Port.tryCreate(10)) -> tokenManager).toMap - val clientAuthentication = - new SequencerClientTokenAuthentication(synchronizerId, participantId, managers, loggerFactory) - val client = HelloServiceGrpc - .stub(clientChannel.channel) - .withInterceptors(clientAuthentication.reauthorizationInterceptor) - .withCallCredentials(clientAuthentication.callCredentials) - - override def close(): Unit = clientChannel.close() - } - - "should refresh token after a failure" in { env => 
- import env.*
-
- val request = protobuf.Hello.Request(msg = "")
- for {
- _ <- client.hello(request)
- _ = {
- clientNextTokenRefresh.set(token2)
- authServerInterceptor.setValidToken(token2)
- }
- // the first request should fail with an UNAUTHENTICATED error
- // (replaying is too much work and wouldn't work for streamed responses anyway)
- failure <- client.hello(request).failed
- _ = failure should matchPattern {
- case status: StatusRuntimeException
- if status.getStatus.getCode == Status.UNAUTHENTICATED.getCode =>
- }
- // the failure should have kicked off a token refresh, so this should work
- _ <- client.hello(request)
- } yield succeed
- }
-
- class AuthServerInterceptor(initialToken: AuthenticationToken) extends io.grpc.ServerInterceptor {
- private val validToken = new AtomicReference[AuthenticationToken](initialToken)
- override def interceptCall[ReqT, RespT](
- call: ServerCall[ReqT, RespT],
- headers: Metadata,
- next: ServerCallHandler[ReqT, RespT],
- ): ServerCall.Listener[ReqT] = {
- val providedToken = headers.get(Constant.AUTH_TOKEN_METADATA_KEY)
-
- if (providedToken != validToken.get()) {
- val returnedMetadata = new Metadata()
- returnedMetadata.put(Constant.AUTH_TOKEN_METADATA_KEY, providedToken)
- call.close(io.grpc.Status.UNAUTHENTICATED, returnedMetadata)
- new ServerCall.Listener[ReqT] {}
- } else {
- next.startCall(call, headers)
- }
- }
-
- def setValidToken(token: AuthenticationToken) = validToken.set(token)
- }
-
- class GrpcHelloService extends HelloServiceGrpc.HelloService {
- private val nextResponse = new AtomicReference(
- Future.successful(Hello.Response("well hey there"))
- )
-
- override def hello(request: Hello.Request): Future[Hello.Response] = nextResponse.get
-
- override def helloStreamed(
- request: Hello.Request,
- responseObserver: StreamObserver[Hello.Response],
- ): Unit = ???
-
- def setNextResponse(response: Future[Hello.Response]) = nextResponse.set(response)
- def setNextResponse(statusException: StatusException) =
- nextResponse.set(Future.failed(statusException))
- }
-}
diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/BftSenderTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/BftSenderTest.scala
deleted file mode 100644
index 6c8edec33e..0000000000
--- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/BftSenderTest.scala
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing.client - -import cats.data.EitherT -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.lifecycle.UnlessShutdown.AbortedDueToShutdown -import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, PromiseUnlessShutdown} -import com.digitalasset.canton.logging.SuppressionRule -import com.digitalasset.canton.sequencing.BftSender -import com.digitalasset.canton.sequencing.BftSender.FailedToReachThreshold -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.Outcome -import org.scalatest.wordspec.FixtureAnyWordSpec -import org.slf4j.event.Level - -import scala.concurrent.duration.* - -class BftSenderTest extends FixtureAnyWordSpec with BaseTest with HasExecutionContext { - - class Env { - val promise1 = PromiseUnlessShutdown.unsupervised[Either[String, Int]]() - val promise2 = PromiseUnlessShutdown.unsupervised[Either[String, Int]]() - val promise3 = PromiseUnlessShutdown.unsupervised[Either[String, Int]]() - val transports: NonEmpty[Map[String, MockTransport]] = NonEmpty( - Map, - "sequencer1" -> new MockTransport(EitherT(promise1.futureUS)), - "sequencer2" -> new MockTransport(EitherT(promise2.futureUS)), - "sequencer3" -> new MockTransport(EitherT(promise3.futureUS)), - ) - } - - override type FixtureParam = Env - - override def withFixture(test: OneArgTest): Outcome = - withFixture(test.toNoArgTest(new Env())) - - final class MockTransport(result: EitherT[FutureUnlessShutdown, String, Int]) { - def performRequest: EitherT[FutureUnlessShutdown, String, Int] = result - } - - private def checkNotCompleted[E, A](result: EitherT[FutureUnlessShutdown, E, A]) = - always(1.second) { - result.value.isCompleted shouldBe false - } - - private def mkRequest(threshold: PositiveInt)(implicit env: Env) = { - import env.* - BftSender.makeRequest( - "test", - futureSupervisor, - logger, - transports, - threshold, - )(_.performRequest)(identity) - } - - "BftSender" should { - "gather requests from threshold-many transports" in { implicit env => - import env.* - - val threshold = PositiveInt.tryCreate(2) - val result = mkRequest(threshold) - - checkNotCompleted(result) - promise1.outcome_(Right(1)) - checkNotCompleted(result) - promise2.outcome_(Right(2)) - checkNotCompleted(result) - promise3.outcome_(Right(1)) - - result.valueOrFailShutdown("result").futureValue shouldBe 1 - } - - "return as soon as it has enough identical responses" in { implicit env => - import env.* - - val threshold = PositiveInt.tryCreate(2) - val result = mkRequest(threshold) - - checkNotCompleted(result) - promise1.outcome_(Right(1)) - checkNotCompleted(result) - promise2.outcome_(Right(1)) - - result.valueOrFailShutdown("result").futureValue shouldBe 1 - } - - "fail early if it can't get enough responses" in { env => - import env.* - - val threshold = PositiveInt.tryCreate(3) - val promise4 = PromiseUnlessShutdown.unsupervised[Either[String, Int]]() - val promise5 = PromiseUnlessShutdown.unsupervised[Either[String, Int]]() - val transports: NonEmpty[Map[String, MockTransport]] = NonEmpty( - Map, - "sequencer1" -> new MockTransport(EitherT(promise1.futureUS)), - "sequencer2" -> new MockTransport(EitherT(promise2.futureUS)), - "sequencer3" -> new MockTransport(EitherT(promise3.futureUS)), - "sequencer4" -> new MockTransport(EitherT(promise4.futureUS)), - "sequencer5" -> new MockTransport(EitherT(promise5.futureUS)), - ) - - 
loggerFactory.assertEventuallyLogsSeq(SuppressionRule.Level(Level.ERROR))( - { - val result = BftSender.makeRequest( - "test", - futureSupervisor, - logger, - transports, - threshold, - )(_.performRequest)(identity) - - val exception = new RuntimeException("BOOM") - - checkNotCompleted(result) - promise1.outcome_(Right(1)) - checkNotCompleted(result) - promise2.outcome_(Right(2)) - checkNotCompleted(result) - promise3.outcome_(Left("failed")) - checkNotCompleted(result) - promise4.failure(exception) - - result.value.failOnShutdown.futureValue shouldBe Left( - FailedToReachThreshold( - Map(1 -> Set("sequencer1"), 2 -> Set("sequencer2")), - Map[String, Either[Throwable, String]]( - "sequencer3" -> Right("failed"), - "sequencer4" -> Left(exception), - ), - ) - ) - }, - logs => { - forExactly(1, logs) { m => - m.toString should include(s"test failed for sequencer4") - } - }, - ) - } - - "fail with shutdown if any response is a shutdown" in { implicit env => - import env.* - - val threshold = PositiveInt.tryCreate(3) - val result = mkRequest(threshold) - - checkNotCompleted(result) - promise1.outcome_(Right(1)) - checkNotCompleted(result) - promise2.shutdown_() - - result.value.unwrap.futureValue shouldBe AbortedDueToShutdown - } - - "subsequent results should not trigger errors" in { implicit env => - import env.* - - val threshold = PositiveInt.one - val result = mkRequest(threshold) - - checkNotCompleted(result) - promise1.outcome_(Right(1)) - - result.valueOrFailShutdown("result").futureValue shouldBe 1 - - promise2.outcome_(Right(1)) - promise3.outcome_(Left("failed")) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/PeriodicAcknowledgementsTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/PeriodicAcknowledgementsTest.scala deleted file mode 100644 index 7a70ee386d..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/PeriodicAcknowledgementsTest.scala +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing.client - -import cats.data.EitherT -import cats.syntax.option.* -import com.digitalasset.canton.config.DefaultProcessingTimeouts -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.time.SimClock -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.wordspec.AnyWordSpec - -import scala.collection.mutable -import scala.concurrent.Promise -import scala.concurrent.duration.* -import scala.jdk.DurationConverters.* - -class PeriodicAcknowledgementsTest extends AnyWordSpec with BaseTest with HasExecutionContext { - @SuppressWarnings(Array("org.wartremover.warts.Var")) - class Env( - initialCleanTimestamp: Option[CantonTimestamp] = None, - acknowledged: Promise[CantonTimestamp] = Promise(), - ) extends AutoCloseable { - val clock = new SimClock(loggerFactory = PeriodicAcknowledgementsTest.this.loggerFactory) - var latestCleanTimestamp: Option[CantonTimestamp] = initialCleanTimestamp - var nextResult: EitherT[FutureUnlessShutdown, String, Boolean] = EitherT.rightT(true) - val acknowledgements = mutable.Buffer[CantonTimestamp]() - val interval = 10.seconds - - val sut = new PeriodicAcknowledgements( - true, - interval, - fetchLatestCleanTimestamp = _ => FutureUnlessShutdown.pure(latestCleanTimestamp), - acknowledge = tts => { - acknowledgements.append(tts.value) - acknowledged.trySuccess(tts.value) - nextResult - }, - clock = clock, - DefaultProcessingTimeouts.testing, - PeriodicAcknowledgementsTest.this.loggerFactory, - ) - - override def close(): Unit = sut.close() - } - - "should ack when first started" in { - val ackedP = Promise[CantonTimestamp]() - - val _env @ unchecked = - new Env(initialCleanTimestamp = CantonTimestamp.Epoch.some, acknowledged = ackedP) - - // it should pull the latest clean timestamp when created and acknowledge if present - for { - timestamp <- ackedP.future - } yield timestamp shouldBe CantonTimestamp.Epoch - } - - "should wait until the interval has elapsed before acknowledging again" in { - val env = new Env() - - val ts0 = CantonTimestamp.Epoch - val ts1 = CantonTimestamp.ofEpochSecond(5) // before 10s interval - - env.latestCleanTimestamp = ts0.some - env.clock.advance(env.interval.plus(1.millis).toJava) - - for { - _ <- env.sut.flush() - _ = env.acknowledgements should contain.only(ts0) - // set a new clean timestamp - _ = env.latestCleanTimestamp = ts1.some - // don't quite advance to the next interval - _ = env.clock.advance(env.interval.minus(1.millis).toJava) - _ <- env.sut.flush() - _ = env.acknowledgements should contain.only(ts0) - // advance past interval - _ = env.clock.advance(1.millis.toJava) - _ <- env.sut.flush() - } yield env.acknowledgements should contain.only(ts0, ts1) - } - - "should just log if acknowledging fails" in { - val env = new Env() - - env.nextResult = EitherT.leftT("BOOM") - env.latestCleanTimestamp = CantonTimestamp.Epoch.some - - for { - _ <- loggerFactory.assertLogs( - { - env.clock.advance(env.interval.plus(1.millis).toJava) - env.sut.flush() - }, - _.warningMessage shouldBe "Failed to acknowledge clean timestamp (usually because sequencer is down): BOOM", - ) - } yield succeed - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/ResilientSequencerSubscriberPekkoTest.scala 
b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/ResilientSequencerSubscriberPekkoTest.scala deleted file mode 100644 index f5313cc704..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/ResilientSequencerSubscriberPekkoTest.scala +++ /dev/null @@ -1,421 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing.client - -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.config.DefaultProcessingTimeouts -import com.digitalasset.canton.crypto.Signature -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.health.HealthComponent.AlwaysHealthyComponent -import com.digitalasset.canton.health.{ComponentHealthState, HealthComponent} -import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.sequencing.SequencedSerializedEvent -import com.digitalasset.canton.sequencing.client.ResilientSequencerSubscription.LostSequencerSubscription -import com.digitalasset.canton.sequencing.client.TestSubscriptionError.{ - FatalExn, - RetryableError, - RetryableExn, - UnretryableError, -} -import com.digitalasset.canton.sequencing.protocol.{Batch, Deliver, SignedContent} -import com.digitalasset.canton.sequencing.traffic.TrafficReceipt -import com.digitalasset.canton.store.SequencedEventStore.SequencedEventWithTraceContext -import com.digitalasset.canton.topology.{DefaultTestIdentities, SequencerId} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.PekkoUtil.syntax.* -import org.apache.pekko.stream.scaladsl.{Keep, Sink, Source} -import org.apache.pekko.stream.testkit.StreamSpec -import org.apache.pekko.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped -import org.apache.pekko.stream.testkit.scaladsl.TestSink - -import java.util.concurrent.atomic.AtomicReference -import scala.concurrent.duration.{Deadline, DurationInt, FiniteDuration} - -class ResilientSequencerSubscriberPekkoTest extends StreamSpec with BaseTest { - import TestSequencerSubscriptionFactoryPekko.* - - // Override the implicit from PekkoSpec so that we don't get ambiguous implicits - override val patience: PatienceConfig = defaultPatience - - // very short to speedup test - private val InitialDelay: FiniteDuration = 1.millisecond - private val MaxDelay: FiniteDuration = - 1025.millis // 1 + power of 2 because InitialDelay keeps being doubled - - private def retryDelay(maxDelay: FiniteDuration = MaxDelay) = - SubscriptionRetryDelayRule(InitialDelay, maxDelay, maxDelay) - - private def createResilientSubscriber[E]( - subscriptionFactory: SequencerSubscriptionFactoryPekko[E], - retryDelayRule: SubscriptionRetryDelayRule = retryDelay(), - ): ResilientSequencerSubscriberPekko[E] = - new ResilientSequencerSubscriberPekko[E]( - retryDelayRule, - subscriptionFactory, - DefaultProcessingTimeouts.testing, - loggerFactory, - ) - - "ResilientSequencerSubscriberPekko" should { - "not retry on an unrecoverable error" in assertAllStagesStopped { - val factory = TestSequencerSubscriptionFactoryPekko( - loggerFactory.appendUnnamedKey("case", "unrecoverable-error") - ) - val subscriber = createResilientSubscriber(factory) - factory.add(Error(UnretryableError)) - val subscription = 
subscriber.subscribeFrom(startingTimestamp = None)
- loggerFactory.assertLogs(
- subscription.source.toMat(Sink.ignore)(Keep.right).run().futureValue,
- _.warningMessage should include(
- s"Closing resilient sequencer subscription due to error: $UnretryableError"
- ),
- )
- // Health updates are asynchronous
- eventually() {
- subscription.health.getState shouldBe subscription.health.closingState
- }
- }
-
- "retry on recoverable errors" in assertAllStagesStopped {
- val factory = TestSequencerSubscriptionFactoryPekko(
- loggerFactory.appendUnnamedKey("case", "retry-on-error")
- )
- val subscriber = createResilientSubscriber(factory)
- factory.add(Error(RetryableError))
- factory.add(Error(RetryableError))
- factory.add(Error(UnretryableError))
- val subscription = subscriber.subscribeFrom(startingTimestamp = None)
- loggerFactory.assertLogs(
- subscription.source
- .toMat(Sink.ignore)(Keep.right)
- .run()
- .futureValue,
- _.warningMessage should include(
- s"Closing resilient sequencer subscription due to error: $UnretryableError"
- ),
- )
- // Health updates are asynchronous
- eventually() {
- subscription.health.getState shouldBe subscription.health.closingState
- }
- }
-
- "retry on exceptions until one is fatal" in {
- val factory = TestSequencerSubscriptionFactoryPekko(
- loggerFactory.appendUnnamedKey("case", "retry-on-exception")
- )
- val subscriber = createResilientSubscriber(factory)
- factory.add(Failure(RetryableExn))
- factory.add(Failure(FatalExn))
- val subscription = subscriber.subscribeFrom(startingTimestamp = None)
- loggerFactory.assertLogs(
- subscription.source
- .toMat(Sink.ignore)(Keep.right)
- .run()
- .futureValue,
- _.warningMessage should include(
- "The sequencer subscription encountered an exception and will be restarted"
- ),
- _.errorMessage should include("Closing resilient sequencer subscription due to exception"),
- )
- // Health updates are asynchronous
- eventually() {
- subscription.health.getState shouldBe subscription.health.closingState
- }
- }
-
- "restart from last received timestamp" in {
- val factory = TestSequencerSubscriptionFactoryPekko(
- loggerFactory.appendUnnamedKey("case", "restart-from-timestamp")
- )
- val subscriber = createResilientSubscriber(factory)
- factory.subscribe(start => genEvents(start, 10) :+ Error(RetryableError))
- factory.subscribe(start => genEvents(start, 10))
-
- val ((killSwitch, doneF), sink) =
- subscriber
- .subscribeFrom(startingTimestamp = None)
- .source
- .map(_.value)
- .toMat(TestSink.probe)(Keep.both)
- .run()
- sink.request(30)
- val expectedEvents = genEvents(startTimestamp = None, 10) ++ genEvents(
- startTimestamp = Some(
- CantonTimestamp.Epoch.addMicros(9L)
- ), // Note that the event at 9L repeats due to re-subscription
- 10,
- )
- expectedEvents.foreach(event =>
- clue(s"Expecting output element: $event") {
- sink.expectNext(Right(event.asOrdinarySerializedEvent))
- }
- )
- killSwitch.shutdown()
- doneF.futureValue
- }
-
- "correctly indicate whether we've received items when calculating the next retry delay" in assertAllStagesStopped {
- val hasReceivedEventsCalls = new AtomicReference[Seq[Boolean]](Seq.empty)
- val captureHasEvent = new SubscriptionRetryDelayRule {
- override def nextDelay(
- previousDelay: FiniteDuration,
- hasReceivedEvent: Boolean,
- ): FiniteDuration = {
- hasReceivedEventsCalls.getAndUpdate(_ :+ hasReceivedEvent)
- 1.milli
- }
-
- override val initialDelay: FiniteDuration = 1.milli
- override val warnDelayDuration: FiniteDuration = 100.millis
- }
- val factory = 
TestSequencerSubscriptionFactoryPekko( - loggerFactory.appendUnnamedKey("case", "calculate-retry-delay") - ) - val subscriber = createResilientSubscriber(factory, captureHasEvent) - - // provide an event then close with a recoverable error - factory.add(Event(timestamp = CantonTimestamp.Epoch.addMicros(1L)), Error(RetryableError)) - // don't provide an event and close immediately with a recoverable error - factory.add(Error(RetryableError)) - // don't provide an event and close immediately - factory.add(Complete) - - subscriber - .subscribeFrom(startingTimestamp = None) - .source - .toMat(Sink.ignore)(Keep.right) - .run() - .futureValue - - // An unretryable completion does not call the retry delay rule, so there should be only two calls recorded - hasReceivedEventsCalls.get() shouldBe Seq(true, false) - } - - "retry until closing if the sequencer is permanently unavailable" in assertAllStagesStopped { - val maxDelay = 100.milliseconds - - val factory = TestSequencerSubscriptionFactoryPekko( - loggerFactory.appendUnnamedKey("case", "retry-until-closing") - ) - val subscriber = createResilientSubscriber(factory, retryDelay(maxDelay)) - // Always close with RetryableError - for (_ <- 1 to 100) { - factory.add(Error(RetryableError)) - } - - val startTime = Deadline.now - loggerFactory.assertLoggedWarningsAndErrorsSeq( - { - val subscription = subscriber.subscribeFrom(startingTimestamp = None) - // Initially, everything looks healthy - subscription.health.isFailed shouldBe false - - val (killSwitch, doneF) = subscription.source.toMat(Sink.ignore)(Keep.left).run() - // we retry until we become unhealthy - eventually(maxPollInterval = 10.milliseconds) { - subscription.health.isFailed shouldBe true - } - - // Check that it has hit MaxDelay. We can't really check an upper bound as it would make the test flaky - -startTime.timeLeft should be >= maxDelay - - killSwitch.shutdown() - doneF.futureValue - - eventually() { - subscription.health.getState shouldBe subscription.health.closingState - } - }, - logEntries => { - logEntries should not be empty - forEvery(logEntries) { - _.warningMessage should (include(s"Waiting $maxDelay before reconnecting") or include( - LostSequencerSubscription.id - )) - } - }, - ) - } - - "return to healthy when messages are received again" in assertAllStagesStopped { - val maxDelay = 100.milliseconds - - val factory = TestSequencerSubscriptionFactoryPekko( - loggerFactory.appendUnnamedKey("case", "return-to-healthy") - ) - val subscriber = createResilientSubscriber(factory, retryDelay(maxDelay)) - // retryDelay doubles the delay upon each attempt until it hits `maxDelay`, - // so we set it to two more such that we get the chance to see the unhealthy state - val retries = (Math.log(maxDelay.toMillis.toDouble) / Math.log(2.0d)).ceil.toInt + 2 - for (_ <- 1 to retries) { - factory.add(Error(RetryableError)) - } - factory.add(genEvents(startTimestamp = Some(CantonTimestamp.Epoch.addMicros(1L)), count = 9)*) - - loggerFactory.assertLoggedWarningsAndErrorsSeq( - { - val subscription = subscriber - .subscribeFrom(startingTimestamp = None) - - // Initially, everything looks healthy - subscription.health.isFailed shouldBe false - - val (killSwitch, doneF) = subscription.source.toMat(Sink.ignore)(Keep.left).run() - - logger.debug("Wait until the subscription becomes unhealthy") - eventually(maxPollInterval = 10.milliseconds) { - subscription.health.isFailed shouldBe true - } - - // The factory should eventually produce new elements. 
So we should return to healthy - logger.debug("Wait until the subscription becomes healthy again") - eventually(maxPollInterval = 10.milliseconds) { - subscription.health.getState shouldBe ComponentHealthState.Ok() - } - - killSwitch.shutdown() - doneF.futureValue - }, - logEntries => { - logEntries should not be empty - forEvery(logEntries) { - _.warningMessage should (include(s"Waiting $maxDelay before reconnecting") or include( - LostSequencerSubscription.id - )) - } - }, - ) - } - } -} - -class TestSequencerSubscriptionFactoryPekko( - health: HealthComponent, - override protected val loggerFactory: NamedLoggerFactory, -) extends SequencerSubscriptionFactoryPekko[TestSubscriptionError] - with NamedLogging { - import TestSequencerSubscriptionFactoryPekko.* - - override def sequencerId: SequencerId = DefaultTestIdentities.daSequencerId - - private val sources = new AtomicReference[Seq[Option[CantonTimestamp] => Seq[Element]]](Seq.empty) - - def add(next: Element*): Unit = subscribe(_ => next) - - def subscribe(subscribe: Option[CantonTimestamp] => Seq[Element]): Unit = - sources.getAndUpdate(_ :+ subscribe).discard - - override def create(startingTimestamp: Option[CantonTimestamp])(implicit - traceContext: TraceContext - ): SequencerSubscriptionPekko[TestSubscriptionError] = { - val srcs = sources.getAndUpdate(_.drop(1)) - val subscribe = srcs.headOption.getOrElse( - throw new IllegalStateException( - "Requesting more resubscriptions than provided by the test setup" - ) - ) - - logger.debug(s"Creating SequencerSubscriptionPekko at starting timestamp $startingTimestamp") - - val source = Source(subscribe(startingTimestamp)) - // Add an incomplete unproductive source at the end to prevent automatic completion signals - .concat(Source.never[Element]) - .withUniqueKillSwitchMat()(Keep.right) - .mapConcat { withKillSwitch => - noTracingLogger.debug(s"Processing element ${withKillSwitch.value}") - withKillSwitch.traverse { - case Error(error) => - withKillSwitch.killSwitch.shutdown() - Seq(Left(error)) - case Complete => - withKillSwitch.killSwitch.shutdown() - Seq.empty - case event: Event => Seq(Right(event.asOrdinarySerializedEvent)) - case Failure(ex) => throw ex - } - } - .takeUntilThenDrain(_.isLeft) - .watchTermination()(Keep.both) - - SequencerSubscriptionPekko[TestSubscriptionError](source, health) - } - - override val retryPolicy: SubscriptionErrorRetryPolicyPekko[TestSubscriptionError] = - new TestSubscriptionErrorRetryPolicyPekko -} - -object TestSequencerSubscriptionFactoryPekko { - def apply(loggerFactory: NamedLoggerFactory): TestSequencerSubscriptionFactoryPekko = { - val alwaysHealthyComponent = new AlwaysHealthyComponent( - "TestSequencerSubscriptionFactory", - loggerFactory.getTracedLogger(classOf[TestSequencerSubscriptionFactoryPekko]), - ) - new TestSequencerSubscriptionFactoryPekko(alwaysHealthyComponent, loggerFactory) - } - - sealed trait Element extends Product with Serializable - - final case class Error(error: TestSubscriptionError) extends Element - final case class Failure(exception: Exception) extends Element - case object Complete extends Element - final case class Event( - timestamp: CantonTimestamp, - signatures: NonEmpty[Set[Signature]] = Signature.noSignatures, - ) extends Element { - def asOrdinarySerializedEvent: SequencedSerializedEvent = - mkOrdinarySerializedEvent(timestamp, signatures) - } - - def mkOrdinarySerializedEvent( - timestamp: CantonTimestamp, - signatures: NonEmpty[Set[Signature]] = Signature.noSignatures, - ): SequencedSerializedEvent = { - 
val pts = - if (timestamp == CantonTimestamp.Epoch) None else Some(timestamp.addMicros(-1L)) - val sequencedEvent = Deliver.create( - pts, - timestamp, - DefaultTestIdentities.physicalSynchronizerId, - None, - Batch.empty(BaseTest.testedProtocolVersion), - None, - BaseTest.testedProtocolVersion, - Option.empty[TrafficReceipt], - ) - val signedContent = - SignedContent.create( - sequencedEvent, - signatures.toSeq, - None, - SignedContent.protocolVersionRepresentativeFor(BaseTest.testedProtocolVersion), - ) - SequencedEventWithTraceContext(signedContent)(TraceContext.empty) - } - - def genEvents(startTimestamp: Option[CantonTimestamp], count: Long): Seq[Event] = - (0L until count).map(offset => - Event(startTimestamp.getOrElse(CantonTimestamp.Epoch).addMicros(offset)) - ) - - private class TestSubscriptionErrorRetryPolicyPekko - extends SubscriptionErrorRetryPolicyPekko[TestSubscriptionError] { - override def retryOnError(subscriptionError: TestSubscriptionError, receivedItems: Boolean)( - implicit loggingContext: ErrorLoggingContext - ): Boolean = - subscriptionError match { - case RetryableError => true - case UnretryableError => false - } - - override def retryOnException(ex: Throwable)(implicit - loggingContext: ErrorLoggingContext - ): Boolean = ex match { - case RetryableExn => true - case _ => false - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/ResilientSequencerSubscriptionTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/ResilientSequencerSubscriptionTest.scala deleted file mode 100644 index e165c5c50b..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/ResilientSequencerSubscriptionTest.scala +++ /dev/null @@ -1,534 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing.client - -import cats.syntax.either.* -import com.digitalasset.canton.config.{DefaultProcessingTimeouts, ProcessingTimeout} -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.lifecycle.UnlessShutdown.{AbortedDueToShutdown, Outcome} -import com.digitalasset.canton.lifecycle.{ - AsyncOrSyncCloseable, - FutureUnlessShutdown, - UnlessShutdown, -} -import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} -import com.digitalasset.canton.logging.{NamedLoggerFactory, TracedLogger} -import com.digitalasset.canton.sequencing.client.ResilientSequencerSubscription.LostSequencerSubscription -import com.digitalasset.canton.sequencing.client.SubscriptionCloseReason.SubscriptionError -import com.digitalasset.canton.sequencing.client.TestSubscriptionError.{ - FatalExn, - RetryableError, - RetryableExn, - UnretryableError, -} -import com.digitalasset.canton.sequencing.protocol.{ClosedEnvelope, SequencedEvent, SignedContent} -import com.digitalasset.canton.sequencing.{SequencedEventHandler, SequencerTestUtils} -import com.digitalasset.canton.store.SequencedEventStore.SequencedEventWithTraceContext -import com.digitalasset.canton.topology.{SequencerId, SynchronizerId, UniqueIdentifier} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.{BaseTest, FailOnShutdown, HasExecutionContext} -import org.scalatest.Assertion -import org.scalatest.wordspec.{AnyWordSpec, AsyncWordSpec} - -import java.util.concurrent.atomic.{AtomicInteger, AtomicReference} -import scala.collection.mutable -import scala.concurrent.duration.* -import scala.concurrent.{Future, Promise} - -sealed trait TestSubscriptionError - extends SubscriptionError - with Product - with Serializable - with PrettyPrinting { - override protected def pretty: Pretty[this.type] = prettyOfObject[this.type] -} -object TestSubscriptionError { - case object RetryableError extends TestSubscriptionError - case object UnretryableError extends TestSubscriptionError - case object RetryableExn extends Exception - case object FatalExn extends Exception - - val retryRule: CheckedSubscriptionErrorRetryPolicy[TestSubscriptionError] = - new CheckedSubscriptionErrorRetryPolicy[TestSubscriptionError] { - override protected def retryInternal(error: TestSubscriptionError, receivedItems: Boolean)( - implicit traceContext: TraceContext - ): Boolean = error match { - case RetryableError => true - case UnretryableError => false - } - - override def retryOnException(exn: Throwable, Logger: TracedLogger)(implicit - traceContext: TraceContext - ): Boolean = - exn match { - case RetryableExn => true - case _ => false - } - } -} - -final case class TestHandlerError(message: String) - -class ResilientSequencerSubscriptionTest - extends AsyncWordSpec - with BaseTest - with FailOnShutdown - with ResilientSequencerSubscriptionTestUtils - with HasExecutionContext { - - private lazy val synchronizerId = SynchronizerId( - UniqueIdentifier.tryFromProtoPrimitive("synchronizer1::test") - ) - - "ResilientSequencerSubscription" should { - "not retry on an unrecoverable error" in { - val testSubscriptions = SubscriptionTestFactory.mocked.addUnrecoverable() - val subscription = createSubscription(testSubscriptions) - - loggerFactory.assertLoggedWarningsAndErrorsSeq( - { - subscription.start - - for { - 
reason <- subscription.closeReason
- } yield reason should be(UnretryableError)
- },
- _.map(
- _.warningMessage
- ) should contain only "Closing resilient sequencer subscription due to error: UnretryableError",
- )
- }
-
- "retry on recoverable errors" in {
- val testSubscriptions = SubscriptionTestFactory.mocked
- .addRecoverable()
- .addRecoverable()
- .addUnrecoverable()
- val subscription = createSubscription(testSubscriptions)
-
- loggerFactory.assertLoggedWarningsAndErrorsSeq(
- {
- subscription.start
-
- for {
- closeReason <- subscription.closeReason
- _ = closeReason should be(UnretryableError)
- } yield testSubscriptions.allShouldHaveBeenUsed
- },
- _.map(
- _.warningMessage
- ) should contain only "Closing resilient sequencer subscription due to error: UnretryableError",
- )
- }
-
- "retry on exceptions until one is fatal" in {
- val testSubscriptions = SubscriptionTestFactory.mocked
- val subscription1F = testSubscriptions.addRunning()
- val subscription2F = testSubscriptions.addRunning()
-
- val subscription = createSubscription(testSubscriptions)
- subscription.start
-
- loggerFactory.assertLogs(
- for {
- subscription1 <- subscription1F
- // fail this subscription
- _ = subscription1.closeWithExn(RetryableExn)
- subscription2 <- subscription2F
- _ = subscription2.closeWithExn(FatalExn)
- // wait for the resilient subscription to close
- closeReason <- FutureUnlessShutdown.outcomeF(subscription.closeReason.failed)
- } yield { closeReason shouldBe FatalExn },
- _.warningMessage should include(
- "The sequencer subscription encountered an exception and will be restarted"
- ),
- _.errorMessage should include("Closing resilient sequencer subscription due to exception"),
- _.warningMessage should include("Underlying subscription failed to close"),
- )
- }
-
- "restart from last received timestamp" in {
- val testSubscriptions = SubscriptionTestFactory.mocked
- val subscription1F = testSubscriptions.addRunning()
- val subscription2F = testSubscriptions.addRunning()
-
- val subscription = createSubscription(testSubscriptions)
- subscription.start
-
- for {
- subscription1 <- subscription1F
- _ = subscription1.subscribedStartingTimestamp shouldBe None
- // indicate that we've processed the next event
- _ <- subscription1.handleCounter(43)
- // fail this subscription
- _ = subscription1.closeWithReason(RetryableError)
- // wait for the next subscription to occur
- subscription2 <- subscription2F
- } yield subscription2.subscribedStartingTimestamp shouldBe Some(
- CantonTimestamp.Epoch.addMicros(43L)
- )
- }
-
- "correctly indicate whether we've received items when calculating the next retry delay" in {
- val hasReceivedEventsCalls = mutable.Buffer[Boolean]()
- val captureHasEvent = new SubscriptionRetryDelayRule {
- override def nextDelay(
- previousDelay: FiniteDuration,
- hasReceivedEvent: Boolean,
- ): FiniteDuration = {
- hasReceivedEventsCalls += hasReceivedEvent
- 1.milli
- }
- override val initialDelay: FiniteDuration = 1.milli
- override val warnDelayDuration: FiniteDuration = 100.millis
- }
- val testSubscriptions = SubscriptionTestFactory.mocked
- val subscription1F = testSubscriptions.addRunning()
- val subscription2F = testSubscriptions.addRunning()
- val subscription3F = testSubscriptions.addRunning()
-
- val subscription = createSubscription(testSubscriptions, retryDelayRule = captureHasEvent)
- subscription.start
-
- for {
- subscription1 <- subscription1F
- _ <-
- // provide an event then close with a recoverable error
- subscription1
- .handleCounter(1)
- .map(_ => 
subscription1.closeWithReason(RetryableError))
- subscription2 <- subscription2F
- _ = {
- // don't provide an event and close immediately
- subscription2.closeWithReason(RetryableError)
- }
- subscription3 <- subscription3F
- _ = subscription3.closeWithReason(SubscriptionCloseReason.Closed)
- closeReason <- FutureUnlessShutdown.outcomeF(subscription.closeReason)
- } yield {
- closeReason should be(SubscriptionCloseReason.Closed)
- hasReceivedEventsCalls.toList should contain theSameElementsInOrderAs List(true, false)
- }
- }
-
- "close underlying subscription even if created after we close" in {
- val resilientSequencerSubscriptionRef
- : AtomicReference[ResilientSequencerSubscription[TestHandlerError]] =
- new AtomicReference[ResilientSequencerSubscription[TestHandlerError]]()
- val closePromise = Promise[Unit]()
-
- val subscription = new SequencerSubscription[TestHandlerError] {
- override protected def timeouts = ResilientSequencerSubscriptionTest.this.timeouts
- override protected def loggerFactory: NamedLoggerFactory =
- ResilientSequencerSubscriptionTest.this.loggerFactory
- override private[canton] def complete(reason: SubscriptionCloseReason[TestHandlerError])(
- implicit traceContext: TraceContext
- ): Unit = ()
- }
-
- val subscriptionFactory =
- new SequencerSubscriptionFactory[TestHandlerError] {
- override def create(
- startingTimestamp: Option[CantonTimestamp],
- handler: SequencedEventHandler[TestHandlerError],
- )(implicit traceContext: TraceContext): UnlessShutdown[
- (SequencerSubscription[TestHandlerError], SubscriptionErrorRetryPolicy)
- ] = {
- // Close the resilient sequencer subscription while it is creating the subscription
- // close will block waiting for the subscription request, so start in a future but defer waiting for its completion until after it's resolved
- closePromise.completeWith(Future(resilientSequencerSubscriptionRef.get().close()))
- eventually() {
- resilientSequencerSubscriptionRef.get().isClosing shouldBe true
- }
- Outcome(subscription -> TestSubscriptionError.retryRule)
- }
- }
-
- val resilientSequencerSubscription = new ResilientSequencerSubscription[TestHandlerError](
- SequencerId(synchronizerId.uid),
- startingTimestamp = None, // from the beginning
- _ => FutureUnlessShutdown.pure(Either.unit[TestHandlerError]),
- subscriptionFactory,
- retryDelay(),
- doNotExitOnFatalErrors,
- timeouts,
- loggerFactory,
- )
- resilientSequencerSubscriptionRef.set(resilientSequencerSubscription)
-
- // kick off
- resilientSequencerSubscription.start
-
- for {
- _ <- closePromise.future
- } yield subscription.isClosing shouldBe true // should have called close on underlying subscription
- }
-
- "not create a subscription when the subscription factory returns AbortedDueToShutdown" in {
-
- val subscriptionFactory =
- new SequencerSubscriptionFactory[TestHandlerError] {
- override def create(
- startingTimestamp: Option[CantonTimestamp],
- handler: SequencedEventHandler[TestHandlerError],
- )(implicit traceContext: TraceContext): UnlessShutdown[
- (SequencerSubscription[TestHandlerError], SubscriptionErrorRetryPolicy)
- ] = AbortedDueToShutdown
- }
-
- val resilientSequencerSubscription = new ResilientSequencerSubscription[TestHandlerError](
- SequencerId(synchronizerId.uid),
- startingTimestamp = None, // from the beginning
- _ => FutureUnlessShutdown.pure(Either.unit[TestHandlerError]),
- subscriptionFactory,
- retryDelay(),
- doNotExitOnFatalErrors,
- timeouts,
- loggerFactory,
- )
- // kick off
- resilientSequencerSubscription.start
-
- for {
- 
closeReason <- resilientSequencerSubscription.closeReason - } yield { - closeReason shouldBe SubscriptionCloseReason.Shutdown - resilientSequencerSubscription.isClosing shouldBe true - } - } - - } -} - -// these tests require a parallel execution context so are separated from the main group of tests -class ResilientSequencerSubscriptionRetryTimingTest - extends AnyWordSpec - with BaseTest - with HasExecutionContext - with ResilientSequencerSubscriptionTestUtils { - - "retry until closing if the sequencer is permanently unavailable" in { - val startTime = Deadline.now - val maxDelay = 100.milliseconds - - loggerFactory.assertLoggedWarningsAndErrorsSeq( - { - val subscription = - createSubscription( - SubscriptionTestFactory.alwaysCloseWith(RetryableError), - // this will retry with a max-delay and it will warn once the max-delay is hit - retryDelayRule = retryDelay(maxDelay), - ) - - subscription.start - - // we retry until we see a warning - eventually() { - loggerFactory.numberOfRecordedEntries should be > 0 - } - - subscription.close() - }, - logEntries => { - forEvery(logEntries) { - _.warningMessage should (include(s"Waiting $maxDelay before reconnecting") or include( - LostSequencerSubscription.id - )) - } - logEntries should not be empty - }, - ) - // Check that it has hit MaxDelay. We can't really check an upper bound as it would make the test flaky - -startTime.timeLeft should be >= maxDelay - } -} - -trait ResilientSequencerSubscriptionTestUtils { - this: BaseTest with HasExecutionContext => - - // very short to speedup test - val InitialDelay: FiniteDuration = 1.millisecond - val MaxDelay: FiniteDuration = - 1025.millis // 1 + power of 2 because InitialDelay keeps being doubled - - private lazy val synchronizerId = SynchronizerId( - UniqueIdentifier.tryFromProtoPrimitive("synchronizer1::test") - ) - - def retryDelay(maxDelay: FiniteDuration = MaxDelay) = - SubscriptionRetryDelayRule(InitialDelay, maxDelay, maxDelay) - - def createSubscription( - subscriptionTestFactory: SubscriptionTestFactory, - retryDelayRule: SubscriptionRetryDelayRule = retryDelay(), - ): ResilientSequencerSubscription[TestHandlerError] = { - val subscription = new ResilientSequencerSubscription( - SequencerId(synchronizerId.uid), // only used for logging - startingTimestamp = None, // from the beginning - _ => FutureUnlessShutdown.pure(Either.unit[TestHandlerError]), - subscriptionTestFactory, - retryDelayRule, - doNotExitOnFatalErrors, - DefaultProcessingTimeouts.testing, - loggerFactory, - ) - - subscription - } - - protected def doNotExitOnFatalErrors: SubscriptionCloseReason[TestHandlerError] => Unit = _ => () - - trait SubscriptionTestFactory extends SequencerSubscriptionFactory[TestHandlerError] { - protected def createInternal( - startingTimestamp: Option[CantonTimestamp], - handler: SequencedEventHandler[TestHandlerError], - )(implicit traceContext: TraceContext): SequencerSubscription[TestHandlerError] - - override def create( - startingTimestamp: Option[CantonTimestamp], - handler: SequencedEventHandler[TestHandlerError], - )(implicit - traceContext: TraceContext - ): UnlessShutdown[(SequencerSubscription[TestHandlerError], SubscriptionErrorRetryPolicy)] = - Outcome( - (createInternal(startingTimestamp, handler), TestSubscriptionError.retryRule) - ) - } - - object SubscriptionTestFactory { - def mocked: MockedSubscriptions = new MockedSubscriptions - - def alwaysCloseWith( - reason: SubscriptionCloseReason[TestHandlerError] - ): SubscriptionTestFactory = - new SubscriptionTestFactory { - override 
def createInternal( - startingTimestamp: Option[CantonTimestamp], - handler: SequencedEventHandler[TestHandlerError], - )(implicit traceContext: TraceContext): SequencerSubscription[TestHandlerError] = - new SequencerSubscription[TestHandlerError] { - override protected def loggerFactory: NamedLoggerFactory = - ResilientSequencerSubscriptionTestUtils.this.loggerFactory - closeReasonPromise.trySuccess(reason) - override protected def timeouts: ProcessingTimeout = DefaultProcessingTimeouts.testing - override protected def closeAsync(): Seq[AsyncOrSyncCloseable] = Nil - override private[canton] def complete( - reason: SubscriptionCloseReason[TestHandlerError] - )(implicit traceContext: TraceContext): Unit = () - } - } - } - - class MockSubscriptionResponse( - mockCloseReason: Option[SubscriptionCloseReason[TestHandlerError]] = None - ) { - type SubscriberDetails = - ( - Option[CantonTimestamp], - SequencedEventHandler[TestHandlerError], - MockedSequencerSubscription, - ) - private val activeSubscription = - new AtomicReference[Option[SubscriberDetails]](None) - private val subscribedP: Promise[Unit] = Promise() - val subscribed = subscribedP.future - - class MockedSequencerSubscription( - startingTimestamp: Option[CantonTimestamp], - handler: SequencedEventHandler[TestHandlerError], - ) extends SequencerSubscription[TestHandlerError] { - override protected def timeouts: ProcessingTimeout = DefaultProcessingTimeouts.testing - override protected def loggerFactory: NamedLoggerFactory = - ResilientSequencerSubscriptionTestUtils.this.loggerFactory - - def closeWithReason(reason: SubscriptionCloseReason[TestHandlerError]): Boolean = - closeReasonPromise.trySuccess(reason) - def closeWithExn(exn: Throwable): Boolean = closeReasonPromise.tryFailure(exn) - - if (!activeSubscription.compareAndSet(None, Some((startingTimestamp, handler, this)))) { - fail("subscription has been created more than once") - } else { - subscribedP.trySuccess(()) - } - - override private[canton] def complete(reason: SubscriptionCloseReason[TestHandlerError])( - implicit traceContext: TraceContext - ): Unit = closeReasonPromise.trySuccess(reason).discard[Boolean] - - override protected def closeAsync(): Seq[AsyncOrSyncCloseable] = Nil - - // immediately close if a close reason was specified - mockCloseReason foreach closeReasonPromise.trySuccess - } - - def create( - startingTimestamp: Option[CantonTimestamp], - handler: SequencedEventHandler[TestHandlerError], - ): SequencerSubscription[TestHandlerError] = - new MockedSequencerSubscription(startingTimestamp, handler) - - private def fromSubscriber[A](getter: SubscriberDetails => A): A = - activeSubscription.get() match { - case Some(details) => getter(details) - case None => fail("subscriber has not yet subscribed") - } - - def handleCounter(sc: Long): FutureUnlessShutdown[Either[TestHandlerError, Unit]] = - fromSubscriber(_._2)(SequencedEventWithTraceContext(deliverEvent(sc))(traceContext)) - - def subscribedStartingTimestamp: Option[CantonTimestamp] = fromSubscriber(_._1) - - def closeWithReason(reason: SubscriptionCloseReason[TestHandlerError]): Boolean = - fromSubscriber(_._3).closeWithReason(reason) - - def closeWithExn(exn: Throwable): Boolean = - fromSubscriber(_._3).closeWithExn(exn) - - def subscription: MockedSequencerSubscription = fromSubscriber(_._3) - - private def deliverEvent( - offset: Long - ): SignedContent[SequencedEvent[ClosedEnvelope]] = { - val deliver = SequencerTestUtils.mockDeliver( - timestamp = CantonTimestamp.Epoch.addMicros(offset) - ) - 
SignedContent(deliver, SymbolicCrypto.emptySignature, None, testedProtocolVersion) - } - } - - class MockedSubscriptions extends SubscriptionTestFactory { - private val subscriptions = scala.collection.mutable.Buffer[MockSubscriptionResponse]() - private val nextSubscription = new AtomicInteger(0) - - def addRecoverable(): MockedSubscriptions = addClosed(RetryableError) - - def addUnrecoverable(): MockedSubscriptions = addClosed(UnretryableError) - - def addRunning(): FutureUnlessShutdown[MockSubscriptionResponse] = - FutureUnlessShutdown.outcomeF { - val mockResponse = new MockSubscriptionResponse() - add(mockResponse) - mockResponse.subscribed.map(_ => mockResponse) - } - - def addClosed(reason: SubscriptionCloseReason[TestHandlerError]): MockedSubscriptions = - add(new MockSubscriptionResponse(Some(reason))) - - def add(mockSubscriptionResponse: MockSubscriptionResponse): MockedSubscriptions = { - subscriptions += mockSubscriptionResponse - this - } - - override def createInternal( - startingTimestamp: Option[CantonTimestamp], - handler: SequencedEventHandler[TestHandlerError], - )(implicit traceContext: TraceContext): SequencerSubscription[TestHandlerError] = - subscriptions(nextSubscription.getAndIncrement()).create(startingTimestamp, handler) - - def allShouldHaveBeenUsed: Assertion = nextSubscription.get() shouldBe subscriptions.length - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/SendTrackerTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/SendTrackerTest.scala deleted file mode 100644 index c5956c3bfa..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/SendTrackerTest.scala +++ /dev/null @@ -1,468 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing.client - -import com.daml.metrics.api.{HistogramInventory, MetricName, MetricsContext} -import com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt} -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, UnlessShutdown} -import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.metrics.{ - CommonMockMetrics, - MetricsUtils, - SequencerClientMetrics, - TrafficConsumptionMetrics, -} -import com.digitalasset.canton.sequencing.protocol.* -import com.digitalasset.canton.sequencing.traffic.{ - EventCostCalculator, - TrafficReceipt, - TrafficStateController, -} -import com.digitalasset.canton.sequencing.{ - RawProtocolEvent, - SequencedProtocolEvent, - SequencerTestUtils, -} -import com.digitalasset.canton.store.SequencedEventStore.SequencedEventWithTraceContext -import com.digitalasset.canton.store.memory.InMemorySendTrackerStore -import com.digitalasset.canton.store.{SavePendingSendError, SendTrackerStore} -import com.digitalasset.canton.topology.DefaultTestIdentities.participant1 -import com.digitalasset.canton.topology.{DefaultTestIdentities, TestingTopology} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.{BaseTest, FailOnShutdown} -import org.scalatest.wordspec.AsyncWordSpec - -import java.util.concurrent.atomic.AtomicInteger -import scala.concurrent.{ExecutionContext, Future, Promise} - -class SendTrackerTest extends AsyncWordSpec with BaseTest with MetricsUtils with FailOnShutdown { - private lazy val metrics = CommonMockMetrics.sequencerClient - private lazy val msgId1 = MessageId.tryCreate("msgId1") - private lazy val msgId2 = MessageId.tryCreate("msgId2") - - private def sign(event: RawProtocolEvent): SignedContent[RawProtocolEvent] = - SignedContent(event, SymbolicCrypto.emptySignature, None, testedProtocolVersion) - - private def deliverDefault(timestamp: CantonTimestamp): SequencedProtocolEvent = - SequencedEventWithTraceContext( - sign( - SequencerTestUtils.mockDeliver( - timestamp = timestamp, - synchronizerId = DefaultTestIdentities.physicalSynchronizerId, - ) - ) - )( - traceContext - ) - - private def deliver( - msgId: MessageId, - timestamp: CantonTimestamp, - trafficReceipt: Option[TrafficReceipt] = None, - ): SequencedProtocolEvent = - SequencedEventWithTraceContext( - sign( - Deliver.create( - None, - timestamp, - DefaultTestIdentities.physicalSynchronizerId, - Some(msgId), - Batch.empty(testedProtocolVersion), - None, - testedProtocolVersion, - trafficReceipt, - ) - ) - )( - traceContext - ) - - private def deliverError( - msgId: MessageId, - timestamp: CantonTimestamp, - trafficReceipt: Option[TrafficReceipt] = None, - ): SequencedProtocolEvent = - SequencedEventWithTraceContext( - sign( - DeliverError.create( - None, - timestamp, - DefaultTestIdentities.physicalSynchronizerId, - msgId, - SequencerErrors.SubmissionRequestRefused("test"), - testedProtocolVersion, - trafficReceipt, - ) - ) - )( - traceContext - ) - - private case class Env(tracker: MySendTracker, store: InMemorySendTrackerStore) - - private class MySendTracker( - initialPendingSends: Map[MessageId, CantonTimestamp], - store: SendTrackerStore, - metrics: SequencerClientMetrics, - loggerFactory: NamedLoggerFactory, - timeouts: ProcessingTimeout, - 
timeoutHandler: MessageId => FutureUnlessShutdown[Unit], - val trafficStateController: Option[TrafficStateController], - )(implicit executionContext: ExecutionContext) - extends SendTracker( - initialPendingSends, - store, - metrics, - loggerFactory, - timeouts, - trafficStateController, - ) { - - private val calls = new AtomicInteger() - - def callCount = calls.get() - - def assertNotCalled = callCount shouldBe 0 - - override def handleTimeout( - timestamp: CantonTimestamp - )(msgId: MessageId)(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - calls.incrementAndGet() - timeoutHandler(msgId).flatMap { _ => - super.handleTimeout(timestamp)(msgId) - } - } - - } - - private val initialTrafficState = TrafficState.empty - private def mkSendTracker( - timeoutHandler: MessageId => FutureUnlessShutdown[Unit] = _ => FutureUnlessShutdown.unit - ): Env = { - val store = new InMemorySendTrackerStore() - val topologyClient = - TestingTopology(Set(DefaultTestIdentities.physicalSynchronizerId)) - .build(loggerFactory) - .forOwnerAndSynchronizer(participant1, DefaultTestIdentities.physicalSynchronizerId) - - val histogramInventory = new HistogramInventory() - val trafficStateController = new TrafficStateController( - DefaultTestIdentities.participant1, - loggerFactory, - topologyClient, - initialTrafficState, - testedProtocolVersion, - new EventCostCalculator(loggerFactory), - new TrafficConsumptionMetrics(MetricName("test"), metricsFactory(histogramInventory)), - DefaultTestIdentities.physicalSynchronizerId, - ) - val tracker = - new MySendTracker( - Map.empty, - store, - metrics, - loggerFactory, - timeouts, - timeoutHandler, - Some(trafficStateController), - ) - - Env(tracker, store) - } - - implicit private val eventSpecificMetricsContext: MetricsContext = MetricsContext( - "test" -> "value" - ) - - "tracking sends" should { - - "error if there's a previously tracked send with the same message id" in { - val Env(tracker, _) = mkSendTracker() - - for { - _ <- tracker.track(msgId1, CantonTimestamp.MinValue).valueOrFail("track first") - error <- tracker.track(msgId1, CantonTimestamp.MinValue).swap.valueOrFail("track second") - } yield error shouldBe SavePendingSendError.MessageIdAlreadyTracked - } - - "is able to track a send with a prior message id if a receipt is observed" in { - val Env(tracker, _) = mkSendTracker() - - for { - _ <- tracker.track(msgId1, CantonTimestamp.MinValue).valueOrFail("track first") - _ <- tracker.update(Seq(deliver(msgId1, CantonTimestamp.MinValue))) - _ <- tracker - .track(msgId1, CantonTimestamp.MinValue) - .valueOrFail( - "track same msgId after receipt" - ) - } yield tracker.assertNotCalled - } - - "propagate metrics context" in { - val Env(tracker, _) = mkSendTracker() - - for { - _ <- tracker.track(msgId1, CantonTimestamp.MinValue).valueOrFail("track first") - _ <- tracker.update( - Seq( - deliver( - msgId1, - initialTrafficState.timestamp.immediateSuccessor, - trafficReceipt = Some( - TrafficReceipt( - consumedCost = NonNegativeLong.tryCreate(1), - extraTrafficConsumed = NonNegativeLong.tryCreate(2), - baseTrafficRemainder = NonNegativeLong.tryCreate(3), - ) - ), - ) - ) - ) - _ = tracker.trafficStateController.value.updateBalance( - NonNegativeLong.tryCreate(20), - PositiveInt.one, - CantonTimestamp.MaxValue, - ) - } yield { - assertLongValue("test.extra-traffic-purchased", 20L) - assertInContext( - "test.extra-traffic-purchased", - "member", - DefaultTestIdentities.participant1.toString, - ) - assertLongValue("test.event-delivered-cost", 1L) - 
assertInContext(
- "test.event-delivered-cost",
- "synchronizer",
- DefaultTestIdentities.physicalSynchronizerId.toString,
- )
- assertInContext(
- "test.event-delivered-cost",
- "member",
- DefaultTestIdentities.participant1.toString,
- )
- // Event specific metrics should contain the event specific metrics context
- assertInContext("test.event-delivered-cost", "test", "value")
- assertLongValue("test.extra-traffic-consumed", 2L)
- assertInContext(
- "test.extra-traffic-consumed",
- "member",
- DefaultTestIdentities.participant1.toString,
- )
- assertInContext(
- "test.extra-traffic-consumed",
- "synchronizer",
- DefaultTestIdentities.physicalSynchronizerId.toString,
- )
- // But not the event agnostic metrics
- assertNotInContext("test.extra-traffic-consumed", "test")
- }
- }
-
- "not re-export metrics when replaying events older than current state" in {
- val Env(tracker, _) = mkSendTracker()
-
- for {
- _ <- tracker.track(msgId1, CantonTimestamp.MinValue).valueOrFail("track first")
- _ <- tracker.update(
- Seq(
- deliver(
- msgId1,
- initialTrafficState.timestamp,
- trafficReceipt = Some(
- TrafficReceipt(
- consumedCost = NonNegativeLong.tryCreate(1),
- extraTrafficConsumed = NonNegativeLong.tryCreate(2),
- baseTrafficRemainder = NonNegativeLong.tryCreate(3),
- )
- ),
- )
- )
- )
- } yield {
- assertNoValue("event-delivered-cost")
- }
- }
-
- "metrics should contain default labels for unknown sends" in {
- val Env(tracker, _) = mkSendTracker()
-
- for {
- _ <- tracker.update(
- Seq(
- deliver(
- msgId1,
- initialTrafficState.timestamp.immediateSuccessor,
- trafficReceipt = Some(
- TrafficReceipt(
- consumedCost = NonNegativeLong.tryCreate(1),
- extraTrafficConsumed = NonNegativeLong.tryCreate(2),
- baseTrafficRemainder = NonNegativeLong.tryCreate(3),
- )
- ),
- )
- )
- )
- } yield {
- assertLongValue("test.event-delivered-cost", 1L)
- assertInContext(
- "test.event-delivered-cost",
- "member",
- DefaultTestIdentities.participant1.toString,
- )
- // Check there are labels for application-id and type
- assertInContext("test.event-delivered-cost", "application-id", "unknown")
- assertInContext("test.event-delivered-cost", "type", "unknown")
- }
- }
- }
-
- "updating" should {
- def verifyEventRemovesPendingSend(event: SequencedProtocolEvent) = {
- val Env(tracker, store) = mkSendTracker()
-
- for {
- _ <- tracker.track(msgId1, CantonTimestamp.MinValue).valueOrFail("track msgId1")
- _ <- tracker.track(msgId2, CantonTimestamp.MinValue).valueOrFail("track msgId2")
- pendingSends1 <- store.fetchPendingSends
- _ = pendingSends1 shouldBe Map(
- msgId1 -> CantonTimestamp.MinValue,
- msgId2 -> CantonTimestamp.MinValue,
- )
- _ <- tracker.update(Seq(event))
- pendingSends2 <- store.fetchPendingSends
- _ = pendingSends2 shouldBe Map(
- msgId2 -> CantonTimestamp.MinValue
- )
- } yield tracker.assertNotCalled
- }
-
- "remove tracked send on deliver event" in verifyEventRemovesPendingSend(
- deliver(msgId1, CantonTimestamp.MinValue)
- )
-
- "remove tracked send on deliver error event" in verifyEventRemovesPendingSend(
- deliverError(msgId1, CantonTimestamp.MinValue)
- )
-
- "notify only timed out events" in {
- val Env(tracker, _) = mkSendTracker()
-
- for {
- _ <- tracker.track(msgId1, CantonTimestamp.MinValue).valueOrFail("track msgId1")
- _ <- tracker
- .track(msgId2, CantonTimestamp.MinValue.plusSeconds(2))
- .valueOrFail("track msgId2")
- _ <-
- tracker.update(
- Seq(
- deliverDefault(CantonTimestamp.MinValue.plusSeconds(1))
- )
- )
- _ = tracker.callCount shouldBe 1
- _ <- tracker.update(
- 
Seq( - deliverDefault(CantonTimestamp.MinValue.plusSeconds(3)) - ) - ) - } yield tracker.callCount shouldBe 2 - } - - "not get upset if we see the same message id twice" in { - // during reconnects we may replay the same deliver/deliverEvent - val Env(tracker, _) = mkSendTracker() - - for { - _ <- tracker.track(msgId1, CantonTimestamp.MinValue).valueOrFail("track msgId1") - _ <- tracker.update(Seq(deliver(msgId1, CantonTimestamp.MinValue))) - _ <- tracker.update(Seq(deliver(msgId1, CantonTimestamp.MinValue))) - } yield succeed - } - - "call timeout handlers sequentially" in { - val concurrentCalls = new AtomicInteger() - val totalCalls = new AtomicInteger() - - val Env(tracker, _) = mkSendTracker { _ => - totalCalls.incrementAndGet() - if (!concurrentCalls.compareAndSet(0, 1)) { - fail("timeout handler was called concurrently") - } - - FutureUnlessShutdown.outcomeF(Future { - if (!concurrentCalls.compareAndSet(1, 0)) { - fail("timeout handler was called concurrently") - } - }) - } - - for { - _ <- tracker.track(msgId1, CantonTimestamp.MinValue).valueOrFail("track msgId1") - _ <- tracker.track(msgId2, CantonTimestamp.MinValue).valueOrFail("track msgId2") - _ <- tracker.update(Seq(deliverDefault(CantonTimestamp.MinValue.plusSeconds(1)))) - } yield totalCalls.get() shouldBe 2 - } - - "track callback" should { - class CaptureSendResultHandler { - private val calledWithP = Promise[UnlessShutdown[SendResult]]() - val handler: SendCallback = result => { - calledWithP.success(result) - } - - val result: FutureUnlessShutdown[SendResult] = FutureUnlessShutdown(calledWithP.future) - } - - "callback with successful send" in { - val Env(tracker, _) = mkSendTracker() - val sendResultHandler = new CaptureSendResultHandler - - for { - _ <- tracker - .track(msgId1, CantonTimestamp.MinValue, sendResultHandler.handler) - .valueOrFail("track msgId1") - _ <- tracker.update(Seq(deliver(msgId1, CantonTimestamp.MinValue))) - calledWith <- sendResultHandler.result - } yield calledWith should matchPattern { case SendResult.Success(_) => - } - } - - "callback with deliver error" in { - val Env(tracker, _) = mkSendTracker() - val sendResultHandler = new CaptureSendResultHandler - - for { - _ <- tracker - .track(msgId1, CantonTimestamp.MinValue, sendResultHandler.handler) - .valueOrFail("track msgId1") - _ <- tracker.update( - Seq(deliverError(msgId1, CantonTimestamp.MinValue)) - ) - calledWith <- sendResultHandler.result - } yield calledWith should matchPattern { case SendResult.Error(_) => - } - } - - "callback with timeout" in { - val Env(tracker, _) = mkSendTracker() - val sendResultHandler = new CaptureSendResultHandler - val sendMaxSequencingTime = CantonTimestamp.MinValue - val deliverEventTime = sendMaxSequencingTime.plusSeconds(1) - - for { - _ <- tracker - .track(msgId1, sendMaxSequencingTime, sendResultHandler.handler) - .valueOrFail("track msgId1") - _ <- tracker.update(Seq(deliverDefault(deliverEventTime))) - calledWith <- sendResultHandler.result - } yield calledWith should matchPattern { case SendResult.Timeout(`deliverEventTime`) => - } - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/SequencedEventTestFixture.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/SequencedEventTestFixture.scala deleted file mode 100644 index 88ea0670de..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/SequencedEventTestFixture.scala +++ /dev/null @@ -1,244 +0,0 
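// [Editor's sketch] The SendTrackerTest deleted above exercises a reusable pattern: pending
// sends are keyed by message id with a max-sequencing time, a deliver or deliver-error receipt
// clears them, replayed receipts are no-ops, and observing an event timestamp past a deadline
// times the send out, with timeout handlers invoked strictly sequentially. A minimal,
// self-contained illustration under those assumptions (toy types, not Canton's SendTracker API):
final class ToySendTracker[Id](onTimeout: Id => Unit) {
  private var pending = Map.empty[Id, Long] // message id -> max sequencing time

  /** Rejects duplicate tracking, mirroring SavePendingSendError.MessageIdAlreadyTracked. */
  def track(id: Id, maxSequencingTime: Long): Either[String, Unit] =
    if (pending.contains(id)) Left(s"message id $id already tracked")
    else { pending += id -> maxSequencingTime; Right(()) }

  /** A receipt clears the pending send; replaying the same receipt (e.g. on reconnect) is a no-op. */
  def observeReceipt(id: Id): Unit = pending -= id

  /** Times out every pending send whose deadline lies before the observed timestamp;
    * handlers run one after another, never concurrently. */
  def observeTimestamp(now: Long): Unit = {
    val (timedOut, live) = pending.partition { case (_, deadline) => deadline < now }
    pending = live
    timedOut.keys.foreach(onTimeout) // sequential by construction
  }
}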
@@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing.client - -import cats.syntax.either.* -import com.daml.nonempty.NonEmptyUtil -import com.digitalasset.canton.SequencerCounter -import com.digitalasset.canton.concurrent.FutureSupervisor -import com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.crypto.* -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.protocol.ExampleTransactionFactory -import com.digitalasset.canton.protocol.messages.{EnvelopeContent, InformeeMessage} -import com.digitalasset.canton.sequencing.SequencerAggregator.MessageAggregationConfig -import com.digitalasset.canton.sequencing.protocol.* -import com.digitalasset.canton.sequencing.traffic.TrafficReceipt -import com.digitalasset.canton.sequencing.{ - OrdinarySerializedEvent, - SequencedSerializedEvent, - SequencerAggregator, - SequencerTestUtils, -} -import com.digitalasset.canton.store.SequencedEventStore.OrdinarySequencedEvent -import com.digitalasset.canton.topology.* -import com.digitalasset.canton.topology.DefaultTestIdentities.namespace -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.version.ProtocolVersion -import com.google.protobuf.ByteString -import org.apache.pekko.actor.ActorSystem -import org.apache.pekko.stream.Materializer -import org.scalatest.Assertions.fail -import org.scalatest.concurrent.{PatienceConfiguration, ScalaFutures} -import org.scalatest.time.{Seconds, Span} - -import scala.concurrent.ExecutionContext - -class SequencedEventTestFixture( - loggerFactory: NamedLoggerFactory, - testedProtocolVersion: ProtocolVersion, - timeouts: ProcessingTimeout, - futureSupervisor: FutureSupervisor, -)(implicit private val traceContext: TraceContext, executionContext: ExecutionContext) - extends AutoCloseable { - import ScalaFutures.* - def fixtureTraceContext: TraceContext = traceContext - - private lazy val factory: ExampleTransactionFactory = new ExampleTransactionFactory()()( - executionContext, - traceContext, - ) - - lazy val defaultSynchronizerId: PhysicalSynchronizerId = - DefaultTestIdentities.physicalSynchronizerId - lazy val subscriberId: ParticipantId = ParticipantId("participant1-id") - lazy val sequencerAlice: SequencerId = DefaultTestIdentities.sequencerId - lazy val subscriberCryptoApi: SynchronizerCryptoClient = - TestingIdentityFactory(loggerFactory).forOwnerAndSynchronizer(subscriberId) - private lazy val sequencerCryptoApi: SynchronizerCryptoClient = - TestingIdentityFactory(loggerFactory).forOwnerAndSynchronizer(sequencerAlice) - lazy val updatedCounter: Long = 42L - val sequencerBob: SequencerId = SequencerId( - UniqueIdentifier.tryCreate("da2", namespace) - ) - val sequencerCarlos: SequencerId = SequencerId( - UniqueIdentifier.tryCreate("da3", namespace) - ) - implicit val actorSystem: ActorSystem = ActorSystem( - classOf[SequencedEventTestFixture].getSimpleName - ) - implicit val materializer: Materializer = Materializer(actorSystem) - - private val alice = ParticipantId(UniqueIdentifier.tryCreate("participant", "alice")) - private val bob = ParticipantId(UniqueIdentifier.tryCreate("participant", 
"bob")) - private val carlos = ParticipantId(UniqueIdentifier.tryCreate("participant", "carlos")) - private val signatureAlice = SymbolicCrypto.signature( - ByteString.copyFromUtf8("signatureAlice1"), - alice.fingerprint, - ) - private val signatureBob = SymbolicCrypto.signature( - ByteString.copyFromUtf8("signatureBob1"), - bob.fingerprint, - ) - private val signatureCarlos = SymbolicCrypto.signature( - ByteString.copyFromUtf8("signatureCarlos1"), - carlos.fingerprint, - ) - lazy val aliceEvents: Seq[SequencedSerializedEvent] = (1 to 5).map(s => - createEvent( - timestamp = CantonTimestamp.Epoch.plusSeconds(s.toLong), - previousTimestamp = Option.when(s > 1)(CantonTimestamp.Epoch.plusSeconds(s.toLong - 1)), - counter = updatedCounter + s.toLong, - signatureOverride = Some(signatureAlice), - ).onShutdown(throw new RuntimeException("failed to create alice event")).futureValue - ) - lazy val bobEvents: Seq[SequencedSerializedEvent] = (1 to 5).map(s => - createEvent( - timestamp = CantonTimestamp.Epoch.plusSeconds(s.toLong), - previousTimestamp = - if (s > 1) Some(CantonTimestamp.Epoch.plusSeconds(s.toLong - 1)) else None, - counter = updatedCounter + s.toLong, - signatureOverride = Some(signatureBob), - ).onShutdown(throw new RuntimeException("failed to create bob event")).futureValue - ) - lazy val carlosEvents: Seq[SequencedSerializedEvent] = (1 to 5).map(s => - createEvent( - timestamp = CantonTimestamp.Epoch.plusSeconds(s.toLong), - previousTimestamp = - if (s > 1) Some(CantonTimestamp.Epoch.plusSeconds(s.toLong - 1)) else None, - counter = updatedCounter + s.toLong, - signatureOverride = Some(signatureCarlos), - ).onShutdown(throw new RuntimeException("failed to create carlos event")).futureValue - ) - - def mkAggregator( - config: MessageAggregationConfig = MessageAggregationConfig( - NonEmptyUtil.fromUnsafe(Set(sequencerAlice)), - PositiveInt.tryCreate(1), - ) - ) = - new SequencerAggregator( - cryptoPureApi = subscriberCryptoApi.pureCrypto, - eventInboxSize = PositiveInt.tryCreate(2), - loggerFactory = loggerFactory, - initialConfig = config, - timeouts = timeouts, - futureSupervisor = futureSupervisor, - ) - - def config( - expectedSequencers: Set[SequencerId] = Set(sequencerAlice), - sequencerTrustThreshold: Int = 1, - ): MessageAggregationConfig = - MessageAggregationConfig( - NonEmptyUtil.fromUnsafe(expectedSequencers), - PositiveInt.tryCreate(sequencerTrustThreshold), - ) - - def mkValidator( - syncCryptoApi: SynchronizerCryptoClient = subscriberCryptoApi - )(implicit executionContext: ExecutionContext): SequencedEventValidatorImpl = - new SequencedEventValidatorImpl( - defaultSynchronizerId, - testedProtocolVersion, - syncCryptoApi, - loggerFactory, - timeouts, - )(executionContext) - - def createEvent( - synchronizerId: PhysicalSynchronizerId = defaultSynchronizerId, - signatureOverride: Option[Signature] = None, - serializedOverride: Option[ByteString] = None, - counter: Long = updatedCounter, - timestamp: CantonTimestamp = CantonTimestamp.Epoch, - previousTimestamp: Option[CantonTimestamp] = None, - topologyTimestamp: Option[CantonTimestamp] = None, - ): FutureUnlessShutdown[SequencedSerializedEvent] = { - import cats.syntax.option.* - val message = { - val fullInformeeTree = factory.MultipleRootsAndViewNestings.fullInformeeTree - InformeeMessage(fullInformeeTree, Signature.noSignature)(testedProtocolVersion) - } - val envelope = ClosedEnvelope.create( - serializedOverride.getOrElse( - EnvelopeContent.tryCreate(message, testedProtocolVersion).toByteString - ), - 
Recipients.cc(subscriberId), - Seq.empty, - testedProtocolVersion, - ) - val deliver: Deliver[ClosedEnvelope] = Deliver.create[ClosedEnvelope]( - previousTimestamp = previousTimestamp, - timestamp, - synchronizerId, - MessageId.tryCreate("test").some, - Batch(List(envelope), testedProtocolVersion), - topologyTimestamp, - testedProtocolVersion, - Option.empty[TrafficReceipt], - ) - - for { - sig <- signatureOverride - .map(FutureUnlessShutdown.pure) - .getOrElse(sign(deliver.getCryptographicEvidence, deliver.timestamp)) - } yield OrdinarySequencedEvent( - SequencerCounter(counter), - SignedContent(deliver, sig, None, testedProtocolVersion), - )(traceContext).asSequencedSerializedEvent - } - - def createEventWithCounterAndTs( - counter: Long, - timestamp: CantonTimestamp, - customSerialization: Option[ByteString] = None, - messageIdO: Option[MessageId] = None, - topologyTimestampO: Option[CantonTimestamp] = None, - previousTimestamp: Option[CantonTimestamp] = None, - )(implicit executionContext: ExecutionContext): FutureUnlessShutdown[OrdinarySerializedEvent] = { - val event = - SequencerTestUtils.mockDeliverClosedEnvelope( - timestamp = timestamp, - deserializedFrom = customSerialization, - messageId = messageIdO, - topologyTimestampO = topologyTimestampO, - previousTimestamp = previousTimestamp, - ) - for { - signature <- sign( - customSerialization.getOrElse(event.getCryptographicEvidence), - event.timestamp, - ) - } yield OrdinarySequencedEvent( - SequencerCounter(counter), - SignedContent(event, signature, None, testedProtocolVersion), - )(traceContext) - } - - def ts(offset: Int): CantonTimestamp = CantonTimestamp.Epoch.plusSeconds(offset.toLong) - - def sign(bytes: ByteString, timestamp: CantonTimestamp)(implicit - executionContext: ExecutionContext - ): FutureUnlessShutdown[Signature] = - for { - cryptoApi <- sequencerCryptoApi.snapshot(timestamp) - signature <- cryptoApi - .sign(hash(bytes), SigningKeyUsage.ProtocolOnly) - .value - .map(_.valueOr(err => fail(s"Failed to sign: $err")))(executionContext) - } yield signature - - def hash(bytes: ByteString): Hash = - sequencerCryptoApi.pureCrypto.digest(HashPurpose.SequencedEventSignature, bytes) - - override def close(): Unit = { - actorSystem.terminate().futureValue(PatienceConfiguration.Timeout(Span(3, Seconds))) - materializer.shutdown() - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/SequencedEventValidatorTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/SequencedEventValidatorTest.scala deleted file mode 100644 index a1506d4077..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/SequencedEventValidatorTest.scala +++ /dev/null @@ -1,671 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
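// [Editor's sketch] The SequencedEventTestFixture deleted above builds its test events by
// serializing a Deliver, signing the serialized bytes at the event's timestamp, and wrapping
// event plus signature in a SignedContent. A minimal, self-contained rendering of that shape,
// with toy case classes standing in for Canton's types and a keyed digest standing in for a
// real signature (hypothetical names throughout):
import java.nio.charset.StandardCharsets
import java.security.MessageDigest

final case class ToyDeliver(timestamp: Long, previousTimestamp: Option[Long], payload: String) {
  def cryptographicEvidence: Array[Byte] =
    s"$previousTimestamp|$timestamp|$payload".getBytes(StandardCharsets.UTF_8)
}
final case class ToySignedEvent(event: ToyDeliver, signature: Array[Byte])

object ToyFixture {
  // Digest over (signer fingerprint ++ evidence); a placeholder for symbolic-crypto signing.
  private def mockSign(evidence: Array[Byte], signer: String): Array[Byte] = {
    val md = MessageDigest.getInstance("SHA-256")
    md.update(signer.getBytes(StandardCharsets.UTF_8))
    md.digest(evidence)
  }

  def createEvent(ts: Long, prev: Option[Long], signer: String): ToySignedEvent = {
    val event = ToyDeliver(ts, prev, payload = "envelope")
    ToySignedEvent(event, mockSign(event.cryptographicEvidence, signer))
  }

  // As with aliceEvents/bobEvents above: identical timestamp chains, differing signatures.
  val aliceEvents: Seq[ToySignedEvent] =
    (1 to 5).map(s => createEvent(s.toLong, Option.when(s > 1)(s.toLong - 1L), "alice"))
}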
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing.client - -import cats.syntax.either.* -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.crypto.* -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.health.HealthComponent.AlwaysHealthyComponent -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.logging.pretty.Pretty -import com.digitalasset.canton.sequencing.client.SequencedEventValidationError.* -import com.digitalasset.canton.sequencing.protocol.{ClosedEnvelope, SequencedEvent} -import com.digitalasset.canton.store.SequencedEventStore.IgnoredSequencedEvent -import com.digitalasset.canton.topology.* -import com.digitalasset.canton.util.PekkoUtil.noOpKillSwitch -import com.digitalasset.canton.util.PekkoUtilTest.withNoOpKillSwitch -import com.digitalasset.canton.util.ResourceUtil -import com.digitalasset.canton.{BaseTest, HasExecutionContext, SequencerCounter} -import com.google.protobuf.ByteString -import org.apache.pekko.stream.scaladsl.{Keep, Sink, Source} -import org.scalatest.wordspec.FixtureAnyWordSpec -import org.scalatest.{Assertion, Outcome} - -class SequencedEventValidatorTest - extends FixtureAnyWordSpec - with BaseTest - with HasExecutionContext { - - override type FixtureParam = SequencedEventTestFixture - - override def withFixture(test: OneArgTest): Outcome = - ResourceUtil.withResource( - new SequencedEventTestFixture( - loggerFactory, - testedProtocolVersion, - timeouts, - futureSupervisor, - ) - )(env => withFixture(test.toNoArgTest(env))) - - "validate on reconnect" should { - "accept the prior event" in { fixture => - import fixture.* - val priorEvent = createEvent().futureValueUS - val validator = mkValidator() - validator - .validateOnReconnect(Some(priorEvent), priorEvent, DefaultTestIdentities.sequencerId) - .valueOrFail("successful reconnect") - .failOnShutdown - .futureValue - } - - "accept a new signature on the prior event" in { fixture => - import fixture.* - val priorEvent = createEvent().futureValueUS - val validator = mkValidator() - val sig = sign( - priorEvent.signedEvent.content.getCryptographicEvidence, - CantonTimestamp.Epoch, - ).futureValueUS - assert(sig != priorEvent.signedEvent.signature) - val eventWithNewSig = - priorEvent.copy(signedEvent = priorEvent.signedEvent.copy(signatures = NonEmpty(Seq, sig)))( - traceContext = fixtureTraceContext - ) - validator - .validateOnReconnect(Some(priorEvent), eventWithNewSig, DefaultTestIdentities.sequencerId) - .valueOrFail("event with regenerated signature") - .failOnShutdown - .futureValue - } - - "accept a different serialization of the same content" in { fixture => - import fixture.* - val deliver1 = createEventWithCounterAndTs(1L, CantonTimestamp.Epoch).futureValueUS - val deliver2 = createEventWithCounterAndTs( - 1L, - CantonTimestamp.Epoch, - customSerialization = Some(ByteString.copyFromUtf8("Different serialization")), - ).futureValueUS // changing serialization, but not the contents - - val validator = mkValidator() - validator - .validateOnReconnect( - Some(deliver1), - deliver2.asSequencedSerializedEvent, - DefaultTestIdentities.sequencerId, - ) - .valueOrFail("Different serialization should be accepted") - .failOnShutdown - .futureValue - } - - "check the synchronizer id" in { fixture => - import fixture.* - val incorrectSynchronizerId = - SynchronizerId(UniqueIdentifier.tryFromProtoPrimitive("wrong-synchronizer::id")).toPhysical - val validator = mkValidator() - val 
wrongSynchronizer = createEvent(incorrectSynchronizerId).futureValueUS - val err = validator - .validateOnReconnect( - Some( - IgnoredSequencedEvent( - CantonTimestamp.MinValue, - SequencerCounter(updatedCounter), - None, - )( - fixtureTraceContext - ) - ), - wrongSynchronizer, - DefaultTestIdentities.sequencerId, - ) - .leftOrFail("wrong synchronizer id on reconnect") - .failOnShutdown - .futureValue - - err shouldBe BadSynchronizerId(defaultSynchronizerId, incorrectSynchronizerId) - } - - "check for a fork" in { fixture => - import fixture.* - - def expectLog[E]( - cmd: => FutureUnlessShutdown[SequencedEventValidationError[E]] - ): SequencedEventValidationError[E] = - loggerFactory - .assertLogs(cmd, _.shouldBeCantonErrorCode(ResilientSequencerSubscription.ForkHappened)) - .failOnShutdown - .futureValue - - val priorEvent = createEvent(timestamp = CantonTimestamp.Epoch).futureValueUS - val validator = mkValidator() - val differentTimestamp = createEvent(timestamp = CantonTimestamp.MaxValue).futureValueUS - val errTimestamp = expectLog( - validator - .validateOnReconnect( - Some(priorEvent), - differentTimestamp, - DefaultTestIdentities.sequencerId, - ) - .leftOrFail("fork on timestamp") - ) - - val differentContent = createEventWithCounterAndTs( - counter = updatedCounter, - CantonTimestamp.Epoch, - ).futureValueUS - - val errContent = expectLog( - validator - .validateOnReconnect( - Some(priorEvent), - differentContent.asSequencedSerializedEvent, - DefaultTestIdentities.sequencerId, - ) - .leftOrFail("fork on content") - ) - - def assertFork[E](err: SequencedEventValidationError[E])( - timestamp: CantonTimestamp, - suppliedEvent: SequencedEvent[ClosedEnvelope], - expectedEvent: Option[SequencedEvent[ClosedEnvelope]], - ): Assertion = - err match { - case ForkHappened(timestampRes, suppliedEventRes, expectedEventRes) => - ( - timestamp, - suppliedEvent, - expectedEvent, - ) shouldBe (timestampRes, suppliedEventRes, expectedEventRes) - case x => fail(s"$x is not ForkHappened") - } - - assertFork(errTimestamp)( - CantonTimestamp.Epoch, - differentTimestamp.signedEvent.content, - Some(priorEvent.signedEvent.content), - ) - - assertFork(errContent)( - CantonTimestamp.Epoch, - differentContent.signedEvent.content, - Some(priorEvent.signedEvent.content), - ) - } - - "verify the signature" in { fixture => - import fixture.* - val priorEvent = createEvent(previousTimestamp = Some(CantonTimestamp.MinValue)).futureValueUS - val badSig = - sign(ByteString.copyFromUtf8("not-the-message"), CantonTimestamp.Epoch).futureValueUS - val badEvent = createEvent( - signatureOverride = Some(badSig), - previousTimestamp = Some(CantonTimestamp.MinValue), - ).futureValueUS - val validator = mkValidator() - val result = validator - .validateOnReconnect(Some(priorEvent), badEvent, DefaultTestIdentities.sequencerId) - .leftOrFail("invalid signature on reconnect") - .failOnShutdown - .futureValue - result shouldBe a[SignatureInvalid] - } - } - - "validate" should { - "reject messages with unexpected synchronizer ids" in { fixture => - import fixture.* - val incorrectSynchronizerId = - SynchronizerId(UniqueIdentifier.tryFromProtoPrimitive("wrong-synchronizer::id")).toPhysical - val event = createEvent(incorrectSynchronizerId, counter = 0L).futureValueUS - val validator = mkValidator() - val result = validator - .validate(None, event, DefaultTestIdentities.sequencerId) - .leftOrFail("wrong synchronizer id") - .failOnShutdown - .futureValue - result shouldBe BadSynchronizerId(`defaultSynchronizerId`, 
`incorrectSynchronizerId`) - } - - "reject messages with invalid signatures" in { fixture => - import fixture.* - val priorEvent = - createEvent( - previousTimestamp = Some(CantonTimestamp.MinValue), - timestamp = CantonTimestamp.Epoch.immediatePredecessor, - counter = 42L, - ).futureValueUS - val badSig = - sign(ByteString.copyFromUtf8("not-the-message"), CantonTimestamp.Epoch).futureValueUS - val badEvent = createEvent( - previousTimestamp = Some(priorEvent.timestamp), - signatureOverride = Some(badSig), - timestamp = CantonTimestamp.Epoch.immediateSuccessor.immediateSuccessor, - counter = 43L, - ).futureValueUS - val validator = mkValidator() - val result = validator - .validate(Some(priorEvent), badEvent, DefaultTestIdentities.sequencerId) - .leftOrFail("invalid signature") - .failOnShutdown - .futureValue - result shouldBe a[SignatureInvalid] - } - - "validate correctly with explicit topology timestamp" in { fixture => - import fixture.* - val syncCrypto = mock[SynchronizerCryptoClient] - when(syncCrypto.pureCrypto).thenReturn(subscriberCryptoApi.pureCrypto) - when(syncCrypto.snapshot(timestamp = ts(1))(fixtureTraceContext)) - .thenAnswer[CantonTimestamp](tm => subscriberCryptoApi.snapshot(tm)(fixtureTraceContext)) - when(syncCrypto.topologyKnownUntilTimestamp).thenReturn(CantonTimestamp.MaxValue) - val validator = mkValidator(syncCryptoApi = syncCrypto) - val priorEvent = - IgnoredSequencedEvent( - previousTimestamp = Some(CantonTimestamp.MinValue), // PT=None skips the signature check - timestamp = ts(0), - counter = SequencerCounter(41), - underlying = None, - )(fixtureTraceContext) - val deliver = - createEventWithCounterAndTs( - previousTimestamp = Some(priorEvent.timestamp), - timestamp = ts(2), - counter = 42, - topologyTimestampO = Some(ts(1)), - ).futureValueUS - - valueOrFail( - validator.validate( - Some(priorEvent), - deliver.asSequencedSerializedEvent, - DefaultTestIdentities.sequencerId, - ) - )( - "validate" - ).failOnShutdown.futureValue - } - - "reject the same previous timestamp, timestamp if passed in repeatedly" in { fixture => - import fixture.* - val priorEvent = - IgnoredSequencedEvent( - previousTimestamp = Some(CantonTimestamp.MinValue), - timestamp = CantonTimestamp.Epoch, - counter = SequencerCounter(41), - underlying = None, - )( - fixtureTraceContext - ) - val validator = mkValidator() - - val deliver = createEventWithCounterAndTs( - counter = 42, - timestamp = CantonTimestamp.ofEpochSecond(1), - previousTimestamp = Some(priorEvent.timestamp), - ).futureValueUS - validator - .validate( - Some(priorEvent), - deliver.asSequencedSerializedEvent, - DefaultTestIdentities.sequencerId, - ) - .valueOrFail("validate1") - .failOnShutdown - .futureValue - val err = validator - .validate( - Some(deliver), - deliver.asSequencedSerializedEvent, - DefaultTestIdentities.sequencerId, - ) - .leftOrFail("validate2") - .failOnShutdown - .futureValue - - err shouldBe PreviousTimestampMismatch(deliver.previousTimestamp, Some(deliver.timestamp)) - } - - "fail if the timestamp do not increase" in { fixture => - import fixture.* - val priorEvent = - IgnoredSequencedEvent( - previousTimestamp = Some(CantonTimestamp.MinValue.immediateSuccessor), - timestamp = CantonTimestamp.Epoch, - counter = SequencerCounter(41), - underlying = None, - )( - fixtureTraceContext - ) - val validator = - mkValidator() - - val deliver = createEventWithCounterAndTs( - previousTimestamp = Some(priorEvent.timestamp), - timestamp = CantonTimestamp.MinValue, - counter = 42L, - ).futureValueUS - - val error 
= validator - .validate( - Some(priorEvent), - deliver.asSequencedSerializedEvent, - DefaultTestIdentities.sequencerId, - ) - .leftOrFail("deliver1") - .failOnShutdown - .futureValue - - error shouldBe NonIncreasingTimestamp( - newTimestamp = CantonTimestamp.MinValue, - newPreviousTimestamp = Some(priorEvent.timestamp), - oldTimestamp = CantonTimestamp.Epoch, - oldPreviousTimestamp = Some(CantonTimestamp.MinValue.immediateSuccessor), - ) - } - - "fail if there is a previous timestamp mismatch" in { fixture => - import fixture.* - val priorEventIgnore0 = - IgnoredSequencedEvent( - previousTimestamp = None, - timestamp = CantonTimestamp.Epoch, - counter = SequencerCounter(41), - underlying = None, - )(fixtureTraceContext) - val validator = mkValidator() - - val deliver1 = - createEventWithCounterAndTs( - previousTimestamp = Some(CantonTimestamp.Epoch), - timestamp = CantonTimestamp.ofEpochSecond(1), - counter = 42L, - ).futureValueUS - val deliver2 = - createEventWithCounterAndTs( - previousTimestamp = Some(CantonTimestamp.ofEpochSecond(1)), - timestamp = CantonTimestamp.ofEpochSecond(2), - counter = 43L, - ).futureValueUS - val deliver3 = - createEventWithCounterAndTs( - previousTimestamp = Some(CantonTimestamp.ofEpochSecond(2)), - timestamp = CantonTimestamp.ofEpochSecond(3), - counter = 44L, - ).futureValueUS - - val result1 = validator - .validate( - priorEventO = Some(priorEventIgnore0), - event = deliver2.asSequencedSerializedEvent, - DefaultTestIdentities.sequencerId, - ) - .leftOrFail("deliver1") - .failOnShutdown - .futureValue - - validator - .validate( - Some(priorEventIgnore0), - deliver1.asSequencedSerializedEvent, - DefaultTestIdentities.sequencerId, - ) - .valueOrFail("deliver2") - .failOnShutdown - .futureValue - - val result3 = validator - .validate( - Some(deliver1), - deliver3.asSequencedSerializedEvent, - DefaultTestIdentities.sequencerId, - ) - .leftOrFail("deliver3") - .failOnShutdown - .futureValue - - result1 shouldBe PreviousTimestampMismatch( - receivedPreviousTimestamp = deliver2.previousTimestamp, - expectedPreviousTimestamp = Some(priorEventIgnore0.timestamp), - ) - result3 shouldBe PreviousTimestampMismatch( - receivedPreviousTimestamp = deliver3.previousTimestamp, - expectedPreviousTimestamp = Some(deliver1.timestamp), - ) - } - } - - "validatePekko" should { - implicit val prettyString = Pretty.prettyString - lazy val alwaysHealthyComponent = new AlwaysHealthyComponent("validatePekko source", logger) - - "propagate the first subscription errors" in { fixture => - import fixture.* - - val validator = mkValidator() - - val errors = Seq("error1", "error2") - val source = - Source(errors.map(err => withNoOpKillSwitch(Left(err)))) - .watchTermination()((_, doneF) => noOpKillSwitch -> doneF) - val subscription = SequencerSubscriptionPekko(source, alwaysHealthyComponent) - val validatedSubscription = - validator.validatePekko(subscription, None, DefaultTestIdentities.sequencerId) - val validatedEventsF = validatedSubscription.source.runWith(Sink.seq) - validatedEventsF.futureValue.map(_.value) shouldBe Seq( - Left(UpstreamSubscriptionError("error1")) - ) - } - - "deal with stuttering" in { fixture => - import fixture.* - - val validator = mkValidator() - val deliver1 = createEventWithCounterAndTs( - counter = 42L, - timestamp = CantonTimestamp.Epoch, - previousTimestamp = None, - ).futureValueUS - val deliver2 = - createEventWithCounterAndTs( - counter = 43L, - timestamp = CantonTimestamp.ofEpochSecond(1), - previousTimestamp = Some(deliver1.timestamp), - 
).futureValueUS - val deliver3 = - createEventWithCounterAndTs( - counter = 44L, - timestamp = CantonTimestamp.ofEpochSecond(2), - previousTimestamp = Some(deliver2.timestamp), - ).futureValueUS - - val source = Source( - Seq(deliver1, deliver1, deliver2, deliver2, deliver2, deliver3).map(event => - withNoOpKillSwitch(Either.right(event.asSequencedSerializedEvent)) - ) - ).watchTermination()((_, doneF) => noOpKillSwitch -> doneF) - val subscription = SequencerSubscriptionPekko[String](source, alwaysHealthyComponent) - val validatedSubscription = - validator.validatePekko( - subscription, - Some(deliver1.asSequencedSerializedEvent), - DefaultTestIdentities.sequencerId, - ) - val validatedEventsF = validatedSubscription.source.runWith(Sink.seq) - // deliver1 should be filtered out because it's the prior event - validatedEventsF.futureValue.map(_.value) shouldBe Seq( - Right(deliver2.asSequencedSerializedEvent), - Right(deliver3.asSequencedSerializedEvent), - ) - } - - "stop upon a validation error" in { fixture => - import fixture.* - - val validator = mkValidator() - val deliver1 = createEventWithCounterAndTs( - counter = 1L, - timestamp = CantonTimestamp.Epoch, - previousTimestamp = None, - ).futureValueUS - val deliver2 = createEventWithCounterAndTs( - counter = 2L, - timestamp = CantonTimestamp.ofEpochSecond(1), - previousTimestamp = Some(CantonTimestamp.Epoch), - ).futureValueUS - val deliver3 = createEventWithCounterAndTs( - counter = 4L, - timestamp = CantonTimestamp.ofEpochSecond(3), - previousTimestamp = Some(CantonTimestamp.ofEpochSecond(2)), - ).futureValueUS - val deliver4 = createEventWithCounterAndTs( - counter = 5L, - timestamp = CantonTimestamp.ofEpochSecond(4), - previousTimestamp = Some(CantonTimestamp.ofEpochSecond(3)), - ).futureValueUS - - val source = Source( - Seq(deliver1, deliver2, deliver3, deliver4).map(event => - withNoOpKillSwitch(Right(event.asSequencedSerializedEvent)) - ) - ).watchTermination()((_, doneF) => noOpKillSwitch -> doneF) - val subscription = SequencerSubscriptionPekko(source, alwaysHealthyComponent) - val validatedSubscription = - validator.validatePekko( - subscription, - Some(deliver1.asSequencedSerializedEvent), - DefaultTestIdentities.sequencerId, - ) - val validatedEventsF = validatedSubscription.source.runWith(Sink.seq) - // deliver1 should be filtered out because it's the prior event - validatedEventsF.futureValue.map(_.value) shouldBe Seq( - Right(deliver2.asSequencedSerializedEvent), - Left( - PreviousTimestampMismatch( - receivedPreviousTimestamp = deliver3.previousTimestamp, - expectedPreviousTimestamp = Some(deliver2.timestamp), - ) - ), - ) - } - - "stop upon a validation error on reconnect" in { fixture => - import fixture.* - - val validator = mkValidator() - val deliver1 = createEventWithCounterAndTs( - counter = 1L, - timestamp = CantonTimestamp.Epoch, - previousTimestamp = None, - ).futureValueUS - // Forked event, the fork is on the previous timestamp field - val deliver1a = - createEventWithCounterAndTs( - counter = 1L, - timestamp = CantonTimestamp.Epoch, - previousTimestamp = Some(CantonTimestamp.MinValue), - ).futureValueUS - val deliver2 = createEventWithCounterAndTs( - counter = 2L, - timestamp = CantonTimestamp.ofEpochSecond(1), - previousTimestamp = Some(CantonTimestamp.Epoch), - ).futureValueUS - - val source = Source( - Seq(deliver1, deliver2).map(event => - withNoOpKillSwitch(Right(event.asSequencedSerializedEvent)) - ) - ).watchTermination()((_, doneF) => noOpKillSwitch -> doneF) - val subscription = 
SequencerSubscriptionPekko(source, alwaysHealthyComponent) - val validatedSubscription = - validator.validatePekko( - subscription, - Some(deliver1a.asSequencedSerializedEvent), - DefaultTestIdentities.sequencerId, - ) - loggerFactory.assertLogs( - validatedSubscription.source.runWith(Sink.seq).futureValue.map(_.value) shouldBe Seq( - Left( - ForkHappened( - CantonTimestamp.Epoch, - deliver1.signedEvent.content, - Some(deliver1a.signedEvent.content), - ) - ) - ), - // We get two log messages here: one from the validator that creates the error - // and one from the test case that creates the error again for the comparison - _.errorMessage should include(ResilientSequencerSubscription.ForkHappened.id), - _.errorMessage should include(ResilientSequencerSubscription.ForkHappened.id), - ) - } - - "not request a topology snapshot after a validation failure" in { fixture => - import fixture.* - - val syncCryptoApi = TestingIdentityFactory(loggerFactory) - .forOwnerAndSynchronizer( - subscriberId, - defaultSynchronizerId, - CantonTimestamp.ofEpochSecond(2), - ) - val validator = mkValidator(syncCryptoApi) - val deliver1 = createEventWithCounterAndTs( - counter = 1L, - timestamp = CantonTimestamp.Epoch, - previousTimestamp = None, - ).futureValueUS - val deliver2 = createEventWithCounterAndTs( - counter = 2L, - timestamp = CantonTimestamp.ofEpochSecond(1), - previousTimestamp = Some(CantonTimestamp.Epoch), - ).futureValueUS - val deliver3 = createEventWithCounterAndTs( - counter = 4L, - timestamp = CantonTimestamp.ofEpochSecond(3), - previousTimestamp = Some(CantonTimestamp.ofEpochSecond(2)), - ).futureValueUS - val deliver4 = - createEventWithCounterAndTs( - counter = 5L, - timestamp = CantonTimestamp.ofEpochSecond(300), - previousTimestamp = Some(CantonTimestamp.ofEpochSecond(3)), - ).futureValueUS - - // sanity-check that the topology for deliver4 is really not available - SyncCryptoClient - .getSnapshotForTimestamp( - syncCryptoApi, - deliver4.timestamp, - Some(deliver3.timestamp), - testedProtocolVersion, - warnIfApproximate = false, - ) - .failOnShutdown - .failed - .futureValue shouldBe a[IllegalArgumentException] - - val source = Source( - Seq(deliver1, deliver2, deliver3, deliver4).map(event => - withNoOpKillSwitch(Right(event.asSequencedSerializedEvent)) - ) - ).watchTermination()((_, doneF) => noOpKillSwitch -> doneF) - val subscription = SequencerSubscriptionPekko(source, alwaysHealthyComponent) - val validatedSubscription = - validator.validatePekko( - subscription, - Some(deliver1.asSequencedSerializedEvent), - DefaultTestIdentities.sequencerId, - ) - val ((killSwitch, doneF), validatedEventsF) = - validatedSubscription.source.toMat(Sink.seq)(Keep.both).run() - // deliver1 should be filtered out because it's the prior event - validatedEventsF.futureValue.map(_.value) shouldBe Seq( - Right(deliver2.asSequencedSerializedEvent), - Left( - PreviousTimestampMismatch( - receivedPreviousTimestamp = deliver3.previousTimestamp, - expectedPreviousTimestamp = Some(deliver2.timestamp), - ) - ), - ) - killSwitch.shutdown() - doneF.futureValue - } - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/SequencerAggregatorTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/SequencerAggregatorTest.scala deleted file mode 100644 index df0b996bc9..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/SequencerAggregatorTest.scala +++ /dev/null @@ -1,495 
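// [Editor's sketch] The SequencedEventValidatorTest deleted above pins down the event-chain
// invariants: timestamps strictly increase, each event's previousTimestamp must equal the
// prior event's timestamp, and on reconnect the re-delivered event must match the stored one
// in content (a regenerated signature is acceptable) or a fork is reported. A compact,
// self-contained restatement of those checks (toy types, not Canton's validator API):
sealed trait ToyValidationError
final case class NonIncreasingTimestamp(newTs: Long, oldTs: Long) extends ToyValidationError
final case class PreviousTimestampMismatch(received: Option[Long], expected: Option[Long])
    extends ToyValidationError
final case class ForkHappened(ts: Long) extends ToyValidationError

final case class ToyEvent(timestamp: Long, previousTimestamp: Option[Long], contentHash: Int)

def validate(prior: Option[ToyEvent], event: ToyEvent): Either[ToyValidationError, Unit] =
  prior match {
    case Some(p) if event.timestamp <= p.timestamp =>
      Left(NonIncreasingTimestamp(event.timestamp, p.timestamp))
    case Some(p) if !event.previousTimestamp.contains(p.timestamp) =>
      Left(PreviousTimestampMismatch(event.previousTimestamp, Some(p.timestamp)))
    case _ => Right(()) // signature and synchronizer-id checks omitted in this sketch
  }

def validateOnReconnect(prior: ToyEvent, redelivered: ToyEvent): Either[ToyValidationError, Unit] =
  if (redelivered == prior) Right(()) // content equality; signatures are compared elsewhere
  else Left(ForkHappened(redelivered.timestamp))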
+0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing.client - -import com.daml.nonempty.{NonEmpty, NonEmptyUtil} -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.sequencing.SequencerAggregator.SequencerAggregatorError -import com.digitalasset.canton.sequencing.{SequencedSerializedEvent, SequencerAggregator} -import com.digitalasset.canton.util.ResourceUtil -import com.digitalasset.canton.{ - BaseTest, - HasExecutionContext, - ProtocolVersionChecksFixtureAnyWordSpec, -} -import com.google.protobuf.ByteString -import org.scalatest.concurrent.ScalaFutures -import org.scalatest.wordspec.FixtureAnyWordSpec -import org.scalatest.{Assertion, Outcome} - -import scala.concurrent.{Future, Promise} - -class SequencerAggregatorTest - extends FixtureAnyWordSpec - with BaseTest - with ScalaFutures - with ProtocolVersionChecksFixtureAnyWordSpec - with HasExecutionContext { - - override type FixtureParam = SequencedEventTestFixture - - override def withFixture(test: OneArgTest): Outcome = - ResourceUtil.withResource( - new SequencedEventTestFixture( - loggerFactory, - testedProtocolVersion, - timeouts, - futureSupervisor, - ) - )(env => withFixture(test.toNoArgTest(env))) - - "Single sequencer aggregator" should { - "pass-through the event" in { fixture => - import fixture.* - val event = createEvent().futureValueUS - - val aggregator = mkAggregator() - - assertNoMessageDownstream(aggregator) - - aggregator - .combineAndMergeEvent(sequencerAlice, event) - .futureValueUS shouldBe Right(true) - - aggregator.eventQueue.take() shouldBe event - } - - "pass-through events in sequence" in { fixture => - import fixture.* - val events = (1 to 100).map(s => - createEvent(timestamp = CantonTimestamp.Epoch.plusSeconds(s.toLong)).futureValueUS - ) - - val aggregator = mkAggregator() - - events.foreach { event => - aggregator - .combineAndMergeEvent(sequencerAlice, event) - .futureValueUS shouldBe Right(true) - aggregator.eventQueue.take() shouldBe event - } - } - - "block on queue is full" in { fixture => - import fixture.* - val events = (1 to 2).map(s => - createEvent(timestamp = CantonTimestamp.Epoch.plusSeconds(s.toLong)).futureValueUS - ) - - val aggregator = mkAggregator() - - assertNoMessageDownstream(aggregator) - - events.foreach { event => - aggregator - .combineAndMergeEvent(sequencerAlice, event) - .futureValueUS shouldBe Right(true) - } - - val blockingEvent = - createEvent(timestamp = CantonTimestamp.Epoch.plusSeconds(3L)).futureValueUS - - val p = Promise[Future[Either[SequencerAggregatorError, Boolean]]]() - p.completeWith( - Future( - aggregator - .combineAndMergeEvent(sequencerAlice, blockingEvent) - .failOnShutdown - ) - ) - always() { - p.isCompleted shouldBe false - } - aggregator.eventQueue.take() shouldBe events(0) - eventually() { - p.isCompleted shouldBe true - } - } - - "support reconfiguration to 2 out of 3" in { fixture => - import fixture.* - - val aggregator = mkAggregator() - - assertNoMessageDownstream(aggregator) - - aggregator - .combineAndMergeEvent(sequencerAlice, aliceEvents(0)) - .futureValueUS shouldBe Right(true) - - assertDownstreamMessage(aggregator, aliceEvents(0)) - - aggregator - .combineAndMergeEvent(sequencerAlice, aliceEvents(1)) - .futureValueUS shouldBe Right(true) - - assertDownstreamMessage(aggregator, aliceEvents(1)) - - 
aggregator.changeMessageAggregationConfig( - config(Set(sequencerAlice, sequencerBob, sequencerCarlos), 2) - ) - - val f1 = aggregator - .combineAndMergeEvent(sequencerAlice, aliceEvents(2)) - - f1.isCompleted shouldBe false - - val f2 = aggregator - .combineAndMergeEvent(sequencerBob, bobEvents(2)) - f2.futureValueUS shouldBe Right(false) - f1.futureValueUS shouldBe Right(true) - - aggregator.eventQueue.size() shouldBe 1 - aggregator.eventQueue.take() shouldBe aggregator - .combine(NonEmpty(Seq, aliceEvents(2), bobEvents(2))) - .value - } - - "support reconfiguration to another sequencer" in { fixture => - import fixture.* - - val aggregator = mkAggregator( - config(Set(sequencerAlice), 1) - ) - - assertNoMessageDownstream(aggregator) - - aggregator - .combineAndMergeEvent(sequencerAlice, aliceEvents(0)) - .futureValueUS shouldBe Right(true) - - assertDownstreamMessage(aggregator, aliceEvents(0)) - - aggregator.changeMessageAggregationConfig( - config(Set(sequencerBob), 1) - ) - - aggregator - .combineAndMergeEvent(sequencerBob, bobEvents(0)) // arrived late event which we ignore - .futureValueUS shouldBe Right(false) - assertNoMessageDownstream(aggregator) - - aggregator - .combineAndMergeEvent(sequencerBob, bobEvents(1)) - .futureValueUS shouldBe Right(true) - assertDownstreamMessage(aggregator, bobEvents(1)) - } - } - - "Sequencer aggregator with two expected sequencers" should { - "pass-through the combined event only if both sequencers emitted it" in { fixture => - import fixture.* - val event1 = createEvent().futureValueUS - val event2 = createEvent().futureValueUS - - val aggregator = mkAggregator( - config(Set(sequencerAlice, sequencerBob), sequencerTrustThreshold = 2) - ) - - assertNoMessageDownstream(aggregator) - - val f1 = aggregator - .combineAndMergeEvent(sequencerAlice, event1) - - f1.isCompleted shouldBe false - assertNoMessageDownstream(aggregator) - - val f2 = aggregator - .combineAndMergeEvent(sequencerBob, event2) - - f1.futureValueUS.discard - f2.futureValueUS.discard - - f1.isCompleted shouldBe true - f2.isCompleted shouldBe true - - assertCombinedDownstreamMessage(aggregator, event1, event2) - f1.futureValueUS shouldBe Right(true) - f2.futureValueUS shouldBe Right(false) - } - - "fail if events share timestamp but content is different" in { fixture => - import fixture.* - val event1 = createEvent().futureValueUS - val event2 = createEvent(serializedOverride = Some(ByteString.EMPTY)).futureValueUS - - val aggregator = mkAggregator( - config(Set(sequencerAlice, sequencerBob), sequencerTrustThreshold = 2) - ) - val f1 = aggregator - .combineAndMergeEvent(sequencerAlice, event1) - - f1.isCompleted shouldBe false - assertNoMessageDownstream(aggregator) - - val hashes = NonEmpty( - Set, - hash(event1.signedEvent.content.toByteString), - hash(event2.signedEvent.content.toByteString), - ) - - aggregator - .combineAndMergeEvent(sequencerBob, event2) - .futureValueUS shouldBe Left(SequencerAggregatorError.NotTheSameContentHash(hashes)) - } - - "emit events in order when all sequencers confirmed" in { fixture => - import fixture.* - val events = (1 to 2).map(s => - createEvent(timestamp = CantonTimestamp.Epoch.plusSeconds(s.toLong)).futureValueUS - ) - val aggregator = mkAggregator( - config(Set(sequencerAlice, sequencerBob), sequencerTrustThreshold = 2) - ) - - val futures = events.map { event => - val f = aggregator.combineAndMergeEvent(sequencerAlice, event) - f.isCompleted shouldBe false - f - } - - aggregator - .combineAndMergeEvent(sequencerBob, events(0)) - 
.futureValueUS shouldBe Right(false) - - futures(0).futureValueUS shouldBe Right(true) - futures(1).isCompleted shouldBe false - - aggregator - .combineAndMergeEvent(sequencerBob, events(1)) - .futureValueUS shouldBe Right(false) - - futures(1).futureValueUS shouldBe Right(true) - } - - "support reconfiguration to another sequencer" in { fixture => - import fixture.* - - val aggregator = mkAggregator( - config(Set(sequencerAlice, sequencerBob), sequencerTrustThreshold = 2) - ) - - assertNoMessageDownstream(aggregator) - - aggregator - .combineAndMergeEvent(sequencerAlice, aliceEvents(0)) - .discard - - aggregator - .combineAndMergeEvent(sequencerBob, bobEvents(0)) - .discard - - assertCombinedDownstreamMessage(aggregator, aliceEvents(0), bobEvents(0)) - - aggregator.changeMessageAggregationConfig( - config(Set(sequencerCarlos), 1) - ) - - aggregator - .combineAndMergeEvent(sequencerCarlos, carlosEvents(1)) - .futureValueUS shouldBe Right(true) - - assertDownstreamMessage(aggregator, carlosEvents(1)) - } - } - - "Sequencer aggregator with two out of 3 expected sequencers" should { - "pass-through the combined event only if both sequencers emitted it" in { fixture => - import fixture.* - - val aggregator = mkAggregator( - config(Set(sequencerAlice, sequencerBob, sequencerCarlos), sequencerTrustThreshold = 2) - ) - - assertNoMessageDownstream(aggregator) - - val f1 = aggregator - .combineAndMergeEvent(sequencerAlice, aliceEvents(0)) - - f1.isCompleted shouldBe false - assertNoMessageDownstream(aggregator) - - val f2 = aggregator - .combineAndMergeEvent(sequencerBob, bobEvents(0)) - - eventually() { - f1.isCompleted shouldBe true - f2.isCompleted shouldBe true - } - - assertCombinedDownstreamMessage( - aggregator, - aliceEvents(0), - bobEvents(0), - ) - f1.futureValueUS shouldBe Right(true) - f2.futureValueUS shouldBe Right(false) - - val f3 = aggregator - .combineAndMergeEvent(sequencerCarlos, carlosEvents(0)) // late event - f3.isCompleted shouldBe true // should be immediately resolved - f3.futureValueUS shouldBe Right(false) - } - - "recover after skipping an event" in { fixture => - import fixture.* - - val aggregator = mkAggregator( - config(Set(sequencerAlice, sequencerBob, sequencerCarlos), sequencerTrustThreshold = 2) - ) - - assertNoMessageDownstream(aggregator) - - aggregator - .combineAndMergeEvent(sequencerAlice, aliceEvents(0)) - .discard - aggregator - .combineAndMergeEvent(sequencerBob, bobEvents(0)) - .discard - - assertCombinedDownstreamMessage(aggregator, aliceEvents(0), bobEvents(0)) - - aggregator - .combineAndMergeEvent(sequencerCarlos, carlosEvents(0)) - .discard // late event - - aggregator - .combineAndMergeEvent(sequencerAlice, aliceEvents(1)) - .discard - aggregator - .combineAndMergeEvent(sequencerCarlos, carlosEvents(1)) - .discard - - assertCombinedDownstreamMessage( - aggregator, - aliceEvents(1), - carlosEvents(1), - ) - } - - "support reconfiguration to 1 out of 3" in { fixture => - import fixture.* - - val aggregator = mkAggregator( - config(Set(sequencerAlice, sequencerBob, sequencerCarlos), sequencerTrustThreshold = 2) - ) - - assertNoMessageDownstream(aggregator) - - aggregator - .combineAndMergeEvent(sequencerAlice, aliceEvents(0)) - .discard - aggregator - .combineAndMergeEvent(sequencerBob, bobEvents(0)) - .discard - - assertCombinedDownstreamMessage( - aggregator, - aliceEvents(0), - bobEvents(0), - ) - - aggregator - .combineAndMergeEvent(sequencerCarlos, carlosEvents(0)) - .discard // late event - - aggregator.changeMessageAggregationConfig( - config( 
- Set(sequencerAlice, sequencerBob, sequencerCarlos), - 1, - ) - ) - aggregator - .combineAndMergeEvent(sequencerAlice, aliceEvents(1)) - .discard - - assertDownstreamMessage(aggregator, aliceEvents(1)) - } - - "support reconfiguration to 1 out of 3 while incomplete consensus" in { fixture => - import fixture.* - - val aggregator = mkAggregator( - config(Set(sequencerAlice, sequencerBob, sequencerCarlos), sequencerTrustThreshold = 2) - ) - assertNoMessageDownstream(aggregator) - - val f = aggregator - .combineAndMergeEvent(sequencerAlice, aliceEvents(0)) - f.isCompleted shouldBe false - - assertNoMessageDownstream(aggregator) - - aggregator.changeMessageAggregationConfig( - config(Set(sequencerAlice, sequencerBob, sequencerCarlos), sequencerTrustThreshold = 1) - ) - - // consensus requirement is changed which is enough to push the message out - - assertDownstreamMessage(aggregator, aliceEvents(0)) - f.futureValueUS shouldBe Right(true) - } - } - - "Sequencer aggregator with 3 out of 3 expected sequencers" should { - "support reconfiguration to 1 out of 3 while overfulfilled consensus" in { fixture => - import fixture.* - - val aggregator = mkAggregator( - config(Set(sequencerAlice, sequencerBob, sequencerCarlos), sequencerTrustThreshold = 3) - ) - assertNoMessageDownstream(aggregator) - - val f1 = aggregator - .combineAndMergeEvent(sequencerAlice, aliceEvents(0)) - f1.isCompleted shouldBe false - - val f2 = aggregator - .combineAndMergeEvent(sequencerBob, bobEvents(0)) - f2.isCompleted shouldBe false - - assertNoMessageDownstream(aggregator) - - aggregator.changeMessageAggregationConfig( - config(Set(sequencerAlice, sequencerBob, sequencerCarlos), sequencerTrustThreshold = 1) - ) - - // consensus requirement is changed which more than enough (2 are there, 1 is required) to push the message out - // we do accumulate all signatures still as sequencers are still expected ones - assertCombinedDownstreamMessage(aggregator, aliceEvents(0), bobEvents(0)) - } - } - - private def assertDownstreamMessage( - aggregator: SequencerAggregator, - message: SequencedSerializedEvent, - ): Assertion = - clue("Expected a single downstream message") { - aggregator.eventQueue.size() shouldBe 1 - aggregator.eventQueue.take() shouldBe message - } - - private def assertCombinedDownstreamMessage( - aggregator: SequencerAggregator, - events: SequencedSerializedEvent* - ): Assertion = clue("Expected a single combined downstream message from multiple sequencers") { - aggregator.eventQueue.size() shouldBe 1 - aggregator.eventQueue.take() shouldBe combinedMessage(aggregator, events*) - } - - private def assertNoMessageDownstream(aggregator: SequencerAggregator): Assertion = - clue("Expected no downstream messages") { - aggregator.eventQueue.size() shouldBe 0 - } - - private def combinedMessage( - aggregator: SequencerAggregator, - events: SequencedSerializedEvent* - ): SequencedSerializedEvent = - aggregator - .combine(NonEmptyUtil.fromUnsafe(events.toList)) - .value - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala deleted file mode 100644 index 1c9dfdb623..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala +++ /dev/null @@ -1,1844 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
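// [Editor's sketch] The SequencerAggregatorTest deleted above describes threshold reads over
// multiple sequencer subscriptions: an event goes downstream once `threshold` of the expected
// sequencers deliver a content-identical copy, the call crossing the threshold resolves to
// Right(true) and the rest (including late arrivals) to Right(false), and content divergence
// at the same timestamp is a NotTheSameContentHash error. A minimal, self-contained model of
// that bookkeeping (toy types; the real aggregator also merges the collected signatures):
final case class ToyAggError(msg: String)

final class ToyAggregator(var expected: Set[String], var threshold: Int) {
  private var votes = Map.empty[Long, Map[String, Int]] // timestamp -> sequencer -> content hash
  private var emitted = Vector.empty[Long] // stands in for the downstream eventQueue

  def offer(sequencer: String, timestamp: Long, contentHash: Int): Either[ToyAggError, Boolean] =
    if (!expected.contains(sequencer) || emitted.contains(timestamp)) Right(false) // ignored or late
    else {
      val perSeq = votes.getOrElse(timestamp, Map.empty[String, Int])
      if (perSeq.values.exists(_ != contentHash)) Left(ToyAggError("NotTheSameContentHash"))
      else {
        val updated = perSeq + (sequencer -> contentHash)
        if (updated.size >= threshold) {
          votes -= timestamp; emitted :+= timestamp
          Right(true) // this delivery pushed the event downstream
        } else {
          votes += timestamp -> updated
          Right(false) // still waiting for more sequencers
        }
      }
    }
}
// Mutating `expected`/`threshold` mirrors changeMessageAggregationConfig in the tests above:
// lowering the threshold lets an already-buffered event be emitted by the next offer.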
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing.client - -import cats.data.EitherT -import cats.syntax.either.* -import cats.syntax.foldable.* -import com.daml.metrics.api.MetricsContext -import com.digitalasset.canton.* -import com.digitalasset.canton.concurrent.Threading -import com.digitalasset.canton.config.* -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, NonNegativeLong, PositiveInt} -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto -import com.digitalasset.canton.crypto.{ - Fingerprint, - HashPurpose, - SyncCryptoApi, - SynchronizerCryptoClient, -} -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.health.HealthComponent.AlwaysHealthyComponent -import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown, UnlessShutdown} -import com.digitalasset.canton.logging.pretty.{Pretty, PrettyInstances} -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger} -import com.digitalasset.canton.metrics.{CommonMockMetrics, TrafficConsumptionMetrics} -import com.digitalasset.canton.protocol.messages.{ - DefaultOpenEnvelope, - ProtocolMessage, - UnsignedProtocolMessage, -} -import com.digitalasset.canton.protocol.{ - DynamicSynchronizerParametersLookup, - SynchronizerParametersLookup, - TestSynchronizerParameters, - v30, -} -import com.digitalasset.canton.sequencing.* -import com.digitalasset.canton.sequencing.ConnectionX.ConnectionXConfig -import com.digitalasset.canton.sequencing.InternalSequencerConnectionX.{ - ConnectionAttributes, - SequencerConnectionXHealth, -} -import com.digitalasset.canton.sequencing.SequencerConnectionXPool.SequencerConnectionXPoolError -import com.digitalasset.canton.sequencing.client.SendAsyncClientError.SendAsyncClientResponseError -import com.digitalasset.canton.sequencing.client.SequencedEventValidationError.PreviousTimestampMismatch -import com.digitalasset.canton.sequencing.client.SequencerClient.CloseReason.{ - ClientShutdown, - UnrecoverableError, -} -import com.digitalasset.canton.sequencing.client.SequencerClient.SequencerTransports -import com.digitalasset.canton.sequencing.client.SequencerClientSubscriptionError.{ - ApplicationHandlerException, - EventValidationError, -} -import com.digitalasset.canton.sequencing.client.SubscriptionCloseReason.{ - HandlerError, - HandlerException, -} -import com.digitalasset.canton.sequencing.client.transports.{ - SequencerClientTransport, - SequencerClientTransportPekko, -} -import com.digitalasset.canton.sequencing.protocol.* -import com.digitalasset.canton.sequencing.traffic.{ - EventCostCalculator, - TrafficConsumed, - TrafficReceipt, - TrafficStateController, -} -import com.digitalasset.canton.serialization.HasCryptographicEvidence -import com.digitalasset.canton.store.CursorPrehead.SequencerCounterCursorPrehead -import com.digitalasset.canton.store.SequencedEventStore.SequencedEventWithTraceContext -import com.digitalasset.canton.store.memory.{ - InMemorySendTrackerStore, - InMemorySequencedEventStore, - InMemorySequencerCounterTrackerStore, -} -import com.digitalasset.canton.store.{ - CursorPrehead, - SequencedEventStore, - SequencerCounterTrackerStore, -} -import com.digitalasset.canton.time.{MockTimeRequestSubmitter, SimClock, SynchronizerTimeTracker} -import com.digitalasset.canton.topology.* -import com.digitalasset.canton.topology.DefaultTestIdentities.{ - daSequencerId, - mediatorId, - 
participant1, -} -import com.digitalasset.canton.topology.client.{SynchronizerTopologyClient, TopologySnapshot} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.EitherTUtil -import com.digitalasset.canton.util.PekkoUtil.syntax.* -import com.digitalasset.canton.version.{ProtocolVersion, RepresentativeProtocolVersion} -import io.grpc.Status -import org.apache.pekko.actor.ActorSystem -import org.apache.pekko.stream.scaladsl.{Keep, Source} -import org.apache.pekko.stream.{BoundedSourceQueue, Materializer, QueueOfferResult} -import org.scalatest.BeforeAndAfterAll -import org.scalatest.wordspec.AnyWordSpec - -import java.time.temporal.ChronoUnit -import java.util.concurrent.ConcurrentLinkedQueue -import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicLong, AtomicReference} -import scala.annotation.tailrec -import scala.concurrent.duration.Duration -import scala.concurrent.{ExecutionContext, Future, Promise, blocking} -import scala.jdk.CollectionConverters.* -import scala.util.{Failure, Success} - -class SequencerClientTest - extends AnyWordSpec - with BaseTest - with HasExecutionContext - with CloseableTest - with BeforeAndAfterAll - with ProtocolVersionChecksAnyWordSpec { - - private lazy val metrics = CommonMockMetrics.sequencerClient - private lazy val deliver: Deliver[Nothing] = - SequencerTestUtils.mockDeliver( - CantonTimestamp.Epoch, - synchronizerId = DefaultTestIdentities.physicalSynchronizerId, - ) - private lazy val signedDeliver: SequencedEventWithTraceContext[ClosedEnvelope] = - SequencedEventWithTraceContext(SequencerTestUtils.sign(deliver))(traceContext) - - private lazy val nextDeliver: Deliver[Nothing] = SequencerTestUtils.mockDeliver( - timestamp = CantonTimestamp.ofEpochSecond(1), - previousTimestamp = Some(CantonTimestamp.Epoch), - synchronizerId = DefaultTestIdentities.physicalSynchronizerId, - ) - private lazy val deliver44: Deliver[Nothing] = SequencerTestUtils.mockDeliver( - timestamp = CantonTimestamp.ofEpochSecond(2), - previousTimestamp = Some(CantonTimestamp.ofEpochSecond(1)), - synchronizerId = DefaultTestIdentities.physicalSynchronizerId, - ) - private lazy val deliver45: Deliver[Nothing] = SequencerTestUtils.mockDeliver( - timestamp = CantonTimestamp.ofEpochSecond(3), - previousTimestamp = Some(CantonTimestamp.ofEpochSecond(2)), - synchronizerId = DefaultTestIdentities.physicalSynchronizerId, - ) - - private var actorSystem: ActorSystem = _ - private lazy val materializer: Materializer = Materializer(actorSystem) - private lazy val topologyWithTrafficControl = - TestingTopology(Set(DefaultTestIdentities.physicalSynchronizerId)) - .withDynamicSynchronizerParameters( - DefaultTestIdentities.defaultDynamicSynchronizerParameters.tryUpdate( - trafficControlParameters = Some( - TrafficControlParameters( - maxBaseTrafficAmount = NonNegativeLong.zero - ) - ) - ), - validFrom = CantonTimestamp.MinValue, - ) - .build() - .forOwnerAndSynchronizer(participant1) - - override protected def beforeAll(): Unit = { - super.beforeAll() - actorSystem = ActorSystem("SequencerClientTest") - } - - override def afterAll(): Unit = { - actorSystem.terminate().futureValue - super.afterAll() - } - - def deliver(i: Long): Deliver[Nothing] = SequencerTestUtils.mockDeliver( - timestamp = CantonTimestamp.Epoch.plusSeconds(i), - previousTimestamp = if (i > 1) Some(CantonTimestamp.Epoch.plusSeconds(i - 1)) else None, - DefaultTestIdentities.physicalSynchronizerId, - ) - - private lazy val alwaysSuccessfulHandler: 
PossiblyIgnoredApplicationHandler[ClosedEnvelope] = - ApplicationHandler.success() - private lazy val failureException = new IllegalArgumentException("application handler failed") - private lazy val alwaysFailingHandler: PossiblyIgnoredApplicationHandler[ClosedEnvelope] = - ApplicationHandler.create("always-fails")(_ => - HandlerResult.synchronous(FutureUnlessShutdown.failed(failureException)) - ) - - private def sequencerClient(factory: EnvFactory[SequencerClient]): Unit = { - "subscribe" should { - "throws if more than one handler is subscribed" in { - val env = factory.create() - env.subscribeAfter().futureValueUS - loggerFactory.assertLogs( - env - .subscribeAfter(CantonTimestamp.MinValue, alwaysSuccessfulHandler) - .failed - .futureValueUS, - _.warningMessage shouldBe "Cannot create additional subscriptions to the sequencer from the same client", - _.errorMessage should include("Sequencer subscription failed"), - ) shouldBe a[RuntimeException] - env.client.close() - } - - "with no recorded events subscription should begin from None" in { - val env = factory.create() - env.subscribeAfter().futureValueUS - val requestedTimestamp = env.transport.subscriber.value.request.timestamp - requestedTimestamp shouldBe None - } - - "starts subscription at last stored event (for fork verification)" in { - val env = factory.create(storedEvents = Seq(deliver)) - env.subscribeAfter().futureValueUS - val startTimestamp = env.transport.subscriber.value.request.timestamp - startTimestamp shouldBe Some(deliver.timestamp) - env.client.close() - } - - "doesn't give prior event to the application handler" in { - val validated = new AtomicBoolean() - val processed = new AtomicBoolean() - val env = factory.create( - eventValidator = new SequencedEventValidator { - override def validate( - priorEvent: Option[ProcessingSerializedEvent], - event: SequencedSerializedEvent, - sequencerId: SequencerId, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, SequencedEventValidationError[Nothing], Unit] = { - validated.set(true) - eventAlwaysValid.validate(priorEvent, event, sequencerId) - } - - override def validateOnReconnect( - priorEvent: Option[ProcessingSerializedEvent], - reconnectEvent: SequencedSerializedEvent, - sequencerId: SequencerId, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, SequencedEventValidationError[Nothing], Unit] = - validate(priorEvent, reconnectEvent, sequencerId) - - override def validatePekko[E: Pretty]( - subscription: SequencerSubscriptionPekko[E], - priorReconnectEvent: Option[SequencedSerializedEvent], - sequencerId: SequencerId, - )(implicit - traceContext: TraceContext - ): SequencerSubscriptionPekko[SequencedEventValidationError[E]] = { - val SequencerSubscriptionPekko(source, health) = - eventAlwaysValid.validatePekko(subscription, priorReconnectEvent, sequencerId) - val observeValidation = source.map { x => - validated.set(true) - x - } - SequencerSubscriptionPekko(observeValidation, health) - } - - override def close(): Unit = () - }, - storedEvents = Seq(deliver), - ) - val transport = env.transport - - val testF = for { - _ <- env.subscribeAfter( - deliver.timestamp, - ApplicationHandler.create("") { events => - processed.set(true) - alwaysSuccessfulHandler(events) - }, - ) - _ = transport.subscriber.value.request.timestamp shouldBe Some(deliver.timestamp) - _ <- transport.subscriber.value.sendToHandler(signedDeliver) - } yield { - eventually() { - validated.get() shouldBe true - } - processed.get() shouldBe false - } - - 
testF.futureValueUS - env.client.close() - } - - "picks the last prior event" in { - val triggerNextDeliverHandling = new AtomicBoolean() - val env = factory.create(storedEvents = Seq(deliver, nextDeliver, deliver44)) - val testF = for { - _ <- env.subscribeAfter( - nextDeliver.timestamp.immediatePredecessor, - ApplicationHandler.create("") { events => - if (events.value.exists(_.timestamp == nextDeliver.timestamp)) { - triggerNextDeliverHandling.set(true) - } - HandlerResult.done - }, - ) - } yield () - - testF.futureValueUS - triggerNextDeliverHandling.get shouldBe true - env.client.close() - } - - "replays messages from the SequencedEventStore" in { - val processedEvents = new ConcurrentLinkedQueue[CantonTimestamp] - - val env = factory.create(storedEvents = Seq(deliver, nextDeliver, deliver44)) - env - .subscribeAfter( - deliver.timestamp, - ApplicationHandler.create("") { events => - events.value.foreach(event => processedEvents.add(event.timestamp)) - alwaysSuccessfulHandler(events) - }, - ) - .futureValueUS - - processedEvents.iterator().asScala.toSeq shouldBe Seq( - nextDeliver.timestamp, - deliver44.timestamp, - ) - env.client.close() - } - - "propagates errors during replay" in { - val syncError = - ApplicationHandlerException( - failureException, - nextDeliver.timestamp, - nextDeliver.timestamp, - ) - val syncExc = SequencerClientSubscriptionException(syncError) - - val env = factory.create(storedEvents = Seq(deliver, nextDeliver)) - - loggerFactory.assertLogs( - env.subscribeAfter(deliver.timestamp, alwaysFailingHandler).failed.futureValueUS, - logEntry => { - logEntry.errorMessage should include( - s"Synchronous event processing failed for event batch with sequencing timestamps ${nextDeliver.timestamp} to ${nextDeliver.timestamp}" - ) - logEntry.throwable shouldBe Some(failureException) - }, - logEntry => { - logEntry.errorMessage should include("Sequencer subscription failed") - logEntry.throwable.value shouldBe syncExc - }, - ) shouldBe syncExc - env.client.close() - } - - "throttle message batches" in { - val counter = new AtomicInteger(0) - val maxSeenCounter = new AtomicInteger(0) - val maxSequencerCounter = new AtomicLong(0L) - val env = factory.create( - initializeCounterAllocatorTo = Some(SequencerCounter(0)), - options = SequencerClientConfig( - eventInboxSize = PositiveInt.tryCreate(1), - maximumInFlightEventBatches = PositiveInt.tryCreate(5), - ), - ) - - env - .subscribeAfter( - CantonTimestamp.Epoch, - ApplicationHandler.create("test-handler-throttling") { e => - val firstSc = e.value.head.counter - val lastSc = e.value.last.counter - logger.debug(s"Processing batch of events $firstSc to $lastSc") - HandlerResult.asynchronous( - FutureUnlessShutdown.outcomeF(Future { - blocking { - maxSeenCounter.synchronized { - maxSeenCounter.set(Math.max(counter.incrementAndGet(), maxSeenCounter.get())) - } - } - Threading.sleep(100) - counter.decrementAndGet().discard - maxSequencerCounter.updateAndGet(_ max lastSc.unwrap).discard - }(SequencerClientTest.this.executorService)) - ) - }, - ) - .futureValueUS - - for (i <- 1 to 100) { - env.transport.subscriber.value.sendToHandler(deliver(i.toLong)).futureValueUS - } - - eventually() { - maxSequencerCounter.get shouldBe 100 - } - - maxSeenCounter.get() shouldBe 5 - env.client.close() - } - - "time limit the synchronous application handler" in { - val env = factory.create( - initializeCounterAllocatorTo = Some(SequencerCounter(41)), - storedEvents = Seq(deliver, nextDeliver, deliver44), - ) - val promise = 
Promise[AsyncResult[Unit]]() - - val testF = loggerFactory.assertLogs( - env.subscribeAfter( - nextDeliver.timestamp.immediatePredecessor, - ApplicationHandler.create("long running synchronous handler") { _ => - env.clock.advance( - java.time.Duration.of( - DefaultProcessingTimeouts.testing.sequencedEventProcessingBound.asFiniteApproximation.toNanos, - ChronoUnit.NANOS, - ) - ) - FutureUnlessShutdown.outcomeF(promise.future) - }, - ), - _.errorMessage should include( - "Processing of event batch with sequencing timestamps 1970-01-01T00:00:01Z to 1970-01-01T00:00:02Z started at 1970-01-01T00:00:00Z did not complete by 1970-01-02T00:00:00Z" - ), - ) - - // After the timeout has been logged as an error, complete the application handler so that the test can shut down gracefully. - promise.success(AsyncResult.immediate) - testF.futureValueUS - env.client.close() - } - - "time limit the asynchronous application handler" in { - val env = factory.create( - initializeCounterAllocatorTo = Some(SequencerCounter(41)), - storedEvents = Seq(deliver, nextDeliver, deliver44), - ) - val promise = Promise[Unit]() - - val testF = loggerFactory.assertLogs( - env.subscribeAfter( - nextDeliver.timestamp.immediatePredecessor, - ApplicationHandler.create("long running asynchronous handler") { _ => - env.clock.advance( - java.time.Duration.of( - DefaultProcessingTimeouts.testing.sequencedEventProcessingBound.asFiniteApproximation.toNanos, - ChronoUnit.NANOS, - ) - ) - HandlerResult.asynchronous(FutureUnlessShutdown.outcomeF(promise.future)) - }, - ), - _.errorMessage should include( - "Processing of event batch with sequencing timestamps 1970-01-01T00:00:01Z to 1970-01-01T00:00:02Z started at 1970-01-01T00:00:00Z did not complete by 1970-01-02T00:00:00Z" - ), - ) - - // After the timeout has been logged as an error, complete the application handler so that the test can shut down gracefully. - promise.success(()) - testF.futureValueUS - env.client.close() - } - } - } - - def richSequencerClient(): Unit = { - "subscribe" should { - "stores the event in the SequencedEventStore" in { - val env = RichEnvFactory.create( - initializeCounterAllocatorTo = Some(SequencerCounter(41)) - ) - import env.* - val storedEventF = for { - _ <- env.subscribeAfter() - _ <- transport.subscriber.value.sendToHandler(signedDeliver) - _ <- client.flush() - storedEvent <- sequencedEventStore.sequencedEvents() - } yield storedEvent - - storedEventF.futureValueUS shouldBe Seq( - signedDeliver.asOrdinaryEvent(counter = SequencerCounter(42)) - ) - env.client.close() - } - - "stores the event even if the handler fails" in { - val env = RichEnvFactory.create( - initializeCounterAllocatorTo = Some(SequencerCounter(41)) - ) - import env.* - val storedEventF = for { - _ <- env.subscribeAfter(eventHandler = alwaysFailingHandler) - _ <- loggerFactory.assertLogs( - for { - _ <- transport.subscriber.value.sendToHandler(signedDeliver) - _ <- client.flush() - } yield (), - logEntry => { - logEntry.errorMessage should be( - "Synchronous event processing failed for event batch with sequencing timestamps 1970-01-01T00:00:00Z to 1970-01-01T00:00:00Z." 
- ) - logEntry.throwable.value shouldBe failureException - }, - ) - storedEvent <- sequencedEventStore.sequencedEvents() - } yield storedEvent - - storedEventF.futureValueUS shouldBe Seq( - signedDeliver.asOrdinaryEvent(counter = SequencerCounter(42)) - ) - env.client.close() - } - - "completes the sequencer client if the subscription closes due to an error" in { - val error = - EventValidationError( - PreviousTimestampMismatch( - receivedPreviousTimestamp = Some(CantonTimestamp.ofEpochSecond(666)), - expectedPreviousTimestamp = Some(CantonTimestamp.Epoch), - ) - ) - val env = RichEnvFactory.create() - import env.* - val closeReasonF = for { - _ <- env.subscribeAfter(CantonTimestamp.MinValue, alwaysSuccessfulHandler) - subscription = transport.subscriber - // we know the resilient sequencer subscription is using this type - .map(_.subscription.asInstanceOf[MockSubscription[SequencerClientSubscriptionError]]) - .value - closeReason <- loggerFactory.assertLogs( - { - subscription.closeSubscription(error) - client.completion - }, - _.warningMessage should include("sequencer"), - ) - } yield closeReason - - closeReasonF.futureValueUS should matchPattern { - case e: UnrecoverableError if e.cause == s"handler returned error: $error" => - } - env.client.close() - } - - "completes the sequencer client if the application handler fails" in { - val error = new RuntimeException("failed handler") - val syncError = ApplicationHandlerException(error, deliver.timestamp, deliver.timestamp) - val handler: PossiblyIgnoredApplicationHandler[ClosedEnvelope] = - ApplicationHandler.create("async-failure")(_ => - FutureUnlessShutdown.failed[AsyncResult[Unit]](error) - ) - - val env = RichEnvFactory.create( - initializeCounterAllocatorTo = Some(SequencerCounter(41)) - ) - import env.* - val closeReasonF = for { - _ <- env.subscribeAfter(CantonTimestamp.MinValue, handler) - closeReason <- loggerFactory.assertLogs( - for { - _ <- transport.subscriber.value.sendToHandler(deliver) - // Send the next event so that the client notices that an error has occurred. - _ <- client.flush() - _ <- transport.subscriber.value.sendToHandler(nextDeliver) - // wait until the subscription is closed (will emit an error) - closeReason <- client.completion - } yield closeReason, - logEntry => { - logEntry.errorMessage should be( - s"Synchronous event processing failed for event batch with sequencing timestamps ${deliver.timestamp} to ${deliver.timestamp}." - ) - logEntry.throwable shouldBe Some(error) - }, - _.errorMessage should include( - s"Sequencer subscription is being closed due to handler exception (this indicates a bug): $syncError" - ), - ) - - } yield { - client.close() // make sure that we can still close the sequencer client - closeReason - } - - closeReasonF.futureValueUS should matchPattern { - case e: UnrecoverableError if e.cause == s"handler returned error: $syncError" => - } - } - - "completes the sequencer client if the application handler shuts down synchronously" in { - val handler: PossiblyIgnoredApplicationHandler[ClosedEnvelope] = - ApplicationHandler.create("shutdown")(_ => FutureUnlessShutdown.abortedDueToShutdown) - - val env = RichEnvFactory.create( - initializeCounterAllocatorTo = Some(SequencerCounter(41)) - ) - import env.* - val closeReasonF = for { - _ <- env.subscribeAfter(eventHandler = handler) - closeReason <- { - for { - _ <- transport.subscriber.value.sendToHandler(deliver) - // Send the next event so that the client notices that an error has occurred. 
- _ <- client.flush() - _ <- transport.subscriber.value.sendToHandler(nextDeliver) - closeReason <- client.completion - } yield closeReason - } - } yield { - client.close() // make sure that we can still close the sequencer client - closeReason - } - - closeReasonF.futureValueUS shouldBe ClientShutdown - } - - "completes the sequencer client if asynchronous event processing fails" in { - val error = new RuntimeException("asynchronous failure") - val asyncFailure = HandlerResult.asynchronous(FutureUnlessShutdown.failed(error)) - val asyncException = - ApplicationHandlerException(error, deliver.timestamp, deliver.timestamp) - - val env = RichEnvFactory.create( - initializeCounterAllocatorTo = Some(SequencerCounter(41)) - ) - import env.* - val closeReasonF = for { - _ <- env.subscribeAfter( - eventHandler = ApplicationHandler.create("async-failure")(_ => asyncFailure) - ) - closeReason <- loggerFactory.assertLogs( - for { - _ <- transport.subscriber.value.sendToHandler(deliver) - // Make sure that the asynchronous error has been noticed - // We intentionally do two flushes. The first captures `handleReceivedEventsUntilEmpty` completing. - // During this it may `addToFlush` a future for capturing `asyncSignalledF`; however, this may occur - // after we've called `flush` and therefore won't guarantee completing all processing. - // So our second flush will capture `asyncSignalledF` for sure. - _ <- client.flush() - _ <- client.flush() - // Send the next event so that the client notices that an error has occurred. - _ <- transport.subscriber.value.sendToHandler(nextDeliver) - _ <- client.flush() - // wait until the client has completed (will write an error) - closeReason <- client.completion - _ = client.close() // make sure that we can still close the sequencer client - } yield closeReason, - logEntry => { - logEntry.errorMessage should include( - s"Asynchronous event processing failed for event batch with sequencing timestamps ${deliver.timestamp} to ${deliver.timestamp}" - ) - logEntry.throwable shouldBe Some(error) - }, - _.errorMessage should include( - s"Sequencer subscription is being closed due to handler exception (this indicates a bug): $asyncException" - ), - ) - } yield closeReason - - closeReasonF.futureValueUS should matchPattern { - case e: UnrecoverableError if e.cause == s"handler returned error: $asyncException" => - } - } - - "completes the sequencer client if asynchronous event processing shuts down" in { - val asyncShutdown = HandlerResult.asynchronous(FutureUnlessShutdown.abortedDueToShutdown) - - val env = RichEnvFactory.create( - initializeCounterAllocatorTo = Some(SequencerCounter(41)) - ) - import env.* - val closeReasonF = for { - _ <- env.subscribeAfter( - CantonTimestamp.MinValue, - ApplicationHandler.create("async-shutdown")(_ => asyncShutdown), - ) - closeReason <- { - for { - _ <- transport.subscriber.value.sendToHandler(deliver) - _ <- client.flushClean() // Make sure that the asynchronous error has been noticed - // Send the next event so that the client notices that an error has occurred.
- _ <- transport.subscriber.value.sendToHandler(nextDeliver) - _ <- client.flush() - closeReason <- client.completion - } yield closeReason - } - } yield { - client.close() // make sure that we can still close the sequencer client - closeReason - } - - closeReasonF.futureValueUS shouldBe ClientShutdown - } - - "invokes exit on fatal error handler due to a fatal error" in { - val error = - EventValidationError( - PreviousTimestampMismatch( - receivedPreviousTimestamp = Some(CantonTimestamp.ofEpochSecond(665)), - expectedPreviousTimestamp = Some(CantonTimestamp.ofEpochSecond(666)), - ) - ) - - var errorReport: String = "not reported" - def mockExitOnFatalError(message: String, logger: TracedLogger)( - traceContext: TraceContext - ): Unit = { - logger.info(s"Reporting mock fatal/exit error $message")(traceContext) - errorReport = message - } - - val env = RichEnvFactory.create(mockExitOnFatalErrorO = Some(mockExitOnFatalError)) - import env.* - val closeReasonF = for { - _ <- env.subscribeAfter(CantonTimestamp.MinValue, alwaysSuccessfulHandler) - subscription = transport.subscriber - // we know the resilient sequencer subscription is using this type - .map(_.subscription.asInstanceOf[MockSubscription[SequencerClientSubscriptionError]]) - .value - closeReason <- loggerFactory.assertLogs( - { - subscription.closeSubscription(error) - client.completion - }, - _.warningMessage should include("sequencer"), - ) - } yield closeReason - - closeReasonF.futureValueUS should matchPattern { - case e: UnrecoverableError if e.cause == s"handler returned error: $error" => - } - env.client.close() - errorReport shouldBe "Sequenced timestamp mismatch received Some(1970-01-01T00:11:05Z) but expected Some(1970-01-01T00:11:06Z). Has there been a TransportChange?" - } - } - - "subscribeTracking" should { - "updates sequencer counter prehead" in { - val env = RichEnvFactory.create( - initializeCounterAllocatorTo = Some(SequencerCounter(41)) - ) - import env.* - val preHeadF = for { - _ <- client.subscribeTracking( - sequencerCounterTrackerStore, - alwaysSuccessfulHandler, - timeTracker, - ) - _ <- transport.subscriber.value.sendToHandler(signedDeliver) - _ <- client.flushClean() - preHead <- sequencerCounterTrackerStore.preheadSequencerCounter - } yield preHead.value - - preHeadF.futureValueUS shouldBe CursorPrehead(SequencerCounter(42), deliver.timestamp) - client.close() - } - - "replays from the sequencer counter prehead" in { - val processedEvents = new ConcurrentLinkedQueue[SequencerCounter] - val env = RichEnvFactory.create( - initializeCounterAllocatorTo = Some(SequencerCounter(41)), - storedEvents = Seq(deliver, nextDeliver, deliver44, deliver45), - cleanPrehead = Some(CursorPrehead(SequencerCounter(43), nextDeliver.timestamp)), - ) - import env.* - val preheadF = for { - _ <- client.subscribeTracking( - sequencerCounterTrackerStore, - ApplicationHandler.create("") { events => - events.value.foreach(event => processedEvents.add(event.counter)) - alwaysSuccessfulHandler(events) - }, - timeTracker, - ) - _ <- client.flushClean() - prehead <- - sequencerCounterTrackerStore.preheadSequencerCounter - } yield prehead.value - - preheadF.futureValueUS shouldBe CursorPrehead(SequencerCounter(45), deliver45.timestamp) - processedEvents.iterator().asScala.toSeq shouldBe Seq( - SequencerCounter(44), - SequencerCounter(45), - ) - client.close() - } - - "resubscribes after replay" in { - val processedEvents = new ConcurrentLinkedQueue[SequencerCounter] - - val env = RichEnvFactory.create( - initializeCounterAllocatorTo 
= Some(SequencerCounter(41)), - storedEvents = Seq(deliver, nextDeliver, deliver44), - cleanPrehead = Some(CursorPrehead(SequencerCounter(43), nextDeliver.timestamp)), - ) - import env.* - val preheadF = for { - _ <- client.subscribeTracking( - sequencerCounterTrackerStore, - ApplicationHandler.create("") { events => - events.value.foreach(event => processedEvents.add(event.counter)) - alwaysSuccessfulHandler(events) - }, - timeTracker, - ) - _ <- transport.subscriber.value.sendToHandler(deliver45) - _ <- client.flushClean() - prehead <- sequencerCounterTrackerStore.preheadSequencerCounter - } yield prehead.value - - preheadF.futureValueUS shouldBe CursorPrehead(SequencerCounter(45), deliver45.timestamp) - - processedEvents.iterator().asScala.toSeq shouldBe Seq( - SequencerCounter(44), - SequencerCounter(45), - ) - client.close() - } - - "does not update the prehead if the application handler fails" in { - val env = RichEnvFactory.create( - initializeCounterAllocatorTo = Some(SequencerCounter(41)) - ) - import env.* - val preHeadF = for { - _ <- client.subscribeTracking( - sequencerCounterTrackerStore, - alwaysFailingHandler, - timeTracker, - ) - _ <- loggerFactory.assertLogs( - for { - _ <- transport.subscriber.value.sendToHandler(signedDeliver) - _ <- client.flushClean() - } yield (), - logEntry => { - logEntry.errorMessage should be( - "Synchronous event processing failed for event batch with sequencing timestamps 1970-01-01T00:00:00Z to 1970-01-01T00:00:00Z." - ) - logEntry.throwable.value shouldBe failureException - }, - ) - preHead <- sequencerCounterTrackerStore.preheadSequencerCounter - } yield preHead - - preHeadF.futureValueUS shouldBe None - client.close() - } - - "updates the prehead only after the asynchronous processing has been completed" in { - val promises = Map[SequencerCounter, Promise[UnlessShutdown[Unit]]]( - SequencerCounter(43) -> Promise[UnlessShutdown[Unit]](), - SequencerCounter(44) -> Promise[UnlessShutdown[Unit]](), - ) - - def handler: PossiblyIgnoredApplicationHandler[ClosedEnvelope] = - ApplicationHandler.create("") { events => - assert(events.value.sizeIs == 1) - promises.get(events.value(0).counter) match { - case None => HandlerResult.done - case Some(promise) => HandlerResult.asynchronous(FutureUnlessShutdown(promise.future)) - } - } - - val env = RichEnvFactory.create( - initializeCounterAllocatorTo = Some(SequencerCounter(41)), - options = SequencerClientConfig(eventInboxSize = PositiveInt.tryCreate(1)), - ) - import env.* - val testF = for { - _ <- client.subscribeTracking(sequencerCounterTrackerStore, handler, timeTracker) - _ <- transport.subscriber.value.sendToHandler(deliver) - _ <- client.flushClean() - prehead42 <- - sequencerCounterTrackerStore.preheadSequencerCounter - _ <- transport.subscriber.value.sendToHandler(nextDeliver) - prehead43 <- - sequencerCounterTrackerStore.preheadSequencerCounter - _ <- transport.subscriber.value.sendToHandler(deliver44) - _ = promises(SequencerCounter(44)).success(UnlessShutdown.unit) - prehead43a <- - sequencerCounterTrackerStore.preheadSequencerCounter - _ = promises(SequencerCounter(43)).success( - UnlessShutdown.unit - ) // now we can advance the prehead - _ <- client.flushClean() - prehead44 <- - sequencerCounterTrackerStore.preheadSequencerCounter - } yield { - prehead42 shouldBe Some(CursorPrehead(SequencerCounter(42), deliver.timestamp)) - prehead43 shouldBe Some(CursorPrehead(SequencerCounter(42), deliver.timestamp)) - prehead43a shouldBe Some(CursorPrehead(SequencerCounter(42), deliver.timestamp)) - 
prehead44 shouldBe Some(CursorPrehead(SequencerCounter(44), deliver44.timestamp)) - } - - testF.futureValueUS - client.close() - } - } - - "submissionCost" should { - "compute submission cost and update traffic state when receiving the receipt" in { - val env = RichEnvFactory.create( - topologyO = Some(topologyWithTrafficControl) - ) - val messageId = MessageId.tryCreate("mock-deliver") - val trafficReceipt = TrafficReceipt( - consumedCost = NonNegativeLong.tryCreate(4), - extraTrafficConsumed = NonNegativeLong.tryCreate(4), - baseTrafficRemainder = NonNegativeLong.zero, - ) - val testF = for { - _ <- env.subscribeAfter() - _ <- env - .sendAsync( - Batch.of( - testedProtocolVersion, - (new TestProtocolMessage(), Recipients.cc(participant1)), - ), - messageId = messageId, - ) - .value - _ <- env.transport.subscriber.value.sendToHandler( - SequencedEventWithTraceContext( - SequencerTestUtils.sign( - SequencerTestUtils.mockDeliver( - CantonTimestamp.MinValue.immediateSuccessor, - synchronizerId = DefaultTestIdentities.physicalSynchronizerId, - messageId = Some(messageId), - trafficReceipt = Some(trafficReceipt), - ) - ) - )( - traceContext - ) - ) - _ <- env.client.flushClean() - } yield { - env.trafficStateController.getTrafficConsumed shouldBe TrafficConsumed( - mediatorId, - CantonTimestamp.MinValue.immediateSuccessor, - trafficReceipt.extraTrafficConsumed, - trafficReceipt.baseTrafficRemainder, - trafficReceipt.consumedCost, - ) - } - - testF.futureValueUS - env.client.close() - } - - "consume traffic from deliver errors" in { - val env = RichEnvFactory.create( - topologyO = Some(topologyWithTrafficControl) - ) - val messageId = MessageId.tryCreate("mock-deliver") - val trafficReceipt = TrafficReceipt( - NonNegativeLong.tryCreate(4), - NonNegativeLong.tryCreate(4), - NonNegativeLong.zero, - ) - val testF = for { - _ <- env.subscribeAfter() - _ <- env - .sendAsync( - Batch.of( - testedProtocolVersion, - (new TestProtocolMessage(), Recipients.cc(participant1)), - ), - messageId = messageId, - ) - .value - _ <- env.transport.subscriber.value.sendToHandler( - SequencedEventWithTraceContext( - SequencerTestUtils.sign( - SequencerTestUtils.mockDeliverError( - CantonTimestamp.MinValue.immediateSuccessor, - DefaultTestIdentities.physicalSynchronizerId, - messageId = messageId, - trafficReceipt = Some(trafficReceipt), - ) - ) - )( - traceContext - ) - ) - _ <- env.client.flushClean() - } yield { - env.trafficStateController.getTrafficConsumed shouldBe TrafficConsumed( - mediatorId, - CantonTimestamp.MinValue.immediateSuccessor, - trafficReceipt.extraTrafficConsumed, - trafficReceipt.baseTrafficRemainder, - trafficReceipt.consumedCost, - ) - } - - testF.futureValueUS - env.client.close() - } - } - - "changeTransport" should { - "create second subscription from the same counter as the previous one when there are no events" in { - val secondTransport = MockTransport() - val env = RichEnvFactory.create() - val testF = for { - _ <- env.subscribeAfter() - _ <- env.changeTransport(secondTransport) - } yield { - val originalSubscriber = env.transport.subscriber.value - originalSubscriber.request.timestamp shouldBe None - originalSubscriber.subscription.isClosing shouldBe true // old subscription gets closed - env.transport.isClosing shouldBe true - - val newSubscriber = secondTransport.subscriber.value - newSubscriber.request.timestamp shouldBe None - newSubscriber.subscription.isClosing shouldBe false - secondTransport.isClosing shouldBe false - - env.client.completion.isCompleted shouldBe false - } - - 
testF.futureValueUS - env.client.close() - } - - "create second subscription from the same counter as the previous one when there are events" in { - val secondTransport = MockTransport() - - val env = RichEnvFactory.create( - initializeCounterAllocatorTo = Some(SequencerCounter(41)) - ) - val testF = for { - _ <- env.subscribeAfter() - - _ <- env.transport.subscriber.value.sendToHandler(deliver) - _ <- env.transport.subscriber.value.sendToHandler(nextDeliver) - _ <- env.client.flushClean() - - _ <- env.changeTransport(secondTransport) - } yield { - val originalSubscriber = env.transport.subscriber.value - originalSubscriber.request.timestamp shouldBe None - - val newSubscriber = secondTransport.subscriber.value - newSubscriber.request.timestamp shouldBe Some(nextDeliver.timestamp) - - env.client.completion.isCompleted shouldBe false - } - - testF.futureValueUS - } - - // TODO(i25218): remove "onlyRunWhen" when no longer necessary - "have new transport be used for sends" onlyRunWhen (_ < ProtocolVersion.dev) in { - val secondTransport = MockTransport() - - val env = RichEnvFactory.create() - val testF = for { - _ <- env.changeTransport(secondTransport) - _ <- env.sendAsync(Batch.empty(testedProtocolVersion)).value - } yield { - env.transport.lastSend.get() shouldBe None - secondTransport.lastSend.get() should not be None - - env.transport.isClosing shouldBe true - secondTransport.isClosing shouldBe false - } - - testF.futureValueUS - env.client.close() - } - - "have new transport be used for logout" onlyRunWhen (_ < ProtocolVersion.dev) in { - val secondTransport = MockTransport() - - val env = RichEnvFactory.create() - val testF = for { - _ <- env.changeTransport(secondTransport) - _ <- env.logout().value - } yield { - env.transport.logoutCalled shouldBe false - secondTransport.logoutCalled shouldBe true - } - - testF.futureValueUS - env.client.close() - } - - // TODO(i25218): remove "onlyRunWhen" when no longer necessary - "have new transport be used for sends when there is subscription" onlyRunWhen (_ < ProtocolVersion.dev) in { - val secondTransport = MockTransport() - - val env = RichEnvFactory.create() - val testF = for { - _ <- env.subscribeAfter() - _ <- env.changeTransport(secondTransport) - _ <- env.sendAsync(Batch.empty(testedProtocolVersion)).value - } yield { - env.transport.lastSend.get() shouldBe None - secondTransport.lastSend.get() should not be None - } - - testF.futureValueUS - env.client.close() - } - - // TODO(i25218): remove "onlyRunWhen" when no longer necessary - "have new transport be used with same sequencerId but different sequencer alias" onlyRunWhen (_ < ProtocolVersion.dev) in { - val secondTransport = MockTransport() - - val env = RichEnvFactory.create() - val testF = for { - _ <- env.subscribeAfter() - _ <- env.changeTransport( - SequencerTransports.single( - SequencerAlias.tryCreate("somethingElse"), - daSequencerId, - secondTransport, - ) - ) - _ <- env.sendAsync(Batch.empty(testedProtocolVersion)).value - } yield { - env.transport.lastSend.get() shouldBe None - secondTransport.lastSend.get() should not be None - } - - testF.futureValueUS - env.client.close() - } - - "fail to reassign sequencerId" in { - val secondTransport = MockTransport() - val secondSequencerId = SequencerId( - UniqueIdentifier.tryCreate("da2", Namespace(Fingerprint.tryFromString("default"))) - ) - - val env = RichEnvFactory.create() - val testF = for { - _ <- env.subscribeAfter() - error <- loggerFactory - .assertLogs( - env - .changeTransport( - SequencerTransports.default( - 
secondSequencerId, - secondTransport, - ) - ), - _.errorMessage shouldBe "Adding or removing sequencer subscriptions is not supported at the moment", - ) - .failed - } yield { - error - } - - testF.futureValueUS shouldBe an[IllegalArgumentException] - testF.futureValueUS.getMessage shouldBe "Adding or removing sequencer subscriptions is not supported at the moment" - env.client.close() - } - } - } - - "RichSequencerClientImpl" should { - behave like sequencerClient(RichEnvFactory) - behave like richSequencerClient() - } - - "SequencerClientImplPekko" should { - behave like sequencerClient(PekkoEnvFactory) - } - - private sealed trait Subscriber[E] { - def request: SubscriptionRequestV2 - def subscription: MockSubscription[E] - def sendToHandler(event: SequencedSerializedEvent): FutureUnlessShutdown[Unit] - - def sendToHandler(event: SequencedEvent[ClosedEnvelope]): FutureUnlessShutdown[Unit] = - sendToHandler(SequencedEventWithTraceContext(SequencerTestUtils.sign(event))(traceContext)) - } - - private case class OldStyleSubscriber[E]( - override val request: SubscriptionRequestV2, - private val handler: SequencedEventHandler[E], - override val subscription: MockSubscription[E], - ) extends Subscriber[E] { - override def sendToHandler(event: SequencedSerializedEvent): FutureUnlessShutdown[Unit] = - handler(event).transform { - case Success(UnlessShutdown.Outcome(Right(_))) => Success(UnlessShutdown.unit) - case Success(UnlessShutdown.Outcome(Left(err))) => - subscription.closeSubscription(err) - Success(UnlessShutdown.unit) - case Failure(ex) => - subscription.closeSubscription(ex) - Success(UnlessShutdown.unit) - case Success(UnlessShutdown.AbortedDueToShutdown) => - Success(UnlessShutdown.unit) - } - } - - private case class SubscriberPekko[E]( - override val request: SubscriptionRequestV2, - private val queue: BoundedSourceQueue[SequencedSerializedEvent], - override val subscription: MockSubscription[E], - ) extends Subscriber[E] { - override def sendToHandler(event: SequencedSerializedEvent): FutureUnlessShutdown[Unit] = - queue.offer(event) match { - case QueueOfferResult.Enqueued => - // TODO(#13789) This may need more synchronization - FutureUnlessShutdown.unit - case QueueOfferResult.Failure(ex) => - logger.error(s"Failed to enqueue event", ex) - fail("Failed to enqueue event") - case other => - fail(s"Could not enqueue event $event: $other") - } - } - - private case class Env[+Client <: SequencerClient]( - client: Client, - transport: MockTransport, - sequencerCounterTrackerStore: SequencerCounterTrackerStore, - sequencedEventStore: SequencedEventStore, - timeTracker: SynchronizerTimeTracker, - trafficStateController: TrafficStateController, - clock: SimClock, - ) { - - def subscribeAfter( - priorTimestamp: CantonTimestamp = CantonTimestamp.MinValue, - eventHandler: PossiblyIgnoredApplicationHandler[ClosedEnvelope] = alwaysSuccessfulHandler, - ): FutureUnlessShutdown[Unit] = - client.subscribeAfter( - priorTimestamp, - None, - eventHandler, - timeTracker, - PeriodicAcknowledgements.noAcknowledgements, - ) - - def changeTransport( - newTransport: SequencerClientTransport & SequencerClientTransportPekko - )(implicit ev: Client <:< RichSequencerClient): FutureUnlessShutdown[Unit] = - changeTransport( - SequencerTransports.default(daSequencerId, newTransport) - ) - - def changeTransport(sequencerTransports: SequencerTransports[?])(implicit - ev: Client <:< RichSequencerClient - ): FutureUnlessShutdown[Unit] = - ev(client).changeTransport(sequencerTransports) - - def sendAsync( - batch: 
Batch[DefaultOpenEnvelope], - messageId: MessageId = client.generateMessageId, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, SendAsyncClientError, Unit] = { - implicit val metricsContext: MetricsContext = MetricsContext.Empty - client.sendAsync(batch, messageId = messageId) - } - - def logout(): EitherT[FutureUnlessShutdown, Status, Unit] = client.logout() - } - - private class MockSubscription[E] extends SequencerSubscription[E] { - override protected def loggerFactory: NamedLoggerFactory = - SequencerClientTest.this.loggerFactory - - override protected def timeouts: ProcessingTimeout = DefaultProcessingTimeouts.testing - - override private[canton] def complete(reason: SubscriptionCloseReason[E])(implicit - traceContext: TraceContext - ): Unit = { - closeReasonPromise.success(reason) - close() - } - - def closeSubscription(reason: E): Unit = this.closeReasonPromise.success(HandlerError(reason)) - - def closeSubscription(error: Throwable): Unit = - this.closeReasonPromise.success(HandlerException(error)) - } - - private class MockTransport - extends SequencerClientTransport - with SequencerClientTransportPekko - with NamedLogging { - - private val logoutCalledRef = new AtomicReference[Boolean](false) - - override def logout()(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, Status, Unit] = { - logoutCalledRef.set(true) - EitherT.pure(()) - } - - def logoutCalled: Boolean = logoutCalledRef.get() - - override protected def timeouts: ProcessingTimeout = DefaultProcessingTimeouts.testing - - private val subscriberRef = new AtomicReference[Option[Subscriber[_]]](None) - - // When using a parallel execution context, the order of asynchronous operations within the SequencerClient - // is not deterministic, which can delay the subscription. This is why we add a retry policy to avoid flaky tests. - def subscriber: Option[Subscriber[_]] = { - @tailrec def subscriber(retry: Int): Option[Subscriber[_]] = - subscriberRef.get() match { - case Some(value) => Some(value) - case None if retry >= 0 => - logger.debug( - s"Subscriber reference is not defined, will retry after sleeping.
Retry: $retry" - ) - Threading.sleep(5) - subscriber(retry - 1) - case None => None - } - - subscriber(retry = 100) - } - - val lastSend = new AtomicReference[Option[SubmissionRequest]](None) - - override def acknowledgeSigned(request: SignedContent[AcknowledgeRequest])(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, String, Boolean] = - EitherT.rightT(true) - - override def getTrafficStateForMember(request: GetTrafficStateForMemberRequest)(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, String, GetTrafficStateForMemberResponse] = - EitherT.pure( - GetTrafficStateForMemberResponse( - Some( - TrafficState( - extraTrafficPurchased = NonNegativeLong.zero, - // Use the timestamp as the traffic consumed - // This allows us to assert how the state returned by this function is used by the state controller to - // refresh its own traffic state - extraTrafficConsumed = - NonNegativeLong.tryCreate(Math.abs(request.timestamp.toProtoPrimitive)), - baseTrafficRemainder = NonNegativeLong.zero, - lastConsumedCost = NonNegativeLong.zero, - timestamp = request.timestamp, - serial = None, - ) - ), - testedProtocolVersion, - ) - ) - - private def sendAsync( - request: SubmissionRequest - ): EitherT[Future, SendAsyncClientResponseError, Unit] = { - lastSend.set(Some(request)) - EitherTUtil.unit - } - - override def sendAsyncSigned( - request: SignedContent[SubmissionRequest], - timeout: Duration, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, SendAsyncClientResponseError, Unit] = - sendAsync(request.content).mapK(FutureUnlessShutdown.outcomeK) - - override def subscribe[E](request: SubscriptionRequestV2, handler: SequencedEventHandler[E])( - implicit traceContext: TraceContext - ): SequencerSubscription[E] = { - val subscription = new MockSubscription[E] - - if ( - !subscriberRef.compareAndSet(None, Some(OldStyleSubscriber(request, handler, subscription))) - ) { - fail("subscribe has already been called by this client") - } - - subscription - } - - override def subscriptionRetryPolicy: SubscriptionErrorRetryPolicy = - SubscriptionErrorRetryPolicy.never - - override protected def loggerFactory: NamedLoggerFactory = - SequencerClientTest.this.loggerFactory - - override def downloadTopologyStateForInit(request: TopologyStateForInitRequest)(implicit - traceContext: TraceContext - ): EitherT[Future, String, TopologyStateForInitResponse] = ??? 
- - override type SubscriptionError = Uninhabited - - override def subscribe(request: SubscriptionRequestV2)(implicit - traceContext: TraceContext - ): SequencerSubscriptionPekko[SubscriptionError] = { - // Choose a sufficiently large queue size so that we can test throttling - val (queue, sourceQueue) = - Source.queue[SequencedSerializedEvent](200).preMaterialize()(materializer) - - val subscriber = SubscriberPekko(request, queue, new MockSubscription[Uninhabited]()) - subscriberRef.set(Some(subscriber)) - - val source = sourceQueue - .map(Either.right) - .withUniqueKillSwitchMat()(Keep.right) - .watchTermination()(Keep.both) - - SequencerSubscriptionPekko( - source, - new AlwaysHealthyComponent("sequencer-client-test-source", logger), - ) - } - - override def subscriptionRetryPolicyPekko - : SubscriptionErrorRetryPolicyPekko[SubscriptionError] = - SubscriptionErrorRetryPolicyPekko.never - } - - private object MockTransport { - def apply(): MockTransport & SequencerClientTransportPekko.Aux[Uninhabited] = new MockTransport - } - - private class MockConnection(override val name: String) extends SequencerConnectionX { - - override def health: SequencerConnectionXHealth = ??? - - override def config: ConnectionXConfig = ??? - - override def attributes: ConnectionAttributes = - ConnectionAttributes( - DefaultTestIdentities.physicalSynchronizerId, - DefaultTestIdentities.daSequencerId, - defaultStaticSynchronizerParameters, - ) - - override def fail(reason: String)(implicit traceContext: TraceContext): Unit = ??? - - override def fatal(reason: String)(implicit traceContext: TraceContext): Unit = ??? - - val lastSend = new AtomicReference[Option[SubmissionRequest]](None) - - override def sendAsync(request: SignedContent[SubmissionRequest], timeout: Duration)(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, SendAsyncClientResponseError, Unit] = { - lastSend.set(Some(request.content)) - EitherTUtil.unitUS - } - - override def acknowledgeSigned( - signedRequest: SignedContent[AcknowledgeRequest], - timeout: Duration, - )(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, String, Boolean] = - EitherT.rightT(true) - - override def getTrafficStateForMember( - request: GetTrafficStateForMemberRequest, - timeout: Duration, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, String, GetTrafficStateForMemberResponse] = ??? - - override def logout()(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, Status, Unit] = ??? - - override def downloadTopologyStateForInit( - request: TopologyStateForInitRequest, - timeout: Duration, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, String, TopologyStateForInitResponse] = ??? - - override def subscribe[E]( - request: SubscriptionRequestV2, - handler: SequencedEventHandler[E], - timeout: Duration, - )(implicit traceContext: TraceContext): SequencerSubscription[E] = ??? - - override protected def timeouts: ProcessingTimeout = DefaultProcessingTimeouts.testing - - override protected def loggerFactory: NamedLoggerFactory = - SequencerClientTest.this.loggerFactory - } - - private class MockPool extends SequencerConnectionXPool { - private val connection = new MockConnection(name = "test") - - override protected def loggerFactory: NamedLoggerFactory = - SequencerClientTest.this.loggerFactory - override def physicalSynchronizerId: Option[PhysicalSynchronizerId] = ??? 
- - override def start()(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, SequencerConnectionXPoolError.TimeoutError, Unit] = ??? - - override def config: SequencerConnectionXPool.SequencerConnectionXPoolConfig = ??? - - override def updateConfig(newConfig: SequencerConnectionXPool.SequencerConnectionXPoolConfig)( - implicit traceContext: TraceContext - ): Either[SequencerConnectionXPool.SequencerConnectionXPoolError, Unit] = ??? - - override def health: SequencerConnectionXPool.SequencerConnectionXPoolHealth = ??? - - override def nbSequencers: NonNegativeInt = ??? - - override def nbConnections: NonNegativeInt = ??? - - override def getConnections(nb: PositiveInt, exclusions: Set[SequencerId])(implicit - traceContext: TraceContext - ): Set[SequencerConnectionX] = Set(connection) - - override def getOneConnectionPerSequencer()(implicit - traceContext: TraceContext - ): Map[SequencerId, SequencerConnectionX] = ??? - - override def getAllConnections()(implicit - traceContext: TraceContext - ): Seq[SequencerConnectionX] = ??? - - override def contents: Map[SequencerId, Set[SequencerConnectionX]] = ??? - - override protected val timeouts: ProcessingTimeout = DefaultProcessingTimeouts.testing - } - - private object MockPool { - def apply(): MockPool = new MockPool - } - - private implicit class EnrichedSequencerClient(client: RichSequencerClient) { - // flush needs to be called twice in order to finish asynchronous processing - // (see comment around shutdown in SequencerClient). So we have this small - // helper for the tests. - def flushClean(): FutureUnlessShutdown[Unit] = for { - _ <- client.flush() - _ <- client.flush() - } yield () - } - - private val eventAlwaysValid: SequencedEventValidator = SequencedEventValidator.noValidation( - DefaultTestIdentities.physicalSynchronizerId, - warn = false, - ) - - private trait EnvFactory[+Client <: SequencerClient] { - def create( - storedEvents: Seq[SequencedEvent[ClosedEnvelope]] = Seq.empty, - cleanPrehead: Option[SequencerCounterCursorPrehead] = None, - eventValidator: SequencedEventValidator = eventAlwaysValid, - options: SequencerClientConfig = SequencerClientConfig(), - topologyO: Option[SynchronizerCryptoClient] = None, - initializeCounterAllocatorTo: Option[SequencerCounter] = None, - mockExitOnFatalErrorO: Option[(String, TracedLogger) => TraceContext => Unit] = None, - )(implicit closeContext: CloseContext): Env[Client] - - protected def preloadStores( - storedEvents: Seq[SequencedEvent[ClosedEnvelope]], - cleanPrehead: Option[SequencerCounterCursorPrehead], - sequencedEventStore: SequencedEventStore, - sequencerCounterTrackerStore: SequencerCounterTrackerStore, - initializeCounterAllocatorTo: Option[SequencerCounter], - ): Unit = { - val signedEvents = storedEvents.map(SequencerTestUtils.sign) - val preloadStores = for { - _ <- initializeCounterAllocatorTo.traverse_(counter => - sequencedEventStore.reinitializeFromDbOrSetLowerBound(counter) - ) - _ <- sequencedEventStore.store( - signedEvents.map(SequencedEventWithTraceContext(_)(TraceContext.empty)) - ) - _ <- cleanPrehead.traverse_(prehead => - sequencerCounterTrackerStore.advancePreheadSequencerCounterTo(prehead) - ) - } yield () - preloadStores.futureValueUS - } - - protected def maxRequestSizeLookup: DynamicSynchronizerParametersLookup[ - SynchronizerParametersLookup.SequencerSynchronizerParameters - ] = { - val topologyClient = mock[SynchronizerTopologyClient] - val mockTopologySnapshot = mock[TopologySnapshot] - 
when(topologyClient.currentSnapshotApproximation(any[TraceContext])) - .thenReturn(mockTopologySnapshot) - when( - mockTopologySnapshot.findDynamicSynchronizerParametersOrDefault( - any[ProtocolVersion], - anyBoolean, - )(any[TraceContext]) - ) - .thenReturn(FutureUnlessShutdown.pure(TestSynchronizerParameters.defaultDynamic)) - SynchronizerParametersLookup.forSequencerSynchronizerParameters( - None, - topologyClient, - loggerFactory, - ) - } - - } - - private object MockRequestSigner extends RequestSigner { - override def signRequest[A <: HasCryptographicEvidence]( - request: A, - hashPurpose: HashPurpose, - snapshot: Option[SyncCryptoApi], - )(implicit - ec: ExecutionContext, - traceContext: TraceContext, - ): EitherT[FutureUnlessShutdown, String, SignedContent[A]] = { - val signedContent = SignedContent( - request, - SymbolicCrypto.emptySignature, - None, - testedProtocolVersion, - ) - EitherT(FutureUnlessShutdown.pure(Either.right[String, SignedContent[A]](signedContent))) - } - } - - private class ConstantSequencedEventValidatorFactory(eventValidator: SequencedEventValidator) - extends SequencedEventValidatorFactory { - override def create(loggerFactory: NamedLoggerFactory)(implicit - traceContext: TraceContext - ): SequencedEventValidator = - eventValidator - } - - private object TestProtocolMessage - private class TestProtocolMessage() extends ProtocolMessage with UnsignedProtocolMessage { - override def synchronizerId: PhysicalSynchronizerId = fail("shouldn't be used") - - override def representativeProtocolVersion: RepresentativeProtocolVersion[companionObj.type] = - fail("shouldn't be used") - - override protected val companionObj: AnyRef = TestProtocolMessage - - override def toProtoSomeEnvelopeContentV30: v30.EnvelopeContent.SomeEnvelopeContent = - v30.EnvelopeContent.SomeEnvelopeContent.Empty - - override def productElement(n: Int): Any = fail("shouldn't be used") - override def productArity: Int = fail("shouldn't be used") - override def canEqual(that: Any): Boolean = fail("shouldn't be used") - } - - private object RichEnvFactory extends EnvFactory[RichSequencerClient] { - override def create( - storedEvents: Seq[SequencedEvent[ClosedEnvelope]], - cleanPrehead: Option[SequencerCounterCursorPrehead], - eventValidator: SequencedEventValidator, - options: SequencerClientConfig, - topologyO: Option[SynchronizerCryptoClient] = None, - initializeCounterAllocatorTo: Option[SequencerCounter] = None, - mockExitOnFatalErrorO: Option[(String, TracedLogger) => TraceContext => Unit] = None, - )(implicit closeContext: CloseContext): Env[RichSequencerClient] = { - val clock = new SimClock(loggerFactory = loggerFactory) - val timeouts = DefaultProcessingTimeouts.testing - val transport = MockTransport() - val sendTrackerStore = new InMemorySendTrackerStore() - val sequencedEventStore = new InMemorySequencedEventStore(loggerFactory, timeouts) - val sequencerCounterTrackerStore = - new InMemorySequencerCounterTrackerStore(loggerFactory, timeouts) - val timeTracker = new SynchronizerTimeTracker( - SynchronizerTimeTrackerConfig(), - clock, - new MockTimeRequestSubmitter(), - timeouts, - loggerFactory, - ) - val eventValidatorFactory = new ConstantSequencedEventValidatorFactory(eventValidator) - - val topologyClient = - topologyO.getOrElse( - TestingTopology(Set(DefaultTestIdentities.physicalSynchronizerId)) - .build(loggerFactory) - .forOwnerAndSynchronizer(mediatorId) - ) - val trafficStateController = new TrafficStateController( - mediatorId, - loggerFactory, - topologyClient, - 
TrafficState.empty(CantonTimestamp.MinValue), - testedProtocolVersion, - new EventCostCalculator(loggerFactory), - TrafficConsumptionMetrics.noop, - DefaultTestIdentities.physicalSynchronizerId, - ) - val sendTracker = - new SendTracker( - Map.empty, - sendTrackerStore, - metrics, - loggerFactory, - timeouts, - Some(trafficStateController), - ) - - val client = new RichSequencerClientImpl( - DefaultTestIdentities.physicalSynchronizerId, - mediatorId, - SequencerTransports.default(DefaultTestIdentities.daSequencerId, transport), - connectionPool = MockPool(), - options.copy(useNewConnectionPool = testedProtocolVersion >= ProtocolVersion.dev), - TestingConfigInternal(), - maxRequestSizeLookup, - timeouts, - eventValidatorFactory, - clock, - MockRequestSigner, - sequencedEventStore, - sendTracker, - CommonMockMetrics.sequencerClient, - None, - replayEnabled = false, - topologyClient, - LoggingConfig(), - Some(trafficStateController), - exitOnFatalErrors = mockExitOnFatalErrorO.nonEmpty, // only "exit" when exit mock specified - loggerFactory, - futureSupervisor, - )(parallelExecutionContext, tracer) { - override protected def exitOnFatalError( - message: String, - logger: TracedLogger, - )(implicit traceContext: TraceContext): Unit = - mockExitOnFatalErrorO match { - case None => super.exitOnFatalError(message, logger)(traceContext) - case Some(exitOnFatalError) => exitOnFatalError(message, logger)(traceContext) - } - } - - preloadStores( - storedEvents, - cleanPrehead, - sequencedEventStore, - sequencerCounterTrackerStore, - initializeCounterAllocatorTo, - ) - - Env( - client, - transport, - sequencerCounterTrackerStore, - sequencedEventStore, - timeTracker, - trafficStateController, - clock, - ) - } - } - - private object PekkoEnvFactory extends EnvFactory[SequencerClient] { - override def create( - storedEvents: Seq[SequencedEvent[ClosedEnvelope]], - cleanPrehead: Option[SequencerCounterCursorPrehead], - eventValidator: SequencedEventValidator, - options: SequencerClientConfig, - topologyO: Option[SynchronizerCryptoClient] = None, - initializeCounterAllocatorTo: Option[SequencerCounter] = None, - mockExitOnFatalErrorO: Option[(String, TracedLogger) => TraceContext => Unit] = None, - )(implicit closeContext: CloseContext): Env[SequencerClient] = { - val clock = new SimClock(loggerFactory = loggerFactory) - val timeouts = DefaultProcessingTimeouts.testing - val transport = MockTransport() - val sendTrackerStore = new InMemorySendTrackerStore() - val sequencedEventStore = new InMemorySequencedEventStore(loggerFactory, timeouts) - val sequencerCounterTrackerStore = - new InMemorySequencerCounterTrackerStore(loggerFactory, timeouts) - val timeTracker = new SynchronizerTimeTracker( - SynchronizerTimeTrackerConfig(), - clock, - new MockTimeRequestSubmitter(), - timeouts, - loggerFactory, - ) - val eventValidatorFactory = new ConstantSequencedEventValidatorFactory(eventValidator) - val topologyClient = topologyO.getOrElse( - TestingTopology() - .build(loggerFactory) - .forOwnerAndSynchronizer(participant1, DefaultTestIdentities.physicalSynchronizerId) - ) - val trafficStateController = new TrafficStateController( - participant1, - loggerFactory, - topologyClient, - TrafficState.empty(CantonTimestamp.MinValue), - testedProtocolVersion, - new EventCostCalculator(loggerFactory), - TrafficConsumptionMetrics.noop, - DefaultTestIdentities.physicalSynchronizerId, - ) - val sendTracker = - new SendTracker( - Map.empty, - sendTrackerStore, - metrics, - loggerFactory, - timeouts, - 
Some(trafficStateController), - ) - - val client = new SequencerClientImplPekko( - DefaultTestIdentities.physicalSynchronizerId, - participant1, - SequencerTransports.default(DefaultTestIdentities.daSequencerId, transport), - connectionPool = MockPool(), - options.copy(useNewConnectionPool = testedProtocolVersion >= ProtocolVersion.dev), - TestingConfigInternal(), - maxRequestSizeLookup, - timeouts, - eventValidatorFactory, - clock, - MockRequestSigner, - sequencedEventStore, - sendTracker, - CommonMockMetrics.sequencerClient, - None, - replayEnabled = false, - topologyClient, - LoggingConfig(), - Some(trafficStateController), - exitOnTimeout = false, - loggerFactory, - futureSupervisor, - )(PrettyInstances.prettyUninhabited, parallelExecutionContext, tracer, materializer) - - preloadStores( - storedEvents, - cleanPrehead, - sequencedEventStore, - sequencerCounterTrackerStore, - initializeCounterAllocatorTo, - ) - - Env( - client, - transport, - sequencerCounterTrackerStore, - sequencedEventStore, - timeTracker, - trafficStateController, - clock, - ) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/TestSequencerClientSend.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/TestSequencerClientSend.scala deleted file mode 100644 index 34740ab713..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/TestSequencerClientSend.scala +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing.client - -import cats.data.EitherT -import com.daml.metrics.api.MetricsContext -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.protocol.messages.DefaultOpenEnvelope -import com.digitalasset.canton.sequencing.client.TestSequencerClientSend.Request -import com.digitalasset.canton.sequencing.protocol.{ - AggregationRule, - Batch, - MessageId, - SequencingSubmissionCost, -} -import com.digitalasset.canton.topology.{DefaultTestIdentities, PhysicalSynchronizerId} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.EitherTUtil - -import scala.jdk.CollectionConverters.* - -/** Test implementation that stores all requests in a queue. 
- */ -class TestSequencerClientSend extends SequencerClientSend { - - val requestsQueue: java.util.concurrent.BlockingQueue[Request] = - new java.util.concurrent.LinkedBlockingQueue() - - def requests: Iterable[Request] = requestsQueue.asScala - - override def psid: PhysicalSynchronizerId = DefaultTestIdentities.physicalSynchronizerId - - override def sendAsync( - batch: Batch[DefaultOpenEnvelope], - topologyTimestamp: Option[CantonTimestamp], - maxSequencingTime: CantonTimestamp, - messageId: MessageId, - aggregationRule: Option[AggregationRule], - callback: SendCallback, - amplify: Boolean, - )(implicit - traceContext: TraceContext, - metricsContext: MetricsContext, - ): EitherT[FutureUnlessShutdown, SendAsyncClientError, Unit] = { - requestsQueue.add( - Request(batch, topologyTimestamp, maxSequencingTime, messageId, aggregationRule, None) - ) - EitherTUtil.unitUS[SendAsyncClientError] - } - - override def generateMaxSequencingTime: CantonTimestamp = - CantonTimestamp.MaxValue -} - -object TestSequencerClientSend { - final case class Request( - batch: Batch[DefaultOpenEnvelope], - topologyTimestamp: Option[CantonTimestamp], - maxSequencingTime: CantonTimestamp, - messageId: MessageId, - aggregationRule: Option[AggregationRule], - submissionCost: Option[SequencingSubmissionCost], - )(implicit val traceContext: TraceContext) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerSubscriptionTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerSubscriptionTest.scala deleted file mode 100644 index 45b2447d88..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerSubscriptionTest.scala +++ /dev/null @@ -1,252 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing.client.transports - -import cats.data.EitherT -import cats.syntax.either.* -import cats.syntax.option.* -import com.digitalasset.canton.config.DefaultProcessingTimeouts -import com.digitalasset.canton.crypto.v30 as cryptoproto -import com.digitalasset.canton.lifecycle.OnShutdownRunner.PureOnShutdownRunner -import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, UnlessShutdown} -import com.digitalasset.canton.networking.grpc.GrpcError -import com.digitalasset.canton.protocol.v30 -import com.digitalasset.canton.sequencer.api.v30 as v30Sequencer -import com.digitalasset.canton.sequencing.SequencerTestUtils.MockMessageContent -import com.digitalasset.canton.sequencing.client.SubscriptionCloseReason -import com.digitalasset.canton.topology.{PhysicalSynchronizerId, SynchronizerId, UniqueIdentifier} -import com.digitalasset.canton.tracing.SerializableTraceContext -import com.digitalasset.canton.util.ByteStringUtil -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import com.google.protobuf.ByteString -import io.grpc.Context.CancellableContext -import io.grpc.Status.Code.* -import io.grpc.{Context, Status, StatusRuntimeException} -import org.scalatest.wordspec.AnyWordSpec - -import scala.concurrent.duration.{DurationInt, FiniteDuration} -import scala.concurrent.{Future, Promise} - -class GrpcSequencerSubscriptionTest extends AnyWordSpec with BaseTest with HasExecutionContext { - private lazy val synchronizerId: PhysicalSynchronizerId = SynchronizerId( - UniqueIdentifier.tryFromProtoPrimitive("da::default") - ).toPhysical - - private lazy val emptyEnvelope = v30.Envelope( - content = MockMessageContent.toByteString, - recipients = None, - signatures = Nil, - ) - - private lazy val messageP: v30Sequencer.SubscriptionResponse = v30Sequencer - .SubscriptionResponse( - v30 - .SignedContent( - v30 - .SequencedEvent( - previousTimestamp = None, - timestamp = 0, - batch = Some( - v30.CompressedBatch( - algorithm = - v30.CompressedBatch.CompressionAlgorithm.COMPRESSION_ALGORITHM_UNSPECIFIED, - compressedBatch = ByteStringUtil.compressGzip( - v30.Batch(envelopes = Seq(emptyEnvelope)).toByteString - ), - ) - ), - physicalSynchronizerId = synchronizerId.toProtoPrimitive, - messageId = None, - deliverErrorReason = None, - topologyTimestamp = None, - trafficReceipt = None, - ) - .toByteString - .some, - Seq( - cryptoproto.Signature( - format = cryptoproto.SignatureFormat.SIGNATURE_FORMAT_RAW, - signature = ByteString.copyFromUtf8("not checked in this test"), - signedBy = "not checked", - signingAlgorithmSpec = - cryptoproto.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_UNSPECIFIED, - signatureDelegation = None, - ) - ), - timestampOfSigningKey = None, - ) - .toByteString, - Some(SerializableTraceContext.empty.toProtoV30), - ) - - val RequestDescription = "request description" - - val ServerName = "sequencer" - - def expectedError(ex: StatusRuntimeException): Left[GrpcError, Nothing] = - Left(GrpcError(RequestDescription, ServerName, ex)) - - def createSubscription( - handler: v30Sequencer.SubscriptionResponse => EitherT[ - FutureUnlessShutdown, - String, - Unit, - ] = _ => handlerResult(Either.unit), - context: CancellableContext = Context.ROOT.withCancellation(), - ): GrpcSequencerSubscription[String, v30Sequencer.SubscriptionResponse] = - new GrpcSequencerSubscription[String, v30Sequencer.SubscriptionResponse]( - context, - new PureOnShutdownRunner(logger), - tracedEvent => handler(tracedEvent.value), // 
ignore Traced[..] wrapper - DefaultProcessingTimeouts.testing, - loggerFactory, - ) { - // reduce the close timeout - override def closingTimeout: FiniteDuration = 1.second - } - - private def handlerResult( - either: Either[String, Unit] - ): EitherT[FutureUnlessShutdown, String, Unit] = - EitherT(FutureUnlessShutdown.pure(either)) - - "GrpcSequencerSubscription" should { - "close normally when closed by the user" in { - val context = Context.ROOT.withCancellation() - val sut = createSubscription(context = context) - - // The user closes the observer - sut.close() - - // This must close the context - context.isCancelled shouldBe true - - sut.closeReason.futureValue shouldBe SubscriptionCloseReason.Closed - } - - "pass any exception given to onError" in { - val ex = new RuntimeException("Test exception") - - val sut = createSubscription() - - loggerFactory.assertLogs( - sut.observer.onError(ex), - entry => { - entry.errorMessage shouldBe "The sequencer subscription failed unexpectedly." - entry.throwable shouldBe Some(ex) - }, - ) - - sut.closeReason.futureValue shouldBe GrpcSubscriptionUnexpectedException(ex) - } - - "close with error when closed by the server" in { - val sut = createSubscription() - - sut.observer.onCompleted() - - inside(sut.closeReason.futureValue) { - case GrpcSubscriptionError(GrpcError.GrpcServiceUnavailable(_, _, status, _, _)) => - status.getCode shouldBe UNAVAILABLE - status.getDescription shouldBe "Connection terminated by the server." - } - } - - "use the given handler to process received messages" in { - val messagePromise = Promise[v30Sequencer.SubscriptionResponse]() - - val sut = - createSubscription(handler = m => handlerResult(Right(messagePromise.success(m)))) - - sut.observer.onNext(messageP) - - messagePromise.future.futureValue shouldBe messageP - } - - "close with exception if the handler throws" in { - val ex = new RuntimeException("Handler Error") - val sut = createSubscription(handler = - _ => EitherT(FutureUnlessShutdown.failed[Either[String, Unit]](ex)) - ) - - sut.observer.onNext(messageP) - - sut.closeReason.futureValue shouldBe SubscriptionCloseReason.HandlerException(ex) - } - - "terminate onNext only after termination of the handler" in { - val handlerCompleted = Promise[UnlessShutdown[Either[String, Unit]]]() - - val sut = - createSubscription(handler = _ => EitherT(FutureUnlessShutdown(handlerCompleted.future))) - - val onNextF = Future(sut.observer.onNext(messageP)) - - eventuallyForever(timeUntilSuccess = 0.seconds, durationOfSuccess = 100.milliseconds) { - !onNextF.isCompleted - } - - handlerCompleted.success(UnlessShutdown.Outcome(Either.unit)) - - onNextF.futureValue - } - - "not wait for the handler to complete on shutdown" in { - val handlerInvoked = Promise[Unit]() - val handlerNeverCompleted = EitherT( - FutureUnlessShutdown(Promise[UnlessShutdown.Outcome[Either[String, Unit]]]().future) - ) - - val sut = createSubscription(handler = _ => { - handlerInvoked.success(()) - handlerNeverCompleted - }) - - // Processing this message takes forever... - Future(sut.observer.onNext(messageP)).failed - .foreach(logger.error("Unexpected exception", _)) - - // Make sure that the handler has been invoked before doing the next step. 
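 // Without this synchronization, close() below could run before the handler
 // has started, and the test would no longer exercise a shutdown that races
 // with a handler that is still busy.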
- handlerInvoked.future.futureValue
-
- sut.close()
- sut.closeReason.futureValue should (be(SubscriptionCloseReason.Closed) or
- be(SubscriptionCloseReason.Shutdown))
- }
-
- "not invoke the handler after closing" in {
- val messagePromise = Promise[v30Sequencer.SubscriptionResponse]()
-
- val sut =
- createSubscription(handler = m => handlerResult(Right(messagePromise.success(m))))
-
- sut.close()
-
- sut.observer.onNext(messageP)
-
- eventuallyForever(timeUntilSuccess = 0.seconds, durationOfSuccess = 100.milliseconds) {
- !messagePromise.isCompleted
- }
- }
-
- "not log an INTERNAL error at error level after having received some items" in {
- // we see this scenario when a load balancer between applications decides to reset the TCP stream, e.g. for a timeout
- val sut =
- createSubscription(handler = _ => handlerResult(Either.unit))
-
- loggerFactory.assertLoggedWarningsAndErrorsSeq(
- {
- // receive some items
- sut.observer.onNext(
- v30Sequencer.SubscriptionResponse.defaultInstance
- .copy(traceContext = Some(SerializableTraceContext.empty.toProtoV30))
- )
- sut.observer.onError(Status.INTERNAL.asRuntimeException())
- sut.close()
- },
- logs => logs shouldBe empty,
- )
- }
- }
-}
diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSubscriptionErrorRetryPolicyTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSubscriptionErrorRetryPolicyTest.scala
deleted file mode 100644
index edb1455b76..0000000000
--- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSubscriptionErrorRetryPolicyTest.scala
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
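The GrpcSubscriptionErrorRetryPolicyTest deleted below pins down which gRPC failures a sequencer subscription may retry transparently: UNAVAILABLE, and UNAUTHENTICATED when the server attached an authentication error code (an expired token can be cured by re-authenticating). A minimal sketch of that decision rule, with an illustrative boolean standing in for the metadata lookup:

import io.grpc.Status

// Sketch only: retry UNAVAILABLE unconditionally; retry UNAUTHENTICATED only
// when the failure carries an authentication error code, since obtaining a
// fresh token can make the next attempt succeed.
def shouldRetrySketch(code: Status.Code, hasAuthErrorCode: Boolean): Boolean =
  code match {
    case Status.Code.UNAVAILABLE     => true
    case Status.Code.UNAUTHENTICATED => hasAuthErrorCode
    case _                           => false
  }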
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing.client.transports - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.networking.grpc.GrpcError -import com.digitalasset.canton.sequencing.authentication.MemberAuthentication -import com.digitalasset.canton.sequencing.authentication.grpc.Constant -import com.digitalasset.canton.topology.DefaultTestIdentities -import io.grpc.{Metadata, Status} -import org.scalatest.wordspec.AnyWordSpec - -class GrpcSubscriptionErrorRetryPolicyTest extends AnyWordSpec with BaseTest { - - private val requestDescription = "request description" - private val serverName = "sequencer" - - "GrpcSubscriptionErrorRetryRule" should { - val tokenExpiredMetadata = new Metadata() - tokenExpiredMetadata.put( - Constant.AUTHENTICATION_ERROR_CODE, - MemberAuthentication.MissingToken(DefaultTestIdentities.participant1).code, - ) - - val recoverableErrors = Seq( - GrpcError(requestDescription, serverName, Status.UNAVAILABLE.asRuntimeException()), - GrpcError( - requestDescription, - serverName, - Status.UNAUTHENTICATED.asRuntimeException(tokenExpiredMetadata), - ), - ) - - forEvery(recoverableErrors) { error => - s"retry if sequencer temporarily fails with ${error.status.getCode}" in { - shouldRetry(error) shouldBe true - } - } - - } - - def shouldRetry(grpcError: GrpcError): Boolean = { - val rule = new GrpcSubscriptionErrorRetryPolicy(loggerFactory) - - rule.retryOnError(GrpcSubscriptionError(grpcError), receivedItems = false) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/handlers/EventTimestampCaptureTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/handlers/EventTimestampCaptureTest.scala deleted file mode 100644 index 94558b0758..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/handlers/EventTimestampCaptureTest.scala +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
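The EventTimestampCaptureTest deleted below exercises a small decorator: it wraps a sequenced-event handler and records the event's timestamp only once the handler succeeds, while a failing handler must leave the captured value unchanged. A minimal sketch of the pattern, using an illustrative Long timestamp in place of CantonTimestamp:

import java.util.concurrent.atomic.AtomicReference
import scala.concurrent.{ExecutionContext, Future}

// Minimal sketch: wrap a handler so the latest timestamp is recorded only
// when the handler's future completes successfully.
final class TimestampCaptureSketch(initial: Option[Long]) {
  private val latest = new AtomicReference[Option[Long]](initial)
  def latestEventTimestamp: Option[Long] = latest.get

  def wrap[E](timestampOf: E => Long)(handler: E => Future[Unit])(implicit
      ec: ExecutionContext
  ): E => Future[Unit] =
    event =>
      handler(event).map { _ =>
        // Reached only on success; a failed future leaves `latest` unchanged.
        latest.set(Some(timestampOf(event)))
      }
}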
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing.handlers - -import cats.syntax.either.* -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.sequencing.protocol.SignedContent -import com.digitalasset.canton.sequencing.{SequencedEventHandler, SequencerTestUtils} -import com.digitalasset.canton.serialization.HasCryptographicEvidence -import com.digitalasset.canton.store.SequencedEventStore.SequencedEventWithTraceContext -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.wordspec.AnyWordSpec - -final case class HandlerError(message: String) - -class EventTimestampCaptureTest extends AnyWordSpec with BaseTest with HasExecutionContext { - type TestEventHandler = SequencedEventHandler[HandlerError] - - "EventTimestampCapture" should { - "return initial value if we've not successfully processed an event" in { - (new EventTimestampCapture(initial = None, loggerFactory)).latestEventTimestamp shouldBe None - (new EventTimestampCapture( - initial = Some(CantonTimestamp.ofEpochSecond(42L)), - loggerFactory, - )).latestEventTimestamp shouldBe Some(CantonTimestamp.ofEpochSecond(42L)) - } - - "update the timestamp when we successfully process an event" in { - val timestampCapture = - new EventTimestampCapture(Some(CantonTimestamp.ofEpochSecond(2L)), loggerFactory) - val handler: TestEventHandler = _ => FutureUnlessShutdown.pure(Either.unit) - val capturingHandler = timestampCapture(handler) - - val fut = capturingHandler( - SequencedEventWithTraceContext( - sign( - SequencerTestUtils.mockDeliver(timestamp = CantonTimestamp.ofEpochSecond(42)) - ) - )( - traceContext - ) - ) - - timestampCapture.latestEventTimestamp shouldBe Some(CantonTimestamp.ofEpochSecond(42)) - fut.futureValueUS shouldBe Either.unit - } - - "not update the timestamp when the handler fails" in { - val timestampCapture = - new EventTimestampCapture(Some(CantonTimestamp.ofEpochSecond(2L)), loggerFactory) - val ex = new RuntimeException - val handler: TestEventHandler = _ => FutureUnlessShutdown.failed(ex) - val capturingHandler = timestampCapture(handler) - - val fut = capturingHandler( - SequencedEventWithTraceContext( - sign( - SequencerTestUtils.mockDeliver(timestamp = CantonTimestamp.ofEpochSecond(42)) - ) - )( - traceContext - ) - ) - - timestampCapture.latestEventTimestamp shouldBe Some(CantonTimestamp.ofEpochSecond(2L)) - fut.failed.futureValueUS shouldBe ex - } - } - - private def sign[A <: HasCryptographicEvidence](content: A): SignedContent[A] = - SignedContent(content, SymbolicCrypto.emptySignature, None, testedProtocolVersion) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/protocol/GeneratorsProtocol.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/protocol/GeneratorsProtocol.scala deleted file mode 100644 index 78f02a33b6..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/protocol/GeneratorsProtocol.scala +++ /dev/null @@ -1,316 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
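GeneratorsProtocol below wires up ScalaCheck Arbitrary instances for the sequencing protocol messages. The one non-obvious technique in it is recipientsTreeGen, which keeps a recursive generator terminating by threading an explicit depth bound. A minimal sketch of that pattern with an illustrative tree type (not Canton's):

import org.scalacheck.Gen

final case class TreeSketch(label: Int, children: List[TreeSketch])

// Decreasing `depth` on every recursive call guarantees termination; depth 0
// produces a leaf, mirroring recipientsTreeGen further down.
def treeGen(depth: Int): Gen[TreeSketch] =
  if (depth == 0) Gen.choose(0, 9).map(TreeSketch(_, Nil))
  else
    for {
      label <- Gen.choose(0, 9)
      children <- Gen.listOfN(3, treeGen(depth - 1))
    } yield TreeSketch(label, children)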
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing.protocol - -import com.daml.nonempty.NonEmptyUtil -import com.digitalasset.canton.Generators -import com.digitalasset.canton.config.CantonRequireTypes.String73 -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, NonNegativeLong, PositiveInt} -import com.digitalasset.canton.crypto.{AsymmetricEncrypted, Signature} -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.protocol.ProtocolSymmetricKey -import com.digitalasset.canton.protocol.messages.{GeneratorsMessages, ProtocolMessage} -import com.digitalasset.canton.sequencing.channel.{ - ConnectToSequencerChannelRequest, - ConnectToSequencerChannelResponse, -} -import com.digitalasset.canton.sequencing.protocol.channel.{ - SequencerChannelConnectedToAllEndpoints, - SequencerChannelId, - SequencerChannelMetadata, - SequencerChannelSessionKey, - SequencerChannelSessionKeyAck, -} -import com.digitalasset.canton.sequencing.traffic.TrafficReceipt -import com.digitalasset.canton.serialization.{ - BytestringWithCryptographicEvidence, - HasCryptographicEvidence, -} -import com.digitalasset.canton.topology.{GeneratorsTopology, Member, PhysicalSynchronizerId} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.version.{GeneratorsVersion, ProtocolVersion} -import com.google.protobuf.ByteString -import magnolify.scalacheck.auto.* -import org.scalacheck.{Arbitrary, Gen} - -final class GeneratorsProtocol( - protocolVersion: ProtocolVersion, - generatorsMessages: GeneratorsMessages, - generatorsTopology: GeneratorsTopology, -) { - import com.digitalasset.canton.Generators.* - import com.digitalasset.canton.config.GeneratorsConfig.* - import com.digitalasset.canton.crypto.GeneratorsCrypto.* - import com.digitalasset.canton.data.GeneratorsDataTime.* - import generatorsTopology.* - import generatorsMessages.* - - implicit val acknowledgeRequestArb: Arbitrary[AcknowledgeRequest] = Arbitrary(for { - ts <- Arbitrary.arbitrary[CantonTimestamp] - member <- Arbitrary.arbitrary[Member] - } yield AcknowledgeRequest(member, ts, protocolVersion)) - - implicit val aggregationRuleArb: Arbitrary[AggregationRule] = - Arbitrary( - for { - threshold <- Arbitrary.arbitrary[PositiveInt] - eligibleMembers <- Generators.nonEmptyListGen[Member] - } yield AggregationRule(eligibleMembers, threshold)( - AggregationRule.protocolVersionRepresentativeFor(protocolVersion) - ) - ) - - implicit val closedEnvelopeArb: Arbitrary[ClosedEnvelope] = Arbitrary(for { - bytes <- Arbitrary.arbitrary[ByteString] - signatures <- Gen.listOfN(5, signatureArb.arbitrary) - - recipients <- recipientsArb.arbitrary - } yield ClosedEnvelope.create(bytes, recipients, signatures, protocolVersion)) - - implicit val openEnvelopArb: Arbitrary[OpenEnvelope[ProtocolMessage]] = Arbitrary( - for { - protocolMessage <- protocolMessageArb.arbitrary - recipients <- recipientsArb.arbitrary - } yield OpenEnvelope(protocolMessage, recipients)(protocolVersion) - ) - - implicit val envelopeArb: Arbitrary[Envelope[?]] = - Arbitrary(Gen.oneOf[Envelope[?]](closedEnvelopeArb.arbitrary, openEnvelopArb.arbitrary)) - - implicit val batchArb: Arbitrary[Batch[Envelope[?]]] = - Arbitrary(for { - envelopes <- Generators.nonEmptyListGen[Envelope[?]](envelopeArb) - } yield Batch(envelopes.map(_.closeEnvelope), protocolVersion)) - - implicit val submissionCostArb: Arbitrary[SequencingSubmissionCost] = - Arbitrary( - for { - cost <- Arbitrary.arbitrary[NonNegativeLong] - } 
yield SequencingSubmissionCost(cost)( - SequencingSubmissionCost.protocolVersionRepresentativeFor(protocolVersion) - ) - ) - - implicit val submissionRequestArb: Arbitrary[SubmissionRequest] = - Arbitrary( - for { - sender <- Arbitrary.arbitrary[Member] - messageId <- Arbitrary.arbitrary[MessageId] - envelopes <- Generators.nonEmptyListGen[ClosedEnvelope](closedEnvelopeArb) - batch = Batch(envelopes.map(_.closeEnvelope), protocolVersion) - maxSequencingTime <- Arbitrary.arbitrary[CantonTimestamp] - aggregationRule <- Gen.option(Arbitrary.arbitrary[AggregationRule]) - submissionCost <- GeneratorsVersion.defaultValueGen( - protocolVersion, - SubmissionRequest.submissionCostDefaultValue, - Gen.option(Arbitrary.arbitrary[SequencingSubmissionCost]), - ) - topologyTimestamp <- - if (aggregationRule.nonEmpty) - Arbitrary.arbitrary[CantonTimestamp].map(Some(_)) - else Gen.const(None) - } yield SubmissionRequest.tryCreate( - sender, - messageId, - batch, - maxSequencingTime, - topologyTimestamp, - aggregationRule, - submissionCost, - SubmissionRequest.protocolVersionRepresentativeFor(protocolVersion).representative, - ) - ) - - implicit val topologyStateForInitRequestArb: Arbitrary[TopologyStateForInitRequest] = Arbitrary( - for { - member <- Arbitrary.arbitrary[Member] - } yield TopologyStateForInitRequest(member, protocolVersion) - ) - - implicit val subscriptionRequestV2Arb: Arbitrary[SubscriptionRequestV2] = Arbitrary( - for { - member <- Arbitrary.arbitrary[Member] - timestamp <- Arbitrary.arbitrary[Option[CantonTimestamp]] - } yield SubscriptionRequestV2.apply(member, timestamp, protocolVersion) - ) - - implicit val sequencerChannelMetadataArb: Arbitrary[SequencerChannelMetadata] = - Arbitrary( - for { - channelId <- Arbitrary.arbitrary[SequencerChannelId] - initiatingMember <- Arbitrary.arbitrary[Member] - receivingMember <- Arbitrary.arbitrary[Member] - } yield SequencerChannelMetadata.apply( - channelId, - initiatingMember, - receivingMember, - protocolVersion, - ) - ) - - implicit val sequencerChannelConnectedToAllEndpointsArb - : Arbitrary[SequencerChannelConnectedToAllEndpoints] = - Arbitrary( - SequencerChannelConnectedToAllEndpoints.apply( - protocolVersion - ) - ) - - implicit val sequencerChannelSessionKeyArb: Arbitrary[SequencerChannelSessionKey] = - Arbitrary(for { - encrypted <- Arbitrary.arbitrary[AsymmetricEncrypted[ProtocolSymmetricKey]] - } yield SequencerChannelSessionKey.apply(encrypted, protocolVersion)) - - implicit val sequencerChannelSessionKeyAckArb: Arbitrary[SequencerChannelSessionKeyAck] = - Arbitrary(SequencerChannelSessionKeyAck.apply(protocolVersion)) - - implicit val connectToSequencerChannelRequestArb: Arbitrary[ConnectToSequencerChannelRequest] = - Arbitrary( - for { - request <- Gen.oneOf( - byteStringArb.arbitrary.map( - ConnectToSequencerChannelRequest.Payload(_): ConnectToSequencerChannelRequest.Request - ), - sequencerChannelMetadataArb.arbitrary.map(ConnectToSequencerChannelRequest.Metadata.apply), - ) - } yield ConnectToSequencerChannelRequest.apply( - request, - TraceContext.empty, - protocolVersion, - ) - ) - - implicit val connectToSequencerChannelResponseArb: Arbitrary[ConnectToSequencerChannelResponse] = - Arbitrary( - for { - response <- Gen.oneOf( - byteStringArb.arbitrary.map( - ConnectToSequencerChannelResponse.Payload(_): ConnectToSequencerChannelResponse.Response - ), - sequencerChannelConnectedToAllEndpointsArb.arbitrary.map(_ => - ConnectToSequencerChannelResponse.Connected - ), - ) - } yield ConnectToSequencerChannelResponse.apply( - 
response, - TraceContext.empty, - protocolVersion, - ) - ) - - private val sequencerDeliverErrorCodeArb: Arbitrary[SequencerDeliverErrorCode] = genArbitrary - - private implicit val sequencerDeliverErrorArb: Arbitrary[SequencerDeliverError] = - Arbitrary( - for { - code <- sequencerDeliverErrorCodeArb.arbitrary - message <- Arbitrary.arbitrary[String] - } yield code.apply(message) - ) - - private implicit val deliverErrorArb: Arbitrary[DeliverError] = Arbitrary( - for { - pts <- Arbitrary.arbitrary[Option[CantonTimestamp]] - ts <- Arbitrary.arbitrary[CantonTimestamp] - synchronizerId <- Arbitrary.arbitrary[PhysicalSynchronizerId] - messageId <- Arbitrary.arbitrary[MessageId] - error <- sequencerDeliverErrorArb.arbitrary - } yield DeliverError.create( - previousTimestamp = pts, - timestamp = ts, - synchronizerId = synchronizerId, - messageId, - error, - protocolVersion, - Option.empty[TrafficReceipt], - ) - ) - private implicit val deliverArbitrary: Arbitrary[Deliver[Envelope[?]]] = Arbitrary( - for { - synchronizerId <- Arbitrary.arbitrary[PhysicalSynchronizerId] - batch <- batchArb.arbitrary - deliver <- Arbitrary(deliverGen(synchronizerId, batch, protocolVersion)).arbitrary - } yield deliver - ) - - implicit val sequencerEventArb: Arbitrary[SequencedEvent[Envelope[?]]] = - Arbitrary(Gen.oneOf(deliverErrorArb.arbitrary, deliverArbitrary.arbitrary)) - - implicit val signedContent: Arbitrary[SignedContent[HasCryptographicEvidence]] = Arbitrary( - for { - signatures <- Generators.nonEmptyListGen[Signature] - ts <- Gen.option(Arbitrary.arbitrary[CantonTimestamp]) - content <- signedProtocolMessageContentArb.arbitrary - byteStringWithCryptographicEvidence = BytestringWithCryptographicEvidence( - content.getCryptographicEvidence - ) - } yield { - // When we deserialize SignedContent we don't get the original Content, but instead we get a byteStringWithCryptographicEvidence - // This is why I pass directly a byteStringWithCryptographicEvidence instead of any Content With HasCryptographicEvidence. 
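 // Re-wrapping the raw bytes keeps the round-trip property checkable:
 // serializing the deserialized value must reproduce the original byte string.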
- SignedContent.create( - content = byteStringWithCryptographicEvidence, - signatures = signatures, - timestampOfSigningKey = ts, - representativeProtocolVersion = - SignedContent.protocolVersionRepresentativeFor(protocolVersion), - ) - } - ) - - implicit val groupRecipientArb: Arbitrary[GroupRecipient] = genArbitrary - implicit val recipientArb: Arbitrary[Recipient] = genArbitrary - implicit val memberRecipientArb: Arbitrary[MemberRecipient] = genArbitrary - - implicit val recipientsArb: Arbitrary[Recipients] = { - val protocolVersionDependentRecipientGen = Arbitrary.arbitrary[Recipient] - - Arbitrary(for { - depths <- nonEmptyListGen(Arbitrary(Gen.choose(0, 3))) - trees <- Gen.sequence[List[RecipientsTree], RecipientsTree]( - depths.forgetNE.map(recipientsTreeGen(Arbitrary(protocolVersionDependentRecipientGen))) - ) - } yield Recipients(NonEmptyUtil.fromUnsafe(trees))) - } - implicit val mediatorGroupRecipientArb: Arbitrary[MediatorGroupRecipient] = Arbitrary( - Arbitrary.arbitrary[NonNegativeInt].map(MediatorGroupRecipient(_)) - ) - implicit val messageIdArb: Arbitrary[MessageId] = Arbitrary( - Generators.lengthLimitedStringGen(String73).map(s => MessageId.tryCreate(s.str)) - ) - private def recipientsTreeGen( - recipientArb: Arbitrary[Recipient] - )(depth: Int): Gen[RecipientsTree] = { - val maxBreadth = 5 - val recipientGroupGen = nonEmptySetGen(recipientArb) - - if (depth == 0) { - recipientGroupGen.map(RecipientsTree(_, Nil)) - } else { - for { - children <- Gen.listOfN(maxBreadth, recipientsTreeGen(recipientArb)(depth - 1)) - recipientGroup <- recipientGroupGen - } yield RecipientsTree(recipientGroup, children) - } - } - def deliverGen[Env <: Envelope[?]]( - synchronizerId: PhysicalSynchronizerId, - batch: Batch[Env], - protocolVersion: ProtocolVersion, - ): Gen[Deliver[Env]] = for { - previousTimestamp <- Arbitrary.arbitrary[Option[CantonTimestamp]] - timestamp <- Arbitrary.arbitrary[CantonTimestamp] - messageIdO <- Gen.option(Arbitrary.arbitrary[MessageId]) - topologyTimestampO <- Gen.option(Arbitrary.arbitrary[CantonTimestamp]) - trafficReceipt <- Gen.option(Arbitrary.arbitrary[TrafficReceipt]) - } yield Deliver.create( - previousTimestamp, - timestamp, - synchronizerId, - messageIdO, - batch, - topologyTimestampO, - protocolVersion, - trafficReceipt, - ) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/protocol/SubmissionRequestTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/protocol/SubmissionRequestTest.scala deleted file mode 100644 index 0213e4d316..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/sequencing/protocol/SubmissionRequestTest.scala +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.sequencing.protocol - -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.BaseTestWordSpec -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto -import com.digitalasset.canton.crypto.{Signature, TestHash} -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.topology.DefaultTestIdentities -import com.digitalasset.canton.version.ProtocolVersion -import com.google.protobuf.ByteString - -import java.time.Duration -import java.util.UUID - -class SubmissionRequestTest extends BaseTestWordSpec { - - private lazy val defaultAggregationRule = AggregationRule( - NonEmpty(Seq, DefaultTestIdentities.participant1, DefaultTestIdentities.participant2), - PositiveInt.tryCreate(1), - testedProtocolVersion, - ) - - private lazy val defaultTopologyTimestamp = Some( - CantonTimestamp.Epoch.add(Duration.ofSeconds(1)) - ) - - private lazy val defaultSubmissionRequest = - SubmissionRequest.tryCreate( - DefaultTestIdentities.participant1, - MessageId.fromUuid(new UUID(1L, 1L)), - Batch.empty(testedProtocolVersion), - maxSequencingTime = CantonTimestamp.MaxValue, - topologyTimestamp = defaultTopologyTimestamp, - Some(defaultAggregationRule), - Option.empty[SequencingSubmissionCost], - testedProtocolVersion, - ) - - "aggregation id" should { - "authenticate the relevant fields" in { - if (testedProtocolVersion >= ProtocolVersion.v34) { - - val envelope1 = ClosedEnvelope.create( - ByteString.copyFromUtf8("Content1"), - Recipients.cc(DefaultTestIdentities.participant1), - Seq.empty, - testedProtocolVersion, - ) - val envelope2 = ClosedEnvelope.create( - ByteString.copyFromUtf8("Content2"), - Recipients.cc(DefaultTestIdentities.participant1), - Seq.empty, - testedProtocolVersion, - ) - - val differentRequests = Seq( - defaultSubmissionRequest, - defaultSubmissionRequest.copy(batch = Batch.fromClosed(testedProtocolVersion, envelope1)), - defaultSubmissionRequest.copy(batch = Batch.fromClosed(testedProtocolVersion, envelope2)), - defaultSubmissionRequest.copy(batch = - Batch.fromClosed( - testedProtocolVersion, - envelope1.copy(signatures = Seq(Signature.noSignature)), - ) - ), - defaultSubmissionRequest.copy(batch = - Batch.fromClosed(testedProtocolVersion, envelope1, envelope2) - ), - defaultSubmissionRequest.copy(batch = - Batch.fromClosed( - testedProtocolVersion, - envelope1.copy(recipients = Recipients.cc(DefaultTestIdentities.participant2)), - ) - ), - defaultSubmissionRequest.copy(maxSequencingTime = CantonTimestamp.Epoch), - defaultSubmissionRequest.copy(topologyTimestamp = Some(CantonTimestamp.MinValue)), - defaultSubmissionRequest.copy(topologyTimestamp = Some(CantonTimestamp.MaxValue)), - defaultSubmissionRequest.copy(topologyTimestamp = Some(CantonTimestamp.Epoch)), - defaultSubmissionRequest.copy(aggregationRule = - Some( - defaultAggregationRule.copy(eligibleMembers = - NonEmpty( - Seq, - DefaultTestIdentities.participant1, - DefaultTestIdentities.participant3, - ) - ) - ) - ), - defaultSubmissionRequest.copy(aggregationRule = - Some(defaultAggregationRule.copy(threshold = PositiveInt.tryCreate(2))) - ), - ) - - val aggregationIds = differentRequests.map(_.aggregationId(TestHash)) - aggregationIds.distinct.size shouldBe differentRequests.size - } - } - - "ignore sender-specific fields" in { - if (testedProtocolVersion >= ProtocolVersion.v34) { - val envelope1 = ClosedEnvelope.create( - 
ByteString.copyFromUtf8("some-content"), - Recipients.cc(DefaultTestIdentities.participant1, DefaultTestIdentities.participant3), - Seq.empty, - testedProtocolVersion, - ) - - val submissionRequestWithEnvelope1 = - defaultSubmissionRequest.copy(batch = Batch.fromClosed(testedProtocolVersion, envelope1)) - - def assertEquivalentRequests(requests: Seq[SubmissionRequest]): Unit = { - // Sanity check that the requests themselves are actually different - requests.distinct.size shouldBe requests.size - requests.size shouldBe >(1) - - val aggregationIds = requests.map(_.aggregationId(TestHash)) - aggregationIds.distinct.size shouldBe 1 - } - - val requestsWithoutSignatures = Seq( - submissionRequestWithEnvelope1, - submissionRequestWithEnvelope1.copy(sender = DefaultTestIdentities.participant3), - submissionRequestWithEnvelope1.copy(messageId = MessageId.fromUuid(new UUID(10, 10))), - ) - - assertEquivalentRequests(requestsWithoutSignatures) - - val envelope2 = ClosedEnvelope.create( - ByteString.copyFromUtf8("some-content"), - Recipients.cc(DefaultTestIdentities.participant1, DefaultTestIdentities.participant2), - Seq(Signature.noSignature), - testedProtocolVersion, - ) - - val submissionRequestWithEnvelope2 = - defaultSubmissionRequest.copy(batch = Batch.fromClosed(testedProtocolVersion, envelope2)) - - val someSignature = SymbolicCrypto.signature( - ByteString.copyFromUtf8("A signature"), - DefaultTestIdentities.participant1.fingerprint, - ) - - val requestsWithSignatures = Seq( - submissionRequestWithEnvelope2, - submissionRequestWithEnvelope2.copy(batch = - Batch.fromClosed( - testedProtocolVersion, - envelope2.copy(signatures = Seq(someSignature)), - ) - ), - submissionRequestWithEnvelope2.copy(batch = - Batch.fromClosed( - testedProtocolVersion, - envelope2.copy(signatures = Seq(someSignature, Signature.noSignature)), - ) - ), - submissionRequestWithEnvelope2.copy(messageId = MessageId.fromUuid(new UUID(10, 10))), - ) - - assertEquivalentRequests(requestsWithSignatures) - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/serialization/DeterministicEncodingTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/serialization/DeterministicEncodingTest.scala deleted file mode 100644 index 476fd9f882..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/serialization/DeterministicEncodingTest.scala +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.serialization - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.util.UByte -import com.google.protobuf.ByteString -import org.scalatest.wordspec.AnyWordSpec - -class DeterministicEncodingTest extends AnyWordSpec with BaseTest { - - "DeterministicEncoding" when { - val rest = ByteString.copyFromUtf8("rest") - - "working on Ints" should { - val goodInputs = - Table( - ("int", "serialization"), - (1, Array[Byte](0, 0, 0, 1)), - (-1, Array[Byte](-1, -1, -1, -1)), - (0x12345678, Array[Byte](0x12, 0x34, 0x56, 0x78)), - ) - - val badInputs = Table[Array[Byte]]( - "serialization", - Array[Byte](), - Array[Byte](0, 0, 0), - Array[Byte](-1), - ) - - "produce the correct serialization" in { - forAll(goodInputs) { (i, bytes) => - assert(DeterministicEncoding.encodeInt(i) === ByteString.copyFrom(bytes)) - } - } - "produce the correct deserialization" in { - forAll(goodInputs) { (i, bytes) => - assert( - DeterministicEncoding.decodeInt(ByteString.copyFrom(bytes)) === Right( - (i, ByteString.EMPTY) - ) - ) - } - } - - "produce the correct deserialization and hand back the remaining bytes" in { - forAll(goodInputs) { (i, bytes) => - assert( - DeterministicEncoding.decodeInt(ByteString.copyFrom(bytes).concat(rest)) == Right( - (i, rest) - ) - ) - } - } - - "fail if too few bytes are passed in" in { - forAll(badInputs) { bytes => - assert(DeterministicEncoding.decodeInt(ByteString.copyFrom(bytes)).isLeft) - } - } - } - - "working on Strings" should { - val goodInputs = - Table( - ("string", "serialization"), - ("", Array[Byte](0, 0, 0, 0)), - (" @", Array[Byte](0, 0, 0, 2, 32, 64)), - ) - - val badInputs = Table[Array[Byte]]( - "serialization", - Array[Byte](), - Array[Byte](0, 0, 0, 2, 32), - Array[Byte](0, 0, 2), - ) - - "produce the correct serialization" in { - forAll(goodInputs) { (s, bytes) => - assert(DeterministicEncoding.encodeString(s) === ByteString.copyFrom(bytes)) - } - } - - "produce the correct deserialization" in { - forAll(goodInputs) { (s, bytes) => - assert( - DeterministicEncoding.decodeString(ByteString.copyFrom(bytes)) === Right( - (s, ByteString.EMPTY) - ) - ) - } - } - - "produce the correct deserialization and hand back the remaining bytes" in { - forAll(goodInputs) { (s, bytes) => - assert( - DeterministicEncoding.decodeString(ByteString.copyFrom(bytes).concat(rest)) == Right( - (s, rest) - ) - ) - } - } - - "fail if invalid serializations are passed in" in { - forAll(badInputs) { bytes => - assert(DeterministicEncoding.decodeString(ByteString.copyFrom(bytes)).isLeft) - } - } - } - - "working on bytes" should { - val (b1, b2, b3) = - (Array[Byte](0, 1, 2, 3, 4, 5, 6), Array[Byte](), Array[Byte](6, 5, 4, 3, 2, 1)) - "yield the same inputs after a full cycle of one" in { - val ec1 = DeterministicEncoding.encodeBytes(ByteString.copyFrom(b1)) - val tmp = DeterministicEncoding.decodeBytes(ec1) - assert(tmp.isRight) - tmp.foreach { - case (o1, restB) => { - assertResult(b1)(o1.toByteArray) - assertResult(0)(restB.size) - } - } - } - "yield the same input after a full cycle of three" in { - val ec2 = DeterministicEncoding - .encodeBytes(ByteString.copyFrom(b1)) - .concat(DeterministicEncoding.encodeBytes(ByteString.copyFrom(b2))) - .concat(DeterministicEncoding.encodeBytes(ByteString.copyFrom(b3))) - val tmp = for { - dc1AndR <- DeterministicEncoding.decodeBytes(ec2) - dc2AndR <- DeterministicEncoding.decodeBytes(dc1AndR._2) - dc3AndR <- DeterministicEncoding.decodeBytes(dc2AndR._2) - } 
yield (dc1AndR._1, dc2AndR._1, dc3AndR._1, dc3AndR._2) - assert(tmp.isRight) - tmp.foreach { - case (o1, o2, o3, rest) => { - assertResult(b1)(o1.toByteArray) - assertResult(b2)(o2.toByteArray) - assertResult(b3)(o3.toByteArray) - assertResult(0)(rest.size) - } - } - } - - "fail on invalid length" in { - val testLengthEncoding = Array[Byte](-127, 0, 0, 0) - val tmp = DeterministicEncoding.decodeInt(ByteString.copyFrom(testLengthEncoding)) - assert(tmp.isRight) - tmp.foreach(xx => assert(xx._1 < 0)) - val negativeLength = Array[Byte](-127, 0, 0, 0, 1, 2, 3, 4, 5, 6) - assert(DeterministicEncoding.decodeBytes(ByteString.copyFrom(negativeLength)).isLeft) - val lengthExceeded = Array[Byte](0, 0, 0, 127, 1, 2, 3, 4, 5, 6) - assert(DeterministicEncoding.decodeBytes(ByteString.copyFrom(lengthExceeded)).isLeft) - } - } - "working on sequences" should { - "obtain original sequence" in { - Seq(Seq(1), Seq(), Seq(1, 2, 3, 4, 5)).foreach { tst => - val res = DeterministicEncoding.decodeSeqWith( - DeterministicEncoding.encodeSeqWith(tst)(DeterministicEncoding.encodeInt) - )(DeterministicEncoding.decodeInt) - res.value._1 shouldBe tst - res.foreach { case (seq, rest) => - seq shouldBe tst - assertResult(0)(rest.size) - } - } - } - } - - "working with unsigned var-ints" should { - def arrayToByteString(array: Array[Int]): ByteString = - UByte.fromArrayToByteString(array.map(UByte.tryFromUnsignedInt)) - - val goodInputs = - Table( - ("long", "serialization"), - (1L, Array[Int](1)), - (127L, Array[Int](127)), - (128L, Array[Int](128, 1)), - (255L, Array[Int](255, 1)), - (300L, Array[Int](172, 2)), - (16384L, Array[Int](128, 128, 1)), - ) - - val badInputs = Table[Array[Int]]( - "serialization", - // Input longer than 9 bytes - Array[Int](128, 128, 128, 128, 128, 128, 128, 128, 128, 128), - ) - - "produce the correct serialization" in { - forAll(goodInputs) { (i, bytes) => - assert(DeterministicEncoding.encodeUVarInt(i) === arrayToByteString(bytes)) - } - } - - "produce the correct deserialization" in { - forAll(goodInputs) { (i, bytes) => - assert( - DeterministicEncoding.decodeUVarInt(arrayToByteString(bytes)) === Right( - (i, ByteString.EMPTY) - ) - ) - } - } - - "produce the correct deserialization and hand back the remaining bytes" in { - forAll(goodInputs) { (i, bytes) => - assert( - DeterministicEncoding.decodeUVarInt(arrayToByteString(bytes).concat(rest)) == Right( - (i, rest) - ) - ) - } - } - - "fail deserialization on bad input" in { - forAll(badInputs) { bytes => - assert(DeterministicEncoding.decodeUVarInt(arrayToByteString(bytes)).isLeft) - } - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/serialization/HasCryptographicEvidenceTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/serialization/HasCryptographicEvidenceTest.scala deleted file mode 100644 index 38f01b7591..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/serialization/HasCryptographicEvidenceTest.scala +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
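An aside on the DeterministicEncodingTest deleted above: its unsigned var-int rows, such as (300L, Array(172, 2)), follow the usual 7-bits-per-byte scheme with a continuation bit. A minimal sketch consistent with that test table (an assumption about the implementation, verified only against the rows shown):

import com.google.protobuf.ByteString

// Emit 7 bits per byte, least-significant group first; the high bit marks a
// continuation byte. 300 = 0b1_0010_1100 -> low bits 0101100 (44) with the
// high bit set (172), then 10 (2): exactly the (300L, Array(172, 2)) row.
def encodeUVarIntSketch(value: Long): ByteString = {
  val out = scala.collection.mutable.ArrayBuffer.empty[Byte]
  var v = value
  while ((v & ~0x7fL) != 0L) {
    out += ((v & 0x7f) | 0x80).toByte
    v >>>= 7
  }
  out += v.toByte
  ByteString.copyFrom(out.toArray)
}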
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.serialization - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.version.* -import com.google.protobuf.ByteString -import org.scalatest.wordspec.AnyWordSpec - -trait HasCryptographicEvidenceTest { this: AnyWordSpec => - def hasCryptographicEvidenceSerialization[M <: HasCryptographicEvidence]( - sut1: M, - sut2: M, - hint: String = "", - ): Unit = { - val bytes1 = sut1.getCryptographicEvidence - "always produce the same serialization" + hint in { - val bytes1a = sut1.getCryptographicEvidence - assert(bytes1 === bytes1a) - } - - if (sut1 != sut2) { - "different objects produce different serializations" + hint in { - assert(bytes1 !== sut2.getCryptographicEvidence) - } - } - } - - def memoizedNondeterministicDeserialization[M <: ProtocolVersionedMemoizedEvidence]( - sut: M, - ser: ByteString, - hint: String = "", - )(deserialize: ByteString => M): Unit = { - hasCryptographicEvidenceDeserialization(sut, ser, hint)(deserialize) - "deserialize sets deserializedFrom correctly" + hint in { - val deserialized = deserialize(ser) - assert(deserialized.deserializedFrom.isDefined) - deserialized.deserializedFrom.foreach(xx => assertResult(ser)(xx)) - } - } - - def hasCryptographicEvidenceDeserialization[M <: HasCryptographicEvidence]( - sut: M, - ser: ByteString, - hint: String = "", - )(deserialize: ByteString => M): Unit = { - val bytes = sut.getCryptographicEvidence - - "deserialize to an equal object" + hint in { - val deserialized = deserialize(bytes) - assert(sut === deserialized) - } - - "serialize to the deserialization it was constructed from" + hint in { - val deserialized = deserialize(bytes) - assert(bytes === deserialized.getCryptographicEvidence) - assert(ser === deserialize(ser).getCryptographicEvidence) - } - } - - def tryDeserializer[M]( - deserializer: ByteString => Either[DeserializationError, M] - ): ByteString => M = - bytes => - deserializer(bytes) match { - case Right(m) => m - case Left(err) => fail(err.toString) - } - -} - -class MemoizedEvidenceTest extends AnyWordSpec with BaseTest with HasCryptographicEvidenceTest { - - val mst2: MemoizedEvidenceSUT = MemoizedEvidenceSUT(2) - val mst3: MemoizedEvidenceSUT = MemoizedEvidenceSUT(3) - val bytes = ByteString.copyFrom(Array[Byte](10, 5)) - - "MemoizedEvidence" should { - behave like hasCryptographicEvidenceSerialization(mst2, mst3) - behave like hasCryptographicEvidenceDeserialization(mst3, bytes)( - MemoizedEvidenceSUT.fromByteString - ) - } -} - -sealed case class MemoizedEvidenceSUT(b: Byte)( - override val representativeProtocolVersion: RepresentativeProtocolVersion[ - MemoizedEvidenceSUT.type - ], - override val deserializedFrom: Option[ByteString], -) extends ProtocolVersionedMemoizedEvidence { - - @SuppressWarnings(Array("org.wartremover.warts.Var")) - var counter: Byte = 0 - - protected override def toByteStringUnmemoized: ByteString = { - counter = (counter + 1).toByte - ByteString.copyFrom(Array(counter, b)) - } - - override protected val companionObj: MemoizedEvidenceSUT.type = MemoizedEvidenceSUT -} - -object MemoizedEvidenceSUT - extends BaseVersioningCompanion[MemoizedEvidenceSUT, Nothing, MemoizedEvidenceSUT, Unit] { - - val name: String = "MemoizedEvidenceSUT" - - val versioningTable: VersioningTable = VersioningTable( - ProtoVersion(30) -> UnsupportedProtoCodec(ProtocolVersion.v34) - ) - - private val defaultProtocolVersionRepresentative = protocolVersionRepresentativeFor( - BaseTest.testedProtocolVersion - ) - - 
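 // The memoization contract under test: getCryptographicEvidence must be
 // computed at most once (the mutable `counter` makes repeated calls to
 // `toByteStringUnmemoized` observable), and a value built via `fromByteString`
 // below must re-serialize to exactly the bytes it was parsed from.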
def apply(b: Byte): MemoizedEvidenceSUT = new MemoizedEvidenceSUT(b)( - defaultProtocolVersionRepresentative, - None, - ) - - def fromByteString(bytes: ByteString): MemoizedEvidenceSUT = { - if (bytes.size() != 2) - throw new IllegalArgumentException(s"Only two bytes expected, got: ${bytes.toString}") - - new MemoizedEvidenceSUT(bytes.byteAt(1))(defaultProtocolVersionRepresentative, Some(bytes)) - } -} - -class MemoizedEvidenceWithFailureTest - extends AnyWordSpec - with BaseTest - with HasCryptographicEvidenceTest { - - private val msft2: MemoizedEvidenceWithFailureSUT = - MemoizedEvidenceWithFailureSUT(2)(fail = false) - private val msft3: MemoizedEvidenceWithFailureSUT = - MemoizedEvidenceWithFailureSUT(3)(fail = false) - - private val bytes = ByteString.copyFrom(Array[Byte](10, 5)) - - "MemoizedEvidenceWithFailure" should { - behave like hasCryptographicEvidenceSerialization(msft2, msft3) - behave like hasCryptographicEvidenceDeserialization(msft3, bytes)( - MemoizedEvidenceWithFailureSUT.fromByteString - ) - - "throw an exception if serialization fails" in { - assertThrows[SerializationCheckFailed[Unit]](MemoizedEvidenceWithFailureSUT(5)(fail = true)) - } - } -} - -final case class MemoizedEvidenceWithFailureSUT private (b: Byte)( - fail: Boolean, - override val deserializedFrom: Option[ByteString], -) extends MemoizedEvidenceWithFailure[Unit] { - - @SuppressWarnings(Array("org.wartremover.warts.Var")) - var counter: Byte = 0 - - protected[this] override def toByteStringChecked: Either[Unit, ByteString] = - if (fail) - Left(()) - else { - counter = (counter + 1).toByte - Right(ByteString.copyFrom(Array(counter, b))) - } -} - -private object MemoizedEvidenceWithFailureSUT { - def apply(b: Byte)(fail: Boolean): MemoizedEvidenceWithFailureSUT = - new MemoizedEvidenceWithFailureSUT(b)(fail, None) - - def apply(i: Int)(fail: Boolean): MemoizedEvidenceWithFailureSUT = - new MemoizedEvidenceWithFailureSUT(i.toByte)(fail, None) - - def fromByteString(bytes: ByteString): MemoizedEvidenceWithFailureSUT = { - if (bytes.size() != 2) - throw new IllegalArgumentException(s"Only two bytes expected, got: ${bytes.toString}") - - new MemoizedEvidenceWithFailureSUT(bytes.byteAt(1))(fail = false, Some(bytes)) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/serialization/ProtoConverterTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/serialization/ProtoConverterTest.scala deleted file mode 100644 index 2717aeed8e..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/serialization/ProtoConverterTest.scala +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
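The ProtoConverterTest deleted below covers the small combinators used when decoding protobuf messages: `required` turns an absent optional field into a typed FieldNotSet error, and the parse* helpers tag range violations with the field name. The `required` shape is essentially this (sketch, with an illustrative error type):

// Sketch: an absent proto field becomes a typed error instead of an exception.
final case class FieldNotSetSketch(field: String)

def requiredSketch[A](field: String, value: Option[A]): Either[FieldNotSetSketch, A] =
  value.toRight(FieldNotSetSketch(field))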
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.serialization - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.ProtoDeserializationError.{ - FieldNotSet, - InvariantViolation, - TimestampConversionError, -} -import com.digitalasset.canton.config.RequireTypes.{ - NonNegativeInt, - NonNegativeLong, - PositiveInt, - PositiveLong, -} -import com.digitalasset.canton.serialization.ProtoConverter.{ - InstantConverter, - parseNonNegativeInt, - parseNonNegativeLong, - parsePositiveInt, - parsePositiveLong, - required, -} -import org.scalatest.wordspec.AnyWordSpec - -import java.time.Instant - -class ProtoConverterTest extends AnyWordSpec with BaseTest { - "InstantConverters" should { - "deserialize a timestamp" in { - val now = Instant.now() - val timestamp = InstantConverter.toProtoPrimitive(now) - val instant = InstantConverter.fromProtoPrimitive(timestamp) - assertResult(now)(instant.value) - } - "fail if timestamp is out of range" in { - val timestamp = InstantConverter.toProtoPrimitive(Instant.MAX) - val greaterThanMax = timestamp.copy(seconds = timestamp.seconds + 1) - val errorOrInstant = InstantConverter.fromProtoPrimitive(greaterThanMax) - - errorOrInstant.left.value should matchPattern { case TimestampConversionError(_) => } - } - } - - "required" should { - "return an error if the field is missing" in { - required("test", None).left.value should matchPattern { case FieldNotSet("test") => - } - } - - "return field value if available" in { - required("test", Some("value")).value shouldBe "value" - } - } - - "parse" should { - - "PositiveInt" in { - parsePositiveInt("field name", 1).value shouldBe PositiveInt.one - parsePositiveInt("field name", 0).left.value should matchPattern { - case InvariantViolation(Some("field name"), _) => - } - } - - "PositiveLong" in { - parsePositiveLong("field name", 1).value shouldBe PositiveLong.one - parsePositiveLong("field name", 0).left.value should matchPattern { - case InvariantViolation(Some("field name"), _) => - } - } - - "NonNegativeInt" in { - parseNonNegativeInt("field name", 0).value shouldBe NonNegativeInt.zero - parseNonNegativeInt("field name", -1).left.value should matchPattern { - case InvariantViolation(Some("field name"), _) => - } - } - - "NonNegativeLong" in { - parseNonNegativeLong("field name", 0).value shouldBe NonNegativeLong.zero - parseNonNegativeLong("field name", -1).left.value should matchPattern { - case InvariantViolation(Some("field name"), _) => - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/CursorPreheadStoreTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/CursorPreheadStoreTest.scala deleted file mode 100644 index 756ec1a4e4..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/CursorPreheadStoreTest.scala +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
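The CursorPreheadStoreTest deleted below pins down the prehead-cursor contract: advancing only ever moves the prehead forward (a stale advance is a no-op), rewinding only ever moves it backward, and rewinding an empty prehead leaves it empty. A minimal in-memory sketch of that contract, ordering-based and illustrative only (the real store additionally keeps the first-written timestamp for an unchanged counter):

// Minimal in-memory sketch of the advance/rewind contract.
final class PreheadCellSketch[A](implicit ord: Ordering[A]) {
  private var cur: Option[A] = None

  def prehead: Option[A] = synchronized(cur)

  // Moves forward only: an advance to an equal or earlier value is a no-op.
  def advanceTo(a: A): Unit = synchronized {
    if (!cur.exists(ord.gteq(_, a))) cur = Some(a)
  }

  // Moves backward only: a rewind to a later value than the current prehead
  // is a no-op, and rewinding an empty prehead leaves it empty.
  def rewindTo(to: Option[A]): Unit = synchronized {
    cur = (cur, to) match {
      case (Some(c), Some(t)) if ord.lteq(c, t) => cur
      case (None, _)                            => None
      case _                                    => to
    }
  }
}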
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.store - -import com.digitalasset.canton.data.{CantonTimestamp, Counter} -import com.digitalasset.canton.lifecycle.HasCloseContext -import com.digitalasset.canton.{BaseTest, FailOnShutdown} -import org.scalatest.wordspec.AsyncWordSpecLike - -trait CursorPreheadStoreTest { - this: AsyncWordSpecLike with BaseTest with HasCloseContext with FailOnShutdown => - - def cursorPreheadStore[Discr]( - mk: () => CursorPreheadStore[Discr], - counterBuilder: Long => Counter[Discr], - ): Unit = { - - val prehead5 = CursorPrehead(counterBuilder(5), CantonTimestamp.ofEpochSecond(5)) - val prehead10 = CursorPrehead(counterBuilder(10), CantonTimestamp.ofEpochSecond(10)) - val prehead20 = CursorPrehead(counterBuilder(20), CantonTimestamp.ofEpochSecond(20)) - val prehead30 = CursorPrehead(counterBuilder(20), CantonTimestamp.ofEpochSecond(30)) - - "store and retrieve the prehead" in { - val store = mk() - for { - cursor0 <- store.prehead - _ <- store.advancePreheadTo(prehead10) - cursor10 <- store.prehead - _ <- store.advancePreheadTo(prehead20) - cursor20 <- store.prehead - } yield { - cursor0 shouldBe None - cursor10 shouldBe Some(prehead10) - cursor20 shouldBe Some(prehead20) - } - } - - "advance" should { - "only advance the prehead" in { - val store = mk() - for { - cursor0 <- store.prehead - _ <- store.advancePreheadTo(prehead10) - cursor10 <- store.prehead - _ <- store.advancePreheadTo(prehead5) - cursor5 <- store.prehead - _ <- store.advancePreheadTo(prehead20) - cursor20 <- store.prehead - } yield { - cursor0 shouldBe None - cursor10 shouldBe Some(prehead10) - cursor5 shouldBe Some(prehead10) - cursor20 shouldBe Some(prehead20) - } - } - - "not overwrite the timestamp" in { - val store = mk() - for { - _ <- store.advancePreheadTo(prehead10) - _ <- store.advancePreheadTo(prehead10.copy(timestamp = CantonTimestamp.Epoch)) - cursor10 <- store.prehead - } yield { - cursor10 shouldBe Some(prehead10) - } - } - } - - "rewind" should { - "only rewind the prehead" in { - val store = mk() - for { - _ <- store.rewindPreheadTo(Some(prehead20)) - cursorNone <- store.prehead - _ <- store.advancePreheadTo(prehead20) - cursor20 <- store.prehead - _ <- store.rewindPreheadTo(Some(prehead5)) - cursor5 <- store.prehead - _ <- store.rewindPreheadTo(Some(prehead10)) - cursor10 <- store.prehead - _ <- store.rewindPreheadTo(None) - cursorNone2 <- store.prehead - } yield { - cursorNone shouldBe None - cursor20 shouldBe Some(prehead20) - cursor5 shouldBe Some(prehead5) - cursor10 shouldBe Some(prehead5) - cursorNone2 shouldBe None - } - } - - "not overwrite the timestamp" in { - val store = mk() - for { - _ <- store.advancePreheadTo(prehead10) - _ <- store.rewindPreheadTo(Some(prehead10.copy(timestamp = CantonTimestamp.Epoch))) - cursor10 <- store.prehead - } yield { - cursor10 shouldBe Some(prehead10) - } - } - } - - "override the prehead counter" in { - val store = mk() - for { - cursor0 <- store.prehead - _ <- store.advancePreheadTo(prehead10) - cursor10 <- store.prehead - _ <- store.overridePreheadUnsafe(None) - cursorNone <- store.prehead - _ <- store.overridePreheadUnsafe(Some(prehead30)) - cursor30 <- store.prehead - _ <- store.overridePreheadUnsafe(Some(prehead20)) - cursor20 <- store.prehead - } yield { - cursor0 shouldBe None - cursor10 shouldBe Some(prehead10) - cursorNone shouldBe None - cursor30 shouldBe Some(prehead30) - cursor20 shouldBe Some(prehead20) - } - } - } -} diff --git 
a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/PrunableByTimeTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/PrunableByTimeTest.scala deleted file mode 100644 index 149172ecff..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/PrunableByTimeTest.scala +++ /dev/null @@ -1,255 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.store - -import cats.instances.future.catsStdInstancesForFuture -import com.digitalasset.canton.concurrent.{ExecutorServiceExtensions, Threading} -import com.digitalasset.canton.config.DefaultProcessingTimeouts -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown, LifeCycle} -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.pruning.{PruningPhase, PruningStatus} -import com.digitalasset.canton.store.memory.InMemoryPrunableByTime -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.Thereafter.syntax.* -import com.digitalasset.canton.util.{MonadUtil, OptionUtil} -import com.digitalasset.canton.version.HasTestCloseContext -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.wordspec.{AsyncWordSpecLike, FixtureAsyncWordSpec} -import org.scalatest.{Assertion, FutureOutcome} - -import java.time.Instant -import java.util.concurrent.atomic.AtomicReference -import scala.Ordered.orderingToOrdered -import scala.concurrent.{ExecutionContext, Future} - -trait PrunableByTimeTest { - this: AsyncWordSpecLike & BaseTest => - - def prunableByTime(mkPrunable: ExecutionContext => PrunableByTime): Unit = { - - val ts = CantonTimestamp.assertFromInstant(Instant.parse("2019-04-04T10:00:00.00Z")) - val ts2 = ts.addMicros(1) - val ts3 = ts2.addMicros(1) - - implicit val closeContext: CloseContext = HasTestCloseContext.makeTestCloseContext(logger) - - "pruning timestamps increase" in { - val acs = mkPrunable(executionContext) - (for { - status0 <- acs.pruningStatus - _ <- acs.prune(ts) - status1 <- acs.pruningStatus - _ <- acs.prune(ts3) - status2 <- acs.pruningStatus - _ <- acs.prune(ts2) - status3 <- acs.pruningStatus - } yield { - assert(status0.isEmpty, "No pruning status initially") - assert( - status1.contains(PruningStatus(PruningPhase.Completed, ts, Some(ts))), - s"Pruning status at $ts", - ) - assert( - status2.contains(PruningStatus(PruningPhase.Completed, ts3, Some(ts3))), - s"Pruning advances to $ts3", - ) - assert( - status3.contains(PruningStatus(PruningPhase.Completed, ts3, Some(ts3))), - s"Pruning status remains at $ts3", - ) - }).failOnShutdown - } - - "pruning timestamps advance under concurrent pruning" in { - val parallelEc = Threading.newExecutionContext( - "pruning-parallel-ec", - noTracingLogger, - ) - val prunable = mkPrunable(parallelEc) - val iterations = 100 - - def timestampForIter(iter: Int): CantonTimestamp = CantonTimestamp.ofEpochSecond(iter.toLong) - def prune(iter: Int): Future[Unit] = - prunable.prune(timestampForIter(iter)).failOnShutdown - - val lastRead = new AtomicReference[Option[PruningStatus]](None) - - def read(): Future[Int] = - prunable.pruningStatus.map { statusO => - val previousO = lastRead.getAndAccumulate( - statusO, - OptionUtil.mergeWith(_, _)(Ordering[PruningStatus].max), - ) - assert( - 
previousO.forall(previous => statusO.exists(previous <= _)),
- s"PrunableByTime pruning status decreased from $previousO to $statusO",
- )
- if (statusO.exists(_.phase == PruningPhase.Started)) 1 else 0
- }(parallelEc).failOnShutdown
-
- val pruningsF = Future.traverse((1 to iterations).toList)(prune)(List, parallelEc)
- val readingsF = MonadUtil.sequentialTraverse(1 to iterations)(_ => read())(
- catsStdInstancesForFuture(parallelEc)
- )
-
- val testF = for {
- _ <- pruningsF
- readings <- readingsF
- statusEnd <- prunable.pruningStatus.failOnShutdown
- } yield {
- logger.info(s"concurrent pruning test had ${readings.sum} intermediate readings")
- val ts = timestampForIter(iterations)
- assert(
- statusEnd.contains(PruningStatus(PruningPhase.Completed, ts, Some(ts)))
- )
- }
- testF.thereafter { _ =>
- LifeCycle.close(
- ExecutorServiceExtensions(parallelEc)(logger, DefaultProcessingTimeouts.testing)
- )(logger)
- }
- }
-
- }
-
-}
-
-class PrunableByTimeLogicTest
- extends FixtureAsyncWordSpec
- with BaseTest
- with HasExecutionContext
- with HasTestCloseContext {
-
- override def withFixture(test: OneArgAsyncTest): FutureOutcome = {
- val env = new FixtureParam(loggerFactory)
- complete {
- withFixture(test.toNoArgAsyncTest(env))
- } lastly {}
- }
- final class FixtureParam(
- override val loggerFactory: NamedLoggerFactory
- ) extends InMemoryPrunableByTime
- with NamedLogging {
-
- override protected def batchingParameters: Option[PrunableByTimeParameters] = Some(
- PrunableByTimeParameters.testingParams
- )
-
- override protected implicit val ec: ExecutionContext =
- PrunableByTimeLogicTest.this.directExecutionContext
-
- // variable used to record which pruning intervals were invoked
- val pruningRequests =
- new AtomicReference[Seq[(CantonTimestamp, Option[CantonTimestamp])]](Seq.empty)
- // variable used to signal how many rows we're pruning
- // depending on the return value, the intervals will be either shortened or extended
- val returnValues = new AtomicReference[Seq[Int]](Seq.empty)
-
- override protected def kind: String = "fixture"
-
- override protected[canton] def doPrune(
- limit: CantonTimestamp,
- lastPruning: Option[CantonTimestamp],
- )(implicit traceContext: TraceContext): FutureUnlessShutdown[Int] = {
- pruningRequests.updateAndGet(_ :+ (limit, lastPruning))
- FutureUnlessShutdown.pure(returnValues.getAndUpdate(_.drop(1)).headOption.getOrElse(0))
- }
-
- def assertRequests(from: CantonTimestamp, until: CantonTimestamp, buckets: Long): Assertion = {
- val requests = pruningRequests.getAndSet(Seq.empty)
- logger.debug(s"Had requests $requests")
- requests should have length buckets
- requests.headOption.flatMap { case (_, from) => from } should contain(from)
- requests.lastOption.map { case (until, _) => until } should contain(until)
- // check that series is continuous
- requests.foldLeft(None: Option[CantonTimestamp]) {
- case (Some(prevNext), (next, Some(prev))) =>
- prevNext shouldBe prev
- Some(next)
- case (_, (next, _)) => Some(next)
- }
- forAll(requests) { case (next, prev) =>
- assert(prev.valueOrFail(s"prev is empty for $next") < next)
- }
- }
-
- def runPruning(
- increments: Seq[Int],
- mult: Long = 10L,
- ): FutureUnlessShutdown[Unit] =
- MonadUtil
- .sequentialTraverse_(increments) { counter =>
- pruningRequests.getAndSet(Seq.empty)
- prune(ts0.plusSeconds(counter * mult))
- }
-
- }
-
- private lazy val ts0 = CantonTimestamp.Epoch
- private lazy val ts1 = ts0.plusSeconds(60)
- private lazy val ts2 = ts1.plusSeconds(10)
-
- "dynamic interval sizing"
should {
- "compute right number of intervals with starting value" in { (f: FixtureParam) =>
- f.returnValues.set(Seq.fill(20)(10))
- for {
- // first pruning hits empty state, therefore 1 pruning query
- _ <- f.prune(ts0).failOnShutdown
- _ = {
- f.pruningRequests.getAndSet(Seq.empty) should have length 1
- }
- // pruning in 1 minute interval with default of max 5 seconds interval will hit the 10 bucket limit
- _ <- f.prune(ts1).failOnShutdown
- _ = f.assertRequests(ts0, ts1, 10)
- // subsequent pruning in 10 sec interval will be spaced into 2 buckets of 5s
- _ <- f.prune(ts2).failOnShutdown
- _ = f.assertRequests(ts1, ts2, 2)
- } yield {
- succeed
- }
- }
- "increase interval size if batches are small" in { f =>
- f.returnValues.set(Seq.fill(200)(1))
- f.runPruning((0 to 10))
- .map { _ =>
- // we started with 5s intervals. after a few iterations only returning small batches, we should just have 1 bucket
- f.assertRequests(ts0.plusSeconds(9 * 10), ts0.plusSeconds(10 * 10), 1)
- }
- .failOnShutdown
- }
- "reduce interval size if batches are too big" in { f =>
- f.returnValues.set(Seq.fill(200)(50))
- f.runPruning((0 to 10))
- .map { _ =>
- // we started with 5s intervals. after a few iterations returning large batches, we should have increased to the max num buckets
- f.assertRequests(ts0.plusSeconds(9 * 10), ts0.plusSeconds(10 * 10), 10)
- }
- .failOnShutdown
- }
- "don't increase interval beyond actual invocation interval" in { f =>
- f.returnValues.set(Seq.fill(200)(1))
- for {
- // first, prune 10 times every 1s
- _ <- f.runPruning((0 to 10), mult = 1).failOnShutdown
- // now, if we prune for a larger interval, we shouldn't have a larger step size as the pruning interval
- // was shorter than the step size
- _ <- f.runPruning(Seq(20), mult = 1).failOnShutdown
- } yield {
- f.assertRequests(ts0.plusSeconds(10), ts0.plusSeconds(20), 2)
- }
- }
- "limit number of buckets during big jumps" in { f =>
- f.returnValues.set(Seq.fill(200)(10))
- for {
- _ <- f.prune(ts0).failOnShutdown
- _ = { f.pruningRequests.set(Seq.empty) }
- _ <- f.prune(ts0.plusSeconds(1000)).failOnShutdown
- } yield {
- f.assertRequests(ts0, ts0.plusSeconds(1000), 10)
- }
- }
- }
-
-}
diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/PruningSchedulerStoreTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/PruningSchedulerStoreTest.scala
deleted file mode 100644
index 090e4eacd9..0000000000
--- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/PruningSchedulerStoreTest.scala
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-package com.digitalasset.canton.store
-
-import cats.data.EitherT
-import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
-import com.digitalasset.canton.scheduler.{Cron, PruningSchedule}
-import com.digitalasset.canton.time.PositiveSeconds
-import com.digitalasset.canton.{BaseTest, FailOnShutdown}
-import org.scalatest.wordspec.AsyncWordSpec
-
-trait PruningSchedulerStoreTest {
- this: AsyncWordSpec & BaseTest & FailOnShutdown =>
-
- protected def pruningSchedulerStore(mk: () => PruningSchedulerStore): Unit = {
-
- val schedule1 = PruningSchedule(
- Cron.tryCreate("* /10 * * * ? *"),
- PositiveSeconds.tryOfSeconds(1),
- PositiveSeconds.tryOfSeconds(30),
- )
- val schedule2 = PruningSchedule(
- Cron.tryCreate("* * 7,19 * * ? 
*"), - PositiveSeconds.tryOfHours(8), - PositiveSeconds.tryOfDays(14), - ) - - assert(schedule1.cron != schedule2.cron) - assert(schedule1.maxDuration != schedule2.maxDuration) - assert(schedule1.retention != schedule2.retention) - - "be able to set, clear and get schedules" in { - val store = mk() - - for { - emptySchedule <- change(store, _.clearSchedule()) - schedule1Queried1 <- change(store, _.setSchedule(schedule1)) - emptySchedule2 <- change(store, _.clearSchedule()) - emptySchedule3 <- change(store, _.clearSchedule()) - schedule1Queried2 <- change(store, _.setSchedule(schedule1)) - schedule2Queried <- change(store, _.setSchedule(schedule2)) - } yield { - emptySchedule shouldBe None - schedule1Queried1 shouldBe Some(schedule1) - emptySchedule2 shouldBe None - emptySchedule3 shouldBe None - schedule1Queried2 shouldBe Some(schedule1) - schedule2Queried shouldBe Some(schedule2) - } - } - - "be able to update individual fields of schedules" in { - val store = mk() - - for { - _ <- store.clearSchedule() - schedule1Queried1 <- change(store, _.setSchedule(schedule1)) - scheduleChangedCron <- changeET(store, _.updateCron(schedule2.cron), "update cron") - scheduleChangedMaxDuration <- changeET( - store, - _.updateMaxDuration(schedule2.maxDuration), - "update max_duration", - ) - scheduleChangedRetention <- changeET( - store, - _.updateRetention(schedule2.retention), - "update retention", - ) - } yield { - schedule1Queried1 shouldBe Some(schedule1) - scheduleChangedCron shouldBe Some(schedule1.copy(cron = schedule2.cron)) - scheduleChangedMaxDuration shouldBe Some(schedule2.copy(retention = schedule1.retention)) - scheduleChangedRetention shouldBe Some(schedule2) - } - } - - "refuse to update fields on non-existing schedules" in { - val store = mk() - - for { - emptySchedule <- change(store, _.clearSchedule()) - _ <- expectFailureET( - "cron", - store.updateCron(schedule2.cron), - "update cron of non-existing schedule", - ) - _ <- expectFailureET( - "max_duration", - store.updateMaxDuration(schedule2.maxDuration), - "update max_duration of non-existing schedule", - ) - _ <- expectFailureET( - "retention", - store.updateRetention(schedule2.retention), - "update retention of non-existing schedule", - ) - emptySchedule2 <- store.getSchedule() - } yield { - emptySchedule shouldBe None - emptySchedule2 shouldBe None - } - - } - } - - protected def change( - store: PruningSchedulerStore, - modify: PruningSchedulerStore => FutureUnlessShutdown[Unit], - ): FutureUnlessShutdown[Option[PruningSchedule]] = for { - _ <- modify(store) - schedule <- store.getSchedule() - } yield schedule - - private def changeET( - store: PruningSchedulerStore, - modify: PruningSchedulerStore => EitherT[FutureUnlessShutdown, String, Unit], - clue: String, - ): FutureUnlessShutdown[Option[PruningSchedule]] = - change(store, store => valueOrFail(modify(store))(clue)) - - private def expectFailureET( - field: String, - change: => EitherT[FutureUnlessShutdown, String, Unit], - clue: String, - ): FutureUnlessShutdown[Unit] = for { - err <- leftOrFail(change)(clue) - _errorMatchesExpected <- - if ( - err.equals( - s"Attempt to update $field of a schedule that has not been previously configured. Use set_schedule instead." 
- ) - ) FutureUnlessShutdown.unit - else FutureUnlessShutdown.failed(new RuntimeException(s"Wrong error message: $err")) - } yield () -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/SendTrackerStoreTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/SendTrackerStoreTest.scala deleted file mode 100644 index 793be2601d..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/SendTrackerStoreTest.scala +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.store - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.sequencing.protocol.MessageId -import org.scalactic.source.Position -import org.scalatest.wordspec.AsyncWordSpec -import org.scalatest.{Assertion, BeforeAndAfter} - -trait SendTrackerStoreTest extends BeforeAndAfter { - this: AsyncWordSpec with BaseTest => - - implicit class StringWrapperUS(s: String) { - def inUS(f: => FutureUnlessShutdown[Assertion])(implicit pos: Position): Unit = - s in f.onShutdown(fail(s"Unexpected shutdown in StringWrapperUS.inUS")) - } - - def sendTrackerStore(mk: () => SendTrackerStore): Unit = - "pending sends" should { - val (msgId1, msgId2, msgId3) = - (MessageId.tryCreate("1"), MessageId.tryCreate("2"), MessageId.tryCreate("3")) - val (ts1, ts2, ts3) = - ( - CantonTimestamp.MinValue, - CantonTimestamp.MinValue.plusSeconds(1), - CantonTimestamp.MinValue.plusSeconds(2), - ) - - "be able to add, remove and list pending sends" inUS { - val store = mk() - for { - _ <- valueOrFail(store.savePendingSend(msgId1, ts1))("savePendingSend msgId1") - _ <- valueOrFail(store.savePendingSend(msgId2, ts2))("savePendingSend msgId2") - pendingSends1 <- store.fetchPendingSends - _ = pendingSends1 shouldBe Map(msgId1 -> ts1, msgId2 -> ts2) - _ <- store.removePendingSend(msgId2) - _ <- valueOrFail(store.savePendingSend(msgId3, ts3))("savePendingSend msgId3") - pendingSends2 <- store.fetchPendingSends - } yield pendingSends2 shouldBe Map(msgId1 -> ts1, msgId3 -> ts3) - } - - "fail if we try to track a send with an already tracked id" inUS { - val store = mk() - - for { - _ <- valueOrFail(store.savePendingSend(msgId1, ts1))("savePendingSend msgId1") - resultE <- store.savePendingSend(msgId1, ts2).value - } yield resultE.left.value shouldBe SavePendingSendError.MessageIdAlreadyTracked - } - - "be okay tracking a send with a tracked id that has been previously used but since removed" inUS { - val store = mk() - - for { - _ <- valueOrFail(store.savePendingSend(msgId1, ts1))("savePendingSend msgId1") - _ <- store.removePendingSend(msgId1) - _ <- valueOrFail(store.savePendingSend(msgId1, ts2))("savePendingSend msgId1 again") - pendingSends <- store.fetchPendingSends - } yield pendingSends shouldBe Map(msgId1 -> ts2) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/SequencedEventStoreTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/SequencedEventStoreTest.scala deleted file mode 100644 index 69a7492b3b..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/SequencedEventStoreTest.scala +++ /dev/null @@ -1,1271 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) 
GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.store - -import cats.data.Validated.Valid -import cats.syntax.parallel.* -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto -import com.digitalasset.canton.crypto.{Signature, SigningKeyUsage, SigningPublicKey, TestHash} -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.pruning.{PruningPhase, PruningStatus} -import com.digitalasset.canton.sequencing.protocol.* -import com.digitalasset.canton.sequencing.traffic.TrafficReceipt -import com.digitalasset.canton.sequencing.{SequencedSerializedEvent, SequencerTestUtils} -import com.digitalasset.canton.store.SequencedEventStore.* -import com.digitalasset.canton.topology.{PhysicalSynchronizerId, SynchronizerId, UniqueIdentifier} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.{BaseTest, CloseableTest, FailOnShutdown, SequencerCounter} -import com.google.protobuf.ByteString -import org.scalatest.exceptions.TestFailedException -import org.scalatest.wordspec.AsyncWordSpec - -import scala.concurrent.ExecutionContext - -trait SequencedEventStoreTest extends PrunableByTimeTest with CloseableTest with FailOnShutdown { - this: AsyncWordSpec with BaseTest => - - import com.digitalasset.canton.store.SequencedEventStoreTest.SeqTuple3 - - private lazy val crypto: SymbolicCrypto = - SymbolicCrypto.create( - testedReleaseProtocolVersion, - timeouts, - loggerFactory, - ) - - private lazy val sequencerKey: SigningPublicKey = - crypto.generateSymbolicSigningKey(usage = SigningKeyUsage.ProtocolOnly) - - def sign(str: String): Signature = - crypto.sign(TestHash.digest(str), sequencerKey.id, SigningKeyUsage.ProtocolOnly) - - private lazy val synchronizerId: PhysicalSynchronizerId = SynchronizerId( - UniqueIdentifier.tryFromProtoPrimitive("da::default") - ).toPhysical - - private def mkBatch(envelopes: ClosedEnvelope*): Batch[ClosedEnvelope] = - Batch(envelopes.toList, testedProtocolVersion) - - private def signDeliver(event: Deliver[ClosedEnvelope]): SignedContent[Deliver[ClosedEnvelope]] = - SignedContent( - event, - sign(s"deliver signature for ${event.timestamp}"), - None, - testedProtocolVersion, - ) - - private lazy val closedEnvelope = ClosedEnvelope.create( - ByteString.copyFromUtf8("message"), - RecipientsTest.testInstance, - Seq.empty, - testedProtocolVersion, - ) - - private def mkDeliver(ts: CantonTimestamp): SequencedSerializedEvent = - mkSequencedSerializedEvent( - SignedContent( - Deliver.create( - None, - ts, - synchronizerId, - Some(MessageId.tryCreate("deliver")), - mkBatch(closedEnvelope), - None, - testedProtocolVersion, - Option.empty[TrafficReceipt], - ), - sign("deliver signature"), - None, - testedProtocolVersion, - ), - nonEmptyTraceContext2, - ) - - private lazy val singleDeliver: SequencedSerializedEvent = - mkDeliver(CantonTimestamp.ofEpochMilli(-1)) - - private lazy val singleMaxDeliverPositive: SequencedSerializedEvent = - mkSequencedSerializedEvent( - SignedContent( - Deliver.create( - Some( - CantonTimestamp.MaxValue - ), - CantonTimestamp.MaxValue, - synchronizerId, - Some(MessageId.tryCreate("single-max-positive-deliver")), - mkBatch(closedEnvelope), - Some(CantonTimestamp.MaxValue), - testedProtocolVersion, - Option.empty[TrafficReceipt], - ), - sign("single deliver signature"), - None, - testedProtocolVersion, - ), - nonEmptyTraceContext2, - ) - - private val singleMinDeliver: SequencedSerializedEvent = - 
mkSequencedSerializedEvent( - SignedContent( - Deliver.create( - None, - CantonTimestamp.MinValue.immediateSuccessor, - synchronizerId, - Some(MessageId.tryCreate("single-min-deliver")), - mkBatch(closedEnvelope), - Some(CantonTimestamp.MinValue.immediateSuccessor), - testedProtocolVersion, - Option.empty[TrafficReceipt], - ), - sign("single deliver signature"), - None, - testedProtocolVersion, - ), - nonEmptyTraceContext2, - ) - - private def mkDeliverEventTc1(ts: CantonTimestamp): SequencedSerializedEvent = - mkSequencedSerializedEvent( - SignedContent( - SequencerTestUtils.mockDeliver(timestamp = ts, synchronizerId = synchronizerId), - sign("Mock deliver signature"), - None, - testedProtocolVersion, - ), - nonEmptyTraceContext1, - ) - - private val event: SequencedSerializedEvent = mkDeliverEventTc1(CantonTimestamp.Epoch) - - private val emptyDeliver: SequencedSerializedEvent = - mkSequencedSerializedEvent( - SignedContent( - Deliver.create( - None, - CantonTimestamp.ofEpochMilli(1), - synchronizerId, - Some(MessageId.tryCreate("empty-deliver")), - mkBatch(), - None, - testedProtocolVersion, - Option.empty[TrafficReceipt], - ), - sign("Deliver signature"), - None, - testedProtocolVersion, - ) - ) - - private def mkDeliverError(ts: CantonTimestamp): SequencedSerializedEvent = - mkSequencedSerializedEvent( - SignedContent( - DeliverError.create( - Some( - ts.immediatePredecessor - ), - ts, - synchronizerId, - MessageId.tryCreate("deliver-error"), - SequencerErrors.SubmissionRequestRefused("paniertes schnitzel"), - testedProtocolVersion, - Option.empty[TrafficReceipt], - ), - sign("Deliver error signature"), - None, - testedProtocolVersion, - ) - ) - - private def ts(counter: Long): CantonTimestamp = CantonTimestamp.Epoch.addMicros(counter) - - private def mkSequencedSerializedEvent( - event: SignedContent[SequencedEvent[ClosedEnvelope]], - traceContext: TraceContext = TraceContext.empty, - ): SequencedSerializedEvent = - SequencedEventWithTraceContext(event)(traceContext) - - private def mkEmptyIgnoredEvent( - counter: Long, - microsSinceMin: Long = -1, - ): IgnoredSequencedEvent[Nothing] = { - val t = - if (microsSinceMin < 0) ts(counter) - else CantonTimestamp.MinValue.addMicros(microsSinceMin) - IgnoredSequencedEvent(t, SequencerCounter(counter), None)(traceContext) - } - - protected def sequencedEventStore(mkSes: ExecutionContext => SequencedEventStore): Unit = { - def mk(): SequencedEventStore = mkSes(executionContext) - - behave like prunableByTime(mkSes) - - "not find sequenced events in empty store" in { - val store = mk() - val criteria = List(ByTimestamp(CantonTimestamp.Epoch), LatestUpto(CantonTimestamp.MaxValue)) - criteria - .parTraverse_ { criterion => - store - .find(criterion) - .value - .map(res => res shouldBe Left(SequencedEventNotFoundError(criterion))) - } - .map(_ => succeed) - } - - "should find stored sequenced events" in { - val store = mk() - - val events = List[SequencedSerializedEvent]( - singleDeliver, - event, - emptyDeliver, - ) - val storedEvents = events.zipWithIndex.map { case (event, index) => - OrdinarySequencedEvent( - counter = SequencerCounter(index), - signedEvent = event.signedEvent, - )(event.traceContext) - } - val criteria = List( - ByTimestamp(CantonTimestamp.ofEpochMilli(-1)), - ByTimestamp(CantonTimestamp.Epoch), - ByTimestamp(CantonTimestamp.ofEpochMilli(1)), - ) - - for { - _stored <- store.store(events) - found <- criteria.parTraverse(store.find).toValidatedNec - } yield { - assert(found.isValid, "finding deliver events succeeds") - 
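// `toValidatedNec` above wrapped the per-criterion lookup results into a single `Validated`,
// so the `isValid` check confirms that all three timestamp lookups succeeded before the
// comparison below matches the found events against the stored ones.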
assert(found.map(_.toSeq) == Valid(storedEvents), "found the right deliver events") - } - } - - "store is idempotent" in { - val store = mk() - - val events1 = List[SequencedSerializedEvent]( - singleDeliver, - event, - ) - val events2 = List[SequencedSerializedEvent]( - event, - emptyDeliver, - ) - - for { - _ <- store.store(events1).onShutdown(Seq.empty) - _ <- loggerFactory.assertLogs( - store.store(events2).onShutdown(Seq.empty), - _.warningMessage should include( - "Skipping 1 events with timestamp <= 1970-01-01T00:00:00Z (presumed already processed)" - ), - ) - } yield succeed - } - - "store works for no events" in { - val store = mk() - store.store(Seq.empty).map(_ => succeed) - } - - "find works for many events" in { - val store = mk() - - val events = (0L to 99L).toList.map { i => - mkSequencedSerializedEvent( - SignedContent( - SequencerTestUtils - .mockDeliver( - timestamp = CantonTimestamp.ofEpochMilli(i * 2), - synchronizerId = synchronizerId, - ), - sign(s"signature $i"), - None, - testedProtocolVersion, - ) - ) - } - - for { - storedEvents <- store.store(events) - found <- (0L to 199L).toList - .parTraverse { i => - store.find(ByTimestamp(CantonTimestamp.ofEpochMilli(i))).value - } - } yield { - storedEvents should have size 100L - storedEvents.zipWithIndex.foreach { case (event, i) => - assert( - event.counter == SequencerCounter(i), - s"Unexpected counter=${event.counter}, expected: $i", - ) - } - assert(found.collect { case Right(ev) => ev.asSequencedSerializedEvent } == events) - assert( - found.collect { case Left(error) => error } == (1L to 100L).map(i => - SequencedEventNotFoundError(ByTimestamp(CantonTimestamp.ofEpochMilli(2 * i - 1))) - ) - ) - } - } - - "get a range by timestamp" in { - val store = mk() - val startingCounter = 1000 - val eventCount = 100L - val firstIndex = 10 - val lastIndex = 90 - val events = (1L to eventCount).toList.map { i => - mkSequencedSerializedEvent( - SignedContent( - SequencerTestUtils - .mockDeliver( - timestamp = CantonTimestamp.Epoch.plusMillis(i * 2), - synchronizerId = synchronizerId, - ), - sign(s"signature $i"), - None, - testedProtocolVersion, - ) - ) - } - - for { - _ <- store.reinitializeFromDbOrSetLowerBound(counterIfEmpty = - SequencerCounter(startingCounter) - ) - storedEvents <- store.store(events) - found <- store - .findRange( - ByTimestampRange(events(firstIndex).timestamp, events(lastIndex).timestamp), - None, - ) - .valueOrFail("") - } yield { - storedEvents.zipWithIndex.foreach { case (event, i) => - assert( - event.counter == SequencerCounter(startingCounter + i + 1), - s"Unexpected counter=${event.counter}, expected: $i", - ) - } - assert( - found.map(_.asSequencedSerializedEvent).toList == events.slice(firstIndex, lastIndex + 1) - ) - } - } - - "get a range with a limit" in { - val store = mk() - val startingCounter = 1000 - val eventCount = 100L - val firstIndex = 10 - val limit = 90 - val events = (1L to eventCount).toList.map { i => - mkSequencedSerializedEvent( - SignedContent( - SequencerTestUtils - .mockDeliver( - timestamp = CantonTimestamp.Epoch.plusMillis(i * 2), - synchronizerId = synchronizerId, - ), - sign(s"signature $i"), - None, - testedProtocolVersion, - ) - ) - } - - for { - _ <- store.reinitializeFromDbOrSetLowerBound(counterIfEmpty = - SequencerCounter(startingCounter) - ) - _ <- store.store(events) - foundByTs <- store - .findRange( - ByTimestampRange(events(firstIndex).timestamp, events.lastOption.value.timestamp), - Some(limit), - ) - .valueOrFail("") - } yield { - assert( - 
foundByTs.map(_.asSequencedSerializedEvent).toList == events.slice( - firstIndex, - firstIndex + limit, - ) - ) - } - } - - "returns all values within a range when range bounds are not in the store" in { - val store = mk() - val startingCounter = 1000 - val eventCount = 100L - val firstIndex = 10 - val lastIndex = 90 - val delta = 10 - val events = (1L to eventCount).toList.map { i => - mkSequencedSerializedEvent( - SignedContent( - SequencerTestUtils - .mockDeliver( - timestamp = CantonTimestamp.Epoch.plusMillis(i * delta), - synchronizerId = synchronizerId, - ), - sign(s"signature $i"), - None, - testedProtocolVersion, - ) - ) - } - - for { - _ <- store.reinitializeFromDbOrSetLowerBound(counterIfEmpty = - SequencerCounter(startingCounter) - ) - _ <- store.store(events) - foundByTs1 <- store - .findRange( - ByTimestampRange( - events(firstIndex).timestamp.minusMillis(delta / 2L), - events(lastIndex).timestamp.plusMillis(delta / 2L), - ), - None, - ) - .valueOrFail("") - foundByTs2 <- store - .findRange( - ByTimestampRange( - events.headOption.value.timestamp.minusMillis(delta / 2L), - events.lastOption.value.timestamp.plusMillis(delta / 2L), - ), - None, - ) - .valueOrFail("") - - } yield { - assert( - foundByTs1.map(_.asSequencedSerializedEvent).toList == events.slice( - firstIndex, - lastIndex + 1, - ) - ) - assert(foundByTs2.map(_.asSequencedSerializedEvent).toList == events) - } - } - - "find range returns no values for empty store" in { - val store = mk() - for { - foundByTs <- store.findRange( - ByTimestampRange(CantonTimestamp.Epoch, CantonTimestamp.Epoch.plusMillis(100)), - None, - ) - } yield { - assert(foundByTs.toList == List.empty) - } - }.valueOrFail("") - - "find range returns no values when range outside store values" in { - val store = mk() - val startingCounter = 149 - val min = 50L - val max = 100L - val getTs = { (i: Long) => - CantonTimestamp.Epoch.plusMillis(i * 2 + 200) - } - val events = (min to max).toList.map { i => - mkSequencedSerializedEvent( - SignedContent( - SequencerTestUtils - .mockDeliver(timestamp = getTs(i), synchronizerId = synchronizerId), - sign(s"signature $i"), - None, - testedProtocolVersion, - ) - ) - } - - for { - _ <- store.reinitializeFromDbOrSetLowerBound(counterIfEmpty = - SequencerCounter(startingCounter) - ) - _ <- store.store(events) - foundByTsAbove <- store - .findRange(ByTimestampRange(getTs(max + 5), getTs(max + 10)), None) - .valueOrFail("") - - foundByTsBelow <- store - .findRange(ByTimestampRange(getTs(min - 10), getTs(min - 5)), None) - .valueOrFail("") - - } yield { - assert(foundByTsAbove.toList == List.empty) - assert(foundByTsBelow.toList == List.empty) - } - } - - "find range requires that the start of the range is not after the end" in { - val store = mk() - val startingCounter = 1000 - val events = (1L to 100L).toList.map { i => - mkSequencedSerializedEvent( - SignedContent( - SequencerTestUtils - .mockDeliver( - timestamp = CantonTimestamp.Epoch.plusMillis(i * 2), - synchronizerId = synchronizerId, - ), - sign(s"signature $i"), - None, - testedProtocolVersion, - ) - ) - } - - for { - _ <- store.reinitializeFromDbOrSetLowerBound(counterIfEmpty = - SequencerCounter(startingCounter) - ) - _ <- store.store(events) - } yield { - assertThrows[IllegalArgumentException]( - store.findRange( - ByTimestampRange(events.lastOption.value.timestamp, events.headOption.value.timestamp), - None, - ) - ) - } - } - - "find range checks overlap with pruning" in { - val store = mk() - val startingCounter = 0 - val events = (1L to 5L).toList.map 
{ i => - mkSequencedSerializedEvent( - SignedContent( - SequencerTestUtils - .mockDeliver( - timestamp = CantonTimestamp.ofEpochSecond(i), - synchronizerId = synchronizerId, - ), - sign(s"signature $i"), - None, - testedProtocolVersion, - ) - ) - } - val tsPrune = CantonTimestamp.ofEpochSecond(2) - val ts4 = CantonTimestamp.ofEpochSecond(4) - val criterionAt = ByTimestampRange(tsPrune, CantonTimestamp.MaxValue) - val criterionBelow = ByTimestampRange(CantonTimestamp.MinValue, CantonTimestamp.Epoch) - for { - _ <- store.reinitializeFromDbOrSetLowerBound(counterIfEmpty = - SequencerCounter(startingCounter) - ) - _ <- store.store(events) - _ <- store.prune(tsPrune) - _ <- store - .findRange(ByTimestampRange(tsPrune.immediateSuccessor, ts4), None) - .valueOrFail("successful range query") - fail2 <- leftOrFail(store.findRange(criterionAt, None))("at pruning point") - failBelow <- leftOrFail(store.findRange(criterionBelow, None))( - "before pruning point" - ) - } yield { - val pruningStatus = PruningStatus(PruningPhase.Completed, tsPrune, Some(tsPrune)) - fail2.criterion shouldBe criterionAt - fail2.pruningStatus shouldBe pruningStatus - fail2.foundEvents.map(_.timestamp) shouldBe events - .filter(_.timestamp > tsPrune) - .map(_.timestamp) - failBelow shouldBe SequencedEventRangeOverlapsWithPruning( - criterionBelow, - pruningStatus, - Seq.empty, - ) - } - } - - "find returns the latest event" in { - val store = mk() - val startingCounter = 99 - val deliverExpectedSc100 = - mkSequencedSerializedEvent( - signDeliver( - SequencerTestUtils - .mockDeliver( - timestamp = CantonTimestamp.Epoch, - synchronizerId = synchronizerId, - ) - ), - nonEmptyTraceContext1, - ) - val deliverExpectedSc101 = - mkSequencedSerializedEvent( - signDeliver( - SequencerTestUtils - .mockDeliver( - timestamp = CantonTimestamp.ofEpochSecond(1), - synchronizerId = synchronizerId, - ) - ), - nonEmptyTraceContext2, - ) - val deliverExpectedSc103 = - mkSequencedSerializedEvent( - signDeliver( - SequencerTestUtils.mockDeliver( - timestamp = CantonTimestamp.ofEpochSecond(100000), - synchronizerId = synchronizerId, - ) - ) - ) - val emptyBatch = mkBatch() - val deliverExpectedSc102 = - mkSequencedSerializedEvent( - signDeliver( - Deliver.create( - Some( - CantonTimestamp.ofEpochSecond(1) - ), - CantonTimestamp.ofEpochSecond(2), - synchronizerId, - Some(MessageId.tryCreate("deliver1")), - emptyBatch, - None, - testedProtocolVersion, - Option.empty[TrafficReceipt], - ) - ) - ) - val deliverExpectedSc104 = mkSequencedSerializedEvent( - signDeliver( - Deliver.create( - Some( - deliverExpectedSc102.timestamp - ), - CantonTimestamp.ofEpochSecond(200000), - synchronizerId, - Some(MessageId.tryCreate("deliver2")), - emptyBatch, - None, - testedProtocolVersion, - Option.empty[TrafficReceipt], - ) - ) - ) - - for { - _ <- store.reinitializeFromDbOrSetLowerBound(counterIfEmpty = - SequencerCounter(startingCounter) - ) - _ <- store.store(Seq(deliverExpectedSc100)) - findExpectingSc100 <- store - .find(LatestUpto(CantonTimestamp.MaxValue)) - .valueOrFail("find expecting sc=100") - _ <- store.store(Seq(deliverExpectedSc101, deliverExpectedSc102, deliverExpectedSc103)) - findExpectingSc103 <- store - .find(LatestUpto(CantonTimestamp.MaxValue)) - .valueOrFail("find expecting sc=103") - _ <- store.store(Seq(deliverExpectedSc104)) - findExpectingSc104 <- store - .find(LatestUpto(deliverExpectedSc104.timestamp)) - .valueOrFail("find expecting sc=104") - findExpectingSc102 <- store - 
.find(LatestUpto(deliverExpectedSc103.timestamp.immediatePredecessor)) - .valueOrFail("find expecting sc=102") - } yield { - findExpectingSc100 shouldBe deliverExpectedSc100.asOrdinaryEvent(counter = - SequencerCounter(100) - ) - findExpectingSc103 shouldBe deliverExpectedSc103.asOrdinaryEvent(counter = - SequencerCounter(103) - ) - findExpectingSc104 shouldBe deliverExpectedSc104.asOrdinaryEvent(counter = - SequencerCounter(104) - ) - findExpectingSc102 shouldBe deliverExpectedSc102.asOrdinaryEvent(counter = - SequencerCounter(102) - ) - } - } - - "delete old sequenced events when pruned" in { - val store = mk() - val startingCounter = 99 - - val ts0 = CantonTimestamp.Epoch - val ts1 = ts0.plusSeconds(1) - val ts2 = ts0.plusSeconds(2) - val ts3 = ts0.plusSeconds(10) - val ts4 = ts0.plusSeconds(20) - - val firstDeliver = - mkSequencedSerializedEvent( - signDeliver( - SequencerTestUtils.mockDeliver( - timestamp = ts0, - synchronizerId = synchronizerId, - ) - ) - ) - val secondDeliver = - mkSequencedSerializedEvent( - signDeliver( - SequencerTestUtils.mockDeliver( - timestamp = ts1, - synchronizerId = synchronizerId, - ) - ) - ) - val thirdDeliver = - mkSequencedSerializedEvent( - signDeliver( - SequencerTestUtils.mockDeliver( - timestamp = ts3, - synchronizerId = synchronizerId, - ) - ) - ) - val emptyBatch = mkBatch() - val deliver1 = - mkSequencedSerializedEvent( - signDeliver( - Deliver.create( - None, - ts2, - synchronizerId, - Some(MessageId.tryCreate("deliver1")), - emptyBatch, - None, - testedProtocolVersion, - Option.empty[TrafficReceipt], - ) - ) - ) - val deliver2 = - mkSequencedSerializedEvent( - signDeliver( - Deliver.create( - Some( - deliver1.timestamp - ), - ts4, - synchronizerId, - Some(MessageId.tryCreate("deliver2")), - emptyBatch, - None, - testedProtocolVersion, - Option.empty[TrafficReceipt], - ) - ) - ) - - for { - _ <- store.reinitializeFromDbOrSetLowerBound(counterIfEmpty = - SequencerCounter(startingCounter) - ) - _ <- store.store(Seq(firstDeliver, secondDeliver, deliver1, thirdDeliver, deliver2)) - _ <- store.prune(ts2) - eventsAfterPruningOrPurging <- store.sequencedEvents() - } yield { - assert( - eventsAfterPruningOrPurging.toSet === Set( - thirdDeliver.asOrdinaryEvent(counter = SequencerCounter(103)), - deliver2.asOrdinaryEvent(counter = SequencerCounter(104)), - ), - "only events with a later timestamp left after pruning", - ) - } - } - - "delete all sequenced events when purged" in { - val store = mk() - val startingCounter = 99 - - val ts0 = CantonTimestamp.Epoch - val ts1 = ts0.plusSeconds(1) - val ts2 = ts0.plusSeconds(2) - val ts3 = ts0.plusSeconds(10) - val ts4 = ts0.plusSeconds(20) - - val firstDeliver = - mkSequencedSerializedEvent( - signDeliver( - SequencerTestUtils.mockDeliver( - timestamp = ts0, - synchronizerId = synchronizerId, - ) - ) - ) - val secondDeliver = - mkSequencedSerializedEvent( - signDeliver( - SequencerTestUtils.mockDeliver( - timestamp = ts1, - synchronizerId = synchronizerId, - ) - ) - ) - val thirdDeliver = - mkSequencedSerializedEvent( - signDeliver( - SequencerTestUtils.mockDeliver( - timestamp = ts3, - synchronizerId = synchronizerId, - ) - ) - ) - val emptyBatch = mkBatch() - val deliver1 = - mkSequencedSerializedEvent( - signDeliver( - Deliver.create( - None, - ts2, - synchronizerId, - Some(MessageId.tryCreate("deliver1")), - emptyBatch, - None, - testedProtocolVersion, - Option.empty[TrafficReceipt], - ) - ) - ) - val deliver2 = - mkSequencedSerializedEvent( - signDeliver( - Deliver.create( - Some( - deliver1.timestamp - ), 
- ts4, - synchronizerId, - Some(MessageId.tryCreate("deliver2")), - emptyBatch, - None, - testedProtocolVersion, - Option.empty[TrafficReceipt], - ) - ) - ) - - for { - _ <- store.reinitializeFromDbOrSetLowerBound(counterIfEmpty = - SequencerCounter(startingCounter) - ) - _ <- store.store(Seq(firstDeliver, secondDeliver, deliver1, thirdDeliver, deliver2)) - _ <- store.purge() - eventsAfterPruningOrPurging <- store.sequencedEvents() - } yield { - assert(eventsAfterPruningOrPurging.isEmpty, "no events left after purging") - } - } - - "store events up to Long max limit" in { - val store = mk() - - val events = List[SequencedSerializedEvent]( - singleMinDeliver, - event, - singleMaxDeliverPositive, - ) - val criteria = List( - ByTimestamp(CantonTimestamp.MinValue.immediateSuccessor), - ByTimestamp(CantonTimestamp.Epoch), - ByTimestamp(CantonTimestamp.MaxValue), - ) - - for { - _stored <- store.store(events) - found <- criteria.parTraverse(store.find).toValidatedNec - } yield { - assert(found.isValid, "finding deliver events succeeds") - assert( - found.map(_.map(_.asSequencedSerializedEvent).toSeq) == Valid(events), - "found the right deliver events", - ) - } - } - - { - val startingCounter = 9 - lazy val deliver = mkDeliver(ts(10)) - lazy val secondDeliver = mkDeliverEventTc1(ts(11)) - lazy val deliverError = mkDeliverError(ts(12)) - - "ignore existing events" in { - val store = mk() - - for { - _ <- store.reinitializeFromDbOrSetLowerBound(counterIfEmpty = - SequencerCounter(startingCounter) - ) - eventsWithCounters <- store.store(Seq(deliver, secondDeliver, deliverError)) - (storedDeliver, storedSecondDeliver, storedDeliverError) = - eventsWithCounters.toTuple3OrFail - _ <- store.ignoreEvents(SequencerCounter(11), SequencerCounter(11)).valueOrFail("") - events <- store.sequencedEvents() - range <- valueOrFail(store.findRange(ByTimestampRange(ts(11), ts(12)), limit = None))( - "findRange" - ) - byTimestamp <- valueOrFail(store.find(ByTimestamp(ts(11))))("find by timestamp") - latestUpTo <- valueOrFail(store.find(LatestUpto(ts(11))))("find latest up to") - } yield { - storedDeliver.counter.unwrap shouldBe 10 - storedSecondDeliver.counter.unwrap shouldBe 11 - storedDeliverError.counter.unwrap shouldBe 12 - events shouldBe Seq(storedDeliver, storedSecondDeliver.asIgnoredEvent, storedDeliverError) - range shouldBe Seq(storedSecondDeliver.asIgnoredEvent, storedDeliverError) - byTimestamp shouldBe storedSecondDeliver.asIgnoredEvent - latestUpTo shouldBe storedSecondDeliver.asIgnoredEvent - } - } - - "ignore non-existing events" in { - val store = mk() - - for { - _ <- store.reinitializeFromDbOrSetLowerBound(counterIfEmpty = - SequencerCounter(startingCounter) - ) - _ <- store.store(Seq(deliver, secondDeliver, deliverError)) - _ <- valueOrFail(store.ignoreEvents(SequencerCounter(13), SequencerCounter(14)))( - "ignoreEvents" - ) - events <- store.sequencedEvents() - range <- valueOrFail(store.findRange(ByTimestampRange(ts(12), ts(14)), limit = None))( - "findRange" - ) - ignoredEventByTimestamp <- valueOrFail(store.find(ByTimestamp(ts(13))))( - "find by timestamp" - ) - ignoredEventLatestUpTo <- valueOrFail(store.find(LatestUpto(ts(13))))("find latest up to") - } yield { - events shouldBe Seq( - deliver.asOrdinaryEvent(counter = SequencerCounter(10)), - secondDeliver.asOrdinaryEvent(counter = SequencerCounter(11)), - deliverError.asOrdinaryEvent(counter = SequencerCounter(12)), - mkEmptyIgnoredEvent(13), - mkEmptyIgnoredEvent(14), - ) - range shouldBe Seq( - deliverError.asOrdinaryEvent(counter 
= SequencerCounter(12)), - mkEmptyIgnoredEvent(13), - mkEmptyIgnoredEvent(14), - ) - ignoredEventByTimestamp shouldBe mkEmptyIgnoredEvent(13) - ignoredEventLatestUpTo shouldBe mkEmptyIgnoredEvent(13) - } - } - - "ignore existing and non-existing events" in { - val store = mk() - - for { - _ <- store.reinitializeFromDbOrSetLowerBound(counterIfEmpty = - SequencerCounter(startingCounter) - ) - eventsWithCounters <- store.store(Seq(deliver, secondDeliver, deliverError)) - (storedDeliver, storedSecondDeliver, storedDeliverError) = - eventsWithCounters.toTuple3OrFail - _ <- valueOrFail(store.ignoreEvents(SequencerCounter(11), SequencerCounter(14)))( - "ignoreEvents" - ) - events <- store.sequencedEvents() - range <- valueOrFail(store.findRange(ByTimestampRange(ts(11), ts(13)), limit = None))( - "findRange" - ) - deliverByTimestamp <- valueOrFail(store.find(ByTimestamp(ts(10))))("find by timestamp") - deliverLatestUpTo <- valueOrFail(store.find(LatestUpto(ts(10))))("find latest up to") - } yield { - events shouldBe Seq( - storedDeliver, - storedSecondDeliver.asIgnoredEvent, - storedDeliverError.asIgnoredEvent, - mkEmptyIgnoredEvent(13), - mkEmptyIgnoredEvent(14), - ) - range shouldBe Seq( - storedSecondDeliver.asIgnoredEvent, - storedDeliverError.asIgnoredEvent, - mkEmptyIgnoredEvent(13), - ) - deliverByTimestamp shouldBe storedDeliver - deliverLatestUpTo shouldBe storedDeliver - } - } - - "add ignored events when empty" in { - val store = mk() - - for { - _ <- valueOrFail(store.ignoreEvents(SequencerCounter(10), SequencerCounter(12)))( - "ignoreEvents" - ) - events <- store.sequencedEvents() - } yield { - events shouldBe Seq( - mkEmptyIgnoredEvent(10, 1), - mkEmptyIgnoredEvent(11, 2), - mkEmptyIgnoredEvent(12, 3), - ) - } - } - - "ignore beyond first event" in { - val store = mk() - - for { - _ <- store.reinitializeFromDbOrSetLowerBound(counterIfEmpty = - SequencerCounter(startingCounter) - ) - eventsWithCounters <- store.store(Seq(deliver, secondDeliver, deliverError)) - (storedDeliver, storedSecondDeliver, storedDeliverError) = - eventsWithCounters.toTuple3OrFail - _ <- valueOrFail(store.ignoreEvents(SequencerCounter(0), SequencerCounter(14)))( - "ignoreEvents" - ) - events <- store.sequencedEvents() - } yield { - events shouldBe Seq( - storedDeliver.asIgnoredEvent, - storedSecondDeliver.asIgnoredEvent, - storedDeliverError.asIgnoredEvent, - mkEmptyIgnoredEvent(13), - mkEmptyIgnoredEvent(14), - ) - } - } - - "ignore no events" in { - val store = mk() - - for { - _ <- store.reinitializeFromDbOrSetLowerBound(counterIfEmpty = - SequencerCounter(startingCounter) - ) - _ <- store.store(Seq(deliver, secondDeliver, deliverError)) - _ <- valueOrFail(store.ignoreEvents(SequencerCounter(1), SequencerCounter(0)))( - "ignoreEvents1" - ) - _ <- valueOrFail(store.ignoreEvents(SequencerCounter(11), SequencerCounter(10)))( - "ignoreEvents2" - ) - _ <- valueOrFail(store.ignoreEvents(SequencerCounter(21), SequencerCounter(20)))( - "ignoreEvents3" - ) - events <- store.sequencedEvents() - } yield { - events shouldBe Seq( - deliver.asOrdinaryEvent(counter = SequencerCounter(10)), - secondDeliver.asOrdinaryEvent(counter = SequencerCounter(11)), - deliverError.asOrdinaryEvent(counter = SequencerCounter(12)), - ) - } - } - - "ignore ignored events" in { - val store = mk() - - for { - _ <- store.reinitializeFromDbOrSetLowerBound(counterIfEmpty = - SequencerCounter(startingCounter) - ) - eventsWithCounters <- store.store(Seq(deliver, secondDeliver, deliverError)) - (storedDeliver, storedSecondDeliver, 
storedDeliverError) = - eventsWithCounters.toTuple3OrFail - _ <- valueOrFail(store.ignoreEvents(SequencerCounter(12), SequencerCounter(13)))( - "ignoreEvents1" - ) - _ <- valueOrFail(store.ignoreEvents(SequencerCounter(11), SequencerCounter(14)))( - "ignoreEvents2" - ) - events <- store.sequencedEvents() - } yield { - events shouldBe Seq( - storedDeliver, - storedSecondDeliver.asIgnoredEvent, - storedDeliverError.asIgnoredEvent, - mkEmptyIgnoredEvent(13), - mkEmptyIgnoredEvent(14), - ) - } - } - - "prevent sequencer counter gaps" in { - val store = mk() - - for { - _ <- store.reinitializeFromDbOrSetLowerBound(counterIfEmpty = - SequencerCounter(startingCounter) - ) - _ <- store.store(Seq(deliver, secondDeliver, deliverError)) - err <- store.ignoreEvents(SequencerCounter(20), SequencerCounter(21)).value - events <- store.sequencedEvents() - } yield { - events shouldBe Seq( - deliver.asOrdinaryEvent(counter = SequencerCounter(10)), - secondDeliver.asOrdinaryEvent(counter = SequencerCounter(11)), - deliverError.asOrdinaryEvent(counter = SequencerCounter(12)), - ) - err shouldBe Left(ChangeWouldResultInGap(SequencerCounter(13), SequencerCounter(19))) - } - } - - "unignore events" in { - val store = mk() - - for { - _ <- store.reinitializeFromDbOrSetLowerBound(counterIfEmpty = - SequencerCounter(startingCounter) - ) - eventsWithCounters <- store.store(Seq(deliver, secondDeliver, deliverError)) - (storedDeliver, storedSecondDeliver, storedDeliverError) = - eventsWithCounters.toTuple3OrFail - _ <- valueOrFail(store.ignoreEvents(SequencerCounter(11), SequencerCounter(14)))( - "ignoreEvents" - ) - - _ <- valueOrFail(store.unignoreEvents(SequencerCounter(20), SequencerCounter(0)))( - "unignoreEvents20-0" - ) - events1 <- store.sequencedEvents() - - _ <- valueOrFail(store.unignoreEvents(SequencerCounter(12), SequencerCounter(12)))( - "unignoreEvents12" - ) - events2 <- store.sequencedEvents() - - err3 <- store.unignoreEvents(SequencerCounter(13), SequencerCounter(13)).value - events3 <- store.sequencedEvents() - - _ <- valueOrFail(store.unignoreEvents(SequencerCounter(14), SequencerCounter(14)))( - "unignoreEvents14" - ) - events4 <- store.sequencedEvents() - - _ <- valueOrFail(store.unignoreEvents(SequencerCounter(0), SequencerCounter(20)))( - "unignoreEvents0-20" - ) - events5 <- store.sequencedEvents() - } yield { - events1 shouldBe Seq( - storedDeliver, - storedSecondDeliver.asIgnoredEvent, - storedDeliverError.asIgnoredEvent, - mkEmptyIgnoredEvent(13), - mkEmptyIgnoredEvent(14), - ) - - events2 shouldBe Seq( - storedDeliver, - storedSecondDeliver.asIgnoredEvent, - storedDeliverError, - mkEmptyIgnoredEvent(13), - mkEmptyIgnoredEvent(14), - ) - - err3 shouldBe Left(ChangeWouldResultInGap(SequencerCounter(13), SequencerCounter(13))) - events3 shouldBe Seq( - storedDeliver, - storedSecondDeliver.asIgnoredEvent, - storedDeliverError, - mkEmptyIgnoredEvent(13), - mkEmptyIgnoredEvent(14), - ) - - events4 shouldBe Seq( - storedDeliver, - storedSecondDeliver.asIgnoredEvent, - storedDeliverError, - mkEmptyIgnoredEvent(13), - ) - - events5 shouldBe Seq(storedDeliver, storedSecondDeliver, storedDeliverError) - } - } - - "delete events" in { - val store = mk() - - for { - _ <- store.reinitializeFromDbOrSetLowerBound(counterIfEmpty = - SequencerCounter(startingCounter) - ) - eventsWithCounters <- store.store(Seq(deliver, secondDeliver, deliverError)) - (storedDeliver, storedSecondDeliver, storedDeliverError) = - eventsWithCounters.toTuple3OrFail - _ <- valueOrFail(store.ignoreEvents(SequencerCounter(11), 
SequencerCounter(14)))( - "ignoreEvents" - ) - _ <- store.delete(SequencerCounter(15)) - events1 <- store.sequencedEvents() - _ <- store.delete(SequencerCounter(14)) - events2 <- store.sequencedEvents() - _ <- store.delete(SequencerCounter(12)) - events3 <- store.sequencedEvents() - _ <- store.delete(SequencerCounter(0)) - events4 <- store.sequencedEvents() - } yield { - events1 shouldBe Seq( - storedDeliver, - storedSecondDeliver.asIgnoredEvent, - storedDeliverError.asIgnoredEvent, - mkEmptyIgnoredEvent(13), - mkEmptyIgnoredEvent(14), - ) - events2 shouldBe Seq( - storedDeliver, - storedSecondDeliver.asIgnoredEvent, - storedDeliverError.asIgnoredEvent, - mkEmptyIgnoredEvent(13), - ) - events3 shouldBe Seq(storedDeliver, storedSecondDeliver.asIgnoredEvent) - events4 shouldBe Seq.empty - } - } - } - - "store and retrieve trace context" in { - val store = mk() - val startingCounter = 0 - val events = List[SequencedSerializedEvent]( - mkDeliver(CantonTimestamp.ofEpochMilli(100)), - mkDeliverEventTc1(CantonTimestamp.ofEpochMilli(110)), - ) - for { - _ <- store.reinitializeFromDbOrSetLowerBound(counterIfEmpty = - SequencerCounter(startingCounter) - ) - _ <- store.store(events) - tc1 <- store.traceContext(CantonTimestamp.ofEpochMilli(100)) - tc2 <- store.traceContext(CantonTimestamp.ofEpochMilli(110)) - tc3 <- store.traceContext(CantonTimestamp.ofEpochMilli(111)) - } yield { - tc1 shouldBe Some(nonEmptyTraceContext2) - tc2 shouldBe Some(nonEmptyTraceContext1) - tc3 shouldBe None - } - } - - } -} - -object SequencedEventStoreTest { - private implicit class SeqTuple3[A](val s: Seq[A]) extends AnyVal { - def toTuple3OrFail: (A, A, A) = - s match { - case Seq(a, b, c) => (a, b, c) - case _ => - throw new TestFailedException( - s"Expected a sequence of 3 elements but got ${s.size} elements: $s", - 0, - ) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/SequencerCounterTrackerStoreTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/SequencerCounterTrackerStoreTest.scala deleted file mode 100644 index b8a4d7b568..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/SequencerCounterTrackerStoreTest.scala +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.store - -import com.digitalasset.canton.lifecycle.HasCloseContext -import com.digitalasset.canton.{BaseTest, FailOnShutdown, SequencerCounter} -import org.scalatest.wordspec.AsyncWordSpecLike - -trait SequencerCounterTrackerStoreTest extends CursorPreheadStoreTest { - this: AsyncWordSpecLike with BaseTest with HasCloseContext with FailOnShutdown => - - def sequencerCounterTrackerStore(mk: () => SequencerCounterTrackerStore): Unit = - "sequencer counter tracker store" should { - behave like cursorPreheadStore(() => mk().cursorStore, SequencerCounter.apply) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DatabaseDeadlockTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DatabaseDeadlockTest.scala deleted file mode 100644 index 57b3f87c7e..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DatabaseDeadlockTest.scala +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.store.db - -import com.daml.nameof.NameOf.functionFullName -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.config.{DbConfig, DbParametersConfig} -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.resource.DbStorage -import com.digitalasset.canton.store.db.DbStorageSetup.DbBasicConfig -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.{BaseTestWordSpec, HasExecutionContext} -import org.scalatest.time.{Millis, Seconds, Span} -import org.scalatest.{Assertion, BeforeAndAfterAll} -import slick.jdbc.PositionedParameters -import slick.sql.SqlAction - -import java.sql.SQLException -import scala.util.{Failure, Random, Success, Try} - -trait DatabaseDeadlockTest - extends BaseTestWordSpec - with BeforeAndAfterAll - with HasExecutionContext { - this: DbTest => - - lazy val rawStorage: DbStorage = storage.underlying - import rawStorage.api.* - - val batchSize = 100 - val roundsNegative = 50 - val roundsPositive = 1 - val maxRetries = 3 - - implicit override val defaultPatience: PatienceConfig = - PatienceConfig(timeout = Span(60, Seconds), interval = Span(100, Millis)) - - def createTableAction: SqlAction[Int, NoStream, Effect.Write] - - override def beforeAll(): Unit = { - super.beforeAll() - - rawStorage - .queryAndUpdate( - DBIO.seq( - sqlu"drop table database_deadlock_test".asTry, // Try to drop, in case it already exists. - createTableAction, - ), - functionFullName, - ) - .failOnShutdown - .futureValue - } - - override def cleanDb( - storage: DbStorage - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = - rawStorage.update_( - sqlu"truncate table database_deadlock_test", - functionFullName, - ) - - case class DbBulkCommand(sql: String, setParams: PositionedParameters => Int => Unit) { - def run( - ascending: Boolean, - maxRetries: Int, - ): FutureUnlessShutdown[Array[Int]] = - rawStorage.queryAndUpdate( - DbStorage.bulkOperation( - sql, - if (ascending) 0 until batchSize else (0 until batchSize).reverse, - storage.profile, - )(setParams), - s"$functionFullName-${sql.take(10)}", - maxRetries, - ) - } - - def setIdValue(sp: PositionedParameters)(id: Int): Unit = { - sp >> id - sp >> Random.nextInt() - } - def setValueId(sp: PositionedParameters)(id: Int): Unit = { - sp >> Random.nextInt() - sp >> id - } - def setId(sp: PositionedParameters)(id: Int): Unit = - sp >> id - - def upsertCommand: DbBulkCommand - - def updateCommand: DbBulkCommand = - DbBulkCommand("update database_deadlock_test set v = ? 
where id = ?", setValueId) - - def deleteCommand: DbBulkCommand = - DbBulkCommand("delete from database_deadlock_test where id = ?", setId) - - "The storage" when { - // Test upserts - testQuery("bulk inserts", upsertCommand, upsertCommand) - - testQueryWithSetup( - // insert rows first to test the update part of the upsert - setup = upsertCommand.run(ascending = true, 0).futureValueUS, - "bulk upserts", - upsertCommand, - upsertCommand, - ) - - // Test updates - testQueryWithSetup( - setup = upsertCommand.run(ascending = true, 0).futureValueUS, - "bulk updates", - updateCommand, - updateCommand, - ) - - // Test deletes - testQuery( - "bulk delete + insert", - deleteCommand, - upsertCommand, - ) - } - - def testQuery( - description: String, - command1: DbBulkCommand, - command2: DbBulkCommand, - ): Unit = - testQueryWithSetup((), description, command1, command2) - - def testQueryWithSetup( - setup: => Unit, - description: String, - command1: DbBulkCommand, - command2: DbBulkCommand, - ): Unit = { - if (dbCanDeadlock) { - s"running conflicting $description" can { - "abort with a deadlock" in { - setup - assertSQLException(runWithConflictingRowOrder(command1, command2, 0)) - } - } - } - - s"running conflicting $description with retry" must { - "succeed" in { - setup - assertNoException(runWithConflictingRowOrder(command1, command2, maxRetries)) - } - } - } - - def dbCanDeadlock: Boolean = true - - def assertSQLException(body: => Try[_]): Assertion = - forAtLeast(1, 0 until roundsNegative) { _ => - inside(body) { case Failure(e: SQLException) => - assertDeadlock(e) - } - } - - def assertDeadlock(e: SQLException): Assertion - - def assertNoException(body: => Try[_]): Assertion = - forAll(0 until roundsPositive) { _ => - inside(body) { case Success(_) => succeed } - } - - def runWithConflictingRowOrder( - command1: DbBulkCommand, - command2: DbBulkCommand, - maxRetries: Int, - ): Try[Seq[Array[Int]]] = - FutureUnlessShutdown - .sequence( - Seq( - command1.run(ascending = true, maxRetries), - command2.run(ascending = false, maxRetries), - ) - ) - .transformWithHandledAborted(FutureUnlessShutdown.pure) - .futureValueUS -} - -class DatabaseDeadlockTestH2 extends DatabaseDeadlockTest with H2Test { - import rawStorage.api.* - - // H2 cannot deadlock at the moment, because we are enforcing a single connection. - // Therefore disabling negative tests. - override def dbCanDeadlock: Boolean = false - - override lazy val createTableAction: SqlAction[Int, NoStream, Effect.Write] = - sqlu"create table database_deadlock_test(id bigint primary key, v bigint not null)" - - override lazy val upsertCommand: DbBulkCommand = DbBulkCommand( - """merge into database_deadlock_test - |using (select cast(? as bigint) id, cast(? as bigint) v from dual) as input - |on (database_deadlock_test.id = input.id) - |when not matched then - | insert(id, v) - | values(input.id, input.v) - |when matched then - | update set v = input.v""".stripMargin, - setIdValue, - ) - - override def assertDeadlock(e: SQLException): Assertion = fail("unimplemented") -} - -class DatabaseDeadlockTestPostgres extends DatabaseDeadlockTest with PostgresTest { - import rawStorage.api.* - - override def mkDbConfig(basicConfig: DbBasicConfig): DbConfig.Postgres = { - // Enforce 8 connections. If there is only one connection, the test will fail to produce deadlocks. 
- val defaultDbConfig = super.mkDbConfig(basicConfig) - defaultDbConfig.copy(parameters = - DbParametersConfig(maxConnections = Some(PositiveInt.tryCreate(8))) - ) - } - - override lazy val createTableAction: SqlAction[Int, NoStream, Effect.Write] = - sqlu"create table database_deadlock_test(id bigint primary key, v bigint not null)" - - override lazy val upsertCommand: DbBulkCommand = DbBulkCommand( - """insert into database_deadlock_test(id, v) - |values (?, ?) - |on conflict (id) do - |update set v = excluded.v""".stripMargin, - setIdValue, - ) - - override def assertDeadlock(e: SQLException): Assertion = e.getSQLState shouldBe "40P01" -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DatabaseLimitNbParamTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DatabaseLimitNbParamTest.scala deleted file mode 100644 index 06c5846931..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DatabaseLimitNbParamTest.scala +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.store.db - -import com.daml.nameof.NameOf.functionFullName -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.resource.DbStorage.Implicits.BuilderChain.* -import com.digitalasset.canton.resource.{DbExceptionRetryPolicy, DbStorage} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.retry.ErrorKind.FatalErrorKind -import com.digitalasset.canton.{BaseTestWordSpec, HasExecutionContext} -import org.scalatest.BeforeAndAfterAll -import slick.sql.SqlAction - -trait DatabaseLimitNbParamTest - extends BaseTestWordSpec - with BeforeAndAfterAll - with HasExecutionContext { - this: DbTest => - - lazy val rawStorage: DbStorage = storage.underlying - import rawStorage.api.* - - def createTableAction: SqlAction[Int, NoStream, Effect.Write] - - override def beforeAll(): Unit = { - super.beforeAll() - - rawStorage - .queryAndUpdate( - DBIO.seq( - sqlu"drop table database_limit_nb_param_test".asTry, // Try to drop, in case it already exists. 
- createTableAction, - ), - functionFullName, - ) - .futureValueUS - } - - override def cleanDb( - storage: DbStorage - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = - rawStorage.update_( - sqlu"truncate table database_limit_nb_param_test", - functionFullName, - ) - - def nbOfParametersLimit: Int - - def insertCommand(nbVal: Int): DbStorage.SQLActionBuilderChain - - "The storage" when { - "exceeding DB limit on the number of parameters in a statement" should { - "fail without retrying" in { - // Our test query uses 5 parameters per key in the prepared statement - val nbKeys = nbOfParametersLimit / 5 + 1 - - val query = insertCommand(nbKeys) - - rawStorage - .update(query.asUpdate, "parameter limit query", maxRetries = 1) - .transformWith { outcome => - val errorKind = DbExceptionRetryPolicy.logAndDetermineErrorKind(outcome, logger, None) - errorKind match { - case FatalErrorKind => - case _ => fail("Database error kind should be fatal") - } - - FutureUnlessShutdown.pure(true) - } - .futureValueUS shouldBe true - } - } - } - -} - -class DatabaseLimitNbParamTestPostgres extends DatabaseLimitNbParamTest with PostgresTest { - import rawStorage.api.* - - override lazy val createTableAction: SqlAction[Int, NoStream, Effect.Write] = - sqlu"""create table database_limit_nb_param_test( - key bigint primary key, - val1 bigint not null, - val2 bigint not null, - val3 bigint not null, - val4 bigint not null)""" - - override val nbOfParametersLimit: Int = (1 << 16) - 1 // As of JDBC driver v42.4.0 - - override def insertCommand(nbVal: Int): DbStorage.SQLActionBuilderChain = { - val values = - (1 to nbVal).map(key => sql"""($key, $key, $key, $key, $key)""").intercalate(sql", ") - - sql"""insert into database_limit_nb_param_test(key, val1, val2, val3, val4) - values """ ++ values ++ - sql""" on conflict (key) do nothing""" - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DbIndexedStringsStoreTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DbIndexedStringsStoreTest.scala deleted file mode 100644 index 31b6ac6b04..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DbIndexedStringsStoreTest.scala +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.store.db - -import com.daml.nameof.NameOf.functionFullName -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.resource.DbStorage -import com.digitalasset.canton.store.{IndexedStringStore, IndexedStringType} -import com.digitalasset.canton.topology.SynchronizerId -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.BeforeAndAfterAll -import org.scalatest.wordspec.AsyncWordSpec - -import scala.concurrent.Future - -trait DbIndexedStringsStoreTest - extends AsyncWordSpec - with BaseTest - with BeforeAndAfterAll - with HasExecutionContext { - this: DbTest => - - import com.digitalasset.canton.topology.DefaultTestIdentities.* - - override def cleanDb( - storage: DbStorage - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - import storage.api.* - val query = - sqlu"truncate table common_static_strings restart identity" - storage.update( - DBIO.seq(query), - functionFullName, - ) - } - - def staticStringsStore(mk: () => IndexedStringStore): Unit = { - - val synchronizer2 = SynchronizerId.tryFromString("other::synchronizer") - - def d2idx(store: IndexedStringStore, synchronizerId: SynchronizerId): Future[Int] = - store - .getOrCreateIndex( - IndexedStringType.synchronizerId, - synchronizerId.toLengthLimitedString.asString300, - ) - .failOnShutdown - - def idx2d(store: IndexedStringStore, index: Int): Future[Option[SynchronizerId]] = - store - .getForIndex(IndexedStringType.synchronizerId, index) - .map(_.map(str => SynchronizerId.tryFromString(str.unwrap))) - .failOnShutdown - - "return the same index for a previously stored uid" in { - val store = mk() - for { - idx <- d2idx(store, synchronizer2) - idx2 <- d2idx(store, synchronizerId) - idx3 <- d2idx(store, synchronizer2) - } yield { - idx shouldBe idx3 - idx2 should not be idx - } - } - - "return the correct index for the stored uid" in { - val store = mk() - for { - in1 <- d2idx(store, synchronizer2) - in2 <- d2idx(store, synchronizerId) - in3 <- d2idx(store, synchronizer2) - lk1 <- idx2d(store, in1) - lk2 <- idx2d(store, in2) - ompt1 <- idx2d(store, 0) - ompt2 <- idx2d(store, Math.max(in1, in2) + 1) - } yield { - lk1.value shouldBe synchronizer2 - lk2.value shouldBe synchronizerId - ompt1 shouldBe empty - ompt2 shouldBe empty - } - - } - - "concurrent insertion" in { - - val store = mk() - val uidsF = Future.sequence( - (1 to 500) - .map(x => SynchronizerId.tryFromString(s"id$x::stinkynamespace")) - .map(x => - d2idx(store, x).flatMap { idx => - idx2d(store, idx).map(res => (x, idx, res)) - } - ) - ) - for { - idxs <- uidsF - } yield { - forAll(idxs) { case (uid, _, resIdx) => - resIdx.value shouldBe uid - } - idxs.map(_._2).distinct should have length (idxs.length.toLong) - } - - } - - } - - "DbStaticStringStore" should { - behave like staticStringsStore(() => new DbIndexedStringStore(storage, timeouts, loggerFactory)) - } - -} - -class IndexedStringsStoreTestH2 extends DbIndexedStringsStoreTest with H2Test - -class IndexedStringsStoreTestPostgres extends DbIndexedStringsStoreTest with PostgresTest diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DbPruningSchedulerStoreTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DbPruningSchedulerStoreTest.scala deleted file mode 100644 index 3e4a464ab3..0000000000 --- 
a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DbPruningSchedulerStoreTest.scala +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.store.db - -import com.daml.nameof.NameOf.functionFullName -import com.digitalasset.canton.config.CantonRequireTypes.String3 -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.resource.DbStorage -import com.digitalasset.canton.store.PruningSchedulerStoreTest -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.{BaseTest, FailOnShutdown, HasExecutionContext} -import org.scalatest.wordspec.AsyncWordSpec - -trait DbPruningSchedulerStoreTest - extends AsyncWordSpec - with BaseTest - with HasExecutionContext - with PruningSchedulerStoreTest - with FailOnShutdown { - this: DbTest => - override def cleanDb( - storage: DbStorage - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - import storage.api.* - storage.update(DBIO.seq(sqlu"truncate table common_pruning_schedules"), functionFullName) - } - - "DbPruningSchedulerStore" should { - behave like pruningSchedulerStore(() => - new DbPruningSchedulerStore( - String3.tryCreate("DBT"), - storage, - timeouts, - loggerFactory, - ) - ) - - } -} - -class DbPruningSchedulerStoreTestH2 extends DbPruningSchedulerStoreTest with H2Test - -class DbPruningSchedulerStoreTestPostgres extends DbPruningSchedulerStoreTest with PostgresTest diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DbSequencedEventStoreTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DbSequencedEventStoreTest.scala deleted file mode 100644 index d0a030e0c5..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DbSequencedEventStoreTest.scala +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.store.db - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.config.DefaultProcessingTimeouts -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.resource.DbStorage -import com.digitalasset.canton.store.{IndexedPhysicalSynchronizer, SequencedEventStoreTest} -import com.digitalasset.canton.topology.SynchronizerId -import com.digitalasset.canton.tracing.TraceContext -import org.scalatest.wordspec.AsyncWordSpec - -trait DbSequencedEventStoreTest extends AsyncWordSpec with BaseTest with SequencedEventStoreTest { - this: DbTest => - - override def cleanDb( - storage: DbStorage - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - import storage.api.* - - storage.update( - DBIO.seq( - sqlu"truncate table common_sequenced_events", - sqlu"truncate table common_sequenced_event_store_pruning", - ), - operationName = s"${this.getClass}: truncate table sequenced_events tables", - ) - } - - "DbSequencedEventStore" should { - behave like sequencedEventStore(ec => - new DbSequencedEventStore( - storage, - IndexedPhysicalSynchronizer - .tryCreate(SynchronizerId.tryFromString("da::default").toPhysical, 1), - DefaultProcessingTimeouts.testing, - loggerFactory, - )(ec) - ) - } -} - -class SequencedEventStoreTestH2 extends DbSequencedEventStoreTest with H2Test - -class SequencedEventStoreTestPostgres extends DbSequencedEventStoreTest with PostgresTest diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DbSequencerCounterTrackerStoreTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DbSequencerCounterTrackerStoreTest.scala deleted file mode 100644 index 50b8b881c1..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DbSequencerCounterTrackerStoreTest.scala +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.store.db - -import com.daml.nameof.NameOf.functionFullName -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.resource.DbStorage -import com.digitalasset.canton.store.{IndexedPhysicalSynchronizer, SequencerCounterTrackerStoreTest} -import com.digitalasset.canton.topology.{SynchronizerId, UniqueIdentifier} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.{BaseTest, FailOnShutdown} -import org.scalatest.wordspec.AsyncWordSpec - -trait DbSequencerCounterTrackerStoreTest - extends AsyncWordSpec - with BaseTest - with SequencerCounterTrackerStoreTest - with FailOnShutdown { - this: DbTest => - - private val synchronizerId = SynchronizerId( - UniqueIdentifier.tryFromProtoPrimitive("da::default") - ).toPhysical - - override def cleanDb( - storage: DbStorage - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - import storage.api.* - storage.update( - DBIO.seq(sqlu"truncate table #${DbSequencerCounterTrackerStore.cursorTable}"), - functionFullName, - ) - } - - "DbSequencerCounterTrackerStore" should { - behave like sequencerCounterTrackerStore(() => - new DbSequencerCounterTrackerStore( - IndexedPhysicalSynchronizer.tryCreate(synchronizerId, 1), - storage, - timeouts, - loggerFactory, - ) - ) - } -} - -class SequencerCounterTrackerStoreTestH2 extends DbSequencerCounterTrackerStoreTest with H2Test - -class SequencerCounterTrackerStoreTestPostgres - extends DbSequencerCounterTrackerStoreTest - with PostgresTest diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DbStorageIdempotency.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DbStorageIdempotency.scala deleted file mode 100644 index 57796a8f0a..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DbStorageIdempotency.scala +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.store.db - -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.config.{DbConfig, ProcessingTimeout} -import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown} -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.metrics.DbStorageMetrics -import com.digitalasset.canton.resource.DbStorage -import com.digitalasset.canton.resource.DbStorage.DbAction.{All, ReadTransactional} -import com.digitalasset.canton.tracing.TraceContext - -import scala.concurrent.ExecutionContext - -/** DbStorage instance for idempotency testing where we run each write action twice. 
*/ -class DbStorageIdempotency( - val underlying: DbStorage, - override protected val timeouts: ProcessingTimeout, - override protected val loggerFactory: NamedLoggerFactory, -)(override protected implicit val ec: ExecutionContext) - extends DbStorage - with NamedLogging { - override def threadsAvailableForWriting: PositiveInt = underlying.threadsAvailableForWriting - override val profile: DbStorage.Profile = underlying.profile - override def metrics: DbStorageMetrics = underlying.metrics - override val dbConfig: DbConfig = underlying.dbConfig - override protected val logOperations: Boolean = false - - override protected[canton] def runRead[A]( - action: ReadTransactional[A], - operationName: String, - maxRetries: Int, - )(implicit traceContext: TraceContext, closeContext: CloseContext): FutureUnlessShutdown[A] = - underlying.runRead(action, operationName, maxRetries) - - override protected[canton] def runWrite[A]( - action: All[A], - operationName: String, - maxRetries: Int, - )(implicit traceContext: TraceContext, closeContext: CloseContext): FutureUnlessShutdown[A] = - underlying.runWrite(action, operationName + "-1", maxRetries).flatMap { _ => - underlying.runWrite(action, operationName + "-2", maxRetries) - } - - override def isActive: Boolean = underlying.isActive -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DbTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DbTest.scala deleted file mode 100644 index 06ae8a2c98..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/db/DbTest.scala +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.store.db - -import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.config.DbConfig.{H2, Postgres} -import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown, HasCloseContext} -import com.digitalasset.canton.logging.NamedLogging -import com.digitalasset.canton.resource.DbStorage -import com.digitalasset.canton.store.db.DbStorageSetup.DbBasicConfig -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.version.ProtocolVersion -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.* - -import scala.concurrent.Await -import scala.concurrent.duration.* -import scala.util.control.NonFatal - -/** Base test for writing a database backed storage test. To ensure idempotency and safety under - * retries of the store each write operation is executed twice. Each database should provide a - * DbTest implementation that can then be mixed into a storage test to provide the actual backend. - * See DbCryptoVaultStoreTest for example usage. - */ -trait DbTest - extends BeforeAndAfterAll - with BeforeAndAfterEach - with FlagCloseable - with HasCloseContext // not used here, but required by most tests. So extending it for convenience. 
- with HasExecutionContext - with NamedLogging { - this: Suite => - - /** Flag to define the migration mode for the schemas */ - def migrationMode: MigrationMode = - // TODO(i15561): Revert back to `== ProtocolVersion.dev` once v30 is a stable Daml 3 protocol version - if (BaseTest.testedProtocolVersion >= ProtocolVersion.v34) MigrationMode.DevVersion - else MigrationMode.Standard - - protected def mkDbConfig(basicConfig: DbBasicConfig): DbConfig - - protected def createSetup(): DbStorageSetup - - /** Hook for cleaning database before running next test. */ - protected def cleanDb(storage: DbStorage)(implicit tc: TraceContext): FutureUnlessShutdown[?] - - @SuppressWarnings(Array("org.wartremover.warts.Var", "org.wartremover.warts.Null")) - private var setup: DbStorageSetup = _ - - /** Stores the db storage implementation. Will throw if accessed before the test has started */ - protected lazy val storage: DbStorageIdempotency = { - val s = Option(setup).map(_.storage).getOrElse(sys.error("Test has not started")) - new DbStorageIdempotency(s, timeouts, loggerFactory) - } - - override def beforeAll(): Unit = TraceContext.withNewTraceContext("test") { implicit tc => - // Non-standard order. Setup needs to be created first, because super can be MyDbTest and therefore super.beforeAll - // may already access setup. - try { - setup = createSetup().initialized() - setup.migrateDb() - super.beforeAll() - } catch { - case NonFatal(e) => - // Logging the error, as an exception in this method will abort the test suite with no log output. - logger.error("beforeAll failed", e) - throw e - } - } - - override def afterAll(): Unit = TraceContext.withNewTraceContext("test") { implicit tc => - try { - // Non-standard order. - // First delete test data. - cleanup() - - // Free resources of MyDbTest, if there are any. - close() - super.afterAll() // This will also close the executionContext, unfortunately. - - // Release database. Fortunately, this seems not to require an executionContext. - storage.close() - setup.close() - } catch { - case NonFatal(e) => - // Logging the error, as an exception in this method will abort the test suite with no log output. - logger.error("afterAll failed", e) - throw e - } - } - - override def beforeEach(): Unit = TraceContext.withNewTraceContext("test") { implicit tc => - try { - cleanup() - super.beforeEach() - } catch { - case NonFatal(e) => - // Logging the error, as an exception in this method will abort the test suite with no log output. 
- logger.error("beforeEach failed", e) - throw e - } - } - - private def cleanup()(implicit tc: TraceContext): Unit = - // Use the underlying storage for clean-up operations, so we don't run clean-ups twice - Await.result(cleanDb(storage.underlying), 10.seconds) -} - -/** Run db test against h2 */ -trait H2Test extends DbTest { this: Suite => - - override protected def mkDbConfig(basicConfig: DbBasicConfig): H2 = basicConfig.toH2DbConfig - - override protected def createSetup(): DbStorageSetup = - DbStorageSetup.h2(loggerFactory, migrationMode, mkDbConfig) -} - -/** Run db test for running against postgres */ -trait PostgresTest extends DbTest { this: Suite => - - override protected def mkDbConfig(basicConfig: DbBasicConfig): Postgres = - basicConfig.toPostgresDbConfig - - override protected def createSetup(): DbStorageSetup = { - // postgres has limit of 63 chars for the db name - val dbName = this.getClass.getSimpleName.replaceAll("[^a-zA-Z0-9]", "").toLowerCase.take(63) - DbStorageSetup.postgres(loggerFactory, migrationMode, mkDbConfig, useDbNameO = Some(dbName)) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/memory/PruningSchedulerStoreTestInMemory.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/memory/PruningSchedulerStoreTestInMemory.scala deleted file mode 100644 index 26eefbbf19..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/memory/PruningSchedulerStoreTestInMemory.scala +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.store.memory; - -import com.digitalasset.canton.store.PruningSchedulerStoreTest -import com.digitalasset.canton.{BaseTest, FailOnShutdown} -import org.scalatest.wordspec.AsyncWordSpec; - -class PruningSchedulerStoreTestInMemory - extends AsyncWordSpec - with BaseTest - with PruningSchedulerStoreTest - with FailOnShutdown { - - "InMemoryPruningSchedulerStore" should { - behave like pruningSchedulerStore(() => new InMemoryPruningSchedulerStore(loggerFactory)) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/memory/SendTrackerTrackerStoreTestInMemory.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/memory/SendTrackerTrackerStoreTestInMemory.scala deleted file mode 100644 index 2d283a1d7e..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/memory/SendTrackerTrackerStoreTestInMemory.scala +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.store.memory - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.store.SendTrackerStoreTest -import org.scalatest.wordspec.AsyncWordSpec - -class SendTrackerTrackerStoreTestInMemory - extends AsyncWordSpec - with BaseTest - with SendTrackerStoreTest { - "InMemorySendTrackerStore" should { - behave like sendTrackerStore(() => new InMemorySendTrackerStore) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/memory/SequencedEventStoreTestInMemory.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/memory/SequencedEventStoreTestInMemory.scala deleted file mode 100644 index a2022f4581..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/memory/SequencedEventStoreTestInMemory.scala +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.store.memory - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.store.SequencedEventStoreTest -import org.scalatest.wordspec.AsyncWordSpec - -class SequencedEventStoreTestInMemory - extends AsyncWordSpec - with BaseTest - with SequencedEventStoreTest { - - "InMemorySequencedEventStore" should { - behave like sequencedEventStore(ec => - new InMemorySequencedEventStore(loggerFactory, timeouts)(ec) - ) - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/memory/SequencerCounterTrackerStoreTestInMemory.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/memory/SequencerCounterTrackerStoreTestInMemory.scala deleted file mode 100644 index f382661f85..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/store/memory/SequencerCounterTrackerStoreTestInMemory.scala +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.store.memory - -import com.digitalasset.canton.lifecycle.{FlagCloseable, HasCloseContext} -import com.digitalasset.canton.store.SequencerCounterTrackerStoreTest -import com.digitalasset.canton.{BaseTest, FailOnShutdown} -import org.scalatest.wordspec.AsyncWordSpec - -class SequencerCounterTrackerStoreTestInMemory - extends AsyncWordSpec - with BaseTest - with SequencerCounterTrackerStoreTest - with FlagCloseable - with HasCloseContext - with FailOnShutdown { - - "InMemorySequencerCounterTrackerStore" should { - behave like sequencerCounterTrackerStore(() => - new InMemorySequencerCounterTrackerStore(loggerFactory, timeouts) - ) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/time/ClockTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/time/ClockTest.scala deleted file mode 100644 index 704117d8a4..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/time/ClockTest.scala +++ /dev/null @@ -1,428 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.time - -import com.digitalasset.canton.concurrent.Threading -import com.digitalasset.canton.config.CantonRequireTypes.InstanceName -import com.digitalasset.canton.config.{DefaultProcessingTimeouts, ProcessingTimeout} -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.logging.{LogEntry, NamedLoggerFactory, SuppressionRule} -import com.digitalasset.canton.time.Clock.SystemClockRunningBackwards -import com.digitalasset.canton.topology.admin.v30.IdentityInitializationServiceGrpc.IdentityInitializationService -import com.digitalasset.canton.topology.admin.v30.{ - CurrentTimeRequest, - CurrentTimeResponse, - IdentityInitializationServiceGrpc, -} -import com.digitalasset.canton.tracing.TraceContextGrpc -import com.digitalasset.canton.version.HandshakeErrors.DeprecatedProtocolVersion -import com.digitalasset.canton.{BaseTest, HasExecutionContext, config} -import io.grpc.inprocess.{InProcessChannelBuilder, InProcessServerBuilder} -import io.grpc.util.MutableHandlerRegistry -import io.grpc.{ManagedChannel, Server, ServerServiceDefinition} -import org.scalatest.concurrent.PatienceConfiguration.Timeout -import org.scalatest.wordspec.AnyWordSpec -import org.slf4j.event.Level - -import java.time.Clock as JClock -import java.time.temporal.ChronoUnit -import java.util.concurrent.atomic.AtomicReference -import scala.annotation.tailrec -import scala.concurrent.duration.* -import scala.concurrent.{Await, Future, Promise} - -@SuppressWarnings(Array("org.wartremover.warts.Var")) -class ClockTest extends AnyWordSpec with BaseTest with HasExecutionContext { - - val sut = new WallClock(DefaultProcessingTimeouts.testing, loggerFactory) - val timeout = Timeout(6.seconds) - def testTask(now: CantonTimestamp): Unit = () - - "SimClock" should { - val ref = CantonTimestamp.Epoch - val sim = new SimClock(ref, loggerFactory) - - def testExecution(task: Future[Unit], timestamp: CantonTimestamp): Unit = { - Threading.sleep( - 25 - ) // this will only lead to failures if there is something wrong with the scheduling - assert(!task.isCompleted) - // execute task - sim.advanceTo(timestamp) - val res = Await.ready(task, timeout.value) - assert(res.isCompleted) - } - - "return correct time" in { - assert(sim.uniqueTime().isAfter(ref)) - assert(sim.uniqueTime().isBefore(ref.plusMillis(1))) - } - - "allow task to be scheduled in future" in { - val task1 = sim.scheduleAt(testTask(_), sim.uniqueTime().plusSeconds(1)).onShutdown(fail()) - testExecution(task1, sim.uniqueTime().plusSeconds(2)) - } - - "ensure that tasks are executed in proper order" in { - val now = sim.uniqueTime() - val task1 = sim.scheduleAt(testTask(_), now.plusSeconds(3)).onShutdown(fail()) - val task2 = sim.scheduleAt(testTask(_), now.plusSeconds(1)).onShutdown(fail()) - testExecution(task2, now.plusSeconds(2)) - testExecution(task1, now.plusSeconds(4)) - } - - "ensure that multiple tasks are executed, again in proper order" in { - val now = sim.uniqueTime() - val task1 = sim.scheduleAt(testTask(_), now.plusSeconds(5)).onShutdown(fail()) - val task2 = sim.scheduleAt(testTask(_), now.plusSeconds(3)).onShutdown(fail()) - val task3 = sim.scheduleAt(testTask(_), now.plusSeconds(1)).onShutdown(fail()) - testExecution(task3, now.plusSeconds(4)) - val res = Await.ready(task2, timeout.value) - assert(res.isCompleted) - testExecution(task1, now.plusSeconds(6)) - } - - "ensure that a task scheduled for now executes" in { - val now = sim.uniqueTime() - val task = 
sim.scheduleAt(testTask(_), now).onShutdown(fail()) - val res = Await.ready(task, timeout.value) - assert(res.isCompleted) - } - - } - - "WallClock" should { - "return unique values without diverging from real time too much" in { - val first = sut.uniqueTime() - var last = first - for (i <- 1 to 100000) { - last = sut.uniqueTime() - } - assert(first.isBefore(last)) - // make sure that in a tight loop after 100k tries the system clock returns - // unique values at least twice, and we're not just adding nanoseconds to the previous value, - // eventually overflowing - assert(last.isAfter(first.plusMillis(1))) - } - - "check that clock unique and monotonic are strict" in { - var last = sut.uniqueTime() - for (i <- 1 to 1000) { - val mt = sut.monotonicTime() - val ut = sut.uniqueTime() - mt shouldBe >=(last) - ut shouldBe >(mt) - last = ut - } - } - - "perform 100'000 currentTime in a second" in { - // avoid flakes by trying a few times to run in less than one second - // in some tests this gets flaky - @tailrec - def doCheck(num: Int): Unit = { - val first = sut.uniqueTime() - var last = first - for (i <- 1 to 100000) { - last = sut.uniqueTime() - } - if (last.isBefore(first.plusSeconds(1))) () - else { - if (num > 0) { - doCheck(num - 1) - } else { - fail(s"$last < ${first.plusSeconds(1)}") - } - } - } - doCheck(10) - } - - "scheduling one task works and completes" in { - val now = sut.uniqueTime() - val task1 = sut.scheduleAt(testTask(_), now.plusMillis(50)).onShutdown(fail()) - assert(Await.ready(task1, timeout.value).isCompleted) - } - - "scheduling of three tasks works and completes" in { - val now = sut.uniqueTime() - val tasks = Seq( - sut.scheduleAt(testTask, now.plusMillis(50)), - sut.scheduleAt(testTask, now.plusMillis(20)), - sut.scheduleAt(testTask, now), - sut.scheduleAt(testTask, now.minusMillis(1000)), - sut.scheduleAfter(testTask, java.time.Duration.ofMillis(55)), - ) - tasks.zipWithIndex.foreach { case (task, index) => - assert( - Await.ready(task.onShutdown(fail()), timeout.value).isCompleted, - s"task $index did not complete", - ) - } - } - - } - - "TickTock Skew" should { - "skew the clock by the expected value" in { - val tm = JClock.systemUTC() - val toleranceMs = 50 - - def check(skewMillis: Int) = { - val tick = TickTock.FixedSkew(skewMillis) - val hostTime = tm.instant() - val tickTime = tick.now - - if (skewMillis > 0) - tickTime.isAfter(hostTime) shouldBe true - else - tickTime.isBefore(hostTime) shouldBe true - - // We can have a small difference as we sample the clock at different times - val diff = hostTime.until(tickTime, ChronoUnit.MILLIS).toInt - Math.abs(diff - skewMillis) should be <= toleranceMs - } - - check(64738) - check(-15238) - } - } - - "Clock" should { - "warn but work if clock is adjusted backwards" in { - class MyClock(val loggerFactory: NamedLoggerFactory) extends Clock { - val nowR = new AtomicReference[CantonTimestamp](CantonTimestamp.Epoch.plusSeconds(120)) - override protected def addToQueue(queue: Queued[?]): Unit = { - val _ = tasks.add(queue) - } - - override protected def warnIfClockRunsBackwards: Boolean = true - override def now: CantonTimestamp = nowR.get() - - override def close(): Unit = {} - } - - val clock = new MyClock(loggerFactory) - - def assertNoWarning(ut: CantonTimestamp) = { - val ut2 = loggerFactory.assertLogsSeq(SuppressionRule.Level(Level.WARN))( - clock.uniqueTime(), - x => x shouldBe empty, - ) - ut should be < ut2 - ut2 - } - - val ut = clock.uniqueTime() - clock.nowR.updateAndGet(_.minusSeconds(5)) - // first time, we
do get a message - val ut2 = - loggerFactory.assertLogs( - clock.uniqueTime(), - _.shouldBeCantonErrorCode(SystemClockRunningBackwards), - ) - ut should be < ut2 - // second time, we shouldn't get one - val ut3 = assertNoWarning(ut2) - // recover - clock.nowR.updateAndGet(_.plusSeconds(40)) - val ut4 = assertNoWarning(ut3) - // emit again - clock.nowR.updateAndGet(_.minusSeconds(5)) - val ut5 = - loggerFactory.assertLogs( - clock.uniqueTime(), - _.shouldBeCantonErrorCode(SystemClockRunningBackwards), - ) - ut4 should be < ut5 - } - } - - "RemoteClock" should { - "request the time" in { - val service = mock[IdentityInitializationService] - // We must mock the service before we create the clock, because the clock will immediately request a time - val ts = CantonTimestamp.ofEpochSecond(42) - when(service.currentTime(any[CurrentTimeRequest])) - .thenReturn(Future.successful(CurrentTimeResponse(ts.toProtoPrimitive))) - - val env = new RemoteClockEnv(service) - - env.clock.now shouldBe ts - env.close() - } - - "handle network delays" in { - val service = mock[IdentityInitializationService] - when(service.currentTime(any[CurrentTimeRequest])) - .thenReturn(Future.successful(CurrentTimeResponse(0L))) - val networkTimeout = config.NonNegativeDuration.tryFromDuration(1.second) - val env = new RemoteClockEnv( - service, - timeouts = timeouts.copy(network = networkTimeout), - ) - - // Delay the response for more than the network timeout - val firstAnswer = Promise[CantonTimestamp]() - val firstRequestObserved = Promise[Unit]() - when(service.currentTime(any[CurrentTimeRequest])).thenAnswer { (_: CurrentTimeRequest) => - firstRequestObserved.trySuccess(()) - firstAnswer.future.map(ts => CurrentTimeResponse(ts.toProtoPrimitive)) - } - - val ts = CantonTimestamp.ofEpochMilli(1) - loggerFactory.assertLoggedWarningsAndErrorsSeq( - { - val taskF = Future(env.clock.scheduleAt(_ => (), ts)) - - logger.info("Waiting to see the first request being observed") - firstRequestObserved.future.futureValue - logger.info("Now waiting for the network delay duration") - Threading.sleep(networkTimeout.duration.toMillis + 100) - logger.info("Now time requests are fast again") - firstAnswer.success(ts) - - taskF.futureValue.futureValueUS - }, - LogEntry.assertLogSeq( - Seq( - (_.warningMessage should include("DEADLINE_EXCEEDED"), "deadline exceeded") - ) - ), - ) - - env.close() - } - - "handle out-of-order responses" in { - val service = mock[IdentityInitializationService] - when(service.currentTime(any[CurrentTimeRequest])) - .thenReturn(Future.successful(CurrentTimeResponse(0L))) - - val env = new RemoteClockEnv(service) - - val firstAnswer = Promise[CantonTimestamp]() - val firstRequestObserved = Promise[Unit]() - - val ts1 = CantonTimestamp.ofEpochSecond(17) - val ts2 = CantonTimestamp.ofEpochSecond(23) - - when(service.currentTime(any[CurrentTimeRequest])).thenAnswer { (_: CurrentTimeRequest) => - firstRequestObserved.trySuccess(()) - firstAnswer.future.map(ts => CurrentTimeResponse(ts.toProtoPrimitive)) - } - - val firstF = Future { - env.clock.now - } - firstRequestObserved.future.futureValue - - when(service.currentTime(any[CurrentTimeRequest])).thenReturn( - Future.successful(CurrentTimeResponse(ts2.toProtoPrimitive)) - ) - - val second = env.clock.now - - firstAnswer.success(ts1) - - val first = firstF.futureValue - - second shouldBe ts2 - first shouldBe ts1 - - env.close() - } - - "handle gRPC errors" in { - val service = mock[IdentityInitializationService] - when(service.currentTime(any[CurrentTimeRequest])) - 
.thenReturn(Future.successful(CurrentTimeResponse(0L))) - val env = new RemoteClockEnv(service) - - val ts = CantonTimestamp.ofEpochMilli(1) - val taskF = env.clock.scheduleAt(_ => (), ts) - - // Return some random gRPC errors first, then a time - val err = DeprecatedProtocolVersion - .WarnParticipant(InstanceName.tryCreate("foo"), None) - .asGrpcError - loggerFactory.assertLogs( - { - when(service.currentTime(any[CurrentTimeRequest])) - .thenReturn( - Future.failed(err), - Future.failed(err), - Future.successful(CurrentTimeResponse(ts.toProtoPrimitive)), - ) - - taskF.futureValueUS - }, - _.errorMessage should include("Request failed for remote clock server"), - _.errorMessage should include("Request failed for remote clock server"), - ) - - env.close() - } - - "handle concurrent close gracefully" in { - val service = mock[IdentityInitializationService] - when(service.currentTime(any[CurrentTimeRequest])) - .thenReturn(Future.successful(CurrentTimeResponse(0L))) - val env = new RemoteClockEnv(service) - - val firstAnswer = Promise[CantonTimestamp]() - val firstRequestObserved = Promise[Unit]() - - when(service.currentTime(any[CurrentTimeRequest])).thenAnswer { (_: CurrentTimeRequest) => - firstRequestObserved.trySuccess(()) - firstAnswer.future.map(ts => CurrentTimeResponse(ts.toProtoPrimitive)) - } - - val nowF = Future(env.clock.now) - - firstRequestObserved.future.futureValue - env.close() - nowF.futureValue shouldBe CantonTimestamp.MinValue.immediateSuccessor - - } - } - - private class RemoteClockEnv( - val service: IdentityInitializationService, - timeouts: ProcessingTimeout = ClockTest.this.timeouts, - ) { - val channelName: String = InProcessServerBuilder.generateName() - - val registry: MutableHandlerRegistry = new MutableHandlerRegistry - - val server: Server = InProcessServerBuilder - .forName(channelName) - .fallbackHandlerRegistry(registry) - .build() - - server.start() - - val clockServiceDefinition: ServerServiceDefinition = - io.grpc.ServerInterceptors.intercept( - IdentityInitializationServiceGrpc.bindService(service, parallelExecutionContext), - TraceContextGrpc.serverInterceptor, - ) - - registry.addService(clockServiceDefinition) - - val channel: ManagedChannel = InProcessChannelBuilder - .forName(channelName) - .intercept(TraceContextGrpc.clientInterceptor) - .build() - - val clock = new RemoteClock(channel, timeouts, loggerFactory) - - def close(): Unit = { - clock.close() // This closes the channel too - - server.shutdown() - server.awaitTermination() - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/time/GeneratorsTime.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/time/GeneratorsTime.scala deleted file mode 100644 index 7fd83428fd..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/time/GeneratorsTime.scala +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
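// Editor's aside: an illustrative reduction (not Canton's SimClock) of the deterministic
// scheduling that the SimClock tests above rely on. Tasks are queued with a target time and
// run only when an explicit advanceTo moves the simulated clock past them, so assertions
// never race the wall clock. Long timestamps stand in for CantonTimestamp.
object SimClockSketch {
  import scala.collection.mutable
  import scala.concurrent.{Future, Promise}

  final class TinySimClock(start: Long) {
    private case class Task(at: Long, run: Long => Unit, done: Promise[Unit])
    // negate the key so the queue's head is the task with the smallest target time
    private val tasks = mutable.PriorityQueue.empty[Task](Ordering.by((t: Task) => -t.at))
    private var now = start

    def scheduleAt(run: Long => Unit, at: Long): Future[Unit] = synchronized {
      val p = Promise[Unit]()
      tasks.enqueue(Task(at, run, p))
      runDue() // a task scheduled at or before `now` executes immediately
      p.future
    }

    def advanceTo(t: Long): Unit = synchronized {
      now = math.max(now, t)
      runDue()
    }

    private def runDue(): Unit =
      while (tasks.headOption.exists(_.at <= now)) {
        val task = tasks.dequeue()
        task.run(now)
        task.done.success(())
      }
  }
}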
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.time - -import org.scalacheck.Arbitrary - -import java.time.Duration - -object GeneratorsTime { - import com.digitalasset.canton.config.GeneratorsConfig.* - import org.scalatest.EitherValues.* - - implicit val nonNegativeSecondsArb: Arbitrary[NonNegativeSeconds] = Arbitrary( - nonNegativeLongArb.arbitrary.map(i => NonNegativeSeconds.tryOfSeconds(i.unwrap)) - ) - - implicit val nonNegativeFiniteDurationArb: Arbitrary[NonNegativeFiniteDuration] = Arbitrary( - nonNegativeLongArb.arbitrary.map(i => - NonNegativeFiniteDuration.create(Duration.ofNanos(i.unwrap)).value - ) - ) - - implicit val positiveFiniteDurationArb: Arbitrary[PositiveFiniteDuration] = Arbitrary( - nonNegativeLongArb.arbitrary.map(i => - PositiveFiniteDuration.create(Duration.ofNanos(i.unwrap)).value - ) - ) - - implicit val positiveSecondsArb: Arbitrary[PositiveSeconds] = Arbitrary( - positiveLongArb.arbitrary.map(i => PositiveSeconds.tryOfSeconds(i.unwrap)) - ) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/time/PeriodicActionTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/time/PeriodicActionTest.scala deleted file mode 100644 index f0af44e1a9..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/time/PeriodicActionTest.scala +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.time - -import com.digitalasset.canton.concurrent.Threading -import com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.wordspec.AsyncWordSpec - -import java.time.Duration -import java.util.concurrent.atomic.AtomicInteger -import scala.concurrent.Future - -@SuppressWarnings(Array("org.wartremover.warts.Var")) -class PeriodicActionTest extends AsyncWordSpec with BaseTest with HasExecutionContext { - - private val interval = NonNegativeFiniteDuration.tryOfSeconds(5) - - class Env(actionDelay: Duration = Duration.ZERO) { - val clock = new SimClock(CantonTimestamp.Epoch, loggerFactory) - val numberOfCalls = new AtomicInteger(0) - - val sut = new PeriodicAction( - clock, - interval, - loggerFactory, - ProcessingTimeout(), - "test", - )(_ => { - numberOfCalls.incrementAndGet() - clock.scheduleAfter(_ => (), actionDelay) - }) - - // Sometimes we need to make sure that the asynchronous scheduling of the next task has happened (in real time) - def eventuallyOneTaskIsScheduled() = - eventually()(clock.numberOfScheduledTasks shouldBe 1) - } - - "should call function periodically" in { - val env = new Env() - import env.* - - eventuallyOneTaskIsScheduled() - numberOfCalls.get shouldBe 0 - - clock.advance(interval.duration) - eventuallyOneTaskIsScheduled() - numberOfCalls.get shouldBe 1 - - clock.advance(interval.duration) - eventuallyOneTaskIsScheduled() - numberOfCalls.get shouldBe 2 - - clock.advance(interval.duration) - eventuallyOneTaskIsScheduled() - numberOfCalls.get shouldBe 3 - - sut.close() - - clock.advance(interval.duration) - numberOfCalls.get shouldBe 3 - clock.advance(interval.duration) - numberOfCalls.get shouldBe 3 - } - - "should not call function after we are closed" in { - val env = new Env() - import env.* - - numberOfCalls.get shouldBe 0 - - sut.close() - - 
clock.advance(interval.duration.multipliedBy(2L)) - numberOfCalls.get shouldBe 0 - } - - "should wait in close for potentially concurrent running action to be finished" in { - val env = new Env(interval.duration.multipliedBy(2)) - import env.* - - eventuallyOneTaskIsScheduled() - numberOfCalls.get shouldBe 0 - - clock.advance(interval.duration) - numberOfCalls.get shouldBe 1 - eventuallyOneTaskIsScheduled() - - val closingFuture = Future(sut.close()) - - // sleep in real time too, so the closingFuture has a chance to complete on a separate thread - Threading.sleep(200) - - closingFuture.isCompleted shouldBe false - numberOfCalls.get shouldBe 1 - eventuallyOneTaskIsScheduled() - - clock.advance(interval.duration) - // we only advanced one interval, so the started task is still running - numberOfCalls.get shouldBe 1 - eventuallyOneTaskIsScheduled() - closingFuture.isCompleted shouldBe false - - clock.advance(interval.duration) - numberOfCalls.get shouldBe 1 - // after the task finished and the testee closed, there should be no more scheduled tasks for the clock - eventually()(clock.numberOfScheduledTasks shouldBe 0) - // This might also happen asynchronously - eventually()(closingFuture.isCompleted shouldBe true) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/time/RefinedDurationsTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/time/RefinedDurationsTest.scala deleted file mode 100644 index c20d96aa92..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/time/RefinedDurationsTest.scala +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.time - -import com.digitalasset.canton.BaseTest -import org.scalatest.wordspec.AnyWordSpec - -import java.time.Duration - -class RefinedDurationsTest extends AnyWordSpec with BaseTest { - lazy val zero = Duration.ZERO - lazy val oneSec = Duration.ofSeconds(1) - - "NonNegativeFiniteDuration" should { - "have create method" in { - Seq(zero, oneSec, Duration.ofMillis(1)).foreach { d => - NonNegativeFiniteDuration.create(d).value.unwrap shouldBe d - } - - NonNegativeFiniteDuration.create(Duration.ofSeconds(-1)).left.value shouldBe a[String] - } - } - - "NonNegativeSeconds" should { - "have create method" in { - Seq(zero, oneSec).foreach { d => - NonNegativeSeconds.create(d).value.unwrap shouldBe d - } - - NonNegativeSeconds.create(Duration.ofSeconds(-1)).left.value shouldBe a[String] - NonNegativeSeconds.create(Duration.ofMillis(1)).left.value shouldBe a[String] - } - } - - "PositiveSeconds" should { - "have create method" in { - PositiveSeconds.create(oneSec).value.unwrap shouldBe oneSec - - PositiveSeconds.create(Duration.ofSeconds(-1)).left.value shouldBe a[String] - PositiveSeconds.create(zero).left.value shouldBe a[String] - PositiveSeconds.create(Duration.ofMillis(1)).left.value shouldBe a[String] - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/time/SynchronizerTimeTrackerTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/time/SynchronizerTimeTrackerTest.scala deleted file mode 100644 index 20bc6b4541..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/time/SynchronizerTimeTrackerTest.scala +++ /dev/null @@ -1,395 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its
affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.time - -import cats.syntax.option.* -import com.digitalasset.canton.config.SynchronizerTimeTrackerConfig -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.UnlessShutdown.AbortedDueToShutdown -import com.digitalasset.canton.sequencing.OrdinaryProtocolEvent -import com.digitalasset.canton.sequencing.protocol.{ - Batch, - Deliver, - MessageId, - SignedContent, - TimeProof, -} -import com.digitalasset.canton.sequencing.traffic.TrafficReceipt -import com.digitalasset.canton.store.SequencedEventStore.OrdinarySequencedEvent -import com.digitalasset.canton.topology.DefaultTestIdentities -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.{BaseTest, SequencerCounter, config} -import org.scalatest.FutureOutcome -import org.scalatest.wordspec.FixtureAsyncWordSpec - -import java.time.Duration -import java.util.concurrent.atomic.AtomicBoolean -import scala.concurrent.Future - -class MockTimeRequestSubmitter extends TimeProofRequestSubmitter { - private val hasRequestedRef = new AtomicBoolean(false) - - def hasRequestedTime: Boolean = hasRequestedRef.get() - - def resetHasRequestedTime(): Unit = hasRequestedRef.set(false) - - override def fetchTimeProof()(implicit traceContext: TraceContext): Unit = - hasRequestedRef.set(true) - - override def handleTimeProof(proof: TimeProof): Unit = () - - override def close(): Unit = () -} - -class SynchronizerTimeTrackerTest extends FixtureAsyncWordSpec with BaseTest { - def ts(epochSeconds: Int): CantonTimestamp = CantonTimestamp.ofEpochSecond(epochSeconds.toLong) - - def timeProofEvent(ts: CantonTimestamp): OrdinaryProtocolEvent = - OrdinarySequencedEvent( - counter = SequencerCounter(0), - signedEvent = SignedContent( - Deliver.create( - None, - ts, - DefaultTestIdentities.physicalSynchronizerId, - TimeProof.mkTimeProofRequestMessageId.some, - Batch.empty(testedProtocolVersion), - None, - testedProtocolVersion, - Option.empty[TrafficReceipt], - ), - SymbolicCrypto.emptySignature, - None, - testedProtocolVersion, - ), - )(traceContext) - - def otherEvent(ts: CantonTimestamp): OrdinaryProtocolEvent = { - // create an event which won't be flagged as a time proof - val event = OrdinarySequencedEvent( - counter = SequencerCounter(0), - signedEvent = SignedContent( - Deliver.create( - None, - ts, - DefaultTestIdentities.physicalSynchronizerId, - MessageId.tryCreate("not a time proof").some, - Batch.empty(testedProtocolVersion), - None, - testedProtocolVersion, - Option.empty[TrafficReceipt], - ), - SymbolicCrypto.emptySignature, - None, - testedProtocolVersion, - ), - )(traceContext) - - // make sure future changes don't treat this as a time proof - TimeProof.fromEventO(event) shouldBe None - - event - } - - class Env { - // allow 2s to see events for a timestamp from our local clock - val observationLatencySecs = 2 - // put off requesting a time if we've seen an event within the last 4s - val patienceDurationSecs = 4 - val synchronizerTimeTrackerConfig = SynchronizerTimeTrackerConfig( - config.NonNegativeFiniteDuration.ofSeconds(observationLatencySecs.toLong), - config.NonNegativeFiniteDuration.ofSeconds(patienceDurationSecs.toLong), - ) - val clock = new SimClock(loggerFactory = loggerFactory) - val requestSubmitter = new MockTimeRequestSubmitter - val timeTracker = - new SynchronizerTimeTracker(
synchronizerTimeTrackerConfig, - clock, - requestSubmitter, - timeouts, - loggerFactory, - ) - - def observeTimeProof(epochSecs: Int): Future[Unit] = - Future.successful(timeTracker.update(Seq(timeProofEvent(ts(epochSecs))))) - - def observeTimestamp(epochSecs: Int): Future[Unit] = - Future.successful(timeTracker.update(Seq(otherEvent(ts(epochSecs))))) - - def advanceTo(epochSeconds: Int): Future[Unit] = { - clock.advanceTo(ts(epochSeconds)) - Future.unit - } - - def advanceToAndFlush(epochSecs: Int): Future[Unit] = { - clock.advanceTo(ts(epochSecs)) - timeTracker.flush() - } - - def advanceAndFlush(secs: Int): Future[Unit] = { - clock.advance(Duration.ofSeconds(secs.toLong)) - timeTracker.flush() - } - } - - override def withFixture(test: OneArgAsyncTest): FutureOutcome = { - val env = new Env - - withFixture(test.toNoArgAsyncTest(env)) - } - - override type FixtureParam = Env - - "requestTick" should { - // keep waiting if we're seeing regular events from the synchronizer - - "do nothing if an event is witnessed with an appropriate tick" in { env => - import env.* - - timeTracker.requestTick(ts(2)) - - for { - _ <- advanceTo(2) - // shouldn't have asked for a time proof as we're within the observation latency - _ = requestSubmitter.hasRequestedTime shouldBe false - _ <- advanceTo(3) - // shouldn't have asked for a time since, despite our local clock being ahead of what we want to witness, - // we're still behind that plus the observation latency - _ = requestSubmitter.hasRequestedTime shouldBe false - // now we'll produce an event which is past the time we're looking for - _ <- observeTimeProof(3) - // now we'll zoom ahead and make sure we never request a time proof as we don't need one - _ <- advanceTo(100) - } yield requestSubmitter.hasRequestedTime shouldBe false - } - - "request time proof if we surpass the time we're expecting" in { env => - import env.* - - timeTracker.subscriptionResumesAfter(ts(1)) - timeTracker.requestTick(ts(2)) - - for { - _ <- advanceTo(2) - _ = requestSubmitter.hasRequestedTime shouldBe false - _ <- advanceAndFlush(observationLatencySecs) - } yield requestSubmitter.hasRequestedTime shouldBe true - } - - "request time proof only after we have received the first event" in { env => - import env.* - - timeTracker.requestTick(ts(2)) - for { - _ <- advanceTo(2) - _ = requestSubmitter.hasRequestedTime shouldBe false - _ = clock.advance(Duration.ofSeconds(observationLatencySecs.toLong)) - _ = requestSubmitter.hasRequestedTime shouldBe false - _ = observeTimestamp(1) - _ = advanceAndFlush(patienceDurationSecs) - } yield requestSubmitter.hasRequestedTime shouldBe true - } - - "request time proof immediately" in { env => - import env.* - - timeTracker.requestTick(ts(2), immediately = true) - requestSubmitter.hasRequestedTime shouldBe true - } - - "ignore requested tick if too large to track" in { env => - import env.* - - // since we wait for the observation latency after the requested synchronizer time, using max value - // would cause the timestamp we're looking for to overflow - loggerFactory.assertLogs( - timeTracker.requestTicks( - Seq( - CantonTimestamp.MaxValue, - CantonTimestamp.MaxValue.minusSeconds(2), - CantonTimestamp.MaxValue.minusSeconds(1), - ) - ), - _.warningMessage should (include( - s"Ignoring request for 3 ticks from ${CantonTimestamp.MaxValue.minusSeconds(2)} to ${CantonTimestamp.MaxValue} as they are too large" - )), - ) - timeTracker.earliestExpectedObservationTime() shouldBe None - - // the upper bound is the time - observationLatency -
loggerFactory.assertLogs( - timeTracker.requestTick( - CantonTimestamp.MaxValue.minus(synchronizerTimeTrackerConfig.observationLatency.asJava) - ), - _.warningMessage should (include("Ignoring request for 1 ticks") and include( - "as they are too large" - )), - ) - timeTracker.earliestExpectedObservationTime() shouldBe None - - // but slightly below that should be suitable for tracking (despite being practically useless given it's in 9999) - loggerFactory.assertLogs( - timeTracker.requestTicks( - Seq( - CantonTimestamp.MaxValue, - CantonTimestamp.MaxValue - .minus(synchronizerTimeTrackerConfig.observationLatency.asJava) - .immediatePredecessor, - ) - ), - _.warningMessage should (include("Ignoring request for 1 ticks") and include( - "as they are too large" - )), - ) - timeTracker.earliestExpectedObservationTime().isDefined shouldBe true - } - } - - "fetch" should { - "timestamp should resolve on any received event" in { env => - import env.* - - timeTracker.subscriptionResumesAfter(ts(0)) - clock.advance(Duration.ofSeconds(patienceDurationSecs.toLong)) - - // make two distinct requests for the next timestamp to ensure they will both be resolved by the same event - val fetchP1 = timeTracker.fetchTime() - val fetchP2 = timeTracker.fetchTime() - - // should have immediately requested a fresh timestamp - requestSubmitter.hasRequestedTime shouldBe true - - for { - // provide an event with a timestamp (not our response to requesting a time) - _ <- observeTimeProof(42) - fetch1 <- fetchP1.failOnShutdown("fetch first time proof") - fetch2 <- fetchP2.failOnShutdown("fetch second time proof") - } yield { - fetch1 shouldBe ts(42) - fetch2 shouldBe ts(42) - } - } - - "immediately return if we have a suitably fresh timestamp" in { env => - import env.* - - clock.advanceTo(ts(1)) - - for { - _ <- observeTimeProof(42) - _ = clock.advanceTo(ts(5)) - // should return the existing observation as it's within the freshness bounds - fetch1 <- timeTracker - .fetchTime(NonNegativeFiniteDuration.tryOfSeconds(5)) - .failOnShutdown("fetch time") - _ = fetch1 shouldBe ts(42) - // we've returned a sufficiently fresh time without causing a request - _ = requestSubmitter.hasRequestedTime shouldBe false - // however, if we now ask for a timestamp observed within the last 2 seconds, we'll have to go fetch one - fetch2F = timeTracker - .fetchTime(NonNegativeFiniteDuration.tryOfSeconds(2)) - .failOnShutdown("fetch time") - _ = requestSubmitter.hasRequestedTime shouldBe true - _ <- observeTimeProof(43) - fetch2 <- fetch2F - // should now hand us the new observation - } yield fetch2 shouldBe ts(43) - } - - "fetching time proof when there isn't a fresh one available should immediately force a request" in { - env => - import env.* - - clock.advanceTo(ts(1)) - - for { - // observe a recent event which isn't a time proof - _ <- observeTimestamp(1) - timeProofF = timeTracker.fetchTimeProof().failOnShutdown("fetch time") - // although we've seen a recent event, we know this won't suffice as a time proof - _ = requestSubmitter.hasRequestedTime shouldBe true - // then observe one - _ <- observeTimeProof(42) - timeProof <- timeProofF - } yield timeProof.timestamp shouldBe ts(42) - } - - "stop waiting on shutdown" in { env => - import env.* - - clock.advanceTo(ts(1)) - - for { - // observe a recent event which isn't a time proof - _ <- observeTimestamp(1) - timeProofF = timeTracker.fetchTimeProof() - // Shut down the time tracker - _ = timeTracker.close() - timeProof <- timeProofF.unwrap - } yield timeProof shouldBe
AbortedDueToShutdown - } - } - - "awaitTick" should { - "only resolve future when we've reached the given time" in { env => - import env.* - - for { - _ <- observeTimeProof(1) - awaitO = timeTracker.awaitTick(ts(3)) - awaitF = - awaitO.value // should have returned a future as we have not yet observed the requested synchronizer time - _ <- observeTimeProof(2) - _ = awaitF.isCompleted shouldBe false - _ <- observeTimeProof(3) - awaitedTs <- awaitF - } yield awaitedTs shouldBe ts(3) - } - - "return None if we've already witnessed an equal or greater timestamp from the synchronizer" in { - env => - import env.* - - for { - _ <- observeTimeProof(42) - awaitO = timeTracker.awaitTick(ts(10)) - } yield { - awaitO shouldBe None - timeTracker.earliestExpectedObservationTime() shouldBe None - } - } - } - - "ensure minimum time interval" should { - "should ask for time if a sufficient amount of local time progresses" in { env => - import env.* - - timeTracker.subscriptionResumesAfter(ts(0)) - // advance to our min observation duration without witnessing a time - clock.advance(synchronizerTimeTrackerConfig.minObservationDuration.asJava.plusMillis(1)) - // we should request one - requestSubmitter.hasRequestedTime shouldBe true - requestSubmitter.resetHasRequestedTime() // reset to use again - - for { - // will resolve our request to fetch a time - _ <- observeTimeProof(10) - // advance to almost the time we should wait - _ = clock.advance( - synchronizerTimeTrackerConfig.minObservationDuration.asJava.minusSeconds(1) - ) - _ <- observeTimeProof(11) // observe a time - _ = clock.advance( - synchronizerTimeTrackerConfig.minObservationDuration.asJava.minusSeconds(1) - ) - _ = requestSubmitter.hasRequestedTime shouldBe false // we shouldn't have requested a time - _ = clock.advance( - Duration.ofSeconds(1).plusMillis(1) - ) // advance one more second and a bit to hit our window - } yield requestSubmitter.hasRequestedTime shouldBe true // should now have requested - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/time/TimeProofRequestSubmitterTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/time/TimeProofRequestSubmitterTest.scala deleted file mode 100644 index e574d59357..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/time/TimeProofRequestSubmitterTest.scala +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
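// Editor's aside: a hedged sketch of the decision rule the SynchronizerTimeTrackerTest above
// pins down; field names and Long-second timestamps are illustrative, not the real
// SynchronizerTimeTracker. A time proof is requested only once the local clock is past the
// requested tick plus the observation latency, and only if no event arrived within the
// patience window.
object TimeTrackerSketch {
  final case class TrackerState(
      localNow: Long, // local clock, in seconds
      requestedTick: Long, // earliest synchronizer time a caller asked to observe
      latestObserved: Option[Long], // latest synchronizer timestamp seen so far
      lastEventAt: Option[Long], // local time at which any event was last received
  )

  def shouldRequestTimeProof(s: TrackerState, observationLatency: Long, patience: Long): Boolean = {
    val tickAlreadyObserved = s.latestObserved.exists(_ >= s.requestedTick)
    val pastDeadline = s.localNow > s.requestedTick + observationLatency
    val sawEventRecently = s.lastEventAt.exists(s.localNow - _ < patience)
    !tickAlreadyObserved && pastDeadline && !sawEventRecently
  }
}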
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.time - -import cats.data.EitherT -import cats.syntax.option.* -import com.digitalasset.canton.config.CantonRequireTypes.String73 -import com.digitalasset.canton.config.TimeProofRequestConfig -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.sequencing.client.SendAsyncClientError -import com.digitalasset.canton.sequencing.protocol.{ - Batch, - Deliver, - MessageId, - SignedContent, - TimeProof, -} -import com.digitalasset.canton.sequencing.traffic.TrafficReceipt -import com.digitalasset.canton.store.SequencedEventStore.OrdinarySequencedEvent -import com.digitalasset.canton.topology.DefaultTestIdentities -import com.digitalasset.canton.util.EitherTUtil -import com.digitalasset.canton.{BaseTest, SequencerCounter} -import org.scalatest.FutureOutcome -import org.scalatest.wordspec.FixtureAsyncWordSpec - -import java.util.concurrent.atomic.{AtomicInteger, AtomicReference} -import scala.concurrent.{Future, Promise} - -class TimeProofRequestSubmitterTest extends FixtureAsyncWordSpec with BaseTest { - - class Env extends AutoCloseable { - val config = TimeProofRequestConfig() - val callCount = new AtomicInteger() - val nextRequestP = new AtomicReference[Option[Promise[Unit]]](None) - val nextResult = - new AtomicReference[EitherT[Future, SendAsyncClientError, Unit]]( - EitherTUtil.unit[SendAsyncClientError] - ) - val clock = new SimClock(loggerFactory = loggerFactory) - val timeRequestSubmitter = - new TimeProofRequestSubmitterImpl( - config, - _ => handleRequest(), - clock, - timeouts, - loggerFactory, - ) - - private def handleRequest(): EitherT[FutureUnlessShutdown, SendAsyncClientError, Unit] = { - callCount.incrementAndGet() - nextRequestP.get.foreach(_.trySuccess(())) - nextRequestP.set(None) - nextResult.get().mapK(FutureUnlessShutdown.outcomeK) - } - - def triggerTime(): EitherT[Future, SendAsyncClientError, Unit] = { - callCount.incrementAndGet() - nextResult.get() - } - - def waitForNextRequest(): Future[Unit] = { - val promise = Promise[Unit]() - if (!nextRequestP.compareAndSet(None, promise.some)) { - fail("promise for next request was already setup") - } - promise.future - } - - def setupNextResultPromise(): Promise[Either[SendAsyncClientError, Unit]] = { - val promise = Promise[Either[SendAsyncClientError, Unit]]() - - nextResult.set(EitherT(promise.future)) - - promise - } - - def mkTimeProof(seconds: Int): TimeProof = { - val event = - OrdinarySequencedEvent( - counter = SequencerCounter(0), - signedEvent = SignedContent( - Deliver.create( - None, - CantonTimestamp.ofEpochSecond(seconds.toLong), - DefaultTestIdentities.physicalSynchronizerId, - Some(MessageId(String73.tryCreate(s"tick-$seconds"))), - Batch.empty(testedProtocolVersion), - None, - testedProtocolVersion, - Option.empty[TrafficReceipt], - ), - SymbolicCrypto.emptySignature, - None, - testedProtocolVersion, - ), - )(traceContext) - TimeProof.fromEventO(event).value - } - - override def close(): Unit = timeRequestSubmitter.close() - } - - type FixtureParam = Env - - override def withFixture(test: OneArgAsyncTest): FutureOutcome = { - val env = new Env() - - complete { - withFixture(test.toNoArgAsyncTest(env)) - } lastly { - env.close() - } - } - - "time request submitter" should { - "avoid making concurrent calls when a request is in progress" in { env => - import env.* - - val 
nextRequestP = waitForNextRequest() - // should trigger a request - timeRequestSubmitter.fetchTimeProof() - // should now just reuse the pending prior request - timeRequestSubmitter.fetchTimeProof() - timeRequestSubmitter.handleTimeProof(mkTimeProof(0)) - - for { - _ <- nextRequestP - } yield callCount.get() shouldBe 1 - } - - "retry request if an appropriate event is not witnessed within our custom max-sequencing-duration" in { - env => - import env.* - - val request1F = waitForNextRequest() - timeRequestSubmitter.fetchTimeProof() // kicks off getting a time event - - for { - _ <- timeRequestSubmitter.flush() - _ <- request1F - // setup waiting for the next request but don't yet wait for it - request2F = waitForNextRequest() - // now advance past the time that we think we should have seen the time event - _ = clock.advance(config.maxSequencingDelay.asJava) - // now expect that a new request is made - _ <- timeRequestSubmitter.flush() - _ <- request2F - // if a time event is now witnessed we don't make another request - _ = timeRequestSubmitter.handleTimeProof(mkTimeProof(0)) - _ = clock.advance(config.maxSequencingDelay.asJava.plusMillis(1)) - _ <- timeRequestSubmitter.flush() - } yield callCount.get() shouldBe 2 - } - - "avoid more than one pending request when a time event is witnessed and new request started during the max-sequencing duration" in { - env => - import env.* - - timeRequestSubmitter.fetchTimeProof() - val callCountAtStart = callCount.get() - - for { - _ <- timeRequestSubmitter.flush() - // immediately witness an event - _ = timeRequestSubmitter.handleTimeProof(mkTimeProof(0)) - _ <- timeRequestSubmitter.flush() - callCountNoRequest = callCount.get() - // then immediately start a new request - request2F = waitForNextRequest() - _ = timeRequestSubmitter.fetchTimeProof() - // that should have started a new request - _ <- timeRequestSubmitter.flush() - _ <- request2F - callCountBefore = callCount.get() - // now when the maxSequencingDelay has elapsed we'll have two scheduled checks looking to see if they should attempt another request - // the first has actually already been satisfied but the second is still pending - // we want to see only a single request being made as the first request should determine it's no longer active - _ = clock.advance(config.maxSequencingDelay.asJava) - _ <- timeRequestSubmitter.flush() - callCountAfter = callCount.get() - } yield { - callCountNoRequest shouldBe callCountAtStart - callCountAfter shouldBe (callCountBefore + 1) - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/time/TimeProofTestUtil.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/time/TimeProofTestUtil.scala deleted file mode 100644 index 67146c9552..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/time/TimeProofTestUtil.scala +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
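// Editor's aside: an illustrative reduction (not the real TimeProofRequestSubmitterImpl) of
// the single-in-flight behavior the first test above verifies: concurrent fetchTimeProof
// calls collapse onto one pending request, and handling a time proof clears the flag so a
// later fetch can issue a fresh request.
object TimeProofRequestSketch {
  import java.util.concurrent.atomic.AtomicBoolean

  final class DedupingRequester(sendRequest: () => Unit) {
    private val pending = new AtomicBoolean(false)

    def fetchTimeProof(): Unit =
      if (pending.compareAndSet(false, true)) sendRequest() // only the first caller sends

    def handleTimeProof(): Unit = pending.set(false) // a witnessed proof re-arms the requester
  }
}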
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.time - -import cats.syntax.option.* -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.sequencing.protocol.{Batch, Deliver, SignedContent, TimeProof} -import com.digitalasset.canton.sequencing.traffic.TrafficReceipt -import com.digitalasset.canton.store.SequencedEventStore.OrdinarySequencedEvent -import com.digitalasset.canton.topology.{DefaultTestIdentities, PhysicalSynchronizerId} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.ReassignmentTag.Target -import com.digitalasset.canton.version.ProtocolVersion -import com.digitalasset.canton.{BaseTest, SequencerCounter} - -object TimeProofTestUtil { - def mkTimeProof( - timestamp: CantonTimestamp, - previousEventTimestamp: Option[CantonTimestamp] = None, - counter: Long = 0L, - targetSynchronizer: Target[PhysicalSynchronizerId] = Target( - DefaultTestIdentities.physicalSynchronizerId - ), - protocolVersion: ProtocolVersion = BaseTest.testedProtocolVersion, - ): TimeProof = { - val deliver = Deliver.create( - previousEventTimestamp, - timestamp, - targetSynchronizer.unwrap, - TimeProof.mkTimeProofRequestMessageId.some, - Batch.empty(protocolVersion), - None, - protocolVersion, - Option.empty[TrafficReceipt], - ) - val signedContent = - SignedContent(deliver, SymbolicCrypto.emptySignature, None, protocolVersion) - val event = OrdinarySequencedEvent(SequencerCounter(counter), signedContent)(TraceContext.empty) - TimeProof - .fromEvent(event) - .fold(err => sys.error(s"Failed to create time proof: $err"), identity) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/GeneratorsTopology.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/GeneratorsTopology.scala deleted file mode 100644 index e5a0113f08..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/GeneratorsTopology.scala +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology - -import com.digitalasset.canton.config.RequireTypes.NonNegativeInt -import com.digitalasset.canton.crypto.Fingerprint -import com.digitalasset.canton.version.ProtocolVersion -import magnolify.scalacheck.auto.* -import org.scalacheck.Arbitrary - -final class GeneratorsTopology(protocolVersion: ProtocolVersion) { - import com.digitalasset.canton.config.GeneratorsConfig.* - - implicit val fingerprintArb: Arbitrary[Fingerprint] = Arbitrary( - string68Arb.arbitrary.map(Fingerprint.tryFromString) - ) - implicit val namespaceArb: Arbitrary[Namespace] = Arbitrary( - fingerprintArb.arbitrary.map(Namespace(_)) - ) - implicit val uniqueIdentifierArb: Arbitrary[UniqueIdentifier] = Arbitrary( - for { - id <- string185Arb.arbitrary - fp <- string68Arb.arbitrary - } yield UniqueIdentifier.tryCreate(id.str, fp.str) - ) - implicit val synchronizerIdArb: Arbitrary[SynchronizerId] = genArbitrary - implicit val mediatorIdArb: Arbitrary[MediatorId] = genArbitrary - implicit val sequencerIdArb: Arbitrary[SequencerId] = genArbitrary - implicit val memberArb: Arbitrary[Member] = genArbitrary - implicit val partyIdArb: Arbitrary[PartyId] = genArbitrary - implicit val identityArb: Arbitrary[Identity] = genArbitrary - - implicit val physicalSynchronizerIdArb: Arbitrary[PhysicalSynchronizerId] = Arbitrary(for { - synchronizerId <- synchronizerIdArb.arbitrary - serial <- Arbitrary.arbitrary[NonNegativeInt] - } yield PhysicalSynchronizerId(synchronizerId, protocolVersion, serial)) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/MediatorGroupDeltaComputationsTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/MediatorGroupDeltaComputationsTest.scala deleted file mode 100644 index cede005d81..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/MediatorGroupDeltaComputationsTest.scala +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} -import com.digitalasset.canton.topology.transaction.MediatorSynchronizerState -import org.scalatest.wordspec.AnyWordSpec - -import scala.annotation.nowarn - -@nowarn("msg=match may not be exhaustive") -class MediatorGroupDeltaComputationsTest extends AnyWordSpec with BaseTest { - private def mediatorIdFor(idx: Int) = - MediatorId(UniqueIdentifier.tryCreate(s"mediator$idx", s"m$idx")) - - private lazy val Seq(m1, m2, m3, m4) = (1 to 4).map(mediatorIdFor) - - def range(from: Int, to: Int): Seq[MediatorId] = from to to map mediatorIdFor - - def mds(active: Seq[MediatorId], observers: Seq[MediatorId]): Option[MediatorSynchronizerState] = - Some( - MediatorSynchronizerState - .create( - DefaultTestIdentities.synchronizerId, - NonNegativeInt.zero, - PositiveInt.one, - active, - observers, - ) - .value - ) - - "MediatorGroupDeltaComputations.verifyProposalConsistency" should { - "succeed on non-overlapping mediatorIds" in { - MediatorGroupDeltaComputations - .verifyProposalConsistency( - adds = range(1, 2), - removes = range(3, 4), - observerAdds = range(5, 6), - observerRemoves = range(7, 8), - updateThreshold = None, - ) - .value shouldBe () - } - - "succeed when making active mediators observers and vice versa" in { - MediatorGroupDeltaComputations - .verifyProposalConsistency( - adds = range(1, 2), - removes = range(3, 4), - observerAdds = range(3, 4), - observerRemoves = range(1, 2), - updateThreshold = None, - ) - .value shouldBe () - } - - "complain about empty changes" in { - MediatorGroupDeltaComputations - .verifyProposalConsistency( - adds = Nil, - removes = Nil, - observerAdds = Nil, - observerRemoves = Nil, - updateThreshold = None, - ) - .leftOrFail("bad proposal") shouldBe "no mediator group changes proposed" - } - - "complain about overlapping adds and removes" in { - MediatorGroupDeltaComputations - .verifyProposalConsistency( - adds = range(1, 2), - removes = range(2, 3), - observerAdds = Nil, - observerRemoves = Nil, - updateThreshold = None, - ) - .leftOrFail( - "bad proposal" - ) shouldBe "the same mediators MED::mediator2::m2 cannot be added and removed as active in the same proposal" - } - - "complain about overlapping adds and observer adds" in { - MediatorGroupDeltaComputations - .verifyProposalConsistency( - adds = range(1, 2), - removes = Nil, - observerAdds = range(2, 3), - observerRemoves = Nil, - updateThreshold = None, - ) - .leftOrFail( - "bad proposal" - ) shouldBe "the same mediators MED::mediator2::m2 cannot be added as active and observer in the same proposal" - } - - "complain about overlapping observer adds and observer removes" in { - MediatorGroupDeltaComputations - .verifyProposalConsistency( - adds = Nil, - removes = Nil, - observerAdds = range(1, 2), - observerRemoves = range(2, 3), - updateThreshold = None, - ) - .leftOrFail( - "bad proposal" - ) shouldBe "the same mediators MED::mediator2::m2 cannot be added and removed as observer in the same proposal" - } - - "complain about overlapping removes and observer removes" in { - MediatorGroupDeltaComputations - .verifyProposalConsistency( - adds = Nil, - removes = range(1, 2), - observerAdds = Nil, - observerRemoves = range(2, 3), - updateThreshold = None, - ) - .leftOrFail( - "bad proposal" - ) shouldBe "the same mediators MED::mediator2::m2 cannot be removed as active and observer in the same 
proposal" - } - - "complain about multiple overlapping changes" in { - MediatorGroupDeltaComputations - .verifyProposalConsistency( - adds = range(1, 2) :+ m4, - removes = range(2, 4), - observerAdds = range(2, 3), - observerRemoves = range(1, 2), - updateThreshold = None, - ) - .leftOrFail( - "bad proposal" - ) shouldBe - "the same mediators MED::mediator2::m2,MED::mediator4::m4 cannot be added and removed as active in the same proposal, " + - "the same mediators MED::mediator2::m2 cannot be added as active and observer in the same proposal, " + - "the same mediators MED::mediator2::m2 cannot be added and removed as observer in the same proposal, " + - "the same mediators MED::mediator2::m2 cannot be removed as active and observer in the same proposal" - } - } - - "MediatorGroupDeltaComputations.verifyProposalAgainstCurrentState" should { - - "succeed with a brand-new MDS with an active and an observer mediator" in { - MediatorGroupDeltaComputations.verifyProposalAgainstCurrentState( - None, - adds = Seq(m1), - removes = Nil, - observerAdds = Seq(m2), - observerRemoves = Nil, - updateThreshold = None, - ) - } - - "succeed adding new active and observer mediators to existing MDS" in { - MediatorGroupDeltaComputations.verifyProposalAgainstCurrentState( - mds(Seq(m1), Seq(m2)), - adds = Seq(m3), - removes = Nil, - observerAdds = Seq(m4), - observerRemoves = Nil, - updateThreshold = None, - ) - } - - "complain when adding existing active and observer mediators" in { - MediatorGroupDeltaComputations - .verifyProposalAgainstCurrentState( - mds(Seq(m1), Seq(m2, m3)), - adds = Seq(m1), - removes = Nil, - observerAdds = Seq(m2, m3), - observerRemoves = Nil, - updateThreshold = None, - ) - .leftOrFail( - "bad proposal" - ) shouldBe "mediators MED::mediator1::m1 to be added already active, " + - "mediators MED::mediator2::m2,MED::mediator3::m3 to be added as observer already observer" - } - - "complain when removing non-existing active and observer mediators" in { - MediatorGroupDeltaComputations - .verifyProposalAgainstCurrentState( - mds(Seq(m1), Seq(m2, m3)), - adds = Nil, - removes = Seq(m2, m3), - observerAdds = Nil, - observerRemoves = Seq(m1), - updateThreshold = None, - ) - .leftOrFail( - "bad proposal" - ) shouldBe "mediators MED::mediator2::m2,MED::mediator3::m3 to be removed not active, " + - "mediators MED::mediator1::m1 to be removed as observer not observer" - } - - "complain when removing last active mediator" in { - MediatorGroupDeltaComputations - .verifyProposalAgainstCurrentState( - mds(Seq(m1), Seq.empty), - adds = Nil, - removes = Seq(m1), - observerAdds = Nil, - observerRemoves = Nil, - updateThreshold = None, - ) - .leftOrFail( - "bad proposal" - ) shouldBe "mediator group without active mediators" - } - - "complain when setting threshold too high" in { - MediatorGroupDeltaComputations - .verifyProposalAgainstCurrentState( - mds(Seq(m1), Seq.empty), - adds = Nil, - removes = Nil, - observerAdds = Nil, - observerRemoves = Nil, - updateThreshold = Some(PositiveInt.two), - ) - .leftOrFail( - "bad proposal" - ) shouldBe "mediator group threshold 2 larger than active mediator size 1" - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/PartyToParticipantComputationsTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/PartyToParticipantComputationsTest.scala deleted file mode 100644 index d27589747f..0000000000 --- 
a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/PartyToParticipantComputationsTest.scala +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.topology.transaction.ParticipantPermission.{ - Confirmation, - Observation, - Submission, -} -import org.scalatest.wordspec.AnyWordSpec - -class PartyToParticipantComputationsTest extends AnyWordSpec with BaseTest { - - private lazy val computations = new PartyToParticipantComputations(loggerFactory) - - private def participantIdFor(idx: Int) = - ParticipantId(UniqueIdentifier.tryCreate(s"participant$idx", s"participant$idx-identity")) - - private val p1 = participantIdFor(1) - private val p2 = participantIdFor(2) - private val p3 = participantIdFor(3) - - "PartyToParticipantComputations" should { - "return an error when adds and removes overlap" in { - computations - .computeNewPermissions( - Map(p1 -> Submission), - adds = List((p2, Submission)), - removes = List(p2), - ) - .left - .value should include( - "Permissions for the following participant were found in adds and removes:" - ) - } - - "return an error when trying to remove a participant which is not permissioned" in { - computations - .computeNewPermissions( - Map(p1 -> Submission), - removes = List(p2), - ) - .left - .value shouldBe s"Cannot remove permission for participants that are not permissioned: ${Set(p2)}" - - computations - .computeNewPermissions( - Map(p1 -> Submission), - removes = List(p1), - ) - .value shouldBe empty - } - - "allow adding and removing permissions" in { - computations - .computeNewPermissions( - Map(p1 -> Submission, p2 -> Observation), - adds = List((p3, Confirmation)), - removes = List(p1), - ) - .value shouldBe Map(p2 -> Observation, p3 -> Confirmation) - } - - "update existing added permissions" in { - val updated = Map(p1 -> Confirmation) - computations - .computeNewPermissions(Map(p1 -> Submission), adds = List((p1, Confirmation))) - .value shouldBe updated - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/PhysicalSynchronizerIdTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/PhysicalSynchronizerIdTest.scala deleted file mode 100644 index c5c6454bee..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/PhysicalSynchronizerIdTest.scala +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology - -import com.digitalasset.canton.config.RequireTypes.NonNegativeInt -import com.digitalasset.canton.crypto.Fingerprint -import com.digitalasset.canton.version.ProtocolVersion -import org.scalatest.EitherValues -import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.AnyWordSpec - -import scala.util.Random - -class PhysicalSynchronizerIdTest extends AnyWordSpec with EitherValues with Matchers { - "PhysicalSynchronizerId" should { - "be parsed from string" in { - val namespace: Namespace = Namespace(Fingerprint.tryFromString("default")) - val lsid1: SynchronizerId = SynchronizerId(UniqueIdentifier.tryCreate("da", namespace)) - val lsid2: SynchronizerId = SynchronizerId(UniqueIdentifier.tryCreate("da-second", namespace)) - val pv = ProtocolVersion.latest - - val str1 = s"da::default::${pv.toString}-0" - val str2 = s"da-second::default::${pv.toString}-1" - - PhysicalSynchronizerId.fromString(str1).value shouldBe PhysicalSynchronizerId( - lsid1, - pv, - NonNegativeInt.zero, - ) - PhysicalSynchronizerId.fromString(str2).value shouldBe PhysicalSynchronizerId( - lsid2, - pv, - NonNegativeInt.one, - ) - } - - "be properly ordered" in { - val namespace: Namespace = Namespace(Fingerprint.tryFromString("default")) - val lsid: SynchronizerId = SynchronizerId(UniqueIdentifier.tryCreate("da", namespace)) - - val psid_latest_0 = - PhysicalSynchronizerId(lsid, ProtocolVersion.latest, serial = NonNegativeInt.zero) - val psid_latest_1 = - PhysicalSynchronizerId(lsid, ProtocolVersion.latest, serial = NonNegativeInt.one) - val psid_dev_2 = - PhysicalSynchronizerId(lsid, ProtocolVersion.dev, serial = NonNegativeInt.two) - - val inCorrectOrder = List(psid_latest_0, psid_latest_1, psid_dev_2) - Random.shuffle(inCorrectOrder).sorted shouldBe inCorrectOrder - - val inCorrectOptionOrder = None :: inCorrectOrder.map(Some(_)) - Random.shuffle(inCorrectOptionOrder).sorted shouldBe inCorrectOptionOrder - - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/TestingIdentityFactoryTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/TestingIdentityFactoryTest.scala deleted file mode 100644 index 8620b17dc0..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/TestingIdentityFactoryTest.scala +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology - -import cats.data.EitherT -import cats.syntax.either.* -import com.digitalasset.canton.crypto.* -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.protocol.{DynamicSynchronizerParameters, SynchronizerParameters} -import com.digitalasset.canton.time.NonNegativeFiniteDuration -import com.digitalasset.canton.topology.transaction.{ParticipantAttributes, ParticipantPermission} -import com.digitalasset.canton.{BaseTest, FailOnShutdown, HasExecutionContext} -import org.scalatest.wordspec.AnyWordSpec - -import scala.concurrent.Await -import scala.concurrent.duration.* - -class TestingIdentityFactoryTest - extends AnyWordSpec - with BaseTest - with HasExecutionContext - with FailOnShutdown { - - import DefaultTestIdentities.* - - private def getMyHash(hashOps: HashOps, message: String = "dummySignature"): Hash = - hashOps.build(TestHash.testHashPurpose).addWithoutLengthPrefix(message).finish() - def await(eitherT: EitherT[FutureUnlessShutdown, SignatureCheckError, Unit]) = - eitherT.value.futureValueUS - - private def increaseConfirmationResponseTimeout(old: DynamicSynchronizerParameters) = - old.tryUpdate(confirmationResponseTimeout = - old.confirmationResponseTimeout + NonNegativeFiniteDuration.tryOfSeconds(1) - ) - - private val synchronizerParameters1 = SynchronizerParameters.WithValidity( - CantonTimestamp.Epoch, - Some(CantonTimestamp.ofEpochSecond(10)), - increaseConfirmationResponseTimeout(defaultDynamicSynchronizerParameters), - ) - - private val synchronizerParameters2 = SynchronizerParameters.WithValidity( - CantonTimestamp.ofEpochSecond(10), - None, - increaseConfirmationResponseTimeout(synchronizerParameters1.parameter), - ) - - val synchronizerParameters = List(synchronizerParameters1, synchronizerParameters2) - - "testing topology" when { - - def compare(setup: TestingIdentityFactory): Unit = { - val p1 = setup.forOwnerAndSynchronizer(participant1) - val p2 = setup.forOwnerAndSynchronizer(participant2) - val hash = getMyHash(p1.pureCrypto) - val hash2 = getMyHash(p1.pureCrypto, "somethingElse") - - val signature = - Await - .result( - p1.currentSnapshotApproximation.sign(hash, SigningKeyUsage.ProtocolOnly).value, - 10.seconds, - ) - .failOnShutdown - .valueOr(err => fail(s"Failed to sign: $err")) - - "signature of participant1 is verifiable by participant1" in { - await( - p1.currentSnapshotApproximation - .verifySignature(hash, participant1, signature, SigningKeyUsage.ProtocolOnly) - ) shouldBe Either.unit - } - "signature of participant1 is verifiable by participant2" in { - await( - p2.currentSnapshotApproximation - .verifySignature(hash, participant1, signature, SigningKeyUsage.ProtocolOnly) - ) shouldBe Either.unit - } - "signature verification fails for wrong key owner" in { - await( - p1.currentSnapshotApproximation - .verifySignature(hash, participant2, signature, SigningKeyUsage.ProtocolOnly) - ).left.value shouldBe a[SignatureCheckError.SignerHasNoValidKeys] - } - "signature fails for invalid hash" in { - await( - p1.currentSnapshotApproximation - .verifySignature(hash2, participant1, signature, SigningKeyUsage.ProtocolOnly) - ).left.value shouldBe a[SignatureCheckError] - await( - p1.currentSnapshotApproximation - .verifySignature(hash2, participant2, signature, SigningKeyUsage.ProtocolOnly) - ).left.value shouldBe a[SignatureCheckError] - } - "signature fails for invalid key usage" in { - 
await( - p1.currentSnapshotApproximation - .verifySignature( - hash, - participant1, - signature, - SigningKeyUsage.SequencerAuthenticationOnly, - ) - ).left.value shouldBe a[SignatureCheckError.SignatureWithWrongKey] - } - "participant1 is active" in { - Seq(p1, p2).foreach( - _.currentSnapshotApproximation.ipsSnapshot - .isParticipantActive(participant1) - .futureValueUS shouldBe true - ) - } - "party1 is active" in { - p1.currentSnapshotApproximation.ipsSnapshot - .activeParticipantsOf(party1.toLf) - .futureValueUS shouldBe Map( - participant1 -> ParticipantAttributes(ParticipantPermission.Confirmation) - ) - } - "participant2 can't sign messages without appropriate keys" in { - Await - .result( - p2.currentSnapshotApproximation.sign(hash, SigningKeyUsage.ProtocolOnly).value, - 10.seconds, - ) - .failOnShutdown - .left - .value shouldBe a[SyncCryptoError] - } - - def checkSynchronizerKeys( - sequencers: Seq[SequencerId], - mediators: Seq[MediatorId], - expectedLength: Int, - ): Unit = { - val allMembers = sequencers ++ mediators - val membersToKeys = p1.currentSnapshotApproximation.ipsSnapshot - .signingKeys(allMembers, SigningKeyUsage.All) - .futureValueUS - allMembers - .flatMap(membersToKeys.get(_)) - .foreach(_ should have length expectedLength.toLong) - } - - "synchronizer entities have keys" in { - val sequencers = p1.currentSnapshotApproximation.ipsSnapshot - .sequencerGroup() - .futureValueUS - .valueOrFail("did not find SequencerSynchronizerState") - .active - - val mediators = - p1.currentSnapshotApproximation.ipsSnapshot.mediatorGroups().futureValueUS.flatMap(_.all) - // We expect two different signing keys: one for protocol signing and one for sequencer authentication. - checkSynchronizerKeys(sequencers, mediators, 2) - } - "invalid synchronizer entities don't have keys" in { - val did = participant2.uid - require(did != DefaultTestIdentities.synchronizerId.unwrap) - checkSynchronizerKeys( - sequencers = Seq(SequencerId(participant2.uid.tryChangeId("fake-sequencer"))), - mediators = Seq(MediatorId(participant2.uid.tryChangeId("fake-mediator"))), - 0, - ) - } - - "serve synchronizer parameters corresponding to correct timestamp" in { - def getParameters(ts: CantonTimestamp): DynamicSynchronizerParameters = - p1.ips - .awaitSnapshot(ts) - .flatMap(_.findDynamicSynchronizerParametersOrDefault(testedProtocolVersion)) - .futureValueUS - - val transitionTs = synchronizerParameters1.validUntil.value - - getParameters(CantonTimestamp.Epoch) shouldBe defaultDynamicSynchronizerParameters - getParameters(transitionTs.minusMillis(1)) shouldBe synchronizerParameters1.parameter - - getParameters( - transitionTs - ) shouldBe synchronizerParameters1.parameter // validFrom is exclusive - - getParameters(transitionTs.plusMillis(1)) shouldBe synchronizerParameters2.parameter - } - - } - - "initialised directly" should { - val topology = Map( - party1.toLf -> Map( - participant1 -> ParticipantPermission.Confirmation - ) - ) - val setup = TestingTopology - .from( - topology = topology, - synchronizerParameters = synchronizerParameters, - participants = Map( - participant1 -> ParticipantAttributes(ParticipantPermission.Confirmation) - ), - ) - .build() - compare(setup) - // extend with admin parties should give participant2 a signing key - val crypto2 = TestingTopology - .from(topology = topology, synchronizerParameters = synchronizerParameters) - .withParticipants( - participant1 -> ParticipantAttributes(ParticipantPermission.Confirmation), - participant2 -> 
ParticipantAttributes(ParticipantPermission.Submission), - ) - .build() - val p1 = crypto2.forOwnerAndSynchronizer(participant1) - val p2 = crypto2.forOwnerAndSynchronizer(participant2) - - "extending with admin parties works" in { - def check(p: ParticipantId) = - p1.currentSnapshotApproximation.ipsSnapshot - .activeParticipantsOf(p.adminParty.toLf) - .futureValueUS - .keys shouldBe Set(p) - check(participant1) - check(participant2) - - } - - val hash = getMyHash(p2.currentSnapshotApproximation.pureCrypto) - - val signature = - Await - .result( - p2.currentSnapshotApproximation.sign(hash, SigningKeyUsage.ProtocolOnly).value, - 10.seconds, - ) - .failOnShutdown - .valueOr(err => fail(s"Failed to sign: $err")) - - "participant2 signatures are valid" in { - await( - p2.currentSnapshotApproximation - .verifySignature(hash, participant2, signature, SigningKeyUsage.ProtocolOnly) - ) shouldBe Either.unit - await( - p1.currentSnapshotApproximation - .verifySignature(hash, participant1, signature, SigningKeyUsage.ProtocolOnly) - ).left.value shouldBe a[SignatureCheckError] - } - - } - - "using reverse topology" should { - val setup = TestingTopology(synchronizerParameters = synchronizerParameters) - .withReversedTopology( - Map(participant1 -> Map(party1.toLf -> ParticipantPermission.Confirmation)) - ) - .withParticipants( - participant1 -> ParticipantAttributes(ParticipantPermission.Confirmation) - ) - .build() - compare(setup) - - "preserve topology and permissions" in { - val syncCryptoApi = - TestingTopology() - .withReversedTopology( - Map( - participant1 -> Map( - party1.toLf -> ParticipantPermission.Observation, - party2.toLf -> ParticipantPermission.Confirmation, - ), - participant2 -> Map(party1.toLf -> ParticipantPermission.Submission), - ) - ) - .build() - .forOwnerAndSynchronizer(participant1) - .currentSnapshotApproximation - def ol(permission: ParticipantPermission) = - ParticipantAttributes(permission) - syncCryptoApi.ipsSnapshot.activeParticipantsOf(party1.toLf).futureValueUS shouldBe Map( - participant1 -> ol(ParticipantPermission.Observation), - participant2 -> ol(ParticipantPermission.Submission), - ) - syncCryptoApi.ipsSnapshot.activeParticipantsOf(party3.toLf).futureValueUS shouldBe Map() - syncCryptoApi.ipsSnapshot.activeParticipantsOf(party2.toLf).futureValueUS shouldBe Map( - participant1 -> ol(ParticipantPermission.Confirmation) - ) - } - } - - "withTopology" should { - val setup = TestingTopology(synchronizerParameters = synchronizerParameters) - .withTopology( - Map(party1.toLf -> participant1), - ParticipantPermission.Confirmation, - ) - .withParticipants( - participant1 -> ParticipantAttributes(ParticipantPermission.Confirmation) - ) - .build() - compare(setup) - } - - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/TopologyManagerTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/TopologyManagerTest.scala deleted file mode 100644 index 706221c69e..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/TopologyManagerTest.scala +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology - -import com.digitalasset.canton.crypto.BaseCrypto -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.topology.processing.TopologyTransactionTestFactory -import com.digitalasset.canton.topology.store.TopologyStoreId -import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStore -import com.digitalasset.canton.topology.transaction.OwnerToKeyMapping -import com.digitalasset.canton.topology.transaction.TopologyMapping.Code -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.wordspec.AnyWordSpec - -class TopologyManagerTest extends AnyWordSpec with BaseTest with HasExecutionContext { - object Factory extends TopologyTransactionTestFactory(loggerFactory, parallelExecutionContext) - import Factory.* - - "TemporaryTopologyManager" should { - behave like permittingMissingSigningKeySignatures( - createTemporaryTopologyManager() - ) - } - "AuthorizedTopologyManager" should { - behave like rejectingMissingSigningKeySignatures( - createAuthorizedTopologyManager() - ) - } - "SynchronizerTopologyManager" should { - behave like rejectingMissingSigningKeySignatures( - createSynchronizerTopologyManager() - ) - } - - private def permittingMissingSigningKeySignatures( - topologyManager: TopologyManager[TopologyStoreId, BaseCrypto] - ): Unit = - "permit OwnerToKeyMappings with missing signing key signatures" in { - val okmS1k7_k1_missing_k7 = - okmS1k7_k1.removeSignatures(Set(SigningKeys.key7.fingerprint)).value - - topologyManager - .add( - Seq(ns1k1_k1, okmS1k7_k1_missing_k7), - ForceFlags.none, - expectFullAuthorization = true, - ) - .futureValueUS - .value - .unwrap - .futureValueUS - - val tx = topologyManager.store - .findPositiveTransactions( - CantonTimestamp.MaxValue, - asOfInclusive = false, - isProposal = false, - types = Seq(Code.OwnerToKeyMapping), - filterUid = None, - filterNamespace = None, - ) - .futureValueUS - .result - .loneElement - .transaction - - // check that the mapping only has key7 as target key - tx.selectMapping[OwnerToKeyMapping] - .value - .mapping - .keys - .forgetNE - .loneElement shouldBe SigningKeys.key7 - - // check that the only signature is from key1 - tx.signatures.forgetNE.loneElement.authorizingLongTermKey shouldBe SigningKeys.key1.fingerprint - } - - private def rejectingMissingSigningKeySignatures( - topologyManager: TopologyManager[TopologyStoreId, BaseCrypto] - ): Unit = - "reject OwnerToKeyMappings with missing signing key signatures" in { - val okmS1k7_k1_missing_k7 = - okmS1k7_k1.removeSignatures(Set(SigningKeys.key7.fingerprint)).value - - val error = loggerFactory.assertLogs( - topologyManager - .add( - Seq(ns1k1_k1, okmS1k7_k1_missing_k7), - ForceFlags.none, - expectFullAuthorization = true, - ) - .swap - .futureValueUS - .value, - _.shouldBeCantonError( - TopologyManagerError.UnauthorizedTransaction, - _ should include("Not authorized"), - ), - ) - error.code shouldBe TopologyManagerError.UnauthorizedTransaction - error.cause should include("Not authorized") - } - - private def createAuthorizedTopologyManager() = - new AuthorizedTopologyManager( - Factory.sequencer1.uid, - wallClock, - Factory.crypto, - new InMemoryTopologyStore( - TopologyStoreId.AuthorizedStore, - testedProtocolVersion, - loggerFactory, - timeouts, - ), - exitOnFatalFailures = exitOnFatal, - timeouts = timeouts, - futureSupervisor = futureSupervisor, - loggerFactory = loggerFactory, - ) - - private def 
createTemporaryTopologyManager() = - new TemporaryTopologyManager( - Factory.sequencer1.uid, - wallClock, - Factory.crypto, - new InMemoryTopologyStore( - TopologyStoreId.TemporaryStore.tryCreate("test"), - testedProtocolVersion, - loggerFactory, - timeouts, - ), - timeouts = timeouts, - futureSupervisor = futureSupervisor, - loggerFactory = loggerFactory, - ) - - private def createSynchronizerTopologyManager() = - new SynchronizerTopologyManager( - Factory.sequencer1.uid, - wallClock, - Factory.syncCryptoClient.crypto, - defaultStaticSynchronizerParameters, - new InMemoryTopologyStore( - TopologyStoreId.SynchronizerStore(Factory.physicalSynchronizerId1), - testedProtocolVersion, - loggerFactory, - timeouts, - ), - new SynchronizerOutboxQueue(loggerFactory), - exitOnFatalFailures = exitOnFatal, - timeouts = timeouts, - futureSupervisor = futureSupervisor, - loggerFactory = loggerFactory, - ) - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/UniqueIdentifierTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/UniqueIdentifierTest.scala deleted file mode 100644 index 9afccbe3b5..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/UniqueIdentifierTest.scala +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology - -import com.digitalasset.canton.ProtoDeserializationError.StringConversionError -import com.digitalasset.canton.{BaseTest, ProtoDeserializationError} -import org.scalatest.wordspec.AnyWordSpec - -class UniqueIdentifierTest extends AnyWordSpec with BaseTest { - - "safe simple string" when { - - "should" should { - "be a happy cookie and not return an error" in { - assert(UniqueIdentifier.verifyValidString("aAbbZ09-").isRight) - } - - "complain on any non simple string character" in { - "#%!><,;".foreach(x => assert(UniqueIdentifier.verifyValidString(x.toString).isLeft)) - - } - - "complain if the delimiter is used" in { - Seq("not::ok", "::not::ok", "::notok", "::not::ok::", "notok::").foreach(ss => - UniqueIdentifier.verifyValidString(ss).left.value shouldBe a[String] - ) - } - - } - - } - - "identifier" should { - "contain only simple characters" in { - forEvery(Seq("#a", "\\a", "/a", "ä")) { s => - UniqueIdentifier.create(s, s).left.value shouldBe a[String] - an[IllegalArgumentException] shouldBe thrownBy(UniqueIdentifier.tryCreate(s, s)) - UniqueIdentifier.fromProtoPrimitive_(s).left.value shouldBe a[ProtoDeserializationError] - } - } - } - - val uid: UniqueIdentifier = UniqueIdentifier.tryCreate("ABCefg123", "12345678") - - "unique identifier" when { - "simple string conversion" should { - "should be identical" in { - UniqueIdentifier.fromProtoPrimitive_(uid.toProtoPrimitive) shouldBe Right(uid) - } - } - - "reading from string" should { - "should yield the same identifier" in { - assertResult(uid)( - UniqueIdentifier.tryFromProtoPrimitive( - uid.identifier.unwrap + "::" + uid.fingerprint.unwrap - ) - ) - } - - "fail for invalid string using the delimiter" in { - Seq("::not::ok", "::not::ok", "::notok", "::not::ok::", "notok::").foreach(ss => - UniqueIdentifier.fromProtoPrimitive_(ss).left.value shouldBe a[ProtoDeserializationError] - ) - } - - "fail for generally invalid strings" in { - val templates = - Seq[String => String]( - x => s"${x}not::ok", - x => s"not$x::ok", - x => s"not::${x}ok", - x => 
s"not::ok$x", - ) - val checks = Seq("#", "%", "!", ">", "<", ",", ";", "::", ":::", "::::").flatMap(x => - templates.map(_(x)) - ) - forEvery(checks) { ss => - UniqueIdentifier.fromProtoPrimitive_(ss).left.value shouldBe a[ProtoDeserializationError] - } - } - - "succeed for valid strings" in { - Seq("is::ok", ":is::ok", "is::ok", ":is::ok:", "is::o:k:r:l:y").foreach(ss => - UniqueIdentifier.fromProtoPrimitive_(ss).value shouldBe a[UniqueIdentifier] - ) - } - - "throw an exception when using invalid characters" in { - assertThrows[IllegalArgumentException] { - UniqueIdentifier.tryFromProtoPrimitive("%%##!!:!@#@#") - } - } - - "produce sensible error messages " in { - UniqueIdentifier.fromProtoPrimitive_("Bank::") shouldEqual Left( - StringConversionError( - s"Fingerprint decoding of `Bank::` failed with: ${StringConversionError("Daml-LF Party is empty")}" - ) - ) - UniqueIdentifier.fromProtoPrimitive_("") shouldEqual Left( - StringConversionError("Empty string is not a valid unique identifier.") - ) - UniqueIdentifier.fromProtoPrimitive_("::Wurst") shouldEqual Left( - StringConversionError("Invalid unique identifier `::Wurst` with empty identifier.") - ) - UniqueIdentifier.fromProtoPrimitive_("aa::Wur:st::") shouldEqual Left( - StringConversionError( - s"Fingerprint decoding of `aa::Wur:st::` failed with: ${StringConversionError("String contains reserved delimiter `::`.")}" - ) - ) - UniqueIdentifier.fromProtoPrimitive_("::") shouldEqual Left( - StringConversionError("Invalid unique identifier `::` with empty identifier.") - ) - } - - "throw if namespace was ommitted" in { - assertThrows[IllegalArgumentException] { - UniqueIdentifier.tryFromProtoPrimitive("123456") - } - } - - } - - } - - "key owner serialization" should { - "be able to convert back and forth" in { - val pid = DefaultTestIdentities.participant1 - Member.fromProtoPrimitive(pid.toProtoPrimitive, "Pid") shouldBe Right(pid) - } - - "act sanely on invalid inputs" in { - Seq( - "nothing valid", - "not::enough", - "INVALID::da::default", - "::::", - "PAR::::", - "PAR::da::", - "::da::default", - ) - .foreach { str => - Member.fromProtoPrimitive(str, "owner").left.value shouldBe a[ProtoDeserializationError] - } - } - - } - - "SequencerId serialization" should { - "be able to convert back and forth" in { - val sequencerId = DefaultTestIdentities.daSequencerId - SequencerId - .fromProtoPrimitive(sequencerId.toProtoPrimitive, "sequencerId") - .value shouldBe sequencerId - } - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/client/CachingSynchronizerTopologyClientTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/client/CachingSynchronizerTopologyClientTest.scala deleted file mode 100644 index b0a4ce5f1c..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/client/CachingSynchronizerTopologyClientTest.scala +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.client - -import com.digitalasset.canton.concurrent.FutureSupervisor -import com.digitalasset.canton.config.RequireTypes.PositiveNumeric -import com.digitalasset.canton.config.{ - BatchingConfig, - CacheConfig, - CachingConfigs, - DefaultProcessingTimeouts, -} -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} -import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction -import com.digitalasset.canton.topology.{DefaultTestIdentities, KeyCollection, TestingOwnerWithKeys} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.{BaseTest, FailOnShutdown, SequencerCounter, config} -import org.scalatest.wordspec.AsyncWordSpecLike - -import scala.concurrent.Future - -object EffectiveTimeTestHelpers { - - import scala.language.implicitConversions - - implicit def toSequencedTime(ts: CantonTimestamp): SequencedTime = SequencedTime(ts) - implicit def toEffectiveTime(ts: CantonTimestamp): EffectiveTime = EffectiveTime(ts) - -} - -class CachingSynchronizerTopologyClientTest - extends AsyncWordSpecLike - with BaseTest - with FailOnShutdown { - - import EffectiveTimeTestHelpers.* - - private object Fixture { - - val owner = DefaultTestIdentities.sequencerId - val crypto = new TestingOwnerWithKeys(owner, loggerFactory, directExecutionContext) - val mockTransaction = mock[GenericSignedTopologyTransaction] - - val mockParent = mock[StoreBasedSynchronizerTopologyClient] - val mockSnapshot0 = mock[StoreBasedTopologySnapshot] - val mockSnapshot1 = mock[StoreBasedTopologySnapshot] - val mockSnapshot2 = mock[StoreBasedTopologySnapshot] - - val key1 = crypto.SigningKeys.key1 - val key2 = crypto.SigningKeys.key2 - - when(mockSnapshot0.allKeys(owner)) - .thenReturn(FutureUnlessShutdown.pure(KeyCollection(signingKeys = Seq(key1), Seq()))) - when(mockSnapshot1.allKeys(owner)) - .thenReturn(FutureUnlessShutdown.pure(KeyCollection(signingKeys = Seq(key1, key2), Seq()))) - when(mockSnapshot2.allKeys(owner)) - .thenReturn(FutureUnlessShutdown.pure(KeyCollection(signingKeys = Seq(key2), Seq()))) - - val cc = - new CachingSynchronizerTopologyClient( - mockParent, - CachingConfigs( - topologySnapshot = CacheConfig( - maximumSize = PositiveNumeric.tryCreate(100L), - expireAfterAccess = config.NonNegativeFiniteDuration.ofMinutes(5), - ) - ), - BatchingConfig(), - DefaultProcessingTimeouts.testing, - FutureSupervisor.Noop, - loggerFactory, - ) - - val ts1 = CantonTimestamp.Epoch - val ts0 = ts1.minusSeconds(60) - val ts2 = ts1.plusSeconds(60) - val ts3 = ts2.plusSeconds(60) - - when(mockParent.topologyKnownUntilTimestamp).thenReturn(ts3.plusSeconds(3)) - when(mockParent.approximateTimestamp).thenReturn(ts3) - when(mockParent.awaitTimestamp(any[CantonTimestamp])(any[TraceContext])) - .thenReturn(None) - when(mockParent.trySnapshot(ts0)).thenReturn(mockSnapshot0) - when(mockParent.trySnapshot(ts1)).thenReturn(mockSnapshot0) - when(mockParent.trySnapshot(ts1.immediateSuccessor)).thenReturn(mockSnapshot1) - when(mockParent.trySnapshot(ts2.immediateSuccessor)).thenReturn(mockSnapshot2) - when( - mockParent.observed( - any[CantonTimestamp], - any[CantonTimestamp], - any[SequencerCounter], - anySeq[GenericSignedTopologyTransaction], - )(any[TraceContext]) - ).thenReturn(FutureUnlessShutdown.unit) - - } - - "caching client" 
should { - import Fixture.* - - "return correct snapshot" in { - - for { - _ <- cc - .observed(ts1, ts1, SequencerCounter(1), Seq(mockTransaction)) - sp0a <- cc.snapshot(ts0) - sp0b <- cc.snapshot(ts1) - _ = cc.observed(ts1.plusSeconds(10), ts1.plusSeconds(10), SequencerCounter(1), Seq()) - keys0a <- sp0a.allKeys(owner) - keys0b <- sp0b.allKeys(owner) - keys1a <- cc.snapshot(ts1.plusSeconds(5)).flatMap(_.allKeys(owner)) - _ = cc.observed(ts2, ts2, SequencerCounter(1), Seq(mockTransaction)) - keys1b <- cc.snapshot(ts2).flatMap(_.allKeys(owner)) - _ = cc.observed(ts3, ts3, SequencerCounter(1), Seq()) - keys2a <- cc.snapshot(ts2.plusSeconds(5)).flatMap(_.allKeys(owner)) - keys2b <- cc.snapshot(ts3).flatMap(_.allKeys(owner)) - } yield { - keys0a.signingKeys shouldBe Seq(key1) - keys0b.signingKeys shouldBe Seq(key1) - keys1a.signingKeys shouldBe Seq(key1, key2) - keys1b.signingKeys shouldBe Seq(key1, key2) - keys2a.signingKeys shouldBe Seq(key2) - keys2b.signingKeys shouldBe Seq(key2) - } - - } - - "verify we have properly cached our values" in { - Future { - verify(mockSnapshot0, times(2)).allKeys(owner) - verify(mockSnapshot1, times(1)).allKeys(owner) - verify(mockSnapshot2, times(1)).allKeys(owner) - assert(true) - } - } - - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/client/DefaultHeadStateInitializerTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/client/DefaultHeadStateInitializerTest.scala deleted file mode 100644 index e2079e369f..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/client/DefaultHeadStateInitializerTest.scala +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.client - -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.topology.processing.{ApproximateTime, EffectiveTime, SequencedTime} -import com.digitalasset.canton.topology.store.{TopologyStore, TopologyStoreId} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.{BaseTest, FailOnShutdown, HasExecutionContext} -import org.scalatest.wordspec.AsyncWordSpec - -import java.time.Instant - -class DefaultHeadStateInitializerTest - extends AsyncWordSpec - with BaseTest - with HasExecutionContext - with FailOnShutdown { - - "DefaultHeadStateInitializer" should { - "initialize when topology store is non-empty" in { - val topologyClientMock = mock[SynchronizerTopologyClientWithInit] - val topologyStoreMock = mock[TopologyStore[TopologyStoreId.SynchronizerStore]] - val initializer = new DefaultHeadStateInitializer(topologyStoreMock) - - val maxSequencedTimestamp = - CantonTimestamp.assertFromInstant(Instant.parse("2024-11-19T12:00:00.000Z")) - val maxEffectiveTimestamp = maxSequencedTimestamp.plusMillis(250) - when(topologyStoreMock.maxTimestamp(SequencedTime.MaxValue, includeRejected = true)) - .thenReturn( - FutureUnlessShutdown.pure( - Some(SequencedTime(maxSequencedTimestamp) -> EffectiveTime(maxEffectiveTimestamp)) - ) - ) - - initializer - .initialize(topologyClientMock) - .map { _ => - verify(topologyClientMock).updateHead( - SequencedTime(maxSequencedTimestamp), - EffectiveTime(maxEffectiveTimestamp), - ApproximateTime(maxEffectiveTimestamp), - potentialTopologyChange = true, - ) - succeed - } - } - - "not initialize when the topology store is empty" in { - val topologyClientMock = mock[SynchronizerTopologyClientWithInit] - val topologyStoreMock = mock[TopologyStore[TopologyStoreId.SynchronizerStore]] - when(topologyStoreMock.maxTimestamp(SequencedTime.MaxValue, includeRejected = true)) - .thenReturn(FutureUnlessShutdown.pure(None)) - val initializer = new DefaultHeadStateInitializer(topologyStoreMock) - - initializer - .initialize(topologyClientMock) - .map { _ => - verify(topologyClientMock, never).updateHead( - any[SequencedTime], - any[EffectiveTime], - any[ApproximateTime], - any[Boolean], - )(any[TraceContext]) - succeed - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClientTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClientTest.scala deleted file mode 100644 index 3ec130bc33..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClientTest.scala +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.client - -import cats.syntax.either.* -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.topology.* -import com.digitalasset.canton.topology.client.PartyTopologySnapshotClient.PartyInfo -import com.digitalasset.canton.topology.transaction.{ParticipantAttributes, ParticipantPermission} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.{BaseTest, FailOnShutdown, LfPartyId} -import org.scalatest.wordspec.AsyncWordSpec - -import scala.Ordered.orderingToOrdered -import scala.collection.immutable -import scala.concurrent.ExecutionContext - -class PartyTopologySnapshotClientTest extends AsyncWordSpec with BaseTest with FailOnShutdown { - - import DefaultTestIdentities.* - - "party topology snapshot client" should { - lazy val topology = Map( - party1.toLf -> PartyInfo.nonConsortiumPartyInfo( - Map( - participant1 -> ParticipantAttributes(ParticipantPermission.Submission), - participant2 -> ParticipantAttributes(ParticipantPermission.Observation), - ) - ), - party2.toLf -> PartyInfo.nonConsortiumPartyInfo( - Map( - participant2 -> ParticipantAttributes(ParticipantPermission.Observation) - ) - ), - ) - lazy val client = new PartyTopologySnapshotClient - with BaseTopologySnapshotClient - with PartyTopologySnapshotBaseClient { - override def activeParticipantsOf( - party: LfPartyId - )(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Map[ParticipantId, ParticipantAttributes]] = - FutureUnlessShutdown.pure( - topology.get(party).fold(Map.empty[ParticipantId, ParticipantAttributes])(_.participants) - ) - override protected implicit def executionContext: ExecutionContext = - PartyTopologySnapshotClientTest.this.executionContext - override def timestamp: CantonTimestamp = ??? - override def inspectKnownParties( - filterParty: String, - filterParticipant: String, - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Set[PartyId]] = - ??? - - override def activeParticipantsOfParties( - parties: Seq[LfPartyId] - )(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Map[LfPartyId, Set[ParticipantId]]] = ??? - - override def activeParticipantsOfPartiesWithInfo( - parties: Seq[LfPartyId] - )(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Map[LfPartyId, PartyInfo]] = - FutureUnlessShutdown.pure( - parties.map { party => - party -> topology.getOrElse(party, PartyInfo.EmptyPartyInfo) - }.toMap - ) - - override def consortiumThresholds( - parties: Set[LfPartyId] - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Map[LfPartyId, PositiveInt]] = - ??? - - override def canNotSubmit( - participant: ParticipantId, - parties: Seq[LfPartyId], - )(implicit traceContext: TraceContext): FutureUnlessShutdown[immutable.Iterable[LfPartyId]] = - ??? 
- } - - "allHaveActiveParticipants should yield correct results" in { - for { - right1 <- client.allHaveActiveParticipants(Set(party1.toLf)).value - right2 <- client.allHaveActiveParticipants(Set(party1.toLf, party2.toLf)).value - left1 <- client.allHaveActiveParticipants(Set(party1.toLf, party2.toLf), _.canConfirm).value - left2 <- client.allHaveActiveParticipants(Set(party1.toLf, party3.toLf)).value - left3 <- client.allHaveActiveParticipants(Set(party3.toLf)).value - } yield { - right1 shouldBe Either.unit - right2 shouldBe Either.unit - left1.left.value shouldBe a[Set[_]] - left2.left.value shouldBe a[Set[_]] - left3.left.value shouldBe a[Set[_]] - } - } - - "allHostedOn should yield correct results" in { - for { - yes1 <- client.allHostedOn(Set(party1.toLf), participant1) - yes2 <- client.allHostedOn(Set(party1.toLf), participant2) - no1 <- client.allHostedOn(Set(party1.toLf), participant2, _.canConfirm) - no2 <- client.allHostedOn(Set(party1.toLf, party3.toLf), participant1) - no3 <- client.allHostedOn( - Set(party1.toLf, party2.toLf), - participant2, - _.canConfirm, - ) - yes3 <- client.allHostedOn( - Set(party1.toLf, party2.toLf), - participant2, - _.permission >= ParticipantPermission.Observation, - ) - } yield { - yes1 shouldBe true - yes2 shouldBe true - yes3 shouldBe true - no1 shouldBe false - no2 shouldBe false - no3 shouldBe false - } - } - - "canConfirm should yield correct results" in { - for { - yes1 <- client.canConfirm(participant1, Set(party1.toLf)) - no1 <- client.canConfirm(participant1, Set(party2.toLf)) - no2 <- client.canConfirm(participant2, Set(party1.toLf)) - } yield { - yes1 shouldBe Set(party1.toLf) - no1 shouldBe Set.empty - no2 shouldBe Set.empty - } - } - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/client/StoreBasedSynchronizerTopologyClientTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/client/StoreBasedSynchronizerTopologyClientTest.scala deleted file mode 100644 index f941ceb6f5..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/client/StoreBasedSynchronizerTopologyClientTest.scala +++ /dev/null @@ -1,384 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.client - -import com.digitalasset.canton.concurrent.FutureSupervisor -import com.digitalasset.canton.config.DefaultProcessingTimeouts -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.crypto.{SigningKeyUsage, SigningPublicKey} -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.store.db.{DbTest, H2Test, PostgresTest} -import com.digitalasset.canton.time.Clock -import com.digitalasset.canton.topology.* -import com.digitalasset.canton.topology.processing.{ApproximateTime, EffectiveTime, SequencedTime} -import com.digitalasset.canton.topology.store.db.DbTopologyStoreHelper -import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStore -import com.digitalasset.canton.topology.store.{ - TopologyStore, - TopologyStoreId, - ValidatedTopologyTransaction, -} -import com.digitalasset.canton.topology.transaction.* -import com.digitalasset.canton.topology.transaction.ParticipantPermission.* -import com.digitalasset.canton.{BaseTest, FailOnShutdown, HasExecutionContext, SequencerCounter} -import org.scalatest.wordspec.AsyncWordSpec - -@SuppressWarnings(Array("org.wartremover.warts.Product", "org.wartremover.warts.Serializable")) -trait StoreBasedTopologySnapshotTest - extends AsyncWordSpec - with BaseTest - with HasExecutionContext - with FailOnShutdown { - - import EffectiveTimeTestHelpers.* - - def topologySnapshot( - mk: () => TopologyStore[TopologyStoreId.SynchronizerStore] - ): Unit = { - - val factory = new TestingOwnerWithKeys( - DefaultTestIdentities.participant1, - loggerFactory, - parallelExecutionContext, - ) - import DefaultTestIdentities.* - import factory.* - import factory.TestingTransactions.* - - lazy val party1participant1 = mkAdd( - PartyToParticipant.tryCreate( - party1, - PositiveInt.one, - Seq(HostingParticipant(participant1, Confirmation)), - ) - ) - lazy val party2participant1_2 = mkAdd( - PartyToParticipant.tryCreate( - party2, - PositiveInt.one, - Seq( - HostingParticipant(participant1, Submission), - HostingParticipant(participant2, Submission), - ), - ) - ) - - class Fixture { - val store: TopologyStore[TopologyStoreId.SynchronizerStore] = mk() - val client = - new StoreBasedSynchronizerTopologyClient( - mock[Clock], - store, - StoreBasedSynchronizerTopologyClient.NoPackageDependencies, - DefaultProcessingTimeouts.testing, - FutureSupervisor.Noop, - loggerFactory, - ) - - def add( - timestamp: CantonTimestamp, - transactions: Seq[SignedTopologyTransaction[TopologyChangeOp, TopologyMapping]], - ): FutureUnlessShutdown[Unit] = - for { - _ <- store.update( - SequencedTime(timestamp), - EffectiveTime(timestamp), - removeMapping = transactions.map(tx => tx.mapping.uniqueKey -> tx.serial).toMap, - removeTxs = transactions.map(_.hash).toSet, - additions = transactions.map(ValidatedTopologyTransaction(_)), - ) - _ <- client - .observed(timestamp, timestamp, SequencerCounter(1), transactions) - } yield () - - def observed(ts: CantonTimestamp): Unit = - observed(SequencedTime(ts), EffectiveTime(ts)) - - def observed(st: SequencedTime, et: EffectiveTime): Unit = - client - .observed(st, et, SequencerCounter(0), List()) - .futureValueUS - - def updateHead( - st: SequencedTime, - et: EffectiveTime, - at: ApproximateTime, - potentialTopologyChange: Boolean, - ): Unit = - client - .updateHead(st, et, at, potentialTopologyChange) - } - - "waiting for 
snapshots" should { - - val ts1 = CantonTimestamp.Epoch - val ts2 = ts1.plusSeconds(60) - - "announce snapshot if there is one" in { - val fixture = new Fixture() - import fixture.* - observed(ts1) - client.snapshotAvailable(ts1) shouldBe true - client.snapshotAvailable(ts2) shouldBe false - observed(ts2.immediatePredecessor) - client.snapshotAvailable(ts2) shouldBe true - } - - "correctly get notified" in { - val fixture = new Fixture() - import fixture.* - val awaitTimestampF = client.awaitTimestamp(ts2).getOrElse(fail("expected future")) - observed(ts1) - awaitTimestampF.isCompleted shouldBe false - observed(ts2.immediatePredecessor) - awaitTimestampF.isCompleted shouldBe true - } - - "just return None if snapshot already exists" in { - val fixture = new Fixture() - import fixture.* - observed(ts1) - val awaitTimestampF = client.awaitTimestamp(ts1) - awaitTimestampF shouldBe None - } - } - - "waiting for sequenced time" should { - val ts1 = CantonTimestamp.Epoch - val ts2 = ts1.plusSeconds(60) - - "correctly get notified on observed" in { - val fixture = new Fixture() - import fixture.* - val awaitSequencedTimestampF = - client.awaitSequencedTimestamp(ts2).getOrElse(fail("expected future")) - - observed(SequencedTime(ts1), EffectiveTime(ts1)) - awaitSequencedTimestampF.isCompleted shouldBe false - observed(SequencedTime(ts2), EffectiveTime(ts1)) - awaitSequencedTimestampF.isCompleted shouldBe true - } - - "correctly get notified on updateHead" in { - Table("potential topology change", true, false).forEvery { potentialTopologyChange => - val fixture = new Fixture() - import fixture.* - val awaitSequencedTimestampF = - client.awaitSequencedTimestamp(ts2).getOrElse(fail("expected future")) - - updateHead( - SequencedTime(ts1), - EffectiveTime(ts1), - ApproximateTime(ts1), - potentialTopologyChange, - ) - awaitSequencedTimestampF.isCompleted shouldBe false - updateHead( - SequencedTime(ts2), - EffectiveTime(ts1), - ApproximateTime(ts1), - potentialTopologyChange, - ) - awaitSequencedTimestampF.isCompleted shouldBe true - } - } - - "just return None if sequenced time already known" in { - val fixture = new Fixture() - import fixture.* - observed(SequencedTime(ts1), EffectiveTime(CantonTimestamp.MinValue)) - client.awaitSequencedTimestamp(ts1) shouldBe None - } - } - - "work with empty store" in { - val fixture = new Fixture() - import fixture.* - val _ = client.currentSnapshotApproximation - val mrt = client.approximateTimestamp - val sp = client.trySnapshot(mrt) - for { - parties <- sp.activeParticipantsOf(party1.toLf) - keys <- sp.signingKeys(participant1, SigningKeyUsage.All) - } yield { - parties shouldBe empty - keys shouldBe empty - } - } - - def compareMappings( - result: Map[ParticipantId, ParticipantAttributes], - expected: Map[ParticipantId, ParticipantPermission], - ) = - result.map(x => (x._1, x._2.permission)) shouldBe expected - - def compareKeys(result: Seq[SigningPublicKey], expected: Seq[SigningPublicKey]) = - result.map(_.fingerprint) shouldBe expected.map(_.fingerprint) - - "deliver correct results" in { - val fixture = new Fixture() - for { - _ <- fixture.add( - ts, - Seq( - dpc1, - p1_nsk2, - p1_otk, - p1_dtc, - p2_nsk2, - party1participant1, - party2participant1_2, - ), - ) - _ = fixture.client.observed( - ts.immediateSuccessor, - ts.immediateSuccessor, - SequencerCounter(0), - Seq(), - ) - recent = fixture.client.currentSnapshotApproximation - party1Mappings <- recent.activeParticipantsOf(party1.toLf) - party2Mappings <- recent.activeParticipantsOf(party2.toLf) - keys 
<- recent.signingKeys(participant1, SigningKeyUsage.All) - } yield { - party1Mappings.keySet shouldBe Set(participant1) - party1Mappings.get(participant1).map(_.permission) shouldBe Some( - ParticipantPermission.Confirmation - ) - party2Mappings.keySet shouldBe Set(participant1) - party2Mappings.get(participant1).map(_.permission) shouldBe Some( - ParticipantPermission.Submission - ) - keys.map(_.id) shouldBe Seq(SigningKeys.key1.id) - } - } - - "properly deals with participants with lower synchronizer privileges" in { - val fixture = new Fixture() - for { - _ <- fixture.add(ts, Seq(dpc1, p1_otk, p1_dtc, party1participant1, p1_pdp_observation)) - _ = fixture.client.observed( - ts.immediateSuccessor, - ts.immediateSuccessor, - SequencerCounter(0), - Seq(), - ) - snapshot <- fixture.client.snapshot(ts.immediateSuccessor) - party1Mappings <- snapshot.activeParticipantsOf(party1.toLf) - } yield { - compareMappings(party1Mappings, Map(participant1 -> ParticipantPermission.Observation)) - } - } - - "work properly with updates" in { - val fixture = new Fixture() - val ts2 = ts1.plusSeconds(1) - for { - _ <- fixture.add( - ts, - Seq( - seq_okm_k2, - dpc1, - p1_otk, - p1_dtc, - party1participant1, - party2participant1_2, - ), - ) - _ <- fixture.add( - ts1, - Seq( - mkRemoveTx(seq_okm_k2), - med_okm_k3, - p2_otk, - p2_dtc, - p1_pdp_observation, - p2_pdp_confirmation, - ), - ) - _ <- fixture.add(ts2, Seq(mkRemoveTx(p1_pdp_observation), mkRemoveTx(p1_dtc))) - _ = fixture.client.observed( - ts2.immediateSuccessor, - ts2.immediateSuccessor, - SequencerCounter(0), - Seq(), - ) - snapshotA <- fixture.client.snapshot(ts1) - snapshotB <- fixture.client.snapshot(ts1.immediateSuccessor) - snapshotC <- fixture.client.snapshot(ts2.immediateSuccessor) - party1Ma <- snapshotA.activeParticipantsOf(party1.toLf) - party1Mb <- snapshotB.activeParticipantsOf(party1.toLf) - party2Ma <- snapshotA.activeParticipantsOf(party2.toLf) - party2Mb <- snapshotB.activeParticipantsOf(party2.toLf) - party2Mc <- snapshotC.activeParticipantsOf(party2.toLf) - keysMa <- snapshotA.signingKeys(mediatorId, SigningKeyUsage.All) - keysMb <- snapshotB.signingKeys(mediatorId, SigningKeyUsage.All) - keysSa <- snapshotA.signingKeys(sequencerId, SigningKeyUsage.All) - keysSb <- snapshotB.signingKeys(sequencerId, SigningKeyUsage.All) - partPermA <- snapshotA.findParticipantState(participant1) - partPermB <- snapshotB.findParticipantState(participant1) - partPermC <- snapshotC.findParticipantState(participant1) - admin1a <- snapshotA.activeParticipantsOf(participant1.adminParty.toLf) - admin1b <- snapshotB.activeParticipantsOf(participant1.adminParty.toLf) - } yield { - compareMappings(party1Ma, Map(participant1 -> ParticipantPermission.Confirmation)) - compareMappings(party1Mb, Map(participant1 -> ParticipantPermission.Observation)) - compareMappings(party2Ma, Map(participant1 -> ParticipantPermission.Submission)) - compareMappings( - party2Mb, - Map( - participant1 -> ParticipantPermission.Observation, - participant2 -> ParticipantPermission.Confirmation, - ), - ) - compareMappings(party2Mc, Map(participant2 -> ParticipantPermission.Confirmation)) - compareKeys(keysMa, Seq()) - compareKeys(keysMb, Seq(SigningKeys.key3)) - compareKeys(keysSa, Seq(SigningKeys.key2)) - compareKeys(keysSb, Seq()) - partPermA - .valueOrFail("No permission for participant1 in snapshotA") - .permission shouldBe ParticipantPermission.Submission - partPermB - .valueOrFail("No permission for participant1 in snapshotB") - .permission shouldBe 
ParticipantPermission.Observation - partPermC shouldBe None - compareMappings(admin1a, Map(participant1 -> ParticipantPermission.Submission)) - compareMappings(admin1b, Map(participant1 -> ParticipantPermission.Observation)) - } - } - } -} - -class StoreBasedTopologySnapshotTestInMemory extends StoreBasedTopologySnapshotTest { - "InMemoryTopologyStore" should { - behave like topologySnapshot(() => - new InMemoryTopologyStore( - TopologyStoreId.SynchronizerStore(DefaultTestIdentities.physicalSynchronizerId), - testedProtocolVersion, - loggerFactory, - timeouts, - ) - ) - } -} - -trait DbStoreBasedTopologySnapshotTest - extends StoreBasedTopologySnapshotTest - with DbTopologyStoreHelper { - - this: AsyncWordSpec with BaseTest with HasExecutionContext with DbTest => - - "DbStoreBasedTopologySnapshot" should { - behave like topologySnapshot(() => mkStore(DefaultTestIdentities.physicalSynchronizerId)) - } - -} - -class DbStoreBasedTopologySnapshotTestPostgres - extends DbStoreBasedTopologySnapshotTest - with PostgresTest - -class DbStoreBasedTopologySnapshotTestH2 extends DbStoreBasedTopologySnapshotTest with H2Test diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/AuthorizationGraphTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/AuthorizationGraphTest.scala deleted file mode 100644 index 7c76cf9a71..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/AuthorizationGraphTest.scala +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.processing - -import cats.instances.order.* -import com.digitalasset.canton.topology.Namespace -import com.digitalasset.canton.topology.transaction.TopologyMapping.Code -import com.digitalasset.canton.{BaseTestWordSpec, ProtocolVersionChecksAnyWordSpec} -import org.scalatest.wordspec.AnyWordSpec - -class AuthorizationGraphTest - extends AnyWordSpec - with BaseTestWordSpec - with ProtocolVersionChecksAnyWordSpec - with BaseAuthorizationGraphTest { - - private def mkGraph: AuthorizationGraph = - new AuthorizationGraph(namespace, extraDebugInfo = true, loggerFactory) - - import factory.SigningKeys.* - - "authorization graph" when { - "under normal conditions" should { - "add simple" in { - val graph = mkGraph - graph.replace(nsk1k1) - forAll(allMappings)(check(graph, _, valid = true)(key1)) - forAll(allMappings)(check(graph, _, valid = false)(key2)) - } - "support longer chains" in { - val graph = mkGraph - graph.replace(nsk1k1) - graph.replace(nsk2k1) - graph.replace(nsk3k2) - forAll(allMappings)(check(graph, _, valid = true)(key2)) - forAll(allMappings)(check(graph, _, valid = true)(key3)) - } - - "support removal" in { - val graph = mkGraph - graph.replace(nsk1k1) - graph.replace(nsk2k1) - graph.remove(nsk2k1_remove) - forAll(allMappings)(check(graph, _, valid = false)(key2)) - forAll(allMappings)(check(graph, _, valid = true)(key1)) - } - "support breaking and re-creating chains" in { - val graph = mkGraph - graph.replace(nsk1k1) - graph.replace(nsk2k1) - graph.replace(nsk3k2) - forAll(allMappings)(check(graph, _, valid = true)(key2)) - forAll(allMappings)(check(graph, _, valid = true)(key3)) - loggerFactory.assertLogs( - graph.remove(nsk2k1_remove), - _.warningMessage should (include regex s"dangling.*${key3.fingerprint}"), - ) - 
forAll(allMappings)(check(graph, _, valid = false)(key2)) - forAll(allMappings)(check(graph, _, valid = false)(key3)) - graph.replace(nsk2k1) - forAll(allMappings)(check(graph, _, valid = true)(key2)) - forAll(allMappings)(check(graph, _, valid = true)(key3)) - } - "not support several chains" in { - val graph = mkGraph - graph.replace(nsk1k1) - graph.replace(nsk2k1) - graph.replace(nsk3k2) - forAll(allMappings)(check(graph, _, valid = true)(key3)) - graph.replace(nsk3k1_nonRoot) - forAll(allButNSD)(check(graph, _, valid = true)(key3)) - check(graph, Code.NamespaceDelegation, valid = false)(key3) - graph.remove(nsk3k1_nonRoot_remove) - forAll(allMappings)(check(graph, _, valid = false)(key3)) - } - - "deal with cycles" in { - val graph = mkGraph - graph.replace(nsk1k1) - graph.replace(nsk2k1) - graph.replace(nsk3k2) - - val danglingKeys = List(key2, key3).map(_.fingerprint).sorted.mkString(", ") - loggerFactory.assertLogs( - // this overwrites nsk2k1, leading to a break in the authorization chain for the now dangling k2 and k3 - graph.replace(nsk2k3), - _.warningMessage should (include regex s"dangling.*$danglingKeys"), - ) - forAll(allMappings)(check(graph, _, valid = true)(key1)) - forAll(allMappings)(check(graph, _, valid = false)(key2)) - forAll(allMappings)(check(graph, _, valid = false)(key3)) - } - - "deal with root certificate revocations" in { - val graph = mkGraph - graph.replace(nsk1k1) - graph.replace(nsk2k1) - graph.replace(nsk3k2) - - val danglingKeys = List(key2, key3).map(_.fingerprint).sorted.mkString(", ") - loggerFactory.assertLogs( - graph.remove(nsk1k1_remove), - _.warningMessage should (include regex s"dangling.*$danglingKeys"), - ) - forAll(allMappings)(check(graph, _, valid = false)(key1)) - forAll(allMappings)(check(graph, _, valid = false)(key2)) - forAll(allMappings)(check(graph, _, valid = false)(key3)) - } - - "correctly distinguish on delegations that are not allowed to sign a NamespaceDelegation" in { - val graph = mkGraph - graph.replace(nsk1k1) - graph.replace(nsk3k1_nonRoot) - forAll(allMappings)(check(graph, _, valid = true)(key1)) - check(graph, Code.NamespaceDelegation, valid = false)(key3) - forAll(allButNSD)(check(graph, _, valid = true)(key3)) - } - - "deal with same mappings used twice" in { - val graph = mkGraph - graph.replace(nsk1k1) - graph.replace(nsk2k1) - forAll(allMappings)(check(graph, _, valid = true)(key2)) - // test that random key is not authorized - forAll(allMappings)(check(graph, _, valid = false)(key3)) - // remove first certificate - graph.remove(nsk2k1_remove) - forAll(allMappings)(check(graph, _, valid = false)(key2)) - // add other certificate (we don't remember removes, so we can do that in this test) - graph.replace(nsk2k1) - forAll(allMappings)(check(graph, _, valid = true)(key2)) - } - - "reject delegations with a wrong namespace" in { - val graph = mkGraph - val fakeNs = Namespace(key8.fingerprint) - val nsk1k1 = mkAdd(mkNSD(fakeNs, key1, canSignNamespaceDelegations = true), key1) - loggerFactory.assertThrowsAndLogs[IllegalArgumentException]( - graph.replace(nsk1k1), - _.errorMessage should include("internal error"), - ) - } - - "test removal of transactions authorized with different keys" in { - // can actually do it (add k2 with one key, remove k2 permission with another, but fail to remove it with the other is not valid) - val graph = mkGraph - graph.replace(nsk1k1) - graph.replace(nsk2k1) - graph.replace(nsk3k2) - forAll(allMappings)(check(graph, _, valid = true)(key3)) - - graph.remove(replaceSignature(nsk3k2_remove, 
key1)) - forAll(allMappings)(check(graph, _, valid = false)(key3)) - } - } - - // tested elsewhere: an authorized transaction is rejected if the signature does not match the content or key - "under adverse conditions" should { - "ensure that an unauthorized addition has no effect" in { - val graph = mkGraph - graph.replace(nsk1k1) - forAll(allMappings)(check(graph, _, valid = false)(key2)) - - loggerFactory.assertLogs( - graph.replace(nsk3k2), - _.warningMessage should (include regex s"$namespace are dangling: .*${key3.fingerprint}"), - ) - forAll(allMappings)(check(graph, _, valid = false)(key3)) - } - - "process an unauthorized removal" in { - val graph = mkGraph - graph.replace(nsk1k1) - graph.replace(nsk2k1) - forAll(allMappings)(check(graph, _, valid = true)(key2)) - val fakeRemove = replaceSignature(nsk2k1_remove, key6) - forAll(allMappings)(check(graph, _, valid = false)(key6)) - graph.remove(fakeRemove) - forAll(allMappings)(check(graph, _, valid = false)(key2)) - } - - "ensure that namespace delegations can only be signed by keys with the appropriate delegation restrictions" in { - val graph = mkGraph - graph.replace(nsk1k1) - graph.replace(nsk3k1_nonRoot) - forAll(allButNSD)(check(graph, _, valid = true)(key3)) - check(graph, Code.NamespaceDelegation, valid = false)(key3) - - // add delegations signed by k3 via an unauthorized add - loggerFactory.assertLogs( - graph.replace(Seq(nsk4k3, nsk5k3_nonRoot)), - _.warningMessage should ((include regex s"$namespace are dangling: .*${key4.fingerprint}") and (include regex s"$namespace are dangling: .*${key5.fingerprint}")), - ) - forAll(allMappings)(check(graph, _, valid = false)(key4)) - forAll(allMappings)(check(graph, _, valid = false)(key5)) - } - - "update authorizations when downgrading to delegations that cannot sign NSDs" in { - /* This could happen in the following scenario: - 1. root-cert --k1-> NSD(k2,canSignNSD=true) --k2-> NSD(k3,canSignNSD=true) - 2. downgrade to NSD(k3,canSignNSD=false) - 3.
downgrade to NSD(k2,canSignNSD=false) - - */ - val graph = mkGraph - graph.replace(nsk1k1) - // first set up the delegations that can sign NSD - graph.replace(nsk2k1) - graph.replace(nsk3k2) - forAll(allMappings)(check(graph, _, valid = true)(key2)) - forAll(allMappings)(check(graph, _, valid = true)(key3)) - - // now downgrade in reverse order - graph.replace(nsk3k2_nonRoot) - forAll(allMappings)(check(graph, _, valid = true)(key2)) - // key3 cannot sign NSD - check(graph, Code.NamespaceDelegation, valid = false)(key3) - // but key3 can still sign all other mappings - forAll(allButNSD)(check(graph, _, valid = true)(key3)) - - loggerFactory.assertLogs( - // downgrading key2 so that it cannot sign NSD breaks the authorization chain for key3 - graph.replace(nsk2k1_nonRoot), - _.warningMessage should (include regex s"$namespace are dangling: .*${key3.fingerprint}"), - ) - // key2 cannot sign NSD - check(graph, Code.NamespaceDelegation, valid = false)(key2) - forAll(allButNSD)(check(graph, _, valid = true)(key2)) - - // key3 should not be considered authorized anymore at all - forAll(allMappings)(check(graph, _, valid = false)(key3)) - } - - "ensure once a delegation is revoked, all dependent authorizations will become unauthorized" in { - val graph = mkGraph - val nsk4k3 = mkAdd(mkNSD(namespace, key4, canSignNamespaceDelegations = true), key3) - val nsk5k3 = mkAdd(mkNSD(namespace, key5, canSignNamespaceDelegations = true), key3) - graph.replace(nsk1k1) - graph.replace(nsk2k1) - graph.replace(nsk3k2) - graph.replace(nsk4k3) - graph.replace(nsk5k3) - Seq(key3, key4, key5).foreach(key => - forAll(allMappings)(check(graph, _, valid = true)(key)) - ) - loggerFactory.assertLogs( - { - graph.remove(nsk2k1_remove) - Seq(key3, key4, key5).foreach(key => - forAll(allMappings)(check(graph, _, valid = false)(key)) - ) - }, - _.warningMessage should include("The following target keys"), - ) - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/BaseAuthorizationGraphTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/BaseAuthorizationGraphTest.scala deleted file mode 100644 index b372011339..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/BaseAuthorizationGraphTest.scala +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.processing - -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.crypto.{Fingerprint, SigningKeyUsage, SigningPublicKey} -import com.digitalasset.canton.topology.DefaultTestIdentities.sequencerId -import com.digitalasset.canton.topology.transaction.DelegationRestriction.{ - CanSignAllButNamespaceDelegations, - CanSignAllMappings, - CanSignSpecificMappings, -} -import com.digitalasset.canton.topology.transaction.{ - DecentralizedNamespaceDefinition, - DelegationRestriction, - NamespaceDelegation, - SingleTransactionSignature, - TopologyMapping, -} -import com.digitalasset.canton.topology.{Namespace, TestingOwnerWithKeys} -import org.scalatest.Assertion - -import scala.util.Random - -trait BaseAuthorizationGraphTest { self: BaseTest => - - private val seed = Random.nextLong() - private val random = new Random(seed) - logger.debug(s"Running ${this.getClass} with seed $seed") - - val factory = new TestingOwnerWithKeys(sequencerId, loggerFactory, directExecutionContext) - import factory.SigningKeys.* - - val allMappings = TopologyMapping.Code.all.toSet - val allButNSD = allMappings - TopologyMapping.Code.NamespaceDelegation - - val namespace = Namespace(key1.fingerprint) - val nsk1k1 = mkAdd(mkNSD(namespace, key1, canSignNamespaceDelegations = true), key1) - val nsk1k1_remove = mkRemove(mkNSD(namespace, key1, canSignNamespaceDelegations = true), key1) - val nsk2k1 = mkAdd(mkNSD(namespace, key2, canSignNamespaceDelegations = true), key1) - val nsk2k3 = mkAdd(mkNSD(namespace, key2, canSignNamespaceDelegations = true), key3) - val nsk2k1_remove = mkRemove(mkNSD(namespace, key2, canSignNamespaceDelegations = true), key1) - val nsk2k1_nonRoot = mkAdd(mkNSD(namespace, key2, canSignNamespaceDelegations = false), key1) - val nsk3k2 = mkAdd(mkNSD(namespace, key3, canSignNamespaceDelegations = true), key2) - val nsk3k2_remove = mkRemove(mkNSD(namespace, key3, canSignNamespaceDelegations = true), key2) - val nsk3k2_nonRoot = mkAdd(mkNSD(namespace, key3, canSignNamespaceDelegations = false), key2) - val nsk3k1_nonRoot = mkAdd(mkNSD(namespace, key3, canSignNamespaceDelegations = false), key1) - val nsk3k1_nonRoot_remove = - mkRemove(mkNSD(namespace, key3, canSignNamespaceDelegations = false), key1) - val nsk4k3 = mkAdd(mkNSD(namespace, key4, canSignNamespaceDelegations = true), key3) - val nsk5k3_nonRoot = mkAdd(mkNSD(namespace, key5, canSignNamespaceDelegations = false), key3) - - val decentralizedNamespace = - Namespace(Fingerprint.tryFromString("decentralized-namespace-fingerprint")) - val ns1 = Namespace(key1.fingerprint) - val ns2 = Namespace(key2.fingerprint) - val ns3 = Namespace(key3.fingerprint) - val owners = NonEmpty(Set, ns1, ns2, ns3) - val decentralizedNamespaceDefinition = - DecentralizedNamespaceDefinition - .create(decentralizedNamespace, PositiveInt.two, owners) - .fold(sys.error, identity) - - val ns1k1k1 = mkAdd(mkNSD(ns1, key1, canSignNamespaceDelegations = true), key1) - val ns2k2k2 = mkAdd(mkNSD(ns2, key2, canSignNamespaceDelegations = true), key2) - val ns2k2k2_remove = mkRemove(mkNSD(ns2, key2, canSignNamespaceDelegations = true), key2) - val ns2k5k2 = mkAdd(mkNSD(ns2, key5, canSignNamespaceDelegations = true), key2) - val ns2k5k2_remove = mkRemove(mkNSD(ns2, key5, canSignNamespaceDelegations = true), key2) - val ns2k5k8 = mkAdd(mkNSD(ns2, key5, canSignNamespaceDelegations = true), 
key8) - val ns2k2k5 = mkAdd(mkNSD(ns2, key2, canSignNamespaceDelegations = true), key5) - val ns2k8k5 = mkAdd(mkNSD(ns2, key8, canSignNamespaceDelegations = true), key5) - val ns2k8k5_remove = mkRemove(mkNSD(ns2, key8, canSignNamespaceDelegations = true), key5) - val ns2k8k2_nonRoot = mkAdd(mkNSD(ns2, key8, canSignNamespaceDelegations = false), key2) - val ns2k8k2_nonRoot_remove = - mkRemove(mkNSD(ns2, key8, canSignNamespaceDelegations = false), key2) - - val ns3k3k3 = mkAdd(mkNSD(ns3, key3, canSignNamespaceDelegations = true), key3) - - def mkAdd( - nsd: NamespaceDelegation, - key: SigningPublicKey, - ): AuthorizedTopologyTransaction[NamespaceDelegation] = { - val tx = factory.mkAdd(nsd, key) - AuthorizedTopologyTransaction(tx) - } - - def mkRemove( - nsd: NamespaceDelegation, - key: SigningPublicKey, - ): AuthorizedTopologyTransaction[NamespaceDelegation] = { - val tx = factory.mkRemove(nsd, NonEmpty(Set, key), PositiveInt.two) - AuthorizedTopologyTransaction(tx) - } - - /** @param canSignNamespaceDelegations - * if true, creates a namespace delegation that can sign all mappings. if false, creates a - * namespace delegation that cannot sign namespace delegations - */ - def mkNSD(namespace: Namespace, key: SigningPublicKey, canSignNamespaceDelegations: Boolean) = - NamespaceDelegation.tryCreate( - namespace, - key, - if (canSignNamespaceDelegations) { - // randomly choose between two ways a delegation can be permitted to sign namespace delegations. - // this is to increase test coverage over time - selectOneAtRandom( - CanSignAllMappings, - CanSignSpecificMappings(NonEmpty.from(TopologyMapping.Code.all.toSet).value), - ) - } else - // randomly choose between two ways a delegation can be prohibited from signing namespace delegations. - // this is to increase test coverage over time - selectOneAtRandom( - CanSignAllButNamespaceDelegations, - CanSignSpecificMappings( - NonEmpty.from(TopologyMapping.Code.all.toSet - NamespaceDelegation.code).value - ), - ), - ) - - private def selectOneAtRandom( - a: DelegationRestriction, - b: DelegationRestriction, - ): DelegationRestriction = - if (random.nextBoolean()) a else b - - def replaceSignature[T <: TopologyMapping]( - authTx: AuthorizedTopologyTransaction[T], - key: SigningPublicKey, - ): AuthorizedTopologyTransaction[T] = { - // in this test we only sign namespace delegations so we can limit the usage to NamespaceOnly - val signature = factory.syncCryptoClient.crypto.privateCrypto - .sign( - authTx.hash.hash, - key.fingerprint, - SigningKeyUsage.NamespaceOnly, - ) - .value - .failOnShutdown - .futureValue - .getOrElse(sys.error(s"Error when signing ${authTx} with $key")) - authTx.copy(transaction = - authTx.transaction.copy(signatures = - NonEmpty.mk(Set, SingleTransactionSignature(authTx.transaction.hash, signature)) - ) - ) - } - - def check( - graph: AuthorizationCheck, - mappingToAuthorize: TopologyMapping.Code, - valid: Boolean, - )(keys: SigningPublicKey*): Assertion = - graph.existsAuthorizedKeyIn( - keys.map(_.fingerprint).toSet, - mappingToAuthorize, - ) shouldBe valid - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/DecentralizedNamespaceAuthorizationGraphTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/DecentralizedNamespaceAuthorizationGraphTest.scala deleted file mode 100644 index 745ae73ae2..0000000000 ---
a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/DecentralizedNamespaceAuthorizationGraphTest.scala +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.processing - -import cats.instances.order.* -import com.digitalasset.canton.topology.processing.AuthorizedTopologyTransaction.AuthorizedNamespaceDelegation -import com.digitalasset.canton.topology.transaction.TopologyMapping.Code -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.{BaseTestWordSpec, ProtocolVersionChecksAnyWordSpec} -import org.scalatest.wordspec.AnyWordSpec - -class DecentralizedNamespaceAuthorizationGraphTest - extends AnyWordSpec - with BaseTestWordSpec - with ProtocolVersionChecksAnyWordSpec - with BaseAuthorizationGraphTest { - - import DecentralizedNamespaceAuthorizationGraphTest.* - import factory.SigningKeys.* - - private def mkGraph: DecentralizedNamespaceAuthorizationGraph = - DecentralizedNamespaceAuthorizationGraph( - decentralizedNamespaceDefinition, - owners - .map(new AuthorizationGraph(_, extraDebugInfo = false, loggerFactory = loggerFactory)) - .forgetNE - .toSeq, - ) - - "authorization graph for a decentralized namespace" when { - - "only having namespace delegations for its constituents" should { - "work for a simple quorum" in { - val graph = mkGraph - graph.addAuth(ns1k1k1) - graph.addAuth(ns2k2k2) - graph.addAuth(ns3k3k3) - - // Individual keys are not enough - for { - key <- Seq(key1, key2, key3) - } { - forAll(allMappings)(check(graph, _, valid = false)(key)) - } - - // at least quorum number of signatures is enough - Seq(key1, key2, key3) - .combinations(decentralizedNamespaceDefinition.threshold.value) - .foreach { keys => - forAll(allMappings)(check(graph, _, valid = true)(keys*)) - } - } - "support longer chains" in { - val graph = mkGraph - graph.addAuth(ns1k1k1) - graph.addAuth(ns2k2k2) - graph.addAuth(ns3k3k3) - - graph.addAuth(ns2k5k2) - graph.addAuth(ns2k8k5) - forAll(allMappings)(check(graph, _, valid = true)(key1, key8, key3)) - } - - "support removal" in { - val graph = mkGraph - graph.addAuth(ns1k1k1) - graph.addAuth(ns2k2k2) - graph.addAuth(ns3k3k3) - - graph.removeAuth(ns2k2k2_remove) - forAll(allMappings)(check(graph, _, valid = false)(key1, key2)) - forAll(allMappings)(check(graph, _, valid = true)(key1, key3)) - } - - "support breaking and re-creating chains" in { - val graph = mkGraph - graph.addAuth(ns1k1k1) - graph.addAuth(ns2k2k2) - - graph.addAuth(ns2k5k2) - graph.addAuth(ns2k8k5) - forAll(allMappings)(check(graph, _, valid = true)(key1, key2)) - forAll(allMappings)(check(graph, _, valid = true)(key1, key5)) - forAll(allMappings)(check(graph, _, valid = true)(key1, key8)) - loggerFactory.assertLogs( - graph.removeAuth(ns2k5k2_remove), - _.warningMessage should (include regex s"dangling.*${key8.fingerprint}"), - ) - forAll(allMappings)(check(graph, _, valid = false)(key1, key5)) - forAll(allMappings)(check(graph, _, valid = false)(key1, key8)) - graph.addAuth(ns2k5k2) - forAll(allMappings)(check(graph, _, valid = true)(key1, key5)) - forAll(allMappings)(check(graph, _, valid = true)(key1, key8)) - } - - "not support several chains" in { - val graph = mkGraph - graph.addAuth(ns1k1k1) - graph.addAuth(ns2k2k2) - - graph.addAuth(ns2k5k2) - // this is ns2k8 serial=1 - graph.addAuth(ns2k8k5) - forAll(allMappings)(check(graph, _, valid = true)(key1, 
key8)) - - // this is ns2k8 serial=2 and overwrites the previous mapping ns2k8 signed by k5 - graph.addAuth(ns2k8k2_nonRoot) - forAll(allButNSD)(check(graph, _, valid = true)(key1, key8)) - - // this is ns2k8 serial=3 and removes the ns2k8 mapping entirely - graph.removeAuth(ns2k8k2_nonRoot_remove) - forAll(allMappings)(check(graph, _, valid = false)(key1, key8)) - } - - "deal with cycles" in { - val graph = mkGraph - graph.addAuth(ns1k1k1) - graph.addAuth(ns2k2k2) - graph.addAuth(ns2k5k2) - graph.addAuth(ns2k8k5) - - val danglingKeys = List(key5, key8).map(_.fingerprint).sorted.mkString(", ") - loggerFactory.assertLogs( - // this overwrites ns2k5k2, leading to a break in the authorization chain for the now dangling k5 and k8 - graph.addAuth(ns2k5k8), - _.warningMessage should (include regex s"dangling.*$danglingKeys"), - ) - forAll(allMappings)(check(graph, _, valid = true)(key1, key2)) - forAll(allMappings)(check(graph, _, valid = false)(key1, key5)) - forAll(allMappings)(check(graph, _, valid = false)(key1, key8)) - } - - "deal with root revocations" in { - val graph = mkGraph - graph.addAuth(ns1k1k1) - graph.addAuth(ns2k2k2) - graph.addAuth(ns3k3k3) - - graph.addAuth(ns2k5k2) - graph.addAuth(ns2k8k5) - - val danglingKeys = List(key5, key8).map(_.fingerprint).sorted.mkString(", ") - loggerFactory.assertLogs( - graph.removeAuth(ns2k2k2_remove), - _.warningMessage should (include regex s"dangling.*$danglingKeys"), - ) - forAll(allMappings)(check(graph, _, valid = false)(key1, key2)) - forAll(allMappings)(check(graph, _, valid = false)(key1, key5)) - forAll(allMappings)(check(graph, _, valid = false)(key1, key8)) - - // however, key1 and key3 can still reach quorum - forAll(allMappings)(check(graph, _, valid = true)(key1, key3)) - } - - "correctly distinguish on root delegations" in { - val graph = mkGraph - graph.addAuth(ns1k1k1) - graph.addAuth(ns2k2k2) - graph.addAuth(ns2k8k2_nonRoot) - forAll(allMappings)(check(graph, _, valid = true)(key1, key2)) - check(graph, Code.NamespaceDelegation, valid = false)(key1, key8) - forAll(allButNSD)(check(graph, _, valid = true)(key1, key8)) - } - - "deal with same mappings used twice" in { - val graph = mkGraph - graph.addAuth(ns1k1k1) - graph.addAuth(ns2k2k2) - graph.addAuth(ns2k5k2) - forAll(allMappings)(check(graph, _, valid = true)(key1, key5)) - // test that random key is not authorized - forAll(allMappings)(check(graph, _, valid = false)(key1, key3)) - // remove first certificate - graph.removeAuth(ns2k5k2_remove) - forAll(allMappings)(check(graph, _, valid = false)(key1, key5)) - // add other certificate (we don't remember removes, so we can do that in this test) - graph.addAuth(ns2k5k2) - forAll(allMappings)(check(graph, _, valid = true)(key1, key5)) - } - - "test removal of transactions authorized with different keys" in { - // can actually do it (add k2 with one key, remove k2 permission with another, but fail to remove it with the other is not valid) - val graph = mkGraph - graph.addAuth(ns1k1k1) - graph.addAuth(ns2k2k2) - - graph.addAuth(ns2k5k2) - graph.addAuth(ns2k8k5) - forAll(allMappings)(check(graph, _, valid = true)(key1, key8)) - - graph.removeAuth(replaceSignature(ns2k8k5_remove, key2)) - forAll(allMappings)(check(graph, _, valid = false)(key1, key8)) - } - } - } -} - -object DecentralizedNamespaceAuthorizationGraphTest { - implicit class DecentralizedNamespaceAuthorizationGraphExtension( - val dns: DecentralizedNamespaceAuthorizationGraph - ) extends AnyVal { - def addAuth( - authorizedNSD: AuthorizedNamespaceDelegation - 
)(implicit traceContext: TraceContext): Unit = - dns.ownerGraphs - .find(_.namespace == authorizedNSD.mapping.namespace) - .foreach(_.replace(authorizedNSD)) - - def removeAuth( - authorizedNSD: AuthorizedNamespaceDelegation - )(implicit traceContext: TraceContext): Unit = - dns.ownerGraphs - .find(_.namespace == authorizedNSD.mapping.namespace) - .foreach(_.remove(authorizedNSD)) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/InitialTopologySnapshotValidatorTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/InitialTopologySnapshotValidatorTest.scala deleted file mode 100644 index 207fbc74fb..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/InitialTopologySnapshotValidatorTest.scala +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.processing - -import com.digitalasset.canton.FailOnShutdown -import com.digitalasset.canton.config.CantonRequireTypes.String300 -import com.digitalasset.canton.config.DefaultProcessingTimeouts -import com.digitalasset.canton.crypto.SynchronizerCryptoPureApi -import com.digitalasset.canton.store.db.{DbTest, PostgresTest} -import com.digitalasset.canton.topology.PhysicalSynchronizerId -import com.digitalasset.canton.topology.store.db.DbTopologyStoreHelper -import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStore -import com.digitalasset.canton.topology.store.{ - StoredTopologyTransaction, - StoredTopologyTransactions, - TopologyStore, - TopologyStoreId, -} -import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction - -abstract class InitialTopologySnapshotValidatorTest - extends TopologyTransactionHandlingBase - with FailOnShutdown { - - import Factory.* - - protected def mk( - store: TopologyStore[TopologyStoreId.SynchronizerStore] = mkStore( - Factory.physicalSynchronizerId1a - ) - ): (InitialTopologySnapshotValidator, TopologyStore[TopologyStoreId.SynchronizerStore]) = { - - val validator = new InitialTopologySnapshotValidator( - testedProtocolVersion, - new SynchronizerCryptoPureApi(defaultStaticSynchronizerParameters, crypto), - store, - DefaultProcessingTimeouts.testing, - loggerFactory, - ) - (validator, store) - } - - "processing the initial topology snapshot" should { - "successfully process the genesis state at topology initialization time" in { - - val timestampForInit = SignedTopologyTransaction.InitialTopologySequencingTime - val genesisState = StoredTopologyTransactions( - List( - // transaction -> expireImmediately - ns1k1_k1 -> false, - dmp1_k1 -> false, - ns2k2_k2 -> false, - ns3k3_k3 -> false, - ns1k2_k1 -> false, - ns3k3_k3 -> false, // check that duplicates are properly processed - dnd_proposal_k1 -> true, - dnd_proposal_k2 -> true, - dtcp1_k1 -> false, - dnd_proposal_k3 - .copy(isProposal = false) - .addSignaturesFromTransaction(dnd_proposal_k1) - .addSignaturesFromTransaction(dnd_proposal_k2) - -> false, - okm1bk5k1E_k1 -> false, - ).map { case (tx, expireImmediately) => - StoredTopologyTransaction( - SequencedTime(timestampForInit), - EffectiveTime(timestampForInit), - validUntil = Option.when(expireImmediately)(EffectiveTime(timestampForInit)), - tx, - None, - ) - } - ) - val (validator, store) = mk() - - val result = 
validator.validateAndApplyInitialTopologySnapshot(genesisState).futureValueUS - result shouldBe Right(()) - val stateAfterInitialization = fetch(store, timestampForInit.immediateSuccessor) - validate(stateAfterInitialization, genesisState.result.map(_.transaction)) - } - - "successfully process the genesis state at topology initialization time ignoring missing signatures of signing keys" in { - - val timestampForInit = SignedTopologyTransaction.InitialTopologySequencingTime - val genesisState = StoredTopologyTransactions( - List( - ns1k1_k1, - dmp1_k1, - ns2k2_k2, - ns3k3_k3, - ns1k2_k1, - dtcp1_k1, - okm1bk5k1E_k1, - okmS1k7_k1.removeSignatures(Set(SigningKeys.key7.fingerprint)).value, - ).map(tx => - StoredTopologyTransaction( - SequencedTime(timestampForInit), - EffectiveTime(timestampForInit), - validUntil = None, - tx, - None, - ) - ) - ) - - val (validator, store) = mk() - - val result = validator.validateAndApplyInitialTopologySnapshot(genesisState).futureValueUS - result shouldBe Right(()) - - val stateAfterInitialization = fetch(store, timestampForInit.immediateSuccessor) - validate(stateAfterInitialization, genesisState.result.map(_.transaction)) - } - - "reject missing signatures of signing keys if the transaction is not in the genesis topology state" in { - - val timestampForInit = SignedTopologyTransaction.InitialTopologySequencingTime - val okmS1k7_without_k7_signature = - okmS1k7_k1.removeSignatures(Set(SigningKeys.key7.fingerprint)).value - val genesisState = StoredTopologyTransactions( - List( - ns1k1_k1, - dmp1_k1, - ns2k2_k2, - ns3k3_k3, - ns1k2_k1, - dtcp1_k1, - okm1bk5k1E_k1, - ).map(tx => - StoredTopologyTransaction( - SequencedTime(timestampForInit), - EffectiveTime(timestampForInit), - validUntil = None, - tx, - None, - ) - ) :+ StoredTopologyTransaction( - SequencedTime(ts(1)), - EffectiveTime(ts(1).plus((dmp1_k1.mapping.parameters.topologyChangeDelay.duration))), - validUntil = None, - okmS1k7_without_k7_signature, - None, - ) - ) - - val (validator, store) = mk() - - val result = validator.validateAndApplyInitialTopologySnapshot(genesisState).futureValueUS - result.left.value should include regex ("(?s)Store:.*rejectionReason = 'Not authorized'".r) - - val stateAfterInitialization = fetch(store, ts(2)) - // the OTK is rejected and therefore is not returned when looking up valid transactions - validate(stateAfterInitialization, genesisState.result.map(_.transaction).dropRight(1)) - - // verify that the OTK was rejected with the expected reason - store - .findStored(ts(2), okmS1k7_without_k7_signature, includeRejected = true) - .futureValueUS - .value - .rejectionReason - .value - .str shouldBe "Not authorized" - } - - "detect inconsistencies between the snapshot and the result of processing the transactions" in { - - val timestampForInit = SignedTopologyTransaction.InitialTopologySequencingTime - val correctTx = StoredTopologyTransaction( - SequencedTime(timestampForInit), - EffectiveTime(timestampForInit), - validUntil = None, - ns1k1_k1, - None, - ) - - { - // here it doesn't matter that ns1k1_k1 is actually a valid transaction, - // but we want to test whether an inconsistency is reported - val validatorDoesNotRejectTransaction = - StoredTopologyTransaction( - SequencedTime(timestampForInit), - EffectiveTime(timestampForInit), - validUntil = Some(EffectiveTime(timestampForInit)), - ns2k2_k2, - rejectionReason = Some(String300.tryCreate("some rejection reason")), - ) - val (validator, _) = mk() - val result = validator - .validateAndApplyInitialTopologySnapshot( 
- // include a valid transaction as well - StoredTopologyTransactions(Seq(correctTx, validatorDoesNotRejectTransaction)) - ) - .value - .futureValueUS - result.left.value should include( - "Mismatch between transactions at index 1 from the initial snapshot and the topology store" - ) - } - - { - val validatorRejectsTransaction = StoredTopologyTransaction( - SequencedTime(timestampForInit), - EffectiveTime(timestampForInit), - validUntil = None, - // originally this transaction might have been valid, - // but in the context of this topology snapshot it is not, because the authorization chain - // is broken. Maybe somebody tampered with the topology snapshot after exporting it or there - // is a bug in the export logic. - // Regardless, we want the validator to report the inconsistency - ns1k3_k2, - rejectionReason = None, - ) - val (validator, _) = mk() - val result = validator - .validateAndApplyInitialTopologySnapshot( - // include a valid transaction as well - StoredTopologyTransactions(Seq(correctTx, validatorRejectsTransaction)) - ) - .value - .futureValueUS - result.left.value should include( - "Mismatch between transactions at index 1 from the initial snapshot and the topology store" - ) - } - - } - } - -} - -class InitialTopologySnapshotValidatorTestInMemory extends InitialTopologySnapshotValidatorTest { - protected def mkStore( - synchronizerId: PhysicalSynchronizerId = Factory.physicalSynchronizerId1 - ): TopologyStore[TopologyStoreId.SynchronizerStore] = - new InMemoryTopologyStore( - TopologyStoreId.SynchronizerStore(synchronizerId), - testedProtocolVersion, - loggerFactory, - timeouts, - ) - -} -class InitialTopologySnapshotValidatorTestPostgres - extends InitialTopologySnapshotValidatorTest - with DbTest - with DbTopologyStoreHelper - with PostgresTest diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/MultiHashTopologyTransactionsTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/MultiHashTopologyTransactionsTest.scala deleted file mode 100644 index 93ba6d1668..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/MultiHashTopologyTransactionsTest.scala +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.processing - -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.crypto.{Fingerprint, Signature, SignatureFormat, TestHash} -import com.digitalasset.canton.topology.transaction.TopologyTransaction.TxHash -import com.digitalasset.canton.topology.transaction.{ - MultiTransactionSignature, - SignedTopologyTransaction, - SingleTransactionSignature, -} -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import com.google.protobuf.ByteString -import org.scalatest.wordspec.AnyWordSpec - -class MultiHashTopologyTransactionsTest extends AnyWordSpec with BaseTest with HasExecutionContext { - - object Factory - extends TopologyTransactionTestFactory( - loggerFactory, - parallelExecutionContext, - multiHash = true, - ) - - private val transactionHash = Factory.dns1.transaction.hash - - private val multiHash = MultiTransactionSignature( - transactionHashes = NonEmpty.mk(Set, transactionHash, TxHash(TestHash.digest("test"))), - signature = Signature.noSignature, - ) - - private def makeSig(content: String, fingerprint: String) = - Signature.create( - SignatureFormat.Symbolic, - ByteString.copyFromUtf8(content), - Fingerprint.tryFromString(fingerprint), - None, - ) - "signed topology transaction" should { - "only return valid signatures" in { - val signature1 = makeSig("sig1", "fingerprint1") - val signature2 = makeSig("sig2", "fingerprint2") - - val hashes = NonEmpty.mk(Set, TxHash(TestHash.digest("test"))) - val multiHash = MultiTransactionSignature( - transactionHashes = hashes, - signature = signature2, - ) - - val signedTx = SignedTopologyTransaction - .create( - Factory.dns1.transaction, - NonEmpty.mk(Set, signature1, Signature.noSignature), - isProposal = false, - )( - SignedTopologyTransaction.protocolVersionRepresentativeFor(BaseTest.testedProtocolVersion) - ) - .addSignatures(NonEmpty.mk(Set, multiHash)) - - // Critically, multiHash is not there because it does not cover the transaction - signedTx.allUnvalidatedSignaturesCoveringHash.map( - _.signature - ) should contain theSameElementsAs Set( - signature1, - Signature.noSignature, - ) - } - - "successfully merge single into single signature" in { - val signedTx = SignedTopologyTransaction.create( - Factory.dns1.transaction, - NonEmpty.mk(Set, Signature.noSignature), - isProposal = false, - )(SignedTopologyTransaction.protocolVersionRepresentativeFor(BaseTest.testedProtocolVersion)) - - val newSingleSignature = makeSig("new_sig", "no-fingerprint") - - signedTx.addSingleSignatures( - NonEmpty.mk(Set, newSingleSignature) - ) shouldBe SignedTopologyTransaction.create( - Factory.dns1.transaction, - NonEmpty.mk( - Set, - Signature.noSignature, - newSingleSignature, - ), - isProposal = false, - )( - SignedTopologyTransaction.protocolVersionRepresentativeFor(BaseTest.testedProtocolVersion) - ) - } - - "successfully merge multi into single signature" in { - val signedTx = SignedTopologyTransaction.create( - Factory.dns1.transaction, - NonEmpty.mk(Set, Signature.noSignature), - isProposal = false, - )(SignedTopologyTransaction.protocolVersionRepresentativeFor(BaseTest.testedProtocolVersion)) - - signedTx - .addSignatures(NonEmpty.mk(Set, multiHash)) shouldBe SignedTopologyTransaction - .create( - Factory.dns1.transaction, - NonEmpty.mk( - Set, - multiHash, - SingleTransactionSignature(Factory.dns1.transaction.hash, Signature.noSignature), - ), - isProposal = false, - BaseTest.testedProtocolVersion, - ) - } - - "successfully merge single into 
multi signature" in { - val signedTx = SignedTopologyTransaction - .create( - Factory.dns1.transaction, - NonEmpty.mk(Set, multiHash), - isProposal = false, - BaseTest.testedProtocolVersion, - ) - - val newSingleSignature = makeSig("new_sig", "no-fingerprint") - - signedTx.addSingleSignatures( - NonEmpty.mk(Set, newSingleSignature) - ) shouldBe SignedTopologyTransaction - .create( - Factory.dns1.transaction, - NonEmpty.mk( - Set, - multiHash, - SingleTransactionSignature(Factory.dns1.transaction.hash, newSingleSignature), - ), - isProposal = false, - BaseTest.testedProtocolVersion, - ) - } - - "successfully merge multi into multi signature" in { - val signedTx = SignedTopologyTransaction - .create( - Factory.dns1.transaction, - NonEmpty.mk(Set, multiHash), - isProposal = false, - BaseTest.testedProtocolVersion, - ) - - val newSingleSignature = makeSig("new_sig", "no-fingerprint") - - val multiHash2 = MultiTransactionSignature( - transactionHashes = - NonEmpty.mk(Set, Factory.dns1.transaction.hash, TxHash(TestHash.digest("test"))), - signature = newSingleSignature, - ) - - signedTx - .addSignatures(NonEmpty.mk(Set, multiHash2)) shouldBe SignedTopologyTransaction - .create( - Factory.dns1.transaction, - NonEmpty.mk(Set, multiHash, multiHash2), - isProposal = false, - BaseTest.testedProtocolVersion, - ) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/TopologyManagerSigningKeyDetectionTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/TopologyManagerSigningKeyDetectionTest.scala deleted file mode 100644 index 2ea8c53f8b..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/TopologyManagerSigningKeyDetectionTest.scala +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.processing - -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.logging.LogEntry -import com.digitalasset.canton.topology.* -import com.digitalasset.canton.topology.store.* -import com.digitalasset.canton.topology.store.TopologyStoreId.SynchronizerStore -import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStore -import com.digitalasset.canton.topology.transaction.* -import com.digitalasset.canton.topology.transaction.TopologyChangeOp.Replace -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.wordspec.AnyWordSpec - -class TopologyManagerSigningKeyDetectionTest - extends AnyWordSpec - with BaseTest - with HasExecutionContext { - - "TopologyManagerSigningKeyDetection" should { - - object Factory extends TopologyTransactionTestFactory(loggerFactory, parallelExecutionContext) - import Factory.* - - def ts(seconds: Long) = CantonTimestamp.Epoch.plusSeconds(seconds) - - def mk() = - new TopologyManagerSigningKeyDetection( - new InMemoryTopologyStore( - SynchronizerStore(Factory.physicalSynchronizerId1), - testedProtocolVersion, - loggerFactory, - timeouts, - ), - Factory.syncCryptoClient.crypto.pureCrypto, - Factory.syncCryptoClient.crypto.cryptoPrivateStore, - loggerFactory, - ) - - val dtc_uid1a = TopologyTransaction( - Replace, - PositiveInt.one, - SynchronizerTrustCertificate(ParticipantId(uid1a), synchronizerId1), - testedProtocolVersion, - ) - - "prefer keys furthest from the root certificate" in { - val detector = mk() - - detector.store - .update( - SequencedTime(ts(0)), - EffectiveTime(ts(0)), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = Seq(ns1k1_k1, ns1k2_k1, ns1k3_k2).map(ValidatedTopologyTransaction(_)), - ) - .futureValueUS - - detector - .getValidSigningKeysForTransaction(ts(1), dtc_uid1a, None, returnAllValidKeys = false) - .map(_._2) - .futureValueUS shouldBe Right(Seq(SigningKeys.key3.fingerprint)) - - // test getting all valid keys - detector - .getValidSigningKeysForTransaction(ts(1), dtc_uid1a, None, returnAllValidKeys = true) - .futureValueUS - .value - ._2 should contain theSameElementsAs Seq( - SigningKeys.key1, - SigningKeys.key2, - SigningKeys.key3, - ).map(_.fingerprint) - - // now let's break the chain by removing the NSD for key2. - // this prevents key3 from being authorized for new signatures. - detector.store - .update( - SequencedTime(ts(1)), - EffectiveTime(ts(1)), - removeMapping = Map.empty, - removeTxs = Set(ns1k2_k1.hash), - additions = Seq.empty, - ) - .futureValueUS - - // reset caches so that the namespace delegations are fetched again from the store. 
- // normally we would use a separate detector instance per topology manager request - detector.reset() - - loggerFactory.assertLoggedWarningsAndErrorsSeq( - detector - .getValidSigningKeysForTransaction(ts(2), dtc_uid1a, None, returnAllValidKeys = false) - .map(_._2) - .futureValueUS shouldBe Right( - Seq(SigningKeys.key1.fingerprint) - ), - LogEntry.assertLogSeq( - Seq( - ( - _.warningMessage should include( - s"The following target keys of namespace $ns1 are dangling: ${List(SigningKeys.key3.fingerprint)}" - ), - "dangling NSD for key3", - ) - ) - ), - ) - } - - "resolves decentralized namespace definitions for finding appropriate signing keys" in { - val detector = mk() - - detector.store - .update( - SequencedTime(ts(0)), - EffectiveTime(ts(0)), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = - Seq(ns1k1_k1, ns8k8_k8, ns9k9_k9, ns1k2_k1, dns1).map(ValidatedTopologyTransaction(_)), - ) - .futureValueUS - - val otk = TopologyTransaction( - Replace, - PositiveInt.one, - OwnerToKeyMapping( - ParticipantId("decentralized-participant", dns1.mapping.namespace), - NonEmpty(Seq, EncryptionKeys.key1, SigningKeys.key4), - ), - testedProtocolVersion, - ) - - syncCryptoClient.crypto.cryptoPrivateStore - .removePrivateKey(SigningKeys.key8.fingerprint) - .futureValueUS - - detector - .getValidSigningKeysForTransaction(ts(1), otk, None, returnAllValidKeys = false) - .futureValueUS - .value - ._2 should contain theSameElementsAs Seq( - SigningKeys.key2, // the furthest key available for NS1 - SigningKeys.key9, // the root certificate key for NS9 - SigningKeys.key4, // all new signing keys must also sign - // since we removed key8 from the private key store, it cannot be used to sign anything, so it is not suggested - ).map(_.fingerprint) - - detector - .getValidSigningKeysForTransaction(ts(1), otk, None, returnAllValidKeys = true) - .futureValueUS - .value - ._2 should contain theSameElementsAs Seq( - SigningKeys.key1, // the root certificate key for NS1 - SigningKeys.key2, // the key authorized for NS1 by an additional NSD - SigningKeys.key9, // the root certificate key for NS9 - SigningKeys.key4, // all new signing keys must also sign - // since we removed key8 from the private key store, it cannot be used to sign anything, so it is not suggested - ).map(_.fingerprint) - - } - - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/TopologyTimestampPlusEpsilonTrackerTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/TopologyTimestampPlusEpsilonTrackerTest.scala deleted file mode 100644 index be74ade2de..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/TopologyTimestampPlusEpsilonTrackerTest.scala +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.processing - -import com.digitalasset.canton.config.DefaultProcessingTimeouts -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.protocol.DynamicSynchronizerParameters -import com.digitalasset.canton.time.NonNegativeFiniteDuration -import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStore -import com.digitalasset.canton.topology.store.{ - TopologyStoreId, - TopologyTransactionRejection, - ValidatedTopologyTransaction, -} -import com.digitalasset.canton.topology.transaction.* -import com.digitalasset.canton.topology.{DefaultTestIdentities, TestingOwnerWithKeys} -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalactic.source -import org.scalatest.wordspec.FixtureAnyWordSpec -import org.scalatest.{Assertion, Outcome} - -class TopologyTimestampPlusEpsilonTrackerTest - extends FixtureAnyWordSpec - with BaseTest - with HasExecutionContext { - - protected class Fixture { - val crypto = new TestingOwnerWithKeys( - DefaultTestIdentities.sequencerId, - loggerFactory, - parallelExecutionContext, - ) - val store = new InMemoryTopologyStore( - TopologyStoreId.SynchronizerStore(DefaultTestIdentities.physicalSynchronizerId), - testedProtocolVersion, - loggerFactory, - timeouts, - ) - - var tracker: TopologyTimestampPlusEpsilonTracker = _ - reInit() - - def reInit(): Unit = - tracker = new TopologyTimestampPlusEpsilonTracker( - store, - DefaultProcessingTimeouts.testing, - loggerFactory, - ) - - def commitChangeDelay(sequenced: Long, effective: Long, topologyChangeDelay: Long): Unit = { - val sequencedTimeTyped = SequencedTime(CantonTimestamp.ofEpochMicro(sequenced)) - val effectiveTimeTyped = EffectiveTime(CantonTimestamp.ofEpochMicro(effective)) - val topologyChangeDelayTyped = NonNegativeFiniteDuration.tryOfMicros(topologyChangeDelay) - - tracker.adjustTopologyChangeDelay( - effectiveTimeTyped, - topologyChangeDelayTyped, - ) - storeChangeDelay( - sequencedTimeTyped, - effectiveTimeTyped, - topologyChangeDelayTyped, - ) - } - - def storeChangeDelay( - sequenced: SequencedTime, - effective: EffectiveTime, - topologyChangeDelay: NonNegativeFiniteDuration, - ): Unit = { - val tx = crypto.mkAdd( - SynchronizerParametersState( - DefaultTestIdentities.synchronizerId, - DynamicSynchronizerParameters.initialValues( - topologyChangeDelay, - testedProtocolVersion, - ), - ), - crypto.SigningKeys.key1, - ) - store - .update( - sequenced, - effective, - removeMapping = Map(tx.mapping.uniqueKey -> PositiveInt.one), - removeTxs = Set.empty, - List(ValidatedTopologyTransaction(tx, None)), - ) - .futureValueUS - } - - def storeRejection(sequenced: Long, effective: Long): Unit = { - - val tx = ValidatedTopologyTransaction( - crypto.TestingTransactions.p1p1, - Some(TopologyTransactionRejection.NotAuthorized), - ) - - store - .update( - SequencedTime(CantonTimestamp.ofEpochMicro(sequenced)), - EffectiveTime(CantonTimestamp.ofEpochMicro(effective)), - removeMapping = Map.empty, - removeTxs = Set.empty, - Seq(tx), - ) - .futureValueUS - } - - def assertEffectiveTime( - sequenced: Long, - strictMonotonicity: Boolean, - expectedEffective: Long, - )(implicit pos: source.Position): Assertion = - tracker - .trackAndComputeEffectiveTime( - SequencedTime(CantonTimestamp.ofEpochMicro(sequenced)), - strictMonotonicity, - ) - .futureValueUS - .value shouldBe CantonTimestamp.ofEpochMicro(expectedEffective) - } 
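- // Note on the arithmetic the assertions below exercise (inferred from the expected values rather than from the tracker implementation): effectiveTime = sequencedTime + the topologyChangeDelay in force at sequencing time, in microseconds. - // When a delay decrease would make effective times move backwards, the tracker appears to clamp: with strictMonotonicity = false the previous effective time may be repeated, while strictMonotonicity = true bumps it by at least one microsecond.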
- - type FixtureParam = Fixture - - override protected def withFixture(test: OneArgTest): Outcome = test(new Fixture) - - "The tracker" should { - - "correctly compute effective times with constant topologyChangeDelay" in { f => - import f.* - commitChangeDelay(-1, -1, 250) - - assertEffectiveTime(0, strictMonotonicity = true, 250) - assertEffectiveTime(5, strictMonotonicity = true, 255) - assertEffectiveTime(5, strictMonotonicity = false, 255) - } - - "correctly compute effective times when the topologyChangeDelay increases" in { f => - import f.* - // initialize delay - commitChangeDelay(-1, -1, 250) - - // increase delay - assertEffectiveTime(0, strictMonotonicity = true, 250) - commitChangeDelay(0, 250, 1000) - - // until 250, we should get the old delay - assertEffectiveTime(1, strictMonotonicity = true, 251) - assertEffectiveTime(100, strictMonotonicity = true, 350) - assertEffectiveTime(250, strictMonotonicity = true, 500) - - // after 250, we should get the new delay - assertEffectiveTime(251, strictMonotonicity = true, 1251) - assertEffectiveTime(260, strictMonotonicity = true, 1260) - assertEffectiveTime(350, strictMonotonicity = true, 1350) - assertEffectiveTime(500, strictMonotonicity = true, 1500) - } - - "correctly compute effective times when the topologyChangeDelay decreases" in { f => - import f.* - - // initialize delay - commitChangeDelay(-1, -1, 250) - - // decrease delay - assertEffectiveTime(0, strictMonotonicity = true, 250) - commitChangeDelay(0, 250, 100) - - // until 250, we should get the old delay - assertEffectiveTime(1, strictMonotonicity = false, 251) - assertEffectiveTime(100, strictMonotonicity = false, 350) - assertEffectiveTime(250, strictMonotonicity = false, 500) - - // after 250, we should get the new delay, but with corrections to guarantee monotonicity - assertEffectiveTime(251, strictMonotonicity = false, 500) - assertEffectiveTime(252, strictMonotonicity = false, 500) - assertEffectiveTime(253, strictMonotonicity = true, 501) - assertEffectiveTime(254, strictMonotonicity = false, 501) - assertEffectiveTime(300, strictMonotonicity = false, 501) - assertEffectiveTime(300, strictMonotonicity = true, 502) - - // after 402, we should get the new delay without corrections - assertEffectiveTime(403, strictMonotonicity = true, 503) - assertEffectiveTime(404, strictMonotonicity = false, 504) - assertEffectiveTime(410, strictMonotonicity = true, 510) - assertEffectiveTime(500, strictMonotonicity = false, 600) - assertEffectiveTime(600, strictMonotonicity = true, 700) - } - - "initialization should load upcoming epsilon changes" in { f => - import f.* - - // Commit a series of changes and check effective times. - assertEffectiveTime(0, strictMonotonicity = true, 0) - commitChangeDelay(0, 0, 100) // delay1 - assertEffectiveTime(10, strictMonotonicity = true, 110) - commitChangeDelay(10, 110, 110) // delay2 - assertEffectiveTime(100, strictMonotonicity = false, 200) - assertEffectiveTime(111, strictMonotonicity = true, 221) - storeRejection(111, 221) - assertEffectiveTime(120, strictMonotonicity = true, 230) - commitChangeDelay(120, 230, 120) // delay3 - assertEffectiveTime(231, strictMonotonicity = false, 351) - - // Now re-initialize the tracker and check if upcoming changes are loaded from the store - reInit() - // This will initialize the tracker to sequencedTime = 100, i.e. delay1 is effective, delay2 is upcoming, and delay3 not yet processed.
- // delay1 should be loaded from the store, as it is effective - assertEffectiveTime(100, strictMonotonicity = false, 200) - // delay2 should be loaded from the store, as it has been upcoming during initialization - assertEffectiveTime(111, strictMonotonicity = true, 221) - storeRejection(111, 221) - assertEffectiveTime(120, strictMonotonicity = true, 230) - // delay3 needs to be replayed as its sequencing time is after the init time of 100 - commitChangeDelay(120, 230, 120) - assertEffectiveTime(231, strictMonotonicity = false, 351) - } - - "initialization should load upcoming transactions (including rejections)" in { f => - import f.* - - assertEffectiveTime(0, strictMonotonicity = true, 0) - commitChangeDelay(0, 0, 100) // set initial delay1 - assertEffectiveTime(10, strictMonotonicity = true, 110) - commitChangeDelay(10, 110, 50) // decrease delay to delay2 - - // delay1 is still effective - assertEffectiveTime(110, strictMonotonicity = true, 210) - storeRejection(110, 210) - - // delay2 is now effective, but the effective time is corrected - assertEffectiveTime(111, strictMonotonicity = true, 211) - storeRejection(111, 211) - assertEffectiveTime(120, strictMonotonicity = true, 212) - storeRejection(120, 212) - assertEffectiveTime(130, strictMonotonicity = false, 212) - // delay2 is now effective without any correction - assertEffectiveTime(164, strictMonotonicity = true, 214) - storeRejection(164, 214) - - // Now re-initialize and check if previous transactions are reloaded - reInit() - // delay2 is already effective, but the effective time is corrected - assertEffectiveTime(130, strictMonotonicity = false, 212) - // delay2 is now effective without any correction - assertEffectiveTime(164, strictMonotonicity = true, 214) - storeRejection(164, 214) - } - - "initialization should load expired synchronizerParametersChanges" in { f => - import f.* - - assertEffectiveTime(0, strictMonotonicity = true, 0) - commitChangeDelay(0, 0, 100) // set initial delay1 - assertEffectiveTime(10, strictMonotonicity = true, 110) - commitChangeDelay(10, 110, 50) // decrease delay to delay2 - - // delay1 is still effective - assertEffectiveTime(110, strictMonotonicity = false, 210) - // delay2 is now effective, but the effective time is corrected - assertEffectiveTime(120, strictMonotonicity = false, 210) - // delay2 is now effective without any correction - assertEffectiveTime(162, strictMonotonicity = false, 212) - - // Now re-initialize and check if the expiry of delay1 is reloaded - reInit() - assertEffectiveTime(120, strictMonotonicity = false, 210) - // delay2 is now effective without any correction - assertEffectiveTime(162, strictMonotonicity = false, 212) - } - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionAuthorizationValidatorTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionAuthorizationValidatorTest.scala deleted file mode 100644 index 418fcbd1b7..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionAuthorizationValidatorTest.scala +++ /dev/null @@ -1,1459 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.processing - -import cats.Apply -import cats.instances.list.* -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} -import com.digitalasset.canton.crypto.SignatureCheckError.{InvalidSignature, UnsupportedKeySpec} -import com.digitalasset.canton.crypto.{Signature, SigningPublicKey, SynchronizerCryptoPureApi} -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.protocol.{DynamicSequencingParameters, DynamicSynchronizerParameters} -import com.digitalasset.canton.topology.* -import com.digitalasset.canton.topology.DefaultTestIdentities.participant2 -import com.digitalasset.canton.topology.store.* -import com.digitalasset.canton.topology.store.TopologyStoreId.SynchronizerStore -import com.digitalasset.canton.topology.store.TopologyTransactionRejection.{ - MultiTransactionHashMismatch, - NoDelegationFoundForKeys, - NotAuthorized, -} -import com.digitalasset.canton.topology.store.ValidatedTopologyTransaction.GenericValidatedTopologyTransaction -import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStore -import com.digitalasset.canton.topology.transaction.* -import com.digitalasset.canton.topology.transaction.DelegationRestriction.{ - CanSignAllButNamespaceDelegations, - CanSignAllMappings, - CanSignSpecificMappings, -} -import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction -import com.digitalasset.canton.topology.transaction.TopologyChangeOp.Replace -import com.digitalasset.canton.topology.transaction.TopologyMapping.{Code, MappingHash} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.MonadUtil -import com.digitalasset.canton.{ - BaseTest, - FailOnShutdown, - HasExecutionContext, - ProtocolVersionChecksAsyncWordSpec, -} -import com.google.protobuf.ByteString -import org.scalatest.wordspec.AsyncWordSpec - -abstract class TopologyTransactionAuthorizationValidatorTest(multiTransactionHash: Boolean) - extends AsyncWordSpec - with BaseTest - with HasExecutionContext - with FailOnShutdown - with ProtocolVersionChecksAsyncWordSpec { - - object Factory - extends TopologyTransactionTestFactory( - loggerFactory, - parallelExecutionContext, - multiTransactionHash, - ) - - def ts(seconds: Long) = CantonTimestamp.Epoch.plusSeconds(seconds) - - def mk( - store: InMemoryTopologyStore[TopologyStoreId] = new InMemoryTopologyStore( - SynchronizerStore(Factory.physicalSynchronizerId1), - testedProtocolVersion, - loggerFactory, - timeouts, - ), - validationIsFinal: Boolean = true, - ) = { - val validator = - new TopologyTransactionAuthorizationValidator( - Factory.syncCryptoClient.crypto.pureCrypto, - store, - validationIsFinal = validationIsFinal, - loggerFactory, - ) - validator - } - - def check( - validated: Seq[ValidatedTopologyTransaction[TopologyChangeOp, TopologyMapping]], - expectedOutcome: Seq[Option[TopologyTransactionRejection => Boolean]], - ) = { - validated should have length expectedOutcome.size.toLong - validated.zipWithIndex.zip(expectedOutcome).foreach { - case ((ValidatedTopologyTransaction(tx, Some(err), _), _), Some(expected)) => - assert(expected(err), s"Error $err was not expected for transaction: $tx") - case ((ValidatedTopologyTransaction(transaction, rej, _), idx), expected) => - assertResult(expected, s"idx=$idx $transaction")(rej) - } - 
succeed - } - - def validate( - validator: TopologyTransactionAuthorizationValidator[SynchronizerCryptoPureApi], - timestamp: CantonTimestamp, - toValidate: Seq[GenericSignedTopologyTransaction], - inStore: Map[MappingHash, GenericSignedTopologyTransaction], - expectFullAuthorization: Boolean, - transactionMayHaveMissingSigningKeySignatures: Boolean = false, - )(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Seq[GenericValidatedTopologyTransaction]] = - MonadUtil - .sequentialTraverse(toValidate)(tx => - validator.validateAndUpdateHeadAuthState( - timestamp, - tx, - inStore.get(tx.mapping.uniqueKey), - expectFullAuthorization = expectFullAuthorization, - transactionMayHaveMissingSigningKeySignatures = - transactionMayHaveMissingSigningKeySignatures, - ) - ) - "topology transaction authorization" when { - - "receiving transactions with signatures" should { - "succeed to add if the signature is valid" in { - val validator = mk() - import Factory.* - for { - res <- validate( - validator, - ts(0), - List(ns1k1_k1, ns1k2_k1), - Map.empty, - expectFullAuthorization = true, - ) - } yield { - check(res, Seq(None, None)) - } - } - - "fail to add if the signature is invalid" in { - val validator = mk() - import Factory.* - val invalidSig: NonEmpty[Set[TopologyTransactionSignature]] = if (multiTransactionHash) { - ns1k1_k1.signatures.map(sig => - MultiTransactionSignature(NonEmpty.mk(Set, ns1k2_k1.hash), sig.signature) - ) - } else { - ns1k1_k1.signatures.map(sig => SingleTransactionSignature(ns1k2_k1.hash, sig.signature)) - } - val invalid = ns1k2_k1.copy(signatures = invalidSig) - for { - validatedTopologyTransactions <- validate( - validator, - ts(0), - List(ns1k1_k1, invalid), - Map.empty, - expectFullAuthorization = true, - ) - } yield { - check( - validatedTopologyTransactions, - Seq( - None, - Some { - case TopologyTransactionRejection.SignatureCheckFailed(_) => true - case _ => false - }, - ), - ) - } - } - - // TODO(#25752): Add test for invalid signature scheme usage in the transaction protocol (probably as part of the LedgerAuthorizationIntegrationTest). 
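As background for the signature surgery in the test above (and for the multiTransactionHash parameter of this suite): a single-transaction signature is bound to exactly one transaction hash, while a multi-transaction signature signs a combined hash over a set of transaction hashes, so one signature can authorize several transactions at once. A deliberately simplified, hypothetical sketch of the acceptance rule; the real types carry actual hashes and cryptographic signatures:

sealed trait TxSignatureSketch {
  // the transaction hashes this signature is bound to
  def coveredHashes: Set[String]
  final def covers(txHash: String): Boolean = coveredHashes.contains(txHash)
}

// bound to the hash of exactly one transaction
final case class SingleTxSignatureSketch(txHash: String) extends TxSignatureSketch {
  def coveredHashes: Set[String] = Set(txHash)
}

// bound to a combined hash over several transaction hashes
final case class MultiTxSignatureSketch(txHashes: Set[String]) extends TxSignatureSketch {
  def coveredHashes: Set[String] = txHashes
}

// a signature must both verify cryptographically and cover the transaction's hash
def accept(sig: TxSignatureSketch, cryptoOk: Boolean, txHash: String): Boolean =
  cryptoOk && sig.covers(txHash)

Rebinding the signature of ns1k1_k1 to the hash of ns1k2_k1, as the test does, therefore cannot pass validation, which is why a SignatureCheckFailed rejection is expected.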
- "fail to add if the signing key has an unsupported scheme" in { - val validator = mk() - import Factory.* - for { - validatedTopologyTransactions <- validate( - validator, - ts(0), - List(ns1k1_k1_unsupportedScheme), - Map.empty, - expectFullAuthorization = true, - ) - } yield { - check( - validatedTopologyTransactions, - Seq( - Some { - case TopologyTransactionRejection.SignatureCheckFailed( - UnsupportedKeySpec( - Factory.SigningKeys.key1_unsupportedSpec.keySpec, - defaultStaticSynchronizerParameters.requiredSigningSpecs.keys, - ) - ) => - true - case _ => false - } - ), - ) - } - } - - "fail to add if the OwnerToKeyMapping or PartyToKeyMapping misses the signature for newly added signing keys if transactionMayHaveMissingSigningKeySignatures==false" in { - val validator = mk() - import Factory.* - - val ownerToKeyWithMissingSigningKeySignature = - okmS1k7_k1.removeSignatures(Set(SigningKeys.key7.fingerprint)).value - val partyToKeyWithMissingSigningKeysignature = mkAddMultiKey( - PartyToKeyMapping.tryCreate( - PartyId.tryCreate("someParty", ns1), - PositiveInt.one, - NonEmpty(Seq, SigningKeys.key7), - ), - NonEmpty(Set, SigningKeys.key1), - ) - - for { - validatedTopologyTransactions <- validate( - validator, - ts(0), - List( - ns1k1_k1, - ownerToKeyWithMissingSigningKeySignature, - partyToKeyWithMissingSigningKeysignature, - ), - Map.empty, - expectFullAuthorization = true, - transactionMayHaveMissingSigningKeySignatures = false, - ) - } yield { - check( - validatedTopologyTransactions, - Seq( - None, - Some(_ == NotAuthorized), - Some(_ == NotAuthorized), - ), - ) - } - } - - s"permit OwnerToKeyMappings with missing signatures for newly added signing keys if transactionMayHaveMissingSigningKeySignatures==true" in { - val validator = mk() - import Factory.* - - val ownerToKeyWithMissingSigningKeySignature = - okmS1k7_k1.removeSignatures(Set(SigningKeys.key7.fingerprint)).value - val partyToKeyWithMissingSigningKeysignature = mkAddMultiKey( - PartyToKeyMapping.tryCreate( - PartyId.tryCreate("someParty", ns1), - PositiveInt.one, - NonEmpty(Seq, SigningKeys.key7), - ), - NonEmpty(Set, SigningKeys.key1), - ) - for { - validatedTopologyTransactions <- validate( - validator, - ts(0), - List( - ns1k1_k1, - ownerToKeyWithMissingSigningKeySignature, - partyToKeyWithMissingSigningKeysignature, - ), - Map.empty, - expectFullAuthorization = true, - transactionMayHaveMissingSigningKeySignatures = true, - ) - } yield { - check( - validatedTopologyTransactions, - Seq( - None, - None, - // PTKs with missign signing keys are not permitted - Some(_ == NotAuthorized), - ), - ) - } - } - - "reject if the transaction is for the wrong synchronizer" in { - val validator = mk() - import Factory.* - val wrongSynchronizer = - SynchronizerId(UniqueIdentifier.tryCreate("wrong", ns1.fingerprint.unwrap)) - val pid = ParticipantId(UniqueIdentifier.tryCreate("correct", ns1.fingerprint.unwrap)) - val wrong = mkAdd( - SynchronizerTrustCertificate( - pid, - wrongSynchronizer, - ), - Factory.SigningKeys.key1, - ) - for { - res <- validate( - validator, - ts(0), - List(ns1k1_k1, wrong), - Map.empty, - expectFullAuthorization = false, - ) - } yield { - check( - res, - Seq( - None, - Some { - case TopologyTransactionRejection.InvalidSynchronizer(_) => true - case _ => false - }, - ), - ) - } - } - - // testing an inconsistent topology store with multiple DNDs effective at the same time - "be able to handle multiple decentralized namespace transactions for the same namespace being erroneously effective" in { - import 
Factory.* - import SigningKeys.{ec as _, *} - val store = - new InMemoryTopologyStore( - TopologyStoreId.AuthorizedStore, - testedProtocolVersion, - loggerFactory, - timeouts, - ) - val validator = mk(store) - - val namespace = DecentralizedNamespaceDefinition.computeNamespace(Set(ns1)) - - val dnd1 = mkAddMultiKey( - DecentralizedNamespaceDefinition - .create( - namespace, - PositiveInt.one, - owners = NonEmpty(Set, ns1), - ) - .value, - serial = PositiveInt.one, - signingKeys = NonEmpty(Set, key1), - ) - - val dnd2 = mkAddMultiKey( - DecentralizedNamespaceDefinition - .create( - namespace, - PositiveInt.one, - owners = NonEmpty(Set, ns1, ns2), - ) - .value, - serial = PositiveInt.one, - signingKeys = NonEmpty(Set, key1, key2), - ) - - // we intentionally bootstrap with 2 transactions for the same mapping unique key being effective at the same time, - // so that we can test that authorization validator can handle such faulty situations and not just break - val bootstrapTransactions = Seq(ns1k1_k1, ns2k2_k2, dnd1, dnd2).map( - ValidatedTopologyTransaction(_) - ) - - val dnd3 = mkAddMultiKey( - DecentralizedNamespaceDefinition - .create( - namespace, - PositiveInt.two, - owners = NonEmpty(Set, ns1, ns2), - ) - .value, - serial = PositiveInt.one, - signingKeys = NonEmpty(Set, key1, key2), - ) - - for { - _ <- store.update( - SequencedTime.MinValue, - EffectiveTime.MinValue, - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = bootstrapTransactions, - ) - result <- validate( - validator, - ts(1), - Seq(dnd3), - Map(dnd2.mapping.uniqueKey -> dnd2), - expectFullAuthorization = false, - ) - } yield { - result.loneElement.rejectionReason shouldBe None - } - } - - // generates mappings for the specified uid (or its namespace) that will - // be validated with delegations that are restricted to the respective mapping. 
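Before the generators below, a rough sketch of the delegation restrictions they exercise. This is a simplified, hypothetical model (the real CanSignAllMappings, CanSignAllButNamespaceDelegations and CanSignSpecificMappings variants operate on TopologyMapping.Code values inside the authorization graph): a restriction is essentially a predicate over the mapping code that a delegated key may sign.

sealed trait RestrictionSketch {
  def canSign(mappingCode: String): Boolean
}

// the target key may sign every kind of topology mapping
case object CanSignAllMappingsSketch extends RestrictionSketch {
  def canSign(mappingCode: String): Boolean = true
}

// everything except further namespace delegations, so the delegated
// key cannot hand out root-like powers itself
case object CanSignAllButNamespaceDelegationsSketch extends RestrictionSketch {
  def canSign(mappingCode: String): Boolean = mappingCode != "NamespaceDelegation"
}

// only an explicit whitelist of mapping codes
final case class CanSignSpecificMappingsSketch(codes: Set[String]) extends RestrictionSketch {
  def canSign(mappingCode: String): Boolean = codes.contains(mappingCode)
}

The cross-product loop at the end of this test asserts exactly this predicate: a delegation restricted to one code authorizes mappings of that code and nothing else.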
- def generateTestMappings(uid: UniqueIdentifier): Seq[TopologyMapping] = { - import Factory.* - import SigningKeys.{key2, key4, key5} - - val participantId = ParticipantId(uid) - val synchronizerId = SynchronizerId(uid) - val partyId = PartyId(uid) - - val testMappings = Seq( - NamespaceDelegation.tryCreate(uid.namespace, key2, CanSignAllButNamespaceDelegations), - DecentralizedNamespaceDefinition - .create( - DecentralizedNamespaceDefinition.computeNamespace(Set(uid.namespace)), - PositiveInt.one, - NonEmpty(Set, uid.namespace), - ) - .value, - OwnerToKeyMapping(participantId, NonEmpty(Seq, key4)), - SynchronizerTrustCertificate(participantId, synchronizerId), - ParticipantSynchronizerPermission( - synchronizerId, - participantId, - ParticipantPermission.Submission, - None, - None, - ), - PartyHostingLimits(synchronizerId, partyId), - VettedPackages.tryCreate(participantId, Seq.empty), - PartyToParticipant.tryCreate( - partyId, - PositiveInt.one, - Seq(HostingParticipant(participantId, ParticipantPermission.Submission)), - ), - SynchronizerParametersState( - synchronizerId, - DynamicSynchronizerParameters.defaultValues(testedProtocolVersion), - ), - MediatorSynchronizerState - .create( - synchronizerId, - NonNegativeInt.zero, - PositiveInt.one, - Seq(MediatorId(uid)), - Seq.empty, - ) - .value, - SequencerSynchronizerState - .create(synchronizerId, PositiveInt.one, active = Seq(SequencerId(uid)), Seq.empty) - .value, - PurgeTopologyTransaction - .create(synchronizerId, Seq(PartyHostingLimits(synchronizerId, partyId))) - .value, - DynamicSequencingParametersState( - synchronizerId, - DynamicSequencingParameters.default( - DynamicSequencingParameters.protocolVersionRepresentativeFor(testedProtocolVersion) - ), - ), - PartyToKeyMapping.tryCreate(partyId, PositiveInt.one, NonEmpty(Seq, key5)), - ) - - testMappings - } - - // for all topology mapping codes, generate a NamespaceDelegation - // * restricted to the respective mapping code - // * for the uid's namespace or the uid - // * with a newly generated target key (to detect bugs when checking the delegation restriction) - def generateDelegations(uid: UniqueIdentifier): ( - // Code: the code the delegation is restricted to - // SigningPublicKey: the target key of the delegation - // SignedTopologyTransaction: the delegation itself - Seq[(Code, (SigningPublicKey, SignedTopologyTransaction[Replace, NamespaceDelegation]))] - ) = { - import Factory.* - import SigningKeys.key1 - - val namespaceDelegations = - TopologyMapping.Code.all.map { mappingCode => - // generate a new signing key that is only used by the namespace delegation - val nsdTargetKey = genSignKey(mappingCode.code) - val namespaceDelegation = mkAdd( - NamespaceDelegation - .create( - uid.namespace, - nsdTargetKey, - CanSignSpecificMappings(NonEmpty(Set, mappingCode)), - ) - .value, - // we sign it with the root namespace key to make sure it is considered valid - key1, - ) - mappingCode -> (nsdTargetKey, namespaceDelegation) - } - - namespaceDelegations - } - - "respect the mapping restrictions specified in NamespaceDelegation" in { - import Factory.* - import SigningKeys.key1 - - val uid = UniqueIdentifier.tryCreate("uid", ns1) - - val mappingsToValidate = generateTestMappings(uid) - val delegations = generateDelegations(uid).toMap - - val rootCert = mkAdd(NamespaceDelegation.create(ns1, key1, CanSignAllMappings).value, key1) - - // create an in-memory topology store that is shared across all validations. - // this is acceptable, because we only write to it the initial state. 
- val store = - new InMemoryTopologyStore( - TopologyStoreId.AuthorizedStore, - testedProtocolVersion, - loggerFactory, - timeouts, - ) - // store the root cert and all delegations in the store - store - .update( - SequencedTime(ts(0)), - EffectiveTime(ts(0)), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = (rootCert +: delegations.values.map(_._2).toSeq) - .map(ValidatedTopologyTransaction(_)), - ) - .futureValueUS - - // now for the actual test: - // * validate all mappings against all target keys used in delegations - for { - mappingToValidate <- mappingsToValidate - delegation <- delegations.toSeq - (restrictionForDelegation, (key, del)) = delegation - } clue( - s"testing validation of ${mappingToValidate.code} with delegation restricted to $restrictionForDelegation" - ) { - val signedTxToValidate = mkAdd(mappingToValidate, key, isProposal = true) - val validator = mk(store) - - val validated = validator - .validateAndUpdateHeadAuthState( - ts(1), - signedTxToValidate, - inStore = None, - expectFullAuthorization = false, - transactionMayHaveMissingSigningKeySignatures = false, - ) - .futureValueUS - - // if the delegation is restricted to the code of the mapping that is validated - if (restrictionForDelegation == mappingToValidate.code) { - // we expect no errors - validated.rejectionReason shouldBe empty - validated.expireImmediately shouldBe false - } else { - // otherwise, the validation should reject the transaction - validated.rejectionReason should not be empty - } - } - - succeed - } - } - - "observing namespace delegations" should { - "succeed if transaction is properly authorized" in { - val validator = mk() - import Factory.* - for { - res <- validate( - validator, - ts(0), - List(ns1k1_k1, ns1k2_k1, ns1k3_k2), - Map.empty, - expectFullAuthorization = true, - ) - } yield { - check(res, Seq(None, None, None)) - } - } - "fail if the signature of a root certificate is not valid" in { - val validator = mk() - import Factory.* - - val sig_k1_emptySignature = Signature - .fromProtoV30( - ns1k1_k1.signatures.head1.signature.toProtoV30 - .copy(signature = ByteString.empty()) - ) - .value - val newSig: NonEmpty[Set[TopologyTransactionSignature]] = if (multiTransactionHash) { - NonEmpty.mk( - Set, - MultiTransactionSignature(NonEmpty.mk(Set, ns1k1_k1.hash), sig_k1_emptySignature), - ) - } else { - NonEmpty.mk(Set, SingleTransactionSignature(ns1k1_k1.hash, sig_k1_emptySignature)) - } - val ns1k1_k1WithEmptySignature = - ns1k1_k1.copy(signatures = newSig) - - for { - res <- validate( - validator, - ts(0), - List(ns1k1_k1WithEmptySignature, ns1k2_k1), - Map.empty, - expectFullAuthorization = true, - ) - } yield { - check( - res, - Seq( - Some { - case TopologyTransactionRejection.SignatureCheckFailed( - InvalidSignature(`sig_k1_emptySignature`, _, _) - ) => - true - case _ => false - }, - Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key1.fingerprint))), - ), - ) - } - } - "fail if transaction is not properly authorized" in { - val validator = mk() - import Factory.* - for { - res <- validate( - validator, - ts(0), - List(ns1k1_k1, ns6k3_k6, ns1k3_k2, ns1k2_k1, ns1k3_k2), - Map.empty, - expectFullAuthorization = true, - ) - } yield { - check( - res, - Seq( - None, - Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key6.fingerprint))), - Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key2.fingerprint))), - None, - None, - ), - ) - } - } - "succeed and load existing delegations" in { - val store = - new InMemoryTopologyStore( -
TopologyStoreId.AuthorizedStore, - testedProtocolVersion, - loggerFactory, - timeouts, - ) - val validator = mk(store) - import Factory.* - for { - _ <- store.update( - SequencedTime(ts(0)), - EffectiveTime(ts(0)), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = List(ns1k1_k1).map(ValidatedTopologyTransaction(_)), - ) - res <- validate( - validator, - ts(1), - List(ns1k2_k1, ns1k3_k2), - Map.empty, - expectFullAuthorization = true, - ) - } yield { - check(res, Seq(None, None)) - } - } - - "fail on incremental non-authorized transactions" in { - val validator = mk() - import Factory.* - for { - res <- validate( - validator, - ts(1), - List(ns1k1_k1, ns1k3_k2, ns1k2_k1, ns6k3_k6), - Map.empty, - expectFullAuthorization = true, - ) - - } yield { - check( - res, - Seq( - None, - Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key2.fingerprint))), - None, - Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key6.fingerprint))), - ), - ) - } - } - - } - - "observing normal delegations" should { - - "succeed if transaction is properly authorized" in { - val validator = mk() - import Factory.* - for { - res <- validate( - validator, - ts(0), - List(ns1k1_k1, ns1k2_k1, okm1ak5k1E_k2, p1p1B_k2, ns6k6_k6, p1p6_k2k6), - Map.empty, - expectFullAuthorization = true, - ) - } yield { - check(res, Seq(None, None, None, None, None, None)) - } - } - "fail if transaction is not properly authorized" in { - val validator = mk() - import Factory.* - for { - resultExpectFullAuthorization <- validate( - validator, - ts(0), - List(ns1k1_k1, okm1ak5k1E_k2, p1p1B_k2), - Map.empty, - expectFullAuthorization = true, - ) - // also check that insufficiently authorized non-proposals get rejected with expectFullAuthorization - resultDontExpectFullAuthorization <- validate( - validator, - ts(0), - List(ns1k1_k1, okm1ak5k1E_k2, p1p1B_k2), - Map.empty, - expectFullAuthorization = false, - ) - - } yield { - check( - resultExpectFullAuthorization, - Seq( - None, - Some(_ == NotAuthorized), - Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key2.fingerprint))), - ), - ) - - check( - resultDontExpectFullAuthorization, - Seq( - None, - Some(_ == NotAuthorized), - Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key2.fingerprint))), - ), - ) - - } - } - } - - "observing removals" should { - "accept authorized removals" in { - val validator = mk() - import Factory.* - val Rns1k2_k1 = mkTrans(ns1k2_k1.transaction.reverse) - for { - res <- validate( - validator, - ts(0), - List(ns1k1_k1, ns1k2_k1, Rns1k2_k1), - Map.empty, - expectFullAuthorization = true, - ) - } yield { - check(res, Seq(None, None, None)) - } - } - - "reject un-authorized after removal" in { - val validator = mk() - import Factory.* - val Rns1k2_k1 = mkTrans(ns1k2_k1.transaction.reverse) - for { - res <- validate( - validator, - ts(0), - List(ns1k1_k1, ns1k2_k1, Rns1k2_k1, okm1ak5k1E_k2, p1p6_k2), - Map.empty, - expectFullAuthorization = true, - ) - } yield { - check( - res, - Seq( - None, - None, - None, - Some(_ == NotAuthorized), - Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key2.fingerprint))), - ), - ) - } - } - - } - - "observing PartyToParticipant mappings" should { - "allow participants to unilaterally disassociate themselves from parties" in { - val store = - new InMemoryTopologyStore( - TopologyStoreId.AuthorizedStore, - testedProtocolVersion, - loggerFactory, - timeouts, - ) - val validator = mk(store) - import Factory.* - - val pid2 = ParticipantId(UniqueIdentifier.tryCreate("participant2", ns2)) - val 
participants_1_2_6_HostParty1 = mkAddMultiKey( - PartyToParticipant.tryCreate( - party1b, // lives in the namespace of p1, corresponding to `SigningKeys.key1` - threshold = PositiveInt.two, - Seq( - HostingParticipant(participant1, ParticipantPermission.Submission), - HostingParticipant(pid2, ParticipantPermission.Submission), - HostingParticipant(participant6, ParticipantPermission.Submission), - ), - ), - // both the party's owner and the participant sign - NonEmpty(Set, SigningKeys.key1, SigningKeys.key2, SigningKeys.key6), - serial = PositiveInt.one, - ) - - val unhostingMapping = PartyToParticipant.tryCreate( - party1b, - threshold = PositiveInt.two, - Seq( - HostingParticipant(participant1, ParticipantPermission.Submission), - HostingParticipant(participant6, ParticipantPermission.Submission), - ), - ) - val unhostingMappingAndThresholdChange = PartyToParticipant.tryCreate( - party1b, - threshold = PositiveInt.one, - Seq( - HostingParticipant(participant1, ParticipantPermission.Submission), - HostingParticipant(participant6, ParticipantPermission.Submission), - ), - ) - - val participant2RemovesItselfUnilaterally = mkAdd( - unhostingMapping, - // only the unhosting participant signs - SigningKeys.key2, - serial = PositiveInt.two, - ) - - val participant2RemovedFullyAuthorized = mkAddMultiKey( - unhostingMapping, - // both the unhosting participant as well as the party's owner signs - NonEmpty(Set, SigningKeys.key1, SigningKeys.key2), - serial = PositiveInt.two, - ) - - val ptpMappingHash = participants_1_2_6_HostParty1.mapping.uniqueKey - for { - _ <- store.update( - SequencedTime(ts(0)), - EffectiveTime(ts(0)), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = List(ns1k1_k1, ns2k2_k2, ns6k6_k6).map( - ValidatedTopologyTransaction(_) - ), - ) - hostingResult <- validate( - validator, - ts(1), - List(participants_1_2_6_HostParty1), - inStore = Map.empty, - expectFullAuthorization = false, - ) - - // unilateral unhosting by participant2 only signed by the participant - unhostingResult <- validate( - validator, - ts(2), - List(participant2RemovesItselfUnilaterally), - inStore = Map(ptpMappingHash -> participants_1_2_6_HostParty1), - expectFullAuthorization = false, - ) - - // it is still allowed to have a mix of signatures for unhosting - unhostingMixedResult <- validate( - validator, - ts(2), - List(participant2RemovedFullyAuthorized), - inStore = Map(ptpMappingHash -> participants_1_2_6_HostParty1), - expectFullAuthorization = false, - ) - - // the participant being removed may not sign if anything else changes - unhostingAndThresholdChangeResult <- validate( - validator, - ts(2), - List( - mkAddMultiKey( - unhostingMappingAndThresholdChange, - NonEmpty(Set, SigningKeys.key2), - ) - ), - inStore = Map(ptpMappingHash -> participants_1_2_6_HostParty1), - expectFullAuthorization = false, - ) - } yield { - check(hostingResult, Seq(None)) - check(unhostingResult, Seq(None)) - check(unhostingMixedResult, Seq(None)) - check( - unhostingAndThresholdChangeResult, - Seq(Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key2.fingerprint)))), - ) - } - } - } - - "evolving decentralized namespace definitions with threshold > 1" should { - "succeed if proposing lower threshold and number of owners" in { - val store = - new InMemoryTopologyStore( - TopologyStoreId.AuthorizedStore, - testedProtocolVersion, - loggerFactory, - timeouts, - ) - val validator = mk(store) - import Factory.* - for { - _ <- store.update( - SequencedTime(ts(0)), - EffectiveTime(ts(0)), - removeMapping = 
Map.empty, - removeTxs = Set.empty, - additions = decentralizedNamespaceWithMultipleOwnerThreshold.map( - ValidatedTopologyTransaction(_) - ), - ) - res <- validate( - validator, - ts(1), - List(dns2), - decentralizedNamespaceWithMultipleOwnerThreshold - .map(tx => tx.mapping.uniqueKey -> tx) - .toMap, - expectFullAuthorization = false, - ) - } yield { - check(res, Seq(None)) - } - } - - "succeed in authorizing with quorum of owner signatures" in { - val store = - new InMemoryTopologyStore( - TopologyStoreId.AuthorizedStore, - testedProtocolVersion, - loggerFactory, - timeouts, - ) - val validator = mk(store) - import Factory.* - val proposeDecentralizedNamespaceWithLowerThresholdAndOwnerNumber = List(dns2) - for { - _ <- store.update( - SequencedTime(ts(0)), - EffectiveTime(ts(0)), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = decentralizedNamespaceWithMultipleOwnerThreshold.map( - ValidatedTopologyTransaction(_) - ), - ) - _ <- store.update( - SequencedTime(ts(1)), - EffectiveTime(ts(1)), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = proposeDecentralizedNamespaceWithLowerThresholdAndOwnerNumber.map( - ValidatedTopologyTransaction(_) - ), - ) - res <- validate( - validator, - ts(2), - // Analogously to how the TopologyStateProcessor merges the signatures of proposals - // with the same serial, combine the signature of the previous proposal to the current proposal. - List(dns3.addSignaturesFromTransaction(dns2)), - (decentralizedNamespaceWithMultipleOwnerThreshold ++ proposeDecentralizedNamespaceWithLowerThresholdAndOwnerNumber) - .map(tx => tx.mapping.uniqueKey -> tx) - .toMap, - // Expect to be able to authorize now that we have two signatures as required by - // decentralizedNamespaceWithMultipleOwnerThreshold (dns1). - expectFullAuthorization = true, - ) - } yield { - check(res, Seq(None)) - } - } - - "remove from cache for TopologyChangeOp.REMOVAL" in { - val store = - new InMemoryTopologyStore( - TopologyStoreId.AuthorizedStore, - testedProtocolVersion, - loggerFactory, - timeouts, - ) - val validator = mk(store) - import Factory.* - for { - // 1. validate and store the decentralized namespace owners root certificates - resultAddOwners <- validate( - validator, - ts(0), - decentralizedNamespaceOwners, - Map.empty, - expectFullAuthorization = true, - ) - _ = resultAddOwners.foreach(_.rejectionReason shouldBe None) - _ <- store.update( - SequencedTime(ts(0)), - EffectiveTime(ts(0)), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = resultAddOwners, - ) - - // 2. validate and store the decentralized namespace definition - // this puts the DND authorization graph into the cache - resultAddDND <- validate( - validator, - ts(1), - List(dns1), - Map.empty, - expectFullAuthorization = true, - ) - _ = resultAddDND.foreach(_.rejectionReason shouldBe None) - _ <- store.update( - SequencedTime(ts(1)), - EffectiveTime(ts(1)), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = resultAddDND, - ) - - // 3. 
now process the removal of the decentralized namespace definition - // this should remove the DND authorization graph from the cache - resRemoveDND <- validate( - validator, - ts(2), - List(dns1Removal), - Map(dns1.mapping.uniqueKey -> dns1), - expectFullAuthorization = true, - ) - _ = resRemoveDND.foreach(_.rejectionReason shouldBe None) - _ <- store.update( - SequencedTime(ts(2)), - EffectiveTime(ts(2)), - removeMapping = Map(dns1Removal.mapping.uniqueKey -> dns1Removal.serial), - removeTxs = Set.empty, - additions = resRemoveDND, - ) - - // 4. Now to the actual test: try to authorize something for the decentralized namespace. - // this should be rejected because the namespace is not valid anymore, and the - // authorization cache has been properly cleaned up. - resultUnauthorizedIDD <- validate( - validator, - ts(3), - List(dns1trustCert), - Map.empty, - expectFullAuthorization = true, - ) - } yield { - check( - resultUnauthorizedIDD, - Seq( - Some( - _ == NoDelegationFoundForKeys( - Set(SigningKeys.key1, SigningKeys.key8, SigningKeys.key9).map(_.fingerprint) - ) - ) - ), - ) - } - - } - } - - def checkProposalFlagAfterValidation(validationIsFinal: Boolean, expectProposal: Boolean) = { - val store = - new InMemoryTopologyStore( - TopologyStoreId.AuthorizedStore, - testedProtocolVersion, - loggerFactory, - timeouts, - ) - val validator = mk(store, validationIsFinal) - import Factory.* - import SigningKeys.{ec as _, *} - - val dns_id = DecentralizedNamespaceDefinition.computeNamespace(Set(ns1, ns8)) - val dns_2_owners = mkAddMultiKey( - DecentralizedNamespaceDefinition - .create(dns_id, PositiveInt.two, NonEmpty(Set, ns1, ns8)) - .value, - NonEmpty(Set, key1, key8), - serial = PositiveInt.one, - ) - val decentralizedNamespaceWithThreeOwners = List(ns1k1_k1, ns8k8_k8, dns_2_owners) - - for { - _ <- store.update( - SequencedTime(ts(0)), - EffectiveTime(ts(0)), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = decentralizedNamespaceWithThreeOwners.map( - ValidatedTopologyTransaction(_) - ), - ) - - pkgTx = TopologyTransaction( - TopologyChangeOp.Replace, - serial = PositiveInt.one, - VettedPackages.tryCreate( - ParticipantId(UniqueIdentifier.tryCreate("consortium-participiant", dns_id)), - Seq.empty, - ), - BaseTest.testedProtocolVersion, - ) - result_packageVetting <- validate( - validator, - ts(1), - toValidate = List( - // Setting isProposal=true despite having enough keys. - // This simulates processing a proposal with the signature of a node, - // that got merged with another proposal already in the store. 
- mkTrans(pkgTx, signingKeys = NonEmpty(Set, key1, key8), isProposal = true) - ), - inStore = Map.empty, - expectFullAuthorization = false, - ) - - } yield { - val validatedPkgTx = result_packageVetting.loneElement - - validatedPkgTx.rejectionReason shouldBe None - withClue("package transaction is proposal")( - validatedPkgTx.transaction.isProposal shouldBe expectProposal - ) - } - } - - "change the proposal status when the validation is final" in { - checkProposalFlagAfterValidation(validationIsFinal = true, expectProposal = false) - } - - "not change the proposal status when the validation is not final" in { - checkProposalFlagAfterValidation(validationIsFinal = false, expectProposal = true) - } - - "remove superfluous signatures" in { - val store = - new InMemoryTopologyStore( - TopologyStoreId.AuthorizedStore, - testedProtocolVersion, - loggerFactory, - timeouts, - ) - val validator = mk(store) - import Factory.* - import SigningKeys.{ec as _, *} - - val dns_id = DecentralizedNamespaceDefinition.computeNamespace(Set(ns1, ns8)) - val dnsTwoOwners = mkAddMultiKey( - DecentralizedNamespaceDefinition - .create(dns_id, PositiveInt.two, NonEmpty(Set, ns1, ns8)) - .value, - NonEmpty(Set, key1, key8), - serial = PositiveInt.one, - ) - val decentralizedNamespaceWithTwoOwners = List(ns1k1_k1, ns8k8_k8, dnsTwoOwners) - - for { - _ <- store.update( - SequencedTime(ts(0)), - EffectiveTime(ts(0)), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = decentralizedNamespaceWithTwoOwners.map( - ValidatedTopologyTransaction(_) - ), - ) - - pkgTx = TopologyTransaction( - TopologyChangeOp.Replace, - serial = PositiveInt.one, - VettedPackages.tryCreate( - ParticipantId(UniqueIdentifier.tryCreate("consortium-participiant", dns_id)), - Seq.empty, - ), - BaseTest.testedProtocolVersion, - ) - resultPackageVetting <- validate( - validator, - ts(1), - toValidate = List( - // Signing this transaction also with key9 simulates that ns9 was part of the - // decentralized namespace before and was eligible for signing the transaction. 
- // After this validation, we expect the signature of key9 to be removed - mkTrans(pkgTx, signingKeys = NonEmpty(Set, key9, key1, key8), isProposal = true) - ), - inStore = Map.empty, - expectFullAuthorization = false, - ) - - // if there are only superfluous signatures, reject the transaction - resultOnlySuperfluousSignatures <- validate( - validator, - ts(2), - toValidate = List( - mkTrans(pkgTx, signingKeys = NonEmpty(Set, key3, key5), isProposal = true) - ), - inStore = Map.empty, - expectFullAuthorization = false, - ) - - } yield { - val validatedPkgTx = resultPackageVetting.loneElement - val signatures = validatedPkgTx.transaction.signatures - - validatedPkgTx.rejectionReason shouldBe None - signatures.map(_.authorizingLongTermKey).forgetNE should contain theSameElementsAs Set( - key1, - key8, - ).map( - _.id - ) - - resultOnlySuperfluousSignatures.loneElement.rejectionReason shouldBe Some( - TopologyTransactionRejection.NoDelegationFoundForKeys(Set(key3.id, key5.id)) - ) - } - } - - "respect the threshold of decentralized namespaces" in { - val store = - new InMemoryTopologyStore( - TopologyStoreId.AuthorizedStore, - testedProtocolVersion, - loggerFactory, - timeouts, - ) - val validator = mk(store) - import Factory.* - import SigningKeys.{ec as _, *} - - val dns_id = DecentralizedNamespaceDefinition.computeNamespace(Set(ns1, ns8, ns9)) - val dns = mkAddMultiKey( - DecentralizedNamespaceDefinition - .create(dns_id, PositiveInt.tryCreate(3), NonEmpty(Set, ns1, ns8, ns9)) - .value, - NonEmpty(Set, key1, key8, key9), - serial = PositiveInt.one, - ) - - val decentralizedNamespaceWithThreeOwners = List(ns1k1_k1, ns8k8_k8, ns9k9_k9, dns) - - val pkgMapping = VettedPackages.tryCreate( - ParticipantId(UniqueIdentifier.tryCreate("consortium-participiant", dns_id)), - Seq.empty, - ) - val pkgTx = TopologyTransaction( - TopologyChangeOp.Replace, - serial = PositiveInt.one, - pkgMapping, - BaseTest.testedProtocolVersion, - ) - - def validateTx( - isProposal: Boolean, - expectFullAuthorization: Boolean, - signingKeys: SigningPublicKey* - ): FutureUnlessShutdown[GenericValidatedTopologyTransaction] = - TraceContext.withNewTraceContext("test") { freshTraceContext => - validate( - validator, - ts(1), - toValidate = List( - mkTrans( - pkgTx, - isProposal = isProposal, - signingKeys = NonEmpty.from(signingKeys.toSet).value, - ) - ), - inStore = Map.empty, - expectFullAuthorization = expectFullAuthorization, - )(freshTraceContext) - .map(_.loneElement) - } - - for { - _ <- store.update( - SequencedTime(ts(0)), - EffectiveTime(ts(0)), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = decentralizedNamespaceWithThreeOwners.map( - ValidatedTopologyTransaction(_) - ), - ) - - combinationsThatAreNotAuthorized = Seq( - ( /* isProposal*/ true, /* expectFullAuthorization*/ true), - ( /* isProposal*/ false, /* expectFullAuthorization*/ true), - // doesn't make much sense. 
a non-proposal by definition must be fully authorized - ( /* isProposal*/ false, /* expectFullAuthorization*/ false), - ) - - // try with 1/3 signatures - _ <- MonadUtil.sequentialTraverse(combinationsThatAreNotAuthorized) { - case (isProposal, expectFullAuthorization) => - clueFUS( - s"key1: isProposal=$isProposal, expectFullAuthorization=$expectFullAuthorization" - )( - validateTx(isProposal, expectFullAuthorization, key1).map( - _.rejectionReason shouldBe Some(NotAuthorized) - ) - ) - } - - // authorizing as proposal should succeed - _ <- clueFUS(s"key1: isProposal=true, expectFullAuthorization=false")( - validateTx(isProposal = true, expectFullAuthorization = false, key1).map({ s => - s.rejectionReason shouldBe None - () - }) - ) - - // try with 2/3 signatures - _ <- MonadUtil.sequentialTraverse(combinationsThatAreNotAuthorized) { - case (isProposal, expectFullAuthorization) => - clueFUS( - s"key1, key8: isProposal=$isProposal, expectFullAuthorization=$expectFullAuthorization" - )( - validateTx(isProposal, expectFullAuthorization, key1, key8).map({ s => - s.rejectionReason shouldBe Some(NotAuthorized) - () - }) - ) - } - - _ <- clueFUS( - s"key1, key8: isProposal=true, expectFullAuthorization=false" - )( - validateTx( - isProposal = true, - expectFullAuthorization = false, - key1, - key8, - ).map({ s => - s.rejectionReason shouldBe None - () - }) - ) - - // when there are enough signatures, the transaction should become fully authorized - // regardless of the `isProposal` and `expectFullAuthorization` flags - allCombinations = Apply[List].product(List(true, false), List(true, false)) - _ <- MonadUtil.sequentialTraverse(allCombinations) { - case (isProposal, expectFullAuthorization) => - clueFUS( - s"key1, key8, key9: isProposal=$isProposal, expectFullAuthorization=$expectFullAuthorization" - )( - validateTx(isProposal, expectFullAuthorization, key1, key8, key9).map( - _.rejectionReason shouldBe None - ) - ) - } - - } yield { - succeed - } - } - } - -} - -// Authorizes transactions by signing the hash of the transaction only -class TopologyTransactionAuthorizationValidatorTestDefault - extends TopologyTransactionAuthorizationValidatorTest(multiTransactionHash = false) - -// Authorizes transactions by signing a multi-hash containing the transaction hash -class TopologyTransactionAuthorizationValidatorTestMultiTransactionHash - extends TopologyTransactionAuthorizationValidatorTest(multiTransactionHash = true) { - - def makeOtkWithNonCoveringSignature = { - import Factory.* - val newSig = okm1ak5k1E_k2.signatures.filter( - // Remove the signature from key5, which we need for this OTK, and keep only the namespace signature - _.signature.authorizingLongTermKey == SigningKeys.key2.fingerprint - ) - // Create a signature for an OTK with participant 2. 
Should not authorize okm1ak5k1E_k2 in any way because it's - // a different transaction - val signatureFromKey5ForParticipant2 = mkAddMultiKey( - OwnerToKeyMapping(participant2, NonEmpty(Seq, SigningKeys.key5, EncryptionKeys.key1)), - NonEmpty(Set, SigningKeys.key5, SigningKeys.key2), - ) - okm1ak5k1E_k2 - .copy( - // the OTK needs 2 signatures to be authorized, we just keep the namespace delegation one here - signatures = NonEmpty.from(newSig).value - ) - // Add the signature to the original OTK - .addSignaturesFromTransaction(signatureFromKey5ForParticipant2) - } - - "topology transaction authorization" should { - "fail if the signatures are valid but do not cover the transaction" in { - val validator = mk() - import Factory.* - val invalid = makeOtkWithNonCoveringSignature - for { - validatedTopologyTransactions <- validate( - validator, - ts(0), - List(ns1k1_k1, ns1k2_k1, invalid), - Map.empty, - expectFullAuthorization = true, - ) - } yield { - check( - validatedTopologyTransactions, - Seq( - None, - None, - Some { - case _: MultiTransactionHashMismatch => true - case _ => false - }, - ), - ) - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionHandlingBase.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionHandlingBase.scala deleted file mode 100644 index 1499d1a15a..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionHandlingBase.scala +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.processing - -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.topology.PhysicalSynchronizerId -import com.digitalasset.canton.topology.store.StoredTopologyTransactions.GenericStoredTopologyTransactions -import com.digitalasset.canton.topology.store.{TopologyStore, TopologyStoreId} -import com.digitalasset.canton.topology.transaction.{ - SignedTopologyTransaction, - TopologyChangeOp, - TopologyMapping, -} -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.Assertion -import org.scalatest.wordspec.AsyncWordSpec - -/** Base class for [[TopologyTransactionProcessorTest]] and - * [[InitialTopologySnapshotValidatorTest]]. 
- */ -abstract class TopologyTransactionHandlingBase - extends AsyncWordSpec - with BaseTest - with HasExecutionContext { - - protected val crypto = new SymbolicPureCrypto() - object Factory extends TopologyTransactionTestFactory(loggerFactory, parallelExecutionContext) - - protected def mkStore( - synchronizerId: PhysicalSynchronizerId = Factory.physicalSynchronizerId1a - ): TopologyStore[TopologyStoreId.SynchronizerStore] - - protected def ts(idx: Int): CantonTimestamp = CantonTimestamp.Epoch.plusSeconds(idx.toLong) - protected def fetch( - store: TopologyStore[TopologyStoreId], - timestamp: CantonTimestamp, - isProposal: Boolean = false, - ): List[TopologyMapping] = - fetchTx(store, timestamp, isProposal).toTopologyState - - protected def fetchTx( - store: TopologyStore[TopologyStoreId], - timestamp: CantonTimestamp, - isProposal: Boolean = false, - ): GenericStoredTopologyTransactions = - store - .findPositiveTransactions( - asOf = timestamp, - asOfInclusive = false, - isProposal = isProposal, - types = TopologyMapping.Code.all, - None, - None, - ) - .futureValueUS - - protected def validate( - observed: Seq[TopologyMapping], - expected: Seq[SignedTopologyTransaction[TopologyChangeOp, TopologyMapping]], - ): Assertion = - observed.toSet shouldBe expected.map(_.mapping).toSet - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessorTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessorTest.scala deleted file mode 100644 index 37cfe54a67..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessorTest.scala +++ /dev/null @@ -1,787 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.processing - -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.config.DefaultProcessingTimeouts -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} -import com.digitalasset.canton.crypto.{SigningPublicKey, SynchronizerCryptoPureApi} -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.protocol.DynamicSynchronizerParameters -import com.digitalasset.canton.protocol.messages.TopologyTransactionsBroadcast -import com.digitalasset.canton.sequencing.SubscriptionStart.FreshSubscription -import com.digitalasset.canton.sequencing.protocol.{ - AllMembersOfSynchronizer, - OpenEnvelope, - Recipients, -} -import com.digitalasset.canton.store.db.{DbTest, PostgresTest} -import com.digitalasset.canton.time.{NonNegativeFiniteDuration, SynchronizerTimeTracker} -import com.digitalasset.canton.topology.* -import com.digitalasset.canton.topology.store.db.DbTopologyStoreHelper -import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStore -import com.digitalasset.canton.topology.store.{ - TopologyStore, - TopologyStoreId, - ValidatedTopologyTransaction, -} -import com.digitalasset.canton.topology.transaction.* -import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction -import com.digitalasset.canton.tracing.{TraceContext, Traced} -import com.digitalasset.canton.{FailOnShutdown, SequencerCounter} - -abstract class TopologyTransactionProcessorTest - extends TopologyTransactionHandlingBase - with FailOnShutdown { - - import Factory.* - - protected def mk( - store: TopologyStore[TopologyStoreId.SynchronizerStore] = mkStore( - Factory.physicalSynchronizerId1a - ), - synchronizerId: PhysicalSynchronizerId = Factory.physicalSynchronizerId1a, - ): (TopologyTransactionProcessor, TopologyStore[TopologyStoreId.SynchronizerStore]) = { - - val proc = new TopologyTransactionProcessor( - synchronizerId, - new SynchronizerCryptoPureApi(defaultStaticSynchronizerParameters, crypto), - store, - _ => (), - TerminateProcessing.NoOpTerminateTopologyProcessing, - futureSupervisor, - exitOnFatalFailures = true, - DefaultProcessingTimeouts.testing, - loggerFactory, - ) - (proc, store) - } - - protected def process( - proc: TopologyTransactionProcessor, - ts: CantonTimestamp, - sc: Long, - txs: List[SignedTopologyTransaction[TopologyChangeOp, TopologyMapping]], - ): Unit = - clue(s"block at sc $sc")( - proc - .process( - SequencedTime(ts), - EffectiveTime(ts), - SequencerCounter(sc), - txs, - ) - .onShutdown(fail()) - .futureValue - ) - - "topology transaction processor" when { - "processing transactions from a synchronizer" should { - "deal with additions" in { - val (proc, store) = mk() - // topology processor assumes to be able to find synchronizer parameters in the store for additional checks - val block1 = List(ns1k1_k1, dmp1_k1) - val block2Adds = List(ns1k2_k1, okm1bk5k1E_k1, dtcp1_k1) - val block3Replaces = List(ns1k8_k3_fail, ns1k1_k1, setSerial(dmp1_k1_bis, PositiveInt.two)) - - process(proc, ts(0), 0, block1) - process(proc, ts(1), 1, block2Adds) - val st1 = fetch(store, ts(1).immediateSuccessor) - process(proc, ts(2), 2, block3Replaces) - val st2 = fetch(store, ts(2).immediateSuccessor) - - // finds the most recently stored version of a transaction, including rejected ones - val rejected_ns1k8_k3_fail = - store - 
.findStored(CantonTimestamp.MaxValue, ns1k8_k3_fail, includeRejected = true) - .futureValueUS - .valueOrFail("Unable to find ns1k8_k3_fail in the topology store") - // the rejected ns1k8_k3_fail should not be valid - rejected_ns1k8_k3_fail.validUntil shouldBe Some(rejected_ns1k8_k3_fail.validFrom) - - validate(st1, block1 ++ block2Adds) - validate(st2, ns1k1_k1 +: block2Adds :+ dmp1_k1_bis) - } - - "deal with incremental additions" in { - val (proc, store) = mk() - val block1Adds = List(ns1k1_k1, ns1k2_k1) - val block1Replaces = List(dmp1_k1) - val block1 = block1Adds ++ block1Replaces - val block2 = List(okm1bk5k1E_k1, dtcp1_k1, setSerial(dmp1_k1_bis, PositiveInt.two)) - - process(proc, ts(0), 0, block1) - val st0 = fetch(store, ts(0).immediateSuccessor) - process(proc, ts(1), 1, block2) - val st1 = fetch(store, ts(1).immediateSuccessor) - - validate(st0, block1) - validate(st1, block1Adds ++ block2) // dmp1_k1_bis replaces dmp1_k1 - } - - "deal with removals" in { - val (proc, store) = mk() - val block1 = List(ns1k1_k1, ns1k2_k1) - val block2 = block1.reverse.map(Factory.mkRemoveTx) - process(proc, ts(0), 0, block1) - process(proc, ts(1), 1, block2) - val st1 = fetch(store, ts(0).immediateSuccessor) - val st2 = fetch(store, ts(1).immediateSuccessor) - - validate(st1, block1) - st2 shouldBe empty - } - - "deal with add and remove in the same block" in { - val (proc, store) = mk() - process(proc, ts(0), 0, List(ns1k1_k1, okm1bk5k1E_k1, Factory.mkRemoveTx(okm1bk5k1E_k1))) - val st1 = fetch(store, ts(0).immediateSuccessor) - validate(st1, List(ns1k1_k1)) - } - - "idempotent / crash recovery" in { - val (proc, store) = mk() - val block1 = List(ns1k1_k1, dmp1_k1, ns2k2_k2, ns3k3_k3) - val block2 = List(ns1k2_k1, dtcp1_k1) - val block3 = List(okm1bk5k1E_k1) - val block4 = List(dnd_proposal_k1) - // using two competing proposals in the same block with the same - // * serial - // * mapping_unique_key - // * operation - // * validFrom - // * signing keys - // to check that the unique index is not too general - val block5 = List(dnd_proposal_k2, dnd_proposal_k2_alternative) - val block6 = List(dnd_proposal_k3) - process(proc, ts(0), 0, block1) - process(proc, ts(1), 1, block2) - process(proc, ts(2), 2, block3) - process(proc, ts(3), 3, block4) - process(proc, ts(4), 4, block5) - process(proc, ts(5), 5, block6) - val storeAfterProcessing = store.dumpStoreContent().futureValueUS - val DNDafterProcessing = fetch(store, ts(5).immediateSuccessor) - .find(_.code == TopologyMapping.Code.DecentralizedNamespaceDefinition) - .valueOrFail("Couldn't find DND") - - // check that we indeed stored 2 proposals - storeAfterProcessing.result.filter(_.validFrom == EffectiveTime(ts(4))) should have size 2 - - val proc2 = mk(store)._1 - process(proc2, ts(0), 0, block1) - process(proc2, ts(1), 1, block2) - process(proc2, ts(2), 2, block3) - process(proc2, ts(3), 3, block4) - process(proc2, ts(4), 4, block5) - process(proc2, ts(5), 5, block6) - val storeAfterReplay = store.dumpStoreContent().futureValueUS - - storeAfterReplay.result.size shouldBe storeAfterProcessing.result.size - storeAfterReplay.result.zip(storeAfterProcessing.result).foreach { - case (replayed, original) => replayed shouldBe original - } - DNDafterProcessing shouldBe dnd_proposal_k1.mapping - } - - "trigger topology subscribers with/without transactions" in { - val (proc, store) = mk() - var testTopoSubscriberCalledEmpty: Boolean = false - var testTopoSubscriberCalledWithTxs: Boolean = false - val
testTopoSubscriber = new TopologyTransactionProcessingSubscriber { - override def observed( - sequencedTimestamp: SequencedTime, - effectiveTimestamp: EffectiveTime, - sequencerCounter: SequencerCounter, - transactions: Seq[GenericSignedTopologyTransaction], - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - if (transactions.nonEmpty) { - testTopoSubscriberCalledWithTxs = true - } else { - testTopoSubscriberCalledEmpty = true - } - FutureUnlessShutdown.unit - } - } - val block1 = List(ns1k1_k1, dmp1_k1, ns2k2_k2, ns3k3_k3) - val block2 = List(ns1k2_k1, dtcp1_k1) - val block3 = List(okm1bk5k1E_k1) - val block4 = List(dnd_proposal_k1) - val block5 = List(dnd_proposal_k2) - val block6 = List(dnd_proposal_k3) - val block7 = List(ns1k1_k1) - val block8 = List(ns1k8_k3_fail) - - process(proc, ts(0), 0, block1) - process(proc, ts(1), 1, block2) - process(proc, ts(2), 2, block3) - proc.subscribe(testTopoSubscriber) - process(proc, ts(3), 3, block4) - clue("incomplete proposals should trigger subscriber with empty transactions") { - testTopoSubscriberCalledWithTxs shouldBe false - testTopoSubscriberCalledEmpty shouldBe true - } - testTopoSubscriberCalledEmpty = false - - process(proc, ts(4), 4, block5) - clue("incomplete proposals should trigger subscriber with empty transactions") { - testTopoSubscriberCalledWithTxs shouldBe false - testTopoSubscriberCalledEmpty shouldBe true - } - testTopoSubscriberCalledEmpty = false - - process(proc, ts(5), 5, block6) - clue("complete proposals should trigger subscriber with non-empty transactions") { - testTopoSubscriberCalledWithTxs shouldBe true - testTopoSubscriberCalledEmpty shouldBe false - } - testTopoSubscriberCalledWithTxs = false - - process(proc, ts(6), 6, block7) - clue("duplicate transactions should trigger subscriber with non-empty transactions") { - testTopoSubscriberCalledWithTxs shouldBe true - testTopoSubscriberCalledEmpty shouldBe false - } - testTopoSubscriberCalledWithTxs = false - - process(proc, ts(7), 7, block8) - clue("rejections should trigger subscriber with empty transactions") { - testTopoSubscriberCalledWithTxs shouldBe false - testTopoSubscriberCalledEmpty shouldBe true - } - testTopoSubscriberCalledEmpty = false - - val DNDafterProcessing = fetch(store, ts(5).immediateSuccessor) - .find(_.code == TopologyMapping.Code.DecentralizedNamespaceDefinition) - .valueOrFail("Couldn't find DND") - DNDafterProcessing shouldBe dnd_proposal_k1.mapping - } - - "cascading update and synchronizer parameters change" in { - val (proc, store) = mk() - val block1 = List(ns1k1_k1, ns1k2_k1, dmp1_k2) - process(proc, ts(0), 0, block1) - val st1 = fetch(store, ts(0).immediateSuccessor) - process(proc, ts(1), 1, List(Factory.mkRemoveTx(ns1k2_k1))) - val st2 = fetch(store, ts(1).immediateSuccessor) - validate(st1, block1) - - /* - dmp1_k2 is not revoked - Synchronizer governance transactions are not removed by cascading updates. The - idea behind this is that the change of synchronizer parameters is authorized and then - the new parameters stay valid even if the authorizing key is revoked. That - also ensures that we always have some synchronizer parameters set.
- */ - validate(st2, List(ns1k1_k1, dmp1_k2)) - } - - "fetch previous authorizations" in { - // after a restart, we need to fetch pre-existing authorizations from our store - // simulate this one by one - val store = mkStore() - val block1 = List(ns1k1_k1, ns1k2_k1, okm1bk5k1E_k2) - block1.zipWithIndex.foreach { case (elem, idx) => - val proc = mk(store)._1 - process(proc, ts(idx), idx.toLong, List(elem)) - } - val st = fetch(store, ts(3).immediateSuccessor) - validate(st, block1) - - } - - "correctly handle duplicate transactions" in { - import SigningKeys.{ec as _, *} - val dnsNamespace = - DecentralizedNamespaceDefinition.computeNamespace(Set(ns1, ns7, ns8, ns9)) - val synchronizerId = - SynchronizerId(UniqueIdentifier.tryCreate("test-synchronizer", dnsNamespace)).toPhysical - - val dns = mkAddMultiKey( - DecentralizedNamespaceDefinition - .create( - dnsNamespace, - PositiveInt.three, - NonEmpty(Set, ns1, ns7, ns8, ns9), - ) - .value, - NonEmpty(Set, key1, key7, key8, key9), - ) - - val dopMapping = SynchronizerParametersState( - synchronizerId, - DynamicSynchronizerParameters.defaultValues(testedProtocolVersion), - ) - val dop = mkAddMultiKey( - dopMapping, - NonEmpty(Set, key1, key7, key8), - ) - - val (proc, store) = mk(mkStore(synchronizerId), synchronizerId) - - def checkDop( - ts: CantonTimestamp, - expectedSignatures: Int, - expectedValidFrom: CantonTimestamp, - ) = { - val dopInStore = store - .findStored(ts, dop, includeRejected = false) - .futureValueUS - .value - - dopInStore.mapping shouldBe dopMapping - dopInStore.transaction.signatures.forgetNE.toSeq should have size expectedSignatures.toLong - dopInStore.validUntil shouldBe None - dopInStore.validFrom shouldBe EffectiveTime(expectedValidFrom) - } - - // setup - val block0 = List[GenericSignedTopologyTransaction]( - ns1k1_k1, - ns7k7_k7, - ns8k8_k8, - ns9k9_k9, - dns, - dop, - ) - - process(proc, ts(0), 0L, block0) - validate(fetch(store, ts(0).immediateSuccessor), block0) - // check that the most recently stored version after ts(0) is the one with 3 signatures - checkDop(ts(0).immediateSuccessor, expectedSignatures = 3, expectedValidFrom = ts(0)) - - val extraDop = mkAdd(dopMapping, signingKey = key9, isProposal = true) - - // processing multiple of the same transaction in the same batch works correctly - val block1 = List[GenericSignedTopologyTransaction](extraDop, extraDop) - process(proc, ts(1), 1L, block1) - validate(fetch(store, ts(1).immediateSuccessor), block0) - // check that the most recently stored version after ts(1) is the merge of the previous one with the additional signature - // for a total of 4 signatures - checkDop(ts(1).immediateSuccessor, expectedSignatures = 4, expectedValidFrom = ts(1)) - - // processing yet another instance of the same transaction out of batch will result in a copy of the transaction - val block2 = List(extraDop) - process(proc, ts(2), 2L, block2) - validate(fetch(store, ts(2).immediateSuccessor), block0) - // the latest transaction is now valid from ts(2) - checkDop(ts(2).immediateSuccessor, expectedSignatures = 4, expectedValidFrom = ts(2)) - } - - "correctly handle competing proposals getting enough signatures in the same block" in { - import SigningKeys.{ec as _, *} - val dndNamespace = DecentralizedNamespaceDefinition.computeNamespace(Set(ns1)) - - def createDnd( - owners: Namespace* - )(key: SigningPublicKey, serial: PositiveInt, isProposal: Boolean) = - mkAddMultiKey( - DecentralizedNamespaceDefinition - .create( - dndNamespace, - PositiveInt.one, - 
NonEmpty.from(owners).value.toSet, - ) - .value, - NonEmpty(Set, key), - serial = serial, - isProposal = isProposal, - ) - - val dnd = createDnd(ns1)(key1, serial = PositiveInt.one, isProposal = false) - val dnd_add_ns2_k2 = createDnd(ns1, ns2)(key2, serial = PositiveInt.two, isProposal = true) - val dnd_add_ns2_k1 = createDnd(ns1, ns2)(key1, serial = PositiveInt.two, isProposal = true) - val dnd_add_ns3_k3 = createDnd(ns1, ns3)(key3, serial = PositiveInt.two, isProposal = true) - val dnd_add_ns3_k1 = createDnd(ns1, ns3)(key1, serial = PositiveInt.two, isProposal = true) - val (proc, store) = mk() - - val rootCertificates = Seq[GenericSignedTopologyTransaction](ns1k1_k1, ns2k2_k2, ns3k3_k3) - store - .update( - SequencedTime(CantonTimestamp.MinValue.immediateSuccessor), - EffectiveTime(CantonTimestamp.MinValue.immediateSuccessor), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = (rootCertificates :+ dnd).map(ValidatedTopologyTransaction(_)), - ) - .futureValueUS - - // add proposal to add ns2 to dnd signed by k2 - val block1 = List[GenericSignedTopologyTransaction](dnd_add_ns2_k2) - process(proc, ts(1), 1L, block1) - validate(fetch(store, ts(1).immediateSuccessor, isProposal = true), block1) - - // add proposal to add ns3 to dnd signed by k3 - val block2 = List[GenericSignedTopologyTransaction](dnd_add_ns3_k3) - process(proc, ts(2), 2L, block2) - // now we'll find both proposals - validate(fetch(store, ts(2).immediateSuccessor, isProposal = true), block1 ++ block2) - - // k1 signs both proposals, which are processed in the same block - val block3 = List[GenericSignedTopologyTransaction](dnd_add_ns2_k1, dnd_add_ns3_k1) - process(proc, ts(3), 3L, block3) - // now there should be no more proposals active - validate(fetch(store, ts(3).immediateSuccessor, isProposal = true), Seq.empty) - // and if we query fully authorized mappings, we should find all root certs - // and the updated DND with ns2 as additional owner. - // validate only looks at the mapping of dnd_add_ns2_k1 for the comparison, - // therefore we don't have to manually merge signatures here first. - validate(fetch(store, ts(3).immediateSuccessor), rootCertificates :+ dnd_add_ns2_k1) - - // additionally when we look up dnd_add_ns3_k1 by tx_hash, - // we should find that it has been stored without merged signatures - // and validFrom == validUntil. - val rejected_dnd_add_ns3_k1 = - store - .findStored(CantonTimestamp.MaxValue, dnd_add_ns3_k1, includeRejected = true) - .futureValueUS - .value - rejected_dnd_add_ns3_k1.transaction shouldBe dnd_add_ns3_k1 - rejected_dnd_add_ns3_k1.validUntil shouldBe Some(EffectiveTime(ts(3))) - rejected_dnd_add_ns3_k1.validFrom shouldBe EffectiveTime(ts(3)) - } - - /* This tests the following scenario for transactions with - * - the same mapping unique key - * - a signature threshold of 2 to fully authorize the transaction - * - * 1. process transaction(serial=1, isProposal=false, signatures=2/3) - * 2. process transaction(serial=2, isProposal=true, signatures=1/3) - * 3. process late signature(serial=1, isProposal=false, signatures=3/3) - * 4. check that the proposal has not been expired - * 5. process transaction(serial=2, isProposal=false, signatures=2/3) - * 6.
check that serial=2 expires serial=1 - * - * Triggered by CN-10532 - */ - "correctly handle additional signatures after new proposals have arrived" in { - import SigningKeys.{ec as _, *} - val dnsNamespace = - DecentralizedNamespaceDefinition.computeNamespace(Set(ns1, ns7, ns8)) - val synchronizerId = - SynchronizerId(UniqueIdentifier.tryCreate("test-synchronizer", dnsNamespace)).toPhysical - - val dns = mkAddMultiKey( - DecentralizedNamespaceDefinition - .create( - dnsNamespace, - PositiveInt.two, - NonEmpty(Set, ns1, ns7, ns8), - ) - .value, - NonEmpty(Set, key1, key7, key8), - ) - - // mapping and transactions for serial=1 - val dopMapping1 = SynchronizerParametersState( - synchronizerId, - DynamicSynchronizerParameters.defaultValues(testedProtocolVersion), - ) - val dop1_k1k7 = mkAddMultiKey( - dopMapping1, - NonEmpty(Set, key1, key7), - serial = PositiveInt.one, - ) - val dop1_k8_late_signature = - mkAdd(dopMapping1, key8, isProposal = true, serial = PositiveInt.one) - - // mapping and transactions for serial=2 - val dopMapping2 = SynchronizerParametersState( - synchronizerId, - DynamicSynchronizerParameters - .defaultValues(testedProtocolVersion) - .update( - confirmationRequestsMaxRate = - DynamicSynchronizerParameters.defaultConfirmationRequestsMaxRate + NonNegativeInt.one - ), - ) - val dop2_k1_proposal = - mkAdd(dopMapping2, signingKey = key1, serial = PositiveInt.two, isProposal = true) - // this transaction is marked as a proposal, but the merging of the signatures k1 and k7 will result - // in a fully authorized transaction - val dop2_k7_proposal = - mkAdd(dopMapping2, signingKey = key7, serial = PositiveInt.two, isProposal = true) - - val (proc, store) = mk(mkStore(synchronizerId), synchronizerId) - - def checkDop( - ts: CantonTimestamp, - transactionToLookUp: GenericSignedTopologyTransaction, - expectedSignatures: Int, - expectedValidFrom: CantonTimestamp, - ) = { - val dopInStore = store - .findStored(ts, transactionToLookUp, includeRejected = false) - .futureValueUS - .value - - dopInStore.mapping shouldBe transactionToLookUp.mapping - dopInStore.transaction.signatures.forgetNE.toSeq should have size expectedSignatures.toLong - dopInStore.validUntil shouldBe None - dopInStore.validFrom shouldBe EffectiveTime(expectedValidFrom) - } - - // setup: namespaces and initial synchronizer parameters - val block0 = List[GenericSignedTopologyTransaction]( - ns1k1_k1, - ns7k7_k7, - ns8k8_k8, - dns, - dop1_k1k7, - ) - process(proc, ts(0), 0L, block0) - validate(fetch(store, ts(0).immediateSuccessor), block0) - checkDop( - ts(0).immediateSuccessor, - transactionToLookUp = dop1_k1k7, - expectedSignatures = 2, - expectedValidFrom = ts(0), - ) - - // process the first proposal - val block1 = List(dop2_k1_proposal) - process(proc, ts(1), 1L, block1) - validate(fetch(store, ts(1).immediateSuccessor), block0) - // there's only the DOP proposal in the entire topology store - validate(fetch(store, ts(1).immediateSuccessor, isProposal = true), block1) - // we find the fully authorized transaction with 2 signatures - checkDop( - ts(1).immediateSuccessor, - transactionToLookUp = dop1_k1k7, - expectedSignatures = 2, - expectedValidFrom = ts(0), - ) - // we find the proposal with serial=2 - checkDop( - ts(1).immediateSuccessor, - transactionToLookUp = dop2_k1_proposal, - expectedSignatures = 1, - expectedValidFrom = ts(1), - ) - - // process the late additional signature for serial=1 - val block2 = List(dop1_k8_late_signature) - process(proc, ts(2), 2L, block2) - // the fully authorized mappings haven't 
changed since block0, only the DOP signatures - validate(fetch(store, ts(2).immediateSuccessor), block0) - validate(fetch(store, ts(2).immediateSuccessor, isProposal = true), block1) - // we find the fully authorized transaction with 3 signatures - checkDop( - ts(2).immediateSuccessor, - transactionToLookUp = dop1_k8_late_signature, - expectedSignatures = 3, - // since serial=1 got its signatures updated, the updated transaction is valid as of ts(2) - expectedValidFrom = ts(2), - ) - // we still find the proposal. This was failing in CN-10532 - checkDop( - ts(2).immediateSuccessor, - transactionToLookUp = dop2_k1_proposal, - expectedSignatures = 1, - expectedValidFrom = ts(1), - ) - - // process another signature for serial=2 to fully authorize it - val block3 = List(dop2_k7_proposal) - process(proc, ts(3), 3L, block3) - // the initial DOP mapping has now been overridden by the fully authorized serial=2 in block3 - validate(fetch(store, ts(3).immediateSuccessor), block0.init ++ block3) - // there are no more proposals - validate(fetch(store, ts(3).immediateSuccessor, isProposal = true), List.empty) - // find the serial=2 mapping with 2 signatures - checkDop( - ts(3).immediateSuccessor, - transactionToLookUp = dop2_k7_proposal, - expectedSignatures = 2, - expectedValidFrom = ts(3), - ) - store - .findStored(asOfExclusive = ts(3).immediateSuccessor, dop1_k1k7) - .futureValueUS - .value - .validUntil - .value - .value shouldBe ts(3) - } - - /** This test checks that only fully authorized synchronizer parameter changes are used to - * update the topology change delay for adjusting the effective time - * - * 1. initialize the topology store with a decentralized namespace with 2 owners and - * default synchronizer parameters (topologyChangeDelay=250ms) - * 1. process a proposal to update the topology change delay - * 1. process the fully authorized update to the topology change delay - * 1. process some other topology transaction - * - * Only in step 4 
should the updated topology change delay be used to compute the effective - * time - */ - "only track fully authorized synchronizer parameter state changes" in { - import SigningKeys.{ec as _, *} - val dnsNamespace = - DecentralizedNamespaceDefinition.computeNamespace(Set(ns1, ns2)) - val synchronizerId = - SynchronizerId(UniqueIdentifier.tryCreate("test-synchronizer", dnsNamespace)).toPhysical - - val dns = mkAddMultiKey( - DecentralizedNamespaceDefinition - .create( - dnsNamespace, - PositiveInt.two, - NonEmpty(Set, ns1, ns2), - ) - .value, - signingKeys = NonEmpty(Set, key1, key2), - ) - val initialSynchronizerParameters = mkAddMultiKey( - SynchronizerParametersState( - synchronizerId.logical, - DynamicSynchronizerParameters.defaultValues(testedProtocolVersion), - ), - signingKeys = NonEmpty(Set, key1, key2), - ) - - val initialTopologyChangeDelay = - initialSynchronizerParameters.mapping.parameters.topologyChangeDelay.duration - val updatedTopologyChangeDelay = initialTopologyChangeDelay.plusMillis(50) - - val updatedSynchronizerParams = SynchronizerParametersState( - synchronizerId.logical, - DynamicSynchronizerParameters.initialValues( - topologyChangeDelay = NonNegativeFiniteDuration.tryCreate(updatedTopologyChangeDelay), - testedProtocolVersion, - ), - ) - val synchronizerParameters_k1 = mkAdd( - updatedSynchronizerParams, - signingKey = key1, - serial = PositiveInt.two, - isProposal = true, - ) - val synchronizerParameters_k2 = mkAdd( - updatedSynchronizerParams, - signingKey = key2, - serial = PositiveInt.two, - isProposal = true, - ) - - val initialTopologyState = List(ns1k1_k1, ns2k2_k2, dns, initialSynchronizerParameters) - .map(ValidatedTopologyTransaction(_)) - - def mkEnvelope(transaction: GenericSignedTopologyTransaction) = - Traced( - List( - OpenEnvelope( - TopologyTransactionsBroadcast( - synchronizerId, - List(transaction), - ), - recipients = Recipients.cc(AllMembersOfSynchronizer), - )(testedProtocolVersion) - ) - ) - - // in block1 we propose a new topology change delay. 
the transaction itself will be - // stored with the default topology change delay of 250ms and should NOT trigger a change - // in topology change delay, because it's only a proposal - val block1 = mkEnvelope(synchronizerParameters_k1) - // in block2 we fully authorize the update to synchronizer parameters - val block2 = mkEnvelope(synchronizerParameters_k2) - // in block3 we should see the new topology change delay being used to compute the effective time - val block3 = mkEnvelope(ns3k3_k3) - - val store = mkStore(synchronizerId) - - store - .update( - sequenced = SequencedTime(CantonTimestamp.MinValue.immediateSuccessor), - effective = EffectiveTime(CantonTimestamp.MinValue.immediateSuccessor), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = initialTopologyState, - ) - .futureValueUS - - val (proc, _) = mk(store, synchronizerId) - - val synchronizerTimeTrackerMock = mock[SynchronizerTimeTracker] - when(synchronizerTimeTrackerMock.awaitTick(any[CantonTimestamp])(anyTraceContext)) - .thenAnswer(None) - - proc.subscriptionStartsAt(FreshSubscription, synchronizerTimeTrackerMock).futureValueUS - - // ================== - // process the blocks - - // block1: first proposal to update topology change delay - // use proc.processEnvelopes directly so that the effective time is properly computed from topology change delays - proc - .processEnvelopes(SequencerCounter(0), SequencedTime(ts(0)), None, block1) - .flatMap(_.unwrap) - .futureValueUS - - // block2: second proposal to update the topology change delay, making it fully authorized - proc - .processEnvelopes(SequencerCounter(1), SequencedTime(ts(1)), None, block2) - .flatMap(_.unwrap) - .futureValueUS - - // block3: any topology transaction is now processed with the updated topology change delay - proc - .processEnvelopes(SequencerCounter(2), SequencedTime(ts(2)), None, block3) - .flatMap(_.unwrap) - .futureValueUS - - // ======================================== - // check the applied topology change delays - - // 1. fetch the proposal from block1 at a time when it has become effective - val storedSynchronizerParametersProposal = - fetchTx(store, ts(0).plusSeconds(1), isProposal = true) - .collectOfMapping[SynchronizerParametersState] - .result - .loneElement - // the proposal itself should be processed with the default topology change delay - storedSynchronizerParametersProposal.validFrom.value - storedSynchronizerParametersProposal.sequenced.value shouldBe initialTopologyChangeDelay - - // 2. fetch the latest fully authorized synchronizer parameters transaction from block2 at a time when it has become effective - val storedSynchronizerParametersUpdate = fetchTx(store, ts(1).plusSeconds(1)) - .collectOfMapping[SynchronizerParametersState] - .result - .loneElement - // the transaction to change the topology change delay itself should still be processed with the default topology change delay - storedSynchronizerParametersUpdate.validFrom.value - storedSynchronizerParametersUpdate.sequenced.value shouldBe initialTopologyChangeDelay - - // 3. 
fetch the topology transaction from block3 at a time when it has become effective - val storedNSD3 = fetchTx(store, ts(2).plusSeconds(1)) - .collectOfMapping[NamespaceDelegation] - .filter(_.mapping.namespace == ns3) - .result - .loneElement - // the transaction should be processed with the updated topology change delay - storedNSD3.validFrom.value - storedNSD3.sequenced.value shouldBe updatedTopologyChangeDelay - } - } - } -} - -class TopologyTransactionProcessorTestInMemory extends TopologyTransactionProcessorTest { - protected def mkStore( - synchronizerId: PhysicalSynchronizerId = Factory.physicalSynchronizerId1a - ): TopologyStore[TopologyStoreId.SynchronizerStore] = - new InMemoryTopologyStore( - TopologyStoreId.SynchronizerStore(synchronizerId), - testedProtocolVersion, - loggerFactory, - timeouts, - ) - -} -class TopologyTransactionProcessorTestPostgres - extends TopologyTransactionProcessorTest - with DbTest - with DbTopologyStoreHelper - with PostgresTest diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionTestFactory.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionTestFactory.scala deleted file mode 100644 index ff0dab4086..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionTestFactory.scala +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.processing - -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.BaseTest.* -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.crypto.SigningPublicKey -import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.protocol.TestSynchronizerParameters -import com.digitalasset.canton.time.NonNegativeFiniteDuration -import com.digitalasset.canton.topology.* -import com.digitalasset.canton.topology.DefaultTestIdentities.{physicalSynchronizerId, sequencerId} -import com.digitalasset.canton.topology.transaction.* -import com.digitalasset.canton.topology.transaction.DelegationRestriction.{ - CanSignAllButNamespaceDelegations, - CanSignAllMappings, -} - -import scala.concurrent.ExecutionContext - -class TopologyTransactionTestFactory( - loggerFactory: NamedLoggerFactory, - initEc: ExecutionContext, - multiHash: Boolean = false, -) extends TestingOwnerWithKeys(sequencerId, loggerFactory, initEc, multiHash) { - - import SigningKeys.* - - def createNs(ns: Namespace, key: SigningPublicKey, delegationRestriction: DelegationRestriction) = - NamespaceDelegation.tryCreate(ns, key, delegationRestriction) - - val ns1 = Namespace(key1.fingerprint) - val ns1_unsupportedSpec = Namespace(key1_unsupportedSpec.fingerprint) - val ns2 = Namespace(key2.fingerprint) - val ns3 = Namespace(key3.fingerprint) - val ns4 = Namespace(key4.fingerprint) - val ns6 = Namespace(key6.fingerprint) - val ns7 = Namespace(key7.fingerprint) - val ns8 = Namespace(key8.fingerprint) - val ns9 = Namespace(key9.fingerprint) - val uid1a = UniqueIdentifier.tryCreate("one", ns1) - val uid1b = UniqueIdentifier.tryCreate("two", ns1) - val uid6 = UniqueIdentifier.tryCreate("other", ns6) - val physicalSynchronizerId1 = - SynchronizerId(UniqueIdentifier.tryCreate("synchronizer", ns1)).toPhysical - val synchronizerId1 = 
physicalSynchronizerId.logical - val physicalSynchronizerId1a = SynchronizerId(uid1a).toPhysical - val synchronizerId1a = SynchronizerId(uid1a) - val party1b = PartyId(uid1b) - val party6 = PartyId(uid6) - val participant1 = ParticipantId(uid1a) - val participant6 = ParticipantId(uid6) - val ns1k1_k1 = mkAdd(createNs(ns1, key1, CanSignAllMappings), key1) - val ns1k1_k1_unsupportedScheme = - mkAdd( - createNs(ns1_unsupportedSpec, key1_unsupportedSpec, CanSignAllMappings), - key1_unsupportedSpec, - ) - val ns1k2_k1 = mkAdd(createNs(ns1, key2, CanSignAllMappings), key1) - val ns1k2_k1p = mkAdd(createNs(ns1, key2, CanSignAllMappings), key1) - val ns1k3_k2 = mkAdd(createNs(ns1, key3, CanSignAllButNamespaceDelegations), key2) - val ns1k8_k3_fail = mkAdd(createNs(ns1, key8, CanSignAllButNamespaceDelegations), key3) - val ns2k2_k2 = mkAdd(createNs(ns2, key2, CanSignAllMappings), key2) - val ns3k3_k3 = mkAdd(createNs(ns3, key3, CanSignAllMappings), key3) - val ns6k3_k6 = mkAdd(createNs(ns6, key3, CanSignAllButNamespaceDelegations), key6) - val ns6k6_k6 = mkAdd(createNs(ns6, key6, CanSignAllMappings), key6) - - val okm1ak5k1E_k2 = - mkAddMultiKey( - OwnerToKeyMapping(participant1, NonEmpty(Seq, key5, EncryptionKeys.key1)), - NonEmpty(Set, key2, key5), - ) - val okm1bk5k1E_k1 = - mkAddMultiKey( - OwnerToKeyMapping(participant1, NonEmpty(Seq, key5, EncryptionKeys.key1)), - NonEmpty(Set, key1, key5), - ) - val okm1bk5k1E_k2 = - mkAddMultiKey( - OwnerToKeyMapping(participant1, NonEmpty(Seq, key5, EncryptionKeys.key1)), - NonEmpty(Set, key2, key5), - ) - - val sequencer1 = SequencerId(UniqueIdentifier.tryCreate("sequencer1", ns1)) - val okmS1k7_k1 = - mkAddMultiKey( - OwnerToKeyMapping(sequencer1, NonEmpty(Seq, key7)), - NonEmpty(Set, key1, key7), - ) - val sdmS1_k1 = - mkAdd( - SequencerSynchronizerState - .create(synchronizerId1, PositiveInt.one, Seq(sequencer1), Seq.empty) - .getOrElse(sys.error("Failed to create SequencerSynchronizerState")), - key1, - ) - - val dtcp1_k1 = - mkAdd(SynchronizerTrustCertificate(participant1, SynchronizerId(uid1a)), key1) - - val defaultSynchronizerParameters = TestSynchronizerParameters.defaultDynamic - - val p1p1B_k2 = - mkAdd( - PartyToParticipant.tryCreate( - party1b, - threshold = PositiveInt.one, - Seq(HostingParticipant(participant1, ParticipantPermission.Submission)), - ), - key2, - ) - val p1p6_k2 = - mkAdd( - PartyToParticipant.tryCreate( - party1b, - threshold = PositiveInt.one, - Seq(HostingParticipant(participant6, ParticipantPermission.Submission)), - ), - key2, - isProposal = true, - ) - val p1p6_k6 = - mkAddMultiKey( - PartyToParticipant.tryCreate( - party1b, - threshold = PositiveInt.one, - Seq(HostingParticipant(participant6, ParticipantPermission.Submission)), - ), - NonEmpty(Set, key1, key6), - isProposal = true, - ) - val p1p6_k2k6 = - mkAddMultiKey( - PartyToParticipant.tryCreate( - party1b, - threshold = PositiveInt.one, - Seq(HostingParticipant(participant6, ParticipantPermission.Submission)), - ), - NonEmpty(Set, key2, key6), - ) - - val p1p6B_k3 = - mkAdd( - PartyToParticipant.tryCreate( - party1b, - threshold = PositiveInt.one, - Seq(HostingParticipant(participant6, ParticipantPermission.Submission)), - ), - key3, - ) - - val dmp1_k2 = mkAdd( - SynchronizerParametersState(SynchronizerId(uid1a), defaultSynchronizerParameters), - key2, - ) - - val dmp1_k1 = mkAdd( - SynchronizerParametersState( - SynchronizerId(uid1a), - defaultSynchronizerParameters - .tryUpdate(confirmationResponseTimeout = NonNegativeFiniteDuration.tryOfSeconds(1)), - ), - 
key1, - ) - - val dmp1_k1_bis = mkAdd( - SynchronizerParametersState( - SynchronizerId(uid1a), - defaultSynchronizerParameters - .tryUpdate(confirmationResponseTimeout = NonNegativeFiniteDuration.tryOfSeconds(2)), - ), - key1, - ) - - val ns7k7_k7 = mkAdd(createNs(ns7, key7, CanSignAllMappings), key7) - val ns8k8_k8 = mkAdd(createNs(ns8, key8, CanSignAllMappings), key8) - val ns9k9_k9 = mkAdd(createNs(ns9, key9, CanSignAllMappings), key9) - - val dns1 = mkAddMultiKey( - DecentralizedNamespaceDefinition - .create(ns7, PositiveInt.two, NonEmpty(Set, ns1, ns8, ns9)) - .fold( - err => sys.error(s"Failed to create DecentralizedNamespaceDefinition 1: $err"), - identity, - ), - NonEmpty(Set, key1, key8, key9), - serial = PositiveInt.one, - ) - val dns1Removal = mkRemove( - dns1.mapping, - NonEmpty(Set, key1, key8, key9), - serial = PositiveInt.two, - ) - val dns1trustCert = mkAddMultiKey( - SynchronizerTrustCertificate( - ParticipantId(UniqueIdentifier.tryCreate("test", dns1.mapping.namespace)), - synchronizerId1, - ), - NonEmpty(Set, key1, key8, key9), - ) - val dns2 = mkAdd( - DecentralizedNamespaceDefinition - .create(ns7, PositiveInt.one, NonEmpty(Set, ns1)) - .fold( - err => sys.error(s"Failed to create DecentralizedNamespaceDefinition 2: $err"), - identity, - ), - key9, - serial = PositiveInt.two, - isProposal = true, - ) - val dns3 = mkAdd( - DecentralizedNamespaceDefinition - .create(ns7, PositiveInt.one, NonEmpty(Set, ns1)) - .fold( - err => sys.error(s"Failed to create DecentralizedNamespaceDefinition 3: $err"), - identity, - ), - key8, - serial = PositiveInt.two, - isProposal = true, - ) - val decentralizedNamespaceOwners = List(ns1k1_k1, ns8k8_k8, ns9k9_k9) - val decentralizedNamespaceWithMultipleOwnerThreshold = - List(ns1k1_k1, ns8k8_k8, ns9k9_k9, dns1) - - private val dndOwners = - NonEmpty(Set, key1.fingerprint, key2.fingerprint, key3.fingerprint).map(Namespace(_)) - private val dndNamespace = DecentralizedNamespaceDefinition.computeNamespace(dndOwners) - val dnd_proposal_k1 = mkAdd( - DecentralizedNamespaceDefinition - .create( - dndNamespace, - PositiveInt.two, - dndOwners, - ) - .fold(sys.error, identity), - signingKey = key1, - isProposal = true, - ) - val dnd_proposal_k2 = mkAdd( - DecentralizedNamespaceDefinition - .create( - dndNamespace, - PositiveInt.two, - NonEmpty(Set, key1.fingerprint, key2.fingerprint, key3.fingerprint).map(Namespace(_)), - ) - .fold(sys.error, identity), - signingKey = key2, - isProposal = true, - ) - // this only differs from dnd_proposal_k2 by having a different threshold - val dnd_proposal_k2_alternative = mkAdd( - DecentralizedNamespaceDefinition - .create( - dndNamespace, - PositiveInt.one, - NonEmpty(Set, key1.fingerprint, key2.fingerprint, key3.fingerprint).map(Namespace(_)), - ) - .fold(sys.error, identity), - signingKey = key2, - isProposal = true, - ) - - val dnd_proposal_k3 = mkAdd( - DecentralizedNamespaceDefinition - .create( - dndNamespace, - PositiveInt.two, - NonEmpty(Set, key1.fingerprint, key2.fingerprint, key3.fingerprint).map(Namespace(_)), - ) - .fold(sys.error, identity), - signingKey = key3, - isProposal = true, - ) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/DownloadTopologyStateForInitializationServiceTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/DownloadTopologyStateForInitializationServiceTest.scala deleted file mode 100644 index 0dc60f41ef..0000000000 --- 
a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/DownloadTopologyStateForInitializationServiceTest.scala +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.store - -import cats.syntax.option.* -import com.digitalasset.canton.FailOnShutdown -import com.digitalasset.canton.config.CantonRequireTypes.String300 -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.topology.PhysicalSynchronizerId -import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} -import com.digitalasset.canton.topology.store.StoredTopologyTransactions.GenericStoredTopologyTransactions -import com.digitalasset.canton.topology.store.TopologyStoreId.SynchronizerStore -import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction -import org.scalatest.wordspec.AsyncWordSpec - -trait DownloadTopologyStateForInitializationServiceTest - extends AsyncWordSpec - with TopologyStoreTestBase - with FailOnShutdown { - - protected def mkStore( - synchronizerId: PhysicalSynchronizerId - ): TopologyStore[SynchronizerStore] - - val testData = new TopologyStoreTestData(testedProtocolVersion, loggerFactory, executionContext) - import testData.* - - val bootstrapTransactions = StoredTopologyTransactions( - Seq[ - (CantonTimestamp, (GenericSignedTopologyTransaction, Option[CantonTimestamp])) - ]( - ts4 -> (dnd_p1seq, None), - ts5 -> (ptp_fred_p1, None), - ts5 -> (dtc_p2_synchronizer1, None), - ts6 -> (mds_med1_synchronizer1, None), - ts8 -> (sds_seq1_synchronizer1, None), - ).map { case (from, (tx, until)) => - StoredTopologyTransaction( - SequencedTime(from), - EffectiveTime(from), - until.map(EffectiveTime(_)), - tx, - None, - ) - } - ) - - val bootstrapTransactionsWithUpdates = StoredTopologyTransactions( - Seq[ - (CantonTimestamp, (GenericSignedTopologyTransaction, Option[CantonTimestamp])) - ]( - ts4 -> (dnd_p1seq, None), - ts5 -> (ptp_fred_p1, None), - ts5 -> (dtc_p2_synchronizer1, ts6.some), - ts6 -> (dtc_p2_synchronizer1_update, None), - ts6 -> (mds_med1_synchronizer1, ts7.some), - ts7 -> (mds_med1_synchronizer1_update, None), - ts8 -> (sds_seq1_synchronizer1, None), - ).map { case (from, (tx, until)) => - StoredTopologyTransaction( - SequencedTime(from), - EffectiveTime(from), - until.map(EffectiveTime(_)), - tx, - None, - ) - } - ) - - private def initializeStore( - storedTransactions: GenericStoredTopologyTransactions - ): FutureUnlessShutdown[TopologyStore[SynchronizerStore]] = { - val store = mkStore(synchronizer1_p1p2_physicalSynchronizerId) - val groupedBySequencedTime = - storedTransactions.result.groupBy(tx => (tx.sequenced, tx.validFrom)).toSeq.sortBy { - case (sequenced, _) => sequenced - } - import com.digitalasset.canton.util.MonadUtil.syntax.* - groupedBySequencedTime - .sequentialTraverse_ { case ((sequencedTime, effectiveTime), transactions) => - store.update( - sequencedTime, - effective = EffectiveTime(effectiveTime.value), - removeMapping = transactions.map(tx => tx.mapping.uniqueKey -> tx.serial).toMap, - removeTxs = transactions.map(_.hash).toSet, - additions = transactions.map(stored => ValidatedTopologyTransaction(stored.transaction)), - ) - } - .map(_ => store) - } - - "DownloadTopologyStateForInitializationService" should { - "return a valid 
topology state" when { - "there's only one SynchronizerTrustCertificate" in { - for { - store <- initializeStore(bootstrapTransactions) - service = new StoreBasedTopologyStateForInitializationService( - store, - minimumSequencingTime = CantonTimestamp.Epoch, - loggerFactory, - ) - storedTxs <- service.initialSnapshot(dtc_p2_synchronizer1.mapping.participantId) - } yield { - import storedTxs.result - // all transactions should be valid and not expired - result.foreach(_.validUntil shouldBe empty) - result.map(_.transaction) shouldBe Seq(dnd_p1seq, ptp_fred_p1, dtc_p2_synchronizer1) - } - } - "the first SynchronizerTrustCertificate is superseded by another one" in { - for { - store <- initializeStore(bootstrapTransactionsWithUpdates) - service = new StoreBasedTopologyStateForInitializationService( - store, - minimumSequencingTime = CantonTimestamp.Epoch, - loggerFactory, - ) - storedTxs <- service.initialSnapshot(dtc_p2_synchronizer1.mapping.participantId) - } yield { - import storedTxs.result - // all transactions should be valid and not expired - result.foreach(_.validUntil shouldBe empty) - result.map(_.transaction) shouldBe Seq(dnd_p1seq, ptp_fred_p1, dtc_p2_synchronizer1) - result.last.validUntil shouldBe None - } - } - - "the sequencer was started with a configured minimum sequencing time" in { - for { - store <- initializeStore(bootstrapTransactionsWithUpdates) - service = new StoreBasedTopologyStateForInitializationService( - store, - minimumSequencingTime = ts6, - loggerFactory, - ) - storedTxs <- service.initialSnapshot(dtc_p2_synchronizer1.mapping.participantId) - } yield { - import storedTxs.result - // all transactions should have a validUntil < ts6 - forAll(result)(_.validUntil.map(_.value) should be < Option(ts6)) - result shouldBe bootstrapTransactionsWithUpdates - .filter(_.validFrom.value < ts6) - .asSnapshotAtMaxEffectiveTime - .result - } - } - - "there's only one MediatorSynchronizerState" in { - for { - store <- initializeStore(bootstrapTransactions) - service = new StoreBasedTopologyStateForInitializationService( - store, - minimumSequencingTime = CantonTimestamp.Epoch, - loggerFactory, - ) - storedTxs <- service.initialSnapshot(med1Id) - } yield { - import storedTxs.result - // all transactions should be valid and not expired - result.foreach(_.validUntil shouldBe empty) - result.map(_.transaction) shouldBe Seq( - dnd_p1seq, - ptp_fred_p1, - dtc_p2_synchronizer1, - mds_med1_synchronizer1, - ) - } - } - - "the first MediatorSynchronizerState is superseded by another one" in { - for { - store <- initializeStore(bootstrapTransactionsWithUpdates) - service = new StoreBasedTopologyStateForInitializationService( - store, - minimumSequencingTime = CantonTimestamp.Epoch, - loggerFactory, - ) - storedTxs <- service.initialSnapshot(med1Id) - } yield { - import storedTxs.result - // all transactions should be valid, with any validUntil capped at ts6 - result.foreach(_.validUntil.foreach(_.value should be <= ts6)) - result.map(_.transaction) shouldBe Seq( - dnd_p1seq, - ptp_fred_p1, - dtc_p2_synchronizer1, - dtc_p2_synchronizer1_update, - mds_med1_synchronizer1, - ) - result.last.validUntil shouldBe None - } - } - } - "provide the snapshot with all rejected transactions and all proposals" in { - val snapshot = StoredTopologyTransactions( - Seq[ - ( - CantonTimestamp, - (GenericSignedTopologyTransaction, Option[CantonTimestamp], Option[String300]), - ) - ]( - ts4 -> (dnd_p1seq, None, None), - // expiring the proposal at ts6. 
the snapshot itself is inconsistent, but that's not what we're testing here - ts4 -> (otk_p2_proposal, ts6.some, None), - // expiring the transaction immediately - ts5 -> (ptp_fred_p1, ts5.some, Some(String300.tryCreate("rejection"))), - ts5 -> (dtc_p2_synchronizer1, ts6.some, None), - ts6 -> (dtc_p2_synchronizer1_update, None, None), - ).map { case (from, (tx, until, rejection)) => - StoredTopologyTransaction( - SequencedTime(from), - EffectiveTime(from), - until.map(EffectiveTime(_)), - tx, - rejection, - ) - } - ) - for { - store <- initializeStore(snapshot) - service = new StoreBasedTopologyStateForInitializationService( - store, - minimumSequencingTime = CantonTimestamp.Epoch, - loggerFactory, - ) - storedTxs <- service.initialSnapshot(p2Id) - } yield { - import storedTxs.result - // all transactions should be valid and not expired - result.foreach(_.validUntil.foreach(_.value should be < ts6)) - result - .map(_.transaction) shouldBe Seq( - dnd_p1seq, - otk_p2_proposal, - ptp_fred_p1, - dtc_p2_synchronizer1, - ) - succeed - } - } - - // TODO(#13371) explore all edge cases that the logic for determining a topology snapshot for initialization has to deal with - - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/InitializationStoreTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/InitializationStoreTest.scala deleted file mode 100644 index 34cf89e4dd..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/InitializationStoreTest.scala +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.store - -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.resource.DbStorage -import com.digitalasset.canton.store.db.{DbTest, H2Test, MigrationMode, PostgresTest} -import com.digitalasset.canton.topology.UniqueIdentifier -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.{BaseTest, FailOnShutdown} -import org.scalatest.wordspec.AsyncWordSpec - -trait InitializationStoreTest extends AsyncWordSpec with BaseTest with FailOnShutdown { - - val uid = UniqueIdentifier.tryFromProtoPrimitive("da::default") - val uid2 = UniqueIdentifier.tryFromProtoPrimitive("two::default") - - def myMigrationMode: MigrationMode - - def initializationStore(mk: () => InitializationStore): Unit = - "when storing the unique identifier" should { - "be able to set the value of the id" in { - val store = mk() - for { - emptyId <- store.uid - _ = emptyId shouldBe None - _ <- store.setUid(uid) - id <- store.uid - } yield id shouldBe Some(uid) - } - "fail when trying to set two different ids" in { - val store = mk() - for { - _ <- store.setUid(uid) - _ <- loggerFactory.assertInternalErrorAsyncUS[IllegalArgumentException]( - store.setUid(uid2), - _.getMessage shouldBe s"Unique id of node is already defined as $uid and can't be changed to $uid2!", - ) - } yield succeed - } - - "support dev version" in { - val store = mk() - myMigrationMode match { - case MigrationMode.Standard => - // query should fail with an exception - store.throwIfNotDev.failed.map { _ => - succeed - } - case MigrationMode.DevVersion => - store.throwIfNotDev.map(_ shouldBe true) - } - } - } -} - -trait DbInitializationStoreTest extends InitializationStoreTest { - this: DbTest => - - override 
def myMigrationMode: MigrationMode = migrationMode - - override def cleanDb( - storage: DbStorage - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Int] = { - import storage.api.* - storage.update( - sqlu"truncate table common_node_id", - operationName = s"${this.getClass}: truncate table common_node_id", - ) - } - - "DbInitializationStore" should { - behave like initializationStore(() => - new DbInitializationStore(storage, timeouts, loggerFactory) - ) - } -} - -class DbInitializationStoreTestH2 extends DbInitializationStoreTest with H2Test - -class DbInitializationStoreTestPostgres extends DbInitializationStoreTest with PostgresTest - -class InitializationStoreTestInMemory extends InitializationStoreTest { - - override def myMigrationMode: MigrationMode = MigrationMode.Standard - - "InMemoryInitializationStore" should { - behave like initializationStore(() => new InMemoryInitializationStore(loggerFactory)) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTest.scala deleted file mode 100644 index b3e3b52c90..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTest.scala +++ /dev/null @@ -1,1598 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.store - -import cats.syntax.option.* -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.FailOnShutdown -import com.digitalasset.canton.config.CantonRequireTypes.{String255, String300} -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.topology.processing.{ - EffectiveTime, - InitialTopologySnapshotValidator, - SequencedTime, -} -import com.digitalasset.canton.topology.store.StoredTopologyTransactions.PositiveStoredTopologyTransactions -import com.digitalasset.canton.topology.store.TopologyStore.EffectiveStateChange -import com.digitalasset.canton.topology.store.TopologyTransactionRejection.InvalidTopologyMapping -import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction -import com.digitalasset.canton.topology.transaction.TopologyMapping.Code -import com.digitalasset.canton.topology.transaction.{TopologyMapping, *} -import com.digitalasset.canton.topology.{ - DefaultTestIdentities, - ParticipantId, - PartyId, - PhysicalSynchronizerId, -} -import com.digitalasset.canton.util.MonadUtil -import com.digitalasset.canton.version.ProtocolVersion -import org.scalatest.Assertion -import org.scalatest.wordspec.AsyncWordSpec - -trait TopologyStoreTest extends AsyncWordSpec with TopologyStoreTestBase with FailOnShutdown { - - val testData = new TopologyStoreTestData(testedProtocolVersion, loggerFactory, executionContext) - import testData.* - - private lazy val submissionId = String255.tryCreate("submissionId") - private lazy val submissionId2 = String255.tryCreate("submissionId2") - private lazy val submissionId3 = String255.tryCreate("submissionId3") - - protected def partyMetadataStore(mk: () => PartyMetadataStore): Unit = { - import DefaultTestIdentities.* - "inserting new succeeds" in { - val store = mk() - for { - _ <- insertOrUpdatePartyMetadata(store)( - party1, - Some(participant1), - 
CantonTimestamp.Epoch, - submissionId, - ) - fetch <- store.metadataForParties(Seq(party1)) - } yield { - fetch shouldBe NonEmpty( - Seq, - Some(PartyMetadata(party1, Some(participant1))(CantonTimestamp.Epoch, submissionId)), - ) - } - } - - "updating existing succeeds" in { - val store = mk() - for { - _ <- insertOrUpdatePartyMetadata(store)( - party1, - None, - CantonTimestamp.Epoch, - submissionId, - ) - _ <- insertOrUpdatePartyMetadata(store)( - party2, - None, - CantonTimestamp.Epoch, - submissionId, - ) - _ <- insertOrUpdatePartyMetadata(store)( - party1, - Some(participant1), - CantonTimestamp.Epoch, - submissionId, - ) - _ <- insertOrUpdatePartyMetadata(store)( - party2, - Some(participant3), - CantonTimestamp.Epoch, - submissionId, - ) - metadata <- store.metadataForParties(Seq(party1, party2)) - } yield { - metadata shouldBe NonEmpty( - Seq, - Some( - PartyMetadata(party1, Some(participant1))( - CantonTimestamp.Epoch, - String255.empty, - ) - ), - Some( - PartyMetadata(party2, Some(participant3))( - CantonTimestamp.Epoch, - String255.empty, - ) - ), - ) - } - } - - "updating existing succeeds via batch" in { - val store = mk() - for { - _ <- store.insertOrUpdatePartyMetadata( - Seq( - PartyMetadata(party1, None)(CantonTimestamp.Epoch, submissionId), - PartyMetadata(party2, None)(CantonTimestamp.Epoch, submissionId), - PartyMetadata(party1, Some(participant1))(CantonTimestamp.Epoch, submissionId), - PartyMetadata(party2, Some(participant3))(CantonTimestamp.Epoch, submissionId), - ) - ) - metadata <- store.metadataForParties(Seq(party1, party3, party2)) - } yield { - metadata shouldBe NonEmpty( - Seq, - Some( - PartyMetadata(party1, Some(participant1))(CantonTimestamp.Epoch, String255.empty) - ), - None, // checking that unknown party appears in the matching slot - Some( - PartyMetadata(party2, Some(participant3))(CantonTimestamp.Epoch, String255.empty) - ), - ) - } - } - - "deal with delayed notifications" in { - val store = mk() - val rec1 = - PartyMetadata(party1, Some(participant1))(CantonTimestamp.Epoch, submissionId) - val rec2 = - PartyMetadata(party2, Some(participant3))(CantonTimestamp.Epoch, submissionId2) - val rec3 = - PartyMetadata(party2, Some(participant1))( - CantonTimestamp.Epoch.immediateSuccessor, - submissionId3, - ) - val rec4 = - PartyMetadata(party3, Some(participant2))(CantonTimestamp.Epoch, submissionId3) - for { - _ <- store.insertOrUpdatePartyMetadata(Seq(rec1, rec2, rec3, rec4)) - _ <- store.markNotified(rec2.effectiveTimestamp, Seq(rec2.partyId, rec4.partyId)) - notNotified <- store.fetchNotNotified().map(_.toSet) - } yield { - notNotified shouldBe Set(rec1, rec3) - } - } - - } - - private def insertOrUpdatePartyMetadata(store: PartyMetadataStore)( - partyId: PartyId, - participantId: Option[ParticipantId], - effectiveTimestamp: CantonTimestamp, - submissionId: String255, - ) = - store.insertOrUpdatePartyMetadata( - Seq(PartyMetadata(partyId, participantId)(effectiveTimestamp, submissionId)) - ) - - // TODO(#14066): Test coverage is rudimentary - enough to convince ourselves that queries basically seem to work. - // Increase coverage. 
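(The suite below repeatedly asserts validity-interval bookkeeping on stored transactions: a transaction is valid from its effective time until a later update caps its validUntil, and snapshot queries select by that interval. As a reading aid, here is a minimal self-contained sketch of those semantics in plain Scala; Stored, snapshot and expire are illustrative stand-ins, not the real TopologyStore API.)

  final case class Stored(validFrom: Long, validUntil: Option[Long], tx: String)

  // Snapshot semantics as asserted in the tests below: validFrom is treated
  // as exclusive and validUntil as inclusive at the query timestamp asOf.
  def snapshot(store: Seq[Stored], asOf: Long): Seq[Stored] =
    store.filter(s => s.validFrom < asOf && s.validUntil.forall(asOf <= _))

  // Superseding a mapping at time t caps the previous interval, so the old
  // version drops out of snapshots taken strictly after t.
  def expire(s: Stored, t: Long): Stored = s.copy(validUntil = Some(t))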
- def topologyStore( - mk: PhysicalSynchronizerId => TopologyStore[TopologyStoreId.SynchronizerStore] - ): Unit = { - - val bootstrapTransactions = StoredTopologyTransactions( - Seq[ - ( - CantonTimestamp, - (GenericSignedTopologyTransaction, Option[CantonTimestamp], Option[String]), - ) - ]( - ts1 -> (nsd_p1, None, None), - ts1 -> (nsd_p2, None, None), - ts1 -> (dnd_p1p2, None, None), - ts1 -> (dop_synchronizer1_proposal, ts2.some, None), - ts2 -> (dop_synchronizer1, None, None), - ts2 -> (otk_p1, None, None), - ts3 -> (p1_permission_daSynchronizer, ts3.some, None), - ts3 -> (p1_permission_daSynchronizer_removal, None, None), - ts3 -> (nsd_seq, None, None), - ts3 -> (dtc_p1_synchronizer1, None, None), - ts3 -> (ptp_fred_p1_proposal, ts5.some, None), - ts4 -> (dnd_p1seq, None, None), - ts4 -> (otk_p2_proposal, None, None), - ts5 -> (ptp_fred_p1, None, None), - ts5 -> (dtc_p2_synchronizer1, ts6.some, None), - ts6 -> (dtc_p2_synchronizer1_update, None, None), - ts6 -> (mds_med1_synchronizer1_invalid, ts6.some, s"No delegation found for keys ${seqKey.fingerprint}".some), - ).map { case (from, (tx, until, rejection)) => - StoredTopologyTransaction( - SequencedTime(from), - EffectiveTime(from), - until.map(EffectiveTime(_)), - tx, - rejection.map(String300.tryCreate(_)), - ) - } - ) - - "topology store" should { - - "clear all data without affecting other stores" in { - val store1 = mk(synchronizer1_p1p2_physicalSynchronizerId) - val store2 = mk(da_p1p2_physicalSynchronizerId) - - for { - _ <- update(store1, ts1, add = Seq(nsd_p1)) - _ <- store1.updateDispatchingWatermark(ts1) - - _ <- update(store2, ts1, add = Seq(nsd_p2)) - _ <- store2.updateDispatchingWatermark(ts1) - - _ <- store1.deleteAllData() - - watermarkStore1 <- store1.currentDispatchingWatermark - maxTimestampStore1 <- store1.maxTimestamp( - SequencedTime.MaxValue, - includeRejected = true, - ) - - watermarkStore2 <- store2.currentDispatchingWatermark - maxTimestampStore2 <- store2.maxTimestamp( - SequencedTime.MaxValue, - includeRejected = true, - ) - - } yield { - maxTimestampStore1 shouldBe empty - watermarkStore1 shouldBe empty - - maxTimestampStore2 shouldBe Some((SequencedTime(ts1), EffectiveTime(ts1))) - watermarkStore2 shouldBe Some(ts1) - } - } - - "properly evolve party participant hosting" in { - val store = mk(synchronizer1_p1p2_physicalSynchronizerId) - def ptpFred( - participants: HostingParticipant* - ) = - makeSignedTx( - PartyToParticipant.tryCreate( - partyId = `fred::p2Namepsace`, - threshold = PositiveInt.one, - participants = participants, - ) - )(p1Key) - val ptp1 = ptpFred( - HostingParticipant(p1Id, ParticipantPermission.Submission) - ) - val ptp2 = ptpFred( - HostingParticipant(p1Id, ParticipantPermission.Submission), - HostingParticipant(p2Id, ParticipantPermission.Confirmation, onboarding = true), - ) - val ptp3 = ptpFred( - HostingParticipant(p1Id, ParticipantPermission.Submission), - HostingParticipant(p2Id, ParticipantPermission.Confirmation, onboarding = false), - ) - for { - _ <- update(store, ts1, add = Seq(ptp1)) - _ <- update(store, ts2, add = Seq(ptp2), removeTxs = Set(ptp1.transaction.hash)) - _ <- update(store, ts3, add = Seq(ptp3), removeTxs = Set(ptp2.transaction.hash)) - snapshot1 <- inspect(store, TimeQuery.Snapshot(ts1.immediateSuccessor)) - snapshot2 <- inspect(store, TimeQuery.Snapshot(ts2.immediateSuccessor)) - snapshot3 <- inspect(store, TimeQuery.Snapshot(ts3.immediateSuccessor)) - } yield { - expectTransactions(snapshot1, Seq(ptp1)) - expectTransactions(snapshot2, Seq(ptp2)) - 
expectTransactions(snapshot3, Seq(ptp3)) - } - } - - "deal with authorized transactions" when { - "handle simple operations" in { - val store = mk(synchronizer1_p1p2_physicalSynchronizerId) - - for { - _ <- update(store, ts1, add = Seq(nsd_p1, dop_synchronizer1_proposal)) - _ <- update(store, ts2, add = Seq(otk_p1)) - _ <- update(store, ts5, add = Seq(dtc_p2_synchronizer1)) - // in the following updates to the store, we add the same onboarding transaction twice - // for each type of node (participant, mediator, sequencer), to test that - // findFirstTrustCertificateForParticipant (and respectively for mediator and sequencer) - // really finds the onboarding transaction with the lowest serial and lowest effective time. - _ <- update( - store, - ts6, - add = Seq(mds_med1_synchronizer1, dtc_p2_synchronizer1), - removeMapping = - Map(dtc_p2_synchronizer1.mapping.uniqueKey -> dtc_p2_synchronizer1.serial), - ) - _ <- update( - store, - ts7, - add = Seq(mds_med1_synchronizer1, sds_seq1_synchronizer1), - removeMapping = - Map(mds_med1_synchronizer1.mapping.uniqueKey -> mds_med1_synchronizer1.serial), - ) - _ <- update( - store, - ts8, - add = Seq(sds_seq1_synchronizer1), - removeMapping = - Map(sds_seq1_synchronizer1.mapping.uniqueKey -> sds_seq1_synchronizer1.serial), - ) - - maxTs <- store.maxTimestamp(SequencedTime.MaxValue, includeRejected = true) - retrievedTx <- store.findStored(CantonTimestamp.MaxValue, nsd_p1) - txProtocolVersion <- store.findStoredForVersion( - CantonTimestamp.MaxValue, - nsd_p1.transaction, - ProtocolVersion.v34, - ) - - proposalTransactions <- inspect( - store, - TimeQuery.Range(ts1.some, ts4.some), - proposals = true, - ) - proposalTransactionsFiltered <- inspect( - store, - TimeQuery.Range(ts1.some, ts4.some), - proposals = true, - types = Seq( - SynchronizerParametersState.code, - PartyToParticipant.code, - ), // to test the types filter - ) - proposalTransactionsFiltered2 <- inspect( - store, - TimeQuery.Range(ts1.some, ts4.some), - proposals = true, - types = Seq(PartyToParticipant.code), - ) - - positiveProposals <- findPositiveTransactions(store, ts6, isProposal = true) - - txByTxHash <- store.findProposalsByTxHash( - EffectiveTime(ts1.immediateSuccessor), // increase since exclusive - NonEmpty(Set, dop_synchronizer1_proposal.hash), - ) - txByMappingHash <- store.findTransactionsForMapping( - EffectiveTime(ts2.immediateSuccessor), // increase since exclusive - NonEmpty(Set, otk_p1.mapping.uniqueKey), - ) - - _ <- store.updateDispatchingWatermark(ts1) - tsWatermark <- store.currentDispatchingWatermark - - _ <- update( - store, - ts4, - removeMapping = Map(nsd_p1.mapping.uniqueKey -> nsd_p1.serial), - ) - removedByMappingHash <- store.findStored(CantonTimestamp.MaxValue, nsd_p1) - _ <- update(store, ts4, removeTxs = Set(otk_p1.hash)) - removedByTxHash <- store.findStored(CantonTimestamp.MaxValue, otk_p1) - - mdsTx <- store.findFirstMediatorStateForMediator( - mds_med1_synchronizer1.mapping.active.headOption.getOrElse(fail()) - ) - - dtsTx <- store.findFirstTrustCertificateForParticipant( - dtc_p2_synchronizer1.mapping.participantId - ) - sdsTx <- store.findFirstSequencerStateForSequencer( - sds_seq1_synchronizer1.mapping.active.headOption.getOrElse(fail()) - ) - } yield { - assert(maxTs.contains((SequencedTime(ts8), EffectiveTime(ts8)))) - retrievedTx.map(_.transaction) shouldBe Some(nsd_p1) - txProtocolVersion.map(_.transaction) shouldBe Some(nsd_p1) - - expectTransactions( - proposalTransactions, - Seq( - dop_synchronizer1_proposal - ), // only proposal 
transaction, TimeQuery.Range is inclusive on both sides - ) - expectTransactions( - proposalTransactionsFiltered, - Seq( - dop_synchronizer1_proposal - ), - ) - expectTransactions( - proposalTransactionsFiltered2, - Nil, // no proposal transaction of type PartyToParticipant in the range - ) - expectTransactions(positiveProposals, Seq(dop_synchronizer1_proposal)) - - txByTxHash shouldBe Seq(dop_synchronizer1_proposal) - txByMappingHash shouldBe Seq(otk_p1) - - tsWatermark shouldBe Some(ts1) - - removedByMappingHash.flatMap(_.validUntil) shouldBe Some(EffectiveTime(ts4)) - removedByTxHash.flatMap(_.validUntil) shouldBe Some(EffectiveTime(ts4)) - - dtsTx.value.transaction shouldBe dtc_p2_synchronizer1 - dtsTx.value.validFrom.value shouldBe ts5 - - mdsTx.value.transaction shouldBe mds_med1_synchronizer1 - mdsTx.value.validFrom.value shouldBe ts6 - - sdsTx.value.transaction shouldBe sds_seq1_synchronizer1 - sdsTx.value.validFrom.value shouldBe ts7 - } - } - "able to filter with inspect" in { - val store = mk(synchronizer1_p1p2_physicalSynchronizerId) - - for { - _ <- update(store, ts2, add = Seq(otk_p1)) - _ <- update(store, ts5, add = Seq(dtc_p2_synchronizer1)) - _ <- update(store, ts6, add = Seq(mds_med1_synchronizer1)) - - proposalTransactions <- inspect( - store, - TimeQuery.HeadState, - ) - proposalTransactionsFiltered <- inspect( - store, - TimeQuery.HeadState, - types = Seq( - SynchronizerTrustCertificate.code, - OwnerToKeyMapping.code, - ), // to test the types filter - ) - proposalTransactionsFiltered2 <- inspect( - store, - TimeQuery.HeadState, - types = Seq(PartyToParticipant.code), - ) - } yield { - expectTransactions( - proposalTransactions, - Seq( - otk_p1, - dtc_p2_synchronizer1, - mds_med1_synchronizer1, - ), - ) - expectTransactions( - proposalTransactionsFiltered, - Seq( - otk_p1, - dtc_p2_synchronizer1, - ), - ) - expectTransactions( - proposalTransactionsFiltered2, - Nil, // no proposal transaction of type PartyToParticipant in the range - ) - } - } - - "able to inspect" in { - val store = mk(synchronizer1_p1p2_physicalSynchronizerId) - - for { - _ <- new InitialTopologySnapshotValidator( - protocolVersion = testedProtocolVersion, - pureCrypto = testData.factory.syncCryptoClient.crypto.pureCrypto, - store = store, - timeouts = timeouts, - loggerFactory = loggerFactory, - ).validateAndApplyInitialTopologySnapshot(bootstrapTransactions) - .valueOrFail("topology bootstrap") - headStateTransactions <- inspect(store, TimeQuery.HeadState) - rangeBetweenTs2AndTs3Transactions <- inspect( - store, - TimeQuery.Range(ts2.some, ts3.some), - ) - snapshotAtTs3Transactions <- inspect( - store, - TimeQuery.Snapshot(ts3), - ) - decentralizedNamespaceTransactions <- inspect( - store, - TimeQuery.Range(ts1.some, ts4.some), - types = Seq(DecentralizedNamespaceDefinition.code), - ) - removalTransactions <- inspect( - store, - timeQuery = TimeQuery.Range(ts1.some, ts4.some), - op = TopologyChangeOp.Remove.some, - ) - idP1Transactions <- inspect( - store, - timeQuery = TimeQuery.Range(ts1.some, ts4.some), - idFilter = Some(p1Id.identifier.unwrap), - ) - idNamespaceTransactions <- inspect( - store, - timeQuery = TimeQuery.Range(ts1.some, ts4.some), - namespaceFilter = Some(dns_p1seq.filterString), - ) - allParties <- inspectKnownParties(store, ts6) - onlyFred <- inspectKnownParties( - store, - ts6, - filterParty = `fred::p2Namepsace`.filterString, - ) - fredFullySpecified <- inspectKnownParties( - store, - ts6, - filterParty = `fred::p2Namepsace`.uid.toProtoPrimitive, - filterParticipant = 
p1Id.uid.toProtoPrimitive, - ) - onlyParticipant2 <- inspectKnownParties(store, ts6, filterParticipant = "participant2") - neitherParty <- inspectKnownParties(store, ts6, "fred::canton", "participant2") - } yield { - expectTransactions( - headStateTransactions, - Seq( - nsd_p1, - nsd_p2, - dnd_p1p2, - dop_synchronizer1, - otk_p1, - p1_permission_daSynchronizer_removal, - nsd_seq, - dtc_p1_synchronizer1, - dnd_p1seq, - ptp_fred_p1, - dtc_p2_synchronizer1_update, - ), - ) - expectTransactions( - rangeBetweenTs2AndTs3Transactions, - Seq( - dop_synchronizer1, - otk_p1, - p1_permission_daSynchronizer, - p1_permission_daSynchronizer_removal, - nsd_seq, - dtc_p1_synchronizer1, - ), - ) - expectTransactions( - snapshotAtTs3Transactions, - Seq( - nsd_p1, - nsd_p2, - dnd_p1p2, - dop_synchronizer1, - otk_p1, - ), // tx2 include as until is inclusive, tx3 missing as from exclusive - ) - expectTransactions(decentralizedNamespaceTransactions, Seq(dnd_p1p2, dnd_p1seq)) - expectTransactions(removalTransactions, Seq(p1_permission_daSynchronizer_removal)) - expectTransactions( - idP1Transactions, - Seq( - otk_p1, - p1_permission_daSynchronizer, - p1_permission_daSynchronizer_removal, - dtc_p1_synchronizer1, - ), - ) - expectTransactions(idNamespaceTransactions, Seq(dnd_p1seq)) - - allParties shouldBe Set( - dtc_p1_synchronizer1.mapping.participantId.adminParty, - ptp_fred_p1.mapping.partyId, - dtc_p2_synchronizer1.mapping.participantId.adminParty, - ) - onlyFred shouldBe Set(ptp_fred_p1.mapping.partyId) - fredFullySpecified shouldBe Set(ptp_fred_p1.mapping.partyId) - onlyParticipant2 shouldBe Set(dtc_p2_synchronizer1.mapping.participantId.adminParty) - neitherParty shouldBe Set.empty - } - } - - "handle rejected transactions" in { - val store = mk(synchronizer1_p1p2_physicalSynchronizerId) - - val bootstrapTransactions = StoredTopologyTransactions( - Seq[ - ( - CantonTimestamp, - (GenericSignedTopologyTransaction, Option[CantonTimestamp], Option[String300]), - ) - ]( - ts1 -> (nsd_p1, None, None), - ts1 -> (otk_p1, None, None), - ts2 -> (nsd_seq_invalid, Some(ts2), Some( - String300.tryCreate(s"No delegation found for keys ${p1Key.fingerprint}") - )), - ).map { case (from, (tx, until, rejectionReason)) => - StoredTopologyTransaction( - SequencedTime(from), - EffectiveTime(from), - until.map(EffectiveTime(_)), - tx, - rejectionReason, - ) - } - ) - - for { - _ <- new InitialTopologySnapshotValidator( - testedProtocolVersion, - factory.syncCryptoClient.crypto.pureCrypto, - store, - timeouts, - loggerFactory, - ).validateAndApplyInitialTopologySnapshot(bootstrapTransactions) - .valueOrFail("topology bootstrap") - - headStateTransactions <- inspect(store, TimeQuery.HeadState) - } yield { - expectTransactions( - headStateTransactions, - Seq(nsd_p1, otk_p1), // tx3_NSD was rejected - ) - } - } - - "able to findEssentialStateAtSequencedTime" in { - val store = mk(synchronizer1_p1p2_physicalSynchronizerId) - for { - _ <- update(store, ts2, add = Seq(otk_p1)) - _ <- update(store, ts5, add = Seq(dtc_p2_synchronizer1)) - _ <- update(store, ts6, add = Seq(mds_med1_synchronizer1)) - - transactionsAtTs6 <- store.findEssentialStateAtSequencedTime( - asOfInclusive = SequencedTime(ts6), - includeRejected = true, - ) - } yield { - expectTransactions( - transactionsAtTs6, - Seq( - otk_p1, - dtc_p2_synchronizer1, - mds_med1_synchronizer1, - ), - ) - } - } - - "able to find positive transactions" in { - val store = mk(synchronizer1_p1p2_physicalSynchronizerId) - - for { - _ <- new InitialTopologySnapshotValidator( - 
testedProtocolVersion, - factory.syncCryptoClient.crypto.pureCrypto, - store, - timeouts, - loggerFactory, - ).validateAndApplyInitialTopologySnapshot(bootstrapTransactions) - .valueOrFail("topology bootstrap") - - positiveTransactions <- findPositiveTransactions(store, ts6) - positiveTransactionsExclusive <- findPositiveTransactions( - store, - ts5, - ) - positiveTransactionsInclusive <- findPositiveTransactions( - store, - ts5, - asOfInclusive = true, - ) - selectiveMappingTransactions <- findPositiveTransactions( - store, - ts6, - types = Seq( - DecentralizedNamespaceDefinition.code, - OwnerToKeyMapping.code, - PartyToParticipant.code, - ), - ) - uidFilterTransactions <- findPositiveTransactions( - store, - ts6, - filterUid = Some( - Seq( - ptp_fred_p1.mapping.partyId.uid, - dtc_p2_synchronizer1.mapping.participantId.uid, - ) - ), - ) - namespaceFilterTransactions <- findPositiveTransactions( - store, - ts6, - filterNamespace = Some( - Seq(dns_p1seq, p2Namespace) - ), - ) - - essentialStateTransactions <- store.findEssentialStateAtSequencedTime( - SequencedTime(ts6), - includeRejected = false, - ) - - essentialStateTransactionsWithRejections <- store.findEssentialStateAtSequencedTime( - SequencedTime(ts6), - includeRejected = true, - ) - - upcomingTransactions <- store.findUpcomingEffectiveChanges(asOfInclusive = ts4) - - dispatchingTransactionsAfter <- store.findDispatchingTransactionsAfter( - timestampExclusive = ts1, - limit = None, - ) - - onboardingTransactionUnlessShutdown <- store - .findParticipantOnboardingTransactions( - p2Id, - synchronizer1_p1p2_synchronizerId, - ) - } yield { - expectTransactions( - positiveTransactions, - Seq( - nsd_p1, - nsd_p2, - dnd_p1p2, - dop_synchronizer1, - otk_p1, - nsd_seq, - dtc_p1_synchronizer1, - dnd_p1seq, - ptp_fred_p1, - dtc_p2_synchronizer1, - ), - ) - expectTransactions( - positiveTransactionsExclusive, - Seq( - nsd_p1, - nsd_p2, - dnd_p1p2, - dop_synchronizer1, - otk_p1, - nsd_seq, - dtc_p1_synchronizer1, - dnd_p1seq, - ), - ) - expectTransactions( - positiveTransactionsInclusive, - positiveTransactions.result.map(_.transaction), - ) - expectTransactions( - selectiveMappingTransactions, - Seq(dnd_p1p2, otk_p1, dnd_p1seq, ptp_fred_p1), - ) - expectTransactions(uidFilterTransactions, Seq(ptp_fred_p1, dtc_p2_synchronizer1)) - expectTransactions( - namespaceFilterTransactions, - Seq(nsd_p2, dnd_p1seq, ptp_fred_p1, dtc_p2_synchronizer1), - ) - - // Essential state currently encompasses all transactions at the specified time - expectTransactions( - essentialStateTransactions, - bootstrapTransactions.result - .filter(tx => tx.validFrom.value <= ts6 && tx.rejectionReason.isEmpty) - .map(_.transaction), - ) - - // Essential state with rejection currently encompasses all transactions at the specified time - expectTransactions( - essentialStateTransactionsWithRejections, - bootstrapTransactions.result.map(_.transaction), - ) - - upcomingTransactions shouldBe bootstrapTransactions.result.collect { - case tx if tx.validFrom.value >= ts4 => - TopologyStore.Change.Other(tx.sequenced, tx.validFrom) - }.distinct - - expectTransactions( - dispatchingTransactionsAfter, - Seq( - dop_synchronizer1, - otk_p1, - p1_permission_daSynchronizer, - p1_permission_daSynchronizer_removal, - nsd_seq, - dtc_p1_synchronizer1, - dnd_p1seq, - otk_p2_proposal, - ptp_fred_p1, - dtc_p2_synchronizer1, - dtc_p2_synchronizer1_update, - ), - ) - - onboardingTransactionUnlessShutdown shouldBe Seq( - nsd_p2, - dtc_p2_synchronizer1, - dtc_p2_synchronizer1_update, - ) - - } - } - - 
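(The next test guards against a subtle storage pitfall: if the database uniqueness constraint covered only the mapping's unique key, a rejected and an accepted transaction for the same mapping stored within one batch would collide, and the accepted one could be lost. A minimal sketch of the distinction, with illustrative names rather than the real schema:)

  final case class Row(mappingKey: String, txHash: String, rejected: Boolean)

  val batch = Seq(
    Row("otk(p1)", "badTxHash", rejected = true),   // rejected variant of the mapping
    Row("otk(p1)", "goodTxHash", rejected = false), // accepted variant of the mapping
  )

  // Keying on the mapping alone conflates the two rows...
  assert(batch.map(_.mappingKey).distinct.size == 1)
  // ...whereas a key that also covers the transaction hash keeps them apart.
  assert(batch.map(r => (r.mappingKey, r.txHash)).distinct.size == 2)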
"correctly store rejected and accepted topology transactions with the same unique key within a batch" in { - val store = mk(synchronizer1_p1p2_physicalSynchronizerId) - - // * create two transactions with the same unique key but different content. - // * use the signatures of the transaction to accept for the transaction to reject. - // * put the rejected transaction before accepted one in the batch to be stored. - // => if the DB unique key is not specific enough (ie doesn't cover the content), then - // the accepted transaction will not be stored correctly. - - val good_otk = makeSignedTx( - OwnerToKeyMapping(p1Id, NonEmpty(Seq, factory.SigningKeys.key1)) - )(p1Key, factory.SigningKeys.key1) - - val bad_otkTx = makeSignedTx( - OwnerToKeyMapping(p1Id, NonEmpty(Seq, factory.EncryptionKeys.key2)) - )(p1Key, factory.SigningKeys.key2) - val bad_otk = bad_otkTx - .copy(signatures = - good_otk.signatures.map(sig => - SingleTransactionSignature(bad_otkTx.hash, sig.signature) - ) - ) - - for { - _ <- store.update( - SequencedTime(ts1), - EffectiveTime(ts1), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = Seq( - ValidatedTopologyTransaction( - bad_otk, - Some(TopologyTransactionRejection.InvalidTopologyMapping("bad signature")), - ), - ValidatedTopologyTransaction(good_otk), - ), - ) - txsAtTs2 <- findPositiveTransactions( - store, - asOf = ts2, - types = Seq(Code.OwnerToKeyMapping), - ) - } yield txsAtTs2.result.loneElement.transaction shouldBe good_otk - } - } - - "compute correctly effective state changes" when { - def assertResult( - actual: Seq[EffectiveStateChange], - expected: Seq[EffectiveStateChange], - ): Assertion = { - type PosTxSet = Set[StoredTopologyTransaction[TopologyChangeOp.Replace, TopologyMapping]] - val emptyPositive: PositiveStoredTopologyTransactions = - StoredTopologyTransactions(Nil) - def makeComparable( - effectiveStateChange: EffectiveStateChange - ): (EffectiveStateChange, PosTxSet, PosTxSet) = - ( - effectiveStateChange.copy( - before = emptyPositive, // clear for comparison - after = emptyPositive, // clear for comparison - ), - effectiveStateChange.before.result.toSet, - effectiveStateChange.after.result.toSet, - ) - actual.map(makeComparable).toSet shouldBe expected.map(makeComparable).toSet - } - - "store is evolving in different ways" in { - val store = mk(synchronizer1_p1p2_physicalSynchronizerId) - - for { - // store is empty - atResultEmpty <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts1, - onlyAtEffective = true, - ) - fromResultEmpty <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts1, - onlyAtEffective = false, - ) - _ = { - atResultEmpty shouldBe List() - fromResultEmpty shouldBe List() - } - - // added a party mapping - partyToParticipant1 = makeSignedTx( - PartyToParticipant - .create( - partyId = party1, - threshold = PositiveInt.one, - participants = Seq( - HostingParticipant( - participantId = p1Id, - permission = ParticipantPermission.Submission, - ) - ), - ) - .value - )(p1Key) - proposedPartyToParticipant = makeSignedTx( - mapping = PartyToParticipant - .create( - partyId = party3, - threshold = PositiveInt.one, - participants = Seq( - HostingParticipant( - participantId = p1Id, - permission = ParticipantPermission.Submission, - ) - ), - ) - .value, - isProposal = true, - )(p1Key) - rejectedPartyToParticipant = makeSignedTx( - mapping = PartyToParticipant - .create( - partyId = party2, - threshold = PositiveInt.one, - participants = Seq( - HostingParticipant( - participantId = p1Id, - 
permission = ParticipantPermission.Submission, - ) - ), - ) - .value - )(p1Key) - _ <- store.update( - SequencedTime(ts1), - EffectiveTime(ts2), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = Seq( - ValidatedTopologyTransaction( - transaction = partyToParticipant1, - rejectionReason = None, - ), - ValidatedTopologyTransaction( - transaction = rejectedPartyToParticipant, - rejectionReason = Some(InvalidTopologyMapping("sad")), - ), - ValidatedTopologyTransaction( - transaction = proposedPartyToParticipant, - rejectionReason = None, - ), - ), - ) - _ <- for { - atTs1Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts1, - onlyAtEffective = true, - ) - - atTs2ResultWithoutMappingFilter <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts2, - onlyAtEffective = true, - ) - atTs2ResultWithMappingFilter <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts2, - onlyAtEffective = true, - filterTypes = Some(Seq(TopologyMapping.Code.PartyToParticipant)), - ) - // expect no results for the OwnerToKeyMapping filter - atTs2ResultWithMappingEmptyFilter <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts2, - onlyAtEffective = true, - filterTypes = Some(Seq(TopologyMapping.Code.OwnerToKeyMapping)), - ) - atTs3Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts3, - onlyAtEffective = true, - ) - fromTs1Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts1, - onlyAtEffective = false, - ) - fromTs2Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts2, - onlyAtEffective = false, - ) - fromTs3Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts3, - onlyAtEffective = false, - ) - } yield { - val resultTs2 = EffectiveStateChange( - effectiveTime = EffectiveTime(ts2), - sequencedTime = SequencedTime(ts1), - before = StoredTopologyTransactions[TopologyChangeOp.Replace, TopologyMapping]( - Seq.empty - ), - after = StoredTopologyTransactions[TopologyChangeOp.Replace, TopologyMapping]( - Seq( - StoredTopologyTransaction( - sequenced = SequencedTime(ts1), - validFrom = EffectiveTime(ts2), - validUntil = None, - transaction = partyToParticipant1, - rejectionReason = None, - ) - ) - ), - ) - assertResult(atTs1Result, Seq.empty) - - assertResult(atTs2ResultWithoutMappingFilter, Seq(resultTs2)) - assertResult(atTs2ResultWithMappingFilter, Seq(resultTs2)) - assertResult(atTs2ResultWithMappingEmptyFilter, Seq()) - - assertResult(atTs3Result, Seq.empty) - assertResult(fromTs1Result, Seq(resultTs2)) - assertResult(fromTs2Result, Seq(resultTs2)) - assertResult(fromTs3Result, Seq.empty) - () - } - - // change a party mapping, and add a mapping for a different party - partyToParticipant2transient1 = makeSignedTx( - mapping = PartyToParticipant - .create( - partyId = party1, - threshold = PositiveInt.one, - participants = Seq( - HostingParticipant( - participantId = p1Id, - permission = ParticipantPermission.Confirmation, - ), - HostingParticipant( - participantId = p2Id, - permission = ParticipantPermission.Submission, - ), - ), - ) - .value, - serial = PositiveInt.two, - )(p1Key, p2Key) - partyToParticipant2transient2 = makeSignedTx( - mapping = PartyToParticipant - .create( - partyId = party1, - threshold = PositiveInt.one, - participants = Seq( - HostingParticipant( - participantId = p1Id, - permission = ParticipantPermission.Observation, - ), - HostingParticipant( - participantId = p2Id, - permission = ParticipantPermission.Submission, - ), - ), - ) - .value, - serial = PositiveInt.three, - )(p1Key, p2Key) - 
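// Note: the transactions with serials 2 to 4 (the two defined above and the one
// defined next) are stored with expireImmediately = true in the update below, so
// they never become part of the effective state; only the final Replace with
// serial 5 (partyToParticipant2) is expected to appear in the state change at ts3.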
partyToParticipant2transient3 = makeSignedTx( - mapping = PartyToParticipant - .create( - partyId = party1, - threshold = PositiveInt.one, - participants = Seq( - HostingParticipant( - participantId = p1Id, - permission = ParticipantPermission.Observation, - ), - HostingParticipant( - participantId = p2Id, - permission = ParticipantPermission.Submission, - ), - ), - ) - .value, - op = TopologyChangeOp.Remove, - serial = PositiveInt.tryCreate(4), - )(p1Key) - partyToParticipant2 = makeSignedTx( - mapping = PartyToParticipant - .create( - partyId = party1, - threshold = PositiveInt.one, - participants = Seq( - HostingParticipant( - participantId = p1Id, - permission = ParticipantPermission.Submission, - ), - HostingParticipant( - participantId = p2Id, - permission = ParticipantPermission.Submission, - ), - ), - ) - .value, - serial = PositiveInt.tryCreate(5), - )(p1Key, p2Key) - party2ToParticipant = makeSignedTx( - mapping = PartyToParticipant - .create( - partyId = party2, - threshold = PositiveInt.one, - participants = Seq( - HostingParticipant( - participantId = p1Id, - permission = ParticipantPermission.Confirmation, - ), - HostingParticipant( - participantId = p2Id, - permission = ParticipantPermission.Observation, - ), - ), - ) - .value, - serial = PositiveInt.one, - )(p1Key, p2Key) - _ <- store.update( - SequencedTime(ts2), - EffectiveTime(ts3), - removeMapping = Map( - partyToParticipant2.mapping.uniqueKey -> partyToParticipant2.serial - ), - removeTxs = Set.empty, - additions = Seq( - ValidatedTopologyTransaction( - transaction = partyToParticipant2transient1, - rejectionReason = None, - expireImmediately = true, - ), - ValidatedTopologyTransaction( - transaction = partyToParticipant2transient2, - rejectionReason = None, - expireImmediately = true, - ), - ValidatedTopologyTransaction( - transaction = partyToParticipant2transient3, - rejectionReason = None, - expireImmediately = true, - ), - ValidatedTopologyTransaction( - transaction = partyToParticipant2, - rejectionReason = None, - ), - ValidatedTopologyTransaction( - transaction = party2ToParticipant, - rejectionReason = None, - ), - ), - ) - resultTs2 = EffectiveStateChange( - effectiveTime = EffectiveTime(ts2), - sequencedTime = SequencedTime(ts1), - before = StoredTopologyTransactions[TopologyChangeOp.Replace, TopologyMapping]( - Seq.empty - ), - after = StoredTopologyTransactions[TopologyChangeOp.Replace, TopologyMapping]( - Seq( - StoredTopologyTransaction( - sequenced = SequencedTime(ts1), - validFrom = EffectiveTime(ts2), - validUntil = Some(EffectiveTime(ts3)), - transaction = partyToParticipant1, - rejectionReason = None, - ) - ) - ), - ) - _ <- for { - atTs2Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts2, - onlyAtEffective = true, - ) - atTs3Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts3, - onlyAtEffective = true, - ) - atTs4Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts4, - onlyAtEffective = true, - ) - fromTs2Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts2, - onlyAtEffective = false, - ) - fromTs3Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts3, - onlyAtEffective = false, - ) - fromTs4Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts4, - onlyAtEffective = false, - ) - } yield { - val resultTs3 = EffectiveStateChange( - effectiveTime = EffectiveTime(ts3), - sequencedTime = SequencedTime(ts2), - before = StoredTopologyTransactions[TopologyChangeOp.Replace, 
TopologyMapping]( - Seq( - StoredTopologyTransaction( - sequenced = SequencedTime(ts1), - validFrom = EffectiveTime(ts2), - validUntil = Some(EffectiveTime(ts3)), - transaction = partyToParticipant1, - rejectionReason = None, - ) - ) - ), - after = StoredTopologyTransactions[TopologyChangeOp.Replace, TopologyMapping]( - Seq( - StoredTopologyTransaction( - sequenced = SequencedTime(ts2), - validFrom = EffectiveTime(ts3), - validUntil = None, - transaction = partyToParticipant2, - rejectionReason = None, - ), - StoredTopologyTransaction( - sequenced = SequencedTime(ts2), - validFrom = EffectiveTime(ts3), - validUntil = None, - transaction = party2ToParticipant, - rejectionReason = None, - ), - ) - ), - ) - assertResult(atTs2Result, Seq(resultTs2)) - assertResult(atTs3Result, Seq(resultTs3)) - assertResult(atTs4Result, Seq.empty) - assertResult(fromTs2Result, Seq(resultTs2, resultTs3)) - assertResult(fromTs3Result, Seq(resultTs3)) - assertResult(fromTs4Result, Seq.empty) - () - } - - // remove a party mapping - partyToParticipant3 = makeSignedTx( - mapping = PartyToParticipant - .create( - partyId = party1, - threshold = PositiveInt.one, - participants = Seq( - HostingParticipant( - participantId = p1Id, - permission = ParticipantPermission.Submission, - ), - HostingParticipant( - participantId = p2Id, - permission = ParticipantPermission.Submission, - ), - ), - ) - .value, - op = TopologyChangeOp.Remove, - serial = PositiveInt.tryCreate(6), - )(p1Key) - _ <- store.update( - SequencedTime(ts3), - EffectiveTime(ts4), - removeMapping = Map( - partyToParticipant3.mapping.uniqueKey -> partyToParticipant3.serial - ), - removeTxs = Set.empty, - additions = Seq( - ValidatedTopologyTransaction( - transaction = partyToParticipant3, - rejectionReason = None, - ) - ), - ) - resultTs3 = EffectiveStateChange( - effectiveTime = EffectiveTime(ts3), - sequencedTime = SequencedTime(ts2), - before = StoredTopologyTransactions[TopologyChangeOp.Replace, TopologyMapping]( - Seq( - StoredTopologyTransaction( - sequenced = SequencedTime(ts1), - validFrom = EffectiveTime(ts2), - validUntil = Some(EffectiveTime(ts3)), - transaction = partyToParticipant1, - rejectionReason = None, - ) - ) - ), - after = StoredTopologyTransactions[TopologyChangeOp.Replace, TopologyMapping]( - Seq( - StoredTopologyTransaction( - sequenced = SequencedTime(ts2), - validFrom = EffectiveTime(ts3), - validUntil = Some(EffectiveTime(ts4)), - transaction = partyToParticipant2, - rejectionReason = None, - ), - StoredTopologyTransaction( - sequenced = SequencedTime(ts2), - validFrom = EffectiveTime(ts3), - validUntil = None, - transaction = party2ToParticipant, - rejectionReason = None, - ), - ) - ), - ) - resultTs4 = EffectiveStateChange( - effectiveTime = EffectiveTime(ts4), - sequencedTime = SequencedTime(ts3), - before = StoredTopologyTransactions[TopologyChangeOp.Replace, TopologyMapping]( - Seq( - StoredTopologyTransaction( - sequenced = SequencedTime(ts2), - validFrom = EffectiveTime(ts3), - validUntil = Some(EffectiveTime(ts4)), - transaction = partyToParticipant2, - rejectionReason = None, - ) - ) - ), - after = StoredTopologyTransactions[TopologyChangeOp.Replace, TopologyMapping]( - Seq.empty - ), - ) - _ <- for { - atTs3Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts3, - onlyAtEffective = true, - ) - atTs4Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts4, - onlyAtEffective = true, - ) - atTs5Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts5, - onlyAtEffective = 
true, - ) - fromTs3Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts3, - onlyAtEffective = false, - ) - fromTs4Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts4, - onlyAtEffective = false, - ) - fromTs5Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts5, - onlyAtEffective = false, - ) - } yield { - assertResult(atTs3Result, Seq(resultTs3)) - assertResult(atTs4Result, Seq(resultTs4)) - assertResult(atTs5Result, Seq.empty) - assertResult(fromTs3Result, Seq(resultTs3, resultTs4)) - assertResult(fromTs4Result, Seq(resultTs4)) - assertResult(fromTs5Result, Seq.empty) - () - } - - // add remove twice, both transient - partyToParticipant4transient1 = makeSignedTx( - mapping = PartyToParticipant - .create( - partyId = party1, - threshold = PositiveInt.one, - participants = Seq( - HostingParticipant( - participantId = p1Id, - permission = ParticipantPermission.Observation, - ) - ), - ) - .value, - op = TopologyChangeOp.Replace, - serial = PositiveInt.tryCreate(7), - )(p1Key) - partyToParticipant4transient2 = makeSignedTx( - mapping = PartyToParticipant - .create( - partyId = party1, - threshold = PositiveInt.one, - participants = Seq( - HostingParticipant( - participantId = p1Id, - permission = ParticipantPermission.Observation, - ) - ), - ) - .value, - op = TopologyChangeOp.Remove, - serial = PositiveInt.tryCreate(8), - )(p1Key) - partyToParticipant4transient3 = makeSignedTx( - mapping = PartyToParticipant - .create( - partyId = party1, - threshold = PositiveInt.one, - participants = Seq( - HostingParticipant( - participantId = p1Id, - permission = ParticipantPermission.Confirmation, - ) - ), - ) - .value, - op = TopologyChangeOp.Replace, - serial = PositiveInt.tryCreate(9), - )(p1Key) - partyToParticipant4 = makeSignedTx( - mapping = PartyToParticipant - .create( - partyId = party1, - threshold = PositiveInt.one, - participants = Seq( - HostingParticipant( - participantId = p1Id, - permission = ParticipantPermission.Observation, - ) - ), - ) - .value, - op = TopologyChangeOp.Remove, - serial = PositiveInt.tryCreate(10), - )(p1Key) - _ <- store.update( - SequencedTime(ts4), - EffectiveTime(ts5), - removeMapping = Map( - partyToParticipant4.mapping.uniqueKey -> partyToParticipant4.serial - ), - removeTxs = Set.empty, - additions = Seq( - ValidatedTopologyTransaction( - transaction = partyToParticipant4transient1, - rejectionReason = None, - expireImmediately = true, - ), - ValidatedTopologyTransaction( - transaction = partyToParticipant4transient2, - rejectionReason = None, - expireImmediately = true, - ), - ValidatedTopologyTransaction( - transaction = partyToParticipant4transient3, - rejectionReason = None, - expireImmediately = true, - ), - ValidatedTopologyTransaction( - transaction = partyToParticipant4, - rejectionReason = None, - ), - ), - ) - _ <- for { - atTs4Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts4, - onlyAtEffective = true, - ) - atTs5Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts5, - onlyAtEffective = true, - ) - atTs6Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts6, - onlyAtEffective = true, - ) - fromTs4Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts4, - onlyAtEffective = false, - ) - fromTs5Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts5, - onlyAtEffective = false, - ) - fromTs6Result <- store.findEffectiveStateChanges( - fromEffectiveInclusive = ts6, - onlyAtEffective = false, - ) - 
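// Expected outcome of this round: serials 7 to 9 expire immediately and the
// final serial 10 is a Remove, so the positive state for party1 stays empty
// across ts5 and no EffectiveStateChange should be reported at or from ts5.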
} yield { - assertResult(atTs4Result, Seq(resultTs4)) - assertResult(atTs5Result, Seq.empty) - assertResult(atTs6Result, Seq.empty) - assertResult(fromTs4Result, Seq(resultTs4)) - assertResult(fromTs5Result, Seq.empty) - assertResult(fromTs6Result, Seq.empty) - () - } - - // add mapping again - partyToParticipant5 = makeSignedTx( - mapping = PartyToParticipant - .create( - partyId = party1, - threshold = PositiveInt.one, - participants = Seq( - HostingParticipant( - participantId = p3Id, - permission = ParticipantPermission.Submission, - ) - ), - ) - .value, - op = TopologyChangeOp.Replace, - serial = PositiveInt.tryCreate(11), - )(p1Key, p3Key) - _ <- store.update( - SequencedTime(ts5), - EffectiveTime(ts6), - removeMapping = - Map(partyToParticipant5.mapping.uniqueKey -> partyToParticipant5.serial), - removeTxs = Set.empty, - additions = Seq( - ValidatedTopologyTransaction( - transaction = partyToParticipant5, - rejectionReason = None, - ) - ), - ) - // now testing all the results - resultTs6 = EffectiveStateChange( - effectiveTime = EffectiveTime(ts6), - sequencedTime = SequencedTime(ts5), - before = StoredTopologyTransactions[TopologyChangeOp.Replace, TopologyMapping]( - Seq.empty - ), - after = StoredTopologyTransactions[TopologyChangeOp.Replace, TopologyMapping]( - Seq( - StoredTopologyTransaction( - sequenced = SequencedTime(ts5), - validFrom = EffectiveTime(ts6), - validUntil = None, - transaction = partyToParticipant5, - rejectionReason = None, - ) - ) - ), - ) - (testTimestamps, testExpectedAtResults) = List( - ts1 -> None, - ts2 -> Some(resultTs2), - ts3 -> Some(resultTs3), - ts4 -> Some(resultTs4), - ts5 -> None, - ts6 -> Some(resultTs6), - ts7 -> None, - ).unzip - _ <- for { - atResults <- MonadUtil.sequentialTraverse(testTimestamps)( - store.findEffectiveStateChanges( - _, - onlyAtEffective = true, - ) - ) - fromResults <- MonadUtil.sequentialTraverse(testTimestamps)( - store.findEffectiveStateChanges( - _, - onlyAtEffective = false, - ) - ) - } yield { - testTimestamps.zip(testExpectedAtResults).zip(atResults).foreach { - case ((ts, expected), actual) => - withClue(s"at $ts") { - assertResult(actual, expected.toList) - } - } - testTimestamps - .zip(testExpectedAtResults) - .zip(fromResults) - .reverse - .foldLeft(Seq.empty[EffectiveStateChange]) { - case (accEffectiveChanges, ((ts, expected), actual)) => - withClue(s"from $ts") { - val acc = accEffectiveChanges ++ expected.toList - assertResult(actual, acc) - acc - } - } - .toSet shouldBe testExpectedAtResults.flatten.toSet - () - } - } yield succeed - } - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTestBase.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTestBase.scala deleted file mode 100644 index 7f49703704..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTestBase.scala +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.store - -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.logging.NamedLogging -import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} -import com.digitalasset.canton.topology.store.StoredTopologyTransactions.{ - GenericStoredTopologyTransactions, - PositiveStoredTopologyTransactions, -} -import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction -import com.digitalasset.canton.topology.transaction.TopologyMapping.MappingHash -import com.digitalasset.canton.topology.transaction.TopologyTransaction.TxHash -import com.digitalasset.canton.topology.transaction.{TopologyChangeOp, TopologyMapping} -import com.digitalasset.canton.topology.{Namespace, PartyId, UniqueIdentifier} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.{Assertion, Suite} - -private[store] trait TopologyStoreTestBase extends BaseTest with HasExecutionContext { - this: Suite & NamedLogging => - protected def update( - store: TopologyStore[TopologyStoreId], - ts: CantonTimestamp, - add: Seq[GenericSignedTopologyTransaction] = Seq.empty, - removeMapping: Map[MappingHash, PositiveInt] = Map.empty, - removeTxs: Set[TxHash] = Set.empty, - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = - store.update( - SequencedTime(ts), - EffectiveTime(ts), - removeMapping, - removeTxs, - add.map(ValidatedTopologyTransaction(_)), - ) - - protected def inspect( - store: TopologyStore[TopologyStoreId], - timeQuery: TimeQuery, - proposals: Boolean = false, - recentTimestampO: Option[CantonTimestamp] = None, - op: Option[TopologyChangeOp] = None, - types: Seq[TopologyMapping.Code] = Nil, - idFilter: Option[String] = None, - namespaceFilter: Option[String] = None, - )(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[StoredTopologyTransactions[TopologyChangeOp, TopologyMapping]] = - store.inspect( - proposals, - timeQuery, - recentTimestampO, - op, - types, - idFilter, - namespaceFilter, - ) - - protected def inspectKnownParties( - store: TopologyStore[TopologyStoreId], - timestamp: CantonTimestamp, - filterParty: String = "", - filterParticipant: String = "", - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Set[PartyId]] = - store.inspectKnownParties( - timestamp, - filterParty, - filterParticipant, - ) - - protected def findPositiveTransactions( - store: TopologyStore[TopologyStoreId], - asOf: CantonTimestamp, - asOfInclusive: Boolean = false, - isProposal: Boolean = false, - types: Seq[TopologyMapping.Code] = TopologyMapping.Code.all, - filterUid: Option[Seq[UniqueIdentifier]] = None, - filterNamespace: Option[Seq[Namespace]] = None, - )(implicit traceContext: TraceContext): FutureUnlessShutdown[PositiveStoredTopologyTransactions] = - store.findPositiveTransactions( - asOf, - asOfInclusive, - isProposal, - types, - filterUid, - filterNamespace, - ) - - protected def expectTransactions( - actual: GenericStoredTopologyTransactions, - expected: Seq[GenericSignedTopologyTransaction], - ): Assertion = { - logger.info(s"Actual: ${actual.result.map(_.transaction).mkString(",")}") - logger.info(s"Expected: ${expected.mkString(",")}") - // run more readable assert first since mapping codes are easier 
to identify than hashes ;-) - actual.result.map(_.mapping.code.code) shouldBe expected.map( - _.mapping.code.code - ) - actual.result.map(_.hash) shouldBe expected.map(_.hash) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTestData.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTestData.scala deleted file mode 100644 index 7ef0b985a7..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTestData.scala +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.store - -import com.daml.nonempty.{NonEmpty, NonEmptyUtil} -import com.digitalasset.canton.BaseTest.* -import com.digitalasset.canton.concurrent.DirectExecutionContext -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} -import com.digitalasset.canton.crypto.SigningPublicKey -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.protocol.DynamicSynchronizerParameters -import com.digitalasset.canton.time.NonNegativeFiniteDuration -import com.digitalasset.canton.topology.* -import com.digitalasset.canton.topology.transaction.* -import com.digitalasset.canton.topology.transaction.DelegationRestriction.CanSignAllMappings -import com.digitalasset.canton.topology.transaction.ParticipantPermission.Submission -import com.digitalasset.canton.version.ProtocolVersion -import org.scalatest.Assertions.fail -import org.scalatest.concurrent.ScalaFutures.convertScalaFuture - -import scala.annotation.nowarn -import scala.concurrent.ExecutionContext - -@nowarn("msg=match may not be exhaustive") -class TopologyStoreTestData( - testedProtocolVersion: ProtocolVersion, - loggerFactory: NamedLoggerFactory, - executionContext: ExecutionContext, -) { - - def makeSignedTx[Op <: TopologyChangeOp, M <: TopologyMapping]( - mapping: M, - op: Op = TopologyChangeOp.Replace, - isProposal: Boolean = false, - serial: PositiveInt = PositiveInt.one, - )(signingKeys: SigningPublicKey*): SignedTopologyTransaction[Op, M] = { - import com.digitalasset.canton.tracing.TraceContext.Implicits.Empty.* - val tx = TopologyTransaction( - op, - serial, - mapping, - ProtocolVersion.v34, - ) - val signingKeyIdsNE = NonEmptyUtil.fromUnsafe(signingKeys).map(_.id).toSet - val keysWithUsage = TopologyManager - .assignExpectedUsageToKeys( - mapping, - signingKeyIdsNE, - forSigning = true, - ) - val signatures = NonEmpty - .from( - keysWithUsage.toSeq.map { case (keyId, usage) => - factory.syncCryptoClient.crypto.privateCrypto - .sign(tx.hash.hash, keyId, usage) - .value - .onShutdown(fail("shutdown"))( - DirectExecutionContext(loggerFactory.getLogger(this.getClass)) - ) - .futureValue - .getOrElse(fail(s"error")) - } - ) - .getOrElse(fail("no keys provided")) - .toSet - - SignedTopologyTransaction.create[Op, M]( - tx, - signatures = signatures, - isProposal = isProposal, - )( - SignedTopologyTransaction.versioningTable - .protocolVersionRepresentativeFor( - ProtocolVersion.v34 - ) - ) - } - - val Seq(ts1, ts2, ts3, ts4, ts5, ts6, ts7, ts8, ts9, ts10) = - (1L to 10L).map(CantonTimestamp.Epoch.plusSeconds) - - val factory: TestingOwnerWithKeys = - new TestingOwnerWithKeys( - SequencerId( - UniqueIdentifier.tryCreate("da", "sequencer") - ), 
- loggerFactory, - executionContext, - ) - - val p1Key = factory.SigningKeys.key1 - val p1Namespace = Namespace(p1Key.fingerprint) - val p1Id = ParticipantId(UniqueIdentifier.tryCreate("participant1", p1Namespace)) - val party1 = PartyId.tryCreate("party1", p1Namespace) - val party2 = PartyId.tryCreate("party2", p1Namespace) - val party3 = PartyId.tryCreate("party3", p1Namespace) - - val p2Key = factory.SigningKeys.key2 - val p2Namespace = Namespace(p2Key.fingerprint) - val p2Id = ParticipantId(UniqueIdentifier.tryCreate("participant2", p2Namespace)) - - val p3Key = factory.SigningKeys.key3 - val p3Namespace = Namespace(p3Key.fingerprint) - val p3Id = ParticipantId(UniqueIdentifier.tryCreate("participant3", p3Namespace)) - - val dnd_p1p2_keys = NonEmpty(Seq, p1Key, p2Key) - val dns_p1p2 = DecentralizedNamespaceDefinition.computeNamespace( - Set(p1Namespace, p2Namespace) - ) - val da_p1p2_physicalSynchronizerId = - SynchronizerId(UniqueIdentifier.tryCreate("da", dns_p1p2)).toPhysical - val da_p1p2_synchronizerId = da_p1p2_physicalSynchronizerId.logical - - val medKey = factory.SigningKeys.key4 - val medNamespace = Namespace(medKey.fingerprint) - - val seqKey = factory.SigningKeys.key5 - val seqNamespace = Namespace(seqKey.fingerprint) - - val dns_p1seq = DecentralizedNamespaceDefinition.computeNamespace( - Set(p1Namespace, seqNamespace) - ) - - val synchronizer1_p1p2_physicalSynchronizerId = - SynchronizerId(UniqueIdentifier.tryCreate("synchronizer1", dns_p1p2)).toPhysical - val synchronizer1_p1p2_synchronizerId = synchronizer1_p1p2_physicalSynchronizerId.logical - val med1Id = MediatorId(UniqueIdentifier.tryCreate("mediator1", medNamespace)) - val med2Id = MediatorId(UniqueIdentifier.tryCreate("mediator2", medNamespace)) - val seq1Id = SequencerId(UniqueIdentifier.tryCreate("sequencer1", seqNamespace)) - val seq2Id = SequencerId(UniqueIdentifier.tryCreate("sequencer2", seqNamespace)) - - val `fred::p2Namepsace` = PartyId(UniqueIdentifier.tryCreate("fred", p2Namespace)) - - val nsd_p1 = makeSignedTx( - NamespaceDelegation - .tryCreate(p1Namespace, p1Key, CanSignAllMappings) - )(p1Key) - - val nsd_p2 = makeSignedTx( - NamespaceDelegation - .tryCreate(p2Namespace, p2Key, CanSignAllMappings) - )(p2Key) - - val dnd_p1p2 = makeSignedTx( - DecentralizedNamespaceDefinition.tryCreate( - dns_p1p2, - threshold = PositiveInt.two, - owners = dnd_p1p2_keys.map(k => Namespace(k.fingerprint)).toSet, - ) - )(dnd_p1p2_keys*) - val dop_synchronizer1_proposal = makeSignedTx( - SynchronizerParametersState( - synchronizer1_p1p2_synchronizerId, - DynamicSynchronizerParameters - .initialValues( - topologyChangeDelay = NonNegativeFiniteDuration.Zero, - protocolVersion = testedProtocolVersion, - mediatorReactionTimeout = NonNegativeFiniteDuration.Zero, - ), - ), - isProposal = true, - )(p1Key) - - val dop_synchronizer1 = makeSignedTx( - SynchronizerParametersState( - synchronizer1_p1p2_synchronizerId, - DynamicSynchronizerParameters - .initialValues( - topologyChangeDelay = NonNegativeFiniteDuration.Zero, - protocolVersion = testedProtocolVersion, - ), - ) - )(dnd_p1p2_keys*) - val otk_p1 = makeSignedTx( - OwnerToKeyMapping(p1Id, NonEmpty(Seq, p1Key, factory.EncryptionKeys.key1)) - )((p1Key)) - val p1_permission_daSynchronizer = makeSignedTx( - ParticipantSynchronizerPermission( - synchronizer1_p1p2_synchronizerId, - p1Id, - Submission, - None, - None, - ), - serial = PositiveInt.tryCreate(1), - )(dnd_p1p2_keys*) - val p1_permission_daSynchronizer_removal = makeSignedTx( - ParticipantSynchronizerPermission( - 
synchronizer1_p1p2_synchronizerId, - p1Id, - Submission, - None, - None, - ), - op = TopologyChangeOp.Remove, - serial = PositiveInt.tryCreate(2), - )(dnd_p1p2_keys*) - val dtc_p1_synchronizer1 = makeSignedTx( - SynchronizerTrustCertificate( - p1Id, - synchronizer1_p1p2_synchronizerId, - ) - )(p1Key) - - val ptp_fred_p1_proposal = makeSignedTx( - PartyToParticipant.tryCreate( - partyId = `fred::p2Namepsace`, - threshold = PositiveInt.one, - participants = Seq(HostingParticipant(p1Id, ParticipantPermission.Submission)), - ), - isProposal = true, - )(p1Key) - val nsd_seq = makeSignedTx( - NamespaceDelegation.tryCreate(seqNamespace, seqKey, CanSignAllMappings) - )(seqKey) - val nsd_seq_invalid = makeSignedTx( - NamespaceDelegation.tryCreate(seqNamespace, seqKey, CanSignAllMappings) - )(p1Key) // explicitly signing with the wrong key - - val dnd_p1seq = makeSignedTx( - DecentralizedNamespaceDefinition - .create( - dns_p1seq, - PositiveInt.one, - owners = NonEmpty(Set, p1Namespace, seqNamespace), - ) - .getOrElse(fail()) - )(p1Key, seqKey) - val otk_p2_proposal = makeSignedTx( - OwnerToKeyMapping(p2Id, NonEmpty(Seq, p1Key, factory.EncryptionKeys.key1)), - isProposal = true, - serial = PositiveInt.tryCreate(2), - )(p1Key) - val ptp_fred_p1 = makeSignedTx( - PartyToParticipant.tryCreate( - partyId = `fred::p2Namepsace`, - threshold = PositiveInt.one, - participants = Seq(HostingParticipant(p1Id, ParticipantPermission.Confirmation)), - ) - )(p1Key, p2Key) - val dtc_p2_synchronizer1 = makeSignedTx( - SynchronizerTrustCertificate( - p2Id, - synchronizer1_p1p2_synchronizerId, - ) - )(p2Key) - val dtc_p2_synchronizer1_update = makeSignedTx( - SynchronizerTrustCertificate( - p2Id, - synchronizer1_p1p2_synchronizerId, - ), - serial = PositiveInt.tryCreate(2), - )(p2Key) - val mds_med1_synchronizer1 = makeSignedTx( - MediatorSynchronizerState - .create( - synchronizerId = synchronizer1_p1p2_synchronizerId, - group = NonNegativeInt.one, - threshold = PositiveInt.one, - active = Seq(med1Id), - observers = Seq.empty, - ) - .getOrElse(fail()) - )(dnd_p1p2_keys*) - - val mds_med1_synchronizer1_invalid = makeSignedTx( - MediatorSynchronizerState - .create( - synchronizerId = synchronizer1_p1p2_synchronizerId, - group = NonNegativeInt.one, - threshold = PositiveInt.one, - active = Seq(med1Id), - observers = Seq.empty, - ) - .getOrElse(fail()) - )(seqKey) - - val mds_med1_synchronizer1_update = makeSignedTx( - MediatorSynchronizerState - .create( - synchronizerId = synchronizer1_p1p2_synchronizerId, - group = NonNegativeInt.one, - threshold = PositiveInt.one, - active = Seq(med1Id, med2Id), - observers = Seq.empty, - ) - .getOrElse(fail()), - serial = PositiveInt.tryCreate(2), - )(dnd_p1p2_keys*) - - val sds_seq1_synchronizer1 = makeSignedTx( - SequencerSynchronizerState - .create( - synchronizerId = synchronizer1_p1p2_synchronizerId, - threshold = PositiveInt.one, - active = Seq(seq1Id), - observers = Seq.empty, - ) - .getOrElse(fail()) - )(dnd_p1p2_keys*) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/TopologyTransactionCollectionTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/TopologyTransactionCollectionTest.scala deleted file mode 100644 index 457d77ac6a..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/TopologyTransactionCollectionTest.scala +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. 
All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.store - -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.protocol.TestSynchronizerParameters -import com.digitalasset.canton.topology.* -import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} -import com.digitalasset.canton.topology.transaction.* -import com.digitalasset.canton.topology.transaction.DelegationRestriction.CanSignAllMappings -import com.digitalasset.canton.topology.transaction.TopologyChangeOp.{Remove, Replace} -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.wordspec.AnyWordSpec - -class TopologyTransactionCollectionTest extends AnyWordSpec with BaseTest with HasExecutionContext { - - private lazy val uid1 = UniqueIdentifier.tryFromProtoPrimitive("da::tluafed") - private lazy val uid2 = UniqueIdentifier.tryFromProtoPrimitive("da::chop") - private lazy val uid3 = UniqueIdentifier.tryFromProtoPrimitive("da::otherNamespace") - - private lazy val factory: TestingOwnerWithKeys = - new TestingOwnerWithKeys( - DefaultTestIdentities.daSequencerId, - loggerFactory, - parallelExecutionContext, - ) - - private def mkStoredTransaction( - mapping: TopologyMapping, - changeOp: TopologyChangeOp = Replace, - serial: PositiveInt = PositiveInt.one, - ): StoredTopologyTransaction[TopologyChangeOp, TopologyMapping] = { - val mkTx = - if (changeOp == Replace) factory.mkAddMultiKey[TopologyMapping] _ - else factory.mkRemove[TopologyMapping] _ - val tm = CantonTimestamp.now() - StoredTopologyTransaction( - SequencedTime(tm), - EffectiveTime(tm), - None, - mkTx( - mapping, - NonEmpty(Set, factory.SigningKeys.key1), - serial, - false, - ), - None, - ) - } - private def mkSynchronizerParametersChange( - synchronizerId: SynchronizerId, - changeOp: TopologyChangeOp = Replace, - serial: PositiveInt = PositiveInt.one, - ) = - mkStoredTransaction( - SynchronizerParametersState(synchronizerId, TestSynchronizerParameters.defaultDynamic), - changeOp, - serial, - ) - - private lazy val replaceDOP1 = mkSynchronizerParametersChange(SynchronizerId(uid1)) - private lazy val removeDOP1 = - mkSynchronizerParametersChange(SynchronizerId(uid1), Remove, serial = PositiveInt.two) - private lazy val replaceDOP2 = mkSynchronizerParametersChange(SynchronizerId(uid2)) - private lazy val removeDOP3 = - mkSynchronizerParametersChange(SynchronizerId(uid3), Remove, serial = PositiveInt.three) - private lazy val replaceNSD1 = mkStoredTransaction( - NamespaceDelegation.tryCreate( - Namespace(factory.SigningKeys.key1.fingerprint), - factory.SigningKeys.key1, - CanSignAllMappings, - ) - ) - - "StoredTopologyTransactions" should { - lazy val simpleTransactionCollection = StoredTopologyTransactions( - Seq(replaceDOP1, removeDOP1, replaceDOP2, removeDOP3, replaceNSD1) - ) - - "collect for simple collection" in { - simpleTransactionCollection - .collectOfType[Replace] - .result should contain theSameElementsAs - Seq(replaceDOP1, replaceDOP2, replaceNSD1) - - simpleTransactionCollection - .collectOfType[Remove] - .result should contain theSameElementsAs - Seq(removeDOP1, removeDOP3) - - simpleTransactionCollection - .collectOfMapping[NamespaceDelegation] shouldBe StoredTopologyTransactions( - Seq(replaceNSD1) - ) - - simpleTransactionCollection - .collectOfMapping[SynchronizerParametersState] - .result should contain theSameElementsAs - 
Seq(replaceDOP1, removeDOP1, replaceDOP2, removeDOP3) - - simpleTransactionCollection.collectLatestByUniqueKey.result should contain theSameElementsAs - Seq(removeDOP1, replaceDOP2, removeDOP3, replaceNSD1) - - TopologyTransactions.collectLatestByUniqueKey( - simpleTransactionCollection.result - ) should contain theSameElementsAs - Seq(removeDOP1, replaceDOP2, removeDOP3, replaceNSD1) - - // remove duplicates - TopologyTransactions.collectLatestByUniqueKey( - simpleTransactionCollection.result ++ simpleTransactionCollection.result - ) should contain theSameElementsAs Seq(removeDOP1, replaceDOP2, removeDOP3, replaceNSD1) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/db/DbDownloadTopologyStateForInitializationServiceTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/db/DbDownloadTopologyStateForInitializationServiceTest.scala deleted file mode 100644 index 5269d84d14..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/db/DbDownloadTopologyStateForInitializationServiceTest.scala +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.store.db - -import com.digitalasset.canton.store.db.{H2Test, PostgresTest} -import com.digitalasset.canton.topology.store.DownloadTopologyStateForInitializationServiceTest - -class DownloadTopologyStateForInitializationServiceTestPostgres - extends DownloadTopologyStateForInitializationServiceTest - with DbTopologyStoreHelper - with PostgresTest - -class DownloadTopologyStateForInitializationServiceTestH2 - extends DownloadTopologyStateForInitializationServiceTest - with DbTopologyStoreHelper - with H2Test diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/db/DbTopologyStoreHelper.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/db/DbTopologyStoreHelper.scala deleted file mode 100644 index ea0bfe9962..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/db/DbTopologyStoreHelper.scala +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.store.db - -import com.digitalasset.canton.TestEssentials -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.resource.DbStorage -import com.digitalasset.canton.store.db.DbTest -import com.digitalasset.canton.topology.PhysicalSynchronizerId -import com.digitalasset.canton.topology.store.{TopologyStore, TopologyStoreId} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.MonadUtil - -trait DbTopologyStoreHelper { - - this: DbTest & TestEssentials => - - @volatile - private var storesToCleanup = List.empty[TopologyStore[_]] - - override def cleanDb( - storage: DbStorage - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = - MonadUtil - .sequentialTraverse_(storesToCleanup)( - _.deleteAllData() - ) - .map { _ => - storesToCleanup = Nil - } - - protected val maxItemsInSqlQuery: PositiveInt = PositiveInt.tryCreate(20) - - protected def mkStore( - synchronizerId: PhysicalSynchronizerId - ): TopologyStore[TopologyStoreId.SynchronizerStore] = { - val store = new DbTopologyStore( - storage, - TopologyStoreId.SynchronizerStore(synchronizerId), - testedProtocolVersion, - timeouts, - loggerFactory, - maxItemsInSqlQuery = maxItemsInSqlQuery, - ) - storesToCleanup = store :: storesToCleanup - store - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/db/DbTopologyStoreTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/db/DbTopologyStoreTest.scala deleted file mode 100644 index 294176a99a..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/db/DbTopologyStoreTest.scala +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.store.db - -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.store.db.{DbTest, H2Test, PostgresTest} -import com.digitalasset.canton.topology.PartyId -import com.digitalasset.canton.topology.processing.{ - EffectiveTime, - InitialTopologySnapshotValidator, - SequencedTime, -} -import com.digitalasset.canton.topology.store.{ - StoredTopologyTransaction, - StoredTopologyTransactions, - TopologyStoreTest, -} -import com.digitalasset.canton.topology.transaction.ParticipantPermission.Submission -import com.digitalasset.canton.topology.transaction.{HostingParticipant, PartyToParticipant} - -trait DbTopologyStoreTest extends TopologyStoreTest with DbTopologyStoreHelper { - this: DbTest => - - "DbPartyMetadataStore" should { - behave like partyMetadataStore(() => new DbPartyMetadataStore(storage, timeouts, loggerFactory)) - } - - "DbTopologyStore" should { - behave like topologyStore(mkStore) - - "properly handle insertion order for large topology snapshots" in { - val store = mkStore(testData.synchronizer1_p1p2_physicalSynchronizerId) - - val synchronizerSetup = Seq( - 0 -> testData.nsd_p1, - 0 -> testData.nsd_p2, - 0 -> testData.dnd_p1p2, - 0 -> testData.dop_synchronizer1, - 0 -> testData.otk_p1, - 0 -> testData.dtc_p1_synchronizer1, - ) - - val partyAllocations = (1 to maxItemsInSqlQuery.value * 2 + 3) map { i => - i -> testData.makeSignedTx( - PartyToParticipant.tryCreate( - PartyId.tryCreate(s"party$i", testData.p1Namespace), - threshold = PositiveInt.one, - participants = Seq(HostingParticipant(testData.p1Id, Submission)), - ) - )(testData.p1Key) - } - - val transactions = (synchronizerSetup ++ partyAllocations).map { case (timeOffset, tx) => - val ts = CantonTimestamp.Epoch.plusSeconds(timeOffset.toLong) - // the actual transaction and the consistency is not important for this test - StoredTopologyTransaction( - SequencedTime(ts), - EffectiveTime(ts), - None, - tx, - None, - ) - } - val topologySnapshot = StoredTopologyTransactions(transactions) - - for { - _ <- new InitialTopologySnapshotValidator( - testedProtocolVersion, - testData.factory.syncCryptoClient.crypto.pureCrypto, - store, - timeouts, - loggerFactory, - ).validateAndApplyInitialTopologySnapshot(topologySnapshot) - .valueOrFail("topology bootstrap") - - maxTimestamp <- store - .maxTimestamp(SequencedTime.MaxValue, includeRejected = true) - } yield { - val lastSequenced = transactions.last.sequenced - val lastEffective = transactions.last.validFrom - maxTimestamp shouldBe Some((lastSequenced, lastEffective)) - } - } - } -} - -class TopologyStoreTestPostgres extends DbTopologyStoreTest with PostgresTest - -class TopologyStoreTestH2 extends DbTopologyStoreTest with H2Test diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/memory/InMemoryDownloadTopologyStateForInitializationServiceTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/memory/InMemoryDownloadTopologyStateForInitializationServiceTest.scala deleted file mode 100644 index fc099750e3..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/memory/InMemoryDownloadTopologyStateForInitializationServiceTest.scala +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.store.memory - -import com.digitalasset.canton.topology.PhysicalSynchronizerId -import com.digitalasset.canton.topology.store.TopologyStoreId.SynchronizerStore -import com.digitalasset.canton.topology.store.{ - DownloadTopologyStateForInitializationServiceTest, - TopologyStore, - TopologyStoreId, -} - -class InMemoryDownloadTopologyStateForInitializationServiceTest - extends DownloadTopologyStateForInitializationServiceTest { - override protected def mkStore( - synchronizerId: PhysicalSynchronizerId - ): TopologyStore[TopologyStoreId.SynchronizerStore] = { - val storeId = SynchronizerStore(synchronizerId) - new InMemoryTopologyStore[TopologyStoreId.SynchronizerStore]( - storeId, - testedProtocolVersion, - loggerFactory, - timeouts, - ) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStoreTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStoreTest.scala deleted file mode 100644 index a0c42a3cfa..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStoreTest.scala +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.store.memory - -import com.digitalasset.canton.topology.store.{TopologyStoreId, TopologyStoreTest} - -class InMemoryTopologyStoreTest extends TopologyStoreTest { - - "InMemoryPartyMetadataStore" should { - behave like partyMetadataStore(() => new InMemoryPartyMetadataStore) - } - - "InMemoryTopologyStore" should { - behave like topologyStore(synchronizerId => - new InMemoryTopologyStore( - TopologyStoreId.SynchronizerStore(synchronizerId), - testedProtocolVersion, - loggerFactory, - timeouts, - ) - ) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/transaction/GeneratorsTransaction.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/transaction/GeneratorsTransaction.scala deleted file mode 100644 index 223148b242..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/transaction/GeneratorsTransaction.scala +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.transaction - -import cats.syntax.apply.* -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} -import com.digitalasset.canton.crypto.{ - GeneratorsCrypto, - Hash, - PublicKey, - Signature, - SigningKeyUsage, - SigningPublicKey, -} -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.protocol.GeneratorsProtocol -import com.digitalasset.canton.sequencing.GeneratorsSequencing -import com.digitalasset.canton.topology.transaction.DelegationRestriction.{ - CanSignAllButNamespaceDelegations, - CanSignAllMappings, - CanSignSpecificMappings, -} -import com.digitalasset.canton.topology.transaction.TopologyTransaction.TxHash -import com.digitalasset.canton.topology.{ - GeneratorsTopology, - MediatorId, - Namespace, - ParticipantId, - PartyId, - PhysicalSynchronizerId, - SequencerId, - SynchronizerId, -} -import com.digitalasset.canton.version.ProtocolVersion -import com.digitalasset.canton.{Generators, GeneratorsLf, LfPackageId} -import com.digitalasset.daml.lf.data.Ref.PackageId -import magnolify.scalacheck.auto.* -import org.scalacheck.{Arbitrary, Gen} -import org.scalatest.EitherValues.* - -import scala.math.Ordering.Implicits.* - -final class GeneratorsTransaction( - protocolVersion: ProtocolVersion, - generatorsLf: GeneratorsLf, - generatorsProtocol: GeneratorsProtocol, - generatorsTopology: GeneratorsTopology, - generatorsSequencing: GeneratorsSequencing, -) { - import GeneratorsCrypto.* - import generatorsLf.* - import generatorsProtocol.* - import generatorsSequencing.* - import generatorsTopology.* - import Generators.* - import com.digitalasset.canton.config.GeneratorsConfig.* - - implicit val topologyChangeOpArb: Arbitrary[TopologyChangeOp] = Arbitrary( - Gen.oneOf(TopologyChangeOp.Replace, TopologyChangeOp.Remove) - ) - implicit val topologyTransactionNamespacesArb: Arbitrary[NonEmpty[Set[Namespace]]] = - Generators.nonEmptySet[Namespace] - implicit val topologyTransactionMediatorIdsArb: Arbitrary[NonEmpty[Seq[MediatorId]]] = - Arbitrary(Generators.nonEmptySetGen[MediatorId].map(_.toSeq)) - implicit val topologyTransactionSequencerIdsArb: Arbitrary[NonEmpty[Seq[SequencerId]]] = - Arbitrary(Generators.nonEmptySetGen[SequencerId].map(_.toSeq)) - implicit val topologyTransactionLfPackageIdsArb: Arbitrary[NonEmpty[Seq[LfPackageId]]] = - Arbitrary(Generators.nonEmptySetGen[LfPackageId].map(_.toSeq)) - implicit val topologyTransactionPublicKeysArb: Arbitrary[NonEmpty[Seq[PublicKey]]] = - Arbitrary(Generators.nonEmptySetGen[PublicKey].map(_.toSeq)) - implicit val topologyTransactionSigningPublicKeysArb: Arbitrary[NonEmpty[Seq[SigningPublicKey]]] = - Arbitrary(Generators.nonEmptySetGen[SigningPublicKey].map(_.toSeq)) - implicit val topologyTransactionMappingsArb: Arbitrary[NonEmpty[Seq[TopologyMapping]]] = - Arbitrary(Generators.nonEmptySetGen[TopologyMapping].map(_.toSeq)) - implicit val topologyTransactionPartyIdsArb: Arbitrary[NonEmpty[Seq[PartyId]]] = - Arbitrary(Generators.nonEmptySetGen[PartyId].map(_.toSeq)) - implicit val topologyTransactionHostingParticipantsArb - : Arbitrary[NonEmpty[Seq[HostingParticipant]]] = - Arbitrary(Generators.nonEmptySetGen[HostingParticipant].map(_.toSeq)) - implicit val topologyTransactionVettedPackageArb: Arbitrary[VettedPackage] = Arbitrary( - for { - packageId <- Arbitrary.arbitrary[PackageId] - validFrom <- Arbitrary.arbOption[CantonTimestamp].arbitrary - validUntil <- Arbitrary 
- .arbOption[CantonTimestamp] - .arbitrary - .suchThat(until => (validFrom, until).tupled.forall { case (from, until) => from < until }) - } yield VettedPackage(packageId, validFrom, validUntil) - ) - - implicit val hostingParticipantArb: Arbitrary[HostingParticipant] = Arbitrary( - for { - pid <- Arbitrary.arbitrary[ParticipantId] - permission <- Arbitrary.arbitrary[ParticipantPermission] - onboarding <- Arbitrary.arbBool.arbitrary - } yield HostingParticipant(pid, permission, onboarding) - ) - - implicit val synchronizerUpgradeAnnouncementArb: Arbitrary[SynchronizerUpgradeAnnouncement] = - Arbitrary(for { - psid <- Arbitrary.arbitrary[PhysicalSynchronizerId] - upgradeTime <- Arbitrary.arbitrary[CantonTimestamp] - } yield SynchronizerUpgradeAnnouncement(psid, upgradeTime)) - - implicit val topologyMappingArb: Arbitrary[TopologyMapping] = genArbitrary - - implicit val decentralizedNamespaceDefinitionArb: Arbitrary[DecentralizedNamespaceDefinition] = - Arbitrary( - for { - namespace <- Arbitrary.arbitrary[Namespace] - owners <- Arbitrary.arbitrary[NonEmpty[Set[Namespace]]] - // Not using Arbitrary.arbitrary[PositiveInt] for threshold to honor constraint - threshold <- Gen.choose(1, owners.size).map(PositiveInt.tryCreate) - } yield DecentralizedNamespaceDefinition.create(namespace, threshold, owners).value - ) - - implicit val mediatorSynchronizerStateArb: Arbitrary[MediatorSynchronizerState] = Arbitrary( - for { - synchronizerId <- Arbitrary.arbitrary[SynchronizerId] - group <- Arbitrary.arbitrary[NonNegativeInt] - active <- Arbitrary.arbitrary[NonEmpty[Seq[MediatorId]]] - // Not using Arbitrary.arbitrary[PositiveInt] for threshold to honor constraint - threshold <- Gen.choose(1, active.size).map(PositiveInt.tryCreate) - observers <- Arbitrary.arbitrary[NonEmpty[Seq[MediatorId]]] - } yield MediatorSynchronizerState - .create(synchronizerId, group, threshold, active, observers) - .value - ) - - val restrictionWithNamespaceDelegation = Gen.oneOf( - Gen.const(CanSignAllMappings), - Generators - // generate a random sequence of codes, - .nonEmptySetGen[TopologyMapping.Code] - // but always add the namespace delegation code, so that the result can always sign namespace delegations - .map(_ incl TopologyMapping.Code.NamespaceDelegation) - .map(CanSignSpecificMappings(_)), - ) - - val restrictionWithoutNamespaceDelegation = Gen.oneOf( - Gen.const(CanSignAllButNamespaceDelegations), - Generators - .nonEmptySetGen( - Arbitrary( - // generate a random set of codes but don't allow for NamespaceDelegation to be included - Gen.oneOf(TopologyMapping.Code.all.toSet - TopologyMapping.Code.NamespaceDelegation) - ) - ) - .map(CanSignSpecificMappings(_)), - ) - - implicit val namespaceDelegationArb: Arbitrary[NamespaceDelegation] = Arbitrary( - for { - namespace <- Arbitrary.arbitrary[Namespace] - // target key must include the `Namespace` usage - target <- Arbitrary - .arbitrary[SigningPublicKey] - .retryUntil(key => - SigningKeyUsage.matchesRelevantUsages(key.usage, SigningKeyUsage.NamespaceOnly) - ) - delegationRestriction <- // honor constraint that the delegation must be able to sign namespace delegations if fingerprints match - if (namespace.fingerprint == target.fingerprint) - restrictionWithNamespaceDelegation - else restrictionWithoutNamespaceDelegation - } yield NamespaceDelegation.tryCreate(namespace, target, delegationRestriction) - ) - - implicit val purgeTopologyTransactionArb: Arbitrary[PurgeTopologyTransaction] = Arbitrary( - for { - synchronizerId <- Arbitrary.arbitrary[SynchronizerId] - 
mappings <- Arbitrary.arbitrary[NonEmpty[Seq[TopologyMapping]]] - } yield PurgeTopologyTransaction.create(synchronizerId, mappings).value - ) - - implicit val partyToParticipantTopologyTransactionArb: Arbitrary[PartyToParticipant] = Arbitrary( - for { - partyId <- Arbitrary.arbitrary[PartyId] - participants <- Arbitrary.arbitrary[NonEmpty[Seq[HostingParticipant]]] - // Not using Arbitrary.arbitrary[PositiveInt] for threshold to honor constraint - threshold <- Gen - .choose(1, participants.count(_.permission >= ParticipantPermission.Confirmation).max(1)) - .map(PositiveInt.tryCreate) - } yield PartyToParticipant - .create(partyId, threshold, participants) - .value - ) - - implicit val vettedPackagesTopologyTransactionArb: Arbitrary[VettedPackages] = Arbitrary( - for { - participantId <- Arbitrary.arbitrary[ParticipantId] - vettedPackages <- Gen.listOf(Arbitrary.arbitrary[VettedPackage]) - } yield VettedPackages.create(participantId, vettedPackages).value - ) - - implicit val partyToKeyTopologyTransactionArb: Arbitrary[PartyToKeyMapping] = Arbitrary( - for { - partyId <- Arbitrary.arbitrary[PartyId] - signingKeys <- Arbitrary.arbitrary[NonEmpty[Seq[SigningPublicKey]]] - // Not using Arbitrary.arbitrary[PositiveInt] for threshold to honor constraint - threshold <- Gen - .choose(1, signingKeys.size) - .map(PositiveInt.tryCreate) - } yield PartyToKeyMapping - .create(partyId, threshold, signingKeys) - .value - ) - - implicit val sequencerSynchronizerStateArb: Arbitrary[SequencerSynchronizerState] = Arbitrary( - for { - synchronizerId <- Arbitrary.arbitrary[SynchronizerId] - active <- Arbitrary.arbitrary[NonEmpty[Seq[SequencerId]]] - // Not using Arbitrary.arbitrary[PositiveInt] for threshold to honor constraint - threshold <- Gen.choose(1, active.size).map(PositiveInt.tryCreate) - observers <- Arbitrary.arbitrary[NonEmpty[Seq[SequencerId]]] - } yield SequencerSynchronizerState.create(synchronizerId, threshold, active, observers).value - ) - - implicit val topologyTransactionArb - : Arbitrary[TopologyTransaction[TopologyChangeOp, TopologyMapping]] = Arbitrary( - for { - op <- Arbitrary.arbitrary[TopologyChangeOp] - serial <- Arbitrary.arbitrary[PositiveInt] - mapping <- Arbitrary.arbitrary[TopologyMapping] - } yield TopologyTransaction(op, serial, mapping, protocolVersion) - ) - - implicit val topologyTransactionSignaturesArb: Arbitrary[NonEmpty[Set[Signature]]] = - Generators.nonEmptySet[Signature] - - implicit val txHashArb: Arbitrary[NonEmpty[Set[TxHash]]] = Arbitrary( - Generators.nonEmptySet[Hash].arbitrary.map(_.map(TxHash(_))) - ) - - def multiTransactionSignaturesGen(transactionHash: TxHash) = for { - hashes <- Arbitrary.arbitrary[NonEmpty[Set[TxHash]]] - signatures <- Arbitrary.arbitrary[Signature] - } yield MultiTransactionSignature( - // Guarantees that the transaction hash is in the multi hash set - NonEmpty.mk(Set, transactionHash, hashes.toSeq*), - signatures, - ) - - def topologyTransactionSignatureArb( - transactionHash: TxHash - ): Arbitrary[TopologyTransactionSignature] = Arbitrary { - Gen.frequency( - (1, Arbitrary.arbitrary[Signature].map(SingleTransactionSignature(transactionHash, _))), - (1, multiTransactionSignaturesGen(transactionHash)), - ) - } - - implicit val signedTopologyTransactionArb - : Arbitrary[SignedTopologyTransaction[TopologyChangeOp, TopologyMapping]] = Arbitrary( - for { - transaction <- Arbitrary.arbitrary[TopologyTransaction[TopologyChangeOp, TopologyMapping]] - proposal <- Arbitrary.arbBool.arbitrary - topologyTransactionSignatures <- { - implicit val 
localSignatureArb = topologyTransactionSignatureArb(transaction.hash) - Generators.nonEmptySetGen[TopologyTransactionSignature] - } - } yield SignedTopologyTransaction - .create(transaction, topologyTransactionSignatures, proposal, protocolVersion) - ) - - implicit val signedTopologyTransactionsArb - : Arbitrary[SignedTopologyTransactions[TopologyChangeOp, TopologyMapping]] = Arbitrary( - for { - transactions <- Gen.listOf(signedTopologyTransactionArb.arbitrary) - } yield SignedTopologyTransactions(transactions, protocolVersion) - ) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/transaction/ValidatingTopologyMappingChecksTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/transaction/ValidatingTopologyMappingChecksTest.scala deleted file mode 100644 index ca3430c0da..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/topology/transaction/ValidatingTopologyMappingChecksTest.scala +++ /dev/null @@ -1,1119 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.topology.transaction - -import cats.instances.order.* -import cats.syntax.either.* -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} -import com.digitalasset.canton.crypto.{Fingerprint, SigningPublicKey} -import com.digitalasset.canton.protocol.{DynamicSynchronizerParameters, OnboardingRestriction} -import com.digitalasset.canton.time.PositiveSeconds -import com.digitalasset.canton.topology.* -import com.digitalasset.canton.topology.DefaultTestIdentities.{mediatorId, sequencerId} -import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} -import com.digitalasset.canton.topology.store.* -import com.digitalasset.canton.topology.store.TopologyStoreId.SynchronizerStore -import com.digitalasset.canton.topology.store.TopologyTransactionRejection.InvalidTopologyMapping -import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStore -import com.digitalasset.canton.topology.transaction.DelegationRestriction.{ - CanSignAllButNamespaceDelegations, - CanSignAllMappings, -} -import com.digitalasset.canton.topology.transaction.ParticipantPermission.{ - Confirmation, - Observation, - Submission, -} -import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction -import com.digitalasset.canton.topology.transaction.TopologyChangeOp.Replace -import com.digitalasset.canton.topology.transaction.TopologyMapping.Code -import com.digitalasset.canton.topology.transaction.TopologyMappingChecks.PendingChangesLookup -import com.digitalasset.canton.{ - BaseTest, - FailOnShutdown, - HasExecutionContext, - ProtocolVersionChecksAnyWordSpec, -} -import org.scalatest.wordspec.AnyWordSpec - -import java.time -import scala.annotation.nowarn -import scala.language.implicitConversions - -@nowarn("msg=match may not be exhaustive") -class ValidatingTopologyMappingChecksTest - extends AnyWordSpec - with BaseTest - with HasExecutionContext - with ProtocolVersionChecksAnyWordSpec - with FailOnShutdown { - - private lazy val factory = new TestingOwnerWithKeys( - DefaultTestIdentities.mediatorId, - loggerFactory, - initEc = parallelExecutionContext, - ) - - def mk() = { - val store = - new InMemoryTopologyStore( - SynchronizerStore(DefaultTestIdentities.physicalSynchronizerId), - 
testedProtocolVersion, - loggerFactory, - timeouts, - ) - val check = new ValidatingTopologyMappingChecks(store, loggerFactory) - (check, store) - } - - "TopologyMappingChecks" when { - import DefaultTestIdentities.{synchronizerId, participant1, participant2, participant3, party1} - import factory.TestingTransactions.* - - def checkTransaction( - checks: TopologyMappingChecks, - toValidate: GenericSignedTopologyTransaction, - inStore: Option[GenericSignedTopologyTransaction] = None, - pendingChangesLookup: PendingChangesLookup = Map.empty, - ): Either[TopologyTransactionRejection, Unit] = - checks - .checkTransaction(EffectiveTime.MaxValue, toValidate, inStore, pendingChangesLookup) - .value - .futureValueUS - - implicit def toHostingParticipant( - participantToPermission: (ParticipantId, ParticipantPermission) - ): HostingParticipant = - HostingParticipant(participantToPermission._1, participantToPermission._2) - - "validating any Mapping" should { - "reject removal of non-existent mappings" in { - import factory.SigningKeys.key1 - val (checks, _) = mk() - - val removeNsdSerial1 = factory.mkRemove( - NamespaceDelegation.tryCreate(Namespace(key1.fingerprint), key1, CanSignAllMappings), - serial = PositiveInt.one, - ) - // also check that for serial > 1 - val removeNsdSerial3 = factory.mkRemove( - NamespaceDelegation.tryCreate(Namespace(key1.fingerprint), key1, CanSignAllMappings), - serial = PositiveInt.three, - ) - checkTransaction(checks, removeNsdSerial1) shouldBe Left( - TopologyTransactionRejection.NoCorrespondingActiveTxToRevoke(removeNsdSerial1.mapping) - ) - checkTransaction(checks, removeNsdSerial3) shouldBe Left( - TopologyTransactionRejection.NoCorrespondingActiveTxToRevoke(removeNsdSerial3.mapping) - ) - } - - "reject if removal also changes the content" in { - import factory.SigningKeys.{key1, key2} - val (checks, _) = mk() - - val removeNs1k2 = factory.mkRemove( - NamespaceDelegation - .tryCreate( - Namespace(key1.fingerprint), - key2, - // changing the mapping compared to ns1k2 by setting CanSignAllMappings - CanSignAllMappings, - ), - serial = PositiveInt.two, - ) - checkTransaction(checks, removeNs1k2, Some(ns1k2)) shouldBe Left( - TopologyTransactionRejection.RemoveMustNotChangeMapping( - removeNs1k2.mapping, - ns1k2.mapping, - ) - ) - } - - "respect pending changes when loading additional data for validations" in { - import factory.SigningKeys.{key1, key2, key3} - val (checks, store) = mk() - val ns1 = Namespace(key1.fingerprint) - val ns2 = Namespace(key2.fingerprint) - val ns3 = Namespace(key3.fingerprint) - - val nsd1Replace_1 = - factory.mkAdd(NamespaceDelegation.tryCreate(ns1, key1, CanSignAllMappings)) - val nsd1Remove_2 = factory.mkRemove( - NamespaceDelegation.tryCreate(ns1, key1, CanSignAllMappings), - serial = PositiveInt.two, - ) - val nsd1ReplaceProposal_3 = factory.mkAdd( - NamespaceDelegation.tryCreate(ns1, key1, CanSignAllMappings), - serial = PositiveInt.three, - isProposal = true, - ) - - val nsd2Replace_1 = - factory.mkAdd(NamespaceDelegation.tryCreate(ns2, key2, CanSignAllMappings)) - val nsd2Remove_2 = factory.mkRemove( - NamespaceDelegation.tryCreate(ns2, key2, CanSignAllMappings), - serial = PositiveInt.two, - ) - - val nsd3Replace_1 = - factory.mkAdd(NamespaceDelegation.tryCreate(ns3, key3, CanSignAllMappings)) - - store - .update( - SequencedTime(ts), - EffectiveTime(ts), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = Seq(nsd1Replace_1, nsd2Replace_1).map(ValidatedTopologyTransaction(_)), - ) - .futureValueUS - - store - 
.update( - SequencedTime(ts + seconds(1)), - EffectiveTime(ts + seconds(1)), - removeMapping = Map(nsd1Remove_2.mapping.uniqueKey -> nsd1Remove_2.serial), - removeTxs = Set.empty, - additions = Seq(ValidatedTopologyTransaction(nsd1Remove_2)), - ) - .futureValueUS - - store - .update( - SequencedTime(ts + seconds(2)), - EffectiveTime(ts + seconds(2)), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = Seq(ValidatedTopologyTransaction(nsd1ReplaceProposal_3)), - ) - .futureValueUS - - /* - * The store contains the following transactions: - * TS0: Replace NSD1, Replace NSD2 - * TS1: Remove NSD1 - * TS2: Replace Proposal NSD1 - */ - - // TS0: load without pending changes - checks - .loadFromStore( - EffectiveTime(ts.immediateSuccessor), - codes = Set(Code.NamespaceDelegation), - pendingChangesLookup = Map.empty, - ) - .futureValueUS - .value should contain theSameElementsAs Seq(nsd1Replace_1, nsd2Replace_1) - - // TS0: load with Removal NS2 as pending change - checks - .loadFromStore( - EffectiveTime(ts.immediateSuccessor), - codes = Set(Code.NamespaceDelegation), - Map(nsd2Remove_2.mapping.uniqueKey -> nsd2Remove_2), - ) - .futureValueUS - .value shouldBe Seq(nsd1Replace_1) - - // TS0: load with Replace NS3 as pending change without prior transactions in the store - checks - .loadFromStore( - EffectiveTime(ts.immediateSuccessor), - codes = Set(Code.NamespaceDelegation), - pendingChangesLookup = Map(nsd3Replace_1.mapping.uniqueKey -> nsd3Replace_1), - ) - .futureValueUS - .value should contain theSameElementsAs Seq( - nsd1Replace_1, - nsd2Replace_1, - nsd3Replace_1, - ) - - // TS0: load with Replace NS3 as pending change without prior transactions in the store and also matching a - // namespace filter - checks - .loadFromStore( - EffectiveTime(ts.immediateSuccessor), - codes = Set(Code.NamespaceDelegation), - pendingChangesLookup = Map(nsd3Replace_1.mapping.uniqueKey -> nsd3Replace_1), - filterNamespace = Some(Seq(ns2, ns3)), - ) - .futureValueUS - .value should contain theSameElementsAs Seq(nsd2Replace_1, nsd3Replace_1) - - // TS1: don't load Remove NS1 from the store - checks - .loadFromStore( - EffectiveTime(ts.immediateSuccessor + seconds(1)), - codes = Set(Code.NamespaceDelegation), - Map.empty, - ) - .futureValueUS - .value shouldBe Seq(nsd2Replace_1) - - // TS1: don't load Remove NS1 from the store mixed with Remove NS2 as pending change - checks - .loadFromStore( - EffectiveTime(ts.immediateSuccessor + seconds(1)), - codes = Set(Code.NamespaceDelegation), - Map(nsd2Remove_2.mapping.uniqueKey -> nsd2Remove_2), - ) - .futureValueUS - .value shouldBe Seq.empty - - // TS2: don't load proposals - checks - .loadFromStore( - EffectiveTime(ts.immediateSuccessor + seconds(2)), - codes = Set(Code.NamespaceDelegation), - Map.empty, - ) - .futureValueUS - .value shouldBe Seq(nsd2Replace_1) - - } - } - - "validating DecentralizedNamespaceDefinition" should { - "reject namespaces not derived from their owners' namespaces" in { - val (checks, store) = mk() - val (keys, namespaces, rootCerts) = setUpRootCerts( - factory.SigningKeys.key1, - factory.SigningKeys.key2, - factory.SigningKeys.key3, - ) - - addToStore(store, rootCerts*) - - val dns = factory.mkAddMultiKey( - DecentralizedNamespaceDefinition - .create( - Namespace(Fingerprint.tryFromString("bogusNamespace")), - PositiveInt.one, - NonEmpty.from(namespaces).value.toSet, - ) - .value, - signingKeys = keys.toSet, - // using serial=2 here to test that we don't special case serial=1 - serial = PositiveInt.two, - ) - - 
checkTransaction(checks, dns, None) should matchPattern { - case Left(TopologyTransactionRejection.InvalidTopologyMapping(err)) - if err.contains("not derived from the owners") => - } - } - - "reject if a namespace delegation with the same namespace already exists" in { - val (checks, store) = mk() - val (keys, namespaces, rootCerts) = setUpRootCerts( - factory.SigningKeys.key1, - factory.SigningKeys.key2, - factory.SigningKeys.key3, - ) - - val dnd_namespace = DecentralizedNamespaceDefinition.computeNamespace(namespaces.toSet) - - // we are creating namespace delegation with the same namespace as the decentralized namespace. - // this nsd however is not actually fully authorized, but for the purpose of this test, we want to see - // that the decentralized namespace definition gets rejected. - val conflicting_nsd = factory.mkAdd( - NamespaceDelegation - .tryCreate(dnd_namespace, factory.SigningKeys.key8, CanSignAllButNamespaceDelegations), - factory.SigningKeys.key8, - ) - addToStore(store, (rootCerts :+ conflicting_nsd)*) - - val dnd = factory.mkAddMultiKey( - DecentralizedNamespaceDefinition - .create( - dnd_namespace, - PositiveInt.one, - NonEmpty.from(namespaces).value.toSet, - ) - .value, - signingKeys = keys.toSet, - serial = PositiveInt.one, - ) - - checkTransaction(checks, dnd, None) shouldBe Left( - TopologyTransactionRejection.NamespaceAlreadyInUse(`dnd_namespace`) - ) - } - - "reject if an owning namespace does not have a root certificate" in { - val (checks, store) = mk() - val (keys, namespaces, rootCerts) = setUpRootCerts( - factory.SigningKeys.key1, - factory.SigningKeys.key2, - factory.SigningKeys.key3, - ) - - def createDND(owners: Seq[Namespace], keys: Seq[SigningPublicKey]) = - factory.mkAddMultiKey( - DecentralizedNamespaceDefinition - .create( - DecentralizedNamespaceDefinition.computeNamespace(owners.toSet), - PositiveInt.one, - NonEmpty.from(owners).value.toSet, - ) - .value, - signingKeys = NonEmpty.from(keys).value.toSet, - serial = PositiveInt.one, - ) - - val dnd_k1k2 = createDND(namespaces.take(2), keys.take(2)) - - addToStore(store, (rootCerts :+ dnd_k1k2)*) - - val ns4 = Namespace(factory.SigningKeys.key4.fingerprint) - - val dnd_invalid = createDND( - namespaces.takeRight(2) ++ Seq(ns4, dnd_k1k2.mapping.namespace), - // we don't have to provide all keys for this transaction to be fully authorized, - // because the test doesn't check authorization, just semantic validity. - keys.takeRight(2), - ) - checkTransaction(checks, dnd_invalid, None) should matchPattern { - case Left(TopologyTransactionRejection.InvalidTopologyMapping(err)) - if err.contains( - s"No root certificate found for ${Seq(ns4, dnd_k1k2.mapping.namespace).sorted.mkString(", ")}" - ) => - } - } - } - - "validating NamespaceDelegation" should { - "reject a namespace delegation if a decentralized namespace with the same namespace already exists" in { - val (checks, store) = mk() - val (rootKeys, namespaces, rootCerts) = setUpRootCerts( - factory.SigningKeys.key1, - factory.SigningKeys.key2, - factory.SigningKeys.key3, - ) - - val dnd_namespace = DecentralizedNamespaceDefinition.computeNamespace(namespaces.toSet) - - val dnd = factory.mkAddMultiKey( - DecentralizedNamespaceDefinition - .create( - dnd_namespace, - PositiveInt.one, - NonEmpty.from(namespaces).value.toSet, - ) - .value, - signingKeys = rootKeys.toSet, - serial = PositiveInt.one, - ) - - addToStore(store, (rootCerts :+ dnd)*) - - // we are creating namespace delegation with the same namespace as the decentralized namespace. 
- // even if it is signed by enough owners of the decentralized namespace, we don't allow namespace delegations - // for a decentralized namespace, because - // 1. it goes against the very purpose of a decentralized namespace - // 2. the authorization machinery is actually not prepared to deal with it - // A similar effect can be achieved by setting the threshold of the DND to 1 - val conflicting_nsd = factory.mkAddMultiKey( - NamespaceDelegation - .tryCreate(dnd_namespace, factory.SigningKeys.key8, CanSignAllButNamespaceDelegations), - rootKeys.toSet, - ) - - checkTransaction(checks, conflicting_nsd, None) shouldBe Left( - TopologyTransactionRejection.NamespaceAlreadyInUse(`dnd_namespace`) - ) - } - } - - "validating PartyToParticipant" should { - - "reject when participants don't have a DTC" in { - val (checks, store) = mk() - addToStore(store, p2_dtc) - - val failureCases = Seq(Seq(participant1), Seq(participant1, participant2)) - - failureCases.foreach { participants => - val ptp = factory.mkAdd( - PartyToParticipant.tryCreate( - party1, - PositiveInt.one, - participants.map[HostingParticipant](_ -> Submission), - ) - ) - checkTransaction(checks, ptp) shouldBe Left( - TopologyTransactionRejection.UnknownMembers(Seq(participant1)) - ) - } - } - - "reject when participants don't have a valid encryption or signing key" in { - val (checks, store) = mk() - val p2MissingEncKey = factory.mkAdd( - OwnerToKeyMapping(participant2, NonEmpty(Seq, factory.SigningKeys.key1)) - ) - val p3MissingSigningKey = factory.mkAdd( - OwnerToKeyMapping(participant3, NonEmpty(Seq, factory.EncryptionKeys.key1)) - ) - - addToStore(store, p1_dtc, p2_dtc, p3_dtc, p2MissingEncKey, p3MissingSigningKey) - - val missingKeyCases = Seq(participant1, participant2, participant3) - - missingKeyCases.foreach { participant => - val ptp = factory.mkAdd( - PartyToParticipant.tryCreate( - party1, - PositiveInt.one, - Seq(participant -> Submission), - ) - ) - checkTransaction(checks, ptp) shouldBe Left( - TopologyTransactionRejection.InsufficientKeys(Seq(participant)) - ) - } - } - - "handle conflicts between partyId and existing admin parties from synchronizer trust certificates" in { - // the defaults below are a valid explicit admin party allocation for participant1.adminParty - def mkPTP( - partyId: PartyId = participant1.adminParty, - participants: Seq[HostingParticipant] = - Seq(HostingParticipant(participant1, Submission)), - ) = factory.mkAdd( - PartyToParticipant - .create( - partyId = partyId, - threshold = PositiveInt.one, - participants = participants, - ) - .value - ) - - val (checks, store) = mk() - addToStore(store, p1_otk, p1_dtc, p2_otk, p2_dtc) - - // handle the happy case - checkTransaction(checks, mkPTP()) shouldBe Either.unit - - // unhappy scenarios - val invalidParticipantPermission = Seq( - mkPTP(participants = Seq(HostingParticipant(participant1, Confirmation))), - mkPTP(participants = Seq(HostingParticipant(participant1, Observation))), - ) - - val invalidNumberOfHostingParticipants = mkPTP(participants = - Seq( - HostingParticipant(participant1, Submission), - HostingParticipant(participant2, Submission), - ) - ) - - val foreignParticipant = - mkPTP(participants = Seq(HostingParticipant(participant2, Submission))) - - // we don't need to explicitly check threshold > 1, because we already reject the PTP if participants.size > 1 - // and the threshold can never be higher than the number of participants - - val unhappyCases = invalidParticipantPermission ++ Seq( - foreignParticipant, - 
invalidNumberOfHostingParticipants, - ) - - forAll(unhappyCases)(ptp => - checkTransaction(checks, ptp) shouldBe Left( - TopologyTransactionRejection.PartyIdConflictWithAdminParty(ptp.mapping.partyId) - ) - ) - } - - "report no errors for valid mappings" in { - val (checks, store) = mk() - addToStore(store, p1_otk, p1_dtc, p2_otk, p2_dtc, p3_otk, p3_dtc) - - val validCases = Seq[(PositiveInt, Seq[HostingParticipant])]( - PositiveInt.one -> Seq(participant1 -> Confirmation), - PositiveInt.one -> Seq(participant1 -> Submission), - PositiveInt.one -> Seq(participant1 -> Observation, participant2 -> Confirmation), - PositiveInt.two -> Seq(participant1 -> Confirmation, participant2 -> Submission), - PositiveInt.two -> Seq( - participant1 -> Observation, - participant2 -> Submission, - participant3 -> Submission, - ), - ) - - validCases.foreach { case (threshold, participants) => - val ptp = factory.mkAdd( - PartyToParticipant.tryCreate( - party1, - threshold, - participants, - ) - ) - checkTransaction(checks, ptp) shouldBe Either.unit - } - } - - } - - "validating SynchronizerTrustCertificate" should { - "reject a removal when the participant still hosts a party" in { - val (checks, store) = mk() - val ptp = factory.mkAdd( - PartyToParticipant.tryCreate( - party1, - PositiveInt.one, - Seq(participant1 -> Submission), - ) - ) - addToStore( - store, - ptp, - ) - val prior = factory.mkAdd(SynchronizerTrustCertificate(participant1, synchronizerId)) - - val dtc = - factory.mkRemove(SynchronizerTrustCertificate(participant1, synchronizerId)) - checkTransaction(checks, dtc, Some(prior)) shouldBe Left( - TopologyTransactionRejection.ParticipantStillHostsParties(participant1, Seq(party1)) - ) - } - - "handle conflicts with existing party allocations" in { - val explicitAdminPartyParticipant1 = factory.mkAdd( - PartyToParticipant - .create( - partyId = participant1.adminParty, - threshold = PositiveInt.one, - participants = Seq(HostingParticipant(participant1, Submission)), - ) - .value - ) - - // we allocate a party with participant2's UID on participant1. - // this is not an explicit admin party allocation, the party just so happens to use the same UID as participant2. - val partyWithParticipant2Uid = factory.mkAdd( - PartyToParticipant - .create( - partyId = participant2.adminParty, - threshold = PositiveInt.one, - participants = Seq(HostingParticipant(participant1, Submission)), - ) - .value - ) - - val dop = factory.mkAdd( - SynchronizerParametersState( - synchronizerId, - DynamicSynchronizerParameters.defaultValues(testedProtocolVersion), - ) - ) - - val (checks, store) = mk() - - // normally it's not possible to have a valid PTP without an already existing DTC of the hosting participants. - // but let's pretend for this check. 
- addToStore(store, dop, explicitAdminPartyParticipant1, partyWithParticipant2Uid) - - // happy case: we allow the DTC (either a creation or modifying an existing one) - // if there is a valid explicit admin party allocation - checkTransaction(checks, p1_dtc, None) shouldBe Either.unit - - // unhappy case: there already exists a normal party allocation with the same UID - checkTransaction(checks, p2_dtc, None) shouldBe Left( - TopologyTransactionRejection.ParticipantIdConflictWithPartyId( - participant2, - partyWithParticipant2Uid.mapping.partyId, - ) - ) - } - - "reject the addition if the synchronizer is locked" in { - Seq(OnboardingRestriction.RestrictedLocked, OnboardingRestriction.UnrestrictedLocked) - .foreach { restriction => - val (checks, store) = mk() - val dop = factory.mkAdd( - SynchronizerParametersState( - synchronizerId, - DynamicSynchronizerParameters - .defaultValues(testedProtocolVersion) - .tryUpdate(onboardingRestriction = restriction), - ) - ) - addToStore(store, dop) - - val dtc = - factory.mkAdd(SynchronizerTrustCertificate(participant1, synchronizerId)) - - checkTransaction(checks, dtc) shouldBe Left( - TopologyTransactionRejection.OnboardingRestrictionInPlace( - participant1, - restriction, - None, - ) - ) - } - } - - "reject the addition if the synchronizer is restricted" in { - val (checks, store) = mk() - val dop = factory.mkAdd( - SynchronizerParametersState( - synchronizerId, - DynamicSynchronizerParameters - .defaultValues(testedProtocolVersion) - .tryUpdate(onboardingRestriction = OnboardingRestriction.RestrictedOpen), - ) - ) - addToStore( - store, - dop, - factory.mkAdd( - ParticipantSynchronizerPermission( - synchronizerId, - participant1, - ParticipantPermission.Submission, - None, - None, - ) - ), - ) - - // participant2 does not have permission from the synchronizer to join - checkTransaction( - checks, - factory.mkAdd(SynchronizerTrustCertificate(participant2, synchronizerId)), - ) shouldBe Left( - TopologyTransactionRejection.OnboardingRestrictionInPlace( - participant2, - OnboardingRestriction.RestrictedOpen, - None, - ) - ) - - // participant1 has been permissioned by the synchronizer - checkTransaction( - checks, - factory.mkAdd(SynchronizerTrustCertificate(participant1, synchronizerId)), - None, - ) shouldBe Either.unit - } - - "reject a rejoining participant" in { - val (checks, store) = mk() - val dtcRemoval = factory.mkRemove( - SynchronizerTrustCertificate( - participant1, - synchronizerId, - ) - ) - addToStore( - store, - dtcRemoval, - ) - val rejoin = - factory.mkAdd( - SynchronizerTrustCertificate(participant1, synchronizerId), - serial = PositiveInt.two, - ) - - checkTransaction(checks, rejoin, Some(dtcRemoval)) shouldBe Left( - TopologyTransactionRejection.MembersCannotRejoinSynchronizer(Seq(participant1)) - ) - } - - } - - "validating MediatorSynchronizerState" should { - def mkGroups( - serial: PositiveInt, - groupSetup: (NonNegativeInt, Seq[MediatorId])* - ): Seq[SignedTopologyTransaction[TopologyChangeOp.Replace, MediatorSynchronizerState]] = - groupSetup.map { case (group, mediators) => - factory.mkAdd( - MediatorSynchronizerState - .create( - synchronizerId, - group, - PositiveInt.one, - active = mediators, - Seq.empty, - ) - .value, - // the signing key is not relevant for the test - signingKey = factory.SigningKeys.key1, - serial = serial, - ) - } - - "report no errors for valid mappings" in { - val (checks, store) = mk() - val (Seq(med1, med2), transactions) = generateMemberIdentities(2, MediatorId(_)) - addToStore(store, 
transactions*) - - val Seq(mds1) = mkGroups(PositiveInt.one, (NonNegativeInt.zero -> Seq(med1))) - val Seq(mds2) = mkGroups(PositiveInt.two, (NonNegativeInt.zero -> Seq(med1, med2))) - - checkTransaction(checks, mds1) shouldBe Either.unit - checkTransaction(checks, mds2, Some(mds1)) shouldBe Either.unit - } - - "report MediatorsAlreadyAssignedToGroups for duplicate mediator assignments" in { - val (checks, store) = mk() - val (Seq(med1, med2, med3), transactions) = generateMemberIdentities(3, MediatorId(_)) - - val Seq(group0, group1, group2) = mkGroups( - PositiveInt.one, - NonNegativeInt.zero -> Seq(med1), - NonNegativeInt.one -> Seq(med2), - NonNegativeInt.two -> Seq(med1, med2, med3), - ) - - addToStore(store, (transactions :+ group0 :+ group1)*) - - checkTransaction(checks, group2, None) shouldBe Left( - TopologyTransactionRejection.MediatorsAlreadyInOtherGroups( - NonNegativeInt.two, - Map(med1 -> NonNegativeInt.zero, med2 -> NonNegativeInt.one), - ) - ) - } - - "report mediators defined both as active and observers" in { - val (Seq(med1, med2), _transactions) = generateMemberIdentities(2, MediatorId(_)) - - MediatorSynchronizerState - .create( - synchronizerId, - NonNegativeInt.zero, - PositiveInt.one, - active = Seq(med1, med2), - observers = Seq(med1), - ) shouldBe Left( - s"the following mediators were defined both as active and observer: $med1" - ) - } - - "report MembersCannotRejoinSynchronizer for mediators that are being re-onboarded" in { - val (checks, store) = mk() - val (Seq(med1, med2, med3), transactions) = generateMemberIdentities(3, MediatorId(_)) - - val Seq(group0, group1) = mkGroups( - PositiveInt.one, - NonNegativeInt.zero -> Seq(med1, med3), - NonNegativeInt.one -> Seq(med2, med3), - ) - - addToStore(store, (transactions :+ group0 :+ group1)*) - - val Seq(group0RemoveMed1, group1RemoveMed2) = mkGroups( - PositiveInt.two, - NonNegativeInt.zero -> Seq(med3), - NonNegativeInt.one -> Seq(med3), - ) - - store - .update( - SequencedTime(ts1), - EffectiveTime(ts1), - removeMapping = Map( - group0.mapping.uniqueKey -> PositiveInt.one, - group1.mapping.uniqueKey -> PositiveInt.one, - ), - removeTxs = Set.empty, - additions = Seq( - ValidatedTopologyTransaction(group0RemoveMed1), - ValidatedTopologyTransaction(group1RemoveMed2), - ), - ) - .futureValueUS - - val Seq(med1RejoinsGroup0, med2RejoinsGroup0) = mkGroups( - PositiveInt.three, - // try joining the same group - NonNegativeInt.zero -> Seq(med1, med3), - // try joining another group - NonNegativeInt.zero -> Seq(med2, med3), - ) - - checkTransaction(checks, med1RejoinsGroup0, Some(group0RemoveMed1)) shouldBe Left( - TopologyTransactionRejection.MembersCannotRejoinSynchronizer(Seq(med1)) - ) - - checkTransaction(checks, med2RejoinsGroup0, Some(group0RemoveMed1)) shouldBe Left( - TopologyTransactionRejection.MembersCannotRejoinSynchronizer(Seq(med2)) - ) - } - - "handle validation of proposal with a concurrent update of the store" in { - val (checks, store) = mk() - val (Seq(med1, med2), transactions) = generateMemberIdentities(2, MediatorId(_)) - - val Seq(group0_add_med1) = mkGroups(PositiveInt.one, NonNegativeInt.zero -> Seq(med1)) - - addToStore(store, (transactions :+ group0_add_med1)*) - - val Seq(group0_add_med2) = mkGroups( - PositiveInt.two, - NonNegativeInt.zero -> Seq(med1, med2), - ) - - // let's pretend that group0_add_med2 was broadcast by other synchronizerOwners - // and became fully authorized (not necessarily effective yet though) and stored - // between determining the previous effective transaction 
(group0_add_med1) at the start of - // the processing of group0_add_med2 and validating group0_add_med2 - store - .update( - SequencedTime(ts1), - EffectiveTime(ts1), - removeMapping = Map( - group0_add_med1.mapping.uniqueKey -> PositiveInt.one - ), - removeTxs = Set.empty, - additions = Seq( - ValidatedTopologyTransaction(group0_add_med2) - ), - ) - .futureValueUS - checkTransaction(checks, group0_add_med2, Some(group0_add_med1)) shouldBe Right(()) - } - } - - "validating SequencerSynchronizerState" should { - - def mkSDS( - serial: PositiveInt, - sequencers: SequencerId* - ): SignedTopologyTransaction[TopologyChangeOp.Replace, SequencerSynchronizerState] = - factory.mkAdd( - SequencerSynchronizerState - .create( - synchronizerId, - PositiveInt.one, - active = sequencers, - Seq.empty, - ) - .value, - // the signing key is not relevant for the test - signingKey = factory.SigningKeys.key1, - serial = serial, - ) - - "report no errors for valid mappings" in { - val (checks, store) = mk() - val (Seq(seq1, seq2), transactions) = generateMemberIdentities(2, SequencerId(_)) - addToStore(store, transactions*) - - val sds1 = mkSDS(PositiveInt.one, seq1) - val sds2 = mkSDS(PositiveInt.two, seq1, seq2) - - checkTransaction(checks, sds1) shouldBe Either.unit - checkTransaction(checks, sds2, Some(sds1)) shouldBe Either.unit - } - "report sequencers defined both as active and observers" in { - val (Seq(seq1, seq2), _transactions) = generateMemberIdentities(2, SequencerId(_)) - - SequencerSynchronizerState - .create( - synchronizerId, - PositiveInt.one, - active = Seq(seq1, seq2), - observers = Seq(seq1), - ) shouldBe Left( - s"the following sequencers were defined both as active and observer: $seq1" - ) - } - - "report MembersCannotRejoinSynchronizer for sequencers that are being re-onboarded" in { - val (checks, store) = mk() - val (Seq(seq1, seq2), transactions) = generateMemberIdentities(2, SequencerId(_)) - - val sds_S1_S2 = mkSDS( - PositiveInt.one, - seq1, - seq2, - ) - - addToStore(store, (transactions :+ sds_S1_S2)*) - - val sds_S1 = mkSDS(PositiveInt.two, seq1) - - store - .update( - SequencedTime(ts1), - EffectiveTime(ts1), - removeMapping = Map( - sds_S1.mapping.uniqueKey -> PositiveInt.one - ), - removeTxs = Set.empty, - additions = Seq( - ValidatedTopologyTransaction(sds_S1) - ), - ) - .futureValueUS - - val sds_S1_rejoining_S2 = mkSDS(PositiveInt.three, seq1, seq2) - - checkTransaction(checks, sds_S1_rejoining_S2, Some(sds_S1)) shouldBe Left( - TopologyTransactionRejection.MembersCannotRejoinSynchronizer(Seq(seq2)) - ) - } - - "handle validation of a proposal with a concurrent update in the store" in { - val (checks, store) = mk() - val (Seq(seq1, seq2), transactions) = generateMemberIdentities(2, SequencerId(_)) - - val sds_add_seq1 = mkSDS(PositiveInt.one, seq1) - - addToStore(store, (transactions :+ sds_add_seq1)*) - - val sds_add_seq2 = mkSDS(PositiveInt.two, seq1, seq2) - - // let's pretend that sds_add_seq2 was broadcast by other synchronizerOwners - // and became fully authorized (not necessarily effective yet though) and stored - // between determining the previous effective transaction (sds_add_seq1) at the start of - // the processing of sds_add_seq2 and validating sds_add_seq2. 
- store - .update( - SequencedTime(ts1), - EffectiveTime(ts1), - removeMapping = Map( - sds_add_seq1.mapping.uniqueKey -> PositiveInt.one - ), - removeTxs = Set.empty, - additions = Seq( - ValidatedTopologyTransaction(sds_add_seq2) - ), - ) - .futureValueUS - checkTransaction(checks, sds_add_seq2, Some(sds_add_seq1)) shouldBe Right(()) - } - - } - - "validating OwnerToKeyMapping" should { - "report no errors for valid mappings" in { - val (checks, _) = mk() - val okm_sequencer = factory.mkAddMultiKey( - OwnerToKeyMapping(sequencerId, NonEmpty(Seq, factory.SigningKeys.key1)), - NonEmpty(Set, factory.SigningKeys.key1), - ) - val okm_mediator = factory.mkAddMultiKey( - OwnerToKeyMapping(mediatorId, NonEmpty(Seq, factory.SigningKeys.key1)), - NonEmpty(Set, factory.SigningKeys.key1), - ) - val okm_participant = factory.mkAddMultiKey( - OwnerToKeyMapping( - participant1, - NonEmpty(Seq, factory.EncryptionKeys.key1, factory.SigningKeys.key1), - ), - NonEmpty(Set, factory.SigningKeys.key1), - ) - - checkTransaction(checks, okm_sequencer) shouldBe Either.unit - checkTransaction(checks, okm_mediator) shouldBe Either.unit - checkTransaction(checks, okm_participant) shouldBe Either.unit - } - "reject minimum key violations" in { - val (checks, _) = mk() - val okm_sequencerNoSigningKey = factory.mkAddMultiKey( - OwnerToKeyMapping(sequencerId, NonEmpty(Seq, factory.EncryptionKeys.key1)), - NonEmpty(Set, factory.SigningKeys.key1), - ) - val okm_mediatorNoSigningKey = factory.mkAddMultiKey( - OwnerToKeyMapping(mediatorId, NonEmpty(Seq, factory.EncryptionKeys.key1)), - NonEmpty(Set, factory.SigningKeys.key1), - ) - val okm_participantNoSigningKey = factory.mkAddMultiKey( - OwnerToKeyMapping(participant1, NonEmpty(Seq, factory.EncryptionKeys.key1)), - NonEmpty(Set, factory.SigningKeys.key1), - ) - val okm_participantNoEncryptionKey = factory.mkAddMultiKey( - OwnerToKeyMapping(participant1, NonEmpty(Seq, factory.SigningKeys.key1)), - NonEmpty(Set, factory.SigningKeys.key1), - ) - - Seq(okm_sequencerNoSigningKey, okm_mediatorNoSigningKey, okm_participantNoSigningKey) - .foreach(tx => - checkTransaction(checks, tx) shouldBe Left( - InvalidTopologyMapping( - "OwnerToKeyMapping must contain at least 1 signing key." - ) - ) - ) - checkTransaction(checks, okm_participantNoEncryptionKey) shouldBe Left( - InvalidTopologyMapping( - "OwnerToKeyMapping for participants must contain at least 1 encryption key." 
- ) - ) - } - } - } - - private def generateMemberIdentities[M <: Member]( - numMembers: Int, - uidToMember: UniqueIdentifier => M, - ): (Seq[M], Seq[GenericSignedTopologyTransaction]) = { - val allKeys = { - import factory.SigningKeys.* - Seq(key1, key2, key3, key4, key5, key6) - } - val (memberIds, identityTransactions) = (1 to numMembers).map { idx => - val key = allKeys(idx) - val member = - uidToMember(UniqueIdentifier.tryCreate(s"member$idx", Namespace(key.fingerprint))) - member -> List( - factory.mkAdd( - NamespaceDelegation.tryCreate(member.namespace, key, CanSignAllMappings), - key, - ), - factory.mkAdd(OwnerToKeyMapping(member, NonEmpty(Seq, key)), key), - ) - }.unzip - - memberIds -> identityTransactions.flatten - } - - private def addToStore( - store: TopologyStore[SynchronizerStore], - transactions: GenericSignedTopologyTransaction* - ): Unit = - store - .update( - sequenced = SequencedTime.MinValue, - effective = EffectiveTime.MinValue, - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = transactions.map(ValidatedTopologyTransaction(_)), - ) - .futureValueUS - - private def setUpRootCerts(keys: SigningPublicKey*): ( - NonEmpty[Seq[SigningPublicKey]], - Seq[Namespace], - Seq[SignedTopologyTransaction[Replace, NamespaceDelegation]], - ) = { - val (namespaces, rootCerts) = - keys.map { key => - val namespace = Namespace(key.fingerprint) - namespace -> factory.mkAdd( - NamespaceDelegation.tryCreate( - namespace, - key, - CanSignAllMappings, - ), - signingKey = key, - ) - }.unzip - val keysNE = NonEmpty.from(keys).value - (keysNE, namespaces, rootCerts) - } - - private def seconds(s: Int) = PositiveSeconds.tryCreate(time.Duration.ofSeconds(s.toLong)) - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/tracing/GrpcTelemetryContextPropagationTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/tracing/GrpcTelemetryContextPropagationTest.scala deleted file mode 100644 index af0fa0cc27..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/tracing/GrpcTelemetryContextPropagationTest.scala +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.tracing - -import com.digitalasset.canton.networking.grpc.CantonGrpcUtilTest -import com.digitalasset.canton.protobuf.Hello -import com.digitalasset.canton.protobuf.HelloServiceGrpc.HelloService -import com.digitalasset.canton.tracing.TestTelemetry.eventsOrderedByTime -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import io.grpc.stub.StreamObserver -import io.opentelemetry.api.trace.Tracer -import org.scalatest.Outcome -import org.scalatest.concurrent.Eventually -import org.scalatest.wordspec.FixtureAnyWordSpec - -import scala.annotation.nowarn -import scala.concurrent.Future - -@nowarn("msg=match may not be exhaustive") -class GrpcTelemetryContextPropagationTest - extends FixtureAnyWordSpec - with BaseTest - with HasExecutionContext - with Eventually { - val request: Hello.Request = CantonGrpcUtilTest.request - val response: Hello.Response = CantonGrpcUtilTest.response - - class MyHelloService(implicit tracer: Tracer) extends HelloService with Spanning { - override def hello(request: Hello.Request): Future[Hello.Response] = - withSpanFromGrpcContext("MyHelloService.hello") { _ => span => - Future.successful { - span.addEvent("ran MyHelloService.hello") - response - } - } - override def helloStreamed( - request: Hello.Request, - responseObserver: StreamObserver[Hello.Response], - ): Unit = () - } - - class Outer()(implicit tracer: Tracer) extends Spanning { - def foo[A](f: TraceContext => Future[A]): Future[A] = - withNewTrace("Outer.foo") { implicit traceContext => span => - span.addEvent("running Outer.foo") - f(traceContext).map { r => - span.addEvent("finished Outer.foo") - r - } - } - } - - override type FixtureParam = Env - - class Env { - val telemetry = new TestTelemetrySetup() - implicit val tracer: Tracer = telemetry.tracer - val grpc = new CantonGrpcUtilTest.Env(new MyHelloService(), logger)(parallelExecutionContext) - - def close(): Unit = { - grpc.close() - telemetry.close() - } - } - - override def withFixture(test: OneArgTest): Outcome = { - val env = new Env() - try { - withFixture(test.toNoArgTest(env)) - } finally { - env.close() - } - } - - "The telemetry context" should { - "be propagated from GRPC client to server" in { env => - import env.* - implicit val tracer: Tracer = telemetry.tracer - - val sut = new Outer() - grpc.server.start() - - sut.foo(grpc.sendRequest()(_, implicitly).value).futureValue shouldBe Right(response) - - eventually(telemetry.reportedSpans() should have size (2)) - - val List(rootSpan, childSpan) = telemetry.reportedSpans() - childSpan.getParentSpanId shouldBe rootSpan.getSpanId - childSpan.getTraceId shouldBe rootSpan.getTraceId - rootSpan.getName shouldBe "Outer.foo" - childSpan.getName shouldBe "MyHelloService.hello" - - eventsOrderedByTime(rootSpan, childSpan).map(_.getName) should contain.inOrderOnly( - "running Outer.foo", - "ran MyHelloService.hello", - "finished Outer.foo", - ) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/tracing/SerializableTraceContextTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/tracing/SerializableTraceContextTest.scala deleted file mode 100644 index 41b9e51ccf..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/tracing/SerializableTraceContextTest.scala +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
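// A hedged sketch (assuming a configured OpenTelemetry SDK tracer; with the
// no-op default, span contexts are invalid and the check is vacuous) of the
// parent/child relationship the deleted gRPC propagation test asserts: a span
// started while another span's context is current joins the same trace as its
// child. Object and span names here are illustrative, not Canton APIs.
import io.opentelemetry.api.trace.Tracer

object SpanParentingSketch {
  def parentAndChild(tracer: Tracer): Unit = {
    val parent = tracer.spanBuilder("Outer.foo").startSpan()
    val scope = parent.makeCurrent()
    try {
      // Started under the parent's current context, so it shares the trace id;
      // the deleted test verified the same relationship across a gRPC hop.
      val child = tracer.spanBuilder("MyHelloService.hello").startSpan()
      assert(child.getSpanContext.getTraceId == parent.getSpanContext.getTraceId)
      child.end()
    } finally {
      scope.close()
      parent.end()
    }
  }
}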
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.tracing - -import com.digitalasset.canton.BaseTestWordSpec -import org.scalatest.BeforeAndAfterEach - -class SerializableTraceContextTest extends BaseTestWordSpec with BeforeAndAfterEach { - - var testTelemetrySetup: TestTelemetrySetup = _ - - override def beforeEach(): Unit = - testTelemetrySetup = new TestTelemetrySetup() - - override def afterEach(): Unit = - testTelemetrySetup.close() - - "SerializableTraceContext" can { - // If the trace has no fields set and `tc.toProtoV0.toByteArray` is used, the trace context will serialize to an - // empty byte array. This is problematic as some databases will treat an empty byte array as null for their blob - // columns but our table definitions typically expect non-null for the trace context column value. - // This is not an issue when serializing a `VersionedTraceContext` but to not regress we have this unit test. - "won't be serialized to an empty ByteArray" in { - val res = SerializableTraceContext.empty.toByteArray(testedProtocolVersion) - val empty = new Array[Byte](0) - res should not be empty - } - - "serialization roundtrip preserves equality" in { - val rootSpan = testTelemetrySetup.tracer.spanBuilder("test").startSpan() - val childSpan = testTelemetrySetup.tracer - .spanBuilder("equality") - .setParent(TraceContext.empty.context.`with`(rootSpan)) - .startSpan() - - val emptyContext = TraceContext.empty - val contextWithRootSpan = TraceContext(emptyContext.context.`with`(rootSpan)) - val contextWithChildSpan = TraceContext(emptyContext.context.`with`(childSpan)) - - val testCases = Seq(emptyContext, contextWithRootSpan, contextWithChildSpan) - forEvery(testCases) { context => - SerializableTraceContext - .fromProtoV30(SerializableTraceContext(context).toProtoV30) shouldBe - Right(SerializableTraceContext(context)) - SerializableTraceContext.fromProtoVersioned( - SerializableTraceContext(context).toProtoVersioned(testedProtocolVersion) - ) shouldBe Right(SerializableTraceContext(context)) - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/tracing/SpanningTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/tracing/SpanningTest.scala deleted file mode 100644 index 8a20977a78..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/tracing/SpanningTest.scala +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
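// Why the deleted SerializableTraceContextTest pins the empty trace context to
// a non-empty byte array: a protobuf message with no fields set serializes to
// zero bytes, and some databases coerce an empty BLOB to NULL. A minimal
// illustration using the protobuf well-known Empty type (not Canton's wrapper):
import com.google.protobuf.Empty

object EmptyProtoSketch extends App {
  val bytes: Array[Byte] = Empty.getDefaultInstance.toByteArray
  // Zero bytes on the wire; a versioned wrapper avoids persisting this as-is.
  assert(bytes.isEmpty)
  println(s"fieldless message serialized to ${bytes.length} bytes")
}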
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.tracing - -import cats.data.{EitherT, OptionT} -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.concurrent.{DirectExecutionContext, Threading} -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.sequencing.HandlerResult -import com.digitalasset.canton.tracing.TestTelemetry.eventsOrderedByTime -import com.digitalasset.canton.util.CheckedT -import io.opentelemetry.api.common.AttributeKey.stringKey -import io.opentelemetry.api.trace.{StatusCode, Tracer} -import org.scalatest.BeforeAndAfterEach -import org.scalatest.wordspec.AnyWordSpec - -import scala.annotation.nowarn -import scala.concurrent.{Future, Promise} - -@SuppressWarnings(Array("org.wartremover.warts.Null", "org.wartremover.warts.Var")) -@nowarn("msg=match may not be exhaustive") -class SpanningTest extends AnyWordSpec with BaseTest with BeforeAndAfterEach { - private val exception = new RuntimeException("exception thrown") - - var testTelemetrySetup: TestTelemetrySetup = _ - - override def beforeEach(): Unit = - testTelemetrySetup = new TestTelemetrySetup() - - override def afterEach(): Unit = - testTelemetrySetup.close() - - private class Inner(implicit tracer: Tracer) extends Spanning { - def foo()(implicit traceContext: TraceContext): Unit = withSpan("Inner.foo") { _ => span => - span.addEvent("running Inner.foo") - span.addEvent("finished Inner.foo") - } - } - - private class Outer(inner: Inner)(implicit tracer: Tracer) extends Spanning { - def foo(): Unit = withNewTrace("Outer.foo") { implicit traceContext => span => - span.addEvent("running Outer.foo") - inner.foo() - span.addEvent("finished Outer.foo") - } - - def nestedFuture( - inner: Future[Unit] - ): EitherT[OptionT[CheckedT[FutureUnlessShutdown, Int, Int, *], *], String, HandlerResult] = { - implicit val directExecutionContext = DirectExecutionContext(noTracingLogger) - withNewTrace("Outer.nestedFuture") { _ => span => - span.addEvent("running Outer.foo") - EitherT.pure( - HandlerResult.asynchronous(FutureUnlessShutdown.outcomeF(inner)) - ) - } - } - - @nowarn("cat=unused") - def exceptionally()(implicit traceContext: TraceContext): Unit = withSpan("exceptionally") { - implicit traceContext => _ => - throw exception - } - } - - "objects with span reporting" should { - "report root and child spans with events" in { - implicit val tracer: Tracer = testTelemetrySetup.tracer - val sut = new Outer(new Inner()) - - sut.foo() - - val List(rootSpan, childSpan) = testTelemetrySetup.reportedSpans() - childSpan.getParentSpanId shouldBe rootSpan.getSpanId - rootSpan.getName shouldBe "Outer.foo" - childSpan.getName shouldBe "Inner.foo" - rootSpan.getTraceId shouldBe childSpan.getTraceId - eventsOrderedByTime(rootSpan, childSpan).map(_.getName) should contain.inOrderOnly( - "running Outer.foo", - "running Inner.foo", - "finished Inner.foo", - "finished Outer.foo", - ) - } - - "report exception and re-throw it" in { - implicit val tracer: Tracer = testTelemetrySetup.tracer - val sut = new Outer(new Inner()) - - a[RuntimeException] should be thrownBy sut.exceptionally() - - val List(span) = testTelemetrySetup.reportedSpans() - - span.getStatus.getStatusCode shouldBe StatusCode.ERROR - span.getName shouldBe "exceptionally" - - val List(exceptionEvent) = eventsOrderedByTime(span) - exceptionEvent.getName shouldBe "exception" - exceptionEvent.getAttributes.get(stringKey("exception.message")) shouldBe exception.getMessage - 
exceptionEvent.getAttributes.get( - stringKey("exception.type") - ) shouldBe exception.getClass.getName - } - - "close the span only after the inner future has completed" in { - implicit val tracer: Tracer = testTelemetrySetup.tracer - val sut = new Outer(new Inner()) - - val promise = Promise[Unit]() - - val start = System.nanoTime() - val innerFuture = sut - .nestedFuture(promise.future) - .value - .value - .value - .unwrap - .futureValue - .onShutdown(fail()) - .getResult - .value - .value - .value - .unwrap - .futureValue - .onShutdown(fail()) - .unwrap - .unwrap - val end = System.nanoTime() - val outerCallLength = end - start - - Threading.sleep(outerCallLength / 1000000 + 1) - - // Only now complete the inner future - promise.success(()) - innerFuture.futureValue - - val List(span) = testTelemetrySetup.reportedSpans() - - val spanLength = span.getEndEpochNanos - span.getStartEpochNanos - spanLength shouldBe >(outerCallLength) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/tracing/TraceContextTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/tracing/TraceContextTest.scala deleted file mode 100644 index 9e44a7e242..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/tracing/TraceContextTest.scala +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.tracing - -import io.opentelemetry.api.trace.Span -import io.opentelemetry.context.Context -import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.AnyWordSpec -import org.scalatest.{Assertion, BeforeAndAfterEach, OptionValues} - -import java.io.{ByteArrayInputStream, ByteArrayOutputStream, ObjectInputStream, ObjectOutputStream} - -class TraceContextTest extends AnyWordSpec with Matchers with OptionValues with BeforeAndAfterEach { - - var testTelemetrySetup: TestTelemetrySetup = _ - - override def beforeEach(): Unit = - testTelemetrySetup = new TestTelemetrySetup() - - override def afterEach(): Unit = - testTelemetrySetup.close() - - private def spansAreEqual(span1: Span, span2: Span): Assertion = { - val ctx1 = span1.getSpanContext - val ctx2 = span2.getSpanContext - assert(ctx1.getTraceId == ctx2.getTraceId && ctx1.getSpanId == ctx2.getSpanId) - } - - "TelemetryContext" can { - "be serialized by Java serialization" in { - val rootSpan = testTelemetrySetup.tracer.spanBuilder("test").startSpan() - val childSpan = testTelemetrySetup.tracer - .spanBuilder("test") - .setParent(TraceContext.empty.context.`with`(rootSpan)) - .startSpan() - - val emptyContext = TraceContext.empty - val contextWithRootSpan = TraceContext(emptyContext.context.`with`(rootSpan)) - val contextWithChildSpan = TraceContext(emptyContext.context.`with`(childSpan)) - - val byteOutputStream = new ByteArrayOutputStream() - val outputStream = new ObjectOutputStream(byteOutputStream) - outputStream.writeObject(emptyContext) - outputStream.writeObject(contextWithRootSpan) - outputStream.writeObject(contextWithChildSpan) - outputStream.flush() - outputStream.close() - val bytes = byteOutputStream.toByteArray - - val inputStream = new ObjectInputStream(new ByteArrayInputStream(bytes)) - val ctx1 = inputStream.readObject().asInstanceOf[TraceContext] - val ctx2 = inputStream.readObject().asInstanceOf[TraceContext] - val ctx3 = inputStream.readObject().asInstanceOf[TraceContext] - inputStream.close() - - 
Span.fromContextOrNull(ctx1.context) shouldBe null - spansAreEqual(Span.fromContextOrNull(ctx2.context), rootSpan) - spansAreEqual(Span.fromContextOrNull(ctx3.context), childSpan) - } - - "convert back and forth from W3C trace context" in { - val rootSpan = testTelemetrySetup.tracer.spanBuilder("test").startSpan() - val childSpan = testTelemetrySetup.tracer - .spanBuilder("test") - .setParent(Context.root().`with`(rootSpan)) - .startSpan() - - val emptyContext = TraceContext.empty - val contextWithRootSpan = TraceContext(Context.root().`with`(rootSpan)) - val contextWithChildSpan = TraceContext(Context.root().`with`(childSpan)) - - emptyContext.asW3CTraceContext shouldBe None - Span.fromContextOrNull(W3CTraceContext("").toTraceContext.context) shouldBe null - - val rootW3c = contextWithRootSpan.asW3CTraceContext.value - spansAreEqual(Span.fromContextOrNull(rootW3c.toTraceContext.context), rootSpan) - - val childW3c = contextWithChildSpan.asW3CTraceContext.value - spansAreEqual(Span.fromContextOrNull(childW3c.toTraceContext.context), childSpan) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/traffic/EventCostCalculatorTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/traffic/EventCostCalculatorTest.scala deleted file mode 100644 index 2502cf3a3f..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/traffic/EventCostCalculatorTest.scala +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.traffic - -import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt} -import com.digitalasset.canton.sequencing.protocol.{ - AllMembersOfSynchronizer, - Batch, - ClosedEnvelope, - Recipients, -} -import com.digitalasset.canton.sequencing.traffic.EventCostCalculator -import com.digitalasset.canton.sequencing.traffic.EventCostCalculator.{ - EnvelopeCostDetails, - EventCostDetails, -} -import com.digitalasset.canton.topology.{DefaultTestIdentities, Member} -import com.digitalasset.canton.{BaseTest, ProtocolVersionChecksAnyWordSpec} -import com.google.protobuf.ByteString -import org.scalatest.wordspec.AnyWordSpec - -class EventCostCalculatorTest - extends AnyWordSpec - with BaseTest - with ProtocolVersionChecksAnyWordSpec { - private val recipient1 = DefaultTestIdentities.participant1 - private val recipient2 = DefaultTestIdentities.participant2 - - "calculate cost correctly" in { - val recipients = Recipients.cc(recipient1, recipient2) - new EventCostCalculator(loggerFactory).computeEnvelopeCost( - PositiveInt.tryCreate(5000), - Map.empty, - )( - ClosedEnvelope.create( - ByteString.copyFrom(Array.fill(5)(1.toByte)), - recipients, - Seq.empty, - testedProtocolVersion, - ) - ) shouldBe EnvelopeCostDetails( - writeCost = 5L, - readCost = 5L, // 5 * 2 * 5000 / 10000 - finalCost = 10L, - recipients = recipients.allRecipients.toSeq, - ) - } - - "use resolved group recipients" in { - val recipients = Recipients.cc(AllMembersOfSynchronizer) - new EventCostCalculator(loggerFactory).computeEnvelopeCost( - PositiveInt.tryCreate(5000), - Map(AllMembersOfSynchronizer -> Set(recipient1, recipient2)), - )( - ClosedEnvelope.create( - ByteString.copyFrom(Array.fill(5)(1.toByte)), - recipients, - Seq.empty, - testedProtocolVersion, - ) - ) shouldBe EnvelopeCostDetails( - writeCost = 5L, - readCost = 5L, // 5 * 2 * 5000 / 10000 - finalCost = 10L, - 
recipients.allRecipients.toSeq, - ) - } - - "cost computation does not overflow an int" in { - // Trying to reproduce case seen on CN devnet: - // ~ 500 recipients, cost multiplier 200, estimated payload 25000 - // This overflows an Int computation (-154496 instead of 275000) - - val recipients = Recipients.cc(AllMembersOfSynchronizer) - val manyRecipients = List.fill(500)(mock[Member]).toSet - new EventCostCalculator(loggerFactory).computeEnvelopeCost( - PositiveInt.tryCreate(200), - Map(AllMembersOfSynchronizer -> manyRecipients), - )( - ClosedEnvelope.create( - ByteString.copyFrom(Array.fill(25000)(1.toByte)), - recipients, - Seq.empty, - testedProtocolVersion, - ) - ) shouldBe EnvelopeCostDetails( - writeCost = 25000L, - readCost = 250000L, // 25000 * 500 * 200 / 10000 - finalCost = 275000L, - recipients.allRecipients.toSeq, - ) - } - - "detect cost computation overflow" in { - val manyRecipients = List.fill(1_000)(mock[Member]).toSet - - val exception = intercept[IllegalStateException]( - new EventCostCalculator(loggerFactory).computeEnvelopeCost( - PositiveInt.tryCreate(1_000_000_000), - Map(AllMembersOfSynchronizer -> manyRecipients), - )( - ClosedEnvelope.create( - ByteString.copyFrom(Array.fill(10_000_000)(1.toByte)), - Recipients.cc(AllMembersOfSynchronizer), - Seq.empty, - testedProtocolVersion, - ) - ) - ) - - exception.getMessage should include("Overflow in cost computation") - } - - "respect minimum event cost" in { - val recipients = Recipients.cc(recipient1, recipient2) - val expectedEnvelopeCost = EnvelopeCostDetails( - writeCost = 5L, - readCost = 5L, // 5 * 2 * 5000 / 10000 - finalCost = 10L, - recipients = recipients.allRecipients.toSeq, - ) - val baseCost = NonNegativeLong.tryCreate(350) - new EventCostCalculator(loggerFactory).computeEventCost( - Batch.fromClosed( - testedProtocolVersion, - ClosedEnvelope.create( - ByteString.copyFrom(Array.fill(5)(1.toByte)), - recipients, - Seq.empty, - testedProtocolVersion, - ), - ), - PositiveInt.tryCreate(5000), - Map.empty, - testedProtocolVersion, - baseEventCost = baseCost, - ) shouldBe EventCostDetails( - costMultiplier = PositiveInt.tryCreate(5000), - groupToMembersSize = Map.empty, - envelopes = List(expectedEnvelopeCost), - eventCost = NonNegativeLong.tryCreate(expectedEnvelopeCost.finalCost + baseCost.value), - ) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/traffic/TrafficControlProcessorTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/traffic/TrafficControlProcessorTest.scala deleted file mode 100644 index 1f7bd3a1ad..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/traffic/TrafficControlProcessorTest.scala +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
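// The arithmetic behind the deleted overflow test, reproduced as a runnable
// sketch with the numbers from its comment (~500 recipients, cost multiplier
// 200, estimated payload 25000): the 32-bit product wraps, the 64-bit one
// does not. Object name is illustrative.
object EventCostOverflowSketch extends App {
  val payload = 25000
  val recipients = 500
  val multiplier = 200

  // Int arithmetic: 25000 * 500 * 200 = 2500000000 wraps to -1794967296, so
  // payload + readCost yields the bogus -154496 noted in the test comment.
  val readCostInt = payload * recipients * multiplier / 10000
  println(payload + readCostInt) // -154496

  // Widening to Long before multiplying yields the intended final cost.
  val readCostLong = payload.toLong * recipients * multiplier / 10000
  println(payload + readCostLong) // 275000
}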
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.traffic - -import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt} -import com.digitalasset.canton.crypto.Signature -import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.logging.LogEntry -import com.digitalasset.canton.protocol.messages.{ - DefaultOpenEnvelope, - SetTrafficPurchasedMessage, - SignedProtocolMessage, - TopologyTransactionsBroadcast, -} -import com.digitalasset.canton.sequencing.WithCounter -import com.digitalasset.canton.sequencing.protocol.* -import com.digitalasset.canton.sequencing.traffic.TrafficControlErrors.InvalidTrafficPurchasedMessage -import com.digitalasset.canton.sequencing.traffic.TrafficControlProcessor.TrafficControlSubscriber -import com.digitalasset.canton.sequencing.traffic.{TrafficControlProcessor, TrafficReceipt} -import com.digitalasset.canton.topology.processing.TopologyTransactionTestFactory -import com.digitalasset.canton.topology.{DefaultTestIdentities, TestingTopology} -import com.digitalasset.canton.tracing.{TraceContext, Traced} -import com.digitalasset.canton.{BaseTest, HasExecutionContext, SequencerCounter} -import org.scalatest.wordspec.AnyWordSpec - -import java.util.UUID -import java.util.concurrent.atomic.AtomicReference -import scala.collection.mutable - -class TrafficControlProcessorTest extends AnyWordSpec with BaseTest with HasExecutionContext { - - private val synchronizerId = DefaultTestIdentities.physicalSynchronizerId - private val participantId = DefaultTestIdentities.participant1 - - private val ts1 = CantonTimestamp.ofEpochSecond(1) - private val ts2 = CantonTimestamp.ofEpochSecond(2) - private val ts3 = CantonTimestamp.ofEpochSecond(3) - private val sc1 = SequencerCounter(1) - private val sc2 = SequencerCounter(2) - private val sc3 = SequencerCounter(3) - - private val synchronizerCrypto = TestingTopology(synchronizerParameters = List.empty) - .build(loggerFactory) - .forOwnerAndSynchronizer(DefaultTestIdentities.sequencerId, synchronizerId) - - private val dummySignature = SymbolicCrypto.emptySignature - - private val factory = - new TopologyTransactionTestFactory(loggerFactory, initEc = parallelExecutionContext) - - private lazy val topoTx: TopologyTransactionsBroadcast = TopologyTransactionsBroadcast( - synchronizerId, - List(factory.ns1k1_k1), - ) - - private def mkSetTrafficPurchased( - signatureO: Option[Signature] = None - ): SignedProtocolMessage[SetTrafficPurchasedMessage] = { - val setTrafficPurchased = SetTrafficPurchasedMessage( - participantId, - PositiveInt.one, - NonNegativeLong.tryCreate(100), - synchronizerId, - ) - - signatureO match { - case Some(signature) => - SignedProtocolMessage.from( - setTrafficPurchased, - testedProtocolVersion, - signature, - ) - - case None => - SignedProtocolMessage - .trySignAndCreate( - setTrafficPurchased, - synchronizerCrypto.currentSnapshotApproximation, - testedProtocolVersion, - ) - .failOnShutdown - .futureValue - } - } - - private def mkTrafficProcessor(): ( - TrafficControlProcessor, - AtomicReference[mutable.Builder[CantonTimestamp, Seq[CantonTimestamp]]], - AtomicReference[ - mutable.Builder[SetTrafficPurchasedMessage, Seq[SetTrafficPurchasedMessage]] - ], - ) = { - val tcp = new TrafficControlProcessor( - synchronizerCrypto, - synchronizerId, - Option.empty[CantonTimestamp], - loggerFactory, - ) - val 
observedTs = new AtomicReference(Seq.newBuilder[CantonTimestamp]) - val updates = new AtomicReference(Seq.newBuilder[SetTrafficPurchasedMessage]) - - tcp.subscribe(new TrafficControlSubscriber { - override def observedTimestamp(timestamp: CantonTimestamp)(implicit - traceContext: TraceContext - ): Unit = observedTs.updateAndGet(_ += timestamp) - - override def trafficPurchasedUpdate( - update: SetTrafficPurchasedMessage, - sequencingTimestamp: CantonTimestamp, - )(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Unit] = FutureUnlessShutdown.pure(updates.updateAndGet(_ += update)) - }) - - (tcp, observedTs, updates) - } - - private def mkDeliver( - ts: CantonTimestamp, - batch: Batch[DefaultOpenEnvelope], - ): Deliver[DefaultOpenEnvelope] = - Deliver.create( - None, - ts, - synchronizerId, - None, - batch, - None, - testedProtocolVersion, - Option.empty[TrafficReceipt], - ) - - private def mkDeliverError( - ts: CantonTimestamp - ): DeliverError = - DeliverError.create( - None, - ts, - synchronizerId, - MessageId.fromUuid(new UUID(0, 1)), - SequencerErrors.SubmissionRequestRefused("Some error"), - testedProtocolVersion, - Option.empty[TrafficReceipt], - ) - - "the traffic control processor" should { - "notify subscribers of all event timestamps" in { - val batch = Batch.of(testedProtocolVersion, topoTx -> Recipients.cc(participantId)) - val events = Traced( - Seq( - sc1 -> mkDeliver(ts1, batch), - sc2 -> mkDeliverError(ts2), - sc3 -> mkDeliver(ts3, batch), - ).map { case (counter, e) => WithCounter(counter, Traced(e)) } - ) - - val (tcp, observedTs, updates) = mkTrafficProcessor() - - tcp(events).futureValueUS.unwrap.futureValueUS - - observedTs.get().result() shouldBe Seq(ts1, ts2, ts3) - updates.get().result() shouldBe Seq.empty - } - - "notify subscribers of updates" in { - val update = mkSetTrafficPurchased() - val batch = - Batch.of(testedProtocolVersion, update -> Recipients.cc(SequencersOfSynchronizer)) - - val (tcp, observedTs, updates) = mkTrafficProcessor() - - tcp.processSetTrafficPurchasedEnvelopes(ts1, None, batch.envelopes).futureValueUS - - observedTs.get().result() shouldBe Seq.empty - updates.get().result() shouldBe Seq(update.message) - } - - "drop updates that do not target all sequencers" in { - val update = mkSetTrafficPurchased() - val batch = - Batch.of(testedProtocolVersion, update -> Recipients.cc(participantId)) - - val (tcp, observedTs, updates) = mkTrafficProcessor() - - loggerFactory.assertLoggedWarningsAndErrorsSeq( - tcp.processSetTrafficPurchasedEnvelopes(ts1, None, batch.envelopes).futureValueUS, - LogEntry.assertLogSeq( - Seq( - ( - _.shouldBeCantonError( - InvalidTrafficPurchasedMessage, - _ should include("should be addressed to all the sequencers of a synchronizer"), - ), - "invalid recipients", - ) - ) - ), - ) - - observedTs.get().result() shouldBe Seq(ts1) - updates.get().result() shouldBe Seq.empty - } - - "drop updates with invalid signatures" in { - val update = mkSetTrafficPurchased(Some(dummySignature)) - val batch = - Batch.of(testedProtocolVersion, update -> Recipients.cc(SequencersOfSynchronizer)) - - val (tcp, observedTs, updates) = mkTrafficProcessor() - - loggerFactory.assertLoggedWarningsAndErrorsSeq( - tcp.processSetTrafficPurchasedEnvelopes(ts1, None, batch.envelopes).futureValueUS, - LogEntry.assertLogSeq( - Seq( - ( - _.shouldBeCantonError( - InvalidTrafficPurchasedMessage, - _ should (include( - "signature threshold not reached" - ) and include regex raw"Key \S+ used to generate signature is not a valid key for 
SequencerGroup"), - ), - "invalid signatures", - ) - ) - ), - ) - - observedTs.get().result() shouldBe Seq(ts1) - updates.get().result() shouldBe Seq.empty - } - - "drop updates with invalid timestamp of signing key" in { - val update = mkSetTrafficPurchased() - val batch = - Batch.of(testedProtocolVersion, update -> Recipients.cc(SequencersOfSynchronizer)) - - val (tcp, observedTs, updates) = mkTrafficProcessor() - - loggerFactory.assertLoggedWarningsAndErrorsSeq( - tcp.processSetTrafficPurchasedEnvelopes(ts1, Some(ts2), batch.envelopes).futureValueUS, - LogEntry.assertLogSeq( - Seq( - ( - _.shouldBeCantonError( - InvalidTrafficPurchasedMessage, - _ should include( - s"the timestamp of the topology (Some($ts2)) is not set to the event timestamp ($ts1)" - ), - ), - "invalid timestamp of signing key", - ) - ) - ), - ) - - observedTs.get().result() shouldBe Seq(ts1) - updates.get().result() shouldBe Seq.empty - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/traffic/TrafficPurchasedSubmissionHandlerTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/traffic/TrafficPurchasedSubmissionHandlerTest.scala deleted file mode 100644 index c6958faf1a..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/traffic/TrafficPurchasedSubmissionHandlerTest.scala +++ /dev/null @@ -1,368 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.traffic - -import cats.data.EitherT -import cats.syntax.either.* -import com.daml.metrics.api.MetricsContext -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt} -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.UnlessShutdown -import com.digitalasset.canton.logging.{LogEntry, SuppressionRule} -import com.digitalasset.canton.protocol.messages.{ - DefaultOpenEnvelope, - SetTrafficPurchasedMessage, - SignedProtocolMessage, -} -import com.digitalasset.canton.protocol.{DynamicSynchronizerParameters, SynchronizerParameters} -import com.digitalasset.canton.sequencing.TrafficControlParameters -import com.digitalasset.canton.sequencing.client.{ - SendAsyncClientError, - SendCallback, - SendResult, - SequencerClientSend, -} -import com.digitalasset.canton.sequencing.protocol.* -import com.digitalasset.canton.sequencing.traffic.{ - TrafficControlErrors, - TrafficPurchasedSubmissionHandler, - TrafficReceipt, -} -import com.digitalasset.canton.time.{SimClock, SynchronizerTimeTracker} -import com.digitalasset.canton.topology.* -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.{BaseTest, HasExecutionContext, ProtocolVersionChecksAnyWordSpec} -import com.google.rpc.status.Status -import org.mockito.ArgumentCaptor -import org.mockito.Mockito.clearInvocations -import org.scalatest.BeforeAndAfterEach -import org.scalatest.wordspec.AnyWordSpec -import org.slf4j.event.Level - -import java.time.{LocalDateTime, ZoneOffset} -import scala.jdk.CollectionConverters.CollectionHasAsScala -import scala.util.Try - -class TrafficPurchasedSubmissionHandlerTest - extends AnyWordSpec - with BaseTest - with HasExecutionContext - with BeforeAndAfterEach - with ProtocolVersionChecksAnyWordSpec { - - private val recipient1 = DefaultTestIdentities.participant1.member - private val sequencerClient = mock[SequencerClientSend] - private val 
synchronizerTimeTracker = mock[SynchronizerTimeTracker] - private val synchronizerId = SynchronizerId.tryFromString("da::default").toPhysical - private val clock = new SimClock(loggerFactory = loggerFactory) - private val trafficParams = TrafficControlParameters() - private val handler = new TrafficPurchasedSubmissionHandler(clock, loggerFactory) - val crypto = TestingTopology( - synchronizerParameters = List( - SynchronizerParameters.WithValidity( - validFrom = CantonTimestamp.Epoch.minusSeconds(1), - validUntil = None, - parameter = DynamicSynchronizerParameters - .defaultValues(testedProtocolVersion) - .tryUpdate(trafficControlParameters = Some(trafficParams)), - ) - ) - ).build(loggerFactory) - .forOwnerAndSynchronizer(DefaultTestIdentities.sequencerId, synchronizerId) - - override def beforeEach(): Unit = { - super.beforeEach() - clock.reset() - } - - "send a well formed top up message" in { - val maxSequencingTimeCapture: ArgumentCaptor[CantonTimestamp] = - ArgumentCaptor.forClass(classOf[CantonTimestamp]) - val batchCapture: ArgumentCaptor[Batch[DefaultOpenEnvelope]] = - ArgumentCaptor.forClass(classOf[Batch[DefaultOpenEnvelope]]) - val aggregationRuleCapture = ArgumentCaptor.forClass(classOf[Option[AggregationRule]]) - val callbackCapture: ArgumentCaptor[SendCallback] = - ArgumentCaptor.forClass(classOf[SendCallback]) - when( - sequencerClient.sendAsync( - batchCapture.capture(), - any[Option[CantonTimestamp]], - maxSequencingTimeCapture.capture(), - any[MessageId], - aggregationRuleCapture.capture(), - callbackCapture.capture(), - any[Boolean], - )(any[TraceContext], any[MetricsContext]) - ).thenReturn(EitherT.pure(())) - - val resultF = handler - .sendTrafficPurchasedRequest( - recipient1, - PositiveInt.tryCreate(5), - NonNegativeLong.tryCreate(1000), - sequencerClient, - synchronizerTimeTracker, - crypto, - ) - .value - - eventually() { - Try(callbackCapture.getValue).isSuccess shouldBe true - } - callbackCapture.getValue.asInstanceOf[SendCallback.CallbackFuture]( - UnlessShutdown.Outcome(SendResult.Success(mock[Deliver[Envelope[_]]])) - ) - maxSequencingTimeCapture.getValue shouldBe clock.now.plusSeconds( - trafficParams.setBalanceRequestSubmissionWindowSize.duration.toSeconds - ) - - resultF.failOnShutdown.futureValue shouldBe Either.unit - - val batch = batchCapture.getValue - batch.envelopes.head.recipients shouldBe Recipients( - NonEmpty.mk( - Seq, - RecipientsTree.ofMembers( - NonEmpty.mk(Set, recipient1), // Root of recipient tree: recipient of the top up - Seq( - RecipientsTree.recipientsLeaf( // Leaf of the tree: sequencers of synchronizer group - NonEmpty.mk( - Set, - SequencersOfSynchronizer: Recipient, - ) - ) - ), - ), - ) - ) - batch.envelopes.foreach { envelope => - envelope.protocolMessage shouldBe a[SignedProtocolMessage[_]] - val topUpMessage = envelope.protocolMessage - .asInstanceOf[SignedProtocolMessage[SetTrafficPurchasedMessage]] - .message - topUpMessage.synchronizerId shouldBe synchronizerId - topUpMessage.serial.value shouldBe 5 - topUpMessage.member shouldBe recipient1 - topUpMessage.totalTrafficPurchased.value shouldBe 1000 - } - } - - "send 2 messages if close to the end of the max sequencing time window" in { - val callbackCapture: ArgumentCaptor[SendCallback] = - ArgumentCaptor.forClass(classOf[SendCallback]) - val maxSequencingTimeCapture: ArgumentCaptor[CantonTimestamp] = - ArgumentCaptor.forClass(classOf[CantonTimestamp]) - - val minutesBucketEnd = - (8 * trafficParams.setBalanceRequestSubmissionWindowSize.duration.toMinutes).toInt - // 
01/01/2024 15:31:00 - val currentSimTime = LocalDateTime.of(2024, 1, 1, 15, minutesBucketEnd - 1, 0) - val newTime = CantonTimestamp.ofEpochMilli( - currentSimTime.toInstant(ZoneOffset.UTC).toEpochMilli - ) - // Advance the clock to 15:minutesBucketEnd - 1 - within one minute of the next time bucket (every setBalanceRequestSubmissionWindowSize minutes) - clock.advanceTo(newTime) - - when( - sequencerClient.sendAsync( - any[Batch[DefaultOpenEnvelope]], - any[Option[CantonTimestamp]], - maxSequencingTimeCapture.capture(), - any[MessageId], - any[Option[AggregationRule]], - callbackCapture.capture(), - any[Boolean], - )(any[TraceContext], any[MetricsContext]) - ).thenReturn(EitherT.pure(())) - - val resultF = handler - .sendTrafficPurchasedRequest( - recipient1, - PositiveInt.tryCreate(5), - NonNegativeLong.tryCreate(1000), - sequencerClient, - synchronizerTimeTracker, - crypto, - ) - .value - - eventually() { - Try(callbackCapture.getAllValues).isSuccess shouldBe true - Try(maxSequencingTimeCapture.getAllValues).isSuccess shouldBe true - callbackCapture.getAllValues.size() shouldBe 2 - maxSequencingTimeCapture.getAllValues.size() shouldBe 2 - } - callbackCapture.getAllValues.asScala.foreach { - _.asInstanceOf[SendCallback.CallbackFuture]( - UnlessShutdown.Outcome(SendResult.Success(mock[Deliver[Envelope[_]]])) - ) - } - - def mkTimeBucketUpperBound(minutes: Int) = CantonTimestamp.ofEpochMilli( - currentSimTime - .withMinute(minutes) - .toInstant(ZoneOffset.UTC) - .toEpochMilli - ) - - maxSequencingTimeCapture.getAllValues.asScala should contain theSameElementsAs List( - mkTimeBucketUpperBound(minutesBucketEnd), - mkTimeBucketUpperBound( - minutesBucketEnd + trafficParams.setBalanceRequestSubmissionWindowSize.duration.toMinutes.toInt - ), - ) - - resultF.failOnShutdown.futureValue shouldBe Either.unit - } - - "catch sequencer client failures" in { - when( - sequencerClient.sendAsync( - any[Batch[DefaultOpenEnvelope]], - any[Option[CantonTimestamp]], - any[CantonTimestamp], - any[MessageId], - any[Option[AggregationRule]], - any[SendCallback], - any[Boolean], - )(any[TraceContext], any[MetricsContext]) - ) - .thenReturn(EitherT.leftT(SendAsyncClientError.RequestFailed("failed"))) - - handler - .sendTrafficPurchasedRequest( - recipient1, - PositiveInt.tryCreate(5), - NonNegativeLong.tryCreate(1000), - sequencerClient, - synchronizerTimeTracker, - crypto, - ) - .value - .failOnShutdown - .futureValue shouldBe Left( - TrafficControlErrors.TrafficPurchasedRequestAsyncSendFailed.Error( - "RequestFailed(failed)" - ) - ) - } - - "log sequencing failures" in { - val callbackCapture: ArgumentCaptor[SendCallback] = - ArgumentCaptor.forClass(classOf[SendCallback]) - when( - sequencerClient.sendAsync( - any[Batch[DefaultOpenEnvelope]], - any[Option[CantonTimestamp]], - any[CantonTimestamp], - any[MessageId], - any[Option[AggregationRule]], - callbackCapture.capture(), - any[Boolean], - )(any[TraceContext], any[MetricsContext]) - ) - .thenReturn(EitherT.pure(())) - - val messageId = MessageId.randomMessageId() - val deliverError = DeliverError.create( - None, - CantonTimestamp.Epoch, - synchronizerId, - messageId, - Status.defaultInstance.withMessage("BOOM"), - testedProtocolVersion, - Option.empty[TrafficReceipt], - ) - - loggerFactory.assertEventuallyLogsSeq(SuppressionRule.Level(Level.INFO))( - { - val resultF = handler.sendTrafficPurchasedRequest( - recipient1, - PositiveInt.tryCreate(5), - NonNegativeLong.tryCreate(1000), - sequencerClient, - synchronizerTimeTracker, - crypto, - ) - - eventually() { - 
Try(callbackCapture.getValue).isSuccess shouldBe true - } - callbackCapture.getValue.asInstanceOf[SendCallback.CallbackFuture]( - UnlessShutdown.Outcome(SendResult.Error(deliverError)) - ) - - resultF.failOnShutdown.value.futureValue shouldBe Either.unit - }, - LogEntry.assertLogSeq( - Seq( - ( - _.message should include( - s"The traffic balance request submission failed: DeliverError(previous timestamp = None(), timestamp = 1970-01-01T00:00:00Z, id = $synchronizerId, message id = $messageId, reason = Status(OK, BOOM))" - ), - "sequencing failure", - ) - ), - Seq(_ => succeed), - ), - ) - } - - "log sequencing timeouts" in { - val callbackCapture: ArgumentCaptor[SendCallback] = - ArgumentCaptor.forClass(classOf[SendCallback]) - when( - sequencerClient.sendAsync( - any[Batch[DefaultOpenEnvelope]], - any[Option[CantonTimestamp]], - any[CantonTimestamp], - any[MessageId], - any[Option[AggregationRule]], - callbackCapture.capture(), - any[Boolean], - )(any[TraceContext], any[MetricsContext]) - ) - .thenReturn(EitherT.pure(())) - clearInvocations(synchronizerTimeTracker) - - loggerFactory.assertEventuallyLogsSeq(SuppressionRule.Level(Level.INFO))( - { - val resultF = handler.sendTrafficPurchasedRequest( - recipient1, - PositiveInt.tryCreate(5), - NonNegativeLong.tryCreate(1000), - sequencerClient, - synchronizerTimeTracker, - crypto, - ) - - eventually() { - Try(callbackCapture.getValue).isSuccess shouldBe true - } - callbackCapture.getValue.asInstanceOf[SendCallback.CallbackFuture]( - UnlessShutdown.Outcome(SendResult.Timeout(CantonTimestamp.Epoch)) - ) - - resultF.value.failOnShutdown.futureValue shouldBe Either.unit - }, - LogEntry.assertLogSeq( - Seq( - ( - _.infoMessage should include( - s"The traffic balance request submission timed out after sequencing time 1970-01-01T00:00:00Z has elapsed" - ), - "timeout", - ) - ), - Seq.empty, - ), - ) - - // Check that a tick was requested so that the sequencer will actually observe the timeout - verify(synchronizerTimeTracker).requestTick(any[CantonTimestamp], any[Boolean])( - any[TraceContext] - ) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/BatchAggregatorTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/BatchAggregatorTest.scala deleted file mode 100644 index 923beda5cf..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/BatchAggregatorTest.scala +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import cats.syntax.parallel.* -import cats.syntax.traverse.* -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.concurrent.Threading -import com.digitalasset.canton.config.BatchAggregatorConfig -import com.digitalasset.canton.config.RequireTypes.PositiveNumeric -import com.digitalasset.canton.lifecycle.{ - CloseContext, - FutureUnlessShutdown, - PromiseUnlessShutdown, - UnlessShutdown, -} -import com.digitalasset.canton.logging.pretty.Pretty -import com.digitalasset.canton.logging.{LogEntry, TracedLogger} -import com.digitalasset.canton.tracing.{TraceContext, Traced} -import com.digitalasset.canton.util.ShowUtil.* -import com.digitalasset.canton.version.HasTestCloseContext -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import com.github.blemale.scaffeine.Scaffeine -import org.scalatest.Assertion -import org.scalatest.wordspec.AnyWordSpec - -import scala.collection.concurrent.TrieMap -import scala.util.Random - -class BatchAggregatorTest - extends AnyWordSpec - with BaseTest - with HasExecutionContext - with HasTestCloseContext { - type K = Int - type V = String - type BatchGetterType = NonEmpty[Seq[Traced[K]]] => FutureUnlessShutdown[Iterable[V]] - - private val defaultKeyToValue: K => V = _.toString - private val defaultBatchGetter: NonEmpty[Seq[Traced[K]]] => FutureUnlessShutdown[Iterable[V]] = - keys => FutureUnlessShutdown.pure(keys.map(item => defaultKeyToValue(item.value))) - - private val defaultMaximumInFlight: Int = 5 - private val defaultMaximumBatchSize: Int = 5 - - private def aggregatorWithDefaults( - maximumInFlight: Int = defaultMaximumInFlight, - batchGetter: BatchGetterType, - ): BatchAggregator[K, V] = { - val processor = new BatchAggregator.Processor[K, V] { - override def kind: String = "item" - override def logger: TracedLogger = BatchAggregatorTest.this.logger - override def executeBatch(items: NonEmpty[Seq[Traced[K]]])(implicit - traceContext: TraceContext, - callerCloseContext: CloseContext, - ): FutureUnlessShutdown[Iterable[V]] = batchGetter(items) - override def prettyItem: Pretty[K] = implicitly - } - - val config = BatchAggregatorConfig( - maximumInFlight = PositiveNumeric.tryCreate(maximumInFlight), - maximumBatchSize = PositiveNumeric.tryCreate(defaultMaximumBatchSize), - ) - - BatchAggregator[K, V](processor, config) - } - - /** @param requestsCountPerSize - * Track the number of requests per size - * @param blocker - * Future that blocks the computations. 
- * @return - * The default batcher (Int => String = _.toString) - */ - private def batchGetterWithCounter( - requestsCountPerSize: TrieMap[Int, Int], - blocker: FutureUnlessShutdown[Unit], - ): BatchGetterType = - keys => { - requestsCountPerSize.updateWith(keys.size)(_.map(count => count + 1).orElse(Some(1))) - blocker.flatMap(_ => - FutureUnlessShutdown.pure(keys.toList.map(item => defaultKeyToValue(item.value))) - ) - } - - case class CacheWithAggregator(aggregator: BatchAggregator[K, V])(implicit - traceContext: TraceContext - ) { - private val cache = Scaffeine().executor(executorService).buildAsync[K, V]() - - def get(key: K): FutureUnlessShutdown[V] = - FutureUnlessShutdown.outcomeF(cache.get(key, key => aggregator.run(key).futureValueUS)) - } - - object CacheWithAggregator { - def apply(batchGetter: BatchGetterType = defaultBatchGetter): CacheWithAggregator = { - val queryBatcher = aggregatorWithDefaults(batchGetter = batchGetter) - CacheWithAggregator(queryBatcher) - } - } - - "BatchAggregator" should { - "batch queries when the number of in-flight queries is too big" in { - val blocker = PromiseUnlessShutdown.unsupervised[Unit]() - - val requestsCountPerSize = TrieMap[Int, Int]() - val aggregator = aggregatorWithDefaults( - maximumInFlight = 1, - batchGetter = batchGetterWithCounter(requestsCountPerSize, blocker.futureUS), - ) - - val resultF = List(1, 2, 3).parTraverse(aggregator.run) - - blocker.success(UnlessShutdown.unit) - - resultF.futureValueUS shouldBe List("1", "2", "3") - requestsCountPerSize.toMap shouldBe Map( - 1 -> 1, // One request for a single element - 2 -> 1, // One request for two elements - ) - } - - "propagate an error thrown when issuing a single request" in { - val exception = new RuntimeException("sad getter") - - val cache = CacheWithAggregator(batchGetter = _ => FutureUnlessShutdown.failed(exception)) - val key = 42 - - loggerFactory - .assertThrowsAndLogsAsync[RuntimeException]( - cache.get(key).unwrap, - _.getCause.getCause shouldBe exception, - logEntry => { - logEntry.errorMessage shouldBe s"Failed to process item $key" - logEntry.throwable shouldBe Some(exception) - }, - ) - .futureValue - } - - "propagate an error when no result is returned" in { - val key = 41 - - val aggregator = - aggregatorWithDefaults( - maximumInFlight = 1, - batchGetter = _ => FutureUnlessShutdown.pure(Nil), - ) - - val result = loggerFactory - .assertLogs( - aggregator.run(key).failed.futureValueUS, - _.errorMessage should include("executeBatch returned an empty sequence of results"), - _.errorMessage shouldBe s"Failed to process item $key", - ) - - result shouldBe a[RuntimeException] - } - - "propagate an error thrown by the getter" in { - val blocker = PromiseUnlessShutdown.unsupervised[Unit]() - - val exception = new RuntimeException("sad getter") - - val aggregator = aggregatorWithDefaults( - maximumInFlight = 1, - batchGetter = - _ => blocker.futureUS.flatMap(_ => FutureUnlessShutdown.failed[Iterable[V]](exception)), - ) - - val results = List(1, 2, 3).map(aggregator.run) - - loggerFactory.assertLogsUnordered( - { - blocker.success(UnlessShutdown.unit) - results.foreach(_.failed.futureValueUS shouldBe exception) - }, - _.errorMessage shouldBe "Failed to process item 1", - _.errorMessage shouldBe show"Batch request failed for items ${Seq(2, 3)}", - ) - } - - "support many requests" in { - val aggregator = aggregatorWithDefaults( - maximumInFlight = 2, - batchGetter = keys => - FutureUnlessShutdown.pure { - Threading.sleep(Random.nextLong(50)) - keys.toList.map(key => 
defaultKeyToValue(key.value)) - }, - ) - - val requests = (0 until 100).map(_ => Random.nextInt(20)).toList - val expectedResult = requests.map(key => (key, defaultKeyToValue(key))) - - val results = requests.parTraverse(key => aggregator.run(key).map((key, _))).futureValueUS - results shouldBe expectedResult - } - - "complain about too few results in the batch response" in { - val blocker = PromiseUnlessShutdown.unsupervised[Unit]() - - val aggregator = aggregatorWithDefaults( - maximumInFlight = 1, - batchGetter = keys => - blocker.futureUS.flatMap { _ => - if (keys.sizeIs == 1) FutureUnlessShutdown.pure(List("0")) - else - FutureUnlessShutdown.pure(Iterable.empty) - }, - ) - - val results = List(0, 1, 2).map(aggregator.run) - - def tooFewResponses(key: K)(logEntry: LogEntry): Assertion = { - logEntry.errorMessage shouldBe ErrorUtil.internalErrorMessage - logEntry.throwable.value.getMessage should include(show"No response for item $key") - } - - loggerFactory.assertLogs( - { - blocker.success(UnlessShutdown.unit) - - results(0).futureValueUS shouldBe "0" - results(1).failed.futureValueUS.getMessage shouldBe "No response for item 1" - results(2).failed.futureValueUS.getMessage shouldBe "No response for item 2" - }, - tooFewResponses(1), - tooFewResponses(2), - ) - } - - "complain about too many results in the batch response" in { - val blocker = PromiseUnlessShutdown.unsupervised[Unit]() - - val aggregator = aggregatorWithDefaults( - maximumInFlight = 1, - batchGetter = keys => - blocker.futureUS.flatMap { _ => - if (keys.sizeIs == 1) FutureUnlessShutdown.pure(List("0")) - else - defaultBatchGetter(keys).map(_.toList).map(_ :+ "42") - }, - ) - - val results = List(0, 1, 2).map(aggregator.run) - - loggerFactory.assertLogs( - { - blocker.success(UnlessShutdown.unit) - results.sequence.futureValueUS shouldBe List("0", "1", "2") - }, - _.errorMessage should include("Received 1 excess responses for item batch"), - ) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/BatchAggregatorUSTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/BatchAggregatorUSTest.scala deleted file mode 100644 index a5940efc6a..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/BatchAggregatorUSTest.scala +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import cats.syntax.parallel.* -import cats.syntax.traverse.* -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.concurrent.Threading -import com.digitalasset.canton.config.BatchAggregatorConfig -import com.digitalasset.canton.config.RequireTypes.PositiveNumeric -import com.digitalasset.canton.lifecycle.{ - CloseContext, - FutureUnlessShutdown, - PromiseUnlessShutdown, - UnlessShutdown, -} -import com.digitalasset.canton.logging.pretty.Pretty -import com.digitalasset.canton.logging.{LogEntry, TracedLogger} -import com.digitalasset.canton.tracing.{TraceContext, Traced} -import com.digitalasset.canton.util.ShowUtil.* -import com.digitalasset.canton.version.HasTestCloseContext -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import com.github.blemale.scaffeine.Scaffeine -import org.scalatest.Assertion -import org.scalatest.wordspec.AnyWordSpec - -import scala.collection.concurrent.TrieMap -import scala.util.Random - -class BatchAggregatorUSTest - extends AnyWordSpec - with BaseTest - with HasExecutionContext - with HasTestCloseContext { - type K = Int - type V = String - type BatchGetterType = NonEmpty[Seq[Traced[K]]] => FutureUnlessShutdown[Iterable[V]] - - private val defaultKeyToValue: K => V = _.toString - private val defaultBatchGetter: NonEmpty[Seq[Traced[K]]] => FutureUnlessShutdown[Iterable[V]] = - keys => FutureUnlessShutdown.pure(keys.map(item => defaultKeyToValue(item.value))) - - private val defaultMaximumInFlight: Int = 5 - private val defaultMaximumBatchSize: Int = 5 - - private def aggregatorWithDefaults( - maximumInFlight: Int = defaultMaximumInFlight, - batchGetter: BatchGetterType, - ): BatchAggregatorUS[K, V] = { - val processor = new BatchAggregatorUS.ProcessorUS[K, V] { - override def kind: String = "item" - override def logger: TracedLogger = BatchAggregatorUSTest.this.logger - override def executeBatch(items: NonEmpty[Seq[Traced[K]]])(implicit - traceContext: TraceContext, - callerCloseContext: CloseContext, - ): FutureUnlessShutdown[Iterable[V]] = batchGetter(items) - override def prettyItem: Pretty[K] = implicitly - } - - val config = BatchAggregatorConfig( - maximumInFlight = PositiveNumeric.tryCreate(maximumInFlight), - maximumBatchSize = PositiveNumeric.tryCreate(defaultMaximumBatchSize), - ) - - BatchAggregatorUS[K, V](processor, config) - } - - /** @param requestsCountPerSize - * Track the number of requests per size - * @param blocker - * Future that blocks the computations. 
- * @return - * The default batcher (Int => String = _.toString) - */ - private def batchGetterWithCounter( - requestsCountPerSize: TrieMap[Int, Int], - blocker: FutureUnlessShutdown[Unit], - ): BatchGetterType = - keys => { - requestsCountPerSize.updateWith(keys.size)(_.map(count => count + 1).orElse(Some(1))) - blocker.flatMap(_ => - FutureUnlessShutdown.pure(keys.toList.map(item => defaultKeyToValue(item.value))) - ) - } - - case class CacheWithAggregator(aggregator: BatchAggregatorUS[K, V])(implicit - traceContext: TraceContext - ) { - private val cache = Scaffeine().executor(executorService).buildAsync[K, V]() - - def get(key: K): FutureUnlessShutdown[V] = - FutureUnlessShutdown.outcomeF(cache.get(key, key => aggregator.run(key).futureValueUS)) - } - - object CacheWithAggregator { - def apply(batchGetter: BatchGetterType = defaultBatchGetter): CacheWithAggregator = { - val queryBatcher = aggregatorWithDefaults(batchGetter = batchGetter) - CacheWithAggregator(queryBatcher) - } - } - - "BatchAggregatorUS" should { - "batch queries when the number of in-flight queries is too big" in { - val blocker = PromiseUnlessShutdown.unsupervised[Unit]() - - val requestsCountPerSize = TrieMap[Int, Int]() - val aggregator = aggregatorWithDefaults( - maximumInFlight = 1, - batchGetter = batchGetterWithCounter(requestsCountPerSize, blocker.futureUS), - ) - - val resultF = List(1, 2, 3).parTraverse(aggregator.run) - - blocker.success(UnlessShutdown.unit) - - resultF.futureValueUS shouldBe List("1", "2", "3") - requestsCountPerSize.toMap shouldBe Map( - 1 -> 1, // One request for a single element - 2 -> 1, // One request for two elements - ) - } - - "propagate an error thrown when issuing a single request" in { - val exception = new RuntimeException("sad getter") - - val cache = CacheWithAggregator(batchGetter = _ => FutureUnlessShutdown.failed(exception)) - val key = 42 - - loggerFactory - .assertThrowsAndLogsAsync[RuntimeException]( - cache.get(key).unwrap, - _.getCause.getCause shouldBe exception, - logEntry => { - logEntry.errorMessage shouldBe s"Failed to process item $key" - logEntry.throwable shouldBe Some(exception) - }, - ) - .futureValue - } - - "propagate an error when no result is returned" in { - val key = 41 - - val aggregator = - aggregatorWithDefaults( - maximumInFlight = 1, - batchGetter = _ => FutureUnlessShutdown.pure(Nil), - ) - - val result = loggerFactory - .assertLogs( - aggregator.run(key).failed.futureValueUS, - _.errorMessage should include("executeBatch returned an empty sequence of results"), - _.errorMessage shouldBe s"Failed to process item $key", - ) - - result shouldBe a[RuntimeException] - } - - "propagate an error thrown by the getter" in { - val blocker = PromiseUnlessShutdown.unsupervised[Unit]() - - val exception = new RuntimeException("sad getter") - - val aggregator = aggregatorWithDefaults( - maximumInFlight = 1, - batchGetter = - _ => blocker.futureUS.flatMap(_ => FutureUnlessShutdown.failed[Iterable[V]](exception)), - ) - - val results = List(1, 2, 3).map(aggregator.run) - - loggerFactory.assertLogsUnordered( - { - blocker.success(UnlessShutdown.unit) - results.foreach(_.failed.futureValueUS shouldBe exception) - }, - _.errorMessage shouldBe "Failed to process item 1", - _.errorMessage shouldBe show"Batch request failed for items ${Seq(2, 3)}", - ) - } - - "support many requests" in { - val aggregator = aggregatorWithDefaults( - maximumInFlight = 2, - batchGetter = keys => - FutureUnlessShutdown.pure { - Threading.sleep(Random.nextLong(50)) - keys.toList.map(key 
=> defaultKeyToValue(key.value)) - }, - ) - - val requests = (0 until 100).map(_ => Random.nextInt(20)).toList - val expectedResult = requests.map(key => (key, defaultKeyToValue(key))) - - val results = requests.parTraverse(key => aggregator.run(key).map((key, _))).futureValueUS - results shouldBe expectedResult - } - - "complain about too few results in the batch response" in { - val blocker = PromiseUnlessShutdown.unsupervised[Unit]() - - val aggregator = aggregatorWithDefaults( - maximumInFlight = 1, - batchGetter = keys => - blocker.futureUS.flatMap { _ => - if (keys.sizeIs == 1) FutureUnlessShutdown.pure(List("0")) - else - FutureUnlessShutdown.pure(Iterable.empty) - }, - ) - - val results = List(0, 1, 2).map(aggregator.run) - - def tooFewResponses(key: K)(logEntry: LogEntry): Assertion = { - logEntry.errorMessage shouldBe ErrorUtil.internalErrorMessage - logEntry.throwable.value.getMessage should include(show"No response for item $key") - } - - loggerFactory.assertLogs( - { - blocker.success(UnlessShutdown.unit) - - results(0).futureValueUS shouldBe "0" - results(1).failed.futureValueUS.getMessage shouldBe "No response for item 1" - results(2).failed.futureValueUS.getMessage shouldBe "No response for item 2" - }, - tooFewResponses(1), - tooFewResponses(2), - ) - } - - "complain about too many results in the batch response" in { - val blocker = PromiseUnlessShutdown.unsupervised[Unit]() - - val aggregator = aggregatorWithDefaults( - maximumInFlight = 1, - batchGetter = keys => - blocker.futureUS.flatMap { _ => - if (keys.sizeIs == 1) FutureUnlessShutdown.pure(List("0")) - else - defaultBatchGetter(keys).map(_.toList).map(_ :+ "42") - }, - ) - - val results = List(0, 1, 2).map(aggregator.run) - - loggerFactory.assertLogs( - { - blocker.success(UnlessShutdown.unit) - results.sequence.futureValueUS shouldBe List("0", "1", "2") - }, - _.errorMessage should include("Received 1 excess responses for item batch"), - ) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/BatchNSpec.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/BatchNSpec.scala deleted file mode 100644 index 2129f09192..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/BatchNSpec.scala +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import com.daml.ledger.api.testing.utils.PekkoBeforeAndAfterAll -import com.digitalasset.canton.util.PekkoUtil.syntax.* -import org.apache.pekko.stream.Attributes.InputBuffer -import org.apache.pekko.stream.scaladsl.{Sink, Source} -import org.apache.pekko.stream.{Attributes, DelayOverflowStrategy} -import org.scalatest.flatspec.AsyncFlatSpec -import org.scalatest.matchers.should.Matchers - -import scala.concurrent.duration.* - -class BatchNSpec extends AsyncFlatSpec with Matchers with PekkoBeforeAndAfterAll { - - private val MaxBatchSize = 10 - private val MaxBatchCount = 5 - - behavior of s"BatchN in batchMode ${BatchN.MaximizeConcurrency}" - - it should "form batches of size 1 under no load" in { - val inputSize = 10 - val input = 1 to inputSize - val batchesF = - Source(input).async - // slow upstream - .delay(10.millis, DelayOverflowStrategy.backpressure) - .batchN(MaxBatchSize, MaxBatchCount, catchUpMode = BatchN.MaximizeConcurrency) - .runWith(Sink.seq[Iterable[Int]]) - - batchesF.map { batches => - batches.flatten should contain theSameElementsInOrderAs input - batches.map(_.size) should contain theSameElementsAs Array.fill(inputSize)(1) - } - } - - it should "form maximally-sized batches if downstream is slower than upstream" in { - val inputSize = 100 - val input = 1 to inputSize - - val batchesF = - Source(input) - .batchN(MaxBatchSize, MaxBatchCount, catchUpMode = BatchN.MaximizeConcurrency) - // slow downstream - .initialDelay(10.millis) - .async - .delay(10.millis, DelayOverflowStrategy.backpressure) - .addAttributes(Attributes(InputBuffer(1, 1))) - .runWith(Sink.seq) - - batchesF.map { batches => - batches.flatten should contain theSameElementsInOrderAs input - batches.map(_.size) should contain theSameElementsAs Array.fill(inputSize / MaxBatchSize)( - MaxBatchSize - ) - } - } - - it should "form even-sized batches under downstream back-pressure" in { - val inputSize = 15 - val input = 1 to inputSize - - val batchesF = - Source(input) - .batchN(MaxBatchSize, MaxBatchCount, catchUpMode = BatchN.MaximizeConcurrency) - // slow downstream - .initialDelay(10.millis) - .async - .delay(10.millis, DelayOverflowStrategy.backpressure) - .addAttributes(Attributes(InputBuffer(1, 1))) - .runWith(Sink.seq) - - batchesF.map { batches => - batches.flatten should contain theSameElementsInOrderAs input - batches.map(_.size) should contain theSameElementsAs Array.fill(5)( - 3 - ) - } - } - - behavior of s"BatchN in batchMode ${BatchN.MaximizeBatchSize}" - - it should "form batches of size 1 under no load" in { - val inputSize = 10 - val input = 1 to inputSize - val batchesF = - Source(input).async - // slow upstream - .delay(10.millis, DelayOverflowStrategy.backpressure) - .batchN(MaxBatchSize, MaxBatchCount, catchUpMode = BatchN.MaximizeBatchSize) - .runWith(Sink.seq[Iterable[Int]]) - - batchesF.map { batches => - batches.flatten should contain theSameElementsInOrderAs input - batches.map(_.size) should contain theSameElementsAs Array.fill(inputSize)(1) - } - } - - it should "form maximally-sized batches if downstream is slower than upstream" in { - val inputSize = 100 - val input = 1 to inputSize - - val batchesF = - Source(input) - .batchN(MaxBatchSize, MaxBatchCount, catchUpMode = BatchN.MaximizeBatchSize) - // slow downstream - .initialDelay(10.millis) - .async - .delay(10.millis, DelayOverflowStrategy.backpressure) - .addAttributes(Attributes(InputBuffer(1, 1))) - .runWith(Sink.seq) - - batchesF.map { 
batches => - batches.flatten should contain theSameElementsInOrderAs input - batches.map(_.size) should contain theSameElementsAs Array.fill(inputSize / MaxBatchSize)( - MaxBatchSize - ) - } - } - - it should "form maximally-sized batches under downstream back-pressure" in { - val inputSize = 25 - val input = 1 to inputSize - - val batchesF = - Source(input) - .batchN(MaxBatchSize, MaxBatchCount, catchUpMode = BatchN.MaximizeBatchSize) - // slow downstream - .initialDelay(10.millis) - .async - .delay(10.millis, DelayOverflowStrategy.backpressure) - .addAttributes(Attributes(InputBuffer(1, 1))) - .runWith(Sink.seq) - - batchesF.map { batches => - batches.flatten should contain theSameElementsInOrderAs input - batches.map(_.size) should contain theSameElementsAs (Array.fill(inputSize / MaxBatchSize)( - MaxBatchSize // fill as many full batches as possible - ) :+ inputSize % MaxBatchSize) // and the last batch is whatever is left-over - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/ByteStringUtilTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/ByteStringUtilTest.scala deleted file mode 100644 index aa29a1a8a1..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/ByteStringUtilTest.scala +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.config.RequireTypes.NonNegativeInt -import com.digitalasset.canton.serialization.{ - DefaultDeserializationError, - DeserializationError, - MaxByteToDecompressExceeded, -} -import com.google.protobuf.ByteString -import org.scalactic.Uniformity -import org.scalatest.wordspec.AnyWordSpec - -import java.io.ByteArrayInputStream -import java.nio.charset.Charset - -// The compressed test data contained herein conforms to pre-Java 16 gzip output -// Reused among compression methods that work on arrays and byte strings -trait GzipCompressionTests extends AnyWordSpec with BaseTest { - - def compressGzip(str: ByteString): ByteString - def decompressGzip(str: ByteString): Either[DeserializationError, ByteString] - - "compress and decompress ByteStrings" in { - val tests = Table[String, String]( - ("uncompressed-utf8", "compressed-hex"), - ("test", "1f8b08000000000000002b492d2e01000c7e7fd804000000"), - ("", "1f8b080000000000000003000000000000000000"), - ( - "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", - "1f8b08000000000000004b4ca41a0000a0ec9d324b000000", - ), - ) - - tests.forEvery { (uncompressedUtf8, compressedHex) => - val inputUncompressed = ByteString.copyFromUtf8(uncompressedUtf8) - val inputCompressed = HexString.parseToByteString(compressedHex).value - - val compressed = compressGzip(inputUncompressed) - inputCompressed should equal(compressed)(after being OsHeaderFieldIgnored) - - val uncompressed = decompressGzip(inputCompressed) - uncompressed shouldBe Right(inputUncompressed) - } - } - - "decompress works if timestamp is set" in { - val tests = Table[String, String, String]( - ("name", "compressed-hex", "uncompressed"), - ("Epoch", "1f8b080000000000000003000000000000000000", ""), - ("non-Epoch", "1f8b0800FFFFFFFF000003000000000000000000", ""), - ) - - tests.forEvery { (_, compressedHex, uncompressedUtf8) => - val outputUncompressed = ByteString.copyFromUtf8(uncompressedUtf8) - val 
inputCompressed = HexString.parseToByteString(compressedHex).value - - val uncompressed = decompressGzip(inputCompressed) - uncompressed shouldBe Right(outputUncompressed) - } - - } - - "decompress fails for bad inputs" in { - val tests = Table[String, String, String]( - ("name", "compressed-hex", "error message"), - ("bad prefix", "1f8a08000000000000004b4ca41a0000a0ec9d324b000000", "Not in GZIP format"), - ( - "bad compression method", - "1f8b05000000000000004b4ca41a0000a0ec9d324b000000", - "Unsupported compression method", - ), - ("bad flags", "1f8a08080000000000004b4ca41a0000a0ec9d324b000000", "Not in GZIP format"), - ( - "bad block length", - "1f8b080000000000000002000000000000000000", - "invalid stored block lengths", - ), - ( - "truncated", - "1f8b08000000000000002b492d2e01000c7e7fd8040000", - "Compressed byte input ended too early", - ), - ) - - tests.forEvery { (_, compressedHex, expectedError) => - val inputCompressed = HexString.parseToByteString(compressedHex).value - val uncompressed = decompressGzip(inputCompressed) - - inside(uncompressed) { case Left(DefaultDeserializationError(err)) => - err should include(expectedError) - } - } - } -} - -/** Ignores the 'os id' value, the 10th byte in the gzip file format header because it changed from - * 0x00 to 0xFF in Java 16 and later (https://bugs.openjdk.org/browse/JDK-8244706); enables - * seamless test execution on Java 11 and 17. - */ -private object OsHeaderFieldIgnored extends Uniformity[ByteString] { - - private val osHeaderFieldAt10thBytePosition = 9 - - override def normalized(data: ByteString): ByteString = { - require(data.size() >= 10, "Gzip compressed data is expected to contain a 10 bytes long header") - if (data.byteAt(osHeaderFieldAt10thBytePosition) == 0) { - data - } else { - val array = data.toByteArray - array(osHeaderFieldAt10thBytePosition) = 0 - ByteString.readFrom(new ByteArrayInputStream(array)) - } - } - - override def normalizedOrSame(o: Any): Any = - o match { - case data: ByteString => normalized(data) - case _ => o - } - - override def normalizedCanHandle(o: Any): Boolean = o.isInstanceOf[ByteString] -} - -class ByteStringUtilTest extends AnyWordSpec with BaseTest with GzipCompressionTests { - override def compressGzip(str: ByteString): ByteString = ByteStringUtil.compressGzip(str) - - override def decompressGzip(str: ByteString): Either[DeserializationError, ByteString] = - ByteStringUtil.decompressGzip(str, maxBytesLimit = None) - - "ByteStringUtilTest" should { - - "order ByteStrings lexicographically" in { - val order = ByteStringUtil.orderByteString - - def less(cmp: Int): Boolean = cmp < 0 - def equal(cmp: Int): Boolean = cmp == 0 - def greater(cmp: Int): Boolean = cmp > 0 - def dual(f: Int => Boolean)(cmp: Int): Boolean = f(-cmp) - - val tests = - Table[String, String, String, Int => Boolean]( - ("name", "first", "second", "outcome"), - ("empty", "", "", equal), - ("empty least", "", "a", less), - ("equal", "abc", "abc", equal), - ("longer", "abc", "abcde", less), - ("shorter", "abcd", "ab", greater), - ("common prefix", "abcdf", "abced", less), - ("no common prefix", "def", "abc", greater), - ) - - tests.forEvery { (name, left, right, result) => - val bs1 = ByteString.copyFromUtf8(left) - val bs2 = ByteString.copyFromUtf8(right) - assert(result(order.compare(bs1, bs2)), name) - assert(dual(result)(order.compare(bs2, bs1)), name + " dual") - } - } - "decompress with max bytes to read" in { - val uncompressed = "a" * 1000000 - val uncompressedByteString = ByteString.copyFrom(uncompressed, 
Charset.defaultCharset()) - val compressed = compressGzip(uncompressedByteString) - - val res1 = ByteStringUtil.decompressGzip(compressed, maxBytesLimit = Some(1000000)) - res1 shouldBe Right(uncompressedByteString) - val res2 = ByteStringUtil.decompressGzip(compressed, maxBytesLimit = Some(777)) - res2 shouldBe Left( - MaxByteToDecompressExceeded("Max bytes to decompress is exceeded. The limit is 777 bytes.") - ) - } - - "correctly pad or truncate a ByteString" in { - val aByteStr = ByteString.copyFrom("abcdefghij", Charset.defaultCharset()) - - // padded to 20 - val padSize = NonNegativeInt.tryCreate(20) - val toPad = ByteString.copyFrom(new Array[Byte](padSize.value - aByteStr.size())) - val padded = ByteStringUtil - .padOrTruncate(aByteStr, padSize) - padded.size() shouldBe padSize.value - padded.substring(0, aByteStr.size()) == aByteStr shouldBe true - padded.substring(aByteStr.size()) == toPad shouldBe true - - // truncate to 5 - val truncateSize = NonNegativeInt.tryCreate(5) - val expected = ByteString.copyFrom("abcde", Charset.defaultCharset()) - val truncated = ByteStringUtil - .padOrTruncate(aByteStr, truncateSize) - truncated.size() shouldBe truncateSize.value - truncated == expected shouldBe true - - // truncate to 0 - val truncateSize_2 = NonNegativeInt.zero - val empty = ByteString.EMPTY - val truncated_2 = ByteStringUtil - .padOrTruncate(aByteStr, truncateSize_2) - truncated_2.size() shouldBe truncateSize_2.value - truncated_2 == empty shouldBe true - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/CheckedTTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/CheckedTTest.scala deleted file mode 100644 index 5c78dda99e..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/CheckedTTest.scala +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import cats.data.{Chain, EitherT, NonEmptyChain, OptionT, Validated} -import cats.instances.either.* -import cats.laws.discipline.{ApplicativeTests, FunctorTests, MonadErrorTests, ParallelTests} -import cats.syntax.either.* -import cats.{Eq, Monad} -import com.digitalasset.canton.BaseTestWordSpec -import org.scalacheck.Arbitrary -import org.scalatest.wordspec.AnyWordSpec - -class CheckedTTest extends AnyWordSpec with BaseTestWordSpec { - - // We use Either as the transformed monad in the tests - type Monad[A] = Either[Int, A] - - def failure[A](x: A): Nothing = throw new RuntimeException - def failure2[A, B](x: A, y: B): Nothing = throw new RuntimeException - - "map" should { - "not touch aborts" in { - val sut: CheckedT[Monad, Int, String, String] = CheckedT.abortT(5) - assert(sut.map(failure) == CheckedT.abortT(5)) - } - - "change the result" in { - val sut = CheckedT.resultT[Monad, Int, String](10) - assert(sut.map(x => x + 1) == CheckedT.resultT(11)) - } - - "respect the transformed monad" in { - val sut = CheckedT[Monad, Int, Int, Int](Left(1)) - assert(sut.map(x => x + 1) == CheckedT.result(Either.left(1))) - } - - "not touch the non-aborts" in { - val sut = Checked.continueWithResult("non-abort", 5) - assert(sut.map(x => x + 1) == Checked.continueWithResult("non-abort", 6)) - } - } - - "mapAbort" should { - "change the abort" in { - assert(CheckedT.abortT[Monad, String, Int](5).mapAbort(x => x + 1) == CheckedT.abortT(6)) - } - - "not touch the non-aborts" in { - val sut: CheckedT[Monad, String, String, Unit] = CheckedT.continueT("non-abort") - assert(sut.mapAbort(failure) == CheckedT.continueT("non-abort")) - } - - "respect the transformed monad" in { - val sut = CheckedT.result(Either.left(10)) - assert(sut.mapAbort(failure) == CheckedT.result(Either.left(10))) - } - - "not touch the result" in { - val sut: CheckedT[Monad, String, String, Int] = CheckedT.resultT(5) - assert(sut.mapAbort(failure) == CheckedT.resultT(5)) - } - } - - "mapNonaborts" should { - "not touch aborts" in { - val sut = CheckedT.abortT[Monad, Int, Int](5).mapNonaborts(x => x ++ Chain.one(1)) - assert( - sut.value.exists(checked => checked.getAbort.contains(5) && checked.nonaborts == Chain(1)) - ) - } - - "not touch results" in { - val sut = CheckedT.resultT[Monad, Int, Int](3).mapNonaborts(x => x ++ Chain.one(1)) - assert(sut == CheckedT.continueWithResultT(1, 3)) - } - - "change the nonaborts" in { - val sut = Checked.continue("nonaborts").mapNonaborts(x => Chain.one("test") ++ x) - assert(sut.nonaborts == Chain("test", "nonaborts")) - assert(sut.getResult.contains(())) - } - - "respect the transformed monad" in { - val sut = CheckedT.result(Either.left(5)) - assert(sut.mapNonaborts(failure) == sut) - } - } - - "mapNonabort" should { - "only touch nonaborts" in { - val sut1: CheckedT[Monad, Int, String, Double] = CheckedT.abortT(5) - assert(sut1.mapNonabort(failure) == sut1) - - val sut2: CheckedT[Monad, Int, String, Double] = CheckedT.resultT(5.7) - assert(sut2.mapNonabort(failure) == sut2) - } - - "change the nonaborts" in { - val sut1 = CheckedT.abortT(5).appendNonaborts(Chain(3, 5)).mapNonabort(x => x + 1) - assert(sut1.value.exists(_.getAbort.contains(5))) - assert(sut1.value.exists(_.nonaborts == Chain(4, 6))) - - val sut2 = CheckedT.resultT(5).appendNonaborts(Chain(3, 5)).mapNonabort(x => x + 1) - assert(sut2.value.exists(_.getResult.contains(5))) - assert(sut2.value.exists(_.nonaborts == Chain(4, 6))) - } - - "respect 
the transformed monad" in { - val sut = CheckedT.result(Either.left(10)) - assert(sut.mapNonabort(failure) == sut) - } - } - - "trimap" should { - "work for aborts" in { - val sut = CheckedT.abortT[Monad, String, Int]("abort").appendNonabort("nonabort") - val result = sut.trimap(x => x + "test", y => "test " + y, failure) - - assert(result.value.exists(_.getAbort.contains("aborttest"))) - assert(result.value.exists(_.nonaborts == Chain("test nonabort"))) - } - - "work for results" in { - val sut = CheckedT.continueWithResultT[Monad, Double]("nonabort", 5) - val expected = CheckedT.continueWithResultT[Monad, Double]("test nonabort", 6) - assert(sut.trimap(failure, x => "test " + x, y => y + 1) == expected) - } - - "respect the transformed monad" in { - val sut = CheckedT.abort(Either.left(10)) - assert(sut.trimap(failure, failure, failure) == sut) - } - } - - "semiflatMap" should { - "act on results" in { - val sut = CheckedT.continueWithResultT[Monad, Double]("nonabort", 5) - assert(sut.semiflatMap(x => Right(x + 1)) == CheckedT.continueWithResultT("nonabort", 6)) - } - - "not touch aborts" in { - val sut = CheckedT.abortT(10) - assert(sut.semiflatMap(failure) == sut) - } - - "act according to the transformed monad" in { - val sut = CheckedT.resultT[Monad, Int, String](5) - assert(sut.semiflatMap(_ => Left(3)) == CheckedT.result(Either.left(3))) - } - - "respect the transformed monad" in { - val sut = CheckedT.result(Either.left(10)) - assert(sut.semiflatMap(failure) == sut) - } - } - - "fold" should { - "fold an abort" in { - val sut = CheckedT.abortT(5).prependNonabort("nonabort") - assert( - sut.fold((x, y) => Chain.one(x.toString) ++ y, failure2) == Either.right( - Chain("5", "nonabort") - ) - ) - } - - "fold a result" in { - val sut = CheckedT.continueWithResultT("nonabort", 7.5) - assert( - sut.fold(failure2, (x, y) => Chain.one(y.toString) ++ x) == Either.right( - Chain("7.5", "nonabort") - ) - ) - } - - "respect the transformed monad" in { - val sut = CheckedT.result(Either.left(3)) - assert(sut.fold(failure2, failure2) == Either.left(3)) - } - } - - "prependNonaborts" should { - "work on an abort" in { - val sut = - CheckedT.abortT(1).prependNonaborts(Chain("a", "b")).prependNonaborts(Chain("c", "d")) - assert(sut.value.exists(_.getAbort.contains(1))) - assert(sut.value.exists(_.nonaborts == Chain("c", "d", "a", "b"))) - } - - "work on a result" in { - val sut = - CheckedT.resultT(1).prependNonaborts(Chain("a", "b")).prependNonaborts(Chain("c", "d")) - assert(sut.value.exists(_.getResult.contains(1))) - assert(sut.value.exists(_.nonaborts == Chain("c", "d", "a", "b"))) - } - - "respect the transformed monad" in { - val sut = CheckedT.result(Either.left(3)) - assert(sut.prependNonaborts(Chain("a", "b")) == sut) - } - } - - "prependNonabort" should { - "work on an abort" in { - val sut = CheckedT.abortT(1).prependNonabort("a").prependNonabort("b") - assert( - sut == CheckedT[Monad, Int, String, Int](Either.right(Checked.Abort(1, Chain("b", "a")))) - ) - } - - "work on a result" in { - val sut = CheckedT.resultT(1).prependNonabort("a").prependNonabort("b") - assert( - sut == CheckedT[Monad, Int, String, Int](Either.right(Checked.Result(Chain("b", "a"), 1))) - ) - } - - "respect the transformed monad" in { - val sut = CheckedT.result(Either.left(3)) - assert(sut.prependNonabort("a") == sut) - } - } - - "appendNonaborts" should { - "work on an abort" in { - val sut = CheckedT.abortT(1).appendNonaborts(Chain("a", "b")).appendNonaborts(Chain("c", "d")) - 
assert(sut.value.exists(_.getAbort.contains(1))) - assert(sut.value.exists(_.nonaborts == Chain("a", "b", "c", "d"))) - } - - "work on a result" in { - val sut = - CheckedT.resultT(1).appendNonaborts(Chain("a", "b")).appendNonaborts(Chain("c", "d")) - assert(sut.value.exists(_.getResult.contains(1))) - assert(sut.value.exists(_.nonaborts == Chain("a", "b", "c", "d"))) - } - - "respect the transformed monad" in { - val sut = CheckedT.result(Either.left(4)) - assert(sut.appendNonaborts(Chain("a", "b")) == sut) - } - } - - "appendNonabort" should { - "work on an abort" in { - val sut = CheckedT.abortT(1).appendNonabort("a").appendNonabort("b") - assert(sut.value.exists(_.getAbort.contains(1))) - assert(sut.value.exists(_.nonaborts == Chain("a", "b"))) - } - - "work on a result" in { - val sut = CheckedT.resultT(1).appendNonabort("a").appendNonabort("b") - assert(sut.value.exists(_.getResult.contains(1))) - assert(sut.value.exists(_.nonaborts == Chain("a", "b"))) - } - - "respect the transformed monad" in { - val sut = CheckedT.result(Either.left(4)) - assert(sut.appendNonabort("a") == sut) - } - } - - "product" should { - "prefer aborts from the left" in { - val sut1 = - CheckedT.abortT[Monad, Double, String](5).product(CheckedT.abortT[Monad, Double, String](7)) - assert(sut1 == CheckedT.abortT(5)) - - val sut2 = - CheckedT - .abortT[Monad, Double, String](5) - .product(CheckedT.continueWithResultT[Monad, Int](1.0, "result")) - assert(sut2 == CheckedT.abortT(5)) - } - - "combine nonaborts" in { - val sut1 = CheckedT - .continueWithResultT[Monad, Int]("left", 5) - .product(CheckedT.continueWithResultT[Monad, Int]("right", 7)) - assert(sut1.value.exists(_.nonaborts == Chain("left", "right"))) - assert(sut1.value.exists(_.getResult.contains((5, 7)))) - - val sut2 = CheckedT - .continueWithResultT[Monad, String]("left", 6) - .product(CheckedT.abortT[Monad, String, Int]("failure").appendNonabort("right")) - assert(sut2.value.exists(_.getAbort.contains("failure"))) - assert(sut2.value.exists(_.nonaborts == Chain("left", "right"))) - } - - "respect the transformed monad" in { - val sut = CheckedT.result(Either.left(0)) - assert(sut.product(CheckedT.resultT[Monad, Double, String](3)) == sut) - } - } - - "flatMap" should { - "propagate aborts" in { - val sut = CheckedT.abortT("failure").prependNonaborts(Chain("a", "b")) - assert(sut.flatMap(failure) == sut) - } - - "combine non-aborts" in { - val sut = CheckedT - .continueWithResultT[Monad, String]("first", 1) - .flatMap(x => - if (x == 1) CheckedT.continueWithResultT[Monad, String]("second", 2) else failure(x) - ) - assert(sut.value.exists(_.nonaborts == Chain("first", "second"))) - assert(sut.value.exists(_.getResult.contains(2))) - } - - "act according to the transformed monad" in { - val sut = CheckedT.resultT[Monad, Int, String](10) - val expected = CheckedT[Monad, Int, String, Int](Left(10)) - assert(sut.flatMap(x => CheckedT.result[Int, String](Either.left[Int, Int](x))) == expected) - } - - "respect the transformed monad" in { - val sut = CheckedT.result(Either.left(5)) - assert(sut.flatMap(failure) == sut) - } - } - - "biflatMap" should { - "call the second continuation for results" in { - val sut = CheckedT.continueWithResultT[Monad, Int]("first", 1) - val test = sut.biflatMap( - failure, - result => CheckedT.continueWithResultT[Monad, Int]("second", result + 2), - ) - assert(test.value.exists(_.nonaborts == Chain("first", "second"))) - assert(test.value.exists(_.getResult.contains(3))) - } - - "call the first continuation for aborts" in { - 
val sut: Checked[Int, String, Int] = Checked.abort(1).prependNonabort("first") - val test = sut.biflatMap( - abort => Checked.continueWithResult("second", abort + 3), - _ => throw new RuntimeException("Called continuation for results"), - ) - assert(test.nonaborts == Chain("first", "second")) - assert(test.getResult.contains(4)) - } - - "act according to the transformed monad" in { - val sut1 = CheckedT - .resultT[Monad, Int, String](1) - .biflatMap(failure, result => CheckedT.result[Int, String](Either.left[Int, Int](result))) - assert(sut1 == CheckedT[Monad, Int, String, Int](Either.left(1))) - - val sut2 = CheckedT - .abortT[Monad, String, Int](1) - .biflatMap(abort => CheckedT.result[Int, String](Either.left[Int, Int](abort)), failure) - assert(sut2 == CheckedT[Monad, Int, String, Int](Either.left(1))) - } - - "respect the transformed monad" in { - val sut = CheckedT.result(Either.left(1)) - assert(sut.biflatMap(failure, failure) == sut) - } - } - - "abortFlatMap" should { - "not touch results" in { - val sut = CheckedT.continueWithResultT[Monad, Int]("first", 1) - assert(sut.abortFlatMap(failure) == sut) - } - - "call the continuation for aborts" in { - val sut = CheckedT.abortT[Monad, String, Int](1).prependNonabort("first") - val test = - sut.abortFlatMap(abort => CheckedT.continueWithResultT[Monad, Int]("second", abort + 3)) - assert(test.value.exists(_.nonaborts == Chain("first", "second"))) - assert(test.value.exists(_.getResult.contains(4))) - } - - "act according to the transformed monad" in { - val sut = CheckedT - .abortT[Monad, String, Int](1) - .abortFlatMap(abort => CheckedT.result[Int, String](Either.left[Int, Int](abort))) - assert(sut == CheckedT[Monad, Int, String, Int](Either.left(1))) - } - - "respect the transformed monad" in { - val sut = CheckedT.result(Either.left(1)) - assert(sut.abortFlatMap(failure) == sut) - } - } - - "subflatMap" should { - "propagate aborts" in { - val sut = CheckedT.abortT("failure").prependNonaborts(Chain("a", "b")) - assert(sut.subflatMap(failure) == sut) - } - - "combine non-aborts" in { - val sut = CheckedT - .continueWithResultT[Monad, String]("first", 1) - .subflatMap(x => if (x == 1) Checked.continueWithResult("second", 2) else failure(x)) - assert(sut.value.exists(_.nonaborts == Chain("first", "second"))) - assert(sut.value.exists(_.getResult.contains(2))) - } - - "respect the transformed monad" in { - val sut = CheckedT.result(Either.left(5)) - assert(sut.subflatMap(failure) == sut) - } - } - - "abortSubflatMap" should { - "not touch results" in { - val sut = CheckedT.resultT("result").prependNonaborts(Chain("a", "b")) - assert(sut.abortSubflatMap(failure) == sut) - } - - "combine non-aborts" in { - val sut = CheckedT - .abortT[Monad, String, Int](1) - .prependNonabort("first") - .abortSubflatMap(x => if (x == 1) Checked.continueWithResult("second", 2) else failure(x)) - assert(sut.value.exists(_.nonaborts == Chain("first", "second"))) - assert(sut.value.exists(_.getResult.contains(2))) - } - - "respect the transformed monad" in { - val sut = CheckedT.result(Either.left(5)) - assert(sut.abortSubflatMap(failure) == sut) - } - } - - "flatMapIfSuccess" should { - "not touch results when there are non-aborts" in { - val sut = CheckedT.resultT("result").prependNonaborts(Chain("a", "b")) - assert(sut.flatMapIfSuccess(failure) == sut) - } - - "map the result if success" in { - val result = "result" - val sut = CheckedT.resultT(result) - assert( - sut.flatMapIfSuccess[String, String, String](x => CheckedT.resultT(x.reverse)) == CheckedT 
- .resultT(result.reverse) - ) - } - - "propagate aborts" in { - val sut = CheckedT.abortT("failure").prependNonaborts(Chain("a", "b")) - assert(sut.flatMapIfSuccess(failure) == sut) - } - } - - "toResult" should { - "not touch results" in { - val sut = CheckedT.continueWithResultT("first", 1) - assert(sut.toResult(2) == sut) - } - - "merge abort with the nonaborts" in { - val sut = CheckedT - .abortT[Either[String, *], String, Int]("abort") - .prependNonaborts(Chain("first", "second")) - assert( - sut.toResult(3) == CheckedT - .continueWithResultT("second", 3) - .prependNonaborts(Chain("abort", "first")) - ) - } - - } - - "foreach" should { - "do nothing on an abort" in { - val sut = CheckedT.abortT[Monad, String, Int]("failure") - assert(sut.foreach(failure) == Either.unit) - } - - "run the function on the result" in { - @SuppressWarnings(Array("org.wartremover.warts.Var")) - var run = false - assert( - CheckedT.resultT[Monad, Int, String](5).foreach(x => if (x == 5) run = true) == Either.unit - ) - assert(run) - } - - "respect the transformed monad" in { - val sut = CheckedT.result(Either.left(3)) - assert(sut.foreach(failure) == Left(3)) - } - } - - "exists" should { - "return false on an abort" in { - val sut = CheckedT.abortT("abort").appendNonabort("nonabort") - assert(sut.exists(failure) == Right(false)) - } - - "evaluate the predicate on the result" in { - assert(CheckedT.resultT(5).exists(x => x == 5) == Right(true)) - assert(CheckedT.resultT(5).exists(x => x != 5) == Right(false)) - } - - "respect the transformed monad" in { - assert(CheckedT.result(Either.left(3)).exists(failure) == Left(3)) - } - } - - "forall" should { - "return true on an abort" in { - val sut = CheckedT.abortT("abort").appendNonabort("nonabort") - assert(sut.forall(failure) == Right(true)) - } - - "evaluate the predicate on the result" in { - assert(CheckedT.resultT(5).forall(x => x == 5) == Right(true)) - assert(CheckedT.resultT(5).forall(x => x != 5) == Right(false)) - } - - "respect the transformed monad" in { - assert(CheckedT.result(Either.left(3)).forall(failure) == Left(3)) - } - } - - "toEitherT" should { - "map abort to left" in { - assert(CheckedT.abortT[Monad, String, Int](3).toEitherT == EitherT.leftT(3)) - } - - "map result to right" in { - assert(CheckedT.continueWithResultT[Monad, Int]("nonabort", 4).toEitherT == EitherT.rightT(4)) - } - - "respect the transformed monad" in { - assert(CheckedT.result(Either.left(3)).toEitherT == EitherT.right(Either.left(3))) - } - } - - "toEitherTWithNonAborts" should { - "map abort to Left" in { - assert( - CheckedT - .abortT[Monad, String, Int]("abort") - .prependNonabort("nonabort") - .toEitherTWithNonaborts == EitherT - .leftT(NonEmptyChain("abort", "nonabort")) - ) - } - - "map result to Right" in { - assert(CheckedT.resultT[Monad, String, String](5).toEitherTWithNonaborts == EitherT.rightT(5)) - } - - "map nonaborts to Left" in { - assert( - CheckedT.continueWithResultT[Monad, String]("nonabort", 4).toEitherTWithNonaborts == EitherT - .leftT(NonEmptyChain("nonabort")) - ) - } - } - - "toOptionT" should { - "map abort to None" in { - assert(CheckedT.abortT[Monad, String, Int](3).toOptionT == OptionT.none) - } - - "map result to Some" in { - assert(CheckedT.pure[Monad, Int, String](3).toOptionT == OptionT.pure(3)) - } - - "respect the transformed monad" in { - assert(CheckedT.result(Either.left(3)).toOptionT == OptionT[Monad, Int](Either.left(3))) - } - } - - "widenResult, widenAbort, and widenNonabort" should { - "change only the type" in { - val sut1 = 
CheckedT.continueWithResultT[Monad, Int]("nonabort", 5) - assert(sut1.widenResult[AnyVal] == sut1) - assert(sut1.widenAbort[AnyVal] == sut1) - assert(sut1.widenNonabort[AnyRef] == sut1) - - val sut2 = CheckedT.abortT[Monad, String, Int](10) - assert(sut2.widenResult[AnyVal] == sut2) - assert(sut2.widenAbort[AnyVal] == sut2) - assert(sut2.widenNonabort[AnyRef] == sut2) - } - } - - "fromChecked" should { - "map aborts to abort" in { - val sut = CheckedT.fromChecked[Monad](Checked.abort(5).prependNonabort("a")) - assert(sut == CheckedT.abortT(5).prependNonabort("a")) - } - - "map results to results" in { - assert(CheckedT.fromChecked[Monad](Checked.continue(5)) == CheckedT.continueT(5)) - } - } - - "fromEitherT" should { - "map Left to abort" in { - val sut = EitherT.leftT[Monad, Int](3) - assert(CheckedT.fromEitherT(sut) == CheckedT.abortT(3)) - } - - "map Right to result" in { - val sut = EitherT.rightT[Monad, Int](4) - assert(CheckedT.fromEitherT(sut) == CheckedT.resultT(4)) - } - - "respect the transformed monad" in { - val sut = EitherT[Monad, Int, Int](Either.left(3)) - assert(CheckedT.fromEitherT(sut) == CheckedT.abort(Either.left(3))) - } - } - - "fromEitherTNonabort" should { - "map Left to nonabort" in { - val sut = EitherT.leftT[Monad, String](3) - assert( - CheckedT.fromEitherTNonabort("result", sut) == CheckedT.continueWithResultT(3, "result") - ) - } - - "map Right to result" in { - val sut = EitherT.rightT[Monad, Int](4) - assert(CheckedT.fromEitherTNonabort(throw new RuntimeException, sut) == CheckedT.resultT(4)) - } - - "respect the transformed monad" in { - val sut = EitherT[Monad, Int, Int](Either.left(3)) - assert( - CheckedT.fromEitherTNonabort(throw new RuntimeException, sut) == CheckedT.abort( - Either.left(3) - ) - ) - } - } - - "Monad.tailRecM" should { - "iterate the step function until it returns Right" in { - val bound = 10 - val run = Monad[CheckedT[Monad, Int, Int, *]].tailRecM[Int, Int](0) { n => - if (n < bound) CheckedT.continueWithResultT(n, Left(n + 1)) else CheckedT.resultT(Right(n)) - } - assert(run.value.exists(_.nonaborts == Chain.fromSeq(0 until bound))) - assert(run.value.exists(_.getResult.contains(bound))) - } - - "abort if the step function aborts" in { - val bound = 10 - val run = Monad[CheckedT[Monad, Int, Int, *]].tailRecM[Int, Int](0) { n => - if (n < bound) CheckedT.continueWithResultT(n, Left(n + 1)) else CheckedT.abortT(bound) - } - assert(run.value.exists(_.getAbort.contains(bound))) - assert(run.value.exists(_.nonaborts == Chain.fromSeq(0 until bound))) - } - - "run in constant stack space" in { - val bound = 1000000 - val run = Monad[CheckedT[Monad, Int, Int, *]].tailRecM[Int, Int](0) { n => - if (n < bound) CheckedT.resultT(Left(n + 1)) else CheckedT.resultT(Right(n)) - } - assert(run == CheckedT.resultT(bound)) - } - - "respect the transformed monad" in { - val run = Monad[CheckedT[Monad, Int, Int, *]].tailRecM[Int, Int](0) { n => - CheckedT.result(Either.left(n)) - } - assert(run.value == Either.left(0)) - } - } - - { - import CheckedTTest.{arbitraryCheckedT, eqCheckedT} - import CheckedTest.{arbitraryChecked, eqChecked} - - "Functor" should { - checkAllLaws( - "Functor", - FunctorTests[CheckedT[Monad, Int, String, *]].functor[Int, Int, String], - ) - } - - "Applicative" should { - import cats.laws.discipline.arbitrary.catsLawsArbitraryForValidated - checkAllLaws( - "Applicative", - ApplicativeTests[CheckedT[Validated[String, *], Int, String, *]] - .applicative[Int, Int, String], - ) - } - - "MonadError" should { - checkAllLaws( -
"MonadError", - MonadErrorTests[CheckedT[Monad, Int, String, *], Int].monadError[Int, Int, String], - ) - } - - "Parallel" should { - import cats.laws.discipline.arbitrary.{ - catsLawsArbitraryForValidated, - catsLawsArbitraryForNested, - } - checkAllLaws( - name = "Parallel", - ParallelTests[CheckedT[Monad, Int, String, *]].parallel[Int, String], - ) - } - } - -} - -object CheckedTTest { - - implicit def eqCheckedT[F[_], A, N, R](implicit - F: Eq[F[Checked[A, N, R]]] - ): Eq[CheckedT[F, A, N, R]] = { (x: CheckedT[F, A, N, R], y: CheckedT[F, A, N, R]) => - F.eqv(x.value, y.value) - } - - implicit def arbitraryCheckedT[F[_], A, N, R](implicit - F: Arbitrary[F[Checked[A, N, R]]] - ): Arbitrary[CheckedT[F, A, N, R]] = - Arbitrary(F.arbitrary.map(CheckedT(_))) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/CheckedTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/CheckedTest.scala deleted file mode 100644 index da4f5a21a7..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/CheckedTest.scala +++ /dev/null @@ -1,537 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import cats.Eq -import cats.data.{Chain, EitherT, NonEmptyChain} -import cats.laws.discipline.MonadErrorTests -import cats.syntax.foldable.* -import cats.syntax.traverse.* -import com.digitalasset.canton.BaseTestWordSpec -import org.scalacheck.{Arbitrary, Gen} -import org.scalatest.wordspec.{AnyWordSpec, AnyWordSpecLike} - -class CheckedTest extends AnyWordSpec with BaseTestWordSpec { - - def failure[A](x: A): Nothing = throw new RuntimeException - - def failure2[A, B](x: A, y: B): Nothing = throw new RuntimeException - - "map" should { - "not touch aborts" in { - val sut: Checked[Int, String, String] = Checked.abort(5) - assert(sut.map(failure) == Checked.abort(5)) - } - - "change the result" in { - assert(Checked.result(10).map(x => x + 1) == Checked.result(11)) - } - - "not touch the non-aborts" in { - assert( - Checked.continueWithResult("non-abort", 5).map(x => x + 1) == Checked.continueWithResult( - "non-abort", - 6, - ) - ) - } - } - - "mapAbort" should { - "change the abort" in { - assert(Checked.abort(5).mapAbort(x => x + 1) == Checked.abort(6)) - } - - "not touch the non-aborts" in { - val sut: Checked[String, String, Unit] = Checked.continue("non-abort") - assert(sut.mapAbort(failure) == Checked.continue("non-abort")) - } - - "not touch the result" in { - val sut: Checked[String, String, Int] = Checked.result(5) - assert(sut.mapAbort(failure) == Checked.result(5)) - } - } - - "mapNonaborts" should { - "not touch aborts" in { - val sut = Checked.abort(5).mapNonaborts(x => x ++ Chain.one(1)) - assert(sut.getAbort.contains(5)) - assert(sut.nonaborts == Chain(1)) - } - - "not touch results" in { - assert( - Checked.result(3).mapNonaborts(x => x ++ Chain.one(1)) == Checked.continueWithResult(1, 3) - ) - } - - "change the nonaborts" in { - val sut = Checked.continue("nonaborts").mapNonaborts(x => Chain.one("test") ++ x) - assert(sut.nonaborts == Chain("test", "nonaborts")) - assert(sut.getResult.contains(())) - } - } - - "mapNonabort" should { - "only touch nonaborts" in { - val sut1: Checked[Int, String, Double] = Checked.abort(5) - assert(sut1.mapNonabort(failure) == sut1) - - val sut2: Checked[Int, String, Double] = Checked.result(5.7) - assert(sut2.mapNonabort(failure) == 
sut2) - } - - "change the nonaborts" in { - val sut1 = Checked.abort(5).appendNonaborts(Chain(3, 5)).mapNonabort(x => x + 1) - assert(sut1.getAbort.contains(5)) - assert(sut1.nonaborts == Chain(4, 6)) - - val sut2 = Checked.result(5).appendNonaborts(Chain(3, 5)).mapNonabort(x => x + 1) - assert(sut2.getResult.contains(5)) - assert(sut2.nonaborts == Chain(4, 6)) - } - } - - "trimap" should { - "work for aborts" in { - val sut: Checked[String, String, Int] = Checked.abort("abort").appendNonabort("nonabort") - val result = sut.trimap(x => x + "test", y => "test " + y, failure) - - assert(result.getAbort.contains("aborttest")) - assert(result.nonaborts == Chain("test nonabort")) - } - - "work for results" in { - val sut: Checked[Double, String, Int] = Checked.continueWithResult("nonabort", 5) - assert( - sut.trimap(failure, x => "test " + x, y => y + 1) == Checked.continueWithResult( - "test nonabort", - 6, - ) - ) - } - } - - "fold" should { - "fold an abort" in { - val sut: Checked[Int, String, Double] = Checked.Abort(5, Chain("nonabort")) - assert(sut.fold((x, y) => Chain.one(x.toString) ++ y, failure2) == Chain("5", "nonabort")) - } - - "fold a result" in { - val sut: Checked[Int, String, Double] = Checked.continueWithResult("nonabort", 7.5) - assert(sut.fold(failure2, (x, y) => Chain.one(y.toString) ++ x) == Chain("7.5", "nonabort")) - } - } - - "prependNonaborts" should { - "work on an abort" in { - val sut = Checked.abort(1).prependNonaborts(Chain("a", "b")).prependNonaborts(Chain("c", "d")) - assert(sut.getAbort.contains(1)) - assert(sut.nonaborts == Chain("c", "d", "a", "b")) - } - - "work on a result" in { - val sut = - Checked.result(1).prependNonaborts(Chain("a", "b")).prependNonaborts(Chain("c", "d")) - assert(sut.getResult.contains(1)) - assert(sut.nonaborts == Chain("c", "d", "a", "b")) - } - } - - "prependNonabort" should { - "work on an abort" in { - assert( - Checked.abort(1).prependNonabort("a").prependNonabort("b") == Checked.Abort( - 1, - Chain("b", "a"), - ) - ) - } - - "work on a result" in { - assert( - Checked.result(1).prependNonabort("a").prependNonabort("b") == Checked.Result( - Chain("b", "a"), - 1, - ) - ) - } - } - - "appendNonaborts" should { - "work on an abort" in { - val sut = Checked.abort(1).appendNonaborts(Chain("a", "b")).appendNonaborts(Chain("c", "d")) - assert(sut.getAbort.contains(1)) - assert(sut.nonaborts == Chain("a", "b", "c", "d")) - } - - "work on a result" in { - val sut = Checked.result(1).appendNonaborts(Chain("a", "b")).appendNonaborts(Chain("c", "d")) - assert(sut.getResult.contains(1)) - assert(sut.nonaborts == Chain("a", "b", "c", "d")) - } - } - - "appendNonabort" should { - "work on an abort" in { - val sut = Checked.abort(1).appendNonabort("a").appendNonabort("b") - assert(sut.getAbort.contains(1)) - assert(sut.nonaborts == Chain("a", "b")) - } - - "work on a result" in { - val sut = Checked.result(1).appendNonabort("a").appendNonabort("b") - assert(sut.getResult.contains(1)) - assert(sut.nonaborts == Chain("a", "b")) - } - } - - "product" should { - "prefer aborts from the left" in { - assert(Checked.abort(5).product(Checked.abort(7)) == Checked.abort(5)) - assert( - Checked.abort(5).product(Checked.continueWithResult(1.0, "result")) == Checked.abort(5) - ) - } - - "combine nonaborts" in { - val sut1 = - Checked.continueWithResult("left", 5).product(Checked.continueWithResult("right", 7)) - assert(sut1.nonaborts == Chain("left", "right")) - assert(sut1.getResult.contains((5, 7))) - - val sut2 = Checked - .continueWithResult("left", 6) 
- .product(Checked.abort("failure").appendNonabort("right")) - assert(sut2.getAbort.contains("failure")) - assert(sut2.nonaborts == Chain("left", "right")) - } - } - - "ap" should { - "prefer aborts from the operator" in { - assert(Checked.abort(5).ap(Checked.abort(7)) == Checked.abort(7)) - assert(Checked.continueWithResult(7, 12).ap(Checked.abort(5)) == Checked.abort(5)) - } - - "combine nonaborts" in { - val sut1 = - Checked - .continueWithResult("right", 7) - .ap(Checked.continueWithResult[Nothing, String, Int => Int]("left", x => x + 1)) - assert(sut1.nonaborts == Chain("left", "right")) - assert(sut1.getResult.contains(8)) - - val sut2 = Checked - .abort("failure") - .appendNonabort("right") - .ap(Checked.continueWithResult[Nothing, String, Int => Int]("left", x => x + 1)) - assert(sut2.getAbort.contains("failure")) - assert(sut2.nonaborts == Chain("left", "right")) - } - } - - "reverseAp" should { - "prefer aborts from the operand" in { - assert(Checked.abort(5).reverseAp(Checked.abort(7)) == Checked.abort(5)) - assert(Checked.continueWithResult(7, 12).reverseAp(Checked.abort(5)) == Checked.abort(5)) - } - - "combine nonaborts" in { - val sut1 = - Checked - .continueWithResult("right", 7) - .reverseAp(Checked.continueWithResult[Nothing, String, Int => Int]("left", x => x + 1)) - assert(sut1.nonaborts == Chain("right", "left")) - assert(sut1.getResult.contains(8)) - - val sut2 = Checked - .continueWithResult("right", 7) - .reverseAp(Checked.abort("failure").appendNonabort("left")) - assert(sut2.getAbort.contains("failure")) - assert(sut2.nonaborts == Chain("right", "left")) - } - } - - "flatMap" should { - "propagate aborts" in { - val sut = Checked.abort("failure").prependNonaborts(Chain("a", "b")) - assert(sut.flatMap(failure) == sut) - } - - "combine non-aborts" in { - val sut = Checked - .continueWithResult("first", 1) - .flatMap(x => if (x == 1) Checked.continueWithResult("second", 2) else failure(x)) - assert(sut.nonaborts == Chain("first", "second")) - assert(sut.getResult.contains(2)) - } - } - - "biflatMap" should { - "call the second continuation for results" in { - val sut = Checked.continueWithResult("first", 1) - val test = sut.biflatMap(failure, result => Checked.continueWithResult("second", result + 2)) - assert(test.nonaborts == Chain("first", "second")) - assert(test.getResult.contains(3)) - } - - "call the first continuation for aborts" in { - val sut = Checked.abort(1).prependNonabort("first") - val test = sut.biflatMap(abort => Checked.continueWithResult("second", abort + 3), failure) - assert(test.nonaborts == Chain("first", "second")) - assert(test.getResult.contains(4)) - } - } - - "abortFlatMap" should { - "not touch results" in { - val sut = Checked.continueWithResult("first", 1) - assert(sut.abortFlatMap(failure) == sut) - } - - "call the continuation for aborts" in { - val sut: Checked[Int, String, Int] = Checked.abort(1).prependNonabort("first") - val test = sut.abortFlatMap(abort => Checked.continueWithResult("second", abort + 3)) - assert(test.nonaborts == Chain("first", "second")) - assert(test.getResult.contains(4)) - } - } - - "toResult" should { - "not touch results" in { - val sut = Checked.continueWithResult("first", 1) - assert(sut.toResult(2) == sut) - } - - "merge abort with the nonaborts" in { - val sut = Checked.abort("abort").prependNonaborts(Chain("first", "second")) - assert( - sut.toResult(3) == Checked - .continueWithResult("second", 3) - .prependNonaborts(Chain("abort", "first")) - ) - } - - } - - "foreach" should { - "do nothing on an abort" in { -
val sut: Checked[String, Int, Double] = Checked.abort("failure") - sut.foreach(failure) - } - - "run the function on the result" in { - @SuppressWarnings(Array("org.wartremover.warts.Var")) - var run = false - Checked.result(5).foreach(x => if (x == 5) run = true) - assert(run) - } - } - - "exists" should { - "return false on an abort" in { - val sut: Checked[String, String, String] = Checked.abort("abort").appendNonabort("nonabort") - assert(!sut.exists(failure)) - } - - "evaluate the predicate on the result" in { - assert(Checked.result(5).exists(x => x == 5)) - assert(!Checked.result(5).exists(x => x != 5)) - } - } - - "forall" should { - "return true on an abort" in { - val sut: Checked[String, String, String] = Checked.abort("abort").appendNonabort("nonabort") - assert(sut.forall(failure)) - } - - "evaluate the predicate on the result" in { - assert(Checked.result(5).forall(x => x == 5)) - assert(!Checked.result(5).forall(x => x != 5)) - } - } - - "traverse" should { - def f(x: String): Option[Int] = Some(x.length) - - def g(x: String): Option[Int] = None - - "run on a result" in { - val sut = Checked.continueWithResult("nonabort", "result") - assert(sut.traverse(f) == Some(Checked.continueWithResult("nonabort", 6))) - assert(sut.traverse(g) == None) - } - - "embed the abort" in { - val sut: Checked[String, String, String] = Checked.abort("abort").appendNonabort("nonabort") - assert(sut.traverse(f) == Some(sut)) - assert(sut.traverse(g) == Some(sut)) - } - } - - "toEither" should { - "map abort to Left" in { - assert(Checked.abort("abort").appendNonabort("nonabort").toEither == Left("abort")) - } - - "map result to Right" in { - assert(Checked.continueWithResult("nonabort", "result").toEither == Right("result")) - } - } - - "toEitherWithNonaborts" should { - "map abort to Left" in { - assert( - Checked.abort("abort").appendNonabort("nonabort").toEitherWithNonaborts == Left( - NonEmptyChain("abort", "nonabort") - ) - ) - } - - "map result to Right" in { - assert(Checked.result("result").toEitherWithNonaborts == Right("result")) - } - - "map nonaborts to Left" in { - assert( - Checked.continueWithResult("nonabort", "result").toEitherWithNonaborts == Left( - NonEmptyChain("nonabort") - ) - ) - } - } - - "toOption" should { - "map abort to None" in { - assert(Checked.abort("abort").appendNonabort("nonabort").toOption == None) - } - - "map result to Some" in { - assert(Checked.continueWithResult("nonabort", "result").toOption == Some("result")) - } - } - - "isAbort" should { - "identify aborts" in { - assert(Checked.abort(5).isAbort) - assert(!Checked.result(5).isAbort) - assert(!Checked.continueWithResult(4, 5).isAbort) - } - } - - "isResult" should { - "identify results" in { - assert(!Checked.abort(5).isResult) - assert(Checked.result(5).isResult) - assert(Checked.continueWithResult(4, 5).isResult) - } - } - - "successful" should { - "identify results without nonaborts" in { - assert(!Checked.abort(5).successful) - assert(Checked.result(5).successful) - assert(!Checked.continueWithResult(4, 5).successful) - } - } - - "fromEither" should { - "map Left to abort" in { - assert(Checked.fromEither(Left("abc")) == Checked.abort("abc")) - } - - "map Right to result" in { - assert(Checked.fromEither(Right("abc")) == Checked.result("abc")) - } - } - - "fromEitherNonabort" should { - "map Left to nonabort" in { - assert(Checked.fromEitherNonabort(1)(Left(3)) == Checked.continueWithResult(3, 1)) - } - "map Right to result" in { - assert(Checked.fromEitherNonabort(1)(Right(3)) == Checked.result(3)) 
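// An illustrative model of the shape the conversions above rely on: an abort
// carries an error plus the non-aborts collected so far, while a result
// carries the non-aborts plus a value (a sketch, not canton's actual Checked):
sealed trait CheckedModel[+A, +N, +R]
final case class AbortModel[+A, +N](abort: A, nonaborts: List[N])
    extends CheckedModel[A, N, Nothing]
final case class ResultModel[+N, +R](nonaborts: List[N], result: R)
    extends CheckedModel[Nothing, N, R]
// Checked.abort("abort").appendNonabort("nonabort") corresponds to
// AbortModel("abort", List("nonabort")), and Checked.continueWithResult("nonabort", "result")
// to ResultModel(List("nonabort"), "result"), which is why toOption above yields
// None for the former and Some("result") for the latter.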
- } - } - - "fromEitherT" should { - "map Left to abort" in { - assert(Checked.fromEitherT(EitherT.leftT[Option, Int](5)) == Some(Checked.abort(5))) - assert(Checked.fromEitherT(EitherT.left[Int](Option.empty[Int])) == None) - } - - "map Right to result" in { - assert(Checked.fromEitherT(EitherT.rightT[Option, Int](5)) == Some(Checked.result(5))) - assert(Checked.fromEitherT(EitherT.right[Int](Option.empty[Int])) == None) - } - } - - "fromEitherTNonabort" should { - "map Left to nonabort" in { - assert( - Checked.fromEitherTNonabort(12)(EitherT.leftT[Option, Int](5)) == Some( - Checked.continueWithResult(5, 12) - ) - ) - assert(Checked.fromEitherTNonabort(12)(EitherT.left[Int](Option.empty[Int])) == None) - } - - "map Right to result" in { - assert( - Checked.fromEitherTNonabort(12)(EitherT.rightT[Option, Int](5)) == Some(Checked.result(5)) - ) - assert(Checked.fromEitherTNonabort(12)(EitherT.right[Int](Option.empty[Int])) == None) - } - } - - "MonadError" should { - import CheckedTest.{arbitraryChecked, eqChecked} - checkAllLaws( - "MonadError", - MonadErrorTests[Checked[Int, String, *], Int].monadError[Int, Int, String], - ) - } - - private lazy val stackSafetyDepth = 20000 - - "traverse" should { - "be stack safe" in { - (1 to stackSafetyDepth: Seq[Int]).traverse(x => Checked.result(x)).getResult.value should - have size stackSafetyDepth.toLong - } - } - - "traverse_" should { - "be stack safe" in { - (1 to stackSafetyDepth: Seq[Int]) - .traverse_(x => Checked.result(x)) - .getResult - .value should be(()) - } - } -} - -object CheckedTest extends AnyWordSpecLike { - - implicit def eqChecked[A, N, R]: Eq[Checked[A, N, R]] = Eq.fromUniversalEquals - - implicit def arbitraryChecked[A: Arbitrary, N: Arbitrary, R: Arbitrary] - : Arbitrary[Checked[A, N, R]] = - Arbitrary( - Gen.oneOf( - for { - abort <- Arbitrary.arbitrary[A] - nonaborts <- Arbitrary.arbitrary[List[N]].map(Chain.fromSeq) - } yield Checked.Abort(abort, nonaborts), - for { - nonaborts <- Arbitrary.arbitrary[List[N]].map(Chain.fromSeq) - result <- Arbitrary.arbitrary[R] - } yield Checked.Result(nonaborts, result), - ) - ) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/DamlPackageLoaderTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/DamlPackageLoaderTest.scala deleted file mode 100644 index 2f843f48a7..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/DamlPackageLoaderTest.scala +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
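A sketch of the law-checking pattern the deleted CheckedTest uses: discipline runs the cats MonadError laws against ScalaCheck-generated Checked values. It assumes the MonadError instance from Checked's companion, the Arbitrary/Eq instances shown above, discipline-scalatest on the classpath, and the kind-projector plugin for the `*` type syntax.

import cats.laws.discipline.MonadErrorTests
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.prop.Configuration
import org.typelevel.discipline.scalatest.FunSuiteDiscipline

class CheckedLawsSketch extends AnyFunSuite with FunSuiteDiscipline with Configuration {
  // Generators and equality from the deleted companion object above.
  import CheckedTest.{arbitraryChecked, eqChecked}

  // Registers one test per law; counterexamples are shrunk Checked values.
  checkAll(
    "MonadError[Checked[Int, String, *]]",
    MonadErrorTests[Checked[Int, String, *], Int].monadError[Int, Int, String],
  )
}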
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import com.digitalasset.canton.BaseTest -import org.scalatest.wordspec.AnyWordSpec - -class DamlPackageLoaderTest extends AnyWordSpec with BaseTest { - - "DamlPackageLoader" should { - "find daml package" in { - for { - packages <- DamlPackageLoader.getPackagesFromDarFile(CantonExamplesPath) - } yield packages.values.flatMap(_.modules.keys.map(_.toString)) should contain.allOf( - "CantonExamples", - "Divulgence", - "Paint", - ) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/DelayUtilTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/DelayUtilTest.scala deleted file mode 100644 index 3be7f363c4..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/DelayUtilTest.scala +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.concurrent.Threading -import com.digitalasset.canton.config.DefaultProcessingTimeouts -import com.digitalasset.canton.lifecycle.FlagCloseable -import org.scalatest.wordspec.AnyWordSpec - -import java.util.concurrent.TimeUnit -import scala.concurrent.Await -import scala.concurrent.duration.* - -class DelayUtilTest extends AnyWordSpec with BaseTest { - "DelayUtil.delay" should { - "succeed roughly within the given delay" in { - val delay = 100.millis - val deadline = delay.fromNow - Await.result(DelayUtil.delay(delay), Duration.Inf) - - deadline.isOverdue() shouldBe true - -deadline.timeLeft should be < delay * 10 - } - - "not prevent termination" in { - val executorService = - Threading.singleThreadScheduledExecutor("delay-util-test-executor", noTracingLogger) - - val delayed = DelayUtil.delay(executorService, 1.minute, _.success(())) - - // Executor service terminates immediately despite a pending task. - executorService.shutdown() - executorService.awaitTermination(1, TimeUnit.SECONDS) shouldBe true - delayed.isCompleted shouldBe false - } - - "not schedule when already closing" in { - val flagCloseable = - FlagCloseable(DelayUtilTest.this.logger, DefaultProcessingTimeouts.testing) - - val delayedCloseable = DelayUtil.delay("test", 20.millis, flagCloseable) - flagCloseable.close() - Threading.sleep(100) - assert(!delayedCloseable.isCompleted, "Future completed during shutdown") - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/EitherUtilTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/EitherUtilTest.scala deleted file mode 100644 index fa68cf2a67..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/EitherUtilTest.scala +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
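The DelayUtil contract these tests pin down: complete a Future after a given delay without blocking a thread, and never let a pending delay keep the JVM or an executor alive. A minimal sketch with a plain JDK scheduler (names are illustrative, not canton's API; canton's DelayUtil additionally cooperates with FlagCloseable so that nothing is scheduled during shutdown):

import java.util.concurrent.{Executors, TimeUnit}
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Future, Promise}

object DelaySketch {
  private val scheduler = Executors.newSingleThreadScheduledExecutor { (r: Runnable) =>
    val t = new Thread(r, "delay-sketch")
    t.setDaemon(true) // a pending delay must not prevent termination
    t
  }

  def delay(duration: FiniteDuration): Future[Unit] = {
    val promise = Promise[Unit]()
    val _ = scheduler.schedule(
      new Runnable { override def run(): Unit = { promise.trySuccess(()); () } },
      duration.toMillis,
      TimeUnit.MILLISECONDS,
    )
    // Stays incomplete if the scheduler shuts down first, as the test expects.
    promise.future
  }
}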
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.util.EitherUtil.{RichEither, RichEitherIterable} -import org.scalatest.wordspec.AnyWordSpec - -@SuppressWarnings(Array("org.wartremover.warts.Var")) -class EitherUtilTest extends AnyWordSpec with BaseTest { - "EitherUtil.RichEither" should { - val left: Either[String, Int] = Left("sadness") - val right: Either[String, Int] = Right(42) - - "implement tap left" in { - var counter = 0 - - right.tapLeft(_ => counter += 1) shouldBe right - counter shouldBe 0 - - left.tapLeft(_ => counter += 1) shouldBe left - counter shouldBe 1 - } - - "implement tap right" in { - var counter = 0 - - left.tapRight(_ => counter += 1) shouldBe left - counter shouldBe 0 - - right.tapRight(_ => counter += 1) shouldBe right - counter shouldBe 1 - } - } - - "EitherUtil.RichEitherIterable" should { - val eithers = List(Right(3), Left("a"), Right(2), Right(1), Left("b")) - - "implement collect left" in { - Nil.collectLeft shouldBe Nil - eithers.collectLeft shouldBe List("a", "b") - } - - "implement collect right" in { - Nil.collectRight shouldBe Nil - eithers.collectRight shouldBe List(3, 2, 1) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/HexStringTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/HexStringTest.scala deleted file mode 100644 index b17e75df39..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/HexStringTest.scala +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import com.digitalasset.canton.BaseTest -import com.google.protobuf.ByteString -import org.scalatest.wordspec.AnyWordSpec - -class HexStringTest extends AnyWordSpec with BaseTest { - "HexString" should { - "correctly (de)serialize byte arrays" in { - val bytes = new Array[Byte](32) - scala.util.Random.nextBytes(bytes) - val s = HexString.toHexString(bytes) - val parsed = HexString.parse(s) - parsed.value shouldBe bytes - } - "correctly (de)serialize byteString with even length size" in { - val bytes = new Array[Byte](32) - scala.util.Random.nextBytes(bytes) - val length = 12 - val s = HexString.toHexString(ByteString.copyFrom(bytes), length = length) - val parsed = HexString.parse(s) - parsed.value shouldBe bytes.take(length / 2) - } - "correctly (de)serialize byteString with odd length size" in { - val bytes = new Array[Byte](32) - scala.util.Random.nextBytes(bytes) - val length = 13 - val s = HexString.toHexString(ByteString.copyFrom(bytes), length = length) - val parsed = HexString.parse(s) - parsed.value shouldBe bytes.take(length / 2 + 1) - } - - "fail to deserialize gibberish hex arrays" in { - val err1 = HexString.parse("0") - val err2 = HexString.parse("blablablabla") - err1 shouldBe None - err2 shouldBe None - } - - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/LazyValWithContextTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/LazyValWithContextTest.scala deleted file mode 100644 index 9c9ddeb5e6..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/LazyValWithContextTest.scala +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its 
affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import com.digitalasset.canton.concurrent.Threading -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.util.LazyValWithContextTest.{ - ClassUsingLazyValWithContext, - ClassUsingRecursiveLazyValWithContext, -} -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.wordspec.AnyWordSpec - -import java.util.concurrent.atomic.AtomicInteger -import scala.concurrent.{Future, Promise} - -class LazyValWithContextTest extends AnyWordSpec with BaseTest with HasExecutionContext { - - "LazyValWithContext" should { - "return the result of the initializer" in { - val sut = ClassUsingLazyValWithContext(_ => "abc") - sut.lazyVal(1) shouldBe "abc" - } - - "evaluate the initializer only once" in { - val counter = new AtomicInteger() - val sut = ClassUsingLazyValWithContext { _ => - counter.incrementAndGet().discard[Int] - "abc" - } - sut.lazyVal(1) shouldBe "abc" - sut.lazyVal(2) shouldBe "abc" - counter.get() shouldBe 1 - } - - "initialize the value with the first context value" in { - val sut = ClassUsingLazyValWithContext { i => - i.toString - } - sut.lazyVal(1) shouldBe "1" - sut.lazyVal(2) shouldBe "1" - } - - "retry initialization upon an exception" in { - val sut = ClassUsingLazyValWithContext { i => - if (i == 1) throw new IllegalArgumentException() else "abc" - } - an[IllegalArgumentException] should be thrownBy sut.lazyVal(1) - sut.lazyVal(2) shouldBe "abc" - } - - "initialize only once even under contention" in { - val sutCell = new SingleUseCell[ClassUsingLazyValWithContext]() - val stash = Promise[String]() - val sut = ClassUsingLazyValWithContext { i => - if (i == 1) { - // Spawn another thread that tries to access the lazy val while it's being initialized - // and wait a bit so that it actually runs - stash.completeWith(Future { - sutCell.get.value.lazyVal(2) - }) - Threading.sleep(100) - "abc" - } else { - "def" - } - } - sutCell.putIfAbsent(sut).discard - sut.lazyVal(1) shouldBe "abc" - stash.future.futureValue shouldBe "abc" - } - - "initialize multiple times when the initializer recurses" in { - // This test shows that the `synchronized` block is re-entrant. - // The same behavior can be observed with ordinary lazy vals in Scala. 
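// An illustrative sketch of the re-entrancy noted above, using a plain
// Scala 2.13 lazy val rather than canton's LazyValWithContext: the initializer
// reads the value it is defining, so it runs twice before memoization settles.
object ReentrantLazySketch {
  var runs = 0
  lazy val value: Int = {
    runs += 1
    if (runs == 1) value + 10 // the re-entrant read runs the initializer again
    else runs
  }
}
// On Scala 2.13, ReentrantLazySketch.value evaluates to 12 with runs == 2: the
// inner evaluation returns 2, the outer adds 10 and becomes the memoized
// result, mirroring state.get == 2 in the test just below.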
- val sut = new ClassUsingRecursiveLazyValWithContext() - sut.lazyVal(2) shouldBe "foo2" - sut.state.get shouldBe 2 - } - } -} - -object LazyValWithContextTest { - private type Context = Int - private type T = String - - private class ClassUsingLazyValWithContext(initializer: Context => T) { - val _lazyVal: LazyValWithContext[T, Context] = new LazyValWithContext[T, Context](initializer) - def lazyVal(context: Int): String = _lazyVal.get(context) - } - private object ClassUsingLazyValWithContext { - def apply(initializer: Context => T): ClassUsingLazyValWithContext = - new ClassUsingLazyValWithContext(initializer) - } - - private class ClassUsingRecursiveLazyValWithContext { - val state = new AtomicInteger(0) - - val _lazyVal: LazyValWithContext[T, Context] = new LazyValWithContext[T, Context]({ context => - val newState = state.incrementAndGet() - if (newState == 1) lazyVal(context) else s"foo$newState" - }) - def lazyVal(context: Int): String = _lazyVal.get(context) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/LengthLimitedByteStringTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/LengthLimitedByteStringTest.scala deleted file mode 100644 index d1027be652..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/LengthLimitedByteStringTest.scala +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import com.digitalasset.canton.BaseTest -import com.google.protobuf.ByteString -import org.scalatest.wordspec.AnyWordSpec - -class LengthLimitedByteStringTest extends AnyWordSpec with BaseTest { - "LengthLimitedByteString256" should { - "have a correctly working .create" in { - val ok = ByteString256.create(ByteString.copyFrom("123".getBytes())) - val ok2 = ByteString256.create(ByteString.EMPTY) - val ok3 = ByteString256.create(ByteString.copyFrom(("a" * 256).getBytes())) - val not_ok = - ByteString256.create(ByteString.copyFrom(("a" * 257).getBytes()), Some("Incantation")) - - ok.value.unwrap shouldBe ByteString.copyFrom("123".getBytes()) - ok2.value.unwrap shouldBe ByteString.EMPTY - ok3.value.unwrap shouldBe ByteString.copyFrom(("a" * 256).getBytes()) - not_ok.left.value shouldBe a[String] - not_ok.left.value should (include("maximum length of 256") and include("Incantation")) - } - - "have a correctly working .tryCreate" in { - val ok = ByteString256.tryCreate(ByteString.copyFrom("123".getBytes())) - val ok2 = ByteString256.tryCreate(ByteString.EMPTY) - val ok3 = ByteString256.tryCreate(ByteString.copyFrom(("a" * 256).getBytes())) - - ok.unwrap shouldBe ByteString.copyFrom("123".getBytes()) - ok2.unwrap shouldBe ByteString.EMPTY - ok3.unwrap shouldBe ByteString.copyFrom(("a" * 256).getBytes()) - a[IllegalArgumentException] should be thrownBy ByteString256 - .tryCreate(ByteString.copyFrom(("a" * 257).getBytes())) - } - - "correctly create an empty bounded ByteString" in { - val empty = ByteString256.empty - empty shouldBe ByteString.EMPTY - } - - "have equals and hashcode functions that work like we expect them to" in { - val s = ByteString.copyFrom("s".getBytes()) - val bar = ByteString.copyFrom("bar".getBytes()) - - val s256 = ByteString256.tryCreate(ByteString.copyFrom("s".getBytes())) - - s256.equals(s) shouldBe true - s256.equals(s256) shouldBe true - s256.equals(bar) shouldBe bar.equals(s256) - - s256.hashCode()
== s.hashCode() shouldBe true - s256.hashCode() == bar.hashCode() shouldBe false - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/LfGenerator.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/LfGenerator.scala deleted file mode 100644 index 319c16a9bb..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/LfGenerator.scala +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import cats.data.StateT -import cats.syntax.traverse.* -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.protocol.* -import com.digitalasset.canton.util.LfTransactionBuilder.* -import com.digitalasset.daml.lf.data.{ImmArray, Ref} -import com.digitalasset.daml.lf.value.test.ValueGenerators -import org.scalacheck.cats.implicits.* -import org.scalacheck.{Arbitrary, Gen} -import org.scalatest.wordspec.AnyWordSpec -import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks - -class LfGenerator extends AnyWordSpec with BaseTest with ScalaCheckDrivenPropertyChecks { - - implicit lazy val txArbitrary: Arbitrary[LfTransaction] = Arbitrary(LfGenerator.transactionGen(4)) - - "LfGenerator" must { - "produce well-formed transactions" in { - forAll { (tx: LfTransaction) => - tx.isWellFormed shouldBe Set.empty - } - } - } -} - -/** Unlike the generators from the daml-lf repository, the transaction generator below creates - * well-formed (in the sense of GenTransaction#isWellFormed) transactions - */ -object LfGenerator { - val lfGen = ValueGenerators - - /** The LF generator generates huge (template) identifiers by default; truncate to something - * reasonable - */ - def truncateIdentifier(id: Ref.Identifier): Ref.Identifier = { - def truncateDotted(name: Ref.DottedName): Ref.DottedName = - Ref.DottedName.assertFromNames( - name.segments.relaxedSlice(0, 2).map(name => Ref.Name.assertFromString(name.take(10))) - ) - id.copy( - pkg = Ref.PackageId.assertFromString(id.packageId.take(10)), - qualifiedName = Ref.QualifiedName.assertFromString( - truncateDotted(id.qualifiedName.module).dottedName + ":" + truncateDotted( - id.qualifiedName.name - ).dottedName - ), - ) - } - - def createGen[N >: LfNodeCreate]: StateT[Gen, NodeIdState, LfAction] = - for { - node <- StateT.liftF(lfGen.malformedCreateNodeGen()) - fixedNode = node.copy(templateId = truncateIdentifier(node.coinst.template), keyOpt = None) - action <- createFromLf[Gen](fixedNode) - } yield action - - def fetchGen[N >: LfNodeFetch]: StateT[Gen, NodeIdState, LfAction] = - for { - node <- StateT.liftF(Gen.resize(10, lfGen.fetchNodeGen)) - templateId = truncateIdentifier(node.templateId) - fixedNode = node.copy(templateId = templateId, keyOpt = None) - action <- fetchFromLf[Gen](fixedNode) - } yield action - - def exerciseGen(maxBranching: Int): StateT[Gen, NodeIdState, LfAction] = - for { - children <- generateMultiple(maxBranching) - node <- StateT.liftF(Gen.resize(10, lfGen.danglingRefExerciseNodeGen)) - templateId = truncateIdentifier(node.templateId) - fixedNode = node.copy(children = ImmArray.empty, templateId = templateId, keyOpt = None) - action <- exerciseFromLf[Gen](fixedNode, children) - } yield action - - def generateMultiple(maxBranching: Int): StateT[Gen, NodeIdState, List[LfAction]] = - for { - generators <- StateT.liftF { - // Laziness prevents the circular 
dependency on exerciseGen from creating an infinite recursion - Gen.lzy( - Gen.resize( - maxBranching, - Gen.containerOf[List, StateT[Gen, NodeIdState, LfAction]]( - Gen.oneOf( - createGen[LfNode], - fetchGen[LfNode], - exerciseGen(maxBranching), - ) - ), - ) - ) - } - idAndMapList <- generators.sequence - } yield idAndMapList - - def transactionGen(maxBranching: Int): Gen[LfTransaction] = - toTransaction(generateMultiple(maxBranching)) - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/LfTransactionBuilder.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/LfTransactionBuilder.scala deleted file mode 100644 index 40e93786e4..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/LfTransactionBuilder.scala +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import cats.Monad -import cats.data.StateT -import com.digitalasset.canton.protocol.* -import com.digitalasset.canton.{LfInterfaceId, LfPackageId} -import com.digitalasset.daml.lf.data.Ref.QualifiedName -import com.digitalasset.daml.lf.data.{ImmArray, Ref} -import com.digitalasset.daml.lf.language.LanguageVersion -import com.digitalasset.daml.lf.value.Value - -import scala.collection.immutable.HashMap - -object LfTransactionBuilder { - - type NodeIdState = Int - - type LfAction = (LfNodeId, Map[LfNodeId, LfActionNode]) - - // Helper methods for Daml-LF types - val defaultLanguageVersion: LanguageVersion = LanguageVersion.default - val defaultTransactionVersion: LfLanguageVersion = LfLanguageVersion.AllV2.min - - val defaultPackageId: LfPackageId = LfPackageId.assertFromString("pkg") - val defaultTemplateId: Ref.Identifier = - Ref.Identifier(defaultPackageId, QualifiedName.assertFromString("module:template")) - val defaultPackageName: Ref.PackageName = Ref.PackageName.assertFromString("pkgName") - val defaultInterfaceId: LfInterfaceId = defaultTemplateId - - val defaultGlobalKey: LfGlobalKey = LfGlobalKey.assertBuild( - defaultTemplateId, - Value.ValueUnit, - defaultPackageName, - ) - - def allocateNodeId[M[_]](implicit monadInstance: Monad[M]): StateT[M, NodeIdState, LfNodeId] = - for { - nodeId <- StateT.get[M, NodeIdState] - _ <- StateT.set[M, NodeIdState](nodeId + 1) - } yield LfNodeId(nodeId) - - def exerciseFromLf[M[_]](lfExercise: LfNodeExercises, children: List[LfAction])(implicit - monadInstance: Monad[M] - ): StateT[M, NodeIdState, LfAction] = - for { - nodeId <- allocateNodeId[M] - childrenIds = children.map(_._1) - childrenMap = children.map(_._2).fold(Map.empty[LfNodeId, LfActionNode])(_ ++ _) - nodeWithChildren = lfExercise.copy(children = childrenIds.to(ImmArray)) - } yield (nodeId, childrenMap ++ Map(nodeId -> nodeWithChildren)) - - def createFromLf[M[_]]( - lfCreate: LfNodeCreate - )(implicit monadInstance: Monad[M]): StateT[M, NodeIdState, LfAction] = - for { - nodeId <- allocateNodeId[M] - } yield (nodeId, Map(nodeId -> lfCreate)) - - def fetchFromLf[M[_]]( - lfFetch: LfNodeFetch - )(implicit monadInstance: Monad[M]): StateT[M, NodeIdState, LfAction] = - for { - nodeId <- allocateNodeId[M] - } yield (nodeId, Map(nodeId -> lfFetch)) - - def initialState: NodeIdState = 0 - - def usedPackages(action: LfAction): Set[LfPackageId] = action match { - case (_, nodeMap) => - val nodeSet = nodeMap.values - - nodeSet.map { - case c: LfNodeCreate => 
c.coinst.template.packageId - case e: LfNodeExercises => e.templateId.packageId - case f: LfNodeFetch => f.templateId.packageId - case l: LfNodeLookupByKey => l.templateId.packageId - }.toSet - } - - /** Turn a state containing a list of actions into a transaction. - * - * @param state - * The (monadic) list of actions - */ - def toTransaction[M[_]]( - state: StateT[M, NodeIdState, List[LfAction]] - )(implicit monadInstance: Monad[M]): M[LfTransaction] = - state - .map( - _.foldRight((List.empty[LfNodeId], Map.empty[LfNodeId, LfNode], Set.empty[LfPackageId])) { - case (act @ (actionRoot, actionMap), (roots, nodeMap, pkgs)) => - (actionRoot +: roots, nodeMap ++ actionMap, pkgs ++ usedPackages(act)) - } - ) - .map { case (rootNodes, nodeMap, _actuallyUsedPkgs) => - LfTransaction(nodes = HashMap(nodeMap.toSeq*), roots = rootNodes.to(ImmArray)) - } - .runA(initialState) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/LoggerUtilTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/LoggerUtilTest.scala deleted file mode 100644 index 529c75f0ce..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/LoggerUtilTest.scala +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import com.digitalasset.canton.BaseTestWordSpec - -class LoggerUtilTest extends BaseTestWordSpec { - - lazy val testString = - """I hate bananas - |I actually do like em - |But I prefer bockwurst - |""".stripMargin - - "string truncation" should { - "not truncate when staying within limits" in { - LoggerUtil.truncateString(4, 100)(testString) shouldBe testString - } - "truncate when seeing too many lines" in { - LoggerUtil.truncateString(maxLines = 1, maxSize = 100)( - testString - ) shouldBe ("I hate bananas\n ...") - } - "truncate when seeing too many characters" in { - LoggerUtil.truncateString(maxLines = 40, maxSize = 25)( - testString - ) shouldBe "I hate bananas\nI actually ..." - } - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/MessageRecorderTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/MessageRecorderTest.scala deleted file mode 100644 index faca8eddba..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/MessageRecorderTest.scala +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
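LfTransactionBuilder above threads a counter through cats' StateT so that node-id allocation stays purely functional and composes with any effect M. A compact sketch of that pattern, assuming only cats on the classpath:

import cats.Monad
import cats.data.StateT

// Mirrors allocateNodeId: read the counter, bump it, return the old value.
def freshId[M[_]: Monad]: StateT[M, Int, Int] =
  for {
    id <- StateT.get[M, Int]
    _ <- StateT.set[M, Int](id + 1)
  } yield id

// freshId[cats.Id].flatMap(a => freshId[cats.Id].map(b => (a, b))).runA(0) == (0, 1)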
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import com.digitalasset.canton.config.DefaultProcessingTimeouts -import com.digitalasset.canton.util.MessageRecorderTest.{Data, Data2} -import com.digitalasset.canton.{BaseTestWordSpec, HasTempDirectory} - -import java.nio.file.Path - -class MessageRecorderTest extends BaseTestWordSpec with HasTempDirectory { - - val testData: Seq[Data] = (0 until 3) map Data.apply - - val recordFile: Path = tempDirectory.resolve("recorded-test-data") - - val recorder = new MessageRecorder(DefaultProcessingTimeouts.testing, loggerFactory) - - "A message recorder" can { - "record data" in { - recorder.startRecording(recordFile) - testData.foreach(m => recorder.record(m)) - recorder.stopRecording() - } - - "read recorded data" in { - val readData = MessageRecorder.load[Data](recordFile, logger) - readData shouldBe testData - } - - "catch type errors" in { - a[ClassCastException] shouldBe thrownBy(MessageRecorder.load[Data2](recordFile, logger)) - } - } -} - -object MessageRecorderTest { - final case class Data(i: Int) - final case class Data2(s: String) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/MonadUtilTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/MonadUtilTest.scala deleted file mode 100644 index 7601328eb6..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/MonadUtilTest.scala +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import cats.Id -import com.digitalasset.canton.BaseTest -import org.scalatest.wordspec.AnyWordSpec - -import scala.collection.mutable - -class MonadUtilTest extends AnyWordSpec with BaseTest { - "sequential traverse" should { - - "return results in the order of the original seq" in { - val xs = List(1, 2, 3) - val result = MonadUtil.sequentialTraverse(xs)(x => x: Id[Int]) - result shouldEqual xs - } - - "perform processing in the order of the original seq" in { - val xs = List(1, 2, 3) - val seen = mutable.ArrayDeque[Int]() - MonadUtil.sequentialTraverse(xs) { x => - seen += x // Appends the seen element to the list - x: Id[Int] - } - seen.toList shouldEqual xs - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/OrderedBucketMergeHubTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/OrderedBucketMergeHubTest.scala deleted file mode 100644 index 9f31e1f0c9..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/OrderedBucketMergeHubTest.scala +++ /dev/null @@ -1,990 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
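The three LoggerUtil expectations above pin down the truncation contract: cap output by line count and by character count, and mark any cut with an ellipsis. One simplified implementation that reproduces exactly those three expectations (canton's actual LoggerUtil may differ in edge cases):

// Keep separators so an untruncated string round-trips unchanged.
def truncateString(maxLines: Int, maxSize: Int)(s: String): String = {
  val kept = s.linesWithSeparators.take(maxLines).mkString.take(maxSize)
  if (kept == s) s else kept + " ..."
}
// truncateString(1, 100)(testString) == "I hate bananas\n ..."
// truncateString(40, 25)(testString) == "I hate bananas\nI actually ..."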
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import com.daml.nonempty.{NonEmpty, NonEmptyUtil} -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.logging.pretty.{Pretty, PrettyInstances, PrettyPrinting} -import com.digitalasset.canton.tracing.{HasTraceContext, TraceContext} -import com.digitalasset.canton.util.OrderedBucketMergeHub.{ - ActiveSourceTerminated, - DeadlockDetected, - DeadlockTrigger, - NewConfiguration, - Output, - OutputElement, -} -import com.digitalasset.canton.util.PekkoUtil.noOpKillSwitch -import org.apache.pekko.Done -import org.apache.pekko.stream.QueueOfferResult.Enqueued -import org.apache.pekko.stream.scaladsl.{Keep, Sink, Source} -import org.apache.pekko.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped -import org.apache.pekko.stream.testkit.scaladsl.{TestSink, TestSource} -import org.apache.pekko.stream.testkit.{StreamSpec, TestPublisher} -import org.apache.pekko.stream.{BoundedSourceQueue, KillSwitch, KillSwitches} -import org.apache.pekko.testkit.EventFilter -import org.apache.pekko.testkit.TestEvent.{Mute, UnMute} - -import java.util.concurrent.atomic.{AtomicInteger, AtomicReference} -import scala.collection.concurrent.TrieMap -import scala.concurrent.duration.DurationInt -import scala.concurrent.{ExecutionContext, Future, Promise} - -class OrderedBucketMergeHubTest extends StreamSpec with BaseTest { - // Override the implicit from PekkoSpec so that we don't get ambiguous implicits - override val patience: PatienceConfig = defaultPatience - - private implicit val executionContext: ExecutionContext = system.dispatcher - - private implicit val prettyString: Pretty[String] = PrettyInstances.prettyString - - private type Name = String - private type Config = Int - private type Offset = Int - private type M = String - private case class Bucket(offset: Int, discriminator: Int) extends PrettyPrinting { - override protected def pretty: Pretty[Bucket] = prettyOfClass( - param("offset", _.offset), - param("discriminator", _.discriminator), - ) - } - private case class Elem(bucket: Bucket, description: String) extends HasTraceContext { - override val traceContext: TraceContext = - TraceContext.withNewTraceContext("test")(Predef.identity) - } - - private def mkHub( - ops: OrderedBucketMergeHubOps[Name, Elem, Config, Offset, M] - ): OrderedBucketMergeHub[Name, Elem, Config, Offset, M] = - new OrderedBucketMergeHub[Name, Elem, Config, Offset, M]( - ops, - loggerFactory, - enableInvariantCheck = true, - ) - - private def mkOps(initial: Offset)( - mkSource: (Name, Config, Offset, Option[Elem]) => Source[Elem, (KillSwitch, Future[Done], M)] - ): OrderedBucketMergeHubOps[Name, Elem, Config, Offset, M] = - OrderedBucketMergeHubOps[Name, Elem, Config, Offset, Bucket, M](initial)(_.bucket, _.offset)( - mkSource - ) - - private def matFor(name: Name): M = s"$name-mat" - - private def addMaterialized( - config: OrderedBucketMergeConfig[Name, Config], - mats: Name* - ): OrderedBucketMergeConfig[Name, (Config, Option[M])] = - config.map((name, c) => c -> Option.when(mats.contains(name))(matFor(name))) - - "with a single config and threshold 1" should { - "be the identity" in assertAllStagesStopped { - def mkElem(i: Int): Elem = Elem(Bucket(2 * i + 200, 0), s"$i") - - val sourceCompletesPromise = Promise[Done]() - - val ops = mkOps(100) { (name, _, _, _) => - Source(1 to 
10).map(mkElem).watchTermination() { (_, doneF) => - sourceCompletesPromise.completeWith(doneF) - (noOpKillSwitch, doneF, matFor(name)) - } - } - val ((configQueue, doneF), emittedF) = - Source.queue(1).viaMat(mkHub(ops))(Keep.both).toMat(Sink.seq)(Keep.both).run() - - val config = OrderedBucketMergeConfig(PositiveInt.one, NonEmpty(Map, "primary" -> 0)) - configQueue.offer(config) shouldBe Enqueued - sourceCompletesPromise.future.futureValue - configQueue.complete() - - emittedF.futureValue shouldBe - NewConfiguration(addMaterialized(config, "primary"), 100) +: - (1 to 10).map(i => OutputElement(NonEmpty(Map, "primary" -> mkElem(i)))) :+ - ActiveSourceTerminated("primary", None) :+ - DeadlockDetected(Seq.empty, DeadlockTrigger.ActiveSourceTermination) - doneF.futureValue - } - } - - "completing the config source" should { - "terminate the stream and drain the source" in assertAllStagesStopped { - def mkElem(i: Int): Elem = Elem(Bucket(2 * i + 200, 0), s"$i") - - val pulledElems = new AtomicReference[Seq[Int]](Seq.empty[Int]) - val configQueueCell = - new SingleUseCell[BoundedSourceQueue[OrderedBucketMergeConfig[Name, Config]]]() - def observeElem(i: Int): Int = { - pulledElems.updateAndGet(_ :+ i) - // Complete the config stream after the second pull! - if (i == 2) configQueueCell.get.value.complete() - i - } - val killSwitchPulledAt = new AtomicInteger() - - val ops = mkOps(100) { (name, _, _, _) => - val completableSource = Source.queue[Int](11) - completableSource.map(observeElem).map(mkElem).watchTermination() { - (boundedSourceQueue, doneF) => - logger.debug("Filling the source") - (1 to 10).foreach(boundedSourceQueue.offer(_).discard) - // Complete the source only when the kill switch is pulled. - // This ensures that the hub should not output an ActiveSourceTermination - val killSwitch = new KillSwitch { - override def shutdown(): Unit = { - val pulledAt = pulledElems.get().lastOption.value - logger.debug(s"Pulled the kill switch at $pulledAt") - killSwitchPulledAt.set(pulledAt) - boundedSourceQueue.complete() - } - override def abort(ex: Throwable): Unit = ??? 
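// An illustrative aside: the hand-rolled KillSwitch above completes a queue on
// shutdown; a real Pekko KillSwitches.single stage provides the same
// stop-from-outside contract. A minimal sketch assuming only pekko-streams:
import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.stream.KillSwitches
import org.apache.pekko.stream.scaladsl.{Keep, Sink, Source}

implicit val system: ActorSystem = ActorSystem("kill-switch-sketch")
val (killSwitch, done) = Source
  .repeat(1)
  .viaMat(KillSwitches.single)(Keep.right) // materialize the switch
  .toMat(Sink.ignore)(Keep.both)
  .run()
killSwitch.shutdown() // completes the stream from outside; `done` then succeeds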
- } - doneF.onComplete { res => - logger.debug(s"Source terminated with result: $res") - } - (killSwitch, doneF, matFor(name)) - } - } - val ((configQueue, doneF), emittedF) = - Source.queue(1).viaMat(mkHub(ops))(Keep.both).toMat(Sink.seq)(Keep.both).run() - configQueueCell.putIfAbsent(configQueue) - val config = OrderedBucketMergeConfig(PositiveInt.one, NonEmpty(Map, "primary" -> 0)) - configQueue.offer(config) shouldBe Enqueued - - val emitted = emittedF.futureValue - val pulledAt = killSwitchPulledAt.get() - pulledAt shouldBe 2 - emitted shouldBe - NewConfiguration(addMaterialized(config, "primary"), 100) +: - (1 until pulledAt).map(i => OutputElement(NonEmpty(Map, "primary" -> mkElem(i)))) - - doneF.futureValue - pulledElems.get shouldBe (1 to 10) - } - } - - "collect elements until the threshold is reached" in assertAllStagesStopped { - def mkElem(i: Int, d: Int, s: String): Elem = Elem(Bucket(i, d), s) - - val primary = "primary" - val secondary = "secondary" - val tertiary = "tertiary" - val primarySourceRef = new AtomicReference[TestPublisher.Probe[Elem]]() - val secondarySourceRef = new AtomicReference[TestPublisher.Probe[Elem]]() - val tertiarySourceRef = new AtomicReference[TestPublisher.Probe[Elem]]() - val sources = Map( - primary -> primarySourceRef, - secondary -> secondarySourceRef, - tertiary -> tertiarySourceRef, - ) - - val ops = mkOps(100) { (name, _, _, _) => - TestSource.probe[Elem].viaMat(KillSwitches.single)(Keep.both).watchTermination() { - case ((probe, killSwitch), doneF) => - sources(name).set(probe) - (killSwitch, doneF, matFor(name)) - } - } - val sinkProbe = TestSink.probe[Output[Name, (Config, Option[M]), Elem, Offset]] - - val ((configQueue, doneF), sink) = - Source.queue(1).viaMat(mkHub(ops))(Keep.both).toMat(sinkProbe)(Keep.both).run() - val config = OrderedBucketMergeConfig( - PositiveInt.tryCreate(2), - NonEmpty(Map, primary -> 0, secondary -> 0, tertiary -> 1), - ) - configQueue.offer(config) shouldBe Enqueued - - // Wait until all three sources have been created - clue("Process NewConfiguration signal") { - sink.request(1) - sink.expectNext() shouldBe NewConfiguration( - addMaterialized(config, primary, secondary, tertiary), - 100, - ) - } - - val primarySource = primarySourceRef.get() - val secondarySource = secondarySourceRef.get() - val tertiarySource = tertiarySourceRef.get() - - // Due to Pekko stream's internal batching, there may be more demand than just the requested 1. 
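// An illustrative model of the rule this test drives: an element is emitted
// only once `threshold` sources have delivered elements in the same bucket
// (offset plus discriminator). The real hub additionally enforces offset order
// and drops late arrivals; this sketch captures just the quorum decision:
final case class BucketModel(offset: Int, discriminator: Int)

def quorumReached(threshold: Int, delivered: Map[String, BucketModel]): Option[BucketModel] =
  delivered.values
    .groupBy(identity)
    .collectFirst { case (bucket, hits) if hits.sizeIs >= threshold => bucket }

// With threshold 2, primary and tertiary agreeing on BucketModel(102, 1) form a
// quorum even though secondary sits in BucketModel(102, 2), matching the
// "Reach the threshold" step above.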
- primarySource.expectRequest() should be >= (1L) - secondarySource.expectRequest() should be >= (1L) - tertiarySource.expectRequest() should be >= (1L) - - sink.request(10) - clue("Send diverging elements") { - primarySource.sendNext(mkElem(102, 1, primary)) - secondarySource.sendNext(mkElem(102, 2, secondary)) // Same offset, but different bucket - // Make sure that we don't emit anything yet - sink.expectNoMessage(20.milliseconds) - } - - clue("Reach the threshold on one of the diverging elements") { - tertiarySource.sendNext(mkElem(102, 1, tertiary)) - sink.expectNext() shouldBe OutputElement( - NonEmpty(Map, primary -> mkElem(102, 1, primary), tertiary -> mkElem(102, 1, tertiary)) - ) - } - - clue("Skip an offset") { - // Now make sure that we can skip offsets if a later quorum is reached - tertiarySource.sendNext(mkElem(103, 1, tertiary)) - // Secondary source's element should have been dropped out by now - // so that the next one is processed - primarySource.sendNext(mkElem(104, 1, primary)) - sink.expectNoMessage(20.milliseconds) - - secondarySource.sendNext(mkElem(104, 1, secondary)) - - sink.expectNext() shouldBe OutputElement( - NonEmpty(Map, primary -> mkElem(104, 1, primary), secondary -> mkElem(104, 1, secondary)) - ) - } - - clue("Ignore late offsets") { - tertiarySource.sendNext(mkElem(104, 1, "late arrival")) - tertiarySource.sendNext(mkElem(105, 2, "three")) - primarySource.sendNext(mkElem(105, 2, "one")) - sink.expectNext() shouldBe OutputElement( - NonEmpty(Map, primary -> mkElem(105, 2, "one"), tertiary -> mkElem(105, 2, "three")) - ) - } - - clue("Output termination only after element") { - tertiarySource.sendNext(mkElem(106, 1, tertiary)) - tertiarySource.sendComplete() - - sink.expectNoMessage(20.milliseconds) - - primarySource.sendNext(mkElem(106, 1, primary)) - sink.expectNext() shouldBe OutputElement( - NonEmpty(Map, primary -> mkElem(106, 1, primary), tertiary -> mkElem(106, 1, tertiary)) - ) - sink.expectNext() shouldBe ActiveSourceTerminated(tertiary, None) - } - - clue("Continue with two sources") { - secondarySource.sendNext(mkElem(105, 2, "two")) - secondarySource.sendNext(mkElem(107, 10, secondary)) - primarySource.sendNext(mkElem(107, 10, primary)) - sink.expectNext() shouldBe OutputElement( - NonEmpty(Map, primary -> mkElem(107, 10, primary), secondary -> mkElem(107, 10, secondary)) - ) - } - - configQueue.complete() - sink.expectComplete() - doneF.futureValue - } - - "buffer only one element per subsource" in assertAllStagesStopped { - def mkElem(config: Config, i: Int): Elem = Elem(Bucket(i, config), "") - - val observedElems: scala.collection.concurrent.Map[String, Int] = TrieMap.empty[String, Int] - - def observeElem(name: String, i: Int): Int = { - observedElems.put(name, i).discard[Option[Int]] - i - } - - val ops = mkOps(0) { (name, config, _, _) => - Source(1 to 100) - .map(observeElem(name, _)) - .viaMat(KillSwitches.single)(Keep.right) - .map(mkElem(config, _)) - .watchTermination()((killSwitch, doneF) => (killSwitch, doneF, matFor(name))) - } - val ((configQueue, doneF), sink) = - Source.queue(1).viaMat(mkHub(ops))(Keep.both).toMat(TestSink.probe)(Keep.both).run() - val sources = 100 - val threshold = PositiveInt.tryCreate(4) - - val config = OrderedBucketMergeConfig( - threshold, - NonEmptyUtil.fromUnsafe((1 to sources).map(i => s"subsource-$i" -> i).toMap), - ) - configQueue.offer(config) shouldBe Enqueued - - sink.request(10) - sink.expectNext() shouldBe NewConfiguration( - addMaterialized(config, (1 to sources).map(i => s"subsource-$i")*), 
- 0, - ) - - // All sources should have been pulled now - eventually() { - observedElems should have size sources.toLong - } - observedElems.foreach { case (name, i) => - clue(s"For subsource $name") { - i shouldBe 1 - } - } - // Nothing is emitted because each bucket contains at most one element. This is a deadlock. - inside(sink.expectNext()) { case DeadlockDetected(elems, DeadlockTrigger.ElementBucketing) => - elems.size shouldBe sources - threshold.value + 2 - } - - clue("Stop the stream") { - configQueue.complete() - sink.expectComplete() - } - doneF.futureValue - // No further elements are pulled during shutdown (the kill switch sits after the observation) - observedElems.foreach { case (name, i) => - clue(s"For subsource $name") { - i shouldBe 1 - } - } - } - - "complete only after draining the source" in assertAllStagesStopped { - val promise = Promise[Elem]() - - val ops = mkOps(0) { (name, _, _, _) => - Source - .single(()) - .viaMat(KillSwitches.single)(Keep.right) - .mapAsync(parallelism = 1)(_ => promise.future) - .watchTermination()((killSwitch, doneF) => (killSwitch, doneF, matFor(name))) - } - val ((configQueue, doneF), sink) = - Source.queue(1).viaMat(mkHub(ops))(Keep.both).toMat(TestSink.probe)(Keep.both).run() - - val config = OrderedBucketMergeConfig( - PositiveInt.one, - NonEmpty(Map, "mapAsync-source" -> 1), - ) - configQueue.offer(config) shouldBe Enqueued - - sink.request(10) - sink.expectNext() shouldBe NewConfiguration( - addMaterialized(config, "mapAsync-source"), - 0, - ) - - configQueue.complete() - sink.expectNoMessage(20.milliseconds) - doneF.isCompleted shouldBe false - - promise.success(Elem(Bucket(1, 1), "")) - sink.expectComplete() - doneF.futureValue - } - - "complete only after the completion future of the source" in assertAllStagesStopped { - val promise = Promise[Done]() - - val ops = mkOps(0) { (name, _, _, _) => - Source.empty.mapMaterializedValue(_ => (noOpKillSwitch, promise.future, matFor(name))) - } - val ((configQueue, doneF), sink) = - Source.queue(1).viaMat(mkHub(ops))(Keep.both).toMat(TestSink.probe)(Keep.both).run() - - val config = OrderedBucketMergeConfig( - PositiveInt.one, - NonEmpty(Map, "source-with-custom-completion-future" -> 1), - ) - configQueue.offer(config) shouldBe Enqueued - - sink.request(10) - sink.expectNext() shouldBe NewConfiguration( - addMaterialized(config, "source-with-custom-completion-future"), - 0, - ) - sink.expectNext() shouldBe ActiveSourceTerminated("source-with-custom-completion-future", None) - sink.expectNext() shouldBe DeadlockDetected(Seq.empty, DeadlockTrigger.ActiveSourceTermination) - - configQueue.complete() - sink.expectComplete() - always(20.milliseconds) { - doneF.isCompleted shouldBe false - } - - promise.success(Done) - doneF.futureValue - } - - "reconfigurations take effect immediately" in assertAllStagesStopped { - def mkElem(i: Int, d: Int, s: String): Elem = Elem(Bucket(i, d), s) - - val probes = TrieMap.empty[String, TestPublisher.Probe[Elem]] - - val ops = mkOps(0) { (name, config, _, _) => - TestSource.probe[Elem].viaMat(KillSwitches.single)(Keep.both).watchTermination() { - case ((probe, killSwitch), doneF) => - probes.put(s"$name-$config", probe) - (killSwitch, doneF, matFor(name)) - } - } - val sinkProbe = TestSink.probe[Output[Name, (Config, Option[M]), Elem, Offset]] - - val ((configQueue, doneF), sink) = - Source.queue(1).viaMat(mkHub(ops))(Keep.both).toMat(sinkProbe)(Keep.both).run() - val config = OrderedBucketMergeConfig( - PositiveInt.tryCreate(2), - NonEmpty( - Map, - "probe1" -> 
0, - "probe2" -> 0, - "probe3" -> 0, - "probe4" -> 0, - "probe5" -> 0, - ), - ) - configQueue.offer(config) shouldBe Enqueued - - // Wait until all five sources have been created - clue("Process NewConfiguration signal") { - sink.request(1) - sink.expectNext() shouldBe NewConfiguration( - addMaterialized(config, "probe1", "probe2", "probe3", "probe4", "probe5"), - 0, - ) - } - sink.request(20) - - probes("probe1-0").sendNext(mkElem(1, 0, "p1")) - probes("probe1-0").sendNext(mkElem(5, 0, "p1-5")) - probes("probe2-0").sendNext(mkElem(2, 0, "p2")) - probes("probe2-0").sendNext(mkElem(4, 0, "p2-4")) - probes("probe3-0").sendNext(mkElem(3, 0, "p3")) - probes("probe4-0").sendNext(mkElem(3, 1, "p4")) - probes("probe5-0").sendNext(mkElem(5, 1, "p5")) - inside(sink.expectNext()) { case DeadlockDetected(elems, DeadlockTrigger.ElementBucketing) => - elems.size shouldBe 5 - } - probes("probe5-0").sendComplete() - sink.expectNoMessage(20.milliseconds) - - clue("lower the threshold, change probe2 config, remove probe5, add probe6") { - val config2 = OrderedBucketMergeConfig( - PositiveInt.one, - NonEmpty( - Map, - "probe1" -> 0, - "probe2" -> 1, - "probe3" -> 0, - "probe4" -> 0, - "probe6" -> 0, - ), - ) - configQueue.offer(config2) shouldBe Enqueued - sink.expectNext() shouldBe NewConfiguration(addMaterialized(config2, "probe2", "probe6"), 0) - - probes("probe2-0").expectCancellation() - - // Probe1's element is kept - sink.expectNext() shouldBe - OutputElement(NonEmpty(Map, "probe1" -> mkElem(1, 0, "p1"))) - // Probe2's elements are dropped - // Non-deterministically choose between probe3 and probe4 - sink.expectNext() should ( - equal(OutputElement(NonEmpty(Map, "probe3" -> mkElem(3, 0, "p3")))) or - equal(OutputElement(NonEmpty(Map, "probe4" -> mkElem(3, 1, "p4")))) - ) - // Probe5 is stopped, so there is no non-determinism here for offset 5 - sink.expectNext() shouldBe - OutputElement(NonEmpty(Map, "probe1" -> mkElem(5, 0, "p1-5"))) - // We can use the new probes to send elements - probes("probe2-1").sendNext(mkElem(6, 0, "p2-6")) - sink.expectNext() shouldBe OutputElement(NonEmpty(Map, "probe2" -> mkElem(6, 0, "p2-6"))) - probes("probe6-0").sendNext(mkElem(7, 0, "p6-7")) - sink.expectNext() shouldBe OutputElement(NonEmpty(Map, "probe6" -> mkElem(7, 0, "p6-7"))) - - // Test that all the probes whose elements have not been emitted are evicted - // Do this one by one to avoid signalling races - probes("probe4-0").sendNext(mkElem(8, 0, "p4-8")) - sink.expectNext() shouldBe OutputElement(NonEmpty(Map, "probe4" -> mkElem(8, 0, "p4-8"))) - probes("probe3-0").sendNext(mkElem(9, 0, "p3-9")) - sink.expectNext() shouldBe OutputElement(NonEmpty(Map, "probe3" -> mkElem(9, 0, "p3-9"))) - } - - val config3 = clue("raise the threshold again to 3 and fill the buckets") { - val config3 = OrderedBucketMergeConfig( - PositiveInt.tryCreate(3), - NonEmpty( - Map, - "probe1" -> 0, - "probe2" -> 1, - "probe3" -> 0, - "probe4" -> 0, - "probe5" -> 0, - "probe6" -> 0, - "probe7" -> 0, - "probe8" -> 0, - ), - ) - configQueue.offer(config3) shouldBe Enqueued - sink.expectNext() shouldBe NewConfiguration( - addMaterialized(config3, "probe5", "probe7", "probe8"), - 9, - ) - - probes("probe1-0").sendNext(mkElem(10, 0, "p1-10")) - probes("probe2-1").sendNext(mkElem(10, 1, "p2-10")) - probes("probe3-0").sendNext(mkElem(10, 0, "p3-10")) - probes("probe4-0").sendNext(mkElem(11, 0, "p4-11")) - probes("probe5-0").sendNext(mkElem(12, 0, "p5-12")) - probes("probe6-0").sendNext(mkElem(12, 0, "p6-12")).sendComplete() - 
probes("probe7-0").sendNext(mkElem(12, 2, "p7-12")) - probes("probe8-0").sendNext(mkElem(11, 1, "p8-11")).sendComplete() - inside(sink.expectNext()) { case DeadlockDetected(elems, DeadlockTrigger.ElementBucketing) => - elems.size shouldBe 8 - } - sink.expectNoMessage(20.milliseconds) - - config3 - } - - clue("lower the threshold to 2 and make sure that everything is cleaned up") { - val config4 = config3.copy(threshold = PositiveInt.tryCreate(2)) - configQueue.offer(config4) shouldBe Enqueued - sink.expectNext() shouldBe NewConfiguration(addMaterialized(config4), 9) - - // Probe2's bucket has not reached the threshold, so there's no non-determinism here - sink.expectNext() shouldBe OutputElement( - NonEmpty(Map, "probe1" -> mkElem(10, 0, "p1-10"), "probe3" -> mkElem(10, 0, "p3-10")) - ) - // We do not really care whether the termination of probe8 comes before element 12 - val next1 = sink.expectNext() - val next2 = sink.expectNext() - Set(next1, next2) shouldBe Set( - OutputElement( - NonEmpty(Map, "probe5" -> mkElem(12, 0, "p5-12"), "probe6" -> mkElem(12, 0, "p6-12")) - ), - ActiveSourceTerminated("probe8", None), - ) - // However, the termination signal for probe6 must come after the bucket that contains probe6's element - sink.expectNext() shouldBe ActiveSourceTerminated("probe6", None) - - // Probes 2, 4, and 7 should have their elements evicted. - // Check that this is the case - probes("probe7-0").sendComplete() - sink.expectNext() shouldBe ActiveSourceTerminated("probe7", None) - probes("probe2-1").sendNext(mkElem(13, 0, "p2-13")) - probes("probe4-0").sendNext(mkElem(13, 0, "p4-13")) - sink.expectNext() shouldBe OutputElement( - NonEmpty(Map, "probe2" -> mkElem(13, 0, "p2-13"), "probe4" -> mkElem(13, 0, "p4-13")) - ) - } - - configQueue.complete() - sink.expectComplete() - doneF.futureValue - } - - "reconfiguration synchronizes with the completion future" in assertAllStagesStopped { - val promise = Promise[Done]() - - val ops = mkOps(0) { (name, _, offset, _) => - if (name == "slow-doneF-source") { - Source.single(Elem(Bucket(offset + 1, 0), "")).viaMat(KillSwitches.single) { - (_, killSwitch) => (killSwitch, promise.future, matFor(name)) - } - } else - Source.empty.mapMaterializedValue(_ => - (noOpKillSwitch, Future.successful(Done), matFor(name)) - ) - } - val ((configQueue, doneF), sink) = - Source.queue(1).viaMat(mkHub(ops))(Keep.both).toMat(TestSink.probe)(Keep.both).run() - - val config = OrderedBucketMergeConfig( - PositiveInt.tryCreate(2), - NonEmpty(Map, "slow-doneF-source" -> 1, "another-source" -> 1), - ) - configQueue.offer(config) shouldBe Enqueued - - sink.request(10) - sink.expectNext() shouldBe NewConfiguration( - addMaterialized(config, "slow-doneF-source", "another-source"), - 0, - ) - sink.expectNext() shouldBe ActiveSourceTerminated("another-source", None) - sink.expectNext() shouldBe DeadlockDetected(Seq.empty, DeadlockTrigger.ActiveSourceTermination) - - val config2 = OrderedBucketMergeConfig( - PositiveInt.one, - NonEmpty(Map, "yet-another-source" -> 1), - ) - configQueue.offer(config2) shouldBe Enqueued - sink.expectNext() shouldBe NewConfiguration(addMaterialized(config2, "yet-another-source"), 0) - sink.expectNext() shouldBe ActiveSourceTerminated("yet-another-source", None) - sink.expectNext() shouldBe DeadlockDetected(Seq.empty, DeadlockTrigger.ActiveSourceTermination) - - configQueue.complete() - sink.expectComplete() - always(20.milliseconds) { - doneF.isCompleted shouldBe false - } - - promise.success(Done) - doneF.futureValue - } - - "propagate 
failures" in assertAllStagesStopped { - val promise = Promise[Done]() - val sourceEx = new Exception("Source failed") - val ops = mkOps(0) { (name, _, _, _) => - Source - .single(()) - .map(_ => throw sourceEx) - .mapMaterializedValue(_ => (noOpKillSwitch, promise.future, matFor(name))) - } - val ((configQueue, doneF), sink) = - Source.queue(1).viaMat(mkHub(ops))(Keep.both).toMat(TestSink.probe)(Keep.both).run() - val config = OrderedBucketMergeConfig(PositiveInt.one, NonEmpty(Map, "one" -> 1)) - configQueue.offer(config) shouldBe Enqueued - - sink.request(5) - sink.expectNext() shouldBe NewConfiguration(addMaterialized(config, "one"), 0) - sink.expectNext() shouldBe ActiveSourceTerminated("one", Some(sourceEx)) - sink.expectNext() shouldBe DeadlockDetected(Seq.empty, DeadlockTrigger.ActiveSourceTermination) - - val configEx = new Exception("Config stream failed") - configQueue.fail(configEx) - sink.expectError() shouldBe configEx - - always(20.milliseconds) { - doneF.isCompleted shouldBe false - } - promise.success(Done) - doneF.futureValue - } - - "propagate cancellations" in assertAllStagesStopped { - def mkElem(i: Int): Elem = Elem(Bucket(i, 0), s"$i") - - val pulledElems = new AtomicReference[Seq[Int]](Seq.empty[Int]) - def observeElem(i: Int): Int = { - logger.debug(s"Observing element $i") - pulledElems.updateAndGet(_ :+ i) - i - } - - val ops = mkOps(0) { (name, _, _, _) => - Source(1 to 10).map(observeElem).map(mkElem).watchTermination() { (_, doneF) => - (noOpKillSwitch, doneF, matFor(name)) - } - } - val configSourceProbe = TestSource.probe[OrderedBucketMergeConfig[Name, Config]] - val ((configSource, doneF), sink) = - configSourceProbe.viaMat(mkHub(ops))(Keep.both).toMat(TestSink.probe)(Keep.both).run() - - val config = OrderedBucketMergeConfig(PositiveInt.one, NonEmpty(Map, "one" -> 1)) - configSource.sendNext(config) - - sink.request(2) - sink.expectNext() shouldBe NewConfiguration(addMaterialized(config, "one"), 0) - sink.expectNext() shouldBe OutputElement(NonEmpty(Map, "one" -> mkElem(1))) - sink.cancel() - - configSource.expectCancellation() - doneF.futureValue - // We can't drain the sources upon cancellation :-( - pulledElems.get() shouldBe (1 to 2) - } - - "remember the prior element" in assertAllStagesStopped { - def mkElem(name: Name, i: Int): Elem = Elem(Bucket(i, 0), s"$name-$i") - val priors = new AtomicReference[Seq[(Name, Option[Elem])]](Seq.empty) - - val ops = mkOps(10) { (name, _, offset, prior) => - priors.updateAndGet(_ :+ (name -> prior)).discard - Source(offset + 1 to offset + 2) - .map(mkElem(name, _)) - .concat(Source.never) - .viaMat(KillSwitches.single)(Keep.right) - .watchTermination()((killSwitch, doneF) => (killSwitch, doneF, matFor(name))) - } - val ((configQueue, doneF), sink) = - Source.queue(1).viaMat(mkHub(ops))(Keep.both).toMat(TestSink.probe)(Keep.both).run() - - val config1 = OrderedBucketMergeConfig(PositiveInt.one, NonEmpty(Map, "one" -> 1)) - configQueue.offer(config1) shouldBe Enqueued - - sink.request(100) - sink.expectNext(NewConfiguration(addMaterialized(config1, "one"), 10)) - sink.expectNext(OutputElement(NonEmpty(Map, "one" -> mkElem("one", 11)))) - sink.expectNext(OutputElement(NonEmpty(Map, "one" -> mkElem("one", 12)))) - - val config2 = OrderedBucketMergeConfig(PositiveInt.one, NonEmpty(Map, "one" -> 1, "two" -> 1)) - configQueue.offer(config2) shouldBe Enqueued - sink.expectNext(NewConfiguration(addMaterialized(config2, "two"), 12)) - sink.expectNext(OutputElement(NonEmpty(Map, "two" -> mkElem("two", 13)))) - 
sink.expectNext(OutputElement(NonEmpty(Map, "two" -> mkElem("two", 14)))) - - val config3 = - OrderedBucketMergeConfig(PositiveInt.tryCreate(2), NonEmpty(Map, "one" -> 2, "two" -> 2)) - configQueue.offer(config3) shouldBe Enqueued - sink.expectNext(NewConfiguration(addMaterialized(config3, "one", "two"), 14)) - sink.expectNext( - OutputElement(NonEmpty(Map, "one" -> mkElem("one", 15), "two" -> mkElem("two", 15))) - ) - sink.expectNext( - OutputElement(NonEmpty(Map, "one" -> mkElem("one", 16), "two" -> mkElem("two", 16))) - ) - - configQueue.complete() - doneF.futureValue - - priors.get() should (equal( - Seq( - "one" -> None, - "two" -> Some(mkElem("one", 12)), - "one" -> Some(mkElem("two", 14)), - "two" -> Some(mkElem("two", 14)), - ) - // The order of the last two elements is non-deterministic - ) or equal( - Seq( - "one" -> None, - "two" -> Some(mkElem("one", 12)), - "two" -> Some(mkElem("two", 14)), - "one" -> Some(mkElem("two", 14)), - ) - )) - } - - "initially pass the prior element from Ops" in assertAllStagesStopped { - def mkElem(name: Name, i: Int): Elem = Elem(Bucket(i, 0), s"$name-$i") - - val priors = new AtomicReference[Seq[(Name, Option[Elem])]](Seq.empty) - - val ops = new OrderedBucketMergeHubOps[Name, Elem, Config, Offset, M] { - override type PriorElement = Elem - override type Bucket = OrderedBucketMergeHubTest.this.Bucket - override def prettyBucket: Pretty[Bucket] = implicitly[Pretty[Bucket]] - override def bucketOf(elem: Elem): Bucket = elem.bucket - override def orderingOffset: Ordering[Offset] = implicitly[Ordering[Offset]] - override def offsetOfBucket(bucket: Bucket): Offset = bucket.offset - override def initialOffset: Offset = 10 - override def priorElement: Option[Elem] = Some(mkElem("prior", 10)) - override def toPriorElement(output: OutputElement[Name, Elem]): Elem = output.elem.head1._2 - override def traceContextOf(elem: Elem): TraceContext = TraceContext.empty - override def makeSource( - name: Name, - config: Config, - startFrom: Offset, - priorElement: Option[Elem], - ): Source[Elem, (KillSwitch, Future[Done], M)] = { - priors.getAndUpdate(_ :+ (name -> priorElement)).discard - Source - .empty[Elem] - .viaMat(KillSwitches.single)(Keep.right) - .watchTermination()((killSwitch, doneF) => (killSwitch, doneF, matFor(name))) - } - } - - val ((configQueue, doneF), sink) = - Source.queue(1).viaMat(mkHub(ops))(Keep.both).toMat(TestSink.probe)(Keep.both).run() - - val config1 = - OrderedBucketMergeConfig(PositiveInt.one, NonEmpty(Map, "one" -> 1)) - configQueue.offer(config1) shouldBe Enqueued - - sink.request(10) - sink.expectNext(NewConfiguration(addMaterialized(config1, "one"), 10)) - sink.expectNext(ActiveSourceTerminated("one", None)) - configQueue.complete() - doneF.futureValue - - priors.get shouldBe Seq("one" -> Some(mkElem("prior", 10))) - } - - "logs an error when Ops throws" in assertAllStagesStopped { - class SourceCreationException(msg: String) extends Exception(msg) - - val ops = mkOps(10) { (name, _, offset, _) => - throw new SourceCreationException(s"Source creation failed for $name at $offset") - } - val ((configQueue, doneF), sink) = - Source.queue(1).viaMat(mkHub(ops))(Keep.both).toMat(TestSink.probe)(Keep.both).run() - - // The test will log the exception because of the TestEventListener having been registered, - // but this logger is not used in a normal deployment. 
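- // Mute the Pekko event stream for this exception type while the test runs; it is unmuted again at the end.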
- val filter = EventFilter[SourceCreationException]() - system.eventStream.publish(Mute(filter)) - - val config1 = OrderedBucketMergeConfig(PositiveInt.one, NonEmpty(Map, "one" -> 1)) - loggerFactory.assertLogs( - { - configQueue.offer(config1) shouldBe Enqueued - sink.request(1) - sink.expectError() - }, - logEntry => { - logEntry.errorMessage should include("OrderedBucketMergeHub.in onPush failed") - logEntry.throwable.value.getMessage should include("Source creation failed for one at 10") - }, - ) - doneF.futureValue - system.eventStream.publish(UnMute(filter)) - } - - "detect deadlocks correctly" in assertAllStagesStopped { - def mkElem(i: Int, d: Int, s: String): Elem = Elem(Bucket(i, d), s) - - val probes = TrieMap.empty[String, TestPublisher.Probe[Elem]] - - val ops = mkOps(0) { (name, config, _, _) => - TestSource.probe[Elem].viaMat(KillSwitches.single)(Keep.both).watchTermination() { - case ((probe, killSwitch), doneF) => - probes.put(s"$name-$config", probe) - (killSwitch, doneF, matFor(name)) - } - } - val sinkProbe = TestSink.probe[Output[Name, (Config, Option[M]), Elem, Offset]] - - val ((configQueue, doneF), sink) = - Source.queue(1).viaMat(mkHub(ops))(Keep.both).toMat(sinkProbe)(Keep.both).run() - val config1 = OrderedBucketMergeConfig( - PositiveInt.tryCreate(4), - NonEmpty( - Map, - "probe1" -> 0, - "probe2" -> 0, - "probe3" -> 0, - "probe4" -> 0, - "probe5" -> 0, - ), - ) - configQueue.offer(config1) shouldBe Enqueued - - // Wait until all five sources have been created - clue("Process NewConfiguration signal") { - sink.request(1) - sink.expectNext() shouldBe NewConfiguration( - addMaterialized(config1, "probe1", "probe2", "probe3", "probe4", "probe5"), - 0, - ) - } - sink.request(20) - - clue("Active source completions can trigger a deadlock notification") { - probes("probe1-0").sendNext(mkElem(1, 0, "p1-0")) - probes("probe3-0").sendNext(mkElem(1, 1, "p3-0")) - probes("probe4-0").sendComplete() - sink.expectNext() shouldBe ActiveSourceTerminated("probe4", None) - inside(sink.expectNext()) { - case DeadlockDetected(elems, DeadlockTrigger.ActiveSourceTermination) => - elems.toSet shouldBe Set( - "probe1" -> mkElem(1, 0, "p1-0"), - "probe3" -> mkElem(1, 1, "p3-0"), - ) - } - } - - clue("Further elements do not trigger yet another deadlock notification") { - probes("probe2-0").sendNext(mkElem(1, 0, "p2-0")) - sink.expectNoMessage(20.milliseconds) - - probes("probe5-0").sendNext(mkElem(1, 1, "p5-0")) - sink.expectNoMessage(20.milliseconds) - } - - clue("Reconfiguration can trigger a deadlock notification") { - val config2 = OrderedBucketMergeConfig( - PositiveInt.tryCreate(4), - NonEmpty( - Map, - "probe1" -> 0, - "probe2" -> 0, - "probe3" -> 0, - "probe5" -> 1, - ), - ) - configQueue.offer(config2) shouldBe Enqueued - sink.expectNext() shouldBe NewConfiguration(addMaterialized(config2, "probe5"), 0) - // Here, we're just missing one unbucketed source to reach the threshold - inside(sink.expectNext()) { case DeadlockDetected(elems, DeadlockTrigger.Reconfiguration) => - elems.toSet shouldBe Set( - "probe1" -> mkElem(1, 0, "p1-0"), - "probe2" -> mkElem(1, 0, "p2-0"), - "probe3" -> mkElem(1, 1, "p3-0"), - ) - } - probes("probe5-1").sendNext(mkElem(1, 2, "p5-1")) - - val config3 = OrderedBucketMergeConfig( - PositiveInt.tryCreate(4), - NonEmpty( - Map, - "probe1" -> 0, - "probe2" -> 1, - "probe3" -> 0, - "probe5" -> 1, - ), - ) - configQueue.offer(config3) shouldBe Enqueued - sink.expectNext() shouldBe NewConfiguration(addMaterialized(config3, "probe2"), 0) - // Now we're 
missing two unbucketed sources to reach the threshold - inside(sink.expectNext()) { case DeadlockDetected(elems, DeadlockTrigger.Reconfiguration) => - elems.toSet shouldBe Set( - "probe1" -> mkElem(1, 0, "p1-0"), - "probe3" -> mkElem(1, 1, "p3-0"), - "probe5" -> mkElem(1, 2, "p5-1"), - ) - } - } - - clue("Termination of a bucketed source can trigger a deadlock notification upon emission") { - val config4 = OrderedBucketMergeConfig( - PositiveInt.tryCreate(2), - NonEmpty( - Map, - "probe1" -> 1, - "probe3" -> 0, - ), - ) - configQueue.offer(config4) shouldBe Enqueued - sink.expectNext() shouldBe NewConfiguration(addMaterialized(config4, "probe1"), 0) - probes("probe3-0").sendComplete() - sink.expectNoMessage(20.milliseconds) - probes("probe1-1").sendNext(mkElem(1, 1, "p1-1")) - sink.expectNext() shouldBe OutputElement( - NonEmpty(Map, "probe1" -> mkElem(1, 1, "p1-1"), "probe3" -> mkElem(1, 1, "p3-0")) - ) - sink.expectNext() shouldBe ActiveSourceTerminated("probe3", None) - sink.expectNext() shouldBe DeadlockDetected( - Seq.empty, - DeadlockTrigger.ActiveSourceTermination, - ) - } - - clue("Abortion of a bucketed source can trigger a deadlock notification upon emission") { - val config5 = OrderedBucketMergeConfig( - PositiveInt.tryCreate(2), - NonEmpty( - Map, - "probe1" -> 1, - "probe3" -> 1, - ), - ) - configQueue.offer(config5) shouldBe Enqueued - sink.expectNext() shouldBe NewConfiguration(addMaterialized(config5, "probe3"), 1) - probes("probe1-1").sendNext(mkElem(2, 0, "p1-1")) - val sourceEx = new Exception("Source 1 failed") - probes("probe1-1").sendError(sourceEx) - sink.expectNoMessage(20.milliseconds) - probes("probe3-1").sendNext(mkElem(2, 0, "p3-1")) - sink.expectNext() shouldBe OutputElement( - NonEmpty(Map, "probe1" -> mkElem(2, 0, "p1-1"), "probe3" -> mkElem(2, 0, "p3-1")) - ) - sink.expectNext() shouldBe ActiveSourceTerminated("probe1", Some(sourceEx)) - sink.expectNext() shouldBe DeadlockDetected( - Seq.empty, - DeadlockTrigger.ActiveSourceTermination, - ) - } - - configQueue.complete() - sink.expectComplete() - doneF.futureValue - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/PathUtilsTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/PathUtilsTest.scala deleted file mode 100644 index 39f00cb76d..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/PathUtilsTest.scala +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.AnyWordSpec - -import java.nio.file.Paths - -final class PathUtilsTest extends AnyWordSpec with Matchers { - - import PathUtils.getFilenameWithoutExtension - import Paths.get as path - - "getFilenameWithoutExtension" should { - "extract filename without extension" when { - "getting an absolute path" in { - getFilenameWithoutExtension(path("/dir1/dir2/file.txt")) shouldBe "file" - } - "getting a relative path" in { - getFilenameWithoutExtension(path("dir1/dir2/file.txt")) shouldBe "file" - } - "getting a path relative to the current directory" in { - getFilenameWithoutExtension(path("./dir1/dir2/file.txt")) shouldBe "file" - } - "getting a path relative to the parent directory" in { - getFilenameWithoutExtension(path("../dir1/dir2/file.txt")) shouldBe "file" - } - "getting just a filename" in { - getFilenameWithoutExtension(path("file.txt")) shouldBe "file" - } - "getting a file starting with a dot" in { - getFilenameWithoutExtension(path(".file.txt")) shouldBe ".file" - } - "getting a file with multiple dots" in { - getFilenameWithoutExtension(path("/d.ir.1/di.r.2/file.a.b.c.txt")) shouldBe "file.a.b.c" - } - } - "leave the filename untouched" when { - "getting an empty path" in { - PathUtils.getFilenameWithoutExtension(path("")) shouldBe "" - } - "getting a filename without extension" in { - getFilenameWithoutExtension(path("file")) shouldBe "file" - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/PekkoUtilTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/PekkoUtilTest.scala deleted file mode 100644 index 01872d4e86..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/PekkoUtilTest.scala +++ /dev/null @@ -1,2170 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import cats.Eq -import cats.syntax.functorFilter.* -import com.daml.nonempty.NonEmpty -import com.daml.scalautil.Statement.discard -import com.digitalasset.canton.BaseTestWordSpec -import com.digitalasset.canton.concurrent.Threading -import com.digitalasset.canton.config.RequireTypes.NonNegativeInt -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.lifecycle.UnlessShutdown.{AbortedDueToShutdown, Outcome} -import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, UnlessShutdown} -import com.digitalasset.canton.logging.SuppressionRule -import com.digitalasset.canton.util.PekkoUtil.syntax.* -import com.digitalasset.canton.util.PekkoUtil.{ - ContextualizedFlowOps, - FutureQueue, - FutureQueueConsumer, - FutureQueuePullProxy, - IndexingFutureQueue, - PekkoSourceQueueToFutureQueue, - RecoveringFutureQueueImpl, - RecoveringQueueMetrics, - WithKillSwitch, - noOpKillSwitch, -} -import org.apache.pekko.stream.scaladsl.{Flow, Keep, Sink, Source} -import org.apache.pekko.stream.testkit.StreamSpec -import org.apache.pekko.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped -import org.apache.pekko.stream.testkit.scaladsl.{TestSink, TestSource} -import org.apache.pekko.stream.{KillSwitch, KillSwitches, OverflowStrategy} -import org.apache.pekko.{Done, NotUsed} -import org.scalacheck.Arbitrary -import org.scalatest.concurrent.PatienceConfiguration -import org.scalatest.time.Span - -import java.util.concurrent.Semaphore -import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicLong, AtomicReference} -import scala.collection.concurrent.TrieMap -import scala.collection.immutable.List -import scala.concurrent.duration.{DurationInt, FiniteDuration} -import scala.concurrent.{ExecutionContext, Future, Promise} -import scala.util.Random -import scala.util.control.NonFatal - -class PekkoUtilTest extends StreamSpec with BaseTestWordSpec { - import PekkoUtilTest.* - - // Override the implicit from PekkoSpec so that we don't get ambiguous implicits - override val patience: PatienceConfig = defaultPatience - - implicit val executionContext: ExecutionContext = system.dispatcher - - private def abortOn(trigger: Int)(x: Int): FutureUnlessShutdown[Int] = - FutureUnlessShutdown(Future { - if (x == trigger) UnlessShutdown.AbortedDueToShutdown - else UnlessShutdown.Outcome(x) - }) - - private def outcomes(length: Int, abortedFrom: Int): Seq[UnlessShutdown[Int]] = - (1 until (abortedFrom min (length + 1))).map(UnlessShutdown.Outcome.apply) ++ - Seq.fill((length - abortedFrom + 1) max 0)(UnlessShutdown.AbortedDueToShutdown) - - override val expectedTestDuration: FiniteDuration = 120 seconds - - "mapAsyncUS" when { - "parallelism is 1" should { - "run everything sequentially" in assertAllStagesStopped { - val currentParallelism = new AtomicInteger(0) - val maxParallelism = new AtomicInteger(0) - - val source = Source(1 to 10).mapAsyncUS(parallelism = 1) { elem => - FutureUnlessShutdown(Future { - val nextCurrent = currentParallelism.addAndGet(1) - maxParallelism.getAndUpdate(_ max nextCurrent) - Thread.`yield`() - Threading.sleep(10) - currentParallelism.addAndGet(-1) - UnlessShutdown.Outcome(elem) - }) - } - source.runWith(Sink.seq).futureValue should ===( - outcomes(10, 11) - ) - - maxParallelism.get shouldBe 1 - currentParallelism.get shouldBe 0 - } - - "emit only AbortedDueToShutdown after the first" in assertAllStagesStopped { - val shutdownAt = 5 - val source = 
Source(1 to 10).mapAsyncUS(parallelism = 1)(abortOn(shutdownAt)) - source.runWith(Sink.seq).futureValue should - ===(outcomes(10, shutdownAt)) - } - - "stop evaluation upon the first AbortedDueToShutdown" in assertAllStagesStopped { - val evaluationCount = new AtomicInteger(0) - val shutdownAt = 5 - val source = Source(1 to 10).mapAsyncUS(parallelism = 1) { elem => - evaluationCount.addAndGet(1).discard[Int] - abortOn(shutdownAt)(elem) - } - source.runWith(Sink.seq).futureValue should - ===(outcomes(10, shutdownAt)) - evaluationCount.get shouldBe shutdownAt - } - - "drain the source" in assertAllStagesStopped { - val evaluationCount = new AtomicInteger(0) - val source = Source(1 to 10).map { elem => - evaluationCount.addAndGet(1).discard[Int] - elem - } - val shutdownAt = 6 - val mapped = source.mapAsyncUS(parallelism = 1)(abortOn(shutdownAt)) - mapped.runWith(Sink.seq).futureValue should - ===(outcomes(10, shutdownAt)) - evaluationCount.get shouldBe 10 - } - } - - "parallelism is greater than 1" should { - "run several futures in parallel" in assertAllStagesStopped { - val parallelism = 4 - require(parallelism > 1) - val semaphores = Seq.fill(parallelism)(new Semaphore(1)) - semaphores.foreach(_.acquire()) - - val currentParallelism = new AtomicInteger(0) - val maxParallelism = new AtomicInteger(0) - - val source = Source(1 to 10 * parallelism).mapAsyncUS(parallelism) { elem => - FutureUnlessShutdown(Future { - val nextCurrent = currentParallelism.addAndGet(1) - maxParallelism.getAndUpdate(_ max nextCurrent) - - val index = elem % parallelism - semaphores(index).release() - semaphores((index + 1) % parallelism).acquire() - Thread.`yield`() - Threading.sleep(10) - currentParallelism.addAndGet(-1) - UnlessShutdown.Outcome(elem) - }) - } - source.runWith(Sink.seq).futureValue should ===( - (1 to 10 * parallelism).map(UnlessShutdown.Outcome.apply) - ) - // The above synchronization allows for some futures finishing before others are started - // but at least two must run in parallel. 
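- // The two assertions below capture exactly that: the observed parallelism never exceeds the configured limit, yet reaches at least 2.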
- maxParallelism.get shouldBe <=(parallelism) - maxParallelism.get shouldBe >=(2) - currentParallelism.get shouldBe 0 - } - - "emit only AbortedDueToShutdown after the first" in assertAllStagesStopped { - val shutdownAt = 4 - val source = Source(1 to 10).mapAsyncUS(parallelism = 3) { elem => - val outcome = - if (elem == shutdownAt) UnlessShutdown.AbortedDueToShutdown - else UnlessShutdown.Outcome(elem) - FutureUnlessShutdown.lift(outcome) - } - source.runWith(Sink.seq).futureValue should ===( - (1 until shutdownAt).map(UnlessShutdown.Outcome.apply) ++ - Seq.fill(10 - shutdownAt + 1)(UnlessShutdown.AbortedDueToShutdown) - ) - } - - "drain the source" in assertAllStagesStopped { - val evaluationCount = new AtomicInteger(0) - val source = Source(1 to 10).map { elem => - evaluationCount.addAndGet(1).discard[Int] - elem - } - val shutdownAt = 6 - val mapped = source.mapAsyncUS(parallelism = 10)(abortOn(shutdownAt)) - mapped.runWith(Sink.seq).futureValue should - ===(outcomes(10, shutdownAt)) - evaluationCount.get shouldBe 10 - } - } - } - - "mapAsyncAndDrainUS" should { - "stop upon the first AbortedDueToShutdown" in assertAllStagesStopped { - val shutdownAt = 3 - val source = Source(1 to 10).mapAsyncAndDrainUS(parallelism = 3)(abortOn(shutdownAt)) - source.runWith(Sink.seq).futureValue should - ===(1 until shutdownAt) - } - - "drain the source" in assertAllStagesStopped { - val evaluationCount = new AtomicInteger(0) - val source = Source(1 to 10).map { elem => - evaluationCount.addAndGet(1).discard[Int] - elem - } - val shutdownAt = 5 - val mapped = source.mapAsyncAndDrainUS(parallelism = 1)(abortOn(shutdownAt)) - mapped.runWith(Sink.seq).futureValue should - ===(1 until shutdownAt) - evaluationCount.get shouldBe 10 - } - } - - "restartSource" should { - case class RetryCallArgs( - lastState: Int, - lastEmittedElement: Option[Int], - lastFailure: Option[Throwable], - ) - - def withoutKillSwitch[A](source: Source[A, NotUsed]): Source[A, (KillSwitch, Future[Done])] = - source.mapMaterializedValue(_ => noOpKillSwitch -> Future.successful(Done)) - - "restart upon normal completion" in assertAllStagesStopped { - def mkSource(s: Int): Source[Int, (KillSwitch, Future[Done])] = - withoutKillSwitch(Source(s until s + 3)) - val lastStates = new AtomicReference[Seq[Int]](Seq.empty[Int]) - val policy = new PekkoUtil.RetrySourcePolicy[Int, Int] { - override def shouldRetry( - lastState: Int, - lastEmittedElement: Option[Int], - lastFailure: Option[Throwable], - ): Option[(FiniteDuration, Int)] = { - lastStates.updateAndGet(states => states :+ lastState) - Option.when(lastState < 10)((0.seconds, lastState + 3)) - } - } - - val ((_killSwitch, doneF), retrievedElemsF) = PekkoUtil - .restartSource("restart-upon-completion", 1, mkSource, policy) - .toMat(Sink.seq)(Keep.both) - .run() - retrievedElemsF.futureValue.map(_.value) shouldBe (1 to 12) - - doneF.futureValue - lastStates.get() shouldBe Seq(1, 4, 7, 10) - } - - "restart with a delay" in assertAllStagesStopped { - def mkSource(s: Int): Source[Int, (KillSwitch, Future[Done])] = - withoutKillSwitch(Source(s until s + 3)) - val delay = 200.milliseconds - val policy = new PekkoUtil.RetrySourcePolicy[Int, Int] { - override def shouldRetry( - lastState: Int, - lastEmittedElement: Option[Int], - lastFailure: Option[Throwable], - ): Option[(FiniteDuration, Int)] = - Option.when(lastEmittedElement.forall(_ < 10))((delay, lastState + 3)) - } - - val stream = PekkoUtil - .restartSource("restart-with-delay", 1, mkSource, policy) - .toMat(Sink.seq)(Keep.both) - - val 
start = System.nanoTime() - val ((_killSwitch, doneF), retrievedElemsF) = stream.run() - retrievedElemsF.futureValue.map(_.value) shouldBe (1 to 12) - val stop = System.nanoTime() - (stop - start) shouldBe >=(3 * delay.toNanos) - doneF.futureValue - } - - "deal with empty sources" in assertAllStagesStopped { - val shouldRetryCalls = new AtomicReference[Seq[RetryCallArgs]](Seq.empty[RetryCallArgs]) - - def mkSource(s: Int): Source[Int, (KillSwitch, Future[Done])] = - withoutKillSwitch(if (s > 3) Source(1 until 3) else Source.empty[Int]) - val policy = new PekkoUtil.RetrySourcePolicy[Int, Int] { - override def shouldRetry( - lastState: Int, - lastEmittedElement: Option[Int], - lastFailure: Option[Throwable], - ): Option[(FiniteDuration, Int)] = { - shouldRetryCalls - .updateAndGet(RetryCallArgs(lastState, lastEmittedElement, lastFailure) +: _) - .discard - Option.when(lastState < 5)((0.seconds, lastState + 1)) - } - } - val ((_killSwitch, doneF), retrievedElemsF) = PekkoUtil - .restartSource("restart-with-delay", 1, mkSource, policy) - .toMat(Sink.seq)(Keep.both) - .run() - retrievedElemsF.futureValue.map(_.value) shouldBe Seq(1, 2, 1, 2) - doneF.futureValue - shouldRetryCalls.get().foreach { - case RetryCallArgs(lastState, lastEmittedElement, lastFailure) => - lastFailure shouldBe None - lastEmittedElement shouldBe Option.when(lastState > 3)(2) - } - } - - "propagate errors" in assertAllStagesStopped { - case class StreamFailure(i: Int) extends Exception(i.toString) - val shouldRetryCalls = new AtomicReference[Seq[RetryCallArgs]](Seq.empty[RetryCallArgs]) - - def mkSource(s: Int): Source[Int, (KillSwitch, Future[Done])] = - withoutKillSwitch( - if (s % 2 == 0) Source.failed[Int](StreamFailure(s)) else Source.single(10 + s) - ) - val policy = new PekkoUtil.RetrySourcePolicy[Int, Int] { - override def shouldRetry( - lastState: Int, - lastEmittedElement: Option[Int], - lastFailure: Option[Throwable], - ): Option[(FiniteDuration, Int)] = { - shouldRetryCalls - .updateAndGet(RetryCallArgs(lastState, lastEmittedElement, lastFailure) +: _) - .discard - Option.when(lastState < 5)((0.seconds, lastState + 1)) - } - } - val ((_killSwitch, doneF), retrievedElemsF) = PekkoUtil - .restartSource("restart-propagate-error", 1, mkSource, policy) - .toMat(Sink.seq)(Keep.both) - .run() - retrievedElemsF.futureValue.map(_.value) shouldBe Seq(11, 13, 15) - doneF.futureValue - - shouldRetryCalls.get().foreach { - case RetryCallArgs(lastState, lastEmittedElement, lastFailure) => - lastFailure shouldBe Option.when(lastState % 2 == 0)(StreamFailure(lastState)) - lastEmittedElement shouldBe Option.when(lastState % 2 != 0)(10 + lastState) - } - } - - "stop upon pulling the kill switch" in assertAllStagesStopped { - val pulledKillSwitchAt = new SingleUseCell[Int] - val pullKillSwitch = new SingleUseCell[KillSwitch] - - def mkSource(s: Int): Source[Int, (KillSwitch, Future[Done])] = - withoutKillSwitch(Source.single(s)) - val policy = new PekkoUtil.RetrySourcePolicy[Int, Int] { - override def shouldRetry( - lastState: Int, - lastEmittedElement: Option[Int], - lastFailure: Option[Throwable], - ): Option[(FiniteDuration, Int)] = { - pullKillSwitch.get.foreach { killSwitch => - if (lastState > 10) { - pulledKillSwitchAt.putIfAbsent(lastState) - killSwitch.shutdown() - } - } - Some((1.millisecond, lastState + 1)) - } - } - val ((killSwitch, doneF), retrievedElemsF) = PekkoUtil - .restartSource("restart-stop-on-kill", 1, mkSource, policy) - .toMat(Sink.seq)(Keep.both) - .run() - pullKillSwitch.putIfAbsent(killSwitch) - val 
retrievedElems = retrievedElemsF.futureValue.map(_.value) - val lastRetry = pulledKillSwitchAt.get.value - lastRetry shouldBe >(10) - retrievedElems shouldBe (1 to lastRetry) - doneF.futureValue - } - - "can pull the kill switch from within the stream" in assertAllStagesStopped { - def mkSource(s: Int): Source[Int, (KillSwitch, Future[Done])] = - Source - .fromIterator(() => Iterator.from(s)) - .viaMat(KillSwitches.single)(Keep.right) - .watchTermination()(Keep.both) - - val policy = PekkoUtil.RetrySourcePolicy.never[Int, Int] - val ((_killSwitch, doneF), sink) = PekkoUtil - .restartSource("close-inner-source", 1, mkSource, policy) - .map { case elemWithKillSwitch @ WithKillSwitch(elem) => - if (elem == 4) elemWithKillSwitch.killSwitch.shutdown() - elem - } - .toMat(TestSink.probe)(Keep.both) - .run() - sink.request(4) - sink.expectNext(1).expectNext(2).expectNext(3).expectNext(4) - sink.request(10) - // There's still an element somewhere in the stream that gets delivered first - sink.expectNext(5) - sink.expectComplete() - } - - "abort the delay when the KillSwitch is closed" in assertAllStagesStopped { - def mkSource(s: Int): Source[Int, (KillSwitch, Future[Done])] = - withoutKillSwitch(Source.single(s)) - val pullKillSwitch = new SingleUseCell[KillSwitch] - val longBackoff = 10.seconds - - val policy = new PekkoUtil.RetrySourcePolicy[Int, Int] { - override def shouldRetry( - lastState: Int, - lastEmittedElement: Option[Int], - lastFailure: Option[Throwable], - ): Option[(FiniteDuration, Int)] = { - pullKillSwitch.get.foreach { killSwitch => - if (lastState > 10) killSwitch.shutdown() - } - val backoff = - if (pullKillSwitch.isEmpty || lastState <= 10) 1.millisecond else longBackoff - Some((backoff, lastState + 1)) - } - } - val graph = PekkoUtil - .restartSource("restart-stop-immediately-on-kill-switch", 1, mkSource, policy) - .toMat(Sink.seq)(Keep.both) - val start = System.nanoTime() - val ((killSwitch, doneF), retrievedElemsF) = graph.run() - pullKillSwitch.putIfAbsent(killSwitch) - doneF.futureValue - val stop = System.nanoTime() - (stop - start) shouldBe <(longBackoff.toNanos) - retrievedElemsF.futureValue.map(_.value) shouldBe (1 to 11) - } - - "the completion future awaits the retry to finish" in assertAllStagesStopped { - def mkSource(s: Int): Source[Int, (KillSwitch, Future[Done])] = - withoutKillSwitch(Source.single(s)) - - val pullKillSwitch = new SingleUseCell[KillSwitch] - val policyDelayPromise = Promise[Unit]() - - val policy = new PekkoUtil.RetrySourcePolicy[Int, Int] { - override def shouldRetry( - lastState: Int, - lastEmittedElement: Option[Int], - lastFailure: Option[Throwable], - ): Option[(FiniteDuration, Int)] = { - pullKillSwitch.get.foreach { killSwitch => - if (lastState > 10) { - killSwitch.shutdown() - policyDelayPromise.future.futureValue - } - } - Some((1.millisecond, lastState + 1)) - } - } - val ((killSwitch, doneF), retrievedElemsF) = PekkoUtil - .restartSource("restart-synchronize-retry", 1, mkSource, policy) - .toMat(Sink.seq)(Keep.both) - .run() - pullKillSwitch.putIfAbsent(killSwitch) - retrievedElemsF.futureValue - // The retry policy is still running as we haven't yet completed the promise, - // so the completion future must not be completed yet - always(durationOfSuccess = 1.second) { - doneF.isCompleted shouldBe false - } - policyDelayPromise.success(()) - doneF.futureValue - } - - "close the current source when the kill switch is pulled" in assertAllStagesStopped { - def mkSource(s: Int): Source[Int, (KillSwitch, Future[Done])] = - Source - 
.fromIterator(() => Iterator.from(s)) - .viaMat(KillSwitches.single)(Keep.right) - .watchTermination()(Keep.both) - - val policy = PekkoUtil.RetrySourcePolicy.never[Int, Int] - val ((killSwitch, doneF), sink) = PekkoUtil - .restartSource("close-inner-source", 1, mkSource, policy) - .map(_.value) - .toMat(TestSink.probe)(Keep.both) - .run() - sink.request(4) - sink.expectNext(1).expectNext(2).expectNext(3).expectNext(4) - killSwitch.shutdown() - sink.request(10) - // There's still an element somewhere in the stream that gets delivered first - sink.expectNext(5) - sink.expectComplete() - } - - "await the source's completion futures" in assertAllStagesStopped { - val doneP: scala.collection.concurrent.Map[Int, Promise[Done]] = - new TrieMap[Int, Promise[Done]] - - def mkSource(s: Int): Source[Int, (KillSwitch, Future[Done])] = - Source.single(s).viaMat(KillSwitches.single) { (_, killSwitch) => - val newPromise = Promise[Done]() - val promise = doneP.putIfAbsent(s, newPromise).getOrElse(newPromise) - killSwitch -> promise.future - } - - val policy = new PekkoUtil.RetrySourcePolicy[Int, Int] { - override def shouldRetry( - lastState: Int, - lastEmittedElement: Option[Int], - lastFailure: Option[Throwable], - ): Option[(FiniteDuration, Int)] = - Some((1.millisecond, lastState + 1)) - } - - val ((killSwitch, doneF), sink) = PekkoUtil - .restartSource("await completion of inner sources", 1, mkSource, policy) - .map(_.value) - .toMat(TestSink.probe)(Keep.both) - .run() - - sink.request(3) - sink.expectNext(1) - doneP.size should be >= 1 - sink.expectNext(2) - doneP.size should be >= 2 - sink.expectNext(3) - logger.debug("Stopping the restart source via the kill switch") - killSwitch.shutdown() - // Ask for another element to make sure that flatMapConcat inside restartSource notices - // that the current source has been completed (via its kill switch) - // Otherwise, the stream may remain open and doneF never completes - sink.request(1) - - always(durationOfSuccess = 100.milliseconds) { - doneF.isCompleted shouldBe false - } - - doneP.remove(3).foreach(_.success(Done)) - doneP.remove(2).foreach(_.success(Done)) - - always(durationOfSuccess = 100.milliseconds) { - doneF.isCompleted shouldBe false - } - - // Now complete the remaining promises - doneP.foreachEntry((_, promise) => promise.success(Done)) - doneF.futureValue - } - - "log errors thrown during the retry step and complete the stream" in assertAllStagesStopped { - def mkSource(s: Int): Source[Int, (KillSwitch, Future[Done])] = - withoutKillSwitch(Source.single(s)) - val exception = new Exception("Retry policy failure") - val policy = new PekkoUtil.RetrySourcePolicy[Int, Int] { - override def shouldRetry( - lastState: Int, - lastEmittedElement: Option[Int], - lastFailure: Option[Throwable], - ): Option[(FiniteDuration, Int)] = { - if (lastState > 3) throw exception - Some((0.milliseconds, lastState + 1)) - } - } - val name = "restart-log-error" - val graph = PekkoUtil - .restartSource(name, 1, mkSource, policy) - .toMat(Sink.seq)(Keep.both) - val retrievedElems = loggerFactory.assertLogs( - { - val ((_killSwitch, doneF), retrievedElemsF) = graph.run() - doneF.futureValue - retrievedElemsF.futureValue - }, - entry => { - entry.errorMessage should include( - s"The retry policy for RestartSource $name failed with an error. Stop retrying." 
- ) - entry.throwable should contain(exception) - }, - // The log line from the flush - _.errorMessage should include(s"RestartSource $name at state 4 failed"), - ) - retrievedElems.map(_.value) shouldBe Seq(1, 2, 3, 4) - } - - "can pull the kill switch after retries have stopped" in assertAllStagesStopped { - def mkSource: Source[Int, (KillSwitch, Future[Done])] = - withoutKillSwitch(Source.empty[Int]) - val policy = new PekkoUtil.RetrySourcePolicy[Int, Int] { - override def shouldRetry( - lastState: Int, - lastEmittedElement: Option[Int], - lastFailure: Option[Throwable], - ): Option[(FiniteDuration, Int)] = None - } - val ((killSwitch, doneF), retrievedElemsF) = PekkoUtil - .restartSource("restart-kill-switch-after-complete", 1, (_: Int) => mkSource, policy) - .toMat(Sink.seq)(Keep.both) - .run() - retrievedElemsF.futureValue shouldBe Seq.empty - doneF.futureValue - killSwitch.shutdown() - } - } - - "withMaterializedValueMat" should { - "pass the materialized value into the stream" in assertAllStagesStopped { - val source = Source(1 to 10) - val (mat, fut) = source - .withMaterializedValueMat(new AtomicInteger(1))(Keep.right) - .map { case (i, m) => m.addAndGet(i) } - // Add a buffer so that the map function executes even though the resulting element doesn't end up in the sink - .buffer(size = 2, OverflowStrategy.backpressure) - // Stop the stream early to test cancellation support - .take(5) - .toMat(Sink.seq)(Keep.both) - .run() - fut.futureValue should ===(Seq(2, 4, 7, 11, 16)) - mat.get shouldBe 22 - } - - "create a new value upon each materialization" in assertAllStagesStopped { - val stream = PekkoUtil - .withMaterializedValueMat(new AtomicInteger(0))(Source(1 to 5))(Keep.right) - .map { case (i, m) => m.addAndGet(i) } - .toMat(Sink.seq)(Keep.both) - - val (mat1, seq1) = stream.run() - val (mat2, seq2) = stream.run() - - // We get two different materialized atomic integers - mat1 shouldNot be(mat2) - - seq1.futureValue should ===(Seq(1, 3, 6, 10, 15)) - seq2.futureValue should ===(Seq(1, 3, 6, 10, 15)) - } - - "propagate errors down" in assertAllStagesStopped { - val ((source, mat), sink) = TestSource - .probe[Int] - .withMaterializedValueMat(new AtomicInteger(0))(Keep.both) - .map { case (i, m) => m.addAndGet(i) } - .buffer(2, OverflowStrategy.backpressure) - .toMat(TestSink.probe[Int])(Keep.both) - .run() - - sink.request(1) - source.sendNext(1) - sink.expectNext(1) - source.sendNext(2) - source.sendNext(3) - val ex = new Exception("Source error") - source.sendError(ex) - sink.expectError() should ===(ex) - - mat.get() should ===(6) - } - - "propagate errors up" in assertAllStagesStopped { - val ((source, mat), sink) = TestSource - .probe[Int] - .withMaterializedValueMat(new AtomicInteger(0))(Keep.both) - .map { case (i, m) => m.addAndGet(i) } - .buffer(2, OverflowStrategy.backpressure) - .toMat(TestSink.probe[Int])(Keep.both) - .run() - - sink.request(1) - source.sendNext(1) - sink.expectNext(1) - source.sendNext(2) - source.sendNext(3) - val ex = new Exception("Sink error") - sink.cancel(ex) - source.expectCancellationWithCause(ex) - - mat.get() should ===(6) - } - } - - "withUniqueKillSwitchMat" should { - "make the kill switch available inside the stream" in assertAllStagesStopped { - val (source, sink) = TestSource - .probe[Int] - .withUniqueKillSwitchMat()(Keep.left) - .map { elem => - if (elem.value > 0) elem.killSwitch.shutdown() - elem.value - } - .toMat(TestSink.probe)(Keep.both) - .run() - - sink.request(4) - source.expectRequest() shouldBe >=(3L) - 
source.sendNext(0).sendNext(-1).sendNext(2) - sink.expectNext(0).expectNext(-1).expectNext(2).expectComplete() - } - - "make the same kill switch available in the materialization" in assertAllStagesStopped { - val ((source, killSwitch), sink) = TestSource - .probe[Int] - .withUniqueKillSwitchMat()(Keep.both) - .toMat(TestSink.probe)(Keep.both) - .run() - sink.request(3) - source.sendNext(100) - sink.expectNext(WithKillSwitch(100)(killSwitch)) - killSwitch.shutdown() - source.expectCancellation() - sink.expectComplete() - } - - "propagate completions even without pulling the kill switch" in assertAllStagesStopped { - val (source, sink) = TestSource - .probe[Int] - .withUniqueKillSwitchMat()(Keep.left) - .toMat(TestSink.probe)(Keep.both) - .run() - - sink.request(1) - source.sendComplete() - sink.expectComplete() - } - - "propagate errors" in assertAllStagesStopped { - val ex = new Exception("Kill Switch") - val (source, sink) = TestSource - .probe[Int] - .withUniqueKillSwitchMat()(Keep.left) - .map { elem => - elem.killSwitch.abort(ex) - elem.value - } - .toMat(TestSink.probe)(Keep.both) - .run() - sink.request(1) - source.sendNext(1) - source.expectCancellationWithCause(ex) - // Since the kill switch is pulled from within the stream's handler, - // the OnNext message will arrive at the sink before the kill switch's - // OnError message goes through the flow that pulled the kill switch. - // So given Pekko's in-order delivery guarantees between actor pairs, - // the OnError will arrive after the OnNext. - sink.expectNext(1) - sink.expectError(ex) - } - } - - "takeUntilThenDrain" should { - "pass elements through until the first one satisfying the predicate" in assertAllStagesStopped { - val elemsF = Source - // Infinite source to test that we really stop - .fromIterator(() => Iterator.from(1)) - .withUniqueKillSwitchMat()(Keep.left) - .takeUntilThenDrain(_ >= 5) - .runWith(Sink.seq) - elemsF.futureValue.map(_.value) shouldBe (1 to 5) - } - - "pass all elements if the condition never fires" in assertAllStagesStopped { - val elemsF = Source(1 to 10) - .withUniqueKillSwitchMat()(Keep.left) - .takeUntilThenDrain(_ => false) - .runWith(Sink.seq) - elemsF.futureValue.map(_.value) shouldBe (1 to 10) - } - - "drain the source" in assertAllStagesStopped { - val observed = new AtomicReference[Seq[Int]](Seq.empty[Int]) - val elemsF = Source(1 to 10) - .map { i => - observed.getAndUpdate(_ :+ i) - withNoOpKillSwitch(i) - } - .takeUntilThenDrain(_ >= 5) - .runWith(Sink.seq) - elemsF.futureValue.map(_.value) shouldBe (1 to 5) - observed.get() shouldBe (1 to 10) - } - } - - // Sanity check that the construction in GrpcSequencerClientTransportPekko works - "concatLazy + Source.lazySingle" should { - "not produce the lazy single element upon an error" in { - val evaluated = new AtomicBoolean() - val (source, sink) = TestSource - .probe[String] - .concatLazy(Source.lazySingle { () => - evaluated.set(true) - "sentinel" - }) - .recover { case NonFatal(e) => e.getMessage } - .toMat(TestSink.probe)(Keep.both) - .run() - sink.request(5) - source.sendNext("one") - sink.expectNext("one") - val ex = new Exception("Source error") - source.sendError(ex) - sink.expectNext("Source error") - sink.expectComplete() - - evaluated.get() shouldBe false - } - } - - "remember" should { - "immediately emit elements" in assertAllStagesStopped { - val (source, sink) = - TestSource.probe[Int].remember(NonNegativeInt.one).toMat(TestSink.probe)(Keep.both).run() - sink.request(5) - source.sendNext(1) - sink.expectNext(NonEmpty(Seq, 1))
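- // With a memory of one, every emission contains the current element plus at most one predecessor, i.e. a sliding window of size 2.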
- source.sendNext(2) - sink.expectNext(NonEmpty(Seq, 1, 2)) - source.sendNext(3) - sink.expectNext(NonEmpty(Seq, 2, 3)) - source.sendComplete() - sink.expectComplete() - } - - "handle the empty source" in assertAllStagesStopped { - val sinkF = Source.empty[Int].remember(NonNegativeInt.one).toMat(Sink.seq)(Keep.right).run() - sinkF.futureValue shouldBe Seq.empty - } - - "support no memory" in assertAllStagesStopped { - val sinkF = Source(1 to 10).remember(NonNegativeInt.zero).toMat(Sink.seq)(Keep.right).run() - sinkF.futureValue shouldBe ((1 to 10).map(NonEmpty(Seq, _))) - } - - "support completion while the memory is not exhausted" in assertAllStagesStopped { - val sinkF = - Source(1 to 5).remember(NonNegativeInt.tryCreate(10)).toMat(Sink.seq)(Keep.right).run() - sinkF.futureValue shouldBe (1 to 5).inits.toSeq.reverse.mapFilter(NonEmpty.from) - } - - "propagate errors" in assertAllStagesStopped { - val ex = new Exception("Remember failure") - val doneF = - Source.failed(ex).remember(NonNegativeInt.one).toMat(Sink.ignore)(Keep.right).run() - doneF.failed.futureValue shouldBe ex - } - - "work for sources and flows" in assertAllStagesStopped { - // this is merely a compilation test, so no need to run the graphs - val flow = Flow[Int] - .remember(NonNegativeInt.tryCreate(5)) - .mapAsyncAndDrainUS(1)(xs => FutureUnlessShutdown.pure(xs.size)) - .withUniqueKillSwitchMat()(Keep.right) - .takeUntilThenDrain(_ > 0) - - val source = Source - .empty[Int] - .remember(NonNegativeInt.tryCreate(5)) - .mapAsyncAndDrainUS(1)(xs => FutureUnlessShutdown.pure(xs.size)) - .withUniqueKillSwitchMat()(Keep.right) - .takeUntilThenDrain(_ > 0) - .map(_.value) - .via(flow) - - source.to(Sink.seq[WithKillSwitch[Int]]) - - succeed - } - } - - "dropIf" should { - "drop only elements that satisfy the condition" in assertAllStagesStopped { - val elemF = Source(1 to 10).dropIf(3)(_ % 2 == 0).toMat(Sink.seq)(Keep.right).run() - elemF.futureValue shouldBe Seq(1, 3, 5, 7, 8, 9, 10) - } - - "ignore negative counts" in assertAllStagesStopped { - val elemF = Source(1 to 10).dropIf(-1)(_ => false).toMat(Sink.seq)(Keep.right).run() - elemF.futureValue shouldBe (1 to 10) - } - } - - "statefulMapAsyncContextualizedUS" should { - "work with singleton contexts" in assertAllStagesStopped { - val sinkF = Source(Seq(None, Some(2), Some(4))).contextualize - .statefulMapAsyncContextualizedUS(1)((acc, _, i) => - FutureUnlessShutdown.pure((acc + i, acc)) - ) - .toMat(Sink.seq)(Keep.right) - .run() - - sinkF.futureValue shouldBe Seq(None, Some(Outcome(1)), Some(Outcome(3))) - } - - "work with nested contexts" in assertAllStagesStopped { - val source = - Source[Option[(String, Int)]](Seq(Some("context1" -> 1), Some("context2" -> 2))) - // Nested contexts require a bit more manual work. 
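- // We compose the SingletonTraverse instances for Option and (String, *) by hand so that the String context is threaded through each element.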
- implicit val traverse: SingletonTraverse.Aux[Lambda[a => Option[(String, a)]], String] = - SingletonTraverse[Option].composeWith(SingletonTraverse[(String, *)])(Keep.right) - val sinkF = ContextualizedFlowOps - .contextualize[Lambda[`+a` => Option[(String, a)]]](source) - .statefulMapAsyncContextualizedUS(2)((acc, string, i) => - FutureUnlessShutdown.pure((acc + i, acc * i + string.length)) - ) - .toMat(Sink.seq)(Keep.right) - .run() - sinkF.futureValue shouldBe Seq( - Some("context1" -> Outcome(2 * 1 + 8)), - Some("context2" -> Outcome(3 * 2 + 8)), - ) - } - - "propagate AbortedDueToShutdown" in assertAllStagesStopped { - val source = Source(Seq(Left("left1"), Right(1), Right(2), Right(3), Left("left2"), Right(4))) - val sinkF = source.contextualize - .statefulMapAsyncContextualizedUS(1)((acc, _, i) => - if (i == 3) FutureUnlessShutdown.abortedDueToShutdown - else FutureUnlessShutdown.pure((acc + i, acc * i)) - ) - .toMat(Sink.seq)(Keep.right) - .run() - sinkF.futureValue shouldBe Seq( - Left("left1"), - Right(Outcome(1)), - Right(Outcome(4)), - Right(AbortedDueToShutdown), - Left("left2"), - Right(AbortedDueToShutdown), - ) - } - - "work for flows" in assertAllStagesStopped { - Flow[Option[Int]].contextualize.statefulMapAsyncContextualizedUS("abc")( - (_: String, _: Unit, _: Int) => ??? - ) - - // The extension method contextualizes only over the outer-most type constructor - Flow[Either[Int, Option[String]]].contextualize - .statefulMapAsyncContextualizedUS(3L)((_: Long, _: Unit, _: Option[String]) => ???) - - implicit val traverse: SingletonTraverse.Aux[Lambda[a => Either[Int, Option[a]]], Unit] = - SingletonTraverse[Either[Int, *]].composeWith(SingletonTraverse[Option])(Keep.right) - ContextualizedFlowOps - .contextualize[Lambda[`+a` => Either[Int, Option[a]]]](Flow[Either[Int, Option[String]]]) - .statefulMapAsyncContextualizedUS(3L)((_: Long, _: Unit, _: String) => ???) 
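// None of the flows above are materialized; as with the sources, these lines only need to typecheck, which is why the user functions can be left as ???.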
- - succeed - } - } - - "WithKillSwitch satisfies the SingletonTraverse laws" should { - checkAllLaws( - "WithKillSwitch", - SingletonTraverseTests[WithKillSwitch].singletonTraverse[Int, Int, Int, Int, Option, Option], - ) - } - - "RecoveringFutureQueueImpl" should { - - "only complete the firstSuccessfulConsumerInitialization after the first successful initialization" in assertAllStagesStopped { - val firstFail = Promise[Unit]() - val secondFail = Promise[Unit]() - val thirdFail = Promise[Unit]() - val finalSucceed = Promise[Unit]() - val firstFailed = Promise[Unit]() - val secondFailed = Promise[Unit]() - val thirdFailed = Promise[Unit]() - val finalSucceeded = Promise[Unit]() - val recoveringQueue = new RecoveringFutureQueueImpl[Int]( - maxBlockedOffer = 1, - bufferSize = 20, - loggerFactory = loggerFactory, - retryStategy = PekkoUtil.exponentialRetryWithCap( - minWait = 2, - multiplier = 2, - cap = 10, - ), - retryAttemptWarnThreshold = 100, - retryAttemptErrorThreshold = 200, - uncommittedWarnTreshold = 100, - recoveringQueueMetrics = RecoveringQueueMetrics.NoOp, - consumerFactory = commit => - if (finalSucceeded.isCompleted) - fail("Should not get so far") - else if (thirdFailed.isCompleted) - finalSucceed.future.map { _ => - finalSucceeded.trySuccess(()) - val (sourceQueue, sourceDone) = Source - .queue[(Long, Int)](20, OverflowStrategy.backpressure, 1) - .map { elem => - commit(elem._1) - } - .toMat(Sink.ignore)(Keep.both) - .run() - FutureQueueConsumer( - futureQueue = new PekkoSourceQueueToFutureQueue( - sourceQueue = sourceQueue, - sourceDone = sourceDone, - loggerFactory = loggerFactory, - ), - fromExclusive = 0, - ) - } - else if (secondFailed.isCompleted) - thirdFail.future.map { _ => - thirdFailed.trySuccess(()) - throw new Exception("boom") - } - else if (firstFailed.isCompleted) - secondFail.future.map { _ => - secondFailed.trySuccess(()) - throw new Exception("boom") - } - else - firstFail.future.map { _ => - firstFailed.trySuccess(()) - throw new Exception("boom") - }, - ) - recoveringQueue.firstSuccessfulConsumerInitialization.isCompleted shouldBe false - firstFail.trySuccess(()) - firstFailed.future.futureValue - recoveringQueue.firstSuccessfulConsumerInitialization.isCompleted shouldBe false - secondFail.trySuccess(()) - secondFailed.future.futureValue - recoveringQueue.firstSuccessfulConsumerInitialization.isCompleted shouldBe false - thirdFail.trySuccess(()) - thirdFailed.future.futureValue - recoveringQueue.firstSuccessfulConsumerInitialization.isCompleted shouldBe false - finalSucceed.trySuccess(()) - finalSucceeded.future.futureValue - recoveringQueue.firstSuccessfulConsumerInitialization.futureValue - recoveringQueue.shutdown() - recoveringQueue.done.futureValue - } - - "fail firstSuccessfulConsumerInitialization if shutdown comes earlier" in assertAllStagesStopped { - val consumerPromise = Promise[FutureQueueConsumer[Int]]() - val recoveringQueue = new RecoveringFutureQueueImpl[Int]( - maxBlockedOffer = 1, - bufferSize = 20, - loggerFactory = loggerFactory, - retryStategy = PekkoUtil.exponentialRetryWithCap( - minWait = 2, - multiplier = 2, - cap = 10, - ), - retryAttemptWarnThreshold = 100, - retryAttemptErrorThreshold = 200, - uncommittedWarnTreshold = 100, - recoveringQueueMetrics = RecoveringQueueMetrics.NoOp, - consumerFactory = _ => consumerPromise.future, - ) - Threading.sleep(10) - recoveringQueue.firstSuccessfulConsumerInitialization.isCompleted shouldBe false - recoveringQueue.shutdown() - consumerPromise.failure(new Exception("failed to initialize")) 
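// Failing the pending initialization after shutdown lets the queue wind down: done must still complete, and firstSuccessfulConsumerInitialization must fail (asserted below).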
- recoveringQueue.done.futureValue - recoveringQueue.firstSuccessfulConsumerInitialization.failed.futureValue - } - - "block offer if buffer is full" in assertAllStagesStopped { - val received = new AtomicReference[Vector[(Long, Int)]](Vector.empty) - val offerGated = Promise[Unit]() - val recoveringQueue = new RecoveringFutureQueueImpl[Int]( - maxBlockedOffer = 2, - bufferSize = 2, - loggerFactory = loggerFactory, - retryStategy = PekkoUtil.exponentialRetryWithCap( - minWait = 2, - multiplier = 2, - cap = 10, - ), - retryAttemptWarnThreshold = 100, - retryAttemptErrorThreshold = 200, - uncommittedWarnTreshold = 100, - recoveringQueueMetrics = RecoveringQueueMetrics.NoOp, - consumerFactory = _ => - Future { - FutureQueueConsumer( - futureQueue = new FutureQueue[(Long, Int)] { - private val shutdownPromise = Promise[Unit]() - - override def offer(elem: (Long, Int)): Future[Done] = - offerGated.future.map { _ => - discard(received.accumulateAndGet(Vector(elem), _ ++ _)) - Done - } - - override def shutdown(): Unit = shutdownPromise.trySuccess(()) - - override def done: Future[Done] = shutdownPromise.future.map(_ => Done) - }, - fromExclusive = 0, - ) - }, - ) - recoveringQueue.offer(1).futureValue - recoveringQueue.offer(2).futureValue - recoveringQueue.offer(3).futureValue - val blockedOffer1 = recoveringQueue.offer(4) - val blockedOffer2 = recoveringQueue.offer(5) - blockedOffer1.isCompleted shouldBe false - blockedOffer2.isCompleted shouldBe false - Threading.sleep(10) - blockedOffer1.isCompleted shouldBe false - blockedOffer2.isCompleted shouldBe false - offerGated.trySuccess(()) - blockedOffer1.futureValue - blockedOffer2.futureValue - eventually() { - received.get() shouldBe Vector( - 1L -> 1, - 2L -> 2, - 3L -> 3, - 4L -> 4, - 5L -> 5, - ) - } - recoveringQueue.shutdown() - recoveringQueue.done.futureValue - } - - "complete all blocked offer calls if shutting down" in assertAllStagesStopped { - val consumerPromise = Promise[FutureQueueConsumer[Int]]() - val recoveringQueue = new RecoveringFutureQueueImpl[Int]( - maxBlockedOffer = 3, - bufferSize = 2, - loggerFactory = loggerFactory, - retryStategy = PekkoUtil.exponentialRetryWithCap( - minWait = 2, - multiplier = 2, - cap = 10, - ), - retryAttemptWarnThreshold = 100, - retryAttemptErrorThreshold = 200, - uncommittedWarnTreshold = 100, - recoveringQueueMetrics = RecoveringQueueMetrics.NoOp, - consumerFactory = _ => consumerPromise.future, - ) - recoveringQueue.offer(1).futureValue - recoveringQueue.offer(2).futureValue - val blockedOffer1 = recoveringQueue.offer(3) - val blockedOffer2 = recoveringQueue.offer(4) - val blockedOffer3 = recoveringQueue.offer(5) - recoveringQueue.offer(6).failed.futureValue.getMessage should include( - "Too many parallel offer calls. Maximum allowed parallel offer calls" - ) - blockedOffer1.isCompleted shouldBe false - blockedOffer2.isCompleted shouldBe false - blockedOffer3.isCompleted shouldBe false - Threading.sleep(10) - blockedOffer1.isCompleted shouldBe false - blockedOffer2.isCompleted shouldBe false - blockedOffer3.isCompleted shouldBe false - loggerFactory.assertLogs( - { - recoveringQueue.shutdown() - consumerPromise.failure(new Exception("failed to initialize")) - recoveringQueue.done.futureValue - }, - _.warningMessage should include( - "blocked offer calls pending at the time of the shutdown. 
It is recommended that shutdown gracefully" - ), - ) - blockedOffer1.futureValue - blockedOffer2.futureValue - blockedOffer3.futureValue - } - - "tear down consumer properly on shutdown" in assertAllStagesStopped { - val shutdownPromise = Promise[Unit]() - val donePromise = Promise[Unit]() - val recoveringQueue = new RecoveringFutureQueueImpl[Int]( - maxBlockedOffer = 2, - bufferSize = 2, - loggerFactory = loggerFactory, - retryStategy = PekkoUtil.exponentialRetryWithCap( - minWait = 2, - multiplier = 2, - cap = 10, - ), - retryAttemptWarnThreshold = 100, - retryAttemptErrorThreshold = 200, - uncommittedWarnTreshold = 100, - recoveringQueueMetrics = RecoveringQueueMetrics.NoOp, - consumerFactory = _ => - Future { - FutureQueueConsumer( - futureQueue = new FutureQueue[(Long, Int)] { - override def offer(elem: (Long, Int)): Future[Done] = - Future.successful(Done) - - override def shutdown(): Unit = shutdownPromise.trySuccess(()) - - override def done: Future[Done] = donePromise.future.map(_ => Done) - }, - fromExclusive = 0, - ) - }, - ) - recoveringQueue.firstSuccessfulConsumerInitialization.futureValue - shutdownPromise.isCompleted shouldBe false - Threading.sleep(10) - shutdownPromise.isCompleted shouldBe false - recoveringQueue.shutdown() - shutdownPromise.future.futureValue - recoveringQueue.done.isCompleted shouldBe false - Threading.sleep(10) - recoveringQueue.done.isCompleted shouldBe false - donePromise.trySuccess(()) - recoveringQueue.done.futureValue - } - - "tear down consumer properly even if shutdown comes earlier than initialization finished" in assertAllStagesStopped { - val initializedPromise = Promise[Unit]() - val shutdownPromise = Promise[Unit]() - val donePromise = Promise[Unit]() - val recoveringQueue = new RecoveringFutureQueueImpl[Int]( - maxBlockedOffer = 2, - bufferSize = 2, - loggerFactory = loggerFactory, - retryStategy = PekkoUtil.exponentialRetryWithCap( - minWait = 2, - multiplier = 2, - cap = 10, - ), - retryAttemptWarnThreshold = 100, - retryAttemptErrorThreshold = 200, - uncommittedWarnTreshold = 100, - recoveringQueueMetrics = RecoveringQueueMetrics.NoOp, - consumerFactory = _ => - initializedPromise.future.map { _ => - FutureQueueConsumer( - futureQueue = new FutureQueue[(Long, Int)] { - override def offer(elem: (Long, Int)): Future[Done] = - Future.successful(Done) - - override def shutdown(): Unit = shutdownPromise.trySuccess(()) - - override def done: Future[Done] = donePromise.future.map(_ => Done) - }, - fromExclusive = 0, - ) - }, - ) - recoveringQueue.firstSuccessfulConsumerInitialization.isCompleted shouldBe false - shutdownPromise.isCompleted shouldBe false - Threading.sleep(10) - recoveringQueue.firstSuccessfulConsumerInitialization.isCompleted shouldBe false - shutdownPromise.isCompleted shouldBe false - loggerFactory.assertEventuallyLogsSeq( - SuppressionRule.LoggerNameContains("RecoveringFutureQueueImpl") && - SuppressionRule.LevelAndAbove(org.slf4j.event.Level.DEBUG) - )( - recoveringQueue.shutdown(), - logEntries => { - logEntries should have size (3) - logEntries.head.infoMessage should include( - "Before shutting down, preventing further initialization retries" - ) - logEntries(1).infoMessage should include( - "Shutdown initiated" - ) - logEntries(2).debugMessage should include( - "Consumer initialization is in progress, delaying shutdown" - ) - }, - ) - // subsequent shutdown has no effect - loggerFactory.assertLogs( - SuppressionRule.LoggerNameContains("RecoveringFutureQueueImpl") && - SuppressionRule.Level(org.slf4j.event.Level.DEBUG) 
- )( - recoveringQueue.shutdown(), - logEntry => logEntry.debugMessage should include("Already shutting down, nothing to do"), - ) - recoveringQueue.firstSuccessfulConsumerInitialization.isCompleted shouldBe false - recoveringQueue.done.isCompleted shouldBe false - shutdownPromise.isCompleted shouldBe false - Threading.sleep(10) - recoveringQueue.firstSuccessfulConsumerInitialization.isCompleted shouldBe false - recoveringQueue.done.isCompleted shouldBe false - shutdownPromise.isCompleted shouldBe false - initializedPromise.trySuccess(()) - shutdownPromise.future.futureValue - recoveringQueue.firstSuccessfulConsumerInitialization.isCompleted shouldBe false - recoveringQueue.done.isCompleted shouldBe false - Threading.sleep(10) - recoveringQueue.firstSuccessfulConsumerInitialization.isCompleted shouldBe false - recoveringQueue.done.isCompleted shouldBe false - donePromise.trySuccess(()) - recoveringQueue.firstSuccessfulConsumerInitialization.failed.futureValue - recoveringQueue.done.futureValue - } - - "interrupt retry-wait if shutting down" in assertAllStagesStopped { - val firstConsumerInitializationFailedPromise = Promise[Unit]() - val recoveringQueue = new RecoveringFutureQueueImpl[Int]( - maxBlockedOffer = 2, - bufferSize = 2, - loggerFactory = loggerFactory, - retryStategy = PekkoUtil.exponentialRetryWithCap( - minWait = 5000, - multiplier = 2, - cap = 5000, - ), - retryAttemptWarnThreshold = 100, - retryAttemptErrorThreshold = 200, - uncommittedWarnTreshold = 100, - recoveringQueueMetrics = RecoveringQueueMetrics.NoOp, - consumerFactory = _ => { - firstConsumerInitializationFailedPromise.trySuccess(()) - Future.failed(new Exception("boom")) - }, - ) - firstConsumerInitializationFailedPromise.future.futureValue - Threading.sleep(10) - // at this point we should be waiting - val beforeShutdown = System.nanoTime() - // shutdown should interrupt the wait for the retry - loggerFactory.assertEventuallyLogsSeq( - SuppressionRule.LoggerNameContains("RecoveringFutureQueueImpl") && - SuppressionRule.LevelAndAbove(org.slf4j.event.Level.DEBUG) - )( - recoveringQueue.shutdown(), - logEntries => { - logEntries should have size (3) - logEntries.head.infoMessage should include( - "Before shutting down, preventing further initialization retries" - ) - logEntries(1).infoMessage should include( - "Shutdown initiated" - ) - logEntries(2).infoMessage should include( - "Interrupting wait for initialization retry, shutdown complete" - ) - }, - ) - // subsequent shutdown has no effect - loggerFactory.assertLogs( - SuppressionRule.LoggerNameContains("RecoveringFutureQueueImpl") && - SuppressionRule.Level(org.slf4j.event.Level.DEBUG) - )( - recoveringQueue.shutdown(), - logEntry => logEntry.debugMessage should include("Already shutting down, nothing to do"), - ) - val afterShutdown = System.nanoTime() - val shutdownCallTookSeconds = (afterShutdown - beforeShutdown) / 1000L / 1000L / 1000L - shutdownCallTookSeconds should be < (3L) // the shutdown call should not block waiting for the retry - recoveringQueue.done.futureValue - val afterDone = System.nanoTime() - val shutdownCompletionTookSeconds = (afterDone - beforeShutdown) / 1000L / 1000L / 1000L - shutdownCompletionTookSeconds should be < (3L) // shutdown completion should not block waiting for the retry - } - - "log warn/error if the respective consumer initialization warn/error threshold is breached" in assertAllStagesStopped { - val initializationStartedPromise = new AtomicReference(Promise[Unit]()) - val initializationContinuePromise = new 
AtomicReference(Promise[Unit]()) - val recoveringQueue = new RecoveringFutureQueueImpl[Int]( - maxBlockedOffer = 2, - bufferSize = 2, - loggerFactory = loggerFactory, - retryStategy = PekkoUtil.exponentialRetryWithCap( - minWait = 2, - multiplier = 2, - cap = 10, - ), - retryAttemptWarnThreshold = 3, - retryAttemptErrorThreshold = 6, - uncommittedWarnTreshold = 100, - recoveringQueueMetrics = RecoveringQueueMetrics.NoOp, - consumerFactory = { _ => - val f = initializationContinuePromise.get().future.map { _ => - throw new Exception("initialization fails") - } - initializationStartedPromise.get().trySuccess(()) - f - }, - ) - // info 1 - initializationStartedPromise.get().future.futureValue - initializationStartedPromise.set(Promise()) - initializationContinuePromise.getAndSet(Promise()).trySuccess(()) - // info 2 - initializationStartedPromise.get().future.futureValue - initializationStartedPromise.set(Promise()) - initializationContinuePromise.getAndSet(Promise()).trySuccess(()) - // info 3 - initializationStartedPromise.get().future.futureValue - initializationStartedPromise.set(Promise()) - initializationContinuePromise.getAndSet(Promise()).trySuccess(()) - loggerFactory.assertLogs( - { - // warn 1 - initializationStartedPromise.get().future.futureValue - initializationStartedPromise.set(Promise()) - initializationContinuePromise.getAndSet(Promise()).trySuccess(()) - }, - logEntry => - logEntry.warningMessage should include("Consumer initialization failed (attempt #4)"), - ) - loggerFactory.assertLogs( - { - // warn 2 - initializationStartedPromise.get().future.futureValue - initializationStartedPromise.set(Promise()) - initializationContinuePromise.getAndSet(Promise()).trySuccess(()) - }, - logEntry => - logEntry.warningMessage should include("Consumer initialization failed (attempt #5)"), - ) - loggerFactory.assertLogs( - { - // warn 3 - initializationStartedPromise.get().future.futureValue - initializationStartedPromise.set(Promise()) - initializationContinuePromise.getAndSet(Promise()).trySuccess(()) - }, - logEntry => - logEntry.warningMessage should include("Consumer initialization failed (attempt #6)"), - ) - loggerFactory.assertLogs( - { - // error 1 - initializationStartedPromise.get().future.futureValue - initializationStartedPromise.set(Promise()) - initializationContinuePromise.getAndSet(Promise()).trySuccess(()) - }, - logEntry => - logEntry.errorMessage should include("Consumer initialization failed (attempt #7)"), - ) - loggerFactory.assertLogs( - { - // error 2 - initializationStartedPromise.get().future.futureValue - initializationStartedPromise.set(Promise()) - initializationContinuePromise.getAndSet(Promise()).trySuccess(()) - }, - logEntry => - logEntry.errorMessage should include("Consumer initialization failed (attempt #8)"), - ) - initializationStartedPromise.get().future.futureValue - // shutting down after initialization started, and waiting until the async shutdown finishes - loggerFactory.assertEventuallyLogsSeq( - SuppressionRule.LoggerNameContains("RecoveringFutureQueueImpl") && - SuppressionRule.LevelAndAbove(org.slf4j.event.Level.DEBUG) - )( - recoveringQueue.shutdown(), - logEntries => { - logEntries should have size (3) - logEntries.head.infoMessage should include( - "Before shutting down, preventing further initialization retries" - ) - logEntries(1).infoMessage should include( - "Shutdown initiated" - ) - logEntries(2).debugMessage should include( - "Consumer initialization is in progress, delaying shutdown" - ) - }, - ) - // error 3, but as shutting 
down, no more errors are reported - initializationContinuePromise.get().trySuccess(()) - recoveringQueue.done.futureValue - recoveringQueue.firstSuccessfulConsumerInitialization.failed.futureValue - } - - "consumer offer failure should trigger a warning and proper recovery" in assertAllStagesStopped { - val received = new AtomicReference[Vector[(Long, Int)]](Vector.empty) - val firstConsumer = new AtomicBoolean(false) - val recoveryIndexRef = new AtomicLong(0) - val offerGated = Promise[Unit]() - val recoveringQueue = new RecoveringFutureQueueImpl[Int]( - maxBlockedOffer = 2, - bufferSize = 10, - loggerFactory = loggerFactory, - retryStategy = PekkoUtil.exponentialRetryWithCap( - minWait = 2, - multiplier = 2, - cap = 10, - ), - retryAttemptWarnThreshold = 100, - retryAttemptErrorThreshold = 200, - uncommittedWarnTreshold = 100, - recoveringQueueMetrics = RecoveringQueueMetrics.NoOp, - consumerFactory = commit => - Future { - val recoveryIndex = received.get().lastOption.map(_._1).getOrElse(0L) - if (firstConsumer.get()) { - firstConsumer.set(false) - recoveryIndexRef.set(recoveryIndex) - } else { - firstConsumer.set(true) - } - FutureQueueConsumer( - futureQueue = new FutureQueue[(Long, Int)] { - private val shutdownPromise = Promise[Unit]() - - override def offer(elem: (Long, Int)): Future[Done] = - offerGated.future.map { _ => - if (elem._2 == 4 && firstConsumer.get()) throw new Exception("boom") - else { - discard(received.accumulateAndGet(Vector(elem), _ ++ _)) - commit(elem._1) - Done - } - } - - override def shutdown(): Unit = shutdownPromise.trySuccess(()) - - override def done: Future[Done] = shutdownPromise.future.map(_ => Done) - }, - fromExclusive = recoveryIndex, - ) - }, - ) - recoveringQueue.offer(1).futureValue - recoveringQueue.offer(2).futureValue - recoveringQueue.offer(3).futureValue - recoveringQueue.offer(4).futureValue - recoveringQueue.offer(5).futureValue - // now all above are in the internal buffer - loggerFactory.assertLogs( - { - offerGated.trySuccess(()) - eventually() { - received.get() shouldBe Vector( - 1L -> 1, - 2L -> 2, - 3L -> 3, - 4L -> 4, - 5L -> 5, - ) - } - }, - _.warningMessage should include("Offer failed, shutting down delegate"), - ) - recoveringQueue.shutdown() - recoveringQueue.done.futureValue - recoveryIndexRef.get() shouldBe 3 - } - - "recover from the last element correctly" in assertAllStagesStopped { - val received = new AtomicReference[Vector[(Long, Int)]](Vector.empty) - val firstConsumer = new AtomicBoolean(false) - val recoveryIndexRef = new AtomicLong(0) - val offerGated = Promise[Unit]() - val recoveringQueue = new RecoveringFutureQueueImpl[Int]( - maxBlockedOffer = 2, - bufferSize = 10, - loggerFactory = loggerFactory, - retryStategy = PekkoUtil.exponentialRetryWithCap( - minWait = 2, - multiplier = 2, - cap = 10, - ), - retryAttemptWarnThreshold = 100, - retryAttemptErrorThreshold = 200, - uncommittedWarnTreshold = 100, - recoveringQueueMetrics = RecoveringQueueMetrics.NoOp, - consumerFactory = commit => - Future { - val recoveryIndex = received.get().lastOption.map(_._1).getOrElse(0L) - if (firstConsumer.get()) { - firstConsumer.set(false) - recoveryIndexRef.set(recoveryIndex) - } else { - firstConsumer.set(true) - } - FutureQueueConsumer( - futureQueue = new FutureQueue[(Long, Int)] { - private val shutdownPromise = Promise[Unit]() - - override def offer(elem: (Long, Int)): Future[Done] = - offerGated.future.flatMap { _ => - if (elem._2 == 4 && firstConsumer.get()) { - shutdownPromise.tryFailure(new Exception("delegate boom")) 
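// Failing the delegate's done future while this offer never completes simulates a consumer crash mid-stream; the queue must rebuild the consumer and resume from the last committed index (checked below via recoveryIndexRef).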
- Future.never - } else { - discard(received.accumulateAndGet(Vector(elem), _ ++ _)) - commit(elem._1) - Future.successful(Done) - } - } - - override def shutdown(): Unit = shutdownPromise.trySuccess(()) - - override def done: Future[Done] = shutdownPromise.future.map(_ => Done) - }, - fromExclusive = recoveryIndex, - ) - }, - ) - recoveringQueue.offer(1).futureValue - recoveringQueue.offer(2).futureValue - recoveringQueue.offer(3).futureValue - recoveringQueue.offer(4).futureValue - recoveringQueue.offer(5).futureValue - // now all above are in the internal buffer - offerGated.trySuccess(()) - eventually() { - received.get() shouldBe Vector( - 1L -> 1, - 2L -> 2, - 3L -> 3, - 4L -> 4, - 5L -> 5, - ) - } - recoveringQueue.shutdown() - recoveringQueue.done.futureValue - recoveryIndexRef.get() shouldBe 3 - } - - "recover within the uncommitted range correctly" in assertAllStagesStopped { - val received = new AtomicReference[Vector[(Long, Int)]](Vector.empty) - val firstConsumer = new AtomicBoolean(false) - val recoveryIndexRef = new AtomicLong(0) - val offerGated = Promise[Unit]() - val recoveringQueue = new RecoveringFutureQueueImpl[Int]( - maxBlockedOffer = 2, - bufferSize = 10, - loggerFactory = loggerFactory, - retryStategy = PekkoUtil.exponentialRetryWithCap( - minWait = 2, - multiplier = 2, - cap = 10, - ), - retryAttemptWarnThreshold = 100, - retryAttemptErrorThreshold = 200, - uncommittedWarnTreshold = 100, - recoveringQueueMetrics = RecoveringQueueMetrics.NoOp, - consumerFactory = commit => - Future { - val recoveryIndex = received.get().lastOption.map(_._1).getOrElse(0L) - if (firstConsumer.get()) { - firstConsumer.set(false) - recoveryIndexRef.set(recoveryIndex) - } else { - firstConsumer.set(true) - } - FutureQueueConsumer( - futureQueue = new FutureQueue[(Long, Int)] { - private val shutdownPromise = Promise[Unit]() - - override def offer(elem: (Long, Int)): Future[Done] = - offerGated.future.flatMap { _ => - if (elem._2 == 4 && firstConsumer.get()) { - shutdownPromise.tryFailure(new Exception("delegate boom")) - Future.never - } else if (elem._2 >= 3 && firstConsumer.get()) { - // forget, not commit - Future.successful(Done) - } else if (elem._2 >= 2 && firstConsumer.get()) { - // not commit - discard(received.accumulateAndGet(Vector(elem), _ ++ _)) - Future.successful(Done) - } else { - commit(elem._1) - discard(received.accumulateAndGet(Vector(elem), _ ++ _)) - Future.successful(Done) - } - } - - override def shutdown(): Unit = shutdownPromise.trySuccess(()) - - override def done: Future[Done] = shutdownPromise.future.map(_ => Done) - }, - fromExclusive = recoveryIndex, - ) - }, - ) - recoveringQueue.offer(1).futureValue - recoveringQueue.offer(2).futureValue - recoveringQueue.offer(3).futureValue - recoveringQueue.offer(4).futureValue - recoveringQueue.offer(5).futureValue - // now all above are in the internal buffer - offerGated.trySuccess(()) - eventually() { - received.get() shouldBe Vector( - 1L -> 1, - 2L -> 2, - 3L -> 3, - 4L -> 4, - 5L -> 5, - ) - } - recoveringQueue.shutdown() - recoveringQueue.done.futureValue - recoveryIndexRef.get() shouldBe 2 - } - - "recover at the beginning of the uncommitted range correctly" in assertAllStagesStopped { - val received = new AtomicReference[Vector[(Long, Int)]](Vector.empty) - val firstConsumer = new AtomicBoolean(false) - val recoveryIndexRef = new AtomicLong(0) - val offerGated = Promise[Unit]() - val recoveringQueue = new RecoveringFutureQueueImpl[Int]( - maxBlockedOffer = 2, - bufferSize = 10, - loggerFactory = 
loggerFactory, - retryStategy = PekkoUtil.exponentialRetryWithCap( - minWait = 2, - multiplier = 2, - cap = 10, - ), - retryAttemptWarnThreshold = 100, - retryAttemptErrorThreshold = 200, - uncommittedWarnTreshold = 100, - recoveringQueueMetrics = RecoveringQueueMetrics.NoOp, - consumerFactory = commit => - Future { - val recoveryIndex = received.get().lastOption.map(_._1).getOrElse(0L) - if (firstConsumer.get()) { - firstConsumer.set(false) - recoveryIndexRef.set(recoveryIndex) - } else { - firstConsumer.set(true) - } - FutureQueueConsumer( - futureQueue = new FutureQueue[(Long, Int)] { - private val shutdownPromise = Promise[Unit]() - - override def offer(elem: (Long, Int)): Future[Done] = - offerGated.future.flatMap { _ => - if (elem._2 == 4 && firstConsumer.get()) { - shutdownPromise.tryFailure(new Exception("delegate boom")) - Future.never - } else if (elem._2 >= 2 && firstConsumer.get()) { - // forget, not commit - Future.successful(Done) - } else { - commit(elem._1) - discard(received.accumulateAndGet(Vector(elem), _ ++ _)) - Future.successful(Done) - } - } - - override def shutdown(): Unit = shutdownPromise.trySuccess(()) - - override def done: Future[Done] = shutdownPromise.future.map(_ => Done) - }, - fromExclusive = recoveryIndex, - ) - }, - ) - recoveringQueue.offer(1).futureValue - recoveringQueue.offer(2).futureValue - recoveringQueue.offer(3).futureValue - recoveringQueue.offer(4).futureValue - recoveringQueue.offer(5).futureValue - // now all above are in the internal buffer - offerGated.trySuccess(()) - eventually() { - received.get() shouldBe Vector( - 1L -> 1, - 2L -> 2, - 3L -> 3, - 4L -> 4, - 5L -> 5, - ) - } - recoveringQueue.shutdown() - recoveringQueue.done.futureValue - recoveryIndexRef.get() shouldBe 1 - } - - "trying to recover before the uncommitted range results in halt and error (this is the case for invalid commit wiring in consumer)" in assertAllStagesStopped { - val received = new AtomicReference[Vector[(Long, Int)]](Vector.empty) - val firstConsumer = new AtomicBoolean(false) - val recoveryIndexRef = new AtomicLong(0) - val offerGated = Promise[Unit]() - val recoveringQueue = new RecoveringFutureQueueImpl[Int]( - maxBlockedOffer = 2, - bufferSize = 10, - loggerFactory = loggerFactory, - retryStategy = PekkoUtil.exponentialRetryWithCap( - minWait = 2, - multiplier = 2, - cap = 10, - ), - retryAttemptWarnThreshold = 100, - retryAttemptErrorThreshold = 200, - uncommittedWarnTreshold = 100, - recoveringQueueMetrics = RecoveringQueueMetrics.NoOp, - consumerFactory = commit => - Future { - val recoveryIndex = received.get().lastOption.map(_._1).getOrElse(0L) - if (firstConsumer.get()) { - firstConsumer.set(false) - recoveryIndexRef.set(recoveryIndex) - } else { - firstConsumer.set(true) - } - FutureQueueConsumer( - futureQueue = new FutureQueue[(Long, Int)] { - private val shutdownPromise = Promise[Unit]() - - override def offer(elem: (Long, Int)): Future[Done] = - offerGated.future.flatMap { _ => - if (elem._2 == 4 && firstConsumer.get()) { - shutdownPromise.tryFailure(new Exception("delegate boom")) - Future.never - } else if (elem._2 >= 2 && firstConsumer.get()) { - // forget, but commit: very wrong - commit(elem._1) - Future.successful(Done) - } else { - commit(elem._1) - discard(received.accumulateAndGet(Vector(elem), _ ++ _)) - Future.successful(Done) - } - } - - override def shutdown(): Unit = shutdownPromise.trySuccess(()) - - override def done: Future[Done] = shutdownPromise.future.map(_ => Done) - }, - fromExclusive = recoveryIndex, - ) - }, - ) - 
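// The consumer above commits elements that it then forgets ("forget, but commit"), so on recovery it asks to resume from before the uncommitted range; the queue must flag this invalid commit wiring as a program error and halt (asserted below).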
recoveringQueue.offer(1).futureValue - recoveringQueue.offer(2).futureValue - recoveringQueue.offer(3).futureValue - recoveringQueue.offer(4).futureValue - recoveringQueue.offer(5).futureValue - // now all above are in the internal buffer - loggerFactory.assertLogs( - { - offerGated.trySuccess(()) - recoveringQueue.done.futureValue - }, - _.errorMessage should include( - "Program error. The next uncommitted after recovery is not the next element." - ), - ) - recoveryIndexRef.get() shouldBe 1 - } - - def fuzzyTest(sleepy: Boolean): Unit = { - val inputSize = 1000 - def test(): Unit = { - val sink = new AtomicReference[List[(Long, Int)]](Nil) - val boomCount = new AtomicInteger(0) - val recoveringQueue = new RecoveringFutureQueueImpl[Int]( - maxBlockedOffer = 1, - bufferSize = 20, - loggerFactory = loggerFactory, - retryStategy = PekkoUtil.exponentialRetryWithCap( - minWait = 2, - multiplier = 2, - cap = 10, - ), - retryAttemptWarnThreshold = 100, - retryAttemptErrorThreshold = 200, - uncommittedWarnTreshold = 100, - recoveringQueueMetrics = RecoveringQueueMetrics.NoOp, - consumerFactory = commit => - Future { - if (sleepy) Threading.sleep(Random.nextLong(2)) - if (Random.nextLong(6) > 0) throw new Exception("initialization boom") - val (sourceQueue, sourceDone) = Source - .queue[(Long, Int)](20, OverflowStrategy.backpressure, 1) - .via(BatchN(5, 3)) - .mapAsync(3) { batch => - Future { - if (sleepy) Threading.sleep(Random.nextLong(4) / 4) - if (Random.nextLong(10) == 0) { - boomCount.incrementAndGet() - throw new Exception("boom") - } - batch - } - } - .map { batch => - batch.foreach(elem => sink.getAndUpdate(elem :: _)) - commit(batch.last._1) - } - .toMat(Sink.ignore)(Keep.both) - .run() - FutureQueueConsumer( - futureQueue = new PekkoSourceQueueToFutureQueue( - sourceQueue = sourceQueue, - sourceDone = sourceDone, - loggerFactory = loggerFactory, - ), - fromExclusive = sink.get().headOption.map(_._1).getOrElse(0), - ) - }, - ) - val testF = Future { - val inputFixture = Iterator.iterate(1)(_ + 1).take(inputSize).toList - inputFixture.foreach { i => - recoveringQueue.offer(i).futureValue - } - eventually()(sink.get().size shouldBe inputSize) - sink.get().reverse shouldBe inputFixture.zip(inputFixture) - boomCount.get() should be > (5) - } - if (sleepy) Threading.sleep(Random.nextLong(4)) - testF.futureValue(PatienceConfiguration.Timeout(Span.Max)) - val shutdownTestF = Future { - if (sleepy) Threading.sleep(Random.nextLong(10)) - Iterator - .iterate(inputSize + 1)(_ + 1) - .take(inputSize) - .foreach { i => - recoveringQueue.offer(i).futureValue - } - } - if (sleepy) Threading.sleep(Random.nextLong(100)) - recoveringQueue.shutdown() - recoveringQueue.done.futureValue - shutdownTestF.failed.futureValue.getMessage should include( - "Cannot offer new elements to the queue, after shutdown is initiated" - ) - sink.get().reverse shouldBe Range.inclusive(1, sink.get().size).map(i => i -> i).toList - } - - loggerFactory.assertLogsSeq( - SuppressionRule.LoggerNameContains("RecoveringFutureQueueImpl") && - SuppressionRule.Level(org.slf4j.event.Level.WARN) - )( - Range - .inclusive( - 1, - 6, - ) // high parallelism is not high load: this test is waiting most of the time - .map(_ => Future(Range.inclusive(1, 2).foreach(_ => test()))) - .foreach(_.futureValue(PatienceConfiguration.Timeout(Span.Max))), - logentries => - logentries.foldLeft(succeed) { case (_, entry) => - if (entry.level == org.slf4j.event.Level.WARN) { - entry.warningMessage should include( - "blocked offer calls pending at the time 
of the shutdown. It is recommended that shutdown gracefully" - ), - ) - } - - "work correctly in a fuzzy integration test with fuzzy delays, many recovery cycles, and a fuzzy shutdown" in assertAllStagesStopped { - fuzzyTest(sleepy = true) - } - - "work correctly in a fuzzy integration test without fuzzy delays, with many recovery cycles and a fuzzy shutdown" in assertAllStagesStopped { - fuzzyTest(sleepy = false) - } - - "squeeze through 1M items in 100 seconds (10K/sec) even with small batch sizes in PekkoStream" in assertAllStagesStopped { - val inputSize = 1000 * 1000 - val recoveringQueue = new RecoveringFutureQueueImpl[Int]( - maxBlockedOffer = 1, - bufferSize = 20, - loggerFactory = loggerFactory, - retryStategy = PekkoUtil.exponentialRetryWithCap( - minWait = 2, - multiplier = 2, - cap = 10, - ), - retryAttemptWarnThreshold = 100, - retryAttemptErrorThreshold = 200, - uncommittedWarnTreshold = 100, - recoveringQueueMetrics = RecoveringQueueMetrics.NoOp, - consumerFactory = commit => - Future { - val (sourceQueue, sourceDone) = Source - .queue[(Long, Int)](20, OverflowStrategy.backpressure, 1) - .via(BatchN(5, 3)) - .mapAsync(3) { batch => - Future { - batch - } - } - .map { batch => - commit(batch.last._1) - } - .toMat(Sink.ignore)(Keep.both) - .run() - FutureQueueConsumer( - futureQueue = new PekkoSourceQueueToFutureQueue( - sourceQueue = sourceQueue, - sourceDone = sourceDone, - loggerFactory = loggerFactory, - ), - fromExclusive = 0, - ) - }, - ) - val start = System.nanoTime() - Iterator - .iterate(1)(_ + 1) - .take(inputSize) - .foreach(i => recoveringQueue.offer(i).futureValue) - val end = System.nanoTime() - recoveringQueue.shutdown() - recoveringQueue.done.futureValue - val testTookMillis = (end - start) / 1000 / 1000 - logger.info(s"1M elem processing took $testTookMillis millis") - testTookMillis should be < (100000L) - } - } - - "FutureQueuePullProxy" should { - - val iterations = { - def recursionThrows(i: Int, limit: Int): Int = - if (i > limit) i - else recursionThrows(i + 1, limit) + 1 - val atomicInteger = new AtomicInteger(10) - Future(try { - Iterator - .continually { - recursionThrows(0, atomicInteger.get()) - atomicInteger.accumulateAndGet(10, _ * _) - } - .foreach(_ => ()) - fail("StackOverflowError expected, but infinite loop ended unexpectedly") - } catch { - case _: StackOverflowError => () - }).futureValue - atomicInteger.get() - } - logger.info(s"Found stack-overflow recursion depth: $iterations") - - "not throw StackOverflowError, even if asynchronous iterations are running on the same thread" in assertAllStagesStopped { - val pullCounter = new AtomicInteger(0) - new FutureQueuePullProxy[Int]( - initialEndIndex = 0, - pull = _ => { - if (pullCounter.incrementAndGet() > iterations) None - else Some(10) - }, - delegate = new FutureQueue[(Long, Int)] { - private val shutdownPromise = Promise[Done]() - - override def offer(elem: (Long, Int)): Future[Done] = - Future.successful(Done) - - override def shutdown(): Unit = - shutdownPromise.trySuccess(Done) - - override def done: Future[Done] = - shutdownPromise.future - }, - loggerFactory = loggerFactory, - ) - // this means that the constructor itself has already executed all the push-pull iterations - pullCounter.incrementAndGet() should be > (iterations) - } - } - - "IndexingFutureQueue" should { - - "index correctly from the next element" in assertAllStagesStopped { - val offered = new AtomicReference[(Long, Int)] - val testee = new IndexingFutureQueue[Int]( - 
futureQueueConsumer = FutureQueueConsumer( - futureQueue = new FutureQueue[(Long, Int)] { - private val shutdownPromise = Promise[Done]() - - override def offer(elem: (Long, Int)): Future[Done] = { - offered.set(elem) - Future.successful(Done) - } - - override def shutdown(): Unit = - shutdownPromise.trySuccess(Done) - - override def done: Future[Done] = - shutdownPromise.future - }, - fromExclusive = 15L, - ) - ) - testee.offer(16).futureValue - offered.get shouldBe (16L -> 16) - testee.shutdown() - testee.done.futureValue - } - - "fail if used concurrently" in assertAllStagesStopped { - val releaseOffer = Promise[Done]() - val testee = new IndexingFutureQueue[Int]( - futureQueueConsumer = FutureQueueConsumer( - futureQueue = new FutureQueue[(Long, Int)] { - private val shutdownPromise = Promise[Done]() - - override def offer(elem: (Long, Int)): Future[Done] = - releaseOffer.future - - override def shutdown(): Unit = - shutdownPromise.trySuccess(Done) - - override def done: Future[Done] = - shutdownPromise.future - }, - fromExclusive = 15L, - ) - ) - val blockedOffer = testee.offer(16) - testee.offer(17).failed.futureValue.getMessage should include( - "IndexingFutureQueue should be used sequentially" - ) - blockedOffer.isCompleted shouldBe false - releaseOffer.trySuccess(Done) - blockedOffer.futureValue - testee.offer(16).futureValue - testee.shutdown() - testee.done.futureValue - } - - "maintain order in indexing across concurrent users" in assertAllStagesStopped { - val elems = Range(0, 10).toList - val delegateQueue = new AtomicReference[Vector[(Long, (Int, Int))]](Vector.empty) - val testee = new IndexingFutureQueue[(Int, Int)]( - futureQueueConsumer = FutureQueueConsumer( - futureQueue = new FutureQueue[(Long, (Int, Int))] { - private val shutdownPromise = Promise[Done]() - - override def offer(elem: (Long, (Int, Int))): Future[Done] = { - delegateQueue.accumulateAndGet(Vector(elem), _ ++ _) - Future { - Threading.sleep(Random.nextLong(3)) - Done - } - } - - override def shutdown(): Unit = - shutdownPromise.trySuccess(Done) - - override def done: Future[Done] = - shutdownPromise.future - }, - fromExclusive = 0L, - ) - ) - def asyncPush(id: Int): Future[Unit] = Future( - elems.foreach { elem => - Threading.sleep(Random.nextLong(2)) - def offer(): Future[Done] = - testee.offer(id -> elem).recoverWith { _ => - Threading.sleep(Random.nextLong(2)) - offer() - } - offer().futureValue - } - ) - List( - asyncPush(1), - asyncPush(2), - asyncPush(3), - ).foreach(_.futureValue) - testee.shutdown() - testee.done.futureValue - def verifyId(id: Int) = - delegateQueue.get().filter(_._2._1 == id).map(_._2) shouldBe elems.map(id -> _) - verifyId(1) - verifyId(2) - verifyId(3) - delegateQueue.get().map(_._1) shouldBe Range(1, 31).map(_.toLong).toVector - } - } -} - -object PekkoUtilTest { - def withNoOpKillSwitch[A](value: A): WithKillSwitch[A] = WithKillSwitch(value)(noOpKillSwitch) - - implicit val eqKillSwitch: Eq[KillSwitch] = Eq.fromUniversalEquals[KillSwitch] - - /** A dedicated [[cats.Eq]] instance for [[com.digitalasset.canton.util.PekkoUtil.WithKillSwitch]] - * that takes the kill switch into account, unlike the default equality method on - * [[com.digitalasset.canton.util.PekkoUtil.WithKillSwitch]]. 
- */ - implicit def eqWithKillSwitch[A: Eq]: Eq[WithKillSwitch[A]] = - (x: WithKillSwitch[A], y: WithKillSwitch[A]) => - Eq[A].eqv(x.value, y.value) && Eq[KillSwitch].eqv(x.killSwitch, y.killSwitch) - - implicit def arbitraryChecked[A: Arbitrary]: Arbitrary[WithKillSwitch[A]] = - Arbitrary(Arbitrary.arbitrary[A].map(withNoOpKillSwitch)) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/RangeUtilTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/RangeUtilTest.scala deleted file mode 100644 index 3967a19eb1..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/RangeUtilTest.scala +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import com.digitalasset.canton.BaseTestWordSpec - -class RangeUtilTest extends BaseTestWordSpec { - "RangeUtil" can { - "partition an index range" in { - RangeUtil.partitionIndexRange(0, -1, 1) shouldBe Seq(0 -> -1) - RangeUtil.partitionIndexRange(0, 0, 1) shouldBe Seq(0 -> 0) - RangeUtil.partitionIndexRange(0, 1, 1) shouldBe Seq(0 -> 1) - RangeUtil.partitionIndexRange(0, 2, 1) shouldBe Seq(0 -> 1, 1 -> 2) - - RangeUtil.partitionIndexRange(0, 2, 3) shouldBe Seq(0 -> 2) - RangeUtil.partitionIndexRange(0, 3, 3) shouldBe Seq(0 -> 3) - RangeUtil.partitionIndexRange(0, 4, 3) shouldBe Seq(0 -> 3, 3 -> 4) - RangeUtil.partitionIndexRange(0, 7, 3) shouldBe Seq(0 -> 3, 3 -> 6, 6 -> 7) - - RangeUtil.partitionIndexRange(2, 9, 3) shouldBe Seq(2 -> 5, 5 -> 8, 8 -> 9) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/RateLimiterTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/RateLimiterTest.scala deleted file mode 100644 index 03036ad1e1..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/RateLimiterTest.scala +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import com.digitalasset.canton.config.RequireTypes.{NonNegativeNumeric, PositiveNumeric} -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.{BaseTestWordSpec, HasExecutionContext} -import org.scalatest.prop.TableFor2 - -import java.util.concurrent.atomic.AtomicLong -import scala.annotation.tailrec - -class RateLimiterTest extends BaseTestWordSpec with HasExecutionContext { - - lazy val testCases: TableFor2[Double, Int] = Table( - ("maxTasksPerSecond", "initialBurst"), - (0.5, 0), - (1, 1), - (9, 5), - (10, 1), - (100, 10), - (200, 20), - ) - - @tailrec - private def go( - limiter: RateLimiter, - nanotime: AtomicLong, - deltaNanos: Long, - total: Int, - success: Int, - ): Int = { - def submitAndReturnOneOnSuccess(limiter: RateLimiter): Int = - if (limiter.checkAndUpdateRate()) 1 else 0 - if (total == 0) { - success - } else { - nanotime.updateAndGet(_ + deltaNanos).discard - go(limiter, nanotime, deltaNanos, total - 1, success + submitAndReturnOneOnSuccess(limiter)) - } - } - - private def deltaNanos(maxTasksPerSecond: Double, factor: Double): Long = - ((1.0 / maxTasksPerSecond) * 1e9 * factor).toLong - - private def testRun( - maxTasksPerSecond: Double, - initialBurst: Int, - throttle: Double, - ): (Int, Int) = { - val nanotime = new AtomicLong(0) - val limiter = new RateLimiter( - NonNegativeNumeric.tryCreate(maxTasksPerSecond), - PositiveNumeric.tryCreate(Math.max(initialBurst.toDouble / maxTasksPerSecond, 1e-6)), - nanotime.get(), - ) - val total = ((100 * maxTasksPerSecond) / throttle).toInt - val deltaN = deltaNanos(maxTasksPerSecond, throttle) - val burst = (1 to initialBurst).count(_ => limiter.checkAndUpdateRate()) - burst shouldBe initialBurst - val res = go(limiter, nanotime, deltaN, total, 0) - (total, res) - } - - "A decay rate limiter" when { - testCases.forEvery { case (maxTasksPerSecond, initialBurst) => - s"the maximum rate is $maxTasksPerSecond" must { - "submission below max will never be rejected" in { - val (total, res) = testRun(maxTasksPerSecond, initialBurst, 2) - assert(res == total) - } - - "submission above max will be throttled to max" in { - val factor = 0.25 - val (total, res) = testRun(maxTasksPerSecond, initialBurst, factor) - res.toDouble should be > (total * 0.24) - res.toDouble should be < (total * 0.26) - } - } - } - } - - "zero is zero" in { - val limiter = new RateLimiter(NonNegativeNumeric.tryCreate(0.0), PositiveNumeric.tryCreate(1.0)) - assert(!limiter.checkAndUpdateRate()) - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/ResourceUtilTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/ResourceUtilTest.scala deleted file mode 100644 index 0655e0265b..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/ResourceUtilTest.scala +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import cats.data.EitherT -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.wordspec.AnyWordSpec - -import java.util.concurrent.atomic.AtomicInteger -import scala.concurrent.Future - -class ResourceUtilTest extends AnyWordSpec with BaseTest with HasExecutionContext { - case class TestException(message: String) extends RuntimeException(message) - - private def mockResource: AutoCloseable = { - val closeable = mock[AutoCloseable] - doNothing.when(closeable).close() - closeable - } - - private def mockResourceThatThrowsExceptionWhenClosing: AutoCloseable = { - val closeable = mock[AutoCloseable] - doNothing.when(closeable).close() - when(closeable.close()).thenThrow(TestException("Something happened when closing")) - closeable - } - - "ResourceUtil" when { - "withResource" should { - "return value from the function and close resource" in { - val resource = mockResource - val result = ResourceUtil.withResource(resource)(_ => "good") - - verify(resource, times(1)).close() - result shouldBe "good" - } - - "rethrow exception from function and still close resource" in { - val resource = mockResource - val exception = intercept[TestException]( - ResourceUtil.withResource(resource)(_ => throw TestException("Something happened")) - ) - - verify(resource, times(1)).close() - exception shouldBe TestException("Something happened") - exception.getSuppressed shouldBe empty - } - - "rethrow exception from closing" in { - val resource = mockResourceThatThrowsExceptionWhenClosing - val exception = intercept[TestException](ResourceUtil.withResource(resource)(_ => "good")) - - verify(resource, times(1)).close() - exception shouldBe TestException("Something happened when closing") - exception.getSuppressed shouldBe empty - } - - "rethrow exception from function and add exception from closing to suppressed" in { - val resource = mockResourceThatThrowsExceptionWhenClosing - val exception = intercept[TestException]( - ResourceUtil.withResource(resource)(_ => throw TestException("Something happened")) - ) - - verify(resource, times(1)).close() - exception shouldBe TestException("Something happened") - exception - .getSuppressed()(0) shouldBe TestException("Something happened when closing") - } - } - - "withResourceEither" should { - "have the same behavior as withResources but return an Either with the result or exception" in { - ResourceUtil.withResourceEither(mockResource)(_ => "good") shouldBe Right("good") - ResourceUtil.withResourceEither(mockResource)(_ => - throw TestException("Something happened") - ) shouldBe Left(TestException("Something happened")) - ResourceUtil.withResourceEither(mockResourceThatThrowsExceptionWhenClosing)(_ => - "good" - ) shouldBe Left(TestException("Something happened when closing")) - ResourceUtil.withResourceEither(mockResourceThatThrowsExceptionWhenClosing)(_ => - throw TestException("Something happened") - ) should matchPattern { - case Left(e @ TestException("Something happened")) - if e.getSuppressed()(0) == TestException("Something happened when closing") => - } - } - } - - "withResourceFuture" should { - "return value from the function and close resource" in { - val resource = mockResource - val result = ResourceUtil.withResourceFuture(resource)(_ => Future("good")).futureValue - - verify(resource, times(1)).close() - result shouldBe "good" - } - - "rethrow exception from function and still close resource" in { - val resource = mockResource - val exception = 
ResourceUtil - .withResourceFuture(resource)(_ => Future.failed(TestException("Something happened"))) - .failed - .futureValue - - verify(resource, times(1)).close() - exception shouldBe TestException("Something happened") - exception.getSuppressed shouldBe empty - } - - "rethrow exception from outside of future and still close resource" in { - val resource = mockResource - val exception = ResourceUtil - .withResourceFuture(resource)(_ => throw TestException("Something happened")) - .failed - .futureValue - - verify(resource, times(1)).close() - exception shouldBe TestException("Something happened") - exception.getSuppressed shouldBe empty - } - - "rethrow exception from closing" in { - val resource = mockResourceThatThrowsExceptionWhenClosing - val exception = - ResourceUtil.withResourceFuture(resource)(_ => Future("good")).failed.futureValue - - verify(resource, times(1)).close() - exception shouldBe TestException("Something happened when closing") - exception.getSuppressed shouldBe empty - } - - "rethrow exception from function and add exception from closing to suppressed" in { - val resource = mockResourceThatThrowsExceptionWhenClosing - val exception = ResourceUtil - .withResourceFuture(resource)(_ => Future.failed(TestException("Something happened"))) - .failed - .futureValue - - verify(resource, times(1)).close() - exception shouldBe TestException("Something happened") - exception - .getSuppressed()(0) shouldBe TestException("Something happened when closing") - } - - } - - "withResourceEitherT" should { - "return value from the function and close resource" in { - val resource = mockResource - val resultRight = ResourceUtil - .withResourceEitherT(resource)(_ => EitherT.rightT[Future, String]("good")) - .futureValue - val resultLeft = ResourceUtil - .withResourceEitherT(resource)(_ => EitherT.leftT[Future, String]("good")) - .value - .futureValue - .left - .value - - verify(resource, times(2)).close() - resultRight shouldBe "good" - resultLeft shouldBe "good" - } - - "rethrow exception from function and still close resource" in { - val resource = mockResource - val exception = ResourceUtil - .withResourceEitherT(resource)(_ => - EitherT.liftF[Future, String, String]( - Future.failed(TestException("Something happened")) - ) - ) - .value - .failed - .futureValue - - verify(resource, times(1)).close() - exception shouldBe TestException("Something happened") - exception.getSuppressed shouldBe empty - } - - "rethrow exception from outside of future and still close resource" in { - val resource = mockResource - val exception = ResourceUtil - .withResourceEitherT(resource)(_ => throw TestException("Something happened")) - .value - .failed - .futureValue - - verify(resource, times(1)).close() - exception shouldBe TestException("Something happened") - exception.getSuppressed shouldBe empty - } - - "rethrow exception from closing" in { - val resource = mockResourceThatThrowsExceptionWhenClosing - val exception = ResourceUtil - .withResourceEitherT(resource)(_ => EitherT.rightT[Future, String]("good")) - .value - .failed - .futureValue - - verify(resource, times(1)).close() - exception shouldBe TestException("Something happened when closing") - exception.getSuppressed shouldBe empty - } - - "rethrow exception from function and add exception from closing to suppressed" in { - val resource = mockResourceThatThrowsExceptionWhenClosing - val exception = ResourceUtil - .withResourceEitherT(resource)(_ => - EitherT.liftF[Future, String, String]( - Future.failed(TestException("Something happened")) - ) 
- ) - .value - .failed - .futureValue - - verify(resource, times(1)).close() - exception shouldBe TestException("Something happened") - exception - .getSuppressed()(0) shouldBe TestException("Something happened when closing") - } - - "create a resource only once" in { - val counter = new AtomicInteger(0) - - def newResource() = new AutoCloseable { - - // Increment the counter when the resource is created - counter.incrementAndGet() - - override def close(): Unit = () - } - - ResourceUtil - .withResourceEitherT(newResource())(_ => EitherTUtil.unit[String]) - .value - .futureValue - - counter.get() shouldBe 1 - } - - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/SetCoverTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/SetCoverTest.scala deleted file mode 100644 index 04cff73242..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/SetCoverTest.scala +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import com.digitalasset.canton.BaseTest -import org.scalatest.wordspec.AnyWordSpec - -class SetCoverTest extends AnyWordSpec with BaseTest { - - "greedy" should { - "compute a cover" in { - - val testCases = Seq( - // All sets are needed - Map("a" -> Set(1, 2), "b" -> Set(2, 3)) -> Seq(Seq("a", "b")), - // Just one set - Map("a" -> Set(1, 2)) -> Seq(Seq("a")), - // Empty universe - Map.empty[String, Set[Int]] -> Seq(Seq.empty[String]), - // Empty universe with an empty set - Map("a" -> Set.empty[Int]) -> Seq(Seq.empty[String]), - // An example where the greedy algorithm finds the minimal solution - Map("a" -> Set(1, 2, 3), "b" -> Set(1, 2), "c" -> Set(3)) -> Seq(Seq("a")), - // An example where the greedy algorithm finds a non-minimal solution - Map( - "a" -> Set((1 to 7)*), - "b" -> Set((8 to 14)*), - "c" -> Set(1, 8), - "d" -> Set(2, 3, 9, 10), - "e" -> Set(4, 5, 6, 7, 11, 12, 13, 14), - ) -> Seq(Seq("c", "d", "e")), - // An example where the chosen cover depends on tie breaks in the priority queue - Map( - "a" -> Set(1, 2), - "b" -> Set(2, 3), - "c" -> Set(1, 3), - ) -> Seq(Seq("a", "b"), Seq("a", "c"), Seq("b", "c")), - // Duplicate sets - Map( - "a" -> Set(1, 2), - "b" -> Set(1, 2), - ) -> Seq(Seq("a"), Seq("b")), - ) - - forAll(testCases) { case (sets, expected) => - val cover = SetCover.greedy(sets) - // Do not return duplicate elements - cover.distinct shouldBe cover - expected.map(_.toSet) should contain(cover.toSet) - } - } - - "be linear if the sets are disjoint" in { - // This test should take less than a second. 
- // If it runs any longer, there is likely a performance issue with the implementation - val size = 50000 - val sets = (1 to size).map(i => i -> Set(i)).toMap - SetCover.greedy(sets) should have size size.toLong - } - - "be linear if the sets overlap" in { - // This test should take less than a second - // If it runs any longer, there is likely a performance issue with the implementation - val rows = 300 - // Creates the following sets ri and ci up to `rows`: - // r0: 0-0 - // r1: 1-0 1-1 1-1' - // r2: 2-0 2-1 2-1' 2-2 2-2' - // r3: 3-0 3-1 3-1' 3-2 3-2' 3-3 3-3' - // r4: 4-0 4-1 4-1' 4-2 4-2' 4-3 4-3' 4-4 4-4' - // - // c1: 1-1 1-1' 2-2 2-2' 3-3 3-3' 4-4 4-4' - // c2: 1-1 1-1' 2-1 2-1' 3-2 3-2' 4-3 4-3' - // c3: 1-1 1-1' 2-1 2-1' 3-1 3-1' 4-2 4-2' - // c4: 1-1 1-1' 2-1 2-1' 3-1 3-1' 4-1 4-1' - // - // The greedy algorithm will always include the last available row. - // In between, it must update all the columns until all of their elements have been removed. - - def row(i: Int): (String, Set[String]) = - s"r$i" -> (s"$i-0" +: (1 to i).flatMap(j => Seq(s"$i-$j", s"$i-$j'"))).toSet - def col(j: Int): (String, Set[String]) = - s"c$j" -> ((1 to j).flatMap(i => Seq(s"$i-1", s"$i-1'")) ++ (j + 1 to rows).flatMap(i => - Seq(s"$i-${i - j + 1}", s"$i-${i - j + 1}'") - )).toSet - - val sets = ((0 to rows).map(row) ++ (1 to rows).map(col)).toMap - SetCover.greedy(sets) shouldBe ((0 to rows).map(i => s"r$i").reverse) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/SimpleExecutionQueueTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/SimpleExecutionQueueTest.scala deleted file mode 100644 index 3e87bdd834..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/SimpleExecutionQueueTest.scala +++ /dev/null @@ -1,568 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import com.digitalasset.canton.lifecycle.UnlessShutdown.{AbortedDueToShutdown, Outcome} -import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, UnlessShutdown} -import com.digitalasset.canton.logging.{LogEntry, SuppressionRule} -import com.digitalasset.canton.{BaseTest, HasExecutionContext, config} -import org.scalatest.BeforeAndAfterEach -import org.scalatest.wordspec.AsyncWordSpec -import org.slf4j.event.Level - -import java.util.concurrent.atomic.AtomicBoolean -import scala.concurrent.{Future, Promise} - -class SimpleExecutionQueueTest - extends AsyncWordSpec - with BaseTest - with HasExecutionContext - with BeforeAndAfterEach { - - private val queueTimeouts = timeouts.copy( - shutdownProcessing = config.NonNegativeDuration.ofSeconds(1), - closing = config.NonNegativeDuration.ofSeconds(1), - ) - - private class MockTask(name: String) { - val started = new AtomicBoolean(false) - private val promise: Promise[UnlessShutdown[String]] = Promise[UnlessShutdown[String]]() - - def run(): FutureUnlessShutdown[String] = { - started.set(true) - FutureUnlessShutdown(promise.future) - } - - def complete(): Unit = promise.success(UnlessShutdown.Outcome(name)) - - def shutdown(): Unit = promise.success(UnlessShutdown.AbortedDueToShutdown) - - def fail(): Unit = promise.failure(new RuntimeException(s"mocked failure for $name")) - } - - /* - Fail a task and capture the warning coming from subsequent tasks that will not be run. 
- */ - private def failTask(task: MockTask, notRunTasks: Seq[String]): Unit = - terminateTask(task, shutdown = false, notRunTasks) - - /* - Shuts a task down and captures the warnings coming from subsequent tasks that will not be run. - */ - private def shutdownTask(task: MockTask, notRunTasks: Seq[String]): Unit = - terminateTask(task, shutdown = true, notRunTasks) - - /** @param shutdown - * True if shutdown, false if failure - * @param notRunTasks - * Tasks which are not run because of the shutdown/failure - */ - private def terminateTask(task: MockTask, shutdown: Boolean, notRunTasks: Seq[String]): Unit = - if (shutdown) { - task.shutdown() - } else { - loggerFactory.assertLogsSeq(SuppressionRule.Level(Level.ERROR))( - task.fail(), - LogEntry.assertLogSeq( - notRunTasks.map { notRunTask => - ( - _.errorMessage should include( - s"Task '$notRunTask' will not run because of failure of previous task" - ), - "task does not run", - ) - }, - Seq.empty, - ), - ) - } - - private def simpleExecutionQueueTests( - mk: () => SimpleExecutionQueue - ): Unit = { - - "only run one future at a time" in { - val queue = mk() - val task1 = new MockTask("task1") - val task2 = new MockTask("task2") - val task3 = new MockTask("task3") - val task1Result = queue.executeUS(task1.run(), "Task1") - val task2Result = queue.executeUS(task2.run(), "Task2") - - task2.started.get() should be(false) - task1.complete() - - for { - _ <- task1Result.failOnShutdown - _ = task1.started.get() shouldBe true - // queue one while running - task3Result = queue.executeUS(task3.run(), "Task3").failOnShutdown - _ = task2.complete() - _ <- task2Result.failOnShutdown("aborted due to shutdown.") - _ = task2.started.get() shouldBe true - _ = task3.complete() - _ <- task3Result - } yield task3.started.get() should be(true) - } - - "flush never fails" in { - val queue = mk() - val task1 = new MockTask("task1") - val task1Result = queue.executeUS(task1.run(), "Task1") - - val flush0 = queue.flush() - flush0.isCompleted shouldBe false - task1.fail() - for { - _ <- task1Result.failed.failOnShutdown("aborted due to shutdown.") - _ <- queue.flush() - _ <- flush0 - } yield { - flush0.isCompleted shouldBe true - } - } - - "not run new tasks if the queue is shutdown" in { - val queue = mk() - val task1 = new MockTask("task1") - val task2 = new MockTask("task2") - - val task1Result = queue.executeUS(task1.run(), "Task1") - - task1.complete() - - for { - task1Res <- task1Result.unwrap - _ = queue.close() - task2Result = queue.executeUS(task2.run(), "Task2") - task2res <- task2Result.unwrap - } yield { - task1.started.get() shouldBe true - task1Res shouldBe UnlessShutdown.Outcome("task1") - task2.started.get() shouldBe false - task2res shouldBe UnlessShutdown.AbortedDueToShutdown - } - } - - "not run queued tasks if the queue is shutdown" in { - val queue = mk() - val task1 = new MockTask("task1") - val task2 = new MockTask("task2") - - val task1Result = queue.executeUS(task1.run(), "Task1") - val task2Result = queue.executeUS(task2.run(), "Task2") - - val closed = Future(queue.close()) - - // Make sure to wait for the close call to be scheduled before completing task 1 - eventually() { - queue.isClosing shouldBe true - } - - task1.complete() - - for { - task1Res <- task1Result.unwrap - task2Res <- task2Result.unwrap - _ <- closed - } yield { - task2Res shouldBe AbortedDueToShutdown - task2.started.get() shouldBe false - task1Res shouldBe Outcome("task1") - } - } - } - - private def stopAfterFailureTests( - mk: () => SimpleExecutionQueue - ): Unit = { 
- "not run a future in case of a previous failure" in { - val queue = mk() - val task1 = new MockTask("task1") - val task2 = new MockTask("task2") - val task3 = new MockTask("task3") - val task4 = new MockTask("task4") - val task1Result = queue.executeUS(task1.run(), "Task1").failOnShutdown - val task2Result = queue.executeUS(task2.run(), "Task2").failOnShutdown - val task3Result = queue.executeUS(task3.run(), "Task3").failOnShutdown - val task4Result = queue.executeUS(task4.run(), "Task4").failOnShutdown - - failTask(task1, Seq("Task2", "Task3", "Task4")) - - task3.complete() - - // Propagated to all subsequent tasks - val expectedFailure = "mocked failure for task1" - - for { - task2Res <- task2Result.failed - _ = task2.started.get() shouldBe false - _ = task2Res.getMessage shouldBe expectedFailure - task4Res <- task4Result.failed - _ = task4.started.get() shouldBe false - _ = task4Res.getMessage shouldBe expectedFailure - task1Res <- task1Result.failed - _ = task1Res.getMessage shouldBe expectedFailure - task3Res <- task3Result.failed - } yield { - task3Res.getMessage shouldBe expectedFailure - } - } - - "correctly propagate failures" in { - val queue = mk() - val task1 = new MockTask("task1") - val task2 = new MockTask("task2") - val task3 = new MockTask("task3") - val task1Result = queue.executeUS(task1.run(), "Task1") - val task2Result = queue.executeUS(task2.run(), "Task2") - val task3Result = queue.executeUS(task3.run(), "Task3") - - failTask(task1, Seq("Task2", "Task3")) - - task2.complete() - for { - task1Res <- task1Result.failed.failOnShutdown("aborted due to shutdown.") - task2Res <- task2Result.failed.failOnShutdown("aborted due to shutdown.") - task3Res <- task3Result.failed.failOnShutdown("aborted due to shutdown.") - } yield { - task1Res.getMessage shouldBe "mocked failure for task1" - task2Res.getMessage shouldBe "mocked failure for task1" - task3Res.getMessage shouldBe "mocked failure for task1" - } - } - - "not run follow-up tasks if a task has been shutdown" in { - val queue = mk() - val task1 = new MockTask("task1") - val task2 = new MockTask("task2") - val task3 = new MockTask("task3") - - val task1Result = queue.executeUS(task1.run(), "Task1") - val task2Result = queue.executeUS(task2.run(), "Task2") - val task3Result = queue.executeUS(task3.run(), "Task3") - - task1.complete() - shutdownTask(task2, Seq("Task3")) - - for { - task1res <- task1Result.unwrap - task2res <- task2Result.unwrap - task3res <- task3Result.unwrap - } yield { - task1.started.get() shouldBe true - task1res shouldBe UnlessShutdown.Outcome("task1") - task2.started.get() shouldBe true - task2res shouldBe UnlessShutdown.AbortedDueToShutdown - task3.started.get() shouldBe false - task3res shouldBe UnlessShutdown.AbortedDueToShutdown - } - } - - "list the outstanding tasks" in { - val queue = mk() - val task1 = new MockTask("task1") - val task2 = new MockTask("task2") - val task3 = new MockTask("task3") - val task4 = new MockTask("task4") - - val task1Result = queue.executeUS(task1.run(), "Task1") - val task2Result = queue.executeUS(task2.run(), "Task2") - val task3Result = queue.executeUS(task3.run(), "Task3") - val task4Result = queue.executeUS(task4.run(), "Task4") - - val queue0 = queue.queued - task1.complete() - val queue1 = queue.queued - failTask(task2, Seq("Task3", "Task4")) - - for { - _ <- task1Result.failOnShutdown - queue2 = queue.queued - _ = task3.complete() - _ <- task2Result.failed.failOnShutdown("aborted due to shutdown.") - _ <- task3Result.failed.failOnShutdown("aborted due to 
shutdown.") - queue3 = queue.queued - _ = task4.complete() - _ <- task4Result.failed.failOnShutdown("aborted due to shutdown.") - queue4 = queue.queued - } yield { - queue0 shouldBe Seq("sentinel (completed)", "Task1", "Task2", "Task3", "Task4") - queue1 shouldBe Seq("Task1 (completed)", "Task2", "Task3", "Task4") - - // After task2 failure, all tasks removed from the queue - queue2 shouldBe Seq("Task4 (completed)") - queue3 shouldBe Seq("Task4 (completed)") - queue4 shouldBe Seq("Task4 (completed)") - } - } - - "complete subsequent tasks with shutdown even if the currently running task does not close" in { - val queue = mk() - val task1 = new MockTask("task1") - val task2 = new MockTask("task2") - val task3 = new MockTask("task3") - - val task1Result = queue.executeUS(task1.run(), "Task1") - val task2Result = queue.executeUS(task2.run(), "Task2") - val task3Result = queue.executeUS(task3.run(), "Task3") - - val closed = loggerFactory.assertLoggedWarningsAndErrorsSeq( - Future(queue.close()), - LogEntry.assertLogSeq( - Seq( - ( - _.warningMessage should include( - "Task closing simple-exec-queue: test-queue did not complete within 1 second" - ), - "missing queue closing timeout message", - ), - ( - _.warningMessage should include( - "Closing 'AsyncCloseable(name=simple-exec-queue: test-queue)' failed" - ), - "missing lifecycle closing error message", - ), - ( - _.warningMessage should include( - "Forcibly completing Task2 with AbortedDueToShutdown" - ), - "missing task 2 shutdown", - ), - ( - _.warningMessage should include( - "Forcibly completing Task3 with AbortedDueToShutdown" - ), - "missing task 3 shutdown", - ), - ) - ), - ) - - eventually() { - closed.isCompleted shouldBe true - } - - for { - task2Res <- task2Result.unwrap - task3Res <- task3Result.unwrap - } yield { - task3Res shouldBe AbortedDueToShutdown - task2Res shouldBe AbortedDueToShutdown - // The running task is not completed - task1Result.unwrap.isCompleted shouldBe false - // After we shut it down it should eventually complete though - task1.shutdown() - eventually() { - task1Result.unwrap.futureValue shouldBe AbortedDueToShutdown - } - } - } - - } - - private def continueAfterFailureTests( - mk: () => SimpleExecutionQueue - ): Unit = { - "not propagate failures" in { - val queue = mk() - val task1 = new MockTask("task1") - val task2 = new MockTask("task2") - val task3 = new MockTask("task3") - val task1Result = queue.executeUS(task1.run(), "Task1") - val task2Result = queue.executeUS(task2.run(), "Task2") - val task3Result = queue.executeUS(task3.run(), "Task3") - - failTask(task1, Seq()) - - task2.complete() - task3.complete() - for { - task1Res <- task1Result.failed.failOnShutdown("aborted due to shutdown.") - task2Res <- task2Result.failOnShutdown("aborted due to shutdown.") - task3Res <- task3Result.failOnShutdown("aborted due to shutdown.") - } yield { - task1Res.getMessage shouldBe "mocked failure for task1" - task2Res shouldBe "task2" - task3Res shouldBe "task3" - } - } - - "run follow-up tasks if a task has been shutdown" in { - val queue = mk() - val task1 = new MockTask("task1") - val task2 = new MockTask("task2") - val task3 = new MockTask("task3") - - val task1Result = queue.executeUS(task1.run(), "Task1") - val task2Result = queue.executeUS(task2.run(), "Task2") - val task3Result = queue.executeUS(task3.run(), "Task3") - - task1.complete() - task2.shutdown() - task3.complete() - - for { - task1res <- task1Result.unwrap - task2res <- task2Result.unwrap - task3res <- task3Result.unwrap - } yield { - 
task1.started.get() shouldBe true - task1res shouldBe UnlessShutdown.Outcome("task1") - task2.started.get() shouldBe true - task2res shouldBe UnlessShutdown.AbortedDueToShutdown - task3.started.get() shouldBe true - task3res shouldBe UnlessShutdown.Outcome("task3") - } - } - - "list the outstanding tasks" in { - val queue = mk() - val task1 = new MockTask("task1") - val task2 = new MockTask("task2") - val task3 = new MockTask("task3") - val task4 = new MockTask("task4") - - val task1Result = queue.executeUS(task1.run(), "Task1") - val task2Result = queue.executeUS(task2.run(), "Task2") - val task3Result = queue.executeUS(task3.run(), "Task3") - val task4Result = queue.executeUS(task4.run(), "Task4") - - val queue0 = queue.queued - task1.complete() - val queue1 = queue.queued - failTask(task2, Seq.empty) - - for { - _ <- task1Result.failOnShutdown - queue2 = queue.queued - _ = task3.complete() - _ <- task2Result.failed.failOnShutdown - _ <- task3Result.failOnShutdown - queue3 = queue.queued - _ = task4.complete() - _ <- task4Result.failOnShutdown - queue4 = queue.queued - } yield { - queue0 shouldBe Seq("sentinel (completed)", "Task1", "Task2", "Task3", "Task4") - queue1 shouldBe Seq("Task1 (completed)", "Task2", "Task3", "Task4") - - // After task2 failure, all tasks removed from the queue - queue2 shouldBe Seq("Task2 (completed)", "Task3", "Task4") - queue3 shouldBe Seq("Task3 (completed)", "Task4") - queue4 shouldBe Seq("Task4 (completed)") - } - } - - "complete subsequent tasks with shutdown even if the currently running task does not close" in { - val queue = mk() - val task1 = new MockTask("task1") - val task2 = new MockTask("task2") - val task3 = new MockTask("task3") - - val task1Result = queue.executeUS(task1.run(), "Task1") - val task2Result = queue.executeUS(task2.run(), "Task2") - val task3Result = queue.executeUS(task3.run(), "Task3") - - val closed = loggerFactory.assertLoggedWarningsAndErrorsSeq( - Future(queue.close()), - LogEntry.assertLogSeq( - Seq( - ( - _.warningMessage should include( - "Task closing simple-exec-queue: test-queue did not complete within 1 second" - ), - "missing queue closing timeout message", - ), - ( - _.warningMessage should include( - "Closing 'AsyncCloseable(name=simple-exec-queue: test-queue)' failed" - ), - "missing lifecycle closing error message", - ), - ( - _.warningMessage should include( - "Forcibly completing Task2 with AbortedDueToShutdown" - ), - "missing task 2 shutdown", - ), - ( - _.warningMessage should include( - "Forcibly completing Task3 with AbortedDueToShutdown" - ), - "missing task 3 shutdown", - ), - ) - ), - ) - - eventually() { - closed.isCompleted shouldBe true - } - - for { - task2Res <- task2Result.unwrap - task3Res <- task3Result.unwrap - } yield { - task3Res shouldBe AbortedDueToShutdown - task2Res shouldBe AbortedDueToShutdown - // The running task is not completed - task1Result.unwrap.isCompleted shouldBe false - // After we shut it down it should eventually complete though - task1.shutdown() - eventually() { - task1Result.unwrap.futureValue shouldBe AbortedDueToShutdown - } - } - } - - } - - "SimpleExecutionQueueWithShutdown" when { - "not logging task timing" should { - val factory = () => - new SimpleExecutionQueue( - "test-queue", - futureSupervisor, - queueTimeouts, - loggerFactory, - logTaskTiming = false, - failureMode = StopAfterFailure, - ) - - behave like simpleExecutionQueueTests(factory) - behave like stopAfterFailureTests(factory) - } - - "logging task timingWithShutdown" should { - val factory = () => - 
new SimpleExecutionQueue( - "test-queue", - futureSupervisor, - queueTimeouts, - loggerFactory, - logTaskTiming = true, - failureMode = StopAfterFailure, - ) - - behave like simpleExecutionQueueTests(factory) - behave like stopAfterFailureTests(factory) - } - - "in ContinueAfterFailure mode" should { - val factory = () => - new SimpleExecutionQueue( - "test-queue", - futureSupervisor, - queueTimeouts, - loggerFactory, - logTaskTiming = true, - failureMode = ContinueAfterFailure, - ) - - behave like simpleExecutionQueueTests(factory) - behave like continueAfterFailureTests(factory) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/SingleUseCellTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/SingleUseCellTest.scala deleted file mode 100644 index 4481594201..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/SingleUseCellTest.scala +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import com.digitalasset.canton.BaseTest -import org.scalatest.wordspec.AnyWordSpec - -class SingleUseCellTest extends AnyWordSpec with BaseTest { - - def mk[A](): SingleUseCell[A] = new SingleUseCell[A] - - "SingleUseCell" should { - - "initially be empty" in { - val cell = mk[Int]() - assert(cell.isEmpty) - assert(cell.get.isEmpty) - assert(!cell.isDefined) - } - - "return the written value" in { - val cell = mk[Int]() - assert(cell.putIfAbsent(7).isEmpty) - assert(cell.isDefined) - assert(!cell.isEmpty) - assert(cell.get.contains(7)) - } - - "not overwrite values" in { - val cell = mk[Int]() - cell.putIfAbsent(12) - assert(cell.putIfAbsent(1).contains(12)) - assert(cell.get.contains(12)) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/SingletonTraverseLaws.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/SingletonTraverseLaws.scala deleted file mode 100644 index 51e10f26f1..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/SingletonTraverseLaws.scala +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import cats.laws.{IsEq, IsEqArrow, TraverseLaws} -import cats.{Applicative, Monoid} - -trait SingletonTraverseLaws[F[_], C] extends TraverseLaws[F] { - implicit override def F: SingletonTraverse.Aux[F, C] - - def sizeAtMost1[A](fa: F[A]): IsEq[Boolean] = - (F.size(fa) <= 1) <-> true - - def traverseSingletonConsistency[G[_], A, B](fa: F[A], f: A => G[B])(implicit - G: Applicative[G] - ): IsEq[G[F[B]]] = { - val traverseSingleton = F.traverseSingleton(fa)((_, a) => f(a)) - val traverse = F.traverse(fa)(f) - traverseSingleton <-> traverse - } - - def contextConsistency[A](fa: F[A]): IsEq[Option[C]] = { - implicit val keepFirst: Monoid[Option[C]] = new Monoid[Option[C]] { - override def empty: Option[C] = None - override def combine(x: Option[C], y: Option[C]): Option[C] = x.orElse(y) - } - - val (firstContext, fa2) = F.traverseSingleton(fa)((c, a) => (Option(c), (a, a))) - val (secondContext, _) = F.traverseSingleton(fa2)((c, aa) => (Option(c), (aa, aa))) - - firstContext <-> secondContext - } -} - -object SingletonTraverseLaws { - def apply[F[_]](implicit FF: SingletonTraverse[F]): SingletonTraverseLaws[F, FF.Context] = - new SingletonTraverseLaws[F, FF.Context] { def F: SingletonTraverse.Aux[F, FF.Context] = FF } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/SingletonTraverseTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/SingletonTraverseTest.scala deleted file mode 100644 index de8fc5ebee..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/SingletonTraverseTest.scala +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import cats.Id -import com.digitalasset.canton.BaseTestWordSpec -import org.scalatest.wordspec.AnyWordSpec - -class SingletonTraverseTest extends AnyWordSpec with BaseTestWordSpec { - "Laws for Id" should { - checkAllLaws( - "Id", - SingletonTraverseTests[Id].singletonTraverse[Int, Int, Int, Int, Option, Option], - ) - } - - "Laws for Option" should { - checkAllLaws( - "Option", - SingletonTraverseTests[Option].singletonTraverse[Int, Int, Int, Int, Option, Option], - ) - } - - "Laws for Tuple2[String, *]" should { - checkAllLaws( - "Tuple2", - SingletonTraverseTests[(String, *)].singletonTraverse[Int, Int, Int, Int, Option, Option], - ) - } - - "Laws for Either[String, *]" should { - checkAllLaws( - "Either", - SingletonTraverseTests[Either[String, *]] - .singletonTraverse[Int, Int, Int, Int, Option, Option], - ) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/SingletonTraverseTests.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/SingletonTraverseTests.scala deleted file mode 100644 index c55736803b..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/SingletonTraverseTests.scala +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import cats.kernel.CommutativeMonoid -import cats.laws.discipline.{TraverseTests, catsLawsIsEqToProp} -import cats.{CommutativeApplicative, Eq} -import org.scalacheck.Prop.forAll -import org.scalacheck.{Arbitrary, Cogen, Prop} - -trait SingletonTraverseTests[F[_], Context] extends TraverseTests[F] { - override def laws: SingletonTraverseLaws[F, Context] - - def singletonTraverse[ - A: Arbitrary, - B: Arbitrary, - C: Arbitrary, - M: Arbitrary, - X[_]: CommutativeApplicative, - Y[_]: CommutativeApplicative, - ](implicit - ArbFA: Arbitrary[F[A]], - ArbFB: Arbitrary[F[B]], - ArbXB: Arbitrary[X[B]], - ArbXM: Arbitrary[X[M]], - ArbYB: Arbitrary[Y[B]], - ArbYC: Arbitrary[Y[C]], - ArbYM: Arbitrary[Y[M]], - ArbFXM: Arbitrary[F[X[M]]], - CogenA: Cogen[A], - CogenB: Cogen[B], - CogenC: Cogen[C], - CogenM: Cogen[M], - M: CommutativeMonoid[M], - MA: CommutativeMonoid[A], - EqFA: Eq[F[A]], - EqFC: Eq[F[C]], - EqM: Eq[M], - EqA: Eq[A], - EqXYFC: Eq[X[Y[F[C]]]], - EqXFB: Eq[X[F[B]]], - EqYFB: Eq[Y[F[B]]], - EqXFM: Eq[X[F[M]]], - EqYFM: Eq[Y[F[M]]], - EqOptionA: Eq[Option[A]], - EqBoolean: Eq[Boolean], - EqOptionC: Eq[Option[Context]], - ): RuleSet = new RuleSet { - def name: String = "SingletonTraverse" - - def bases: Seq[(String, RuleSet)] = Nil - - def parents: Seq[RuleSet] = - Seq(traverse[A, B, C, M, X, Y]) - - def props: Seq[(String, Prop)] = - Seq( - "size at most 1" -> forAll(laws.sizeAtMost1[A] _), - "traverseSingleton consistency" -> forAll(laws.traverseSingletonConsistency[X, A, B] _), - "context consistency" -> forAll(laws.contextConsistency[A] _), - ) - } -} - -object SingletonTraverseTests { - def apply[F[_]](implicit FF: SingletonTraverse[F]): SingletonTraverseTests[F, FF.Context] = - new SingletonTraverseTests[F, FF.Context] { - override def laws: SingletonTraverseLaws[F, FF.Context] = SingletonTraverseLaws[F](FF) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/ThereafterTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/ThereafterTest.scala deleted file mode 100644 index 8ec6f1d592..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/ThereafterTest.scala +++ /dev/null @@ -1,880 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import cats.data.{Chain, EitherT, Nested, OptionT} -import cats.syntax.either.* -import cats.syntax.traverse.* -import cats.{Applicative, Functor, Id, Traverse} -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, UnlessShutdown} -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.wordspec.AnyWordSpec - -import java.util.concurrent.atomic.{AtomicInteger, AtomicReference} -import scala.annotation.unused -import scala.concurrent.duration.Duration -import scala.concurrent.{Await, ExecutionContext, Future, Promise, blocking} -import scala.util.{Failure, Success, Try} - -trait ThereafterTest extends AnyWordSpec with BaseTest { - - def thereafter[F[_], Content[_], Shape]( - sut: Thereafter.Aux[F, Content, Shape], - fixture: ThereafterTest.Fixture[F, Content, Shape], - ): Unit = { - "thereafter" should { - "run the body once" in { - forEvery(fixture.contents) { content => - val runCount = new AtomicInteger(0) - val x = fixture.fromContent(content) - val res = sut.thereafter(x) { _ => - fixture.isCompleted(x) shouldBe true - runCount.incrementAndGet() - () - } - fixture.await(res) shouldBe content - runCount.get shouldBe 1 - } - } - - "run the body after a failure" in { - val ex = new RuntimeException("EXCEPTION") - val x = fixture.fromTry(Failure[Unit](ex)) - val res = sut.thereafter(x) { content => - fixture.isCompleted(x) shouldBe true - Try(fixture.theContent(content)) shouldBe Failure(ex) - () - } - val y = fixture.await(res) - Try(fixture.theContent(y)) shouldBe Failure(ex) - } - - "propagate an exception in the body" in { - val ex = new RuntimeException("BODY FAILURE") - val x = fixture.fromTry(Success(())) - val res = sut.thereafter(x)(_ => throw ex) - val y = fixture.await(res) - Try(fixture.theContent(y)) shouldBe Failure(ex) - } - - "chain failure and body failure via suppression" in { - val ex1 = new RuntimeException("EXCEPTION") - val ex2 = new RuntimeException("BODY FAILURE") - val x = fixture.fromTry(Failure[Unit](ex1)) - val res = sut.thereafter(x)(_ => throw ex2) - val y = fixture.await(res) - Try(fixture.theContent(y)) shouldBe Failure(ex1) - ex1.getSuppressed should contain(ex2) - } - - "chain body exceptions via suppression" in { - val ex1 = new RuntimeException("BODY FAILURE 1") - val ex2 = new RuntimeException("BODY FAILURE 2") - val x = fixture.fromTry(Success(())) - val y = sut.thereafter(x)(_ => throw ex1) - val res = sut.thereafter(y)(_ => throw ex2) - val z = fixture.await(res) - Try(fixture.theContent(z)) shouldBe Failure(ex1) - ex1.getSuppressed should contain(ex2) - } - - "rethrow the exception in the body" in { - val ex = new RuntimeException("FAILURE") - val x = fixture.fromTry(Failure[Unit](ex)) - val res = sut.thereafter(x)(_ => throw ex) - val z = fixture.await(res) - Try(fixture.theContent(z)) shouldBe Failure(ex) - } - - "call the body with the right content" in { - forAll(fixture.contents) { content => - val called = new AtomicReference[Seq[Content[fixture.X]]](Seq.empty) - val x = fixture.fromContent(content) - val res = sut.thereafter(x) { c => - called.updateAndGet(_ :+ c).discard - } - val z = fixture.await(res) - z shouldBe content - called.get() shouldBe Seq(content) - } - } - } - - "withShape" should { - "assemble the content" in { - forEvery(fixture.contents) { content => - fixture.splitContent(content) match { - case Some((shape, 
x)) => sut.withShape(shape, x) shouldBe content - case None => succeed - } - } - } - } - } - - def thereafterAsync[F[_], Content[_], Shape]( - sut: ThereafterAsync.Aux[F, Content, Shape], - fixture: ThereafterAsyncTest.Fixture[F, Content, Shape], - )(implicit ec: ExecutionContext): Unit = { - "thereafter" should { - - behave like thereafter(sut, fixture) - - "run the body only afterwards" in { - val runCount = new AtomicInteger(0) - val promise = Promise[Int]() - val x = fixture.fromFuture(promise.future) - val res = sut.thereafter(x) { content => - promise.future.isCompleted shouldBe true - fixture.theContent(content) shouldBe 42 - runCount.incrementAndGet() - () - } - promise.success(42) - val y = fixture.await(res) - fixture.theContent(y) shouldBe 42 - runCount.get shouldBe 1 - } - - "run the body even after failure" in { - val ex = new RuntimeException("EXCEPTION") - val promise = Promise[Unit]() - val x = fixture.fromFuture(promise.future) - val res = sut.thereafter(x) { content => - fixture.isCompleted(x) shouldBe true - promise.future.isCompleted shouldBe true - Try(fixture.theContent(content)) shouldBe Failure(ex) - () - } - promise.failure(ex) - val y = fixture.await(res) - Try(fixture.theContent(y)) shouldBe Failure(ex) - } - } - - "thereafterF" should { - "run the body once upon completion" in { - forEvery(fixture.contents) { content => - val runCount = new AtomicInteger(0) - val bodyRunCount = new AtomicInteger(0) - val x = fixture.fromContent(content) - val res = sut.thereafterF(x) { _ => - fixture.isCompleted(x) shouldBe true - runCount.incrementAndGet() - Future { - bodyRunCount.incrementAndGet() - () - } - } - fixture.await(res) shouldBe content - runCount.get shouldBe 1 - bodyRunCount.get shouldBe 1 - } - } - - "run the body only afterwards" in { - val runCount = new AtomicInteger(0) - val promise = Promise[Int]() - val x = fixture.fromFuture(promise.future) - val res = sut.thereafterF(x) { content => - promise.future.isCompleted shouldBe true - Future { - fixture.theContent(content) shouldBe 42 - runCount.incrementAndGet() - () - } - } - promise.success(42) - val y = fixture.await(res) - fixture.theContent(y) shouldBe 42 - runCount.get shouldBe 1 - } - - "propagate a synchronous exception in the body" in { - val ex = new RuntimeException("BODY FAILURE") - val x = fixture.fromFuture(Future.successful(())) - val res = sut.thereafterF(x)(_ => throw ex) - val y = fixture.await(res) - Try(fixture.theContent(y)) shouldBe Failure(ex) - } - - "propagate an asynchronous exception in the body" in { - val ex = new RuntimeException("BODY FAILURE") - val x = fixture.fromFuture(Future.successful(())) - val res = sut.thereafterF(x)(_ => Future.failed(ex)) - val y = fixture.await(res) - Try(fixture.theContent(y)) shouldBe Failure(ex) - } - - "chain failure and body failure via suppression" in { - val ex1 = new RuntimeException("EXCEPTION") - val ex2 = new RuntimeException("BODY FAILURE") - val x = fixture.fromFuture(Future.failed[Unit](ex1)) - val res = sut.thereafterF(x)(_ => Future.failed(ex2)) - val y = fixture.await(res) - Try(fixture.theContent(y)) shouldBe Failure(ex1) - ex1.getSuppressed should contain(ex2) - } - - "chain body exceptions via suppression" in { - val ex1 = new RuntimeException("BODY FAILURE 1") - val ex2 = new RuntimeException("BODY FAILURE 2") - val x = fixture.fromFuture(Future.successful(())) - val y = sut.thereafterF(x)(_ => Future.failed(ex1)) - val res = sut.thereafterF(y)(_ => Future.failed(ex2)) - val z = fixture.await(res) - Try(fixture.theContent(z)) 
shouldBe Failure(ex1) - ex1.getSuppressed should contain(ex2) - } - - "rethrow the exception in the body" in { - val ex = new RuntimeException("FAILURE") - val x = fixture.fromFuture(Future.failed[Unit](ex)) - val res = sut.thereafterF(x)(_ => Future.failed(ex)) - val z = fixture.await(res) - Try(fixture.theContent(z)) shouldBe Failure(ex) - } - - "call the body with the right content" in { - forAll(fixture.contents) { content => - val called = new AtomicReference[Seq[Content[fixture.X]]](Seq.empty) - val x = fixture.fromContent(content) - val res = sut.thereafterF(x) { c => - Future.successful(called.updateAndGet(_ :+ c).discard) - } - val z = fixture.await(res) - z shouldBe content - called.get() shouldBe Seq(content) - } - } - } - } -} - -object ThereafterTest { - - trait Fixture[F[_], Content[_], Shape] { - type X - def fromTry[A](x: Try[A]): F[A] - def fromContent[A](content: Content[A]): F[A] - def isCompleted[A](x: F[A]): Boolean - def await[A](x: F[A]): Content[A] - def contents: Seq[Content[X]] - def theContent[A](content: Content[A]): A - def splitContent[A](content: Content[A]): Option[(Shape, A)] - } - - /** Test that the scala compiler finds the [[Thereafter]] implicits */ - @unused - private def implicitResolutionTest(): Unit = { - import Thereafter.syntax.* - - @SuppressWarnings(Array("org.wartremover.warts.Null")) - implicit val ec: ExecutionContext = null - - Id(5).thereafter((x: Int) => ()).discard - - EitherT.rightT[Future, Unit]("EitherT Future").thereafter(_ => ()).discard - EitherT - .rightT[FutureUnlessShutdown, Unit]("EitherT FutureUnlessShutdown") - .thereafter(_ => ()) - .discard - OptionT.pure[Future]("OptionT Future").thereafter(_ => ()).discard - OptionT.pure[FutureUnlessShutdown]("OptionT FutureUnlessShutdown").thereafter(_ => ()).discard - - // Type inference copes even with several Thereafter transformers - EitherT - .rightT[EitherT[Try, Unit, *], Unit]("EitherT EitherT Try") - .thereafter(_ => ()) - .discard - OptionT.pure[OptionT[Try, *]]("OptionT OptionT Try").thereafter(_ => ()).discard - Nested( - EitherT.pure[Try, Unit](OptionT.pure[Try]("Nested EitherT Try OptionT Try")) - ).thereafter(_ => ()).discard - } -} - -object ThereafterAsyncTest { - - trait Fixture[F[_], Content[_], Shape] extends ThereafterTest.Fixture[F, Content, Shape] { - override def fromTry[A](x: Try[A]): F[A] = fromFuture(Future.fromTry(x)) - def fromFuture[A](x: Future[A]): F[A] - } - - /** Test that the scala compiler finds the [[ThereafterAsync]] implicits */ - @unused - private def implicitResolutionTest(): Unit = { - import Thereafter.syntax.* - - @SuppressWarnings(Array("org.wartremover.warts.Null")) - implicit val ec: ExecutionContext = null - - EitherT.rightT[Future, Unit]("EitherT Future").thereafterF(_ => Future.unit).discard - EitherT - .rightT[FutureUnlessShutdown, Unit]("EitherT FutureUnlessShutdown") - .thereafterF(_ => Future.unit) - .discard - OptionT.pure[Future]("OptionT Future").thereafterF(_ => Future.unit).discard - OptionT - .pure[FutureUnlessShutdown]("OptionT FutureUnlessShutdown") - .thereafterF(_ => Future.unit) - .discard - - // Type inference copes even with several Thereafter transformers - EitherT - .rightT[EitherT[Future, Unit, *], Unit]("EitherT EitherT Future") - .thereafterF(_ => Future.unit) - .discard - OptionT.pure[OptionT[Future, *]]("OptionT OptionT Future").thereafterF(_ => Future.unit).discard - Nested( - EitherT.pure[Future, Unit](OptionT.pure[Future]("Nested EitherT Future OptionT Future")) - ).thereafterF(_ => Future.unit).discard - } -} - 
-class TryThereafterTest extends ThereafterTest { - "Try" should { - behave like thereafter(Thereafter[Try], TryThereafterTest.fixture) - } -} - -object TryThereafterTest { - lazy val fixture: ThereafterTest.Fixture[Try, Try, Unit] = - new ThereafterTest.Fixture[Try, Try, Unit] { - override type X = Any - override def fromTry[A](x: Try[A]): Try[A] = x - override def fromContent[A](content: Try[A]): Try[A] = content - override def isCompleted[A](x: Try[A]): Boolean = true - override def await[A](x: Try[A]): Try[A] = x - override def contents: Seq[Try[X]] = TryThereafterTest.contents - override def theContent[A](content: Try[A]): A = - content.fold(err => throw err, Predef.identity) - override def splitContent[A](content: Try[A]): Option[(Unit, A)] = - content.toOption.map(() -> _) - } - lazy val contents: Seq[Try[Any]] = - Seq(TryUtil.unit, Success(5), Failure(new RuntimeException("failure"))) -} - -class FutureThereafterTest extends ThereafterTest with HasExecutionContext { - "Future" should { - behave like thereafter(Thereafter[Future], FutureThereafterTest.fixture) - behave like thereafterAsync(ThereafterAsync[Future], FutureThereafterTest.fixture) - } -} - -object FutureThereafterTest { - lazy val fixture: ThereafterAsyncTest.Fixture[Future, Try, Unit] = - new ThereafterAsyncTest.Fixture[Future, Try, Unit] { - override type X = Any - override def fromFuture[A](x: Future[A]): Future[A] = x - override def fromContent[A](content: Try[A]): Future[A] = - Future.fromTry(content) - override def isCompleted[A](x: Future[A]): Boolean = x.isCompleted - override def await[A](x: Future[A]): Try[A] = Try(blocking { - Await.result(x, Duration.Inf) - }) - override def contents: Seq[Try[X]] = TryThereafterTest.contents - override def theContent[A](content: Try[A]): A = - content.fold(err => throw err, Predef.identity) - override def splitContent[A](content: Try[A]): Option[(Unit, A)] = - content.toOption.map(() -> _) - } -} - -class FutureUnlessShutdownThereafterTest extends ThereafterTest with HasExecutionContext { - "FutureUnlessShutdown" should { - behave like thereafterAsync( - ThereafterAsync[FutureUnlessShutdown], - FutureUnlessShutdownThereafterTest.fixture, - ) - } -} - -object FutureUnlessShutdownThereafterTest { - def fixture(implicit - ec: ExecutionContext - ): ThereafterAsyncTest.Fixture[FutureUnlessShutdown, Lambda[a => Try[UnlessShutdown[a]]], Unit] = - new ThereafterAsyncTest.Fixture[ - FutureUnlessShutdown, - Lambda[a => Try[UnlessShutdown[a]]], - Unit, - ] { - override type X = Any - override def fromFuture[A](x: Future[A]): FutureUnlessShutdown[A] = - FutureUnlessShutdown.outcomeF(x) - override def fromContent[A](content: Try[UnlessShutdown[A]]): FutureUnlessShutdown[A] = - FutureUnlessShutdown(Future.fromTry(content)) - override def isCompleted[A](x: FutureUnlessShutdown[A]): Boolean = x.unwrap.isCompleted - override def await[A]( - x: FutureUnlessShutdown[A] - ): Try[UnlessShutdown[A]] = - Try(blocking { - Await.result(x.unwrap, Duration.Inf) - }) - override def contents: Seq[Try[UnlessShutdown[X]]] = - Success(UnlessShutdown.AbortedDueToShutdown) +: - TryThereafterTest.contents.map(_.map(UnlessShutdown.Outcome(_))) - override def theContent[A](content: Try[UnlessShutdown[A]]): A = - content.fold(err => throw err, _.onShutdown(throw new NoSuchElementException("No outcome"))) - override def splitContent[A](content: Try[UnlessShutdown[A]]): Option[(Unit, A)] = - content.toOption.flatMap { - case UnlessShutdown.Outcome(x) => Some(() -> x) - case UnlessShutdown.AbortedDueToShutdown => 
None - } - } -} - -class EitherTThereafterTest extends ThereafterTest with HasExecutionContext { - "EitherT" when { - "applied to Try" should { - behave like thereafter( - Thereafter[EitherT[Try, Unit, *]], - EitherTThereafterTest.fixture(TryThereafterTest.fixture, NonEmpty(Seq, ())), - ) - } - - "applied to Future" should { - behave like thereafterAsync( - ThereafterAsync[EitherT[Future, Unit, *]], - EitherTThereafterTest.asyncFixture(FutureThereafterTest.fixture, NonEmpty(Seq, ())), - ) - } - - "applied to FutureUnlessShutdown" should { - implicit val appTryUnlessShutdown = Applicative[Try].compose[UnlessShutdown] - behave like thereafterAsync( - ThereafterAsync[EitherT[FutureUnlessShutdown, String, *]], - EitherTThereafterTest.asyncFixture( - FutureUnlessShutdownThereafterTest.fixture, - NonEmpty(Seq, "left", "another left"), - ), - ) - } - } -} - -object EitherTThereafterTest { - private class EitherTFixture[F[_], Content[_], Shape, E]( - base: ThereafterTest.Fixture[F, Content, Shape], - lefts: NonEmpty[Seq[E]], - )(implicit M: Functor[F], C: Applicative[Content]) - extends ThereafterTest.Fixture[EitherT[F, E, *], Lambda[a => Content[Either[E, a]]], Shape] { - override type X = Any - override def fromTry[A](x: Try[A]): EitherT[F, E, A] = EitherT(M.map(base.fromTry(x))(Right(_))) - override def fromContent[A](content: Content[Either[E, A]]): EitherT[F, E, A] = - EitherT(base.fromContent(content)) - override def isCompleted[A](x: EitherT[F, E, A]): Boolean = base.isCompleted(x.value) - override def await[A](x: EitherT[F, E, A]): Content[Either[E, A]] = - base.await(x.value) - override def contents: Seq[Content[Either[E, X]]] = - lefts.map(l => C.pure(Either.left[E, X](l))) ++ base.contents.map( - C.map(_)(Either.right[E, X](_)) - ) - override def theContent[A](content: Content[Either[E, A]]): A = - base - .theContent(content) - .valueOr(l => throw new NoSuchElementException(s"Left($l) is not a Right")) - override def splitContent[A](content: Content[Either[E, A]]): Option[(Shape, A)] = - base.splitContent(content).flatMap(_.traverse(_.toOption)) - } - - def fixture[F[_], Content[_], Shape, E]( - base: ThereafterTest.Fixture[F, Content, Shape], - lefts: NonEmpty[Seq[E]], - )(implicit - M: Functor[F], - C: Applicative[Content], - ): ThereafterTest.Fixture[EitherT[F, E, *], Lambda[a => Content[Either[E, a]]], Shape] = - new EitherTFixture[F, Content, Shape, E](base, lefts) - - private class EitherTAsyncFixture[F[_], Content[_], Shape, E]( - base: ThereafterAsyncTest.Fixture[F, Content, Shape], - lefts: NonEmpty[Seq[E]], - )(implicit M: Functor[F], C: Applicative[Content]) - extends EitherTFixture[F, Content, Shape, E](base, lefts) - with ThereafterAsyncTest.Fixture[ - EitherT[F, E, *], - Lambda[a => Content[Either[E, a]]], - Shape, - ] { - override def fromFuture[A](x: Future[A]): EitherT[F, E, A] = - EitherT(M.map(base.fromFuture(x))(Right(_))) - } - - def asyncFixture[F[_], Content[_], Shape, E]( - base: ThereafterAsyncTest.Fixture[F, Content, Shape], - lefts: NonEmpty[Seq[E]], - )(implicit - M: Functor[F], - C: Applicative[Content], - ): ThereafterAsyncTest.Fixture[EitherT[F, E, *], Lambda[a => Content[Either[E, a]]], Shape] = - new EitherTAsyncFixture(base, lefts) -} - -class OptionTThereafterTest extends ThereafterTest with HasExecutionContext { - "OptionT" when { - "applied to Try" should { - behave like thereafter( - Thereafter[OptionT[Try, *]], - OptionTThereafterTest.fixture(TryThereafterTest.fixture), - ) - } - - "applied to Future" should { - behave like thereafterAsync( - 
ThereafterAsync[OptionT[Future, *]], - OptionTThereafterTest.asyncFixture(FutureThereafterTest.fixture), - ) - } - - "applied to FutureUnlessShutdown" should { - implicit val appTryUnlessShutdown = Applicative[Try].compose[UnlessShutdown] - behave like thereafterAsync( - ThereafterAsync[OptionT[FutureUnlessShutdown, *]], - OptionTThereafterTest.asyncFixture(FutureUnlessShutdownThereafterTest.fixture), - ) - } - } -} - -object OptionTThereafterTest { - private class OptionTFixture[F[_], Content[_], Shape]( - base: ThereafterTest.Fixture[F, Content, Shape] - )(implicit M: Functor[F], C: Applicative[Content]) - extends ThereafterTest.Fixture[OptionT[F, *], Lambda[a => Content[Option[a]]], Shape] { - override type X = Any - override def fromTry[A](x: Try[A]): OptionT[F, A] = OptionT(M.map(base.fromTry(x))(Option(_))) - override def fromContent[A](content: Content[Option[A]]): OptionT[F, A] = - OptionT(base.fromContent(content)) - override def isCompleted[A](x: OptionT[F, A]): Boolean = base.isCompleted(x.value) - override def await[A](x: OptionT[F, A]): Content[Option[A]] = - base.await(x.value) - override def contents: Seq[Content[Option[X]]] = - base.contents.map(C.map(_)(Option[X](_))) :+ C.pure(None) - override def theContent[A](content: Content[Option[A]]): A = - base - .theContent(content) - .getOrElse(throw new NoSuchElementException("The option should not be empty")) - override def splitContent[A](content: Content[Option[A]]): Option[(Shape, A)] = - base.splitContent(content).flatMap(_.sequence) - } - - def fixture[F[_], Content[_], Shape](base: ThereafterTest.Fixture[F, Content, Shape])(implicit - M: Functor[F], - C: Applicative[Content], - ): ThereafterTest.Fixture[OptionT[F, *], Lambda[a => Content[Option[a]]], Shape] = - new OptionTFixture[F, Content, Shape](base) - - private class OptionTAsyncFixture[F[_], Content[_], Shape]( - base: ThereafterAsyncTest.Fixture[F, Content, Shape] - )(implicit - M: Functor[F], - C: Applicative[Content], - ) extends OptionTFixture[F, Content, Shape](base) - with ThereafterAsyncTest.Fixture[OptionT[F, *], Lambda[a => Content[Option[a]]], Shape] { - override def fromFuture[A](x: Future[A]): OptionT[F, A] = - OptionT(M.map(base.fromFuture(x))(Option(_))) - } - - def asyncFixture[F[_], Content[_], Shape](base: ThereafterAsyncTest.Fixture[F, Content, Shape])( - implicit - M: Functor[F], - C: Applicative[Content], - ): ThereafterAsyncTest.Fixture[OptionT[F, *], Lambda[a => Content[Option[a]]], Shape] = - new OptionTAsyncFixture[F, Content, Shape](base) -} - -class CheckedTThereafterTest extends ThereafterTest with HasExecutionContext { - "CheckedT" when { - "applied to Try" should { - behave like thereafter( - Thereafter[CheckedT[Try, Unit, String, *]], - CheckedTThereafterTest.fixture( - TryThereafterTest.fixture, - NonEmpty(Seq, ()), - NonEmpty(Seq, "ab", "cd"), - ), - ) - } - - "applied to Future" should { - behave like thereafterAsync( - ThereafterAsync[CheckedT[Future, Unit, String, *]], - CheckedTThereafterTest.asyncFixture( - FutureThereafterTest.fixture, - NonEmpty(Seq, ()), - NonEmpty(Seq, "ab", "cd"), - ), - ) - } - - "applied to FutureUnlessShutdown" should { - implicit val appTryUnlessShutdown = Applicative[Try].compose[UnlessShutdown] - behave like thereafterAsync( - ThereafterAsync[CheckedT[FutureUnlessShutdown, String, String, *]], - CheckedTThereafterTest.asyncFixture( - FutureUnlessShutdownThereafterTest.fixture, - NonEmpty(Seq, "abort", "another abort"), - NonEmpty(Seq, "nonabort", "another nonabort"), - ), - ) - } - } -} - -object 
CheckedTThereafterTest { - private class CheckedTFixture[F[_], Content[_], Shape, E, N]( - base: ThereafterTest.Fixture[F, Content, Shape], - aborts: NonEmpty[Seq[E]], - nonaborts: NonEmpty[Seq[N]], - )(implicit M: Functor[F], C: Applicative[Content]) - extends ThereafterTest.Fixture[ - CheckedT[F, E, N, *], - Lambda[a => Content[Checked[E, N, a]]], - (Shape, Chain[N]), - ] { - override type X = Any - override def fromTry[A](x: Try[A]): CheckedT[F, E, N, A] = CheckedT( - M.map(base.fromTry(x))(Checked.result) - ) - override def fromContent[A](content: Content[Checked[E, N, A]]): CheckedT[F, E, N, A] = - CheckedT(base.fromContent(content)) - override def isCompleted[A](x: CheckedT[F, E, N, A]): Boolean = base.isCompleted(x.value) - override def await[A](x: CheckedT[F, E, N, A]): Content[Checked[E, N, A]] = - base.await(x.value) - override def contents: Seq[Content[Checked[E, N, X]]] = - aborts.forgetNE.flatMap(e => - nonaborts.inits.map(ns => C.pure(Checked.Abort(e, Chain.fromSeq(ns)): Checked[E, N, X])) - ) ++ base.contents.flatMap(x => - nonaborts.tails.map(ns => C.map(x)(Checked.Result(Chain.fromSeq(ns), _): Checked[E, N, X])) - ) - override def theContent[A](content: Content[Checked[E, N, A]]): A = - base.theContent(content) match { - case Checked.Result(_, x) => x - case abort @ Checked.Abort(_, _) => - throw new NoSuchElementException(s"$abort is not a Result") - } - override def splitContent[A]( - content: Content[Checked[E, N, A]] - ): Option[((Shape, Chain[N]), A)] = - base.splitContent(content).flatMap { case (fshape, checked) => - checked match { - case Checked.Result(ns, x) => Some((fshape, ns) -> x) - case _ => None - } - } - } - - def fixture[F[_], Content[_], Shape, E, N]( - base: ThereafterTest.Fixture[F, Content, Shape], - aborts: NonEmpty[Seq[E]], - nonaborts: NonEmpty[Seq[N]], - )(implicit - M: Functor[F], - C: Applicative[Content], - ): ThereafterTest.Fixture[ - CheckedT[F, E, N, *], - Lambda[a => Content[Checked[E, N, a]]], - (Shape, Chain[N]), - ] = new CheckedTFixture[F, Content, Shape, E, N](base, aborts, nonaborts) - - private class CheckedTAsyncFixture[F[_], Content[_], Shape, E, N]( - base: ThereafterAsyncTest.Fixture[F, Content, Shape], - aborts: NonEmpty[Seq[E]], - nonaborts: NonEmpty[Seq[N]], - )(implicit M: Functor[F], C: Applicative[Content]) - extends CheckedTFixture[F, Content, Shape, E, N](base, aborts, nonaborts) - with ThereafterAsyncTest.Fixture[ - CheckedT[F, E, N, *], - Lambda[a => Content[Checked[E, N, a]]], - (Shape, Chain[N]), - ] { - override def fromFuture[A](x: Future[A]): CheckedT[F, E, N, A] = - CheckedT(M.map(base.fromFuture(x))(Checked.result)) - } - - def asyncFixture[F[_], Content[_], Shape, E, N]( - base: ThereafterAsyncTest.Fixture[F, Content, Shape], - aborts: NonEmpty[Seq[E]], - nonaborts: NonEmpty[Seq[N]], - )(implicit - M: Functor[F], - C: Applicative[Content], - ): ThereafterAsyncTest.Fixture[ - CheckedT[F, E, N, *], - Lambda[a => Content[Checked[E, N, a]]], - (Shape, Chain[N]), - ] = new CheckedTAsyncFixture(base, aborts, nonaborts) -} - -class NestedThereafterTest extends ThereafterTest with HasExecutionContext { - "Nested" when { - // These test cases cover the following aspects of nesting: - // - Combine two Thereafter instances where at least one of them does not have - // a ThereafterAsync instance (Future/Try) - // - Combine two ThereafterAsync instances (Future, FutureUnlessShutdown) - // - Check that shapes are properly treated (CheckedT) - // - Use every other type constructor with a Thereafter instance (other than 
Id - // because the exception handling is very different for Id and the Thereafter - // test cases don't make sense for Id), including Nested inside Nested. - - "applied to Future and Try" should { - behave like thereafter( - Thereafter[Nested[Future, Try, *]], - NestedThereafterTest.fixture( - FutureThereafterTest.fixture, - TryThereafterTest.fixture, - ), - ) - } - - "applied to CheckedT-Future and CheckedT-Try" should { - implicit val traverse: Traverse[Lambda[a => Try[Checked[Unit, String, a]]]] = - Traverse[Try].compose(Traverse[Checked[Unit, String, *]]) - - behave like thereafter( - Thereafter[Nested[CheckedT[Future, Unit, String, *], CheckedT[Try, Int, Double, *], *]], - NestedThereafterTest.fixture( - CheckedTThereafterTest.fixture( - FutureThereafterTest.fixture, - NonEmpty(Seq, ()), - NonEmpty(Seq, "ab", "cd"), - ), - CheckedTThereafterTest.fixture( - TryThereafterTest.fixture, - NonEmpty(Seq, 5), - NonEmpty(Seq, 1.0, 2.0), - ), - ), - ) - } - - "applied to Future and FutureUnlessShutdown" should { - behave like thereafterAsync( - ThereafterAsync[Nested[Future, FutureUnlessShutdown, *]], - NestedThereafterTest.asyncFixture( - FutureThereafterTest.fixture, - FutureUnlessShutdownThereafterTest.fixture, - ), - ) - } - - "applied to EitherT[Future] and OptionT[Future]" should { - implicit val traverse: Traverse[Lambda[a => Try[Either[Unit, a]]]] = - Traverse[Try].compose(Traverse[Either[Unit, *]]) - - behave like thereafterAsync( - ThereafterAsync[Nested[EitherT[Future, Unit, *], OptionT[Future, *], *]], - NestedThereafterTest.asyncFixture( - EitherTThereafterTest.asyncFixture(FutureThereafterTest.fixture, NonEmpty(Seq, ())), - OptionTThereafterTest.asyncFixture(FutureThereafterTest.fixture), - ), - ) - } - - "with multiple nestings" should { - implicit val traverse: Traverse[Lambda[a => Try[Try[UnlessShutdown[a]]]]] = - Traverse[Try].compose(Traverse[Try]).compose(Traverse[UnlessShutdown]) - - behave like thereafter( - Thereafter[ - Nested[Nested[Future, FutureUnlessShutdown, *], Nested[Try, OptionT[Future, *], *], *] - ], - NestedThereafterTest.fixture( - NestedThereafterTest.fixture( - FutureThereafterTest.fixture, - FutureUnlessShutdownThereafterTest.fixture, - ), - NestedThereafterTest.fixture( - TryThereafterTest.fixture, - OptionTThereafterTest.asyncFixture(FutureThereafterTest.fixture), - ), - ), - ) - } - } -} - -object NestedThereafterTest { - private class NestedFixture[F[_], FContent[_], FShape, G[_], GContent[_], GShape]( - val baseF: ThereafterTest.Fixture[F, FContent, FShape], - val baseG: ThereafterTest.Fixture[G, GContent, GShape], - )(implicit MF: Functor[F], CF: Traverse[FContent]) - extends ThereafterTest.Fixture[ - Nested[F, G, *], - Lambda[a => FContent[GContent[a]]], - (FShape, GShape), - ] { - override type X = baseG.X - - override def fromTry[A](x: Try[A]): Nested[F, G, A] = - Nested(baseF.fromTry(Success(baseG.fromTry(x)))) - - override def fromContent[A](content: FContent[GContent[A]]): Nested[F, G, A] = - Nested(MF.map(baseF.fromContent(content))(baseG.fromContent)) - - override def isCompleted[A](x: Nested[F, G, A]): Boolean = - if (baseF.isCompleted(x.value)) { - CF.forall(baseF.await(x.value))(baseG.isCompleted) - } else false - - override def await[A](x: Nested[F, G, A]): FContent[GContent[A]] = - baseF.await(MF.map(x.value)(baseG.await)) - - override def contents: Seq[FContent[GContent[X]]] = - baseF.contents.flatMap { fc => - CF.traverse(fc)(_ => baseG.contents) - } - - override def theContent[A](content: FContent[GContent[A]]): A = - 
baseG.theContent(baseF.theContent(content)) - - override def splitContent[A](content: FContent[GContent[A]]): Option[((FShape, GShape), A)] = - baseF - .splitContent(content) - .flatMap(_.traverse(baseG.splitContent)) - .map { case (fs, (gs, x)) => ((fs, gs), x) } - } - - def fixture[F[_], FContent[_], FShape, G[_], GContent[_], GShape]( - baseF: ThereafterTest.Fixture[F, FContent, FShape], - baseG: ThereafterTest.Fixture[G, GContent, GShape], - )(implicit - MF: Functor[F], - CF: Traverse[FContent], - ): ThereafterTest.Fixture[ - Nested[F, G, *], - Lambda[a => FContent[GContent[a]]], - (FShape, GShape), - ] = new NestedFixture[F, FContent, FShape, G, GContent, GShape](baseF, baseG) - - private class NestedAsyncFixture[F[_], FContent[_], FShape, G[_], GContent[_], GShape]( - override val baseF: ThereafterAsyncTest.Fixture[F, FContent, FShape], - override val baseG: ThereafterAsyncTest.Fixture[G, GContent, GShape], - )(implicit MF: Functor[F], CF: Traverse[FContent]) - extends NestedFixture[F, FContent, FShape, G, GContent, GShape](baseF, baseG) - with ThereafterAsyncTest.Fixture[ - Nested[F, G, *], - Lambda[a => FContent[GContent[a]]], - (FShape, GShape), - ] { - override def fromFuture[A](x: Future[A]): Nested[F, G, A] = - Nested(baseF.fromTry(Success(baseG.fromFuture(x)))) - } - - def asyncFixture[F[_], FContent[_], FShape, G[_], GContent[_], GShape]( - baseF: ThereafterAsyncTest.Fixture[F, FContent, FShape], - baseG: ThereafterAsyncTest.Fixture[G, GContent, GShape], - )(implicit - MF: Functor[F], - CF: Traverse[FContent], - ): ThereafterAsyncTest.Fixture[ - Nested[F, G, *], - Lambda[a => FContent[GContent[a]]], - (FShape, GShape), - ] = new NestedAsyncFixture[F, FContent, FShape, G, GContent, GShape](baseF, baseG) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/TraverseTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/TraverseTest.scala deleted file mode 100644 index 485caa5d35..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/TraverseTest.scala +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import cats.data.EitherT -import cats.syntax.either.* -import cats.syntax.foldable.* -import cats.syntax.functor.* -import cats.syntax.parallel.* -import cats.syntax.traverse.* -import cats.{Applicative, Parallel} -import com.daml.nonempty.NonEmpty -import com.daml.nonempty.catsinstances.* -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.util.FutureInstances.* -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.Assertion -import org.scalatest.wordspec.AnyWordSpec - -import java.util.concurrent.Semaphore -import scala.collection.immutable.ArraySeq -import scala.concurrent.{Future, blocking} - -class TraverseTest extends AnyWordSpec with BaseTest with HasExecutionContext { - - "traverse" when { - futureLikes.foreach { fOps => - s"used with ${fOps.name}" should { - "deadlock" in { - deadlockTraverse(fOps) - } - } - } - - // The following tests merely document the implementation choices made by a particular version of Cats - "used on a specific Seq" should { - "preserve the Seq type" in { - List(1).traverse(Option.apply).value shouldBe a[List[_]] - Vector(1).traverse(Option.apply).value shouldBe a[Vector[_]] - ArraySeq(1).traverse(Option.apply).value shouldBe an[ArraySeq[_]] - LazyList(1).traverse(Option.apply).value shouldBe a[LazyList[_]] - } - } - - "used on a general Seq" should { - "convert everything to Vector" in { - def go(xs: Seq[Int]): Assertion = - xs.traverse(Option.apply).value shouldBe a[Vector[_]] - - go(List(1)) - go(Vector(1)) - go(ArraySeq(1)) - go(LazyList(1)) - } - } - - "used on a specific NonEmpty Seq" should { - "preserve the Seq type" in { - NonEmpty(List, 1).toNEF.traverse(Option.apply).value.forgetNE shouldBe a[List[_]] - NonEmpty(Vector, 1).toNEF.traverse(Option.apply).value.forgetNE shouldBe a[Vector[_]] - NonEmpty(ArraySeq, 1).toNEF.traverse(Option.apply).value.forgetNE shouldBe an[ArraySeq[_]] - NonEmpty(LazyList, 1).toNEF.traverse(Option.apply).value.forgetNE shouldBe a[LazyList[_]] - } - } - - "used on a generic NonEmpty Seq" should { - "convert everything to Vector" in { - def go(xs: NonEmpty[Seq[Int]]): Assertion = - xs.toNEF - .traverse(Option.apply) - .value - .forgetNE shouldBe a[Vector[_]] - - go(NonEmpty(List, 1)) - go(NonEmpty(Vector, 1)) - go(NonEmpty(ArraySeq, 1)) - go(NonEmpty(LazyList, 1)) - } - } - } - - "traverse_" when { - futureLikes.foreach { fOps => - s"used with ${fOps.name}" should { - "deadlock" in { - deadlockTraverse_(fOps) - } - } - } - } - - "flatTraverse" when { - futureLikes.foreach { fOps => - s"used with ${fOps.name}" should { - "deadlock" in { - deadlockFlatTraverse(fOps) - } - } - } - } - - "parTraverse" when { - futureLikes.foreach { fOps => - s"used with ${fOps.name}" should { - "run in parallel" in { - runParTraverse(fOps) - } - - "be stack safe" in { - parTraverseStackSafety(fOps) - } - } - } - } - - "parTraverse_" when { - futureLikes.foreach { fOps => - s"used with ${fOps.name}" should { - "run in parallel" in { - runParTraverse_(fOps) - } - - "be stack safe" in { - parTraverse_StackSafety(fOps) - } - } - } - } - - "parFlatTraverse" when { - futureLikes.foreach { fOps => - s"used with ${fOps.name}" should { - "run in parallel" in { - runParFlatTraverse(fOps) - } - - "be stack safe" in { - parFlatTraverseStackSafety(fOps) - } - } - } - } - - private case object FutureOps extends TraverseTest.FutureLikeOps { - override type F[X] = Future[X] - override def name: String 
= "Future" - override def mk[A](x: => A): Future[A] = Future(x) - override def isCompleted[A](f: Future[A]): Boolean = f.isCompleted - override def await[A](f: Future[A]): A = f.futureValue - override def applicative: Applicative[Future] = Applicative[Future] - override def parallel: Parallel[Future] = Parallel[Future] - } - - private case object FutureUnlessShutdownOps extends TraverseTest.FutureLikeOps { - override type F[X] = FutureUnlessShutdown[X] - override def name: String = "FutureUnlessShutdown" - override def mk[A](x: => A): FutureUnlessShutdown[A] = FutureUnlessShutdown.outcomeF(Future(x)) - override def isCompleted[A](f: FutureUnlessShutdown[A]): Boolean = - f.unwrap.isCompleted - override def await[A](f: FutureUnlessShutdown[A]): A = - f.onShutdown(fail("shutdown")).futureValue - override def applicative: Applicative[FutureUnlessShutdown] = Applicative[FutureUnlessShutdown] - override def parallel: Parallel[FutureUnlessShutdown] = Parallel[FutureUnlessShutdown] - } - - private case object EitherTFutureOps extends TraverseTest.FutureLikeOps { - override type F[X] = EitherT[Future, String, X] - override def name: String = "EitherT[Future, String, *]" - override def mk[A](x: => A): EitherT[Future, String, A] = EitherT(Future(Either.right(x))) - override def isCompleted[A](f: EitherT[Future, String, A]): Boolean = f.value.isCompleted - override def await[A](f: EitherT[Future, String, A]): A = f.valueOrFail("left").futureValue - override def applicative: Applicative[EitherT[Future, String, *]] = - Applicative[EitherT[Future, String, *]] - override def parallel: Parallel[EitherT[Future, String, *]] = - Parallel[EitherT[Future, String, *]] - } - - private lazy val futureLikes = Seq( - FutureOps, - FutureUnlessShutdownOps, - EitherTFutureOps, - ) - - private sealed trait Op extends Product with Serializable - private case object Acquire extends Op - private case object Release extends Op - - private def runOp(semaphore: Semaphore, name: String, op: Op): Unit = - op match { - case Acquire => - logger.debug(s"Acquiring $semaphore for $name") - blocking(semaphore.acquire()) - case Release => - logger.debug(s"Releasing $semaphore for $name") - semaphore.release() - } - - // Put the two acquires at the end and the releases into the middle so that we detect sequential - // processing even if it's done from right to left. 
- private lazy val ops = List(Acquire, Release, Release, Acquire) - - private def checkDeadlock[F[_]](mkF: Semaphore => F[Unit])( - isCompleted: F[Unit] => Boolean - ): F[Unit] = { - val semaphore = new Semaphore(3) - semaphore.acquire(3) - val fl = mkF(semaphore) - always()(isCompleted(fl) shouldBe false) - logger.debug("Releasing the deadlock") - semaphore.release() - eventually()(isCompleted(fl) shouldBe true) - semaphore.release(2) - fl - } - - private def deadlockTraverse(fOps: TraverseTest.FutureLikeOps): Unit = { - import fOps.applicative - val fl = checkDeadlock[fOps.F](semaphore => - ops.traverse(op => fOps.mk(runOp(semaphore, s"traverse on ${fOps.name}", op))).void - )(fOps.isCompleted) - fOps.await(fl) - } - - private def deadlockTraverse_(fOps: TraverseTest.FutureLikeOps): Unit = { - import fOps.applicative - val fl = checkDeadlock[fOps.F](semaphore => - ops.traverse_(op => fOps.mk(runOp(semaphore, s"traverse_ on ${fOps.name}", op))) - )(fOps.isCompleted) - fOps.await(fl) - } - - private def deadlockFlatTraverse(fOps: TraverseTest.FutureLikeOps): Unit = { - import fOps.applicative - val fl = checkDeadlock[fOps.F](semaphore => - ops.flatTraverse { op => - fOps.mk(List(runOp(semaphore, s"flatTraverse on ${fOps.name}", op))) - }.void - )(fOps.isCompleted) - fOps.await(fl) - } - - private def runParTraverse(fOps: TraverseTest.FutureLikeOps): Unit = { - import fOps.parallel - val semaphore = new Semaphore(2) - semaphore.acquire(2) - val fl = ops.parTraverse(op => fOps.mk(runOp(semaphore, "parTraverse", op))) - fOps.await(fl) should have size ops.size.toLong - semaphore.release(2) - } - - private def runParTraverse_(fOps: TraverseTest.FutureLikeOps): Unit = { - import fOps.parallel - val semaphore = new Semaphore(2) - semaphore.acquire(2) - val fl = ops.parTraverse_(op => fOps.mk(runOp(semaphore, "parTraverse_", op))) - fOps.await(fl) - semaphore.release(2) - } - - private def runParFlatTraverse(fOps: TraverseTest.FutureLikeOps): Unit = { - import fOps.parallel - val semaphore = new Semaphore(2) - semaphore.acquire(2) - val fl = ops.parFlatTraverse(op => fOps.mk(List(runOp(semaphore, "parFlatTraverse", op)))) - fOps.await(fl) should have size ops.size.toLong - semaphore.release(2) - } - - private lazy val stackSafetyDepth = 20000 - - private def parTraverseStackSafety(fOps: TraverseTest.FutureLikeOps): Unit = { - import fOps.parallel - val fl = (1 to stackSafetyDepth: Seq[Int]).parTraverse(_ => fOps.mk(())) - fOps.await(fl) should have size stackSafetyDepth.toLong - } - - private def parTraverse_StackSafety(fOps: TraverseTest.FutureLikeOps): Unit = { - import fOps.parallel - val fl = (1 to stackSafetyDepth: Seq[Int]).parTraverse_(_ => fOps.mk(())) - fOps.await(fl) - } - - private def parFlatTraverseStackSafety(fOps: TraverseTest.FutureLikeOps): Unit = { - import fOps.parallel - val fl = (1 to stackSafetyDepth: Seq[Int]).parFlatTraverse(_ => fOps.mk(Seq(()))) - fOps.await(fl) should have size stackSafetyDepth.toLong - } -} - -object TraverseTest { - private[TraverseTest] trait FutureLikeOps extends Product with Serializable { - type F[_] - def name: String - def mk[A](x: => A): F[A] - def isCompleted[A](f: F[A]): Boolean - def await[A](f: F[A]): A - implicit def applicative: Applicative[F] - implicit def parallel: Parallel[F] - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/TwoPhasePriorityAccumulatorTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/TwoPhasePriorityAccumulatorTest.scala deleted file mode 
100644 index f34413413c..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/TwoPhasePriorityAccumulatorTest.scala +++ /dev/null @@ -1,255 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.util.TwoPhasePriorityAccumulator.ItemHandle -import com.digitalasset.canton.util.TwoPhasePriorityAccumulatorTest.Item -import org.scalatest.wordspec.AnyWordSpec - -import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference} -import scala.annotation.tailrec -import scala.collection.concurrent.TrieMap - -class TwoPhasePriorityAccumulatorTest extends AnyWordSpec with BaseTest { - - private def priority[A](itemWithPriority: (A, Int)): Int = itemWithPriority._2 - - s"${classOf[TwoPhasePriorityAccumulator[?, ?]].getSimpleName}" should { - "drain all items in ascending priority order" in { - - val accumulator = new TwoPhasePriorityAccumulator[String, Unit](None) - accumulator.isAccumulating shouldBe true - - val items = Seq("first" -> 1, "second" -> 2, "third" -> 0) - - items.foreach { case (item, priority) => - accumulator.accumulate(item, priority).value - } - - accumulator.isAccumulating shouldBe true - accumulator.getPhase shouldBe None - accumulator.stopAccumulating(()) shouldBe None - accumulator.isAccumulating shouldBe false - accumulator.getPhase shouldBe Some(()) - - accumulator.drain().toSeq shouldBe items.sortBy(priority) - } - - "be empty by default" in { - val accumulator = new TwoPhasePriorityAccumulator[String, Unit](None) - accumulator.stopAndDrain(()).toSeq shouldBe Seq.empty - } - - "allow the same item to be added multiple times" in { - val accumulator = new TwoPhasePriorityAccumulator[String, Unit](None) - - val items = Seq("first" -> 1, "second" -> 2, "first" -> 3, "first" -> 1, "first" -> 0) - items.foreach((accumulator.accumulate _).tupled(_).value) - - accumulator.stopAccumulating(()) - accumulator.drain().toSeq shouldBe items.sortBy(priority) - } - - "accumulation stops upon draining" in { - val accumulator = new TwoPhasePriorityAccumulator[String, Int](None) - - val items = Seq("first" -> 1, "second" -> 2) - items.foreach((accumulator.accumulate _).tupled(_).value) - - val label = 10 - accumulator.stopAccumulating(label) - - accumulator.accumulate("third", 3) shouldBe Left(label) - val iter = accumulator.drain() - accumulator.accumulate("fourth", 4) shouldBe Left(label) - iter.toSeq shouldBe items.sortBy(priority) - } - - "support removal via the handle" in { - val accumulator = new TwoPhasePriorityAccumulator[String, Unit](None) - - val items = Seq("first", "second", "third", "fourth", "first", "first") - val handles = items.zipWithIndex.map((accumulator.accumulate _).tupled(_).value) - - handles.zipWithIndex.foreach { case (handle, index) => - if (index % 2 == 0) { - handle.remove() shouldBe true - } - } - - accumulator.stopAccumulating(()) - accumulator.drain().toSeq shouldBe items.zipWithIndex.filterNot(_._2 % 2 == 0) - } - - "removal succeeds only once" in { - val accumulator = new TwoPhasePriorityAccumulator[String, Unit](None) - - val items = Seq("first", "second", "third", "fourth") - val handles = items.zipWithIndex.map((accumulator.accumulate _).tupled(_).value) - - handles(0).remove() shouldBe true - handles(0).remove() shouldBe false - - accumulator.stopAccumulating(()) - val iter = accumulator.drain() - iter.hasNext
shouldBe true - iter.next() shouldBe (items(1) -> 1) - - handles(1).remove() shouldBe false - val removals34 = Seq(handles(2), handles(3)).map(_.remove()) - - val drained34 = iter.toSeq - drained34.size + removals34.count(identity) shouldBe 2 - } - - "can be stopped only once" in { - val accumulator = new TwoPhasePriorityAccumulator[String, Int](None) - - val label = 5 - accumulator.stopAccumulating(label) shouldBe None - accumulator.getPhase shouldBe Some(label) - accumulator.accumulate("first", 1) shouldBe Left(label) - accumulator.stopAccumulating(label + 1) shouldBe Some(label) - accumulator.getPhase shouldBe Some(label) - accumulator.accumulate("second", 2) shouldBe Left(label) - accumulator.stopAccumulating(label + 2) shouldBe Some(label) - accumulator.getPhase shouldBe Some(label) - } - - "parallel drainings are disjoint and ascending in priority" in { - val accumulator = new TwoPhasePriorityAccumulator[String, Unit](None) - val items = Seq( - "first", - "second", - "third", - "fourth", - "fifth", - "sixth", - "seventh", - "eighth", - "ninth", - "tenth", - ) - items.zipWithIndex.foreach((accumulator.accumulate _).tupled(_).value) - - accumulator.stopAccumulating(()) - - val iter1 = accumulator.drain() - val iter2 = accumulator.drain() - val iter3 = accumulator.drain() - val iters = Seq(iter1, iter2, iter3) - - val drainedB = Seq.newBuilder[(Int, (String, TwoPhasePriorityAccumulator.Priority))] - - @tailrec def roundRobin(index: Int, lastDrainedIndex: Int): Unit = { - val iter = iters(index) - if (iter.hasNext) { - drainedB += index -> iter.next() - roundRobin((index + 1) % iters.size, index) - } else if (index == lastDrainedIndex) - // Stop if we've gone through all iterators and they are all empty - () - else roundRobin((index + 1) % iters.size, lastDrainedIndex) - } - roundRobin(0, 0) - - val drained = drainedB.result() - drained - .map { case (_, itemWithPriority) => itemWithPriority } - .sortBy { case (_, priority) => priority } shouldBe items.zipWithIndex - forAll(drained.groupBy { case (index, _) => index }) { case (index, drainedI) => - val priorities = drainedI.map { case (_, itemWithPriority) => priority(itemWithPriority) } - priorities.sorted shouldBe priorities - } - } - - "clean up obsolete items" in { - val accumulator = new TwoPhasePriorityAccumulator[Item, Unit](Some(_.isObsolete)) - - val items = Seq("first", "second", "third", "fourth", "fifth", "sixth").map(Item.apply) - - val handles = items.map(accumulator.accumulate(_, 0).value) - - val obsoleteIndices = Seq(1, 3, 5) - obsoleteIndices.foreach { index => - items(index).markObsolete() - } - accumulator.removeObsoleteTasks() - - val notRemoved = handles.map(_.remove()).zipWithIndex.collect { case (false, index) => index } - notRemoved shouldBe obsoleteIndices - - accumulator.stopAccumulating(()) - val drained = accumulator.drain() - drained.toSeq shouldBe Seq.empty - } - - "behave correctly if races occur" in { - // We simulate races by hijacking the obsolete item clean-up flag.
- // This makes this unit test an ugly white-box test - - val obsoleteRef = new AtomicReference[Item => Boolean](_ => false) - val afterRegistrationRef = new AtomicReference[() => Unit](() => ()) - - val accumulator = - new TwoPhasePriorityAccumulator[Item, Unit](Some(item => obsoleteRef.get()(item))) { - override protected def afterRegistration(): Unit = afterRegistrationRef.get()() - } - - val items = Seq("first", "second", "third", "fourth", "fifth", "sixth").map(Item.apply) - val priority = 0 - val handles = items.take(3).map(accumulator.accumulate(_, priority).value) - - val drainedRef = - new AtomicReference[Seq[(Item, TwoPhasePriorityAccumulator.Priority)]](Seq.empty) - val accumulatedRef = TrieMap.empty[Item, Either[Unit, ItemHandle]] - - afterRegistrationRef.set { () => - // This runs while we're accumulating items(3) - afterRegistrationRef.set { () => - // This runs while we're accumulating items(3,4) - obsoleteRef.set { item => - // This runs while we're accumulating items(3,4,5) - if (item == items(2)) { - // Use items(2) as a flag to ensure this executes only once - handles(2).remove() - afterRegistrationRef.set(() => ()) - obsoleteRef.set(_ => false) - // Completely drain the accumulator - // This will run before the item is inserted, - // and so we expect the result of the accumulation to be a left - drainedRef.set(accumulator.stopAndDrain(()).toSeq) - } - false - } - val item = items(5) - val handle = accumulator.accumulate(item, priority) - accumulatedRef.put(item, handle) - } - val item = items(4) - val handle = accumulator.accumulate(item, priority) - accumulatedRef.put(item, handle) - } - accumulator.accumulate(items(3), priority).value - - drainedRef.get().toSet shouldBe - Seq(items(0), items(1), items(3), items(4)).map(_ -> priority).toSet - accumulatedRef.keySet shouldBe Set(items(4), items(5)) - // items(4) gets drained and therefore should be a handle - accumulatedRef(items(4)).value - // items(5) does not get drained and must therefore be a Left - accumulatedRef(items(5)) shouldBe Left(()) - } - - } -} - -object TwoPhasePriorityAccumulatorTest { - private final case class Item(name: String) { - val obsoleteFlag: AtomicBoolean = new AtomicBoolean() - def isObsolete: Boolean = obsoleteFlag.get() - def markObsolete(): Unit = obsoleteFlag.set(true) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/collection/BoundedQueueTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/collection/BoundedQueueTest.scala deleted file mode 100644 index 0d10c80b98..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/collection/BoundedQueueTest.scala +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
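// A minimal sketch of the drop-strategy behaviour exercised by BoundedQueueTest
// below; the class name and the mutable.Queue backing are assumptions for
// illustration, not the production BoundedQueue implementation.
import scala.collection.mutable

final class BoundedQueueSketch[A](maxQueueSize: Int, dropNewest: Boolean = false) {
  private val underlying = mutable.Queue.empty[A]
  def enqueue(a: A): Unit =
    if (underlying.size < maxQueueSize) underlying.enqueue(a)
    else if (dropNewest) () // DropNewest: discard the incoming element
    else { underlying.dequeue(); underlying.enqueue(a) } // default: drop the oldest
  def enqueueAll(as: Seq[A]): Unit = as.foreach(enqueue)
  def toSeq: Seq[A] = underlying.toSeq
}
// A DropOldest queue of size 2 holds Seq(2, 3) after enqueueing 1, 2, 3, while a
// DropNewest queue holds Seq(1, 2), matching the assertions in the test below.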
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util.collection - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.util.collection.BoundedQueue.DropStrategy -import org.scalatest.wordspec.AnyWordSpec - -class BoundedQueueTest extends AnyWordSpec with BaseTest { - - "BoundedQueue" should { - "enqueue messages one by one and drop the oldest message" in { - val queue = new BoundedQueue[Short](maxQueueSize = 2) - queue.enqueue(1) - queue.enqueue(2) - queue should contain theSameElementsInOrderAs Seq(1, 2) - queue.enqueue(3) - queue should contain theSameElementsInOrderAs Seq(2, 3) - } - - "enqueue all messages and drop the newest one" in { - val queue = new BoundedQueue[Short](maxQueueSize = 2, DropStrategy.DropNewest) - queue.enqueueAll(Seq(1, 2, 3)) - queue should contain theSameElementsInOrderAs Seq(1, 2) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/collection/IterableUtilTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/collection/IterableUtilTest.scala deleted file mode 100644 index 385dd3d532..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/collection/IterableUtilTest.scala +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util.collection - -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.util.collection.IterableUtilTest.CompareOnlyFirst -import org.scalatest.wordspec.AnyWordSpec - -import scala.annotation.tailrec - -class IterableUtilTest extends AnyWordSpec with BaseTest { - "spansBy" should { - "work on a simple example" in { - val example = List(1, 1, 1, 2, 2, 4, 5, 5).zipWithIndex - IterableUtil - .spansBy(example)(_._1) - .map { case (i, it) => i -> it.map(_._2).toList } - .toList shouldBe List( - (1, List(0, 1, 2)), - (2, List(3, 4)), - (4, List(5)), - (5, List(6, 7)), - ) - } - - "run in linear time" in { - val size = 100000 - IterableUtil.spansBy((1 to size).toVector)(_ % 2 == 0).map(_._2) shouldBe - (1 to size).map(NonEmpty(Seq, _)) - } - } - - "subzipBy" should { - "stop when empty" in { - IterableUtil.subzipBy(Iterator(1, 2), Iterator.empty: Iterator[Int]) { (_, _) => - Some(1) - } shouldBe Seq.empty - - IterableUtil.subzipBy(Iterator.empty: Iterator[Int], Iterator(1, 2)) { (_, _) => - Some(1) - } shouldBe Seq.empty - } - - "skip elements in the second argument" in { - IterableUtil.subzipBy(Iterator(1, 2, 3), Iterator(0, 1, 1, 2, 0, 2, 3, 4)) { (x, y) => - if (x == y) Some(x -> y) else None - } shouldBe Seq(1 -> 1, 2 -> 2, 3 -> 3) - } - - "not skip elements in the first argument" in { - IterableUtil.subzipBy(Iterator(1, 2, 3), Iterator(2, 3, 1)) { (x, y) => - if (x == y) Some(x -> y) else None - } shouldBe Seq(1 -> 1) - } - } - - "max" should { - "find the max elements of a list" in { - IterableUtil.maxList(List(2, 1, 2, 2, 0, 1, 0, 2)) shouldEqual List(2, 2, 2, 2) - IterableUtil.maxList(List(1, 2, 3)) shouldEqual List(3) - IterableUtil.maxList(List(3, 2, 1)) shouldEqual List(3) - IterableUtil.maxList[Int](List.empty) shouldEqual List.empty - - val onlyFirsts = - List((2, 2), (4, 2), (4, 1), (4, 3), (2, 1), (3, 5), (0, 4), (4, 4)).map(x => - CompareOnlyFirst(x._1, x._2) - ) - IterableUtil.maxList(onlyFirsts) shouldEqual onlyFirsts - .filter(x => x.first == 4) - .reverse - } - } - - 
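    // Reading of the assertions above: maxList appears to return every occurrence
    // of the maximal element, and for elements that compare equal but remain
    // distinguishable (CompareOnlyFirst) it yields them in reverse encounter
    // order -- hence the `.reverse`. The minList checks below mirror this at the
    // other end of the ordering.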
"min" should { - "find the min elements of a list" in { - IterableUtil.minList(List(0, 1, 0, 0, 2, 1, 2, 0)) shouldEqual List(0, 0, 0, 0) - IterableUtil.minList(List(3, 2, 1)) shouldEqual List(1) - IterableUtil.minList(List(1, 2, 3)) shouldEqual List(1) - IterableUtil.minList[Int](List.empty) shouldEqual List.empty - - val onlyFirsts = - List((0, 2), (1, 3), (0, 1), (3, 3), (0, 5)).map(x => CompareOnlyFirst(x._1, x._2)) - IterableUtil.minList(onlyFirsts) shouldEqual onlyFirsts - .filter(x => x.first == 0) - .reverse - } - } - - "splitAfter" should { - "split after the elements" in { - IterableUtil.splitAfter(1 to 12)(isPrime) shouldBe - Seq( - NonEmpty(Seq, 1, 2), - NonEmpty(Seq, 3), - NonEmpty(Seq, 4, 5), - NonEmpty(Seq, 6, 7), - NonEmpty(Seq, 8, 9, 10, 11), - NonEmpty(Seq, 12), - ) - } - - "handle the empty sequence gracefulle" in { - IterableUtil.splitAfter(Seq.empty[Int])(_ => true) shouldBe Seq.empty - IterableUtil.splitAfter(Seq.empty[Int])(_ => false) shouldBe Seq.empty - } - - "work if no elements satify the predicate" in { - IterableUtil.splitAfter(1 to 10)(_ >= 11) shouldBe Seq(NonEmpty(Seq, 1, 2 to 10: _*)) - } - - "evaluate the predicate only on arguments" in { - IterableUtil.splitAfter(1 to 10)(x => - if (x >= 1 && x <= 10) x == 5 - else throw new IllegalArgumentException(s"Predicate evaluated on $x") - ) shouldBe Seq(NonEmpty(Seq, 1, 2, 3, 4, 5), NonEmpty(Seq, 6, 7, 8, 9, 10)) - } - - // This should run in a couple of hundreds of milliseconds - "work for long lists efficiently" in { - val count = 100000 - IterableUtil.splitAfter((1 to count).toVector)(_ => true) shouldBe (1 to count).map( - NonEmpty(Seq, _) - ) - } - } - - @tailrec - private def isPrime(i: Int): Boolean = - if (i == Integer.MIN_VALUE) false - else if (i < 0) isPrime(-i) - else if (i < 2) false - else (2 to Math.sqrt(i.toDouble).toInt).forall(d => i % d != 0) -} - -object IterableUtilTest { - final case class CompareOnlyFirst(first: Int, second: Int) extends Ordered[CompareOnlyFirst] { - override def compare(that: CompareOnlyFirst): Int = first.compareTo(that.first) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/collection/MapsUtilTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/collection/MapsUtilTest.scala deleted file mode 100644 index faa40e0958..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/collection/MapsUtilTest.scala +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util.collection - -import cats.Id -import com.digitalasset.canton.BaseTest -import org.scalatest.wordspec.AnyWordSpec - -class MapsUtilTest extends AnyWordSpec with BaseTest { - "MapsUtil" should { - - "correctly group by multiple values" in { - val m = Map[String, Int]("abc" -> 1, "cde" -> 2, "def" -> 3) - MapsUtil.groupByMultipleM[Id, String, Char, Int](m)(s => s.toSet) shouldBe Map( - 'a' -> Set(1), - 'b' -> Set(1), - 'c' -> Set(1, 2), - 'd' -> Set(2, 3), - 'e' -> Set(2, 3), - 'f' -> Set(3), - ) - } - - "build non conflicting maps" in { - MapsUtil.toNonConflictingMap(Seq(1 -> 2, 2 -> 3)) shouldBe Right(Map(1 -> 2, 2 -> 3)) - MapsUtil.toNonConflictingMap(Seq(1 -> 2, 2 -> 3, 1 -> 2)) shouldBe Right(Map(1 -> 2, 2 -> 3)) - MapsUtil.toNonConflictingMap(Seq(1 -> 2, 1 -> 3)) shouldBe Left(Map(1 -> Set(2, 3))) - } - - "compute intersection based on values" in { - import MapsUtil.intersectValues - - val empty = Map.empty[String, Set[Int]] - - intersectValues(Map("1" -> Set(1)), empty) shouldBe Map.empty - intersectValues(empty, Map("1" -> Set(1))) shouldBe Map.empty - intersectValues(Map("1" -> Set(1)), Map("2" -> Set(2))) shouldBe Map.empty - - intersectValues(Map("1" -> Set(1)), Map("2" -> Set(2), "1" -> Set(1))) shouldBe Map( - "1" -> Set(1) - ) - - intersectValues( - Map("1" -> Set(1), "2" -> Set(20, 21, 22, 23)), - Map("2" -> Set(20, 23, 25), "1" -> Set(1)), - ) shouldBe Map( - "1" -> Set(1), - "2" -> Set(20, 23), - ) - } - - "transpose" in { - import MapsUtil.transpose - - transpose(Map(1 -> Set("a"), 2 -> Set("a"))) shouldBe Map("a" -> Set(1, 2)) - transpose(Map(1 -> Set("a", "b"))) shouldBe Map("a" -> Set(1), "b" -> Set(1)) - transpose(Map("a" -> Set.empty[Int], "b" -> Set(1))) shouldBe Map(1 -> Set("b")) - transpose(Map.empty[Int, Set[String]]) shouldBe Map.empty[String, Set[Int]] - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/collection/SeqUtilTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/collection/SeqUtilTest.scala deleted file mode 100644 index f120620dda..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/collection/SeqUtilTest.scala +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
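// A sketch of the contract SeqUtilTest exercises below: a partial Fisher-Yates
// shuffle that stops after `size` swaps produces a uniformly random subset in
// random order, capped at the input size. Names are assumptions; this is an
// illustration, not the production SeqUtil code.
import scala.util.Random

def randomSubsetShuffleSketch[A](xs: Seq[A], size: Int, random: Random): Seq[A] = {
  val buf = xs.toBuffer
  val n = math.min(size, buf.size)
  for (i <- 0 until n) {
    val j = i + random.nextInt(buf.size - i) // uniform pick from the unshuffled tail
    val tmp = buf(i); buf(i) = buf(j); buf(j) = tmp
  }
  buf.take(n).toSeq
}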
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util.collection - -import com.digitalasset.canton.BaseTest -import org.scalatest.wordspec.AnyWordSpec - -import scala.util.Random - -class SeqUtilTest extends AnyWordSpec with BaseTest { - - "randomSubsetShuffle" should { - "pick a random subset of the given size" in { - val iterations = 1000 - for { i <- 1 to iterations } { - val subset = SeqUtil.randomSubsetShuffle(1 to 100, 30, new Random(i)) - subset should have size 30 - subset.distinct shouldBe subset - (1 to 1000) should contain allElementsOf subset - } - } - - "deal with tiny subsets" in { - val random = new Random(123) - val subsetSize1 = SeqUtil.randomSubsetShuffle(1 to 1000, 1, random) - subsetSize1 should have size 1 - - val subsetSize0 = SeqUtil.randomSubsetShuffle(1 to 1000, 0, random) - subsetSize0 should have size 0 - } - - "deal with large subsets" in { - val random = new Random(345) - val subsetSize999 = SeqUtil.randomSubsetShuffle(1 to 1000, 999, random) - subsetSize999 should have size 999 - subsetSize999.distinct shouldBe subsetSize999 - (1 to 1000) should contain allElementsOf subsetSize999 - - val fullShuffle = SeqUtil.randomSubsetShuffle(1 to 1000, 1000, random) - fullShuffle.sorted shouldBe (1 to 1000) - } - - "cap the size" in { - val random = new Random(678) - val more = SeqUtil.randomSubsetShuffle(1 to 10, 20, random) - more.sorted shouldBe (1 to 10) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/collection/TrieMapUtilTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/collection/TrieMapUtilTest.scala deleted file mode 100644 index 14800488cd..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/collection/TrieMapUtilTest.scala +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util.collection - -import cats.syntax.either.* -import com.digitalasset.canton.BaseTest -import org.scalatest.wordspec.AnyWordSpec - -import scala.collection.concurrent.TrieMap - -class TrieMapUtilTest extends AnyWordSpec with BaseTest { - - case class Error(key: Int, oldValue: String, newValue: String) - - "TrieMapUtil" should { - - "insert if absent" in { - val map = TrieMap(1 -> "Foo", 2 -> "Bar") - TrieMapUtil.insertIfAbsent(map, 3, "test", Error.apply _) shouldBe Either.unit - } - - "insert if idempotent" in { - val map = TrieMap(1 -> "Foo", 2 -> "Bar") - TrieMapUtil.insertIfAbsent(map, 2, "Bar", Error.apply _) shouldBe Either.unit - } - - "fail insert on different values " in { - val map = TrieMap(1 -> "Foo", 2 -> "Bar") - TrieMapUtil - .insertIfAbsent(map, 2, "Something else", Error.apply _) - .left - .value shouldBe an[Error] - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/retry/JitterSpec.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/retry/JitterSpec.scala deleted file mode 100644 index d1a621bae2..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/retry/JitterSpec.scala +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
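// For orientation, the four strategies exercised by JitterSpec below follow the
// well-known exponential-backoff-and-jitter taxonomy; sketched formulas, where
// the exact Canton arithmetic may differ in rounding and capping details:
//   none:         sleep(n) = min(cap, base * 2^n)
//   full:         sleep(n) = rand(0, min(cap, base * 2^n))
//   equal:        half = min(cap, base * 2^n) / 2; sleep(n) = half + rand(0, half)
//   decorrelated: sleep(n) = min(cap, rand(base, 3 * sleep(n - 1)))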
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util.retry - -import com.google.common.primitives.UnsignedInteger -import org.scalatest.funspec.AnyFunSpec - -import java.util.Random -import java.util.concurrent.TimeUnit -import java.util.concurrent.atomic.AtomicInteger -import scala.concurrent.duration.* - -class JitterSpec extends AnyFunSpec { - // val rng = new SecureRandom() - val rng = new java.util.Random() - val rand = Jitter.randomSource(rng) - val cap = 2000 milliseconds - - @SuppressWarnings(Array("org.wartremover.warts.Var")) - def testJitter(jitter: Jitter)(): Unit = { - val min = rand(1, 100) milliseconds - var sleep = rand(1, 1000) milliseconds - - for (i <- 0 until 10000) { - val delay = sleep - sleep = jitter(delay, min, i + 1) - assert(sleep.unit === TimeUnit.MILLISECONDS) - assert(sleep.length >= 0) - assert(sleep.length <= cap.length) - } - } - - describe("retry.Defaults.random") { - it("should return sane random values") { - for (i <- 0 until 1000) { - val result = rand(0, 10) - assert(result >= 0) - assert(result <= 10) - } - } - - it("should handle swapped bounds") { - for (i <- 0 until 1000) { - val result = rand(10, 0) - assert(result >= 0) - assert(result <= 10) - } - } - - it("should not cache random values") { - val counter = new AtomicInteger() - val rng = new Random() { - override def nextInt(): Int = - counter.addAndGet(1) - override def nextInt(n: Int): Int = - counter.addAndGet(1) - } - - val rand = Jitter.randomSource(rng) - rand(0, 100) - // intentionally using the guava UnsignedInteger.MAX_VALUE over Int.MaxValue - rand(0, UnsignedInteger.MAX_VALUE.longValue() + 1L) - assert(counter.get() === 4) - } - - it("should handle bounds greater than Int.MaxValue correctly") { - // this would previously throw an IllegalArgumentException - rand(0, Int.MaxValue.toLong + 1L) - } - } - - describe("retry.Jitter.none") { - it("should perform backoff correctly") { - testJitter(Jitter.none(cap))() - } - } - - describe("retry.Jitter.decorrelated") { - it("should perform decorrelated jitter correctly") { - testJitter(Jitter.decorrelated(cap))() - } - } - - describe("retry.Jitter.full") { - it("should perform full jitter correctly") { - testJitter(Jitter.full(cap))() - } - } - - describe("retry.Jitter.equal") { - it("should perform equal jitter correctly") { - testJitter(Jitter.equal(cap))() - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/retry/PolicyTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/retry/PolicyTest.scala deleted file mode 100644 index 7595f029a8..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/retry/PolicyTest.scala +++ /dev/null @@ -1,1292 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
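// A usage sketch distilled from the tests below (an illustration of the API as
// exercised here, not separate documentation):
//
//   implicit val success: Success[Int] = Success(_ == 42) // defines "done"
//   val policy = Pause(logger, hasSynchronizeWithClosing = flagCloseable,
//     maxRetries = 10, delay = 30.millis, operationName = "my-op")
//   val result: Future[Int] = policy(task(), AllExceptionRetryPolicy)
//
// The task is passed by name and re-evaluated on every attempt; retrying stops
// on the first result satisfying `success`, after maxRetries attempts, on a
// non-retryable exception (per the ExceptionRetryPolicy), or when the owning
// component is closed or shut down.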
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util.retry - -import cats.Eval -import com.digitalasset.canton.concurrent.{ExecutorServiceExtensions, Threading} -import com.digitalasset.canton.config.DefaultProcessingTimeouts -import com.digitalasset.canton.lifecycle.* -import com.digitalasset.canton.lifecycle.UnlessShutdown.AbortedDueToShutdown -import com.digitalasset.canton.logging.{SuppressionRule, TracedLogger} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.Thereafter.syntax.* -import com.digitalasset.canton.util.retry.ErrorKind.TransientErrorKind -import com.digitalasset.canton.util.retry.Jitter.RandomSource -import com.digitalasset.canton.util.{DelayUtil, FutureUtil, retry} -import com.digitalasset.canton.{BaseTest, HasExecutorService} -import org.scalatest.Assertion -import org.scalatest.funspec.AsyncFunSpec -import org.slf4j.event.Level - -import java.util.Random -import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicLong, AtomicReference} -import scala.concurrent.duration.* -import scala.concurrent.{Await, ExecutionContext, Future} -import scala.util.{Failure, Success as TrySuccess} - -class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { - - val random = new Random() - val randomSource: RandomSource = Jitter.randomSource(random) - - val flagCloseable: FlagCloseable = FlagCloseable(logger, DefaultProcessingTimeouts.testing) - - def forwardCountingFutureStream(value: Int = 0): LazyList[Future[Int]] = - Future(value) #:: forwardCountingFutureStream(value + 1) - - def backwardCountingFutureStream(value: Int): LazyList[Future[Int]] = - if (value < 0) LazyList.empty - else Future(value) #:: backwardCountingFutureStream(value - 1) - - def time[T](f: => T): Duration = { - val before = System.currentTimeMillis - f - Duration(System.currentTimeMillis - before, MILLISECONDS) - } - - describe("retry.Directly") { - - testUnexpectedException(Directly(logger, flagCloseable, 10, "directly-unexpected-exception-op")) - - it("should retry a future for a specified number of times") { - implicit val success: Success[Int] = Success(_ == 3) - val tries = forwardCountingFutureStream().iterator - Directly( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 3, - operationName = "directly-retry-specified-times-op", - )( - tries.next(), - AllExceptionRetryPolicy, - ).map(result => assert(success.predicate(result) === true)) - } - - it("should fail when expected") { - val success = implicitly[Success[Option[Int]]] - val tries = Future(None: Option[Int]) - Directly( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 2, - operationName = "directly-fail-when-expected-op", - )( - tries, - AllExceptionRetryPolicy, - ).map(result => assert(success.predicate(result) === false)) - } - - it("should deal with future failures") { - implicit val success: Success[Any] = Success.always - val policy = Directly( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 3, - operationName = "directly-future-failures-op", - ) - val counter = new AtomicInteger(0) - val future = policy( - { - counter.incrementAndGet() - Future.failed(new RuntimeException("always failing")) - }, - AllExceptionRetryPolicy, - ) - // expect failure after 1+3 tries - future.failed.map { t => - assert(counter.get() === 4 && t.getMessage === "always failing") - } - } - - testSynchronousException( - Directly( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 3, - 
operationName = "directly-sync-exception-op", - ), - 3, - ) - - it("should accept a future in reduced syntax format") { - implicit val success: Success[Any] = Success.always - val counter = new AtomicInteger() - val future = Directly( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 1, - operationName = "directly-future-reduced-syntax-format-op", - )( - { - counter.incrementAndGet() - Future.failed(new RuntimeException("always failing")) - }, - AllExceptionRetryPolicy, - ) - future.failed.map(t => assert(counter.get() === 2 && t.getMessage === "always failing")) - } - - it("should retry futures passed by-name instead of caching results") { - implicit val success: Success[Any] = Success.always - val counter = new AtomicInteger() - val future = Directly( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 1, - operationName = "directly-retry-futures-passed-op", - )( - counter.getAndIncrement() match { - case 1 => Future.successful("yay!") - case _ => Future.failed(new RuntimeException("failed")) - }, - AllExceptionRetryPolicy, - ) - future.map(result => assert(counter.get() === 2 && result === "yay!")) - } - - it("should repeat on not expected value until success") { - implicit val success: Success[Boolean] = Success(identity) - val retried = new AtomicInteger() - val retriedUntilSuccess = 10 - - def run() = - if (retried.get() < retriedUntilSuccess) { - retried.incrementAndGet() - Future(false) - } else { - Future(true) - } - - val policy = - Directly( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = Forever, - operationName = "directly-repeat-on-non-expected-value-op", - ) - policy(run(), AllExceptionRetryPolicy).map { result => - assert(result === true) - assert(retried.get() == retriedUntilSuccess) - } - } - - testStopOnClosing( - Directly( - logger, - _, - maxRetries = Forever, - operationName = "directly-stop-on-closing-op", - retryLogLevel = Some(Level.INFO), - ), - retriedUntilClose = 10, - ) - - testClosedExecutionContext( - Directly( - logger, - _, - maxRetries = Forever, - operationName = "directly-closed-ex-context-op", - retryLogLevel = Some(Level.INFO), - ) - ) - - testStopOnShutdown( - Directly(logger, _, maxRetries = Forever, operationName = "directly-stop-on-shutdown-op"), - retriedUntilShutdown = 10, - ) - - testSuspend(maxRetries => - suspend => - Directly( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = maxRetries, - operationName = "directly-suspend-op", - suspendRetries = suspend, - ) - ) - - testExceptionLogging( - Directly( - logger, - flagCloseable, - maxRetries = 3, - operationName = "directly-exception-logging-op", - ) - ) - - testExceptionLoggingRetryForever( - Directly( - logger, - flagCloseable, - maxRetries = Forever, - operationName = "directly-exception-logging-forever-op", - ) - ) - } - - describe("retry.Pause") { - - testUnexpectedException( - Pause( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 10, - delay = 30.millis, - operationName = "pause-unexpected-exception-op", - ) - ) - - it("should pause in between retries") { - implicit val success: Success[Int] = Success(_ == 3) - val tries = forwardCountingFutureStream().iterator - val policy = Pause( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 3, - delay = 30.millis, - operationName = "pause-between-retries-op", - ) - val marker_base = System.currentTimeMillis - val marker = new AtomicLong(0) - - val runF = policy( - { - marker.set(System.currentTimeMillis); tries.next() - }, - 
AllExceptionRetryPolicy, - ) - runF.map { result => - val delta = marker.get() - marker_base - assert( - success.predicate(result) && - delta >= 90 && delta <= 1000 - ) // was 110, depends on how hot runtime is - } - } - - it("should repeat on unexpected value with pause until success") { - implicit val success: Success[Boolean] = Success(identity) - val retried = new AtomicInteger() - val retriedUntilSuccess = 10 - - def run() = - if (retried.get() < retriedUntilSuccess) { - retried.incrementAndGet() - Future(false) - } else { - Future(true) - } - - val policy = Pause( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = Forever, - delay = 1.millis, - operationName = "pause-repeat-on-unexpected-until-success-op", - ) - policy(run(), AllExceptionRetryPolicy).map { result => - assert(result === true) - assert(retried.get() == retriedUntilSuccess) - } - } - - testSynchronousException( - Pause( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 1, - delay = 1.millis, - operationName = "pause-sync-exception-op", - ), - 1, - ) - - testStopOnClosing( - Pause( - logger, - _, - maxRetries = Forever, - delay = 50.millis, - operationName = "pause-stop-on-closing-op", - retryLogLevel = Some(Level.INFO), - ), - retriedUntilClose = 3, - ) - - testClosedExecutionContext( - Pause( - logger, - _, - maxRetries = Forever, - delay = 10.millis, - operationName = "pause-closed-ex-context-op", - retryLogLevel = Some(Level.INFO), - ) - ) - - testStopOnShutdown( - Pause( - logger, - _, - maxRetries = Forever, - delay = 1.millis, - operationName = "pause-stop-on-shutdown-op", - ), - retriedUntilShutdown = 10, - ) - - testSuspend(maxRetries => - suspend => - Pause( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = maxRetries, - delay = 5.millis, - operationName = "pause-suspend-op", - suspendRetries = suspend, - ) - ) - - testExceptionLogging( - Pause( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 3, - delay = 1.millis, - operationName = "pause-exception-logging-op", - ) - ) - - testExceptionLoggingRetryForever( - Pause( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = retry.Forever, - delay = 1.millis, - operationName = "pause-exception-logging-forever-op", - ) - ) - } - - describe("retry.Backoff") { - - implicit val jitter: Jitter = Jitter.none(1.minute) - - testUnexpectedException( - Backoff( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 10, - initialDelay = 30.millis, - maxDelay = Duration.Inf, - operationName = "backoff-unexpected-exception-op", - ) - ) - - it("should pause with multiplier between retries") { - implicit val success: Success[Int] = Success(_ == 2) - val tries = forwardCountingFutureStream().iterator - val policy = Backoff( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 2, - initialDelay = 30.millis, - maxDelay = Duration.Inf, - operationName = "backoff-pause-multiply-between-retries-op", - ) - val marker_base = System.currentTimeMillis - val marker = new AtomicLong(0) - val runF = policy( - { - marker.set(System.currentTimeMillis); tries.next() - }, - AllExceptionRetryPolicy, - ) - runF.map { result => - val delta = marker.get() - marker_base - assert( - success.predicate(result) === true && - delta >= 90 && delta <= 1000 // was 110 - ) - } - } - - it("should repeat on unexpected value with backoff until success") { - implicit val success: Success[Boolean] = Success(identity) - val retried = new AtomicInteger() - val retriedUntilSuccess = 5 - - def 
run() = - if (retried.get() < retriedUntilSuccess) { - retried.incrementAndGet() - Future(false) - } else { - Future(true) - } - - val policy = Backoff( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = Forever, - initialDelay = 1.millis, - maxDelay = Duration.Inf, - operationName = "backoff-repeat-on-unexpected-value-op", - ) - policy(run(), AllExceptionRetryPolicy).map { result => - assert(result === true) - assert(retried.get() == 5) - } - } - - testSynchronousException( - Backoff( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 1, - initialDelay = 1.millis, - maxDelay = Duration.Inf, - operationName = "backoff-sync-exception-op", - ), - 1, - ) - - testStopOnClosing( - Backoff( - logger, - _, - maxRetries = Forever, - initialDelay = 10.millis, - maxDelay = Duration.Inf, - operationName = "backoff-stop-on-closing-op", - retryLogLevel = Some(Level.INFO), - ), - retriedUntilClose = 3, - ) - - testClosedExecutionContext( - Backoff( - logger, - _, - maxRetries = Forever, - initialDelay = 10.millis, - maxDelay = Duration.Inf, - operationName = "backoff-closed-ex-context-op", - retryLogLevel = Some(Level.INFO), - ) - ) - - testStopOnShutdown( - Backoff( - logger, - _, - maxRetries = 10, - initialDelay = 1.millis, - maxDelay = Duration.Inf, - operationName = "backoff-stop-on-shutdown-op", - ), - retriedUntilShutdown = 3, - ) - - testSuspend(maxRetries => - suspend => - Backoff( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = maxRetries, - initialDelay = 5.milli, - maxDelay = Duration.Inf, - operationName = "backoff-suspend-op", - suspendRetries = suspend, - ) - ) - - testExceptionLogging( - Backoff( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 3, - initialDelay = 1.millis, - maxDelay = 1.millis, - operationName = "backoff-exception-logging-op", - ) - ) - - testExceptionLoggingRetryForever( - Backoff( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = retry.Forever, - initialDelay = 1.millis, - maxDelay = 1.millis, - operationName = "backoff-exception-logging-forever-op", - )(Jitter.none(1.millis)) - ) - } - - trait AlgoCreator { - def apply(cap: FiniteDuration): Jitter - } - - def testJitterBackoff(name: String, algoCreator: AlgoCreator): Unit = { - describe(s"retry.JitterBackoff.$name") { - - testUnexpectedException( - Backoff( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 10, - initialDelay = 30.millis, - maxDelay = Duration.Inf, - operationName = "backoff-unexpected-exception-op", - )(algoCreator(cap = 10.millis)) - ) - - it("should retry a future for a specified number of times") { - implicit val success: Success[Int] = Success(_ == 3) - implicit val algo: Jitter = algoCreator(cap = 10.millis) - val tries = forwardCountingFutureStream().iterator - val policy = Backoff( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 3, - initialDelay = 1.milli, - maxDelay = Duration.Inf, - operationName = "backoff-retry-specified-num-of-times-op", - ) - policy(tries.next(), AllExceptionRetryPolicy).map(result => - assert(success.predicate(result) === true) - ) - } - - it("should fail when expected") { - implicit val algo: Jitter = algoCreator(cap = 10.millis) - val success = implicitly[Success[Option[Int]]] - val tries = Future(None: Option[Int]) - val policy = - Backoff( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 3, - initialDelay = 1.milli, - maxDelay = Duration.Inf, - operationName = "backoff-fail-when-expected-op", - ) - 
policy(tries, AllExceptionRetryPolicy).map(result => - assert(success.predicate(result) === false) - ) - } - - it("should deal with future failures") { - implicit val success: Success[Any] = Success.always - implicit val algo: Jitter = algoCreator(cap = 10.millis) - val policy = - Backoff( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 3, - initialDelay = 5.millis, - maxDelay = Duration.Inf, - operationName = "backoff-future-failures-op", - ) - val counter = new AtomicInteger() - val future = policy( - { - counter.incrementAndGet() - Future.failed(new RuntimeException("always failing")) - }, - AllExceptionRetryPolicy, - ) - future.failed.map(t => assert(counter.get() === 4 && t.getMessage === "always failing")) - } - - it("should retry futures passed by-name instead of caching results") { - implicit val success: Success[Any] = Success.always - implicit val algo: Jitter = algoCreator(cap = 10.millis) - val counter = new AtomicInteger() - val policy = - Backoff( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 1, - initialDelay = 1.milli, - maxDelay = Duration.Inf, - operationName = "backoff-try-futures-passed-op", - ) - val future = policy( - counter.getAndIncrement() match { - case 1 => Future.successful("yay!") - case _ => Future.failed(new RuntimeException("failed")) - }, - AllExceptionRetryPolicy, - ) - future.map(result => assert(counter.get() == 2 && result === "yay!")) - } - - it("should pause with multiplier and jitter between retries") { - implicit val success: Success[Int] = Success(_ == 2) - implicit val algo: Jitter = algoCreator(cap = 1000.millis) - val tries = forwardCountingFutureStream().iterator - val policy = Backoff( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 5, - initialDelay = 50.millis, - maxDelay = Duration.Inf, - operationName = "backoff-pause-with-mult-and-jitter-op", - ) - val marker_base = System.currentTimeMillis - val marker = new AtomicLong(0) - - policy( - { - marker.set(System.currentTimeMillis); tries.next() - }, - AllExceptionRetryPolicy, - ).map { result => - val delta = marker.get() - marker_base - assert( - success.predicate(result) === true && - delta >= 0 && delta <= 2000 - ) - } - } - - it("should also work when invoked as forever") { - implicit val success: Success[Int] = Success(_ == 5) - implicit val algo: Jitter = algoCreator(cap = 50.millis) - val tries = forwardCountingFutureStream().iterator - val policy = - Backoff( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = Forever,
- initialDelay = 1.millis, - maxDelay = Duration.Inf, - operationName = "backoff-repeat-with-jitter-until-success-op", - ) - policy(run(), AllExceptionRetryPolicy).map { result => - assert(result === true) - assert(retried.get() == retriedUntilSuccess) - } - } - } - } - - testJitterBackoff("none", t => Jitter.none(cap = t)) - testJitterBackoff("full", t => Jitter.full(cap = t, random = randomSource)) - testJitterBackoff("equal", t => Jitter.equal(cap = t, random = randomSource)) - testJitterBackoff("decorrelated", t => Jitter.decorrelated(cap = t, random = randomSource)) - - describe("retry.When") { - - testUnexpectedException( - When( - logger, - { case _ => - Pause( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 10, - delay = 30.millis, - operationName = "when-unexpected-exception-op", - ) - }, - ) - ) - - it("should retry conditionally when a condition is met") { - implicit val success: Success[Int] = Success(_ == 2) - val tries = forwardCountingFutureStream().iterator - val policy = When( - logger, - { - // this is very contrived but should serve as an example - // of matching then dispatching a retry depending on - // the value of the future when completed - case 0 => - When( - logger, - { case 1 => - Pause( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 4, - delay = 2.seconds, - operationName = "when-retry-cond-op", - ) - }, - ) - }, - ) - val future = policy(tries.next(), AllExceptionRetryPolicy) - future.map(result => assert(success.predicate(result) === true)) - } - - it("should retry but only when condition is met") { - implicit val success: Success[Int] = Success(_ == 2) - val tries = forwardCountingFutureStream().iterator - val policy = When( - logger, - { - // this cond will never be met because - // a cond for n == 0 is not defined - case 1 => - Directly( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 3, - operationName = "when-retry-only-when-cond-met-op", - ) - }, - ) - - val future = policy(tries.next(), AllExceptionRetryPolicy) - future.map(result => assert(success.predicate(result) === false)) - } - - it("should handle future failures") { - implicit val success: Success[Boolean] = Success(identity) - final case class RetryAfter(duration: FiniteDuration) extends RuntimeException - val retried = new AtomicBoolean - - def run() = - if (retried.get()) Future(true) - else { - retried.set(true) - Future.failed(RetryAfter(1.second)) - } - - val policy = When( - logger, - { - // lift an exception into a new policy - case RetryAfter(duration) => - Pause( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 4, - delay = duration, - operationName = "when-handle-future-failures-op", - ) - }, - ) - policy(run(), AllExceptionRetryPolicy).map(result => assert(result === true)) - } - - it("should handle synchronous failures") { - implicit val success: Success[Boolean] = Success(identity) - final case class RetryAfter(duration: FiniteDuration) extends RuntimeException - val retried = new AtomicBoolean - - def run() = - if (retried.get()) Future(true) - else { - retried.set(true) - throw RetryAfter(1.second) - } - - val policy = When( - logger, - { - // lift an exception into a new policy - case RetryAfter(duration) => - Pause( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = 4, - delay = duration, - operationName = "when-sync-failures-op", - ) - }, - ) - policy(run(), AllExceptionRetryPolicy).map(result => assert(result === true)) - } - - it("should repeat on failure until success") 
{ - implicit val success: Success[Boolean] = Success(identity) - class MyException extends RuntimeException - val retried = new AtomicInteger() - val retriedUntilSuccess = 10 - - def run() = - if (retried.get() < retriedUntilSuccess) { - retried.incrementAndGet() - Future.failed(new MyException) - } else { - Future(true) - } - - val policy = When( - logger, - { - // lift an exception into a new policy - case _: MyException => - Directly( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = Forever, - operationName = "when-repeat-fail-until-success-op", - ) - }, - ) - policy(run(), AllExceptionRetryPolicy).map { result => - assert(result === true) - assert(retried.get() == retriedUntilSuccess) - } - } - - it("should repeat on failure with pause until success") { - implicit val success: Success[Boolean] = Success(identity) - class MyException extends RuntimeException - val retried = new AtomicInteger() - val retriedUntilSuccess = 10 - - def run() = - if (retried.get() < retriedUntilSuccess) { - retried.incrementAndGet() - Future.failed(new MyException) - } else { - Future(true) - } - - val policy = When( - logger, - { - // lift an exception into a new policy - case _: MyException => - Pause( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = Forever, - delay = 1.millis, - operationName = "when-repeat-with-pause-op", - ) - }, - ) - policy(run(), AllExceptionRetryPolicy).map { result => - assert(result === true) - assert(retried.get() == retriedUntilSuccess) - } - } - - it("should repeat on failure with backoff until success") { - implicit val success: Success[Boolean] = Success[Boolean](identity) - implicit val jitter: Jitter = Jitter.none(1.minute) - class MyException extends RuntimeException - val retried = new AtomicInteger() - val retriedUntilSuccess = 5 - - def run() = - if (retried.get() < retriedUntilSuccess) { - retried.incrementAndGet() - Future.failed(new MyException) - } else { - Future(true) - } - - val policy = When( - logger, - { - // lift an exception into a new policy - case _: MyException => - Backoff( - logger, - hasSynchronizeWithClosing = flagCloseable, - maxRetries = Forever, - initialDelay = 1.millis, - maxDelay = Duration.Inf, - operationName = "when-backoff-repeat-op", - ) - }, - ) - policy(run(), AllExceptionRetryPolicy).map { result => - assert(result === true) - assert(retried.get() == 5) - } - } - - it("should repeat on failure with jitter backoff until success") { - implicit val success: Success[Boolean] = Success(identity) - class MyException extends RuntimeException - val retried = new AtomicInteger() - val retriedUntilSuccess = 10 - - def run() = - if (retried.get() < retriedUntilSuccess) { - retried.incrementAndGet() - Future.failed(new MyException) - } else { - Future(true) - } - - val policy = When( - logger, - { - // lift an exception into a new policy - case _: MyException => - Backoff( - logger, - flagCloseable, - maxRetries = Forever, - initialDelay = 1.millis, - maxDelay = Duration.Inf, - operationName = "when-backoff-jitter-op", - ) - }, - ) - policy(run(), AllExceptionRetryPolicy).map { result => - assert(result === true) - assert(retried.get() == 10) - } - } - - testStopOnShutdown( - _ => - When( - logger, - _ => - Directly(logger, flagCloseable, maxRetries = Forever, operationName = "should-not-run"), - ), - retriedUntilShutdown = 1, - ) - } - - def testUnexpectedException(policy: Policy): Unit = - it("should not retry after an exception that isn't retryable") { - implicit val success: Success[Any] = 
Success.always - val counter = new AtomicInteger() - val future = policy( - { - counter.incrementAndGet() - Future.failed(new RuntimeException(s"unexpected problem")) - }, - NoExceptionRetryPolicy, - ) - future.failed.map(t => assert(counter.get() === 1 && t.getMessage === "unexpected problem")) - } - - def testSynchronousException(policy: Policy, maxRetries: Int): Unit = - it("should convert a synchronous exception into an asynchronous one") { - implicit val success: Success[Any] = Success.always - val counter = new AtomicInteger(0) - val future = policy.apply[Future, Unit]( - { - counter.incrementAndGet() - throw new RuntimeException("always failing") - }, - AllExceptionRetryPolicy, - ) - // expect failure after 1+maxRetries tries - future.failed.map { t => - assert(counter.get() === maxRetries + 1 && t.getMessage === "always failing") - } - } - - def testStopOnClosing(policy: PerformUnlessClosing => Policy, retriedUntilClose: Int): Unit = { - it("should repeat until closed from within") { - implicit val success: Success[Int] = Success.never - - val closeable = FlagCloseable(logger, DefaultProcessingTimeouts.testing) - val retried = new AtomicInteger() - - def run(): Future[Int] = { - val incr = retried.incrementAndGet() - if (incr == retriedUntilClose) { - // Do not directly call `close` because this will deadlock - FutureUtil.doNotAwait( - Future(closeable.close())(executorService), - "Closing the FlagCloseable of the retry", - ) - eventually() { - closeable.isClosing shouldBe true - } - } - Future.successful(incr) - } - - val result = policy(closeable)(run(), AllExceptionRetryPolicy)( - success, - executorService, - traceContext, - implicitly, - ).futureValue - - assert(result == retriedUntilClose, "Expected to get last result as result.") - assert( - retried.get() == retriedUntilClose, - s"Expected to increment $retriedUntilClose times before failure", - ) - } - - it("should repeat until closed from outside") { - val closeable = FlagCloseable(logger, DefaultProcessingTimeouts.testing) - val retried = new AtomicInteger() - - def run(): Future[Int] = Future.successful { - val num = retried.incrementAndGet() - logger.debug(s"Increment retried is $num, closeable is ${closeable.isClosing}") - num - } - - val retryF = { - implicit val executionContext: ExecutionContext = executorService - policy(closeable)(run(), AllExceptionRetryPolicy)( - Success.never, - executorService, - traceContext, - implicitly, - ) - .thereafter { count => - logger.debug(s"Stopped retry after $count") - } - } - - logger.debug("Wrapping") - // Wrap the retry in a performUnlessClosing to trigger possible deadlocks. 
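// (The deadlock being probed: close() waits for tasks registered via
// synchronizeWithClosing*, while a retry loop that keeps re-registering could
// in principle keep close() waiting forever; the bounded Await below fails the
// test if that happens.)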
- val retryUnlessClosingF = - closeable.synchronizeWithClosingF("test-retry")(retryF)(executorService, traceContext) - - Threading.sleep(10) - closeable.close() - - inside(Await.result(retryUnlessClosingF.unwrap, 100.millis)) { - case UnlessShutdown.Outcome(_) => succeed - case UnlessShutdown.AbortedDueToShutdown => fail("Unexpected shutdown.") - } - } - } - - def testClosedExecutionContext(policy: PerformUnlessClosing => Policy): Unit = - it("should handle a closed execution context after closing") { - val closeable = FlagCloseable(logger, DefaultProcessingTimeouts.testing) - - val closeableEc = Threading.newExecutionContext( - executionContextName, - noTracingLogger, - Threading.detectNumberOfThreads(noTracingLogger), - exitOnFatal = exitOnFatal, - ) - - val retried = new AtomicInteger() - def run(): Future[Int] = Future { - retried.incrementAndGet() - }(closeableEc) - - try { - FutureUtil.doNotAwait( - // This future probably never completes because we are likely to close the execution context during a `Delay` - policy(closeable)(run(), AllExceptionRetryPolicy)( - success = Success.never, - executionContext = closeableEc, - traceContext = implicitly, - effect = implicitly, - ), - "retrying forever until the execution context closes", - ) - - Threading.sleep(50) - logger.debug("About to close the FlagCloseable") - closeable.close() - } finally { - LifeCycle.close(ExecutorServiceExtensions(closeableEc)(logger, timeouts))(logger) - } - succeed - } - - def testStopOnShutdown( - policy: PerformUnlessClosing => Policy, - retriedUntilShutdown: Int, - ): Unit = - it("should stop on shutdown") { - implicit val success: Success[Boolean] = Success(identity) - val retried = new AtomicInteger() - - def run(): FutureUnlessShutdown[Boolean] = { - val retries = retried.incrementAndGet() - if (retries == retriedUntilShutdown) { - FutureUnlessShutdown.abortedDueToShutdown - } else { - FutureUnlessShutdown.pure(false) - } - } - - policy(flagCloseable).unlessShutdown(run(), AllExceptionRetryPolicy).unwrap.map { result => - result shouldBe AbortedDueToShutdown - retried.get() shouldBe retriedUntilShutdown - } - } - - def testSuspend(mkPolicy: Int => Eval[FiniteDuration] => RetryWithDelay): Unit = - it("does not retry while suspended") { - implicit val success: Success[Unit] = Success(_ => false) - val maxRetries = 10 - val retried = new AtomicInteger() - val suspend = new AtomicReference(Duration.Zero) - - def run(): Future[Unit] = { - val retries = retried.incrementAndGet() - logger.debug(s"testSuspend 'retries' has been incremented to $retries") - if (suspend.get() > Duration.Zero) { - logger.error("Policy is still retrying despite suspension.") - } else if (retries == 3) { - suspend.set(1.millis) - logger.debug("testSuspend 'suspend' has been set to 1 millisecond.") - FutureUtil.doNotAwait( - DelayUtil.delay(100.millis).map { _ => - suspend.set(Duration.Zero) - logger.debug("testSuspend 'suspend' has been reset to 0.") - }, - "An error occurred while resetting suspension delay.", - ) - } - Future.unit - } - - val policy = mkPolicy(maxRetries)(Eval.always(suspend.get())) - policy.apply(run(), NoExceptionRetryPolicy).map { _ => - retried.get() shouldBe maxRetries + 3 - } - } - - def testExceptionLogging(policy: => Policy): Unit = - it("should log an exception with the configured retry log level") { - // We don't care about the success criteria as we always throw an exception - implicit val success: Success[Any] = Success.always - - case class TestException() extends RuntimeException("test exception") - - 
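// As used in these tests, an ExceptionRetryPolicy classifies a throwable into
// an ErrorKind (TransientErrorKind marks it retryable) and may override the log
// level used when a retry is reported -- WARN for TestException here.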
val retryable = new ExceptionRetryPolicy() { - - override protected def determineExceptionErrorKind( - exception: Throwable, - logger: TracedLogger, - )(implicit - tc: TraceContext - ): ErrorKind = - TransientErrorKind() - - override def retryLogLevel(e: Throwable): Option[Level] = e match { - case TestException() => Some(Level.WARN) - case _ => None - } - - } - - loggerFactory - .assertLogsSeq(SuppressionRule.Level(Level.WARN))( - policy[Future, Assertion](Future.failed(TestException()), retryable).transform { - case Failure(TestException()) => - logger.debug("Retry terminated with expected exception") - TrySuccess(succeed) - case result => result - }, - entries => - forEvery(entries) { e => - e.warningMessage should (include("Detected an error") - or include regex raw"The operation '\S+' has failed with an exception" - or include regex raw"Now retrying operation '\S+'") - }, - ) - } - - def testExceptionLoggingRetryForever(policy: => Policy): Unit = - it( - "when retrying forever, should keep using configured retry log level even after many retries" - ) { - // We don't care about the success criteria as we always throw an exception - implicit val success: Success[Any] = Success.always - - case class TestException() extends RuntimeException("test exception") - - class Retryable extends ExceptionRetryPolicy { - override protected def determineExceptionErrorKind( - exception: Throwable, - logger: TracedLogger, - )(implicit tc: TraceContext): ErrorKind = TransientErrorKind() - } - - class RetryableWithOverriddenLogLevel extends Retryable { - override def retryLogLevel(e: Throwable): Option[Level] = e match { - case TestException() => Some(Level.INFO) - case _ => None - } - } - - val retried = new AtomicInteger() - - def run(): Future[Int] = { - val num = retried.incrementAndGet() - if (num > RetryWithDelay.complainAfterRetries + 1) Future.successful(num) - else Future.failed(TestException()) - } - - for { - // Check normal case of retrying forever with a retryable error that does not override the log level. - // After `complainAfterRetries` retries, we should log at WARN. - _ <- loggerFactory - .assertLogsSeq(SuppressionRule.Level(Level.WARN))( - policy[Future, Int](run(), new Retryable()), - entries => - forExactly(2, entries) { e => - e.warningMessage should (include regex raw"The operation \S+ has failed with an exception" - or include regex raw"Now retrying operation \S+") - }, - ) - - // If the retryable error overrides the log level to below WARN, we should - // not see WARN logs even after `complainAfterRetries` retries. - _ <- loggerFactory - .assertLogsSeq(SuppressionRule.Level(Level.WARN))( - { - retried.set(0) - policy[Future, Int](run(), new RetryableWithOverriddenLogLevel()) - }, - _ shouldBe empty, - ) - } yield succeed - } - -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/retry/SuccessSpec.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/retry/SuccessSpec.scala deleted file mode 100644 index 2987973d5a..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/util/retry/SuccessSpec.scala +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util.retry - -import org.scalatest.funspec.AnyFunSpec - -import scala.util.Try - -class SuccessSpec extends AnyFunSpec { - describe("retry.Success.either") { - val either = implicitly[Success[Either[String, String]]] - it("should be successful on a Right") { - assert(either.predicate(Right("")) === true) - } - - it("should be a failure on a Left") { - assert(either.predicate(Left("")) === false) - } - } - - describe("retry.Success.option") { - val option = implicitly[Success[Option[String]]] - it("should be successful on Some(_)") { - assert(option.predicate(Some("")) == true) - } - - it("should be a failure on None") { - assert(option.predicate(None) == false) - } - } - - describe("retry.Success.tried") { - val tried = implicitly[Success[Try[String]]] - it("should be successful on Success(_)") { - assert(tried.predicate(Try("")) == true) - } - - it("should be failure on Failure(_)") { - assert(tried.predicate(Try({ throw new RuntimeException("") })) === false) - } - } - - describe("retry.Success combinators") { - val a = Success[Int](_ > 1) - val b = Success[Int](_ < 3) - it("should support and") { - assert(a.and(b).predicate(2) === true) - assert(a.and(false).predicate(2) === false) - } - it("should support or") { - assert(a.or(b).predicate(4) === true) - assert(a.or(true).predicate(0) === true) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/AllGenerators.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/AllGenerators.scala deleted file mode 100644 index 5977dcd0c3..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/AllGenerators.scala +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.version - -import com.digitalasset.canton.GeneratorsLf -import com.digitalasset.canton.data.{GeneratorsData, GeneratorsTrafficData} -import com.digitalasset.canton.protocol.GeneratorsProtocol -import com.digitalasset.canton.protocol.messages.{ - GeneratorsLocalVerdict, - GeneratorsMessages, - GeneratorsVerdict, -} -import com.digitalasset.canton.sequencing.GeneratorsSequencing -import com.digitalasset.canton.sequencing.protocol.GeneratorsProtocol as GeneratorsProtocolSequencing -import com.digitalasset.canton.topology.GeneratorsTopology -import com.digitalasset.canton.topology.transaction.GeneratorsTransaction - -final class AllGenerators(protocolVersion: ProtocolVersion) { - lazy val topology = new GeneratorsTopology(protocolVersion) - lazy val generatorsSequencing = new GeneratorsSequencing(topology) - lazy val lf = new GeneratorsLf(topology) - lazy val protocol = new GeneratorsProtocol(protocolVersion, lf, topology) - lazy val data = new GeneratorsData(protocolVersion, lf, protocol, topology) - lazy val transaction = - new GeneratorsTransaction( - protocolVersion, - lf, - protocol, - topology, - generatorsSequencing, - ) - lazy val localVerdict = GeneratorsLocalVerdict(protocolVersion, lf) - lazy val verdict = GeneratorsVerdict(protocolVersion, localVerdict) - lazy val generatorsMessages = new GeneratorsMessages( - protocolVersion, - data, - lf, - protocol, - localVerdict, - verdict, - topology, - transaction, - ) - lazy val generatorsProtocolSeq = new GeneratorsProtocolSequencing( - protocolVersion, - generatorsMessages, - topology, - ) - lazy val trafficData = new GeneratorsTrafficData( - protocolVersion, - topology, - ) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/CantonVersionTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/CantonVersionTest.scala deleted file mode 100644 index 494a666873..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/CantonVersionTest.scala +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.version - -import com.digitalasset.canton.BaseTest -import org.scalatest.wordspec.AnyWordSpec - -class CantonVersionTest extends AnyWordSpec with BaseTest { - lazy val v2_1_0_rc1: ReleaseVersion = ReleaseVersion(2, 1, 0, Some("rc1")) - lazy val v2_0_0_snapshot: ReleaseVersion = ReleaseVersion(2, 0, 0, Some("SNAPSHOT")) - - private def v(rawVersion: String): ReleaseVersion = ReleaseVersion.create(rawVersion).value - "CantonVersion" should { - "parse version string if valid" in { - ReleaseVersion.create("5.1.3").value shouldBe new ReleaseVersion(5, 1, 3) - ReleaseVersion.create("1.43.3-SNAPSHOT").value shouldBe new ReleaseVersion( - 1, - 43, - 3, - Some("SNAPSHOT"), - ) - ReleaseVersion.create("1.43.3-rc").value shouldBe new ReleaseVersion( - 1, - 43, - 3, - Some("rc"), - ) - ReleaseVersion.create("1.43.3-rc9").value shouldBe new ReleaseVersion( - 1, - 43, - 3, - Some("rc9"), - ) - ReleaseVersion.create("1.1.1-SNAPSHT").value shouldBe new ReleaseVersion( - 1, - 1, - 1, - Some("SNAPSHT"), - ) - ReleaseVersion.create("1.1.1-rc10").value shouldBe new ReleaseVersion( - 1, - 1, - 1, - Some("rc10"), - ) - ReleaseVersion.create("1.1.1-rc0").value shouldBe new ReleaseVersion( - 1, - 1, - 1, - Some("rc0"), - ) - ReleaseVersion.create("1.1.1-").value shouldBe new ReleaseVersion(1, 1, 1, Some("")) - ReleaseVersion.create("1.1.1-SNAPSHOT-rc").value shouldBe new ReleaseVersion( - 1, - 1, - 1, - Some("SNAPSHOT-rc"), - ) - ReleaseVersion.create("2.0.0-SNAPSHOT").value shouldBe v2_0_0_snapshot - ReleaseVersion.create("2.1.0-rc1").value shouldBe v2_1_0_rc1 - - ReleaseVersion.create("1").left.value shouldBe a[String] - ReleaseVersion.create("1.0.-1").left.value shouldBe a[String] - ReleaseVersion.create("1.1.10000").left.value shouldBe a[String] - ReleaseVersion.create("foo.bar").left.value shouldBe a[String] - ReleaseVersion.create("1.1.").left.value shouldBe a[String] - ReleaseVersion.create("1.1.0snapshot").left.value shouldBe a[String] - ReleaseVersion.create("1.1.0.rc").left.value shouldBe a[String] - ReleaseVersion.create("1.1.0.1").left.value shouldBe a[String] - } - - "should prove that releases are correctly compared" in { - - v("1.2.3") == v("1.2.3") shouldBe true - v("1.2.3") > v("1.2.0") shouldBe true - v("1.2.3") > v("1.0.0") shouldBe true - - v("1.0.0-rc") == v("1.0.0") shouldBe false - v("1.0.0-rc") == v("1.0.0-ia") shouldBe false - v("1.0.0-a") < v("1.0.0-b") shouldBe true - - v("1.0.0") > v("1.0.0-b") shouldBe true - v("1.0.0-1") < v("1.0.0-b") shouldBe true - v("1.0.0-1") < v("1.0.0-2") shouldBe true - v("1.0.0-a.b.c") < v("1.0.0-a.b.d") shouldBe true - v("1.0.0-a.b.c.e") < v("1.0.0-a.b.d") shouldBe true - - // examples given in SemVer specification - v("1.0.0-alpha") < v("1.0.0-alpha.1") shouldBe true - v("1.0.0-alpha.1") < v("1.0.0-alpha.beta") shouldBe true - v("1.0.0-alpha.beta") < v("1.0.0-beta") shouldBe true - v("1.0.0-beta") < v("1.0.0-beta.2") shouldBe true - v("1.0.0-beta.2") < v("1.0.0-beta.11") shouldBe true - v("1.0.0-rc.1") < v("1.0.0") shouldBe true - - v("1.0.0-rc") > v("1.0.0-ia") shouldBe true - v("1.0.0-rc-SNAPSHOT") == v("1.0.0-ia-SNAPSHOT") shouldBe false - - v("1.0.0-SNAPSHOT") < v("1.0.0") shouldBe true - v("1.1.0-SNAPSHOT") > v("1.0.0") shouldBe true - v("1.0.1-SNAPSHOT") > v("1.0.0") shouldBe true - - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/DamlLfVersionToProtocolVersionsTest.scala 
b/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/DamlLfVersionToProtocolVersionsTest.scala deleted file mode 100644 index 5b0dd2a18e..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/DamlLfVersionToProtocolVersionsTest.scala +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.version - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.protocol.LfLanguageVersion -import org.scalatest.wordspec.AnyWordSpec - -import scala.math.Ordered.orderingToOrdered -import scala.util.Try - -class DamlLfVersionToProtocolVersionsTest extends AnyWordSpec with BaseTest { - - val supportedTransactionVersions = LfLanguageVersion.AllV2.filter(_ >= LfLanguageVersion.v2_1) - - "DamlLFVersionToProtocolVersions" should { - supportedTransactionVersions.foreach { version => - s"find the minimum protocol version for $version" in { - assert( - Try( - DamlLfVersionToProtocolVersions.getMinimumSupportedProtocolVersion(version) - ).isSuccess, - s"Add $version to damlLfVersionToProtocolVersions Map", - ) - - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/HasProtocolVersionedWrapperTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/HasProtocolVersionedWrapperTest.scala deleted file mode 100644 index 9c67d33cf0..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/HasProtocolVersionedWrapperTest.scala +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.version - -import cats.syntax.either.* -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.ProtoDeserializationError.UnknownProtoVersion -import com.digitalasset.canton.protobuf.{VersionedMessageV0, VersionedMessageV1, VersionedMessageV2} -import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult -import com.digitalasset.canton.version.ProtocolVersion.ProtocolVersionWithStatus -import com.google.protobuf.ByteString -import org.scalatest.Assertion -import org.scalatest.wordspec.AnyWordSpec - -import scala.annotation.unused - -class HasProtocolVersionedWrapperTest extends AnyWordSpec with BaseTest { - - import HasProtocolVersionedWrapperTest.* - - /* - Supposing that basePV is 30, we get the scheme - - proto 0 1 2 - protocolVersion 30 31 32 33 34 ... 
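- - Worked example: a message at pv 30 or 31 serializes with proto version 0, at pv 32 with - proto version 1, and at pv 33 or above with proto version 2; pv 34 maps back to the - representative pv 33 (the assertions below exercise exactly this mapping).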
- */ - "HasVersionedWrapperV2" should { - "use correct proto version depending on the protocol version for serialization" in { - def message(pv: ProtocolVersion): Message = - Message("Hey", 1, 2.0)(protocolVersionRepresentative(pv), None) - message(basePV).toProtoVersioned.version shouldBe 0 - message(basePV + 1).toProtoVersioned.version shouldBe 0 - message(basePV + 2).toProtoVersioned.version shouldBe 1 - message(basePV + 3).toProtoVersioned.version shouldBe 2 - message(basePV + 4).toProtoVersioned.version shouldBe 2 - } - - def fromByteString( - bytes: ByteString, - protoVersion: Int, - expectedProtocolVersion: ProtocolVersion, - ): ParsingResult[Message] = Message - .fromByteString( - expectedProtocolVersion, - VersionedMessage[Message](bytes, protoVersion).toByteString, - ) - - "set correct protocol version depending on the proto version" in { - - val messageV1 = VersionedMessageV1("Hey", 42).toByteString - val expectedV1Deserialization = - Message("Hey", 42, 1.0)(protocolVersionRepresentative(basePV + 2), None) - fromByteString(messageV1, 1, basePV + 2).value shouldBe expectedV1Deserialization - - // Round trip serialization - Message - .fromByteString(basePV + 2, expectedV1Deserialization.toByteString) - .value shouldBe expectedV1Deserialization - - val messageV2 = VersionedMessageV2("Hey", 42, 43.0).toByteString - val expectedV2Deserialization = - Message("Hey", 42, 43.0)(protocolVersionRepresentative(basePV + 3), None) - fromByteString(messageV2, 2, basePV + 3).value shouldBe expectedV2Deserialization - - // Round trip serialization - Message - .fromByteString(basePV + 3, expectedV2Deserialization.toByteString) - .value shouldBe expectedV2Deserialization - } - - "return the protocol representative" in { - protocolVersionRepresentative(basePV + 0).representative shouldBe basePV - protocolVersionRepresentative(basePV + 1).representative shouldBe basePV - protocolVersionRepresentative(basePV + 2).representative shouldBe basePV + 2 - protocolVersionRepresentative(basePV + 3).representative shouldBe basePV + 3 - protocolVersionRepresentative(basePV + 4).representative shouldBe basePV + 3 - protocolVersionRepresentative(basePV + 5).representative shouldBe basePV + 3 - } - - "return the highest inclusive protocol representative for an unknown protocol version" in { - protocolVersionRepresentative(ProtocolVersion(-1)).representative shouldBe basePV + 3 - } - - "fail for an unknown proto version" in { - val maxProtoVersion = Message.versioningTable.table.keys.max.v - val unknownProtoVersion = ProtoVersion(maxProtoVersion + 1) - - Message - .protocolVersionRepresentativeFor(unknownProtoVersion) - .left - .value shouldBe UnknownProtoVersion(unknownProtoVersion, Message.name) - } - - "fail deserialization when the representative protocol version from the proto version does not match the expected (representative) protocol version" in { - val message = VersionedMessageV1("Hey", 42).toByteString - fromByteString(message, 1, basePV + 3).left.value should have message - Message.unexpectedProtoVersionError(basePV + 3, basePV + 2).message - } - - "validate proto version against expected (representative) protocol version" in { - Message - .validateDeserialization(ProtocolVersionValidation(basePV + 2), basePV + 2) - .value shouldBe () - Message - .validateDeserialization(ProtocolVersionValidation(basePV + 3), basePV + 2) - .left - .value should have message Message - .unexpectedProtoVersionError(basePV + 3, basePV + 2) - .message - Message - .validateDeserialization( - 
ProtocolVersionValidation.NoValidation, - basePV, - ) - .value shouldBe () - } - - "status consistency between protobuf messages and protocol versions" in { - new VersioningCompanionMemoization[Message] { - - // Used by the compiled string below - @unused - val stablePV: ProtocolVersionWithStatus[ProtocolVersionAnnotation.Stable] = - ProtocolVersion.createStable(10) - @unused - val alphaPV: ProtocolVersionWithStatus[ProtocolVersionAnnotation.Alpha] = - ProtocolVersion.createAlpha(11) - - def name: String = "message" - - override def versioningTable: VersioningTable = ??? - - @unused - private def createVersionedProtoCodec[ - ProtoClass <: scalapb.GeneratedMessage, - Status <: ProtocolVersionAnnotation.Status, - ]( - protoCompanion: scalapb.GeneratedMessageCompanion[ProtoClass] & Status, - pv: ProtocolVersion.ProtocolVersionWithStatus[Status], - deserializer: ProtoClass => DataByteString => ParsingResult[Message], - serializer: Message => ProtoClass, - ) = - VersionedProtoCodec.apply[ - Message, - Unit, - Message, - Message.type, - ProtoClass, - Status, - ](pv)( - protoCompanion - )( - supportedProtoVersionMemoized(_)(deserializer), - serializer, - ) - - clue("can use a stable proto message in a stable protocol version") { - assertCompiles( - """ - createVersionedProtoCodec( - VersionedMessageV1, - stablePV, - Message.fromProtoV1, - _.toProtoV1, - )""" - ): Assertion - } - - clue("can use a stable proto message in an alpha protocol version") { - assertCompiles( - """ - createVersionedProtoCodec( - VersionedMessageV1, - alphaPV, - Message.fromProtoV1, - _.toProtoV1, - )""" - ): Assertion - } - - clue("can use an alpha proto message in an alpha protocol version") { - assertCompiles( - """ - createVersionedProtoCodec( - VersionedMessageV2, - alphaPV, - Message.fromProtoV2, - _.toProtoV2, - )""" - ): Assertion - } - - clue("can not use an alpha proto message in a stable protocol version") { - assertTypeError( - """ - createVersionedProtoCodec( - VersionedMessageV2, - stablePV, - Message.fromProtoV2, - _.toProtoV2, - )""" - ): Assertion - } - } - } - } -} - -object HasProtocolVersionedWrapperTest { - import org.scalatest.EitherValues.* - - private val basePV = ProtocolVersion.minimum - - implicit class RichProtocolVersion(val pv: ProtocolVersion) { - def +(i: Int): ProtocolVersion = ProtocolVersion(pv.v + i) - } - - private def protocolVersionRepresentative( - pv: ProtocolVersion - ): RepresentativeProtocolVersion[Message.type] = - Message.protocolVersionRepresentativeFor(pv) - - final case class Message( - msg: String, - iValue: Int, - dValue: Double, - )( - override val representativeProtocolVersion: RepresentativeProtocolVersion[Message.type], - val deserializedFrom: Option[ByteString] = None, - ) extends HasProtocolVersionedWrapper[Message] { - - @transient override protected lazy val companionObj: Message.type = Message - - def toProtoV0 = VersionedMessageV0(msg) - def toProtoV1 = VersionedMessageV1(msg, iValue) - def toProtoV2 = VersionedMessageV2(msg, iValue, dValue) - } - - object Message extends VersioningCompanionMemoization[Message] { - def name: String = "Message" - - /* - Supposing that basePV is 30, we get the scheme - - proto 0 1 2 - protocolVersion 30 31 32 33 34 ... 
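- - Note: the versioning table below is keyed by ProtoVersion, so its entries need not be - declared in ascending order (here 1, 0, 2); each entry pairs a proto version with the - minimum protocol version from which it applies.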
- */ - override val versioningTable: VersioningTable = VersioningTable( - ProtoVersion(1) -> VersionedProtoCodec(ProtocolVersion.createAlpha((basePV + 2).v))( - VersionedMessageV1 - )( - supportedProtoVersionMemoized(_)(fromProtoV1), - _.toProtoV1, - ), - // Can use a stable Protobuf message in a stable protocol version - ProtoVersion(0) -> VersionedProtoCodec(ProtocolVersion.createStable(basePV.v))( - VersionedMessageV0 - )( - supportedProtoVersionMemoized(_)(fromProtoV0), - _.toProtoV0, - ), - // Can use an alpha Protobuf message in an alpha protocol version - ProtoVersion(2) -> VersionedProtoCodec( - ProtocolVersion.createAlpha((basePV + 3).v) - )(VersionedMessageV2)( - supportedProtoVersionMemoized(_)(fromProtoV2), - _.toProtoV2, - ), - ) - - def fromProtoV0(message: VersionedMessageV0)(bytes: ByteString): ParsingResult[Message] = - Message( - message.msg, - 0, - 0, - )( - protocolVersionRepresentativeFor(ProtoVersion(0)).value, - Some(bytes), - ).asRight - - def fromProtoV1(message: VersionedMessageV1)(bytes: ByteString): ParsingResult[Message] = - Message( - message.msg, - message.value, - 1, - )( - protocolVersionRepresentativeFor(ProtoVersion(1)).value, - Some(bytes), - ).asRight - - def fromProtoV2(message: VersionedMessageV2)(bytes: ByteString): ParsingResult[Message] = - Message( - message.msg, - message.iValue, - message.dValue, - )( - protocolVersionRepresentativeFor(ProtoVersion(2)).value, - Some(bytes), - ).asRight - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/HasTestCloseContext.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/HasTestCloseContext.scala deleted file mode 100644 index ae3997d394..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/HasTestCloseContext.scala +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.version - -import com.digitalasset.canton.config.DefaultProcessingTimeouts -import com.digitalasset.canton.lifecycle.{CloseContext, FlagCloseable} -import com.digitalasset.canton.logging.{NamedLogging, TracedLogger} -import com.digitalasset.canton.version.HasTestCloseContext.makeTestCloseContext - -object HasTestCloseContext { - def makeTestCloseContext(loggerP: TracedLogger): CloseContext = CloseContext( - FlagCloseable(loggerP, DefaultProcessingTimeouts.testing) - ) -} - -trait HasNonImplicitTestCloseContext { self: NamedLogging => - protected val testCloseContext: CloseContext = makeTestCloseContext(self.logger) -} - -trait HasTestCloseContext { self: NamedLogging => - implicit protected val testCloseContext: CloseContext = makeTestCloseContext(self.logger) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/ProtocolVersionCompatibilityTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/ProtocolVersionCompatibilityTest.scala deleted file mode 100644 index a36e444592..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/ProtocolVersionCompatibilityTest.scala +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.version - -import cats.syntax.either.* -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.version.ProtocolVersionCompatibility.canClientConnectToServer -import org.scalatest.wordspec.AnyWordSpec - -class ProtocolVersionCompatibilityTest extends AnyWordSpec with BaseTest { - "ProtocolVersionCompatibility" should { - "version check" should { - "be successful for matching versions" in { - canClientConnectToServer( - clientSupportedVersions = Seq(ProtocolVersion.v34, ProtocolVersion.dev), - serverVersion = ProtocolVersion.dev, - None, - ) shouldBe Either.unit - } - - "fail with a nice message if incompatible" in { - canClientConnectToServer( - clientSupportedVersions = Seq(ProtocolVersion.v34), - serverVersion = ProtocolVersion.dev, - None, - ).left.value shouldBe (VersionNotSupportedError( - ProtocolVersion.dev, - Seq(ProtocolVersion.v34), - )) - } - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/ProtocolVersionTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/ProtocolVersionTest.scala deleted file mode 100644 index 39025fded3..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/ProtocolVersionTest.scala +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.version - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult -import com.digitalasset.canton.version.ProtocolVersion.unsupportedErrorMessage -import org.scalatest.wordspec.AnyWordSpec - -class ProtocolVersionTest extends AnyWordSpec with BaseTest { - "ProtocolVersion" should { - "refuse release versions which are not protocol versions" in { - ProtocolVersion.create("5.1.3").left.value shouldBe a[String] - ProtocolVersion.create("5.1.0").left.value shouldBe a[String] - ProtocolVersion.create("1.43.3-SNAPSHOT").left.value shouldBe a[String] - ProtocolVersion.create("1.43.3-rc").left.value shouldBe a[String] - ProtocolVersion.create("1.43.3-rc9").left.value shouldBe a[String] - } - - "parse version string if valid" in { - // New format - ProtocolVersion - .create(ProtocolVersion.v34.toProtoPrimitiveS) - .value shouldBe ProtocolVersion.v34 - - ProtocolVersion - .create(Int.MaxValue.toString) - .value shouldBe ProtocolVersion.dev - - ProtocolVersion.create("DeV").value shouldBe ProtocolVersion.dev - } - - "be comparable" in { - ProtocolVersion.v34 < ProtocolVersion.dev shouldBe true - ProtocolVersion.v34 <= ProtocolVersion.dev shouldBe true - ProtocolVersion.dev <= ProtocolVersion.dev shouldBe true - - ProtocolVersion.dev < ProtocolVersion.v34 shouldBe false - ProtocolVersion.dev <= ProtocolVersion.v34 shouldBe false - - ProtocolVersion.dev <= ProtocolVersion.dev shouldBe true - ProtocolVersion.v34 < ProtocolVersion.dev shouldBe true - ProtocolVersion.dev <= ProtocolVersion.v34 shouldBe false - - ProtocolVersion.dev == ProtocolVersion.dev shouldBe true - ProtocolVersion.dev == ProtocolVersion.v34 shouldBe false - } - - val invalidProtocolVersionNumber = Int.MinValue - val invalidProtocolVersion = ProtocolVersion(invalidProtocolVersionNumber) - - "parse version string with create" in { - ProtocolVersion.supported.foreach { supported => - ProtocolVersion.create(supported.toString).value shouldBe supported - } 
- } - - "fail parsing version string with create" in { - ProtocolVersion.create(invalidProtocolVersionNumber.toString).left.value should be( - unsupportedErrorMessage(invalidProtocolVersion) - ) - } - - "fail parsing version string considering also deleted protocol versions with create" in { - ProtocolVersion - .create(invalidProtocolVersionNumber.toString, allowDeleted = true) - .left - .value should be( - unsupportedErrorMessage(invalidProtocolVersion, includeDeleted = true) - ) - } - - "parse version string with tryCreate" in { - ProtocolVersion.supported.foreach { supported => - ProtocolVersion.tryCreate(supported.toString) shouldBe supported - } - } - - "fail parsing version string with tryCreate" in { - the[RuntimeException] thrownBy { - ProtocolVersion.tryCreate(invalidProtocolVersionNumber.toString) - } should have message unsupportedErrorMessage(invalidProtocolVersion) - } - - "parse version with fromProtoPrimitive" in { - ProtocolVersion.supported.foreach { supported => - val result = ProtocolVersion.fromProtoPrimitive(supported.toProtoPrimitive) - result shouldBe a[ParsingResult[?]] - result.value shouldBe supported - } - } - - "fail parsing version fromProtoPrimitive" in { - val result = ProtocolVersion.fromProtoPrimitive(invalidProtocolVersionNumber) - result shouldBe a[ParsingResult[?]] - result.left.value should have message unsupportedErrorMessage(invalidProtocolVersion) - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/SerializationDeserializationTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/SerializationDeserializationTest.scala deleted file mode 100644 index 6bc0d7173e..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/SerializationDeserializationTest.scala +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.version - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.crypto.{SymmetricKey, TestHash} -import com.digitalasset.canton.data.* -import com.digitalasset.canton.protocol.* -import com.digitalasset.canton.protocol.messages.* -import com.digitalasset.canton.protocol.messages.EncryptedViewMessage.computeRandomnessLength -import com.digitalasset.canton.pruning.* -import com.digitalasset.canton.sequencing.SequencerConnections -import com.digitalasset.canton.sequencing.channel.{ - ConnectToSequencerChannelRequest, - ConnectToSequencerChannelResponse, -} -import com.digitalasset.canton.sequencing.protocol.channel.{ - SequencerChannelConnectedToAllEndpoints, - SequencerChannelMetadata, - SequencerChannelSessionKey, - SequencerChannelSessionKeyAck, -} -import com.digitalasset.canton.sequencing.protocol.{ - AcknowledgeRequest, - AggregationRule, - Batch, - ClosedEnvelope, - GetTrafficStateForMemberRequest, - GetTrafficStateForMemberResponse, - MaxRequestSizeToDeserialize, - SequencedEvent, - SequencingSubmissionCost, - SignedContent, - SubmissionRequest, - SubscriptionRequestV2, - TopologyStateForInitRequest, -} -import com.digitalasset.canton.topology.transaction.{ - SignedTopologyTransaction, - SignedTopologyTransactions, - TopologyTransaction, -} -import com.digitalasset.canton.util.ReassignmentTag.{Source, Target} -import com.google.protobuf.ByteString -import org.scalatest.wordspec.AnyWordSpec -import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks - -class SerializationDeserializationTest - extends AnyWordSpec - with BaseTest - with ScalaCheckPropertyChecks - with SerializationDeserializationTestHelpers { - - forAll(Table("protocol version", ProtocolVersion.supported*)) { version => - val generators = new AllGenerators(version) - - import generators.data.* - import generators.generatorsMessages.* - import generators.trafficData.* - import generators.verdict.* - import generators.localVerdict.* - import generators.protocol.* - import generators.generatorsProtocolSeq.* - import generators.generatorsSequencing.* - import generators.transaction.* - import com.digitalasset.canton.crypto.GeneratorsCrypto.* - - s"Serialization and deserialization methods using protocol version $version" should { - "compose to the identity" in { - testVersioned(SymmetricKey, version) - - test(StaticSynchronizerParameters, version) - test(DynamicSynchronizerParameters, version) - test(DynamicSequencingParameters, version) - - test(AcsCommitment, version) - test(Verdict, version) - test(ConfirmationResponses, version) - testContext(TypedSignedProtocolMessageContent, version, version) - testContext(SignedProtocolMessage, version, version) - test(ProtocolSymmetricKey, version) - test(LocalVerdict, version) - testContext(EnvelopeContent, (TestHash, version), version) - test(ConfirmationResultMessage, version) - - test(AcknowledgeRequest, version) - test(AggregationRule, version) - test(ClosedEnvelope, version) - test(SequencingSubmissionCost, version) - testVersioned(ContractMetadata, version)( - generators.protocol.contractMetadataArb(canHaveEmptyKey = true) - ) - testVersioned[SerializableContract](SerializableContract, version)( - generators.protocol.serializableContractArb(canHaveEmptyKey = true) - ) - - test(ActionDescription, version) - - // Merkle tree leaves - testContext(CommonMetadata, TestHash, version) - testContext(ParticipantMetadata, TestHash, version) - testContext(SubmitterMetadata, TestHash, version) 
- testContext(AssignmentCommonData, TestHash, version) - testContext(AssignmentView, TestHash, version) - testContext(UnassignmentCommonData, TestHash, version) - testContext(UnassignmentView, TestHash, version) - testContext(ViewCommonData, TestHash, version) - - test(TopologyTransaction, version) - testContext(SignedTopologyTransaction, ProtocolVersionValidation(version), version) - testContext(SignedTopologyTransactions, ProtocolVersionValidation(version), version) - - testContext(ViewParticipantData, TestHash, version) - test(Batch, version) - test(SetTrafficPurchasedMessage, version) - testContext(SubmissionRequest, MaxRequestSizeToDeserialize.NoLimit, version) - testVersioned(SequencerConnections, version) - testVersioned(CounterParticipantIntervalsBehind, version) - test(GetTrafficStateForMemberRequest, version) - // This fails, which is expected, because PartySignatures serialization is only defined on PV.dev - // We do this on purpose to make clear that this is a work in progress and should **NOT** be merged to 3.1 - test(ExternalAuthorization, version) - test(GetTrafficStateForMemberResponse, version) - test(TopologyStateForInitRequest, version) - test(SubscriptionRequestV2, version) - if (version.isDev) { - test(ConnectToSequencerChannelRequest, version) - test(ConnectToSequencerChannelResponse, version) - test(SequencerChannelMetadata, version) - test(SequencerChannelConnectedToAllEndpoints, version) - test(SequencerChannelSessionKey, version) - test(SequencerChannelSessionKeyAck, version) - } - test(SequencedEvent, version) - test(SignedContent, version) - testContext(TransactionView, (TestHash, version), version) - testContext(FullInformeeTree, (TestHash, version), version) - // testing MerkleSeq structure with specific VersionedMerkleTree: SubmitterMetadata. - testContext( - MerkleSeq, - ( - ( - TestHash, - (bytes: ByteString) => SubmitterMetadata.fromTrustedByteString(TestHash)(bytes), - ), - version, - ), - version, - ) - val randomnessLength = computeRandomnessLength(ExampleTransactionFactory.pureCrypto) - testContext(LightTransactionViewTree, ((TestHash, randomnessLength), version), version) - testContextTaggedProtocolVersion(AssignmentViewTree, TestHash, Target(version)) - testContext( - UnassignmentViewTree, - (TestHash, Source(ProtocolVersionValidation.PV(version))), - version, - ) - } - } - } - - "be exhaustive" in { - val requiredTests = findHasProtocolVersionedWrapperSubClasses("com.digitalasset.canton").toSet - - val missingTests = requiredTests.diff(testedClasses) - - /* - If this test fails, it means that one class inheriting from HasProtocolVersionWrapper in the - package is not tested in the SerializationDeserializationTests - */ - clue(s"Missing tests should be empty but found: $missingTests")(missingTests shouldBe empty) - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/TestProtocolVersions.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/TestProtocolVersions.scala deleted file mode 100644 index 0f713248a3..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/version/TestProtocolVersions.scala +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.version - -// Provides protocol versions for testing which may be invalid and unsupported; and thus intentionally located -// within the version package so that ProtocolVersion internal functionality is accessible. -object CommunityTestProtocolVersions { - - /** An old, unsupported protocol version. - */ - val DeletedPv: ProtocolVersion = ProtocolVersion(5) -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/canton/watchdog/WatchdogServiceTest.scala b/canton/community/common/src/test/test/scala/com/digitalasset/canton/watchdog/WatchdogServiceTest.scala deleted file mode 100644 index 65ce293204..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/canton/watchdog/WatchdogServiceTest.scala +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.watchdog - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.concurrent.Threading -import com.digitalasset.canton.config.{NonNegativeFiniteDuration, PositiveFiniteDuration} -import com.digitalasset.canton.logging.SuppressingLogger.LogEntryOptionality -import org.scalatest.wordspec.AnyWordSpec - -import scala.concurrent.duration.* - -class WatchdogServiceTest extends AnyWordSpec with BaseTest { - "WatchdogServiceTest" must { - "execute checkIsAlive periodically" in { - var checkCounter = 0 - var killCounter = 0 - val watchdogService = new WatchdogService( - checkInterval = PositiveFiniteDuration(100.milliseconds), - checkIsAlive = { - checkCounter += 1 - true - }, - killDelay = NonNegativeFiniteDuration(10.milliseconds), - killAction = { - killCounter += 1 - }, - loggerFactory = loggerFactory, - timeouts = timeouts, - ) - Threading.sleep(1000) - watchdogService.close() - checkCounter shouldBe >(1) - killCounter shouldBe 0 - } - "kill the service using killAction if it is not alive" in { - var checkCounter = 0 - var killCounter = 0 - loggerFactory.assertLogsUnorderedOptional( - { - val watchdogService = new WatchdogService( - checkInterval = PositiveFiniteDuration(40.milliseconds), - checkIsAlive = { - checkCounter += 1 - false - }, - killDelay = NonNegativeFiniteDuration(10.milliseconds), - killAction = { - killCounter += 1 - }, - loggerFactory = loggerFactory, - timeouts = timeouts, - ) - Threading.sleep(1000) - watchdogService.close() - }, - ( - LogEntryOptionality.Required, - _.errorMessage shouldBe "Watchdog detected that the service is not alive. Scheduling to kill the service after a delay of 0.01s.", - ), - ( - LogEntryOptionality.Required, - _.errorMessage shouldBe "Watchdog is killing the service now.", - ), - ) - checkCounter shouldBe 1 - killCounter shouldBe 1 - } - } -} diff --git a/canton/community/common/src/test/test/scala/com/digitalasset/platform/daml/lf/testing/SampleParties.scala b/canton/community/common/src/test/test/scala/com/digitalasset/platform/daml/lf/testing/SampleParties.scala deleted file mode 100644 index a0da5a4f64..0000000000 --- a/canton/community/common/src/test/test/scala/com/digitalasset/platform/daml/lf/testing/SampleParties.scala +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.platform.daml.lf.testing - -import com.digitalasset.canton.LfPartyId - -object SampleParties { - val AlicesBank = LfPartyId.assertFromString("AlicesBank::default") - val BobsBank = LfPartyId.assertFromString("BobsBank::default") - val Alice = LfPartyId.assertFromString("Alice::default") - val Bob = LfPartyId.assertFromString("Bob::default") - val Charlie = LfPartyId.assertFromString("Charlie::default") -} diff --git a/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/continuity/ProtocolContinuityConformanceTest.scala b/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/continuity/ProtocolContinuityConformanceTest.scala new file mode 100644 index 0000000000..e110506fda --- /dev/null +++ b/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/continuity/ProtocolContinuityConformanceTest.scala @@ -0,0 +1,252 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.continuity + +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.HasExecutionContext +import com.digitalasset.canton.admin.api.client.data.StaticSynchronizerParameters +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.integration.bootstrap.{ + NetworkBootstrapper, + NetworkTopologyDescription, +} +import com.digitalasset.canton.integration.plugins.* +import com.digitalasset.canton.integration.plugins.UseExternalProcess.RunVersion +import com.digitalasset.canton.integration.plugins.UseLedgerApiTestTool.{ + LAPITTVersion, + releasesFromArtifactory, +} +import com.digitalasset.canton.integration.tests.ledgerapi.LedgerApiConformanceBase +import com.digitalasset.canton.integration.tests.ledgerapi.LedgerApiConformanceBase.excludedTests +import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.ApiUserManagementServiceSuppressionRule +import com.digitalasset.canton.integration.{ + ConfigTransforms, + EnvironmentDefinition, + IsolatedEnvironments, + TestConsoleEnvironment, +} +import com.digitalasset.canton.logging.TracedLogger +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ReleaseUtils +import com.digitalasset.canton.util.ReleaseUtils.TestedRelease +import com.digitalasset.canton.version.{ProtocolVersionCompatibility, ReleaseVersion} + +trait MultiVersionLedgerApiConformanceBase extends LedgerApiConformanceBase { + + protected def ledgerApiTestToolVersions: Seq[String] + + protected val ledgerApiTestToolPlugins: Map[String, UseLedgerApiTestTool] = + ledgerApiTestToolVersions + .map(version => + version -> new UseLedgerApiTestTool( + loggerFactory = loggerFactory, + connectedSynchronizersCount = connectedSynchronizersCount, + version = LAPITTVersion.Explicit(version), + ) + ) + .toMap + + ledgerApiTestToolPlugins.values.foreach(registerPlugin) + + def runShardedTests( + version: ReleaseVersion + )(shard: Int, numShards: Int)( + env: TestConsoleEnvironment + ): String = + ledgerApiTestToolPlugins(version.toString) + .runShardedSuites( + shard, + numShards, + exclude = excludedTests, + )(env) +} + +/** The Protocol continuity tests test that we don't accidentally break protocol compatibility with + * respect to the Ledger API. 
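+ * + * For example, one shard of the latest-release variant (see the concrete suites further + * below) can be run on its own; a hypothetical sbt invocation would be: + * {{{ + * sbt "testOnly *LatestProtocolContinuityShard0ConformanceTestSynchronizer" + * }}}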
+ * + * To run them, see: + * + * - AllProtocolContinuityConformanceTest: tests against all previously published releases. + * + * - LatestProtocolContinuityConformanceTest: tests against the latest published release. + */ +trait ProtocolContinuityConformanceTest + extends MultiVersionLedgerApiConformanceBase + with IsolatedEnvironments + with HasExecutionContext { + registerPlugin(new UsePostgres(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + + override def connectedSynchronizersCount = 1 + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P1S1M1_Manual + .addConfigTransforms(ConfigTransforms.clearMinimumProtocolVersion*) + .addConfigTransforms(ConfigTransforms.dontWarnOnDeprecatedPV*) + + protected def testedReleases: List[TestedRelease] + override lazy val ledgerApiTestToolVersions: List[String] = + testedReleases.map(_.releaseVersion.toString) + + protected def numShards: Int + protected def shard: Int +} + +/** For a given release R, the Ledger API conformance test suites at release R are run against: + * - 1x synchronizer of release R with the latest protocol version of release R + * - 4x participants based on current main + */ +trait ProtocolContinuityConformanceTestSynchronizer extends ProtocolContinuityConformanceTest { + private lazy val externalSequencer = + new UseExternalProcess( + loggerFactory, + externalSequencers = Set("sequencer1"), + fileNameHint = this.getClass.getSimpleName, + ) + + private lazy val externalMediator = + new UseExternalProcess( + loggerFactory, + externalMediators = Set("mediator1"), + fileNameHint = this.getClass.getSimpleName, + ) + + registerPlugin(externalSequencer) + registerPlugin(externalMediator) + + testedReleases.foreach { case TestedRelease(release, protocolVersions) => + lazy val binDir = ReleaseUtils.retrieve(release).futureValue + lazy val pv = protocolVersions.max1 + + s"run conformance tests of shard $shard with release $release and protocol $pv" in { env => + import env.* + + val cantonReleaseVersion = RunVersion.Release(binDir) + + externalMediator.start(remoteMediator1.name, cantonReleaseVersion) + externalSequencer.start(remoteSequencer1.name, cantonReleaseVersion) + + remoteSequencer1.health.wait_for_ready_for_initialization() + remoteMediator1.health.wait_for_ready_for_initialization() + + val staticParams = StaticSynchronizerParameters.defaultsWithoutKMS(protocolVersion = pv) + NetworkBootstrapper( + Seq( + NetworkTopologyDescription.createWithStaticSynchronizerParameters( + daName, + synchronizerOwners = Seq(remoteSequencer1), + synchronizerThreshold = PositiveInt.one, + sequencers = Seq(remoteSequencer1), + mediators = Seq(remoteMediator1), + staticSynchronizerParameters = staticParams, + ) + ) + )(env).bootstrap() + + remoteSequencer1.health.wait_for_initialized() + remoteMediator1.health.wait_for_initialized() + participant1.health.wait_for_initialized() + + setupLedgerApiConformanceEnvironment(env) + + loggerFactory.suppress(ApiUserManagementServiceSuppressionRule) { + runShardedTests(release)(shard, numShards)(env) + } + + // Shutdown + shutdownLedgerApiConformanceEnvironment(env) + + externalMediator.stop(remoteMediator1.name) + } + } +} + +/** For a given release R, these tests run the Ledger API compatibility tests against + * - 1x synchronizer based on current main with the latest protocol version of release R + * - 1x participant of release R + */ +trait ProtocolContinuityConformanceTestParticipant extends
ProtocolContinuityConformanceTest { + val external = new UseExternalProcess( + loggerFactory, + externalParticipants = Set("participant1"), + fileNameHint = this.getClass.getSimpleName, + ) + + // TODO(i9548): Run with a single participant because currently there is no way to set a participant when using the Ledger API test tool. + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition + .buildBaseEnvironmentDefinition(1, 1, 1) + .withManualStart + + registerPlugin(external) + + testedReleases.foreach { case TestedRelease(release, protocolVersions) => + lazy val binDir = ReleaseUtils.retrieve(release).futureValue + lazy val pv = protocolVersions.max1 + + s"run conformance tests of shard $shard with release $release and protocol $pv" in { + implicit env => + import env.* + + val cantonReleaseVersion = RunVersion.Release(binDir) + + sequencer1.start() + mediator1.start() + mediator1.health.wait_for_ready_for_initialization() + sequencer1.health.wait_for_ready_for_initialization() + + val staticParams = StaticSynchronizerParameters.defaultsWithoutKMS(protocolVersion = pv) + NetworkBootstrapper( + Seq( + EnvironmentDefinition.S1M1.copy(staticSynchronizerParameters = staticParams) + ) + ).bootstrap() + + // Run the participant from the release binary + external.start(remoteParticipant1.name, cantonReleaseVersion) + remoteParticipant1.health.wait_for_initialized() + + setupLedgerApiConformanceEnvironment(env) + + runShardedTests(release)(shard, numShards)(env) + + // Shutdown + shutdownLedgerApiConformanceEnvironment(env) + external.stop(remoteParticipant1.name) + } + } +} + +private[continuity] object ProtocolContinuityConformanceTest { + // all patch versions that are supported + def previousSupportedReleases(logger: TracedLogger)(implicit + tc: TraceContext + ): List[TestedRelease] = + releasesFromArtifactory(logger) + .map(ReleaseVersion.tryCreate) + .map { releaseVersion => + TestedRelease( + releaseVersion, + ProtocolVersionCompatibility.supportedProtocols( + includeAlphaVersions = false, + includeBetaVersions = true, + release = releaseVersion, + ), + ) + } + // excluding protocol versions that are deleted + .flatMap { case TestedRelease(releaseVersion, protocolVersions) => + NonEmpty + .from(protocolVersions.filterNot(_.isDeleted)) + .map(pvs => TestedRelease(releaseVersion, pvs)) + } + .toList + .sortBy(_.releaseVersion) + + def latestSupportedRelease(logger: TracedLogger)(implicit + tc: TraceContext + ): List[TestedRelease] = + previousSupportedReleases(logger).takeRight(1) +} diff --git a/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/continuity/all/AllProtocolContinuityConformanceTest.scala b/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/continuity/all/AllProtocolContinuityConformanceTest.scala new file mode 100644 index 0000000000..0e5c17d267 --- /dev/null +++ b/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/continuity/all/AllProtocolContinuityConformanceTest.scala @@ -0,0 +1,95 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.continuity.all + +import com.digitalasset.canton.integration.tests.continuity.{ + ProtocolContinuityConformanceTest, + ProtocolContinuityConformanceTestParticipant, + ProtocolContinuityConformanceTestSynchronizer, +} + +/** The Protocol continuity tests test that we don't accidentally break protocol compatibility with + * respect to the Ledger API. The tests are executed for all prior releases. + */ +trait AllProtocolContinuityConformanceTest extends ProtocolContinuityConformanceTest { + lazy val testedReleases = + ProtocolContinuityConformanceTest.previousSupportedReleases(logger) + + override lazy val ledgerApiTestToolVersions = testedReleases.map(_.releaseVersion.toString) + + protected val numShards: Int = 6 + protected def shard: Int +} + +class ProtocolContinuityShard0ConformanceTestSynchronizer + extends ProtocolContinuityConformanceTestSynchronizer + with AllProtocolContinuityConformanceTest { + override def shard: Int = 0 +} + +class ProtocolContinuityShard1ConformanceTestSynchronizer + extends ProtocolContinuityConformanceTestSynchronizer + with AllProtocolContinuityConformanceTest { + override def shard: Int = 1 +} + +class ProtocolContinuityShard2ConformanceTestSynchronizer + extends ProtocolContinuityConformanceTestSynchronizer + with AllProtocolContinuityConformanceTest { + override def shard: Int = 2 +} + +class ProtocolContinuityShard3ConformanceTestSynchronizer + extends ProtocolContinuityConformanceTestSynchronizer + with AllProtocolContinuityConformanceTest { + override def shard: Int = 3 +} + +class ProtocolContinuityShard4ConformanceTestSynchronizer + extends ProtocolContinuityConformanceTestSynchronizer + with AllProtocolContinuityConformanceTest { + override def shard: Int = 4 +} + +class ProtocolContinuityShard5ConformanceTestSynchronizer + extends ProtocolContinuityConformanceTestSynchronizer + with AllProtocolContinuityConformanceTest { + override def shard: Int = 5 +} + +class ProtocolContinuityShard0ConformanceTestParticipant + extends ProtocolContinuityConformanceTestParticipant + with AllProtocolContinuityConformanceTest { + override def shard: Int = 0 +} + +class ProtocolContinuityShard1ConformanceTestParticipant + extends ProtocolContinuityConformanceTestParticipant + with AllProtocolContinuityConformanceTest { + override def shard: Int = 1 +} + +class ProtocolContinuityShard2ConformanceTestParticipant + extends ProtocolContinuityConformanceTestParticipant + with AllProtocolContinuityConformanceTest { + override def shard: Int = 2 +} + +class ProtocolContinuityShard3ConformanceTestParticipant + extends ProtocolContinuityConformanceTestParticipant + with AllProtocolContinuityConformanceTest { + override def shard: Int = 3 +} + +class ProtocolContinuityShard4ConformanceTestParticipant + extends ProtocolContinuityConformanceTestParticipant + with AllProtocolContinuityConformanceTest { + override def shard: Int = 4 +} + +class ProtocolContinuityShard5ConformanceTestParticipant + extends ProtocolContinuityConformanceTestParticipant + with AllProtocolContinuityConformanceTest { + override def shard: Int = 5 +} diff --git a/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/continuity/latest/LatestProtocolContinuityConformanceTest.scala b/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/continuity/latest/LatestProtocolContinuityConformanceTest.scala new file mode 100644 index 
0000000000..a5ba2b1984 --- /dev/null +++ b/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/continuity/latest/LatestProtocolContinuityConformanceTest.scala @@ -0,0 +1,34 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.continuity.latest + +import com.digitalasset.canton.integration.tests.continuity.{ + ProtocolContinuityConformanceTest, + ProtocolContinuityConformanceTestParticipant, + ProtocolContinuityConformanceTestSynchronizer, +} +import com.digitalasset.canton.util.ReleaseUtils + +/** The Protocol continuity tests test that we don't accidentally break protocol compatibility with + * respect to the Ledger API. The tests are executed for the latest published release. + */ +trait LatestProtocolContinuityConformanceTest extends ProtocolContinuityConformanceTest { + lazy val testedReleases: List[ReleaseUtils.TestedRelease] = + ProtocolContinuityConformanceTest.latestSupportedRelease(logger) + + protected val numShards: Int = 1 + protected def shard: Int +} + +class LatestProtocolContinuityShard0ConformanceTestSynchronizer + extends ProtocolContinuityConformanceTestSynchronizer + with LatestProtocolContinuityConformanceTest { + override val shard: Int = 0 +} + +class LatestProtocolContinuityShard0ConformanceTestParticipant + extends ProtocolContinuityConformanceTestParticipant + with LatestProtocolContinuityConformanceTest { + override val shard: Int = 0 +} diff --git a/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/infra/UseLedgerApiTestToolTest.scala b/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/infra/UseLedgerApiTestToolTest.scala new file mode 100644 index 0000000000..fa651797ca --- /dev/null +++ b/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/infra/UseLedgerApiTestToolTest.scala @@ -0,0 +1,107 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.infra + +import com.digitalasset.canton.BaseTest +import com.digitalasset.canton.buildinfo.BuildInfo +import com.digitalasset.canton.integration.plugins.UseLedgerApiTestTool.{ + extractVersionString, + findAllReleases, + findMatchingVersions, + latestVersionFromArtifactory, + releasesFromArtifactory, +} +import com.digitalasset.canton.version.{ReleaseVersion, ReleaseVersionToProtocolVersions} +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers + +import scala.util.matching.Regex + +final class UseLedgerApiTestToolTest extends AnyFlatSpec with Matchers with BaseTest { + private val versions = Seq( + "3.3.0-snapshot.20250416.15779.0.v6cccc0c4", + "3.3.0-ad-hoc.20250905.16091.0.v704bf59d", + "3.3.0-snapshot.20251007.16123.0.v670c8fae", + "3.3.1-snapshot.20251007.16123.0.v670c8fae", // does not actually exist; included to exercise patch handling + "3.3.10-snapshot.20251007.16123.0.v670c8fae", // does not actually exist; included to exercise patch handling + "3.4.0-snapshot.20250429.15866.0.vc8f10812", + "3.4.0-snapshot.20251003.17075.0.v69d92264", + "dev", + ) + + "findAllReleases" should "find the major.minor.patch releases correctly" in { + findAllReleases(versions) shouldBe Seq("3.3.0", "3.3.1", "3.3.10", "3.4.0") + } + + "findMatchingVersions" should "find and sort (by date) all the versions matching the given release" in { + findMatchingVersions(versions, "3.3.0") shouldBe Seq( + "3.3.0-snapshot.20250416.15779.0.v6cccc0c4", + "3.3.0-ad-hoc.20250905.16091.0.v704bf59d", + "3.3.0-snapshot.20251007.16123.0.v670c8fae", + ) + + findMatchingVersions(versions, "3.4.0") shouldBe Seq( + "3.4.0-snapshot.20250429.15866.0.vc8f10812", + "3.4.0-snapshot.20251003.17075.0.v69d92264", + ) + } + + private def extractVersion(version: String): ReleaseVersion = { + val versionPattern: Regex = """(\d+)\.(\d+)\.(\d+).*""".r + version match { + case versionPattern(major, minor, patch) => + ReleaseVersion(major.toInt, minor.toInt, patch.toInt) + case _ => throw new IllegalArgumentException(s"No version number found in '$version'") + } + } + + /* + TODO(#16458) + This is a lie because it does not contain all releases. Revisit when Canton 3 is stable and we can + again get the list of releases via the release notes.
+ */ + private val allReleases = + ReleaseVersionToProtocolVersions.majorMinorToStableProtocolVersions.keys.toSeq.sorted + + /** Return the (major, minor) just before `version` + */ + private def previous(version: ReleaseVersion): (Int, Int) = + allReleases.zip(allReleases.tail).collectFirst { + case (prev, curr) if curr == version.majorMinor => prev + } match { + case Some(previous) => previous + case None => allReleases.last + } + + "latestVersionFromArtifactory" should "be able to fetch the latest version from artifactory" in { + val latestToolVersion = latestVersionFromArtifactory(logger) + + latestToolVersion should not be empty + extractVersion(latestToolVersion).major shouldBe 3 + extractVersion(latestToolVersion).majorMinor shouldBe >=( + previous(extractVersion(BuildInfo.version)) + ) + } + + "releasesFromArtifactory" should "be able to fetch the LAPITT versions for each release from artifactory" in { + val releasedToolVersions = releasesFromArtifactory(logger) + + releasedToolVersions should not be empty + + val extractedVersions = + releasedToolVersions.collect(extractVersionString).map(ReleaseVersion.tryCreate) + val currentMajorMinor = + ReleaseVersion(ReleaseVersion.current.major, ReleaseVersion.current.minor, 0) + + // We only test releases starting from 3.3.0, as earlier releases are no longer supported + // TODO(#16458): update the earliest tested release once Canton 3 is stable + val releases33andLater = allReleases.filter { case (major, minor) => + major == 3 && minor >= 3 + } + + extractedVersions + .filter(_ <= currentMajorMinor) + .map(_.majorMinor) should contain allElementsOf releases33andLater.dropRight(1) + } +} diff --git a/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiConformanceTest.scala b/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiConformanceTest.scala new file mode 100644 index 0000000000..29a50dc224 --- /dev/null +++ b/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiConformanceTest.scala @@ -0,0 +1,449 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.ledgerapi + +import com.digitalasset.canton.config +import com.digitalasset.canton.config.* +import com.digitalasset.canton.config.CantonRequireTypes.InstanceName +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.integration.ConfigTransforms.updateAllParticipantConfigs_ +import com.digitalasset.canton.integration.plugins.* +import com.digitalasset.canton.integration.plugins.UseLedgerApiTestTool.LAPITTVersion +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.tests.ledgerapi.LedgerApiConformanceBase.excludedTests +import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.ApiUserManagementServiceSuppressionRule +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + ConfigTransforms, + EnvironmentDefinition, + IsolatedEnvironments, + TestConsoleEnvironment, +} +import com.digitalasset.canton.logging.{LogEntry, SuppressionRule} +import com.digitalasset.canton.version.ProtocolVersion +import monocle.macros.syntax.lens.* +import org.slf4j.event + +trait SingleVersionLedgerApiConformanceBase extends LedgerApiConformanceBase { + protected def lfVersion: UseLedgerApiTestTool.LfVersion = UseLedgerApiTestTool.LfVersion.Stable + + protected def lapittVersion: LAPITTVersion = LAPITTVersion.LocalJar + + protected val ledgerApiTestToolPlugin = + new UseLedgerApiTestTool( + loggerFactory, + lfVersion = lfVersion, + version = lapittVersion, + connectedSynchronizersCount = connectedSynchronizersCount, + ) + registerPlugin(ledgerApiTestToolPlugin) + + def runShardedTests(shard: Int, numShards: Int)( + env: TestConsoleEnvironment + ): String = + ledgerApiTestToolPlugin.runShardedSuites( + shard, + numShards, + exclude = excludedTests, + )(env) +} + +trait LedgerApiConformanceBase extends CommunityIntegrationTest with IsolatedEnvironments { + + protected def connectedSynchronizersCount: Int + + // ensure ledger api conformance tests have less noisy neighbours + protected override def numPermits: PositiveInt = PositiveInt.tryCreate(2) + /* + When running the ProtocolContinuityConformance test, the protocol version can be different + from the `testedProtocolVersion`. 
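+ (The protocol continuity tests run against the latest published release, so the synchronizer may end up on an older protocol version than the current build would pick by default.)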
+ */ + protected def setupLedgerApiConformanceEnvironment( + env: TestConsoleEnvironment + ): Unit = { + import env.* + + require(env.environment.config.sequencers.sizeIs == connectedSynchronizersCount) + + implicit val e: TestConsoleEnvironment = env + + sequencer1_.topology.synchronizer_parameters.propose_update( + daId, + previous => + previous.update( + confirmationResponseTimeout = NonNegativeFiniteDuration.ofMinutes(1), + mediatorReactionTimeout = NonNegativeFiniteDuration.ofMinutes(1), + ledgerTimeRecordTimeTolerance = NonNegativeFiniteDuration.ofMinutes(3), + mediatorDeduplicationTimeout = NonNegativeFiniteDuration.ofMinutes(6), + preparationTimeRecordTimeTolerance = NonNegativeFiniteDuration.ofMinutes(6 / 2), + reconciliationInterval = PositiveDurationSeconds.ofSeconds(1), + ), + ) + + participants.all.synchronizers.connect_local(sequencer1_, alias = daName) + } + + def shutdownLedgerApiConformanceEnvironment(env: TestConsoleEnvironment): Unit = { + import env.* + implicit val e: TestConsoleEnvironment = env + participants.all.synchronizers.disconnect("synchronizer1") + participants.local.foreach(_.stop()) + } +} + +class LedgerApiConformanceMultiSynchronizerTest + extends CommunityIntegrationTest + with IsolatedEnvironments { + + private val connectedSynchronizersCount: Int = 2 + + override lazy val environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P2_S1M1_S1M1 + .withSetup(setupLedgerApiConformanceEnvironment) + + // ensure ledger api conformance tests have less noisy neighbours + protected override def numPermits: PositiveInt = PositiveInt.tryCreate(2) + + protected def setupLedgerApiConformanceEnvironment( + env: TestConsoleEnvironment + ): Unit = { + import env.* + implicit val e: TestConsoleEnvironment = env + + participants.all.synchronizers.connect_local(sequencer1, alias = daName) + participants.all.synchronizers.connect_local(sequencer2, alias = acmeName) + } + + protected val ledgerApiTestToolPlugin = + new UseLedgerApiTestTool( + loggerFactory, + connectedSynchronizersCount = connectedSynchronizersCount, + lfVersion = UseLedgerApiTestTool.LfVersion.Stable, + version = LAPITTVersion.LocalJar, + ) + registerPlugin(ledgerApiTestToolPlugin) + registerPlugin( + new UseReferenceBlockSequencer[DbConfig.Postgres]( + loggerFactory, + sequencerGroups = MultiSynchronizer( + Seq( + Set(InstanceName.tryCreate("sequencer1")), + Set(InstanceName.tryCreate("sequencer2")), + ) + ), + ) + ) + + "Ledger API test tool on a multi-synchronizer setup" can { + "pass multi-synchronizer related conformance tests" in { implicit env => + ledgerApiTestToolPlugin.runSuites( + suites = LedgerApiConformanceBase.multiSynchronizerTests.mkString(","), + exclude = Nil, + concurrency = 2, + ) + } + } +} + +object LedgerApiConformanceBase { + val multiSynchronizerTests = Seq( + "CommandServiceIT:CSsubmitAndWaitPrescribedSynchronizerId", + "CommandServiceIT:CSsubmitAndWaitForTransactionPrescribedSynchronizerId", + "CommandSubmissionCompletionIT:CSCSubmitWithPrescribedSynchronizerId", + "ExplicitDisclosureIT:EDFailOnDisclosedContractIdMismatchWithPrescribedSynchronizerId", + "ExplicitDisclosureIT:EDRouteByDisclosedContractSynchronizerId", + ) + val excludedTests = Seq( + "ClosedWorldIT", // Canton errors with "Some(Disputed: unable to parse party id 'unallocated': FailedSimpleStringConversion(LfError(Invalid unique identifier missing namespace unallocated)))" + // Exclude tests which are run separately below + "ParticipantPruningIT", + "TLSOnePointThreeIT", + "TLSAtLeastOnePointTwoIT", + 
"CheckpointInTailingStreamsIT", + // The following tests need pruning configuration, see LedgerApiParticipantPruningConformanceTest + "ActiveContractsServiceIT:AcsAtPruningOffsetIsAllowed", + "ActiveContractsServiceIT:AcsBeforePruningOffsetIsDisallowed", + "CommandDeduplicationPeriodValidationIT:OffsetPruned", + // Exclude ContractIdIT tests except: RejectNonSuffixedV1Cid, AcceptSuffixedV1Cid + "ContractIdIT:AcceptNonSuffixedV1Cid", + "ContractIdIT:AcceptSuffixedV1CidExerciseTarget", // Racy with: ABORTED: CONTRACT_NOT_FOUND(14,0): Contract could not be found with id + "ContractIdIT:RejectNonSuffixedV1Cid", // With Daml 2.0.0-snapshot.20220124 started failing due to Actual error id (COMMAND_PREPROCESSING_FAILED) does not match expected error id + "UserManagementServiceIT:RaceConditionGrantRights", // See LedgerApiConformanceSuppressedLogs + "UserManagementServiceIT:RaceConditionCreateUsers", // See LedgerApiConformanceSuppressedLogs + // Following value normalisation (https://github.com/digital-asset/daml/pull/19912), this throws a different, equally correct, error + "CommandServiceIT:CSRefuseBadParameter", + ) +} + +trait LedgerApiShardedConformanceBase extends SingleVersionLedgerApiConformanceBase { + + override def connectedSynchronizersCount = 1 + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P3_S1M1 + .withSetup(setupLedgerApiConformanceEnvironment) + + protected val numShards: Int = 3 + protected def shard: Int + + "Ledger Api Test Tool" can { + s"pass semantic tests block $shard" in { implicit env => + // suppress warnings for UserManagementServiceIT + loggerFactory.suppress(ApiUserManagementServiceSuppressionRule) { + runShardedTests(shard, numShards)(env) + } + } + } +} + +// Conformance test that need a suppressing rule on canton side +trait LedgerApiConformanceSuppressedLogs extends SingleVersionLedgerApiConformanceBase { + + override def connectedSynchronizersCount = 1 + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P3_S1M1 + .withSetup(setupLedgerApiConformanceEnvironment) + + "Ledger Api Test Tool" can { + "pass semantic tests block" in { implicit env => + /* + UserManagementServiceIT:RaceConditionGrantRights logs: + - An error on the daml side (violation of uniqueness constraint) + - An error on canton (by the ApiRequestLogger) + */ + loggerFactory.assertLogsSeq(SuppressionRule.Level(event.Level.ERROR))( + ledgerApiTestToolPlugin.runSuitesSerially( + suites = "UserManagementServiceIT:RaceConditionGrantRights", + exclude = Nil, + ), + forEvery(_) { + _.errorMessage should ((startWith( + "Request c.d.l.a.v.a.UserManagementService/GrantUserRights" + ) and include( + s"failed with INTERNAL/${LogEntry.SECURITY_SENSITIVE_MESSAGE_ON_API}" + )) or include( + "Processing the request failed due to a non-transient database error: ERROR: duplicate key value violates unique constraint" + )) + }, + ) + + /* + UserManagementServiceIT:RaceConditionCreateUsers logs: + - An error on the daml side (violation of uniqueness constraint) + - An error on canton (by the ApiRequestLogger) + */ + loggerFactory.assertLogsSeq(SuppressionRule.Level(event.Level.ERROR))( + ledgerApiTestToolPlugin.runSuitesSerially( + suites = "UserManagementServiceIT:RaceConditionCreateUsers", + exclude = Nil, + ), + forEvery(_) { + _.errorMessage should ((startWith( + "Request c.d.l.a.v.a.UserManagementService/CreateUser" + ) and include( + s"failed with INTERNAL/${LogEntry.SECURITY_SENSITIVE_MESSAGE_ON_API}" + )) or include( + "Processing the 
request failed due to a non-transient database error: ERROR: duplicate key value violates unique constraint" + )) + }, + ) + } + } +} + +class LedgerApiConformanceSuppressedLogsPostgres extends LedgerApiConformanceSuppressedLogs { + registerPlugin(new UsePostgres(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) +} + +trait LedgerApiShard0ConformanceTest extends LedgerApiShardedConformanceBase { + override def shard: Int = 0 +} + +// not testing in-memory/H2, as we have observed flaky h2 persistence problems in the indexer + +class LedgerApiShard0ConformanceTestPostgres extends LedgerApiShard0ConformanceTest { + registerPlugin(new UsePostgres(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) +} + +trait LedgerApiShard1ConformanceTest extends LedgerApiShardedConformanceBase { + override def shard: Int = 1 +} + +// not testing in-memory/H2, as we have observed flaky h2 persistence problems in the indexer + +class LedgerApiShard1ConformanceTestPostgres extends LedgerApiShard1ConformanceTest { + registerPlugin(new UsePostgres(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) +} + +trait LedgerApiShard2ConformanceTest extends LedgerApiShardedConformanceBase { + override def shard: Int = 2 +} + +// not testing in-memory/H2, as we have observed flaky h2 persistence problems in the indexer + +class LedgerApiShard2ConformanceTestPostgres extends LedgerApiShard2ConformanceTest { + registerPlugin(new UsePostgres(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) +} + +trait LedgerApiExperimentalConformanceTest extends SingleVersionLedgerApiConformanceBase { + + override def lfVersion = UseLedgerApiTestTool.LfVersion.Dev + + override def connectedSynchronizersCount = 1 + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P3_S1M1 + .withSetup(setupLedgerApiConformanceEnvironment) + + "Ledger Api Test Tool" can { + "pass experimental tests for 2.dev lf version" onlyRunWithOrGreaterThan ProtocolVersion.dev in { + implicit env => + ledgerApiTestToolPlugin.runSuites( + suites = + "ContractKeysCommandDeduplicationIT,ContractKeysContractIdIT,ContractKeysDeeplyNestedValueIT," + + "ContractKeysDivulgenceIT,ContractKeysExplicitDisclosureIT,ContractKeysMultiPartySubmissionIT," + + "ContractKeysWronglyTypedContractIdIT,ContractKeysIT,RaceConditionIT,ExceptionsIT,ExceptionRaceConditionIT," + + "EventsDescendantsIT,PrefetchContractKeysIT", + exclude = Seq( + // TODO(#16065) + "ExceptionRaceConditionIT:RWRollbackCreateVsNonTransientCreate", + "ExceptionRaceConditionIT:RWArchiveVsRollbackFailedLookupByKey", + "ExceptionsIT:ExRollbackDuplicateKeyArchived", + "ExceptionsIT:ExRollbackDuplicateKeyCreated", + "ExceptionsIT:ExRollbackExerciseCreateLookup", + // tests with divulged/disclosed contracts fail on Canton as does scoping by maintainer unless we're on a UCK synchronizer (see below) + "ContractKeysIT:CKFetchOrLookup", + "ContractKeysIT:CKMaintainerScoped", + "ContractKeysIT:CKNoFetchUndisclosed", + // tests with unique contract key assumption fail as does RWArchiveVsFailedLookupByKey (finding a lookup failure after contract creation) + "RaceConditionIT:RWArchiveVsFailedLookupByKey", + "RaceConditionIT:WWArchiveVsNonTransientCreate", + "RaceConditionIT:WWDoubleNonTransientCreate", + "RaceConditionIT:RWTransientCreateVsNonTransientCreate", + // Exclude the "prepare endpoint" versions of 
contract key prefetching because + // the external hashing algorithm for interactive submission only supports LF=2.1 + // When contract keys are not an LF dev feature anymore those can be enabled + "PrefetchContractKeysIT:CSprefetchContractKeysPrepareEndpointBasic", + "PrefetchContractKeysIT:CSprefetchContractKeysPrepareWronglyTyped", + "PrefetchContractKeysIT:CSprefetchContractPrepareKeysMany", + ), + concurrency = 4, + ) + } + } +} + +// not testing in-memory/H2, as we have observed flaky h2 persistence problems in the indexer + +class LedgerApiExperimentalConformanceTest_Postgres extends LedgerApiExperimentalConformanceTest { + registerPlugin(new UsePostgres(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) +} + +trait LedgerApiParticipantPruningConformanceTest extends SingleVersionLedgerApiConformanceBase { + + override def connectedSynchronizersCount = 1 + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P3_S1M1 + .addConfigTransforms( + ConfigTransforms.updateMaxDeduplicationDurations(java.time.Duration.ofSeconds(1)), + // Disable warnings about large ACS with consistency checking as the pruning tests create many contracts + ConfigTransforms.updateAllParticipantConfigs_( + _.focus(_.parameters.activationFrequencyForWarnAboutConsistencyChecks) + .replace(Long.MaxValue) + ), + ) + .withSetup(setupLedgerApiConformanceEnvironment) + + "On a synchronizer with expedited acs commitment reconciliation, Ledger Api Test Tool" can { + "pass ParticipantPruning test" in { implicit env => + val suites = Seq( + "ParticipantPruningIT", + "CommandDeduplicationPeriodValidationIT:OffsetPruned", + "ActiveContractsServiceIT:AcsAtPruningOffsetIsAllowed", + "ActiveContractsServiceIT:AcsBeforePruningOffsetIsDisallowed", + ) + + ledgerApiTestToolPlugin.runSuitesSerially( + suites = suites.mkString(","), + exclude = Nil, + kv = "--timeout-scale-factor" -> "4", + ) + } + } +} + +// not testing in-memory/H2, as we have observed flaky h2 persistence problems in the indexer + +class LedgerApiParticipantPruningConformanceTestPostgres + extends LedgerApiParticipantPruningConformanceTest { + registerPlugin(new UsePostgres(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) +} + +trait LedgerApiOffsetCheckpointsConformanceTest extends SingleVersionLedgerApiConformanceBase { + + override def connectedSynchronizersCount = 1 + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P1_S1M1 + .addConfigTransforms( + updateAllParticipantConfigs_( + _.focus(_.ledgerApi.indexService.offsetCheckpointCacheUpdateInterval) + .replace(config.NonNegativeFiniteDuration(java.time.Duration.ofMillis(1000))) + .focus(_.ledgerApi.indexService.idleStreamOffsetCheckpointTimeout) + .replace(config.NonNegativeFiniteDuration(java.time.Duration.ofMillis(1000))) + ) + ) + .withSetup(setupLedgerApiConformanceEnvironment) + + "On a Ledger API with short offsetCheckpoint cache update interval, Ledger Api Test Tool" can { + "pass CheckpointInTailingStreamsIT test" in { implicit env => + val suites = Seq( + "CheckpointInTailingStreamsIT" + ) + + ledgerApiTestToolPlugin.runSuitesSerially( + suites = suites.mkString(","), + exclude = Nil, + ) + } + } +} + +// not testing in-memory/H2, as we have observed flaky h2 persistence problems in the indexer + +class LedgerApiOffsetCheckpointsConformanceTestPostgres + extends LedgerApiOffsetCheckpointsConformanceTest { + registerPlugin(new 
UsePostgres(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) +} + +// Simple class which can be used to run a single test from the Ledger API conformance suite +class LedgerApiSingleTest extends SingleVersionLedgerApiConformanceBase { + registerPlugin(new UsePostgres(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + + override def connectedSynchronizersCount = 1 + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P1_S1M1 + .withSetup(setupLedgerApiConformanceEnvironment) + + "Ledger Api Test Tool" can { + "run a single test" in { implicit env => + ledgerApiTestToolPlugin.runSuites( + suites = "PartyManagementServiceIT:PMGenerateExternalPartyTopologyTransaction", + exclude = Nil, + concurrency = 1, + ) + } + } +} diff --git a/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/variations/JsonApiConformanceIntegrationTest.scala b/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/variations/JsonApiConformanceIntegrationTest.scala new file mode 100644 index 0000000000..38f574181b --- /dev/null +++ b/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/variations/JsonApiConformanceIntegrationTest.scala @@ -0,0 +1,258 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.variations + +import com.daml.ledger.api.testtool.Tests +import com.daml.ledger.api.testtool.infrastructure.{ + JsonSupported, + LedgerTestCase, + LedgerTestSuite, + TestConstraints, +} +import com.daml.ledger.api.testtool.runner.{AvailableTests, Config, ConfiguredTests, TestRunner} +import com.digitalasset.canton.config +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.console.LocalParticipantReference +import com.digitalasset.canton.integration.ConfigTransforms.updateAllParticipantConfigs_ +import com.digitalasset.canton.integration.plugins.UseLedgerApiTestTool.{ + EnvVarTestOverrides, + TestInclusions, +} +import com.digitalasset.canton.integration.tests.ledgerapi.LedgerApiConformanceBase +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + ConfigTransforms, + EnvironmentDefinition, + SharedEnvironment, +} +import monocle.Monocle.toAppliedFocusOps +import org.scalatest.time.{Seconds, Span} + +// TODO(#21030): Unify with gRPC conformance tests +sealed trait JsonApiConformanceBase + extends CommunityIntegrationTest + with SharedEnvironment + with EnvVarTestOverrides { + + // ensure conformance tests have less noisy neighbours + protected override def numPermits: PositiveInt = PositiveInt.tryCreate(2) + + // Set to false to compare with gRPC test run + protected def useJson = true + + protected def inclusions: TestInclusions + protected def exclusions: Set[String] = Set.empty + + // This is a local patience config, used only to wait for the end of a shard + protected val testSummariesPatience: PatienceConfig = + PatienceConfig(timeout = scaled(Span(500, Seconds))) + + protected def testCaseName: String + + protected def setup()(implicit + env: FixtureParam + ): ( + Config, + AvailableTests, + List[LedgerTestCase], + TestInclusions, + /* Env arg inclusions */ + ) = { + val participants = testParticipants + val adminParticipants = testAdminParticipants + val config =
Config.default.copy( + verbose = true, + timeoutScaleFactor = 3.0, + reportOnFailuresOnly = true, + jsonApiMode = useJson, + participantsEndpoints = participants, + participantsAdminEndpoints = adminParticipants, + concurrentTestRuns = VariationsConformanceTestUtils.ConcurrentTestRuns, + connectedSynchronizers = env.environment.config.sequencers.size, + ) + + val availableTests = new AvailableTests { + override def defaultTests: Vector[LedgerTestSuite] = + Tests.default(timeoutScaleFactor = config.timeoutScaleFactor) + + override def optionalTests: Vector[LedgerTestSuite] = + Tests.optional(config.tlsConfig) + } + + val envArgInclusion = envArgTestsInclusion.getOrElse(TestInclusions.AllIncluded) + + val testsToRun = + new ConfiguredTests(availableTests, config).defaultTests.view + .flatMap(_.tests) + .filter { testCase => + testCase.limitation match { + case _: JsonSupported => + inclusions.testCaseEnabled(testCase.name) && + !(exclusions.contains(testCase.name) || exclusions.contains(testCase.suite.name)) + case _: TestConstraints.GrpcOnly => false + } + } + .toList + + (config, availableTests, testsToRun, envArgInclusion) + } + + private def testParticipants(implicit env: FixtureParam): Vector[(String, Int)] = + if (useJson) + env.participants.all.map { + // TODO(#22349): Use the JSON API client config once exposed, similarly to how we extract the address and port for the gRPC API + case localParticipantConfig: LocalParticipantReference => + val jsonApiServer = localParticipantConfig.config.httpLedgerApi.server + jsonApiServer.address -> jsonApiServer.port.unwrap + case _other => + fail(s"Expecting only local participant references but got ${_other}") + }.toVector + else + env.participants.all.map { p => + val ledgerApiConfig = p.config.clientLedgerApi + ledgerApiConfig.address -> ledgerApiConfig.port.unwrap + }.toVector + + private def testAdminParticipants(implicit env: FixtureParam): Vector[(String, Int)] = + env.participants.all.map { p => + val adminApiConfig = p.config.clientAdminApi + adminApiConfig.address -> adminApiConfig.port.unwrap + }.toVector + + def numShards: Int + def shard: Int + + def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P1_S1M1 + .prependConfigTransform( + ConfigTransforms.enableHttpLedgerApi + ) + .withSetup { implicit env => + import env.* + participants.all.synchronizers.connect_local(sequencer1_, alias = daName) + } + + "Ledger JSON API" should { + testCaseName in { implicit env => + import env.* + + val (config, availableTests, testsToRun, envArgInclusions) = setup() + + val testsInCurrentShard = testsToRun.zipWithIndex.collect { + case (testCase, i) if i % numShards == shard => testCase + } + + val testsToBeRun = + testsInCurrentShard + .filter(tc => envArgInclusions.testCaseEnabled(tc.name)) + .map(_.name) + .toSet + + // If the env arg filtering leaves no tests in the current shard, cancel the run + if (testsToBeRun.isEmpty) { + cancel( + s"No tests to run in current shard $shard/$numShards. " + + s"Original tests in current shard: ${testsInCurrentShard.map(_.name)}. " + + s"Test selection from env var $LapittRunOnlyEnvVarName: $envArgInclusions."
+ ) + } else { + val testRunner = new TestRunner( + availableTests, + config.copy(included = testsToBeRun), + Tests.lfVersion, + ) + + logger.debug( + s"Running ${testsToBeRun.mkString("[", ", ", "]")} in current shard $shard/$numShards" + ) + + val (resultF, _testCases) = testRunner.runInProcess() + resultF + .map { summaries => + val failures = summaries + .collect(summary => + summary.result.left + .map(failure => s"${summary.name} failed: $failure") + ) + .collect { case Left(failure) => failure } + if (failures.nonEmpty) { + fail( + s"Some JSON Ledger API conformance test cases have failed: ${failures + .mkString("\n\t", "\n\t", "")}" + ) + } + succeed + } + .futureValue(config = testSummariesPatience, pos = implicitly) + } + } + } +} + +sealed abstract class JsonApiConformanceIntegrationShardedTest( + val shard: Int, + val numShards: Int, +) extends JsonApiConformanceBase { + + // Multi-domain, multi-participant environment for full test coverage + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P3_S1M1_S1M1 + .prependConfigTransform(ConfigTransforms.enableHttpLedgerApi) + .withSetup { implicit env => + import env.* + participants.all.synchronizers.connect_local(sequencer1, alias = daName) + participants.all.synchronizers.connect_local(sequencer2, alias = acmeName) + } + + protected def inclusions: TestInclusions = TestInclusions.AllIncluded + override protected def exclusions: Set[String] = LedgerApiConformanceBase.excludedTests.toSet ++ + Set( + "HealthServiceIT", // Service not available in JSON, + "PartyManagementServiceIT", // updatePartyIdentityProviderIs is not available in JSON API + "UserManagementServiceIT", // Results in PERMISSION_DENIED (wrong _.userManagement.supported) + ) + + protected def testCaseName = "pass the Ledger API conformance tests" +} + +final class JsonApiConformanceIntegrationShardedTest_Shard_0 + extends JsonApiConformanceIntegrationShardedTest(shard = 0, numShards = 5) + +final class JsonApiConformanceIntegrationShardedTest_Shard_1 + extends JsonApiConformanceIntegrationShardedTest(shard = 1, numShards = 5) + +final class JsonApiConformanceIntegrationShardedTest_Shard_2 + extends JsonApiConformanceIntegrationShardedTest(shard = 2, numShards = 5) + +final class JsonApiConformanceIntegrationShardedTest_Shard_3 + extends JsonApiConformanceIntegrationShardedTest(shard = 3, numShards = 5) + +final class JsonApiConformanceIntegrationShardedTest_Shard_4 + extends JsonApiConformanceIntegrationShardedTest(shard = 4, numShards = 5) + +private[variations] trait NonSharded { + this: JsonApiConformanceBase => + + override def shard: Int = 0 + override def numShards: Int = 1 +} + +final class JsonApiOffsetCheckpointsConformanceTest extends JsonApiConformanceBase with NonSharded { + override def environmentDefinition: EnvironmentDefinition = + super.environmentDefinition.addConfigTransforms( + updateAllParticipantConfigs_( + _.focus(_.ledgerApi.indexService.offsetCheckpointCacheUpdateInterval) + .replace(config.NonNegativeFiniteDuration(java.time.Duration.ofMillis(1000))) + .focus(_.ledgerApi.indexService.idleStreamOffsetCheckpointTimeout) + .replace(config.NonNegativeFiniteDuration(java.time.Duration.ofMillis(1000))) + ) + ) + + override def inclusions: TestInclusions = TestInclusions.SelectedTests( + includedSuites = Set("CheckpointInTailingStreamsIT") + ) + + override def testCaseName: String = + "pass CheckpointInTailingStreamsIT on a Ledger API with short offset checkpoint update interval" +} diff --git 
a/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/variations/LedgerApiVariationsConformanceTest.scala b/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/variations/LedgerApiVariationsConformanceTest.scala new file mode 100644 index 0000000000..e4443c37b6 --- /dev/null +++ b/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/variations/LedgerApiVariationsConformanceTest.scala @@ -0,0 +1,346 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.variations + +import com.daml.tls.TlsVersion +import com.digitalasset.canton.config.* +import com.digitalasset.canton.config.RequireTypes.ExistingFile +import com.digitalasset.canton.integration.plugins.* +import com.digitalasset.canton.integration.tests.ledgerapi.LedgerApiConformanceBase.excludedTests +import com.digitalasset.canton.integration.tests.ledgerapi.SingleVersionLedgerApiConformanceBase +import com.digitalasset.canton.integration.tests.ledgerapi.SuppressionRules.ApiUserManagementServiceSuppressionRule +import com.digitalasset.canton.integration.{ConfigTransforms, EnvironmentDefinition} +import com.digitalasset.canton.logging.SuppressionRule +import com.digitalasset.canton.participant.config.{ParticipantNodeConfig, TestingTimeServiceConfig} +import com.digitalasset.canton.platform.apiserver.SeedService +import monocle.macros.syntax.lens.* +import org.slf4j.event.Level + +// Conformance with default settings but with covering buffer size (i.e. big enough) for the in-memory fan-out, +// ensuring that all Ledger API read requests are served from the in-memory fan-out buffer. 
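+// As an illustrative sketch (not part of the suite wiring): a variation like the one below is
+// just a ConfigTransform, assumed here to be the CantonConfig => CantonConfig function alias
+// from com.digitalasset.canton.integration, so lens updates compose with plain andThen.
+private object CoveringFanOutSketch {
+  import com.digitalasset.canton.integration.ConfigTransform
+
+  // Bump the fan-out buffer on every participant; 20000 entries comfortably exceeds what a
+  // conformance run produces, so tailing reads are served from memory.
+  val coveringFanOut: ConfigTransform =
+    ConfigTransforms.updateAllParticipantConfigs_ { (c: ParticipantNodeConfig) =>
+      c.focus(_.ledgerApi.indexService.maxTransactionsInMemoryFanOutBufferSize)
+        .replace(20000)
+    }
+
+  // Transforms are plain functions, so further tweaks chain onto the same value.
+  val coveringFanOutWithWeakSeeding: ConfigTransform =
+    coveringFanOut.andThen(
+      ConfigTransforms.updateAllParticipantConfigs_(
+        _.focus(_.parameters.ledgerApiServer.contractIdSeeding)
+          .replace(SeedService.Seeding.Weak)
+      )
+    )
+}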
+sealed abstract class LedgerApiInMemoryFanOutConformanceTestShardedPostgres(shard: Int) + extends SingleVersionLedgerApiConformanceBase { + + override def connectedSynchronizersCount = 1 + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P3_S1M1 + .addConfigTransforms( + ConfigTransforms.updateAllParticipantConfigs_ { (c: ParticipantNodeConfig) => + c + .focus(_.ledgerApi.userManagementService.enabled) + .replace(true) + .focus(_.ledgerApi.indexService.maxTransactionsInMemoryFanOutBufferSize) + .replace(20000) + .focus(_.parameters.ledgerApiServer.contractIdSeeding) + .replace(SeedService.Seeding.Weak) + } + ) + .withSetup(setupLedgerApiConformanceEnvironment) + + protected val numShards: Int = 3 + + registerPlugin(new UsePostgres(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + + "A participant with covering in-memory fan-out buffer" can { + "pass integration tests" in { implicit env => + loggerFactory.suppress(ApiUserManagementServiceSuppressionRule) { + ledgerApiTestToolPlugin.runShardedSuites( + shard = shard, + numShards = numShards, + exclude = excludedTests, + concurrentTestRuns = VariationsConformanceTestUtils.ConcurrentTestRuns, + ) + } + } + } +} + +class LedgerApiShard0InMemoryFanOutConformanceTestPostgres + extends LedgerApiInMemoryFanOutConformanceTestShardedPostgres(shard = 0) + +class LedgerApiShard1InMemoryFanOutConformanceTestPostgres + extends LedgerApiInMemoryFanOutConformanceTestShardedPostgres(shard = 1) + +class LedgerApiShard2InMemoryFanOutConformanceTestPostgres + extends LedgerApiInMemoryFanOutConformanceTestShardedPostgres(shard = 2) + +// By default, participants are tuned for performance. The buffers and caches used by the participant +// are by default so large that they are not filled by the small amount of data produced by the conformance test. +// We run one conformance test with small buffer/cache sizes to make sure we cover cases where data doesn't fit +// into a cache or where multiple buffers have to be combined. 
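+// The sharded variants above and below split the suite list round-robin across test classes;
+// a minimal sketch of that partitioning, mirroring the `i % numShards == shard` selection
+// used by the JSON API conformance runner:
+private object ShardingSketch {
+  // Test case i is owned by shard i % numShards, so each test runs in exactly one shard.
+  def shardSlice[A](tests: Vector[A], shard: Int, numShards: Int): Vector[A] =
+    tests.zipWithIndex.collect { case (t, i) if i % numShards == shard => t }
+}
+// For example, with numShards = 4 as in the tiny-buffers suites below,
+// shardSlice(Vector("a", "b", "c", "d", "e"), 0, 4) yields Vector("a", "e").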
+sealed abstract class LedgerApiTinyBuffersConformanceShardedTestPostgres(shard: Int) + extends SingleVersionLedgerApiConformanceBase { + + override def connectedSynchronizersCount = 1 + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P3_S1M1 + .addConfigTransforms( + ConfigTransforms.updateParticipantConfig("participant1") { (c: ParticipantNodeConfig) => + c + .focus(_.ledgerApi.userManagementService.enabled) + .replace(true) + .focus(_.ledgerApi.userManagementService.maxCacheSize) + .replace(2) + .focus(_.parameters.ledgerApiServer.contractIdSeeding) + .replace(SeedService.Seeding.Weak) + .focus(_.ledgerApi.indexService.activeContractsServiceStreams.maxIdsPerIdPage) + .replace(2) + .focus( + _.ledgerApi.indexService.activeContractsServiceStreams.maxPayloadsPerPayloadsPage + ) + .replace(2) + .focus(_.ledgerApi.indexService.maxContractKeyStateCacheSize) + .replace(2) + .focus(_.ledgerApi.indexService.maxContractStateCacheSize) + .replace(2) + .focus(_.ledgerApi.indexService.maxTransactionsInMemoryFanOutBufferSize) + .replace(3) + .focus(_.ledgerApi.indexService.bufferedStreamsPageSize) + .replace(1) + } + ) + .withSetup(setupLedgerApiConformanceEnvironment) + + protected val numShards: Int = 4 + + registerPlugin(new UsePostgres(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + + "A participant with tiny buffers" can { + "pass integration tests" in { implicit env => + loggerFactory.suppress(ApiUserManagementServiceSuppressionRule) { + ledgerApiTestToolPlugin.runShardedSuites( + shard = shard, + numShards = numShards, + exclude = excludedTests, + concurrentTestRuns = VariationsConformanceTestUtils.ConcurrentTestRuns, + ) + } + } + } +} + +class LedgerApiShard0TinyBuffersConformanceTestPostgres + extends LedgerApiTinyBuffersConformanceShardedTestPostgres(shard = 0) + +class LedgerApiShard1TinyBuffersConformanceTestPostgres + extends LedgerApiTinyBuffersConformanceShardedTestPostgres(shard = 1) + +class LedgerApiShard2TinyBuffersConformanceTestPostgres + extends LedgerApiTinyBuffersConformanceShardedTestPostgres(shard = 2) + +class LedgerApiShard3TinyBuffersConformanceTestPostgres + extends LedgerApiTinyBuffersConformanceShardedTestPostgres(shard = 3) + +// Conformance test with the in-memory fan-out, mutable contract state cache and user management cache disabled +// (i.e. cache/buffer sizes set to 0). 
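+// Exclusion entries can name either a whole suite ("ParticipantPruningIT") or a single test
+// ("CommandServiceIT:CSRefuseBadParameter"); a small sketch of that matching rule, using a
+// hypothetical TestId holder in place of the tool's LedgerTestCase:
+private object ExclusionSketch {
+  final case class TestId(suite: String, name: String) // hypothetical stand-in
+  def isExcluded(t: TestId, exclusions: Set[String]): Boolean =
+    exclusions.contains(t.name) || exclusions.contains(t.suite)
+}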
+trait LedgerApiCachesDisabledConformanceTest extends SingleVersionLedgerApiConformanceBase { + + override def connectedSynchronizersCount = 1 + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P2_S1M1 + .addConfigTransforms( + ConfigTransforms.updateParticipantConfig("participant1") { (c: ParticipantNodeConfig) => + c + .focus(_.ledgerApi.userManagementService.enabled) + .replace(true) + .focus(_.ledgerApi.userManagementService.maxCacheSize) + .replace(0) + .focus(_.ledgerApi.userManagementService.maxRightsPerUser) + .replace(100) + .focus(_.parameters.ledgerApiServer.contractIdSeeding) + .replace(SeedService.Seeding.Weak) + .focus(_.ledgerApi.indexService.maxContractKeyStateCacheSize) + .replace(0) + .focus(_.ledgerApi.indexService.maxContractStateCacheSize) + .replace(0) + .focus(_.ledgerApi.indexService.maxTransactionsInMemoryFanOutBufferSize) + .replace(0) + } + ) + .withSetup(setupLedgerApiConformanceEnvironment) + + "A participant with caches disabled" can { + "pass integration tests" in { implicit env => + loggerFactory.suppress(ApiUserManagementServiceSuppressionRule) { + ledgerApiTestToolPlugin.runSuites( + suites = "SemanticTests,MultiPartySubmissionIT,ExplicitDisclosureIT,CommandServiceIT", + exclude = Seq( + // Following value normalisation (https://github.com/digital-asset/daml/pull/19912), this throws a different, equally correct, error + "CommandServiceIT:CSRefuseBadParameter" + ), + concurrency = VariationsConformanceTestUtils.ConcurrentTestRuns, + ) + } + } + } +} + +class LedgerApiCachesDisabledConformanceTestPostgres + extends LedgerApiCachesDisabledConformanceTest { + registerPlugin(new UsePostgres(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) +} + +trait LedgerApiStaticTimeConformanceTest extends SingleVersionLedgerApiConformanceBase { + + override def connectedSynchronizersCount = 1 + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P3_S1M1 + .addConfigTransforms( + ConfigTransforms.updateParticipantConfig("participant1") { (c: ParticipantNodeConfig) => + c + .focus(_.parameters.ledgerApiServer.contractIdSeeding) + .replace(SeedService.Seeding.Weak) + .focus(_.testingTime) + .replace( + Some(TestingTimeServiceConfig.MonotonicTime) + ) + }, + c => c.focus(_.parameters.clock).replace(ClockConfig.SimClock), + ) + .withSetup(setupLedgerApiConformanceEnvironment) + + "A participant with static time" can { + "pass integration tests" in { implicit env => + // TODO(i12121): Exclusions due to timeouts because with sim clock time doesn't advance on its own + val exclusions = Seq( + "CommandServiceIT:CSduplicateSubmitAndWaitBasic", + "CommandServiceIT:CSduplicateSubmitAndWaitForTransactionId", + "CommandServiceIT:CSduplicateSubmitAndWaitForTransactionData", + "CommandServiceIT:CSduplicateSubmitAndWaitForTransactionTree", + "InteractiveSubmissionServiceIT:ISSPrepareSubmissionExecuteBasic", + "InteractiveSubmissionServiceIT:ISSPrepareSubmissionFailExecuteOnInvalidSignature", + "InteractiveSubmissionServiceIT:ISSPrepareSubmissionFailExecuteOnInvalidSignatory", + "InteractiveSubmissionServiceIT:ISSExecuteSubmissionRequestWithInputContracts", + "InteractiveSubmissionServiceIT:ISSExecuteSubmissionRequestFailOnEmptyInputContracts", + "InteractiveSubmissionServiceIT:ISSExecuteSubmissionAndWaitBasic", + "InteractiveSubmissionServiceIT:ISSPrepareSubmissionFailExecuteAndWaitOnInvalidSignature", + "CommandDeduplicationIT:SimpleDeduplicationBasic", + 
"CommandDeduplicationIT:SimpleDeduplicationCommandClient", + "CommandDeduplicationIT:DeduplicateSubmitterBasic", + "CommandDeduplicationIT:DeduplicateSubmitterCommandClient", + "CommandDeduplicationIT:DeduplicateUsingDurations", + "CommandDeduplicationIT:DeduplicateUsingOffsets", + // suites that are irrelevant for the static time test + "ActiveContractsServiceIT", + "DeeplyNestedValueIT", + "IdentityProviderConfigServiceIT", + "Interface", + "LimitsIT", + "MultiPartySubmissionIT", + "Package", + "PartyManagement", + "Transaction", + "Upgrading", + "UserManagement", + "ValueLimitsIT", + // Following value normalisation (https://github.com/digital-asset/daml/pull/19912), this throws a different, equally correct, error + "CommandServiceIT:CSRefuseBadParameter", + "VettingIT", + ) + loggerFactory.suppress(LogSuppressionRule) { + ledgerApiTestToolPlugin.runShardedSuites( + shard = 0, + numShards = 1, + exclude = excludedTests ++ exclusions, + concurrentTestRuns = VariationsConformanceTestUtils.ConcurrentTestRuns, + ) + } + } + } + + // The CantonTimeServiceBackend can produce a warning: + // WARN c.d.c.p.l.a.CantonTimeServiceBackend:LedgerApiStaticTimeConformanceTestPostgres/participant=participant1 - Cannot advance clock: Specified current time 1970-01-01T00:00:31.200Z does not match ledger time 1970-01-01T00:00:30.200Z + // from the test `TimeServiceIT` case `Time advancement can fail when current time is not accurate` + val LogSuppressionRule: SuppressionRule = + SuppressionRule.LoggerNameContains("ApiUserManagementService") || + SuppressionRule.LoggerNameContains("ApiTimeService") || + SuppressionRule.LoggerNameContains("CantonTimeServiceBackend") && + SuppressionRule.Level(Level.WARN) + +} + +class LedgerApiStaticTimeConformanceTestPostgres extends LedgerApiStaticTimeConformanceTest { + registerPlugin(new UsePostgres(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) +} + +trait LedgerApiTlsConformanceBase extends SingleVersionLedgerApiConformanceBase { + + override def connectedSynchronizersCount = 1 + + private val certChainFile = + PemFile(ExistingFile.tryCreate("./enterprise/app/src/test/resources/tls/ledger-api.crt")) + private val privateKeyFile = + PemFile(ExistingFile.tryCreate("./enterprise/app/src/test/resources/tls/ledger-api.pem")) + private val trustCertCollectionFile = + PemFile(ExistingFile.tryCreate("./enterprise/app/src/test/resources/tls/root-ca.crt")) + + private val tls = TlsServerConfig( + certChainFile = certChainFile, + privateKeyFile = privateKeyFile, + trustCollectionFile = Some(trustCertCollectionFile), + ) + + protected def tlsCerts: Seq[(String, String)] = + Seq( + "--client-cert" -> (certChainFile.pemFile.unwrap.getPath + "," + privateKeyFile.pemFile.unwrap.getPath), + "--cacrt" -> trustCertCollectionFile.pemFile.unwrap.getPath, + ) + + protected def minTlsVersion: TlsVersion.TlsVersion + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P1_S1M1 + .addConfigTransform( + ConfigTransforms.updateAllParticipantConfigs_( + _.focus(_.ledgerApi.tls) + .replace(Some(tls.copy(minimumServerProtocolVersion = Some(minTlsVersion.version)))) + ) + ) + .withSetup(setupLedgerApiConformanceEnvironment) +} + +// not testing in-memory/H2, as we have observed flaky h2 persistence problems in the indexer + +// Only defined postgres to conserve resources +class LedgerApiTls12ConformanceTestPostgres extends LedgerApiTlsConformanceBase { + registerPlugin(new UsePostgres(loggerFactory)) + 
registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + val minTlsVersion = TlsVersion.V1_2 + + "Ledger Api Test Tool" can { + "pass tls 1.2 tests" in { implicit env => + ledgerApiTestToolPlugin.runSuites( + suites = "TLSAtLeastOnePointTwoIT", + exclude = Nil, + concurrency = VariationsConformanceTestUtils.ConcurrentTestRuns, + tlsCerts* + ) + } + } +} + +class LedgerApiTls13ConformanceTestPostgres extends LedgerApiTlsConformanceBase { + registerPlugin(new UsePostgres(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + val minTlsVersion = TlsVersion.V1_3 + + "Ledger Api Test Tool" can { + "pass tls 1.3 tests" in { implicit env => + ledgerApiTestToolPlugin.runSuites( + suites = "TLSOnePointThreeIT", + exclude = Nil, + concurrency = VariationsConformanceTestUtils.ConcurrentTestRuns, + tlsCerts* + ) + } + } +} diff --git a/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/variations/VariationsConformanceTestUtils.scala b/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/variations/VariationsConformanceTestUtils.scala new file mode 100644 index 0000000000..0db545ed75 --- /dev/null +++ b/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/variations/VariationsConformanceTestUtils.scala @@ -0,0 +1,10 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.variations + +private[variations] object VariationsConformanceTestUtils { + // Use higher parallelism than the default of 4 since conformance tests in variations CI job + // run without neighbors (numPermits = 2) + val ConcurrentTestRuns: Int = 8 +} diff --git a/canton/community/drivers/reference/src/main/resources/META-INF/services/com.digitalasset.canton.synchronizer.block.SequencerDriverFactory b/canton/community/drivers/reference/src/main/resources/META-INF/services/com.digitalasset.canton.synchronizer.block.SequencerDriverFactory deleted file mode 100644 index 92a01d86ff..0000000000 --- a/canton/community/drivers/reference/src/main/resources/META-INF/services/com.digitalasset.canton.synchronizer.block.SequencerDriverFactory +++ /dev/null @@ -1 +0,0 @@ -com.digitalasset.canton.synchronizer.sequencing.sequencer.reference.CommunityReferenceSequencerDriverFactory diff --git a/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/BaseReferenceSequencerDriverFactory.scala b/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/BaseReferenceSequencerDriverFactory.scala deleted file mode 100644 index 57ef746cef..0000000000 --- a/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/BaseReferenceSequencerDriverFactory.scala +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.synchronizer.sequencing.sequencer.reference - -import com.daml.metrics.api.MetricsContext -import com.digitalasset.canton.config.{DbConfig, ProcessingTimeout, StorageConfig} -import com.digitalasset.canton.lifecycle.{CloseContext, FlagCloseable} -import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.resource.Storage -import com.digitalasset.canton.synchronizer.block.BlockFormat.DefaultFirstBlockHeight -import com.digitalasset.canton.synchronizer.block.{SequencerDriver, SequencerDriverFactory} -import com.digitalasset.canton.synchronizer.sequencing.sequencer.reference.BaseReferenceSequencerDriverFactory.createClock -import com.digitalasset.canton.synchronizer.sequencing.sequencer.reference.store.ReferenceBlockOrderingStore -import com.digitalasset.canton.time.{Clock, TimeProvider, TimeProviderClock} -import com.digitalasset.canton.tracing.TraceContext -import org.apache.pekko.stream.Materializer -import pureconfig.{ConfigReader, ConfigWriter} - -import scala.concurrent.ExecutionContext - -abstract class BaseReferenceSequencerDriverFactory extends SequencerDriverFactory { - - override final type ConfigType = ReferenceSequencerDriver.Config[StorageConfig] - - protected def createStorage( - config: ConfigType, - clock: Clock, - processingTimeout: ProcessingTimeout, - loggerFactory: NamedLoggerFactory, - )(implicit - executionContext: ExecutionContext, - traceContext: TraceContext, - closeContext: CloseContext, - metricsContext: MetricsContext, - ): Storage - - override final def version: Int = 1 - - override final def usesTimeProvider: Boolean = true - - override def configParser: ConfigReader[ConfigType] = { - import pureconfig.generic.semiauto.* - import com.digitalasset.canton.config.BaseCantonConfig.Readers.* - implicit val communityMemoryStorageConfigReader: ConfigReader[StorageConfig.Memory] = - deriveReader[StorageConfig.Memory] - implicit val communityH2StorageConfigReader: ConfigReader[DbConfig.H2] = - deriveReader[DbConfig.H2] - implicit val communityPostgresStorageConfigReader: ConfigReader[DbConfig.Postgres] = - deriveReader[DbConfig.Postgres] - implicit val communityStorageConfigReader: ConfigReader[StorageConfig] = - deriveReader[StorageConfig] - - deriveReader[ConfigType] - } - - override def configWriter(confidential: Boolean): ConfigWriter[ConfigType] = { - import pureconfig.generic.semiauto.* - import com.digitalasset.canton.config.BaseCantonConfig.Writers.* - - implicit val enterpriseMemoryStorageConfigWriter: ConfigWriter[StorageConfig.Memory] = - deriveWriter[StorageConfig.Memory] - implicit val enterpriseH2StorageConfigWriter: ConfigWriter[DbConfig.H2] = - deriveWriter[DbConfig.H2] - implicit val enterprisePostgresStorageConfigWriter: ConfigWriter[DbConfig.Postgres] = - deriveWriter[DbConfig.Postgres] - implicit val StorageConfigWriter: ConfigWriter[StorageConfig] = - deriveWriter[StorageConfig] - - deriveWriter[ConfigType] - } - - override def create( - config: ConfigType, - nonStandardConfig: Boolean, - timeProvider: TimeProvider, - firstBlockHeight: Option[Long], - synchronizerId: String, - sequencerId: String, - loggerFactory: NamedLoggerFactory, - )(implicit executionContext: ExecutionContext, materializer: Materializer): SequencerDriver = { - val processingTimeout = ProcessingTimeout() - val closeable = flagCloseable(processingTimeout, loggerFactory) - val storage = - createStorage( - config, - createClock(timeProvider, loggerFactory), - processingTimeout, 
- loggerFactory, - )( - executionContext, - TraceContext.empty, - new CloseContext(closeable), - MetricsContext.Empty, - ) - val store = - ReferenceBlockOrderingStore(storage, processingTimeout, loggerFactory) - new ReferenceSequencerDriver( - sequencerId, - store, - config, - timeProvider, - firstBlockHeight.getOrElse(DefaultFirstBlockHeight), - storage, - closeable, - loggerFactory, - processingTimeout, - ) - } - - private def flagCloseable( - processingTimeout: ProcessingTimeout, - loggerFactory: NamedLoggerFactory, - ): FlagCloseable = - FlagCloseable(loggerFactory.getTracedLogger(getClass), processingTimeout) -} - -private[reference] object BaseReferenceSequencerDriverFactory { - - final def createClock(timeProvider: TimeProvider, loggerFactory: NamedLoggerFactory) = - new TimeProviderClock(timeProvider, loggerFactory) -} diff --git a/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/CommunityReferenceSequencerDriverFactory.scala b/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/CommunityReferenceSequencerDriverFactory.scala deleted file mode 100644 index 565065dcd4..0000000000 --- a/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/CommunityReferenceSequencerDriverFactory.scala +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.synchronizer.sequencing.sequencer.reference - -import com.daml.metrics.api.MetricsContext -import com.digitalasset.canton.config.{DbConfig, ProcessingTimeout, StorageConfig} -import com.digitalasset.canton.lifecycle.CloseContext -import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.resource.{CommunityStorageSetup, Storage} -import com.digitalasset.canton.time.Clock -import com.digitalasset.canton.tracing.TraceContext -import monocle.macros.syntax.lens.* - -import scala.concurrent.ExecutionContext - -final class CommunityReferenceSequencerDriverFactory extends BaseReferenceSequencerDriverFactory { - - override def name: String = "community-reference" - - override protected def createStorage( - config: ReferenceSequencerDriver.Config[StorageConfig], - clock: Clock, - processingTimeout: ProcessingTimeout, - loggerFactory: NamedLoggerFactory, - )(implicit - executionContext: ExecutionContext, - traceContext: TraceContext, - closeContext: CloseContext, - metricsContext: MetricsContext, - ): Storage = - CommunityStorageSetup.tryCreateAndMigrateStorage( - config.storage, - config.logQueryCost, - clock, - processingTimeout, - loggerFactory, - setMigrationsPath, - ) - - def setMigrationsPath(config: StorageConfig): StorageConfig = - config match { - case h2: DbConfig.H2 => - h2.focus(_.parameters.migrationsPaths) - .replace(Seq("classpath:db/migration/canton/h2/dev/reference/")) - case pg: DbConfig.Postgres => - pg.focus(_.parameters.migrationsPaths) - .replace(Seq("classpath:db/migration/canton/postgres/dev/reference/")) - case x => x - } -} diff --git a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/CommonTestAliases.scala b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/CommonTestAliases.scala index fb1a083e97..e70c278344 100644 --- 
a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/CommonTestAliases.scala +++ b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/CommonTestAliases.scala @@ -53,6 +53,7 @@ trait CommonTestAliases { lazy val remoteSequencer1: RemoteSequencerReference = rs("sequencer1") lazy val remoteSequencer2: RemoteSequencerReference = rs("sequencer2") lazy val remoteSequencer3: RemoteSequencerReference = rs("sequencer3") + lazy val remoteSequencer4: RemoteSequencerReference = rs("sequencer4") lazy val remoteMediator1: RemoteMediatorReference = rm("mediator1") lazy val remoteMediator2: RemoteMediatorReference = rm("mediator2") diff --git a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/ConfigTransforms.scala b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/ConfigTransforms.scala index 3b6e203f79..4c010374be 100644 --- a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/ConfigTransforms.scala +++ b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/ConfigTransforms.scala @@ -4,7 +4,6 @@ package com.digitalasset.canton.integration import cats.syntax.option.* -import com.digitalasset.canton.BaseTest.testedProtocolVersion import com.digitalasset.canton.config.CantonRequireTypes.InstanceName import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, Port, PositiveInt} import com.digitalasset.canton.config.StartupMemoryCheckConfig.ReportingLevel @@ -15,6 +14,7 @@ import com.digitalasset.canton.config.{ *, } import com.digitalasset.canton.console.FeatureFlag +import com.digitalasset.canton.http.{HttpServerConfig, JsonApiConfig, WebsocketConfig} import com.digitalasset.canton.participant.config.{ ParticipantNodeConfig, RemoteParticipantConfig, @@ -39,6 +39,7 @@ import monocle.macros.syntax.lens.* import monocle.macros.{GenLens, GenPrism} import scala.concurrent.duration.* +import scala.jdk.DurationConverters.* import scala.util.Random /** Utilities for transforming instances of [[CantonConfig]]. 
A transform itself is merely a @@ -127,8 +128,7 @@ object ConfigTransforms { _.focus(_.monitoring.logging.api.warnBeyondLoad).replace(Some(10000)), // disable exit on fatal error in tests ConfigTransforms.setExitOnFatalFailures(false), - // TODO(i26481): adjust when the new connection pool is stable - ConfigTransforms.setConnectionPool(testedProtocolVersion >= ProtocolVersion.dev), + ConfigTransforms.setConnectionPool(true), ) lazy val dontWarnOnDeprecatedPV: Seq[ConfigTransform] = Seq( @@ -251,6 +251,8 @@ object ConfigTransforms { .replace(nextPort.some) .focus(_.adminApi.internalPort) .replace(nextPort.some) + .focus(_.httpLedgerApi.server.internalPort) + .replace(nextPort.some) .focus(_.monitoring.grpcHealthServer) .modify(_.map(_.copy(internalPort = nextPort.some))) ) @@ -665,6 +667,18 @@ object ConfigTransforms { .replace(config.NonNegativeFiniteDuration(maxDeduplicationDuration)) ) + def updateTargetTimestampForwardTolerance( + targetTimestampForwardTolerance: scala.concurrent.duration.FiniteDuration + ): ConfigTransform = updateTargetTimestampForwardTolerance(targetTimestampForwardTolerance.toJava) + + def updateTargetTimestampForwardTolerance( + targetTimestampForwardTolerance: java.time.Duration + ): ConfigTransform = + ConfigTransforms.updateAllParticipantConfigs_( + _.focus(_.parameters.reassignmentsConfig.targetTimestampForwardTolerance) + .replace(config.NonNegativeFiniteDuration(targetTimestampForwardTolerance)) + ) + /** Flag the provided participants as being replicated. Keep in mind to actually work they need to * be configured to share the same database (see * [[com.digitalasset.canton.integration.plugins.UseSharedStorage]]). @@ -852,7 +866,7 @@ object ConfigTransforms { /** Use the new sequencer connection pool if 'value' is true. Otherwise use the former transports. 
*/ - private def setConnectionPool(value: Boolean): ConfigTransform = + def setConnectionPool(value: Boolean): ConfigTransform = updateAllSequencerConfigs { case (_name, config) => config.focus(_.sequencerClient.useNewConnectionPool).replace(value) }.compose(updateAllMediatorConfigs { case (_name, config) => @@ -860,4 +874,27 @@ object ConfigTransforms { }).compose(updateAllParticipantConfigs { case (_name, config) => config.focus(_.sequencerClient.useNewConnectionPool).replace(value) }) + + /** Must be applied before the default config transformers */ + def enableHttpLedgerApi: ConfigTransform = updateAllParticipantConfigs_( + _.copy(httpLedgerApi = JsonApiConfig(server = HttpServerConfig())) + ) + + /** Must be applied before the default config transformers */ + def enableHttpLedgerApi( + participantName: String, + websocketConfig: Option[WebsocketConfig] = None, + pathPrefix: Option[String] = None, + ): ConfigTransform = + updateParticipantConfig(participantName)(config => + config.copy(httpLedgerApi = + JsonApiConfig( + server = HttpServerConfig( + pathPrefix = pathPrefix + ), + websocketConfig = websocketConfig, + ) + ) + ) + } diff --git a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/EnvironmentDefinition.scala b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/EnvironmentDefinition.scala index a6be3ad0a0..6e5c35a252 100644 --- a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/EnvironmentDefinition.scala +++ b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/EnvironmentDefinition.scala @@ -12,7 +12,6 @@ import com.digitalasset.canton.config.{ ApiLoggingConfig, CantonConfig, CantonFeatures, - CryptoConfig, EnterpriseCantonEdition, LoggingConfig, MonitoringConfig, @@ -30,7 +29,6 @@ import com.digitalasset.canton.integration.bootstrap.{ } import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.participant.config.ParticipantNodeConfig -import com.digitalasset.canton.synchronizer.config.SynchronizerParametersConfig import com.digitalasset.canton.synchronizer.mediator.MediatorNodeConfig import com.digitalasset.canton.synchronizer.sequencer.config.SequencerNodeConfig import com.digitalasset.canton.tracing.TracingConfig @@ -100,6 +98,14 @@ final case class EnvironmentDefinition( def addConfigTransform(transform: ConfigTransform): EnvironmentDefinition = copy(configTransforms = this.configTransforms :+ transform) + /** Allows running a config transformation before the default config transformers are run */ + def prependConfigTransform(transform: ConfigTransform): EnvironmentDefinition = + copy(configTransforms = transform +: this.configTransforms) + + /** Allows running config transformations before the default config transformers are run */ + def prependConfigTransforms(transforms: ConfigTransform*): EnvironmentDefinition = + copy(configTransforms = transforms ++ this.configTransforms) + def updateTestingConfig( update: TestingConfigInternal => TestingConfigInternal ): EnvironmentDefinition = @@ -153,10 +159,11 @@ final case class EnvironmentDefinition( */ object EnvironmentDefinition extends LazyLogging { - lazy val defaultStaticSynchronizerParameters: StaticSynchronizerParameters = - StaticSynchronizerParameters.fromConfig( - SynchronizerParametersConfig(), - CryptoConfig(), + def defaultStaticSynchronizerParameters(implicit + env: TestConsoleEnvironment[CantonConfig, CantonEnvironment] + ):
StaticSynchronizerParameters = + StaticSynchronizerParameters.initialValues( + env.environment.clock, BaseTest.testedProtocolVersion, ) @@ -612,6 +619,14 @@ object EnvironmentDefinition extends LazyLogging { NetworkBootstrapper(S1M1_S1M1) } + /** - 1 participant '''not''' connected to any synchronizer + * - 3 synchronizers with 1 sequencer and 1 mediator each + */ + lazy val P1_S1M1_S1M1_S1M1: EnvironmentDefinition = + P1S3M3_Config.withNetworkBootstrap { implicit env => + NetworkBootstrapper(S1M1_S1M1_S1M1) + } + /** - 2 participants '''not''' connected to any synchronizer * - 3 synchronizers with 1 sequencer and 1 mediator each */ @@ -639,7 +654,7 @@ object EnvironmentDefinition extends LazyLogging { .getOrElse(alias.unwrap, defaultStaticSynchronizerParameters) new NetworkBootstrapper( - NetworkTopologyDescription( + NetworkTopologyDescription.createWithStaticSynchronizerParameters( daName, synchronizerOwners = Seq(sequencer1), synchronizerThreshold = PositiveInt.one, sequencers = Seq(sequencer1), mediators = Seq(mediator1), staticSynchronizerParameters = paramsOrDefaults(daName), ), - NetworkTopologyDescription( + NetworkTopologyDescription.createWithStaticSynchronizerParameters( acmeName, synchronizerOwners = Seq(sequencer2), synchronizerThreshold = PositiveInt.one, sequencers = Seq(sequencer2), mediators = Seq(mediator2), staticSynchronizerParameters = paramsOrDefaults(acmeName), ), - NetworkTopologyDescription( + NetworkTopologyDescription.createWithStaticSynchronizerParameters( repairSynchronizerName, synchronizerOwners = Seq(sequencer3), synchronizerThreshold = PositiveInt.one, @@ -791,7 +806,7 @@ object EnvironmentDefinition extends LazyLogging { /** - 5 participants '''not''' connected to the synchronizer * - 2 synchronizers with 1 sequencer and 1 mediator each */ - private lazy val P5_S1M1_S1M1_Config: EnvironmentDefinition = + lazy val P5_S1M1_S1M1_Config: EnvironmentDefinition = buildBaseEnvironmentDefinition( numParticipants = 5, numSequencers = 2, @@ -803,23 +818,7 @@ */ lazy val P5_S1M1_S1M1: EnvironmentDefinition = P5_S1M1_S1M1_Config.withNetworkBootstrap { implicit env => - import env.* - new NetworkBootstrapper( - NetworkTopologyDescription( - daName, - synchronizerOwners = Seq(sequencer1), - synchronizerThreshold = PositiveInt.one, - sequencers = Seq(sequencer1), - mediators = Seq(mediator1), - ), - NetworkTopologyDescription( - acmeName, - synchronizerOwners = Seq(sequencer2), - synchronizerThreshold = PositiveInt.one, - sequencers = Seq(sequencer2), - mediators = Seq(mediator2), - ), - ) + NetworkBootstrapper(S1M1_S1M1) } lazy val P5_S1M1_S1M1_Manual: EnvironmentDefinition = @@ -887,11 +886,16 @@ object EnvironmentDefinition extends LazyLogging { /** - 3 participants '''not''' connected to any synchronizer * - 2 synchronizers with 1 sequencer and 1 mediator each */ - lazy val P3_S1M1_S1M1: EnvironmentDefinition = buildBaseEnvironmentDefinition( + lazy val P3_S1M1_S1M1_Config: EnvironmentDefinition = buildBaseEnvironmentDefinition( numParticipants = 3, numSequencers = 2, numMediators = 2, ) + + /** - 3 participants '''not''' connected to any synchronizer + * - 2 synchronizers with 1 sequencer and 1 mediator each + */ + lazy val P3_S1M1_S1M1: EnvironmentDefinition = P3_S1M1_S1M1_Config .withNetworkBootstrap { implicit env => NetworkBootstrapper(S1M1_S1M1) } diff --git 
a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/EnvironmentSetup.scala b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/EnvironmentSetup.scala index 0d692d4e07..294e64e4c6 100644 --- a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/EnvironmentSetup.scala +++ b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/EnvironmentSetup.scala @@ -20,11 +20,7 @@ import com.digitalasset.canton.config.{ } import com.digitalasset.canton.environment.{Environment, EnvironmentFactory} import com.digitalasset.canton.integration.EnvironmentSetup.EnvironmentSetupException -import com.digitalasset.canton.integration.plugins.{ - UseH2, - UsePostgres, - UseReferenceBlockSequencerBase, -} +import com.digitalasset.canton.integration.plugins.{UseH2, UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.logging.{LogEntry, NamedLogging, SuppressingLogger} import com.digitalasset.canton.metrics.{MetricsFactoryType, ScopedInMemoryMetricsFactory} import com.digitalasset.canton.networking.grpc.{CantonGrpcUtil, GrpcError} @@ -287,7 +283,7 @@ sealed trait EnvironmentSetup[C <: SharedCantonConfig[C], E <: Environment[C]] /* the block sequencer makes use of its own db so we don't want to create a new one here since that would * set a new state and lead to conflicts with the old db. */ - case _: UseH2 | _: UsePostgres | _: UseReferenceBlockSequencerBase[_] => + case _: UseH2 | _: UsePostgres | _: UseReferenceBlockSequencer[_] => false // to prevent creating a new fresh db, the db is only deleted when the old environment is destroyed. case plugin => runPlugins(plugin) }, diff --git a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/HasCycleUtils.scala b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/HasCycleUtils.scala index 2f6e4f97d1..68762ddce4 100644 --- a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/HasCycleUtils.scala +++ b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/HasCycleUtils.scala @@ -9,6 +9,7 @@ import com.digitalasset.canton.admin.api.client.commands.LedgerApiTypeWrappers.W import com.digitalasset.canton.config import com.digitalasset.canton.config.{ConsoleCommandTimeout, CantonConfig} import com.digitalasset.canton.console.ParticipantReference +import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.environment.CantonEnvironment import com.digitalasset.canton.examples.java.cycle as M import com.digitalasset.canton.examples.java.cycle.Cycle @@ -37,31 +38,33 @@ trait HasCycleUtils { } } - def p2acs(): Seq[LedgerApiTypeWrappers.WrappedCreatedEvent] = - participant2.ledger_api.state.acs - .of_party(partyId) - .filter(_.templateId.isModuleEntity("Cycle", "Cycle")) - .map(entry => WrappedCreatedEvent(entry.event)) - - p2acs() shouldBe empty + participantAcs(participant2, partyId) shouldBe empty clue("creating cycle " + commandId) { - createCycleContract(participant1, partyId, "I SHALL CREATE", commandId) - } - val coid = participant2.ledger_api.javaapi.state.acs.await(M.Cycle.COMPANION)(partyId) - val cycleEx = coid.id.exerciseArchive().commands.loneElement - clue("submitting response") { - participant2.ledger_api.javaapi.commands.submit( - Seq(partyId), - Seq(cycleEx), - commandId = (if (commandId.isEmpty) "" else s"$commandId-response"), + 
createCycleContract( + participant1, + partyId, + "I SHALL CREATE", + commandId, ) } + + awaitAndExerciseCycleContract(participant2, partyId, commandId).discard + eventually() { - p2acs() shouldBe empty + participantAcs(participant2, partyId) shouldBe empty } } + private def participantAcs( + participant: ParticipantReference, + partyId: PartyId, + ): Seq[LedgerApiTypeWrappers.WrappedCreatedEvent] = + participant.ledger_api.state.acs + .of_party(partyId) + .filter(_.templateId.isModuleEntity("Cycle", "Cycle")) + .map(entry => WrappedCreatedEvent(entry.event)) + def createCycleCommand(party: Party, id: String): Command = Command.fromJavaProto( new Cycle(id, party.toProtoPrimitive) @@ -71,6 +74,21 @@ trait HasCycleUtils { .toProtoCommand ) + def cleanupCycles( + partyId: PartyId, + participant: ParticipantReference, + commandId: String = "", + ): Unit = { + val coids = participant.ledger_api.javaapi.state.acs.filter(M.Cycle.COMPANION)(partyId) + clue("submitting responses for cleanup") { + archiveCycleContracts(participant, partyId, coids, commandId) + } + + eventually() { + participantAcs(participant, partyId) shouldBe empty + } + } + def createCycleContract( participant: ParticipantReference, party: Party, @@ -88,6 +106,20 @@ trait HasCycleUtils { JavaDecodeUtil.decodeAllCreated(Cycle.COMPANION)(tx).loneElement } + def awaitAndExerciseCycleContract( + participant: ParticipantReference, + partyId: PartyId, + commandId: String = "", + ): Unit = { + val coid = participant.ledger_api.javaapi.state.acs.await(M.Cycle.COMPANION)(partyId) + archiveCycleContract( + participant, + partyId, + coid, + commandId, + ) + } + def createCycleContracts( participant: ParticipantReference, partyId: PartyId, @@ -104,4 +136,32 @@ trait HasCycleUtils { participant.ledger_api.javaapi.commands .submit(Seq(partyId), cycles, commandId = commandId, optTimeout = optTimeout) } + + def archiveCycleContract( + participant: ParticipantReference, + partyId: PartyId, + coid: Cycle.Contract, + commandId: String = "", + ): Unit = { + val cycleEx = coid.id.exerciseArchive().commands.loneElement + participant.ledger_api.javaapi.commands.submit( + Seq(partyId), + Seq(cycleEx), + commandId = (if (commandId.isEmpty) "" else s"$commandId-response"), + ) + } + + def archiveCycleContracts( + participant: ParticipantReference, + partyId: PartyId, + coids: Seq[Cycle.Contract], + commandId: String = "", + ): Unit = { + val cycleExs = coids.map(_.id.exerciseArchive().commands.loneElement) + participant.ledger_api.javaapi.commands.submit( + Seq(partyId), + cycleExs, + commandId = (if (commandId.isEmpty) "" else s"$commandId-responses"), + ) + } } diff --git a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/TestEnvironment.scala b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/TestEnvironment.scala index acae19a076..0d3898e434 100644 --- a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/TestEnvironment.scala +++ b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/TestEnvironment.scala @@ -22,7 +22,6 @@ import com.digitalasset.canton.console.{ LocalInstanceReference, } import com.digitalasset.canton.crypto.Crypto -import com.digitalasset.canton.crypto.kms.CommunityKmsFactory import com.digitalasset.canton.crypto.store.CryptoPrivateStoreFactory import com.digitalasset.canton.integration.bootstrap.InitializedSynchronizer import com.digitalasset.canton.lifecycle.FutureUnlessShutdown @@ 
-61,7 +60,6 @@ trait TestEnvironment[C <: SharedCantonConfig[C]] CachingConfigs.defaultPublicKeyConversionCache, storage, CryptoPrivateStoreFactory.withoutKms(environment.clock, executionContext), - CommunityKmsFactory, testedReleaseProtocolVersion, FutureSupervisor.Noop, environment.clock, diff --git a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/bootstrap/NetworkBootstrapper.scala b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/bootstrap/NetworkBootstrapper.scala index d45615f2b7..aaef2f606f 100644 --- a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/bootstrap/NetworkBootstrapper.scala +++ b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/bootstrap/NetworkBootstrapper.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.integration.bootstrap import com.digitalasset.canton.admin.api.client.data.StaticSynchronizerParameters +import com.digitalasset.canton.config.NonNegativeFiniteDuration import com.digitalasset.canton.config.CantonConfig import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} import com.digitalasset.canton.console.{ @@ -17,6 +18,7 @@ import com.digitalasset.canton.integration.{EnvironmentDefinition, TestConsoleEn import com.digitalasset.canton.sequencing.SubmissionRequestAmplification import com.digitalasset.canton.topology.{PhysicalSynchronizerId, SynchronizerId} import com.digitalasset.canton.{SynchronizerAlias, protocol} +import monocle.syntax.all.* /** Bootstraps synchronizers given topology descriptions and stores information in * [[com.digitalasset.canton.integration.EnvironmentTestHelpers.initializedSynchronizers]]. @@ -89,23 +91,31 @@ final case class NetworkTopologyDescription( Map[MediatorReference, (Seq[SequencerReference], PositiveInt, NonNegativeInt)] ], mediatorThreshold: PositiveInt, -) +) { + def withTopologyChangeDelay( + topologyChangeDelay: NonNegativeFiniteDuration + ): NetworkTopologyDescription = + this + .focus(_.staticSynchronizerParameters.topologyChangeDelay) + .replace(topologyChangeDelay) +} object NetworkTopologyDescription { + def apply( synchronizerAlias: SynchronizerAlias, synchronizerOwners: Seq[InstanceReference], synchronizerThreshold: PositiveInt, sequencers: Seq[SequencerReference], mediators: Seq[MediatorReference], - staticSynchronizerParameters: StaticSynchronizerParameters = - EnvironmentDefinition.defaultStaticSynchronizerParameters, mediatorRequestAmplification: SubmissionRequestAmplification = SubmissionRequestAmplification.NoAmplification, overrideMediatorToSequencers: Option[ Map[MediatorReference, (Seq[SequencerReference], PositiveInt, NonNegativeInt)] ] = None, mediatorThreshold: PositiveInt = PositiveInt.one, + )(implicit + env: TestConsoleEnvironment[CantonConfig, CantonEnvironment] ): NetworkTopologyDescription = NetworkTopologyDescription( synchronizerName = synchronizerAlias.unwrap, @@ -113,11 +123,32 @@ object NetworkTopologyDescription { synchronizerThreshold, sequencers, mediators, - staticSynchronizerParameters, + EnvironmentDefinition.defaultStaticSynchronizerParameters, mediatorRequestAmplification, overrideMediatorToSequencers, mediatorThreshold, ) + + def createWithStaticSynchronizerParameters( + synchronizerAlias: SynchronizerAlias, + synchronizerOwners: Seq[InstanceReference], + synchronizerThreshold: PositiveInt, + sequencers: Seq[SequencerReference], + mediators: Seq[MediatorReference], + staticSynchronizerParameters: 
StaticSynchronizerParameters, + ): NetworkTopologyDescription = + NetworkTopologyDescription( + synchronizerName = synchronizerAlias.unwrap, + synchronizerOwners, + synchronizerThreshold, + sequencers, + mediators, + staticSynchronizerParameters, + SubmissionRequestAmplification.NoAmplification, + None, + PositiveInt.one, + ) + } /** A data container to hold useful information for initialized synchronizers diff --git a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseAwsKms.scala b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseAwsKms.scala new file mode 100644 index 0000000000..56bc9e6c54 --- /dev/null +++ b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseAwsKms.scala @@ -0,0 +1,47 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.plugins + +import com.digitalasset.canton.config.CantonRequireTypes.String300 +import com.digitalasset.canton.config.{KmsConfig, ProcessingTimeout} +import com.digitalasset.canton.crypto.kms.KmsKeyId +import com.digitalasset.canton.logging.NamedLoggerFactory + +/** Integration test plugin for setting up AWS KMS clients for nodes and to optionally enable + * encrypted crypto private stores using the KMS. + * + * @param keyId + * defines whether we use a pre-defined key (identified by its key identifier) or generate a new + * key if left as None. + * @param multiRegion + * whether it will be a single or multi-region key. + * @param nodes + * specifies (i.e., by their InstanceName) for which nodes we will configure a KMS (for now using + * AWS KMS). If nodes = UseKms.AllNodesSelected, all environment nodes are selected. 
+ * @param enableEncryptedPrivateStore + * whether to enable encrypted private stores + */ +class UseAwsKms( + protected val keyId: Option[KmsKeyId] = Some(UseAwsKms.DefaultCantonTestKeyId), + protected val multiRegion: Boolean = false, + protected val nodes: Set[String], + protected val nodesWithSessionSigningKeysDisabled: Set[String] = Set.empty, + protected val enableEncryptedPrivateStore: EncryptedPrivateStoreStatus = + EncryptedPrivateStoreStatus.Enable, + protected val timeouts: ProcessingTimeout, + protected val loggerFactory: NamedLoggerFactory, +) extends UseKms { + protected val kmsConfig: KmsConfig.Aws = + KmsConfig.Aws.defaultTestConfig.copy(multiRegionKey = multiRegion) +} + +object UseAwsKms { + lazy val DefaultCantonTestKeyId: KmsKeyId = KmsKeyId( + String300.tryCreate("alias/canton-kms-test-key") + ) + lazy val DefaultCantonRotationTestKeyId: KmsKeyId = KmsKeyId( + String300.tryCreate("alias/canton-kms-rotation-test-key") + ) +} diff --git a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseBftSequencer.scala b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseBftSequencer.scala index 473f47789e..1d5d8ba092 100644 --- a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseBftSequencer.scala +++ b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseBftSequencer.scala @@ -3,13 +3,14 @@ package com.digitalasset.canton.integration.plugins +import com.digitalasset.canton import com.digitalasset.canton.UniquePortGenerator import com.digitalasset.canton.config.CantonRequireTypes.InstanceName import com.digitalasset.canton.config.StorageConfig.Memory -import com.digitalasset.canton.config.{CantonConfig, TlsClientConfig} +import com.digitalasset.canton.config.{CantonConfig, QueryCostMonitoringConfig, TlsClientConfig} import com.digitalasset.canton.environment.CantonEnvironment import com.digitalasset.canton.integration.EnvironmentSetupPlugin -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.{ +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.{ MultiSynchronizer, SequencerSynchronizerGroups, SingleSynchronizer, @@ -49,7 +50,7 @@ final class UseBftSequencer( shouldGenerateEndpointsOnly: Boolean = false, shouldOverwriteStoredEndpoints: Boolean = false, shouldUseMemoryStorageForBftOrderer: Boolean = false, - shouldDisableCircuitBreaker: Boolean = false, + shouldBenchmarkBftSequencer: Boolean = false, ) extends EnvironmentSetupPlugin[CantonConfig, CantonEnvironment] { val sequencerEndpoints @@ -155,7 +156,7 @@ final class UseBftSequencer( overwriteStoredEndpoints = shouldOverwriteStoredEndpoints, ) val blockSequencerConfig = - if (shouldDisableCircuitBreaker) + if (shouldBenchmarkBftSequencer) BlockSequencerConfig( circuitBreaker = BlockSequencerConfig.CircuitBreakerConfig(enabled = false), streamInstrumentation = BlockSequencerStreamInstrumentationConfig(isEnabled = true), @@ -183,6 +184,15 @@ final class UseBftSequencer( sequencerEndpoints.putIfAbsent(sequencersToEndpoints.toMap) config + .focus(_.monitoring.logging.queryCost) + .modify { _ => + if (shouldBenchmarkBftSequencer) + Some( + QueryCostMonitoringConfig(every = canton.config.NonNegativeFiniteDuration.ofSeconds(30)) + ) + else + None + } .focus(_.sequencers) .modify(_.map(mapSequencerConfigs)) } diff --git 
a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseCommunityKms.scala b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseCommunityKms.scala deleted file mode 100644 index 590f2e8dce..0000000000 --- a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseCommunityKms.scala +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.integration.plugins - -import com.digitalasset.canton.concurrent.FutureSupervisor -import com.digitalasset.canton.crypto.kms.{CommunityKmsFactory, Kms, KmsError} -import com.digitalasset.canton.time.WallClock -import com.digitalasset.canton.tracing.NoReportingTracerProvider - -import scala.concurrent.ExecutionContext - -trait UseCommunityKms extends UseKms { - override protected def createKms()(implicit ec: ExecutionContext): Either[KmsError, Kms] = - CommunityKmsFactory - .create( - kmsConfig, - timeouts, - FutureSupervisor.Noop, - NoReportingTracerProvider, - new WallClock(timeouts, loggerFactory), - loggerFactory, - ec, - ) -} diff --git a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseCommunityReferenceBlockSequencer.scala b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseCommunityReferenceBlockSequencer.scala deleted file mode 100644 index b59ee828df..0000000000 --- a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseCommunityReferenceBlockSequencer.scala +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.integration.plugins - -import com.digitalasset.canton.config.StorageConfig -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.{ - SequencerSynchronizerGroups, - SingleSynchronizer, -} -import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.synchronizer.sequencing.sequencer.reference.CommunityReferenceSequencerDriverFactory - -import scala.reflect.ClassTag - -class UseCommunityReferenceBlockSequencer[S <: StorageConfig]( - override protected val loggerFactory: NamedLoggerFactory, - sequencerGroups: SequencerSynchronizerGroups = SingleSynchronizer, - postgres: Option[UsePostgres] = None, -)(implicit _c: ClassTag[S]) - extends UseReferenceBlockSequencerBase[ - S - ](loggerFactory, "reference", "community-reference", sequencerGroups, postgres) { - - override protected val driverFactory = new CommunityReferenceSequencerDriverFactory - -} diff --git a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseGcpKms.scala b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseGcpKms.scala new file mode 100644 index 0000000000..3bfbe76a13 --- /dev/null +++ b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseGcpKms.scala @@ -0,0 +1,32 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.plugins + +import com.digitalasset.canton.config.CantonRequireTypes.String300 +import com.digitalasset.canton.config.{KmsConfig, ProcessingTimeout} +import com.digitalasset.canton.crypto.kms.KmsKeyId +import com.digitalasset.canton.logging.NamedLoggerFactory + +/** Integration test plugin for setting up GCP KMS clients for nodes and to optionally enable + * encrypted crypto private stores using the KMS. For more info, see [[UseAwsKms]]. + */ +class UseGcpKms( + protected val keyId: Option[KmsKeyId] = Some(UseGcpKms.DefaultCantonTestKeyId), + protected val nodes: Set[String], + protected val nodesWithSessionSigningKeysDisabled: Set[String] = Set.empty, + protected val enableEncryptedPrivateStore: EncryptedPrivateStoreStatus = + EncryptedPrivateStoreStatus.Enable, + protected val kmsConfig: KmsConfig.Gcp = KmsConfig.Gcp.defaultTestConfig, + protected val timeouts: ProcessingTimeout, + protected val loggerFactory: NamedLoggerFactory, +) extends UseKms + +object UseGcpKms { + lazy val DefaultCantonTestKeyId: KmsKeyId = KmsKeyId( + String300.tryCreate("canton-kms-test-key") + ) + lazy val DefaultCantonRotationTestKeyId: KmsKeyId = KmsKeyId( + String300.tryCreate("canton-kms-rotation-test-key") + ) +} diff --git a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseKms.scala b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseKms.scala index a1ebf3c2c7..8b3bbc2e57 100644 --- a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseKms.scala +++ b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseKms.scala @@ -9,11 +9,12 @@ import cats.syntax.parallel.* import com.digitalasset.canton.concurrent.{ ExecutionContextIdlenessExecutorService, ExecutorServiceExtensions, + FutureSupervisor, Threading, } import com.digitalasset.canton.config.* import com.digitalasset.canton.config.DefaultProcessingTimeouts.shutdownProcessing -import com.digitalasset.canton.crypto.kms.{Kms, KmsError, KmsKeyId} +import com.digitalasset.canton.crypto.kms.{Kms, KmsError, KmsFactory, KmsKeyId} import com.digitalasset.canton.crypto.store.{CryptoPrivateStore, EncryptedCryptoPrivateStore} import com.digitalasset.canton.environment.CantonEnvironment import com.digitalasset.canton.integration.{ @@ -23,7 +24,8 @@ import com.digitalasset.canton.integration.{ } import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, LifeCycle} import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.tracing.NoTracing +import com.digitalasset.canton.time.WallClock +import com.digitalasset.canton.tracing.{NoReportingTracerProvider, NoTracing} import com.digitalasset.canton.util.FutureInstances.* import com.digitalasset.canton.util.ResourceUtil import monocle.macros.syntax.lens.* @@ -44,8 +46,6 @@ abstract class UseKms protected val timeouts: ProcessingTimeout protected val loggerFactory: NamedLoggerFactory - protected def createKms()(implicit ec: ExecutionContext): Either[KmsError, Kms] - // ensure that all nodes with session signing keys `disabled` are part of the full protected node set require( nodesWithSessionSigningKeysDisabled.subsetOf(nodes), @@ -53,11 +53,23 @@ abstract class UseKms s"${nodesWithSessionSigningKeysDisabled.diff(nodes).mkString(", ")}", ) + private val clock = new WallClock(timeouts, loggerFactory) + protected 
def withKmsClient[V]( f: Kms => EitherT[Future, KmsError, V] )(implicit ec: ExecutionContext): EitherT[Future, KmsError, V] = for { - kmsClient <- createKms().toEitherT[Future] + kmsClient <- KmsFactory + .create( + kmsConfig, + timeouts, + FutureSupervisor.Noop, + NoReportingTracerProvider, + clock, + loggerFactory, + ec, + ) + .toEitherT[Future] res <- ResourceUtil.withResourceM(kmsClient)(f) } yield res @@ -187,15 +199,40 @@ abstract class UseKms override def close(): Unit = LifeCycle.close( + clock, ExecutorServiceExtensions(kmsKeyDeletionExecutionContext)( logger, DefaultProcessingTimeouts.testing, - ) + ), )(logger) override def afterTests(): Unit = close() } +object UseKms { + def withKmsClient[V]( + kmsConfig: KmsConfig, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + )( + f: Kms => EitherT[Future, KmsError, V] + )(implicit ec: ExecutionContext): EitherT[Future, KmsError, V] = + for { + kmsClient <- KmsFactory + .create( + kmsConfig, + timeouts, + FutureSupervisor.Noop, + NoReportingTracerProvider, + new WallClock(timeouts, loggerFactory), + loggerFactory, + ec, + ) + .toEitherT[Future] + res <- ResourceUtil.withResourceM(kmsClient)(f) + } yield res +} + sealed trait EncryptedPrivateStoreStatus object EncryptedPrivateStoreStatus { final case object Enable extends EncryptedPrivateStoreStatus diff --git a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseKmsDriver.scala b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseKmsDriver.scala index 802ffc3987..0ecca285b6 100644 --- a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseKmsDriver.scala +++ b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseKmsDriver.scala @@ -37,7 +37,7 @@ class UseKmsDriver( val driverConfig: ConfigValue = ConfigValueFactory.fromMap(Map.empty[String, AnyRef].asJava), protected val timeouts: ProcessingTimeout, protected val loggerFactory: NamedLoggerFactory, -) extends UseCommunityKms { +) extends UseKms { protected val kmsConfig: KmsConfig.Driver = KmsConfig.Driver( driverName, driverConfig, diff --git a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseLedgerApiTestTool.scala b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseLedgerApiTestTool.scala index 9bec9efbbd..41a59af10d 100644 --- a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseLedgerApiTestTool.scala +++ b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseLedgerApiTestTool.scala @@ -98,7 +98,7 @@ class UseLedgerApiTestTool( version match { case LAPITTVersion.LocalJar => // Requires running `sbt ledger-test-tool-/assembly` first. 
- s"file://${System.getProperty("user.dir")}/enterprise/ledger-test-tool/tool/lf-v${lfVersion.testToolSuffix.tail}/target/scala-2.13/$testToolName-$testToolVersion.jar" + s"file://${System.getProperty("user.dir")}/community/ledger-test-tool/tool/lf-v${lfVersion.testToolSuffix.tail}/target/scala-2.13/$testToolName-$testToolVersion.jar" case _ => val relativeUrl = s"com/digitalasset/canton/ledger-api-test-tool_2.13/$testToolVersion/${testToolName}_2.13-$testToolVersion.jar" diff --git a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseReferenceBlockSequencer.scala b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseReferenceBlockSequencer.scala new file mode 100644 index 0000000000..14ea315d47 --- /dev/null +++ b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseReferenceBlockSequencer.scala @@ -0,0 +1,300 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.plugins + +import cats.syntax.parallel.* +import com.daml.nameof.NameOf.functionFullName +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.concurrent.Threading +import com.digitalasset.canton.config.* +import com.digitalasset.canton.config.CantonRequireTypes.InstanceName +import com.digitalasset.canton.environment.CantonEnvironment +import com.digitalasset.canton.integration.ConfigTransforms.generateUniqueH2DatabaseName +import com.digitalasset.canton.integration.EnvironmentSetupPlugin +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.{ + MultiSynchronizer, + SequencerSynchronizerGroups, + SingleSynchronizer, +} +import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory} +import com.digitalasset.canton.store.db.DbStorageSetup.DbBasicConfig +import com.digitalasset.canton.synchronizer.sequencer.config.SequencerNodeConfig +import com.digitalasset.canton.synchronizer.sequencer.{BlockSequencerConfig, SequencerConfig} +import com.digitalasset.canton.synchronizer.sequencing.sequencer.reference.{ + ReferenceSequencerDriver, + ReferenceSequencerDriverFactory, +} +import com.digitalasset.canton.util.ErrorUtil +import com.digitalasset.canton.util.FutureInstances.parallelFuture +import com.digitalasset.canton.{TempDirectory, TempFile} +import com.typesafe.config.{Config, ConfigFactory} +import monocle.macros.syntax.lens.* +import pureconfig.ConfigCursor + +import scala.concurrent.{Await, ExecutionContext, Future} +import scala.reflect.ClassTag + +/** @param sequencerGroups + * If sequencerGroups is defined, all the sequencers of the same set will share the same storage + * (which means they are part of the same synchronizer). All sequencers in the config that are + * not in a defined group will be placed in the same default group. If it is not defined, all + * sequencers will share the same storage by default and one synchronizer only is assumed. If + * in-memory storage is defined, sequencers sharing storage is not supported (each one is a + * different synchronizer). 
+ */ +class UseReferenceBlockSequencer[StorageConfigT <: StorageConfig]( + override protected val loggerFactory: NamedLoggerFactory, + sequencerGroups: SequencerSynchronizerGroups = SingleSynchronizer, + postgres: Option[UsePostgres] = None, +)(implicit c: ClassTag[StorageConfigT]) + extends EnvironmentSetupPlugin[CantonConfig, CantonEnvironment] { + + private val driverSingleWordName: String = "reference" + private val driverDescription: String = "Reference Block Sequencer" + + private implicit val pluginExecutionContext: ExecutionContext = + Threading.newExecutionContext( + loggerFactory.threadName + s"-$driverSingleWordName-sequencer-plugin-execution-context", + noTracingLogger, + ) + + @SuppressWarnings(Array("org.wartremover.warts.Var")) + private[canton] var pgPlugin: Option[UsePostgres] = postgres + + protected val driverFactory: ReferenceSequencerDriverFactory = new ReferenceSequencerDriverFactory + + private final def dbNameForGroup(group: Int): String = s"${driverSingleWordName}_db_$group" + + protected val dbNames: NonEmpty[List[String]] = sequencerGroups match { + case SingleSynchronizer => NonEmpty(List, dbNameForGroup(0)) + case MultiSynchronizer(_) => + NonEmpty( + List, + dbNameForGroup(0), // db 0 is the default one + (1 to sequencerGroups.numberOfSynchronizers).map(i => dbNameForGroup(i)).toList* + ) + } + + private def driverConfigs( + config: CantonConfig, + storageConfigs: Map[InstanceName, StorageConfigT], + ): Map[InstanceName, SequencerConfig] = { + implicit val errorLoggingContext: ErrorLoggingContext = + ErrorLoggingContext.forClass(loggerFactory, getClass) + config.sequencers.map { case (sequencerName, originalConfig) => + val driverConfigCursor = ConfigCursor( + driverFactory + .configWriter(confidential = false) + .to( + ReferenceSequencerDriver + .Config( + storageConfigs.getOrElse( + sequencerName, + ErrorUtil.invalidState(s"Missing storage config for $sequencerName"), + ) + ) + ), + List(), + ) + + sequencerName -> + (originalConfig.sequencer match { + case external: SequencerConfig.External => + // Here we preserve any settings coming from the original config, i.e. 
from ConfigTransforms + external.copy( + sequencerType = driverFactory.name, + config = driverConfigCursor, + ) + + case _ => + SequencerConfig.External( + driverFactory.name, + BlockSequencerConfig(), + driverConfigCursor, + ) + }) + } + } + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + override def beforeEnvironmentCreated(config: CantonConfig): CantonConfig = { + c.runtimeClass match { + case cl if cl == classOf[DbConfig.Postgres] => + pgPlugin = + if (pgPlugin.nonEmpty) pgPlugin + else { + val postgresPlugin = new UsePostgres(loggerFactory) + postgresPlugin.beforeTests() + Some(postgresPlugin) + } + val postgresPlugin = + pgPlugin.getOrElse( + throw new IllegalStateException("Impossible code path: pgPlugin is set above") + ) + Await.result( + dbNames.forgetNE.parTraverse_(db => postgresPlugin.recreateDatabaseForNodeWithName(db)), + config.parameters.timeouts.processing.io.duration, + ) + case _ => + } + + // in H2 we need to make db name unique, but it has to be matching for all sequencers, so we cache it + lazy val dbNamesH2: Map[String, String] = dbNames.forgetNE.map { dbName => + dbName -> generateUniqueH2DatabaseName(dbName) + }.toMap + + def dbToStorageConfig( + dbName: String, + dbParametersConfig: DbParametersConfig, + baseDbConfig: Config, + ): StorageConfigT = + c.runtimeClass match { + case cl if cl == classOf[DbConfig.H2] => + val h2DbName = dbNamesH2.getOrElse( + dbName, + throw new IllegalStateException( + s"Impossible code path: dbName $dbName not found in $dbNamesH2" + ), + ) + DbConfig + .H2( + DbBasicConfig("user", "pass", h2DbName, "", 0).toH2DbConfig + .copy(parameters = dbParametersConfig) + .config + ) + .asInstanceOf[StorageConfigT] + case cl if cl == classOf[DbConfig.Postgres] => + val postgresPlugin = + pgPlugin.getOrElse( + throw new IllegalStateException("Impossible code path: pgPlugin is set above") + ) + postgresPlugin + .generateDbConfig(dbName, dbParametersConfig, baseDbConfig) + .asInstanceOf[StorageConfigT] + case cl if cl == classOf[StorageConfig.Memory] => + StorageConfig.Memory(parameters = dbParametersConfig).asInstanceOf[StorageConfigT] + case other => + // E.g. 
Nothing; we need to check and fail b/c the Scala compiler doesn't enforce + // passing the ClassTag-reified type parameter, if it's only used for a ClassTag implicit + sys.error( + s"The $driverDescription sequencer driver doesn't recognize storage type $other" + ) + } + + val storageConfigMap: Map[InstanceName, StorageConfigT] = sequencerGroups match { + case SingleSynchronizer => + config.sequencers.map { case (name, sequencerConfig) => + val dbParameters = sequencerConfig.storage.parameters + (name, dbToStorageConfig(dbNames.head1, dbParameters, sequencerConfig.storage.config)) + } + case MultiSynchronizer(groups) => + groups.zipWithIndex.flatMap { case (sequencers, i) => + val dbName = dbNameForGroup(i + 1) + sequencers.map { name => + val (dbParameters, baseDbConfig) = config.sequencers + .get(name) + .map(s => (s.storage.parameters, s.storage.config)) + .getOrElse((DbParametersConfig(), ConfigFactory.empty())) + + (name, dbToStorageConfig(dbName, dbParameters, baseDbConfig)) + } + }.toMap + } + + val sequencersToConfig: Map[InstanceName, SequencerConfig] = + driverConfigs(config, storageConfigMap) + + def mapSequencerConfigs( + kv: (InstanceName, SequencerNodeConfig) + ): (InstanceName, SequencerNodeConfig) = kv match { + case (name, cfg) => + ( + name, + cfg.focus(_.sequencer).replace(sequencersToConfig(name)), + ) + } + + config + .focus(_.sequencers) + .modify(_.map(mapSequencerConfigs)) + } + + override def afterTests(): Unit = pgPlugin.foreach(_.afterTests()) + + override def afterEnvironmentDestroyed(config: CantonConfig): Unit = + pgPlugin.foreach { postgresPlugin => + Await.result( + postgresPlugin.dropDatabases(dbNames), + config.parameters.timeouts.processing.io.duration, + ) + } + + def recreateDatabases(): Future[Unit] = + pgPlugin match { + case Some(postgresPlugin) => + dbNames.forgetNE.parTraverse_(db => + postgresPlugin + .recreateDatabaseForNodeWithName(db) + .failOnShutdownToAbortException("recreateDatabases") + ) + case _ => + logger.underlying.warn( + s"`$functionFullName` was called on a test without UsePostgres plugin, which is not supported!" + ) + Future.unit + } + + def dumpDatabases(tempDirectory: TempDirectory, forceLocal: Boolean = false): Future[Unit] = + pgPlugin match { + case Some(postgresPlugin) => + val pgDumpRestore = PostgresDumpRestore(postgresPlugin, forceLocal) + dbNames.forgetNE.parTraverse_(db => + pgDumpRestore.saveDump(db, dumpTempFile(tempDirectory, db)) + ) + case _ => + logger.underlying.warn( + s"`$functionFullName` was called on a test without UsePostgres plugin, which is not supported!" + ) + Future.unit + } + + def restoreDatabases(tempDirectory: TempDirectory, forceLocal: Boolean = false): Future[Unit] = + pgPlugin match { + case Some(postgresPlugin) => + val pgDumpRestore = PostgresDumpRestore(postgresPlugin, forceLocal) + dbNames.forgetNE.parTraverse_(db => + pgDumpRestore.restoreDump(db, dumpTempFile(tempDirectory, db).path) + ) + case _ => + logger.underlying.warn( + s"`$functionFullName` was called on a test without UsePostgres plugin, which is not supported!" 
+ ) + Future.unit + } + + private def dumpTempFile(tempDirectory: TempDirectory, dbName: String): TempFile = + tempDirectory.toTempFile(s"pg-dump-$dbName.tar") + +} + +object UseReferenceBlockSequencer { + + sealed trait SequencerSynchronizerGroups { + def numberOfSynchronizers: Int + } + + case object SingleSynchronizer extends SequencerSynchronizerGroups { + override val numberOfSynchronizers: Int = 1 + } + + final case class MultiSynchronizer(sequencerGroups: Seq[Set[InstanceName]]) + extends SequencerSynchronizerGroups { + override val numberOfSynchronizers: Int = sequencerGroups.size + } + + object MultiSynchronizer { + def tryCreate(sequencerGroups: Set[String]*): MultiSynchronizer = + MultiSynchronizer(sequencerGroups.map(_.map(InstanceName.tryCreate))) + } +} diff --git a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseReferenceBlockSequencerBase.scala b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseReferenceBlockSequencerBase.scala deleted file mode 100644 index c32c66566d..0000000000 --- a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/plugins/UseReferenceBlockSequencerBase.scala +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.integration.plugins - -import cats.syntax.parallel.* -import com.daml.nameof.NameOf.functionFullName -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.concurrent.Threading -import com.digitalasset.canton.config.* -import com.digitalasset.canton.config.CantonRequireTypes.InstanceName -import com.digitalasset.canton.environment.CantonEnvironment -import com.digitalasset.canton.integration.ConfigTransforms.generateUniqueH2DatabaseName -import com.digitalasset.canton.integration.EnvironmentSetupPlugin -import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencerBase.{ - MultiSynchronizer, - SequencerSynchronizerGroups, - SingleSynchronizer, -} -import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory} -import com.digitalasset.canton.store.db.DbStorageSetup.DbBasicConfig -import com.digitalasset.canton.synchronizer.sequencer.BlockSequencerConfig.CircuitBreakerConfig -import com.digitalasset.canton.synchronizer.sequencer.config.SequencerNodeConfig -import com.digitalasset.canton.synchronizer.sequencer.{ - BlockSequencerConfig, - BlockSequencerStreamInstrumentationConfig, - SequencerConfig, -} -import com.digitalasset.canton.synchronizer.sequencing.sequencer.reference.{ - BaseReferenceSequencerDriverFactory, - ReferenceSequencerDriver, -} -import com.digitalasset.canton.util.ErrorUtil -import com.digitalasset.canton.util.FutureInstances.parallelFuture -import com.digitalasset.canton.{TempDirectory, TempFile} -import com.typesafe.config.{Config, ConfigFactory} -import monocle.macros.syntax.lens.* -import pureconfig.ConfigCursor - -import scala.concurrent.{Await, ExecutionContext, Future} -import scala.reflect.ClassTag - -/** @param sequencerGroups - * If sequencerGroups is defined, all the sequencers of the same set will share the same storage - * (which means they are part of the same synchronizer). All sequencers in the config that are - * not in a defined group will be placed in the same default group. 
If it is not defined, all - * sequencers will share the same storage by default and one synchronizer only is assumed. If - * in-memory storage is defined, sequencers sharing storage is not supported (each one is a - * different synchronizer). - */ -abstract class UseReferenceBlockSequencerBase[ - StorageConfigT <: StorageConfig -]( - override protected val loggerFactory: NamedLoggerFactory, - driverSingleWordName: String, - driverDescription: String, - sequencerGroups: SequencerSynchronizerGroups = SingleSynchronizer, - postgres: Option[UsePostgres] = None, -)(implicit c: ClassTag[StorageConfigT]) - extends EnvironmentSetupPlugin[CantonConfig, CantonEnvironment] { - - private implicit val pluginExecutionContext: ExecutionContext = - Threading.newExecutionContext( - loggerFactory.threadName + s"-$driverSingleWordName-sequencer-plugin-execution-context", - noTracingLogger, - ) - - @SuppressWarnings(Array("org.wartremover.warts.Var")) - private[canton] var pgPlugin: Option[UsePostgres] = postgres - - protected val driverFactory: BaseReferenceSequencerDriverFactory - - private final def dbNameForGroup(group: Int): String = s"${driverSingleWordName}_db_$group" - - protected val dbNames: NonEmpty[List[String]] = sequencerGroups match { - case SingleSynchronizer => NonEmpty(List, dbNameForGroup(0)) - case MultiSynchronizer(_) => - NonEmpty( - List, - dbNameForGroup(0), // db 0 is the default one - (1 to sequencerGroups.numberOfSynchronizers).map(i => dbNameForGroup(i)).toList* - ) - } - - private def driverConfigs( - config: CantonConfig, - storageConfigs: Map[InstanceName, StorageConfigT], - ): Map[InstanceName, SequencerConfig] = { - implicit val errorLoggingContext: ErrorLoggingContext = - ErrorLoggingContext.forClass(loggerFactory, getClass) - config.sequencers.keys.map { sequencerName => - sequencerName -> SequencerConfig.External( - driverFactory.name, - BlockSequencerConfig( - circuitBreaker = config.sequencers(sequencerName).sequencer match { - case external: SequencerConfig.External => external.block.circuitBreaker - case _ => CircuitBreakerConfig() - }, - streamInstrumentation = config.sequencers(sequencerName).sequencer match { - case external: SequencerConfig.External => external.block.streamInstrumentation - case _ => BlockSequencerStreamInstrumentationConfig() - }, - ), - ConfigCursor( - driverFactory - .configWriter(confidential = false) - .to( - ReferenceSequencerDriver - .Config( - storageConfigs.getOrElse( - sequencerName, - ErrorUtil.invalidState(s"Missing storage config for $sequencerName"), - ) - ) - ), - List(), - ), - ) - }.toMap - } - - @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) - override def beforeEnvironmentCreated(config: CantonConfig): CantonConfig = { - c.runtimeClass match { - case cl if cl == classOf[DbConfig.Postgres] => - pgPlugin = - if (pgPlugin.nonEmpty) pgPlugin - else { - val postgresPlugin = new UsePostgres(loggerFactory) - postgresPlugin.beforeTests() - Some(postgresPlugin) - } - val postgresPlugin = - pgPlugin.getOrElse( - throw new IllegalStateException("Impossible code path: pgPlugin is set above") - ) - Await.result( - dbNames.forgetNE.parTraverse_(db => postgresPlugin.recreateDatabaseForNodeWithName(db)), - config.parameters.timeouts.processing.io.duration, - ) - case _ => - } - - // in H2 we need to make db name unique, but it has to be matching for all sequencers, so we cache it - lazy val dbNamesH2: Map[String, String] = dbNames.forgetNE.map { dbName => - dbName -> generateUniqueH2DatabaseName(dbName) - }.toMap - - def 
dbToStorageConfig( - dbName: String, - dbParametersConfig: DbParametersConfig, - baseDbConfig: Config, - ): StorageConfigT = - c.runtimeClass match { - case cl if cl == classOf[DbConfig.H2] => - val h2DbName = dbNamesH2.getOrElse( - dbName, - throw new IllegalStateException( - s"Impossible code path: dbName $dbName not found in $dbNamesH2" - ), - ) - DbConfig - .H2( - DbBasicConfig("user", "pass", h2DbName, "", 0).toH2DbConfig - .copy(parameters = dbParametersConfig) - .config - ) - .asInstanceOf[StorageConfigT] - case cl if cl == classOf[DbConfig.Postgres] => - val postgresPlugin = - pgPlugin.getOrElse( - throw new IllegalStateException("Impossible code path: pgPlugin is set above") - ) - postgresPlugin - .generateDbConfig(dbName, dbParametersConfig, baseDbConfig) - .asInstanceOf[StorageConfigT] - case cl if cl == classOf[StorageConfig.Memory] => - StorageConfig.Memory(parameters = dbParametersConfig).asInstanceOf[StorageConfigT] - case other => - // E.g. Nothing; we need to check and fail b/c the Scala compiler doesn't enforce - // passing the ClassTag-reified type parameter, if it's only used for a ClassTag implicit - sys.error( - s"The $driverDescription sequencer driver doesn't recognize storage type $other" - ) - } - - val storageConfigMap: Map[InstanceName, StorageConfigT] = sequencerGroups match { - case SingleSynchronizer => - config.sequencers.map { case (name, sequencerConfig) => - val dbParameters = sequencerConfig.storage.parameters - (name, dbToStorageConfig(dbNames.head1, dbParameters, sequencerConfig.storage.config)) - } - case MultiSynchronizer(groups) => - groups.zipWithIndex.flatMap { case (sequencers, i) => - val dbName = dbNameForGroup(i + 1) - sequencers.map { name => - val (dbParameters, baseDbConfig) = config.sequencers - .get(name) - .map(s => (s.storage.parameters, s.storage.config)) - .getOrElse((DbParametersConfig(), ConfigFactory.empty())) - - (name, dbToStorageConfig(dbName, dbParameters, baseDbConfig)) - } - }.toMap - } - - val sequencersToConfig: Map[InstanceName, SequencerConfig] = - driverConfigs(config, storageConfigMap) - - def mapSequencerConfigs( - kv: (InstanceName, SequencerNodeConfig) - ): (InstanceName, SequencerNodeConfig) = kv match { - case (name, cfg) => - ( - name, - cfg.focus(_.sequencer).replace(sequencersToConfig(name)), - ) - } - - config - .focus(_.sequencers) - .modify(_.map(mapSequencerConfigs)) - } - - override def afterTests(): Unit = pgPlugin.foreach(_.afterTests()) - - override def afterEnvironmentDestroyed(config: CantonConfig): Unit = - pgPlugin.foreach { postgresPlugin => - Await.result( - postgresPlugin.dropDatabases(dbNames), - config.parameters.timeouts.processing.io.duration, - ) - } - - def recreateDatabases(): Future[Unit] = - pgPlugin match { - case Some(postgresPlugin) => - dbNames.forgetNE.parTraverse_(db => - postgresPlugin - .recreateDatabaseForNodeWithName(db) - .failOnShutdownToAbortException("recreateDatabases") - ) - case _ => - logger.underlying.warn( - s"`$functionFullName` was called on a test without UsePostgres plugin, which is not supported!" 
- ) - Future.unit - } - - def dumpDatabases(tempDirectory: TempDirectory, forceLocal: Boolean = false): Future[Unit] = - pgPlugin match { - case Some(postgresPlugin) => - val pgDumpRestore = PostgresDumpRestore(postgresPlugin, forceLocal) - dbNames.forgetNE.parTraverse_(db => - pgDumpRestore.saveDump(db, dumpTempFile(tempDirectory, db)) - ) - case _ => - logger.underlying.warn( - s"`$functionFullName` was called on a test without UsePostgres plugin, which is not supported!" - ) - Future.unit - } - - def restoreDatabases(tempDirectory: TempDirectory, forceLocal: Boolean = false): Future[Unit] = - pgPlugin match { - case Some(postgresPlugin) => - val pgDumpRestore = PostgresDumpRestore(postgresPlugin, forceLocal) - dbNames.forgetNE.parTraverse_(db => - pgDumpRestore.restoreDump(db, dumpTempFile(tempDirectory, db).path) - ) - case _ => - logger.underlying.warn( - s"`$functionFullName` was called on a test without UsePostgres plugin, which is not supported!" - ) - Future.unit - } - - private def dumpTempFile(tempDirectory: TempDirectory, dbName: String): TempFile = - tempDirectory.toTempFile(s"pg-dump-$dbName.tar") - -} - -object UseReferenceBlockSequencerBase { - - sealed trait SequencerSynchronizerGroups { - def numberOfSynchronizers: Int - } - - case object SingleSynchronizer extends SequencerSynchronizerGroups { - override val numberOfSynchronizers: Int = 1 - } - - final case class MultiSynchronizer(sequencerGroups: Seq[Set[InstanceName]]) - extends SequencerSynchronizerGroups { - override val numberOfSynchronizers: Int = sequencerGroups.size - } - - object MultiSynchronizer { - def tryCreate(sequencerGroups: Set[String]*): MultiSynchronizer = - MultiSynchronizer(sequencerGroups.map(_.map(InstanceName.tryCreate))) - } -} diff --git a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/ProgrammableSequencer.scala b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/ProgrammableSequencer.scala index 12657b0854..105441074d 100644 --- a/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/ProgrammableSequencer.scala +++ b/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/ProgrammableSequencer.scala @@ -107,6 +107,11 @@ class ProgrammableSequencer( SequencerTrafficStatus ] = baseSequencer.trafficStatus(members, selector) + override def sequencingTime(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Option[CantonTimestamp]] = + baseSequencer.sequencingTime + /** Run body with a given policy. * * Points to consider: diff --git a/canton/community/ledger-api-bench-tool/src/main/daml/benchtool/Bench.daml b/canton/community/ledger-api-bench-tool/src/main/daml/benchtool/Bench.daml new file mode 100644 index 0000000000..a2a22bf73a --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/daml/benchtool/Bench.daml @@ -0,0 +1,38 @@ +-- Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+-- SPDX-License-Identifier: Apache-2.0 + +module Bench where + +inefficientFibonacci : Int -> Int +inefficientFibonacci n = + case n of + 0 -> 0 + 1 -> 1 + m -> inefficientFibonacci (m - 2) + inefficientFibonacci (m - 1) + +template InefficientFibonacciResult + with + owner: Party + result: Int + where + signatory owner + + +template InefficientFibonacci + with + owner: Party + where + signatory owner + + choice InefficientFibonacci_Compute : ContractId InefficientFibonacciResult + with + value: Int + controller owner + do create (InefficientFibonacciResult owner (inefficientFibonacci value)) + + nonconsuming choice InefficientFibonacci_NcCompute : Int + with + value: Int + controller owner + do + pure (inefficientFibonacci value) diff --git a/canton/community/ledger-api-bench-tool/src/main/daml/benchtool/Foo.daml b/canton/community/ledger-api-bench-tool/src/main/daml/benchtool/Foo.daml new file mode 100644 index 0000000000..461275a18b --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/daml/benchtool/Foo.daml @@ -0,0 +1,211 @@ +-- Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +-- SPDX-License-Identifier: Apache-2.0 + +module Foo where + +import DA.Functor (void) + +template Divulger + with + divulgees: [Party] -- Parties to whom something is divulged + divulger: Party -- Party who divulges something + keyId: Text + where + signatory [divulger] <> divulgees + + key (divulger, keyId): (Party, Text) + maintainer key._1 + + nonconsuming choice DivulgeContractImmediate: () + with + fooObservers : [Party] + fooPayload : Text + fooKeyId: Text + fooTemplateName: Text + controller divulger + do + -- Parties from 'divulgees' see the creation of Foo even though + -- they are not contract stakeholders, i.e. immediate divulgence occurs. + if fooTemplateName == "Foo1" then + void $ create Foo1 with + signatory = divulger + observers = fooObservers + payload = fooPayload + keyId = fooKeyId + else if fooTemplateName == "Foo2" then + void $ create Foo2 with + signatory = divulger + observers = fooObservers + payload = fooPayload + keyId = fooKeyId + else if fooTemplateName == "Foo3" then + void $ create Foo3 with + signatory = divulger + observers = fooObservers + payload = fooPayload + keyId = fooKeyId + else + return () + + nonconsuming choice DivulgeConsumingExercise: () + with + fooTemplateName: Text + fooKey: (Party, Text) + fooConsumingPayload : Text + controller divulger + do + -- Parties from 'divulgees' see the consuming exercise on Foo even though they are not + -- contract stakeholders or choice controllers/observers, i.e. divulgence occurs. 
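+ -- The dispatch below is by template name; any name other than Foo1/Foo2/Foo3 makes this choice a no-op.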
+ if fooTemplateName == "Foo1" then + void $ exerciseByKey @Foo1 fooKey (Foo1_ConsumingChoice fooConsumingPayload) + else if fooTemplateName == "Foo2" then + void $ exerciseByKey @Foo2 fooKey (Foo2_ConsumingChoice fooConsumingPayload) + else if fooTemplateName == "Foo3" then + void $ exerciseByKey @Foo3 fooKey (Foo3_ConsumingChoice fooConsumingPayload) + else + return () +template Foo1 + with + signatory : Party + observers : [Party] + payload : Text + keyId: Text + where + signatory signatory + observer observers + key (signatory, keyId): (Party, Text) + maintainer key._1 + + nonconsuming choice Foo1_NonconsumingChoice: () + with + exercisePayload: Text + observer observers + controller signatory + do + return () + + choice Foo1_ConsumingChoice: () + with + exercisePayload: Text + controller signatory + do + return () + + interface instance FooI1 for Foo1 where + view = FooData with templateName = "Foo1", .. + +template Foo2 + with + signatory : Party + observers : [Party] + payload : Text + keyId: Text + where + signatory signatory + observer observers + key (signatory, keyId): (Party, Text) + maintainer key._1 + + nonconsuming choice Foo2_NonconsumingChoice: () + with + exercisePayload: Text + observer observers + controller signatory + do + return () + + choice Foo2_ConsumingChoice: () + with + exercisePayload: Text + controller signatory + do + return () + + interface instance FooI2 for Foo2 where + view = foo2ToFooData $ foo2Roundtrip 10 this + + +template Foo3 + with + signatory : Party + observers : [Party] + payload : Text + keyId: Text + where + signatory signatory + observer observers + key (signatory, keyId): (Party, Text) + maintainer key._1 + + nonconsuming choice Foo3_NonconsumingChoice: () + with + exercisePayload: Text + observer observers + controller signatory + do + return () + + choice Foo3_ConsumingChoice: () + with + exercisePayload: Text + controller signatory + do + return () + + interface instance FooI3 for Foo3 where + view = foo3ToFooData $ foo3Roundtrip 100 this + + +template Dummy + with + signatory: Party + where + signatory signatory + + +data FooData = FooData + with + signatory : Party + observers : [Party] + payload : Text + keyId: Text + templateName: Text + deriving (Eq, Show) + +-- FooI1 exposes the simplest case of the interface views - just copying the data from within the template +interface FooI1 where + viewtype FooData + +foo2ToFooData : Foo2 -> FooData +foo2ToFooData Foo2{..} = FooData with templateName = "Foo2", .. + +fooDataToFoo2 : FooData -> Foo2 +fooDataToFoo2 FooData{..} + | templateName == "Foo2" = Foo2 {..} + | otherwise = error "fooDataToFoo2 called on a non-Foo2" + +foo2Roundtrip : Int -> Foo2 -> Foo2 +foo2Roundtrip n x + | n <= 0 = x + | otherwise = foo2Roundtrip (n - 1) (fooDataToFoo2 $ foo2ToFooData x) + +-- FooI2 exposes a FooData view through 10 round-trips in the recursion calls +interface FooI2 where + viewtype FooData + +foo3ToFooData : Foo3 -> FooData +foo3ToFooData Foo3{..} = FooData with templateName = "Foo3", .. 
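+ -- fooDataToFoo3 below is the partial inverse of foo3ToFooData; it aborts via `error` for any other template name.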
+
+fooDataToFoo3 : FooData -> Foo3
+fooDataToFoo3 FooData{..}
+  | templateName == "Foo3" = Foo3 {..}
+  | otherwise = error "fooDataToFoo3 called on a non-Foo3"
+
+foo3Roundtrip : Int -> Foo3 -> Foo3
+foo3Roundtrip n x
+  | n <= 0 = x
+  | otherwise = foo3Roundtrip (n - 1) (fooDataToFoo3 $ foo3ToFooData x)
+
+-- FooI3 exposes a FooData view computed through 100 round-trips of the conversion functions
+interface FooI3 where
+  viewtype FooData
diff --git a/canton/community/ledger-api-bench-tool/src/main/daml/benchtool/daml.yaml b/canton/community/ledger-api-bench-tool/src/main/daml/benchtool/daml.yaml
new file mode 100644
index 0000000000..69d02eb35b
--- /dev/null
+++ b/canton/community/ledger-api-bench-tool/src/main/daml/benchtool/daml.yaml
@@ -0,0 +1,9 @@
+sdk-version: 3.3.0-snapshot.20250502.13767.0.v2fc6c7e2
+build-options:
+- --enable-interfaces=yes
+name: benchtool-tests
+source: .
+version: 3.4.0
+dependencies:
+- daml-prim
+- daml-stdlib
diff --git a/canton/community/ledger-api-bench-tool/src/main/resources/logback.xml b/canton/community/ledger-api-bench-tool/src/main/resources/logback.xml
new file mode 100644
index 0000000000..dd006482b8
--- /dev/null
+++ b/canton/community/ledger-api-bench-tool/src/main/resources/logback.xml
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+
+    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder>
+            <pattern>%date{"yyyy-MM-dd'T'HH:mm:ss.SSSXXX", UTC} %-5level %logger{5}@[%-4.30thread] - %msg%n</pattern>
+        </encoder>
+    </appender>
+
+    <root level="INFO">
+        <appender-ref ref="STDOUT"/>
+    </root>
+
+</configuration>
diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/AuthorizationHelper.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/AuthorizationHelper.scala
new file mode 100644
index 0000000000..b61ee52741
--- /dev/null
+++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/AuthorizationHelper.scala
@@ -0,0 +1,48 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool + +import com.daml.grpc.AuthCallCredentials +import com.daml.jwt.{ + AuthServiceJWTCodec, + DecodedJwt, + JwtSigner, + StandardJWTPayload, + StandardJWTTokenFormat, +} +import io.grpc.stub.AbstractStub + +object AuthorizationHelper { + def maybeAuthedService[T <: AbstractStub[T]](userTokenO: Option[String])(service: T): T = + userTokenO.fold(service)(token => AuthCallCredentials.authorizingStub(service, token)) +} + +class AuthorizationHelper(val authorizationTokenSecret: String) { + + /** @return + * user token signed with HMAC256 + */ + def tokenFor(userId: String): String = { + val payload = StandardJWTPayload( + issuer = None, + participantId = None, + userId = userId, + exp = None, + format = StandardJWTTokenFormat.Scope, + audiences = List.empty, + scope = None, + ) + JwtSigner.HMAC256 + .sign( + jwt = DecodedJwt( + header = """{"alg": "HS256", "typ": "JWT"}""", + payload = AuthServiceJWTCodec.compactPrint(payload), + ), + secret = authorizationTokenSecret, + ) + .getOrElse(sys.error("Failed to generate token")) + .value + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/Benchmark.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/Benchmark.scala new file mode 100644 index 0000000000..5f922cdcda --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/Benchmark.scala @@ -0,0 +1,149 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool + +import com.daml.ledger.api.v2.command_completion_service.CompletionStreamResponse +import com.daml.ledger.api.v2.state_service.GetActiveContractsResponse +import com.daml.ledger.api.v2.update_service.GetUpdatesResponse +import com.daml.metrics.api.MetricHandle.LabeledMetricsFactory +import com.daml.timer.Delayed +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.StreamConfig +import com.digitalasset.canton.ledger.api.benchtool.metrics.{ + BenchmarkResult, + MetricsSet, + StreamMetrics, +} +import com.digitalasset.canton.ledger.api.benchtool.services.LedgerApiServices +import com.digitalasset.canton.ledger.api.benchtool.util.ObserverWithResult +import org.apache.pekko.actor.typed.{ActorSystem, SpawnProtocol} +import org.slf4j.LoggerFactory + +import scala.concurrent.duration.{Duration, FiniteDuration} +import scala.concurrent.{ExecutionContext, Future} + +object Benchmark { + private val logger = LoggerFactory.getLogger(getClass) + + def run( + streamConfigs: List[StreamConfig], + reportingPeriod: FiniteDuration, + apiServices: LedgerApiServices, + metricsFactory: LabeledMetricsFactory, + system: ActorSystem[SpawnProtocol.Command], + )(implicit ec: ExecutionContext): Future[Either[String, Unit]] = + Future + .traverse(streamConfigs) { + case streamConfig: StreamConfig.TransactionsStreamConfig => + for { + _ <- delaySubscriptionIfConfigured(streamConfig)(system) + observer <- StreamMetrics + .observer[GetUpdatesResponse]( + streamName = streamConfig.name, + logInterval = reportingPeriod, + metrics = MetricsSet.transactionMetrics(streamConfig.objectives), + logger = logger, + exposedMetrics = Some( + MetricsSet + .transactionExposedMetrics(streamConfig.name, metricsFactory) + ), + itemCountingFunction = 
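+                  // counted per event, not per response message (cf. MetricsSet.countTransactionsEvents):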
MetricsSet.countTransactionsEvents, + maxItemCount = streamConfig.maxItemCount, + )(system, ec) + _ = streamConfig.timeoutO + .foreach(timeout => scheduleCancelStreamTask(timeout, observer)) + result <- apiServices.updateService.transactions(streamConfig, observer) + } yield result + case streamConfig: StreamConfig.TransactionLedgerEffectsStreamConfig => + for { + _ <- delaySubscriptionIfConfigured(streamConfig)(system) + observer <- StreamMetrics + .observer[GetUpdatesResponse]( + streamName = streamConfig.name, + logInterval = reportingPeriod, + metrics = MetricsSet.transactionMetrics(streamConfig.objectives), + logger = logger, + exposedMetrics = Some( + MetricsSet.transactionExposedMetrics( + streamConfig.name, + metricsFactory, + ) + ), + itemCountingFunction = MetricsSet.countTransactionsEvents, + maxItemCount = streamConfig.maxItemCount, + )(system, ec) + _ = streamConfig.timeoutO + .foreach(timeout => scheduleCancelStreamTask(timeout, observer)) + result <- apiServices.updateService.transactionsLedgerEffects(streamConfig, observer) + } yield result + case streamConfig: StreamConfig.ActiveContractsStreamConfig => + for { + _ <- delaySubscriptionIfConfigured(streamConfig)(system) + observer <- StreamMetrics + .observer[GetActiveContractsResponse]( + streamName = streamConfig.name, + logInterval = reportingPeriod, + metrics = MetricsSet.activeContractsMetrics(streamConfig.objectives), + logger = logger, + exposedMetrics = Some( + MetricsSet.activeContractsExposedMetrics( + streamConfig.name, + metricsFactory, + ) + ), + itemCountingFunction = response => MetricsSet.countActiveContracts(response).toLong, + maxItemCount = streamConfig.maxItemCount, + )(system, ec) + _ = streamConfig.timeoutO + .foreach(timeout => scheduleCancelStreamTask(timeout, observer)) + result <- apiServices.stateService.getActiveContracts(streamConfig, observer) + } yield result + case streamConfig: StreamConfig.CompletionsStreamConfig => + for { + _ <- delaySubscriptionIfConfigured(streamConfig)(system) + observer <- StreamMetrics + .observer[CompletionStreamResponse]( + streamName = streamConfig.name, + logInterval = reportingPeriod, + metrics = MetricsSet.completionsMetrics(streamConfig.objectives), + logger = logger, + exposedMetrics = Some( + MetricsSet + .completionsExposedMetrics(streamConfig.name, metricsFactory) + ), + itemCountingFunction = response => MetricsSet.countCompletions(response).toLong, + maxItemCount = streamConfig.maxItemCount, + )(system, ec) + _ = streamConfig.timeoutO + .foreach(timeout => scheduleCancelStreamTask(timeout, observer)) + result <- apiServices.commandCompletionService.completions(streamConfig, observer) + } yield result + } + .map { results => + Either.cond( + !results.contains(BenchmarkResult.ObjectivesViolated), + (), + "Metrics objectives not met.", + ) + } + + def scheduleCancelStreamTask(timeoutDuration: Duration, observer: ObserverWithResult[_, _])( + implicit ec: ExecutionContext + ): Unit = { + val _ = Delayed.by(t = timeoutDuration)( + observer.cancel() + ) + } + + private def delaySubscriptionIfConfigured( + streamConfig: StreamConfig + )(implicit system: ActorSystem[SpawnProtocol.Command]): Future[Unit] = + streamConfig.subscriptionDelay match { + case Some(delay) => + logger.info( + s"Delaying stream subscription with $delay for stream $streamConfig" + ) + org.apache.pekko.pattern.after(delay)(Future.unit) + case None => Future.unit + } +} diff --git 
a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/ConfigEnricher.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/ConfigEnricher.scala new file mode 100644 index 0000000000..b5f2596c89 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/ConfigEnricher.scala @@ -0,0 +1,123 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool + +import com.daml.ledger.api.v2.value.Identifier +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.StreamConfig +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.StreamConfig.{ + ActiveContractsStreamConfig, + CompletionsStreamConfig, + PartyFilter, + PartyNamePrefixFilter, + TransactionLedgerEffectsStreamConfig, + TransactionsStreamConfig, +} +import com.digitalasset.canton.ledger.api.benchtool.submission.{ + AllocatedParties, + BenchtoolTestsPackageInfo, + FooTemplateDescriptor, +} + +class ConfigEnricher( + allocatedParties: AllocatedParties, + packageInfo: BenchtoolTestsPackageInfo, +) { + private val packageRef = packageInfo.packageRef.toString + + private def toTemplateId[T](templateId: Identifier): (String, String) = + templateId.entityName -> s"$packageRef:${templateId.moduleName}:${templateId.entityName}" + + private val interfaceNameToFullyQualifiedNameMap: Map[String, String] = List( + FooTemplateDescriptor.fooI1TemplateId(packageRef), + FooTemplateDescriptor.fooI2TemplateId(packageRef), + FooTemplateDescriptor.fooI3TemplateId(packageRef), + ).map(toTemplateId).toMap + + private val templateNameToFullyQualifiedNameMap: Map[String, String] = List( + FooTemplateDescriptor.Foo1(packageRef).templateId, + FooTemplateDescriptor.Foo2(packageRef).templateId, + FooTemplateDescriptor.Foo3(packageRef).templateId, + ).map(toTemplateId).toMap + + def enrichStreamConfig( + streamConfig: StreamConfig + ): StreamConfig = + streamConfig match { + case config: TransactionsStreamConfig => + config + .copy( + filters = enrichFilters(config.filters) ++ config.partyNamePrefixFilters.flatMap( + convertFilterByPartySet + ), + partyNamePrefixFilters = List.empty, + ) + case config: TransactionLedgerEffectsStreamConfig => + config + .copy( + filters = enrichFilters(config.filters) ++ config.partyNamePrefixFilters.flatMap( + convertFilterByPartySet + ), + partyNamePrefixFilters = List.empty, + ) + case config: ActiveContractsStreamConfig => + config + .copy( + filters = enrichFilters(config.filters) ++ config.partyNamePrefixFilters.flatMap( + convertFilterByPartySet + ), + partyNamePrefixFilters = List.empty, + ) + case config: CompletionsStreamConfig => + config.copy(parties = config.parties.map(party => convertParty(party))) + } + + private def convertParty( + partyShortName: String + ): String = + allocatedParties.allAllocatedParties + .map(_.getValue) + .find(_.contains(partyShortName)) + .getOrElse(partyShortName) + + private def convertFilterByPartySet( + filter: PartyNamePrefixFilter + ): List[PartyFilter] = { + val convertedTemplates = filter.templates.map(convertTemplate) + val convertedInterfaces = filter.interfaces.map(convertInterface) + val matchedParties = matchingParties(filter.partyNamePrefix) + matchedParties.map(party => + PartyFilter(party = party, templates = convertedTemplates, interfaces = 
convertedInterfaces) + ) + } + + private def matchingParties(partyNamePrefix: String): List[String] = { + val knownParties = allocatedParties.allAllocatedParties.map(_.getValue) + val matchedParties = knownParties.filter(_.startsWith(partyNamePrefix)) + if (matchedParties.isEmpty) { + val knownPartiesText = knownParties.mkString(", ") + sys.error( + s"Expected party name prefix: '$partyNamePrefix' does not match any of the known parties: $knownPartiesText" + ) + } else + matchedParties + } + + private def enrichFilters( + filters: List[StreamConfig.PartyFilter] + ): List[StreamConfig.PartyFilter] = + filters.map { filter => + StreamConfig.PartyFilter( + party = convertParty(filter.party), + templates = filter.templates.map(convertTemplate), + interfaces = filter.interfaces.map(convertInterface), + ) + } + + def convertTemplate(shortTemplateName: String): String = + templateNameToFullyQualifiedNameMap.getOrElse(shortTemplateName, shortTemplateName) + + def convertInterface(shortInterfaceName: String): String = + interfaceNameToFullyQualifiedNameMap.getOrElse(shortInterfaceName, shortInterfaceName) + +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/LedgerApiBenchTool.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/LedgerApiBenchTool.scala new file mode 100644 index 0000000000..6ecd8c7e6a --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/LedgerApiBenchTool.scala @@ -0,0 +1,424 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool + +import cats.syntax.either.* +import com.daml.ledger.resources.{ResourceContext, ResourceOwner} +import com.daml.metrics.api.MetricHandle.LabeledMetricsFactory +import com.daml.metrics.api.opentelemetry.OpenTelemetryMetricsFactory +import com.digitalasset.canton.config.TlsClientConfig +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.{ + FibonacciSubmissionConfig, + FooSubmissionConfig, +} +import com.digitalasset.canton.ledger.api.benchtool.config.{Config, ConfigMaker, WorkflowConfig} +import com.digitalasset.canton.ledger.api.benchtool.metrics.MetricsManager.NoOpMetricsManager +import com.digitalasset.canton.ledger.api.benchtool.metrics.{ + BenchmarkResult, + LatencyMetric, + MetricRegistryOwner, + MetricsManager, +} +import com.digitalasset.canton.ledger.api.benchtool.services.LedgerApiServices +import com.digitalasset.canton.ledger.api.benchtool.submission.* +import com.digitalasset.canton.ledger.api.benchtool.submission.foo.RandomPartySelecting +import com.digitalasset.canton.ledger.api.benchtool.util.TypedActorSystemResourceOwner +import com.digitalasset.canton.ledger.localstore.api.UserManagementStore +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.networking.grpc.ClientChannelBuilder +import io.grpc.Channel +import io.grpc.netty.shaded.io.grpc.netty.{NegotiationType, NettyChannelBuilder} +import io.opentelemetry.api.metrics.MeterProvider +import org.apache.pekko.actor.typed.{ActorSystem, SpawnProtocol} +import org.slf4j.{Logger, LoggerFactory} + +import java.util.concurrent.* +import scala.concurrent.duration.* +import scala.concurrent.{Await, ExecutionContext, Future} +import scala.util.control.NonFatal + +/** Runs a submission step followed by a benchmark 
step. Either step is optional. + */ +object LedgerApiBenchTool { + private val printer = pprint.PPrinter.BlackWhite + + private[benchtool] val logger: Logger = LoggerFactory.getLogger(getClass) + private[benchtool] def prettyPrint(x: Any): String = printer(x).toString() + + def main(args: Array[String]): Unit = { + import scala.concurrent.ExecutionContext.Implicits.global + ConfigMaker.make(args) match { + case Left(error) => + logger.error(s"Configuration error: ${error.details}") + sys.exit(1) + case Right(config) => + logger.info(s"Starting benchmark with configuration:\n${prettyPrint(config)}") + val result = LedgerApiBenchTool(config) + .run()(ExecutionContext.Implicits.global) + .map { + case Right(()) => + logger.info(s"Benchmark finished successfully.") + case Left(error) => + logger.info(s"Benchmark failed: $error") + } + .recover { case ex => + logger.error(s"ledger-api-bench-tool failure: ${ex.getMessage}", ex) + sys.exit(1) + }(scala.concurrent.ExecutionContext.Implicits.global) + Await.result(result, atMost = Duration.Inf) + () + } + } + + def apply(config: Config): LedgerApiBenchTool = + new LedgerApiBenchTool( + names = new Names, + authorizationHelper = config.authorizationTokenSecret.map(new AuthorizationHelper(_)), + config = config, + ) + +} + +class LedgerApiBenchTool( + names: Names, + authorizationHelper: Option[AuthorizationHelper], + config: Config, +) { + + import LedgerApiBenchTool.{logger, prettyPrint} + + def run()(implicit ec: ExecutionContext): Future[Either[String, Unit]] = { + implicit val resourceContext: ResourceContext = ResourceContext(ec) + + val resources: ResourceOwner[ + ( + String => LedgerApiServices, + ActorSystem[SpawnProtocol.Command], + MeterProvider, + ) + ] = for { + servicesForUserId <- apiServicesOwner(config, authorizationHelper) + system <- TypedActorSystemResourceOwner.owner() + meterProvider <- new MetricRegistryOwner(config.reportingPeriod, NamedLoggerFactory.root) + } yield (servicesForUserId, system, meterProvider) + + resources.use { case (servicesForUserId, actorSystem, meterProvider) => + val adminServices = servicesForUserId(UserManagementStore.DefaultParticipantAdminUserId) + val regularUserServices = servicesForUserId(names.benchtoolUserId) + val metricsFactory = new OpenTelemetryMetricsFactory( + meterProvider.meterBuilder("ledger-api-bench-tool").build(), + Set.empty, + Some(logger), + ) + + val partyAllocating = new PartyAllocating( + names = names, + adminServices = adminServices, + ) + for { + _ <- regularUserSetupStep(adminServices) + (allocatedParties, benchtoolTestsPackageInfo) <- { + config.workflow.submission match { + case None => + logger.info("No submission config found; skipping the command submission step") + for { + allocatedParties <- SubmittedDataAnalyzing.determineAllocatedParties( + config.workflow, + partyAllocating, + ) + } yield { + (allocatedParties, BenchtoolTestsPackageInfo.StaticDefault) + } + case Some(submissionConfig) => + logger.info("Submission config found; command submission will be performed") + submissionStep( + regularUserServices = regularUserServices, + adminServices = adminServices, + submissionConfig = submissionConfig, + metricsFactory = metricsFactory, + partyAllocating = partyAllocating, + ) + .map(_ -> BenchtoolTestsPackageInfo.StaticDefault) + .map { v => + // We manually execute a 'VACUUM ANALYZE' at the end of the submission step (if IndexDB is on Postgresql), + // to make sure query planner statistics, visibility map, etc.. are all up-to-date. 
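+                // (The check below is a plain JDBC-URL prefix test: for example a URL
+                // such as "jdbc:postgresql://localhost:5432/index" triggers the vacuum,
+                // while any other JDBC URL leaves the database untouched.)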
+ config.ledger.indexDbJdbcUrlO.foreach { indexDbJdbcUrl => + if (indexDbJdbcUrl.startsWith("jdbc:postgresql:")) { + PostgresUtils.invokeVacuumAnalyze(indexDbJdbcUrl) + } + } + v + } + } + } + + configEnricher = new ConfigEnricher(allocatedParties, benchtoolTestsPackageInfo) + updatedStreamConfigs = config.workflow.streams.map(streamsConfig => + configEnricher.enrichStreamConfig(streamsConfig) + ) + + _ = logger.info( + s"Stream configs adapted after the submission step: ${prettyPrint(updatedStreamConfigs)}" + ) + benchmarkResult <- + if (config.latencyTest) { + benchmarkLatency( + regularUserServices = regularUserServices, + adminServices = adminServices, + submissionConfigO = config.workflow.submission, + metricsFactory = metricsFactory, + allocatedParties = allocatedParties, + actorSystem = actorSystem, + maxLatencyObjectiveMillis = config.maxLatencyObjectiveMillis, + ) + } else if (config.workflow.pruning.isDefined) { + new PruningBenchmark(reportingPeriod = config.reportingPeriod).benchmarkPruning( + pruningConfig = + config.workflow.pruning.getOrElse(sys.error("Pruning config not defined!")), + regularUserServices = regularUserServices, + adminServices = adminServices, + actorSystem = actorSystem, + signatory = allocatedParties.signatory, + names = names, + ) + } else { + benchmarkStreams( + regularUserServices = regularUserServices, + streamConfigs = updatedStreamConfigs, + metricsFactory = metricsFactory, + actorSystem = actorSystem, + ) + } + } yield benchmarkResult + } + } + + private def regularUserSetupStep( + adminServices: LedgerApiServices + )(implicit ec: ExecutionContext): Future[Unit] = + (config.authorizationTokenSecret, config.workflow.submission) match { + case (Some(_), Some(submissionConfig)) => + // We only need to setup the user when the UserManagementService is used and we're going to submit transactions + // The submission config is necessary to establish a set of rights that will be granted to the user. + logger.info( + s"Setting up the regular '${names.benchtoolUserId}' user prior to the submission phase." + ) + adminServices.userManagementService.createUserOrGrantRightsToExisting( + userId = names.benchtoolUserId, + observerPartyNames = names.observerPartyNames( + submissionConfig.numberOfObservers, + submissionConfig.uniqueParties, + ), + signatoryPartyName = names.signatoryPartyName, + ) + case _ => + Future.successful( + logger.info( + s"The '${names.benchtoolUserId}' user is going to be used for authentication." + ) + ) + } + + private def benchmarkStreams( + regularUserServices: LedgerApiServices, + streamConfigs: List[WorkflowConfig.StreamConfig], + metricsFactory: LabeledMetricsFactory, + actorSystem: ActorSystem[SpawnProtocol.Command], + )(implicit ec: ExecutionContext): Future[Either[String, Unit]] = + if (streamConfigs.isEmpty) { + logger.info(s"No streams defined. 
Skipping the benchmark step.") + Future.successful(Either.unit) + } else + Benchmark + .run( + streamConfigs = streamConfigs, + reportingPeriod = config.reportingPeriod, + apiServices = regularUserServices, + metricsFactory = metricsFactory, + system = actorSystem, + ) + + private def benchmarkLatency( + regularUserServices: LedgerApiServices, + adminServices: LedgerApiServices, + submissionConfigO: Option[WorkflowConfig.SubmissionConfig], + metricsFactory: LabeledMetricsFactory, + allocatedParties: AllocatedParties, + actorSystem: ActorSystem[SpawnProtocol.Command], + maxLatencyObjectiveMillis: Long, + )(implicit ec: ExecutionContext): Future[Either[String, Unit]] = + submissionConfigO match { + case Some(submissionConfig: FooSubmissionConfig) => + val generator: CommandGenerator = new FooCommandGenerator( + config = submissionConfig, + divulgeesToDivulgerKeyMap = Map.empty, + names = names, + allocatedParties = allocatedParties, + partySelecting = new RandomPartySelecting( + config = submissionConfig, + allocatedParties = allocatedParties, + randomnessProvider = RandomnessProvider.Default, + ), + randomnessProvider = RandomnessProvider.Default, + ) + for { + metricsManager <- MetricsManager.create( + observedMetric = "submit-and-wait-latency", + logInterval = config.reportingPeriod, + metrics = List(LatencyMetric.empty(maxLatencyObjectiveMillis)), + exposedMetrics = None, + )(actorSystem, ec) + submitter = CommandSubmitter( + names = names, + benchtoolUserServices = regularUserServices, + adminServices = adminServices, + metricsFactory = metricsFactory, + metricsManager = metricsManager, + waitForSubmission = true, + partyAllocating = new PartyAllocating( + names = names, + adminServices = adminServices, + ), + ) + result <- submitter + .generateAndSubmit( + generator = generator, + config = submissionConfig, + baseActAs = List(allocatedParties.signatory), + maxInFlightCommands = config.maxInFlightCommands, + submissionBatchSize = config.submissionBatchSize, + ) + .flatMap(_ => metricsManager.result()) + .map { + case BenchmarkResult.ObjectivesViolated => + Left("Metrics objectives not met.") + case BenchmarkResult.Ok => + Either.unit + } + .recoverWith { case NonFatal(e) => + Future.successful(Left(e.getMessage)) + } + } yield result + case Some(other) => + Future.failed( + new RuntimeException(s"Unsupported submission config for latency benchmarking: $other") + ) + case None => + Future.failed( + new RuntimeException("Submission config cannot be empty for latency benchmarking") + ) + } + + def submissionStep( + regularUserServices: LedgerApiServices, + adminServices: LedgerApiServices, + submissionConfig: WorkflowConfig.SubmissionConfig, + metricsFactory: LabeledMetricsFactory, + partyAllocating: PartyAllocating, + )(implicit + ec: ExecutionContext + ): Future[AllocatedParties] = { + + val submitter = CommandSubmitter( + names = names, + benchtoolUserServices = regularUserServices, + adminServices = adminServices, + metricsFactory = metricsFactory, + metricsManager = NoOpMetricsManager(), + waitForSubmission = submissionConfig.waitForSubmission, + partyAllocating = partyAllocating, + ) + for { + allocatedParties <- submitter.prepare( + submissionConfig + ) + _ <- + submissionConfig match { + case submissionConfig: FooSubmissionConfig => + new FooSubmission( + submitter = submitter, + maxInFlightCommands = config.maxInFlightCommands, + submissionBatchSize = config.submissionBatchSize, + allocatedParties = allocatedParties, + names = names, + randomnessProvider = 
RandomnessProvider.Default, + ).performSubmission(submissionConfig) + case submissionConfig: FibonacciSubmissionConfig => + val generator: CommandGenerator = new FibonacciCommandGenerator( + signatory = allocatedParties.signatory, + config = submissionConfig, + names = names, + ) + for { + _ <- submitter + .generateAndSubmit( + generator = generator, + config = submissionConfig, + baseActAs = List(allocatedParties.signatory) ++ allocatedParties.divulgees, + maxInFlightCommands = config.maxInFlightCommands, + submissionBatchSize = config.submissionBatchSize, + ) + } yield () + } + } yield allocatedParties + } + + private def apiServicesOwner( + config: Config, + authorizationHelper: Option[AuthorizationHelper], + )(implicit ec: ExecutionContext): ResourceOwner[String => LedgerApiServices] = + for { + executorService <- threadPoolExecutorOwner(config.concurrency) + channel <- channelOwner(config.ledger, config.tls, executorService) + servicesForUserId <- ResourceOwner.forFuture(() => + LedgerApiServices.forChannel( + channel = channel, + authorizationHelper = authorizationHelper, + ) + ) + } yield servicesForUserId + + private def channelOwner( + ledger: Config.Ledger, + tls: Option[TlsClientConfig], + executor: Executor, + ): ResourceOwner[Channel] = { + logger.info( + s"Setting up a managed channel to a ledger at: ${ledger.hostname}:${ledger.port}..." + ) + val MessageChannelSizeBytes: Int = 32 * 1024 * 1024 // 32 MiB + val ShutdownTimeout: FiniteDuration = 5.seconds + + val channelBuilder = NettyChannelBuilder + .forAddress(ledger.hostname, ledger.port) + .executor(executor) + .maxInboundMessageSize(MessageChannelSizeBytes) + .usePlaintext() + + tls.map(ClientChannelBuilder.sslContext(_, logTlsProtocolAndCipherSuites = true)).foreach { + sslContext => + logger.info(s"Setting up a managed channel with transport security...") + channelBuilder + .useTransportSecurity() + .sslContext(sslContext) + .negotiationType(NegotiationType.TLS) + } + + ResourceOwner.forChannel(channelBuilder, ShutdownTimeout) + } + + private def threadPoolExecutorOwner( + config: Config.Concurrency + ): ResourceOwner[ThreadPoolExecutor] = + ResourceOwner.forExecutorService(() => + new ThreadPoolExecutor( + config.corePoolSize, + config.maxPoolSize, + config.keepAliveTime, + TimeUnit.SECONDS, + if (config.maxQueueLength == 0) new SynchronousQueue[Runnable]() + else new ArrayBlockingQueue[Runnable](config.maxQueueLength), + ) + ) +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/PostgresUtils.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/PostgresUtils.scala new file mode 100644 index 0000000000..de26b4116a --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/PostgresUtils.scala @@ -0,0 +1,69 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool + +import com.digitalasset.canton.ledger.api.benchtool.LedgerApiBenchTool.logger + +import java.sql.{Connection, DriverManager, Statement} + +object PostgresUtils { + + def invokeVacuumAnalyze(indexDbJdbcUrl: String): Unit = { + val connection = DriverManager.getConnection(indexDbJdbcUrl) + try { + val stmt = connection.createStatement() + val vacuumQuery = "VACUUM ANALYZE" + try { + logger.info( + s"Executing '$vacuumQuery' on the IndexDB identified by JDBC URL: '$indexDbJdbcUrl' ..." + ) + stmt.executeUpdate(vacuumQuery) + logger.info(s"Executed '$vacuumQuery'") + } finally { + stmt.close() + inspectVacuumAndAnalyzeState(connection) + } + } finally { + connection.close() + } + } + + private def inspectVacuumAndAnalyzeState(connection: Connection): Unit = { + val stmt = connection.createStatement() + val query = + "SELECT relname, last_vacuum, last_autovacuum, last_analyze, last_autoanalyze FROM pg_stat_user_tables ORDER BY relname" + try { + logger.info( + "Executing SQL query: " + query + ) + stmt.execute( + query + ) + printQueryResult(stmt) + } finally { + stmt.close() + } + } + + private def printQueryResult(s: Statement): Unit = { + val rs = s.getResultSet + val meta = rs.getMetaData + val colCount = meta.getColumnCount + val buffer = new StringBuffer() + try { + while (rs.next()) { + val text = 1 + .to(colCount) + .map(colNumber => + f"${meta.getColumnName(colNumber)} ${rs.getString(colNumber) + ","}%-45s" + ) + .mkString(" ") + buffer.append(text).append("\n") + } + } finally { + logger.info(buffer.toString) + } + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/PruningBenchmark.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/PruningBenchmark.scala new file mode 100644 index 0000000000..4382b921f4 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/PruningBenchmark.scala @@ -0,0 +1,108 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.ledger.api.benchtool
+
+import cats.syntax.either.*
+import com.daml.ledger.api.v2.admin.participant_pruning_service.PruneRequest
+import com.daml.ledger.api.v2.commands.Commands.DeduplicationPeriod.Empty
+import com.daml.ledger.api.v2.commands.{Command, Commands, CreateCommand}
+import com.daml.ledger.api.v2.value.{Record, RecordField, Value}
+import com.daml.ledger.javaapi.data.Party
+import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig
+import com.digitalasset.canton.ledger.api.benchtool.infrastructure.TestDars
+import com.digitalasset.canton.ledger.api.benchtool.metrics.{
+  BenchmarkResult,
+  MetricsManager,
+  MetricsSet,
+}
+import com.digitalasset.canton.ledger.api.benchtool.services.LedgerApiServices
+import com.digitalasset.canton.ledger.api.benchtool.submission.{FooTemplateDescriptor, Names}
+import com.digitalasset.daml.lf.data.Ref
+import org.apache.pekko.actor.typed.{ActorSystem, SpawnProtocol}
+
+import scala.concurrent.duration.FiniteDuration
+import scala.concurrent.{ExecutionContext, Future}
+
+class PruningBenchmark(reportingPeriod: FiniteDuration) {
+
+  private val packageRef: Ref.PackageRef = TestDars.benchtoolDarPackageRef
+
+  def benchmarkPruning(
+      signatory: Party,
+      pruningConfig: WorkflowConfig.PruningConfig,
+      regularUserServices: LedgerApiServices,
+      adminServices: LedgerApiServices,
+      actorSystem: ActorSystem[SpawnProtocol.Command],
+      names: Names,
+  )(implicit ec: ExecutionContext): Future[Either[String, Unit]] = for {
+    endOffset <- regularUserServices.stateService.getLedgerEnd()
+    // Submit one more command so that we're not pruning exactly at the ledger end offset
+    _ <- adminServices.commandService.submitAndWait(
+      Commands(
+        workflowId = "",
+        userId = names.benchtoolUserId,
+        commandId = "pruning-benchmarking-dummy-command",
+        commands = Seq(makeCreateDummyCommand(signatory)),
+        deduplicationPeriod = Empty,
+        minLedgerTimeAbs = None,
+        minLedgerTimeRel = None,
+        actAs = Seq(signatory.getValue),
+        readAs = Nil,
+        submissionId = "",
+        disclosedContracts = Nil,
+        synchronizerId = "",
+        packageIdSelectionPreference = Nil,
+        prefetchContractKeys = Nil,
+      )
+    )
+    durationMetric = MetricsSet.createTotalRuntimeMetric[Unit](pruningConfig.maxDurationObjective)
+    metricsManager <- MetricsManager.create(
+      observedMetric = "benchtool-pruning",
+      logInterval = reportingPeriod,
+      metrics = List(durationMetric),
+      exposedMetrics = None,
+    )(actorSystem, ec)
+    _ <- adminServices.pruningService
+      .prune(
+        new PruneRequest(
+          pruneUpTo = endOffset,
+          submissionId = "benchtool-pruning",
+          pruneAllDivulgedContracts = pruningConfig.pruneAllDivulgedContracts,
+        )
+      )
+    _ = metricsManager.sendNewValue(())
+    // Await the metrics result so that an objective violation actually fails the
+    // benchmark instead of being computed and then discarded.
+    benchmarkResult <- metricsManager.result().map {
+      case BenchmarkResult.ObjectivesViolated => Left("Metrics objectives not met.")
+      case BenchmarkResult.Ok => Either.unit
+    }
+  } yield benchmarkResult
+
+  private def makeCreateDummyCommand(
+      signatory: Party
+  ) = {
+    val createArguments: Option[Record] = Some(
+      Record(
+        None,
+        Seq(
+          RecordField(
+            label = "signatory",
+            value = Some(Value(Value.Sum.Party(signatory.getValue))),
+          )
+        ),
+      )
+    )
+    val c: Command = Command(
+      command = Command.Command.Create(
+        CreateCommand(
+          templateId = Some(FooTemplateDescriptor.dummyTemplateId(packageId = packageRef.toString)),
+          createArguments = createArguments,
+        )
+      )
+    )
+    c
+  }
+
+}
diff --git
a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/SubmittedDataAnalyzing.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/SubmittedDataAnalyzing.scala
new file mode 100644
index 0000000000..1e48e62f1d
--- /dev/null
+++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/SubmittedDataAnalyzing.scala
@@ -0,0 +1,35 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.ledger.api.benchtool
+
+import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig
+import com.digitalasset.canton.ledger.api.benchtool.submission.{AllocatedParties, PartyAllocating}
+import org.slf4j.{Logger, LoggerFactory}
+
+import scala.concurrent.{ExecutionContext, Future}
+
+/** Contains utilities for retrieving useful facts from data already submitted to a Ledger API
+  * server. (The motivating use case is benchmarks that do not perform a submission step of their
+  * own and therefore cannot statically determine these facts.)
+  */
+object SubmittedDataAnalyzing {
+
+  private[benchtool] val logger: Logger = LoggerFactory.getLogger(getClass)
+
+  def determineAllocatedParties(
+      workflowConfig: WorkflowConfig,
+      partyAllocating: PartyAllocating,
+  )(implicit ec: ExecutionContext): Future[AllocatedParties] = {
+    logger.info("Analyzing existing parties...")
+    for {
+      existingParties <- partyAllocating.lookupExistingParties()
+    } yield {
+      AllocatedParties.forExistingParties(
+        parties = existingParties.toList,
+        partyPrefixesForPartySets =
+          workflowConfig.streams.flatMap(_.partySetPrefixes.iterator).distinct,
+      )
+    }
+  }
+}
diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/config/Cli.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/config/Cli.scala
new file mode 100644
index 0000000000..e846ac67ef
--- /dev/null
+++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/config/Cli.scala
@@ -0,0 +1,495 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.ledger.api.benchtool.config
+
+import cats.syntax.either.*
+import com.digitalasset.canton.config.RequireTypes.ExistingFile
+import com.digitalasset.canton.config.{PemFile, TlsClientCertificate, TlsClientConfig}
+import com.digitalasset.canton.discard.Implicits.DiscardOps
+import scopt.{OptionParser, Read}
+
+import java.io.File
+import java.nio.file.Paths
+import scala.concurrent.duration.{Duration, FiniteDuration}
+import scala.util.{Failure, Success, Try}
+
+object Cli {
+  private val ProgramName: String = "ledger-api-bench-tool"
+  private val parser: OptionParser[Config] = new OptionParser[Config](ProgramName) {
+    import Reads.*
+
+    head("A tool for measuring transaction streaming performance of a ledger.").discard
+
+    opt[(String, Int)]("endpoint")(endpointRead)
+      .abbr("e")
+      .text("Ledger API endpoint")
+      .valueName("<hostname>:<port>")
+      .optional()
+      .action { case ((hostname, port), config) =>
+        config.copy(ledger = config.ledger.copy(hostname = hostname, port = port))
+      }
+      .discard
+
+    opt[String]("indexdb-jdbc-url")
+      .text("JDBC URL to an IndexDB instance")
+      .optional()
+      .action { case (url, config) => config.withLedgerConfig(_.copy(indexDbJdbcUrlO = Some(url))) }
+      .discard
+
+    opt[WorkflowConfig.StreamConfig]("consume-stream")
+      .abbr("s")
+      .optional()
+      .unbounded()
+      .text(
+        "Stream configuration."
+      )
+      .valueName(
+        "<param1>=<value1>,<param2>=<value2>,..."
+      )
+      .action { case (streamConfig, config) =>
+        config
+          .copy(workflow = config.workflow.copy(streams = config.workflow.streams :+ streamConfig))
+      }
+      .discard
+
+    opt[File]("workflow-config")
+      .hidden() // TODO(#12064): uncomment when production-ready
+      .abbr("w")
+      .optional()
+      .text(
+        "A workflow configuration file. Parameters defined via this method take precedence over --consume-stream options."
+      )
+      .action { case (workflowConfigFile, config) =>
+        config.copy(workflowConfigFile = Some(workflowConfigFile))
+      }
+      .discard
+
+    opt[Int]("max-in-flight-commands")
+      .hidden() // TODO(#12064): uncomment when production-ready
+      .text("Maximum in-flight commands for command submissions.")
+      .optional()
+      .action { case (size, config) =>
+        config.copy(maxInFlightCommands = size)
+      }
+      .discard
+
+    opt[Unit]("latency-test")
+      .text("Run a SubmitAndWait latency benchmark")
+      .optional()
+      .action { case (_, config) => config.copy(latencyTest = true) }
+      .discard
+
+    opt[Long]("max-latency-millis")
+      .text(
+        "The maximum average latency allowed for latency benchmarks (in millis). Only relevant with `latency-test` enabled."
+      )
+      .optional()
+      .action { case (maxLatencyMillis, config) =>
+        config.copy(maxLatencyObjectiveMillis = maxLatencyMillis)
+      }
+      .discard
+
+    opt[Int]("submission-batch-size")
+      .hidden() // TODO(#12064): uncomment when production-ready
+      .text("Number of contracts created per command submission.")
+      .optional()
+      .action { case (size, config) =>
+        config.copy(submissionBatchSize = size)
+      }
+      .discard
+
+    opt[FiniteDuration]("log-interval")
+      .abbr("r")
+      .text("Stream metrics log interval.")
+      .action { case (period, config) => config.copy(reportingPeriod = period) }
+      .discard
+
+    opt[Int]("core-pool-size")
+      .text("Initial size of the worker thread pool.")
+      .optional()
+      .action { case (size, config) =>
+        config.copy(concurrency = config.concurrency.copy(corePoolSize = size))
+      }
+      .discard
+
+    opt[Int]("max-pool-size")
+      .text("Maximum size of the worker thread pool.")
+      .optional()
+      .action { case (size, config) =>
+        config.copy(concurrency = config.concurrency.copy(maxPoolSize = size))
+      }
+      .discard
+
+    opt[String]("user-based-authorization-secret")
+      .optional()
+      .text(
+        "Enables user-based authorization. The value is used for signing authorization tokens with HMAC256."
+      )
+      .action((secret, config) => config.copy(authorizationTokenSecret = Some(secret)))
+      .discard
+
+    opt[Seq[String]]("client-cert")
+      .optional()
+      .text(
+        "TLS: The crt file to be used as the cert chain and the pem file to be used as the private key."
+      )
+      .validate {
+        case certs @ Seq(_crt, _pem) =>
+          certs
+            .map(path =>
+              validatePath(path, s"The file $path specified via --client-cert does not exist")
+            )
+            .find(_.isLeft)
+            .getOrElse(Either.unit)
+        case _ => Left("expected only two files for --client-cert")
+      }
+      .valueName("<crt file>,<pem file>")
+      .action {
+        case (Seq(crt, pem), config) =>
+          config.copy(
+            tls = config.tls.map(tls =>
+              tls.copy(clientCert =
+                Some(
+                  TlsClientCertificate(
+                    certChainFile = PemFile(ExistingFile.tryCreate(Paths.get(crt).toFile)),
+                    privateKeyFile = PemFile(ExistingFile.tryCreate(Paths.get(pem).toFile)),
+                  )
+                )
+              )
+            )
+          )
+        case (_, config) => config
+      }
+      .discard
+
+    opt[String]("cacrt")
+      .optional()
+      .text("TLS: The crt file to be used as the trusted root CA.")
+      .validate(validatePath(_, "The file specified via --cacrt does not exist"))
+      .action { (path, config) =>
+        val file = Some(PemFile(ExistingFile.tryCreate(path)))
+        config.copy(
+          tls = config.tls
+            .map(_.copy(trustCollectionFile = file))
+            .orElse(
+              Some(
+                TlsClientConfig(
+                  trustCollectionFile = file,
+                  clientCert = None,
+                )
+              )
+            )
+        )
+      }
+      .discard
+
+    // Allows enabling TLS without any special certs,
+    // i.e., TLS without client auth, using the default root certs.
+    // If any certificates are set, TLS is enabled implicitly and
+    // this flag is redundant.
+    opt[Unit]("tls")
+      .optional()
+      .text("TLS: Enable tls. This is redundant if --client-cert or --cacrt are set")
+      .action((_, config) =>
+        config.copy(tls =
+          config.tls.orElse(
+            Some(
+              TlsClientConfig(
+                trustCollectionFile = None,
+                clientCert = None,
+              )
+            )
+          )
+        )
+      )
+      .discard
+
+    checkConfig(c =>
+      Either.cond(
+        !(c.latencyTest && c.workflow.streams.nonEmpty),
+        (),
+        "Latency test cannot have configured streams",
+      )
+    ).discard
+
+    private def validatePath(path: String, message: String): Either[String, Unit] = {
+      val valid = Try(Paths.get(path).toFile.canRead).getOrElse(false)
+      Either.cond(valid, (), message)
+    }
+
+    help("help").text("Prints this information").discard
+
+    private def note(level: Int, param: String, desc: String = ""): Unit = {
+      val paddedParam = s"${" " * level * 2}$param"
+      val internalPadding = math.max(1, 50 - paddedParam.length)
+      note(s"$paddedParam${" " * internalPadding}$desc").discard
+    }
+
+    note(0, "")
+    note(0, "Stream configuration parameters:")
+    note(1, "Transactions/transactions LedgerEffects:")
+    note(2, "stream-type=<transactions|transactions-ledger-effects>", "(required)")
+    note(2, "name=<stream name>", "Stream name used to identify results (required)")
+    note(
+      2,
+      "filters=party1@template1@template2+party2",
+      "List of per-party filters separated by the plus symbol (required)",
+    )
+    note(2, "begin-offset=<offset>")
+    note(2, "end-offset=<offset>")
+    note(2, "max-delay=<seconds>", "Max record time delay objective")
+    note(2, "min-consumption-speed=<speed>", "Min consumption speed objective")
+    note(2, "min-item-rate=<rate>", "Min item rate per second")
+    note(2, "max-item-rate=<rate>", "Max item rate per second")
+    note(1, "Active contract sets:")
+    note(2, "stream-type=active-contracts", "(required)")
+    note(2, "name=<stream name>", "Stream name used to identify results (required)")
+    note(
+      2,
+      "filters=party1@template1@template2+party2",
+      "List of per-party filters separated by the plus symbol (required)",
+    )
+    note(2, "min-item-rate=<rate>", "Min item rate per second")
+    note(2, "max-item-rate=<rate>", "Max item rate per second")
+    note(1, "Command completions:")
+    note(2, "stream-type=completions", "(required)")
+    note(2, "name=<stream name>", "Stream name used to identify results (required)")
+    note(2, "party=<party>", "(required)")
+    note(2, "begin-offset=<offset>")
+    note(2, "template-ids=<id1>|<id2>")
+    note(2, "min-item-rate=<rate>", "Min item rate per second")
+    note(2, "max-item-rate=<rate>", "Max item rate per second")
+  }
+
+  def config(args: Array[String]): Option[Config] =
+    parser.parse(args, Config.Default)
+
+  private object Reads {
+    implicit val streamConfigRead: Read[WorkflowConfig.StreamConfig] =
+      Read.mapRead[String, String].map { m =>
+        def stringField(fieldName: String): Either[String, String] =
+          m.get(fieldName) match {
+            case Some(value) => Right(value)
+            case None => Left(s"Missing field: '$fieldName'")
+          }
+
+        def optionalLongField(fieldName: String): Either[String, Option[Long]] =
+          optionalField[Long](fieldName, _.toLong)
+
+        def optionalDoubleField(fieldName: String): Either[String, Option[Double]] =
+          optionalField[Double](fieldName, _.toDouble)
+
+        def optionalScalaDurationField(fieldName: String): Either[String, Option[FiniteDuration]] =
+          optionalField[String](fieldName, identity).flatMap {
+            case Some(value) =>
+              Duration(value) match {
+                case infinite: Duration.Infinite =>
+                  Left(s"Subscription delay duration must be finite, but got $infinite")
+                case finiteDuration: FiniteDuration => Right(Some(finiteDuration))
+              }
+            case None => Right(None)
+          }
+
+        def optionalField[T](fieldName: String, f: String => T): Either[String, Option[T]] =
+          Try(m.get(fieldName).map(f)) match {
+            case Success(value) => Right(value)
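+            // e.g. "begin-offset=abc" makes the conversion function throw inside Try,
+            // surfacing here as an invalid-value error for that field: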
case Failure(_) => Left(s"Invalid value for field name: $fieldName") + } + + def transactionObjectives( + maxDelaySeconds: Option[Long], + minConsumptionSpeed: Option[Double], + minItemRate: Option[Double], + maxItemRate: Option[Double], + ): Option[WorkflowConfig.StreamConfig.TransactionObjectives] = + (maxDelaySeconds, minConsumptionSpeed, minItemRate, maxItemRate) match { + case (None, None, None, None) => None + case _ => + Some( + WorkflowConfig.StreamConfig.TransactionObjectives( + maxDelaySeconds = maxDelaySeconds, + minConsumptionSpeed = minConsumptionSpeed, + minItemRate = minItemRate, + maxItemRate = maxItemRate, + // NOTE: Unsupported on CLI + maxTotalStreamRuntimeDuration = None, + ) + ) + } + + def transactionsConfig + : Either[String, WorkflowConfig.StreamConfig.TransactionsStreamConfig] = for { + name <- stringField("name") + filters <- stringField("filters").flatMap(parseFilters) + beginOffset <- optionalLongField("begin-offset") + endOffset <- optionalLongField("end-offset") + maxDelaySeconds <- optionalLongField("max-delay") + minConsumptionSpeed <- optionalDoubleField("min-consumption-speed") + minItemRate <- optionalDoubleField("min-item-rate") + maxItemRate <- optionalDoubleField("max-item-rate") + maxItemCount <- optionalLongField("max-item-count") + timeoutO <- optionalScalaDurationField("timeout") + subscriptionDelayO <- optionalScalaDurationField("subscription-delay") + } yield WorkflowConfig.StreamConfig.TransactionsStreamConfig( + name = name, + filters = filters, + beginOffsetExclusive = beginOffset.getOrElse(0L), + endOffsetInclusive = endOffset, + objectives = + transactionObjectives(maxDelaySeconds, minConsumptionSpeed, minItemRate, maxItemRate), + timeoutO = timeoutO, + maxItemCount = maxItemCount, + // NOTE: Unsupported on CLI + partyNamePrefixFilters = List.empty, + subscriptionDelay = subscriptionDelayO, + ) + + def transactionLedgerEffectsConfig + : Either[String, WorkflowConfig.StreamConfig.TransactionLedgerEffectsStreamConfig] = + for { + name <- stringField("name") + filters <- stringField("filters").flatMap(parseFilters) + beginOffset <- optionalLongField("begin-offset") + endOffset <- optionalLongField("end-offset") + maxDelaySeconds <- optionalLongField("max-delay") + minConsumptionSpeed <- optionalDoubleField("min-consumption-speed") + minItemRate <- optionalDoubleField("min-item-rate") + maxItemRate <- optionalDoubleField("max-item-rate") + maxItemCount <- optionalLongField("max-item-count") + timeoutO <- optionalScalaDurationField("timeout") + subscriptionDelayO <- optionalScalaDurationField("subscription-delay") + } yield WorkflowConfig.StreamConfig.TransactionLedgerEffectsStreamConfig( + name = name, + filters = filters, + beginOffsetExclusive = beginOffset.getOrElse(0L), + endOffsetInclusive = endOffset, + objectives = + transactionObjectives(maxDelaySeconds, minConsumptionSpeed, minItemRate, maxItemRate), + timeoutO = timeoutO, + maxItemCount = maxItemCount, + // NOTE: Unsupported on CLI + partyNamePrefixFilters = List.empty, + subscriptionDelay = subscriptionDelayO, + ) + + def rateObjectives( + minItemRate: Option[Double], + maxItemRate: Option[Double], + ): Option[WorkflowConfig.StreamConfig.AcsAndCompletionsObjectives] = + (minItemRate, maxItemRate) match { + case (None, None) => None + case _ => + Some( + WorkflowConfig.StreamConfig.AcsAndCompletionsObjectives( + minItemRate = minItemRate, + maxItemRate = maxItemRate, + // NOTE: Unsupported on CLI + maxTotalStreamRuntimeDuration = None, + ) + ) + } + + def activeContractsConfig + : 
Either[String, WorkflowConfig.StreamConfig.ActiveContractsStreamConfig] = for {
+          name <- stringField("name")
+          filters <- stringField("filters").flatMap(parseFilters)
+          minItemRate <- optionalDoubleField("min-item-rate")
+          maxItemRate <- optionalDoubleField("max-item-rate")
+          maxItemCount <- optionalLongField("max-item-count")
+          timeout <- optionalScalaDurationField("timeout")
+          subscriptionDelayO <- optionalScalaDurationField("subscription-delay")
+        } yield WorkflowConfig.StreamConfig.ActiveContractsStreamConfig(
+          name = name,
+          filters = filters,
+          objectives = rateObjectives(minItemRate, maxItemRate),
+          timeoutO = timeout,
+          maxItemCount = maxItemCount,
+          // NOTE: Unsupported on CLI
+          partyNamePrefixFilters = List.empty,
+          subscriptionDelay = subscriptionDelayO,
+        )
+
+        def completionsConfig: Either[String, WorkflowConfig.StreamConfig.CompletionsStreamConfig] =
+          for {
+            name <- stringField("name")
+            parties <- stringField("parties").map(parseParties)
+            userId <- stringField("user-id")
+            beginOffset <- optionalLongField("begin-offset")
+            minItemRate <- optionalDoubleField("min-item-rate")
+            maxItemRate <- optionalDoubleField("max-item-rate")
+            timeoutO <- optionalScalaDurationField("timeout")
+            maxItemCount <- optionalLongField("max-item-count")
+            subscriptionDelayO <- optionalScalaDurationField("subscription-delay")
+          } yield WorkflowConfig.StreamConfig.CompletionsStreamConfig(
+            name = name,
+            parties = parties,
+            userId = userId,
+            beginOffsetExclusive = beginOffset,
+            objectives = rateObjectives(minItemRate, maxItemRate),
+            timeoutO = timeoutO,
+            maxItemCount = maxItemCount,
+            subscriptionDelay = subscriptionDelayO,
+          )
+
+        val config = stringField("stream-type").flatMap[String, WorkflowConfig.StreamConfig] {
+          case "transactions" => transactionsConfig
+          case "transactions-ledger-effects" => transactionLedgerEffectsConfig
+          case "active-contracts" => activeContractsConfig
+          case "completions" => completionsConfig
+          case invalid => Left(s"Invalid stream type: $invalid")
+        }
+
+        config.fold(error => throw new IllegalArgumentException(error), identity)
+      }
+
+    // Parse strings like: "", "party1" or "party1+party2+party3"
+    private def parseParties(raw: String): List[String] =
+      raw.split('+').toList
+
+    private def parseFilters(
+        listOfIds: String
+    ): Either[String, List[WorkflowConfig.StreamConfig.PartyFilter]] =
+      listOfIds
+        .split('+')
+        .toList
+        .map(parseFilter)
+        .foldLeft[Either[String, List[WorkflowConfig.StreamConfig.PartyFilter]]](
+          Right(List.empty)
+        ) { case (acc, next) =>
+          for {
+            filters <- acc
+            filter <- next
+          } yield filters :+ filter
+        }
+
+    private def parseFilter(
+        filterString: String
+    ): Either[String, WorkflowConfig.StreamConfig.PartyFilter] =
+      filterString
+        .split('@')
+        .toList match {
+        case party :: templates =>
+          Right(
+            WorkflowConfig.StreamConfig.PartyFilter(party, templates, List.empty)
+          ) // Interfaces are not supported via Cli
+        case _ => Left("Filter cannot be empty")
+      }
+
+    def endpointRead: Read[(String, Int)] = new Read[(String, Int)] {
+      val arity = 1
+      val reads: String => (String, Int) = { s =>
+        splitAddress(s) match {
+          case (k, v) => Read.stringRead.reads(k) -> Read.intRead.reads(v)
+        }
+      }
+    }
+
+    private def splitAddress(s: String): (String, String) =
+      s.indexOf(':') match {
+        case -1 =>
+          throw new IllegalArgumentException("Addresses should be specified as `<host>:<port>`")
+        case n: Int => (s.slice(0, n), s.slice(n + 1, s.length))
+      }
+  }
+
+}
diff --git
a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/config/Config.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/config/Config.scala new file mode 100644 index 0000000000..27934a9a01 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/config/Config.scala @@ -0,0 +1,63 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.config + +import com.digitalasset.canton.config.TlsClientConfig + +import java.io.File +import scala.concurrent.duration.* + +final case class Config( + ledger: Config.Ledger, + concurrency: Config.Concurrency, + tls: Option[TlsClientConfig], + workflow: WorkflowConfig, + reportingPeriod: FiniteDuration, + workflowConfigFile: Option[File], + maxInFlightCommands: Int, + submissionBatchSize: Int, + authorizationTokenSecret: Option[String], + latencyTest: Boolean, + maxLatencyObjectiveMillis: Long, +) { + def withLedgerConfig(f: Config.Ledger => Config.Ledger): Config = copy(ledger = f(ledger)) +} + +object Config { + final case class Ledger( + hostname: String, + port: Int, + indexDbJdbcUrlO: Option[String] = None, + ) + + final case class Concurrency( + corePoolSize: Int, + maxPoolSize: Int, + keepAliveTime: Long, + maxQueueLength: Int, + ) + + val Default: Config = + Config( + ledger = Config.Ledger( + hostname = "localhost", + port = 6865, + ), + concurrency = Config.Concurrency( + corePoolSize = 2, + maxPoolSize = 8, + keepAliveTime = 30, + maxQueueLength = 10000, + ), + tls = None, + workflow = WorkflowConfig(), + reportingPeriod = 5.seconds, + workflowConfigFile = None, + maxInFlightCommands = 100, + submissionBatchSize = 100, + authorizationTokenSecret = None, + latencyTest = false, + maxLatencyObjectiveMillis = 1000L, + ) +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/config/ConfigMaker.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/config/ConfigMaker.scala new file mode 100644 index 0000000000..365a15475a --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/config/ConfigMaker.scala @@ -0,0 +1,45 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.config + +import com.digitalasset.canton.ledger.api.benchtool.util.SimpleFileReader + +import java.io.File +import scala.util.{Failure, Success} + +object ConfigMaker { + + def make(args: Array[String]): Either[ConfigurationError, Config] = { + def parseCli: Either[ConfigurationError, Config] = Cli.config(args) match { + case None => Left(ConfigurationError("Invalid CLI arguments.")) + case Some(config) => + Right(config) + } + + def parseWorkflowConfig(workflowConfigFile: File): Either[ConfigurationError, WorkflowConfig] = + SimpleFileReader.readFile(workflowConfigFile)(WorkflowConfigParser.parse) match { + case Failure(ex) => + Left(ConfigurationError(s"Workflow config reading error: ${ex.getLocalizedMessage}")) + case Success(result) => + result.left + .map(parserError => + ConfigurationError(s"Workflow config parsing error: ${parserError.details}") + ) + } + + for { + config <- parseCli + workflowConfig <- config.workflowConfigFile match { + case None => Right(config.workflow) + case Some(workflowConfigFile) => parseWorkflowConfig(workflowConfigFile) + } + } yield { + // Workflow defined in the YAML file takes precedence over CLI params + config.copy(workflow = workflowConfig) + } + } + + final case class ConfigurationError(details: String) + +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/config/WorkflowConfig.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/config/WorkflowConfig.scala new file mode 100644 index 0000000000..547bfae474 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/config/WorkflowConfig.scala @@ -0,0 +1,209 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.config + +import scala.concurrent.duration.FiniteDuration + +import WorkflowConfig.FooSubmissionConfig.{ConsumingExercises, NonconsumingExercises} +import WorkflowConfig.StreamConfig.PartyNamePrefixFilter + +final case class WorkflowConfig( + submission: Option[WorkflowConfig.SubmissionConfig] = None, + streams: List[WorkflowConfig.StreamConfig] = Nil, + pruning: Option[WorkflowConfig.PruningConfig] = None, +) + +object WorkflowConfig { + + sealed trait SubmissionConfig extends Product with Serializable { + def numberOfInstances: Int + def numberOfObservers: Int + def numberOfDivulgees: Int + def numberOfExtraSubmitters: Int + def uniqueParties: Boolean + def waitForSubmission: Boolean + def observerPartySets: List[FooSubmissionConfig.PartySet] + } + + final case class FibonacciSubmissionConfig( + numberOfInstances: Int, + uniqueParties: Boolean, + value: Int, + waitForSubmission: Boolean, + ) extends SubmissionConfig { + override val numberOfObservers = 0 + override val numberOfDivulgees = 0 + override val numberOfExtraSubmitters = 0 + override val observerPartySets: List[FooSubmissionConfig.PartySet] = List.empty + } + + final case class FooSubmissionConfig( + numberOfInstances: Int, + numberOfObservers: Int, + uniqueParties: Boolean, + instanceDistribution: List[FooSubmissionConfig.ContractDescription], + numberOfDivulgees: Int = 0, + numberOfExtraSubmitters: Int = 0, + nonConsumingExercises: Option[NonconsumingExercises] = None, + consumingExercises: Option[ConsumingExercises] = None, + userIds: List[FooSubmissionConfig.UserId] = List.empty, + maybeWaitForSubmission: Option[Boolean] = None, + observerPartySets: List[FooSubmissionConfig.PartySet] = List.empty, + allowNonTransientContracts: Boolean = false, + ) extends SubmissionConfig { + def waitForSubmission: Boolean = maybeWaitForSubmission.getOrElse(true) + } + + object FooSubmissionConfig { + + /** @param partyNamePrefix + * prefix of each party in this party set; also serves as its identifier + * @param count + * number of parties to create + * @param visibility + * a fraction of contracts that each of the parties from this set should see + */ + final case class PartySet( + partyNamePrefix: String, + count: Int, + visibility: Double, + ) + + final case class ContractDescription( + template: String, + weight: Int, + payloadSizeBytes: Int, + ) + + final case class NonconsumingExercises( + probability: Double, + payloadSizeBytes: Int, + ) + + final case class ConsumingExercises( + probability: Double, + payloadSizeBytes: Int, + ) + + final case class UserId( + userId: String, + weight: Int, + ) + + } + + final case class PruningConfig( + name: String, + pruneAllDivulgedContracts: Boolean, + maxDurationObjective: FiniteDuration, + ) + + sealed trait StreamConfig extends Product with Serializable { + def name: String + + /** If specified, used to cancel the stream when enough items have been seen.
+ */ + def maxItemCount: Option[Long] = None + + /** If specified, used to cancel the stream after the specified time out + */ + def timeoutO: Option[FiniteDuration] = None + + def partySetPrefixes: List[String] + + def partyNamePrefixFilters: List[PartyNamePrefixFilter] + + def subscriptionDelay: Option[FiniteDuration] + } + + object StreamConfig { + + final case class PartyFilter( + party: String, + templates: List[String] = List.empty, + interfaces: List[String] = List.empty, + ) + + final case class PartyNamePrefixFilter( + partyNamePrefix: String, + templates: List[String] = List.empty, + interfaces: List[String] = List.empty, + ) + + final case class TransactionsStreamConfig( + name: String, + filters: List[PartyFilter] = List.empty, + partyNamePrefixFilters: List[PartyNamePrefixFilter] = List.empty, + beginOffsetExclusive: Long = 0L, + endOffsetInclusive: Option[Long] = None, + objectives: Option[StreamConfig.TransactionObjectives] = None, + subscriptionDelay: Option[FiniteDuration] = None, + override val maxItemCount: Option[Long] = None, + override val timeoutO: Option[FiniteDuration] = None, + ) extends StreamConfig { + override def partySetPrefixes: List[String] = partyNamePrefixFilters.map(_.partyNamePrefix) + } + + final case class TransactionLedgerEffectsStreamConfig( + name: String, + filters: List[PartyFilter], + partyNamePrefixFilters: List[PartyNamePrefixFilter] = List.empty, + beginOffsetExclusive: Long = 0L, + endOffsetInclusive: Option[Long] = None, + objectives: Option[StreamConfig.TransactionObjectives] = None, + subscriptionDelay: Option[FiniteDuration] = None, + override val maxItemCount: Option[Long] = None, + override val timeoutO: Option[FiniteDuration] = None, + ) extends StreamConfig { + override def partySetPrefixes: List[String] = + partyNamePrefixFilters.map(_.partyNamePrefix) + } + + final case class ActiveContractsStreamConfig( + name: String, + filters: List[PartyFilter], + partyNamePrefixFilters: List[PartyNamePrefixFilter] = List.empty, + objectives: Option[StreamConfig.AcsAndCompletionsObjectives] = None, + subscriptionDelay: Option[FiniteDuration] = None, + override val maxItemCount: Option[Long] = None, + override val timeoutO: Option[FiniteDuration] = None, + ) extends StreamConfig { + override def partySetPrefixes: List[String] = + partyNamePrefixFilters.map(_.partyNamePrefix) + } + + final case class CompletionsStreamConfig( + name: String, + parties: List[String], + userId: String, + beginOffsetExclusive: Option[Long], + objectives: Option[StreamConfig.AcsAndCompletionsObjectives], + subscriptionDelay: Option[FiniteDuration] = None, + override val maxItemCount: Option[Long], + override val timeoutO: Option[FiniteDuration], + ) extends StreamConfig { + override def partySetPrefixes: List[String] = List.empty + override def partyNamePrefixFilters: List[PartyNamePrefixFilter] = List.empty + } + + trait CommonObjectivesConfig { + def maxTotalStreamRuntimeDuration: Option[FiniteDuration] + def minItemRate: Option[Double] + def maxItemRate: Option[Double] + } + final case class TransactionObjectives( + maxDelaySeconds: Option[Long], + minConsumptionSpeed: Option[Double], + override val minItemRate: Option[Double], + override val maxItemRate: Option[Double], + override val maxTotalStreamRuntimeDuration: Option[FiniteDuration] = None, + ) extends CommonObjectivesConfig + + final case class AcsAndCompletionsObjectives( + override val minItemRate: Option[Double], + override val maxItemRate: Option[Double], + override val maxTotalStreamRuntimeDuration: 
Option[FiniteDuration] = None, + ) extends CommonObjectivesConfig + } +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/config/WorkflowConfigParser.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/config/WorkflowConfigParser.scala new file mode 100644 index 0000000000..ecc86ad7ed --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/config/WorkflowConfigParser.scala @@ -0,0 +1,345 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.config + +import cats.syntax.functor.* +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.FooSubmissionConfig.{ + ConsumingExercises, + NonconsumingExercises, +} +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.StreamConfig.{ + PartyFilter, + PartyNamePrefixFilter, +} +import io.circe.yaml.Parser +import io.circe.{Decoder, HCursor} + +import java.io.Reader +import scala.concurrent.duration.{Duration, FiniteDuration} +import scala.util.{Failure, Success, Try} + +object WorkflowConfigParser { + import Decoders.* + import WorkflowConfig.* + + def parse(reader: Reader): Either[ParserError, WorkflowConfig] = + Parser.default + .parse(reader) + .flatMap(_.as[WorkflowConfig]) + .left + .map(error => ParserError(error.getLocalizedMessage)) + + final case class ParserError(details: String) + + object Decoders { + implicit val scalaDurationDecoder: Decoder[FiniteDuration] = + Decoder.decodeString.emapTry(strDuration => + Try(Duration(strDuration)).flatMap { + case infinite: Duration.Infinite => + Failure( + new IllegalArgumentException( + s"Subscription delay duration must be finite, but got $infinite" + ) + ) + case duration: FiniteDuration => Success(duration) + } + ) + + implicit val transactionObjectivesDecoder: Decoder[StreamConfig.TransactionObjectives] = + Decoder.forProduct5( + "max_delay_seconds", + "min_consumption_speed", + "min_item_rate", + "max_item_rate", + "max_stream_duration", + )(StreamConfig.TransactionObjectives.apply) + + implicit val rateObjectivesDecoder: Decoder[StreamConfig.AcsAndCompletionsObjectives] = + Decoder.forProduct3( + "min_item_rate", + "max_item_rate", + "max_stream_duration", + )(StreamConfig.AcsAndCompletionsObjectives.apply) + + implicit val partyFilterDecoder: Decoder[StreamConfig.PartyFilter] = + (c: HCursor) => { + for { + party <- c.downField("party").as[String] + templates <- c.downField("templates").as[Option[List[String]]] + interfaces <- c.downField("interfaces").as[Option[List[String]]] + } yield StreamConfig.PartyFilter( + party, + templates.getOrElse(List.empty), + interfaces.getOrElse(List.empty), + ) + } + + implicit val partySetTemplateFilterDecoder: Decoder[StreamConfig.PartyNamePrefixFilter] = + (c: HCursor) => { + for { + partyNamePrefix <- c.downField("party_name_prefix").as[String] + templates <- c.downField("templates").as[Option[List[String]]] + interfaces <- c.downField("interfaces").as[Option[List[String]]] + } yield StreamConfig.PartyNamePrefixFilter( + partyNamePrefix, + templates.getOrElse(List.empty), + interfaces.getOrElse(List.empty), + ) + } + + implicit val transactionStreamDecoder: Decoder[StreamConfig.TransactionsStreamConfig] = + (c: HCursor) => { + for { + name <- c.downField("name").as[String] + filters <- 
c.downField("filters").as[Option[List[PartyFilter]]] + beginOffset <- c.downField("begin_offset").as[Option[Long]] + endOffset <- c.downField("end_offset").as[Option[Long]] + partyNamePrefixFilters <- c + .downField("party_prefix_filters") + .as[Option[List[PartyNamePrefixFilter]]] + objectives <- c.downField("objectives").as[Option[StreamConfig.TransactionObjectives]] + subscriptionDelay <- c + .downField("subscription_delay") + .as[Option[FiniteDuration]] + maxItemCount <- c.downField("max_item_count").as[Option[Long]] + timeout <- c + .downField("timeout") + .as[Option[FiniteDuration]] + } yield StreamConfig.TransactionsStreamConfig( + name = name, + filters = filters.getOrElse(List.empty), + partyNamePrefixFilters = partyNamePrefixFilters.getOrElse(List.empty), + beginOffsetExclusive = beginOffset.getOrElse(0L), + endOffsetInclusive = endOffset, + objectives = objectives, + subscriptionDelay = subscriptionDelay, + maxItemCount = maxItemCount, + timeoutO = timeout, + ) + } + + implicit val transactionLedgerEffectsStreamDecoder + : Decoder[StreamConfig.TransactionLedgerEffectsStreamConfig] = + (c: HCursor) => { + for { + name <- c.downField("name").as[String] + filters <- c.downField("filters").as[Option[List[PartyFilter]]] + beginOffset <- c.downField("begin_offset").as[Long] + endOffset <- c.downField("end_offset").as[Option[Long]] + partyNamePrefixFilters <- c + .downField("party_prefix_filters") + .as[Option[List[PartyNamePrefixFilter]]] + objectives <- c.downField("objectives").as[Option[StreamConfig.TransactionObjectives]] + subscriptionDelay <- c + .downField("subscription_delay") + .as[Option[FiniteDuration]] + maxItemCount <- c.downField("max_item_count").as[Option[Long]] + timeout <- c + .downField("timeout") + .as[Option[FiniteDuration]] + } yield StreamConfig.TransactionLedgerEffectsStreamConfig( + name = name, + filters = filters.getOrElse(List.empty), + partyNamePrefixFilters = partyNamePrefixFilters.getOrElse(List.empty), + beginOffsetExclusive = beginOffset, + endOffsetInclusive = endOffset, + objectives = objectives, + subscriptionDelay = subscriptionDelay, + maxItemCount = maxItemCount, + timeoutO = timeout, + ) + } + + implicit val activeContractsStreamDecoder: Decoder[StreamConfig.ActiveContractsStreamConfig] = + (c: HCursor) => { + for { + name <- c.downField("name").as[String] + filters <- c.downField("filters").as[Option[List[PartyFilter]]] + partyNamePrefixFilters <- c + .downField("party_prefix_filters") + .as[Option[List[PartyNamePrefixFilter]]] + objectives <- c + .downField("objectives") + .as[Option[StreamConfig.AcsAndCompletionsObjectives]] + subscriptionDelay <- c + .downField("subscription_delay") + .as[Option[FiniteDuration]] + maxItemCount <- c.downField("max_item_count").as[Option[Long]] + timeout <- c + .downField("timeout") + .as[Option[FiniteDuration]] + } yield StreamConfig.ActiveContractsStreamConfig( + name = name, + filters = filters.getOrElse(List.empty), + partyNamePrefixFilters = partyNamePrefixFilters.getOrElse(List.empty), + objectives = objectives, + subscriptionDelay = subscriptionDelay, + maxItemCount = maxItemCount, + timeoutO = timeout, + ) + } + + implicit val completionsStreamDecoder: Decoder[StreamConfig.CompletionsStreamConfig] = + (c: HCursor) => { + for { + name <- c.downField("name").as[String] + parties <- c.downField("parties").as[List[String]] + userId <- c.downField("user_id").as[String] + beginOffset <- c.downField("begin_offset").as[Option[Long]] + objectives <- c + .downField("objectives") + 
.as[Option[StreamConfig.AcsAndCompletionsObjectives]] + subscriptionDelay <- c + .downField("subscription_delay") + .as[Option[FiniteDuration]] + maxItemCount <- c.downField("max_item_count").as[Option[Long]] + timeout <- c + .downField("timeout") + .as[Option[FiniteDuration]] + } yield StreamConfig.CompletionsStreamConfig( + name = name, + parties = parties, + userId = userId, + beginOffsetExclusive = beginOffset, + objectives = objectives, + subscriptionDelay = subscriptionDelay, + maxItemCount = maxItemCount, + timeoutO = timeout, + ) + } + + implicit val streamConfigDecoder: Decoder[StreamConfig] = + Decoder + .forProduct1[String, String]("type")(identity) + .flatMap[StreamConfig] { + case "transactions" => transactionStreamDecoder.widen + case "transactions-ledger-effects" => transactionLedgerEffectsStreamDecoder.widen + case "active-contracts" => activeContractsStreamDecoder.widen + case "completions" => completionsStreamDecoder.widen + case invalid => Decoder.failedWithMessage(s"Invalid stream type: $invalid") + } + + implicit val contractDescriptionDecoder: Decoder[FooSubmissionConfig.ContractDescription] = + Decoder.forProduct3( + "template", + "weight", + "payload_size_bytes", + )(FooSubmissionConfig.ContractDescription.apply) + + implicit val nonconsumingExercisesDecoder: Decoder[FooSubmissionConfig.NonconsumingExercises] = + Decoder.forProduct2( + "probability", + "payload_size_bytes", + )(FooSubmissionConfig.NonconsumingExercises.apply) + + implicit val consumingExercisesDecoder: Decoder[FooSubmissionConfig.ConsumingExercises] = + Decoder.forProduct2( + "probability", + "payload_size_bytes", + )(FooSubmissionConfig.ConsumingExercises.apply) + + implicit val userIdConfigDecoder: Decoder[FooSubmissionConfig.UserId] = + Decoder.forProduct2( + "id", + "weight", + )(FooSubmissionConfig.UserId.apply) + + implicit val partySetDecoder: Decoder[FooSubmissionConfig.PartySet] = + Decoder.forProduct3( + "party_name_prefix", + "count", + "visibility", + )(FooSubmissionConfig.PartySet.apply) + + implicit val fooSubmissionConfigDecoder: Decoder[FooSubmissionConfig] = + (c: HCursor) => { + for { + allowNonTransientContracts <- c + .downField("allow_non_transient_contracts") + .as[Option[Boolean]] + numInstances <- c.downField("num_instances").as[Int] + numObservers <- c.downField("num_observers").as[Int] + uniqueObservers <- c.downField("unique_parties").as[Boolean] + instancesDistribution <- c + .downField("instance_distribution") + .as[List[FooSubmissionConfig.ContractDescription]] + numberOfDivulgees <- c.downField("num_divulgees").as[Option[Int]] + numberOfExtraSubmitters <- c.downField("num_extra_submitters").as[Option[Int]] + nonConsumingExercises <- c + .downField("nonconsuming_exercises") + .as[Option[NonconsumingExercises]] + consumingExercises <- c.downField("consuming_exercises").as[Option[ConsumingExercises]] + userIds <- c + .downField("user_ids") + .as[Option[List[FooSubmissionConfig.UserId]]] + maybeWaitForSubmission <- c.downField("wait_for_submission").as[Option[Boolean]] + observerPartySets <- c + .downField("observers_party_sets") + .as[Option[List[FooSubmissionConfig.PartySet]]] + } yield FooSubmissionConfig( + allowNonTransientContracts = allowNonTransientContracts.getOrElse(false), + numberOfInstances = numInstances, + numberOfObservers = numObservers, + uniqueParties = uniqueObservers, + instanceDistribution = instancesDistribution, + numberOfDivulgees = numberOfDivulgees.getOrElse(0), + numberOfExtraSubmitters = numberOfExtraSubmitters.getOrElse(0), + 
nonConsumingExercises = nonConsumingExercises, + consumingExercises = consumingExercises, + userIds = userIds.getOrElse(List.empty), + maybeWaitForSubmission = maybeWaitForSubmission, + observerPartySets = observerPartySets.getOrElse(List.empty), + ) + } + + implicit val fibonacciSubmissionConfigDecoder: Decoder[FibonacciSubmissionConfig] = + Decoder.forProduct4( + "num_instances", + "unique_parties", + "value", + "wait_for_submission", + )(FibonacciSubmissionConfig.apply) + + implicit val submissionConfigDecoder: Decoder[SubmissionConfig] = + Decoder + .forProduct1[String, String]("type")(identity) + .flatMap[SubmissionConfig] { + case "foo" => fooSubmissionConfigDecoder.widen + case "fibonacci" => fibonacciSubmissionConfigDecoder.widen + case invalid => Decoder.failedWithMessage(s"Invalid submission type: $invalid") + } + + val pruningConfigInternal: Decoder[PruningConfig] = (c: HCursor) => { + for { + name <- c.downField("name").as[String] + pruneAllDivulgedContracts <- c.downField("prune_all_divulged_contracts").as[Boolean] + maxDurationObjective <- c.downField("max_duration_objective").as[FiniteDuration] + } yield PruningConfig( + name = name, + pruneAllDivulgedContracts = pruneAllDivulgedContracts, + maxDurationObjective = maxDurationObjective, + ) + } + + implicit val pruningConfig: Decoder[PruningConfig] = + Decoder + .forProduct1[String, String]("type")(identity) + .flatMap[PruningConfig] { + case "pruning" => pruningConfigInternal + case invalid => Decoder.failedWithMessage(s"Invalid pruning type: $invalid") + } + + implicit val workflowConfigDecoder: Decoder[WorkflowConfig] = + (c: HCursor) => + for { + submission <- c.downField("submission").as[Option[SubmissionConfig]] + streams <- c + .downField("streams") + .as[Option[List[WorkflowConfig.StreamConfig]]] + .map(_.getOrElse(Nil)) + unary <- c.downField("unary").as[Option[List[PruningConfig]]] + } yield WorkflowConfig(submission, streams, unary.toList.flatten.headOption) + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/infrastructure/TestDars.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/infrastructure/TestDars.scala new file mode 100644 index 0000000000..8d3a333e09 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/infrastructure/TestDars.scala @@ -0,0 +1,44 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
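+// Editor's note: an illustrative workflow YAML accepted by WorkflowConfigParser
+// (previous file). Key names are taken from its decoders; the values and the
+// template name are made up:
+//
+//   submission:
+//     type: foo
+//     num_instances: 100
+//     num_observers: 1
+//     unique_parties: false
+//     instance_distribution:
+//       - template: Foo1
+//         weight: 1
+//         payload_size_bytes: 100
+//   streams:
+//     - type: transactions
+//       name: stream-1
+//       filters:
+//         - party: Obs-0
+//       objectives:
+//         min_item_rate: 10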
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.infrastructure + +import com.digitalasset.canton.ledger.api.benchtool.util.SimpleFileReader +import com.digitalasset.canton.util.JarResourceUtils +import com.digitalasset.daml.lf.archive.{DamlLf, Dar, DarParser} +import com.digitalasset.daml.lf.data.Ref +import com.google.protobuf.ByteString + +import scala.util.chaining.* +import scala.util.control.NonFatal +import scala.util.{Failure, Success, Try} + +object TestDars { + private val BenchtoolTestsDar = "benchtool-tests.dar" + private lazy val resources: List[String] = List(BenchtoolTestsDar) + + val benchtoolDar: Dar[DamlLf.Archive] = + JarResourceUtils + .extractFileFromJar(BenchtoolTestsDar) + .pipe(DarParser.assertReadArchiveFromFile) + + val benchtoolDarPackageRef: Ref.PackageRef = + Ref.PackageRef.Name(Ref.PackageName.assertFromString("benchtool-tests")) + + def readAll(): Try[List[DarFile]] = + (TestDars.resources + .foldLeft[Try[List[DarFile]]](Success(List.empty)) { (acc, resourceName) => + for { + dars <- acc + bytes <- SimpleFileReader.readResource(resourceName) + } yield DarFile(resourceName, bytes) :: dars + }) + .recoverWith { case NonFatal(ex) => + Failure(TestDarsError(s"Reading test dars failed. Details: ${ex.getLocalizedMessage}", ex)) + } + + final case class TestDarsError(message: String, cause: Throwable) + extends Exception(message, cause) + + final case class DarFile(name: String, bytes: ByteString) +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/BenchmarkResult.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/BenchmarkResult.scala new file mode 100644 index 0000000000..5f2604392d --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/BenchmarkResult.scala @@ -0,0 +1,11 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +sealed trait BenchmarkResult extends Product with Serializable + +object BenchmarkResult { + final case object Ok extends BenchmarkResult + final case object ObjectivesViolated extends BenchmarkResult +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/ConsumptionSpeedMetric.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/ConsumptionSpeedMetric.scala new file mode 100644 index 0000000000..6f7d045dd0 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/ConsumptionSpeedMetric.scala @@ -0,0 +1,120 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
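+// Editor's note: "consumption speed" in this file is record-time progress divided
+// by wall-clock period. Worked example (illustrative): if the newest record time
+// advances by 10000 ms of ledger time within a 5000 ms reporting period, then
+// periodicSpeed = 10000.0 / 5000 = 2.0, i.e. the reader drains the stream twice
+// as fast as the ledger produces it; values below 1.0 mean it is falling behind.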
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import com.digitalasset.canton.ledger.api.benchtool.util.TimeUtil +import com.google.protobuf.timestamp.Timestamp + +import java.time.{Duration, Instant} + +final case class ConsumptionSpeedMetric[T]( + recordTimeFunction: T => Seq[Timestamp], + objective: Option[ + (ConsumptionSpeedMetric.MinConsumptionSpeed, Option[ConsumptionSpeedMetric.Value]) + ], + previousLatest: Option[Instant] = None, + currentPeriodLatest: Option[Instant] = None, +) extends Metric[T] { + import ConsumptionSpeedMetric.* + + override type V = Value + override type Objective = MinConsumptionSpeed + + override def onNext(value: T): ConsumptionSpeedMetric[T] = { + val recordTimes = recordTimeFunction(value) + val newPreviousLatest = + previousLatest match { + case None => recordTimes.headOption.map(TimeUtil.timestampToInstant) + case v => v + } + val newCurrentPeriodLatest = recordTimes.lastOption.map(TimeUtil.timestampToInstant) + + this.copy( + previousLatest = newPreviousLatest, + currentPeriodLatest = newCurrentPeriodLatest, + ) + } + + override def periodicValue(periodDuration: Duration): (Metric[T], Value) = { + val value = Value(Some(periodicSpeed(periodDuration))) + val updatedMetric = this.copy( + previousLatest = if (currentPeriodLatest.isDefined) currentPeriodLatest else previousLatest, + currentPeriodLatest = None, + objective = updatedObjectives(value), + ) + (updatedMetric, value) + } + + override def finalValue(totalDuration: Duration): Value = + Value(None) + + override def violatedPeriodicObjectives: List[(MinConsumptionSpeed, Value)] = + objective.collect { + case (obj, value) if value.isDefined => obj -> value.get + }.toList + + override def violatedFinalObjectives( + totalDuration: Duration + ): List[(MinConsumptionSpeed, Value)] = Nil + + private def periodicSpeed(periodDuration: Duration): Double = + (previousLatest, currentPeriodLatest) match { + case (Some(previous), Some(current)) => + (current.toEpochMilli - previous.toEpochMilli).toDouble / periodDuration.toMillis + case _ => + 0.0 + } + + private def updatedObjectives(newValue: Value): Option[ + (MinConsumptionSpeed, Option[Value]) + ] = + objective.map { case (objective, currentMaxValue) => + if (objective.isViolatedBy(newValue)) { + currentMaxValue match { + case None => + objective -> Some(newValue) + case Some(currentValue) => + objective -> Some(Ordering[Value].min(currentValue, newValue)) + } + } else { + objective -> currentMaxValue + } + } +} + +object ConsumptionSpeedMetric { + + def empty[T]( + recordTimeFunction: T => Seq[Timestamp], + objective: Option[MinConsumptionSpeed] = None, + ): ConsumptionSpeedMetric[T] = + ConsumptionSpeedMetric( + recordTimeFunction, + objective.map(objective => objective -> None), + ) + + // TODO(#12064): remove option + final case class Value(relativeSpeed: Option[Double]) extends MetricValue + + object Value { + implicit val ordering: Ordering[Value] = (x: Value, y: Value) => { + (x.relativeSpeed, y.relativeSpeed) match { + case (Some(xx), Some(yy)) => + if (xx < yy) -1 + else if (xx > yy) 1 + else 0 + case (Some(_), None) => 1 + case (None, Some(_)) => -1 + case (None, None) => 0 + } + } + } + + final case class MinConsumptionSpeed(minSpeed: Double) extends ServiceLevelObjective[Value] { + override def isViolatedBy(metricValue: Value): Boolean = + Ordering[Value].lt(metricValue, v) + + private val v = Value(Some(minSpeed)) + } +} diff --git 
a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/CountRateMetric.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/CountRateMetric.scala new file mode 100644 index 0000000000..ee471131d1 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/CountRateMetric.scala @@ -0,0 +1,105 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import java.time.Duration + +final case class CountRateMetric[T]( + countingFunction: T => Int, + periodicObjectives: List[ + (CountRateMetric.RateObjective, Option[CountRateMetric.Value]) + ], + finalObjectives: List[CountRateMetric.RateObjective], + counter: Int = 0, + lastCount: Int = 0, +) extends Metric[T] { + import CountRateMetric.* + + override type V = Value + override type Objective = RateObjective + + override def onNext(value: T): CountRateMetric[T] = + this.copy(counter = counter + countingFunction(value)) + + override def periodicValue(periodDuration: Duration): (Metric[T], Value) = { + val value = Value(periodicRate(periodDuration)) + val updatedMetric = this.copy( + periodicObjectives = updatedPeriodicObjectives(value), + lastCount = counter, + ) + (updatedMetric, value) + } + + override def finalValue(totalDuration: Duration): Value = + Value(ratePerSecond = totalRate(totalDuration)) + + override def violatedPeriodicObjectives: List[(RateObjective, Value)] = + periodicObjectives.collect { case (objective, Some(value)) => + objective -> value + } + + override def violatedFinalObjectives( + totalDuration: Duration + ): List[(RateObjective, Value)] = + finalObjectives.collect { + case objective if objective.isViolatedBy(finalValue(totalDuration)) => + (objective, finalValue(totalDuration)) + } + + private def periodicRate(periodDuration: Duration): Double = + (counter - lastCount) * 1000.0 / periodDuration.toMillis + + private def totalRate(totalDuration: Duration): Double = + counter / totalDuration.toMillis.toDouble * 1000.0 + + private def updatedPeriodicObjectives( + newValue: Value + ): List[(RateObjective, Option[Value])] = + periodicObjectives.map { case (objective, currentMinValue) => + if (objective.isViolatedBy(newValue)) { + currentMinValue match { + case None => objective -> Some(newValue) + case Some(currentValue) => objective -> Some(Ordering[Value].min(currentValue, newValue)) + } + } else { + objective -> currentMinValue + } + } +} + +object CountRateMetric { + final case class Value(ratePerSecond: Double) extends MetricValue + + object Value { + implicit val ordering: Ordering[Value] = + Ordering.fromLessThan(_.ratePerSecond < _.ratePerSecond) + } + + abstract class RateObjective extends ServiceLevelObjective[Value] with Product with Serializable + object RateObjective { + final case class MinRate(minAllowedRatePerSecond: Double) extends RateObjective { + override def isViolatedBy(metricValue: CountRateMetric.Value): Boolean = + Ordering[CountRateMetric.Value].lt(metricValue, v) + + private val v = CountRateMetric.Value(minAllowedRatePerSecond) + } + + final case class MaxRate(minAllowedRatePerSecond: Double) extends RateObjective { + override def isViolatedBy(metricValue: CountRateMetric.Value): Boolean = + Ordering[CountRateMetric.Value].gt(metricValue, v) + + private val v = 
CountRateMetric.Value(minAllowedRatePerSecond) + } + } + + def empty[T]( + countingFunction: T => Int, + periodicObjectives: List[RateObjective], + finalObjectives: List[RateObjective], + ): CountRateMetric[T] = CountRateMetric[T]( + countingFunction, + periodicObjectives.map(obj => obj -> None), + finalObjectives = finalObjectives, + ) +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/DelayMetric.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/DelayMetric.scala new file mode 100644 index 0000000000..38268f71b0 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/DelayMetric.scala @@ -0,0 +1,116 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import com.digitalasset.canton.ledger.api.benchtool.util.TimeUtil +import com.google.protobuf.timestamp.Timestamp + +import java.time.{Clock, Duration} + +final case class DelayMetric[T]( + recordTimeFunction: T => Seq[Timestamp], + clock: Clock, + objective: Option[(DelayMetric.MaxDelay, Option[DelayMetric.Value])], + delaysInCurrentInterval: List[Duration] = List.empty, +) extends Metric[T] { + import DelayMetric.* + + override type V = Value + override type Objective = MaxDelay + + override def onNext(value: T): DelayMetric[T] = { + val now = clock.instant() + val newDelays: List[Duration] = recordTimeFunction(value).toList + .map(TimeUtil.durationBetween(_, now)) + this.copy(delaysInCurrentInterval = delaysInCurrentInterval ::: newDelays) + } + + override def periodicValue(periodDuration: Duration): (Metric[T], Value) = { + val value = Value(periodicMeanDelay.map(_.getSeconds)) + val updatedMetric = this.copy( + delaysInCurrentInterval = List.empty, + objective = updatedObjective(value), + ) + (updatedMetric, value) + } + + override def finalValue(totalDuration: Duration): Value = + Value(None) + + override def violatedPeriodicObjectives: List[(MaxDelay, Value)] = + objective.collect { + case (obj, value) if value.isDefined => obj -> value.get + }.toList + + override def violatedFinalObjectives( + totalDuration: Duration + ): List[(MaxDelay, Value)] = Nil + + private def updatedObjective( + newValue: Value + ): Option[(MaxDelay, Option[DelayMetric.Value])] = + objective.map { case (objective, currentViolatingValue) => + // verify if the new value violates objective's requirements + if (objective.isViolatedBy(newValue)) { + currentViolatingValue match { + case None => + // if the new value violates objective's requirements and there is no other violating value, + // record the new value + objective -> Some(newValue) + case Some(currentValue) => + // if the new value violates objective's requirements and there is already a value that violates + // requirements, record the maximum value of the two + objective -> Some(Ordering[V].max(currentValue, newValue)) + } + } else { + objective -> currentViolatingValue + } + } + + private def periodicMeanDelay: Option[Duration] = + if (delaysInCurrentInterval.nonEmpty) + Some( + delaysInCurrentInterval + .reduceLeft(_.plus(_)) + .dividedBy(delaysInCurrentInterval.length.toLong) + ) + else None +} + +object DelayMetric { + + def empty[T]( + recordTimeFunction: T => Seq[Timestamp], + clock: Clock, + objective: Option[MaxDelay] = None, + ): 
DelayMetric[T] = + DelayMetric( + recordTimeFunction = recordTimeFunction, + clock = clock, + objective = objective.map(objective => objective -> None), + ) + + final case class Value(meanDelaySeconds: Option[Long]) extends MetricValue + + object Value { + implicit val valueOrdering: Ordering[Value] = (x: Value, y: Value) => { + (x.meanDelaySeconds, y.meanDelaySeconds) match { + case (Some(xx), Some(yy)) => + if (xx < yy) -1 + else if (xx > yy) 1 + else 0 + case (Some(_), None) => 1 + case (None, Some(_)) => -1 + case (None, None) => 0 + } + } + } + + final case class MaxDelay(maxDelaySeconds: Long) + extends ServiceLevelObjective[DelayMetric.Value] { + override def isViolatedBy(metricValue: DelayMetric.Value): Boolean = + metricValue.meanDelaySeconds.exists(_ > maxDelaySeconds) + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/ExposedMetrics.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/ExposedMetrics.scala new file mode 100644 index 0000000000..691f7ffe54 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/ExposedMetrics.scala @@ -0,0 +1,101 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import com.daml.metrics.api.MetricHandle.{Counter, Gauge, Histogram, LabeledMetricsFactory} +import com.daml.metrics.api.{MetricInfo, MetricName, MetricQualification, MetricsContext} +import com.digitalasset.canton.ledger.api.benchtool.util.TimeUtil +import com.google.protobuf.timestamp.Timestamp + +import java.time.Clock + +final class ExposedMetrics[T]( + counterMetric: ExposedMetrics.CounterMetric[T], + bytesProcessedMetric: ExposedMetrics.BytesProcessedMetric[T], + delayMetric: Option[ExposedMetrics.DelayMetric[T]], + latestRecordTimeMetric: Option[ExposedMetrics.LatestRecordTimeMetric[T]], + clock: Clock, +) { + def onNext(elem: T): Unit = { + counterMetric.counter.inc(counterMetric.countingFunction(elem))(MetricsContext.Empty) + bytesProcessedMetric.bytesProcessed.inc(bytesProcessedMetric.sizingFunction(elem))( + MetricsContext.Empty + ) + delayMetric.foreach { metric => + val now = clock.instant() + metric + .recordTimeFunction(elem) + .foreach { recordTime => + val delay = TimeUtil.durationBetween(recordTime, now) + metric.delays.update(delay.getSeconds) + } + } + latestRecordTimeMetric.foreach { metric => + metric + .recordTimeFunction(elem) + .lastOption + .foreach(recordTime => metric.latestRecordTime.updateValue(recordTime.seconds)) + } + } + +} + +object ExposedMetrics { + private val Prefix: MetricName = MetricName.Daml :+ "bench_tool" + + final case class CounterMetric[T](counter: Counter, countingFunction: T => Long) + final case class BytesProcessedMetric[T](bytesProcessed: Counter, sizingFunction: T => Long) + final case class DelayMetric[T](delays: Histogram, recordTimeFunction: T => Seq[Timestamp]) + final case class LatestRecordTimeMetric[T]( + latestRecordTime: Gauge[Long], + recordTimeFunction: T => Seq[Timestamp], + ) + + def apply[T]( + streamName: String, + factory: LabeledMetricsFactory, + countingFunction: T => Long, + sizingFunction: T => Long, + recordTimeFunction: Option[T => Seq[Timestamp]], + clock: Clock = Clock.systemUTC(), + ): ExposedMetrics[T] = { + val counterMetric = CounterMetric[T]( + 
counter = factory.counter( + MetricInfo(Prefix :+ "count" :+ streamName, "", MetricQualification.Debug) + ), + countingFunction = countingFunction, + ) + val bytesProcessedMetric = BytesProcessedMetric[T]( + bytesProcessed = factory.counter( + MetricInfo(Prefix :+ "bytes_read" :+ streamName, "", MetricQualification.Debug) + ), + sizingFunction = sizingFunction, + ) + val delayMetric = recordTimeFunction.map { f => + DelayMetric[T]( + delays = factory.histogram( + MetricInfo(Prefix :+ "delay" :+ streamName, "", MetricQualification.Debug) + ), + recordTimeFunction = f, + ) + } + val latestRecordTimeMetric = recordTimeFunction.map { f => + LatestRecordTimeMetric[T]( + latestRecordTime = factory.gauge( + MetricInfo(Prefix :+ "latest_record_time" :+ streamName, "", MetricQualification.Debug), + 0L, + )(MetricsContext.Empty), + recordTimeFunction = f, + ) + } + + new ExposedMetrics[T]( + counterMetric = counterMetric, + bytesProcessedMetric = bytesProcessedMetric, + delayMetric = delayMetric, + latestRecordTimeMetric = latestRecordTimeMetric, + clock = clock, + ) + } +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/LatencyMetric.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/LatencyMetric.scala new file mode 100644 index 0000000000..43d843945f --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/LatencyMetric.scala @@ -0,0 +1,56 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import java.time.Duration + +import LatencyMetric.{LatencyNanos, MaxLatency, Value} + +final case class LatencyMetric( + totalNanos: LatencyNanos, + numberObservations: Int, + maxLatency: MaxLatency, +) extends Metric[LatencyNanos] { + override type V = LatencyMetric.Value + override type Objective = MaxLatency + + override def onNext(value: LatencyNanos): Metric[LatencyNanos] = + copy( + totalNanos = totalNanos + value, + numberObservations = numberObservations + 1, + ) + + override def periodicValue(periodDuration: Duration): (Metric[LatencyNanos], Value) = + this -> currentAverage + + override def finalValue(totalDuration: Duration): Value = + currentAverage + + override def violatedFinalObjectives( + totalDuration: Duration + ): List[(MaxLatency, Value)] = { + val averageLatency = finalValue(totalDuration) + val violation = maxLatency.isViolatedBy(averageLatency) + if (violation) List(maxLatency -> averageLatency) + else Nil + } + + private def currentAverage: Value = + if (numberObservations == 0) Value(0L) else Value(totalNanos / numberObservations) +} + +object LatencyMetric { + type LatencyNanos = Long + final case class Value(latencyNanos: LatencyNanos) extends MetricValue + + def empty(maxLatencyObjectiveMillis: Long): LatencyMetric = + LatencyMetric(0, 0, MaxLatency(maxLatencyObjectiveMillis * 1000000L)) + + final case class MaxLatency(maxLatency: LatencyNanos) extends ServiceLevelObjective[Value] { + override def isViolatedBy(metricValue: Value): Boolean = + metricValue.latencyNanos > maxLatency + + def millis: Double = maxLatency / 1000000d + } +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MeteredStreamObserver.scala 
b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MeteredStreamObserver.scala new file mode 100644 index 0000000000..43be89314f --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MeteredStreamObserver.scala @@ -0,0 +1,31 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import com.digitalasset.canton.ledger.api.benchtool.util.ObserverWithResult +import org.slf4j.Logger + +import scala.concurrent.Future + +class MeteredStreamObserver[T]( + val streamName: String, + logger: Logger, + manager: MetricsManager[T], + itemCountingFunction: T => Long, + maxItemCount: Option[Long], +) extends ObserverWithResult[T, BenchmarkResult](logger) { + private var itemsCount = 0L + + override def onNext(value: T): Unit = { + itemsCount += itemCountingFunction(value) + manager.sendNewValue(value) + super.onNext(value) + if (maxItemCount.isDefined && itemsCount >= maxItemCount.get) + cancel() + } + + override def completeWith(): Future[BenchmarkResult] = + manager.result() + +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/Metric.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/Metric.scala new file mode 100644 index 0000000000..645b46350d --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/Metric.scala @@ -0,0 +1,46 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import java.time.Duration + +trait Metric[Elem] { + + type V <: MetricValue + + type Objective <: ServiceLevelObjective[V] + + /** @return + * an updated version of itself + */ + def onNext(value: Elem): Metric[Elem] + + /** @return + * an updated version of itself and the value observed in this period + * + * NOTE: Durations of subsequent periods are not guaranteed to be exactly the same. + */ + def periodicValue(periodDuration: Duration): (Metric[Elem], V) + + def finalValue(totalDuration: Duration): V + + /** @return + * a list of objective violations, where each element is a pair of a violated objective and the + * periodic value that violates it the most. + */ + def violatedPeriodicObjectives: List[(Objective, V)] = Nil + + /** @return + * a list of objective violations, where each element is a pair of a violated objective and the + * final value that violates it. 
+ */ + def violatedFinalObjectives(totalDuration: Duration): List[(Objective, V)] + + def name: String = getClass.getSimpleName + +} + +object Metric { + def rounded(value: Double): String = "%.2f".format(value) +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MetricRegistryOwner.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MetricRegistryOwner.scala new file mode 100644 index 0000000000..84163fa429 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MetricRegistryOwner.scala @@ -0,0 +1,34 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import com.daml.ledger.resources.{Resource, ResourceContext, ResourceOwner} +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.metrics.LogReporter +import io.opentelemetry.api.metrics.MeterProvider +import io.opentelemetry.sdk.metrics.SdkMeterProvider +import io.opentelemetry.sdk.metrics.`export`.PeriodicMetricReader + +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.Duration + +class MetricRegistryOwner(reportingInterval: Duration, loggerFactory: NamedLoggerFactory) + extends ResourceOwner[MeterProvider] { + override def acquire()(implicit + context: ResourceContext + ): Resource[MeterProvider] = + ResourceOwner.forCloseable(() => metricOwner).acquire() + + private def metricOwner = { + val loggingMetricReader = PeriodicMetricReader + .builder(new LogReporter(logAsInfo = true, loggerFactory)) + .setInterval(reportingInterval.toMillis, TimeUnit.MILLISECONDS) + .build() + val meterProviderBuilder = SdkMeterProvider + .builder() + meterProviderBuilder.registerMetricReader(loggingMetricReader) + meterProviderBuilder.build() + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MetricValue.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MetricValue.scala new file mode 100644 index 0000000000..f6f2bee011 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MetricValue.scala @@ -0,0 +1,6 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +trait MetricValue diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MetricsCollector.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MetricsCollector.scala new file mode 100644 index 0000000000..2582de4869 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MetricsCollector.scala @@ -0,0 +1,101 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
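+// Editor's note: an illustrative sketch of the actor protocol defined below; the
+// spawning context and ask plumbing are hypothetical (see MetricsManager for the
+// real wiring):
+//
+//   val collector = context.spawn(MetricsCollector(metrics), "collector")
+//   collector ! MetricsCollector.Message.NewValue(element)
+//   // periodically, via the typed ask pattern:
+//   collector.ask(MetricsCollector.Message.PeriodicReportRequest.apply)
+//   // once, at the end of the benchmark:
+//   collector.ask(MetricsCollector.Message.FinalReportRequest.apply)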
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import com.digitalasset.canton.ledger.api.benchtool.util.TimeUtil +import org.apache.pekko.actor.typed.scaladsl.Behaviors +import org.apache.pekko.actor.typed.{ActorRef, Behavior} + +import java.time.{Clock, Duration, Instant} + +object MetricsCollector { + + sealed trait Message + object Message { + final case class NewValue[T](value: T) extends Message + final case class PeriodicReportRequest(replyTo: ActorRef[Response.PeriodicReportResponse]) + extends Message + final case class FinalReportRequest(replyTo: ActorRef[Response.FinalReport]) extends Message + } + + sealed trait Response + object Response { + sealed trait PeriodicReportResponse extends Response + final case class PeriodicReport(values: List[MetricValue]) extends PeriodicReportResponse + final case object ReportNotReady extends PeriodicReportResponse + final case class MetricFinalReportData( + name: String, + value: MetricValue, + violatedObjectives: List[(ServiceLevelObjective[_], MetricValue)], + ) + final case class FinalReport(totalDuration: Duration, metricsData: List[MetricFinalReportData]) + extends Response + } + + def apply[T]( + metrics: List[Metric[T]], + exposedMetrics: Option[ExposedMetrics[T]] = None, + ): Behavior[Message] = { + val clock = Clock.systemUTC() + val startTime: Instant = clock.instant() + val minimumTimePeriodBetweenSubsequentReports: Duration = Duration.ofMillis(100) + new MetricsCollector[T](exposedMetrics, minimumTimePeriodBetweenSubsequentReports, clock) + .handlingMessages(metrics, startTime, startTime) + } +} + +class MetricsCollector[T]( + exposedMetrics: Option[ExposedMetrics[T]], + minimumTimePeriodBetweenSubsequentReports: Duration = Duration.ofMillis(100), + clock: Clock, +) { + import MetricsCollector.* + import MetricsCollector.Message.* + import MetricsCollector.Response.* + + @scala.annotation.nowarn("msg=.*is unchecked since it is eliminated by erasure") + def handlingMessages( + metrics: List[Metric[T]], + lastPeriodicCheck: Instant, + startTime: Instant, + ): Behavior[Message] = + Behaviors.receive { case (_, message) => + message match { + case newValue: NewValue[T] => + exposedMetrics.foreach(_.onNext(newValue.value)) + handlingMessages(metrics.map(_.onNext(newValue.value)), lastPeriodicCheck, startTime) + + case request: PeriodicReportRequest => + val currentTime = clock.instant() + val periodSinceLastReport: Duration = + TimeUtil.durationBetween(lastPeriodicCheck, currentTime) + if ( + TimeUtil.isAtLeast(periodSinceLastReport, minimumTimePeriodBetweenSubsequentReports) + ) { + val (newMetrics, values) = metrics + .map(_.periodicValue(periodSinceLastReport)) + .unzip + request.replyTo ! Response.PeriodicReport(values) + handlingMessages(newMetrics, currentTime, startTime) + } else { + request.replyTo ! Response.ReportNotReady + Behaviors.same + } + + case request: FinalReportRequest => + val duration = TimeUtil.durationBetween(startTime, clock.instant()) + val data: List[MetricFinalReportData] = + metrics.map { metric => + MetricFinalReportData( + name = metric.name, + value = metric.finalValue(duration), + violatedObjectives = + metric.violatedPeriodicObjectives ::: metric.violatedFinalObjectives(duration), + ) + } + request.replyTo ! 
FinalReport(duration, data) + Behaviors.stopped + } + } +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MetricsManager.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MetricsManager.scala new file mode 100644 index 0000000000..e027a524f0 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MetricsManager.scala @@ -0,0 +1,122 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.ledger.api.benchtool.util.ReportFormatter +import org.apache.pekko.actor.typed.scaladsl.AskPattern.* +import org.apache.pekko.actor.typed.{ActorRef, ActorSystem, Props, SpawnProtocol} +import org.apache.pekko.actor.{Cancellable, CoordinatedShutdown} +import org.apache.pekko.util.Timeout +import org.slf4j.LoggerFactory + +import scala.concurrent.duration.* +import scala.concurrent.{ExecutionContext, Future} + +import MetricsCollector.Response + +trait MetricsManager[T] { + def sendNewValue(value: T): Unit + def result(): Future[BenchmarkResult] +} + +final case class MetricsManagerImpl[T]( + collector: ActorRef[MetricsCollector.Message], + logInterval: FiniteDuration, + observedMetric: String, +)(implicit + system: ActorSystem[SpawnProtocol.Command] +) extends MetricsManager[T] { + def sendNewValue(value: T): Unit = + collector ! MetricsCollector.Message.NewValue(value) + + def result(): Future[BenchmarkResult] = { + logger.debug(s"Requesting result of stream: $observedMetric") + periodicRequest.cancel().discard + implicit val timeout: Timeout = Timeout(3.seconds) + collector + .ask(MetricsCollector.Message.FinalReportRequest.apply) + .map { (response: MetricsCollector.Response.FinalReport) => + logger.info( + ReportFormatter.formatFinalReport( + streamName = observedMetric, + finalReport = response, + ) + ) + val atLeastOneObjectiveViolated = response.metricsData.exists(_.violatedObjectives.nonEmpty) + if (atLeastOneObjectiveViolated) BenchmarkResult.ObjectivesViolated + else BenchmarkResult.Ok + }(system.executionContext) + } + + CoordinatedShutdown(system).addTask( + phase = CoordinatedShutdown.PhaseBeforeServiceUnbind, + taskName = "report-results", + ) { () => + logger.debug(s"Shutting down infrastructure for stream: $observedMetric") + result().map(_ => org.apache.pekko.Done)(system.executionContext) + } + + private val periodicRequest: Cancellable = + system.scheduler.scheduleWithFixedDelay(logInterval, logInterval) { () => + implicit val timeout: Timeout = Timeout(logInterval) + collector + .ask(MetricsCollector.Message.PeriodicReportRequest.apply) + .collect { + case Response.ReportNotReady => () + case response: Response.PeriodicReport => + logger.info( + ReportFormatter.formatPeriodicReport( + streamName = observedMetric, + periodicReport = response, + ) + ) + }(system.executionContext) + .discard + () + }(system.executionContext) + + private val logger = LoggerFactory.getLogger(getClass) +} + +object MetricsManager { + def create[StreamElem]( + observedMetric: String, + logInterval: FiniteDuration, + metrics: List[Metric[StreamElem]], + exposedMetrics: Option[ExposedMetrics[StreamElem]], + )(implicit + system: ActorSystem[SpawnProtocol.Command], + 
ec: ExecutionContext, + ): Future[MetricsManager[StreamElem]] = { + implicit val timeout: Timeout = Timeout(3.seconds) + + val collectorActor: Future[ActorRef[MetricsCollector.Message]] = system.ask( + SpawnProtocol.Spawn( + behavior = MetricsCollector( + metrics = metrics, + exposedMetrics = exposedMetrics, + ), + name = s"$observedMetric-collector", + props = Props.empty, + _, + ) + ) + + collectorActor.map(collector => + MetricsManagerImpl[StreamElem]( + collector = collector, + logInterval = logInterval, + observedMetric = observedMetric, + ) + ) + } + + final case class NoOpMetricsManager[T]() extends MetricsManager[T] { + override def sendNewValue(value: T): Unit = { + val _ = value + } + override def result(): Future[BenchmarkResult] = Future.successful(BenchmarkResult.Ok) + } +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MetricsSet.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MetricsSet.scala new file mode 100644 index 0000000000..48025c1c76 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MetricsSet.scala @@ -0,0 +1,177 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import com.daml.ledger.api.v2.command_completion_service.CompletionStreamResponse +import com.daml.ledger.api.v2.state_service.GetActiveContractsResponse +import com.daml.ledger.api.v2.update_service.GetUpdatesResponse +import com.daml.metrics.api.MetricHandle.LabeledMetricsFactory +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.StreamConfig.* +import com.digitalasset.canton.ledger.api.benchtool.metrics.metrics.TotalRuntimeMetric +import com.digitalasset.canton.ledger.api.benchtool.metrics.metrics.TotalRuntimeMetric.MaxDurationObjective +import com.google.protobuf.timestamp.Timestamp + +import java.time.{Clock, Duration} +import scala.concurrent.duration.FiniteDuration + +object MetricsSet { + + def transactionMetrics( + configO: Option[TransactionObjectives] + ): List[Metric[GetUpdatesResponse]] = + transactionMetrics[GetUpdatesResponse]( + countingFunction = response => countTransactionsEvents(response).toInt, + sizingFunction = _.serializedSize.toLong, + recordTimeFunction = _.update.transaction + .collect { + case t if t.effectiveAt.isDefined => t.getEffectiveAt + } + .toList, + configO = configO, + ) + + def transactionExposedMetrics( + streamName: String, + metricsFactory: LabeledMetricsFactory, + ): ExposedMetrics[GetUpdatesResponse] = + ExposedMetrics[GetUpdatesResponse]( + streamName = streamName, + factory = metricsFactory, + countingFunction = countTransactionsEvents, + sizingFunction = _.serializedSize.toLong, + recordTimeFunction = Some( + _.update.transaction + .collect { + case t if t.effectiveAt.isDefined => t.getEffectiveAt + } + .toList + ), + ) + + def activeContractsMetrics( + configO: Option[AcsAndCompletionsObjectives] + ): List[Metric[GetActiveContractsResponse]] = + List[Metric[GetActiveContractsResponse]]( + CountRateMetric.empty[GetActiveContractsResponse]( + countingFunction = _.contractEntry.activeContract.knownSize, + periodicObjectives = Nil, + finalObjectives = List( + configO.flatMap(_.minItemRate.map(CountRateMetric.RateObjective.MinRate.apply)), + 
configO.flatMap(_.maxItemRate.map(CountRateMetric.RateObjective.MaxRate.apply)), + ).flatten, + ), + TotalCountMetric.empty[GetActiveContractsResponse]( + countingFunction = countActiveContracts + ), + SizeMetric.empty[GetActiveContractsResponse]( + sizingFunction = _.serializedSize.toLong + ), + ) ++ optionalMaxDurationMetrics(configO) + + def activeContractsExposedMetrics( + streamName: String, + metricsFactory: LabeledMetricsFactory, + ): ExposedMetrics[GetActiveContractsResponse] = + ExposedMetrics[GetActiveContractsResponse]( + streamName = streamName, + factory = metricsFactory, + countingFunction = response => countActiveContracts(response).toLong, + sizingFunction = _.serializedSize.toLong, + recordTimeFunction = None, + ) + + def completionsMetrics( + configO: Option[AcsAndCompletionsObjectives] + ): List[Metric[CompletionStreamResponse]] = + List[Metric[CompletionStreamResponse]]( + CountRateMetric.empty( + countingFunction = _.completionResponse.completion.toList.length, + periodicObjectives = Nil, + finalObjectives = List( + configO.flatMap(_.minItemRate.map(CountRateMetric.RateObjective.MinRate.apply)), + configO.flatMap(_.maxItemRate.map(CountRateMetric.RateObjective.MaxRate.apply)), + ).flatten, + ), + TotalCountMetric.empty( + countingFunction = countCompletions + ), + SizeMetric.empty( + sizingFunction = _.serializedSize.toLong + ), + ) ++ optionalMaxDurationMetrics(configO) + + def completionsExposedMetrics( + streamName: String, + metricsFactory: LabeledMetricsFactory, + ): ExposedMetrics[CompletionStreamResponse] = + ExposedMetrics[CompletionStreamResponse]( + streamName = streamName, + factory = metricsFactory, + countingFunction = response => countCompletions(response).toLong, + sizingFunction = _.serializedSize.toLong, + recordTimeFunction = None, + ) + + private def transactionMetrics[T]( + countingFunction: T => Int, + sizingFunction: T => Long, + recordTimeFunction: T => Seq[Timestamp], + configO: Option[TransactionObjectives], + ): List[Metric[T]] = + List[Metric[T]]( + CountRateMetric.empty[T]( + countingFunction = countingFunction, + periodicObjectives = Nil, + finalObjectives = List( + configO.flatMap(_.minItemRate.map(CountRateMetric.RateObjective.MinRate.apply)), + configO.flatMap(_.maxItemRate.map(CountRateMetric.RateObjective.MaxRate.apply)), + ).flatten, + ), + TotalCountMetric.empty[T]( + countingFunction = countingFunction + ), + ConsumptionSpeedMetric.empty[T]( + recordTimeFunction = recordTimeFunction, + objective = configO.flatMap( + _.minConsumptionSpeed.map(ConsumptionSpeedMetric.MinConsumptionSpeed.apply) + ), + ), + DelayMetric.empty[T]( + recordTimeFunction = recordTimeFunction, + clock = Clock.systemUTC(), + objective = configO.flatMap(_.maxDelaySeconds.map(DelayMetric.MaxDelay.apply)), + ), + SizeMetric.empty[T]( + sizingFunction = sizingFunction + ), + ) ++ optionalMaxDurationMetrics(configO) + + def countActiveContracts(response: GetActiveContractsResponse): Int = + response.contractEntry.activeContract.knownSize + + def countCompletions(response: CompletionStreamResponse): Int = + response.completionResponse.completion.size + + def countTransactionsEvents(response: GetUpdatesResponse): Long = + response.update.transaction.foldLeft(0L)((acc, tx) => acc + tx.events.size) + + private def optionalMaxDurationMetrics[T]( + configO: Option[CommonObjectivesConfig] + ): List[Metric[T]] = { + for { + config <- configO + maxRuntime <- config.maxTotalStreamRuntimeDuration + } yield createTotalRuntimeMetric[T](maxRuntime) + }.toList + + def 
createTotalRuntimeMetric[T](maxRuntime: FiniteDuration): Metric[T] = + TotalRuntimeMetric.empty( + clock = Clock.systemUTC(), + startTime = Clock.systemUTC().instant(), + objective = MaxDurationObjective(maxValue = toJavaDuration(maxRuntime)), + ) + + protected[metrics] def toJavaDuration[T](maxStreamDuration: FiniteDuration): Duration = + Duration.ofNanos(maxStreamDuration.toNanos) +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/ServiceLevelObjective.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/ServiceLevelObjective.scala new file mode 100644 index 0000000000..563a786cff --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/ServiceLevelObjective.scala @@ -0,0 +1,8 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +trait ServiceLevelObjective[MetricValueType <: MetricValue] { + def isViolatedBy(metricValue: MetricValueType): Boolean +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/SizeMetric.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/SizeMetric.scala new file mode 100644 index 0000000000..95154beae5 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/SizeMetric.scala @@ -0,0 +1,50 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import java.time.Duration + +final case class SizeMetric[T]( + sizingBytesFunction: T => Long, + currentSizeBytesBucket: Long = 0, + sizeRateList: List[Double] = List.empty, +) extends Metric[T] { + import SizeMetric.* + + override type V = Value + + override def onNext(value: T): SizeMetric[T] = { + val addedBytesSize = sizingBytesFunction(value) + this.copy(currentSizeBytesBucket = currentSizeBytesBucket + addedBytesSize) + } + + override def periodicValue(periodDuration: Duration): (Metric[T], Value) = { + val sizeRate = periodicSizeRate(periodDuration) + val updatedMetric = this.copy( + currentSizeBytesBucket = 0, + sizeRateList = sizeRate :: sizeRateList, + ) // ok to prepend because the list is used only to calculate mean value so the order doesn't matter + (updatedMetric, Value(sizeRate)) + } + + override def finalValue(totalDuration: Duration): Value = { + val value = sizeRateList match { + case Nil => 0.0 + case rates => rates.sum / rates.length + } + Value(value) + } + + override def violatedFinalObjectives(totalDuration: Duration): List[(Objective, Value)] = Nil + + private def periodicSizeRate(periodDuration: Duration): Double = + (currentSizeBytesBucket.toDouble / periodDuration.toMillis) * 1000.0 / (1024 * 1024) +} + +object SizeMetric { + final case class Value(megabytesPerSecond: Double) extends MetricValue + + def empty[T](sizingFunction: T => Long): SizeMetric[T] = + SizeMetric[T](sizingFunction) +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/StreamMetrics.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/StreamMetrics.scala new file mode 100644 index 0000000000..6190fee368 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/StreamMetrics.scala @@ -0,0 +1,36 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import org.apache.pekko.actor.typed.{ActorSystem, SpawnProtocol} +import org.slf4j.Logger + +import scala.concurrent.duration.FiniteDuration +import scala.concurrent.{ExecutionContext, Future} + +object StreamMetrics { + + def observer[StreamElem]( + streamName: String, + logInterval: FiniteDuration, + metrics: List[Metric[StreamElem]], + logger: Logger, + exposedMetrics: Option[ExposedMetrics[StreamElem]] = None, + itemCountingFunction: (StreamElem) => Long, + maxItemCount: Option[Long], + )(implicit + system: ActorSystem[SpawnProtocol.Command], + ec: ExecutionContext, + ): Future[MeteredStreamObserver[StreamElem]] = + MetricsManager.create(streamName, logInterval, metrics, exposedMetrics).map { manager => + new MeteredStreamObserver[StreamElem]( + streamName = streamName, + logger = logger, + manager = manager, + itemCountingFunction = itemCountingFunction, + maxItemCount = maxItemCount, + ) + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/TotalCountMetric.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/TotalCountMetric.scala new file mode 100644 index 0000000000..62c4ea45cc --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/TotalCountMetric.scala @@ -0,0 +1,35 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import java.time.Duration + +final case class TotalCountMetric[T]( + countingFunction: T => Int, + counter: Int = 0, + lastCount: Int = 0, +) extends Metric[T] { + import TotalCountMetric.* + + override type V = Value + + override def onNext(value: T): TotalCountMetric[T] = + this.copy(counter = counter + countingFunction(value)) + + override def periodicValue(periodDuration: Duration): (Metric[T], Value) = + (this.copy(lastCount = counter), Value(counter)) + + override def finalValue(totalDuration: Duration): Value = + Value(totalCount = counter) + + override def violatedFinalObjectives(totalDuration: Duration): List[(Objective, Value)] = Nil +} + +object TotalCountMetric { + final case class Value(totalCount: Int) extends MetricValue + + def empty[T]( + countingFunction: T => Int + ): TotalCountMetric[T] = TotalCountMetric[T](countingFunction) +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/metrics/TotalRuntimeMetric.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/metrics/TotalRuntimeMetric.scala new file mode 100644 index 0000000000..9e86b443f1 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/metrics/TotalRuntimeMetric.scala @@ -0,0 +1,75 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics.metrics + +import com.digitalasset.canton.ledger.api.benchtool.metrics.metrics.TotalRuntimeMetric.{ + MaxDurationObjective, + Value, +} +import com.digitalasset.canton.ledger.api.benchtool.metrics.{ + Metric, + MetricValue, + ServiceLevelObjective, +} +import com.digitalasset.canton.ledger.api.benchtool.util.TimeUtil + +import java.time.{Clock, Duration, Instant} + +object TotalRuntimeMetric { + + final case class MaxDurationObjective(maxValue: Duration) extends ServiceLevelObjective[Value] { + override def isViolatedBy(value: Value): Boolean = value.v.compareTo(maxValue) > 0 + } + + def empty[T]( + clock: Clock, + startTime: Instant, + objective: MaxDurationObjective, + ): TotalRuntimeMetric[T] = + TotalRuntimeMetric[T]( + clock = clock, + startTime = startTime, + objective = objective, + ) + + final case class Value(v: Duration) extends MetricValue +} + +/** Measures the total runtime from the set start time to the time of receiving the most recent + * item. + */ +final case class TotalRuntimeMetric[T]( + clock: Clock, + startTime: Instant, + objective: MaxDurationObjective, +) extends Metric[T] { + override type V = Value + override type Objective = MaxDurationObjective + + // NOTE: There's no need to synchronize on this variable + // as this metric is used solely as internal state of the actor at 'com.digitalasset.canton.ledger.api.benchtool.metrics.MetricsCollector.handlingMessages' + private var lastSeenItemTime: Instant = startTime + + override def onNext(item: T): Metric[T] = { + lastSeenItemTime = clock.instant() + this + } + + override def periodicValue(periodDuration: Duration): (Metric[T], Value) = + this -> totalRuntime + + override def finalValue(totalDuration: Duration): Value = + totalRuntime + + override def violatedFinalObjectives( + totalDuration: Duration + ): List[(MaxDurationObjective, Value)] = + if (objective.isViolatedBy(totalRuntime)) + List((objective, totalRuntime)) + else + List.empty + + private def totalRuntime: Value = Value(TimeUtil.durationBetween(startTime, lastSeenItemTime)) + +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/CommandCompletionService.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/CommandCompletionService.scala new file mode 100644 index 0000000000..46621fc46f --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/CommandCompletionService.scala @@ -0,0 +1,59 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.services + +import com.daml.ledger.api.v2.command_completion_service.{ + CommandCompletionServiceGrpc, + CompletionStreamRequest, + CompletionStreamResponse, +} +import com.digitalasset.canton.ledger.api.benchtool.AuthorizationHelper +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig +import com.digitalasset.canton.ledger.api.benchtool.util.ObserverWithResult +import io.grpc.Channel +import org.slf4j.LoggerFactory + +import scala.concurrent.Future + +class CommandCompletionService( + channel: Channel, + userId: String, + authorizationToken: Option[String], +) { + private val logger = LoggerFactory.getLogger(getClass) + private val service: CommandCompletionServiceGrpc.CommandCompletionServiceStub = + AuthorizationHelper.maybeAuthedService(authorizationToken)( + CommandCompletionServiceGrpc.stub(channel) + ) + + def completions[Result]( + config: WorkflowConfig.StreamConfig.CompletionsStreamConfig, + observer: ObserverWithResult[CompletionStreamResponse, Result], + ): Future[Result] = { + val request = completionsRequest(config) + service.completionStream(request, observer) + logger.info("Started fetching completions") + observer.result + } + + private def completionsRequest( + config: WorkflowConfig.StreamConfig.CompletionsStreamConfig + ): CompletionStreamRequest = { + if (authorizationToken.isDefined) { + assert( + userId == config.userId, + s"When using user-based authorization, the stream config userId (${config.userId}) must match the authorized userId ($userId)", + ) + } + val request = CompletionStreamRequest.defaultInstance + .withParties(config.parties) + .withUserId(config.userId) + + config.beginOffsetExclusive match { + case Some(offset) => request.withBeginExclusive(offset) + case None => request + } + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/CommandService.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/CommandService.scala new file mode 100644 index 0000000000..86eec53f9a --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/CommandService.scala @@ -0,0 +1,31 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.services + +import com.daml.ledger.api.v2.command_service.* +import com.daml.ledger.api.v2.commands.Commands +import com.digitalasset.canton.ledger.api.benchtool.AuthorizationHelper +import io.grpc.Channel +import org.slf4j.LoggerFactory + +import scala.concurrent.{ExecutionContext, Future} +import scala.util.control.NonFatal + +class CommandService(channel: Channel, authorizationToken: Option[String]) { + private val logger = LoggerFactory.getLogger(getClass) + private val service: CommandServiceGrpc.CommandServiceStub = + AuthorizationHelper.maybeAuthedService(authorizationToken)(CommandServiceGrpc.stub(channel)) + + def submitAndWait( + commands: Commands + )(implicit ec: ExecutionContext): Future[SubmitAndWaitResponse] = + service + .submitAndWait(new SubmitAndWaitRequest(Some(commands))) + .recoverWith { case NonFatal(ex) => + Future.failed { + logger.error(s"Command submission error. 
Details: ${ex.getLocalizedMessage}", ex) + ex + } + } +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/CommandSubmissionService.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/CommandSubmissionService.scala new file mode 100644 index 0000000000..3b90f56d38 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/CommandSubmissionService.scala @@ -0,0 +1,33 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.services + +import com.daml.ledger.api.v2.command_submission_service.* +import com.daml.ledger.api.v2.commands.Commands +import com.digitalasset.canton.ledger.api.benchtool.AuthorizationHelper +import io.grpc.Channel +import org.slf4j.LoggerFactory + +import scala.concurrent.{ExecutionContext, Future} +import scala.util.control.NonFatal + +class CommandSubmissionService(channel: Channel, authorizationToken: Option[String]) { + private val logger = LoggerFactory.getLogger(getClass) + + private val service: CommandSubmissionServiceGrpc.CommandSubmissionServiceStub = + AuthorizationHelper.maybeAuthedService(authorizationToken)( + CommandSubmissionServiceGrpc.stub(channel) + ) + + def submit(commands: Commands)(implicit ec: ExecutionContext): Future[SubmitResponse] = + service + .submit(new SubmitRequest(Some(commands))) + .recoverWith { case NonFatal(ex) => + Future.failed { + logger.error(s"Command submission error. Details: ${ex.getLocalizedMessage}", ex) + ex + } + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/LedgerApiServices.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/LedgerApiServices.scala new file mode 100644 index 0000000000..1a6900a6fa --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/LedgerApiServices.scala @@ -0,0 +1,55 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.services + +import com.digitalasset.canton.ledger.api.benchtool.AuthorizationHelper +import io.grpc.Channel + +import scala.concurrent.{ExecutionContext, Future} + +class LedgerApiServices( + channel: Channel, + userId: String, + authorizationHelper: Option[AuthorizationHelper], +) { + + private val authorizationToken: Option[String] = authorizationHelper.map(_.tokenFor(userId)) + val commandService = new CommandService(channel, authorizationToken = authorizationToken) + val commandSubmissionService = + new CommandSubmissionService(channel, authorizationToken = authorizationToken) + val commandCompletionService = + new CommandCompletionService( + channel, + userId = userId, + authorizationToken = authorizationToken, + ) + val packageManagementService = + new PackageManagementService(channel, authorizationToken = authorizationToken) + val pruningService = new PruningService(channel, authorizationToken = authorizationToken) + val packageService = new PackageService(channel, authorizationToken = authorizationToken) + val partyManagementService = + new PartyManagementService(channel, authorizationToken = authorizationToken) + + val stateService = new StateService(channel, authorizationToken = authorizationToken) + val updateService = new UpdateService(channel, authorizationToken = authorizationToken) + val userManagementService = new UserManagementService(channel, authorizationToken) +} + +object LedgerApiServices { + + /** @return + * factory function for creating optionally authorized services for a given userId + */ + def forChannel( + authorizationHelper: Option[AuthorizationHelper], + channel: Channel, + )(implicit ec: ExecutionContext): Future[String => LedgerApiServices] = Future { + (userId: String) => + new LedgerApiServices( + channel, + userId = userId, + authorizationHelper = authorizationHelper, + ) + } +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/PackageManagementService.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/PackageManagementService.scala new file mode 100644 index 0000000000..eaa89b17a3 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/PackageManagementService.scala @@ -0,0 +1,37 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.services + +import com.daml.ledger.api.v2.admin.package_management_service.{ + PackageManagementServiceGrpc, + UploadDarFileRequest, +} +import com.digitalasset.canton.ledger.api.UploadDarVettingChange +import com.digitalasset.canton.ledger.api.benchtool.AuthorizationHelper +import com.google.protobuf.ByteString +import io.grpc.Channel + +import scala.concurrent.{ExecutionContext, Future} + +class PackageManagementService(channel: Channel, authorizationToken: Option[String]) { + private val service = + AuthorizationHelper.maybeAuthedService(authorizationToken)( + PackageManagementServiceGrpc.stub(channel) + ) + + def uploadDar(bytes: ByteString, submissionId: String)(implicit + ec: ExecutionContext + ): Future[Unit] = + service + .uploadDarFile( + new UploadDarFileRequest( + bytes, + submissionId, + UploadDarVettingChange.default.toProto, + synchronizerId = "", + ) + ) + .map(_ => ()) + +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/PackageService.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/PackageService.scala new file mode 100644 index 0000000000..422eafec6d --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/PackageService.scala @@ -0,0 +1,21 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.services + +import com.daml.ledger.api.v2.package_service.{GetPackageResponse, ListPackagesResponse, *} +import com.digitalasset.canton.ledger.api.benchtool.AuthorizationHelper +import io.grpc.Channel + +import scala.concurrent.Future + +class PackageService(channel: Channel, authorizationToken: Option[String]) { + private val service = + AuthorizationHelper.maybeAuthedService(authorizationToken)(PackageServiceGrpc.stub(channel)) + + def getPackage(packageId: String): Future[GetPackageResponse] = + service.getPackage(GetPackageRequest(packageId = packageId)) + + def listPackages(): Future[ListPackagesResponse] = + service.listPackages(ListPackagesRequest()) +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/PartyManagementService.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/PartyManagementService.scala new file mode 100644 index 0000000000..b947c5ccf9 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/PartyManagementService.scala @@ -0,0 +1,66 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.services + +import com.daml.ledger.api.v2.admin.party_management_service.{ + AllocatePartyRequest, + ListKnownPartiesRequest, + PartyManagementServiceGrpc, +} +import com.daml.ledger.javaapi.data.Party +import com.digitalasset.canton.ledger.api.benchtool.AuthorizationHelper +import io.grpc.Channel +import org.slf4j.{Logger, LoggerFactory} + +import scala.concurrent.{ExecutionContext, Future} +import scala.util.{Failure, Success} + +class PartyManagementService(channel: Channel, authorizationToken: Option[String]) { + private val logger: Logger = LoggerFactory.getLogger(getClass) + private val service: PartyManagementServiceGrpc.PartyManagementServiceStub = + AuthorizationHelper.maybeAuthedService(authorizationToken)( + PartyManagementServiceGrpc.stub(channel) + ) + + def listKnownParties()(implicit ec: ExecutionContext): Future[Set[String]] = + service + .listKnownParties( + new ListKnownPartiesRequest( + pageToken = "", + pageSize = 0, + identityProviderId = "", + ) + ) + .map(_.partyDetails.map(_.party).toSet) + + def allocateParty(hint: String, synchronizerId: Option[String] = None)(implicit + ec: ExecutionContext + ): Future[Party] = + service + .allocateParty( + AllocatePartyRequest( + partyIdHint = hint, + localMetadata = None, + identityProviderId = "", + synchronizerId = synchronizerId.getOrElse(""), + userId = "", + ) + ) + .transformWith { + case Success(response) => + Future.successful { + val party = new Party(response.partyDetails.get.party) + logger.info(s"Allocated party: $party") + party + } + case Failure(exception) => + Future.failed { + logger.error( + s"Error during party allocation. Details: ${exception.getLocalizedMessage}", + exception, + ) + exception + } + } +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/PruningService.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/PruningService.scala new file mode 100644 index 0000000000..704a3d3fdc --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/PruningService.scala @@ -0,0 +1,24 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.services + +import com.daml.ledger.api.v2.admin.participant_pruning_service.{ + ParticipantPruningServiceGrpc, + PruneRequest, + PruneResponse, +} +import com.digitalasset.canton.ledger.api.benchtool.AuthorizationHelper +import io.grpc.Channel + +import scala.concurrent.Future + +class PruningService(channel: Channel, authorizationToken: Option[String]) { + private val service: ParticipantPruningServiceGrpc.ParticipantPruningServiceStub = + AuthorizationHelper.maybeAuthedService(authorizationToken)( + ParticipantPruningServiceGrpc.stub(channel) + ) + + def prune(request: PruneRequest): Future[PruneResponse] = + service.prune(request) +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/StateService.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/StateService.scala new file mode 100644 index 0000000000..21c5d1431e --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/StateService.scala @@ -0,0 +1,56 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.services + +import com.daml.ledger.api.v2.state_service.{ + GetActiveContractsRequest, + GetActiveContractsResponse, + GetLedgerEndRequest, + StateServiceGrpc, +} +import com.digitalasset.canton.ledger.api.benchtool.AuthorizationHelper +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig +import com.digitalasset.canton.ledger.api.benchtool.util.ObserverWithResult +import io.grpc.Channel +import org.slf4j.LoggerFactory + +import scala.concurrent.{ExecutionContext, Future} + +final class StateService( + channel: Channel, + authorizationToken: Option[String], +) { + private val logger = LoggerFactory.getLogger(getClass) + private val service: StateServiceGrpc.StateServiceStub = + AuthorizationHelper.maybeAuthedService(authorizationToken)(StateServiceGrpc.stub(channel)) + + def getActiveContracts[Result]( + config: WorkflowConfig.StreamConfig.ActiveContractsStreamConfig, + observer: ObserverWithResult[GetActiveContractsResponse, Result], + )(implicit ec: ExecutionContext): Future[Result] = + getLedgerEnd().flatMap { offset => + getActiveContractsRequest(config, offset) match { + case Right(request) => + service.getActiveContracts(request, observer) + logger.info("Started fetching active contracts") + observer.result + case Left(error) => + Future.failed(new RuntimeException(error)) + } + } + + private def getActiveContractsRequest( + config: WorkflowConfig.StreamConfig.ActiveContractsStreamConfig, + activeAt: Long, + ): Either[String, GetActiveContractsRequest] = + StreamFilters.eventFormat(config.filters).map { eventFormat => + GetActiveContractsRequest.defaultInstance + .withEventFormat(eventFormat) + .withActiveAtOffset(activeAt) + } + + def getLedgerEnd()(implicit ec: ExecutionContext): Future[Long] = for { + response <- service.getLedgerEnd(new GetLedgerEndRequest()) + } yield response.offset +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/StreamFilters.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/StreamFilters.scala new file mode 
100644 index 0000000000..08112cc61d --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/StreamFilters.scala @@ -0,0 +1,98 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.services + +import com.daml.ledger.api.v2.transaction_filter.{ + CumulativeFilter, + EventFormat, + Filters, + InterfaceFilter, + TemplateFilter, + WildcardFilter, +} +import com.daml.ledger.api.v2.value.Identifier +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig + +object StreamFilters { + + def eventFormat( + filters: List[WorkflowConfig.StreamConfig.PartyFilter] + ): Either[String, EventFormat] = + toEitherList(filters.map(toTransactionFilter)) + .map { byPartyFilters => + EventFormat.defaultInstance.withFiltersByParty(byPartyFilters.toMap) + } + + private def toTransactionFilter( + filter: WorkflowConfig.StreamConfig.PartyFilter + ): Either[String, (String, Filters)] = + ((filter.templates, filter.interfaces) match { + case (Nil, Nil) => + Right(Filters.defaultInstance) + case (templateIds, interfaceIds) => + for { + tplIds <- templateIdentifiers(templateIds) + ifaceIds <- templateIdentifiers(interfaceIds) + } yield { + val interfaceFilters = + ifaceIds.map(interfaceId => + InterfaceFilter( + interfaceId = Some(interfaceId), + includeInterfaceView = true, + includeCreatedEventBlob = false, + ) + ) + val templateFilters = + tplIds.map(templateId => + TemplateFilter( + templateId = Some(templateId), + includeCreatedEventBlob = false, + ) + ) + val templateWildcardFilterO = + Option.when(tplIds.isEmpty && ifaceIds.isEmpty)( + WildcardFilter( + includeCreatedEventBlob = false + ) + ) + + Filters.defaultInstance.withCumulative( + interfaceFilters.map(CumulativeFilter.defaultInstance.withInterfaceFilter) ++ + templateFilters.map(CumulativeFilter.defaultInstance.withTemplateFilter) ++ + (templateWildcardFilterO match { + case Some(templateWildcardFilter) => + Seq(CumulativeFilter.defaultInstance.withWildcardFilter(templateWildcardFilter)) + case None => Seq.empty + }) + ) + } + }).map(templateFilters => filter.party -> templateFilters) + + private def templateIdentifiers(templates: List[String]): Either[String, List[Identifier]] = + toEitherList(templates.map(templateIdFromString)) + + private def templateIdFromString(fullyQualifiedTemplateId: String): Either[String, Identifier] = + fullyQualifiedTemplateId + .split(':') + .toList match { + case packageId :: moduleName :: entityName :: Nil => + Right( + Identifier.defaultInstance + .withEntityName(entityName) + .withModuleName(moduleName) + .withPackageId(packageId) + ) + case _ => + Left(s"Invalid template id: $fullyQualifiedTemplateId") + } + + private def toEitherList[L, R](l: List[Either[L, R]]): Either[L, List[R]] = + l.foldLeft[Either[L, List[R]]](Right(List.empty[R])) { case (acc, next) => + for { + elems <- acc + elem <- next + } yield elem :: elems + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/UpdateService.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/UpdateService.scala new file mode 100644 index 0000000000..348d9798a5 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/UpdateService.scala 
@@ -0,0 +1,109 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.services + +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.{ + TRANSACTION_SHAPE_ACS_DELTA, + TRANSACTION_SHAPE_LEDGER_EFFECTS, +} +import com.daml.ledger.api.v2.transaction_filter.{TransactionFormat, TransactionShape, UpdateFormat} +import com.daml.ledger.api.v2.update_service.{ + GetUpdatesRequest, + GetUpdatesResponse, + UpdateServiceGrpc, +} +import com.digitalasset.canton.ledger.api.benchtool.AuthorizationHelper +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig +import com.digitalasset.canton.ledger.api.benchtool.util.ObserverWithResult +import io.grpc.Channel +import io.grpc.stub.StreamObserver +import org.slf4j.LoggerFactory + +import scala.concurrent.Future +import scala.util.{Failure, Success, Try} + +final class UpdateService( + channel: Channel, + authorizationToken: Option[String], +) { + private val logger = LoggerFactory.getLogger(getClass) + private val service: UpdateServiceGrpc.UpdateServiceStub = + AuthorizationHelper.maybeAuthedService(authorizationToken)(UpdateServiceGrpc.stub(channel)) + + def transactions[Result]( + config: WorkflowConfig.StreamConfig.TransactionsStreamConfig, + observer: ObserverWithResult[GetUpdatesResponse, Result], + ): Future[Result] = + transactionsWithoutResult(config, observer) match { + case Failure(exception) => Future.failed(exception) + case Success(()) => observer.result + } + + def transactionsWithoutResult( + config: WorkflowConfig.StreamConfig.TransactionsStreamConfig, + observer: StreamObserver[GetUpdatesResponse], + ): Try[Unit] = getUpdatesRequest( + filters = config.filters, + transactionShape = TRANSACTION_SHAPE_ACS_DELTA, + beginOffsetExclusive = config.beginOffsetExclusive, + endOffsetInclusive = config.endOffsetInclusive, + ) match { + case Right(request) => + service.getUpdates(request, observer) + logger.info("Started fetching transactions") + Success(()) + case Left(error) => + Failure(new RuntimeException(error)) + } + + def transactionsLedgerEffects[Result]( + config: WorkflowConfig.StreamConfig.TransactionLedgerEffectsStreamConfig, + observer: ObserverWithResult[ + GetUpdatesResponse, + Result, + ], + ): Future[Result] = + getUpdatesRequest( + filters = config.filters, + beginOffsetExclusive = config.beginOffsetExclusive, + endOffsetInclusive = config.endOffsetInclusive, + transactionShape = TRANSACTION_SHAPE_LEDGER_EFFECTS, + ) match { + case Right(request) => + service.getUpdates(request, observer) + logger.info("Started fetching ledger effects transactions") + observer.result + case Left(error) => + Future.failed(new RuntimeException(error)) + } + + private def getUpdatesRequest( + filters: List[WorkflowConfig.StreamConfig.PartyFilter], + transactionShape: TransactionShape, + beginOffsetExclusive: Long, + endOffsetInclusive: Option[Long], + ): Either[String, GetUpdatesRequest] = + StreamFilters + .eventFormat(filters) + .map { eventFormat => + GetUpdatesRequest.defaultInstance + .withBeginExclusive(beginOffsetExclusive) + .withUpdateFormat( + UpdateFormat( + includeTransactions = Some( + TransactionFormat( + eventFormat = Some(eventFormat), + transactionShape = transactionShape, + ) + ), + includeReassignments = None, + includeTopologyEvents = None, + ) + ) + .update( + _.optionalEndInclusive := endOffsetInclusive + ) + } + +} diff --git 
a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/UserManagementService.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/UserManagementService.scala new file mode 100644 index 0000000000..24ddb2e98f --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/services/UserManagementService.scala @@ -0,0 +1,93 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.services + +import com.daml.ledger.api.v2.admin.user_management_service.{ + CreateUserRequest, + GrantUserRightsRequest, + Right as UserRight, + User, + UserManagementServiceGrpc, +} +import com.digitalasset.base.error.utils.ErrorDetails +import com.digitalasset.canton.ledger.api.benchtool.AuthorizationHelper +import com.digitalasset.canton.ledger.error.groups.UserManagementServiceErrors +import io.grpc.{Channel, StatusRuntimeException} +import org.slf4j.{Logger, LoggerFactory} + +import scala.concurrent.{ExecutionContext, Future} + +class UserManagementService(channel: Channel, authorizationToken: Option[String]) { + private val logger: Logger = LoggerFactory.getLogger(getClass) + private val service: UserManagementServiceGrpc.UserManagementServiceStub = + AuthorizationHelper.maybeAuthedService(authorizationToken)( + UserManagementServiceGrpc.stub(channel) + ) + + def createUserOrGrantRightsToExisting( + userId: String, + observerPartyNames: Seq[String], + signatoryPartyName: String, + )(implicit ec: ExecutionContext): Future[Unit] = { + val rights = userRights(observerPartyNames, signatoryPartyName) + createUser(userId, rights).recoverWith { + case e: StatusRuntimeException + if ErrorDetails.matches(e, UserManagementServiceErrors.UserAlreadyExists) => + logger.info( + s"Benchmark user already exists (received error: ${e.getStatus.getDescription}), so granting rights to the existing user instead." 
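+ // NOTE: falling back to granting rights makes user setup effectively idempotent across repeated benchtool runs.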
+ ) + grantUserRights(userId, rights) + } + } + + private def createUser( + userId: String, + rights: Seq[UserRight], + )(implicit ec: ExecutionContext): Future[Unit] = { + logger.info(s"Creating a user: '$userId' with rights: ${rights.mkString(", ")}") + service + .createUser( + CreateUserRequest( + user = Some( + User( + id = userId, + primaryParty = "", + isDeactivated = false, + metadata = None, + identityProviderId = "", + ) + ), + rights = rights, + ) + ) + .map(_ => ()) + } + + private def grantUserRights( + userId: String, + rights: Seq[UserRight], + )(implicit ec: ExecutionContext): Future[Unit] = { + logger.info(s"Granting rights: ${rights.mkString(", ")} to the user: $userId") + service + .grantUserRights( + GrantUserRightsRequest( + userId = userId, + rights = rights, + identityProviderId = "", + ) + ) + .map(_ => ()) + } + + private def userRights( + observerPartyNames: Seq[String], + signatoryPartyName: String, + ): Seq[UserRight] = { + val actAs = UserRight(UserRight.Kind.CanActAs(UserRight.CanActAs(signatoryPartyName))) + val readAs = observerPartyNames.map(observerPartyName => + UserRight(UserRight.Kind.CanReadAs(UserRight.CanReadAs(observerPartyName))) + ) + actAs +: readAs + } +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/ActiveContractKeysPool.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/ActiveContractKeysPool.scala new file mode 100644 index 0000000000..5d690d5d7f --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/ActiveContractKeysPool.scala @@ -0,0 +1,48 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +import com.digitalasset.canton.discard.Implicits.DiscardOps + +import scala.collection.mutable + +/** Keeps track of contract keys of contracts that haven't been used up (archived) yet. Allows + * selecting the next contract key to use up at random. + */ +final class ActiveContractKeysPool[T](randomnessProvider: RandomnessProvider) { + + private val poolsPerTemplate = mutable.Map.empty[String, DepletingUniformRandomPool[T]] + + def getAndRemoveContractKey(templateName: String): T = synchronized { + val pool = poolsPerTemplate(templateName) + pool.pop() + } + + def addContractKey(templateName: String, value: T): Unit = synchronized { + if (!poolsPerTemplate.contains(templateName)) { + poolsPerTemplate.put(templateName, new DepletingUniformRandomPool(randomnessProvider)).discard + } + val pool = poolsPerTemplate(templateName) + pool.put(value) + } +} + +/** A pool of elements supporting two operations: + * 1. pop() - select an element uniformly at random and remove it from the pool. + * 1. 
put() - add an element to the pool + */ +final class DepletingUniformRandomPool[V](randomnessProvider: RandomnessProvider) { + private val buffer = mutable.ArrayBuffer.empty[V] + + def pop(): V = { + val v = buffer.last + buffer.remove(index = buffer.size - 1, count = 1) + v + } + + def put(v: V): Unit = { + val i = randomnessProvider.randomNatural(buffer.size + 1) + buffer.insert(index = i, elem = v) + } +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/AllocatedParties.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/AllocatedParties.scala new file mode 100644 index 0000000000..d891cb30cb --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/AllocatedParties.scala @@ -0,0 +1,56 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +import com.daml.ledger.javaapi.data.Party + +final case class AllocatedParties( + signatoryO: Option[Party], + observers: List[Party], + divulgees: List[Party], + extraSubmitters: List[Party], + observerPartySets: List[AllocatedPartySet], +) { + val allAllocatedParties: List[Party] = + signatoryO.toList ++ observers ++ divulgees ++ extraSubmitters ++ observerPartySets.flatMap( + _.parties + ) + + /** NOTE: This is guaranteed to be safe only for runs with synthetic data generated by Benchtool + */ + def signatory: Party = signatoryO.getOrElse(sys.error("Signatory party not found!")) +} + +object AllocatedParties { + + /** @param partyPrefixesForPartySets + * get converted to main party prefixes and then used to find party sets + */ + def forExistingParties( + parties: List[String], + partyPrefixesForPartySets: List[String], + ): AllocatedParties = { + val partiesByMainPrefixMap: Map[String, List[Party]] = parties + .groupBy(Names.parsePartyNameMainPrefix) + .view + .mapValues(_.map(new Party(_))) + .toMap + val observerPartySets = for { + partySetPrefix <- partyPrefixesForPartySets.map(Names.parsePartyNameMainPrefix) + parties <- partiesByMainPrefixMap.get(partySetPrefix) + } yield AllocatedPartySet( + mainPartyNamePrefix = partySetPrefix, + parties = parties, + ) + val signatories = partiesByMainPrefixMap.getOrElse(Names.SignatoryPrefix, List.empty) + AllocatedParties( + // NOTE: For synthetic streams signatory is always present + signatoryO = signatories.headOption, + observers = partiesByMainPrefixMap.getOrElse(Names.ObserverPrefix, List.empty), + divulgees = partiesByMainPrefixMap.getOrElse(Names.DivulgeePrefix, List.empty), + extraSubmitters = partiesByMainPrefixMap.getOrElse(Names.ExtraSubmitterPrefix, List.empty), + observerPartySets = observerPartySets, + ) + } +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/AllocatedPartySet.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/AllocatedPartySet.scala new file mode 100644 index 0000000000..a225edd550 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/AllocatedPartySet.scala @@ -0,0 +1,19 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +import com.daml.ledger.javaapi.data.Party + +final case class AllocatedPartySet( + mainPartyNamePrefix: String, + parties: List[Party], +) { + { + val offenders = parties.iterator.filterNot(_.getValue.startsWith(mainPartyNamePrefix)).toList + require( + offenders.isEmpty, + s"All party names in party-set '$mainPartyNamePrefix' must start with prefix $mainPartyNamePrefix. Found offenders: $offenders", + ) + } +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/BenchtoolTestsPackageInfo.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/BenchtoolTestsPackageInfo.scala new file mode 100644 index 0000000000..e2ede9ea06 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/BenchtoolTestsPackageInfo.scala @@ -0,0 +1,15 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +import com.digitalasset.canton.ledger.api.benchtool.infrastructure.TestDars +import com.digitalasset.daml.lf.data.Ref + +final case class BenchtoolTestsPackageInfo private (packageRef: Ref.PackageRef) + +object BenchtoolTestsPackageInfo { + + val StaticDefault: BenchtoolTestsPackageInfo = + BenchtoolTestsPackageInfo(packageRef = TestDars.benchtoolDarPackageRef) +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/CommandGenerator.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/CommandGenerator.scala new file mode 100644 index 0000000000..dcec75980e --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/CommandGenerator.scala @@ -0,0 +1,38 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +import com.daml.ledger.api.v2.commands.Command +import com.daml.ledger.javaapi.data.Party +import org.apache.pekko.NotUsed +import org.apache.pekko.stream.scaladsl.Source + +import scala.concurrent.{ExecutionContext, Future} +import scala.util.Try + +trait CommandGenerator { + val batchesPerInstance: Int + def commandBatchSource( + numContractsToGenerate: Int, + contractCommandGenerationParallelism: Int, + )(implicit ec: ExecutionContext): Source[(Int, Seq[Command]), NotUsed] + def nextUserId(): String + def nextExtraCommandSubmitters(): List[Party] +} + +trait SimpleCommandGenerator extends CommandGenerator { + def next(): Try[Seq[Command]] + + override val batchesPerInstance: Int = 1 + override def commandBatchSource(numContractsToGenerate: Int, commandGenerationParallelism: Int)( + implicit ec: ExecutionContext + ): Source[(Int, Seq[Command]), NotUsed] = + Source + .fromIterator(() => (1 to numContractsToGenerate).iterator) + .mapAsync(commandGenerationParallelism)(index => + Future.fromTry( + next().map(cmd => index -> cmd) + ) + ) +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/CommandSubmitter.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/CommandSubmitter.scala new file mode 100644 index 0000000000..fd66c18c3f --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/CommandSubmitter.scala @@ -0,0 +1,293 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +import com.daml.ledger.api.v2.commands.Commands.DeduplicationPeriod.Empty +import com.daml.ledger.api.v2.commands.{Command, Commands} +import com.daml.ledger.javaapi.data.Party +import com.daml.ledger.resources.{ResourceContext, ResourceOwner} +import com.daml.metrics.api.MetricHandle.{LabeledMetricsFactory, Timer} +import com.daml.metrics.api.{MetricInfo, MetricName, MetricQualification} +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.SubmissionConfig +import com.digitalasset.canton.ledger.api.benchtool.infrastructure.TestDars +import com.digitalasset.canton.ledger.api.benchtool.metrics.LatencyMetric.LatencyNanos +import com.digitalasset.canton.ledger.api.benchtool.metrics.MetricsManager +import com.digitalasset.canton.ledger.api.benchtool.services.LedgerApiServices +import io.grpc.Status +import org.apache.pekko.actor.ActorSystem +import org.apache.pekko.stream.scaladsl.Sink +import org.apache.pekko.stream.{Materializer, OverflowStrategy} +import org.slf4j.LoggerFactory + +import scala.concurrent.duration.DurationInt +import scala.concurrent.{ExecutionContext, Future} +import scala.util.chaining.* +import scala.util.control.NonFatal + +final case class CommandSubmitter( + names: Names, + benchtoolUserServices: LedgerApiServices, + adminServices: LedgerApiServices, + partyAllocating: PartyAllocating, + metricsFactory: LabeledMetricsFactory, + metricsManager: MetricsManager[LatencyNanos], + waitForSubmission: Boolean, + commandGenerationParallelism: Int = 8, + maxInFlightCommandsOverride: Option[Int] = None, +) { + private val logger = LoggerFactory.getLogger(getClass) + private val submitLatencyTimer = if 
(waitForSubmission) { + metricsFactory.timer( + MetricInfo( + MetricName("daml_submit_and_wait_latency"), + "Submit and wait latency", + MetricQualification.Debug, + ) + ) + } else { + metricsFactory.timer( + MetricInfo(MetricName("daml_submit_latency"), "Submit latency", MetricQualification.Debug) + ) + } + + def prepare(config: SubmissionConfig)(implicit + ec: ExecutionContext + ): Future[AllocatedParties] = { + logger.info(s"Identifier suffix: ${names.identifierSuffix}") + (for { + allocatedParties <- partyAllocating.allocateParties(config) + _ <- uploadTestDars() + } yield allocatedParties) + .recoverWith { case NonFatal(ex) => + logger.error( + s"Command submission preparation failed. Details: ${ex.getLocalizedMessage}", + ex, + ) + Future.failed(CommandSubmitter.CommandSubmitterError(ex.getLocalizedMessage, ex)) + } + } + + def submitSingleBatch( + commandId: String, + actAs: Seq[Party], + commands: Seq[Command], + )(implicit + ec: ExecutionContext + ): Future[Unit] = + submit( + id = commandId, + actAs = actAs, + commands = commands, + userId = names.benchtoolUserId, + useSubmitAndWait = true, + ) + + def generateAndSubmit( + generator: CommandGenerator, + config: SubmissionConfig, + baseActAs: List[Party], + maxInFlightCommands: Int, + submissionBatchSize: Int, + )(implicit ec: ExecutionContext): Future[Unit] = { + logger.info("Generating contracts...") + (for { + _ <- submitCommands( + generator = generator, + config = config, + maxInFlightCommands = maxInFlightCommands, + submissionBatchSize = submissionBatchSize, + baseActAs = baseActAs, + ) + } yield { + logger.info("Commands submitted successfully.") + () + }) + .recoverWith { case NonFatal(ex) => + logger.error(s"Command submission failed. Details: ${ex.getLocalizedMessage}", ex) + Future.failed(CommandSubmitter.CommandSubmitterError(ex.getLocalizedMessage, ex)) + } + } + + private def uploadDar(dar: TestDars.DarFile, submissionId: String)(implicit + ec: ExecutionContext + ): Future[Unit] = + adminServices.packageManagementService.uploadDar( + bytes = dar.bytes, + submissionId = submissionId, + ) + + private def uploadTestDars()(implicit ec: ExecutionContext): Future[Unit] = { + logger.info("Uploading dars...") + for { + dars <- Future.delegate(Future.fromTry(TestDars.readAll())) + _ <- Future.sequence { + dars.zipWithIndex + .map { case (dar, index) => + uploadDar(dar, names.mainPackageId(index)) + } + } + } yield { + logger.info("Uploading dars completed") + } + } + + private def submit( + id: String, + actAs: Seq[Party], + commands: Seq[Command], + userId: String, + useSubmitAndWait: Boolean, + )(implicit + ec: ExecutionContext + ): Future[Unit] = { + def makeCommands(commands: Seq[Command]) = + new Commands( + userId = userId, + commandId = id, + actAs = actAs.map(_.getValue), + commands = commands, + workflowId = names.workflowId, + minLedgerTimeAbs = None, + minLedgerTimeRel = None, + readAs = Nil, + submissionId = "", + disclosedContracts = Nil, + synchronizerId = "", + packageIdSelectionPreference = Nil, + prefetchContractKeys = Nil, + deduplicationPeriod = Empty, + ) + + (if (useSubmitAndWait) { + benchtoolUserServices.commandService.submitAndWait(makeCommands(commands)) + } else { + benchtoolUserServices.commandSubmissionService.submit(makeCommands(commands)) + }).map(_ => ()) + } + + private def submitCommands( + generator: CommandGenerator, + config: SubmissionConfig, + baseActAs: List[Party], + maxInFlightCommands: Int, + submissionBatchSize: Int, + )(implicit + ec: ExecutionContext + ): Future[Unit] = { + 
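+ // Sketch of the flow below: command batches are generated with commandGenerationParallelism, grouped into + // submissions of submissionBatchSize, buffered up to maxInFlightCommands with backpressure, and submitted with bounded parallelism.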
implicit val resourceContext: ResourceContext = ResourceContext(ec) + + val numBatches: Int = config.numberOfInstances / submissionBatchSize + val progressMeter = CommandSubmitter.ProgressMeter(config.numberOfInstances) + // Output a log line roughly once per 10% progress, or once every 10000 submissions (whichever comes first) + val progressLogInterval = math.min(config.numberOfInstances / 10 + 1, 10000) + val progressLoggingSink = { + var lastInterval = 0 + Sink.foreach[Int](index => + if (index / progressLogInterval != lastInterval) { + lastInterval = index / progressLogInterval + logger.info(progressMeter.getProgress(index)) + } + ) + + } + logger.info( + s"Submitting commands ($numBatches commands, $submissionBatchSize contracts per command)..." + ) + materializerOwner() + .use { implicit materializer => + for { + _ <- generator + .commandBatchSource(config.numberOfInstances, commandGenerationParallelism) + .groupedWithin(submissionBatchSize, 1.minute) + .map(cmds => cmds.head._1 -> cmds.map(_._2).toList) + .buffer(maxInFlightCommands, OverflowStrategy.backpressure) + .mapAsync(maxInFlightCommandsOverride.getOrElse(maxInFlightCommands)) { + case (index, commands) => + timed(submitLatencyTimer, metricsManager) { + submit( + id = names.commandId(index), + actAs = baseActAs ++ generator.nextExtraCommandSubmitters(), + commands = commands.flatten, + userId = generator.nextUserId(), + useSubmitAndWait = config.waitForSubmission, + ) + } + .map(_ => index + commands.length - 1) + .recoverWith { + case e: io.grpc.StatusRuntimeException + if e.getStatus.getCode == Status.Code.ABORTED => + logger.info(s"Flow rate limited at index $index: ${e.getLocalizedMessage}") + Thread.sleep(10) // Small back-off period + Future.successful(index + commands.length - 1) + case ex => + logger.error( + s"Command submission failed. Details: ${ex.getLocalizedMessage}", + ex, + ) + Future.failed( + CommandSubmitter.CommandSubmitterError(ex.getLocalizedMessage, ex) + ) + } + } + .runWith(progressLoggingSink) + } yield () + } + } + + private def materializerOwner(): ResourceOwner[Materializer] = + for { + actorSystem <- ResourceOwner.forActorSystem(() => ActorSystem("CommandSubmissionSystem")) + materializer <- ResourceOwner.forMaterializer(() => Materializer(actorSystem)) + } yield materializer + + private def timed[O](timer: Timer, metricsManager: MetricsManager[LatencyNanos])( + f: => Future[O] + )(implicit ec: ExecutionContext) = { + val ctx = timer.startAsync() + val startNanos = System.nanoTime() + f.map(_.tap { _ => + ctx.stop() + val endNanos = System.nanoTime() + metricsManager.sendNewValue(endNanos - startNanos) + }) + } +} + +object CommandSubmitter { + final case class CommandSubmitterError(msg: String, cause: Throwable) + extends RuntimeException(msg, cause) + + final case class SubmissionSummary(observers: List[Party]) + + class ProgressMeter(totalItems: Int) { + var startTimeMillis: Long = System.currentTimeMillis() + + def start(): Unit = + startTimeMillis = System.currentTimeMillis() + + def getProgress(index: Int): String = + f"Progress: $index/$totalItems (${percentage(index)}%1.1f%%). Elapsed time: $elapsedSeconds%1.1f s. 
Remaining time: ${remainingSeconds(index)}%1.1f s" + + private def percentage(index: Int): Double = (index.toDouble / totalItems) * 100 + + private def elapsedSeconds: Double = + (System.currentTimeMillis() - startTimeMillis).toDouble / 1000 + + private def remainingSeconds(index: Int): Double = { + val remainingItems = totalItems - index + if (remainingItems > 0) { + val timePerItem: Double = elapsedSeconds / index + remainingItems * timePerItem + } else { + 0.0 + } + } + } + + object ProgressMeter { + def apply(totalItems: Int) = new ProgressMeter( + totalItems = totalItems + ) + } +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/Distribution.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/Distribution.scala new file mode 100644 index 0000000000..ccefa60f39 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/Distribution.scala @@ -0,0 +1,24 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +/** Allows pseudo-randomly picking an index out of a set of indices according to their weights. */ +class Distribution[T](weights: List[Int], items: IndexedSeq[T]) { + assert(weights.nonEmpty, "Weights list must not be empty.") + assert(weights.size == items.size, "The number of weights and items must be the same.") + assert(!weights.exists(_ < 1), "Weights must be strictly positive.") + + private val totalWeight: Long = weights.map(_.toLong).sum + private val distribution: List[Double] = + weights.scanLeft(0)((sum, weight) => sum + weight).map(_.toDouble / totalWeight).tail + + def choose(randomDouble: Double): T = items(index(randomDouble)) + + private[submission] def index(randomDouble: Double): Int = { + assert(randomDouble < 1.0, "Given random double must be < 1.0.") + // Consider changing the implementation to use binary search when used on large lists. + distribution.indexWhere(_ > randomDouble) + } + +}
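+
+// A minimal usage sketch (editor's illustration, not part of the original patch):
+//   val d = new Distribution[String](weights = List(1, 3), items = IndexedSeq("a", "b"))
+//   // cumulative thresholds: List(0.25, 1.0)
+//   d.choose(0.1) // "a", since 0.1 < 0.25
+//   d.choose(0.7) // "b", since 0.25 <= 0.7 < 1.0
diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/FibonacciCommandGenerator.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/FibonacciCommandGenerator.scala new file mode 100644 index 0000000000..8fb8c80f15 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/FibonacciCommandGenerator.scala @@ -0,0 +1,70 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.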
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +import com.daml.ledger.api.v2.commands.{Command, CreateAndExerciseCommand} +import com.daml.ledger.api.v2.value.{Record, RecordField, Value} +import com.daml.ledger.javaapi.data.Party +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.FibonacciSubmissionConfig +import com.digitalasset.canton.ledger.api.benchtool.infrastructure.TestDars +import com.digitalasset.daml.lf.data.Ref + +import scala.util.{Success, Try} + +final class FibonacciCommandGenerator( + config: FibonacciSubmissionConfig, + signatory: Party, + names: Names, +) extends SimpleCommandGenerator { + + private val packageRef: Ref.PackageRef = TestDars.benchtoolDarPackageRef + + override def nextUserId(): String = names.benchtoolUserId + + override def nextExtraCommandSubmitters(): List[Party] = List.empty + + def next(): Try[Seq[Command]] = { + val createArguments: Option[Record] = Some( + Record( + None, + Seq( + RecordField( + label = "owner", + value = Some(Value(Value.Sum.Party(signatory.getValue))), + ) + ), + ) + ) + Success( + Seq( + Command( + Command.Command.CreateAndExercise( + CreateAndExerciseCommand( + templateId = + Some(FooTemplateDescriptor.inefficientFibonacciTemplateId(packageRef.toString)), + createArguments = createArguments, + choice = "InefficientFibonacci_Compute", + choiceArgument = Some( + Value( + Value.Sum.Record( + Record( + None, + Seq( + RecordField( + label = "value", + value = Some(Value(Value.Sum.Int64(config.value.toLong))), + ) + ), + ) + ) + ) + ), + ) + ) + ) + ) + ) + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/FooCommandGenerator.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/FooCommandGenerator.scala new file mode 100644 index 0000000000..7aac2612da --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/FooCommandGenerator.scala @@ -0,0 +1,374 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +import com.daml.ledger.api.v2.commands.{Command, CreateCommand, ExerciseByKeyCommand} +import com.daml.ledger.api.v2.value.{Identifier, Record, RecordField, Value} +import com.daml.ledger.javaapi.data.Party +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.FooSubmissionConfig +import com.digitalasset.canton.ledger.api.benchtool.infrastructure.TestDars +import com.digitalasset.canton.ledger.api.benchtool.submission.foo.RandomPartySelecting +import com.digitalasset.daml.lf.data.Ref + +import java.util.concurrent.atomic.AtomicLong +import scala.util.control.NonFatal +import scala.util.{Failure, Try} + +/** @param divulgeesToDivulgerKeyMap + * map whose keys are sorted divulgees lists + */ +final class FooCommandGenerator( + config: FooSubmissionConfig, + allocatedParties: AllocatedParties, + divulgeesToDivulgerKeyMap: Map[Set[Party], Value], + names: Names, + partySelecting: RandomPartySelecting, + randomnessProvider: RandomnessProvider, +) extends SimpleCommandGenerator { + + private val packageRef: Ref.PackageRef = TestDars.benchtoolDarPackageRef + + private val activeContractKeysPool = new ActiveContractKeysPool[Value](randomnessProvider) + + private val contractDescriptions = new Distribution[FooSubmissionConfig.ContractDescription]( + weights = config.instanceDistribution.map(_.weight), + items = config.instanceDistribution.toIndexedSeq, + ) + + private val userIdsDistributionO: Option[Distribution[FooSubmissionConfig.UserId]] = + Option.when(config.userIds.nonEmpty)( + new Distribution( + weights = config.userIds.map(_.weight), + items = config.userIds.toIndexedSeq, + ) + ) + + override def next(): Try[Seq[Command]] = + (for { + (contractDescription, partySelection) <- Try( + ( + pickContractDescription(), + partySelecting.nextPartiesForContracts(), + ) + ) + divulgees = partySelection.divulgees.toSet + createContractPayload <- Try(randomPayload(contractDescription.payloadSizeBytes)) + command = createCommands( + templateDescriptor = FooTemplateDescriptor + .forName(templateName = contractDescription.template, packageId = packageRef.toString), + signatory = allocatedParties.signatory, + observers = partySelection.observers, + divulgerContractKeyO = + if (divulgees.isEmpty) None else divulgeesToDivulgerKeyMap.get(divulgees), + payload = createContractPayload, + ) + } yield command).recoverWith { case NonFatal(ex) => + Failure( + FooCommandGenerator.CommandGeneratorError( + msg = s"Command generation failed. 
Details: ${ex.getLocalizedMessage}", + cause = ex, + ) + ) + } + + override def nextUserId(): String = + userIdsDistributionO.fold( + names.benchtoolUserId + )(userIdsDistribution => + userIdsDistribution + .choose(randomnessProvider.randomDouble()) + .userId + ) + + override def nextExtraCommandSubmitters(): List[Party] = + partySelecting.nextExtraSubmitter() + + private def pickContractDescription(): FooSubmissionConfig.ContractDescription = + contractDescriptions.choose(randomnessProvider.randomDouble()) + + private def createCommands( + templateDescriptor: FooTemplateDescriptor, + signatory: Party, + observers: List[Party], + divulgerContractKeyO: Option[Value], + payload: String, + ): Seq[Command] = { + val contractCounter = FooCommandGenerator.nextContractNumber.getAndIncrement() + val fooKeyId = "foo-" + contractCounter + val fooContractKey = FooCommandGenerator.makeContractKeyValue(signatory, fooKeyId) + // Create events + val createFooCmd = divulgerContractKeyO match { + case Some(divulgerContractKey) => + makeCreateAndDivulgeFooCommand( + divulgerContractKey = divulgerContractKey, + payload = payload, + fooKeyId = fooKeyId, + observers = observers, + templateName = templateDescriptor.name, + ) + case None => + makeCreateFooCommand( + payload = payload, + fooKeyId = fooKeyId, + signatory = signatory, + observers = observers, + templateId = templateDescriptor.templateId, + ) + } + if (config.allowNonTransientContracts) { + activeContractKeysPool.addContractKey(templateDescriptor.name, fooContractKey) + } + // Non-consuming events + val nonconsumingExercises: Seq[Command] = makeNonConsumingExerciseCommands( + templateDescriptor = templateDescriptor, + fooContractKey = fooContractKey, + ) + // Consuming events + val consumingPayloadO: Option[String] = config.consumingExercises + .flatMap(config => + if (randomnessProvider.randomDouble() <= config.probability) { + Some(randomPayload(config.payloadSizeBytes)) + } else None + ) + val consumingExerciseO: Option[Command] = consumingPayloadO.map { payload => + val selectedActiveFooContractKey = + if (config.allowNonTransientContracts) { + // This can choose at random a key of any of the previously generated contracts. + activeContractKeysPool.getAndRemoveContractKey(templateDescriptor.name) + } else { + // This is always the key of the contract created in this batch of commands.
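+                // (Editor's note, not in the original patch: in that case the consuming
+                // exercise archives the contract within the same submission, i.e. the
+                // contract is transient.)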
+ fooContractKey + } + divulgerContractKeyO match { + case Some(divulgerContractKey) => + makeDivulgedConsumeExerciseCommand( + templateDescriptor = templateDescriptor, + fooContractKey = selectedActiveFooContractKey, + payload = payload, + divulgerContractKey = divulgerContractKey, + ) + + case None => + makeExerciseByKeyCommand( + templateId = templateDescriptor.templateId, + choiceName = templateDescriptor.consumingChoiceName, + args = Seq( + RecordField( + label = "exercisePayload", + value = Some(Value(Value.Sum.Text(payload))), + ) + ), + )(contractKey = selectedActiveFooContractKey) + } + } + Seq(createFooCmd) ++ nonconsumingExercises ++ consumingExerciseO.toList + } + + private def makeDivulgedConsumeExerciseCommand( + templateDescriptor: FooTemplateDescriptor, + fooContractKey: Value, + payload: String, + divulgerContractKey: Value, + ): Command = + makeExerciseByKeyCommand( + templateId = FooTemplateDescriptor.divulgerTemplateId(packageId = packageRef.toString), + choiceName = FooTemplateDescriptor.Divulger_DivulgeConsumingExercise, + args = Seq( + RecordField( + label = "fooTemplateName", + value = Some(Value(Value.Sum.Text(templateDescriptor.name))), + ), + RecordField( + label = "fooKey", + value = Some(fooContractKey), + ), + RecordField( + label = "fooConsumingPayload", + value = Some(Value(Value.Sum.Text(payload))), + ), + ), + )(contractKey = divulgerContractKey) + + private def makeNonConsumingExerciseCommands( + templateDescriptor: FooTemplateDescriptor, + fooContractKey: Value, + ): Seq[Command] = { + val nonconsumingExercisePayloads: Seq[String] = + config.nonConsumingExercises.fold(Seq.empty[String]) { config => + var f = config.probability.toInt + if (randomnessProvider.randomDouble() <= config.probability - f) { + f += 1 + } + Seq.fill[String](f)(randomPayload(config.payloadSizeBytes)) + } + val nonconsumingExercises = nonconsumingExercisePayloads.map { payload => + makeExerciseByKeyCommand( + templateId = templateDescriptor.templateId, + choiceName = templateDescriptor.nonconsumingChoiceName, + args = Seq( + RecordField( + label = "exercisePayload", + value = Some(Value(Value.Sum.Text(payload))), + ) + ), + )(contractKey = fooContractKey) + } + nonconsumingExercises + } + + private def makeCreateFooCommand( + payload: String, + fooKeyId: String, + signatory: Party, + observers: List[Party], + templateId: Identifier, + ) = { + val createArguments: Option[Record] = Some( + Record( + None, + Seq( + RecordField( + label = "signatory", + value = Some(Value(Value.Sum.Party(signatory.getValue))), + ), + RecordField( + label = "observers", + value = Some( + Value( + Value.Sum.List( + com.daml.ledger.api.v2.value.List( + observers.map(obs => Value(Value.Sum.Party(obs.getValue))) + ) + ) + ) + ), + ), + RecordField( + label = "payload", + value = Some(Value(Value.Sum.Text(payload))), + ), + RecordField( + label = "keyId", + value = Some(Value(Value.Sum.Text(fooKeyId))), + ), + ), + ) + ) + val c: Command = Command( + command = Command.Command.Create( + CreateCommand( + templateId = Some(templateId), + createArguments = createArguments, + ) + ) + ) + c + } + + private def makeCreateAndDivulgeFooCommand( + divulgerContractKey: Value, + payload: String, + fooKeyId: String, + observers: List[Party], + templateName: String, + ) = + makeExerciseByKeyCommand( + templateId = FooTemplateDescriptor.divulgerTemplateId(packageId = packageRef.toString), + choiceName = FooTemplateDescriptor.Divulger_DivulgeContractImmediate, + args = Seq( + RecordField( + label = "fooObservers", + value = 
Some( + Value( + Value.Sum.List( + com.daml.ledger.api.v2.value.List( + observers.map(obs => Value(Value.Sum.Party(obs.getValue))) + ) + ) + ) + ), + ), + RecordField( + label = "fooPayload", + value = Some(Value(Value.Sum.Text(payload))), + ), + RecordField( + label = "fooKeyId", + value = Some(Value(Value.Sum.Text(fooKeyId))), + ), + RecordField( + label = "fooTemplateName", + value = Some(Value(Value.Sum.Text(templateName))), + ), + ), + )(contractKey = divulgerContractKey) + + def makeExerciseByKeyCommand(templateId: Identifier, choiceName: String, args: Seq[RecordField])( + contractKey: Value + ): Command = { + val choiceArgument = Some( + Value( + Value.Sum.Record( + Record( + None, + args, + ) + ) + ) + ) + val c: Command = Command( + command = Command.Command.ExerciseByKey( + ExerciseByKeyCommand( + templateId = Some(templateId), + contractKey = Some(contractKey), + choice = choiceName, + choiceArgument = choiceArgument, + ) + ) + ) + c + } + + private def randomPayload(sizeBytes: Int): String = + FooCommandGenerator.randomPayload(randomnessProvider, sizeBytes) + +} + +object FooCommandGenerator { + + private[submission] val nextContractNumber = new AtomicLong(0) + + /** @return + * A DAML tuple of type `(Party, Text)` + */ + private[submission] def makeContractKeyValue( + party: Party, + keyId: String, + ): Value = + Value( + Value.Sum.Record( + Record( + None, + Seq( + RecordField( + value = Some(Value(Value.Sum.Party(party.getValue))) + ), + RecordField( + value = Some(Value(Value.Sum.Text(keyId))) + ), + ), + ) + ) + ) + + final case class CommandGeneratorError(msg: String, cause: Throwable) + extends RuntimeException(msg, cause) + + private[submission] def randomPayload( + randomnessProvider: RandomnessProvider, + sizeBytes: Int, + ): String = + randomnessProvider.randomAsciiString(sizeBytes) + +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/FooDivulgerCommandGenerator.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/FooDivulgerCommandGenerator.scala new file mode 100644 index 0000000000..5f87135cdc --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/FooDivulgerCommandGenerator.scala @@ -0,0 +1,115 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +import com.daml.ledger.api.v2.commands.{Command, CreateCommand} +import com.daml.ledger.api.v2.value.{Record, RecordField, Value} +import com.daml.ledger.javaapi.data.Party +import com.digitalasset.canton.ledger.api.benchtool.infrastructure.TestDars +import com.digitalasset.daml.lf.data.Ref + +object FooDivulgerCommandGenerator { + + private val packageRef: Ref.PackageRef = TestDars.benchtoolDarPackageRef + + /** Builds a create Divulger command for each non-empty subset of divulgees such that the created + * Divulger contract can be used to divulge (by immediate divulgence) Foo1, Foo2 or Foo3 + * contracts to the corresponding subset of divulgees. + * + * @param allDivulgees + * Small number of divulgees. At most 5. 
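+ * (Editor's illustration, not in the original patch: for `allDivulgees = [A, B]` the
+ * non-empty subsets are {A}, {B} and {A, B}, so three Divulger contracts are created;
+ * n divulgees yield 2^n - 1 Divulger contracts, hence the cap of 5.)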
+ * @return + * A tuple of: + * - a sequence of create Divulger commands, + * - a map from sets of divulgees (all non-empty subsets of all divulgees) to corresponding + * contract keys, + */ + def makeCreateDivulgerCommands( + divulgingParty: Party, + allDivulgees: List[Party], + ): (List[Command], Map[Set[Party], Value]) = { + require( + allDivulgees.size <= 5, + s"Number of divulgee parties must be at most 5, was: ${allDivulgees.size}.", + ) + def allNonEmptySubsets(divulgees: List[Party]): List[List[Party]] = { + def iter(remaining: List[Party]): List[List[Party]] = + remaining match { + case Nil => List(List.empty) + case head :: tail => + val sub: List[List[Party]] = iter(tail) + val sub2: List[List[Party]] = sub.map(xs => xs.prepended(head)) + sub ::: sub2 + } + iter(divulgees) + .collect { + case parties if parties.nonEmpty => parties.sortBy(_.getValue) + } + } + + def createDivulgerFor(divulgees: List[Party]): (Command, Value) = { + val keyId = "divulger-" + FooCommandGenerator.nextContractNumber.getAndIncrement() + val createDivulgerCmd: Command = makeCreateDivulgerCommand( + divulgees = divulgees, + divulger = divulgingParty, + keyId = keyId, + ) + val divulgerKey: Value = FooCommandGenerator.makeContractKeyValue(divulgingParty, keyId) + (createDivulgerCmd, divulgerKey) + } + + val allSubsets = allNonEmptySubsets(allDivulgees) + val (commands, keys, divulgeeSets) = allSubsets.map { divulgees => + val (cmd, key) = createDivulgerFor(divulgees) + (cmd, key, divulgees.toSet) + }.unzip3 + val divulgeesToContractKeysMap = divulgeeSets.zip(keys).toMap + (commands, divulgeesToContractKeysMap) + } + + private def makeCreateDivulgerCommand( + keyId: String, + divulger: Party, + divulgees: List[Party], + ) = { + val createArguments: Option[Record] = Some( + Record( + None, + Seq( + RecordField( + label = "divulger", + value = Some(Value(Value.Sum.Party(divulger.getValue))), + ), + RecordField( + label = "divulgees", + value = Some( + Value( + Value.Sum.List( + com.daml.ledger.api.v2.value.List( + divulgees.map(d => Value(Value.Sum.Party(d.getValue))) + ) + ) + ) + ), + ), + RecordField( + label = "keyId", + value = Some(Value(Value.Sum.Text(keyId))), + ), + ), + ) + ) + val c: Command = Command( + command = Command.Command.Create( + CreateCommand( + templateId = + Some(FooTemplateDescriptor.divulgerTemplateId(packageId = packageRef.toString)), + createArguments = createArguments, + ) + ) + ) + c + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/FooSubmission.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/FooSubmission.scala new file mode 100644 index 0000000000..b857524c7c --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/FooSubmission.scala @@ -0,0 +1,67 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.FooSubmissionConfig +import com.digitalasset.canton.ledger.api.benchtool.submission.foo.RandomPartySelecting + +import scala.concurrent.{ExecutionContext, Future} + +class FooSubmission( + submitter: CommandSubmitter, + maxInFlightCommands: Int, + submissionBatchSize: Int, + allocatedParties: AllocatedParties, + names: Names, + randomnessProvider: RandomnessProvider, +) { + + def performSubmission(submissionConfig: FooSubmissionConfig)(implicit + ec: ExecutionContext + ): Future[Unit] = { + val (divulgerCmds, divulgeesToDivulgerKeyMap) = FooDivulgerCommandGenerator + .makeCreateDivulgerCommands( + divulgingParty = allocatedParties.signatory, + allDivulgees = allocatedParties.divulgees, + ) + val partySelecting = + new RandomPartySelecting( + config = submissionConfig, + allocatedParties = allocatedParties, + randomnessProvider = randomnessProvider, + ) + for { + _ <- + if (divulgerCmds.nonEmpty) { + require( + divulgeesToDivulgerKeyMap.nonEmpty, + "Map from divulgees to Divulger contract keys must be non-empty.", + ) + submitter.submitSingleBatch( + commandId = "divulgence-setup", + actAs = Seq(allocatedParties.signatory) ++ allocatedParties.divulgees, + commands = divulgerCmds, + ) + } else { + Future.unit + } + generator: CommandGenerator = new FooCommandGenerator( + config = submissionConfig, + divulgeesToDivulgerKeyMap = divulgeesToDivulgerKeyMap, + names = names, + allocatedParties = allocatedParties, + partySelecting = partySelecting, + randomnessProvider = randomnessProvider, + ) + _ <- submitter + .generateAndSubmit( + generator = generator, + config = submissionConfig, + baseActAs = List(allocatedParties.signatory) ++ allocatedParties.divulgees, + maxInFlightCommands = maxInFlightCommands, + submissionBatchSize = submissionBatchSize, + ) + } yield () + } +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/FooTemplateDescriptor.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/FooTemplateDescriptor.scala new file mode 100644 index 0000000000..70ef751a0d --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/FooTemplateDescriptor.scala @@ -0,0 +1,96 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +import com.daml.ledger.api.v2.value.Identifier + +final case class FooTemplateDescriptor( + name: String, + templateId: Identifier, + consumingChoiceName: String, + nonconsumingChoiceName: String, +) + +/** NOTE: Keep me in sync with `Foo.daml` + */ +object FooTemplateDescriptor { + + def inefficientFibonacciTemplateId(packageId: String): Identifier = Identifier( + packageId = packageId, + moduleName = "Bench", + entityName = "InefficientFibonacci", + ) + + def fooI1TemplateId(packageId: String): Identifier = Identifier( + packageId = packageId, + moduleName = "Foo", + entityName = "FooI1", + ) + + def fooI2TemplateId(packageId: String): Identifier = Identifier( + packageId = packageId, + moduleName = "Foo", + entityName = "FooI2", + ) + + def fooI3TemplateId(packageId: String): Identifier = Identifier( + packageId = packageId, + moduleName = "Foo", + entityName = "FooI3", + ) + + def divulgerTemplateId(packageId: String): Identifier = Identifier( + packageId = packageId, + moduleName = "Foo", + entityName = "Divulger", + ) + + def dummyTemplateId(packageId: String): Identifier = Identifier( + packageId = packageId, + moduleName = "Foo", + entityName = "Dummy", + ) + + def Foo1(packageId: String): FooTemplateDescriptor = FooTemplateDescriptor( + name = "Foo1", + templateId = Identifier( + packageId = packageId, + moduleName = "Foo", + entityName = "Foo1", + ), + consumingChoiceName = "Foo1_ConsumingChoice", + nonconsumingChoiceName = "Foo1_NonconsumingChoice", + ) + def Foo2(packageId: String): FooTemplateDescriptor = FooTemplateDescriptor( + name = "Foo2", + templateId = Identifier( + packageId = packageId, + moduleName = "Foo", + entityName = "Foo2", + ), + consumingChoiceName = "Foo2_ConsumingChoice", + nonconsumingChoiceName = "Foo2_NonconsumingChoice", + ) + def Foo3(packageId: String): FooTemplateDescriptor = FooTemplateDescriptor( + name = "Foo3", + templateId = Identifier( + packageId = packageId, + moduleName = "Foo", + entityName = "Foo3", + ), + consumingChoiceName = "Foo3_ConsumingChoice", + nonconsumingChoiceName = "Foo3_NonconsumingChoice", + ) + + def forName(templateName: String, packageId: String): FooTemplateDescriptor = + templateName match { + case "Foo1" => Foo1(packageId) + case "Foo2" => Foo2(packageId) + case "Foo3" => Foo3(packageId) + case other => sys.error(s"Invalid template: $other") + } + + val Divulger_DivulgeContractImmediate = "DivulgeContractImmediate" + val Divulger_DivulgeConsumingExercise = "DivulgeConsumingExercise" +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/Names.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/Names.scala new file mode 100644 index 0000000000..7f619a2846 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/Names.scala @@ -0,0 +1,91 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +/** Collects identifiers used by the benchtool in a single place. 
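+ * (Editor's illustration, not in the original patch: with `uniqueParties = false` the
+ * signatory is named `signatory-<suffix>` while observers become `Obs-0`, `Obs-1`, ...;
+ * with `uniqueParties = true` the identifier suffix is appended to every party name.)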
+ */ +class Names { + + import Names.{ + SignatoryPrefix, + PartyPrefixSeparatorChar, + ObserverPrefix, + DivulgeePrefix, + ExtraSubmitterPrefix, + } + + val identifierSuffix = f"${System.nanoTime}%x" + val benchtoolUserId = "benchtool" + val workflowId = s"$benchtoolUserId-$identifierSuffix" + val signatoryPartyName = s"$SignatoryPrefix$PartyPrefixSeparatorChar$identifierSuffix" + + def observerPartyNames(numberOfObservers: Int, uniqueParties: Boolean): Seq[String] = + partyNames(ObserverPrefix, numberOfObservers, uniqueParties) + + def divulgeePartyNames(numberOfDivulgees: Int, uniqueParties: Boolean): Seq[String] = + partyNames(DivulgeePrefix, numberOfDivulgees, uniqueParties) + + def extraSubmitterPartyNames(numberOfExtraSubmitters: Int, uniqueParties: Boolean): Seq[String] = + partyNames( + ExtraSubmitterPrefix, + numberOfExtraSubmitters, + uniqueParties, + padPartyIndexWithLeadingZeroes = true, + ) + + def partySetPartyName(prefix: String, numberOfParties: Int, uniqueParties: Boolean): Seq[String] = + partyNames( + prefix = prefix, + numberOfParties = numberOfParties, + uniqueParties = uniqueParties, + // Padding the party names with leading zeroes makes it more convenient to construct requests based on a party prefix. + // For example, if we have 1000 parties in a party set, we can use prefix 'Party-1' to match precisely the parties {Party-100, Party-101, .., Party-199} + padPartyIndexWithLeadingZeroes = true, + ) + + def commandId(index: Int): String = s"command-$index-$identifierSuffix" + + def mainPackageId(index: Int) = s"submission-dars-$index-$identifierSuffix" + + private def partyNames( + prefix: String, + numberOfParties: Int, + uniqueParties: Boolean, + padPartyIndexWithLeadingZeroes: Boolean = false, + ): Seq[String] = { + val largestIndex = numberOfParties - 1 + val paddingTargetLength = largestIndex.toString.length + def indexToString(i: Int): String = + if (padPartyIndexWithLeadingZeroes) { + padLeftWithZeroes(i, paddingTargetLength) + } else { + i.toString + } + (0 until numberOfParties).map(i => partyName(prefix, indexToString(i), uniqueParties)) + } + + private def padLeftWithZeroes(i: Int, len: Int): String = { + val iText = i.toString + "0" * (len - iText.length) + iText + } + + private def partyName(baseName: String, index: String, uniqueParties: Boolean): String = + s"$baseName$PartyPrefixSeparatorChar$index" + (if (uniqueParties) identifierSuffix else "") + +} + +object Names { + protected val PartyPrefixSeparatorChar: Char = '-' + val SignatoryPrefix = "signatory" + val ObserverPrefix = "Obs" + val DivulgeePrefix = "Div" + val ExtraSubmitterPrefix = "Sub" + + /** @return + * main prefix of a party which is the prefix up to the first '-' character + */ + def parsePartyNameMainPrefix(partyName: String): String = + partyName.split(Names.PartyPrefixSeparatorChar)(0) + +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/PartyAllocating.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/PartyAllocating.scala new file mode 100644 index 0000000000..c704a93edf --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/PartyAllocating.scala @@ -0,0 +1,99 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +import com.daml.ledger.javaapi.data.Party +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.SubmissionConfig +import com.digitalasset.canton.ledger.api.benchtool.services.LedgerApiServices +import org.slf4j.LoggerFactory + +import scala.concurrent.{ExecutionContext, Future} + +class PartyAllocating( + names: Names, + adminServices: LedgerApiServices, +) { + + private val logger = LoggerFactory.getLogger(getClass) + + def allocateParties(config: SubmissionConfig)(implicit + ec: ExecutionContext + ): Future[AllocatedParties] = { + val observerPartyNames = + names.observerPartyNames(config.numberOfObservers, config.uniqueParties) + val divulgeePartyNames = + names.divulgeePartyNames(config.numberOfDivulgees, config.uniqueParties) + val extraSubmittersPartyNames = + names.extraSubmitterPartyNames(config.numberOfExtraSubmitters, config.uniqueParties) + val observersPartySetParties: Map[String, List[String]] = + config.observerPartySets.map { partySet => + val parties = names + .partySetPartyName( + prefix = partySet.partyNamePrefix, + numberOfParties = partySet.count, + uniqueParties = config.uniqueParties, + ) + .toList + partySet.partyNamePrefix -> parties + }.toMap + logger.info("Allocating parties...") + for { + known <- lookupExistingParties() + signatory <- allocateSignatoryParty(known) + observers <- allocateParties(observerPartyNames, known) + divulgees <- allocateParties(divulgeePartyNames, known) + extraSubmitters <- allocateParties(extraSubmittersPartyNames, known) + partySetNames = observersPartySetParties.keys + partySetParties: Map[String, List[Party]] <- Future + .sequence(partySetNames.map { partySetName => + allocateParties(observersPartySetParties(partySetName), known).map(partySetName -> _) + }) + .map(_.toMap) + } yield { + logger.info("Allocating parties completed") + AllocatedParties( + signatoryO = Some(signatory), + observers = observers, + divulgees = divulgees, + extraSubmitters = extraSubmitters, + observerPartySets = partySetParties.view.map { case (partyName, parties) => + AllocatedPartySet( + mainPartyNamePrefix = partyName, + parties = parties, + ) + }.toList, + ) + } + } + + def lookupExistingParties()(implicit ec: ExecutionContext): Future[Set[String]] = + adminServices.partyManagementService.listKnownParties() + + private def allocateSignatoryParty(known: Set[String])(implicit + ec: ExecutionContext + ): Future[Party] = + lookupOrAllocateParty(names.signatoryPartyName, known) + + private def allocateParties(partyNames: Seq[String], known: Set[String])(implicit + ec: ExecutionContext + ): Future[List[Party]] = + Future.traverse(partyNames.toList)(lookupOrAllocateParty(_, known)) + + private def lookupOrAllocateParty(party: String, known: Set[String])(implicit + ec: ExecutionContext + ): Future[Party] = + if (known.exists(_.startsWith(party))) { + val partyId = known + .find(_.startsWith(party)) + .getOrElse( + throw new RuntimeException(s"Party id for party $party should have been found") + ) + logger.info( + s"Found known party: $party with party id: $partyId." 
+ ) + Future.successful(new Party(partyId)) + } else + adminServices.partyManagementService.allocateParty(party) + +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/RandomnessProvider.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/RandomnessProvider.scala new file mode 100644 index 0000000000..d68030c082 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/RandomnessProvider.scala @@ -0,0 +1,31 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +trait RandomnessProvider { + def randomDouble(): Double // 0.0 <= randomDouble() < 1.0 + /** Guarantees that each character will take exactly one byte in UTF-8. + */ + def randomAsciiString(n: Int): String + def randomNatural(n: Int): Int // 0 <= randomNatural(n) < n +} + +object RandomnessProvider { + object Default extends Seeded(System.currentTimeMillis()) + + def forSeed(seed: Long) = new Seeded(seed = seed) + + class Seeded(seed: Long) extends RandomnessProvider { + private val r = new scala.util.Random(seed) + override def randomDouble(): Double = r.nextDouble() + override def randomNatural(n: Int): Int = r.nextInt(n) + override def randomAsciiString(n: Int): String = { + val buffer = new StringBuilder(n) + 0.until(n).foreach { _ => + buffer.append(r.nextPrintableChar()) + } + buffer.toString() + } + } +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/foo/PartiesSelection.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/foo/PartiesSelection.scala new file mode 100644 index 0000000000..9f84113971 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/foo/PartiesSelection.scala @@ -0,0 +1,11 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission.foo + +import com.daml.ledger.javaapi.data.Party + +final case class PartiesSelection( + observers: List[Party], + divulgees: List[Party], +) diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/foo/RandomPartySelecting.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/foo/RandomPartySelecting.scala new file mode 100644 index 0000000000..ec927de444 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/submission/foo/RandomPartySelecting.scala @@ -0,0 +1,62 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission.foo + +import com.daml.ledger.javaapi.data.Party +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.FooSubmissionConfig +import com.digitalasset.canton.ledger.api.benchtool.submission.{ + AllocatedParties, + RandomnessProvider, +} + +class RandomPartySelecting( + config: FooSubmissionConfig, + allocatedParties: AllocatedParties, + randomnessProvider: RandomnessProvider, +) { + + private val observersProbability = probabilitiesByPartyIndex(allocatedParties.observers) + private val divulgeesProbability = probabilitiesByPartyIndex(allocatedParties.divulgees) + private val extraSubmittersProbability = probabilitiesByPartyIndex( + allocatedParties.extraSubmitters + ) + private val observerPartySetPartiesProbability: List[(Party, Double)] = + allocatedParties.observerPartySets.flatMap { partySet => + val visibility = config.observerPartySets + .find(_.partyNamePrefix == partySet.mainPartyNamePrefix) + .fold( + sys.error( + s"Could not find visibility for party set ${partySet.mainPartyNamePrefix} in the submission config" + ) + )(_.visibility) + partySet.parties.map(party => party -> visibility) + } + + def nextPartiesForContracts(): PartiesSelection = + PartiesSelection( + observers = + pickParties(observersProbability) ++ pickParties(observerPartySetPartiesProbability), + divulgees = pickParties(divulgeesProbability), + ) + + def nextExtraSubmitter(): List[Party] = pickParties(extraSubmittersProbability) + + private def pickParties(probabilities: List[(Party, Double)]): List[Party] = + probabilities + .collect { case (party, probability) if randomBoolean(probability) => party } + + private def randomBoolean(truthProbability: Double): Boolean = + randomnessProvider.randomDouble() <= truthProbability + + private def probabilitiesByPartyIndex( + orderedParties: List[Party] + ): List[(Party, Double)] = + orderedParties.zipWithIndex.toMap.view.mapValues(probabilityBaseTen).toList + + /** @return + * the probability 1/(10**i) + */ + private def probabilityBaseTen(i: Int): Double = math.pow(10.0, -i.toDouble) + +}
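+
+// Editor's illustration (not part of the original patch): for parties at indices
+// 0, 1, 2 the visibility probabilities are 1.0, 0.1 and 0.01, so the first observer
+// sees every contract and each subsequent observer sees roughly ten times fewer.
diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/util/ObserverWithResult.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/util/ObserverWithResult.scala new file mode 100644 index 0000000000..e29868d0d6 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/util/ObserverWithResult.scala @@ -0,0 +1,60 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.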
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.util + +import com.digitalasset.base.error.utils.ErrorDetails +import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors +import io.grpc.stub.{ClientCallStreamObserver, ClientResponseObserver} +import org.slf4j.Logger + +import scala.concurrent.{Future, Promise} + +object ClientCancelled extends Exception + +abstract class ObserverWithResult[RespT, Result](logger: Logger) + extends ClientResponseObserver[Any, RespT] { + + private var requestStream: ClientCallStreamObserver[_] = null + + def streamName: String + + def result: Future[Result] = promise.future + + def completeWith(): Future[Result] + + override def onNext(value: RespT): Unit = () + + override def onError(t: Throwable): Unit = { + logger.info(withStreamName(s"Received error: $t")) + t match { + case ex: io.grpc.StatusRuntimeException if isServerShuttingDownError(ex) => + logger.info(s"Stopping reading the stream due to the server being shut down.") + promise.completeWith(completeWith()) + case ex if ex.getCause == ClientCancelled => + logger.info(s"Stopping reading the stream due to a client cancellation.") + promise.completeWith(completeWith()) + case ex => + promise.failure(ex) + } + } + + override def beforeStart(requestStream: ClientCallStreamObserver[Any]): Unit = + this.requestStream = requestStream + + def cancel(): Unit = + requestStream.cancel(null, ClientCancelled) + + private def isServerShuttingDownError(ex: io.grpc.StatusRuntimeException): Boolean = + ErrorDetails.matches(ex, GrpcErrors.AbortedDueToShutdown) + + override def onCompleted(): Unit = { + logger.info(withStreamName(s"Stream completed.")) + promise.completeWith(completeWith()) + } + + private val promise: Promise[Result] = Promise[Result]() + + protected def withStreamName(message: String) = s"[$streamName] $message" + +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/util/ReportFormatter.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/util/ReportFormatter.scala new file mode 100644 index 0000000000..1683281808 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/util/ReportFormatter.scala @@ -0,0 +1,148 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.util + +import com.digitalasset.canton.ledger.api.benchtool.metrics.MetricsCollector.Response.{ + FinalReport, + PeriodicReport, +} +import com.digitalasset.canton.ledger.api.benchtool.metrics.metrics.TotalRuntimeMetric +import com.digitalasset.canton.ledger.api.benchtool.metrics.{ + ConsumptionSpeedMetric, + CountRateMetric, + DelayMetric, + LatencyMetric, + MetricValue, + ServiceLevelObjective, + SizeMetric, + TotalCountMetric, +} + +object ReportFormatter { + def formatPeriodicReport(streamName: String, periodicReport: PeriodicReport): String = { + val values = periodicReport.values.map(shortMetricReport).mkString(", ") + s"[$streamName] $values" + } + + def formatFinalReport(streamName: String, finalReport: FinalReport): String = { + def valueFormat(label: String, value: String): String = + s"""[$streamName][final-value] $label: $value""" + def failureFormat(info: String): String = s"""[$streamName][failure] $info""" + + val reports = finalReport.metricsData.flatMap { metricData => + val valueLog: Option[String] = + if (includeInFinalReport(metricData.value)) + Some(valueFormat(metricName(metricData.value), formattedValue(metricData.value))) + else + None + + val violatedObjectives: List[String] = + metricData.violatedObjectives.map { case (objective, value) => + val info = + s"${objectiveName(objective)}: required: ${formattedObjectiveValue(objective)}, metered: ${formattedValue(value)}" + failureFormat(info) + } + + valueLog.toList ::: violatedObjectives + } + + val durationLog = + valueFormat("Duration [s]", s"${finalReport.totalDuration.toMillis.toDouble / 1000}") + val reportWidth = 80 + val bar = "=" * reportWidth + s""" + |$bar + | BENCHMARK RESULTS: $streamName + |$bar + |$durationLog + |${reports.mkString("\n")} + |$bar""".stripMargin + } + + private def includeInFinalReport(value: MetricValue): Boolean = value match { + case _: ConsumptionSpeedMetric.Value => false + case _: DelayMetric.Value => false + case _ => true + } + + private def metricName(value: MetricValue): String = value match { + case _: ConsumptionSpeedMetric.Value => "Consumption speed [-]" + case _: CountRateMetric.Value => "Item rate [item/s]" + case _: DelayMetric.Value => "Mean delay [s]" + case _: SizeMetric.Value => "Size rate [MB/s]" + case _: TotalCountMetric.Value => "Total item count [item]" + case _: LatencyMetric.Value => "Average latency (millis)" + case _: TotalRuntimeMetric.Value => "Total runtime [ms]" + case other => sys.error(s"Unsupported value: $other") + } + + private def shortMetricReport(value: MetricValue): String = + s"${shortMetricName(value)}: ${formattedValue(value)}" + + private def shortMetricName(value: MetricValue): String = value match { + case _: ConsumptionSpeedMetric.Value => "speed [-]" + case _: CountRateMetric.Value => "rate [item/s]" + case _: DelayMetric.Value => "delay [s]" + case _: SizeMetric.Value => "rate [MB/s]" + case _: TotalCountMetric.Value => "count [item]" + case _: LatencyMetric.Value => "Average latency (millis)" + case _: TotalRuntimeMetric.Value => "Total runtime [ms]" + case other => sys.error(s"Unsupported value: $other") + } + + private def formattedValue(value: MetricValue): String = value match { + case v: ConsumptionSpeedMetric.Value => + s"${v.relativeSpeed.map(rounded).getOrElse("-")}" + case v: CountRateMetric.Value => + s"${rounded(v.ratePerSecond)}" + case v: DelayMetric.Value => + s"${v.meanDelaySeconds.getOrElse("-")}" + case v: SizeMetric.Value 
=> + s"${rounded(v.megabytesPerSecond)}" + case v: TotalCountMetric.Value => + s"${v.totalCount}" + case v: LatencyMetric.Value => + s"${v.latencyNanos / 1000000.0d}" + case v: TotalRuntimeMetric.Value => + v.v.toMillis.toString + case other => sys.error(s"Unsupported value: $other") + } + + private def objectiveName(objective: ServiceLevelObjective[_]): String = + objective match { + case _: DelayMetric.MaxDelay => + s"Maximum record time delay [s]" + case _: ConsumptionSpeedMetric.MinConsumptionSpeed => + s"Minimum consumption speed [-]" + case _: CountRateMetric.RateObjective.MinRate => + s"Minimum item rate [item/s]" + case _: CountRateMetric.RateObjective.MaxRate => + s"Maximum item rate [item/s]" + case _: LatencyMetric.MaxLatency => + "Maximum latency (millis)" + case _: TotalRuntimeMetric.MaxDurationObjective => + "Total runtime [ms]" + case other => sys.error(s"Unsupported value: $other") + } + + private def formattedObjectiveValue(objective: ServiceLevelObjective[_]): String = + objective match { + case obj: DelayMetric.MaxDelay => + obj.maxDelaySeconds.toString + case obj: ConsumptionSpeedMetric.MinConsumptionSpeed => + obj.minSpeed.toString + case obj: CountRateMetric.RateObjective.MinRate => + obj.minAllowedRatePerSecond.toString + case obj: CountRateMetric.RateObjective.MaxRate => + obj.maxAllowedRatePerSecond.toString + case obj: LatencyMetric.MaxLatency => + obj.millis.toString + case obj: TotalRuntimeMetric.MaxDurationObjective => + obj.maxValue.toMillis.toString + case other => sys.error(s"Unsupported value: $other") + } + + private def rounded(value: Double): String = "%.2f".format(value) + +}
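+
+// Editor's sketch of the shape formatFinalReport produces (illustrative stream name
+// and values, not output from a real run):
+//   ================================================================================
+//    BENCHMARK RESULTS: my-stream
+//   ================================================================================
+//   [my-stream][final-value] Duration [s]: 12.3
+//   [my-stream][final-value] Total item count [item]: 100
+//   ================================================================================
diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/util/SimpleFileReader.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/util/SimpleFileReader.scala new file mode 100644 index 0000000000..1e85ac00a4 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/util/SimpleFileReader.scala @@ -0,0 +1,19 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.util + +import com.google.protobuf.ByteString + +import java.io.{BufferedReader, File, FileReader, Reader} +import scala.util.{Try, Using} + +object SimpleFileReader { + + def readResource[Result](name: String): Try[ByteString] = + Using(getClass.getClassLoader.getResourceAsStream(name))(ByteString.readFrom) + + def readFile[Result](file: File)(f: Reader => Result): Try[Result] = + Using(new BufferedReader(new FileReader(file)))(f) + +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/util/TimeUtil.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/util/TimeUtil.scala new file mode 100644 index 0000000000..2c07e43b56 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/util/TimeUtil.scala @@ -0,0 +1,24 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.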
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.util + +import com.google.protobuf.timestamp.Timestamp + +import java.time.{Duration, Instant} + +object TimeUtil { + + def timestampToInstant(timestamp: Timestamp): Instant = + Instant.ofEpochSecond(timestamp.seconds.toLong, timestamp.nanos.toLong) + + def durationBetween(before: Timestamp, after: Instant): Duration = + Duration.between(timestampToInstant(before), after) + + def durationBetween(before: Instant, after: Instant): Duration = + Duration.between(before, after) + + /** Returns `true` if `a` is longer or equal to `b`. */ + def isAtLeast(a: Duration, b: Duration): Boolean = + a.compareTo(b) >= 0 +} diff --git a/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/util/TypedActorSystemResourceOwner.scala b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/util/TypedActorSystemResourceOwner.scala new file mode 100644 index 0000000000..4539cde05d --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/main/scala/com/digitalasset/canton/ledger/api/benchtool/util/TypedActorSystemResourceOwner.scala @@ -0,0 +1,35 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.util + +import com.daml.ledger.resources.{ResourceContext, ResourceOwner} +import com.daml.resources.{AbstractResourceOwner, ReleasableResource, Resource} +import org.apache.pekko.actor.typed.scaladsl.Behaviors +import org.apache.pekko.actor.typed.{ActorSystem, Behavior, SpawnProtocol} + +import scala.concurrent.Future + +class TypedActorSystemResourceOwner[BehaviorType]( + acquireActorSystem: () => ActorSystem[BehaviorType] +) extends AbstractResourceOwner[ResourceContext, ActorSystem[BehaviorType]] { + override def acquire()(implicit + context: ResourceContext + ): Resource[ResourceContext, ActorSystem[BehaviorType]] = + ReleasableResource(Future(acquireActorSystem()))(system => Future(system.terminate())) +} + +object TypedActorSystemResourceOwner { + def owner(): ResourceOwner[ActorSystem[SpawnProtocol.Command]] = + new TypedActorSystemResourceOwner[SpawnProtocol.Command](() => + ActorSystem(Creator(), "Creator") + ) + + object Creator { + def apply(): Behavior[SpawnProtocol.Command] = + Behaviors.setup { context => + context.log.debug(s"Starting Creator actor") + SpawnProtocol() + } + } +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/FibonacciCommandSubmitterITSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/FibonacciCommandSubmitterITSpec.scala new file mode 100644 index 0000000000..a78caf2300 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/FibonacciCommandSubmitterITSpec.scala @@ -0,0 +1,84 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.ledger.api.benchtool.submission + +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.tests.ledgerapi.NoAuthPlugin +import com.digitalasset.canton.ledger.api.benchtool.BenchtoolSandboxFixture +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig +import com.digitalasset.canton.ledger.api.benchtool.submission.{ + EventsObserver, + FibonacciCommandGenerator, +} +import com.digitalasset.canton.version.ProtocolVersion +import org.scalatest.AppendedClues + +class FibonacciCommandSubmitterITSpec extends BenchtoolSandboxFixture with AppendedClues { + registerPlugin(NoAuthPlugin(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + + "FibonacciCommandSubmitterIT" should { + "populate create fibonacci contracts" onlyRunWithOrGreaterThan ProtocolVersion.dev in { env => + import env.* + + val config = WorkflowConfig.FibonacciSubmissionConfig( + numberOfInstances = 10, + uniqueParties = false, + value = 7, + waitForSubmission = true, + ) + + (for { + (apiServices, names, submitter) <- benchtoolFixture() + allocatedParties <- submitter.prepare(config) + _ = allocatedParties.divulgees shouldBe empty + generator = new FibonacciCommandGenerator( + signatory = allocatedParties.signatory, + config = config, + names = names, + ) + _ <- submitter.generateAndSubmit( + generator = generator, + config = config, + baseActAs = List(allocatedParties.signatory) ++ allocatedParties.divulgees, + maxInFlightCommands = 1, + submissionBatchSize = 5, + ) + eventsObserver = EventsObserver(expectedTemplateNames = + Set( + "InefficientFibonacci", + "InefficientFibonacciResult", + ) + ) + ledgerEnd <- apiServices.stateService.getLedgerEnd() + _ <- apiServices.updateService.transactionsLedgerEffects( + config = WorkflowConfig.StreamConfig.TransactionLedgerEffectsStreamConfig( + name = "dummy-name", + filters = List( + WorkflowConfig.StreamConfig.PartyFilter( + party = allocatedParties.signatory.getValue, + templates = List.empty, + interfaces = List.empty, + ) + ), + beginOffsetExclusive = 0L, + endOffsetInclusive = Some(ledgerEnd), + objectives = None, + maxItemCount = None, + timeoutO = None, + ), + observer = eventsObserver, + ) + observerResult <- eventsObserver.result + } yield { + observerResult.numberOfCreatesPerTemplateName( + "InefficientFibonacci" + ) shouldBe config.numberOfInstances withClue "number of create events" + succeed + }).futureValue + } + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/FooCommandSubmitterITSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/FooCommandSubmitterITSpec.scala new file mode 100644 index 0000000000..a4b465d23f --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/FooCommandSubmitterITSpec.scala @@ -0,0 +1,176 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.ledger.api.benchtool.submission + +import com.daml.ledger.javaapi.data.Party +import com.daml.scalautil.Statement.discard +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.tests.ledgerapi.NoAuthPlugin +import com.digitalasset.canton.ledger.api.benchtool.BenchtoolSandboxFixture +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.FooSubmissionConfig.{ + ConsumingExercises, + NonconsumingExercises, +} +import com.digitalasset.canton.ledger.api.benchtool.services.LedgerApiServices +import com.digitalasset.canton.ledger.api.benchtool.submission.{EventsObserver, ObservedEvents} +import com.digitalasset.canton.version.ProtocolVersion +import org.scalatest.{AppendedClues, Checkpoints} + +import scala.concurrent.{ExecutionContext, Future} + +class FooCommandSubmitterITSpec + extends BenchtoolSandboxFixture + with AppendedClues + with Checkpoints { + registerPlugin(NoAuthPlugin(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + + "FooCommandSubmitter" should { + "populate participant with create, consuming and non consuming exercises" onlyRunWithOrGreaterThan ProtocolVersion.dev in { + env => + import env.* + + val foo1Config = WorkflowConfig.FooSubmissionConfig.ContractDescription( + template = "Foo1", + weight = 1, + payloadSizeBytes = 100, + ) + val foo2Config = WorkflowConfig.FooSubmissionConfig.ContractDescription( + template = "Foo2", + weight = 1, + payloadSizeBytes = 100, + ) + val consumingExercisesConfig = ConsumingExercises( + probability = 1.0, + payloadSizeBytes = 100, + ) + val nonConsumingExercisesConfig = NonconsumingExercises( + probability = 2.0, + payloadSizeBytes = 100, + ) + val config = WorkflowConfig.FooSubmissionConfig( + numberOfInstances = 100, + numberOfObservers = 2, + numberOfDivulgees = 0, + numberOfExtraSubmitters = 0, + uniqueParties = false, + instanceDistribution = List( + foo1Config, + foo2Config, + ), + nonConsumingExercises = Some(nonConsumingExercisesConfig), + consumingExercises = Some(consumingExercisesConfig), + userIds = List.empty, + ) + + val (apiServices, allocatedParties, fooSubmission) = + benchtoolFooSubmissionFixture(config).futureValue + + allocatedParties.divulgees shouldBe empty + + fooSubmission.performSubmission(submissionConfig = config).futureValue + val observerResult_signatory = ledgerEffectsEventsObserver( + apiServices = apiServices, + party = allocatedParties.signatory, + ).futureValue + + val observerResult_observer0 = ledgerEffectsEventsObserver( + apiServices = apiServices, + party = allocatedParties.observers.head, + ).futureValue + + val observerResult_observer1 = ledgerEffectsEventsObserver( + apiServices = apiServices, + party = allocatedParties.observers(1), + ).futureValue + + val cp = new Checkpoint + cp( + discard( + observerResult_signatory.createEvents.size shouldBe config.numberOfInstances withClue "number of create events" + ) + ) + cp( + discard( + observerResult_signatory.avgSizeOfCreateEventPerTemplateName( + "Foo1" + ) shouldBe 310 +- 20 withClue "payload size of create Foo1" + ) + ) + cp( + discard( + observerResult_signatory.avgSizeOfCreateEventPerTemplateName( + "Foo2" + ) shouldBe 310 +- 20 withClue "payload size of create Foo2" + ) + ) + cp( + discard( + 
observerResult_signatory.avgSizeOfConsumingExercise shouldBe 108 + ) + ) + cp( + discard( + observerResult_signatory.avgSizeOfNonconsumingExercise shouldBe 108 + ) + ) + cp( + discard( + observerResult_signatory.consumingExercises.size.toDouble shouldBe (config.numberOfInstances * consumingExercisesConfig.probability) withClue "number of consuming exercises" + ) + ) + val expectedNumberOfNonConsumingExercises = + config.numberOfInstances * nonConsumingExercisesConfig.probability.toInt + cp( + discard( + observerResult_signatory.nonConsumingExercises.size shouldBe expectedNumberOfNonConsumingExercises withClue "number of non consuming exercises visible to signatory" + ) + ) + // First observer can see all non-consuming events + cp( + discard( + observerResult_observer0.nonConsumingExercises.size shouldBe expectedNumberOfNonConsumingExercises withClue "number of non consuming exercises visible to Obs-0" + ) + ) + // Second observer can see ~10% of all non-consuming events (see probabilitiesByPartyIndex()) + cp( + discard( + observerResult_observer1.nonConsumingExercises.size shouldBe 14 withClue "number of non consuming exercises visible to Obs-1" + ) + ) + cp.reportAll() + succeed + } + } + + private def ledgerEffectsEventsObserver( + apiServices: LedgerApiServices, + party: Party, + )(implicit ec: ExecutionContext): Future[ObservedEvents] = for { + ledgerEnd <- apiServices.stateService.getLedgerEnd() + eventsObserver = EventsObserver(expectedTemplateNames = Set("Foo1", "Foo2")) + config = WorkflowConfig.StreamConfig.TransactionLedgerEffectsStreamConfig( + name = "dummy-name", + filters = List( + WorkflowConfig.StreamConfig.PartyFilter( + party = party.getValue, + templates = List.empty, + interfaces = List.empty, + ) + ), + beginOffsetExclusive = 0L, + endOffsetInclusive = Some(ledgerEnd), + objectives = None, + maxItemCount = None, + timeoutO = None, + ) + txs <- apiServices.updateService.transactionsLedgerEffects( + config = config, + observer = eventsObserver, + ) + } yield txs + +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/InterfaceSubscriptionITSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/InterfaceSubscriptionITSpec.scala new file mode 100644 index 0000000000..76ba3182df --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/InterfaceSubscriptionITSpec.scala @@ -0,0 +1,114 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.ledger.api.benchtool.submission + +import com.daml.ledger.javaapi.data.Party +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.tests.ledgerapi.NoAuthPlugin +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig +import com.digitalasset.canton.ledger.api.benchtool.services.LedgerApiServices +import com.digitalasset.canton.ledger.api.benchtool.submission.{ + ActiveContractsObserver, + BenchtoolTestsPackageInfo, + ObservedEvents, +} +import com.digitalasset.canton.ledger.api.benchtool.{BenchtoolSandboxFixture, ConfigEnricher} +import com.digitalasset.canton.version.ProtocolVersion +import org.scalatest.AppendedClues + +import scala.concurrent.{ExecutionContext, Future} + +class InterfaceSubscriptionITSpec extends BenchtoolSandboxFixture with AppendedClues { + registerPlugin(NoAuthPlugin(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + + "interface subscriptions" should { + "be exposed to the benchtool" onlyRunWithOrGreaterThan ProtocolVersion.dev in { env => + import env.* + + val foo1Config = WorkflowConfig.FooSubmissionConfig.ContractDescription( + template = "Foo1", + weight = 1, + payloadSizeBytes = 100, + ) + val foo2Config = WorkflowConfig.FooSubmissionConfig.ContractDescription( + template = "Foo2", + weight = 1, + payloadSizeBytes = 100, + ) + val foo3Config = WorkflowConfig.FooSubmissionConfig.ContractDescription( + template = "Foo3", + weight = 1, + payloadSizeBytes = 100, + ) + + val config = WorkflowConfig.FooSubmissionConfig( + numberOfInstances = 100, + numberOfObservers = 2, + numberOfDivulgees = 0, + numberOfExtraSubmitters = 0, + uniqueParties = false, + instanceDistribution = List( + foo1Config, + foo2Config, + foo3Config, + ), + nonConsumingExercises = None, + consumingExercises = None, + userIds = List.empty, + ) + + (for { + (apiServices, allocatedParties, fooSubmission) <- benchtoolFooSubmissionFixture(config) + configDesugaring = new ConfigEnricher( + allocatedParties, + BenchtoolTestsPackageInfo.StaticDefault, + ) + _ <- fooSubmission.performSubmission(submissionConfig = config) + observedEvents <- observer( + configDesugaring = configDesugaring, + apiServices = apiServices, + party = allocatedParties.signatory, + ) + } yield { + observedEvents.createEvents.forall(_.interfaceViews.nonEmpty) shouldBe true + observedEvents.createEvents + .flatMap(_.interfaceViews) + .forall(_.serializedSize > 0) shouldBe true + observedEvents.createEvents + .flatMap(_.interfaceViews) + .map(_.interfaceName) + .toSet shouldBe Set("FooI2", "FooI1", "FooI3") + }).futureValue + } + } + + private def observer( + configDesugaring: ConfigEnricher, + apiServices: LedgerApiServices, + party: Party, + )(implicit ec: ExecutionContext): Future[ObservedEvents] = { + val config = WorkflowConfig.StreamConfig.ActiveContractsStreamConfig( + name = "dummy-name", + filters = List( + WorkflowConfig.StreamConfig.PartyFilter( + party = party.getValue, + templates = List.empty, + interfaces = List("FooI2", "FooI1", "FooI3"), + ) + ), + objectives = None, + maxItemCount = None, + timeoutO = None, + ) + apiServices.stateService.getActiveContracts( + config = configDesugaring + .enrichStreamConfig(config) + .asInstanceOf[WorkflowConfig.StreamConfig.ActiveContractsStreamConfig], + observer = ActiveContractsObserver(Set("Foo1", "Foo2", 
"Foo3")), + ) + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/NonStakeholderInformeesITSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/NonStakeholderInformeesITSpec.scala new file mode 100644 index 0000000000..59f97eed42 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/NonStakeholderInformeesITSpec.scala @@ -0,0 +1,215 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.ledger.api.benchtool.submission + +import com.daml.ledger.javaapi.data.Party +import com.daml.scalautil.Statement.discard +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.tests.ledgerapi.NoAuthPlugin +import com.digitalasset.canton.ledger.api.benchtool.BenchtoolSandboxFixture +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.FooSubmissionConfig.ConsumingExercises +import com.digitalasset.canton.ledger.api.benchtool.services.LedgerApiServices +import com.digitalasset.canton.ledger.api.benchtool.submission.{EventsObserver, ObservedEvents} +import com.digitalasset.canton.version.ProtocolVersion +import org.scalatest.{AppendedClues, Checkpoints, OptionValues} + +import scala.concurrent.{ExecutionContext, Future} + +class NonStakeholderInformeesITSpec + extends BenchtoolSandboxFixture + with AppendedClues + with OptionValues + with Checkpoints { + registerPlugin(NoAuthPlugin(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + + "NonStakeholderInformees" should { + "divulge events" onlyRunWithOrGreaterThan ProtocolVersion.dev in { env => + import env.* + + val expectedTemplateNames = Set("Foo1", "Divulger") + val submissionConfig = WorkflowConfig.FooSubmissionConfig( + numberOfInstances = 100, + numberOfObservers = 1, + numberOfDivulgees = 3, + numberOfExtraSubmitters = 0, + uniqueParties = false, + instanceDistribution = List( + WorkflowConfig.FooSubmissionConfig.ContractDescription( + template = "Foo1", + weight = 1, + payloadSizeBytes = 0, + ) + ), + nonConsumingExercises = None, + consumingExercises = Some( + ConsumingExercises( + probability = 0.1, + payloadSizeBytes = 0, + ) + ), + userIds = List.empty, + ) + (for { + (apiServices, allocatedParties, fooSubmission) <- benchtoolFooSubmissionFixture( + submissionConfig + ) + _ <- fooSubmission.performSubmission(submissionConfig = submissionConfig) + (ledgerEffectsResults_divulgee0, acsDeltaResults_divulgee0) <- observeAllTemplatesForParty( + party = allocatedParties.divulgees.head, + apiServices = apiServices, + expectedTemplateNames = expectedTemplateNames, + ) + (ledgerEffectsResults_divulgee1, acsDeltaResults_divulgee1) <- observeAllTemplatesForParty( + party = allocatedParties.divulgees(1), + apiServices = apiServices, + expectedTemplateNames = expectedTemplateNames, + ) + (ledgerEffectsResults_observer0, acsDeltaResults_observer0) <- observeAllTemplatesForParty( + party = allocatedParties.observers.head, + apiServices = apiServices, + expectedTemplateNames = expectedTemplateNames, + 
) + (ledgerEffectsResults_signatory, _) <- observeAllTemplatesForParty( + party = allocatedParties.signatory, + apiServices = apiServices, + expectedTemplateNames = expectedTemplateNames, + ) + } yield { + // Creates of Foo1 are divulged to the "divulgee" parties; + // thus they are visible on the transaction ledger effects stream but absent from the acs delta transactions stream. + val cp = new Checkpoint + + { + // Divulgee0 + { + // Create events + val ledgerEffectsFoo1 = + ledgerEffectsResults_divulgee0.numberOfCreatesPerTemplateName("Foo1") + val acsDeltaFoo1 = acsDeltaResults_divulgee0.numberOfCreatesPerTemplateName("Foo1") + cp( + discard( + ledgerEffectsFoo1 shouldBe 100 withClue "number of Foo1 contracts visible to divulgee0 on ledger effects transactions stream" + ) + ) + cp( + discard( + acsDeltaFoo1 shouldBe 0 withClue "number of Foo1 contracts visible to divulgee0 on acs delta transactions stream" + ) + ) + val divulger = ledgerEffectsResults_divulgee0.numberOfCreatesPerTemplateName("Divulger") + // For 3 divulgees in total (a, b, c) there are 4 subsets that contain 'a': a, ab, ac, abc. + cp( + discard( + divulger shouldBe 4 withClue "number of divulger contracts visible to divulgee0" + ) + ) + } + { + // Consuming events (with a 10% chance of generating a consuming event per contract) + val ledgerEffectsFoo1 = + ledgerEffectsResults_divulgee0.numberOfConsumingExercisesPerTemplateName("Foo1") + val acsDeltaFoo1 = + acsDeltaResults_divulgee0.numberOfConsumingExercisesPerTemplateName("Foo1") + cp( + discard( + ledgerEffectsFoo1 shouldBe 13 withClue "number of Foo1 consuming events visible to divulgee0 on ledger effects transactions stream" + ) + ) + cp( + discard( + acsDeltaFoo1 shouldBe 0 withClue "number of Foo1 consuming events visible to divulgee0 on acs delta transactions stream" + ) + ) + } + } + { + // Divulgee1 + val ledgerEffectsFoo1 = + ledgerEffectsResults_divulgee1.numberOfCreatesPerTemplateName("Foo1") + val acsDeltaFoo1 = acsDeltaResults_divulgee1.numberOfCreatesPerTemplateName("Foo1") + // This assertion will fail (observing 0 items) roughly once in ~37k test executions: + // with 100 instances and a 10% chance of divulging each contract to divulgee1, divulgee1 is disclosed no contracts at all once in 1/(0.9**100) ~= 37649 runs + cp(discard(ledgerEffectsFoo1 shouldBe 9)) + cp(discard(acsDeltaFoo1 shouldBe 0)) + val divulger = ledgerEffectsResults_divulgee1.numberOfCreatesPerTemplateName("Divulger") + cp(discard(divulger shouldBe 4)) + } + { + // Observer0 + val ledgerEffectsFoo1 = + ledgerEffectsResults_observer0.numberOfCreatesPerTemplateName("Foo1") + val acsDeltaFoo1 = acsDeltaResults_observer0.numberOfCreatesPerTemplateName("Foo1") + cp(discard(ledgerEffectsFoo1 shouldBe 100)) + // Approximately 10% of contracts are created and archived in the same transaction and thus omitted from the acs delta transactions stream + cp(discard(acsDeltaFoo1 shouldBe 87)) + val divulger = ledgerEffectsResults_observer0.numberOfCreatesPerTemplateName("Divulger") + cp(discard(divulger shouldBe 0)) + } + { + // The signatory sees one Divulger contract per non-empty subset of the 3 divulgees: 2^3 - 1 = 7 + val divulger = ledgerEffectsResults_signatory.numberOfCreatesPerTemplateName("Divulger") + cp(discard(divulger shouldBe 7)) + } + cp.reportAll() + succeed + }).futureValue + } + } + + private def observeAllTemplatesForParty( + party: Party, + apiServices: LedgerApiServices, + expectedTemplateNames: Set[String], + )(implicit ec: ExecutionContext): Future[(ObservedEvents, ObservedEvents)] = { + val ledgerEffectsTxObserver = EventsObserver(expectedTemplateNames = expectedTemplateNames) + val
acsDeltaTxObserver = EventsObserver(expectedTemplateNames = expectedTemplateNames) + for { + ledgerEnd <- apiServices.stateService.getLedgerEnd() + _ <- apiServices.updateService.transactionsLedgerEffects( + config = WorkflowConfig.StreamConfig.TransactionLedgerEffectsStreamConfig( + name = "dummy-name", + filters = List( + WorkflowConfig.StreamConfig.PartyFilter( + party = party.getValue, + templates = List.empty, + interfaces = List.empty, + ) + ), + beginOffsetExclusive = 0L, + endOffsetInclusive = Some(ledgerEnd), + objectives = None, + maxItemCount = None, + timeoutO = None, + ), + observer = ledgerEffectsTxObserver, + ) + ledgerEnd <- apiServices.stateService.getLedgerEnd() + _ <- apiServices.updateService.transactions( + config = WorkflowConfig.StreamConfig.TransactionsStreamConfig( + name = "dummy-name", + filters = List( + WorkflowConfig.StreamConfig.PartyFilter( + party = party.getValue, + templates = List.empty, + interfaces = List.empty, + ) + ), + beginOffsetExclusive = 0L, + endOffsetInclusive = Some(ledgerEnd), + objectives = None, + maxItemCount = None, + timeoutO = None, + ), + observer = acsDeltaTxObserver, + ) + ledgerEffectsResults: ObservedEvents <- ledgerEffectsTxObserver.result + acsDeltaResults: ObservedEvents <- acsDeltaTxObserver.result + } yield { + (ledgerEffectsResults, acsDeltaResults) + } + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/NonTransientContractsITSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/NonTransientContractsITSpec.scala new file mode 100644 index 0000000000..54f3e9deb9 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/NonTransientContractsITSpec.scala @@ -0,0 +1,125 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.ledger.api.benchtool.submission + +import com.daml.ledger.javaapi.data.Party +import com.daml.scalautil.Statement.discard +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.tests.ledgerapi.NoAuthPlugin +import com.digitalasset.canton.ledger.api.benchtool.BenchtoolSandboxFixture +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig +import com.digitalasset.canton.ledger.api.benchtool.services.LedgerApiServices +import com.digitalasset.canton.ledger.api.benchtool.submission.{EventsObserver, ObservedEvents} +import com.digitalasset.canton.version.ProtocolVersion +import org.scalatest.{AppendedClues, Checkpoints, EitherValues, OptionValues} + +import scala.concurrent.{ExecutionContext, Future} + +class NonTransientContractsITSpec + extends BenchtoolSandboxFixture + with AppendedClues + with EitherValues + with OptionValues + with Checkpoints { + registerPlugin(NoAuthPlugin(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + + "benchtool" should { + "submit non-transient contracts" onlyRunWithOrGreaterThan ProtocolVersion.dev in { env => + import env.* + + val totalContractsCount = 100 + val submissionConfig = WorkflowConfig.FooSubmissionConfig( + numberOfInstances = totalContractsCount, + numberOfObservers = 1, + uniqueParties = false, + allowNonTransientContracts = true, + instanceDistribution = List( + WorkflowConfig.FooSubmissionConfig.ContractDescription( + template = "Foo1", + weight = 1, + payloadSizeBytes = 0, + ), + WorkflowConfig.FooSubmissionConfig.ContractDescription( + template = "Foo2", + weight = 1, + payloadSizeBytes = 0, + ), + ), + consumingExercises = Some( + WorkflowConfig.FooSubmissionConfig.ConsumingExercises( + probability = 0.7, + payloadSizeBytes = 0, + ) + ), + ) + (for { + (apiServices, allocatedParties, submission) <- benchtoolFooSubmissionFixture( + submissionConfig + ) + _ <- submission.performSubmission(submissionConfig) + txEvents: ObservedEvents <- txObserver( + apiServices = apiServices, + party = allocatedParties.observers.head, + ) + } yield { + val createAndConsumeOffsetPairs = for { + create <- txEvents.createEvents + consume <- txEvents.consumingExercises.find(_.contractId == create.contractId).toList + } yield create.offset -> consume.offset + val activeContracts = txEvents.createEvents.count(create => + !txEvents.consumingExercises.exists(_.contractId == create.contractId) + ) + val cp = new Checkpoint + val nonTransientContracts = createAndConsumeOffsetPairs.count { + case (createOffset, archiveOffset) => createOffset != archiveOffset + } + val transientContracts = createAndConsumeOffsetPairs.count { + case (createOffset, archiveOffset) => createOffset == archiveOffset + } + cp(discard(nonTransientContracts shouldBe 47)) + cp(discard(transientContracts shouldBe 16)) + // sanity check: + cp( + discard( + activeContracts + nonTransientContracts + transientContracts shouldBe totalContractsCount + ) + ) + cp.reportAll() + succeed + }).futureValue + } + } + + private def txObserver( + apiServices: LedgerApiServices, + party: Party, + beginOffsetExclusive: Long = 0, + )(implicit ec: ExecutionContext): Future[ObservedEvents] = for { + ledgerEnd <- apiServices.stateService + .getLedgerEnd() + eventsObserver = EventsObserver(expectedTemplateNames = Set("Foo1", "Foo2")) + config = 
WorkflowConfig.StreamConfig.TransactionLedgerEffectsStreamConfig( + name = "dummy-name", + filters = List( + WorkflowConfig.StreamConfig.PartyFilter( + party = party.getValue, + templates = List.empty, + interfaces = List.empty, + ) + ), + beginOffsetExclusive = beginOffsetExclusive, + endOffsetInclusive = Some(ledgerEnd), + objectives = None, + maxItemCount = None, + timeoutO = None, + ) + txs <- apiServices.updateService.transactionsLedgerEffects( + config = config, + observer = eventsObserver, + ) + } yield txs + +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/PartyAllocationITSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/PartyAllocationITSpec.scala new file mode 100644 index 0000000000..3223b73920 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/PartyAllocationITSpec.scala @@ -0,0 +1,43 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.ledger.api.benchtool.submission + +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.tests.ledgerapi.NoAuthPlugin +import com.digitalasset.canton.ledger.api.benchtool.BenchtoolSandboxFixture +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig +import com.digitalasset.canton.version.ProtocolVersion +import org.scalatest.AppendedClues + +class PartyAllocationITSpec extends BenchtoolSandboxFixture with AppendedClues { + registerPlugin(NoAuthPlugin(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + + "benchtool" should { + "allow parties to be reused" onlyRunWithOrGreaterThan ProtocolVersion.dev in { env => + import env.* + + val config = WorkflowConfig.FooSubmissionConfig( + numberOfInstances = 0, + numberOfObservers = 1, + numberOfDivulgees = 1, + numberOfExtraSubmitters = 1, + uniqueParties = false, + instanceDistribution = Nil, + nonConsumingExercises = None, + consumingExercises = None, + userIds = List.empty, + ) + + (for { + (_, _, submitter) <- benchtoolFixture() + parties1 <- submitter.prepare(config) + parties2 <- submitter.prepare(config) + } yield { + parties1 shouldBe parties2 + }).futureValue + } + } +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/PartySetsITSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/PartySetsITSpec.scala new file mode 100644 index 0000000000..5e95713571 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/PartySetsITSpec.scala @@ -0,0 +1,292 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.ledger.api.benchtool.submission + +import com.daml.scalautil.Statement.discard +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.tests.ledgerapi.NoAuthPlugin +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.FooSubmissionConfig.{ + ConsumingExercises, + NonconsumingExercises, + PartySet, +} +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.StreamConfig.{ + PartyNamePrefixFilter, + TransactionLedgerEffectsStreamConfig, +} +import com.digitalasset.canton.ledger.api.benchtool.services.LedgerApiServices +import com.digitalasset.canton.ledger.api.benchtool.submission.{ + BenchtoolTestsPackageInfo, + EventsObserver, + ObservedEvents, +} +import com.digitalasset.canton.ledger.api.benchtool.{BenchtoolSandboxFixture, ConfigEnricher} +import com.digitalasset.canton.version.ProtocolVersion +import org.scalatest.{AppendedClues, Checkpoints, OptionValues} + +import scala.concurrent.{ExecutionContext, Future} + +class PartySetsITSpec + extends BenchtoolSandboxFixture + with AppendedClues + with OptionValues + with Checkpoints { + registerPlugin(NoAuthPlugin(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + + "benchtool" should { + "submit a party-set and apply party-set filter on a stream" onlyRunWithOrGreaterThan ProtocolVersion.dev in { + env => + import env.* + + val submissionConfig = WorkflowConfig.FooSubmissionConfig( + numberOfInstances = 10, + numberOfObservers = 1, + uniqueParties = false, + instanceDistribution = List( + WorkflowConfig.FooSubmissionConfig.ContractDescription( + template = "Foo1", + weight = 1, + payloadSizeBytes = 0, + ) + ), + observerPartySets = List( + PartySet( + partyNamePrefix = "FooParty", + count = 100, + visibility = 0.5, + ), + PartySet( + partyNamePrefix = "BarParty", + count = 20, + visibility = 0.05, + ), + ), + consumingExercises = Some( + ConsumingExercises( + probability = 0.3, + payloadSizeBytes = 0, + ) + ), + nonConsumingExercises = Some( + NonconsumingExercises( + probability = 2.0, + payloadSizeBytes = 0, + ) + ), + ) + (for { + (apiServices, allocatedParties, fooSubmission) <- benchtoolFooSubmissionFixture( + submissionConfig + ) + configDesugaring = new ConfigEnricher( + allocatedParties, + BenchtoolTestsPackageInfo.StaticDefault, + ) + _ <- fooSubmission.performSubmission(submissionConfig = submissionConfig) + _ = allocatedParties.observerPartySets + .find(_.mainPartyNamePrefix == "FooParty") + .value + .parties(87) + .getValue should startWith("FooParty-87") + ledgerEffectsResults_fooParty87 <- observeStreams( + configDesugaring = configDesugaring, + filterByParties = List("FooParty-87"), + apiServices = apiServices, + expectedTemplateNames = Set("Foo1"), + ) + ledgerEffectsResults_fooPartySet <- observeStreams( + configDesugaring = configDesugaring, + filterByPartyNamePrefixes = List("FooParty"), + apiServices = apiServices, + expectedTemplateNames = Set("Foo1"), + ) + ledgerEffectsResults_fooPartyNamePrefix <- observeStreams( + configDesugaring = configDesugaring, + // matches 10 parties: {FooParty-30, FooParty-31, .., FooParty-39} + filterByPartyNamePrefixes = List("FooParty-3"), + apiServices = apiServices, + expectedTemplateNames = Set("Foo1"), + ) + 
ledgerEffectsResults_barPartySet <- observeStreams( + configDesugaring = configDesugaring, + filterByPartyNamePrefixes = List("BarParty"), + apiServices = apiServices, + expectedTemplateNames = Set("Foo1"), + ) + ledgerEffectsResults_barPartyNamePrefix <- observeStreams( + configDesugaring = configDesugaring, + // Matches 10 parties: {BarParty-10, BarParty-11, .., BarParty-19} + filterByPartyNamePrefixes = List("BarParty-1"), + apiServices = apiServices, + expectedTemplateNames = Set("Foo1"), + ) + + } yield { + val cp = new Checkpoint + + { // Party from party set + cp( + discard( + ledgerEffectsResults_fooParty87.numberOfCreatesPerTemplateName("Foo1") shouldBe 4 + ) + ) + cp( + discard( + ledgerEffectsResults_fooParty87.numberOfNonConsumingExercisesPerTemplateName( + "Foo1" + ) shouldBe 8 + ) + ) + cp( + discard( + ledgerEffectsResults_fooParty87.numberOfConsumingExercisesPerTemplateName( + "Foo1" + ) shouldBe 1 + ) + ) + } + { // Foo party set + cp( + discard( + ledgerEffectsResults_fooPartySet.numberOfCreatesPerTemplateName("Foo1") shouldBe 10 + ) + ) + cp( + discard( + ledgerEffectsResults_fooPartySet.numberOfNonConsumingExercisesPerTemplateName( + "Foo1" + ) shouldBe 20 + ) + ) + cp( + discard( + ledgerEffectsResults_fooPartySet.numberOfConsumingExercisesPerTemplateName( + "Foo1" + ) shouldBe 4 + ) + ) + } + { // Foo party set subset + cp( + discard( + ledgerEffectsResults_fooPartyNamePrefix.numberOfCreatesPerTemplateName( + "Foo1" + ) shouldBe 10 + ) + ) + cp( + discard( + ledgerEffectsResults_fooPartyNamePrefix + .numberOfNonConsumingExercisesPerTemplateName( + "Foo1" + ) shouldBe 20 + ) + ) + cp( + discard( + ledgerEffectsResults_fooPartyNamePrefix.numberOfConsumingExercisesPerTemplateName( + "Foo1" + ) shouldBe 4 + ) + ) + } + { // Bar party set + cp( + discard( + ledgerEffectsResults_barPartySet.numberOfCreatesPerTemplateName("Foo1") shouldBe 5 + ) + ) + cp( + discard( + ledgerEffectsResults_barPartySet.numberOfNonConsumingExercisesPerTemplateName( + "Foo1" + ) shouldBe 10 + ) + ) + cp( + discard( + ledgerEffectsResults_barPartySet.numberOfConsumingExercisesPerTemplateName( + "Foo1" + ) shouldBe 2 + ) + ) + } + { // Bar party set subset + cp( + discard( + ledgerEffectsResults_barPartyNamePrefix.numberOfCreatesPerTemplateName( + "Foo1" + ) shouldBe 5 + ) + ) + cp( + discard( + ledgerEffectsResults_barPartyNamePrefix + .numberOfNonConsumingExercisesPerTemplateName( + "Foo1" + ) shouldBe 10 + ) + ) + cp( + discard( + ledgerEffectsResults_barPartyNamePrefix.numberOfConsumingExercisesPerTemplateName( + "Foo1" + ) shouldBe 2 + ) + ) + } + cp.reportAll() + succeed + }).futureValue + } + } + + private def observeStreams( + configDesugaring: ConfigEnricher, + filterByPartyNamePrefixes: List[String] = List.empty, + filterByParties: List[String] = List.empty, + filterByTemplates: List[String] = List.empty, + apiServices: LedgerApiServices, + expectedTemplateNames: Set[String], + )(implicit ec: ExecutionContext): Future[ObservedEvents] = { + val txObserver = EventsObserver(expectedTemplateNames = expectedTemplateNames) + for { + ledgerEnd <- apiServices.stateService.getLedgerEnd() + ledgerEffectsConfig = TransactionLedgerEffectsStreamConfig( + name = "dummy-name", + filters = filterByParties.map(party => + WorkflowConfig.StreamConfig.PartyFilter( + party = party, + templates = filterByTemplates, + interfaces = List.empty, + ) + ), + partyNamePrefixFilters = filterByPartyNamePrefixes.map(partyNamePrefix => + PartyNamePrefixFilter( + partyNamePrefix = partyNamePrefix, + templates = 
filterByTemplates, + ) + ), + beginOffsetExclusive = 0L, + endOffsetInclusive = Some(ledgerEnd), + objectives = None, + maxItemCount = None, + timeoutO = None, + ) + _ <- apiServices.updateService.transactionsLedgerEffects( + config = configDesugaring + .enrichStreamConfig(ledgerEffectsConfig) + .asInstanceOf[TransactionLedgerEffectsStreamConfig], + observer = txObserver, + ) + results: ObservedEvents <- txObserver.result + } yield { + results + } + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/PruningITSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/PruningITSpec.scala new file mode 100644 index 0000000000..f5993d8376 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/PruningITSpec.scala @@ -0,0 +1,178 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.ledger.api.benchtool.submission + +import com.daml.ledger.javaapi.data.Party +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.tests.ledgerapi.NoAuthPlugin +import com.digitalasset.canton.ledger.api.benchtool.config.{Config, WorkflowConfig} +import com.digitalasset.canton.ledger.api.benchtool.services.LedgerApiServices +import com.digitalasset.canton.ledger.api.benchtool.submission.{ + ActiveContractsObserver, + EventsObserver, + Names, + ObservedEvents, +} +import com.digitalasset.canton.ledger.api.benchtool.util.TypedActorSystemResourceOwner.Creator +import com.digitalasset.canton.ledger.api.benchtool.{BenchtoolSandboxFixture, PruningBenchmark} +import com.digitalasset.canton.version.ProtocolVersion +import org.apache.pekko.actor.typed.{ActorSystem, SpawnProtocol} +import org.scalatest.{AppendedClues, Checkpoints, EitherValues, OptionValues} + +import scala.concurrent.duration.* +import scala.concurrent.{ExecutionContext, Future} + +class PruningITSpec + extends BenchtoolSandboxFixture + with AppendedClues + with EitherValues + with OptionValues + with Checkpoints { + registerPlugin(NoAuthPlugin(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + + private var system: ActorSystem[SpawnProtocol.Command] = _ + + override def beforeAll(): Unit = { + super.beforeAll() + system = ActorSystem(Creator(), "Creator") + } + + override def afterAll(): Unit = { + super.afterAll() + system.terminate() + } + + "benchtool" should { + "benchmark pruning" onlyRunWithOrGreaterThan ProtocolVersion.dev in { env => + import env.* + + val reconciliationInterval: java.time.Duration = participant1.topology.synchronizer_parameters + .get_dynamic_synchronizer_parameters(daId) + .reconciliationInterval + .asJava + + val maxDeduplicationDuration: java.time.Duration = + participant1.config.init.ledgerApi.maxDeduplicationDuration.asJava + + val simClock = environment.simClock.value + + val submissionConfig = WorkflowConfig.FooSubmissionConfig( + numberOfInstances = 100, + numberOfObservers = 1, + uniqueParties = false, + instanceDistribution = List( + WorkflowConfig.FooSubmissionConfig.ContractDescription( + template = "Foo1", + weight = 1, + payloadSizeBytes = 0, + ) + ), + 
consumingExercises = Some( + WorkflowConfig.FooSubmissionConfig.ConsumingExercises( + probability = 0.5, + payloadSizeBytes = 0, + ) + ), + ) + val testedPruningBenchmark = + new PruningBenchmark(reportingPeriod = Config.Default.reportingPeriod) + (for { + (apiServices, allocatedParties, submission) <- benchtoolFooSubmissionFixture( + submissionConfig + ) + _ <- submission.performSubmission(submissionConfig) + txPrePruning: ObservedEvents <- txObserver( + apiServices = apiServices, + party = allocatedParties.observers.head, + ) + // advance the clock long enough to be sure that the last event to be pruned is followed by an acs commitment + _ = simClock.advance( + (if (maxDeduplicationDuration.compareTo(reconciliationInterval) >= 0) + maxDeduplicationDuration + else reconciliationInterval).multipliedBy(2) + ) + pruningBenchmarkResult <- testedPruningBenchmark.benchmarkPruning( + pruningConfig = WorkflowConfig.PruningConfig( + name = "pruning-benchmark-test", + pruneAllDivulgedContracts = true, + maxDurationObjective = 0.nano, + ), + regularUserServices = apiServices, + adminServices = apiServices, + actorSystem = system, + signatory = allocatedParties.signatory, + names = new Names(), + ) + acsPostPruning: ObservedEvents <- acsObserver( + apiServices = apiServices, + party = allocatedParties.observers.head, + ) + } yield { + pruningBenchmarkResult.value shouldBe () + txPrePruning.createEvents.size shouldBe 100 + txPrePruning.consumingExercises.size shouldBe 49 + acsPostPruning.numberOfCreatesPerTemplateName("Foo1") shouldBe 51 + acsPostPruning.numberOfConsumingExercisesPerTemplateName("Foo1") shouldBe 0 + succeed + }).futureValue + } + } + + private def txObserver( + apiServices: LedgerApiServices, + party: Party, + beginOffsetExclusive: Long = 0L, + )(implicit ec: ExecutionContext): Future[ObservedEvents] = { + val eventsObserver = EventsObserver(expectedTemplateNames = Set("Foo1")) + for { + ledgerEnd <- apiServices.stateService.getLedgerEnd() + config = WorkflowConfig.StreamConfig.TransactionLedgerEffectsStreamConfig( + name = "dummy-name", + filters = List( + WorkflowConfig.StreamConfig.PartyFilter( + party = party.getValue, + templates = List.empty, + interfaces = List.empty, + ) + ), + beginOffsetExclusive = beginOffsetExclusive, + endOffsetInclusive = Some(ledgerEnd), + objectives = None, + maxItemCount = None, + timeoutO = None, + ) + txs <- apiServices.updateService.transactionsLedgerEffects( + config = config, + observer = eventsObserver, + ) + } yield txs + } + + private def acsObserver( + apiServices: LedgerApiServices, + party: Party, + )(implicit ec: ExecutionContext): Future[ObservedEvents] = { + val eventsObserver = ActiveContractsObserver(expectedTemplateNames = Set("Foo1", "Foo2")) + val config = WorkflowConfig.StreamConfig.ActiveContractsStreamConfig( + name = "dummy-name", + filters = List( + WorkflowConfig.StreamConfig.PartyFilter( + party = party.getValue, + templates = List.empty, + interfaces = List.empty, + ) + ), + objectives = None, + maxItemCount = None, + timeoutO = None, + ) + apiServices.stateService.getActiveContracts( + config = config, + observer = eventsObserver, + ) + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/WeightedApplicationIdsAndSubmittersITSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/WeightedApplicationIdsAndSubmittersITSpec.scala new file mode 100644 
index 0000000000..9bc742bacd --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/integration/tests/ledger/api/benchtool/submission/WeightedApplicationIdsAndSubmittersITSpec.scala @@ -0,0 +1,133 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.ledger.api.benchtool.submission + +import com.daml.ledger.javaapi.data.Party +import com.daml.scalautil.Statement.discard +import com.daml.timer.Delayed +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.tests.ledgerapi.NoAuthPlugin +import com.digitalasset.canton.ledger.api.benchtool.BenchtoolSandboxFixture +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.FooSubmissionConfig.UserId +import com.digitalasset.canton.ledger.api.benchtool.services.LedgerApiServices +import com.digitalasset.canton.ledger.api.benchtool.submission.{ + CompletionsObserver, + ObservedCompletions, +} +import com.digitalasset.canton.version.ProtocolVersion +import org.scalatest.concurrent.PatienceConfiguration +import org.scalatest.{AppendedClues, Checkpoints, OptionValues} + +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.{Duration, DurationInt, FiniteDuration} +import scala.concurrent.{ExecutionContext, Future} + +class WeightedUserIdsAndSubmittersITSpec + extends BenchtoolSandboxFixture + with AppendedClues + with OptionValues + with Checkpoints { + registerPlugin(NoAuthPlugin(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) + + private val timeout: FiniteDuration = 2.minutes + + "benchtool" should { + "populate participant with contracts using specified user-ids and submitters" onlyRunWithOrGreaterThan ProtocolVersion.dev in { + env => + import env.* + + val submissionConfig = WorkflowConfig.FooSubmissionConfig( + numberOfInstances = 100, + numberOfObservers = 1, + numberOfExtraSubmitters = 3, + uniqueParties = false, + instanceDistribution = List( + WorkflowConfig.FooSubmissionConfig.ContractDescription( + template = "Foo1", + weight = 1, + payloadSizeBytes = 0, + ) + ), + nonConsumingExercises = None, + consumingExercises = None, + userIds = List( + UserId( + userId = "App-1", + weight = 90, + ), + UserId( + userId = "App-2", + weight = 10, + ), + ), + ) + (for { + (apiServices, allocatedParties, fooSubmission) <- benchtoolFooSubmissionFixture( + submissionConfig + ) + _ <- fooSubmission.performSubmission(submissionConfig = submissionConfig) + completionsApp1 <- observeCompletions( + parties = List(allocatedParties.signatory), + apiServices = apiServices, + userId = "App-1", + ) + completionsApp2 <- observeCompletions( + parties = List(allocatedParties.signatory), + apiServices = apiServices, + userId = "App-2", + ) + completionsApp1Submitter0 <- observeCompletions( + parties = List(allocatedParties.extraSubmitters.head), + apiServices = apiServices, + userId = "App-1", + ) + completionsApp1Submitter1 <- observeCompletions( + parties = List(allocatedParties.extraSubmitters(1)), + apiServices = apiServices, + userId = "App-1", + ) + } yield { + val cp = new Checkpoint + // App only filters + cp(discard(completionsApp1.completions.size shouldBe 91)) + cp(discard(completionsApp2.completions.size 
shouldBe 9)) + // App and party filters + cp( + discard( + completionsApp1Submitter0.completions.size shouldBe completionsApp1.completions.size + ) + ) + cp(discard(completionsApp1Submitter0.completions.size shouldBe 91)) + cp(discard(completionsApp1Submitter1.completions.size shouldBe 9)) + cp.reportAll() + succeed + }).futureValue(timeout = PatienceConfiguration.Timeout(timeout)) + } + } + + private def observeCompletions( + parties: List[Party], + userId: String, + apiServices: LedgerApiServices, + )(implicit ec: ExecutionContext): Future[ObservedCompletions] = { + val observer = CompletionsObserver() + Delayed.by(t = Duration(5, TimeUnit.SECONDS))(observer.cancel()) + apiServices.commandCompletionService.completions( + config = WorkflowConfig.StreamConfig.CompletionsStreamConfig( + name = "dummy-name", + parties = parties.map(_.getValue), + userId = userId, + beginOffsetExclusive = None, + objectives = None, + timeoutO = None, + maxItemCount = None, + ), + observer = observer, + ) + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/BenchtoolSandboxFixture.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/BenchtoolSandboxFixture.scala new file mode 100644 index 0000000000..f02f907c2c --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/BenchtoolSandboxFixture.scala @@ -0,0 +1,69 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool + +import com.daml.metrics.api.noop.NoOpMetricsFactory +import com.digitalasset.canton.BaseTest +import com.digitalasset.canton.integration.tests.ledgerapi.fixture.CantonFixture +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig +import com.digitalasset.canton.ledger.api.benchtool.metrics.MetricsManager.NoOpMetricsManager +import com.digitalasset.canton.ledger.api.benchtool.services.LedgerApiServices +import com.digitalasset.canton.ledger.api.benchtool.submission.* +import org.scalatest.Suite +import org.scalatest.time.{Minutes, Span} + +import scala.concurrent.{ExecutionContext, Future} + +trait BenchtoolSandboxFixture extends CantonFixture { self: Suite & BaseTest => + + override implicit lazy val patienceConfig: PatienceConfig = PatienceConfig(Span(2, Minutes)) + + protected def benchtoolFixture()(implicit + ec: ExecutionContext + ): Future[(LedgerApiServices, Names, CommandSubmitter)] = + for { + ledgerApiServicesF <- LedgerApiServices.forChannel( + channel = channel, + authorizationHelper = None, + ) + apiServices: LedgerApiServices = ledgerApiServicesF("someUser") + names = new Names() + submitter = CommandSubmitter( + names = names, + benchtoolUserServices = apiServices, + adminServices = apiServices, + metricsFactory = NoOpMetricsFactory, + metricsManager = NoOpMetricsManager(), + waitForSubmission = true, + partyAllocating = new PartyAllocating( + names = names, + adminServices = apiServices, + ), + // Making command generation deterministic w.r.t. parallelism + commandGenerationParallelism = 1, + // Making command submission deterministic w.r.t. 
parallelism + maxInFlightCommandsOverride = Some(1), + ) + } yield ( + apiServices, + names, + submitter, + ) + + protected def benchtoolFooSubmissionFixture( + submissionConfig: WorkflowConfig.FooSubmissionConfig + )(implicit ec: ExecutionContext): Future[(LedgerApiServices, AllocatedParties, FooSubmission)] = + for { + (apiServices, _, submitter) <- benchtoolFixture() + allocatedParties <- submitter.prepare(submissionConfig) + foo = new FooSubmission( + submitter = submitter, + maxInFlightCommands = 1, + submissionBatchSize = 1, + allocatedParties = allocatedParties, + names = new Names(), + randomnessProvider = RandomnessProvider.forSeed(seed = 0), + ) + } yield (apiServices, allocatedParties, foo) +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/ConfigEnricherSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/ConfigEnricherSpec.scala new file mode 100644 index 0000000000..b8c6ca085e --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/ConfigEnricherSpec.scala @@ -0,0 +1,141 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool + +import com.daml.ledger.javaapi.data.Party +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.StreamConfig.{ + PartyFilter, + PartyNamePrefixFilter, + TransactionsStreamConfig, +} +import com.digitalasset.canton.ledger.api.benchtool.submission.{ + AllocatedParties, + AllocatedPartySet, + BenchtoolTestsPackageInfo, +} +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers + +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.Duration + +class ConfigEnricherSpec extends AnyFlatSpec with Matchers { + + it should "expand party-set filter into a sequence of party filters" in { + def makePartyName(shortName: String): String = s"$shortName" + def makeParty(shortName: String): Party = new Party(makePartyName(shortName)) + + val desugaring = new ConfigEnricher( + allocatedParties = AllocatedParties( + signatoryO = Some(makeParty("Sig-0")), + observers = List(makeParty("Obs-0")), + divulgees = List(makeParty("Div-0")), + extraSubmitters = List(makeParty("Sub-0")), + observerPartySets = List( + AllocatedPartySet( + mainPartyNamePrefix = "MyParty", + List("MyParty-0", "MyParty-1", "MyParty-11", "MyParty-12", "MyParty-21", "MyParty-22") + .map(makeParty), + ) + ), + ), + packageInfo = BenchtoolTestsPackageInfo.StaticDefault, + ) + val templates: List[String] = List("otherTemplate", "Foo1") + val enrichedTemplates: List[String] = + List( + "otherTemplate", + s"${BenchtoolTestsPackageInfo.StaticDefault.packageRef}:Foo:Foo1", + ) + + val actual = desugaring.enrichStreamConfig( + TransactionsStreamConfig( + name = "flat", + filters = List( + PartyFilter( + party = "Obs-0", + templates = templates, + interfaces = List.empty, + ), + PartyFilter( + party = "Sig-0", + templates = templates, + interfaces = List.empty, + ), + PartyFilter( + party = "UnknownParty-0", + templates = templates, + interfaces = List.empty, + ), + ), + partyNamePrefixFilters = List( + PartyNamePrefixFilter( + partyNamePrefix = "MyParty-1", + templates = templates, + ), + PartyNamePrefixFilter( + partyNamePrefix = "MyParty-2", + templates = templates, + ), + PartyNamePrefixFilter( + partyNamePrefix 
= "Obs", + templates = templates, + ), + ), + subscriptionDelay = Some(Duration(1337, TimeUnit.SECONDS)), + ) + ) + actual shouldBe TransactionsStreamConfig( + name = "flat", + filters = List( + PartyFilter( + party = "Obs-0", + templates = enrichedTemplates, + interfaces = List.empty, + ), + PartyFilter( + party = "Sig-0", + templates = enrichedTemplates, + interfaces = List.empty, + ), + PartyFilter( + party = "UnknownParty-0", + templates = enrichedTemplates, + interfaces = List.empty, + ), + PartyFilter( + party = "MyParty-1", + templates = enrichedTemplates, + interfaces = List.empty, + ), + PartyFilter( + party = "MyParty-11", + templates = enrichedTemplates, + interfaces = List.empty, + ), + PartyFilter( + party = "MyParty-12", + templates = enrichedTemplates, + interfaces = List.empty, + ), + PartyFilter( + party = "MyParty-21", + templates = enrichedTemplates, + interfaces = List.empty, + ), + PartyFilter( + party = "MyParty-22", + templates = enrichedTemplates, + interfaces = List.empty, + ), + PartyFilter( + party = "Obs-0", + templates = enrichedTemplates, + interfaces = List.empty, + ), + ), + subscriptionDelay = Some(Duration(1337, TimeUnit.SECONDS)), + ) + } +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/config/CliSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/config/CliSpec.scala new file mode 100644 index 0000000000..2fe298fea6 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/config/CliSpec.scala @@ -0,0 +1,337 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.config + +import org.scalatest.OptionValues +import org.scalatest.matchers.should.Matchers +import org.scalatest.prop.TableDrivenPropertyChecks +import org.scalatest.wordspec.AnyWordSpec + +import java.io.File +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.* + +class CliSpec extends AnyWordSpec with Matchers with OptionValues with TableDrivenPropertyChecks { + + "Cli" should { + "produce the default config when no arguments defined" in { + parse() shouldBe Config.Default + } + + "parse ledger API endpoint" in { + val endpoint = "foo:123" + val expectedConfig = Config.Default.copy( + ledger = Config.Ledger( + hostname = "foo", + port = 123, + ) + ) + parse("--endpoint", endpoint) shouldBe expectedConfig + parse("-e", endpoint) shouldBe expectedConfig + } + + "parse workflow config location" in { + val workflowFile = "/some/path/to/file" + val expectedConfig = Config.Default.copy(workflowConfigFile = Some(new File(workflowFile))) + parse("--workflow-config", workflowFile) shouldBe expectedConfig + parse("-w", workflowFile) shouldBe expectedConfig + } + + "parse maximum number of in-flight commands parameter" in { + val maxCommands = 123 + val expectedConfig = Config.Default.copy(maxInFlightCommands = maxCommands) + parse("--max-in-flight-commands", maxCommands.toString) shouldBe expectedConfig + } + + "parse submission batch size" in { + val batchSize = 1234 + val expectedConfig = Config.Default.copy(submissionBatchSize = batchSize) + parse("--submission-batch-size", batchSize.toString) shouldBe expectedConfig + } + + "parse log interval" in { + val cases = Table( + "cli value" -> "duration", + "1s" -> 1.second, + "123millis" -> 123.millis, + "5m" 
-> 5.minutes, + ) + forAll(cases) { (argument, intervalDuration) => + val expectedConfig = Config.Default.copy(reportingPeriod = intervalDuration) + parse("--log-interval", argument) shouldBe expectedConfig + parse("-r", argument) shouldBe expectedConfig + } + } + + "parse thread pool executor's core pool size" in { + val size = 123 + val expectedConfig = + Config.Default.copy(concurrency = Config.Default.concurrency.copy(corePoolSize = size)) + parse("--core-pool-size", size.toString) shouldBe expectedConfig + } + + "parse thread pool executor's max pool size" in { + val size = 123 + val expectedConfig = + Config.Default.copy(concurrency = Config.Default.concurrency.copy(maxPoolSize = size)) + parse("--max-pool-size", size.toString) shouldBe expectedConfig + } + + "parse stream type" in { + import WorkflowConfig.StreamConfig.* + val name = "streamname" + val party1 = "dummy1" + val party2 = "dummy2" + val userId = "userId" + val cases = Table( + "cli argument" -> "stream config", + s"stream-type=transactions,name=$name,filters=$party1" -> TransactionsStreamConfig( + name = name, + filters = List(PartyFilter(party1, Nil, Nil)), + beginOffsetExclusive = 0L, + endOffsetInclusive = None, + objectives = None, + maxItemCount = None, + timeoutO = None, + ), + s"stream-type=transactions-ledger-effects,name=$name,filters=$party1" -> TransactionLedgerEffectsStreamConfig( + name = name, + filters = List(PartyFilter(party1, Nil, Nil)), + beginOffsetExclusive = 0L, + endOffsetInclusive = None, + objectives = None, + maxItemCount = None, + timeoutO = None, + ), + s"stream-type=active-contracts,name=$name,filters=$party1" -> ActiveContractsStreamConfig( + name = name, + filters = List(PartyFilter(party1, Nil, Nil)), + objectives = None, + maxItemCount = None, + timeoutO = None, + ), + s"stream-type=completions,name=$name,parties=$party1+$party2,user-id=$userId,timeout=123s,max-item-count=5" -> CompletionsStreamConfig( + name = name, + parties = List(party1, party2), + userId = userId, + beginOffsetExclusive = None, + objectives = None, + timeoutO = Some(Duration(123, TimeUnit.SECONDS)), + maxItemCount = Some(5), + ), + ) + forAll(cases) { (argument, config) => + val expectedConfig = + Config.Default.copy(workflow = Config.Default.workflow.copy(streams = List(config))) + parse("--consume-stream", argument) shouldBe expectedConfig + parse("-s", argument) shouldBe expectedConfig + } + } + + "parse stream filters" in { + import WorkflowConfig.StreamConfig.* + val name = "streamname" + val party1 = "alice" + val party2 = "bob" + val party3 = "david" + val template1 = "packageid:Foo:Foo1" + val template2 = "packageid2:Foo:Foo2" + // each party filter separated by '+' and each template in a filter separated by '@' + val filters = s"$party1+$party2@$template1@$template2+$party3@$template2" + val filtersList = List( + PartyFilter(party1, List(), List()), + PartyFilter(party2, List(template1, template2), List()), + PartyFilter(party3, List(template2), List()), + ) + val cases = Table( + "cli argument" -> "stream config", + s"stream-type=transactions,name=$name,filters=$filters" -> TransactionsStreamConfig( + name = name, + filters = filtersList, + beginOffsetExclusive = 0L, + endOffsetInclusive = None, + objectives = None, + maxItemCount = None, + timeoutO = None, + ), + s"stream-type=transactions-ledger-effects,name=$name,filters=$filters" -> TransactionLedgerEffectsStreamConfig( + name = name, + filters = filtersList, + beginOffsetExclusive = 0L, + endOffsetInclusive = None, + objectives = None, + maxItemCount 
= None, + timeoutO = None, + ), + s"stream-type=active-contracts,name=$name,filters=$filters" -> ActiveContractsStreamConfig( + name = name, + filters = filtersList, + objectives = None, + maxItemCount = None, + timeoutO = None, + ), + ) + forAll(cases) { (argument, config) => + val expectedConfig = + Config.Default.copy(workflow = Config.Default.workflow.copy(streams = List(config))) + parse("--consume-stream", argument) shouldBe expectedConfig + parse("-s", argument) shouldBe expectedConfig + } + } + + "parse begin offset" in { + import WorkflowConfig.StreamConfig.* + val name = "streamname" + val party = "dummy" + val cases = Table( + "cli parameter" -> "offset", + ",begin-offset=12345678" -> 12345678L, + "" -> 0L, + ) + forAll(cases) { (argument, offset) => + val streamConfig = TransactionsStreamConfig( + name = name, + filters = List(PartyFilter(party, Nil, Nil)), + beginOffsetExclusive = offset, + endOffsetInclusive = None, + objectives = None, + maxItemCount = None, + timeoutO = None, + ) + val expectedConfig = + Config.Default.copy(workflow = Config.Default.workflow.copy(streams = List(streamConfig))) + + parse( + "--consume-stream", + s"stream-type=transactions,name=$name,filters=$party$argument", + ) shouldBe expectedConfig + } + } + + "parse end offset" in { + import WorkflowConfig.StreamConfig.* + val name = "streamname" + val party = "dummy" + val cases = Table( + "cli parameter" -> "offset", + ",end-offset=12345678" -> Some(12345678L), + "" -> None, + ) + forAll(cases) { (argument, offset) => + val streamConfig = TransactionsStreamConfig( + name = name, + filters = List(PartyFilter(party, Nil, Nil)), + beginOffsetExclusive = 0L, + endOffsetInclusive = offset, + objectives = None, + maxItemCount = None, + timeoutO = None, + ) + val expectedConfig = + Config.Default.copy(workflow = Config.Default.workflow.copy(streams = List(streamConfig))) + + parse( + "--consume-stream", + s"stream-type=transactions,name=$name,filters=$party$argument", + ) shouldBe expectedConfig + } + } + + "parse transaction objectives" in { + import WorkflowConfig.StreamConfig.* + val name = "streamname" + val party = "dummy" + val cases = Table( + "cli parameter" -> "objectives", + "max-delay=5" -> TransactionObjectives(maxDelaySeconds = Some(5), None, None, None), + "min-consumption-speed=1.23" -> TransactionObjectives( + None, + minConsumptionSpeed = Some(1.23), + None, + None, + ), + "min-item-rate=1234.5" -> TransactionObjectives( + None, + None, + minItemRate = Some(1234.5), + None, + ), + "max-item-rate=1234.5" -> TransactionObjectives( + None, + None, + None, + maxItemRate = Some(1234.5), + ), + ) + forAll(cases) { (argument, objectives) => + val streamConfig = TransactionsStreamConfig( + name = name, + filters = List(PartyFilter(party, Nil, Nil)), + beginOffsetExclusive = 0L, + endOffsetInclusive = None, + objectives = Some(objectives), + maxItemCount = None, + timeoutO = None, + ) + val expectedConfig = + Config.Default.copy(workflow = Config.Default.workflow.copy(streams = List(streamConfig))) + + parse( + "--consume-stream", + s"stream-type=transactions,name=$name,filters=$party,$argument", + ) shouldBe expectedConfig + } + } + + "parse rate objectives" in { + import WorkflowConfig.StreamConfig.* + val name = "streamname" + val party = "dummy" + val cases = Table( + "cli parameter" -> "objectives", + "min-item-rate=1234.5" -> AcsAndCompletionsObjectives(minItemRate = Some(1234.5), None), + "max-item-rate=1234.5" -> AcsAndCompletionsObjectives(None, maxItemRate = Some(1234.5)), + ) + 
forAll(cases) { (argument, objectives) => + val streamConfig = ActiveContractsStreamConfig( + name = name, + filters = List(PartyFilter(party, Nil, Nil)), + objectives = Some(objectives), + maxItemCount = None, + timeoutO = None, + ) + val expectedConfig = + Config.Default.copy(workflow = Config.Default.workflow.copy(streams = List(streamConfig))) + + parse( + "--consume-stream", + s"stream-type=active-contracts,name=$name,filters=$party,$argument", + ) shouldBe expectedConfig + } + } + + "parse `latency-test` flag" in { + val expectedConfig = Config.Default.copy(latencyTest = true) + parse("--latency-test") shouldBe expectedConfig + } + + "parse `max-latency-millis` flag" in { + val expectedConfig = Config.Default.copy(maxLatencyObjectiveMillis = 6000L) + parse("--max-latency-millis", "6000") shouldBe expectedConfig + } + + "`latency-test` cannot be enabled with configured workflow streams" in { + Cli.config( + Array( + "--latency-test", + "--consume-stream", + s"stream-type=transactions,name=some-name,filters=some-filter,end-offset=1234567", + ) + ) shouldBe empty + } + } + + private def parse(args: String*): Config = + Cli.config(args.toArray).value +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/config/WorkflowConfigParserSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/config/WorkflowConfigParserSpec.scala new file mode 100644 index 0000000000..c8c1344131 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/config/WorkflowConfigParserSpec.scala @@ -0,0 +1,682 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.config + +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.FooSubmissionConfig.PartySet +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.StreamConfig.PartyNamePrefixFilter +import com.digitalasset.canton.ledger.api.benchtool.config.WorkflowConfig.{ + FooSubmissionConfig, + PruningConfig, +} +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpec + +import java.io.StringReader +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.Duration + +class WorkflowConfigParserSpec extends AnyWordSpec with Matchers { + + "WorkflowConfigParser" should { + "parse complete workflow configuration" in { + val yaml = + """submission: + | type: foo + | num_instances: 500 + | num_observers: 4 + | num_divulgees: 5 + | num_extra_submitters: 6 + | unique_parties: true + | allow_non_transient_contracts: true + | instance_distribution: + | - template: Foo1 + | weight: 50 + | payload_size_bytes: 60 + | nonconsuming_exercises: + | probability: 4.9 + | payload_size_bytes: 100 + | consuming_exercises: + | probability: 0.5 + | payload_size_bytes: 200 + | user_ids: + | - id: App-1 + | weight: 100 + | - id: App-2 + | weight: 102 + | observers_party_sets: + | - party_name_prefix: FooParty + | count: 99 + | visibility: 0.35 + | - party_name_prefix: BazParty + | count: 10 + | visibility: 0.01 + |streams: + | - type: active-contracts + | name: stream-1 + | filters: + | - party: Obs-2 + | templates: + | - Foo1 + | - Foo3 + | subscription_delay: 7min + | objectives: + | min_item_rate: 123 + | max_item_rate: 456 + | max_item_count: 700 + | - type: transactions + | name: stream-2 + | filters: + | - party: Obs-2 + | templates: + | - Foo1 + |unary: + | - type: pruning + | name: pruning-123 + | prune_all_divulged_contracts: false + | max_duration_objective: 56 ms + |""".stripMargin + + parseYaml(yaml) shouldBe Right( + WorkflowConfig( + submission = Some( + WorkflowConfig.FooSubmissionConfig( + allowNonTransientContracts = true, + numberOfInstances = 500, + numberOfObservers = 4, + numberOfDivulgees = 5, + numberOfExtraSubmitters = 6, + uniqueParties = true, + instanceDistribution = List( + WorkflowConfig.FooSubmissionConfig.ContractDescription( + template = "Foo1", + weight = 50, + payloadSizeBytes = 60, + ) + ), + nonConsumingExercises = Some( + WorkflowConfig.FooSubmissionConfig.NonconsumingExercises( + probability = 4.9, + payloadSizeBytes = 100, + ) + ), + consumingExercises = Some( + WorkflowConfig.FooSubmissionConfig.ConsumingExercises( + probability = 0.5, + payloadSizeBytes = 200, + ) + ), + userIds = List( + FooSubmissionConfig.UserId( + userId = "App-1", + weight = 100, + ), + FooSubmissionConfig.UserId( + userId = "App-2", + weight = 102, + ), + ), + observerPartySets = List( + PartySet(partyNamePrefix = "FooParty", count = 99, visibility = 0.35), + PartySet(partyNamePrefix = "BazParty", count = 10, visibility = 0.01), + ), + ) + ), + streams = List( + WorkflowConfig.StreamConfig.ActiveContractsStreamConfig( + name = "stream-1", + filters = List( + WorkflowConfig.StreamConfig.PartyFilter( + party = "Obs-2", + templates = List("Foo1", "Foo3"), + ) + ), + objectives = Some( + WorkflowConfig.StreamConfig.AcsAndCompletionsObjectives( + minItemRate = Some(123), + maxItemRate = Some(456), + ) + ), + maxItemCount = Some(700), + timeoutO = None, + subscriptionDelay = Some(Duration(7, TimeUnit.MINUTES)), + ), + // Configuration with 
all optional values missing + WorkflowConfig.StreamConfig.TransactionsStreamConfig( + name = "stream-2", + filters = List( + WorkflowConfig.StreamConfig.PartyFilter( + party = "Obs-2", + templates = List("Foo1"), + ) + ), + ), + ), + pruning = Some( + PruningConfig( + name = "pruning-123", + pruneAllDivulgedContracts = false, + maxDurationObjective = Duration(56, TimeUnit.MILLISECONDS), + ) + ), + ) + ) + } + + "parse foo submission configuration" in { + val yaml = + """submission: + | type: foo + | num_instances: 500 + | num_observers: 4 + | num_divulgees: 5 + | num_extra_submitters: 6 + | unique_parties: true + | instance_distribution: + | - template: Foo1 + | weight: 50 + | payload_size_bytes: 60 + | - template: Foo2 + | weight: 25 + | payload_size_bytes: 35 + | - template: Foo3 + | weight: 10 + | payload_size_bytes: 25 + """.stripMargin + + parseYaml(yaml) shouldBe Right( + WorkflowConfig( + submission = Some( + WorkflowConfig.FooSubmissionConfig( + numberOfInstances = 500, + numberOfObservers = 4, + numberOfDivulgees = 5, + numberOfExtraSubmitters = 6, + uniqueParties = true, + instanceDistribution = List( + WorkflowConfig.FooSubmissionConfig.ContractDescription( + template = "Foo1", + weight = 50, + payloadSizeBytes = 60, + ), + WorkflowConfig.FooSubmissionConfig.ContractDescription( + template = "Foo2", + weight = 25, + payloadSizeBytes = 35, + ), + WorkflowConfig.FooSubmissionConfig.ContractDescription( + template = "Foo3", + weight = 10, + payloadSizeBytes = 25, + ), + ), + nonConsumingExercises = None, + consumingExercises = None, + userIds = List.empty, + ) + ), + streams = Nil, + ) + ) + } + + "parse fibonacci submission configuration" in { + val yaml = + """submission: + | type: fibonacci + | num_instances: 500 + | unique_parties: true + | value: 7 + | wait_for_submission: true + """.stripMargin + + parseYaml(yaml) shouldBe Right( + WorkflowConfig( + submission = Some( + WorkflowConfig.FibonacciSubmissionConfig( + numberOfInstances = 500, + uniqueParties = true, + value = 7, + waitForSubmission = true, + ) + ), + streams = Nil, + ) + ) + + } + + "parse transactions stream configuration" in { + val yaml = + """streams: + | - type: transactions + | name: stream-1 + | filters: + | - party: Obs-2 + | templates: + | - Foo1 + | - Foo3 + | party_prefix_filters: + | - party_name_prefix: MyParty + | templates: [Foo1, Foo2] + | - party_name_prefix: MyOtherParty + | templates: [Foo1] + | begin_offset: 456 + | end_offset: 789 + | subscription_delay: 7min + | objectives: + | max_delay_seconds: 123 + | min_consumption_speed: 2.34 + | min_item_rate: 12 + | max_item_rate: 34 + | max_stream_duration: 56 ms + |""".stripMargin + parseYaml(yaml) shouldBe Right( + WorkflowConfig( + submission = None, + streams = List( + WorkflowConfig.StreamConfig.TransactionsStreamConfig( + name = "stream-1", + filters = List( + WorkflowConfig.StreamConfig.PartyFilter( + party = "Obs-2", + templates = List("Foo1", "Foo3"), + ) + ), + partyNamePrefixFilters = List( + PartyNamePrefixFilter( + partyNamePrefix = "MyParty", + templates = List("Foo1", "Foo2"), + ), + PartyNamePrefixFilter( + partyNamePrefix = "MyOtherParty", + templates = List("Foo1"), + ), + ), + beginOffsetExclusive = 456L, + endOffsetInclusive = Some(789L), + objectives = Some( + WorkflowConfig.StreamConfig.TransactionObjectives( + maxDelaySeconds = Some(123), + minConsumptionSpeed = Some(2.34), + minItemRate = Some(12), + maxItemRate = Some(34), + maxTotalStreamRuntimeDuration = Some(Duration(56, TimeUnit.MILLISECONDS)), + ) + 
), + maxItemCount = None, + timeoutO = None, + subscriptionDelay = Some(Duration(7, TimeUnit.MINUTES)), + ) + ), + ) + ) + } + + "parse stream configuration with some objectives set" in { + val yaml = + """streams: + | - type: transactions + | name: stream-1 + | filters: + | - party: Obs-2 + | templates: + | - Foo1 + | - Foo3 + | begin_offset: 456 + | end_offset: 789 + | subscription_delay: 7min + | objectives: + | min_consumption_speed: 2.34 + | min_item_rate: 12""".stripMargin + parseYaml(yaml) shouldBe Right( + WorkflowConfig( + submission = None, + streams = List( + WorkflowConfig.StreamConfig.TransactionsStreamConfig( + name = "stream-1", + filters = List( + WorkflowConfig.StreamConfig.PartyFilter( + party = "Obs-2", + templates = List("Foo1", "Foo3"), + ) + ), + beginOffsetExclusive = 456L, + endOffsetInclusive = Some(789L), + objectives = Some( + WorkflowConfig.StreamConfig.TransactionObjectives( + maxDelaySeconds = None, + minConsumptionSpeed = Some(2.34), + minItemRate = Some(12), + maxItemRate = None, + ) + ), + maxItemCount = None, + timeoutO = None, + subscriptionDelay = Some(Duration(7, TimeUnit.MINUTES)), + ) + ), + ) + ) + } + + "parse stream configuration without objectives" in { + val yaml = + """streams: + | - type: transactions + | name: stream-1 + | filters: + | - party: Obs-2 + | templates: + | - Foo1 + | - Foo3 + | subscription_delay: 7min + | begin_offset: 456 + | end_offset: 789""".stripMargin + parseYaml(yaml) shouldBe Right( + WorkflowConfig( + submission = None, + streams = List( + WorkflowConfig.StreamConfig.TransactionsStreamConfig( + name = "stream-1", + filters = List( + WorkflowConfig.StreamConfig.PartyFilter( + party = "Obs-2", + templates = List("Foo1", "Foo3"), + ) + ), + beginOffsetExclusive = 456L, + endOffsetInclusive = Some(789L), + objectives = None, + maxItemCount = None, + timeoutO = None, + subscriptionDelay = Some(Duration(7, TimeUnit.MINUTES)), + ) + ), + ) + ) + } + + "parse transactions-ledger-effects stream configuration" in { + val yaml = + """streams: + | - type: transactions-ledger-effects + | name: stream-1 + | filters: + | - party: Obs-2 + | templates: + | - Foo1 + | - Foo3 + | - party: Obs-3 + | begin_offset: 456 + | end_offset: 789 + | subscription_delay: 7min + | objectives: + | max_delay_seconds: 123 + | min_consumption_speed: 2.34 + | min_item_rate: 12 + | max_item_rate: 34""".stripMargin + parseYaml(yaml) shouldBe Right( + WorkflowConfig( + submission = None, + streams = List( + WorkflowConfig.StreamConfig.TransactionLedgerEffectsStreamConfig( + name = "stream-1", + filters = List( + WorkflowConfig.StreamConfig.PartyFilter( + party = "Obs-2", + templates = List("Foo1", "Foo3"), 
+ ), + WorkflowConfig.StreamConfig.PartyFilter( + party = "Obs-3", + templates = List.empty, + ), + ), + beginOffsetExclusive = 456L, + endOffsetInclusive = Some(789L), + objectives = Some( + WorkflowConfig.StreamConfig.TransactionObjectives( + maxDelaySeconds = Some(123), + minConsumptionSpeed = Some(2.34), + minItemRate = Some(12), + maxItemRate = Some(34), + ) + ), + maxItemCount = None, + timeoutO = None, + subscriptionDelay = Some(Duration(7, TimeUnit.MINUTES)), + ) + ), + ) + ) + } + + "parse active contracts stream configuration" in { + val yaml = + """streams: + | - type: active-contracts + | name: stream-1 + | filters: + | - party: Obs-2 + | templates: + | - Foo1 + | - Foo3 + | subscription_delay: 7min + | objectives: + | min_item_rate: 123 + | max_item_rate: 4567""".stripMargin + parseYaml(yaml) shouldBe Right( + WorkflowConfig( + submission = None, + streams = List( + WorkflowConfig.StreamConfig.ActiveContractsStreamConfig( + name = "stream-1", + filters = List( + WorkflowConfig.StreamConfig.PartyFilter( + party = "Obs-2", + templates = List("Foo1", "Foo3"), + ) + ), + objectives = Some( + WorkflowConfig.StreamConfig.AcsAndCompletionsObjectives( + minItemRate = Some(123), + maxItemRate = Some(4567), + ) + ), + maxItemCount = None, + timeoutO = None, + subscriptionDelay = Some(Duration(7, TimeUnit.MINUTES)), + ) + ), + ) + ) + } + + "parse completions stream configuration" in { + val yaml = + """streams: + | - type: completions + | name: stream-1 + | parties: [Obs-2] + | begin_offset: 567 + | user_id: foobar + | timeout: 100s + | max_item_count: 101 + | subscription_delay: 7min + | objectives: + | min_item_rate: 12 + | max_item_rate: 345""".stripMargin + parseYaml(yaml) shouldBe Right( + WorkflowConfig( + submission = None, + streams = List( + WorkflowConfig.StreamConfig.CompletionsStreamConfig( + name = "stream-1", + parties = List("Obs-2"), + beginOffsetExclusive = Some(567), + userId = "foobar", + objectives = Some( + WorkflowConfig.StreamConfig.AcsAndCompletionsObjectives( + minItemRate = Some(12), + maxItemRate = Some(345), + ) + ), + timeoutO = Some(Duration(100, TimeUnit.SECONDS)), + maxItemCount = Some(101L), + subscriptionDelay = Some(Duration(7, TimeUnit.MINUTES)), + ) + ), + ) + ) + } + + "parse begin_offset and end_offset markers absence" in { + val yaml = + """streams: + | - type: transactions + | name: stream-1 + | filters: + | - party: Obs-2 + | templates: + | - Foo1 + | - Foo3 + | subscription_delay: 7min""".stripMargin + parseYaml(yaml) shouldBe Right( + WorkflowConfig( + submission = None, + streams = List( + WorkflowConfig.StreamConfig.TransactionsStreamConfig( + name = "stream-1", + filters = List( + WorkflowConfig.StreamConfig.PartyFilter( + party = "Obs-2", + templates = List("Foo1", "Foo3"), + ) + ), + beginOffsetExclusive = 0L, + endOffsetInclusive = None, + objectives = None, + maxItemCount = None, + timeoutO = None, + subscriptionDelay = Some(Duration(7, TimeUnit.MINUTES)), + ) + ), + ) + ) + } + } + + "parse stream configuration with interface filters" in { + val yaml = + """streams: + | - type: transactions + | name: stream-1 + | filters: + | - party: Obs-2 + | interfaces: + | - FooInterface + | begin_offset: 456 + | end_offset: 789 + | subscription_delay: 7min + | objectives: + | min_consumption_speed: 2.34 + | min_item_rate: 12""".stripMargin + parseYaml(yaml) shouldBe Right( + WorkflowConfig( + submission = None, + streams = List( + WorkflowConfig.StreamConfig.TransactionsStreamConfig( + name = "stream-1", + filters = List( + 
WorkflowConfig.StreamConfig.PartyFilter( + party = "Obs-2", + interfaces = List("FooInterface"), + ) + ), + beginOffsetExclusive = 456L, + endOffsetInclusive = Some(789L), + objectives = Some( + WorkflowConfig.StreamConfig.TransactionObjectives( + maxDelaySeconds = None, + minConsumptionSpeed = Some(2.34), + minItemRate = Some(12), + maxItemRate = None, + ) + ), + maxItemCount = None, + timeoutO = None, + subscriptionDelay = Some(Duration(7, TimeUnit.MINUTES)), + ) + ), + ) + ) + } + + "parse party_prefix_filters interfaces" in { + val yaml = + """streams: + | - type: transactions + | name: stream-1 + | filters: + | - party: Obs-2 + | templates: + | - Foo1 + | - Foo3 + | party_prefix_filters: + | - party_name_prefix: My-Party + | interfaces: [FooInterface] + | begin_offset: 456 + | end_offset: 789 + | subscription_delay: 7min + | objectives: + | max_delay_seconds: 123 + | min_consumption_speed: 2.34 + | min_item_rate: 12 + | max_item_rate: 34 + | max_stream_duration: 56 ms + |""".stripMargin + parseYaml(yaml) shouldBe Right( + WorkflowConfig( + submission = None, + streams = List( + WorkflowConfig.StreamConfig.TransactionsStreamConfig( + name = "stream-1", + filters = List( + WorkflowConfig.StreamConfig.PartyFilter( + party = "Obs-2", + templates = List("Foo1", "Foo3"), + ) + ), + partyNamePrefixFilters = List( + PartyNamePrefixFilter( + partyNamePrefix = "My-Party", + interfaces = List("FooInterface"), + ) + ), + beginOffsetExclusive = 456L, + endOffsetInclusive = Some(789L), + objectives = Some( + WorkflowConfig.StreamConfig.TransactionObjectives( + maxDelaySeconds = Some(123), + minConsumptionSpeed = Some(2.34), + minItemRate = Some(12), + maxItemRate = Some(34), + maxTotalStreamRuntimeDuration = Some(Duration(56, TimeUnit.MILLISECONDS)), + ) + ), + maxItemCount = None, + timeoutO = None, + subscriptionDelay = Some(Duration(7, TimeUnit.MINUTES)), + ) + ), + ) + ) + } + + def parseYaml(yaml: String): Either[WorkflowConfigParser.ParserError, WorkflowConfig] = + WorkflowConfigParser.parse(new StringReader(yaml)) + +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/ConsumptionSpeedMetricSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/ConsumptionSpeedMetricSpec.scala new file mode 100644 index 0000000000..b7bcf73a9e --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/ConsumptionSpeedMetricSpec.scala @@ -0,0 +1,270 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import com.digitalasset.canton.ledger.api.benchtool.metrics.ConsumptionSpeedMetric +import com.digitalasset.canton.ledger.api.benchtool.metrics.ConsumptionSpeedMetric.* +import com.google.protobuf.timestamp.Timestamp +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpec + +import java.time.{Clock, Duration, Instant} +import scala.language.existentials + +class ConsumptionSpeedMetricSpec extends AnyWordSpec with Matchers { + ConsumptionSpeedMetric.getClass.getSimpleName should { + "correctly handle initial state" in { + val metric = ConsumptionSpeedMetric.empty[String](_ => List.empty) + + val (_, periodicValue) = metric.periodicValue(Duration.ofMillis(100)) + val finalValue = metric.finalValue(Duration.ofSeconds(1)) + + periodicValue shouldBe Value(Some(0.0)) + finalValue shouldBe Value(None) + } + + "compute values after processing elements" in { + val periodDuration: Duration = Duration.ofMillis(100) + val totalDuration: Duration = Duration.ofSeconds(3) + val elem1: String = "a" + val elem2: String = "d" + val testNow = Clock.systemUTC().instant() + val recordTimes1 = List( + testNow.minusSeconds(100), + testNow.minusSeconds(50), + ) + val recordTimes2 = List( + testNow.minusSeconds(20) + ) + def testRecordTimeFunction: String => List[Timestamp] = recordTimeFunctionFromMap( + Map( + elem1 -> recordTimes1, + elem2 -> recordTimes2, + ) + ) + + val metric = ConsumptionSpeedMetric.empty[String](testRecordTimeFunction) + + val (newMetric, periodicValue) = metric + .onNext(elem1) + .onNext(elem2) + .periodicValue(periodDuration) + val finalValue = newMetric.finalValue(totalDuration) + + val firstElementOfThePeriod = recordTimes1.head + val lastElementOfThePeriod = recordTimes2.last + val expectedSpeed = + (lastElementOfThePeriod.getEpochSecond - firstElementOfThePeriod.getEpochSecond) * 1000.0 / periodDuration.toMillis + + periodicValue shouldBe Value(Some(expectedSpeed)) + finalValue shouldBe Value(None) + } + + "correctly handle initial periods with a single record time" in { + val periodDuration: Duration = Duration.ofMillis(100) + val totalDuration: Duration = Duration.ofSeconds(3) + val elem1: String = "a" + val testNow = Clock.systemUTC().instant() + val recordTimes1 = List(testNow.minusSeconds(11)) + // The assumption made here is that each consecutive element has higher record times + def testRecordTimeFunction: String => List[Timestamp] = recordTimeFunctionFromMap( + Map( + elem1 -> recordTimes1 + ) + ) + + val metric = ConsumptionSpeedMetric.empty[String](testRecordTimeFunction) + + val (newMetric, periodicValue) = metric + .onNext(elem1) + .periodicValue(periodDuration) + val finalValue = newMetric.finalValue(totalDuration) + + periodicValue shouldBe Value(Some(0.0)) + finalValue shouldBe Value(None) + } + + "correctly handle non-initial periods with a single record time" in { + val periodDuration: Duration = Duration.ofMillis(100) + val totalDuration: Duration = Duration.ofSeconds(3) + val elem1: String = "a" + val elem2: String = "b" + val testNow = Clock.systemUTC().instant() + val recordTimes1 = List( + testNow.minusSeconds(100), + testNow.minusSeconds(50), + ) + val recordTimes2 = List( + testNow.minusSeconds(20) + ) + // The assumption made here is that each consecutive element has higher record times + def testRecordTimeFunction: String => List[Timestamp] = recordTimeFunctionFromMap( + Map( + elem1 -> recordTimes1, + elem2 -> 
recordTimes2, + ) + ) + + val metric = ConsumptionSpeedMetric.empty[String](testRecordTimeFunction) + + val (newMetric, periodicValue) = metric + .onNext(elem1) + .periodicValue(periodDuration) + ._1 + .onNext(elem2) + .periodicValue(periodDuration) + val finalValue = newMetric.finalValue(totalDuration) + + periodicValue shouldBe Value(Some(300.0)) + finalValue shouldBe Value(None) + } + + "correctly handle periods with no elements" in { + val periodDuration: Duration = Duration.ofMillis(100) + val totalDuration: Duration = Duration.ofSeconds(3) + val elem1: String = "a" + val elem2: String = "b" + val testNow = Clock.systemUTC().instant() + val recordTimes1 = List( + testNow.minusSeconds(100) + ) + val recordTimes2 = List( + testNow.minusSeconds(90) + ) + def testRecordTimeFunction: String => List[Timestamp] = recordTimeFunctionFromMap( + Map( + elem1 -> recordTimes1, + elem2 -> recordTimes2, + ) + ) + + val metric = ConsumptionSpeedMetric.empty[String](testRecordTimeFunction) + + val (newMetric, periodicValue) = metric + .onNext(elem1) + .onNext(elem2) + .periodicValue(periodDuration) + ._1 + .periodicValue(periodDuration) + val finalValue = newMetric.finalValue(totalDuration) + + periodicValue shouldBe Value(Some(0.0)) + finalValue shouldBe Value(None) + } + + "correctly handle multiple periods with elements" in { + val period1Duration: Duration = Duration.ofMillis(100) + val period2Duration: Duration = Duration.ofMillis(120) + val period3Duration: Duration = Duration.ofMillis(110) + val totalDuration: Duration = Duration.ofSeconds(3) + val elem1: String = "a" + val elem2: String = "b" + val elem3: String = "c" + val testNow = Clock.systemUTC().instant() + val recordTimes1 = List( + testNow.minusSeconds(100), + testNow.minusSeconds(90), + ) + // The assumption made here is that each consecutive element has higher record times + val recordTimes2 = List( + testNow.minusSeconds(70), + testNow.minusSeconds(40), + ) + val recordTimes3 = List( + testNow.minusSeconds(20), + testNow.minusSeconds(15), + ) + def testRecordTimeFunction: String => List[Timestamp] = recordTimeFunctionFromMap( + Map( + elem1 -> recordTimes1, + elem2 -> recordTimes2, + elem3 -> recordTimes3, + ) + ) + + val metric = ConsumptionSpeedMetric.empty[String](testRecordTimeFunction) + + val (newMetric, periodicValue) = metric + .onNext(elem1) + .onNext(elem2) + .periodicValue(period1Duration) + ._1 + .periodicValue(period2Duration) + ._1 + .onNext(elem3) + .periodicValue(period3Duration) + val finalValue = newMetric.finalValue(totalDuration) + + val first = recordTimes2.last + val last = recordTimes3.last + val expectedSpeed = + (last.getEpochSecond - first.getEpochSecond) * 1000.0 / period3Duration.toMillis + + periodicValue shouldBe Value(Some(expectedSpeed)) + finalValue shouldBe Value(None) + } + + "compute violated min speed SLO and the minimum speed" in { + val periodDuration: Duration = Duration.ofMillis(100) + val testNow = Clock.systemUTC().instant() + val minAllowedSpeed = 2.0 + val elem1 = "a" + val elem2 = "b" + val elem3 = "c" + def testRecordTimeFunction: String => List[Timestamp] = recordTimeFunctionFromMap( + Map( + elem1 -> List( + testNow.minusMillis(5000), + testNow.minusMillis(4500), + testNow.minusMillis(4000), + ), // ok, speed = 10.0 + elem2 -> List( + testNow.minusMillis(3990), + testNow.minusMillis(3980), + testNow.minusMillis(3920), // not ok, speed 0.8 + ), + elem3 -> List( + testNow.minusMillis(3900), + testNow.minusMillis(3800), + testNow.minusMillis(3770), // not ok, speed 1.5 + ), + ) + ) + + 
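// A period's consumption speed is the record-time progress during that period divided by the + // period duration; the second period above is the worst offender: 80ms of progress (from + // testNow-4000ms, the previous period's last record time, to testNow-3920ms) over a 100ms period + // yields the reported 0.8. + 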
val objective = MinConsumptionSpeed(minAllowedSpeed) + val metric: ConsumptionSpeedMetric[String] = + ConsumptionSpeedMetric.empty[String]( + recordTimeFunction = testRecordTimeFunction, + objective = Some(objective), + ) + + val violatedObjectives = + metric + .onNext(elem1) + .periodicValue(periodDuration) + ._1 + .onNext(elem2) + .periodicValue(periodDuration) + ._1 + .onNext(elem3) + .periodicValue(periodDuration) + ._1 + .violatedPeriodicObjectives + + violatedObjectives shouldBe List( + objective -> Value(Some(0.8)) + ) + } + } + + private def recordTimeFunctionFromMap( + map: Map[String, List[Instant]] + )(str: String): List[Timestamp] = + map + .map { case (k, v) => k -> v.map(instantToTimestamp) } + .getOrElse(str, throw new RuntimeException(s"Unexpected record function argument: $str")) + + private def instantToTimestamp(instant: Instant): Timestamp = + Timestamp.of(instant.getEpochSecond, instant.getNano) +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/CountRateMetricSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/CountRateMetricSpec.scala new file mode 100644 index 0000000000..0653de72ca --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/CountRateMetricSpec.scala @@ -0,0 +1,287 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import com.digitalasset.canton.ledger.api.benchtool.metrics.CountRateMetric +import com.digitalasset.canton.ledger.api.benchtool.metrics.CountRateMetric.* +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpec + +import java.time.Duration +import scala.language.existentials + +class CountRateMetricSpec extends AnyWordSpec with Matchers { + "CountRateMetric" should { + "correctly handle initial state" in { + val periodDuration: Duration = Duration.ofMillis(100) + val totalDuration: Duration = Duration.ofSeconds(1) + val metric: CountRateMetric[String] = anEmptyStringMetric() + + val (_, periodicValue) = metric.periodicValue(periodDuration) + val finalValue = metric.finalValue(totalDuration) + + periodicValue shouldBe Value(0.0) + finalValue shouldBe Value(0.0) + } + + "compute values after processing elements" in { + val periodDuration: Duration = Duration.ofMillis(100) + val totalDuration: Duration = Duration.ofSeconds(5) + val metric: CountRateMetric[String] = anEmptyStringMetric() + val elem1: String = "abc" + val elem2: String = "defg" + + val (newMetric, periodicValue) = metric + .onNext(elem1) + .onNext(elem2) + .periodicValue(periodDuration) + val finalValue = newMetric.finalValue(totalDuration) + + val totalCount: Int = stringLength(elem1) + stringLength(elem2) + periodicValue shouldBe Value( + ratePerSecond = totalCount * 1000.0 / periodDuration.toMillis + ) + finalValue shouldBe Value( + ratePerSecond = totalCount / totalDuration.getSeconds.toDouble + ) + } + + "correctly handle periods with no elements" in { + val periodDuration: Duration = Duration.ofMillis(100) + val totalDuration: Duration = Duration.ofSeconds(5) + val metric: CountRateMetric[String] = anEmptyStringMetric() + val elem1: String = "abc" + val elem2: String = "defg" + + val (newMetric, periodicValue) = metric + .onNext(elem1) + .onNext(elem2) + 
.periodicValue(periodDuration) + ._1 + .periodicValue(periodDuration) + val finalValue = newMetric.finalValue(totalDuration) + + val totalCount: Int = stringLength(elem1) + stringLength(elem2) + periodicValue shouldBe Value( + ratePerSecond = 0.0 + ) + finalValue shouldBe Value( + ratePerSecond = totalCount / totalDuration.getSeconds.toDouble + ) + } + + "correctly handle multiple periods with elements" in { + val periodDuration: Duration = Duration.ofMillis(100) + val totalDuration: Duration = Duration.ofSeconds(5) + val metric: CountRateMetric[String] = anEmptyStringMetric() + val elem1: String = "abc" + val elem2: String = "defg" + val elem3: String = "hij" + + val (newMetric, periodicValue) = metric + .onNext(elem1) + .onNext(elem2) + .periodicValue(periodDuration) + ._1 + .onNext(elem3) + .periodicValue(periodDuration) + val finalValue = newMetric.finalValue(totalDuration) + + val totalCount: Int = stringLength(elem1) + stringLength(elem2) + stringLength(elem3) + periodicValue shouldBe Value( + ratePerSecond = stringLength(elem3) * 1000.0 / periodDuration.toMillis + ) + finalValue shouldBe Value( + ratePerSecond = totalCount / totalDuration.getSeconds.toDouble + ) + } + + "compute violated minimum rate periodic SLO and the corresponding violating value" in { + val periodDuration: Duration = Duration.ofSeconds(2) + val minAllowedRatePerSecond = 2.0 + val objective = RateObjective.MinRate(minAllowedRatePerSecond) + val metric = anEmptyStringMetric(periodicObjectives = List(objective)) + + val violatedObjective = + metric + .onNext("abc") + .onNext("de") + .periodicValue(periodDuration) + ._1 + .onNext("f") + .onNext("gh") + // During this period the strings "f" and "gh" contribute 3 counted characters, so the rate is 3 / 2.0 = 1.5 + .periodicValue(periodDuration) + ._1 + .onNext("ijklmn") + .violatedPeriodicObjectives + + violatedObjective shouldBe List( + objective -> Value(1.5) + ) + } + + "not report non-violated periodic min rate objectives" in { + val periodDuration: Duration = Duration.ofSeconds(2) + val minAllowedRatePerSecond = 2.0 + val objective = RateObjective.MinRate(minAllowedRatePerSecond) + val metric = anEmptyStringMetric(periodicObjectives = List(objective)) + + val violatedObjective = + metric + .onNext("abc") + .onNext("de") + .periodicValue(periodDuration) + ._1 + .onNext("f") + .onNext("gh") + .onNext("ijk") + .periodicValue(periodDuration) + ._1 + .onNext("lmnoprst") + .violatedPeriodicObjectives + + violatedObjective shouldBe Nil + } + + "report violated min rate final objective" in { + val periodDuration: Duration = Duration.ofSeconds(2) + val totalDuration: Duration = Duration.ofSeconds(6) + val minAllowedRatePerSecond = 2.0 + val objective = RateObjective.MinRate(minAllowedRatePerSecond) + val metric = anEmptyStringMetric(finalObjectives = List(objective)) + + val violatedObjective = + metric + .onNext("abc") + .periodicValue(periodDuration) + ._1 + .onNext("def") + .onNext("ghi") + // total rate is (3 + 3 + 3) / 6.0 + .violatedFinalObjectives(totalDuration) + + violatedObjective shouldBe List( + objective -> Value(1.5) + ) + } + + "not report non-violated min rate final objective" in { + val periodDuration: Duration = Duration.ofSeconds(2) + val totalDuration: Duration = Duration.ofSeconds(6) + val minAllowedRatePerSecond = 2.0 + val objective = RateObjective.MinRate(minAllowedRatePerSecond) + val metric = anEmptyStringMetric(finalObjectives = List(objective)) + + val violatedObjective = + metric + .onNext("abc") + .periodicValue(periodDuration) + ._1 + .onNext("def") + .onNext("ghi") + 
.onNext("jklmno") + // total rate is (3 + 3 + 3 + 6) / 6.0 + .violatedFinalObjectives(totalDuration) + + violatedObjective shouldBe Nil + } + + "not report non-violated min rate final objective if the objective is violated only in a period" in { + val periodDuration: Duration = Duration.ofSeconds(2) + val totalDuration: Duration = Duration.ofSeconds(3) + val minAllowedRatePerSecond = 2.0 + val objective = RateObjective.MinRate(minAllowedRatePerSecond) + val metric = anEmptyStringMetric(finalObjectives = List(objective)) + + val violatedObjective = + metric + .onNext("abc") + // periodic rate is 3 / 2.0 = 1.5 + .periodicValue(periodDuration) + ._1 + .onNext("def") + .onNext("ghi") + // total rate is (3 + 3 + 3) / 3.0 = 3.0 + .violatedFinalObjectives(totalDuration) + + violatedObjective shouldBe Nil + } + + "report violated max rate final objective" in { + val periodDuration: Duration = Duration.ofSeconds(2) + val totalDuration: Duration = Duration.ofSeconds(3) + val objective = RateObjective.MaxRate(3.0) + val metric = CountRateMetric.empty[String]( + countingFunction = stringLength, + periodicObjectives = Nil, + finalObjectives = List(objective), + ) + + val violatedObjective = + metric + .onNext("abc") + .periodicValue(periodDuration) + ._1 + .onNext("def") + .onNext("ghijkl") + // total rate is (3 + 3 + 6) / 3.0 = 4.0 + .violatedFinalObjectives(totalDuration) + + violatedObjective shouldBe List( + objective -> Value(4.0) + ) + } + + "not report non-violated max rate final objective" in { + val periodDuration: Duration = Duration.ofSeconds(2) + val totalDuration: Duration = Duration.ofSeconds(3) + val objective = RateObjective.MaxRate(3.0) + val metric = anEmptyStringMetric(finalObjectives = List(objective)) + + val violatedObjective = + metric + .onNext("abc") + .periodicValue(periodDuration) + ._1 + .onNext("def") + .onNext("ghi") + // total rate is (3 + 3 + 3) / 3.0 = 3.0 + .violatedFinalObjectives(totalDuration) + + violatedObjective shouldBe Nil + } + + "not report non-violated max rate final objective if the objective is violated only in a period" in { + val periodDuration: Duration = Duration.ofSeconds(2) + val totalDuration: Duration = Duration.ofSeconds(4) + val objective = RateObjective.MaxRate(2.0) + val metric = anEmptyStringMetric(finalObjectives = List(objective)) + + val violatedObjective = + metric + .onNext("abcde") + // periodic rate is 5 / 2.0 = 2.5 + .periodicValue(periodDuration) + ._1 + .onNext("f") + .onNext("gh") + // total rate is (5 + 1 + 2) / 4.0 = 2.0 + .violatedFinalObjectives(totalDuration) + + violatedObjective shouldBe Nil + } + } + + private def stringLength(value: String): Int = value.length + private def anEmptyStringMetric( + periodicObjectives: List[CountRateMetric.RateObjective] = Nil, + finalObjectives: List[CountRateMetric.RateObjective] = Nil, + ): CountRateMetric[String] = + CountRateMetric.empty[String]( + countingFunction = stringLength, + periodicObjectives = periodicObjectives, + finalObjectives = finalObjectives, + ) +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/DelayMetricSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/DelayMetricSpec.scala new file mode 100644 index 0000000000..496e012900 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/DelayMetricSpec.scala @@ -0,0 +1,245 @@ +// Copyright (c) 2025 Digital Asset 
(Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import com.digitalasset.canton.ledger.api.benchtool.metrics.DelayMetric +import com.digitalasset.canton.ledger.api.benchtool.metrics.DelayMetric.* +import com.google.protobuf.timestamp.Timestamp +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpec + +import java.time.{Clock, Duration, Instant, ZoneId} +import scala.language.existentials + +class DelayMetricSpec extends AnyWordSpec with Matchers { + DelayMetric.getClass.getSimpleName should { + "correctly handle initial state" in { + val periodDuration: Duration = Duration.ofMillis(100) + val metric: DelayMetric[String] = anEmptyDelayMetric(Clock.systemUTC()) + + val (_, periodicValue) = metric.periodicValue(periodDuration) + val totalDuration: Duration = Duration.ofSeconds(1) + val finalValue = metric.finalValue(totalDuration) + + periodicValue shouldBe Value(None) + finalValue shouldBe Value(None) + } + + "compute values after processing elements" in { + val periodDuration: Duration = Duration.ofMillis(100) + val totalDuration: Duration = Duration.ofSeconds(5) + val elem1: String = "abc" + val elem2: String = "defgh" + val testNow = Clock.systemUTC().instant() + val recordTime1 = testNow.minusSeconds(11) + val recordTime2 = testNow.minusSeconds(22) + val recordTime3 = testNow.minusSeconds(33) + val delay1 = secondsBetween(recordTime1, testNow) + val delay2 = secondsBetween(recordTime2, testNow) + val delay3 = secondsBetween(recordTime3, testNow) + def testRecordTimeFunction: String => List[Timestamp] = recordTimeFunctionFromMap( + Map( + elem1 -> List(recordTime1, recordTime2), + elem2 -> List(recordTime3), + ) + ) + val clock = Clock.fixed(testNow, ZoneId.of("UTC")) + val metric: DelayMetric[String] = + DelayMetric.empty[String]( + recordTimeFunction = testRecordTimeFunction, + clock = clock, + ) + + val (newMetric, periodicValue) = metric + .onNext(elem1) + .onNext(elem2) + .periodicValue(periodDuration) + val finalValue = newMetric.finalValue(totalDuration) + + val expectedMean = (delay1 + delay2 + delay3) / 3 + periodicValue shouldBe Value(Some(expectedMean)) + finalValue shouldBe Value(None) + } + + "correctly handle periods with no elements" in { + val periodDuration: Duration = Duration.ofMillis(100) + val totalDuration: Duration = Duration.ofSeconds(5) + val elem1: String = "abc" + val elem2: String = "defg" + val testNow = Clock.systemUTC().instant() + val recordTime1 = testNow.minusSeconds(11) + val recordTime2 = testNow.minusSeconds(22) + val recordTime3 = testNow.minusSeconds(33) + def testRecordTimeFunction: String => List[Timestamp] = recordTimeFunctionFromMap( + Map( + elem1 -> List(recordTime1, recordTime2), + elem2 -> List(recordTime3), + ) + ) + val clock = Clock.fixed(testNow, ZoneId.of("UTC")) + val metric: DelayMetric[String] = + DelayMetric.empty[String]( + recordTimeFunction = testRecordTimeFunction, + clock = clock, + ) + + val (newMetric, periodicValue) = metric + .onNext(elem1) + .onNext(elem2) + .periodicValue(periodDuration) + ._1 + .periodicValue(periodDuration) + val finalValue = newMetric.finalValue(totalDuration) + + periodicValue shouldBe Value(None) + finalValue shouldBe Value(None) + } + + "correctly handle multiple periods with elements" in { + val periodDuration: Duration = Duration.ofMillis(100) + val totalDuration: Duration = Duration.ofSeconds(5) + val elem1: String = "abc" + val elem2: String = 
"defg" + val elem3: String = "hij" + val testNow = Clock.systemUTC().instant() + val recordTime1 = testNow.minusSeconds(11) + val recordTime2 = testNow.minusSeconds(22) + val recordTime3 = testNow.minusSeconds(33) + val recordTime4 = testNow.minusSeconds(44) + val recordTime5 = testNow.minusSeconds(55) + val delay4 = secondsBetween(recordTime4, testNow) + val delay5 = secondsBetween(recordTime5, testNow) + def testRecordTimeFunction: String => List[Timestamp] = recordTimeFunctionFromMap( + Map( + elem1 -> List(recordTime1, recordTime2), + elem2 -> List(recordTime3), + elem3 -> List(recordTime4, recordTime5), + ) + ) + val clock = Clock.fixed(testNow, ZoneId.of("UTC")) + val metric: DelayMetric[String] = + DelayMetric.empty[String]( + recordTimeFunction = testRecordTimeFunction, + clock = clock, + ) + + val (newMetric, periodicValue) = metric + .onNext(elem1) + .onNext(elem2) + .periodicValue(periodDuration) + ._1 + .periodicValue(periodDuration) + ._1 + .onNext(elem3) + .periodicValue(periodDuration) + val finalValue = newMetric.finalValue(totalDuration) + + val expectedMean = (delay4 + delay5) / 2 + periodicValue shouldBe Value(Some(expectedMean)) + finalValue shouldBe Value(None) + } + + "compute violated max delay SLO with the most extreme value" in { + val periodDuration: Duration = Duration.ofMillis(100) + val maxAllowedDelaySeconds: Long = 1000 + val elem1: String = "abc" + val elem2: String = "defg" + val elem3: String = "hijkl" + val elem4: String = "mno" + val testNow = Clock.systemUTC().instant() + + // first period + val recordTime1 = + testNow.minusSeconds(maxAllowedDelaySeconds - 100) // allowed record time + + // second period + val recordTime2A = + testNow.minusSeconds(maxAllowedDelaySeconds + 100) // not allowed record time + val recordTime2B = + testNow.minusSeconds(maxAllowedDelaySeconds + 200) // not allowed record time + val delay2A = durationBetween(recordTime2A, testNow) + val delay2B = durationBetween(recordTime2B, testNow) + val meanInPeriod2 = delay2A.plus(delay2B).dividedBy(2).getSeconds + + // third period - a period with record times higher than anywhere else, + // the mean delay from this period should be provided by the metric as the most violating value + val recordTime3A = testNow.minusSeconds( + maxAllowedDelaySeconds + 1100 + ) // not allowed record time + val recordTime3B = testNow.minusSeconds( + maxAllowedDelaySeconds + 1200 + ) // not allowed record time + val delay3A = durationBetween(recordTime3A, testNow) + val delay3B = durationBetween(recordTime3B, testNow) + val meanInPeriod3 = delay3A.plus(delay3B).dividedBy(2).getSeconds + + // fourth period + val recordTime4 = + testNow.minusSeconds(maxAllowedDelaySeconds + 300) // not allowed record time + val delay4 = durationBetween(recordTime4, testNow) + val meanInPeriod4 = delay4.getSeconds + + val maxDelay = List(meanInPeriod2, meanInPeriod3, meanInPeriod4).max + + def testRecordTimeFunction: String => List[Timestamp] = recordTimeFunctionFromMap( + Map( + elem1 -> List(recordTime1), + elem2 -> List(recordTime2A, recordTime2B), + elem3 -> List(recordTime3A, recordTime3B), + elem4 -> List(recordTime4), + ) + ) + val expectedViolatedObjective = MaxDelay(maxAllowedDelaySeconds) + val clock = Clock.fixed(testNow, ZoneId.of("UTC")) + val metric: DelayMetric[String] = + DelayMetric.empty[String]( + recordTimeFunction = testRecordTimeFunction, + clock = clock, + objective = Some(expectedViolatedObjective), + ) + + val violatedObjectives = + metric + .onNext(elem1) + .periodicValue(periodDuration) + ._1 + 
.onNext(elem2) + .periodicValue(periodDuration) + ._1 + .onNext(elem3) + .periodicValue(periodDuration) + ._1 + .onNext(elem4) + .periodicValue(periodDuration) + ._1 + .violatedPeriodicObjectives + + violatedObjectives shouldBe List( + expectedViolatedObjective -> Value(Some(maxDelay)) + ) + } + } + + private def recordTimeFunctionFromMap( + map: Map[String, List[Instant]] + )(str: String): List[Timestamp] = + map + .map { case (k, v) => k -> v.map(instantToTimestamp) } + .getOrElse(str, throw new RuntimeException(s"Unexpected record function argument: $str")) + + private def instantToTimestamp(instant: Instant): Timestamp = + Timestamp.of(instant.getEpochSecond, instant.getNano) + + private def durationBetween(first: Instant, second: Instant): Duration = + Duration.between(first, second) + + private def secondsBetween(first: Instant, second: Instant): Long = + Duration.between(first, second).getSeconds + + private def dummyRecordTimesFunction(str: String): List[Timestamp] = + str.map(_ => Timestamp.of(100, 0)).toList + + private def anEmptyDelayMetric(clock: Clock): DelayMetric[String] = + DelayMetric.empty[String](dummyRecordTimesFunction, clock) +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/LatencyMetricSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/LatencyMetricSpec.scala new file mode 100644 index 0000000000..167d22cc20 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/LatencyMetricSpec.scala @@ -0,0 +1,67 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import com.digitalasset.canton.ledger.api.benchtool.metrics.LatencyMetric +import com.digitalasset.canton.ledger.api.benchtool.metrics.LatencyMetric.MaxLatency +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpec + +import java.time.Duration +import scala.util.chaining.* + +class LatencyMetricSpec extends AnyWordSpec with Matchers { + private val dummyPeriod = Duration.ofSeconds(1L) + + LatencyMetric.getClass.getSimpleName should { + "compute correct values on updates" in { + LatencyMetric + .empty(maxLatencyObjectiveMillis = 0L) + .tap(_.finalValue(dummyPeriod) shouldBe LatencyMetric.Value(0L)) + .tap( + _.periodicValue(dummyPeriod) shouldBe (LatencyMetric( + 0L, + 0, + MaxLatency(0), + ) -> LatencyMetric.Value(0L)) + ) + .onNext(1000L) + .tap(_.finalValue(dummyPeriod) shouldBe LatencyMetric.Value(1000L)) + .tap( + _.periodicValue(dummyPeriod) shouldBe (LatencyMetric( + 1000L, + 1, + MaxLatency(0), + ) -> LatencyMetric.Value(1000L)) + ) + .onNext(2000L) + .tap(_.finalValue(dummyPeriod) shouldBe LatencyMetric.Value(1500L)) + .tap( + _.periodicValue(dummyPeriod) shouldBe (LatencyMetric( + 3000L, + 2, + MaxLatency(0), + ) -> LatencyMetric.Value(1500L)) + ) + } + } + + MaxLatency.getClass.getSimpleName should { + "correctly report violated metric" in { + val maxObjectiveMillis = 1000L + LatencyMetric + .empty(maxLatencyObjectiveMillis = maxObjectiveMillis) + .onNext(nanosFromMillis(1000L)) + .tap(_.violatedFinalObjectives(dummyPeriod) shouldBe empty) + .onNext(nanosFromMillis(2000L)) + .tap( + _.violatedFinalObjectives(dummyPeriod) shouldBe List( + MaxLatency(nanosFromMillis(1000L)) -> 
LatencyMetric.Value(nanosFromMillis(1500L)) + ) + ) + } + } + + private def nanosFromMillis(millis: Long) = millis * 1000000L +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MaxDelaySpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MaxDelaySpec.scala new file mode 100644 index 0000000000..740f262b67 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MaxDelaySpec.scala @@ -0,0 +1,51 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import org.scalatest.matchers.should.Matchers +import org.scalatest.prop.TableDrivenPropertyChecks +import org.scalatest.wordspec.AnyWordSpec + +import scala.util.Random + +class MaxDelaySpec extends AnyWordSpec with Matchers with TableDrivenPropertyChecks { + "Maximum delay SLO" should { + "correctly report violation" in { + import DelayMetric.Value + val randomValue = Random.nextInt(10000).toLong + val randomSmaller = randomValue - 1 + val randomLarger = randomValue + 1 + val maxDelay = DelayMetric.MaxDelay(randomValue) + val cases = Table( + ("Metric value", "Expected violated"), + (Value(None), false), + (Value(Some(randomSmaller)), false), + (Value(Some(randomValue)), false), + (Value(Some(randomLarger)), true), + ) + + forAll(cases) { (metricValue, expectedViolated) => + maxDelay.isViolatedBy(metricValue) shouldBe expectedViolated + } + } + + "correctly pick a value more violating requirements" in { + import DelayMetric.Value + val randomNumber = Random.nextInt(10).toLong + val higherNumber = randomNumber + 1 + val cases = Table( + ("first", "second", "expected result"), + (Value(Some(randomNumber)), Value(Some(higherNumber)), Value(Some(higherNumber))), + (Value(Some(higherNumber)), Value(Some(randomNumber)), Value(Some(higherNumber))), + (Value(Some(randomNumber)), Value(None), Value(Some(randomNumber))), + (Value(None), Value(Some(randomNumber)), Value(Some(randomNumber))), + (Value(None), Value(None), Value(None)), + ) + + forAll(cases) { (first, second, expected) => + Ordering[Value].max(first, second) shouldBe expected + } + } + } +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MetricsCollectorSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MetricsCollectorSpec.scala new file mode 100644 index 0000000000..fa32eb2815 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MetricsCollectorSpec.scala @@ -0,0 +1,235 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import com.daml.clock.AdjustableClock +import org.apache.pekko.actor.testkit.typed.scaladsl.{BehaviorTestKit, ScalaTestWithActorTestKit} +import org.apache.pekko.actor.typed.{ActorRef, Behavior} +import org.scalatest.wordspec.AnyWordSpecLike + +import java.time.{Clock, Duration, Instant, ZoneId} +import scala.util.Random + +class MetricsCollectorSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { + import MetricsCollector.{Message, Response} + + "The MetricsCollector" should { + "respond with empty periodic report" in new CollectorFixture { + val probe = testKit.createTestProbe[Response.PeriodicReportResponse]() + + collector ! Message.PeriodicReportRequest(probe.ref) + + probe.expectMessage( + Response.PeriodicReport( + values = List( + TestMetricValue("PERIODIC:") + ) + ) + ) + } + + "respond with correct periodic report" in new CollectorFixture { + val probe = testKit.createTestProbe[Response.PeriodicReportResponse]() + + collector ! Message.NewValue("banana") + collector ! Message.NewValue("mango") + collector ! Message.PeriodicReportRequest(probe.ref) + + probe.expectMessage( + Response.PeriodicReport( + values = List( + TestMetricValue("PERIODIC:banana-mango") + ) + ) + ) + } + + "not respond with a periodic report when requests are too frequent" in new CollectorFixture { + val probe = testKit.createTestProbe[Response.PeriodicReportResponse]() + + collector ! Message.NewValue("banana") + collector ! Message.NewValue("mango") + collector ! Message.PeriodicReportRequest(probe.ref) + + probe.expectMessageType[Response.PeriodicReport] + + clock.fastForward(Duration.ofSeconds(1)) + collector ! Message.PeriodicReportRequest(probe.ref) + + probe.expectMessage(Response.ReportNotReady) + } + + "include objective-violating values in periodic report" in new CollectorFixture { + val probe = testKit.createTestProbe[Response.PeriodicReportResponse]() + + collector ! Message.NewValue("banana") + collector ! Message.NewValue(TestObjective.TestViolatingValue) + collector ! Message.NewValue("mango") + collector ! Message.PeriodicReportRequest(probe.ref) + + probe.expectMessage( + Response.PeriodicReport( + values = List( + TestMetricValue("PERIODIC:banana-tomato-mango") + ) + ) + ) + } + + "respond with empty final report" in new CollectorFixture { + val probe = testKit.createTestProbe[Response.FinalReport]() + + collector ! Message.FinalReportRequest(probe.ref) + + probe.expectMessage( + Response.FinalReport( + metricsData = List( + Response.MetricFinalReportData( + name = "Test Metric", + value = TestMetricValue("FINAL:"), + violatedObjectives = Nil, + ) + ), + totalDuration = Duration.ofSeconds(10), + ) + ) + } + + "respond with correct final report" in new CollectorFixture { + val probe = testKit.createTestProbe[Response.FinalReport]() + + collector ! Message.NewValue("mango") + collector ! Message.NewValue("banana") + collector ! Message.NewValue("cherry") + collector ! Message.FinalReportRequest(probe.ref) + + probe.expectMessage( + Response.FinalReport( + metricsData = List( + Response.MetricFinalReportData( + name = "Test Metric", + value = TestMetricValue("FINAL:mango-banana-cherry"), + violatedObjectives = Nil, + ) + ), + totalDuration = Duration.ofSeconds(10), + ) + ) + } + + "include information about violated objective in the final report" in new CollectorFixture { + val probe = testKit.createTestProbe[Response.FinalReport]() + + collector ! 
Message.NewValue("mango") + collector ! Message.NewValue(TestObjective.TestViolatingValue) + collector ! Message.NewValue("cherry") + collector ! Message.FinalReportRequest(probe.ref) + + probe.expectMessage( + Response.FinalReport( + metricsData = List( + Response.MetricFinalReportData( + name = "Test Metric", + value = TestMetricValue("FINAL:mango-tomato-cherry"), + violatedObjectives = List( + ( + TestObjective, + TestMetricValue(TestObjective.TestViolatingValue), + ) + ), + ) + ), + totalDuration = Duration.ofSeconds(10), + ) + ) + } + + "stop after receiving final report request" in { + val probe = testKit.createTestProbe[Response.FinalReport]() + val behaviorTestKit = BehaviorTestKit(behavior) + + behaviorTestKit.isAlive shouldBe true + + behaviorTestKit.run(Message.FinalReportRequest(probe.ref)) + + behaviorTestKit.isAlive shouldBe false + } + } + + private class CollectorFixture { + private val now = Clock.systemUTC().instant() + private val tenSecondsAgo = now.minusSeconds(10) + private val minimumReportInterval = Duration.ofSeconds(5) + val clock = AdjustableClock( + baseClock = Clock.fixed(now, ZoneId.of("UTC")), + offset = Duration.ZERO, + ) + val collector: ActorRef[Message] = + spawnWithFixedClock(clock, tenSecondsAgo, tenSecondsAgo, minimumReportInterval) + } + + private def spawnWithFixedClock( + clock: Clock, + startTime: Instant, + lastPeriodicCheck: Instant, + minimumTimePeriodBetweenSubsequentReports: Duration, + ) = { + val behavior = + new MetricsCollector[String](None, minimumTimePeriodBetweenSubsequentReports, clock) + .handlingMessages( + metrics = List(TestMetric()), + lastPeriodicCheck = lastPeriodicCheck, + startTime = startTime, + ) + testKit.spawn( + behavior = behavior, + name = Random.alphanumeric.take(10).mkString, + ) + } + + private def behavior: Behavior[Message] = + MetricsCollector[String]( + metrics = List(TestMetric()), + exposedMetrics = None, + ) + + private case class TestMetricValue(value: String) extends MetricValue + + private case object TestObjective extends ServiceLevelObjective[TestMetricValue] { + val TestViolatingValue = "tomato" + + override def isViolatedBy(metricValue: TestMetricValue): Boolean = + metricValue.value == TestViolatingValue + } + + private case class TestMetric( + processedElems: List[String] = List.empty + ) extends Metric[String] { + override type V = TestMetricValue + override type Objective = TestObjective.type + + override def name: String = "Test Metric" + + override def onNext(value: String): Metric[String] = + this.copy(processedElems = processedElems :+ value) + + override def periodicValue(periodDuration: Duration): (Metric[String], TestMetricValue) = + (this, TestMetricValue(s"PERIODIC:${processedElems.mkString("-")}")) + + override def finalValue(totalDuration: Duration): TestMetricValue = + TestMetricValue(s"FINAL:${processedElems.mkString("-")}") + + override def violatedPeriodicObjectives: List[(TestObjective.type, TestMetricValue)] = + if (processedElems.contains(TestObjective.TestViolatingValue)) + List(TestObjective -> TestMetricValue(TestObjective.TestViolatingValue)) + else + Nil + + override def violatedFinalObjectives( + totalDuration: Duration + ): List[(TestObjective.type, TestMetricValue)] = Nil + + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MetricsSetSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MetricsSetSpec.scala new file mode 100644 
index 0000000000..d39aadee4c --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MetricsSetSpec.scala @@ -0,0 +1,19 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import com.digitalasset.canton.ledger.api.benchtool.metrics.MetricsSet +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers + +import java.time.Duration +import scala.concurrent.duration.FiniteDuration + +class MetricsSetSpec extends AnyFlatSpec with Matchers { + + it should "convert Scala's FiniteDuration to Java's Duration" in { + MetricsSet.toJavaDuration(FiniteDuration(5, "seconds")) shouldBe Duration.ofSeconds(5) + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MinConsumptionSpeedSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MinConsumptionSpeedSpec.scala new file mode 100644 index 0000000000..086b2c979d --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/MinConsumptionSpeedSpec.scala @@ -0,0 +1,34 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import com.digitalasset.canton.ledger.api.benchtool.metrics.ConsumptionSpeedMetric +import org.scalatest.matchers.should.Matchers +import org.scalatest.prop.TableDrivenPropertyChecks +import org.scalatest.wordspec.AnyWordSpec + +import scala.util.Random + +class MinConsumptionSpeedSpec extends AnyWordSpec with Matchers with TableDrivenPropertyChecks { + "Min consumption speed SLO" should { + "correctly report violation" in { + import com.digitalasset.canton.ledger.api.benchtool.metrics.ConsumptionSpeedMetric.Value + val objectiveSpeed = Random.nextDouble() + val objective = ConsumptionSpeedMetric.MinConsumptionSpeed(objectiveSpeed) + val lowerSpeed = objectiveSpeed - 1.0 + val higherSpeed = objectiveSpeed + 1.0 + val cases = Table( + ("Metric value", "Expected violated"), + (Value(None), true), + (Value(Some(lowerSpeed)), true), + (Value(Some(objectiveSpeed)), false), + (Value(Some(higherSpeed)), false), + ) + + forAll(cases) { (metricValue, expectedViolated) => + objective.isViolatedBy(metricValue) shouldBe expectedViolated + } + } + } +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/SizeMetricSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/SizeMetricSpec.scala new file mode 100644 index 0000000000..9b56f92ab0 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/SizeMetricSpec.scala @@ -0,0 +1,103 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import com.digitalasset.canton.ledger.api.benchtool.metrics.SizeMetric +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpec + +import java.time.Duration + +class SizeMetricSpec extends AnyWordSpec with Matchers { + SizeMetric.getClass.getSimpleName should { + "correctly handle initial state" in { + val totalDuration: Duration = Duration.ofSeconds(1) + val periodDuration: Duration = Duration.ofMillis(100) + val metric: SizeMetric[String] = anEmptySizeMetric() + + val (_, periodicValue) = metric.periodicValue(periodDuration) + val finalValue = metric.finalValue(totalDuration) + + periodicValue shouldBe SizeMetric.Value(0.0) + finalValue shouldBe SizeMetric.Value(0.0) + } + + "compute values after processing elements" in { + val periodDuration: Duration = Duration.ofMillis(100) + val totalDuration: Duration = Duration.ofSeconds(5) + val metric: SizeMetric[String] = anEmptySizeMetric() + val elem1: String = "abc" + val elem2: String = "defghi" + + val (newMetric, periodicValue) = metric + .onNext(elem1) + .onNext(elem2) + .periodicValue(periodDuration) + val finalValue = newMetric.finalValue(totalDuration) + + val totalSizeMegabytes = + (testSizingFunction(elem1) + testSizingFunction(elem2)).toDouble / 1024 / 1024 + periodicValue shouldBe SizeMetric.Value(totalSizeMegabytes * 1000.0 / periodDuration.toMillis) + finalValue shouldBe SizeMetric.Value(totalSizeMegabytes * 1000.0 / periodDuration.toMillis) + } + + "correctly handle periods with no elements" in { + val periodDuration: Duration = Duration.ofMillis(100) + val totalDuration: Duration = Duration.ofSeconds(5) + val metric: SizeMetric[String] = anEmptySizeMetric() + val elem1: String = "abc" + val elem2: String = "defghi" + + val (newMetric, periodicValue) = metric + .onNext(elem1) + .onNext(elem2) + .periodicValue(periodDuration) + ._1 + .periodicValue(periodDuration) + val finalValue = newMetric.finalValue(totalDuration) + + val firstPeriodMegabytes = + (testSizingFunction(elem1) + testSizingFunction(elem2)).toDouble / 1024 / 1024 + val firstPeriodMean = firstPeriodMegabytes * 1000.0 / periodDuration.toMillis + val secondPeriodMean = 0.0 + val totalMean = (firstPeriodMean + secondPeriodMean) / 2 + periodicValue shouldBe SizeMetric.Value(secondPeriodMean) + finalValue shouldBe SizeMetric.Value(totalMean) + } + + "correctly handle multiple periods with elements" in { + val periodDuration: Duration = Duration.ofMillis(100) + val totalDuration: Duration = Duration.ofSeconds(5) + val metric: SizeMetric[String] = anEmptySizeMetric() + val elem1: String = "abc" + val elem2: String = "defg" + val elem3: String = "hij" + + val (newMetric, periodicValue) = metric + .onNext(elem1) + .onNext(elem2) + .periodicValue(periodDuration) + ._1 + .periodicValue(periodDuration) + ._1 + .onNext(elem3) + .periodicValue(periodDuration) + val finalValue = newMetric.finalValue(totalDuration) + + val firstPeriodMegabytes = + (testSizingFunction(elem1) + testSizingFunction(elem2)).toDouble / 1024 / 1024 + val firstPeriodMean = firstPeriodMegabytes * 1000.0 / periodDuration.toMillis + val secondPeriodMean = 0.0 + val thirdPeriodMegabytes = testSizingFunction(elem3).toDouble / 1024 / 1024 + val thirdPeriodMean = thirdPeriodMegabytes * 1000.0 / periodDuration.toMillis + val totalMean = (firstPeriodMean + secondPeriodMean + thirdPeriodMean) / 3 + periodicValue shouldBe SizeMetric.Value(thirdPeriodMean) + finalValue shouldBe 
SizeMetric.Value(totalMean) + } + } + + private def testSizingFunction(value: String): Long = value.length.toLong * 12345 + private def anEmptySizeMetric(): SizeMetric[String] = + SizeMetric.empty[String](sizingFunction = testSizingFunction) +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/TotalCountMetricSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/TotalCountMetricSpec.scala new file mode 100644 index 0000000000..8cb5c8e673 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/TotalCountMetricSpec.scala @@ -0,0 +1,91 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics + +import com.digitalasset.canton.ledger.api.benchtool.metrics.TotalCountMetric +import com.digitalasset.canton.ledger.api.benchtool.metrics.TotalCountMetric.Value +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpec + +import java.time.Duration + +class TotalCountMetricSpec extends AnyWordSpec with Matchers { + TotalCountMetric.getClass.getSimpleName should { + "correctly handle initial state" in { + val periodDuration: Duration = Duration.ofMillis(100) + val totalDuration: Duration = Duration.ofSeconds(1) + val metric: TotalCountMetric[String] = anEmptyStringMetric() + + val (_, periodicValue) = metric.periodicValue(periodDuration) + val finalValue = metric.finalValue(totalDuration) + + periodicValue shouldBe Value(0) + finalValue shouldBe Value(0) + } + + "compute values after processing elements" in { + val periodDuration: Duration = Duration.ofMillis(100) + val totalDuration: Duration = Duration.ofSeconds(5) + val metric: TotalCountMetric[String] = anEmptyStringMetric() + val elem1: String = "abc" + val elem2: String = "defg" + + val (newMetric, periodicValue) = metric + .onNext(elem1) + .onNext(elem2) + .periodicValue(periodDuration) + val finalValue = newMetric.finalValue(totalDuration) + + val totalCount: Int = stringLength(elem1) + stringLength(elem2) + periodicValue shouldBe Value(totalCount) + finalValue shouldBe Value(totalCount) + } + + "correctly handle periods with no elements" in { + val periodDuration: Duration = Duration.ofMillis(100) + val totalDuration: Duration = Duration.ofSeconds(5) + val metric: TotalCountMetric[String] = anEmptyStringMetric() + val elem1: String = "abc" + val elem2: String = "defg" + + val (newMetric, periodicValue) = metric + .onNext(elem1) + .onNext(elem2) + .periodicValue(periodDuration) + ._1 + .periodicValue(periodDuration) + val finalValue = newMetric.finalValue(totalDuration) + + val totalCount: Int = stringLength(elem1) + stringLength(elem2) + periodicValue shouldBe Value(totalCount) + finalValue shouldBe Value(totalCount) + } + + "correctly handle multiple periods with elements" in { + val periodDuration: Duration = Duration.ofMillis(100) + val totalDuration: Duration = Duration.ofSeconds(5) + val metric: TotalCountMetric[String] = anEmptyStringMetric() + val elem1: String = "abc" + val elem2: String = "defg" + val elem3: String = "hij" + + val (newMetric, periodicValue) = metric + .onNext(elem1) + .onNext(elem2) + .periodicValue(periodDuration) + ._1 + .onNext(elem3) + .periodicValue(periodDuration) + val finalValue = newMetric.finalValue(totalDuration) + + val 
totalCount: Int = stringLength(elem1) + stringLength(elem2) + stringLength(elem3) + periodicValue shouldBe Value(totalCount) + finalValue shouldBe Value(totalCount) + } + } + + private def stringLength(value: String): Int = value.length + private def anEmptyStringMetric(): TotalCountMetric[String] = + TotalCountMetric.empty[String](stringLength) +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/metrics/TotalRuntimeMetricSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/metrics/TotalRuntimeMetricSpec.scala new file mode 100644 index 0000000000..9b2aa11ad9 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/metrics/metrics/TotalRuntimeMetricSpec.scala @@ -0,0 +1,65 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.metrics.metrics + +import com.daml.clock.AdjustableClock +import com.digitalasset.canton.ledger.api.benchtool.metrics.metrics.TotalRuntimeMetric +import com.digitalasset.canton.ledger.api.benchtool.metrics.metrics.TotalRuntimeMetric.{ + MaxDurationObjective, + Value, +} +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers + +import java.time.{Clock, Duration, Instant, ZoneId} + +class TotalRuntimeMetricSpec extends AnyFlatSpec with Matchers { + + it should "keep track of total stream runtime" in { + val startTime = Instant.EPOCH.plusMillis(1000) + val clock = AdjustableClock(Clock.fixed(startTime, ZoneId.systemDefault()), Duration.ZERO) + val objective = MaxDurationObjective( + maxValue = Duration.ofMillis(103) + ) + val tested = TotalRuntimeMetric[Any]( + clock = clock, + startTime = clock.instant, + objective = objective, + ) + val ignoredDuration = Duration.ofMillis(0) + val item = new Object() + + tested.periodicValue(ignoredDuration) shouldBe (tested, Value(Duration.ZERO)) + + clock.fastForward(Duration.ofMillis(15)) + tested.onNext(item) + + tested.periodicValue(ignoredDuration) shouldBe (tested, Value(Duration.ofMillis(15))) + + clock.fastForward(Duration.ofMillis(30)) + tested.onNext(item) + + tested.periodicValue(ignoredDuration) shouldBe (tested, Value(Duration.ofMillis(45))) + tested.violatedFinalObjectives(ignoredDuration) shouldBe Nil + tested.violatedPeriodicObjectives shouldBe Nil + tested.finalValue(ignoredDuration) shouldBe Value(Duration.ofMillis(45)) + + clock.fastForward(Duration.ofMillis(100)) + tested.onNext(item) + + tested.periodicValue(ignoredDuration) shouldBe (tested, Value(Duration.ofMillis(145))) + tested.violatedPeriodicObjectives shouldBe Nil + tested.violatedFinalObjectives(ignoredDuration) shouldBe List( + objective -> Value(Duration.ofMillis(145)) + ) + tested.finalValue(ignoredDuration) shouldBe Value(Duration.ofMillis(145)) + + clock.fastForward(Duration.ofMillis(100)) + tested.onNext(item) + tested.violatedFinalObjectives(ignoredDuration) shouldBe List( + objective -> Value(Duration.ofMillis(245)) + ) + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/ActiveContractKeysPoolSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/ActiveContractKeysPoolSpec.scala new file mode 100644 index 0000000000..6ae4afe822 
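The size and count specs above thread metric state explicitly: `onNext` returns an updated metric, and `periodicValue` returns the successor metric for the next period together with the value of the period that just ended, which is why the chained calls in the specs pick out `._1`. A minimal sketch of that pattern, using a hypothetical simplified metric rather than the Canton `Metric` trait:

```scala
import java.time.Duration

// Hypothetical, simplified analogue of the bench-tool metrics: tracks the
// element count of the current period plus a running total.
final case class PeriodCountMetric(periodCount: Int = 0, total: Int = 0) {
  def onNext(value: String): PeriodCountMetric =
    copy(periodCount = periodCount + 1, total = total + 1)

  // Returns the metric to use for the *next* period (period counter reset)
  // together with the value for the period that just ended.
  def periodicValue(periodDuration: Duration): (PeriodCountMetric, Int) =
    (copy(periodCount = 0), periodCount)

  def finalValue(totalDuration: Duration): Int = total
}

val (afterPeriod1, period1Count) =
  PeriodCountMetric().onNext("a").onNext("b").periodicValue(Duration.ofMillis(100))
// period1Count == 2
val (_, period2Count) = afterPeriod1.periodicValue(Duration.ofMillis(100))
// period2Count == 0: no elements arrived in the second period
// afterPeriod1.finalValue(Duration.ofSeconds(1)) == 2
```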
--- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/ActiveContractKeysPoolSpec.scala @@ -0,0 +1,29 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +import com.daml.ledger.api.v2.value.Value +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers + +class ActiveContractKeysPoolSpec extends AnyFlatSpec with Matchers { + + it should "put and pop from a pool" in { + val tested = new ActiveContractKeysPool[Value](RandomnessProvider.forSeed(0)) + intercept[NoSuchElementException](tested.getAndRemoveContractKey(templateName = "t1")) + tested.addContractKey(templateName = "t1", value = makeValue("1")) + intercept[NoSuchElementException](tested.getAndRemoveContractKey(templateName = "t2")) + tested.getAndRemoveContractKey("t1") shouldBe makeValue("1") + intercept[NoSuchElementException](tested.getAndRemoveContractKey(templateName = "t1")) + tested.addContractKey(templateName = "t1", value = makeValue("1")) + tested.addContractKey(templateName = "t1", value = makeValue("2")) + tested.addContractKey(templateName = "t1", value = makeValue("3")) + tested.addContractKey(templateName = "t2", value = makeValue("1")) + tested.getAndRemoveContractKey("t1") shouldBe makeValue("3") + tested.getAndRemoveContractKey("t1") shouldBe makeValue("1") + tested.getAndRemoveContractKey("t2") shouldBe makeValue("1") + } + + private def makeValue(payload: String): Value = Value(Value.Sum.Text(payload)) +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/ActiveContractsObserver.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/ActiveContractsObserver.scala new file mode 100644 index 0000000000..985256a40b --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/ActiveContractsObserver.scala @@ -0,0 +1,44 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +import com.daml.ledger.api.v2.state_service.GetActiveContractsResponse +import com.digitalasset.canton.ledger.api.benchtool.util.ObserverWithResult +import org.slf4j.{Logger, LoggerFactory} + +import scala.concurrent.Future + +object ActiveContractsObserver { + def apply(expectedTemplateNames: Set[String]): ActiveContractsObserver = + new ActiveContractsObserver( + logger = LoggerFactory.getLogger(getClass), + expectedTemplateNames = expectedTemplateNames, + ) +} + +/** Collects information about create events from ACS. 
+ */ +class ActiveContractsObserver(logger: Logger, expectedTemplateNames: Set[String]) + extends ObserverWithResult[GetActiveContractsResponse, ObservedEvents](logger) { + + private val createEvents = collection.mutable.ArrayBuffer[ObservedCreateEvent]() + + override def streamName: String = "dummy-stream-name" + + override def onNext(value: GetActiveContractsResponse): Unit = + for { + ac <- value.contractEntry.activeContract + event <- ac.createdEvent + } { + createEvents.addOne(ObservedCreateEvent(event)) + } + + override def completeWith(): Future[ObservedEvents] = Future.successful( + ObservedEvents( + expectedTemplateNames = expectedTemplateNames, + createEvents = createEvents.toList, + exerciseEvents = List.empty, + ) + ) +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/AllocatedPartiesSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/AllocatedPartiesSpec.scala new file mode 100644 index 0000000000..d9dfe3e7f0 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/AllocatedPartiesSpec.scala @@ -0,0 +1,100 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +import com.daml.ledger.javaapi.data.Party +import com.digitalasset.canton.ledger.api.benchtool.submission.{AllocatedParties, AllocatedPartySet} +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers + +class AllocatedPartiesSpec extends AnyFlatSpec with Matchers { + + it should "apportion parties appropriately" in { + AllocatedParties.forExistingParties( + parties = List( + "signatory-123", + "Obs-0", + "Obs-1", + "Div-0", + "Sub-0", + "FooParty-0", + "FooParty-1", + "BarParty-100", + "MyOtherParty-0", + ), + partyPrefixesForPartySets = List("FooParty", "BarParty"), + ) shouldBe AllocatedParties( + signatoryO = Some(new Party("signatory-123")), + observers = List( + new Party("Obs-0"), + new Party("Obs-1"), + ), + divulgees = List(new Party("Div-0")), + extraSubmitters = List(new Party("Sub-0")), + observerPartySets = List( + AllocatedPartySet( + mainPartyNamePrefix = "FooParty", + parties = List(new Party("FooParty-0"), new Party("FooParty-1")), + ), + AllocatedPartySet( + mainPartyNamePrefix = "BarParty", + parties = List(new Party("BarParty-100")), + ), + ), + ) + } + + it should "apportion parties appropriately - minimal" in { + AllocatedParties.forExistingParties( + parties = List( + "signatory-123" + ), + partyPrefixesForPartySets = List.empty, + ) shouldBe AllocatedParties( + signatoryO = Some(new Party("signatory-123")), + observers = List.empty, + divulgees = List.empty, + extraSubmitters = List.empty, + observerPartySets = List.empty, + ) + } + + it should "find party sets for any party prefix" in { + AllocatedParties.forExistingParties( + parties = List( + "Party-01", + "Party-02", + "Party-10", + "Foo-01", + "Bar-02", + "Baz-03", + ), + partyPrefixesForPartySets = List("Party-0", "Foo-", "Bar"), + ) shouldBe AllocatedParties( + signatoryO = None, + observers = List.empty, + divulgees = List.empty, + extraSubmitters = List.empty, + observerPartySets = List( + AllocatedPartySet( + "Party", + parties = List( + new Party("Party-01"), + new Party("Party-02"), + new Party("Party-10"), + ), + ), + 
AllocatedPartySet( + "Foo", + parties = List(new Party("Foo-01")), + ), + AllocatedPartySet( + "Bar", + parties = List(new Party("Bar-02")), + ), + ), + ) + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/CompletionsObserver.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/CompletionsObserver.scala new file mode 100644 index 0000000000..e243d4a043 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/CompletionsObserver.scala @@ -0,0 +1,48 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +import com.daml.ledger.api.v2.command_completion_service.CompletionStreamResponse +import com.digitalasset.canton.ledger.api.benchtool.util.ObserverWithResult +import org.slf4j.{Logger, LoggerFactory} + +import scala.concurrent.Future + +object CompletionsObserver { + def apply(): CompletionsObserver = new CompletionsObserver( + logger = LoggerFactory.getLogger(getClass) + ) +} + +class CompletionsObserver(logger: Logger) + extends ObserverWithResult[CompletionStreamResponse, ObservedCompletions](logger) { + + private val completions = collection.mutable.ArrayBuffer[ObservedCompletion]() + + override def streamName: String = "dummy-stream-name" + + override def onNext(value: CompletionStreamResponse): Unit = + for { + completion <- value.completionResponse.completion + } { + completions.addOne( + ObservedCompletion( + userId = completion.userId, + actAs = completion.actAs, + ) + ) + } + + override def completeWith(): Future[ObservedCompletions] = Future.successful( + ObservedCompletions( + completions = completions.toList + ) + ) +} + +final case class ObservedCompletion(userId: String, actAs: Seq[String]) + +final case class ObservedCompletions( + completions: Seq[ObservedCompletion] +) diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/DepletingUniformRandomPoolSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/DepletingUniformRandomPoolSpec.scala new file mode 100644 index 0000000000..3295d1abc1 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/DepletingUniformRandomPoolSpec.scala @@ -0,0 +1,24 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers + +class DepletingUniformRandomPoolSpec extends AnyFlatSpec with Matchers { + + it should "put and pop from a pool" in { + val tested = new DepletingUniformRandomPool[Int](RandomnessProvider.forSeed(0)) + intercept[NoSuchElementException](tested.pop()) + tested.put(1) + tested.pop() shouldBe 1 + tested.put(1) + tested.put(2) + tested.put(3) + tested.pop() shouldBe 3 + tested.pop() shouldBe 1 + tested.pop() shouldBe 2 + intercept[NoSuchElementException](tested.pop()) + } +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/DistributionSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/DistributionSpec.scala new file mode 100644 index 0000000000..429d87ffd0 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/DistributionSpec.scala @@ -0,0 +1,53 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +import com.digitalasset.canton.ledger.api.benchtool.submission.Distribution +import org.scalacheck.Gen +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpec +import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks + +class DistributionSpec extends AnyWordSpec with Matchers with ScalaCheckDrivenPropertyChecks { + implicit override val generatorDrivenConfig: PropertyCheckConfiguration = + PropertyCheckConfiguration(minSuccessful = 100) + + "Distribution" should { + val MaxValue = 1000000 + val smallInt = Gen.choose(1, MaxValue) + val zeroToOneDouble: Gen[Double] = + Gen.choose(0, Int.MaxValue - 1).map(_.toDouble / Int.MaxValue) + val listOfWeights: Gen[List[Int]] = Gen.choose(1, 50).flatMap(Gen.listOfN(_, smallInt)) + + "handle single-element list" in { + val cases: Gen[(Int, Double)] = for { + weight <- smallInt + double <- zeroToOneDouble + } yield (weight, double) + + forAll(cases) { case (weight, d) => + val sentinel = new Object() + val distribution = new Distribution[Object](List(weight), IndexedSeq(sentinel)) + distribution.choose(d) shouldBe sentinel + } + } + + "handle multi-element list" in { + val cases = for { + double <- zeroToOneDouble + weights <- listOfWeights + } yield (weights, double) + + forAll(cases) { case (weights, d) => + val distribution = new Distribution[Int](weights, items = weights.toIndexedSeq) + val index = distribution.index(d) + + val totalWeight = weights.map(_.toLong).sum + weights.take(index).map(_.toDouble / totalWeight).sum should be <= d + weights.take(index + 1).map(_.toDouble / totalWeight).sum should be > d + } + } + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/EventsObserver.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/EventsObserver.scala new file mode 100644 index 0000000000..ceb23dc47f --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/EventsObserver.scala @@ -0,0 +1,58 @@ +// Copyright (c) 2025 Digital 
Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +import com.daml.ledger.api.v2.update_service.GetUpdatesResponse +import com.digitalasset.canton.ledger.api.benchtool.util.ObserverWithResult +import org.slf4j.{Logger, LoggerFactory} + +import scala.concurrent.Future + +object EventsObserver { + + def apply(expectedTemplateNames: Set[String]): EventsObserver = + new EventsObserver( + logger = LoggerFactory.getLogger(getClass), + expectedTemplateNames = expectedTemplateNames, + ) + +} + +/** Collects information about create and exercise events. + */ +class EventsObserver(expectedTemplateNames: Set[String], logger: Logger) + extends ObserverWithResult[GetUpdatesResponse, ObservedEvents](logger) { + + private val createEvents = collection.mutable.ArrayBuffer[ObservedCreateEvent]() + private val exerciseEvents = collection.mutable.ArrayBuffer[ObservedExerciseEvent]() + + override def streamName: String = "dummy-stream-name" + + override def onNext(value: GetUpdatesResponse): Unit = + for { + transaction <- value.update.transaction + allEvents = transaction.events + event <- allEvents + } { + event.event.created.foreach(created => + createEvents.addOne( + ObservedCreateEvent(created) + ) + ) + event.event.exercised.foreach(exercised => + exerciseEvents.addOne( + ObservedExerciseEvent(exercised, offset = transaction.offset) + ) + ) + } + + override def completeWith(): Future[ObservedEvents] = + Future.successful( + ObservedEvents( + expectedTemplateNames = expectedTemplateNames, + createEvents = createEvents.toList, + exerciseEvents = exerciseEvents.toList, + ) + ) +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/FooCommandGeneratorSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/FooCommandGeneratorSpec.scala new file mode 100644 index 0000000000..8575d43f7a --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/FooCommandGeneratorSpec.scala @@ -0,0 +1,24 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +import com.digitalasset.canton.ledger.api.benchtool.submission.{ + FooCommandGenerator, + RandomnessProvider, +} +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers + +import java.nio.charset.StandardCharsets + +class FooCommandGeneratorSpec extends AnyFlatSpec with Matchers { + + it should "generate random payload of a given size" in { + FooCommandGenerator + .randomPayload(RandomnessProvider.Default, sizeBytes = 100) + .getBytes(StandardCharsets.UTF_8) + .length shouldBe 100 + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/NamesSpec.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/NamesSpec.scala new file mode 100644 index 0000000000..4e4e96fdc1 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/NamesSpec.scala @@ -0,0 +1,42 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. 
All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +import com.digitalasset.canton.ledger.api.benchtool.submission.Names +import org.scalatest.OptionValues +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers + +class NamesSpec extends AnyFlatSpec with Matchers with OptionValues { + + it should "left-pad party set party names" in { + val tested = new Names + tested.partySetPartyName(prefix = "Party", numberOfParties = 2, uniqueParties = false) shouldBe + Seq( + "Party-0", + "Party-1", + ) + tested.partySetPartyName(prefix = "Party", numberOfParties = 12, uniqueParties = false) shouldBe + Seq( + "Party-00", + "Party-01", + "Party-02", + "Party-03", + "Party-04", + "Party-05", + "Party-06", + "Party-07", + "Party-08", + "Party-09", + "Party-10", + "Party-11", + ) + val thousandParties = + tested.partySetPartyName(prefix = "Party", numberOfParties = 1000, uniqueParties = false) + thousandParties.headOption.value shouldBe "Party-000" + thousandParties.lastOption.value shouldBe "Party-999" + thousandParties should have length 1000 + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/ObservedCreateEvent.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/ObservedCreateEvent.scala new file mode 100644 index 0000000000..dc0e2e2c24 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/ObservedCreateEvent.scala @@ -0,0 +1,29 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +final case class ObservedCreateEvent( + templateName: String, + createArgumentsSerializedSize: Int, + interfaceViews: Seq[ObservedInterfaceView], + offset: Long, + contractId: String, +) + +object ObservedCreateEvent { + def apply( + created: com.daml.ledger.api.v2.event.CreatedEvent + ): ObservedCreateEvent = { + val argsSize = created.createArguments.fold(0)(_.serializedSize) + val templateName = + created.templateId.getOrElse(sys.error(s"Expected templateId in $created")).entityName + ObservedCreateEvent( + templateName = templateName, + createArgumentsSerializedSize = argsSize, + interfaceViews = created.interfaceViews.map(ObservedInterfaceView.apply), + offset = created.offset, + contractId = created.contractId, + ) + } +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/ObservedEvents.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/ObservedEvents.scala new file mode 100644 index 0000000000..d6996eca76 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/ObservedEvents.scala @@ -0,0 +1,57 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
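The padding behaviour NamesSpec pins down follows from a pad width equal to the digit count of the largest index: 12 parties pad to two digits (Party-00 .. Party-11), 1000 to three (Party-000 .. Party-999). A sketch of one way to produce those names (a hypothetical helper, not the actual `Names` implementation):

```scala
// Width is the number of digits of the largest index (numberOfParties - 1).
def partySetPartyNames(prefix: String, numberOfParties: Int): Seq[String] = {
  val width = (numberOfParties - 1).toString.length
  (0 until numberOfParties).map(i => s"$prefix-%0${width}d".format(i))
}

partySetPartyNames("Party", 12).take(2) // Seq("Party-00", "Party-01")
partySetPartyNames("Party", 1000).last  // "Party-999"
```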
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +final case class ObservedEvents( + expectedTemplateNames: Set[String], + createEvents: Seq[ObservedCreateEvent], + exerciseEvents: Seq[ObservedExerciseEvent] = List.empty, +) { + private val _actualTemplateNames = + (createEvents.map(_.templateName) ++ exerciseEvents.map(_.templateName)).toSet + require( + _actualTemplateNames.subsetOf(expectedTemplateNames), + s"${_actualTemplateNames} must be a subset of $expectedTemplateNames", + ) + + val consumingExercises: Seq[ObservedExerciseEvent] = exerciseEvents.filter(_.consuming) + val nonConsumingExercises: Seq[ObservedExerciseEvent] = exerciseEvents.filterNot(_.consuming) + + val avgSizeOfConsumingExercise: Int = + if (consumingExercises.isEmpty) 0 + else consumingExercises.map(_.choiceArgumentsSerializedSize).sum / consumingExercises.size + + val avgSizeOfNonconsumingExercise: Int = + if (nonConsumingExercises.isEmpty) 0 + else + nonConsumingExercises.map(_.choiceArgumentsSerializedSize).sum / nonConsumingExercises.size + + val numberOfCreatesPerTemplateName: Map[String, Int] = { + val groups = createEvents.groupBy(_.templateName) + expectedTemplateNames.map(name => name -> groups.get(name).fold(0)(_.size)).toMap + } + + val numberOfConsumingExercisesPerTemplateName: Map[String, Int] = { + val groups = consumingExercises.groupBy(_.templateName) + expectedTemplateNames.map(name => name -> groups.get(name).fold(0)(_.size)).toMap + } + + val numberOfNonConsumingExercisesPerTemplateName: Map[String, Int] = { + val groups = nonConsumingExercises.groupBy(_.templateName) + expectedTemplateNames.map(name => name -> groups.get(name).fold(0)(_.size)).toMap + } + + val avgSizeOfCreateEventPerTemplateName: Map[String, Int] = { + val groups = createEvents.groupBy(_.templateName) + expectedTemplateNames.map { name => + val avgSize = groups + .get(name) + .fold(0)(events => + if (events.isEmpty) 0 else events.map(_.createArgumentsSerializedSize).sum / events.size + ) + name -> avgSize + }.toMap + } + +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/ObservedExerciseEvent.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/ObservedExerciseEvent.scala new file mode 100644 index 0000000000..01f938e9e2 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/ObservedExerciseEvent.scala @@ -0,0 +1,34 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
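A small usage sketch of the `ObservedEvents` aggregate above; the template names are made up, and the values follow directly from the definitions:

```scala
val events = ObservedEvents(
  expectedTemplateNames = Set("Foo1", "Foo2"),
  createEvents = Seq(
    ObservedCreateEvent("Foo1", createArgumentsSerializedSize = 10, interfaceViews = Nil, offset = 1L, contractId = "cid-1"),
    ObservedCreateEvent("Foo1", createArgumentsSerializedSize = 30, interfaceViews = Nil, offset = 2L, contractId = "cid-2"),
  ),
)

// A template name outside `expectedTemplateNames` would have failed the
// `require` in the constructor.
events.numberOfCreatesPerTemplateName      // Map("Foo1" -> 2, "Foo2" -> 0)
events.avgSizeOfCreateEventPerTemplateName // Map("Foo1" -> 20, "Foo2" -> 0)
```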
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +final case class ObservedExerciseEvent( + templateName: String, + choiceName: String, + choiceArgumentsSerializedSize: Int, + consuming: Boolean, + offset: Long, + contractId: String, +) +object ObservedExerciseEvent { + def apply( + exercised: com.daml.ledger.api.v2.event.ExercisedEvent, + offset: Long, + ): ObservedExerciseEvent = { + val argsSize = exercised.choiceArgument.fold(0)(_.serializedSize) + val templateName = exercised.templateId + .getOrElse(sys.error(s"Expected templateId in $exercised")) + .entityName + val contractId = exercised.contractId + val choiceName = exercised.choice + ObservedExerciseEvent( + templateName = templateName, + choiceName = choiceName, + choiceArgumentsSerializedSize = argsSize, + consuming = exercised.consuming, + offset = offset, + contractId = contractId, + ) + } +} diff --git a/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/ObservedInterfaceView.scala b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/ObservedInterfaceView.scala new file mode 100644 index 0000000000..59e07b8337 --- /dev/null +++ b/canton/community/ledger-api-bench-tool/src/test/scala/com/digitalasset/canton/ledger/api/benchtool/submission/ObservedInterfaceView.scala @@ -0,0 +1,16 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.benchtool.submission + +final case class ObservedInterfaceView(interfaceName: String, serializedSize: Int) +object ObservedInterfaceView { + def apply(interfaceView: com.daml.ledger.api.v2.event.InterfaceView): ObservedInterfaceView = { + val interfaceName = + interfaceView.interfaceId + .getOrElse(sys.error(s"Expected interfaceId in $interfaceView")) + .entityName + val serializedSize = interfaceView.serializedSize + ObservedInterfaceView(interfaceName, serializedSize) + } +} diff --git a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/admin/package_management_service.proto b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/admin/package_management_service.proto index 08df5a2c44..35c975d4ff 100644 --- a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/admin/package_management_service.proto +++ b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/admin/package_management_service.proto @@ -5,6 +5,7 @@ syntax = "proto3"; package com.daml.ledger.api.v2.admin; +import "com/daml/ledger/api/v2/package_reference.proto"; import "google/protobuf/timestamp.proto"; option csharp_namespace = "Com.Daml.Ledger.Api.V2.Admin"; @@ -40,6 +41,9 @@ service PackageManagementService { // - Succeed if the package is valid // - Respond with a gRPC error if the package is not valid rpc ValidateDarFile(ValidateDarFileRequest) returns (ValidateDarFileResponse); + + // Update the vetted packages of this participant + rpc UpdateVettedPackages(UpdateVettedPackagesRequest) returns (UpdateVettedPackagesResponse); } message ListKnownPackagesRequest {} @@ -82,6 +86,31 @@ message UploadDarFileRequest { // Unique submission identifier. // Optional, defaults to a random identifier. string submission_id = 2; + + enum VettingChange { + // Vetting change field left unspecified, defaults to vetting all the + // packages in the DAR. 
+ VETTING_CHANGE_UNSPECIFIED = 0; + // Vet all the packages in the DAR. + VETTING_CHANGE_VET_ALL_PACKAGES = 1; + // Do not vet any packages in the DAR. + VETTING_CHANGE_DONT_VET_ANY_PACKAGES = 2; + } + + // How to vet packages in the DAR being uploaded + VettingChange vetting_change = 3; + + // Only used if VettingChange is set to VETTING_CHANGE_VET_ALL_PACKAGES, in + // order to specify which synchronizer to vet on. + // + // If synchronizer_id is set, the synchronizer with this ID will be used. If + // synchronizer_id is unset and the participant is only connected to a single + // synchronizer, that synchronizer will be used by default. If synchronizer_id + // is unset and the participant is connected to multiple synchronizers, the + // request will error out with PACKAGE_SERVICE_CANNOT_AUTODETECT_SYNCHRONIZER. + // + // Optional + string synchronizer_id = 4; } // A message that is received when the upload operation succeeded. @@ -99,6 +128,124 @@ message ValidateDarFileRequest { // Unique submission identifier. // Optional, defaults to a random identifier. string submission_id = 2; + + // If synchronizer_id is set, the synchronizer with this ID will be used. If + // synchronizer_id is unset and the participant is only connected to a single + // synchronizer, that synchronizer will be used by default. If synchronizer_id + // is unset and the participant is connected to multiple synchronizers, the + // request will error out with PACKAGE_SERVICE_CANNOT_AUTODETECT_SYNCHRONIZER. + // + // Optional + string synchronizer_id = 4; } message ValidateDarFileResponse {} + +// A change to the set of vetted packages. +message VettedPackagesChange { + // Remove packages from the set of vetted packages + message Unvet { + // Packages to be unvetted. + // + // If a reference in this list matches multiple packages, they are all + // unvetted. + repeated VettedPackagesRef packages = 1; + } + + // Set vetting bounds of a list of packages. Packages that were not previously + // vetted have their bounds added, previous vetting bounds are overwritten. + message Vet { + // Packages to be vetted. + // + // If a reference in this list matches more than one package, the change is + // considered ambiguous and the entire update request is rejected. In other + // words, every reference must match exactly one package. + repeated VettedPackagesRef packages = 1; + + // The time from which these packages should be vetted, prior lower bounds + // are overwritten. + // Optional + google.protobuf.Timestamp new_valid_from_inclusive = 2; + + // The time until which these packages should be vetted, prior upper bounds + // are overwritten. + // Optional + google.protobuf.Timestamp new_valid_until_exclusive = 3; + } + + oneof operation { + // Add packages to or update packages in the set of vetted packages. + Vet vet = 1; + // Remove packages from the set of vetted packages. + Unvet unvet = 2; + } +} + +// A reference to identify one or more packages. +// +// A reference matches a package if its ``package_id`` matches the package's ID, +// its ``package_name`` matches the package's name, and its ``package_version`` +// matches the package's version. If an attribute in the reference is left +// unspecified (i.e. as an empty string), that attribute is treated as a +// wildcard. At a minimum, ``package_id`` or the ``package_name`` must be +// specified. +// +// If a reference does not match any package, the reference is considered +// unresolved and the entire update request is rejected. 
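To make the matching rules concrete before the field definitions that follow, here is a sketch of a dry-run vetting update, assuming ScalaPB codegen in the style of the other `com.daml.ledger.api.v2` classes used in this diff (package path and accessor names are inferred, not confirmed):

```scala
import com.daml.ledger.api.v2.admin.package_management_service.{
  UpdateVettedPackagesRequest,
  VettedPackagesChange,
  VettedPackagesRef,
}

// A name-plus-version reference: `packageId` is left empty and acts as a
// wildcard. For a Vet change the reference must still resolve to exactly
// one package, otherwise the whole request is rejected as ambiguous.
val ref = VettedPackagesRef(packageName = "my-app", packageVersion = "1.0.0")

val previewRequest = UpdateVettedPackagesRequest(
  changes = Seq(
    VettedPackagesChange(
      VettedPackagesChange.Operation.Vet(
        VettedPackagesChange.Vet(packages = Seq(ref))
      )
    )
  ),
  dryRun = true, // validate only: errors surface, but nothing is applied
)
```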
+message VettedPackagesRef { + // Package's package id must be the same as this field. + // Optional + string package_id = 1; + + // Package's name must be the same as this field. + // Optional + string package_name = 2; + + // Package's version must be the same as this field. + // Optional + string package_version = 3; +} + +message UpdateVettedPackagesRequest { + // Changes to apply to the current vetting state of the participant on the + // specified synchronizer. The changes are applied in order. + // Any package not changed keeps its previous vetting state. + repeated VettedPackagesChange changes = 1; + + // If dry_run is true, then the changes are only prepared, but not applied. If + // a request would trigger an error when run (e.g. TOPOLOGY_DEPENDENCIES_NOT_VETTED), + // it will also trigger an error when dry_run is set. + // + // Use this flag to preview a change before applying it. + bool dry_run = 2; + + // If set, the requested changes will take place on the specified + // synchronizer. If synchronizer_id is unset and the participant is only + // connected to a single synchronizer, that synchronizer will be used by + // default. If synchronizer_id is unset and the participant is connected to + // multiple synchronizers, the request will error out with + // PACKAGE_SERVICE_CANNOT_AUTODETECT_SYNCHRONIZER. + // + // Optional + string synchronizer_id = 3; + + // The serial of the last ``VettedPackages`` topology transaction of this + // participant and on this synchronizer. + // + // Execution of the request fails if this is not correct. Use this to guard + // against concurrent changes. + // + // If left unspecified, no validation is done against the last transaction's + // serial. + // + // Optional + PriorTopologySerial expected_topology_serial = 4; +} + +message UpdateVettedPackagesResponse { + // All vetted packages on this participant and synchronizer, before the + // specified changes. Empty if no vetting state existed beforehand. + VettedPackages past_vetted_packages = 1; + // All vetted packages on this participant and synchronizer, after the specified changes. + VettedPackages new_vetted_packages = 2; +} diff --git a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/admin/party_management_service.proto b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/admin/party_management_service.proto index c01d75b4f3..b40518158a 100644 --- a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/admin/party_management_service.proto +++ b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/admin/party_management_service.proto @@ -6,6 +6,7 @@ syntax = "proto3"; package com.daml.ledger.api.v2.admin; import "com/daml/ledger/api/v2/admin/object_meta.proto"; +import "com/daml/ledger/api/v2/crypto.proto"; import "google/protobuf/field_mask.proto"; option csharp_namespace = "Com.Daml.Ledger.Api.V2.Admin"; @@ -68,12 +69,39 @@ service PartyManagementService { // colon, minus and underscore limited to 255 chars rpc AllocateParty(AllocatePartyRequest) returns (AllocatePartyResponse); + // Alpha 3.3: Endpoint to allocate a new external party on a synchronizer + // + // Expected to be stable in 3.5 + // + // The external party must be hosted (at least) on this node with either confirmation or observation permissions. + // It can optionally be hosted on other nodes (then called a multi-hosted party).
+ // If hosted on additional nodes, explicit authorization of the hosting relationship must be performed on those nodes + // before the party can be used. + // Decentralized namespaces are supported but must be provided fully authorized by their owners. + // The individual owner namespace transactions can be submitted in the same call (fully authorized as well). + // In the simple case of a non-multi-hosted, non-decentralized party, the RPC will return once the party is + // effectively allocated and ready to use, similarly to the AllocateParty behavior. + // For more complex scenarios, applications may need to query the party status explicitly (only through the admin API as of now). + rpc AllocateExternalParty(AllocateExternalPartyRequest) returns (AllocateExternalPartyResponse); + // Update selected modifiable participant-local attributes of a party details resource. // Can update the participant's local information for local parties. rpc UpdatePartyDetails(UpdatePartyDetailsRequest) returns (UpdatePartyDetailsResponse); // Update the assignment of a party from one IDP to another. rpc UpdatePartyIdentityProviderId(UpdatePartyIdentityProviderIdRequest) returns (UpdatePartyIdentityProviderIdResponse); + + // Alpha 3.3: Convenience endpoint to generate topology transactions for external signing + // + // Expected to be stable in 3.5 + // + // You may use this endpoint to generate the common external topology transactions + // which can be signed externally and uploaded as part of the allocate party process + // + // Note that this request will create a normal namespace using the same key for the + // identity as for signing. More elaborate schemes such as multi-signature + // or decentralized parties require you to construct the topology transactions yourself. + rpc GenerateExternalPartyTopology(GenerateExternalPartyTopologyRequest) returns (GenerateExternalPartyTopologyResponse); } // Required authorization: ``HasRight(ParticipantAdmin)`` @@ -170,6 +198,48 @@ message AllocatePartyResponse { PartyDetails party_details = 1; } + +// Required authorization: ``HasRight(ParticipantAdmin) OR IsAuthenticatedIdentityProviderAdmin(identity_provider_id)`` +message AllocateExternalPartyRequest { + message SignedTransaction { + // The serialized TopologyTransaction + bytes transaction = 1; + // Additional signatures for this transaction specifically + // Use for transactions that require additional signatures beyond the namespace key signatures + // e.g.: PartyToKeyMapping must be signed by all registered keys + repeated Signature signatures = 2; + } + + // TODO(#27670) support synchronizer aliases + // Synchronizer ID on which to onboard the party + // Required + string synchronizer = 1; + // TopologyTransactions to onboard the external party + // Can contain: + // - A namespace for the party. + // This can be either a single NamespaceDelegation, + // or DecentralizedNamespaceDefinition along with its authorized namespace owners in the form of NamespaceDelegations. + // May be provided; if so, it must be fully authorized by the signatures in this request combined with the existing topology state. + // - A PartyToKeyMapping to register the party's signing keys. + // May be provided; if so, it must be fully authorized by the signatures in this request combined with the existing topology state. + // - A PartyToParticipant to register the hosting relationship of the party. + // Must be provided.
+ // Required + repeated SignedTransaction onboarding_transactions = 2; + + // Optional signatures of the combined hash of all onboarding_transactions + // This may be used instead of providing signatures on each individual transaction + repeated Signature multi_hash_signatures = 3; + + // The id of the ``Identity Provider`` + // If not set, assume the party is managed by the default identity provider. + // Optional + string identity_provider_id = 4; +} + +message AllocateExternalPartyResponse { + string party_id = 1; +} + // Required authorization: ``HasRight(ParticipantAdmin) OR IsAuthenticatedIdentityProviderAdmin(party_details.identity_provider_id)`` message UpdatePartyDetailsRequest { // Party to be updated @@ -241,3 +311,36 @@ message UpdatePartyIdentityProviderIdRequest { } message UpdatePartyIdentityProviderIdResponse {} + +message GenerateExternalPartyTopologyRequest { + // TODO(#27670) support synchronizer aliases + // Required: synchronizer-id for which we are building this request. + string synchronizer = 1; + // Required: the actual party id will be constructed from this hint and a fingerprint of the public key + string party_hint = 2; + // Required: public key + com.daml.ledger.api.v2.SigningPublicKey public_key = 3; + // Optional: if true, then the local participant will only be observing, not confirming. Default false. + bool local_participant_observation_only = 4; + // Optional: other participant ids which should be confirming for this party + repeated string other_confirming_participant_uids = 5; + // Optional: confirmation threshold >= 1 for the party. Defaults to all available confirmers; a value of 0 also selects this default. + uint32 confirmation_threshold = 6; + // Optional: other observing participant ids for this party + repeated string observing_participant_uids = 7; +} + +// Response message with topology transactions and the multi-hash to be signed. +message GenerateExternalPartyTopologyResponse { + // the generated party id + string party_id = 1; + // the fingerprint of the supplied public key + string public_key_fingerprint = 2; + // The serialized topology transactions which need to be signed and submitted as part of the allocate party process + // Note that the serialization includes the versioning information. Therefore, the transaction here is serialized + // as an `UntypedVersionedMessage` which in turn contains the serialized `TopologyTransaction` in the version + // supported by the synchronizer. + repeated bytes topology_transactions = 3; + // the multi-hash which may be signed instead of each individual transaction + bytes multi_hash = 4; +} diff --git a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/command_service.proto b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/command_service.proto index 03390340ad..dd037a1cfa 100644 --- a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/command_service.proto +++ b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/command_service.proto @@ -29,13 +29,6 @@ service CommandService { // Propagates the gRPC error of failed submissions including Daml interpretation errors. rpc SubmitAndWaitForTransaction(SubmitAndWaitForTransactionRequest) returns (SubmitAndWaitForTransactionResponse); - // Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. - // Submits a single composite command, waits for its result, and returns the transaction tree.
- // Propagates the gRPC error of failed submissions including Daml interpretation errors. - rpc SubmitAndWaitForTransactionTree(SubmitAndWaitRequest) returns (SubmitAndWaitForTransactionTreeResponse) { - option deprecated = true; - } - // Submits a single composite reassignment command, waits for its result, and returns the reassignment. // Propagates the gRPC error of failed submission. rpc SubmitAndWaitForReassignment(SubmitAndWaitForReassignmentRequest) returns (SubmitAndWaitForReassignmentResponse); @@ -79,15 +72,6 @@ message SubmitAndWaitForTransactionResponse { Transaction transaction = 1; } -// Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. -message SubmitAndWaitForTransactionTreeResponse { - option deprecated = true; - // The transaction tree that resulted from the submitted command. - // The transaction might contain no events (request conditions result in filtering out all of them). - // Required - TransactionTree transaction = 1; -} - // This reassignment is executed as a single atomic update. message SubmitAndWaitForReassignmentRequest { // The reassignment commands to be submitted. diff --git a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/commands.proto b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/commands.proto index 81dde0be2c..95a593d246 100644 --- a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/commands.proto +++ b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/commands.proto @@ -114,11 +114,14 @@ message DisclosedContract { // The template id of the contract. // The identifier uses the package-id reference format. // - // Required + // If provided, used to validate the template id of the contract serialized in the created_event_blob. + // Optional Identifier template_id = 1; // The contract id - // Required + // + // If provided, used to validate the contract id of the contract serialized in the created_event_blob. + // Optional string contract_id = 2; // Opaque byte string containing the complete payload required by the Daml engine diff --git a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/crypto.proto b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/crypto.proto new file mode 100644 index 0000000000..76c71c1649 --- /dev/null +++ b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/crypto.proto @@ -0,0 +1,109 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.daml.ledger.api.v2; + +option csharp_namespace = "Com.Daml.Ledger.Api.V2"; +option java_outer_classname = "CryptoOuterClass"; +option java_package = "com.daml.ledger.api.v2"; + +// Note: these crypto data types are a subset of the ones used internally by Canton. +// They have been forked from the respective com.digitalasset.canton.crypto.v30 package. 
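As a rough illustration of the types defined below: with JDK 15+, the JCA produces Ed25519 signatures in exactly the RFC 8032 `r || s` concatenation that `SIGNATURE_FORMAT_CONCAT` describes, and `getEncoded` on a JCA public key yields the DER-encoded X.509 SubjectPublicKeyInfo structure. A sketch assuming ScalaPB codegen for this file, following the package conventions of the other generated classes used in this diff (the package path and the fingerprint value are assumptions):

```scala
import java.security.{KeyPairGenerator, Signature => JavaSignature}
import com.google.protobuf.ByteString
import com.daml.ledger.api.v2.crypto.{
  CryptoKeyFormat,
  Signature,
  SignatureFormat,
  SigningAlgorithmSpec,
  SigningKeySpec,
  SigningPublicKey,
}

val keyPair = KeyPairGenerator.getInstance("Ed25519").generateKeyPair()

// `getEncoded` returns the DER-encoded X.509 SubjectPublicKeyInfo structure,
// i.e. CRYPTO_KEY_FORMAT_DER_X509_SUBJECT_PUBLIC_KEY_INFO.
val publicKey = SigningPublicKey(
  format = CryptoKeyFormat.CRYPTO_KEY_FORMAT_DER_X509_SUBJECT_PUBLIC_KEY_INFO,
  keyData = ByteString.copyFrom(keyPair.getPublic.getEncoded),
  keySpec = SigningKeySpec.SIGNING_KEY_SPEC_EC_CURVE25519,
)

val signer = JavaSignature.getInstance("Ed25519")
signer.initSign(keyPair.getPrivate)
signer.update("payload-to-sign".getBytes("UTF-8"))

// JCA Ed25519 emits the RFC 8032 `r || s` concatenation, i.e. SIGNATURE_FORMAT_CONCAT.
val signature = Signature(
  format = SignatureFormat.SIGNATURE_FORMAT_CONCAT,
  signature = ByteString.copyFrom(signer.sign()),
  signedBy = "<fingerprint-of-public-key>", // hypothetical placeholder
  signingAlgorithmSpec = SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_ED25519,
)
```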
+ +enum SigningKeySpec { + SIGNING_KEY_SPEC_UNSPECIFIED = 0; + + // Elliptic Curve Key from Curve25519 + // as defined in http://ed25519.cr.yp.to/ + SIGNING_KEY_SPEC_EC_CURVE25519 = 1; + + // Elliptic Curve Key from the NIST P-256 curve (aka secp256r1) + // as defined in https://doi.org/10.6028/NIST.FIPS.186-4 + SIGNING_KEY_SPEC_EC_P256 = 2; + + // Elliptic Curve Key from the NIST P-384 curve (aka secp384r1) + // as defined in https://doi.org/10.6028/NIST.FIPS.186-4 + SIGNING_KEY_SPEC_EC_P384 = 3; + + // Elliptic Curve Key from SECG P256k1 curve (aka secp256k1) + // commonly used in bitcoin and ethereum + // as defined in https://www.secg.org/sec2-v2.pdf + SIGNING_KEY_SPEC_EC_SECP256K1 = 4; +} + +// Serialization format for crypto keys and signatures +enum CryptoKeyFormat { + CRYPTO_KEY_FORMAT_UNSPECIFIED = 0; + + // ASN.1 + DER encoding + // Legacy format no longer used, except for migrations + CRYPTO_KEY_FORMAT_DER = 1; + + // Raw encoding of a key + CRYPTO_KEY_FORMAT_RAW = 2; + + // ASN.1 + DER-encoding of X.509 SubjectPublicKeyInfo structure: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1 + CRYPTO_KEY_FORMAT_DER_X509_SUBJECT_PUBLIC_KEY_INFO = 3; + + // Symbolic crypto, must only be used for testing + reserved 10000; +} + +message SigningPublicKey { + // The serialization format of the public key + CryptoKeyFormat format = 1; + + // Serialized public key in the format specified above + bytes key_data = 2; + + // The key specification + SigningKeySpec key_spec = 3; +} + +message Signature { + SignatureFormat format = 1; + + bytes signature = 2; + + // The fingerprint/id of the keypair used to create this signature and needed to verify. + string signed_by = 3; + + // The signing algorithm specification used to produce this signature + SigningAlgorithmSpec signing_algorithm_spec = 4; +} + +enum SigningAlgorithmSpec { + SIGNING_ALGORITHM_SPEC_UNSPECIFIED = 0; + + // EdDSA Signature based on Curve25519 with SHA-512 + // http://ed25519.cr.yp.to/ + SIGNING_ALGORITHM_SPEC_ED25519 = 1; + + // Elliptic Curve Digital Signature Algorithm with SHA256 + SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_256 = 2; + + // Elliptic Curve Digital Signature Algorithm with SHA384 + SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_384 = 3; +} + +enum SignatureFormat { + SIGNATURE_FORMAT_UNSPECIFIED = 0; + + // Signature scheme specific signature format + // Legacy format no longer used, except for migrations + SIGNATURE_FORMAT_RAW = 1; + + // ASN.1 + DER-encoding of the `r` and `s` integers, as defined in https://datatracker.ietf.org/doc/html/rfc3279#section-2.2.3 + // Used for ECDSA signatures + SIGNATURE_FORMAT_DER = 2; + + // Concatenation of the integers `r || s` in little-endian form, as defined in https://datatracker.ietf.org/doc/html/rfc8032#section-3.3 + // Note that this is different from the format defined in IEEE P1363, which uses concatenation in big-endian form. 
+  // Used for EdDSA signatures
+  SIGNATURE_FORMAT_CONCAT = 3;
+
+  // Symbolic crypto, must only be used for testing
+  SIGNATURE_FORMAT_SYMBOLIC = 10000;
+}
diff --git a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/event.proto b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/event.proto
index 70bc8d28ae..c0625ecca3 100644
--- a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/event.proto
+++ b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/event.proto
@@ -135,6 +135,15 @@ message CreatedEvent {
   // and should therefore considered when tracking contract activeness on the client-side.
   // Required
   bool acs_delta = 14;
+
+  // A package-id present in the participant package store that typechecks the contract's argument.
+  // This may differ from the package-id of the template used to create the contract.
+  // For contracts created before Canton 3.4, this field matches the contract's creation package-id.
+  //
+  // NOTE: Experimental, server internal concept, not for client consumption. Subject to change without notice.
+  //
+  // Required
+  string representative_package_id = 15;
 }
 
 // View of a create event matched by an interface filter.
diff --git a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/interactive/interactive_submission_service.proto b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/interactive/interactive_submission_service.proto
index c8af6c4066..91ee9148b2 100644
--- a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/interactive/interactive_submission_service.proto
+++ b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/interactive/interactive_submission_service.proto
@@ -6,6 +6,7 @@ syntax = "proto3";
 package com.daml.ledger.api.v2.interactive;
 
 import "com/daml/ledger/api/v2/commands.proto";
+import "com/daml/ledger/api/v2/crypto.proto";
 import "com/daml/ledger/api/v2/interactive/interactive_submission_common_data.proto";
 import "com/daml/ledger/api/v2/interactive/transaction/v1/interactive_submission_data.proto";
 import "com/daml/ledger/api/v2/package_reference.proto";
@@ -100,6 +101,16 @@ message PrepareSubmissionRequest {
   // Optional
   MinLedgerTime min_ledger_time = 4;
 
+  // Maximum timestamp at which the transaction can be recorded onto the ledger via the synchronizer specified in the `PrepareSubmissionResponse`.
+  // If submitted after this time, the transaction will be rejected even if otherwise valid, and it needs to be prepared and signed again
+  // with a new valid max_record_time.
+  // Use this to limit the time-to-live of a prepared transaction: once this time has passed, the
+  // transaction can definitely no longer be accepted, and it is safe to prepare another
+  // transaction for the same intent.
+  // Optional
+  optional google.protobuf.Timestamp max_record_time = 11;
+
   // Set of parties on whose behalf the command should be executed, if submitted.
   // If ledger API authorization is enabled, then the authorization metadata must authorize the sender of the request
   // to **read** (not act) on behalf of each of the given parties.
This is because this RPC merely prepares a transaction
@@ -124,7 +135,8 @@ message PrepareSubmissionRequest {
   repeated DisclosedContract disclosed_contracts = 7;
 
   // Must be a valid synchronizer id
-  // Required
+  // If not set, a suitable synchronizer that this node is connected to will be chosen
+  // Optional
   string synchronizer_id = 8;
 
   // The package-id selection preference of the client for resolving
@@ -172,52 +184,6 @@ message PrepareSubmissionResponse {
   optional string hashing_details = 4;
 }
 
-message Signature {
-  SignatureFormat format = 1;
-
-  bytes signature = 2;
-
-  // The fingerprint/id of the keypair used to create this signature and needed to verify.
-  string signed_by = 3;
-
-  // The signing algorithm specification used to produce this signature
-  SigningAlgorithmSpec signing_algorithm_spec = 4;
-}
-
-enum SigningAlgorithmSpec {
-  SIGNING_ALGORITHM_SPEC_UNSPECIFIED = 0;
-
-  // EdDSA Signature based on Curve25519 with SHA-512
-  // http://ed25519.cr.yp.to/
-  SIGNING_ALGORITHM_SPEC_ED25519 = 1;
-
-  // Elliptic Curve Digital Signature Algorithm with SHA256
-  SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_256 = 2;
-
-  // Elliptic Curve Digital Signature Algorithm with SHA384
-  SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_384 = 3;
-}
-
-enum SignatureFormat {
-  SIGNATURE_FORMAT_UNSPECIFIED = 0;
-
-  // Signature scheme specific signature format
-  // Legacy format no longer used, except for migrations
-  SIGNATURE_FORMAT_RAW = 1;
-
-  // ASN.1 + DER-encoding of the `r` and `s` integers, as defined in https://datatracker.ietf.org/doc/html/rfc3279#section-2.2.3
-  // Used for ECDSA signatures
-  SIGNATURE_FORMAT_DER = 2;
-
-  // Concatenation of the integers `r || s` in little-endian form, as defined in https://datatracker.ietf.org/doc/html/rfc8032#section-3.3
-  // Note that this is different from the format defined in IEEE P1363, which uses concatenation in big-endian form.
-  // Used for EdDSA signatures
-  SIGNATURE_FORMAT_CONCAT = 3;
-
-  // Symbolic crypto, must only be used for testing
-  SIGNATURE_FORMAT_SYMBOLIC = 10000;
-}
-
 // Signatures provided by a single party
 message SinglePartySignatures {
   // Submitting party
@@ -499,6 +465,14 @@ message Metadata {
   // Contextual information needed to process the transaction but not signed, either because it's already indirectly
   // signed by signing the transaction, or because it doesn't impact the ledger state
   repeated GlobalKeyMappingEntry global_key_mapping = 8;
+
+  // Maximum timestamp at which the transaction can be recorded onto the ledger via the synchronizer `synchronizer_id`.
+  // If submitted after this time, the transaction will be rejected even if otherwise valid, and it needs to be prepared and signed again
+  // with a new valid max_record_time.
+  // Unsigned in 3.3 to avoid a breaking protocol change
+  // Will be signed in 3.4+
+  // Set max_record_time in the PrepareSubmissionRequest to get this field set accordingly
+  optional uint64 max_record_time = 11;
 }
 
 /*
@@ -526,7 +500,7 @@ message DamlTransaction {
   }
   // [docs-entry-end: DamlTransaction.Node]
 
-  // Transaction version, will be >= max(nodes version)
+  // Serialization version, will be >= max(nodes version)
   string version = 1;
   // Root nodes of the transaction
   repeated string roots = 2;
diff --git a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/package_reference.proto b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/package_reference.proto
index 19fb039d09..35428aeb9d 100644
--- a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/package_reference.proto
+++ b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/package_reference.proto
@@ -5,6 +5,9 @@ syntax = "proto3";
 
 package com.daml.ledger.api.v2;
 
+import "google/protobuf/empty.proto";
+import "google/protobuf/timestamp.proto";
+
 option csharp_namespace = "Com.Daml.Ledger.Api.V2";
 option java_outer_classname = "PackageReferenceOuterClass";
 option java_package = "com.daml.ledger.api.v2";
@@ -17,3 +20,60 @@ message PackageReference {
   // Required
   string package_version = 3;
 }
+
+// A package that is vetted on a given participant and synchronizer,
+// modelled after ``VettedPackage`` in `topology.proto `_,
+// enriched with the package name and version.
+message VettedPackage {
+  // Package ID of this package. Always present.
+  string package_id = 1;
+
+  // The time from which this package is vetted. Empty if vetting time has no
+  // lower bound.
+  google.protobuf.Timestamp valid_from_inclusive = 2;
+
+  // The time until which this package is vetted. Empty if vetting time has no
+  // upper bound.
+  google.protobuf.Timestamp valid_until_exclusive = 3;
+
+  // Name of this package.
+  // Only available if the package has been uploaded to the current participant.
+  // If unavailable, this is the empty string.
+  string package_name = 4;
+
+  // Version of this package.
+  // Only available if the package has been uploaded to the current participant.
+  // If unavailable, this is the empty string.
+  string package_version = 5;
+}
+
+// The list of packages vetted on a given participant and synchronizer, modelled
+// after ``VettedPackages`` in `topology.proto `_.
+// The list only contains packages that matched a filter in the query that
+// originated it.
+message VettedPackages {
+  // Sorted by package_name and package_version where known, and package_id as a
+  // last resort.
+  repeated VettedPackage packages = 1;
+
+  // Participant on which these packages are vetted. Always present.
+  string participant_id = 2;
+
+  // Synchronizer on which these packages are vetted. Always present.
+  string synchronizer_id = 3;
+
+  // Serial of the last ``VettedPackages`` topology transaction of this participant
+  // on this synchronizer. Always present.
+  PriorTopologySerial topology_serial = 4;
+}
+
+// The serial of the last ``VettedPackages`` topology transaction on a given
+// participant and synchronizer.
+message PriorTopologySerial {
+  oneof serial {
+    // Previous transaction's serial.
+    uint32 prior = 1;
+    // No previous transaction exists.
+    google.protobuf.Empty no_prior = 2;
+  }
+}
diff --git a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/package_service.proto b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/package_service.proto
index 8549a1e8dc..6892abb9e3 100644
--- a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/package_service.proto
+++ b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/package_service.proto
@@ -5,6 +5,8 @@ syntax = "proto3";
 
 package com.daml.ledger.api.v2;
 
+import "com/daml/ledger/api/v2/package_reference.proto";
+
 option csharp_namespace = "Com.Daml.Ledger.Api.V2";
 option java_outer_classname = "PackageServiceOuterClass";
 option java_package = "com.daml.ledger.api.v2";
@@ -58,6 +60,10 @@ service PackageService {
 
   // Returns the status of a single package.
   rpc GetPackageStatus(GetPackageStatusRequest) returns (GetPackageStatusResponse);
+
+  // Lists which participant node vetted what packages on which synchronizer.
+  // Can be called by any authenticated user.
+  rpc ListVettedPackages(ListVettedPackagesRequest) returns (ListVettedPackagesResponse);
 }
 
 message ListPackagesRequest {}
@@ -75,3 +81,92 @@ message GetPackageStatusRequest {
   // Required
   string package_id = 1;
 }
+
+// Filter the VettedPackages by package metadata.
+//
+// A PackageMetadataFilter without package_ids and without package_name_prefixes
+// matches any vetted package.
+//
+// Non-empty fields specify candidate values of which at least one must match.
+// If both fields are set, then a candidate is returned if it matches one of the fields.
+message PackageMetadataFilter {
+  // If this list is non-empty, any vetted package with a package ID in this
+  // list will match the filter.
+  repeated string package_ids = 1;
+
+  // If this list is non-empty, any vetted package with a name matching at least
+  // one prefix in this list will match the filter.
+  repeated string package_name_prefixes = 2;
+}
+
+// Filter the vetted packages by the participant and synchronizer that they are
+// hosted on.
+//
+// Empty fields are ignored, such that a ``TopologyStateFilter`` without
+// participant_ids and without synchronizer_ids matches a vetted package hosted
+// on any participant and synchronizer.
+//
+// Non-empty fields specify candidate values of which at least one must match.
+// If both fields are set, then at least one candidate value must match from each
+// field.
+message TopologyStateFilter {
+  // If this list is non-empty, only vetted packages hosted on participants
+  // listed in this field match the filter.
+  // Query the ID of the current Ledger API participant via the public
+  // ``GetParticipantId`` command in ``PartyManagementService``.
+  repeated string participant_ids = 1;
+
+  // If this list is non-empty, only vetted packages from the topology state of
+  // the synchronizers in this list match the filter.
+  repeated string synchronizer_ids = 2;
+}
+
+message ListVettedPackagesRequest {
+  // The package metadata filter the returned vetted packages set must satisfy.
+  // Optional
+  PackageMetadataFilter package_metadata_filter = 1;
+
+  // The topology filter the returned vetted packages set must satisfy.
+  // Optional
+  TopologyStateFilter topology_state_filter = 2;
+
+  // Pagination token to determine the specific page to fetch.
Using the token + // guarantees that ``VettedPackages`` on a subsequent page are all greater + // (``VettedPackages`` are sorted by synchronizer ID then participant ID) than + // the last ``VettedPackages`` on a previous page. + // + // The server does not store intermediate results between calls chained by a + // series of page tokens. As a consequence, if new vetted packages are being + // added and a page is requested twice using the same token, more packages can + // be returned on the second call. + // + // Leave unspecified (i.e. as empty string) to fetch the first page. + // + // Optional + string page_token = 3; + + // Maximum number of ``VettedPackages`` results to return in a single page. + // + // If the page_size is unspecified (i.e. left as 0), the server will decide + // the number of results to be returned. + // + // If the page_size exceeds the maximum supported by the server, an + // error will be returned. + // + // To obtain the server's maximum consult the PackageService descriptor + // available in the VersionService. + // + // Optional + uint32 page_size = 4; +} + +message ListVettedPackagesResponse { + // All ``VettedPackages`` that contain at least one ``VettedPackage`` matching + // both a ``PackageMetadataFilter`` and a ``TopologyStateFilter``. + // Sorted by synchronizer_id then participant_id. + repeated VettedPackages vetted_packages = 1; + + // Pagination token to retrieve the next page. + // Empty string if there are no further results. + string next_page_token = 2; +} diff --git a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/state_service.proto b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/state_service.proto index be8a9896c6..46d7c405bb 100644 --- a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/state_service.proto +++ b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/state_service.proto @@ -38,16 +38,11 @@ service StateService { // Note that it is ok to request acs snapshots for party migration with offsets other than ledger end, because party // migration is not concerned with incomplete (un)assignments. message GetActiveContractsRequest { - // Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. - // Templates to include in the served snapshot, per party. - // Optional, if specified event_format must be unset, if not specified event_format must be set. - TransactionFilter filter = 1 [deprecated = true]; + reserved 1; + reserved "filter"; - // Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. - // If enabled, values served over the API will contain more information than strictly necessary to interpret the data. - // In particular, setting the verbose flag to true triggers the ledger to include labels for record fields. - // Optional, if specified event_format must be unset. - bool verbose = 2 [deprecated = true]; + reserved 2; + reserved "verbose"; // The offset at which the snapshot of the active contracts will be computed. // Must be no greater than the current ledger end offset. @@ -142,7 +137,8 @@ message IncompleteAssigned { message GetConnectedSynchronizersRequest { // The party of interest // Must be a valid PartyIdString (as described in ``value.proto``). - // Required + // If empty, all synchronizers this node is connected to will be returned + // Optional string party = 1; // The id of a participant whose mapping of a party to connected synchronizers is requested. 
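Returning to the ``ListVettedPackages`` endpoint added above: it is page-based, with an empty ``next_page_token`` marking the last page. A minimal consumption sketch, assuming scalapb-generated bindings (as used by the test tool later in this diff) and a hypothetical participant endpoint at ``localhost:5011``:

```scala
// Sketch only: drain all ListVettedPackages pages into a single sequence.
import com.daml.ledger.api.v2.package_reference.VettedPackages
import com.daml.ledger.api.v2.package_service.{ListVettedPackagesRequest, PackageServiceGrpc}
import io.grpc.ManagedChannelBuilder
import scala.concurrent.{ExecutionContext, Future}

object ListAllVettedPackages {
  implicit val ec: ExecutionContext = ExecutionContext.global
  private val channel =
    ManagedChannelBuilder.forAddress("localhost", 5011).usePlaintext().build() // assumed endpoint
  private val packageService = PackageServiceGrpc.stub(channel)

  // An empty page_token fetches the first page; an empty next_page_token ends the loop.
  def allVettedPackages(pageToken: String = ""): Future[Seq[VettedPackages]] =
    packageService
      .listVettedPackages(ListVettedPackagesRequest(pageToken = pageToken, pageSize = 100))
      .flatMap { response =>
        if (response.nextPageToken.isEmpty) Future.successful(response.vettedPackages)
        else allVettedPackages(response.nextPageToken).map(response.vettedPackages ++ _)
      }
}
```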
@@ -167,7 +163,8 @@ message GetConnectedSynchronizersResponse { string synchronizer_id = 2; // The permission on the synchronizer - // Required + // Set if a party was used in the request, otherwise unspecified. + // Optional ParticipantPermission permission = 3; } repeated ConnectedSynchronizer connected_synchronizers = 1; diff --git a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/transaction.proto b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/transaction.proto index a3008bf1da..086c8bef73 100644 --- a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/transaction.proto +++ b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/transaction.proto @@ -13,78 +13,6 @@ option csharp_namespace = "Com.Daml.Ledger.Api.V2"; option java_outer_classname = "TransactionOuterClass"; option java_package = "com.daml.ledger.api.v2"; -// Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. -// Each tree event message type below contains a ``witness_parties`` field which -// indicates the subset of the requested parties that can see the event -// in question. -// -// Note that transaction trees might contain events with -// _no_ witness parties, which were included simply because they were -// children of events which have witnesses. -message TreeEvent { - option deprecated = true; - oneof kind { - // The event as it appeared in the context of its original daml transaction on this participant node. - // In particular, the offset, node_id pair of the daml transaction are preserved. - CreatedEvent created = 1; - ExercisedEvent exercised = 2; - } -} - -// Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. -// Complete view of an on-ledger transaction. -message TransactionTree { - option deprecated = true; - // Assigned by the server. Useful for correlating logs. - // Must be a valid LedgerString (as described in ``value.proto``). - // Required - string update_id = 1; - - // The ID of the command which resulted in this transaction. Missing for everyone except the submitting party. - // Must be a valid LedgerString (as described in ``value.proto``). - // Optional - string command_id = 2; - - // The workflow ID used in command submission. Only set if the ``workflow_id`` for the command was set. - // Must be a valid LedgerString (as described in ``value.proto``). - // Optional - string workflow_id = 3; - - // Ledger effective time. - // Required - google.protobuf.Timestamp effective_at = 4; - - // The absolute offset. The details of this field are described in ``community/ledger-api/README.md``. - // Required, it is a valid absolute offset (positive integer). - int64 offset = 5; - - // Changes to the ledger that were caused by this transaction. Nodes of the transaction tree. - // Each key must be a valid node ID (non-negative integer). - // Required - map events_by_id = 6; - - // A valid synchronizer id. - // Identifies the synchronizer that synchronized the transaction. - // Required - string synchronizer_id = 7; - - // Optional; ledger API trace context - // - // The trace context transported in this message corresponds to the trace context supplied - // by the client application in a HTTP2 header of the original command submission. - // We typically use a header to transfer this type of information. Here we use message - // body, because it is used in gRPC streams which do not support per message headers. 
- // This field will be populated with the trace context contained in the original submission. - // If that was not provided, a unique ledger-api-server generated trace context will be used - // instead. - TraceContext trace_context = 8; - - // The time at which the transaction was recorded. The record time refers to the synchronizer - // which synchronized the transaction. - // Required - google.protobuf.Timestamp record_time = 9; -} - // Filtered view of an on-ledger transaction's create and archive events. message Transaction { // Assigned by the server. Useful for correlating logs. diff --git a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/transaction_filter.proto b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/transaction_filter.proto index dcef8a80a1..7fddb8e405 100644 --- a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/transaction_filter.proto +++ b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/transaction_filter.proto @@ -96,30 +96,6 @@ message TemplateFilter { bool include_created_event_blob = 2; } -// Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. -// Used both for filtering create and archive events as well as for filtering transaction trees. -message TransactionFilter { - option deprecated = true; - // Each key must be a valid PartyIdString (as described in ``value.proto``). - // The interpretation of the filter depends on the transaction-shape being filtered: - // - // 1. For **transaction trees** (used in GetUpdateTreesResponse for backwards compatibility) all party keys used as - // wildcard filters, and all subtrees whose root has one of the listed parties as an informee are returned. - // If there are ``CumulativeFilter``s, those will control returned ``CreatedEvent`` fields where applicable, but will - // not be used for template/interface filtering. - // 2. For **ledger-effects** create and exercise events are returned, for which the witnesses include at least one of - // the listed parties and match the per-party filter. - // 3. For **transaction and active-contract-set streams** create and archive events are returned for all contracts whose - // stakeholders include at least one of the listed parties and match the per-party filter. - // - // Required - map filters_by_party = 1; - - // Wildcard filters that apply to all the parties existing on the participant. The interpretation of the filters is the same - // with the per-party filter as described above. - Filters filters_for_any_party = 2; -} - // A format for events which defines both which events should be included // and what data should be computed and included for them. // diff --git a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/update_service.proto b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/update_service.proto index e1e379acd7..8c311b0760 100644 --- a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/update_service.proto +++ b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/update_service.proto @@ -22,7 +22,7 @@ option java_package = "com.daml.ledger.api.v2"; // offsets, which are strictly increasing. // The virtual shared ledger consist of changes happening on multiple synchronizers which are // connected to the serving participant. 
Each update belongs to one synchronizer, this is -// provided in the result (the ``synchronizer_id`` field in ``Transaction`` and ``TransactionTree`` +// provided in the result (the ``synchronizer_id`` field in ``Transaction`` // for transactions, the ``source`` field in ``UnassignedEvent`` and the ``target`` field in ``AssignedEvent``). // Consumers can rely on strong causal guarantees on the virtual shared ledger for a single // synchronizer: updates which have greater offsets are happened after than updates with smaller @@ -36,46 +36,6 @@ service UpdateService { // - ledger effects: a requesting party must be a witness of an en event for it to be included. rpc GetUpdates(GetUpdatesRequest) returns (stream GetUpdatesResponse); - // Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. - // Read the ledger's complete transaction tree stream and related (un)assignments for a set of parties. - // The stream will be filtered only by the parties as wildcard parties. - // The template/interface filters describe the respective fields in the ``CreatedEvent`` results. - rpc GetUpdateTrees(GetUpdatesRequest) returns (stream GetUpdateTreesResponse) { - option deprecated = true; - } - - // Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. - // Lookup a transaction tree by its offset. - // For looking up a transaction instead of a transaction tree, please see GetTransactionByEventId - // If the transaction cannot be found for the request, or all the events are filtered, a TRANSACTION_NOT_FOUND error will be raised. - rpc GetTransactionTreeByOffset(GetTransactionByOffsetRequest) returns (GetTransactionTreeResponse) { - option deprecated = true; - } - - // Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. - // Lookup a transaction tree by its ID. - // For looking up a transaction instead of a transaction tree, please see GetTransactionById - // If the transaction cannot be found for the request, or all the events are filtered, a TRANSACTION_NOT_FOUND error will be raised. - rpc GetTransactionTreeById(GetTransactionByIdRequest) returns (GetTransactionTreeResponse) { - option deprecated = true; - } - - // Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. - // Lookup a transaction by its offset. - // If there is no transaction with this offset, or all the events are filtered, a TRANSACTION_NOT_FOUND error will be raised. - // Use a wildcard template filter if you want to retrieve any transaction visible to the parties you can read as. - rpc GetTransactionByOffset(GetTransactionByOffsetRequest) returns (GetTransactionResponse) { - option deprecated = true; - } - - // Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. - // Lookup a transaction by its ID. - // If there is no transaction with this id, or all the events are filtered, a TRANSACTION_NOT_FOUND error will be raised. - // Use a wildcard template filter if you want to retrieve any transaction visible to the parties you can read as. - rpc GetTransactionById(GetTransactionByIdRequest) returns (GetTransactionResponse) { - option deprecated = true; - } - // Lookup an update by its offset. // If there is no update with this offset, or all the events are filtered, an UPDATE_NOT_FOUND error will be raised. 
rpc GetUpdateByOffset(GetUpdateByOffsetRequest) returns (GetUpdateResponse); @@ -99,18 +59,11 @@ message GetUpdatesRequest { // If specified, the stream will terminate after this absolute offset (positive integer) is reached. optional int64 end_inclusive = 2; - // Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. - // Requesting parties with template filters. - // Template filters must be empty for GetUpdateTrees requests. - // Optional for backwards compatibility, if defined update_format must be unset - TransactionFilter filter = 3 [deprecated = true]; + reserved 3; + reserved "filter"; - // Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. - // If enabled, values served over the API will contain more information than strictly necessary to interpret the data. - // In particular, setting the verbose flag to true triggers the ledger to include labels, record and variant type ids - // for record fields. - // Optional for backwards compatibility, if defined update_format must be unset - bool verbose = 4 [deprecated = true]; + reserved 4; + reserved "verbose"; // Must be unset for GetUpdateTrees request. // Optional for backwards compatibility for GetUpdates request: defaults to an UpdateFormat where: @@ -135,85 +88,6 @@ message GetUpdatesResponse { } } -// Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. -message GetUpdateTreesResponse { - option deprecated = true; - // The update that matches the filter in the request. - oneof update { - TransactionTree transaction_tree = 1; - Reassignment reassignment = 2; - OffsetCheckpoint offset_checkpoint = 3; - } -} - -// Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. -message GetTransactionByOffsetRequest { - option deprecated = true; - // The offset of the transaction being looked up. - // Must be a valid absolute offset (positive integer). - // Required - int64 offset = 1; - - // Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. - // The parties whose events the client expects to see. - // Events that are not visible for the parties in this collection will not be present in the response. - // Each element must be a valid PartyIdString (as described in ``value.proto``). - // Must be set for GetTransactionTreeByOffset request. - // Optional for backwards compatibility for GetTransactionByOffset request: if defined transaction_format must be - // unset (falling back to defaults). - repeated string requesting_parties = 2; - - // Must be unset for GetTransactionTreeByOffset request. - // Optional for GetTransactionByOffset request for backwards compatibility: defaults to a TransactionFormat, where: - // - // - event_format.filters_by_party will have template-wildcard filters for all the requesting_parties - // - event_format.filters_for_any_party is unset - // - event_format.verbose = true - // - transaction_shape = TRANSACTION_SHAPE_ACS_DELTA - TransactionFormat transaction_format = 3; -} - -// Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. -message GetTransactionByIdRequest { - option deprecated = true; - // The ID of a particular transaction. - // Must be a valid LedgerString (as described in ``value.proto``). - // Required - string update_id = 1; - - // Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. - // The parties whose events the client expects to see. 
- // Events that are not visible for the parties in this collection will not be present in the response. - // Each element must be a valid PartyIdString (as described in ``value.proto``). - // Must be set for GetTransactionTreeById request. - // Optional for backwards compatibility for GetTransactionById request: if defined transaction_format must be - // unset (falling back to defaults). - repeated string requesting_parties = 2; - - // Must be unset for GetTransactionTreeById request. - // Optional for GetTransactionById request for backwards compatibility: defaults to a transaction_format, where: - // - // - event_format.filters_by_party will have template-wildcard filters for all the requesting_parties - // - event_format.filters_for_any_party is unset - // - event_format.verbose = true - // - transaction_shape = TRANSACTION_SHAPE_ACS_DELTA - TransactionFormat transaction_format = 3; -} - -// Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. -message GetTransactionTreeResponse { - option deprecated = true; - // Required - TransactionTree transaction = 1; -} - -// Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. -message GetTransactionResponse { - option deprecated = true; - // Required - Transaction transaction = 1; -} - message GetUpdateByOffsetRequest { // The offset of the update being looked up. // Must be a valid absolute offset (positive integer). diff --git a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/version_service.proto b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/version_service.proto index a2c4e773f4..95871e1c97 100644 --- a/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/version_service.proto +++ b/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/version_service.proto @@ -55,6 +55,11 @@ message FeaturesDescriptor { // It contains the timeouts related to the periodic offset checkpoint emission OffsetCheckpointFeature offset_checkpoint = 4; + + // If set, then the Ledger API server supports package listing + // configurability. It is recommended that clients query this field to + // gracefully adjust their behavior to maximum package listing page size. + PackageFeature package_feature = 5; } message UserManagementFeature { @@ -77,6 +82,12 @@ message PartyManagementFeature { int32 max_parties_page_size = 1; } +message PackageFeature { + // The maximum number of vetted packages the server can return in a single + // response (page) when listing them. + int32 max_vetted_packages_page_size = 1; +} + message OffsetCheckpointFeature { // The maximum delay to emmit a new OffsetCheckpoint if it exists google.protobuf.Duration max_offset_checkpoint_emission_delay = 1; diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Allocation.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Allocation.scala new file mode 100644 index 0000000000..c30f1567ae --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Allocation.scala @@ -0,0 +1,105 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
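As a quick orientation for the allocation DSL that the new Allocation.scala below introduces, a hypothetical suite-level allocation might look like this (names taken from the definitions that follow):

```scala
// Sketch only: two parties on the first participant, one on the next.
import com.daml.ledger.api.testtool.infrastructure.Allocation._

object ExampleAllocation {
  // Participants are reused circularly when fewer are configured than party
  // counts, so require at least two actual participants explicitly.
  val allocation: PartyAllocation =
    allocate(TwoParties, SingleParty)
      .expectingMinimumActualParticipantCount(2)
}
```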
+ +package com.daml.ledger.api.testtool.infrastructure + +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext + +object Allocation { + + /** Specifies a sequence of party counts to be allocated on a sequence of participants. + * + * Number of party counts does not need to match the number of participants. If there are fewer + * participants than party counts, the participants will be reused in a circular fashion. + */ + def allocate(firstPartyCount: PartyCount, partyCounts: PartyCount*): PartyAllocation = + PartyAllocation( + partyCounts = firstPartyCount +: partyCounts, + minimumParticipantCount = 1, + minimumSynchronizersCount = 1, + ) + + final case class PartyAllocation( + partyCounts: Seq[PartyCount], + minimumParticipantCount: Int, + minimumSynchronizersCount: Int, + ) { + def expectingMinimumActualParticipantCount( + minimumParticipantCount: Int + ): PartyAllocation = + copy(minimumParticipantCount = minimumParticipantCount) + + def expectingMinimumNumberOfSynchronizers( + minimumSynchronizersCount: Int + ): PartyAllocation = copy(minimumSynchronizersCount = minimumSynchronizersCount) + } + + /** Specifies the number of parties to allocate in a participant. + * + * NOTE: A single participant can be allocated parties from multiple party counts. + */ + sealed trait PartyCount { + val count: Int + val isExternal: Boolean = false + } + + case object NoParties extends PartyCount { + override val count = 0 + } + + case object SingleParty extends PartyCount { + override val count = 1 + } + + case object SingleExternalParty extends PartyCount { + override val count = 1 + override val isExternal: Boolean = true + } + + case object TwoParties extends PartyCount { + override val count = 2 + } + + case object TwoExternalParties extends PartyCount { + override val count = 2 + override val isExternal: Boolean = true + } + + final case class Parties(override val count: Int) extends PartyCount + final case class ExternalParties(override val count: Int) extends PartyCount { + override val isExternal: Boolean = true + } + + /** Exposes information about configured participants and allocated parties to a test case. + * + * When using multiple participants, keep in mind that they do not update their view of the + * ledger synchronously. E.g., after you create a contract through the command service of one + * participant, other participants might not know about the contract yet. Use + * `com.daml.ledger.api.testtool.infrastructure.Eventually` if you want to wait for a participant + * to catch up, or `synchronize` to add a synchronization point between all participants. 
+ */ + final case class Participants private[infrastructure] ( + minSynchronizers: Int, + val participants: Participant* + ) { + import scala.concurrent.{Future, ExecutionContext} + + def synchronize(implicit ec: ExecutionContext) = + Future.sequence { + participants + .sliding(2) + .collect { case Seq(p1, p2) => + Synchronize.synchronize(p1.context, p2.context, minSynchronizers) + } + .toList + } + } + object Participants { + def unapplySeq(p: Participants) = Some(p.participants) + } + + final case class Participant private[infrastructure] ( + context: ParticipantTestContext, + parties: Seq[Party], + ) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/AssertionErrorWithPreformattedMessage.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/AssertionErrorWithPreformattedMessage.scala new file mode 100644 index 0000000000..2707b7d7d1 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/AssertionErrorWithPreformattedMessage.scala @@ -0,0 +1,9 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.infrastructure + +final case class AssertionErrorWithPreformattedMessage[T]( + preformattedMessage: String, + message: String, +) extends AssertionError(message) diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Assertions.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Assertions.scala new file mode 100644 index 0000000000..4c3d4bc1a1 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Assertions.scala @@ -0,0 +1,227 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
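A hypothetical usage sketch of the assertion helpers the new Assertions.scala below defines; ``submitCommand`` and ``expectedCode`` are assumed stand-ins for a real submission and the error code a test expects:

```scala
// Sketch only: assert that a submission fails with the expected gRPC error code.
import com.daml.ledger.api.testtool.infrastructure.Assertions._
import com.digitalasset.base.error.ErrorCode
import scala.concurrent.{ExecutionContext, Future}

def assertRejected(submitCommand: () => Future[Unit], expectedCode: ErrorCode)(implicit
    ec: ExecutionContext
): Future[Unit] =
  for {
    // mustFail comes from the implicit futureAssertions conversion defined below
    failure <- submitCommand().mustFail("submitting a command that must be rejected")
  } yield assertGrpcError(
    failure,
    expectedCode,
    exceptionMessageSubstring = None,
    checkDefiniteAnswerMetadata = true,
  )
```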
+ +package com.daml.ledger.api.testtool.infrastructure + +import com.daml.timer.RetryStrategy +import com.digitalasset.base.error.ErrorCode +import com.digitalasset.base.error.utils.ErrorDetails +import com.google.rpc.ErrorInfo +import io.grpc.StatusRuntimeException +import io.grpc.protobuf.StatusProto +import munit.{Assertions as MUnit, ComparisonFailException} + +import java.util.regex.Pattern +import scala.annotation.tailrec +import scala.concurrent.Future +import scala.jdk.CollectionConverters.* +import scala.language.implicitConversions +import scala.util.Try + +object Assertions { + def fail(message: String): Nothing = + throw new AssertionError(message) + + def fail(message: String, cause: Throwable): Nothing = + throw new AssertionError(message, cause) + + def verifyLength[A, F[_] <: Seq[_]](context: String, length: Int, as: F[A]): Unit = + assert(as.length == length, s"$context: expected $length item(s), got ${as.length}, $as") + + def assertLength[A, F[_] <: Seq[_]](context: String, length: Int, as: F[A]): F[A] = { + verifyLength(context, length, as) + as + } + + def assertSingleton[A](context: String, as: Seq[A]): A = + assertLength(context, 1, as).head + + def assertEquals[T](context: String, actual: T, expected: T): Unit = + try { + MUnit.assertEquals(actual, expected, context) + } catch { + case e: ComparisonFailException => + throw AssertionErrorWithPreformattedMessage( + e.message, + s"$context: two objects are supposed to be equal but they are not", + ) + } + + def assertGreaterOrEquals[T](context: String, a: T, b: T)(implicit ordering: Ordering[T]): Unit = + assert( + ordering.gteq(a, b), + s"$context: $a should be equal or greater than $b", + ) + + def assertEquals[T](actual: T, expected: T): Unit = + try { + MUnit.assertEquals(actual, expected) + } catch { + case e: ComparisonFailException => + throw AssertionErrorWithPreformattedMessage( + e.message, + s"two objects are supposed to be equal but they are not", + ) + } + + def assertSameElements[T]( + actual: Iterable[T], + expected: Iterable[T], + context: String = "", + ): Unit = + assert( + actual.toSet == expected.toSet, + s"Actual |${actual.mkString(", ")}| should have the same elements as (expected): |${expected + .mkString(", ")}|${Option(context).filter(_.nonEmpty).map(nonEmptyContext => s"Context: $nonEmptyContext").getOrElse("")}", + ) + + def assertIsEmpty(actual: Iterable[_]): Unit = + assertSameElements(actual, Seq.empty) + + def assertGrpcErrorOneOf(t: Throwable, errors: ErrorCode*): Unit = { + val hasErrorCode = + errors.map(errorCode => Try(assertGrpcError(t, errorCode, None))).exists(_.isSuccess) + if (!hasErrorCode) + fail(s"gRPC failure did not contain one of the expected error codes $errors.", t) + } + + def assertGrpcError( + t: Throwable, + errorCode: ErrorCode, + exceptionMessageSubstring: Option[String], + checkDefiniteAnswerMetadata: Boolean = false, + additionalErrorAssertions: Throwable => Unit = _ => (), + ): Unit = + assertGrpcErrorRegex( + t, + errorCode, + exceptionMessageSubstring + .map(msgSubstring => Pattern.compile(Pattern.quote(msgSubstring))), + checkDefiniteAnswerMetadata, + additionalErrorAssertions, + ) + + /** Match the given exception against a status code and a regex for the expected message. Succeeds + * if the exception is a GrpcException with the expected code and the regex matches some part of + * the message or there is no message and the pattern is None. 
+    */
+  @tailrec
+  def assertGrpcErrorRegex(
+      t: Throwable,
+      errorCode: ErrorCode,
+      optPattern: Option[Pattern],
+      checkDefiniteAnswerMetadata: Boolean = false,
+      additionalErrorAssertions: Throwable => Unit = _ => (),
+  ): Unit =
+    t match {
+      case RetryStrategy.FailedRetryException(cause) =>
+        assertGrpcErrorRegex(
+          cause,
+          errorCode,
+          optPattern,
+          checkDefiniteAnswerMetadata,
+          additionalErrorAssertions,
+        )
+      case exception: StatusRuntimeException =>
+        optPattern.foreach(assertMatches("Error message", exception.getMessage, _))
+        assertErrorCode(exception, errorCode)
+        if (checkDefiniteAnswerMetadata) assertDefiniteAnswer(exception)
+        additionalErrorAssertions(exception)
+      case _ =>
+        fail("Exception is not a StatusRuntimeException", t)
+    }
+
+  def assertMatches(subjectName: String, message: String, pattern: Pattern): Unit =
+    if (pattern.matcher(message).find()) {
+      ()
+    } else {
+      fail(s"$subjectName did not contain [$pattern], but was [$message].")
+    }
+
+  private def assertDefiniteAnswer(exception: Exception): Unit = {
+    val definitiveAnswer = extractErrorInfoMetadataValue(exception, "definite_answer")
+    if (!Set("true", "false").contains(definitiveAnswer.toLowerCase)) {
+      fail(s"The error contained an invalid definite answer: [$definitiveAnswer]")
+    }
+  }
+
+  def extractErrorInfoMetadataValue(exception: Throwable, key: String): String = {
+    val metadata = extractErrorInfoMetadata(exception)
+    metadata.get(key) match {
+      case Some(value) =>
+        value
+      case None =>
+        fail(
+          s"The error metadata did not contain the key $key. Metadata was: [$metadata]",
+          exception,
+        )
+    }
+  }
+
+  def extractErrorInfoMetadata(exception: Throwable): Map[String, String] =
+    extractErrorInfoMetadata(StatusProto.fromThrowable(exception))
+
+  def extractErrorInfoMetadata(status: com.google.rpc.Status): Map[String, String] = {
+    val details = status.getDetailsList.asScala
+    details
+      .find(_.is(classOf[ErrorInfo]))
+      .map { any =>
+        val errorInfo = any.unpack(classOf[ErrorInfo])
+        errorInfo.getMetadataMap.asScala.toMap
+      }
+      .getOrElse {
+        Map.empty
+      }
+  }
+
+  def assertErrorCode(
+      statusRuntimeException: StatusRuntimeException,
+      expectedErrorCode: ErrorCode,
+  ): Unit = {
+    val status = StatusProto.fromThrowable(statusRuntimeException)
+
+    val expectedStatusCode = expectedErrorCode.category.grpcCode
+      .map(_.value())
+      .getOrElse(
+        throw new RuntimeException(
+          s"Errors without grpc code cannot be asserted on the Ledger API. Expected error: $expectedErrorCode"
+        )
+      )
+    val expectedErrorId = expectedErrorCode.id
+    val expectedRetryability = expectedErrorCode.category.retryable.map(_.duration)
+
+    val actualStatusCode = status.getCode
+    val actualErrorDetails = ErrorDetails.from(status.getDetailsList.asScala.toSeq)
+    val actualErrorId = actualErrorDetails
+      .collectFirst { case err: ErrorDetails.ErrorInfoDetail => err.errorCodeId }
+      .getOrElse(fail(s"Actual error id is not defined. Actual error: $statusRuntimeException"))
+    val actualRetryability = actualErrorDetails
+      .collectFirst { case err: ErrorDetails.RetryInfoDetail => err.duration }
+
+    if (actualErrorId != expectedErrorId)
+      fail(
+        s"Actual error id ($actualErrorId) does not match expected error id ($expectedErrorId).
Actual error: $statusRuntimeException" + ) + + Assertions.assertEquals( + "gRPC error code mismatch", + actualStatusCode, + expectedStatusCode, + ) + + Assertions.assertEquals( + s"Error retryability details mismatch", + actualRetryability, + expectedRetryability, + ) + } + + def assertDefined[T](option: Option[T], errorMessage: String): T = { + assert(option.isDefined, errorMessage) + option.get + } + + /** Allows for assertions with more information in the error messages. */ + implicit def futureAssertions[T](future: Future[T]): FutureAssertions[T] = + new FutureAssertions[T](future) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/ChannelEndpoint.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/ChannelEndpoint.scala new file mode 100644 index 0000000000..c134d569b2 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/ChannelEndpoint.scala @@ -0,0 +1,29 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.infrastructure + +import io.grpc.Channel + +sealed trait Endpoint + +object Endpoint { + + final case object InProcess extends Endpoint + + final case class Remote(hostname: String, port: Int) extends Endpoint + +} + +final case class ChannelEndpoint(channel: Channel, endpoint: Endpoint) + +object ChannelEndpoint { + + type JsonApiEndpoint = (String, Int) + + def forRemote(channel: Channel, hostname: String, port: Int): ChannelEndpoint = + ChannelEndpoint(channel, Endpoint.Remote(hostname, port)) + + def forInProcess(channel: Channel): ChannelEndpoint = ChannelEndpoint(channel, Endpoint.InProcess) + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Dars.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Dars.scala new file mode 100644 index 0000000000..80b1543d88 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Dars.scala @@ -0,0 +1,16 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.infrastructure + +import com.google.protobuf.ByteString + +object Dars { + + // The list of all DAR packages that are bundled with this binary. + def resources(lfVersion: String): List[String] = TestDar.paths(lfVersion) + + def read(name: String): ByteString = + ByteString.readFrom(getClass.getClassLoader.getResourceAsStream(name)) + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Errors.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Errors.scala new file mode 100644 index 0000000000..3c8de07994 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Errors.scala @@ -0,0 +1,20 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+
+package com.daml.ledger.api.testtool.infrastructure
+
+object Errors {
+
+  sealed abstract class FrameworkException(message: String, cause: Throwable)
+      extends RuntimeException(message, cause)
+
+  final class ParticipantConnectionException(cause: Throwable)
+      extends FrameworkException(
+        s"Could not connect to the participant: ${cause.getMessage}",
+        cause,
+      )
+
+  final class DarUploadException(name: String, cause: Throwable)
+      extends FrameworkException(s"""Failed to upload DAR "$name": ${cause.getMessage}""", cause)
+
+}
diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Eventually.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Eventually.scala
new file mode 100644
index 0000000000..0baa2329c7
--- /dev/null
+++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Eventually.scala
@@ -0,0 +1,34 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.api.testtool.infrastructure
+
+import com.daml.timer.RetryStrategy
+import com.daml.timer.RetryStrategy.{TooManyAttemptsException, UnhandledFailureException}
+
+import scala.concurrent.duration.{Duration, DurationInt}
+import scala.concurrent.{ExecutionContext, Future}
+
+object Eventually {
+
+  /*
+  Runs the provided closure with the exponential back-off retry strategy for a number of `attempts`.
+   */
+  def eventually[A](assertionName: String, attempts: Int = 10, firstWaitTime: Duration = 10.millis)(
+      runAssertion: => Future[A]
+  )(implicit ec: ExecutionContext): Future[A] =
+    RetryStrategy
+      .exponentialBackoff(attempts, firstWaitTime) { (_, _) =>
+        runAssertion
+      }
+      .recoverWith {
+        case tooManyAttempts: TooManyAttemptsException =>
+          Future.failed(
+            tooManyAttempts.copy(message = s"$assertionName: ${tooManyAttempts.message}")
+          )
+        case unhandledFailure: UnhandledFailureException =>
+          Future.failed(
+            unhandledFailure.copy(message = s"$assertionName: ${unhandledFailure.message}")
+          )
+      }
+}
diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/FutureAssertions.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/FutureAssertions.scala
new file mode 100644
index 0000000000..c1d9137cf5
--- /dev/null
+++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/FutureAssertions.scala
@@ -0,0 +1,146 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.api.testtool.infrastructure
+
+import com.daml.ledger.api.testtool.infrastructure.FutureAssertions.ExpectedFailureException
+import com.daml.ledger.api.testtool.infrastructure.time.DelayMechanism
+import com.daml.logging.{ContextualizedLogger, LoggingContext}
+import com.digitalasset.base.error.ErrorCode
+
+import scala.concurrent.duration.*
+import scala.concurrent.{ExecutionContext, Future}
+import scala.util.control.NonFatal
+import scala.util.{Failure, Success}
+
+final class FutureAssertions[T](future: Future[T]) {
+
+  /** Checks that the future failed, and returns the throwable. We use this instead of
+    * `Future#failed` because the error message that it delivers is unhelpful. It doesn't tell us what
+    * the value actually was.
+    */
+  def mustFail(context: String)(implicit executionContext: ExecutionContext): Future[Throwable] =
+    handle(_ => true, context)
+
+  /** Checks that the future failed satisfying the predicate and returns the throwable. We use this
+    * instead of `Future#failed` because the error message that it delivers is unhelpful. It doesn't
+    * tell us what the value actually was.
+    */
+  def mustFailWith(context: String)(
+      predicate: Throwable => Boolean
+  )(implicit executionContext: ExecutionContext): Future[Throwable] =
+    handle(predicate, context)
+
+  def mustFailWith(
+      context: String,
+      errorCode: ErrorCode,
+      exceptionMessageSubstring: Option[String] = None,
+  )(implicit executionContext: ExecutionContext): Future[Unit] =
+    for {
+      error <- mustFail(context)
+    } yield {
+      Assertions.assertGrpcError(
+        t = error,
+        errorCode = errorCode,
+        exceptionMessageSubstring = exceptionMessageSubstring,
+      )
+    }
+
+  private def handle(predicate: Throwable => Boolean, context: String)(implicit
+      executionContext: ExecutionContext
+  ): Future[Throwable] =
+    future.transform {
+      case Failure(throwable) if predicate(throwable) => Success(throwable)
+      case Success(value) => Failure(new ExpectedFailureException(context, value))
+      case Failure(other) => Failure(other)
+    }
+
+}
+
+object FutureAssertions {
+
+  private val logger = ContextualizedLogger.get(getClass)
+
+  /** Runs the test case after the specified delay
+    */
+  def assertAfter[V](
+      delay: FiniteDuration,
+      delayMechanism: DelayMechanism,
+  )(test: => Future[V])(implicit executionContext: ExecutionContext): Future[V] =
+    delayMechanism.delayBy(delay).flatMap(_ => test)
+
+  /** Run the test every `retryDelay` up to `maxRetryDuration`. The test case will run up to
+    * `ceil(maxRetryDuration / retryDelay)` times. The assertion will succeed as soon as any of the
+    * test case runs are successful. The assertion will fail if no test case runs are successful and
+    * the `maxRetryDuration` is exceeded.
+    */
+  def succeedsEventually[V](
+      retryDelay: FiniteDuration = 100.millis,
+      maxRetryDuration: FiniteDuration,
+      delayMechanism: DelayMechanism,
+      description: String,
+  )(
+      test: => Future[V]
+  )(implicit ec: ExecutionContext, loggingContext: LoggingContext): Future[V] = {
+    def internalSucceedsEventually(remainingDuration: FiniteDuration): Future[V] = {
+      val nextRetryRemainingDuration = remainingDuration - retryDelay
+      if (nextRetryRemainingDuration < Duration.Zero) test.andThen { case Failure(exception) =>
+        logger.error(
+          s"Assertion never succeeded after $maxRetryDuration with a delay of $retryDelay. Description: $description",
+          exception,
+        )
+      }
+      else
+        assertAfter(retryDelay, delayMechanism)(test).recoverWith { case NonFatal(ex) =>
+          logger.debug(
+            s"Failed assertion: $description. Running again with new max duration $nextRetryRemainingDuration",
+            ex,
+          )
+          internalSucceedsEventually(nextRetryRemainingDuration)
+        }
+    }
+
+    internalSucceedsEventually(maxRetryDuration)
+  }
+
+  def forAllParallel[T](
+      data: Seq[T]
+  )(
+      testCase: T => Future[Unit]
+  )(implicit ec: ExecutionContext, loggingContext: LoggingContext): Future[Unit] = Future
+    .traverse(data)(input =>
+      testCase(input).map(Right(_)).recover { case NonFatal(ex) =>
+        Left(input -> ex)
+      }
+    )
+    .map { results =>
+      val (failures, successes) = results.partitionMap(identity)
+      if (failures.nonEmpty) {
+        failures
+          .foreach(res => logger.error(s"Failed parallel test case for input ${res._1}", res._2))
+        throw ParallelTestFailureException(
+          s"Failed parallel test case.
Failures: ${failures.length}. Success: ${successes.length}\nFailed inputs: ${failures + .map(_._1) + .mkString("[", ",", "]")}", + failures.last._2, + ) + } + } + + def optionalAssertion(runs: Boolean, description: String)( + assertions: => Future[_] + )(implicit loggingContext: LoggingContext): Future[_] = if (runs) assertions + else { + logger.warn(s"Not running optional assertions: $description") + Future.unit + } + + final class ExpectedFailureException[T](context: String, value: T) + extends NoSuchElementException( + s"Expected a failure when $context, but got a successful result of: $value" + ) + +} + +final case class ParallelTestFailureException(message: String, failure: Throwable) + extends RuntimeException(message, failure) diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Identification.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Identification.scala new file mode 100644 index 0000000000..f2d1f3ecba --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Identification.scala @@ -0,0 +1,72 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.infrastructure + +object Identification { + + val greekAlphabet = Vector( + "alpha", + "beta", + "gamma", + "delta", + "epsilon", + "zeta", + "eta", + "theta", + "iota", + "kappa", + "lambda", + "mu", + "nu", + "xi", + "omicron", + "pi", + "rho", + "sigma", + "tau", + "upsilon", + "phi", + "chi", + "psi", + "omega", + ) + + /** E.g. + * + * {{{ + * val ids = circularWithIndex(Vector("a", "b", "c")) + * + * assert(ids() == "a") + * assert(ids() == "b") + * assert(ids() == "c") + * assert(ids() == "a0") + * assert(ids() == "b0") + * assert(ids() == "c0") + * assert(ids() == "a1") + * }}} + */ + def circularWithIndex(base: Vector[String]): () => String = + synchronizedProvider(base.iterator ++ Iterator.continually(base).zipWithIndex.flatMap { + case (alphabet, index) => alphabet.map(letter => s"$letter$index") + }) + + /** E.g. + * + * {{{ + * val ids = indexSuffix("prefix") + * + * assert(ids() == "prefix-0") + * assert(ids() == "prefix-1") + * assert(ids() == "prefix-2") + * }}} + */ + def indexSuffix(template: String): () => String = + synchronizedProvider(Iterator.from(0).map(n => s"$template-$n")) + + /** Rules out race conditions when accessing an iterator + */ + private def synchronizedProvider[A](it: Iterator[A]): () => A = + () => it.synchronized(it.next()) + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerServices.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerServices.scala new file mode 100644 index 0000000000..47b707a791 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerServices.scala @@ -0,0 +1,997 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
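The LedgerServices.scala file below aggregates the scalapb-generated gRPC stubs for all Ledger API services behind one channel. A minimal wiring sketch under that assumption; ``channel`` and the two chosen services are illustrative only:

```scala
// Sketch only: wiring two of the aggregated stubs from a shared channel.
import com.daml.ledger.api.v2.package_service.{ListVettedPackagesRequest, PackageServiceGrpc}
import com.daml.ledger.api.v2.state_service.{GetLedgerEndRequest, StateServiceGrpc}
import io.grpc.Channel

final class MinimalLedgerServices(channel: Channel) {
  // Async scalapb stubs share one gRPC channel and return Futures.
  private val state = StateServiceGrpc.stub(channel)
  private val packages = PackageServiceGrpc.stub(channel)

  def ledgerEnd() = state.getLedgerEnd(GetLedgerEndRequest())
  def vettedPackages() = packages.listVettedPackages(ListVettedPackagesRequest())
}
```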
+ +package com.daml.ledger.api.testtool.infrastructure + +import cats.implicits.{catsSyntaxSemigroup, toTraverseOps} +import com.daml.ledger.api.testtool.infrastructure.ChannelEndpoint.JsonApiEndpoint +import com.daml.ledger.api.testtool.infrastructure.JsonErrors.GenericErrorCode +import com.daml.ledger.api.testtool.infrastructure.ws.WsHelper +import com.daml.ledger.api.v2.admin.identity_provider_config_service.IdentityProviderConfigServiceGrpc.IdentityProviderConfigService +import com.daml.ledger.api.v2.admin.identity_provider_config_service.{ + CreateIdentityProviderConfigRequest, + CreateIdentityProviderConfigResponse, + DeleteIdentityProviderConfigRequest, + DeleteIdentityProviderConfigResponse, + GetIdentityProviderConfigRequest, + GetIdentityProviderConfigResponse, + IdentityProviderConfigServiceGrpc, + ListIdentityProviderConfigsRequest, + ListIdentityProviderConfigsResponse, + UpdateIdentityProviderConfigRequest, + UpdateIdentityProviderConfigResponse, +} +import com.daml.ledger.api.v2.admin.package_management_service.PackageManagementServiceGrpc.PackageManagementService +import com.daml.ledger.api.v2.admin.package_management_service.{ + ListKnownPackagesRequest, + ListKnownPackagesResponse, + PackageManagementServiceGrpc, + UpdateVettedPackagesRequest, + UpdateVettedPackagesResponse, + UploadDarFileRequest, + UploadDarFileResponse, + ValidateDarFileRequest, + ValidateDarFileResponse, +} +import com.daml.ledger.api.v2.admin.participant_pruning_service.ParticipantPruningServiceGrpc +import com.daml.ledger.api.v2.admin.participant_pruning_service.ParticipantPruningServiceGrpc.ParticipantPruningService +import com.daml.ledger.api.v2.admin.party_management_service.PartyManagementServiceGrpc.PartyManagementService +import com.daml.ledger.api.v2.admin.party_management_service.{ + AllocateExternalPartyRequest, + AllocateExternalPartyResponse, + AllocatePartyRequest, + AllocatePartyResponse, + GenerateExternalPartyTopologyRequest, + GenerateExternalPartyTopologyResponse, + GetParticipantIdRequest, + GetParticipantIdResponse, + GetPartiesRequest, + GetPartiesResponse, + ListKnownPartiesRequest, + ListKnownPartiesResponse, + PartyManagementServiceGrpc, + UpdatePartyDetailsRequest, + UpdatePartyDetailsResponse, + UpdatePartyIdentityProviderIdRequest, + UpdatePartyIdentityProviderIdResponse, +} +import com.daml.ledger.api.v2.admin.user_management_service.UserManagementServiceGrpc.UserManagementService +import com.daml.ledger.api.v2.admin.user_management_service.{ + CreateUserRequest, + CreateUserResponse, + DeleteUserRequest, + DeleteUserResponse, + GetUserRequest, + GetUserResponse, + GrantUserRightsRequest, + GrantUserRightsResponse, + ListUserRightsRequest, + ListUserRightsResponse, + ListUsersRequest, + ListUsersResponse, + RevokeUserRightsRequest, + RevokeUserRightsResponse, + UpdateUserIdentityProviderIdRequest, + UpdateUserIdentityProviderIdResponse, + UpdateUserRequest, + UpdateUserResponse, + UserManagementServiceGrpc, +} +import com.daml.ledger.api.v2.command_completion_service.CommandCompletionServiceGrpc.CommandCompletionService +import com.daml.ledger.api.v2.command_completion_service.{ + CommandCompletionServiceGrpc, + CompletionStreamRequest, + CompletionStreamResponse, +} +import com.daml.ledger.api.v2.command_service.CommandServiceGrpc.CommandService +import com.daml.ledger.api.v2.command_service.{ + CommandServiceGrpc, + SubmitAndWaitForReassignmentRequest, + SubmitAndWaitForReassignmentResponse, + SubmitAndWaitForTransactionRequest, + SubmitAndWaitForTransactionResponse, + 
SubmitAndWaitRequest, + SubmitAndWaitResponse, +} +import com.daml.ledger.api.v2.command_submission_service.CommandSubmissionServiceGrpc.CommandSubmissionService +import com.daml.ledger.api.v2.command_submission_service.{ + CommandSubmissionServiceGrpc, + SubmitReassignmentRequest, + SubmitReassignmentResponse, + SubmitRequest, + SubmitResponse, +} +import com.daml.ledger.api.v2.event_query_service.EventQueryServiceGrpc.EventQueryService +import com.daml.ledger.api.v2.event_query_service.{ + EventQueryServiceGrpc, + GetEventsByContractIdRequest, +} +import com.daml.ledger.api.v2.interactive.interactive_submission_service.InteractiveSubmissionServiceGrpc.InteractiveSubmissionService +import com.daml.ledger.api.v2.interactive.interactive_submission_service.{ + ExecuteSubmissionAndWaitForTransactionRequest, + ExecuteSubmissionAndWaitForTransactionResponse, + ExecuteSubmissionAndWaitRequest, + ExecuteSubmissionAndWaitResponse, + ExecuteSubmissionRequest, + ExecuteSubmissionResponse, + GetPreferredPackageVersionRequest, + GetPreferredPackageVersionResponse, + GetPreferredPackagesRequest, + GetPreferredPackagesResponse, + InteractiveSubmissionServiceGrpc, + PrepareSubmissionRequest, + PrepareSubmissionResponse, +} +import com.daml.ledger.api.v2.package_service.PackageServiceGrpc.PackageService +import com.daml.ledger.api.v2.package_service.{ + GetPackageRequest, + GetPackageResponse, + GetPackageStatusRequest, + GetPackageStatusResponse, + ListPackagesRequest, + ListPackagesResponse, + ListVettedPackagesRequest, + ListVettedPackagesResponse, + PackageServiceGrpc, +} +import com.daml.ledger.api.v2.state_service.StateServiceGrpc.StateService +import com.daml.ledger.api.v2.state_service.{ + GetActiveContractsRequest, + GetActiveContractsResponse, + GetConnectedSynchronizersRequest, + GetConnectedSynchronizersResponse, + GetLatestPrunedOffsetsRequest, + GetLatestPrunedOffsetsResponse, + GetLedgerEndRequest, + GetLedgerEndResponse, + StateServiceGrpc, +} +import com.daml.ledger.api.v2.testing.time_service.TimeServiceGrpc +import com.daml.ledger.api.v2.testing.time_service.TimeServiceGrpc.TimeService +import com.daml.ledger.api.v2.update_service.UpdateServiceGrpc.UpdateService +import com.daml.ledger.api.v2.update_service.{ + GetUpdateByIdRequest, + GetUpdateByOffsetRequest, + GetUpdateResponse, + GetUpdatesRequest, + GetUpdatesResponse, + UpdateServiceGrpc, +} +import com.daml.ledger.api.v2.version_service.VersionServiceGrpc.VersionService +import com.daml.ledger.api.v2.version_service.{GetLedgerApiVersionRequest, VersionServiceGrpc} +import com.digitalasset.base.error.ErrorCategory.GenericErrorCategory +import com.digitalasset.base.error.utils.DecodedCantonError +import com.digitalasset.base.error.{ + ErrorCategory, + ErrorCategoryRetry, + ErrorClass, + ErrorCode, + ErrorResource, + Grouping, +} +import com.digitalasset.canton.http.json.v2.Endpoints.{CallerContext, Jwt} +import com.digitalasset.canton.http.json.v2.JsSchema.JsCantonError +import com.digitalasset.canton.http.json.v2.{ + JsCommandService, + JsEventService, + JsExecuteSubmissionAndWaitForTransactionRequest, + JsExecuteSubmissionAndWaitForTransactionResponse, + JsExecuteSubmissionAndWaitRequest, + JsExecuteSubmissionRequest, + JsGetActiveContractsResponse, + JsIdentityProviderService, + JsInteractiveSubmissionService, + JsPackageService, + JsPartyManagementService, + JsPrepareSubmissionRequest, + JsPrepareSubmissionResponse, + JsStateService, + JsUpdateService, + JsUserManagementService, + JsVersionService, + LegacyDTOs, + PagedList, + 
ProtocolConverters, + SchemaProcessorsImpl, + TranscodePackageIdResolver, +} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, NoLogging} +import com.digitalasset.canton.serialization.ProtoConverter.InstantConverter +import com.digitalasset.canton.store.packagemeta.PackageMetadata +import com.digitalasset.canton.store.packagemeta.PackageMetadata.Implicits.packageMetadataSemigroup +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.OptionUtil +import com.digitalasset.daml.lf.archive.{DarParser, Decode} +import io.grpc.health.v1.health.HealthGrpc +import io.grpc.health.v1.health.HealthGrpc.Health +import io.grpc.stub.StreamObserver +import io.grpc.{Channel, ClientInterceptor, Status} +import org.apache.pekko.NotUsed +import org.apache.pekko.actor.ActorSystem +import org.apache.pekko.http.scaladsl.settings.ConnectionPoolSettings +import org.apache.pekko.stream.Materializer +import org.apache.pekko.stream.scaladsl.{Flow, Sink, Source} +import org.apache.pekko.util.ByteString +import org.reactivestreams.{Subscriber, Subscription} +import org.slf4j.event.Level +import sttp.capabilities +import sttp.capabilities.pekko.PekkoStreams +import sttp.client3.pekkohttp.PekkoHttpBackend +import sttp.model.StatusCode +import sttp.tapir.client.sttp.SttpClientInterpreter +import sttp.tapir.{DecodeResult, Endpoint} + +import java.io.InputStream +import java.util.concurrent.atomic.AtomicReference +import java.util.zip.ZipInputStream +import scala.concurrent.duration.FiniteDuration +import scala.concurrent.{ExecutionContext, Future} +import scala.jdk.CollectionConverters.IteratorHasAsScala +import scala.util.Using + +object LedgerServices { + def apply( + participantEndpoint: Either[JsonApiEndpoint, Channel], + commandInterceptors: Seq[ClientInterceptor], + dars: List[String], + )(implicit executionContext: ExecutionContext): LedgerServices = participantEndpoint + .map(LedgerServicesGrpc(_, commandInterceptors)) + .left + .map { case (hostname, port) => + implicit val actorSystem: ActorSystem = ActorSystem("LedgerServicesJson") + implicit val materializer: Materializer = Materializer(actorSystem) + + LedgerServicesJson( + hostname, + port, + dars, + None, + ) + } + .merge +} + +private final case class LedgerServicesJson( + hostname: String, + port: Int, + dars: List[String], + tokenParam: Option[String], +)(implicit executionContext: ExecutionContext, mat: Materializer) + extends LedgerServices + with NamedLogging { + import com.digitalasset.canton.http.util.GrpcHttpErrorCodes.`gRPC status as sttp` + protected def loggerFactory: NamedLoggerFactory = + NamedLoggerFactory("client-ledger-services", "json") + + implicit val traceContext: TraceContext = + TraceContext.createNew("ledger_services") + private val tokenPrefix: String = "jwt.token." 
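+ // Authentication for websocket requests: the JWT is passed as a "jwt.token.<jwt>" subprotocol
+ // together with "daml.ws.auth", rather than as an Authorization header (see
+ // customizeWebsocketRequest below).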
+ private val wsProtocol: String = "daml.ws.auth"
+ implicit val token: Option[String] = tokenParam
+
+ private val backend = PekkoHttpBackend(
+ customConnectionPoolSettings =
+ Some(ConnectionPoolSettings.apply(mat.system).withMaxOpenRequests(256)),
+ customizeRequest = { request =>
+ logger.debug(s"JSON Request ${request.method} ${request.uri}")
+ request
+ },
+ customizeWebsocketRequest = { request =>
+ val prot = s"""$tokenPrefix${token.getOrElse("")},$wsProtocol"""
+ val reqf = request.copy(
+ subprotocol = Some(prot),
+ extraHeaders = Seq(),
+ ) // We clear extraHeaders, as they contain a redundant wsProtocol header
+ reqf
+ },
+ )
+
+ private def client[INPUT, OUTPUT](
+ endpoint: sttp.tapir.Endpoint[
+ CallerContext,
+ INPUT,
+ (StatusCode, JsCantonError),
+ OUTPUT,
+ sttp.capabilities.pekko.PekkoStreams & sttp.capabilities.WebSockets,
+ ],
+ ws: Boolean,
+ ): CallerContext => INPUT => Future[DecodeResult[Either[(StatusCode, JsCantonError), OUTPUT]]] =
+ SttpClientInterpreter().toSecureClient(
+ endpoint,
+ Some(sttp.model.Uri(if (ws) "ws" else "http", hostname, port)),
+ backend,
+ )(WsHelper.webSocketsSupportedForPekkoStreams)
+
+ private def clientContext[INPUT, OUTPUT](
+ endpoint: sttp.tapir.Endpoint[
+ CallerContext,
+ INPUT,
+ (StatusCode, JsCantonError),
+ OUTPUT,
+ sttp.capabilities.pekko.PekkoStreams & sttp.capabilities.WebSockets,
+ ],
+ ws: Boolean,
+ ): INPUT => Future[DecodeResult[Either[(StatusCode, JsCantonError), OUTPUT]]] =
+ client(endpoint, ws)(CallerContext(token.map(Jwt.apply)))
+
+ private def clientCall[INPUT, OUTPUT](
+ endpoint: sttp.tapir.Endpoint[
+ CallerContext,
+ INPUT,
+ (StatusCode, JsCantonError),
+ OUTPUT,
+ sttp.capabilities.pekko.PekkoStreams & sttp.capabilities.WebSockets,
+ ],
+ input: INPUT,
+ ws: Boolean = false,
+ ): Future[OUTPUT] =
+ clientContext(endpoint, ws)(input)
+ .map {
+ case DecodeResult.Value(Right(value)) =>
+ value
+ case DecodeResult.Value(Left(jsonCallError)) =>
+ val decoded: DecodedCantonError = toDecodedCantonError(jsonCallError._2)
+ val grpcErrorCode = decoded.code.category.grpcCode.getOrElse(Status.UNKNOWN.getCode)
+ assert(
+ grpcErrorCode.asSttpStatus == jsonCallError._1,
+ "returned grpcError should match http status code from response",
+ )
+ val sre = ErrorCode.asGrpcError(decoded)(NoLogging)
+ throw sre
+ case DecodeResult.Error(error, thr) =>
+ throw new RuntimeException(s"error calling service $error", thr)
+ case otherError =>
+ throw new RuntimeException(s"unknown error calling service $otherError")
+ }
+
+ private def toDecodedCantonError(jsCantonError: JsCantonError): DecodedCantonError =
+ new DecodedCantonError(
+ code = GenericErrorCode(
+ id = jsCantonError.code,
+ category = GenericErrorCategory(
+ grpcCode = jsCantonError.grpcCodeValue.map(Status.fromCodeValue).map(_.getCode),
+ logLevel = Level.INFO,
+ retryable = jsCantonError.retryInfo.map(duration =>
+ ErrorCategoryRetry(FiniteDuration(duration.length, duration.unit))
+ ),
+ redactDetails = false,
+ asInt = jsCantonError.errorCategory,
+ // unused
+ rank = 1,
+ ),
+ ),
+ cause = jsCantonError.cause,
+ correlationId = jsCantonError.correlationId,
+ traceId = jsCantonError.traceId,
+ context = jsCantonError.context,
+ resources = jsCantonError.resources.map { case (k, v) => (ErrorResource(k), v) },
+ definiteAnswerO = jsCantonError.definiteAnswer,
+ )
+
+ private def wsCall[REQ, JSRESP, RESP](
+ endpoint: Endpoint[CallerContext, Unit, (StatusCode, JsCantonError), Flow[
+ REQ,
+ Either[JsCantonError, JSRESP],
+ Any,
+ ], PekkoStreams &
capabilities.WebSockets], + input: REQ, + responseObserver: StreamObserver[RESP], + converter: JSRESP => Future[RESP] = { (elem: JSRESP) => + Future.successful(elem.asInstanceOf[RESP]) + }, + ): Future[Unit] = { + for { + wsFlow <- clientCall(endpoint = endpoint, input = (), ws = true) + _ <- Future { + + val sink = Sink.fromSubscriber(new Subscriber[Either[JsCantonError, RESP]]() { + override def onSubscribe(subscription: Subscription): Unit = + subscription.request(Long.MaxValue) + + override def onNext(t: Either[JsCantonError, RESP]): Unit = + t match { + case Left(cantonError) => + val decoded = toDecodedCantonError(cantonError) + val err = io.grpc.protobuf.StatusProto.toStatusRuntimeException( + com.google.rpc.status.Status + .toJavaProto(decoded.toRpcStatusWithForwardedRequestId) + ) + responseObserver.onError(err) + case Right(v) => responseObserver.onNext(v) + } + + override def onError(t: Throwable): Unit = + responseObserver.onError(t) + + override def onComplete(): Unit = + responseObserver.onCompleted() + }) + Source + .single( + input + ) + .via(wsFlow) + .mapAsync(1) { + case Left(e) => Future.successful(Left(e)) + case Right(js) => converter(js).map(Right(_)) + } + .to(sink) + .run() + } + } yield { + () + } + Future.unit + } + + private val packageMetadataView: AtomicReference[PackageMetadata] = + new AtomicReference(PackageMetadata()) + + dars.foreach(dar => addDarToPackageMetadataView(Dars.read(dar).newInput())) + + private val schemaProcessors = + new SchemaProcessorsImpl(_ => packageMetadataView.get().packages, loggerFactory) + private val protocolConverters = + new ProtocolConverters( + schemaProcessors, + // For simplicity, use the package metadata built from all the DARs expected + // to be used in tests. This means that the package name resolution done by the conformance tests + // ledger client will not be affected by the topology of the target participant. 
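+ // The metadata view is seeded from the configured DARs (see above) and is extended
+ // whenever a DAR is uploaded through this client, via addDarToPackageMetadataView.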
+ TranscodePackageIdResolver.packageMetadataBacked( + () => packageMetadataView.get(), + loggerFactory, + ), + ) + + def command: CommandService = new CommandService { + + override def submitAndWait( + request: SubmitAndWaitRequest + ): Future[SubmitAndWaitResponse] = for { + jsCommands <- protocolConverters.Commands.toJson(request.getCommands) + response <- clientCall(JsCommandService.submitAndWait, jsCommands) + } yield response + + override def submitAndWaitForTransaction( + request: SubmitAndWaitForTransactionRequest + ): Future[SubmitAndWaitForTransactionResponse] = for { + jsRequest <- protocolConverters.SubmitAndWaitForTransactionRequest.toJson(request) + jsResponse <- clientCall(JsCommandService.submitAndWaitForTransactionEndpoint, jsRequest) + response <- protocolConverters.SubmitAndWaitTransactionResponse.fromJson(jsResponse) + } yield response + + override def submitAndWaitForReassignment( + request: SubmitAndWaitForReassignmentRequest + ): Future[SubmitAndWaitForReassignmentResponse] = for { + jsResponse <- clientCall(JsCommandService.submitAndWaitForReassignmentEndpoint, request) + response <- protocolConverters.SubmitAndWaitForReassignmentResponse.fromJson(jsResponse) + } yield response + } + + def commandCompletion: CommandCompletionService = ( + request: CompletionStreamRequest, + responseObserver: StreamObserver[CompletionStreamResponse], + ) => wsCall(JsCommandService.completionStreamEndpoint, request, responseObserver) + + def commandSubmission: CommandSubmissionService = new CommandSubmissionService { + + override def submit(request: SubmitRequest): Future[SubmitResponse] = + for { + jsCommands <- protocolConverters.Commands.toJson(request.getCommands) + resp <- clientCall(JsCommandService.submitAsyncEndpoint, jsCommands) + } yield resp + + override def submitReassignment( + request: SubmitReassignmentRequest + ): Future[SubmitReassignmentResponse] = + clientCall(JsCommandService.submitReassignmentAsyncEndpoint, request) + } + + def health: Health = throw new UnsupportedOperationException( + "Health is not available in JSON API" + ) + + def state: StateService = new StateService { + override def getActiveContracts( + request: GetActiveContractsRequest, + responseObserver: StreamObserver[GetActiveContractsResponse], + ): Unit = wsCall( + JsStateService.activeContractsEndpoint, + toGetActiveContractsRequestLegacy(request), + responseObserver, + (v: JsGetActiveContractsResponse) => + protocolConverters.GetActiveContractsResponse + .fromJson(v), + ) + + private def toGetActiveContractsRequestLegacy( + req: GetActiveContractsRequest + ): LegacyDTOs.GetActiveContractsRequest = + LegacyDTOs.GetActiveContractsRequest( + filter = None, + activeAtOffset = req.activeAtOffset, + eventFormat = req.eventFormat, + ) + + override def getConnectedSynchronizers( + request: GetConnectedSynchronizersRequest + ): Future[GetConnectedSynchronizersResponse] = + clientCall( + JsStateService.getConnectedSynchronizersEndpoint, + ( + Option(request.party).filter(_.nonEmpty), + Option(request.participantId).filter(_.nonEmpty), + Option(request.identityProviderId).filter(_.nonEmpty), + ), + ) + + override def getLedgerEnd(request: GetLedgerEndRequest): Future[GetLedgerEndResponse] = + clientCall(JsStateService.getLedgerEndEndpoint, ()) + + override def getLatestPrunedOffsets( + request: GetLatestPrunedOffsetsRequest + ): Future[GetLatestPrunedOffsetsResponse] = + clientCall(JsStateService.getLastPrunedOffsetsEndpoint, ()) + + } + + def partyManagement: PartyManagementService = new 
PartyManagementService {
+ override def getParticipantId(
+ request: GetParticipantIdRequest
+ ): Future[GetParticipantIdResponse] =
+ clientCall(JsPartyManagementService.getParticipantIdEndpoint, ())
+
+ override def getParties(request: GetPartiesRequest): Future[GetPartiesResponse] =
+ clientCall(
+ JsPartyManagementService.getPartyEndpoint,
+ (request.parties.head, Some(request.identityProviderId), request.parties.drop(1).toList),
+ )
+
+ override def listKnownParties(
+ request: ListKnownPartiesRequest
+ ): Future[ListKnownPartiesResponse] =
+ clientCall(
+ JsPartyManagementService.listKnownPartiesEndpoint,
+ PagedList((), Some(request.pageSize), Some(request.pageToken)),
+ )
+
+ override def allocateParty(request: AllocatePartyRequest): Future[AllocatePartyResponse] =
+ for {
+ jsRequest <- protocolConverters.AllocatePartyRequest.toJson(request)
+ resp <- clientCall(JsPartyManagementService.allocatePartyEndpoint, jsRequest)
+ } yield resp
+
+ override def updatePartyDetails(
+ request: UpdatePartyDetailsRequest
+ ): Future[UpdatePartyDetailsResponse] =
+ clientCall(
+ JsPartyManagementService.updatePartyEndpoint,
+ (request.getPartyDetails.party, request),
+ )
+
+ override def updatePartyIdentityProviderId(
+ request: UpdatePartyIdentityProviderIdRequest
+ ): Future[UpdatePartyIdentityProviderIdResponse] = throw new UnsupportedOperationException(
+ "updatePartyIdentityProviderId is not available in JSON API"
+ )
+
+ override def allocateExternalParty(
+ request: AllocateExternalPartyRequest
+ ): Future[AllocateExternalPartyResponse] = clientCall(
+ JsPartyManagementService.allocateExternalPartyEndpoint,
+ request,
+ )
+ override def generateExternalPartyTopology(
+ request: GenerateExternalPartyTopologyRequest
+ ): Future[GenerateExternalPartyTopologyResponse] =
+ clientCall(JsPartyManagementService.externalPartyGenerateTopologyEndpoint, request)
+
+ }
+ def packageManagement: PackageManagementService = new PackageManagementService {
+ override def listKnownPackages(
+ request: ListKnownPackagesRequest
+ ): Future[ListKnownPackagesResponse] =
+ throw new UnsupportedOperationException(
+ "PackageManagement listKnownPackages is not available in JSON API"
+ )
+
+ override def uploadDarFile(request: UploadDarFileRequest): Future[UploadDarFileResponse] = {
+ val src: Source[ByteString, NotUsed] = Source.fromIterator(() =>
+ request.darFile
+ .asReadOnlyByteBufferList()
+ .iterator
+ .asScala
+ .map(org.apache.pekko.util.ByteString(_))
+ )
+ clientCall(
+ JsPackageService.uploadDar,
+ (
+ src,
+ request.vettingChange match {
+ case UploadDarFileRequest.VettingChange.VETTING_CHANGE_UNSPECIFIED =>
+ None
+ case UploadDarFileRequest.VettingChange.VETTING_CHANGE_VET_ALL_PACKAGES =>
+ Some(true)
+ case UploadDarFileRequest.VettingChange.VETTING_CHANGE_DONT_VET_ANY_PACKAGES =>
+ Some(false)
+ case UploadDarFileRequest.VettingChange.Unrecognized(unrecognizedValue) =>
+ throw new IllegalArgumentException(
+ s"could not convert unrecognized VettingChange enum $unrecognizedValue to a boolean"
+ )
+ },
+ OptionUtil.emptyStringAsNone(request.synchronizerId),
+ ),
+ )
+ }.map { result =>
+ Using(request.darFile.newInput()) {
+ addDarToPackageMetadataView
+ }
+ result
+ }
+
+ override def validateDarFile(request: ValidateDarFileRequest): Future[ValidateDarFileResponse] =
+ throw new UnsupportedOperationException(
+ "ValidateDarFile is not available in JSON API"
+ )
+
+ override def updateVettedPackages(
+ request: UpdateVettedPackagesRequest
+ ): Future[UpdateVettedPackagesResponse] =
+
clientCall(JsPackageService.updateVettedPackagesEndpoint, request) + } + + private def addDarToPackageMetadataView(inputStream: InputStream): Unit = + Using(new ZipInputStream(inputStream)) { zip => + (for { + archive <- DarParser.readArchive("Uploaded DAR", zip) + pkgs <- archive.all.traverse(Decode.decodeArchive(_)) + } yield { + pkgs.map { case (pkgId, pkg) => PackageMetadata.from(pkgId, pkg) }.foreach { + newPackageMetadata => + packageMetadataView.updateAndGet(_ |+| newPackageMetadata) + } + }).fold( + err => { + logger.error("Could not load a requested DAR", err) + throw err + }, + identity, + ) + } + + def participantPruning: ParticipantPruningService = + throw new UnsupportedOperationException( + "ParticipantPruningService is not available in JSON API" + ) + + def packages: PackageService = new PackageService { + + override def listPackages(request: ListPackagesRequest): Future[ListPackagesResponse] = + clientCall(JsPackageService.listPackagesEndpoint, ()) + + override def getPackage(request: GetPackageRequest): Future[GetPackageResponse] = + for { + body <- clientCall(JsPackageService.downloadPackageEndpoint, request.packageId) + bytes <- body._1.runFold(ByteString.empty)(_ ++ _) + } yield GetPackageResponse( + hashFunction = com.daml.ledger.api.v2.package_service.HashFunction.HASH_FUNCTION_SHA256, + archivePayload = com.google.protobuf.ByteString.copyFrom(bytes.toArray), + hash = body._2, + ) + + override def getPackageStatus( + request: GetPackageStatusRequest + ): Future[GetPackageStatusResponse] = + clientCall(JsPackageService.packageStatusEndpoint, request.packageId) + + override def listVettedPackages( + request: ListVettedPackagesRequest + ): Future[ListVettedPackagesResponse] = + clientCall(JsPackageService.listVettedPackagesEndpoint, request) + } + + def update: UpdateService = new UpdateService { + + override def getUpdates( + request: GetUpdatesRequest, + responseObserver: StreamObserver[GetUpdatesResponse], + ): Unit = wsCall( + JsUpdateService.getUpdatesEndpoint, + toGetUpdatesRequestLegacy(request), + responseObserver, + protocolConverters.GetUpdatesResponse.fromJson, + ) + + private def toGetUpdatesRequestLegacy( + req: GetUpdatesRequest + ): LegacyDTOs.GetUpdatesRequest = + LegacyDTOs.GetUpdatesRequest( + beginExclusive = req.beginExclusive, + endInclusive = req.endInclusive, + filter = None, + updateFormat = req.updateFormat, + ) + + override def getUpdateByOffset( + request: GetUpdateByOffsetRequest + ): Future[GetUpdateResponse] = clientCall( + JsUpdateService.getUpdateByOffsetEndpoint, + request, + ) + .flatMap(protocolConverters.GetUpdateResponse.fromJson) + + override def getUpdateById( + request: GetUpdateByIdRequest + ): Future[GetUpdateResponse] = + clientCall( + JsUpdateService.getUpdateByIdEndpoint, + request, + ) + .flatMap(protocolConverters.GetUpdateResponse.fromJson) + } + + def eventQuery: EventQueryService = (request: GetEventsByContractIdRequest) => + clientCall( + JsEventService.getEventsByContractIdEndpoint, + request, + ) + .flatMap(protocolConverters.GetEventsByContractIdResponse.fromJson) + def time: TimeService = throw new UnsupportedOperationException( + "TimeService is not available in JSON API" + ) + + def version: VersionService = (_: GetLedgerApiVersionRequest) => + clientCall(JsVersionService.versionEndpoint, ()) + + def userManagement: UserManagementService = new UserManagementService { + override def createUser(request: CreateUserRequest): Future[CreateUserResponse] = + clientCall(JsUserManagementService.createUserEndpoint, request) 
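+
+ // getUser: an empty userId denotes the authenticated user itself; the JSON API resolves it
+ // through the dedicated current-user endpoint instead of a lookup by id.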
+ + override def getUser(request: GetUserRequest): Future[GetUserResponse] = + if (request.userId.isEmpty) { + clientCall( + JsUserManagementService.getCurrentUserEndpoint, + Some(request.identityProviderId), + ) + } else { + clientCall( + JsUserManagementService.getUserEndpoint, + (request.userId, Some(request.identityProviderId)), + ) + } + + override def updateUser(request: UpdateUserRequest): Future[UpdateUserResponse] = + clientCall(JsUserManagementService.updateUserEndpoint, (request.getUser.id, request)) + + override def deleteUser(request: DeleteUserRequest): Future[DeleteUserResponse] = + clientCall(JsUserManagementService.deleteUserEndpoint, request.userId).map(_ => + DeleteUserResponse() + ) + + override def listUsers(request: ListUsersRequest): Future[ListUsersResponse] = + clientCall( + JsUserManagementService.listUsersEndpoint, + PagedList((), Some(request.pageSize), Some(request.pageToken)), + ) + + override def grantUserRights(request: GrantUserRightsRequest): Future[GrantUserRightsResponse] = + clientCall(JsUserManagementService.grantUserRightsEndpoint, (request.userId, request)) + + override def revokeUserRights( + request: RevokeUserRightsRequest + ): Future[RevokeUserRightsResponse] = + clientCall(JsUserManagementService.revokeUserRightsEndpoint, (request.userId, request)) + + override def listUserRights(request: ListUserRightsRequest): Future[ListUserRightsResponse] = + clientCall(JsUserManagementService.listUserRightsEndpoint, request.userId) + + override def updateUserIdentityProviderId( + request: UpdateUserIdentityProviderIdRequest + ): Future[UpdateUserIdentityProviderIdResponse] = + clientCall( + JsUserManagementService.updateUserIdentityProviderEndpoint, + (request.userId, request), + ) + + } + def identityProviderConfig: IdentityProviderConfigService = new IdentityProviderConfigService { + + override def createIdentityProviderConfig( + request: CreateIdentityProviderConfigRequest + ): Future[CreateIdentityProviderConfigResponse] = + clientCall(JsIdentityProviderService.createIdpsEndpoint, request) + + override def getIdentityProviderConfig( + request: GetIdentityProviderConfigRequest + ): Future[GetIdentityProviderConfigResponse] = + clientCall(JsIdentityProviderService.getIdpEndpoint, request.identityProviderId) + + override def updateIdentityProviderConfig( + request: UpdateIdentityProviderConfigRequest + ): Future[UpdateIdentityProviderConfigResponse] = + clientCall( + JsIdentityProviderService.updateIdpEndpoint, + (request.getIdentityProviderConfig.identityProviderId, request), + ) + + override def listIdentityProviderConfigs( + request: ListIdentityProviderConfigsRequest + ): Future[ListIdentityProviderConfigsResponse] = + clientCall(JsIdentityProviderService.listIdpsEndpoint, ()) + + override def deleteIdentityProviderConfig( + request: DeleteIdentityProviderConfigRequest + ): Future[DeleteIdentityProviderConfigResponse] = + clientCall(JsIdentityProviderService.deleteIdpEndpoint, request.identityProviderId) + } + + override def interactiveSubmission: InteractiveSubmissionService = + new InteractiveSubmissionService { + override def prepareSubmission( + request: PrepareSubmissionRequest + ): Future[PrepareSubmissionResponse] = for { + jsPrepareRequest <- protocolConverters.PrepareSubmissionRequest.toJson(request) + response <- clientCall[JsPrepareSubmissionRequest, JsPrepareSubmissionResponse]( + JsInteractiveSubmissionService.prepareEndpoint, + jsPrepareRequest, + ).flatMap { jsResponse => + 
protocolConverters.PrepareSubmissionResponse.fromJson(jsResponse) + } + } yield response + + override def executeSubmission( + request: ExecuteSubmissionRequest + ): Future[ExecuteSubmissionResponse] = + for { + jsExecuteRequest <- protocolConverters.ExecuteSubmissionRequest.toJson(request) + response <- clientCall[JsExecuteSubmissionRequest, ExecuteSubmissionResponse]( + JsInteractiveSubmissionService.executeEndpoint, + jsExecuteRequest, + ) + } yield response + + override def getPreferredPackages( + request: GetPreferredPackagesRequest + ): Future[GetPreferredPackagesResponse] = + clientCall( + JsInteractiveSubmissionService.preferredPackagesEndpoint, + request, + ) + + override def getPreferredPackageVersion( + request: GetPreferredPackageVersionRequest + ): Future[GetPreferredPackageVersionResponse] = + clientCall( + JsInteractiveSubmissionService.preferredPackageVersionEndpoint, + ( + request.parties.toList, + request.packageName, + request.vettingValidAt.map( + InstantConverter + .fromProtoPrimitive(_) + .getOrElse( + throw new IllegalArgumentException( + s"could not transform ${request.vettingValidAt} to an Instant" + ) + ) + ), + Option(request.synchronizerId).filter(_.nonEmpty), + ), + ) + + override def executeSubmissionAndWait( + request: ExecuteSubmissionAndWaitRequest + ): Future[ExecuteSubmissionAndWaitResponse] = + for { + jsExecuteRequest <- protocolConverters.ExecuteSubmissionAndWaitRequest.toJson(request) + response <- clientCall[ + JsExecuteSubmissionAndWaitRequest, + ExecuteSubmissionAndWaitResponse, + ]( + JsInteractiveSubmissionService.executeAndWaitEndpoint, + jsExecuteRequest, + ) + } yield response + + override def executeSubmissionAndWaitForTransaction( + request: ExecuteSubmissionAndWaitForTransactionRequest + ): Future[ExecuteSubmissionAndWaitForTransactionResponse] = + for { + jsExecuteRequest <- protocolConverters.ExecuteSubmissionAndWaitForTransactionRequest + .toJson(request) + response <- clientCall[ + JsExecuteSubmissionAndWaitForTransactionRequest, + JsExecuteSubmissionAndWaitForTransactionResponse, + ]( + JsInteractiveSubmissionService.executeAndWaitForTransactionEndpoint, + jsExecuteRequest, + ) + grpcResponse <- protocolConverters.ExecuteSubmissionAndWaitForTransactionResponse + .fromJson(response) + } yield grpcResponse + } +} + +sealed trait LedgerServices { + def command: CommandService + def commandCompletion: CommandCompletionService + def commandSubmission: CommandSubmissionService + def health: Health + def interactiveSubmission: InteractiveSubmissionService + def state: StateService + def partyManagement: PartyManagementService + def packageManagement: PackageManagementService + def participantPruning: ParticipantPruningService + def packages: PackageService + def update: UpdateService + def eventQuery: EventQueryService + def time: TimeService + def version: VersionService + def userManagement: UserManagementService + def identityProviderConfig: IdentityProviderConfigService +} + +private final case class LedgerServicesGrpc( + channel: Channel, + commandInterceptors: Seq[ClientInterceptor], +) extends LedgerServices { + + val command: CommandService = + CommandServiceGrpc.stub(channel).withInterceptors(commandInterceptors*) + + val commandCompletion: CommandCompletionService = + CommandCompletionServiceGrpc.stub(channel) + + val commandSubmission: CommandSubmissionService = + CommandSubmissionServiceGrpc.stub(channel).withInterceptors(commandInterceptors*) + + val health: Health = + HealthGrpc.stub(channel) + + val state: StateService = + 
StateServiceGrpc.stub(channel) + + val partyManagement: PartyManagementService = + PartyManagementServiceGrpc.stub(channel) + + val packageManagement: PackageManagementService = + PackageManagementServiceGrpc.stub(channel) + + val participantPruning: ParticipantPruningService = + ParticipantPruningServiceGrpc.stub(channel) + + val packages: PackageService = + PackageServiceGrpc.stub(channel) + + val update: UpdateService = + UpdateServiceGrpc.stub(channel) + + val eventQuery: EventQueryService = + EventQueryServiceGrpc.stub(channel) + + val time: TimeService = + TimeServiceGrpc.stub(channel) + + val version: VersionService = + VersionServiceGrpc.stub(channel) + + val userManagement: UserManagementService = + UserManagementServiceGrpc.stub(channel) + + val identityProviderConfig: IdentityProviderConfigService = + IdentityProviderConfigServiceGrpc.stub(channel) + + val interactiveSubmission: InteractiveSubmissionService = + InteractiveSubmissionServiceGrpc.stub(channel) +} + +object JsonErrors { + implicit val genericErrorClass: ErrorClass = ErrorClass( + List(Grouping("generic", "ErrorClass")) + ) + + final case class GenericErrorCode( + override val id: String, + override val category: ErrorCategory, + ) extends ErrorCode(id, category) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerSession.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerSession.scala new file mode 100644 index 0000000000..5ca6ac1eb2 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerSession.scala @@ -0,0 +1,55 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
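+
+// A LedgerSession owns one ParticipantSession per configured endpoint and creates a fresh
+// LedgerTestContext for every test case, optionally shuffling the participant order so that
+// test results cannot depend on it.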
+ +package com.daml.ledger.api.testtool.infrastructure + +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantSession + +import scala.concurrent.{ExecutionContext, Future} +import scala.util.Random + +private[infrastructure] final class LedgerSession private ( + participantSessions: Vector[(String, ParticipantSession)], + shuffleParticipants: Boolean, + connectedSynchronizers: Int, +)(implicit val executionContext: ExecutionContext) { + + private[infrastructure] def createTestContext( + userId: String, + identifierSuffix: String, + ): Future[LedgerTestContext] = { + val sessions = + if (shuffleParticipants) Random.shuffle(participantSessions) + else participantSessions + Future + .traverse(sessions) { case (endpointId, session) => + session.createTestContext( + endpointId, + userId, + identifierSuffix, + session.features, + ) + } + .map(new LedgerTestContext(_, connectedSynchronizers)) + } + +} + +object LedgerSession { + + def apply( + participantSessions: Vector[ParticipantSession], + shuffleParticipants: Boolean, + connectedSynchronizers: Int, + )(implicit executionContext: ExecutionContext): LedgerSession = { + val endpointIdProvider = + Identification.circularWithIndex(Identification.greekAlphabet) + val sessions = participantSessions.map(endpointIdProvider() -> _) + new LedgerSession( + sessions, + shuffleParticipants, + connectedSynchronizers, + ) + } + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerSessionConfiguration.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerSessionConfiguration.scala new file mode 100644 index 0000000000..18144c7c61 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerSessionConfiguration.scala @@ -0,0 +1,17 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.infrastructure + +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantSessionConfiguration +import io.grpc.ManagedChannelBuilder + +private[testtool] final class LedgerSessionConfiguration( + participantChannelBuilders: Vector[ManagedChannelBuilder[_]], + partyAllocation: PartyAllocationConfiguration, + val shuffleParticipants: Boolean, +) { + val participants: Vector[ParticipantSessionConfiguration] = + for (participantChannelBuilder <- participantChannelBuilders) + yield ParticipantSessionConfiguration(participantChannelBuilder, partyAllocation) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerTestCase.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerTestCase.scala new file mode 100644 index 0000000000..ae58ab2ece --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerTestCase.scala @@ -0,0 +1,139 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+
+package com.daml.ledger.api.testtool.infrastructure
+
+import com.daml.ledger.api.testtool.infrastructure.Allocation.{Participants, PartyAllocation}
+import com.daml.ledger.api.testtool.infrastructure.participant.{Features, ParticipantTestContext}
+import com.daml.test.evidence.tag.EvidenceTag
+import com.digitalasset.daml.lf.data.Ref
+import org.slf4j.LoggerFactory
+
+import scala.concurrent.{ExecutionContext, Future}
+import scala.util.{Failure, Success, Try}
+
+/** @param suite
+ * The collection of tests this case belongs to
+ * @param shortIdentifier
+ * A unique identifier used to generate party names, command identifiers, etc.
+ * @param description
+ * A human-readable description of what this case tests
+ * @param timeoutScale
+ * The factor applied to the default timeout
+ * @param runConcurrently
+ * True if the test is safe to run concurrently with other tests without affecting their results
+ * @param partyAllocation
+ * What parties need to be allocated on what participants as a setup for the test case
+ * @param runTestCase
+ * The body of the test to be executed
+ */
+sealed class LedgerTestCase(
+ val suite: LedgerTestSuite,
+ val shortIdentifier: Ref.LedgerString,
+ val description: String,
+ val timeoutScale: Double,
+ val runConcurrently: Boolean,
+ val repeated: Int = 1,
+ val tags: List[EvidenceTag] = List.empty,
+ val limitation: TestConstraints = TestConstraints.NoLimitations,
+ enabled: Features => Boolean,
+ disabledReason: String,
+ partyAllocation: PartyAllocation,
+ runTestCase: ExecutionContext => Seq[ParticipantTestContext] => Participants => Future[Unit],
+) {
+ private val logger = LoggerFactory.getLogger(getClass)
+ val name: String = s"${suite.name}:$shortIdentifier"
+
+ private def allocatePartiesAndRun(
+ context: LedgerTestContext
+ )(implicit ec: ExecutionContext): Future[Unit] =
+ for {
+ participants: Participants <- context.allocateParties(partyAllocation)
+ result <- runTestCase(ec)(context.configuredParticipants)(participants)
+ .transformWith { result =>
+ cleanUpCreatedUsers(context, result)
+ }
+ } yield {
+ result
+ }
+
+ def repetitions: Vector[LedgerTestCase.Repetition] =
+ if (repeated == 1)
+ Vector(new LedgerTestCase.Repetition(this, repetition = None))
+ else
+ (1 to repeated)
+ .map(i => new LedgerTestCase.Repetition(this, repetition = Some(i -> repeated)))
+ .toVector
+
+ def isEnabled(
+ features: Features,
+ participantCount: Int,
+ connectedSynchronizersCount: Int,
+ ): Either[String, Unit] =
+ for {
+ _ <- Either.cond(enabled(features), (), disabledReason)
+ _ <- Either.cond(
+ partyAllocation.minimumParticipantCount <= participantCount,
+ (),
+ s"Not enough participants to run this test case. Expected ${partyAllocation.minimumParticipantCount} but got $participantCount",
+ )
+ _ <- Either.cond(
+ partyAllocation.minimumSynchronizersCount <= connectedSynchronizersCount,
+ (),
+ s"Not enough connected synchronizers to run this test case. Expected ${partyAllocation.minimumSynchronizersCount} but got $connectedSynchronizersCount",
+ )
+ } yield {
+ val recommendedNumberOfParticipants = partyAllocation.partyCounts.size
+ if (recommendedNumberOfParticipants > participantCount)
+ logger.warn(
+ s"Test $shortIdentifier is run with $participantCount participants, fewer than the recommended number of $recommendedNumberOfParticipants"
+ )
+ }
+
+ /** Deletes users created during this test case execution.
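+ * Identity provider configurations created by the test are deleted as well. A clean-up
+ * failure takes precedence over the test's own failure, since leftover users can affect
+ * other test cases.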
+ */ + private def cleanUpCreatedUsers(context: LedgerTestContext, testCaseRunResult: Try[Unit])(implicit + ec: ExecutionContext + ): Future[Unit] = { + lazy val deleteCreatedUsersF = + Future.sequence(context.configuredParticipants.map(_.deleteCreatedUsers())) + lazy val deleteIdentityProvidersUsersF = + Future.sequence(context.configuredParticipants.map(_.deleteCreateIdentityProviders())) + testCaseRunResult match { + case Success(v) => + for { + _ <- deleteCreatedUsersF + _ <- deleteIdentityProvidersUsersF + } yield v + case Failure(exception) => + // Prioritizing a failure of users' clean-up over the original failure of the test case + // since clean-up failures can affect other test cases. + { + for { + _ <- deleteCreatedUsersF + _ <- deleteIdentityProvidersUsersF + } yield () + }.flatMap(_ => Future.failed(exception)) + } + } + +} + +object LedgerTestCase { + + final class Repetition(val testCase: LedgerTestCase, val repetition: Option[(Int, Int)]) { + def suite: LedgerTestSuite = testCase.suite + + def shortIdentifier: Ref.LedgerString = testCase.shortIdentifier + + def description: String = testCase.description + + def timeoutScale: Double = testCase.timeoutScale + + def allocatePartiesAndRun(context: LedgerTestContext)(implicit + ec: ExecutionContext + ): Future[Unit] = + testCase.allocatePartiesAndRun(context) + } + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerTestCasesRunner.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerTestCasesRunner.scala new file mode 100644 index 0000000000..be1b3962da --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerTestCasesRunner.scala @@ -0,0 +1,311 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
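+
+// Drives a complete run of the conformance test cases: it creates the participant sessions,
+// uploads the test DARs, partitions the enabled tests into concurrent and sequential batches,
+// and reports one LedgerTestSummary per executed test case.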
+ +package com.daml.ledger.api.testtool.infrastructure + +import com.daml.ledger.api.testtool.infrastructure.ChannelEndpoint.JsonApiEndpoint +import com.daml.ledger.api.testtool.infrastructure.LedgerTestCasesRunner.* +import com.daml.ledger.api.testtool.infrastructure.PartyAllocationConfiguration.ClosedWorldWaitingForAllParticipants +import com.daml.ledger.api.testtool.infrastructure.participant.{ + ParticipantSession, + ParticipantTestContext, +} +import com.digitalasset.canton.util.MonadUtil +import io.grpc.ClientInterceptor +import org.apache.pekko.actor.ActorSystem +import org.apache.pekko.stream.Materializer +import org.apache.pekko.stream.scaladsl.{Sink, Source} +import org.slf4j.LoggerFactory + +import java.util.concurrent.{ExecutionException, TimeoutException} +import java.util.{Timer, TimerTask} +import scala.concurrent.duration.{Duration, DurationInt} +import scala.concurrent.{ExecutionContext, Future, Promise} +import scala.util.Try +import scala.util.control.NonFatal +import scala.util.matching.Regex + +object LedgerTestCasesRunner { + private val DefaultTimeout = 30.seconds + + private val timer = new Timer("ledger-test-suite-runner-timer", true) + + private val logger = LoggerFactory.getLogger(classOf[LedgerTestCasesRunner]) + + private[this] val uncaughtExceptionErrorMessage = + "UNEXPECTED UNCAUGHT EXCEPTION, GATHER THE STACKTRACE AND OPEN A _DETAILED_ TICKET DESCRIBING THE ISSUE HERE: https://github.com/digital-asset/daml/issues/new" + + private final class UncaughtExceptionError(cause: Throwable) + extends RuntimeException(uncaughtExceptionErrorMessage, cause) + +} + +final class LedgerTestCasesRunner( + testCases: Vector[LedgerTestCase], + participantChannels: Either[Vector[JsonApiEndpoint], Vector[ChannelEndpoint]], + participantAdminChannels: Vector[ChannelEndpoint], + skipDarNamesPattern: Option[Regex], + maxConnectionAttempts: Int = 10, + partyAllocation: PartyAllocationConfiguration = ClosedWorldWaitingForAllParticipants, + shuffleParticipants: Boolean = false, + timeoutScaleFactor: Double = 1.0, + concurrentTestRuns: Int = 8, + identifierSuffix: String = "test", + commandInterceptors: Seq[ClientInterceptor] = Seq.empty, + lfVersion: String, + connectedSynchronizers: Int, +) { + private[this] val verifyRequirements: Try[Unit] = + Try { + require( + maxConnectionAttempts > 0, + "The number of connection attempts must be strictly positive", + ) + require(timeoutScaleFactor > 0, "The timeout scale factor must be strictly positive") + require(identifierSuffix.nonEmpty, "The identifier suffix cannot be an empty string") + } + + def runTests(implicit executionContext: ExecutionContext): Future[Vector[LedgerTestSummary]] = + verifyRequirements.fold( + Future.failed, + _ => prepareResourcesAndRun, + ) + + private def createTestContextAndStart( + test: LedgerTestCase.Repetition, + session: LedgerSession, + )(implicit executionContext: ExecutionContext): Future[Duration] = { + val execution = Promise[Duration]() + val scaledTimeout = DefaultTimeout * timeoutScaleFactor * test.timeoutScale + + val testName = + test.repetition.fold[String](test.shortIdentifier)(r => s"${test.shortIdentifier}_${r._1}") + val startedTest = + session + .createTestContext(testName, identifierSuffix) + .flatMap { context => + val start = System.nanoTime() + val result = test + .allocatePartiesAndRun(context) + .map(_ => Duration.fromNanos(System.nanoTime() - start)) + logger.info( + s"Started '${test.description}'${test.repetition.fold("")(r => s" (${r._1}/${r._2})")} with a timeout of 
$scaledTimeout." + ) + result + } + + val testTimeout = new TimerTask { + override def run(): Unit = { + val message = s"Timeout of $scaledTimeout for '${test.description}' hit." + if (execution.tryFailure(new TimeoutException(message))) { + logger.error(message) + } + } + } + timer.schedule(testTimeout, scaledTimeout.toMillis) + startedTest.onComplete { _ => + testTimeout.cancel() + logger.info(s"Finished '${test.description}'.") + } + execution.completeWith(startedTest).future + } + + private def result( + startedTest: Future[Duration] + )(implicit executionContext: ExecutionContext): Future[Either[Result.Failure, Result.Success]] = + startedTest + .map[Either[Result.Failure, Result.Success]](duration => Right(Result.Succeeded(duration))) + .recover[Either[Result.Failure, Result.Success]] { + case Result.Retired => + Right(Result.Retired) + case Result.Excluded(reason) => + Right(Result.Excluded(reason)) + case _: TimeoutException => + Left(Result.TimedOut) + case failure: AssertionError => + Left(Result.Failed(failure)) + case NonFatal(box: ExecutionException) => + box.getCause match { + case failure: AssertionError => + Left(Result.Failed(failure)) + case exception => + Left(Result.FailedUnexpectedly(exception)) + } + case NonFatal(exception) => + Left(Result.FailedUnexpectedly(exception)) + } + + private def summarize( + suite: LedgerTestSuite, + test: LedgerTestCase, + result: Either[Result.Failure, Result.Success], + ): LedgerTestSummary = + LedgerTestSummary(suite.name, test.name, test.description, result) + + private def run( + test: LedgerTestCase.Repetition, + session: LedgerSession, + )(implicit executionContext: ExecutionContext): Future[Either[Result.Failure, Result.Success]] = + result(createTestContextAndStart(test, session)) + + private def uploadDarsIfRequired( + sessions: Vector[ParticipantSession] + )(implicit executionContext: ExecutionContext): Future[Unit] = { + val allDars = Dars.resources(lfVersion) + val darsToUpload = skipDarNamesPattern + .map { skipRegex => + val darsToUpload = allDars.filterNot(skipRegex.matches) + logger.info( + s"Uploading DARs excluding pattern ${skipRegex.pattern.toString}: ${darsToUpload + .mkString("[", ",", "]")}" + ) + darsToUpload + } + .getOrElse { + logger.info(s"Uploading all available test DARs") + allDars + } + + MonadUtil + .sequentialTraverse(sessions) { session => + logger.info(s"Uploading DAR files for session $session") + for { + context <- session.createInitContext( + userId = "upload-dars", + identifierSuffix = identifierSuffix, + features = session.features, + ) + // upload the dars sequentially to avoid conflicts + _ <- MonadUtil.sequentialTraverse_(darsToUpload)(dar => uploadDar(context, dar)) + } yield () + } + .map(_ => ()) + } + + private def uploadDar( + context: ParticipantTestContext, + name: String, + )(implicit executionContext: ExecutionContext): Future[Unit] = { + logger.info(s"""Uploading DAR "$name"...""") + context + .uploadDarFileAndVetOnConnectedSynchronizers(Dars.read(name)) + .map { _ => + logger.info(s"""Uploaded DAR "$name".""") + } + .recover { case NonFatal(exception) => + throw new Errors.DarUploadException(name, exception) + } + } + + private def createActorSystem: ActorSystem = + ActorSystem(classOf[LedgerTestCasesRunner].getSimpleName) + + private def runTestCases( + ledgerSession: LedgerSession, + testCases: Vector[LedgerTestCase], + concurrency: Int, + )(implicit + materializer: Materializer, + executionContext: ExecutionContext, + ): Future[Vector[LedgerTestSummary]] = { + val 
testCaseRepetitions = testCases.flatMap(_.repetitions) + val testCount = testCaseRepetitions.size + logger.info(s"Running $testCount tests with concurrency of $concurrency.") + Source(testCaseRepetitions.zipWithIndex) + .mapAsyncUnordered(concurrency) { case (test, index) => + run(test, ledgerSession).map(summarize(test.suite, test.testCase, _) -> index) + } + .runWith(Sink.seq) + .map(_.toVector.sortBy(_._2).map(_._1)) + } + + private def run( + participantChannels: Either[Vector[JsonApiEndpoint], Vector[ChannelEndpoint]], + participantAdminChannels: Vector[ChannelEndpoint], + )(implicit + materializer: Materializer, + executionContext: ExecutionContext, + ): Future[Vector[LedgerTestSummary]] = { + val sessions: Future[Vector[ParticipantSession]] = ParticipantSession.createSessions( + partyAllocationConfig = partyAllocation, + participantChannels = participantChannels, + participantAdminChannels = participantAdminChannels, + maxConnectionAttempts = maxConnectionAttempts, + commandInterceptors = commandInterceptors, + timeoutScaleFactor = timeoutScaleFactor, + darList = Dars.resources(lfVersion), + connectedSynchronizers = connectedSynchronizers, + ) + sessions + .flatMap { (sessions: Vector[ParticipantSession]) => + // All the participants should support the same features (for testing at least) + val ledgerFeatures = sessions.head.features + val (disabledTestCases, enabledTestCases) = + testCases.partitionMap(testCase => + testCase + .isEnabled(ledgerFeatures, sessions.size, connectedSynchronizers) + .fold(disabledReason => Left(testCase -> disabledReason), _ => Right(testCase)) + ) + val excludedTestResults = disabledTestCases + .map { case (testCase, disabledReason) => + LedgerTestSummary( + testCase.suite.name, + testCase.name, + testCase.description, + Right(Result.Excluded(disabledReason)), + ) + } + val (concurrentTestCases, sequentialTestCases) = + enabledTestCases.partition(_.runConcurrently) + val ledgerSession = LedgerSession( + sessions, + shuffleParticipants, + connectedSynchronizers, + ) + val testResults = + for { + _ <- uploadDarsIfRequired(sessions) + sequentialTestResults <- runTestCases( + ledgerSession, + sequentialTestCases, + concurrency = 1, + )(materializer, executionContext) + concurrentTestResults <- runTestCases( + ledgerSession, + concurrentTestCases, + concurrentTestRuns, + )(materializer, executionContext) + } yield concurrentTestResults ++ sequentialTestResults ++ excludedTestResults + + testResults.recover { + case NonFatal(e) if !e.isInstanceOf[Errors.FrameworkException] => + throw new LedgerTestCasesRunner.UncaughtExceptionError(e) + } + } + } + + private def prepareResourcesAndRun(implicit + executionContext: ExecutionContext + ): Future[Vector[LedgerTestSummary]] = { + + val materializerResources = + ResourceOwner.forMaterializerDirectly(() => createActorSystem).acquire() + + // When running the tests, explicitly use the materializer's execution context + // The tests will then be executed on it instead of the implicit one -- which + // should only be used to manage resources' lifecycle + val results = + for { + materializer <- materializerResources.asFuture + results <- run(participantChannels, participantAdminChannels)( + materializer, + executionContext, + ) + } yield results + + results.onComplete(_ => materializerResources.release()) + + results + } + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerTestContext.scala 
b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerTestContext.scala new file mode 100644 index 0000000000..406dcdcf2b --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerTestContext.scala @@ -0,0 +1,69 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.api.testtool.infrastructure
+
+import com.daml.ledger.api.testtool.infrastructure.Allocation.{
+ Participant,
+ Participants,
+ PartyAllocation,
+}
+import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext
+
+import scala.collection.immutable
+import scala.concurrent.{ExecutionContext, Future}
+
+private[testtool] final class LedgerTestContext private[infrastructure] (
+ val configuredParticipants: immutable.Seq[ParticipantTestContext],
+ val connectedSynchronizers: Int,
+)(implicit ec: ExecutionContext) {
+
+ require(configuredParticipants.nonEmpty, "At least one participant must be provided.")
+
+ private[this] val participantsRing = Iterator.continually(configuredParticipants).flatten
+
+ /** This allocates participants and a specified number of parties for each participant.
+ *
+ * e.g. `allocateParties(allocate(SingleParty, Parties(3), NoParties, TwoParties))` will
+ * eventually return:
+ *
+ * {{{
+ * Participants(
+ * Participant(alpha: ParticipantTestContext, alice: Party),
+ * Participant(beta: ParticipantTestContext, bob: Party, barbara: Party, bernard: Party),
+ * Participant(gamma: ParticipantTestContext),
+ * Participant(delta: ParticipantTestContext, doreen: Party, dan: Party),
+ * )
+ * }}}
+ *
+ * Each execution of a test case allocates parties on participants, then deconstructs the result
+ * and uses the various participants and parties throughout the test.
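+ *
+ * Participants are assigned from a round-robin ring over the configured participants, so
+ * consecutive party counts in the allocation land on consecutive participants.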
+ */ + def allocateParties(allocation: PartyAllocation): Future[Participants] = { + val participantAllocations: Seq[(ParticipantTestContext, Allocation.PartyCount)] = + allocation.partyCounts.map(nextParticipant() -> _) + val participantsUnderTest: Seq[ParticipantTestContext] = participantAllocations.map(_._1) + Future + .sequence(participantAllocations.map { + case (participant: ParticipantTestContext, partyCount: Allocation.PartyCount) + if partyCount.isExternal => + participant + .allocateExternalParties(partyCount.count, connectedSynchronizers) + .map(parties => Participant(participant, parties)) + case (participant: ParticipantTestContext, partyCount: Allocation.PartyCount) => + participant + .preallocateParties( + partyCount.count, + participantsUnderTest, + connectedSynchronizers, + ) + .map(parties => Participant(participant, parties)) + }) + .map(participants => Participants(connectedSynchronizers, participants*)) + } + + private[this] def nextParticipant(): ParticipantTestContext = + participantsRing.synchronized { + participantsRing.next() + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerTestSuite.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerTestSuite.scala new file mode 100644 index 0000000000..5358d39fa5 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerTestSuite.scala @@ -0,0 +1,160 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.infrastructure + +import com.daml.ledger.api.testtool.infrastructure.Allocation.{Participants, PartyAllocation} +import com.daml.ledger.api.testtool.infrastructure.TestConstraints.NoLimitations +import com.daml.ledger.api.testtool.infrastructure.participant.{Features, ParticipantTestContext} +import com.daml.ledger.api.v2.commands.Command as CommandV1 +import com.daml.ledger.api.v2.commands.Command.toJavaProto +import com.daml.ledger.api.v2.event.Event +import com.daml.ledger.api.v2.value.Value.Sum +import com.daml.ledger.api.v2.value.{GenMap, Identifier, List as ApiList, Optional, TextMap, Value} +import com.daml.ledger.javaapi.data.{Command, Identifier as JavaIdentifier, Party} +import com.daml.test.evidence.tag.EvidenceTag +import com.digitalasset.daml.lf.data.Ref + +import java.util.List as JList +import scala.collection.mutable.ListBuffer +import scala.concurrent.{ExecutionContext, Future} +import scala.jdk.CollectionConverters.* +import scala.language.implicitConversions + +abstract class LedgerTestSuite { + private val testCaseBuffer: ListBuffer[LedgerTestCase] = ListBuffer() + + final lazy val tests: Vector[LedgerTestCase] = testCaseBuffer.toVector + + protected final def test( + shortIdentifier: String, + description: String, + partyAllocation: PartyAllocation, + timeoutScale: Double = 1.0, + runConcurrently: Boolean = true, + repeated: Int = 1, + enabled: Features => Boolean = _ => true, + disabledReason: String = "No reason", + tags: List[EvidenceTag] = List.empty, + limitation: TestConstraints = NoLimitations, + )(testCase: ExecutionContext => PartialFunction[Participants, Future[Unit]]): Unit = + testGivenAllParticipants( + shortIdentifier, + description, + partyAllocation, + timeoutScale, + runConcurrently, + repeated, + enabled, + disabledReason, + tags, + limitation, + )((ec: 
ExecutionContext) => (_: Seq[ParticipantTestContext]) => testCase(ec)) + + protected final def testGivenAllParticipants( + shortIdentifier: String, + description: String, + partyAllocation: PartyAllocation, + timeoutScale: Double = 1.0, + runConcurrently: Boolean = true, + repeated: Int = 1, + enabled: Features => Boolean = _ => true, + disabledReason: String = "No reason", + tags: List[EvidenceTag] = List.empty, + limitation: TestConstraints = NoLimitations, + )( + testCase: ExecutionContext => Seq[ParticipantTestContext] => PartialFunction[ + Participants, + Future[Unit], + ] + ): Unit = { + val shortIdentifierRef = Ref.LedgerString.assertFromString(shortIdentifier) + testCaseBuffer.append( + new LedgerTestCase( + this, + shortIdentifierRef, + description, + timeoutScale, + runConcurrently, + repeated, + tags, + limitation, + enabled, + disabledReason, + partyAllocation, + testCase, + ) + ) + } + + def name: String = getClass.getSimpleName + + def updateCommands(commands: JList[Command], f: CommandV1 => CommandV1): JList[Command] = + commands.asScala + .map(c => CommandV1.fromJavaProto(c.toProtoCommand)) + .map(f) + .map(c => Command.fromProtoCommand(toJavaProto(c))) + .asJava + + implicit class IdentifierConverter(id: JavaIdentifier) { + def toV1: Identifier = Identifier.fromJavaProto(id.toProto) + } + + implicit def partyToString(party: Party): String = party.getValue + + object ClearIdsImplicits { + + def clearIds(value: Value): Value = { + val sum = value.sum match { + case Sum.Record(record) => + Sum.Record( + record.clearRecordId.copy(fields = + record.fields.map(f => f.copy(value = f.value.map(clearIds))) + ) + ) + case Sum.Variant(variant) => + Sum.Variant(variant.clearVariantId.copy(value = variant.value.map(clearIds))) + case Sum.List(list) => Sum.List(ApiList(list.elements.map(clearIds))) + case Sum.Optional(optional) => + Sum.Optional(Optional(optional.value.map(clearIds))) + case Sum.GenMap(genmap) => + Sum.GenMap(GenMap(genmap.entries.map { case GenMap.Entry(k, v) => + GenMap.Entry(k.map(clearIds), v.map(clearIds)) + })) + case Sum.TextMap(simplemap) => + Sum.TextMap(TextMap(simplemap.entries.map { case TextMap.Entry(k, v) => + TextMap.Entry(k, v.map(clearIds)) + })) + case _ => value.sum + } + Value(sum) + } + + implicit class ClearValueIdsImplicits(record: com.daml.ledger.api.v2.value.Record) { + // TODO(#16361): remove when java bindings toValue produce an enriched Record w.r.t. 
type and field name + def clearValueIds: com.daml.ledger.api.v2.value.Record = + clearIds(Value(Sum.Record(record))).getRecord + } + } + + def assertAcsDelta(events: Seq[Event], acsDelta: Boolean, message: String): Unit = + assert( + events.forall(_.event match { + case Event.Event.Created(created) => created.acsDelta == acsDelta + case Event.Event.Exercised(exercised) => exercised.acsDelta == acsDelta + case Event.Event.Empty => true + case Event.Event.Archived(_) => true + }), + message + s", events: ${events.map(_.event).mkString(", ")}", + ) + +} + +sealed trait TestConstraints +sealed trait JsonSupported extends TestConstraints + +object TestConstraints { + case object NoLimitations extends JsonSupported + case class JsonOnly(reason: String, grpcTest: Option[String] = None) extends JsonSupported + case class GrpcOnly(reason: String, jsonTest: Option[String] = None) extends TestConstraints +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerTestSummary.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerTestSummary.scala new file mode 100644 index 0000000000..52ea935433 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/LedgerTestSummary.scala @@ -0,0 +1,11 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.infrastructure + +final case class LedgerTestSummary( + suite: String, + name: String, + description: String, + result: Either[Result.Failure, Result.Success], +) diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/NamePicker.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/NamePicker.scala new file mode 100644 index 0000000000..1e3d71d83d --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/NamePicker.scala @@ -0,0 +1,53 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
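+
+// Worked example (sketch of the semantics below): `lower` yields the lexicographic
+// predecessor of a string over the deduplicated, sorted alphabet, e.g. for
+// NamePicker("abc") (canon = "abc", mn = 'a', mx = 'c'):
+//
+//   NamePicker("abc").lower("ab") // Some("aa"): the last character is lowered
+//   NamePicker("abc").lower("ba") // Some("ac"): 'a' cannot be lowered, so the
+//                                 //   prefix is lowered and mx = 'c' is appended
+//   NamePicker("abc").lower("a")  // Some(""):   an all-lowest string drops its last character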
+ +package com.daml.ledger.api.testtool.infrastructure + +case class NamePicker(alphabet: String) { + private[testtool] val canon: String = alphabet.sorted.foldLeft("") { (acc, e) => + acc.lastOption match { + case Some(l) if l == e => acc + case _ => acc + e + } + } + + private[testtool] val mx: Char = canon.last + private[testtool] val mn: Char = canon.head + + private[testtool] def belongs(s: String): Boolean = s.foldLeft(true) { + case (true, e) if canon.contains(e) => true + case (_, _) => false + } + + private[testtool] def allLowest(s: String): Boolean = s.foldLeft(true) { + case (true, e) if canon.headOption.contains(e) => true + case (_, _) => false + } + + private[testtool] def lower(c: Char): Option[Char] = + canon.foldLeft(None: Option[Char]) { + case (_, _) if !canon.contains(c) => None + case (_, e) if c > e => Some(e) + case (other, _) => other + } + + def lower(s: String): Option[String] = s match { + case s if !belongs(s) => None + case s if allLowest(s) => s.lastOption.map(_ => s.dropRight(1)) + case s => + s.reverse.toList match { + case last :: init if lower(last).isDefined => Some((init.reverse ++ lower(last)).mkString) + case last :: Nil => Some(lower(last).toList.mkString) + case _ :: init => lower(init.reverse.mkString).map(_ + mx) + case Nil => None + } + } + + def lowerConstrained(high: String, low: String, extendOnConflict: Int = 5): Option[String] = + (high, low) match { + case (h, l) if !belongs(h) || !belongs(l) => None + case (h, l) if h <= l => None + case (h, l) if h.length == l.length + 1 && h.last == mn && h.dropRight(1) == l => None + case (h, l) => lower(h).map(ld => if (ld == l) ld ++ (mx.toString * extendOnConflict) else ld) + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Party.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Party.scala new file mode 100644 index 0000000000..e524908ea2 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Party.scala @@ -0,0 +1,56 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
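+
+// Usage sketch for external-party signing, assuming a JDK 15+ Ed25519 provider and a
+// pre-computed Canton `fingerprint: Fingerprint` for the party's public key:
+//
+//   val keyPair = java.security.KeyPairGenerator.getInstance("Ed25519").generateKeyPair()
+//   val party   = Party.external("alice::namespace", fingerprint, keyPair)
+//   val sig     = party.signProto(com.google.protobuf.ByteString.copyFromUtf8("payload"))
+//   // `sig` carries the raw Ed25519 signature together with the signer's fingerprint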
+ +package com.daml.ledger.api.testtool.infrastructure + +import com.daml.ledger.api.v2.crypto as lapicrypto +import com.daml.ledger.api.v2.crypto.SignatureFormat.SIGNATURE_FORMAT_RAW +import com.daml.ledger.javaapi.data.Party as ApiParty +import com.digitalasset.canton.crypto.Fingerprint +import com.google.protobuf.ByteString + +import java.security.{KeyPair, Signature} +import scala.language.implicitConversions + +sealed trait Party { + def underlying: ApiParty + def initialSynchronizers: List[String] +} +case class LocalParty(underlying: ApiParty, initialSynchronizers: List[String]) extends Party +case class ExternalParty( + underlying: ApiParty, + initialSynchronizers: List[String], + signingFingerprint: Fingerprint, + signingKeyPair: KeyPair, +) extends Party { + def sign(data: ByteString): ByteString = { + val signatureInstance = Signature.getInstance("Ed25519") + signatureInstance.initSign(signingKeyPair.getPrivate) + signatureInstance.update(data.toByteArray) + ByteString.copyFrom(signatureInstance.sign()) + } + + def signProto(data: ByteString): lapicrypto.Signature = + lapicrypto.Signature( + format = SIGNATURE_FORMAT_RAW, + signature = sign(data), + signedBy = signingFingerprint.toProtoPrimitive, + signingAlgorithmSpec = lapicrypto.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_ED25519, + ) +} + +object Party { + + def external( + value: String, + signingFingerprint: Fingerprint, + signingKeyPair: KeyPair, + initialSynchronizers: List[String] = List.empty, + ): ExternalParty = + ExternalParty(new ApiParty(value), initialSynchronizers, signingFingerprint, signingKeyPair) + + def apply(value: String, initialSynchronizers: List[String] = List.empty): Party = + LocalParty(new ApiParty(value), initialSynchronizers) + implicit def toApiParty(party: Party): ApiParty = party.underlying + implicit def toApiString(party: Party): String = party.underlying.getValue +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/PartyAllocationConfiguration.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/PartyAllocationConfiguration.scala new file mode 100644 index 0000000000..969c1d9a16 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/PartyAllocationConfiguration.scala @@ -0,0 +1,32 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.infrastructure + +sealed trait PartyAllocationConfiguration { + def allocateParties: Boolean + + def waitForAllParticipants: Boolean +} + +object PartyAllocationConfiguration { + + case object OpenWorld extends PartyAllocationConfiguration { + override val allocateParties: Boolean = false + + override val waitForAllParticipants: Boolean = false + } + + case object ClosedWorld extends PartyAllocationConfiguration { + override val allocateParties: Boolean = true + + override val waitForAllParticipants: Boolean = false + } + + case object ClosedWorldWaitingForAllParticipants extends PartyAllocationConfiguration { + override val allocateParties: Boolean = true + + override val waitForAllParticipants: Boolean = true + } + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/ProtobufConverters.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/ProtobufConverters.scala new file mode 100644 index 0000000000..d2e0866b93 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/ProtobufConverters.scala @@ -0,0 +1,39 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.infrastructure + +import com.google.protobuf + +import scala.jdk.DurationConverters.{JavaDurationOps, ScalaDurationOps} + +object ProtobufConverters { + + implicit class JavaDurationConverter(duration: java.time.Duration) { + def asProtobuf: protobuf.duration.Duration = + new protobuf.duration.Duration(duration.getSeconds, duration.getNano) + } + + implicit class JavaInstantConverter(instant: java.time.Instant) { + def asProtobuf: protobuf.timestamp.Timestamp = + new protobuf.timestamp.Timestamp(instant.getEpochSecond, instant.getNano) + } + + implicit class ScalaDurationConverter(duration: scala.concurrent.duration.FiniteDuration) { + def asProtobuf: protobuf.duration.Duration = + duration.toJava.asProtobuf + } + + implicit class ProtobufDurationConverter(duration: protobuf.duration.Duration) { + def asJava: java.time.Duration = + java.time.Duration.ofSeconds(duration.seconds, duration.nanos.toLong) + + def asScala: scala.concurrent.duration.Duration = + asJava.toScala + } + + implicit class ProtobufTimestampConverter(timestamp: protobuf.timestamp.Timestamp) { + def asJava: java.time.Instant = + java.time.Instant.ofEpochSecond(timestamp.seconds, timestamp.nanos.toLong) + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/RaceConditionTests.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/RaceConditionTests.scala new file mode 100644 index 0000000000..5085b649b0 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/RaceConditionTests.scala @@ -0,0 +1,68 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
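+
+// Usage sketch: race one privileged action against repeated contenders, assuming
+// test-defined futures `createOnce` and `contend` (the final assertion is illustrative):
+//
+//   executeRepeatedlyWithRandomDelay(
+//     numberOfAttempts = DefaultRepetitionsNumber,
+//     once = createOnce,
+//     repeated = contend,
+//   ).map(results => assert(results.count(_.isSuccess) >= 1))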
+ +package com.daml.ledger.api.testtool.infrastructure + +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.v2.transaction.Transaction +import com.daml.timer.Delayed +import com.digitalasset.canton.ledger.api.TransactionShape.LedgerEffects + +import scala.concurrent.duration.* +import scala.concurrent.{ExecutionContext, Future} +import scala.util.{Success, Try} + +private[testtool] object RaceConditionTests { + val DefaultRepetitionsNumber: Int = 3 + private val WaitBeforeGettingTransactions: FiniteDuration = 500.millis + + def executeRepeatedlyWithRandomDelay[T]( + numberOfAttempts: Int, + once: => Future[T], + repeated: => Future[T], + )(implicit ec: ExecutionContext): Future[Vector[Try[T]]] = { + val attempts = (1 to numberOfAttempts).toVector + Future.traverse(attempts) { attempt => + scheduleWithRandomDelay(upTo = 20.millis) { _ => + (if (attempt == numberOfAttempts) { + once + } else { + repeated + }).transform(Success(_)) + } + } + } + + private def randomDurationUpTo(limit: FiniteDuration): FiniteDuration = + scala.util.Random.nextInt(limit.toMillis.toInt).millis + + private def scheduleWithRandomDelay[T](upTo: FiniteDuration)(f: Unit => Future[T])(implicit + ec: ExecutionContext + ): Future[T] = + Delayed.by(randomDurationUpTo(upTo))(()).flatMap(f) + + def transactions( + ledger: ParticipantTestContext, + party: Party, + waitBefore: FiniteDuration = WaitBeforeGettingTransactions, + )(implicit ec: ExecutionContext): Future[Vector[Transaction]] = + Delayed.by(waitBefore)(()).flatMap(_ => ledger.transactions(LedgerEffects, party)) + + def assertTransactionOrder( + expectedFirst: Transaction, + expectedSecond: Transaction, + ): Unit = + if (expectedFirst.offset < expectedSecond.offset) () + else fail(s"""Offset ${expectedFirst.offset} is not before ${expectedSecond.offset} + | + |Expected first: ${printTransaction(expectedFirst)} + |Expected second: ${printTransaction(expectedSecond)} + |""".stripMargin) + + def printTransaction(transaction: Transaction): String = + s"""Offset: ${transaction.offset}, number of events: ${transaction.events.size} + |${transaction.events.map(e => s" -> $e").mkString("\n")} + |""".stripMargin + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/RemoveTrailingNone.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/RemoveTrailingNone.scala new file mode 100644 index 0000000000..2020798ede --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/RemoveTrailingNone.scala @@ -0,0 +1,53 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
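+
+// Usage sketch, assuming a codegen `record: DamlRecord`:
+//
+//   import RemoveTrailingNone.Implicits
+//   val normalized = record.withoutTrailingNoneFields
+//   // trailing unset optional fields are stripped recursively, so values compare
+//   // equal across upgraded packages that only append optional fields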
+ +package com.daml.ledger.api.testtool.infrastructure + +import com.daml.ledger.api.v2.value.* +import com.daml.ledger.api.v2.value.Value.{Sum, toJavaProto} +import com.daml.ledger.javaapi.data.DamlRecord + +// TODO(#27163): This can be removed once DamlRecord.toValue does not produce a Record with trailing None values +object RemoveTrailingNone { + + private def removeTrailingNone(value: Value): Value = { + val sum = value.sum match { + case Sum.Record(record) => + val n = record.fields.reverseIterator + .dropWhile(_.value match { + case None => true + case Some(Value(Value.Sum.Optional(Optional(None)))) => true + case _ => false + }) + .size + Sum.Record( + record.copy(fields = + record.fields.take(n).map(f => f.copy(value = f.value.map(removeTrailingNone))) + ) + ) + case Sum.Variant(variant) => + Sum.Variant(variant.clearVariantId.copy(value = variant.value.map(removeTrailingNone))) + case Sum.List(list) => Sum.List(List(list.elements.map(removeTrailingNone))) + case Sum.Optional(optional) => + Sum.Optional(Optional(optional.value.map(removeTrailingNone))) + case Sum.GenMap(map) => + Sum.GenMap(GenMap(map.entries.map { case GenMap.Entry(k, v) => + GenMap.Entry(k.map(removeTrailingNone), v.map(removeTrailingNone)) + })) + case Sum.TextMap(map) => + Sum.TextMap(TextMap(map.entries.map { case TextMap.Entry(k, v) => + TextMap.Entry(k, v.map(removeTrailingNone)) + })) + case _ => value.sum + } + Value(sum) + } + + implicit class Implicits(record: DamlRecord) { + def withoutTrailingNoneFields: DamlRecord = { + val withTrailing = Value.fromJavaProto(record.toProto) + val withoutTrailing = removeTrailingNone(withTrailing) + DamlRecord.fromProto(toJavaProto(withoutTrailing).getRecord) + } + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Reporter.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Reporter.scala new file mode 100644 index 0000000000..516e250011 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Reporter.scala @@ -0,0 +1,154 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
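+
+// Usage sketch: print a colorized summary to stdout, assuming `results`, `excluded`,
+// and `runInfo` were collected by the test runner:
+//
+//   val reporter = new Reporter.ColorizedPrintStreamReporter(System.out, printStackTraces = true)
+//   reporter.report(results, excluded, runInfo)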
+ +package com.daml.ledger.api.testtool.infrastructure + +import com.digitalasset.canton.buildinfo.BuildInfo + +import java.io.{PrintStream, PrintWriter, StringWriter} +import scala.util.Try + +trait Reporter[A] { + def report( + results: Vector[LedgerTestSummary], + skippedTests: Vector[LedgerTestSummary], + runInfo: Seq[(String, String)], + ): A +} + +object Reporter { + object ColorizedPrintStreamReporter { + private val reset = "\u001b[0m" + + private def red(s: String): String = s"\u001b[31m$s$reset" + + private def green(s: String): String = s"\u001b[32m$s$reset" + + private def yellow(s: String): String = s"\u001b[33m$s$reset" + + private def blue(s: String): String = s"\u001b[34m$s$reset" + + private def cyan(s: String): String = s"\u001b[36m$s$reset" + + private def render(t: Throwable): Iterator[String] = { + val stringWriter = new StringWriter + val writer = new PrintWriter(stringWriter) + t.printStackTrace(writer) + stringWriter.toString.linesIterator + } + + private def extractRelevantLineNumber(t: Throwable): Option[Int] = + t.getStackTrace + .find(stackTraceElement => + Try(Class.forName(stackTraceElement.getClassName)) + .filter(_ != classOf[LedgerTestSuite]) + .filter(classOf[LedgerTestSuite].isAssignableFrom) + .isSuccess + ) + .map(_.getLineNumber) + } + + final class ColorizedPrintStreamReporter( + s: PrintStream, + printStackTraces: Boolean, + printOnFailuresOnly: Boolean = false, + ) extends Reporter[Unit] { + + import ColorizedPrintStreamReporter.* + + private def indented(msg: String, n: Int = 2): String = { + val indent = " " * n + if (msg != null) msg.linesIterator.map(l => s"$indent$l").mkString("\n") + else "" + } + + private def printReport(results: Vector[LedgerTestSummary]): Unit = + results.groupBy(_.suite).foreach { case (suite, summaries) => + s.println() + s.println(cyan(suite)) + + for (LedgerTestSummary(_, name, description, result) <- summaries) { + s.print(cyan(s"- [$name] $description ... 
")) + result match { + case Right(Result.Succeeded(duration)) => + s.println(green(s"Success (${duration.toMillis} ms)")) + case Right(Result.Retired) => + s.println(yellow(s"Skipped (retired test)")) + case Right(Result.Excluded(reason)) => + s.println(yellow(s"Skipped ($reason)")) + case Left(Result.TimedOut) => s.println(red(s"Timeout")) + case Left(Result.Failed(cause)) => + val message = + extractRelevantLineNumber(cause).fold("Assertion failed") { lineHint => + s"Assertion failed at line $lineHint" + } + s.println(red(message)) + s.println(red(indented(cause.getMessage))) + cause match { + case AssertionErrorWithPreformattedMessage(preformattedMessage, _) => + preformattedMessage.split("\n").map(indented(_)).foreach(s.println) + case _ => // ignore + } + if (printStackTraces) { + for (renderedStackTraceLine <- render(cause)) + s.println(red(indented(renderedStackTraceLine))) + } + case Left(Result.FailedUnexpectedly(cause)) => + val prefix = + s"Unexpected failure (${cause.getClass.getSimpleName})" + val message = + extractRelevantLineNumber(cause).fold(prefix) { lineHint => + s"$prefix at line $lineHint" + } + s.println(red(message)) + s.println(red(indented(cause.getMessage))) + if (printStackTraces) { + for (renderedStackTraceLine <- render(cause)) + s.println(red(indented(renderedStackTraceLine))) + } + } + } + } + + override def report( + results: Vector[LedgerTestSummary], + excludedTests: Vector[LedgerTestSummary], + runInfo: Seq[(String, String)], + ): Unit = { + val (successes, failures) = results.partition(_.result.isRight) + + s.println() + s.println(blue("#" * 80)) + s.println(blue("#")) + s.println(blue(s"# TEST REPORT, version: ${BuildInfo.version}")) + s.println(blue("#")) + s.println(blue("#" * 80)) + if (failures.isEmpty && printOnFailuresOnly) { + s.println(s"Successful tests: ${successes.size}") + s.println(s"Excluded tests: ${excludedTests.size}") + } else { + s.println() + s.println(yellow("### RUN INFORMATION")) + s.println() + runInfo.foreach { case (label, value) => s.println(cyan(s"$label = $value")) } + if (successes.nonEmpty) { + s.println() + s.println(green("### SUCCESSES")) + printReport(successes) + } + + if (excludedTests.nonEmpty) { + s.println() + s.println(yellow("### EXCLUDED TESTS")) + printReport(excludedTests) + } + + if (failures.nonEmpty) { + s.println() + s.println(red("### FAILURES")) + printReport(failures) + } + } + } + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/ResourceOwner.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/ResourceOwner.scala new file mode 100644 index 0000000000..bdbd27322a --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/ResourceOwner.scala @@ -0,0 +1,20 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.infrastructure + +import com.daml.resources.grpc.GrpcResourceOwnerFactories +import com.daml.resources.pekko.PekkoResourceOwnerFactories +import com.daml.resources.{HasExecutionContext, ResourceOwnerFactories} + +import scala.concurrent.ExecutionContext + +import HasExecutionContext.`ExecutionContext has itself` + +object ResourceOwner + extends ResourceOwnerFactories[ExecutionContext] + with PekkoResourceOwnerFactories[ExecutionContext] + with GrpcResourceOwnerFactories[ExecutionContext] { + override protected implicit val hasExecutionContext: HasExecutionContext[ExecutionContext] = + implicitly[HasExecutionContext[ExecutionContext]] +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Result.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Result.scala new file mode 100644 index 0000000000..bf34a145e6 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Result.scala @@ -0,0 +1,27 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.infrastructure + +import scala.concurrent.duration.Duration +import scala.util.control.NoStackTrace + +private[daml] object Result { + + sealed trait Success + + final case class Succeeded(duration: Duration) extends Success + + case object Retired extends RuntimeException with NoStackTrace with Success + + final case class Excluded(reason: String) extends RuntimeException with NoStackTrace with Success + + sealed trait Failure + + case object TimedOut extends Failure + + final case class Failed(cause: AssertionError) extends Failure + + final case class FailedUnexpectedly(cause: Throwable) extends Failure + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/RetryingGetConnectedSynchronizersForParty.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/RetryingGetConnectedSynchronizersForParty.scala new file mode 100644 index 0000000000..43a41c2b64 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/RetryingGetConnectedSynchronizersForParty.scala @@ -0,0 +1,38 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
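+
+// Usage sketch, assuming `services: LedgerServices` for the participant under test:
+//
+//   RetryingGetConnectedSynchronizersForParty(services, party = "alice::ns", minSynchronizers = 2)
+//   // yields the synchronizer ids once at least 2 are connected, or the singleton
+//   // fallback List("") if all retries are exhausted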
+
+package com.daml.ledger.api.testtool.infrastructure
+
+import com.daml.ledger.api.v2.state_service.GetConnectedSynchronizersRequest
+import com.daml.timer.RetryStrategy
+
+import scala.concurrent.duration.DurationInt
+import scala.concurrent.{ExecutionContext, Future}
+import scala.util.control.NonFatal
+import scala.util.{Failure, Success}
+
+object RetryingGetConnectedSynchronizersForParty {
+  // There is a 250ms grace period for all topology changes.
+  // Remove this wait once proper synchronization is ensured server-side.
+  def apply(services: LedgerServices, party: String, minSynchronizers: Int)(implicit
+      ec: ExecutionContext
+  ): Future[Seq[String]] =
+    RetryStrategy
+      .exponentialBackoff(attempts = 20, 125.millis) { (_, _) =>
+        services.state
+          .getConnectedSynchronizers(new GetConnectedSynchronizersRequest(party, "", ""))
+          .map(_.connectedSynchronizers.map(_.synchronizerId))
+          .transform {
+            case Success(synchronizers) if synchronizers.sizeIs < minSynchronizers =>
+              Failure(
+                new RuntimeException(
+                  s"Not enough connected synchronizers when allocating party $party. Want $minSynchronizers, got ${synchronizers.size}"
+                )
+              )
+            case other => other
+          }
+      }
+      .recoverWith { case NonFatal(_) =>
+        Future.successful(List(""))
+      }
+}
diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Synchronize.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Synchronize.scala
new file mode 100644
index 0000000000..cc191359a0
--- /dev/null
+++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Synchronize.scala
@@ -0,0 +1,65 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.api.testtool.infrastructure
+
+import com.daml.ledger.api.testtool.infrastructure.Eventually.eventually
+import com.daml.ledger.api.testtool.infrastructure.Party
+import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext
+import com.daml.ledger.javaapi.data.codegen.{ContractCompanion, ContractId}
+import com.daml.ledger.test.java.model.test.Sync
+
+import scala.concurrent.{ExecutionContext, Future}
+
+object Synchronize {
+
+  implicit val syncCompanion: ContractCompanion.WithoutKey[Sync.Contract, Sync.ContractId, Sync] =
+    Sync.COMPANION
+
+  /** Creates a synchronization point between two participants by
+    *   - ensuring that a party created on each participant is visible on the other one,
+    *   - ensuring those parties are each connected to the same number of synchronizers,
+    *   - ensuring that a contract created on each participant is visible on the other one.
+    *
+    * Useful to ensure that two parties distributed across participants both see the updates that
+    * happened _BEFORE_ the call to this method.
+    *
+    * This allows us to check that an earlier update that is not supposed to be visible to parties
+    * on either participant is genuinely absent, and not merely delayed by interleaved distributed
+    * calls.
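+    *
+    * Typical use (sketch; `runAssertions` stands for the test's follow-up checks):
+    *
+    * {{{
+    * synchronize(alpha, beta, connectedSynchronizers = 1).flatMap(_ => runAssertions())
+    * }}}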
+ */ + final def synchronize( + alpha: ParticipantTestContext, + beta: ParticipantTestContext, + connectedSynchronizers: Int, + )(implicit + ec: ExecutionContext + ): Future[Unit] = + for { + alice <- alpha.allocateParty() + bob <- beta.allocateParty() + _ <- alpha.waitForPartiesOnOtherParticipants(Set(beta), Set(alice), connectedSynchronizers) + _ <- beta.waitForPartiesOnOtherParticipants(Set(alpha), Set(bob), connectedSynchronizers) + _ <- alpha.create(alice, new Sync(alice, bob)).flatMap(waitForContract(beta, alice, _)) + _ <- beta.create(bob, new Sync(bob, alice)).flatMap(waitForContract(alpha, alice, _)) + } yield { + // Nothing to do, by flatmapping over this we know + // the two participants are synchronized up to the + // point before invoking this method + } + + final def waitForContract[TCid <: ContractId[?]]( + participant: ParticipantTestContext, + party: Party, + contractId: TCid, + )(implicit ec: ExecutionContext): Future[Unit] = + eventually("Wait for contract to become active") { + participant.activeContracts(Some(Seq(party))).map { events => + assert( + events.exists(_.contractId == contractId.contractId), + s"Could not find contract $contractId", + ) + } + } + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/TestDar.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/TestDar.scala new file mode 100644 index 0000000000..03077fe4aa --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/TestDar.scala @@ -0,0 +1,51 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
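+
+// Usage sketch: look up the DARs to upload for an LF version key:
+//
+//   val darsFor21: List[String] = TestDar.paths("2.1")
+//   // includes e.g. "model-tests-3.1.0.dar" and "semantic-tests-3.1.0.dar"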
+ +package com.daml.ledger.api.testtool.infrastructure + +sealed trait TestDar { val path: String } + +object TestDar { + private val v21Dars = List( + ModelTestDar, + SemanticTestDar, + OngoingStreamPackageUploadTestDar, + PackageManagementTestDar, + Carbonv1TestDar, + Carbonv2TestDar, + ).map(_.path) + + private val v2devDars = + v21Dars ++ Seq(ExperimentalTestDar).map(_.path) + + val paths: Map[String, List[String]] = Map( + "2.1" -> v21Dars, + "2.dev" -> v2devDars, + ) +} + +case object ExperimentalTestDar extends TestDar { val path = "experimental-tests-3.1.0.dar" } +case object ModelTestDar extends TestDar { val path = "model-tests-3.1.0.dar" } +case object SemanticTestDar extends TestDar { val path = "semantic-tests-3.1.0.dar" } +case object OngoingStreamPackageUploadTestDar extends TestDar { + val path = "ongoing-stream-package-upload-tests-3.1.0.dar" +} +case object PackageManagementTestDar extends TestDar { + val path = "package-management-tests-3.1.0.dar" +} +case object Carbonv1TestDar extends TestDar { val path = "carbonv1-tests-3.1.0.dar" } +case object Carbonv2TestDar extends TestDar { val path = "carbonv2-tests-3.1.0.dar" } +case object UpgradeTestDar1_0_0 extends TestDar { val path = "upgrade-tests-1.0.0.dar" } +case object UpgradeTestDar2_0_0 extends TestDar { val path = "upgrade-tests-2.0.0.dar" } +case object UpgradeTestDar3_0_0 extends TestDar { val path = "upgrade-tests-3.0.0.dar" } +case object UpgradeFetchTestDar1_0_0 extends TestDar { val path = "upgrade-fetch-tests-1.0.0.dar" } +case object UpgradeFetchTestDar2_0_0 extends TestDar { val path = "upgrade-fetch-tests-2.0.0.dar" } +case object UpgradeIfaceDar extends TestDar { val path = "upgrade-iface-tests-3.1.0.dar" } + +case object VettingMainDar_1_0_0 extends TestDar { val path = "vetting-main-1.0.0.dar" } +case object VettingMainDar_2_0_0 extends TestDar { val path = "vetting-main-2.0.0.dar" } +case object VettingMainDar_Split_Lineage_2_0_0 extends TestDar { + val path = "vetting-main-split-lineage-2.0.0.dar" +} +case object VettingDepDar extends TestDar { val path = "vetting-dep-1.0.0.dar" } +case object VettingAltDar extends TestDar { val path = "vetting-alt-1.0.0.dar" } diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/TimeoutException.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/TimeoutException.scala new file mode 100644 index 0000000000..872ae30fc4 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/TimeoutException.scala @@ -0,0 +1,10 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
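+
+// This exception is raised via `TimeoutTask`/`WithTimeout` in this package when a
+// wrapped future does not complete in time. Usage sketch, assuming `slow: Future[A]`:
+//
+//   import scala.concurrent.duration.*
+//   WithTimeout(5.seconds)(slow) // fails with TimeoutException after 5 seconds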
+ +package com.daml.ledger.api.testtool.infrastructure + +import scala.util.control.NoStackTrace + +case object TimeoutException extends RuntimeException with NoStackTrace { + override val getMessage: String = s"Future could not be completed before timeout" +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/TimeoutTask.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/TimeoutTask.scala new file mode 100644 index 0000000000..272469f350 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/TimeoutTask.scala @@ -0,0 +1,13 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.infrastructure + +import java.util.TimerTask +import scala.concurrent.Promise + +final class TimeoutTask[A](p: Promise[A]) extends TimerTask { + override def run(): Unit = { + val _ = p.tryFailure(TimeoutException) + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/TransactionHelpers.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/TransactionHelpers.scala new file mode 100644 index 0000000000..c0c3169e01 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/TransactionHelpers.scala @@ -0,0 +1,21 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.infrastructure + +import com.daml.ledger.api.v2.event.{ArchivedEvent, CreatedEvent, Event, ExercisedEvent} +import com.daml.ledger.api.v2.transaction.Transaction + +object TransactionHelpers { + def archivedEvents(transaction: Transaction): Vector[ArchivedEvent] = + events(transaction).flatMap(_.event.archived.toList).toVector + + def createdEvents(transaction: Transaction): Vector[CreatedEvent] = + events(transaction).flatMap(_.event.created.toList).toVector + + def exercisedEvents(transaction: Transaction): Vector[ExercisedEvent] = + events(transaction).flatMap(_.event.exercised.toList).toVector + + private def events(transaction: Transaction): Iterator[Event] = + transaction.events.iterator +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/TransactionOps.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/TransactionOps.scala new file mode 100644 index 0000000000..cf90b8278b --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/TransactionOps.scala @@ -0,0 +1,20 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
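+
+// Usage sketch, assuming `tx: Transaction` obtained from the update service:
+//
+//   import TransactionOps.*
+//   val roots: List[Int] = tx.rootNodeIds()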
+ +package com.daml.ledger.api.testtool.infrastructure + +import com.daml.ledger.api.v2.transaction.Transaction +import com.daml.ledger.api.v2.transaction.Transaction.toJavaProto +import com.daml.ledger.javaapi.data.Transaction as JavaTransaction + +import scala.jdk.CollectionConverters.* + +object TransactionOps { + + implicit class TransactionOps(val tx: Transaction) extends AnyVal { + + def rootNodeIds(): List[Int] = + JavaTransaction.fromProto(toJavaProto(tx)).getRootNodeIds.asScala.map(_.toInt).toList + + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/WithTimeout.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/WithTimeout.scala new file mode 100644 index 0000000000..71325ffbf7 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/WithTimeout.scala @@ -0,0 +1,20 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.infrastructure + +import java.util.Timer +import scala.concurrent.duration.Duration +import scala.concurrent.{Future, Promise} + +object WithTimeout { + + private[this] val timer = new Timer("timeout-timer", true) + + def apply[A](t: Duration)(f: => Future[A]): Future[A] = { + val p = Promise[A]() + timer.schedule(new TimeoutTask(p), t.toMillis) + p.completeWith(f).future + } + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/assertions/CommandDeduplicationAssertions.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/assertions/CommandDeduplicationAssertions.scala new file mode 100644 index 0000000000..7769da8285 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/assertions/CommandDeduplicationAssertions.scala @@ -0,0 +1,128 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
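+
+// Round-trip sketch for the `DurationConversion` helper defined below:
+//
+//   val proto = DurationConversion.toProto(java.time.Duration.ofSeconds(30))
+//   val java  = DurationConversion.fromProto(proto) // back to PT30S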
+ +package com.daml.ledger.api.testtool.infrastructure.assertions + +import com.daml.ledger.api.testtool.infrastructure.Assertions.assertDefined +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.testtool.infrastructure.{Party, WithTimeout} +import com.daml.ledger.api.v2.completion.Completion +import com.google.protobuf.duration.Duration as DurationProto +import io.grpc.Status.Code + +import java.time.Duration +import scala.concurrent.duration.DurationInt +import scala.concurrent.{ExecutionContext, Future} +import scala.math.Ordering.Implicits.* + +object CommandDeduplicationAssertions { + + def assertDeduplicationDuration( + requestedDeduplicationDuration: DurationProto, + completion: Completion, + submittingParty: Party, + ledger: ParticipantTestContext, + )(implicit executionContext: ExecutionContext): Future[Unit] = { + val requestedDuration = DurationConversion.fromProto(requestedDeduplicationDuration) + val reportedOffset = assertDefined( + completion.deduplicationPeriod.deduplicationOffset, + "No deduplication offset has been reported", + ) + if (completion.getStatus.code == Code.ALREADY_EXISTS.value) { + assertReportedOffsetForDuplicateSubmission( + reportedOffset, + completion, + submittingParty, + ledger, + ) + } else { + assertReportedOffsetForAcceptedSubmission( + reportedOffset, + requestedDuration, + completion, + submittingParty, + ledger, + ) + } + } + + private def assertReportedOffsetForDuplicateSubmission( + reportedOffset: Long, + completion: Completion, + submittingParty: Party, + ledger: ParticipantTestContext, + )(implicit executionContext: ExecutionContext) = + WithTimeout(5.seconds)( + ledger.findCompletionAtOffset( + reportedOffset, + c => c.commandId == completion.commandId && c.getStatus.code == Code.OK.value, + )(submittingParty) + ).map { optAcceptedCompletion => + val acceptedCompletion = assertDefined( + optAcceptedCompletion, + s"No accepted completion with the command ID '${completion.commandId}' since the reported offset $reportedOffset has been found", + ) + assert( + acceptedCompletion.offset < completion.offset, + s"An accepted completion with the command ID '${completion.commandId}' at the offset ${acceptedCompletion.offset} that is not before the completion's offset ${completion.offset} has been found", + ) + } + + private def assertReportedOffsetForAcceptedSubmission( + reportedOffset: Long, + requestedDuration: Duration, + completion: Completion, + submittingParty: Party, + ledger: ParticipantTestContext, + )(implicit executionContext: ExecutionContext) = + WithTimeout(5.seconds)( + ledger.findCompletionAtOffset( + reportedOffset, + _.commandId == completion.commandId, + )(submittingParty) + ).map { optReportedOffsetCompletion => + val reportedOffsetCompletion = assertDefined( + optReportedOffsetCompletion, + s"No completion with the command ID '${completion.commandId}' since the reported offset $reportedOffset has been found", + ) + assert( + reportedOffsetCompletion.offset == reportedOffset, + s"No completion with the reported offset $reportedOffset has been found, the ${reportedOffsetCompletion.offset} offset has been found instead", + ) + val durationBetweenReportedDeduplicationOffsetAndCompletionRecordTimes = Duration + .between( + reportedOffsetCompletion.getSynchronizerTime.getRecordTime.asJavaInstant, + completion.getSynchronizerTime.getRecordTime.asJavaInstant, + ) + assert( + durationBetweenReportedDeduplicationOffsetAndCompletionRecordTimes >= requestedDuration, + s"The 
requested deduplication duration $requestedDuration was greater than the duration between the reported deduplication offset and completion record times ($durationBetweenReportedDeduplicationOffsetAndCompletionRecordTimes).", + ) + } + + def assertDeduplicationOffset( + requestedDeduplicationOffsetCompletion: Completion, + completion: Completion, + ): Unit = { + val reportedOffset = assertDefined( + completion.deduplicationPeriod.deduplicationOffset, + "No deduplication offset has been reported", + ) + val requestedDeduplicationOffset = + requestedDeduplicationOffsetCompletion.offset + assert( + reportedOffset <= requestedDeduplicationOffset, + s"The reported deduplication offset $reportedOffset was more recent than the requested deduplication offset $requestedDeduplicationOffset.", + ) + } + + object DurationConversion { + + def toProto(jDuration: Duration): DurationProto = + DurationProto(jDuration.getSeconds, jDuration.getNano) + + def fromProto(pDuration: DurationProto): Duration = + Duration.ofSeconds(pDuration.seconds, pDuration.nanos.toLong) + } + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/Features.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/Features.scala new file mode 100644 index 0000000000..3cca094efc --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/Features.scala @@ -0,0 +1,32 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.infrastructure.participant + +import com.daml.ledger.api.v2.version_service.{ + GetLedgerApiVersionResponse, + OffsetCheckpointFeature, + PartyManagementFeature, + UserManagementFeature, +} + +final case class Features( + userManagement: UserManagementFeature, + partyManagement: PartyManagementFeature, + staticTime: Boolean, + offsetCheckpoint: OffsetCheckpointFeature, +) + +object Features { + def fromApiVersionResponse(response: GetLedgerApiVersionResponse): Features = { + val features = response.getFeatures + val experimental = features.getExperimental + + Features( + userManagement = features.getUserManagement, + partyManagement = features.getPartyManagement, + staticTime = experimental.getStaticTime.supported, + offsetCheckpoint = features.getOffsetCheckpoint, + ) + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/ParticipantSession.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/ParticipantSession.scala new file mode 100644 index 0000000000..2462b2c752 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/ParticipantSession.scala @@ -0,0 +1,140 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.infrastructure.participant + +import com.daml.ledger.api.testtool.infrastructure.ChannelEndpoint.JsonApiEndpoint +import com.daml.ledger.api.testtool.infrastructure.{ + ChannelEndpoint, + Errors, + LedgerServices, + PartyAllocationConfiguration, + RetryingGetConnectedSynchronizersForParty, +} +import com.daml.ledger.api.v2.admin.party_management_service.GetParticipantIdRequest +import com.daml.ledger.api.v2.state_service.GetLedgerEndRequest +import com.daml.ledger.api.v2.version_service.GetLedgerApiVersionRequest +import com.daml.timer.RetryStrategy +import io.grpc.ClientInterceptor +import org.slf4j.LoggerFactory + +import scala.concurrent.duration.DurationInt +import scala.concurrent.{ExecutionContext, Future} +import scala.util.Failure +import scala.util.control.NonFatal + +/** Represents a running participant server exposing a set of services. + */ +private[infrastructure] final class ParticipantSession private ( + partyAllocationConfig: PartyAllocationConfiguration, + services: LedgerServices, + ledgerEndpoint: Either[JsonApiEndpoint, ChannelEndpoint], + adminEndpoint: ChannelEndpoint, + val features: Features, + timeoutScaleFactor: Double, +)(implicit val executionContext: ExecutionContext) { + + private[testtool] def createInitContext( + userId: String, + identifierSuffix: String, + features: Features, + ): Future[ParticipantTestContext] = + createTestContext( + "init", + userId, + identifierSuffix, + features = features, + ) + + private[testtool] def createTestContext( + endpointId: String, + userId: String, + identifierSuffix: String, + features: Features, + ): Future[ParticipantTestContext] = + for { + end <- services.state + .getLedgerEnd(new GetLedgerEndRequest()) + .map(_.offset) + participantId <- services.partyManagement + .getParticipantId(new GetParticipantIdRequest()) + .map(_.participantId) + } yield new TimeoutParticipantTestContext( + timeoutScaleFactor, + new SingleParticipantTestContext( + endpointId = endpointId, + userId = userId, + identifierSuffix = identifierSuffix, + referenceOffset = end, + services = services, + partyAllocationConfig = partyAllocationConfig, + ledgerEndpoint = ledgerEndpoint, + adminEndpoint = adminEndpoint, + features = features, + participantId = participantId, + ), + ) +} + +object ParticipantSession { + private val logger = LoggerFactory.getLogger(classOf[ParticipantSession]) + + def createSessions( + partyAllocationConfig: PartyAllocationConfiguration, + participantChannels: Either[Vector[JsonApiEndpoint], Vector[ChannelEndpoint]], + participantAdminChannels: Vector[ChannelEndpoint], + maxConnectionAttempts: Int, + commandInterceptors: Seq[ClientInterceptor], + timeoutScaleFactor: Double, + darList: List[String], + connectedSynchronizers: Int, + )(implicit + executionContext: ExecutionContext + ): Future[Vector[ParticipantSession]] = { + val participantChannelsS = participantChannels match { + case Left(value) => value.map(Left(_)) + case Right(value) => value.map(Right(_)) + } + Future.traverse(participantChannelsS.zip(participantAdminChannels)) { + case (endpoint: Either[JsonApiEndpoint, ChannelEndpoint], adminEndpoint) => + val services = LedgerServices(endpoint.map(_.channel), commandInterceptors, darList) + for { + features <- RetryStrategy + .exponentialBackoff(attempts = maxConnectionAttempts, 100.millis) { (attempt, wait) => + services.version + .getLedgerApiVersion(new GetLedgerApiVersionRequest()) + .map(Features.fromApiVersionResponse) + .andThen { case Failure(_) => + logger.info( + 
s"Could not connect to the participant (attempt #$attempt). Trying again in $wait..." + ) + } + } + .recoverWith { case NonFatal(exception) => + Future.failed(new Errors.ParticipantConnectionException(exception)) + } + // There's no Ledger API endpoint that allows querying the connected synchronizers of a participant + // Instead, use the participant's admin party + participantId <- services.partyManagement.getParticipantId(GetParticipantIdRequest()) + synchronizers <- RetryingGetConnectedSynchronizersForParty( + services, + participantId.participantId, + minSynchronizers = connectedSynchronizers, + ) + } yield { + if (synchronizers.sizeIs != connectedSynchronizers) + throw new RuntimeException( + s"Configured for $connectedSynchronizers connected synchronizers, but found ${synchronizers.size})" + ) + new ParticipantSession( + partyAllocationConfig = partyAllocationConfig, + services = services, + ledgerEndpoint = endpoint, + adminEndpoint = adminEndpoint, + features = features, + timeoutScaleFactor = timeoutScaleFactor, + ) + } + } + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/ParticipantSessionConfiguration.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/ParticipantSessionConfiguration.scala new file mode 100644 index 0000000000..7eecf58231 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/ParticipantSessionConfiguration.scala @@ -0,0 +1,14 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.infrastructure.participant + +import com.daml.ledger.api.testtool.infrastructure.PartyAllocationConfiguration +import io.grpc.ManagedChannelBuilder + +import scala.language.existentials + +private[testtool] final case class ParticipantSessionConfiguration( + participant: ManagedChannelBuilder[_], + partyAllocation: PartyAllocationConfiguration, +) diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/ParticipantTestContext.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/ParticipantTestContext.scala new file mode 100644 index 0000000000..8a79bc3815 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/ParticipantTestContext.scala @@ -0,0 +1,611 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.infrastructure.participant + +import com.daml.ledger.api.testtool.infrastructure.ChannelEndpoint.JsonApiEndpoint +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext.{ + IncludeInterfaceView, + topologyResultFilter, +} +import com.daml.ledger.api.testtool.infrastructure.time.DelayMechanism +import com.daml.ledger.api.testtool.infrastructure.{ChannelEndpoint, ExternalParty, Party} +import com.daml.ledger.api.v2.admin.object_meta.ObjectMeta +import com.daml.ledger.api.v2.admin.package_management_service.{ + PackageDetails, + UpdateVettedPackagesRequest, + UpdateVettedPackagesResponse, + UploadDarFileRequest, + ValidateDarFileRequest, +} +import com.daml.ledger.api.v2.admin.participant_pruning_service.PruneResponse +import com.daml.ledger.api.v2.admin.party_management_service.* +import com.daml.ledger.api.v2.command_completion_service.{ + CompletionStreamRequest, + CompletionStreamResponse, +} +import com.daml.ledger.api.v2.command_service.{ + SubmitAndWaitForTransactionRequest, + SubmitAndWaitForTransactionResponse, + SubmitAndWaitRequest, + SubmitAndWaitResponse, +} +import com.daml.ledger.api.v2.command_submission_service.SubmitRequest +import com.daml.ledger.api.v2.completion.Completion +import com.daml.ledger.api.v2.event.CreatedEvent +import com.daml.ledger.api.v2.event_query_service.{ + GetEventsByContractIdRequest, + GetEventsByContractIdResponse, +} +import com.daml.ledger.api.v2.interactive.interactive_submission_service.{ + ExecuteSubmissionAndWaitForTransactionRequest, + ExecuteSubmissionAndWaitForTransactionResponse, + ExecuteSubmissionAndWaitRequest, + ExecuteSubmissionAndWaitResponse, + ExecuteSubmissionRequest, + ExecuteSubmissionResponse, + GetPreferredPackageVersionResponse, + GetPreferredPackagesResponse, + PrepareSubmissionRequest, + PrepareSubmissionResponse, +} +import com.daml.ledger.api.v2.package_service.{ + GetPackageResponse, + ListVettedPackagesRequest, + ListVettedPackagesResponse, + PackageStatus, +} +import com.daml.ledger.api.v2.state_service.GetActiveContractsRequest +import com.daml.ledger.api.v2.topology_transaction.TopologyTransaction +import com.daml.ledger.api.v2.transaction.Transaction +import com.daml.ledger.api.v2.transaction_filter.{EventFormat, Filters, TransactionFormat} +import com.daml.ledger.api.v2.update_service.* +import com.daml.ledger.javaapi.data.codegen.{ContractCompanion, ContractId, Exercised, Update} +import com.daml.ledger.javaapi.data.{Command, Identifier, Template, Unit as UnitData, Value} +import com.digitalasset.base.error.ErrorCode +import com.digitalasset.canton.ledger.api.TransactionShape +import com.digitalasset.canton.ledger.api.TransactionShape.{AcsDelta, LedgerEffects} +import com.digitalasset.canton.time.NonNegativeFiniteDuration +import com.digitalasset.canton.util.MonadUtil +import com.google.protobuf.ByteString +import io.grpc.health.v1.health.HealthCheckResponse + +import java.security.KeyPair +import java.time.Instant +import java.util.List as JList +import scala.concurrent.{ExecutionContext, Future} + +trait ParticipantTestContext extends UserManagementTestContext { + + val begin: Long = 0L + + val userId: String + val endpointId: String + def ledgerEndpoint: Either[JsonApiEndpoint, ChannelEndpoint] + def adminEndpoint: ChannelEndpoint + def features: Features + def referenceOffset: Long + def nextKeyId: () => String + def nextUserId: () => String + def nextIdentityProviderId: () => String + def nextPartyId: () => String + def delayMechanism: 
DelayMechanism + + /** Gets the absolute offset of the ledger end at a point in time. + */ + def currentEnd(): Future[Long] + + /** Returns an absolute offset (positive integer) that is beyond the current ledger end. + * + * Note: the offset might not be valid for the underlying ledger. This method can therefore only + * be used for offsets that are only interpreted by the ledger API server and not sent to the + * ledger. + */ + def offsetBeyondLedgerEnd(): Future[Long] + def time(): Future[Instant] + def setTime(currentTime: Instant, newTime: Instant): Future[Unit] + def listKnownPackages(): Future[Seq[PackageDetails]] + + def uploadDarFileAndVetOnConnectedSynchronizers(bytes: ByteString): Future[Unit] = for { + connected <- connectedSynchronizers() + _ <- MonadUtil.sequentialTraverse(connected)(synchronizerId => + uploadDarFile( + UploadDarFileRequest( + darFile = bytes, + submissionId = "", + UploadDarFileRequest.VettingChange.VETTING_CHANGE_VET_ALL_PACKAGES, + synchronizerId = synchronizerId, + ) + ) + ) + } yield () + + def validateDarFile(bytes: ByteString): Future[Unit] = + validateDarFile(ValidateDarFileRequest(bytes, "", "")) + def validateDarFile(request: ValidateDarFileRequest): Future[Unit] + + def uploadDarRequest(bytes: ByteString, synchronizerId: String): UploadDarFileRequest + def uploadDarFile(request: UploadDarFileRequest): Future[Unit] + def getParticipantId(): Future[String] + def listPackages(): Future[Seq[String]] + def listVettedPackages(request: ListVettedPackagesRequest): Future[ListVettedPackagesResponse] + def updateVettedPackages( + request: UpdateVettedPackagesRequest + ): Future[UpdateVettedPackagesResponse] + def getPackage(packageId: String): Future[GetPackageResponse] + def getPackageStatus(packageId: String): Future[PackageStatus] + def prepareSubmission( + prepareSubmissionRequest: PrepareSubmissionRequest + ): Future[PrepareSubmissionResponse] + def executeSubmission( + executeSubmissionRequest: ExecuteSubmissionRequest + ): Future[ExecuteSubmissionResponse] + def executeSubmissionAndWait( + executeSubmissionAndWaitRequest: ExecuteSubmissionAndWaitRequest + ): Future[ExecuteSubmissionAndWaitResponse] + def executeSubmissionAndWaitForTransaction( + executeSubmissionAndWaitForTransactionRequest: ExecuteSubmissionAndWaitForTransactionRequest + ): Future[ExecuteSubmissionAndWaitForTransactionResponse] + def getPreferredPackageVersion( + parties: Seq[Party], + packageName: String, + vettingValidAt: Option[Instant] = None, + synchronizerIdO: Option[String] = None, + ): Future[GetPreferredPackageVersionResponse] + def getPreferredPackages( + vettingRequirements: Map[String, Seq[Party]], + vettingValidAt: Option[Instant] = None, + synchronizerIdO: Option[String] = None, + ): Future[GetPreferredPackagesResponse] + + def connectedSynchronizers(): Future[Seq[String]] + + /** Managed version of party allocation, should be used anywhere a party has to be allocated + * unless the party management service itself is under test + */ + def allocateParty(): Future[Party] + def allocateExternalPartyFromHint( + partyIdHint: Option[String] = None, + minSynchronizers: Int = 1, + ): Future[ExternalParty] + + /** Non managed version of party allocation. Use exclusively when testing the party management + * service. 
+ */ + def allocateParty( + partyIdHint: Option[String] = None, + localMetadata: Option[ObjectMeta] = None, + identityProviderId: Option[String] = None, + minSynchronizers: Option[Int] = None, + userId: String = "", + ): Future[Party] + + def allocateExternalPartyRequest( + keyPair: KeyPair, + partyIdHint: Option[String] = None, + synchronizer: String = "", + ): Future[AllocateExternalPartyRequest] + + def generateExternalPartyTopologyRequest( + namespacePublicKey: Array[Byte], + partyIdHint: Option[String] = None, + ): Future[GenerateExternalPartyTopologyResponse] + + def allocateExternalParty( + request: AllocateExternalPartyRequest, + minSynchronizers: Option[Int] = None, + ): Future[Party] + + def allocateParty( + req: AllocatePartyRequest, + connectedSynchronizers: Int, + ): Future[(AllocatePartyResponse, Seq[String])] + def updatePartyDetails(req: UpdatePartyDetailsRequest): Future[UpdatePartyDetailsResponse] + def generateExternalPartyTopology( + req: GenerateExternalPartyTopologyRequest + ): Future[GenerateExternalPartyTopologyResponse] + def updatePartyIdentityProviderId( + request: UpdatePartyIdentityProviderIdRequest + ): Future[UpdatePartyIdentityProviderIdResponse] + def allocateExternalParties( + partiesCount: Int, + minSynchronizers: Int, + ): Future[Vector[ExternalParty]] + def allocateParties(n: Int, minSynchronizers: Int): Future[Vector[Party]] + def getParties(req: GetPartiesRequest): Future[GetPartiesResponse] + def getParties(parties: Seq[Party]): Future[Seq[PartyDetails]] + + def listKnownPartiesExpanded(): Future[Set[Party]] + + def listKnownParties(req: ListKnownPartiesRequest): Future[ListKnownPartiesResponse] + + def listKnownParties(): Future[ListKnownPartiesResponse] + + /** @return + * a future that completes when all the participants can list all the expected parties + */ + def waitForPartiesOnOtherParticipants( + otherParticipants: Iterable[ParticipantTestContext], + expectedParties: Set[Party], + connectedSynchronizers: Int, + ): Future[Unit] + def activeContracts( + request: GetActiveContractsRequest + ): Future[Vector[CreatedEvent]] + def activeContractsIds( + request: GetActiveContractsRequest + ): Future[Vector[ContractId[Any]]] = + activeContracts(request).map { case createEvents: Seq[CreatedEvent] => + createEvents.map(c => new ContractId[Any](c.contractId)) + } + + def activeContractsRequest( + parties: Option[Seq[Party]], + activeAtOffset: Long, + templateIds: Seq[Identifier] = Seq.empty, + interfaceFilters: Seq[(Identifier, IncludeInterfaceView)] = Seq.empty, + verbose: Boolean = true, + ): GetActiveContractsRequest + def activeContracts( + parties: Option[Seq[Party]], + activeAtOffsetO: Option[Long] = None, + verbose: Boolean = true, + ): Future[Vector[CreatedEvent]] + def activeContractsByTemplateId( + templateIds: Seq[Identifier], + parties: Option[Seq[Party]], + activeAtOffsetO: Option[Long] = None, + verbose: Boolean = true, + ): Future[Vector[CreatedEvent]] + + /** Create an EventFormat with a set of Party objects. + * + * You should use this only when you need to tweak the request of + * [[transactions(request:com\.daml\.ledger\.api\.v2\.update_service\.GetUpdatesRequest):*]], + * otherwise use the shortcut override that allows you to directly pass a set of Party + */ + def eventFormat( + verbose: Boolean, + parties: Option[Seq[Party]], + templateIds: Seq[Identifier] = Seq.empty, + interfaceFilters: Seq[(Identifier, IncludeInterfaceView)] = Seq.empty, + ): EventFormat + + /** Create a TransactionFormat with a set of Party objects. 
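+   * A TransactionFormat pairs an [[EventFormat]] (party/template/interface filters) with a
+   * transaction shape (AcsDelta or LedgerEffects), e.g.
+   * {{{
+   * transactionFormat(parties = Some(Seq(alice)), transactionShape = LedgerEffects)
+   * }}}
+   * (the party value above is illustrative).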
+ * + * You should use this only when you need to tweak the request of + * [[transactions(request:com\.daml\.ledger\.api\.v2\.update_service\.GetUpdatesRequest):*]], + * otherwise use the shortcut override that allows you to directly pass a set of Party + */ + def transactionFormat( + parties: Option[Seq[Party]], + templateIds: Seq[Identifier] = Seq.empty, + interfaceFilters: Seq[(Identifier, IncludeInterfaceView)] = Seq.empty, + transactionShape: TransactionShape = AcsDelta, + verbose: Boolean = false, + ): TransactionFormat + + def filters( + templateIds: Seq[Identifier] = Seq.empty, + interfaceFilters: Seq[(Identifier, IncludeInterfaceView)] = Seq.empty, + ): Filters + + def updates( + take: Int, + request: GetUpdatesRequest, + ): Future[Vector[GetUpdatesResponse.Update]] + + def updates( + take: Int, + request: GetUpdatesRequest, + resultFilter: GetUpdatesResponse => Boolean, + ): Future[Vector[GetUpdatesResponse.Update]] + + def updates( + within: NonNegativeFiniteDuration, + request: GetUpdatesRequest, + ): Future[Vector[GetUpdatesResponse.Update]] + + def getTransactionsRequest( + transactionFormat: TransactionFormat, + begin: Long = referenceOffset, + ): Future[GetUpdatesRequest] + + def getTransactionsRequestWithEnd( + transactionFormat: TransactionFormat, + begin: Long = referenceOffset, + end: Option[Long], + ): GetUpdatesRequest + + def getUpdatesRequestWithEnd( + transactionFormatO: Option[TransactionFormat] = None, + reassignmentsFormatO: Option[EventFormat] = None, + topologyFilterO: Option[Seq[Party]] = None, + begin: Long = referenceOffset, + end: Option[Long] = None, + ): GetUpdatesRequest + + def transactionsByTemplateId( + templateId: Identifier, + parties: Option[Seq[Party]], + ): Future[Vector[Transaction]] + + /** Non-managed version of + * [[transactions(request:com\.daml\.ledger\.api\.v2\.update_service\.GetUpdatesRequest):*]], use + * this only if you need to tweak the request (i.e. to test low-level details) + */ + def transactions(request: GetUpdatesRequest): Future[Vector[Transaction]] + + /** Managed version of + * [[transactions(request:com\.daml\.ledger\.api\.v2\.update_service\.GetUpdatesRequest):*]], use + * this unless you need to tweak the request (i.e. to test low-level details) + */ + def transactions(transactionShape: TransactionShape, parties: Party*): Future[Vector[Transaction]] + + /** Non-managed version of + * [[transactions(request:com\.daml\.ledger\.api\.v2\.update_service\.GetUpdatesRequest):*]], use + * this only if you need to tweak the request (i.e. to test low-level details) + */ + def transactions( + take: Int, + request: GetUpdatesRequest, + ): Future[Vector[Transaction]] + + def transactions( + take: Int, + transactionShape: TransactionShape, + parties: Party* + ): Future[Vector[Transaction]] + + /** Managed version of transactionTrees, use this unless you need to tweak the request (i.e. to + * test low-level details) + */ + def transactionTreeById(transactionId: String, parties: Party*): Future[Transaction] + + /** Non-managed version of + * [[updateById(request:com\.daml\.ledger\.api\.v2\.update_service\.GetUpdateByIdRequest):*]], + * use this only if you need to tweak the request (i.e. to test low-level details) + */ + def updateById(request: GetUpdateByIdRequest): Future[GetUpdateResponse] + + /** Managed version of + * [[updateById(request:com\.daml\.ledger\.api\.v2\.update_service\.GetUpdateByIdRequest):*]] for + * transactions, use this unless you need to tweak the request (i.e. 
to test low-level details) + */ + def transactionById( + updateId: String, + parties: Seq[Party], + transactionShape: TransactionShape = AcsDelta, + templateIds: Seq[Identifier] = Seq.empty, + ): Future[Transaction] + + /** Managed version of + * [[updateById(request:com\.daml\.ledger\.api\.v2\.update_service\.GetUpdateByIdRequest):*]] for + * topology transactions, use this unless you need to tweak the request (i.e. to test low-level + * details). If the parties list is empty then no filtering is applied to the topology + * transactions. + */ + def topologyTransactionById( + updateId: String, + parties: Seq[Party], + ): Future[TopologyTransaction] + + /** Managed version of transactionTreeByOffset + */ + def transactionTreeByOffset(offset: Long, parties: Party*): Future[Transaction] + + /** Non-managed version of + * [[updateByOffset(request:com\.daml\.ledger\.api\.v2\.update_service\.GetUpdateByOffsetRequest):*]], + * use this only if you need to tweak the request (i.e. to test low-level details) + */ + def updateByOffset(request: GetUpdateByOffsetRequest): Future[GetUpdateResponse] + + /** Managed version of + * [[updateByOffset(request:com\.daml\.ledger\.api\.v2\.update_service\.GetUpdateByOffsetRequest):*]] + * for transactions, use this unless you need to tweak the request (i.e. to test low-level + * details) + */ + def transactionByOffset( + offset: Long, + parties: Seq[Party], + transactionShape: TransactionShape, + templateIds: Seq[Identifier] = Seq.empty, + ): Future[Transaction] + + /** Managed version of + * [[updateByOffset(request:com\.daml\.ledger\.api\.v2\.update_service\.GetUpdateByOffsetRequest):*]] + * for topology transactions, use this unless you need to tweak the request (i.e. to test + * low-level details). If the parties list is empty then no filtering is applied to the topology + * transactions. 
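+   * (Under the hood the party list is turned into a TopologyFormat carrying a
+   * ParticipantAuthorizationTopologyFormat filter.)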
+ */ + def topologyTransactionByOffset( + offset: Long, + parties: Seq[Party], + ): Future[TopologyTransaction] + + def getEventsByContractId( + request: GetEventsByContractIdRequest + ): Future[GetEventsByContractIdResponse] + + def create[ + TCid <: ContractId[T], + T <: Template, + ]( + party: Party, + template: T, + )(implicit companion: ContractCompanion[?, TCid, T]): Future[TCid] + def create[TCid <: ContractId[T], T <: Template]( + actAs: List[Party], + readAs: List[Party], + template: T, + )(implicit companion: ContractCompanion[?, TCid, T]): Future[TCid] + def createAndGetTransactionId[TCid <: ContractId[T], T <: Template]( + party: Party, + template: T, + )(implicit companion: ContractCompanion[?, TCid, T]): Future[(String, TCid)] + def exercise[T]( + party: Party, + exercise: Update[T], + transactionShape: TransactionShape = LedgerEffects, + verbose: Boolean = true, + ): Future[Transaction] + def exercise[T]( + actAs: List[Party], + readAs: List[Party], + exercise: Update[T], + ): Future[Transaction] + def exerciseAndGetContract[TCid <: ContractId[T], T]( + party: Party, + exercise: Update[Exercised[TCid]], + )(implicit companion: ContractCompanion[?, TCid, T]): Future[TCid] + def exerciseAndGetContractNoDisclose[TCid <: ContractId[?]]( + party: Party, + exercise: Update[Exercised[UnitData]], + )(implicit companion: ContractCompanion[?, TCid, ?]): Future[TCid] + def exerciseByKey( + party: Party, + template: Identifier, + key: Value, + choice: String, + argument: Value, + ): Future[Transaction] + def submitRequest( + actAs: List[Party], + readAs: List[Party], + commands: JList[Command], + ): SubmitRequest + def submitRequest(party: Party, commands: JList[Command] = JList.of()): SubmitRequest + def submitAndWaitRequest( + actAs: List[Party], + readAs: List[Party], + commands: JList[Command], + ): SubmitAndWaitRequest + def submitAndWaitForTransactionRequest( + actAs: List[Party], + readAs: List[Party], + commands: JList[Command], + transactionShape: TransactionShape, + ): SubmitAndWaitForTransactionRequest + def submitAndWaitRequest(party: Party, commands: JList[Command]): SubmitAndWaitRequest + def prepareSubmissionRequest(party: Party, commands: JList[Command]): PrepareSubmissionRequest + def executeSubmissionRequest( + party: ExternalParty, + preparedTx: PrepareSubmissionResponse, + ): ExecuteSubmissionRequest + def executeSubmissionAndWaitRequest( + party: ExternalParty, + preparedTx: PrepareSubmissionResponse, + ): ExecuteSubmissionAndWaitRequest + def executeSubmissionAndWaitForTransactionRequest( + party: ExternalParty, + preparedTx: PrepareSubmissionResponse, + transactionFormat: Option[TransactionFormat], + ): ExecuteSubmissionAndWaitForTransactionRequest + def submitAndWaitForTransactionRequest( + party: Party, + commands: JList[Command], + ): SubmitAndWaitForTransactionRequest + def submitAndWaitForTransactionRequest( + party: Party, + commands: JList[Command], + transactionShape: TransactionShape, + filterParties: Option[Seq[Party]] = None, + templateIds: Seq[Identifier] = Seq.empty, + verbose: Boolean = true, + ): SubmitAndWaitForTransactionRequest + def submit(request: SubmitRequest): Future[Unit] + def submitAndWait(request: SubmitAndWaitRequest): Future[SubmitAndWaitResponse] + def submitAndWaitForTransaction( + request: SubmitAndWaitForTransactionRequest + ): Future[SubmitAndWaitForTransactionResponse] + def submitRequestAndTolerateGrpcError[T]( + errorCode: ErrorCode, + submitAndWaitGeneric: ParticipantTestContext => Future[T], + ): Future[T] + def completions( 
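+      // how long the completion stream is kept open before returning what was observed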
+ within: NonNegativeFiniteDuration, + request: CompletionStreamRequest, + ): Future[Vector[CompletionStreamResponse.CompletionResponse]] + + def completionStreamRequest(from: Long = referenceOffset)( + parties: Party* + ): CompletionStreamRequest + def firstCompletions(request: CompletionStreamRequest): Future[Vector[Completion]] + def firstCompletions(parties: Party*): Future[Vector[Completion]] + def findCompletionAtOffset( + offset: Long, + p: Completion => Boolean, + )(parties: Party*): Future[Option[Completion]] + def findCompletion( + request: CompletionStreamRequest + )(p: Completion => Boolean): Future[Option[Completion]] + def findCompletion(parties: Party*)( + p: Completion => Boolean + ): Future[Option[Completion]] + def offsets(n: Int, request: CompletionStreamRequest): Future[Vector[Long]] + def checkHealth(): Future[HealthCheckResponse] + def watchHealth(): Future[Seq[HealthCheckResponse]] + + private[infrastructure] def preallocateParties( + n: Int, + participants: Iterable[ParticipantTestContext], + connectedSynchronizers: Int, + ): Future[Vector[Party]] + + def getConnectedSynchronizers( + party: Option[Party], + participantId: Option[String], + identityProviderId: Option[String] = None, + ): Future[Set[String]] + + def prune( + pruneUpTo: Long, + attempts: Int = 10, + ): Future[PruneResponse] + + /** We are retrying a command submission + pruning to get a safe-to-prune offset for Canton. + * That's because in Canton pruning will fail unless ACS commitments have been exchanged between + * participants. To this end, repeatedly submitting commands is prompting Canton to exchange ACS + * commitments and allows the pruning call to eventually succeed. + */ + def pruneCantonSafe( + pruneUpTo: Long, + party: Party, + dummyCommand: Party => JList[Command], + )(implicit ec: ExecutionContext): Future[Unit] + + def latestPrunedOffsets(): Future[(Long, Long)] + def maxOffsetCheckpointEmissionDelay: NonNegativeFiniteDuration + + def participantAuthorizationTransaction( + partyIdSubstring: String, + begin: Option[Long] = None, + end: Option[Long] = None, + ): Future[TopologyTransaction] = updates( + 1, + getUpdatesRequestWithEnd( + topologyFilterO = Some(Nil), + begin = begin.getOrElse(referenceOffset), + end = end, + ), + topologyResultFilter(partyIdSubstring), + ).map( + _.headOption + .flatMap(_.topologyTransaction) + .getOrElse(throw new RuntimeException("at least one transaction should have been found")) + ) +} + +object ParticipantTestContext { + type IncludeInterfaceView = Boolean + + def topologyResultFilter(partyIdSubstring: String)( + response: GetUpdatesResponse + ): Boolean = + response.update.isTopologyTransaction && response.getTopologyTransaction.events + // filtering for topology transaction about the party (for any participants, for any synchronizers) + .exists( + _.getParticipantAuthorizationAdded.partyId.contains(partyIdSubstring) + ) + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/SingleParticipantTestContext.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/SingleParticipantTestContext.scala new file mode 100644 index 0000000000..2e310968b2 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/SingleParticipantTestContext.scala @@ -0,0 +1,1491 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its 
affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.infrastructure.participant + +import com.daml.grpc.test.StreamConsumer +import com.daml.ledger.api.testtool.infrastructure.ChannelEndpoint.JsonApiEndpoint +import com.daml.ledger.api.testtool.infrastructure.Eventually.eventually +import com.daml.ledger.api.testtool.infrastructure.ProtobufConverters.* +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext.IncludeInterfaceView +import com.daml.ledger.api.testtool.infrastructure.time.{ + DelayMechanism, + StaticTimeDelayMechanism, + TimeDelayMechanism, +} +import com.daml.ledger.api.testtool.infrastructure.{ + ChannelEndpoint, + ExternalParty, + FutureAssertions, + Identification, + LedgerServices, + Party, + PartyAllocationConfiguration, + RetryingGetConnectedSynchronizersForParty, +} +import com.daml.ledger.api.v2.admin.object_meta.ObjectMeta +import com.daml.ledger.api.v2.admin.package_management_service.{ + ListKnownPackagesRequest, + PackageDetails, + UpdateVettedPackagesRequest, + UpdateVettedPackagesResponse, + UploadDarFileRequest, + ValidateDarFileRequest, +} +import com.daml.ledger.api.v2.admin.participant_pruning_service.{PruneRequest, PruneResponse} +import com.daml.ledger.api.v2.admin.party_management_service.* +import com.daml.ledger.api.v2.command_completion_service.{ + CompletionStreamRequest, + CompletionStreamResponse, +} +import com.daml.ledger.api.v2.command_service.{ + SubmitAndWaitForTransactionRequest, + SubmitAndWaitForTransactionResponse, + SubmitAndWaitRequest, + SubmitAndWaitResponse, +} +import com.daml.ledger.api.v2.command_submission_service.SubmitRequest +import com.daml.ledger.api.v2.commands.{Command as ApiCommand, Commands} +import com.daml.ledger.api.v2.completion.Completion +import com.daml.ledger.api.v2.event.Event.Event.Created +import com.daml.ledger.api.v2.event.{CreatedEvent, Event} +import com.daml.ledger.api.v2.event_query_service.{ + GetEventsByContractIdRequest, + GetEventsByContractIdResponse, +} +import com.daml.ledger.api.v2.interactive.interactive_submission_service.{ + ExecuteSubmissionAndWaitForTransactionRequest, + ExecuteSubmissionAndWaitForTransactionResponse, + ExecuteSubmissionAndWaitRequest, + ExecuteSubmissionAndWaitResponse, + ExecuteSubmissionRequest, + ExecuteSubmissionResponse, + GetPreferredPackageVersionRequest, + GetPreferredPackageVersionResponse, + GetPreferredPackagesRequest, + GetPreferredPackagesResponse, + HashingSchemeVersion, + PackageVettingRequirement, + PartySignatures, + PrepareSubmissionRequest, + PrepareSubmissionResponse, + SinglePartySignatures, +} +import com.daml.ledger.api.v2.package_service.* +import com.daml.ledger.api.v2.state_service.* +import com.daml.ledger.api.v2.testing.time_service.{GetTimeRequest, SetTimeRequest} +import com.daml.ledger.api.v2.topology_transaction.TopologyTransaction +import com.daml.ledger.api.v2.transaction.Transaction +import com.daml.ledger.api.v2.transaction_filter.* +import com.daml.ledger.api.v2.transaction_filter.CumulativeFilter.IdentifierFilter +import com.daml.ledger.api.v2.update_service.* +import com.daml.ledger.api.v2.{crypto as lapicrypto, value as v1} +import com.daml.ledger.javaapi.data.codegen.{ContractCompanion, ContractId, Exercised, Update} +import com.daml.ledger.javaapi.data.{ + Command, + ExerciseByKeyCommand, + Identifier, + Template, + Unit as UnitData, + Value, +} +import com.daml.logging.{ContextualizedLogger, LoggingContext} +import com.daml.timer.Delayed +import 
com.digitalasset.base.error.ErrorCode +import com.digitalasset.canton.ledger.api.TransactionShape +import com.digitalasset.canton.ledger.api.TransactionShape.{AcsDelta, LedgerEffects, toProto} +import com.digitalasset.canton.time.NonNegativeFiniteDuration +import com.digitalasset.canton.topology.{PartyId, UniqueIdentifier} +import com.digitalasset.canton.util.{MonadUtil, OptionUtil} +import com.google.protobuf.ByteString +import io.grpc.StatusRuntimeException +import io.grpc.health.v1.health.{HealthCheckRequest, HealthCheckResponse} +import io.grpc.protobuf.StatusProto +import io.grpc.stub.StreamObserver +import io.scalaland.chimney.dsl.* + +import java.security.{KeyPair, KeyPairGenerator, Signature} +import java.time.{Clock, Instant} +import java.util.List as JList +import scala.concurrent.duration.DurationInt +import scala.concurrent.{ExecutionContext, Future} +import scala.jdk.CollectionConverters.* +import scala.util.control.NonFatal +import scala.util.{Failure, Success} + +/** Exposes services running on some participant server in a test case. + * + * Each time a test case is run it receives a fresh instance of [[SingleParticipantTestContext]] + * (one for every used participant server). + */ +final class SingleParticipantTestContext private[participant] ( + val endpointId: String, + val userId: String, + identifierSuffix: String, + val referenceOffset: Long, + protected[participant] val services: LedgerServices, + partyAllocationConfig: PartyAllocationConfiguration, + val ledgerEndpoint: Either[JsonApiEndpoint, ChannelEndpoint], + val adminEndpoint: ChannelEndpoint, + val features: Features, + val participantId: String, +)(protected[participant] implicit val ec: ExecutionContext) + extends ParticipantTestContext { + private val logger = ContextualizedLogger.get(getClass) + + private[this] val identifierPrefix = + s"$userId-$endpointId-$identifierSuffix" + + private[this] def nextIdGenerator(name: String, lowerCase: Boolean = false): () => String = { + val f = Identification.indexSuffix(s"$identifierPrefix-$name") + if (lowerCase) + () => f().toLowerCase + else + f + } + + private[this] val nextPartyHintId: () => String = nextIdGenerator("party") + private[this] val nextCommandId: () => String = nextIdGenerator("command") + private[this] val nextSubmissionId: () => String = nextIdGenerator("submission") + private[this] val workflowId: String = s"$userId-$identifierSuffix" + override val nextKeyId: () => String = nextIdGenerator("key") + override val nextUserId: () => String = nextIdGenerator("user", lowerCase = true) + override val nextPartyId: () => String = nextIdGenerator("party", lowerCase = true) + override val nextIdentityProviderId: () => String = nextIdGenerator("idp", lowerCase = true) + + override lazy val delayMechanism: DelayMechanism = if (features.staticTime) { + new StaticTimeDelayMechanism(this) + } else + new TimeDelayMechanism() + + override def toString: String = s"participant $endpointId" + + override def currentEnd(): Future[Long] = + services.state + .getLedgerEnd(new GetLedgerEndRequest()) + .map(_.offset) + + override def latestPrunedOffsets(): Future[(Long, Long)] = + services.state + .getLatestPrunedOffsets(GetLatestPrunedOffsetsRequest()) + .map(response => + response.participantPrunedUpToInclusive -> response.allDivulgedContractsPrunedUpToInclusive + ) + + override def offsetBeyondLedgerEnd(): Future[Long] = + currentEnd().map(_ + 1000000L) + + override def time(): Future[Instant] = + services.time + .getTime(new GetTimeRequest()) + 
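+      // Any failure (e.g. the testing TimeService not being available on this ledger)
+      // falls back to the wall clock below.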
.map(_.getCurrentTime.asJava) + .recover { case NonFatal(_) => + Clock.systemUTC().instant() + } + + override def setTime(currentTime: Instant, newTime: Instant): Future[Unit] = + services.time + .setTime( + SetTimeRequest( + currentTime = Some(currentTime.asProtobuf), + newTime = Some(newTime.asProtobuf), + ) + ) + .map(_ => ()) + + override def listKnownPackages(): Future[Seq[PackageDetails]] = + services.packageManagement + .listKnownPackages(new ListKnownPackagesRequest) + .map(_.packageDetails) + + override def validateDarFile(request: ValidateDarFileRequest): Future[Unit] = + services.packageManagement.validateDarFile(request).map(_ => ()) + + override def uploadDarRequest(bytes: ByteString, synchronizerId: String): UploadDarFileRequest = + new UploadDarFileRequest( + bytes, + nextSubmissionId(), + UploadDarFileRequest.VettingChange.VETTING_CHANGE_VET_ALL_PACKAGES, + synchronizerId, + ) + + override def uploadDarFile(request: UploadDarFileRequest): Future[Unit] = + services.packageManagement + .uploadDarFile(request) + .map(_ => ()) + + override def listVettedPackages( + request: ListVettedPackagesRequest + ): Future[ListVettedPackagesResponse] = + services.packages + .listVettedPackages(request) + + override def updateVettedPackages( + request: UpdateVettedPackagesRequest + ): Future[UpdateVettedPackagesResponse] = + services.packageManagement + .updateVettedPackages(request) + + override def getParticipantId(): Future[String] = + services.partyManagement + .getParticipantId(new GetParticipantIdRequest) + .map(_.participantId) + + override def listPackages(): Future[Seq[String]] = + services.packages + .listPackages(new ListPackagesRequest()) + .map(_.packageIds) + + override def getPackage(packageId: String): Future[GetPackageResponse] = + services.packages.getPackage(new GetPackageRequest(packageId)) + + override def getPackageStatus(packageId: String): Future[PackageStatus] = + services.packages + .getPackageStatus(new GetPackageStatusRequest(packageId)) + .map(_.packageStatus) + + override def prepareSubmission( + prepareSubmissionRequest: PrepareSubmissionRequest + ): Future[PrepareSubmissionResponse] = + services.interactiveSubmission.prepareSubmission(prepareSubmissionRequest) + + def executeSubmission( + executeSubmissionRequest: ExecuteSubmissionRequest + ): Future[ExecuteSubmissionResponse] = + services.interactiveSubmission.executeSubmission(executeSubmissionRequest) + + def executeSubmissionAndWait( + executeSubmissionAndWaitRequest: ExecuteSubmissionAndWaitRequest + ): Future[ExecuteSubmissionAndWaitResponse] = + services.interactiveSubmission.executeSubmissionAndWait(executeSubmissionAndWaitRequest) + + def executeSubmissionAndWaitForTransaction( + executeSubmissionAndWaitForTransactionRequest: ExecuteSubmissionAndWaitForTransactionRequest + ): Future[ExecuteSubmissionAndWaitForTransactionResponse] = + services.interactiveSubmission.executeSubmissionAndWaitForTransaction( + executeSubmissionAndWaitForTransactionRequest + ) + + override def getPreferredPackageVersion( + parties: Seq[Party], + packageName: String, + vettingValidAt: Option[Instant] = None, + synchronizerIdO: Option[String] = None, + ): Future[GetPreferredPackageVersionResponse] = + services.interactiveSubmission.getPreferredPackageVersion( + new GetPreferredPackageVersionRequest( + parties = parties.map(_.getValue), + packageName = packageName, + synchronizerId = synchronizerIdO.getOrElse(""), + vettingValidAt = vettingValidAt.map(_.asProtobuf), + ) + ) + + override def getPreferredPackages( + 
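+      // maps each package name to the parties whose vetting state must support it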
vettingRequirements: Map[String, Seq[Party]], + vettingValidAt: Option[Instant] = None, + synchronizerIdO: Option[String] = None, + ): Future[GetPreferredPackagesResponse] = + services.interactiveSubmission.getPreferredPackages( + new GetPreferredPackagesRequest( + packageVettingRequirements = vettingRequirements.view.map { case (packageName, parties) => + PackageVettingRequirement(parties = parties.map(_.getValue), packageName = packageName) + }.toSeq, + synchronizerId = synchronizerIdO.getOrElse(""), + vettingValidAt = vettingValidAt.map(_.asProtobuf), + ) + ) + + override def allocateParty(): Future[Party] = + allocateParty(partyIdHint = Some(nextPartyHintId())) + + override def generateExternalPartyTopologyRequest( + namespacePublicKey: Array[Byte], + partyIdHint: Option[String] = None, + ): Future[GenerateExternalPartyTopologyResponse] = + for { + syncIds <- getConnectedSynchronizers(None, None) + syncId = syncIds.headOption.getOrElse(throw new Exception("No synchronizer connected")) + onboardingTransactions <- generateExternalPartyTopology( + GenerateExternalPartyTopologyRequest( + synchronizer = syncId, + partyHint = partyIdHint.getOrElse(nextPartyHintId()), + publicKey = Some( + lapicrypto.SigningPublicKey( + format = + lapicrypto.CryptoKeyFormat.CRYPTO_KEY_FORMAT_DER_X509_SUBJECT_PUBLIC_KEY_INFO, + keyData = ByteString.copyFrom(namespacePublicKey), + keySpec = lapicrypto.SigningKeySpec.SIGNING_KEY_SPEC_EC_CURVE25519, + ) + ), + localParticipantObservationOnly = false, + otherConfirmingParticipantUids = Seq(), + confirmationThreshold = 1, + observingParticipantUids = Seq(), + ) + ) + } yield onboardingTransactions + + override def allocateExternalPartyRequest( + keyPair: KeyPair, + partyIdHint: Option[String] = None, + synchronizer: String = "", + ): Future[AllocateExternalPartyRequest] = { + val signing = Signature.getInstance("Ed25519") + signing.initSign(keyPair.getPrivate) + for { + onboardingTransactions <- generateExternalPartyTopologyRequest( + keyPair.getPublic.getEncoded, + partyIdHint, + ) + } yield { + signing.update(onboardingTransactions.multiHash.toByteArray) + AllocateExternalPartyRequest( + synchronizer = synchronizer, + onboardingTransactions = onboardingTransactions.topologyTransactions.map { transaction => + AllocateExternalPartyRequest.SignedTransaction( + transaction, + Seq.empty, + ) + }, + multiHashSignatures = Seq( + lapicrypto.Signature( + format = lapicrypto.SignatureFormat.SIGNATURE_FORMAT_RAW, + signature = ByteString.copyFrom(signing.sign()), + signedBy = onboardingTransactions.publicKeyFingerprint, + signingAlgorithmSpec = lapicrypto.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_ED25519, + ) + ), + identityProviderId = "", + ) + } + } + + override def allocateExternalParty( + req: AllocateExternalPartyRequest, + minSynchronizers: Option[Int] = None, + ): Future[Party] = + for { + result <- services.partyManagement.allocateExternalParty(req) + synchronizerIds <- RetryingGetConnectedSynchronizersForParty( + services, + result.partyId, + minSynchronizers.getOrElse(1), + ) + } yield Party(result.partyId, synchronizerIds.toList) + + override def allocateExternalPartyFromHint( + partyIdHint: Option[String], + minSynchronizers: Int, + ): Future[ExternalParty] = { + val keyGen = KeyPairGenerator.getInstance("Ed25519") + val keyPair = keyGen.generateKeyPair() + for { + connectedSynchronizerIds <- connectedSynchronizers() + result <- MonadUtil.foldLeftM[Future, Option[AllocateExternalPartyResponse], String]( + None, + connectedSynchronizerIds, + ) { case 
(previousResponse, synchronizerId) => + val previouslyAllocatedPartyIdHint = + previousResponse + .map(_.partyId) + .map(PartyId.tryFromProtoPrimitive) + .map(_.identifier.unwrap) + allocateExternalPartyRequest( + keyPair, + partyIdHint + // use the supplied hint if present; otherwise reuse the party id allocated on the + // previous synchronizer, so that the same party is allocated across all synchronizers + .orElse(previouslyAllocatedPartyIdHint), + synchronizer = synchronizerId, + ).flatMap(services.partyManagement.allocateExternalParty) + .map(Some(_)) + } + } yield Party.external( + result.head.partyId, + UniqueIdentifier.tryFromProtoPrimitive(result.head.partyId).fingerprint, + keyPair, + connectedSynchronizerIds.toList, + ) + } + + override def allocateParty( + partyIdHint: Option[String] = None, + localMetadata: Option[ObjectMeta] = None, + identityProviderId: Option[String] = None, + minSynchronizers: Option[Int] = None, + userId: String = "", + ): Future[Party] = + for { + (result, synchronizerIds) <- allocateParty( + new AllocatePartyRequest( + partyIdHint = partyIdHint.getOrElse(""), + localMetadata = localMetadata, + identityProviderId = identityProviderId.getOrElse(""), + synchronizerId = "", + userId = userId, + ), + minSynchronizers.getOrElse(1), + ) + } yield Party(result.partyDetails.get.party, synchronizerIds.toList) + + override def allocateParty( + req: AllocatePartyRequest, + minSynchronizers: Int, + ): Future[(AllocatePartyResponse, Seq[String])] = + for { + connectedSynchronizerIds <- connectedSynchronizers() + result <- MonadUtil.foldLeftM[Future, Option[AllocatePartyResponse], String]( + None, + connectedSynchronizerIds, + ) { case (previousResponse, synchronizerId) => + val previouslyAllocatedPartyIdHint = + previousResponse + .flatMap(_.partyDetails) + .map(_.party) + .map(PartyId.tryFromProtoPrimitive) + .map(_.identifier.unwrap) + services.partyManagement + .allocateParty( + req.copy( + partyIdHint = OptionUtil + // if the original request contained a partyIdHint, use it + .emptyStringAsNone(req.partyIdHint) + // otherwise use the dynamically generated party id as the party id hint, to allocate + // the same party across all synchronizers + .orElse(previouslyAllocatedPartyIdHint) + .getOrElse(""), + synchronizerId = synchronizerId, + ) + ) + .map(Some(_)) + } + synchronizerIdsForParty <- RetryingGetConnectedSynchronizersForParty( + services, + result.head.partyDetails.get.party, + minSynchronizers, + ) + } yield (result.head, synchronizerIdsForParty) + + override def connectedSynchronizers(): Future[Seq[String]] = for { + participantId <- services.partyManagement.getParticipantId(GetParticipantIdRequest()) + synchronizerIds <- RetryingGetConnectedSynchronizersForParty( + services, + participantId.participantId, + 1, + ) + } yield synchronizerIds + + override def updatePartyDetails( + req: UpdatePartyDetailsRequest + ): Future[UpdatePartyDetailsResponse] = + services.partyManagement.updatePartyDetails(req) + + def updatePartyIdentityProviderId( + request: UpdatePartyIdentityProviderIdRequest + ): Future[UpdatePartyIdentityProviderIdResponse] = + services.partyManagement.updatePartyIdentityProviderId(request) + + override def allocateParties(partiesCount: Int, minSynchronizers: Int): Future[Vector[Party]] = + Future.sequence( + Vector.fill(partiesCount)( + allocateParty( + partyIdHint = Some(nextPartyHintId()), + localMetadata = None, + identityProviderId = None, + minSynchronizers = Some(minSynchronizers), + ) + ) + ) + + override def allocateExternalParties( + partiesCount: Int,
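+      // forwarded per allocated party to allocateExternalPartyFromHint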
minSynchronizers: Int, + ): Future[Vector[ExternalParty]] = + Future.sequence( + Vector.fill(partiesCount)( + allocateExternalPartyFromHint( + partyIdHint = Some(nextPartyHintId()), + minSynchronizers, + ) + ) + ) + + override def getParties(req: GetPartiesRequest): Future[GetPartiesResponse] = + services.partyManagement.getParties(req) + + override def getParties(parties: Seq[Party]): Future[Seq[PartyDetails]] = + services.partyManagement + .getParties(GetPartiesRequest(parties.map(_.getValue), "")) + .map(_.partyDetails) + + override def listKnownPartiesExpanded(): Future[Set[Party]] = + services.partyManagement + .listKnownParties(ListKnownPartiesRequest("", 0, "")) + .map(_.partyDetails.map(partyDetails => Party(partyDetails.party)).toSet) + + override def listKnownParties(req: ListKnownPartiesRequest): Future[ListKnownPartiesResponse] = + services.partyManagement + .listKnownParties(req) + + override def listKnownParties(): Future[ListKnownPartiesResponse] = + services.partyManagement + .listKnownParties(new ListKnownPartiesRequest("", 0, "")) + + override def waitForPartiesOnOtherParticipants( + otherParticipants: Iterable[ParticipantTestContext], + expectedParties: Set[Party], + connectedSynchronizers: Int, + ): Future[Unit] = + if (partyAllocationConfig.waitForAllParticipants) { + val expectedPartyNames = expectedParties.map(_.underlying.getValue) + Future + .sequence(otherParticipants.filter(_.endpointId != endpointId).map { participant => + for { + _ <- eventually(s"Wait for parties on ${participant.endpointId}") { + participant + .listKnownPartiesExpanded() + .map { actualParties => + assert( + expectedPartyNames.subsetOf(actualParties.map(_.underlying.getValue)), + s"Parties from $endpointId never appeared on ${participant.endpointId}.", + ) + } + } + _ <- Future.sequence(expectedParties.map { party => + eventually( + s"Wait for synchronizers for ${party.underlying.getValue} on ${participant.endpointId}" + )( + participant + .getConnectedSynchronizers(Some(party), Some(participantId)) + .map(synchronizers => + assert( + synchronizers.sizeIs == connectedSynchronizers, + s"Expecting party ${party.underlying.getValue} created on $endpointId to be connected to $connectedSynchronizers synchronizers on ${participant.endpointId} but found ${synchronizers.size}", + ) + ) + ) + }) + } yield () + }) + .map(_ => ()) + } else + Future.unit + + override def generateExternalPartyTopology( + req: GenerateExternalPartyTopologyRequest + ): Future[GenerateExternalPartyTopologyResponse] = + services.partyManagement.generateExternalPartyTopology(req) + + override def activeContracts( + request: GetActiveContractsRequest + ): Future[Vector[CreatedEvent]] = + for { + contracts <- new StreamConsumer[GetActiveContractsResponse]( + services.state.getActiveContracts(request, _) + ).all() + } yield contracts.flatMap(_.contractEntry.activeContract.flatMap(_.createdEvent)) + + override def activeContractsRequest( + parties: Option[Seq[Party]], + activeAtOffset: Long, + templateIds: Seq[Identifier] = Seq.empty, + interfaceFilters: Seq[(Identifier, IncludeInterfaceView)] = Seq.empty, + verbose: Boolean = true, + ): GetActiveContractsRequest = + GetActiveContractsRequest( + activeAtOffset = activeAtOffset, + eventFormat = Some(eventFormat(verbose, parties, templateIds, interfaceFilters)), + ) + + override def activeContracts( + parties: Option[Seq[Party]], + activeAtOffsetO: Option[Long] = None, + verbose: Boolean = true, + ): Future[Vector[CreatedEvent]] = + activeContractsByTemplateId(Seq.empty, parties, 
activeAtOffsetO, verbose) + + override def activeContractsByTemplateId( + templateIds: Seq[Identifier], + parties: Option[Seq[Party]], + activeAtOffsetO: Option[Long], + verbose: Boolean = true, + ): Future[Vector[CreatedEvent]] = + for { + activeAtOffset <- activeAtOffsetO match { + case None => currentEnd() + case Some(activeAt) => Future.successful(activeAt) + } + acs <- activeContracts( + activeContractsRequest( + parties, + activeAtOffset, + templateIds, + verbose = verbose, + ) + ) + } yield acs + + def eventFormat( + verbose: Boolean, + partiesO: Option[Seq[Party]], + templateIds: Seq[Identifier] = Seq.empty, + interfaceFilters: Seq[(Identifier, IncludeInterfaceView)] = Seq.empty, + ): EventFormat = { + lazy val fs = filters(templateIds, interfaceFilters) + partiesO match { + case None => + EventFormat( + filtersByParty = Map.empty, + filtersForAnyParty = Some(fs), + verbose = verbose, + ) + case Some(parties) => + EventFormat( + filtersByParty = parties.map(party => party.getValue -> fs).toMap, + filtersForAnyParty = None, + verbose = verbose, + ) + } + } + + override def transactionFormat( + parties: Option[Seq[Party]], + templateIds: Seq[Identifier] = Seq.empty, + interfaceFilters: Seq[(Identifier, IncludeInterfaceView)] = Seq.empty, + transactionShape: TransactionShape = AcsDelta, + verbose: Boolean = false, + ): TransactionFormat = + TransactionFormat( + eventFormat = Some( + eventFormat( + verbose = verbose, + partiesO = parties, + templateIds = templateIds, + interfaceFilters = interfaceFilters, + ) + ), + transactionShape = toProto(transactionShape), + ) + + override def filters( + templateIds: Seq[Identifier] = Seq.empty, + interfaceFilters: Seq[(Identifier, IncludeInterfaceView)] = Seq.empty, + ): Filters = Filters( + if (templateIds.isEmpty && interfaceFilters.isEmpty) + Seq( + CumulativeFilter.defaultInstance.withWildcardFilter( + WildcardFilter(includeCreatedEventBlob = false) + ) + ) + else + interfaceFilters + .map { case (id, includeInterfaceView) => + CumulativeFilter( + IdentifierFilter.InterfaceFilter( + InterfaceFilter( + interfaceId = Some(v1.Identifier.fromJavaProto(id.toProto)), + includeInterfaceView = includeInterfaceView, + includeCreatedEventBlob = false, + ) + ) + ) + } + ++ + templateIds + .map(tid => + CumulativeFilter( + IdentifierFilter.TemplateFilter( + TemplateFilter( + templateId = Some(v1.Identifier.fromJavaProto(tid.toProto)), + includeCreatedEventBlob = false, + ) + ) + ) + ) + ) + + override def getTransactionsRequest( + transactionFormat: TransactionFormat, + begin: Long = referenceOffset, + ): Future[GetUpdatesRequest] = currentEnd().map { end => + getTransactionsRequestWithEnd( + transactionFormat = transactionFormat, + begin = begin, + end = Some(end), + ) + } + + override def getTransactionsRequestWithEnd( + transactionFormat: TransactionFormat, + begin: Long = referenceOffset, + end: Option[Long], + ): GetUpdatesRequest = getUpdatesRequestWithEnd( + transactionFormatO = Some(transactionFormat), + begin = begin, + end = end, + ) + + override def getUpdatesRequestWithEnd( + transactionFormatO: Option[TransactionFormat] = None, + reassignmentsFormatO: Option[EventFormat] = None, + topologyFilterO: Option[Seq[Party]] = None, + begin: Long = referenceOffset, + end: Option[Long] = None, + ): GetUpdatesRequest = { + + val includeTopologyTransactions: Option[TopologyFormat] = + topologyFilterO.map(parties => + TopologyFormat( + Some( + ParticipantAuthorizationTopologyFormat( + parties.map(_.underlying.getValue) + ) + ) + ) + ) + + 
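+    // Formats left as None switch the corresponding update kind off entirely; the offsets
+    // below are begin-exclusive and end-inclusive, as the protobuf field names spell out.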
GetUpdatesRequest( + beginExclusive = begin, + endInclusive = end, + updateFormat = Some( + UpdateFormat( + includeTransactions = transactionFormatO, + includeReassignments = reassignmentsFormatO, + includeTopologyEvents = includeTopologyTransactions, + ) + ), + ) + } + + private def transactions( + n: Int, + request: GetUpdatesRequest, + service: (GetUpdatesRequest, StreamObserver[GetUpdatesResponse]) => Unit, + ): Future[Vector[GetUpdatesResponse]] = + new StreamConsumer[GetUpdatesResponse](service(request, _)) + .filterTake(_.update.isTransaction)(n) + + private def updates[Res]( + n: Int, + request: GetUpdatesRequest, + service: (GetUpdatesRequest, StreamObserver[Res]) => Unit, + resultFilter: Res => Boolean, + ): Future[Vector[Res]] = + new StreamConsumer[Res](service(request, _)).filterTake(resultFilter)(n) + + private def transactions[Res]( + request: GetUpdatesRequest, + service: (GetUpdatesRequest, StreamObserver[Res]) => Unit, + ): Future[Vector[Res]] = + new StreamConsumer[Res](service(request, _)).all() + + override def transactionsByTemplateId( + templateId: Identifier, + parties: Option[Seq[Party]], + ): Future[Vector[Transaction]] = + getTransactionsRequest(transactionFormat(parties, Seq(templateId))) + .flatMap(transactions) + + override def transactions( + request: GetUpdatesRequest + ): Future[Vector[Transaction]] = + transactions(request, services.update.getUpdates) + .map(_.flatMap(_.update.transaction)) + + override def transactions( + transactionShape: TransactionShape, + parties: Party* + ): Future[Vector[Transaction]] = + getTransactionsRequest( + transactionFormat = + transactionFormat(Some(parties), transactionShape = transactionShape, verbose = true) + ).flatMap(transactions) + + override def transactions( + take: Int, + request: GetUpdatesRequest, + ): Future[Vector[Transaction]] = + transactions(take, request, services.update.getUpdates) + .map(_.flatMap(_.update.transaction)) + + override def updates( + take: Int, + request: GetUpdatesRequest, + ): Future[Vector[GetUpdatesResponse.Update]] = + updates(take, request, services.update.getUpdates, (_: GetUpdatesResponse) => true) + .map(_.map(_.update)) + + override def updates( + take: Int, + request: GetUpdatesRequest, + resultFilter: GetUpdatesResponse => Boolean, + ): Future[Vector[GetUpdatesResponse.Update]] = + updates(take, request, services.update.getUpdates, resultFilter) + .map(_.map(_.update)) + + override def updates( + within: NonNegativeFiniteDuration, + request: GetUpdatesRequest, + ): Future[Vector[GetUpdatesResponse.Update]] = + new StreamConsumer(services.update.getUpdates(request, _)) + .within(within.toScala) + .map(_.map(_.update)) + + override def transactions( + take: Int, + transactionShape: TransactionShape, + parties: Party* + ): Future[Vector[Transaction]] = + getTransactionsRequest(transactionFormat(Some(parties), transactionShape = transactionShape)) + .flatMap(txReq => transactions(take, txReq)) + + override def transactionTreeById( + transactionId: String, + parties: Party* + ): Future[Transaction] = { + val partiesList = parties.toList.headOption.map(_ => parties.toList) + services.update + .getUpdateById( + GetUpdateByIdRequest( + updateId = transactionId, + updateFormat = Some( + UpdateFormat( + includeTransactions = + Some(transactionFormat(parties = partiesList, transactionShape = LedgerEffects)), + includeReassignments = None, + includeTopologyEvents = None, + ) + ), + ) + ) + .map(_.getTransaction) + } + + override def updateById(request: GetUpdateByIdRequest): 
Future[GetUpdateResponse] = + services.update.getUpdateById(request) + + override def transactionById( + updateId: String, + parties: Seq[Party], + transactionShape: TransactionShape = AcsDelta, + templateIds: Seq[Identifier] = Seq.empty, + ): Future[Transaction] = + updateById( + GetUpdateByIdRequest( + updateId = updateId, + updateFormat = Some( + UpdateFormat( + includeTransactions = Some( + transactionFormat( + parties = Some(parties), + templateIds = templateIds, + transactionShape = transactionShape, + verbose = true, + ) + ), + includeReassignments = None, + includeTopologyEvents = None, + ) + ), + ) + ).map(_.getTransaction) + + override def topologyTransactionById( + updateId: String, + parties: Seq[Party], + ): Future[TopologyTransaction] = + updateById( + GetUpdateByIdRequest( + updateId = updateId, + updateFormat = Some( + UpdateFormat( + includeTransactions = None, + includeReassignments = None, + includeTopologyEvents = Some( + TopologyFormat(includeParticipantAuthorizationEvents = + Some(ParticipantAuthorizationTopologyFormat(parties.map(_.getValue))) + ) + ), + ) + ), + ) + ).map(_.getTopologyTransaction) + + override def transactionTreeByOffset(offset: Long, parties: Party*): Future[Transaction] = { + val partiesList = parties.toList.headOption.map(_ => parties.toList) + services.update + .getUpdateByOffset( + GetUpdateByOffsetRequest( + offset = offset, + updateFormat = Some( + UpdateFormat( + includeTransactions = + Some(transactionFormat(parties = partiesList, transactionShape = LedgerEffects)), + includeReassignments = None, + includeTopologyEvents = None, + ) + ), + ) + ) + .map(_.getTransaction) + } + override def updateByOffset( + request: GetUpdateByOffsetRequest + ): Future[GetUpdateResponse] = + services.update + .getUpdateByOffset(request) + + override def transactionByOffset( + offset: Long, + parties: Seq[Party], + transactionShape: TransactionShape, + templateIds: Seq[Identifier] = Seq.empty, + ): Future[Transaction] = + updateByOffset( + GetUpdateByOffsetRequest( + offset = offset, + updateFormat = Some( + UpdateFormat( + includeTransactions = Some( + transactionFormat( + parties = Some(parties), + templateIds = templateIds, + transactionShape = transactionShape, + verbose = true, + ) + ), + includeReassignments = None, + includeTopologyEvents = None, + ) + ), + ) + ).map(_.getTransaction) + + override def topologyTransactionByOffset( + offset: Long, + parties: Seq[Party], + ): Future[TopologyTransaction] = + updateByOffset( + GetUpdateByOffsetRequest( + offset = offset, + updateFormat = Some( + UpdateFormat( + includeTransactions = None, + includeReassignments = None, + includeTopologyEvents = Some( + TopologyFormat(includeParticipantAuthorizationEvents = + Some(ParticipantAuthorizationTopologyFormat(parties.map(_.getValue))) + ) + ), + ) + ), + ) + ).map(_.getTopologyTransaction) + + private def extractContracts[TCid <: ContractId[?]](transaction: Transaction)(implicit + companion: ContractCompanion[?, TCid, ?] 
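+      // codegen companion, used only to wrap raw contract ids in their typed form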
+ ): Seq[TCid] = + transaction.events.collect { case Event(Created(e)) => + companion.toContractId(new ContractId(e.contractId)) + } + + override def getEventsByContractId( + request: GetEventsByContractIdRequest + ): Future[GetEventsByContractIdResponse] = + services.eventQuery.getEventsByContractId(request) + + override def create[ + TCid <: ContractId[T], + T <: Template, + ]( + party: Party, + template: T, + )(implicit companion: ContractCompanion[?, TCid, T]): Future[TCid] = + submitAndWaitForTransaction( + submitAndWaitForTransactionRequest(party, template.create.commands, AcsDelta) + ) + .map(response => extractContracts(response.getTransaction).head) + + override def create[TCid <: ContractId[T], T <: Template]( + actAs: List[Party], + readAs: List[Party], + template: T, + )(implicit companion: ContractCompanion[?, TCid, T]): Future[TCid] = + submitAndWaitForTransaction( + submitAndWaitForTransactionRequest(actAs, readAs, template.create.commands, AcsDelta) + ).map(response => extractContracts(response.getTransaction).head) + + override def createAndGetTransactionId[ + TCid <: ContractId[T], + T <: Template, + ]( + party: Party, + template: T, + )(implicit companion: ContractCompanion[?, TCid, T]): Future[(String, TCid)] = + submitAndWaitForTransaction( + submitAndWaitForTransactionRequest(party, template.create.commands) + ) + .map(_.getTransaction) + .map(tx => + tx.updateId -> tx.events.collect { case Event(Created(e)) => + companion.toContractId(new ContractId(e.contractId)) + }.head + ) + + override def exercise[T]( + party: Party, + exercise: Update[T], + transactionShape: TransactionShape = LedgerEffects, + verbose: Boolean = true, + ): Future[Transaction] = + submitAndWaitForTransaction( + submitAndWaitForTransactionRequest( + party, + exercise.commands, + transactionShape, + verbose = verbose, + ) + ).map(_.getTransaction) + + override def exercise[T]( + actAs: List[Party], + readAs: List[Party], + exercise: Update[T], + ): Future[Transaction] = + submitAndWaitForTransaction( + submitAndWaitForTransactionRequest( + actAs, + readAs, + exercise.commands, + transactionShape = LedgerEffects, + ) + ).map(_.getTransaction) + + override def exerciseAndGetContract[TCid <: ContractId[T], T]( + party: Party, + exercise: Update[Exercised[TCid]], + )(implicit companion: ContractCompanion[?, TCid, T]): Future[TCid] = + submitAndWaitForTransaction( + submitAndWaitForTransactionRequest(party, exercise.commands) + ) + .map(_.getTransaction) + .map(t => extractContracts(t)(companion)) + .map(_.head) + + override def exerciseAndGetContractNoDisclose[TCid <: ContractId[?]]( + party: Party, + exercise: Update[Exercised[UnitData]], + )(implicit companion: ContractCompanion[?, TCid, ?]): Future[TCid] = + submitAndWaitForTransaction( + submitAndWaitForTransactionRequest(party, exercise.commands) + ) + .map(_.getTransaction) + .map(t => extractContracts(t)(companion)) + .map(_.head) + + override def exerciseByKey( + party: Party, + template: Identifier, + key: Value, + choice: String, + argument: Value, + ): Future[Transaction] = + submitAndWaitForTransaction( + submitAndWaitForTransactionRequest( + party, + JList.of( + new ExerciseByKeyCommand( + template, + key, + choice, + argument, + ) + ), + LedgerEffects, + ) + ).map(_.getTransaction) + + override def submitRequest( + actAs: List[Party], + readAs: List[Party], + commands: JList[Command], + ): SubmitRequest = + new SubmitRequest( + Some( + Commands.defaultInstance.copy( + userId = userId, + commandId = nextCommandId(), + submissionId = 
nextSubmissionId(), + actAs = actAs.map(_.getValue), + readAs = readAs.map(_.getValue), + commands = commands.asScala.toSeq.map(c => ApiCommand.fromJavaProto(c.toProtoCommand)), + workflowId = workflowId, + ) + ) + ) + + override def submitRequest(party: Party, commands: JList[Command] = JList.of()): SubmitRequest = + new SubmitRequest( + Some( + Commands.defaultInstance.copy( + userId = userId, + commandId = nextCommandId(), + submissionId = nextSubmissionId(), + actAs = Seq(party.getValue), + commands = commands.asScala.toSeq.map(c => ApiCommand.fromJavaProto(c.toProtoCommand)), + workflowId = workflowId, + ) + ) + ) + + override def submitAndWaitRequest( + actAs: List[Party], + readAs: List[Party], + commands: JList[Command], + ): SubmitAndWaitRequest = + new SubmitAndWaitRequest( + Some( + Commands.defaultInstance.copy( + userId = userId, + commandId = nextCommandId(), + submissionId = nextSubmissionId(), + actAs = actAs.map(_.getValue), + readAs = readAs.map(_.getValue), + commands = commands.asScala.toSeq.map(c => ApiCommand.fromJavaProto(c.toProtoCommand)), + workflowId = workflowId, + ) + ) + ) + + override def submitAndWaitForTransactionRequest( + actAs: List[Party], + readAs: List[Party], + commands: JList[Command], + transactionShape: TransactionShape, + ): SubmitAndWaitForTransactionRequest = + new SubmitAndWaitForTransactionRequest( + commands = Some( + Commands.defaultInstance.copy( + userId = userId, + commandId = nextCommandId(), + submissionId = nextSubmissionId(), + actAs = actAs.map(_.getValue), + readAs = readAs.map(_.getValue), + commands = commands.asScala.toSeq.map(c => ApiCommand.fromJavaProto(c.toProtoCommand)), + workflowId = workflowId, + ) + ), + transactionFormat = Some( + TransactionFormat( + eventFormat = Some( + EventFormat( + filtersByParty = actAs.map(_.getValue -> Filters(Nil)).toMap, + filtersForAnyParty = None, + verbose = true, + ) + ), + transactionShape = toProto(transactionShape), + ) + ), + ) + + override def prepareSubmissionRequest( + party: Party, + commands: JList[Command], + ): PrepareSubmissionRequest = + PrepareSubmissionRequest( + userId = userId, + commandId = nextCommandId(), + commands = Commands.defaultInstance + .copy( + userId = userId, + commandId = nextCommandId(), + submissionId = nextSubmissionId(), + actAs = Seq(party.getValue), + commands = commands.asScala.toSeq.map(c => ApiCommand.fromJavaProto(c.toProtoCommand)), + workflowId = workflowId, + ) + .commands, + minLedgerTime = None, + actAs = Seq(party.getValue), + readAs = Seq(party.getValue), + disclosedContracts = Seq.empty, + synchronizerId = "", + packageIdSelectionPreference = Seq.empty, + verboseHashing = false, + prefetchContractKeys = Seq.empty, + maxRecordTime = Option.empty, + ) + + override def executeSubmissionRequest( + party: ExternalParty, + preparedTx: PrepareSubmissionResponse, + ): ExecuteSubmissionRequest = { + val signature = party.signProto(preparedTx.preparedTransactionHash) + ExecuteSubmissionRequest( + preparedTransaction = preparedTx.preparedTransaction, + partySignatures = Some( + PartySignatures( + Seq( + SinglePartySignatures( + party.underlying.getValue, + Seq(signature), + ) + ) + ) + ), + deduplicationPeriod = ExecuteSubmissionRequest.DeduplicationPeriod.Empty, + submissionId = nextSubmissionId(), + userId = userId, + hashingSchemeVersion = HashingSchemeVersion.HASHING_SCHEME_VERSION_V2, + minLedgerTime = None, + ) + } + + override def executeSubmissionAndWaitRequest( + party: ExternalParty, + preparedTx: PrepareSubmissionResponse, + ): 
ExecuteSubmissionAndWaitRequest = + executeSubmissionRequest(party, preparedTx).transformInto[ExecuteSubmissionAndWaitRequest] + + override def executeSubmissionAndWaitForTransactionRequest( + party: ExternalParty, + preparedTx: PrepareSubmissionResponse, + transactionFormat: Option[TransactionFormat], + ): ExecuteSubmissionAndWaitForTransactionRequest = + executeSubmissionRequest(party, preparedTx) + .into[ExecuteSubmissionAndWaitForTransactionRequest] + .withFieldConst(_.transactionFormat, transactionFormat) + .transform + + override def submitAndWaitRequest(party: Party, commands: JList[Command]): SubmitAndWaitRequest = + new SubmitAndWaitRequest( + Some( + Commands.defaultInstance.copy( + userId = userId, + commandId = nextCommandId(), + submissionId = nextSubmissionId(), + actAs = Seq(party.getValue), + commands = commands.asScala.toSeq.map(c => ApiCommand.fromJavaProto(c.toProtoCommand)), + workflowId = workflowId, + ) + ) + ) + + override def submitAndWaitForTransactionRequest( + party: Party, + commands: JList[Command], + ): SubmitAndWaitForTransactionRequest = + submitAndWaitForTransactionRequest( + party = party, + commands = commands, + transactionShape = AcsDelta, + filterParties = Some(Seq(party)), + ) + + override def submitAndWaitForTransactionRequest( + party: Party, + commands: JList[Command], + transactionShape: TransactionShape, + filterParties: Option[Seq[Party]] = None, + templateIds: Seq[Identifier] = Seq.empty, + verbose: Boolean = true, + ): SubmitAndWaitForTransactionRequest = + SubmitAndWaitForTransactionRequest( + commands = Some( + Commands.defaultInstance.copy( + userId = userId, + commandId = nextCommandId(), + submissionId = nextSubmissionId(), + actAs = Seq(party.getValue), + commands = commands.asScala.toSeq.map(c => ApiCommand.fromJavaProto(c.toProtoCommand)), + workflowId = workflowId, + ) + ), + transactionFormat = Some( + transactionFormat( + parties = filterParties, + templateIds = templateIds, + transactionShape = transactionShape, + verbose = verbose, + ) + ), + ) + + override def submit(request: SubmitRequest): Future[Unit] = + services.commandSubmission.submit(request).map(_ => ()) + + override def submitAndWait(request: SubmitAndWaitRequest): Future[SubmitAndWaitResponse] = + services.command.submitAndWait(request) + + override def submitAndWaitForTransaction( + request: SubmitAndWaitForTransactionRequest + ): Future[SubmitAndWaitForTransactionResponse] = + services.command.submitAndWaitForTransaction(request) + + /** This addresses a narrow case in which we tolerate a single occurrence of a specific and + * transient (and rare) error by retrying exactly once. + */ + override def submitRequestAndTolerateGrpcError[T]( + errorCodeToTolerateOnce: ErrorCode, + submitAndWaitGeneric: ParticipantTestContext => Future[T], + ): Future[T] = + submitAndWaitGeneric(this) + .transform { + case Failure(e: StatusRuntimeException) + if errorCodeToTolerateOnce.category.grpcCode + .map(_.value()) + .contains(StatusProto.fromThrowable(e).getCode) => + Success(Left(e)) + case otherTry => + // Otherwise return a Right with a nested Either that + // lets us create a failed or successful future in the + // default case of the step below. + Success(Right(otherTry.toEither)) + } + .flatMap { + case Left(_) => // We retry once; back off for one second first.
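+            // (A second failure after this single retry is not caught and fails the future.)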
+ Delayed.Future.by(1.second)(submitAndWaitGeneric(this)) + case Right(firstCallResult) => firstCallResult.fold(Future.failed, Future.successful) + } + + override def completions( + within: NonNegativeFiniteDuration, + request: CompletionStreamRequest, + ): Future[Vector[CompletionStreamResponse.CompletionResponse]] = + new StreamConsumer[CompletionStreamResponse]( + services.commandCompletion.completionStream(request, _) + ) + .within(within.toScala) + .map(_.map(_.completionResponse)) + + override def completionStreamRequest(from: Long = referenceOffset)( + parties: Party* + ): CompletionStreamRequest = + new CompletionStreamRequest( + userId, + parties.map(_.getValue), + from, + ) + + override def firstCompletions(request: CompletionStreamRequest): Future[Vector[Completion]] = + new StreamConsumer[CompletionStreamResponse]( + services.commandCompletion.completionStream(request, _) + ).find(_.completionResponse.completion.nonEmpty) + .map(_.completionResponse.completion.toVector) + + override def firstCompletions(parties: Party*): Future[Vector[Completion]] = + firstCompletions(completionStreamRequest()(parties*)) + + override def findCompletionAtOffset( + offset: Long, + p: Completion => Boolean, + )(parties: Party*): Future[Option[Completion]] = { + // We have to request an offset before the reported offset, as offsets are exclusive in the completion service. + val offsetPreviousToReportedOffset = + if (offset == 0L) referenceOffset else offset - 1 + val reportedOffsetCompletionStreamRequest = + completionStreamRequest(offsetPreviousToReportedOffset)(parties*) + findCompletion(reportedOffsetCompletionStreamRequest)(p) + } + + override def findCompletion( + request: CompletionStreamRequest + )(p: Completion => Boolean): Future[Option[Completion]] = + new StreamConsumer[CompletionStreamResponse]( + services.commandCompletion.completionStream(request, _) + ).find(_.completionResponse.completion.exists(p)) + .map(response => response.completionResponse.completion.find(p)) + + override def findCompletion(parties: Party*)( + p: Completion => Boolean + ): Future[Option[Completion]] = + findCompletion(completionStreamRequest()(parties*))(p) + + override def offsets(n: Int, request: CompletionStreamRequest): Future[Vector[Long]] = + new StreamConsumer[CompletionStreamResponse]( + services.commandCompletion.completionStream(request, _) + ).filterTake(_.completionResponse.isCompletion)(n) + .map(_.map(_.getCompletion.offset)) + + override def checkHealth(): Future[HealthCheckResponse] = + services.health.check(HealthCheckRequest()) + + override def watchHealth(): Future[Seq[HealthCheckResponse]] = + new StreamConsumer[HealthCheckResponse](services.health.watch(HealthCheckRequest(), _)) + .within(1.second) + + override def prune( + pruneUpTo: Long, + attempts: Int = 10, + ): Future[PruneResponse] = + // Distributed ledger participants need to reach global consensus prior to pruning. 
Hence the "eventually" here: + eventually(assertionName = "Prune", attempts = attempts) { + services.participantPruning + .prune( + PruneRequest(pruneUpTo, nextSubmissionId(), pruneAllDivulgedContracts = true) + ) + .andThen { case Failure(exception) => + logger.warn("Failed to prune", exception)(LoggingContext.ForTesting) + } + } + + override def pruneCantonSafe( + pruneUpTo: Long, + party: Party, + dummyCommand: Party => JList[Command], + )(implicit ec: ExecutionContext): Future[Unit] = + FutureAssertions.succeedsEventually( + retryDelay = 100.millis, + maxRetryDuration = 10.seconds, + delayMechanism, + "Pruning", + ) { + for { + _ <- submitAndWait(submitAndWaitRequest(party, dummyCommand(party))) + _ <- prune( + pruneUpTo = pruneUpTo, + attempts = 1, + ) + } yield () + }(ec, LoggingContext.ForTesting) + + private[infrastructure] override def preallocateParties( + n: Int, + participants: Iterable[ParticipantTestContext], + connectedSynchronizers: Int, + ): Future[Vector[Party]] = + for { + parties <- + if (partyAllocationConfig.allocateParties) { + allocateParties(n, connectedSynchronizers) + } else { + reservePartyNames(n) + } + _ <- waitForPartiesOnOtherParticipants(participants, parties.toSet, connectedSynchronizers) + } yield parties + + override def getConnectedSynchronizers( + party: Option[Party], + participantId: Option[String], + identityProviderId: Option[String] = None, + ): Future[Set[String]] = + services.state + .getConnectedSynchronizers( + new GetConnectedSynchronizersRequest( + party.map(_.getValue).getOrElse(""), + participantId.getOrElse(""), + identityProviderId.getOrElse(""), + ) + ) + .map(_.connectedSynchronizers.map(_.synchronizerId).toSet) + + private def reservePartyNames(n: Int): Future[Vector[Party]] = + Future.successful(Vector.fill(n)(Party(nextPartyHintId()))) + + val maxOffsetCheckpointEmissionDelay: NonNegativeFiniteDuration = + NonNegativeFiniteDuration.tryCreate( + features.offsetCheckpoint.getMaxOffsetCheckpointEmissionDelay.asJava + ) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/TimeoutParticipantTestContext.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/TimeoutParticipantTestContext.scala new file mode 100644 index 0000000000..f6f76b6e2c --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/TimeoutParticipantTestContext.scala @@ -0,0 +1,795 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.infrastructure.participant + +import com.daml.ledger.api.testtool.infrastructure.ChannelEndpoint.JsonApiEndpoint +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext.IncludeInterfaceView +import com.daml.ledger.api.testtool.infrastructure.time.{DelayMechanism, Durations} +import com.daml.ledger.api.testtool.infrastructure.{ChannelEndpoint, ExternalParty, Party} +import com.daml.ledger.api.v2.admin.object_meta.ObjectMeta +import com.daml.ledger.api.v2.admin.package_management_service.{ + PackageDetails, + UpdateVettedPackagesRequest, + UpdateVettedPackagesResponse, + UploadDarFileRequest, + ValidateDarFileRequest, +} +import com.daml.ledger.api.v2.admin.participant_pruning_service.PruneResponse +import com.daml.ledger.api.v2.admin.party_management_service.* +import com.daml.ledger.api.v2.command_completion_service.{ + CompletionStreamRequest, + CompletionStreamResponse, +} +import com.daml.ledger.api.v2.command_service.{ + SubmitAndWaitForTransactionRequest, + SubmitAndWaitForTransactionResponse, + SubmitAndWaitRequest, + SubmitAndWaitResponse, +} +import com.daml.ledger.api.v2.command_submission_service.SubmitRequest +import com.daml.ledger.api.v2.completion.Completion +import com.daml.ledger.api.v2.event.CreatedEvent +import com.daml.ledger.api.v2.event_query_service.{ + GetEventsByContractIdRequest, + GetEventsByContractIdResponse, +} +import com.daml.ledger.api.v2.interactive.interactive_submission_service.{ + ExecuteSubmissionAndWaitForTransactionRequest, + ExecuteSubmissionAndWaitForTransactionResponse, + ExecuteSubmissionAndWaitRequest, + ExecuteSubmissionAndWaitResponse, + ExecuteSubmissionRequest, + ExecuteSubmissionResponse, + GetPreferredPackageVersionResponse, + GetPreferredPackagesResponse, + PrepareSubmissionRequest, + PrepareSubmissionResponse, +} +import com.daml.ledger.api.v2.package_service.{ + GetPackageResponse, + ListVettedPackagesRequest, + ListVettedPackagesResponse, + PackageStatus, +} +import com.daml.ledger.api.v2.state_service.GetActiveContractsRequest +import com.daml.ledger.api.v2.topology_transaction.TopologyTransaction +import com.daml.ledger.api.v2.transaction.Transaction +import com.daml.ledger.api.v2.transaction_filter.{EventFormat, Filters, TransactionFormat} +import com.daml.ledger.api.v2.update_service.* +import com.daml.ledger.javaapi.data.codegen.{ContractCompanion, ContractId, Exercised, Update} +import com.daml.ledger.javaapi.data.{Command, Identifier, Template, Unit as UnitData, Value} +import com.daml.timer.Delayed +import com.digitalasset.base.error.ErrorCode +import com.digitalasset.canton.ledger.api.TransactionShape +import com.digitalasset.canton.ledger.api.TransactionShape.AcsDelta +import com.digitalasset.canton.time.NonNegativeFiniteDuration +import com.google.protobuf.ByteString +import io.grpc.health.v1.health.HealthCheckResponse + +import java.security.KeyPair +import java.time.Instant +import java.util.List as JList +import java.util.concurrent.TimeoutException +import scala.concurrent.duration.DurationInt +import scala.concurrent.{ExecutionContext, Future} + +class TimeoutParticipantTestContext(timeoutScaleFactor: Double, delegate: ParticipantTestContext) + extends ParticipantTestContext { + + private val timeoutDuration = Durations.scaleDuration(15.seconds, timeoutScaleFactor) + + override val userId: String = delegate.userId + override val endpointId: String = delegate.endpointId + override private[participant] def services = delegate.services + override 
private[participant] implicit val ec: ExecutionContext = delegate.ec + override def ledgerEndpoint: Either[JsonApiEndpoint, ChannelEndpoint] = delegate.ledgerEndpoint + override def adminEndpoint: ChannelEndpoint = delegate.adminEndpoint + override def features: Features = delegate.features + override def referenceOffset: Long = delegate.referenceOffset + override def nextKeyId: () => String = delegate.nextKeyId + override def nextUserId: () => String = delegate.nextUserId + override def nextPartyId: () => String = delegate.nextPartyId + override def nextIdentityProviderId: () => String = delegate.nextIdentityProviderId + + override def delayMechanism: DelayMechanism = delegate.delayMechanism + + override def currentEnd(): Future[Long] = + withTimeout("Get current end", delegate.currentEnd()) + override def offsetBeyondLedgerEnd(): Future[Long] = withTimeout( + "Offset beyond ledger end", + delegate.offsetBeyondLedgerEnd(), + ) + override def time(): Future[Instant] = withTimeout("Get time", delegate.time()) + override def setTime(currentTime: Instant, newTime: Instant): Future[Unit] = withTimeout( + "Set time", + delegate.setTime(currentTime, newTime), + ) + override def listKnownPackages(): Future[Seq[PackageDetails]] = withTimeout( + "List known packages", + delegate.listKnownPackages(), + ) + + override def uploadDarRequest(bytes: ByteString, synchronizerId: String): UploadDarFileRequest = + delegate.uploadDarRequest(bytes, synchronizerId) + override def validateDarFile(request: ValidateDarFileRequest): Future[Unit] = withTimeout( + s"Validate dar file ${request.submissionId}", + delegate.validateDarFile(request), + ) + override def uploadDarFile(request: UploadDarFileRequest): Future[Unit] = withTimeout( + s"Upload dar file ${request.submissionId}", + delegate.uploadDarFile(request), + ) + + override def listVettedPackages( + request: ListVettedPackagesRequest + ): Future[ListVettedPackagesResponse] = + delegate.listVettedPackages(request) + + override def updateVettedPackages( + request: UpdateVettedPackagesRequest + ): Future[UpdateVettedPackagesResponse] = + delegate.updateVettedPackages(request) + + override def getParticipantId(): Future[String] = + withTimeout("Get participant id", delegate.getParticipantId()) + override def listPackages(): Future[Seq[String]] = + withTimeout("List packages", delegate.listPackages()) + override def getPackage(packageId: String): Future[GetPackageResponse] = withTimeout( + s"Get package $packageId", + delegate.getPackage(packageId), + ) + override def getPackageStatus(packageId: String): Future[PackageStatus] = withTimeout( + s"Get package status $packageId", + delegate.getPackageStatus(packageId), + ) + + override def prepareSubmission( + prepareSubmissionRequest: PrepareSubmissionRequest + ): Future[PrepareSubmissionResponse] = withTimeout( + s"Prepare submission", + delegate.prepareSubmission(prepareSubmissionRequest), + ) + def executeSubmission( + executeSubmissionRequest: ExecuteSubmissionRequest + ): Future[ExecuteSubmissionResponse] = withTimeout( + s"Execute submission", + delegate.executeSubmission(executeSubmissionRequest), + ) + def executeSubmissionAndWait( + executeSubmissionAndWaitRequest: ExecuteSubmissionAndWaitRequest + ): Future[ExecuteSubmissionAndWaitResponse] = withTimeout( + s"Execute submission and wait", + delegate.executeSubmissionAndWait(executeSubmissionAndWaitRequest), + ) + def executeSubmissionAndWaitForTransaction( + executeSubmissionAndWaitForTransactionRequest: ExecuteSubmissionAndWaitForTransactionRequest + ):
Future[ExecuteSubmissionAndWaitForTransactionResponse] = withTimeout( + s"Execute submission and wait for transaction", + delegate.executeSubmissionAndWaitForTransaction(executeSubmissionAndWaitForTransactionRequest), + ) + override def getPreferredPackageVersion( + parties: Seq[Party], + packageName: String, + vettingValidAt: Option[Instant] = None, + synchronizerIdO: Option[String] = None, + ): Future[GetPreferredPackageVersionResponse] = withTimeout( + s"Get preferred package version for parties $parties, $packageName, $synchronizerIdO at $vettingValidAt", + delegate.getPreferredPackageVersion(parties, packageName, vettingValidAt, synchronizerIdO), + ) + + override def getPreferredPackages( + vettingRequirements: Map[String, Seq[Party]], + vettingValidAt: Option[Instant] = None, + synchronizerIdO: Option[String] = None, + ): Future[GetPreferredPackagesResponse] = withTimeout( + s"Get preferred package version for parties $vettingRequirements, $synchronizerIdO at $vettingValidAt", + delegate.getPreferredPackages(vettingRequirements, vettingValidAt, synchronizerIdO), + ) + + override def connectedSynchronizers(): Future[Seq[String]] = + withTimeout("Connected synchronizers", delegate.connectedSynchronizers()) + + override def allocateParty(): Future[Party] = + withTimeout("Allocate party", delegate.allocateParty()) + + override def allocateExternalPartyFromHint( + partyIdHint: Option[String] = None, + minSynchronizers: Int, + ): Future[ExternalParty] = + withTimeout( + "Allocate external party", + delegate.allocateExternalPartyFromHint(partyIdHint, minSynchronizers), + ) + + def allocateParty( + req: AllocatePartyRequest, + connectedSynchronizers: Int, + ): Future[(AllocatePartyResponse, Seq[String])] = + withTimeout("Allocate party", delegate.allocateParty(req, connectedSynchronizers)) + + override def updatePartyDetails( + req: UpdatePartyDetailsRequest + ): Future[UpdatePartyDetailsResponse] = + withTimeout("Update party details", delegate.updatePartyDetails(req)) + + override def updatePartyIdentityProviderId( + request: UpdatePartyIdentityProviderIdRequest + ): Future[UpdatePartyIdentityProviderIdResponse] = + withTimeout( + "Update party identity provider id", + delegate.updatePartyIdentityProviderId(request), + ) + + override def allocateExternalParty( + request: AllocateExternalPartyRequest, + minSynchronizers: Option[Int] = None, + ): Future[Party] = withTimeout( + s"Allocate external party", + delegate.allocateExternalParty(request, minSynchronizers), + ) + + override def allocateExternalPartyRequest( + keyPair: KeyPair, + partyIdHint: Option[String] = None, + synchronizer: String = "", + ): Future[AllocateExternalPartyRequest] = + delegate.allocateExternalPartyRequest(keyPair, partyIdHint, synchronizer) + + override def generateExternalPartyTopologyRequest( + namespacePublicKey: Array[Byte], + partyIdHint: Option[String] = None, + ): Future[GenerateExternalPartyTopologyResponse] = withTimeout( + s"Generate topology transactions to allocate external party $partyIdHint", + delegate.generateExternalPartyTopologyRequest(namespacePublicKey, partyIdHint), + ) + + override def allocateParty( + partyIdHint: Option[String] = None, + localMetadata: Option[ObjectMeta] = None, + identityProviderId: Option[String] = None, + minSynchronizers: Option[Int] = None, + userId: String = "", + ): Future[Party] = withTimeout( + s"Allocate party with hint $partyIdHint", + delegate.allocateParty(partyIdHint, localMetadata, identityProviderId, minSynchronizers, userId), + ) + + override def getParties(req:
GetPartiesRequest): Future[GetPartiesResponse] = withTimeout( + s"Get parties", + delegate.getParties(req), + ) + + override def allocateExternalParties( + n: Int, + minSynchronizers: Int, + ): Future[Vector[ExternalParty]] = withTimeout( + s"Allocate $n external parties", + delegate.allocateExternalParties(n, minSynchronizers), + ) + + override def allocateParties(n: Int, minSynchronizers: Int): Future[Vector[Party]] = withTimeout( + s"Allocate $n parties", + delegate.allocateParties(n, minSynchronizers), + ) + override def getParties(parties: Seq[Party]): Future[Seq[PartyDetails]] = withTimeout( + s"Get parties $parties", + delegate.getParties(parties), + ) + override def listKnownPartiesExpanded(): Future[Set[Party]] = withTimeout( + "List known parties", + delegate.listKnownPartiesExpanded(), + ) + override def listKnownParties(req: ListKnownPartiesRequest): Future[ListKnownPartiesResponse] = + withTimeout( + "List known parties", + delegate.listKnownParties(req), + ) + override def listKnownParties(): Future[ListKnownPartiesResponse] = withTimeout( + "List known parties", + delegate.listKnownParties(), + ) + + override def waitForPartiesOnOtherParticipants( + otherParticipants: Iterable[ParticipantTestContext], + expectedParties: Set[Party], + connectedSynchronizers: Int, + ): Future[Unit] = withTimeout( + s"Wait for parties $expectedParties on participants ${otherParticipants.map(_.ledgerEndpoint)}", + delegate.waitForPartiesOnOtherParticipants( + otherParticipants, + expectedParties, + connectedSynchronizers, + ), + ) + + override def generateExternalPartyTopology( + req: GenerateExternalPartyTopologyRequest + ): Future[GenerateExternalPartyTopologyResponse] = delegate.generateExternalPartyTopology(req) + + override def activeContracts( + request: GetActiveContractsRequest + ): Future[Vector[CreatedEvent]] = withTimeout( + s"Active contracts for request $request", + delegate.activeContracts(request), + ) + override def activeContractsRequest( + parties: Option[Seq[Party]], + activeAtOffset: Long, + templateIds: Seq[Identifier], + interfaceFilters: Seq[(Identifier, IncludeInterfaceView)] = Seq.empty, + verbose: Boolean = true, + ): GetActiveContractsRequest = + delegate.activeContractsRequest( + parties, + activeAtOffset, + templateIds, + interfaceFilters, + verbose, + ) + override def activeContracts( + parties: Option[Seq[Party]], + activeAtOffsetO: Option[Long], + verbose: Boolean = true, + ): Future[Vector[CreatedEvent]] = + withTimeout( + s"Active contracts for parties $parties", + delegate.activeContracts(parties, activeAtOffsetO, verbose), + ) + override def activeContractsByTemplateId( + templateIds: Seq[Identifier], + parties: Option[Seq[Party]], + activeAtOffsetO: Option[Long], + verbose: Boolean = true, + ): Future[Vector[CreatedEvent]] = withTimeout( + s"Active contracts by template ids $templateIds for parties $parties", + delegate.activeContractsByTemplateId(templateIds, parties, activeAtOffsetO, verbose), + ) + + def eventFormat( + verbose: Boolean, + partiesO: Option[Seq[Party]], + templateIds: Seq[Identifier] = Seq.empty, + interfaceFilters: Seq[(Identifier, IncludeInterfaceView)] = Seq.empty, + ): EventFormat = + delegate.eventFormat( + verbose, + partiesO, + templateIds, + interfaceFilters, + ) + + def transactionFormat( + parties: Option[Seq[Party]], + templateIds: Seq[Identifier] = Seq.empty, + interfaceFilters: Seq[(Identifier, IncludeInterfaceView)] = Seq.empty, + transactionShape: TransactionShape = AcsDelta, + verbose: Boolean = false, + ): TransactionFormat =
delegate.transactionFormat( + parties, + templateIds, + interfaceFilters, + transactionShape, + verbose, + ) + + override def filters( + templateIds: Seq[Identifier], + interfaceFilters: Seq[(Identifier, IncludeInterfaceView)], + ): Filters = delegate.filters(templateIds, interfaceFilters) + + override def updates( + take: Int, + request: GetUpdatesRequest, + ): Future[Vector[GetUpdatesResponse.Update]] = + delegate.updates(take, request) + + override def updates( + take: Int, + request: GetUpdatesRequest, + resultFilter: GetUpdatesResponse => Boolean, + ): Future[Vector[GetUpdatesResponse.Update]] = + delegate.updates(take, request, resultFilter) + + override def updates( + within: NonNegativeFiniteDuration, + request: GetUpdatesRequest, + ): Future[Vector[GetUpdatesResponse.Update]] = + delegate.updates(within, request) + + override def getTransactionsRequest( + transactionFormat: TransactionFormat, + begin: Long = referenceOffset, + ): Future[GetUpdatesRequest] = + delegate.getTransactionsRequest( + transactionFormat = transactionFormat, + begin = begin, + ) + + override def getTransactionsRequestWithEnd( + transactionFormat: TransactionFormat, + begin: Long = referenceOffset, + end: Option[Long], + ): GetUpdatesRequest = + delegate.getTransactionsRequestWithEnd( + transactionFormat = transactionFormat, + begin = begin, + end = end, + ) + + override def getUpdatesRequestWithEnd( + transactionFormatO: Option[TransactionFormat] = None, + reassignmentsFormatO: Option[EventFormat] = None, + topologyFilterO: Option[Seq[Party]] = None, + begin: Long = referenceOffset, + end: Option[Long] = None, + ): GetUpdatesRequest = + delegate.getUpdatesRequestWithEnd( + transactionFormatO = transactionFormatO, + reassignmentsFormatO = reassignmentsFormatO, + topologyFilterO = topologyFilterO, + begin = begin, + end = end, + ) + + override def transactionsByTemplateId( + templateId: Identifier, + parties: Option[Seq[Party]], + ): Future[Vector[Transaction]] = withTimeout( + s"Flat transaction by template id $templateId for parties $parties", + delegate.transactionsByTemplateId(templateId, parties), + ) + override def transactions( + request: GetUpdatesRequest + ): Future[Vector[Transaction]] = + withTimeout(s"Flat transactions for request $request", delegate.transactions(request)) + + override def transactions( + transactionShape: TransactionShape, + parties: Party* + ): Future[Vector[Transaction]] = + withTimeout( + s"Flat transactions for parties $parties", + delegate.transactions(transactionShape, parties*), + ) + + override def transactions( + take: Int, + request: GetUpdatesRequest, + ): Future[Vector[Transaction]] = withTimeout( + s"$take flat transactions for request $request", + delegate.transactions(take, request), + ) + override def transactions( + take: Int, + transactionShape: TransactionShape, + parties: Party* + ): Future[Vector[Transaction]] = + withTimeout( + s"$take $transactionShape transactions for parties $parties", + delegate.transactions(take, transactionShape, parties*), + ) + + override def transactionTreeById( + transactionId: String, + parties: Party* + ): Future[Transaction] = withTimeout( + s"Get transaction tree by id for transaction id $transactionId and parties $parties", + delegate.transactionTreeById(transactionId, parties*), + ) + + override def updateById(request: GetUpdateByIdRequest): Future[GetUpdateResponse] = + withTimeout( + s"Update by id for request $request", + delegate.updateById(request), + ) + + override def transactionById( + updateId: String, + parties: 
Seq[Party], + transactionShape: TransactionShape, + templateIds: Seq[Identifier], + ): Future[Transaction] = withTimeout( + s"Transaction by id for update id $updateId, parties $parties and templates $templateIds", + delegate.transactionById(updateId, parties, transactionShape, templateIds), + ) + + def topologyTransactionById( + updateId: String, + parties: Seq[Party], + ): Future[TopologyTransaction] = withTimeout( + s"Topology transaction by id for update id $updateId, parties $parties", + delegate.topologyTransactionById(updateId, parties), + ) + + override def transactionTreeByOffset( + offset: Long, + parties: Party* + ): Future[Transaction] = withTimeout( + s"Transaction tree by offset for offset $offset and parties $parties", + delegate.transactionTreeByOffset(offset, parties*), + ) + + override def updateByOffset(request: GetUpdateByOffsetRequest): Future[GetUpdateResponse] = + withTimeout( + s"Update by offset for request $request", + delegate.updateByOffset(request), + ) + + def transactionByOffset( + offset: Long, + parties: Seq[Party], + transactionShape: TransactionShape, + templateIds: Seq[Identifier], + ): Future[Transaction] = withTimeout( + s"Transaction by offset for offset $offset, parties $parties, templates $templateIds", + delegate.transactionByOffset(offset, parties, transactionShape, templateIds), + ) + + def topologyTransactionByOffset( + offset: Long, + parties: Seq[Party], + ): Future[TopologyTransaction] = withTimeout( + s"Topology transaction by offset for offset $offset, parties $parties", + delegate.topologyTransactionByOffset(offset, parties), + ) + + override def getEventsByContractId( + request: GetEventsByContractIdRequest + ): Future[GetEventsByContractIdResponse] = withTimeout( + s"Get events by contract id for request $request", + delegate.getEventsByContractId(request), + ) + + override def create[ + TCid <: ContractId[T], + T <: Template, + ]( + party: Party, + template: T, + )(implicit companion: ContractCompanion[?, TCid, T]): Future[TCid] = + withTimeout(s"Create template for party $party", delegate.create(party, template)) + override def create[TCid <: ContractId[T], T <: Template]( + actAs: List[Party], + readAs: List[Party], + template: T, + )(implicit companion: ContractCompanion[?, TCid, T]): Future[TCid] = withTimeout( + s"Create template for actAs $actAs and readAs $readAs", + delegate.create(actAs, readAs, template), + ) + override def createAndGetTransactionId[ + TCid <: ContractId[T], + T <: Template, + ]( + party: Party, + template: T, + )(implicit companion: ContractCompanion[?, TCid, T]): Future[(String, TCid)] = withTimeout( + s"Create and get transaction id for party $party", + delegate.createAndGetTransactionId(party, template), + ) + override def exercise[T]( + party: Party, + exercise: Update[T], + transactionShape: TransactionShape, + verbose: Boolean, + ): Future[Transaction] = + withTimeout( + s"Exercise for party $party", + delegate.exercise(party, exercise, transactionShape, verbose = verbose), + ) + override def exercise[T]( + actAs: List[Party], + readAs: List[Party], + exercise: Update[T], + ): Future[Transaction] = withTimeout( + s"Exercise for actAs $actAs and readAs $readAs", + delegate.exercise(actAs, readAs, exercise), + ) + override def exerciseAndGetContract[TCid <: ContractId[T], T]( + party: Party, + exercise: Update[Exercised[TCid]], + )(implicit companion: ContractCompanion[?, TCid, T]): Future[TCid] = withTimeout( + s"Exercise and get contract for party $party", + delegate.exerciseAndGetContract[TCid, 
T](party, exercise), + ) + override def exerciseAndGetContractNoDisclose[TCid <: ContractId[?]]( + party: Party, + exercise: Update[Exercised[UnitData]], + )(implicit companion: ContractCompanion[?, TCid, ?]): Future[TCid] = withTimeout( + s"Exercise and get non disclosed contract for party $party", + delegate.exerciseAndGetContractNoDisclose[TCid](party, exercise), + ) + override def exerciseByKey( + party: Party, + template: Identifier, + key: Value, + choice: String, + argument: Value, + ): Future[Transaction] = withTimeout( + s"Exercise by key for party $party, template $template, key $key, choice $choice and argument $argument.", + delegate.exerciseByKey(party, template, key, choice, argument), + ) + override def submitRequest( + actAs: List[Party], + readAs: List[Party], + commands: JList[Command], + ): SubmitRequest = delegate.submitRequest(actAs, readAs, commands) + override def submitRequest(party: Party, commands: JList[Command] = JList.of()): SubmitRequest = + delegate.submitRequest(party, commands) + override def submitAndWaitRequest( + actAs: List[Party], + readAs: List[Party], + commands: JList[Command], + ): SubmitAndWaitRequest = delegate.submitAndWaitRequest(actAs, readAs, commands) + override def submitAndWaitForTransactionRequest( + actAs: List[Party], + readAs: List[Party], + commands: JList[Command], + transactionShape: TransactionShape, + ): SubmitAndWaitForTransactionRequest = + delegate.submitAndWaitForTransactionRequest(actAs, readAs, commands, transactionShape) + override def submitAndWaitRequest( + party: Party, + commands: JList[Command], + ): SubmitAndWaitRequest = delegate.submitAndWaitRequest(party, commands) + + override def submitAndWaitForTransactionRequest( + party: Party, + commands: JList[Command], + ): SubmitAndWaitForTransactionRequest = + delegate.submitAndWaitForTransactionRequest(party, commands) + + def prepareSubmissionRequest(party: Party, commands: JList[Command]): PrepareSubmissionRequest = + delegate.prepareSubmissionRequest(party, commands) + + def executeSubmissionRequest( + party: ExternalParty, + preparedTx: PrepareSubmissionResponse, + ): ExecuteSubmissionRequest = + delegate.executeSubmissionRequest(party, preparedTx) + + def submitAndWaitForTransactionRequest( + party: Party, + commands: JList[Command], + transactionShape: TransactionShape, + filterParties: Option[Seq[Party]], + templateIds: Seq[Identifier], + verbose: Boolean, + ): SubmitAndWaitForTransactionRequest = + delegate.submitAndWaitForTransactionRequest( + party = party, + commands = commands, + transactionShape = transactionShape, + filterParties = filterParties, + templateIds = templateIds, + verbose = verbose, + ) + + override def submit(request: SubmitRequest): Future[Unit] = + withTimeout(s"Submit for request $request", delegate.submit(request)) + override def submitAndWait(request: SubmitAndWaitRequest): Future[SubmitAndWaitResponse] = + withTimeout( + s"Submit and wait for request $request", + delegate.submitAndWait(request), + ) + override def submitAndWaitForTransaction( + request: SubmitAndWaitForTransactionRequest + ): Future[SubmitAndWaitForTransactionResponse] = withTimeout( + s"Submit and wait for transaction request $request", + delegate.submitAndWaitForTransaction(request), + ) + override def submitRequestAndTolerateGrpcError[T]( + errorToTolerate: ErrorCode, + submitAndWaitGeneric: ParticipantTestContext => Future[T], + ): Future[T] = // timeout enforced by submitAndWaitGeneric + delegate.submitRequestAndTolerateGrpcError(errorToTolerate, 
submitAndWaitGeneric) + + override def completions( + within: NonNegativeFiniteDuration, + request: CompletionStreamRequest, + ): Future[Vector[CompletionStreamResponse.CompletionResponse]] = + delegate.completions(within, request) + + override def completionStreamRequest(from: Long)( + parties: Party* + ): CompletionStreamRequest = delegate.completionStreamRequest(from)(parties*) + + override def firstCompletions(request: CompletionStreamRequest): Future[Vector[Completion]] = + withTimeout( + s"First completions for request $request", + delegate.firstCompletions(request), + ) + override def firstCompletions(parties: Party*): Future[Vector[Completion]] = + withTimeout( + s"First completions for parties $parties", + delegate.firstCompletions(parties*), + ) + override def findCompletionAtOffset(offset: Long, p: Completion => Boolean)( + parties: Party* + ): Future[Option[Completion]] = withTimeout( + s"Find completion at offset $offset for parties $parties", + delegate.findCompletionAtOffset(offset, p)(parties*), + ) + override def findCompletion(request: CompletionStreamRequest)( + p: Completion => Boolean + ): Future[Option[Completion]] = withTimeout( + s"Find completion for request $request", + delegate.findCompletion(request)(p), + ) + override def findCompletion(parties: Party*)( + p: Completion => Boolean + ): Future[Option[Completion]] = + withTimeout(s"Find completion for parties $parties", delegate.findCompletion(parties*)(p)) + override def offsets(n: Int, request: CompletionStreamRequest): Future[Vector[Long]] = + withTimeout(s"$n checkpoints for request $request", delegate.offsets(n, request)) + override def checkHealth(): Future[HealthCheckResponse] = + withTimeout("Check health", delegate.checkHealth()) + override def watchHealth(): Future[Seq[HealthCheckResponse]] = + withTimeout("Watch health", delegate.watchHealth()) + + private[infrastructure] override def preallocateParties( + n: Int, + participants: Iterable[ParticipantTestContext], + connectedSynchronizers: Int, + ): Future[Vector[Party]] = withTimeout( + s"Preallocate $n parties on participants ${participants.map(_.ledgerEndpoint)}", + delegate.preallocateParties(n, participants, connectedSynchronizers), + ) + + override def getConnectedSynchronizers( + party: Option[Party], + participantId: Option[String], + identityProviderId: Option[String] = None, + ): Future[Set[String]] = withTimeout( + s"Querying connected synchronizers of a party $party", + delegate.getConnectedSynchronizers(party, participantId, identityProviderId), + ) + + override def prune( + pruneUpTo: Long, + attempts: Int, + ): Future[PruneResponse] = withTimeout( + s"Prune up to $pruneUpTo, with $attempts attempts", + delegate.prune(pruneUpTo, attempts), + ) + + override def pruneCantonSafe( + pruneUpTo: Long, + party: Party, + dummyCommand: Party => JList[Command], + )(implicit ec: ExecutionContext): Future[Unit] = + delegate.pruneCantonSafe(pruneUpTo, party, dummyCommand) + + private def withTimeout[T](hint: String, future: => Future[T]): Future[T] = + Future.firstCompletedOf( + Seq( + Delayed.Future.by(timeoutDuration)( + Future.failed( + new TimeoutException(s"Operation [$hint] timed out after $timeoutDuration.") + ) + ), + future, + ) + ) + + override def latestPrunedOffsets(): Future[(Long, Long)] = withTimeout( + "Requesting the latest pruned offsets", + delegate.latestPrunedOffsets(), + ) + + override def maxOffsetCheckpointEmissionDelay: NonNegativeFiniteDuration = + delegate.maxOffsetCheckpointEmissionDelay + + override def 
executeSubmissionAndWaitRequest( + party: ExternalParty, + preparedTx: PrepareSubmissionResponse, + ): ExecuteSubmissionAndWaitRequest = + delegate.executeSubmissionAndWaitRequest(party, preparedTx) + + def executeSubmissionAndWaitForTransactionRequest( + party: ExternalParty, + preparedTx: PrepareSubmissionResponse, + transactionFormat: Option[TransactionFormat], + ): ExecuteSubmissionAndWaitForTransactionRequest = + delegate.executeSubmissionAndWaitForTransactionRequest(party, preparedTx, transactionFormat) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/UserManagementTestContext.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/UserManagementTestContext.scala new file mode 100644 index 0000000000..309049d6f3 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/UserManagementTestContext.scala @@ -0,0 +1,192 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.infrastructure.participant + +import com.daml.ledger.api.testtool.infrastructure.LedgerServices +import com.daml.ledger.api.v2.admin.identity_provider_config_service.{ + CreateIdentityProviderConfigRequest, + CreateIdentityProviderConfigResponse, + DeleteIdentityProviderConfigRequest, + DeleteIdentityProviderConfigResponse, + GetIdentityProviderConfigRequest, + GetIdentityProviderConfigResponse, + IdentityProviderConfig, + ListIdentityProviderConfigsRequest, + ListIdentityProviderConfigsResponse, + UpdateIdentityProviderConfigRequest, + UpdateIdentityProviderConfigResponse, +} +import com.daml.ledger.api.v2.admin.user_management_service.UserManagementServiceGrpc.UserManagementService +import com.daml.ledger.api.v2.admin.user_management_service.{ + CreateUserRequest, + CreateUserResponse, + DeleteUserRequest, + DeleteUserResponse, + User, +} +import com.digitalasset.base.error.utils.ErrorDetails +import com.digitalasset.canton.ledger.error.groups.AdminServiceErrors +import com.google.protobuf.field_mask.FieldMask + +import java.util.UUID +import java.util.concurrent.ConcurrentHashMap +import scala.concurrent.{ExecutionContext, Future} + +trait UserManagementTestContext { + self: ParticipantTestContext => + + private[participant] def services: LedgerServices + + private[participant] implicit val ec: ExecutionContext + + /** Users created during execution of the test case on this participant. + */ + private val createdUsersById = new ConcurrentHashMap[String, User] + private val createdIdentityProvidersById = new ConcurrentHashMap[String, IdentityProviderConfig] + + def userManagement: UserManagementService = + services.userManagement + + /** Creates a new user. + * + * Additionally keeps track of the created users so that they can be cleaned up automatically + * when the test case ends. + */ + def createUser(createUserRequest: CreateUserRequest): Future[CreateUserResponse] = + for { + response <- services.userManagement.createUser(createUserRequest) + user = response.user.get + _ = createdUsersById.put(user.id, user) + } yield response + + /** Deletes a user. + * + * Additionally removes the user from the tracked users so that it is not cleaned up again + when the test case ends.
+ */ + def deleteUser(request: DeleteUserRequest): Future[DeleteUserResponse] = + for { + response <- services.userManagement.deleteUser(request) + _ = createdUsersById.remove(request.userId) + } yield response + + def deleteCreateIdentityProviders(): Future[Unit] = { + import scala.jdk.CollectionConverters.* + val deletions = createdIdentityProvidersById + .keys() + .asScala + .map(idpId => + services.identityProviderConfig + .deleteIdentityProviderConfig( + DeleteIdentityProviderConfigRequest(idpId) + ) + .map(_ => ()) + .recover { + case e + if ErrorDetails.matches( + e, + AdminServiceErrors.IdentityProviderConfig.IdentityProviderConfigNotFound, + ) => + () + } + ) + Future.sequence(deletions).map(_ => ()) + } + + /** Intended to be called by the infrastructure code after a test case's execution has ended. + */ + def deleteCreatedUsers(): Future[Unit] = { + import scala.jdk.CollectionConverters.* + val deletions = createdUsersById + .keys() + .asScala + .map(userId => + services.userManagement + .deleteUser( + DeleteUserRequest(userId, "") + ) + .map(_ => ()) + .recover { + case e if ErrorDetails.matches(e, AdminServiceErrors.UserManagement.UserNotFound) => + () + } + ) + Future.sequence(deletions).map(_ => ()) + } + + def createIdentityProviderConfig( + identityProviderId: String = UUID.randomUUID().toString, + isDeactivated: Boolean = false, + issuer: String = UUID.randomUUID().toString, + jwksUrl: String = "http://daml.com/jwks.json", + ): Future[CreateIdentityProviderConfigResponse] = + for { + response <- services.identityProviderConfig.createIdentityProviderConfig( + CreateIdentityProviderConfigRequest( + Some( + IdentityProviderConfig( + identityProviderId = identityProviderId, + isDeactivated = isDeactivated, + issuer = issuer, + jwksUrl = jwksUrl, + audience = "", + ) + ) + ) + ) + idp = response.identityProviderConfig.get + _ = createdIdentityProvidersById.put(idp.identityProviderId, idp) + } yield response + + def updateIdentityProviderConfig( + identityProviderId: String = UUID.randomUUID().toString, + isDeactivated: Boolean = false, + issuer: String = UUID.randomUUID().toString, + jwksUrl: String = "http://daml.com/jwks.json", + updateMask: Option[FieldMask] = None, + ): Future[UpdateIdentityProviderConfigResponse] = + services.identityProviderConfig.updateIdentityProviderConfig( + UpdateIdentityProviderConfigRequest( + Some( + IdentityProviderConfig( + identityProviderId = identityProviderId, + isDeactivated = isDeactivated, + issuer = issuer, + jwksUrl = jwksUrl, + audience = "", + ) + ), + updateMask, + ) + ) + + def updateIdentityProviderConfig( + request: UpdateIdentityProviderConfigRequest + ): Future[UpdateIdentityProviderConfigResponse] = + services.identityProviderConfig.updateIdentityProviderConfig(request) + + def createIdentityProviderConfig( + request: CreateIdentityProviderConfigRequest + ): Future[CreateIdentityProviderConfigResponse] = + services.identityProviderConfig.createIdentityProviderConfig(request) + + def getIdentityProviderConfig( + request: GetIdentityProviderConfigRequest + ): Future[GetIdentityProviderConfigResponse] = + services.identityProviderConfig.getIdentityProviderConfig(request) + + def deleteIdentityProviderConfig( + request: DeleteIdentityProviderConfigRequest + ): Future[DeleteIdentityProviderConfigResponse] = + for { + resp <- services.identityProviderConfig.deleteIdentityProviderConfig(request) + _ = createdIdentityProvidersById.remove(request.identityProviderId) + } yield resp + + def listIdentityProviderConfig(): 
Future[ListIdentityProviderConfigsResponse] = + services.identityProviderConfig.listIdentityProviderConfigs( + ListIdentityProviderConfigsRequest() + ) + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/time/DelayMechanism.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/time/DelayMechanism.scala new file mode 100644 index 0000000000..8ba61359da --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/time/DelayMechanism.scala @@ -0,0 +1,29 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.infrastructure.time + +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.timer.Delayed + +import scala.concurrent.duration.Duration +import scala.concurrent.{ExecutionContext, Future} + +trait DelayMechanism { + def delayBy(duration: Duration): Future[Unit] +} + +class TimeDelayMechanism()(implicit ec: ExecutionContext) extends DelayMechanism { + override def delayBy(duration: Duration): Future[Unit] = Delayed.by(duration)(()) +} + +class StaticTimeDelayMechanism(ledger: ParticipantTestContext)(implicit + ec: ExecutionContext +) extends DelayMechanism { + override def delayBy(duration: Duration): Future[Unit] = + ledger + .time() + .flatMap { currentTime => + ledger.setTime(currentTime, currentTime.plusMillis(duration.toMillis)) + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/time/Durations.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/time/Durations.scala new file mode 100644 index 0000000000..3d2d418668 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/time/Durations.scala @@ -0,0 +1,22 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.infrastructure.time + +import scala.concurrent.duration.{Duration, FiniteDuration} + +object Durations { + + def scaleDuration(duration: FiniteDuration, timeoutScaleFactor: Double): FiniteDuration = + asFiniteDuration( + duration * timeoutScaleFactor + ) + + def asFiniteDuration(duration: Duration): FiniteDuration = + duration match { + case duration: FiniteDuration => duration + case _ => + throw new IllegalArgumentException(s"Duration $duration is not finite.") + } + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/ws/WsHelper.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/ws/WsHelper.scala new file mode 100644 index 0000000000..d9bf886775 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/ws/WsHelper.scala @@ -0,0 +1,70 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
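+// The pipe defined below is a client-side WebSocket adapter whose only behavioural difference
+// from the upstream Pekko pipe is tolerating a detached stream. A rough standalone sketch of
+// that recovery step, with illustrative names rather than this file's exact API:
+//
+//   responses
+//     .recover { case _: org.apache.pekko.stream.StreamDetachedException => None }
+//     .collect { case Some(frame) => frame }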
+ +package com.daml.ledger.api.testtool.infrastructure.ws + +import org.apache.pekko.stream.scaladsl.{Flow, Sink, Source} +import sttp.capabilities.WebSockets +import sttp.capabilities.pekko.PekkoStreams +import sttp.tapir.client.sttp.WebSocketToPipe +import sttp.tapir.model.WebSocketFrameDecodeFailure +import sttp.tapir.{DecodeResult, WebSocketBodyOutput} +import sttp.ws.{WebSocket, WebSocketFrame} + +import scala.concurrent.{ExecutionContext, Future} + +/* This is a copy of sttp.tapir.client.sttp.ws.pekkohttp.WebSocketToPekkoPipe + with the change that org.apache.pekko.stream.StreamDetachedException is ignored. + This allows client-side message processing to continue even though the stream has been closed. + */ +class WebSocketToPekkoPipe[R](implicit ec: ExecutionContext) extends WebSocketToPipe[R] { + override type S = PekkoStreams + override type F[X] = Future[X] + + override def apply[REQ, RESP]( + s: Any + )(ws: WebSocket[Future], o: WebSocketBodyOutput[Any, REQ, RESP, _, PekkoStreams]): Any = { + + val sink = Flow[REQ] + .map(o.requests.encode) + .mapAsync(1)(ws.send(_, isContinuation = false)) // TODO support fragmented frames + .to(Sink.ignore) + + val source = Source + .repeat(() => ws.receive()) + .mapAsync(1)(lazyFuture => lazyFuture()) + .mapAsync(1) { + case _: WebSocketFrame.Close if !o.decodeCloseResponses => + Future.successful(Right(None): Either[Unit, Option[RESP]]) + case _: WebSocketFrame.Pong if o.ignorePong => + Future.successful(Left(()): Either[Unit, Option[RESP]]) + case WebSocketFrame.Ping(p) if o.autoPongOnPing => + ws.send(WebSocketFrame.Pong(p)).map(_ => Left(()): Either[Unit, Option[RESP]]) + case f => + o.responses.decode(f) match { + case failure: DecodeResult.Failure => + Future.failed(new WebSocketFrameDecodeFailure(f, failure)) + case DecodeResult.Value(v) => + Future.successful(Right(Some(v)): Either[Unit, Option[RESP]]) + } + } + .collect { case Right(d) => d } + .takeWhile(_.isDefined) + .recover { error => + error match { + case _: org.apache.pekko.stream.StreamDetachedException => None + case other => throw other + } + } + .collect { case Some(d) => d } + + Flow.fromSinkAndSource(sink, source): Flow[REQ, RESP, Any] + } +} + +object WsHelper { + implicit def webSocketsSupportedForPekkoStreams(implicit + ec: ExecutionContext + ): WebSocketToPipe[PekkoStreams with WebSockets] = + new WebSocketToPekkoPipe[PekkoStreams with WebSockets] +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/runner/AvailableTests.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/runner/AvailableTests.scala new file mode 100644 index 0000000000..62ae2945e7 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/runner/AvailableTests.scala @@ -0,0 +1,12 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved.
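+// A hypothetical implementor of the trait below (the object name is illustrative; the suite
+// name is borrowed from the v2.1 suites later in this change):
+//
+//   object ExampleTests extends AvailableTests {
+//     override def defaultTests: Vector[LedgerTestSuite] = Vector(new SemanticTests)
+//     override def optionalTests: Vector[LedgerTestSuite] = Vector.empty
+//   }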
+ +package com.daml.ledger.api.testtool.runner + +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite + +trait AvailableTests { + def defaultTests: Vector[LedgerTestSuite] + + def optionalTests: Vector[LedgerTestSuite] +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/runner/Config.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/runner/Config.scala new file mode 100644 index 0000000000..c5ae644c90 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/runner/Config.scala @@ -0,0 +1,70 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.runner + +import com.daml.ledger.api.testtool.infrastructure.PartyAllocationConfiguration +import com.daml.ledger.api.testtool.runner +import com.digitalasset.canton.config.TlsClientConfig + +import scala.concurrent.duration.FiniteDuration +import scala.util.matching.Regex + +final case class Config( + jsonApiMode: Boolean, + participantsEndpoints: Vector[(String, Int)], + participantsAdminEndpoints: Vector[(String, Int)], + maxConnectionAttempts: Int, + mustFail: Boolean, + verbose: Boolean, + timeoutScaleFactor: Double, + concurrentTestRuns: Int, + extract: Boolean, + tlsConfig: Option[TlsClientConfig], + excluded: Set[String], + included: Set[String], + additional: Set[String], + listTests: Boolean, + listTestSuites: Boolean, + shuffleParticipants: Boolean, + partyAllocation: PartyAllocationConfiguration, + ledgerClockGranularity: FiniteDuration, + skipDarNamesPattern: Option[Regex], + reportOnFailuresOnly: Boolean, + connectedSynchronizers: Int, +) { + def withTlsConfig(modify: TlsClientConfig => TlsClientConfig): Config = { + val base = tlsConfig.getOrElse(TlsClientConfig(None, None)) + copy(tlsConfig = Some(modify(base))) + } +} + +object Config { + val DefaultTestDarExclusions = new Regex( + ".*upgrade-tests.*|ongoing-stream-package-upload-tests.*" + ) + + val default: Config = Config( + jsonApiMode = false, + participantsEndpoints = Vector.empty, + participantsAdminEndpoints = Vector.empty, + maxConnectionAttempts = 10, + mustFail = false, + verbose = false, + timeoutScaleFactor = Defaults.TimeoutScaleFactor, + concurrentTestRuns = runner.Defaults.ConcurrentRuns, + extract = false, + tlsConfig = None, + excluded = Set.empty, + included = Set.empty, + additional = Set.empty, + listTests = false, + listTestSuites = false, + shuffleParticipants = false, + partyAllocation = PartyAllocationConfiguration.ClosedWorldWaitingForAllParticipants, + ledgerClockGranularity = runner.Defaults.LedgerClockGranularity, + skipDarNamesPattern = Some(DefaultTestDarExclusions), + reportOnFailuresOnly = false, + connectedSynchronizers = 1, + ) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/runner/ConfiguredTests.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/runner/ConfiguredTests.scala new file mode 100644 index 0000000000..dce8a97510 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/runner/ConfiguredTests.scala @@ -0,0 +1,23 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
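+// Config below is a plain case class, so runs are configured by copying the defaults. A hedged
+// example (the endpoint, prefix, and scale values are illustrative):
+//
+//   val cfg = Config.default.copy(
+//     participantsEndpoints = Vector(("localhost", 5011)),
+//     included = Set("CommandServiceIT"),
+//     timeoutScaleFactor = 2.0,
+//   )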
+ +package com.daml.ledger.api.testtool.runner + +import com.daml.ledger.api.testtool.infrastructure.{LedgerTestCase, LedgerTestSuite} + +final class ConfiguredTests(availableTests: AvailableTests, config: Config) { + val defaultTests: Vector[LedgerTestSuite] = availableTests.defaultTests + val optionalTests: Vector[LedgerTestSuite] = availableTests.optionalTests + + val allTests: Vector[LedgerTestSuite] = defaultTests ++ optionalTests + val missingTests: Set[String] = { + val allTestCaseNames = allTests.flatMap(_.tests).map(_.name).toSet + (config.included ++ config.excluded ++ config.additional).filterNot(prefix => + allTestCaseNames.exists(_.startsWith(prefix)) + ) + } + + val defaultCases: Vector[LedgerTestCase] = defaultTests.flatMap(_.tests) + val optionalCases: Vector[LedgerTestCase] = optionalTests.flatMap(_.tests) + val allCases: Vector[LedgerTestCase] = defaultCases ++ optionalCases +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/runner/Defaults.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/runner/Defaults.scala new file mode 100644 index 0000000000..1f80ca80cc --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/runner/Defaults.scala @@ -0,0 +1,18 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.runner + +import scala.concurrent.duration.{DurationInt, FiniteDuration} + +object Defaults { + + val LedgerClockGranularity: FiniteDuration = 1.second + + val TimeoutScaleFactor: Double = 1.0 + + // Neither ledgers nor participants scale perfectly with the number of processors. + // We therefore limit the maximum number of concurrent tests, to avoid overwhelming the ledger. + val ConcurrentRuns: Int = sys.runtime.availableProcessors min 4 + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/runner/TestRunner.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/runner/TestRunner.scala new file mode 100644 index 0000000000..639eda8438 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/runner/TestRunner.scala @@ -0,0 +1,273 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
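+// Test selection in the class below is prefix-based: an included, excluded, or additional entry
+// is reported as missing only when no test case name starts with it. An illustrative sketch of
+// that check (names are not this file's API):
+//
+//   val unmatched = requestedPrefixes.filterNot(p => allTestCaseNames.exists(_.startsWith(p)))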
+ +package com.daml.ledger.api.testtool.runner + +import com.daml.ledger.api.testtool.infrastructure.* +import com.daml.ledger.api.testtool.runner.TestRunner.* +import com.digitalasset.canton.concurrent.Threading +import com.digitalasset.canton.config.TlsClientConfig +import com.digitalasset.canton.networking.grpc.ClientChannelBuilder +import com.typesafe.scalalogging.Logger +import io.grpc.Channel +import io.grpc.netty.shaded.io.grpc.netty.{NegotiationType, NettyChannelBuilder} +import org.slf4j.LoggerFactory + +import java.io.File +import java.nio.file.{Files, Paths, StandardCopyOption} +import java.util.concurrent.Executors +import scala.concurrent.duration.DurationInt +import scala.concurrent.{ExecutionContext, Future} +import scala.util.{Failure, Success} + +object TestRunner { + + private type ResourceOwner[T] = com.daml.resources.AbstractResourceOwner[ExecutionContext, T] + private type Resource[T] = com.daml.resources.Resource[ExecutionContext, T] + private val Resource = new com.daml.resources.ResourceFactories[ExecutionContext] + + private val logger = LoggerFactory.getLogger(getClass.getName.stripSuffix("$")) + + // The suffix that will be appended to all party and command identifiers to ensure + // they are unique across test runs (but still somewhat stable within a single test run) + // This implementation could fail based on the limitations of System.nanoTime, which you + // can read about here: https://docs.oracle.com/javase/8/docs/api/java/lang/System.html#nanoTime-- + // Still, the only way in which this can fail is if two test runs target the same ledger + // with the identifier suffix being computed to the same value, which at the very least + // requires this to happen on what is resolved by the JVM as the very same millisecond. + // This is very unlikely to fail, and it makes it easy to "date" parties on a ledger used + // for testing and to compare data related to subsequent runs without any reference + private val identifierSuffix = f"${System.nanoTime}%x" + + private val uncaughtExceptionErrorMessage = + "UNEXPECTED UNCAUGHT EXCEPTION ON MAIN THREAD, GATHER THE STACKTRACE AND OPEN A _DETAILED_ TICKET DESCRIBING THE ISSUE HERE: https://github.com/digital-asset/daml/issues/new" + + private def exitCode(summaries: Vector[LedgerTestSummary], expectFailure: Boolean): Int = + if (summaries.exists(_.result.isLeft) == expectFailure) 0 else 1 + + private def printListOfTests[A](tests: Seq[A])(getName: A => String): Unit = { + println("All tests are run by default.") + println() + tests.map(getName).sorted.foreach(println(_)) + } + + private def printAvailableTestSuites(testSuites: Vector[LedgerTestSuite]): Unit = { + println("Listing test suites. Run with --list-all to see individual tests.") + printListOfTests(testSuites)(_.name) + } + + private def printAvailableTests(testSuites: Vector[LedgerTestSuite]): Unit = { + println("Listing all tests.
Run with --list to only see test suites.") + printListOfTests(testSuites.flatMap(_.tests))(_.name) + } + + private def extractResources(resources: Seq[String]): Unit = { + val pwd = Paths.get(".").toAbsolutePath + println(s"Extracting all Daml resources necessary to run the tests into $pwd.") + for (resource <- resources) { + val is = getClass.getClassLoader.getResourceAsStream(resource) + if (is == null) sys.error(s"Could not find $resource in classpath") + val targetFile = new File(new File(resource).getName) + Files.copy(is, targetFile.toPath, StandardCopyOption.REPLACE_EXISTING) + println(s"Extracted $resource to $targetFile") + } + } + + private def matches(prefixes: Iterable[String])(test: LedgerTestCase): Boolean = + prefixes.exists(test.name.startsWith) +} + +final class TestRunner(availableTests: AvailableTests, config: Config, lfVersion: String) { + + implicit val resourceManagementExecutionContext: ExecutionContext = + ExecutionContext.fromExecutorService(Executors.newSingleThreadExecutor()) + + def runAndExit(): Unit = { + val (result, excludedTests) = runInProcess() + result.onComplete { + case Success(summaries) => + sys.exit(exitCode(summaries, config.mustFail)) + case Failure(exception: Errors.FrameworkException) => + logger.error(exception.getMessage) + logger.debug(exception.getMessage, exception) + sys.exit(1) + case Failure(exception) => + logger.error(exception.getMessage, exception) + sys.exit(1) + + } + } + + def runInProcess(): (Future[Vector[LedgerTestSummary]], Vector[LedgerTestCase]) = { + val tests = new ConfiguredTests(availableTests, config) + + if (tests.missingTests.nonEmpty) { + println("The following exclusion or inclusion does not match any test:") + tests.missingTests.foreach { testName => + println(s" - $testName") + } + sys.exit(64) + } + + if (config.listTestSuites) { + printAvailableTestSuites(tests.allTests) + sys.exit(0) + } + + if (config.listTests) { + printAvailableTests(tests.allTests) + sys.exit(0) + } + + if (config.extract) { + extractResources(Dars.resources(lfVersion)) + sys.exit(0) + } + + if (config.participantsEndpoints.isEmpty) { + logger.error("No participant to test, exiting.") + sys.exit(1) + } + + Thread + .currentThread() + .setUncaughtExceptionHandler { (_, exception) => + logger.error(uncaughtExceptionErrorMessage, exception) + sys.exit(1) + } + + val includedTests = + if (config.included.isEmpty) tests.defaultCases + else tests.allCases.filter(matches(config.included)) + + val addedTests = tests.allCases.filter(matches(config.additional)) + + val (excludedTests, testsToRun) = + (includedTests ++ addedTests).partition(matches(config.excluded)) + + val runner = newLedgerCasesRunner(config, testsToRun) + val testsF = runner.asFuture + .flatMap( + _.runTests(Threading.newExecutionContext("TestRunner", Logger(logger))) + .transformWith { + case scala.util.Success(summaries) => runner.release().map(_ => summaries) + case scala.util.Failure(error) => runner.release().flatMap(_ => Future.failed(error)) + } + ) + .map { summaries => + val excludedTestSummaries = + excludedTests.map { ledgerTestCase => + LedgerTestSummary( + suite = ledgerTestCase.suite.name, + name = ledgerTestCase.name, + description = ledgerTestCase.description, + result = Right(Result.Excluded("excluded test")), + ) + } + new Reporter.ColorizedPrintStreamReporter( + System.out, + config.verbose, + config.reportOnFailuresOnly, + ).report( + summaries, + excludedTestSummaries, + Seq( + "identifierSuffix" -> identifierSuffix, + "concurrentTestRuns" -> 
config.concurrentTestRuns.toString, + "timeoutScaleFactor" -> config.timeoutScaleFactor.toString, + ), + ) + summaries + } + + (testsF, excludedTests) + } + + private def newLedgerCasesRunner( + config: Config, + cases: Vector[LedgerTestCase], + ): Resource[LedgerTestCasesRunner] = + createLedgerCasesRunner(config, cases, config.concurrentTestRuns) + + private def createLedgerCasesRunner( + config: Config, + cases: Vector[LedgerTestCase], + concurrentTestRuns: Int, + ): Resource[LedgerTestCasesRunner] = + if (config.jsonApiMode) { + initializeParticipantChannels( + participantEndpoints = config.participantsAdminEndpoints, + tlsConfig = config.tlsConfig, + ).map(adminChannels => + new LedgerTestCasesRunner( + testCases = cases, + participantChannels = Left(config.participantsEndpoints), + participantAdminChannels = adminChannels, + maxConnectionAttempts = config.maxConnectionAttempts, + skipDarNamesPattern = config.skipDarNamesPattern, + partyAllocation = config.partyAllocation, + shuffleParticipants = config.shuffleParticipants, + timeoutScaleFactor = config.timeoutScaleFactor, + concurrentTestRuns = concurrentTestRuns, + identifierSuffix = identifierSuffix, + lfVersion = lfVersion, + connectedSynchronizers = config.connectedSynchronizers, + ) + ) + } else { + for { + lapiChannels <- initializeParticipantChannels( + participantEndpoints = config.participantsEndpoints, + tlsConfig = config.tlsConfig, + ) + adminChannels <- initializeParticipantChannels( + participantEndpoints = config.participantsAdminEndpoints, + tlsConfig = config.tlsConfig, + ) + } yield { + new LedgerTestCasesRunner( + testCases = cases, + participantChannels = Right(lapiChannels), + participantAdminChannels = adminChannels, + maxConnectionAttempts = config.maxConnectionAttempts, + skipDarNamesPattern = config.skipDarNamesPattern, + partyAllocation = config.partyAllocation, + shuffleParticipants = config.shuffleParticipants, + timeoutScaleFactor = config.timeoutScaleFactor, + concurrentTestRuns = concurrentTestRuns, + identifierSuffix = identifierSuffix, + lfVersion = lfVersion, + connectedSynchronizers = config.connectedSynchronizers, + ) + } + } + + private def initializeParticipantChannel( + host: String, + port: Int, + tlsConfig: Option[TlsClientConfig], + ): ResourceOwner[Channel] = { + logger.info(s"Setting up managed channel to participant at $host:$port...") + val channelBuilder = NettyChannelBuilder.forAddress(host, port).usePlaintext() + for (ssl <- tlsConfig; sslContext = ClientChannelBuilder.sslContext(ssl)) { + logger.info("Setting up managed channel with transport security.") + channelBuilder + .useTransportSecurity() + .sslContext(sslContext) + .negotiationType(NegotiationType.TLS) + } + channelBuilder.maxInboundMessageSize(10000000) + ResourceOwner.forChannel(channelBuilder, shutdownTimeout = 5.seconds) + } + + private def initializeParticipantChannels( + participantEndpoints: Vector[(String, Int)], + tlsConfig: Option[TlsClientConfig], + ): Resource[Vector[ChannelEndpoint]] = + Resource.sequence(participantEndpoints.map { case (host, port) => + initializeParticipantChannel(host, port, tlsConfig) + .acquire() + .map(channel => ChannelEndpoint.forRemote(channel = channel, hostname = host, port = port)) + }) + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1.scala new file mode 100644 index 0000000000..8fe965c1a8 --- 
/dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1.scala @@ -0,0 +1,77 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites + +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.api.testtool.suites.v2_1.objectmeta.{ + PartyManagementServiceObjectMetaIT, + UserManagementServiceObjectMetaIT, +} +import com.digitalasset.canton.config.TlsClientConfig + +package object v2_1 { + def default(timeoutScaleFactor: Double): Vector[LedgerTestSuite] = + Vector( + new ActiveContractsServiceIT, + new CheckpointInTailingStreamsIT, + new ClosedWorldIT, + new CommandDeduplicationIT(timeoutScaleFactor), + new CommandDeduplicationParallelIT, + new CommandDeduplicationPeriodValidationIT, + new CommandServiceIT, + new CommandSubmissionCompletionIT, + new CompletionDeduplicationInfoIT(CompletionDeduplicationInfoIT.CommandService), + new CompletionDeduplicationInfoIT(CompletionDeduplicationInfoIT.CommandSubmissionService), + new ContractIdIT, + new DamlValuesIT, + new DeeplyNestedValueIT, + new DivulgenceIT, + new EventQueryServiceIT, + new ExplicitDisclosureIT, + new HealthServiceIT, + new IdentityProviderConfigServiceIT, + new InteractiveSubmissionServiceIT, + new InterfaceIT, + new InterfaceSubscriptionsIT, + new InterfaceSubscriptionsWithEventBlobsIT, + new LimitsIT, + new MultiPartySubmissionIT, + new PackageManagementServiceIT, + new PackageServiceIT, + new ParticipantPruningIT, + new PartyManagementServiceIT, + new ExternalPartyManagementServiceIT, + new PartyManagementServiceObjectMetaIT, + new PartyManagementServiceUpdateRpcIT, + new SemanticTests, + new StateServiceIT, + new TimeServiceIT, + new TransactionServiceArgumentsIT, + new TransactionServiceAuthorizationIT, + new TransactionServiceCorrectnessIT, + new TransactionServiceExerciseIT, + new TransactionServiceFiltersIT, + new TransactionServiceOutputsIT, + new UpdateServiceQueryIT, + new TransactionServiceStakeholdersIT, + new TransactionServiceValidationIT, + new TransactionServiceVisibilityIT, + new UpdateServiceStreamsIT, + new UpdateServiceTopologyEventsIT, + new UpgradingIT, + new UserManagementServiceIT, + new UserManagementServiceObjectMetaIT, + new UserManagementServiceUpdateRpcIT, + new ValueLimitsIT, + new WitnessesIT, + new WronglyTypedContractIdIT, + new VettingIT, + ) + + def optional(tlsConfiguration: Option[TlsClientConfig]): Vector[LedgerTestSuite] = + Vector( + new TLSOnePointThreeIT(tlsConfiguration), + new TLSAtLeastOnePointTwoIT(tlsConfiguration), + ) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/ActiveContractsServiceIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/ActiveContractsServiceIT.scala new file mode 100644 index 0000000000..9a46c7513c --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/ActiveContractsServiceIT.scala @@ -0,0 +1,1095 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
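+
+// This suite exercises the ActiveContractsService end to end: snapshot contents,
+// template and party-wildcard filtering, verbosity, offset handling, witness and
+// divulgence visibility, and the interaction with participant pruning.
+//
+// Most queries below are variations of one request shape (a sketch with
+// illustrative values; the real requests are built via the test context helpers):
+//
+//   GetActiveContractsRequest(
+//     eventFormat = Some(
+//       EventFormat(
+//         filtersByParty = Map(party -> Filters(Nil)), // Filters(Nil): no template filter
+//         filtersForAnyParty = None, // or Some(Filters(...)) to match any party
+//         verbose = false,
+//       )
+//     ),
+//     activeAtOffset = ledgerEnd, // the snapshot is taken at this offset
+//   )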
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, Party, TestConstraints} +import com.daml.ledger.api.v2.event.Event.Event.Created +import com.daml.ledger.api.v2.event.{CreatedEvent, Event} +import com.daml.ledger.api.v2.state_service.GetActiveContractsRequest +import com.daml.ledger.api.v2.transaction_filter.CumulativeFilter.IdentifierFilter +import com.daml.ledger.api.v2.transaction_filter.{ + CumulativeFilter, + EventFormat, + Filters, + TemplateFilter, + WildcardFilter, +} +import com.daml.ledger.api.v2.value.Identifier +import com.daml.ledger.javaapi.data.codegen.ContractId +import com.daml.ledger.javaapi.data.{Identifier as JavaIdentifier, Template} +import com.daml.ledger.test.java.model.test.{ + Divulgence1, + Divulgence2, + Dummy, + DummyFactory, + DummyWithParam, + TriAgreement, + TriProposal, + WithObservers, + Witnesses as TestWitnesses, +} +import com.digitalasset.canton.ledger.api.TransactionShape.{AcsDelta, LedgerEffects} +import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors + +import scala.concurrent.{ExecutionContext, Future} +import scala.jdk.CollectionConverters.* +import scala.util.Random + +class ActiveContractsServiceIT extends LedgerTestSuite { + import CompanionImplicits.* + + test( + "ACSemptyResponse", + "The ActiveContractService should succeed with an empty response if no contracts have been created for a party", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + activeContracts <- ledger.activeContracts(Some(Seq(party))) + } yield { + assert( + activeContracts.isEmpty, + s"There should be no active contracts, but received $activeContracts", + ) + } + }) + + test( + "ACSallContracts", + "The ActiveContractService should return all active contracts", + allocate(SingleParty), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + offsetAtTestStart <- ledger.currentEnd() + (dummy, dummyWithParam, dummyFactory) <- createDummyContracts(party, ledger) + activeContracts <- ledger.activeContracts(Some(Seq(party))) + activeContractsPartyWildcard <- ledger + .activeContracts(None) + .map(_.filter(_.offset > offsetAtTestStart)) + end <- ledger.currentEnd() + activeContractsPartyWildcardAndSpecificParty <- ledger + .activeContracts( + GetActiveContractsRequest( + eventFormat = Some( + ledger + .eventFormat(verbose = true, Some(Seq(party))) + .update(_.filtersForAnyParty := Filters(Nil)) + ), + activeAtOffset = end, + ) + ) + .map(_.filter(_.offset > offsetAtTestStart)) + + // archive the contracts to not be active on the next tests + _ <- archive(ledger, party)(dummy, dummyWithParam, dummyFactory) + } yield { + assert( + activeContracts.size == 3, + s"Expected 3 contracts, but received ${activeContracts.size}.", + ) + + assert( + activeContracts.exists(_.contractId == dummy.contractId), + s"Didn't find Dummy contract with contractId $dummy.", + ) + assert( + activeContracts.exists(_.contractId == dummyWithParam.contractId), + s"Didn't find DummyWithParam contract with contractId $dummy.", + ) + assert( + activeContracts.exists(_.contractId == dummyFactory.contractId), + s"Didn't find DummyFactory contract with contractId $dummy.", + ) + + assert( + 
activeContracts.forall(_.acsDelta),
+        s"Expected acs_delta to be true for all the created events",
+      )
+
+      val invalidSignatories = activeContracts.filterNot(_.signatories == Seq(party.getValue))
+      assert(
+        invalidSignatories.isEmpty,
+        s"Found contracts with signatories other than $party: $invalidSignatories",
+      )
+
+      val invalidObservers = activeContracts.filterNot(_.observers.isEmpty)
+      assert(
+        invalidObservers.isEmpty,
+        s"Found contracts with non-empty observers: $invalidObservers",
+      )
+
+      assert(
+        activeContracts == activeContractsPartyWildcard,
+        s"Active contracts read for any party are not the same as those fetched for the specific party.",
+      )
+
+      assert(
+        activeContracts == activeContractsPartyWildcardAndSpecificParty,
+        s"Active contracts with both filters used are not the same as those fetched for the specific party.",
+      )
+
+    }
+  })
+
+  test(
+    "ACSfilterContracts",
+    "The ActiveContractService should return contracts filtered by templateId",
+    allocate(SingleParty),
+    runConcurrently = false,
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    for {
+      offsetAtTestStart <- ledger.currentEnd()
+      (dummy, dummyWithParam, dummyFactory) <- createDummyContracts(party, ledger)
+      activeContracts <- ledger.activeContractsByTemplateId(
+        Seq(Dummy.TEMPLATE_ID),
+        Some(Seq(party)),
+      )
+      activeContractsPartyWildcard <- ledger
+        .activeContractsByTemplateId(Seq(Dummy.TEMPLATE_ID), None)
+        .map(_.filter(_.offset > offsetAtTestStart))
+      _ <- archive(ledger, party)(dummy, dummyWithParam, dummyFactory)
+    } yield {
+      assert(
+        activeContracts.size == 1,
+        s"Expected 1 contract, but received ${activeContracts.size}.",
+      )
+
+      assert(
+        activeContracts.head.getTemplateId == Identifier.fromJavaProto(
+          Dummy.TEMPLATE_ID_WITH_PACKAGE_ID.toProto
+        ),
+        s"Received contract is not of type Dummy, but ${activeContracts.head.templateId}.",
+      )
+      assert(
+        activeContracts.head.contractId == dummy.contractId,
+        s"Expected contract with contractId $dummy, but received ${activeContracts.head.contractId}.",
+      )
+
+      assert(
+        activeContracts == activeContractsPartyWildcard,
+        "Active contracts read for any party are not the same as for the specific party.\n" +
+          s"Active contracts read for any party: $activeContractsPartyWildcard\n" +
+          s"Active contracts for the specific party: $activeContracts",
+      )
+
+    }
+  })
+
+  test(
+    "ACSarchivedContracts",
+    "The ActiveContractService does not return archived contracts",
+    allocate(SingleParty),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    for {
+      (dummy, dummyWithParam, dummyFactory) <- createDummyContracts(party, ledger)
+      contractsBeforeExercise <- ledger.activeContracts(Some(Seq(party)))
+      _ <- ledger.exercise(party, dummy.exerciseDummyChoice1())
+      contractsAfterExercise <- ledger.activeContracts(Some(Seq(party)))
+      _ <- ledger.exercise(party, dummyWithParam.exerciseArchive())
+      _ <- ledger.exercise(party, dummyFactory.exerciseArchive())
+    } yield {
+      // check the contracts BEFORE the exercise
+      assert(
+        contractsBeforeExercise.size == 3,
+        s"Expected 3 contracts, but received ${contractsBeforeExercise.size}.",
+      )
+
+      assert(
+        contractsBeforeExercise.exists(_.contractId == dummy.contractId),
+        s"Expected to receive contract with contractId $dummy, but received ${contractsBeforeExercise
+          .map(_.contractId)
+          .mkString(", ")} instead.",
+      )
+
+      // check the contracts AFTER the exercise
+      assert(
+        contractsAfterExercise.size == 2,
+        s"Expected 2 contracts, but received 
${contractsAfterExercise.size}", + ) + + assert( + !contractsAfterExercise.exists(_.contractId == dummy.contractId), + s"Expected to not receive contract with contractId $dummy.", + ) + } + }) + + test( + "ACSusableOffset", + "The ActiveContractService should return a usable offset to resume streaming transactions", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + dummy <- ledger.create(party, new Dummy(party)) + offset <- ledger.currentEnd() + onlyDummy <- ledger.activeContracts( + ledger.activeContractsRequest(Some(Seq(party)), offset) + ) + dummyWithParam <- ledger.create(party, new DummyWithParam(party)) + request <- ledger.getTransactionsRequest(ledger.transactionFormat(Some(Seq(party)))) + fromOffset = request.update(_.beginExclusive := offset) + transactions <- ledger.transactions(fromOffset) + _ <- ledger.exercise(party, dummy.exerciseArchive()) + _ <- ledger.exercise(party, dummyWithParam.exerciseArchive()) + } yield { + assert(onlyDummy.size == 1) + assert( + onlyDummy.exists(_.contractId == dummy.contractId), + s"Expected to receive $dummy in active contracts, but didn't receive it.", + ) + + assert( + transactions.size == 1, + s"Expected to receive only 1 transaction from offset $offset, but received ${transactions.size}.", + ) + + val transaction = transactions.head + assert( + transaction.events.size == 1, + s"Expected only 1 event in the transaction, but received ${transaction.events.size}.", + ) + + val createdEvent = transaction.events.collect { case Event(Created(createdEvent)) => + createdEvent + } + assert( + createdEvent.exists(_.contractId == dummyWithParam.contractId), + s"Expected a CreateEvent for $dummyWithParam, but received $createdEvent.", + ) + } + }) + + test( + "ACSverbosity", + "The ActiveContractService should emit field names only if the verbose flag is set to true", + allocate(SingleParty), + runConcurrently = false, + limitation = TestConstraints.GrpcOnly("Labels are always emitted by Transcode/SchemaProcessor"), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + dummy <- ledger.create(party, new Dummy(party)) + end <- ledger.currentEnd() + verboseRequest = ledger + .activeContractsRequest(Some(Seq(party)), end) + .update(_.eventFormat.verbose := true) + nonVerboseRequest = verboseRequest.update(_.eventFormat.verbose := false) + verboseEvents <- ledger.activeContracts(verboseRequest) + nonVerboseEvents <- ledger.activeContracts(nonVerboseRequest) + verboseEventsPartyWildcard <- ledger.activeContracts( + ledger.activeContractsRequest(None, end).update(_.eventFormat.verbose := true) + ) + nonVerboseEventsPartyWildcard <- ledger.activeContracts( + ledger.activeContractsRequest(None, end).update(_.eventFormat.verbose := false) + ) + _ <- ledger.exercise(party, dummy.exerciseArchive()) + } yield { + val verboseCreateArgs = verboseEvents.map(_.getCreateArguments).flatMap(_.fields) + assert( + verboseEvents.nonEmpty && verboseCreateArgs.forall(_.label.nonEmpty), + s"$party expected a contract with labels, but received $verboseEvents.", + ) + + val nonVerboseCreateArgs = nonVerboseEvents.map(_.getCreateArguments).flatMap(_.fields) + assert( + nonVerboseEvents.nonEmpty && nonVerboseCreateArgs.forall(_.label.isEmpty), + s"$party expected a contract without labels, but received $nonVerboseEvents.", + ) + + assert( + verboseEvents == verboseEventsPartyWildcard, + s"""Verbose events for any party are not the same as for specific party. 
+        |Verbose events for any party: $verboseEventsPartyWildcard
+        |Verbose events for the specific party: $verboseEvents""".stripMargin,
+      )
+      assert(
+        nonVerboseEvents == nonVerboseEventsPartyWildcard,
+        s"""Non-verbose events for any party are not the same as for the specific party.
+        |Non-verbose events for any party: $nonVerboseEventsPartyWildcard.
+        |Non-verbose events for the specific party: $nonVerboseEvents.""".stripMargin,
+      )
+
+    }
+  })
+
+  test(
+    "ACSmultiParty",
+    "The ActiveContractsService should return contracts for the requesting parties",
+    allocate(TwoParties),
+    runConcurrently = false,
+  )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob))) =>
+    for {
+      offsetAtTestStart <- ledger.currentEnd()
+      dummyAlice <- createDummyContracts(alice, ledger)
+      dummyBob <- createDummyContracts(bob, ledger)
+      allContractsForAlice <- ledger.activeContracts(Some(Seq(alice)))
+      allContractsForBob <- ledger.activeContracts(Some(Seq(bob)))
+      allContractsForAliceAndBob <- ledger.activeContracts(Some(Seq(alice, bob)))
+      allContractsPartyWildcard <- ledger
+        .activeContracts(None)
+        .map(_.filter(_.offset > offsetAtTestStart))
+      dummyContractsForAlice <- ledger.activeContractsByTemplateId(
+        Seq(Dummy.TEMPLATE_ID),
+        Some(Seq(alice)),
+      )
+      dummyContractsForAliceAndBob <- ledger.activeContractsByTemplateId(
+        Seq(Dummy.TEMPLATE_ID),
+        Some(Seq(alice, bob)),
+      )
+      dummyContractsPartyWildcard <- ledger
+        .activeContractsByTemplateId(
+          templateIds = Seq(Dummy.TEMPLATE_ID),
+          parties = None,
+        )
+        .map(_.filter(_.offset > offsetAtTestStart))
+      _ <- (archive(ledger, alice) _).tupled(dummyAlice)
+      _ <- (archive(ledger, bob) _).tupled(dummyBob)
+    } yield {
+      assert(
+        allContractsForAlice.size == 3,
+        s"$alice expected 3 events, but received ${allContractsForAlice.size}.",
+      )
+      assertTemplates(Seq(alice), allContractsForAlice, Dummy.TEMPLATE_ID_WITH_PACKAGE_ID, 1)
+      assertTemplates(
+        Seq(alice),
+        allContractsForAlice,
+        DummyWithParam.TEMPLATE_ID_WITH_PACKAGE_ID,
+        1,
+      )
+      assertTemplates(Seq(alice), allContractsForAlice, DummyFactory.TEMPLATE_ID_WITH_PACKAGE_ID, 1)
+
+      assert(
+        allContractsForBob.size == 3,
+        s"$bob expected 3 events, but received ${allContractsForBob.size}.",
+      )
+      assertTemplates(Seq(bob), allContractsForBob, Dummy.TEMPLATE_ID_WITH_PACKAGE_ID, 1)
+      assertTemplates(Seq(bob), allContractsForBob, DummyWithParam.TEMPLATE_ID_WITH_PACKAGE_ID, 1)
+      assertTemplates(Seq(bob), allContractsForBob, DummyFactory.TEMPLATE_ID_WITH_PACKAGE_ID, 1)
+
+      assert(
+        allContractsForAliceAndBob.size == 6,
+        s"$alice and $bob expected 6 events, but received ${allContractsForAliceAndBob.size}.",
+      )
+      assertTemplates(
+        Seq(alice, bob),
+        allContractsForAliceAndBob,
+        Dummy.TEMPLATE_ID_WITH_PACKAGE_ID,
+        2,
+      )
+      assertTemplates(
+        Seq(alice, bob),
+        allContractsForAliceAndBob,
+        DummyWithParam.TEMPLATE_ID_WITH_PACKAGE_ID,
+        2,
+      )
+      assertTemplates(
+        Seq(alice, bob),
+        allContractsForAliceAndBob,
+        DummyFactory.TEMPLATE_ID_WITH_PACKAGE_ID,
+        2,
+      )
+
+      assert(
+        allContractsForAliceAndBob == allContractsPartyWildcard,
+        s"""Active contracts for any party are not the same as for specific parties.
+        |Active contracts for any party: $allContractsPartyWildcard. 
+        |Active contracts for specific parties: $allContractsForAliceAndBob.""".stripMargin,
+      )
+
+      assert(
+        dummyContractsForAlice.size == 1,
+        s"$alice expected 1 event, but received ${dummyContractsForAlice.size}.",
+      )
+      assertTemplates(Seq(alice), dummyContractsForAlice, Dummy.TEMPLATE_ID_WITH_PACKAGE_ID, 1)
+
+      assert(
+        dummyContractsForAliceAndBob.size == 2,
+        s"$alice and $bob expected 2 events, but received ${dummyContractsForAliceAndBob.size}.",
+      )
+      assertTemplates(
+        Seq(alice, bob),
+        dummyContractsForAliceAndBob,
+        Dummy.TEMPLATE_ID_WITH_PACKAGE_ID,
+        2,
+      )
+
+      assert(
+        dummyContractsForAliceAndBob == dummyContractsPartyWildcard,
+        s"""Active Dummy contracts for any party are not the same as for specific parties.
+        |Active Dummy contracts for any party: $dummyContractsPartyWildcard.
+        |Active Dummy contracts for specific parties: $dummyContractsForAliceAndBob.""".stripMargin,
+      )
+
+    }
+  })
+
+  test(
+    "ACSEventOffset",
+    "The ActiveContractService should properly fill the offset field of the events",
+    allocate(SingleParty),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    for {
+      dummy <- ledger.create(party, new Dummy(party))
+      Vector(dummyEvent) <- ledger.activeContracts(Some(Seq(party)))
+      offset <- ledger.currentEnd()
+      acsDeltaTransaction <- ledger.transactionByOffset(dummyEvent.offset, Seq(party), AcsDelta)
+      ledgerEffectsTransaction <- ledger.transactionByOffset(
+        dummyEvent.offset,
+        Seq(party),
+        LedgerEffects,
+      )
+      _ <- ledger.exercise(party, dummy.exerciseArchive())
+    } yield {
+      assert(
+        acsDeltaTransaction.updateId == ledgerEffectsTransaction.updateId,
+        s"Offset ${dummyEvent.offset} did not resolve to the same acs delta transaction (${acsDeltaTransaction.updateId}) and ledger effects transaction (${ledgerEffectsTransaction.updateId}).",
+      )
+      assert(
+        dummyEvent.offset == acsDeltaTransaction.offset,
+        s"Active contract's created event offset ${dummyEvent.offset} (ledger end: $offset) was not equal to the transaction's offset ${acsDeltaTransaction.offset}.",
+      )
+      assert(
+        acsDeltaTransaction.events.sizeIs == 1,
+        "Expected only a single event for the AcsDelta transaction",
+      )
+      acsDeltaTransaction.events
+        .flatMap(_.event.created)
+        .foreach(createdEvent =>
+          assert(
+            acsDeltaTransaction.offset == createdEvent.offset,
+            s"AcsDelta transaction's created event offset ${createdEvent.offset} was not equal to the transaction's offset ${acsDeltaTransaction.offset}.",
+          )
+        )
+      assert(
+        ledgerEffectsTransaction.events.sizeIs == 1,
+        "Expected only a single event for the LedgerEffects transaction",
+      )
+      ledgerEffectsTransaction.events
+        .flatMap(_.event.created)
+        .foreach(createdEvent =>
+          assert(
+            ledgerEffectsTransaction.offset == createdEvent.offset,
+            s"LedgerEffects transaction's created event offset ${createdEvent.offset} was not equal to the transaction's offset ${ledgerEffectsTransaction.offset}.",
+          )
+        )
+
+      ledgerEffectsTransaction.events
+        .flatMap(_.event.created)
+        .zip(
+          acsDeltaTransaction.events
+            .flatMap(_.event.created)
+        )
+        .foreach { case (ledgerEffectsEvent, acsDeltaEvent) =>
+          assert(
+            ledgerEffectsEvent.nodeId == acsDeltaEvent.nodeId,
+            s"LedgerEffects transaction's event (at offset ${ledgerEffectsEvent.offset}) node id ${ledgerEffectsEvent.nodeId} was not " +
+              s"equal to AcsDelta transaction's event (at offset ${acsDeltaEvent.offset}) node id ${acsDeltaEvent.nodeId}.",
+          )
+        }
+    }
+  })
+
+  test(
+    "ACSnoWitnessedContracts",
+    "The ActiveContractService should not return witnessed contracts",
+    allocate(TwoParties),
+    runConcurrently = false,
+  )(implicit ec => { case 
Participants(Participant(ledger, Seq(alice, bob))) => + for { + offsetAtTestStart <- ledger.currentEnd() + witnesses: TestWitnesses.ContractId <- ledger.create( + alice, + new TestWitnesses(alice, bob, bob), + ) + newWitnesses <- ledger.exerciseAndGetContract[TestWitnesses.ContractId, TestWitnesses]( + bob, + witnesses.exerciseWitnessesCreateNewWitnesses(), + ) + bobContracts <- ledger.activeContracts(Some(Seq(bob))) + aliceContracts <- ledger.activeContracts(Some(Seq(alice))) + partyWildcardContracts <- ledger + .activeContracts(None) + .map(_.filter(_.offset > offsetAtTestStart)) + _ <- ledger.exercise(alice, witnesses.exerciseArchive()) + _ <- ledger.exercise(bob, newWitnesses.exerciseArchive()) + } yield { + assert( + bobContracts.size == 2, + s"Expected to receive 2 active contracts for $bob, but received ${bobContracts.size}.", + ) + assert( + aliceContracts.size == 1, + s"Expected to receive 1 active contracts for $alice, but received ${aliceContracts.size}.", + ) + assert( + partyWildcardContracts.size == 2, + s"Expected to receive 2 active contracts for all parties, but received ${partyWildcardContracts.size}.", + ) + } + }) + + test( + "ACSnoDivulgedContracts", + "The ActiveContractService should not return divulged contracts", + allocate(TwoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob))) => + for { + divulgence1 <- ledger.create(alice, new Divulgence1(alice)) + divulgence2 <- ledger.create(bob, new Divulgence2(bob, alice)) + _ <- ledger.exercise(alice, divulgence2.exerciseDivulgence2Fetch(divulgence1)) + bobContracts <- ledger.activeContracts(Some(Seq(bob))) + aliceContracts <- ledger.activeContracts(Some(Seq(alice))) + _ <- ledger.exercise(alice, divulgence1.exerciseArchive()) + _ <- ledger.exercise(bob, divulgence2.exerciseArchive()) + } yield { + assert( + bobContracts.size == 1, + s"Expected to receive 1 active contracts for $bob, but received ${bobContracts.size}.", + ) + assert( + aliceContracts.size == 2, + s"Expected to receive 2 active contracts for $alice, but received ${aliceContracts.size}.", + ) + } + }) + + test( + "ACSnoSignatoryObservers", + "The ActiveContractService should not return overlapping signatories and observers", + allocate(TwoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob))) => + for { + contract <- ledger.create( + alice, + new WithObservers(alice, Seq(alice, bob).map(_.getValue).asJava), + ) + contracts <- ledger.activeContracts(Some(Seq(alice))) + _ <- ledger.exercise(alice, contract.exerciseArchive()) + } yield { + assert(contracts.nonEmpty) + contracts.foreach(ce => + assert( + ce.observers == Seq(bob.getValue), + s"Expected observers to only contain $bob, but received ${ce.observers}", + ) + ) + } + }) + + test( + "ACFilterWitnesses", + "The ActiveContractService should filter witnesses by the transaction filter", + allocate(Parties(3)), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob, charlie))) => + for { + offsetAtTestStart <- ledger.currentEnd() + contract <- ledger.create( + alice, + new WithObservers(alice, List(bob, charlie).map(_.getValue).asJava), + ) + bobContracts <- ledger.activeContracts(Some(Seq(bob))) + aliceBobContracts <- ledger.activeContracts(Some(Seq(alice, bob))) + bobCharlieContracts <- ledger.activeContracts(Some(Seq(bob, charlie))) + partyWildcardContracts <- ledger + .activeContracts(None) + .map(_.filter(_.offset > offsetAtTestStart)) + _ <- ledger.exercise(alice, contract.exerciseArchive()) 
+    } yield {
+      def assertWitnesses(contracts: Vector[CreatedEvent], requesters: Set[Party]): Unit = {
+        assert(
+          contracts.size == 1,
+          s"Expected to receive 1 active contract for $requesters, but received ${contracts.size}.",
+        )
+        assert(
+          contracts.head.witnessParties.toSet == requesters.map(_.getValue),
+          s"Expected witness parties to equal $requesters, but received ${contracts.head.witnessParties}",
+        )
+      }
+
+      assertWitnesses(bobContracts, Set(bob))
+      assertWitnesses(aliceBobContracts, Set(alice, bob))
+      assertWitnesses(bobCharlieContracts, Set(bob, charlie))
+      assertWitnesses(partyWildcardContracts, Set(alice, bob, charlie))
+    }
+  })
+
+  test(
+    "ACSFilterCombinations",
+    "Testing ACS filter combinations",
+    allocate(Parties(3)),
+    runConcurrently = false,
+  )(implicit ec => { case Participants(Participant(ledger, Seq(p1, p2, p3))) =>
+    // Let us have 3 templates
+    val templateIds: Vector[Identifier] =
+      Vector(TriAgreement.TEMPLATE_ID, TriProposal.TEMPLATE_ID, WithObservers.TEMPLATE_ID).map(t =>
+        Identifier.fromJavaProto(t.toProto)
+      )
+    // Let us have 3 parties
+    val parties: Vector[Party] = Vector(p1, p2, p3)
+    // Let us have all combinations for the 3 parties
+    val partyCombinations =
+      Vector(Set(0), Set(1), Set(2), Set(0, 1), Set(1, 2), Set(0, 2), Set(0, 1, 2))
+    // Let us populate 3 contracts for each template/partyCombination pair (see createContracts below).
+    // Then we require the following filter expectations to be upheld.
+
+    // Key: the index of a test party (see parties); if None, the party wildcard is used.
+    // Value:
+    //   either empty, meaning a template-wildcard filter,
+    //   or the set of indices of test templates (see templateIds)
+    val * = Set.empty[Int]
+    val w = -1
+    type ACSFilter = Map[Option[Int], Set[Int]]
+
+    case class FilterCoord(templateId: Int, stakeholders: Set[Int])
+
+    def filterCoordsForFilter(filter: ACSFilter): Set[FilterCoord] =
+      (for {
+        (partyO, templates) <- filter
+        templateId <- if (templates.isEmpty) templateIds.indices.toSet else templates
+        allowedPartyCombination <- partyO.fold(partyCombinations)(party =>
+          partyCombinations.filter(_(party))
+        )
+      } yield FilterCoord(templateId, allowedPartyCombination)).toSet
+
+    val filters: Vector[ACSFilter] =
+      Vector(
+        Map(0 -> *),
+        Map(0 -> Set(0)),
+        Map(0 -> Set(1)),
+        Map(0 -> Set(2)),
+        Map(0 -> Set(0, 1)),
+        Map(0 -> Set(0, 2)),
+        Map(0 -> Set(1, 2)),
+        Map(0 -> Set(0, 1, 2)),
+        // multi filter
+        Map(0 -> *, 1 -> *),
+        Map(0 -> *, 2 -> *),
+        Map(0 -> *, 1 -> *, 2 -> *),
+        Map(0 -> Set(0), 1 -> Set(1)),
+        Map(0 -> Set(0), 1 -> Set(1), 2 -> Set(2)),
+        Map(0 -> Set(0, 1), 1 -> Set(0, 2)),
+        Map(0 -> Set(0, 1), 1 -> Set(0, 2), 2 -> Set(1, 2)),
+        Map(0 -> *, 1 -> Set(0)),
+        Map(0 -> *, 1 -> Set(0), 2 -> Set(1, 2)),
+        Map(0 -> *),
+        // party-wildcard
+        Map(w -> Set(0)),
+        Map(w -> Set(1)),
+        Map(w -> Set(2)),
+        Map(w -> Set(0, 1)),
+        Map(w -> Set(0, 2)),
+        Map(w -> Set(1, 2)),
+        Map(w -> Set(0, 1, 2)),
+        // multi filter w/ party-wildcard
+        Map(w -> *, 1 -> *),
+        Map(0 -> *, w -> *),
+        Map(0 -> *, w -> *, 2 -> *),
+        Map(w -> Set(0), 1 -> Set(1)),
+        Map(0 -> Set(0), w -> Set(1)),
+        Map(0 -> Set(0), 1 -> Set(1), w -> Set(2)),
+        Map(0 -> Set(0, 1), w -> Set(0, 2)),
+        Map(w -> Set(0, 1), 1 -> Set(0, 2), 2 -> Set(1, 2)),
+        Map(w -> *, 1 -> Set(0)),
+        Map(0 -> *, w -> Set(0)),
+        Map(w -> *, 1 -> Set(0), 2 -> Set(1, 2)),
+        Map(0 -> *, w -> Set(0), 2 -> Set(1, 2)),
+        Map(0 -> *, 1 -> Set(0), w -> Set(1, 2)),
+        Map(0 -> *, w -> Set(0), 2 -> Set(1, 2)),
+      ).map(_.map { case (k, v) => 
(Option.when(k >= 0)(k), v) }) + val fixtures: Vector[(ACSFilter, Set[FilterCoord])] = + filters.map(filter => filter -> filterCoordsForFilter(filter)) + + def createContracts: Future[Map[FilterCoord, Set[String]]] = { + def withThreeParties[T <: Template]( + f: (String, String, String) => T + )(partySet: Set[Party]): T = + partySet.toList.map(_.getValue) match { + case a :: b :: c :: Nil => f(a, b, c) + case a :: b :: Nil => f(a, b, b) + case a :: Nil => f(a, a, a) + case invalid => + throw new Exception(s"Invalid partySet, length must be 1 or 2 or 3 but it was $invalid") + } + + val templateFactories: Vector[Set[Party] => _ <: Template] = + Vector( + (parties: Set[Party]) => withThreeParties(new TriAgreement(_, _, _))(parties), + (parties: Set[Party]) => withThreeParties(new TriProposal(_, _, _))(parties), + (parties: Set[Party]) => + new WithObservers(parties.head, parties.toList.map(_.getValue).asJava), + ) + + def createContractFor( + template: Int, + partyCombination: Int, + ): Future[ContractId[_]] = { + val partiesSet = partyCombinations(partyCombination).map(parties) + templateFactories(template)(partiesSet) match { + case agreement: TriAgreement => + ledger.create( + partiesSet.toList, + partiesSet.toList, + agreement, + )(TriAgreement.COMPANION) + case proposal: TriProposal => + ledger.create( + partiesSet.toList, + partiesSet.toList, + proposal, + )(TriProposal.COMPANION) + case withObservers: WithObservers => + ledger.create( + partiesSet.toList, + partiesSet.toList, + withObservers, + )(WithObservers.COMPANION) + case t => throw new RuntimeException(s"the template given has an unexpected type $t") + } + } + + val createFs = for { + partyCombinationIndex <- partyCombinations.indices + templateIndex <- templateFactories.indices + _ <- 1 to 3 + } yield createContractFor(templateIndex, partyCombinationIndex) + .map((partyCombinationIndex, templateIndex) -> _) + + Future + .sequence(createFs) + .map(_.groupBy(_._1).map { case ((partyCombinationIndex, templateIndex), contractIds) => + ( + FilterCoord(templateIndex, partyCombinations(partyCombinationIndex)), + contractIds.view.map(_._2.contractId).toSet, + ) + }) + } + + val random = new Random(System.nanoTime()) + + def testForFixtures( + fixtures: Vector[(ACSFilter, Set[FilterCoord])], + allContracts: Map[FilterCoord, Set[String]], + offsetAtTestStart: Long, + ) = { + def filtersForTemplates(templates: Set[Int]) = + Filters( + random + .shuffle(templates.toSeq.map(templateIds)) + .map(templateId => + CumulativeFilter( + IdentifierFilter.TemplateFilter(TemplateFilter(Some(templateId), false)) + ) + ) + ) + + def activeContractIdsFor(filter: ACSFilter): Future[Vector[String]] = + for { + end <- ledger.currentEnd() + filtersByParty = filter + .collect { + case (Some(party), templates) if templates.isEmpty => + (parties(party).getValue, Filters(Seq.empty)) + case (Some(party), templates) => + ( + parties(party).getValue, + filtersForTemplates(templates), + ) + } + filtersForAnyParty = filter.get(None) match { + // no party-wildcard defined + case None => None + case Some(templates) if templates.isEmpty => + Some( + Filters( + Seq( + CumulativeFilter.defaultInstance + .withWildcardFilter(WildcardFilter(false)) + ) + ) + ) + case Some(templates) => Some(filtersForTemplates(templates)) + } + createdEvents <- ledger + .activeContracts( + GetActiveContractsRequest( + eventFormat = Some( + EventFormat( + filtersByParty = filtersByParty, + filtersForAnyParty = filtersForAnyParty, + verbose = true, + ) + ), + activeAtOffset = end, + ) + ) + 
.map(_.filter(_.offset > offsetAtTestStart))
+        } yield createdEvents.map(_.contractId)
+
+      def testForFixture(actual: Vector[String], expected: Set[FilterCoord], hint: String): Unit = {
+        val actualSet = actual.toSet
+        assert(
+          expected.forall(allContracts.contains),
+          s"$hint expected FilterCoord(s) which do not exist: ${expected.filterNot(allContracts.contains)}",
+        )
+        assert(
+          actualSet.size == actual.size,
+          s"$hint ACS returned redundant entries ${actual.groupBy(identity).toList.filter(_._2.size > 1).map(_._1).mkString("\n")}",
+        )
+        val errors = allContracts.toList.flatMap {
+          case (filterCoord, contracts) if expected(filterCoord) && contracts.forall(actualSet) =>
+            Nil
+          case (filterCoord, contracts)
+              if expected(filterCoord) && contracts.forall(x => !actualSet(x)) =>
+            List(s"$filterCoord is missing from result")
+          case (filterCoord, _) if expected(filterCoord) =>
+            List(s"$filterCoord is partially missing from result")
+          case (filterCoord, contracts) if contracts.forall(actualSet) =>
+            List(s"$filterCoord is present (too many contracts in result)")
+          case (filterCoord, contracts) if contracts.exists(actualSet) =>
+            List(s"$filterCoord is partially present (too many contracts in result)")
+          case (_, _) => Nil
+        }
+        assert(errors == Nil, s"$hint ACS mismatch: ${errors.mkString(", ")}")
+        val expectedContracts = expected.view.flatMap(allContracts).toSet
+        // This extra, redundant check safeguards the more fine-grained approach above
+        assert(
+          expectedContracts == actualSet,
+          s"$hint ACS mismatch\n Extra contracts: ${actualSet -- expectedContracts}\n Missing contracts: ${expectedContracts -- actualSet}",
+        )
+      }
+
+      val testFs = fixtures.map { case (filter, expectedResultCoords) =>
+        activeContractIdsFor(filter).map(
+          testForFixture(_, expectedResultCoords, s"Filter: $filter")
+        )
+      }
+      Future.sequence(testFs)
+    }
+
+    for {
+      offsetAtTestStart <- ledger.currentEnd()
+      allContracts <- createContracts
+      _ <- testForFixtures(fixtures, allContracts, offsetAtTestStart)
+    } yield ()
+  })
+
+  test(
+    "ActiveAtOffsetInfluencesAcs",
+    "Allow specifying the optional active_at_offset",
+    partyAllocation = allocate(SingleParty),
+    runConcurrently = false,
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    val eventFormat =
+      Some(
+        EventFormat(
+          filtersByParty = Map(party.getValue -> Filters(Nil)),
+          filtersForAnyParty = None,
+          verbose = false,
+        )
+      )
+    for {
+      c1 <- ledger.create(party, new Dummy(party))
+      offset1 <- ledger.currentEnd()
+      c2 <- ledger.create(party, new Dummy(party))
+      offset2 <- ledger.currentEnd()
+      // acs at offset1
+      acs1 <- ledger
+        .activeContractsIds(
+          GetActiveContractsRequest(
+            eventFormat = eventFormat,
+            activeAtOffset = offset1,
+          )
+        )
+      _ = assertEquals("ACS at the first offset", acs1.toSet, Set(c1))
+      // acs at offset2
+      acs2 <- ledger
+        .activeContractsIds(
+          GetActiveContractsRequest(
+            eventFormat = eventFormat,
+            activeAtOffset = offset2,
+          )
+        )
+      _ = assertEquals("ACS at the second offset", acs2.toSet, Set(c1, c2))
+    } yield ()
+  })
+
+  test(
+    "AcsAtPruningOffsetIsAllowed",
+    "Allow requesting ACS at the pruning offset",
+    partyAllocation = allocate(SingleParty),
+    runConcurrently = false,
+    limitation = TestConstraints.GrpcOnly("Pruning not available in JSON API"),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    val eventFormat =
+      Some(
+        EventFormat(
+          filtersByParty = Map(party.getValue -> Filters(Nil)),
+          filtersForAnyParty = None,
+          verbose = false,
+        )
+      )
+    for {
+      c1 <- ledger.create(party, new 
Dummy(party)) + anOffset <- ledger.currentEnd() + _ <- ledger.create(party, new Dummy(party)) + _ <- ledger.pruneCantonSafe( + pruneUpTo = anOffset, + party = party, + dummyCommand = p => new Dummy(p).create.commands, + ) + acs <- ledger + .activeContractsIds( + GetActiveContractsRequest( + eventFormat = eventFormat, + activeAtOffset = anOffset, + ) + ) + _ = assertEquals("acs valid_at at pruning offset", acs.toSet, Set(c1)) + } yield () + }) + + test( + "AcsBeforePruningOffsetIsDisallowed", + "Fail when requesting ACS before the pruning offset", + partyAllocation = allocate(SingleParty), + runConcurrently = false, + limitation = TestConstraints.GrpcOnly("Pruning not available in JSON API"), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val eventFormat = + Some( + EventFormat( + filtersByParty = Map(party.getValue -> Filters(Nil)), + filtersForAnyParty = None, + verbose = false, + ) + ) + for { + offset1 <- ledger.currentEnd() + _ <- ledger.create(party, new Dummy(party)) + offset2 <- ledger.currentEnd() + _ <- ledger.create(party, new Dummy(party)) + _ <- ledger.pruneCantonSafe( + pruneUpTo = offset2, + party = party, + p => new Dummy(p).create.commands, + ) + _ <- ledger + .activeContractsIds( + GetActiveContractsRequest( + eventFormat = eventFormat, + activeAtOffset = offset1, + ) + ) + .mustFailWith( + "ACS before the pruning offset", + RequestValidationErrors.ParticipantPrunedDataAccessed, + ) + } yield () + }) + + test( + "ActiveAtOffsetInvalidInput", + "Fail when active_at_offset has invalid input", + partyAllocation = allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val eventFormat = + Some( + EventFormat( + filtersByParty = Map(party.getValue -> Filters(Nil)), + filtersForAnyParty = None, + verbose = false, + ) + ) + for { + _ <- ledger + .activeContracts( + GetActiveContractsRequest( + eventFormat = eventFormat, + activeAtOffset = -1L, + ) + ) + .mustFailWith( + "invalid offset", + RequestValidationErrors.NegativeOffset, + ) + } yield () + }) + + test( + "ActiveAtOffsetAfterLedgerEnd", + "Fail when active_at_offset is after the ledger end offset", + partyAllocation = allocate(SingleParty), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val eventFormat = + Some( + EventFormat( + filtersByParty = Map(party.getValue -> Filters(Nil)), + filtersForAnyParty = None, + verbose = false, + ) + ) + for { + offsetBeyondLedgerEnd <- ledger.offsetBeyondLedgerEnd() + _ <- ledger + .activeContracts( + GetActiveContractsRequest( + eventFormat = eventFormat, + activeAtOffset = offsetBeyondLedgerEnd, + ) + ) + .mustFailWith( + "offset after ledger end", + RequestValidationErrors.OffsetAfterLedgerEnd, + ) + } yield () + }) + + private def createDummyContracts(party: Party, ledger: ParticipantTestContext)(implicit + ec: ExecutionContext + ): Future[ + ( + Dummy.ContractId, + DummyWithParam.ContractId, + DummyFactory.ContractId, + ) + ] = + for { + dummy <- ledger.create(party, new Dummy(party)) + dummyWithParam <- ledger.create(party, new DummyWithParam(party)) + dummyFactory <- ledger.create(party, new DummyFactory(party)) + } yield (dummy, dummyWithParam, dummyFactory) + + private def assertTemplates( + party: Seq[Party], + events: Vector[CreatedEvent], + templateId: JavaIdentifier, + count: Int, + ): Unit = { + val templateEvents = + events.count(_.getTemplateId == Identifier.fromJavaProto(templateId.toProto)) + assert( + templateEvents == count, + 
s"${party.mkString(" and ")} expected $count $templateId events, but received $templateEvents.", + ) + } + + private def archive( + ledger: ParticipantTestContext, + party: Party, + )( + dummy: Dummy.ContractId, + dummyWithParam: DummyWithParam.ContractId, + dummyFactory: DummyFactory.ContractId, + )(implicit ec: ExecutionContext) = + for { + _ <- ledger.exercise(party, dummy.exerciseArchive()) + _ <- ledger.exercise(party, dummyWithParam.exerciseArchive()) + _ <- ledger.exercise(party, dummyFactory.exerciseArchive()) + } yield () + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CheckpointInTailingStreamsIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CheckpointInTailingStreamsIT.scala new file mode 100644 index 0000000000..fe147415ed --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CheckpointInTailingStreamsIT.scala @@ -0,0 +1,334 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.api.v2.command_completion_service.CompletionStreamResponse.CompletionResponse +import com.daml.ledger.api.v2.completion.Completion +import com.daml.ledger.api.v2.offset_checkpoint.OffsetCheckpoint +import com.daml.ledger.api.v2.transaction.Transaction +import com.daml.ledger.api.v2.update_service.GetUpdatesResponse +import com.daml.ledger.test.java.model.test.Dummy +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.ledger.api.TransactionShape.AcsDelta +import com.digitalasset.canton.time.NonNegativeFiniteDuration + +import scala.concurrent.Future + +class CheckpointInTailingStreamsIT extends LedgerTestSuite { + import CompanionImplicits.* + import com.daml.ledger.api.testtool.suites.v2_1.CheckpointInTailingStreamsIT.* + + test( + "TXServeTailingStreamCheckpointAtTheEnd", + "Tailing transaction streams should contain a checkpoint message", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val transactionsToSubmit = 10 + val transactionsToRead = 10 + for { + dummies <- Future.sequence( + Vector.fill(transactionsToSubmit)(ledger.create(party, new Dummy(party))) + ) + // sleep for 3 * maxOffsetCheckpointEmissionDelay to ensure that the offset checkpoint cache is updated + _ <- Future( + Thread.sleep(3 * ledger.maxOffsetCheckpointEmissionDelay.duration.toMillis) + ) + updates <- ledger.updates( + within, + ledger + .getTransactionsRequestWithEnd( + transactionFormat = ledger.transactionFormat( + parties = Some(Seq(party)), + transactionShape = AcsDelta, + ), + end = None, + ), + ) + txs: Vector[Transaction] = updates.flatMap(_.transaction) + checkpoints: Vector[OffsetCheckpoint] = updates.flatMap(_.offsetCheckpoint) + } yield { + assert( + dummies.size == transactionsToSubmit, + s"$transactionsToSubmit should have been submitted but ${dummies.size} were instead", + ) + assert( + updates.size > transactionsToRead, + s"More than $transactionsToRead updates should have been received but ${updates.size} were instead", + ) + assert( + txs.size == transactionsToRead, + s"$transactionsToRead transactions should have been received 
but ${txs.size} were instead", + ) + assert( + checkpoints.nonEmpty, + s"At least one checkpoint should have been received but none were instead", + ) + assertUpdatesInOrder(updates) + } + }) + + test( + "TXServeTailingStreamCheckpointTimeout", + "Tailing transaction streams should contain a checkpoint message when idle", + allocate(TwoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(party, party2))) => + val transactionsToSubmit = 10 + for { + dummies <- Future.sequence( + Vector.fill(transactionsToSubmit)(ledger.create(party, new Dummy(party))) + ) + updates <- ledger.updates( + ledger.maxOffsetCheckpointEmissionDelay * NonNegativeInt.tryCreate(3), + ledger + .getTransactionsRequestWithEnd( + transactionFormat = ledger.transactionFormat(parties = Some(Seq(party2))), + end = None, + ), + ) + txs: Vector[Transaction] = updates.flatMap(_.transaction) + checkpoints: Vector[OffsetCheckpoint] = updates.flatMap(_.offsetCheckpoint) + } yield { + assert( + dummies.size == transactionsToSubmit, + s"$transactionsToSubmit should have been submitted but ${dummies.size} were instead", + ) + assert( + updates.nonEmpty, + s"At least one update (checkpoint) should have been received but none were instead", + ) + assert( + txs.isEmpty, + s"No transactions should have been received but ${txs.size} were instead", + ) + assert( + checkpoints.nonEmpty, + s"At least one checkpoint should have been received but none were instead", + ) + assertUpdatesInOrder(updates) + } + }) + + test( + "TXServeTailingStreamCheckpointEmpty", + "Tailing transaction streams should contain a checkpoint message if there are any offset updates (even if they are for other parties)", + allocate(TwoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(party, party2))) => + val transactionsToSubmit = 10 + for { + dummies <- Future.sequence( + Vector.fill(transactionsToSubmit)(ledger.create(party, new Dummy(party))) + ) + // sleep for 3 * maxOffsetCheckpointEmissionDelay to ensure that the offset checkpoint cache is updated + _ <- Future(Thread.sleep(3 * ledger.maxOffsetCheckpointEmissionDelay.duration.toMillis)) + // fetching updates for party2 should return 0 txs + updates <- ledger.updates( + within, + ledger + .getTransactionsRequestWithEnd( + transactionFormat = ledger.transactionFormat(Some(Seq(party2))), + end = None, + ), + ) + txs: Vector[Transaction] = updates.flatMap(_.transaction) + checkpoints: Vector[OffsetCheckpoint] = updates.flatMap(_.offsetCheckpoint) + } yield { + assert( + dummies.size == transactionsToSubmit, + s"$transactionsToSubmit should have been submitted but ${dummies.size} were instead", + ) + assert( + updates.nonEmpty, + s"At least one update should have been received but none were instead", + ) + assert( + txs.isEmpty, + s"No transactions should have been received but ${txs.size} were instead", + ) + assert( + checkpoints.nonEmpty, + s"At least one checkpoint should have been received but none were instead", + ) + assertUpdatesInOrder(updates) + } + }) + + test( + "CompletionsStreamCheckpointAtTheEnd", + "Command completions streams should contain a checkpoint message", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val transactionsToSubmit = 10 + val transactionsToRead = 10 + for { + endOffsetAtTestStart <- ledger.currentEnd() + dummies <- Future.sequence( + Vector.fill(transactionsToSubmit)(ledger.create(party, new Dummy(party))) + ) + // sleep for 3 * maxOffsetCheckpointEmissionDelay to ensure that the 
offset checkpoint cache is updated
+      _ <- Future(Thread.sleep(3 * ledger.maxOffsetCheckpointEmissionDelay.duration.toMillis))
+      responses <- ledger.completions(
+        within,
+        ledger
+          .completionStreamRequest(endOffsetAtTestStart)(party),
+      )
+      completions: Vector[Completion] = responses.flatMap(_.completion)
+      checkpoints: Vector[OffsetCheckpoint] = responses.flatMap(_.offsetCheckpoint)
+    } yield {
+      assert(
+        dummies.size == transactionsToSubmit,
+        s"$transactionsToSubmit should have been submitted but ${dummies.size} were instead",
+      )
+      assert(
+        responses.size > transactionsToRead,
+        s"More than $transactionsToRead responses should have been received but ${responses.size} were instead",
+      )
+      assert(
+        completions.size == transactionsToRead,
+        s"$transactionsToRead completions should have been received but ${completions.size} were instead",
+      )
+      assert(
+        checkpoints.nonEmpty,
+        s"At least one checkpoint should have been received but none were instead",
+      )
+      assertCompletionsInOrder(responses)
+    }
+  })
+
+  test(
+    "CompletionsStreamCheckpointTimeout",
+    "Command completions streams should contain a checkpoint message when idle",
+    allocate(TwoParties),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party, party2))) =>
+    val transactionsToSubmit = 10
+    for {
+      endOffsetAtTestStart <- ledger.currentEnd()
+      dummies <- Future.sequence(
+        Vector.fill(transactionsToSubmit)(ledger.create(party, new Dummy(party)))
+      )
+      responses <- ledger.completions(
+        ledger.maxOffsetCheckpointEmissionDelay * NonNegativeInt.tryCreate(3),
+        ledger
+          .completionStreamRequest(endOffsetAtTestStart)(party2),
+      )
+      completions: Vector[Completion] = responses.flatMap(_.completion)
+      checkpoints: Vector[OffsetCheckpoint] = responses.flatMap(_.offsetCheckpoint)
+    } yield {
+      assert(
+        dummies.size == transactionsToSubmit,
+        s"$transactionsToSubmit should have been submitted but ${dummies.size} were instead",
+      )
+      assert(
+        responses.nonEmpty,
+        s"At least one response (checkpoint) should have been received but none were instead",
+      )
+      assert(
+        completions.isEmpty,
+        s"No completions should have been received but ${completions.size} were instead",
+      )
+      assert(
+        checkpoints.nonEmpty,
+        s"At least one checkpoint should have been received but none were instead",
+      )
+      assertCompletionsInOrder(responses)
+    }
+  })
+
+  test(
+    "CompletionsStreamCheckpointEmpty",
+    "Command completions streams should contain a checkpoint message if there are any offset updates (even if they are for other parties)",
+    allocate(TwoParties),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party, party2))) =>
+    val transactionsToSubmit = 10
+    for {
+      endOffsetAtTestStart <- ledger.currentEnd()
+      dummies <- Future.sequence(
+        Vector.fill(transactionsToSubmit)(ledger.create(party, new Dummy(party)))
+      )
+      // sleep for 3 * maxOffsetCheckpointEmissionDelay to ensure that the offset checkpoint cache is updated
+      _ <- Future(Thread.sleep(3 * ledger.maxOffsetCheckpointEmissionDelay.duration.toMillis))
+      // fetching completions for party2 should return 0 completions
+      responses <- ledger.completions(
+        within,
+        ledger
+          .completionStreamRequest(endOffsetAtTestStart)(party2),
+      )
+      completions: Vector[Completion] = responses.flatMap(_.completion)
+      checkpoints: Vector[OffsetCheckpoint] = responses.flatMap(_.offsetCheckpoint)
+    } yield {
+      assert(
+        dummies.size == transactionsToSubmit,
+        s"$transactionsToSubmit should have been submitted but ${dummies.size} were instead",
+      )
+      assert(
+        responses.nonEmpty,
s"At least one response should have been received but none were instead", + ) + assert( + completions.isEmpty, + s"No completions should have been received but ${completions.size} were instead", + ) + assert( + checkpoints.nonEmpty, + s"At least one checkpoint should have been received but none were instead", + ) + assertCompletionsInOrder(responses) + } + }) + +} + +object CheckpointInTailingStreamsIT { + private val within: NonNegativeFiniteDuration = NonNegativeFiniteDuration.tryOfSeconds(1L) + + sealed trait ElementOrCheckpoint + final case object Empty extends ElementOrCheckpoint + final case class Element(offset: Long) extends ElementOrCheckpoint + final case class Checkpoint(offset: Long) extends ElementOrCheckpoint + + private def getElementOrCheckpoint(update: GetUpdatesResponse.Update): ElementOrCheckpoint = + update match { + case GetUpdatesResponse.Update.Empty => Empty + case GetUpdatesResponse.Update.Transaction(tx) => Element(tx.offset) + case GetUpdatesResponse.Update.Reassignment(r) => Element(r.offset) + case GetUpdatesResponse.Update.OffsetCheckpoint(checkpoint) => Checkpoint(checkpoint.offset) + case GetUpdatesResponse.Update.TopologyTransaction(topology) => Element(topology.offset) + } + + private def getElementOrCheckpoint(completion: CompletionResponse): ElementOrCheckpoint = + completion match { + case CompletionResponse.Empty => Empty + case CompletionResponse.Completion(completion) => Element(completion.offset) + case CompletionResponse.OffsetCheckpoint(checkpoint) => + Checkpoint(checkpoint.offset) + } + + private def assertUpdatesInOrder[T](updates: Seq[T], convert: T => ElementOrCheckpoint): Unit = + updates.map(convert).foldLeft(0L) { (lastOffset, update) => + update match { + case Empty => lastOffset + case Element(offset) => + assert( + offset > lastOffset, + s"element with offset $offset should have been greater than last offset", + ) + offset + case Checkpoint(checkpointOffset) => + val checkpointOffsetStr = checkpointOffset + assert( + checkpointOffsetStr >= lastOffset, + s"checkpoint with offset $checkpointOffset should have been greater or equal to last offset", + ) + checkpointOffsetStr + } + } + + private def assertUpdatesInOrder(updates: Seq[GetUpdatesResponse.Update]): Unit = + assertUpdatesInOrder[GetUpdatesResponse.Update](updates, getElementOrCheckpoint) + + private def assertCompletionsInOrder(completions: Seq[CompletionResponse]): Unit = + assertUpdatesInOrder[CompletionResponse](completions, getElementOrCheckpoint) + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/ClosedWorldIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/ClosedWorldIT.scala new file mode 100644 index 0000000000..8eca81649c --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/ClosedWorldIT.scala @@ -0,0 +1,42 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.test.java.semantic.semantictests.{Amount, Iou} +import com.digitalasset.canton.ledger.error.groups.SyncServiceRejectionErrors + +import java.math.BigDecimal +import java.util.regex.Pattern + +class ClosedWorldIT extends LedgerTestSuite { + import CompanionImplicits.* + + private[this] val onePound = new Amount(BigDecimal.valueOf(1), "GBP") + + /* + * All informees in a transaction must be allocated. + */ + + test( + "ClosedWorldObserver", + "Cannot execute a transaction that references unallocated observer parties", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(alpha, Seq(payer))) => + for { + failure <- alpha + .create(payer, new Iou(payer, "unallocated", onePound)) + .mustFail("referencing an unallocated party") + } yield { + assertGrpcErrorRegex( + failure, + SyncServiceRejectionErrors.PartyNotKnownOnLedger, + Some(Pattern.compile("Part(y|ies) not known on ledger")), + checkDefiniteAnswerMetadata = true, + ) + } + }) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CommandDeduplicationIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CommandDeduplicationIT.scala new file mode 100644 index 0000000000..de2a8ee70b --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CommandDeduplicationIT.scala @@ -0,0 +1,848 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
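+
+// These tests submit a command twice with the same command ID and assert that the
+// second submission is rejected as a duplicate while the deduplication period of
+// the first one is still running. A sketch of the deduplicating submission used
+// throughout (the 2-second duration is illustrative; the suite scales it by
+// timeoutScaleFactor):
+//
+//   val request = ledger
+//     .submitRequest(party, new Dummy(party).create.commands)
+//     .update(
+//       _.commands.deduplicationPeriod :=
+//         DeduplicationPeriod.DeduplicationDuration(2.seconds.asProtobuf)
+//     )
+//   // re-submitting `request` (even with a fresh submission ID) within those
+//   // 2 seconds must fail as a duplicate instead of creating a second contract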
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.grpc.GrpcStatus +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.FutureAssertions.* +import com.daml.ledger.api.testtool.infrastructure.ProtobufConverters.* +import com.daml.ledger.api.testtool.infrastructure.assertions.CommandDeduplicationAssertions.{ + assertDeduplicationDuration, + assertDeduplicationOffset, +} +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.testtool.infrastructure.time.{DelayMechanism, Durations} +import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, Party} +import com.daml.ledger.api.v2.command_service.SubmitAndWaitRequest +import com.daml.ledger.api.v2.command_submission_service.SubmitRequest +import com.daml.ledger.api.v2.commands.Commands.DeduplicationPeriod +import com.daml.ledger.api.v2.completion.Completion +import com.daml.ledger.api.v2.completion.Completion.DeduplicationPeriod as CompletionDeduplicationPeriod +import com.daml.ledger.test.java.model.test.{Dummy, DummyWithAnnotation} +import com.daml.logging.LoggingContext +import com.digitalasset.base.error.ErrorCode +import com.digitalasset.canton.ledger.api.SubmissionIdGenerator +import com.digitalasset.canton.ledger.error.groups.{CommandExecutionErrors, ConsistencyErrors} +import com.digitalasset.daml.lf.data.Ref.{LedgerString, SubmissionId} +import io.grpc.Status.Code + +import java.time +import scala.concurrent.duration.* +import scala.concurrent.{ExecutionContext, Future} + +final class CommandDeduplicationIT( + timeoutScaleFactor: Double +) extends LedgerTestSuite { + + private implicit val loggingContext: LoggingContext = LoggingContext.ForTesting + private val deduplicationDuration: FiniteDuration = + Durations.scaleDuration(2.seconds, timeoutScaleFactor) + + test( + s"SimpleDeduplicationBasic", + "Deduplicate commands within the deduplication duration window", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val request = ledger + .submitRequest(party, new DummyWithAnnotation(party, "Duplicate command").create.commands) + .update( + _.commands.deduplicationPeriod := + DeduplicationPeriod.DeduplicationDuration(deduplicationDuration.asProtobuf) + ) + val firstAcceptedSubmissionId = newSubmissionId() + for { + // Submit command (first deduplication window) + // Note: the second submit() in this block is deduplicated and thus rejected by the ledger API server, + // only one submission is therefore sent to the ledger. + response <- submitRequestAndAssertCompletionAccepted( + ledger, + updateSubmissionId(request, firstAcceptedSubmissionId), + party, + ) + _ <- submitRequestAndAssertDeduplication( + ledger, + request, + firstAcceptedSubmissionId, + response.offset, + party, + ) + // Inspect created contracts + _ <- assertPartyHasActiveContracts( + ledger, + party, + noOfActiveContracts = 1, + ) + } yield { + assert( + response.commandId == request.commands.get.commandId, + "The command ID of the first completion does not match the command ID of the submission", + ) + } + }) + + test( + s"StopOnSubmissionFailure", + "Stop deduplicating commands on submission failure", + allocate(TwoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob))) => + // Do not set the deduplication timeout. 
+ // The server will default to the maximum possible deduplication timeout. + val requestA = ledger.submitRequest(alice, new Dummy(bob).create.commands) + + for { + // Submit an invalid command (should fail with INVALID_ARGUMENT) + _ <- submitRequestAndAssertSyncFailure( + ledger, + requestA, + Code.INVALID_ARGUMENT, + CommandExecutionErrors.Interpreter.AuthorizationError, + ) + + // Re-submit the invalid command (should again fail with INVALID_ARGUMENT and not with ALREADY_EXISTS) + _ <- submitRequestAndAssertSyncFailure( + ledger, + updateWithFreshSubmissionId(requestA), + Code.INVALID_ARGUMENT, + CommandExecutionErrors.Interpreter.AuthorizationError, + ) + } yield {} + }) + + test( + s"SimpleDeduplicationCommandClient", + "Deduplicate commands within the deduplication time window using the command client", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val request = ledger + .submitAndWaitRequest(party, new Dummy(party).create.commands) + .update( + _.commands.deduplicationDuration := deduplicationDuration.asProtobuf + ) + val acceptedSubmissionId1 = newSubmissionId() + for { + // Submit command (first deduplication window) + response <- submitAndWaitRequestAndAssertCompletionAccepted( + ledger, + updateSubmissionId(request, acceptedSubmissionId1), + party, + ) + _ <- submitAndWaitRequestAndAssertDeduplication( + ledger, + updateWithFreshSubmissionId(request), + acceptedSubmissionId1, + response.offset, + ) + + // Inspect created contract + _ <- assertPartyHasActiveContracts( + ledger, + party, + noOfActiveContracts = 1, + ) + } yield {} + }) + + // staticTime - we run calls in parallel and with static time we would need to advance the time, + // therefore this cannot be run in static time + testGivenAllParticipants( + "DeduplicationMixedClients", + "Deduplicate commands within the deduplication time window using the command client and the command submission client", + allocate(Parties(16)), + enabled = !_.staticTime, + disabledReason = "Cannot work in static time as we run multiple test cases in parallel", + runConcurrently = false, // updates the time model + timeoutScale = 3, + )(implicit ec => + _ => { case Participants(Participant(ledger, parties)) => + def generateVariations(elements: List[List[Boolean]]): List[List[Boolean]] = + elements match { + case Nil => List(Nil) + case currentElement :: tail => + currentElement.flatMap(value => generateVariations(tail).map(value :: _)) + } + + runWithTimeModel { minMaxSkewSum => + val numberOfCalls = 4 + // cover all the different generated variations of submit and submitAndWait + val allGeneratedVariations = + generateVariations(List.fill(numberOfCalls)(List(true, false))).zip(parties) + forAllParallel(allGeneratedVariations) { + case (firstCall :: secondCall :: thirdCall :: fourthCall :: Nil, party) => + mixedClientsCommandDeduplicationTestCase( + ledger, + party, + ledger.delayMechanism, + minMaxSkewSum, + )( + firstCall, + secondCall, + thirdCall, + fourthCall, + ) + case _ => throw new IllegalArgumentException("Wrong call list constructed") + } + .map(_ => ()) + } + } + ) + + private def mixedClientsCommandDeduplicationTestCase( + ledger: ParticipantTestContext, + party: Party, + delay: DelayMechanism, + skews: FiniteDuration, + )(firstCall: Boolean, secondCall: Boolean, thirdCall: Boolean, fourthCall: Boolean)(implicit + ec: ExecutionContext + ) = { + val submitAndWaitRequest = ledger + .submitAndWaitRequest(party, new Dummy(party).create.commands) + .update( + 
_.commands.deduplicationDuration := deduplicationDuration.asProtobuf + ) + val submitRequest = ledger + .submitRequest(party, new Dummy(party).create.commands) + .update( + _.commands.commandId := submitAndWaitRequest.getCommands.commandId, + _.commands.deduplicationDuration := deduplicationDuration.asProtobuf, + ) + + def submitAndAssertAccepted( + submitAndWait: Boolean + ): Future[Completion] = { + val acceptedSubmissionId: SubmissionId = newSubmissionId() + if (submitAndWait) + submitAndWaitRequestAndAssertCompletionAccepted( + ledger, + updateSubmissionId(submitAndWaitRequest, acceptedSubmissionId), + party, + ) + else + submitRequestAndAssertCompletionAccepted( + ledger, + updateSubmissionId(submitRequest, acceptedSubmissionId), + party, + ) + } + + def submitAndAssertDeduplicated( + submitAndWait: Boolean, + acceptedSubmissionId: SubmissionId, + acceptedParticipantOffset: Long, + ): Future[Option[Completion]] = + if (submitAndWait) + submitAndWaitRequestAndAssertDeduplication( + ledger, + updateWithFreshSubmissionId(submitAndWaitRequest), + acceptedSubmissionId, + acceptedParticipantOffset, + ).map(_ => None) + else + submitRequestAndAssertDeduplication( + ledger, + updateWithFreshSubmissionId(submitRequest), + acceptedSubmissionId, + acceptedParticipantOffset, + party, + ).map(Some(_)) + + for { + // Submit command (first deduplication window) + firstAcceptedCommand <- submitAndAssertAccepted(firstCall) + duplicateResponse <- submitAndAssertDeduplicated( + secondCall, + LedgerString.assertFromString(firstAcceptedCommand.submissionId), + firstAcceptedCommand.offset, + ) + deduplicationDurationFromPeriod = extractDurationFromDeduplicationPeriod( + deduplicationCompletionResponse = duplicateResponse, + defaultDuration = deduplicationDuration, + skews = skews, + ) + eventuallyAccepted <- succeedsEventually( + maxRetryDuration = deduplicationDurationFromPeriod + skews + 10.seconds, + description = + s"Deduplication period expires and request is accepted for command ${submitRequest.getCommands}.", + delayMechanism = delay, + ) { + submitAndAssertAccepted(thirdCall) + } + _ = assert( + time.Duration + .between( + firstAcceptedCommand.getSynchronizerTime.getRecordTime.asJavaInstant, + eventuallyAccepted.getSynchronizerTime.getRecordTime.asJavaInstant, + ) + .toNanos > deduplicationDuration.toNanos, + s"Interval between accepted commands is smaller than the deduplication duration. First accepted command record time: ${firstAcceptedCommand.getSynchronizerTime.getRecordTime.asJavaInstant}. 
Second accepted command record time: ${eventuallyAccepted.getSynchronizerTime.getRecordTime.asJavaInstant}",
+      )
+      _ <- submitAndAssertDeduplicated(
+        fourthCall,
+        LedgerString.assertFromString(eventuallyAccepted.submissionId),
+        eventuallyAccepted.offset,
+      )
+      _ <- assertPartyHasActiveContracts(
+        ledger,
+        party = party,
+        noOfActiveContracts = 2,
+      )
+    } yield {}
+  }
+
+  test(
+    "DeduplicateSubmitterBasic",
+    "Commands with identical submitter and command identifier should be deduplicated by the submission client",
+    allocate(TwoParties),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob))) =>
+    val aliceRequest = ledger.submitRequest(alice, new Dummy(alice).create.commands)
+    val bobRequest = ledger
+      .submitRequest(bob, new Dummy(bob).create.commands)
+      .update(_.commands.commandId := aliceRequest.getCommands.commandId)
+
+    val aliceAcceptedSubmissionId = newSubmissionId()
+    val bobAcceptedSubmissionId = newSubmissionId()
+
+    for {
+      // Submit a command as alice
+      aliceResponse <- submitRequestAndAssertCompletionAccepted(
+        ledger,
+        updateSubmissionId(aliceRequest, aliceAcceptedSubmissionId),
+        alice,
+      )
+      _ <- submitRequestAndAssertDeduplication(
+        ledger,
+        updateWithFreshSubmissionId(aliceRequest),
+        aliceAcceptedSubmissionId,
+        aliceResponse.offset,
+        alice,
+      )
+
+      // Submit another command that uses the same commandId, but is submitted by Bob
+      bobResponse <- submitRequestAndAssertCompletionAccepted(
+        ledger,
+        updateSubmissionId(bobRequest, bobAcceptedSubmissionId),
+        bob,
+      )
+      _ <- submitRequestAndAssertDeduplication(
+        ledger,
+        updateWithFreshSubmissionId(bobRequest),
+        bobAcceptedSubmissionId,
+        bobResponse.offset,
+        bob,
+      )
+      _ <- assertPartyHasActiveContracts(
+        ledger,
+        party = alice,
+        noOfActiveContracts = 1,
+      )
+      _ <- assertPartyHasActiveContracts(
+        ledger,
+        party = bob,
+        noOfActiveContracts = 1,
+      )
+    } yield {}
+  })
+
+  test(
+    "DeduplicateSubmitterCommandClient",
+    "Commands with identical submitter and command identifier should be deduplicated by the command client",
+    allocate(TwoParties),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob))) =>
+    val aliceRequest = ledger.submitAndWaitRequest(alice, new Dummy(alice).create.commands)
+    val bobRequest = ledger
+      .submitAndWaitRequest(bob, new Dummy(bob).create.commands)
+      .update(_.commands.commandId := aliceRequest.getCommands.commandId)
+
+    val aliceAcceptedSubmissionId = newSubmissionId()
+    val bobAcceptedSubmissionId = newSubmissionId()
+    for {
+      // Submit a command as alice
+      aliceResponse <- submitAndWaitRequestAndAssertCompletionAccepted(
+        ledger,
+        updateSubmissionId(aliceRequest, aliceAcceptedSubmissionId),
+        alice,
+      )
+      _ <- submitAndWaitRequestAndAssertDeduplication(
+        ledger,
+        updateWithFreshSubmissionId(aliceRequest),
+        aliceAcceptedSubmissionId,
+        aliceResponse.offset,
+      )
+
+      // Submit another command that uses the same commandId, but is submitted by Bob
+      bobResponse <- submitAndWaitRequestAndAssertCompletionAccepted(
+        ledger,
+        updateSubmissionId(bobRequest, bobAcceptedSubmissionId),
+        bob,
+      )
+      _ <- submitAndWaitRequestAndAssertDeduplication(
+        ledger,
+        updateWithFreshSubmissionId(bobRequest),
+        bobAcceptedSubmissionId,
+        bobResponse.offset,
+      )
+      // Inspect the ledger state
+      _ <- assertPartyHasActiveContracts(
+        ledger,
+        party = alice,
+        noOfActiveContracts = 1,
+      )
+      _ <- assertPartyHasActiveContracts(
+        ledger,
+        party = bob,
+        noOfActiveContracts = 1,
+      )
+    } yield {}
+  })
+
+  testGivenAllParticipants(
+    shortIdentifier =
s"DeduplicateUsingDurations", + description = "Deduplicate commands within the deduplication period defined by a duration", + partyAllocation = allocate(SingleParty), + runConcurrently = false, // updates the time model + disabledReason = + "Most of the assertions run on async responses. Also, ledgers with the sync-only deduplication support use the wall clock for deduplication.", + )(implicit ec => + _ => { case Participants(Participant(ledger, Seq(party))) => + val request = ledger + .submitRequest(party, new DummyWithAnnotation(party, "Duplicate command").create.commands) + .update( + _.commands.deduplicationPeriod := + DeduplicationPeriod.DeduplicationDuration(deduplicationDuration.asProtobuf) + ) + val firstAcceptedSubmissionId = newSubmissionId() + runWithTimeModel { minMaxSkewSum => + for { + completionResponse <- submitRequestAndAssertCompletionAccepted( + ledger, + updateSubmissionId(request, firstAcceptedSubmissionId), + party, + ) + deduplicationCompletionResponse <- submitRequestAndAssertDeduplication( + ledger, + updateWithFreshSubmissionId(request), + firstAcceptedSubmissionId, + completionResponse.offset, + party, + ) + deduplicationDurationFromPeriod = extractDurationFromDeduplicationPeriod( + deduplicationCompletionResponse = Some(deduplicationCompletionResponse), + defaultDuration = deduplicationDuration, + skews = minMaxSkewSum, + ) + eventuallyAcceptedCompletionResponse <- succeedsEventually( + maxRetryDuration = deduplicationDurationFromPeriod + minMaxSkewSum + 10.seconds, + description = + s"The deduplication period expires and the request is accepted for the commands ${request.getCommands}.", + delayMechanism = ledger.delayMechanism, + ) { + submitRequestAndAssertCompletionAccepted( + ledger, + updateSubmissionId(request, firstAcceptedSubmissionId), + party, + ) + } + _ <- assertPartyHasActiveContracts( + ledger, + party, + noOfActiveContracts = 2, + ) + _ <- assertDeduplicationDuration( + deduplicationDuration.asProtobuf, + deduplicationCompletionResponse, + party, + ledger, + ) + _ <- assertDeduplicationDuration( + deduplicationDuration.asProtobuf, + eventuallyAcceptedCompletionResponse, + party, + ledger, + ) + } yield { + assert( + completionResponse.commandId == request.commands.get.commandId, + "The command ID of the first completion does not match the command ID of the submission", + ) + } + } + } + ) + + testGivenAllParticipants( + "DeduplicateUsingOffsets", + "Deduplicate commands within the deduplication period defined by the offset", + allocate(SingleParty), + runConcurrently = false, // updates the time model + )(implicit ec => + _ => { case Participants(Participant(ledger, Seq(party))) => + val request = ledger + .submitRequest(party, new DummyWithAnnotation(party, "Duplicate command").create.commands) + val acceptedSubmissionId = newSubmissionId() + runWithTimeModel { _ => + val dummyRequest = ledger.submitRequest( + party, + new DummyWithAnnotation( + party, + "Dummy command to generate a completion offset", + ).create.commands, + ) + for { + // Send a dummy command to the ledger so that we obtain a recent offset + // We should be able to just grab the current ledger end, + // but the converter from offsets to durations cannot handle this yet. 
+ offsetBeforeFirstCompletion <- submitRequestAndAssertCompletionAccepted( + ledger, + dummyRequest, + party, + ).map(_.offset) + firstAcceptedResponse <- submitRequestAndAssertCompletionAccepted( + ledger, + updateSubmissionId(request, acceptedSubmissionId), + party, + ) + // Submit command again using the first offset as the deduplication offset + duplicateResponse <- submitRequestAndAssertDeduplication( + ledger, + updateWithFreshSubmissionId( + request.update( + _.commands.deduplicationPeriod := DeduplicationPeriod.DeduplicationOffset( + offsetBeforeFirstCompletion + ) + ) + ), + acceptedSubmissionId, + firstAcceptedResponse.offset, + party, + ) + // Submit command again using the rejection offset as a deduplication period + secondAcceptedResponse <- submitRequestAndAssertCompletionAccepted( + ledger, + updateWithFreshSubmissionId( + request.update( + _.commands.deduplicationPeriod := DeduplicationPeriod.DeduplicationOffset( + duplicateResponse.offset + ) + ) + ), + party, + ) + } yield { + assertDeduplicationOffset( + firstAcceptedResponse, + duplicateResponse, + ) + assertDeduplicationOffset( + duplicateResponse, + secondAcceptedResponse, + ) + } + } + } + ) + + protected def assertPartyHasActiveContracts( + ledger: ParticipantTestContext, + party: Party, + noOfActiveContracts: Int, + )(implicit ec: ExecutionContext): Future[Unit] = + ledger + .activeContracts(Some(Seq(party))) + .map(contracts => + assert( + contracts.length == noOfActiveContracts, + s"Expected $noOfActiveContracts active contracts for $party but found ${contracts.length} active contracts", + ) + ) + + protected def submitRequestAndAssertCompletionAccepted( + ledger: ParticipantTestContext, + request: SubmitRequest, + parties: Party* + )(implicit + ec: ExecutionContext + ): Future[Completion] = + submitRequestAndAssertCompletion(ledger, request, parties*) { completion => + assertCompletionStatus(request.toString, completion, Code.OK) + } + + protected def submitAndWaitRequestAndAssertCompletionAccepted( + ledger: ParticipantTestContext, + request: SubmitAndWaitRequest, + parties: Party* + )(implicit + ec: ExecutionContext + ): Future[Completion] = + submitAndWaitRequestAndAssertCompletion(ledger, request, parties*) { completion => + assertCompletionStatus(request.toString, completion, Code.OK) + } + + private def submitRequestAndAssertSyncFailure( + ledger: ParticipantTestContext, + request: SubmitRequest, + grpcCode: Code, + errorCode: ErrorCode, + additionalErrorAssertions: Throwable => Unit = _ => (), + )(implicit ec: ExecutionContext): Future[Unit] = + ledger + .submit(request) + .mustFail(s"Request expected to fail with code $grpcCode") + .map( + assertGrpcError( + _, + errorCode, + exceptionMessageSubstring = None, + checkDefiniteAnswerMetadata = true, + additionalErrorAssertions, + ) + ) + + protected def submitAndWaitRequestAndAssertDeduplication( + ledger: ParticipantTestContext, + request: SubmitAndWaitRequest, + acceptedSubmissionId: SubmissionId, + acceptedOffset: Long, + )(implicit ec: ExecutionContext): Future[Unit] = + ledger + .submitRequestAndTolerateGrpcError( + ConsistencyErrors.SubmissionAlreadyInFlight, + _.submitAndWait(request), + ) + .mustFail("Request was accepted but we were expecting it to fail with a duplicate error") + .map( + assertGrpcError( + _, + ConsistencyErrors.DuplicateCommand, + None, + checkDefiniteAnswerMetadata = true, + additionalErrorAssertions = assertDeduplicatedSubmissionIdAndOffsetOnError( + acceptedSubmissionId, + acceptedOffset, + _, + ), + ) + ) + + protected def 
submitRequestAndAssertDeduplication( + ledger: ParticipantTestContext, + request: SubmitRequest, + acceptedSubmissionId: SubmissionId, + acceptedOffset: Long, + parties: Party* + )(implicit ec: ExecutionContext): Future[Completion] = + submitRequestAndAssertCompletion( + ledger, + request, + parties* + ) { completion => + assertCompletionStatus( + request.toString, + completion, + Code.ALREADY_EXISTS, // Deduplication error + ) + assertDeduplicatedSubmissionIdAndOffsetOnCompletion( + acceptedSubmissionId, + acceptedOffset, + completion, + ) + } + + private def assertCompletionStatus( + requestString: String, + response: Completion, + statusCodes: Code* + ): Unit = + assert( + statusCodes.exists(response.getStatus.code == _.value()), + s"""Expecting completion with status code(s) ${statusCodes.mkString( + "," + )} but completion has status ${response.status}. + |Request: $requestString + |Response: $response + |Metadata: ${extractErrorInfoMetadata( + GrpcStatus.toJavaProto(response.getStatus) + )}""".stripMargin, + ) + + private def assertDeduplicatedSubmissionIdAndOffsetOnError( + acceptedSubmissionId: SubmissionId, + acceptedCompletionOffset: Long, + t: Throwable, + ): Unit = t match { + case exception: Exception => + val metadata = extractErrorInfoMetadata(exception) + assertExistingSubmissionIdOnMetadata(metadata, acceptedSubmissionId) + assertExistingCompletionOffsetOnMetadata(metadata, acceptedCompletionOffset) + case _ => () + } + + private def assertDeduplicatedSubmissionIdAndOffsetOnCompletion( + acceptedSubmissionId: SubmissionId, + acceptedCompletionOffset: Long, + response: Completion, + ): Unit = { + val metadata = extractErrorInfoMetadata( + GrpcStatus.toJavaProto(response.getStatus) + ) + assertExistingSubmissionIdOnMetadata(metadata, acceptedSubmissionId) + assertExistingCompletionOffsetOnMetadata(metadata, acceptedCompletionOffset) + } + + private def assertExistingSubmissionIdOnMetadata( + metadata: Map[String, String], + acceptedSubmissionId: SubmissionId, + ): Unit = + metadata.get("existing_submission_id").foreach { metadataExistingSubmissionId => + assertEquals( + "submission ID mismatch", + metadataExistingSubmissionId, + acceptedSubmissionId, + ) + } + + private def assertExistingCompletionOffsetOnMetadata( + metadata: Map[String, String], + acceptedCompletionOffset: Long, + ): Unit = + metadata.get("completion_offset").foreach { offset => + assertEquals( + "completion offset mismatch", + offset.toLong, + acceptedCompletionOffset, + ) + } + + private def submitRequestAndAssertCompletion( + ledger: ParticipantTestContext, + request: SubmitRequest, + parties: Party* + )( + additionalCompletionAssertion: Completion => Unit + )(implicit + ec: ExecutionContext + ): Future[Completion] = + ledger + .submitRequestAndTolerateGrpcError( + ConsistencyErrors.SubmissionAlreadyInFlight, + submitRequestAndFindCompletion(_, request, parties*), + ) + .map { response => + additionalCompletionAssertion(response) + response + } + + private def submitAndWaitRequestAndAssertCompletion( + ledger: ParticipantTestContext, + request: SubmitAndWaitRequest, + parties: Party* + )( + additionalCompletionAssertion: Completion => Unit + )(implicit + ec: ExecutionContext + ): Future[Completion] = + submitRequestAndFindCompletion(ledger, request, parties*).map { response => + additionalCompletionAssertion(response) + response + } + + protected def submitRequestAndFindCompletion( + ledger: ParticipantTestContext, + request: SubmitRequest, + parties: Party* + )(implicit + ec: ExecutionContext + ): 
Future[Completion] = + submitRequest(ledger)(request) + .flatMap { ledgerEnd => + ledger + .findCompletion(ledger.completionStreamRequest(ledgerEnd)(parties*)) { completion => + request.commands.map(_.submissionId).contains(completion.submissionId) + } + .map(_.toList) + } + .map { completions => + assertSingleton("Expected only one completion", completions) + } + + protected def submitRequestAndFindCompletion( + ledger: ParticipantTestContext, + request: SubmitAndWaitRequest, + parties: Party* + )(implicit + ec: ExecutionContext + ): Future[Completion] = + ledger + .submitAndWait(request) + .flatMap { _ => + ledger + .findCompletion(ledger.completionStreamRequest()(parties*)) { completion => + request.commands.map(_.submissionId).contains(completion.submissionId) + } + .map(_.toList) + } + .map { completions => + assert( + completions.head.offset > 0, + "Expected a populated completion offset", + ) + assertSingleton("Expected only one completion", completions) + } + + protected def submitRequest( + ledger: ParticipantTestContext + )( + request: SubmitRequest + )(implicit ec: ExecutionContext): Future[Long] = for { + ledgerEnd <- ledger.currentEnd() + _ <- ledger.submit(request) + } yield { + ledgerEnd + } + + private def updateSubmissionId( + request: SubmitRequest, + submissionId: SubmissionId, + ): SubmitRequest = + request.update(_.commands.submissionId := submissionId) + + private def updateSubmissionId( + request: SubmitAndWaitRequest, + acceptedSubmissionId1: SubmissionId, + ): SubmitAndWaitRequest = + request.update(_.commands.submissionId := acceptedSubmissionId1) + + private def updateWithFreshSubmissionId(request: SubmitRequest): SubmitRequest = + request.update(_.commands.submissionId := newSubmissionId()) + + private def updateWithFreshSubmissionId(request: SubmitAndWaitRequest): SubmitAndWaitRequest = + request.update(_.commands.submissionId := newSubmissionId()) + + private def newSubmissionId(): SubmissionId = SubmissionIdGenerator.Random.generate() + + val defaultCantonSkew = 365.days + + private def runWithTimeModel( + testWithDelayMechanism: FiniteDuration => Future[Unit] + ): Future[Unit] = + testWithDelayMechanism(Durations.asFiniteDuration(defaultCantonSkew)) + + private def extractDurationFromDeduplicationPeriod( + deduplicationCompletionResponse: Option[Completion], + defaultDuration: FiniteDuration, + skews: FiniteDuration, + ): FiniteDuration = + deduplicationCompletionResponse + .map(_.deduplicationPeriod) + .map { + case CompletionDeduplicationPeriod.Empty => + throw new IllegalStateException("received empty completion") + case CompletionDeduplicationPeriod.DeduplicationOffset(_) => + defaultDuration + case CompletionDeduplicationPeriod.DeduplicationDuration(value) => + value.asScala + } + .getOrElse(defaultDuration + skews) + .asInstanceOf[FiniteDuration] + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CommandDeduplicationParallelIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CommandDeduplicationParallelIT.scala new file mode 100644 index 0000000000..3bfbf0ba66 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CommandDeduplicationParallelIT.scala @@ -0,0 +1,175 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
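`CommandDeduplicationParallelIT` below fires ten identical submissions concurrently and expects exactly one `OK` while the other nine surface as duplicates (`ALREADY_EXISTS`, or `ABORTED` for races against a still-in-flight submission). At its core this is a first-writer-wins race, which a few lines of plain Scala can model (the status strings here are illustrative stand-ins for the gRPC codes):

```scala
import java.util.concurrent.atomic.AtomicBoolean
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.DurationInt
import scala.concurrent.{Await, Future}

// First-writer-wins race: of N concurrent submissions sharing one command ID,
// exactly one may be accepted; all others must surface as duplicates.
val accepted = new AtomicBoolean(false)

def submit(): Future[String] = Future {
  if (accepted.compareAndSet(false, true)) "OK" else "ALREADY_EXISTS"
}

val codes = Await.result(Future.traverse((1 to 10).toList)(_ => submit()), 5.seconds)
val byCode = codes.groupBy(identity).view.mapValues(_.size).toMap

// Mirrors the assertion in runTestWithSubmission below:
// one acceptance, numberOfParallelRequests - 1 duplicates.
assert(byCode.getOrElse("OK", 0) == 1)
assert(byCode.getOrElse("ALREADY_EXISTS", 0) == 9)
```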
+
+package com.daml.ledger.api.testtool.suites.v2_1
+
+import com.daml.grpc.GrpcException
+import com.daml.ledger.api.testtool.infrastructure.Allocation.{
+  Participant,
+  Participants,
+  SingleParty,
+  allocate,
+}
+import com.daml.ledger.api.testtool.infrastructure.Assertions.fail
+import com.daml.ledger.api.testtool.infrastructure.ProtobufConverters.*
+import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext
+import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, Party}
+import com.daml.ledger.api.v2.command_service.SubmitAndWaitRequest
+import com.daml.ledger.api.v2.command_submission_service.SubmitRequest
+import com.daml.ledger.api.v2.commands.Commands.DeduplicationPeriod
+import com.daml.ledger.test.java.model.test.DummyWithAnnotation
+import io.grpc.Status
+import io.grpc.Status.Code
+
+import java.util.UUID
+import scala.concurrent.duration.*
+import scala.concurrent.{ExecutionContext, Future}
+import scala.util.{Failure, Random, Success}
+
+class CommandDeduplicationParallelIT extends LedgerTestSuite {
+
+  private val deduplicationDuration = 3.seconds
+  private val numberOfParallelRequests = 10
+
+  test(
+    "DeduplicateParallelSubmissionsUsingCommandSubmissionService",
+    "Commands submitted at the same time, in parallel, using the CommandSubmissionService, should be deduplicated",
+    allocate(SingleParty),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    lazy val request = buildSubmitRequest(ledger, party)
+    runTestWithSubmission[SubmitRequest](
+      ledger,
+      party,
+      () => submitRequestAndGetStatusCode(ledger)(request, party),
+    )
+  })
+
+  test(
+    "DeduplicateParallelSubmissionsUsingCommandService",
+    "Commands submitted at the same time, in parallel, using the CommandService, should be deduplicated",
+    allocate(SingleParty),
+    runConcurrently = false,
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    val request = buildSubmitAndWaitRequest(ledger, party)
+    runTestWithSubmission[SubmitAndWaitRequest](
+      ledger,
+      party,
+      () => submitAndWaitRequestAndGetStatusCode(ledger)(request, party),
+    )
+  })
+
+  test(
+    "DeduplicateParallelSubmissionsUsingMixedCommandServiceAndCommandSubmissionService",
+    "Commands submitted at the same time, in parallel, using the CommandService and the CommandSubmissionService, should be deduplicated",
+    allocate(SingleParty),
+    runConcurrently = false,
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    val submitAndWaitRequest = buildSubmitAndWaitRequest(ledger, party)
+    val submitRequest = buildSubmitRequest(ledger, party).update(
+      _.commands.commandId := submitAndWaitRequest.getCommands.commandId
+    )
+    runTestWithSubmission[SubmitAndWaitRequest](
+      ledger,
+      party,
+      () =>
+        if (Random.nextBoolean())
+          submitAndWaitRequestAndGetStatusCode(ledger)(submitAndWaitRequest, party)
+        else submitRequestAndGetStatusCode(ledger)(submitRequest, party),
+    )
+  })
+
+  private def runTestWithSubmission[T](
+      ledger: ParticipantTestContext,
+      party: Party,
+      submitRequestAndGetStatus: () => Future[Code],
+  )(implicit
+      ec: ExecutionContext
+  ) =
+    for {
+      responses <- Future
+        .traverse(Seq.fill(numberOfParallelRequests)(()))(_ => submitRequestAndGetStatus())
+        .map(_.groupBy(identity).view.mapValues(_.size).toMap)
+      activeContracts <- ledger.activeContracts(Some(Seq(party)))
+    } yield {
+      val expectedDuplicateResponses = numberOfParallelRequests - 1
+      val okResponses = responses.getOrElse(Code.OK, 0)
+      val alreadyExistsResponses =
responses.getOrElse(Code.ALREADY_EXISTS, 0) + // Canton can return ABORTED for parallel in-flight duplicate submissions + val abortedResponses = responses.getOrElse(Code.ABORTED, 0) + val duplicateResponses = alreadyExistsResponses + abortedResponses + assert( + okResponses == 1 && duplicateResponses == numberOfParallelRequests - 1, + s"Expected $expectedDuplicateResponses duplicate responses and one accepted, got $responses", + ) + assert(activeContracts.size == 1) + } + + private def buildSubmitRequest( + ledger: ParticipantTestContext, + party: Party, + ) = ledger + .submitRequest( + party, + new DummyWithAnnotation(party, "Duplicate Using CommandSubmissionService").create.commands, + ) + .update( + _.commands.deduplicationPeriod := DeduplicationPeriod.DeduplicationDuration( + deduplicationDuration.asProtobuf + ) + ) + + private def buildSubmitAndWaitRequest( + ledger: ParticipantTestContext, + party: Party, + ) = ledger + .submitAndWaitRequest( + party, + new DummyWithAnnotation(party, "Duplicate using CommandService").create.commands, + ) + .update( + _.commands.deduplicationDuration := deduplicationDuration.asProtobuf + ) + + private def submitAndWaitRequestAndGetStatusCode( + ledger: ParticipantTestContext + )(request: SubmitAndWaitRequest, parties: Party*)(implicit ec: ExecutionContext) = { + val submissionId = UUID.randomUUID().toString + val requestWithSubmissionId = request.update(_.commands.submissionId := submissionId) + val submitResult = ledger.submitAndWait(requestWithSubmissionId) + submissionResultToFinalStatusCode(ledger)(submitResult.map(_ => ()), submissionId, parties*) + } + + protected def submitRequestAndGetStatusCode( + ledger: ParticipantTestContext + )(request: SubmitRequest, parties: Party*)(implicit ec: ExecutionContext): Future[Code] = { + val submissionId = UUID.randomUUID().toString + val requestWithSubmissionId = request.update(_.commands.submissionId := submissionId) + val submitResult = ledger + .submit(requestWithSubmissionId) + submissionResultToFinalStatusCode(ledger)(submitResult, submissionId, parties*) + } + + private def submissionResultToFinalStatusCode( + ledger: ParticipantTestContext + )(submitResult: Future[Unit], submissionId: String, parties: Party*)(implicit + ec: ExecutionContext + ) = submitResult + .transformWith { + case Failure(exception) => + exception match { + case GrpcException(status, _) => + Future.successful(status.getCode) + case otherException => + fail(s"Not a GRPC exception $otherException", otherException) + } + case Success(_) => + ledger + .findCompletion(parties*)(completion => completion.submissionId == submissionId) + .map { + case Some(response) => + Status.fromCodeValue(response.getStatus.code).getCode + case None => + fail(s"Did not find completion for request with submission id $submissionId") + } + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CommandDeduplicationPeriodValidationIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CommandDeduplicationPeriodValidationIT.scala new file mode 100644 index 0000000000..ca26ed45a3 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CommandDeduplicationPeriodValidationIT.scala @@ -0,0 +1,198 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
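`CommandDeduplicationPeriodValidationIT` below covers the request-validation rules that run before any deduplication happens: a deduplication duration must not be negative, a deduplication offset must not be negative (zero is accepted), and an offset that has already been pruned is rejected with the earliest available offset reported in the error metadata. Reduced to a pure function over a simplified ADT, the rules look roughly like this (the `Left` strings are illustrative stand-ins for the INVALID_FIELD, NEGATIVE_OFFSET, and INVALID_DEDUPLICATION_PERIOD error codes):

```scala
import java.time.Duration

// Simplified stand-ins for the request's deduplication period variants.
sealed trait DedupPeriod
final case class ByDuration(duration: Duration) extends DedupPeriod
final case class ByOffset(offset: Long) extends DedupPeriod

// Pure sketch of the validation rules the suite exercises; earliestAvailable
// models the first offset that has not been pruned (0 when nothing is pruned).
def validate(period: DedupPeriod, earliestAvailable: Long): Either[String, DedupPeriod] =
  period match {
    case ByDuration(d) if d.isNegative =>
      Left("INVALID_FIELD: Duration must be positive")
    case ByOffset(o) if o < 0 =>
      Left(s"NEGATIVE_OFFSET: the deduplication offset has to be non-negative, got $o")
    case ByOffset(o) if o < earliestAvailable =>
      Left(s"INVALID_DEDUPLICATION_PERIOD: earliest available offset is $earliestAvailable")
    case ok => Right(ok)
  }

assert(validate(ByDuration(Duration.ofSeconds(-1)), earliestAvailable = 0L).isLeft) // NegativeDeduplicationDuration
assert(validate(ByOffset(-12345678L), earliestAvailable = 0L).isLeft)               // NegativeOffset
assert(validate(ByOffset(0L), earliestAvailable = 0L).isRight)                      // ZeroOffset accepted
assert(validate(ByOffset(7L), earliestAvailable = 100L).isLeft)                     // OffsetPruned case
```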
+
+package com.daml.ledger.api.testtool.suites.v2_1
+
+import com.daml.ledger.api.testtool.infrastructure.Allocation.*
+import com.daml.ledger.api.testtool.infrastructure.Assertions.*
+import com.daml.ledger.api.testtool.infrastructure.ProtobufConverters.*
+import com.daml.ledger.api.testtool.infrastructure.assertions.CommandDeduplicationAssertions.DurationConversion
+import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext
+import com.daml.ledger.api.testtool.infrastructure.{
+  FutureAssertions,
+  LedgerTestSuite,
+  Party,
+  TestConstraints,
+}
+import com.daml.ledger.api.v2.commands.Commands.DeduplicationPeriod
+import com.daml.ledger.test.java.model.test.Dummy
+import com.daml.logging.LoggingContext
+import com.digitalasset.base.error.ErrorCode
+import com.digitalasset.canton.ledger.error.LedgerApiErrors
+import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors
+
+import java.time.Duration
+import java.util.regex.Pattern
+import scala.concurrent.ExecutionContext
+import scala.concurrent.duration.*
+
+class CommandDeduplicationPeriodValidationIT extends LedgerTestSuite {
+  import CompanionImplicits.*
+
+  private implicit val loggingContext: LoggingContext = LoggingContext.ForTesting
+
+  test(
+    "ValidDeduplicationDuration",
+    "Submission returns OK if deduplication time is positive",
+    allocate(SingleParty),
+  )(_ => { case Participants(Participant(ledger, Seq(party))) =>
+    // Submission using the maximum allowed deduplication time
+    val request = ledger.submitRequest(party, new Dummy(party).create.commands)
+    val maxDedupDuration = Duration.ofMinutes(30).asProtobuf
+    ledger.submit(request.update(_.commands.deduplicationDuration := maxDedupDuration))
+  })
+
+  test(
+    "NegativeDeduplicationDuration",
+    "Submissions with negative deduplication durations are rejected",
+    allocate(SingleParty),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    val deduplicationPeriod = DeduplicationPeriod.DeduplicationDuration(
+      DurationConversion.toProto(Duration.ofSeconds(-1))
+    )
+    assertSyncFailedRequest(
+      ledger,
+      party,
+      deduplicationPeriod,
+      failReason = "Requests with a deduplication period represented by a negative duration",
+      expectedMessage =
+        "The submitted command has a field with invalid value: Invalid field deduplication_period: Duration must be positive",
+      expectedError = RequestValidationErrors.InvalidField,
+    )
+  })
+
+  test(
+    "NegativeOffset",
+    "Submissions with deduplication periods represented by negative offsets are rejected",
+    allocate(SingleParty),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    val offset = -12345678L
+    val deduplicationPeriod = DeduplicationPeriod.DeduplicationOffset(offset)
+    assertSyncFailedRequest(
+      ledger,
+      party,
+      deduplicationPeriod,
+      failReason = "Submitting a command with a negative offset",
+      expectedMessage =
+        s"Offset $offset in deduplication_period is a negative integer: the deduplication offset has to be a non-negative integer and not $offset",
+      expectedError = RequestValidationErrors.NegativeOffset,
+    )
+  })
+
+  test(
+    "ZeroOffset",
+    "Submissions with deduplication periods represented by zero offsets are accepted",
+    allocate(SingleParty),
+  )(_ => { case Participants(Participant(ledger, Seq(party))) =>
+    val offset = 0L
+    val request = ledger.submitRequest(party, new Dummy(party).create.commands)
+    val deduplicationPeriod = DeduplicationPeriod.DeduplicationOffset(offset)
+    ledger.submit(request.update(_.commands.deduplicationPeriod :=
deduplicationPeriod))
+  })
+
+  test(
+    "OffsetPruned",
+    "Submissions with deduplication periods represented by pruned offsets are rejected",
+    allocate(SingleParty),
+    runConcurrently = false, // Pruning is involved
+    limitation = TestConstraints.GrpcOnly("Pruning not available in JSON API"),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    def submitAndWaitWithDeduplication(
+        deduplicationPeriod: DeduplicationPeriod.DeduplicationOffset
+    ) =
+      ledger
+        .submitAndWait(
+          ledger
+            .submitAndWaitRequest(party, new Dummy(party).create.commands)
+            .update(
+              _.commands.deduplicationPeriod := deduplicationPeriod
+            )
+        )
+    for {
+      start <- ledger.currentEnd()
+      beforeBegin = 0L
+      firstCreate <- ledger.create(party, new Dummy(party))
+      _ <- ledger.exercise(party, firstCreate.exerciseDummyChoice1())
+      secondCreate: Dummy.ContractId <- ledger.create(party, new Dummy(party))
+      _ <- ledger.submitAndWait(
+        ledger.submitAndWaitRequest(party, new Dummy(party).create.commands)
+      )
+      end <- ledger.currentEnd()
+      _ <- ledger.exercise(party, secondCreate.exerciseDummyChoice1())
+      _ <- FutureAssertions.succeedsEventually(
+        retryDelay = 10.millis,
+        maxRetryDuration = 10.seconds,
+        ledger.delayMechanism,
+        "Prune offsets",
+      ) {
+        for {
+          _ <- ledger.create(party, new Dummy(party))
+          _ <- ledger.submitAndWait(
+            ledger.submitAndWaitRequest(party, new Dummy(party).create.commands)
+          )
+          _ <- ledger.prune(pruneUpTo = end, attempts = 1)
+        } yield {}
+      }
+      failure <- submitAndWaitWithDeduplication(
+        DeduplicationPeriod.DeduplicationOffset(start)
+      ).mustFail("using an offset which was pruned")
+      _ = assertGrpcErrorRegex(
+        failure,
+        // Canton returns INVALID_DEDUPLICATION_PERIOD with earliest_offset metadata
+        errorCode = RequestValidationErrors.InvalidDeduplicationPeriodField,
+        None,
+      )
+      earliestOffset = extractErrorInfoMetadataValue(
+        failure,
+        LedgerApiErrors.EarliestOffsetMetadataKey,
+      )
+      failureZero <- submitAndWaitWithDeduplication(
+        DeduplicationPeriod.DeduplicationOffset(beforeBegin)
+      ).mustFail("using an offset which was pruned")
+      _ = assertGrpcErrorRegex(
+        failureZero,
+        // Canton returns INVALID_DEDUPLICATION_PERIOD with earliest_offset metadata
+        errorCode = RequestValidationErrors.InvalidDeduplicationPeriodField,
+        None,
+      )
+      earliestOffsetZero = extractErrorInfoMetadataValue(
+        failureZero,
+        LedgerApiErrors.EarliestOffsetMetadataKey,
+      )
+      _ = assert(
+        earliestOffsetZero == earliestOffset,
+        s"Earliest offset returned when deduplication offset is zero ($earliestOffsetZero) should be equal to $earliestOffset",
+      )
+      _ <-
+        submitAndWaitWithDeduplication(
+          DeduplicationPeriod.DeduplicationOffset(earliestOffsetZero.toLong)
+        )
+    } yield {}
+  })
+
+  private def assertSyncFailedRequest(
+      ledger: ParticipantTestContext,
+      party: Party,
+      deduplicationPeriod: DeduplicationPeriod,
+      failReason: String,
+      expectedMessage: String,
+      expectedError: ErrorCode,
+  )(implicit ec: ExecutionContext) =
+    for {
+      failure <- ledger
+        .submit(
+          ledger
+            .submitRequest(party, new Dummy(party).create.commands)
+            .update(
+              _.commands.deduplicationPeriod := deduplicationPeriod
+            )
+        )
+        .mustFail(failReason)
+    } yield {
+      assertGrpcErrorRegex(
+        failure,
+        expectedError,
+        Some(Pattern.compile(expectedMessage)),
+      )
+    }
+}
diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CommandServiceIT.scala 
b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CommandServiceIT.scala
new file mode 100644
index 0000000000..edd8cab530
--- /dev/null
+++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CommandServiceIT.scala
@@ -0,0 +1,1017 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.api.testtool.suites.v2_1
+
+import com.daml.ledger.api.testtool.infrastructure.Allocation.*
+import com.daml.ledger.api.testtool.infrastructure.Assertions.*
+import com.daml.ledger.api.testtool.infrastructure.TransactionHelpers.*
+import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext
+import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, Party, TestConstraints}
+import com.daml.ledger.api.testtool.suites.v2_1.CommandServiceIT.{
+  createEventToDisclosedContract,
+  formatByPartyAndTemplate,
+}
+import com.daml.ledger.api.v2.command_service.{
+  SubmitAndWaitForTransactionRequest,
+  SubmitAndWaitRequest,
+}
+import com.daml.ledger.api.v2.commands.DisclosedContract
+import com.daml.ledger.api.v2.event.CreatedEvent
+import com.daml.ledger.api.v2.transaction.Transaction
+import com.daml.ledger.api.v2.transaction_filter.CumulativeFilter.IdentifierFilter
+import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_LEDGER_EFFECTS
+import com.daml.ledger.api.v2.transaction_filter.{
+  CumulativeFilter,
+  EventFormat,
+  Filters,
+  TemplateFilter,
+  TransactionFormat,
+  UpdateFormat,
+}
+import com.daml.ledger.api.v2.update_service.GetUpdatesRequest
+import com.daml.ledger.api.v2.value.{Identifier, Record, RecordField, Value}
+import com.daml.ledger.javaapi
+import com.daml.ledger.javaapi.data.Command
+import com.daml.ledger.test.java.model.test.*
+import com.daml.ledger.test.java.semantic.divulgencetests.DummyFlexibleController
+import com.digitalasset.base.error.{ErrorCategory, ErrorCode}
+import com.digitalasset.canton.ledger.api.TransactionShape.{AcsDelta, LedgerEffects}
+import com.digitalasset.canton.ledger.error.groups.{
+  CommandExecutionErrors,
+  ConsistencyErrors,
+  RequestValidationErrors,
+}
+import org.scalatest.Inside.inside
+
+import java.math.BigDecimal
+import java.util.List as JList
+import java.util.regex.Pattern
+import scala.concurrent.{ExecutionContext, Future}
+import scala.jdk.CollectionConverters.*
+
+import CompanionImplicits.*
+
+final class CommandServiceIT extends LedgerTestSuite with CommandSubmissionTestUtils {
+  test(
+    "CSsubmitAndWaitBasic",
+    "CSsubmitAndWaitBasic returns a valid transaction identifier",
+    allocate(SingleParty),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    val request = ledger.submitAndWaitRequest(party, new Dummy(party).create.commands)
+    for {
+      response <- ledger.submitAndWait(request)
+      updateId = response.updateId
+      retrievedTransaction <- ledger.transactionById(updateId, Seq(party))
+      transactions <- ledger.transactions(AcsDelta, party)
+    } yield {
+
+      assert(updateId.nonEmpty, "The transaction identifier was empty but shouldn't be.")
+      assert(
+        transactions.size == 1,
+        s"$party should see only one transaction but sees ${transactions.size}",
+      )
+
+      val events = transactions.head.events
+
+      assert(events.size == 1, s"$party should see only one event but sees ${events.size}")
+      assert(
+        events.head.event.isCreated,
+        s"$party should see only one create but 
sees ${events.head.event}",
+      )
+      val created = transactions.head.events.head.getCreated
+
+      assert(
+        transactions.head.externalTransactionHash.isEmpty,
+        "Expected empty external transaction hash for a local party transaction",
+      )
+
+      assert(
+        retrievedTransaction.updateId == updateId,
+        s"$party should see the transaction for the created contract $updateId but sees ${retrievedTransaction.updateId}",
+      )
+      assert(
+        retrievedTransaction.events.sizeIs == 1,
+        s"The retrieved transaction should contain a single event but contains ${retrievedTransaction.events.size}",
+      )
+      val retrievedEvent = retrievedTransaction.events.head
+
+      assert(
+        retrievedEvent.event.isCreated,
+        s"The only event seen should be a created but instead it's $retrievedEvent",
+      )
+      assert(
+        retrievedEvent.getCreated == created,
+        s"The retrieved created event does not match the one in the flat transactions: event=$created retrieved=$retrievedEvent",
+      )
+
+    }
+  })
+
+  test(
+    "CSsubmitAndWaitForTransactionBasic",
+    "SubmitAndWaitForTransaction returns a transaction",
+    allocate(SingleParty),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    val request =
+      ledger.submitAndWaitForTransactionRequest(
+        party = party,
+        commands = new Dummy(party).create.commands,
+        transactionShape = AcsDelta,
+      )
+    for {
+      transactionResponse <- ledger.submitAndWaitForTransaction(request)
+    } yield {
+      assertOnTransactionResponse(transactionResponse.getTransaction)
+    }
+  })
+
+  test(
+    "CSsubmitAndWaitForTransactionLedgerEffectsBasic",
+    "SubmitAndWaitForTransaction with LedgerEffects returns a transaction with LedgerEffects",
+    allocate(SingleParty),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    val request = ledger.submitAndWaitForTransactionRequest(
+      party,
+      new Dummy(party).createAnd().exerciseDummyChoice1().commands,
+      LedgerEffects,
+    )
+    for {
+      transactionResponse <- ledger.submitAndWaitForTransaction(request)
+    } yield {
+      val transaction = transactionResponse.getTransaction
+      assert(
+        transaction.updateId.nonEmpty,
+        "The transaction identifier was empty but shouldn't be.",
+      )
+      assert(
+        transaction.events.size == 2,
+        s"The returned transaction should contain 2 events, but contained ${transaction.events.size}",
+      )
+      val event1 = transaction.events.head
+      assert(
+        event1.event.isCreated,
+        s"The returned transaction should contain a created event, but was ${event1.event}",
+      )
+      val event2 = transaction.events.last
+      assert(
+        event2.event.isExercised,
+        s"The returned transaction should contain an exercised event, but was ${event2.event}",
+      )
+      assert(
+        event1.getCreated.getTemplateId == Dummy.TEMPLATE_ID_WITH_PACKAGE_ID.toV1,
+        s"The template ID of the created event should be ${Dummy.TEMPLATE_ID_WITH_PACKAGE_ID.toV1}, but was ${event1.getCreated.getTemplateId}",
+      )
+    }
+  })
+
+  test(
+    "CSduplicateSubmitAndWaitBasic",
+    "SubmitAndWait should fail on duplicate requests",
+    allocate(SingleParty),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    val request = ledger.submitAndWaitRequest(party, new Dummy(party).create.commands)
+    for {
+      _ <- ledger.submitAndWait(request)
+      failure <- ledger
+        .submitRequestAndTolerateGrpcError(
+          ConsistencyErrors.SubmissionAlreadyInFlight,
+          _.submitAndWait(request),
+        )
+        .mustFail("submitting a duplicate request")
+    } yield {
+      assertGrpcError(
+        failure,
+        ConsistencyErrors.DuplicateCommand,
+        None,
+        checkDefiniteAnswerMetadata = true,
+      )
+    }
+  })
+
+  test(
"CSSubmitAndWaitForTransactionFilterByParty", + "SubmitAndWaitForTransaction returns a filtered transaction (by party)", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val request = ledger.submitAndWaitForTransactionRequest( + party = party, + commands = (new Dummy(party).create.commands.asScala + ++ new DummyFactory(party).create.commands.asScala).asJava, + filterParties = Some(Seq(party)), + transactionShape = AcsDelta, + ) + for { + transactionResponse <- ledger.submitAndWaitForTransaction(request) + transaction = assertDefined( + transactionResponse.transaction, + "The transaction should be defined", + ) + } yield { + assertLength("Two create events should have been into the transaction", 2, transaction.events) + } + }) + + test( + "CSSubmitAndWaitForTransactionFilterByWrongParty", + "SubmitAndWaitForTransaction returns a transaction with empty events when filtered by wrong party", + allocate(TwoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(party, party2))) => + def request = ledger.submitAndWaitForTransactionRequest( + party = party, + commands = (new Dummy(party).create.commands.asScala + ++ new DummyFactory(party).create.commands.asScala).asJava, + filterParties = Some(Seq(party2)), + transactionShape = AcsDelta, + ) + for { + transactionResponseAcsDelta <- ledger.submitAndWaitForTransaction(request) + transactionResponseLedgerEffects <- ledger.submitAndWaitForTransaction( + request.update(_.transactionFormat.transactionShape := TRANSACTION_SHAPE_LEDGER_EFFECTS) + ) + } yield { + assertLength( + "No events should have been into the transaction", + 0, + transactionResponseAcsDelta.transaction.toList.flatMap(_.events), + ) + assertLength( + "No events should have been into the transaction", + 0, + transactionResponseLedgerEffects.transaction.toList.flatMap(_.events), + ) + } + }) + + test( + "CSSubmitAndWaitForTransactionFilterAnyParty", + "SubmitAndWaitForTransaction returns a filtered transaction (any party)", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val request = ledger.submitAndWaitForTransactionRequest( + party = party, + commands = (new Dummy(party).create.commands.asScala + ++ new DummyFactory(party).create.commands.asScala).asJava, + filterParties = None, + transactionShape = AcsDelta, + ) + for { + transactionResponse <- ledger.submitAndWaitForTransaction(request) + transaction = assertDefined( + transactionResponse.transaction, + "The transaction should be defined", + ) + } yield { + assertLength("Two create events should have been into the transaction", 2, transaction.events) + } + }) + + test( + "CSSubmitAndWaitForTransactionFilterTemplate", + "SubmitAndWaitForTransaction returns a filtered transaction (any party)", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val request = ledger.submitAndWaitForTransactionRequest( + party = party, + commands = (new DummyFactory(party).create.commands.asScala + ++ new Dummy(party).create.commands.asScala).asJava, + filterParties = None, + transactionShape = AcsDelta, + templateIds = Seq(Dummy.TEMPLATE_ID), + ) + for { + transactionResponse <- ledger.submitAndWaitForTransaction(request) + transaction = assertDefined( + transactionResponse.transaction, + "The transaction should be defined", + ) + event = assertSingleton( + "One create event should have been into the transaction", + transaction.events, + ) + } yield { + 
assertOnTransactionResponse(transactionResponse.getTransaction) + } + }) + + test( + "CSDuplicateSubmitAndWaitForTransactionData", + "SubmitAndWaitForTransaction should fail on duplicate requests", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val request = + ledger.submitAndWaitForTransactionRequest(party, new Dummy(party).create.commands, AcsDelta) + for { + _ <- ledger.submitAndWaitForTransaction(request) + failure <- ledger + .submitRequestAndTolerateGrpcError( + ConsistencyErrors.SubmissionAlreadyInFlight, + _.submitAndWaitForTransaction(request), + ) + .mustFail("submitting a duplicate request") + } yield { + assertGrpcError( + failure, + ConsistencyErrors.DuplicateCommand, + None, + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "CSSubmitAndWaitForTransactionExplicitDisclosure", + "SubmitAndWaitForTransaction returns an empty transaction when an explicitly disclosed contract is exercised", + allocate(TwoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(owner, stranger))) => + testExplicitDisclosure( + ledger = ledger, + owner = owner, + stranger = stranger, + submitAndWaitForTransactionRequest = (disclosedContract, contractId) => + ledger + .submitAndWaitForTransactionRequest( + party = stranger, + commands = contractId.exerciseFlexibleConsume(stranger).commands, + transactionShape = AcsDelta, + filterParties = Some(Seq(stranger)), + ) + .update(_.commands.disclosedContracts := scala.Seq(disclosedContract)), + getTransactionLedgerEffects = + (updateId, stranger) => ledger.transactionById(updateId, Seq(stranger), LedgerEffects), + ) + }) + + test( + "CSsubmitAndWaitInvalidSynchronizerId", + "SubmitAndWait should fail for invalid synchronizer ids", + allocate(SingleParty), + )(testInvalidSynchronizerIdSubmission(_.submitAndWait)) + + test( + "CSsubmitAndWaitForTransactionInvalidSynchronizerId", + "SubmitAndWaitForTransaction should fail for invalid synchronizer ids", + allocate(SingleParty), + )(testInvalidSynchronizerIdSubmissionForTransaction(_.submitAndWaitForTransaction)) + + test( + "CSsubmitAndWaitPrescribedSynchronizerId", + "SubmitAndWait should use the prescribed synchronizer id when routing the submission", + allocate(SingleParty).expectingMinimumNumberOfSynchronizers(2), + )({ implicit ec: ExecutionContext => + { case Participants(Participant(ledger, Seq(party, _*))) => + val (synchronizer1, synchronizer2) = inside(party.initialSynchronizers) { + case Seq(synchronizer1, synchronizer2, _*) => + synchronizer1 -> synchronizer2 + } + val requestForSynchronizer1 = ledger + .submitAndWaitRequest(party, new Dummy(party).create.commands) + .update(_.commands.synchronizerId := synchronizer1) + for { + actualSynchronizerId1 <- ledger + .submitAndWait(requestForSynchronizer1) + .map(_.updateId) + .flatMap(updateId => ledger.transactionById(updateId, Seq(party), AcsDelta)) + .map(_.synchronizerId) + requestForSynchronizer2 = ledger + .submitAndWaitRequest(party, new Dummy(party).create.commands) + .update(_.commands.synchronizerId := synchronizer2) + actualSynchronizerId2 <- ledger + .submitAndWait(requestForSynchronizer2) + .map(_.updateId) + .flatMap(updateId => ledger.transactionById(updateId, Seq(party), AcsDelta)) + .map(_.synchronizerId) + } yield { + assertEquals(actualSynchronizerId1, synchronizer1) + assertEquals(actualSynchronizerId2, synchronizer2) + } + } + }) + + test( + "CSsubmitAndWaitForTransactionPrescribedSynchronizerId", + "SubmitAndWaitForTransaction should use the 
prescribed synchronizer id when routing the submission",
+    allocate(SingleParty).expectingMinimumNumberOfSynchronizers(2),
+  )(
+    testValidSynchronizerIdSubmission(participant =>
+      request =>
+        participant
+          .submitAndWaitForTransaction(request)
+          .map(_.getTransaction.synchronizerId)(ExecutionContext.parasitic)
+    )
+  )
+
+  test(
+    "CSRefuseBadParameter",
+    "The submission of a creation that contains a bad parameter label should result in an INVALID_ARGUMENT",
+    allocate(SingleParty),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    val createWithBadArgument =
+      updateCommands(
+        new Dummy(party).create.commands,
+        _.update(_.create.createArguments.fields.foreach(_.label := "INVALID_PARAM")),
+      )
+    val badRequest = ledger.submitAndWaitRequest(party, createWithBadArgument)
+    for {
+      failure <- ledger
+        .submitAndWait(badRequest)
+        .mustFail("submitting a request with a bad parameter label")
+    } yield {
+      assertGrpcErrorRegex(
+        failure,
+        CommandExecutionErrors.Preprocessing.PreprocessingFailed,
+        // both alternatives below are possible, depending on which problem is found first
+        Some(Pattern.compile(s"Found non-optional extra field|Missing non-optional field")),
+        checkDefiniteAnswerMetadata = true,
+      )
+    }
+  })
+
+  // TODO(#16361) fix this test: This test is not asserting that an interpretation error returns a stack trace.
+  //              Furthermore, stack traces are not returned as of 1.18.
+  //              Instead, more detailed error messages with the failed transaction are provided.
+  test(
+    "CSReturnStackTrace",
+    "A submission resulting in an interpretation error should return the stack trace",
+    allocate(SingleParty),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    for {
+      dummy: Dummy.ContractId <- ledger.create(party, new Dummy(party))
+      failure <- ledger
+        .exercise(party, dummy.exerciseFailingClone())
+        .mustFail("submitting a request with an interpretation error")
+    } yield {
+      val trace =
+        """ in choice [0-9a-f]{8}:Test:Dummy:FailingChoice on contract 00[0-9a-f]{8} \(#3\)
+          | in choice [0-9a-f]{8}:Test:Dummy:FailingClone on contract 00[0-9a-f]{8} \(#0\)
+          | in exercise command [0-9a-f]{8}:Test:Dummy:FailingClone on contract 00[0-9a-f]{8}""".stripMargin
+      assertGrpcError(
+        failure,
+        new ErrorCode(
+          CommandExecutionErrors.Interpreter.FailureStatus.id,
+          ErrorCategory.InvalidGivenCurrentSystemStateOther,
+        )(
+          CommandExecutionErrors.Interpreter.FailureStatus.parent
+        ) {},
+        Some("Assertion failed"),
+        checkDefiniteAnswerMetadata = true,
+        additionalErrorAssertions = throwable =>
+          assertMatches(
+            "exercise_trace",
+            extractErrorInfoMetadataValue(throwable, "exercise_trace"),
+            Pattern.compile(trace),
+          ),
+      )
+    }
+  })
+
+  test(
+    "CSDiscloseCreateToObservers",
+    "Disclose create to observers",
+    allocate(TwoParties, SingleParty),
+  )(implicit ec => {
+    case p @ Participants(
+          Participant(alpha, Seq(giver, observer1)),
+          Participant(beta, Seq(observer2)),
+        ) =>
+      val template = new WithObservers(giver, List(observer1, observer2).map(_.getValue).asJava)
+      for {
+        _ <- alpha.create(giver, template)
+        _ <- p.synchronize
+        observer1View <- alpha.transactions(LedgerEffects, observer1)
+        observer2View <- beta.transactions(LedgerEffects, observer2)
+      } yield {
+        val observer1Created = assertSingleton(
+          "The first observer should see exactly one creation",
+          observer1View.flatMap(createdEvents),
+        )
+        val observer2Created = assertSingleton(
+          "The second observer should see exactly one creation",
+          observer2View.flatMap(createdEvents),
+        )
+        
assertEquals( + "The two observers should see the same creation", + observer1Created.getCreateArguments.fields, + observer2Created.getCreateArguments.fields, + ) + assertEquals( + "The observers should see the created contract", + observer1Created.getCreateArguments.fields, + Value.fromJavaProto(template.toValue.toProto).getRecord.fields, + ) + } + }) + + test( + "CSDiscloseExerciseToObservers", + "Disclose exercise to observers", + allocate(TwoParties, SingleParty), + )(implicit ec => { + case p @ Participants( + Participant(alpha, Seq(giver, observer1)), + Participant(beta, Seq(observer2)), + ) => + val template = new WithObservers(giver, List(observer1, observer2).map(_.getValue).asJava) + for { + withObservers: WithObservers.ContractId <- alpha.create(giver, template) + _ <- alpha.exercise(giver, withObservers.exercisePing()) + _ <- p.synchronize + observer1View <- alpha.transactions(LedgerEffects, observer1) + observer2View <- beta.transactions(LedgerEffects, observer2) + } yield { + val observer1Exercise = assertSingleton( + "The first observer should see exactly one exercise", + observer1View.flatMap(exercisedEvents), + ) + val observer2Exercise = assertSingleton( + "The second observer should see exactly one exercise", + observer2View.flatMap(exercisedEvents), + ) + assert( + observer1Exercise.contractId == observer2Exercise.contractId, + "The two observers should see the same exercise", + ) + assert( + observer1Exercise.contractId == withObservers.contractId, + "The observers should see the exercised contract", + ) + } + }) + + test( + "CSHugeCommandSubmission", + "The server should accept a submission with 15 commands", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val target = 15 + val commands = List.fill(target)(new Dummy(party).create.commands.asScala).flatten + val request = ledger.submitAndWaitRequest(party, commands.asJava) + for { + _ <- ledger.submitAndWait(request) + acs <- ledger.activeContracts(Some(Seq(party))) + } yield { + assert( + acs.size == target, + s"Expected $target contracts to be created, got ${acs.size} instead", + ) + } + }) + + test( + "CSCallablePayout", + "Run CallablePayout and return the right events", + allocate(TwoParties, SingleParty), + )(implicit ec => { + case p @ Participants( + Participant(alpha, Seq(giver, newReceiver)), + Participant(beta, Seq(receiver)), + ) => + for { + callablePayout: CallablePayout.ContractId <- alpha.create( + giver, + new CallablePayout(giver, receiver), + ) + _ <- p.synchronize + tree <- beta.exercise(receiver, callablePayout.exerciseTransfer(newReceiver)) + } yield { + val created = assertSingleton("There should only be one creation", createdEvents(tree)) + assertEquals( + "The created event should be the expected one", + created.getCreateArguments.fields, + Value + .fromJavaProto(new CallablePayout(giver, newReceiver).toValue.toProto) + .getRecord + .fields, + ) + } + }) + + test( + "CSReadyForExercise", + "It should be possible to exercise a choice on a created contract", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + factory: DummyFactory.ContractId <- ledger.create(party, new DummyFactory(party)) + tree <- ledger.exercise(party, factory.exerciseDummyFactoryCall()) + } yield { + val exercise = assertSingleton("There should only be one exercise", exercisedEvents(tree)) + assert(exercise.contractId == factory.contractId, "Contract identifier mismatch") + assert(exercise.consuming, "The choice 
should have been consuming") + val _ = assertLength("Two creations should have occurred", 2, createdEvents(tree)) + } + }) + + test("CSBadNumericValues", "Reject unrepresentable numeric values", allocate(SingleParty))( + implicit ec => { case Participants(Participant(ledger, Seq(party))) => + // Code generation catches bad decimals early so we have to do some work to create (possibly) invalid requests + def rounding(numeric: String): JList[Command] = + updateCommands( + new DecimalRounding(party, BigDecimal.valueOf(0)).create.commands, + _.update( + _.create.createArguments + .fields(1) := RecordField(value = Some(Value(Value.Sum.Numeric(numeric)))) + ), + ) + val wouldLosePrecision = "0.00000000005" + val positiveOutOfBounds = "10000000000000000000000000000.0000000000" + val negativeOutOfBounds = "-10000000000000000000000000000.0000000000" + for { + e1 <- ledger + .submitAndWait(ledger.submitAndWaitRequest(party, rounding(wouldLosePrecision))) + .mustFail("submitting a request which would lose precision") + e2 <- ledger + .submitAndWait(ledger.submitAndWaitRequest(party, rounding(positiveOutOfBounds))) + .mustFail("submitting a request with a positive number out of bounds") + e3 <- ledger + .submitAndWait(ledger.submitAndWaitRequest(party, rounding(negativeOutOfBounds))) + .mustFail("submitting a request with a negative number out of bounds") + } yield { + assertGrpcError( + e1, + CommandExecutionErrors.Preprocessing.PreprocessingFailed, + Some("Cannot represent"), + checkDefiniteAnswerMetadata = true, + ) + assertGrpcError( + e2, + CommandExecutionErrors.Preprocessing.PreprocessingFailed, + Some("Out-of-bounds (Numeric 10)"), + checkDefiniteAnswerMetadata = true, + ) + assertGrpcError( + e3, + CommandExecutionErrors.Preprocessing.PreprocessingFailed, + Some("Out-of-bounds (Numeric 10)"), + checkDefiniteAnswerMetadata = true, + ) + } + } + ) + + test("CSCreateAndExercise", "Implement create-and-exercise correctly", allocate(SingleParty))( + implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val createAndExercise = new Dummy(party).createAnd.exerciseDummyChoice1().commands + val request = ledger.submitAndWaitRequest(party, createAndExercise) + for { + _ <- ledger.submitAndWait(request) + transactions <- ledger.transactions(AcsDelta, party) + transactionsLedgerEffects <- ledger.transactions(LedgerEffects, party) + } yield { + assert( + transactions.flatMap(_.events).isEmpty, + "A create-and-exercise acs delta transaction should show no event", + ) + assertEquals( + "Unexpected template identifier in create event", + transactionsLedgerEffects.flatMap(createdEvents).map(_.getTemplateId), + Vector(Dummy.TEMPLATE_ID_WITH_PACKAGE_ID.toV1), + ) + val contractId = transactionsLedgerEffects.flatMap(createdEvents).head.contractId + assertEquals( + "Unexpected exercise event triple (choice, contractId, consuming)", + transactionsLedgerEffects + .flatMap(exercisedEvents) + .map(e => (e.choice, e.contractId, e.consuming)), + Vector(("DummyChoice1", contractId, true)), + ) + } + } + ) + + test( + "CSBadCreateAndExercise", + "Fail create-and-exercise on bad create arguments", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val createAndExercise = + updateCommands( + new Dummy(party).createAnd + .exerciseDummyChoice1() + .commands, + _.update(_.createAndExercise.createArguments := Record()), + ) + val request = ledger.submitAndWaitRequest(party, createAndExercise) + for { + failure <- ledger + .submitAndWait(request) + 
.mustFail("submitting a request with bad create arguments") + } yield { + assertGrpcError( + failure, + CommandExecutionErrors.Preprocessing.PreprocessingFailed, + Some("Missing non-optional field"), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "CSCreateAndBadExerciseArguments", + "Fail create-and-exercise on bad choice arguments", + allocate(SingleParty), + limitation = TestConstraints.GrpcOnly("Problem creating faulty JSON from a faulty GRPC call"), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val createAndExercise = + updateCommands( + new Dummy(party).createAnd + .exerciseDummyChoice1() + .commands, + _.update(_.createAndExercise.choiceArgument := Value(Value.Sum.Bool(value = false))), + ) + val request = ledger.submitAndWaitRequest(party, createAndExercise) + for { + failure <- ledger + .submitAndWait(request) + .mustFail("submitting a request with bad choice arguments") + } yield { + assertGrpcError( + failure, + CommandExecutionErrors.Preprocessing.PreprocessingFailed, + Some("mismatching type"), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "CSCreateAndBadExerciseChoice", + "Fail create-and-exercise on invalid choice", + allocate(SingleParty), + limitation = TestConstraints.GrpcOnly("Problem creating faulty JSON from a faulty GRPC call"), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val missingChoice = "DoesNotExist" + val createAndExercise = + updateCommands( + new Dummy(party).createAnd + .exerciseDummyChoice1() + .commands, + _.update(_.createAndExercise.choice := missingChoice), + ) + val request = ledger.submitAndWaitRequest(party, createAndExercise) + for { + failure <- ledger + .submitAndWait(request) + .mustFail("submitting a request with an invalid choice") + } yield { + assertGrpcErrorRegex( + failure, + CommandExecutionErrors.Preprocessing.PreprocessingFailed, + Some( + Pattern.compile( + "(unknown|Couldn't find requested) choice " + missingChoice + ) + ), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "CSsubmitAndWaitCompletionOffset", + "SubmitAndWait methods return the completion offset in the response", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + def request = ledger.submitAndWaitRequest(party, new Dummy(party).create.commands) + def requestForTransaction = + ledger.submitAndWaitForTransactionRequest(party, new Dummy(party).create.commands, AcsDelta) + def requestForTransactionTree = + ledger.submitAndWaitForTransactionRequest( + party, + new Dummy(party).create.commands, + LedgerEffects, + ) + for { + transactionIdResponse <- ledger.submitAndWait(request) + retrievedTransaction <- ledger.transactionById( + transactionIdResponse.updateId, + Seq(party), + LedgerEffects, + ) + transactionResponse <- ledger.submitAndWaitForTransaction(requestForTransaction) + transactionTreeResponse <- ledger.submitAndWaitForTransaction(requestForTransactionTree) + } yield { + assert( + transactionIdResponse.completionOffset > 0 && transactionIdResponse.completionOffset == retrievedTransaction.offset, + "SubmitAndWait does not contain the expected completion offset", + ) + assert( + transactionResponse.getTransaction.offset > 0, + "SubmitAndWaitForTransaction does not contain the expected completion offset", + ) + assert( + transactionTreeResponse.getTransaction.offset > 0, + "SubmitAndWaitForTransactionTree does not contain the expected completion offset", + ) + } + }) + + test( + 
"CSSubmitAndWaitForTransactionNonConsumingChoice", + "SubmitAndWaitForTransaction returns an empty transaction when command contains only a non-consuming choice", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(owner))) => + for { + contractId: Dummy.ContractId <- ledger.create(owner, new Dummy(owner)) + response <- ledger.submitAndWaitForTransaction( + ledger.submitAndWaitForTransactionRequest( + owner, + contractId.exerciseDummyNonConsuming().commands, + AcsDelta, + ) + ) + offset = response.transaction.get.offset + transaction = assertDefined( + response.transaction, + "the transaction should have been returned", + ) + _ <- fetchAndCompareTransactions( + transaction, + ledger.transactionByOffset(offset, Seq(owner), LedgerEffects), + ) + } yield () + }) + + private def testExplicitDisclosure( + ledger: ParticipantTestContext, + owner: Party, + stranger: Party, + submitAndWaitForTransactionRequest: ( + DisclosedContract, + DummyFlexibleController.ContractId, + ) => SubmitAndWaitForTransactionRequest, + getTransactionLedgerEffects: (String, Party) => Future[Transaction], + )(implicit ec: ExecutionContext): Future[Unit] = for { + contractId: DummyFlexibleController.ContractId <- ledger.create( + owner, + new DummyFlexibleController(owner), + ) + end <- ledger.currentEnd() + witnessTxs <- ledger.transactions( + new GetUpdatesRequest( + beginExclusive = ledger.begin, + endInclusive = Some(end), + updateFormat = Some(formatByPartyAndTemplate(owner, DummyFlexibleController.TEMPLATE_ID)), + ) + ) + tx = assertSingleton("Owners' transactions", witnessTxs) + create = assertSingleton("The create", createdEvents(tx)) + disclosedContract = createEventToDisclosedContract(create) + submitResponse <- ledger.submitAndWaitForTransaction( + submitAndWaitForTransactionRequest(disclosedContract, contractId) + ) + transaction = assertDefined( + submitResponse.transaction, + "the transaction should have been returned", + ) + _ <- fetchAndCompareTransactions( + transaction = transaction, + getTransactionLedgerEffects = getTransactionLedgerEffects(transaction.updateId, stranger), + ) + } yield () + + private def fetchAndCompareTransactions( + transaction: Transaction, + getTransactionLedgerEffects: => Future[Transaction], + )(implicit ec: ExecutionContext): Future[Unit] = + for { + transactionLedgerEffects <- getTransactionLedgerEffects + } yield assertEquals( + "The transaction should contain the same details (except events) as the ledger effects transaction", + transaction, + Transaction( + updateId = transactionLedgerEffects.updateId, + commandId = transactionLedgerEffects.commandId, + workflowId = transactionLedgerEffects.workflowId, + effectiveAt = transactionLedgerEffects.effectiveAt, + events = Seq.empty, + offset = transactionLedgerEffects.offset, + synchronizerId = transactionLedgerEffects.synchronizerId, + traceContext = transactionLedgerEffects.traceContext, + recordTime = transactionLedgerEffects.recordTime, + externalTransactionHash = transactionLedgerEffects.externalTransactionHash, + ), + ) + + private def testValidSynchronizerIdSubmission( + submitAndWaitEndpoint: ParticipantTestContext => SubmitAndWaitForTransactionRequest => Future[ + String + ] + ): ExecutionContext => PartialFunction[Participants, Future[Unit]] = { + implicit ec: ExecutionContext => + { case Participants(Participant(ledger, Seq(party, _*))) => + val (synchronizer1, synchronizer2) = inside(party.initialSynchronizers) { + case Seq(synchronizer1, synchronizer2, _*) => + synchronizer1 -> 
synchronizer2 + } + val requestForSynchronizer1 = ledger + .submitAndWaitForTransactionRequest(party, new Dummy(party).create.commands, AcsDelta) + .update(_.commands.synchronizerId := synchronizer1) + for { + actualSynchronizerId1 <- submitAndWaitEndpoint(ledger)(requestForSynchronizer1) + requestForSynchronizer2 = ledger + .submitAndWaitForTransactionRequest(party, new Dummy(party).create.commands) + .update(_.commands.synchronizerId := synchronizer2) + actualSynchronizerId2 <- submitAndWaitEndpoint(ledger)(requestForSynchronizer2) + } yield { + assertEquals(actualSynchronizerId1, synchronizer1) + assertEquals(actualSynchronizerId2, synchronizer2) + } + } + } + + private def testInvalidSynchronizerIdSubmission[R]( + submitAndWaitEndpoint: ParticipantTestContext => SubmitAndWaitRequest => Future[R] + ): ExecutionContext => PartialFunction[Participants, Future[Unit]] = { + implicit ec: ExecutionContext => + { case Participants(Participant(ledger, Seq(party, _*))) => + val invalidSynchronizerId = "invalidSynchronizerId" + val request: SubmitAndWaitRequest = ledger + .submitAndWaitRequest(party, new Dummy(party).create.commands) + .update(_.commands.synchronizerId := invalidSynchronizerId) + for { + failure <- submitAndWaitEndpoint(ledger)(request).mustFail( + "submitting a request with an invalid synchronizer id" + ) + } yield assertGrpcError( + failure, + RequestValidationErrors.InvalidField, + Some( + s"Invalid field synchronizer_id: Invalid unique identifier `$invalidSynchronizerId` with missing namespace." + ), + checkDefiniteAnswerMetadata = true, + ) + } + } + + private def testInvalidSynchronizerIdSubmissionForTransaction[R]( + submitAndWaitEndpoint: ParticipantTestContext => SubmitAndWaitForTransactionRequest => Future[ + R + ] + ): ExecutionContext => PartialFunction[Participants, Future[Unit]] = { + implicit ec: ExecutionContext => + { case Participants(Participant(ledger, Seq(party, _*))) => + val invalidSynchronizerId = "invalidSynchronizerId" + val request: SubmitAndWaitForTransactionRequest = ledger + .submitAndWaitForTransactionRequest(party, new Dummy(party).create.commands, AcsDelta) + .update(_.commands.synchronizerId := invalidSynchronizerId) + for { + failure <- submitAndWaitEndpoint(ledger)(request).mustFail( + "submitting a request with an invalid synchronizer id" + ) + } yield assertGrpcError( + failure, + RequestValidationErrors.InvalidField, + Some( + s"Invalid field synchronizer_id: Invalid unique identifier `$invalidSynchronizerId` with missing namespace." 
+ ), + checkDefiniteAnswerMetadata = true, + ) + } + } + +} + +object CommandServiceIT { + def createEventToDisclosedContract(ev: CreatedEvent): DisclosedContract = + DisclosedContract( + templateId = ev.templateId, + contractId = ev.contractId, + createdEventBlob = ev.createdEventBlob, + synchronizerId = "", + ) + + def formatByPartyAndTemplate( + owner: Party, + templateId: javaapi.data.Identifier, + ): UpdateFormat = { + val templateIdScalaPB = Identifier.fromJavaProto(templateId.toProto) + + val eventFormat = EventFormat( + filtersByParty = Map( + owner.getValue -> new Filters( + Seq( + CumulativeFilter( + IdentifierFilter.TemplateFilter( + TemplateFilter(Some(templateIdScalaPB), includeCreatedEventBlob = true) + ) + ) + ) + ) + ), + filtersForAnyParty = None, + verbose = false, + ) + + UpdateFormat( + includeTransactions = Some( + TransactionFormat( + eventFormat = Some(eventFormat), + transactionShape = TRANSACTION_SHAPE_LEDGER_EFFECTS, + ) + ), + includeReassignments = None, + includeTopologyEvents = None, + ) + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CommandSubmissionCompletionIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CommandSubmissionCompletionIT.scala new file mode 100644 index 0000000000..5798b6d03c --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CommandSubmissionCompletionIT.scala @@ -0,0 +1,320 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.{ + LedgerTestSuite, + TestConstraints, + TimeoutException, + WithTimeout, +} +import com.daml.ledger.test.java.model.test.Dummy +import com.digitalasset.canton.error.TransactionRoutingError.ConfigurationErrors.InvalidPrescribedSynchronizerId +import com.digitalasset.canton.ledger.api.TransactionShape.AcsDelta +import com.digitalasset.canton.ledger.error.groups.{CommandExecutionErrors, RequestValidationErrors} +import org.scalatest.Inside.inside + +import java.util.regex.Pattern +import scala.concurrent.Future +import scala.concurrent.duration.DurationInt + +final class CommandSubmissionCompletionIT extends LedgerTestSuite { + import CompanionImplicits.* + + test( + "CSCCompletions", + "Read completions correctly with a correct user identifier and reading party", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val request = ledger.submitRequest(party, new Dummy(party).create.commands) + for { + _ <- ledger.submit(request) + completions <- ledger.firstCompletions(party) + } yield { + val commandId = + assertSingleton("Expected only one completion", completions.map(_.commandId)) + assert( + commandId == request.commands.get.commandId, + "Wrong command identifier on completion", + ) + } + }) + + test( + "CSCParticipantBeginCompletions", + "Read completions from participant begin", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val request = ledger.submitRequest(party, new Dummy(party).create.commands) + for { + _ <- ledger.submit(request) + completionRequest = ledger + 
.completionStreamRequest(0L)(party) + completions <- ledger.firstCompletions(completionRequest) + } yield { + assert(completions.nonEmpty, "Completions should not have been empty") + } + }) + + test( + "CSCNoCompletionsWithoutRightUserId", + "Read no completions without the correct user identifier", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val request = ledger.submitRequest(party, new Dummy(party).create.commands) + for { + _ <- ledger.submit(request) + invalidRequest = ledger + .completionStreamRequest()(party) + .update(_.userId := "invalid-user-id") + failure <- WithTimeout(5.seconds)(ledger.firstCompletions(invalidRequest)) + .mustFail("subscribing to completions with an invalid user ID") + } yield { + assert(failure == TimeoutException, "Timeout expected") + } + }) + + test( + "CSCAfterEnd", + "An OUT_OF_RANGE error should be returned when subscribing to completions past the ledger end", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val request = ledger.submitRequest(party, new Dummy(party).create.commands) + for { + _ <- ledger.submit(request) + futureOffset <- ledger.offsetBeyondLedgerEnd() + invalidRequest = ledger + .completionStreamRequest()(party) + .update(_.beginExclusive := futureOffset) + failure <- ledger + .firstCompletions(invalidRequest) + .mustFail("subscribing to completions past the ledger end") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.OffsetAfterLedgerEnd, + Some("is after ledger end"), + ) + } + }) + + test( + "CSCNoCompletionsWithoutRightParty", + "Read no completions without the correct party", + allocate(TwoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(party, notTheSubmittingParty))) => + val request = ledger.submitRequest(party, new Dummy(party).create.commands) + for { + _ <- ledger.submit(request) + failure <- WithTimeout(5.seconds)(ledger.firstCompletions(notTheSubmittingParty)) + .mustFail("subscribing to completions with the wrong party") + } yield { + assert(failure == TimeoutException, "Timeout expected") + } + }) + + test( + "CSCRefuseBadChoice", + "The submission of an exercise of a choice that does not exist should yield INVALID_ARGUMENT", + allocate(SingleParty), + limitation = TestConstraints.GrpcOnly( + "Problem creating faulty JSON from a faulty GRPC call" + ), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val badChoice = "THIS_IS_NOT_A_VALID_CHOICE" + for { + dummy <- ledger.create(party, new Dummy(party)) + exercise = dummy.exerciseDummyChoice1().commands + wrongExercise = updateCommands(exercise, _.update(_.exercise.choice := badChoice)) + wrongRequest = ledger.submitRequest(party, wrongExercise) + failure <- ledger.submit(wrongRequest).mustFail("submitting an invalid choice") + } yield { + assertGrpcErrorRegex( + failure, + CommandExecutionErrors.Preprocessing.PreprocessingFailed, + Some( + Pattern.compile( + "(unknown|Couldn't find requested) choice " + badChoice + ) + ), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "CSCDisallowEmptyTransactionsSubmission", + "The submission of an empty command should be rejected with INVALID_ARGUMENT", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val emptyRequest = ledger.submitRequest(party) + for { + failure <- ledger.submit(emptyRequest).mustFail("submitting an empty command") + } yield { + assertGrpcError( + failure, + 
RequestValidationErrors.MissingField, + Some("commands"), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "CSCHandleMultiPartySubscriptions", + "Listening for completions should support multi-party subscriptions", + allocate(TwoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob))) => + val aliceRequest = ledger.submitRequest(alice, new Dummy(alice).create.commands) + val bobRequest = ledger.submitRequest(bob, new Dummy(bob).create.commands) + val aliceBobRequest = { + val originalCommands = ledger.submitRequest(alice, new Dummy(alice).create.commands) + originalCommands.withCommands( + originalCommands.getCommands.copy( + actAs = Seq(alice, bob) + ) + ) + } + + val aliceCommandId = aliceRequest.getCommands.commandId + val bobCommandId = bobRequest.getCommands.commandId + val aliceBobCommandId = aliceBobRequest.getCommands.commandId + + for { + _ <- ledger.submit(aliceRequest) + _ <- ledger.submit(bobRequest) + _ <- ledger.submit(aliceBobRequest) + _ <- WithTimeout(5.seconds)(ledger.findCompletion(alice, bob)(_.commandId == aliceCommandId)) + _ <- WithTimeout(5.seconds)(ledger.findCompletion(alice, bob)(_.commandId == bobCommandId)) + aliceBobCompletionForAliceBob <- WithTimeout(5.seconds)( + ledger.findCompletion(alice, bob)(_.commandId == aliceBobCommandId) + ) + // as all the commands are already visible on the ledger, we can use lower timeouts + _ <- WithTimeout(2.seconds)(ledger.findCompletion(alice)(_.commandId == aliceCommandId)) + _ <- WithTimeout(2.seconds)(ledger.findCompletion(alice)(_.commandId == bobCommandId)) + .mustFailWith("alice should not be able to look up bob's command")(_ == TimeoutException) + aliceBobCompletionForAlice <- WithTimeout(2.seconds)( + ledger.findCompletion(alice)(_.commandId == aliceBobCommandId) + ) + _ <- WithTimeout(2.seconds)(ledger.findCompletion(bob)(_.commandId == aliceCommandId)) + .mustFailWith("bob should not be able to look up alice's command")(_ == TimeoutException) + _ <- WithTimeout(2.seconds)(ledger.findCompletion(bob)(_.commandId == bobCommandId)) + aliceBobCompletionForBob <- WithTimeout(2.seconds)( + ledger.findCompletion(bob)(_.commandId == aliceBobCommandId) + ) + } yield { + assertEquals( + "If filtered with both alice and bob, the multi-party submissions should have both act_as parties.", + aliceBobCompletionForAliceBob.map(_.actAs.toSet), + Some(Set(alice.underlying.getValue, bob.underlying.getValue)), + ) + assertEquals( + "If filtered with only alice, the multi-party submissions should have only alice as the act_as party.", + aliceBobCompletionForAlice.map(_.actAs.toSet), + Some(Set(alice.underlying.getValue)), + ) + assertEquals( + "If filtered with only bob, the multi-party submissions should have only bob as the act_as party.", + aliceBobCompletionForBob.map(_.actAs.toSet), + Some(Set(bob.underlying.getValue)), + ) + } + }) + + test( + "CSCSubmitInvalidSynchronizerId", + "Submit should fail for invalid synchronizer ids", + allocate(SingleParty), + ) { implicit ec => + { case Participants(Participant(ledger, Seq(party, _*))) => + val invalidSynchronizerId = "invalidSynchronizerId" + val request = ledger + .submitRequest(party, new Dummy(party).create.commands) + .update(_.commands.synchronizerId := invalidSynchronizerId) + for { + failure <- ledger + .submit(request) + .mustFail( + "submitting a request with an invalid synchronizer id" + ) + } yield assertGrpcError( + failure, + RequestValidationErrors.InvalidField, + Some( + s"Invalid field synchronizer_id: Invalid unique identifier 
`$invalidSynchronizerId` with missing namespace." + ), + checkDefiniteAnswerMetadata = true, + ) + } + } + + test( + "CSCSubmitUnknownSynchronizerId", + "Submit should fail for unknown synchronizer ids", + allocate(SingleParty) expectingMinimumNumberOfSynchronizers (1), + ) { implicit ec => + { case Participants(Participant(ledger, Seq(party, _*))) => + val synchronizer = + inside(party.initialSynchronizers) { case Seq(synchronizer, _*) => + synchronizer + } + val unknownSynchronizerId = s"unknown_$synchronizer" + val request = ledger + .submitRequest(party, new Dummy(party).create.commands) + .update(_.commands.synchronizerId := unknownSynchronizerId) + for { + failure <- ledger + .submit(request) + .mustFail( + "submitting a request with an unknown synchronizer id" + ) + } yield assertGrpcError( + failure, + InvalidPrescribedSynchronizerId, + Some( + s"Cannot submit transaction to prescribed synchronizer" + ), + checkDefiniteAnswerMetadata = true, + ) + } + } + + test( + "CSCSubmitWithPrescribedSynchronizerId", + "Submit should use the prescribed synchronizer id when routing the submission", + allocate(SingleParty).expectingMinimumNumberOfSynchronizers(2), + ) { implicit ec => + { case Participants(Participant(ledger, Seq(party, _*))) => + def assertSubmitRoutedToSynchronizer(targetSynchronizer: String): Future[Unit] = for { + _ <- Future.unit + requestForSynchronizer1 = ledger + .submitRequest(party, new Dummy(party).create.commands) + .update(_.commands.synchronizerId := targetSynchronizer) + _ <- ledger.submit(requestForSynchronizer1) + firstSynchronizerCompletion <- WithTimeout(5.seconds)( + ledger.findCompletion(party)(_.commandId == requestForSynchronizer1.getCommands.commandId) + ).map( + _.getOrElse( + fail(s"completion not found for ${requestForSynchronizer1.getCommands.commandId}") + ) + ) + actualSynchronizerId <- ledger + .transactionById(firstSynchronizerCompletion.updateId, Seq(party), AcsDelta) + .map(_.synchronizerId) + } yield assertEquals(actualSynchronizerId, targetSynchronizer) + + val (synchronizer1, synchronizer2) = + inside(party.initialSynchronizers) { case Seq(synchronizer1, synchronizer2, _*) => + synchronizer1 -> synchronizer2 + } + + for { + _ <- assertSubmitRoutedToSynchronizer(synchronizer1) + _ <- assertSubmitRoutedToSynchronizer(synchronizer2) + } yield () + } + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CommandSubmissionTestUtils.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CommandSubmissionTestUtils.scala new file mode 100644 index 0000000000..f842f77c54 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CommandSubmissionTestUtils.scala @@ -0,0 +1,28 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.api.v2.transaction.Transaction +import com.daml.ledger.test.java.model.test.Dummy + +trait CommandSubmissionTestUtils { this: LedgerTestSuite => + protected def assertOnTransactionResponse( + transaction: Transaction + ): Unit = { + assert( + transaction.updateId.nonEmpty, + "The transaction identifier should not be empty.", + ) + val event = transaction.events.head + assert( + event.event.isCreated, + s"The returned transaction should contain a created-event, but the event was ${event.event}", + ) + assert( + event.getCreated.getTemplateId == Dummy.TEMPLATE_ID_WITH_PACKAGE_ID.toV1, + s"The template ID of the created-event should be ${Dummy.TEMPLATE_ID_WITH_PACKAGE_ID.toV1}, but was ${event.getCreated.getTemplateId}", + ) + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CompanionImplicits.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CompanionImplicits.scala new file mode 100644 index 0000000000..c77ac1a07a --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CompanionImplicits.scala @@ -0,0 +1,106 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.javaapi.data.codegen.ContractCompanion +import com.daml.ledger.test.java.model.iou.Iou +import com.daml.ledger.test.java.model.test.{ + Agreement, + AgreementFactory, + CallablePayout, + Delegated, + Delegation, + DiscloseCreate, + Divulgence1, + Divulgence2, + Dummy, + DummyFactory, + DummyWithParam, + TriProposal, + WithObservers, + Witnesses as TestWitnesses, +} +import com.daml.ledger.test.java.model.trailingnones.TrailingNones +import com.daml.ledger.test.java.semantic.divulgencetests.DummyFlexibleController +import com.daml.ledger.test.java.semantic.semantictests + +object CompanionImplicits { + + implicit val dummyCompanion + : ContractCompanion.WithoutKey[Dummy.Contract, Dummy.ContractId, Dummy] = Dummy.COMPANION + implicit val dummyWithParamCompanion: ContractCompanion.WithoutKey[ + DummyWithParam.Contract, + DummyWithParam.ContractId, + DummyWithParam, + ] = DummyWithParam.COMPANION + implicit val dummyFactoryCompanion + : ContractCompanion.WithoutKey[DummyFactory.Contract, DummyFactory.ContractId, DummyFactory] = + DummyFactory.COMPANION + implicit val withObserversCompanion: ContractCompanion.WithoutKey[ + WithObservers.Contract, + WithObservers.ContractId, + WithObservers, + ] = WithObservers.COMPANION + implicit val callablePayoutCompanion: ContractCompanion.WithoutKey[ + CallablePayout.Contract, + CallablePayout.ContractId, + CallablePayout, + ] = CallablePayout.COMPANION + implicit val delegatedCompanion: ContractCompanion.WithoutKey[ + Delegated.Contract, + Delegated.ContractId, + Delegated, + ] = Delegated.COMPANION + implicit val delegationCompanion + : ContractCompanion.WithoutKey[Delegation.Contract, Delegation.ContractId, Delegation] = + Delegation.COMPANION + implicit val discloseCreatedCompanion: ContractCompanion.WithoutKey[ + DiscloseCreate.Contract, + DiscloseCreate.ContractId, + DiscloseCreate, + ] = DiscloseCreate.COMPANION + implicit val testWitnessesCompanion: ContractCompanion.WithoutKey[ + TestWitnesses.Contract, + 
TestWitnesses.ContractId, + TestWitnesses, + ] = TestWitnesses.COMPANION + implicit val divulgence1Companion + : ContractCompanion.WithoutKey[Divulgence1.Contract, Divulgence1.ContractId, Divulgence1] = + Divulgence1.COMPANION + implicit val divulgence2Companion + : ContractCompanion.WithoutKey[Divulgence2.Contract, Divulgence2.ContractId, Divulgence2] = + Divulgence2.COMPANION + implicit val semanticTestsIouCompanion: ContractCompanion.WithoutKey[ + semantictests.Iou.Contract, + semantictests.Iou.ContractId, + semantictests.Iou, + ] = semantictests.Iou.COMPANION + implicit val iouCompanion: ContractCompanion.WithoutKey[Iou.Contract, Iou.ContractId, Iou] = + Iou.COMPANION + implicit val agreementFactoryCompanion: ContractCompanion.WithoutKey[ + AgreementFactory.Contract, + AgreementFactory.ContractId, + AgreementFactory, + ] = AgreementFactory.COMPANION + implicit val agreementCompanion + : ContractCompanion.WithoutKey[Agreement.Contract, Agreement.ContractId, Agreement] = + Agreement.COMPANION + + implicit val divulgeIouByExerciseCompanion: ContractCompanion.WithoutKey[ + DummyFlexibleController.Contract, + DummyFlexibleController.ContractId, + DummyFlexibleController, + ] = DummyFlexibleController.COMPANION + + implicit val triProposalCompanion + : ContractCompanion.WithoutKey[TriProposal.Contract, TriProposal.ContractId, TriProposal] = + TriProposal.COMPANION + + implicit val trailingNonesCompanion: ContractCompanion.WithoutKey[ + TrailingNones.Contract, + TrailingNones.ContractId, + TrailingNones, + ] = + TrailingNones.COMPANION +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CompletionDeduplicationInfoIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CompletionDeduplicationInfoIT.scala new file mode 100644 index 0000000000..b03e6e2f94 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/CompletionDeduplicationInfoIT.scala @@ -0,0 +1,194 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.assertDefined +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, Party, WithTimeout} +import com.daml.ledger.api.testtool.suites.v2_1.CompletionDeduplicationInfoIT.* +import com.daml.ledger.api.v2.command_service.SubmitAndWaitRequest +import com.daml.ledger.api.v2.command_submission_service.SubmitRequest +import com.daml.ledger.api.v2.completion.Completion +import com.daml.ledger.javaapi.data.Command +import com.daml.ledger.test.java.model.test.Dummy +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.ledger.api.SubmissionIdGenerator +import com.digitalasset.daml.lf.data.Ref +import com.digitalasset.daml.lf.data.Ref.SubmissionId +import io.grpc.Status + +import java.util.List as JList +import scala.concurrent.duration.DurationInt +import scala.concurrent.{ExecutionContext, Future} + +final class CompletionDeduplicationInfoIT[ServiceRequest]( + service: Service[ServiceRequest] +) extends LedgerTestSuite { + + private val serviceName: String = service.productPrefix + + override def name = super.name + + test( + shortIdentifier = s"CCDIIncludeDedupInfo$serviceName", + description = s"Deduplication information is preserved in completions ($serviceName)", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val requestWithoutSubmissionId = service.buildRequest(ledger, party) + val requestWithSubmissionId = service.buildRequest(ledger, party, Some(RandomSubmissionId)) + for { + optNoDeduplicationSubmittedCompletion <- service.submitRequest( + ledger, + party, + requestWithoutSubmissionId, + ) + optSubmissionIdSubmittedCompletion <- service + .submitRequest(ledger, party, requestWithSubmissionId) + } yield { + assertUserIdIsPreserved(ledger.userId, optNoDeduplicationSubmittedCompletion) + service.assertCompletion(optNoDeduplicationSubmittedCompletion) + assertDeduplicationPeriodIsReported(optNoDeduplicationSubmittedCompletion) + assertSubmissionIdIsPreserved(optSubmissionIdSubmittedCompletion, RandomSubmissionId) + } + }) +} + +private[testtool] object CompletionDeduplicationInfoIT { + + private[testtool] sealed trait Service[ProtoRequestType] extends Serializable with Product { + def buildRequest( + ledger: ParticipantTestContext, + party: Party, + optSubmissionId: Option[Ref.SubmissionId] = None, + ): ProtoRequestType + + def submitRequest( + ledger: ParticipantTestContext, + party: Party, + request: ProtoRequestType, + )(implicit ec: ExecutionContext): Future[Option[Completion]] + + def assertCompletion(optCompletion: Option[Completion]): Unit + } + + case object CommandService extends Service[SubmitAndWaitRequest] { + override def buildRequest( + ledger: ParticipantTestContext, + party: Party, + optSubmissionId: Option[SubmissionId], + ): SubmitAndWaitRequest = { + val request = ledger.submitAndWaitRequest(party, simpleCreate(party)) + optSubmissionId + .map { submissionId => + request.update(_.commands.submissionId := submissionId) + } + .getOrElse(request) + } + + override def submitRequest( + ledger: ParticipantTestContext, + party: Party, + request: SubmitAndWaitRequest, + )(implicit ec: ExecutionContext): Future[Option[Completion]] = + for { + offset <- ledger.currentEnd() + _ <- ledger.submitAndWait(request) + 
completion <- singleCompletionAfterOffset(ledger, party, offset) + } yield completion + + override def assertCompletion(optCompletion: Option[Completion]): Unit = { + val completion = assertDefined(optCompletion, "No completion has been produced") + assert(completion.status.forall(_.code == Status.Code.OK.value())) + assert( + Ref.SubmissionId.fromString(completion.submissionId).isRight, + "Missing or invalid submission ID in completion", + ) + } + } + + case object CommandSubmissionService extends Service[SubmitRequest] { + override def buildRequest( + ledger: ParticipantTestContext, + party: Party, + optSubmissionId: Option[SubmissionId], + ): SubmitRequest = { + val request = ledger.submitRequest(party, simpleCreate(party)) + optSubmissionId + .map { submissionId => + request.update(_.commands.submissionId := submissionId) + } + .getOrElse(request) + } + + override def submitRequest( + ledger: ParticipantTestContext, + party: Party, + request: SubmitRequest, + )(implicit ec: ExecutionContext): Future[Option[Completion]] = + for { + offset <- ledger.currentEnd() + _ <- ledger.submit(request) + completion <- singleCompletionAfterOffset(ledger, party, offset) + } yield completion + + override def assertCompletion(optCompletion: Option[Completion]): Unit = { + val completion = assertDefined(optCompletion, "No completion has been produced") + assert(completion.status.forall(_.code == Status.Code.OK.value())) + } + } + + private def singleCompletionAfterOffset( + ledger: ParticipantTestContext, + party: Party, + offset: Long, + ): Future[Option[Completion]] = + WithTimeout(5.seconds)( + ledger + .findCompletion(ledger.completionStreamRequest(offset)(party))(_ => true) + ) + + private def assertSubmissionIdIsPreserved( + optCompletion: Option[Completion], + requestedSubmissionId: Ref.SubmissionId, + ): Unit = { + val submissionIdCompletion = assertDefined(optCompletion, "No completion has been produced") + val actualSubmissionId = submissionIdCompletion.submissionId + assert(submissionIdCompletion.status.forall(_.code == Status.Code.OK.value())) + assert( + actualSubmissionId == requestedSubmissionId, + "Wrong submission ID in completion, " + + s"expected: $requestedSubmissionId, actual: $actualSubmissionId", + ) + } + + private def assertDeduplicationPeriodIsReported( + optCompletion: Option[Completion] + ): Unit = { + val completion = assertDefined(optCompletion, "No completion has been produced") + assert(completion.status.forall(_.code == Status.Code.OK.value())) + assert(completion.deduplicationPeriod.isDefined, "The deduplication period was not reported") + } + + private def assertUserIdIsPreserved( + requestedUserId: String, + optCompletion: Option[Completion], + ): Unit = { + val expectedUserId = requestedUserId + assertDefined(optCompletion, "No completion has been produced").discard + val userIdCompletion = optCompletion.get + assert(userIdCompletion.status.forall(_.code == Status.Code.OK.value())) + val actualUserId = userIdCompletion.userId + assert( + Ref.UserId.fromString(actualUserId).contains(expectedUserId), + "Wrong user ID in completion, " + + s"expected: $expectedUserId, actual: $actualUserId", + ) + } + + private def simpleCreate(party: Party): JList[Command] = new Dummy(party.getValue).create.commands + + private val RandomSubmissionId = + Ref.SubmissionId.assertFromString(SubmissionIdGenerator.Random.generate()) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/ContractIdIT.scala 
b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/ContractIdIT.scala new file mode 100644 index 0000000000..6b6f7139ac --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/ContractIdIT.scala @@ -0,0 +1,192 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.{ + assertErrorCode, + assertGrpcError, + fail, +} +import com.daml.ledger.api.testtool.infrastructure.participant.{Features, ParticipantTestContext} +import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, Party} +import com.daml.ledger.api.testtool.suites.v2_1.ContractIdIT.* +import com.daml.ledger.javaapi.data.codegen.ContractCompanion +import com.daml.ledger.test.java.semantic.contractidtests.{Contract, ContractRef} +import com.digitalasset.base.error.ErrorCode +import com.digitalasset.canton.ledger.error.groups.{ + CommandExecutionErrors, + ConsistencyErrors, + RequestValidationErrors, +} +import com.digitalasset.canton.util.TryUtil +import io.grpc.StatusRuntimeException + +import scala.concurrent.{ExecutionContext, Future} +import scala.util.{Failure, Success, Try} + +// See `daml-lf/spec/contract-id.rst` for more information on contract ID formats. +// Check that the Ledger API accepts or rejects non-suffixed contract IDs. +// - Central committer ledger implementations (sandboxes, KV...) may accept non-suffixed CIDs +// - Distributed ledger implementations (e.g. Canton) must reject non-suffixed CIDs +final class ContractIdIT extends LedgerTestSuite { + implicit val contractCompanion + : ContractCompanion.WithoutKey[Contract.Contract$, Contract.ContractId, Contract] = + Contract.COMPANION + implicit val contractRefCompanion: ContractCompanion.WithoutKey[ + ContractRef.Contract, + ContractRef.ContractId, + ContractRef, + ] = ContractRef.COMPANION + + List( + TestConfiguration( + description = "non-suffixed v1", + example = nonSuffixedV1Cid, + accepted = false, + disabledReason = "non-suffixed V1 contract IDs are supported", + failsInPreprocessing = true, + ), + TestConfiguration( + description = "suffixed v1", + example = suffixedV1Cid, + accepted = true, + ), + ).foreach { + case TestConfiguration( + cidDescription, + example, + accepted, + isSupported, + disabledReason, + failsInPreprocessing, + ) => + val result = if (accepted) "Accept" else "Reject" + + def test( + description: String, + parseErrorCode: ErrorCode = RequestValidationErrors.InvalidArgument, + )( + update: ExecutionContext => ( + ParticipantTestContext, + Party, + ) => Future[Try[_]] + ): Unit = + super.test( + shortIdentifier = result + camelCase(cidDescription) + "Cid" + camelCase(description), + description = result + "s " + cidDescription + " Contract Id in " + description, + partyAllocation = allocate(SingleParty), + enabled = isSupported, + disabledReason = disabledReason, + )(implicit ec => { case Participants(Participant(alpha, Seq(party))) => + update(ec)(alpha, party).map { + case Success(_) if accepted => () + case Failure(err: Throwable) if !accepted => + val (prefix, errorCode) = + if (failsInPreprocessing) + ( + "Illegal Contract ID", + CommandExecutionErrors.Preprocessing.PreprocessingFailed, + ) + else + ("cannot parse ContractId", parseErrorCode) + assertGrpcError( 
+ err, + errorCode, + Some(s"""$prefix "$example""""), + checkDefiniteAnswerMetadata = true, + ) + () + case otherwise => + fail("Unexpected " + otherwise.fold(err => s"failure: $err", _ => "success")) + } + }) + + test("create payload") { implicit ec => (alpha, party) => + alpha + .create(party, new ContractRef(party, new Contract.ContractId(example))) + .transformWith(Future.successful) + } + + test("exercise target", parseErrorCode = RequestValidationErrors.InvalidField) { + implicit ec => (alpha, party) => + for { + contractCid <- alpha.create(party, new Contract(party)) + result <- + alpha + .exercise( + party, + new ContractRef.ContractId(example).exerciseChange(contractCid), + ) + .transformWith(Future.successful) + } yield result match { + case Failure(exception: StatusRuntimeException) + if Try( + assertErrorCode( + statusRuntimeException = exception, + expectedErrorCode = ConsistencyErrors.ContractNotFound, + ) + ).isSuccess => + TryUtil.unit + + case Success(_) => Failure(new UnknownError("Unexpected Success")) + case otherwise => otherwise.map(_ => ()) + } + } + + test("choice argument") { implicit ec => (alpha, party) => + for { + contractCid <- alpha.create(party, new Contract(party)) + contractRefCid <- alpha.create(party, new ContractRef(party, contractCid)) + result <- alpha + .exercise(party, contractRefCid.exerciseChange(new Contract.ContractId(example))) + .transformWith(Future.successful) + } yield result + } + + test("create-and-exercise payload") { implicit ec => (alpha, party) => + for { + contractCid <- alpha.create(party, new Contract(party)) + result <- alpha + .exercise( + party, + new ContractRef(party, new Contract.ContractId(example)).createAnd + .exerciseChange(contractCid), + ) + .transformWith(Future.successful) + } yield result + } + + test("create-and-exercise choice argument") { implicit ec => (alpha, party) => + for { + contractCid <- alpha.create(party, new Contract(party)) + result <- alpha + .exercise( + party, + new ContractRef(party, contractCid).createAnd + .exerciseChange(new Contract.ContractId(example)), + ) + .transformWith(Future.successful) + } yield result + } + } +} + +object ContractIdIT { + private val nonSuffixedV1Cid = (0 to 32).map("%02x".format(_)).mkString + private val suffixedV1Cid = (0 to 48).map("%02x".format(_)).mkString + + private def camelCase(s: String): String = + s.split("[ -]").iterator.map(_.capitalize).mkString("") + + final private case class TestConfiguration( + description: String, + example: String, + accepted: Boolean, + isSupported: Features => Boolean = _ => true, + disabledReason: String = "", + // Invalid v1 cids (e.g. no suffix when one is required) fail during command preprocessing. + failsInPreprocessing: Boolean = false, + ) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/DamlValuesIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/DamlValuesIT.scala new file mode 100644 index 0000000000..434ecdf648 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/DamlValuesIT.scala @@ -0,0 +1,46 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.{ + Participant, + Participants, + SingleParty, + allocate, +} +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.api.v2.value.Value +import com.daml.ledger.javaapi.data.codegen.PrimitiveValueDecoders +import com.daml.ledger.test.java.semantic.damlvalues.WithTextMap + +import scala.jdk.CollectionConverters.{MapHasAsJava, MapHasAsScala} + +final class DamlValuesIT extends LedgerTestSuite { + test( + "DVTextMap", + "Ledger API should support text maps in Daml values", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice))) => + for { + contract: WithTextMap.ContractId <- ledger + .create(alice, new WithTextMap(alice, Map("foo" -> "bar").asJava))( + WithTextMap.COMPANION + ) + resultTxTree <- ledger + .exercise(alice, contract.exerciseWithTextMap_Expand(Map("marco" -> "polo").asJava)) + } yield { + val result = resultTxTree.events.head.getExercised.exerciseResult + .getOrElse(fail("Expected return value")) + + val actualItems = PrimitiveValueDecoders + .fromTextMap(PrimitiveValueDecoders.fromText) + .decode(com.daml.ledger.javaapi.data.Value.fromProto(Value.toJavaProto(result))) + .asScala + + val expectedItems = Map("foo" -> "bar", "marco" -> "polo") + assertSameElements(actualItems, expectedItems, "items returned by WithTextMap_Expand") + } + }) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/DeeplyNestedValueIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/DeeplyNestedValueIT.scala new file mode 100644 index 0000000000..159ccc559f --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/DeeplyNestedValueIT.scala @@ -0,0 +1,165 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, Party} +import com.daml.ledger.javaapi +import com.daml.ledger.javaapi.data.codegen.{ContractCompanion, Update} +import com.daml.ledger.test.java.semantic.deeplynestedvalue.{Contract, Handler, Nat, nat} +import com.digitalasset.base.error.ErrorCode +import com.digitalasset.canton.ledger.error.groups.CommandExecutionErrors + +import scala.annotation.tailrec +import scala.concurrent.{ExecutionContext, Future} +import scala.util.Success + +final class DeeplyNestedValueIT extends LedgerTestSuite { + implicit val handlerCompanion + : ContractCompanion.WithoutKey[Handler.Contract, Handler.ContractId, Handler] = + Handler.COMPANION + + @tailrec + private[this] def toNat(i: Long, acc: Nat = new nat.Z(javaapi.data.Unit.getInstance)): Nat = + if (i == 0) acc else toNat(i - 1, new nat.S(acc)) + + private[this] def waitForUpdateId( + alpha: ParticipantTestContext, + party: Party, + command: Update[_], + )(implicit + ec: ExecutionContext + ): Future[Either[Throwable, String]] = + alpha + .submitAndWait( + alpha.submitAndWaitRequest(party, command.commands) + ) + .transform(x => Success(x.map(_.updateId).toEither)) + + private[this] def camlCase(s: String) = + s.split(" ").iterator.map(_.capitalize).mkString("") + + List[Long](46, 100, 101, 110, 200).foreach { nesting => + val accepted = nesting <= 100 + val result = if (accepted) "Accept" else "Reject" + + // Once converted to Nat, `n` will have a nesting of `nesting`. + // Note that Nat.Z(()) has nesting 1. 
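+ // For example, toNat(2) yields new nat.S(new nat.S(new nat.Z(javaapi.data.Unit.getInstance))), which has nesting 3; hence the off-by-one adjustments below.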
+ val n = nesting - 1 + + // Choice arguments are always wrapped in a record + val nChoiceArgument = n - 1 + + // The nesting of the payload of a `Contract` is one more than the nat it contains + val nContract = n - 1 + + def test[T](description: String, errorCodeIfExpected: ErrorCode)( + update: ExecutionContext => ( + ParticipantTestContext, + Party, + ) => Future[Either[Throwable, T]] + ): Unit = + super.test( + result + camlCase(description) + nesting.toString, + s"${result.toLowerCase}s $description with a nesting of $nesting", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(alpha, Seq(party))) => + update(ec)(alpha, party).map { + case Right(_) if accepted => () + case Left(err: Throwable) if !accepted => + assertGrpcError( + err, + errorCodeIfExpected, + None, + checkDefiniteAnswerMetadata = true, + ) + case otherwise => + fail("Unexpected " + otherwise.fold(err => s"failure: $err", _ => "success")) + } + }) + + test( + "create command", + CommandExecutionErrors.Preprocessing.PreprocessingFailed, + ) { implicit ec => (alpha, party) => + waitForUpdateId(alpha, party, new Contract(party, nContract, toNat(nContract)).create) + } + + test( + "exercise command", + CommandExecutionErrors.Preprocessing.PreprocessingFailed, + ) { implicit ec => (alpha, party) => + for { + handler: Handler.ContractId <- alpha.create(party, new Handler(party)) + result <- waitForUpdateId( + alpha, + party, + handler.exerciseDestruct(toNat(nChoiceArgument)), + ) + } yield result + } + + test( + "create argument in CreateAndExercise command", + CommandExecutionErrors.Preprocessing.PreprocessingFailed, + ) { implicit ec => (alpha, party) => + waitForUpdateId( + alpha, + party, + new Contract(party, nContract, toNat(nContract)).createAnd + .exerciseArchive(), + ) + } + + test( + "choice argument in CreateAndExercise command", + CommandExecutionErrors.Preprocessing.PreprocessingFailed, + ) { implicit ec => (alpha, party) => + waitForUpdateId( + alpha, + party, + new Handler(party).createAnd.exerciseDestruct(toNat(nChoiceArgument)), + ) + } + + test( + "exercise argument", + CommandExecutionErrors.Interpreter.ValueNesting, + ) { implicit ec => (alpha, party) => + for { + handler: Handler.ContractId <- alpha.create(party, new Handler(party)) + result <- + waitForUpdateId( + alpha, + party, + handler.exerciseConstructThenDestruct(nChoiceArgument), + ) + } yield result + } + + test( + "exercise output", + CommandExecutionErrors.Interpreter.ValueNesting, + ) { implicit ec => (alpha, party) => + for { + handler: Handler.ContractId <- alpha.create(party, new Handler(party)) + result <- + waitForUpdateId(alpha, party, handler.exerciseConstruct(n)) + } yield result + } + + test( + "create argument", + CommandExecutionErrors.Interpreter.ValueNesting, + ) { implicit ec => (alpha, party) => + for { + handler: Handler.ContractId <- alpha.create(party, new Handler(party)) + result <- waitForUpdateId(alpha, party, handler.exerciseCreate(nContract)) + } yield result + } + + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/DivulgenceIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/DivulgenceIT.scala new file mode 100644 index 0000000000..36b9923d49 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/DivulgenceIT.scala @@ -0,0 +1,320 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH 
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.api.testtool.suites.v2_1
+
+import com.daml.ledger.api.testtool.infrastructure.Allocation.*
+import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite
+import com.daml.ledger.api.testtool.infrastructure.TransactionOps.*
+import com.daml.ledger.test.java.model.test.{Divulgence1, Divulgence2}
+import com.digitalasset.canton.ledger.api.TransactionShape.{AcsDelta, LedgerEffects}
+import com.digitalasset.canton.platform.store.utils.EventOps.EventOps
+
+final class DivulgenceIT extends LedgerTestSuite {
+  import CompanionImplicits.*
+
+  test(
+    "DivulgenceTx",
+    "Divulged contracts should not be exposed by the transaction service",
+    allocate(TwoParties),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob))) =>
+    for {
+      divulgence1 <- ledger.create(alice, new Divulgence1(alice))
+      divulgence2 <- ledger.create(bob, new Divulgence2(bob, alice))
+      _ <- ledger.exercise(alice, divulgence2.exerciseDivulgence2Archive(divulgence1))
+      bobTransactions <- ledger.transactions(AcsDelta, bob)
+      bobTransactionsLedgerEffects <- ledger.transactions(LedgerEffects, bob)
+      transactionsForBoth <- ledger.transactions(AcsDelta, alice, bob)
+    } yield {
+
+      // Inspecting the acs delta transaction stream as seen by Bob
+
+      // We expect only one transaction containing only one create event for Divulgence2.
+      // We expect to _not_ see the create or archive for Divulgence1, even though Divulgence1 was
+      // divulged to Bob, and even though the exercise is visible to Bob in the ledger effects.
+
+      assert(
+        bobTransactions.sizeIs == 1,
+        s"${bob.getValue} should see exactly one transaction but sees ${bobTransactions.size} instead",
+      )
+
+      val events = bobTransactions.head.events
+      assert(
+        events.sizeIs == 1,
+        s"The transaction should contain exactly one event but contains ${events.size} instead",
+      )
+
+      val event = events.head.event
+      assert(
+        event.isCreated,
+        s"The only event in the transaction was expected to be a created event",
+      )
+
+      val contractId = event.created.get.contractId
+      assert(
+        contractId == divulgence2.contractId,
+        s"The only visible event should be the creation of the second contract (expected $divulgence2, got $contractId instead)",
+      )
+
+      // Inspecting the ledger effects as seen by Bob
+
+      // Next, what we expect for Bob's ledger effects transactions. Note that here we witness the
+      // exercise that caused the archive of divulgence1, even though we did _not_ see the archive
+      // event in the acs delta transaction stream above.
+
+      // We expect to see two transactions: one for the second create and one for the exercise.
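+      // Illustrative sketch of the expected ledger effects view (assumed shape):
+      //   tx 1: Created(Divulgence2)
+      //   tx 2: Exercised(Divulgence2, choice Divulgence2Archive)
+      //           └─ Exercised(Divulgence1, consuming)   -- witnessed by Bob via divulgence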
+
+      assert(
+        bobTransactionsLedgerEffects.sizeIs == 2,
+        s"$bob should see exactly two transactions but sees ${bobTransactionsLedgerEffects.size} instead",
+      )
+
+      val createDivulgence2Transaction = bobTransactionsLedgerEffects(0)
+      assert(
+        createDivulgence2Transaction.rootNodeIds().sizeIs == 1,
+        s"The transaction that creates Divulgence2 should contain exactly one root event, but it contains ${createDivulgence2Transaction.rootNodeIds().size} instead",
+      )
+
+      val createDivulgence2 =
+        createDivulgence2Transaction.events.head
+      assert(
+        createDivulgence2.event.isCreated,
+        s"Event expected to be a create",
+      )
+
+      val createDivulgence2ContractId = createDivulgence2.getCreated.contractId
+      assert(
+        createDivulgence2ContractId == divulgence2.contractId,
+        s"The event where Divulgence2 is created should have the same contract identifier as the created contract (expected $divulgence2, got $createDivulgence2ContractId instead)",
+      )
+
+      val exerciseOnDivulgence2Transaction = bobTransactionsLedgerEffects(1)
+      assert(
+        exerciseOnDivulgence2Transaction.rootNodeIds().sizeIs == 1,
+        s"The transaction where a choice is exercised on Divulgence2 should contain exactly one root event, but it contains ${exerciseOnDivulgence2Transaction.rootNodeIds().size} instead",
+      )
+
+      val exerciseOnDivulgence2 = exerciseOnDivulgence2Transaction.events.head
+      assert(
+        exerciseOnDivulgence2.event.isExercised,
+        s"Expected event to be an exercise",
+      )
+
+      assert(exerciseOnDivulgence2.getExercised.contractId == divulgence2.contractId)
+
+      assert(
+        exerciseOnDivulgence2.getExercised.lastDescendantNodeId - exerciseOnDivulgence2.getExercised.nodeId == 1
+      )
+
+      val exerciseOnDivulgence1 =
+        exerciseOnDivulgence2Transaction.events
+          .find(
+            _.nodeId == exerciseOnDivulgence2.getExercised.lastDescendantNodeId
+          )
+          .get
+
+      assert(exerciseOnDivulgence1.event.isExercised)
+
+      assert(exerciseOnDivulgence1.getExercised.contractId == divulgence1.contractId)
+
+      assert(
+        exerciseOnDivulgence1.getExercised.lastDescendantNodeId == exerciseOnDivulgence1.getExercised.nodeId
+      )
+
+      // Alice should see:
+      // - create Divulgence1
+      // - create Divulgence2
+      // - archive Divulgence1
+      // Note that we do _not_ see the exercise of Divulgence2 because it is nonconsuming.
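+      // Sketch of the combined ACS delta stream checked below (assumed ordering by offset):
+      //   tx 1: Created(Divulgence1)   -- witnessed by Alice only
+      //   tx 2: Created(Divulgence2)   -- witnessed by Alice and Bob
+      //   tx 3: Archived(Divulgence1)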
+
+      assert(
+        transactionsForBoth.sizeIs == 3,
+        s"Filtering for both $alice and $bob should result in three transactions seen but ${transactionsForBoth.size} are seen instead",
+      )
+
+      val firstTransactionForBoth = transactionsForBoth.head
+      assert(
+        firstTransactionForBoth.events.sizeIs == 1,
+        s"The first transaction seen by filtering for both $alice and $bob should contain exactly one event but it contains ${firstTransactionForBoth.events.size} events instead",
+      )
+
+      val firstEventForBoth = transactionsForBoth.head.events.head.event
+      assert(
+        firstEventForBoth.isCreated,
+        s"The first event seen by filtering for both $alice and $bob was expected to be a creation",
+      )
+
+      val firstCreationForBoth = firstEventForBoth.created.get
+      assert(
+        firstCreationForBoth.contractId == divulgence1.contractId,
+        s"The creation seen by filtering for both $alice and $bob was expected to be $divulgence1 but is ${firstCreationForBoth.contractId} instead",
+      )
+
+      assert(
+        firstCreationForBoth.witnessParties == Seq(alice.getValue),
+        s"The creation seen by filtering for both $alice and $bob was expected to be witnessed by $alice but is instead ${firstCreationForBoth.witnessParties}",
+      )
+    }
+  })
+
+  test(
+    "ImmediateDivulgenceTx",
+    "Immediately divulged contracts (created events) should not be exposed by the transaction service",
+    allocate(TwoParties),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob))) =>
+    for {
+      divulgence2 <- ledger.create(bob, new Divulgence2(bob, alice))
+      _ <- ledger.exercise(alice, divulgence2.exerciseDivulgence2ImmediateDivulge())
+      bobTransactions <- ledger.transactions(AcsDelta, bob)
+      bobTransactionsLedgerEffects <- ledger.transactions(LedgerEffects, bob)
+    } yield {
+
+      // Inspecting the acs delta transaction stream as seen by Bob
+
+      // We expect only one transaction containing only one create event for Divulgence2.
+      // We expect to _not_ see the create for Divulgence1, even though Divulgence1 was divulged
+      // to Bob, and even though the exercise is visible to Bob in the ledger effects.
+
+      assert(
+        bobTransactions.sizeIs == 1,
+        s"${bob.getValue} should see exactly one transaction but sees ${bobTransactions.size} instead",
+      )
+
+      val events = bobTransactions.head.events
+      assert(
+        events.sizeIs == 1,
+        s"The transaction should contain exactly one event but contains ${events.size} instead",
+      )
+
+      val event = events.head.event
+      assert(
+        event.isCreated,
+        s"The only event in the transaction was expected to be a created event",
+      )
+
+      val contractId = event.created.get.contractId
+      assert(
+        contractId == divulgence2.contractId,
+        s"The only visible event should be the creation of the Divulgence2 contract (expected $divulgence2, got $contractId instead)",
+      )
+
+      // Inspecting the ledger effects as seen by Bob
+
+      // Next, what we expect for Bob's ledger effects transactions. Note that here we witness the
+      // exercise that caused the create of the immediately divulged Divulgence1 contract, even
+      // though we did _not_ see the create event in the acs delta transaction stream above.
+
+      // We expect to see two transactions: one for the first create and one for the exercise.
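+      // Illustrative sketch of the expected ledger effects view (assumed shape):
+      //   tx 1: Created(Divulgence2)
+      //   tx 2: Exercised(Divulgence2, choice Divulgence2ImmediateDivulge)   -- nonconsuming
+      //           └─ Created(Divulgence1)   -- immediately divulged to Bob, absent from his ACS delta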
+
+      assert(
+        bobTransactionsLedgerEffects.sizeIs == 2,
+        s"$bob should see exactly two transactions but sees ${bobTransactionsLedgerEffects.size} instead",
+      )
+
+      val createDivulgence2Transaction = bobTransactionsLedgerEffects(0)
+      assert(
+        createDivulgence2Transaction.rootNodeIds().sizeIs == 1,
+        s"The transaction that creates Divulgence2 should contain exactly one root event, but it contains ${createDivulgence2Transaction.rootNodeIds().size} instead",
+      )
+
+      val createDivulgence2 =
+        createDivulgence2Transaction.events.head
+      assert(
+        createDivulgence2.event.isCreated,
+        s"Event expected to be a create",
+      )
+
+      val createDivulgence2ContractId = createDivulgence2.getCreated.contractId
+      assert(
+        createDivulgence2ContractId == divulgence2.contractId,
+        s"The event where Divulgence2 is created should have the same contract identifier as the created contract (expected $divulgence2, got $createDivulgence2ContractId instead)",
+      )
+
+      val exerciseOnDivulgence2Transaction = bobTransactionsLedgerEffects(1)
+      assert(
+        exerciseOnDivulgence2Transaction.rootNodeIds().sizeIs == 1,
+        s"The transaction where a choice is exercised on Divulgence2 should contain exactly one root event, but it contains ${exerciseOnDivulgence2Transaction.rootNodeIds().size} instead",
+      )
+
+      val exerciseOnDivulgence2 = exerciseOnDivulgence2Transaction.events.head
+      assert(
+        exerciseOnDivulgence2Transaction.events.sizeIs == 2,
+        "The transaction should contain exactly two events, one for the exercise and one for the create",
+      )
+      assert(
+        exerciseOnDivulgence2.event.isExercised,
+        s"Expected event to be an exercise",
+      )
+      assertAcsDelta(
+        exerciseOnDivulgence2Transaction.events,
+        acsDelta = false,
+        "None of the events in the transaction should have acs_delta set, as this is a non-consuming exercise and an immediately divulged create",
+      )
+
+      assert(exerciseOnDivulgence2.getExercised.contractId == divulgence2.contractId)
+
+      assert(
+        exerciseOnDivulgence2.getExercised.lastDescendantNodeId - exerciseOnDivulgence2.getExercised.nodeId == 1,
+        "The last descendant node id should be one more than the node id, since it contains the create event",
+      )
+    }
+  })
+
+  test(
+    "DivulgenceAcs",
+    "Divulged contracts should not be exposed by the active contract service",
+    allocate(TwoParties),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob))) =>
+    for {
+      divulgence1 <- ledger.create(alice, new Divulgence1(alice))
+      divulgence2 <- ledger.create(bob, new Divulgence2(bob, alice))
+      _ <- ledger.exercise(alice, divulgence2.exerciseDivulgence2Fetch(divulgence1))
+      activeForBobOnly <- ledger.activeContracts(Some(Seq(bob)))
+      activeForBoth <- ledger.activeContracts(Some(Seq(alice, bob)))
+    } yield {
+
+      // Bob only sees Divulgence2
+      assert(
+        activeForBobOnly.sizeIs == 1,
+        s"$bob should see only one active contract but sees ${activeForBobOnly.size} instead",
+      )
+      assert(
+        activeForBobOnly.head.contractId == divulgence2.contractId,
+        s"$bob should see $divulgence2 but sees ${activeForBobOnly.head.contractId} instead",
+      )
+
+      // Since we're filtering for Bob only, Bob will be the only reported witness even if Alice sees the contract
+      assert(
+        activeForBobOnly.head.witnessParties == Seq(bob.getValue),
+        s"The witness parties as seen by $bob should only include $bob but are instead ${activeForBobOnly.head.witnessParties}",
+      )
+
+      // Alice sees both
+      assert(
+        activeForBoth.sizeIs == 2,
+        s"The active contracts as seen by $alice and $bob should be two but are ${activeForBoth.size} instead",
+      )
+      val divulgence1ContractId = divulgence1.contractId
+      val divulgence2ContractId = divulgence2.contractId
+      val activeForBothContractIds = activeForBoth.map(_.contractId).sorted
+      val expectedContractIds = Seq(divulgence1ContractId, divulgence2ContractId).sorted
+      assert(
+        activeForBothContractIds == expectedContractIds,
+        s"$divulgence1 and $divulgence2 are expected to be seen when filtering for $alice and $bob but instead the following contract identifiers are seen: $activeForBothContractIds",
+      )
+      val divulgence1Witnesses =
+        activeForBoth.find(_.contractId == divulgence1ContractId).get.witnessParties.sorted
+      val divulgence2Witnesses =
+        activeForBoth.find(_.contractId == divulgence2ContractId).get.witnessParties.sorted
+      assert(
+        divulgence1Witnesses == Seq(alice.getValue),
+        s"The witness parties of the first contract should only include $alice but are instead $divulgence1Witnesses",
+      )
+      assert(
+        divulgence2Witnesses == Seq(alice, bob).map(_.getValue).sorted,
+        s"The witness parties of the second contract should include $alice and $bob but are instead $divulgence2Witnesses",
+      )
+    }
+  })
+
+}
diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/EventQueryServiceIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/EventQueryServiceIT.scala
new file mode 100644
index 0000000000..51dcf072e5
--- /dev/null
+++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/EventQueryServiceIT.scala
@@ -0,0 +1,489 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.api.testtool.suites.v2_1
+
+import com.daml.ledger.api.testtool.infrastructure.Allocation.*
+import com.daml.ledger.api.testtool.infrastructure.Assertions.*
+import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite
+import com.daml.ledger.api.v2.event_query_service.GetEventsByContractIdRequest
+import com.daml.ledger.test.java.model.test.{Agreement, Dummy}
+import com.digitalasset.canton.discard.Implicits.DiscardOps
+import com.digitalasset.canton.ledger.api.TransactionShape.LedgerEffects
+import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors
+import com.digitalasset.daml.lf.value.Value.ContractId
+
+class EventQueryServiceIT extends LedgerTestSuite {
+  import com.daml.ledger.api.testtool.suites.v2_1.CompanionImplicits.*
+
+  // TODO(i16065): Re-enable getEventsByContractKey tests
+//  private def toOption(protoString: String): Option[String] = {
+//    if (protoString.nonEmpty) Some(protoString) else None
+//  }
+
+  // Note that the Daml template must be inspected to establish the key type and fields.
+  // For the TextKey template the key is: (tkParty, tkKey) : (Party, Text)
+  // When populating the Record, identifiers are not required.
+  // TODO(i16065): Re-enable getEventsByContractKey tests
+//  private def makeTextKeyKey(party: Party, keyText: String) = {
+//    Value.Sum.Record(
+//      Record(fields =
+//        Vector(
+//          RecordField(value = Some(Value(Value.Sum.Party(party.underlying)))),
+//          RecordField(value = Some(Value(Value.Sum.Text(keyText)))),
+//        )
+//      )
+//    )
+//  }
+
+  test(
+    "TXEventsByContractIdBasic",
+    "Expose a create event by contract id",
+    allocate(SingleParty),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    for {
+      dummyCid <- ledger.create(party, new Dummy(party))
+      acs <- ledger.activeContracts(Some(Seq(party)))
+      expected = assertDefined(
+        acs.headOption,
+        "Expected a created event",
+      )
+      events <- ledger.getEventsByContractId(
+        GetEventsByContractIdRequest(
+          expected.contractId,
+          Some(ledger.eventFormat(verbose = true, Some(Seq(party)))),
+        )
+      )
+      // archive not to interfere with other tests
+      _ <- ledger.exercise(party, dummyCid.exerciseArchive())
+    } yield {
+      val created = assertDefined(events.created, "Expected a created event wrapper")
+      val actual = assertDefined(created.createdEvent, "Expected a created event")
+      assertEquals("Looked up event should match the transaction event", actual, expected)
+    }
+  })
+
+  test(
+    "TXEventsByContractIdConsumed",
+    "Expose an archive event by contract id",
+    allocate(SingleParty),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    for {
+      dummyCid <- ledger.create(party, new Dummy(party))
+      tx <- ledger.submitAndWaitForTransaction(
+        ledger.submitAndWaitForTransactionRequest(party, dummyCid.exerciseDummyChoice1().commands)
+      )
+      expected = assertDefined(
+        tx.getTransaction.events.flatMap(_.event.archived).headOption,
+        "Expected an exercised event",
+      )
+      events <- ledger.getEventsByContractId(
+        GetEventsByContractIdRequest(
+          dummyCid.contractId,
+          Some(ledger.eventFormat(verbose = true, Some(Seq(party)))),
+        )
+      )
+    } yield {
+      assertDefined(events.created, "Expected a create event").discard
+      val archived = assertDefined(events.archived, "Expected an exercise event wrapper")
+      val actual = assertDefined(archived.archivedEvent, "Expected an exercise event")
+      assertEquals("Looked up event should match the transaction event", actual, expected)
+    }
+  })
+
+  test(
+    "TXEventsByContractIdNotExistent",
+    "No events are returned for a non-existent contract id",
+    allocate(SingleParty),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    val nonExistentContractId = ContractId.V1.assertFromString("00" * 32 + "0001")
+    for {
+      error <- ledger
+        .getEventsByContractId(
+          GetEventsByContractIdRequest(
+            nonExistentContractId.coid,
+            Some(ledger.eventFormat(verbose = true, Some(Seq(party)))),
+          )
+        )
+        .failed
+    } yield {
+      assertGrpcError(
+        error,
+        RequestValidationErrors.NotFound.ContractEvents,
+        Some("Contract events not found, or not visible"),
+        checkDefiniteAnswerMetadata = true,
+      )
+    }
+  })
+
+  test(
+    "TXEventsByContractIdNotVisible",
+    "CONTRACT_EVENTS_NOT_FOUND returned for a non-visible contract id",
+    allocate(TwoParties),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party, notTheSubmittingParty))) =>
+    for {
+      dummyCid <- ledger.create(party, new Dummy(party))
+      acs <- ledger.activeContracts(Some(Seq(party)))
+      expected = assertDefined(
+        acs.headOption,
+        "Expected a created event",
+      )
+      error <- ledger
+        .getEventsByContractId(
+          GetEventsByContractIdRequest(
+            expected.contractId,
+            Some(ledger.eventFormat(verbose = true,
Some(Seq(notTheSubmittingParty)))), + ) + ) + .failed + // archive not to interfere with other tests + _ <- ledger.exercise(party, dummyCid.exerciseArchive()) + } yield { + assertGrpcError( + error, + RequestValidationErrors.NotFound.ContractEvents, + Some("Contract events not found, or not visible"), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "TXEventsByContractIdFilterCombinations", + "EventsByContractId should support event format", + allocate(TwoParties), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, Seq(party, notTheSubmittingParty))) => + for { + dummyCid <- ledger.create(party, new Dummy(party)) + acs <- ledger.activeContracts(Some(Seq(party)), verbose = false) + expected = assertDefined( + acs.headOption, + "Expected a created event", + ) + wildcardPartiesResult <- ledger.getEventsByContractId( + GetEventsByContractIdRequest( + expected.contractId, + Some(ledger.eventFormat(verbose = false, None)), + ) + ) + templateFiltersResult <- ledger.getEventsByContractId( + GetEventsByContractIdRequest( + expected.contractId, + Some(ledger.eventFormat(verbose = false, Some(Seq(party)), Seq(Dummy.TEMPLATE_ID))), + ) + ) + templateFiltersNotFoundDueToParty <- ledger + .getEventsByContractId( + GetEventsByContractIdRequest( + expected.contractId, + Some( + ledger.eventFormat( + verbose = false, + Some(Seq(notTheSubmittingParty)), + Seq(Dummy.TEMPLATE_ID), + ) + ), + ) + ) + .failed + templateFiltersNotFoundDueToTemplate <- ledger + .getEventsByContractId( + GetEventsByContractIdRequest( + expected.contractId, + Some(ledger.eventFormat(verbose = false, Some(Seq(party)), Seq(Agreement.TEMPLATE_ID))), + ) + ) + .failed + // archive not to interfere with other tests + _ <- ledger.exercise(party, dummyCid.exerciseArchive()) + } yield { + assertIsEmpty(wildcardPartiesResult.archived) + assertEquals( + "Looked up wildcardPartiesResult should match the transaction event", + assertDefined( + wildcardPartiesResult.created.flatMap(_.createdEvent), + "Expected a create event", + ), + expected, + ) + + assertIsEmpty(templateFiltersResult.archived) + assertEquals( + "Looked up templateFiltersResult should match the transaction event", + assertDefined( + templateFiltersResult.created.flatMap(_.createdEvent), + "Expected a create event", + ), + expected, + ) + + assertGrpcError( + templateFiltersNotFoundDueToParty, + RequestValidationErrors.NotFound.ContractEvents, + Some("Contract events not found, or not visible"), + checkDefiniteAnswerMetadata = true, + ) + + assertGrpcError( + templateFiltersNotFoundDueToTemplate, + RequestValidationErrors.NotFound.ContractEvents, + Some("Contract events not found, or not visible"), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "TXEventsByContractIdTransient", + "Expose transient events (created and archived in the same transaction) by contract id", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + // Create command with transient contract + tx <- ledger.submitAndWaitForTransaction( + ledger.submitAndWaitForTransactionRequest( + party = party, + commands = new Dummy(party).createAnd.exerciseArchive().commands, + transactionShape = LedgerEffects, + ) + ) + contractId = assertDefined( + tx.getTransaction.events.flatMap(_.event.created).headOption, + "Expected a created event", + ).contractId + transient <- ledger + .getEventsByContractId( + GetEventsByContractIdRequest( + contractId, + Some(ledger.eventFormat(verbose = false, parties = 
Some(Seq(party)))), + ) + ) + transientPartyWildcard <- ledger + .getEventsByContractId( + GetEventsByContractIdRequest( + contractId, + Some(ledger.eventFormat(verbose = false, parties = None)), + ) + ) + } yield { + assertDefined(transient.created.flatMap(_.createdEvent), "Expected a created event") + assertDefined(transient.archived.flatMap(_.archivedEvent), "Expected an archived event") + + assertDefined( + transientPartyWildcard.created.flatMap(_.createdEvent), + "Expected a created event", + ) + assertDefined( + transientPartyWildcard.archived.flatMap(_.archivedEvent), + "Expected an archived event", + ) + } + }) + + // TODO(i16065): Re-enable getEventsByContractKey tests +// test( +// "TXEventsByContractKeyBasic", +// "Expose a visible create event by contract key", +// allocate(SingleParty), +// )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => +// val someKey = "some key" +// val key = makeTextKeyKey(party, someKey) +// +// for { +// tx <- ledger.submitAndWaitForTransaction( +// ledger.submitAndWaitRequest(party, new TextKey(party.underlying, someKey, JList.of()).create.commands) +// ) +// _ = logger.error(tx.toString) +// expected = assertDefined( +// tx.transaction.flatMap(_.events.flatMap(_.event.created).headOption), +// "Expected a created event", +// ) +// +// events <- ledger.getEventsByContractKey( +// GetEventsByContractKeyRequest( +// contractKey = Some(key), +// templateId = Some(TextKey.TEMPLATE_ID.toV1), +// requestingParties = Seq(party), +// ) +// ) +// _ = logger.error(events.toString) +// } yield { +// val actual = assertDefined(events.createEvent, "Expected a created event") +// assertEquals("Looked up event should match the transaction event", actual, expected) +// } +// }) + +// test( +// "TXArchiveEventByContractKeyBasic", +// "Expose a visible archive event by contract key", +// allocate(SingleParty), +// )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => +// val someKey = "some archive key" +// val key = makeTextKeyKey(party, someKey) +// +// for { +// cId: TextKey.ContractId <- ledger.create(party, new TextKey(party, someKey, JList.of())) +// tx <- ledger.submitAndWaitForTransaction( +// ledger.submitAndWaitRequest(party, cId.exerciseTextKeyChoice().commands) +// ) +// expected = assertDefined( +// tx.transaction.flatMap(_.events.flatMap(_.event.archived).headOption), +// "Expected an archived event", +// ) +// events <- ledger.getEventsByContractKey( +// GetEventsByContractKeyRequest( +// contractKey = Some(key), +// templateId = Some(TextKey.TEMPLATE_ID.toV1), +// requestingParties = Seq(party), +// ) +// ) +// } yield { +// assertDefined(events.createEvent, "Expected a create event").discard +// val actual = assertDefined(events.archiveEvent, "Expected a archived event") +// assertEquals("Looked up event should match the transaction event", actual, expected) +// } +// }) + +// test( +// "TXEventsByContractKeyNoKey", +// "No events are returned for a non existent contract key", +// allocate(SingleParty), +// )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => +// val key = makeTextKeyKey(party, "non existent key") +// +// for { +// events <- ledger.getEventsByContractKey( +// GetEventsByContractKeyRequest( +// contractKey = Some(key), +// templateId = Some(TextKey.TEMPLATE_ID.toV1), +// requestingParties = Seq(party), +// ) +// ) +// } yield { +// assertIsEmpty(Seq(events.createEvent, events.archiveEvent).flatten[GeneratedMessage]) +// } +// }) + +// test( +// 
"TXEventsByContractKeyNotVisible", +// "No events are returned for a non visible contract key", +// allocate(TwoParties), +// )(implicit ec => { case Participants(Participant(ledger, Seq(party, notTheSubmittingParty))) => +// val nonVisibleKey = "non visible key" +// val key = makeTextKeyKey(party, nonVisibleKey) +// +// for { +// _ <- ledger.submitAndWaitForTransaction( +// ledger.submitAndWaitRequest( +// party, +// new TextKey(party, nonVisibleKey, JList.of()).create.commands, +// ) +// ) +// +// events <- ledger.getEventsByContractKey( +// GetEventsByContractKeyRequest( +// contractKey = Some(key), +// templateId = Some(TextKey.TEMPLATE_ID.toV1), +// requestingParties = Seq(notTheSubmittingParty), +// ) +// ) +// } yield { +// assertIsEmpty(Seq(events.createEvent, events.archiveEvent).flatten[GeneratedMessage]) +// } +// }) + +// test( +// "TXEventsByContractKeyEndExclusive", +// "Should return event prior to the end exclusive event", +// allocate(SingleParty), +// )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => +// val exercisedKey = "paging key" +// val key = makeTextKeyKey(party, exercisedKey) +// +// def getNextResult(continuationToken: Option[String]): Future[Option[String]] = { +// ledger +// .getEventsByContractKey( +// GetEventsByContractKeyRequest( +// contractKey = Some(key), +// templateId = Some(TextKey.TEMPLATE_ID.toV1), +// requestingParties = Seq(party), +// continuationToken = continuationToken.getOrElse( +// GetEventsByContractKeyRequest.defaultInstance.continuationToken +// ), +// ) +// ) +// .map(r => toOption(r.continuationToken)) +// } +// +// for { +// textKeyCid1: TextKey.ContractId <- ledger.create( +// party, +// new TextKey(party, exercisedKey, Nil.asJava), +// ) +// _ <- ledger.submitAndWaitForTransaction( +// ledger.submitAndWaitRequest(party, textKeyCid1.exerciseTextKeyChoice().commands) +// ) +// textKeyCid2: TextKey.ContractId <- ledger.create( +// party, +// new TextKey(party, exercisedKey, Nil.asJava), +// ) +// _ <- ledger.submitAndWaitForTransaction( +// ledger.submitAndWaitRequest(party, textKeyCid2.exerciseTextKeyChoice().commands) +// ) +// eventId1 <- getNextResult(None) +// eventId2 <- getNextResult(Some(assertDefined(eventId1, "Expected eventId2"))) +// eventId3 <- getNextResult(Some(assertDefined(eventId2, "Expected eventId3"))) +// } yield { +// assertEquals("Expected the final offset to be empty", eventId3, None) +// } +// }) + +// test( +// "TXEventsByContractKeyChained", +// "Should not miss events where the choice recreates the key", +// allocate(SingleParty), +// )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => +// val exercisedKey = "paging key" +// val key = makeTextKeyKey(party, exercisedKey) +// +// // (contract-id, continuation-token) +// def getNextResult( +// continuationToken: Option[String] +// ): Future[(Option[String], Option[String])] = { +// ledger +// .getEventsByContractKey( +// GetEventsByContractKeyRequest( +// contractKey = Some(key), +// templateId = Some(TextKey.TEMPLATE_ID.toV1), +// requestingParties = Seq(party), +// continuationToken = continuationToken.getOrElse( +// GetEventsByContractKeyRequest.defaultInstance.continuationToken +// ), +// ) +// ) +// .map(r => (r.createEvent.map(_.contractId), toOption(r.continuationToken))) +// } +// +// for { +// expected: TextKey.ContractId <- ledger.create( +// party, +// new TextKey(party, exercisedKey, Nil.asJava), +// ) +// _ <- ledger.submitAndWaitForTransaction( +// ledger.submitAndWaitRequest( +// party, +// 
expected.exerciseTextKeyDisclose(JList.of(): JList[String]).commands, +// ) +// ) +// (cId2, token2) <- getNextResult(None) +// (cId1, token1) <- getNextResult(token2) +// (cId0, _) <- getNextResult(token1) +// } yield { +// assertEquals("Expected the first offset to be empty", cId2.isDefined, true) +// assertEquals("Expected the final offset to be empty", cId1, Some(expected.contractId)) +// assertEquals("Expected the final offset to be empty", cId0.isDefined, false) +// } +// }) + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/ExplicitDisclosureIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/ExplicitDisclosureIT.scala new file mode 100644 index 0000000000..2673268d88 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/ExplicitDisclosureIT.scala @@ -0,0 +1,747 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.TransactionHelpers.createdEvents +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, Party} +import com.daml.ledger.api.testtool.suites.v2_1.CompanionImplicits.* +import com.daml.ledger.api.v2.commands.DisclosedContract +import com.daml.ledger.api.v2.event.CreatedEvent +import com.daml.ledger.api.v2.transaction_filter.CumulativeFilter.IdentifierFilter +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_LEDGER_EFFECTS +import com.daml.ledger.api.v2.transaction_filter.{ + CumulativeFilter, + EventFormat, + Filters, + TemplateFilter, + TransactionFormat, +} +import com.daml.ledger.api.v2.value.Identifier +import com.daml.ledger.javaapi +import com.daml.ledger.javaapi.data.CreateCommand +import com.daml.ledger.test.java.model.test.{Delegated, Delegation, DiscloseCreate, Dummy} +import com.digitalasset.canton.ledger.error.groups.{ + CommandExecutionErrors, + ConsistencyErrors, + RequestValidationErrors, +} +import com.digitalasset.daml.lf.transaction.TransactionCoder +import com.google.protobuf.ByteString +import org.scalatest.Inside.inside + +import java.util.List as JList +import scala.concurrent.{ExecutionContext, Future} +import scala.util.chaining.scalaUtilChainingOps +import scala.util.{Success, Try} + +final class ExplicitDisclosureIT extends LedgerTestSuite { + import ExplicitDisclosureIT.* + + test( + "EDCorrectCreatedEventBlobDisclosure", + "Submission is successful if the correct disclosure as created_event_blob is provided", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case p @ Participants( + Participant(ownerParticipant, Seq(owner)), + Participant(delegateParticipant, Seq(delegate)), + ) => + for { + testContext <- initializeTest( + ownerParticipant = ownerParticipant, + delegateParticipant = delegateParticipant, + owner = owner, + delegate = delegate, + transactionFormat = formatByPartyAndTemplate(owner), + ) + + // Ensure participants are synchronized + _ <- p.synchronize + + // Exercise a choice on the Delegation that fetches the Delegated contract + // Fails because the submitter doesn't see the contract 
being fetched + exerciseFetchError <- testContext + .exerciseFetchDelegated() + .mustFail("the submitter does not see the contract") + + // Exercise the same choice, this time using correct explicit disclosure + _ <- testContext.exerciseFetchDelegated(testContext.disclosedContract) + } yield { + assertEquals(!testContext.disclosedContract.createdEventBlob.isEmpty, true) + + assertGrpcError( + exerciseFetchError, + ConsistencyErrors.ContractNotFound, + None, + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "EDSuperfluousDisclosure", + "Submission is successful when unnecessary disclosed contract is provided", + allocate(SingleParty, SingleParty), + )(testCase = implicit ec => { + case p @ Participants( + Participant(ownerParticipant, Seq(owner)), + Participant(delegateParticipant, Seq(delegate)), + ) => + for { + testContext <- initializeTest( + ownerParticipant = ownerParticipant, + delegateParticipant = delegateParticipant, + owner = owner, + delegate = delegate, + transactionFormat = formatByPartyAndTemplate(owner), + ) + + dummyCid: Dummy.ContractId <- ownerParticipant.create(owner, new Dummy(owner)) + txReq <- ownerParticipant.getTransactionsRequest( + formatByPartyAndTemplate(owner, Dummy.TEMPLATE_ID) + ) + dummyTxs <- ownerParticipant.transactions( + txReq + ) + dummyCreate = createdEvents(dummyTxs(0)).head + dummyDisclosedContract = createEventToDisclosedContract(dummyCreate) + + // Ensure participants are synchronized + _ <- p.synchronize + + // Exercise works with provided disclosed contract + _ <- testContext.exerciseFetchDelegated(testContext.disclosedContract) + // Exercise works with the Dummy contract as a superfluous disclosed contract + _ <- testContext.exerciseFetchDelegated( + testContext.disclosedContract, + dummyDisclosedContract, + ) + + // Archive the Dummy contract + _ <- ownerParticipant.exercise(owner, dummyCid.exerciseArchive()) + + // Ensure participants are synchronized + _ <- p.synchronize + + // Exercise works with the archived superfluous disclosed contract + _ <- testContext.exerciseFetchDelegated( + testContext.disclosedContract, + dummyDisclosedContract, + ) + } yield () + }) + + test( + "EDArchivedDisclosedContracts", + "The ledger rejects archived disclosed contracts", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case p @ Participants( + Participant(ownerParticipant, Seq(owner)), + Participant(delegateParticipant, Seq(delegate)), + ) => + for { + testContext <- initializeTest( + ownerParticipant = ownerParticipant, + delegateParticipant = delegateParticipant, + owner = owner, + delegate = delegate, + transactionFormat = formatByPartyAndTemplate(owner), + ) + + // Archive the disclosed contract + _ <- ownerParticipant.exercise(owner, testContext.delegatedCid.exerciseArchive()) + + // Ensure participants are synchronized + _ <- p.synchronize + + // Exercise the choice using the now inactive disclosed contract + _ <- testContext + .exerciseFetchDelegated(testContext.disclosedContract) + .mustFail("the contract is already archived") + } yield { + // TODO(#16361) ED: Assert specific error codes once Canton error codes can be accessible from these suites + } + }) + + test( + "EDDisclosedContractsArchiveRaceTest", + "Only one archival succeeds in a race between a normal exercise and one with disclosed contracts", + allocate(SingleParty, SingleParty), + repeated = 3, + )(implicit ec => { + case p @ Participants(Participant(ledger1, Seq(party1)), Participant(ledger2, Seq(party2))) => + val attempts = 10 + + Future + .traverse((1 to 
attempts).toList) { + _ => + for { + contractId: Dummy.ContractId <- ledger1.create(party1, new Dummy(party1)) + + transactions <- ledger1 + .transactionsByTemplateId(Dummy.TEMPLATE_ID, Some(Seq(party1))) + create = createdEvents(transactions(0)).head + disclosedContract = createEventToDisclosedContract(create) + + // Submit concurrently two consuming exercise choices (with and without disclosed contract) + party1_exerciseF = ledger1.exercise(party1, contractId.exerciseArchive()) + // Ensure participants are synchronized + _ <- p.synchronize + party2_exerciseWithDisclosureF = + ledger2.submitAndWait( + ledger2 + .submitAndWaitRequest(party2, contractId.exercisePublicChoice(party2).commands) + .update(_.commands.disclosedContracts := scala.Seq(disclosedContract)) + ) + + // Wait for both commands to finish + party1_exercise_result <- party1_exerciseF.transform(Success(_)) + party2_exerciseWithDisclosure <- party2_exerciseWithDisclosureF.transform(Success(_)) + } yield { + oneFailedWith( + party1_exercise_result, + party2_exerciseWithDisclosure, + ) { _ => + // TODO(#16361) ED: Assert specific error codes once Canton error codes can be accessible from these suites + () + } + } + } + .map(_ => ()) + }) + + test( + "EDMalformedDisclosedContractCreatedEventBlob", + "The ledger rejects disclosed contracts with a malformed created event blob", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case p @ Participants( + Participant(ownerParticipant, Seq(owner)), + Participant(delegateParticipant, Seq(delegate)), + ) => + for { + testContext <- initializeTest( + ownerParticipant = ownerParticipant, + delegateParticipant = delegateParticipant, + owner = owner, + delegate = delegate, + transactionFormat = formatByPartyAndTemplate(owner), + ) + + // Ensure participants are synchronized + _ <- p.synchronize + + // Exercise a choice using invalid explicit disclosure + failure <- testContext + .exerciseFetchDelegated( + testContext.disclosedContract + .update(_.createdEventBlob.set(ByteString.copyFromUtf8("foo"))) + ) + .mustFail("using a malformed disclosed contract created event blob") + + } yield { + assertGrpcError( + failure, + RequestValidationErrors.InvalidArgument, + Some( + "The submitted request has invalid arguments: Unable to decode disclosed contract event payload: DecodeError" + ), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "EDInconsistentDisclosedContract", + "The ledger rejects inconsistent disclosed contract", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case p @ Participants( + Participant(ownerParticipant, Seq(owner)), + Participant(delegateParticipant, Seq(delegate)), + ) => + for { + ownerContext <- initializeTest( + ownerParticipant = ownerParticipant, + delegateParticipant = delegateParticipant, + owner = owner, + delegate = delegate, + transactionFormat = formatByPartyAndTemplate(owner), + ) + + // Create a new context only for the sake of getting a new disclosed contract + // with the same template id + delegateContext <- initializeTest( + ownerParticipant = delegateParticipant, + delegateParticipant = delegateParticipant, + owner = delegate, + delegate = delegate, + transactionFormat = formatByPartyAndTemplate(delegate), + ) + + // Ensure participants are synchronized + _ <- p.synchronize + + otherSalt = TransactionCoder + .decodeFatContractInstance(delegateContext.disclosedContract.createdEventBlob) + .map(_.authenticationData) + .getOrElse(fail("contract decode failed")) + + tamperedEventBlob = TransactionCoder + 
.encodeFatContractInstance( + TransactionCoder + .decodeFatContractInstance(ownerContext.disclosedContract.createdEventBlob) + .map(_.setAuthenticationData(otherSalt)) + .getOrElse(fail("contract decode failed")) + ) + .getOrElse(fail("contract encode failed")) + + _ <- ownerContext + // Use of inconsistent disclosed contract + // i.e. the delegate cannot fetch the owner's contract with attaching a different disclosed contract + .exerciseFetchDelegated( + ownerContext.disclosedContract.copy(createdEventBlob = tamperedEventBlob) + ) + .mustFail("using an inconsistent disclosed contract created event blob") + } yield { + // TODO(#16361) ED: Assert specific error codes once Canton error codes can be accessible from these suites + // Should be DISCLOSED_CONTRACT_AUTHENTICATION_FAILED + } + }) + + test( + "EDDuplicates", + "Submission is rejected on duplicate contract ids or key hashes", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case p @ Participants( + Participant(ownerParticipant, Seq(owner)), + Participant(delegateParticipant, Seq(delegate)), + ) => + for { + testContext <- initializeTest( + ownerParticipant = ownerParticipant, + delegateParticipant = delegateParticipant, + owner = owner, + delegate = delegate, + transactionFormat = formatByPartyAndTemplate(owner), + ) + + // Ensure participants are synchronized + _ <- p.synchronize + + // Exercise a choice with a disclosed contract + _ <- testContext.exerciseFetchDelegated(testContext.disclosedContract) + + // Submission with disclosed contracts with the same contract id should be rejected + errorDuplicateContractId <- testContext + .dummyCreate(testContext.disclosedContract, testContext.disclosedContract) + .mustFail("duplicate contract id") + } yield { + assertGrpcError( + errorDuplicateContractId, + RequestValidationErrors.InvalidArgument, + Some( + s"Disclosed contracts contain duplicate contract id (${testContext.disclosedContract.contractId})" + ), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "EDRejectOnCreatedEventBlobNotSet", + "Submission is rejected when the disclosed contract created event blob is not set", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case p @ Participants( + Participant(ownerParticipant, Seq(owner)), + Participant(delegateParticipant, Seq(delegate)), + ) => + for { + testContext <- initializeTest( + ownerParticipant = ownerParticipant, + delegateParticipant = delegateParticipant, + owner = owner, + delegate = delegate, + transactionFormat = formatByPartyAndTemplate(owner), + ) + + // Ensure participants are synchronized + _ <- p.synchronize + + failure <- testContext + .exerciseFetchDelegated( + testContext.disclosedContract.copy( + createdEventBlob = ByteString.EMPTY + ) + ) + .mustFail("Submitter forwarded a contract with unpopulated created_event_blob") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.MissingField, + Some( + "The submitted command is missing a mandatory field: DisclosedContract.createdEventBlob" + ), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "EDDiscloseeUsesWitnessedContract", + "A contract creation witness can fetch and use it as a disclosed contract in a command submission (if authorized)", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case p @ Participants( + Participant(ownerParticipant, Seq(owner)), + Participant(witnessParticipant, Seq(witness)), + ) => + for { + discloseCreate <- ownerParticipant.create(owner, new DiscloseCreate(owner)) + _ <- ownerParticipant.exercise(owner, 
discloseCreate.exerciseDiscloseCreate_To(witness)) + _ <- p.synchronize + end <- witnessParticipant.currentEnd() + req = ownerParticipant.getUpdatesRequestWithEnd( + transactionFormatO = Some(formatByPartyAndTemplate(witness, Dummy.TEMPLATE_ID)), + begin = witnessParticipant.begin, + end = Some(end), + ) + witnessTxs <- witnessParticipant.transactions(req) + tx = assertSingleton("Witness' transactions", witnessTxs) + witnessedCreate = assertSingleton("The witnessed create", createdEvents(tx)) + witnessedCreateAsDisclosedContract = createEventToDisclosedContract(witnessedCreate) + _ = assert( + !witnessedCreateAsDisclosedContract.createdEventBlob.isEmpty, + "createdEventBlob is empty", + ) + dummyContract = new Dummy.ContractId(witnessedCreateAsDisclosedContract.contractId) + _ <- { + val request = witnessParticipant + .submitAndWaitRequest( + witness, + dummyContract.exercisePublicChoice(witness).commands, + ) + .update(_.commands.disclosedContracts := Seq(witnessedCreateAsDisclosedContract)) + witnessParticipant.submitAndWait(request).map(_ => ()) + } + } yield () + }) + + test( + "EDFailOnDisclosedContractIdMismatchWithPrescribedSynchronizerId", + "A submission is rejected if an attached disclosed contract specifies a synchronizer id different than the synchronizer id prescribed in the command", + allocate(SingleParty, SingleParty).expectingMinimumNumberOfSynchronizers(2), + )(implicit ec => { + case p @ Participants( + Participant(ownerParticipant, Seq(owner)), + Participant(delegateParticipant, Seq(delegate)), + ) => + val (synchronizer1, synchronizer2) = inside(owner.initialSynchronizers) { + case synchronizer1 :: synchronizer2 :: _ => + synchronizer1 -> synchronizer2 + } + + val contractKey = ownerParticipant.nextKeyId() + for { + delegationCid <- ownerParticipant + .submitAndWaitForTransaction( + ownerParticipant + .submitAndWaitForTransactionRequest( + owner, + new Delegation(owner.getValue, delegate.getValue).create().commands(), + ) + .update(_.commands.synchronizerId.set(synchronizer1)) + ) + .map( + _.getTransaction.events.head.getCreated.contractId.pipe(new Delegation.ContractId(_)) + ) + + delegatedCid <- ownerParticipant + .submitAndWaitForTransaction( + ownerParticipant + .submitAndWaitForTransactionRequest( + owner, + new Delegated(owner.getValue, contractKey).create().commands(), + ) + .update(_.commands.synchronizerId := synchronizer1) + ) + .map( + _.getTransaction.events.head.getCreated.contractId.pipe(new Delegated.ContractId(_)) + ) + + // Get the contract payload from the transaction stream of the owner + txReq <- ownerParticipant.getTransactionsRequest(formatByPartyAndTemplate(owner)) + delegatedTx <- ownerParticipant.transactions(txReq) + createDelegatedEvent = createdEvents(delegatedTx.head).head + + // Copy the actual Delegated contract to a disclosed contract (which can be shared out of band). 
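+        // Note: createEventToDisclosedContract (defined below) leaves synchronizerId empty;
+        // the request below then sets it to synchronizer2 while the command prescribes
+        // synchronizer1, which is what provokes the expected mismatch error.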
+        disclosedContract = createEventToDisclosedContract(createDelegatedEvent)
+
+        // Ensure participants are synchronized
+        _ <- p.synchronize
+
+        request = delegateParticipant
+          .submitAndWaitRequest(
+            delegate,
+            delegationCid.exerciseFetchDelegated(delegatedCid).commands,
+          )
+          .update(
+            _.commands.disclosedContracts := Seq(
+              disclosedContract.copy(synchronizerId = synchronizer2)
+            )
+          )
+          .update(_.commands.synchronizerId := synchronizer1)
+        error <- delegateParticipant
+          .submitAndWait(request)
+          .mustFail(
+            "the disclosed contract has a different synchronizer id than the prescribed one"
+          )
+      } yield assertGrpcError(
+        error,
+        CommandExecutionErrors.PrescribedSynchronizerIdMismatch,
+        None,
+        checkDefiniteAnswerMetadata = true,
+      )
+  })
+
+  test(
+    "EDRouteByDisclosedContractSynchronizerId",
+    "The synchronizer id of disclosed contracts is used as the prescribed synchronizer id",
+    allocate(SingleParty, SingleParty).expectingMinimumNumberOfSynchronizers(2),
+  )(implicit ec => {
+    case p @ Participants(
+          Participant(ownerParticipant, Seq(owner)),
+          Participant(delegateParticipant, Seq(delegate)),
+        ) =>
+      val (synchronizer1, synchronizer2) = inside(owner.initialSynchronizers) {
+        case synchronizer1 :: synchronizer2 :: _ =>
+          synchronizer1 -> synchronizer2
+      }
+
+      for {
+        _ <- testRoutingByDisclosedContractSynchronizerId(
+          ownerParticipant = ownerParticipant,
+          delegateParticipant = delegateParticipant,
+          owner = owner,
+          delegate = delegate,
+          targetSynchronizer = synchronizer1,
+          p,
+        )
+        _ <- testRoutingByDisclosedContractSynchronizerId(
+          ownerParticipant = ownerParticipant,
+          delegateParticipant = delegateParticipant,
+          owner = owner,
+          delegate = delegate,
+          targetSynchronizer = synchronizer2,
+          p,
+        )
+      } yield ()
+  })
+
+  private def oneFailedWith(result1: Try[_], result2: Try[_])(
+      assertError: Throwable => Unit
+  ): Unit =
+    (result1.isFailure, result2.isFailure) match {
+      case (true, false) => assertError(result1.failed.get)
+      case (false, true) => assertError(result2.failed.get)
+      case (true, true) => fail("Exactly one request should have failed, but both failed")
+      case (false, false) => fail("Exactly one request should have failed, but both succeeded")
+    }
+}
+
+object ExplicitDisclosureIT {
+  case class TestContext(
+      ownerParticipant: ParticipantTestContext,
+      delegateParticipant: ParticipantTestContext,
+      owner: Party,
+      delegate: Party,
+      contractKey: String,
+      delegationCid: Delegation.ContractId,
+      delegatedCid: Delegated.ContractId,
+      originalCreateEvent: CreatedEvent,
+      disclosedContract: DisclosedContract,
+  ) {
+
+    /** Exercises the FetchDelegated choice as the delegate party, with the given explicit
+      * disclosure contracts. This choice fetches the Delegated contract, which is only visible to
+      * the owner.
+ */ + def exerciseFetchDelegated(disclosedContracts: DisclosedContract*): Future[Unit] = { + val request = delegateParticipant + .submitAndWaitRequest( + delegate, + delegationCid.exerciseFetchDelegated(delegatedCid).commands, + ) + .update(_.commands.disclosedContracts := disclosedContracts) + delegateParticipant.submitAndWait(request).map(_ => ())(ExecutionContext.parasitic) + } + + def dummyCreate(disclosedContracts: DisclosedContract*): Future[Unit] = { + val request = delegateParticipant + .submitAndWaitRequest( + delegate, + JList.of( + new CreateCommand( + Dummy.TEMPLATE_ID, + new Dummy(delegate.getValue).toValue, + ) + ), + ) + .update(_.commands.disclosedContracts := disclosedContracts) + delegateParticipant.submitAndWait(request).map(_ => ())(ExecutionContext.parasitic) + } + + } + + private def initializeTest( + ownerParticipant: ParticipantTestContext, + delegateParticipant: ParticipantTestContext, + owner: Party, + delegate: Party, + transactionFormat: TransactionFormat, + )(implicit ec: ExecutionContext): Future[TestContext] = { + val contractKey = ownerParticipant.nextKeyId() + + for { + // Create a Delegation contract + // Contract is visible both to owner (as signatory) and delegate (as observer) + delegationCid <- ownerParticipant.create( + owner, + new Delegation(owner.getValue, delegate.getValue), + ) + + // Create Delegated contract + // This contract is only visible to the owner + delegatedCid <- ownerParticipant.create(owner, new Delegated(owner.getValue, contractKey)) + + // Get the contract payload from the transaction stream of the owner + txReq <- ownerParticipant.getTransactionsRequest(transactionFormat) + delegatedTx <- ownerParticipant.transactions(txReq) + createDelegatedEvent = createdEvents(delegatedTx.head).head + + // Copy the actual Delegated contract to a disclosed contract (which can be shared out of band). 
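+      // Illustrative note (based on how the tests in this suite use it): the created event blob
+      // is an opaque serialization of the create event which the recipient attaches verbatim to
+      // later submissions (see exerciseFetchDelegated above); this is what makes out-of-band
+      // sharing possible.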
+ disclosedContract = createEventToDisclosedContract(createDelegatedEvent) + } yield TestContext( + ownerParticipant = ownerParticipant, + delegateParticipant = delegateParticipant, + owner = owner, + delegate = delegate, + contractKey = contractKey, + delegationCid = delegationCid, + delegatedCid = delegatedCid, + originalCreateEvent = createDelegatedEvent, + disclosedContract = disclosedContract, + ) + } + + private def formatByPartyAndTemplate( + owner: Party, + templateId: javaapi.data.Identifier = Delegated.TEMPLATE_ID, + ): TransactionFormat = { + val templateIdScalaPB = Identifier.fromJavaProto(templateId.toProto) + + TransactionFormat( + Some( + EventFormat( + filtersByParty = Map( + owner.getValue -> new Filters( + Seq( + CumulativeFilter( + IdentifierFilter.TemplateFilter( + TemplateFilter(Some(templateIdScalaPB), includeCreatedEventBlob = true) + ) + ) + ) + ) + ), + filtersForAnyParty = None, + verbose = false, + ) + ), + transactionShape = TRANSACTION_SHAPE_LEDGER_EFFECTS, + ) + } + + private def createEventToDisclosedContract(ev: CreatedEvent): DisclosedContract = + DisclosedContract( + templateId = ev.templateId, + contractId = ev.contractId, + createdEventBlob = ev.createdEventBlob, + synchronizerId = "", + ) + + private def testRoutingByDisclosedContractSynchronizerId( + ownerParticipant: ParticipantTestContext, + delegateParticipant: ParticipantTestContext, + owner: Party, + delegate: Party, + targetSynchronizer: String, + p: Participants, + )(implicit ec: ExecutionContext): Future[Unit] = { + val contractKey = ownerParticipant.nextKeyId() + + for { + delegationCid <- ownerParticipant + .submitAndWaitForTransaction( + ownerParticipant + .submitAndWaitForTransactionRequest( + owner, + new Delegation(owner.getValue, delegate.getValue).create().commands(), + ) + ) + .map( + _.getTransaction.events.head.getCreated.contractId.pipe(new Delegation.ContractId(_)) + ) + + (delegatedCid, delegatedCreateUpdateId) <- ownerParticipant + .submitAndWaitForTransaction( + ownerParticipant + .submitAndWaitForTransactionRequest( + owner, + new Delegated(owner.getValue, contractKey).create().commands(), + ) + .update(_.commands.synchronizerId := targetSynchronizer) + ) + .map(resp => + resp.getTransaction.events.head.getCreated.contractId + .pipe(new Delegated.ContractId(_)) -> resp.getTransaction.updateId + ) + + // Get the contract payload from the transaction stream of the owner + txReq <- ownerParticipant.getTransactionsRequest(formatByPartyAndTemplate(owner)) + delegatedTx <- ownerParticipant + .transactions(txReq) + .map(_.filter(_.updateId == delegatedCreateUpdateId)) + createDelegatedEvent = createdEvents(delegatedTx.head).head + + // Copy the actual Delegated contract to a disclosed contract (which can be shared out of band). 
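+      // Here only the disclosed contract carries a synchronizer id (set via .copy below); the
+      // command itself prescribes none, so the submission is expected to be routed to
+      // targetSynchronizer, which the final assertion verifies.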
+ disclosedContract = createEventToDisclosedContract(createDelegatedEvent) + + // Ensure participants are synchronized + _ <- p.synchronize + + request = delegateParticipant + .submitAndWaitForTransactionRequest( + delegate, + delegationCid.exerciseFetchDelegated(delegatedCid).commands, + ) + .update( + _.commands.disclosedContracts := Seq( + disclosedContract.copy(synchronizerId = targetSynchronizer) + ) + ) + txSynchronizerId <- delegateParticipant + .submitAndWaitForTransaction(request) + .map(_.getTransaction.synchronizerId) + } yield { + assertEquals(txSynchronizerId, targetSynchronizer) + } + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/ExternalPartyManagementServiceIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/ExternalPartyManagementServiceIT.scala new file mode 100644 index 0000000000..a313a73f84 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/ExternalPartyManagementServiceIT.scala @@ -0,0 +1,32 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.v2.admin.party_management_service.GetPartiesRequest + +class ExternalPartyManagementServiceIT extends PartyManagementITBase { + test( + "PMAllocateExternalPartyBasic", + "Allocate an external party", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + party <- ledger.allocateExternalPartyFromHint(Some("alice")) + get <- ledger.getParties( + GetPartiesRequest(parties = Seq(party), identityProviderId = "") + ) + } yield { + assert( + party.getValue.contains("alice"), + "The allocated party identifier does not contain the party hint", + ) + assertDefined( + get.partyDetails.find(_.party == party.getValue), + "Expected to list allocated party", + ) + } + }) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/HealthServiceIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/HealthServiceIT.scala new file mode 100644 index 0000000000..14227e2c0a --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/HealthServiceIT.scala @@ -0,0 +1,32 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import io.grpc.health.v1.health.HealthCheckResponse + +class HealthServiceIT extends LedgerTestSuite { + test("HScheck", "The Health.Check endpoint reports everything is well", allocate(NoParties))( + implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + health <- ledger.checkHealth() + } yield { + assertEquals("HSisServing", health.status, HealthCheckResponse.ServingStatus.SERVING) + } + } + ) + + test("HSwatch", "The Health.Watch endpoint reports everything is well", allocate(NoParties))( + implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + healthSeq <- ledger.watchHealth() + } yield { + val health = assertSingleton("HScontinuesToServe", healthSeq) + assert(health.status == HealthCheckResponse.ServingStatus.SERVING) + } + } + ) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/IdentityProviderConfigServiceIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/IdentityProviderConfigServiceIT.scala new file mode 100644 index 0000000000..5e626b37f4 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/IdentityProviderConfigServiceIT.scala @@ -0,0 +1,433 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.TestConstraints +import com.daml.ledger.api.v2.admin.identity_provider_config_service.{ + CreateIdentityProviderConfigRequest, + DeleteIdentityProviderConfigRequest, + DeleteIdentityProviderConfigResponse, + GetIdentityProviderConfigRequest, + IdentityProviderConfig, + UpdateIdentityProviderConfigRequest, +} +import com.digitalasset.base.error.ErrorCode +import com.digitalasset.canton.ledger.error.groups.{AdminServiceErrors, RequestValidationErrors} +import com.google.protobuf.field_mask.FieldMask + +import java.util.UUID +import scala.concurrent.Future + +class IdentityProviderConfigServiceIT extends UserManagementServiceITBase { + + test( + "CreateConfigInvalidArguments", + "Test argument validation for IdentityProviderConfigService#CreateIdentityProviderConfig", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + def createAndCheck( + problem: String, + expectedErrorCode: ErrorCode, + identityProviderId: String = UUID.randomUUID().toString, + isDeactivated: Boolean = false, + issuer: String = UUID.randomUUID().toString, + jwksUrl: String = "http://daml.com/jwks.json", + ): Future[Unit] = ledger + .createIdentityProviderConfig( + identityProviderId, + isDeactivated, + issuer, + jwksUrl, + ) + .mustFailWith(context = problem, expectedErrorCode) + + for { + _ <- createAndCheck( + "empty identity_provider_id", + RequestValidationErrors.MissingField, + identityProviderId = "", + ) + _ <- createAndCheck( + "invalid identity_provider_id", + 
RequestValidationErrors.InvalidField, + identityProviderId = "!@", + ) + _ <- createAndCheck( + "empty issuer", + RequestValidationErrors.MissingField, + issuer = "", + ) + _ <- createAndCheck( + "empty jwks_url", + RequestValidationErrors.MissingField, + jwksUrl = "", + ) + _ <- createAndCheck( + "non valid jwks_url", + RequestValidationErrors.InvalidField, + jwksUrl = "url.com", + ) + _ <- ledger + .createIdentityProviderConfig(CreateIdentityProviderConfigRequest(None)) + .mustFailWith( + context = "empty identity_provider_config", + RequestValidationErrors.MissingField, + ) + } yield () + }) + + test( + "GetConfigInvalidArguments", + "Test argument validation for IdentityProviderConfigService#GetIdentityProviderConfig", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + limitation = TestConstraints.GrpcOnly( + "Empty identity_provider_id leads to other JSON request: /v2/idps/ which gives a list" + ), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + def createAndCheck( + problem: String, + expectedErrorCode: ErrorCode, + identityProviderId: String, + ): Future[Unit] = ledger + .getIdentityProviderConfig( + GetIdentityProviderConfigRequest(identityProviderId) + ) + .mustFailWith(context = problem, expectedErrorCode) + + for { + _ <- createAndCheck( + "empty identity_provider_id", + RequestValidationErrors.MissingField, + identityProviderId = "", + ) + _ <- createAndCheck( + "invalid identity_provider_id", + RequestValidationErrors.InvalidField, + identityProviderId = "!@", + ) + } yield () + }) + + test( + "UpdateConfigInvalidArguments", + "Test argument validation for IdentityProviderConfigService#UpdateIdentityProviderConfig", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + limitation = TestConstraints.GrpcOnly( + "Empty identity_provider_id leads to other JSON request: /v2/idps/ which gives a list" + ), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + def createAndCheck( + problem: String, + expectedErrorCode: ErrorCode, + identityProviderId: String = UUID.randomUUID().toString, + isDeactivated: Boolean = false, + issuer: String = UUID.randomUUID().toString, + jwksUrl: String = "http://daml.com/jwks.json", + updateMask: Option[FieldMask] = Some(FieldMask(Seq("is_deactivated"))), + ): Future[Unit] = ledger + .updateIdentityProviderConfig( + identityProviderId, + isDeactivated, + issuer, + jwksUrl, + updateMask, + ) + .mustFailWith(context = problem, expectedErrorCode) + + for { + _ <- createAndCheck( + "empty identity_provider_id", + RequestValidationErrors.MissingField, + identityProviderId = "", + ) + _ <- createAndCheck( + "invalid identity_provider_id", + RequestValidationErrors.InvalidField, + identityProviderId = "!@", + ) + _ <- createAndCheck( + "non valid url", + RequestValidationErrors.InvalidField, + jwksUrl = "url.com", + ) + _ <- createAndCheck( + "empty update_mask", + RequestValidationErrors.MissingField, + updateMask = None, + ) + _ <- ledger + .updateIdentityProviderConfig(UpdateIdentityProviderConfigRequest(None, None)) + .mustFailWith( + context = "empty identity_provider_config", + RequestValidationErrors.MissingField, + ) + + createdIdp <- ledger.createIdentityProviderConfig() + + _ <- ledger + .updateIdentityProviderConfig( + UpdateIdentityProviderConfigRequest( + Some(createdIdp.identityProviderConfig.get), + Some(FieldMask(Seq.empty)), + ) + ) + .mustFailWith( + context = "empty 
update_mask", + AdminServiceErrors.IdentityProviderConfig.InvalidUpdateIdentityProviderConfigRequest, + ) + } yield () + }) + + test( + "DeleteConfigInvalidArguments", + "Test argument validation for IdentityProviderConfigService#DeleteIdentityProviderConfig", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + limitation = TestConstraints.GrpcOnly( + "Empty identity_provider_id leads to other JSON request: /v2/idps/ which gives a list" + ), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + def createAndCheck( + problem: String, + expectedErrorCode: ErrorCode, + identityProviderId: String, + ): Future[Unit] = ledger + .deleteIdentityProviderConfig( + DeleteIdentityProviderConfigRequest(identityProviderId) + ) + .mustFailWith(context = problem, expectedErrorCode) + + for { + _ <- createAndCheck( + "empty identity_provider_id", + RequestValidationErrors.MissingField, + identityProviderId = "", + ) + _ <- createAndCheck( + "invalid identity_provider_id", + RequestValidationErrors.InvalidField, + identityProviderId = "!@", + ) + } yield () + }) + + test( + "CreateConfigSuccess", + "Exercise CreateIdentityProviderConfig", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + val identityProviderId = UUID.randomUUID().toString + val isDeactivated = false + val issuer = UUID.randomUUID().toString + val jwksUrl = "http://daml.com/jwks.json" + val config = IdentityProviderConfig( + identityProviderId, + isDeactivated, + issuer, + jwksUrl, + "", + ) + for { + response1 <- ledger.createIdentityProviderConfig( + CreateIdentityProviderConfigRequest(Some(config)) + ) + response2 <- ledger.createIdentityProviderConfig( + isDeactivated = true + ) + _ <- ledger + .createIdentityProviderConfig( + identityProviderId, + isDeactivated, + UUID.randomUUID().toString, + jwksUrl, + ) + .mustFailWith( + "Creating duplicate IDP with the same ID", + AdminServiceErrors.IdentityProviderConfig.IdentityProviderConfigAlreadyExists, + ) + + _ <- ledger + .createIdentityProviderConfig( + issuer = issuer + ) + .mustFailWith( + "Creating duplicate IDP with the same issuer", + AdminServiceErrors.IdentityProviderConfig.IdentityProviderConfigIssuerAlreadyExists, + ) + + } yield { + assertEquals(response1.identityProviderConfig, Some(config)) + assertIdentityProviderConfig(response2.identityProviderConfig) { config => + assertEquals(config.isDeactivated, true) + } + } + }) + + test( + "UpdateConfigSuccess", + "Exercise UpdateIdentityProviderConfig", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + response <- ledger.createIdentityProviderConfig() + response2 <- ledger.createIdentityProviderConfig() + response3 <- ledger.createIdentityProviderConfig() + isDeactivatedUpdate <- ledger + .updateIdentityProviderConfig( + UpdateIdentityProviderConfigRequest( + identityProviderConfig = + response.identityProviderConfig.map(_.copy(isDeactivated = true)), + updateMask = Some(FieldMask(Seq("is_deactivated"))), + ) + ) + jwksUrlUpdate <- ledger + .updateIdentityProviderConfig( + UpdateIdentityProviderConfigRequest( + identityProviderConfig = + response.identityProviderConfig.map(_.copy(jwksUrl = "http://daml.com/jwks2.json")), + updateMask = 
Some(FieldMask(Seq("jwks_url"))), + ) + ) + newIssuer = UUID.randomUUID().toString + issuerUpdate <- ledger + .updateIdentityProviderConfig( + UpdateIdentityProviderConfigRequest( + identityProviderConfig = + response.identityProviderConfig.map(_.copy(issuer = newIssuer)), + updateMask = Some(FieldMask(Seq("issuer"))), + ) + ) + + duplicateIssuer = response2.identityProviderConfig.get.issuer + _ <- ledger + .updateIdentityProviderConfig( + UpdateIdentityProviderConfigRequest( + identityProviderConfig = + response3.identityProviderConfig.map(_.copy(issuer = duplicateIssuer)), + updateMask = Some(FieldMask(Seq("issuer"))), + ) + ) + .mustFailWith( + "Updating to the issuer which already exists", + AdminServiceErrors.IdentityProviderConfig.IdentityProviderConfigIssuerAlreadyExists, + ) + } yield { + assertIdentityProviderConfig(isDeactivatedUpdate.identityProviderConfig) { config => + assertEquals(config.isDeactivated, true) + } + + assertIdentityProviderConfig(jwksUrlUpdate.identityProviderConfig) { config => + assertEquals(config.jwksUrl, "http://daml.com/jwks2.json") + } + + assertIdentityProviderConfig(issuerUpdate.identityProviderConfig) { config => + assertEquals(config.issuer, newIssuer) + } + } + }) + + test( + "GetConfigSuccess", + "Exercise GetIdentityProviderConfig", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + val identityProviderId = UUID.randomUUID().toString + val isDeactivated = false + val issuer = UUID.randomUUID().toString + val jwksUrl = "http://daml.com/jwks.json" + val config = IdentityProviderConfig( + identityProviderId, + isDeactivated, + issuer, + jwksUrl, + "", + ) + for { + response1 <- ledger.createIdentityProviderConfig( + CreateIdentityProviderConfigRequest(Some(config)) + ) + response2 <- ledger.getIdentityProviderConfig( + GetIdentityProviderConfigRequest(identityProviderId) + ) + _ <- ledger + .getIdentityProviderConfig( + GetIdentityProviderConfigRequest( + UUID.randomUUID().toString + ) + ) + .mustFailWith( + "non existing idp", + AdminServiceErrors.IdentityProviderConfig.IdentityProviderConfigNotFound, + ) + } yield { + assertEquals(response1.identityProviderConfig, Some(config)) + assertEquals(response2.identityProviderConfig, Some(config)) + } + }) + + test( + "ListConfig", + "Exercise ListIdentityProviderConfig", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + val id1 = UUID.randomUUID().toString + val id2 = UUID.randomUUID().toString + for { + _ <- ledger.createIdentityProviderConfig(identityProviderId = id1) + _ <- ledger.createIdentityProviderConfig(identityProviderId = id2) + listResponse <- ledger.listIdentityProviderConfig() + } yield { + val ids = listResponse.identityProviderConfigs.map(_.identityProviderId) + assertEquals(ids.contains(id1), true) + assertEquals(ids.contains(id2), true) + } + }) + + test( + "DeleteConfigSuccess", + "Exercise DeleteIdentityProviderConfig", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + val id = UUID.randomUUID().toString + for { + _ <- ledger.createIdentityProviderConfig(identityProviderId = id) + deleted <- ledger.deleteIdentityProviderConfig(DeleteIdentityProviderConfigRequest(id)) + _ <- 
ledger + .deleteIdentityProviderConfig(DeleteIdentityProviderConfigRequest(id)) + .mustFailWith( + "config does not exist anymore", + AdminServiceErrors.IdentityProviderConfig.IdentityProviderConfigNotFound, + ) + } yield { + assertEquals(deleted, DeleteIdentityProviderConfigResponse()) + } + }) + + private def assertIdentityProviderConfig(config: Option[IdentityProviderConfig])( + f: IdentityProviderConfig => Unit + ): Unit = + config match { + case Some(value) => f(value) + case None => fail("identity_provider_config expected") + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/InteractiveSubmissionServiceIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/InteractiveSubmissionServiceIT.scala new file mode 100644 index 0000000000..70e57d592f --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/InteractiveSubmissionServiceIT.scala @@ -0,0 +1,969 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.TransactionHelpers.createdEvents +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.testtool.infrastructure.{ + ExternalParty, + LedgerTestSuite, + LocalParty, + Party, +} +import com.daml.ledger.api.testtool.suites.v2_1.CommandServiceIT.{ + createEventToDisclosedContract, + formatByPartyAndTemplate, +} +import com.daml.ledger.api.testtool.suites.v2_1.CompanionImplicits.* +import com.daml.ledger.api.v2.commands.DisclosedContract +import com.daml.ledger.api.v2.interactive.interactive_submission_service.Metadata.InputContract +import com.daml.ledger.api.v2.interactive.interactive_submission_service.Metadata.InputContract.Contract +import com.daml.ledger.api.v2.interactive.interactive_submission_service.{ + ExecuteSubmissionAndWaitForTransactionResponse, + MinLedgerTime, +} +import com.daml.ledger.api.v2.package_reference.PackageReference +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_LEDGER_EFFECTS +import com.daml.ledger.api.v2.update_service.GetUpdatesRequest +import com.daml.ledger.javaapi.data +import com.daml.ledger.javaapi.data.Command +import com.daml.ledger.test.java.model.test.{Dummy, DummyFactory} +import com.daml.ledger.test.java.semantic.divulgencetests.{ + DivulgenceProposal, + DummyFlexibleController, +} +import com.digitalasset.canton.LfTimestamp +import com.digitalasset.canton.error.TransactionRoutingError.ConfigurationErrors.InvalidPrescribedSynchronizerId +import com.digitalasset.canton.error.TransactionRoutingError.TopologyErrors.UnknownInformees +import com.digitalasset.canton.ledger.api.TransactionShape.{AcsDelta, LedgerEffects} +import com.digitalasset.canton.ledger.error.groups.CommandExecutionErrors.InteractiveSubmissionExecuteError +import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors.InvalidField +import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors.NotFound.PackageNamesNotFound +import com.digitalasset.canton.ledger.error.groups.{ConsistencyErrors, 
RequestValidationErrors} +import com.google.protobuf.timestamp.Timestamp + +import java.time.Instant +import scala.concurrent.{ExecutionContext, Future} + +final class InteractiveSubmissionServiceIT extends LedgerTestSuite with CommandSubmissionTestUtils { + test( + "ISSPrepareSubmissionRequestBasic", + "Prepare a submission request", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val prepareRequest = ledger.prepareSubmissionRequest( + party, + new Dummy(party).create.commands, + ) + for { + response <- ledger.prepareSubmission(prepareRequest) + tx = response.preparedTransaction + hash = response.preparedTransactionHash + } yield { + assert(tx.nonEmpty, "The transaction was empty but shouldn't be.") + assert(!hash.isEmpty, "The hash was empty but shouldn't be.") + } + }) + + test( + "ISSPrepareSubmissionRequestExplicitDisclosure", + "Prepare a submission request with explicit disclosure", + allocate(TwoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(owner, stranger))) => + for { + cidAndDisclosed <- testExplicitDisclosure(ledger, owner) + (contractId, disclosedContract) = cidAndDisclosed + prepareResponse <- ledger.prepareSubmission( + ledger + .prepareSubmissionRequest(stranger, contractId.exerciseFlexibleConsume(stranger).commands) + .copy(disclosedContracts = Seq(disclosedContract)) + ) + } yield { + assert(prepareResponse.preparedTransaction.nonEmpty, "prepared transaction was empty") + val inputContractIds = for { + tx <- prepareResponse.preparedTransaction + metadata <- tx.metadata + } yield metadata.inputContracts.map { + case InputContract(Contract.V1(value), _createdAt, _eventBlob) => value.contractId + case InputContract(Contract.Empty, _createdAt, _eventBlob) => + fail("Received empty input contract") + } + assert(inputContractIds.contains(Seq(contractId.contractId)), "Unexpected input contract id") + } + }) + + test( + "ISSPrepareSubmissionRequestFailOnUnknownContract", + "Fail to prepare a submission request on an unknown contract", + allocate(TwoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(owner, stranger))) => + for { + cidAndDisclosed <- testExplicitDisclosure(ledger, owner) + (contractId, _) = cidAndDisclosed + error <- ledger + .prepareSubmission( + ledger + .prepareSubmissionRequest( + stranger, + contractId.exerciseFlexibleConsume(stranger).commands, + ) + ) + .mustFail("missing input contract") + } yield { + assert( + error.getMessage.contains("CONTRACT_NOT_FOUND"), + s"wrong failure, got ${error.getMessage}", + ) + } + }) + + test( + "ISSPrepareSubmissionRequestSynchronizerId", + "Prepare a submission request on a specific synchronizer", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + ledger + .connectedSynchronizers() + .flatMap { synchronizers => + Future.sequence { + synchronizers.toList.map { synchronizerId => + val prepareRequest = ledger + .prepareSubmissionRequest( + party, + new Dummy(party).create.commands, + ) + .copy( + synchronizerId = synchronizerId + ) + for { + response <- ledger.prepareSubmission(prepareRequest) + tx = response.preparedTransaction + hash = response.preparedTransactionHash + } yield { + assert( + tx.get.metadata.get.synchronizerId == synchronizerId, + "Unexpected synchronizer ID.", + ) + } + } + } + } + .map(_ => ()) + }) + + test( + "ISSPrepareSubmissionRequestMinLedgerTime", + "Prepare a submission request and respect min ledger time", + allocate(SingleParty), + )(implicit ec => {
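+ // Note: exerciseConsumeIfTimeIsBetween only succeeds when ledger time falls inside + // the given window, so the prepared metadata must carry the requested absolute time + // as min_ledger_effective_time (in microseconds). +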
case Participants(Participant(ledger, Seq(party))) => + val time = Instant.now().plusSeconds(20) + val prepareRequest = ledger + .prepareSubmissionRequest( + party, + new Dummy(party) + .createAnd() + .exerciseConsumeIfTimeIsBetween(time, time.plusSeconds(1)) + .commands, + ) + .copy( + minLedgerTime = Some(MinLedgerTime(MinLedgerTime.Time.MinLedgerTimeAbs(Timestamp(time)))) + ) + for { + response <- ledger.prepareSubmission(prepareRequest) + tx = response.preparedTransaction + } yield { + val minLet = tx.get.metadata.get.minLedgerEffectiveTime + val expected = LfTimestamp.assertFromInstant(time).micros + assert( + minLet.contains(expected), + s"Incorrect min ledger time. Received: $minLet, expected: $expected", + ) + } + }) + + test( + "ISSPrepareSubmissionExecuteBasic", + "Execute an externally signed transaction", + allocate(SingleExternalParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(externalParty: ExternalParty))) => + val prepareSubmissionRequest = ledger.prepareSubmissionRequest( + externalParty, + new Dummy(externalParty).create.commands, + ) + for { + prepareResponse <- ledger.prepareSubmission(prepareSubmissionRequest) + executeRequest = ledger.executeSubmissionRequest(externalParty, prepareResponse) + _ <- ledger.executeSubmission(executeRequest) + _ <- ledger.firstCompletions(externalParty) + transactions <- ledger.transactions(LedgerEffects, externalParty) + } yield { + val transaction = assertSingleton("expected one transaction", transactions) + val event = transaction.events.head.event + assert(event.isCreated) + assert(transaction.externalTransactionHash.contains(prepareResponse.preparedTransactionHash)) + } + }) + + test( + "ISSExecuteSubmissionAndWaitBasic", + "Execute and wait for an externally signed transaction", + allocate(SingleExternalParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(externalParty: ExternalParty))) => + val prepareSubmissionRequest = ledger.prepareSubmissionRequest( + externalParty, + new Dummy(externalParty).create.commands, + ) + for { + prepareResponse <- ledger.prepareSubmission(prepareSubmissionRequest) + executeAndWaitRequest = ledger.executeSubmissionAndWaitRequest(externalParty, prepareResponse) + response <- ledger.executeSubmissionAndWait(executeAndWaitRequest) + retrievedTransaction <- ledger.transactionById(response.updateId, Seq(externalParty)) + } yield { + assert( + response.updateId == retrievedTransaction.updateId, + "ExecuteAndWait does not contain the expected updateId", + ) + assert( + response.completionOffset == retrievedTransaction.offset, + "ExecuteAndWait does not contain the expected completion offset", + ) + val event = retrievedTransaction.events.head.event + assert(event.isCreated, "Expected created event") + assert( + retrievedTransaction.externalTransactionHash.contains( + prepareResponse.preparedTransactionHash + ), + "Transaction hash was not set or incorrect", + ) + } + }) + + test( + "ISSPrepareSubmissionFailExecuteOnInvalidSignature", + "Fail execute if the signature is invalid", + allocate(SingleExternalParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(externalParty: ExternalParty))) => + val prepareSubmissionRequest = ledger.prepareSubmissionRequest( + externalParty, + new Dummy(externalParty).create.commands, + ) + for { + prepareResponse <- ledger.prepareSubmission(prepareSubmissionRequest) + executeRequest = ledger + .executeSubmissionRequest(externalParty, prepareResponse) + // Mess with the signature + .update( + 
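// Dropping the first character invalidates every signature while keeping + // the request itself well-formed. +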
_.partySignatures.signatures.modify( + _.map( + _.update(_.signatures.modify(_.map(_.update(_.signature.modify(_.substring(1)))))) + ) + ) + ) + _ <- ledger + .executeSubmission(executeRequest) + .mustFailWith( + "Invalid signature", + InteractiveSubmissionExecuteError.code, + Some("Received 0 valid signatures (1 invalid)"), + ) + } yield () + }) + + test( + "ISSPrepareSubmissionFailExecuteAndWaitOnInvalidSignature", + "Fail execute and wait if the signature is invalid", + allocate(SingleExternalParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(externalParty: ExternalParty))) => + val prepareSubmissionRequest = ledger.prepareSubmissionRequest( + externalParty, + new Dummy(externalParty).create.commands, + ) + for { + prepareResponse <- ledger.prepareSubmission(prepareSubmissionRequest) + executeAndWaitRequest = ledger + .executeSubmissionAndWaitRequest(externalParty, prepareResponse) + .update( + _.partySignatures.signatures.modify( + _.map( + _.update(_.signatures.modify(_.map(_.update(_.signature.modify(_.substring(1)))))) + ) + ) + ) + _ <- ledger + .executeSubmissionAndWait(executeAndWaitRequest) + .mustFailWith( + "Invalid signature", + InteractiveSubmissionExecuteError.code, + Some("Received 0 valid signatures (1 invalid)"), + ) + } yield () + }) + + test( + "ISSPrepareSubmissionFailExecuteOnInvalidSignatory", + "Fail execute if signed by a non signatory party", + allocate(TwoExternalParties), + )(implicit ec => { + case Participants(Participant(ledger, Seq(alice: ExternalParty, bob: ExternalParty))) => + val prepareSubmissionRequest = ledger.prepareSubmissionRequest( + alice, + new Dummy(alice).create.commands, + ) + for { + prepareResponse <- ledger.prepareSubmission(prepareSubmissionRequest) + // bob signs instead of alice + executeRequest = ledger.executeSubmissionRequest(bob, prepareResponse) + _ <- ledger + .executeSubmission(executeRequest) + .mustFailWith( + "Missing signature", + InteractiveSubmissionExecuteError.code, + Some("The following actAs parties did not provide an external signature"), + ) + } yield () + }) + + test( + "ISSExecuteSubmissionRequestWithInputContracts", + "Submit with input contracts", + allocate(TwoExternalParties), + )(implicit ec => { + case Participants(Participant(ledger, Seq(owner: ExternalParty, stranger: ExternalParty))) => + for { + cidAndDisclosed <- testExplicitDisclosure(ledger, owner) + (contractId, disclosedContract) = cidAndDisclosed + prepareResponse <- ledger.prepareSubmission( + ledger + .prepareSubmissionRequest( + stranger, + contractId.exerciseFlexibleConsume(stranger).commands, + ) + .copy(disclosedContracts = Seq(disclosedContract)) + ) + executeRequest = ledger.executeSubmissionRequest(stranger, prepareResponse) + _ <- ledger.executeSubmission(executeRequest) + _ <- ledger.firstCompletions(stranger) + transactions <- ledger.transactions(LedgerEffects, stranger) + } yield { + val transaction = transactions.head + val event = transaction.events.head.event + assert(event.isExercised) + assert( + transaction.externalTransactionHash.contains(prepareResponse.preparedTransactionHash) + ) + } + }) + + test( + "ISSExecuteSubmissionRequestFailOnEmptyInputContracts", + "Fail if input contracts are missing", + allocate(TwoExternalParties), + )(implicit ec => { + case Participants(Participant(ledger, Seq(owner: ExternalParty, stranger: ExternalParty))) => + for { + cidAndDisclosed <- testExplicitDisclosure(ledger, owner) + (contractId, disclosedContract) = cidAndDisclosed + prepareResponse <- ledger.prepareSubmission( + 
ledger + .prepareSubmissionRequest( + stranger, + contractId.exerciseFlexibleConsume(stranger).commands, + ) + .copy(disclosedContracts = Seq(disclosedContract)) + ) + executeRequest = ledger + .executeSubmissionRequest(stranger, prepareResponse) + // Remove input contracts + .update(_.preparedTransaction.metadata.inputContracts.set(Seq.empty)) + _ <- ledger + .executeSubmission(executeRequest) + .mustFailWith( + "Missing input contracts", + InteractiveSubmissionExecuteError.code, + Some("Missing input contracts"), + ) + } yield () + }) + + test( + "ISSDuplicateExecuteAndWaitForTransactionData", + "ExecuteSubmissionAndWaitForTransaction should fail on duplicate requests", + allocate(SingleExternalParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party: ExternalParty))) => + val prepareRequest = + ledger.prepareSubmissionRequest(party, new Dummy(party).create.commands) + for { + prepareResponse <- ledger.prepareSubmission(prepareRequest) + executeRequest = ledger.executeSubmissionAndWaitForTransactionRequest( + party, + prepareResponse, + None, + ) + _ <- ledger.executeSubmissionAndWaitForTransaction(executeRequest) + failure <- ledger + .submitRequestAndTolerateGrpcError( + ConsistencyErrors.SubmissionAlreadyInFlight, + _.executeSubmissionAndWaitForTransaction(executeRequest), + ) + .mustFail("submitting a duplicate request") + } yield { + assertGrpcError( + failure, + ConsistencyErrors.DuplicateCommand, + None, + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "ISSDuplicateExecuteAndWaitData", + "ExecuteSubmissionAndWait should fail on duplicate requests", + allocate(SingleExternalParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party: ExternalParty))) => + val prepareRequest = + ledger.prepareSubmissionRequest(party, new Dummy(party).create.commands) + for { + prepareResponse <- ledger.prepareSubmission(prepareRequest) + executeRequest = ledger.executeSubmissionAndWaitRequest( + party, + prepareResponse, + ) + _ <- ledger.executeSubmissionAndWait(executeRequest) + failure <- ledger + .submitRequestAndTolerateGrpcError( + ConsistencyErrors.SubmissionAlreadyInFlight, + _.executeSubmissionAndWait(executeRequest), + ) + .mustFail("submitting a duplicate request") + } yield { + assertGrpcError( + failure, + ConsistencyErrors.DuplicateCommand, + None, + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "ISSExecuteSubmissionAndWaitForTransactionFilterByTemplateId", + "ExecuteSubmissionAndWaitForTransaction returns a filtered transaction (by template id)", + allocate(SingleExternalParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val format = ledger.transactionFormat( + parties = Some(Seq(party)), + transactionShape = AcsDelta, + ) + for { + createdTransaction <- executeAndWaitForTransaction( + ledger, + party, + new DummyFactory(party).create.commands, + Some(format), + ) + transaction = assertDefined( + createdTransaction.transaction, + "The transaction should be defined", + ) + exerciseTransactionO <- executeAndWaitForTransaction( + ledger, + party, + DummyFactory.ContractId + .fromContractId( + new com.daml.ledger.javaapi.data.codegen.ContractId( + transaction.events.head.getCreated.contractId + ) + ) + .exerciseDummyFactoryCall() + .commands(), + Some( + ledger.transactionFormat( + parties = Some(Seq(party)), + transactionShape = AcsDelta, + templateIds = Seq(Dummy.TEMPLATE_ID), + ) + ), + ) + exerciseTransaction = assertDefined( + exerciseTransactionO.transaction, + "The transaction should 
be defined", + ) + } yield { + // The transaction creates 2 contracts of type Dummy and DummyWithParam + // But because we filter by Dummy template we should only get that one + assertLength( + "Two create event should have been into the transaction", + 1, + exerciseTransaction.events, + ) + val templateId = assertDefined( + assertSingleton("expected single event", exerciseTransaction.events).getCreated.templateId, + "expected template id", + ) + assert(templateId.packageId == Dummy.TEMPLATE_ID_WITH_PACKAGE_ID.getPackageId) + assert(templateId.moduleName == Dummy.TEMPLATE_ID_WITH_PACKAGE_ID.getModuleName) + assert(templateId.entityName == Dummy.TEMPLATE_ID_WITH_PACKAGE_ID.getEntityName) + } + }) + + test( + "ISSExecuteAndWaitForTransactionBasic", + "ExecuteSubmissionAndWaitForTransaction returns a transaction", + allocate(SingleExternalParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + transactionResponse <- executeAndWaitForTransaction( + ledger, + party, + new Dummy(party).create.commands, + Some( + ledger.transactionFormat( + parties = Some(Seq(party)) + ) + ), + ) + } yield { + assertOnTransactionResponse(transactionResponse.getTransaction) + } + }) + + test( + "ISSExecuteAndWaitForTransactionNoFilter", + "ExecuteSubmissionAndWaitForTransaction returns a transaction with no filter", + allocate(SingleExternalParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + transactionResponse <- executeAndWaitForTransaction( + ledger, + party, + new Dummy(party).create.commands, + None, + ) + } yield { + assertOnTransactionResponse(transactionResponse.getTransaction) + } + }) + + test( + "ISSExecuteAndWaitForTransactionFilterByWrongParty", + "ExecuteSubmissionAndWaitForTransaction returns a transaction with empty events when filtered by wrong party", + allocate(TwoExternalParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(party, party2))) => + val format = ledger.transactionFormat( + parties = Some(Seq(party2)), + transactionShape = AcsDelta, + ) + for { + transactionResponseAcsDelta <- executeAndWaitForTransaction( + ledger, + party, + new Dummy(party).create.commands, + Some(format), + ) + transactionResponseLedgerEffects <- executeAndWaitForTransaction( + ledger, + party, + new Dummy(party).create.commands, + Some(format.update(_.transactionShape := TRANSACTION_SHAPE_LEDGER_EFFECTS)), + ) + } yield { + assertLength( + "No events should have been into the transaction", + 0, + transactionResponseAcsDelta.transaction.toList.flatMap(_.events), + ) + assertLength( + "No events should have been into the transaction", + 0, + transactionResponseLedgerEffects.transaction.toList.flatMap(_.events), + ) + } + }) + + test( + "ISSExecuteAndWaitInvalidSynchronizerId", + "ExecuteSubmissionAndWait should fail for invalid synchronizer ids", + allocate(SingleExternalParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party: ExternalParty))) => + val invalidSynchronizerId = "invalidSynchronizerId" + val prepareRequest = ledger.prepareSubmissionRequest( + party, + new Dummy(party).create.commands, + ) + for { + prepareResponse <- ledger.prepareSubmission(prepareRequest) + executeAndWaitRequest = ledger + .executeSubmissionAndWaitRequest( + party, + prepareResponse, + ) + .update(_.preparedTransaction.metadata.synchronizerId := invalidSynchronizerId) + failure <- ledger + .executeSubmissionAndWait(executeAndWaitRequest) + .mustFail( + "submitting a request with an invalid synchronizer id" + ) + } 
yield assertGrpcError( + failure, + RequestValidationErrors.InvalidField, + Some( + s"Invalid field synchronizer_id: Invalid unique identifier `$invalidSynchronizerId` with missing namespace." + ), + checkDefiniteAnswerMetadata = true, + ) + }) + + test( + "ISSExecuteAndWaitForTransactionInvalidSynchronizerId", + "ExecuteSubmissionAndWaitForTransaction should fail for invalid synchronizer ids", + allocate(SingleExternalParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party: ExternalParty))) => + val invalidSynchronizerId = "invalidSynchronizerId" + val prepareRequest = ledger.prepareSubmissionRequest( + party, + new Dummy(party).create.commands, + ) + for { + prepareResponse <- ledger.prepareSubmission(prepareRequest) + executeAndWaitRequest = ledger + .executeSubmissionAndWaitForTransactionRequest( + party, + prepareResponse, + None, + ) + .update(_.preparedTransaction.metadata.synchronizerId := invalidSynchronizerId) + failure <- ledger + .executeSubmissionAndWaitForTransaction(executeAndWaitRequest) + .mustFail( + "submitting a request with an invalid synchronizer id" + ) + } yield assertGrpcError( + failure, + RequestValidationErrors.InvalidField, + Some( + s"Invalid field synchronizer_id: Invalid unique identifier `$invalidSynchronizerId` with missing namespace." + ), + checkDefiniteAnswerMetadata = true, + ) + }) + + test( + "ISSPreferredPackagesKnown", + "Getting preferred packages should return a valid result", + allocate(TwoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(party1, party2))) => + for { + result <- ledger.getPreferredPackages( + Map( + Dummy.PACKAGE_NAME -> Seq(party1, party2), + DivulgenceProposal.PACKAGE_NAME -> Seq(party1, party2), + ) + ) + } yield assertSameElements( + result.packageReferences.sortBy(_.packageId), + Seq( + PackageReference( + packageId = Dummy.PACKAGE_ID, + packageName = Dummy.PACKAGE_NAME, + packageVersion = Dummy.PACKAGE_VERSION.toString, + ), + PackageReference( + packageId = DivulgenceProposal.PACKAGE_ID, + packageName = DivulgenceProposal.PACKAGE_NAME, + packageVersion = DivulgenceProposal.PACKAGE_VERSION.toString, + ), + ), + ) + }) + + test( + "ISSPreferredPackagesUnknownParty", + "Getting preferred package version for an unknown party should fail", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + invalidPartyFailure <- ledger + .getPreferredPackages( + // Manually craft invalid party + Map(Dummy.PACKAGE_NAME -> Seq(Party(new data.Party("invalid-party")))) + ) + .mustFail("invalid party") + unknownPartyFailure <- ledger + .getPreferredPackages( + // Manually craft invalid party + Map(Dummy.PACKAGE_NAME -> Seq(Party(new data.Party("unknownParty::ns")))) + ) + .mustFail("unknown party") + } yield { + assertGrpcError( + // TODO(#25385): Here we should first report the invalid party-id format + invalidPartyFailure, + UnknownInformees, + None, + ) + assertGrpcError( + unknownPartyFailure, + UnknownInformees, + None, + ) + } + }) + + test( + "ISSPreferredPackagesUnknownPackageName", + "Getting preferred package version for an unknown package-name should fail", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + invalidPackageNameFailure <- ledger + .getPreferredPackages(Map("What-Is-A-Package-Name?" 
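+ // '?' is not a legal package-name character, so this should fail field + // validation rather than package lookup. +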
-> Seq(party))) + .mustFail("invalid package-name") + unknownPackageNameFailure <- ledger + .getPreferredPackages(Map("NoSuchPackage" -> Seq(party))) + .mustFail("unknown package-name") + } yield { + assertGrpcError( + invalidPackageNameFailure, + InvalidField, + Some("package_name/packageName"), + ) + assertGrpcError( + unknownPackageNameFailure, + PackageNamesNotFound, + None, + ) + } + }) + + test( + "ISSPreferredPackagesUnknownSynchronizerId", + "Getting preferred package version for an unknown synchronizer-id should fail", + allocate(TwoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(party1, party2))) => + for { + invalidSynchronizerIdFailure <- ledger + .getPreferredPackages( + Map(Dummy.PACKAGE_NAME -> Seq(party1, party2)), + synchronizerIdO = Some("invalidSyncId"), + ) + .mustFail("invalid synchronizer-id") + unknownSynchronizerIdFailure <- ledger + .getPreferredPackages( + Map(Dummy.PACKAGE_NAME -> Seq(party1, party2)), + synchronizerIdO = Some("unknownSynchronizerId::ns"), + ) + .mustFail("unknown synchronizer-id") + } yield { + assertGrpcError( + invalidSynchronizerIdFailure, + InvalidField, + Some("synchronizer_id/synchronizerId"), + ) + assertGrpcError( + unknownSynchronizerIdFailure, + InvalidPrescribedSynchronizerId, + None, + ) + } + }) + + test( + "ISSPreferredPackageVersionKnown", + "Getting preferred package version should return a valid result", + allocate(TwoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(party1, party2))) => + for { + result <- ledger.getPreferredPackageVersion(Seq(party1, party2), Dummy.PACKAGE_NAME) + } yield { + result.packagePreference + .flatMap(_.packageReference) + .map( + assertEquals( + _, + PackageReference( + packageId = Dummy.PACKAGE_ID, + packageName = Dummy.PACKAGE_NAME, + packageVersion = Dummy.PACKAGE_VERSION.toString, + ), + ) + ) + .getOrElse(fail(s"Invalid preference response: $result")) + } + }) + + test( + "ISSPreferredPackageVersionUnknownParty", + "Getting preferred package version for an unknown party should fail", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + invalidPartyFailure <- ledger + .getPreferredPackageVersion( + // Manually craft invalid party + Seq(Party(new data.Party("invalid-party"))), + Dummy.PACKAGE_NAME, + ) + .mustFail("invalid party") + unknownPartyFailure <- ledger + .getPreferredPackageVersion( + // Manually craft invalid party + Seq(Party(new data.Party("unknownParty::ns"))), + Dummy.PACKAGE_NAME, + ) + .mustFail("unknown party") + } yield { + assertGrpcError( + // TODO(#25385): Here we should first report the invalid party-id format + invalidPartyFailure, + UnknownInformees, + None, + ) + assertGrpcError( + unknownPartyFailure, + UnknownInformees, + None, + ) + } + }) + + test( + "ISSPreferredPackageVersionUnknownPackageName", + "Getting preferred package version for an unknown package-name should fail", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + invalidPackageNameFailure <- ledger + .getPreferredPackageVersion(Seq(party), "What-Is-A-Package-Name?") + .mustFail("invalid package-name") + unknownPackageNameFailure <- ledger + .getPreferredPackageVersion(Seq(party), "NoSuchPackage") + .mustFail("unknown package-name") + } yield { + assertGrpcError( + invalidPackageNameFailure, + InvalidField, + Some("package_name/packageName"), + ) + assertGrpcError( + unknownPackageNameFailure, + PackageNamesNotFound, + None, + ) + } + }) + + test(
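+ // Same coverage as ISSPreferredPackagesUnknownSynchronizerId above, but for the + // singular getPreferredPackageVersion endpoint. +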
"ISSPreferredPackageVersionUnknownSynchronizerId", + "Getting preferred package version for an unknown synhcronizer-id should fail", + allocate(TwoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(party1, party2))) => + for { + invalidSynchronizerIdFailure <- ledger + .getPreferredPackageVersion( + Seq(party1, party2), + Dummy.PACKAGE_NAME, + synchronizerIdO = Some("invalidSyncId"), + ) + .mustFail("unknown synchronizer-id") + unknownSynchronizerIdFailure <- ledger + .getPreferredPackageVersion( + Seq(party1, party2), + Dummy.PACKAGE_NAME, + synchronizerIdO = Some("unknownSynchronizerId::ns"), + ) + .mustFail("unknown synchronizer-id") + } yield { + assertGrpcError( + invalidSynchronizerIdFailure, + InvalidField, + Some("synchronizer_id/synchronizerId"), + ) + assertGrpcError( + unknownSynchronizerIdFailure, + InvalidPrescribedSynchronizerId, + None, + ) + } + }) + + private def testExplicitDisclosure( + ledger: ParticipantTestContext, + owner: Party, + )(implicit + ec: ExecutionContext + ): Future[(DummyFlexibleController.ContractId, DisclosedContract)] = { + def create: Future[DummyFlexibleController.ContractId] = owner match { + case local: LocalParty => + ledger.create( + owner, + new DummyFlexibleController(owner), + ) + case external: infrastructure.ExternalParty => + val prepareSubmissionRequest = ledger.prepareSubmissionRequest( + external, + new DummyFlexibleController(external).create.commands, + ) + for { + prepareResponse <- ledger.prepareSubmission(prepareSubmissionRequest) + executeRequest = ledger.executeSubmissionRequest(external, prepareResponse) + _ <- ledger.executeSubmission(executeRequest) + _ <- ledger.firstCompletions(external) + transactions <- ledger.transactions(LedgerEffects, external) + } yield { + DummyFlexibleController.ContractId.fromContractId( + new com.daml.ledger.javaapi.data.codegen.ContractId( + transactions.head.events.head.getCreated.contractId + ) + ) + } + } + + for { + contractId: DummyFlexibleController.ContractId <- create + end <- ledger.currentEnd() + witnessTxs <- ledger.transactions( + new GetUpdatesRequest( + beginExclusive = ledger.begin, + endInclusive = Some(end), + updateFormat = Some(formatByPartyAndTemplate(owner, DummyFlexibleController.TEMPLATE_ID)), + ) + ) + tx = assertSingleton("Owners' transactions", witnessTxs) + create = assertSingleton("The create", createdEvents(tx)) + disclosedContract = createEventToDisclosedContract(create) + } yield (contractId, disclosedContract) + } + + def executeAndWaitForTransaction( + ledger: ParticipantTestContext, + party: Party, + commands: java.util.List[Command], + transactionFormat: Option[com.daml.ledger.api.v2.transaction_filter.TransactionFormat] = None, + )(implicit + executionContext: ExecutionContext + ): Future[ExecuteSubmissionAndWaitForTransactionResponse] = + party match { + case externalParty: ExternalParty => + val prepareRequest = ledger.prepareSubmissionRequest( + externalParty, + commands, + ) + for { + prepareResponse <- ledger.prepareSubmission(prepareRequest) + executeAndWaitRequest = ledger.executeSubmissionAndWaitForTransactionRequest( + externalParty, + prepareResponse, + transactionFormat, + ) + response <- ledger.executeSubmissionAndWaitForTransaction(executeAndWaitRequest) + } yield response + case _ => fail("Expected an external party") + } + + // TODO(#25385): Add test for GetPreferredPackages.vettingValidAt +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/InterfaceIT.scala 
b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/InterfaceIT.scala new file mode 100644 index 0000000000..456248b577 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/InterfaceIT.scala @@ -0,0 +1,134 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.{ + Participant, + Participants, + SingleParty, + allocate, +} +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.TransactionHelpers.* +import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, TestConstraints} +import com.daml.ledger.javaapi.data.codegen.{ContractCompanion, Update} +import com.daml.ledger.javaapi.data.{Command, DamlRecord, ExerciseCommand, Identifier} +import com.daml.ledger.test.java.semantic.interface$.T +import com.daml.ledger.test.java.semantic.{interface1, interface2} +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.ledger.error.groups.CommandExecutionErrors + +import java.util.List as JList +import scala.jdk.CollectionConverters.* + +class InterfaceIT extends LedgerTestSuite { + implicit val tCompanion: ContractCompanion.WithoutKey[T.Contract, T.ContractId, T] = + T.COMPANION + + // replace identifier with the wrong identifier for some of these tests + private[this] def useWrongId[X]( + update: Update[X], + id: Identifier, + ): JList[Command] = { + val command = update.commands.asScala.head + val exe = command.asExerciseCommand.get + val arg = exe.getChoiceArgument.asRecord.get + JList.of( + new ExerciseCommand( + id, + exe.getContractId, + exe.getChoice, + new DamlRecord(arg.getFields), + ) + ) + } + + test( + "ExerciseTemplateSuccess", + "Success but does not set interfaceId in output event", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + t <- ledger.create(party, new T(party)) + tree <- ledger.exercise(party, t.exerciseMyArchive()) + } yield { + val events = exercisedEvents(tree) + assertLength(s"1 successful exercise", 1, events).discard + assertEquals(events.head.interfaceId, None) + assertEquals(events.head.getExerciseResult.getText, "Interface.T") + } + }) + + test( + "ExerciseInterfaceSuccess", + "Success and set interfaceId in output event", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + t <- ledger.create(party, new T(party)) + tree <- ledger.exercise(party, t.toInterface(interface1.I.INTERFACE).exerciseMyArchive()) + } yield { + val events = exercisedEvents(tree) + assertLength(s"1 successful exercise", 1, events).discard + assertEquals(events.head.interfaceId, Some(interface1.I.TEMPLATE_ID_WITH_PACKAGE_ID.toV1)) + assertEquals(events.head.getExerciseResult.getText, "Interface1.I") + } + }) + + test( + "ExerciseInterfaceByTemplateFail", + "Cannot exercise an interface choice using templateId", + allocate(SingleParty), + limitation = TestConstraints.GrpcOnly("Problem creating faulty JSON from a faulty GRPC call"), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + t <- ledger.create(party, new T(party)) + failure <- ledger + .submitAndWaitForTransaction( + ledger.submitAndWaitForTransactionRequest( + party, + 
useWrongId(t.toInterface(interface1.I.INTERFACE).exerciseChoiceI1(), T.TEMPLATE_ID), + ) + ) + .mustFail("unknown choice") + } yield { + assertGrpcError( + failure, + CommandExecutionErrors.Preprocessing.PreprocessingFailed, + Some("unknown choice ChoiceI1"), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "ExerciseInterfaceByRequiringFail", + "Cannot exercise an interface choice using requiring templateId", + allocate(SingleParty), + limitation = TestConstraints.GrpcOnly("Problem creating faulty JSON from a faulty GRPC call"), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + t <- ledger.create(party, new T(party)) + failure <- ledger + .submitAndWaitForTransaction( + ledger.submitAndWaitForTransactionRequest( + party, + useWrongId( + t.toInterface(interface1.I.INTERFACE).exerciseChoiceI1(), + interface2.I.TEMPLATE_ID, + ), + ) + ) + .mustFail("unknown choice") + } yield { + assertGrpcError( + failure, + CommandExecutionErrors.Preprocessing.PreprocessingFailed, + Some("unknown choice ChoiceI1"), + checkDefiniteAnswerMetadata = true, + ) + } + }) + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/InterfaceSubscriptionsIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/InterfaceSubscriptionsIT.scala new file mode 100644 index 0000000000..0d8f16b6ca --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/InterfaceSubscriptionsIT.scala @@ -0,0 +1,944 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.* +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.FutureAssertions.* +import com.daml.ledger.api.testtool.infrastructure.TransactionHelpers.* +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.v2.event.Event.Event +import com.daml.ledger.api.v2.event.{CreatedEvent, InterfaceView} +import com.daml.ledger.api.v2.transaction.Transaction +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_ACS_DELTA +import com.daml.ledger.api.v2.transaction_filter.{EventFormat, TransactionFormat} +import com.daml.ledger.api.v2.value.{Identifier, Record} +import com.daml.ledger.javaapi +import com.daml.ledger.javaapi.data.codegen.ContractCompanion +import com.daml.ledger.test.java.semantic.interfaceviews.{ + I, + I2, + INoTemplate, + T1, + T2, + T3, + T4, + T5, + T6, +} +import com.daml.ledger.test.java.{carbonv1, carbonv2} +import com.daml.logging.LoggingContext +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.ledger.api.TransactionShape.LedgerEffects +import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors + +import java.util.regex.Pattern +import scala.concurrent.duration.* +import scala.concurrent.{ExecutionContext, Future} + +class InterfaceSubscriptionsIT extends InterfaceSubscriptionsITBase("IS") +class InterfaceSubscriptionsWithEventBlobsIT extends InterfaceSubscriptionsITBase("ISWP") + +abstract class InterfaceSubscriptionsITBase(prefix: String) extends LedgerTestSuite { + private def 
archive(ledger: ParticipantTestContext, party: Party)( + c1: T1.ContractId, + c2: T2.ContractId, + c3: T3.ContractId, + c4: T4.ContractId, + )(implicit ec: ExecutionContext): Future[Unit] = + for { + _ <- ledger.exercise(party, c1.exerciseArchive()) + _ <- ledger.exercise(party, c2.exerciseArchive()) + _ <- ledger.exercise(party, c3.exerciseArchive()) + _ <- ledger.exercise(party, c4.exerciseArchive()) + } yield () + + implicit val t1Companion: ContractCompanion.WithoutKey[T1.Contract, T1.ContractId, T1] = + T1.COMPANION + implicit val t2Companion: ContractCompanion.WithoutKey[T2.Contract, T2.ContractId, T2] = + T2.COMPANION + implicit val t3Companion: ContractCompanion.WithoutKey[T3.Contract, T3.ContractId, T3] = + T3.COMPANION + implicit val t4Companion: ContractCompanion.WithoutKey[T4.Contract, T4.ContractId, T4] = + T4.COMPANION + implicit val t5Companion: ContractCompanion.WithoutKey[T5.Contract, T5.ContractId, T5] = + T5.COMPANION + implicit val t6Companion: ContractCompanion.WithoutKey[T6.Contract, T6.ContractId, T6] = + T6.COMPANION + + test( + s"${prefix}TransactionsBasic", + "Basic functionality for interface subscriptions on transaction streams", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + import ledger.* + for { + c1 <- create(party, new T1(party, 1)) + c2 <- create(party, new T2(party, 2)) + c3 <- create(party, new T3(party, 3)) + c4 <- create(party, new T4(party, 4)) + _ <- archive(ledger, party)(c1, c2, c3, c4) + txReq <- getTransactionsRequest( + transactionFormat( + Some(Seq(party)), + Seq(T1.TEMPLATE_ID), + Seq((I.TEMPLATE_ID, true)), + verbose = true, + ) + ) + txs <- transactions(txReq) + txReqWithoutInterfaceViews <- getTransactionsRequest( + transactionFormat( + parties = Some(Seq(party)), + templateIds = Seq(T1.TEMPLATE_ID), + interfaceFilters = Seq((I.TEMPLATE_ID, false)), + ) + ) + txsWithoutInterfaceViews <- transactions(txReqWithoutInterfaceViews) + created = txs.flatMap(createdEvents) + exercisedImplementedInterfaces = Some( + txs.flatMap(archivedEvents).map(_.implementedInterfaces) + ) + } yield { + basicAssertions( + created, + exercisedImplementedInterfaces, + c1.contractId, + c2.contractId, + c3.contractId, + ) + assertEquals( + "If no interface views were requested, we should still get the create events, but no views rendered", + txsWithoutInterfaceViews.flatMap(createdEvents).map(_.interfaceViews), + Seq(Nil, Nil, Nil), + ) + assertEquals( + "If no interface views were requested, we should still get the archive events, but no implemented_interfaces rendered", + txsWithoutInterfaceViews.flatMap(archivedEvents).map(_.implementedInterfaces), + Seq(Nil, Nil, Nil), + ) + } + }) + + test( + s"${prefix}TransactionLedgerEffectsBasic", + "Basic functionality for interface subscriptions on ledger effects transaction streams", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + import ledger.* + for { + c1 <- create(party, new T1(party, 1)) + c2 <- create(party, new T2(party, 2)) + c3 <- create(party, new T3(party, 3)) + c4 <- create(party, new T4(party, 4)) + _ <- ledger.exercise(party, c2.exerciseChoiceT2()) + _ <- ledger.exercise(party, c2.toInterface(I.INTERFACE).exerciseChoiceI()) + _ <- archive(ledger, party)(c1, c2, c3, c4) + txReq <- getTransactionsRequest( + transactionFormat( + parties = Some(Seq(party)), + templateIds = Seq(T1.TEMPLATE_ID), + interfaceFilters = Seq((I.TEMPLATE_ID, true)), + transactionShape = LedgerEffects, + verbose
= true, + ) + ) + txs <- transactions(txReq) + txReqWithoutInterfaceViews <- getTransactionsRequest( + transactionFormat( + parties = Some(Seq(party)), + templateIds = Seq(T1.TEMPLATE_ID), + interfaceFilters = Seq((I.TEMPLATE_ID, false)), + transactionShape = LedgerEffects, + ) + ) + txsWithoutInterfaceViews <- transactions(txReqWithoutInterfaceViews) + created = txs.flatMap(createdEvents) + exercisedImplementedInterfaces = Some( + txs.flatMap(exercisedEvents).map(_.implementedInterfaces) + ) + } yield { + basicAssertions( + created, + exercisedImplementedInterfaces, + c1.contractId, + c2.contractId, + c3.contractId, + exercisedEvents = true, + ) + assertEquals( + "If no interface views were requested, we should still get the create events, but no views rendered", + txsWithoutInterfaceViews.flatMap(createdEvents).map(_.interfaceViews), + Seq(Nil, Nil, Nil), + ) + assertEquals( + "If no interface views were requested, we should still get the exercise events, but no implemented_interfaces rendered", + txsWithoutInterfaceViews.flatMap(exercisedEvents).map(_.implementedInterfaces), + Seq(Nil, Nil, Nil, Nil, Nil), + ) + } + }) + + test( + s"${prefix}AcsBasic", + "Basic functionality for interface subscriptions on ACS streams", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + import ledger.* + for { + c1 <- create(party, new T1(party, 1)) + c2 <- create(party, new T2(party, 2)) + c3 <- create(party, new T3(party, 3)) + c4 <- create(party, new T4(party, 4)) + end <- ledger.currentEnd() + createdEvents <- activeContracts( + activeContractsRequest( + parties = Some(Seq(party)), + templateIds = Seq(T1.TEMPLATE_ID), + interfaceFilters = Seq((I.TEMPLATE_ID, true)), + activeAtOffset = end, + ) + ) + // archive to avoid interference with subsequent tests + _ <- archive(ledger, party)(c1, c2, c3, c4) + } yield basicAssertions(createdEvents, None, c1.contractId, c2.contractId, c3.contractId) + }) + + private def basicAssertions( + createdEvents: Vector[CreatedEvent], + exercisedImplementedInterfacesO: Option[Vector[Seq[Identifier]]], + c1: String, + c2: String, + c3: String, + exercisedEvents: Boolean = false, + ): Unit = { + + def checkArgumentsNonEmpty(event: CreatedEvent, id: Int): Unit = + assertEquals( + s"Create event $id createArguments must NOT be empty", + event.createArguments.isEmpty, + false, + ) + + val transactionNum = 3 + val (transactionNumExercised, archivedEventIndexF) = + if (exercisedEvents) (5, (i: Int) => i + 2) + else (3, (i: Int) => i) + + assertLength( + s"$transactionNum created transactions found", + transactionNum, + createdEvents, + ).discard + exercisedImplementedInterfacesO.foreach(exercisedImplementedInterfaces => + assertLength( + s"$transactionNumExercised exercised transactions should be found", + transactionNumExercised, + exercisedImplementedInterfaces, + ).discard + ) + + // T1 + val createdEvent1 = createdEvents(0) + assertLength("Create event 1 has a view", 1, createdEvent1.interfaceViews).discard + assertEquals( + "Create event 1 template ID", + createdEvent1.templateId.get.toString, + T1.TEMPLATE_ID_WITH_PACKAGE_ID.toV1.toString, + ) + assertEquals("Create event 1 contract ID", createdEvent1.contractId, c1) + assertViewEquals(createdEvent1.interfaceViews, I.TEMPLATE_ID_WITH_PACKAGE_ID.toV1) { value => + assertLength("View1 has 2 fields", 2, value.fields).discard + assertEquals("View1.a", value.fields(0).getValue.getInt64, 1) + assertEquals("View1.b", value.fields(1).getValue.getBool, true) +
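// verbose = true was requested, so rendered record fields must carry labels +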
assert( + value.fields.forall(_.label.nonEmpty), + "Expected a view with labels (verbose)", + ) + } + checkArgumentsNonEmpty(createdEvent1, id = 1) + assert( + createdEvent1.getCreateArguments.fields.forall(_.label.nonEmpty), + "Expected a contract with labels (verbose)", + ) + exercisedImplementedInterfacesO.foreach(exercisedImplementedInterfaces => + assertEquals( + "Archive event 1 has correct implemented_interfaces", + exercisedImplementedInterfaces(archivedEventIndexF(0)), + Seq(I.TEMPLATE_ID_WITH_PACKAGE_ID.toV1), + ) + ) + + // T2 + val createdEvent2 = createdEvents(1) + assertLength("Create event 2 has a view", 1, createdEvent2.interfaceViews).discard + assertEquals( + "Create event 2 template ID", + createdEvent2.templateId.get.toString, + T2.TEMPLATE_ID_WITH_PACKAGE_ID.toV1.toString, + ) + assertEquals("Create event 2 contract ID", createdEvent2.contractId, c2) + assertViewEquals(createdEvent2.interfaceViews, I.TEMPLATE_ID_WITH_PACKAGE_ID.toV1) { value => + assertLength("View2 has 2 fields", 2, value.fields).discard + assertEquals("View2.a", value.fields(0).getValue.getInt64, 2) + assertEquals("View2.b", value.fields(1).getValue.getBool, false) + } + checkArgumentsNonEmpty(createdEvent2, id = 2) + exercisedImplementedInterfacesO.foreach(exercisedImplementedInterfaces => + assertEquals( + "Archive event 2 has correct implemented_interfaces", + exercisedImplementedInterfaces(archivedEventIndexF(1)), + Seq(I.TEMPLATE_ID_WITH_PACKAGE_ID.toV1), + ) + ) + + // T3 + val createdEvent3 = createdEvents(2) + assertLength("Create event 3 has a view", 1, createdEvent3.interfaceViews).discard + assertEquals( + "Create event 3 template ID", + createdEvent3.templateId.get.toString, + T3.TEMPLATE_ID_WITH_PACKAGE_ID.toV1.toString, + ) + assertEquals("Create event 3 contract ID", createdEvent3.contractId, c3) + assertViewFailed(createdEvent3.interfaceViews, I.TEMPLATE_ID_WITH_PACKAGE_ID.toV1) + checkArgumentsNonEmpty(createdEvent3, id = 3) + exercisedImplementedInterfacesO.foreach(exercisedImplementedInterfaces => + assertEquals( + "Archive event 3 has correct implemented_interfaces", + exercisedImplementedInterfaces(archivedEventIndexF(2)), + Seq(I.TEMPLATE_ID_WITH_PACKAGE_ID.toV1), + ) + ) + + } + + test( + s"${prefix}MultipleWitness", + "Multiple witness", + allocate(Parties(2)), + )(implicit ec => { case Participants(Participant(ledger, Seq(party1, party2))) => + import ledger.* + for { + c <- create(party1, new T6(party1, party2)) + txReq <- getTransactionsRequest( + TransactionFormat( + eventFormat = Some( + EventFormat( + filtersByParty = Map( + party1.getValue -> filters( + Seq.empty, + Seq((I.TEMPLATE_ID, true)), + ), + party2.getValue -> filters( + Seq.empty, + Seq((I2.TEMPLATE_ID, true)), + ), + ), + filtersForAnyParty = None, + verbose = false, + ) + ), + transactionShape = TRANSACTION_SHAPE_ACS_DELTA, + ) + ) + mergedTransactions <- transactions(txReq) + + txReq1 <- getTransactionsRequest( + transactionFormat(Some(Seq(party1)), Seq.empty, Seq((I.TEMPLATE_ID, true))) + ) + party1Transactions <- transactions(txReq1) + // archive active contract to avoid interference with subsequent tests + _ <- ledger.exercise(party1, c.exerciseArchive()) + } yield { + assertLength("single transaction found", 1, mergedTransactions).discard + val createdEvent1 = createdEvents(mergedTransactions(0)).head + assertEquals("Create event 1 contract ID", createdEvent1.contractId, c.contractId) + assertViewEquals(createdEvent1.interfaceViews, I.TEMPLATE_ID_WITH_PACKAGE_ID.toV1) { value => + assertLength("View1 
has 2 fields", 2, value.fields).discard + assertEquals("View1.a", value.fields(0).getValue.getInt64, 6) + assertEquals("View1.b", value.fields(1).getValue.getBool, true) + } + assertViewEquals(createdEvent1.interfaceViews, I2.TEMPLATE_ID_WITH_PACKAGE_ID.toV1) { value => + assertLength("View2 has 1 field", 1, value.fields).discard + assertEquals("View2.c", value.fields(0).getValue.getInt64, 7) + } + + assertLength("single transaction found", 1, party1Transactions).discard + val createdEvent2 = createdEvents(party1Transactions(0)).head + assertEquals("Create event 1 contract ID", createdEvent2.contractId, c.contractId) + assertLength("single view found", 1, createdEvent2.interfaceViews).discard + assertViewEquals(createdEvent2.interfaceViews, I.TEMPLATE_ID_WITH_PACKAGE_ID.toV1) { value => + assertLength("View1 has 2 fields", 2, value.fields).discard + assertEquals("View1.a", value.fields(0).getValue.getInt64, 6) + assertEquals("View1.b", value.fields(1).getValue.getBool, true) + } + } + }) + + test( + s"${prefix}MultipleViews", + "Multiple interface views populated for one event", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + import ledger.* + for { + c <- create(party, new T5(party, 31337)) + _ <- ledger.exercise(party, c.exerciseArchive()) + txReq <- getTransactionsRequest( + transactionFormat( + Some(Seq(party)), + Seq.empty, + Seq((I.TEMPLATE_ID, true), (I2.TEMPLATE_ID, true)), + ) + ) + transactions <- transactions(txReq) + } yield { + assertLength("Two transactions found", 2, transactions).discard + val createdEvent = createdEvents(transactions(0)).head + val archivedEvent = archivedEvents(transactions(1)).head + assertEquals("Create event with correct contract ID", createdEvent.contractId, c.contractId) + assertViewEquals(createdEvent.interfaceViews, I.TEMPLATE_ID_WITH_PACKAGE_ID.toV1) { value => + assertLength("View1 has 2 fields", 2, value.fields).discard + assertEquals("View1.a", value.fields(0).getValue.getInt64, 31337) + assertEquals("View1.b", value.fields(1).getValue.getBool, true) + } + assertViewEquals(createdEvent.interfaceViews, I2.TEMPLATE_ID_WITH_PACKAGE_ID.toV1) { value => + assertLength("View2 has 1 field", 1, value.fields).discard + assertEquals("View2.c", value.fields(0).getValue.getInt64, 1) + } + assertEquals( + "Archived event with correct contract ID", + archivedEvent.contractId, + c.contractId, + ) + assertEquals( + "Archive event has both implemented_interfaces", + archivedEvent.implementedInterfaces.toSet, + Set( + I.TEMPLATE_ID_WITH_PACKAGE_ID.toV1, + I2.TEMPLATE_ID_WITH_PACKAGE_ID.toV1, + ), + ) + } + }) + + test( + s"${prefix}TransactionsIrrelevantTransactions", + "Subscribing on transaction stream by interface with no relevant transactions", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + import ledger.* + for { + _ <- create(party, new T4(party, 4)) + txReq <- getTransactionsRequest( + transactionFormat( + Some(Seq(party)), + Seq.empty, + Seq((INoTemplate.TEMPLATE_ID, true)), + ) + ) + transactions <- transactions(txReq) + } yield { + assertLength("0 transactions should be found", 0, transactions).discard + () + } + }) + + test( + s"${prefix}TransactionsDuplicateInterfaceFilters", + "Subscribing on transaction stream by interface with duplicate filters and not verbose", + allocate(SingleParty), + limitation = TestConstraints.GrpcOnly("Labels are always emitted by Transcode/SchemaProcessor"), + )(implicit ec => { case 
Participants(Participant(ledger, Seq(party))) => + import ledger.* + for { + c1 <- create(party, new T1(party, 1)) + c2 <- create(party, new T2(party, 2)) + c3 <- create(party, new T3(party, 3)) + c4 <- create(party, new T4(party, 4)) + txReq <- getTransactionsRequest( + transactionFormat( + parties = Some(Seq(party)), + templateIds = Seq(T1.TEMPLATE_ID), + interfaceFilters = Seq((I.TEMPLATE_ID, false), (I.TEMPLATE_ID, true)), + ) + ).map(_.update(_.updateFormat.includeTransactions.eventFormat.verbose := false)) + transactions <- transactions(txReq) + // archive to avoid interference with subsequent tests + _ <- archive(ledger, party)(c1, c2, c3, c4) + } yield { + val createdEvent1 = createdEvents(transactions(0)).head + assertEquals("Create event 1 contract ID", createdEvent1.contractId, c1.contractId) + val createdEvent2 = createdEvents(transactions(1)).head + assertEquals("Create event 2 contract ID", createdEvent2.contractId, c2.contractId) + // Expect view to be delivered even though there is an ambiguous + // includeInterfaceView flag set to true and false at the same time (true wins) + assertViewEquals(createdEvent2.interfaceViews, I.TEMPLATE_ID_WITH_PACKAGE_ID.toV1) { value => + assertLength("View2 has 2 fields", 2, value.fields).discard + assertEquals("View2.a", value.fields(0).getValue.getInt64, 2) + assertEquals("View2.b", value.fields(1).getValue.getBool, false) + assert( + value.fields.forall(_.label.isEmpty), + s"Expected a view with no labels (verbose = false)", + ) + } + val createdEvent3 = createdEvents(transactions(2)).head + assertEquals("Create event 3 contract ID", createdEvent3.contractId, c3.contractId) + } + }) + + test( + s"${prefix}TransactionsDuplicateTemplateFilters", + "Subscribing on transaction stream by template with duplicate filters", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + import ledger.* + for { + c1 <- create(party, new T1(party, 1)) + c2 <- create(party, new T2(party, 2)) + c3 <- create(party, new T3(party, 3)) + c4 <- create(party, new T4(party, 4)) + txReq <- getTransactionsRequest( + transactionFormat( + Some(Seq(party)), + Seq(T1.TEMPLATE_ID, T1.TEMPLATE_ID), + Seq((I.TEMPLATE_ID, true)), + ) + ) + transactions <- transactions(txReq) + // archive to avoid interference with subsequent tests + _ <- archive(ledger, party)(c1, c2, c3, c4) + } yield { + val createdEvent1 = createdEvents(transactions(0)).head + assertEquals("Create event 1 contract ID", createdEvent1.contractId, c1.contractId) + assertEquals( + "Create event 1 createArguments must NOT be empty", + createdEvent1.createArguments.isEmpty, + false, + ) + val createdEvent2 = createdEvents(transactions(1)).head + assertEquals("Create event 2 contract ID", createdEvent2.contractId, c2.contractId) + // Expect the view to be delivered through the interface filter, + // unaffected by the duplicated template filter.
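// [Editorial sketch, not part of the upstream change] For orientation: the
// `transactionFormat` helper used in these tests ultimately builds per-party
// cumulative filters on the wire. Assuming the public Ledger API v2
// `transaction_filter` proto (the same package the tests above pull
// `TransactionFormat`/`EventFormat` from; the empty interfaceId is a placeholder),
// an interface filter with views enabled looks roughly like:
//
//   import com.daml.ledger.api.v2.transaction_filter.{CumulativeFilter, Filters, InterfaceFilter}
//
//   val filtersForParty: Filters = Filters(
//     cumulative = Seq(
//       CumulativeFilter(
//         CumulativeFilter.IdentifierFilter.InterfaceFilter(
//           InterfaceFilter(
//             interfaceId = None, // a real request carries I's Identifier here
//             includeInterfaceView = true, // when duplicated with `false`, `true` wins
//           )
//         )
//       )
//     )
//   )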
+ assertViewEquals(createdEvent2.interfaceViews, I.TEMPLATE_ID_WITH_PACKAGE_ID.toV1) { value => + assertLength("View2 has 2 fields", 2, value.fields).discard + assertEquals("View2.a", value.fields(0).getValue.getInt64, 2) + assertEquals("View2.b", value.fields(1).getValue.getBool, false) + } + val createdEvent3 = createdEvents(transactions(2)).head + assertEquals("Create event 3 contract ID", createdEvent3.contractId, c3.contractId) + } + }) + + test( + s"${prefix}TransactionsNoIncludedView", + "Subscribing on transaction stream by interface or template without included views", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + import ledger.* + for { + c1 <- create(party, new T1(party, 1)) + c2 <- create(party, new T2(party, 2)) + c3 <- create(party, new T3(party, 3)) + c4 <- create(party, new T4(party, 4)) + txReq <- getTransactionsRequest( + transactionFormat( + Some(Seq(party)), + Seq(T1.TEMPLATE_ID), + Seq((I.TEMPLATE_ID, false)), + ) + ) + transactions <- transactions(txReq) + // archive to avoid interference with subsequent tests + _ <- archive(ledger, party)(c1, c2, c3, c4) + } yield { + assertLength("3 transactions found", 3, transactions).discard + val interfaceViewCount: Int = + transactions.flatMap(createdEvents).map(_.interfaceViews.size).sum + assertEquals("No views have been computed and produced", 0, interfaceViewCount) + val createArgumentsCount: Int = + transactions.flatMap(createdEvents).map(_.createArguments.isDefined).count(_ == true) + assertEquals("All 3 create arguments must be delivered", 3, createArgumentsCount) + + // T1 + val createdEvent1 = createdEvents(transactions(0)).head + assertEquals( + "Create event 1 template ID", + createdEvent1.templateId.get.toString, + T1.TEMPLATE_ID_WITH_PACKAGE_ID.toV1.toString, + ) + assertEquals("Create event 1 contract ID", createdEvent1.contractId, c1.contractId) + } + }) + + test( + s"${prefix}TransactionsEquivalentFilters", + "Subscribing by interface or all implementing templates gives the same result", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + import ledger.* + val allImplementations = Seq(T1.TEMPLATE_ID, T2.TEMPLATE_ID, T3.TEMPLATE_ID) + for { + c1 <- create(party, new T1(party, 1)) + c2 <- create(party, new T2(party, 2)) + c3 <- create(party, new T3(party, 3)) + c4 <- create(party, new T4(party, 4)) + // 1. Subscribe by the interface + txReq1 <- getTransactionsRequest( + transactionFormat( + Some(Seq(party)), + Seq.empty, + Seq((I.TEMPLATE_ID, true)), + ) + ) + transactions1 <- transactions(txReq1) + // 2. Subscribe by all implementing templates + txReq2 <- getTransactionsRequest( + transactionFormat( + Some(Seq(party)), + allImplementations, + Seq.empty, + ) + ) + transactions2 <- transactions(txReq2) + // 3. 
Subscribe by both the interface and all templates (redundant filters) + txReq3 <- getTransactionsRequest( + transactionFormat( + Some(Seq(party)), + allImplementations, + Seq((I.TEMPLATE_ID, true)), + ) + ) + transactions3 <- transactions(txReq3) + // archive to avoid interference with subsequent tests + _ <- archive(ledger, party)(c1, c2, c3, c4) + } yield { + assertLength("3 transactions found", 3, transactions1).discard + assertEquals( + "1 and 2 find the same transactions (but not the same views)", + transactions1.map(_.updateId), + transactions2.map(_.updateId), + ) + assertEquals( + "2 and 3 find the same contract_arguments (but not the same views)", + transactions2.map(updateTransaction()), + transactions3.map(updateTransaction(emptyView = true)), + ) + assertEquals( + "1 and 3 produce the same views (but not the same create arguments)", + // do not check the details since they contain the trace id, which is expected to differ + transactions1 + .map(updateTransaction(emptyDetails = true)) + .map(hideTraceIdFromStatusMessages), + transactions3 + .map( + updateTransaction( + emptyContractKey = true, + emptyDetails = true, + ) + ) + .map(hideTraceIdFromStatusMessages), + ) + } + }) + + test( + s"${prefix}TransactionsUnknownTemplateOrInterface", + "Subscribing on transaction stream by an unknown template or interface fails", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val packageName = I.TEMPLATE_ID.getPackageId + val moduleName = I.TEMPLATE_ID.getModuleName + val unknownTemplate = + new javaapi.data.Identifier(packageName, moduleName, "TemplateDoesNotExist") + val unknownInterface = + new javaapi.data.Identifier(packageName, moduleName, "InterfaceDoesNotExist") + import ledger.* + for { + _ <- create(party, new T1(party, 1)) + txReq <- getTransactionsRequest( + transactionFormat( + Some(Seq(party)), + Seq(unknownTemplate), + Seq.empty, + ) + ) + failure <- transactions(txReq) + .mustFail("subscribing with an unknown template") + txReq2 <- getTransactionsRequest( + transactionFormat( + Some(Seq(party)), + Seq.empty, + Seq((unknownInterface, true)), + ) + ) + failure2 <- transactions(txReq2) + .mustFail("subscribing with an unknown interface") + } yield { + assertGrpcErrorRegex( + failure, + RequestValidationErrors.NotFound.NoTemplatesForPackageNameAndQualifiedName, + Some( + Pattern.compile( + "The following package-name/template qualified-name pairs do not reference any template-id uploaded on this participant.*TemplateDoesNotExist" + ) + ), + ) + assertGrpcErrorRegex( + failure2, + RequestValidationErrors.NotFound.NoInterfaceForPackageNameAndQualifiedName, + Some( + Pattern.compile( + "The following package-name/interface qualified-name pairs do not reference any interface-id uploaded on this participant.*InterfaceDoesNotExist" + ) + ), + ) + } + }) + + test( + s"${prefix}TransactionsMultipleParties", + "Subscribing on transaction stream by multiple parties", + allocate(Parties(2)), + )(implicit ec => { case Participants(Participant(ledger, Seq(party1, party2))) => + import ledger.* + for { + c <- create(party1, new T6(party1, party2)) + txReqWithView <- getTransactionsRequest( + transactionFormat( + Some(Seq(party1)), + Seq.empty, + Seq((I.TEMPLATE_ID, true)), + ) + ) + party1Iface1transactionsWithView <- transactions(txReqWithView) + txReqWithoutView <- getTransactionsRequest( + transactionFormat( + Some(Seq(party1)), + Seq.empty, + Seq((I.TEMPLATE_ID, false)), + ) + ) + party1Iface1transactionsWithoutView <- transactions(txReqWithoutView) +
txReqWithView2 <- getTransactionsRequest( + transactionFormat( + Some(Seq(party2)), + Seq.empty, + Seq((I.TEMPLATE_ID, true)), + ) + ) + party2Iface1transactionsWithView <- transactions(txReqWithView2) + txReqWithoutView2 <- getTransactionsRequest( + transactionFormat( + Some(Seq(party2)), + Seq.empty, + Seq((I.TEMPLATE_ID, false)), + ) + ) + party2Iface1transactionsWithoutView <- transactions(txReqWithoutView2) + // archive active contract to avoid interference with subsequent tests + _ <- ledger.exercise(party1, c.exerciseArchive()) + } yield { + assertEquals( + party1Iface1transactionsWithView.map( + updateTransaction(emptyView = false, emptyWitness = true) + ), + party2Iface1transactionsWithView.map( + updateTransaction(emptyView = false, emptyWitness = true) + ), + ) + + assertEquals( + party1Iface1transactionsWithoutView.map( + updateTransaction(emptyView = false, emptyWitness = true) + ), + party2Iface1transactionsWithoutView.map( + updateTransaction(emptyView = false, emptyWitness = true) + ), + ) + + assertEquals( + party1Iface1transactionsWithView.map( + updateTransaction(emptyView = true, emptyWitness = true) + ), + party2Iface1transactionsWithoutView.map( + updateTransaction(emptyView = false, emptyWitness = true) + ), + ) + } + }) + + test( + s"${prefix}TransactionsSubscribeBeforeTemplateCreated", + "Subscribing on transaction stream by interface before template is created", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + import ledger.* + + implicit val loggingContext: LoggingContext = LoggingContext.ForTesting + + for { + _ <- ledger.uploadDarFileAndVetOnConnectedSynchronizers(Dars.read(Carbonv1TestDar.path)) + + txReq <- getTransactionsRequest( + transactionFormat( + Some(Seq(party)), + Seq.empty, + Seq((carbonv1.carbonv1.I.TEMPLATE_ID, true)), + ) + ).map( + // endless stream here as we would like to keep it open until + // template is uploaded and contract with this template is created + _.update( + _.optionalEndInclusive := None + ) + ) + transactionFuture = transactions( + take = 1, + request = txReq, + ) + _ = assertEquals(transactionFuture.isCompleted, false) + + _ <- ledger.uploadDarFileAndVetOnConnectedSynchronizers(Dars.read(Carbonv2TestDar.path)) + + _ = assertEquals(transactionFuture.isCompleted, false) + + contract <- succeedsEventually( + maxRetryDuration = 10.seconds, + description = "Topology processing around Dar upload can take a bit of time.", + delayMechanism = ledger.delayMechanism, + ) { + create(party, new carbonv2.carbonv2.T(party, 21))(carbonv2.carbonv2.T.COMPANION) + } + + transactions <- transactionFuture + + } yield assertSingleContractWithSimpleView( + transactions = transactions, + contractIdentifier = carbonv2.carbonv2.T.TEMPLATE_ID_WITH_PACKAGE_ID.toV1, + viewIdentifier = carbonv1.carbonv1.I.TEMPLATE_ID_WITH_PACKAGE_ID.toV1, + contractId = contract.contractId, + viewValue = 21, + ) + }) + + private def assertSingleContractWithSimpleView( + transactions: Vector[Transaction], + contractIdentifier: Identifier, + viewIdentifier: Identifier, + contractId: String, + viewValue: Long, + ): Unit = { + assertLength("transaction should be found", 1, transactions).discard + val createdEvent = createdEvents(transactions(0)).head + assertLength("Create event has a view", 1, createdEvent.interfaceViews).discard + assertEquals( + "Create event template ID", + createdEvent.templateId.get.toString, + contractIdentifier.toString, + ) + assertEquals("Create event contract ID", createdEvent.contractId, 
contractId) + assertViewEquals(createdEvent.interfaceViews, viewIdentifier) { value => + assertLength("View has 1 field", 1, value.fields).discard + assertEquals("View.value", value.fields(0).getValue.getInt64, viewValue) + } + } + + private def updateTransaction( + emptyView: Boolean = false, + emptyWitness: Boolean = false, + emptyCreateArguments: Boolean = false, + emptyContractKey: Boolean = false, + emptyDetails: Boolean = false, + )(tx: com.daml.ledger.api.v2.transaction.Transaction): Transaction = + tx.copy( + events = tx.events.map { event => + event.copy(event = event.event match { + case created: Event.Created => + created.copy(value = + created.value.copy( + witnessParties = if (emptyWitness) Seq.empty else created.value.witnessParties, + interfaceViews = + if (emptyView) Seq.empty + else if (emptyDetails) + created.value.interfaceViews.map(iv => + iv.copy(viewStatus = + iv.viewStatus.map(status => status.copy(details = Seq.empty)) + ) + ) + else created.value.interfaceViews, + contractKey = if (emptyContractKey) None else created.value.contractKey, + createArguments = if (emptyCreateArguments) None else created.value.createArguments, + ) + ) + case other => other + }) + }, + commandId = "", + ) + + private def hideTraceIdFromStatusMessages( + tx: com.daml.ledger.api.v2.transaction.Transaction + ): Transaction = + tx.copy( + events = tx.events.map { event => + event.copy(event = event.event match { + case created: Event.Created => + created.copy(value = + created.value.copy( + interfaceViews = created.value.interfaceViews.map(view => + view.copy( + viewStatus = view.viewStatus.map(status => + status.copy(message = + status.message + .replaceFirst("""DAML_FAILURE\(9,.{8}\)""", "DAML_FAILURE(9,0)") + ) + ) + ) + ) + ) + ) + case other => other + }) + }, + commandId = "", + ) + + private def assertViewFailed(views: Seq[InterfaceView], interfaceId: Identifier): Unit = { + val viewSearch = views.find(_.interfaceId.contains(interfaceId)) + + val view = assertDefined(viewSearch, "View could not be found") + + val actualInterfaceId = assertDefined(view.interfaceId, "Interface ID is not defined") + assertEquals("View has correct interface ID", interfaceId, actualInterfaceId) + + val status = assertDefined(view.viewStatus, "Status is not defined") + assertEquals("Status must be invalid argument", status.code, 9) + } + + private def assertViewEquals(views: Seq[InterfaceView], interfaceId: Identifier)( + checkValue: Record => Unit + ): Unit = { + val viewSearch = views.find(_.interfaceId.contains(interfaceId)) + + val view = assertDefined( + viewSearch, + s"View could not be found, there are: ${views.map(_.interfaceId).mkString("[", ",", "]")}", + ) + + val viewCount = views.count(_.interfaceId.contains(interfaceId)) + assertEquals(s"Only one view of interfaceId=$interfaceId must be defined", viewCount, 1) + + val actualInterfaceId = assertDefined(view.interfaceId, "Interface ID is not defined") + assertEquals("View has correct interface ID", actualInterfaceId, interfaceId) + + val status = assertDefined(view.viewStatus, "Status is not defined") + assertEquals("Status must be successful", status.code, 0) + + val actualValue = assertDefined(view.viewValue, "Value is not defined") + checkValue(actualValue) + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/LimitsIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/LimitsIT.scala new file mode 100644 index 
0000000000..a816523d5e --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/LimitsIT.scala @@ -0,0 +1,78 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.test.java.semantic.limits.{WithList, WithMap} + +import scala.jdk.CollectionConverters.* + +final class LimitsIT extends LedgerTestSuite { + + test( + "LLargeMapInContract", + "Create a contract with a field containing large map", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice))) => + val elements = (1 to 10000).map(e => (f"element_$e%08d", alice.getValue)).toMap.asJava + for { + contract: WithMap.ContractId <- ledger.create(alice, new WithMap(alice, elements))( + WithMap.COMPANION + ) + _ <- ledger.exercise(alice, contract.exerciseWithMap_Noop()) + } yield { + () + } + }) + + test( + "LLargeMapInChoice", + "Exercise a choice with a large map", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice))) => + val elements = (1 to 10000).map(e => (f"element_$e%08d", alice.getValue)).toMap.asJava + for { + contract: WithMap.ContractId <- ledger.create( + alice, + new WithMap(alice, Map.empty[String, String].asJava), + )(WithMap.COMPANION) + _ <- ledger.exercise(alice, contract.exerciseWithMap_Expand(elements)) + } yield { + () + } + }) + + test( + "LLargeListInContract", + "Create a contract with a field containing large list", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice))) => + val elements = (1 to 10000).map(e => f"element_$e%08d").asJava + for { + contract: WithList.ContractId <- ledger.create(alice, new WithList(alice, elements))( + WithList.COMPANION + ) + _ <- ledger.exercise(alice, contract.exerciseWithList_Noop()) + } yield { + () + } + }) + + test( + "LLargeListInChoice", + "Exercise a choice with a large list", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice))) => + val elements = (1 to 10000).map(e => f"element_$e%08d").asJava + for { + contract: WithList.ContractId <- ledger + .create(alice, new WithList(alice, List.empty[String].asJava))(WithList.COMPANION) + _ <- ledger.exercise(alice, contract.exerciseWithList_Expand(elements)) + } yield { + () + } + }) + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/MultiPartySubmissionIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/MultiPartySubmissionIT.scala new file mode 100644 index 0000000000..6ec97331b7 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/MultiPartySubmissionIT.scala @@ -0,0 +1,237 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
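// [Editorial sketch, not part of the upstream change] The suite that begins here
// exercises multi-party submission through the `actAs`/`readAs` split. Assuming the
// public Ledger API v2 `Commands` proto (the party strings below are placeholders),
// the request shape the test helpers populate is roughly:
//
//   import com.daml.ledger.api.v2.commands.Commands
//
//   val multiPartySubmission: Commands = Commands(
//     commandId = java.util.UUID.randomUUID().toString,
//     actAs = Seq("Alice::ns", "Bob::ns"), // submitters; their authority is combined
//     readAs = Seq("Charlie::ns"), // read delegation only; grants no authorization
//   )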
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, Party} +import com.daml.ledger.javaapi.data.codegen.ContractCompanion +import com.daml.ledger.test.java.model.test.MultiPartyContract +import com.digitalasset.canton.ledger.error.groups.{CommandExecutionErrors, ConsistencyErrors} + +import java.util.UUID +import java.util.regex.Pattern +import scala.concurrent.{ExecutionContext, Future} +import scala.jdk.CollectionConverters.* + +final class MultiPartySubmissionIT extends LedgerTestSuite { + implicit val multiPartyContractCompanion: ContractCompanion.WithoutKey[ + MultiPartyContract.Contract, + MultiPartyContract.ContractId, + MultiPartyContract, + ] = MultiPartyContract.COMPANION + + test( + "MPSSubmit", + "Submit creates a multi-party contract", + allocate(Parties(2)), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob))) => + // Create a contract for (Alice, Bob) + val request = ledger.submitRequest( + actAs = List(alice, bob), + readAs = List.empty, + commands = new MultiPartyContract(List(alice, bob).map(_.getValue).asJava, "").create.commands, + ) + + for { + _ <- ledger.submit(request) + completions <- ledger.firstCompletions(bob) + } yield { + assert(completions.length == 1) + assert(completions.head.commandId == request.commands.get.commandId) + } + }) + + test( + "MPSCreateSuccess", + "Create succeeds with sufficient authorization", + allocate(Parties(2)), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob))) => + for { + // Create a contract for (Alice, Bob) + _ <- ledger.create( + actAs = List(alice, bob), + readAs = List.empty, + template = new MultiPartyContract(List(alice, bob).map(_.getValue).asJava, ""), + ) + } yield () + }) + + test( + "MPSCreateInsufficientAuthorization", + "Create fails with insufficient authorization", + allocate(Parties(3)), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob, charlie))) => + for { + // Create a contract for (Alice, Bob, Charlie), but only submit as (Alice, Bob). + // Should fail because required authorizer Charlie is missing from submitters. 
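// [Editorial note, not part of the upstream change] Why this must fail: Daml's
// authorization rule for `create` requires every signatory of the new contract to be
// covered by the submission's actAs parties. Sketched with placeholder names:
//
//   val requiredAuthorizers = Set("Alice", "Bob", "Charlie") // contract signatories
//   val submitters = Set("Alice", "Bob") // actAs on the submission
//   requiredAuthorizers.subsetOf(submitters) // false => AuthorizationError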
+ failure <- ledger + .create( + actAs = List(alice, bob), + readAs = List.empty, + template = new MultiPartyContract(List(alice, bob, charlie).map(_.getValue).asJava, ""), + ) + .mustFail("submitting a contract with a missing authorizer") + } yield { + assertGrpcErrorRegex( + failure, + CommandExecutionErrors.Interpreter.AuthorizationError, + None, + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "MPSAddSignatoriesSuccess", + "Exercise AddSignatories succeeds with sufficient authorization", + allocate(Parties(4)), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob, charlie, david))) => + for { + // Create a contract for (Alice, Bob) + (contract, _) <- createMultiPartyContract(ledger, List(alice, bob)) + + // Exercise a choice to add (Charlie, David) + // Requires authorization from all four parties + _ <- ledger.exercise( + actAs = List(alice, bob, charlie, david), + readAs = List.empty, + exercise = + contract.exerciseMPAddSignatories(List(alice, bob, charlie, david).map(_.getValue).asJava), + ) + } yield () + }) + + test( + "MPSAddSignatoriesInsufficientAuthorization", + "Exercise AddSignatories fails with insufficient authorization", + allocate(Parties(4)), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob, charlie, david))) => + for { + // Create a contract for (Alice, Bob) + (contract, _) <- createMultiPartyContract(ledger, List(alice, bob)) + + // Exercise a choice to add (Charlie, David) to the list of signatories + // Should fail as it's missing authorization from one of the original signatories (Alice) + failure <- ledger + .exercise( + actAs = List(bob, charlie, david), + readAs = List.empty, + exercise = contract.exerciseMPAddSignatories( + List(alice, bob, charlie, david).map(_.getValue).asJava + ), + ) + .mustFail("exercising a choice with a missing authorizer") + } yield { + assertGrpcErrorRegex( + failure, + CommandExecutionErrors.Interpreter.AuthorizationError, + None, + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "MPSFetchOtherSuccess", + "Exercise FetchOther succeeds with sufficient authorization and read delegation", + allocate(Parties(4)), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob, charlie, david))) => + for { + // Create contract A for (Alice, Bob) + (contractA, _) <- createMultiPartyContract(ledger, List(alice, bob)) + + // Create contract B for (Alice, Bob, Charlie, David) + (contractB, _) <- createMultiPartyContract(ledger, List(alice, bob, charlie, david)) + + // Fetch contract A through contract B as (Charlie, David) + _ <- ledger.exercise( + actAs = List(charlie, david), + readAs = List(alice), + exercise = + contractB.exerciseMPFetchOther(contractA, List(charlie, david).map(_.getValue).asJava), + ) + } yield () + }) + + test( + "MPSFetchOtherInsufficientAuthorization", + "Exercise FetchOther fails with insufficient authorization", + allocate(Parties(4)), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob, charlie, david))) => + for { + // Create contract A for (Alice, Bob) + (contractA, _) <- createMultiPartyContract(ledger, List(alice, bob)) + + // Create contract B for (Charlie, David) + (contractB, _) <- createMultiPartyContract(ledger, List(charlie, david)) + + // Fetch contract A through contract B as (Charlie, David) + // Should fail with an authorization error + failure <- ledger + .exercise( + actAs = List(charlie, david), + readAs = List(bob, alice), + exercise = + contractB.exerciseMPFetchOther(contractA, List(charlie,
david).map(_.getValue).asJava), + ) + .mustFail("exercising a choice without authorization to fetch another contract") + } yield { + assertGrpcErrorRegex( + failure, + CommandExecutionErrors.Interpreter.AuthorizationError, + Some(Pattern.compile("of the fetched contract to be an authorizer, but authorizers were")), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "MPSFetchOtherInvisible", + "Exercise FetchOther fails because the contract isn't visible", + allocate(Parties(4)), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob, charlie, david))) => + for { + // Create contract A for (Alice, Bob) + (contractA, _) <- createMultiPartyContract(ledger, List(alice, bob)) + + // Create contract B for (Alice, Bob, Charlie, David) + (contractB, _) <- createMultiPartyContract(ledger, List(alice, bob, charlie, david)) + + // Fetch contract A through contract B as (Charlie, David) + // Should fail with an interpretation error because the fetched contract isn't visible to any submitter + failure <- ledger + .exercise( + actAs = List(charlie, david), + readAs = List.empty, + exercise = + contractB.exerciseMPFetchOther(contractA, List(charlie, david).map(_.getValue).asJava), + ) + .mustFail("exercising a choice without authorization to fetch another contract") + } yield { + assertGrpcErrorRegex( + failure, + ConsistencyErrors.ContractNotFound, + Some(Pattern.compile("Contract could not be found")), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + private[this] def createMultiPartyContract( + ledger: ParticipantTestContext, + submitters: List[Party], + value: String = UUID.randomUUID().toString, + )(implicit + ec: ExecutionContext + ): Future[(MultiPartyContract.ContractId, MultiPartyContract)] = + ledger + .create( + actAs = submitters, + readAs = List.empty, + template = new MultiPartyContract(submitters.map(_.getValue).asJava, value), + ) + .map(cid => cid -> new MultiPartyContract(submitters.map(_.getValue).asJava, value)) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/PackageManagementServiceIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/PackageManagementServiceIT.scala new file mode 100644 index 0000000000..a04ab892c4 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/PackageManagementServiceIT.scala @@ -0,0 +1,106 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.{ + LedgerTestSuite, + PackageManagementTestDar, + TestConstraints, +} +import com.daml.ledger.test.java.package_management.packagemanagementtest.PackageManagementTestTemplate +import com.digitalasset.canton.ledger.error.PackageServiceErrors +import com.google.protobuf.ByteString + +import java.util.regex.Pattern +import scala.concurrent.{ExecutionContext, Future} + +final class PackageManagementServiceIT extends LedgerTestSuite { + private[this] val testPackageResourcePath = PackageManagementTestDar.path + + private def loadTestPackage()(implicit ec: ExecutionContext): Future[ByteString] = { + val testPackage = Future { + val in = getClass.getClassLoader.getResourceAsStream(testPackageResourcePath) + assert(in != null, s"Unable to load test package resource at '$testPackageResourcePath'") + in + } + val bytes = testPackage.map(ByteString.readFrom) + bytes.onComplete(_ => testPackage.map(_.close())) + bytes + } + + test( + "PMEmptyUpload", + "An attempt at uploading an empty payload should fail", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + failure <- ledger + .uploadDarFileAndVetOnConnectedSynchronizers(ByteString.EMPTY) + .mustFail("uploading an empty package") + } yield { + assertGrpcErrorRegex( + failure, + PackageServiceErrors.Reading.InvalidDar, + Some(Pattern.compile("Invalid DAR: package-upload|Dar file is corrupt")), + ) + } + }) + + test( + "PMDuplicateSubmissionId", + "Duplicate submission ids are accepted when a package is uploaded twice", + allocate(NoParties, NoParties), + )(implicit ec => { case Participants(Participant(alpha, Seq()), Participant(beta, Seq())) => + // Multiple package uploads should always succeed. The participant adds extra entropy to + // the submission id to ensure the client does not inadvertently cause problems through + // poor selection of submission ids.
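// [Editorial sketch, not part of the upstream change] The expectation here and in
// PMLoad below is idempotency: re-uploading the same DAR, even concurrently, must
// succeed and leave exactly one copy of the package. A generic helper capturing that
// (hypothetical; `upload` stands in for uploadDarFileAndVetOnConnectedSynchronizers):
def uploadRepeatedly(upload: () => Future[Unit], times: Int)(implicit ec: ExecutionContext): Future[Unit] =
  Future.sequence(Vector.fill(times)(upload())).map(_ => ()) // every attempt must succeed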
+ for { + testPackage <- loadTestPackage() + _ <- alpha.uploadDarFileAndVetOnConnectedSynchronizers(testPackage) + _ <- beta.uploadDarFileAndVetOnConnectedSynchronizers(testPackage) + } yield () + }) + + test( + "PMLoad", + "Concurrent uploads of the same package should be idempotent and result in the package being available for use", + allocate(SingleParty), + limitation = TestConstraints.GrpcOnly( + "PackageManagementService listKnownPackages is not available in JSON" + ), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + testPackage <- loadTestPackage() + _ <- Future.sequence( + Vector.fill(8)(ledger.uploadDarFileAndVetOnConnectedSynchronizers(testPackage)) + ) + knownPackages <- ledger.listKnownPackages() + contract <- ledger.create(party, new PackageManagementTestTemplate(party))( + PackageManagementTestTemplate.COMPANION + ) + acsBefore <- ledger.activeContracts(Some(Seq(party))) + _ <- ledger.exercise(party, contract.exerciseTestChoice()) + acsAfter <- ledger.activeContracts(Some(Seq(party))) + } yield { + val duplicatePackageIds = + knownPackages.groupBy(_.packageId).view.mapValues(_.size).filter(_._2 > 1).toMap + assert( + duplicatePackageIds.isEmpty, + s"There are duplicate package identifiers: ${duplicatePackageIds + .map { case (name, count) => s"$name ($count)" } + .mkString(", ")}", + ) + assert( + acsBefore.size == 1, + "After the contract has been created there should be one active contract but none was found", + ) + assert( + acsAfter.isEmpty, + s"There should be no active contract after the contract has been consumed: ${acsAfter.map(_.contractId).mkString(", ")}", + ) + } + }) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/PackageServiceIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/PackageServiceIT.scala new file mode 100644 index 0000000000..6a585a411c --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/PackageServiceIT.scala @@ -0,0 +1,95 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved.
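// [Editorial sketch, not part of the upstream change] PackagesGetKnown below asserts
// that a package's hash equals its package ID because Daml-LF package IDs are
// content-addressed. Assuming the ID is the lowercase-hex SHA-256 of the archive
// payload, the relation can be recomputed like this:
//
//   import java.security.MessageDigest
//
//   def packageIdOf(archivePayload: Array[Byte]): String =
//     MessageDigest
//       .getInstance("SHA-256")
//       .digest(archivePayload)
//       .map("%02x".format(_))
//       .mkString // expected to equal both `somePackage.hash` and the package ID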
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors + +import scala.concurrent.Future + +final class PackageServiceIT extends LedgerTestSuite { + + /** A package ID that is guaranteed to not be uploaded */ + private[this] val unknownPackageId = " " + + test("PackagesList", "Listing packages should return a result", allocate(NoParties))( + implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + knownPackages <- ledger.listPackages() + } yield assert( + knownPackages.size >= 3, + s"List of packages was expected to contain at least 3 packages, got ${knownPackages.size} instead.", + ) + } + ) + + test( + "PackagesGetKnown", + "Getting package content should return a valid result", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + somePackageId <- ledger + .listPackages() + .map(_.headOption.getOrElse(fail("No package found"))) + somePackage <- ledger.getPackage(somePackageId) + } yield { + assert(somePackage.hash.length > 0, s"Package $somePackageId has an empty hash.") + assert( + somePackage.hash == somePackageId, + s"Package $somePackageId has hash ${somePackage.hash}, expected hash to be equal to the package ID.", + ) + assert(somePackage.archivePayload.size() > 0, s"Package $somePackageId has zero size.") + } + }) + + test( + "PackagesGetUnknown", + "Getting package content for an unknown package should fail", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + failure <- ledger + .getPackage(unknownPackageId) + .mustFail("getting the contents of an unknown package") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.NotFound.Package, + None, + ) + } + }) + + test( + "PackagesStatusKnown", + "Getting package status should return a valid result", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + packageIds <- ledger + .listPackages() + .map(packages => packages.headOption.fold(fail("No package found"))(_ => packages)) + statuses <- Future.traverse(packageIds)(ledger.getPackageStatus) + } yield { + statuses + .find(_.isPackageStatusRegistered) + .getOrElse(fail(s"No registered package found among: ${packageIds.toString}")) + } + }) + + test( + "PackagesStatusUnknown", + "Getting package status for an unknown package should fail", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + status <- ledger.getPackageStatus(unknownPackageId) + } yield { + assert(status.isPackageStatusUnspecified, s"Package $unknownPackageId is not unknown.") + } + }) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/ParticipantPruningIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/ParticipantPruningIT.scala new file mode 100644 index 0000000000..7e0ee7ae1f --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/ParticipantPruningIT.scala @@ -0,0 +1,837 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved.
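// [Editorial note, not part of the upstream change] The positive pruning tests in the
// suite below share one skeleton: populate the ledger, prune up to an offset strictly
// before the ledger end, then verify that reads at or before the cut-off fail with
// PARTICIPANT_PRUNED_DATA_ACCESSED while later reads still serve data. Condensed with
// this suite's own helpers (`requestBeforeCutoff` is a placeholder):
//
//   for {
//     offsets <- populateLedgerAndGetOffsets(participant, submitter)
//     pruneUpTo = offsets(lastItemToPruneIndex)
//     _ <- participant.prune(pruneUpTo)
//     _ <- participant
//       .transactions(requestBeforeCutoff)
//       .mustFail("reading into the pruned range")
//   } yield ()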
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.{Participant, *} +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.Eventually.eventually +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.testtool.infrastructure.{FutureAssertions, LedgerTestSuite, Party} +import com.daml.ledger.api.v2.event_query_service.GetEventsByContractIdRequest +import com.daml.ledger.api.v2.transaction.Transaction +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_LEDGER_EFFECTS +import com.daml.ledger.javaapi.data.codegen.ContractCompanion +import com.daml.ledger.test.java.model +import com.daml.ledger.test.java.semantic.divulgencetests +import com.daml.ledger.test.java.semantic.divulgencetests.{Contract, Dummy} +import com.daml.logging.LoggingContext +import com.digitalasset.canton.ledger.api.TransactionShape.{AcsDelta, LedgerEffects} +import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors + +import java.util.regex.Pattern +import scala.concurrent.duration.DurationInt +import scala.concurrent.{ExecutionContext, Future} + +class ParticipantPruningIT extends LedgerTestSuite { + import CompanionImplicits.* + implicit val contractCompanion + : ContractCompanion.WithoutKey[Contract.Contract$, Contract.ContractId, Contract] = + Contract.COMPANION + implicit val semanticTestsDummyCompanion: ContractCompanion.WithoutKey[ + divulgencetests.Dummy.Contract, + divulgencetests.Dummy.ContractId, + divulgencetests.Dummy, + ] = divulgencetests.Dummy.COMPANION + + private implicit val loggingContext: LoggingContext = LoggingContext.ForTesting + + // One point of populating the ledger with a lot of events is to help advance canton's safe-pruning offsets + private val batchesToPopulate = 74 + + private val lastItemToPruneIndex = batchesToPopulate + + test( + "PRFailPruneByNoOffset", + "Pruning a participant without specifying an offset should fail", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(participant, Seq())) => + for { + failure <- participant + .prune( + 0, + attempts = 1, + ) + .mustFail("pruning without specifying an offset") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.InvalidArgument, + Some("prune_up_to not specified or zero"), + ) + } + }) + + test( + "PRQueryLatestPrunedOffsets", + "It should be possible to query the latest pruned offsets", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice))) => + for { + (prunedUpToInclusive_initial, allDivulgencePrunedUpToInclusive_initial) <- + ledger.latestPrunedOffsets() + + // Move the ledger end forward + _ <- ledger.create(alice, new Dummy(alice)) + firstPruningOffset <- ledger.currentEnd() + + // Add one more element to bypass pruning to ledger end restriction + // and allow pruning at the first pruning offset + _ <- ledger.create(alice, new Dummy(alice)) + secondPruningOffset <- ledger.currentEnd() + + // Add one element to bypass pruning to ledger end restriction + // and allow pruning at the second pruning offset + _ <- ledger.create(alice, new Dummy(alice)) + + // Prune the ledger at firstPruningOffset + _ <- ledger.pruneCantonSafe( + firstPruningOffset, + alice, + p => new Dummy(p).create.commands, + ) + ( + prunedUpToInclusive_afterFirstPruning, + allDivulgencePrunedUpToInclusive_afterFirstPruning, + ) <- 
ledger.latestPrunedOffsets() + + // Prune the ledger with divulgence at secondPruningOffset + _ <- ledger.pruneCantonSafe( + secondPruningOffset, + alice, + p => new Dummy(p).create.commands, + ) + ( + prunedUpToInclusive_afterSecondPruning, + allDivulgencePrunedUpToInclusive_afterSecondPruning, + ) <- ledger.latestPrunedOffsets() + } yield { + assert( + assertion = prunedUpToInclusive_initial < prunedUpToInclusive_afterFirstPruning, + message = + s"The initial pruning offset ($prunedUpToInclusive_initial) should be different than the latest pruning offset ($prunedUpToInclusive_afterFirstPruning)", + ) + + assertEquals( + "Requested pruning offset matches the queried offset", + prunedUpToInclusive_afterFirstPruning, + firstPruningOffset, + ) + + assertEquals( + "All divulgence pruning offset matches the requested pruning offset with all divulgence pruning enabled", + allDivulgencePrunedUpToInclusive_afterFirstPruning, + firstPruningOffset, + ) + + assert( + assertion = + allDivulgencePrunedUpToInclusive_afterFirstPruning < allDivulgencePrunedUpToInclusive_afterSecondPruning, + message = + "Divulgence pruning offset advanced as well after the second prune call with all divulgence pruning enabled", + ) + + assertEquals( + "Pruning offsets are equal after all divulgence pruning", + allDivulgencePrunedUpToInclusive_afterSecondPruning, + prunedUpToInclusive_afterSecondPruning, + ) + } + }) + + test( + "PRFailPruneByNegativeOffset", + "Pruning a participant specifying a negative offset should fail", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(participant, Seq())) => + val negativeOffset: Long = -134134134L + for { + cannotPruneNegativeOffset <- participant + .prune( + negativeOffset, + attempts = 1, + ) + .mustFail("pruning, specifying a negative offset") + } yield { + assertGrpcError( + cannotPruneNegativeOffset, + RequestValidationErrors.NonPositiveOffset, + Some(s"Offset $negativeOffset in prune_up_to is not a positive integer"), + ) + } + }) + + test( + "PRFailPruneByOutOfBoundsOffset", + "Pruning a participant specifying an offset after the ledger end should fail", + allocate(NoParties), + runConcurrently = + false, // in spite of being a negative test, cannot be run concurrently as otherwise ledger end grows + )(implicit ec => { case Participants(Participant(participant, Seq())) => + eventually("participantEndShouldNotIncrease") { + for { + endBefore <- participant.currentEnd() + cannotPruneOffsetBeyondEnd <- participant + .prune(endBefore, attempts = 1) + .mustFail("pruning, specifying an offset after the ledger end") + endAfter <- participant.currentEnd() + } yield { + // participant end should not have been increased after we have retrieved it since it can lead to a different error + assertEquals(endAfter, endBefore) + assertGrpcError( + cannotPruneOffsetBeyondEnd, + RequestValidationErrors.OffsetOutOfRange, + Some("prune_up_to needs to be before ledger end"), + ) + } + } + }) + + test( + "PRPruneTxLedgerEffects", + "Prune succeeds as observed by transaction (LedgerEffects) lookups", + allocate(SingleParty), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(participant, Seq(submitter))) => + for { + offsets <- populateLedgerAndGetOffsets(participant, submitter) + offsetToPruneUpTo = offsets(lastItemToPruneIndex) + offsetOfSecondToLastPrunedTransaction = offsets( + lastItemToPruneIndex - 1 + ) // This offset is the largest exclusive offset we can no longer read from after + offsetOfFirstSurvivingTransaction = 
offsets(lastItemToPruneIndex + 1) + + _ <- participant.prune(offsetToPruneUpTo) + + txReqAfterPrune <- participant + .getTransactionsRequest( + transactionFormat = participant + .transactionFormat( + parties = Some(Seq(submitter)), + transactionShape = LedgerEffects, + ), + begin = offsetToPruneUpTo, + ) + transactionsAfterPrune <- participant.transactions(txReqAfterPrune) + + txReq <- participant + .getTransactionsRequest( + transactionFormat = participant.transactionFormat( + parties = Some(Seq(submitter)), + transactionShape = LedgerEffects, + ), + begin = offsetOfSecondToLastPrunedTransaction, + ) + cannotReadAnymore <- participant + .transactions(txReq) + .mustFail("attempting to read transactions before the pruning cut-off") + + cannotReadFromParticipantBegin <- participant + .transactions(txReq.update(_.beginExclusive := 0L)) + .mustFail( + "attempting to read transactions from participant begin after pruning has occurred" + ) + } yield { + assert( + transactionsAfterPrune.head.offset == offsetOfFirstSurvivingTransaction, + s"transactions not pruned at expected offset", + ) + assertGrpcErrorRegex( + cannotReadAnymore, + RequestValidationErrors.ParticipantPrunedDataAccessed, + Some( + Pattern.compile( + s"(Transactions request from [0-9]* to [0-9]* precedes pruned offset $offsetToPruneUpTo)|(Request from [0-9]* precedes pruned offset $offsetToPruneUpTo)" + ) + ), + ) + assertGrpcErrorRegex( + cannotReadFromParticipantBegin, + RequestValidationErrors.ParticipantPrunedDataAccessed, + Some( + Pattern.compile( + s"(Transactions request from [0-9]* to [0-9]* precedes pruned offset $offsetToPruneUpTo)|(Request from [0-9]* precedes pruned offset $offsetToPruneUpTo)" + ) + ), + ) + } + }) + + test( + "PRPruneTxAcsDelta", + "Prune succeeds as observed by transaction (AcsDelta) lookups", + allocate(SingleParty), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(participant, Seq(submitter))) => + for { + offsets <- populateLedgerAndGetOffsets(participant, submitter) + offsetToPruneUpTo = offsets(lastItemToPruneIndex) + offsetOfSecondToLastPrunedTransaction = offsets( + lastItemToPruneIndex - 1 + ) // This offset is the largest exclusive offset we can no longer read from after + offsetOfFirstSurvivingTransaction = offsets(lastItemToPruneIndex + 1) + + _ <- participant.prune(offsetToPruneUpTo) + + txReqAfterPrune <- participant + .getTransactionsRequest( + transactionFormat = participant.transactionFormat(parties = Some(Seq(submitter))), + begin = offsetToPruneUpTo, + ) + txAfterPrune <- participant.transactions(txReqAfterPrune) + + txReq <- participant + .getTransactionsRequest( + transactionFormat = participant.transactionFormat(parties = Some(Seq(submitter))), + begin = offsetOfSecondToLastPrunedTransaction, + ) + cannotReadAnymore <- participant + .transactions(txReq) + .mustFail("attempting to read transactions before the pruning cut-off") + + cannotReadFromParticipantBegin <- participant + .transactions(txReq.update(_.beginExclusive := 0L)) + .mustFail( + "attempting to read transactions from participant begin after pruning has occurred" + ) + } yield { + assert( + txAfterPrune.head.offset == offsetOfFirstSurvivingTransaction, + s"flat transactions not pruned at expected offset", + ) + assertGrpcErrorRegex( + cannotReadAnymore, + RequestValidationErrors.ParticipantPrunedDataAccessed, + Some( + Pattern.compile( + s"(Transactions request from [0-9]* to [0-9]* precedes pruned offset $offsetToPruneUpTo)|(Request from [0-9]* precedes pruned offset $offsetToPruneUpTo)" 
+ ) + ), + ) + assertGrpcErrorRegex( + cannotReadFromParticipantBegin, + RequestValidationErrors.ParticipantPrunedDataAccessed, + Some( + Pattern.compile( + s"(Transactions request from [0-9]* to [0-9]* precedes pruned offset $offsetToPruneUpTo)|(Request from [0-9]* precedes pruned offset $offsetToPruneUpTo)" + ) + ), + ) + } + }) + + test( + "PRPruneCompletions", + "Prune succeeds as observed by command completions", + allocate(SingleParty), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(participant, Seq(submitter))) => + for { + endOffsetAtTestStart <- participant.currentEnd() + offsets <- populateLedgerAndGetOffsets(participant, submitter) + offsetToPruneUpTo = offsets(lastItemToPruneIndex) + offsetOfSecondToLastPrunedCheckpoint = offsets( + lastItemToPruneIndex - 1 + ) // This offset is the largest exclusive offset we can no longer read from after + offsetOfFirstSurvivingCheckpoint = offsets(lastItemToPruneIndex + 1) + + firstCheckpointBeforePrune <- participant + .offsets( + 1, + participant.completionStreamRequest(endOffsetAtTestStart)(submitter), + ) + .map(_.head) + + _ <- participant.prune(offsetToPruneUpTo) + + firstCheckpointsAfterPrune <- participant + .offsets( + 1, + participant + .completionStreamRequest(offsetToPruneUpTo)(submitter), + ) + .map(_.head) + + cannotReadAnymore <- participant + .offsets( + 1, + participant.completionStreamRequest(offsetOfSecondToLastPrunedCheckpoint)(submitter), + ) + .mustFail("attempting to read transactions before the pruning cut-off") + + cannotReadFromParticipantBegin <- participant + .offsets( + 1, + participant.completionStreamRequest(0L)(submitter), + ) + .mustFail( + "attempting to read transactions from participant begin after pruning has occurred" + ) + } yield { + assert( + firstCheckpointBeforePrune < offsetToPruneUpTo + ) + assert( + firstCheckpointsAfterPrune == offsetOfFirstSurvivingCheckpoint, + s"first checkpoint offset $firstCheckpointsAfterPrune after pruning does not match expected offset $offsetOfFirstSurvivingCheckpoint", + ) + assertGrpcErrorRegex( + cannotReadAnymore, + RequestValidationErrors.ParticipantPrunedDataAccessed, + Some( + Pattern.compile( + s"Command completions? request from [0-9]* to [0-9]* overlaps with pruned offset $offsetToPruneUpTo" + ) + ), + ) + assertGrpcErrorRegex( + cannotReadFromParticipantBegin, + RequestValidationErrors.ParticipantPrunedDataAccessed, + Some( + Pattern.compile( + s"Command completions? 
request from [0-9]* to [0-9]* overlaps with pruned offset $offsetToPruneUpTo" + ) + ), + ) + } + }) + + test( + "PRPruneACS", + "Prune succeeds by not affecting active contracts", + allocate(SingleParty), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(participant, Seq(submitter))) => + for { + offsets <- populateLedgerAndGetOffsets(participant, submitter) + offsetToPruneUpTo = offsets(lastItemToPruneIndex) + + createdBefore <- participant.activeContracts(Some(Seq(submitter))) + + _ <- participant.prune(offsetToPruneUpTo) + + createdAfter <- participant.activeContracts(Some(Seq(submitter))) + } yield { + assert(createdBefore == createdAfter, "Pruning should not alter the set of active contracts") + } + }) + + test( + "PRPruneByTxId", + "Prune succeeds as observed by individual transaction lookups", + allocate(SingleParty), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(participant, Seq(submitter))) => + for { + offsetAndTransactionIdEntries <- populateLedgerAndGetOffsetsWithTransactionIds( + participant, + submitter, + ) + offsetToPruneUpTo = offsetAndTransactionIdEntries(lastItemToPruneIndex)._1 + transactionsPerBatch = offsetAndTransactionIdEntries.size / batchesToPopulate + prunedTransactionIds = Range( + lastItemToPruneIndex - transactionsPerBatch + 1, + lastItemToPruneIndex + 1, + ).toVector.map(offsetAndTransactionIdEntries(_)._2) + unprunedTransactionIds = Range( + lastItemToPruneIndex + 1, + lastItemToPruneIndex + transactionsPerBatch + 1, + ).toVector + .map(offsetAndTransactionIdEntries(_)._2) + + _ <- participant.prune(offsetToPruneUpTo) + + prunedTransactions <- Future.sequence( + prunedTransactionIds.map( + participant + .transactionById(_, Seq(submitter), AcsDelta) + .mustFail("attempting to read transactions before the pruning cut-off") + ) + ) + + _ <- Future.sequence( + unprunedTransactionIds.map(participant.transactionById(_, Seq(submitter), AcsDelta)) + ) + } yield { + prunedTransactions.foreach( + assertGrpcError( + _, + RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + ) + } + }) + + test( + "PRPruneByOffset", + "Prune succeeds as observed by individual event lookups via transaction", + allocate(SingleParty), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(participant, Seq(submitter))) => + for { + offsets <- populateLedgerAndGetOffsets(participant, submitter) + offsetToPruneUpTo = offsets(lastItemToPruneIndex) + transactionsPerBatch = offsets.size / batchesToPopulate + prunedTransactionOffsets = Range( + lastItemToPruneIndex - transactionsPerBatch + 1, + lastItemToPruneIndex + 1, + ).toVector.map(offsets(_)) + unprunedTransactionOffsets = Range( + lastItemToPruneIndex + 1, + lastItemToPruneIndex + transactionsPerBatch + 1, + ).toVector + .map(offsets(_)) + + _ <- participant.prune(offsetToPruneUpTo) + + prunedFlatTransactions <- Future.sequence( + prunedTransactionOffsets.map( + participant + .transactionByOffset(_, Seq(submitter), AcsDelta) + .mustFail("attempting to read transactions before the pruning cut-off") + ) + ) + + _ <- Future.sequence( + unprunedTransactionOffsets.map(participant.transactionByOffset(_, Seq(submitter), AcsDelta)) + ) + } yield { + prunedFlatTransactions.foreach( + assertGrpcError( + _, + RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + ) + } + }) + + test( + "PRPruneRepeated", + "Prune succeeds when called repeatedly", + allocate(SingleParty), + runConcurrently = 
false, + )(implicit ec => { case Participants(Participant(participant, Seq(submitter))) => + for { + offsets <- populateLedgerAndGetOffsets(participant, submitter) + offsetToPruneUpTo = offsets(lastItemToPruneIndex) + offsetOfFirstSurvivingTransaction = offsets(lastItemToPruneIndex + 1) + + _ <- participant.prune(offsetToPruneUpTo) + + txReqAfterPrune <- participant + .getTransactionsRequest(participant.transactionFormat(parties = Some(Seq(submitter)))) + .map( + _.update( + _.beginExclusive := offsetToPruneUpTo + ) + ) + transactionsAfterPrune <- participant.transactions(txReqAfterPrune) + + offsetAlreadyPruned = offsets(lastItemToPruneIndex / 2) + + _ <- participant.prune(offsetAlreadyPruned) + + txReqAfterRedundantPrune <- participant + .getTransactionsRequest(participant.transactionFormat(parties = Some(Seq(submitter)))) + .map( + _.update( + _.beginExclusive := offsetToPruneUpTo + ) + ) + transactionsAfterRedundantPrune <- participant.transactions(txReqAfterRedundantPrune) + + offsetToPruneUpToInSecondRealPrune = offsets((lastItemToPruneIndex + 1) * 2 - 1) + offsetOfFirstSurvivingTransactionInSecondPrune = offsets((lastItemToPruneIndex + 1) * 2) + + // Add more events before second prune too to advance canton's safe pruning offset + offsetsFollowingSecondRealPrune <- populateLedgerAndGetOffsets(participant, submitter) + + _ <- participant.prune(offsetToPruneUpToInSecondRealPrune) + + txReqAfterSecondPrune <- participant + .getTransactionsRequest(participant.transactionFormat(parties = Some(Seq(submitter)))) + .map( + _.update( + _.beginExclusive := offsetToPruneUpToInSecondRealPrune + ) + ) + transactionsAfterSecondPrune <- participant.transactions(txReqAfterSecondPrune) + + } yield { + assert( + transactionsAfterPrune.size == offsets.size - (lastItemToPruneIndex + 1), + s"transaction count after pruning does not match expected count", + ) + assert( + transactionsAfterPrune.head.offset == offsetOfFirstSurvivingTransaction, + s"transaction not pruned at expected offset", + ) + assert( + transactionsAfterRedundantPrune.size == offsets.size - (lastItemToPruneIndex + 1), + s"transaction count after redundant pruning does not match expected count", + ) + assert( + transactionsAfterRedundantPrune.head.offset == offsetOfFirstSurvivingTransaction, + s"transaction not pruned at expected offset after redundant prune", + ) + assert( + transactionsAfterSecondPrune.size == offsets.size - 2 * (lastItemToPruneIndex + 1) + offsetsFollowingSecondRealPrune.size, + s"transaction count after second pruning does not match expected count", + ) + assert( + transactionsAfterSecondPrune.head.offset == offsetOfFirstSurvivingTransactionInSecondPrune, + s"transaction not pruned at expected offset after second prune", + ) + } + }) + + test( + "PRPruneThenExercise", + "Prune succeeds as observed by being able to exercise a contract created in pruned offset range", + allocate(SingleParty), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(participant, Seq(submitter))) => + for { + createdBeforePrune <- participant.create(submitter, new model.test.Dummy(submitter)) + + offsets <- populateLedgerAndGetOffsets(participant, submitter) + offsetToPruneUpTo = offsets(lastItemToPruneIndex) + + _ <- participant.prune(offsetToPruneUpTo) + + _ <- participant.exercise(submitter, createdBeforePrune.exerciseDummyChoice1()) + } yield () + }) + + test( + "PRPruneQueryEmptyRangeOk", + // This test is not terribly useful for conformance, but helps ensure that pruning does not interfere when run before + // 
TransactionServiceStreamingIT "TXEndToEnd" tests. + "Prune succeeds and does not prevent querying empty ranges even in pruned space", + allocate(SingleParty), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(participant, Seq(submitter))) => + for { + ledgerEnd <- participant.currentEnd() + offsets <- populateLedgerAndGetOffsets(participant, submitter) + offsetInPrunedRange = offsets(lastItemToPruneIndex / 2) + offsetToPruneUpTo = offsets(lastItemToPruneIndex) + + _ <- participant.prune(offsetToPruneUpTo) + + emptyRangeBeforePruning <- participant + .getTransactionsRequest( + participant.transactionFormat(parties = Some(Seq(submitter))), + begin = ledgerEnd, + ) + .map(_.update(_.endInclusive := ledgerEnd)) + + emptyRangeInPrunedSpace <- participant + .getTransactionsRequest( + participant.transactionFormat(parties = Some(Seq(submitter))), + begin = offsetInPrunedRange, + ) + .map( + _.update( + _.endInclusive := offsetInPrunedRange + ) + ) + + emptyBeginLedgerEffectsWillFail <- participant.transactions( + emptyRangeBeforePruning.update( + _.updateFormat.includeTransactions.transactionShape := TRANSACTION_SHAPE_LEDGER_EFFECTS + ) + ) + emptyBeginAcsDeltaWillFail <- participant.transactions(emptyRangeBeforePruning) + emptyPrunedLedgerEffectsWillFail <- participant.transactions( + emptyRangeInPrunedSpace.update( + _.updateFormat.includeTransactions.transactionShape := TRANSACTION_SHAPE_LEDGER_EFFECTS + ) + ) + emptyPrunedAcsDeltaWillFail <- participant.transactions(emptyRangeInPrunedSpace) + } yield { + assert(emptyBeginLedgerEffectsWillFail.isEmpty) + assert(emptyBeginAcsDeltaWillFail.isEmpty) + assert(emptyPrunedLedgerEffectsWillFail.isEmpty) + assert(emptyPrunedAcsDeltaWillFail.isEmpty) + } + }) + + test( + "PREventsByContractIdPruned", + "Ensure that EventsByContractId works as expected with pruned data", + allocate(SingleParty), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(participant, Seq(party))) => + def getEvents(dummyCid: model.test.Dummy.ContractId): Future[Int] = { + val request = GetEventsByContractIdRequest( + contractId = dummyCid.contractId, + eventFormat = Some(participant.eventFormat(verbose = true, Some(Seq(party)))), + ) + + participant + .getEventsByContractId(request) + .map(r => r.created.fold(0)(_ => 1) + r.archived.fold(0)(_ => 1)) + .recover { + case error if error.getMessage.contains("CONTRACT_EVENTS_NOT_FOUND") => 0 + } + } + + for { + dummyCid <- participant.create(party, new model.test.Dummy(party)) + end1 <- pruneToCurrentEnd(participant, party) + events1 <- getEvents(dummyCid) + exerciseCmd = participant.submitAndWaitRequest( + party, + dummyCid.exerciseDummyChoice1().commands, + ) + _ <- participant.submitAndWait(exerciseCmd) + events2 <- getEvents(dummyCid) + _ <- participant.prune(end1) + events3 <- getEvents(dummyCid) + _ <- pruneToCurrentEnd(participant, party) // TODO(#16361) - This line causes the problem + events4 <- getEvents(dummyCid) + } yield { + assertEquals("Expected single create event after prune", events1, 1) + assertEquals("Expected create and consume event before prune", events2, 2) + assertEquals( + "Pruning to a point before create and consume does not remove events", + events3, + 2, + ) + assertEquals("Expected no events following prune", events4, 0) + } + }) + + // TODO(i16065): Re-enable getEventsByContractKey tests +// test( +// "PREventsByContractKey", +// "Ensure that EventsByContractKey works as expected with pruned data", +// allocate(SingleParty), +// runConcurrently = 
false, +// )(implicit ec => { case Participants(Participant(participant, Seq(party))) => +// val exercisedKey = "pruning test key" +// val key = makeTextKeyKey(party, exercisedKey) +// +// def getEvents: Future[Int] = participant +// .getEventsByContractKey( +// GetEventsByContractKeyRequest( +// contractKey = Some(key), +// templateId = Some(Identifier.fromJavaProto(TextKey.TEMPLATE_ID.toProto)), +// requestingParties = Seq(party), +// ) +// ) +// .map(r => r.createEvent.fold(0)(_ => 1) + r.archiveEvent.fold(0)(_ => 1)) +// +// for { +// textKeyCid1: TextKey.ContractId <- participant.create( +// party, +// new TextKey(party, exercisedKey, JList.of()), +// ) +// _ <- pruneToCurrentEnd(participant, party).map(_.getAbsolute) +// events1 <- getEvents +// exerciseCmd = participant.submitAndWaitRequest( +// party, +// textKeyCid1.exerciseTextKeyChoice().commands, +// ) +// _ <- participant.submitAndWaitForTransaction(exerciseCmd) +// events2 <- getEvents +// _ <- pruneToCurrentEnd(participant, party).map(_.getAbsolute) +// events3 <- getEvents +// } yield { +// assertEquals("Expected single create event after prune", events1, 1) +// assertEquals("Expected create and consume event before prune", events2, 2) +// assertEquals("Expected no events following prune", events3, 0) +// } +// }) + + private def populateLedgerAndGetOffsets(participant: ParticipantTestContext, submitter: Party)( + implicit ec: ExecutionContext + ): Future[Vector[Long]] = + populateLedger(participant, submitter).map(_.map(tx => tx.offset)) + + private def populateLedgerAndGetOffsetsWithTransactionIds( + participant: ParticipantTestContext, + submitter: Party, + )(implicit ec: ExecutionContext): Future[Vector[(Long, String)]] = + populateLedger(participant, submitter) + .map( + _.map(tx => (tx.offset, tx.updateId)) + ) + + private def populateLedger(participant: ParticipantTestContext, submitter: Party)(implicit + ec: ExecutionContext + ): Future[Vector[Transaction]] = + for { + endOffsetAtTestStart <- participant.currentEnd() + _ <- Future + .sequence(Vector.fill(batchesToPopulate) { + for { + dummy <- participant.create(submitter, new model.test.Dummy(submitter)) + _ <- participant.exercise(submitter, dummy.exerciseDummyChoice1()) + _ <- participant.create(submitter, new Dummy(submitter)) + } yield () + }) + txReq <- participant.getTransactionsRequest( + transactionFormat = participant.transactionFormat( + parties = Some(Seq(submitter)), + transactionShape = LedgerEffects, + ), + begin = endOffsetAtTestStart, + ) + txs <- participant.transactions(txReq) + } yield txs + +// // Note that the Daml template must be inspected to establish the key type and fields +// // For the TextKey template the key is: (tkParty, tkKey) : (Party, Text) +// // When populating the Record identifiers are not required. 
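+// // The fields must, however, appear in the order of the key definition,
+// // since unlabelled record fields are matched positionally.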
+// private def makeTextKeyKey(party: Party, keyText: String) =
+// Value(
+// Value.Sum.Record(
+// Record(fields =
+// Vector(
+// RecordField(value = Some(Value(Value.Sum.Party(party)))),
+// RecordField(value = Some(Value(Value.Sum.Text(keyText)))),
+// )
+// )
+// )
+// )
+
+ /** Note that the returned ledger end is the one observed before the dummy contract creation and
+ * prune calls, so it does not represent the ledger end after pruning.
+ */
+ private def pruneToCurrentEnd(participant: ParticipantTestContext, party: Party)(implicit
+ ec: ExecutionContext
+ ): Future[Long] =
+ for {
+ end <- participant.currentEnd()
+ _ <- pruneCantonSafe(participant, end, party)
+ } yield end
+
+ /** We retry a command submission followed by pruning to make this test compatible with Canton.
+ * In Canton, pruning fails unless ACS commitments have been exchanged between participants;
+ * repeatedly submitting commands prompts Canton to exchange ACS commitments and allows the
+ * pruning call to eventually succeed.
+ */
+ private def pruneCantonSafe(
+ ledger: ParticipantTestContext,
+ pruneUpTo: Long,
+ party: Party,
+ )(implicit ec: ExecutionContext): Future[Unit] =
+ FutureAssertions.succeedsEventually(
+ retryDelay = 100.millis,
+ maxRetryDuration = 10.seconds,
+ ledger.delayMechanism,
+ "Pruning",
+ ) {
+ for {
+ _ <- ledger.submitAndWait(
+ ledger.submitAndWaitRequest(party, new Dummy(party).create.commands)
+ )
+ _ <- ledger.prune(pruneUpTo = pruneUpTo, attempts = 1)
+ } yield ()
+ }
+
+}
diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/PartyManagementITBase.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/PartyManagementITBase.scala
new file mode 100644
index 0000000000..5f729a0580
--- /dev/null
+++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/PartyManagementITBase.scala
@@ -0,0 +1,132 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
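+
+// Shared scaffolding for the party management conformance suites: request
+// builders (`updateRequest`), metadata normalization (`unsetResourceVersion`),
+// and fresh-party fixtures (`testWithFreshPartyDetails` / `withFreshParty`),
+// mixed into suites such as `PartyManagementServiceIT` below.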
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.{ + NoParties, + Participant, + Participants, + allocate, +} +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.v2.admin.object_meta.ObjectMeta +import com.daml.ledger.api.v2.admin.party_management_service.* +import com.google.protobuf.field_mask.FieldMask + +import scala.concurrent.{ExecutionContext, Future} + +trait PartyManagementITBase extends LedgerTestSuite { + def unsetResourceVersion[T](t: T): T = { + val t2: T = t match { + case u: PartyDetails => u.update(_.localMetadata.resourceVersion := "").asInstanceOf[T] + case u: AllocatePartyResponse => + u.update(_.partyDetails.localMetadata.resourceVersion := "").asInstanceOf[T] + case u: UpdatePartyDetailsResponse => + u.update(_.partyDetails.localMetadata.resourceVersion := "").asInstanceOf[T] + case other => sys.error(s"could not match $other") + } + t2 + } + + def updateRequest( + party: String, + isLocal: Boolean = false, + annotations: Map[String, String] = Map.empty, + resourceVersion: String = "", + updatePaths: Seq[String], + ): UpdatePartyDetailsRequest = + UpdatePartyDetailsRequest( + partyDetails = Some( + PartyDetails( + party = party, + isLocal = isLocal, + localMetadata = + Some(ObjectMeta(resourceVersion = resourceVersion, annotations = annotations)), + identityProviderId = "", + ) + ), + updateMask = Some(FieldMask(updatePaths)), + ) + + def extractUpdatedAnnotations( + updateResp: UpdatePartyDetailsResponse + ): Map[String, String] = + updateResp.partyDetails.get.localMetadata.get.annotations + + def extractUpdatedAnnotations( + allocateResp: AllocatePartyResponse + ): Map[String, String] = + allocateResp.partyDetails.get.localMetadata.get.annotations + + def withFreshParty[T]( + connectedSynchronizers: Int, + annotations: Map[String, String] = Map.empty, + )( + f: PartyDetails => Future[T] + )(implicit ledger: ParticipantTestContext, ec: ExecutionContext): Future[T] = { + val req = AllocatePartyRequest( + partyIdHint = "", + localMetadata = Some( + ObjectMeta( + resourceVersion = "", + annotations = annotations, + ) + ), + identityProviderId = "", + synchronizerId = "", + userId = "", + ) + for { + (create, _) <- ledger.allocateParty(req, connectedSynchronizers) + v <- f(create.partyDetails.get) + } yield v + } + + def testWithFreshPartyDetails( + shortIdentifier: String, + description: String, + )( + annotations: Map[String, String] = Map.empty + )( + body: ExecutionContext => ParticipantTestContext => PartyDetails => Future[Unit] + ): Unit = + test( + shortIdentifier = shortIdentifier, + description = description, + partyAllocation = allocate(NoParties), + )(implicit ec => { case p @ Participants(Participant(ledger, Seq())) => + withFreshParty( + connectedSynchronizers = p.minSynchronizers, + annotations = annotations, + ) { partyDetails => + body(ec)(ledger)(partyDetails) + }(ledger, ec) + }) + + def testWithoutPartyDetails( + shortIdentifier: String, + description: String, + )( + body: ExecutionContext => ParticipantTestContext => Participants => Future[Unit] + ): Unit = + test( + shortIdentifier = shortIdentifier, + description = description, + partyAllocation = allocate(NoParties), + )(implicit ec => { case p @ Participants(Participant(ledger, Seq())) => + body(ec)(ledger)(p) + }) + + def newPartyDetails( + party: String, + annotations: Map[String, String] = Map.empty, 
+ isLocal: Boolean = true, + ): PartyDetails = PartyDetails( + party = party, + isLocal = isLocal, + localMetadata = Some(ObjectMeta(resourceVersion = "", annotations = annotations)), + identityProviderId = "", + ) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/PartyManagementServiceIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/PartyManagementServiceIT.scala new file mode 100644 index 0000000000..445d70c48b --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/PartyManagementServiceIT.scala @@ -0,0 +1,1125 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.{NamePicker, Party} +import com.daml.ledger.api.v2.admin.identity_provider_config_service.DeleteIdentityProviderConfigRequest +import com.daml.ledger.api.v2.admin.object_meta.ObjectMeta +import com.daml.ledger.api.v2.admin.party_management_service.{ + AllocateExternalPartyRequest, + AllocatePartyRequest, + AllocatePartyResponse, + GenerateExternalPartyTopologyRequest, + GetPartiesRequest, + GetPartiesResponse, + ListKnownPartiesRequest, + ListKnownPartiesResponse, + PartyDetails, + UpdatePartyIdentityProviderIdRequest, +} +import com.daml.ledger.api.v2.crypto as lapicrypto +import com.daml.ledger.javaapi.data.Party as ApiParty +import com.daml.ledger.test.java.model.test.Dummy +import com.digitalasset.canton.ledger.error.groups.{AdminServiceErrors, RequestValidationErrors} +import com.digitalasset.daml.lf.data.Ref +import com.google.protobuf.ByteString + +import java.security.{KeyPairGenerator, Signature} +import java.util.UUID +import java.util.regex.Pattern +import scala.concurrent.Future +import scala.util.Random + +final class PartyManagementServiceIT extends PartyManagementITBase { + import CompanionImplicits.* + + val namePicker: NamePicker = NamePicker( + "-_ 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + ) + + test( + "PMUpdatingPartyIdentityProviderNonDefaultIdps", + "Test reassigning party to a different idp using non default idps", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + val idpId1 = ledger.nextIdentityProviderId() + val idpId2 = ledger.nextIdentityProviderId() + for { + _ <- ledger.createIdentityProviderConfig(identityProviderId = idpId1) + _ <- ledger.createIdentityProviderConfig(identityProviderId = idpId2) + party <- ledger.allocateParty(identityProviderId = Some(idpId1)) + get1 <- ledger.getParties( + GetPartiesRequest(parties = Seq(party), identityProviderId = idpId1) + ) + // Update party's idp id + _ <- ledger.updatePartyIdentityProviderId( + UpdatePartyIdentityProviderIdRequest( + party = party, + sourceIdentityProviderId = idpId1, + targetIdentityProviderId = idpId2, + ) + ) + get2 <- ledger.getParties( + GetPartiesRequest(parties = Seq(party), identityProviderId = idpId2) + ) + get3 <- ledger.getParties( + GetPartiesRequest(parties = Seq(party), identityProviderId = idpId1) + ) + // Cleanup + _ <- ledger.updatePartyIdentityProviderId( 
+ UpdatePartyIdentityProviderIdRequest( + party = party, + sourceIdentityProviderId = idpId2, + targetIdentityProviderId = "", + ) + ) + _ <- ledger.deleteIdentityProviderConfig(DeleteIdentityProviderConfigRequest(idpId1)) + _ <- ledger.deleteIdentityProviderConfig(DeleteIdentityProviderConfigRequest(idpId2)) + } yield { + assertEquals( + "is idp1, request as idp1", + get1.partyDetails.map(d => d.identityProviderId -> d.party -> d.isLocal), + Seq(idpId1 -> party.getValue -> true), + ) + assertEquals( + "is idp2, request as idp2", + get2.partyDetails.map(d => d.identityProviderId -> d.party -> d.isLocal), + Seq(idpId2 -> party.getValue -> true), + ) + assertEquals( + "is idp2, request as idp1", + get3.partyDetails.map(d => d.identityProviderId -> d.party -> d.isLocal), + // party and isLocal values get blinded + Seq("" -> party.getValue -> false), + ) + + } + }) + + test( + "PMUpdatingPartyIdentityProviderWithDefaultIdp", + "Test reassigning party to a different idp using the default idp", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + val idpId1 = ledger.nextIdentityProviderId() + for { + _ <- ledger.createIdentityProviderConfig(identityProviderId = idpId1) + // allocate a party in the default idp + party <- ledger.allocateParty(identityProviderId = None) + get1 <- ledger.getParties(GetPartiesRequest(parties = Seq(party), identityProviderId = "")) + // Update party's idp id + _ <- ledger.updatePartyIdentityProviderId( + UpdatePartyIdentityProviderIdRequest( + party = party, + sourceIdentityProviderId = "", + targetIdentityProviderId = idpId1, + ) + ) + get2 <- ledger.getParties( + GetPartiesRequest(parties = Seq(party), identityProviderId = idpId1) + ) + // Cleanup - changing party's idp to the default idp so that non default one can be deleted + _ <- ledger.updatePartyIdentityProviderId( + UpdatePartyIdentityProviderIdRequest( + party = party, + sourceIdentityProviderId = idpId1, + targetIdentityProviderId = "", + ) + ) + _ <- ledger.deleteIdentityProviderConfig(DeleteIdentityProviderConfigRequest(idpId1)) + } yield { + assertEquals( + "default idp", + get1.partyDetails.map(d => d.identityProviderId -> d.party -> d.isLocal), + Seq("" -> party.getValue -> true), + ) + assertEquals( + "non default idp", + get2.partyDetails.map(d => d.identityProviderId -> d.party -> d.isLocal), + Seq(idpId1 -> party.getValue -> true), + ) + } + }) + + test( + "PMUpdatingPartyIdentityProviderNonExistentIdps", + "Test reassigning party to a different idp when source or target idp doesn't exist", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + val idpIdNonExistent = ledger.nextIdentityProviderId() + for { + party <- ledger.allocateParty(identityProviderId = None) + _ <- ledger + .updatePartyIdentityProviderId( + UpdatePartyIdentityProviderIdRequest( + party = party, + sourceIdentityProviderId = idpIdNonExistent, + targetIdentityProviderId = "", + ) + ) + .mustFailWith( + "non existent source idp", + RequestValidationErrors.InvalidArgument, + ) + _ <- ledger + .updatePartyIdentityProviderId( + UpdatePartyIdentityProviderIdRequest( + party = party, + sourceIdentityProviderId = "", + targetIdentityProviderId = idpIdNonExistent, + ) + ) + .mustFailWith( + "non existent target idp", + RequestValidationErrors.InvalidArgument, + ) + } 
yield () + }) + + test( + "PMUpdatingPartyIdentityProviderMismatchedSourceIdp", + "Test reassigning party to a different idp using mismatched source idp id", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + val idpIdNonDefault = ledger.nextIdentityProviderId() + val idpIdTarget = ledger.nextIdentityProviderId() + val idpIdMismatched = ledger.nextIdentityProviderId() + for { + _ <- ledger.createIdentityProviderConfig(identityProviderId = idpIdNonDefault) + _ <- ledger.createIdentityProviderConfig(identityProviderId = idpIdMismatched) + _ <- ledger.createIdentityProviderConfig(identityProviderId = idpIdTarget) + partyDefault <- ledger.allocateParty(identityProviderId = None) + partyNonDefault <- ledger.allocateParty(identityProviderId = Some(idpIdNonDefault)) + _ <- ledger + .updatePartyIdentityProviderId( + UpdatePartyIdentityProviderIdRequest( + party = partyDefault, + sourceIdentityProviderId = idpIdMismatched, + targetIdentityProviderId = idpIdTarget, + ) + ) + .mustFailWith( + "mismatched source idp id", + AdminServiceErrors.PartyManagement.PartyNotFound, + ) + _ <- ledger + .updatePartyIdentityProviderId( + UpdatePartyIdentityProviderIdRequest( + party = partyNonDefault, + sourceIdentityProviderId = idpIdMismatched, + targetIdentityProviderId = idpIdTarget, + ) + ) + .mustFailWith( + "mismatched source idp id", + AdminServiceErrors.PartyManagement.PartyNotFound, + ) + // cleanup + _ <- ledger.updatePartyIdentityProviderId( + UpdatePartyIdentityProviderIdRequest( + party = partyNonDefault, + sourceIdentityProviderId = idpIdNonDefault, + targetIdentityProviderId = "", + ) + ) + _ <- ledger.deleteIdentityProviderConfig(DeleteIdentityProviderConfigRequest(idpIdNonDefault)) + _ <- ledger.deleteIdentityProviderConfig(DeleteIdentityProviderConfigRequest(idpIdMismatched)) + } yield () + }) + + test( + "PMUpdatingPartyIdentityProviderSourceAndTargetIdpTheSame", + "Test reassigning party to a different idp but source and target idps are the same", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + val idpId1 = ledger.nextIdentityProviderId() + for { + _ <- ledger.createIdentityProviderConfig(identityProviderId = idpId1) + partyDefault <- ledger.allocateParty(identityProviderId = None) + partyNonDefault <- ledger.allocateParty(identityProviderId = Some(idpId1)) + _ <- ledger.updatePartyIdentityProviderId( + UpdatePartyIdentityProviderIdRequest( + party = partyDefault, + sourceIdentityProviderId = "", + targetIdentityProviderId = "", + ) + ) + get1 <- ledger.getParties( + GetPartiesRequest(parties = Seq(partyDefault), identityProviderId = "") + ) + _ <- ledger.updatePartyIdentityProviderId( + UpdatePartyIdentityProviderIdRequest( + party = partyNonDefault, + sourceIdentityProviderId = idpId1, + targetIdentityProviderId = idpId1, + ) + ) + get2 <- ledger.getParties( + GetPartiesRequest(parties = Seq(partyNonDefault), identityProviderId = idpId1) + ) + // cleanup + _ <- ledger.updatePartyIdentityProviderId( + UpdatePartyIdentityProviderIdRequest( + party = partyNonDefault, + sourceIdentityProviderId = idpId1, + targetIdentityProviderId = "", + ) + ) + _ <- ledger.deleteIdentityProviderConfig(DeleteIdentityProviderConfigRequest(idpId1)) + } yield { + assertEquals( + "default idp", + get1.partyDetails.map(d => 
d.identityProviderId -> d.party -> d.isLocal), + Seq("" -> partyDefault.getValue -> true), + ) + assertEquals( + "non default idp", + get2.partyDetails.map(d => d.identityProviderId -> d.party -> d.isLocal), + Seq(idpId1 -> partyNonDefault.getValue -> true), + ) + } + }) + + test( + "PMGetPartiesUsingDifferentIdps", + "Test getting parties using different idps", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + val idpId1 = ledger.nextIdentityProviderId() + val idpId2 = ledger.nextIdentityProviderId() + for { + _ <- ledger.createIdentityProviderConfig(identityProviderId = idpId1) + _ <- ledger.createIdentityProviderConfig(identityProviderId = idpId2) + partyDefault <- ledger.allocateParty(identityProviderId = None) + partyNonDefault <- ledger.allocateParty(identityProviderId = Some(idpId1)) + partyOtherNonDefault <- ledger.allocateParty(identityProviderId = Some(idpId2)) + getAsDefaultIdp <- ledger.getParties( + GetPartiesRequest(parties = Seq(partyDefault, partyNonDefault), identityProviderId = "") + ) + getAsNonDefaultIdp <- ledger.getParties( + GetPartiesRequest( + parties = Seq(partyDefault, partyNonDefault, partyOtherNonDefault), + identityProviderId = idpId1, + ) + ) + // cleanup + _ <- ledger.updatePartyIdentityProviderId( + UpdatePartyIdentityProviderIdRequest( + party = partyNonDefault, + sourceIdentityProviderId = idpId1, + targetIdentityProviderId = "", + ) + ) + _ <- ledger.updatePartyIdentityProviderId( + UpdatePartyIdentityProviderIdRequest( + party = partyOtherNonDefault, + sourceIdentityProviderId = idpId2, + targetIdentityProviderId = "", + ) + ) + _ <- ledger.deleteIdentityProviderConfig(DeleteIdentityProviderConfigRequest(idpId1)) + } yield { + assertEquals( + "default idp", + getAsDefaultIdp.partyDetails.map(_.copy(localMetadata = None)).toSet, + Set( + PartyDetails( + party = partyDefault, + isLocal = true, + identityProviderId = "", + localMetadata = None, + ), + PartyDetails( + party = partyNonDefault, + isLocal = true, + identityProviderId = "", + localMetadata = None, + ), + ), + ) + assertEquals( + "non default idp", + getAsNonDefaultIdp.partyDetails.map(_.copy(localMetadata = None)).toSet, + Set( + PartyDetails( + party = partyDefault, + isLocal = false, + identityProviderId = "", + localMetadata = None, + ), + PartyDetails( + party = partyNonDefault, + isLocal = true, + identityProviderId = idpId1, + localMetadata = None, + ), + PartyDetails( + party = partyOtherNonDefault, + isLocal = false, + identityProviderId = "", + localMetadata = None, + ), + ), + ) + } + }) + + test( + "PMNonEmptyParticipantID", + "Asking for the participant identifier should return a non-empty string", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + participantId <- ledger.getParticipantId() + } yield { + assert(participantId.nonEmpty, "The ledger returned an empty participant identifier") + } + }) + + private val pMAllocateWithHint = "PMAllocateWithHint" + test( + pMAllocateWithHint, + "It should be possible to provide a hint when allocating a party", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + party <- ledger.allocateParty( + partyIdHint = Some(pMAllocateWithHint + "_" + Random.alphanumeric.take(10).mkString) + ) + } yield assert( + party.getValue.nonEmpty, + "The allocated party identifier is an empty string", + ) + }) + + test( + 
"PMAllocateWithoutHint", + "It should be possible to not provide a hint when allocating a party", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + party <- ledger.allocateParty(partyIdHint = None) + } yield assert( + party.getValue.nonEmpty, + "The allocated party identifier is an empty string", + ) + }) + + test( + "PMAllocateWithLocalMetadataAnnotations", + "Successfully allocating a party with non-empty annotations", + partyAllocation = allocate(NoParties), + )(implicit ec => { case p @ Participants(Participant(ledger, Seq())) => + for { + (allocate1, _) <- ledger.allocateParty( + AllocatePartyRequest( + partyIdHint = "", + localMetadata = Some( + ObjectMeta(resourceVersion = "", annotations = Map("key1" -> "val1", "key2" -> "val2")) + ), + identityProviderId = "", + synchronizerId = "", + userId = "", + ), + p.minSynchronizers, + ) + allocatedParty = allocate1.partyDetails.get.party + expectedPartyDetails = PartyDetails( + party = allocatedParty, + isLocal = true, + localMetadata = Some( + ObjectMeta( + resourceVersion = allocate1.partyDetails.get.localMetadata.get.resourceVersion, + annotations = Map( + "key1" -> "val1", + "key2" -> "val2", + ), + ) + ), + identityProviderId = "", + ) + _ = assertEquals( + allocate1, + expected = AllocatePartyResponse( + partyDetails = Some( + expectedPartyDetails + ) + ), + ) + get1 <- ledger.getParties(GetPartiesRequest(Seq(allocatedParty), "")) + _ = assertEquals( + get1, + expected = GetPartiesResponse( + partyDetails = Seq( + expectedPartyDetails + ) + ), + ) + } yield () + }) + + test( + "PMFailToAllocateWhenAnnotationsHaveEmptyValues", + "Failing to allocate when annotations have empty values", + partyAllocation = allocate(NoParties), + )(implicit ec => { case p @ Participants(Participant(ledger, Seq())) => + for { + _ <- ledger + .allocateParty( + AllocatePartyRequest( + partyIdHint = "", + localMetadata = Some( + ObjectMeta( + resourceVersion = "", + annotations = Map("key1" -> "val1", "key2" -> ""), + ) + ), + identityProviderId = "", + synchronizerId = "", + userId = "", + ), + p.minSynchronizers, + ) + .mustFailWith( + "allocating a party", + RequestValidationErrors.InvalidArgument, + ) + } yield () + }) + + test( + "PMRejectionDuplicateHint", + "A party allocation request with a duplicate party hint should be rejected", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + val hint = "party_hint" + "_" + Random.alphanumeric.take(10).mkString + for { + party <- ledger.allocateParty(partyIdHint = Some(hint)) + error <- ledger + .allocateParty(partyIdHint = Some(hint)) + .mustFail("allocating a party with a duplicate hint") + } yield { + assert( + party.getValue.nonEmpty, + "The allocated party identifier is an empty string", + ) + assertGrpcErrorRegex( + error, + RequestValidationErrors.InvalidArgument, + Some(Pattern.compile("Party already exists|PartyToParticipant")), + ) + } + }) + + test( + "PMRejectLongPartyHints", + "A party identifier which is too long should be rejected with the proper error", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + error <- ledger + .allocateParty( + partyIdHint = Some(Random.alphanumeric.take(256).mkString) + ) + .mustFail("allocating a party with a very long identifier") + } yield { + assertGrpcError( + error, + RequestValidationErrors.InvalidArgument, + Some("Party is too long"), + ) + } + }) + + test( + "PMRejectInvalidPartyHints", + "A party identifier 
that contains invalid characters should be rejected with the proper error", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + error <- ledger + .allocateParty( + // Assumption: emojis will never be acceptable in a party identifier + partyIdHint = Some("\uD83D\uDE00") + ) + .mustFail("allocating a party with invalid characters") + } yield { + assertGrpcError( + error, + RequestValidationErrors.InvalidArgument, + Some("non expected character"), + ) + } + }) + + test( + "PMAllocateOneHundred", + "It should create unique party names when allocating many parties", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + parties <- ledger.allocateParties(n = 100, minSynchronizers = 0) + } yield { + val nonUniqueNames = parties.groupBy(_.getValue).view.mapValues(_.size).filter(_._2 > 1).toMap + assert( + nonUniqueNames.isEmpty, + s"There are non-unique party names: ${nonUniqueNames + .map { case (name, count) => s"$name ($count)" } + .mkString(", ")}", + ) + } + }) + + test( + "PMGetPartiesDetails", + "It should get details for multiple parties, if they exist", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + party1 <- ledger.allocateParty( + partyIdHint = Some("PMGetPartiesDetails_" + Random.alphanumeric.take(10).mkString), + localMetadata = Some( + ObjectMeta( + resourceVersion = "", + annotations = Map("k1" -> "v1"), + ) + ), + ) + party2 <- ledger.allocateParty( + partyIdHint = Some("PMGetPartiesDetails_" + Random.alphanumeric.take(10).mkString), + localMetadata = Some( + ObjectMeta( + resourceVersion = "", + annotations = Map("k2" -> "v2"), + ) + ), + ) + partyDetails <- ledger.getParties( + Seq(party1, party2, Party("non-existent")) + ) + noPartyDetails <- ledger.getParties(Seq(Party("non-existent"))) + zeroPartyDetails <- ledger.getParties(Seq.empty) + } yield { + val got = partyDetails.map(unsetResourceVersion).sortBy(_.party) + val want = Seq( + PartyDetails( + party = Ref.Party.assertFromString(party1), + isLocal = true, + localMetadata = Some( + ObjectMeta( + resourceVersion = "", + annotations = Map("k1" -> "v1"), + ) + ), + identityProviderId = "", + ), + PartyDetails( + party = Ref.Party.assertFromString(party2), + isLocal = true, + localMetadata = Some( + ObjectMeta( + resourceVersion = "", + annotations = Map("k2" -> "v2"), + ) + ), + identityProviderId = "", + ), + ).sortBy(_.party) + assert( + got == want, + s"The allocated parties, ${Seq(party1, party2)}, were not retrieved successfully.\nGot\n$got\n\nwant\n$want", + ) + assert( + noPartyDetails.isEmpty, + s"Retrieved some parties when the party specified did not exist: $noPartyDetails", + ) + assert( + zeroPartyDetails.isEmpty, + s"Retrieved some parties when no parties were requested: $zeroPartyDetails", + ) + } + }) + + test( + "PMListKnownParties", + "It should list all known, previously-allocated parties", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + party1 <- ledger + .allocateParty( + partyIdHint = Some("PMListKnownParties_" + Random.alphanumeric.take(10).mkString), + localMetadata = Some( + ObjectMeta( + resourceVersion = "", + annotations = Map("k1" -> "v1"), + ) + ), + ) + .map(_.underlying) + party2 <- ledger + .allocateParty( + partyIdHint = Some("PMListKnownParties_" + Random.alphanumeric.take(10).mkString) + ) + .map(_.underlying) + party3 <- ledger + .allocateParty( + partyIdHint = 
Some("PMListKnownParties_" + Random.alphanumeric.take(10).mkString) + ) + .map(_.underlying) + knownPartyResp <- ledger.listKnownParties() + knownPartyIds = knownPartyResp.partyDetails.map(_.party).map(new ApiParty(_)).toSet + } yield { + val allocatedPartyIds = Set(party1, party2, party3) + assert( + allocatedPartyIds subsetOf knownPartyIds, + s"The allocated party IDs $allocatedPartyIds are not a subset of $knownPartyIds.", + ) + val fetchedAllocatedPartiesSet = knownPartyResp.partyDetails.collect { + case details if allocatedPartyIds.contains(Party(details.party)) => + unsetResourceVersion(details) + }.toSet + assertEquals( + fetchedAllocatedPartiesSet, + expected = Set( + PartyDetails( + party = party1.getValue, + isLocal = true, + localMetadata = Some( + ObjectMeta( + resourceVersion = "", + annotations = Map("k1" -> "v1"), + ) + ), + identityProviderId = "", + ), + PartyDetails( + party = party2.getValue, + isLocal = true, + localMetadata = Some( + ObjectMeta( + resourceVersion = "", + annotations = Map.empty, + ) + ), + identityProviderId = "", + ), + PartyDetails( + party = party3.getValue, + isLocal = true, + localMetadata = Some( + ObjectMeta( + resourceVersion = "", + annotations = Map.empty, + ) + ), + identityProviderId = "", + ), + ), + ) + } + }) + + test( + "PMGetPartiesBoundaryConditions", + "GetParties should correctly report in boundary conditions", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(alpha, Seq(alice))) => + val nonExisting = alpha.nextPartyId() + for { + // Running a dummy transaction seems to be required by the test framework to make parties visible via getParties + _ <- alpha.create(alice, new Dummy(alice)) + + alphaParties <- alpha.getParties(GetPartiesRequest(Seq(alice), "")) + alphaParties2 <- alpha.getParties(GetPartiesRequest(Seq(alice, nonExisting), "")) + _ <- alpha.getParties(GetPartiesRequest(Seq(nonExisting), "")) + _ <- alpha.getParties(GetPartiesRequest(Seq.empty, "")) + } yield { + assert( + alphaParties.partyDetails.exists(p => p.party == alice.getValue && p.isLocal), + "Missing expected party from the participant", + ) + assert( + alphaParties2.partyDetails.sizeIs == 1, + "Non existing party found in getParties response", + ) + assert( + alphaParties2.partyDetails.exists(p => p.party == alice.getValue && p.isLocal), + "Missing expected party from the participant", + ) + } + }) + + test( + "PMGetPartiesIsLocal", + "GetParties should correctly report whether parties are local or not", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case Participants(Participant(alpha, Seq(alice)), Participant(beta, Seq(bob))) => + for { + // Running a dummy transaction seems to be required by the test framework to make parties visible via getParties + _ <- alpha.create(alice, new Dummy(alice)) + _ <- beta.create(bob, new Dummy(bob)) + + alphaParties <- alpha.getParties(Seq(alice, bob)) + betaParties <- beta.getParties(Seq(alice, bob)) + } yield { + assert( + alphaParties.exists(p => p.party == alice.getValue && p.isLocal), + "Missing expected party from first participant", + ) + assert( + betaParties.exists(p => p.party == bob.getValue && p.isLocal), + "Missing expected party from second participant", + ) + + // The following assertions allow some slack to distributed ledger implementations, as they can + // either publish parties across participants as non-local or bar that from happening entirely. 
+ // Furthermore, as participants with matching ledger ids expose the "same ledger", such participants
+ // are allowed to expose parties as local on multiple participants; the assertions are therefore
+ // restricted to participants with different ledger ids.
+ if (alpha.endpointId != beta.endpointId) {
+ assert(
+ alphaParties.exists(p => p.party == bob.getValue && !p.isLocal) || !alphaParties.exists(
+ _.party == bob.getValue
+ ),
+ "Unexpected remote party marked as local found on first participant",
+ )
+ assert(
+ betaParties.exists(p => p.party == alice.getValue && !p.isLocal) || !betaParties.exists(
+ _.party == alice.getValue
+ ),
+ "Unexpected remote party marked as local found on second participant",
+ )
+ }
+ }
+ })
+
+ test(
+ "PMPagedListKnownPartiesNewPartyVisibleOnPage",
+ "Exercise ListKnownParties rpc: Creating a party makes it visible on a page",
+ allocate(NoParties),
+ enabled = _.partyManagement.maxPartiesPageSize > 0,
+ )(implicit ec => { case p @ Participants(Participant(ledger, Seq())) =>
+ def assertPartyPresentIn(party: String, list: ListKnownPartiesResponse, msg: String): Unit =
+ assert(list.partyDetails.exists(_.party.startsWith(party)), msg)
+
+ def assertPartyAbsentIn(party: String, list: ListKnownPartiesResponse, msg: String): Unit =
+ assert(!list.partyDetails.exists(_.party.startsWith(party)), msg)
+
+ for {
+ pageBeforeCreate <- ledger.listKnownParties(
+ ListKnownPartiesRequest(
+ pageToken = "",
+ pageSize = 10,
+ identityProviderId = "",
+ )
+ )
+ // Construct a party-id that, with high probability, will be the first on the first page
+ newPartyId = pageBeforeCreate.partyDetails.headOption
+ .flatMap(_.party.split(':').headOption)
+ .flatMap(namePicker.lower)
+ .getOrElse("@BAD-PARTY@")
+ _ = assertPartyAbsentIn(
+ newPartyId,
+ pageBeforeCreate,
+ "new party should be absent before its creation",
+ )
+ _ <- ledger.allocateParty(
+ AllocatePartyRequest(newPartyId, None, "", "", ""),
+ p.minSynchronizers,
+ )
+ pageAfterCreate <- ledger.listKnownParties(
+ ListKnownPartiesRequest(
+ pageToken = "",
+ pageSize = 10,
+ identityProviderId = "",
+ )
+ )
+ _ = assertPartyPresentIn(
+ newPartyId,
+ pageAfterCreate,
+ "new party should be present after its creation",
+ )
+ } yield {
+ ()
+ }
+ })
+
+ test(
+ "PMPagedListKnownPartiesNewPartyInvisibleOnNextPage",
+ "Exercise ListKnownParties rpc: Adding a party to a previous page doesn't affect the subsequent page",
+ allocate(NoParties),
+ enabled = _.partyManagement.maxPartiesPageSize > 0,
+ )(implicit ec => { case p @ Participants(Participant(ledger, Seq())) =>
+ val partyId1 = ledger.nextPartyId()
+ val partyId2 = ledger.nextPartyId()
+ val partyId3 = ledger.nextPartyId()
+ val partyId4 = ledger.nextPartyId()
+
+ for {
+ // Create 4 parties to ensure we have at least two pages of two parties each
+ _ <- ledger.allocateParty(
+ AllocatePartyRequest(partyId1, None, "", "", ""),
+ p.minSynchronizers,
+ )
+ _ <- ledger.allocateParty(
+ AllocatePartyRequest(partyId2, None, "", "", ""),
+ p.minSynchronizers,
+ )
+ _ <- ledger.allocateParty(
+ AllocatePartyRequest(partyId3, None, "", "", ""),
+ p.minSynchronizers,
+ )
+ _ <- ledger.allocateParty(
+ AllocatePartyRequest(partyId4, None, "", "", ""),
+ p.minSynchronizers,
+ )
+ // Fetch the first two full pages
+ page1 <- ledger.listKnownParties(
+ ListKnownPartiesRequest(pageToken = "", pageSize = 2, identityProviderId = "")
+ )
+ page2 <- ledger.listKnownParties(
+ ListKnownPartiesRequest(
+ pageToken = page1.nextPageToken,
+ pageSize = 2,
+ identityProviderId = "",
+
)
+ )
+ // Verify that the second page stays the same even after we have created a new party that is lexicographically smaller than the last party on the first page
+ newPartyId = (for {
+ beforeLast <- page1.partyDetails.dropRight(1).lastOption
+ beforeLastName <- beforeLast.party.split(':').headOption
+ last <- page1.partyDetails.lastOption
+ lastName <- last.party.split(':').headOption
+ pick <- namePicker.lowerConstrained(lastName, beforeLastName)
+ } yield pick).getOrElse("@BAD-PARTY@")
+ _ <- ledger.allocateParty(
+ AllocatePartyRequest(newPartyId, None, "", "", ""),
+ p.minSynchronizers,
+ )
+ page2B <- ledger.listKnownParties(
+ ListKnownPartiesRequest(
+ pageToken = page1.nextPageToken,
+ pageSize = 2,
+ identityProviderId = "",
+ )
+ )
+ _ = assertEquals("after creating new party before the second page", page2, page2B)
+ } yield {
+ ()
+ }
+ })
+
+ test(
+ "PMPagedListKnownPartiesReachingTheLastPage",
+ "Exercise ListKnownParties rpc: Listing all parties page by page eventually terminates reaching the last page",
+ allocate(NoParties),
+ enabled = _.partyManagement.maxPartiesPageSize > 0,
+ )(implicit ec => { case Participants(Participant(ledger, Seq())) =>
+ val pageSize = Math.min(10000, ledger.features.partyManagement.maxPartiesPageSize)
+
+ def fetchNextPage(pageToken: String, pagesFetched: Int): Future[Unit] =
+ for {
+ page <- ledger.listKnownParties(
+ ListKnownPartiesRequest(
+ pageSize = pageSize,
+ pageToken = pageToken,
+ identityProviderId = "",
+ )
+ )
+ // Await the recursive fetch so that failures on later pages propagate
+ _ <-
+ if (page.nextPageToken != "") {
+ if (pagesFetched > 10) {
+ fail(
+ s"Could not reach the last page even after fetching ${pagesFetched + 1} pages of size $pageSize each"
+ )
+ }
+ fetchNextPage(pageToken = page.nextPageToken, pagesFetched = pagesFetched + 1)
+ } else Future.unit
+ } yield ()
+
+ fetchNextPage(pageToken = "", pagesFetched = 0)
+ })
+
+ test(
+ "PMPagedListKnownPartiesWithInvalidRequest",
+ "Exercise ListKnownParties rpc: Requesting invalid pageSize or pageToken results in an error",
+ allocate(NoParties),
+ enabled = _.partyManagement.maxPartiesPageSize > 0,
+ )(implicit ec => { case Participants(Participant(ledger, Seq())) =>
+ for {
+ // Using a string that is not Base64-encoded as the page token
+ onBadTokenError <- ledger
+ .listKnownParties(
+ ListKnownPartiesRequest(
+ pageToken = UUID.randomUUID().toString,
+ pageSize = 0,
+ identityProviderId = "",
+ )
+ )
+ .mustFail("using invalid page token string")
+ // Using negative pageSize
+ onNegativePageSizeError <- ledger
+ .listKnownParties(
+ ListKnownPartiesRequest(pageToken = "", pageSize = -100, identityProviderId = "")
+ )
+ .mustFail("using negative page size")
+ } yield {
+ assertGrpcError(
+ t = onBadTokenError,
+ errorCode = RequestValidationErrors.InvalidArgument,
+ exceptionMessageSubstring = None,
+ )
+ assertGrpcError(
+ t = onNegativePageSizeError,
+ errorCode = RequestValidationErrors.InvalidArgument,
+ exceptionMessageSubstring = None,
+ )
+ }
+
+ })
+
+ test(
+ "PMPagedListKnownPartiesZeroPageSize",
+ "Exercise ListKnownParties rpc: Requesting page of size zero means requesting server's default page size, which is larger than zero",
+ allocate(NoParties),
+ enabled = _.partyManagement.maxPartiesPageSize > 0,
+ )(implicit ec => { case p @ Participants(Participant(ledger, Seq())) =>
+ val partyId1 = ledger.nextPartyId()
+ val partyId2 = ledger.nextPartyId()
+ for {
+ // Ensure we have at least two parties
+ _ <- ledger.allocateParty(
+ AllocatePartyRequest(partyId1, None, "", "", ""),
+ p.minSynchronizers,
+ )
+ _ <- ledger.allocateParty(
+
AllocatePartyRequest(partyId2, None, "", "", ""),
+ p.minSynchronizers,
+ )
+ pageSizeZero <- ledger.listKnownParties(
+ ListKnownPartiesRequest(pageToken = "", pageSize = 0, identityProviderId = "")
+ )
+ pageSizeOne <- ledger.listKnownParties(
+ ListKnownPartiesRequest(pageToken = "", pageSize = 1, identityProviderId = "")
+ )
+ } yield {
+ assert(
+ pageSizeZero.partyDetails.nonEmpty,
+ "First page with requested pageSize zero should return some parties",
+ )
+ assertEquals(pageSizeZero.partyDetails.head, pageSizeOne.partyDetails.head)
+ }
+ })
+
+ test(
+ "PMPagedListKnownPartiesMaxPageSize",
+ "Exercise ListKnownParties rpc: Requesting more than maxPartiesPageSize results in an error",
+ allocate(NoParties),
+ enabled = _.partyManagement.maxPartiesPageSize > 0,
+ disabledReason = "requires party management feature with parties page size limit",
+ runConcurrently = false,
+ )(implicit ec => { case Participants(Participant(ledger, Seq())) =>
+ val maxPartiesPageSize = ledger.features.partyManagement.maxPartiesPageSize
+ for {
+ // request page size greater than the server's limit
+ onTooLargePageSizeError <- ledger
+ .listKnownParties(
+ ListKnownPartiesRequest(
+ pageSize = maxPartiesPageSize + 1,
+ pageToken = "",
+ identityProviderId = "",
+ )
+ )
+ .mustFail("using too large a page size")
+ } yield {
+ assertGrpcError(
+ t = onTooLargePageSizeError,
+ errorCode = RequestValidationErrors.InvalidArgument,
+ exceptionMessageSubstring = Some("Page size must not exceed the server's maximum"),
+ )
+ }
+ })
+
+ test(
+ "PMGenerateExternalPartyTopologyTransaction",
+ "Generate topology transactions for external parties",
+ allocate(NoParties),
+ )(implicit ec => { case Participants(Participant(ledger, Seq())) =>
+ val partyHint = ledger.nextPartyId()
+ val keyGen = KeyPairGenerator.getInstance("Ed25519")
+ val keyPair = keyGen.generateKeyPair()
+ val pb = keyPair.getPublic
+ val signing = Signature.getInstance("Ed25519")
+ signing.initSign(keyPair.getPrivate)
+
+ for {
+ syncIds <- ledger.getConnectedSynchronizers(None, None)
+ syncId = syncIds.headOption.getOrElse(throw new Exception("No synchronizer connected"))
+ response <- ledger.generateExternalPartyTopology(
+ GenerateExternalPartyTopologyRequest(
+ synchronizer = syncId,
+ partyHint = partyHint,
+ publicKey = Some(
+ lapicrypto.SigningPublicKey(
+ format =
+ lapicrypto.CryptoKeyFormat.CRYPTO_KEY_FORMAT_DER_X509_SUBJECT_PUBLIC_KEY_INFO,
+ keyData = ByteString.copyFrom(pb.getEncoded),
+ keySpec = lapicrypto.SigningKeySpec.SIGNING_KEY_SPEC_EC_CURVE25519,
+ )
+ ),
+ localParticipantObservationOnly = false,
+ otherConfirmingParticipantUids = Seq(),
+ confirmationThreshold = 1,
+ observingParticipantUids = Seq(),
+ )
+ )
+ _ = {
+ // Feed the multi-hash covering the onboarding transactions to the signer
+ signing.update(response.multiHash.toByteArray)
+ }
+ _ <- ledger.allocateExternalParty(
+ AllocateExternalPartyRequest(
+ synchronizer = syncId,
+ onboardingTransactions = response.topologyTransactions.map(x =>
+ AllocateExternalPartyRequest
+ .SignedTransaction(transaction = x, signatures = Seq.empty)
+ ),
+ multiHashSignatures = Seq(
+ lapicrypto.Signature(
+ format = lapicrypto.SignatureFormat.SIGNATURE_FORMAT_RAW,
+ signature = ByteString.copyFrom(signing.sign()),
+ signedBy = response.publicKeyFingerprint,
+ signingAlgorithmSpec = lapicrypto.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_ED25519,
+ )
+ ),
+ identityProviderId = "",
+ ),
+ minSynchronizers = Some(1),
+ )
+ parties <- ledger.getParties(
+ GetPartiesRequest(
+ parties = Seq(response.partyId),
+ identityProviderId = "",
+ )
+ )
+ } yield {
+
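// The externally-signed party should now be visible via GetParties. +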
assertEquals(parties.partyDetails.map(_.party), Seq(response.partyId))
+ }
+ })
+
+}
diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/PartyManagementServiceUpdateRpcIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/PartyManagementServiceUpdateRpcIT.scala
new file mode 100644
index 0000000000..aca11f5bd0
--- /dev/null
+++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/PartyManagementServiceUpdateRpcIT.scala
@@ -0,0 +1,182 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.api.testtool.suites.v2_1
+
+import com.daml.ledger.api.testtool.infrastructure.Allocation.{
+ NoParties,
+ Participant,
+ Participants,
+ allocate,
+}
+import com.daml.ledger.api.testtool.infrastructure.Assertions.*
+import com.daml.ledger.api.testtool.infrastructure.TestConstraints
+import com.daml.ledger.api.v2.admin.object_meta.ObjectMeta
+import com.daml.ledger.api.v2.admin.party_management_service.{
+ PartyDetails,
+ UpdatePartyDetailsRequest,
+ UpdatePartyDetailsResponse,
+}
+import com.digitalasset.canton.ledger.error.groups.{AdminServiceErrors, RequestValidationErrors}
+import com.google.protobuf.field_mask.FieldMask
+
+class PartyManagementServiceUpdateRpcIT extends PartyManagementITBase {
+
+ testWithFreshPartyDetails(
+ "PMUpdateAllUpdatableFields",
+ "Update all updatable fields",
+ )(annotations = Map("k1" -> "v1", "k2" -> "v2", "k3" -> "v3"))(implicit ec =>
+ implicit ledger =>
+ partyDetails =>
+ ledger
+ .updatePartyDetails(
+ updateRequest(
+ party = partyDetails.party,
+ annotations = Map("k1" -> "v1a", "k3" -> "", "k4" -> "v4", "k5" -> ""),
+ updatePaths = Seq(
+ "local_metadata.annotations"
+ ),
+ )
+ )
+ .map { updateResp =>
+ assertEquals(
+ "updating party details",
+ unsetResourceVersion(updateResp),
+ UpdatePartyDetailsResponse(
+ Some(
+ PartyDetails(
+ party = partyDetails.party,
+ isLocal = partyDetails.isLocal,
+ localMetadata = Some(
+ ObjectMeta(
+ resourceVersion = "",
+ annotations = Map("k1" -> "v1a", "k2" -> "v2", "k4" -> "v4"),
+ )
+ ),
+ identityProviderId = "",
+ )
+ )
+ ),
+ )
+ }
+ )
+
+ testWithFreshPartyDetails(
+ "PMFailAttemptingToUpdateIsLocal",
+ "Fail attempting to update is_local attribute",
+ )()(implicit ec =>
+ implicit ledger =>
+ partyDetails =>
+ ledger
+ .updatePartyDetails(
+ updateRequest(
+ party = partyDetails.party,
+ isLocal = !partyDetails.isLocal,
+ updatePaths = Seq(
+ "is_local"
+ ),
+ )
+ )
+ .mustFailWith(
+ "attempting to update the is_local attribute",
+ errorCode = AdminServiceErrors.PartyManagement.InvalidUpdatePartyDetailsRequest,
+ )
+ )
+
+ testWithFreshPartyDetails(
+ "PMAllowSpecifyingIsLocalAndDisplayNameIfMatchingTheRealValues",
+ "Allow specifying is_local if values in the update request match real values",
+ )()(implicit ec =>
+ implicit ledger =>
+ partyDetails =>
+ ledger
+ .updatePartyDetails(
+ updateRequest(
+ party = partyDetails.party,
+ isLocal = partyDetails.isLocal,
+ updatePaths = Seq(
+ "is_local"
+ ),
+ )
+ )
+ .map { updateResp =>
+ assertEquals(
+ "updating party details with matching is_local",
+ unsetResourceVersion(updateResp),
+ unsetResourceVersion(UpdatePartyDetailsResponse(Some(partyDetails))),
+ )
+ }
+ )
+
+ testWithFreshPartyDetails(
+ "UpdatePartyDetailsEvenIfMetadataIsNotSetInUpdateRequest",
+ "Update party details even if the metadata field is not set in the
update request", + )()(implicit ec => + implicit ledger => + partyDetails => + ledger + .updatePartyDetails( + UpdatePartyDetailsRequest( + partyDetails = Some( + PartyDetails( + party = partyDetails.party, + isLocal = false, + localMetadata = None, + identityProviderId = "", + ) + ), + updateMask = Some(FieldMask(Seq("party"))), + ) + ) + .map { updateResp => + assertEquals( + "update with the metadata not set in the request", + unsetResourceVersion(updateResp), + UpdatePartyDetailsResponse(Some(newPartyDetails(partyDetails.party))), + ) + } + ) + + test( + "FailingUpdateRequestsWhenPartyDetailsFieldIsUnset", + "Failing an update request when party_details field is unset", + allocate(NoParties), + limitation = TestConstraints.GrpcOnly( + "JSON API returns a different error when party id is not set in update" + ), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + ledger + .updatePartyDetails( + UpdatePartyDetailsRequest( + partyDetails = None, + updateMask = Some(FieldMask(Seq("local_metadata"))), + ) + ) + .mustFailWith( + "update with an unknown update path", + errorCode = RequestValidationErrors.MissingField, + ) + }) + + test( + "FailUpdateNonExistentParty", + "Fail when attempting to update a non-existent party", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + val party = ledger.nextPartyId() + for { + _ <- ledger + .updatePartyDetails( + updateRequest( + party = party, + annotations = Map("k1" -> "v1"), + updatePaths = Seq("local_metadata.annotations"), + ) + ) + .mustFailWith( + "updating a non-existent party", + errorCode = AdminServiceErrors.PartyManagement.PartyNotFound, + ) + } yield () + }) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/SemanticTests.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/SemanticTests.scala new file mode 100644 index 0000000000..74c9f6fb3b --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/SemanticTests.scala @@ -0,0 +1,590 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+
+package com.daml.ledger.api.testtool.suites.v2_1
+
+import com.daml.ledger.api.testtool.infrastructure.Allocation.*
+import com.daml.ledger.api.testtool.infrastructure.Assertions.*
+import com.daml.ledger.api.testtool.infrastructure.Eventually.eventually
+import com.daml.ledger.api.testtool.infrastructure.TransactionHelpers.*
+import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext
+import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, Party, TestConstraints}
+import com.daml.ledger.api.v2.commands.DisclosedContract
+import com.daml.ledger.api.v2.transaction_filter.CumulativeFilter.IdentifierFilter
+import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_ACS_DELTA
+import com.daml.ledger.api.v2.transaction_filter.{
+ CumulativeFilter,
+ EventFormat,
+ Filters,
+ TemplateFilter,
+ TransactionFormat,
+}
+import com.daml.ledger.api.v2.value.{Record, RecordField, Value}
+import com.daml.ledger.javaapi.data.codegen.ContractCompanion
+import com.daml.ledger.test.java.semantic.semantictests.{PaintOffer, *}
+import com.digitalasset.canton.discard.Implicits.DiscardOps
+import com.digitalasset.canton.ledger.error.groups.{CommandExecutionErrors, ConsistencyErrors}
+
+import java.math.BigDecimal
+import scala.concurrent.{ExecutionContext, Future}
+import scala.jdk.CollectionConverters.*
+import scala.util.Success
+
+final class SemanticTests extends LedgerTestSuite {
+ import CompanionImplicits.*
+ implicit val delegationCompanion
+ : ContractCompanion.WithoutKey[Delegation.Contract, Delegation.ContractId, Delegation] =
+ Delegation.COMPANION
+ implicit val sharedContractCompanion: ContractCompanion.WithoutKey[
+ SharedContract.Contract,
+ SharedContract.ContractId,
+ SharedContract,
+ ] = SharedContract.COMPANION
+ implicit val paintOfferCompanion
+ : ContractCompanion.WithoutKey[PaintOffer.Contract, PaintOffer.ContractId, PaintOffer] =
+ PaintOffer.COMPANION
+
+ private[this] val onePound = new Amount(BigDecimal.valueOf(1), "GBP")
+ private[this] val twoPounds = new Amount(BigDecimal.valueOf(2), "GBP")
+
+ /*
+ * Consistency
+ *
+ * A transaction is internally consistent for a contract
+ * `c` if the following hold for all its subactions `act`
+ * on the contract `c`:
+ *
+ * 1. `act` does not happen before any Create c action (correct by construction in Daml)
+ * 2. `act` does not happen after the contract has been consumed (i.e.
no double spending)
+ */
+
+ test(
+ "SemanticDoubleSpendBasic",
+ "Cannot double spend across transactions",
+ allocate(TwoParties, TwoParties),
+ )(implicit ec => {
+ case Participants(
+ Participant(alpha, Seq(payer, owner)),
+ Participant(_, Seq(newOwner, leftWithNothing)),
+ ) =>
+ for {
+ iou <- alpha.create(payer, new Iou(payer, owner, onePound))
+ _ <- alpha.exercise(owner, iou.exerciseTransfer(newOwner))
+ failure <- alpha
+ .exercise(owner, iou.exerciseTransfer(leftWithNothing))
+ .mustFail("consuming a contract twice")
+ } yield {
+ assertGrpcError(
+ failure,
+ ConsistencyErrors.ContractNotFound,
+ Some("Contract could not be found"),
+ checkDefiniteAnswerMetadata = true,
+ )
+ }
+ })
+
+ test(
+ "SemanticConcurrentDoubleSpend",
+ "Cannot concurrently double spend across transactions",
+ allocate(TwoParties, SingleParty),
+ )(implicit ec => {
+ case p @ Participants(Participant(alpha, Seq(payer, owner1)), Participant(beta, Seq(owner2))) =>
+ // This test creates a contract and then concurrently archives it several times
+ // through two different participants
+ val creates = 2 // Number of contracts to create
+ val archives = 10 // Number of concurrent archives per contract
+ // Each created contract is archived in parallel;
+ // the next contract is created only once the previous one has been archived
+ (1 to creates)
+ .foldLeft(Future(())) {
+ (f, c) =>
+ f.flatMap(_ =>
+ for {
+ shared <- alpha.create(payer, new SharedContract(payer, owner1, owner2))
+ _ <- p.synchronize
+ results <- Future.traverse(1 to archives) {
+ case i if i % 2 == 0 =>
+ alpha
+ .exercise(owner1, shared.exerciseSharedContract_Consume1())
+ .transform(Success(_))
+ case _ =>
+ beta
+ .exercise(owner2, shared.exerciseSharedContract_Consume2())
+ .transform(Success(_))
+ }
+ } yield {
+ assertLength(
+ s"Contract $c successful archives",
+ 1,
+ results.filter(_.isSuccess),
+ ).discard
+ assertLength(
+ s"Contract $c failed archives",
+ archives - 1,
+ results.filter(_.isFailure),
+ ).discard
+ }
+ )
+ }
+ })
+
+ test(
+ "SemanticDoubleSpendSameTx",
+ "Cannot double spend within a transaction",
+ allocate(TwoParties, TwoParties),
+ )(implicit ec => {
+ case Participants(
+ Participant(alpha, Seq(payer, owner)),
+ Participant(_, Seq(newOwner1, newOwner2)),
+ ) =>
+ for {
+ iou <- alpha.create(payer, new Iou(payer, owner, onePound))
+ doubleSpend = alpha.submitAndWaitRequest(
+ owner,
+ (iou.exerciseTransfer(newOwner1).commands.asScala ++ iou
+ .exerciseTransfer(newOwner2)
+ .commands
+ .asScala).asJava,
+ )
+ failure <- alpha.submitAndWait(doubleSpend).mustFail("consuming a contract twice")
+ } yield {
+ assertGrpcError(
+ failure,
+ CommandExecutionErrors.Interpreter.ContractNotActive,
+ Some("Update failed due to fetch of an inactive contract"),
+ checkDefiniteAnswerMetadata = true,
+ )
+ }
+ })
+
+ test(
+ "SemanticDoubleSpendShared",
+ "Different parties cannot spend the same contract",
+ allocate(TwoParties, SingleParty),
+ )(implicit ec => {
+ case p @ Participants(Participant(alpha, Seq(payer, owner1)), Participant(beta, Seq(owner2))) =>
+ for {
+ shared <- alpha.create(payer, new SharedContract(payer, owner1, owner2))
+ _ <- alpha.exercise(owner1, shared.exerciseSharedContract_Consume1())
+ _ <- p.synchronize
+ failure <- beta
+ .exercise(owner2, shared.exerciseSharedContract_Consume2())
+ .mustFail("consuming a contract twice")
+ } yield {
+ assertGrpcError(
+ failure,
+ ConsistencyErrors.ContractNotFound,
+ Some("Contract could not be found"),
+ checkDefiniteAnswerMetadata = true,
+ )
+ }
+ })
+
+ /*
+ *
Authorization + * + * A commit is well-authorized if every subaction `act` of the commit is + * authorized by at least all of the required authorizers to `act`, where: + * + * 1. the required authorizers of a Create action on a contract `c` are the signatories of `c` + * 2. the required authorizers of an Exercise or a Fetch action are its actors + */ + + test( + "SemanticPaintOffer", + "Conduct the paint offer workflow successfully", + allocate(TwoParties, SingleParty), + limitation = TestConstraints.GrpcOnly( + "RecordId information is not propagated by transcode/SchemaProcessor" + ), + )(implicit ec => { + case Participants(Participant(alpha, Seq(bank, houseOwner)), Participant(beta, Seq(painter))) => + for { + iou <- alpha.create(bank, new Iou(bank, houseOwner, onePound)) + offer <- beta.create(painter, new PaintOffer(painter, houseOwner, bank, onePound)) + tree <- eventually("Exercise paint offer") { + alpha.exercise(houseOwner, offer.exercisePaintOffer_Accept(iou)) + } + } yield { + val agreement = assertSingleton( + "SemanticPaintOffer", + createdEvents(tree).filter(_.getTemplateId == PaintAgree.TEMPLATE_ID_WITH_PACKAGE_ID.toV1), + ) + assertEquals( + "Paint agreement parameters", + agreement.getCreateArguments, + Record( + recordId = Some(PaintAgree.TEMPLATE_ID_WITH_PACKAGE_ID.toV1), + fields = Seq( + RecordField("painter", Some(Value(Value.Sum.Party(painter)))), + RecordField("houseOwner", Some(Value(Value.Sum.Party(houseOwner)))), + ), + ), + ) + } + }) + + test( + "SemanticPaintCounterOffer", + "Conduct the paint counter-offer workflow successfully", + allocate(TwoParties, SingleParty), + limitation = TestConstraints.GrpcOnly( + "RecordId information is not propagated by transcode/SchemaProcessor" + ), + )(implicit ec => { + case Participants(Participant(alpha, Seq(bank, houseOwner)), Participant(beta, Seq(painter))) => + for { + iou <- alpha.create(bank, new Iou(bank, houseOwner, onePound)) + txReq <- alpha.getTransactionsRequest( + TransactionFormat( + eventFormat = Some( + EventFormat( + filtersByParty = Map( + houseOwner.getValue -> Filters( + Seq( + CumulativeFilter( + IdentifierFilter.TemplateFilter( + TemplateFilter( + Some(Iou.TEMPLATE_ID.toV1), + includeCreatedEventBlob = true, + ) + ) + ) + ) + ) + ), + filtersForAnyParty = None, + verbose = false, + ) + ), + transactionShape = TRANSACTION_SHAPE_ACS_DELTA, + ) + ) + iouTxsForHouseOwner <- alpha.transactions(txReq) + iouCreatedEvent = createdEvents(iouTxsForHouseOwner.head).head + iouDisclosedContract = DisclosedContract( + templateId = iouCreatedEvent.templateId, + contractId = iouCreatedEvent.contractId, + createdEventBlob = iouCreatedEvent.createdEventBlob, + synchronizerId = "", + ) + offer <- beta.create(painter, new PaintOffer(painter, houseOwner, bank, twoPounds)) + counter <- eventually("exerciseAndGetContract") { + alpha.exerciseAndGetContract[PaintCounterOffer.ContractId, PaintCounterOffer]( + houseOwner, + offer.exercisePaintOffer_Counter(iou), + )(PaintCounterOffer.COMPANION) + } + tx <- eventually("exercisePaintCounterOffer") { + val request = beta + .submitAndWaitForTransactionRequest( + painter, + counter.exercisePaintCounterOffer_Accept().commands, + ) + .update(_.commands.disclosedContracts := Seq(iouDisclosedContract)) + beta.submitAndWaitForTransaction(request).map(_.getTransaction) + } + } yield { + val agreement = assertSingleton( + "SemanticPaintCounterOffer", + createdEvents(tx).filter( + _.getTemplateId == PaintAgree.TEMPLATE_ID_WITH_PACKAGE_ID.toV1 + ), + ) + assertEquals( + "Paint agreement 
parameters", + agreement.getCreateArguments, + Record( + recordId = Some(PaintAgree.TEMPLATE_ID_WITH_PACKAGE_ID.toV1), + fields = Seq( + RecordField("painter", Some(Value(Value.Sum.Party(painter)))), + RecordField("houseOwner", Some(Value(Value.Sum.Party(houseOwner)))), + ), + ), + ) + } + }) + + test( + "SemanticPartialSignatories", + "A signatory should not be able to create a contract on behalf of two parties", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case Participants(Participant(alpha, Seq(houseOwner)), Participant(_, Seq(painter))) => + for { + failure <- alpha + .create(houseOwner, new PaintAgree(painter, houseOwner))(PaintAgree.COMPANION) + .mustFail("creating a contract on behalf of two parties") + } yield { + assertGrpcError( + failure, + CommandExecutionErrors.Interpreter.AuthorizationError, + Some("requires authorizers"), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "SemanticAcceptOnBehalf", + "It should not be possible to exercise a choice without the consent of the controller", + allocate(TwoParties, SingleParty), + )(implicit ec => { + case Participants(Participant(alpha, Seq(bank, houseOwner)), Participant(beta, Seq(painter))) => + for { + iou <- beta.create(painter, new Iou(painter, houseOwner, onePound)) + offer <- beta.create(painter, new PaintOffer(painter, houseOwner, bank, onePound)) + failure <- beta + .exercise(painter, offer.exercisePaintOffer_Accept(iou)) + .mustFail("exercising a choice without consent") + } yield { + assertGrpcError( + failure, + CommandExecutionErrors.Interpreter.AuthorizationError, + Some("requires authorizers"), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + /* + * Privacy + * + * Visibility of contracts we fetch. Since the Ledger API has + * no fetch operation built-in, we use a template with a choice + * that causes the fetch. 
+ */ + + test( + "SemanticPrivacyProjections", + "Test visibility via contract fetches for the paint-offer flow", + allocate(TwoParties, SingleParty), + timeoutScale = 2.0, + )(implicit ec => { + case p @ Participants( + Participant(alpha, Seq(bank, houseOwner)), + Participant(beta, Seq(painter)), + ) => + for { + iou <- alpha.create(bank, new Iou(bank, houseOwner, onePound)) + _ <- p.synchronize + + // The IOU should be visible only to the payer and the owner + _ <- fetchIou(alpha, bank, iou) + _ <- fetchIou(alpha, houseOwner, iou) + iouFetchFailure <- fetchIou(beta, painter, iou) + .mustFail("fetching the IOU with the wrong party") + + offer <- beta.create(painter, new PaintOffer(painter, houseOwner, bank, onePound)) + _ <- p.synchronize + + // The house owner and the painter can see the offer but the bank can't + _ <- fetchPaintOffer(alpha, houseOwner, offer) + _ <- fetchPaintOffer(beta, painter, offer) + paintOfferFetchFailure <- fetchPaintOffer(alpha, bank, offer) + .mustFail("fetching the offer with the wrong party") + + tree <- alpha.exercise(houseOwner, offer.exercisePaintOffer_Accept(iou)) + (newIouEvents, agreementEvents) = createdEvents(tree).partition( + _.getTemplateId == Iou.TEMPLATE_ID_WITH_PACKAGE_ID.toV1 + ) + newIouEvent <- Future(newIouEvents.head) + agreementEvent <- Future(agreementEvents.head) + newIou = new Iou.ContractId(newIouEvent.contractId) + agreement = new PaintAgree.ContractId(agreementEvent.contractId) + _ <- p.synchronize + + // The Bank can see the new IOU, but it cannot see the PaintAgree contract + _ <- fetchIou(alpha, bank, newIou) + paintAgreeFetchFailure <- fetchPaintAgree(alpha, bank, agreement) + .mustFail("fetching the agreement with the wrong party") + + // The house owner and the painter can see the contract + _ <- fetchPaintAgree(beta, painter, agreement) + _ <- fetchPaintAgree(alpha, houseOwner, agreement) + + // The painter sees its new IOU but the house owner cannot see it + _ <- fetchIou(beta, painter, newIou) + withoutExplicitDisclosureFailure <- fetchIou(alpha, houseOwner, newIou) + .mustFail("fetching the new IOU without explicit disclosure") + txReq <- alpha.getTransactionsRequest( + TransactionFormat( + eventFormat = Some( + EventFormat( + filtersByParty = Map( + bank.getValue -> Filters( + Seq( + CumulativeFilter( + IdentifierFilter.TemplateFilter( + TemplateFilter( + Some(Iou.TEMPLATE_ID.toV1), + includeCreatedEventBlob = true, + ) + ) + ) + ) + ) + ), + filtersForAnyParty = None, + verbose = false, + ) + ), + transactionShape = TRANSACTION_SHAPE_ACS_DELTA, + ) + ) + iouTxsForBank <- alpha.transactions(txReq) + iouCreatedEvent = iouTxsForBank.flatMap(createdEvents)(1) + iouDisclosedContract = DisclosedContract( + templateId = iouCreatedEvent.templateId, + contractId = iouCreatedEvent.contractId, + createdEventBlob = iouCreatedEvent.createdEventBlob, + synchronizerId = "", + ) + wrongPartyFailure <- fetchIouWithExplicitDisclosure( + alpha, + houseOwner, + newIou, + iouDisclosedContract, + ) + .mustFail("fetching the new IOU with the wrong party") + } yield { + assertEquals( + "the second created contract should be the new IOU", + iouCreatedEvent.contractId, + newIou.contractId, + ) + assertGrpcError( + iouFetchFailure, + ConsistencyErrors.ContractNotFound, + Some("Contract could not be found"), + checkDefiniteAnswerMetadata = true, + ) + assertGrpcError( + paintOfferFetchFailure, + ConsistencyErrors.ContractNotFound, + Some("Contract could not be found"), + checkDefiniteAnswerMetadata = true, + ) + assertGrpcError( + 
paintAgreeFetchFailure, + ConsistencyErrors.ContractNotFound, + Some("Contract could not be found"), + checkDefiniteAnswerMetadata = true, + ) + assertGrpcError( + withoutExplicitDisclosureFailure, + ConsistencyErrors.ContractNotFound, + Some("Contract could not be found"), + checkDefiniteAnswerMetadata = true, + ) + assertGrpcError( + wrongPartyFailure, + CommandExecutionErrors.Interpreter.AuthorizationError, + Some("requires one of the stakeholders"), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + private def fetchIou( + ledger: ParticipantTestContext, + party: Party, + iou: Iou.ContractId, + )(implicit ec: ExecutionContext): Future[Unit] = + for { + fetch <- ledger.create(party, new FetchIou(party, iou))(FetchIou.COMPANION) + _ <- ledger.exercise(party, fetch.exerciseFetchIou_Fetch()) + } yield () + + private def fetchIouWithExplicitDisclosure( + ledger: ParticipantTestContext, + party: Party, + iou: Iou.ContractId, + iouDisclosedContract: DisclosedContract, + )(implicit ec: ExecutionContext): Future[Unit] = + for { + fetch <- ledger.create(party, new FetchIou(party, iou))(FetchIou.COMPANION) + request = ledger + .submitAndWaitRequest( + party, + fetch.exerciseFetchIou_Fetch().commands(), + ) + .update(_.commands.disclosedContracts := Seq(iouDisclosedContract)) + _ <- ledger.submitAndWait(request).map(_ => ()) + } yield () + + private def fetchPaintOffer( + ledger: ParticipantTestContext, + party: Party, + paintOffer: PaintOffer.ContractId, + )(implicit ec: ExecutionContext): Future[Unit] = + for { + fetch <- ledger.create(party, new FetchPaintOffer(party, paintOffer))( + FetchPaintOffer.COMPANION + ) + _ <- ledger.exercise(party, fetch.exerciseFetchPaintOffer_Fetch()) + } yield () + + private def fetchPaintAgree( + ledger: ParticipantTestContext, + party: Party, + agreement: PaintAgree.ContractId, + )(implicit ec: ExecutionContext): Future[Unit] = + for { + fetch <- ledger.create(party, new FetchPaintAgree(party, agreement))( + FetchPaintAgree.COMPANION + ) + _ <- ledger.exercise(party, fetch.exerciseFetchPaintAgree_Fetch()) + } yield () + + /* + * Divulgence + */ + + test("SemanticDivulgence", "Respect divulgence rules", allocate(TwoParties, SingleParty))( + implicit ec => { + case p @ Participants( + Participant(alpha, Seq(issuer, owner)), + Participant(beta, Seq(delegate)), + ) => + for { + token <- alpha.create(issuer, new Token(issuer, owner, 1))(Token.COMPANION) + delegation <- alpha.create[Delegation.ContractId, Delegation]( + owner, + new Delegation(owner, delegate), + )(Delegation.COMPANION) + + // The owner tries to divulge with a non-consuming choice, which actually doesn't work + noDivulgeToken <- alpha.create(owner, new Delegation(owner, delegate))( + Delegation.COMPANION + ) + _ <- alpha + .exercise(owner, noDivulgeToken.exerciseDelegation_Wrong_Divulge_Token(token)) + _ <- p.synchronize + failureNonConsuming <- beta + .exercise(delegate, delegation.exerciseDelegation_Token_Consume(token)) + .mustFail("divulging with a non-consuming choice") + + // The owner tries to divulge with a consuming choice, which actually doesn't work either + divulgeToken <- alpha.create(owner, new Delegation(owner, delegate))(Delegation.COMPANION) + _ <- alpha.exercise(owner, divulgeToken.exerciseDelegation_Divulge_Token(token)) + failureConsuming <- beta + .exercise(delegate, delegation.exerciseDelegation_Token_Consume(token)) + .mustFail("divulging with a consuming choice") + } yield { + assertGrpcError( + failureNonConsuming, + ConsistencyErrors.ContractNotFound, + Some("Contract 
could not be found"), + checkDefiniteAnswerMetadata = true, + ) + assertGrpcError( + failureConsuming, + ConsistencyErrors.ContractNotFound, + Some("Contract could not be found"), + checkDefiniteAnswerMetadata = true, + ) + } + } + ) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/StateServiceIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/StateServiceIT.scala new file mode 100644 index 0000000000..3b7abd0c3c --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/StateServiceIT.scala @@ -0,0 +1,21 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite + +final class StateServiceIT extends LedgerTestSuite { + test( + "StateServiceGetConnectedSynchronizersWithoutParty", + "Get connected synchronizers without party filter", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + connectedSynchronizers <- ledger.getConnectedSynchronizers(None, None) + } yield { + assert(connectedSynchronizers.sizeIs > 0, "Expected connected synchronizers") + } + }) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TimeServiceIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TimeServiceIT.scala new file mode 100644 index 0000000000..822b9938cb --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TimeServiceIT.scala @@ -0,0 +1,124 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.javaapi.data.codegen.ContractCompanion +import com.daml.ledger.test.java.semantic.timetests.* +import com.digitalasset.base.error.{ErrorCategory, ErrorCode} +import com.digitalasset.canton.ledger.error.groups.{CommandExecutionErrors, RequestValidationErrors} + +import scala.concurrent.Future + +final class TimeServiceIT extends LedgerTestSuite { + implicit val timecheckerCompanion + : ContractCompanion.WithoutKey[TimeChecker.Contract, TimeChecker.ContractId, TimeChecker] = + TimeChecker.COMPANION + + test( + "TSTimeIsStatic", + "Time stands still when static time enabled", + allocate(NoParties), + runConcurrently = false, + enabled = _.staticTime, + disabledReason = "requires ledger static time feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + initialTime <- ledger.time() + _ <- Future(Thread.sleep(100)) + laterTime <- ledger.time() + } yield { + assertEquals("ledger time should stand still", laterTime, initialTime) + } + }) + + test( + "TSTimeCanBeAdvanced", + "Time can be advanced when static time enabled", + allocate(NoParties), + runConcurrently = false, + enabled = _.staticTime, + disabledReason = "requires ledger static time feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + initialTime <- ledger.time() + thirtySecLater = initialTime.plusSeconds(30) + _ <- ledger.setTime(initialTime, thirtySecLater) + laterTime <- ledger.time() + } yield { + assertEquals("ledger time should be advanced", laterTime, thirtySecLater) + } + }) + + test( + "TSTimeAdvancementCanFail", + "Time advancement can fail when current time is not accurate", + allocate(NoParties), + runConcurrently = false, + enabled = _.staticTime, + disabledReason = "requires ledger static time feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + initialTime <- ledger.time() + invalidInitialTime = initialTime.plusSeconds(1) + thirtySecLater = initialTime.plusSeconds(30) + _ <- ledger + .setTime(invalidInitialTime, thirtySecLater) + .mustFailWith( + "current_time mismatch", + RequestValidationErrors.InvalidArgument, + ) + } yield () + }) + + test( + "TSFailWhenTimeNotAdvanced", + "The submission of an exercise before time advancement should fail", + allocate(SingleParty), + runConcurrently = false, + enabled = _.staticTime, + disabledReason = "requires ledger static time feature", + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + initialTime <- ledger.time() + thirtySecLater = initialTime.plusSeconds(30) + checker <- ledger.create(party, new TimeChecker(party, thirtySecLater)) + failure <- ledger + .exercise(party, checker.exerciseTimeChecker_CheckTime()) + .mustFail("submitting choice prematurely") + } yield { + assertGrpcError( + failure, + new ErrorCode( + CommandExecutionErrors.Interpreter.FailureStatus.id, + ErrorCategory.InvalidGivenCurrentSystemStateOther, + )( + CommandExecutionErrors.Interpreter.FailureStatus.parent + ) {}, + Some("UNHANDLED_EXCEPTION"), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "TSSucceedWhenTimeAdvanced", + "The submission of an exercise after time advancement should succeed", + allocate(SingleParty), + runConcurrently = false, + enabled = _.staticTime, + 
disabledReason = "requires ledger static time feature", + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + initialTime <- ledger.time() + thirtySecLater = initialTime.plusSeconds(30) + checker <- ledger.create(party, new TimeChecker(party, thirtySecLater)) + _ <- ledger.setTime(initialTime, initialTime.plusSeconds(30)) + _ <- ledger.exercise(party, checker.exerciseTimeChecker_CheckTime()) + } yield () + }) + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TlsIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TlsIT.scala new file mode 100644 index 0000000000..cc9f578d05 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TlsIT.scala @@ -0,0 +1,172 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.{NoParties, allocate} +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.testtool.infrastructure.{Endpoint, LedgerTestSuite} +import com.daml.ledger.api.v2.version_service.VersionServiceGrpc.VersionServiceBlockingStub +import com.daml.ledger.api.v2.version_service.{GetLedgerApiVersionRequest, VersionServiceGrpc} +import com.daml.ledger.resources.{ResourceContext, ResourceOwner} +import com.daml.tls.TlsVersion +import com.daml.tls.TlsVersion.TlsVersion +import com.digitalasset.canton.config.TlsClientConfig +import com.digitalasset.canton.networking.grpc.ClientChannelBuilder +import io.grpc.StatusRuntimeException +import io.grpc.netty.shaded.io.grpc.netty.NettyChannelBuilder + +import scala.concurrent.Future +import scala.concurrent.duration.* +import scala.util.{Failure, Success, Try} + +/** Verifies that a participant server correctly handles TLSv1.3 only mode, i.e.: + * - accepts TLSv1.3 connections, + * - rejects TLSv1.2 (or lower) connections. + */ +final class TLSOnePointThreeIT( + clientTlsConfiguration: Option[TlsClientConfig] +) extends TlsIT( + shortIdentifierPrefix = "ServerOnTLSv13ConnectionFromClientOn", + clientTlsConfiguration, + ) { + testTlsConnection( + clientTlsVersions = Seq[TlsVersion](TlsVersion.V1_2, TlsVersion.V1_3), + assertConnectionOk = true, + ) + testTlsConnection(clientTlsVersion = TlsVersion.V1_3, assertConnectionOk = true) + testTlsConnection(clientTlsVersion = TlsVersion.V1_2, assertConnectionOk = false) + testTlsConnection(clientTlsVersion = TlsVersion.V1_1, assertConnectionOk = false) + testTlsConnection(clientTlsVersion = TlsVersion.V1, assertConnectionOk = false) +} + +/** Verifies that a participant server disallows TLSv1.1 or older, i.e.: + * - accepts either TLSv1.2 or TLSv1.3 connections, + * - rejects TLSv1.1 (or lower) connections. 
+ */ +final class TLSAtLeastOnePointTwoIT( + clientTlsConfiguration: Option[TlsClientConfig] +) extends TlsIT( + shortIdentifierPrefix = "ServerOnTLSConnectionFromClientOn", + clientTlsConfiguration, + ) { + testTlsConnection( + clientTlsVersions = Seq[TlsVersion](TlsVersion.V1_2, TlsVersion.V1_3), + assertConnectionOk = true, + ) + testTlsConnection(clientTlsVersion = TlsVersion.V1_3, assertConnectionOk = true) + testTlsConnection(clientTlsVersion = TlsVersion.V1_2, assertConnectionOk = true) + testTlsConnection(clientTlsVersion = TlsVersion.V1_1, assertConnectionOk = false) + testTlsConnection(clientTlsVersion = TlsVersion.V1, assertConnectionOk = false) +} + +/** Verifies that the given participant server correctly handles client connections over selected + * TLS versions. + * + * It works by creating and exercising a series of client service stubs, each over a different + * TLS version. + */ +abstract class TlsIT( + shortIdentifierPrefix: String, + clientTlsConfiguration: Option[TlsClientConfig], +) extends LedgerTestSuite { + + def testTlsConnection(clientTlsVersion: TlsVersion, assertConnectionOk: Boolean): Unit = + testTlsConnection( + clientTlsVersions = Seq(clientTlsVersion), + assertConnectionOk = assertConnectionOk, + ) + + def testTlsConnection(clientTlsVersions: Seq[TlsVersion], assertConnectionOk: Boolean): Unit = { + + val (what, assertionOnServerResponse) = + if (assertConnectionOk) + ("accept", assertSuccessfulConnection) + else + ("reject", assertFailedConnection) + + val clientTlsVersionsText = clientTlsVersions + .map(_.version.replace(".", "")) + .mkString("and") + + testGivenAllParticipants( + s"$shortIdentifierPrefix$clientTlsVersionsText", + s"A ledger API server should $what a ${clientTlsVersions.mkString(", ")} connection", + allocate(NoParties), + ) { implicit ec => (testContexts: Seq[ParticipantTestContext]) => + { case _ => + // preconditions + assume(testContexts.nonEmpty, "Missing an expected participant test context!") + val firstTestContext = testContexts.head + assume( + clientTlsConfiguration.isDefined, + "Missing required TLS configuration!", + ) + val tlsConfiguration = clientTlsConfiguration.get + val Endpoint.Remote(ledgerHostname, ledgerPort) = + firstTestContext.ledgerEndpoint + .getOrElse( + throw new UnsupportedOperationException("This test works only for gRPC connections") + ) + .endpoint + .asInstanceOf[Endpoint.Remote] + + // given + val sslContext = + ClientChannelBuilder.sslContext(tlsConfiguration, enabledProtocols = clientTlsVersions) + val serviceStubOwner: ResourceOwner[VersionServiceBlockingStub] = for { + channel <- ResourceOwner.forChannel( + builder = NettyChannelBuilder + .forAddress(ledgerHostname, ledgerPort) + .useTransportSecurity() + .sslContext(sslContext), + shutdownTimeout = 2.seconds, + ) + } yield VersionServiceGrpc.blockingStub(channel) + + // when + val response: Future[String] = serviceStubOwner.use { versionService => + val response = versionService.getLedgerApiVersion(new GetLedgerApiVersionRequest()) + Future.successful(response.version) + }(ResourceContext(ec)) + + // then + response.transform[Unit] { + assertionOnServerResponse + } + } + } + } + + private lazy val assertSuccessfulConnection: Try[String] => Try[Unit] = { + case Success(version) => + Try[Unit] { + assert( + assertion = version ne null, + message = s"Expected a non-null version!", + ) + } + case Failure(exception) => + throw new AssertionError(s"Failed to receive a successful server response!", exception) + } + + private lazy val assertFailedConnection: Try[String] =>
Try[Unit] = { + case Success(version) => + Try[Unit] { + assert( + assertion = false, + message = + s"Connection succeeded and returned version: $version but expected connection failure!", + ) + } + case Failure(_: StatusRuntimeException) => Success[Unit](()) + case Failure(other) => + Try[Unit] { + assert( + assertion = false, + message = s"Unexpected failure: $other", + ) + } + } + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceArgumentsIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceArgumentsIT.scala new file mode 100644 index 0000000000..6b0b275625 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceArgumentsIT.scala @@ -0,0 +1,156 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.api.testtool.infrastructure.TransactionHelpers.* +import com.daml.ledger.api.v2.value.Record.toJavaProto +import com.daml.ledger.api.v2.value.{Record, Value} +import com.daml.ledger.test.java.model.test.{ + Address, + Choice1, + Dummy, + NestedOptionalInteger, + ParameterShowcase, + optionalinteger, +} +import com.digitalasset.canton.ledger.api.util.TimestampConversion + +import java.math.BigDecimal +import java.util.List as JList +import scala.jdk.CollectionConverters.* +import scala.jdk.OptionConverters.* + +class TransactionServiceArgumentsIT extends LedgerTestSuite { + import ClearIdsImplicits.* + import CompanionImplicits.* + + test( + "TXCreateWithAnyType", + "Creates should not have issues dealing with any type of argument", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val template = new ParameterShowcase( + party, + 42L, + new BigDecimal("47.0000000000"), + "some text", + true, + TimestampConversion.MIN, + new NestedOptionalInteger(new optionalinteger.SomeInteger(-1L)), + JList.of(0, 1, 2, 3), + Some("some optional text").toJava, + ) + val create = ledger.submitAndWaitForTransactionRequest(party, template.create.commands) + for { + transactionResponse <- ledger.submitAndWaitForTransaction(create) + } yield { + val transaction = transactionResponse.getTransaction + val contract = assertSingleton("CreateWithAnyType", createdEvents(transaction)) + assertEquals( + "CreateWithAnyType", + contract.getCreateArguments.clearValueIds, + Record.fromJavaProto(template.toValue.toProtoRecord), + ) + } + }) + + test( + "TXExerciseWithAnyType", + "Exercise should not have issues dealing with any type of argument", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val template = new ParameterShowcase( + party, + 42L, + new BigDecimal("47.0000000000"), + "some text", + true, + TimestampConversion.MIN, + new NestedOptionalInteger(new optionalinteger.SomeInteger(-1L)), + List(0L, 1L, 2L, 3L).map(long2Long).asJava, + Some("some optional text").toJava, + ) + val choice1 = new Choice1( + template.integer, + new BigDecimal("37.0000000000"), + template.text, + template.bool, + template.time, + 
template.nestedOptionalInteger, + template.integerList, + template.optionalText, + ) + for { + parameterShowcase <- ledger.create( + party, + template, + )(ParameterShowcase.COMPANION) + tree <- ledger.exercise(party, parameterShowcase.exerciseChoice1(choice1)) + } yield { + val contract = assertSingleton("ExerciseWithAnyType", exercisedEvents(tree)) + assertEquals( + "ExerciseWithAnyType", + clearIds(contract.getChoiceArgument), + Value.fromJavaProto(choice1.toValue.toProto), + ) + } + }) + + test( + "TXVeryLongList", + "Accept a submission with a very long list (10,000 items)", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val n = 10000 + val veryLongList = List(List.iterate(0L, n)(_ + 1)*).map(long2Long).asJava + val template = new ParameterShowcase( + party, + 42L, + new BigDecimal("47.0000000000"), + "some text", + true, + TimestampConversion.MIN, + new NestedOptionalInteger(new optionalinteger.SomeInteger(-1L)), + veryLongList, + Some("some optional text").toJava, + ) + val create = ledger.submitAndWaitForTransactionRequest(party, template.create.commands) + for { + transactionResponse <- ledger.submitAndWaitForTransaction(create) + } yield { + val transaction = transactionResponse.getTransaction + val contract = assertSingleton("VeryLongList", createdEvents(transaction)) + assertEquals( + "VeryLongList", + toJavaProto(contract.getCreateArguments.clearValueIds), + template.toValue.toProtoRecord, + ) + } + }) + + test( + "TXNoReorder", + "Don't reorder fields in data structures of choices", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + dummy: Dummy.ContractId <- ledger.create(party, new Dummy(party)) + tree <- ledger.exercise( + party, + dummy.exerciseWrapWithAddress(new Address("street", "city", "state", "zip")), + ) + } yield { + val contract = assertSingleton("Contract in transaction", createdEvents(tree)) + val fields = assertLength("Fields in contract", 2, contract.getCreateArguments.fields) + assertEquals( + "NoReorder", + fields.flatMap(_.getValue.getRecord.fields).map(_.getValue.getText).zipWithIndex, + Seq("street" -> 0, "city" -> 1, "state" -> 2, "zip" -> 3), + ) + } + }) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceAuthorizationIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceAuthorizationIT.scala new file mode 100644 index 0000000000..22d8c8663a --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceAuthorizationIT.scala @@ -0,0 +1,181 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
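+// Covers authorization semantics over the transaction service: chosen branching
+// signatories, well-authorized multi-actor choices (including coinciding controllers),
+// and rejection of exercises with missing or excessive authorizers.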
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.Eventually.eventually +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.api.testtool.infrastructure.TransactionHelpers.* +import com.daml.ledger.api.v2.value.{Record, RecordField} +import com.daml.ledger.test.java.model.test.{ + Agreement, + AgreementFactory, + BranchingSignatories, + TriProposal, +} +import com.digitalasset.base.error.{ErrorCategory, ErrorCode} +import com.digitalasset.canton.ledger.api.TransactionShape.AcsDelta +import com.digitalasset.canton.ledger.error.groups.CommandExecutionErrors + +import scala.jdk.CollectionConverters.* + +class TransactionServiceAuthorizationIT extends LedgerTestSuite { + import CompanionImplicits.* + + test( + "TXRequireAuthorization", + "Require only authorization of chosen branching signatory", + allocate(SingleParty, SingleParty), + )(implicit ec => { case Participants(Participant(alpha, Seq(alice)), Participant(_, Seq(bob))) => + import ClearIdsImplicits.* + val template = new BranchingSignatories(true, alice, bob) + for { + _ <- alpha.create(alice, template)(BranchingSignatories.COMPANION) + transactions <- alpha.transactions(AcsDelta, alice) + } yield { + assert( + Record.fromJavaProto( + template.toValue.toProtoRecord + ) == transactions.head.events.head.getCreated.getCreateArguments.clearValueIds + ) + } + }) + + test( + "TXMultiActorChoiceOkBasic", + "Accept exercising a well-authorized multi-actor choice", + allocate(TwoParties, SingleParty), + )(implicit ec => { + case Participants(Participant(alpha, Seq(operator, receiver)), Participant(beta, Seq(giver))) => + for { + agreementFactory <- beta.create(giver, new AgreementFactory(receiver, giver)) + agreement <- eventually("exerciseAgreementFactoryAccept") { + alpha.exerciseAndGetContract[Agreement.ContractId, Agreement]( + receiver, + agreementFactory.exerciseAgreementFactoryAccept(), + )(Agreement.COMPANION) + } + triProposalTemplate = new TriProposal(operator, receiver, giver) + triProposal <- alpha.create(operator, triProposalTemplate) + tree <- eventually("exerciseAcceptTriProposal") { + beta.exercise(giver, agreement.exerciseAcceptTriProposal(triProposal)) + } + } yield { + val contract = assertSingleton("AcceptTriProposal", createdEvents(tree)) + assertEquals( + "AcceptTriProposal", + contract.getCreateArguments.fields, + triProposalTemplate.toValue.getFields.asScala.map(rf => + RecordField.fromJavaProto(rf.toProto) + ), + ) + } + }) + + test( + "TXMultiActorChoiceOkCoincidingControllers", + "Accept exercising a well-authorized multi-actor choice with coinciding controllers", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case Participants(Participant(alpha, Seq(operator)), Participant(beta, Seq(giver))) => + for { + agreementFactory <- beta.create(giver, new AgreementFactory(giver, giver)) + agreement <- + beta.exerciseAndGetContract[Agreement.ContractId, Agreement]( + giver, + agreementFactory.exerciseAgreementFactoryAccept(), + )( + Agreement.COMPANION + ) + triProposalTemplate = new TriProposal(operator, giver, giver) + triProposal <- alpha.create(operator, triProposalTemplate) + tree <- eventually("exerciseAcceptTriProposal") { + beta.exercise(giver, agreement.exerciseAcceptTriProposal(triProposal)) + } + } yield { + val contract = assertSingleton("AcceptTriProposalCoinciding", 
createdEvents(tree)) + assertEquals( + "AcceptTriProposalCoinciding", + contract.getCreateArguments.fields, + triProposalTemplate.toValue.getFields.asScala.map(rf => + RecordField.fromJavaProto(rf.toProto) + ), + ) + } + }) + + test( + "TXRejectMultiActorMissingAuth", + "Reject exercising a multi-actor choice with missing authorizers", + allocate(TwoParties, SingleParty), + )(implicit ec => { + case Participants(Participant(alpha, Seq(operator, receiver)), Participant(beta, Seq(giver))) => + for { + triProposal <- alpha.create(operator, new TriProposal(operator, receiver, giver)) + _ <- eventually("exerciseTriProposalAccept") { + for { + failure <- beta + .exercise(giver, triProposal.exerciseTriProposalAccept()) + .mustFail("exercising with missing authorizers") + } yield { + assertGrpcError( + failure, + CommandExecutionErrors.Interpreter.AuthorizationError, + Some("requires authorizers"), + checkDefiniteAnswerMetadata = true, + ) + } + } + } yield { + // Check performed in the `eventually` block + } + }) + + // This is the current, most conservative semantics of multi-actor choice authorization. + // It is likely that this will change in the future. Should we delete this test, we should + // also remove the 'UnrestrictedAcceptTriProposal' choice from the 'Agreement' template. + test( + "TXRejectMultiActorExcessiveAuth", + "Reject exercising a multi-actor choice with too many authorizers", + allocate(TwoParties, SingleParty), + )(implicit ec => { + case Participants(Participant(alpha, Seq(operator, receiver)), Participant(beta, Seq(giver))) => + for { + agreementFactory <- beta.create(giver, new AgreementFactory(receiver, giver)) + // TODO(#16361) eventually is a temporary workaround. It should take into account + // that the contract needs to hit the target node before a choice + // is executed on it. + agreement <- eventually("exerciseAgreementFactoryAccept") { + alpha.exerciseAndGetContract[Agreement.ContractId, Agreement]( + receiver, + agreementFactory.exerciseAgreementFactoryAccept(), + )(Agreement.COMPANION) + } + triProposalTemplate = new TriProposal(operator, giver, giver) + triProposal <- alpha.create(operator, triProposalTemplate) + _ <- eventually("exerciseAcceptTriProposal") { + for { + failure <- beta + .exercise(giver, agreement.exerciseAcceptTriProposal(triProposal)) + .mustFail("exercising with failing assertion") + } yield { + assertGrpcError( + failure, + new ErrorCode( + CommandExecutionErrors.Interpreter.FailureStatus.id, + ErrorCategory.InvalidGivenCurrentSystemStateOther, + )( + CommandExecutionErrors.Interpreter.FailureStatus.parent + ) {}, + Some("Assertion failed"), + checkDefiniteAnswerMetadata = true, + ) + } + } + } yield { + // Check performed in the `eventually` block + } + }) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceCorrectnessIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceCorrectnessIT.scala new file mode 100644 index 0000000000..b348550d93 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceCorrectnessIT.scala @@ -0,0 +1,534 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
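+// Checks that the transaction stream is served consistently: it can be consumed in
+// chunks, yields identical results for parallel and single-/multi-party subscriptions,
+// matches point-wise lookups by update id, and orders creates before archives.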
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.api.testtool.suites.v2_1.TransactionServiceCorrectnessIT.* +import com.daml.ledger.api.v2.event.Event.Event +import com.daml.ledger.api.v2.event.Event.Event.{Archived, Created, Empty, Exercised} +import com.daml.ledger.api.v2.transaction.Transaction +import com.daml.ledger.test.java.model.test.{AgreementFactory, Dummy} +import com.digitalasset.canton.ledger.api.TransactionShape.{AcsDelta, LedgerEffects} +import com.digitalasset.canton.platform.store.utils.EventOps.EventOps + +import java.time.Instant +import scala.collection.immutable.Seq +import scala.concurrent.Future + +class TransactionServiceCorrectnessIT extends LedgerTestSuite { + import CompanionImplicits.* + + test( + "TXProcessInTwoChunks", + "Serve the complete sequence of transactions even if processing is stopped and resumed", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val transactionsToSubmit = 5 + for { + _ <- Future.sequence( + Vector.fill(transactionsToSubmit)(ledger.create(party, new Dummy(party))) + ) + endAfterFirstSection <- ledger.currentEnd() + firstSectionRequest <- ledger + .getTransactionsRequest(ledger.transactionFormat(Some(Seq(party)), verbose = true)) + .map( + _.update( + _.endInclusive := endAfterFirstSection + ) + ) + firstSection <- ledger.transactions(firstSectionRequest) + _ <- Future.sequence( + Vector.fill(transactionsToSubmit)(ledger.create(party, new Dummy(party))) + ) + secondSectionRequest <- ledger + .getTransactionsRequest(ledger.transactionFormat(Some(Seq(party)), verbose = true)) + .map( + _.update( + _.beginExclusive := endAfterFirstSection + ) + ) + secondSection <- ledger.transactions(secondSectionRequest) + fullSequence <- ledger.transactions(AcsDelta, party) + } yield { + val concatenation = Vector.concat(firstSection, secondSection) + assert( + fullSequence == concatenation, + s"The result of processing items in two chunks should yield the same result as getting the overall stream of transactions in the end but there are differences. 
" + + s"Full sequence: ${fullSequence.map(_.commandId).mkString(", ")}, " + + s"first section: ${firstSection.map(_.commandId).mkString(", ")}, " + + s"second section: ${secondSection.map(_.commandId).mkString(", ")}", + ) + } + }) + + test( + "TXParallel", + "The same data should be served for more than 1 identical, parallel requests", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val transactionsToSubmit = 5 + val parallelRequests = 10 + for { + _ <- Future.sequence( + Vector.fill(transactionsToSubmit)(ledger.create(party, new Dummy(party))) + ) + results <- Future.sequence( + Vector.fill(parallelRequests)(ledger.transactions(AcsDelta, party)) + ) + } yield { + assert( + results.toSet.size == 1, + s"All requests are supposed to return the same results but there " + + s"where differences: ${results.map(_.map(_.commandId)).mkString(", ")}", + ) + } + }) + + test( + "TXSingleMultiSameBasic", + "The same transaction should be served regardless of subscribing as one or multiple parties", + allocate(TwoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob))) => + for { + _ <- ledger.create(alice, new Dummy(alice)) + _ <- ledger.create(bob, new Dummy(bob)) + aliceView <- ledger.transactions(AcsDelta, alice) + bobView <- ledger.transactions(AcsDelta, bob) + multiSubscriptionView <- ledger.transactions(AcsDelta, alice, bob) + } yield { + val jointView = aliceView ++ bobView + assertEquals( + "Single- and multi-party subscription yield different results", + jointView, + multiSubscriptionView, + ) + } + }) + + test( + "TXSingleMultiSameLedgerEffectsBasic", + "The same ledger effects transactions should be served regardless of subscribing as one, multiple or wildcard parties", + allocate(TwoParties), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob))) => + for { + endOffsetAtTestStart <- ledger.currentEnd() + _ <- ledger.create(alice, new Dummy(alice)) + _ <- ledger.create(bob, new Dummy(bob)) + aliceView <- ledger.transactions(LedgerEffects, alice) + bobView <- ledger.transactions(LedgerEffects, bob) + multiSubscriptionView <- ledger.transactions(LedgerEffects, alice, bob) + txReq <- ledger.getTransactionsRequest( + transactionFormat = + ledger.transactionFormat(None, transactionShape = LedgerEffects, verbose = true), + begin = endOffsetAtTestStart, + ) + wildcardPartyView <- ledger.transactions(txReq) + } yield { + val jointView = aliceView ++ bobView + assertEquals( + "Single- and multi-party subscription yield different results", + jointView, + multiSubscriptionView, + ) + assertEquals( + "Multi-party and wildcard subscription yield different results", + wildcardPartyView, + multiSubscriptionView, + ) + } + }) + + test( + "TXSingleMultiSameStakeholders", + "The same transaction should be served to all stakeholders", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case p @ Participants(Participant(alpha, Seq(alice)), Participant(beta, Seq(bob))) => + for { + _ <- alpha.create(alice, new AgreementFactory(bob, alice)) + _ <- beta.create(bob, new AgreementFactory(alice, bob)) + _ <- p.synchronize + alphaView <- alpha.transactions(AcsDelta, alice, bob) + betaView <- beta.transactions(AcsDelta, alice, bob) + } yield { + verifyLength("Expected to get 2 transactions", 2, alphaView) + assertEquals( + "Single- and multi-party subscription yield different results", + comparableTransactions(alphaView), + comparableTransactions(betaView), + ) + } + }) + + 
test( + "TXSingleMultiSameLedgerEffectsStakeholders", + "The same ledger effects transactions should be served to all stakeholders", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case p @ Participants(Participant(alpha, Seq(alice)), Participant(beta, Seq(bob))) => + for { + _ <- alpha.create(alice, new AgreementFactory(bob, alice)) + _ <- beta.create(bob, new AgreementFactory(alice, bob)) + _ <- p.synchronize + alphaView <- alpha.transactions(LedgerEffects, alice, bob) + betaView <- beta.transactions(LedgerEffects, alice, bob) + } yield { + verifyLength("Expected to get 2 transactions", 2, alphaView) + assertEquals( + "Single- and multi-party subscription yield different results", + comparableTransactions(alphaView), + comparableTransactions(betaView), + ) + } + }) + + test( + "TXTransactionByIdLedgerEffectsSameAsTransactionStream", + "Expose the same events for each transaction as the output of getTransactions with Ledger Effects", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case p @ Participants(Participant(alpha, Seq(submitter)), Participant(beta, Seq(listener))) => + for { + _ <- alpha.create(submitter, new AgreementFactory(listener, submitter)) + _ <- p.synchronize + transactions <- alpha.transactions(LedgerEffects, listener, submitter) + byId <- Future.sequence( + transactions.map(t => + beta.transactionById(t.updateId, Seq(listener, submitter), LedgerEffects) + ) + ) + } yield { + assertEquals( + "The events fetched by identifier did not match the ones on the transaction stream", + comparableTransactions(transactions), + comparableTransactions(byId), + ) + } + }) + + test( + "TXTransactionByIdAcsDeltaSameAsTransactionStream", + "Expose the same events for each transaction as the output of getTransactions", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case p @ Participants(Participant(alpha, Seq(submitter)), Participant(beta, Seq(listener))) => + for { + _ <- alpha.create(submitter, new AgreementFactory(listener, submitter)) + _ <- p.synchronize + transactions <- alpha.transactions(AcsDelta, listener, submitter) + byId <- Future.sequence( + transactions.map(t => + beta.transactionById(t.updateId, Seq(listener, submitter), AcsDelta) + ) + ) + } yield { + assertEquals( + "The events fetched by identifier did not match the ones on the transaction stream", + comparableTransactions(transactions), + comparableTransactions(byId), + ) + } + }) + + test( + "TXAcsDeltaTransactionEvents", + "Expose offset and nodeId for events of each transaction", + allocate( + SingleParty + ), + )(implicit ec => { case Participants(Participant(alpha, Seq(party))) => + for { + contract <- alpha.create(party, new Dummy(party)) + _ <- alpha.exercise(party, contract.exerciseDummyChoice1()) + transactions <- alpha.transactions(AcsDelta, party) + } yield { + val txsWithCreated = transactions.filter(_.events.exists(_.event.isCreated)) + val txWithCreated = assertSingleton( + "Only one transaction should contain the create event", + txsWithCreated, + ) + val createdEvents = txWithCreated.events.flatMap(_.event.created) + val createdEvent = assertSingleton( + "Transaction should contain a single created event", + createdEvents, + ) + assertEquals( + "The created event offset should match the transaction offset", + txWithCreated.offset, + createdEvent.offset, + ) + + val txsWithArchived = transactions.filter(_.events.exists(_.event.isArchived)) + val txWithArchived = assertSingleton( + "Only one transaction should contain the archived event", + txsWithArchived, + ) + val 
archivedEvents = txWithArchived.events.flatMap(_.event.archived) + val archivedEvent = assertSingleton( + "Transaction should contain the archived event", + archivedEvents, + ) + assertEquals( + "The archived event offset should match the transaction offset", + txWithArchived.offset, + archivedEvent.offset, + ) + } + }) + + test( + "TXTransactionLedgerEffectsEvents", + "Expose offset and nodeId for events of each transaction", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(alpha, Seq(party))) => + for { + contract <- alpha.create(party, new Dummy(party)) + _ <- alpha.exercise(party, contract.exerciseDummyChoice1()) + txsLedgerEffects <- alpha.transactions(LedgerEffects, party) + } yield { + val txsWithCreated = txsLedgerEffects.filter(_.events.view.exists(_.event.isCreated)) + val txWithCreated = assertSingleton( + "Only one transaction should contain the create event", + txsWithCreated, + ) + val createdEvents = txWithCreated.events.view.map(_.getCreated) + val createdEvent = assertSingleton( + "Transaction should contain the create event", + createdEvents.toSeq, + ) + assertEquals( + "The created event offset should match the transaction offset", + txWithCreated.offset, + createdEvent.offset, + ) + + val txsWithExercised = txsLedgerEffects.filter(_.events.view.exists(_.event.isExercised)) + + val txWithExercised = assertSingleton( + "Only one transaction should contain the exercise event", + txsWithExercised, + ) + val exercisedEvents = txWithExercised.events.view.map(_.getExercised) + + val exercisedEvent = assertSingleton( + "Transaction should contain the exercise event", + exercisedEvents.toSeq, + ) + assertEquals( + "The exercise event offset should match the transaction offset", + txWithExercised.offset, + exercisedEvent.offset, + ) + } + }) + + test( + "TXAcsDeltaSubsetOfLedgerEffects", + "The event identifiers in the acs delta stream should be a subset of those in the ledger effects stream", + allocate(SingleParty), + timeoutScale = 2.0, + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val contracts = 50 + for { + _ <- Future.sequence( + Vector.fill(contracts)( + ledger + .create(party, new Dummy(party)) + .flatMap((contract: Dummy.ContractId) => + ledger.exercise(party, contract.exerciseDummyChoice1()) + ) + ) + ) + transactionsAcsDelta <- ledger.transactions(AcsDelta, party) + transactionsLedgerEffects <- ledger.transactions(LedgerEffects, party) + } yield { + assert( + transactionsAcsDelta + .flatMap(_.events.map(_.nodeId)) + .toSet + .subsetOf( + transactionsLedgerEffects + .flatMap(_.events.map(_.nodeId)) + .toSet + ) + ) + } + }) + + test( + "TXAcsDeltaWitnessesSubsetOfLedgerEffects", + "The witnesses in the acs delta stream should be a subset of those in the ledger effects stream", + allocate(SingleParty), + timeoutScale = 2.0, + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val contracts = 50 + for { + _ <- Future.sequence( + Vector.fill(contracts)( + ledger + .create(party, new Dummy(party)) + .flatMap((contract: Dummy.ContractId) => + ledger.exercise(party, contract.exerciseDummyChoice1()) + ) + ) + ) + transactionsAcsDelta <- ledger.transactions(AcsDelta, party) + transactionsLedgerEffects <- ledger.transactions(LedgerEffects, party) + } yield { + val witnessesByEventIdInLedgerEffectsStream = + transactionsLedgerEffects + .flatMap(_.events) + .map(event => event.nodeId -> event.witnessParties.toSet) + .toMap + val witnessesByEventIdInAcsDeltaStream = + transactionsAcsDelta + 
.flatMap(_.events) + .map(event => event.nodeId -> event.witnessParties.toSet) + for ((event, witnesses) <- witnessesByEventIdInAcsDeltaStream) { + assert(witnesses.subsetOf(witnessesByEventIdInLedgerEffectsStream(event))) + } + } + }) + + test( + "TXSingleSubscriptionInOrder", + "Archives should always come after creations when subscribing as a single party", + allocate(SingleParty), + timeoutScale = 2.0, + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val contracts = 50 + for { + _ <- Future.sequence( + Vector.fill(contracts)( + ledger + .create(party, new Dummy(party)) + .flatMap((contract: Dummy.ContractId) => + ledger.exercise(party, contract.exerciseDummyChoice1()) + ) + ) + ) + transactions <- ledger.transactions(AcsDelta, party) + } yield { + checkTransactionsOrder("Ledger", transactions, contracts) + } + }) + + test( + "TXMultiSubscriptionInOrder", + "Archives should always come after creations when subscribing as more than one party", + allocate(TwoParties), + timeoutScale = 2.0, + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob))) => + val contracts = 50 + for { + _ <- Future.sequence(Vector.tabulate(contracts) { n => + val party = if (n % 2 == 0) alice else bob + ledger + .create(party, new Dummy(party)) + .flatMap((contract: Dummy.ContractId) => + ledger.exercise(party, contract.exerciseDummyChoice1()) + ) + }) + transactions <- ledger.transactions(AcsDelta, alice, bob) + } yield { + checkTransactionsOrder("Ledger", transactions, contracts) + } + }) +} + +object TransactionServiceCorrectnessIT { + + // Erase span id from the trace context. It is an element of the trace context that + // is different on the different participants that are handling the transaction stream + // requests. See https://www.w3.org/TR/trace-context/#header-name for the format details.
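+ // E.g. the sample header from the W3C spec, "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01",
+ // would be rewritten to "00-0af7651916cd43dd8448eb211c80319c-0123456789abcdef-01".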
+ def eraseSpanId(parentTraceId: String): String = + parentTraceId.split("-").toList match { + case ver :: traceId :: _ :: rest => + (ver :: traceId :: "0123456789abcdef" :: rest).mkString("-") + case _ => parentTraceId + } + + // Strip command id, offset, event id, node id and transaction id to yield a transaction comparable across participants + // Furthermore, makes sure that the order is not relevant for witness parties + // Sort by transactionId as on distributed ledgers updates can occur in different orders + // Even if transactionIds are not the same across distributed ledgers, we can still use them for sorting + private def comparableTransactions(transactions: Vector[Transaction]): Vector[Transaction] = { + def stripEventFields(event: Event) = + event match { + case Archived(value) => Archived(value.copy(offset = 0L, nodeId = 0)) + case Created(value) => Created(value.copy(offset = 0L, nodeId = 0)) + case Exercised(value) => Exercised(value.copy(offset = 0L, nodeId = 0)) + case Empty => Empty + } + + transactions + .sortBy(_.updateId) + .map(t => + t.copy( + commandId = "commandId", + offset = 12345678L, + events = t.events + .map(e => e.copy(event = stripEventFields(e.event)).modifyWitnessParties(_.sorted)), + updateId = "transactionId", + traceContext = t.traceContext.map(tc => tc.copy(tc.traceparent.map(eraseSpanId))), + ) + ) + } + + private def checkTransactionsOrder( + context: String, + transactions: Vector[Transaction], + contracts: Int, + ): Unit = { + val (cs, as) = + transactions.flatMap(_.events).zipWithIndex.partition { case (e, _) => + e.event.isCreated + } + val creations = cs.map { case (e, i) => e.getCreated.contractId -> i } + val archivals = as.map { case (e, i) => e.getArchived.contractId -> i } + assert( + creations.size == contracts && archivals.size == contracts, + s"$context: either the number of archive events (${archivals.size}) or the number of create events (${creations.size}) doesn't match the expected number of $contracts.", + ) + val createdContracts = creations.iterator.map(_._1).toSet + val archivedContracts = archivals.iterator.map(_._1).toSet + assert( + createdContracts.size == creations.size, + s"$context: there are duplicate contract identifiers in the create events", + ) + assert( + archivedContracts.size == archivals.size, + s"$context: there are duplicate contract identifiers in the archive events", + ) + assert( + createdContracts == archivedContracts, + s"$context: the contract identifiers for created and archived contracts differ: ${createdContracts + .diff(archivedContracts)}", + ) + val sortedCreations = creations.sortBy(_._1) + val sortedArchivals = archivals.sortBy(_._1) + for (i <- 0 until contracts) { + val (createdContract, creationIndex) = sortedCreations(i) + val (archivedContract, archivalIndex) = sortedArchivals(i) + assert( + createdContract == archivedContract, + s"$context: unexpected discrepancy between the created and archived events", + ) + assert( + creationIndex < archivalIndex, + s"$context: the creation of $createdContract did not appear in the stream before its archival", + ) + } + + transactions.map(_.recordTime.get.asJavaInstant).foldLeft(Instant.MIN) { + case (previous, current) if previous isBefore current => current + case _ => fail(s"$context: record time of subsequent transactions was not increasing") + }: Unit + + } + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceExerciseIT.scala 
b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceExerciseIT.scala new file mode 100644 index 0000000000..1688107bce --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceExerciseIT.scala @@ -0,0 +1,159 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.api.testtool.infrastructure.TransactionHelpers.* +import com.daml.ledger.api.testtool.infrastructure.TransactionOps.* +import com.daml.ledger.test.java.model.test.{ + AgreementFactory, + CreateAndFetch, + Dummy, + DummyFactory, + DummyWithParam, +} +import com.digitalasset.base.error.{ErrorCategory, ErrorCode} +import com.digitalasset.canton.ledger.api.TransactionShape.AcsDelta +import com.digitalasset.canton.ledger.api.util.TimestampConversion +import com.digitalasset.canton.ledger.error.groups.CommandExecutionErrors +import com.digitalasset.canton.platform.store.utils.EventOps.EventOps + +class TransactionServiceExerciseIT extends LedgerTestSuite { + import CompanionImplicits.* + + test( + "TXUseCreateToExercise", + "Should be able to directly use a contract identifier to exercise a choice", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + dummyFactory <- ledger.create(party, new DummyFactory(party)) + transactions <- ledger.exercise(party, dummyFactory.exerciseDummyFactoryCall()) + } yield { + val events = transactions.events.filter(e => transactions.rootNodeIds().contains(e.nodeId)) + val exercised = events.filter(_.event.isExercised) + assert(exercised.size == 1, s"Only one exercise expected, got ${exercised.size}") + assert( + exercised.head.getExercised.contractId == dummyFactory.contractId, + s"The identifier of the exercised contract should have been ${dummyFactory.contractId} but instead it was ${exercised.head.getExercised.contractId}", + ) + } + }) + + test( + "TXContractIdFromExerciseWhenFilter", + "Expose contract identifiers that are results of exercising choices when filtering by template", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + factory <- ledger.create(party, new DummyFactory(party)) + _ <- ledger.exercise(party, factory.exerciseDummyFactoryCall()) + dummyWithParam <- ledger.transactionsByTemplateId( + DummyWithParam.TEMPLATE_ID, + Some(Seq(party)), + ) + dummyFactory <- ledger.transactionsByTemplateId( + DummyFactory.TEMPLATE_ID, + Some(Seq(party)), + ) + } yield { + val create = assertSingleton("GetCreate", dummyWithParam.flatMap(createdEvents)) + assertEquals( + "Create should be of DummyWithParam", + create.getTemplateId, + DummyWithParam.TEMPLATE_ID_WITH_PACKAGE_ID.toV1, + ) + val archive = assertSingleton("GetArchive", dummyFactory.flatMap(archivedEvents)) + assertEquals( + "Archive should be of DummyFactory", + archive.getTemplateId, + DummyFactory.TEMPLATE_ID_WITH_PACKAGE_ID.toV1, + ) + assertEquals( + "Mismatching archived contract identifier", + archive.contractId, + factory.contractId, + ) + } + }) + + test( + "TXNotArchiveNonConsuming", + "Exercising a non-consuming choice on a
contract should not result in its archival", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case p @ Participants(Participant(alpha, Seq(receiver)), Participant(beta, Seq(giver))) => + for { + agreementFactory: AgreementFactory.ContractId <- beta.create( + giver, + new AgreementFactory(receiver, giver), + ) + _ <- p.synchronize + _ <- alpha.exercise(receiver, agreementFactory.exerciseCreateAgreement()) + transactions <- alpha.transactions(AcsDelta, receiver, giver) + } yield { + assert( + !transactions.exists(_.events.exists(_.event.isArchived)), + s"The transaction includes an archival: ${transactions.flatMap(_.events).filter(_.event.isArchived)}", + ) + } + }) + + test( + "TXFetchContractCreatedInTransaction", + "It should be possible to fetch a contract created within a transaction", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + createAndFetch: CreateAndFetch.ContractId <- ledger.create(party, new CreateAndFetch(party))( + CreateAndFetch.COMPANION + ) + transaction <- ledger.exercise( + party, + createAndFetch.exerciseCreateAndFetch_Run(), + AcsDelta, + ) + } yield { + val _ = assertSingleton("There should be only one create", createdEvents(transaction)) + val exercise = + assertSingleton("There should be only one archive", archivedEvents(transaction)) + assertEquals( + "The contract identifier of the exercise does not match", + createAndFetch.contractId, + exercise.contractId, + ) + } + }) + + test( + "TXRejectOnFailingAssertion", + "Reject a transaction on a failing assertion", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + dummy: Dummy.ContractId <- ledger.create(party, new Dummy(party)) + failure <- ledger + .exercise( + party, + dummy + .exerciseConsumeIfTimeIsBetween(TimestampConversion.MAX, TimestampConversion.MAX), + ) + .mustFail("exercising with a failing assertion") + } yield { + assertGrpcError( + failure, + new ErrorCode( + CommandExecutionErrors.Interpreter.FailureStatus.id, + ErrorCategory.InvalidGivenCurrentSystemStateOther, + )( + CommandExecutionErrors.Interpreter.FailureStatus.parent + ) {}, + Some("Assertion failed"), + checkDefiniteAnswerMetadata = true, + ) + } + }) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceFiltersIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceFiltersIT.scala new file mode 100644 index 0000000000..b43d1b9ea3 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceFiltersIT.scala @@ -0,0 +1,732 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
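+ +// Editor's note: illustrative sketch, not part of the original suite. The tests in this file +// compose `EventFormat`s from cumulative filters; assuming the protobuf classes imported +// below, a minimal per-party format that combines a template filter with a wildcard filter +// has this shape: +// +// val filters = Filters( +// Seq( +// CumulativeFilter(IdentifierFilter.TemplateFilter( +// TemplateFilter(templateId = Some(T3.TEMPLATE_ID.toV1), includeCreatedEventBlob = true) +// )), +// CumulativeFilter.defaultInstance.withWildcardFilter( +// WildcardFilter(includeCreatedEventBlob = false) +// ), +// ) +// ) +// val format = EventFormat( +// filtersByParty = Map(party.getValue -> filters), +// filtersForAnyParty = None, +// verbose = true, +// ) 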
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.TransactionHelpers.* +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, Party} +import com.daml.ledger.api.v2.event.{CreatedEvent, ExercisedEvent} +import com.daml.ledger.api.v2.state_service.GetActiveContractsRequest +import com.daml.ledger.api.v2.transaction_filter.* +import com.daml.ledger.api.v2.transaction_filter.CumulativeFilter.IdentifierFilter +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.{ + TRANSACTION_SHAPE_ACS_DELTA, + TRANSACTION_SHAPE_LEDGER_EFFECTS, +} +import com.daml.ledger.test.java.model.test.{Dummy, DummyWithParam} +import com.daml.ledger.test.java.semantic.interfaceviews.* +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.ledger.api.TransactionShape.{AcsDelta, LedgerEffects} + +import scala.concurrent.{ExecutionContext, Future} + +// Allows using deprecated Protobuf fields for testing +class TransactionServiceFiltersIT extends LedgerTestSuite { + + test( + "TSFInterfaceTemplatePlainFilters", + "Combine plain interface filters with plain template filters", + allocate(SingleParty), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val filterT = + createEventFormat( + partyO = Some(party), + interfaceFilters = Seq(), + templateFilters = createTemplateFilter(includeCreatedEventBlob = false), + ) + val filterTAnyParty = + createEventFormat( + partyO = None, + interfaceFilters = Seq(), + templateFilters = createTemplateFilter(includeCreatedEventBlob = false), + ) + val filterI = + createEventFormat( + partyO = Some(party), + interfaceFilters = createInterfaceFilter( + includeCreatedEventBlob = false + ), + ) + val filterIAnyParty = + createEventFormat( + partyO = None, + interfaceFilters = createInterfaceFilter( + includeCreatedEventBlob = false + ), + ) + + testFilterCompositions(ledger, party, filterT, filterTAnyParty, filterI, filterIAnyParty) + + }) + + test( + "TSFInterfaceTemplateFiltersWithEventBlobs", + "Combine plain interface filters with template filters with event blobs", + allocate(SingleParty), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val filterT = + createEventFormat( + partyO = Some(party), + interfaceFilters = Seq(), + templateFilters = createTemplateFilter(includeCreatedEventBlob = true), + ) + val filterTAnyParty = + createEventFormat( + partyO = None, + interfaceFilters = Seq(), + templateFilters = createTemplateFilter(includeCreatedEventBlob = true), + ) + val filterI = + createEventFormat( + partyO = Some(party), + interfaceFilters = createInterfaceFilter( + includeCreatedEventBlob = false + ), + ) + val filterIAnyParty = + createEventFormat( + partyO = None, + interfaceFilters = createInterfaceFilter( + includeCreatedEventBlob = false + ), + ) + + testFilterCompositions(ledger, party, filterT, filterTAnyParty, filterI, filterIAnyParty) + + }) + + test( + "TSFInterfaceWithEventBlobsTemplatePlainFilters", + "Combine interface filters with event blobs with plain template filters", + allocate(SingleParty), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val filterT = + createEventFormat( 
+ partyO = Some(party), + interfaceFilters = Seq(), + templateFilters = createTemplateFilter(includeCreatedEventBlob = false), + ) + val filterTAnyParty = + createEventFormat( + partyO = None, + interfaceFilters = Seq(), + templateFilters = createTemplateFilter(includeCreatedEventBlob = false), + ) + val filterI = + createEventFormat( + partyO = Some(party), + interfaceFilters = createInterfaceFilter( + includeCreatedEventBlob = true + ), + ) + val filterIAnyParty = + createEventFormat( + partyO = None, + interfaceFilters = createInterfaceFilter( + includeCreatedEventBlob = true + ), + ) + + testFilterCompositions(ledger, party, filterT, filterTAnyParty, filterI, filterIAnyParty) + + }) + + test( + "TSFInterfaceWithEventBlobsTemplateFiltersWithEventBlobs", + "Combine interface filters with event blobs with template filters with event blobs", + allocate(SingleParty), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val filterT = + createEventFormat( + partyO = Some(party), + interfaceFilters = Seq(), + templateFilters = createTemplateFilter(includeCreatedEventBlob = true), + ) + val filterTAnyParty = + createEventFormat( + partyO = None, + interfaceFilters = Seq(), + templateFilters = createTemplateFilter(includeCreatedEventBlob = true), + ) + val filterI = + createEventFormat( + partyO = Some(party), + interfaceFilters = createInterfaceFilter( + includeCreatedEventBlob = true + ), + ) + val filterIAnyParty = + createEventFormat( + partyO = None, + interfaceFilters = createInterfaceFilter( + includeCreatedEventBlob = true + ), + ) + + testFilterCompositions(ledger, party, filterT, filterTAnyParty, filterI, filterIAnyParty) + + }) + + test( + "TSFTemplateWildcardFiltersWithEventBlobs", + "Template wildcard filters with event blobs", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val format = + createEventFormat( + partyO = Some(party), + interfaceFilters = Seq(), + templateFilters = Seq(), + wildcardFilterO = Some(WildcardFilter(includeCreatedEventBlob = true)), + ) + + import ledger.* + for { + endOffsetAtTestStart <- ledger.currentEnd() + c1 <- create(party, new T5(party, 1))(T5.COMPANION) + c2 <- create(party, new T6(party, party))(T6.COMPANION) + c3 <- create(party, new T3(party, 2))(T3.COMPANION) + c4 <- create(party, new T4(party, 4))(T4.COMPANION) + txReq <- getTransactionsRequest( + transactionFormat = + TransactionFormat(Some(format), transactionShape = TRANSACTION_SHAPE_ACS_DELTA), + begin = endOffsetAtTestStart, + ) + txReqLedgerEffects <- getTransactionsRequest( + transactionFormat = + TransactionFormat(Some(format), transactionShape = TRANSACTION_SHAPE_LEDGER_EFFECTS), + begin = endOffsetAtTestStart, + ) + txEvents <- transactions(txReq).map(_.flatMap(createdEvents)) + txEventsLedgerEffects <- transactions(txReqLedgerEffects).map(_.flatMap(createdEvents)) + currentEnd <- ledger.currentEnd() + acsEvents <- activeContracts(createActiveContractsRequest(format, currentEnd)) + + // archive active contracts to avoid interference with the next tests + _ <- archive(ledger, party)(c1, c2, c3, c4) + } yield { + eventBlobAssertions(txEvents) + eventBlobAssertions(txEventsLedgerEffects) + eventBlobAssertions(acsEvents) + } + + }) + + test( + "TSFExercisedTemplateFilters", + "Filter exercised events with template filters", + allocate(TwoParties), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, Seq(party1, party2))) => + val formatT = + 
createEventFormat( + partyO = Some(party1), + interfaceFilters = Seq(), + templateFilters = templateFilterForDummy, + ) + val formatTAnyParty = + createEventFormat( + partyO = None, + interfaceFilters = Seq(), + templateFilters = templateFilterForDummy, + ) + + testFilterCompositionsForExercised(ledger, party1, party2, formatT, formatTAnyParty) + + }) + + test( + s"TransactionAcsDeltaWildcardFilters", + "Create arguments for wildcard filters on acs delta transaction streams", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + import ledger.* + for { + c1 <- create(party, new T5(party, 1))(T5.COMPANION) + c2 <- create(party, new T6(party, party))(T6.COMPANION) + c3 <- create(party, new T3(party, 3))(T3.COMPANION) + c4 <- create(party, new T4(party, 4))(T4.COMPANION) + _ <- archive(ledger, party)(c1, c2, c3, c4) + txReq <- getTransactionsRequest( + transactionFormat( + parties = Some(Seq(party)), + templateIds = Seq.empty, + interfaceFilters = Seq.empty, + transactionShape = AcsDelta, + ) + ) + txs <- transactions(txReq) + txReqPartyWildcard <- getTransactionsRequest( + transactionFormat( + parties = None, + templateIds = Seq.empty, + interfaceFilters = Seq.empty, + transactionShape = AcsDelta, + ) + ) + txsPartyWildcard <- transactions(txReqPartyWildcard) + created = txs.flatMap(createdEvents) + createdPartyWildcard = txsPartyWildcard.flatMap(createdEvents) + } yield { + (created ++ createdPartyWildcard).foreach(event => + assertEquals( + s"Create event contract arguments must NOT be empty for $event", + event.createArguments.isEmpty, + false, + ) + ) + } + }) + + test( + s"TransactionLedgerEffectsWildcardFilters", + "Create arguments for wildcard filters on ledger effects transaction streams", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + import ledger.* + for { + c1 <- create(party, new T5(party, 1))(T5.COMPANION) + c2 <- create(party, new T6(party, party))(T6.COMPANION) + c3 <- create(party, new T3(party, 3))(T3.COMPANION) + c4 <- create(party, new T4(party, 4))(T4.COMPANION) + _ <- archive(ledger, party)(c1, c2, c3, c4) + txReq <- getTransactionsRequest( + transactionFormat( + parties = Some(Seq(party)), + templateIds = Seq.empty, + interfaceFilters = Seq.empty, + transactionShape = LedgerEffects, + ) + ) + txs <- transactions(txReq) + txReqPartyWildcard <- getTransactionsRequest( + transactionFormat( + parties = None, + templateIds = Seq.empty, + interfaceFilters = Seq.empty, + transactionShape = LedgerEffects, + ) + ) + txsPartyWildcard <- transactions(txReqPartyWildcard) + created = txs.flatMap(createdEvents) + createdPartyWildcard = txsPartyWildcard.flatMap(createdEvents) + } yield { + (created ++ createdPartyWildcard).foreach(event => + assertEquals( + s"Create event contract arguments must NOT be empty for $event", + event.createArguments.isEmpty, + false, + ) + ) + } + }) + + private def testFilterCompositions( + ledger: ParticipantTestContext, + party: Party, + formatT: EventFormat, + formatTAnyParty: EventFormat, + formatI: EventFormat, + formatIAnyParty: EventFormat, + )(implicit ec: ExecutionContext): Future[Unit] = + for { + _ <- testFilterComposition( + ledger, + party, + mergeFormats(formatT, formatI), + ) + + _ <- testFilterComposition( + ledger, + party, + mergeFormats(formatTAnyParty, formatIAnyParty), + ) + + _ <- testFilterComposition( + ledger, + party, + mergeFormats(formatT, formatIAnyParty), + ) + + _ <- testFilterComposition( + ledger, + party, + 
mergeFormats(formatTAnyParty, formatI), + ) + } yield () + + private def testFilterCompositionsForExercised( + ledger: ParticipantTestContext, + party1: Party, + party2: Party, + formatT: EventFormat, + formatTAnyParty: EventFormat, + )(implicit ec: ExecutionContext): Future[Unit] = + for { + _ <- testFilterCompositionForExercised( + ledger, + party1, + party2, + formatT, + ) + + _ <- testFilterCompositionForExercised( + ledger, + party1, + party2, + formatTAnyParty, + ) + + } yield () + + private def archive(ledger: ParticipantTestContext, party: Party)( + c1: T5.ContractId, + c2: T6.ContractId, + c3: T3.ContractId, + c4: T4.ContractId, + )(implicit ec: ExecutionContext): Future[Unit] = + for { + _ <- ledger.exercise(party, c1.exerciseArchive()) + _ <- ledger.exercise(party, c2.exerciseArchive()) + _ <- ledger.exercise(party, c3.exerciseArchive()) + _ <- ledger.exercise(party, c4.exerciseArchive()) + } yield () + + private def testFilterComposition( + ledger: ParticipantTestContext, + party: Party, + format: EventFormat, + )(implicit ec: ExecutionContext): Future[Unit] = { + + import ledger.* + for { + endOffsetAtTestStart <- ledger.currentEnd() + c1 <- create(party, new T5(party, 1))(T5.COMPANION) + c2 <- create(party, new T6(party, party))(T6.COMPANION) + c3 <- create(party, new T3(party, 2))(T3.COMPANION) + c4 <- create(party, new T4(party, 4))(T4.COMPANION) + txReq <- getTransactionsRequest( + TransactionFormat( + eventFormat = Some(format), + transactionShape = TRANSACTION_SHAPE_ACS_DELTA, + ), + begin = endOffsetAtTestStart, + ) + txReqLedgerEffects <- getTransactionsRequest( + TransactionFormat( + eventFormat = Some(format), + transactionShape = TRANSACTION_SHAPE_LEDGER_EFFECTS, + ), + begin = endOffsetAtTestStart, + ) + txEvents <- transactions(txReq).map(_.flatMap(createdEvents)) + txEventsLedgerEffects <- transactions(txReqLedgerEffects).map(_.flatMap(createdEvents)) + currentEnd <- ledger.currentEnd() + acsEvents <- activeContracts(createActiveContractsRequest(format, currentEnd)) + // archive active contracts to avoid interference with the next tests + _ <- archive(ledger, party)(c1, c2, c3, c4) + } yield { + basicAssertions( + c1.contractId, + c2.contractId, + c3.contractId, + txEvents, + eventBlobFlagFromInterfaces(format), + eventBlobFlagFromTemplates(format), + ) + basicAssertions( + c1.contractId, + c2.contractId, + c3.contractId, + acsEvents, + eventBlobFlagFromInterfaces(format), + eventBlobFlagFromTemplates(format), + ) + basicAssertions( + c1.contractId, + c2.contractId, + c3.contractId, + txEventsLedgerEffects, + eventBlobFlagFromInterfaces(format), + eventBlobFlagFromTemplates(format), + ) + } + } + + private def testFilterCompositionForExercised( + ledger: ParticipantTestContext, + party1: Party, + party2: Party, + format: EventFormat, + )(implicit ec: ExecutionContext): Future[Unit] = { + + import ledger.* + for { + endOffsetAtTestStart <- ledger.currentEnd() + dummy1 <- create(party1, new Dummy(party1))(Dummy.COMPANION) + dummy2 <- create(party2, new Dummy(party2))(Dummy.COMPANION) + dummyWithParam <- create(party1, new DummyWithParam(party1))(DummyWithParam.COMPANION) + _ <- ledger.exercise(party1, dummy1.exerciseDummyNonConsuming()) + _ <- ledger.exercise(party2, dummy2.exerciseDummyNonConsuming()) + _ <- ledger.exercise(party1, dummyWithParam.exerciseDummyChoice2("")) + _ <- ledger.exercise(party1, dummy1.exerciseArchive()) + _ <- ledger.exercise(party2, dummy2.exerciseArchive()) + + txReq <- getTransactionsRequest( + TransactionFormat( + eventFormat = 
Some(format), + transactionShape = TRANSACTION_SHAPE_LEDGER_EFFECTS, + ), + begin = endOffsetAtTestStart, + ) + txEvents <- transactions(txReq).map(_.flatMap(exercisedEvents)) + } yield { + assertionsForExercised( + txEvents, + wildcardParty = format.filtersForAnyParty.isDefined, + ) + } + } + + private def assertionsForExercised( + exercisedEvents: Vector[ExercisedEvent], + wildcardParty: Boolean, + ): Unit = { + val expectedEventsNum = if (wildcardParty) 4 else 2 + assertLength( + s"$expectedEventsNum exercised events should have been found", + expectedEventsNum, + exercisedEvents, + ).discard + + for (event <- exercisedEvents) { + assertEquals( + "Exercised event of Dummy template ID", + event.templateId.get, + Dummy.TEMPLATE_ID_WITH_PACKAGE_ID.toV1, + ) + } + } + + private def basicAssertions( + c1: String, + c2: String, + c3: String, + createdEvents: Vector[CreatedEvent], + expectEventBlobFromInterfaces: Boolean, + expectEventBlobFromTemplates: Boolean, + ): Unit = { + val expectEventBlob = expectEventBlobFromInterfaces || expectEventBlobFromTemplates + assertLength("3 transactions found", 3, createdEvents).discard + + // T5 + val createdEvent1 = createdEvents(0) + assertEquals( + "Create event 1 template ID", + createdEvent1.templateId.get, + T5.TEMPLATE_ID_WITH_PACKAGE_ID.toV1, + ) + assertEquals("Create event 1 contract ID", createdEvent1.contractId, c1).discard + assertLength("Create event 1 has a view", 1, createdEvent1.interfaceViews).discard + assertEquals( + "Create event 1 createArguments must NOT be empty", + createdEvent1.createArguments.isEmpty, + false, + ) + assertEquals( + s"""Create event 1 createdEventBlob must ${if (expectEventBlob) "NOT" else ""} be empty""", + createdEvent1.createdEventBlob.isEmpty, + !expectEventBlob, + ) + + // T6 + val createdEvent2 = createdEvents(1) + assertEquals( + "Create event 2 template ID", + createdEvent2.templateId.get, + T6.TEMPLATE_ID_WITH_PACKAGE_ID.toV1, + ) + assertEquals("Create event 2 contract ID", createdEvent2.contractId, c2) + assertLength("Create event 2 has a view", 1, createdEvent2.interfaceViews).discard + assertEquals( + "Create event 2 createArguments must NOT be empty", + createdEvent2.createArguments.isEmpty, + false, + ) + assertEquals( + s"""Create event 2 createdEventBlob must ${if (expectEventBlobFromInterfaces) "NOT" + else ""} be empty""", + createdEvent2.createdEventBlob.isEmpty, + !expectEventBlobFromInterfaces, + ) + + // T3 + val createdEvent3 = createdEvents(2) + assertEquals( + "Create event 3 template ID", + createdEvent3.templateId.get.toString, + T3.TEMPLATE_ID_WITH_PACKAGE_ID.toV1.toString, + ) + assertEquals("Create event 3 contract ID", createdEvent3.contractId, c3) + assertLength("Create event 3 has no view", 0, createdEvent3.interfaceViews).discard + assertEquals( + "Create event 3 createArguments must NOT be empty", + createdEvent3.createArguments.isEmpty, + false, + ) + assertEquals( + s"""Create event 3 createdEventBlob must ${if (expectEventBlobFromTemplates) "NOT" + else ""} be empty""", + createdEvent3.createdEventBlob.isEmpty, + !expectEventBlobFromTemplates, + ) + } + + private def eventBlobAssertions( + createdEvents: Vector[CreatedEvent] + ): Unit = { + assertLength("4 transactions found", 4, createdEvents).discard + + createdEvents.foreach(createdEvent => + assertEquals( + s"Create event $createdEvent createdEventBlob must NOT be empty", + createdEvent.createdEventBlob.isEmpty, + false, + ) + ) + + } + + private def createInterfaceFilter( + includeCreatedEventBlob: Boolean + ) = + Seq( 
+ new InterfaceFilter( + interfaceId = Some(I2.TEMPLATE_ID.toV1), + includeInterfaceView = true, + includeCreatedEventBlob = includeCreatedEventBlob, + ) + ) + + private def createTemplateFilter(includeCreatedEventBlob: Boolean): Seq[TemplateFilter] = + Seq( + new TemplateFilter( + templateId = Some(T3.TEMPLATE_ID.toV1), + includeCreatedEventBlob = includeCreatedEventBlob, + ), + new TemplateFilter( + templateId = Some(T5.TEMPLATE_ID.toV1), + includeCreatedEventBlob = includeCreatedEventBlob, + ), + ) + + private lazy val templateFilterForDummy: Seq[TemplateFilter] = + Seq( + TemplateFilter( + templateId = Some(Dummy.TEMPLATE_ID.toV1), + includeCreatedEventBlob = false, + ) + ) + + private def createEventFormat( + partyO: Option[Party], // party or party-wildcard + interfaceFilters: Seq[InterfaceFilter], + templateFilters: Seq[TemplateFilter] = Seq.empty, + wildcardFilterO: Option[WildcardFilter] = None, + ): EventFormat = { + val filters = Filters( + templateFilters.map(tf => CumulativeFilter(IdentifierFilter.TemplateFilter(tf))) + ++ + interfaceFilters.map(ifaceF => + CumulativeFilter(IdentifierFilter.InterfaceFilter(ifaceF)) + ) ++ (wildcardFilterO match { + case Some(wildcardFilter) => + Seq(CumulativeFilter.defaultInstance.withWildcardFilter(wildcardFilter)) + case None => Seq.empty + }) + ) + + partyO match { + case Some(party) => + EventFormat( + filtersByParty = Map(party.getValue -> filters), + filtersForAnyParty = None, + verbose = true, + ) + case None => + EventFormat( + filtersByParty = Map.empty, + filtersForAnyParty = Some(filters), + verbose = true, + ) + } + } + + private def createActiveContractsRequest(eventFormat: EventFormat, activeAtOffset: Long) = + GetActiveContractsRequest( + activeAtOffset = activeAtOffset, + eventFormat = Some(eventFormat), + ) + + private def eventBlobFlagFromInterfaces(format: EventFormat): Boolean = + extractFlag(format, _.includeCreatedEventBlob) + + private def eventBlobFlagFromTemplates(format: EventFormat): Boolean = + (for { + byParty <- format.filtersByParty.headOption.map(_._2) + templateFilter <- byParty.cumulative.collectFirst(_.identifierFilter match { + case IdentifierFilter.TemplateFilter(templateF) => templateF + }) + } yield templateFilter.includeCreatedEventBlob).getOrElse(false) || + (for { + anyParty <- format.filtersForAnyParty + templateFilter <- anyParty.cumulative.collectFirst(_.identifierFilter match { + case IdentifierFilter.TemplateFilter(templateF) => templateF + }) + } yield templateFilter.includeCreatedEventBlob).getOrElse(false) + + private def extractFlag( + format: EventFormat, + extractor: InterfaceFilter => Boolean, + ): Boolean = + (for { + byParty <- format.filtersByParty.headOption.map(_._2) + interfaceFilter <- byParty.cumulative.collectFirst(_.identifierFilter match { + case IdentifierFilter.InterfaceFilter(interfaceF) => interfaceF + }) + } yield extractor(interfaceFilter)).getOrElse(false) || + (for { + anyParty <- format.filtersForAnyParty + interfaceFilter <- anyParty.cumulative.collectFirst(_.identifierFilter match { + case IdentifierFilter.InterfaceFilter(interfaceF) => interfaceF + }) + } yield extractor(interfaceFilter)).getOrElse(false) + + private def mergeFormats( + t1: EventFormat, + t2: EventFormat, + ): EventFormat = EventFormat( + filtersByParty = t1.filtersByParty ++ t2.filtersByParty.map { case (p, f2) => + t1.filtersByParty.get(p) match { + case Some(f1) => p -> mergeFilters(f1, f2) + case None => p -> f2 + } + }, + filtersForAnyParty = t1.filtersForAnyParty match { + case Some(f1) => 
Some(t2.filtersForAnyParty.fold(f1)(f2 => mergeFilters(f1, f2))) + case None => t2.filtersForAnyParty + }, + verbose = t1.verbose || t2.verbose, + ) + + private def mergeFilters(f1: Filters, f2: Filters): Filters = + Filters(f1.cumulative ++ f2.cumulative) + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceOutputsIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceOutputsIT.scala new file mode 100644 index 0000000000..99ac1cd2c1 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceOutputsIT.scala @@ -0,0 +1,265 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.TransactionHelpers.* +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, Party, TestConstraints} +import com.daml.ledger.api.v2.event.CreatedEvent +import com.daml.ledger.api.v2.transaction.Transaction +import com.daml.ledger.api.v2.value as api +import com.daml.ledger.api.v2.value.{Record, RecordField} +import com.daml.ledger.test.java.model.test.{Dummy, NothingArgument} +import com.daml.ledger.test.java.model.trailingnones.TrailingNones +import com.daml.ledger.test.java.model.trailingnonesiface.TrailingNonesIface +import com.digitalasset.canton.ledger.api.TransactionShape.{AcsDelta, LedgerEffects} + +import java.util.Optional +import scala.concurrent.{ExecutionContext, Future} +import scala.jdk.OptionConverters.* + +class TransactionServiceOutputsIT extends LedgerTestSuite { + import ClearIdsImplicits.* + import CompanionImplicits.* + import com.daml.ledger.api.testtool.infrastructure.RemoveTrailingNone.Implicits + + test( + "TXUnitAsArgumentToNothing", + "Daml engine returns Unit as argument with trailing None fields removed", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val template = new NothingArgument(party, None.toJava) + val create = ledger.submitAndWaitForTransactionRequest(party, template.create.commands) + for { + transactionResponse <- ledger.submitAndWaitForTransaction(create) + } yield { + val contract = assertSingleton( + "UnitAsArgumentToNothing", + createdEvents(transactionResponse.getTransaction), + ) + assertEquals( + "UnitAsArgumentToNothing", + contract.getCreateArguments.clearValueIds, + Record.fromJavaProto(template.toValue.withoutTrailingNoneFields.toProtoRecord), + ) + } + }) + + test( + "TXVerbosity", + "Expose field names only if the verbose flag is set to true", + allocate(SingleParty), + limitation = TestConstraints.GrpcOnly("Labels are always emitted by Transcode/SchemaProcessor"), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + _ <- ledger.create(party, new Dummy(party)) + verboseTransactions <- ledger + .getTransactionsRequest( + ledger.transactionFormat( + parties = Some(Seq(party)), + transactionShape = AcsDelta, + verbose = true, + ) + ) + .flatMap(ledger.transactions) + verboseTransactionsLedgerEffects <- ledger + .getTransactionsRequest( + 
ledger + .transactionFormat( + parties = Some(Seq(party)), + transactionShape = LedgerEffects, + verbose = true, + ) + ) + .flatMap(ledger.transactions) + nonVerboseTransactions <- ledger + .getTransactionsRequest( + ledger.transactionFormat( + parties = Some(Seq(party)), + transactionShape = AcsDelta, + verbose = false, + ) + ) + .flatMap(ledger.transactions) + nonVerboseTransactionLedgerEffects <- ledger + .getTransactionsRequest( + ledger + .transactionFormat( + parties = Some(Seq(party)), + transactionShape = LedgerEffects, + verbose = false, + ) + ) + .flatMap(ledger.transactions) + } yield { + assertLabelsAreExposedCorrectly( + party, + verboseTransactions, + verboseTransactionsLedgerEffects, + labelIsNonEmpty = true, + ) + assertLabelsAreExposedCorrectly( + party, + nonVerboseTransactions, + nonVerboseTransactionLedgerEffects, + labelIsNonEmpty = false, + ) + } + }) + + test( + "TXVerboseNoTrailingNones", + "Ledger API does not populate trailing Optional Nones (in verbose mode) on Ledger API queries", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + testDroppedTrailingNones(ledger = ledger, party = party, verbose = true) + }) + + test( + "TXNonVerboseNoTrailingNones", + "Ledger API does not populate trailing Optional Nones (in non-verbose mode) on Ledger API queries", + allocate(SingleParty), + limitation = TestConstraints.GrpcOnly("JSON API always outputs label names"), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + testDroppedTrailingNones(ledger = ledger, party = party, verbose = false) + }) + + private def testDroppedTrailingNones( + ledger: ParticipantTestContext, + party: Party, + verbose: Boolean, + )(implicit ec: ExecutionContext): Future[Unit] = { + def ifVerbosePopulated(label: String): String = if (verbose) label else "" + for { + contract <- ledger.create(party, new TrailingNones(party, "some", Optional.empty())) + ledgerEnd <- ledger.currentEnd() + acsReq = ledger.activeContractsRequest( + parties = Some(Seq(party)), + activeAtOffset = ledgerEnd, + templateIds = Seq(TrailingNones.TEMPLATE_ID), + interfaceFilters = Seq(TrailingNonesIface.INTERFACE_ID -> true), + verbose = verbose, + ) + acs <- ledger.activeContracts(acsReq) + tx <- ledger + .submitAndWaitForTransaction( + ledger.submitAndWaitForTransactionRequest( + party, + contract.exerciseExerciseChoice("populated field", Optional.empty()).commands(), + LedgerEffects, + verbose = verbose, + ) + ) + .map(_.getTransaction) + } yield { + val createdEvent = assertSingleton("Only one contract should be on the ACS", acs) + + // Assert create arguments + val createArgFields = + assertDefined(createdEvent.createArguments, "Create arguments should be defined").fields + assertEquals( + "Expected field arguments without trailing None", + createArgFields, + Seq( + RecordField( + ifVerbosePopulated("owner"), + Some(api.Value(api.Value.Sum.Party(party.getValue))), + ), + RecordField(ifVerbosePopulated("nonOpt"), Some(api.Value(api.Value.Sum.Text("some")))), + ), + ) + + // Assert interface view + val interfaceView = + assertSingleton("Only one interface view expected", createdEvent.interfaceViews) + + assertEquals( + "Mismatching interface views", + assertDefined(interfaceView.viewValue, "Interface view value should be defined").fields, + Seq(RecordField(ifVerbosePopulated("nonOpt"), Some(api.Value(api.Value.Sum.Text("some"))))), + ) + + val exercisedEvent = + assertSingleton("Only one exercise event expected", tx.events.map(_.getExercised)) + + // Assert exercise arguments + val exeArg = assertDefined(exercisedEvent.choiceArgument, "Choice argument should be defined") + assertEquals( + "Mismatching exercise argument", + exeArg.getRecord.fields, + Seq( + RecordField( + ifVerbosePopulated("nonOptArg"), + Some(api.Value(api.Value.Sum.Text("populated field"))), + ) + ), + ) + + // Assert exercise result + val exerciseResult = + assertDefined(exercisedEvent.exerciseResult, "Choice result should be defined").getRecord + + assertEquals( + "Mismatching exercise result", + exerciseResult.fields, + Seq( + RecordField( + ifVerbosePopulated("field1"), + Some(api.Value(api.Value.Sum.Text("populated field"))), + ) + ), + ) + } + } + + private def assertLabelsAreExposedCorrectly( + party: Party, + transactions: Seq[Transaction], + transactionsLedgerEffects: Seq[Transaction], + labelIsNonEmpty: Boolean, + ): Unit = { + + def transactionFields(createdEvent: Seq[CreatedEvent]): Seq[RecordField] = createdEvent + .flatMap(_.getCreateArguments.fields) + + val transactionLedgerEffectsCreatedEvents: Seq[CreatedEvent] = + for { + transactionLedgerEffects <- transactionsLedgerEffects + event <- transactionLedgerEffects.events + createdEvent = event.getCreated + } yield createdEvent + + val transactionLedgerEffectsFields: Seq[RecordField] = + transactionFields(transactionLedgerEffectsCreatedEvents) + + val flatTransactionFields: Seq[RecordField] = + transactionFields( + transactions + .flatMap(_.events) + .map(_.getCreated) + ) + + assert(transactions.nonEmpty, s"$party expected a non-empty transaction list") + assert( + transactionsLedgerEffects.nonEmpty, + s"$party expected a non-empty ledger effects transaction list", + ) + + val text = if (labelIsNonEmpty) "with" else "without" + assert( + flatTransactionFields.forall(_.label.nonEmpty == labelIsNonEmpty), + s"$party expected a contract $text labels, but received $transactions.", + ) + assert( + transactionLedgerEffectsFields.forall(_.label.nonEmpty == labelIsNonEmpty), + s"$party expected a contract $text labels, but received $transactionsLedgerEffects.", + ) + } + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceStakeholdersIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceStakeholdersIT.scala new file mode 100644 index 0000000000..0a92aa0392 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceStakeholdersIT.scala @@ -0,0 +1,140 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
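+ +// Editor's note: illustrative sketch, not part of the original suite. The tests in this file +// check which stakeholders are exposed on created events. The non-overlap invariant asserted +// by TXnoSignatoryObservers can be stated for any `CreatedEvent` named `ev` as: +// +// val overlap = ev.signatories.toSet.intersect(ev.observers.toSet) +// assert(overlap.isEmpty, s"signatories and observers overlap: $overlap") 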
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.api.testtool.infrastructure.TransactionHelpers.* +import com.daml.ledger.test.java.model.test.{CallablePayout, Dummy, WithObservers} +import com.digitalasset.canton.ledger.api.TransactionShape.{AcsDelta, LedgerEffects} + +import scala.collection.immutable.Seq +import scala.concurrent.Future +import scala.jdk.CollectionConverters.* + +class TransactionServiceStakeholdersIT extends LedgerTestSuite { + import CompanionImplicits.* + + test("TXStakeholders", "Expose the correct stakeholders", allocate(SingleParty, SingleParty))( + implicit ec => { + case Participants(Participant(alpha @ _, Seq(receiver)), Participant(beta, Seq(giver))) => + for { + _ <- beta.create(giver, new CallablePayout(giver, receiver)) + transactions <- beta.transactions(AcsDelta, giver, receiver) + } yield { + val contract = assertSingleton("Stakeholders", transactions.flatMap(createdEvents)) + assertEquals("Signatories", contract.signatories, Seq(giver.getValue)) + assertEquals("Observers", contract.observers, Seq(receiver.getValue)) + } + } + ) + + test( + "TXnoSignatoryObservers", + "transactions' created events should not return overlapping signatories and observers", + allocate(TwoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob))) => + for { + _ <- ledger.create(alice, new WithObservers(alice, Seq(alice, bob).map(_.getValue).asJava)) + acsDeltaTx <- ledger.transactions(AcsDelta, alice).flatMap(fs => Future(fs.head)) + acsDeltaWo <- Future(createdEvents(acsDeltaTx).head) + ledgerEffectsTx <- ledger.transactions(LedgerEffects, alice).flatMap(fs => Future(fs.head)) + ledgerEffectsWo <- Future(createdEvents(ledgerEffectsTx).head) + } yield { + assert( + acsDeltaWo.observers == Seq(bob.getValue), + s"Expected observers to only contain ${bob.getValue}, but received ${acsDeltaWo.observers}", + ) + assert( + ledgerEffectsWo.observers == Seq(bob.getValue), + s"Expected observers to only contain ${bob.getValue}, but received ${ledgerEffectsWo.observers}", + ) + } + }) + + test( + "TXTransientObservableSubmitter", + "transactions containing only transient events should not be visible to the submitting party", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + // Create command with transient contract + val createAndExercise = new Dummy(party).createAnd.exerciseArchive().commands + for { + _ <- ledger.submitAndWait(ledger.submitAndWaitRequest(party, createAndExercise)) + + emptyFlatTx <- ledger.transactions(AcsDelta, party) + emptyFlatTxByTemplateId <- ledger + .transactionsByTemplateId(Dummy.TEMPLATE_ID, Some(Seq(party))) + } yield { + assert( + emptyFlatTx.isEmpty, + s"Expected no flat transaction, but received $emptyFlatTx", + ) + assert( + emptyFlatTxByTemplateId.isEmpty, + s"Expected no transaction for Dummy, but received $emptyFlatTxByTemplateId", + ) + } + }) + + test( + "TXTransientNotObservableNoSubmitters", + "transactions with transient-only events should not be visible if the requester is not a submitting party", + allocate(TwoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(submitter, observer))) => + // Create command with transient contract + val createAndExerciseWithObservers = + new WithObservers(submitter, List(observer.getValue).asJava).createAnd + .exerciseArchive() + .commands + for { + // The in-memory fan-out serves at least N-1 transaction responses from a specific query window, + // so submit 2 requests to ensure that a transaction from the in-memory fan-out would be forwarded. + _ <- ledger.submitAndWait( + ledger.submitAndWaitRequest(submitter, createAndExerciseWithObservers) + ) + _ <- ledger.submitAndWait( + ledger.submitAndWaitRequest(submitter, createAndExerciseWithObservers) + ) + // `observer` is just a stakeholder on the contract created by `submitter` + // but it should not see the completion/flat transaction if it has only transient events. + emptyFlatTx <- ledger.transactions(AcsDelta, observer) + emptyFlatTxByTemplateId <- ledger.transactionsByTemplateId( + WithObservers.TEMPLATE_ID, + Some(Seq(observer)), + ) + } yield { + assert(emptyFlatTx.isEmpty, s"No transaction expected but got $emptyFlatTx instead") + assert( + emptyFlatTxByTemplateId.isEmpty, + s"No transaction expected but got $emptyFlatTxByTemplateId instead", + ) + } + }) + + test( + "TXTransientSubmitAndWaitForTransaction", + "transactions containing only transient events and submitted via submitAndWaitForTransaction should not contain any events", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + // Create command with transient contract + val createAndExercise = new Dummy(party).createAnd.exerciseArchive().commands + for { + response <- ledger.submitAndWaitForTransaction( + ledger.submitAndWaitForTransactionRequest(party, createAndExercise) + ) + } yield { + val tx = assertSingleton( + "Expected a transaction but did not receive one", + response.transaction.toList, + ) + assert( + tx.events.isEmpty, + s"Expected no events, but received ${tx.events}", + ) + } + }) + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceValidationIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceValidationIT.scala new file mode 100644 index 0000000000..c1422dccdb --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceValidationIT.scala @@ -0,0 +1,128 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
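+ +// Editor's note: illustrative sketch, not part of the original suite. The validation tests in +// this file drive the update service with deliberately malformed requests, built by mutating a +// valid request through the scalapb lens syntax used below, e.g. clearing all party filters: +// +// val requestWithEmptyFilter = request.update( +// _.updateFormat.includeTransactions.eventFormat.filtersByParty := Map.empty +// ) 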
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.api.v2.update_service.{GetUpdateByIdRequest, GetUpdateByOffsetRequest} +import com.daml.ledger.test.java.model.test.Dummy +import com.digitalasset.canton.ledger.api.TransactionShape.AcsDelta +import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors +import com.digitalasset.canton.protocol.TestUpdateId + +class TransactionServiceValidationIT extends LedgerTestSuite { + import CompanionImplicits.* + + test( + "TXRejectEmptyFilter", + "A query with an empty transaction filter should be rejected with an INVALID_ARGUMENT status", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + request <- ledger.getTransactionsRequest(ledger.transactionFormat(Some(Seq(party)))) + requestWithEmptyFilter = request.update( + _.updateFormat.includeTransactions.eventFormat.filtersByParty := Map.empty + ) + failure <- ledger + .transactions(requestWithEmptyFilter) + .mustFail("subscribing with empty filtersByParty and filtersForAnyParty") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.InvalidArgument, + Some("filtersByParty and filtersForAnyParty cannot be empty simultaneously"), + ) + } + }) + + test( + "TXRejectBeginAfterEnd", + "A request with the end before the begin should be rejected with INVALID_ARGUMENT", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + earlier <- ledger.currentEnd() + _ <- ledger.create(party, new Dummy(party)) + later <- ledger.currentEnd() + request <- ledger.getTransactionsRequest(ledger.transactionFormat(Some(Seq(party)))) + invalidRequest = request.update( + _.beginExclusive := later, + _.endInclusive := earlier, + ) + failure <- ledger + .transactions(invalidRequest) + .mustFail("subscribing with the end before the begin") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.OffsetOutOfRange, + Some("is before begin offset"), + ) + } + }) + + test( + "TXUpdateByIdWithoutParty", + "Return INVALID_ARGUMENT when looking up an update by identifier without specifying an update format", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + failure <- ledger + .updateById( + GetUpdateByIdRequest( + TestUpdateId("not-relevant").toHexString, + None, + ) + ) + .mustFail("looking up an update by id without an update format") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.MissingField, + Some( + "The submitted command is missing a mandatory field: update_format" + ), + ) + } + }) + + test( + "TXTransactionByOffsetInvalid", + "Return INVALID_ARGUMENT when looking up a flat transaction using an invalid offset", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + failure <- ledger + .transactionByOffset(-21, Seq(party), AcsDelta) + .mustFail("looking up a flat transaction using an invalid offset") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.NonPositiveOffset, + Some("Offset -21 in offset is not a positive integer"), + ) + } + }) + + test( + "TXUpdateByOffsetWithoutParty", + "Return INVALID_ARGUMENT when looking up an update by offset without specifying a party or update format", + allocate(NoParties), + 
)(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + failure <- ledger + .updateByOffset(GetUpdateByOffsetRequest(offset = 42, updateFormat = None)) + .mustFail("looking up an update without specifying a format") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.MissingField, + Some( + "The submitted command is missing a mandatory field: update_format" + ), + ) + } + }) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceVisibilityIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceVisibilityIT.scala new file mode 100644 index 0000000000..cb78078176 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/TransactionServiceVisibilityIT.scala @@ -0,0 +1,762 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.TransactionHelpers.* +import com.daml.ledger.api.testtool.infrastructure.TransactionOps.* +import com.daml.ledger.api.testtool.infrastructure.{Eventually, LedgerTestSuite, Party} +import com.daml.ledger.api.testtool.suites.v2_1.TransactionServiceVisibilityIT.* +import com.daml.ledger.api.v2.event.Event +import com.daml.ledger.api.v2.event.Event.Event.Exercised +import com.daml.ledger.api.v2.transaction.Transaction +import com.daml.ledger.api.v2.value.Record +import com.daml.ledger.javaapi.data.codegen.ContractCompanion +import com.daml.ledger.test.java.model.iou.{Iou, IouTransfer} +import com.daml.ledger.test.java.model.ioutrade.IouTrade +import com.daml.ledger.test.java.model.test.{ + AgreementFactory, + BranchingControllers, + BranchingSignatories, + Dummy, + WithObservers, +} +import com.daml.test.evidence.tag.EvidenceTag +import com.daml.test.evidence.tag.Security.SecurityTest +import com.daml.test.evidence.tag.Security.SecurityTest.Property.Privacy +import com.digitalasset.canton.ledger.api.TransactionShape.{AcsDelta, LedgerEffects} +import com.digitalasset.canton.platform.store.utils.EventOps.EventOps + +import scala.collection.immutable.Seq +import scala.collection.mutable +import scala.concurrent.{ExecutionContext, Future} +import scala.jdk.CollectionConverters.* + +class TransactionServiceVisibilityIT extends LedgerTestSuite { + + import com.digitalasset.canton.BigDecimalImplicits.* + import CompanionImplicits.* + + implicit val iouTransferCompanion + : ContractCompanion.WithoutKey[IouTransfer.Contract, IouTransfer.ContractId, IouTransfer] = + IouTransfer.COMPANION + implicit val iouTradeCompanion + : ContractCompanion.WithoutKey[IouTrade.Contract, IouTrade.ContractId, IouTrade] = + IouTrade.COMPANION + + def eventually[A]( + assertionName: String + )(runAssertion: => Future[A])(implicit ec: ExecutionContext): Future[A] = + Eventually.eventually( + assertionName, + attempts = + 12, // compared to the default of 10; 4x comes from exponential 2x backoff on each attempt + )(runAssertion) + + def privacyHappyCase(asset: String, happyCase: String)(implicit + lineNo: sourcecode.Line, + fileName: sourcecode.File, + ): List[EvidenceTag] = + List(SecurityTest(property = Privacy, asset = asset, happyCase)) + + 
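// Editor's note (illustrative, mirroring the tests below): `eventually` above retries an + // assertion with exponential 2x backoff, so raising attempts from the default 10 to 12 roughly + // quadruples the total wait. Typical usage wraps lookups that may race with cross-participant + // synchronization: + // + // eventually("transactionById") { + // alpha.transactionById(tx.updateId, Seq(alice), LedgerEffects) + // } + + 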
test( + "TXLedgerEffectsBlinding", + "LedgerEffects transactions should be served according to the blinding/projection rules", + allocate(TwoParties, SingleParty, SingleParty), + tags = privacyHappyCase( + asset = "LedgerEffects Transaction", + happyCase = "LedgerEffects transactions are served according to the blinding/projection rules", + ), + )(implicit ec => { + case Participants( + Participant(alpha, Seq(alice, gbp_bank)), + Participant(beta, Seq(bob)), + Participant(delta, Seq(dkk_bank)), + ) => + for { + gbpIouIssue <- alpha.create( + gbp_bank, + new Iou(gbp_bank, gbp_bank, "GBP", 100.toBigDecimal, Nil.asJava), + ) + gbpTransfer <- + alpha.exerciseAndGetContract[IouTransfer.ContractId, IouTransfer]( + gbp_bank, + gbpIouIssue.exerciseIou_Transfer(alice), + ) + dkkIouIssue <- delta.create( + dkk_bank, + new Iou(dkk_bank, dkk_bank, "DKK", 110.toBigDecimal, Nil.asJava), + ) + dkkTransfer <- + delta.exerciseAndGetContract[IouTransfer.ContractId, IouTransfer]( + dkk_bank, + dkkIouIssue.exerciseIou_Transfer(bob), + ) + + aliceIou1 <- eventually("exerciseIouTransfer_Accept") { + alpha.exerciseAndGetContract[Iou.ContractId, Iou]( + alice, + gbpTransfer.exerciseIouTransfer_Accept(), + ) + } + aliceIou <- eventually("exerciseIou_AddObserver") { + alpha.exerciseAndGetContract[Iou.ContractId, Iou]( + alice, + aliceIou1.exerciseIou_AddObserver(bob), + ) + } + bobIou <- eventually("exerciseIouTransfer_Accept") { + beta.exerciseAndGetContract[Iou.ContractId, Iou]( + bob, + dkkTransfer.exerciseIouTransfer_Accept(), + ) + } + + trade <- eventually("create") { + alpha.create( + alice, + new IouTrade( + alice, + bob, + aliceIou, + gbp_bank, + "GBP", + 100.toBigDecimal, + dkk_bank, + "DKK", + 110.toBigDecimal, + ), + ) + } + tx <- eventually("exerciseIouTrade_Accept") { + beta.exercise(bob, trade.exerciseIouTrade_Accept(bobIou)) + } + + aliceTree <- eventually("transactionTreeById1") { + alpha.transactionById(tx.updateId, Seq(alice), LedgerEffects) + } + bobTree <- beta.transactionById(tx.updateId, Seq(bob), LedgerEffects) + gbpTree <- eventually("transactionTreeById2") { + alpha.transactionById(tx.updateId, Seq(gbp_bank), LedgerEffects) + } + dkkTree <- eventually("transactionTreeById3") { + delta.transactionById(tx.updateId, Seq(dkk_bank), LedgerEffects) + } + } yield { + def treeIsWellformed(tx: Transaction): Unit = { + val eventsToObserve = + mutable.Map.empty[Int, Event] ++= tx.events.map(e => e.nodeId -> e) + + def go(nodeId: Int): Unit = { + val lastDescendantNodeId = eventsToObserve + .getOrElse( + nodeId, + throw new AssertionError( + s"Referenced nodeId $nodeId is not available as node in the transaction." + ), + ) + .event + .exercised + .fold(nodeId)(_.lastDescendantNodeId) + val descendantNodeIds = // including itself + eventsToObserve.view.keys.filter(id => nodeId <= id && id <= lastDescendantNodeId) + + descendantNodeIds.foreach(id => + eventsToObserve.remove(id) match { + case Some(_) => () + case None => + throw new AssertionError( + s"Referenced nodeId $id is not available as node in the transaction." 
+ ) + } + ) + } + + tx.rootNodeIds().foreach(go) + assert( + eventsToObserve.isEmpty, + s"After traversing the transaction, there are still unvisited nodes: $eventsToObserve", + ) + } + + treeIsWellformed(aliceTree) + treeIsWellformed(bobTree) + treeIsWellformed(gbpTree) + treeIsWellformed(dkkTree) + + // both bob and alice see the entire transaction: + // 1x Exercise IouTrade.IouTrade_Accept + // 2 x Iou transfer with 4 nodes each (see below) + assert(aliceTree.events.size == 9) + assert(bobTree.events.size == 9) + + assert(aliceTree.rootNodeIds().size == 1) + assert(bobTree.rootNodeIds().size == 1) + + // banks only see the transfer of their issued Iou: + // Exercise Iou.Iou_Transfer -> Create IouTransfer + // Exercise IouTransfer.IouTransfer_Accept -> Create Iou + assert(gbpTree.events.size == 4) + assert(dkkTree.events.size == 4) + + // the exercises are the root nodes + assert(gbpTree.rootNodeIds().size == 2) + assert(dkkTree.rootNodeIds().size == 2) + + } + }) + + test( + "TXTreeLastDescendant", + "Trees formed by lastDescendantNodeIds should be maintaining Transaction event order", + allocate(TwoParties, SingleParty, SingleParty), + tags = privacyHappyCase( + asset = "Transaction Tree", + happyCase = + "Trees formed by lastDescendantNodeIds should be maintaining Transaction event order", + ), + )(implicit ec => { + case Participants( + Participant(alpha, Seq(alice, gbp_bank)), + Participant(beta, Seq(bob)), + Participant(delta, Seq(dkk_bank)), + ) => + for { + gbpIouIssue <- alpha.create( + gbp_bank, + new Iou(gbp_bank, gbp_bank, "GBP", 100.toBigDecimal, Nil.asJava), + ) + gbpTransfer <- + alpha.exerciseAndGetContract[IouTransfer.ContractId, IouTransfer]( + gbp_bank, + gbpIouIssue.exerciseIou_Transfer(alice), + ) + dkkIouIssue <- delta.create( + dkk_bank, + new Iou(dkk_bank, dkk_bank, "DKK", 110.toBigDecimal, Nil.asJava), + ) + dkkTransfer <- + delta.exerciseAndGetContract[IouTransfer.ContractId, IouTransfer]( + dkk_bank, + dkkIouIssue.exerciseIou_Transfer(bob), + ) + + aliceIou1 <- eventually("exerciseIouTransfer_Accept") { + alpha.exerciseAndGetContract[Iou.ContractId, Iou]( + alice, + gbpTransfer.exerciseIouTransfer_Accept(), + ) + } + aliceIou <- eventually("exerciseIou_AddObserver") { + alpha.exerciseAndGetContract[Iou.ContractId, Iou]( + alice, + aliceIou1.exerciseIou_AddObserver(bob), + ) + } + bobIou <- eventually("exerciseIouTransfer_Accept") { + beta.exerciseAndGetContract[Iou.ContractId, Iou]( + bob, + dkkTransfer.exerciseIouTransfer_Accept(), + ) + } + + trade <- eventually("create") { + alpha.create( + alice, + new IouTrade( + alice, + bob, + aliceIou, + gbp_bank, + "GBP", + 100.toBigDecimal, + dkk_bank, + "DKK", + 110.toBigDecimal, + ), + ) + } + tree <- eventually("exerciseIouTrade_Accept") { + beta.exercise(bob, trade.exerciseIouTrade_Accept(bobIou)) + } + + aliceTree <- eventually("transactionTreeById1") { + alpha.transactionById(tree.updateId, Seq(alice), LedgerEffects) + } + bobTree <- beta.transactionById(tree.updateId, Seq(bob), LedgerEffects) + gbpTree <- eventually("transactionTreeById2") { + alpha.transactionById(tree.updateId, Seq(gbp_bank), LedgerEffects) + } + dkkTree <- eventually("transactionTreeById3") { + delta.transactionById(tree.updateId, Seq(dkk_bank), LedgerEffects) + } + aliceTrees <- alpha.transactions(LedgerEffects, alice) + bobTrees <- alpha.transactions(LedgerEffects, bob) + gbpTrees <- alpha.transactions(LedgerEffects, gbp_bank) + dkkTrees <- alpha.transactions(LedgerEffects, dkk_bank) + } yield { + def treeIsWellformed(tree: Transaction): 
Unit = { + val eventsToObserve = + mutable.Map + .empty[Int, Event.Event] ++= tree.events.view.map(e => e.nodeId -> e.event).toMap + + def go(nodeId: Int): Unit = + eventsToObserve.remove(nodeId) match { + case Some(Exercised(exercisedEvent)) => + val lastDescendantNodeId = exercisedEvent.lastDescendantNodeId + assertGreaterOrEquals( + context = + s"lastDescendantNodeId is less than the node id. Expected: $lastDescendantNodeId >= ${exercisedEvent.nodeId}", + a = lastDescendantNodeId, + b = exercisedEvent.nodeId, + ) + val descendantNodeIds = + eventsToObserve.view.keys.filter(id => nodeId < id && id <= lastDescendantNodeId) + descendantNodeIds.foreach(eventsToObserve.remove) + case Some(_) => () + case None => + throw new AssertionError( + s"Referenced nodeId $nodeId is not available as node in the transaction." + ) + } + + tree.rootNodeIds().foreach(go) + assert( + eventsToObserve.isEmpty, + s"After traversing the transaction, there are still unvisited nodes: $eventsToObserve", + ) + } + + treeIsWellformed(aliceTree) + treeIsWellformed(bobTree) + treeIsWellformed(gbpTree) + treeIsWellformed(dkkTree) + + Iterator( + aliceTrees, + bobTrees, + gbpTrees, + dkkTrees, + ).flatten.foreach(treeIsWellformed) + } + }) + + test( + "TXNotDivulge", + "Data should not be exposed to parties unrelated to a transaction", + allocate(SingleParty, SingleParty), + tags = privacyHappyCase( + asset = "Transaction", + happyCase = "Transactions are not exposed to parties unrelated to a transaction", + ), + )(implicit ec => { case Participants(Participant(alpha, Seq(alice)), Participant(_, Seq(bob))) => + for { + _ <- alpha.create(alice, new Dummy(alice)) + bobsView <- alpha.transactions(AcsDelta, bob) + } yield { + assert( + bobsView.isEmpty, + s"After Alice creates a contract, Bob sees one or more transactions he shouldn't, namely those created by commands ${bobsView.map(_.commandId).mkString(", ")}", + ) + } + }) + + test( + "TXLedgerEffectsHideCommandIdToNonSubmittingStakeholders", + "A ledger effects transaction should be visible to a non-submitting stakeholder but its command identifier should be empty", + allocate(SingleParty, SingleParty), + tags = privacyHappyCase( + asset = "Ledger Effects Transaction", + happyCase = + "A ledger effects transaction is visible to a non-submitting stakeholder but its command identifier is empty", + ), + )(implicit ec => { + case p @ Participants(Participant(alpha, Seq(submitter)), Participant(beta, Seq(listener))) => + for { + (id, _) <- alpha.createAndGetTransactionId( + submitter, + new AgreementFactory(listener, submitter), + ) + _ <- p.synchronize + tx <- beta.transactionById(id, Seq(listener), LedgerEffects) + txsFromStream <- beta.transactions(LedgerEffects, listener) + } yield { + assert( + tx.commandId.isEmpty, + s"The command identifier for the transaction was supposed to be empty but it's `${tx.commandId}` instead.", + ) + + assert( + txsFromStream.size == 1, + s"One transaction expected but got ${txsFromStream.size} instead.", + ) + + val txFromStreamCommandId = txsFromStream.head.commandId + assert( + txFromStreamCommandId.isEmpty, + s"The command identifier for the transaction was supposed to be empty but it's `$txFromStreamCommandId` instead.", + ) + } + }) + + test( + "TXHideCommandIdToNonSubmittingStakeholders", + "An acs delta transaction should be visible to a non-submitting stakeholder but its command identifier should be empty", + allocate(SingleParty, SingleParty), + tags = privacyHappyCase( + asset = "Acs Delta Transaction", + happyCase = + "An 
acs delta transaction is visible to a non-submitting stakeholder but its command identifier is empty", + ), + )(implicit ec => { + case p @ Participants(Participant(alpha, Seq(submitter)), Participant(beta, Seq(listener))) => + for { + (id, _) <- alpha.createAndGetTransactionId( + submitter, + new AgreementFactory(listener, submitter), + ) + _ <- p.synchronize + tx <- beta.transactionById(id, Seq(listener), AcsDelta) + txsFromStream <- beta.transactions(AcsDelta, listener) + } yield { + + assert( + tx.commandId.isEmpty, + s"The command identifier for the flat transaction was supposed to be empty but it's `${tx.commandId}` instead.", + ) + + assert( + txsFromStream.size == 1, + s"One transaction expected but got ${txsFromStream.size} instead.", + ) + + val txFromStreamCommandId = txsFromStream.head.commandId + assert( + txFromStreamCommandId.isEmpty, + s"The command identifier for the transaction was supposed to be empty but it's `$txFromStreamCommandId` instead.", + ) + } + }) + + test( + "TXNotDiscloseCreateToNonSignatory", + "Not disclose create to non-chosen branching signatory", + allocate(SingleParty, SingleParty), + tags = privacyHappyCase( + asset = "Flat Transaction", + happyCase = + "Transaction with a create event is not disclosed to non-chosen branching signatory", + ), + )(implicit ec => { + case p @ Participants(Participant(alpha, Seq(alice)), Participant(beta, Seq(bob))) => + val template = new BranchingSignatories(false, alice, bob) + val create = beta.submitAndWaitForTransactionRequest(bob, template.create.commands) + for { + transactionResponse <- beta.submitAndWaitForTransaction(create) + _ <- p.synchronize + aliceTransactions <- alpha.transactions(AcsDelta, alice) + } yield { + val transaction = transactionResponse.getTransaction + val branchingContractId = createdEvents(transaction) + .map(_.contractId) + .headOption + .getOrElse(fail(s"Expected single create event")) + val contractsVisibleByAlice = aliceTransactions.flatMap(createdEvents).map(_.contractId) + assert( + !contractsVisibleByAlice.contains(branchingContractId), + s"The transaction ${transaction.updateId} should not have been disclosed.", + ) + } + }) + + test( + "TXDiscloseCreateToSignatory", + "Disclose create to the chosen branching controller", + allocate(SingleParty, TwoParties), + tags = privacyHappyCase( + asset = "Flat Transaction", + happyCase = "Transaction with a create event is disclosed to chosen branching controller", + ), + )(implicit ec => { + case Participants(Participant(alpha, Seq(alice)), Participant(beta, Seq(bob, eve))) => + import ClearIdsImplicits.* + val template = + new BranchingControllers(alice, true, bob, eve) + for { + _ <- alpha.create(alice, template)(BranchingControllers.COMPANION) + _ <- eventually("flatTransactions") { + for { + aliceView <- alpha.transactions(AcsDelta, alice) + bobView <- beta.transactions(AcsDelta, bob) + evesView <- beta.transactions(AcsDelta, eve) + } yield { + val aliceCreate = + assertSingleton("Alice should see one transaction", aliceView.flatMap(createdEvents)) + assertEquals( + "Alice arguments do not match", + aliceCreate.getCreateArguments.clearValueIds, + Record.fromJavaProto(template.toValue.toProtoRecord), + ) + val bobCreate = + assertSingleton("Bob should see one transaction", bobView.flatMap(createdEvents)) + assertEquals( + "Bob arguments do not match", + bobCreate.getCreateArguments.clearValueIds, + Record.fromJavaProto(template.toValue.toProtoRecord), + ) + assert(evesView.isEmpty, "Eve should not see any contract") + } + } + } yield { + 
// Checks performed in the `eventually` block + } + }) + + test( + "TXNotDiscloseCreateToNonChosenBranchingController", + "Not disclose create to non-chosen branching controller", + allocate(SingleParty, TwoParties), + tags = privacyHappyCase( + asset = "Flat Transaction", + happyCase = + "Transaction with a create event is not disclosed to non-chosen branching controller", + ), + )(implicit ec => { + case p @ Participants(Participant(alpha, Seq(alice)), Participant(beta, Seq(bob, eve))) => + val template = + new BranchingControllers(alice, false, bob, eve) + val create = alpha.submitAndWaitForTransactionRequest(alice, template.create.commands) + for { + transactionResponse <- alpha.submitAndWaitForTransaction(create) + _ <- p.synchronize + transactions <- beta.transactions(AcsDelta, bob) + } yield { + val transaction = transactionResponse.getTransaction + assert( + !transactions.exists(_.updateId == transaction.updateId), + s"The transaction ${transaction.updateId} should not have been disclosed.", + ) + } + }) + + test( + "TXDiscloseCreateToObservers", + "Disclose create to observers", + allocate(SingleParty, TwoParties), + tags = privacyHappyCase( + asset = "Flat Transaction", + happyCase = "Transaction with a create event is disclosed to observers", + ), + )(implicit ec => { + case p @ Participants(Participant(alpha, Seq(alice)), Participant(beta, observers)) => + val template = new WithObservers(alice, observers.map(_.getValue).asJava) + val create = alpha.submitAndWaitRequest(alice, template.create.commands) + for { + _ <- p.synchronize // Ensures parties are visible to both participants + updateId <- alpha.submitAndWait(create).map(_.updateId) + _ <- p.synchronize // Ensures the transactions are visible to both participants + transactions <- beta.transactions(AcsDelta, observers*) + } yield { + assert(transactions.exists(_.updateId == updateId)) + } + }) + + test( + "TXFlatTransactionsVisibility", + "Transactions in the flat transactions stream should be disclosed only to the stakeholders", + allocate(Parties(3)), + timeoutScale = 2.0, + tags = privacyHappyCase( + asset = "Transaction", + happyCase = + "Transactions in the flat transactions stream are disclosed only to the stakeholders", + ), + )(implicit ec => { case Participants(Participant(ledger, Seq(bank, alice, bob))) => + for { + gbpIouIssue <- ledger.create(bank, new Iou(bank, bank, "GBP", 100.toBigDecimal, Nil.asJava)) + gbpTransfer <- ledger.exerciseAndGetContract[IouTransfer.ContractId, IouTransfer]( + bank, + gbpIouIssue.exerciseIou_Transfer(alice), + ) + dkkIouIssue <- ledger.create(bank, new Iou(bank, bank, "DKK", 110.toBigDecimal, Nil.asJava)) + dkkTransfer <- ledger.exerciseAndGetContract[IouTransfer.ContractId, IouTransfer]( + bank, + dkkIouIssue.exerciseIou_Transfer(bob), + ) + aliceIou1 <- ledger.exerciseAndGetContract[Iou.ContractId, Iou]( + alice, + gbpTransfer.exerciseIouTransfer_Accept(), + ) + aliceIou <- ledger + .exerciseAndGetContract[Iou.ContractId, Iou](alice, aliceIou1.exerciseIou_AddObserver(bob)) + bobIou <- ledger + .exerciseAndGetContract[Iou.ContractId, Iou](bob, dkkTransfer.exerciseIouTransfer_Accept()) + + trade <- ledger.create( + alice, + new IouTrade( + alice, + bob, + aliceIou, + bank, + "GBP", + 100.toBigDecimal, + bank, + "DKK", + 110.toBigDecimal, + ), + ) + + tree <- ledger.exercise(bob, trade.exerciseIouTrade_Accept(bobIou)) + + aliceTransactions <- ledger.transactions(AcsDelta, alice) + bobTransactions <- ledger.transactions(AcsDelta, bob) + } yield { + val newIouList = 
createdEvents(tree) + .filter(event => event.templateId.exists(_.entityName == "Iou")) + + assert( + newIouList.length == 2, + s"Expected 2 new IOUs created, found: ${newIouList.length}", + ) + + val newAliceIou = newIouList + .find(iou => + iou.signatories.contains(alice.getValue) && iou.signatories.contains(bank.getValue) + ) + .map(_.contractId) + .getOrElse { + fail(s"No IOU owned by $alice was found") + } + + val newBobIou = newIouList + .find(iou => + iou.signatories.contains(bob.getValue) && iou.signatories.contains(bank.getValue) + ) + .map(_.contractId) + .getOrElse { + fail(s"No IOU owned by $bob was found") + } + + assert( + aliceTransactions.flatMap(createdEvents).map(_.contractId).contains(newAliceIou), + "Alice's flat transaction stream does not contain the new IOU", + ) + assert( + !aliceTransactions.flatMap(createdEvents).map(_.contractId).contains(newBobIou), + "Alice's flat transaction stream contains Bob's new IOU", + ) + assert( + bobTransactions.flatMap(createdEvents).map(_.contractId).contains(newBobIou), + "Bob's flat transaction stream does not contain the new IOU", + ) + assert( + !bobTransactions.flatMap(createdEvents).map(_.contractId).contains(newAliceIou), + "Bob's flat transaction stream contains Alice's new IOU", + ) + } + }) + + test( + "TXRequestingPartiesWitnessVisibility", + "Transactions in the flat transactions stream should not leak witnesses", + allocate(Parties(3)), + tags = privacyHappyCase( + asset = "Transaction", + happyCase = "Transactions in the flat transactions stream are not leaking witnesses", + ), + )(implicit ec => { case Participants(Participant(ledger, Seq(bank, alice, bob))) => + for { + iouIssue <- ledger.create(bank, new Iou(bank, bank, "GBP", 100.toBigDecimal, Nil.asJava)) + transfer <- ledger.exerciseAndGetContract[IouTransfer.ContractId, IouTransfer]( + bank, + iouIssue.exerciseIou_Transfer(alice), + ) + aliceIou <- ledger + .exerciseAndGetContract[Iou.ContractId, Iou](alice, transfer.exerciseIouTransfer_Accept()) + _ <- ledger + .exerciseAndGetContract[Iou.ContractId, Iou](alice, aliceIou.exerciseIou_AddObserver(bob)) + aliceFlatTransactions <- ledger.transactions(AcsDelta, alice) + bobFlatTransactions <- ledger.transactions(AcsDelta, bob) + aliceBankFlatTransactions <- ledger.transactions(AcsDelta, alice, bank) + } yield { + onlyRequestingPartiesAsWitnesses(allTxWitnesses(aliceFlatTransactions), alice)( + "Alice's flat transactions contain other parties as witnesses" + ) + + onlyRequestingPartiesAsWitnesses(allTxWitnesses(bobFlatTransactions), bob)( + "Bob's flat transactions contain other parties as witnesses" + ) + + onlyRequestingPartiesAsWitnesses(allTxWitnesses(aliceBankFlatTransactions), alice, bank)( + "Alice's and Bank's flat transactions contain other parties as witnesses" + ) + } + }) + + test( + "TXLedgerEffectsRequestingPartiesWitnessVisibility", + "Transactions in the ledger effects transaction stream should not leak witnesses", + allocate(Parties(3)), + tags = privacyHappyCase( + asset = "Transaction", + happyCase = "Transactions in the ledger effects transaction stream are not leaking witnesses", + ), + )(implicit ec => { case Participants(Participant(ledger, Seq(bank, alice, bob))) => + for { + iouIssue <- ledger.create(bank, new Iou(bank, bank, "GBP", 100.toBigDecimal, Nil.asJava)) + transfer <- ledger.exerciseAndGetContract[IouTransfer.ContractId, IouTransfer]( + bank, + iouIssue.exerciseIou_Transfer(alice), + )(IouTransfer.COMPANION) + aliceIou <- ledger + .exerciseAndGetContract[Iou.ContractId, Iou](alice, 
transfer.exerciseIouTransfer_Accept()) + _ <- ledger + .exerciseAndGetContract[Iou.ContractId, Iou](alice, aliceIou.exerciseIou_AddObserver(bob)) + aliceTransactionsLedgerEffects <- ledger.transactions(LedgerEffects, alice) + bobTransactionsLedgerEffects <- ledger.transactions(LedgerEffects, bob) + aliceBankTransactionsLedgerEffects <- ledger.transactions(LedgerEffects, alice, bank) + } yield { + onlyRequestingPartiesAsWitnesses(allTxWitnesses(aliceTransactionsLedgerEffects), alice)( + "Alice's ledger effects transactions contain other parties as witnesses" + ) + + onlyRequestingPartiesAsWitnesses(allTxWitnesses(bobTransactionsLedgerEffects), bob)( + "Bob's ledger effects transactions contain other parties as witnesses" + ) + + onlyRequestingPartiesAsWitnesses( + allTxWitnesses(aliceBankTransactionsLedgerEffects), + alice, + bank, + )( + "Alice's and Bank's ledger effects transactions contain other parties as witnesses" + ) + } + }) +} + +object TransactionServiceVisibilityIT { + private def onlyRequestingPartiesAsWitnesses( + allWitnesses: Set[String], + requestingParties: Party* + )(msg: String): Unit = { + val nonRequestingWitnesses = allWitnesses.diff(requestingParties.map(_.getValue).toSet) + assert( + nonRequestingWitnesses.isEmpty, + s"$msg: ${nonRequestingWitnesses.mkString("[", ",", "]")}", + ) + } + + private def allTxWitnesses(transactions: Vector[Transaction]): Set[String] = + transactions + .flatMap(_.events.map(_.event).flatMap { + case Event.Event.Empty => Seq.empty + case Event.Event.Created(createdEvent) => createdEvent.witnessParties + case Event.Event.Archived(archivedEvent) => archivedEvent.witnessParties + case Event.Event.Exercised(exercisedEvent) => exercisedEvent.witnessParties + }) + .toSet +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/UpdateServiceQueryIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/UpdateServiceQueryIT.scala new file mode 100644 index 0000000000..88535e3c61 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/UpdateServiceQueryIT.scala @@ -0,0 +1,652 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
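+
+// In outline, the point-wise lookup pattern exercised by this suite (a sketch,
+// assuming the `ParticipantTestContext` helpers imported by this file):
+//
+//   for {
+//     dummy    <- ledger.create(party, new Dummy(party))
+//     tx       <- ledger.exercise(party, dummy.exerciseDummyChoice1())
+//     byId     <- ledger.transactionById(tx.updateId, Seq(party), LedgerEffects)
+//     byOffset <- ledger.transactionByOffset(tx.offset, Seq(party), LedgerEffects)
+//   } yield assertEquals("lookups agree", byId, byOffset)
+//
+// Both lookups apply the requesting parties (and any template filters) before
+// resolving the update, which is why every negative test below expects NOT_FOUND.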
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.api.v2.event.CreatedEvent +import com.daml.ledger.javaapi.data.codegen.ContractId +import com.daml.ledger.test.java.model.test.{Dummy, DummyWithParam} +import com.digitalasset.canton.ledger.api.TransactionShape.{AcsDelta, LedgerEffects} +import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors +import com.digitalasset.canton.protocol.TestUpdateId + +class UpdateServiceQueryIT extends LedgerTestSuite { + import CompanionImplicits.* + + test( + "TXTransactionByIdLedgerEffectsBasic", + "Expose a visible transaction tree by identifier", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + dummy <- ledger.create(party, new Dummy(party)) + tree <- ledger.exercise(party, dummy.exerciseDummyChoice1()) + byId <- ledger.transactionById(tree.updateId, Seq(party), LedgerEffects) + } yield { + assertEquals("The transaction fetched by identifier does not match", tree, byId) + assertAcsDelta( + tree.events, + acsDelta = true, + "The acs_delta field in exercised events should be set", + ) + } + }) + + test( + "TXLedgerEffectsInvisibleUpdateByIdOtherParty", + "Do not expose an invisible transaction by identifier for another party (LedgerEffects)", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case p @ Participants(Participant(alpha, Seq(party)), Participant(beta, Seq(intruder))) => + for { + dummy <- alpha.create(party, new Dummy(party)) + transaction <- alpha.exercise(party, dummy.exerciseDummyChoice1()) + _ <- p.synchronize + failure <- beta + .transactionById(transaction.updateId, Seq(intruder), LedgerEffects) + .mustFail("subscribing to an invisible transaction") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + } + }) + + test( + "TXLedgerEffectsInvisibleUpdateByIdOtherTemplate", + "Do not expose an invisible transaction by identifier for another template (LedgerEffects)", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(alpha, Seq(party))) => + for { + dummy <- alpha.create(party, new Dummy(party)) + transaction <- alpha.exercise(party, dummy.exerciseDummyChoice1()) + failure <- alpha + .transactionById( + updateId = transaction.updateId, + parties = Seq(party), + templateIds = Seq(DummyWithParam.TEMPLATE_ID), + transactionShape = LedgerEffects, + ) + .mustFail("subscribing to an invisible transaction") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + } + }) + + test( + "TXLedgerEffectsUpdateByIdNotFound", + "Return NOT_FOUND when looking up a non-existent transaction by identifier (LedgerEffects)", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + failure <- ledger + .transactionById( + TestUpdateId("a" * 60).toHexString, + Seq(party), + LedgerEffects, + ) + .mustFail("looking up a non-existent transaction") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + } + }) + + test( + "TXAcsDeltaUpdateByIdBasic", + "Expose a visible transaction by identifier (AcsDelta)", + allocate(SingleParty), + 
)(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + dummy <- ledger.create(party, new Dummy(party)) + transaction <- ledger.exercise(party, dummy.exerciseDummyChoice1(), AcsDelta) + byId <- ledger.transactionById(transaction.updateId, Seq(party), AcsDelta) + } yield { + assertEquals("The transaction fetched by identifier does not match", transaction, byId) + } + }) + + test( + "TXAcsDeltaUpdateByIdCreateArgumentsNonEmpty", + "Include contract arguments in fetching a transaction by identifier (AcsDelta)", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + response <- ledger.submitAndWaitForTransaction( + ledger.submitAndWaitForTransactionRequest(party, new Dummy(party).create().commands) + ) + tx = response.getTransaction + byId <- ledger.transactionById(tx.updateId, Seq(party), AcsDelta) + // archive the created contract to not pollute the ledger + contractId = Dummy.COMPANION.toContractId( + new ContractId(tx.events.head.getCreated.contractId) + ) + _ <- ledger.exercise(party, contractId.exerciseArchive()) + } yield { + checkArgumentsNonEmpty(byId.events.head.getCreated) + } + }) + + test( + "TXAcsDeltaInvisibleUpdateByIdOtherParty", + "Do not expose an invisible transaction by identifier for another party (AcsDelta)", + allocate(TwoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(party, intruder))) => + for { + dummy <- ledger.create(party, new Dummy(party)) + tree <- ledger.exercise(party, dummy.exerciseDummyChoice1()) + failure <- ledger + .transactionById(tree.updateId, Seq(intruder), AcsDelta) + .mustFail("looking up an invisible transaction") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + } + }) + + test( + "TXAcsDeltaInvisibleUpdateByIdOtherTemplate", + "Do not expose an invisible transaction by identifier for another template (AcsDelta)", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(alpha, Seq(party))) => + for { + dummy <- alpha.create(party, new Dummy(party)) + transaction <- alpha.exercise(party, dummy.exerciseDummyChoice1()) + failure <- alpha + .transactionById( + updateId = transaction.updateId, + parties = Seq(party), + templateIds = Seq(DummyWithParam.TEMPLATE_ID), + transactionShape = AcsDelta, + ) + .mustFail("subscribing to an invisible transaction") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + } + }) + + test( + "TXAcsDeltaUpdateByIdNotFound", + "Return NOT_FOUND when looking up a non-existent transaction by identifier (AcsDelta)", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + failure <- ledger + .transactionById( + TestUpdateId("a" * 60).toHexString, + Seq(party), + AcsDelta, + ) + .mustFail("looking up a non-existent transaction") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + } + }) + + test( + "TXLedgerEffectsUpdateByOffsetBasic", + "Expose a visible transaction by offset (LedgerEffects)", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + dummy <- ledger.create(party, new Dummy(party)) + tx <- ledger.exercise(party, dummy.exerciseDummyChoice1()) + byOffset <- ledger.transactionByOffset(tx.offset, Seq(party), LedgerEffects) + } 
yield { + assertEquals("The transaction fetched by offset does not match", tx, byOffset) + } + }) + + test( + "TXLedgerEffectsInvisibleUpdateByOffsetOtherParty", + "Do not expose an invisible transaction by offset for another party (LedgerEffects)", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case p @ Participants(Participant(alpha, Seq(party)), Participant(beta, Seq(intruder))) => + for { + dummy <- alpha.create(party, new Dummy(party)) + tree <- alpha.exercise(party, dummy.exerciseDummyChoice1()) + _ <- p.synchronize + failure <- beta + .transactionByOffset(tree.offset, Seq(intruder), LedgerEffects) + .mustFail("looking up an invisible transaction") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + } + }) + + test( + "TXLedgerEffectsInvisibleUpdateByOffsetOtherTemplate", + "Do not expose an invisible transaction by offset for another template (LedgerEffects)", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(alpha, Seq(party))) => + for { + dummy <- alpha.create(party, new Dummy(party)) + ledgerEffectsTx <- alpha.exercise(party, dummy.exerciseDummyChoice1()) + failure <- alpha + .transactionByOffset( + offset = ledgerEffectsTx.offset, + parties = Seq(party), + transactionShape = LedgerEffects, + templateIds = Seq(DummyWithParam.TEMPLATE_ID), + ) + .mustFail("looking up an invisible transaction") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + } + }) + + test( + "TXLedgerEffectsUpdateByOffsetNotFound", + "Return NOT_FOUND when looking up a non-existent transaction by offset (LedgerEffects)", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + failure <- ledger + .transactionByOffset(21 * 60, Seq(party), LedgerEffects) + .mustFail("looking up a non-existent transaction") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + } + }) + + test( + "TXAcsDeltaUpdateByOffsetBasic", + "Expose a visible transaction by offset (AcsDelta)", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + dummy <- ledger.create(party, new Dummy(party)) + transaction <- ledger.exercise(party, dummy.exerciseDummyChoice1(), AcsDelta) + event = transaction.events.head.event + offset = event.archived.map(_.offset).get + byOffset <- ledger.transactionByOffset(offset, Seq(party), AcsDelta) + } yield { + assertEquals("The transaction fetched by offset does not match", transaction, byOffset) + } + }) + + test( + "TXAcsDeltaUpdateByOffsetCreateArgumentsNonEmpty", + "Include contract arguments in fetching a transaction by offset (AcsDelta)", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + response <- ledger.submitAndWaitForTransaction( + ledger.submitAndWaitForTransactionRequest(party, new Dummy(party).create().commands) + ) + tx = response.getTransaction + byOffset <- ledger.transactionByOffset(tx.offset, Seq(party), AcsDelta) + // archive the created contract to not pollute the ledger + contractId = Dummy.COMPANION.toContractId( + new ContractId(tx.events.head.getCreated.contractId) + ) + _ <- ledger.exercise(party, contractId.exerciseArchive()) + } yield { + checkArgumentsNonEmpty(byOffset.events.head.getCreated) + } + }) + + test( + 
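+    // Note that lookups do not distinguish "does not exist" from "exists but is
+    // not visible": both cases surface as NOT_FOUND with "Update not found, or
+    // not visible.", so a non-stakeholder cannot probe for other parties' updates.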
"TXAcsDeltaInvisibleUpdateByOffsetOtherParty", + "Do not expose an invisible transaction by event for an other party identifier (AcsDelta)", + allocate(TwoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(party, intruder))) => + for { + dummy <- ledger.create(party, new Dummy(party)) + tree <- ledger.exercise(party, dummy.exerciseDummyChoice1()) + failure <- ledger + .transactionByOffset(tree.offset, Seq(intruder), AcsDelta) + .mustFail("looking up an invisible transaction") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + } + }) + + test( + "TXAcsDeltaInvisibleUpdateByOffsetOtherTemplate", + "Do not expose an invisible transaction by offset for an other template (AcsDelta)", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(alpha, Seq(party))) => + for { + dummy <- alpha.create(party, new Dummy(party)) + tree <- alpha.exercise(party, dummy.exerciseDummyChoice1()) + failure <- alpha + .transactionByOffset( + offset = tree.offset, + parties = Seq(party), + transactionShape = AcsDelta, + templateIds = Seq(DummyWithParam.TEMPLATE_ID), + ) + .mustFail("looking up an invisible transaction") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + } + }) + + test( + "TXAcsDeltaUpdateByOffsetNotFound", + "Return NOT_FOUND when looking up a non-existent transaction by offset (AcsDelta)", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + failure <- ledger + .transactionByOffset(21 * 60, Seq(party), AcsDelta) + .mustFail("looking up a non-existent flat transaction") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + } + }) + + test( + "TXUpdateByIdTransientContract", + "GetUpdateById returns NOT_FOUND for AcsDelta on a transient contract", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(owner))) => + for { + response <- ledger.submitAndWait( + ledger.submitAndWaitRequest(owner, new Dummy(owner).createAnd().exerciseArchive().commands) + ) + failure <- ledger + .transactionById( + updateId = response.updateId, + parties = Seq(owner), + transactionShape = AcsDelta, + ) + .mustFail("acs delta lookup") + ledgerEffectsResult <- ledger.transactionById( + updateId = response.updateId, + parties = Seq(owner), + transactionShape = LedgerEffects, + ) + } yield { + assertGrpcError( + failure, + RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + assertEquals( + "ledgerEffectsResult should not be empty", + ledgerEffectsResult.events.nonEmpty, + true, + ) + } + }) + + test( + "TXUpdateByOffsetTransientContract", + "GetUpdateByOffset returns NOT_FOUND on a transient contract", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(owner))) => + for { + response <- ledger.submitAndWaitForTransaction( + ledger.submitAndWaitForTransactionRequest( + party = owner, + commands = new Dummy(owner).createAnd().exerciseArchive().commands, + transactionShape = LedgerEffects, + ) + ) + offset = response.transaction.get.offset + failure <- ledger + .transactionByOffset(offset, Seq(owner), AcsDelta) + .mustFail("acs delta lookup") + ledgerEffectsResult <- ledger.transactionByOffset(offset, Seq(owner), LedgerEffects) + } yield { + assertGrpcError( + failure, + 
RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + assertEquals( + "ledgerEffectsResult should not be empty", + ledgerEffectsResult.events.nonEmpty, + true, + ) + } + }) + + test( + "TXUpdateByIdNonConsumingChoice", + "GetUpdateById returns NOT_FOUND when command contains only a non-consuming choice", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(owner))) => + for { + contractId: Dummy.ContractId <- ledger.create(owner, new Dummy(owner)) + response <- ledger.submitAndWait( + ledger.submitAndWaitRequest(owner, contractId.exerciseDummyNonConsuming().commands) + ) + failure <- + ledger + .transactionById( + updateId = response.updateId, + parties = Seq(owner), + transactionShape = AcsDelta, + ) + .mustFail("looking up an non-existent transaction") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + } + }) + + test( + "TXUpdateByOffsetNonConsumingChoice", + "GetUpdateByOffset returns NOT_FOUND when command contains only a non-consuming choice", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(owner))) => + for { + contractId: Dummy.ContractId <- ledger.create(owner, new Dummy(owner)) + response <- ledger.submitAndWaitForTransaction( + ledger.submitAndWaitForTransactionRequest( + owner, + contractId.exerciseDummyNonConsuming().commands, + ) + ) + offset = response.transaction.get.offset + failure <- ledger + .transactionByOffset(offset, Seq(owner), AcsDelta) + .mustFail("looking up an non-existent transaction") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + } + }) + + test( + "TopologyTXByOffsetBasic", + "Expose a visible topology transaction by offset", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, _)) => + for { + begin <- ledger.currentEnd() + party <- ledger.allocateParty() + // For synchronization + topologyTx <- ledger.participantAuthorizationTransaction( + partyIdSubstring = "TopologyTXByOffsetBasic", + begin = Some(begin), + ) + byOffset <- ledger.topologyTransactionByOffset(topologyTx.offset, Seq(party)) + byId <- ledger.topologyTransactionById(topologyTx.updateId, Seq(party)) + } yield { + assertEquals( + "The topology transactions fetched by identifier and by offset do not match", + byOffset, + byId, + ) + } + }) + + test( + "TopologyTXPartyWildcard", + "Expose a visible topology transaction without specifying parties", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, _)) => + for { + begin <- ledger.currentEnd() + _ <- ledger.allocateParty() + // For synchronization + topologyTx <- ledger.participantAuthorizationTransaction( + partyIdSubstring = "TopologyTXPartyWildcard", + begin = Some(begin), + ) + byOffset <- ledger.topologyTransactionByOffset(topologyTx.offset, Seq()) + byId <- ledger.topologyTransactionById(topologyTx.updateId, Seq()) + } yield { + assertEquals( + "The topology transactions fetched by identifier and by offset do not match", + byOffset, + byId, + ) + } + }) + + test( + "TopologyTXInvisibleByIdOtherParty", + "Do not expose an invisible topology transaction by identifier for an other party", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(intruder))) => + for { + begin <- ledger.currentEnd() + _ <- ledger.allocateParty() + // For synchronization + topologyTx <- 
ledger.participantAuthorizationTransaction( + partyIdSubstring = "TopologyTXInvisibleByIdOtherParty", + begin = Some(begin), + ) + failure <- ledger + .topologyTransactionById(topologyTx.updateId, Seq(intruder)) + .mustFail("subscribing to an invisible topology transaction") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + } + }) + + test( + "TopologyTXInvisibleById", + "Return NOT_FOUND when looking up a non-existent topology transaction", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, _)) => + for { + failure <- ledger + .topologyTransactionById( + TestUpdateId("a" * 60).toHexString, + Seq.empty, + ) + .mustFail("looking up an non-existent transaction") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + } + }) + + test( + "TopologyTXInvisibleByOffsetOtherParty", + "Do not expose an invisible topology transaction by offset for an other party", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(intruder))) => + for { + begin <- ledger.currentEnd() + party <- ledger.allocateParty() + // For synchronization + topologyTx <- ledger.participantAuthorizationTransaction( + partyIdSubstring = "TopologyTXInvisibleByOffsetOtherParty", + begin = Some(begin), + ) + failure <- ledger + .topologyTransactionByOffset(topologyTx.offset, Seq(intruder)) + .mustFail("subscribing to an invisible topology transaction") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + } + }) + + test( + "TopologyTXInvisibleByOffset", + "Return NOT_FOUND when looking up a non-existent topology transaction", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, _)) => + for { + failure <- ledger + .topologyTransactionByOffset(4200 * 60, Seq.empty) + .mustFail("looking up an non-existent transaction") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.NotFound.Update, + Some("Update not found, or not visible."), + ) + } + }) + + def checkArgumentsNonEmpty(event: CreatedEvent): Unit = + assertEquals( + s"Create event $event createArguments must NOT be empty", + event.createArguments.isEmpty, + false, + ) + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/UpdateServiceStreamsIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/UpdateServiceStreamsIT.scala new file mode 100644 index 0000000000..74855749c8 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/UpdateServiceStreamsIT.scala @@ -0,0 +1,559 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
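+
+// In outline, the two subscription modes exercised by this suite (a sketch,
+// assuming the request helpers used in this file):
+//
+//   // Bounded: the helper fills endInclusive (typically with the current ledger
+//   // end), so the stream completes once that offset has been served.
+//   bounded <- ledger.getTransactionsRequest(ledger.transactionFormat(Some(Seq(party))))
+//   // Tailing: clearing endInclusive keeps the stream open for future updates,
+//   // so the test must read a fixed number of items and then cancel.
+//   tailing = bounded.update(_.optionalEndInclusive := None)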
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.TransactionHelpers.* +import com.daml.ledger.api.testtool.infrastructure.{ + LedgerTestSuite, + OngoingStreamPackageUploadTestDar, + TestConstraints, +} +import com.daml.ledger.api.v2.transaction_filter.TransactionFormat +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_LEDGER_EFFECTS +import com.daml.ledger.test.java.model.test.{Dummy, DummyFactory} +import com.daml.ledger.test.java.ongoing_stream_package_upload.ongoingstreampackageuploadtest.OngoingStreamPackageUploadTestTemplate +import com.digitalasset.canton.ledger.api.TransactionShape.{AcsDelta, LedgerEffects} +import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors +import com.google.protobuf.ByteString + +import scala.concurrent.{ExecutionContext, Future} +import scala.jdk.CollectionConverters.* + +class UpdateServiceStreamsIT extends LedgerTestSuite { + import CompanionImplicits.* + + private[this] val testPackageResourcePath = OngoingStreamPackageUploadTestDar.path + + private def loadTestPackage()(implicit ec: ExecutionContext): Future[ByteString] = { + val testPackage = Future { + val in = getClass.getClassLoader.getResourceAsStream(testPackageResourcePath) + assert(in != null, s"Unable to load test package resource at '$testPackageResourcePath'") + in + } + val bytes = testPackage.map(ByteString.readFrom) + bytes.onComplete(_ => testPackage.map(_.close())) + bytes + } + + test( + "TXLedgerEffectsEndToEnd", + "An empty stream of transactions should be served when getting transactions from and to the current ledger end", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + ledgerEnd <- ledger.currentEnd() + request <- ledger.getTransactionsRequest( + transactionFormat = + ledger.transactionFormat(Some(Seq(party)), transactionShape = LedgerEffects) + ) + fromAndToBegin = + request.update(_.beginExclusive := ledgerEnd, _.endInclusive := ledgerEnd) + transactions <- ledger.transactions(fromAndToBegin) + } yield { + assert( + transactions.isEmpty, + s"Received a non-empty stream with ${transactions.size} transactions in it.", + ) + } + }) + + test( + "TXEndToEnd", + "An empty stream should be served when getting transactions from and to the end of the ledger", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + _ <- ledger.create(party, new Dummy(party)) + request <- ledger.getTransactionsRequest(transactionFormat = + ledger.transactionFormat(Some(Seq(party))) + ) + end <- ledger.currentEnd() + endToEnd = request.update(_.beginExclusive := end, _.endInclusive := end) + transactions <- ledger.transactions(endToEnd) + } yield { + assert( + transactions.isEmpty, + s"No transactions were expected but ${transactions.size} were read", + ) + } + }) + + test( + "TXAfterEnd", + "An OUT_OF_RANGE error should be returned when subscribing past the ledger end", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + _ <- ledger.create(party, new Dummy(party)) + futureOffset <- ledger.offsetBeyondLedgerEnd() + request <- ledger.getTransactionsRequest( + transactionFormat = ledger.transactionFormat(Some(Seq(party))) + ) + beyondEnd = request.update( + _.beginExclusive := futureOffset, + 
_.optionalEndInclusive := None, + ) + failure <- ledger.transactions(beyondEnd).mustFail("subscribing past the ledger end") + } yield { + assertGrpcError( + failure, + RequestValidationErrors.OffsetAfterLedgerEnd, + Some("is after ledger end"), + ) + } + }) + + test( + "TXServeUntilCancellation", + "Items should be served until the client cancels", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val transactionsToSubmit = 14 + val transactionsToRead = 10 + for { + dummies <- Future.sequence( + Vector.fill(transactionsToSubmit)(ledger.create(party, new Dummy(party))) + ) + transactions <- ledger.transactions(transactionsToRead, AcsDelta, party) + } yield { + assert( + dummies.size == transactionsToSubmit, + s"$transactionsToSubmit should have been submitted but ${dummies.size} were instead", + ) + assert( + transactions.size == transactionsToRead, + s"$transactionsToRead should have been received but ${transactions.size} were instead", + ) + } + }) + + test( + "TXServeTailingStreamSingleParty", + "Items should be served if added during subscription", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val transactionsToSubmit = 14 + val transactionsToRead = 15 + for { + dummies <- Future.sequence( + Vector.fill(transactionsToSubmit)(ledger.create(party, new Dummy(party))) + ) + txReq <- ledger + .getTransactionsRequest( + transactionFormat = ledger.transactionFormat(Some(Seq(party))) + ) + .map(_.update(_.optionalEndInclusive := None)) + flats = ledger.transactions( + transactionsToRead, + txReq, + ) + _ <- ledger.create(party, new Dummy(party)) + transactions <- flats + } yield { + assert( + dummies.size == transactionsToSubmit, + s"$transactionsToSubmit should have been submitted but ${dummies.size} were instead", + ) + assert( + transactions.size == transactionsToRead, + s"$transactionsToRead should have been received but ${transactions.size} were instead", + ) + } + }) + + test( + "TXServeTailingStreamAcsDeltaPartyWildcard", + "Items should be served if party and transaction were added during subscription", + allocate(SingleParty), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val transactionsToSubmit = 14 + val transactionsToRead = 15 + for { + endOffsetAtTestStart <- ledger.currentEnd() + dummies <- Future.sequence( + Vector.fill(transactionsToSubmit)(ledger.create(party, new Dummy(party))) + ) + txReq = ledger + .getTransactionsRequestWithEnd( + transactionFormat = ledger.transactionFormat(parties = None), + begin = endOffsetAtTestStart, + end = None, + ) + flats = ledger.transactions( + transactionsToRead, + txReq, + ) + party2 <- ledger.allocateParty() + _ <- ledger.create(party2, new Dummy(party2)) + transactions <- flats + } yield { + assert( + dummies.size == transactionsToSubmit, + s"$transactionsToSubmit should have been submitted but ${dummies.size} were instead", + ) + assert( + transactions.size == transactionsToRead, + s"$transactionsToRead should have been received but ${transactions.size} were instead", + ) + assertAcsDelta( + transactions.flatMap(_.events), + acsDelta = true, + "The acs_delta field in created events should be set", + ) + } + }) + + test( + "TXServeTailingStreamLedgerEffectsPartyWildcard", + "Transaction with ledger effects should be served if party and transaction added during subscription", + allocate(SingleParty), + runConcurrently = false, + )(implicit ec => { case 
Participants(Participant(ledger, Seq(party))) => + val transactionsToSubmit = 14 + val transactionsToRead = 15 + for { + endOffsetAtTestStart <- ledger.currentEnd() + dummies <- Future.sequence( + Vector.fill(transactionsToSubmit)(ledger.create(party, new Dummy(party))) + ) + txReq = ledger + .getTransactionsRequestWithEnd( + transactionFormat = + ledger.transactionFormat(parties = None, transactionShape = LedgerEffects), + begin = endOffsetAtTestStart, + end = None, + ) + trees = ledger.transactions( + transactionsToRead, + txReq, + ) + party2 <- ledger.allocateParty() + _ <- ledger.create(party2, new Dummy(party2)) + transactions <- trees + } yield { + assert( + dummies.size == transactionsToSubmit, + s"$transactionsToSubmit should have been submitted but ${dummies.size} were instead", + ) + assert( + transactions.size == transactionsToRead, + s"$transactionsToRead should have been received but ${transactions.size} were instead", + ) + assertAcsDelta( + transactions.flatMap(_.events), + acsDelta = true, + "The acs_delta field in created events should be set", + ) + } + }) + + test( + "TXServeTailingStreamsTemplateWildcard", + "Items should be served if package was added during subscription", + allocate(SingleParty), + runConcurrently = false, + limitation = TestConstraints.GrpcOnly( + "PackageManagementService listKnownPackages is not available in JSON" + ), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val transactionsToRead = 2 + for { + endOffsetAtTestStart <- ledger.currentEnd() + knownPackagesBefore <- ledger.listKnownPackages().map(_.map(_.name)) + _ <- ledger.create(party, new Dummy(party)) + txReq = ledger + .getTransactionsRequestWithEnd( + transactionFormat = ledger.transactionFormat(parties = None), + begin = endOffsetAtTestStart, + end = None, + ) + txReqLedgerEffects = ledger + .getTransactionsRequestWithEnd( + transactionFormat = + ledger.transactionFormat(parties = None, transactionShape = LedgerEffects), + begin = endOffsetAtTestStart, + end = None, + ) + flats = ledger.transactions( + transactionsToRead, + txReq, + ) + trees = ledger.transactions( + transactionsToRead, + txReqLedgerEffects, + ) + + testPackage <- loadTestPackage() + _ <- ledger.uploadDarFileAndVetOnConnectedSynchronizers(testPackage) + _ <- ledger.create(party, new OngoingStreamPackageUploadTestTemplate(party))( + OngoingStreamPackageUploadTestTemplate.COMPANION + ) + + knownPackagesAfter <- ledger.listKnownPackages().map(_.map(_.name)) + flatTransactions <- flats + transactionTrees <- trees + } yield { + assert( + knownPackagesAfter.size == knownPackagesBefore.size + 1, + s"the test package should not have been already uploaded," + + s"already uploaded packages: $knownPackagesBefore", + ) + assert( + flatTransactions.size == transactionsToRead, + s"$transactionsToRead should have been received but ${flatTransactions.size} were instead", + ) + assert( + transactionTrees.size == transactionsToRead, + s"$transactionsToRead should have been received but ${transactionTrees.size} were instead", + ) + assertAcsDelta( + flatTransactions.flatMap(_.events), + acsDelta = true, + "The acs_delta field in created events should be set", + ) + assertAcsDelta( + transactionTrees.flatMap(_.events), + acsDelta = true, + "The acs_delta field in created events should be set", + ) + } + }) + + test( + "TXCompleteAcsDeltaOnLedgerEnd", + "A stream should complete as soon as the ledger end is hit", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => 
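+      // The subscription future is created before the contracts are submitted;
+      // since the helper serves a bounded stream (up to the ledger end), the
+      // future completes on its own instead of tailing indefinitely.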
+ val transactionsToSubmit = 14 + val transactionsFuture = ledger.transactions( + transactionShape = AcsDelta, + parties = party, + ) + for { + _ <- Future.sequence( + Vector.fill(transactionsToSubmit)(ledger.create(party, new Dummy(party))) + ) + _ <- transactionsFuture + } yield { + // doing nothing: we are just checking that `transactionsFuture` completes successfully + } + }) + + test( + "TXCompleteLedgerEffectsOnLedgerEnd", + "A stream of ledger effects transactions should complete as soon as the ledger end is hit", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val transactionsToSubmit = 14 + val transactionsFuture = ledger.transactions(LedgerEffects, party) + for { + _ <- Future.sequence( + Vector.fill(transactionsToSubmit)(ledger.create(party, new Dummy(party))) + ) + _ <- transactionsFuture + } yield { + // doing nothing: we are just checking that `transactionsFuture` completes successfully + } + }) + + test( + "TXFilterByTemplate", + "The transaction service should correctly filter by template identifier", + allocate(SingleParty), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val create = ledger.submitAndWaitRequest( + party, + (new Dummy(party).create.commands.asScala ++ new DummyFactory( + party + ).create.commands.asScala).asJava, + ) + for { + _ <- ledger.submitAndWait(create) + transactions <- ledger.transactionsByTemplateId(Dummy.TEMPLATE_ID, Some(Seq(party))) + transactionsPartyWildcard <- ledger.transactionsByTemplateId(Dummy.TEMPLATE_ID, None) + } yield { + val contract = assertSingleton("FilterByTemplate", transactions.flatMap(createdEvents)) + assertEquals( + "FilterByTemplate", + contract.getTemplateId, + Dummy.TEMPLATE_ID_WITH_PACKAGE_ID.toV1, + ) + assertEquals( + "FilterByTemplate transactions for party-wildcard should match the specific party", + transactions, + transactionsPartyWildcard, + ) + } + }) + + test( + "TXFilterByInterface", + "The transaction service should correctly filter by interface identifier", + allocate(SingleParty), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + import com.daml.ledger.test.java.model.iou.{Iou, IIou} + + val iou = new Iou(party, party, "USD", java.math.BigDecimal.ONE, Nil.asJava) + val create = ledger.submitAndWaitRequest(party, iou.create.commands) + + for { + _ <- ledger.submitAndWait(create) + interfaceFilter <- ledger.getTransactionsRequest( + ledger.transactionFormat(Some(Seq(party)), Seq.empty, Seq(IIou.TEMPLATE_ID -> true)) + ) + transactions <- ledger.transactions(interfaceFilter) + } yield { + import com.daml.ledger.api.v2.event.CreatedEvent.toJavaProto + import com.daml.ledger.javaapi.data.CreatedEvent.fromProto + + val created = assertSingleton("FilterByInterface", transactions.flatMap(createdEvents)) + assertEquals( + "FilterByInterface", + created.getTemplateId, + Iou.TEMPLATE_ID_WITH_PACKAGE_ID.toV1, + ) + + val view = IIou.INTERFACE.fromCreatedEvent(fromProto(toJavaProto(created))) + assertEquals(view.data.icurrency, "USD") + } + }) + + test( + "TXTransactionFormat", + "The transactions should be served when the transaction format is set", + allocate(SingleParty), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val transactionsToRead = 1 + for { + _ <- ledger.create(party, new Dummy(party)) + end <- ledger.currentEnd() + format = ledger.eventFormat(verbose = false, parties = Some(Seq(party))) 
+ reqForTransactions = ledger + .getTransactionsRequestWithEnd( + transactionFormat = + ledger.transactionFormat(parties = Some(Seq(party)), transactionShape = LedgerEffects), + end = Some(end), + ) + reqForReassignments = ledger + .getUpdatesRequestWithEnd( + transactionFormatO = None, + reassignmentsFormatO = Some(format), + end = Some(end), + ) + reqForBoth = ledger + .getUpdatesRequestWithEnd( + transactionFormatO = + Some(TransactionFormat(Some(format), TRANSACTION_SHAPE_LEDGER_EFFECTS)), + reassignmentsFormatO = Some(format), + end = Some(end), + ) + txsFromReqForTransactions <- ledger.transactions(reqForTransactions) + txsFromReqForReassignments <- ledger.transactions(reqForReassignments) + txsFromReqForBoth <- ledger.transactions(reqForBoth) + } yield { + assertLength( + s"""$transactionsToRead transactions should have been received from the request for transactions but + | ${txsFromReqForTransactions.size} were instead""".stripMargin, + transactionsToRead, + txsFromReqForTransactions, + ) + assertLength( + s"""No transactions should have been received from the request for reassignments but + | ${txsFromReqForReassignments.size} were instead""".stripMargin, + 0, + txsFromReqForReassignments, + ) + assertLength( + s"""$transactionsToRead transactions should have been received from the request for both reassignments and + | transactions but ${txsFromReqForBoth.size} were instead""".stripMargin, + transactionsToRead, + txsFromReqForBoth, + ) + } + }) + + test( + "TXLedgerEffectsTransientContract", + "The transactions stream with LedgerEffects should return non-empty events for a transient contract", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(owner))) => + for { + _ <- ledger.submitAndWait( + ledger.submitAndWaitRequest(owner, new Dummy(owner).createAnd().exerciseArchive().commands) + ) + txs <- ledger.transactions( + transactionShape = LedgerEffects, + parties = owner, + ) + } yield { + val tx = assertSingleton("One transaction should be found", txs) + assert(tx.events.nonEmpty, "Expected non-empty events in the transaction") + assertAcsDelta( + tx.events, + acsDelta = false, + "The acs_delta field in transient events should not be set", + ) + } + }) + + test( + "TXAcsDeltaTransientContract", + "The transactions stream with AcsDelta should return no transaction for a transient contract", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(owner))) => + for { + _ <- ledger.submitAndWait( + ledger.submitAndWaitRequest(owner, new Dummy(owner).createAnd().exerciseArchive().commands) + ) + txs <- ledger.transactions( + transactionShape = AcsDelta, + parties = owner, + ) + } yield { + assert(txs.isEmpty, "Expected no transactions") + } + }) + + test( + "TXNonConsumingChoiceAcsDeltaFlag", + "The transactions stream with LedgerEffects should serve a non-consuming exercise with the acs_delta flag unset", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(owner))) => + for { + contractId: Dummy.ContractId <- ledger.create(owner, new Dummy(owner)) + end <- ledger.currentEnd() + _ <- ledger.submitAndWait( + ledger.submitAndWaitRequest(owner, contractId.exerciseDummyNonConsuming().commands) + ) + txs <- ledger + .getTransactionsRequest( + transactionFormat = + ledger.transactionFormat(Some(Seq(owner)), transactionShape = LedgerEffects), + begin = end, + ) + .flatMap(ledger.transactions) + } yield { + val tx = assertSingleton( + "Expected one transaction with a non-consuming event", + txs, + ) + 
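+        // A non-consuming exercise leaves the active contract set unchanged, so
+        // the event is only served under LedgerEffects and must carry
+        // acs_delta = false.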
assertSingleton( + "Expected one non-consuming event", + tx.events, + ) + assertAcsDelta( + tx.events, + acsDelta = false, + "The acs_delta field in transient events should not be set", + ) + } + }) + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/UpdateServiceTopologyEventsIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/UpdateServiceTopologyEventsIT.scala new file mode 100644 index 0000000000..ed42dcfee3 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/UpdateServiceTopologyEventsIT.scala @@ -0,0 +1,98 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, Party} +import com.daml.ledger.api.v2.topology_transaction.TopologyTransaction + +import scala.concurrent.{ExecutionContext, Future} + +class UpdateServiceTopologyEventsIT extends LedgerTestSuite { + + testSubscription( + "USPartyEventsInUpdatesTailing", + "Events should be served in update stream if parties added during subscription", + implicit ec => + ledger => { + val futureTopos = ledger + .participantAuthorizationTransaction("USPartyEventsInUpdatesTailing") + for { + party <- ledger.allocateParty() + topos <- futureTopos + } yield (party, topos) + }, + ) + + testSubscription( + "USPartyEventsInUpdatesNotTailing", + "Events should be served in update stream if parties added before subscription", + implicit ec => + ledger => + for { + begin <- ledger.currentEnd() + party <- ledger.allocateParty() + // For synchronization + _ <- ledger.participantAuthorizationTransaction( + partyIdSubstring = "USPartyEventsInUpdatesNotTailing", + begin = Some(begin), + ) + end <- ledger.currentEnd() + topo <- ledger.participantAuthorizationTransaction( + partyIdSubstring = "USPartyEventsInUpdatesNotTailing", + begin = Some(begin), + end = Some(end), + ) + } yield (party, topo), + ) + + private def testSubscription( + shortIdentifier: String, + description: String, + query: ExecutionContext => ParticipantTestContext => Future[ + (Party, TopologyTransaction) + ], + ): Unit = + test( + shortIdentifier = shortIdentifier, + description = description, + partyAllocation = allocate(NoParties), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + query(ec)(ledger).map { case (party, topo) => + val events = topo.events + assert( + events.nonEmpty, + "expected to observe a topology event after party allocation", + ) + assert( + topo.offset > 0, + "expected to populate valid offset", + ) + assert( + topo.updateId.nonEmpty, + "expected to populate valid update ID", + ) + assert( + topo.recordTime.nonEmpty, + "expected to populate valid record time", + ) + assert( + topo.synchronizerId.nonEmpty, + "expected to populate valid synchronizer ID", + ) + val authorization = events.headOption.flatMap(_.event.participantAuthorizationAdded) + assert( + authorization.nonEmpty, + "expected to observe a participant authorization added event", + ) + val actualParty = authorization.fold("")(_.partyId) + assert( + actualParty == party.underlying.getValue, + s"expected to observe 
topology event for ${party.underlying.getValue} but got $actualParty", + ) + } + }) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/UpgradingIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/UpgradingIT.scala new file mode 100644 index 0000000000..c63945abfe --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/UpgradingIT.scala @@ -0,0 +1,962 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.{ + Participant, + Participants, + SingleParty, + allocate, +} +import com.daml.ledger.api.testtool.infrastructure.Assertions.{verifyLength, *} +import com.daml.ledger.api.testtool.infrastructure.TransactionHelpers.{ + createdEvents, + exercisedEvents, +} +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.testtool.infrastructure.{ + Dars, + LedgerTestSuite, + Party, + UpgradeFetchTestDar1_0_0, + UpgradeFetchTestDar2_0_0, + UpgradeIfaceDar, + UpgradeTestDar1_0_0, + UpgradeTestDar2_0_0, + UpgradeTestDar3_0_0, +} +import com.daml.ledger.api.testtool.suites.v2_1.UpgradingIT.{ + EnrichedIdentifier, + SubInterface, + SubTemplate, + Subscriptions, + acsF, + createContract, + getActiveContractsRequest, + txRequest, + upload, +} +import com.daml.ledger.api.v2.event.Event.Event.Created +import com.daml.ledger.api.v2.event.{CreatedEvent, Event} +import com.daml.ledger.api.v2.event_query_service.GetEventsByContractIdRequest +import com.daml.ledger.api.v2.state_service.GetActiveContractsRequest +import com.daml.ledger.api.v2.transaction_filter.CumulativeFilter.IdentifierFilter +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.{ + TRANSACTION_SHAPE_ACS_DELTA, + TRANSACTION_SHAPE_LEDGER_EFFECTS, +} +import com.daml.ledger.api.v2.transaction_filter.{ + CumulativeFilter, + EventFormat, + Filters, + InterfaceFilter, + TemplateFilter, + TransactionFormat, + UpdateFormat, +} +import com.daml.ledger.api.v2.update_service.GetUpdatesRequest +import com.daml.ledger.api.v2.value.Identifier as ScalaPbIdentifier +import com.daml.ledger.api.v2.value.Identifier.toJavaProto +import com.daml.ledger.api.v2.{transaction, transaction_filter, value} +import com.daml.ledger.javaapi.data.codegen.{ContractCompanion, ContractId, ValueDecoder} +import com.daml.ledger.javaapi.data.{DamlRecord, Unit as _, *} +import com.daml.ledger.test.java.model.test.Dummy +import com.daml.ledger.test.java.upgrade_1_0_0.upgrade.UA as UA_V1 +import com.daml.ledger.test.java.upgrade_2_0_0.upgrade.{UA as UA_V2, UB as UB_V2} +import com.daml.ledger.test.java.upgrade_3_0_0.upgrade.{UA as UA_V3, UB as UB_V3} +import com.daml.ledger.test.java.upgrade_fetch_1_0_0.upgradefetch.{Fetch, Fetcher as Fetcher_V1} +import com.daml.ledger.test.java.upgrade_fetch_2_0_0.upgradefetch.Fetcher as Fetcher_V2 +import com.daml.ledger.test.java.upgrade_iface.iface +import com.digitalasset.canton.ledger.api.TransactionShape.LedgerEffects +import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors +import com.digitalasset.daml.lf.data.Ref +import com.digitalasset.daml.lf.data.Ref.{PackageId, PackageRef, TypeConRef} +import org.scalatest.Inside.inside + +import java.util.Optional +import 
scala.concurrent.{ExecutionContext, Future} +import scala.jdk.CollectionConverters.* + +class UpgradingIT extends LedgerTestSuite { + PackageRef.Id(PackageId.assertFromString(UA_V1.PACKAGE_ID)) + PackageRef.Id(PackageId.assertFromString(UB_V2.PACKAGE_ID)) + PackageRef.Id(PackageId.assertFromString(UB_V3.PACKAGE_ID)) + private val Iface1_Ref = ScalaPbIdentifier.fromJavaProto(iface.Iface1.TEMPLATE_ID.toProto) + private val UA_Ref = ScalaPbIdentifier.fromJavaProto(UA_V1.TEMPLATE_ID.toProto) + private val UB_Ref = ScalaPbIdentifier.fromJavaProto(UB_V2.TEMPLATE_ID.toProto) + + test( + "USubscriptionsUnknownPackageNames", + "Subscriptions are failed if created for package names that are not known to the participant", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + import ledger.* + + val TemplateSubscription = + SubTemplate(TypeConRef.assertFromString("#unknownPkgName:module:entity")) + + for { + failedFlatTransactionsPackageNameNotFound <- transactions( + txRequest(ledger, TemplateSubscription, party) + ).mustFail("Package-name not found") + end <- ledger.currentEnd() + failedActiveContractsPackageNameNotFound <- activeContracts( + getActiveContractsRequest(TemplateSubscription, party, end) + ).mustFail("Package-name not found") + failedTransactionLedgerEffectsPackageNameNotFound <- transactions( + txRequest( + ledger = ledger, + subscriptionFilter = TemplateSubscription, + party = party, + transactionShape = TRANSACTION_SHAPE_LEDGER_EFFECTS, + ) + ).mustFail("Package-name not found") + } yield { + + assertGrpcError( + failedFlatTransactionsPackageNameNotFound, + RequestValidationErrors.NotFound.PackageNamesNotFound, + Some( + "The following package names do not match upgradable packages uploaded on this participant: [unknownPkgName]." + ), + ) + assertGrpcError( + failedActiveContractsPackageNameNotFound, + RequestValidationErrors.NotFound.PackageNamesNotFound, + Some( + "The following package names do not match upgradable packages uploaded on this participant: [unknownPkgName]." + ), + ) + assertGrpcError( + failedTransactionLedgerEffectsPackageNameNotFound, + RequestValidationErrors.NotFound.PackageNamesNotFound, + Some( + "The following package names do not match upgradable packages uploaded on this participant: [unknownPkgName]." 
+ ), + ) + } + }) + + test( + "USubscriptionsNoTemplatesForPackageName", + "Subscriptions are failed if created for package names that have no known templates with the specified qualified name", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + import ledger.* + + val knownPackageName = Dummy.PACKAGE_NAME + val unknownSubscriptionQualifiedNameForPackageName = SubTemplate( + TypeConRef.assertFromString(s"#$knownPackageName:unknownModule:unknownEntity") + ) + + for { + failedFlatTransactionsPackageNameNotFound <- transactions( + txRequest(ledger, unknownSubscriptionQualifiedNameForPackageName, party) + ).mustFail("Template not found") + end <- ledger.currentEnd() + failedActiveContractsPackageNameNotFound <- activeContracts( + getActiveContractsRequest(unknownSubscriptionQualifiedNameForPackageName, party, end) + ).mustFail("Template not found") + failedTransactionLedgerEffectsPackageNameNotFound <- transactions( + txRequest( + ledger = ledger, + subscriptionFilter = unknownSubscriptionQualifiedNameForPackageName, + party = party, + transactionShape = TRANSACTION_SHAPE_LEDGER_EFFECTS, + ) + ).mustFail("Template not found") + } yield { + + assertGrpcError( + failedFlatTransactionsPackageNameNotFound, + RequestValidationErrors.NotFound.NoTemplatesForPackageNameAndQualifiedName, + Some( + "The following package-name/template qualified-name pairs do not reference any template-id uploaded on this participant: [(model-tests,unknownModule:unknownEntity)]." + ), + ) + assertGrpcError( + failedActiveContractsPackageNameNotFound, + RequestValidationErrors.NotFound.NoTemplatesForPackageNameAndQualifiedName, + Some( + "The following package-name/template qualified-name pairs do not reference any template-id uploaded on this participant: [(model-tests,unknownModule:unknownEntity)]." + ), + ) + assertGrpcError( + failedTransactionLedgerEffectsPackageNameNotFound, + RequestValidationErrors.NotFound.NoTemplatesForPackageNameAndQualifiedName, + Some( + "The following package-name/template qualified-name pairs do not reference any template-id uploaded on this participant: [(model-tests,unknownModule:unknownEntity)]." 
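+          // For context (illustrative note; the identifier below is only an example): by-package-name
+          // subscriptions use the "#<package-name>:<module>:<entity>" TypeConRef form, e.g.
+          //   SubTemplate(TypeConRef.assertFromString(s"#${Dummy.PACKAGE_NAME}:Test:Dummy"))
+          // which the participant resolves against every uploaded package advertising that
+          // package name; this test deliberately pairs a known package name with an unknown
+          // qualified name to trigger the error asserted here.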
+ ), + ) + } + }) + + // TODO(#25385): Add assertions for transaction pointwise queries + test( + "UDynamicSubscriptions", + "Template-id and interface-id resolution is updated on package upload during ongoing subscriptions", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + implicit val upgradingUA_V1Companion + : ContractCompanion.WithoutKey[UA_V1.Contract, UA_V1.ContractId, UA_V1] = + UA_V1.COMPANION + implicit val upgradingUA_V2Companion + : ContractCompanion.WithoutKey[UA_V2.Contract, UA_V2.ContractId, UA_V2] = + UA_V2.COMPANION + implicit val upgradingUA_V3Companion + : ContractCompanion.WithoutKey[UA_V3.Contract, UA_V3.ContractId, UA_V3] = + UA_V3.COMPANION + implicit val upgradingUB_V2Companion + : ContractCompanion.WithoutKey[UB_V2.Contract, UB_V2.ContractId, UB_V2] = + UB_V2.COMPANION + implicit val upgradingUB_V3Companion + : ContractCompanion.WithoutKey[UB_V3.Contract, UB_V3.ContractId, UB_V3] = + UB_V3.COMPANION + + for { + _ <- upload(ledger, UpgradeIfaceDar.path) + // Upload 1.0.0 package (with the first implementation of UA) + _ <- upload(ledger, UpgradeTestDar1_0_0.path) + + // TODO(#16651): Assert that subscriptions fail if subscribing for non-existing template-name + // but for known package-name + + // Start ongoing Iface1 subscriptions after uploading the V1 package + subscriptions_Iface1_at_v1 = new Subscriptions( + "Iface1 at v1", + ledger, + party, + SubInterface( + Iface1_Ref, + expectedTemplatesInResponses = Set(UA_Ref.toTypeConRef), + ), + expectedTxsSize = 3, + ) + + // Start ongoing UA subscriptions + subscriptions_UA = new Subscriptions( + "UA", + ledger, + party, + SubTemplate(UA_Ref.toTypeConRef), + expectedTxsSize = 4, + ) + + // Create UA#1: UA 1.0.0 contract arguments + payloadUA_1 = new UA_V1(party, party, 0L) + payloadUA_1_contractId <- createContract(ledger, party, payloadUA_1) + + fetchPayloadUA1_byContractId = () => + ledger + .getEventsByContractId( + GetEventsByContractIdRequest( + payloadUA_1_contractId.contractId, + Some( + ledger.eventFormat( + verbose = true, + Some(Seq(party)), + interfaceFilters = Seq(iface.Iface1.TEMPLATE_ID -> true), + ) + ), + ) + ) + + acs_before_v2_upload <- acsF(ledger, party, SubInterface(Iface1_Ref)) + _ <- fetchPayloadUA1_byContractId() + .mustFailWith("contract not found", RequestValidationErrors.NotFound.ContractEvents) + subscriptions_Iface1_at_v1_after_v1_create = new Subscriptions( + "Iface1 after v1 create", + ledger, + party, + SubInterface( + Iface1_Ref, + expectedTemplatesInResponses = Set(UA_Ref.toTypeConRef), + ), + expectedTxsSize = 3, + ) + + // Upload 2.0.0 package (with the first implementation of UB) + // 2.0.0 becomes the default package preference on the ledger + _ <- upload(ledger, UpgradeTestDar2_0_0.path) + acs_after_v2_upload <- acsF(ledger, party, SubInterface(Iface1_Ref)) + create1_fetched_by_contract_id_after_v2_upload <- fetchPayloadUA1_byContractId() + + subscriptions_Iface1_at_v2 = new Subscriptions( + "Iface1 at v2", + ledger, + party, + SubInterface( + Iface1_Ref, + expectedTemplatesInResponses = Set(UA_Ref.toTypeConRef), + ), + expectedTxsSize = 4, + ) + + // Start ongoing UB subscriptions + subscriptions_UB = new Subscriptions( + "UB", + ledger, + party, + SubTemplate(UB_Ref.toTypeConRef), + expectedTxsSize = 2, + ) + + // Create UA#2: UA 2.0.0 contract + payloadUA_2 = new UA_V2(party, party, 0L, Optional.of(Seq("more").asJava)) + _ <- createContract(ledger, party, payloadUA_2) + + // Create UA#3: UA 1.0.0 (by means of explicit 
package-id) + payloadUA_3 = new UA_V1(party, party, 0L) + _ <- createContract( + ledger, + party, + payloadUA_3, + Some(PackageRef.Id(Ref.PackageId.assertFromString(UA_V1.PACKAGE_ID))), + ) + + acs_before_v3_upload <- acsF(ledger, party, SubInterface(Iface1_Ref)) + + // Create UB#1: UB 2.0.0 contract + payloadUB_1 = new UB_V2(party, 0L) + _ <- createContract(ledger, party, payloadUB_1) + + // Upload 3.0.0 package + // 3.0.0 becomes the default package preference on the ledger + _ <- upload(ledger, UpgradeTestDar3_0_0.path) + acs_after_v3_upload <- acsF(ledger, party, SubInterface(Iface1_Ref)) + create1_fetched_by_contract_id_after_v3_upload <- fetchPayloadUA1_byContractId() + + // Create UA#4: UA 3.0.0 contract + payloadUA_4 = new UA_V3(party, party, 0L, Optional.of(Seq("more").asJava)) + _ <- createContract(ledger, party, payloadUA_4) + + acs_after_create_4 <- acsF(ledger, party, SubInterface(Iface1_Ref)) + + subscriptions_Iface1_at_v3 = new Subscriptions( + "Iface1 at v3", + ledger, + party, + SubInterface( + Iface1_Ref, + expectedTemplatesInResponses = Set(UA_Ref.toTypeConRef), + ), + expectedTxsSize = 4, + ) + + // Create UB#2: UB 3.0.0 contract + payloadUB_2 = new UB_V3(party, 0L, Optional.of(Seq("extra").asJava)) + _ <- createContract(ledger, party, payloadUB_2) + + // Wait for all UA transactions to be visible in the transaction streams + creates_UA <- subscriptions_UA.createsF(4) + + // Wait for all UB transactions to be visible in the transaction streams + creates_UB <- subscriptions_UB.createsF(2) + + creates_subscriptions_Iface1_at_v1 <- subscriptions_Iface1_at_v1.transactionsF(3) + + creates_subscriptions_Iface1_at_v1_after_v1_create <- + subscriptions_Iface1_at_v1_after_v1_create.transactionsF(3) + + creates_subscriptions_Iface1_at_v2 <- subscriptions_Iface1_at_v2.transactionsF(4) + + creates_subscriptions_Iface1_at_v3 <- subscriptions_Iface1_at_v3.transactionsF(4) + } yield { + def assertCreate[ + TCid <: ContractId[T], + T <: Template, + ]( + payload: T, + context: String, + create: CreatedEvent, + expectedInterfaceViewValue: Option[String] = None, + )(implicit companion: ContractCompanion[?, TCid, T]): Unit = + assertPayloadEquals( + context, + create, + payload, + ContractCompanion.valueDecoder(companion), + companion.TEMPLATE_ID_WITH_PACKAGE_ID, + assertCreateArgs = expectedInterfaceViewValue.isEmpty, + expectedInterfaceViewValue.toList + .map(expectedViewValue => + Identifier.fromProto(iface.Iface1.TEMPLATE_ID_WITH_PACKAGE_ID.toProto) -> { + (record: DamlRecord) => + assertEquals( + s"Iface1 view for create 1 - $context", + iface.Iface1View.valueDecoder().decode(record).name, + expectedViewValue, + ) + } + ) + .toMap, + ) + + // Assert GetEventsByContractId + assertCreate( + payload = payloadUA_1, + context = "GetEventsByContractId at v2", + create = create1_fetched_by_contract_id_after_v2_upload.getCreated.getCreatedEvent, + expectedInterfaceViewValue = Some("Iface1-UAv2"), + ) + + assertCreate( + payload = payloadUA_1, + context = "GetEventsByContractId at v3", + create = create1_fetched_by_contract_id_after_v3_upload.getCreated.getCreatedEvent, + expectedInterfaceViewValue = Some("Iface1-UAv3"), + ) + + // Assert interface subscriptions + inside(creates_subscriptions_Iface1_at_v1) { case Vector(create2, create3, create4) => + assertCreate(payloadUA_2, "2 - IFace1 subscription at v1", create2, Some("Iface1-UAv2")) + assertCreate(payloadUA_3, "3 - IFace1 subscription at v1", create3, Some("Iface1-UAv2")) + assertCreate(payloadUA_4, "4 - IFace1 subscription at v1", 
create4, Some("Iface1-UAv2")) + } + + inside(creates_subscriptions_Iface1_at_v1_after_v1_create) { + case Vector(create2, create3, create4) => + assertCreate( + payloadUA_2, + "2 - IFace1 subscription after v1 create", + create2, + Some("Iface1-UAv2"), + ) + assertCreate( + payloadUA_3, + "3 - IFace1 subscription after v1 create", + create3, + Some("Iface1-UAv2"), + ) + assertCreate( + payloadUA_4, + "4 - IFace1 subscription after v1 create", + create4, + Some("Iface1-UAv2"), + ) + } + + inside(creates_subscriptions_Iface1_at_v2) { + case Vector(create1, create2, create3, create4) => + assertCreate(payloadUA_1, "1 - Iface1 subscription at v2", create1, Some("Iface1-UAv2")) + assertCreate(payloadUA_2, "2 - Iface1 subscription at v2", create2, Some("Iface1-UAv2")) + assertCreate(payloadUA_3, "3 - Iface1 subscription at v2", create3, Some("Iface1-UAv2")) + assertCreate(payloadUA_4, "4 - Iface1 subscription at v2", create4, Some("Iface1-UAv2")) + } + + inside(creates_subscriptions_Iface1_at_v3) { + case Vector(create1, create2, create3, create4) => + assertCreate(payloadUA_1, "1 - Iface1 subscription at v3", create1, Some("Iface1-UAv3")) + assertCreate(payloadUA_2, "2 - Iface1 subscription at v3", create2, Some("Iface1-UAv3")) + assertCreate(payloadUA_3, "3 - Iface1 subscription at v3", create3, Some("Iface1-UAv3")) + assertCreate(payloadUA_4, "4 - Iface1 subscription at v2", create4, Some("Iface1-UAv3")) + } + + // Assert ACS interface subscription + assertIsEmpty(acs_before_v2_upload) + inside(acs_after_v2_upload) { case Vector(create1) => + assertCreate(payloadUA_1, "1 - ACS after v2 upload", create1, Some("Iface1-UAv2")) + } + inside(acs_before_v3_upload) { case Vector(create1, create2, create3) => + assertCreate(payloadUA_1, "1 - ACS after create 3", create1, Some("Iface1-UAv2")) + assertCreate(payloadUA_2, "2 - ACS after create 3", create2, Some("Iface1-UAv2")) + assertCreate(payloadUA_3, "3 - ACS after create 3", create3, Some("Iface1-UAv2")) + } + inside(acs_after_v3_upload) { case Vector(create1, create2, create3) => + assertCreate(payloadUA_1, "1 - ACS after v3 upload", create1, Some("Iface1-UAv3")) + assertCreate(payloadUA_2, "2 - ACS after v3 upload", create2, Some("Iface1-UAv3")) + assertCreate(payloadUA_3, "3 - ACS after v3 upload", create3, Some("Iface1-UAv3")) + } + inside(acs_after_create_4) { case Vector(create1, create2, create3, create4) => + assertCreate(payloadUA_1, "1 - ACS after create 4", create1, Some("Iface1-UAv3")) + assertCreate(payloadUA_2, "2 - ACS after create 4", create2, Some("Iface1-UAv3")) + assertCreate(payloadUA_3, "3 - ACS after create 4", create3, Some("Iface1-UAv3")) + assertCreate(payloadUA_4, "4 - ACS after create 4", create4, Some("Iface1-UAv3")) + } + + // Assert template subscriptions + inside(creates_UA) { + case Vector(create1, create2, create3, create4) => + assertCreate(payloadUA_1, "UA create 1", create1) + assertCreate(payloadUA_2, "UA create 2", create2) + assertCreate(payloadUA_3, "UA create 3", create3) + assertCreate(payloadUA_4, "UA create 4", create4) + case other => fail(s"Expected 4 create events, got ${other.size}") + } + + inside(creates_UB) { + case Vector(create1, create2) => + assertCreate(payloadUB_1, "UB create 1", create1) + assertCreate(payloadUB_2, "UB create 2", create2) + case other => fail(s"Expected two create events, got ${other.size}") + } + } + }) + + test( + "UPackageNameInEvents", + "Package-name is populated for Ledger API events", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, 
Seq(party))) => + val dummy = new Dummy(party) + implicit val dummyCompanion + : ContractCompanion.WithoutKey[Dummy.Contract, Dummy.ContractId, Dummy] = + Dummy.COMPANION + + val dummyTemplateSubscriptions = new Subscriptions( + "Dummy template", + ledger, + party, + SubTemplate(Dummy.TEMPLATE_ID.toV1.toTypeConRef), + expectedTxsSize = 2, + ) + for { + dummy <- createContract(ledger, party, dummy) + _ <- ledger.submitAndWait( + ledger.submitAndWaitRequest(party, dummy.exerciseDummyNonConsuming().commands()) + ) + acsBeforeArchive <- acsF( + ledger = ledger, + party = party, + subscriptionFilter = SubTemplate(Dummy.TEMPLATE_ID.toV1.toTypeConRef), + ) + _ <- ledger.submitAndWait( + ledger.submitAndWaitRequest(party, dummy.exerciseArchive().commands()) + ) + acsDeltaTxs <- dummyTemplateSubscriptions.acsDeltaTxsF + ledgerEffectsTxs <- dummyTemplateSubscriptions.ledgerEffectsTxsF + } yield { + verifyLength("Dummy template ACS before archive", 1, acsBeforeArchive) + assertEquals( + "Dummy template package name", + acsBeforeArchive.head.packageName, + Dummy.PACKAGE_NAME, + ) + + verifyLength("Acs Delta transactions for Dummy template", 2, acsDeltaTxs) + verifyLength("Ledger Effects transactions for Dummy template", 2, ledgerEffectsTxs) + acsDeltaTxs + .flatMap( + _.events.flatMap(ev => + ev.event.created + .map(_.packageName) + .toList ++ ev.event.archived.map(_.packageName).toList + ) + ) + .foreach(packageName => + assertEquals("Package name in event", packageName, Dummy.PACKAGE_NAME) + ) + + ledgerEffectsTxs + .flatMap(_.events) + .flatMap(event => + event.event.created.map(_.packageName).toList ++ event.event.exercised + .map(_.packageName) + .toList + ) + .foreach(packageName => + assertEquals("Package name in event", packageName, Dummy.PACKAGE_NAME) + ) + } + }) + + test( + "URepresentativePackageIdInEvents", + "The representative package-id is populated for Ledger API create events", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val dummy = new Dummy(party) + implicit val dummyCompanion + : ContractCompanion.WithoutKey[Dummy.Contract, Dummy.ContractId, Dummy] = + Dummy.COMPANION + + val dummyTemplateSubscriptions = new Subscriptions( + "Dummy template", + ledger, + party, + SubTemplate(Dummy.TEMPLATE_ID.toV1.toTypeConRef), + expectedTxsSize = 1, + ) + for { + _ <- createContract(ledger, party, dummy) + acs <- acsF( + ledger = ledger, + party = party, + subscriptionFilter = SubTemplate(Dummy.TEMPLATE_ID.toV1.toTypeConRef), + ) + acsDeltaTxs <- dummyTemplateSubscriptions.acsDeltaTxsF + ledgerEffectsTxs <- dummyTemplateSubscriptions.ledgerEffectsTxsF + } yield { + assertEquals( + "ACS created event representative package-id", + assertSingleton("Only one create in the ACS", acs).representativePackageId, + // For create events stemming from command submissions, + // the representative package-id is the same as the contract's package-id + Dummy.PACKAGE_ID, + ) + + val acsDeltaTx = assertSingleton("Acs Delta transactions for Dummy template", acsDeltaTxs) + val ledgerFxTx = + assertSingleton("Ledger Effects transactions for Dummy template", ledgerEffectsTxs) + assertEquals( + "ACS delta TX created event representative package-id", + assertSingleton( + "ACS delta events", + acsDeltaTx.events, + ).event.created.get.representativePackageId, + Dummy.PACKAGE_ID, + ) + + assertEquals( + "Ledger Effects TX created event representative package-id", + assertSingleton( + "Ledger Effects events", + ledgerFxTx.events, + 
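+        // A short illustration of the invariant checked here, following the comment above:
+        // for create events that stem from command submissions, the representative
+        // package-id equals the package-id of the created contract's template, i.e.
+        // conceptually
+        //   created.representativePackageId == created.templateId.get.packageId
+        // (templateId being the package-id-qualified template identifier of the event).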
).event.created.get.representativePackageId, + Dummy.PACKAGE_ID, + ) + } + }) + + test( + "UChoicePackageId", + "Report package id of the template exercised and use it to render the result", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + implicit val upgradingFetcherV1_Companion + : ContractCompanion.WithoutKey[Fetcher_V1.Contract, Fetcher_V1.ContractId, Fetcher_V1] = + Fetcher_V1.COMPANION + for { + // v1 is the only package for Fetcher + _ <- upload(ledger, UpgradeFetchTestDar1_0_0.path) + + cid <- ledger.create(party, new Fetcher_V1(party)) + + // Exercised as per v1 implementation of choice + _ <- ledger.exercise(party, cid.exerciseFetch(new Fetch(cid))) + + // v2 becomes the default package for Fetcher + _ <- upload(ledger, UpgradeFetchTestDar2_0_0.path) + + // Exercised as per v2 implementation of choice + _ <- ledger.exercise(party, cid.exerciseFetch(new Fetch(cid))) + + case Vector(exercised1, exercised2) <- ledger + .transactions(LedgerEffects, party) + .map(_.flatMap(exercisedEvents)) + } yield { + val v1TmplId = Fetcher_V1.TEMPLATE_ID_WITH_PACKAGE_ID + val v2TmplId = Fetcher_V2.TEMPLATE_ID_WITH_PACKAGE_ID + + // The first exercise reports template with package id per v1, and the second per v2 + assertEquals(toJavaProto(exercised1.templateId.get), v1TmplId.toProto) + assertEquals(toJavaProto(exercised2.templateId.get), v2TmplId.toProto) + + // The first exercise has a result shape per v1, and the second per v2 + assertExerciseResult(exercised1.exerciseResult.get, v1TmplId, new Fetcher_V1(party)) + assertExerciseResult( + exercised2.exerciseResult.get, + v2TmplId, + new Fetcher_V2(party, Optional.empty()), + ) + } + }) + + // We ignore recordId as it is not transferred over to Json API + private def assertExerciseResult[T <: Template]( + got: value.Value, + recordIdIfGiven: Identifier, + wantPayload: T, + ): Unit = { + import com.daml.ledger.api.testtool.infrastructure.RemoveTrailingNone.Implicits + val gotPb = value.Value.toJavaProto(got) + val wantFields = wantPayload.toValue.withoutTrailingNoneFields.getFields + val wantPb = + if (!gotPb.getRecord.hasRecordId) + new DamlRecord(wantFields).toProto + else + new DamlRecord(recordIdIfGiven, wantFields).toProto + + assertEquals(gotPb, wantPb) + } + + private def assertPayloadEquals[T]( + context: String, + createdEvent: CreatedEvent, + payload: T, + valueDecoder: ValueDecoder[T], + templateId: Identifier, + assertCreateArgs: Boolean, + assertViewDecoding: Map[Identifier, DamlRecord => Unit], + ): Unit = { + assertEquals(context, toJavaProto(createdEvent.templateId.get), templateId.toProto) + + if (assertCreateArgs) + assertEquals( + context, + valueDecoder.decode( + DamlRecord.fromProto(value.Record.toJavaProto(createdEvent.getCreateArguments)) + ), + payload, + ) + + createdEvent.interfaceViews.foreach { ifaceView => + val viewRecord = DamlRecord.fromProto(value.Record.toJavaProto(ifaceView.getViewValue)) + assertViewDecoding(Identifier.fromProto(toJavaProto(ifaceView.getInterfaceId)))(viewRecord) + } + } +} + +object UpgradingIT { + implicit class EnrichedCommands(commands: java.util.List[Command]) { + def overridePackageId(packageIdOverride: String): java.util.List[Command] = + commands.asScala + .map { + case cmd: CreateCommand => + new CreateCommand( + identifierWithPackageIdOverride(packageIdOverride, cmd.getTemplateId), + cmd.getCreateArguments, + ): Command + case other => fail(s"Unexpected command $other") + } + .toList + .asJava + } + + private def 
identifierWithPackageIdOverride(packageIdOverride: String, templateId: Identifier) = + new Identifier( + packageIdOverride, + templateId.getModuleName, + templateId.getEntityName, + ) + + private def acsF( + ledger: ParticipantTestContext, + party: Party, + subscriptionFilter: TemplateOrInterfaceWithImpls, + )(implicit executionContext: ExecutionContext) = + for { + end <- ledger.currentEnd() + acs <- ledger.activeContracts( + getActiveContractsRequest(subscriptionFilter, party, end) + ) + } yield acs + + private class Subscriptions( + context: String, + ledger: ParticipantTestContext, + party: Party, + subscriptionFilter: TemplateOrInterfaceWithImpls, + expectedTxsSize: Int, + )(implicit ec: ExecutionContext) { + import ledger.* + + val acsDeltaTxsF: Future[Vector[transaction.Transaction]] = transactions( + take = expectedTxsSize, + txRequest( + ledger, + subscriptionFilter, + party, + ), + ) + + val ledgerEffectsTxsF: Future[Vector[transaction.Transaction]] = transactions( + take = expectedTxsSize, + txRequest( + ledger, + subscriptionFilter, + party, + transactionShape = TRANSACTION_SHAPE_LEDGER_EFFECTS, + ), + ) + + def transactionsF( + expectedCreatesSize: Int + ): Future[Vector[CreatedEvent]] = + for { + acsDeltaTxs <- acsDeltaTxsF + ledgerEffectsTxs <- ledgerEffectsTxsF + acsDeltaTxCreates = acsDeltaTxs.flatMap(createdEvents) + ledgerEffectsTxCreates = ledgerEffectsTxs + .flatMap(createdEvents) + .filter { created => + subscriptionFilter.expectedTemplatesInResponses + .exists(_ matches (created.templateId.get, created.packageName)) + } + } yield { + verifyLength( + s"$context: Acs Delta transactions creates", + expectedCreatesSize, + acsDeltaTxCreates, + ) + verifyLength( + s"$context: Ledger Effects transactions creates", + expectedCreatesSize, + ledgerEffectsTxCreates, + ) + assertSameElements( + actual = acsDeltaTxCreates, + expected = ledgerEffectsTxCreates, + context = context, + ) + + ledgerEffectsTxCreates + } + + def createsF(expectedCreatesSize: Int): Future[Vector[CreatedEvent]] = + for { + acsCreates <- acsF(ledger, party, subscriptionFilter) + txCreates <- transactionsF(expectedCreatesSize) + } yield { + verifyLength(s"$context: ACS creates", expectedCreatesSize, acsCreates) + + assertSameElements(txCreates, acsCreates) + + acsCreates + } + } + + def createContract[ + TCid <: ContractId[T], + T <: Template, + ]( + ledger: ParticipantTestContext, + party: Party, + template: T, + overrideTypeO: Option[Ref.PackageRef] = None, + )(implicit companion: ContractCompanion[?, TCid, T], ec: ExecutionContext): Future[TCid] = { + val commands = template.create().commands() + + ledger + .submitAndWaitForTransaction( + ledger.submitAndWaitForTransactionRequest( + party, + overrideTypeO + .map(overrideType => commands.overridePackageId(overrideType.toString)) + .getOrElse(commands), + ) + ) + .map( + _.getTransaction.events + .collectFirst { case Event(Created(e)) => + companion.toContractId(new ContractId(e.contractId)) + } + .getOrElse(fail("No contract created")) + ) + } + + private def getActiveContractsRequest( + subscriptionFilter: TemplateOrInterfaceWithImpls, + party: Party, + activeAtOffset: Long, + includeCreatedEventBlobs: Boolean = false, + ) = + GetActiveContractsRequest( + activeAtOffset = activeAtOffset, + eventFormat = Some(eventFormat(subscriptionFilter, party, includeCreatedEventBlobs)), + ) + + def txRequest( + ledger: ParticipantTestContext, + subscriptionFilter: TemplateOrInterfaceWithImpls, + party: Party, + includeCreatedEventBlob: Boolean = false, + 
transactionShape: transaction_filter.TransactionShape = TRANSACTION_SHAPE_ACS_DELTA, + ): GetUpdatesRequest = + new GetUpdatesRequest( + beginExclusive = ledger.begin, + endInclusive = None, + updateFormat = Some( + UpdateFormat( + includeTransactions = Some( + TransactionFormat( + eventFormat = Some( + eventFormat(subscriptionFilter, party, includeCreatedEventBlob) + ), + transactionShape = transactionShape, + ) + ), + includeReassignments = None, + includeTopologyEvents = None, + ) + ), + ) + + def eventFormat( + subscriptionFilter: TemplateOrInterfaceWithImpls, + party: Party, + includeCreatedEventBlobs: Boolean, + ): EventFormat = + EventFormat( + filtersByParty = Map( + party.getValue -> Filters( + Seq( + CumulativeFilter( + subscriptionFilter match { + case SubTemplate(template) => + IdentifierFilter.TemplateFilter( + TemplateFilter( + Some(template.toScalaPbIdentifier), + includeCreatedEventBlob = includeCreatedEventBlobs, + ) + ) + case SubInterface(interface, _) => + IdentifierFilter.InterfaceFilter( + InterfaceFilter( + Some(interface), + includeInterfaceView = true, + includeCreatedEventBlob = includeCreatedEventBlobs, + ) + ) + } + ) + ) + ) + ), + filtersForAnyParty = None, + verbose = true, + ) + + // SubscriptionFilterWithExpectations + sealed trait TemplateOrInterfaceWithImpls { + def expectedTemplatesInResponses: Set[Ref.TypeConRef] + } + + case class SubTemplate(tpl: Ref.TypeConRef) extends TemplateOrInterfaceWithImpls { + override def expectedTemplatesInResponses: Set[Ref.TypeConRef] = + Set(tpl) + } + + case class SubInterface( + iface: ScalaPbIdentifier, + // Used to filter out other templates that can not be excluded in the trees filters + expectedTemplatesInResponses: Set[Ref.TypeConRef] = Set.empty, + ) extends TemplateOrInterfaceWithImpls + + private def upload(ledger: ParticipantTestContext, darPath: String): Future[Unit] = + ledger.uploadDarFileAndVetOnConnectedSynchronizers(Dars.read(darPath)) + + implicit class EnrichedTypeConRef(val typeConRef: Ref.TypeConRef) { + def toScalaPbIdentifier: ScalaPbIdentifier = + ScalaPbIdentifier.of( + typeConRef.pkg.toString, + typeConRef.qualifiedName.module.toString(), + typeConRef.qualifiedName.name.toString(), + ) + + def matches(other: ScalaPbIdentifier, packageName: String): Boolean = + (typeConRef.pkg match { + case PackageRef.Name(name) => name == packageName + case PackageRef.Id(id) => other.packageId == id + }) && typeConRef.qualifiedName.module.toString() == other.moduleName && + typeConRef.qualifiedName.name.toString() == other.entityName + } + + implicit class EnrichedIdentifier(val identifier: ScalaPbIdentifier) { + def toTypeConRef: Ref.TypeConRef = + TypeConRef.assertFromString( + s"${identifier.packageId}:${identifier.moduleName}:${identifier.entityName}" + ) + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/UserManagementServiceIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/UserManagementServiceIT.scala new file mode 100644 index 0000000000..d712d4aaf0 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/UserManagementServiceIT.scala @@ -0,0 +1,1504 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.{assertEquals, *} +import com.daml.ledger.api.testtool.infrastructure.Party +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.v2.admin.object_meta.ObjectMeta +import com.daml.ledger.api.v2.admin.user_management_service as proto +import com.daml.ledger.api.v2.admin.user_management_service.{ + CreateUserRequest, + CreateUserResponse, + DeleteUserRequest, + DeleteUserResponse, + GetUserRequest, + GetUserResponse, + GrantUserRightsRequest, + GrantUserRightsResponse, + ListUserRightsRequest, + ListUserRightsResponse, + ListUsersRequest, + ListUsersResponse, + RevokeUserRightsRequest, + RevokeUserRightsResponse, + Right as Permission, + UpdateUserIdentityProviderIdRequest, + User, +} +import com.digitalasset.base.error.utils.ErrorDetails +import com.digitalasset.base.error.utils.ErrorDetails.matches +import com.digitalasset.base.error.{BaseError, ErrorCode} +import com.digitalasset.canton.auth.AuthorizationChecksErrors +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.ledger.error.IndexErrors +import com.digitalasset.canton.ledger.error.groups.{AdminServiceErrors, RequestValidationErrors} +import io.grpc.{Status, StatusRuntimeException} + +import java.util.UUID +import scala.concurrent.{ExecutionContext, Future} +import scala.util.{Failure, Success} + +final class UserManagementServiceIT extends UserManagementServiceITBase { + + private val adminPermission = + Permission(Permission.Kind.ParticipantAdmin(Permission.ParticipantAdmin())) + private def actAsPermission(party: String) = + Permission(Permission.Kind.CanActAs(Permission.CanActAs(party))) + private def readAsPermission(party: String) = + Permission(Permission.Kind.CanReadAs(Permission.CanReadAs(party))) + private def executeAsPermission(party: String) = + Permission(Permission.Kind.CanExecuteAs(Permission.CanExecuteAs(party))) + + private val AdminUserId = "participant_admin" + + def matchesOneOf(t: Throwable, errorCodes: ErrorCode*): Boolean = + errorCodes.exists(matches(t, _)) + + def isInternalError(t: Throwable): Boolean = t match { + case e: StatusRuntimeException => isInternalError(e) + case _ => false + } + + def isInternalError(e: StatusRuntimeException): Boolean = + e.getStatus.getCode == Status.Code.INTERNAL && e.getStatus.getDescription.startsWith( + BaseError.RedactedMessage.Prefix + ) + + test( + "UserManagementUpdateUserIdpWithNonDefaultIdps", + "Test reassigning user to a different idp using non default idps", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + val userId1 = ledger.nextUserId() + val idpId1 = ledger.nextIdentityProviderId() + val idpId2 = ledger.nextIdentityProviderId() + for { + _ <- ledger.createIdentityProviderConfig(identityProviderId = idpId1) + _ <- ledger.createIdentityProviderConfig(identityProviderId = idpId2) + _ <- ledger.createUser( + CreateUserRequest( + Some( + User( + id = userId1, + primaryParty = "", + identityProviderId = idpId1, + isDeactivated = false, + metadata = None, + ) + ), + Nil, + ) + ) + _ <- ledger.userManagement.updateUserIdentityProviderId( + UpdateUserIdentityProviderIdRequest( + userId1, + sourceIdentityProviderId = idpId1, + targetIdentityProviderId = 
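+          // How the reassignment works (as exercised by this test): the request names both
+          // the user's current idp (source) and the new idp (target). The server only moves
+          // the user when sourceIdentityProviderId matches the user's actual idp, and
+          // afterwards the user is only visible under the target idp, e.g.
+          //   GetUserRequest(userId = userId1, identityProviderId = idpId2)
+          // while reads under any other idp fail with PERMISSION_DENIED (asserted below).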
idpId2, + ) + ) + get1 <- ledger.userManagement.getUser( + GetUserRequest(userId = userId1, identityProviderId = idpId2) + ) + _ <- ledger.userManagement.updateUserIdentityProviderId( + UpdateUserIdentityProviderIdRequest( + userId1, + sourceIdentityProviderId = idpId2, + targetIdentityProviderId = idpId1, + ) + ) + get2 <- ledger.userManagement.getUser( + GetUserRequest(userId = userId1, identityProviderId = idpId1) + ) + error <- ledger.userManagement + .getUser( + GetUserRequest(userId = userId1, identityProviderId = idpId2) + ) + .mustFail("requesting with wrong idp") + // cleanup + _ <- ledger.userManagement.updateUserIdentityProviderId( + UpdateUserIdentityProviderIdRequest( + userId1, + sourceIdentityProviderId = idpId1, + targetIdentityProviderId = "", + ) + ) + } yield { + assertEquals(get1.user.get.identityProviderId, idpId2) + assertEquals(get2.user.get.identityProviderId, idpId1) + assert( + error.getMessage.startsWith("PERMISSION_DENIED"), + s"Actual message: ${error.getMessage}", + ) + + } + }) + + test( + "UserManagementUpdateUserIdpWithDefaultIdp", + "Test reassigning user to a different idp using the default idp", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + val userId1 = ledger.nextUserId() + val idpId = ledger.nextIdentityProviderId() + for { + _ <- ledger.createIdentityProviderConfig(identityProviderId = idpId) + _ <- ledger.createUser( + CreateUserRequest( + Some( + User( + id = userId1, + primaryParty = "", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ) + ), + Nil, + ) + ) + _ <- ledger.userManagement.updateUserIdentityProviderId( + UpdateUserIdentityProviderIdRequest( + userId1, + sourceIdentityProviderId = "", + targetIdentityProviderId = idpId, + ) + ) + get1 <- ledger.userManagement.getUser( + GetUserRequest(userId = userId1, identityProviderId = idpId) + ) + _ <- ledger.userManagement.updateUserIdentityProviderId( + UpdateUserIdentityProviderIdRequest( + userId1, + sourceIdentityProviderId = idpId, + targetIdentityProviderId = "", + ) + ) + get2 <- ledger.userManagement.getUser( + GetUserRequest(userId = userId1, identityProviderId = "") + ) + } yield { + assertEquals(get1.user.get.identityProviderId, idpId) + assertEquals(get2.user.get.identityProviderId, "") + } + }) + + test( + "UserManagementUpdateUserIdpNonExistentIdps", + "Test reassigning user to a different idp when source or target idp doesn't exist", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + val userId = ledger.nextUserId() + val idpNonExistent = ledger.nextIdentityProviderId() + for { + _ <- ledger.createUser( + CreateUserRequest( + Some( + User( + id = userId, + primaryParty = "", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ) + ), + Nil, + ) + ) + _ <- ledger.userManagement + .updateUserIdentityProviderId( + UpdateUserIdentityProviderIdRequest( + userId, + sourceIdentityProviderId = idpNonExistent, + targetIdentityProviderId = "", + ) + ) + .mustFailWith( + "non existent source idp", + RequestValidationErrors.InvalidArgument, + ) + _ <- ledger.userManagement + .updateUserIdentityProviderId( + UpdateUserIdentityProviderIdRequest( + userId, + sourceIdentityProviderId = "", + targetIdentityProviderId = idpNonExistent, + ) + ) + .mustFailWith( + "non existent 
target idp", + RequestValidationErrors.InvalidArgument, + ) + } yield () + }) + + test( + "UserManagementUpdateUserIdpMismatchedSourceIdp", + "Test reassigning user to a different idp using mismatched source idp", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + val userIdDefault = ledger.nextUserId() + val userIdNonDefault = ledger.nextUserId() + val idpIdNonDefault = ledger.nextIdentityProviderId() + val idpIdTarget = ledger.nextIdentityProviderId() + val idpIdMismatched = ledger.nextIdentityProviderId() + for { + _ <- ledger.createIdentityProviderConfig(identityProviderId = idpIdMismatched) + _ <- ledger.createIdentityProviderConfig(identityProviderId = idpIdNonDefault) + _ <- ledger.createIdentityProviderConfig(identityProviderId = idpIdTarget) + _ <- ledger.createUser( + CreateUserRequest( + Some( + User( + id = userIdDefault, + primaryParty = "", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ) + ), + Nil, + ) + ) + _ <- ledger.createUser( + CreateUserRequest( + Some( + User( + id = userIdNonDefault, + primaryParty = "", + identityProviderId = idpIdNonDefault, + isDeactivated = false, + metadata = None, + ) + ), + Nil, + ) + ) + error1 <- ledger.userManagement + .updateUserIdentityProviderId( + UpdateUserIdentityProviderIdRequest( + userIdDefault, + sourceIdentityProviderId = idpIdMismatched, + targetIdentityProviderId = idpIdTarget, + ) + ) + .mustFail("mismatched source idp id") + error2 <- ledger.userManagement + .updateUserIdentityProviderId( + UpdateUserIdentityProviderIdRequest( + userIdNonDefault, + sourceIdentityProviderId = idpIdMismatched, + targetIdentityProviderId = idpIdTarget, + ) + ) + .mustFail("mismatched source idp id") + // cleanup + _ <- ledger.userManagement.updateUserIdentityProviderId( + UpdateUserIdentityProviderIdRequest( + userIdNonDefault, + sourceIdentityProviderId = idpIdNonDefault, + targetIdentityProviderId = "", + ) + ) + } yield { + assert( + error1.getMessage.startsWith("PERMISSION_DENIED"), + s"Actual message: ${error1.getMessage}", + ) + assert( + error2.getMessage.startsWith("PERMISSION_DENIED"), + s"Actual message: ${error2.getMessage}", + ) + } + }) + + test( + "UserManagementUpdateUserIdpSourceAndTargetIdpTheSame", + "Test reassigning user to a different idp but source and target idps are the same", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + val userIdDefault = ledger.nextUserId() + val userIdNonDefault = ledger.nextUserId() + val idpId1 = ledger.nextIdentityProviderId() + for { + _ <- ledger.createIdentityProviderConfig(identityProviderId = idpId1) + _ <- ledger.createUser( + CreateUserRequest( + Some( + User( + id = userIdDefault, + primaryParty = "", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ) + ), + Nil, + ) + ) + _ <- ledger.createUser( + CreateUserRequest( + Some( + User( + id = userIdNonDefault, + primaryParty = "", + identityProviderId = idpId1, + isDeactivated = false, + metadata = None, + ) + ), + Nil, + ) + ) + _ <- ledger.userManagement.updateUserIdentityProviderId( + UpdateUserIdentityProviderIdRequest( + userIdDefault, + sourceIdentityProviderId = "", + targetIdentityProviderId = "", + ) + ) + get1 <- ledger.userManagement.getUser( + GetUserRequest(userId = userIdDefault, identityProviderId = 
"") + ) + _ <- ledger.userManagement.updateUserIdentityProviderId( + UpdateUserIdentityProviderIdRequest( + userIdNonDefault, + sourceIdentityProviderId = idpId1, + targetIdentityProviderId = idpId1, + ) + ) + get2 <- ledger.userManagement.getUser( + GetUserRequest(userId = userIdNonDefault, identityProviderId = idpId1) + ) + // cleanup + _ <- ledger.userManagement.updateUserIdentityProviderId( + UpdateUserIdentityProviderIdRequest( + userIdNonDefault, + sourceIdentityProviderId = idpId1, + targetIdentityProviderId = "", + ) + ) + } yield { + assertEquals("default idp", get1.user.get.identityProviderId, "") + assertEquals("non default idp", get2.user.get.identityProviderId, idpId1) + } + }) + + test( + "UserManagementUserRightsLimit", + "Test user rights per user limit", + allocate(NoParties), + enabled = features => + features.userManagement.maxRightsPerUser > 0 && features.userManagement.maxRightsPerUser <= 100, + disabledReason = "requires user management feature with user rights limit", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + def assertTooManyUserRightsError(t: Throwable): Unit = + assertGrpcError( + t = t, + errorCode = AdminServiceErrors.UserManagement.TooManyUserRights, + exceptionMessageSubstring = None, + ) + + def createCanActAs(party: Party) = + Permission(Permission.Kind.CanActAs(Permission.CanActAs(party))) + + def allocateParty(id: Int) = + ledger.allocateParty(Some(s"acting-party-$id")) + + val user1 = newUser(UUID.randomUUID.toString) + val user2 = newUser(UUID.randomUUID.toString) + + val maxRightsPerUser = ledger.features.userManagement.maxRightsPerUser + val allocatePartiesMaxAndOne = (1 to (maxRightsPerUser + 1)).map(allocateParty) + + for { + // allocating parties before user is created + allocatedParties <- Future.sequence(allocatePartiesMaxAndOne) + permissionsMaxPlusOne = allocatedParties.map(createCanActAs) + permissionOne = permissionsMaxPlusOne.head + permissionsMax = permissionsMaxPlusOne.tail + // cannot create user with #limit+1 rights + create1 <- ledger + .createUser(CreateUserRequest(Some(user1), permissionsMaxPlusOne)) + .mustFail( + "creating user with too many rights" + ) + // can create user with #limit rights + create2 <- ledger.createUser(CreateUserRequest(Some(user1), permissionsMax)) + // fails adding one more right + grant1 <- ledger.userManagement + .grantUserRights(GrantUserRightsRequest(user1.id, rights = Seq(permissionOne), "")) + .mustFail( + "granting more rights exceeds max number of user rights per user" + ) + // rights already added are intact + rights1 <- ledger.userManagement.listUserRights(ListUserRightsRequest(user1.id, "")) + // can create other users with #limit rights + create3 <- ledger.createUser(CreateUserRequest(Some(user2), permissionsMax)) + } yield { + assertTooManyUserRightsError(create1) + assertEquals(unsetResourceVersion(create2), CreateUserResponse(Some(user1))) + assertTooManyUserRightsError(grant1) + assertEquals(rights1.rights.size, permissionsMaxPlusOne.tail.size) + assertSameElements(rights1.rights, permissionsMaxPlusOne.tail) + assertEquals(unsetResourceVersion(create3), CreateUserResponse(Some(user2))) + } + }) + + test( + "UserManagementCreateUserInvalidArguments", + "Test argument validation for UserManagement#CreateUser", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + val userId = UUID.randomUUID.toString + + def createAndCheck( + problem: 
String, + user: User, + rights: Seq[proto.Right], + expectedErrorCode: ErrorCode, + ): Future[Unit] = + for { + throwable <- ledger + .createUser(CreateUserRequest(Some(user), rights)) + .mustFail(context = problem) + } yield assertGrpcError( + t = throwable, + errorCode = expectedErrorCode, + exceptionMessageSubstring = None, + ) + + for { + _ <- createAndCheck( + "empty user-id", + User.defaultInstance, + List.empty, + RequestValidationErrors.MissingField, + ) + _ <- createAndCheck( + "invalid user-id", + User( + id = "?", + primaryParty = "", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ), + List.empty, + RequestValidationErrors.InvalidField, + ) + _ <- createAndCheck( + "invalid primary-party", + User( + id = "u1-" + userId, + primaryParty = "party2-!!", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ), + List.empty, + RequestValidationErrors.InvalidArgument, + ) + r = proto.Right(proto.Right.Kind.CanActAs(proto.Right.CanActAs("party3-!!"))) + _ <- createAndCheck( + "invalid party in right", + User( + id = "u2-" + userId, + primaryParty = "", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ), + List(r), + RequestValidationErrors.InvalidArgument, + ) + } yield () + }) + + test( + "UserManagementGetUserInvalidArguments", + "Test argument validation for UserManagement#GetUser", + allocate(NoParties), + enabled = _.userManagement.supported, + disabledReason = "requires user management feature", + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + def getAndCheck( + problem: String, + userId: String, + expectedErrorCode: ErrorCode, + messageAssertion: String, + ): Future[Unit] = + for { + error <- ledger.userManagement + .getUser(GetUserRequest(userId, "")) + .mustFail(problem) + } yield assertGrpcError(error, expectedErrorCode, Some(messageAssertion)) + + for { + _ <- getAndCheck( + "empty user-id", + "", + AuthorizationChecksErrors.InvalidToken, + "INVALID_ARGUMENT: INVALID_TOKEN", + ) + _ <- getAndCheck( + "empty user-id", + "", + AuthorizationChecksErrors.InvalidToken, + "The submitted request is missing a user-id: requests with an empty user-id are only supported if there is an authenticated user", + ) + _ <- getAndCheck( + "invalid user-id", + "?", + RequestValidationErrors.InvalidField, + "INVALID_ARGUMENT: INVALID_FIELD", + ) + _ <- getAndCheck( + "invalid user-id", + "?", + RequestValidationErrors.InvalidField, + "The submitted command has a field with invalid value: Invalid field user_id: User ID \"?\" does not match regex", + ) + } yield () + }) + + userManagementTest( + "RaceConditionCreateUsers", + "Tests scenario of multiple concurrent create-user calls for the same user", + runConcurrently = false, + ) { implicit ec => participant => _ => + val attempts = (1 to 10).toVector + val userId = participant.nextUserId() + val request = + CreateUserRequest( + Some( + User( + id = userId, + primaryParty = "", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ) + ), + rights = Seq.empty, + ) + for { + results <- Future + .traverse(attempts) { _ => + participant.createUser(request).transform(Success(_)) + } + } yield { + assertSingleton( + "successful user creation", + results.filter(_.isSuccess), + ).discard + val unexpectedErrors = results.collect { + case Failure(t) + if !matchesOneOf( + t, + IndexErrors.DatabaseErrors.SqlTransientError, + AdminServiceErrors.UserManagement.UserAlreadyExists, + ) + && !isInternalError(t) => + t + } + assertIsEmpty(unexpectedErrors) 
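+      // Reading of the outcome (a sketch of the intended invariant): exactly one of the ten
+      // concurrent creations wins; every other attempt must fail with UserAlreadyExists, a
+      // transient SQL error (H2), or a redacted INTERNAL error (unique-constraint violation
+      // on Postgres). In shorthand:
+      //   results.count(_.isSuccess) == 1 && failures.forall(isExpectedRaceError)
+      // where isExpectedRaceError stands in for the matchesOneOf/isInternalError check above.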
+ } + } + + userManagementTest( + "RaceConditionDeleteUsers", + "Tests scenario of multiple concurrent delete-user calls for the same user", + runConcurrently = false, + ) { implicit ec => participant => _ => + val attempts = (1 to 10).toVector + val userId = participant.nextUserId() + val createUserRequest = + CreateUserRequest( + Some( + User( + id = userId, + primaryParty = "", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ) + ), + rights = Seq.empty, + ) + val deleteUserRequest = DeleteUserRequest(userId = userId, "") + for { + _ <- participant.createUser(createUserRequest) + results <- Future + .traverse(attempts) { _ => + participant.deleteUser(deleteUserRequest).transform(Success(_)) + } + } yield { + assertSingleton( + "successful user deletion", + results.filter(_.isSuccess), + ).discard + val unexpectedErrors = results + .collect { + case Failure(t) + if !matchesOneOf( + t, + IndexErrors.DatabaseErrors.SqlTransientError, + AdminServiceErrors.UserManagement.UserNotFound, + ) && !isInternalError(t) => + t + } + assertIsEmpty(unexpectedErrors) + } + } + + userManagementTest( + "RaceConditionGrantRights", + "Tests scenario of multiple concurrent grant-right calls for the same user and the same rights", + runConcurrently = false, + ) { implicit ec => participant => _ => + val attempts = (1 to 10).toVector + val userId = participant.nextUserId() + val suffix = UUID.randomUUID().toString + val createUserRequest = + CreateUserRequest( + Some( + User( + id = userId, + primaryParty = "", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ) + ), + rights = Seq.empty, + ) + for { + parties <- allocateParties(participant, suffix) + userRights = getUserRights(parties) + grantRightsRequest = GrantUserRightsRequest(userId = userId, rights = userRights, "") + _ <- participant.createUser(createUserRequest) + results <- Future.traverse(attempts) { _ => + participant.userManagement.grantUserRights(grantRightsRequest).transform(Success(_)) + } + } yield { + val successes = results.collect { + case Success(resp @ GrantUserRightsResponse(newlyGrantedRights)) + if newlyGrantedRights.nonEmpty => + resp + } + assertSingleton("Success response", successes).discard + assertSameElements(successes.head.newlyGrantedRights, userRights) + val unexpectedErrors = results + .collect { + case Failure(t) + if + // Transient db error caused by unique constraint violation on H2 + !ErrorDetails.matches(t, IndexErrors.DatabaseErrors.SqlTransientError) + && + // Internal error caused by unique constraint violation on Postgres + !isInternalError(t) => + t + } + assertIsEmpty(unexpectedErrors) + } + } + + userManagementTest( + "RaceConditionRevokeRights", + "Tests scenario of multiple concurrent revoke-right calls for the same user and the same rights", + runConcurrently = false, + ) { implicit ec => participant => _ => + val attempts = (1 to 10).toVector + val userId = participant.nextUserId() + val suffix = UUID.randomUUID().toString + for { + parties <- allocateParties(participant, suffix) + userRights = getUserRights(parties) + createUserRequest = + CreateUserRequest( + Some( + User( + id = userId, + primaryParty = "", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ) + ), + rights = userRights, + ) + _ <- participant.createUser(createUserRequest) + revokeRightsRequest = RevokeUserRightsRequest( + userId = userId, + rights = userRights, + identityProviderId = "", + ) + results <- Future.traverse(attempts) { _ => + 
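+        // .transform(Success(_)) below lifts each outcome into a Try, turning a
+        // Future[RevokeUserRightsResponse] into a Future[Try[RevokeUserRightsResponse]],
+        // so that Future.traverse collects every success and failure instead of failing
+        // fast on the first error; the same lifting pattern is used by the other
+        // race-condition tests in this suite.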
participant.userManagement.revokeUserRights(revokeRightsRequest).transform(Success(_)) + } + } yield { + assertSingleton( + "Non empty revoke-rights responses", + results.collect { + case Success(RevokeUserRightsResponse(actuallyRevoked)) if actuallyRevoked.nonEmpty => + actuallyRevoked + }, + ).discard + assertIsEmpty(results.filter(_.isFailure)) + } + } + + userManagementTest( + "TestAdminExists", + "Ensure admin user exists", + ) { implicit ec => implicit ledger => _ => + for { + get1 <- ledger.userManagement.getUser(GetUserRequest(AdminUserId, "")) + rights1 <- ledger.userManagement.listUserRights(ListUserRightsRequest(AdminUserId, "")) + } yield { + assertEquals( + get1.user, + Some( + User( + id = AdminUserId, + metadata = Some( + ObjectMeta( + resourceVersion = get1.getUser.getMetadata.resourceVersion, + annotations = Map.empty, + ) + ), + primaryParty = "", + identityProviderId = "", + isDeactivated = false, + ) + ), + ) + assertEquals(rights1, ListUserRightsResponse(Seq(adminPermission))) + } + } + + userManagementTest( + "TestCreateUser", + "Exercise CreateUser rpc", + ) { implicit ec => implicit ledger => _ => + val userId1 = ledger.nextUserId() + val userId2 = ledger.nextUserId() + val user1 = User( + id = userId1, + primaryParty = "party1", + metadata = Some(ObjectMeta.defaultInstance), + identityProviderId = "", + isDeactivated = false, + ) + val user2 = User( + id = userId2, + primaryParty = "", + metadata = Some(ObjectMeta.defaultInstance), + identityProviderId = "", + isDeactivated = false, + ) + for { + res1 <- ledger.createUser( + CreateUserRequest(Some(user1), Nil) + ) + res2 <- ledger + .createUser(CreateUserRequest(Some(user1), Nil)) + .mustFail("allocating a duplicate user") + res3 <- ledger.createUser(CreateUserRequest(Some(user2), Nil)) + res4 <- ledger.deleteUser(DeleteUserRequest(userId2, "")) + } yield { + assertEquals(unsetResourceVersion(res1), CreateUserResponse(Some(user1))) + assertUserAlreadyExists(res2) + assertEquals(unsetResourceVersion(res3), CreateUserResponse(Some(user2))) + assertEquals(res4, DeleteUserResponse()) + val resourceVersion1 = res1.user.get.metadata.get.resourceVersion + assert(resourceVersion1.nonEmpty, "New user's resource version should be non empty") + val resourceVersion2 = res3.user.get.metadata.get.resourceVersion + assert(resourceVersion2.nonEmpty, "New user's resource version should be non empty") + } + } + + userManagementTest( + shortIdentifier = "TestInvalidResourceVersionInCreateUser", + description = "Exercise CreateUser rpc using resource version", + ) { implicit ec => implicit ledger => _ => + val userId = ledger.nextUserId() + val user = User( + id = userId, + primaryParty = "", + metadata = Some( + ObjectMeta( + resourceVersion = "someResourceVersion1", + annotations = Map.empty, + ) + ), + identityProviderId = "", + isDeactivated = false, + ) + for { + res <- ledger + .createUser(CreateUserRequest(Some(user), Nil)) + .mustFail( + "creating user with non empty resource version" + ) + } yield { + assertGrpcError( + res, + RequestValidationErrors.InvalidArgument, + exceptionMessageSubstring = Some("user.metadata.resource_version"), + ) + } + } + + userManagementTest( + "TestGetUser", + "Exercise GetUser rpc", + ) { implicit ec => implicit ledger => _ => + val userId1 = ledger.nextUserId() + val userId2 = ledger.nextUserId() + val user1 = newUser( + id = userId1, + primaryParty = "party1", + ) + for { + _ <- ledger.createUser( + CreateUserRequest(Some(user1), Nil) + ) + res1 <- 
ledger.userManagement.getUser(GetUserRequest(userId1, "")) + res2 <- ledger.userManagement + .getUser(GetUserRequest(userId2, "")) + .mustFail("retrieving non-existent user") + } yield { + assertUserNotFound(res2) + assert(unsetResourceVersion(res1) == GetUserResponse(Some(user1))) + } + } + + userManagementTest( + "TestDeleteUser", + "Exercise DeleteUser rpc", + ) { implicit ec => implicit ledger => _ => + val userId1 = ledger.nextUserId() + val userId2 = ledger.nextUserId() + val user1 = User( + id = userId1, + primaryParty = "party1", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ) + for { + _ <- ledger.createUser( + CreateUserRequest(Some(user1), Nil) + ) + res1 <- ledger.deleteUser(DeleteUserRequest(userId1, "")) + res2 <- ledger.userManagement + .deleteUser(DeleteUserRequest(userId2, "")) + .mustFail("deleting non-existent user") + } yield { + assertEquals(res1, DeleteUserResponse()) + assertUserNotFound(res2) + } + } + + userManagementTest( + "TestListUsersVisibilityOfNewUserWhenCreatedAndThenDeleted", + "Exercise ListUsers rpc: Creating and deleting a user makes it visible and then absent from a page", + runConcurrently = false, + ) { implicit ec => implicit ledger => _ => + def assertUserPresentIn(user: User, list: ListUsersResponse, msg: String): Unit = + assert(list.users.map(unsetResourceVersion).contains(user), msg) + + def assertUserAbsentIn(user: User, list: ListUsersResponse, msg: String): Unit = + assert(!list.users.map(unsetResourceVersion).contains(user), msg) + + for { + pageBeforeCreate <- ledger.userManagement.listUsers( + ListUsersRequest(pageToken = "", pageSize = 10, identityProviderId = "") + ) + // Construct a user-id that will, with high probability, be the first on the first page + // (Note: "!" is the smallest valid user-id character) + newUserId = "!" 
+ pageBeforeCreate.users.headOption + .map(_.id) + .getOrElse(ledger.nextUserId()) + newUser1 = newUser( + id = newUserId, + primaryParty = "", + ) + _ = assertUserAbsentIn( + newUser1, + pageBeforeCreate, + "new user should be absent before its creation", + ) + _ <- ledger.createUser(CreateUserRequest(Some(newUser1), Nil)) + pageAfterCreate <- ledger.userManagement.listUsers( + ListUsersRequest(pageToken = "", pageSize = 10, identityProviderId = "") + ) + _ = assertUserPresentIn( + newUser1, + pageAfterCreate, + "new user should be present after its creation", + ) + _ <- ledger.deleteUser(DeleteUserRequest(newUserId, identityProviderId = "")) + pageAfterDelete <- ledger.userManagement.listUsers( + ListUsersRequest(pageToken = "", pageSize = 10, identityProviderId = "") + ) + _ = assertUserAbsentIn( + newUser1, + pageAfterDelete, + "new user should be absent after its deletion", + ) + } yield { + () + } + } + + userManagementTest( + "TestListUsersCreateOrDeleteUserOnPreviousPage", + "Exercise ListUsers rpc: Adding a user to a previous page doesn't affect the subsequent page", + runConcurrently = false, + ) { implicit ec => implicit ledger => _ => + val userId1 = ledger.nextUserId() + val userId2 = ledger.nextUserId() + val userId3 = ledger.nextUserId() + val userId4 = ledger.nextUserId() + + for { + // Create 4 users to ensure we have at least two pages of two users each + _ <- ledger.createUser( + CreateUserRequest( + Some( + User( + id = userId1, + primaryParty = "", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ) + ), + Nil, + ) + ) + _ <- ledger.createUser( + CreateUserRequest( + Some( + User( + id = userId2, + primaryParty = "", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ) + ), + Nil, + ) + ) + _ <- ledger.createUser( + CreateUserRequest( + Some( + User( + id = userId3, + primaryParty = "", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ) + ), + Nil, + ) + ) + _ <- ledger.createUser( + CreateUserRequest( + Some( + User( + id = userId4, + primaryParty = "", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ) + ), + Nil, + ) + ) + // Fetch the first two full pages + page1 <- ledger.userManagement.listUsers( + ListUsersRequest(pageToken = "", pageSize = 2, identityProviderId = "") + ) + page2 <- ledger.userManagement.listUsers( + ListUsersRequest(pageToken = page1.nextPageToken, pageSize = 2, identityProviderId = "") + ) + // Verify that the second page stays the same even after we have created a new user that is lexicographically smaller than the last user on the first page + // (Note: "!" is the smallest valid user-id character) + newUserId = "!" 
+ page1.users.last.id + _ <- ledger.createUser( + CreateUserRequest( + Some( + User( + id = newUserId, + primaryParty = "", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ) + ), + Seq.empty, + ) + ) + page2B <- ledger.userManagement.listUsers( + ListUsersRequest(pageToken = page1.nextPageToken, pageSize = 2, identityProviderId = "") + ) + _ = assertEquals("after creating new user before the second page", page2, page2B) + } yield { + () + } + } + + userManagementTest( + "TestListUsersReachingTheLastPage", + "Exercise ListUsers rpc: Listing all users page by page eventually terminates reaching the last page", + ) { implicit ec => implicit ledger => _ => + val pageSize = 10000 + + def fetchNextPage(pageToken: String, pagesFetched: Int): Future[Unit] = + for { + page <- ledger.userManagement.listUsers( + ListUsersRequest(pageSize = pageSize, pageToken = pageToken, identityProviderId = "") + ) + // Bind the recursive call so that its Future is awaited and failures propagate + _ <- if (page.nextPageToken != "") { + if (pagesFetched > 10) { + fail( + s"Could not reach the last page even after fetching ${pagesFetched + 1} pages of size $pageSize each" + ) + } + fetchNextPage(pageToken = page.nextPageToken, pagesFetched = pagesFetched + 1) + } else Future.unit + } yield () + + fetchNextPage(pageToken = "", pagesFetched = 0) + } + + userManagementTest( + "TestListUsersOnInvalidRequests", + "Exercise ListUsers rpc: Requesting invalid pageSize or pageToken results in an error", + ) { implicit ec => implicit ledger => _ => + for { + // Using a string that is not Base64-encoded as the page token + onBadTokenError <- ledger.userManagement + .listUsers( + ListUsersRequest( + pageToken = UUID.randomUUID().toString, + pageSize = 0, + identityProviderId = "", + ) + ) + .mustFail("using invalid page token string") + // Using negative pageSize + onNegativePageSizeError <- ledger.userManagement + .listUsers(ListUsersRequest(pageSize = -100, pageToken = "", identityProviderId = "")) + .mustFail("using negative page size") + } yield { + assertGrpcError( + t = onBadTokenError, + errorCode = RequestValidationErrors.InvalidArgument, + exceptionMessageSubstring = None, + ) + assertGrpcError( + t = onNegativePageSizeError, + errorCode = RequestValidationErrors.InvalidArgument, + exceptionMessageSubstring = None, + ) + } + + } + + userManagementTest( + "TestListUsersRequestPageSizeZero", + "Exercise ListUsers rpc: Requesting page of size zero means requesting server's default page size, which is larger than zero", + runConcurrently = false, + ) { implicit ec => implicit ledger => _ => + val userId1 = ledger.nextUserId() + val userId2 = ledger.nextUserId() + for { + // Ensure we have at least two users + _ <- ledger.createUser( + CreateUserRequest( + Some( + User( + id = userId1, + primaryParty = "", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ) + ), + Nil, + ) + ) + _ <- ledger.createUser( + CreateUserRequest( + Some( + User( + id = userId2, + primaryParty = "", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ) + ), + Nil, + ) + ) + pageSizeZero <- ledger.userManagement.listUsers( + ListUsersRequest(pageSize = 0, pageToken = "", identityProviderId = "") + ) + pageSizeOne <- ledger.userManagement.listUsers( + ListUsersRequest(pageSize = 1, pageToken = "", identityProviderId = "") + ) + } yield { + assert( + pageSizeOne.users.nonEmpty, + "First page with requested pageSize zero should return some users", + ) + assertEquals(pageSizeZero.users.head, pageSizeOne.users.head) + } + } + + test( + "TestMaxUsersPageSize", + "Exercise ListUsers rpc: 
Requesting more than maxUsersPageSize results in at most maxUsersPageSize returned users", + allocate(NoParties), + enabled = _.userManagement.maxUsersPageSize > 0, + disabledReason = "requires user management feature with users page size limit", + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + val maxUsersPageSize = ledger.features.userManagement.maxUsersPageSize + val users = 1 + .to(maxUsersPageSize + 1) + .map(_ => + User( + id = ledger.nextUserId(), + primaryParty = "", + isDeactivated = false, + metadata = None, + identityProviderId = "", + ) + ) + for { + // create lots of users + _ <- Future.sequence( + users.map(u => ledger.createUser(CreateUserRequest(Some(u), Nil))) + ) + // request page size greater than the server's limit + page <- ledger.userManagement + .listUsers( + ListUsersRequest(pageSize = maxUsersPageSize + 1, pageToken = "", identityProviderId = "") + ) + } yield { + assert( + page.users.size <= maxUsersPageSize, + s"page size must be within limit. actual size: ${page.users.size}, server's limit: $maxUsersPageSize", + ) + } + }) + + userManagementTest( + "TestGrantTheEmptyRight", + "Test granting an empty right", + ) { implicit ec => implicit ledger => _ => + val userId = ledger.nextUserId() + val user = User( + id = userId, + primaryParty = "", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ) + for { + _ <- ledger.createUser(CreateUserRequest(Some(user), Nil)) + _ <- ledger.userManagement + .grantUserRights( + GrantUserRightsRequest(userId, List(Permission(Permission.Kind.Empty)), "") + ) + .mustFailWith( + "granting empty right", + RequestValidationErrors.InvalidArgument, + Some("unknown kind of right"), + ) + } yield () + } + + userManagementTest( + "TestGrantingAndRevokingEmptyListOfRights", + "Test granting and revoking empty list of rights", + ) { implicit ec => implicit ledger => _ => + val userId = ledger.nextUserId() + val user = User( + id = userId, + primaryParty = "", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ) + for { + _ <- ledger.createUser(CreateUserRequest(Some(user), Nil)) + _ <- ledger.userManagement.grantUserRights(GrantUserRightsRequest(userId, List.empty, "")) + _ <- ledger.userManagement.revokeUserRights(RevokeUserRightsRequest(userId, List.empty, "")) + } yield () + } + + userManagementTest( + "TestGrantUserRights", + "Exercise GrantUserRights rpc", + ) { implicit ec => implicit ledger => _ => + val userId1 = ledger.nextUserId() + val userId2 = ledger.nextUserId() + val user1 = User( + id = userId1, + primaryParty = "party1", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ) + val suffix = UUID.randomUUID().toString + for { + parties <- allocateParties(ledger, suffix) + userRights = getUserRights(parties) + userBefore <- ledger.createUser(CreateUserRequest(Some(user1), Nil)) + res1 <- ledger.userManagement.grantUserRights( + GrantUserRightsRequest(userId1, List(adminPermission), "") + ) + res2 <- ledger.userManagement + .grantUserRights(GrantUserRightsRequest(userId2, List(adminPermission), "")) + .mustFail("granting right to a non-existent user") + res3 <- ledger.userManagement.grantUserRights( + GrantUserRightsRequest(userId1, List(adminPermission), "") + ) + res4 <- ledger.userManagement.grantUserRights( + GrantUserRightsRequest(userId1, userRights, "") + ) + userAfter <- ledger.userManagement.getUser( + GetUserRequest(userId = user1.id, identityProviderId = "") + ) + } yield { + 
assertSameElements(res1.newlyGrantedRights, List(adminPermission)) + assertUserNotFound(res2) + assertSameElements(res3.newlyGrantedRights, List.empty) + assertSameElements(res4.newlyGrantedRights.toSet, userRights.toSet) + val userResourceVersion1 = userBefore.user.get.metadata.get.resourceVersion + val userResourceVersion2 = userAfter.user.get.metadata.get.resourceVersion + assertEquals( + "changing user rights must not change user's resource version", + userResourceVersion1, + userResourceVersion2, + ) + } + } + + userManagementTest( + "TestAllocationTimeGrantUserRights", + "Exercise AllocateParty rpc", + ) { implicit ec => implicit ledger => _ => + val userId1 = ledger.nextUserId() + val user1 = User( + id = userId1, + primaryParty = "party1", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ) + val suffix = UUID.randomUUID().toString + val party = s"acting-party-1-$suffix" + for { + res1 <- ledger + .allocateParty(partyIdHint = Some(party), userId = user1.id) + .mustFail("granting right to a non-existent user") + _ <- ledger.createUser(CreateUserRequest(Some(user1), Nil)) + rightsBefore <- ledger.userManagement.listUserRights(ListUserRightsRequest(userId1, "")) + allocated <- ledger.allocateParty(partyIdHint = Some(party), userId = user1.id) + rightsAfter <- ledger.userManagement.listUserRights(ListUserRightsRequest(userId1, "")) + } yield { + assertUserNotFound(res1) + assert(rightsBefore.rights.isEmpty, "rights should be empty") + assertSameElements( + rightsAfter.rights.toSet, + Set(actAsPermission(allocated.underlying)), + ) + } + } + + private def getUserRights(parties: UserManagementServiceIT.Parties) = + List( + actAsPermission(parties.acting1), + actAsPermission(parties.acting2), + readAsPermission(parties.reading1), + readAsPermission(parties.reading2), + executeAsPermission(parties.executing1), + executeAsPermission(parties.executing2), + ) + + userManagementTest( + "TestRevokeUserRights", + "Exercise RevokeUserRights rpc", + ) { implicit ec => implicit ledger => _ => + val userId1 = ledger.nextUserId() + val userId2 = ledger.nextUserId() + val user1 = User( + id = userId1, + primaryParty = "party1", + identityProviderId = "", + isDeactivated = false, + metadata = None, + ) + val suffix = UUID.randomUUID().toString + for { + parties <- allocateParties(ledger, suffix) + userRights = getUserRights(parties) + userBefore <- ledger.createUser( + CreateUserRequest(Some(user1), List(adminPermission) ++ userRights) + ) + res1 <- ledger.userManagement.revokeUserRights( + RevokeUserRightsRequest(userId1, List(adminPermission), "") + ) + res2 <- ledger.userManagement + .revokeUserRights(RevokeUserRightsRequest(userId2, List(adminPermission), "")) + .mustFail("revoking right from a non-existent user") + res3 <- ledger.userManagement.revokeUserRights( + RevokeUserRightsRequest(userId1, List(adminPermission), "") + ) + res4 <- ledger.userManagement.revokeUserRights( + RevokeUserRightsRequest(userId1, userRights, "") + ) + userAfter <- ledger.userManagement.getUser( + GetUserRequest(userId = user1.id, identityProviderId = "") + ) + } yield { + assertEquals(res1, RevokeUserRightsResponse(List(adminPermission))) + assertUserNotFound(res2) + assertSameElements(res3.newlyRevokedRights, List.empty) + assertSameElements(res4.newlyRevokedRights.toSet, userRights.toSet) + val userResourceVersion1 = userBefore.user.get.metadata.get.resourceVersion + val userResourceVersion2 = userAfter.user.get.metadata.get.resourceVersion + assertEquals( + "changing user rights must not 
change user's resource version", + userResourceVersion1, + userResourceVersion2, + ) + } + } + + userManagementTest( + "TestListUserRights", + "Exercise ListUserRights rpc", + ) { implicit ec => implicit ledger => _ => + val userId1 = ledger.nextUserId() + val user1 = User( + id = userId1, + primaryParty = "party4", + metadata = Some(ObjectMeta.defaultInstance), + identityProviderId = "", + isDeactivated = false, + ) + val suffix = UUID.randomUUID().toString + for { + parties <- allocateParties(ledger, suffix) + res1 <- ledger.createUser( + CreateUserRequest(Some(user1), Nil) + ) + res2 <- ledger.userManagement.listUserRights(ListUserRightsRequest(userId1, "")) + res3 <- ledger.userManagement.grantUserRights( + GrantUserRightsRequest( + userId1, + List( + adminPermission, + actAsPermission(parties.acting1), + readAsPermission(parties.reading1), + executeAsPermission(parties.executing1), + ), + "", + ) + ) + res4 <- ledger.userManagement.listUserRights(ListUserRightsRequest(userId1, "")) + res5 <- ledger.userManagement.revokeUserRights( + RevokeUserRightsRequest(userId1, List(adminPermission), "") + ) + res6 <- ledger.userManagement + .listUserRights(ListUserRightsRequest(userId1, "")) + } yield { + assertEquals(unsetResourceVersion(res1), CreateUserResponse(Some(user1))) + assertEquals(res2, ListUserRightsResponse(Seq.empty)) + assertSameElements( + res3.newlyGrantedRights.toSet, + Set( + adminPermission, + actAsPermission(parties.acting1), + readAsPermission(parties.reading1), + executeAsPermission(parties.executing1), + ), + ) + assertSameElements( + res4.rights.toSet, + Set( + adminPermission, + actAsPermission(parties.acting1), + readAsPermission(parties.reading1), + executeAsPermission(parties.executing1), + ), + ) + assertSameElements(res5.newlyRevokedRights, Seq(adminPermission)) + assertSameElements( + res6.rights.toSet, + Set( + actAsPermission(parties.acting1), + readAsPermission(parties.reading1), + executeAsPermission(parties.executing1), + ), + ) + } + } + + def allocateParties(ledger: ParticipantTestContext, suffix: String)(implicit + ec: ExecutionContext + ): Future[UserManagementServiceIT.Parties] = + for { + acting1 <- ledger.allocateParty(Some(s"acting-party-1-$suffix")) + acting2 <- ledger.allocateParty(Some(s"acting-party-2-$suffix")) + reading1 <- ledger.allocateParty(Some(s"reading-party-1-$suffix")) + reading2 <- ledger.allocateParty(Some(s"reading-party-2-$suffix")) + executing1 <- ledger.allocateParty(Some(s"executing-party-1-$suffix")) + executing2 <- ledger.allocateParty(Some(s"executing-party-2-$suffix")) + } yield UserManagementServiceIT.Parties( + acting1, + acting2, + reading1, + reading2, + executing1, + executing2, + ) +} + +object UserManagementServiceIT { + case class Parties( + acting1: String, + acting2: String, + reading1: String, + reading2: String, + executing1: String, + executing2: String, + ) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/UserManagementServiceITBase.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/UserManagementServiceITBase.scala new file mode 100644 index 0000000000..1486cc4636 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/UserManagementServiceITBase.scala @@ -0,0 +1,180 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
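The grant/revoke tests above lean on the delta-reporting semantics of the rights RPCs: granting a right that is already held yields an empty `newlyGrantedRights`, and revoking an absent right yields an empty `newlyRevokedRights`. A minimal sketch of that idempotency check, assuming the suite's `ledger` (a `ParticipantTestContext`) and `adminPermission` value are in scope; the helper name is hypothetical:

def grantTwiceIsIdempotent(userId: String)(implicit ec: ExecutionContext): Future[Unit] =
  for {
    first <- ledger.userManagement
      .grantUserRights(GrantUserRightsRequest(userId, List(adminPermission), ""))
    second <- ledger.userManagement
      .grantUserRights(GrantUserRightsRequest(userId, List(adminPermission), ""))
  } yield {
    // The first call reports the right as newly granted; the repeat call reports nothing new.
    assertSameElements(first.newlyGrantedRights, List(adminPermission))
    assertSameElements(second.newlyGrantedRights, List.empty)
  }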
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.{ + NoParties, + Participant, + Participants, + allocate, +} +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.participant.{Features, ParticipantTestContext} +import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, TestConstraints} +import com.daml.ledger.api.v2.admin.object_meta.ObjectMeta +import com.daml.ledger.api.v2.admin.user_management_service.{ + CreateUserRequest, + CreateUserResponse, + GetUserResponse, + UpdateUserRequest, + UpdateUserResponse, + User, +} +import com.digitalasset.canton.ledger.error.groups.AdminServiceErrors +import com.google.protobuf.field_mask.FieldMask + +import scala.concurrent.{ExecutionContext, Future} + +abstract class UserManagementServiceITBase extends LedgerTestSuite { + + def withFreshUser[T]( + primaryParty: String = "", + isDeactivated: Boolean = false, + annotations: Map[String, String] = Map.empty, + )( + f: User => Future[T] + )(implicit ledger: ParticipantTestContext, ec: ExecutionContext): Future[T] = { + val userId = ledger.nextUserId() + val newUser = User( + id = userId, + primaryParty = primaryParty, + isDeactivated = isDeactivated, + metadata = Some( + ObjectMeta( + resourceVersion = "", + annotations = annotations, + ) + ), + identityProviderId = "", + ) + for { + create <- ledger.userManagement.createUser(CreateUserRequest(Some(newUser), Nil)) + _ = assertEquals("withUser", unsetResourceVersion(create), CreateUserResponse(Some(newUser))) + v <- f(create.user.get) + } yield v + } + + def newUser( + id: String, + isDeactivated: Boolean = false, + primaryParty: String = "", + annotations: Map[String, String] = Map.empty, + ): User = User( + id = id, + isDeactivated = isDeactivated, + primaryParty = primaryParty, + metadata = Some(ObjectMeta(resourceVersion = "", annotations = annotations)), + identityProviderId = "", + ) + + def updateRequest( + id: String, + isDeactivated: Boolean = false, + primaryParty: String = "", + resourceVersion: String = "", + annotations: Map[String, String] = Map.empty, + updatePaths: Seq[String], + ): UpdateUserRequest = + UpdateUserRequest( + user = Some( + User( + id = id, + isDeactivated = isDeactivated, + primaryParty = primaryParty, + metadata = Some(ObjectMeta(resourceVersion = resourceVersion, annotations = annotations)), + identityProviderId = "", + ) + ), + updateMask = Some( + FieldMask(updatePaths) + ), + ) + + def extractIsDeactivated(updateResp: UpdateUserResponse): Boolean = + updateResp.getUser.isDeactivated + + def extractUpdatedPrimaryParty(updateResp: UpdateUserResponse): String = + updateResp.getUser.primaryParty + + def extractUpdatedAnnotations(updateResp: UpdateUserResponse): Map[String, String] = + updateResp.getUser.getMetadata.annotations + + def extractAnnotations(updateResp: CreateUserResponse): Map[String, String] = + updateResp.user.get.metadata.get.annotations + + def unsetResourceVersion[T](t: T): T = { + val t2: T = t match { + case u: User => u.update(_.metadata.resourceVersion := "").asInstanceOf[T] + case u: CreateUserResponse => u.update(_.user.metadata.resourceVersion := "").asInstanceOf[T] + case u: UpdateUserResponse => u.update(_.user.metadata.resourceVersion := "").asInstanceOf[T] + case u: GetUserResponse => u.update(_.user.metadata.resourceVersion := "").asInstanceOf[T] + case other => sys.error(s"could not match $other") + } + t2 + } + + def userManagementTest( + 
shortIdentifier: String, + description: String, + runConcurrently: Boolean = true, + limitation: TestConstraints = TestConstraints.NoLimitations, + )( + body: ExecutionContext => ParticipantTestContext => Participants => Future[Unit] + ): Unit = + test( + shortIdentifier = shortIdentifier, + description = description, + allocate(NoParties), + enabled = (features: Features) => { + features.userManagement.supported + }, + disabledReason = "requires user management feature", + runConcurrently = runConcurrently, + limitation = limitation, + )(implicit ec => { case p @ Participants(Participant(ledger, Seq())) => + body(ec)(ledger)(p) + }) + + def testWithFreshUser( + shortIdentifier: String, + description: String, + limitation: TestConstraints = TestConstraints.NoLimitations, + )( + primaryParty: String = "", + isDeactivated: Boolean = false, + annotations: Map[String, String] = Map.empty, + )( + body: ExecutionContext => ParticipantTestContext => User => Future[Unit] + ): Unit = + userManagementTest( + shortIdentifier = shortIdentifier, + description = description, + limitation = limitation, + ) { implicit ec => implicit ledger => _ => + withFreshUser( + primaryParty = primaryParty, + isDeactivated = isDeactivated, + annotations = annotations, + ) { user => + body(ec)(ledger)(user) + } + } + + def assertUserNotFound(t: Throwable): Unit = + assertGrpcError( + t = t, + errorCode = AdminServiceErrors.UserManagement.UserNotFound, + exceptionMessageSubstring = None, + ) + + def assertUserAlreadyExists( + t: Throwable + ): Unit = + assertGrpcError( + t = t, + errorCode = AdminServiceErrors.UserManagement.UserAlreadyExists, + exceptionMessageSubstring = None, + ) + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/UserManagementServiceUpdateRpcIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/UserManagementServiceUpdateRpcIT.scala new file mode 100644 index 0000000000..72624a047e --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/UserManagementServiceUpdateRpcIT.scala @@ -0,0 +1,243 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
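`testWithFreshUser` above composes `userManagementTest` with `withFreshUser`, so each test body receives an already-created `User` and only has to state its assertion. A sketch of the intended call shape, with a hypothetical test identifier and a deliberately trivial body, assuming the helpers defined above are in scope:

testWithFreshUser(
  "CheckFreshUserAnnotations", // hypothetical shortIdentifier, not part of the suite
  "A fresh user carries the annotations it was created with",
)(annotations = Map("k1" -> "v1"))(implicit ec =>
  _ =>
    user =>
      Future.successful(
        assertEquals(user.getMetadata.annotations, Map("k1" -> "v1"))
      )
)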
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.TestConstraints +import com.daml.ledger.api.v2.admin.user_management_service.{ + UpdateUserRequest, + UpdateUserResponse, + User, +} +import com.digitalasset.canton.ledger.error.groups.{AdminServiceErrors, RequestValidationErrors} +import com.google.protobuf.field_mask.FieldMask + +class UserManagementServiceUpdateRpcIT extends UserManagementServiceITBase { + + testWithFreshUser( + "UpdateAllUpdatableFields", + "Update all updatable fields", + )( + primaryParty = "primaryParty0", + isDeactivated = true, + annotations = Map("k1" -> "v1", "k2" -> "v2", "k3" -> "v3"), + )(implicit ec => + ledger => + user => { + ledger.userManagement + .updateUser( + updateRequest( + id = user.id, + annotations = Map("k1" -> "v1a", "k3" -> "", "k4" -> "v4", "k5" -> ""), + isDeactivated = false, + primaryParty = "primaryParty1", + updatePaths = Seq( + "is_deactivated", + "primary_party", + "metadata", + ), + ) + ) + .map(updateResp => + assertEquals( + "updating user 1", + unsetResourceVersion(updateResp), + UpdateUserResponse( + Some( + newUser( + id = user.id, + isDeactivated = false, + primaryParty = "primaryParty1", + annotations = Map("k1" -> "v1a", "k2" -> "v2", "k4" -> "v4"), + ) + ) + ), + ) + ) + } + ) + + testWithFreshUser( + "UpdateUserEvenIfMetadataIsNotSetInUpdateRequest", + "Update a user even if metadata field is not set in the update request", + )()(implicit ec => + ledger => + user => + ledger.userManagement + .updateUser( + UpdateUserRequest( + user = Some( + User( + id = user.id, + primaryParty = "nextPrimaryParty", + metadata = None, + isDeactivated = false, + identityProviderId = "", + ) + ), + updateMask = Some(FieldMask(Seq("primary_party"))), + ) + ) + .map { updateResp => + assertEquals( + "update with metadata not set in the request", + unsetResourceVersion(updateResp), + UpdateUserResponse(Some(newUser(user.id, primaryParty = "nextPrimaryParty"))), + ) + } + ) + + testWithFreshUser( + "InvalidUpdateRequestsUserFieldIsUnset", + "Failing update requests when user field is unset", + limitation = TestConstraints.GrpcOnly( + "emptyUser leads to other JSON request: /v2/idps/ which gives a list" + ), + )()(implicit ec => + ledger => + _ => + ledger.userManagement + .updateUser( + UpdateUserRequest( + user = None, + updateMask = Some(FieldMask(Seq("metadata"))), + ) + ) + .mustFailWith( + "update with an unknown update path", + errorCode = RequestValidationErrors.MissingField, + exceptionMessageSubstring = Some("missing a mandatory field: user"), + ) + ) + + userManagementTest( + "FailUpdateNonExistentUser", + "Fail when attempting to update a non-existent user", + ) { implicit ec => ledger => _ => + val userId1 = ledger.nextUserId() + for { + _ <- ledger.userManagement + .updateUser( + updateRequest( + id = userId1, + annotations = Map("k1" -> "v1"), + updatePaths = Seq("metadata.annotations"), + ) + ) + .mustFailWith( + "updating non-existent party", + errorCode = AdminServiceErrors.UserManagement.UserNotFound, + ) + } yield () + } + + testWithFreshUser( + "InvalidUpdateRequestsInvalidUserIdSyntax", + "Failing update requests when user id is not a valid user id", + )()(implicit ec => + ledger => + _ => + ledger.userManagement + .updateUser( + updateRequest( + id = "%%!!!", + annotations = Map("k2" -> "v2"), + updatePaths = Seq.empty, + ) + ) + .mustFailWith( + "update with an unknown update path", + errorCode = 
RequestValidationErrors.InvalidField, + ) + ) + + testWithFreshUser( + "UpdatePrimaryPartyUsingNonEmptyValue", + "Update primary party using a non-empty value", + )(primaryParty = "primaryParty0")(implicit ec => + ledger => + user => + ledger.userManagement + .updateUser( + updateRequest( + id = user.id, + primaryParty = "primaryParty1", + updatePaths = Seq("primary_party"), + ) + ) + .map { updateResp => + assertEquals( + "updating primary party", + extractUpdatedPrimaryParty(updateResp), + expected = "primaryParty1", + ) + } + ) + + testWithFreshUser( + "UpdatePrimaryPartyUsingEmptyValue", + "Update primary party using the empty value", + )(primaryParty = "primaryParty0")(implicit ec => + ledger => + user => + ledger.userManagement + .updateUser( + updateRequest(id = user.id, primaryParty = "", updatePaths = Seq("primary_party")) + ) + .map { updateResp => + assertEquals( + "updating primary party to the empty value", + extractUpdatedPrimaryParty(updateResp), + expected = "", + ) + } + ) + + testWithFreshUser( + "UpdateIsDeactivatedUsingNonDefaultValue", + "Update isDeactivated using a non-default value", + )(isDeactivated = false)(implicit ec => + ledger => + user => + ledger.userManagement + .updateUser( + updateRequest( + id = user.id, + isDeactivated = true, + updatePaths = Seq("is_deactivated"), + ) + ) + .map { updateResp => + assertEquals( + "updating is_deactivated", + extractIsDeactivated(updateResp), + expected = true, + ) + } + ) + + testWithFreshUser( + "UpdateIsDeactivatedUsingTheDefaultValue", + "Update isDeactivated using the default value", + )(isDeactivated = true)(implicit ec => + ledger => + user => + ledger.userManagement + .updateUser( + updateRequest( + id = user.id, + isDeactivated = false, + updatePaths = Seq("is_deactivated"), + ) + ) + .map { updateResp => + assertEquals( + "updating is_deactivated", + extractIsDeactivated(updateResp), + expected = false, + ) + } + ) + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/ValueLimitsIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/ValueLimitsIT.scala new file mode 100644 index 0000000000..5b9d7e1a0c --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/ValueLimitsIT.scala @@ -0,0 +1,45 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
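The update tests above exercise a merge rule for annotations under the "metadata" update path: a non-empty value upserts a key, an empty value deletes it, and untouched keys survive. A plain-Scala model of that rule as the tests assert it, not the server implementation:

def mergeAnnotations(
    current: Map[String, String],
    update: Map[String, String],
): Map[String, String] =
  update.foldLeft(current) {
    case (acc, (key, ""))    => acc - key // empty value removes the key
    case (acc, (key, value)) => acc.updated(key, value) // non-empty value upserts
  }

// Matches the expectation in UpdateAllUpdatableFields above:
// mergeAnnotations(
//   Map("k1" -> "v1", "k2" -> "v2", "k3" -> "v3"),
//   Map("k1" -> "v1a", "k3" -> "", "k4" -> "v4", "k5" -> ""),
// ) == Map("k1" -> "v1a", "k2" -> "v2", "k4" -> "v4")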
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.test.java.model.test.DummyWithAnnotation +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.ledger.api.TransactionShape.AcsDelta + +import scala.concurrent.Future +import scala.util.Random + +final class ValueLimitsIT extends LedgerTestSuite { + + test( + "VLLargeSubmittersNumberCreateContract", + "Create a contract with a large submitters number", + allocate(NoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq())) => + for { + // Need to manually allocate parties to avoid db string compression + parties <- Future.traverse(1 to 50) { number => + ledger.allocateParty( + partyIdHint = + Some(s"deduplicationRandomParty_${number}_" + Random.alphanumeric.take(100).mkString) + ) + } + request = ledger + .submitAndWaitForTransactionRequest( + actAs = parties.toList, + readAs = parties.toList, + commands = + new DummyWithAnnotation(parties.head.getValue, "First submission").create.commands, + transactionShape = AcsDelta, + ) + _ <- ledger.submitAndWaitForTransaction(request) + contracts <- ledger.activeContracts(Some(Seq(parties.head))) + } yield { + assertSingleton("Single create contract expected", contracts).discard + } + }) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/VettingIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/VettingIT.scala new file mode 100644 index 0000000000..d70d6f27c8 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/VettingIT.scala @@ -0,0 +1,825 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
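The vetting tests that follow exercise `VettedPackagesRef` resolution: an empty field acts as a wildcard, every non-empty field must match, an unresolvable reference is rejected (`VettingReferenceEmpty`, surfaced as UNRESOLVED_VETTING_REFERENCE), and a reference matching more than one package is rejected too (`VettingReferenceMoreThanOne`, AMBIGUOUS_VETTING_REFERENCE). A plain-Scala model of that rule, using hypothetical `Ref`/`Pkg` stand-ins rather than the real proto and package-metadata types:

final case class Ref(id: String, name: String, version: String)
final case class Pkg(id: String, name: String, version: String)

def resolve(ref: Ref, known: Seq[Pkg]): Either[String, Pkg] = {
  // An empty field in the reference matches anything; a non-empty field must match exactly.
  def ok(wanted: String, actual: String) = wanted.isEmpty || wanted == actual
  known.filter(p => ok(ref.id, p.id) && ok(ref.name, p.name) && ok(ref.version, p.version)) match {
    case Seq(only) => Right(only)
    case Seq()     => Left("UNRESOLVED_VETTING_REFERENCE")
    case _         => Left("AMBIGUOUS_VETTING_REFERENCE")
  }
}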
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.{ + NoParties, + Participant, + Participants, + allocate, +} +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.testtool.infrastructure.{ + Dars, + LedgerTestSuite, + TestConstraints, + VettingAltDar, + VettingDepDar, + VettingMainDar_1_0_0, + VettingMainDar_2_0_0, + VettingMainDar_Split_Lineage_2_0_0, +} +import com.daml.ledger.api.v2.admin.package_management_service.{ + UpdateVettedPackagesRequest, + UploadDarFileRequest, + VettedPackagesChange, + VettedPackagesRef, +} +import com.daml.ledger.api.v2.package_reference.VettedPackages +import com.daml.ledger.api.v2.package_service.{ + ListVettedPackagesRequest, + ListVettedPackagesResponse, + TopologyStateFilter, +} +import com.daml.ledger.test.java.vetting_alt.alt.AltT +import com.daml.ledger.test.java.vetting_dep.dep.DepT +import com.daml.ledger.test.java.vetting_main_1_0_0.main.MainT as MainT_1_0_0 +import com.daml.ledger.test.java.vetting_main_2_0_0.main.MainT as MainT_2_0_0 +import com.daml.ledger.test.java.vetting_main_split_lineage_2_0_0.main.DifferentMainT as MainT_Split_Lineage_2_0_0 +import com.digitalasset.canton.ledger.api.{ + DontVetAnyPackages, + PackageMetadataFilter, + PriorTopologySerialExists, +} +import com.digitalasset.canton.participant.admin.CantonPackageServiceError +import com.digitalasset.canton.topology.TopologyManagerError.ParticipantTopologyManagerError +import com.digitalasset.daml.lf.archive.DarDecoder +import com.digitalasset.daml.lf.data.Ref +import com.google.protobuf.timestamp.Timestamp +import org.scalatest.Inside.inside +import org.scalatest.Inspectors.* +import org.scalatest.compatible.Assertion +import org.scalatest.matchers.should.Matchers.* + +import java.util.concurrent.atomic.AtomicReference +import java.util.zip.ZipInputStream +import scala.concurrent.{ExecutionContext, Future} + +class VettingIT extends LedgerTestSuite { + private val vettingDepPkgId = Ref.PackageId.assertFromString(DepT.PACKAGE_ID) + private val vettingAltPkgId = Ref.PackageId.assertFromString(AltT.PACKAGE_ID) + private val vettingMainPkgIdV1 = Ref.PackageId.assertFromString(MainT_1_0_0.PACKAGE_ID) + private val vettingMainPkgIdV2 = Ref.PackageId.assertFromString(MainT_2_0_0.PACKAGE_ID) + private val vettingMainPkgIdV2SplitLineage = + Ref.PackageId.assertFromString(MainT_Split_Lineage_2_0_0.PACKAGE_ID) + + private val vettingDepName = "vetting-dep" + private val vettingMainName = "vetting-main" + + private val synchronizer1Id: AtomicReference[Option[String]] = + new AtomicReference[Option[String]](None) + private def synchronizerIdOrFail = synchronizer1Id + .get() + .getOrElse(throw new IllegalStateException("synchronizerId not yet discovered")) + + private def assertListResponseHasPkgIds( + response: ListVettedPackagesResponse, + hasPkgIds: Seq[Ref.PackageId], + hasNotPkgIds: Seq[Ref.PackageId], + ): Assertion = + inside(response) { case ListVettedPackagesResponse(Seq(vettedPackages), _) => + assertVettedPackagesHasPkgIds(vettedPackages, hasPkgIds, hasNotPkgIds) + } + + private def assertSomeVettedPackagesHasPkgIds( + mbVettedPackages: Option[VettedPackages], + hasPkgIds: Seq[Ref.PackageId], + hasNotPkgIds: Seq[Ref.PackageId], + ): Assertion = + inside(mbVettedPackages) { case Some(vettedPackages) => + assertVettedPackagesHasPkgIds(vettedPackages, hasPkgIds, hasNotPkgIds) + } + + private 
def assertVettedPackagesHasPkgIds( + vettedPackages: VettedPackages, + hasPkgIds: Seq[Ref.PackageId], + hasNotPkgIds: Seq[Ref.PackageId], + ): Assertion = { + val allPkgIds = vettedPackages.packages.map(_.packageId) + forAll(hasPkgIds) { pkgId => + allPkgIds should contain(pkgId) + } + forAll(hasNotPkgIds) { pkgId => + allPkgIds should not contain pkgId + } + } + + private def assertListResponseHasPkgIdWithBounds( + response: ListVettedPackagesResponse, + targetPkgId: Ref.PackageId, + expectedLowerBound: Option[Timestamp], + expectedUpperBound: Option[Timestamp], + ): Assertion = + inside(response) { case ListVettedPackagesResponse(Seq(vettedPackages), _) => + val matching = vettedPackages.packages.find(_.packageId == targetPkgId) + inside(matching) { case Some(vetted) => + vetted.validFromInclusive shouldBe expectedLowerBound + vetted.validUntilExclusive shouldBe expectedUpperBound + } + } + + private def assertResponseHasAllVersions(response: ListVettedPackagesResponse): Assertion = + assertListResponseHasPkgIds( + response = response, + hasPkgIds = Seq(vettingDepPkgId, vettingMainPkgIdV2, vettingMainPkgIdV1), + hasNotPkgIds = Seq(vettingAltPkgId), + ) + + private def assertResponseHasV2(response: ListVettedPackagesResponse): Assertion = + assertListResponseHasPkgIds( + response = response, + hasPkgIds = Seq(vettingDepPkgId, vettingMainPkgIdV2), + hasNotPkgIds = Seq(vettingAltPkgId, vettingMainPkgIdV1), + ) + + private def assertResponseHasDep(response: ListVettedPackagesResponse): Assertion = + assertListResponseHasPkgIds( + response = response, + hasPkgIds = Seq(vettingDepPkgId), + hasNotPkgIds = Seq(vettingMainPkgIdV2, vettingAltPkgId), + ) + + private def assertResponseHasNothing(response: ListVettedPackagesResponse): Assertion = + assertListResponseHasPkgIds( + response = response, + hasPkgIds = Seq(), + hasNotPkgIds = Seq(vettingDepPkgId, vettingMainPkgIdV1, vettingMainPkgIdV2, vettingAltPkgId), + ) + + private def listAllRequest: ListVettedPackagesRequest = + listPackagesRequest(Seq(), Seq()) + + private def listNamesRequest(prefixes: Seq[String]): ListVettedPackagesRequest = + listPackagesRequest(Seq(), prefixes) + + private def listPkgIdsRequest(ids: Seq[Ref.PackageId]): ListVettedPackagesRequest = + listPackagesRequest(ids, Seq()) + + private def listPackagesRequest( + ids: Seq[Ref.PackageId], + namePrefixes: Seq[String], + ): ListVettedPackagesRequest = + ListVettedPackagesRequest( + Some(PackageMetadataFilter(ids, namePrefixes).toProtoLAPI), + Some( + TopologyStateFilter(participantIds = Seq.empty, synchronizerIds = Seq(synchronizerIdOrFail)) + ), + "", + 0, + ) + + private def refsToVetOp( + refs: Seq[VettedPackagesRef], + newValidFromInclusive: Option[Timestamp] = None, + newValidUntilExclusive: Option[Timestamp] = None, + ) = + VettedPackagesChange.Operation.Vet( + VettedPackagesChange.Vet( + refs, + newValidFromInclusive, + newValidUntilExclusive, + ) + ) + + private def refsToUnvetOp(refs: Seq[VettedPackagesRef]) = + VettedPackagesChange.Operation.Unvet( + VettedPackagesChange.Unvet( + refs + ) + ) + + private def changeOpsRequest( + operations: Seq[VettedPackagesChange.Operation], + dryRun: Boolean = false, + ): UpdateVettedPackagesRequest = + UpdateVettedPackagesRequest( + operations.map(VettedPackagesChange(_)), + dryRun, + synchronizerIdOrFail, + Some(PriorTopologySerialExists(0).toProtoLAPI), + ) + + private def changeOpRequest( + operation: VettedPackagesChange.Operation, + dryRun: Boolean = false, + ): UpdateVettedPackagesRequest = + 
changeOpsRequest(Seq(operation), dryRun) + + private def vetPkgIdsRequest( + pkgIds: Seq[String], + dryRun: Boolean = false, + ): UpdateVettedPackagesRequest = + changeOpsRequest( + Seq(refsToVetOp(pkgIds.map((pkgId: String) => VettedPackagesRef(pkgId, "", "")))), + dryRun, + ) + + private def vetPkgsMatchingRef( + ledger: ParticipantTestContext, + ref: VettedPackagesRef, + newValidFromInclusive: Option[Timestamp] = None, + newValidUntilExclusive: Option[Timestamp] = None, + )(implicit + ec: ExecutionContext + ): Future[Unit] = + ledger + .updateVettedPackages( + changeOpRequest( + refsToVetOp( + Seq(ref), + newValidFromInclusive, + newValidUntilExclusive, + ) + ) + ) + .map(_ => ()) + + private def vetAllInDar(ledger: ParticipantTestContext, darName: String)(implicit + ec: ExecutionContext + ): Future[Unit] = { + val allPackageIds = DarDecoder + .readArchive( + darName, + new ZipInputStream(getClass.getClassLoader.getResourceAsStream(darName)), + ) + .toOption + .get + .all + .map(_._1) + ledger + .updateVettedPackages( + vetPkgIdsRequest(allPackageIds.map(_.toString)) + ) + .map(_ => ()) + } + + private def opDARMains( + op: Seq[VettedPackagesRef] => VettedPackagesChange.Operation, + ledger: ParticipantTestContext, + darNames: Seq[String], + )(implicit + ec: ExecutionContext + ): Future[Unit] = { + val mainPackageIds = darNames.map((darName: String) => + DarDecoder + .readArchive( + darName, + new ZipInputStream(getClass.getClassLoader.getResourceAsStream(darName)), + ) + .toOption + .get + .main + ._1 + ) + + ledger + .updateVettedPackages( + changeOpsRequest( + Seq( + op( + mainPackageIds.map((pkgId: Ref.PackageId) => + VettedPackagesRef(pkgId.toString, "", "") + ) + ) + ), + false, + ) + ) + .map(_ => ()) + } + + private def unvetDARMains(ledger: ParticipantTestContext, darNames: Seq[String])(implicit + ec: ExecutionContext + ): Future[Unit] = + opDARMains(refsToUnvetOp, ledger, darNames) + + private def vetDARMains(ledger: ParticipantTestContext, darNames: Seq[String])(implicit + ec: ExecutionContext + ): Future[Unit] = + opDARMains(refsToVetOp(_), ledger, darNames) + + private def unvetAllDARMains( + ledger: ParticipantTestContext + )(implicit ec: ExecutionContext): Future[Unit] = + unvetDARMains( + ledger, + Seq( + VettingDepDar.path, + VettingMainDar_1_0_0.path, + VettingMainDar_2_0_0.path, + VettingAltDar.path, + ), + ) + .map(_ => ()) + + private def setSynchronizerId( + ledger: ParticipantTestContext + )(implicit ec: ExecutionContext): Future[Unit] = + ledger.connectedSynchronizers().map { connected => + connected.find(_.startsWith("synchronizer1")).foreach { syncId => + synchronizer1Id.set(Some(syncId)) + } + } + + private def uploadDarFileDontVetRequest(path: String) = + UploadDarFileRequest( + darFile = Dars.read(path), + "", + DontVetAnyPackages.toProto, + "", + ) + + test( + "PVListVettedPackagesBasic", + "Listing all, listing by name, listing by pkgId, listing by pkgId and name", + allocate(NoParties), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, _)) => + for { + _ <- setSynchronizerId(ledger) + _ <- ledger.uploadDarFileAndVetOnConnectedSynchronizers(Dars.read(VettingDepDar.path)) + _ <- ledger.uploadDarFileAndVetOnConnectedSynchronizers(Dars.read(VettingMainDar_1_0_0.path)) + _ <- ledger.uploadDarFileAndVetOnConnectedSynchronizers(Dars.read(VettingMainDar_2_0_0.path)) + _ <- ledger.uploadDarFile( + uploadDarFileDontVetRequest(VettingAltDar.path) + ) + + allResponse <- ledger.listVettedPackages(listAllRequest) + depNameResponse <- 
ledger.listVettedPackages(listNamesRequest(Seq(vettingDepName))) + bothNameResponse <- ledger.listVettedPackages( + listNamesRequest(Seq(vettingDepName, vettingMainName)) + ) + depPkgIdResponse <- ledger.listVettedPackages(listPkgIdsRequest(Seq(vettingDepPkgId))) + bothPkgIdResponse <- ledger.listVettedPackages( + listPkgIdsRequest(Seq(vettingDepPkgId, vettingMainPkgIdV2)) + ) + depPkgIdAndNameResponse <- ledger.listVettedPackages( + listPackagesRequest(Seq(vettingDepPkgId), Seq(vettingDepName)) + ) + bothPkgIdAndNamesResponse <- ledger.listVettedPackages( + listPackagesRequest( + Seq(vettingDepPkgId, vettingMainPkgIdV2), + Seq(vettingDepName, vettingMainName), + ) + ) + commonPrefixResponse <- ledger.listVettedPackages(listNamesRequest(Seq("vetting-"))) + disjointPkgIdAndNameResponse <- ledger.listVettedPackages( + listPackagesRequest(Seq(vettingDepPkgId), Seq(vettingMainName)) + ) + + _ <- unvetAllDARMains(ledger) + } yield { + assertResponseHasAllVersions(allResponse) + assertResponseHasDep(depNameResponse) + assertResponseHasAllVersions(bothNameResponse) + assertResponseHasDep(depPkgIdResponse) + assertResponseHasV2(bothPkgIdResponse) + assertResponseHasDep(depPkgIdAndNameResponse) + assertResponseHasAllVersions(bothPkgIdAndNamesResponse) + assertResponseHasAllVersions(commonPrefixResponse) + assertResponseHasAllVersions(disjointPkgIdAndNameResponse) + } + }) + + test( + "PVUploadDarFileBasic", + "Uploading DAR files vets all packages by default, including dependencies", + allocate(NoParties), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, _)) => + for { + _ <- setSynchronizerId(ledger) + _ <- ledger.uploadDarFileAndVetOnConnectedSynchronizers( + Dars.read(VettingMainDar_2_0_0.path) + ) // Should vet vetting-dep dependency + _ <- ledger.uploadDarFile( + uploadDarFileDontVetRequest(VettingAltDar.path) + ) + + allResponse <- ledger.listVettedPackages(listAllRequest) + _ <- unvetAllDARMains(ledger) + } yield { + assertResponseHasV2(allResponse) + } + }) + + def updateTest( + title: String, + description: String, + act: ExecutionContext => ParticipantTestContext => Future[Assertion], + ) = + test( + title, + description, + allocate(NoParties), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, _)) => + for { + _ <- setSynchronizerId(ledger) + _ <- ledger.uploadDarFile( + uploadDarFileDontVetRequest(VettingDepDar.path) + ) + _ <- ledger.uploadDarFile( + uploadDarFileDontVetRequest(VettingMainDar_1_0_0.path) + ) + _ <- ledger.uploadDarFile( + uploadDarFileDontVetRequest(VettingMainDar_2_0_0.path) + ) + _ <- ledger.uploadDarFile( + uploadDarFileDontVetRequest(VettingAltDar.path) + ) + _ <- unvetAllDARMains(ledger) + _ <- act(ec)(ledger) + _ <- unvetAllDARMains(ledger) + } yield { + () + } + }) + + updateTest( + "PVUpdateVetDepThenV2Succeeds", + "Successfully vet everything in vetting-dep and then just the main package from vetting-main", + implicit ec => + (ledger: ParticipantTestContext) => + for { + _ <- vetAllInDar(ledger, VettingDepDar.path) + _ <- vetDARMains(ledger, Seq(VettingMainDar_2_0_0.path)) + + allResponse <- ledger.listVettedPackages(listAllRequest) + } yield assertResponseHasV2(allResponse), + ) + + updateTest( + "PVUpdateUnvetV2Succeeds", + "Successfully vet everything in vetting-main then just unvet the main package from vetting-main", + implicit ec => + (ledger: ParticipantTestContext) => + for { + _ <- vetAllInDar(ledger, VettingMainDar_2_0_0.path) + _ <- unvetDARMains(ledger, 
Seq(VettingMainDar_2_0_0.path)) + + unvetTestAllResponse <- ledger.listVettedPackages(listAllRequest) + } yield assertResponseHasDep(unvetTestAllResponse), + ) + + updateTest( + "PVUpdateVetTwoPackagesAtATimeSucceeds", + "Successfully vet both vetting-dep and vetting-main in one update", + implicit ec => + (ledger: ParticipantTestContext) => + for { + _ <- ledger.updateVettedPackages( + vetPkgIdsRequest(Seq(vettingMainPkgIdV2, vettingDepPkgId)) + ) + + vetMainAndDepResponse <- ledger.listVettedPackages(listAllRequest) + } yield assertResponseHasV2(vetMainAndDepResponse), + ) + + updateTest( + "PVUpdateVetMultipleByNameFails", + "Successfully vet multiple packages by name", + implicit ec => + (ledger: ParticipantTestContext) => + for { + _ <- vetAllInDar(ledger, VettingDepDar.path) + _ <- vetPkgsMatchingRef(ledger, VettedPackagesRef("", vettingMainName, "")) + .mustFailWith( + "Vetting multiple packages with a single reference should give AMBIGUOUS_VETTING_REFERENCE", + CantonPackageServiceError.Vetting.VettingReferenceMoreThanOne, + ) + } yield succeed, + ) + + updateTest( + "PVUpdateVetNonexistentNameFails", + "Fail to vet a package by a nonexistent name", + implicit ec => + (ledger: ParticipantTestContext) => + for { + _ <- vetAllInDar(ledger, VettingDepDar.path) + _ <- vetPkgsMatchingRef(ledger, VettedPackagesRef("", "nonexistent-name", "")) + .mustFailWith( + "Vetting a nonexistent name in a reference should give UNRESOLVED_VETTING_REFERENCE", + CantonPackageServiceError.Vetting.VettingReferenceEmpty, + ) + } yield succeed, + ) + + updateTest( + "PVUpdateVetByNameAndVersion", + "Successfully vet a package by name and version", + implicit ec => + (ledger: ParticipantTestContext) => + for { + _ <- vetAllInDar(ledger, VettingDepDar.path) + _ <- vetPkgsMatchingRef( + ledger, + VettedPackagesRef("", vettingMainName, ("2.0.0")), + ) + vetOnlyV2ByNameAndVersionResponse <- ledger.listVettedPackages(listAllRequest) + } yield assertResponseHasV2(vetOnlyV2ByNameAndVersionResponse), + ) + + updateTest( + "PVUpdateVetByNameAndNonexistentVersion", + "Fail when trying to vet nonexistent version of a package name that exists", + implicit ec => + (ledger: ParticipantTestContext) => + for { + _ <- vetAllInDar(ledger, VettingDepDar.path) + _ <- vetPkgsMatchingRef( + ledger, + VettedPackagesRef("", vettingMainName, ("3.0.0")), + ).mustFailWith( + "Vetting an existing name with a nonexistent version should give UNRESOLVED_VETTING_REFERENCE", + CantonPackageServiceError.Vetting.VettingReferenceEmpty, + ) + } yield succeed, + ) + + updateTest( + "PVUpdateVetByIdAndNameAndVersion", + "Successfully vet a package by ID, name, version when all three match", + implicit ec => + (ledger: ParticipantTestContext) => + for { + _ <- vetAllInDar(ledger, VettingDepDar.path) + _ <- vetPkgsMatchingRef( + ledger, + VettedPackagesRef( + vettingMainPkgIdV2, + vettingMainName, + "2.0.0", + ), + ) + vetOnlyV2ByAllResponse <- ledger.listVettedPackages(listAllRequest) + } yield assertResponseHasV2(vetOnlyV2ByAllResponse), + ) + + updateTest( + "PVUpdateVetByIdWithWrongName", + "Fail to vet a package by ID when paired with the wrong name", + implicit ec => + (ledger: ParticipantTestContext) => + for { + _ <- vetAllInDar(ledger, VettingDepDar.path) + _ <- vetPkgsMatchingRef( + ledger, + VettedPackagesRef( + vettingMainPkgIdV2, + "nonexistent-name", + "2.0.0", + ), + ).mustFailWith( + "Vetting an existing pkg ID with the incorrect name should give VETTING_REFERENCE_EMPTY", + CantonPackageServiceError.Vetting.VettingReferenceEmpty, + 
) + } yield succeed, + ) + + updateTest( + "PVUpdateVetByIdWithWrongVersion", + "Fail to vet a package by ID when paired with the wrong version", + implicit ec => + (ledger: ParticipantTestContext) => + for { + _ <- vetAllInDar(ledger, VettingDepDar.path) + _ <- vetPkgsMatchingRef( + ledger, + VettedPackagesRef( + vettingMainPkgIdV2, + vettingMainName, + "3.0.0", + ), + ).mustFailWith( + "Vetting an existing pkg ID with the incorrect name should give VETTING_REFERENCE_EMPTY", + CantonPackageServiceError.Vetting.VettingReferenceEmpty, + ) + } yield succeed, + ) + + test( + "PVListVettedPackagesNothingVetted", + "Listing vetted packages returns nothing when uploading DARs without vetting them", + allocate(NoParties), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, _)) => + for { + _ <- setSynchronizerId(ledger) + _ <- ledger.uploadDarFile( + uploadDarFileDontVetRequest(VettingDepDar.path) + ) + _ <- ledger.uploadDarFile( + uploadDarFileDontVetRequest(VettingMainDar_2_0_0.path) + ) + _ <- ledger.uploadDarFile( + uploadDarFileDontVetRequest(VettingAltDar.path) + ) + + allResponse <- ledger.listVettedPackages(listAllRequest) + + _ <- unvetAllDARMains(ledger) + } yield { + assertResponseHasNothing(allResponse) + } + }) + + test( + "PVDryRun", + "Vetting with Dry Run returns the expected changes, but does not commit them", + allocate(NoParties), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, _)) => + for { + _ <- setSynchronizerId(ledger) + _ <- ledger.uploadDarFile( + uploadDarFileDontVetRequest(VettingMainDar_2_0_0.path) + ) + _ <- ledger.uploadDarFile( + uploadDarFileDontVetRequest(VettingAltDar.path) + ) + dryRunUpdateResponse <- ledger.updateVettedPackages( + vetPkgIdsRequest( + pkgIds = Seq(vettingAltPkgId), + dryRun = true, + ) + ) + + listAllResponse <- ledger.listVettedPackages(listAllRequest) + _ <- unvetAllDARMains(ledger) + } yield { + assertSomeVettedPackagesHasPkgIds( + mbVettedPackages = dryRunUpdateResponse.pastVettedPackages, + hasPkgIds = Seq(), + hasNotPkgIds = Seq(vettingDepPkgId, vettingMainPkgIdV2, vettingAltPkgId), + ) + + assertSomeVettedPackagesHasPkgIds( + mbVettedPackages = dryRunUpdateResponse.newVettedPackages, + hasPkgIds = Seq(vettingAltPkgId), + hasNotPkgIds = Seq(vettingDepPkgId, vettingMainPkgIdV2), + ) + + assertListResponseHasPkgIds( + response = listAllResponse, + hasPkgIds = Seq(), + hasNotPkgIds = Seq(vettingDepPkgId, vettingMainPkgIdV2, vettingAltPkgId), + ) + } + }) + + test( + "PVWritingAndOverwritingBounds", + "Vetting bounds can be written and overwritten", + allocate(NoParties), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, _)) => + for { + _ <- setSynchronizerId(ledger) + _ <- ledger.uploadDarFile( + uploadDarFileDontVetRequest(VettingMainDar_1_0_0.path) + ) + _ <- ledger.uploadDarFile( + uploadDarFileDontVetRequest(VettingAltDar.path) + ) + _ <- vetAllInDar(ledger, VettingDepDar.path) + + _ <- vetPkgsMatchingRef( + ledger, + VettedPackagesRef(vettingMainPkgIdV2, "", ""), + Some(Timestamp.of(0, 0)), + Some(Timestamp.of(1, 0)), + ) + + listAfterBounds1 <- ledger.listVettedPackages(listAllRequest) + + _ <- ledger.updateVettedPackages( + changeOpRequest( + refsToVetOp( + Seq( + VettedPackagesRef(vettingMainPkgIdV2, "", ""), + VettedPackagesRef(vettingAltPkgId, "", ""), + ), + Some(Timestamp.of(2, 0)), + Some(Timestamp.of(3, 0)), + ) + ) + ) + + listAfterBounds2 <- ledger.listVettedPackages(listAllRequest) + + _ <- unvetAllDARMains(ledger) 
+ } yield { + assertListResponseHasPkgIdWithBounds( + listAfterBounds1, + vettingMainPkgIdV2, + Some(Timestamp.of(0, 0)), + Some(Timestamp.of(1, 0)), + ) + + assertListResponseHasPkgIdWithBounds( + listAfterBounds2, + vettingMainPkgIdV2, + Some(Timestamp.of(2, 0)), + Some(Timestamp.of(3, 0)), + ) + + assertListResponseHasPkgIdWithBounds( + listAfterBounds2, + vettingAltPkgId, + Some(Timestamp.of(2, 0)), + Some(Timestamp.of(3, 0)), + ) + } + }) + + test( + "PVCheckUpgradeInvariants", + "Upgrade invariants are checked, including during dry run", + allocate(NoParties), + runConcurrently = false, + )(implicit ec => { case Participants(Participant(ledger, _)) => + for { + _ <- setSynchronizerId(ledger) + _ <- ledger.uploadDarFile( + uploadDarFileDontVetRequest(VettingMainDar_2_0_0.path) + ) + _ <- ledger.uploadDarFile( + uploadDarFileDontVetRequest(VettingMainDar_Split_Lineage_2_0_0.path) + ) + + // Dry-run vetting without dependencies (should fail) + _ <- ledger + .updateVettedPackages(vetPkgIdsRequest(pkgIds = Seq(vettingMainPkgIdV2), dryRun = true)) + .mustFailWith( + "Vetting a package without its dependencies in a dry run should give TOPOLOGY_DEPENDENCIES_NOT_VETTED", + ParticipantTopologyManagerError.DependenciesNotVetted, + ) + + // Vet without dependencies (should fail) + _ <- ledger + .updateVettedPackages( + vetPkgIdsRequest(Seq(vettingMainPkgIdV2)) + ) + .mustFailWith( + "Vetting a package without its dependencies should give TOPOLOGY_DEPENDENCIES_NOT_VETTED", + ParticipantTopologyManagerError.DependenciesNotVetted, + ) + + _ <- vetAllInDar(ledger, VettingDepDar.path) + + // Vet two packages with the same version (should fail) + _ <- ledger + .updateVettedPackages( + vetPkgIdsRequest(Seq(vettingMainPkgIdV2, vettingMainPkgIdV2SplitLineage)) + ) + .mustFailWith( + "Update should fail to vet package with same name and version with KNOWN_PACKAGE_VERSION error", + ParticipantTopologyManagerError.UpgradeVersion, + ) + + // Vet a package while unvetting its dependencies (should fail) + _ <- ledger + .updateVettedPackages( + changeOpsRequest( + Seq( + refsToVetOp( + Seq(VettedPackagesRef(vettingMainPkgIdV2, "", "")) + ), + refsToUnvetOp( + Seq(VettedPackagesRef(vettingDepPkgId, "", "")) + ), + ) + ) + ) + .mustFailWith( + "Vetting a package while unvetting its dependencies should give TOPOLOGY_DEPENDENCIES_NOT_VETTED", + ParticipantTopologyManagerError.DependenciesNotVetted, + ) + + _ <- unvetAllDARMains(ledger) + } yield () + }) + + test( + "PVValidateDarCheckUpgradeInvariants", + "Upgrade invariants are checked, including during validate dar request", + allocate(NoParties), + runConcurrently = false, + limitation = TestConstraints.GrpcOnly(reason = "ValidateDarFile is not available in JSON API"), + )(implicit ec => { case Participants(Participant(ledger, _)) => + for { + _ <- ledger.uploadDarFileAndVetOnConnectedSynchronizers(Dars.read(VettingMainDar_2_0_0.path)) + _ <- ledger + .validateDarFile(Dars.read(VettingMainDar_Split_Lineage_2_0_0.path)) + .mustFailWith( + "Update should fail to vet package with same name and version with KNOWN_PACKAGE_VERSION error", + ParticipantTopologyManagerError.UpgradeVersion, + ) + _ <- unvetAllDARMains(ledger) + } yield () + }) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/WitnessesIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/WitnessesIT.scala new file mode 100644 index 0000000000..df8439deda --- /dev/null +++ 
b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/WitnessesIT.scala @@ -0,0 +1,165 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.api.v2.commands.DisclosedContract +import com.daml.ledger.api.v2.transaction_filter.CumulativeFilter.IdentifierFilter +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_LEDGER_EFFECTS +import com.daml.ledger.api.v2.transaction_filter.{ + CumulativeFilter, + EventFormat, + Filters, + TemplateFilter, + TransactionFormat, +} +import com.daml.ledger.test.java.model.test.Witnesses +import com.digitalasset.canton.ledger.api.TransactionShape.LedgerEffects + +final class WitnessesIT extends LedgerTestSuite { + test( + "RespectDisclosureRules", + "The ledger should respect disclosure rules", + allocate(Parties(3)), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob, charlie))) => + for { + // Create the Witnesses contract as Alice + (_, witnesses) <- ledger + .createAndGetTransactionId( + alice, + new Witnesses(alice, bob, charlie), + )(Witnesses.COMPANION) + txReq <- ledger.getTransactionsRequest( + TransactionFormat( + eventFormat = Some( + EventFormat( + filtersByParty = Seq(alice, bob, charlie) + .map(party => + party.getValue -> new Filters( + Seq( + CumulativeFilter( + IdentifierFilter.TemplateFilter( + TemplateFilter( + Some(Witnesses.TEMPLATE_ID.toV1), + includeCreatedEventBlob = true, + ) + ) + ) + ) + ) + ) + .toMap, + filtersForAnyParty = None, + verbose = false, + ) + ), + transactionShape = TRANSACTION_SHAPE_LEDGER_EFFECTS, + ) + ) + witnessesTransactions <- ledger.transactions(txReq) + witnessesTransaction = witnessesTransactions.head + witnessesFromTransaction = witnessesTransaction.events.head.getCreated + disclosedWitnesses = DisclosedContract( + templateId = witnessesFromTransaction.templateId, + contractId = witnessesFromTransaction.contractId, + createdEventBlob = witnessesFromTransaction.createdEventBlob, + synchronizerId = "", + ) + + // A non-consuming choice is exercised with the expectation + // that Charlie is able to exercise a choice on an explicitly disclosed contract + // The ledger effects transaction is fetched from the identifier to ensure we get the witnesses as seen by all parties + nonConsuming <- ledger.submitAndWaitForTransaction( + ledger + .submitAndWaitForTransactionRequest( + party = charlie, + commands = witnesses.exerciseWitnessesNonConsumingChoice().commands, + transactionShape = LedgerEffects, + filterParties = Some(Seq(charlie)), + ) + .update(_.commands.disclosedContracts := Seq(disclosedWitnesses)) + ) + nonConsumingLedgerEffects <- ledger.transactionById( + nonConsuming.getTransaction.updateId, + Seq(alice, bob, charlie), + LedgerEffects, + ) + + // A consuming choice is exercised with the expectation + // that Charlie is able to exercise a choice on an explicitly disclosed contract + // The ledger effects transaction is fetched from the identifier to ensure we get the witnesses as seen by all parties + consuming <- ledger.submitAndWaitForTransaction( + ledger + .submitAndWaitForTransactionRequest( + charlie, + witnesses.exerciseWitnessesChoice().commands, + LedgerEffects, + ) + .update(_.commands.disclosedContracts := 
Seq(disclosedWitnesses)) + ) + consumingLedgerEffects <- ledger.transactionById( + consuming.getTransaction.updateId, + Seq( + alice, + bob, + charlie, + ), + LedgerEffects, + ) + } yield { + assert( + witnessesTransactions.size == 1, + s"There should be one transaction for Witnesses, but there was ${witnessesTransactions.size}", + ) + + assert( + witnessesTransaction.events.sizeIs == 1, + s"The transaction for creating the Witness contract should only contain a single event, but has ${witnessesTransaction.events.size}", + ) + val creationEvent = witnessesTransaction.events.head + assert( + creationEvent.event.isCreated, + s"The event in the transaction for creating the Witness should be a CreatedEvent, but was ${creationEvent.event}", + ) + + val expectedWitnessesOfCreation = Seq(alice, bob).map(_.getValue).sorted + assert( + creationEvent.getCreated.witnessParties.sorted == expectedWitnessesOfCreation, + s"The parties for witnessing the CreatedEvent should be $expectedWitnessesOfCreation, but were ${creationEvent.getCreated.witnessParties}", + ) + assert( + nonConsumingLedgerEffects.events.size == 1, + s"The transaction for exercising the non-consuming choice should only contain a single event, but has ${nonConsumingLedgerEffects.events.size}", + ) + val nonConsumingEvent = nonConsumingLedgerEffects.events.head + assert( + nonConsumingEvent.event.isExercised, + s"The event in the transaction for exercising the non-consuming choice should be an ExercisedEvent, but was ${nonConsumingEvent.event}", + ) + + val expectedWitnessesOfNonConsumingChoice = Seq(alice, charlie).map(_.getValue).sorted + assert( + nonConsumingEvent.getExercised.witnessParties.sorted == expectedWitnessesOfNonConsumingChoice, + s"The parties for witnessing the non-consuming ExercisedEvent should be $expectedWitnessesOfNonConsumingChoice, but were ${nonConsumingEvent.getCreated.witnessParties}", + ) + assert( + consumingLedgerEffects.events.size == 1, + s"The transaction for exercising the consuming choice should only contain a single event, but has ${consumingLedgerEffects.events.size}", + ) + + val consumingEvent = consumingLedgerEffects.events.head + assert( + consumingEvent.event.isExercised, + s"The event in the transaction for exercising the consuming choice should be an ExercisedEvent, but was ${consumingEvent.event}", + ) + val expectedWitnessesOfConsumingChoice = Seq(alice, bob, charlie).map(_.getValue).sorted + assert( + consumingEvent.getExercised.witnessParties.sorted == expectedWitnessesOfConsumingChoice, + s"The parties for witnessing the consuming ExercisedEvent should be $expectedWitnessesOfConsumingChoice, but were ${consumingEvent.getCreated.witnessParties}", + ) + + } + }) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/WronglyTypedContractIdIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/WronglyTypedContractIdIT.scala new file mode 100644 index 0000000000..39423ec760 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/WronglyTypedContractIdIT.scala @@ -0,0 +1,64 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
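WitnessesIT above shows the explicit-disclosure mechanic: Charlie can exercise choices on a contract he is not a stakeholder of only because the created-event blob is passed along with the command. A small sketch of that construction for an arbitrary created event, assuming the v2 `CreatedEvent` message; the helper name is hypothetical, and an empty `synchronizerId` leaves synchronizer selection to the participant:

import com.daml.ledger.api.v2.commands.DisclosedContract
import com.daml.ledger.api.v2.event.CreatedEvent

def toDisclosedContract(created: CreatedEvent): DisclosedContract =
  DisclosedContract(
    templateId = created.templateId,
    contractId = created.contractId,
    createdEventBlob = created.createdEventBlob,
    synchronizerId = "",
  )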
+ +package com.daml.ledger.api.testtool.suites.v2_1 + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.test.java.model.test.{Dummy, DummyWithParam} +import com.digitalasset.canton.ledger.error.groups.CommandExecutionErrors + +import scala.jdk.CollectionConverters.* + +final class WronglyTypedContractIdIT extends LedgerTestSuite { + import CompanionImplicits.* + + test("WTExerciseFails", "Exercising on a wrong type fails", allocate(SingleParty))( + implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + dummy <- ledger.create(party, new Dummy(party)) + fakeDummyWithParam = new DummyWithParam.ContractId(dummy.contractId) + exerciseFailure <- ledger + .exercise(party, fakeDummyWithParam.exerciseDummyChoice2("txt")) + .mustFail("exercising on a wrong type") + } yield { + assertGrpcError( + exerciseFailure, + CommandExecutionErrors.Interpreter.WronglyTypedContract, + Some("wrongly typed contract id"), + checkDefiniteAnswerMetadata = true, + ) + } + } + ) + + test( + "WTMultipleExerciseFails", + "Exercising on a wrong type fails after correct exercise in same transaction", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + dummy: Dummy.ContractId <- ledger.create(party, new Dummy(party)) + fakeDummyWithParam = new DummyWithParam.ContractId(dummy.contractId) + failure <- ledger + .submitAndWait( + ledger.submitAndWaitRequest( + party, + (dummy + .exerciseClone() + .commands + .asScala ++ fakeDummyWithParam.exerciseDummyChoice2("").commands.asScala).asJava, + ) + ) + .mustFail("exercising on a wrong type") + } yield { + assertGrpcError( + failure, + CommandExecutionErrors.Interpreter.WronglyTypedContract, + Some("wrongly typed contract id"), + checkDefiniteAnswerMetadata = true, + ) + } + }) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/objectmeta/ObjectMetaTests.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/objectmeta/ObjectMetaTests.scala new file mode 100644 index 0000000000..11df20d678 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/objectmeta/ObjectMetaTests.scala @@ -0,0 +1,406 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.suites.v2_1.objectmeta + +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors + +import java.nio.charset.StandardCharsets +import scala.concurrent.Future + +// Gather all the tests common to resources that support ObjectMeta metadata +trait ObjectMetaTests extends ObjectMetaTestsBase { + + private[objectmeta] def maxAnnotationsSizeInBytes = 256 * 1024 + private[objectmeta] def valueExceedingAnnotationsLimit = "a" * maxAnnotationsSizeInBytes + private[objectmeta] def largestAllowedValue = "a" * (maxAnnotationsSizeInBytes - 1) + private[objectmeta] def annotationsOverSizeLimit = Map("a" -> largestAllowedValue, "c" -> "d") + private[objectmeta] def annotationsBelowMaxSizeLimitBecauseNotCountingEmptyValuedKeys = + Map("a" -> largestAllowedValue, "cc" -> "") + + private[objectmeta] def getAnnotationsBytes(annotations: Map[String, String]): Int = + annotations.iterator.map { case (k, v) => + k.getBytes(StandardCharsets.UTF_8).length + v.getBytes(StandardCharsets.UTF_8).length + }.sum + + assertEquals( + valueExceedingAnnotationsLimit.getBytes(StandardCharsets.UTF_8).length, + maxAnnotationsSizeInBytes, + ) + assertEquals( + getAnnotationsBytes(annotationsOverSizeLimit), + getAnnotationsBytes(annotationsBelowMaxSizeLimitBecauseNotCountingEmptyValuedKeys), + ) + + private[objectmeta] def invalidKey = ".aaaa.management.daml/foo_" + private[objectmeta] def validKey = "0-aaaa.management.daml/foo" + testWithFreshResource( + "AllowSpecifyingResourceVersionAndResourceIdInUpdateMask", + "Allow specifying resource_version and resource's id fields in the update mask", + )()(implicit ec => + implicit ledger => + resource => + update( + id = getId(resource), + annotations = Map.empty, + resourceVersion = "", + updatePaths = Seq(resourceIdPath, resourceVersionUpdatePath), + ).map { objectMeta => + assertEquals( + objectMeta.annotations, + extractAnnotations(resource), + ) + } + ) + + testWithFreshResource( + "UpdatingResourceUtilizingConcurrentChangeControl", + "Updating a resource utilizing the concurrent change control by means of the resource version", + )()(implicit ec => + implicit ledger => + resource => { + val rv1 = extractMetadata(resource).resourceVersion + for { + // Updating a resource with the concurrent change detection disabled + rv2: String <- update( + id = getId(resource), + annotations = Map( + "k1" -> "v1", + "k2" -> "v2", + ), + resourceVersion = "", + updatePaths = Seq(annotationsUpdatePath), + ).map { metadata => + assertEquals( + metadata.annotations, + Map("k1" -> "v1", "k2" -> "v2"), + ) + assertValidResourceVersionString(rv1, "a new resource") + val rv2 = metadata.resourceVersion + assertValidResourceVersionString(rv2, "an updated resource") + assert( + rv1 != rv2, + s"A resource's resource_version before and after an update must be different but was the same: '$rv2'", + ) + rv2 + } + // Updating a resource with the concurrent change detection enabled but providing an outdated resource version. 
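+        // (The resource_version acts as an optimistic lock, conceptually similar to an HTTP ETag:
+        // a stale value is rejected with a concurrent-update error, while an empty string skips
+        // the check entirely, as exercised in the step above.)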
+        _ <- update(
+          id = getId(resource),
+          annotations = Map(
+            "k1" -> "v1",
+            "k2" -> "v2",
+          ),
+          resourceVersion = rv1,
+          updatePaths = Seq(annotationsUpdatePath),
+        ).mustFailWith(
+          "updating a resource using an outdated resource version",
+          concurrentUserUpdateDetectedErrorCode,
+        )
+        // Updating a resource with the concurrent change detection enabled and providing the up-to-date resource version
+        _ <- update(
+          id = getId(resource),
+          annotations = Map(
+            "k1" -> "v1a",
+            "k2" -> "",
+            "k3" -> "v3",
+          ),
+          resourceVersion = rv2,
+          updatePaths = Seq(annotationsUpdatePath),
+        ).map { metadata =>
+          assertEquals(
+            metadata.annotations,
+            Map("k1" -> "v1a", "k3" -> "v3"),
+          )
+          val rv3 = metadata.resourceVersion
+          assert(
+            rv2 != rv3,
+            s"A resource's resource_version before and after an update must be different but was the same: '$rv2'",
+          )
+          assertValidResourceVersionString(
+            rv3,
+            "updating a resource using the up-to-date resource version",
+          )
+        }
+      } yield ()
+    }
+  )
+
+  testWithFreshResource(
+    "RaceConditionUpdateResourceAnnotations",
+    "Tests the scenario of multiple concurrent annotation update RPCs for the same resource",
+  )()(implicit ec =>
+    implicit ledger =>
+      resource => {
+        val attempts = (1 to 10).toVector
+        for {
+          _ <- Future.traverse(attempts) { attemptNo =>
+            update(
+              id = getId(resource),
+              annotations = Map(s"key$attemptNo" -> "a"),
+              updatePaths = Seq(annotationsUpdatePath),
+            )
+          }
+          annotations <- fetchNewestAnnotations(id = getId(resource))
+        } yield {
+          assertEquals(
+            annotations,
+            Map(
+              "key1" -> "a",
+              "key2" -> "a",
+              "key3" -> "a",
+              "key4" -> "a",
+              "key5" -> "a",
+              "key6" -> "a",
+              "key7" -> "a",
+              "key8" -> "a",
+              "key9" -> "a",
+              "key10" -> "a",
+            ),
+          )
+        }
+      }
+  )
+
+  testWithFreshResource(
+    "FailingUpdateRequestsWhenUpdatePathIsDuplicated",
+    "Failing an update request when an update path is duplicated",
+  )()(implicit ec =>
+    implicit ledger =>
+      resource =>
+        update(
+          id = getId(resource),
+          annotations = Map("k1" -> "v1"),
+          updatePaths = Seq(annotationsUpdatePath, annotationsUpdatePath),
+        ).mustFailWith(
+          "updating a resource",
+          invalidUpdateRequestErrorCode,
+        )
+  )
+
+  testWithFreshResource(
+    "FailingUpdateRequestWhenNoUpdatePaths",
+    "Failing an update request when the update mask is empty",
+  )()(implicit ec =>
+    implicit ledger =>
+      resource =>
+        update(
+          id = getId(resource),
+          annotations = Map("k1" -> "v1"),
+          updatePaths = Seq.empty,
+        )
+          .mustFailWith(
+            "updating a resource",
+            invalidUpdateRequestErrorCode,
+          )
+  )
+
+  testWithFreshResource(
+    "FailingUpdateRequestWhenUpdateMaskHasUnknownFieldPath",
+    "Failing an update request when the update mask contains a path to an unknown field",
+  )()(implicit ec =>
+    implicit ledger =>
+      resource =>
+        for {
+          _ <- update(
+            id = getId(resource),
+            annotations = Map.empty,
+            updatePaths = Seq("unknown_field"),
+          ).mustFailWith(
+            "fail 1",
+            invalidUpdateRequestErrorCode,
+          )
+          _ <- update(
+            id = getId(resource),
+            annotations = Map.empty,
+            updatePaths = Seq("aaa!bbb"),
+          ).mustFailWith(
+            "fail 2",
+            invalidUpdateRequestErrorCode,
+          )
+          _ <- update(
+            id = getId(resource),
+            annotations = Map.empty,
+            updatePaths = Seq(""),
+          ).mustFailWith(
+            "fail 3",
+            invalidUpdateRequestErrorCode,
+          )
+        } yield ()
+  )
+
+  testWithoutResource(
+    "FailingToCreateResourceWhenAnnotationsValueIsEmpty",
+    "Failing to create a resource when an annotation's value is empty",
+  ) { implicit ec => implicit ledger => p =>
+    createResourceWithAnnotations(
+      p.minSynchronizers,
+      annotations = Map("k2" -> ""),
Map("k2" -> ""), + ).mustFailWith( + "creating a resource", + RequestValidationErrors.InvalidArgument, + Some("value of an annotation is empty"), + ) + } + + testWithoutResource( + "TestAnnotationsKeySyntaxOnResourceCreation", + "Test the annotations' key syntax on a resource creation", + ) { implicit ec => implicit ledger => p => + createResourceWithAnnotations( + p.minSynchronizers, + annotations = Map(invalidKey -> "a"), + ).mustFailWith( + "creating a resource", + errorCode = RequestValidationErrors.InvalidArgument, + exceptionMessageSubstring = Some("has invalid syntax"), + ) + } + + testWithoutResource( + "TestAnnotationsSizeLimitsOnResourceCreation", + "Test annotations' size limit on creation", + ) { implicit ec => implicit ledger => p => + createResourceWithAnnotations( + p.minSynchronizers, + annotations = Map("a" -> valueExceedingAnnotationsLimit), + ) + .mustFailWith( + "total size of annotations exceeds 256kb max limit", + errorCode = RequestValidationErrors.InvalidArgument, + exceptionMessageSubstring = Some("larger than the limit of 256kb"), + ) + } + + testWithFreshResource( + "UpdateAnnotationsUsingNonEmptyMap", + "Update the annotations using an update paths with a non-empty map value", + )(annotations = Map("k1" -> "v1", "k2" -> "v2", "k3" -> "v3"))(implicit ec => + implicit ledger => + resource => { + + def updateAnnotations(updatePath: String): Future[Unit] = + update( + id = getId(resource), + annotations = Map( + // update a value for an existing key + "k1" -> "v1a", + // remove an existing key-value pair + "k3" -> "", + // add a new key-value pair + "k4" -> "v4", + // attempt to remove a key-value pair which doesn't exist + "k5" -> "", + ), + updatePaths = Seq(updatePath), + ).map { objectMeta => + assertEquals( + "updating annotations", + objectMeta.annotations, + Map("k1" -> "v1a", "k2" -> "v2", "k4" -> "v4"), + ) + } + + for { + _ <- updateAnnotations(annotationsUpdatePath) + _ <- updateAnnotations(annotationsShortUpdatePath) + } yield () + } + ) + + testWithFreshResource( + "UpdateAnnotationsUsingEmptyMap", + "Update the annotations using update paths with the empty map value", + )(annotations = Map("k1" -> "v1", "k2" -> "v2", "k3" -> "v3"))(implicit ec => + implicit ledger => + resource => { + + def updateAnnotations(updatePath: String): Future[Unit] = + update( + id = getId(resource), + annotations = Map.empty, + updatePaths = Seq(updatePath), + ).map { objectMeta => + assertEquals( + "updating the annotations", + objectMeta.annotations, + Map("k1" -> "v1", "k2" -> "v2", "k3" -> "v3"), + ) + } + + for { + _ <- updateAnnotations(annotationsUpdatePath) + _ <- updateAnnotations(annotationsShortUpdatePath) + } yield () + } + ) + + testWithFreshResource( + "TestAnnotationsKeySyntaxOnResourceUpdateWhenAddingKey", + "Test the annotations' key syntax for the update RPC", + )(annotations = Map(validKey -> "a"))(implicit ec => + implicit ledger => + resource => + update( + id = getId(resource), + annotations = Map(invalidKey -> "a"), + updatePaths = Seq(annotationsUpdatePath), + ).mustFailWith( + "updating the annotations", + errorCode = RequestValidationErrors.InvalidArgument, + exceptionMessageSubstring = Some("has invalid syntax"), + ) + ) + + testWithFreshResource( + "TestAnnotationsKeySyntaxOnResourceUpdateEvenWhenDeletingNonExistentKey", + "Test the annotations' key syntax for the update RPC even for map entries that represent a deletion of a non-existent key", + )()(implicit ec => + implicit ledger => + resource => + update( + id = getId(resource), + annotations 
+          updatePaths = Seq(annotationsUpdatePath),
+        ).mustFailWith(
+          "deleting an annotation's key",
+          errorCode = RequestValidationErrors.InvalidArgument,
+          exceptionMessageSubstring = Some("has invalid syntax"),
+        )
+  )
+
+  testWithFreshResource(
+    "NotCountingEmptyValuedKeysToTheAnnotationsSizeLimitOnUpdate",
+    "Do not count the keys in the provided annotations map that correspond to deletions towards the annotations' max size limit",
+  )() { implicit ec => implicit ledger => resource =>
+    update(
+      id = getId(resource),
+      annotations = annotationsBelowMaxSizeLimitBecauseNotCountingEmptyValuedKeys,
+      updatePaths = Seq(annotationsUpdatePath),
+    ).map { objectMeta =>
+      assertEquals(
+        "updating and not exceeding annotations' max size limit because deletions are not counted towards the limit",
+        objectMeta.annotations,
+        Map("a" -> largestAllowedValue),
+      )
+    }
+  }
+
+  testWithFreshResource(
+    "TestAnnotationsMaxSizeLimitsWhenUpdatingResource",
+    "Test the annotations' max size limit on a resource update RPC",
+  )(annotations = Map("a" -> largestAllowedValue)) { implicit ec => implicit ledger => resource =>
+    update(
+      id = getId(resource),
+      annotations = Map("a" -> valueExceedingAnnotationsLimit),
+      updatePaths = Seq(annotationsUpdatePath),
+    )
+      .mustFailWith(
+        "total size of annotations, in a user update call, is over 256kb",
+        errorCode = RequestValidationErrors.InvalidArgument,
+        exceptionMessageSubstring = Some("larger than the limit of 256kb"),
+      )
+  }
+
+}
diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/objectmeta/ObjectMetaTestsBase.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/objectmeta/ObjectMetaTestsBase.scala
new file mode 100644
index 0000000000..0e17294f4d
--- /dev/null
+++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/objectmeta/ObjectMetaTestsBase.scala
@@ -0,0 +1,79 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+ +package com.daml.ledger.api.testtool.suites.v2_1.objectmeta + +import com.daml.ledger.api.testtool.infrastructure.Allocation.Participants +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.v2.admin.object_meta.ObjectMeta +import com.digitalasset.base.error.ErrorCode + +import scala.concurrent.{ExecutionContext, Future} + +trait ObjectMetaTestsBase { + + // A resource containing an ObjectMeta metadata + private[objectmeta] type Resource + private[objectmeta] type ResourceId + + private[objectmeta] def getId(resource: Resource): ResourceId + + private[objectmeta] def annotationsUpdateRequestFieldPath: String + + private[objectmeta] def resourceVersionUpdatePath: String + private[objectmeta] def annotationsUpdatePath: String + private[objectmeta] def annotationsShortUpdatePath: String + private[objectmeta] def resourceIdPath: String + + private[objectmeta] def extractAnnotations(resource: Resource): Map[String, String] + private[objectmeta] def extractMetadata(resource: Resource): ObjectMeta + + private[objectmeta] def update( + id: ResourceId, + annotations: Map[String, String], + updatePaths: Seq[String], + resourceVersion: String = "", + )(implicit + ec: ExecutionContext, + ledger: ParticipantTestContext, + ): Future[ObjectMeta] + + private[objectmeta] def fetchNewestAnnotations( + id: ResourceId + )(implicit + ec: ExecutionContext, + ledger: ParticipantTestContext, + ): Future[Map[String, String]] + + private[objectmeta] def createResourceWithAnnotations( + connectedSynchronizers: Int, + annotations: Map[String, String], + )(implicit + ec: ExecutionContext, + ledger: ParticipantTestContext, + ): Future[Map[String, String]] + + private[objectmeta] def testWithoutResource( + shortIdentifier: String, + description: String, + )( + body: ExecutionContext => ParticipantTestContext => Participants => Future[Unit] + ): Unit + + private[objectmeta] def testWithFreshResource( + shortIdentifier: String, + description: String, + )( + annotations: Map[String, String] = Map.empty + )( + body: ExecutionContext => ParticipantTestContext => Resource => Future[Unit] + ): Unit + + private[objectmeta] def assertValidResourceVersionString(v: String, sourceMsg: String): Unit = + assert(v.nonEmpty, s"resource version (from $sourceMsg) must be non empty") + + private[objectmeta] def concurrentUserUpdateDetectedErrorCode: ErrorCode + + private[objectmeta] def invalidUpdateRequestErrorCode: ErrorCode + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/objectmeta/PartyManagementServiceObjectMetaIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/objectmeta/PartyManagementServiceObjectMetaIT.scala new file mode 100644 index 0000000000..7ab7253dc4 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/objectmeta/PartyManagementServiceObjectMetaIT.scala @@ -0,0 +1,115 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.suites.v2_1.objectmeta + +import com.daml.ledger.api.testtool.infrastructure.Allocation.Participants +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.testtool.suites.v2_1.PartyManagementITBase +import com.daml.ledger.api.v2.admin.object_meta.ObjectMeta +import com.daml.ledger.api.v2.admin.party_management_service.{ + AllocatePartyRequest, + GetPartiesRequest, + PartyDetails, +} +import com.digitalasset.base.error.ErrorCode +import com.digitalasset.canton.ledger.error.groups.AdminServiceErrors + +import scala.concurrent.{ExecutionContext, Future} + +class PartyManagementServiceObjectMetaIT extends PartyManagementITBase with ObjectMetaTests { + + type Resource = PartyDetails + type ResourceId = String + + override private[objectmeta] def getId(resource: Resource): ResourceId = resource.party + + override private[objectmeta] def annotationsUpdateRequestFieldPath: String = + "party_details.local_metadata.annotations" + + override private[objectmeta] def resourceVersionUpdatePath: String = + "local_metadata.resource_version" + + override private[objectmeta] def annotationsUpdatePath: String = + "local_metadata.annotations" + + override private[objectmeta] def annotationsShortUpdatePath = "local_metadata" + + override private[objectmeta] def resourceIdPath = "party" + + override private[objectmeta] def extractAnnotations(resource: Resource): Map[String, String] = + resource.getLocalMetadata.annotations + + override private[objectmeta] def extractMetadata(resource: Resource): ObjectMeta = + resource.getLocalMetadata + + override private[objectmeta] def testWithFreshResource( + shortIdentifier: String, + description: String, + )( + annotations: Map[String, String] = Map.empty + )( + body: ExecutionContext => ParticipantTestContext => Resource => Future[Unit] + ): Unit = + testWithFreshPartyDetails( + shortIdentifier = shortIdentifier, + description = description, + )(annotations = annotations)(body) + + override private[objectmeta] def testWithoutResource( + shortIdentifier: String, + description: String, + )( + body: ExecutionContext => ParticipantTestContext => Participants => Future[Unit] + ): Unit = + testWithoutPartyDetails( + shortIdentifier = shortIdentifier, + description = description, + )(body) + + override private[objectmeta] def createResourceWithAnnotations( + connectedSynchronizers: Int, + annotations: Map[String, String], + )(implicit ec: ExecutionContext, ledger: ParticipantTestContext): Future[Map[String, String]] = { + val req = AllocatePartyRequest( + partyIdHint = "", + localMetadata = Some(ObjectMeta(resourceVersion = "", annotations = annotations)), + identityProviderId = "", + synchronizerId = "", + userId = "", + ) + ledger + .allocateParty(req, connectedSynchronizers) + .map { case (p, _) => extractUpdatedAnnotations(p) } + } + + override private[objectmeta] def fetchNewestAnnotations( + id: ResourceId + )(implicit ec: ExecutionContext, ledger: ParticipantTestContext): Future[Map[String, String]] = + ledger + .getParties(GetPartiesRequest(parties = Seq(id), identityProviderId = "")) + .map(_.partyDetails.head.getLocalMetadata.annotations) + + override private[objectmeta] def update( + id: ResourceId, + annotations: Map[String, String], + updatePaths: Seq[String], + resourceVersion: String = "", + )(implicit ec: ExecutionContext, ledger: ParticipantTestContext): Future[ObjectMeta] = { + val req = updateRequest( + party = id, + annotations = annotations, + 
resourceVersion = resourceVersion, + updatePaths = updatePaths, + ) + ledger + .updatePartyDetails(req) + .map(_.getPartyDetails.getLocalMetadata) + } + + override private[objectmeta] def concurrentUserUpdateDetectedErrorCode: ErrorCode = + AdminServiceErrors.PartyManagement.ConcurrentPartyDetailsUpdateDetected + + override private[objectmeta] def invalidUpdateRequestErrorCode: ErrorCode = + AdminServiceErrors.PartyManagement.InvalidUpdatePartyDetailsRequest +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/objectmeta/UserManagementServiceObjectMetaIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/objectmeta/UserManagementServiceObjectMetaIT.scala new file mode 100644 index 0000000000..6d1c955d4e --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/objectmeta/UserManagementServiceObjectMetaIT.scala @@ -0,0 +1,124 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_1.objectmeta + +import com.daml.ledger.api.testtool.infrastructure.Allocation.Participants +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.testtool.suites.v2_1.UserManagementServiceITBase +import com.daml.ledger.api.v2.admin.object_meta.ObjectMeta +import com.daml.ledger.api.v2.admin.user_management_service.{ + CreateUserRequest, + GetUserRequest, + User, +} +import com.digitalasset.base.error.ErrorCode +import com.digitalasset.canton.ledger.error.groups.AdminServiceErrors + +import scala.concurrent.{ExecutionContext, Future} + +class UserManagementServiceObjectMetaIT extends UserManagementServiceITBase with ObjectMetaTests { + + type Resource = User + type ResourceId = String + + override private[objectmeta] def getId(resource: Resource): ResourceId = resource.id + + override private[objectmeta] def annotationsUpdateRequestFieldPath: String = + "user.metadata.annotations" + + override private[objectmeta] def annotationsUpdatePath: String = + "metadata.annotations" + + override private[objectmeta] def annotationsShortUpdatePath = "metadata" + + override private[objectmeta] def resourceVersionUpdatePath = "metadata.resource_version" + + override private[objectmeta] def resourceIdPath = "id" + + override private[objectmeta] def extractAnnotations(resource: Resource): Map[String, String] = + resource.getMetadata.annotations + + override private[objectmeta] def extractMetadata(resource: Resource): ObjectMeta = + resource.getMetadata + + override private[objectmeta] def testWithFreshResource( + shortIdentifier: String, + description: String, + )( + annotations: Map[String, String] = Map.empty + )( + body: ExecutionContext => ParticipantTestContext => Resource => Future[Unit] + ): Unit = + userManagementTest( + shortIdentifier = shortIdentifier, + description = description, + ) { implicit ec => ledger => _ => + withFreshUser( + annotations = annotations + ) { user => + body(ec)(ledger)(user) + }(ledger, ec) + } + + override private[objectmeta] def testWithoutResource( + shortIdentifier: String, + description: String, + )( + body: ExecutionContext => ParticipantTestContext => Participants => Future[Unit] + ): Unit = + userManagementTest( + shortIdentifier = shortIdentifier, + description = description, + ) { implicit ec => ledger => p => + 
body(ec)(ledger)(p) + } + + override private[objectmeta] def createResourceWithAnnotations( + connectedSynchronizers: Int, + annotations: Map[String, String], + )(implicit ec: ExecutionContext, ledger: ParticipantTestContext): Future[Map[String, String]] = { + val userId = ledger.nextUserId() + val req = CreateUserRequest( + user = Some( + newUser( + id = userId, + annotations = annotations, + ) + ), + rights = Nil, + ) + ledger.userManagement + .createUser(req) + .map(extractAnnotations) + } + + override private[objectmeta] def fetchNewestAnnotations( + id: ResourceId + )(implicit ec: ExecutionContext, ledger: ParticipantTestContext): Future[Map[String, String]] = + ledger.userManagement + .getUser(GetUserRequest(userId = id, identityProviderId = "")) + .map(_.user.get.getMetadata.annotations) + + override private[objectmeta] def update( + id: ResourceId, + annotations: Map[String, String], + updatePaths: Seq[String], + resourceVersion: String = "", + )(implicit ec: ExecutionContext, ledger: ParticipantTestContext): Future[ObjectMeta] = { + val req = updateRequest( + id = id, + annotations = annotations, + resourceVersion = resourceVersion, + updatePaths = updatePaths, + ) + ledger.userManagement + .updateUser(req) + .map(_.getUser.getMetadata) + } + + override private[objectmeta] def concurrentUserUpdateDetectedErrorCode: ErrorCode = + AdminServiceErrors.UserManagement.ConcurrentUserUpdateDetected + override private[objectmeta] def invalidUpdateRequestErrorCode: ErrorCode = + AdminServiceErrors.UserManagement.InvalidUpdateUserRequest +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev.scala new file mode 100644 index 0000000000..80404c3663 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev.scala @@ -0,0 +1,29 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.suites + +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.digitalasset.canton.config.TlsClientConfig + +package object v2_dev { + def default(timeoutScaleFactor: Double): Vector[LedgerTestSuite] = + v2_1.default(timeoutScaleFactor) ++ Vector( + new ContractKeysCommandDeduplicationIT, + new ContractKeysContractIdIT, + new ContractKeysDeeplyNestedValueIT, + new ContractKeysDivulgenceIT, + new ContractKeysExplicitDisclosureIT, + new ContractKeysIT, + new ContractKeysMultiPartySubmissionIT, + new ContractKeysWronglyTypedContractIdIT, + new EventsDescendantsIT, + new ExceptionRaceConditionIT, + new ExceptionsIT, + new PrefetchContractKeysIT, + new RaceConditionIT, + ) + + def optional(tlsConfig: Option[TlsClientConfig]): Vector[LedgerTestSuite] = + v2_1.optional(tlsConfig) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysCommandDeduplicationIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysCommandDeduplicationIT.scala new file mode 100644 index 0000000000..76be9d06fb --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysCommandDeduplicationIT.scala @@ -0,0 +1,73 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_dev + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.api.v2.command_service.SubmitAndWaitRequest +import com.daml.ledger.test.java.experimental.da.types.Tuple2 +import com.daml.ledger.test.java.experimental.test.{TextKey, TextKeyOperations} +import com.digitalasset.canton.ledger.api.SubmissionIdGenerator +import com.digitalasset.daml.lf.data.Ref.SubmissionId + +import scala.concurrent.Future +import scala.concurrent.duration.* +import scala.jdk.CollectionConverters.* +import scala.util.{Failure, Success} + +final class ContractKeysCommandDeduplicationIT extends LedgerTestSuite { + + import ContractKeysCompanionImplicits.* + + test( + s"StopOnCompletionFailure", + "Stop deduplicating commands on completion failure", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val key = ledger.nextKeyId() + + for { + // Create a helper and a text key + ko <- ledger.create(party, new TextKeyOperations(party)) + _ <- ledger.create(party, new TextKey(party, key, List().asJava)) + + // Create two competing requests + requestA = ledger.submitAndWaitRequest( + party, + ko.exerciseTKOFetchAndRecreate(new Tuple2(party.getValue, key)).commands, + ) + requestB = ledger.submitAndWaitRequest( + party, + ko.exerciseTKOFetchAndRecreate(new Tuple2(party.getValue, key)).commands, + ) + + // Submit both requests in parallel. 
+ // Either both succeed (if one transaction is recorded faster than the other submission starts command interpretation, unlikely) + // Or one submission is rejected (if one transaction is recorded during the call of lookupMaximumLedgerTime() in [[LedgerTimeHelper]], unlikely) + // Or one transaction is rejected (this is what we want to test) + submissionResults <- Future.traverse(List(requestA, requestB))(request => + ledger.submitAndWait(request).transform(result => Success(request -> result)) + ) + + // Resubmit a failed command. + // No matter what the rejection reason was (hopefully it was a rejected transaction), + // a resubmission of exactly the same command should succeed. + _ <- submissionResults + .collectFirst { case (request, Failure(_)) => request } + .fold(Future.unit)(request => + ledger.submitAndWait(updateWithFreshSubmissionId(request)).map(_ => ()) + ) + } yield { + () + } + }) + + private def updateWithFreshSubmissionId(request: SubmitAndWaitRequest): SubmitAndWaitRequest = + request.update(_.commands.submissionId := newSubmissionId()) + + private def newSubmissionId(): SubmissionId = SubmissionIdGenerator.Random.generate() + + val defaultCantonSkew = 365.days + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysCompanionImplicits.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysCompanionImplicits.scala new file mode 100644 index 0000000000..4c38f5dd8f --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysCompanionImplicits.scala @@ -0,0 +1,71 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.suites.v2_dev + +import com.daml.ledger.javaapi.data.codegen.ContractCompanion +import com.daml.ledger.test.java.experimental.da.types +import com.daml.ledger.test.java.experimental.test.{ + Delegated, + Delegation, + LocalKeyVisibilityOperations, + MaintainerNotSignatory, + ShowDelegated, + TextKey, + TextKeyOperations, + WithKey, +} +import com.daml.ledger.test.java.model.test.{CallablePayout, Dummy} + +object ContractKeysCompanionImplicits { + + implicit val dummyCompanion + : ContractCompanion.WithoutKey[Dummy.Contract, Dummy.ContractId, Dummy] = Dummy.COMPANION + implicit val textKeyCompanion: ContractCompanion.WithKey[ + TextKey.Contract, + TextKey.ContractId, + TextKey, + types.Tuple2[String, String], + ] = TextKey.COMPANION + implicit val textKeyOperationsCompanion: ContractCompanion.WithoutKey[ + TextKeyOperations.Contract, + TextKeyOperations.ContractId, + TextKeyOperations, + ] = TextKeyOperations.COMPANION + implicit val callablePayoutCompanion: ContractCompanion.WithoutKey[ + CallablePayout.Contract, + CallablePayout.ContractId, + CallablePayout, + ] = CallablePayout.COMPANION + implicit val delegatedCompanion: ContractCompanion.WithKey[ + Delegated.Contract, + Delegated.ContractId, + Delegated, + types.Tuple2[String, String], + ] = Delegated.COMPANION + implicit val delegationCompanion + : ContractCompanion.WithoutKey[Delegation.Contract, Delegation.ContractId, Delegation] = + Delegation.COMPANION + implicit val showDelegatedCompanion: ContractCompanion.WithoutKey[ + ShowDelegated.Contract, + ShowDelegated.ContractId, + ShowDelegated, + ] = ShowDelegated.COMPANION + implicit val maintainerNotSignatoryCompanion: ContractCompanion.WithKey[ + MaintainerNotSignatory.Contract, + MaintainerNotSignatory.ContractId, + MaintainerNotSignatory, + String, + ] = MaintainerNotSignatory.COMPANION + implicit val localKeyVisibilityOperationsCompanion: ContractCompanion.WithoutKey[ + LocalKeyVisibilityOperations.Contract, + LocalKeyVisibilityOperations.ContractId, + LocalKeyVisibilityOperations, + ] = LocalKeyVisibilityOperations.COMPANION + implicit val withKeyCompanion: ContractCompanion.WithKey[ + WithKey.Contract, + WithKey.ContractId, + WithKey, + String, + ] = WithKey.COMPANION +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysContractIdIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysContractIdIT.scala new file mode 100644 index 0000000000..18c3ff61e9 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysContractIdIT.scala @@ -0,0 +1,213 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.suites.v2_dev + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.{ + assertErrorCode, + assertGrpcError, + fail, +} +import com.daml.ledger.api.testtool.infrastructure.participant.{Features, ParticipantTestContext} +import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, Party} +import com.daml.ledger.api.testtool.suites.v2_dev.ContractKeysContractIdIT.* +import com.daml.ledger.javaapi.data.codegen.ContractCompanion +import com.daml.ledger.javaapi.data.{ContractId, DamlRecord} +import com.daml.ledger.test.java.experimental.contractidtests.{Contract, ContractRef} +import com.digitalasset.base.error.ErrorCode +import com.digitalasset.canton.ledger.error.groups.{ + CommandExecutionErrors, + ConsistencyErrors, + RequestValidationErrors, +} +import com.digitalasset.canton.util.TryUtil +import io.grpc.StatusRuntimeException + +import scala.concurrent.{ExecutionContext, Future} +import scala.util.{Failure, Success, Try} + +// See `daml-lf/spec/contract-id.rst` for more information on contract ID formats. +// Check the Ledger API accepts or rejects non-suffixed contract ID. +// - Central committer ledger implementations (sandboxes, KV...) may accept non-suffixed CID +// - Distributed ledger implementations (e.g. Canton) must reject non-suffixed CID +final class ContractKeysContractIdIT extends LedgerTestSuite { + implicit val contractCompanion + : ContractCompanion.WithoutKey[Contract.Contract$, Contract.ContractId, Contract] = + Contract.COMPANION + implicit val contractRefCompanion: ContractCompanion.WithKey[ + ContractRef.Contract, + ContractRef.ContractId, + ContractRef, + String, + ] = ContractRef.COMPANION + + List( + TestConfiguration( + description = "non-suffixed v1", + example = nonSuffixedV1Cid, + accepted = false, + disabledReason = "non-suffixed V1 contract IDs are supported", + failsInPreprocessing = true, + ), + TestConfiguration( + description = "suffixed v1", + example = suffixedV1Cid, + accepted = true, + ), + ).foreach { + case TestConfiguration( + cidDescription, + example, + accepted, + isSupported, + disabledReason, + failsInPreprocessing, + ) => + val result = if (accepted) "Accept" else "Reject" + + def test( + description: String, + parseErrorCode: ErrorCode = RequestValidationErrors.InvalidArgument, + )( + update: ExecutionContext => ( + ParticipantTestContext, + Party, + ) => Future[Try[_]] + ): Unit = + super.test( + shortIdentifier = result + camelCase(cidDescription) + "CKCid" + camelCase(description), + description = result + "s " + cidDescription + " Contract Id in " + description, + partyAllocation = allocate(SingleParty), + enabled = isSupported, + disabledReason = disabledReason, + )(implicit ec => { case Participants(Participant(alpha, Seq(party))) => + update(ec)(alpha, party).map { + case Success(_) if accepted => () + case Failure(err: Throwable) if !accepted => + val (prefix, errorCode) = + if (failsInPreprocessing) + ( + "Illegal Contract ID", + CommandExecutionErrors.Preprocessing.PreprocessingFailed, + ) + else + ("cannot parse ContractId", parseErrorCode) + assertGrpcError( + err, + errorCode, + Some(s"""$prefix "$example""""), + checkDefiniteAnswerMetadata = true, + ) + () + case otherwise => + fail("Unexpected " + otherwise.fold(err => s"failure: $err", _ => "success")) + } + }) + + test("create payload") { implicit ec => (alpha, party) => + alpha + .create(party, new ContractRef(party, new Contract.ContractId(example))) + 
.transformWith(Future.successful) + } + + test("exercise target", parseErrorCode = RequestValidationErrors.InvalidField) { + implicit ec => (alpha, party) => + for { + contractCid <- alpha.create(party, new Contract(party)) + result <- + alpha + .exercise( + party, + new ContractRef.ContractId(example).exerciseChange(contractCid), + ) + .transformWith(Future.successful) + } yield result match { + case Failure(exception: StatusRuntimeException) + if Try( + assertErrorCode( + statusRuntimeException = exception, + expectedErrorCode = ConsistencyErrors.ContractNotFound, + ) + ).isSuccess => + TryUtil.unit + + case Success(_) => Failure(new UnknownError("Unexpected Success")) + case otherwise => otherwise.map(_ => ()) + } + } + + test("choice argument") { implicit ec => (alpha, party) => + for { + contractCid <- alpha.create(party, new Contract(party)) + contractRefCid <- alpha.create(party, new ContractRef(party, contractCid)) + result <- alpha + .exercise(party, contractRefCid.exerciseChange(new Contract.ContractId(example))) + .transformWith(Future.successful) + } yield result + } + + test("create-and-exercise payload") { implicit ec => (alpha, party) => + for { + contractCid <- alpha.create(party, new Contract(party)) + result <- alpha + .exercise( + party, + new ContractRef(party, new Contract.ContractId(example)).createAnd + .exerciseChange(contractCid), + ) + .transformWith(Future.successful) + } yield result + } + + test("create-and-exercise choice argument") { implicit ec => (alpha, party) => + for { + contractCid <- alpha.create(party, new Contract(party)) + result <- alpha + .exercise( + party, + new ContractRef(party, contractCid).createAnd + .exerciseChange(new Contract.ContractId(example)), + ) + .transformWith(Future.successful) + } yield result + } + + test("exercise by key") { implicit ec => (alpha, party) => + for { + contractCid <- alpha.create(party, new Contract(party)) + _ <- alpha.create(party, new ContractRef(party, contractCid)) + result <- alpha + .exerciseByKey( + party, + ContractRef.TEMPLATE_ID_WITH_PACKAGE_ID, + party, + "Change", + new DamlRecord( + new DamlRecord.Field(new ContractId(example)) + ), + ) + .transformWith(Future.successful) + } yield result + } + } +} + +object ContractKeysContractIdIT { + private val nonSuffixedV1Cid = (0 to 32).map("%02x".format(_)).mkString + private val suffixedV1Cid = + "00bb20b46000000000000000000000000000000000000000000000000000000000ca11122023967e650dce35adf1dd5deb7e7bdf00aa5c677c9091a23520faa49cb2cda76a" + + private def camelCase(s: String): String = + s.split("[ -]").iterator.map(_.capitalize).mkString("") + + final private case class TestConfiguration( + description: String, + example: String, + accepted: Boolean, + isSupported: Features => Boolean = _ => true, + disabledReason: String = "", + // Invalid v1 cids (e.g. no suffix when one is required) fail during command preprocessing. 
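+      // Such failures surface as Preprocessing.PreprocessingFailed (with an "Illegal Contract ID"
+      // message) rather than as a request-validation error, which is why the test body above
+      // branches on this flag when choosing the expected error code.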
+ failsInPreprocessing: Boolean = false, + ) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysDeeplyNestedValueIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysDeeplyNestedValueIT.scala new file mode 100644 index 0000000000..2bfee4c190 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysDeeplyNestedValueIT.scala @@ -0,0 +1,127 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_dev + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, Party} +import com.daml.ledger.javaapi.data.codegen.{ContractCompanion, Update} +import com.daml.ledger.test.java.experimental.deeplynestedvalue.Handler as Handler +import com.digitalasset.base.error.ErrorCode +import com.digitalasset.canton.ledger.error.groups.CommandExecutionErrors + +import scala.concurrent.{ExecutionContext, Future} +import scala.util.Success + +final class ContractKeysDeeplyNestedValueIT extends LedgerTestSuite { + implicit val handlerCompanion + : ContractCompanion.WithoutKey[Handler.Contract, Handler.ContractId, Handler] = + Handler.COMPANION + + private[this] def waitForTransactionId( + alpha: ParticipantTestContext, + party: Party, + command: Update[_], + )(implicit + ec: ExecutionContext + ): Future[Either[Throwable, String]] = + alpha + .submitAndWait( + alpha.submitAndWaitRequest(party, command.commands) + ) + .transform(x => Success(x.map(_.updateId).toEither)) + + private[this] def camlCase(s: String) = + s.split(" ").iterator.map(_.capitalize).mkString("") + + List[Long](46, 100, 101, 110, 200).foreach { nesting => + val accepted = nesting <= 100 + val result = if (accepted) "Accept" else "Reject" + + // Once converted to Nat, `n` will have a nesting `nesting`. + // Note that Nat.Z(()) has nesting 1. 
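+    // For example, a target nesting of 3 gives n = 2, which converts to S (S (Z ())):
+    // three nested constructors, hence nesting 3.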
+    val n = nesting - 1
+
+    // The nesting of the key of a `ContractWithKey` is one more than the nat it contains
+    val nKey = n - 1
+
+    def test[T](description: String, errorCodeIfExpected: ErrorCode)(
+        update: ExecutionContext => (
+            ParticipantTestContext,
+            Party,
+        ) => Future[Either[Throwable, T]]
+    ): Unit =
+      super.test(
+        result + camlCase(description) + nesting.toString,
+        s"${result.toLowerCase}s $description with a nesting of $nesting",
+        allocate(SingleParty),
+      )(implicit ec => { case Participants(Participant(alpha, Seq(party))) =>
+        update(ec)(alpha, party).map {
+          case Right(_) if accepted => ()
+          case Left(err: Throwable) if !accepted =>
+            assertGrpcError(
+              err,
+              errorCodeIfExpected,
+              None,
+              checkDefiniteAnswerMetadata = true,
+            )
+          case otherwise =>
+            fail("Unexpected " + otherwise.fold(err => s"failure: $err", _ => "success"))
+        }
+      })
+
+    test(
+      "contract key",
+      CommandExecutionErrors.Interpreter.ValueNesting,
+    ) { implicit ec => (alpha, party) =>
+      for {
+        handler: Handler.ContractId <- alpha.create(party, new Handler(party))
+        result <- waitForTransactionId(alpha, party, handler.exerciseCreateKey(nKey))
+      } yield result
+    }
+
+    if (accepted) {
+      // Because we cannot create contracts with nesting > 100,
+      // it does not make sense to test fetch of those kinds of contracts.
+      test(
+        "fetch by key",
+        CommandExecutionErrors.Interpreter.ValueNesting,
+      ) { implicit ec => (alpha, party) =>
+        for {
+          handler: Handler.ContractId <- alpha.create(party, new Handler(party))
+          _ <- alpha.exercise(party, handler.exerciseCreateKey(nKey))
+          result <- waitForTransactionId(alpha, party, handler.exerciseFetchByKey(nKey))
+        } yield result
+      }
+    }
+
+    test(
+      "failing lookup by key",
+      CommandExecutionErrors.Interpreter.ValueNesting,
+    ) { implicit ec => (alpha, party) =>
+      for {
+        handler: Handler.ContractId <- alpha.create(party, new Handler(party))
+        result <- waitForTransactionId(alpha, party, handler.exerciseLookupByKey(nKey))
+      } yield result
+    }
+
+    if (accepted) {
+      // Because we cannot create contracts with key nesting > 100,
+      // it does not make sense to test successful lookup for those keys.
+      test(
+        "successful lookup by key",
+        CommandExecutionErrors.Interpreter.ValueNesting,
+      ) { implicit ec => (alpha, party) =>
+        for {
+          handler: Handler.ContractId <- alpha.create(party, new Handler(party))
+          _ <- alpha.exercise(party, handler.exerciseCreateKey(nKey))
+          result <-
+            waitForTransactionId(alpha, party, handler.exerciseLookupByKey(nKey))
+        } yield result
+      }
+    }
+
+  }
+}
diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysDivulgenceIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysDivulgenceIT.scala
new file mode 100644
index 0000000000..57471cde7d
--- /dev/null
+++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysDivulgenceIT.scala
@@ -0,0 +1,29 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+ +package com.daml.ledger.api.testtool.suites.v2_dev + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.api.testtool.infrastructure.Synchronize.waitForContract +import com.daml.ledger.test.java.experimental.test.{Asset, Proposal} + +final class ContractKeysDivulgenceIT extends LedgerTestSuite { + + test( + "DivulgenceKeys", + "Divulgence should behave as expected in a workflow involving keys", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case Participants(Participant(alpha, Seq(proposer)), Participant(beta, Seq(owner))) => + for { + offer <- alpha.create(proposer, new Proposal(proposer, owner))(Proposal.COMPANION) + asset <- beta.create(owner, new Asset(owner, owner))(Asset.COMPANION) + _ <- waitForContract(beta, owner, offer) + _ <- beta.exercise(owner, offer.exerciseProposalAccept(asset)) + } yield { + // nothing to test, if the workflow ends successfully the test is considered successful + } + }) + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysExplicitDisclosureIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysExplicitDisclosureIT.scala new file mode 100644 index 0000000000..68204d8e4b --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysExplicitDisclosureIT.scala @@ -0,0 +1,144 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_dev + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.TransactionHelpers.createdEvents +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, Party} +import com.daml.ledger.api.v2.command_service.SubmitAndWaitRequest +import com.daml.ledger.api.v2.commands.DisclosedContract +import com.daml.ledger.api.v2.event.CreatedEvent +import com.daml.ledger.api.v2.transaction_filter.CumulativeFilter.IdentifierFilter +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_ACS_DELTA +import com.daml.ledger.api.v2.transaction_filter.{ + CumulativeFilter, + EventFormat, + Filters, + TemplateFilter, + TransactionFormat, +} +import com.daml.ledger.api.v2.value.Identifier +import com.daml.ledger.javaapi +import com.daml.ledger.javaapi.data.{DamlRecord, ExerciseByKeyCommand} +import com.daml.ledger.test.java.experimental.test.WithKey +import com.digitalasset.canton.ledger.error.groups.CommandExecutionErrors + +import java.util.List as JList + +final class ContractKeysExplicitDisclosureIT extends LedgerTestSuite { + import ContractKeysExplicitDisclosureIT.* + + test( + "EDExerciseByKeyDisclosedContract", + "A disclosed contract can be exercised by key with non-witness readers if authorized", + partyAllocation = allocate(SingleParty, SingleParty), + ) { implicit ec => + { + case p @ Participants( + Participant(ownerParticipant, Seq(owner)), + Participant(divulgeeParticipant, Seq(divulgee)), + ) => + for { + // Create contract with `owner` as only stakeholder + _ <- ownerParticipant.submitAndWait( + 
ownerParticipant.submitAndWaitRequest(owner, new WithKey(owner).create.commands) + ) + txReq <- ownerParticipant.getTransactionsRequest( + formatByPartyAndTemplate(owner, WithKey.TEMPLATE_ID) + ) + txs <- ownerParticipant.transactions(txReq) + withKeyCreationTx = assertSingleton("Transaction expected non-empty", txs) + withKeyCreate = createdEvents(withKeyCreationTx).head + withKeyDisclosedContract = createEventToDisclosedContract(withKeyCreate) + + // Ensure participants are synchronized + _ <- p.synchronize + + exerciseByKeyError <- divulgeeParticipant + .submitAndWait( + exerciseWithKey_byKey_request(divulgeeParticipant, owner, divulgee, None) + ) + .mustFail("divulgee does not see the contract") + // Assert that a random party can exercise the contract by key (if authorized) + // when passing the disclosed contract to the submission + _ <- divulgeeParticipant.submitAndWait( + exerciseWithKey_byKey_request( + divulgeeParticipant, + owner, + divulgee, + Some(withKeyDisclosedContract), + ) + ) + } yield assertGrpcError( + exerciseByKeyError, + CommandExecutionErrors.Interpreter.LookupErrors.ContractKeyNotFound, + None, + checkDefiniteAnswerMetadata = true, + ) + } + } +} + +object ContractKeysExplicitDisclosureIT { + + private def formatByPartyAndTemplate( + owner: Party, + templateId: javaapi.data.Identifier, + ): TransactionFormat = { + val templateIdScalaPB = Identifier.fromJavaProto(templateId.toProto) + + TransactionFormat( + eventFormat = Some( + EventFormat( + filtersByParty = Map( + owner.getValue -> new Filters( + Seq( + CumulativeFilter( + IdentifierFilter.TemplateFilter( + TemplateFilter(Some(templateIdScalaPB), includeCreatedEventBlob = true) + ) + ) + ) + ) + ), + filtersForAnyParty = None, + verbose = false, + ) + ), + transactionShape = TRANSACTION_SHAPE_ACS_DELTA, + ) + } + + private def createEventToDisclosedContract(ev: CreatedEvent): DisclosedContract = + DisclosedContract( + templateId = ev.templateId, + contractId = ev.contractId, + createdEventBlob = ev.createdEventBlob, + synchronizerId = "", + ) + + private def exerciseWithKey_byKey_request( + ledger: ParticipantTestContext, + owner: Party, + party: Party, + withKeyDisclosedContract: Option[DisclosedContract], + ): SubmitAndWaitRequest = + ledger + .submitAndWaitRequest( + party, + JList.of( + new ExerciseByKeyCommand( + WithKey.TEMPLATE_ID_WITH_PACKAGE_ID, + owner, + "WithKey_NoOp", + new DamlRecord( + new DamlRecord.Field(party) + ), + ) + ), + ) + .update(_.commands.disclosedContracts := withKeyDisclosedContract.iterator.toSeq) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysIT.scala new file mode 100644 index 0000000000..356180f352 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysIT.scala @@ -0,0 +1,577 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.suites.v2_dev + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.Eventually.eventually +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.api.testtool.infrastructure.TransactionHelpers.* +import com.daml.ledger.api.v2.commands.DisclosedContract +import com.daml.ledger.api.v2.transaction_filter.CumulativeFilter.IdentifierFilter +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_ACS_DELTA +import com.daml.ledger.api.v2.transaction_filter.{ + CumulativeFilter, + EventFormat, + Filters, + TemplateFilter, + TransactionFormat, +} +import com.daml.ledger.api.v2.value.{RecordField, Value} +import com.daml.ledger.javaapi.data.{DamlRecord, Party, Text} +import com.daml.ledger.test.java.experimental.da.types.Tuple2 +import com.daml.ledger.test.java.experimental.test.{ + Delegated, + Delegation, + LocalKeyVisibilityOperations, + MaintainerNotSignatory, + ShowDelegated, + TextKey, + TextKeyOperations, + WithKey, + WithKeyCreatorAlternative, +} +import com.daml.ledger.test.java.model.test.CallablePayout +import com.digitalasset.canton.ledger.api.TransactionShape.AcsDelta +import com.digitalasset.canton.ledger.error.groups.{CommandExecutionErrors, ConsistencyErrors} + +import java.util.regex.Pattern +import scala.jdk.CollectionConverters.* +import scala.jdk.OptionConverters.* + +final class ContractKeysIT extends LedgerTestSuite { + import ContractKeysCompanionImplicits.* + + test( + "CKNoContractKey", + "There should be no contract key if the template does not specify one", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case Participants(Participant(alpha @ _, Seq(receiver)), Participant(beta, Seq(giver))) => + for { + _ <- beta.create(giver, new CallablePayout(giver, receiver)) + transactions <- beta.transactions(AcsDelta, giver, receiver) + } yield { + val contract = assertSingleton("NoContractKey", transactions.flatMap(createdEvents)) + assert( + contract.getContractKey.sum.isEmpty, + s"The key is not empty: ${contract.getContractKey}", + ) + } + }) + + test( + "CKFetchOrLookup", + "Divulged contracts cannot be fetched or looked up by key by non-stakeholders", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case Participants(Participant(alpha, Seq(owner)), Participant(beta, Seq(delegate))) => + val key = alpha.nextKeyId() + for { + // create contracts to work with + delegated <- alpha.create(owner, new Delegated(owner, key)) + delegation <- alpha.create(owner, new Delegation(owner, delegate)) + showDelegated <- alpha.create(owner, new ShowDelegated(owner, delegate)) + + // divulge the contract + _ <- alpha.exercise(owner, showDelegated.exerciseShowIt(delegated)) + + // fetch delegated + _ <- eventually("exerciseFetchDelegated") { + beta.exercise(delegate, delegation.exerciseFetchDelegated(delegated)) + } + + // fetch by key should fail during interpretation + // Reason: Only stakeholders see the result of fetchByKey, beta is neither stakeholder nor divulgee + fetchFailure <- beta + .exercise(delegate, delegation.exerciseFetchByKeyDelegated(owner, key)) + .mustFail("fetching by key with a party that cannot see the contract") + + // lookup by key delegation should fail during validation + // Reason: During command interpretation, the lookup did not find anything due to privacy rules, + // but validation determined that this result 
is wrong as the contract is there. + lookupByKeyFailure <- beta + .exercise(delegate, delegation.exerciseLookupByKeyDelegated(owner, key)) + .mustFail("looking up by key with a party that cannot see the contract") + } yield { + assertGrpcError( + fetchFailure, + CommandExecutionErrors.Interpreter.LookupErrors.ContractKeyNotFound, + Some("couldn't find key"), + ) + assertGrpcErrorRegex( + lookupByKeyFailure, + ConsistencyErrors.InconsistentContractKey, + Some(Pattern.compile("Inconsistent|Contract key lookup with different results")), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "CKNoFetchUndisclosed", + "Contract Keys should reject fetching an undisclosed contract", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case p @ Participants(Participant(alpha, Seq(owner)), Participant(beta, Seq(delegate))) => + val key = alpha.nextKeyId() + for { + // create contracts to work with + delegated <- alpha.create(owner, new Delegated(owner, key)) + delegation <- alpha.create(owner, new Delegation(owner, delegate)) + + _ <- p.synchronize + + // fetch should fail + // Reason: contract not divulged to beta + fetchFailure <- beta + .exercise(delegate, delegation.exerciseFetchDelegated(delegated)) + .mustFail("fetching a contract with a party that cannot see it") + + // fetch by key should fail + // Reason: Only stakeholders see the result of fetchByKey, beta is only a divulgee + fetchByKeyFailure <- beta + .exercise(delegate, delegation.exerciseFetchByKeyDelegated(owner, key)) + .mustFail("fetching a contract by key with a party that cannot see it") + + // lookup by key should fail + // Reason: During command interpretation, the lookup did not find anything due to privacy rules, + // but validation determined that this result is wrong as the contract is there. 
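+      // Note: fetch-by-key fails already at interpretation (no visible key), whereas the negative
+      // lookup is a valid interpretation result that is only rejected at validation time.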
+ lookupByKeyFailure <- beta + .exercise(delegate, delegation.exerciseLookupByKeyDelegated(owner, key)) + .mustFail("looking up a contract by key with a party that cannot see it") + } yield { + assertGrpcError( + fetchFailure, + ConsistencyErrors.ContractNotFound, + Some("Contract could not be found"), + checkDefiniteAnswerMetadata = true, + ) + assertGrpcError( + fetchByKeyFailure, + CommandExecutionErrors.Interpreter.LookupErrors.ContractKeyNotFound, + Some("couldn't find key"), + ) + assertGrpcErrorRegex( + lookupByKeyFailure, + ConsistencyErrors.InconsistentContractKey, + Some(Pattern.compile("Inconsistent|Contract key lookup with different results")), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "CKMaintainerScoped", + "Contract keys should be scoped by maintainer", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case p @ Participants(Participant(alpha, Seq(alice)), Participant(beta, Seq(bob))) => + val key1 = alpha.nextKeyId() + val key2 = alpha.nextKeyId() + val unknownKey = alpha.nextKeyId() + + for { + // create contracts to work with + tk1 <- alpha.create(alice, new TextKey(alice, key1, List(bob.getValue).asJava)) + tk2 <- alpha.create(alice, new TextKey(alice, key2, List(bob.getValue).asJava)) + aliceTKO <- alpha.create(alice, new TextKeyOperations(alice)) + bobTKO <- beta.create(bob, new TextKeyOperations(bob)) + + _ <- p.synchronize + + // creating a contract with a duplicate key should fail + duplicateKeyFailure <- alpha + .create(alice, new TextKey(alice, key1, List(bob.getValue).asJava)) + .mustFail("creating a contract with a duplicate key") + + // trying to lookup an unauthorized key should fail + bobLooksUpTextKeyFailure <- beta + .exercise(bob, bobTKO.exerciseTKOLookup(new Tuple2(alice, key1), Some(tk1).toJava)) + .mustFail("looking up a contract with an unauthorized key") + + // trying to lookup an unauthorized non-existing key should fail + bobLooksUpBogusTextKeyFailure <- beta + .exercise(bob, bobTKO.exerciseTKOLookup(new Tuple2(alice, unknownKey), None.toJava)) + .mustFail("looking up a contract with an unauthorized, non-existing key") + + // successful, authorized lookup + _ <- alpha.exercise( + alice, + aliceTKO.exerciseTKOLookup(new Tuple2(alice, key1), Some(tk1).toJava), + ) + + // successful fetch + _ <- alpha.exercise(alice, aliceTKO.exerciseTKOFetch(new Tuple2(alice, key1), tk1)) + + // successful, authorized lookup of non-existing key + _ <- alpha.exercise( + alice, + aliceTKO.exerciseTKOLookup(new Tuple2(alice, unknownKey), None.toJava), + ) + + // failing fetch + aliceFailedFetch <- alpha + .exercise(alice, aliceTKO.exerciseTKOFetch(new Tuple2(alice, unknownKey), tk1)) + .mustFail("fetching a contract by an unknown key") + + // now we exercise the contract, thus archiving it, and then verify + // that we cannot look it up anymore + _ <- alpha.exercise(alice, tk1.exerciseTextKeyChoice()) + _ <- alpha.exercise(alice, aliceTKO.exerciseTKOLookup(new Tuple2(alice, key1), None.toJava)) + + // lookup the key, consume it, then verify we cannot look it up anymore + _ <- alpha.exercise( + alice, + aliceTKO.exerciseTKOConsumeAndLookup(tk2, new Tuple2(alice, key2)), + ) + + // failing create when a maintainer is not a signatory + maintainerNotSignatoryFailed <- alpha + .create(alice, new MaintainerNotSignatory(alice, bob)) + .mustFail("creating a contract where a maintainer is not a signatory") + } yield { + assertGrpcErrorRegex( + duplicateKeyFailure, + ConsistencyErrors.DuplicateContractKey, + 
        Some(Pattern.compile("Inconsistent|contract key is not unique")),
+        checkDefiniteAnswerMetadata = true,
+      )
+      assertGrpcError(
+        bobLooksUpTextKeyFailure,
+        CommandExecutionErrors.Interpreter.AuthorizationError,
+        Some("requires authorizers"),
+        checkDefiniteAnswerMetadata = true,
+      )
+      assertGrpcError(
+        bobLooksUpBogusTextKeyFailure,
+        CommandExecutionErrors.Interpreter.AuthorizationError,
+        Some("requires authorizers"),
+        checkDefiniteAnswerMetadata = true,
+      )
+      assertGrpcError(
+        aliceFailedFetch,
+        CommandExecutionErrors.Interpreter.LookupErrors.ContractKeyNotFound,
+        Some("couldn't find key"),
+        checkDefiniteAnswerMetadata = true,
+      )
+      assertGrpcError(
+        maintainerNotSignatoryFailed,
+        CommandExecutionErrors.Interpreter.AuthorizationError,
+        Some("are not a subset of the signatories"),
+        checkDefiniteAnswerMetadata = true,
+      )
+    }
+  })
+
+  test("CKRecreate", "Contract keys can be recreated in a single transaction", allocate(SingleParty))(
+    implicit ec => { case Participants(Participant(ledger, Seq(owner))) =>
+      val key = ledger.nextKeyId()
+      for {
+        delegated1Tx <- ledger
+          .submitAndWaitForTransaction(
+            ledger
+              .submitAndWaitForTransactionRequest(owner, new Delegated(owner, key).create.commands)
+          )
+          .map(_.getTransaction)
+        delegated1Id = new Delegated.ContractId(
+          delegated1Tx.events.head.getCreated.contractId
+        )
+
+        delegated2TxTree <- ledger.exercise(owner, delegated1Id.exerciseRecreate())
+      } yield {
+        assert(delegated2TxTree.events.size == 2)
+        val event = delegated2TxTree.events.filter(_.event.isCreated).head
+        assert(
+          delegated1Id.contractId != event.getCreated.contractId,
+          "New contract was not created",
+        )
+        assert(
+          event.getCreated.contractKey == delegated1Tx.events.head.getCreated.contractKey,
+          "Contract keys did not match",
+        )
+
+      }
+    }
+  )
+
+  test(
+    "CKTransients",
+    "Contract keys created by transient contracts are properly archived",
+    allocate(SingleParty),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(owner))) =>
+    val key = ledger.nextKeyId()
+    val key2 = ledger.nextKeyId()
+
+    for {
+      delegation <- ledger.create(owner, new Delegation(owner, owner))
+      delegated <- ledger.create(owner, new Delegated(owner, key))
+
+      failedFetch <- ledger
+        .exercise(owner, delegation.exerciseFetchByKeyDelegated(owner, key2))
+        .mustFail("fetching a contract with an unknown key")
+
+      // Create a transient contract with a key that is created and archived in the same transaction.
+      _ <- ledger.exercise(owner, delegated.exerciseCreateAnotherAndArchive(key2))
+
+      // Try it again, expecting it to succeed.
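+      // The transient contract consumed its own key within that same transaction, so key2 is
+      // free to be created and archived once more.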
+ _ <- ledger.exercise(owner, delegated.exerciseCreateAnotherAndArchive(key2)) + + } yield { + assertGrpcError( + failedFetch, + CommandExecutionErrors.Interpreter.LookupErrors.ContractKeyNotFound, + Some("couldn't find key"), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "CKExposedByTemplate", + "The contract key should be exposed if the template specifies one", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val expectedKey = ledger.nextKeyId() + for { + _ <- ledger.create(party, new TextKey(party, expectedKey, List.empty.asJava)) + transactions <- ledger.transactions(AcsDelta, party) + } yield { + val contract = assertSingleton("CKExposedByTemplate", transactions.flatMap(createdEvents)) + assertEquals( + "CKExposedByTemplate", + contract.getContractKey.getRecord.fields, + Seq( + RecordField("_1", Some(Value(Value.Sum.Party(party.getValue)))), + RecordField("_2", Some(Value(Value.Sum.Text(expectedKey)))), + ), + ) + } + }) + + test( + "CKExerciseByKey", + "Exercising by key should be possible only when the corresponding contract is available", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val keyString = ledger.nextKeyId() + val expectedKey = new DamlRecord( + new DamlRecord.Field("_1", new Party(party.getValue)), + new DamlRecord.Field("_2", new Text(keyString)), + ) + for { + failureBeforeCreation <- ledger + .exerciseByKey( + party, + TextKey.TEMPLATE_ID_WITH_PACKAGE_ID, + expectedKey, + "TextKeyChoice", + new DamlRecord(), + ) + .mustFail("exercising before creation") + _ <- ledger.create(party, new TextKey(party, keyString, List.empty.asJava)) + _ <- ledger.exerciseByKey( + party, + TextKey.TEMPLATE_ID_WITH_PACKAGE_ID, + expectedKey, + "TextKeyChoice", + new DamlRecord(), + ) + failureAfterConsuming <- ledger + .exerciseByKey( + party, + TextKey.TEMPLATE_ID_WITH_PACKAGE_ID, + expectedKey, + "TextKeyChoice", + new DamlRecord(), + ) + .mustFail("exercising after consuming") + } yield { + assertGrpcError( + failureBeforeCreation, + CommandExecutionErrors.Interpreter.LookupErrors.ContractKeyNotFound, + Some("dependency error: couldn't find key"), + checkDefiniteAnswerMetadata = true, + ) + assertGrpcError( + failureAfterConsuming, + CommandExecutionErrors.Interpreter.LookupErrors.ContractKeyNotFound, + Some("dependency error: couldn't find key"), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "CKLocalLookupByKeyVisibility", + "Visibility should not be checked for lookup-by-key of contracts created in the current transaction", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case p @ Participants(Participant(ledger1, Seq(party1)), Participant(ledger2, Seq(party2))) => + for { + ops: LocalKeyVisibilityOperations.ContractId <- ledger1.create( + party1, + new LocalKeyVisibilityOperations(party1, party2), + ) + _ <- p.synchronize + _ <- ledger2.exercise(party2, ops.exerciseLocalLookup()) + } yield () + }) + + test( + "CKLocalFetchByKeyVisibility", + "Visibility should not be checked for fetch-by-key of contracts created in the current transaction", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case p @ Participants(Participant(ledger1, Seq(party1)), Participant(ledger2, Seq(party2))) => + for { + ops: LocalKeyVisibilityOperations.ContractId <- ledger1.create( + party1, + new LocalKeyVisibilityOperations(party1, party2), + ) + _ <- p.synchronize + _ <- ledger2.exercise(party2, ops.exerciseLocalFetch()) + } yield () + }) + + 
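+  // Here the second participant learns about the WithKey contract by submitting the transaction
+  // that creates it, rather than through an explicit DisclosedContract payload.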
test( + "CKDisclosedContractKeyReusabilityAsSubmitter", + "Subsequent disclosed contracts can use the same contract key (disclosure because of submitting)", + allocate(SingleParty, SingleParty), + )(implicit ec => { + case p @ Participants(Participant(ledger1, Seq(party1)), Participant(ledger2, Seq(party2))) => + for { + // Create a helper contract and exercise a choice creating and disclosing a WithKey contract + creator1: WithKeyCreatorAlternative.ContractId <- ledger1.create( + party1, + new WithKeyCreatorAlternative(party1, party2), + )(WithKeyCreatorAlternative.COMPANION) + + _ <- p.synchronize + + _ <- ledger2.exercise( + party2, + creator1.exerciseWithKeyCreatorAlternative_DiscloseCreate(), + ) + + _ <- p.synchronize + + Seq(withKey1Event) <- ledger1.activeContractsByTemplateId( + List(WithKey.TEMPLATE_ID), + Some(Seq(party1)), + ) + withKey1 = new WithKey.ContractId(withKey1Event.contractId) + // Archive the disclosed contract + _ <- ledger1.exercise(party1, withKey1.exerciseArchive()) + + _ <- p.synchronize + + // Repeat the same steps for the second time + _ <- ledger2.exercise( + party2, + creator1.exerciseWithKeyCreatorAlternative_DiscloseCreate(), + ) + + _ <- p.synchronize + } yield () + }) + + test( + "CKGlocalKeyVisibility", + "Contract keys should be visible", + allocate(TwoParties), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob))) => + for { + + // create contracts to work with + cid <- ledger.create(alice, new WithKey(alice)) + + // double check its key can be found if visible + _ <- ledger.submit( + ledger.submitRequest( + alice, + WithKey.byKey(alice).exerciseWithKey_NoOp(alice).commands, + ) + ) + + end <- ledger.currentEnd() + // explicitly disclose the contract + withKeyTxs <- ledger.transactions( + ledger.getTransactionsRequestWithEnd( + transactionFormat = TransactionFormat( + Some( + EventFormat( + filtersByParty = Map( + alice.getValue -> new Filters( + Seq( + CumulativeFilter( + IdentifierFilter.TemplateFilter( + TemplateFilter( + Some(WithKey.TEMPLATE_ID.toV1), + includeCreatedEventBlob = true, + ) + ) + ) + ) + ) + ), + filtersForAnyParty = None, + verbose = false, + ) + ), + transactionShape = TRANSACTION_SHAPE_ACS_DELTA, + ), + end = Some(end), + ) + ) + withKeyCreatedEvent = createdEvents(withKeyTxs.head).head + disclosedWithKey = DisclosedContract( + templateId = withKeyCreatedEvent.templateId, + contractId = withKeyCreatedEvent.contractId, + createdEventBlob = withKeyCreatedEvent.createdEventBlob, + synchronizerId = "", + ) + + // double check it is properly disclosed + _ <- ledger.submitAndWait( + ledger + .submitAndWaitRequest( + bob, + cid.exerciseWithKey_NoOp(bob).commands, + ) + .update(_.commands.disclosedContracts := Seq(disclosedWithKey)) + ) + + // without explicit disclosure key lookup should fail + request = ledger + .submitRequest( + bob, + // exercise by key the contract + WithKey.byKey(alice).exerciseWithKey_NoOp(bob).commands, + ) + failure <- ledger + .submit(request) + .mustFail("exercise of a non visible key") + + // with explicit disclosure key lookup should succeed + request = ledger + .submitRequest( + bob, + // exercise by key the contract + WithKey.byKey(alice).exerciseWithKey_NoOp(bob).commands, + ) + .update(_.commands.disclosedContracts := Seq(disclosedWithKey)) + _ <- ledger.submit(request) + + } yield { + assertGrpcError( + failure, + CommandExecutionErrors.Interpreter.LookupErrors.ContractKeyNotFound, + Some("couldn't find key"), + checkDefiniteAnswerMetadata = true, + ) + } + }) + +} diff --git 
a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysMultiPartySubmissionIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysMultiPartySubmissionIT.scala new file mode 100644 index 0000000000..a0f7395135 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysMultiPartySubmissionIT.scala @@ -0,0 +1,241 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites.v2_dev + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, Party} +import com.daml.ledger.javaapi.data.codegen.ContractCompanion +import com.daml.ledger.test.java.experimental.test.MultiPartyContract +import com.digitalasset.base.error.{ErrorCategory, ErrorCode} +import com.digitalasset.canton.ledger.error.groups.CommandExecutionErrors + +import java.util.UUID +import java.util.regex.Pattern +import scala.concurrent.{ExecutionContext, Future} +import scala.jdk.CollectionConverters.* +import scala.jdk.OptionConverters.* + +final class ContractKeysMultiPartySubmissionIT extends LedgerTestSuite { + implicit val multiPartyContractCompanion: ContractCompanion.WithKey[ + MultiPartyContract.Contract, + MultiPartyContract.ContractId, + MultiPartyContract, + MultiPartyContract, + ] = MultiPartyContract.COMPANION + + test( + "MPSFetchOtherByKeyOtherSuccess", + "Exercise FetchOtherByKey succeeds with sufficient authorization and read delegation", + allocate(Parties(4)), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob, charlie, david))) => + for { + // Create contract A for (Alice, Bob) + (_, keyA) <- createMultiPartyContract(ledger, List(alice, bob)) + + // Create contract B for (Alice, Bob, Charlie, David) + (contractB, _) <- createMultiPartyContract(ledger, List(alice, bob, charlie, david)) + + // Fetch contract A through contract B as (Charlie, David) + _ <- ledger.exercise( + actAs = List(charlie, david), + readAs = List(alice), + exercise = + contractB.exerciseMPFetchOtherByKey(keyA, List(charlie, david).map(_.getValue).asJava), + ) + } yield () + }) + + test( + "MPSFetchOtherByKeyInsufficientAuthorization", + "Exercise FetchOtherByKey fails with insufficient authorization", + allocate(Parties(4)), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob, charlie, david))) => + for { + // Create contract A for (Alice, Bob) + (_, keyA) <- createMultiPartyContract(ledger, List(alice, bob)) + + // Create contract B for (Charlie, David) + (contractB, _) <- createMultiPartyContract(ledger, List(charlie, david)) + + // Fetch contract A through contract B as (Charlie, David) + // Should fail with an authorization error + failure <- ledger + .exercise( + actAs = List(charlie, david), + readAs = List(bob, alice), + exercise = + contractB.exerciseMPFetchOtherByKey(keyA, List(charlie, david).map(_.getValue).asJava), + ) + .mustFail("exercising a choice without authorization to fetch another contract by key") + } yield { + assertGrpcErrorRegex( + failure, + CommandExecutionErrors.Interpreter.AuthorizationError, + Some(Pattern.compile("of the 
fetched contract to be an authorizer, but authorizers were")), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "MPSFetchOtherByKeyInvisible", + "Exercise FetchOtherByKey fails because the contract isn't visible", + allocate(Parties(4)), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob, charlie, david))) => + for { + // Create contract A for (Alice, Bob) + (_, keyA) <- createMultiPartyContract(ledger, List(alice, bob)) + + // Create contract B for (Alice, Bob, Charlie, David) + (contractB, _) <- createMultiPartyContract(ledger, List(alice, bob, charlie, david)) + + // Fetch contract A through contract B as (Charlie, David) + // Should fail with an interpretation error because the fetched contract isn't visible to any submitter + failure <- ledger + .exercise( + actAs = List(charlie, david), + readAs = List.empty, + exercise = + contractB.exerciseMPFetchOtherByKey(keyA, List(charlie, david).map(_.getValue).asJava), + ) + .mustFail("exercising a choice without authorization to fetch another contract by key") + } yield { + assertGrpcErrorRegex( + failure, + CommandExecutionErrors.Interpreter.LookupErrors.ContractKeyNotFound, + Some(Pattern.compile("dependency error: couldn't find key")), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "MPSLookupOtherByKeyOtherSuccess", + "Exercise LookupOtherByKey succeeds with sufficient authorization and read delegation", + allocate(Parties(4)), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob, charlie, david))) => + for { + // Create contract A for (Alice, Bob) + (contractA, keyA) <- createMultiPartyContract(ledger, List(alice, bob)) + + // Create contract B for (Alice, Bob, Charlie, David) + (contractB, _) <- createMultiPartyContract(ledger, List(alice, bob, charlie, david)) + + // Fetch contract A through contract B as (Charlie, David) + _ <- ledger.exercise( + actAs = List(charlie, david), + readAs = List(alice), + exercise = contractB + .exerciseMPLookupOtherByKey( + keyA, + List(charlie, david).map(_.getValue).asJava, + Some(contractA).toJava, + ), + ) + } yield () + }) + + test( + "MPSLookupOtherByKeyInsufficientAuthorization", + "Exercise LookupOtherByKey fails with insufficient authorization", + allocate(Parties(4)), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob, charlie, david))) => + for { + // Create contract A for (Alice, Bob) + (contractA, keyA) <- createMultiPartyContract(ledger, List(alice, bob)) + + // Create contract B for (Charlie, David) + (contractB, _) <- createMultiPartyContract(ledger, List(charlie, david)) + + // Fetch contract A through contract B as (Charlie, David) + // Should fail with an authorization error + failure <- ledger + .exercise( + actAs = List(charlie, david), + readAs = List(bob, alice), + exercise = contractB + .exerciseMPLookupOtherByKey( + keyA, + List(charlie, david).map(_.getValue).asJava, + Some(contractA).toJava, + ), + ) + .mustFail("exercising a choice without authorization to look up another contract by key") + } yield { + assertGrpcErrorRegex( + failure, + CommandExecutionErrors.Interpreter.AuthorizationError, + Some(Pattern.compile("requires authorizers (.*) for lookup by key, but it only has")), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "MPSLookupOtherByKeyInvisible", + "Exercise LookupOtherByKey fails because the contract isn't visible", + allocate(Parties(4)), + )(implicit ec => { case Participants(Participant(ledger, Seq(alice, bob, charlie, david))) => + for { + // Create 
contract A for (Alice, Bob) + (contractA, keyA) <- createMultiPartyContract(ledger, List(alice, bob)) + + // Create contract B for (Alice, Bob, Charlie, David) + (contractB, _) <- createMultiPartyContract(ledger, List(alice, bob, charlie, david)) + + // Fetch contract A through contract B as (Charlie, David) + // Should fail with an interpretation error because the fetched contract isn't visible to any submitter + failure <- ledger + .exercise( + actAs = List(charlie, david), + readAs = List.empty, + exercise = contractB + .exerciseMPLookupOtherByKey( + keyA, + List(charlie, david).map(_.getValue).asJava, + Some(contractA).toJava, + ), + ) + .mustFail("exercising a choice without authorization to look up another contract by key") + } yield { + val trace = + """ in choice [0-9a-f]{8}:Test:MultiPartyContract:MPLookupOtherByKey on contract [0-9a-f]{10} \(#0\) + | in exercise command [0-9a-f]{8}:Test:MultiPartyContract:MPLookupOtherByKey on contract [0-9a-f]{10}.""".stripMargin + assertGrpcError( + failure, + new ErrorCode( + CommandExecutionErrors.Interpreter.FailureStatus.id, + ErrorCategory.InvalidGivenCurrentSystemStateOther, + )( + CommandExecutionErrors.Interpreter.FailureStatus.parent + ) {}, + Some("LookupOtherByKey value matches"), + checkDefiniteAnswerMetadata = true, + additionalErrorAssertions = throwable => + assertMatches( + "exercise_trace", + extractErrorInfoMetadataValue(throwable, "exercise_trace"), + Pattern.compile(trace), + ), + ) + } + }) + + private[this] def createMultiPartyContract( + ledger: ParticipantTestContext, + submitters: List[Party], + value: String = UUID.randomUUID().toString, + )(implicit + ec: ExecutionContext + ): Future[(MultiPartyContract.ContractId, MultiPartyContract)] = + ledger + .create( + actAs = submitters, + readAs = List.empty, + template = new MultiPartyContract(submitters.map(_.getValue).asJava, value), + ) + .map(cid => cid -> new MultiPartyContract(submitters.map(_.getValue).asJava, value)) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysWronglyTypedContractIdIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysWronglyTypedContractIdIT.scala new file mode 100644 index 0000000000..efc58da28c --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ContractKeysWronglyTypedContractIdIT.scala @@ -0,0 +1,35 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.suites.v2_dev + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.test.java.experimental.test.{Delegated, Delegation} +import com.daml.ledger.test.java.model.test.Dummy +import com.digitalasset.canton.ledger.error.groups.CommandExecutionErrors + +final class ContractKeysWronglyTypedContractIdIT extends LedgerTestSuite { + import ContractKeysCompanionImplicits.* + + test("WTFetchFails", "Fetching of the wrong type fails", allocate(SingleParty))(implicit ec => { + case Participants(Participant(ledger, Seq(party))) => + for { + dummy <- ledger.create(party, new Dummy(party)) + fakeDelegated = new Delegated.ContractId(dummy.contractId) + delegation: Delegation.ContractId <- ledger.create(party, new Delegation(party, party)) + + fetchFailure <- ledger + .exercise(party, delegation.exerciseFetchDelegated(fakeDelegated)) + .mustFail("fetching the wrong type") + } yield { + assertGrpcError( + fetchFailure, + CommandExecutionErrors.Interpreter.WronglyTypedContract, + Some("wrongly typed contract id"), + checkDefiniteAnswerMetadata = true, + ) + } + }) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/EventsDescendantsIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/EventsDescendantsIT.scala new file mode 100644 index 0000000000..40fa897974 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/EventsDescendantsIT.scala @@ -0,0 +1,455 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.suites.v2_dev + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.Eventually.eventually +import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, TransactionHelpers} +import com.daml.ledger.api.testtool.suites.v2_1.CompanionImplicits.* +import com.daml.ledger.api.testtool.suites.v2_dev.EventsDescendantsIT.isDescendant +import com.daml.ledger.api.v2.event.Event +import com.daml.ledger.api.v2.event.Event.Event.Exercised +import com.daml.ledger.javaapi.data.codegen.ContractCompanion +import com.daml.ledger.test.java.experimental.exceptions.ExceptionTester +import com.daml.ledger.test.java.model.test.{ + Agreement, + AgreementFactory, + Dummy, + DummyFactory, + TriProposal, +} +import com.digitalasset.canton.ledger.api.TransactionShape.LedgerEffects +import com.digitalasset.canton.platform.store.utils.EventOps.EventOps + +import scala.jdk.CollectionConverters.* + +class EventsDescendantsIT extends LedgerTestSuite { + import EventsDescendantsIT.CompanionImplicits.* + + test( + "SingleConsumingExercisedDescendants", + "Descendant events in single consuming exercised", + allocate( + SingleParty + ), + )(implicit ec => { case Participants(Participant(alpha, Seq(party))) => + // Transaction + // └─ #0 Exercise DummyChoice1 + for { + contract <- alpha.create(party, new Dummy(party)) + tx <- alpha.exercise(party, contract.exerciseDummyChoice1()) + } yield { + val exercisedEvents = TransactionHelpers.exercisedEvents(tx) + val exercisedEvent = assertSingleton( + "Transaction should contain the exercised event", + exercisedEvents, + ) + + assert( + isDescendant( + who = exercisedEvent.nodeId, + of = Event(Exercised(exercisedEvent)), + ), + "The exercised event should have been descendant of itself", + ) + + } + }) + + test( + "SingleCreatedInExercisedDescendants", + "Descendant events in exercised with nested single created", + allocate( + SingleParty + ), + )(implicit ec => { case Participants(Participant(alpha, Seq(party))) => + // Transaction + // └─ #0 Exercise Clone + // └─ #1 Create Dummy + for { + contract <- alpha.create(party, new Dummy(party)) + txTree <- alpha.exercise(party, contract.exerciseClone()) + } yield { + val exercisedEvents = TransactionHelpers.exercisedEvents(txTree) + val exercisedEvent = assertSingleton( + "Transaction should contain the exercised event", + exercisedEvents, + ) + val createdEvents = TransactionHelpers.createdEvents(txTree) + assertSingleton( + "Transaction should contain the created event", + createdEvents, + ) + + val events = txTree.events + + events.foreach(event => + assert( + isDescendant( + who = event.nodeId, + of = Event(Exercised(exercisedEvent)), + ), + s"The event $event should have been descendant of the exercised event", + ) + ) + } + }) + + test( + "MultipleCreatedInExercisedDescendants", + "Descendant events in exercised with nested multiple created", + allocate( + SingleParty + ), + )(implicit ec => { case Participants(Participant(alpha, Seq(party))) => + // Transaction + // └─ #0 Exercise DummyFactoryCall + // ├─ #1 Create Dummy + // └─ #2 Create DummyWithParam + for { + dummyFactory <- alpha.create(party, new DummyFactory(party)) + txTree <- alpha.exercise(party, dummyFactory.exerciseDummyFactoryCall()) + } yield { + val exercisedEvents = TransactionHelpers.exercisedEvents(txTree) + val exercisedEvent = assertSingleton( + "Transaction should contain the exercised 
event", + exercisedEvents, + ) + val createdEvents = TransactionHelpers.createdEvents(txTree) + assertLength( + "Transaction should contain the two created events", + 2, + createdEvents, + ) + + val events = txTree.events + + events.foreach(event => + assert( + isDescendant( + who = event.nodeId, + of = Event(Exercised(exercisedEvent)), + ), + s"The event $event should have been descendant of the exercised event", + ) + ) + } + }) + + test( + "DeeplyNestedDescendants", + "Descendant events in exercised with deeply nested events", + allocate( + SingleParty + ), + )(implicit ec => { case Participants(Participant(alpha, Seq(party))) => + // Transaction + // └─ #0 Exercise DummyFactoryCallWithExercise + // ├─ #1 Create Dummy1 + // ├─ #2 Create Dummy2 + // ├─ #3 Create Dummy3 + // ├─ #4 Create DummyWithParam + // ├─ #5 Exercise Clone on Dummy1 + // │ └─ #6 Create B1 + // ├─ #7 Exercise DummyChoice1 on Dummy2 + // └─ #8 Exercise DummyNonConsuming on Dummy3 + for { + dummyFactory <- alpha.create(party, new DummyFactory(party)) + txTree <- alpha.exercise(party, dummyFactory.exerciseDummyFactoryCallWithExercise()) + } yield { + val exercisedEvents = TransactionHelpers.exercisedEvents(txTree) + val seminalExercisedEvent = assertSingleton( + "Transaction should contain at least one exercised event", + exercisedEvents.headOption.toList, + ) + val events = txTree.events + + // all the events should be descendants of the first exercise (even itself) + events.foreach(event => + assert( + isDescendant( + who = event.nodeId, + of = Event(Exercised(seminalExercisedEvent)), + ), + s"The event $event should have been descendant of the exercised event", + ) + ) + + val createdEvents = TransactionHelpers.createdEvents(txTree) + val lastCreatedEvent = assertSingleton( + "Transaction should contain 4 created events", + createdEvents.sortBy(_.nodeId).lastOption.toList, + ) + + val cloneExercisedEvent = assertSingleton( + "Transaction should contain the Clone exercised event", + exercisedEvents.filter(_.choice == "Clone").toList, + ) + + assert( + isDescendant( + who = lastCreatedEvent.nodeId, + of = Event(Exercised(cloneExercisedEvent)), + ), + s"The last created event $lastCreatedEvent should have been descendant of the Clone exercised event", + ) + + } + }) + + test( + "DescendantsWithFetch", + "Descendant events in transaction with fetch", + allocate(SingleParty, SingleParty), + )(implicit ec => { case Participants(Participant(alpha, Seq(operator)), Participant(_, Seq(_))) => + // Transaction + // └─ #0 Exercise AcceptTriProposal + // ├─ #1 Fetch (filtered out) + // └─ #2 Exercise TriProposalAccept + // └─ #3 Create TriAgreement + for { + agreementFactory <- alpha.create(operator, new AgreementFactory(operator, operator)) + agreement <- + alpha.exerciseAndGetContract[Agreement.ContractId, Agreement]( + operator, + agreementFactory.exerciseAgreementFactoryAccept(), + ) + triProposalTemplate = new TriProposal(operator, operator, operator) + triProposal <- alpha.create(operator, triProposalTemplate) + txTree <- eventually("exerciseAcceptTriProposal") { + alpha.exercise(operator, agreement.exerciseAcceptTriProposal(triProposal)) + } + } yield { + val exercisedEvents = TransactionHelpers.exercisedEvents(txTree) + assertLength( + "Transaction should contain two exercised events", + 2, + exercisedEvents, + ) + val createdEvents = TransactionHelpers.createdEvents(txTree) + val createdEvent = assertSingleton( + "Transaction should contain the created event", + createdEvents, + ) + + // created event should be descendant 
of both exercised events + exercisedEvents.foreach(exercisedEvent => + assert( + isDescendant( + who = createdEvent.nodeId, + of = Event(Exercised(exercisedEvent)), + ), + s"The created event $createdEvent should have been descendant of the exercised event $exercisedEvent", + ) + ) + + val seminalExercisedEvent = assertSingleton( + "Transaction should contain at least one exercised event", + exercisedEvents.headOption.toList, + ) + + // exercised events should be descendant of the first exercised event + exercisedEvents.foreach(exercisedEvent => + assert( + isDescendant( + who = exercisedEvent.nodeId, + of = Event(Exercised(seminalExercisedEvent)), + ), + s"The exercised event $exercisedEvent should have been descendant of the first exercised event $seminalExercisedEvent", + ) + ) + + } + }) + + test( + "NonDescendantsRollbackFetch", + "Events that are not descendant in transaction with rollback of fetch", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(alpha, Seq(party))) => + // Transaction + // ├─ #0 Exercise RollbackFetch + // │ └─ #1 Rollback (filtered out) + // │ └─ #2 Fetch (filtered out) + // └─ #3 Exercise Noop + for { + t <- alpha.create(party, new ExceptionTester(party)) + tFetch <- alpha.create(party, new ExceptionTester(party)) + commands = t.exerciseRollbackFetch(tFetch).commands().asScala + ++ tFetch.exerciseNoop.commands().asScala + ledgerEffects <- alpha + .submitAndWaitForTransaction( + alpha.submitAndWaitForTransactionRequest(party, commands.asJava, LedgerEffects) + ) + .map(_.getTransaction) + + } yield { + val exercisedEvents = TransactionHelpers.exercisedEvents(ledgerEffects) + assertLength( + "Transaction should contain two exercised events", + 2, + exercisedEvents, + ) + val first = assertSingleton( + "exercised events should contain at least one event", + exercisedEvents.headOption.toList, + ) + val second = assertSingleton( + "exercised events should contain at least one event", + exercisedEvents.lastOption.toList, + ) + + // exercised events should not be descendant of each other + Seq(first -> second, second -> first).foreach { case (event1, event2) => + assert( + !isDescendant( + who = event1.nodeId, + of = Event(Exercised(event2)), + ), + s"The exercised event $event1 should have NOT been descendant of the exercised event $event2", + ) + } + + } + }) + + test( + "DescendantsRollbackFetch", + "Descendant events in transaction with rollback of fetch", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(alpha, Seq(party))) => + // Transaction + // └─ #0 Exercise RollbackFetchWithExercise + // ├─ #1 Rollback (filtered out) + // │ └─ #2 Fetch (filtered out) + // └─ #3 Exercise Noop + for { + t <- alpha.create(party, new ExceptionTester(party)) + tFetch <- alpha.create(party, new ExceptionTester(party)) + txTree <- alpha.exercise(party, t.exerciseRollbackFetchWithExercise(tFetch)) + } yield { + val exercisedEvents = TransactionHelpers.exercisedEvents(txTree) + assertLength( + "Transaction should contain two exercised events", + 2, + exercisedEvents, + ) + val seminalExercisedEvent = assertSingleton( + "Transaction should contain at least one exercised event", + exercisedEvents.headOption.toList, + ) + + val events = txTree.events + + // events should be descendant of the first exercised event + events.foreach(event => + assert( + isDescendant( + who = event.nodeId, + of = Event(Exercised(seminalExercisedEvent)), + ), + s"The event $event should have been descendant of the first exercised event 
$seminalExercisedEvent", + ) + ) + + } + }) + + test( + "DescendantsRollbackExercise", + "Descendant events in transaction with rollback of exercise", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(alpha, Seq(party))) => + // Transaction + // └─ #0 Exercise RollbackConsumingWithCreate + // ├─ #1 Rollback (filtered out) + // │ └─ #2 Exercise (filtered out) + // └─ #3 Create ExceptionTester + for { + t <- alpha.create(party, new ExceptionTester(party)) + tExercise <- alpha.create(party, new ExceptionTester(party)) + txTree <- alpha.exercise(party, t.exerciseRollbackConsumingWithCreate(tExercise)) + } yield { + val exercisedEvents = TransactionHelpers.exercisedEvents(txTree) + val seminalExercisedEvent = assertSingleton( + "Transaction should one exercised event", + exercisedEvents, + ) + + val events = txTree.events + + // events should be descendant of the first exercised event + events.foreach(event => + assert( + isDescendant( + who = event.nodeId, + of = Event(Exercised(seminalExercisedEvent)), + ), + s"The event $event should have been descendant of the first exercised event $seminalExercisedEvent", + ) + ) + + } + }) + + test( + "DescendantsRollbackCreate", + "Descendant events in transaction with rollback of create", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(alpha, Seq(party))) => + // Transaction + // └─ #0 Exercise RollbackCreateWithExercise + // ├─ #1 Rollback (filtered out) + // │ └─ #2 Exercise RolledBackChoice (filtered out) + // │ └─ #3 Create (filtered out) + // └─ #4 Exercise Noop + for { + t <- alpha.create(party, new ExceptionTester(party)) + txTree <- alpha.exercise(party, t.exerciseRollbackCreateWithExercise()) + } yield { + val exercisedEvents = TransactionHelpers.exercisedEvents(txTree) + assertLength( + "Transaction should contain two exercised events", + 2, + exercisedEvents, + ) + val seminalExercisedEvent = assertSingleton( + "Transaction should contain at least one exercised event", + exercisedEvents.headOption.toList, + ) + + val events = txTree.events + + // events should be descendant of the first exercised event + events.foreach(event => + assert( + isDescendant( + who = event.nodeId, + of = Event(Exercised(seminalExercisedEvent)), + ), + s"The event $event should have been descendant of the first exercised event $seminalExercisedEvent", + ) + ) + } + }) + +} + +object EventsDescendantsIT { + def isDescendant(who: Int, of: Event): Boolean = { + val nodeId = of.nodeId + val lastDescendantNodeId = of.event.exercised.fold(nodeId)(_.lastDescendantNodeId) + + who >= nodeId && who <= lastDescendantNodeId + } + + private object CompanionImplicits { + implicit val exceptionTesterCompanion: ContractCompanion.WithoutKey[ + ExceptionTester.Contract, + ExceptionTester.ContractId, + ExceptionTester, + ] = ExceptionTester.COMPANION + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ExceptionRaceConditionIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ExceptionRaceConditionIT.scala new file mode 100644 index 0000000000..e07916045a --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ExceptionRaceConditionIT.scala @@ -0,0 +1,288 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+
+package com.daml.ledger.api.testtool.suites.v2_dev
+
+import com.daml.ledger.api.testtool.infrastructure.Allocation.*
+import com.daml.ledger.api.testtool.infrastructure.Assertions.*
+import com.daml.ledger.api.testtool.infrastructure.RaceConditionTests.*
+import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext
+import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, Party}
+import com.daml.ledger.api.v2.event.Event
+import com.daml.ledger.api.v2.transaction.Transaction
+import com.daml.ledger.api.v2.value.RecordField
+import com.daml.ledger.javaapi.data.codegen.ContractCompanion
+import com.daml.ledger.test.java.experimental.exceptionracetests.{
+  ContractWithKey,
+  CreateWrapper,
+  ExerciseWrapper,
+  FetchWrapper,
+  LookupWrapper,
+}
+
+import scala.annotation.nowarn
+import scala.concurrent.{ExecutionContext, Future}
+
+final class ExceptionRaceConditionIT extends LedgerTestSuite {
+
+  import ExceptionRaceConditionIT.CompanionImplicits.*
+  import ExceptionRaceConditionIT.ExceptionRaceTests
+
+  raceConditionTest(
+    "RWRollbackCreateVsNonTransientCreate",
+    "Cannot create a contract in a rollback and a non-transient contract with the same key",
+  ) { implicit ec => ledger => alice =>
+    for {
+      wrapper <- ledger.create(alice, new CreateWrapper(alice))
+      _ <- executeRepeatedlyWithRandomDelay(
+        numberOfAttempts = 20,
+        once = ledger.create(alice, new ContractWithKey(alice)).map(_ => ()),
+        repeated =
+          ledger.exercise(alice, wrapper.exerciseCreateWrapper_CreateRollback()).map(_ => ()),
+      )
+      transactions <- transactions(ledger, alice)
+    } yield {
+      import ExceptionRaceConditionIT.TransactionUtil.*
+
+      // We deliberately allow situations where no non-transient contract is created and verify the transaction
+      // order when such a contract is actually created.
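+      // Each successful rollback-create must then have committed before the non-transient create
+      // claimed the key; the order assertion below checks exactly this.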
+      transactions.find(isCreate(_, ExceptionRaceTests.ContractWithKey.TemplateName)).foreach {
+        nonTransientCreateTransaction =>
+          transactions
+            .filter(isExercise(_, ExceptionRaceTests.CreateWrapper.ChoiceCreateRollback))
+            .foreach(assertTransactionOrder(_, nonTransientCreateTransaction))
+      }
+    }
+  }
+
+  raceConditionTest(
+    "RWArchiveVsRollbackNonConsumingChoice",
+    "Cannot exercise a non-consuming choice in a rollback after a contract archival",
+  ) { implicit ec => ledger => alice =>
+    for {
+      wrapper: ExerciseWrapper.ContractId <- ledger.create(alice, new ExerciseWrapper(alice))
+      contract: ContractWithKey.ContractId <- ledger.create(alice, new ContractWithKey(alice))
+      _ <- executeRepeatedlyWithRandomDelay(
+        numberOfAttempts = 10,
+        once = ledger.exercise(alice, contract.exerciseContractWithKey_Archive()),
+        repeated = ledger.exercise(
+          alice,
+          wrapper.exerciseExerciseWrapper_ExerciseNonConsumingRollback(contract),
+        ),
+      )
+      transactions <- transactions(ledger, alice)
+    } yield {
+      import ExceptionRaceConditionIT.TransactionUtil.*
+      val archivalTransaction = assertSingleton("archivals", transactions.filter(isArchival))
+      transactions
+        .filter(isExercise(_, ExceptionRaceTests.ExerciseWrapper.ChoiceNonConsumingRollback))
+        .foreach(assertTransactionOrder(_, archivalTransaction))
+    }
+  }
+
+  raceConditionTest(
+    "RWArchiveVsRollbackConsumingChoice",
+    "Cannot exercise a consuming choice in a rollback after a contract archival",
+  ) { implicit ec => ledger => alice =>
+    for {
+      wrapper: ExerciseWrapper.ContractId <- ledger.create(alice, new ExerciseWrapper(alice))
+      contract: ContractWithKey.ContractId <- ledger.create(alice, new ContractWithKey(alice))
+      _ <- executeRepeatedlyWithRandomDelay(
+        numberOfAttempts = 10,
+        once = ledger.exercise(alice, contract.exerciseContractWithKey_Archive()),
+        repeated = ledger.exercise(
+          alice,
+          wrapper.exerciseExerciseWrapper_ExerciseConsumingRollback(contract),
+        ),
+      )
+      transactions <- transactions(ledger, alice)
+    } yield {
+      import ExceptionRaceConditionIT.TransactionUtil.*
+      val archivalTransaction = assertSingleton("archivals", transactions.filter(isArchival))
+      transactions
+        .filter(isExercise(_, ExceptionRaceTests.ExerciseWrapper.ChoiceConsumingRollback))
+        .foreach(assertTransactionOrder(_, archivalTransaction))
+    }
+  }
+
+  raceConditionTest(
+    "RWArchiveVsRollbackFetch",
+    "Cannot fetch in a rollback after a contract archival",
+  ) { implicit ec => ledger => alice =>
+    for {
+      contract: ContractWithKey.ContractId <- ledger.create(alice, new ContractWithKey(alice))
+      fetchContract: FetchWrapper.ContractId <- ledger.create(
+        alice,
+        new FetchWrapper(alice, contract),
+      )
+      _ <- executeRepeatedlyWithRandomDelay(
+        numberOfAttempts = 10,
+        once = ledger.exercise(alice, contract.exerciseContractWithKey_Archive()),
+        repeated = ledger.exercise(alice, fetchContract.exerciseFetchWrapper_Fetch()),
+      )
+      transactions <- transactions(ledger, alice)
+    } yield {
+      import ExceptionRaceConditionIT.TransactionUtil.*
+      val archivalTransaction = assertSingleton("archivals", transactions.filter(isArchival))
+      transactions
+        .filter(isExercise(_, ExceptionRaceTests.FetchWrapper.ChoiceFetch))
+        .foreach(assertTransactionOrder(_, archivalTransaction))
+    }
+  }
+
+  raceConditionTest(
+    "RWArchiveVsRollbackLookupByKey",
+    "Cannot successfully look up by key in a rollback after a contract archival",
+  ) { implicit ec => ledger => alice =>
+    for {
+      contract: ContractWithKey.ContractId <- ledger.create(alice, new ContractWithKey(alice))
+      looker:
LookupWrapper.ContractId <- ledger.create(alice, new LookupWrapper(alice)) + _ <- executeRepeatedlyWithRandomDelay( + numberOfAttempts = 20, + once = ledger.exercise(alice, contract.exerciseContractWithKey_Archive()), + repeated = ledger.exercise(alice, looker.exerciseLookupWrapper_Lookup()), + ) + transactions <- transactions(ledger, alice) + } yield { + import ExceptionRaceConditionIT.TransactionUtil.* + val archivalTransaction = assertSingleton("archivals", transactions.filter(isArchival)) + transactions + .filter(isRollbackContractLookup(success = true)) + .foreach(assertTransactionOrder(_, archivalTransaction)) + } + } + + raceConditionTest( + "RWArchiveVsRollbackFailedLookupByKey", + "Lookup by key in a rollback cannot fail after a contract creation", + ) { implicit ec => ledger => alice => + for { + looker: LookupWrapper.ContractId <- ledger.create(alice, new LookupWrapper(alice)) + _ <- executeRepeatedlyWithRandomDelay( + numberOfAttempts = 5, + once = ledger.create(alice, new ContractWithKey(alice)), + repeated = ledger.exercise(alice, looker.exerciseLookupWrapper_Lookup()), + ): @nowarn("cat=lint-infer-any") + transactions <- transactions(ledger, alice) + } yield { + import ExceptionRaceConditionIT.TransactionUtil.* + val createNonTransientTransaction = assertSingleton( + "create-non-transient transactions", + transactions.filter(isCreate(_, ExceptionRaceTests.ContractWithKey.TemplateName)), + ) + transactions + .filter(isRollbackContractLookup(success = false)) + .foreach(assertTransactionOrder(_, createNonTransientTransaction)) + } + } + + private def raceConditionTest( + shortIdentifier: String, + description: String, + repeated: Int = DefaultRepetitionsNumber, + runConcurrently: Boolean = false, + )(testCase: ExecutionContext => ParticipantTestContext => Party => Future[Unit]): Unit = + test( + shortIdentifier = shortIdentifier, + description = description, + partyAllocation = allocate(SingleParty), + repeated = repeated, + runConcurrently = runConcurrently, + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + testCase(ec)(ledger)(party) + }) +} + +object ExceptionRaceConditionIT { + object TransactionUtil { + + private implicit class TransactionTestOps(tx: Transaction) { + def hasEventsNumber(expectedNumberOfEvents: Int): Boolean = + tx.events.sizeIs == expectedNumberOfEvents + + def containsEvent(condition: Event => Boolean): Boolean = + tx.events.toList.exists(condition) + } + + private def isCreated(templateName: String)(event: Event): Boolean = + event.event.isCreated && event.getCreated.templateId.exists(_.entityName == templateName) + + private def isExerciseEvent(choiceName: String)(event: Event): Boolean = + event.event.isExercised && event.getExercised.choice == choiceName + + def isCreate(tx: Transaction, templateName: String): Boolean = + tx.hasEventsNumber(1) && + tx.containsEvent(isCreated(templateName)) + + def isExercise(tx: Transaction, choiceName: String): Boolean = + tx.hasEventsNumber(1) && + tx.containsEvent(isExerciseEvent(choiceName)) + + def isArchival(tx: Transaction): Boolean = + tx.hasEventsNumber(1) && + tx.containsEvent(isExerciseEvent(ExceptionRaceTests.ContractWithKey.ChoiceArchive)) + + private def isFoundContractField(found: Boolean)(field: RecordField) = + field.label == "found" && field.value.exists(_.getBool == found) + + def isRollbackContractLookup(success: Boolean)(tx: Transaction): Boolean = + tx.containsEvent { event => + isCreated(ExceptionRaceTests.LookupResult.TemplateName)(event) && + 
event.getCreated.getCreateArguments.fields.exists(isFoundContractField(found = success)) + } + } + + object ExceptionRaceTests { + object ContractWithKey { + val TemplateName = "ContractWithKey" + val ChoiceArchive = "ContractWithKey_Archive" + } + + object FetchWrapper { + val ChoiceFetch = "FetchWrapper_Fetch" + } + + object LookupResult { + val TemplateName = "LookupResult" + } + + object CreateWrapper { + val ChoiceCreateRollback = "CreateWrapper_CreateRollback" + } + + object ExerciseWrapper { + val ChoiceNonConsumingRollback = "ExerciseWrapper_ExerciseNonConsumingRollback" + val ChoiceConsumingRollback = "ExerciseWrapper_ExerciseConsumingRollback" + } + } + + private object CompanionImplicits { + implicit val createWrapperCompanion: ContractCompanion.WithoutKey[ + CreateWrapper.Contract, + CreateWrapper.ContractId, + CreateWrapper, + ] = CreateWrapper.COMPANION + implicit val contractWithKeyCompanion: ContractCompanion.WithKey[ + ContractWithKey.Contract, + ContractWithKey.ContractId, + ContractWithKey, + String, + ] = ContractWithKey.COMPANION + implicit val lookupWrapperCompanion: ContractCompanion.WithoutKey[ + LookupWrapper.Contract, + LookupWrapper.ContractId, + LookupWrapper, + ] = LookupWrapper.COMPANION + implicit val fetchWrapperCompanion: ContractCompanion.WithoutKey[ + FetchWrapper.Contract, + FetchWrapper.ContractId, + FetchWrapper, + ] = FetchWrapper.COMPANION + implicit val exerciseWrapperCompanion: ContractCompanion.WithoutKey[ + ExerciseWrapper.Contract, + ExerciseWrapper.ContractId, + ExerciseWrapper, + ] = ExerciseWrapper.COMPANION + } + +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ExceptionsIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ExceptionsIT.scala new file mode 100644 index 0000000000..41cca966fc --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/ExceptionsIT.scala @@ -0,0 +1,450 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.suites.v2_dev + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.api.testtool.infrastructure.TransactionHelpers.* +import com.daml.ledger.javaapi.data.codegen.ContractCompanion +import com.daml.ledger.test.java.experimental.da.types +import com.daml.ledger.test.java.experimental.exceptions.{ + Divulger, + ExceptionTester, + Fetcher, + Informer, + RollbackNestingHelper, + WithKey, + WithKeyDelegate, + WithSimpleKey, +} +import com.digitalasset.base.error.{ErrorCategory, ErrorCode} +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.ledger.error.groups.{CommandExecutionErrors, ConsistencyErrors} + +import java.lang +import scala.jdk.CollectionConverters.* + +final class ExceptionsIT extends LedgerTestSuite { + import ExceptionsIT.CompanionImplicits.* + + test( + "ExUncaught", + "Uncaught exception returns INVALID_ARGUMENT", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + t <- ledger.create(party, new ExceptionTester(party)) + failure <- ledger.exercise(party, t.exerciseThrowUncaught()).mustFail("Unhandled exception") + } yield { + assertGrpcError( + failure, + new ErrorCode( + CommandExecutionErrors.Interpreter.FailureStatus.id, + ErrorCategory.InvalidGivenCurrentSystemStateOther, + )( + CommandExecutionErrors.Interpreter.FailureStatus.parent + ) {}, + Some("UNHANDLED_EXCEPTION"), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "ExCaughtBasic", + "Exceptions can be caught", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + t <- ledger.create(party, new ExceptionTester(party)) + tree <- ledger.exercise(party, t.exerciseThrowCaught()) + } yield { + assertLength(s"1 successful exercise", 1, exercisedEvents(tree)).discard + } + }) + + test( + "ExCaughtNested", + "Exceptions can be caught when thrown from a nested try block", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + t <- ledger.create(party, new ExceptionTester(party)) + tree <- ledger.exercise(party, t.exerciseNestedCatch()) + } yield { + assertLength(s"1 successful exercise", 1, exercisedEvents(tree)).discard + } + }) + + test( + "ExRollbackActiveFetch", + "Rollback node depends on activeness of contract in a fetch", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + t <- ledger.create(party, new ExceptionTester(party)) + tFetch <- ledger.create(party, new ExceptionTester(party)) + _ <- ledger.exercise(party, t.exerciseRollbackFetch(tFetch)) + _ <- ledger.exercise(party, tFetch.exerciseArchive()) + failure <- ledger + .exercise(party, t.exerciseRollbackFetch(tFetch)) + .mustFail("contract is archived") + } yield { + assertGrpcError( + failure, + ConsistencyErrors.ContractNotFound, + Some("Contract could not be found"), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "ExRollbackActiveExerciseConsuming", + "Rollback node depends on activeness of contract in a consuming exercise", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + t <- ledger.create(party, new ExceptionTester(party)) + tExercise <- ledger.create(party, new ExceptionTester(party)) + _ <- 
ledger.exercise(party, t.exerciseRollbackConsuming(tExercise)) + _ <- ledger.exercise(party, tExercise.exerciseArchive()) + failure <- ledger + .exercise(party, t.exerciseRollbackConsuming(tExercise)) + .mustFail("contract is archived") + } yield { + assertGrpcError( + failure, + ConsistencyErrors.ContractNotFound, + Some("Contract could not be found"), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "ExRollbackActiveExerciseNonConsuming", + "Rollback node depends on activeness of contract in a non-consuming exercise", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + t <- ledger.create(party, new ExceptionTester(party)) + tExercise <- ledger.create(party, new ExceptionTester(party)) + _ <- ledger.exercise(party, t.exerciseRollbackNonConsuming(tExercise)) + _ <- ledger.exercise(party, tExercise.exerciseArchive()) + failure <- ledger + .exercise(party, t.exerciseRollbackNonConsuming(tExercise)) + .mustFail("contract is archived") + } yield { + assertGrpcError( + failure, + ConsistencyErrors.ContractNotFound, + Some("Contract could not be found"), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "ExRolledbackArchiveConsuming", + "Rolled back archive does not block consuming exercise", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + t <- ledger.create(party, new ExceptionTester(party)) + withKey <- ledger.create(party, new WithSimpleKey(party)) + _ <- ledger.exercise(party, t.exerciseRolledbackArchiveConsuming(withKey)) + } yield () + }) + + test( + "ExRolledbackArchiveNonConsuming", + "Rolled back archive does not block non-consuming exercise", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + t <- ledger.create(party, new ExceptionTester(party)) + withKey <- ledger.create(party, new WithSimpleKey(party)) + _ <- ledger.exercise(party, t.exerciseRolledbackArchiveNonConsuming(withKey)) + } yield () + }) + + test( + "ExRolledbackKeyCreation", + "Rolled back key creation does not block creation of the same key", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + t <- ledger.create(party, new ExceptionTester(party)) + _ <- ledger.exercise(party, t.exerciseRolledbackDuplicateKey()) + } yield () + }) + + test( + "ExRollbackDuplicateKeyCreated", + "Rollback fails once contract with same key is created", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + t <- ledger.create(party, new ExceptionTester(party)) + _ <- ledger.exercise(party, t.exerciseDuplicateKey()) + _ <- ledger.create(party, new WithSimpleKey(party)) + failure <- ledger.exercise(party, t.exerciseDuplicateKey()).mustFail("duplicate key") + } yield { + assertGrpcError( + failure, + ConsistencyErrors.DuplicateContractKey, + Some("DuplicateKey"), + checkDefiniteAnswerMetadata = true, + ) + } + }) + + test( + "ExRollbackDuplicateKeyArchived", + "Rollback succeeds once contract with same key is archived", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + for { + t <- ledger.create(party, new ExceptionTester(party)) + withKey <- ledger.create(party, new WithSimpleKey(party)) + failure <- ledger.exercise(party, t.exerciseDuplicateKey()).mustFail("duplicate key") + _ = assertGrpcError( + failure, + ConsistencyErrors.DuplicateContractKey, + Some("DuplicateKey"), + 
checkDefiniteAnswerMetadata = true,
+      )
+      _ <- ledger.exercise(party, withKey.exerciseArchive())
+      _ <- ledger.exercise(party, t.exerciseDuplicateKey())
+    } yield ()
+  })
+
+  test(
+    "ExRollbackKeyFetchCreated",
+    "Rollback with key fetch fails once contract is archived",
+    allocate(SingleParty),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    for {
+      t <- ledger.create(party, new ExceptionTester(party))
+      withKey <- ledger.create(party, new WithSimpleKey(party))
+      _ <- ledger.exercise(party, t.exerciseFetchKey())
+      _ <- ledger.exercise(party, withKey.exerciseArchive())
+      failure <- ledger.exercise(party, t.exerciseFetchKey()).mustFail("couldn't find key")
+    } yield {
+      assertGrpcError(
+        failure,
+        CommandExecutionErrors.Interpreter.LookupErrors.ContractKeyNotFound,
+        Some("couldn't find key"),
+        checkDefiniteAnswerMetadata = true,
+      )
+      ()
+    }
+  })
+
+  test(
+    "ExRollbackKeyFetchArchived",
+    "Rollback with key fetch succeeds once contract is created",
+    allocate(SingleParty),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    for {
+      t <- ledger.create(party, new ExceptionTester(party))
+      failure <- ledger.exercise(party, t.exerciseFetchKey()).mustFail("contract not found")
+      _ = assertGrpcError(
+        failure,
+        CommandExecutionErrors.Interpreter.LookupErrors.ContractKeyNotFound,
+        Some("couldn't find key"),
+        checkDefiniteAnswerMetadata = true,
+      )
+      _ <- ledger.create(party, new WithSimpleKey(party))
+      _ <- ledger.exercise(party, t.exerciseFetchKey())
+    } yield ()
+  })
+
+  test(
+    "ExRollbackHidden",
+    "Create and exercise in rollback node is not exposed on ledger API",
+    allocate(SingleParty),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    for {
+      t <- ledger.create(party, new ExceptionTester(party))
+      tree <- ledger.exercise(party, t.exerciseRollbackCreate())
+    } yield {
+      // Create node should not be included
+      assertLength(s"no creates", 0, createdEvents(tree)).discard
+      // Only the root exercise should be included, not the one in the rollback node.
+      val exercise = assertSingleton(s"1 exercise", exercisedEvents(tree))
+      assert(exercise.choice == "RollbackCreate", "Choice name mismatch")
+      ()
+    }
+  })
+
+  test(
+    "ExRollbackProjectionNormalization",
+    "Projection normalization is correctly applied",
+    allocate(SingleParty, SingleParty, SingleParty),
+  )(implicit ec => {
+    // We cannot test projection & normalization directly via the ledger API
+    // since rollback nodes are erased, so this test only ensures
+    // that the code paths for this are exercised and do not
+    // throw errors.
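+    // The Informer contracts below disclose to bParty, to cParty, and to both, so each
+    // participant receives a different projection of the submitted transaction.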
+
+    case p @ Participants(
+          Participant(aLedger, Seq(aParty)),
+          Participant(bLedger, Seq(bParty)),
+          Participant(cLedger, Seq(cParty)),
+        ) =>
+      for {
+        abInformer <- aLedger.create(aParty, new Informer(aParty, List(bParty.getValue).asJava))
+        acInformer <- aLedger.create(aParty, new Informer(aParty, List(cParty.getValue).asJava))
+        abcInformer <- aLedger.create(
+          aParty,
+          new Informer(aParty, List(bParty, cParty).map(_.getValue).asJava),
+        )
+        keyDelegate <- bLedger.create(bParty, new WithKeyDelegate(aParty, bParty))
+        _ <- p.synchronize
+        tester <- aLedger.create(aParty, new ExceptionTester(aParty))
+        _ <- aLedger.exercise(
+          aParty,
+          tester.exerciseProjectionNormalization(
+            bParty,
+            keyDelegate,
+            abInformer,
+            acInformer,
+            abcInformer,
+          ),
+        )
+      } yield ()
+  })
+
+  test(
+    "ExRollbackProjectionNesting",
+    "Nested rollback nodes are handled properly",
+    allocate(SingleParty, SingleParty, SingleParty),
+  )(implicit ec => {
+    // We cannot test projection & normalization directly via the ledger API
+    // since rollback nodes are erased, so this test only ensures
+    // that the code paths for this are exercised and do not
+    // throw errors.
+    case p @ Participants(
+          Participant(aLedger, Seq(aParty)),
+          Participant(bLedger, Seq(bParty)),
+          Participant(cLedger, Seq(cParty)),
+        ) =>
+      for {
+        keyDelegate <- bLedger.create(bParty, new WithKeyDelegate(aParty, bParty))
+        nestingHelper <- cLedger.create(cParty, new RollbackNestingHelper(aParty, bParty, cParty))
+        _ <- p.synchronize
+        tester <- aLedger.create(aParty, new ExceptionTester(aParty))
+        _ <- aLedger.exercise(
+          aParty,
+          tester.exerciseProjectionNesting(bParty, keyDelegate, nestingHelper),
+        )
+      } yield ()
+  })
+
+  test(
+    "ExCKRollbackGlobalArchivedLookup",
+    "Create with key succeeds after archive & rolledback negative lookup",
+    allocate(SingleParty),
+  )(implicit ec => {
+    case Participants(
+          Participant(ledger, Seq(party))
+        ) =>
+      for {
+        t <- ledger.create(party, new ExceptionTester(party))
+        withKey <- ledger.create(party, new WithSimpleKey(party))
+        _ <- ledger.exercise(party, t.exerciseRollbackGlobalArchivedLookup(withKey))
+      } yield ()
+  })
+
+  test(
+    "ExCKRollbackGlobalArchivedCreate",
+    "Create with key succeeds after archive & rolledback create",
+    allocate(SingleParty),
+  )(implicit ec => {
+    case Participants(
+          Participant(ledger, Seq(party))
+        ) =>
+      for {
+        t <- ledger.create(party, new ExceptionTester(party))
+        withKey <- ledger.create(party, new WithSimpleKey(party))
+        _ <- ledger.exercise(party, t.exerciseRollbackGlobalArchivedCreate(withKey))
+      } yield ()
+  })
+
+  test(
+    "ExRollbackCreate",
+    "Archiving a contract created within a rolled-back try-catch block fails",
+    allocate(SingleParty),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    for {
+      t <- ledger.create(party, new ExceptionTester(party))
+      failure <- ledger
+        .exercise(party, t.exerciseRollbackCreateBecomesInactive())
+        .mustFail("contract is inactive")
+    } yield {
+      assertGrpcError(
+        failure,
+        ConsistencyErrors.ContractNotFound,
+        Some("Contract could not be found"),
+        checkDefiniteAnswerMetadata = true,
+      )
+    }
+  })
+
+  test(
+    "ExRollbackExerciseCreateLookup",
+    "Lookup by key succeeds after a rolled-back archive and create of the key contract",
+    allocate(SingleParty),
+  )(implicit ec => { case Participants(Participant(ledger, Seq(party))) =>
+    for {
+      helper <- ledger.create(party, new ExceptionTester(party))
+      withKey <- ledger.create(party, new WithSimpleKey(party))
+      _ <-
ledger.exercise(party, helper.exerciseRollbackExerciseCreateLookup(withKey)) + } yield () + }) + +} + +object ExceptionsIT { + private object CompanionImplicits { + implicit val exceptionTesterCompanion: ContractCompanion.WithoutKey[ + ExceptionTester.Contract, + ExceptionTester.ContractId, + ExceptionTester, + ] = ExceptionTester.COMPANION + implicit val withSimpleKeyCompanion: ContractCompanion.WithKey[ + WithSimpleKey.Contract, + WithSimpleKey.ContractId, + WithSimpleKey, + String, + ] = WithSimpleKey.COMPANION + implicit val withKeyCompanion: ContractCompanion.WithKey[ + WithKey.Contract, + WithKey.ContractId, + WithKey, + types.Tuple2[String, lang.Long], + ] = WithKey.COMPANION + implicit val informerCompanion + : ContractCompanion.WithoutKey[Informer.Contract, Informer.ContractId, Informer] = + Informer.COMPANION + implicit val withKeyDelegateCompanion: ContractCompanion.WithoutKey[ + WithKeyDelegate.Contract, + WithKeyDelegate.ContractId, + WithKeyDelegate, + ] = WithKeyDelegate.COMPANION + implicit val divulgerCompanion + : ContractCompanion.WithoutKey[Divulger.Contract, Divulger.ContractId, Divulger] = + Divulger.COMPANION + implicit val fetcherCompanion + : ContractCompanion.WithoutKey[Fetcher.Contract, Fetcher.ContractId, Fetcher] = + Fetcher.COMPANION + implicit val rollbackNestingHelperCompanion: ContractCompanion.WithoutKey[ + RollbackNestingHelper.Contract, + RollbackNestingHelper.ContractId, + RollbackNestingHelper, + ] = RollbackNestingHelper.COMPANION + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/PrefetchContractKeysIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/PrefetchContractKeysIT.scala new file mode 100644 index 0000000000..275990131c --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/PrefetchContractKeysIT.scala @@ -0,0 +1,177 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.suites.v2_dev + +import com.daml.ledger.api.testtool.infrastructure.Allocation.{ + Participant, + Participants, + SingleParty, + allocate, +} +import com.daml.ledger.api.testtool.infrastructure.Assertions.{assertGrpcError, futureAssertions} +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.api.v2.commands +import com.daml.ledger.javaapi.data.PrefetchContractKey +import com.daml.ledger.test.java.experimental.da.types.Tuple2 +import com.daml.ledger.test.java.experimental.test.{TextKey, TextKeyOperations, WithKey} +import com.digitalasset.canton.ledger.error.groups.CommandExecutionErrors + +import scala.jdk.CollectionConverters.* +import scala.jdk.OptionConverters.* + +object PrefetchContractKeysIT { + implicit final class JavaBindingSupportExtension(val prefetch: PrefetchContractKey) + extends AnyVal { + def toProtoInner: commands.PrefetchContractKey = + commands.PrefetchContractKey.fromJavaProto(prefetch.toProto) + } +} + +class PrefetchContractKeysIT extends LedgerTestSuite { + import ContractKeysCompanionImplicits.* + import PrefetchContractKeysIT.* + + test( + "CSprefetchContractKeysBasic", + "Explicit contract key prefetches are accepted", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val prefetch = WithKey.byKey(party).toPrefetchKey().toProtoInner + val request = ledger + .submitAndWaitRequest(party, new WithKey(party).create.commands) + .update(_.commands.prefetchContractKeys := Seq(prefetch)) + for { + _ <- ledger.submitAndWait(request) + active <- ledger.activeContracts(Some(Seq(party))) + } yield { + assert(active.size == 1) + val dummyTemplateId = active.flatMap(_.templateId.toList).head + assert(dummyTemplateId == WithKey.TEMPLATE_ID_WITH_PACKAGE_ID.toV1) + } + }) + + test( + "CSprefetchContractKeysPrepareEndpointBasic", + "Explicit contract key prefetches are accepted by the prepare endpoint", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val prefetch = WithKey.byKey(party).toPrefetchKey().toProtoInner + val request = ledger + .prepareSubmissionRequest(party, new WithKey(party).create.commands) + .update(_.prefetchContractKeys := Seq(prefetch)) + for { + prepareResponse <- ledger.prepareSubmission(request) + } yield { + assert(prepareResponse.preparedTransaction.isDefined) + } + }) + + test( + "CSprefetchContractKeysWronglyTyped", + "Contract key prefetches with wrongly typed keys are rejected", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val prefetch = WithKey + .byKey(party) + .toPrefetchKey() + .toProtoInner + .update(_.templateId := TextKey.TEMPLATE_ID.toV1) + val request = ledger + .submitAndWaitRequest(party, new WithKey(party).create.commands) + .update(_.commands.prefetchContractKeys := Seq(prefetch)) + for { + failure <- ledger.submitAndWait(request).mustFail("wrongly typed key in prefetch list") + } yield assertGrpcError( + failure, + CommandExecutionErrors.Preprocessing.PreprocessingFailed, + Some("mismatching type"), + ) + }) + + test( + "CSprefetchContractKeysPrepareWronglyTyped", + "Contract key prefetches with wrongly typed keys are rejected by the prepare endpoint", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val prefetch = WithKey + .byKey(party) + .toPrefetchKey() + .toProtoInner + .update(_.templateId := TextKey.TEMPLATE_ID.toV1) + val request = ledger + 
.prepareSubmissionRequest(party, new WithKey(party).create.commands) + .update(_.prefetchContractKeys := Seq(prefetch)) + for { + failure <- ledger.prepareSubmission(request).mustFail("wrongly typed key in prefetch list") + } yield assertGrpcError( + failure, + CommandExecutionErrors.Preprocessing.PreprocessingFailed, + Some("mismatching type"), + ) + }) + + test( + "CSprefetchContractKeysMany", + "Prefetch many contract keys", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val numPrefetches = 1000 + val prefetches = + (1 to numPrefetches).map(i => + TextKey.byKey(new Tuple2(party, s"key$i")).toPrefetchKey().toProtoInner + ) + val existingKeyIndex = 10 + for { + textKeyContract <- ledger.create(party, new TextKey(party, "key10", Seq.empty.asJava)) + textKeyOps <- ledger.create(party, new TextKeyOperations(party)) + exerciseCommands = (1 to numPrefetches) + .flatMap(i => + textKeyOps + .exerciseTKOLookup( + new Tuple2(party, s"key$i"), + Option.when(i == existingKeyIndex)(textKeyContract).toJava, + ) + .commands + .asScala + ) + .asJava + request = ledger + .submitAndWaitRequest(party, exerciseCommands) + .update(_.commands.prefetchContractKeys := prefetches) + _ <- ledger.submitAndWait(request) + } yield () + }) + + test( + "CSprefetchContractPrepareKeysMany", + "Prefetch many contract keys on the prepare endpoint", + allocate(SingleParty), + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + val numPrefetches = 1000 + val prefetches = + (1 to numPrefetches).map(i => + TextKey.byKey(new Tuple2(party, s"key$i")).toPrefetchKey().toProtoInner + ) + val existingKeyIndex = 10 + for { + textKeyContract <- ledger.create(party, new TextKey(party, "key10", Seq.empty.asJava)) + textKeyOps <- ledger.create(party, new TextKeyOperations(party)) + exerciseCommands = (1 to numPrefetches) + .flatMap(i => + textKeyOps + .exerciseTKOLookup( + new Tuple2(party, s"key$i"), + Option.when(i == existingKeyIndex)(textKeyContract).toJava, + ) + .commands + .asScala + ) + .asJava + request = ledger + .prepareSubmissionRequest(party, exerciseCommands) + .update(_.prefetchContractKeys := prefetches) + _ <- ledger.prepareSubmission(request) + } yield () + }) +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/RaceConditionIT.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/RaceConditionIT.scala new file mode 100644 index 0000000000..ee2fa54bf6 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_dev/RaceConditionIT.scala @@ -0,0 +1,346 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.api.testtool.suites.v2_dev + +import com.daml.ledger.api.testtool.infrastructure.Allocation.* +import com.daml.ledger.api.testtool.infrastructure.Assertions.* +import com.daml.ledger.api.testtool.infrastructure.RaceConditionTests.* +import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext +import com.daml.ledger.api.testtool.infrastructure.{LedgerTestSuite, Party} +import com.daml.ledger.api.v2.event.Event +import com.daml.ledger.api.v2.transaction.Transaction +import com.daml.ledger.api.v2.value.RecordField +import com.daml.ledger.javaapi.data.codegen.ContractCompanion +import com.daml.ledger.test.java.experimental.racetests.{ + ContractWithKey, + CreateWrapper, + DummyContract, + FetchWrapper, + LookupWrapper, +} +import com.daml.timer.Delayed +import com.digitalasset.canton.discard.Implicits.DiscardOps + +import scala.annotation.nowarn +import scala.concurrent.duration.* +import scala.concurrent.{ExecutionContext, Future} +import scala.util.Success + +final class RaceConditionIT extends LedgerTestSuite { + implicit val contractWithKeyCompanion: ContractCompanion.WithKey[ + ContractWithKey.Contract, + ContractWithKey.ContractId, + ContractWithKey, + String, + ] = ContractWithKey.COMPANION + + raceConditionTest( + "WWDoubleNonTransientCreate", + "Cannot concurrently create multiple non-transient contracts with the same key", + runConcurrently = true, + ) { implicit ec => ledger => alice => + val attempts = (1 to 5).toVector + Future + .traverse(attempts) { _ => + ledger.create(alice, new ContractWithKey(alice)).transform(Success(_)) + } + .map { results => + assertSingleton( + "Successful contract creations", + results.filter(_.isSuccess), + ).discard + } + } + + raceConditionTest( + "WWDoubleArchive", + "Cannot archive the same contract multiple times", + runConcurrently = true, + ) { implicit ec => ledger => alice => + val attempts = (1 to 5).toVector + for { + contract <- ledger.create(alice, new ContractWithKey(alice)) + _ <- Future.traverse(attempts) { _ => + ledger.exercise(alice, contract.exerciseContractWithKey_Archive()).transform(Success(_)) + } + transactions <- transactions(ledger, alice) + } yield { + import RaceConditionIT.TransactionUtil.* + assertSingleton( + "Successful contract archivals", + transactions.filter(isArchival), + ).discard + } + } + + raceConditionTest( + "WWArchiveVsNonTransientCreate", + "Cannot create a contract with a key if that key is still used by another contract", + runConcurrently = true, + ) { implicit ec => ledger => alice => + /* + This test case is intended to catch a race condition ending up in two consecutive successful contract + create or archive commands. E.g.: + [create] [archive]-race-[create] + In case of a bug causing the second [create] to see a partial result of [archive] command we could end up + with two consecutive successful contract creations. 
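+     The validation below therefore walks the recorded transaction stream and checks that
+     successful non-transient creates and archives strictly alternate.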
+     */
+    for {
+      contract <- ledger.create(alice, new ContractWithKey(alice))
+      _ <- Delayed.by(500.millis)(())
+      createFuture = ledger.create(alice, new ContractWithKey(alice)).transform(Success(_))
+      exerciseFuture = ledger
+        .exercise(alice, contract.exerciseContractWithKey_Archive())
+        .transform(Success(_))
+      _ <- createFuture
+      _ <- exerciseFuture
+      _ <- ledger.create(
+        alice,
+        new DummyContract(alice),
+      )(
+        DummyContract.COMPANION
+      ) // Create a dummy contract to ensure that we're not stuck with previous commands
+      transactions <- transactions(ledger, alice)
+    } yield {
+      import RaceConditionIT.TransactionUtil.*
+
+      assert(
+        isCreateNonTransient(transactions.head),
+        "The first transaction is expected to be a contract creation",
+      )
+      assert(
+        transactions.exists(isCreateDummyContract),
+        "A dummy contract creation is missing. A possible reason might be reading the transactions stream in the test case before submitted commands had a chance to be processed.",
+      )
+
+      val (_, valid) =
+        transactions.filterNot(isCreateDummyContract).tail.foldLeft((transactions.head, true)) {
+          case ((previousTx, isValidSoFar), currentTx) =>
+            if (isValidSoFar) {
+              val valid = (isArchival(previousTx) && isCreateNonTransient(
+                currentTx
+              )) || (isCreateNonTransient(previousTx) && isArchival(currentTx))
+              (currentTx, valid)
+            } else {
+              (previousTx, isValidSoFar)
+            }
+        }
+
+      if (!valid)
+        fail(
+          s"""Invalid transaction sequence: ${transactions.map(printTransaction).mkString("\n")}"""
+        )
+    }
+  }
+
+  raceConditionTest(
+    "RWTransientCreateVsNonTransientCreate",
+    "Cannot create a transient contract and a non-transient contract with the same key",
+  ) { implicit ec => ledger => alice =>
+    for {
+      wrapper: CreateWrapper.ContractId <- ledger.create(alice, new CreateWrapper(alice))(
+        CreateWrapper.COMPANION
+      )
+      _ <- executeRepeatedlyWithRandomDelay(
+        numberOfAttempts = 20,
+        once = ledger.create(alice, new ContractWithKey(alice)).map(_ => ()),
+        repeated =
+          ledger.exercise(alice, wrapper.exerciseCreateWrapper_CreateTransient()).map(_ => ()),
+      )
+      transactions <- transactions(ledger, alice)
+    } yield {
+      import RaceConditionIT.TransactionUtil.*
+
+      // We deliberately allow situations where no non-transient contract is created and verify
+      // the transaction order when such a contract is actually created.
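+      // Once the non-transient contract is created, its key stays taken, so every successful
+      // transient create must have been sequenced before the non-transient create.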
+      transactions.find(isCreateNonTransient).foreach { nonTransientCreateTransaction =>
+        transactions
+          .filter(isTransientCreate)
+          .foreach(assertTransactionOrder(_, nonTransientCreateTransaction))
+      }
+    }
+  }
+
+  raceConditionTest(
+    "RWArchiveVsNonConsumingChoice",
+    "Cannot exercise a choice after a contract archival",
+  ) { implicit ec => ledger => alice =>
+    for {
+      contract <- ledger.create(alice, new ContractWithKey(alice))
+      _ <- executeRepeatedlyWithRandomDelay(
+        numberOfAttempts = 10,
+        once = ledger.exercise(alice, contract.exerciseContractWithKey_Archive()),
+        repeated = ledger.exercise(alice, contract.exerciseContractWithKey_Exercise()),
+      )
+      transactions <- transactions(ledger, alice)
+    } yield {
+      import RaceConditionIT.TransactionUtil.*
+      val archivalTransaction = assertSingleton("archivals", transactions.filter(isArchival))
+      transactions
+        .filter(isNonConsumingExercise)
+        .foreach(assertTransactionOrder(_, archivalTransaction))
+    }
+  }
+
+  raceConditionTest(
+    "RWArchiveVsFetch",
+    "Cannot fetch an archived contract",
+  ) { implicit ec => ledger => alice =>
+    for {
+      contract <- ledger.create(alice, new ContractWithKey(alice))
+      fetchContract <- ledger.create(alice, new FetchWrapper(alice, contract))(
+        FetchWrapper.COMPANION
+      )
+      _ <- executeRepeatedlyWithRandomDelay(
+        numberOfAttempts = 10,
+        once = ledger.exercise(alice, contract.exerciseContractWithKey_Archive()),
+        repeated = ledger.exercise(alice, fetchContract.exerciseFetchWrapper_Fetch()),
+      )
+      transactions <- transactions(ledger, alice)
+    } yield {
+      import RaceConditionIT.TransactionUtil.*
+      val archivalTransaction = assertSingleton("archivals", transactions.filter(isArchival))
+      transactions
+        .filter(isFetch)
+        .foreach(assertTransactionOrder(_, archivalTransaction))
+    }
+  }
+
+  raceConditionTest(
+    "RWArchiveVsLookupByKey",
+    "Cannot successfully look up an archived contract by key",
+  ) { implicit ec => ledger => alice =>
+    for {
+      contract <- ledger.create(alice, new ContractWithKey(alice))
+      looker <- ledger.create(alice, new LookupWrapper(alice))(LookupWrapper.COMPANION)
+      _ <- executeRepeatedlyWithRandomDelay(
+        numberOfAttempts = 20,
+        once = ledger.exercise(alice, contract.exerciseContractWithKey_Archive()),
+        repeated = ledger.exercise(alice, looker.exerciseLookupWrapper_Lookup()),
+      )
+      transactions <- transactions(ledger, alice)
+    } yield {
+      import RaceConditionIT.TransactionUtil.*
+      val archivalTransaction = assertSingleton("archivals", transactions.filter(isArchival))
+      transactions
+        .filter(isContractLookup(success = true))
+        .foreach(assertTransactionOrder(_, archivalTransaction))
+    }
+  }
+
+  raceConditionTest(
+    "RWArchiveVsFailedLookupByKey",
+    "Lookup by key cannot fail after a contract creation",
+  ) { implicit ec => ledger => alice =>
+    for {
+      looker <- ledger.create(alice, new LookupWrapper(alice))(LookupWrapper.COMPANION)
+      _ <- executeRepeatedlyWithRandomDelay(
+        numberOfAttempts = 5,
+        once = ledger.create(alice, new ContractWithKey(alice)),
+        repeated = ledger.exercise(alice, looker.exerciseLookupWrapper_Lookup()),
+      ): @nowarn("cat=lint-infer-any")
+      transactions <- transactions(ledger, alice)
+    } yield {
+      import RaceConditionIT.TransactionUtil.*
+      val createNonTransientTransaction = assertSingleton(
+        "create-non-transient transactions",
+        transactions.filter(isCreateNonTransient),
+      )
+      transactions
+        .filter(isContractLookup(success = false))
+        .foreach(assertTransactionOrder(_, createNonTransientTransaction))
+    }
+  }
+
+  private def raceConditionTest(
+      shortIdentifier:
String, + description: String, + repeated: Int = DefaultRepetitionsNumber, + runConcurrently: Boolean = false, + )(testCase: ExecutionContext => ParticipantTestContext => Party => Future[Unit]): Unit = + test( + shortIdentifier = shortIdentifier, + description = description, + partyAllocation = allocate(SingleParty), + repeated = repeated, + runConcurrently = runConcurrently, + )(implicit ec => { case Participants(Participant(ledger, Seq(party))) => + testCase(ec)(ledger)(party) + }) +} + +object RaceConditionIT { + object TransactionUtil { + + private implicit class TransactionTestOps(tx: Transaction) { + def hasEventsNumber(expectedNumberOfEvents: Int): Boolean = + tx.events.size == expectedNumberOfEvents + + def containsEvent(condition: Event => Boolean): Boolean = + tx.events.toList.exists(condition) + } + + private def isCreated(templateName: String)(event: Event): Boolean = + event.event.isCreated && event.getCreated.templateId.exists(_.entityName == templateName) + + private def isExerciseEvent(choiceName: String)(event: Event): Boolean = + event.event.isExercised && event.getExercised.choice == choiceName + + def isCreateDummyContract(tx: Transaction): Boolean = + tx.containsEvent(isCreated(RaceTests.DummyContract.TemplateName)) + + def isCreateNonTransient(tx: Transaction): Boolean = + tx.hasEventsNumber(1) && + tx.containsEvent(isCreated(RaceTests.ContractWithKey.TemplateName)) + + def isTransientCreate(tx: Transaction): Boolean = + tx.containsEvent(isExerciseEvent(RaceTests.CreateWrapper.ChoiceCreateTransient)) && + tx.containsEvent(isCreated(RaceTests.ContractWithKey.TemplateName)) + + def isArchival(tx: Transaction): Boolean = + tx.hasEventsNumber(1) && + tx.containsEvent(isExerciseEvent(RaceTests.ContractWithKey.ChoiceArchive)) + + def isNonConsumingExercise(tx: Transaction): Boolean = + tx.hasEventsNumber(1) && + tx.containsEvent(isExerciseEvent(RaceTests.ContractWithKey.ChoiceExercise)) + + def isFetch(tx: Transaction): Boolean = + tx.hasEventsNumber(1) && + tx.containsEvent(isExerciseEvent(RaceTests.FetchWrapper.ChoiceFetch)) + + private def isFoundContractField(found: Boolean)(field: RecordField) = + field.label == "found" && field.value.exists(_.getBool == found) + + def isContractLookup(success: Boolean)(tx: Transaction): Boolean = + tx.containsEvent { event => + isCreated(RaceTests.LookupResult.TemplateName)(event) && + event.getCreated.getCreateArguments.fields.exists(isFoundContractField(found = success)) + } + } + + private object RaceTests { + object ContractWithKey { + val TemplateName = "ContractWithKey" + val ChoiceArchive = "ContractWithKey_Archive" + val ChoiceExercise = "ContractWithKey_Exercise" + } + + object DummyContract { + val TemplateName = "DummyContract" + } + + object FetchWrapper { + val ChoiceFetch = "FetchWrapper_Fetch" + } + + object LookupResult { + val TemplateName = "LookupResult" + } + + object CreateWrapper { + val ChoiceCreateTransient = "CreateWrapper_CreateTransient" + } + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/test/scala/com/daml/ledger/api/testtool/suites/EventuallySpec.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/test/scala/com/daml/ledger/api/testtool/suites/EventuallySpec.scala new file mode 100644 index 0000000000..00659ed300 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/test/scala/com/daml/ledger/api/testtool/suites/EventuallySpec.scala @@ -0,0 +1,25 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. 
All rights reserved. + +package com.daml.ledger.api.testtool.suites + +import com.daml.ledger.api.testtool.infrastructure.Eventually.eventually +import com.daml.timer.RetryStrategy.TooManyAttemptsException +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AsyncWordSpec + +import scala.concurrent.Future +import scala.concurrent.duration.DurationInt + +class EventuallySpec extends AsyncWordSpec with Matchers { + + "eventually" should { + "enhance the exception message with the assertion name" in { + recoverToExceptionIf[TooManyAttemptsException] { + eventually(assertionName = "test", attempts = 1, firstWaitTime = 0.millis) { + Future.failed(new RuntimeException()) + } + }.map(_.message should startWith("test: ")) + } + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/test/scala/com/daml/ledger/api/testtool/suites/NamePickerSpec.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/test/scala/com/daml/ledger/api/testtool/suites/NamePickerSpec.scala new file mode 100644 index 0000000000..822dccefc5 --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/test/scala/com/daml/ledger/api/testtool/suites/NamePickerSpec.scala @@ -0,0 +1,78 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites + +import com.daml.ledger.api.testtool.infrastructure.NamePicker +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AsyncWordSpec + +class NamePickerSpec extends AsyncWordSpec with Matchers { + + "NamePicker" should { + "not have duplicates in the canonical alphabet" in { + val myPicker = NamePicker("aba") + myPicker.canon shouldBe "ab" + } + + "have min and max" in { + val myPicker = NamePicker("01ab") + myPicker.mx shouldBe 'b' + myPicker.mn shouldBe '0' + } + + "check for membership" in { + val myPicker = NamePicker("01ab") + myPicker.belongs("ab01") shouldBe true + myPicker.belongs("ab02") shouldBe false + myPicker.belongs("") shouldBe true + } + + "recognize all lowest" in { + val myPicker = NamePicker("01ab") + myPicker.allLowest("CCC") shouldBe false + myPicker.allLowest("aaa") shouldBe false + myPicker.allLowest("0a") shouldBe false + myPicker.allLowest("a0") shouldBe false + myPicker.allLowest("00") shouldBe true + myPicker.allLowest("0") shouldBe true + myPicker.allLowest("") shouldBe true + } + + "lower the input char" in { + val myPicker = NamePicker("01ab") + myPicker.lower('c') shouldBe None + myPicker.lower('b') shouldBe Some('a') + myPicker.lower('0') shouldBe None + } + + "lower the input string" in { + val myPicker = NamePicker("01ab") + myPicker.lower("abc") shouldBe None + myPicker.lower("00") shouldBe Some("0") + myPicker.lower("0") shouldBe Some("") + myPicker.lower("") shouldBe None + + myPicker.lower("ab") shouldBe Some("aa") + myPicker.lower("a0") shouldBe Some("1b") + } + + "lower the input string constrained" in { + val myPicker = NamePicker("01ab") + myPicker.lowerConstrained("abc", "aaa") shouldBe None + myPicker.lowerConstrained("aba", "aac") shouldBe None + + myPicker.lowerConstrained("aba", "abb") shouldBe None + myPicker.lowerConstrained("000", "00") shouldBe None + myPicker.lowerConstrained("0", "") shouldBe None + + myPicker.lowerConstrained("aa0", "aa") shouldBe None + myPicker.lowerConstrained("ab0", "aa") shouldBe Some("aab") + + myPicker.lowerConstrained("aba", "aaa") shouldBe Some("ab1") + myPicker.lowerConstrained("ab0", "aaa") shouldBe Some("aab") + 
myPicker.lowerConstrained("aab", "aaa") shouldBe Some("aaabbbbb") + myPicker.lowerConstrained("aaab", "aaa") shouldBe Some("aaaa") + } + } +} diff --git a/canton/community/ledger-test-tool/suites/lf-v2.1/src/test/scala/com/daml/ledger/api/testtool/suites/NamesSpec.scala b/canton/community/ledger-test-tool/suites/lf-v2.1/src/test/scala/com/daml/ledger/api/testtool/suites/NamesSpec.scala new file mode 100644 index 0000000000..069532af4a --- /dev/null +++ b/canton/community/ledger-test-tool/suites/lf-v2.1/src/test/scala/com/daml/ledger/api/testtool/suites/NamesSpec.scala @@ -0,0 +1,61 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.api.testtool.suites + +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import org.scalatest.AppendedClues +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpec + +class NamesSpec_V2Dev + extends NamesSpec(v2_dev.default(timeoutScaleFactor = 1) ++ v2_dev.optional(tlsConfig = None)) + +class NamesSpec(val allTestSuites: Vector[LedgerTestSuite]) + extends AnyWordSpec + with Matchers + with AppendedClues { + private val allTestSuiteNames = allTestSuites.map(_.name).sorted + + private val allTests = allTestSuites.flatMap(_.tests) + private val allTestIdentifiers = allTests.map(_.shortIdentifier) + private val allTestIdentifiersPerTestSuite = + allTestSuites.map(suite => suite.name -> suite.tests.map(_.shortIdentifier)) + private val allTestNames = allTests.map(_.name).sorted + + "test suite names" should { + "only contain letters" in { + all(allTestSuiteNames) should fullyMatch regex """[A-Za-z]+""".r + } + + "not be a prefix of any other name, so that each suite can be included independently" in { + allTestSuiteNames.foreach { name => + all(allTestSuiteNames.toSet - name) should not startWith name + } + } + } + + "test identifiers" should { + "only contain letters and numbers, and start with a letter" in { + all(allTestIdentifiers) should fullyMatch regex """[A-Za-z][A-Za-z0-9]*""".r + } + + "not be a prefix of or equal to any other name, so that each test can be included independently (per each test suite)" in { + allTestIdentifiersPerTestSuite.map { case (testSuiteName, testIdentifiers) => + testIdentifiers.zipWithIndex.foreach { case (testIdentifier, i) => + all( + testIdentifiers.drop(i + 1) + ) should not startWith testIdentifier withClue (s"test suite name: '$testSuiteName''") + } + } + } + } + + "full test names" should { + "be unique" in { + allTestNames.foreach { name => + allTestNames.filter(_ == name) should have size 1 + } + } + } +} diff --git a/canton/community/ledger-test-tool/tool/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/Tests.scala b/canton/community/ledger-test-tool/tool/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/Tests.scala new file mode 100644 index 0000000000..721d48f9c7 --- /dev/null +++ b/canton/community/ledger-test-tool/tool/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/Tests.scala @@ -0,0 +1,17 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+
+package com.daml.ledger.api.testtool
+
+import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite
+import com.digitalasset.canton.config.TlsClientConfig
+
+object Tests {
+  def default(timeoutScaleFactor: Double): Vector[LedgerTestSuite] =
+    suites.v2_1.default(timeoutScaleFactor)
+
+  def optional(tlsConfig: Option[TlsClientConfig]): Vector[LedgerTestSuite] =
+    suites.v2_1.optional(tlsConfig)
+
+  val lfVersion = "2.1"
+}
diff --git a/canton/community/ledger-test-tool/tool/lf-v2.dev/src/main/scala/com/daml/ledger/api/testtool/Tests.scala b/canton/community/ledger-test-tool/tool/lf-v2.dev/src/main/scala/com/daml/ledger/api/testtool/Tests.scala
new file mode 100644
index 0000000000..cea061bb72
--- /dev/null
+++ b/canton/community/ledger-test-tool/tool/lf-v2.dev/src/main/scala/com/daml/ledger/api/testtool/Tests.scala
@@ -0,0 +1,17 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.api.testtool
+
+import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite
+import com.digitalasset.canton.config.TlsClientConfig
+
+object Tests {
+  def default(timeoutScaleFactor: Double): Vector[LedgerTestSuite] =
+    suites.v2_dev.default(timeoutScaleFactor)
+
+  def optional(tlsConfig: Option[TlsClientConfig]): Vector[LedgerTestSuite] =
+    suites.v2_dev.optional(tlsConfig)
+
+  val lfVersion = "2.dev"
+}
diff --git a/canton/community/ledger-test-tool/tool/src/main/resources/logback.xml b/canton/community/ledger-test-tool/tool/src/main/resources/logback.xml
new file mode 100644
index 0000000000..03a157a14b
--- /dev/null
+++ b/canton/community/ledger-test-tool/tool/src/main/resources/logback.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+
+  <appender name="default" class="ch.qos.logback.core.ConsoleAppender">
+    <target>System.err</target>
+    <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+      <level>trace</level>
+    </filter>
+    <encoder>
+      <pattern>%date [%thread] %-5level %logger{10} - %msg%n</pattern>
+    </encoder>
+  </appender>
+
+  <root level="INFO">
+    <appender-ref ref="default"/>
+  </root>
+
+</configuration>
diff --git a/canton/community/ledger-test-tool/tool/src/main/scala/com/daml/ledger/api/testtool/CliParser.scala b/canton/community/ledger-test-tool/tool/src/main/scala/com/daml/ledger/api/testtool/CliParser.scala
new file mode 100644
index 0000000000..5f5bf288ac
--- /dev/null
+++ b/canton/community/ledger-test-tool/tool/src/main/scala/com/daml/ledger/api/testtool/CliParser.scala
@@ -0,0 +1,296 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.api.testtool
+
+import com.daml.ledger.api.testtool.infrastructure.PartyAllocationConfiguration
+import com.daml.ledger.api.testtool.runner.Config
+import com.digitalasset.canton.buildinfo.BuildInfo
+import com.digitalasset.canton.config.RequireTypes.ExistingFile
+import com.digitalasset.canton.config.{PemFile, TlsClientCertificate}
+import com.digitalasset.canton.discard.Implicits.DiscardOps
+import scopt.{OptionParser, Read}
+
+import java.io.File
+import java.nio.file.Paths
+import scala.concurrent.duration.{DurationInt, FiniteDuration}
+import scala.util.Try
+import scala.util.matching.Regex
+
+object CliParser {
+  private val Name = "ledger-api-test-tool"
+
+  private implicit val fileRead: Read[File] = Read.reads(Paths.get(_).toFile)
+
+  def parse(args: Array[String]): Option[Config] =
+    argParser.parse(args, Config.default)
+
+  private def endpointRead: Read[(String, Int)] = new Read[(String, Int)] {
+    override val arity = 2
+    override val reads: String => (String, Int) = { (s: String) =>
+      splitAddress(s) match {
+        case (k, v) => Read.stringRead.reads(k) -> Read.intRead.reads(v)
+      }
+    }
+  }
+
+  private def lapiAndOptionalAdminEndpointRead: Read[((String, Int), (String, Int))] =
+    new Read[((String, Int), (String, Int))] {
+      override val arity = 2
+      override val reads: String => ((String, Int), (String, Int)) = { (s: String) =>
+        s.split(';') match {
+          case Array(lapiEndpoint, adminEndpoint) =>
+            (endpointRead.reads(lapiEndpoint), endpointRead.reads(adminEndpoint))
+          case _ =>
+            throw new IllegalArgumentException(
+              "both LAPI and Admin API addresses should be specified as `<lapi-endpoint>;<admin-endpoint>`"
+            )
+        }
+      }
+    }
+
+  private def splitAddress(s: String): (String, String) =
+    s.indexOf(':') match {
+      case -1 =>
+        throw new IllegalArgumentException("Addresses should be specified as `<host>:<port>`")
+      case n: Int => (s.slice(0, n), s.slice(n + 1, s.length))
+    }
+
+  private val argParser: OptionParser[Config] = new scopt.OptionParser[Config](Name) {
+    head(
+      """The Ledger API Test Tool is a command line tool for testing the correctness of
+        |ledger implementations based on Daml and Ledger API.""".stripMargin
+    ).discard
+
+    arg[((String, Int), (String, Int))]("[endpoints...]")(lapiAndOptionalAdminEndpointRead)
+      .action { case ((lapiAddress, adminApiAddress), config) =>
+        config.copy(
+          participantsEndpoints = config.participantsEndpoints :+ lapiAddress,
+          participantsAdminEndpoints = config.participantsAdminEndpoints :+ adminApiAddress,
+        )
+      }
+      .unbounded()
+      .optional()
+      .text("Addresses of the participants to test, specified as `<host>:<port>`.")
+      .discard
+
+    opt[Int]("max-connection-attempts")
+      .action((maxConnectionAttempts, config) =>
+        config.copy(maxConnectionAttempts = maxConnectionAttempts)
+      )
+      .optional()
+      .text("Number of connection attempts to the participants. Applied to all endpoints.")
+      .discard
+
+    opt[Unit]("json-api-mode")
+      .action((_, config) => config.copy(jsonApiMode = true))
+      .optional()
+      .discard
+
+    opt[Seq[File]]("client-cert")
+      .optional()
+      .text(
+        "TLS: The crt file to be used as the cert chain and the pem file to be used as the private key."
+      )
+      .valueName("<crt>,<pem>")
+      .action {
+        case (Seq(crt, pem), config) =>
+          config.withTlsConfig(cfg =>
+            cfg.copy(clientCert =
+              Some(
+                TlsClientCertificate(
+                  certChainFile = PemFile(ExistingFile.tryCreate(crt)),
+                  privateKeyFile = PemFile(ExistingFile.tryCreate(pem)),
+                )
+              )
+            )
+          )
+        case (_, config) => config
+      }
+      .discard
+
+    opt[File]("cacrt")
+      .optional()
Applied to all endpoints.") + .action { (path: File, config: Config) => + config.withTlsConfig( + _.copy(trustCollectionFile = Some(PemFile(ExistingFile.tryCreate(path)))) + ) + } + .discard + + opt[Double](name = "timeout-scale-factor") + .optional() + .action((v, c) => c.copy(timeoutScaleFactor = v)) + .text( + """Scale factor for timeouts used in all test suites. Useful to tune timeouts + |depending on the environment and the Ledger implementation under test. + |Defaults to 1.0. Use numbers higher than 1.0 to make test timeouts more lax, + |use numbers lower than 1.0 to make test timeouts more strict.""".stripMargin + ) + .discard + + opt[Int](name = "concurrent-test-runs") + .optional() + .action((v, c) => c.copy(concurrentTestRuns = v)) + .text( + "Number of tests to run concurrently. Defaults to the number of available processors or 4, whichever is smaller." + ) + .discard + + opt[Unit]("verbose") + .abbr("v") + .action((_, c) => c.copy(verbose = true)) + .text("Prints full stack traces on failures.") + .discard + + opt[Unit]("report-problems-only") + .action((_, c) => c.copy(reportOnFailuresOnly = true)) + .text("Prints report only if there are failures.") + .discard + + opt[Unit]("must-fail") + .action((_, c) => c.copy(mustFail = true)) + .text( + """Reverse success status logic of the tool. Use this flag if you expect one or + |more or the scenario tests to fail. If enabled, the tool will succeed when at + |least one test fails, and it will fail when all tests succeed. Defaults to + |false.""".stripMargin + ) + .discard + + opt[Unit]('x', "extract") + .action((_, c) => c.copy(extract = true)) + .text( + """Extract a DAR necessary to test a Daml ledger and exit without running tests. + |The DAR needs to be manually loaded into a Daml ledger for the tool to work.""".stripMargin + ) + .discard + + opt[Seq[String]]("exclude") + .action((ex, c) => c.copy(excluded = c.excluded ++ ex)) + .unbounded() + .text( + """A comma-separated list of exclusion prefixes. Tests whose name start with + |any of the given prefixes will be skipped. Can be specified multiple times, + |i.e. `--exclude=a,b` is the same as `--exclude=a --exclude=b`.""".stripMargin + ) + .discard + + opt[Seq[String]]("include") + .action((inc, c) => c.copy(included = c.included ++ inc)) + .unbounded() + .text( + """A comma-separated list of inclusion prefixes. If not specified, + |all default tests are included. If specified, only tests that match at least one + |of the given inclusion prefixes (and none of the given exclusion prefixes) will be run. + |Can be specified multiple times, i.e. `--include=a,b` is the same as `--include=a --include=b`. + |Mutually exclusive with `--additional`.""".stripMargin + ) + .discard + + opt[Seq[String]]("additional") + .action((additional, c) => c.copy(additional = c.additional ++ additional)) + .hidden() + .unbounded() + .text( + """A comma-separated list of additional prefixes. If specified, also tests that match at least one + |of the given inclusion prefixes (and none of the given exclusion prefixes) will be run. + |Can be specified multiple times, i.e. `--additional=a,b` is the same as `--additional=a --additional=b`. + |Mutually exclusive with `--include`.""".stripMargin + ) + .discard + + opt[Unit]("shuffle-participants") + .action((_, c) => c.copy(shuffleParticipants = true)) + .text( + """Shuffle the list of participants used in a test. 
+          |By default participants are used in the order they're given.""".stripMargin
+      )
+      .discard
+
+    opt[Unit]("no-wait-for-parties")
+      .action((_, c) => c.copy(partyAllocation = PartyAllocationConfiguration.ClosedWorld))
+      .text("Do not wait for parties to be allocated on all participants.")
+      .hidden()
+      .discard
+
+    opt[Unit]("open-world")
+      .action((_, c) => c.copy(partyAllocation = PartyAllocationConfiguration.OpenWorld))
+      .text(
+        """Do not allocate parties explicitly.
+          |Instead, expect the ledger to allocate parties dynamically.
+          |Party names must be their hints.""".stripMargin
+      )
+      .discard
+
+    opt[Unit]("list")
+      .action((_, c) => c.copy(listTestSuites = true))
+      .text(
+        """Lists all available test suites that can be used in the include and exclude options.
+          |Test names always start with their suite name, so using the suite name as a prefix
+          |matches all tests in a given suite.""".stripMargin
+      )
+      .discard
+
+    opt[Unit]("list-all")
+      .action((_, c) => c.copy(listTests = true))
+      .text("Lists all available tests that can be used in the include and exclude options.")
+      .discard
+
+    opt[Unit]("version")
+      .optional()
+      .action { (_, _) =>
+        println(BuildInfo.version)
+        sys.exit(0)
+      }
+      .text("Prints the version on stdout and exits.")
+      .discard
+
+    opt[FiniteDuration]("ledger-clock-granularity")(
+      oneOfRead(Read.finiteDurationRead, Read.intRead.map(_.millis))
+    )
+      .optional()
+      .action((x, c) => c.copy(ledgerClockGranularity = x))
+      .text(
+        """Specify the largest interval that you will see between clock ticks
+          |on the ledger under test. The default is "1s" (1 second).""".stripMargin
+      )
+      .discard
+
+    opt[String]("skip-dar-names-upload")
+      .optional()
+      .action((skipPattern, c) =>
+        c.copy(skipDarNamesPattern = Option.when(skipPattern.nonEmpty)(new Regex(skipPattern)))
+      )
+      .text("Skip uploading DARs whose names match the provided pattern.")
+      .discard
+
+    opt[Int](name = "connected-synchronizers")
+      .optional()
+      .action((v, c) => c.copy(connectedSynchronizers = v))
+      .text(
+        "Number of synchronizers that each participant is connected to."
+      )
+      .discard
+
+    checkConfig(c =>
+      if (c.included.nonEmpty && c.additional.nonEmpty)
+        failure("`--include` and `--additional` are mutually exclusive")
+      else
+        success
+    ).discard
+
+    help("help").text("Prints this usage text").discard
+  }
+
+  private def oneOfRead[T](readersHead: Read[T], readersTail: Read[T]*): Read[T] = Read.reads {
+    str =>
+      val results =
+        (readersHead #:: LazyList(readersTail*)).map(reader => Try(reader.reads(str)))
+      results.find(_.isSuccess) match {
+        case Some(value) => value.get
+        case None => results.head.get // throw the first failure
+      }
+  }
+}
diff --git a/canton/community/ledger-test-tool/tool/src/main/scala/com/daml/ledger/api/testtool/Main.scala b/canton/community/ledger-test-tool/tool/src/main/scala/com/daml/ledger/api/testtool/Main.scala
new file mode 100644
index 0000000000..3d9e4cf2ca
--- /dev/null
+++ b/canton/community/ledger-test-tool/tool/src/main/scala/com/daml/ledger/api/testtool/Main.scala
@@ -0,0 +1,21 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+ +package com.daml.ledger.api.testtool + +import com.daml.ledger.api.testtool.infrastructure.LedgerTestSuite +import com.daml.ledger.api.testtool.runner.{AvailableTests, TestRunner} + +object Main { + def main(args: Array[String]): Unit = { + val config = CliParser.parse(args).getOrElse(sys.exit(1)) + val availableTests = new AvailableTests { + override def defaultTests: Vector[LedgerTestSuite] = + Tests.default(timeoutScaleFactor = config.timeoutScaleFactor) + + override def optionalTests: Vector[LedgerTestSuite] = + Tests.optional(config.tlsConfig) + } + new TestRunner(availableTests, config, Tests.lfVersion).runAndExit() + } +} diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/TraceIdentifiers.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/TraceIdentifiers.scala index 7b0ea72759..0d33dd292c 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/TraceIdentifiers.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/TraceIdentifiers.scala @@ -5,11 +5,9 @@ package com.digitalasset.canton.ledger.api import com.daml.ledger.api.v2.reassignment.Reassignment import com.daml.ledger.api.v2.topology_transaction.TopologyTransaction -import com.daml.ledger.api.v2.transaction.{Transaction, TransactionTree} +import com.daml.ledger.api.v2.transaction.Transaction import com.daml.tracing.SpanAttribute -import scala.annotation.nowarn - /** Extracts identifiers from Protobuf messages to correlate traces. */ object TraceIdentifiers { @@ -31,25 +29,6 @@ object TraceIdentifiers { attributes.result() } - /** Extract identifiers from a transaction tree message. - */ - // TODO(#23504) remove this method once TransactionTrees are removed from the API - @nowarn("cat=deprecation") - def fromTransactionTree(transactionTree: TransactionTree): Map[SpanAttribute, String] = { - val attributes = Map.newBuilder[SpanAttribute, String] - def setIfNotEmpty(attribute: SpanAttribute, value: String): Unit = - if (value.nonEmpty) attributes += attribute -> value - def setIfNotZero(attribute: SpanAttribute, value: Long): Unit = - if (value != 0) attributes += attribute -> value.toString - - setIfNotZero(SpanAttribute.Offset, transactionTree.offset) - setIfNotEmpty(SpanAttribute.CommandId, transactionTree.commandId) - setIfNotEmpty(SpanAttribute.TransactionId, transactionTree.updateId) - setIfNotEmpty(SpanAttribute.WorkflowId, transactionTree.workflowId) - - attributes.result() - } - /** Extract identifiers from a reassignment message. 
*/ def fromReassignment(reassignment: Reassignment): Map[SpanAttribute, String] = { diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/RequiredClaims.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/RequiredClaims.scala index b07ca48867..5f94faefb8 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/RequiredClaims.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/RequiredClaims.scala @@ -3,17 +3,10 @@ package com.digitalasset.canton.ledger.api.auth -import com.daml.ledger.api.v2.transaction_filter.{ - EventFormat, - TransactionFilter, - TransactionFormat, - UpdateFormat, -} +import com.daml.ledger.api.v2.transaction_filter.{EventFormat, TransactionFormat, UpdateFormat} import com.digitalasset.canton.auth.RequiredClaim import scalapb.lenses.Lens -import scala.annotation.nowarn - object RequiredClaims { def apply[Req](claims: RequiredClaim[Req]*): List[RequiredClaim[Req]] = claims.toList @@ -62,12 +55,6 @@ object RequiredClaims { }, ).flatten.distinct - // TODO(#23504) remove this method once TransactionFilter is removed from the API - @nowarn("cat=deprecation") - def transactionFilterClaims[Req](transactionFilter: TransactionFilter): List[RequiredClaim[Req]] = - readAsForAllParties[Req](transactionFilter.filtersByParty.keys) ::: - transactionFilter.filtersForAnyParty.map(_ => RequiredClaim.ReadAsAnyParty[Req]()).toList - def idpAdminClaimsAndMatchingRequestIdpId[Req]( identityProviderIdL: Lens[Req, String], mustBeParticipantAdmin: Boolean = false, diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandServiceAuthorization.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandServiceAuthorization.scala index 47efbd89ae..d7e00ead48 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandServiceAuthorization.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandServiceAuthorization.scala @@ -17,7 +17,6 @@ import com.digitalasset.canton.ledger.api.validation.CommandsValidator import io.grpc.ServerServiceDefinition import scalapb.lenses.Lens -import scala.annotation.nowarn import scala.concurrent.{ExecutionContext, Future} /** Note: the command service internally uses calls to the CommandSubmissionService and @@ -59,21 +58,6 @@ final class CommandServiceAuthorization( )(request) } - // TODO(#23504) remove this method once SubmitAndWaitForTransactionTreeResponse is removed from the API - @nowarn("cat=deprecation") - override def submitAndWaitForTransactionTree( - request: SubmitAndWaitRequest - ): Future[SubmitAndWaitForTransactionTreeResponse] = { - val effectiveSubmitters = CommandsValidator.effectiveSubmitters(request.commands) - authorizer.rpc(service.submitAndWaitForTransactionTree)( - RequiredClaims.submissionClaims( - actAs = effectiveSubmitters.actAs, - readAs = effectiveSubmitters.readAs, - userIdL = Lens.unit[SubmitAndWaitRequest].commands.userId, - )* - )(request) - } - override def bindService(): ServerServiceDefinition = CommandServiceGrpc.bindService(this, executionContext) } diff --git 
a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/PackageManagementServiceAuthorization.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/PackageManagementServiceAuthorization.scala index 49971d93f2..1dc0dff6c6 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/PackageManagementServiceAuthorization.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/PackageManagementServiceAuthorization.scala @@ -35,4 +35,9 @@ final class PackageManagementServiceAuthorization( PackageManagementServiceGrpc.bindService(this, executionContext) override def close(): Unit = service.close() + + override def updateVettedPackages( + request: UpdateVettedPackagesRequest + ): Future[UpdateVettedPackagesResponse] = + authorizer.rpc(service.updateVettedPackages)(RequiredClaim.Admin())(request) } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/PackageServiceAuthorization.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/PackageServiceAuthorization.scala index 6587047d34..8354fb9767 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/PackageServiceAuthorization.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/PackageServiceAuthorization.scala @@ -31,6 +31,11 @@ final class PackageServiceAuthorization( ): Future[GetPackageStatusResponse] = authorizer.rpc(service.getPackageStatus)(RequiredClaim.Public())(request) + override def listVettedPackages( + request: ListVettedPackagesRequest + ): Future[ListVettedPackagesResponse] = + authorizer.rpc(service.listVettedPackages)(RequiredClaim.Public())(request) + override def bindService(): ServerServiceDefinition = PackageServiceGrpc.bindService(this, executionContext) } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/PartyManagementServiceAuthorization.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/PartyManagementServiceAuthorization.scala index fa4bb6aae9..446bed0902 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/PartyManagementServiceAuthorization.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/PartyManagementServiceAuthorization.scala @@ -8,7 +8,6 @@ import com.daml.ledger.api.v2.admin.party_management_service.PartyManagementServ import com.digitalasset.canton.auth.{Authorizer, RequiredClaim} import com.digitalasset.canton.ledger.api.ProxyCloseable import com.digitalasset.canton.ledger.api.auth.RequiredClaims -import com.digitalasset.canton.ledger.api.auth.services.PartyManagementServiceAuthorization.updatePartyDetailsClaims import com.digitalasset.canton.ledger.api.grpc.GrpcApiService import io.grpc.ServerServiceDefinition import scalapb.lenses.Lens @@ -22,17 +21,16 @@ final class PartyManagementServiceAuthorization( extends PartyManagementService with ProxyCloseable with GrpcApiService { + import PartyManagementServiceAuthorization.* override def getParticipantId( request: GetParticipantIdRequest ): 
Future[GetParticipantIdResponse] = - authorizer.rpc(service.getParticipantId)(RequiredClaim.Admin())(request) + authorizer.rpc(service.getParticipantId)(RequiredClaim.Public())(request) override def getParties(request: GetPartiesRequest): Future[GetPartiesResponse] = authorizer.rpc(service.getParties)( - RequiredClaims.idpAdminClaimsAndMatchingRequestIdpId( - Lens.unit[GetPartiesRequest].identityProviderId - )* + getPartiesClaims(request)* )(request) override def listKnownParties( @@ -46,9 +44,7 @@ final class PartyManagementServiceAuthorization( override def allocateParty(request: AllocatePartyRequest): Future[AllocatePartyResponse] = authorizer.rpc(service.allocateParty)( - RequiredClaims.idpAdminClaimsAndMatchingRequestIdpId( - Lens.unit[AllocatePartyRequest].identityProviderId - )* + allocatePartyClaims* )(request) override def updatePartyDetails( @@ -63,10 +59,24 @@ final class PartyManagementServiceAuthorization( ): Future[UpdatePartyIdentityProviderIdResponse] = authorizer.rpc(service.updatePartyIdentityProviderId)(RequiredClaim.Admin())(request) + override def generateExternalPartyTopology( + request: GenerateExternalPartyTopologyRequest + ): Future[GenerateExternalPartyTopologyResponse] = + authorizer.rpc(service.generateExternalPartyTopology)(RequiredClaim.Public())(request) + override def bindService(): ServerServiceDefinition = PartyManagementServiceGrpc.bindService(this, executionContext) override def close(): Unit = service.close() + + override def allocateExternalParty( + request: AllocateExternalPartyRequest + ): Future[AllocateExternalPartyResponse] = + authorizer.rpc(service.allocateExternalParty)( + RequiredClaims.idpAdminClaimsAndMatchingRequestIdpId( + Lens.unit[AllocateExternalPartyRequest].identityProviderId + )* + )(request) } object PartyManagementServiceAuthorization { @@ -81,4 +91,20 @@ object PartyManagementServiceAuthorization { case None => RequiredClaim.AdminOrIdpAdmin[UpdatePartyDetailsRequest]() :: Nil } + + def getPartiesClaims( + request: GetPartiesRequest + ): List[RequiredClaim[GetPartiesRequest]] = + RequiredClaims( + RequiredClaim.AdminOrIdpAdminOrOperateAsParty[GetPartiesRequest](request.parties), + RequiredClaim.MatchIdentityProviderId(Lens.unit[GetPartiesRequest].identityProviderId), + ) + + def allocatePartyClaims: List[RequiredClaim[AllocatePartyRequest]] = + RequiredClaims( + RequiredClaim.AdminOrIdpAdminOrSelfAdmin[AllocatePartyRequest]( + Lens.unit[AllocatePartyRequest].userId + ), + RequiredClaim.MatchIdentityProviderId(Lens.unit[AllocatePartyRequest].identityProviderId), + ) } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/StateServiceAuthorization.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/StateServiceAuthorization.scala index 2e258b962b..515c2381d8 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/StateServiceAuthorization.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/StateServiceAuthorization.scala @@ -14,7 +14,6 @@ import io.grpc.ServerServiceDefinition import io.grpc.stub.StreamObserver import scalapb.lenses.Lens -import scala.annotation.nowarn import scala.concurrent.{ExecutionContext, Future} final class StateServiceAuthorization( @@ -36,12 +35,15 @@ final class StateServiceAuthorization( override def getConnectedSynchronizers( request: 
GetConnectedSynchronizersRequest ): Future[GetConnectedSynchronizersResponse] = - authorizer.rpc(service.getConnectedSynchronizers)( - RequiredClaim.AdminOrIdpAdminOrReadAsParty(request.party), - RequiredClaim.MatchIdentityProviderId( - Lens.unit[GetConnectedSynchronizersRequest].identityProviderId - ), - )(request) + if (request.party.isEmpty) + authorizer.rpc(service.getConnectedSynchronizers)(RequiredClaim.Public())(request) + else + authorizer.rpc(service.getConnectedSynchronizers)( + RequiredClaim.AdminOrIdpAdminOrOperateAsParty(Seq(request.party)), + RequiredClaim.MatchIdentityProviderId( + Lens.unit[GetConnectedSynchronizersRequest].identityProviderId + ), + )(request) override def getLedgerEnd(request: GetLedgerEndRequest): Future[GetLedgerEndResponse] = authorizer.rpc(service.getLedgerEnd)(RequiredClaim.Public())(request) @@ -56,14 +58,10 @@ final class StateServiceAuthorization( } object StateServiceAuthorization { - // TODO(#23504) remove checking filter when it is removed from GetActiveContractsRequest - @nowarn("cat=deprecation") def getActiveContractsClaims( request: GetActiveContractsRequest ): List[RequiredClaim[GetActiveContractsRequest]] = request.eventFormat.toList.flatMap( RequiredClaims.eventFormatClaims[GetActiveContractsRequest] - ) ::: request.filter.toList.flatMap( - RequiredClaims.transactionFilterClaims[GetActiveContractsRequest] ) } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/UpdateServiceAuthorization.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/UpdateServiceAuthorization.scala index 31e20c522d..125a049be5 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/UpdateServiceAuthorization.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/UpdateServiceAuthorization.scala @@ -8,16 +8,11 @@ import com.daml.ledger.api.v2.update_service.UpdateServiceGrpc.UpdateService import com.digitalasset.canton.auth.{Authorizer, RequiredClaim} import com.digitalasset.canton.ledger.api.ProxyCloseable import com.digitalasset.canton.ledger.api.auth.RequiredClaims -import com.digitalasset.canton.ledger.api.auth.services.UpdateServiceAuthorization.{ - getTransactionByIdClaims, - getTransactionByOffsetClaims, - getUpdatesClaims, -} +import com.digitalasset.canton.ledger.api.auth.services.UpdateServiceAuthorization.getUpdatesClaims import com.digitalasset.canton.ledger.api.grpc.GrpcApiService import io.grpc.ServerServiceDefinition import io.grpc.stub.StreamObserver -import scala.annotation.nowarn import scala.concurrent.{ExecutionContext, Future} final class UpdateServiceAuthorization( @@ -39,52 +34,6 @@ final class UpdateServiceAuthorization( getUpdatesClaims(request)* )(request, responseObserver) - // TODO(#23504) remove when TransactionTrees are removed from the API - @nowarn("cat=deprecation") - override def getUpdateTrees( - request: GetUpdatesRequest, - responseObserver: StreamObserver[GetUpdateTreesResponse], - ): Unit = - authorizer.stream(service.getUpdateTrees)( - getUpdatesClaims(request)* - )(request, responseObserver) - - // TODO(#23504) remove when TransactionTrees are removed from the API - @nowarn("cat=deprecation") - override def getTransactionTreeByOffset( - request: GetTransactionByOffsetRequest - ): Future[GetTransactionTreeResponse] = - authorizer.rpc(service.getTransactionTreeByOffset)( - 
RequiredClaims.readAsForAllParties[GetTransactionByOffsetRequest](request.requestingParties)* - )(request) - - // TODO(#23504) remove when TransactionTrees are removed from the API - @nowarn("cat=deprecation") - override def getTransactionTreeById( - request: GetTransactionByIdRequest - ): Future[GetTransactionTreeResponse] = - authorizer.rpc(service.getTransactionTreeById)( - RequiredClaims.readAsForAllParties[GetTransactionByIdRequest](request.requestingParties)* - )(request) - - // TODO(#23504) remove when getTransactionByOffset is removed from the API - @nowarn("cat=deprecation") - override def getTransactionByOffset( - request: GetTransactionByOffsetRequest - ): Future[GetTransactionResponse] = - authorizer.rpc(service.getTransactionByOffset)( - getTransactionByOffsetClaims(request)* - )(request) - - // TODO(#23504) remove when getTransactionById is removed from the API - @nowarn("cat=deprecation") - override def getTransactionById( - request: GetTransactionByIdRequest - ): Future[GetTransactionResponse] = - authorizer.rpc(service.getTransactionById)( - getTransactionByIdClaims(request)* - )(request) - override def getUpdateByOffset( request: GetUpdateByOffsetRequest ): Future[GetUpdateResponse] = @@ -106,34 +55,8 @@ final class UpdateServiceAuthorization( object UpdateServiceAuthorization { - // TODO(#23504) remove checking filter when it is removed from GetUpdatesRequest - @nowarn("cat=deprecation") def getUpdatesClaims(request: GetUpdatesRequest): List[RequiredClaim[GetUpdatesRequest]] = request.updateFormat.toList.flatMap( RequiredClaims.updateFormatClaims[GetUpdatesRequest] - ) ::: request.filter.toList.flatMap(RequiredClaims.transactionFilterClaims[GetUpdatesRequest]) - - // TODO(#23504) remove when getTransactionByOffset is removed from the API - @nowarn("cat=deprecation") - def getTransactionByOffsetClaims( - request: GetTransactionByOffsetRequest - ): List[RequiredClaim[GetTransactionByOffsetRequest]] = - request.transactionFormat.toList - .flatMap( - RequiredClaims.transactionFormatClaims[GetTransactionByOffsetRequest] - ) ::: RequiredClaims.readAsForAllParties[GetTransactionByOffsetRequest]( - request.requestingParties - ) - - // TODO(#23504) remove when getTransactionById is removed from the API - @nowarn("cat=deprecation") - def getTransactionByIdClaims( - request: GetTransactionByIdRequest - ): List[RequiredClaim[GetTransactionByIdRequest]] = - request.transactionFormat.toList - .flatMap( - RequiredClaims.transactionFormatClaims[GetTransactionByIdRequest] - ) ::: RequiredClaims.readAsForAllParties[GetTransactionByIdRequest]( - request.requestingParties ) } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/UserManagementServiceAuthorization.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/UserManagementServiceAuthorization.scala index ca32bfd870..c534947f4b 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/UserManagementServiceAuthorization.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/UserManagementServiceAuthorization.scala @@ -7,7 +7,6 @@ import com.daml.ledger.api.v2.admin.user_management_service.* import com.digitalasset.canton.auth.{Authorizer, RequiredClaim} import com.digitalasset.canton.ledger.api.ProxyCloseable import com.digitalasset.canton.ledger.api.auth.RequiredClaims -import 
com.digitalasset.canton.ledger.api.auth.services.UserManagementServiceAuthorization.userReaderClaims import com.digitalasset.canton.ledger.api.grpc.GrpcApiService import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import io.grpc.ServerServiceDefinition @@ -24,6 +23,7 @@ final class UserManagementServiceAuthorization( with ProxyCloseable with GrpcApiService with NamedLogging { + import UserManagementServiceAuthorization.* // Only ParticipantAdmin is allowed to grant ParticipantAdmin right private def containsParticipantAdmin(rights: Seq[Right]): Boolean = diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/update/GetTransactionByIdRequest.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/update/GetTransactionByIdRequest.scala index e231be9c9e..f8179a63f8 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/update/GetTransactionByIdRequest.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/update/GetTransactionByIdRequest.scala @@ -3,16 +3,10 @@ package com.digitalasset.canton.ledger.api.messages.update -import com.digitalasset.canton.ledger.api.{TransactionFormat, UpdateId} -import com.digitalasset.daml.lf.data.Ref.Party +import com.digitalasset.canton.ledger.api.TransactionFormat +import com.digitalasset.canton.protocol.UpdateId final case class GetTransactionByIdRequest( updateId: UpdateId, transactionFormat: TransactionFormat, ) - -// TODO(#23504) cleanup -final case class GetTransactionByIdRequestForTrees( - updateId: UpdateId, - requestingParties: Set[Party], -) diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/update/GetTransactionByOffsetRequest.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/update/GetTransactionByOffsetRequest.scala index 2e083d4fe0..692f84912e 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/update/GetTransactionByOffsetRequest.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/update/GetTransactionByOffsetRequest.scala @@ -5,15 +5,8 @@ package com.digitalasset.canton.ledger.api.messages.update import com.digitalasset.canton.data.Offset import com.digitalasset.canton.ledger.api.TransactionFormat -import com.digitalasset.daml.lf.data.Ref.Party final case class GetTransactionByOffsetRequest( offset: Offset, transactionFormat: TransactionFormat, ) - -// TODO(#23504) cleanup -final case class GetTransactionByOffsetRequestForTrees( - offset: Offset, - requestingParties: Set[Party], -) diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/update/GetUpdateByIdRequest.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/update/GetUpdateByIdRequest.scala index 29cdb1585d..e889d34d2e 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/update/GetUpdateByIdRequest.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/update/GetUpdateByIdRequest.scala @@ -3,7 +3,8 @@ package com.digitalasset.canton.ledger.api.messages.update -import 
com.digitalasset.canton.ledger.api.{UpdateFormat, UpdateId} +import com.digitalasset.canton.ledger.api.UpdateFormat +import com.digitalasset.canton.protocol.UpdateId final case class GetUpdateByIdRequest( updateId: UpdateId, diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/update/GetUpdatesRequest.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/update/GetUpdatesRequest.scala index b57011f6b7..0984092bbd 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/update/GetUpdatesRequest.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/update/GetUpdatesRequest.scala @@ -4,17 +4,10 @@ package com.digitalasset.canton.ledger.api.messages.update import com.digitalasset.canton.data.Offset -import com.digitalasset.canton.ledger.api.{EventFormat, UpdateFormat} +import com.digitalasset.canton.ledger.api.UpdateFormat final case class GetUpdatesRequest( startExclusive: Option[Offset], endInclusive: Option[Offset], updateFormat: UpdateFormat, ) - -// TODO(#23504) cleanup -final case class GetUpdatesRequestForTrees( - startExclusive: Option[Offset], - endInclusive: Option[Offset], - eventFormat: EventFormat, -) diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/services/CommandService.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/services/CommandService.scala index cf4ba03ea8..187ec6649c 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/services/CommandService.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/services/CommandService.scala @@ -8,13 +8,11 @@ import com.daml.ledger.api.v2.command_service.{ SubmitAndWaitForReassignmentResponse, SubmitAndWaitForTransactionRequest, SubmitAndWaitForTransactionResponse, - SubmitAndWaitForTransactionTreeResponse, SubmitAndWaitRequest, SubmitAndWaitResponse, } import com.digitalasset.canton.logging.LoggingContextWithTrace -import scala.annotation.nowarn import scala.concurrent.Future trait CommandService { @@ -29,10 +27,4 @@ trait CommandService { def submitAndWaitForReassignment( request: SubmitAndWaitForReassignmentRequest )(loggingContext: LoggingContextWithTrace): Future[SubmitAndWaitForReassignmentResponse] - - // TODO(#23504) remove when submitAndWaitForTransactionTree is removed from the API - @nowarn("cat=deprecation") - def submitAndWaitForTransactionTree( - request: SubmitAndWaitRequest - )(loggingContext: LoggingContextWithTrace): Future[SubmitAndWaitForTransactionTreeResponse] } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/services/InteractiveSubmissionService.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/services/InteractiveSubmissionService.scala index e0d0cdc449..2523c5093e 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/services/InteractiveSubmissionService.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/services/InteractiveSubmissionService.scala @@ -27,7 +27,11 @@ import com.digitalasset.canton.version.HashingSchemeVersion import com.digitalasset.daml.lf.data.Ref.{SubmissionId, UserId} object 
InteractiveSubmissionService { - final case class PrepareRequest(commands: Commands, verboseHashing: Boolean) + final case class PrepareRequest( + commands: Commands, + verboseHashing: Boolean, + maxRecordTime: Option[LfTimestamp], + ) final case class ExecuteRequest( userId: UserId, diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/util.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/util.scala index 94c9089391..18a3e616a8 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/util.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/util.scala @@ -3,15 +3,30 @@ package com.digitalasset.canton.ledger.api +import cats.syntax.either.* +import cats.syntax.traverse.* +import com.daml.ledger.api.v2.admin.package_management_service import com.daml.ledger.api.v2.transaction_filter.TransactionShape.{ TRANSACTION_SHAPE_ACS_DELTA, TRANSACTION_SHAPE_LEDGER_EFFECTS, } +import com.daml.ledger.api.v2.{package_reference, package_service} import com.daml.logging.entries.{LoggingValue, ToLoggingValue} -import com.digitalasset.canton.data.DeduplicationPeriod +import com.daml.nonempty.* +import com.digitalasset.canton.ProtoDeserializationError.{ + FieldNotSet, + InvariantViolation, + UnrecognizedEnum, + ValueConversionError, +} +import com.digitalasset.canton.data.{CantonTimestamp, DeduplicationPeriod} import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.protocol.LfFatContractInst -import com.digitalasset.canton.topology.SynchronizerId +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.store.packagemeta.PackageMetadata +import com.digitalasset.canton.topology.transaction.VettedPackage +import com.digitalasset.canton.topology.{ParticipantId, SynchronizerId} +import com.digitalasset.canton.util.OptionUtil import com.digitalasset.canton.{LfPackageId, LfPackageName, LfPackageVersion} import com.digitalasset.daml.lf.command.{ApiCommands as LfCommands, ApiContractKey} import com.digitalasset.daml.lf.data.Time.Timestamp @@ -20,6 +35,7 @@ import com.digitalasset.daml.lf.data.{ImmArray, Ref} import scalaz.@@ import scalaz.syntax.tag.* +import scala.annotation.nowarn import scala.collection.immutable final case class UpdateFormat( @@ -212,3 +228,418 @@ object Logging { implicit def `tagged value to LoggingValue`[T: ToLoggingValue, Tag]: ToLoggingValue[T @@ Tag] = value => value.unwrap } + +final case class ListVettedPackagesOpts( + packageFilter: Option[PackageMetadataFilter], + topologyStateFilter: Option[TopologyStateFilter], +) { + def toPredicate(metadata: PackageMetadata): Ref.PackageId => Boolean = { (pkgId: Ref.PackageId) => + val matchesMetadata = packageFilter.forall(_.toPredicate(metadata)(pkgId)) + + val matchesTopologyState = + topologyStateFilter.forall(_.toPredicate(metadata)(pkgId)) + + matchesMetadata && matchesTopologyState + } +} + +object ListVettedPackagesOpts { + def fromProto( + req: package_service.ListVettedPackagesRequest + ): ParsingResult[ListVettedPackagesOpts] = + for { + packageMetadataFilter <- req.packageMetadataFilter.traverse(PackageMetadataFilter.fromProto) + topologyStateFilter <- req.topologyStateFilter.traverse(TopologyStateFilter.fromProto) + } yield ListVettedPackagesOpts(packageMetadataFilter, topologyStateFilter) +} + +final case class PackageMetadataFilter( + packageIds: 
Seq[Ref.PackageId], + packageNamePrefixes: Seq[String], +) { + def toProtoLAPI: package_service.PackageMetadataFilter = + package_service.PackageMetadataFilter( + packageIds.map(_.toString), + packageNamePrefixes, + ) + + def toPredicate(metadata: PackageMetadata): Ref.PackageId => Boolean = { + lazy val noFilters = packageIds.isEmpty && packageNamePrefixes.isEmpty + lazy val allPackageIds = packageIds.toSet + lazy val allNames = (for { + name <- metadata.packageNameMap.keys + if packageNamePrefixes.exists(name.toString.startsWith(_)) + } yield name).toSet + + { (targetPkgId: Ref.PackageId) => + lazy val matchesPkgId = allPackageIds.contains(targetPkgId) + lazy val matchesName = metadata.packageIdVersionMap.get(targetPkgId) match { + case Some((name, _)) => allNames.contains(name) + case None => false // package ID is not known on this participant + } + noFilters || matchesPkgId || matchesName + } + } +} + +object PackageMetadataFilter { + def fromProto( + filter: package_service.PackageMetadataFilter + ): ParsingResult[PackageMetadataFilter] = + filter.packageIds + .traverse( + Ref.PackageId.fromString(_).leftMap(ValueConversionError("package_ids", _)) + ) + .map(PackageMetadataFilter(_, filter.packageNamePrefixes)) +} + +final case class TopologyStateFilter( + participantIds: Seq[ParticipantId], + synchronizerIds: Seq[SynchronizerId], +) { + def toProtoLAPI: package_service.TopologyStateFilter = + package_service.TopologyStateFilter( + participantIds.map(_.toString), + synchronizerIds.map(_.toString), + ) + + @nowarn + def toPredicate(metadata: PackageMetadata): Ref.PackageId => Boolean = + (_: Ref.PackageId) => true +} + +object TopologyStateFilter { + def fromProto( + filter: package_service.TopologyStateFilter + ): ParsingResult[TopologyStateFilter] = + for { + synchronizerIds <- filter.synchronizerIds.traverse( + SynchronizerId.fromProtoPrimitive(_, "synchronizer_ids") + ) + participantIds <- filter.participantIds.traverse( + ParticipantId.fromProtoPrimitive(_, "participant_ids") + ) + } yield TopologyStateFilter( + participantIds = participantIds, + synchronizerIds = synchronizerIds, + ) +} + +final case class UpdateVettedPackagesOpts( + changes: Seq[VettedPackagesChange], + dryRun: Boolean, + synchronizerIdO: Option[SynchronizerId], +) { + def toTargetStates: Seq[SinglePackageTargetVetting[VettedPackagesRef]] = + for { + change <- changes + ref <- change.packages + } yield change match { + case v: VettedPackagesChange.Vet => + SinglePackageTargetVetting(ref, Some((v.newValidFromInclusive, v.newValidUntilExclusive))) + case v: VettedPackagesChange.Unvet => SinglePackageTargetVetting(ref, None) + } +} + +object UpdateVettedPackagesOpts { + def fromProto( + req: package_management_service.UpdateVettedPackagesRequest + ): ParsingResult[UpdateVettedPackagesOpts] = for { + vettingChanges <- req.changes + .traverse(VettedPackagesChange.fromProto) + synchronizerIdO <- OptionUtil + .emptyStringAsNone(req.synchronizerId) + .traverse(SynchronizerId.fromProtoPrimitive(_, "synchronizer_id")) + } yield UpdateVettedPackagesOpts(vettingChanges, req.dryRun, synchronizerIdO) +} + +sealed trait VettedPackagesChange { + def packages: Seq[VettedPackagesRef] +} + +object VettedPackagesChange { + final case class Vet( + packages: Seq[VettedPackagesRef], + newValidFromInclusive: Option[CantonTimestamp], + newValidUntilExclusive: Option[CantonTimestamp], + ) extends VettedPackagesChange + + object Vet { + def fromProto(change: package_management_service.VettedPackagesChange.Vet): ParsingResult[Vet] = + for 
{ + packages <- change.packages.traverse(VettedPackagesRef.fromProto) + newValidFromInclusive <- change.newValidFromInclusive.traverse( + CantonTimestamp.fromProtoTimestamp + ) + newValidUntilExclusive <- change.newValidUntilExclusive.traverse( + CantonTimestamp.fromProtoTimestamp + ) + } yield Vet(packages, newValidFromInclusive, newValidUntilExclusive) + } + + final case class Unvet( + packages: Seq[VettedPackagesRef] + ) extends VettedPackagesChange + + object Unvet { + def fromProto( + change: package_management_service.VettedPackagesChange.Unvet + ): ParsingResult[Unvet] = + change.packages + .traverse(VettedPackagesRef.fromProto) + .map(Unvet(_)) + } + + def fromProto( + change: package_management_service.VettedPackagesChange + ): ParsingResult[VettedPackagesChange] = + change.operation match { + case package_management_service.VettedPackagesChange.Operation.Vet(vet) => + Vet.fromProto(vet) + case package_management_service.VettedPackagesChange.Operation.Unvet(unvet) => + Unvet.fromProto(unvet) + case package_management_service.VettedPackagesChange.Operation.Empty => + Left(FieldNotSet("operation")) + } +} + +trait UploadDarVettingChange { + def toProto: package_management_service.UploadDarFileRequest.VettingChange +} +object VetAllPackages extends UploadDarVettingChange { + override def toProto = + package_management_service.UploadDarFileRequest.VettingChange.VETTING_CHANGE_VET_ALL_PACKAGES +} +object DontVetAnyPackages extends UploadDarVettingChange { + override def toProto = + package_management_service.UploadDarFileRequest.VettingChange.VETTING_CHANGE_DONT_VET_ANY_PACKAGES +} + +object UploadDarVettingChange { + val default: UploadDarVettingChange = VetAllPackages + + def fromProto( + fieldName: String, + change: Option[package_management_service.UploadDarFileRequest.VettingChange], + ): ParsingResult[UploadDarVettingChange] = + change.map(fromProto(fieldName, _)).getOrElse(Right(VetAllPackages)) + + def fromProto( + fieldName: String, + change: package_management_service.UploadDarFileRequest.VettingChange, + ): ParsingResult[UploadDarVettingChange] = + change match { + case package_management_service.UploadDarFileRequest.VettingChange.VETTING_CHANGE_UNSPECIFIED => + Right(default) + + case package_management_service.UploadDarFileRequest.VettingChange.VETTING_CHANGE_VET_ALL_PACKAGES => + Right(VetAllPackages) + case package_management_service.UploadDarFileRequest.VettingChange.VETTING_CHANGE_DONT_VET_ANY_PACKAGES => + Right(DontVetAnyPackages) + case package_management_service.UploadDarFileRequest.VettingChange + .Unrecognized(unrecognizedValue) => + Left(UnrecognizedEnum(fieldName, unrecognizedValue)) + } +} + +sealed trait VettedPackagesRef extends PrettyPrinting { + def toProtoLAPI: package_management_service.VettedPackagesRef + def findMatchingPackages(metadata: PackageMetadata): Either[String, NonEmpty[Set[Ref.PackageId]]] +} + +object VettedPackagesRef { + final case class Id( + id: Ref.PackageId + ) extends VettedPackagesRef { + def toProtoLAPI: package_management_service.VettedPackagesRef = + package_management_service.VettedPackagesRef(id.toString, "", "") + + def findMatchingPackages( + metadata: PackageMetadata + ): Either[String, NonEmpty[Set[Ref.PackageId]]] = + if (!metadata.packageIdVersionMap.contains(id)) { + Left(s"No packages with package ID $id") + } else { + Right(NonEmpty(Set, id)) + } + + override protected def pretty: Pretty[Id] = + prettyOfString(id => s"package-id: ${id.id.singleQuoted}") + } + + final case class NameAndVersion( + name: Ref.PackageName, 
+ version: Ref.PackageVersion, + ) extends VettedPackagesRef { + def toProtoLAPI: package_management_service.VettedPackagesRef = + package_management_service.VettedPackagesRef( + "", + name.toString, + version.toString, + ) + + def findMatchingPackages( + metadata: PackageMetadata + ): Either[String, NonEmpty[Set[Ref.PackageId]]] = + metadata.packageNameMap.get(name) match { + case None => Left(s"Name $name did not match any packages.") + case Some(packageResolution) => + val matchingIds: Set[Ref.PackageId] = + packageResolution.allPackageIdsForName.toSet + .filter { matchingId => + val (_, matchingVersion) = metadata.packageIdVersionMap.getOrElse( + matchingId, + sys.error( + s"Unexpectedly missing package ID $matchingId from the package ID version map." + ), + ) + version == matchingVersion + } + NonEmpty.from(matchingIds) match { + case None => Left(s"No packages with name $name have version $version.") + case Some(ne) => Right(ne) + } + } + + override protected def pretty: Pretty[NameAndVersion] = prettyOfClass( + param("name", _.name), + param("version", _.version), + ) + } + + final case class All( + id: Ref.PackageId, + name: Ref.PackageName, + version: Ref.PackageVersion, + ) extends VettedPackagesRef { + def toProtoLAPI: package_management_service.VettedPackagesRef = + package_management_service.VettedPackagesRef( + id.toString, + name.toString, + version.toString, + ) + + def findMatchingPackages( + metadata: PackageMetadata + ): Either[String, NonEmpty[Set[Ref.PackageId]]] = + metadata.packageIdVersionMap.get(id) match { + case None => Left(s"No packages with package ID $id") + case Some((matchingName, matchingVersion)) => + if (name == matchingName && version == matchingVersion) { + Right(NonEmpty(Set, id)) + } else { + Left( + s"Package with package ID $id has name $matchingName and version $matchingVersion, but filter specifies name $name and version $version" + ) + } + } + + override protected def pretty: Pretty[All] = + prettyOfClass( + param("id", _.id), + param("name", _.name), + param("version", _.version), + ) + } + + final case class Name( + name: Ref.PackageName + ) extends VettedPackagesRef { + def toProtoLAPI: package_management_service.VettedPackagesRef = + package_management_service.VettedPackagesRef("", name.toString, "") + + def findMatchingPackages( + metadata: PackageMetadata + ): Either[String, NonEmpty[Set[Ref.PackageId]]] = + metadata.packageNameMap.get(name) match { + case None => Left(s"No packages with name $name") + case Some(packageResolution) => Right(packageResolution.allPackageIdsForName) + } + + override protected def pretty: Pretty[Name] = + prettyOfString(name => s"package-name: ${name.name.singleQuoted}") + } + + private def parseWith[A]( + name: String, + value: String, + f: String => Either[String, A], + ): ParsingResult[Option[A]] = + Some(value) + .filter(_.nonEmpty) + .traverse(f) + .leftMap(ValueConversionError(name, _)) + + private def process( + mbPackageId: Option[Ref.PackageId], + mbPackageName: Option[Ref.PackageName], + mbPackageVersion: Option[Ref.PackageVersion], + ): ParsingResult[VettedPackagesRef] = + (mbPackageId, mbPackageName, mbPackageVersion) match { + case (Some(id), Some(name), Some(version)) => Right(All(id, name, version)) + case (None, Some(name), Some(version)) => Right(NameAndVersion(name, version)) + case (Some(id), None, None) => Right(Id(id)) + case (None, Some(name), None) => Right(Name(name)) + case _ => + Left( + InvariantViolation( + "package_name", + "Either package_id must be set, or package_name and 
package_version must be set, or package_name alone must be set, or all three must be set.", + ) + ) + + def fromProto( + raw: package_management_service.VettedPackagesRef + ): ParsingResult[VettedPackagesRef] = + for { + mbPackageId <- parseWith("package_id", raw.packageId, Ref.PackageId.fromString) + mbPackageName <- parseWith("package_name", raw.packageName, Ref.PackageName.fromString) + mbPackageVersion <- parseWith( + "package_version", + raw.packageVersion, + Ref.PackageVersion.fromString, + ) + result <- process(mbPackageId, mbPackageName, mbPackageVersion) + } yield result +} + +final case class SinglePackageTargetVetting[R]( + ref: R, + bounds: Option[(Option[CantonTimestamp], Option[CantonTimestamp])], +) { + def isVetting: Boolean = !isUnvetting + def isUnvetting: Boolean = bounds.isEmpty +} + +final case class EnrichedVettedPackage( + vetted: VettedPackage, + name: Option[Ref.PackageName], + version: Option[Ref.PackageVersion], +) { + def toProtoLAPI: package_reference.VettedPackage = package_reference.VettedPackage( + vetted.packageId, + validFromInclusive = vetted.validFromInclusive.map(_.toProtoTimestamp), + validUntilExclusive = vetted.validUntilExclusive.map(_.toProtoTimestamp), + packageName = name.map(_.toString).getOrElse(""), + packageVersion = version.map(_.toString).getOrElse(""), + ) +} + +sealed trait PriorTopologySerial { + def toProtoLAPI: package_reference.PriorTopologySerial +} + +final case class PriorTopologySerialExists(serial: Int) extends PriorTopologySerial { + override def toProtoLAPI = + package_reference.PriorTopologySerial( + package_reference.PriorTopologySerial.Serial.Prior(serial) + ) +} + +final case object PriorTopologySerialNone extends PriorTopologySerial { + override def toProtoLAPI = + package_reference.PriorTopologySerial( + package_reference.PriorTopologySerial.Serial.NoPrior(com.google.protobuf.empty.Empty()) + ) +} diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CommandsValidator.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CommandsValidator.scala index 0fe2ddb874..5e09695399 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CommandsValidator.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CommandsValidator.scala @@ -17,6 +17,7 @@ import com.daml.ledger.api.v2.interactive.interactive_submission_service.{ PrepareSubmissionRequest, } import com.daml.ledger.api.v2.reassignment_commands.{ReassignmentCommand, ReassignmentCommands} +import com.digitalasset.canton.LfTimestamp import com.digitalasset.canton.data.{DeduplicationPeriod, Offset} import com.digitalasset.canton.ledger.api.messages.command.submission import com.digitalasset.canton.ledger.api.util.{DurationConversion, TimestampConversion} @@ -44,8 +45,8 @@ import scala.Ordering.Implicits.infixOrderingOps import scala.collection.immutable final class CommandsValidator( - validateDisclosedContracts: ValidateDisclosedContracts, validateUpgradingPackageResolutions: ValidateUpgradingPackageResolutions, + validateDisclosedContracts: ValidateDisclosedContracts = ValidateDisclosedContracts, topologyAwarePackageSelectionEnabled: Boolean = false, ) { @@ -75,7 +76,7 @@ final class CommandsValidator( prepareRequest.minLedgerTime.flatMap(_.time.minLedgerTimeAbs), prepareRequest.minLedgerTime.flatMap(_.time.minLedgerTimeRel), ) - validatedDisclosedContracts <-
validateDisclosedContracts.fromDisclosedContracts( + validatedDisclosedContracts <- validateDisclosedContracts.validateDisclosedContracts( prepareRequest.disclosedContracts ) packageResolutions <- validateUpgradingPackageResolutions( @@ -139,7 +140,7 @@ final class CommandsValidator( commands.deduplicationPeriod, maxDeduplicationDuration, ) - validatedDisclosedContracts <- validateDisclosedContracts(commands) + validatedDisclosedContracts <- validateDisclosedContracts.validateCommands(commands) packageResolutions <- validateUpgradingPackageResolutions( commands.packageIdSelectionPreference ) @@ -258,6 +259,18 @@ final class CommandsValidator( } yield ledgerEffectiveTimestamp + def validateLfTime(protoTimestamp: com.google.protobuf.timestamp.Timestamp)(implicit + errorLoggingContext: ErrorLoggingContext + ): Either[StatusRuntimeException, LfTimestamp] = + LfTimestamp + .fromInstant(TimestampConversion.toInstant(protoTimestamp)) + .left + .map(_ => + invalidArgument( + s"Cannot represent ledger time $protoTimestamp as a Daml timestamp" + ) + ) + // Public because it is used by Canton. def validateInnerCommands( commands: Seq[Command] diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CryptoValidator.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CryptoValidator.scala new file mode 100644 index 0000000000..f7c01fca83 --- /dev/null +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CryptoValidator.scala @@ -0,0 +1,79 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.validation + +import cats.syntax.either.* +import com.daml.ledger.api.v2.crypto +import com.daml.ledger.api.v2.crypto.{ + Signature as LAPISignature, + SignatureFormat as LAPISignatureFormat, +} +import com.digitalasset.canton.crypto.{ + Fingerprint, + Signature, + SignatureFormat, + SigningAlgorithmSpec, +} +import com.digitalasset.canton.ledger.api.validation.ValidationErrors.invalidField +import com.digitalasset.canton.logging.ErrorLoggingContext +import io.grpc.StatusRuntimeException + +import scala.annotation.nowarn + +object CryptoValidator { + + def validateSignature( + cryptoSignatureP: crypto.Signature, + fieldName: String, + )(implicit + errorLoggingContext: ErrorLoggingContext + ): Either[StatusRuntimeException, Signature] = { + val LAPISignature(formatP, signatureP, signedByP, signingAlgorithmSpecP) = + cryptoSignatureP + for { + format <- validateSignatureFormat(formatP, "format") + signature = signatureP + signedBy <- Fingerprint + .fromProtoPrimitive(signedByP) + .leftMap(err => invalidField(fieldName = fieldName, message = err.message)) + signingAlgorithmSpec <- validateSigningAlgorithmSpec(signingAlgorithmSpecP, fieldName) + } yield Signature.fromExternalSigning(format, signature, signedBy, signingAlgorithmSpec) + } + + private def validateSignatureFormat( + formatP: LAPISignatureFormat, + fieldName: String, + )(implicit + errorLoggingContext: ErrorLoggingContext + ): Either[StatusRuntimeException, SignatureFormat] = + formatP match { + case LAPISignatureFormat.SIGNATURE_FORMAT_DER => Right(SignatureFormat.Der) + case LAPISignatureFormat.SIGNATURE_FORMAT_CONCAT => Right(SignatureFormat.Concat) + case LAPISignatureFormat.SIGNATURE_FORMAT_RAW => + Right(SignatureFormat.Raw: @nowarn("msg=Raw in object
SignatureFormat is deprecated")) + case LAPISignatureFormat.SIGNATURE_FORMAT_SYMBOLIC => Right(SignatureFormat.Symbolic) + case LAPISignatureFormat.SIGNATURE_FORMAT_UNSPECIFIED => + Left(invalidField(fieldName, message = "Signature format must be specified")) + case other: LAPISignatureFormat.Unrecognized => + Left(invalidField(fieldName, message = s"Signature format $other not supported")) + } + + private def validateSigningAlgorithmSpec( + signingAlgorithmSpecP: crypto.SigningAlgorithmSpec, + fieldName: String, + )(implicit + errorLoggingContext: ErrorLoggingContext + ): Either[StatusRuntimeException, SigningAlgorithmSpec] = + signingAlgorithmSpecP match { + case crypto.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_ED25519 => + Right(SigningAlgorithmSpec.Ed25519) + case crypto.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_256 => + Right(SigningAlgorithmSpec.EcDsaSha256) + case crypto.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_384 => + Right(SigningAlgorithmSpec.EcDsaSha384) + case other => + Left(invalidField(fieldName, message = s"Signing algorithm spec $other not supported")) + } + +} diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/FieldValidator.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/FieldValidator.scala index f0d267cb5a..e95a7984e8 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/FieldValidator.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/FieldValidator.scala @@ -15,7 +15,12 @@ import com.digitalasset.canton.ledger.api.validation.ValidationErrors.* import com.digitalasset.canton.ledger.api.validation.ValueValidator.* import com.digitalasset.canton.ledger.api.{IdentityProviderId, SubmissionId, WorkflowId} import com.digitalasset.canton.logging.ErrorLoggingContext -import com.digitalasset.canton.topology.{ParticipantId, PartyId as TopologyPartyId, SynchronizerId} +import com.digitalasset.canton.topology.{ + ParticipantId, + PartyId as TopologyPartyId, + PhysicalSynchronizerId, + SynchronizerId, +} import com.digitalasset.daml.lf.data.Ref import com.digitalasset.daml.lf.data.Ref.{Party, TypeConRef} import com.digitalasset.daml.lf.value.Value.ContractId @@ -204,6 +209,12 @@ object FieldValidator { if (s.isEmpty) Left(missingField(fieldName)) else SynchronizerId.fromString(s).left.map(invalidField(fieldName, _)) + def requirePhysicalSynchronizerId(s: String, fieldName: String)(implicit + errorLoggingContext: ErrorLoggingContext + ): Either[StatusRuntimeException, PhysicalSynchronizerId] = + if (s.isEmpty) Left(missingField(fieldName)) + else PhysicalSynchronizerId.fromString(s).left.map(invalidField(fieldName, _)) + def optionalSynchronizerId(s: String, fieldName: String)(implicit errorLoggingContext: ErrorLoggingContext ): Either[StatusRuntimeException, Option[SynchronizerId]] = diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/FormatValidator.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/FormatValidator.scala index 5bed77aec9..023da116bb 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/FormatValidator.scala +++
b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/FormatValidator.scala @@ -5,21 +5,18 @@ package com.digitalasset.canton.ledger.api.validation import com.daml.ledger.api.v2.transaction_filter.CumulativeFilter.IdentifierFilter import com.daml.ledger.api.v2.transaction_filter.{ - CumulativeFilter as ProtoCumulativeFilter, EventFormat as ProtoEventFormat, Filters, InterfaceFilter as ProtoInterfaceFilter, ParticipantAuthorizationTopologyFormat as ProtoParticipantAuthorizationTopologyFormat, TemplateFilter as ProtoTemplateFilter, TopologyFormat as ProtoTopologyFormat, - TransactionFilter as ProtoTransactionFilter, TransactionFormat as ProtoTransactionFormat, TransactionShape as ProtoTransactionShape, UpdateFormat as ProtoUpdateFormat, WildcardFilter, } import com.daml.ledger.api.v2.value.Identifier -import com.digitalasset.canton.ledger.api.TransactionShape.AcsDelta import com.digitalasset.canton.ledger.api.validation.ValueValidator.* import com.digitalasset.canton.ledger.api.{ CumulativeFilter, @@ -41,67 +38,11 @@ import scalaz.std.either.* import scalaz.std.list.* import scalaz.syntax.traverse.* -import scala.annotation.nowarn - object FormatValidator { import FieldValidator.* import ValidationErrors.* - // TODO(i23504) Cleanup - @nowarn("cat=deprecation") - def validate( - txFilter: ProtoTransactionFilter, - verbose: Boolean, - )(implicit - errorLoggingContext: ErrorLoggingContext - ): Either[StatusRuntimeException, EventFormat] = - validate(ProtoEventFormat(txFilter.filtersByParty, txFilter.filtersForAnyParty, verbose)) - - // TODO(i23504) Cleanup - @nowarn("cat=deprecation") - def validateLegacyToUpdateFormat( - txFilter: ProtoTransactionFilter, - verbose: Boolean, - )(implicit - errorLoggingContext: ErrorLoggingContext - ): Either[StatusRuntimeException, UpdateFormat] = - for { - eventFormat <- FormatValidator.validate(txFilter, verbose) - } yield UpdateFormat( - includeTransactions = - Some(TransactionFormat(eventFormat = eventFormat, transactionShape = AcsDelta)), - includeReassignments = Some(eventFormat), - includeTopologyEvents = None, - ) - - // TODO(i23504) Cleanup - @nowarn("cat=deprecation") - def validateLegacyToTransactionFormat( - requestingParties: Seq[String] - )(implicit - errorLoggingContext: ErrorLoggingContext - ): Either[StatusRuntimeException, TransactionFormat] = { - val txFilter = ProtoTransactionFilter( - filtersByParty = requestingParties - .map( - _ -> Filters( - Seq( - ProtoCumulativeFilter( - ProtoCumulativeFilter.IdentifierFilter - .WildcardFilter(WildcardFilter(includeCreatedEventBlob = false)) - ) - ) - ) - ) - .toMap, - filtersForAnyParty = None, - ) - for { - eventFormat <- FormatValidator.validate(txFilter = txFilter, verbose = true) - } yield TransactionFormat(eventFormat = eventFormat, transactionShape = AcsDelta) - } - def validate(eventFormat: ProtoEventFormat)(implicit errorLoggingContext: ErrorLoggingContext ): Either[StatusRuntimeException, EventFormat] = diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/SubmitRequestValidator.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/SubmitRequestValidator.scala index 538c45e66b..284537b05d 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/SubmitRequestValidator.scala +++ 
b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/SubmitRequestValidator.scala @@ -9,21 +9,14 @@ import com.daml.ledger.api.v2.interactive.interactive_submission_service as iss import com.daml.ledger.api.v2.interactive.interactive_submission_service.{ PartySignatures, PrepareSubmissionRequest, - Signature as InteractiveSignature, - SignatureFormat as InteractiveSignatureFormat, SinglePartySignatures, } import com.digitalasset.base.error.RpcError -import com.digitalasset.canton.crypto.{ - Fingerprint, - Signature, - SignatureFormat, - SigningAlgorithmSpec, -} +import com.digitalasset.canton.crypto.Signature import com.digitalasset.canton.ledger.api.SubmissionIdGenerator import com.digitalasset.canton.ledger.api.messages.command.submission +import com.digitalasset.canton.ledger.api.services.InteractiveSubmissionService import com.digitalasset.canton.ledger.api.services.InteractiveSubmissionService.ExecuteRequest -import com.digitalasset.canton.ledger.api.validation.ValidationErrors.invalidField import com.digitalasset.canton.ledger.api.validation.ValueValidator.* import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors import com.digitalasset.canton.logging.ErrorLoggingContext @@ -34,7 +27,6 @@ import io.grpc.StatusRuntimeException import scalaz.syntax.tag.* import java.time.{Duration, Instant} -import scala.annotation.nowarn class SubmitRequestValidator( commandsValidator: CommandsValidator @@ -65,7 +57,7 @@ class SubmitRequestValidator( maxDeduplicationDuration: Duration, )(implicit errorLoggingContext: ErrorLoggingContext - ): Either[StatusRuntimeException, submission.SubmitRequest] = + ): Either[StatusRuntimeException, InteractiveSubmissionService.PrepareRequest] = for { validatedCommands <- commandsValidator.validatePrepareRequest( req, @@ -73,58 +65,12 @@ class SubmitRequestValidator( currentUtcTime, maxDeduplicationDuration, ) - } yield submission.SubmitRequest(validatedCommands) - - private def validateSignatureFormat( - formatP: InteractiveSignatureFormat, - fieldName: String, - )(implicit - errorLoggingContext: ErrorLoggingContext - ): Either[StatusRuntimeException, SignatureFormat] = - formatP match { - case InteractiveSignatureFormat.SIGNATURE_FORMAT_DER => Right(SignatureFormat.Der) - case InteractiveSignatureFormat.SIGNATURE_FORMAT_CONCAT => Right(SignatureFormat.Concat) - case InteractiveSignatureFormat.SIGNATURE_FORMAT_RAW => - Right(SignatureFormat.Raw: @nowarn("msg=Raw in object SignatureFormat is deprecated")) - case InteractiveSignatureFormat.SIGNATURE_FORMAT_SYMBOLIC => Right(SignatureFormat.Symbolic) - case other => - Left(invalidField(fieldName, message = s"Signature format $other not supported")) - } - - private def validateSigningAlgorithmSpec( - signingAlgorithmSpecP: iss.SigningAlgorithmSpec, - fieldName: String, - )(implicit - errorLoggingContext: ErrorLoggingContext - ): Either[StatusRuntimeException, SigningAlgorithmSpec] = - signingAlgorithmSpecP match { - case iss.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_ED25519 => - Right(SigningAlgorithmSpec.Ed25519) - case iss.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_256 => - Right(SigningAlgorithmSpec.EcDsaSha256) - case iss.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_384 => - Right(SigningAlgorithmSpec.EcDsaSha384) - case other => - Left(invalidField(fieldName, message = s"Signing algorithm spec $other not supported")) - } - - private def validateSignature( - issSignatureP: iss.Signature, - fieldName: String, - )(implicit - 
errorLoggingContext: ErrorLoggingContext - ): Either[StatusRuntimeException, Signature] = { - val InteractiveSignature(formatP, signatureP, signedByP, signingAlgorithmSpecP) = - issSignatureP - for { - format <- validateSignatureFormat(formatP, "format") - signature = signatureP - signedBy <- Fingerprint - .fromProtoPrimitive(signedByP) - .leftMap(err => invalidField(fieldName = fieldName, message = err.message)) - signingAlgorithmSpec <- validateSigningAlgorithmSpec(signingAlgorithmSpecP, fieldName) - } yield Signature.fromExternalSigning(format, signature, signedBy, signingAlgorithmSpec) - } + maxRecordTime <- req.maxRecordTime.traverse(commandsValidator.validateLfTime) + } yield InteractiveSubmissionService.PrepareRequest( + validatedCommands, + req.verboseHashing, + maxRecordTime, + ) private def validatePartySignatures( proto: PartySignatures @@ -136,7 +82,7 @@ class SubmitRequestValidator( for { partyId <- requireTopologyPartyIdField(partyP, "SinglePartySignatures.party") signatures <- signaturesP.traverse(s => - validateSignature(s, "SinglePartySignatures.signature") + CryptoValidator.validateSignature(s, "SinglePartySignatures.signature") ) } yield partyId -> signatures } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/UpdateServiceRequestValidator.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/UpdateServiceRequestValidator.scala index 270c70e904..99900a738a 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/UpdateServiceRequestValidator.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/UpdateServiceRequestValidator.scala @@ -4,21 +4,18 @@ package com.digitalasset.canton.ledger.api.validation import com.daml.ledger.api.v2.update_service.{ - GetTransactionByIdRequest, - GetTransactionByOffsetRequest, GetUpdateByIdRequest, GetUpdateByOffsetRequest, GetUpdatesRequest, } import com.digitalasset.canton.data.Offset -import com.digitalasset.canton.ledger.api.UpdateId import com.digitalasset.canton.ledger.api.messages.update +import com.digitalasset.canton.ledger.api.validation.ValidationErrors.invalidArgument import com.digitalasset.canton.ledger.api.validation.ValueValidator.* import com.digitalasset.canton.logging.ErrorLoggingContext +import com.digitalasset.canton.protocol.UpdateId import io.grpc.StatusRuntimeException -import scala.annotation.nowarn - object UpdateServiceRequestValidator { type Result[X] = Either[StatusRuntimeException, X] @@ -42,8 +39,6 @@ object UpdateServiceRequestValidator { end, ) - // TODO(#23504) remove filter and verbose matching when they are removed from GetUpdatesRequest - @nowarn("cat=deprecation") def validate( req: GetUpdatesRequest, ledgerEnd: Option[Offset], @@ -62,30 +57,8 @@ object UpdateServiceRequestValidator { partial.end, ledgerEnd, ) - updateFormat <- (req.filter, req.verbose, req.updateFormat) match { - case (Some(_), _, Some(_)) => - Left( - ValidationErrors.invalidArgument( - s"Both filter/verbose and update_format is specified. Please use either backwards compatible arguments (filter and verbose) or update_format, but not both." - ) - ) - case (Some(legacyFilter), legacyVerbose, None) => - FormatValidator.validateLegacyToUpdateFormat(legacyFilter, legacyVerbose) - case (None, true, Some(_)) => - Left( - ValidationErrors.invalidArgument( - s"Both filter/verbose and update_format is specified. 
Please use either backwards compatible arguments (filter and verbose) or update_format, but not both." - ) - ) - case (None, false, Some(updateFormat)) => - FormatValidator.validate(updateFormat) - case (None, _, None) => - Left( - ValidationErrors.invalidArgument( - s"Either filter/verbose or update_format is required. Please use either backwards compatible arguments (filter and verbose) or update_format, but not both." - ) - ) - } + updateFormatProto <- requirePresence(req.updateFormat, "update_format") + updateFormat <- FormatValidator.validate(updateFormatProto) } yield { update.GetUpdatesRequest( partial.begin, @@ -94,169 +67,6 @@ object UpdateServiceRequestValidator { ) } - // TODO(#23504) cleanup - @nowarn("cat=deprecation") - def validateForTrees( - req: GetUpdatesRequest, - ledgerEnd: Option[Offset], - )(implicit - errorLoggingContext: ErrorLoggingContext - ): Result[update.GetUpdatesRequestForTrees] = - for { - _ <- - if (req.updateFormat.nonEmpty) - Left( - ValidationErrors.invalidArgument( - s"The event_format field must be unset for trees requests." - ) - ) - else Right(()) - partial <- commonValidations(req) - _ <- requireParties(req.getFilter.filtersByParty.keySet) - _ <- ParticipantOffsetValidator.offsetIsBeforeEnd( - "Begin", - partial.begin, - ledgerEnd, - ) - _ <- ParticipantOffsetValidator.offsetIsBeforeEnd( - "End", - partial.end, - ledgerEnd, - ) - transactionFilter <- requirePresence(req.filter, "filter") - eventFormat <- FormatValidator.validate(transactionFilter, req.verbose) - } yield { - update.GetUpdatesRequestForTrees( - partial.begin, - partial.end, - eventFormat, - ) - } - - // TODO(#23504) remove when the GetTransactionById endpoint is removed - @nowarn("cat=deprecation") - def validateTransactionById( - req: GetTransactionByIdRequest - )(implicit - errorLoggingContext: ErrorLoggingContext - ): Result[update.GetTransactionByIdRequest] = - for { - transactionFormat <- (req.requestingParties, req.transactionFormat) match { - case (parties, Some(_)) if parties.nonEmpty => - Left( - ValidationErrors.invalidArgument( - s"Both requesting_parties and transaction_format are specified. Please use either backwards compatible arguments (requesting_parties) or transaction_format, but not both." - ) - ) - case (_, Some(transactionFormat)) => - FormatValidator.validate(transactionFormat) - case (parties, None) if parties.isEmpty => - Left( - ValidationErrors.invalidArgument( - s"Either requesting_parties or transaction_format is required. Please use either backwards compatible arguments (requesting_parties) or transaction_format, but not both." - ) - ) - case (parties, None) => - FormatValidator.validateLegacyToTransactionFormat(parties) - } - - _ <- requireNonEmptyString(req.updateId, "update_id") - trId <- requireLedgerString(req.updateId) - } yield { - update.GetTransactionByIdRequest( - updateId = UpdateId(trId), - transactionFormat = transactionFormat, - ) - } - - // TODO(#23504) cleanup - @nowarn("cat=deprecation") - def validateTransactionByIdForTrees( - req: GetTransactionByIdRequest - )(implicit - errorLoggingContext: ErrorLoggingContext - ): Result[update.GetTransactionByIdRequestForTrees] = - for { - _ <- - if (req.transactionFormat.nonEmpty) - Left( - ValidationErrors.invalidArgument( - s"The transaction_format field must be unset for trees requests." 
- ) - ) - else Right(()) - _ <- requireNonEmptyString(req.updateId, "update_id") - trId <- requireLedgerString(req.updateId) - _ <- requireNonEmpty(req.requestingParties, "requesting_parties") - parties <- requireParties(req.requestingParties.toSet) - } yield { - update.GetTransactionByIdRequestForTrees( - UpdateId(trId), - parties, - ) - } - - // TODO(#23504) remove when the GetTransactionByOffset endpoint is removed - @nowarn("cat=deprecation") - def validateTransactionByOffset( - req: GetTransactionByOffsetRequest - )(implicit - errorLoggingContext: ErrorLoggingContext - ): Result[update.GetTransactionByOffsetRequest] = - for { - transactionFormat <- (req.requestingParties, req.transactionFormat) match { - case (parties, Some(_)) if parties.nonEmpty => - Left( - ValidationErrors.invalidArgument( - s"Both requesting_parties and transaction_format are specified. Please use either backwards compatible arguments (requesting_parties) or transaction_format, but not both." - ) - ) - case (_, Some(transactionFormat)) => - FormatValidator.validate(transactionFormat) - case (parties, None) if parties.isEmpty => - Left( - ValidationErrors.invalidArgument( - s"Either requesting_parties or transaction_format is required. Please use either backwards compatible arguments (requesting_parties) or transaction_format, but not both." - ) - ) - case (parties, None) => - FormatValidator.validateLegacyToTransactionFormat(parties) - } - - offset <- ParticipantOffsetValidator.validatePositive(req.offset, "offset") - } yield { - update.GetTransactionByOffsetRequest( - offset = offset, - transactionFormat = transactionFormat, - ) - } - - // TODO(#23504) cleanup - @nowarn("cat=deprecation") - def validateTransactionByOffsetForTrees( - req: GetTransactionByOffsetRequest - )(implicit - errorLoggingContext: ErrorLoggingContext - ): Result[update.GetTransactionByOffsetRequestForTrees] = - for { - _ <- - if (req.transactionFormat.nonEmpty) - Left( - ValidationErrors.invalidArgument( - s"The transaction_format field must be unset for trees requests." 
- ) - ) - else Right(()) - offset <- ParticipantOffsetValidator.validatePositive(req.offset, "offset") - _ <- requireNonEmpty(req.requestingParties, "requesting_parties") - parties <- requireParties(req.requestingParties.toSet) - } yield { - update.GetTransactionByOffsetRequestForTrees( - offset, - parties, - ) - } - def validateUpdateByOffset( req: GetUpdateByOffsetRequest )(implicit @@ -280,12 +90,13 @@ object UpdateServiceRequestValidator { ): Result[update.GetUpdateByIdRequest] = for { _ <- requireNonEmptyString(req.updateId, "update_id") - updateId <- requireLedgerString(req.updateId) + updateIdStr <- requireLedgerString(req.updateId) + updateId <- UpdateId.fromLedgerString(updateIdStr).left.map(e => invalidArgument(e.message)) updateFormatProto <- requirePresence(req.updateFormat, "update_format") updateFormat <- FormatValidator.validate(updateFormatProto) } yield { update.GetUpdateByIdRequest( - updateId = UpdateId(updateId), + updateId = updateId, updateFormat = updateFormat, ) } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/ValidateDisclosedContracts.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/ValidateDisclosedContracts.scala index c5bfdb2145..3e4366ae39 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/ValidateDisclosedContracts.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/ValidateDisclosedContracts.scala @@ -3,62 +3,89 @@ package com.digitalasset.canton.ledger.api.validation -import cats.implicits.toBifunctorOps +import cats.implicits.{toFoldableOps, toTraverseOps} import com.daml.ledger.api.v2.commands.{ Commands as ProtoCommands, DisclosedContract as ProtoDisclosedContract, } import com.digitalasset.canton.ledger.api.DisclosedContract import com.digitalasset.canton.ledger.api.validation.FieldValidator.{ - requireContractId, requireSynchronizerId, + validateOptional, +} +import com.digitalasset.canton.ledger.api.validation.ValidationErrors.{ + invalidArgument, + invalidField, } -import com.digitalasset.canton.ledger.api.validation.ValidationErrors.invalidArgument import com.digitalasset.canton.ledger.api.validation.ValueValidator.* import com.digitalasset.canton.logging.ErrorLoggingContext -import com.digitalasset.canton.platform.apiserver.execution.ContractAuthenticators.ContractAuthenticatorFn -import com.digitalasset.canton.util.{LegacyContractHash, OptionUtil} +import com.digitalasset.canton.protocol.LfFatContractInst +import com.digitalasset.canton.util.OptionUtil import com.digitalasset.daml.lf.data.ImmArray import com.digitalasset.daml.lf.transaction.{CreationTime, TransactionCoder} -import com.google.common.annotations.VisibleForTesting +import com.digitalasset.daml.lf.value.Value.ContractId import io.grpc.StatusRuntimeException -import scala.collection.mutable +trait ValidateDisclosedContracts { + + def validateCommands(commands: ProtoCommands)(implicit + errorLoggingContext: ErrorLoggingContext + ): Either[StatusRuntimeException, ImmArray[DisclosedContract]] + + def validateDisclosedContracts(disclosedContracts: Seq[ProtoDisclosedContract])(implicit + errorLoggingContext: ErrorLoggingContext + ): Either[StatusRuntimeException, ImmArray[DisclosedContract]] +} -class ValidateDisclosedContracts(contractAuthenticator: ContractAuthenticatorFn) { +object ValidateDisclosedContracts extends 
ValidateDisclosedContracts { - def apply(commands: ProtoCommands)(implicit + def validateCommands(commands: ProtoCommands)(implicit errorLoggingContext: ErrorLoggingContext ): Either[StatusRuntimeException, ImmArray[DisclosedContract]] = - fromDisclosedContracts(commands.disclosedContracts) + validateDisclosedContracts(commands.disclosedContracts) - def fromDisclosedContracts(disclosedContracts: Seq[ProtoDisclosedContract])(implicit + def validateDisclosedContracts(disclosedContracts: Seq[ProtoDisclosedContract])(implicit errorLoggingContext: ErrorLoggingContext ): Either[StatusRuntimeException, ImmArray[DisclosedContract]] = for { - validatedDisclosedContracts <- validateDisclosedContracts(disclosedContracts) + validatedDisclosedContracts <- validateContracts(disclosedContracts) + _ <- verifyNoDuplicates(validatedDisclosedContracts.map(_.fatContractInstance).toSeq) } yield validatedDisclosedContracts - private def validateDisclosedContracts( - disclosedContracts: Seq[ProtoDisclosedContract] + private def verifyNoDuplicates( + disclosedContracts: Seq[LfFatContractInst] )(implicit errorLoggingContext: ErrorLoggingContext - ): Either[StatusRuntimeException, ImmArray[DisclosedContract]] = { - type ZeroType = - Either[ - StatusRuntimeException, - mutable.Builder[DisclosedContract, ImmArray[DisclosedContract]], - ] + ): Either[StatusRuntimeException, Unit] = + for { + _ <- disclosedContracts + .map(_.contractId) + .groupBy(identity) + .collectFirst { + case (contractId, occurrences) if occurrences.sizeIs > 1 => contractId.coid + } + .map(id => invalidArgument(s"Disclosed contracts contain duplicate contract id ($id)")) + .toLeft(()) + _ <- disclosedContracts + .flatMap(_.contractKeyWithMaintainers) + .groupBy(identity) + .collectFirst { + case (key, occurrences) if occurrences.sizeIs > 1 => key + } + .map(key => invalidArgument(s"Disclosed contracts contain duplicate contract key ($key)")) + .toLeft(()) + } yield () - disclosedContracts - .foldLeft[ZeroType](Right(ImmArray.newBuilder))((contracts, contract) => - for { - validatedContracts <- contracts - validatedContract <- validateDisclosedContract(contract) - } yield validatedContracts.addOne(validatedContract) + private def validateContracts( + disclosedContracts: Seq[ProtoDisclosedContract] + )(implicit + errorLoggingContext: ErrorLoggingContext + ): Either[StatusRuntimeException, ImmArray[DisclosedContract]] = + disclosedContracts.toList + .foldM(ImmArray.newBuilder[DisclosedContract])((acc, contract) => + validateDisclosedContract(contract).map(acc.addOne) ) .map(_.result()) - } private def validateDisclosedContract( disclosedContract: ProtoDisclosedContract @@ -69,15 +96,10 @@ class ValidateDisclosedContracts(contractAuthenticator: ContractAuthenticatorFn) Left(ValidationErrors.missingField("DisclosedContract.createdEventBlob")) else for { - rawTemplateId <- requirePresence( - disclosedContract.templateId, - "DisclosedContract.template_id", - ) - validatedTemplateId <- validateIdentifier(rawTemplateId) - validatedContractId <- requireContractId( - disclosedContract.contractId, - "DisclosedContract.contract_id", - ) + validatedTemplateIdO <- validateOptionalIdentifier(disclosedContract.templateId) + validatedContractIdO <- validateOptional( + OptionUtil.emptyStringAsNone(disclosedContract.contractId) + )(ContractId.fromString(_).left.map(invalidField("DisclosedContract.contract_id", _))) synchronizerIdO <- OptionUtil .emptyStringAsNone(disclosedContract.synchronizerId) .map(requireSynchronizerId(_, 
"DisclosedContract.synchronizer_id").map(Some(_))) @@ -88,41 +110,30 @@ class ValidateDisclosedContracts(contractAuthenticator: ContractAuthenticatorFn) .map(decodeError => invalidArgument(s"Unable to decode disclosed contract event payload: $decodeError") ) - _ <- Either.cond( - validatedContractId == fatContractInstance.contractId, - (), - invalidArgument( - s"Mismatch between DisclosedContract.contract_id (${disclosedContract.contractId}) and contract_id from decoded DisclosedContract.created_event_blob (${fatContractInstance.contractId.coid})" - ), + _ <- validatedContractIdO.traverse(validatedContractId => + Either.cond( + validatedContractId == fatContractInstance.contractId, + (), + invalidArgument( + s"Mismatch between DisclosedContract.contract_id (${validatedContractId.coid}) and contract_id from decoded DisclosedContract.created_event_blob (${fatContractInstance.contractId.coid})" + ), + ) ) - _ <- Either.cond( - validatedTemplateId == fatContractInstance.templateId, - (), - invalidArgument( - s"Mismatch between DisclosedContract.template_id ($validatedTemplateId) and template_id from decoded DisclosedContract.created_event_blob (${fatContractInstance.templateId})" - ), + _ <- validatedTemplateIdO.traverse(validatedTemplateId => + Either.cond( + validatedTemplateId == fatContractInstance.templateId, + (), + invalidArgument( + s"Mismatch between DisclosedContract.template_id ($validatedTemplateId) and template_id from decoded DisclosedContract.created_event_blob (${fatContractInstance.templateId})" + ), + ) ) lfFatContractInst <- fatContractInstance.traverseCreateAt { case time: CreationTime.CreatedAt => Right(time) case _ => Left(invalidArgument("Contract creation time cannot be 'Now'")) } - contractHash <- LegacyContractHash.fatContractHash(lfFatContractInst).leftMap { error => - invalidArgument( - s"Failed to hash contract (${disclosedContract.contractId}): $error" - ) - } - _ <- contractAuthenticator(lfFatContractInst, contractHash).leftMap { error => - invalidArgument( - s"Contract authentication failed for attached disclosed contract with id (${disclosedContract.contractId}): $error" - ) - } } yield DisclosedContract( fatContractInstance = lfFatContractInst, synchronizerIdO = synchronizerIdO, ) } - -object ValidateDisclosedContracts { - @VisibleForTesting - val WithContractIdVerificationDisabled = new ValidateDisclosedContracts((_, _) => Right(())) -} diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/ValidateUpgradingPackageResolutions.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/ValidateUpgradingPackageResolutions.scala index 3b078e20e8..e86eb6a66b 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/ValidateUpgradingPackageResolutions.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/ValidateUpgradingPackageResolutions.scala @@ -7,8 +7,8 @@ import cats.syntax.traverse.* import com.digitalasset.canton.ledger.api.validation.ValidateUpgradingPackageResolutions.ValidatedCommandPackageResolutionsSnapshot import com.digitalasset.canton.ledger.api.validation.ValidationErrors.invalidArgument import com.digitalasset.canton.logging.ErrorLoggingContext -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata.PackageResolution +import 
com.digitalasset.canton.store.packagemeta.PackageMetadata +import com.digitalasset.canton.store.packagemeta.PackageMetadata.PackageResolution import com.digitalasset.daml.lf.data.Ref import com.digitalasset.daml.lf.data.Ref.{PackageId, PackageName, PackageVersion} import com.google.common.annotations.VisibleForTesting diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/PackageSyncService.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/PackageSyncService.scala index 115d1ca586..466c2e21ee 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/PackageSyncService.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/PackageSyncService.scala @@ -3,8 +3,16 @@ package com.digitalasset.canton.ledger.participant.state +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.ledger.api.{ + EnrichedVettedPackage, + ListVettedPackagesOpts, + UpdateVettedPackagesOpts, + UploadDarVettingChange, +} import com.digitalasset.canton.logging.ErrorLoggingContext -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata +import com.digitalasset.canton.store.packagemeta.PackageMetadata +import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.daml.lf.archive.DamlLf.Archive import com.digitalasset.daml.lf.data.Ref @@ -32,6 +40,8 @@ trait PackageSyncService { def uploadDar( dars: Seq[ByteString], submissionId: Ref.SubmissionId, + vettingChange: UploadDarVettingChange, + synchronizerId: Option[SynchronizerId], )(implicit traceContext: TraceContext ): Future[SubmissionResult] @@ -54,9 +64,21 @@ trait PackageSyncService { def validateDar( dar: ByteString, darName: String, + synchronizerId: Option[SynchronizerId], )(implicit traceContext: TraceContext ): Future[SubmissionResult] = throw new UnsupportedOperationException() + def updateVettedPackages( + opts: UpdateVettedPackagesOpts + )(implicit + traceContext: TraceContext + ): Future[(Seq[EnrichedVettedPackage], Seq[EnrichedVettedPackage])] + + def listVettedPackages( + opts: ListVettedPackagesOpts + )(implicit + traceContext: TraceContext + ): Future[Seq[(Seq[EnrichedVettedPackage], SynchronizerId, PositiveInt)]] } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/PartySyncService.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/PartySyncService.scala index ab984fcadd..673a4c08ec 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/PartySyncService.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/PartySyncService.scala @@ -3,9 +3,15 @@ package com.digitalasset.canton.ledger.participant.state +import com.digitalasset.canton.crypto.HashOps import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.topology.SynchronizerId +import com.digitalasset.canton.topology.{ + ExternalPartyOnboardingDetails, + ParticipantId, + SynchronizerId, +} import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.daml.lf.data.Ref /** An interface for 
on-boarding parties via a participant. */ @@ -33,6 +39,8 @@ trait PartySyncService { * @param synchronizerIdO * The synchronizer on which the party should be allocated. Can be omitted if the participant * is connected to only one synchronizer. + * @param externalPartyOnboardingDetails + * Onboarding information when allocating an external party * @return * an async result of a SubmissionResult */ @@ -40,7 +48,19 @@ trait PartySyncService { hint: Ref.Party, submissionId: Ref.SubmissionId, synchronizerIdO: Option[SynchronizerId], + externalPartyOnboardingDetails: Option[ExternalPartyOnboardingDetails], )(implicit traceContext: TraceContext ): FutureUnlessShutdown[SubmissionResult] + + /** Return the protocol version for a synchronizer ID if the node is connected to it. + */ + def protocolVersionForSynchronizerId(synchronizerId: SynchronizerId): Option[ProtocolVersion] + + /** The participant id */ + def participantId: ParticipantId + + /** Hash ops of the participant */ + def hashOps: HashOps + } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/SubmitterInfo.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/SubmitterInfo.scala index 2bc6cf15df..5eb32fba1f 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/SubmitterInfo.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/SubmitterInfo.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.ledger.participant.state import com.daml.logging.entries.{LoggingValue, ToLoggingValue} +import com.digitalasset.canton.LfTimestamp import com.digitalasset.canton.crypto.Signature import com.digitalasset.canton.data.DeduplicationPeriod import com.digitalasset.canton.ledger.participant.state.SubmitterInfo.ExternallySignedSubmission @@ -73,6 +74,8 @@ final case class SubmitterInfo( } object SubmitterInfo { + import com.digitalasset.canton.ledger.api.Commands.`Timestamp to LoggingValue` + implicit val `ExternallySignedSubmission to LoggingValue` : ToLoggingValue[ExternallySignedSubmission] = { case ExternallySignedSubmission( @@ -80,12 +83,14 @@ object SubmitterInfo { signatures, transactionUUID, mediatorGroup, + maxRecordTimeO, ) => LoggingValue.Nested.fromEntries( "version" -> version.index, "signatures" -> signatures.keys.map(_.toProtoPrimitive), "transactionUUID" -> transactionUUID.toString, "mediatorGroup" -> mediatorGroup.toString, + "maxRecordTimeO" -> maxRecordTimeO, ) } implicit val `SubmitterInfo to LoggingValue`: ToLoggingValue[SubmitterInfo] = { @@ -114,6 +119,7 @@ object SubmitterInfo { signatures: Map[PartyId, Seq[Signature]], transactionUUID: UUID, mediatorGroup: MediatorGroupIndex, + maxRecordTimeO: Option[LfTimestamp], ) } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/SyncService.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/SyncService.scala index ef2578d3a2..e2f8155995 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/SyncService.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/SyncService.scala @@ -173,7 +173,7 @@ trait SyncService object SyncService { final case class ConnectedSynchronizerRequest( - party: LfPartyId, + 
+      party: Option[LfPartyId],
       participantId: Option[ParticipantId],
   )
 
@@ -185,7 +185,7 @@
   final case class ConnectedSynchronizer(
       synchronizerAlias: SynchronizerAlias,
       synchronizerId: PhysicalSynchronizerId,
-      permission: ParticipantPermission,
+      permission: Option[ParticipantPermission],
   )
   }
 }
diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/Update.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/Update.scala
index 99c74dee4e..0ef73a2d40 100644
--- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/Update.scala
+++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/Update.scala
@@ -5,14 +5,15 @@ package com.digitalasset.canton.ledger.participant.state
 
 import com.daml.logging.entries.{LoggingEntry, LoggingValue, ToLoggingValue}
 import com.digitalasset.base.error.GrpcStatuses
+import com.digitalasset.canton.RepairCounter
 import com.digitalasset.canton.crypto.Hash
 import com.digitalasset.canton.data.{CantonTimestamp, DeduplicationPeriod}
 import com.digitalasset.canton.ledger.participant.state.Update.CommandRejected.RejectionReasonTemplate
+import com.digitalasset.canton.ledger.participant.state.Update.TransactionAccepted.RepresentativePackageIds
 import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
-import com.digitalasset.canton.protocol.LfHash
+import com.digitalasset.canton.protocol.{LfHash, UpdateId}
 import com.digitalasset.canton.topology.SynchronizerId
 import com.digitalasset.canton.tracing.{HasTraceContext, TraceContext}
-import com.digitalasset.canton.{RepairCounter, data}
 import com.digitalasset.daml.lf.data.Time.Timestamp
 import com.digitalasset.daml.lf.data.{Bytes, Ref}
 import com.digitalasset.daml.lf.engine.Blinding
@@ -169,7 +170,7 @@ object Update {
   }
 
   final case class TopologyTransactionEffective(
-      updateId: Ref.TransactionId,
+      updateId: UpdateId,
       events: Set[TopologyTransactionEffective.TopologyEvent],
       synchronizerId: SynchronizerId,
       effectiveTime: CantonTimestamp,
@@ -183,7 +184,7 @@
       prettyOfClass(
         param("effectiveTime", _.effectiveTime),
         param("synchronizerId", _.synchronizerId),
-        param("updateId", _.updateId),
+        param("updateId", _.updateId.tryAsLedgerTransactionId),
         indicateOmittedFields,
       )
   }
@@ -261,7 +262,7 @@
       */
     def transaction: CommittedTransaction
 
-    def updateId: data.UpdateId
+    def updateId: UpdateId
 
     /** For each contract created in this transaction, this map may contain contract authentication
       * data assigned by the ledger implementation. This data is opaque and can only be used in
@@ -271,17 +272,24 @@
       */
     def contractAuthenticationData: Map[Value.ContractId, Bytes]
 
+    /** The representative package-ids for the contracts created in this transaction. See
+      * [[TransactionAccepted.RepresentativePackageIds]] for more details.
+      */
+    def representativePackageIds: RepresentativePackageIds
+
     def externalTransactionHash: Option[Hash]
 
     def isAcsDelta(contractId: Value.ContractId): Boolean
 
+    def internalContractIds: Map[Value.ContractId, Long]
+
     lazy val blindingInfo: BlindingInfo = Blinding.blind(transaction)
 
     override protected def pretty: Pretty[TransactionAccepted] =
       prettyOfClass(
         param("recordTime", _.recordTime),
         paramIfDefined("repairCounter", _.repairCounterO),
-        param("updateId", _.updateId),
+        param("updateId", _.updateId.tryAsLedgerTransactionId),
         param("transactionMeta", _.transactionMeta),
         paramIfDefined("completion", _.completionInfoO),
         param("nodes", _.transaction.nodes.size),
@@ -303,17 +311,41 @@
         Logging.synchronizerId(txAccepted.synchronizerId),
       )
     }
+
+    /** For each contract created in a transaction, a representative package exists in the
+      * Participant package store that is guaranteed to type-check the contract's argument. Such a
+      * package-id guarantee is required for ensuring correct rendering of contract create values
+      * in gRPC/JSON Ledger API read queries.
+      */
+    sealed trait RepresentativePackageIds extends Product with Serializable
+    object RepresentativePackageIds {
+      def from(
+          representativePackageIds: Map[Value.ContractId, Ref.PackageId]
+      ): DedicatedRepresentativePackageIds =
+        DedicatedRepresentativePackageIds(representativePackageIds)
+
+      /** Signals that the representative package-ids of the created contracts referenced in this
+        * transaction are the same as the contracts' creation package-ids.
+        */
+      case object SameAsContractPackageId extends RepresentativePackageIds
+
+      final case class DedicatedRepresentativePackageIds(
+          representativePackageIds: Map[Value.ContractId, Ref.PackageId]
+      ) extends RepresentativePackageIds
+      val Empty: DedicatedRepresentativePackageIds = DedicatedRepresentativePackageIds(Map.empty)
+    }
   }
 
   final case class SequencedTransactionAccepted(
       completionInfoO: Option[CompletionInfo],
       transactionMeta: TransactionMeta,
       transaction: CommittedTransaction,
-      updateId: data.UpdateId,
+      updateId: UpdateId,
       contractAuthenticationData: Map[Value.ContractId, Bytes],
       synchronizerId: SynchronizerId,
       recordTime: CantonTimestamp,
       acsChangeFactory: AcsChangeFactory,
+      internalContractIds: Map[Value.ContractId, Long],
       externalTransactionHash: Option[Hash] = None,
   )(implicit override val traceContext: TraceContext)
       extends TransactionAccepted
@@ -321,16 +353,21 @@
       with AcsChangeSequencedUpdate {
     override def isAcsDelta(contractId: Value.ContractId): Boolean =
       acsChangeFactory.contractActivenessChanged(contractId)
+
+    override val representativePackageIds: RepresentativePackageIds.SameAsContractPackageId.type =
+      RepresentativePackageIds.SameAsContractPackageId
   }
 
   final case class RepairTransactionAccepted(
       transactionMeta: TransactionMeta,
       transaction: CommittedTransaction,
-      updateId: data.UpdateId,
+      updateId: UpdateId,
       contractAuthenticationData: Map[Value.ContractId, Bytes],
+      representativePackageIds: RepresentativePackageIds,
       synchronizerId: SynchronizerId,
       repairCounter: RepairCounter,
       recordTime: CantonTimestamp,
+      internalContractIds: Map[Value.ContractId, Long],
   )(implicit override val traceContext: TraceContext)
       extends TransactionAccepted
       with RepairUpdate {
@@ -358,7 +395,7 @@
 
     /** A unique identifier for this update assigned by the ledger.
       */
-    def updateId: data.UpdateId
+    def updateId: UpdateId
 
     /** Common part of all types of reassignments.
       */
 
@@ -366,11 +403,13 @@
 
     def reassignment: Reassignment.Batch
 
+    def internalContractIds: Map[Value.ContractId, Long]
+
     override protected def pretty: Pretty[ReassignmentAccepted] =
       prettyOfClass(
         param("recordTime", _.recordTime),
         paramIfDefined("repairCounter", _.repairCounterO),
-        param("updateId", _.updateId),
+        param("updateId", _.updateId.tryAsLedgerTransactionId),
         paramIfDefined("completion", _.optCompletionInfo),
         param("source", _.reassignmentInfo.sourceSynchronizer),
         param("target", _.reassignmentInfo.targetSynchronizer),
@@ -381,12 +420,13 @@
   final case class SequencedReassignmentAccepted(
       optCompletionInfo: Option[CompletionInfo],
       workflowId: Option[Ref.WorkflowId],
-      updateId: data.UpdateId,
+      updateId: UpdateId,
       reassignmentInfo: ReassignmentInfo,
       reassignment: Reassignment.Batch,
       recordTime: CantonTimestamp,
       override val synchronizerId: SynchronizerId,
       acsChangeFactory: AcsChangeFactory,
+      internalContractIds: Map[Value.ContractId, Long],
   )(implicit override val traceContext: TraceContext)
       extends ReassignmentAccepted
       with SequencedUpdate
@@ -394,12 +434,13 @@
   final case class RepairReassignmentAccepted(
       workflowId: Option[Ref.WorkflowId],
-      updateId: data.UpdateId,
+      updateId: UpdateId,
       reassignmentInfo: ReassignmentInfo,
       reassignment: Reassignment.Batch,
       repairCounter: RepairCounter,
       recordTime: CantonTimestamp,
       override val synchronizerId: SynchronizerId,
+      internalContractIds: Map[Value.ContractId, Long],
   )(implicit override val traceContext: TraceContext)
       extends ReassignmentAccepted
       with RepairUpdate {
@@ -408,13 +449,14 @@
   final case class OnPRReassignmentAccepted(
       workflowId: Option[Ref.WorkflowId],
-      updateId: data.UpdateId,
+      updateId: UpdateId,
       reassignmentInfo: ReassignmentInfo,
       reassignment: Reassignment.Batch,
       repairCounter: RepairCounter,
       recordTime: CantonTimestamp,
       override val synchronizerId: SynchronizerId,
       acsChangeFactory: AcsChangeFactory,
+      internalContractIds: Map[Value.ContractId, Long],
   )(implicit override val traceContext: TraceContext)
       extends ReassignmentAccepted
       with RepairUpdate
@@ -657,8 +699,8 @@
     def party(party: Ref.Party): LoggingEntry =
       "party" -> party
 
-    def updateId(id: data.UpdateId): LoggingEntry =
-      "updateId" -> id
+    def updateId(id: UpdateId): LoggingEntry =
+      "updateId" -> id.toHexString
 
     def userId(id: Ref.UserId): LoggingEntry =
       "userId" -> id
diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/ContractStore.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/ContractStore.scala
index 1e385b0223..43c348e611 100644
--- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/ContractStore.scala
+++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/ContractStore.scala
@@ -52,9 +52,10 @@ sealed trait ContractStateStatus extends Product with Serializable {
   }
 }
 object ContractStateStatus {
-  case object Active extends ContractStateStatus
-  case object Archived extends ContractStateStatus
   case object NotFound extends ContractStateStatus
+  sealed trait ExistingContractStatus extends ContractStateStatus
+  case object Archived extends ExistingContractStatus
+  case object Active extends ExistingContractStatus
 }
 
 object ContractState {
diff --git
a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/IndexActiveContractsService.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/IndexActiveContractsService.scala index 0283bcda6a..10b9379925 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/IndexActiveContractsService.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/IndexActiveContractsService.scala @@ -16,7 +16,7 @@ import org.apache.pekko.stream.scaladsl.Source trait IndexActiveContractsService { def getActiveContracts( - filter: EventFormat, + eventFormat: EventFormat, activeAt: Option[Offset], )(implicit loggingContext: LoggingContextWithTrace): Source[GetActiveContractsResponse, NotUsed] } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/IndexPartyManagementService.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/IndexPartyManagementService.scala index d03a1e2786..44f0e71d27 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/IndexPartyManagementService.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/IndexPartyManagementService.scala @@ -12,6 +12,7 @@ import scala.concurrent.Future * [[com.daml.ledger.api.v2.admin.party_management_service.PartyManagementServiceGrpc]] */ trait IndexPartyManagementService { + def getParticipantId(): Future[ParticipantId] def getParties( diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/IndexUpdateService.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/IndexUpdateService.scala index fbda189a3b..6b6388f8f9 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/IndexUpdateService.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/IndexUpdateService.scala @@ -3,29 +3,19 @@ package com.digitalasset.canton.ledger.participant.state.index -import com.daml.ledger.api.v2.update_service.{ - GetTransactionResponse, - GetTransactionTreeResponse, - GetUpdateResponse, - GetUpdateTreesResponse, - GetUpdatesResponse, -} +import com.daml.ledger.api.v2.update_service.{GetUpdateResponse, GetUpdatesResponse} import com.digitalasset.canton.data.Offset -import com.digitalasset.canton.ledger.api.{EventFormat, TransactionFormat, UpdateFormat, UpdateId} +import com.digitalasset.canton.ledger.api.UpdateFormat import com.digitalasset.canton.logging.LoggingContextWithTrace import com.digitalasset.canton.platform.store.backend.common.UpdatePointwiseQueries.LookupKey -import com.digitalasset.daml.lf.data.Ref import org.apache.pekko.NotUsed import org.apache.pekko.stream.scaladsl.Source -import scala.annotation.nowarn import scala.concurrent.Future /** Serves as a backend to implement * [[com.daml.ledger.api.v2.update_service.UpdateServiceGrpc.UpdateService]] */ -// TODO(#23504) remove deprecation warning suppression -@nowarn("cat=deprecation") trait IndexUpdateService extends LedgerEndService { def 
updates( begin: Option[Offset], @@ -33,37 +23,6 @@ trait IndexUpdateService extends LedgerEndService { updateFormat: UpdateFormat, )(implicit loggingContext: LoggingContextWithTrace): Source[GetUpdatesResponse, NotUsed] - // TODO(#23504) cleanup - def transactionTrees( - begin: Option[Offset], - endAt: Option[Offset], - eventFormat: EventFormat, - )(implicit loggingContext: LoggingContextWithTrace): Source[GetUpdateTreesResponse, NotUsed] - - // TODO(#23504) cleanup - def getTransactionById( - updateId: UpdateId, - transactionFormat: TransactionFormat, - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetTransactionResponse]] - - // TODO(#23504) cleanup - def getTransactionTreeById( - updateId: UpdateId, - requestingParties: Set[Ref.Party], - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetTransactionTreeResponse]] - - // TODO(#23504) cleanup - def getTransactionByOffset( - offset: Offset, - transactionFormat: TransactionFormat, - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetTransactionResponse]] - - // TODO(#23504) cleanup - def getTransactionTreeByOffset( - offset: Offset, - requestingParties: Set[Ref.Party], - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetTransactionTreeResponse]] - def getUpdateBy( lookupKey: LookupKey, updateFormat: UpdateFormat, diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/metrics/TimedSyncService.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/metrics/TimedSyncService.scala index 0371b874a8..56de832e5f 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/metrics/TimedSyncService.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/metrics/TimedSyncService.scala @@ -6,9 +6,17 @@ package com.digitalasset.canton.ledger.participant.state.metrics import cats.data.EitherT import com.daml.metrics.Timed import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.crypto.HashOps import com.digitalasset.canton.data.{CantonTimestamp, Offset} import com.digitalasset.canton.error.{TransactionError, TransactionRoutingError} import com.digitalasset.canton.ledger.api.health.HealthStatus +import com.digitalasset.canton.ledger.api.{ + EnrichedVettedPackage, + ListVettedPackagesOpts, + UpdateVettedPackagesOpts, + UploadDarVettingChange, +} import com.digitalasset.canton.ledger.participant.state.* import com.digitalasset.canton.ledger.participant.state.SyncService.{ ConnectedSynchronizerRequest, @@ -17,10 +25,16 @@ import com.digitalasset.canton.ledger.participant.state.SyncService.{ import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.ErrorLoggingContext import com.digitalasset.canton.metrics.LedgerApiServerMetrics -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata import com.digitalasset.canton.protocol.{LfContractId, LfFatContractInst, LfSubmittedTransaction} -import com.digitalasset.canton.topology.{PhysicalSynchronizerId, SynchronizerId} +import com.digitalasset.canton.store.packagemeta.PackageMetadata +import com.digitalasset.canton.topology.{ + ExternalPartyOnboardingDetails, + ParticipantId, + PhysicalSynchronizerId, + SynchronizerId, +} import com.digitalasset.canton.tracing.TraceContext 
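// ---------------------------------------------------------------------------
// The TimedSyncService.scala change below extends a pure delegation wrapper:
// every method forwards to `delegate`, wrapped in Timed.future so a
// per-endpoint timer records the call's latency (see the uploadDar,
// allocateParty, and validateDar overrides in this diff). A minimal sketch of
// that pattern follows; the service, class, and timer names are hypothetical
// illustrations, and the Timer import path is an assumption, not part of this
// change.
//
//   import com.daml.metrics.Timed
//   import com.daml.metrics.api.MetricHandle.Timer
//   import scala.concurrent.Future
//
//   trait GreetingService {
//     def greet(name: String): Future[String]
//   }
//
//   // Decorator with an identical interface: adds timing, changes no behaviour.
//   final class TimedGreetingService(delegate: GreetingService, timer: Timer)
//       extends GreetingService {
//     override def greet(name: String): Future[String] =
//       // The timer starts when the call is made and stops when the returned
//       // future completes, mirroring the Timed.future calls in this file.
//       Timed.future(timer, delegate.greet(name))
//   }
// ---------------------------------------------------------------------------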
+import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.canton.{LfKeyResolver, LfPartyId} import com.digitalasset.daml.lf.archive.DamlLf.Archive import com.digitalasset.daml.lf.data.Ref.PackageId @@ -88,24 +102,27 @@ final class TimedSyncService(delegate: SyncService, metrics: LedgerApiServerMetr override def uploadDar( dar: Seq[ByteString], submissionId: Ref.SubmissionId, + vettingChange: UploadDarVettingChange, + synchronizerId: Option[SynchronizerId], )(implicit traceContext: TraceContext ): Future[SubmissionResult] = Timed.future( metrics.services.write.uploadPackages, - delegate.uploadDar(dar, submissionId), + delegate.uploadDar(dar, submissionId, vettingChange, synchronizerId), ) override def allocateParty( hint: Ref.Party, submissionId: Ref.SubmissionId, synchronizerIdO: Option[SynchronizerId], + externalPartyOnboardingDetails: Option[ExternalPartyOnboardingDetails], )(implicit traceContext: TraceContext ): FutureUnlessShutdown[SubmissionResult] = Timed.future( metrics.services.write.allocateParty, - delegate.allocateParty(hint, submissionId, synchronizerIdO), + delegate.allocateParty(hint, submissionId, synchronizerIdO, externalPartyOnboardingDetails), ) override def prune( @@ -166,12 +183,36 @@ final class TimedSyncService(delegate: SyncService, metrics: LedgerApiServerMetr delegate.getLfArchive(packageId), ) - override def validateDar(dar: ByteString, darName: String)(implicit + override def validateDar( + dar: ByteString, + darName: String, + synchronizerId: Option[SynchronizerId], + )(implicit traceContext: TraceContext ): Future[SubmissionResult] = Timed.future( metrics.services.read.validateDar, - delegate.validateDar(dar, darName), + delegate.validateDar(dar, darName, synchronizerId), + ) + + override def updateVettedPackages( + opts: UpdateVettedPackagesOpts + )(implicit + traceContext: TraceContext + ): Future[(Seq[EnrichedVettedPackage], Seq[EnrichedVettedPackage])] = + Timed.future( + metrics.services.write.updateVettedPackages, + delegate.updateVettedPackages(opts), + ) + + override def listVettedPackages( + opts: ListVettedPackagesOpts + )(implicit + traceContext: TraceContext + ): Future[Seq[(Seq[EnrichedVettedPackage], SynchronizerId, PositiveInt)]] = + Timed.future( + metrics.services.read.listVettedPackages, + delegate.listVettedPackages(opts), ) // TODO(#25385): Time the operation @@ -238,4 +279,13 @@ final class TimedSyncService(delegate: SyncService, metrics: LedgerApiServerMetr traceContext: TraceContext ): RoutingSynchronizerState = delegate.getRoutingSynchronizerState + + override def protocolVersionForSynchronizerId( + synchronizerId: SynchronizerId + ): Option[ProtocolVersion] = delegate.protocolVersionForSynchronizerId(synchronizerId) + + override def hashOps: HashOps = delegate.hashOps + + override def participantId: ParticipantId = delegate.participantId + } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/runner/common/PureConfigReaderWriter.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/runner/common/PureConfigReaderWriter.scala index b6e07e5108..1229691ab9 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/runner/common/PureConfigReaderWriter.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/runner/common/PureConfigReaderWriter.scala @@ -17,7 +17,6 @@ import com.digitalasset.canton.platform.config.{ IdentityProviderManagementConfig, 
IndexServiceConfig, PartyManagementServiceConfig, - TransactionTreeStreamsConfig, UpdatesStreamsConfig, UserManagementServiceConfig, } @@ -168,9 +167,6 @@ class PureConfigReaderWriter(secure: Boolean = true) { : ConfigConvert[ActiveContractsServiceStreamsConfig] = deriveConvert[ActiveContractsServiceStreamsConfig] - implicit val transactionTreeStreamsConfigConvert: ConfigConvert[TransactionTreeStreamsConfig] = - deriveConvert[TransactionTreeStreamsConfig] - implicit val transactionFlatStreamsConfigConvert: ConfigConvert[UpdatesStreamsConfig] = deriveConvert[UpdatesStreamsConfig] diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/InMemoryState.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/InMemoryState.scala index 6166e19cc6..63df4d20d9 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/InMemoryState.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/InMemoryState.scala @@ -68,7 +68,7 @@ class InMemoryState( _ <- dispatcherState.stopDispatcher() // Reset the Ledger API caches to the latest ledger end _ <- Future { - contractStateCaches.reset(ledgerEndO.map(_.lastOffset)) + contractStateCaches.reset(ledgerEndO) inMemoryFanoutBuffer.flush() ledgerEndCache.set(ledgerEndO) transactionSubmissionTracker.close() @@ -150,7 +150,7 @@ object InMemoryState { ledgerEndCache = mutableLedgerEndCache, dispatcherState = dispatcherState, contractStateCaches = ContractStateCaches.build( - initialLedgerEnd.map(_.lastOffset), + initialLedgerEnd.map(_.lastEventSeqId).getOrElse(0L), maxContractStateCacheSize, maxContractKeyStateCacheSize, metrics, diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/PackagePreferenceBackend.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/PackagePreferenceBackend.scala index b648e95a26..44a6d8d92d 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/PackagePreferenceBackend.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/PackagePreferenceBackend.scala @@ -21,7 +21,7 @@ import com.digitalasset.canton.logging.{ TracedLogger, } import com.digitalasset.canton.platform.PackagePreferenceBackend.* -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata +import com.digitalasset.canton.store.packagemeta.PackageMetadata import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.{PhysicalSynchronizerId, SynchronizerId} import com.digitalasset.canton.tracing.TraceContext diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ApiServices.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ApiServices.scala index cd5049d70b..81e9a53473 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ApiServices.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ApiServices.scala @@ -306,7 +306,6 @@ object ApiServices { getPackageMetadataSnapshot = syncService.getPackageMetadataSnapshot(_) ) val commandsValidator = new CommandsValidator( - validateDisclosedContracts = new ValidateDisclosedContracts(contractAuthenticator), 
validateUpgradingPackageResolutions = validateUpgradingPackageResolutions, topologyAwarePackageSelectionEnabled = ledgerFeatures.topologyAwarePackageSelection, ) @@ -321,12 +320,12 @@ object ApiServices { metrics, loggerFactory, ) - val apiPartyManagementService = ApiPartyManagementService.createApiService( partyManagementService, userManagementStore, new IdentityProviderExists(identityProviderConfigStore), partyManagementServiceConfig.maxPartiesPageSize, + partyManagementServiceConfig.maxSelfAllocatedParties, partyRecordStore, syncService, managementServiceTimeout, @@ -366,8 +365,7 @@ object ApiServices { loggerFactory = loggerFactory, ) val updateServices = new CommandServiceImpl.UpdateServices( - getTransactionTreeById = ledgerApiUpdateService.getTransactionTreeById, - getUpdateById = ledgerApiUpdateService.getUpdateById, + getUpdateById = ledgerApiUpdateService.getUpdateById ) val apiCommandService = CommandServiceImpl.createApiService( commandsValidator = commandsValidator, diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/TimedIndexService.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/TimedIndexService.scala index 1b64205bd8..1f860c0ca1 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/TimedIndexService.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/TimedIndexService.scala @@ -6,17 +6,11 @@ package com.digitalasset.canton.platform.apiserver import com.daml.ledger.api.v2.command_completion_service.CompletionStreamResponse import com.daml.ledger.api.v2.event_query_service.GetEventsByContractIdResponse import com.daml.ledger.api.v2.state_service.GetActiveContractsResponse -import com.daml.ledger.api.v2.update_service.{ - GetTransactionResponse, - GetTransactionTreeResponse, - GetUpdateResponse, - GetUpdateTreesResponse, - GetUpdatesResponse, -} +import com.daml.ledger.api.v2.update_service.{GetUpdateResponse, GetUpdatesResponse} import com.daml.metrics.Timed import com.digitalasset.canton.data.Offset import com.digitalasset.canton.ledger.api.health.HealthStatus -import com.digitalasset.canton.ledger.api.{EventFormat, TransactionFormat, UpdateFormat, UpdateId} +import com.digitalasset.canton.ledger.api.{EventFormat, UpdateFormat} import com.digitalasset.canton.ledger.participant.state.index.* import com.digitalasset.canton.logging.LoggingContextWithTrace import com.digitalasset.canton.metrics.LedgerApiServerMetrics @@ -30,11 +24,8 @@ import com.digitalasset.daml.lf.value.Value.ContractId import org.apache.pekko.NotUsed import org.apache.pekko.stream.scaladsl.Source -import scala.annotation.nowarn import scala.concurrent.Future -// TODO(#23504) remove deprecation warning suppression -@nowarn("cat=deprecation") final class TimedIndexService(delegate: IndexService, metrics: LedgerApiServerMetrics) extends IndexService { @@ -61,52 +52,6 @@ final class TimedIndexService(delegate: IndexService, metrics: LedgerApiServerMe delegate.updates(begin, endAt, updateFormat), ) - override def transactionTrees( - begin: Option[Offset], - endAt: Option[Offset], - eventFormat: EventFormat, - )(implicit loggingContext: LoggingContextWithTrace): Source[GetUpdateTreesResponse, NotUsed] = - Timed.source( - metrics.services.index.transactionTrees, - delegate.transactionTrees(begin, endAt, eventFormat), - ) - - override def getTransactionById( - updateId: UpdateId, - 
transactionFormat: TransactionFormat, - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetTransactionResponse]] = - Timed.future( - metrics.services.index.getTransactionById, - delegate.getTransactionById(updateId, transactionFormat), - ) - - override def getTransactionTreeById( - updateId: UpdateId, - requestingParties: Set[Ref.Party], - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetTransactionTreeResponse]] = - Timed.future( - metrics.services.index.getTransactionTreeById, - delegate.getTransactionTreeById(updateId, requestingParties), - ) - - def getTransactionByOffset( - offset: Offset, - transactionFormat: TransactionFormat, - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetTransactionResponse]] = - Timed.future( - metrics.services.index.getTransactionByOffset, - delegate.getTransactionByOffset(offset, transactionFormat), - ) - - def getTransactionTreeByOffset( - offset: Offset, - requestingParties: Set[Ref.Party], - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetTransactionTreeResponse]] = - Timed.future( - metrics.services.index.getTransactionTreeByOffset, - delegate.getTransactionTreeByOffset(offset, requestingParties), - ) - def getUpdateBy( lookupKey: LookupKey, updateFormat: UpdateFormat, @@ -117,12 +62,12 @@ final class TimedIndexService(delegate: IndexService, metrics: LedgerApiServerMe ) override def getActiveContracts( - filter: EventFormat, + eventFormat: EventFormat, activeAt: Option[Offset], )(implicit loggingContext: LoggingContextWithTrace): Source[GetActiveContractsResponse, NotUsed] = Timed.source( metrics.services.index.getActiveContracts, - delegate.getActiveContracts(filter, activeAt), + delegate.getActiveContracts(eventFormat, activeAt), ) override def lookupActiveContract( diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/StoreBackedCommandInterpreter.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/StoreBackedCommandInterpreter.scala index 083da6defa..ddd2290141 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/StoreBackedCommandInterpreter.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/StoreBackedCommandInterpreter.scala @@ -25,13 +25,7 @@ import com.digitalasset.canton.platform.apiserver.configuration.EngineLoggingCon import com.digitalasset.canton.platform.apiserver.execution.ContractAuthenticators.ContractAuthenticatorFn import com.digitalasset.canton.platform.apiserver.execution.StoreBackedCommandInterpreter.PackageResolver import com.digitalasset.canton.platform.apiserver.services.ErrorCause -import com.digitalasset.canton.protocol.{ - CantonContractIdV1Version, - CantonContractIdV2Version, - CantonContractIdVersion, - LfFatContractInst, - LfHash, -} +import com.digitalasset.canton.protocol.{CantonContractIdVersion, LfFatContractInst} import com.digitalasset.canton.time.NonNegativeFiniteDuration import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.tracing.TraceContext @@ -42,7 +36,12 @@ import com.digitalasset.daml.lf.data.{ImmArray, Ref, Time} import com.digitalasset.daml.lf.engine.* import com.digitalasset.daml.lf.engine.ResultNeedContract.Response import com.digitalasset.daml.lf.language.Ast.Package -import com.digitalasset.daml.lf.transaction.{Node, 
SubmittedTransaction, Transaction} +import com.digitalasset.daml.lf.transaction.{ + GlobalKeyWithMaintainers, + Node, + SubmittedTransaction, + Transaction, +} import scalaz.syntax.tag.* import java.util.concurrent.TimeUnit @@ -138,20 +137,19 @@ final class StoreBackedCommandInterpreter( )(implicit tc: TraceContext ): Either[ErrorCause.DisclosedContractsSynchronizerIdMismatch, CommandInterpretationResult] = { - val disclosedContractsMap = - commands.disclosedContracts.iterator.map(d => d.fatContractInstance.contractId -> d).toMap - val processedDisclosedContractsSynchronizers = meta.disclosedEvents - .map { event => - val disclosedContract = disclosedContractsMap(event.coid) - disclosedContract.fatContractInstance -> disclosedContract.synchronizerIdO - } + val usedDisclosedContracts = { + val inputContractIds = updateTx.inputContracts + commands.disclosedContracts.filter(c => + inputContractIds.contains(c.fatContractInstance.contractId) + ) + } StoreBackedCommandInterpreter .considerDisclosedContractsSynchronizerId( commands.synchronizerId, - processedDisclosedContractsSynchronizers.map { case (disclosed, synchronizerIdO) => - disclosed.contractId -> synchronizerIdO + usedDisclosedContracts.map { disclosed => + disclosed.fatContractInstance.contractId -> disclosed.synchronizerIdO }, logger, ) @@ -185,7 +183,7 @@ final class StoreBackedCommandInterpreter( dependsOnLedgerTime = meta.dependsOnTime, interpretationTimeNanos = interpretationTimeNanos, globalKeyMapping = meta.globalKeyMapping, - processedDisclosedContracts = processedDisclosedContractsSynchronizers.map(_._1), + processedDisclosedContracts = usedDisclosedContracts.map(_.fatContractInstance), ) } } @@ -213,11 +211,10 @@ final class StoreBackedCommandInterpreter( submitters = commitAuthorizers, readAs = commands.readAs, cmds = commands.commands, - disclosures = commands.disclosedContracts.map(_.fatContractInstance), participantId = participant, submissionSeed = submissionSeed, prefetchKeys = commands.prefetchKeys, - config.toEngineLogger(loggerFactory.append("phase", "submission")), + engineLogger = config.toEngineLogger(loggerFactory.append("phase", "submission")), ) })), ) @@ -266,6 +263,18 @@ final class StoreBackedCommandInterpreter( val lookupContractKeyTime = new AtomicLong(0L) val lookupContractKeyCount = new AtomicLong(0L) + val disclosedContractsByKey = (for { + idC <- disclosedContracts.view + (id, c) = idC + k <- c.contractKeyWithMaintainers.toList + } yield k -> id).toMap + + def disclosedOrStoreLookup(acoid: ContractId): FutureUnlessShutdown[Option[LfFatContractInst]] = + disclosedContracts.get(acoid) match { + case Some(fatContract) => FutureUnlessShutdown.pure(Some(fatContract)) + case None => timedLookup(acoid) + } + def timedLookup(acoid: ContractId): FutureUnlessShutdown[Option[LfFatContractInst]] = { val start = System.nanoTime Timed @@ -273,9 +282,34 @@ final class StoreBackedCommandInterpreter( metrics.execution.lookupActiveContract, FutureUnlessShutdown.outcomeF(contractStore.lookupActiveContract(readers, acoid)), ) - .tap { _ => - lookupActiveContractTime.addAndGet(System.nanoTime() - start) - lookupActiveContractCount.incrementAndGet() + .map { + _.tap { _ => + lookupActiveContractTime.addAndGet(System.nanoTime() - start) + lookupActiveContractCount.incrementAndGet() + } + } + } + + def disclosedOrStoreKeyLookup( + key: GlobalKeyWithMaintainers + ): FutureUnlessShutdown[Option[ContractId]] = + disclosedContractsByKey.get(key) match { + case Some(fatContract) => FutureUnlessShutdown.pure(Some(fatContract)) + 
case None => timedKeyLookup(key) + } + + def timedKeyLookup(key: GlobalKeyWithMaintainers): FutureUnlessShutdown[Option[ContractId]] = { + val start = System.nanoTime + Timed + .future( + metrics.execution.lookupContractKey, + FutureUnlessShutdown.outcomeF(contractStore.lookupContractKey(readers, key.globalKey)), + ) + .map { + _.tap { _ => + lookupContractKeyTime.addAndGet(System.nanoTime() - start) + lookupContractKeyCount.incrementAndGet() + } } } @@ -288,17 +322,11 @@ final class StoreBackedCommandInterpreter( case ResultNeedContract(acoid, resume) => (CantonContractIdVersion.extractCantonContractIdVersion(acoid) match { case Right(version) => - val hashingMethod = version match { - case v1: CantonContractIdV1Version => v1.contractHashingMethod - case _: CantonContractIdV2Version => - // TODO(#23971) - Add support for transforming the contract argument prior to hashing and switch to TypedNormalForm - LfHash.HashingMethod.UpgradeFriendly - } - timedLookup(acoid).map[Response] { + disclosedOrStoreLookup(acoid).map[Response] { case Some(contract) => Response.ContractFound( contract, - hashingMethod, + version.contractHashingMethod, hash => contractAuthenticator(contract, hash).isRight, ) case None => Response.ContractNotFound @@ -317,22 +345,15 @@ final class StoreBackedCommandInterpreter( ) case ResultNeedKey(key, resume) => - val start = System.nanoTime - Timed - .future( - metrics.execution.lookupContractKey, - FutureUnlessShutdown.outcomeF(contractStore.lookupContractKey(readers, key.globalKey)), - ) - .flatMap { contractId => - lookupContractKeyTime.addAndGet(System.nanoTime() - start) - lookupContractKeyCount.incrementAndGet() + disclosedOrStoreKeyLookup(key) + .flatMap(response => resolveStep( Tracked.value( metrics.execution.engineRunning, - trackSyncExecution(interpretationTimeNanos)(resume(contractId)), + trackSyncExecution(interpretationTimeNanos)(resume(response)), ) ) - } + ) case ResultNeedPackage(packageId, resume) => packageResolver(packageId)(loggingContext.traceContext) diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/TopologyAwareCommandExecutor.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/TopologyAwareCommandExecutor.scala index 33533f9607..c11c2eab2e 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/TopologyAwareCommandExecutor.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/TopologyAwareCommandExecutor.scala @@ -35,7 +35,7 @@ import com.digitalasset.canton.platform.apiserver.execution.TopologyAwareCommand } import com.digitalasset.canton.platform.apiserver.services.ErrorCause import com.digitalasset.canton.platform.apiserver.services.ErrorCause.RoutingFailed -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata +import com.digitalasset.canton.store.packagemeta.PackageMetadata import com.digitalasset.canton.topology.{PhysicalSynchronizerId, SynchronizerId} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ShowUtil.* diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandService.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandService.scala index a8106c749b..54551badb8 100644 --- 
a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandService.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandService.scala @@ -34,7 +34,6 @@ import com.digitalasset.canton.platform.apiserver.services.ApiCommandService.gen import io.grpc.ServerServiceDefinition import java.time.{Duration, Instant} -import scala.annotation.nowarn import scala.concurrent.{ExecutionContext, Future} class ApiCommandService( @@ -67,15 +66,6 @@ class ApiCommandService( ): Future[SubmitAndWaitForReassignmentResponse] = enrichRequestAndSubmit(request = request)(service.submitAndWaitForReassignment) - // TODO(#23504) remove when submitAndWaitForTransactionTree is removed - @nowarn("cat=deprecation") - override def submitAndWaitForTransactionTree( - request: SubmitAndWaitRequest - ): Future[SubmitAndWaitForTransactionTreeResponse] = - enrichRequestAndSubmit(request = request)( - service.submitAndWaitForTransactionTree - ) - override def bindService(): ServerServiceDefinition = CommandServiceGrpc.bindService(this, executionContext) diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiInteractiveSubmissionService.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiInteractiveSubmissionService.scala index a285366256..4d83ca544c 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiInteractiveSubmissionService.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiInteractiveSubmissionService.scala @@ -24,12 +24,8 @@ import com.daml.ledger.api.v2.package_reference.PackageReference import com.daml.metrics.Timed import com.daml.tracing.Telemetry import com.digitalasset.canton.ledger.api.grpc.GrpcApiService -import com.digitalasset.canton.ledger.api.messages.command.submission.SubmitRequest import com.digitalasset.canton.ledger.api.services.InteractiveSubmissionService -import com.digitalasset.canton.ledger.api.services.InteractiveSubmissionService.{ - ExecuteRequest, - PrepareRequest, -} +import com.digitalasset.canton.ledger.api.services.InteractiveSubmissionService.ExecuteRequest import com.digitalasset.canton.ledger.api.validation.{ CommandsValidator, GetPreferredPackagesRequestValidator, @@ -108,9 +104,6 @@ class ApiInteractiveSubmissionService( maxDeduplicationDuration = maxDeduplicationDuration, )(errorLogger), ) - .map { case SubmitRequest(commands) => - PrepareRequest(commands, request.value.verboseHashing) - } .fold( t => FutureUnlessShutdown.failed(ValidationLogger.logFailureWithTrace(logger, request, t)), diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiPackageService.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiPackageService.scala index 75fa5595c4..7b325f7228 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiPackageService.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiPackageService.scala @@ -3,6 +3,7 @@ package com.digitalasset.canton.platform.apiserver.services +import 
com.daml.ledger.api.v2.package_reference.VettedPackages import com.daml.ledger.api.v2.package_service.PackageServiceGrpc.PackageService import com.daml.ledger.api.v2.package_service.{ GetPackageRequest, @@ -12,15 +13,22 @@ import com.daml.ledger.api.v2.package_service.{ HashFunction as APIHashFunction, ListPackagesRequest, ListPackagesResponse, + ListVettedPackagesRequest, + ListVettedPackagesResponse, PackageServiceGrpc, PackageStatus, } import com.daml.logging.LoggingContext import com.daml.tracing.Telemetry -import com.digitalasset.canton.ledger.api.ValidationLogger +import com.digitalasset.canton.ProtoDeserializationError.ProtoDeserializationFailure import com.digitalasset.canton.ledger.api.grpc.GrpcApiService import com.digitalasset.canton.ledger.api.grpc.Logging.traceId import com.digitalasset.canton.ledger.api.validation.ValidationErrors +import com.digitalasset.canton.ledger.api.{ + ListVettedPackagesOpts, + PriorTopologySerialExists, + ValidationLogger, +} import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors import com.digitalasset.canton.ledger.participant.state.PackageSyncService import com.digitalasset.canton.logging.LoggingContextUtil.createLoggingContext @@ -35,6 +43,7 @@ import com.digitalasset.canton.logging.{ NamedLoggerFactory, NamedLogging, } +import com.digitalasset.canton.util.EitherUtil.RichEither import com.digitalasset.canton.util.Thereafter.syntax.* import com.digitalasset.daml.lf.archive.DamlLf.{Archive, HashFunction} import com.digitalasset.daml.lf.data.Ref @@ -117,6 +126,32 @@ private[apiserver] final class ApiPackageService( } } + override def listVettedPackages( + request: ListVettedPackagesRequest + ): Future[ListVettedPackagesResponse] = + withEnrichedLoggingContext(telemetry)( + traceId(telemetry.traceIdFromGrpcContext) + ) { implicit loggingContext => + for { + opts <- ListVettedPackagesOpts + .fromProto(request) + .toFuture(ProtoDeserializationFailure.Wrap(_).asGrpcError) + result <- packageSyncService.listVettedPackages(opts) + } yield ListVettedPackagesResponse( + vettedPackages = result.map { case (packages, synchronizerId, serial) => + VettedPackages( + packages = packages.map(_.toProtoLAPI), + // TODO(#27750) Populate these fields and assert over them when + // updates and queries can specify target participants + participantId = "", + synchronizerId = synchronizerId.toProtoPrimitive, + topologySerial = Some(PriorTopologySerialExists(serial.value).toProtoLAPI), + ) + }, + nextPageToken = "", + ) + } + private def withValidatedPackageId[T, R](packageId: String, request: R)( block: Ref.PackageId => Future[T] )(implicit loggingContext: LoggingContextWithTrace): Future[T] = diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiStateService.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiStateService.scala index 5daff0c5e9..c794c1c538 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiStateService.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiStateService.scala @@ -9,11 +9,11 @@ import com.daml.logging.entries.LoggingEntries import com.daml.tracing.Telemetry import com.digitalasset.canton.ledger.api.ValidationLogger import com.digitalasset.canton.ledger.api.grpc.{GrpcApiService, StreamingServiceLifecycleManagement} +import 
com.digitalasset.canton.ledger.api.validation.ValueValidator.requirePresence import com.digitalasset.canton.ledger.api.validation.{ FieldValidator, FormatValidator, ParticipantOffsetValidator, - ValidationErrors, } import com.digitalasset.canton.ledger.participant.state.SyncService import com.digitalasset.canton.ledger.participant.state.index.{ @@ -37,7 +37,6 @@ import io.grpc.stub.StreamObserver import org.apache.pekko.stream.Materializer import org.apache.pekko.stream.scaladsl.Source -import scala.annotation.nowarn import scala.concurrent.{ExecutionContext, Future} final class ApiStateService( @@ -56,8 +55,6 @@ final class ApiStateService( with GrpcApiService with NamedLogging { - // TODO(#23504) remove matching on filter and verbose when they are removed from GetActiveContractsRequest - @nowarn("cat=deprecation") override def getActiveContracts( request: GetActiveContractsRequest, responseObserver: StreamObserver[GetActiveContractsResponse], @@ -67,34 +64,8 @@ final class ApiStateService( registerStream(responseObserver) { val result = for { - filters <- (request.filter, request.verbose, request.eventFormat) match { - case (Some(_), _, Some(_)) => - Left( - ValidationErrors.invalidArgument( - s"Both filter/verbose and event_format is specified. Please use either backwards compatible arguments (filter and verbose) or event_format, but not both." - ) - ) - - case (Some(legacyFilter), legacyVerbose, None) => - FormatValidator.validate(legacyFilter, legacyVerbose) - - case (None, true, Some(_)) => - Left( - ValidationErrors.invalidArgument( - s"Both filter/verbose and event_format is specified. Please use either backwards compatible arguments (filter and verbose) or event_format, but not both." - ) - ) - - case (None, false, Some(eventFormat)) => - FormatValidator.validate(eventFormat) - - case (None, _, None) => - Left( - ValidationErrors.invalidArgument( - s"Either filter/verbose or event_format is required. Please use either backwards compatible arguments (filter and verbose) or event_format, but not both." - ) - ) - } + eventFormatProto <- requirePresence(request.eventFormat, "event_format") + eventFormat <- FormatValidator.validate(eventFormatProto) activeAt <- ParticipantOffsetValidator.validateNonNegative( request.activeAtOffset, @@ -102,14 +73,14 @@ final class ApiStateService( ) } yield { withEnrichedLoggingContext(telemetry)( - logging.eventFormat(filters) + logging.eventFormat(eventFormat) ) { implicit loggingContext => logger.info( s"Received request for active contracts: $request, ${loggingContext.serializeFiltered("filters")}." 
) acsService .getActiveContracts( - filter = filters, + eventFormat = eventFormat, activeAt = activeAt, ) } @@ -136,11 +107,11 @@ final class ApiStateService( implicit val loggingContext: LoggingContextWithTrace = LoggingContextWithTrace(loggerFactory, telemetry) val result = (for { - party <- FieldValidator - .requirePartyField(request.party, "party") + partyO <- FieldValidator + .optionalString(request.party)(FieldValidator.requirePartyField(_, "party")) participantId <- FieldValidator .optionalParticipantId(request.participantId, "participant_id") - } yield SyncService.ConnectedSynchronizerRequest(party, participantId)) + } yield SyncService.ConnectedSynchronizerRequest(partyO, participantId)) .fold( t => FutureUnlessShutdown.failed(ValidationLogger.logFailureWithTrace(logger, request, t)), request => @@ -148,24 +119,35 @@ final class ApiStateService( .getConnectedSynchronizers(request) .map(response => GetConnectedSynchronizersResponse( - response.connectedSynchronizers.flatMap { connectedSynchronizer => + response.connectedSynchronizers.map { connectedSynchronizer => val permissions = connectedSynchronizer.permission match { - case TopologyParticipantPermission.Submission => - Seq(ParticipantPermission.PARTICIPANT_PERMISSION_SUBMISSION) - case TopologyParticipantPermission.Observation => - Seq(ParticipantPermission.PARTICIPANT_PERMISSION_OBSERVATION) - case TopologyParticipantPermission.Confirmation => - Seq(ParticipantPermission.PARTICIPANT_PERMISSION_CONFIRMATION) - case _ => Nil + case Some(TopologyParticipantPermission.Submission) => + Some(ParticipantPermission.PARTICIPANT_PERMISSION_SUBMISSION) + case Some(TopologyParticipantPermission.Observation) => + Some(ParticipantPermission.PARTICIPANT_PERMISSION_OBSERVATION) + case Some(TopologyParticipantPermission.Confirmation) => + Some(ParticipantPermission.PARTICIPANT_PERMISSION_CONFIRMATION) + case _ => None } - permissions.map(permission => - GetConnectedSynchronizersResponse.ConnectedSynchronizer( - synchronizerAlias = connectedSynchronizer.synchronizerAlias.toProtoPrimitive, - synchronizerId = - connectedSynchronizer.synchronizerId.logical.toProtoPrimitive, - permission = permission, + permissions + .map(permission => + GetConnectedSynchronizersResponse.ConnectedSynchronizer( + synchronizerAlias = + connectedSynchronizer.synchronizerAlias.toProtoPrimitive, + synchronizerId = + connectedSynchronizer.synchronizerId.logical.toProtoPrimitive, + permission = permission, + ) + ) + .getOrElse( + GetConnectedSynchronizersResponse.ConnectedSynchronizer( + synchronizerAlias = + connectedSynchronizer.synchronizerAlias.toProtoPrimitive, + synchronizerId = + connectedSynchronizer.synchronizerId.logical.toProtoPrimitive, + permission = ParticipantPermission.PARTICIPANT_PERMISSION_UNSPECIFIED, + ) ) - ) } ) ), diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiUpdateService.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiUpdateService.scala index bcced2b476..f212931e33 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiUpdateService.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiUpdateService.scala @@ -8,15 +8,9 @@ import com.daml.grpc.adapter.ExecutionSequencerFactory import com.daml.ledger.api.v2.update_service.* import com.daml.logging.entries.LoggingEntries 
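A note on the getConnectedSynchronizers change above: the old flatMap-with-Nil filtering is replaced by a total mapping, so a recognised topology permission is translated to its Ledger API counterpart while a missing or unrecognised permission now yields an entry with PARTICIPANT_PERMISSION_UNSPECIFIED instead of silently dropping the synchronizer from the response. A minimal, self-contained sketch of that mapping, using hypothetical stand-in enums rather than the real TopologyParticipantPermission and ParticipantPermission protobuf types:

// Hypothetical stand-ins for the Canton topology permission and the
// Ledger API protobuf enum used in ApiStateService above.
sealed trait TopologyPermission
object TopologyPermission {
  case object Submission extends TopologyPermission
  case object Confirmation extends TopologyPermission
  case object Observation extends TopologyPermission
}

sealed trait LapiPermission
object LapiPermission {
  case object Submission extends LapiPermission
  case object Confirmation extends LapiPermission
  case object Observation extends LapiPermission
  case object Unspecified extends LapiPermission
}

// Total mapping: an absent or unknown topology permission no longer removes
// the synchronizer from the response; it is reported as Unspecified.
def toLapi(permission: Option[TopologyPermission]): LapiPermission =
  permission match {
    case Some(TopologyPermission.Submission)   => LapiPermission.Submission
    case Some(TopologyPermission.Confirmation) => LapiPermission.Confirmation
    case Some(TopologyPermission.Observation)  => LapiPermission.Observation
    case _                                     => LapiPermission.Unspecified
  }

With this shape every connected synchronizer produces exactly one response entry, which matches the switch from flatMap to map in the diff above.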
import com.daml.tracing.Telemetry -import com.digitalasset.canton.data.Offset import com.digitalasset.canton.ledger.api.grpc.StreamingServiceLifecycleManagement import com.digitalasset.canton.ledger.api.validation.UpdateServiceRequestValidator -import com.digitalasset.canton.ledger.api.{ - TransactionFormat, - UpdateFormat, - UpdateId, - ValidationLogger, -} +import com.digitalasset.canton.ledger.api.{UpdateFormat, ValidationLogger} import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors import com.digitalasset.canton.ledger.participant.state.index.IndexUpdateService import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext @@ -29,17 +23,14 @@ import com.digitalasset.canton.logging.{ } import com.digitalasset.canton.metrics.LedgerApiServerMetrics import com.digitalasset.canton.platform.store.backend.common.UpdatePointwiseQueries.LookupKey +import com.digitalasset.canton.protocol.UpdateId import com.digitalasset.canton.util.Thereafter.syntax.* import io.grpc.stub.StreamObserver import org.apache.pekko.stream.Materializer import org.apache.pekko.stream.scaladsl.Source -import scalaz.syntax.tag.* -import scala.annotation.nowarn import scala.concurrent.{ExecutionContext, Future} -// TODO(#23504) remove deprecation warning suppression -@nowarn("cat=deprecation") final class ApiUpdateService( updateService: IndexUpdateService, metrics: LedgerApiServerMetrics, @@ -92,222 +83,6 @@ final class ApiUpdateService( } } - override def getUpdateTrees( - request: GetUpdatesRequest, - responseObserver: StreamObserver[GetUpdateTreesResponse], - ): Unit = { - implicit val loggingContextWithTrace: LoggingContextWithTrace = - LoggingContextWithTrace(loggerFactory, telemetry) - registerStream(responseObserver) { - implicit val errorLoggingContext: ErrorLoggingContext = - ErrorLoggingContext(logger, loggingContextWithTrace) - - logger.debug(s"Received new update trees request $request.") - Source.future(updateService.currentLedgerEnd()).flatMapConcat { ledgerEnd => - val validation = UpdateServiceRequestValidator.validateForTrees( - GetUpdatesRequest( - beginExclusive = request.beginExclusive, - endInclusive = request.endInclusive, - filter = request.filter, - verbose = request.verbose, - updateFormat = None, - ), - ledgerEnd, - ) - - validation.fold( - t => Source.failed(ValidationLogger.logFailureWithTrace(logger, request, t)), - req => - if ( - req.eventFormat.filtersByParty.isEmpty && req.eventFormat.filtersForAnyParty.isEmpty - ) { - logger.debug("transaction filters were empty, will not return anything") - Source.empty - } else { - LoggingContextWithTrace.withEnrichedLoggingContext( - logging.startExclusive(req.startExclusive), - logging.endInclusive(req.endInclusive), - logging.eventFormat(req.eventFormat), - ) { implicit loggingContext => - logger.info( - s"Received request for update trees, ${loggingContext - .serializeFiltered("startExclusive", "endInclusive", "updateFormat")}." 
- )(loggingContext.traceContext) - } - logger.trace(s"Update tree request: $req.") - updateService - .transactionTrees( - req.startExclusive, - req.endInclusive, - req.eventFormat, - ) - .via( - logger.enrichedDebugStream("Responding with update trees.", updatesLoggable) - ) - .via(logger.logErrorsOnStream) - .via(StreamMetrics.countElements(metrics.lapi.streams.updateTrees)) - }, - ) - } - } - } - - override def getTransactionTreeByOffset( - req: GetTransactionByOffsetRequest - ): Future[GetTransactionTreeResponse] = { - implicit val loggingContextWithTrace: LoggingContextWithTrace = - LoggingContextWithTrace(loggerFactory, telemetry) - implicit val errorLoggingContext: ErrorLoggingContext = - ErrorLoggingContext(logger, loggingContextWithTrace) - - UpdateServiceRequestValidator - .validateTransactionByOffsetForTrees(req) - .fold( - t => Future.failed(ValidationLogger.logFailureWithTrace(logger, req, t)), - request => { - implicit val enrichedLoggingContext: LoggingContextWithTrace = - LoggingContextWithTrace.enriched( - logging.offset(request.offset.unwrap), - logging.parties(request.requestingParties), - )(loggingContextWithTrace) - logger.info(s"Received request for transaction tree by offset, ${enrichedLoggingContext - .serializeFiltered("offset", "parties")}.")(loggingContextWithTrace.traceContext) - logger.trace(s"Transaction tree by offset request: $request")( - loggingContextWithTrace.traceContext - ) - val offset = request.offset - updateService - .getTransactionTreeByOffset(offset, request.requestingParties)( - loggingContextWithTrace - ) - .flatMap { - case None => - Future.failed( - RequestValidationErrors.NotFound.Transaction - .RejectWithOffset(offset.unwrap) - .asGrpcError - ) - case Some(transactionTree) => - Future.successful(transactionTree) - } - .thereafter( - logger - .logErrorsOnCall[GetTransactionTreeResponse](loggingContextWithTrace.traceContext) - ) - }, - ) - } - - override def getTransactionTreeById( - req: GetTransactionByIdRequest - ): Future[GetTransactionTreeResponse] = { - implicit val loggingContextWithTrace: LoggingContextWithTrace = - LoggingContextWithTrace(loggerFactory, telemetry) - implicit val errorLoggingContext: ErrorLoggingContext = - ErrorLoggingContext(logger, loggingContextWithTrace) - - UpdateServiceRequestValidator - .validateTransactionByIdForTrees(req) - .fold( - t => Future.failed(ValidationLogger.logFailureWithTrace(logger, req, t)), - request => { - implicit val enrichedLoggingContext: LoggingContextWithTrace = - LoggingContextWithTrace.enriched( - logging.updateId(request.updateId), - logging.parties(request.requestingParties), - )(loggingContextWithTrace) - logger.info(s"Received request for transaction tree by ID, ${enrichedLoggingContext - .serializeFiltered("eventId", "parties")}.")(loggingContextWithTrace.traceContext) - logger.trace(s"Transaction tree by ID request: $request")( - loggingContextWithTrace.traceContext - ) - updateService - .getTransactionTreeById(request.updateId, request.requestingParties)( - loggingContextWithTrace - ) - .flatMap { - case None => - Future.failed( - RequestValidationErrors.NotFound.Transaction - .RejectWithTxId(request.updateId.unwrap) - .asGrpcError - ) - case Some(transactionTree) => - Future.successful(transactionTree) - } - .thereafter( - logger - .logErrorsOnCall[GetTransactionTreeResponse](loggingContextWithTrace.traceContext) - ) - }, - ) - } - - override def getTransactionByOffset( - req: GetTransactionByOffsetRequest - ): Future[GetTransactionResponse] = { - implicit val 
loggingContextWithTrace: LoggingContextWithTrace = - LoggingContextWithTrace(loggerFactory, telemetry) - implicit val errorLoggingContext: ErrorLoggingContext = - ErrorLoggingContext(logger, loggingContextWithTrace) - - UpdateServiceRequestValidator - .validateTransactionByOffset(req) - .fold( - t => Future.failed(ValidationLogger.logFailureWithTrace(logger, req, t)), - request => { - implicit val enrichedLoggingContext: LoggingContextWithTrace = - LoggingContextWithTrace.enriched( - logging.offset(request.offset.unwrap), - logging.transactionFormat(request.transactionFormat), - )(loggingContextWithTrace) - logger.info(s"Received request for transaction by offset, ${enrichedLoggingContext - .serializeFiltered("offset", "parties")}.")(loggingContextWithTrace.traceContext) - logger.trace(s"Transaction by offset request: $request")( - loggingContextWithTrace.traceContext - ) - val offset = request.offset - internalGetTransactionByOffset(offset, request.transactionFormat)( - loggingContextWithTrace - ).thereafter( - logger.logErrorsOnCall[GetTransactionResponse](loggingContextWithTrace.traceContext) - ) - }, - ) - } - - override def getTransactionById( - req: GetTransactionByIdRequest - ): Future[GetTransactionResponse] = { - val loggingContextWithTrace = LoggingContextWithTrace(loggerFactory, telemetry) - val errorLoggingContext = ErrorLoggingContext(logger, loggingContextWithTrace) - - UpdateServiceRequestValidator - .validateTransactionById(req)(errorLoggingContext) - .fold( - t => - Future - .failed(ValidationLogger.logFailureWithTrace(logger, req, t)(loggingContextWithTrace)), - request => { - implicit val enrichedLoggingContext: LoggingContextWithTrace = - LoggingContextWithTrace.enriched( - logging.updateId(request.updateId), - logging.transactionFormat(request.transactionFormat), - )(loggingContextWithTrace) - logger.info( - s"Received request for transaction by ID, ${enrichedLoggingContext - .serializeFiltered("eventId", "transactionFormat")}." 
- )(loggingContextWithTrace.traceContext) - logger.trace(s"Transaction by ID request: $request")(loggingContextWithTrace.traceContext) - - internalGetTransactionById(request.updateId, request.transactionFormat) - .thereafter( - logger.logErrorsOnCall[GetTransactionResponse](loggingContextWithTrace.traceContext) - ) - }, - ) - } - override def getUpdateByOffset( req: GetUpdateByOffsetRequest ): Future[GetUpdateResponse] = { @@ -333,7 +108,7 @@ final class ApiUpdateService( ) val offset = request.offset OptionT( - updateService.getUpdateBy(LookupKey.Offset(offset), request.updateFormat)( + updateService.getUpdateBy(LookupKey.ByOffset(offset), request.updateFormat)( loggingContextWithTrace ) ) @@ -381,42 +156,18 @@ final class ApiUpdateService( ) } - private def internalGetTransactionById( - updateId: UpdateId, - transactionFormat: TransactionFormat, - )(implicit - loggingContextWithTrace: LoggingContextWithTrace - ): Future[GetTransactionResponse] = - OptionT(updateService.getTransactionById(updateId, transactionFormat)) - .getOrElseF( - Future.failed( - RequestValidationErrors.NotFound.Transaction.RejectWithTxId(updateId.unwrap).asGrpcError - ) - ) - - private def internalGetTransactionByOffset( - offset: Offset, - transactionFormat: TransactionFormat, - )(implicit - loggingContextWithTrace: LoggingContextWithTrace - ): Future[GetTransactionResponse] = - OptionT(updateService.getTransactionByOffset(offset, transactionFormat)) - .getOrElseF( - Future.failed( - RequestValidationErrors.NotFound.Transaction.RejectWithOffset(offset.unwrap).asGrpcError - ) - ) - private def internalGetUpdateById( updateId: UpdateId, updateFormat: UpdateFormat, )(implicit loggingContextWithTrace: LoggingContextWithTrace ): Future[GetUpdateResponse] = - OptionT(updateService.getUpdateBy(LookupKey.UpdateId(updateId.unwrap), updateFormat)) + OptionT(updateService.getUpdateBy(LookupKey.ByUpdateId(updateId), updateFormat)) .getOrElseF( Future.failed( - RequestValidationErrors.NotFound.Update.RejectWithTxId(updateId.unwrap).asGrpcError + RequestValidationErrors.NotFound.Update + .RejectWithTxId(updateId.toHexString) + .asGrpcError ) ) @@ -434,18 +185,6 @@ final class ApiUpdateService( LoggingEntries() } - private def updatesLoggable(updates: GetUpdateTreesResponse): LoggingEntries = - updates.update match { - case GetUpdateTreesResponse.Update.TransactionTree(t) => - entityLoggable(t.commandId, t.updateId, t.workflowId, t.offset) - case GetUpdateTreesResponse.Update.Reassignment(r) => - entityLoggable(r.commandId, r.updateId, r.workflowId, r.offset) - case GetUpdateTreesResponse.Update.OffsetCheckpoint(c) => - LoggingEntries(logging.offset(c.offset)) - case GetUpdateTreesResponse.Update.Empty => - LoggingEntries() - } - private def entityLoggable( commandId: String, updateId: String, diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiVersionService.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiVersionService.scala index d570179aa7..7c811280b4 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiVersionService.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiVersionService.scala @@ -78,6 +78,11 @@ private[apiserver] final class ApiVersionService( ) ), offsetCheckpoint = Some(ledgerFeatures.offsetCheckpointFeature), + packageFeature = Some( 
+ PackageFeature.of( + maxVettedPackagesPageSize = 100 + ) + ), ) override def getLedgerApiVersion( diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/RejectionGenerators.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/RejectionGenerators.scala index 0e687e45e7..3fc5d0ff53 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/RejectionGenerators.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/RejectionGenerators.scala @@ -156,16 +156,6 @@ object RejectionGenerators { case LfInterpretationError.Upgrade(error: LfInterpretationError.Upgrade.ValidationFailed) => CommandExecutionErrors.Interpreter.UpgradeError.ValidationFailed .Reject(renderedMessage, error) - case LfInterpretationError.Upgrade( - error: LfInterpretationError.Upgrade.DowngradeDropDefinedField - ) => - CommandExecutionErrors.Interpreter.UpgradeError.DowngradeDropDefinedField - .Reject(renderedMessage, error) - case LfInterpretationError.Upgrade( - error: LfInterpretationError.Upgrade.DowngradeFailed - ) => - CommandExecutionErrors.Interpreter.UpgradeError.DowngradeFailed - .Reject(renderedMessage, error) case LfInterpretationError.Crypto( error: LfInterpretationError.Crypto.MalformedByteEncoding ) => diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPackageManagementService.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPackageManagementService.scala index 69216238b1..326a8dcdc1 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPackageManagementService.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPackageManagementService.scala @@ -3,22 +3,35 @@ package com.digitalasset.canton.platform.apiserver.services.admin +import cats.data.EitherT +import cats.implicits.{toBifunctorOps, toTraverseOps} import com.daml.ledger.api.v2.admin.package_management_service.* import com.daml.ledger.api.v2.admin.package_management_service.PackageManagementServiceGrpc.PackageManagementService +import com.daml.ledger.api.v2.package_reference.VettedPackages import com.daml.logging.LoggingContext import com.daml.tracing.Telemetry import com.digitalasset.base.error.RpcError +import com.digitalasset.canton.ProtoDeserializationError.ProtoDeserializationFailure import com.digitalasset.canton.ledger.api.grpc.GrpcApiService import com.digitalasset.canton.ledger.api.util.TimestampConversion +import com.digitalasset.canton.ledger.api.{ + PriorTopologySerialNone, + UpdateVettedPackagesOpts, + UploadDarVettingChange as UploadDarOpts, +} import com.digitalasset.canton.ledger.participant.state.{PackageSyncService, SubmissionResult} import com.digitalasset.canton.logging.LoggingContextUtil.createLoggingContext import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext import com.digitalasset.canton.logging.TracedLoggerOps.TracedLoggerOps import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.networking.grpc.CantonGrpcUtil import 
com.digitalasset.canton.platform.apiserver.services.logging +import com.digitalasset.canton.topology.SynchronizerId +import com.digitalasset.canton.util.EitherUtil.* import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.util.{EitherTUtil, OptionUtil} import com.digitalasset.daml.lf.data.Ref -import io.grpc.ServerServiceDefinition +import io.grpc.{ServerServiceDefinition, StatusRuntimeException} import scala.concurrent.{ExecutionContext, Future} import scala.util.Try @@ -72,12 +85,27 @@ private[apiserver] final class ApiPackageManagementService private ( logging.submissionId(submissionIdGenerator(request.submissionId)) ) { implicit loggingContext: LoggingContextWithTrace => logger.info(s"Validating DAR file, ${loggingContext.serializeFiltered("submissionId")}.") - packageSyncService - .validateDar(dar = request.darFile, darName = "defaultDarName") - .flatMap { - case SubmissionResult.Acknowledged => Future.successful(ValidateDarFileResponse()) - case err: SubmissionResult.SynchronousError => Future.failed(err.exception) - } + for { + synchronizerIdO <- + EitherTUtil.toFuture( + CantonGrpcUtil.mapErrNew( + OptionUtil + .emptyStringAsNone(request.synchronizerId) + .traverse(SynchronizerId.fromProtoPrimitive(_, "synchronizer_id")) + .leftMap(ProtoDeserializationFailure.Wrap(_)) + ) + ) + result <- packageSyncService + .validateDar( + dar = request.darFile, + darName = "defaultDarName", + synchronizerId = synchronizerIdO, + ) + .flatMap { + case SubmissionResult.Acknowledged => Future.successful(ValidateDarFileResponse()) + case err: SubmissionResult.SynchronousError => Future.failed(err.exception) + } + } yield result } override def uploadDarFile(request: UploadDarFileRequest): Future[UploadDarFileResponse] = { @@ -87,13 +115,75 @@ private[apiserver] final class ApiPackageManagementService private ( ) { implicit loggingContext: LoggingContextWithTrace => logger.info(s"Uploading DAR file, ${loggingContext.serializeFiltered("submissionId")}.") - packageSyncService - .uploadDar(Seq(request.darFile), submissionId) - .flatMap { - case SubmissionResult.Acknowledged => Future.successful(UploadDarFileResponse()) - case err: SubmissionResult.SynchronousError => Future.failed(err.exception) + val resultET = for { + synchronizerIdO <- + CantonGrpcUtil.mapErrNew( + OptionUtil + .emptyStringAsNone(request.synchronizerId) + .traverse(SynchronizerId.fromProtoPrimitive(_, "synchronizer_id")) + .leftMap(ProtoDeserializationFailure.Wrap(_)) + ) + uploadDarVettingChange <- CantonGrpcUtil + .mapErrNew( + UploadDarOpts + .fromProto("vetting_change", request.vettingChange) + .leftMap(ProtoDeserializationFailure.Wrap(_)) + ) + uploadResult <- EitherT.right( + packageSyncService + .uploadDar(Seq(request.darFile), submissionId, uploadDarVettingChange, synchronizerIdO) + ) + response <- uploadResult match { + case SubmissionResult.Acknowledged => + EitherT.rightT[Future, StatusRuntimeException](UploadDarFileResponse()) + case err: SubmissionResult.SynchronousError => + EitherT.leftT[Future, UploadDarFileResponse](err.exception) } - .thereafter(logger.logErrorsOnCall[UploadDarFileResponse]) + } yield response + EitherTUtil.toFuture(resultET).thereafter(logger.logErrorsOnCall[UploadDarFileResponse]) + } + } + + override def updateVettedPackages( + request: UpdateVettedPackagesRequest + ): Future[UpdateVettedPackagesResponse] = { + val submissionId = submissionIdGenerator("") + LoggingContextWithTrace.withEnrichedLoggingContext(telemetry)( + logging.submissionId(submissionId) + ) { 
implicit loggingContext: LoggingContextWithTrace => + for { + updateVettedPackagesOpts <- UpdateVettedPackagesOpts + .fromProto(request) + .toFuture(ProtoDeserializationFailure.Wrap(_).asGrpcError) + result <- packageSyncService.updateVettedPackages(updateVettedPackagesOpts) + } yield result match { + case (previousStates, newStates) => + UpdateVettedPackagesResponse( + // TODO(#27750) Make sure to only populate this when a prior vetting + // state actually exists. If no vetting state exists, this should be + // None. + pastVettedPackages = Some( + VettedPackages( + packages = previousStates.map(_.toProtoLAPI), + // TODO(#27750) Populate these fields and assert over them when + // updates and queries can specify target synchronizers + participantId = "", + synchronizerId = "", + topologySerial = Some(PriorTopologySerialNone.toProtoLAPI), + ) + ), + newVettedPackages = Some( + VettedPackages( + packages = newStates.map(_.toProtoLAPI), + // TODO(#27750) Populate these fields and assert over them when + // updates and queries can specify target synchronizers + participantId = "", + synchronizerId = "", + topologySerial = Some(PriorTopologySerialNone.toProtoLAPI), + ) + ), + ) + } } } } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPartyManagementService.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPartyManagementService.scala index f9b63b327c..f2196681a6 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPartyManagementService.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPartyManagementService.scala @@ -3,11 +3,17 @@ package com.digitalasset.canton.platform.apiserver.services.admin +import cats.syntax.either.* import com.daml.ledger.api.v2.admin.object_meta.ObjectMeta as ProtoObjectMeta +import com.daml.ledger.api.v2.admin.party_management_service.AllocateExternalPartyRequest.SignedTransaction import com.daml.ledger.api.v2.admin.party_management_service.PartyManagementServiceGrpc.PartyManagementService import com.daml.ledger.api.v2.admin.party_management_service.{ + AllocateExternalPartyRequest, + AllocateExternalPartyResponse, AllocatePartyRequest, AllocatePartyResponse, + GenerateExternalPartyTopologyRequest, + GenerateExternalPartyTopologyResponse, GetParticipantIdRequest, GetParticipantIdResponse, GetPartiesRequest, @@ -22,14 +28,17 @@ import com.daml.ledger.api.v2.admin.party_management_service.{ UpdatePartyIdentityProviderIdResponse, } import com.daml.logging.LoggingContext +import com.daml.nonempty.NonEmpty import com.daml.platform.v1.page_tokens.ListPartiesPageTokenPayload import com.daml.tracing.Telemetry import com.digitalasset.canton.auth.AuthorizationChecksErrors -import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.crypto.v30.{SigningKeyScheme, SigningKeyUsage} +import com.digitalasset.canton.crypto.{Signature, SigningPublicKey, v30} import com.digitalasset.canton.ledger.api.grpc.GrpcApiService import com.digitalasset.canton.ledger.api.validation.FieldValidator.* -import com.digitalasset.canton.ledger.api.validation.ValidationErrors import com.digitalasset.canton.ledger.api.validation.ValueValidator.requirePresence +import 
com.digitalasset.canton.ledger.api.validation.{CryptoValidator, ValidationErrors} import com.digitalasset.canton.ledger.api.{ IdentityProviderId, ObjectMeta, @@ -37,10 +46,12 @@ import com.digitalasset.canton.ledger.api.{ User, UserRight, } +import com.digitalasset.canton.ledger.error.CommonErrors import com.digitalasset.canton.ledger.error.groups.{ PartyManagementServiceErrors, RequestValidationErrors, } +import com.digitalasset.canton.ledger.localstore.api.UserManagementStore.UserInfo import com.digitalasset.canton.ledger.localstore.api.{ ObjectMetaUpdate, PartyDetailsUpdate, @@ -50,7 +61,11 @@ import com.digitalasset.canton.ledger.localstore.api.{ UserManagementStore, } import com.digitalasset.canton.ledger.participant.state -import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective.AuthorizationEvent +import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective.AuthorizationLevel.Observation +import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective.{ + AuthorizationEvent, + AuthorizationLevel, +} import com.digitalasset.canton.ledger.participant.state.index.* import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.* @@ -60,14 +75,23 @@ import com.digitalasset.canton.logging.LoggingContextWithTrace.{ withEnrichedLoggingContext, } import com.digitalasset.canton.platform.apiserver.services.admin.ApiPartyManagementService.* -import com.digitalasset.canton.platform.apiserver.services.admin.PartyAllocation +import com.digitalasset.canton.platform.apiserver.services.admin.AuthenticatedUserContextResolver.AuthenticatedUserContext +import com.digitalasset.canton.platform.apiserver.services.admin.{ + PartyAllocation, + PendingPartyAllocations, +} import com.digitalasset.canton.platform.apiserver.services.logging import com.digitalasset.canton.platform.apiserver.services.tracking.StreamTracker import com.digitalasset.canton.platform.apiserver.update import com.digitalasset.canton.platform.apiserver.update.PartyRecordUpdateMapper +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.topology.transaction.DelegationRestriction.CanSignAllMappings +import com.digitalasset.canton.topology.transaction.TopologyTransaction.PositiveTopologyTransaction import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.version.{ProtocolVersion, ProtocolVersionValidation} import com.digitalasset.daml.lf.data.Ref -import com.digitalasset.daml.lf.data.Ref.Party import io.grpc.Status.Code.ALREADY_EXISTS import io.grpc.{ServerServiceDefinition, StatusRuntimeException} import io.opentelemetry.api.trace.Tracer @@ -86,6 +110,7 @@ private[apiserver] final class ApiPartyManagementService private ( userManagementStore: UserManagementStore, identityProviderExists: IdentityProviderExists, maxPartiesPageSize: PositiveInt, + maxSelfAllocatedParties: NonNegativeInt, partyRecordStore: PartyRecordStore, syncService: state.PartySyncService, managementServiceTimeout: FiniteDuration, @@ -98,7 +123,10 @@ private[apiserver] final class ApiPartyManagementService private ( tracer: Tracer, ) extends PartyManagementService with GrpcApiService - with NamedLogging { + with NamedLogging + with AuthenticatedUserContextResolver { + + private val pendingPartyAllocations = new PendingPartyAllocations() private implicit val loggingContext: 
LoggingContext = createLoggingContext(loggerFactory)(identity) @@ -135,7 +163,7 @@ private[apiserver] final class ApiPartyManagementService private ( ) parties <- request.parties.toList.traverse(requireParty) } yield (parties, identityProviderId) - } { case (parties: Seq[Party], identityProviderId: IdentityProviderId) => + } { case (parties: Seq[Ref.Party], identityProviderId: IdentityProviderId) => for { partyDetailsSeq <- partyManagementService.getParties(parties) partyRecordOptions <- fetchPartyRecords(partyDetailsSeq) @@ -215,7 +243,10 @@ private[apiserver] final class ApiPartyManagementService private ( key: PartyAllocation.TrackerKey )(implicit errorLogger: ErrorLoggingContext): StatusRuntimeException = CommonErrors.RequestAlreadyInFlight - .Reject(requestId = key.submissionId) + .Reject( + requestId = key.submissionId, + details = s"Party ${key.party} is in the process of being allocated on this node.", + ) .asGrpcError } @@ -233,6 +264,9 @@ private[apiserver] final class ApiPartyManagementService private ( ) implicit val errorLoggingContext: ErrorLoggingContext = ErrorLoggingContext(logger, loggingContext.toPropertiesMap, loggingContext.traceContext) + // Retrieving the authenticated user context from the thread-local context + val authenticatedUserContextF: Future[AuthenticatedUserContext] = + resolveAuthenticatedUserContext import com.digitalasset.canton.config.NonNegativeFiniteDuration withValidation { @@ -265,46 +299,59 @@ private[apiserver] final class ApiPartyManagementService private ( } yield (partyIdHintO, annotations, identityProviderId, synchronizerIdO, userId) } { case (partyIdHintO, annotations, identityProviderId, synchronizerIdO, userId) => val partyName = partyIdHintO.getOrElse(generatePartyName) - val trackerKey = submissionIdGenerator(partyName) + val trackerKey = submissionIdGenerator(partyName, AuthorizationLevel.Submission) withEnrichedLoggingContext(logging.submissionId(trackerKey.submissionId)) { implicit loggingContext => - for { - _ <- identityProviderExistsOrError(identityProviderId) - user <- getUserIfUserSpecified(userId, identityProviderId) - allocated <- partyAllocationTracker - .track( - trackerKey, - NonNegativeFiniteDuration(managementServiceTimeout), - ) { _ => - for { - result <- syncService.allocateParty( - partyName, - trackerKey.submissionId, - synchronizerIdO, - ) - _ <- checkSubmissionResult(result) - } yield () - } - .transform(alreadyExistsError(trackerKey.submissionId, loggingContext)) - _ <- verifyPartyIsNonExistentOrInIdp( - identityProviderId, - allocated.partyDetails.party, - ) - existingPartyRecord <- partyRecordStore.getPartyRecordO(allocated.partyDetails.party) - partyRecord <- updateOrCreatePartyRecord( - existingPartyRecord, - allocated.partyDetails.party, - identityProviderId, - annotations, - ) - _ <- updateUserInfoIfUserSpecified(allocated.partyDetails.party, user) - } yield { - val details = toProtoPartyDetails( - partyDetails = allocated.partyDetails, - metadataO = Some(partyRecord.metadata), - identityProviderId = Some(identityProviderId), - ) - AllocatePartyResponse(Some(details)) + pendingPartyAllocations.withUser(userId) { outstandingCalls => + for { + _ <- identityProviderExistsOrError(identityProviderId) + userInfo <- getUserIfUserSpecified(userId, identityProviderId) + _ <- checkUserLimitsIfUserSpecified( + userInfo.map(_.rights), + outstandingCalls, + authenticatedUserContextF, + ) + allocated <- partyAllocationTracker + .track( + trackerKey, + NonNegativeFiniteDuration(managementServiceTimeout), + ) { _ => + for { + 
result <- syncService.allocateParty( + partyName, + trackerKey.submissionId, + synchronizerIdO, + externalPartyOnboardingDetails = None, + ) + _ <- checkSubmissionResult(result) + } yield () + } + .transform(alreadyExistsError(trackerKey.submissionId, loggingContext)) + _ <- verifyPartyIsNonExistentOrInIdp( + identityProviderId, + allocated.partyDetails.party, + ) + existingPartyRecord <- partyRecordStore.getPartyRecordO( + allocated.partyDetails.party + ) + partyRecord <- updateOrCreatePartyRecord( + existingPartyRecord, + allocated.partyDetails.party, + identityProviderId, + annotations, + ) + _ <- updateUserInfoIfUserSpecified( + allocated.partyDetails.party, + userInfo.map(_.user), + ) + } yield { + val details = toProtoPartyDetails( + partyDetails = allocated.partyDetails, + metadataO = Some(partyRecord.metadata), + identityProviderId = Some(identityProviderId), + ) + AllocatePartyResponse(Some(details)) + } } } } @@ -389,7 +436,10 @@ private[apiserver] final class ApiPartyManagementService private ( override def updatePartyDetails( request: UpdatePartyDetailsRequest ): Future[UpdatePartyDetailsResponse] = { - val submissionId = submissionIdGenerator(request.partyDetails.fold("")(_.party)).submissionId + val submissionId = submissionIdGenerator( + request.partyDetails.fold("")(_.party), + AuthorizationLevel.Submission, + ).submissionId withEnrichedLoggingContext(telemetry)( logging.submissionId(submissionId) ) { implicit loggingContext => @@ -666,13 +716,38 @@ private[apiserver] final class ApiPartyManagementService private ( private def getUserIfUserSpecified( userId: Option[Ref.UserId], identityProviderId: IdentityProviderId, - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[User]] = - userId.fold[Future[Option[User]]](Future.successful(None))( + )(implicit loggingContext: LoggingContextWithTrace): Future[Option[UserInfo]] = + userId.fold[Future[Option[UserInfo]]](Future.successful(None))( userManagementStore - .getUser(_, identityProviderId) + .getUserInfo(_, identityProviderId) .flatMap(result => Utils.handleResult("checking user's existence")(result).map(Some(_))) ) + private def checkUserLimitsIfUserSpecified( + userRights: Option[Set[UserRight]], + outstandingCalls: Int, + authenticatedUserContextF: Future[AuthenticatedUserContext], + )(implicit loggingContext: LoggingContextWithTrace): Future[Unit] = + userRights match { + case None => Future.successful(()) + case Some(rights) => + for { + authenticatedUserContext <- authenticatedUserContextF + resultingRightsCount = rights.flatMap(_.getParty).size + outstandingCalls + _ <- + if ( + authenticatedUserContext.isRegularUser && resultingRightsCount > maxSelfAllocatedParties.unwrap + ) + Future.failed( + AuthorizationChecksErrors.PermissionDenied + .Reject(s"User quota of party allocations exhausted") + .asGrpcError + ) + else + Future.successful(()) + } yield () + } + private def identityProviderExistsOrError( id: IdentityProviderId )(implicit @@ -690,6 +765,306 @@ private[apiserver] final class ApiPartyManagementService private ( .asGrpcError ) } + + private def parseSignedTransaction( + protocolVersion: ProtocolVersion, + signedTransaction: SignedTransaction, + )(implicit + errorLoggingContext: ErrorLoggingContext + ): Either[ + StatusRuntimeException, + (PositiveTopologyTransaction, List[Signature]), + ] = + for { + transaction <- TopologyTransaction + .fromByteString( + // TODO(i27619): We may be able to not validate the protocol version here + // depending on the trust we put in the input + // Note that 
pinning to a protocol version makes it not possible to use transactions + // generated with an earlier protocol version (e.g in between synchronizer updates) + ProtocolVersionValidation(protocolVersion), + signedTransaction.transaction, + ) + .leftMap(error => + ValidationErrors.invalidField( + "onboarding_transactions.transaction", + s"Invalid transaction: ${error.message}", + ) + ) + positiveTransaction <- transaction + .selectOp[TopologyChangeOp.Replace] + .toRight( + ValidationErrors.invalidField( + "onboarding_transactions.transaction", + s"Onboarding topology transactions must be Replace operations", + ) + ) + _ <- Either.cond( + positiveTransaction.serial == PositiveInt.one, + (), + ValidationErrors.invalidField( + "onboarding_transactions.transaction.serial", + "Onboarding transaction serial must be 1", + ), + ) + signatures <- signedTransaction.signatures.toList.traverse( + CryptoValidator.validateSignature(_, "onboarding_transaction.signatures") + ) + } yield (positiveTransaction, signatures) + + override def allocateExternalParty( + request: AllocateExternalPartyRequest + ): Future[AllocateExternalPartyResponse] = { + implicit val loggingContext = LoggingContextWithTrace(telemetry)(this.loggingContext) + implicit val errorLoggingContext: ErrorLoggingContext = + ErrorLoggingContext(logger, loggingContext.toPropertiesMap, loggingContext.traceContext) + import com.digitalasset.canton.config.NonNegativeFiniteDuration + + withValidation { + for { + synchronizerId <- requireSynchronizerId(request.synchronizer, "synchronizer") + .orElse( + // Take a physical synchronizer ID too + requirePhysicalSynchronizerId(request.synchronizer, "synchronizer").map(_.logical) + ) + protocolVersion <- syncService + .protocolVersionForSynchronizerId(synchronizerId) + .toRight( + ValidationErrors.invalidArgument( + s"This node is not connected to the requested synchronizer $synchronizerId." 
+ ) + ) + transactionsWithSignatures <- request.onboardingTransactions.toList.traverse( + parseSignedTransaction(protocolVersion, _) + ) + signedTransactionsNE <- NonEmpty + .from(transactionsWithSignatures) + .toRight( + ValidationErrors + .invalidField("onboarding_transactions.transactions", "Transactions field is empty") + ) + parsedMultiSignatures <- request.multiHashSignatures.toList.traverse( + CryptoValidator.validateSignature(_, "multi_hash_signatures.signatures") + ) + _ = logger.debug( + s"External party allocation input transactions:\n ${signedTransactionsNE.map(_._1).mkString("\n")}" + ) + cantonParticipantId = this.syncService.participantId + externalPartyDetails <- ExternalPartyOnboardingDetails + .create(signedTransactionsNE, parsedMultiSignatures, protocolVersion, cantonParticipantId) + .leftMap(ValidationErrors.invalidArgument(_)) + partyName <- requireParty(externalPartyDetails.partyHint) + } yield (partyName, synchronizerId, externalPartyDetails) + } { case (partyName, synchronizerId, externalPartyOnboardingDetails) => + val hostingParticipantsString = externalPartyOnboardingDetails.hostingParticipants + .map { case HostingParticipant(participantId, permission, _onboarding) => + s"$participantId -> $permission" + } + .mkString("[", ", ", "]") + val signingKeysString = externalPartyOnboardingDetails.signedPartyToKeyMappingTransaction + .map { p2k => + s" and ${p2k.mapping.signingKeys.length} signing keys with threshold ${p2k.mapping.threshold.value}" + } + .getOrElse("") + logger.info( + s"Allocating external party ${externalPartyOnboardingDetails.partyId.toProtoPrimitive} on" + + s" $hostingParticipantsString with confirmation threshold ${externalPartyOnboardingDetails.confirmationThreshold.value}" + signingKeysString + ) + val trackerKey = + submissionIdGenerator( + partyName, + authorizationLevel = + if (externalPartyOnboardingDetails.isConfirming) AuthorizationLevel.Confirmation + else Observation, + ) + withEnrichedLoggingContext(telemetry)(logging.submissionId(trackerKey.submissionId)) { + implicit loggingContext => + def allocateFn = for { + result <- syncService.allocateParty( + partyName, + trackerKey.submissionId, + Some(synchronizerId), + Some(externalPartyOnboardingDetails), + ) + _ <- checkSubmissionResult(result) + } yield () + + // Only track the party if we expect it to be fully authorized + // Otherwise the party won't be fully onboarded here so this would time out + val partyIdF = + if (externalPartyOnboardingDetails.fullyAllocatesParty) { + partyAllocationTracker + .track( + trackerKey, + NonNegativeFiniteDuration(managementServiceTimeout), + )(_ => allocateFn) + .map(_.partyDetails.party) + } else { + allocateFn + .map(_ => externalPartyOnboardingDetails.partyId.toProtoPrimitive) + .failOnShutdownTo( + CommonErrors.ServiceNotRunning.Reject("PartyManagementService").asGrpcError + ) + } + partyIdF + .map(AllocateExternalPartyResponse.apply) + .transform(alreadyExistsError(trackerKey.submissionId, loggingContext)) + } + } + } + + override def generateExternalPartyTopology( + request: GenerateExternalPartyTopologyRequest + ): Future[GenerateExternalPartyTopologyResponse] = { + import io.scalaland.chimney.dsl.* + implicit val errorLoggingContext: ErrorLoggingContext = + ErrorLoggingContext(logger, LoggingContextWithTrace(telemetry)) + val GenerateExternalPartyTopologyRequest( + synchronizerIdP, + partyHint, + publicKeyO, + localParticipantObservationOnly, + otherConfirmingParticipantUids, + confirmationThreshold, + observingParticipantUids, + ) = request + 
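In allocateExternalParty above, the completion tracker is only used when externalPartyOnboardingDetails.fullyAllocatesParty holds; if other participants still have to co-sign the onboarding transactions, the party cannot become fully authorized on this call, so waiting on the tracker would only time out. A condensed sketch of that branching, with PartyTracker, submit, and fullyAllocates as hypothetical stand-ins for the real tracker and sync-service plumbing:

import scala.concurrent.{ExecutionContext, Future}

// Hypothetical stand-in for the party allocation tracker: runs the submission
// and completes once the matching ledger update has been observed.
trait PartyTracker {
  def track(submit: () => Future[Unit]): Future[String]
}

def allocateExternal(
    tracker: PartyTracker,
    submit: () => Future[Unit],
    partyId: String,
    fullyAllocates: Boolean,
)(implicit ec: ExecutionContext): Future[String] =
  if (fullyAllocates)
    // The party becomes fully authorized here, so it is safe to wait for the
    // corresponding topology update and return the allocated party from it.
    tracker.track(submit)
  else
    // Signatures from other participants are still outstanding; return the
    // party id as soon as the submission itself is accepted.
    submit().map(_ => partyId)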
+ val participantId = syncService.participantId + + val availableConfirmers = + (if (localParticipantObservationOnly) 0 else 1) + otherConfirmingParticipantUids.size + + val response = for { + publicKeyP <- ProtoConverter.required("public_key", publicKeyO).leftMap(_.message) + publicKeyT <- publicKeyP + .intoPartial[v30.SigningPublicKey] + .withFieldConst(_.scheme, SigningKeyScheme.SIGNING_KEY_SCHEME_UNSPECIFIED) + .withFieldConst( + _.usage, + Seq( + SigningKeyUsage.SIGNING_KEY_USAGE_NAMESPACE, + SigningKeyUsage.SIGNING_KEY_USAGE_PROOF_OF_OWNERSHIP, + SigningKeyUsage.SIGNING_KEY_USAGE_PROTOCOL, + ), + ) + .withFieldRenamed(_.keyData, _.publicKey) + .transform + .asEither + .leftMap(_.asErrorPathMessages.map { case (p, e) => s"$p: $e" }.mkString(", ")) + pubKey <- SigningPublicKey + .fromProtoV30(publicKeyT) + .leftMap(_.message) + namespace = Namespace(pubKey.fingerprint) + synchronizerIdWithVersion <- UniqueIdentifier + .fromProtoPrimitive_(synchronizerIdP) + .map(SynchronizerId(_)) + .leftMap(_.message) + .flatMap(synchronizerId => + syncService + .protocolVersionForSynchronizerId(synchronizerId) + .toRight(s"Unknown or not connected synchronizer $synchronizerId") + .map((synchronizerId, _)) + ) + _ <- Either.cond(partyHint.nonEmpty, (), "Party hint is empty") + _ <- UniqueIdentifier.verifyValidString(partyHint).leftMap(x => "party_hint: " + x) + uid <- UniqueIdentifier.create(partyHint, namespace) + _ <- Either.cond(confirmationThreshold >= 0, (), "Negative confirmation threshold observed") + confirmingPids <- otherConfirmingParticipantUids.toList + .traverse(UniqueIdentifier.fromProtoPrimitive_) + .leftMap(_.message) + observingPids <- observingParticipantUids.toList + .traverse(UniqueIdentifier.fromProtoPrimitive_) + .leftMap(_.message) + _ <- Either.cond( + !confirmingPids.contains(participantId.uid), + (), + s"This participant node ($participantId) is also listed in 'otherConfirmingParticipantUids'." + + s" By sending the request to this node, it is de facto a hosting node and must not be listed in 'otherConfirmingParticipantUids'.", + ) + _ <- Either.cond( + !observingPids.contains(participantId.uid), + (), + s"This participant node ($participantId) is also listed in 'observingParticipantUids'." + + s" By sending the request to this node, it is de facto a hosting node and must not be listed in 'observingParticipantUids'.", + ) + allParticipantIds = (confirmingPids ++ observingPids) + _ <- Either.cond( + allParticipantIds.distinct.sizeIs == allParticipantIds.size, + (), { + val duplicates = + allParticipantIds.groupBy(identity).collect { case (x, ys) if ys.sizeIs > 1 => x } + s"The following participant IDs are referenced multiple times in the request: ${duplicates + .mkString(", ")}." + + s" Please ensure all IDs are referenced only once" + + s" across 'otherConfirmingParticipantUids' and 'observingParticipantUids' fields." 
+ }, + ) + _ <- Either.cond( + confirmationThreshold <= availableConfirmers, + (), + "Confirmation threshold exceeds number of confirming participants", + ) + threshold = + if (confirmationThreshold == 0) availableConfirmers + else confirmationThreshold + party = PartyId(uid) + nsd <- NamespaceDelegation.create(namespace, pubKey, CanSignAllMappings) + p2k <- PartyToKeyMapping.create( + party, + threshold = PositiveInt.one, + signingKeys = NonEmpty.mk(Seq, pubKey), + ) + p2p <- PartyToParticipant.create( + party, + threshold = PositiveInt.tryCreate(threshold), + HostingParticipant( + participantId, + if (localParticipantObservationOnly) ParticipantPermission.Observation + else ParticipantPermission.Confirmation, + ) +: (confirmingPids.map(uid => + HostingParticipant(ParticipantId(uid), ParticipantPermission.Confirmation) + ) ++ observingPids.map(uid => + HostingParticipant(ParticipantId(uid), ParticipantPermission.Observation) + )), + ) + } yield { + val (_synchronizerId, protocolVersion) = synchronizerIdWithVersion + val transactions = + NonEmpty + .mk(List, nsd, p2k, p2p) + .map(mapping => + TopologyTransaction( + op = TopologyChangeOp.Replace, + serial = PositiveInt.one, + mapping = mapping, + protocolVersion = protocolVersion, + ) + ) + + GenerateExternalPartyTopologyResponse( + partyId = party.toProtoPrimitive, + publicKeyFingerprint = pubKey.fingerprint.toProtoPrimitive, + topologyTransactions = transactions.map(_.toByteString), + multiHash = MultiTransactionSignature + .computeCombinedHash( + transactions.map(_.hash).toSet, + syncService.hashOps, + ) + .getCryptographicEvidence, + ) + + } + response match { + case Left(err) => + Future.failed( + RequestValidationErrors.InvalidArgument + .Reject(err) + .asGrpcError + ) + case Right(resp) => Future.successful(resp) + } + } + } private[apiserver] object ApiPartyManagementService { @@ -732,6 +1107,7 @@ private[apiserver] object ApiPartyManagementService { userManagementStore: UserManagementStore, identityProviderExists: IdentityProviderExists, maxPartiesPageSize: PositiveInt, + maxSelfAllocatedParties: NonNegativeInt, partyRecordStore: PartyRecordStore, writeBackend: state.PartySyncService, managementServiceTimeout: FiniteDuration, @@ -748,6 +1124,7 @@ private[apiserver] object ApiPartyManagementService { userManagementStore, identityProviderExists, maxPartiesPageSize, + maxSelfAllocatedParties, partyRecordStore, writeBackend, managementServiceTimeout, @@ -790,7 +1167,7 @@ private[apiserver] object ApiPartyManagementService { .asGrpcError } - def encodeNextPageToken(token: Option[Party]): String = + def encodeNextPageToken(token: Option[Ref.Party]): String = token .map { id => val bytes = Base64.getUrlEncoder.encode( @@ -801,25 +1178,25 @@ private[apiserver] object ApiPartyManagementService { .getOrElse("") trait CreateSubmissionId { - def apply(partyIdHint: String): PartyAllocation.TrackerKey + def apply( + partyIdHint: String, + authorizationLevel: AuthorizationLevel, + ): PartyAllocation.TrackerKey } object CreateSubmissionId { import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective.AuthorizationLevel def forParticipant(participantId: Ref.ParticipantId) = new CreateSubmissionId() { - override def apply(partyIdHint: String): PartyAllocation.TrackerKey = - PartyAllocation.TrackerKey.of( + override def apply( + partyIdHint: String, + authorizationLevel: AuthorizationLevel, + ): PartyAllocation.TrackerKey = + PartyAllocation.TrackerKey( partyIdHint, participantId, - 
AuthorizationEvent.Added(AuthorizationLevel.Submission), + AuthorizationEvent.Added(authorizationLevel), ) } - - def fixedForTests(const: String) = new CreateSubmissionId() { - override def apply(partyIdHint: String): PartyAllocation.TrackerKey = - PartyAllocation.TrackerKey.forTests(Ref.SubmissionId.assertFromString(const)) - } - } } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiUserManagementService.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiUserManagementService.scala index bde969ad42..cf1224b2fc 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiUserManagementService.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiUserManagementService.scala @@ -15,8 +15,6 @@ import com.daml.ledger.api.v2.admin.user_management_service.{ import com.daml.platform.v1.page_tokens.ListUsersPageTokenPayload import com.daml.tracing.Telemetry import com.digitalasset.base.error.ErrorResource -import com.digitalasset.canton.auth.ClaimSet.Claims -import com.digitalasset.canton.auth.{AuthInterceptor, ClaimAdmin} import com.digitalasset.canton.ledger.api.grpc.GrpcApiService import com.digitalasset.canton.ledger.api.validation.{FieldValidator, ValueValidator} import com.digitalasset.canton.ledger.api.{ @@ -26,7 +24,6 @@ import com.digitalasset.canton.ledger.api.{ User, UserRight, } -import com.digitalasset.canton.ledger.error.LedgerApiErrors import com.digitalasset.canton.ledger.error.groups.{ RequestValidationErrors, UserManagementServiceErrors, @@ -67,9 +64,11 @@ private[apiserver] final class ApiUserManagementService( executionContext: ExecutionContext ) extends proto.UserManagementServiceGrpc.UserManagementService with GrpcApiService - with NamedLogging { + with NamedLogging + with AuthenticatedUserContextResolver { import ApiUserManagementService.* + import AuthenticatedUserContextResolver.* import FieldValidator.* import ValueValidator.* @@ -217,32 +216,6 @@ private[apiserver] final class ApiUserManagementService( } } - private def resolveAuthenticatedUserContext(implicit - errorLogger: ErrorLoggingContext - ): Future[AuthenticatedUserContext] = - AuthInterceptor - .extractClaimSetFromContext() - .fold( - fa = error => - Future.failed( - LedgerApiErrors.InternalError - .Generic("Could not extract a claim set from the context", throwableO = Some(error)) - .asGrpcError - ), - fb = { - case claims: Claims => - Future.successful(AuthenticatedUserContext(claims)) - case claimsSet => - Future.failed( - LedgerApiErrors.InternalError - .Generic( - s"Unexpected claims when trying to resolve the authenticated user: $claimsSet" - ) - .asGrpcError - ) - }, - ) - override def getUser(request: proto.GetUserRequest): Future[GetUserResponse] = { implicit val loggingContextWithTrace = LoggingContextWithTrace(loggerFactory, telemetry) implicit val errorLoggingContext = ErrorLoggingContext(logger, loggingContextWithTrace) @@ -588,15 +561,6 @@ private[apiserver] final class ApiUserManagementService( } object ApiUserManagementService { - final case class AuthenticatedUserContext(userId: Option[String], isParticipantAdmin: Boolean) - object AuthenticatedUserContext { - def apply(claims: Claims): AuthenticatedUserContext = claims match { - case claims: Claims if claims.resolvedFromUser => - 
AuthenticatedUserContext(claims.userId, claims.claims.contains(ClaimAdmin)) - case claims: Claims => - AuthenticatedUserContext(None, claims.claims.contains(ClaimAdmin)) - } - } private def toProtoUser(user: User): proto.User = proto.User( diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/AuthenticatedUserContextResolver.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/AuthenticatedUserContextResolver.scala new file mode 100644 index 0000000000..7784474379 --- /dev/null +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/AuthenticatedUserContextResolver.scala @@ -0,0 +1,49 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.platform.apiserver.services.admin + +import com.digitalasset.canton.auth.ClaimSet.Claims +import com.digitalasset.canton.auth.{AuthInterceptor, ClaimAdmin, ClaimIdentityProviderAdmin} +import com.digitalasset.canton.ledger.error.LedgerApiErrors +import com.digitalasset.canton.logging.ErrorLoggingContext + +import scala.concurrent.Future + +trait AuthenticatedUserContextResolver { + import AuthenticatedUserContextResolver.* + def resolveAuthenticatedUserContext(implicit + errorLogger: ErrorLoggingContext + ): Future[AuthenticatedUserContext] = + AuthInterceptor + .extractClaimSetFromContext() + .fold( + fa = error => + Future.failed( + LedgerApiErrors.InternalError + .Generic("Could not extract a claim set from the context", throwableO = Some(error)) + .asGrpcError + ), + fb = { + case claims: Claims => + Future.successful(AuthenticatedUserContext(claims)) + case claimsSet => + Future.failed( + LedgerApiErrors.InternalError + .Generic( + s"Unexpected claims when trying to resolve the authenticated user: $claimsSet" + ) + .asGrpcError + ) + }, + ) +} + +object AuthenticatedUserContextResolver { + final case class AuthenticatedUserContext(claims: Claims) { + def userId: Option[String] = if (claims.resolvedFromUser) claims.userId else None + def isParticipantAdmin: Boolean = claims.claims.contains(ClaimAdmin) + def isIdpAdmin: Boolean = claims.claims.contains(ClaimIdentityProviderAdmin) + def isRegularUser: Boolean = claims.resolvedFromUser && !isParticipantAdmin && !isIdpAdmin + } +} diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/PackageUpgradeValidator.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/PackageUpgradeValidator.scala index 1a20f57578..006d965d80 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/PackageUpgradeValidator.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/PackageUpgradeValidator.scala @@ -29,6 +29,8 @@ class PackageUpgradeValidator( private case class PackageIdAndSignature(packageId: PackageId, signature: PackageSignature) { def version: Ref.PackageVersion = signature.metadata.version def name: Ref.PackageName = signature.metadata.name + def supportsUpgrades: Boolean = signature.supportsUpgrades(packageId) + def directDeps: Set[PackageId] = signature.directDeps def pkgIdWithNameAndVersion: PkgIdWithNameAndVersion = 
PkgIdWithNameAndVersion( (packageId, signature) ) @@ -38,37 +40,78 @@ class PackageUpgradeValidator( private val upgradeCompatCache = cacheConfig.buildScaffeine().build[(PackageId, PackageId), Either[TopologyManagerError, Unit]]() - def validateUpgrade(allPackages: List[(PackageId, PackageSignature)])(implicit + /** Validate the upgrade-compatibility of the vetted lineages that are affected by a new package + * to vet. That is, + * - the lineage of the new package itself + * - the lineage of each dependency, direct and transitive, of the new package + * + * This validation fails if: + * - a dependency is unknown (not in the package store) + * - a package claims to be daml-prim or daml-stdlib, but it is not a utility package + * - two distinct packages have the same name and version + * - a package in the affected lineages is upgrade-incompatible + * + * @param newPackagesToVet + * new packages to vet + * @param targetVettedPackages + * all packages in the next vetting state, including the new ones + * @param storedPackageMap + * all packages in the package store + * @param loggingContext + * @return + * a topology manager error if the validation fails, unit otherwise + */ + def validateUpgrade( + newPackagesToVet: Set[PackageId], + targetVettedPackages: Set[PackageId], + storedPackageMap: Map[PackageId, PackageSignature], + )(implicit loggingContext: LoggingContextWithTrace ): Either[TopologyManagerError, Unit] = { - val packageMap = allPackages.toMap - // Group packages by name and sort by version, to create the lineage of each package name - val packageNameToLineage: Map[Ref.PackageName, List[PackageIdAndSignature]] = allPackages - .map { case (id, sig) => PackageIdAndSignature(id, sig) } - .groupBy(_.name) - .view - .mapValues(_.sortBy(_.version)) - .toMap - // Sort the packages in topological order to get the dependencies first. This is useful to get - // the upgrade errors in a deterministic way. We then filter on the packageMap to only keep the - // packages to vet and not their dependencies which are already vetted. - val packageNamesInTopologicalOrder: Seq[Ref.PackageName] = - dependenciesInTopologicalOrder(allPackages.map(_._1), packageMap) - .flatMap(packageMap.get) - .map(_.metadata.name) - .distinct + // Process the packages in topological order, dependencies first, so that upgrade errors are reported in a deterministic way. + val packagesInTopologicalOrder = + dependenciesInTopologicalOrder(newPackagesToVet.toList, storedPackageMap) + + // We ignore the dependencies that are not in the store. This is acceptable because + // later we check that all required dependencies are known. + val packageNamesToCheckInTopologicalOrder: Seq[Ref.PackageName] = + packagesInTopologicalOrder.flatMap(storedPackageMap.get).map(_.metadata.name).distinct + + // We don't know yet if the dependencies are upgrade-compatible, because they may have been vetted with force flags. + // Therefore we keep them all and check them. + val packageNamesToCheck = packageNamesToCheckInTopologicalOrder.toSet + + def getPackageToCheck(packageId: PackageId): Option[PackageIdAndSignature] = + // Here we ignore the package if it is not in the store. This is acceptable because + // later we check that all required dependencies are known.
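// For intuition, a hypothetical example (package names made up): vetting a new
// package a-1.1.0 that depends on b-1.0.0, with a-1.0.0 and b-1.0.0 already vetted,
// would yield packagesInTopologicalOrder == Seq(b-1.0.0, a-1.1.0) (dependencies first)
// and packageNamesToCheckInTopologicalOrder == Seq(b, a); the lineages built below are
// then b -> List(b-1.0.0) and a -> List(a-1.0.0, a-1.1.0), and each lineage is checked
// pairwise in ascending version order.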
+ storedPackageMap.get(packageId).collect { + case packageSig if packageNamesToCheck.contains(packageSig.metadata.name) => + PackageIdAndSignature(packageId, packageSig) + } + + // Get packages to check, group them by name, and sort them by version, to create their lineages + val lineagesToCheck: Map[Ref.PackageName, List[PackageIdAndSignature]] = + targetVettedPackages.toList + .flatMap(getPackageToCheck) + .groupBy(_.name) + .view + .mapValues(_.sortBy(pkg => (pkg.version, pkg.packageId))) + .toMap // validate the upgradeability of each lineage - packageNamesInTopologicalOrder - .traverse(name => validatePackageLineage(name, packageNameToLineage(name), packageMap)) + packageNamesToCheckInTopologicalOrder + .filter( + lineagesToCheck.contains + ) // some lineages can be missing if unvetted dependencies are allowed + .traverse(name => validatePackageLineage(name, lineagesToCheck(name), storedPackageMap)) .map(_ => ()) } private def validatePackageLineage( name: Ref.PackageName, lineage: List[PackageIdAndSignature], - packageMap: Map[PackageId, PackageSignature], + storedPackageMap: Map[PackageId, PackageSignature], )(implicit loggingContext: LoggingContextWithTrace ): Either[TopologyManagerError, Unit] = { @@ -76,17 +119,35 @@ class PackageUpgradeValidator( s"Typechecking upgrades for lineage of package-name $name." ) val upgradingPairs: List[(PackageIdAndSignature, PackageIdAndSignature)] = - lineage.sliding(2).collect { case fst :: snd :: Nil => (fst, snd) }.toList + lineage + .filter(_.supportsUpgrades) + .sliding(2) + .collect { case fst :: snd :: Nil => (fst, snd) } + .toList for { + _ <- lineage.traverse(validateDependencies(_, storedPackageMap)) _ <- lineage.traverse(validateDamlPrimOrStdLib) _ <- upgradingPairs.traverse { case (fst, snd) => validateVersion(fst, snd) } _ <- upgradingPairs.traverse { case (fst, snd) => - cachedTypecheckUpgrades(fst, snd, packageMap) + cachedTypecheckUpgrades(fst, snd, storedPackageMap) } _ = logger.info(s"Typechecking upgrades for lineage of package-name $name succeeded.") } yield () } + private def validateDependencies( + pkg: PackageIdAndSignature, + storedPackageMap: Map[PackageId, PackageSignature], + )(implicit loggingContext: LoggingContextWithTrace): Either[TopologyManagerError, Unit] = + pkg.directDeps.toSeq + .traverse { packageId => + // we cannot check the upgradability of a package if one of its dependencies is unknown + storedPackageMap + .get(packageId) + .toRight(CannotVetDueToMissingPackages.Missing(Set(packageId))) + } + .map(_ => ()) + private def validateDamlPrimOrStdLib( pkg: PackageIdAndSignature )(implicit loggingContext: LoggingContextWithTrace): Either[TopologyManagerError, Unit] = @@ -109,26 +170,26 @@ class PackageUpgradeValidator( private def cachedTypecheckUpgrades( oldPackage: PackageIdAndSignature, newPackage: PackageIdAndSignature, - packageMap: Map[PackageId, PackageSignature], + storedPackageMap: Map[PackageId, PackageSignature], )(implicit loggingContext: LoggingContextWithTrace ): Either[TopologyManagerError, Unit] = upgradeCompatCache.get( (oldPackage.packageId, newPackage.packageId), - _ => strictTypecheckUpgrades(oldPackage, newPackage, packageMap), + _ => strictTypecheckUpgrades(oldPackage, newPackage, storedPackageMap), ) private def strictTypecheckUpgrades( oldPackage: PackageIdAndSignature, newPackage: PackageIdAndSignature, - packageMap: Map[PackageId, PackageSignature], + storedPackageMap: Map[PackageId, PackageSignature], )(implicit loggingContext: LoggingContextWithTrace ): Either[TopologyManagerError, Unit]
= { logger.info(s"Package $newPackage claims to upgrade package $oldPackage") TypecheckUpgrades .typecheckUpgrades( - packageMap, + storedPackageMap, (newPackage.packageId, newPackage.signature), oldPackage.packageId, Some(oldPackage.signature), diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/PartyAllocation.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/PartyAllocation.scala index 66d4001071..120333786b 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/PartyAllocation.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/PartyAllocation.scala @@ -3,6 +3,7 @@ package com.digitalasset.canton.platform.apiserver.services.admin +import com.digitalasset.canton.crypto.{Hash, HashAlgorithm, HashPurpose} import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective.AuthorizationEvent import com.digitalasset.canton.ledger.participant.state.index.IndexerPartyDetails import com.digitalasset.canton.platform.apiserver.services.tracking.StreamTracker @@ -10,27 +11,29 @@ import com.digitalasset.daml.lf.data.Ref object PartyAllocation { - final case class TrackerKey private (val submissionId: Ref.SubmissionId) - object TrackerKey { - def of( - party: String, - participantId: Ref.ParticipantId, - authorizationEvent: AuthorizationEvent, - ): TrackerKey = { - import com.digitalasset.canton.crypto.{Hash, HashAlgorithm, HashPurpose} - + final case class TrackerKey( + party: String, + participantId: Ref.ParticipantId, + authorizationEvent: AuthorizationEvent, + ) { + lazy val submissionId: Ref.SubmissionId = { val builder = Hash.build(HashPurpose.PartyUpdateId, HashAlgorithm.Sha256) builder.add(party.split("::")(0)) builder.add(participantId) builder.add(authorizationEvent.toString) val hash = builder.finish() - TrackerKey(Ref.SubmissionId.assertFromString(hash.toHexString)) + Ref.SubmissionId.assertFromString(hash.toHexString) } - private[admin] def forTests(submissionId: Ref.SubmissionId) = TrackerKey(submissionId) + // Override hashCode and equals to consider only submissionId for equality and hashing. + // Needed when the key is used in HashMaps etc. + override def hashCode(): Int = submissionId.hashCode + override def equals(obj: Any): Boolean = obj match { + case otherTrackerKey: TrackerKey => submissionId.equals(otherTrackerKey.submissionId) + case _ => false + } } - final case class Completed(submissionId: TrackerKey, partyDetails: IndexerPartyDetails) type Tracker = StreamTracker[TrackerKey, Completed] diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/PendingPartyAllocations.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/PendingPartyAllocations.scala new file mode 100644 index 0000000000..164bff9eaf --- /dev/null +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/PendingPartyAllocations.scala @@ -0,0 +1,44 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.platform.apiserver.services.admin + +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.daml.lf.data.Ref + +import scala.collection.concurrent.{Map, TrieMap} +import scala.concurrent.{ExecutionContext, Future} + +/** Tracks the number of in-flight party allocations per user. */ +class PendingPartyAllocations { + + private val pendingAllocations: Map[Ref.UserId, Int] = TrieMap.empty + + // Registers a pending allocation and returns the updated count, including the new one. + private def increment(user: Ref.UserId): Int = + pendingAllocations + .updateWith(user) { + case None => Some(1) + case Some(n) => Some(n + 1) + } + .fold(0)(identity) + + // Releases one pending allocation and returns the number still outstanding. + private def decrement(user: Ref.UserId): Int = + pendingAllocations + .updateWith(user) { + case Some(n) if n > 1 => Some(n - 1) + case _ => None + } + .fold(0)(identity) + + // Runs `f` with the number of pending allocations for `user` (including this call) and + // releases the slot once the returned future completes; callers without a user id see 0. + def withUser[T]( + user: Option[Ref.UserId] + )(f: Int => Future[T])(implicit executor: ExecutionContext): Future[T] = user match { + case None => f(0) + case Some(userId) => f(increment(userId)).thereafter(_ => decrement(userId).discard) + } +} diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandServiceImpl.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandServiceImpl.scala index 8fb8f6f02c..f234abdfad 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandServiceImpl.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandServiceImpl.scala @@ -14,12 +14,7 @@ import com.daml.ledger.api.v2.commands.Commands import com.daml.ledger.api.v2.reassignment_commands.ReassignmentCommands import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_LEDGER_EFFECTS import com.daml.ledger.api.v2.transaction_filter.{Filters, TransactionFormat, UpdateFormat} -import com.daml.ledger.api.v2.update_service.{ - GetTransactionByIdRequest, - GetTransactionTreeResponse, - GetUpdateByIdRequest, - GetUpdateResponse, -} +import com.daml.ledger.api.v2.update_service.{GetUpdateByIdRequest, GetUpdateResponse} import com.daml.tracing.Telemetry import com.digitalasset.canton.config import com.digitalasset.canton.ledger.api.SubmissionIdGenerator @@ -50,7 +45,6 @@ import io.grpc.{Context, Deadline, Status} import java.time.Instant import java.util.concurrent.TimeUnit import java.util.concurrent.atomic.AtomicBoolean -import scala.annotation.nowarn import scala.concurrent.duration.Duration import scala.concurrent.{ExecutionContext, Future} import scala.util.{Failure, Success, Try} @@ -146,25 +140,6 @@ private[apiserver] final class CommandServiceImpl private[services] ( } } - // TODO(#23504) remove - @nowarn("cat=deprecation") - def submitAndWaitForTransactionTree( - request: SubmitAndWaitRequest - )(loggingContext: LoggingContextWithTrace): Future[SubmitAndWaitForTransactionTreeResponse] = - withCommandsLoggingContext(request.getCommands, loggingContext) { (errorLogger, traceContext) => - submitAndWaitInternal(request.commands)(errorLogger, traceContext).flatMap { resp => - val effectiveActAs = CommandsValidator.effectiveSubmitters(request.getCommands).actAs - val txRequest = GetTransactionByIdRequest( - updateId = resp.completion.updateId, - requestingParties = effectiveActAs.toList, - transactionFormat = None, - ) - updateServices
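// A minimal usage sketch for the PendingPartyAllocations helper above (hypothetical
// names; the actual limit check lives with the party management service):
//   val pending = new PendingPartyAllocations()
//   pending.withUser(Some(userId)) { inFlight =>
//     if (inFlight > maxSelfAllocatedParties.unwrap) Future.failed(tooManyPendingError)
//     else allocateParty(request)
//   } // the per-user counter is released once the returned future completes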
.getTransactionTreeById(txRequest) - .map(resp => SubmitAndWaitForTransactionTreeResponse.of(resp.transaction)) - } - } - private def submitAndWaitInternal( commands: Option[Commands] )(implicit @@ -349,11 +324,8 @@ private[apiserver] object CommandServiceImpl { loggerFactory = loggerFactory, ) - // TODO(#23504) remove getTransactionTreeById - @nowarn("cat=deprecation") final class UpdateServices( - val getTransactionTreeById: GetTransactionByIdRequest => Future[GetTransactionTreeResponse], - val getUpdateById: GetUpdateByIdRequest => Future[GetUpdateResponse], + val getUpdateById: GetUpdateByIdRequest => Future[GetUpdateResponse] ) private[apiserver] def validateRequestTimeout( diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/InteractiveSubmissionServiceImpl.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/InteractiveSubmissionServiceImpl.scala index ef6a72404d..3a3a3986f6 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/InteractiveSubmissionServiceImpl.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/InteractiveSubmissionServiceImpl.scala @@ -16,6 +16,7 @@ import com.daml.ledger.api.v2.update_service.GetUpdateResponse import com.daml.scalautil.future.FutureConversion.CompletionStageConversionOps import com.digitalasset.base.error.ErrorCode.LoggedApiException import com.digitalasset.base.error.RpcError +import com.digitalasset.canton.LfTimestamp import com.digitalasset.canton.config.NonNegativeFiniteDuration import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.interactive.InteractiveSubmissionEnricher @@ -175,13 +176,19 @@ private[apiserver] final class InteractiveSubmissionServiceImpl private[services request.commands.submissionId.map(SubmissionId.unwrap), ) - evaluateAndHash(seedService.nextSeed(), request.commands, request.verboseHashing) + evaluateAndHash( + seedService.nextSeed(), + request.commands, + request.verboseHashing, + request.maxRecordTime, + ) } private def evaluateAndHash( submissionSeed: crypto.Hash, commands: ApiCommands, verboseHashing: Boolean, + maxRecordTime: Option[LfTimestamp], )(implicit loggingContext: LoggingContextWithTrace, errorLoggingContext: ErrorLoggingContext, @@ -212,6 +219,7 @@ private[apiserver] final class InteractiveSubmissionServiceImpl private[services commands, config.contractLookupParallelism, hashTracer, + maxRecordTime, ) .leftWiden[RpcError] hashingDetails = hashTracer match { diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/EnrichedTransactionData.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/EnrichedTransactionData.scala index 4649aa208c..8b24ffa34e 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/EnrichedTransactionData.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/EnrichedTransactionData.scala @@ -21,6 +21,7 @@ import com.digitalasset.canton.protocol.hash.HashTracer import 
com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.version.{HashingSchemeVersion, ProtocolVersion} import com.digitalasset.daml.lf.data.ImmArray +import com.digitalasset.daml.lf.data.Time.Timestamp import com.digitalasset.daml.lf.engine.Enricher import com.digitalasset.daml.lf.transaction.{ FatContractInstance, @@ -118,6 +119,7 @@ final case class PrepareTransactionData( private[codec] val synchronizerId: SynchronizerId, private[codec] val mediatorGroup: Int, private[codec] val transactionUUID: UUID, + private[codec] val maxRecordTime: Option[Timestamp], ) extends EnrichedTransactionData /** Transaction data for an enriched external submission during the execute phase. This is usually diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/ExternalTransactionProcessor.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/ExternalTransactionProcessor.scala index 5e341c5ef2..a5a73b38b1 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/ExternalTransactionProcessor.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/ExternalTransactionProcessor.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton.platform.apiserver.services.command.interactive. import cats.data.EitherT import cats.syntax.either.* import com.daml.ledger.api.v2.interactive.interactive_submission_service.PreparedTransaction +import com.digitalasset.canton.LfTimestamp import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.crypto.Hash import com.digitalasset.canton.interactive.InteractiveSubmissionEnricher @@ -25,8 +26,11 @@ import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFact import com.digitalasset.canton.platform.apiserver.execution.CommandExecutionResult import com.digitalasset.canton.platform.apiserver.services.command.interactive.codec.EnrichedTransactionData.ExternalInputContract import com.digitalasset.canton.platform.apiserver.services.command.interactive.codec.ExternalTransactionProcessor.PrepareResult +import com.digitalasset.canton.platform.store.dao.events.InputContractPackages +import com.digitalasset.canton.protocol.LfFatContractInst import com.digitalasset.canton.protocol.hash.HashTracer import com.digitalasset.canton.util.MonadUtil +import com.digitalasset.canton.util.collection.MapsUtil import com.digitalasset.canton.version.HashingSchemeVersion import com.digitalasset.daml.lf.transaction.{SubmittedTransaction, Transaction} import com.digitalasset.daml.lf.value.Value.ContractId @@ -89,91 +93,76 @@ class ExternalTransactionProcessor( private def lookupAndEnrichInputContracts( transaction: Transaction, - disclosedContracts: Seq[DisclosedContract], + disclosedContracts: Map[ContractId, LfFatContractInst], contractLookupParallelism: PositiveInt, )(implicit loggingContextWithTrace: LoggingContextWithTrace, executionContext: ExecutionContext, ): EitherT[FutureUnlessShutdown, String, Map[ContractId, ExternalInputContract]] = { - val disclosedContractsByCoid = - disclosedContracts.groupMap(_.fatContractInstance.contractId)(_.fatContractInstance) - MonadUtil - .parTraverseWithLimit(contractLookupParallelism)(transaction.inputContracts.toList) { - inputCoid => - // First 
check the disclosed contracts - disclosedContractsByCoid.get(inputCoid) match { - // We expect a single disclosed contract for a coid - case Some(Seq(originalFci)) => - EitherT.liftF[FutureUnlessShutdown, String, (ContractId, ExternalInputContract)]( - enricher.enrichContract(originalFci).map { enrichedFci => - val externalInputContract = ExternalInputContract( - originalContract = originalFci, - enrichedContract = enrichedFci, + def lookupContract(coid: ContractId): FutureUnlessShutdown[LfFatContractInst] = + disclosedContracts.get(coid) match { + case Some(inst) => + FutureUnlessShutdown.pure(inst) + case None => + FutureUnlessShutdown + .outcomeF(contractStore.lookupContractState(coid)) + .flatMap[LfFatContractInst] { + + case active: ContractState.Active => + FutureUnlessShutdown.pure(active.contractInstance) + + // Engine interpretation would likely have failed if the contract never existed. + // However, it's possible that the contract was archived or pruned in the meantime. + // That's not an issue, because in that case the transaction would fail later during + // conflict detection anyway. + case ContractState.NotFound => + FutureUnlessShutdown + .failed( + ConsistencyErrors.ContractNotFound + .Reject( + s"Contract was not found in the participant contract store. You must either explicitly disclose the contract, or prepare the transaction via a participant that has knowledge of it", + coid, + ) + .asGrpcError ) - externalInputContract.contractId -> externalInputContract - } - ) - case Some(_) => - EitherT.leftT[FutureUnlessShutdown, (ContractId, ExternalInputContract)]( - s"Contract ID $inputCoid is not unique" - ) - // If the contract is not disclosed, look it up from the store - case None => - EitherT { + case ContractState.Archived => FutureUnlessShutdown - .outcomeF( - contractStore - .lookupContractState(inputCoid) + .failed( + CommandExecutionErrors.Interpreter.ContractNotActive + .Reject( + "Input contract has seemingly already been archived immediately after interpretation of the transaction", + coid, + None, + ) + .asGrpcError ) - .flatMap { - case active: ContractState.Active => - val originalFci = active.contractInstance - enricher - .enrichContract(originalFci) - .map { enrichedFci => - val externalInputContract = ExternalInputContract( - originalContract = originalFci, - enrichedContract = enrichedFci, - ) - Right(externalInputContract.contractId -> externalInputContract) - } - // Engine interpretation likely would have failed if that was the case - // However it's possible that the contract was archived or pruned in the meantime - // That's not an issue however because if that was the case the transaction would have failed later - // anyway during conflict detection. - case ContractState.NotFound => - FutureUnlessShutdown - .failed[Either[String, (ContractId, ExternalInputContract)]]( - ConsistencyErrors.ContractNotFound - .Reject( - s"Contract was not found in the participant contract store.
You must either explicitly disclose the contract, or prepare the transaction via a participant that has knowledge of it", - inputCoid, - ) - .asGrpcError - ) - case ContractState.Archived => - FutureUnlessShutdown - .failed[Either[String, (ContractId, ExternalInputContract)]]( - CommandExecutionErrors.Interpreter.ContractNotActive - .Reject( - "Input contract has seemingly already been archived immediately after interpretation of the transaction", - inputCoid, - None, - ) - .asGrpcError - ) - } - } - } + } + } + + MonadUtil + .parTraverseWithLimit(contractLookupParallelism)( + InputContractPackages.forTransaction(transaction).toList + ) { case (inputCoid, targetPackageIds) => + for { + original <- EitherT.right[String](lookupContract(inputCoid)) + enriched <- enricher.enrichContract(original, targetPackageIds) + } yield { + inputCoid -> ExternalInputContract( + originalContract = original, + enrichedContract = enriched, + ) + } } .map(_.toMap) + } private def enrich( commandExecutionResult: CommandExecutionResult, - commands: ApiCommands, + disclosedContracts: Seq[DisclosedContract], contractLookupParallelism: PositiveInt, + maxRecordTime: Option[LfTimestamp], )(implicit loggingContextWithTrace: LoggingContextWithTrace, executionContext: ExecutionContext, @@ -189,10 +178,21 @@ class ExternalTransactionProcessor( commandExecutionResult.commandInterpretationResult.transaction ) ) + disclosedContractMap <- EitherT.fromEither[FutureUnlessShutdown]( + MapsUtil + .toNonConflictingMap( + disclosedContracts.map(_.fatContractInstance).map(c => c.contractId -> c) + ) + .leftMap(err => + CommandExecutionErrors.InteractiveSubmissionPreparationError.Reject( + s"Disclosed contracts contain non-unique contract IDs: $err" + ) + ) + ) // Compute input contracts by looking them up either from disclosed contracts or the local store inputContracts <- lookupAndEnrichInputContracts( enrichedTransaction.transaction, - commands.disclosedContracts.toList, + disclosedContractMap, contractLookupParallelism, ) .leftMap(CommandExecutionErrors.InteractiveSubmissionPreparationError.Reject(_)) @@ -207,6 +207,7 @@ class ExternalTransactionProcessor( synchronizerId = synchronizerId.logical, mediatorGroup = 0, transactionUUID = UUID.randomUUID(), + maxRecordTime = maxRecordTime, ) } yield transactionData @@ -217,6 +218,7 @@ class ExternalTransactionProcessor( commands: ApiCommands, contractLookupParallelism: PositiveInt, hashTracer: HashTracer, + maxRecordTime: Option[LfTimestamp], )(implicit loggingContextWithTrace: LoggingContextWithTrace, executionContext: ExecutionContext, @@ -227,7 +229,12 @@ class ExternalTransactionProcessor( ] = for { // Enrich first - enriched <- enrich(commandExecutionResult, commands, contractLookupParallelism) + enriched <- enrich( + commandExecutionResult, + commands.disclosedContracts.toList, + contractLookupParallelism, + maxRecordTime, + ) // Then encode encoded <- EitherT .liftF[ diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/PreparedTransactionDecoder.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/PreparedTransactionDecoder.scala index c8d0f71c38..6c00b0171f 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/PreparedTransactionDecoder.scala +++ 
b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/PreparedTransactionDecoder.scala @@ -36,11 +36,11 @@ import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.daml.lf import com.digitalasset.daml.lf.data.Ref.TypeConId import com.digitalasset.daml.lf.data.{Bytes, ImmArray, Ref, Time} -import com.digitalasset.daml.lf.language.LanguageVersion import com.digitalasset.daml.lf.transaction.{ CreationTime, FatContractInstance, NodeId, + SerializationVersion as LfSerializationVersion, TransactionCoder, } import com.digitalasset.daml.lf.value.Value @@ -110,13 +110,8 @@ final class PreparedTransactionDecoder(override val loggerFactory: NamedLoggerFa * Straightforward decoders for simple proto values */ private implicit val languageVersionTransformer - : PartialTransformer[String, lf.language.LanguageVersion] = - PartialTransformer { - case "dev" => - Result.fromValue(lf.language.LanguageVersion.v2_dev) - case src => - lf.language.LanguageVersion.fromString(src).toResult - } + : PartialTransformer[String, LfSerializationVersion] = + PartialTransformer(LfSerializationVersion.fromString(_).toResult) private implicit val contractIdTransformer : PartialTransformer[String, lf.value.Value.ContractId] = @@ -210,7 +205,7 @@ final class PreparedTransactionDecoder(override val loggerFactory: NamedLoggerFa .traverse(_.transformIntoPartial[lf.value.Value]) .flatMap(_.toRight("Missing argument value").toResult), ) - .withFieldComputedPartial(_.version, _.lfVersion.transformIntoPartial[LanguageVersion]) + .withFieldComputedPartial(_.version, _.lfVersion.transformIntoPartial[LfSerializationVersion]) // Fields not supported in V1 .withFieldConst(_.keyOpt, None) .buildTransformer @@ -220,7 +215,7 @@ final class PreparedTransactionDecoder(override val loggerFactory: NamedLoggerFa ): PartialTransformer[isdv1.Fetch, lf.transaction.Node.Fetch] = Transformer .definePartial[isdv1.Fetch, lf.transaction.Node.Fetch] .withFieldRenamed(_.contractId, _.coid) - .withFieldComputedPartial(_.version, _.lfVersion.transformIntoPartial[LanguageVersion]) + .withFieldComputedPartial(_.version, _.lfVersion.transformIntoPartial[LfSerializationVersion]) // Not supported in V1 .withFieldConst(_.keyOpt, None) .withFieldConst(_.byKey, false) @@ -236,7 +231,10 @@ final class PreparedTransactionDecoder(override val loggerFactory: NamedLoggerFa _.choiceObservers, _.choiceObservers.traverse(_.transformIntoPartial[lf.data.Ref.Party]).map(_.toSet), ) - .withFieldComputedPartial(_.version, _.lfVersion.transformIntoPartial[LanguageVersion]) + .withFieldComputedPartial( + _.version, + _.lfVersion.transformIntoPartial[LfSerializationVersion], + ) // Fields not supported in V1 .withFieldConst(_.keyOpt, None) .withFieldConst(_.byKey, false) @@ -288,7 +286,7 @@ final class PreparedTransactionDecoder(override val loggerFactory: NamedLoggerFa ): PartialTransformer[iss.DamlTransaction, lf.transaction.VersionedTransaction] = PartialTransformer { src => def lfVersionedConstructor( - version: LanguageVersion, + version: LfSerializationVersion, nodes: Map[LfNodeId, LfNode], roots: ImmArray[LfNodeId], ): lf.transaction.VersionedTransaction = lf.transaction.VersionedTransaction( @@ -424,11 +422,15 @@ final class PreparedTransactionDecoder(override val loggerFactory: NamedLoggerFa mediatorGroup <- ProtoConverter .parseNonNegativeInt("mediator_group", metadataProto.mediatorGroup) .toFutureWithLoggedFailuresDecode("Failed to deserialize mediator group", logger) + 
maxRecordTimeO <- metadataProto.maxRecordTime + .transformIntoPartial[Option[lf.data.Time.Timestamp]] + .toFutureWithLoggedFailuresDecode("Failed to deserialize max record time", logger) } yield ExternallySignedSubmission( executeRequest.serializationVersion, executeRequest.signatures, transactionUUID = transactionUUID, mediatorGroup = mediatorGroup, + maxRecordTimeO = maxRecordTimeO, ) submitterInfo <- submitterInfoProto .intoPartial[SubmitterInfo] diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/PreparedTransactionEncoder.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/PreparedTransactionEncoder.scala index dfdc62157d..3c470bc3f6 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/PreparedTransactionEncoder.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/PreparedTransactionEncoder.scala @@ -28,13 +28,12 @@ import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.daml.lf import com.digitalasset.daml.lf.crypto import com.digitalasset.daml.lf.data.ImmArray -import com.digitalasset.daml.lf.language.LanguageVersion import com.digitalasset.daml.lf.transaction.{ CreationTime, GlobalKey, Node, NodeId, - TransactionVersion, + SerializationVersion, } import com.digitalasset.daml.lf.value.Value import com.google.common.annotations.VisibleForTesting @@ -57,12 +56,12 @@ final class PreparedTransactionEncoder( * for several LF Versions. */ private val nodeTransformers = Map( - LanguageVersion.v2_1 -> v1.nodeTransformer(LanguageVersion.v2_1), - LanguageVersion.v2_dev -> v1.nodeTransformer(LanguageVersion.v2_dev), + SerializationVersion.V1 -> v1.nodeTransformer(SerializationVersion.V1), + SerializationVersion.VDev -> v1.nodeTransformer(SerializationVersion.VDev), ) private def getEncoderForVersion( - version: LanguageVersion + version: SerializationVersion ): Result[PartialTransformer[lf.transaction.Node, iss.DamlTransaction.Node.VersionedNode]] = nodeTransformers .get(version) @@ -109,9 +108,9 @@ final class PreparedTransactionEncoder( private implicit val partyVersionTransformer: Transformer[lf.data.Ref.Party, String] = Transformer.derive - private implicit val languageVersionTransformer - : Transformer[lf.language.LanguageVersion, String] = - TransactionVersion.toProtoValue(_) + private implicit val lfSerializationVersionTransformer + : Transformer[lf.transaction.SerializationVersion, String] = + SerializationVersion.toProtoValue(_) private implicit val nodeIdTransformer: Transformer[lf.transaction.NodeId, String] = _.index.toString @@ -140,18 +139,18 @@ final class PreparedTransactionEncoder( */ object v1 { private implicit def createNodeTransformer(implicit - languageVersion: LanguageVersion + serializationVersion: SerializationVersion ): PartialTransformer[lf.transaction.Node.Create, isdv1.Create] = Transformer .definePartial[lf.transaction.Node.Create, isdv1.Create] .withFieldRenamed(_.coid, _.contractId) .withFieldRenamed(_.arg, _.argument) .withFieldComputed(_.signatories, _.signatories.toSeq.sorted) .withFieldComputed(_.stakeholders, _.stakeholders.toSeq.sorted) - .withFieldConst(_.lfVersion, languageVersion.transformInto[String]) + .withFieldConst(_.lfVersion,
serializationVersion.transformInto[String]) .buildTransformer private[interactive] implicit def exerciseTransformer(implicit - languageVersion: LanguageVersion + serializationVersion: SerializationVersion ): PartialTransformer[lf.transaction.Node.Exercise, isdv1.Exercise] = Transformer .definePartial[lf.transaction.Node.Exercise, isdv1.Exercise] .withFieldRenamed(_.targetCoid, _.contractId) @@ -159,18 +158,18 @@ final class PreparedTransactionEncoder( .withFieldComputed(_.stakeholders, _.stakeholders.toSeq.sorted) .withFieldComputed(_.actingParties, _.actingParties.toSeq.sorted) .withFieldComputed(_.choiceObservers, _.choiceObservers.toSeq.sorted) - .withFieldConst(_.lfVersion, languageVersion.transformInto[String]) + .withFieldConst(_.lfVersion, serializationVersion.transformInto[String]) .buildTransformer private[interactive] implicit def fetchTransformer(implicit - languageVersion: LanguageVersion + serializationVersion: SerializationVersion ): PartialTransformer[lf.transaction.Node.Fetch, isdv1.Fetch] = Transformer .definePartial[lf.transaction.Node.Fetch, isdv1.Fetch] .withFieldRenamed(_.coid, _.contractId) .withFieldComputed(_.signatories, _.signatories.toSeq.sorted) .withFieldComputed(_.stakeholders, _.stakeholders.toSeq.sorted) .withFieldComputed(_.actingParties, _.actingParties.toSeq.sorted) - .withFieldConst(_.lfVersion, languageVersion.transformInto[String]) + .withFieldConst(_.lfVersion, serializationVersion.transformInto[String]) .buildTransformer private implicit val rollbackTransformer @@ -179,7 +178,7 @@ final class PreparedTransactionEncoder( .buildTransformer private[interactive] def nodeTransformer(implicit - languageVersion: LanguageVersion + serializationVersion: SerializationVersion ): PartialTransformer[lf.transaction.Node, iss.DamlTransaction.Node.VersionedNode] = PartialTransformer[lf.transaction.Node, iss.DamlTransaction.Node.VersionedNode] { lfNode => val nodeType = lfNode match { @@ -217,8 +216,8 @@ final class PreparedTransactionEncoder( lfNode => val transformerResult = lfNode match { // Rollback nodes are not versioned so lfNode.optVersion will be empty - // Just pick the transformer for the default version as it doesn't matter here - case _: Node.Rollback => getEncoderForVersion(LanguageVersion.default) + // Just pick the transformer for the min version as it doesn't matter here + case _: Node.Rollback => getEncoderForVersion(SerializationVersion.minVersion) case _ => lfNode.optVersion .toRight("Expected a node version but was empty") @@ -311,6 +310,7 @@ final class PreparedTransactionEncoder( transactionUUID: UUID, mediatorGroup: Int, inputContracts: Seq[ExternalInputContract], + maxRecordTime: Option[lf.data.Time.Timestamp], ): PartialTransformer[PrepareTransactionData, iss.Metadata] = Transformer .definePartial[PrepareTransactionData, iss.Metadata] @@ -343,6 +343,10 @@ final class PreparedTransactionEncoder( _.maxLedgerEffectiveTime, _.transactionMeta.timeBoundaries.maxConstraint.map(_.transformInto[Long]), ) + .withFieldConst( + _.maxRecordTime, + maxRecordTime.map(_.transformInto[Long]), + ) .buildTransformer @VisibleForTesting @@ -379,6 +383,7 @@ final class PreparedTransactionEncoder( transactionUUID, mediatorGroup, prepareTransactionData.inputContracts.values.toSeq, + prepareTransactionData.maxRecordTime, ) val versionedTransaction = lf.transaction.VersionedTransaction( prepareTransactionData.transaction.version, diff --git
a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/logging/package.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/logging/package.scala index f76c972111..c2a9aa4e22 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/logging/package.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/logging/package.scala @@ -15,12 +15,11 @@ import com.digitalasset.canton.ledger.api.{ TransactionFormat, TransactionShape, UpdateFormat, - UpdateId, } +import com.digitalasset.canton.protocol.UpdateId import com.digitalasset.daml.lf.data.Ref.{Identifier, Party} import com.digitalasset.daml.lf.data.logging.* import com.digitalasset.daml.lf.value.Value.ContractId -import scalaz.syntax.tag.ToTagOps package object logging { @@ -176,7 +175,7 @@ package object logging { "updateId" -> id private[services] def updateId(id: UpdateId): LoggingEntry = - "updateId" -> id.unwrap + "updateId" -> id.toHexString private[services] def workflowId(id: String): LoggingEntry = "workflowId" -> id diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/config/IndexServiceConfig.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/config/IndexServiceConfig.scala index 0db7ef5838..ea0cce545b 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/config/IndexServiceConfig.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/config/IndexServiceConfig.scala @@ -66,7 +66,6 @@ final case class IndexServiceConfig( activeContractsServiceStreams: ActiveContractsServiceStreamsConfig = ActiveContractsServiceStreamsConfig.default, updatesStreams: UpdatesStreamsConfig = UpdatesStreamsConfig.default, - transactionTreeStreams: TransactionTreeStreamsConfig = TransactionTreeStreamsConfig.default, globalMaxEventIdQueries: Int = 20, globalMaxEventPayloadQueries: Int = 10, offsetCheckpointCacheUpdateInterval: NonNegativeFiniteDuration = @@ -125,6 +124,8 @@ final case class ActiveContractsServiceStreamsConfig( maxPayloadsPerPayloadsPage: Int = ActiveContractsServiceStreamsConfig.DefaultEventsPageSize, maxParallelIdCreateQueries: Int = ActiveContractsServiceStreamsConfig.DefaultAcsIdFetchingParallelism, + idFilterQueryParallelism: Int = + ActiveContractsServiceStreamsConfig.DefaultAcsIdFilterQueryParallelism, // Must be a power of 2 maxParallelPayloadCreateQueries: Int = ActiveContractsServiceStreamsConfig.DefaultAcsContractFetchingParallelism, @@ -139,7 +140,8 @@ object ActiveContractsServiceStreamsConfig { val DefaultAcsIdPageSize: Int = 20000 val DefaultAcsIdPageBufferSize: Int = 1 val DefaultAcsIdPageWorkingMemoryBytes: Int = 100 * 1024 * 1024 - val DefaultAcsIdFetchingParallelism: Int = 2 + val DefaultAcsIdFetchingParallelism: Int = 4 + val DefaultAcsIdFilterQueryParallelism: Int = 2 // Must be a power of 2 val DefaultAcsContractFetchingParallelism: Int = 2 val DefaultContractProcessingParallelism: Int = 8 @@ -211,68 +213,3 @@ final case class UpdatesStreamsConfig( object UpdatesStreamsConfig { val default: UpdatesStreamsConfig = UpdatesStreamsConfig() } - -// TODO(#23504) Cleanup -/** Transaction tree streams configuration. 
- * - * @param maxIdsPerIdPage - * Number of event ids to retrieve in a single query (a page of event ids). - * @param maxPagesPerIdPagesBuffer - * Number of id pages to store in a buffer. There is a buffer for each decomposed filtering - * constraint. - * @param maxWorkingMemoryInBytesForIdPages - * Memory for storing id pages across all id pages buffers. Per single stream. - * @param maxPayloadsPerPayloadsPage - * Number of event payloads to retrieve in a single query (a page of event payloads). - * @param maxParallelIdCreateQueries - * Number of parallel queries that fetch ids of create events. Per single stream. - * @param maxParallelIdConsumingQueries - * Number of parallel queries that fetch ids of consuming events. Per single stream. - * @param maxParallelIdNonConsumingQueries - * Number of parallel queries that fetch payloads of non-consuming events. Per single stream. - * @param maxParallelIdAssignQueries - * Number of parallel queries that fetch payloads of assign events. Per single stream. - * @param maxParallelIdUnassignQueries - * Number of parallel queries that fetch payloads of unassign events. Per single stream. - * @param maxParallelIdTopologyEventsQueries - * Number of parallel queries that fetch payloads of topology events. Per single stream. - * @param maxParallelPayloadCreateQueries - * Number of parallel queries that fetch payloads of create events. Per single stream. - * @param maxParallelPayloadConsumingQueries - * Number of parallel queries that fetch payloads of consuming events. Per single stream. - * @param maxParallelPayloadNonConsumingQueries - * Number of parallel queries that fetch ids of non-consuming events. Per single stream. - * @param maxParallelPayloadAssignQueries - * Number of parallel queries that fetch ids of assign events. Per single stream. - * @param maxParallelPayloadUnassignQueries - * Number of parallel queries that fetch ids of unassign events. Per single stream. - * @param maxParallelPayloadTopologyEventsQueries - * Number of parallel queries that fetch ids of topology events. Per single stream. - * @param maxParallelPayloadQueries - * Upper bound on the number of parallel queries that fetch payloads. Per single stream. - * @param transactionsProcessingParallelism - * Number of transactions to process in parallel. Per single stream. 
- */ -final case class TransactionTreeStreamsConfig( - maxIdsPerIdPage: Int = 20000, - maxPagesPerIdPagesBuffer: Int = 1, - maxWorkingMemoryInBytesForIdPages: Int = 100 * 1024 * 1024, - maxPayloadsPerPayloadsPage: Int = 1000, - maxParallelIdCreateQueries: Int = 8, - maxParallelIdConsumingQueries: Int = 8, - maxParallelIdNonConsumingQueries: Int = 4, - maxParallelIdAssignQueries: Int = 4, - maxParallelIdUnassignQueries: Int = 4, - maxParallelIdTopologyEventsQueries: Int = 4, - maxParallelPayloadCreateQueries: Int = 2, - maxParallelPayloadConsumingQueries: Int = 2, - maxParallelPayloadNonConsumingQueries: Int = 2, - maxParallelPayloadAssignQueries: Int = 2, - maxParallelPayloadUnassignQueries: Int = 2, - maxParallelPayloadTopologyEventsQueries: Int = 2, - maxParallelPayloadQueries: Int = 2, - transactionsProcessingParallelism: Int = 8, -) -object TransactionTreeStreamsConfig { - val default: TransactionTreeStreamsConfig = TransactionTreeStreamsConfig() -} diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/config/PartyManagementServiceConfig.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/config/PartyManagementServiceConfig.scala index 30a63ebef4..5a58051dfb 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/config/PartyManagementServiceConfig.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/config/PartyManagementServiceConfig.scala @@ -3,7 +3,7 @@ package com.digitalasset.canton.platform.config -import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} /** Ledger api party management service specific configurations * @@ -11,14 +11,18 @@ import com.digitalasset.canton.config.RequireTypes.PositiveInt * maximum number of parties returned */ final case class PartyManagementServiceConfig( - maxPartiesPageSize: PositiveInt = PartyManagementServiceConfig.DefaultMaxPartiesPageSize + maxPartiesPageSize: PositiveInt = PartyManagementServiceConfig.DefaultMaxPartiesPageSize, + maxSelfAllocatedParties: NonNegativeInt = + PartyManagementServiceConfig.DefaultMaxSelfAllocatedParties, ) object PartyManagementServiceConfig { val DefaultMaxPartiesPageSize: PositiveInt = PositiveInt.tryCreate(10000) + val DefaultMaxSelfAllocatedParties: NonNegativeInt = NonNegativeInt.tryCreate(0) def default: PartyManagementServiceConfig = PartyManagementServiceConfig( - maxPartiesPageSize = DefaultMaxPartiesPageSize + maxPartiesPageSize = DefaultMaxPartiesPageSize, + maxSelfAllocatedParties = DefaultMaxSelfAllocatedParties, ) } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdater.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdater.scala index 7967b10035..446e2126a1 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdater.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdater.scala @@ -11,6 +11,7 @@ import com.digitalasset.canton.data.DeduplicationPeriod.{DeduplicationDuration, import com.digitalasset.canton.data.Offset import com.digitalasset.canton.discard.Implicits.DiscardOps import 
com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective.TopologyEvent.PartyToParticipantAuthorization +import com.digitalasset.canton.ledger.participant.state.Update.TransactionAccepted.RepresentativePackageIds import com.digitalasset.canton.ledger.participant.state.index.IndexerPartyDetails import com.digitalasset.canton.ledger.participant.state.{CompletionInfo, Update} import com.digitalasset.canton.logging.{NamedLoggerFactory, TracedLogger} @@ -27,10 +28,9 @@ import com.digitalasset.canton.platform.store.cache.OffsetCheckpoint import com.digitalasset.canton.platform.store.dao.events.ContractStateEvent import com.digitalasset.canton.platform.store.dao.events.ContractStateEvent.ReassignmentAccepted import com.digitalasset.canton.platform.store.interfaces.TransactionLogUpdate -import com.digitalasset.canton.platform.{FatContract, InMemoryState, Key, KeyWithMaintainers, Party} +import com.digitalasset.canton.platform.{InMemoryState, Key} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.daml.lf.data.Ref -import com.digitalasset.daml.lf.transaction.CreationTime import com.digitalasset.daml.lf.transaction.Node.{Create, Exercise} import org.apache.pekko.NotUsed import org.apache.pekko.stream.FlowShape @@ -58,15 +58,15 @@ private[platform] object InMemoryStateUpdaterFlow { logger: TracedLogger, )( inMemoryState: InMemoryState, - prepare: (Vector[(Offset, Update)], LedgerEnd) => PrepareResult, + prepare: (Vector[(Offset, Update)], LedgerEnd, TraceContext) => PrepareResult, update: (PrepareResult, Boolean) => Unit, )(implicit traceContext: TraceContext): UpdaterFlow = { repairMode => - Flow[(Vector[(Offset, Update)], LedgerEnd)] + Flow[(Vector[(Offset, Update)], LedgerEnd, TraceContext)] .filter(_._1.nonEmpty) .via(updateOffsetCheckpointCacheFlow(inMemoryState, offsetCheckpointCacheUpdateInterval)) - .mapAsync(prepareUpdatesParallelism) { case (batch, ledgerEnd) => + .mapAsync(prepareUpdatesParallelism) { case (batch, ledgerEnd, batchTraceContext) => Future { - batch -> prepare(batch, ledgerEnd) + batch -> prepare(batch, ledgerEnd, batchTraceContext) }(prepareUpdatesExecutionContext) .checkIfComplete(preparePackageMetadataTimeOutWarning)( logger.warn( @@ -88,8 +88,8 @@ private[platform] object InMemoryStateUpdaterFlow { inMemoryState: InMemoryState, interval: FiniteDuration, ): Flow[ - (Vector[(Offset, Update)], LedgerEnd), - (Vector[(Offset, Update)], LedgerEnd), + (Vector[(Offset, Update)], LedgerEnd, TraceContext), + (Vector[(Offset, Update)], LedgerEnd, TraceContext), NotUsed, ] = { // tick source so that we update offset checkpoint caches @@ -105,8 +105,8 @@ private[platform] object InMemoryStateUpdaterFlow { updateOffsetCheckpointCache: OffsetCheckpoint => Unit, tick: Source[Option[Nothing], NotUsed], ): Flow[ - (Vector[(Offset, Update)], LedgerEnd), - (Vector[(Offset, Update)], LedgerEnd), + (Vector[(Offset, Update)], LedgerEnd, TraceContext), + (Vector[(Offset, Update)], LedgerEnd, TraceContext), NotUsed, ] = Flow.fromGraph(GraphDSL.create() { implicit builder => @@ -117,15 +117,15 @@ private[platform] object InMemoryStateUpdaterFlow { // them with a tick source that ticks every interval seconds to signify the update of the cache val broadcast = - builder.add(Broadcast[(Vector[(Offset, Update)], LedgerEnd)](2)) + builder.add(Broadcast[(Vector[(Offset, Update)], LedgerEnd, TraceContext)](2)) val merge = builder.add(Merge[Option[(Offset, Update)]](inputPorts = 2, eagerComplete = true)) - val preprocess: Flow[(Vector[(Offset, Update)], 
LedgerEnd), Option[ + val preprocess: Flow[(Vector[(Offset, Update)], LedgerEnd, TraceContext), Option[ (Offset, Update) ], NotUsed] = - Flow[(Vector[(Offset, Update)], LedgerEnd)] + Flow[(Vector[(Offset, Update)], LedgerEnd, TraceContext)] .map(_._1) .mapConcat(identity) .map { case (off, tracedUpdate) => (off, tracedUpdate) } @@ -188,9 +188,10 @@ private[platform] object InMemoryStateUpdater { updates: Vector[TransactionLogUpdate], ledgerEnd: LedgerEnd, lastTraceContext: TraceContext, + batchTraceContext: TraceContext, ) type UpdaterFlow = - Boolean => Flow[(Vector[(Offset, Update)], LedgerEnd), Vector[ + Boolean => Flow[(Vector[(Offset, Update)], LedgerEnd, TraceContext), Vector[ (Offset, Update) ], NotUsed] def owner( @@ -231,6 +232,7 @@ private[platform] object InMemoryStateUpdater { private[index] def prepare( batch: Vector[(Offset, Update)], ledgerEnd: LedgerEnd, + batchTraceContext: TraceContext, ): PrepareResult = { val traceContext = batch.lastOption.fold( throw new NoSuchElementException("empty batch") @@ -248,6 +250,7 @@ private[platform] object InMemoryStateUpdater { }, ledgerEnd = ledgerEnd, lastTraceContext = traceContext, + batchTraceContext = batchTraceContext, ) } @@ -255,7 +258,7 @@ private[platform] object InMemoryStateUpdater { inMemoryState: InMemoryState, logger: TracedLogger, )(result: PrepareResult, repairMode: Boolean): Unit = { - updateCaches(inMemoryState, result.updates, result.ledgerEnd.lastOffset) + updateCaches(inMemoryState, result.updates, result.ledgerEnd, result.batchTraceContext) // must be the last update: see the comment inside the method for more details // must be after cache updates: see the comment inside the method for more details // in case of Repair Mode we will update directly, at the end from the indexer queue @@ -327,7 +330,7 @@ private[platform] object InMemoryStateUpdater { .collect { case TransactionLogUpdate.TopologyTransactionEffective(_, _, _, _, events) => events.collect { case u: TransactionLogUpdate.PartyToParticipantAuthorization => PartyAllocation.Completed( - PartyAllocation.TrackerKey.of(u.party, u.participant, u.authorizationEvent), + PartyAllocation.TrackerKey(u.party, u.participant, u.authorizationEvent), IndexerPartyDetails(party = u.party, isLocal = u.participant == participantId), ) } @@ -338,17 +341,20 @@ private[platform] object InMemoryStateUpdater { private def updateCaches( inMemoryState: InMemoryState, updates: Vector[TransactionLogUpdate], - lastOffset: Offset, + ledgerEnd: LedgerEnd, + batchTraceContext: TraceContext, ): Unit = { - updates - .foreach { transaction => - inMemoryState.inMemoryFanoutBuffer.push(transaction) - val contractStateEventsBatch = convertToContractStateEvents(transaction) - NonEmptyVector - .fromVector(contractStateEventsBatch) - .foreach(inMemoryState.contractStateCaches.push(_)(transaction.traceContext)) - } - inMemoryState.cachesUpdatedUpto.set(Some(lastOffset)) + updates.foreach(inMemoryState.inMemoryFanoutBuffer.push) + NonEmptyVector + .fromVector( + updates.iterator + .flatMap(convertToContractStateEvents) + .toVector + ) + .foreach( + inMemoryState.contractStateCaches.push(_, ledgerEnd.lastEventSeqId)(batchTraceContext) + ) + inMemoryState.cachesUpdatedUpto.set(Some(ledgerEnd.lastOffset)) } def updateLedgerEnd( @@ -373,29 +379,14 @@ private[platform] object InMemoryStateUpdater { // cannot lead to successful contract lookup and usage in interpretation anyway if createdEvent.flatEventWitnesses.nonEmpty => ContractStateEvent.Created( - contract = FatContract.fromCreateNode( - Create( - 
coid = createdEvent.contractId, - packageName = createdEvent.packageName, - templateId = createdEvent.templateId, - arg = createdEvent.createArgument.unversioned, - signatories = createdEvent.createSignatories, - stakeholders = createdEvent.flatEventWitnesses.map(Party.assertFromString), - keyOpt = (createdEvent.contractKey zip createdEvent.createKeyMaintainers).map { - case (k, maintainers) => - KeyWithMaintainers.assertBuild( - templateId = createdEvent.templateId, - value = k.unversioned, - maintainers = maintainers, - packageName = createdEvent.packageName, - ) - }, - version = createdEvent.createArgument.version, - ), - createTime = CreationTime.CreatedAt(createdEvent.ledgerEffectiveTime), - authenticationData = createdEvent.authenticationData, + contractId = createdEvent.contractId, + globalKey = createdEvent.contractKey.map(k => + Key.assertBuild( + createdEvent.templateId, + k.unversioned, + createdEvent.packageName, + ) ), - eventOffset = createdEvent.eventOffset, ) case exercisedEvent: TransactionLogUpdate.ExercisedEvent // no state updates for participant divulged events and transient events as these events @@ -410,8 +401,6 @@ private[platform] object InMemoryStateUpdater { exercisedEvent.packageName, ) ), - stakeholders = exercisedEvent.flatEventWitnesses.map(Party.assertFromString), - eventOffset = exercisedEvent.eventOffset, ) } @@ -421,8 +410,8 @@ private[platform] object InMemoryStateUpdater { tx match { case tx: TransactionLogUpdate.TransactionAccepted => tx.events.iterator.collect(convertLogToStateEvent).toVector - case tx: TransactionLogUpdate.ReassignmentAccepted => - Vector(ReassignmentAccepted(tx.offset)) + case _: TransactionLogUpdate.ReassignmentAccepted => + Vector(ReassignmentAccepted) case _ => Vector.empty } @@ -439,12 +428,13 @@ private[platform] object InMemoryStateUpdater { val events = rawEvents.collect { case NodeInfo(nodeId, create: Create, _) => + val contractId = create.coid TransactionLogUpdate.CreatedEvent( eventOffset = offset, - updateId = txAccepted.updateId, + updateId = txAccepted.updateId.toHexString, nodeId = nodeId.index, eventSequentialId = 0L, - contractId = create.coid, + contractId = contractId, ledgerEffectiveTime = txAccepted.transactionMeta.ledgerEffectiveTime, templateId = create.templateId, packageName = create.packageName, @@ -456,7 +446,7 @@ private[platform] object InMemoryStateUpdater { ), treeEventWitnesses = blinding.disclosure.getOrElse(nodeId, Set.empty), flatEventWitnesses = - if (txAccepted.isAcsDelta(create.coid)) create.stakeholders else Set.empty, + if (txAccepted.isAcsDelta(contractId)) create.stakeholders else Set.empty, submitters = txAccepted.completionInfoO .map(_.actAs.toSet) .getOrElse(Set.empty), @@ -468,16 +458,28 @@ private[platform] object InMemoryStateUpdater { createKey = create.keyOpt.map(_.globalKey), createKeyMaintainers = create.keyOpt.map(_.maintainers), authenticationData = txAccepted.contractAuthenticationData.getOrElse( - create.coid, + contractId, throw new IllegalStateException( - s"missing authentication data for contract ${create.coid}" + s"missing authentication data for contract $contractId" ), ), + representativePackageId = txAccepted.representativePackageIds match { + case RepresentativePackageIds.SameAsContractPackageId => create.templateId.packageId + case RepresentativePackageIds.DedicatedRepresentativePackageIds( + representativePackageIds + ) => + representativePackageIds.getOrElse( + contractId, + throw new IllegalStateException( + s"Missing representative package id for contract 
$contractId" + ), + ) + }, ) case NodeInfo(nodeId, exercise: Exercise, lastDescendantNodeId) => TransactionLogUpdate.ExercisedEvent( eventOffset = offset, - updateId = txAccepted.updateId, + updateId = txAccepted.updateId.toHexString, nodeId = nodeId.index, eventSequentialId = 0L, contractId = exercise.targetCoid, @@ -529,7 +531,7 @@ private[platform] object InMemoryStateUpdater { } TransactionLogUpdate.TransactionAccepted( - updateId = txAccepted.updateId, + updateId = txAccepted.updateId.toHexString, commandId = txAccepted.completionInfoO.map(_.commandId).getOrElse(""), workflowId = txAccepted.transactionMeta.workflowId.getOrElse(""), effectiveAt = txAccepted.transactionMeta.ledgerEffectiveTime, @@ -594,7 +596,7 @@ private[platform] object InMemoryStateUpdater { } TransactionLogUpdate.ReassignmentAccepted( - updateId = u.updateId, + updateId = u.updateId.toHexString, commandId = u.optCompletionInfo.map(_.commandId).getOrElse(""), workflowId = u.workflowId.getOrElse(""), offset = offset, @@ -611,7 +613,7 @@ private[platform] object InMemoryStateUpdater { u: Update.TopologyTransactionEffective, ) = TransactionLogUpdate.TopologyTransactionEffective( - updateId = u.updateId, + updateId = u.updateId.toHexString, offset = offset, effectiveTime = u.effectiveTime.toLf, synchronizerId = u.synchronizerId.toProtoPrimitive, diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceImpl.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceImpl.scala index 18616edd94..efa75bc1d1 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceImpl.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceImpl.scala @@ -6,13 +6,7 @@ package com.digitalasset.canton.platform.index import com.daml.ledger.api.v2.command_completion_service.CompletionStreamResponse import com.daml.ledger.api.v2.event_query_service.GetEventsByContractIdResponse import com.daml.ledger.api.v2.state_service.GetActiveContractsResponse -import com.daml.ledger.api.v2.update_service.{ - GetTransactionResponse, - GetTransactionTreeResponse, - GetUpdateResponse, - GetUpdateTreesResponse, - GetUpdatesResponse, -} +import com.daml.ledger.api.v2.update_service.{GetUpdateResponse, GetUpdatesResponse} import com.daml.metrics.InstrumentedGraph.* import com.daml.tracing.{Event, SpanAttribute, Spans} import com.digitalasset.base.error.DamlErrorWithDefiniteAnswer @@ -26,9 +20,7 @@ import com.digitalasset.canton.ledger.api.{ CumulativeFilter, EventFormat, TraceIdentifiers, - TransactionFormat, UpdateFormat, - UpdateId, } import com.digitalasset.canton.ledger.error.LedgerApiErrors.InterfaceViewUpgradeFailureWrapper import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors @@ -55,8 +47,6 @@ import com.digitalasset.canton.platform.store.dao.{ LedgerDaoUpdateReader, LedgerReadDao, } -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata.PackageResolution import com.digitalasset.canton.platform.{ InternalEventFormat, InternalTransactionFormat, @@ -66,6 +56,8 @@ import com.digitalasset.canton.platform.{ TemplatePartiesFilter, *, } +import com.digitalasset.canton.store.packagemeta.PackageMetadata +import com.digitalasset.canton.store.packagemeta.PackageMetadata.PackageResolution import 
com.digitalasset.daml.lf.data.Ref import com.digitalasset.daml.lf.data.Ref.{ FullIdentifier, @@ -80,9 +72,7 @@ import com.google.rpc.Status import io.grpc.StatusRuntimeException import org.apache.pekko.NotUsed import org.apache.pekko.stream.scaladsl.{Flow, Source} -import scalaz.syntax.tag.ToTagOps -import scala.annotation.nowarn import scala.collection.concurrent.TrieMap import scala.concurrent.Future import scala.util.{Failure, Success, Try} @@ -220,83 +210,6 @@ private[index] class IndexServiceImpl( elem } - // TODO(#23504) remove when TransactionTrees are removed - @nowarn("cat=deprecation") - override def transactionTrees( - startExclusive: Option[Offset], - endInclusive: Option[Offset], - eventFormat: EventFormat, - )(implicit loggingContext: LoggingContextWithTrace): Source[GetUpdateTreesResponse, NotUsed] = { - val interfaceViewPackageUpgrade = createViewUpgradeMemoized - val contextualizedErrorLogger = ErrorLoggingContext(logger, loggingContext) - withValidatedFilter( - eventFormat, - getPackageMetadataSnapshot(contextualizedErrorLogger), - ) { - val isTailingStream = endInclusive.isEmpty - val parties = - if (eventFormat.filtersForAnyParty.isEmpty) - Some(eventFormat.filtersByParty.keySet) - else None // party-wildcard - between(startExclusive, endInclusive) { (from, to) => - from.foreach(offset => - Spans.setCurrentSpanAttribute(SpanAttribute.OffsetFrom, offset.toDecimalString) - ) - to.foreach(offset => - Spans.setCurrentSpanAttribute(SpanAttribute.OffsetTo, offset.toDecimalString) - ) - dispatcher() - .startingAt( - startExclusive = from, - subSource = RangeSource { - val memoFilter = - memoizedTransactionFilterProjection( - getPackageMetadataSnapshot, - eventFormat, - interfaceViewPackageUpgrade, - ) - (startInclusive, endInclusive) => - Source(memoFilter().toList) - .flatMapConcat { case (_, eventProjectionProperties) => - updatesReader - .getTransactionTrees( - startInclusive = startInclusive, - endInclusive = endInclusive, - // on the query filter side we treat every party as template-wildcard party, - // if the party-wildcard is given then the transactions for all the templates and all the parties are fetched - requestingParties = parties, - eventProjectionProperties = eventProjectionProperties, - ) - .via(rangeDecorator(startInclusive, endInclusive)) - } - }, - endInclusive = to, - ) - // when a tailing stream is requested add checkpoint messages - .via( - checkpointFlow( - cond = isTailingStream, - fetchOffsetCheckpoint = fetchOffsetCheckpoint, - responseFromCheckpoint = updateTreesResponse, - ) - ) - .mapError(shutdownError) - .buffered(metrics.index.transactionTreesBufferSize, LedgerApiStreamsBufferSize) - }.wireTap( - _.update match { - case GetUpdateTreesResponse.Update.TransactionTree(transactionTree) => - Spans.addEventToCurrentSpan( - Event( - transactionTree.commandId, - TraceIdentifiers.fromTransactionTree(transactionTree), - ) - ) - case _ => () - } - ) - }(contextualizedErrorLogger) - } - override def getCompletions( startExclusive: Option[Offset], userId: Ref.UserId, @@ -384,93 +297,6 @@ private[index] class IndexServiceImpl( ): Future[Option[FatContract]] = contractStore.lookupActiveContract(forParties, contractId) - // TODO(#23504) remove when getTransactionById is removed - @nowarn("cat=deprecation") - override def getTransactionById( - updateId: UpdateId, - transactionFormat: TransactionFormat, - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetTransactionResponse]] = { - val interfaceViewPackageUpgrade = createViewUpgradeMemoized - 
val currentPackageMetadata = getPackageMetadataSnapshot(implicitly) - checkUnknownIdentifiers(transactionFormat.eventFormat, currentPackageMetadata).left - .map(_.asGrpcError) - .fold( - Future.failed, - _ => { - val internalTransactionFormatO = eventFormatProjection( - eventFormat = transactionFormat.eventFormat, - metadata = currentPackageMetadata, - interfaceViewPackageUpgrade, - ).map(internalEventFormat => - InternalTransactionFormat( - internalEventFormat = internalEventFormat, - transactionShape = transactionFormat.transactionShape, - ) - ) - - internalTransactionFormatO match { - case Some(internalTransactionFormat) => - updatesReader.lookupTransactionById(updateId.unwrap, internalTransactionFormat) - case None => Future.successful(None) - } - }, - ) - } - - // TODO(#23504) remove when TransactionTrees are removed - @nowarn("cat=deprecation") - override def getTransactionTreeById( - updateId: UpdateId, - requestingParties: Set[Ref.Party], - )(implicit - loggingContext: LoggingContextWithTrace - ): Future[Option[GetTransactionTreeResponse]] = { - val interfaceViewPackageUpgrade = createViewUpgradeMemoized - updatesReader - .lookupTransactionTreeById( - updateId.unwrap, - requestingParties, - EventProjectionProperties( - verbose = true - )( - interfaceViewPackageUpgrade = interfaceViewPackageUpgrade - ), - ) - } - - // TODO(#23504) remove when getTransactionByOffset is removed - @nowarn("cat=deprecation") - override def getTransactionByOffset( - offset: Offset, - transactionFormat: TransactionFormat, - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetTransactionResponse]] = { - val interfaceViewPackageUpgrade = createViewUpgradeMemoized - val currentPackageMetadata = getPackageMetadataSnapshot(implicitly) - checkUnknownIdentifiers(transactionFormat.eventFormat, currentPackageMetadata).left - .map(_.asGrpcError) - .fold( - Future.failed, - _ => { - val internalTransactionFormatO = eventFormatProjection( - eventFormat = transactionFormat.eventFormat, - metadata = currentPackageMetadata, - interfaceViewPackageUpgrade, - ).map(internalEventFormat => - InternalTransactionFormat( - internalEventFormat = internalEventFormat, - transactionShape = transactionFormat.transactionShape, - ) - ) - - internalTransactionFormatO match { - case Some(internalTransactionFormat) => - updatesReader.lookupTransactionByOffset(offset, internalTransactionFormat) - case None => Future.successful(None) - } - }, - ) - } - override def getUpdateBy( lookupKey: LookupKey, updateFormat: UpdateFormat, @@ -498,27 +324,6 @@ private[index] class IndexServiceImpl( ) } - // TODO(#23504) remove when TransactionTrees are removed - @nowarn("cat=deprecation") - override def getTransactionTreeByOffset( - offset: Offset, - requestingParties: Set[Ref.Party], - )(implicit - loggingContext: LoggingContextWithTrace - ): Future[Option[GetTransactionTreeResponse]] = { - val interfaceViewPackageUpgrade = createViewUpgradeMemoized - updatesReader - .lookupTransactionTreeByOffset( - offset, - requestingParties, - EventProjectionProperties( - verbose = true - )( - interfaceViewPackageUpgrade = interfaceViewPackageUpgrade - ), - ) - } - override def getEventsByContractId( contractId: ContractId, eventFormat: EventFormat, @@ -743,13 +548,18 @@ object IndexServiceImpl { /** Computes an optimal package-id of the ``originalCreateTemplate`` interface instance that's * used for rendering a view for interface ``interfaceId``. 
* + * @param interfaceId + * The interface-id of the view being requested + * @param representativeCreateTemplate + * The template-id of the contract's representative package + * * @return * the identifier for the ``originalCreateTemplate`` with the package-id adjusted to the * selection result */ def upgrade( interfaceId: Identifier, - originalCreateTemplate: Identifier, + representativeCreateTemplate: Identifier, ): Future[Either[Status, Identifier]] } @@ -887,20 +697,6 @@ object IndexServiceImpl { } yield source ) - // TODO(#23504) cleanup - private[index] def withValidatedFilter[T]( - apiEventFormat: EventFormat, - metadata: PackageMetadata, - )( - source: => Source[T, NotUsed] - )(implicit errorLogger: ErrorLoggingContext): Source[T, NotUsed] = - foldToSource( - for { - _ <- checkUnknownIdentifiers(apiEventFormat, metadata)(errorLogger).left - .map(_.asGrpcError) - } yield source - ) - private[index] def validatedAcsActiveAtOffset[T]( activeAt: Option[Offset], ledgerEnd: Option[Offset], @@ -971,32 +767,6 @@ object IndexServiceImpl { ) } - // TODO(#23504) cleanup - @SuppressWarnings(Array("org.wartremover.warts.Null", "org.wartremover.warts.Var")) - private[index] def memoizedTransactionFilterProjection( - getPackageMetadataSnapshot: ErrorLoggingContext => PackageMetadata, - eventFormat: EventFormat, - interfaceViewPackageUpgrade: InterfaceViewPackageUpgrade, - )(implicit - contextualizedErrorLogger: ErrorLoggingContext - ): () => Option[(TemplatePartiesFilter, EventProjectionProperties)] = { - @volatile var metadata: PackageMetadata = null - @volatile var filters: Option[(TemplatePartiesFilter, EventProjectionProperties)] = None - () => - val currentMetadata = getPackageMetadataSnapshot(contextualizedErrorLogger) - if (metadata ne currentMetadata) { - metadata = currentMetadata - filters = eventFormatProjection( - eventFormat, - metadata, - interfaceViewPackageUpgrade, - ).map(internalEventFormat => - (internalEventFormat.templatePartiesFilter, internalEventFormat.eventProjectionProperties) - ) - } - filters - } - private def eventFormatProjection( eventFormat: EventFormat, metadata: PackageMetadata, @@ -1254,13 +1024,6 @@ object IndexServiceImpl { ): GetUpdatesResponse = GetUpdatesResponse.defaultInstance.withOffsetCheckpoint(offsetCheckpoint.toApi) - // TODO(#23504) remove when TransactionTrees are removed - @nowarn("cat=deprecation") - private def updateTreesResponse( - offsetCheckpoint: OffsetCheckpoint - ): GetUpdateTreesResponse = - GetUpdateTreesResponse.defaultInstance.withOffsetCheckpoint(offsetCheckpoint.toApi) - private def completionsResponse( offsetCheckpoint: OffsetCheckpoint ): CompletionStreamResponse = diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceOwner.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceOwner.scala index 481b9e8795..67751db989 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceOwner.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceOwner.scala @@ -20,11 +20,11 @@ import com.digitalasset.canton.logging.{ NamedLogging, } import com.digitalasset.canton.metrics.LedgerApiServerMetrics +import com.digitalasset.canton.participant.store.ContractStore import com.digitalasset.canton.platform.InMemoryState import com.digitalasset.canton.platform.apiserver.TimedIndexService import 
com.digitalasset.canton.platform.config.IndexServiceConfig import com.digitalasset.canton.platform.index.IndexServiceOwner.GetPackagePreferenceForViewsUpgrading -import com.digitalasset.canton.platform.store.DbSupport import com.digitalasset.canton.platform.store.backend.common.MismatchException import com.digitalasset.canton.platform.store.cache.* import com.digitalasset.canton.platform.store.dao.events.{ @@ -38,7 +38,8 @@ import com.digitalasset.canton.platform.store.dao.{ LedgerReadDao, } import com.digitalasset.canton.platform.store.interning.StringInterning -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata +import com.digitalasset.canton.platform.store.{DbSupport, PruningOffsetService} +import com.digitalasset.canton.store.packagemeta.PackageMetadata import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.daml.lf.data.Ref import io.opentelemetry.api.trace.Tracer @@ -66,6 +67,8 @@ final class IndexServiceOwner( lfValueTranslation: LfValueTranslation, queryExecutionContext: ExecutionContextExecutorService, commandExecutionContext: ExecutionContextExecutorService, + participantContractStore: ContractStore, + pruningOffsetService: PruningOffsetService, ) extends ResourceOwner[IndexService] with NamedLogging { private val initializationRetryDelay = 100.millis @@ -77,6 +80,7 @@ final class IndexServiceOwner( stringInterning = inMemoryState.stringInterningView, contractLoader = contractLoader, lfValueTranslation = lfValueTranslation, + pruningOffsetService = pruningOffsetService, queryExecutionContext = queryExecutionContext, commandExecutionContext = commandExecutionContext, ) @@ -88,6 +92,7 @@ final class IndexServiceOwner( ledgerDao.contractsReader, contractStateCaches = inMemoryState.contractStateCaches, loggerFactory = loggerFactory, + contractStore = participantContractStore, )(commandExecutionContext) bufferedTransactionsReader = BufferedUpdateReader( @@ -191,6 +196,7 @@ final class IndexServiceOwner( ledgerEndCache: LedgerEndCache, stringInterning: StringInterning, contractLoader: ContractLoader, + pruningOffsetService: PruningOffsetService, lfValueTranslation: LfValueTranslation, queryExecutionContext: ExecutionContextExecutorService, commandExecutionContext: ExecutionContextExecutorService, @@ -206,7 +212,6 @@ final class IndexServiceOwner( completionsPageSize = config.completionsPageSize, activeContractsServiceStreamsConfig = config.activeContractsServiceStreams, updatesStreamsConfig = config.updatesStreams, - transactionTreeStreamsConfig = config.transactionTreeStreams, globalMaxEventIdQueries = config.globalMaxEventIdQueries, globalMaxEventPayloadQueries = config.globalMaxEventPayloadQueries, tracer = tracer, @@ -214,7 +219,9 @@ final class IndexServiceOwner( incompleteOffsets = incompleteOffsets, contractLoader = contractLoader, lfValueTranslation = lfValueTranslation, - ) + pruningOffsetService = pruningOffsetService, + contractStore = participantContractStore, + )(queryExecutionContext) private object InMemoryStateNotInitialized extends NoStackTrace } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/IndexerConfig.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/IndexerConfig.scala index d9feda99e6..81db9af6ae 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/IndexerConfig.scala +++ 
b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/IndexerConfig.scala @@ -12,13 +12,23 @@ import com.digitalasset.canton.platform.store.backend.postgresql.PostgresDataSou import scala.concurrent.duration.{DurationInt, FiniteDuration} /** See com.digitalasset.canton.platform.indexer.JdbcIndexer for semantics on these configurations. + * + * - enableCompression: switches on compression for both consuming and non-consuming exercises, + * equivalent to setting both enableCompressionConsumingExercise and + * enableCompressionNonConsumingExercise to true. This is to maintain backward compatibility + * with existing config files. + * - enableCompressionConsumingExercise: switches on compression for consuming exercises + * - enableCompressionNonConsumingExercise: switches on compression for non-consuming exercises */ final case class IndexerConfig( batchingParallelism: NonNegativeInt = NonNegativeInt.tryCreate(DefaultBatchingParallelism), enableCompression: Boolean = DefaultEnableCompression, + enableCompressionConsumingExercise: Boolean = DefaultEnableCompression, + enableCompressionNonConsumingExercise: Boolean = DefaultEnableCompression, ingestionParallelism: NonNegativeInt = NonNegativeInt.tryCreate(DefaultIngestionParallelism), inputMappingParallelism: NonNegativeInt = NonNegativeInt.tryCreate(DefaultInputMappingParallelism), + dbPrepareParallelism: NonNegativeInt = NonNegativeInt.tryCreate(DefaultDbPrepareParallelism), maxInputBufferSize: NonNegativeInt = NonNegativeInt.tryCreate(DefaultMaxInputBufferSize), restartDelay: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(DefaultRestartDelay.toSeconds), @@ -40,17 +50,17 @@ object IndexerConfig { // Exposed as public method so defaults can be overridden in the downstream code.
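For orientation: the connection pool sizing defined below now has to account for the new dbPrepare workers in addition to the ingestion workers. A minimal sketch of the arithmetic, assuming the defaults declared in this file (the wrapper object and value names are illustrative only, not part of the patch):

  object ConnectionPoolSizingSketch extends App {
    val ingestionParallelism = 16 // DefaultIngestionParallelism
    val dbPrepareParallelism = 4 // DefaultDbPrepareParallelism
    // one connection per ingestion worker, one per dbPrepare worker,
    // plus 2 for the tailing ledger_end and post-processing-end updates
    val connectionPoolSize = ingestionParallelism + dbPrepareParallelism + 2
    println(s"default indexer pool size: $connectionPoolSize") // 22
  }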
def createDataSourcePropertiesForTesting( - ingestionParallelism: Int + indexerConfig: IndexerConfig ): DataSourceProperties = DataSourceProperties( // PostgresSQL specific configurations postgres = PostgresDataSourceConfig( synchronousCommit = Some(PostgresDataSourceConfig.SynchronousCommitValue.Off) ), - connectionPool = createConnectionPoolConfig(ingestionParallelism), + connectionPool = createConnectionPoolConfig(indexerConfig), ) def createConnectionPoolConfig( - ingestionParallelism: Int, + indexerConfig: IndexerConfig, connectionTimeout: FiniteDuration = FiniteDuration( // 250 millis is the lowest possible value for this Hikari configuration (see HikariConfig JavaDoc) 250, @@ -59,13 +69,15 @@ object IndexerConfig { ): ConnectionPoolConfig = ConnectionPoolConfig( connectionPoolSize = - ingestionParallelism + 2, // + 2 for the tailing ledger_end and post processing end updates + indexerConfig.ingestionParallelism.unwrap + indexerConfig.dbPrepareParallelism.unwrap + + 2, // + 2 for the tailing ledger_end and post processing end updates connectionTimeout = connectionTimeout, ) val DefaultRestartDelay: FiniteDuration = 10.seconds val DefaultMaxInputBufferSize: Int = 50 val DefaultInputMappingParallelism: Int = 16 + val DefaultDbPrepareParallelism: Int = 4 val DefaultBatchingParallelism: Int = 4 val DefaultIngestionParallelism: Int = 16 val DefaultSubmissionBatchSize: Long = 50L diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/JdbcIndexer.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/JdbcIndexer.scala index e3d4cff080..2abaac8c10 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/JdbcIndexer.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/JdbcIndexer.scala @@ -63,6 +63,10 @@ object JdbcIndexer { val ingestionStorageBackend = factory.createIngestionStorageBackend val parameterStorageBackend = factory.createParameterStorageBackend(inMemoryState.stringInterningView) + val contractStorageBackend = factory.createContractStorageBackend( + inMemoryState.stringInterningView, + inMemoryState.ledgerEndCache, + ) val DBLockStorageBackend = factory.createDBLockStorageBackend val stringInterningStorageBackend = factory.createStringInterningStorageBackend val completionStorageBackend = @@ -96,6 +100,7 @@ object JdbcIndexer { parallelIndexerSubscription = ParallelIndexerSubscription( parameterStorageBackend = parameterStorageBackend, ingestionStorageBackend = ingestionStorageBackend, + contractStorageBackend = contractStorageBackend, participantId = participantId, translation = new LfValueTranslation( metrics = metrics, @@ -105,9 +110,14 @@ object JdbcIndexer { ), compressionStrategy = if (config.enableCompression) CompressionStrategy.allGZIP(metrics) - else CompressionStrategy.none(metrics), + else + CompressionStrategy.buildFromConfig(metrics)( + config.enableCompressionConsumingExercise, + config.enableCompressionNonConsumingExercise, + ), maxInputBufferSize = config.maxInputBufferSize.unwrap, inputMappingParallelism = config.inputMappingParallelism.unwrap, + dbPrepareParallelism = config.dbPrepareParallelism.unwrap, batchingParallelism = config.batchingParallelism.unwrap, ingestionParallelism = ingestionParallelism, submissionBatchSize = config.submissionBatchSize, diff --git 
a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/BatchingParallelIngestionPipe.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/BatchingParallelIngestionPipe.scala index 504e8ddfcb..635f0df898 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/BatchingParallelIngestionPipe.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/BatchingParallelIngestionPipe.scala @@ -17,6 +17,8 @@ object BatchingParallelIngestionPipe { inputMapper: Iterable[IN] => Future[IN_BATCH], seqMapperZero: IN_BATCH, seqMapper: (IN_BATCH, IN_BATCH) => IN_BATCH, + dbPrepareParallelism: Int, + dbPrepare: IN_BATCH => Future[IN_BATCH], batchingParallelism: Int, batcher: IN_BATCH => Future[DB_BATCH], ingestingParallelism: Int, @@ -24,23 +26,25 @@ maxTailerBatchSize: Int, ingestTail: Vector[DB_BATCH] => Future[Vector[DB_BATCH]], ): Flow[IN, DB_BATCH, NotUsed] = - // Stage 1: the stream coming from ReadService, involves deserialization and translation to Update-s + // The stream coming from ReadService involves deserialization and translation to Update-s Flow[IN] - // Stage 2: Batching plus mapping to Database DTOs encapsulates all the CPU intensive computation of the ingestion. Executed in parallel. + // Batching plus mapping to Database DTOs encapsulates all the CPU intensive computation of the ingestion. Executed in parallel. .via(BatchN(submissionBatchSize.toInt, inputMappingParallelism)) .mapAsync(inputMappingParallelism)(inputMapper) - // Stage 3: Encapsulates sequential/stateful computation (generation of sequential IDs for events) + // Encapsulates sequential/stateful computation (generation of sequential IDs for events) .scan(seqMapperZero)(seqMapper) .drop(1) // remove the zero element from the beginning of the stream - // Stage 4: Mapping to Database specific representation, encapsulates all database specific preparation of the data. Executed in parallel. + .async + .mapAsync(dbPrepareParallelism)(dbPrepare) + // Mapping to Database specific representation, encapsulates all database specific preparation of the data. Executed in parallel. .async .mapAsync(batchingParallelism)(batcher) - // Stage 5: Inserting data into the database. Almost no CPU load here, threads are executing SQL commands over JDBC, and waiting for the result. This defines the parallelism on the SQL database side, same amount of PostgreSQL Backend processes will do the ingestion work. + // Inserting data into the database. Almost no CPU load here, threads are executing SQL commands over JDBC, and waiting for the result. This defines the parallelism on the SQL database side: the same number of PostgreSQL backend processes will do the ingestion work.
.async .mapAsync(ingestingParallelism)(ingester) - // Stage 6: Batching data for throttled ledger-end update in database + // Batching data for throttled ledger-end update in database .batch(maxTailerBatchSize.toLong, Vector(_))(_ :+ _) - // Stage 7: Updating ledger-end and related data in database (this stage completion demarcates the consistent point-in-time) + // Updating ledger-end and related data in database (this stage completion demarcates the consistent point-in-time) .mapAsync(1)(ingestTail) .mapConcat(identity) } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/ParallelIndexerSubscription.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/ParallelIndexerSubscription.scala index 8d5b6aa1c7..4dba3bdc12 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/ParallelIndexerSubscription.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/ParallelIndexerSubscription.scala @@ -3,6 +3,7 @@ package com.digitalasset.canton.platform.indexer.parallel +import com.daml.logging.entries.LoggingEntries import com.daml.metrics.InstrumentedGraph.* import com.daml.metrics.Timed import com.daml.metrics.api.MetricsContext @@ -31,6 +32,7 @@ import com.digitalasset.canton.platform.indexer.ha.Handle import com.digitalasset.canton.platform.indexer.parallel.AsyncSupport.* import com.digitalasset.canton.platform.store.backend.* import com.digitalasset.canton.platform.store.backend.ParameterStorageBackend.LedgerEnd +import com.digitalasset.canton.platform.store.cache.LedgerEndCache import com.digitalasset.canton.platform.store.dao.DbDispatcher import com.digitalasset.canton.platform.store.dao.events.{CompressionStrategy, LfValueTranslation} import com.digitalasset.canton.time.Clock @@ -39,6 +41,7 @@ import com.digitalasset.canton.tracing.{Spanning, TraceContext} import com.digitalasset.canton.util.ErrorUtil import com.digitalasset.canton.util.PekkoUtil.{Commit, FutureQueue, PekkoSourceQueueToFutureQueue} import com.digitalasset.daml.lf.data.Ref +import com.digitalasset.daml.lf.value.Value.ContractId import io.opentelemetry.api.trace.Tracer import org.apache.pekko.stream.scaladsl.{Flow, Keep, Sink, Source} import org.apache.pekko.stream.{KillSwitches, Materializer, OverflowStrategy} @@ -46,6 +49,7 @@ import org.apache.pekko.{Done, NotUsed} import java.sql.Connection import java.util.concurrent.atomic.AtomicReference +import scala.collection.mutable import scala.concurrent.{ExecutionContext, Future} import scala.math.Ordered.orderingToOrdered import scala.util.chaining.* @@ -53,11 +57,13 @@ import scala.util.chaining.* private[platform] final case class ParallelIndexerSubscription[DB_BATCH]( ingestionStorageBackend: IngestionStorageBackend[DB_BATCH], parameterStorageBackend: ParameterStorageBackend, + contractStorageBackend: ContractStorageBackend, participantId: Ref.ParticipantId, translation: LfValueTranslation, compressionStrategy: CompressionStrategy, maxInputBufferSize: Int, inputMappingParallelism: Int, + dbPrepareParallelism: Int, batchingParallelism: Int, ingestionParallelism: Int, submissionBatchSize: Long, @@ -149,7 +155,7 @@ private[platform] final case class ParallelIndexerSubscription[DB_BATCH]( inputMapper( metrics, mapInSpan( - UpdateToDbDto( + UpdateToDbDtoLegacy( participantId = participantId, translation = translation, 
compressionStrategy = compressionStrategy, @@ -168,6 +174,14 @@ metrics, clock, logger, + inMemoryState.ledgerEndCache, + ), + dbPrepareParallelism = dbPrepareParallelism, + dbPrepare = dbPrepare( + lastActivations = contractStorageBackend.lastActivations, + dbDispatcher = dbDispatcher, + logger = logger, + metrics = metrics, + ), batchingParallelism = batchingParallelism, batcher = batcherExecutor.execute( @@ -175,7 +189,8 @@ ingestionStorageBackend.batch( _, inMemoryState.stringInterningView, - ) + ), + logger, ) ), ingestingParallelism = ingestionParallelism, @@ -225,7 +240,7 @@ } ) .mapConcat( - _.map(batch => (batch.offsetsUpdates, batch.ledgerEnd)) + _.map(batch => (batch.offsetsUpdates, batch.ledgerEnd, batch.batchTraceContext)) ) .buffered( counter = metrics.indexer.outputBatchedBufferLength, @@ -278,24 +293,37 @@ } object ParallelIndexerSubscription { + val EmptyActiveContracts: mutable.LinkedHashMap[(SynchronizerId, ContractId), Long] = + mutable.LinkedHashMap.empty /** Batch wraps around a T-typed batch, enriching it with processing relevant information. * * @param ledgerEnd * The LedgerEnd for the batch. Needed for tail ingestion. - * @param lastTraceContext - * The latest trace context contained in the batch. Needed for logging. * @param batch * The batch of variable type. * @param batchSize * Size of the batch measured in number of updates. Needed for metrics population. + * @param offsetsUpdates + * The Updates with Offsets, the source of the batch. + * @param activeContracts + * The active contracts at the head of the ledger - the ones which are not persisted yet. Key + * is the Synchronizer ID of activation and the Contract ID, and the value is the + * event_sequential_id of the activation. + * @param missingDeactivatedActivations + * The set of deactivations that need to be looked up at the dbPrepare stage. The values are + * optional because this is also where the lookup results are stored. + * @param batchTraceContext + * The TraceContext constructed for the whole batch.
*/ final case class Batch[+T]( ledgerEnd: LedgerEnd, - lastTraceContext: TraceContext, batch: T, batchSize: Int, offsetsUpdates: Vector[(Offset, Update)], + activeContracts: mutable.LinkedHashMap[(SynchronizerId, ContractId), Long], + missingDeactivatedActivations: Map[(SynchronizerId, ContractId), Option[Long]], + batchTraceContext: TraceContext, ) val ZeroLedgerEnd: LedgerEnd = LedgerEnd( @@ -414,7 +442,6 @@ object ParallelIndexerSubscription { s"${prefix}Storing at offset=${offset.unwrap} $update" )(update.traceContext) toDbDto(offset)(update) - }.toVector eventMetricsUpdater(input) @@ -427,10 +454,14 @@ object ParallelIndexerSubscription { lastOffset = last._1 // the rest will be filled later in the sequential step ), - lastTraceContext = last._2.traceContext, batch = batch, batchSize = input.size, offsetsUpdates = input.toVector, + activeContracts = EmptyActiveContracts, // will be overridden later + missingDeactivatedActivations = Map.empty, // will be filled later + batchTraceContext = TraceContext.ofBatch("indexer_update_batch")( + input.iterator.map(_._2) + )(logger), ) } @@ -439,10 +470,13 @@ object ParallelIndexerSubscription { ): Batch[Vector[DbDto]] = Batch( ledgerEnd = initialLedgerEndO.getOrElse(ZeroLedgerEnd), - lastTraceContext = TraceContext.empty, batch = Vector.empty, batchSize = 0, offsetsUpdates = Vector.empty, + activeContracts = + mutable.LinkedHashMap.empty, // this mutable will propagate forward in sequential mapping + missingDeactivatedActivations = Map.empty, // will be populated later + batchTraceContext = TraceContext.empty, // will be populated later ) def seqMapper( @@ -450,6 +484,7 @@ object ParallelIndexerSubscription { metrics: LedgerApiServerMetrics, clock: Clock, logger: TracedLogger, + ledgerEndCache: LedgerEndCache, )( previous: Batch[Vector[DbDto]], current: Batch[Vector[DbDto]], @@ -463,35 +498,101 @@ object ParallelIndexerSubscription { previous.ledgerEnd.lastPublicationTime, ) if (now < next) { - implicit val batchTraceContext: TraceContext = TraceContext.ofBatch("seq_map_batch")( - current.offsetsUpdates.iterator.map(_._2) - )(logger) logger.info( s"Local participant clock at $now is before a previous publication time $next. Has the clock been reset, e.g., during participant failover?" - ) + )(current.batchTraceContext) } next } + val activeContracts = previous.activeContracts + val missingDeactivatedActivationsBuilder = + Map.newBuilder[(SynchronizerId, ContractId), Option[Long]] + def setActivation(synCon: (SynchronizerId, ContractId), eventSeqId: Long): Unit = { + if (activeContracts.contains(synCon)) { + logger.warn( + s"Double activation at eventSeqId: $eventSeqId. 
Previous at ${activeContracts.get(synCon)}. This should not happen." )(current.batchTraceContext) + activeContracts.remove(synCon).discard // we will add a new one now + } + activeContracts.addOne(synCon -> eventSeqId) + } + def tryToGetDeactivated(synCon: (SynchronizerId, ContractId)): Long = + activeContracts.get(synCon) match { + case Some(activationSeqId) => + activeContracts.remove(synCon).discard + activationSeqId + case None => + missingDeactivatedActivationsBuilder.addOne(synCon -> None) + 0 // will be filled later + } + @SuppressWarnings(Array("org.wartremover.warts.Var")) var eventSeqId = previous.ledgerEnd.lastEventSeqId @SuppressWarnings(Array("org.wartremover.warts.Var")) var lastTransactionMetaEventSeqId = eventSeqId val batchWithSeqIdsAndPublicationTime = current.batch.map { + case dbDto: DbDto.EventActivate => + eventSeqId += 1 + // activation + setActivation( + dbDto.synchronizer_id -> dbDto.notPersistedContractId, + eventSeqId, + ) + dbDto.copy(event_sequential_id = eventSeqId) + + case dbDto: DbDto.EventDeactivate => + eventSeqId += 1 + // deactivation + dbDto.copy( + event_sequential_id = eventSeqId, + deactivated_event_sequential_id = Some( + tryToGetDeactivated(dbDto.synchronizer_id -> dbDto.contract_id) + ), + ) + + case dbDto: DbDto.EventVariousWitnessed => + eventSeqId += 1 + dbDto.copy(event_sequential_id = eventSeqId) + case dbDto: DbDto.EventCreate => eventSeqId += 1 + if (dbDto.flat_event_witnesses.nonEmpty) { + // activation + setActivation( + dbDto.synchronizer_id -> dbDto.contract_id, + eventSeqId, + ) + } dbDto.copy(event_sequential_id = eventSeqId) case dbDto: DbDto.EventExercise => eventSeqId += 1 - dbDto.copy(event_sequential_id = eventSeqId) + val deactivated = Option.when(dbDto.consuming && dbDto.flat_event_witnesses.nonEmpty) { + // deactivation + tryToGetDeactivated(dbDto.synchronizer_id -> dbDto.contract_id) + } + dbDto.copy( + event_sequential_id = eventSeqId, + deactivated_event_sequential_id = deactivated, + ) case dbDto: DbDto.EventUnassign => eventSeqId += 1 - dbDto.copy(event_sequential_id = eventSeqId) + dbDto.copy( + event_sequential_id = eventSeqId, + deactivated_event_sequential_id = Some( + tryToGetDeactivated(dbDto.source_synchronizer_id -> dbDto.contract_id) + ), + ) case dbDto: DbDto.EventAssign => eventSeqId += 1 + // activation + setActivation( + dbDto.target_synchronizer_id -> dbDto.contract_id, + eventSeqId, + ) dbDto.copy(event_sequential_id = eventSeqId) case dbDto: DbDto.EventPartyToParticipant => @@ -499,6 +600,8 @@ dbDto.copy(event_sequential_id = eventSeqId) // we do not increase the event_seq_id here, because all the DbDto-s must have the same eventSeqId as the preceding Event + case dbDto: DbDto.IdFilterDbDto => + dbDto.withEventSequentialId(eventSeqId) case dbDto: DbDto.IdFilterCreateStakeholder => dbDto.copy(event_sequential_id = eventSeqId) case dbDto: DbDto.IdFilterCreateNonStakeholderInformee => @@ -539,6 +642,15 @@ )(last => last.internalId -> (batchWithSeqIdsAndPublicationTime ++ newEntries)) ) + // prune active contracts so that only activations remain which are not yet visible in the DB + ledgerEndCache().foreach(ledgerEnd => + activeContracts.iterator + .takeWhile(_._2 <= ledgerEnd.lastEventSeqId) + .map(_._1) + .toList + .foreach(activeContracts.remove(_).discard) + ) + current.copy( ledgerEnd = current.ledgerEnd.copy( lastEventSeqId = eventSeqId, @@ -546,17 +658,111 @@ lastPublicationTime = publicationTime, ),
batch = dbDtosWithStringInterning, + activeContracts = activeContracts, + missingDeactivatedActivations = missingDeactivatedActivationsBuilder.result(), ) }, ) + def dbPrepare( + lastActivations: Iterable[(SynchronizerId, ContractId)] => Connection => Map[ + (SynchronizerId, ContractId), + Long, + ], + dbDispatcher: DbDispatcher, + metrics: LedgerApiServerMetrics, + logger: TracedLogger, + ): Batch[Vector[DbDto]] => Future[Batch[Vector[DbDto]]] = { + val directExecutionContext = DirectExecutionContext(logger) + batch => + val missingActivations = batch.missingDeactivatedActivations.keys + if (missingActivations.isEmpty) Future.successful(batch) + else { + implicit val loggingContextWithTrace: LoggingContextWithTrace = + new LoggingContextWithTrace(LoggingEntries.empty, batch.batchTraceContext) + dbDispatcher + .executeSql(metrics.index.db.lookupLastActivationsDbMetrics)( + lastActivations(missingActivations) + ) + .map { results => + batch.copy( + missingDeactivatedActivations = batch.missingDeactivatedActivations.++( + results.iterator.map { case (synCon, eventSeqId) => + synCon -> Some(eventSeqId) + } + ) + ) + }(directExecutionContext) + } + } + + def refillMissingDeactivatedActivations( + logger: TracedLogger + )(batch: Batch[Vector[DbDto]]): Batch[Vector[DbDto]] = { + def deactivationRefFor( + synchronizerId: SynchronizerId, + contractId: ContractId, + marker: => String, + ): Option[Long] = + batch.missingDeactivatedActivations.get(synchronizerId -> contractId) match { + case None => + ErrorUtil.invalidState( + s"Programming error: deactivation reference is missing for $marker for synchronizerId:$synchronizerId contractId:$contractId, but lookup was not even initiated." + )(ErrorLoggingContext.fromTracedLogger(logger)(batch.batchTraceContext)) + + case Some(None) => + logger.warn( + s"Activation is missing for a deactivation for $marker for synchronizerId:$synchronizerId contractId:$contractId." 
+ )(batch.batchTraceContext) + None + + case Some(Some(deactivationReference)) => Some(deactivationReference) + } + val dbDtosWithDeactivationReferences = batch.batch.map { + case deactivate: DbDto.EventDeactivate + if deactivate.deactivated_event_sequential_id.contains(0L) => + deactivate.copy( + deactivated_event_sequential_id = deactivationRefFor( + deactivate.synchronizer_id, + deactivate.contract_id, + s"deactivated event with type:${PersistentEventType + .fromInt(deactivate.event_type)} offset:${deactivate.event_offset} nodeId:${deactivate.node_id}", + ) + ) + + case unassign: DbDto.EventUnassign if unassign.deactivated_event_sequential_id.contains(0L) => + unassign.copy( + deactivated_event_sequential_id = deactivationRefFor( + unassign.source_synchronizer_id, + unassign.contract_id, + s"unassign event with offset:${unassign.event_offset} nodeId:${unassign.node_id}", + ) + ) + + case consumingExercise: DbDto.EventExercise + if consumingExercise.deactivated_event_sequential_id.contains(0L) => + consumingExercise.copy( + deactivated_event_sequential_id = deactivationRefFor( + consumingExercise.synchronizer_id, + consumingExercise.contract_id, + s"consuming exercise event with offset:${consumingExercise.event_offset} nodeId:${consumingExercise.node_id}", + ) + ) + + case noChange => noChange + } + batch.copy(batch = dbDtosWithDeactivationReferences) + } + def batcher[DB_BATCH]( - batchF: Vector[DbDto] => DB_BATCH - ): Batch[Vector[DbDto]] => Batch[DB_BATCH] = { inBatch => - val dbBatch = batchF(inBatch.batch) - inBatch.copy( - batch = dbBatch - ) + batchF: Vector[DbDto] => DB_BATCH, + logger: TracedLogger, + )(inBatch: Batch[Vector[DbDto]]): Batch[DB_BATCH] = { + val dbBatch = inBatch + .pipe(refillMissingDeactivatedActivations(logger)) + .batch + .pipe(batchF) + inBatch.copy(batch = dbBatch) } def ingester[DB_BATCH]( @@ -566,7 +772,7 @@ object ParallelIndexerSubscription { dbDispatcher: DbDispatcher, metrics: LedgerApiServerMetrics, logger: TracedLogger, - )(implicit traceContext: TraceContext): Batch[DB_BATCH] => Future[Batch[DB_BATCH]] = { + ): Batch[DB_BATCH] => Future[Batch[DB_BATCH]] = { val directExecutionContext = DirectExecutionContext(logger) batch => LoggingContextWithTrace.withNewLoggingContext( @@ -587,7 +793,7 @@ object ParallelIndexerSubscription { cleanUnusedBatch(zeroDbBatch)(batch) } )(directExecutionContext) - } + }(batch.batchTraceContext) } def ledgerEndSynchronizerIndexFrom( @@ -683,9 +889,6 @@ object ParallelIndexerSubscription { ): Batch[DB_BATCH] => Future[Batch[DB_BATCH]] = { val directExecutionContext = DirectExecutionContext(logger) batch => { - val batchTraceContext: TraceContext = TraceContext.ofBatch("post_process_batch")( - batch.offsetsUpdates.iterator.map(_._2) - )(logger) val postPublishData = batch.offsetsUpdates.flatMap { case (offset, update) => PostPublishData.from( update, @@ -693,7 +896,7 @@ object ParallelIndexerSubscription { batch.ledgerEnd.lastPublicationTime, ) } - processor(postPublishData, batchTraceContext).map(_ => batch)(directExecutionContext) + processor(postPublishData, batch.batchTraceContext).map(_ => batch)(directExecutionContext) } } @@ -792,6 +995,8 @@ object ParallelIndexerSubscription { _.copy( batch = zeroDbBatch, // not used anymore batchSize = 0, // not used anymore + activeContracts = EmptyActiveContracts, // not used anymore + missingDeactivatedActivations = Map.empty, // not used anymore ) } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/package.scala 
b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/package.scala index 75e283f8d5..21744f6ad0 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/package.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/package.scala @@ -48,8 +48,6 @@ package object platform { private[platform] val ModuleName = lfdata.Ref.ModuleName private[platform] type LedgerString = lfdata.Ref.LedgerString private[platform] val LedgerString = lfdata.Ref.LedgerString - private[platform] type UpdateId = lfdata.Ref.LedgerString - private[platform] val UpdateId = lfdata.Ref.LedgerString private[platform] type WorkflowId = lfdata.Ref.LedgerString private[platform] val WorkflowId = lfdata.Ref.LedgerString private[platform] type SubmissionId = lfdata.Ref.SubmissionId diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/CompletionFromTransaction.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/CompletionFromTransaction.scala index 7380da47da..a5efaaf76f 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/CompletionFromTransaction.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/CompletionFromTransaction.scala @@ -10,6 +10,7 @@ import com.daml.ledger.api.v2.completion.Completion.DeduplicationPeriod.Empty import com.daml.ledger.api.v2.offset_checkpoint.SynchronizerTime import com.digitalasset.canton.data.Offset import com.digitalasset.canton.ledger.api.util.TimestampConversion.fromInstant +import com.digitalasset.canton.protocol.UpdateId import com.digitalasset.canton.tracing.SerializableTraceContextConverter.SerializableTraceContextExtension import com.digitalasset.canton.tracing.{SerializableTraceContext, TraceContext} import com.digitalasset.daml.lf.data.Time.Timestamp @@ -27,7 +28,7 @@ object CompletionFromTransaction { recordTime: Timestamp, offset: Offset, commandId: String, - updateId: String, + updateId: UpdateId, userId: String, synchronizerId: String, traceContext: TraceContext, @@ -41,7 +42,7 @@ object CompletionFromTransaction { toApiCompletion( submitters = submitters, commandId = commandId, - updateId = updateId, + updateId = updateId.toHexString, userId = userId, traceContext = traceContext, optStatus = Some(OkStatus), diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/PruningOffsetService.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/PruningOffsetService.scala new file mode 100644 index 0000000000..f2f4bdd4d6 --- /dev/null +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/PruningOffsetService.scala @@ -0,0 +1,15 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.platform.store + +import com.digitalasset.canton.data.Offset +import com.digitalasset.canton.tracing.TraceContext + +import scala.concurrent.Future + +trait PruningOffsetService { + def pruningOffset(implicit + traceContext: TraceContext + ): Future[Option[Offset]] +} diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/ScalaPbStreamingOptimizations.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/ScalaPbStreamingOptimizations.scala index e54e5bd43f..6e3c3278f7 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/ScalaPbStreamingOptimizations.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/ScalaPbStreamingOptimizations.scala @@ -20,7 +20,7 @@ object ScalaPbStreamingOptimizations { * * We've observed empirically that ScalaPB-generated message classes have associated * marshallers with significant latencies when encoding complex payloads (e.g. - * [[com.daml.ledger.api.v2.update_service.GetUpdateTreesResponse]]), with the gRPC marshalling + * [[com.daml.ledger.api.v2.update_service.GetUpdatesResponse]]), with the gRPC marshalling * bottleneck appearing in some performance tests. * * To alleviate the problem, we can leverage the fact that ScalaPB message classes have the diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/Conversions.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/Conversions.scala index cde985578a..48cb3ae024 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/Conversions.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/Conversions.scala @@ -7,6 +7,7 @@ import anorm.* import anorm.Column.nonNull import anorm.SqlParser.int import com.digitalasset.canton.data.Offset +import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective.AuthorizationEvent.{ Added, ChangedTo, @@ -21,13 +22,17 @@ import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransacti AuthorizationEvent, AuthorizationLevel, } +import com.digitalasset.canton.platform.store.interning.StringInterning +import com.digitalasset.canton.protocol.UpdateId import com.digitalasset.canton.tracing.{SerializableTraceContextConverter, TraceContext} import com.digitalasset.daml.lf.crypto.Hash import com.digitalasset.daml.lf.data.Time.Timestamp import com.digitalasset.daml.lf.data.{Bytes, Ref} import com.digitalasset.daml.lf.value.Value +import com.google.protobuf.ByteString import com.typesafe.scalalogging.Logger +import java.nio.ByteBuffer import java.sql.PreparedStatement private[backend] object Conversions { @@ -64,6 +69,12 @@ } } + def parties(stringInterning: StringInterning)(columnName: String): RowParser[Seq[Ref.Party]] = + SqlParser + .byteArray(columnName) + .map(IntArrayDBSerialization.decodeFromByteArray) + .map(_.map(stringInterning.party.externalize)) + // PackageId implicit val packageIdToStatement: ToStatement[Ref.PackageId] = @@ -169,6 +180,21 @@ .map(_.getOrElse(TraceContext.empty)) } + // UpdateId
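The Anorm conversions that follow store update_id as raw bytes instead of a string. A hedged sketch of the round-trip, using only the UpdateId operations that appear in this patch (getCryptographicEvidence, fromProtoPrimitive, toHexString); the helper names are hypothetical:

  // Hypothetical helpers mirroring what UpdateIdToStatement and
  // columnToUpdateId below do with a raw byte array
  def updateIdToDbBytes(updateId: UpdateId): Array[Byte] =
    updateId.getCryptographicEvidence.toByteArray

  def updateIdFromDbBytes(bytes: Array[Byte]): Either[String, UpdateId] =
    UpdateId.fromProtoPrimitive(ByteString.copyFrom(bytes)).left.map(_.message)

  // API-facing layers render the same value as hex, e.g. updateId.toHexString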
+ + implicit object UpdateIdToStatement extends ToStatement[UpdateId] { + override def set(s: PreparedStatement, i: Int, v: UpdateId): Unit = + s.setBytes(i, v.getCryptographicEvidence.toByteArray) + } + + private implicit val columnToUpdateId: Column[UpdateId] = + binaryColumnToX(byteArray => + UpdateId.fromProtoPrimitive(ByteString.copyFrom(byteArray)).left.map(_.message) + ) + + def updateId(columnName: String): RowParser[UpdateId] = + SqlParser.get[UpdateId](columnName)(columnToUpdateId) + // AuthorizationEvent private lazy val authorizationLevelToIntMapping: Map[AuthorizationLevel, Int] = Map( @@ -224,4 +250,37 @@ private[backend] object Conversions { authorizationEvent(eventType, level) } + object IntArrayDBSerialization { + // Ints to Byte Array (with version byte prefix) + def encodeToByteArray(ints: Set[Int]): Array[Byte] = + if (ints.nonEmpty) { + val buffer = ByteBuffer.allocate(1 + ints.size * 4) + buffer.put(1.toByte) // version byte + ints.foreach(buffer.putInt(_).discard) + buffer.array() + } else Array.emptyByteArray + + // Ints from Byte Array (with prefix version byte) + def decodeFromByteArray(bytes: Array[Byte]): Seq[Int] = + if (bytes.sizeIs > 1) { + val buf = ByteBuffer.wrap(bytes) + // first byte = version + val version = buf.get().toInt + if (version != 1) { + throw new IllegalArgumentException( + s"Decoding the bytes to integers failed. Unknown version: $version. The first byte is used as the version byte and should be set to 1." + ) + } + + // remaining are 4-byte ints + val ints = Iterator + .continually(if (buf.remaining() >= 4) Some(buf.getInt()) else None) + .takeWhile(_.isDefined) + .flatten + .toSeq + + ints + } else Seq.empty + + } } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/DbDto.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/DbDto.scala index ccdfb21de7..af933c75fd 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/DbDto.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/DbDto.scala @@ -4,6 +4,8 @@ package com.digitalasset.canton.platform.store.backend import com.daml.scalautil.NeverEqualsOverride +import com.digitalasset.canton.topology.SynchronizerId +import com.digitalasset.daml.lf.value.Value.ContractId sealed trait DbDto extends NeverEqualsOverride @@ -12,18 +14,162 @@ sealed trait DbDto object DbDto { + final case class EventActivate( + // update related columns + event_offset: Long, + update_id: Array[Byte], + workflow_id: Option[String], + command_id: Option[String], + submitters: Option[Set[String]], + record_time: Long, + synchronizer_id: SynchronizerId, + trace_context: Array[Byte], + external_transaction_hash: Option[Array[Byte]], + + // event related columns + event_type: Int, + event_sequential_id: Long, + node_id: Int, + additional_witnesses: Option[Set[String]], + source_synchronizer_id: Option[SynchronizerId], + reassignment_counter: Option[Long], + reassignment_id: Option[Array[Byte]], + representative_package_id: String, + + // contract related columns + notPersistedContractId: ContractId, // just needed for processing + internal_contract_id: Long, + create_key_hash: Option[String], + ) extends DbDto + final case class IdFilterActivateStakeholder(idFilter: IdFilter) extends IdFilterDbDto + final case class IdFilterActivateWitness(idFilter: IdFilter) extends IdFilterDbDto + + 
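Before the deactivation DTO that follows: EventDeactivate carries a deactivated_event_sequential_id pointing back at the event_sequential_id of the matching activation. As shown in the seqMapper changes above, that link is resolved from an in-memory window of not-yet-persisted activations, with a 0L placeholder left behind when the activation is older than the window; dbPrepare then resolves the placeholder against the database via lastActivations. A simplified sketch of that bookkeeping (stand-in key type; not the production code):

  import scala.collection.mutable

  object ActivationLinkSketch {
    type Key = (String, String) // stand-in for (SynchronizerId, ContractId)

    // activations not yet visible in the database, keyed to their event_sequential_id
    private val activeContracts = mutable.LinkedHashMap.empty[Key, Long]

    def recordActivation(key: Key, eventSeqId: Long): Unit =
      activeContracts.update(key, eventSeqId)

    // Some(seqId) if the activation is still in the in-memory window;
    // None means dbPrepare must look it up in the database
    def resolveDeactivation(key: Key): Option[Long] =
      activeContracts.remove(key)
  }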
final case class EventDeactivate( + // update related columns + event_offset: Long, + update_id: Array[Byte], + workflow_id: Option[String], + command_id: Option[String], + submitters: Option[Set[String]], + record_time: Long, + synchronizer_id: SynchronizerId, + trace_context: Array[Byte], + external_transaction_hash: Option[Array[Byte]], + + // event related columns + event_type: Int, + event_sequential_id: Long, + node_id: Int, + deactivated_event_sequential_id: Option[Long], + additional_witnesses: Option[Set[String]], + exercise_choice: Option[String], + exercise_choice_interface_id: Option[String], + exercise_argument: Option[Array[Byte]], + exercise_result: Option[Array[Byte]], + exercise_actors: Option[Set[String]], + exercise_last_descendant_node_id: Option[Int], + exercise_argument_compression: Option[Int], + exercise_result_compression: Option[Int], + reassignment_id: Option[Array[Byte]], + assignment_exclusivity: Option[Long], + target_synchronizer_id: Option[SynchronizerId], + reassignment_counter: Option[Long], + + // contract related columns + contract_id: ContractId, + internal_contract_id: Option[Long], + template_id: String, + package_id: String, + stakeholders: Set[String], + ledger_effective_time: Option[Long], + ) extends DbDto + final case class IdFilterDeactivateStakeholder(idFilter: IdFilter) extends IdFilterDbDto + final case class IdFilterDeactivateWitness(idFilter: IdFilter) extends IdFilterDbDto + + final case class EventVariousWitnessed( + // update related columns + event_offset: Long, + update_id: Array[Byte], + workflow_id: Option[String], + command_id: Option[String], + submitters: Option[Set[String]], + record_time: Long, + synchronizer_id: SynchronizerId, + trace_context: Array[Byte], + external_transaction_hash: Option[Array[Byte]], + + // event related columns + event_type: Int, + event_sequential_id: Long, + node_id: Int, + additional_witnesses: Set[String], + consuming: Option[Boolean], + exercise_choice: Option[String], + exercise_choice_interface_id: Option[String], + exercise_argument: Option[Array[Byte]], + exercise_result: Option[Array[Byte]], + exercise_actors: Option[Set[String]], + exercise_last_descendant_node_id: Option[Int], + exercise_argument_compression: Option[Int], + exercise_result_compression: Option[Int], + representative_package_id: Option[String], + + // contract related columns + contract_id: Option[ContractId], + internal_contract_id: Option[Long], + template_id: Option[String], + package_id: Option[String], + ledger_effective_time: Option[Long], + ) extends DbDto + + final case class IdFilterVariousWitness(idFilter: IdFilter) extends IdFilterDbDto + + sealed trait IdFilterDbDto extends DbDto { + def idFilter: IdFilter + def withEventSequentialId(id: Long): IdFilterDbDto = { + def idFilterWithEventSequentialId(idFilter: IdFilter): IdFilter = + idFilter.copy(event_sequential_id = id) + this match { + case IdFilterActivateStakeholder(idFilter) => + IdFilterActivateStakeholder(idFilterWithEventSequentialId(idFilter)) + case IdFilterActivateWitness(idFilter) => + IdFilterActivateWitness(idFilterWithEventSequentialId(idFilter)) + case IdFilterDeactivateStakeholder(idFilter) => + IdFilterDeactivateStakeholder(idFilterWithEventSequentialId(idFilter)) + case IdFilterDeactivateWitness(idFilter) => + IdFilterDeactivateWitness(idFilterWithEventSequentialId(idFilter)) + case IdFilterVariousWitness(idFilter) => + IdFilterVariousWitness(idFilterWithEventSequentialId(idFilter)) + } + } + } + final case class IdFilter( + event_sequential_id: 
Long, + template_id: String, + party_id: String, + first_per_sequential_id: Boolean, + ) { + def activateStakeholder: IdFilterActivateStakeholder = IdFilterActivateStakeholder(this) + def activateWitness: IdFilterActivateWitness = IdFilterActivateWitness(this) + def deactivateStakeholder: IdFilterDeactivateStakeholder = IdFilterDeactivateStakeholder(this) + def deactivateWitness: IdFilterDeactivateWitness = IdFilterDeactivateWitness(this) + def variousWitness: IdFilterVariousWitness = IdFilterVariousWitness(this) + } + + // TODO(#28008) remove final case class EventCreate( event_offset: Long, - update_id: String, + update_id: Array[Byte], ledger_effective_time: Long, command_id: Option[String], workflow_id: Option[String], user_id: Option[String], submitters: Option[Set[String]], node_id: Int, - contract_id: Array[Byte], + contract_id: ContractId, template_id: String, package_id: String, + representative_package_id: String, flat_event_witnesses: Set[String], tree_event_witnesses: Set[String], create_argument: Array[Byte], @@ -36,30 +182,32 @@ object DbDto { create_key_value_compression: Option[Int], event_sequential_id: Long, authentication_data: Array[Byte], - synchronizer_id: String, + synchronizer_id: SynchronizerId, trace_context: Array[Byte], record_time: Long, external_transaction_hash: Option[Array[Byte]], + internal_contract_id: Long, ) extends DbDto + // TODO(#28008) remove final case class EventExercise( consuming: Boolean, event_offset: Long, - update_id: String, + update_id: Array[Byte], ledger_effective_time: Long, command_id: Option[String], workflow_id: Option[String], user_id: Option[String], submitters: Option[Set[String]], node_id: Int, - contract_id: Array[Byte], + contract_id: ContractId, template_id: String, package_id: String, - flat_event_witnesses: Set[ - String - ], // only for consuming, for non-consuming exercise this field is omitted + // only for consuming, for non-consuming exercise this field is omitted + flat_event_witnesses: Set[String], tree_event_witnesses: Set[String], exercise_choice: String, + exercise_choice_interface_id: Option[String], exercise_argument: Array[Byte], exercise_result: Option[Array[Byte]], exercise_actors: Set[String], @@ -67,20 +215,22 @@ object DbDto { exercise_argument_compression: Option[Int], exercise_result_compression: Option[Int], event_sequential_id: Long, - synchronizer_id: String, + synchronizer_id: SynchronizerId, trace_context: Array[Byte], record_time: Long, external_transaction_hash: Option[Array[Byte]], + deactivated_event_sequential_id: Option[Long], ) extends DbDto + // TODO(#28008) remove final case class EventAssign( event_offset: Long, - update_id: String, + update_id: Array[Byte], command_id: Option[String], workflow_id: Option[String], submitter: Option[String], node_id: Int, - contract_id: Array[Byte], + contract_id: ContractId, template_id: String, package_id: String, flat_event_witnesses: Set[String], @@ -95,44 +245,47 @@ object DbDto { event_sequential_id: Long, ledger_effective_time: Long, authentication_data: Array[Byte], - source_synchronizer_id: String, - target_synchronizer_id: String, - reassignment_id: String, + source_synchronizer_id: SynchronizerId, + target_synchronizer_id: SynchronizerId, + reassignment_id: Array[Byte], reassignment_counter: Long, trace_context: Array[Byte], record_time: Long, + internal_contract_id: Long, ) extends DbDto + // TODO(#28008) remove final case class EventUnassign( event_offset: Long, - update_id: String, + update_id: Array[Byte], command_id: Option[String], 
workflow_id: Option[String], submitter: Option[String], node_id: Int, - contract_id: Array[Byte], + contract_id: ContractId, template_id: String, package_id: String, flat_event_witnesses: Set[String], event_sequential_id: Long, - source_synchronizer_id: String, - target_synchronizer_id: String, - reassignment_id: String, + source_synchronizer_id: SynchronizerId, + target_synchronizer_id: SynchronizerId, + reassignment_id: Array[Byte], reassignment_counter: Long, assignment_exclusivity: Option[Long], trace_context: Array[Byte], record_time: Long, + deactivated_event_sequential_id: Option[Long], ) extends DbDto final case class EventPartyToParticipant( event_sequential_id: Long, event_offset: Long, - update_id: String, + update_id: Array[Byte], party_id: String, participant_id: String, participant_permission: Int, participant_authorization_event: Int, - synchronizer_id: String, + synchronizer_id: SynchronizerId, record_time: Long, trace_context: Array[Byte], ) extends DbDto @@ -154,7 +307,7 @@ object DbDto { user_id: String, submitters: Set[String], command_id: String, - update_id: Option[String], + update_id: Option[Array[Byte]], rejection_status_code: Option[Int], rejection_status_message: Option[String], rejection_status_details: Option[Array[Byte]], @@ -162,7 +315,7 @@ object DbDto { deduplication_offset: Option[Long], deduplication_duration_seconds: Option[Long], deduplication_duration_nanos: Option[Int], - synchronizer_id: String, + synchronizer_id: SynchronizerId, message_uuid: Option[String], is_transaction: Boolean, trace_context: Array[Byte], @@ -178,57 +331,505 @@ object DbDto { StringInterningDto(entry._1, entry._2) } + // TODO(#28008) remove final case class IdFilterCreateStakeholder( event_sequential_id: Long, template_id: String, party_id: String, + first_per_sequential_id: Boolean, ) extends DbDto + // TODO(#28008) remove final case class IdFilterCreateNonStakeholderInformee( event_sequential_id: Long, template_id: String, party_id: String, + first_per_sequential_id: Boolean, ) extends DbDto + // TODO(#28008) remove final case class IdFilterConsumingStakeholder( event_sequential_id: Long, template_id: String, party_id: String, + first_per_sequential_id: Boolean, ) extends DbDto + // TODO(#28008) remove final case class IdFilterConsumingNonStakeholderInformee( event_sequential_id: Long, template_id: String, party_id: String, + first_per_sequential_id: Boolean, ) extends DbDto + // TODO(#28008) remove final case class IdFilterNonConsumingInformee( event_sequential_id: Long, template_id: String, party_id: String, + first_per_sequential_id: Boolean, ) extends DbDto + // TODO(#28008) remove final case class IdFilterUnassignStakeholder( event_sequential_id: Long, template_id: String, party_id: String, + first_per_sequential_id: Boolean, ) extends DbDto + // TODO(#28008) remove final case class IdFilterAssignStakeholder( event_sequential_id: Long, template_id: String, party_id: String, + first_per_sequential_id: Boolean, ) extends DbDto final case class TransactionMeta( - update_id: String, + update_id: Array[Byte], event_offset: Long, publication_time: Long, record_time: Long, - synchronizer_id: String, + synchronizer_id: SynchronizerId, event_sequential_id_first: Long, event_sequential_id_last: Long, ) extends DbDto - final case class SequencerIndexMoved(synchronizerId: String) extends DbDto + final case class SequencerIndexMoved(synchronizerId: SynchronizerId) extends DbDto + + def createDbDtos( + // update related columns + event_offset: Long, + update_id: Array[Byte], + 
workflow_id: Option[String], + command_id: Option[String], + submitters: Option[Set[String]], + record_time: Long, + synchronizer_id: SynchronizerId, + trace_context: Array[Byte], + external_transaction_hash: Option[Array[Byte]], + + // event related columns + event_sequential_id: Long, + node_id: Int, + additional_witnesses: Set[String], + representative_package_id: String, + + // contract related columns + notPersistedContractId: ContractId, + internal_contract_id: Long, + create_key_hash: Option[String], + )(stakeholders: Set[String], template_id: String): Iterator[DbDto] = + Iterator( + EventActivate( + // update related columns + event_offset = event_offset, + update_id = update_id, + workflow_id = workflow_id, + command_id = command_id, + submitters = submitters, + record_time = record_time, + synchronizer_id = synchronizer_id, + trace_context = trace_context, + external_transaction_hash = external_transaction_hash, + // event related columns + event_type = PersistentEventType.Create.asInt, + event_sequential_id = event_sequential_id, + node_id = node_id, + additional_witnesses = Some(additional_witnesses), + source_synchronizer_id = None, + reassignment_counter = None, + reassignment_id = None, + representative_package_id = representative_package_id, + // contract related columns + notPersistedContractId = notPersistedContractId, + internal_contract_id = internal_contract_id, + create_key_hash = create_key_hash, + ) + ) ++ idFilters( + event_sequential_id = event_sequential_id, + template_id = template_id, + party_ids = stakeholders.iterator, + )(_.activateStakeholder) ++ idFilters( + event_sequential_id = event_sequential_id, + template_id = template_id, + party_ids = additional_witnesses.iterator, + )(_.activateWitness) + + def assignDbDtos( + // update related columns + event_offset: Long, + update_id: Array[Byte], + workflow_id: Option[String], + command_id: Option[String], + submitter: Option[String], + record_time: Long, + synchronizer_id: SynchronizerId, + trace_context: Array[Byte], + + // event related columns + event_sequential_id: Long, + node_id: Int, + source_synchronizer_id: SynchronizerId, + reassignment_counter: Long, + reassignment_id: Array[Byte], + representative_package_id: String, + + // contract related columns + notPersistedContractId: ContractId, + internal_contract_id: Long, + )(stakeholders: Set[String], template_id: String): Iterator[DbDto] = + Iterator( + EventActivate( + // update related columns + event_offset = event_offset, + update_id = update_id, + workflow_id = workflow_id, + command_id = command_id, + submitters = submitter.map(Set(_)), + record_time = record_time, + synchronizer_id = synchronizer_id, + trace_context = trace_context, + external_transaction_hash = None, + event_type = PersistentEventType.Assign.asInt, + // event related columns + event_sequential_id = event_sequential_id, + node_id = node_id, + additional_witnesses = None, + source_synchronizer_id = Some(source_synchronizer_id), + reassignment_counter = Some(reassignment_counter), + reassignment_id = Some(reassignment_id), + representative_package_id = representative_package_id, + // contract related columns + notPersistedContractId = notPersistedContractId, + internal_contract_id = internal_contract_id, + create_key_hash = None, + ) + ) ++ idFilters( + event_sequential_id = event_sequential_id, + template_id = template_id, + party_ids = stakeholders.iterator, + )(_.activateStakeholder) + + def consumingExerciseDbDtos( + // update related columns + event_offset: Long, + update_id: 
Array[Byte], + workflow_id: Option[String], + command_id: Option[String], + submitters: Option[Set[String]], + record_time: Long, + synchronizer_id: SynchronizerId, + trace_context: Array[Byte], + external_transaction_hash: Option[Array[Byte]], + + // event related columns + event_sequential_id: Long, + node_id: Int, + deactivated_event_sequential_id: Option[Long], + additional_witnesses: Set[String], + exercise_choice: String, + exercise_choice_interface_id: Option[String], + exercise_argument: Array[Byte], + exercise_result: Option[Array[Byte]], + exercise_actors: Set[String], + exercise_last_descendant_node_id: Int, + exercise_argument_compression: Option[Int], + exercise_result_compression: Option[Int], + + // contract related columns + contract_id: ContractId, + internal_contract_id: Option[Long], + template_id: String, + package_id: String, + stakeholders: Set[String], + ledger_effective_time: Long, + ): Iterator[DbDto] = + Iterator( + EventDeactivate( + // update related columns + event_offset = event_offset, + update_id = update_id, + workflow_id = workflow_id, + command_id = command_id, + submitters = submitters, + record_time = record_time, + synchronizer_id = synchronizer_id, + trace_context = trace_context, + external_transaction_hash = external_transaction_hash, + // event related columns + event_type = PersistentEventType.ConsumingExercise.asInt, + event_sequential_id = event_sequential_id, + node_id = node_id, + deactivated_event_sequential_id = deactivated_event_sequential_id, + additional_witnesses = Some(additional_witnesses), + exercise_choice = Some(exercise_choice), + exercise_choice_interface_id = exercise_choice_interface_id, + exercise_argument = Some(exercise_argument), + exercise_result = exercise_result, + exercise_actors = Some(exercise_actors), + exercise_last_descendant_node_id = Some(exercise_last_descendant_node_id), + exercise_argument_compression = exercise_argument_compression, + exercise_result_compression = exercise_result_compression, + reassignment_id = None, + assignment_exclusivity = None, + target_synchronizer_id = None, + reassignment_counter = None, + // contract related columns + contract_id = contract_id, + internal_contract_id = internal_contract_id, + template_id = template_id, + package_id = package_id, + stakeholders = stakeholders, + ledger_effective_time = Some(ledger_effective_time), + ) + ) ++ idFilters( + event_sequential_id = event_sequential_id, + template_id = template_id, + party_ids = stakeholders.iterator, + )(_.deactivateStakeholder) ++ idFilters( + event_sequential_id = event_sequential_id, + template_id = template_id, + party_ids = additional_witnesses.iterator, + )(_.deactivateWitness) + + def unassignDbDtos( + // update related columns + event_offset: Long, + update_id: Array[Byte], + workflow_id: Option[String], + command_id: Option[String], + submitter: Option[String], + record_time: Long, + synchronizer_id: SynchronizerId, + trace_context: Array[Byte], + + // event related columns + event_sequential_id: Long, + node_id: Int, + deactivated_event_sequential_id: Option[Long], + reassignment_id: Array[Byte], + assignment_exclusivity: Option[Long], + target_synchronizer_id: SynchronizerId, + reassignment_counter: Long, + + // contract related columns + contract_id: ContractId, + internal_contract_id: Option[Long], + template_id: String, + package_id: String, + stakeholders: Set[String], + ): Iterator[DbDto] = + Iterator( + EventDeactivate( + // update related columns + event_offset = event_offset, + update_id = update_id, + 
workflow_id = workflow_id, + command_id = command_id, + submitters = submitter.map(Set(_)), + record_time = record_time, + synchronizer_id = synchronizer_id, + trace_context = trace_context, + external_transaction_hash = None, + // event related columns + event_type = PersistentEventType.Unassign.asInt, + event_sequential_id = event_sequential_id, + node_id = node_id, + deactivated_event_sequential_id = deactivated_event_sequential_id, + additional_witnesses = None, + exercise_choice = None, + exercise_choice_interface_id = None, + exercise_argument = None, + exercise_result = None, + exercise_actors = None, + exercise_last_descendant_node_id = None, + exercise_argument_compression = None, + exercise_result_compression = None, + reassignment_id = Some(reassignment_id), + assignment_exclusivity = assignment_exclusivity, + target_synchronizer_id = Some(target_synchronizer_id), + reassignment_counter = Some(reassignment_counter), + // contract related columns + contract_id = contract_id, + internal_contract_id = internal_contract_id, + template_id = template_id, + package_id = package_id, + stakeholders = stakeholders, + ledger_effective_time = None, + ) + ) ++ idFilters( + event_sequential_id = event_sequential_id, + template_id = template_id, + party_ids = stakeholders.iterator, + )(_.deactivateStakeholder) + + def witnessedCreateDbDtos( + // update related columns + event_offset: Long, + update_id: Array[Byte], + workflow_id: Option[String], + command_id: Option[String], + submitters: Option[Set[String]], + record_time: Long, + synchronizer_id: SynchronizerId, + trace_context: Array[Byte], + external_transaction_hash: Option[Array[Byte]], + + // event related columns + event_sequential_id: Long, + node_id: Int, + additional_witnesses: Set[String], + representative_package_id: String, + + // contract related columns + internal_contract_id: Long, + )(template_id: String): Iterator[DbDto] = + Iterator( + EventVariousWitnessed( + // update related columns + event_offset = event_offset, + update_id = update_id, + workflow_id = workflow_id, + command_id = command_id, + submitters = submitters, + record_time = record_time, + synchronizer_id = synchronizer_id, + trace_context = trace_context, + external_transaction_hash = external_transaction_hash, + + // event related columns + event_type = PersistentEventType.WitnessedCreate.asInt, + event_sequential_id = event_sequential_id, + node_id = node_id, + additional_witnesses = additional_witnesses, + consuming = None, + exercise_choice = None, + exercise_choice_interface_id = None, + exercise_argument = None, + exercise_result = None, + exercise_actors = None, + exercise_last_descendant_node_id = None, + exercise_argument_compression = None, + exercise_result_compression = None, + representative_package_id = Some(representative_package_id), + + // contract related columns + contract_id = None, + internal_contract_id = Some(internal_contract_id), + template_id = None, + package_id = None, + ledger_effective_time = None, + ) + ) ++ idFilters( + event_sequential_id = event_sequential_id, + template_id = template_id, + party_ids = additional_witnesses.iterator, + )(_.variousWitness) + + def witnessedExercisedDbDtos( + // update related columns + event_offset: Long, + update_id: Array[Byte], + workflow_id: Option[String], + command_id: Option[String], + submitters: Option[Set[String]], + record_time: Long, + synchronizer_id: SynchronizerId, + trace_context: Array[Byte], + external_transaction_hash: Option[Array[Byte]], + + // event related columns + 
event_sequential_id: Long, + node_id: Int, + additional_witnesses: Set[String], + consuming: Boolean, + exercise_choice: String, + exercise_choice_interface_id: Option[String], + exercise_argument: Array[Byte], + exercise_result: Option[Array[Byte]], + exercise_actors: Set[String], + exercise_last_descendant_node_id: Int, + exercise_argument_compression: Option[Int], + exercise_result_compression: Option[Int], + + // contract related columns + contract_id: ContractId, + internal_contract_id: Option[Long], + template_id: String, + package_id: String, + ledger_effective_time: Long, + ): Iterator[DbDto] = + Iterator( + EventVariousWitnessed( + // update related columns + event_offset = event_offset, + update_id = update_id, + workflow_id = workflow_id, + command_id = command_id, + submitters = submitters, + record_time = record_time, + synchronizer_id = synchronizer_id, + trace_context = trace_context, + external_transaction_hash = external_transaction_hash, + + // event related columns + event_type = + if (consuming) PersistentEventType.WitnessedConsumingExercise.asInt + else PersistentEventType.NonConsumingExercise.asInt, + event_sequential_id = event_sequential_id, + node_id = node_id, + additional_witnesses = additional_witnesses, + consuming = Some(consuming), + exercise_choice = Some(exercise_choice), + exercise_choice_interface_id = exercise_choice_interface_id, + exercise_argument = Some(exercise_argument), + exercise_result = exercise_result, + exercise_actors = Some(exercise_actors), + exercise_last_descendant_node_id = Some(exercise_last_descendant_node_id), + exercise_argument_compression = exercise_argument_compression, + exercise_result_compression = exercise_result_compression, + representative_package_id = None, + + // contract related columns + contract_id = Some(contract_id), + internal_contract_id = internal_contract_id, + template_id = Some(template_id), + package_id = Some(package_id), + ledger_effective_time = Some(ledger_effective_time), + ) + ) ++ idFilters( + event_sequential_id = event_sequential_id, + template_id = template_id, + party_ids = additional_witnesses.iterator, + )(_.variousWitness) + + def idFilters( + party_ids: Iterator[String], + template_id: String, + event_sequential_id: Long, + )(toIdFilterDbDto: IdFilter => IdFilterDbDto): Iterator[IdFilterDbDto] = + party_ids + .take(1) + .map(party_id => + IdFilter( + event_sequential_id = event_sequential_id, + template_id = template_id, + party_id = party_id, + first_per_sequential_id = true, + ) + ) + .++( + party_ids.map(party_id => + IdFilter( + event_sequential_id = event_sequential_id, + template_id = template_id, + party_id = party_id, + first_per_sequential_id = false, + ) + ) + ) + .map(toIdFilterDbDto) } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/DbDtoToStringsForInterning.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/DbDtoToStringsForInterning.scala index 7bb85d1654..2edc436342 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/DbDtoToStringsForInterning.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/DbDtoToStringsForInterning.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.platform.store.backend import com.digitalasset.canton.platform.store.interning.DomainStringIterators +import com.digitalasset.canton.topology.SynchronizerId 
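+// Editor's note - an illustrative example, not part of this change: DbDto.idFilters
+// (in DbDto.scala above) flags only the first filter row emitted per event. For
+// parties Set("alice", "bob") and event sequential id 7 it yields, in iteration order,
+//   IdFilter(7L, tpl, "alice", first_per_sequential_id = true)
+//   IdFilter(7L, tpl, "bob", first_per_sequential_id = false)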
object DbDtoToStringsForInterning { @@ -13,28 +14,30 @@ object DbDtoToStringsForInterning { parties = dbDtos.iterator.flatMap(partiesOf), synchronizerIds = dbDtos.iterator.flatMap(synchronizerIdsOf), packageIds = dbDtos.iterator.flatMap(packageIdsOf), + userIds = dbDtos.iterator.flatMap(userIdsOf), + participantIds = dbDtos.iterator.flatMap(participantIdsOf), + choiceNames = dbDtos.iterator.flatMap(choiceNamesOf), + interfaceIds = dbDtos.iterator.flatMap(interfaceIdsOf), ) private def templateIdsOf(dbDto: DbDto): Iterator[String] = dbDto match { - case dbDto: DbDto.EventExercise => - Iterator(dbDto.template_id) - - case dbDto: DbDto.EventCreate => - Iterator(dbDto.template_id) - - case dbDto: DbDto.EventUnassign => - Iterator(dbDto.template_id) - - case dbDto: DbDto.EventAssign => - Iterator(dbDto.template_id) - + case dbDto: DbDto.EventDeactivate => Iterator(dbDto.template_id) + case dbDto: DbDto.EventVariousWitnessed => dbDto.template_id.iterator + case dbDto: DbDto.EventExercise => Iterator(dbDto.template_id) + case dbDto: DbDto.EventCreate => Iterator(dbDto.template_id) + case dbDto: DbDto.EventUnassign => Iterator(dbDto.template_id) + case dbDto: DbDto.EventAssign => Iterator(dbDto.template_id) case _ => Iterator.empty } private def packageIdsOf(dbDto: DbDto): Iterator[String] = dbDto match { - case dbDto: DbDto.EventCreate => Iterator(dbDto.package_id) + case dbDto: DbDto.EventActivate => Iterator(dbDto.representative_package_id) + case dbDto: DbDto.EventDeactivate => Iterator(dbDto.package_id) + case dbDto: DbDto.EventVariousWitnessed => + dbDto.package_id.iterator ++ dbDto.representative_package_id.iterator + case dbDto: DbDto.EventCreate => Iterator(dbDto.package_id, dbDto.representative_package_id) case dbDto: DbDto.EventAssign => Iterator(dbDto.package_id) case dbDto: DbDto.EventExercise => Iterator(dbDto.package_id) case dbDto: DbDto.EventUnassign => Iterator(dbDto.package_id) @@ -43,6 +46,21 @@ object DbDtoToStringsForInterning { private def partiesOf(dbDto: DbDto): Iterator[String] = dbDto match { + case dbDto: DbDto.EventActivate => + dbDto.submitters.getOrElse(Set.empty).iterator ++ + dbDto.additional_witnesses.getOrElse(Set.empty).iterator + + case dbDto: DbDto.EventDeactivate => + dbDto.submitters.getOrElse(Set.empty).iterator ++ + dbDto.additional_witnesses.getOrElse(Set.empty).iterator ++ + dbDto.exercise_actors.getOrElse(Set.empty).iterator ++ + dbDto.stakeholders.iterator + + case dbDto: DbDto.EventVariousWitnessed => + dbDto.submitters.getOrElse(Set.empty).iterator ++ + dbDto.additional_witnesses.iterator ++ + dbDto.exercise_actors.getOrElse(Set.empty).iterator + case dbDto: DbDto.EventExercise => dbDto.submitters.getOrElse(Set.empty).iterator ++ dbDto.tree_event_witnesses.iterator ++ @@ -84,8 +102,13 @@ object DbDtoToStringsForInterning { case _ => Iterator.empty } - private def synchronizerIdsOf(dbDto: DbDto): Iterator[String] = + private def synchronizerIdsOf(dbDto: DbDto): Iterator[SynchronizerId] = dbDto match { + case dbDto: DbDto.EventActivate => + Iterator(dbDto.synchronizer_id) ++ dbDto.source_synchronizer_id.iterator + case dbDto: DbDto.EventDeactivate => + Iterator(dbDto.synchronizer_id) ++ dbDto.target_synchronizer_id.iterator + case dbDto: DbDto.EventVariousWitnessed => Iterator(dbDto.synchronizer_id) case dbDto: DbDto.EventExercise => Iterator(dbDto.synchronizer_id) case dbDto: DbDto.EventCreate => Iterator(dbDto.synchronizer_id) case dbDto: DbDto.EventUnassign => @@ -98,4 +121,32 @@ object DbDtoToStringsForInterning { case dbDto: DbDto.TransactionMeta 
=> Iterator(dbDto.synchronizer_id)
      case _ => Iterator.empty
    }
+
+  private def userIdsOf(dbDto: DbDto): Iterator[String] =
+    dbDto match {
+      case dbDto: DbDto.CommandCompletion => Iterator(dbDto.user_id)
+      case _ => Iterator.empty
+    }
+
+  private def participantIdsOf(dbDto: DbDto): Iterator[String] =
+    dbDto match {
+      case dbDto: DbDto.EventPartyToParticipant => Iterator(dbDto.participant_id)
+      case _ => Iterator.empty
+    }
+
+  private def choiceNamesOf(dbDto: DbDto): Iterator[String] =
+    dbDto match {
+      case dbDto: DbDto.EventDeactivate => dbDto.exercise_choice.iterator
+      case dbDto: DbDto.EventVariousWitnessed => dbDto.exercise_choice.iterator
+      case dbDto: DbDto.EventExercise => Iterator(dbDto.exercise_choice)
+      case _ => Iterator.empty
+    }
+
+  private def interfaceIdsOf(dbDto: DbDto): Iterator[String] =
+    dbDto match {
+      case dbDto: DbDto.EventDeactivate => dbDto.exercise_choice_interface_id.iterator
+      case dbDto: DbDto.EventVariousWitnessed => dbDto.exercise_choice_interface_id.iterator
+      case dbDto: DbDto.EventExercise => dbDto.exercise_choice_interface_id.iterator
+      case _ => Iterator.empty
+    }
 }
diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/PersistentEventType.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/PersistentEventType.scala
new file mode 100644
index 0000000000..da7e81ddb7
--- /dev/null
+++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/PersistentEventType.scala
@@ -0,0 +1,61 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.platform.store.backend
+
+sealed trait PersistentEventType extends Product with Serializable {
+  def asInt: Int
+}
+
+// WARNING! The PersistentEventType mappings are stored in the DB; changing them is only allowed in a backwards-compatible way to ensure data continuity!
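+// For example (editor's illustration): a row already persisted with event_type = 3 must
+// keep decoding via PersistentEventType.fromInt(3) to ConsumingExercise.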
+
+// Changing these should be reflected in the debug-views (see V2_1__lapi_3.0_views.sql)
+object PersistentEventType {
+  // activations
+  sealed abstract class ActivationPersistentEventType(override val asInt: Int)
+      extends PersistentEventType
+  case object Create extends ActivationPersistentEventType(1)
+  case object Assign extends ActivationPersistentEventType(2)
+  // deactivations
+  sealed abstract class DeactivationPersistentEventType(override val asInt: Int)
+      extends PersistentEventType
+  case object ConsumingExercise extends DeactivationPersistentEventType(3)
+  case object Unassign extends DeactivationPersistentEventType(4)
+  // various witnessed
+  sealed abstract class VariousWitnessedPersistentEventType(override val asInt: Int)
+      extends PersistentEventType
+  case object NonConsumingExercise extends VariousWitnessedPersistentEventType(5)
+  case object WitnessedCreate extends VariousWitnessedPersistentEventType(6)
+  case object WitnessedConsumingExercise extends VariousWitnessedPersistentEventType(7)
+  // topology transactions
+  sealed abstract class TopologyTransactionPersistentEventType(override val asInt: Int)
+      extends PersistentEventType
+  case object PartyToParticipant extends TopologyTransactionPersistentEventType(8)
+
+  val allEventTypes: Seq[PersistentEventType] = List(
+    Create,
+    Assign,
+    ConsumingExercise,
+    Unassign,
+    NonConsumingExercise,
+    WitnessedCreate,
+    WitnessedConsumingExercise,
+    PartyToParticipant,
+  )
+
+  private val fromIntMap: Map[Int, PersistentEventType] =
+    allEventTypes
+      .map(persistentEventType => persistentEventType.asInt -> persistentEventType)
+      .toMap
+
+  def fromInt(i: Int): PersistentEventType =
+    fromIntMap.getOrElse(
+      i,
+      throw new IllegalStateException(
+        s"Invalid int $i: no such PersistentEventType exists."
+      ),
+    )
+
+  assert(allEventTypes.sizeIs == 8)
+  assert(allEventTypes.toSet.sizeIs == 8)
+  assert(fromIntMap.sizeIs == 8)
+}
diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/RowDef.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/RowDef.scala
new file mode 100644
index 0000000000..de206a6eb8
--- /dev/null
+++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/RowDef.scala
@@ -0,0 +1,648 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
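+// Editor's sketch - illustrative only, with hypothetical column names: a RowDef pairs
+// the columns a query must select with the anorm RowParser that reads them back, e.g.
+//   val seqId  = RowDef.column("event_sequential_id", name => SqlParser.long(name))
+//   val offset = RowDef.column("event_offset", name => SqlParser.long(name))
+//   val both   = RowDef.combine(seqId, offset)((s, o) => s -> o)
+// both.columns deduplicates to Vector("event_sequential_id", "event_offset"), and
+// queryMultipleRows hands that comma-separated list to the caller's SQL builder before
+// parsing each row; branch additionally dispatches parsing on a discriminator value
+// (such as an event_type column).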
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.platform.store.backend + +import anorm.{Row, RowParser, SimpleSql, SqlRequestError, ~} +import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.* +import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.* + +import java.sql.Connection + +final case class RowDef[+T]( + columns: Vector[String], + rowParser: RowParser[T], +) { + def queryMultipleRows(sql: CompositeSql => SimpleSql[Row])(implicit + connection: Connection + ): Vector[T] = + sql(cSQL"#${columns.mkString(", ")}").asVectorOf(rowParser)(connection) + + def map[U](f: T => U): RowDef[U] = RowDef(columns, rowParser.map(f)) + + def branch[U, X >: T](branches: (X, RowDef[U])*): RowDef[U] = { + val branchMap = branches.toMap + RowDef( + columns = branches.flatMap(_._2.columns).++(columns).distinct.toVector, + rowParser = rowParser.flatMap { branchValue => + branchMap.get(branchValue) match { + case Some(rowDef) => rowDef.rowParser + case None => + _ => + anorm.Error( + SqlRequestError( + new IllegalStateException( + s"Cannot find suitable branch for result parsing for extracted branch value $branchValue" + ) + ) + ) + } + }, + ) + } +} + +object RowDef { + def static[T](t: T): RowDef[T] = + RowDef(Vector.empty, _ => anorm.Success(t)) + + def column[T]( + columnName: String, + rowParser: String => RowParser[T], + ): RowDef[T] = + RowDef(Vector(columnName), rowParser(columnName)) + + def combine[A, B, RESULT]( + p1: RowDef[A], + p2: RowDef[B], + )(f: (A, B) => RESULT): RowDef[RESULT] = + RowDef( + Vector(p1, p2).flatMap(_.columns).distinct, + (p1.rowParser ~ p2.rowParser).map { case r1 ~ r2 => f(r1, r2) }, + ) + + def combine[A, B, C, RESULT]( + p1: RowDef[A], + p2: RowDef[B], + p3: RowDef[C], + )(f: (A, B, C) => RESULT): RowDef[RESULT] = + RowDef( + Vector(p1, p2, p3).flatMap(_.columns).distinct, + (p1.rowParser ~ p2.rowParser ~ p3.rowParser) + map { case r1 ~ r2 ~ r3 => f(r1, r2, r3) }, + ) + + def combine[A, B, C, D, RESULT]( + p1: RowDef[A], + p2: RowDef[B], + p3: RowDef[C], + p4: RowDef[D], + )(f: (A, B, C, D) => RESULT): RowDef[RESULT] = + RowDef( + Vector(p1, p2, p3, p4).flatMap(_.columns).distinct, + (p1.rowParser ~ p2.rowParser ~ p3.rowParser ~ p4.rowParser) + map { case r1 ~ r2 ~ r3 ~ r4 => f(r1, r2, r3, r4) }, + ) + + def combine[A, B, C, D, E, RESULT]( + p1: RowDef[A], + p2: RowDef[B], + p3: RowDef[C], + p4: RowDef[D], + p5: RowDef[E], + )(f: (A, B, C, D, E) => RESULT): RowDef[RESULT] = + RowDef( + Vector(p1, p2, p3, p4, p5).flatMap(_.columns).distinct, + (p1.rowParser ~ p2.rowParser ~ p3.rowParser ~ p4.rowParser ~ p5.rowParser) + map { case r1 ~ r2 ~ r3 ~ r4 ~ r5 => f(r1, r2, r3, r4, r5) }, + ) + + def combine[A, B, C, D, E, F, RESULT]( + p1: RowDef[A], + p2: RowDef[B], + p3: RowDef[C], + p4: RowDef[D], + p5: RowDef[E], + p6: RowDef[F], + )(f: (A, B, C, D, E, F) => RESULT): RowDef[RESULT] = + RowDef( + Vector(p1, p2, p3, p4, p5, p6).flatMap(_.columns).distinct, + (p1.rowParser ~ p2.rowParser ~ p3.rowParser ~ p4.rowParser ~ p5.rowParser ~ p6.rowParser) + map { case r1 ~ r2 ~ r3 ~ r4 ~ r5 ~ r6 => f(r1, r2, r3, r4, r5, r6) }, + ) + + def combine[A, B, C, D, E, F, G, RESULT]( + p1: RowDef[A], + p2: RowDef[B], + p3: RowDef[C], + p4: RowDef[D], + p5: RowDef[E], + p6: RowDef[F], + p7: RowDef[G], + )(f: (A, B, C, D, E, F, G) => RESULT): RowDef[RESULT] = + RowDef( + Vector(p1, p2, p3, p4, p5, p6, p7).flatMap(_.columns).distinct, + (p1.rowParser ~ p2.rowParser ~ p3.rowParser ~ p4.rowParser ~ p5.rowParser ~ 
p6.rowParser ~ p7.rowParser) + map { case r1 ~ r2 ~ r3 ~ r4 ~ r5 ~ r6 ~ r7 => f(r1, r2, r3, r4, r5, r6, r7) }, + ) + + def combine[A, B, C, D, E, F, G, H, RESULT]( + p1: RowDef[A], + p2: RowDef[B], + p3: RowDef[C], + p4: RowDef[D], + p5: RowDef[E], + p6: RowDef[F], + p7: RowDef[G], + p8: RowDef[H], + )(f: (A, B, C, D, E, F, G, H) => RESULT): RowDef[RESULT] = + RowDef( + Vector(p1, p2, p3, p4, p5, p6, p7, p8).flatMap(_.columns).distinct, + (p1.rowParser ~ p2.rowParser ~ p3.rowParser ~ p4.rowParser ~ p5.rowParser ~ p6.rowParser ~ p7.rowParser ~ p8.rowParser) + map { case r1 ~ r2 ~ r3 ~ r4 ~ r5 ~ r6 ~ r7 ~ r8 => f(r1, r2, r3, r4, r5, r6, r7, r8) }, + ) + + def combine[A, B, C, D, E, F, G, H, I, RESULT]( + p1: RowDef[A], + p2: RowDef[B], + p3: RowDef[C], + p4: RowDef[D], + p5: RowDef[E], + p6: RowDef[F], + p7: RowDef[G], + p8: RowDef[H], + p9: RowDef[I], + )(f: (A, B, C, D, E, F, G, H, I) => RESULT): RowDef[RESULT] = + RowDef( + Vector(p1, p2, p3, p4, p5, p6, p7, p8, p9) + .flatMap(_.columns) + .distinct, + ( + p1.rowParser ~ + p2.rowParser ~ + p3.rowParser ~ + p4.rowParser ~ + p5.rowParser ~ + p6.rowParser ~ + p7.rowParser ~ + p8.rowParser ~ + p9.rowParser + ) map { + case r1 ~ + r2 ~ + r3 ~ + r4 ~ + r5 ~ + r6 ~ + r7 ~ + r8 ~ + r9 => + f( + r1, + r2, + r3, + r4, + r5, + r6, + r7, + r8, + r9, + ) + }, + ) + + def combine[A, B, C, D, E, F, G, H, I, J, RESULT]( + p1: RowDef[A], + p2: RowDef[B], + p3: RowDef[C], + p4: RowDef[D], + p5: RowDef[E], + p6: RowDef[F], + p7: RowDef[G], + p8: RowDef[H], + p9: RowDef[I], + p10: RowDef[J], + )(f: (A, B, C, D, E, F, G, H, I, J) => RESULT): RowDef[RESULT] = + RowDef( + Vector(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10) + .flatMap(_.columns) + .distinct, + ( + p1.rowParser ~ + p2.rowParser ~ + p3.rowParser ~ + p4.rowParser ~ + p5.rowParser ~ + p6.rowParser ~ + p7.rowParser ~ + p8.rowParser ~ + p9.rowParser ~ + p10.rowParser + ) map { + case r1 ~ + r2 ~ + r3 ~ + r4 ~ + r5 ~ + r6 ~ + r7 ~ + r8 ~ + r9 ~ + r10 => + f( + r1, + r2, + r3, + r4, + r5, + r6, + r7, + r8, + r9, + r10, + ) + }, + ) + + def combine[A, B, C, D, E, F, G, H, I, J, K, RESULT]( + p1: RowDef[A], + p2: RowDef[B], + p3: RowDef[C], + p4: RowDef[D], + p5: RowDef[E], + p6: RowDef[F], + p7: RowDef[G], + p8: RowDef[H], + p9: RowDef[I], + p10: RowDef[J], + p11: RowDef[K], + )(f: (A, B, C, D, E, F, G, H, I, J, K) => RESULT): RowDef[RESULT] = + RowDef( + Vector(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11) + .flatMap(_.columns) + .distinct, + ( + p1.rowParser ~ + p2.rowParser ~ + p3.rowParser ~ + p4.rowParser ~ + p5.rowParser ~ + p6.rowParser ~ + p7.rowParser ~ + p8.rowParser ~ + p9.rowParser ~ + p10.rowParser ~ + p11.rowParser + ) map { + case r1 ~ + r2 ~ + r3 ~ + r4 ~ + r5 ~ + r6 ~ + r7 ~ + r8 ~ + r9 ~ + r10 ~ + r11 => + f( + r1, + r2, + r3, + r4, + r5, + r6, + r7, + r8, + r9, + r10, + r11, + ) + }, + ) + + def combine[A, B, C, D, E, F, G, H, I, J, K, L, RESULT]( + p1: RowDef[A], + p2: RowDef[B], + p3: RowDef[C], + p4: RowDef[D], + p5: RowDef[E], + p6: RowDef[F], + p7: RowDef[G], + p8: RowDef[H], + p9: RowDef[I], + p10: RowDef[J], + p11: RowDef[K], + p12: RowDef[L], + )(f: (A, B, C, D, E, F, G, H, I, J, K, L) => RESULT): RowDef[RESULT] = + RowDef( + Vector(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12) + .flatMap(_.columns) + .distinct, + ( + p1.rowParser ~ + p2.rowParser ~ + p3.rowParser ~ + p4.rowParser ~ + p5.rowParser ~ + p6.rowParser ~ + p7.rowParser ~ + p8.rowParser ~ + p9.rowParser ~ + p10.rowParser ~ + p11.rowParser ~ + p12.rowParser + ) map { + case r1 ~ + r2 ~ + r3 ~ + r4 ~ + r5 ~ + r6 ~ + 
r7 ~ + r8 ~ + r9 ~ + r10 ~ + r11 ~ + r12 => + f( + r1, + r2, + r3, + r4, + r5, + r6, + r7, + r8, + r9, + r10, + r11, + r12, + ) + }, + ) + + def combine[A, B, C, D, E, F, G, H, I, J, K, L, M, RESULT]( + p1: RowDef[A], + p2: RowDef[B], + p3: RowDef[C], + p4: RowDef[D], + p5: RowDef[E], + p6: RowDef[F], + p7: RowDef[G], + p8: RowDef[H], + p9: RowDef[I], + p10: RowDef[J], + p11: RowDef[K], + p12: RowDef[L], + p13: RowDef[M], + )(f: (A, B, C, D, E, F, G, H, I, J, K, L, M) => RESULT): RowDef[RESULT] = + RowDef( + Vector(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13) + .flatMap(_.columns) + .distinct, + ( + p1.rowParser ~ + p2.rowParser ~ + p3.rowParser ~ + p4.rowParser ~ + p5.rowParser ~ + p6.rowParser ~ + p7.rowParser ~ + p8.rowParser ~ + p9.rowParser ~ + p10.rowParser ~ + p11.rowParser ~ + p12.rowParser ~ + p13.rowParser + ) map { + case r1 ~ + r2 ~ + r3 ~ + r4 ~ + r5 ~ + r6 ~ + r7 ~ + r8 ~ + r9 ~ + r10 ~ + r11 ~ + r12 ~ + r13 => + f( + r1, + r2, + r3, + r4, + r5, + r6, + r7, + r8, + r9, + r10, + r11, + r12, + r13, + ) + }, + ) + + def combine[A, B, C, D, E, F, G, H, I, J, K, L, M, N, RESULT]( + p1: RowDef[A], + p2: RowDef[B], + p3: RowDef[C], + p4: RowDef[D], + p5: RowDef[E], + p6: RowDef[F], + p7: RowDef[G], + p8: RowDef[H], + p9: RowDef[I], + p10: RowDef[J], + p11: RowDef[K], + p12: RowDef[L], + p13: RowDef[M], + p14: RowDef[N], + )(f: (A, B, C, D, E, F, G, H, I, J, K, L, M, N) => RESULT): RowDef[RESULT] = + RowDef( + Vector(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14) + .flatMap(_.columns) + .distinct, + ( + p1.rowParser ~ + p2.rowParser ~ + p3.rowParser ~ + p4.rowParser ~ + p5.rowParser ~ + p6.rowParser ~ + p7.rowParser ~ + p8.rowParser ~ + p9.rowParser ~ + p10.rowParser ~ + p11.rowParser ~ + p12.rowParser ~ + p13.rowParser ~ + p14.rowParser + ) map { + case r1 ~ + r2 ~ + r3 ~ + r4 ~ + r5 ~ + r6 ~ + r7 ~ + r8 ~ + r9 ~ + r10 ~ + r11 ~ + r12 ~ + r13 ~ + r14 => + f( + r1, + r2, + r3, + r4, + r5, + r6, + r7, + r8, + r9, + r10, + r11, + r12, + r13, + r14, + ) + }, + ) + + def combine[A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, RESULT]( + p1: RowDef[A], + p2: RowDef[B], + p3: RowDef[C], + p4: RowDef[D], + p5: RowDef[E], + p6: RowDef[F], + p7: RowDef[G], + p8: RowDef[H], + p9: RowDef[I], + p10: RowDef[J], + p11: RowDef[K], + p12: RowDef[L], + p13: RowDef[M], + p14: RowDef[N], + p15: RowDef[O], + )(f: (A, B, C, D, E, F, G, H, I, J, K, L, M, N, O) => RESULT): RowDef[RESULT] = + RowDef( + Vector(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15) + .flatMap(_.columns) + .distinct, + ( + p1.rowParser ~ + p2.rowParser ~ + p3.rowParser ~ + p4.rowParser ~ + p5.rowParser ~ + p6.rowParser ~ + p7.rowParser ~ + p8.rowParser ~ + p9.rowParser ~ + p10.rowParser ~ + p11.rowParser ~ + p12.rowParser ~ + p13.rowParser ~ + p14.rowParser ~ + p15.rowParser + ) map { + case r1 ~ + r2 ~ + r3 ~ + r4 ~ + r5 ~ + r6 ~ + r7 ~ + r8 ~ + r9 ~ + r10 ~ + r11 ~ + r12 ~ + r13 ~ + r14 ~ + r15 => + f( + r1, + r2, + r3, + r4, + r5, + r6, + r7, + r8, + r9, + r10, + r11, + r12, + r13, + r14, + r15, + ) + }, + ) + + def combine[A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, RESULT]( + p1: RowDef[A], + p2: RowDef[B], + p3: RowDef[C], + p4: RowDef[D], + p5: RowDef[E], + p6: RowDef[F], + p7: RowDef[G], + p8: RowDef[H], + p9: RowDef[I], + p10: RowDef[J], + p11: RowDef[K], + p12: RowDef[L], + p13: RowDef[M], + p14: RowDef[N], + p15: RowDef[O], + p16: RowDef[P], + )(f: (A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P) => RESULT): RowDef[RESULT] = + RowDef( + Vector(p1, p2, p3, p4, p5, p6, p7, p8, p9, 
p10, p11, p12, p13, p14, p15, p16) + .flatMap(_.columns) + .distinct, + ( + p1.rowParser ~ + p2.rowParser ~ + p3.rowParser ~ + p4.rowParser ~ + p5.rowParser ~ + p6.rowParser ~ + p7.rowParser ~ + p8.rowParser ~ + p9.rowParser ~ + p10.rowParser ~ + p11.rowParser ~ + p12.rowParser ~ + p13.rowParser ~ + p14.rowParser ~ + p15.rowParser ~ + p16.rowParser + ) map { + case r1 ~ + r2 ~ + r3 ~ + r4 ~ + r5 ~ + r6 ~ + r7 ~ + r8 ~ + r9 ~ + r10 ~ + r11 ~ + r12 ~ + r13 ~ + r14 ~ + r15 ~ + r16 => + f( + r1, + r2, + r3, + r4, + r5, + r6, + r7, + r8, + r9, + r10, + r11, + r12, + r13, + r14, + r15, + r16, + ) + }, + ) +} diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/StorageBackend.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/StorageBackend.scala index 4a433067f4..5e20c7c094 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/StorageBackend.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/StorageBackend.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.platform.store.backend import com.daml.ledger.api.v2.command_completion_service.CompletionStreamResponse +import com.digitalasset.canton.LfPackageId import com.digitalasset.canton.data.{CantonTimestamp, Offset} import com.digitalasset.canton.ledger.api.ParticipantId import com.digitalasset.canton.ledger.participant.state.SynchronizerIndex @@ -12,27 +13,19 @@ import com.digitalasset.canton.ledger.participant.state.index.IndexerPartyDetail import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.platform.* import com.digitalasset.canton.platform.indexer.parallel.PostPublishData -import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{ - Entry, - RawActiveContract, - RawAssignEvent, - RawFlatEvent, - RawParticipantAuthorization, - RawTreeEvent, - RawUnassignEvent, - SequentialIdBatch, - SynchronizerOffset, - UnassignProperties, -} +import com.digitalasset.canton.platform.store.backend.EventStorageBackend.* import com.digitalasset.canton.platform.store.backend.ParameterStorageBackend.PruneUptoInclusiveAndLedgerEnd import com.digitalasset.canton.platform.store.backend.common.{ EventPayloadSourceForUpdatesAcsDelta, + EventPayloadSourceForUpdatesAcsDeltaLegacy, EventPayloadSourceForUpdatesLedgerEffects, + EventPayloadSourceForUpdatesLedgerEffectsLegacy, EventReaderQueries, UpdatePointwiseQueries, UpdateStreamingQueries, } import com.digitalasset.canton.platform.store.backend.postgresql.PostgresDataSourceConfig +import com.digitalasset.canton.platform.store.dao.PaginatingAsyncStream.PaginationInput import com.digitalasset.canton.platform.store.interfaces.LedgerDaoContractsReader.KeyState import com.digitalasset.canton.platform.store.interning.StringInterning import com.digitalasset.canton.topology.SynchronizerId @@ -217,6 +210,24 @@ trait CompletionStorageBackend { trait ContractStorageBackend { + /** Batch lookup of key states + * + * If the backend does not support batch lookups, the implementation will fall back to sequential + * lookups + */ + def keyStatesNew(keys: Seq[Key], validAtEventSeqId: Long)(connection: Connection): Map[Key, Long] + + /** Sequential lookup of key states */ + def keyStateNew(key: Key, validAtEventSeqId: Long)(connection: Connection): Option[Long] + + def activeContractsNew(internalContractIds: Seq[Long], beforeEventSeqId: Long)( 
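+      // (Editor's note; semantics inferred from the signature, not stated in this
+      // change: yields, for each requested internal contract id, whether that contract
+      // is still active relative to the beforeEventSeqId bound.)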
+ connection: Connection + ): Map[Long, Boolean] + + def lastActivationsNew(synchronizerContracts: Iterable[(SynchronizerId, Long)])( + connection: Connection + ): Map[(SynchronizerId, Long), Long] + /** Returns true if the batch lookup is implemented */ def supportsBatchKeyStateLookups: Boolean @@ -225,20 +236,24 @@ trait ContractStorageBackend { * If the backend does not support batch lookups, the implementation will fall back to sequential * lookups */ - def keyStates(keys: Seq[Key], validAt: Offset)(connection: Connection): Map[Key, KeyState] + def keyStates(keys: Seq[Key], validAtEventSeqId: Long)(connection: Connection): Map[Key, KeyState] /** Sequential lookup of key states */ - def keyState(key: Key, validAt: Offset)(connection: Connection): KeyState + def keyState(key: Key, validAtEventSeqId: Long)(connection: Connection): KeyState - def archivedContracts(contractIds: Seq[ContractId], before: Offset)( + def archivedContracts(contractIds: Seq[ContractId], beforeEventSeqId: Long)( connection: Connection - ): Map[ContractId, ContractStorageBackend.RawArchivedContract] - def createdContracts(contractIds: Seq[ContractId], before: Offset)( + ): Set[ContractId] + def createdContracts(contractIds: Seq[ContractId], beforeEventSeqId: Long)( connection: Connection - ): Map[ContractId, ContractStorageBackend.RawCreatedContract] - def assignedContracts(contractIds: Seq[ContractId], before: Offset)( + ): Set[ContractId] + def assignedContracts(contractIds: Seq[ContractId], beforeEventSeqId: Long)( connection: Connection - ): Map[ContractId, ContractStorageBackend.RawCreatedContract] + ): Set[ContractId] + + def lastActivations(synchronizerContracts: Iterable[(SynchronizerId, ContractId)])( + connection: Connection + ): Map[(SynchronizerId, ContractId), Long] } object ContractStorageBackend { @@ -272,7 +287,7 @@ trait EventStorageBackend { /** Part of pruning process, this needs to be in the same transaction as the other pruning related * database operations */ - def pruneEvents( + def pruneEventsLegacy( pruneUpToInclusive: Offset, incompleteReassignmentOffsets: Vector[Offset], )(implicit @@ -280,57 +295,77 @@ trait EventStorageBackend { traceContext: TraceContext, ): Unit - def activeContractCreateEventBatch( + /** Part of pruning process, this needs to be in the same transaction as the other pruning related + * database operations + */ + def pruneEvents( + pruneUpToInclusive: Offset, + incompleteReassignmentOffsets: Vector[Offset], + )(implicit + connection: Connection, + traceContext: TraceContext, + ): Unit = + ??? 
// TODO(#28005): Implement + + def activeContractBatch( + eventSequentialIds: Iterable[Long], + allFilterParties: Option[Set[Party]], + endInclusive: Long, + )(connection: Connection): Vector[RawThinActiveContract] + + def activeContractCreateEventBatchLegacy( eventSequentialIds: Iterable[Long], allFilterParties: Option[Set[Party]], endInclusive: Long, - )(connection: Connection): Vector[RawActiveContract] + )(connection: Connection): Vector[RawActiveContractLegacy] - def activeContractAssignEventBatch( + def activeContractAssignEventBatchLegacy( eventSequentialIds: Iterable[Long], allFilterParties: Option[Set[Party]], endInclusive: Long, - )(connection: Connection): Vector[RawActiveContract] + )(connection: Connection): Vector[RawActiveContractLegacy] - def fetchAssignEventIdsForStakeholder( + def fetchAssignEventIdsForStakeholderLegacy( stakeholderO: Option[Party], templateId: Option[NameTypeConRef], - startExclusive: Long, - endInclusive: Long, - limit: Int, - )(connection: Connection): Vector[Long] + )(connection: Connection): PaginationInput => Vector[Long] - def fetchUnassignEventIdsForStakeholder( + def fetchUnassignEventIdsForStakeholderLegacy( stakeholderO: Option[Party], templateId: Option[NameTypeConRef], - startExclusive: Long, - endInclusive: Long, - limit: Int, - )(connection: Connection): Vector[Long] + )(connection: Connection): PaginationInput => Vector[Long] - def assignEventBatch( + def assignEventBatchLegacy( eventSequentialIds: SequentialIdBatch, allFilterParties: Option[Set[Party]], - )(connection: Connection): Vector[Entry[RawAssignEvent]] + )(connection: Connection): Vector[Entry[RawAssignEventLegacy]] - def unassignEventBatch( + def unassignEventBatchLegacy( eventSequentialIds: SequentialIdBatch, allFilterParties: Option[Set[Party]], - )(connection: Connection): Vector[Entry[RawUnassignEvent]] + )(connection: Connection): Vector[Entry[RawUnassignEventLegacy]] def lookupAssignSequentialIdByOffset( offsets: Iterable[Long] )(connection: Connection): Vector[Long] + def lookupAssignSequentialIdByOffsetLegacy( + offsets: Iterable[Long] + )(connection: Connection): Vector[Long] + def lookupUnassignSequentialIdByOffset( offsets: Iterable[Long] )(connection: Connection): Vector[Long] - def lookupAssignSequentialIdBy( + def lookupUnassignSequentialIdByOffsetLegacy( + offsets: Iterable[Long] + )(connection: Connection): Vector[Long] + + def lookupAssignSequentialIdByLegacy( unassignProperties: Iterable[UnassignProperties] )(connection: Connection): Map[UnassignProperties, Long] - def lookupCreateSequentialIdByContractId( + def lookupCreateSequentialIdByContractIdLegacy( contractIds: Iterable[ContractId] )(connection: Connection): Vector[Long] @@ -364,16 +399,20 @@ trait EventStorageBackend { beforeOrAtRecordTimeInclusive: Timestamp, )(connection: Connection): Option[SynchronizerOffset] - def archivals(fromExclusive: Option[Offset], toInclusive: Offset)( + /** The contracts which were archived or participant-divulged in the specified range. These are + * the contracts in the ContractStore, which can be pruned in a single-synchronizer setup. 
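+    * (Editor's note, an inference from this change's schema: the returned Longs appear
+    * to be internal contract ids, matching the new internal_contract_id columns.)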
+ */ + def prunableContracts(fromExclusive: Option[Offset], toInclusive: Offset)( + connection: Connection + ): Set[Long] + + def archivalsLegacy(fromExclusive: Option[Offset], toInclusive: Offset)( connection: Connection ): Set[ContractId] - def fetchTopologyPartyEventIds( - party: Option[Party], - startExclusive: Long, - endInclusive: Long, - limit: Int, - )(connection: Connection): Vector[Long] + def fetchTopologyPartyEventIds(party: Option[Party])( + connection: Connection + ): PaginationInput => Vector[Long] def topologyPartyEventBatch( eventSequentialIds: SequentialIdBatch @@ -387,21 +426,223 @@ trait EventStorageBackend { def fetchEventPayloadsAcsDelta(target: EventPayloadSourceForUpdatesAcsDelta)( eventSequentialIds: SequentialIdBatch, requestingParties: Option[Set[Party]], - )(connection: Connection): Vector[Entry[RawFlatEvent]] + )(connection: Connection): Vector[RawThinAcsDeltaEvent] + + def fetchEventPayloadsAcsDeltaLegacy(target: EventPayloadSourceForUpdatesAcsDeltaLegacy)( + eventSequentialIds: SequentialIdBatch, + requestingParties: Option[Set[Party]], + )(connection: Connection): Vector[Entry[RawAcsDeltaEventLegacy]] def fetchEventPayloadsLedgerEffects(target: EventPayloadSourceForUpdatesLedgerEffects)( eventSequentialIds: SequentialIdBatch, requestingParties: Option[Set[Ref.Party]], - )(connection: Connection): Vector[Entry[RawTreeEvent]] + )(connection: Connection): Vector[RawThinLedgerEffectsEvent] + def fetchEventPayloadsLedgerEffectsLegacy( + target: EventPayloadSourceForUpdatesLedgerEffectsLegacy + )( + eventSequentialIds: SequentialIdBatch, + requestingParties: Option[Set[Ref.Party]], + )(connection: Connection): Vector[Entry[RawLedgerEffectsEventLegacy]] } object EventStorageBackend { + sealed trait RawEvent extends Product with Serializable { + def templateId: FullIdentifier + def witnessParties: Set[String] + } + + sealed trait RawAcsDeltaEvent extends RawEvent + sealed trait RawLedgerEffectsEvent extends RawEvent + + sealed trait RawReassignmentEvent extends RawEvent { + def reassignmentProperties: ReassignmentProperties + } + + sealed trait RawTransactionEvent extends RawEvent { + def ledgerEffectiveTime: Timestamp + def transactionProperties: TransactionProperties + } + + sealed trait RawThinEvent extends Product with Serializable + + sealed trait RawThinAcsDeltaEvent extends RawThinEvent + sealed trait RawThinLedgerEffectsEvent extends RawThinEvent + + sealed trait RawThinTransactionEvent extends RawThinEvent + sealed trait RawThinReassignmentEvent extends RawThinEvent + + final case class CommonEventProperties( + eventSequentialId: Long, + offset: Long, + nodeId: Int, + workflowId: Option[String], + synchronizerId: String, + ) + + final case class CommonUpdateProperties( + updateId: String, + commandId: Option[String], + traceContext: Array[Byte], + recordTime: Timestamp, + ) + + final case class TransactionProperties( + commonEventProperties: CommonEventProperties, + commonUpdateProperties: CommonUpdateProperties, + externalTransactionHash: Option[Array[Byte]], + ) + + final case class ReassignmentProperties( + commonEventProperties: CommonEventProperties, + commonUpdateProperties: CommonUpdateProperties, + reassignmentId: String, + submitter: Option[String], + reassignmentCounter: Long, + ) + + final case class ThinCreatedEventProperties( + representativePackageId: String, + filteredAdditionalWitnessParties: Set[String], + internalContractId: Long, + requestingParties: Option[Set[String]], + reassignmentCounter: Long, + acsDelta: Boolean, + ) + + final case 
class FatCreatedEventProperties( + thinCreatedEventProperties: ThinCreatedEventProperties, + fatContract: FatContract, + ) { + def templateId: FullIdentifier = + fatContract.templateId.toFullIdentifier(fatContract.packageName) + + def witnessParties: Set[String] = + thinCreatedEventProperties.filteredAdditionalWitnessParties.iterator + .++(fatContract.stakeholders.iterator.map(_.toString)) + .filter(party => + thinCreatedEventProperties.requestingParties match { + case Some(requestingParties) => requestingParties.contains(party) + case None => true + } + ) + .toSet + } + + final case class RawThinActiveContract( + commonEventProperties: CommonEventProperties, + thinCreatedEventProperties: ThinCreatedEventProperties, + ) extends RawThinEvent + + final case class RawFatActiveContract( + commonEventProperties: CommonEventProperties, + fatCreatedEventProperties: FatCreatedEventProperties, + ) extends RawEvent { + override def templateId: FullIdentifier = fatCreatedEventProperties.templateId + + override def witnessParties: Set[String] = fatCreatedEventProperties.witnessParties + } + + final case class RawThinCreatedEvent( + transactionProperties: TransactionProperties, + thinCreatedEventProperties: ThinCreatedEventProperties, + ) extends RawThinAcsDeltaEvent + with RawThinLedgerEffectsEvent + with RawThinTransactionEvent + + final case class RawFatCreatedEvent( + transactionProperties: TransactionProperties, + fatCreatedEventProperties: FatCreatedEventProperties, + ) extends RawAcsDeltaEvent + with RawLedgerEffectsEvent + with RawTransactionEvent { + override def templateId: FullIdentifier = fatCreatedEventProperties.templateId + + override def witnessParties: Set[String] = fatCreatedEventProperties.witnessParties + + override def ledgerEffectiveTime: Timestamp = + fatCreatedEventProperties.fatContract.createdAt.time + } + + final case class RawThinAssignEvent( + reassignmentProperties: ReassignmentProperties, + thinCreatedEventProperties: ThinCreatedEventProperties, + sourceSynchronizerId: String, + ) extends RawThinAcsDeltaEvent + with RawThinLedgerEffectsEvent + with RawThinReassignmentEvent + + final case class RawFatAssignEvent( + reassignmentProperties: ReassignmentProperties, + fatCreatedEventProperties: FatCreatedEventProperties, + ) extends RawAcsDeltaEvent + with RawLedgerEffectsEvent + with RawReassignmentEvent { + override def templateId: FullIdentifier = fatCreatedEventProperties.templateId + + override def witnessParties: Set[String] = fatCreatedEventProperties.witnessParties + } + + final case class RawArchivedEvent( + transactionProperties: TransactionProperties, + contractId: ContractId, + templateId: FullIdentifier, + filteredStakeholderParties: Set[String], + ledgerEffectiveTime: Timestamp, + ) extends RawAcsDeltaEvent + with RawTransactionEvent + with RawThinAcsDeltaEvent + with RawThinTransactionEvent { + override def witnessParties: Set[String] = filteredStakeholderParties + } + + final case class RawExercisedEvent( + transactionProperties: TransactionProperties, + contractId: ContractId, + templateId: FullIdentifier, + exerciseConsuming: Boolean, + exerciseChoice: ChoiceName, + exerciseChoiceInterface: Option[Ref.Identifier], + exerciseArgument: Array[Byte], + exerciseArgumentCompression: Option[Int], + exerciseResult: Option[Array[Byte]], + exerciseResultCompression: Option[Int], + exerciseActors: Set[String], + exerciseLastDescendantNodeId: Int, + filteredAdditionalWitnessParties: Set[String], + filteredStakeholderParties: Set[String], + ledgerEffectiveTime: Timestamp, 
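+      // (Editor's note, inferred from the RawFlatEvent -> RawAcsDeltaEvent renaming in
+      // this diff: acsDelta appears to mark events that also surface on the ACS-delta,
+      // i.e. formerly "flat", stream.)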
+ acsDelta: Boolean, + ) extends RawLedgerEffectsEvent + with RawTransactionEvent + with RawThinLedgerEffectsEvent + with RawThinTransactionEvent { + override def witnessParties: Set[String] = + filteredStakeholderParties ++ filteredAdditionalWitnessParties + } + + final case class RawUnassignEvent( + reassignmentProperties: ReassignmentProperties, + contractId: ContractId, + templateId: FullIdentifier, + filteredStakeholderParties: Set[String], + assignmentExclusivity: Option[Timestamp], + targetSynchronizerId: String, + ) extends RawAcsDeltaEvent + with RawLedgerEffectsEvent + with RawReassignmentEvent + with RawThinAcsDeltaEvent + with RawThinLedgerEffectsEvent + with RawThinReassignmentEvent { + override def witnessParties: Set[String] = filteredStakeholderParties + } + final case class Entry[+E]( offset: Long, + nodeId: Int, updateId: String, eventSequentialId: Long, - ledgerEffectiveTime: Timestamp, + ledgerEffectiveTime: Option[Timestamp], commandId: Option[String], workflowId: Option[String], synchronizerId: String, @@ -409,25 +650,24 @@ object EventStorageBackend { recordTime: Timestamp, externalTransactionHash: Option[Array[Byte]], event: E, - ) + ) { + def map[T](f: E => T): Entry[T] = this.copy(event = f(event)) + def withEvent[T](t: T): Entry[T] = this.copy(event = t) + } - sealed trait RawEvent { + sealed trait RawEventLegacy { def templateId: FullIdentifier def witnessParties: Set[String] } - // TODO(#23504) rename to RawAcsDeltaEvent? - sealed trait RawFlatEvent extends RawEvent - // TODO(#23504) rename to RawLedgerEffectsEvent? - sealed trait RawTreeEvent extends RawEvent + sealed trait RawAcsDeltaEventLegacy extends RawEventLegacy + sealed trait RawLedgerEffectsEventLegacy extends RawEventLegacy - sealed trait RawReassignmentEvent extends RawEvent + sealed trait RawReassignmentEventLegacy extends RawEventLegacy - final case class RawCreatedEvent( - updateId: String, - offset: Long, - nodeId: Int, + final case class RawCreatedEventLegacy( contractId: ContractId, templateId: FullIdentifier, + representativePackageId: LfPackageId, witnessParties: Set[String], flatEventWitnesses: Set[String], signatories: Set[String], @@ -440,26 +680,22 @@ object EventStorageBackend { ledgerEffectiveTime: Timestamp, createKeyHash: Option[Hash], authenticationData: Array[Byte], - ) extends RawFlatEvent - with RawTreeEvent + internalContractId: Long, + ) extends RawAcsDeltaEventLegacy + with RawLedgerEffectsEventLegacy - final case class RawArchivedEvent( - updateId: String, - offset: Long, - nodeId: Int, + final case class RawArchivedEventLegacy( contractId: ContractId, templateId: FullIdentifier, witnessParties: Set[String], - ) extends RawFlatEvent + ) extends RawAcsDeltaEventLegacy - final case class RawExercisedEvent( - updateId: String, - offset: Long, - nodeId: Int, + final case class RawExercisedEventLegacy( contractId: ContractId, templateId: FullIdentifier, exerciseConsuming: Boolean, - exerciseChoice: String, + exerciseChoice: ChoiceName, + exerciseChoiceInterface: Option[Ref.Identifier], exerciseArgument: Array[Byte], exerciseArgumentCompression: Option[Int], exerciseResult: Option[Array[Byte]], @@ -468,17 +704,19 @@ object EventStorageBackend { exerciseLastDescendantNodeId: Int, witnessParties: Set[String], flatEventWitnesses: Set[String], - ) extends RawTreeEvent + ) extends RawLedgerEffectsEventLegacy - final case class RawActiveContract( + final case class RawActiveContractLegacy( workflowId: Option[String], synchronizerId: String, reassignmentCounter: Long, - rawCreatedEvent: 
RawCreatedEvent, + rawCreatedEvent: RawCreatedEventLegacy, eventSequentialId: Long, + nodeId: Int, + offset: Long, ) - final case class RawUnassignEvent( + final case class RawUnassignEventLegacy( sourceSynchronizerId: String, targetSynchronizerId: String, reassignmentId: String, @@ -488,17 +726,16 @@ object EventStorageBackend { templateId: FullIdentifier, witnessParties: Set[String], assignmentExclusivity: Option[Timestamp], - nodeId: Int, - ) extends RawReassignmentEvent + ) extends RawReassignmentEventLegacy - final case class RawAssignEvent( + final case class RawAssignEventLegacy( sourceSynchronizerId: String, targetSynchronizerId: String, reassignmentId: String, submitter: Option[String], reassignmentCounter: Long, - rawCreatedEvent: RawCreatedEvent, - ) extends RawReassignmentEvent { + rawCreatedEvent: RawCreatedEventLegacy, + ) extends RawReassignmentEventLegacy { override def templateId: FullIdentifier = rawCreatedEvent.templateId override def witnessParties: Set[String] = rawCreatedEvent.witnessParties } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/StorageBackendFactory.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/StorageBackendFactory.scala index 0f4a541be4..a6d5a7ca87 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/StorageBackendFactory.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/StorageBackendFactory.scala @@ -25,7 +25,8 @@ trait StorageBackendFactory { loggerFactory: NamedLoggerFactory, ): CompletionStorageBackend def createContractStorageBackend( - stringInterning: StringInterning + stringInterning: StringInterning, + ledgerEndCache: LedgerEndCache, ): ContractStorageBackend def createEventStorageBackend( ledgerEndCache: LedgerEndCache, @@ -48,7 +49,7 @@ trait StorageBackendFactory { ReadStorageBackend( partyStorageBackend = createPartyStorageBackend(ledgerEndCache), completionStorageBackend = createCompletionStorageBackend(stringInterning, loggerFactory), - contractStorageBackend = createContractStorageBackend(stringInterning), + contractStorageBackend = createContractStorageBackend(stringInterning, ledgerEndCache), eventStorageBackend = createEventStorageBackend(ledgerEndCache, stringInterning, loggerFactory), ) diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDto.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDto.scala index ea5813692a..0a939f3508 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDto.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDto.scala @@ -6,13 +6,13 @@ package com.digitalasset.canton.platform.store.backend import com.daml.metrics.api.MetricsContext import com.daml.metrics.api.MetricsContext.{withExtraMetricLabels, withOptionalMetricLabels} import com.daml.platform.v1.index.StatusDetails -import com.digitalasset.canton.data import com.digitalasset.canton.data.DeduplicationPeriod.{DeduplicationDuration, DeduplicationOffset} import com.digitalasset.canton.data.Offset import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective.{ AuthorizationEvent, 
TopologyEvent, } +import com.digitalasset.canton.ledger.participant.state.Update.TransactionAccepted.RepresentativePackageIds import com.digitalasset.canton.ledger.participant.state.{CompletionInfo, Reassignment, Update} import com.digitalasset.canton.metrics.{IndexerMetrics, LedgerApiServerMetrics} import com.digitalasset.canton.platform.* @@ -24,6 +24,8 @@ import com.digitalasset.canton.platform.store.backend.Conversions.{ } import com.digitalasset.canton.platform.store.dao.JdbcLedgerDao import com.digitalasset.canton.platform.store.dao.events.* +import com.digitalasset.canton.protocol.UpdateId +import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.tracing.SerializableTraceContext import com.digitalasset.canton.tracing.SerializableTraceContextConverter.SerializableTraceContextExtension import com.digitalasset.daml.lf.data.Ref.PackageRef @@ -82,8 +84,6 @@ object UpdateToDbDto { case u: ReassignmentAccepted => reassignmentAcceptedToDbDto( - translation = translation, - compressionStrategy = compressionStrategy, metrics = metrics, offset = offset, serializedTraceContext = serializedTraceContext, @@ -92,7 +92,7 @@ object UpdateToDbDto { case u: SequencerIndexMoved => // nothing to persist, this is only a synthetic DbDto to facilitate updating the StringInterning - Iterator(DbDto.SequencerIndexMoved(u.synchronizerId.toProtoPrimitive)) + Iterator(DbDto.SequencerIndexMoved(u.synchronizerId)) case _: EmptyAcsPublicationRequired => Iterator.empty case _: LogicalSynchronizerUpgradeTimeReached => Iterator.empty @@ -131,7 +131,7 @@ object UpdateToDbDto { recordTime = commandRejected.recordTime.toLf, updateId = None, completionInfo = commandRejected.completionInfo, - synchronizerId = commandRejected.synchronizerId.toProtoPrimitive, + synchronizerId = commandRejected.synchronizerId, messageUuid = messageUuid, serializedTraceContext = serializedTraceContext, isTransaction = @@ -183,11 +183,11 @@ object UpdateToDbDto { ) val transactionMeta = DbDto.TransactionMeta( - update_id = topologyTransaction.updateId, + update_id = topologyTransaction.updateId.toProtoPrimitive.toByteArray, event_offset = offset.unwrap, publication_time = 0, // this is filled later record_time = topologyTransaction.recordTime.toMicros, - synchronizer_id = topologyTransaction.synchronizerId.toProtoPrimitive, + synchronizer_id = topologyTransaction.synchronizerId, event_sequential_id_first = 0, // this is filled later event_sequential_id_last = 0, // this is filled later ) @@ -199,12 +199,12 @@ object UpdateToDbDto { DbDto.EventPartyToParticipant( event_sequential_id = 0, // this is filled later event_offset = offset.unwrap, - update_id = topologyTransaction.updateId, + update_id = topologyTransaction.updateId.toProtoPrimitive.toByteArray, party_id = party, participant_id = participant, participant_permission = participantPermissionInt(authorizationEvent), participant_authorization_event = authorizationEventInt(authorizationEvent), - synchronizer_id = topologyTransaction.synchronizerId.toProtoPrimitive, + synchronizer_id = topologyTransaction.synchronizerId, record_time = topologyTransaction.recordTime.toMicros, trace_context = serializedTraceContext, ) @@ -216,7 +216,7 @@ object UpdateToDbDto { ledger_offset = offset.unwrap, recorded_at = topologyTransaction.recordTime.toMicros, submission_id = Some( - PartyAllocation.TrackerKey.of(party, participant, authorizationEvent).submissionId + PartyAllocation.TrackerKey(party, participant, authorizationEvent).submissionId ), party = Some(party), typ = 
JdbcLedgerDao.acceptType, @@ -256,11 +256,11 @@ } val transactionMeta = DbDto.TransactionMeta( - update_id = transactionAccepted.updateId, + update_id = transactionAccepted.updateId.toProtoPrimitive.toByteArray, event_offset = offset.unwrap, publication_time = 0, // this is filled later record_time = transactionAccepted.recordTime.toMicros, - synchronizer_id = transactionAccepted.synchronizerId.toProtoPrimitive, + synchronizer_id = transactionAccepted.synchronizerId, event_sequential_id_first = 0, // this is filled later event_sequential_id_last = 0, // this is filled later ) @@ -273,8 +273,6 @@ .flatMap { case NodeInfo(nodeId, create: Create, _) => createNodeToDbDto( - compressionStrategy = compressionStrategy, - translation = translation, offset = offset, serializedTraceContext = serializedTraceContext, transactionAccepted = transactionAccepted, @@ -306,7 +304,7 @@ recordTime = transactionAccepted.recordTime.toLf, updateId = Some(transactionAccepted.updateId), completionInfo = completionInfo, - synchronizerId = transactionAccepted.synchronizerId.toProtoPrimitive, + synchronizerId = transactionAccepted.synchronizerId, messageUuid = None, serializedTraceContext = serializedTraceContext, isTransaction = true, @@ -326,8 +324,6 @@ reassignment.templateId.copy(pkg = PackageRef.Name(reassignment.packageName)).toString private def createNodeToDbDto( - compressionStrategy: CompressionStrategy, - translation: LfValueSerialization, offset: Offset, serializedTraceContext: Array[Byte], transactionAccepted: TransactionAccepted, @@ -335,68 +331,70 @@ create: Create, ): Iterator[DbDto] = { val templateId = templateIdWithPackageName(create) - val flatWitnesses: Set[String] = - if (transactionAccepted.isAcsDelta(create.coid)) - create.stakeholders.map(_.toString) - else - Set.empty - val (createArgument, createKeyValue) = translation.serialize(create) - val treeWitnesses = + val representativePackageId = transactionAccepted.representativePackageIds match { + case RepresentativePackageIds.SameAsContractPackageId => + create.templateId.packageId + case RepresentativePackageIds.DedicatedRepresentativePackageIds(representativePackageIds) => + representativePackageIds.getOrElse( + create.coid, + throw new IllegalStateException( + s"Missing representative package id for contract ${create.coid}" + ), + ) + } + val witnesses = transactionAccepted.blindingInfo.disclosure.getOrElse(nodeId, Set.empty).map(_.toString) - val treeWitnessesWithoutFlatWitnesses = treeWitnesses.diff(flatWitnesses) - Iterator( - DbDto.EventCreate( + val internal_contract_id = transactionAccepted.internalContractIds.getOrElse( + create.coid, + throw new IllegalStateException( + s"missing internal contract id for contract ${create.coid}" + ), + ) + + if (transactionAccepted.isAcsDelta(create.coid)) { + val stakeholders = create.stakeholders.map(_.toString) + val additional_witnesses = witnesses.diff(stakeholders) + DbDto.createDbDtos( event_offset = offset.unwrap, - update_id = transactionAccepted.updateId, - ledger_effective_time = transactionAccepted.transactionMeta.ledgerEffectiveTime.micros, - command_id = transactionAccepted.completionInfoO.map(_.commandId), + update_id = transactionAccepted.updateId.toProtoPrimitive.toByteArray, workflow_id = transactionAccepted.transactionMeta.workflowId, - user_id = transactionAccepted.completionInfoO.map(_.userId), + command_id = transactionAccepted.completionInfoO.map(_.commandId),
submitters = transactionAccepted.completionInfoO.map(_.actAs.toSet), - node_id = nodeId.index, - contract_id = create.coid.toBytes.toByteArray, - template_id = templateId, - package_id = create.templateId.packageId.toString, - flat_event_witnesses = flatWitnesses, - tree_event_witnesses = treeWitnesses, - create_argument = compressionStrategy.createArgumentCompression.compress(createArgument), - create_signatories = create.signatories.map(_.toString), - create_observers = create.stakeholders.diff(create.signatories).map(_.toString), - create_key_value = createKeyValue - .map(compressionStrategy.createKeyValueCompression.compress), - create_key_maintainers = create.keyOpt.map(_.maintainers.map(_.toString)), - create_key_hash = create.keyOpt.map(_.globalKey.hash.bytes.toHexString), - create_argument_compression = compressionStrategy.createArgumentCompression.id, - create_key_value_compression = - compressionStrategy.createKeyValueCompression.id.filter(_ => createKeyValue.isDefined), - event_sequential_id = 0, // this is filled later - authentication_data = transactionAccepted.contractAuthenticationData - .get(create.coid) - .map(_.toByteArray) - .getOrElse( - throw new IllegalStateException( - s"missing authentication data for contract ${create.coid}" - ) - ), - synchronizer_id = transactionAccepted.synchronizerId.toProtoPrimitive, - trace_context = serializedTraceContext, record_time = transactionAccepted.recordTime.toMicros, + synchronizer_id = transactionAccepted.synchronizerId, + trace_context = serializedTraceContext, external_transaction_hash = transactionAccepted.externalTransactionHash.map(_.unwrap.toByteArray), - ) - ) ++ flatWitnesses.iterator.map(stakeholder => - DbDto.IdFilterCreateStakeholder( event_sequential_id = 0, // this is filled later + node_id = nodeId.index, + additional_witnesses = additional_witnesses, + representative_package_id = representativePackageId.toString, + notPersistedContractId = create.coid, + internal_contract_id = internal_contract_id, + create_key_hash = create.keyOpt.map(_.globalKey.hash.bytes.toHexString), + )( + stakeholders = stakeholders, template_id = templateId, - party_id = stakeholder, ) - ) ++ treeWitnessesWithoutFlatWitnesses.iterator.map(stakeholder => - DbDto.IdFilterCreateNonStakeholderInformee( + } else { + DbDto.witnessedCreateDbDtos( + event_offset = offset.unwrap, + update_id = transactionAccepted.updateId.toProtoPrimitive.toByteArray, + workflow_id = transactionAccepted.transactionMeta.workflowId, + command_id = transactionAccepted.completionInfoO.map(_.commandId), + submitters = transactionAccepted.completionInfoO.map(_.actAs.toSet), + record_time = transactionAccepted.recordTime.toMicros, + synchronizer_id = transactionAccepted.synchronizerId, + trace_context = serializedTraceContext, + external_transaction_hash = + transactionAccepted.externalTransactionHash.map(_.unwrap.toByteArray), event_sequential_id = 0, // this is filled later - template_id = templateId, - party_id = stakeholder, - ) - ) + node_id = nodeId.index, + additional_witnesses = witnesses, + representative_package_id = representativePackageId.toString, + internal_contract_id = internal_contract_id, + )(templateId) + } } private def exerciseNodeToDbDto( @@ -409,80 +407,94 @@ object UpdateToDbDto { exercise: Exercise, lastDescendantNodeId: NodeId, ): Iterator[DbDto] = { - val (exerciseArgument, exerciseResult, createKeyValue) = + val (exerciseArgument, exerciseResult, _) = translation.serialize(exercise) - val stakeholders = exercise.stakeholders.map(_.toString) - val 
treeWitnesses = - transactionAccepted.blindingInfo.disclosure.getOrElse(nodeId, Set.empty).map(_.toString) - val flatWitnesses = - if (exercise.consuming && transactionAccepted.isAcsDelta(exercise.targetCoid)) - stakeholders - else - Set.empty[String] - val treeWitnessesWithoutFlatWitnesses = treeWitnesses.diff(flatWitnesses) val templateId = templateIdWithPackageName(exercise) - Iterator( - DbDto.EventExercise( - consuming = exercise.consuming, + val witnesses = + transactionAccepted.blindingInfo.disclosure.getOrElse(nodeId, Set.empty).map(_.toString) + if (exercise.consuming && transactionAccepted.isAcsDelta(exercise.targetCoid)) { + val stakeholders = exercise.stakeholders.map(_.toString) + val additional_witnesses = witnesses.diff(stakeholders) + DbDto.consumingExerciseDbDtos( event_offset = offset.unwrap, - update_id = transactionAccepted.updateId, - ledger_effective_time = transactionAccepted.transactionMeta.ledgerEffectiveTime.micros, - command_id = transactionAccepted.completionInfoO.map(_.commandId), + update_id = transactionAccepted.updateId.toProtoPrimitive.toByteArray, workflow_id = transactionAccepted.transactionMeta.workflowId, - user_id = transactionAccepted.completionInfoO.map(_.userId), + command_id = transactionAccepted.completionInfoO.map(_.commandId), submitters = transactionAccepted.completionInfoO.map(_.actAs.toSet), + record_time = transactionAccepted.recordTime.toMicros, + synchronizer_id = transactionAccepted.synchronizerId, + trace_context = serializedTraceContext, + external_transaction_hash = + transactionAccepted.externalTransactionHash.map(_.unwrap.toByteArray), + event_sequential_id = 0, // this is filled later node_id = nodeId.index, - contract_id = exercise.targetCoid.toBytes.toByteArray, - template_id = templateId, - package_id = exercise.templateId.packageId.toString, - flat_event_witnesses = flatWitnesses, - tree_event_witnesses = treeWitnesses, - exercise_choice = exercise.qualifiedChoiceName.toString, + deactivated_event_sequential_id = None, // this is filled later + additional_witnesses = additional_witnesses, + exercise_choice = exercise.qualifiedChoiceName.choiceName, + exercise_choice_interface_id = exercise.qualifiedChoiceName.interfaceId.map(_.toString), exercise_argument = - compressionStrategy.exerciseArgumentCompression.compress(exerciseArgument), - exercise_result = exerciseResult - .map(compressionStrategy.exerciseResultCompression.compress), + compressionStrategy.consumingExerciseArgumentCompression.compress(exerciseArgument), + exercise_result = + exerciseResult.map(compressionStrategy.consumingExerciseResultCompression.compress), exercise_actors = exercise.actingParties.map(_.toString), exercise_last_descendant_node_id = lastDescendantNodeId.index, - exercise_argument_compression = compressionStrategy.exerciseArgumentCompression.id, - exercise_result_compression = compressionStrategy.exerciseResultCompression.id, - event_sequential_id = 0, // this is filled later - synchronizer_id = transactionAccepted.synchronizerId.toProtoPrimitive, - trace_context = serializedTraceContext, + exercise_argument_compression = compressionStrategy.consumingExerciseArgumentCompression.id, + exercise_result_compression = compressionStrategy.consumingExerciseResultCompression.id, + contract_id = exercise.targetCoid, + internal_contract_id = None, // this will be filled later + template_id = templateId, + package_id = exercise.templateId.packageId.toString, + stakeholders = stakeholders, + ledger_effective_time = 
transactionAccepted.transactionMeta.ledgerEffectiveTime.micros, + ) + } else { + val internal_contract_id = + if (exercise.consuming) transactionAccepted.internalContractIds.get(exercise.targetCoid) + else None + val (argumentCompression, resultCompression) = + if (exercise.consuming) + ( + compressionStrategy.consumingExerciseArgumentCompression, + compressionStrategy.consumingExerciseResultCompression, + ) + else + ( + compressionStrategy.nonConsumingExerciseArgumentCompression, + compressionStrategy.nonConsumingExerciseResultCompression, + ) + DbDto.witnessedExercisedDbDtos( + event_offset = offset.unwrap, + update_id = transactionAccepted.updateId.toProtoPrimitive.toByteArray, + workflow_id = transactionAccepted.transactionMeta.workflowId, + command_id = transactionAccepted.completionInfoO.map(_.commandId), + submitters = transactionAccepted.completionInfoO.map(_.actAs.toSet), record_time = transactionAccepted.recordTime.toMicros, + synchronizer_id = transactionAccepted.synchronizerId, + trace_context = serializedTraceContext, external_transaction_hash = transactionAccepted.externalTransactionHash.map(_.unwrap.toByteArray), + event_sequential_id = 0, // this is filled later + node_id = nodeId.index, + additional_witnesses = witnesses, + consuming = exercise.consuming, + exercise_choice = exercise.qualifiedChoiceName.choiceName, + exercise_choice_interface_id = exercise.qualifiedChoiceName.interfaceId.map(_.toString), + exercise_argument = argumentCompression.compress(exerciseArgument), + exercise_result = exerciseResult.map(resultCompression.compress), + exercise_actors = exercise.actingParties.map(_.toString), + exercise_last_descendant_node_id = lastDescendantNodeId.index, + exercise_argument_compression = argumentCompression.id, + exercise_result_compression = resultCompression.id, + contract_id = exercise.targetCoid, + internal_contract_id = internal_contract_id, + template_id = templateId, + package_id = exercise.templateId.packageId.toString, + ledger_effective_time = transactionAccepted.transactionMeta.ledgerEffectiveTime.micros, ) - ) ++ { - if (exercise.consuming) { - flatWitnesses.iterator.map(stakeholder => - DbDto.IdFilterConsumingStakeholder( - event_sequential_id = 0, // this is filled later - template_id = templateId, - party_id = stakeholder, - ) - ) ++ treeWitnessesWithoutFlatWitnesses.iterator.map(stakeholder => - DbDto.IdFilterConsumingNonStakeholderInformee( - event_sequential_id = 0, // this is filled later - template_id = templateId, - party_id = stakeholder, - ) - ) - } else { - treeWitnesses.iterator.map(informee => - DbDto.IdFilterNonConsumingInformee( - event_sequential_id = 0, // this is filled later - template_id = templateId, - party_id = informee, - ) - ) - } } } private def reassignmentAcceptedToDbDto( - translation: LfValueSerialization, - compressionStrategy: CompressionStrategy, metrics: LedgerApiServerMetrics, offset: Offset, serializedTraceContext: Array[Byte], @@ -511,8 +523,6 @@ object UpdateToDbDto { case assign: Reassignment.Assign => assignToDbDto( - translation = translation, - compressionStrategy = compressionStrategy, offset = offset, serializedTraceContext = serializedTraceContext, reassignmentAccepted = reassignmentAccepted, @@ -528,18 +538,18 @@ object UpdateToDbDto { recordTime = reassignmentAccepted.recordTime.toLf, updateId = Some(reassignmentAccepted.updateId), completionInfo = completionInfo, - synchronizerId = reassignmentAccepted.synchronizerId.toProtoPrimitive, + synchronizerId = reassignmentAccepted.synchronizerId, messageUuid = 
None, serializedTraceContext = serializedTraceContext, isTransaction = false, ) val transactionMeta = DbDto.TransactionMeta( - update_id = reassignmentAccepted.updateId, + update_id = reassignmentAccepted.updateId.toProtoPrimitive.toByteArray, event_offset = offset.unwrap, publication_time = 0, // this is filled later record_time = reassignmentAccepted.recordTime.toMicros, - synchronizer_id = reassignmentAccepted.synchronizerId.toProtoPrimitive, + synchronizer_id = reassignmentAccepted.synchronizerId, event_sequential_id_first = 0, // this is filled later event_sequential_id_last = 0, // this is filled later ) @@ -558,91 +568,63 @@ object UpdateToDbDto { unassign: Reassignment.Unassign, ): Iterator[DbDto] = { val flatEventWitnesses = unassign.stakeholders.map(_.toString) - Iterator( - DbDto.EventUnassign( - event_offset = offset.unwrap, - update_id = reassignmentAccepted.updateId, - command_id = reassignmentAccepted.optCompletionInfo.map(_.commandId), - workflow_id = reassignmentAccepted.workflowId, - submitter = reassignmentAccepted.reassignmentInfo.submitter, - node_id = unassign.nodeId, - contract_id = unassign.contractId.toBytes.toByteArray, - template_id = templateIdWithPackageName(unassign), - package_id = unassign.templateId.packageId.toString, - flat_event_witnesses = flatEventWitnesses.toSet, - event_sequential_id = 0L, // this is filled later - source_synchronizer_id = - reassignmentAccepted.reassignmentInfo.sourceSynchronizer.unwrap.toProtoPrimitive, - target_synchronizer_id = - reassignmentAccepted.reassignmentInfo.targetSynchronizer.unwrap.toProtoPrimitive, - reassignment_id = reassignmentAccepted.reassignmentInfo.reassignmentId.toProtoPrimitive, - reassignment_counter = unassign.reassignmentCounter, - assignment_exclusivity = unassign.assignmentExclusivity.map(_.micros), - trace_context = serializedTraceContext, - record_time = reassignmentAccepted.recordTime.toMicros, - ) - ) ++ flatEventWitnesses.map(party => - DbDto.IdFilterUnassignStakeholder( - 0L, // this is filled later - templateIdWithPackageName(unassign), - party, - ) + DbDto.unassignDbDtos( + event_offset = offset.unwrap, + update_id = reassignmentAccepted.updateId.toProtoPrimitive.toByteArray, + command_id = reassignmentAccepted.optCompletionInfo.map(_.commandId), + workflow_id = reassignmentAccepted.workflowId, + submitter = reassignmentAccepted.reassignmentInfo.submitter, + record_time = reassignmentAccepted.recordTime.toMicros, + synchronizer_id = reassignmentAccepted.reassignmentInfo.sourceSynchronizer.unwrap, + trace_context = serializedTraceContext, + event_sequential_id = 0L, // this is filled later + node_id = unassign.nodeId, + deactivated_event_sequential_id = None, // this is filled later + reassignment_id = reassignmentAccepted.reassignmentInfo.reassignmentId.toBytes.toByteArray, + assignment_exclusivity = unassign.assignmentExclusivity.map(_.micros), + target_synchronizer_id = reassignmentAccepted.reassignmentInfo.targetSynchronizer.unwrap, + reassignment_counter = unassign.reassignmentCounter, + contract_id = unassign.contractId, + internal_contract_id = None, // this is filled later + template_id = templateIdWithPackageName(unassign), + package_id = unassign.templateId.packageId, + stakeholders = flatEventWitnesses, ) } private def assignToDbDto( - translation: LfValueSerialization, - compressionStrategy: CompressionStrategy, offset: Offset, serializedTraceContext: Array[Byte], reassignmentAccepted: ReassignmentAccepted, assign: Reassignment.Assign, ): Iterator[DbDto] = { - val (createArgument, 
createKeyValue) = translation.serialize(assign.createNode) val templateId = templateIdWithPackageName(assign) val flatEventWitnesses = assign.createNode.stakeholders.map(_.toString) - Iterator( - DbDto.EventAssign( - event_offset = offset.unwrap, - update_id = reassignmentAccepted.updateId, - command_id = reassignmentAccepted.optCompletionInfo.map(_.commandId), - workflow_id = reassignmentAccepted.workflowId, - submitter = reassignmentAccepted.reassignmentInfo.submitter, - node_id = assign.nodeId, - contract_id = assign.createNode.coid.toBytes.toByteArray, - template_id = templateId, - package_id = assign.createNode.templateId.packageId.toString, - flat_event_witnesses = flatEventWitnesses, - create_argument = createArgument, - create_signatories = assign.createNode.signatories.map(_.toString), - create_observers = assign.createNode.stakeholders - .diff(assign.createNode.signatories) - .map(_.toString), - create_key_value = createKeyValue - .map(compressionStrategy.createKeyValueCompression.compress), - create_key_maintainers = assign.createNode.keyOpt.map(_.maintainers.map(_.toString)), - create_key_hash = assign.createNode.keyOpt.map(_.globalKey.hash.bytes.toHexString), - create_argument_compression = compressionStrategy.createArgumentCompression.id, - create_key_value_compression = - compressionStrategy.createKeyValueCompression.id.filter(_ => createKeyValue.isDefined), - event_sequential_id = 0L, // this is filled later - ledger_effective_time = assign.ledgerEffectiveTime.micros, - authentication_data = assign.contractAuthenticationData.toByteArray, - source_synchronizer_id = - reassignmentAccepted.reassignmentInfo.sourceSynchronizer.unwrap.toProtoPrimitive, - target_synchronizer_id = - reassignmentAccepted.reassignmentInfo.targetSynchronizer.unwrap.toProtoPrimitive, - reassignment_id = reassignmentAccepted.reassignmentInfo.reassignmentId.toProtoPrimitive, - reassignment_counter = assign.reassignmentCounter, - trace_context = serializedTraceContext, - record_time = reassignmentAccepted.recordTime.toMicros, - ) - ) ++ flatEventWitnesses.map( - DbDto.IdFilterAssignStakeholder( - 0L, // this is filled later - templateId, - _, - ) + DbDto.assignDbDtos( + event_offset = offset.unwrap, + update_id = reassignmentAccepted.updateId.toProtoPrimitive.toByteArray, + workflow_id = reassignmentAccepted.workflowId, + command_id = reassignmentAccepted.optCompletionInfo.map(_.commandId), + submitter = reassignmentAccepted.reassignmentInfo.submitter, + record_time = reassignmentAccepted.recordTime.toMicros, + synchronizer_id = reassignmentAccepted.reassignmentInfo.targetSynchronizer.unwrap, + trace_context = serializedTraceContext, + event_sequential_id = 0L, // this is filled later + node_id = assign.nodeId, + source_synchronizer_id = reassignmentAccepted.reassignmentInfo.sourceSynchronizer.unwrap, + reassignment_counter = assign.reassignmentCounter, + reassignment_id = reassignmentAccepted.reassignmentInfo.reassignmentId.toBytes.toByteArray, + representative_package_id = assign.createNode.templateId.packageId.toString, + notPersistedContractId = assign.createNode.coid, + internal_contract_id = reassignmentAccepted.internalContractIds.getOrElse( + assign.createNode.coid, + throw new IllegalStateException( + s"missing internal contract id for contract ${assign.createNode.coid}" + ), + ), + )( + stakeholders = flatEventWitnesses, + template_id = templateId, ) } @@ -663,9 +645,9 @@ object UpdateToDbDto { private def commandCompletion( offset: Offset, recordTime: Time.Timestamp, - updateId: 
Option[data.UpdateId], + updateId: Option[UpdateId], completionInfo: CompletionInfo, - synchronizerId: String, + synchronizerId: SynchronizerId, messageUuid: Option[UUID], isTransaction: Boolean, serializedTraceContext: Array[Byte], @@ -691,7 +673,7 @@ object UpdateToDbDto { user_id = completionInfo.userId, submitters = completionInfo.actAs.toSet, command_id = completionInfo.commandId, - update_id = updateId, + update_id = updateId.map(_.toProtoPrimitive.toByteArray), rejection_status_code = None, rejection_status_message = None, rejection_status_details = None, diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoLegacy.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoLegacy.scala new file mode 100644 index 0000000000..7e68dd3b0f --- /dev/null +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoLegacy.scala @@ -0,0 +1,767 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.platform.store.backend + +import com.daml.metrics.api.MetricsContext +import com.daml.metrics.api.MetricsContext.{withExtraMetricLabels, withOptionalMetricLabels} +import com.daml.platform.v1.index.StatusDetails +import com.digitalasset.canton.data.DeduplicationPeriod.{DeduplicationDuration, DeduplicationOffset} +import com.digitalasset.canton.data.Offset +import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective.{ + AuthorizationEvent, + TopologyEvent, +} +import com.digitalasset.canton.ledger.participant.state.Update.TransactionAccepted.RepresentativePackageIds +import com.digitalasset.canton.ledger.participant.state.{CompletionInfo, Reassignment, Update} +import com.digitalasset.canton.metrics.{IndexerMetrics, LedgerApiServerMetrics} +import com.digitalasset.canton.platform.* +import com.digitalasset.canton.platform.indexer.TransactionTraversalUtils +import com.digitalasset.canton.platform.indexer.TransactionTraversalUtils.NodeInfo +import com.digitalasset.canton.platform.store.backend.Conversions.{ + authorizationEventInt, + participantPermissionInt, +} +import com.digitalasset.canton.platform.store.dao.JdbcLedgerDao +import com.digitalasset.canton.platform.store.dao.events.* +import com.digitalasset.canton.protocol.UpdateId +import com.digitalasset.canton.topology.SynchronizerId +import com.digitalasset.canton.tracing.SerializableTraceContext +import com.digitalasset.canton.tracing.SerializableTraceContextConverter.SerializableTraceContextExtension +import com.digitalasset.daml.lf.data.Ref.PackageRef +import com.digitalasset.daml.lf.data.{Ref, Time} +import com.digitalasset.daml.lf.transaction.Node.Action +import io.grpc.Status + +import java.util.UUID + +object UpdateToDbDtoLegacy { + import Update.* + + def apply( + participantId: Ref.ParticipantId, + translation: LfValueSerialization, + compressionStrategy: CompressionStrategy, + metrics: LedgerApiServerMetrics, + )(implicit mc: MetricsContext): Offset => Update => Iterator[DbDto] = { offset => tracedUpdate => + val serializedTraceContext = + SerializableTraceContext(tracedUpdate.traceContext).toDamlProto.toByteArray + tracedUpdate match { + case u: CommandRejected => + commandRejectedToDbDto( + metrics = metrics, + offset = offset, + serializedTraceContext = serializedTraceContext, + 
commandRejected = u, + ) + + case u: PartyAddedToParticipant => + partyAddedToParticipantToDbDto( + metrics = metrics, + participantId = participantId, + offset = offset, + partyAddedToParticipant = u, + ) + + case u: TopologyTransactionEffective => + topologyTransactionToDbDto( + metrics = metrics, + participantId = participantId, + offset = offset, + serializedTraceContext = serializedTraceContext, + topologyTransaction = u, + ) + + case u: TransactionAccepted => + transactionAcceptedToDbDto( + translation = translation, + compressionStrategy = compressionStrategy, + metrics = metrics, + offset = offset, + serializedTraceContext = serializedTraceContext, + transactionAccepted = u, + ) + + case u: ReassignmentAccepted => + reassignmentAcceptedToDbDto( + translation = translation, + compressionStrategy = compressionStrategy, + metrics = metrics, + offset = offset, + serializedTraceContext = serializedTraceContext, + reassignmentAccepted = u, + ) + + case u: SequencerIndexMoved => + // nothing to persist, this is only a synthetic DbDto to facilitate updating the StringInterning + Iterator(DbDto.SequencerIndexMoved(u.synchronizerId)) + + case _: EmptyAcsPublicationRequired => Iterator.empty + case _: LogicalSynchronizerUpgradeTimeReached => Iterator.empty + + case _: CommitRepair => + Iterator.empty + } + } + + private def commandRejectedToDbDto( + metrics: LedgerApiServerMetrics, + offset: Offset, + serializedTraceContext: Array[Byte], + commandRejected: CommandRejected, + )(implicit mc: MetricsContext): Iterator[DbDto] = { + withExtraMetricLabels( + IndexerMetrics.Labels.grpcCode -> Status + .fromCodeValue(commandRejected.reasonTemplate.code) + .getCode + .name(), + IndexerMetrics.Labels.userId -> commandRejected.completionInfo.userId, + ) { implicit mc: MetricsContext => + incrementCounterForEvent( + metrics.indexer, + IndexerMetrics.Labels.eventType.transaction, + IndexerMetrics.Labels.status.rejected, + ) + } + val messageUuid = commandRejected match { + case _: SequencedCommandRejected => None + case unSequenced: UnSequencedCommandRejected => Some(unSequenced.messageUuid) + } + Iterator( + commandCompletion( + offset = offset, + recordTime = commandRejected.recordTime.toLf, + updateId = None, + completionInfo = commandRejected.completionInfo, + synchronizerId = commandRejected.synchronizerId, + messageUuid = messageUuid, + serializedTraceContext = serializedTraceContext, + isTransaction = + true, // please note from usage point of view (deduplication) rejections are always used both for transactions and reassignments at the moment. 
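+ // rejection-specific status columns are filled in on the shared completion row via the .copy below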
+ ).copy( + rejection_status_code = Some(commandRejected.reasonTemplate.code), + rejection_status_message = Some(commandRejected.reasonTemplate.message), + rejection_status_details = + Some(StatusDetails.of(commandRejected.reasonTemplate.status.details).toByteArray), + ) + ) + } + + private def partyAddedToParticipantToDbDto( + metrics: LedgerApiServerMetrics, + participantId: Ref.ParticipantId, + offset: Offset, + partyAddedToParticipant: PartyAddedToParticipant, + )(implicit mc: MetricsContext): Iterator[DbDto] = { + incrementCounterForEvent( + metrics.indexer, + IndexerMetrics.Labels.eventType.partyAllocation, + IndexerMetrics.Labels.status.accepted, + ) + Iterator( + DbDto.PartyEntry( + ledger_offset = offset.unwrap, + recorded_at = partyAddedToParticipant.recordTime.toMicros, + submission_id = partyAddedToParticipant.submissionId, + party = Some(partyAddedToParticipant.party), + typ = JdbcLedgerDao.acceptType, + rejection_reason = None, + is_local = Some(partyAddedToParticipant.participantId == participantId), + ) + ) + } + + private def topologyTransactionToDbDto( + metrics: LedgerApiServerMetrics, + participantId: Ref.ParticipantId, + offset: Offset, + serializedTraceContext: Array[Byte], + topologyTransaction: TopologyTransactionEffective, + )(implicit mc: MetricsContext): Iterator[DbDto] = { + incrementCounterForEvent( + metrics.indexer, + IndexerMetrics.Labels.eventType.topologyTransaction, + IndexerMetrics.Labels.status.accepted, + ) + + val transactionMeta = DbDto.TransactionMeta( + update_id = topologyTransaction.updateId.toProtoPrimitive.toByteArray, + event_offset = offset.unwrap, + publication_time = 0, // this is filled later + record_time = topologyTransaction.recordTime.toMicros, + synchronizer_id = topologyTransaction.synchronizerId, + event_sequential_id_first = 0, // this is filled later + event_sequential_id_last = 0, // this is filled later + ) + + val events = topologyTransaction.events.iterator.flatMap { + case TopologyEvent.PartyToParticipantAuthorization(party, participant, authorizationEvent) => + import com.digitalasset.canton.platform.apiserver.services.admin.PartyAllocation + val eventPartyToParticipant = Iterator( + DbDto.EventPartyToParticipant( + event_sequential_id = 0, // this is filled later + event_offset = offset.unwrap, + update_id = topologyTransaction.updateId.toProtoPrimitive.toByteArray, + party_id = party, + participant_id = participant, + participant_permission = participantPermissionInt(authorizationEvent), + participant_authorization_event = authorizationEventInt(authorizationEvent), + synchronizer_id = topologyTransaction.synchronizerId, + record_time = topologyTransaction.recordTime.toMicros, + trace_context = serializedTraceContext, + ) + ) + val partyEntry = Seq(authorizationEvent) + .collect { case active: AuthorizationEvent.ActiveAuthorization => active } + .map(_ => + DbDto.PartyEntry( + ledger_offset = offset.unwrap, + recorded_at = topologyTransaction.recordTime.toMicros, + submission_id = Some( + PartyAllocation.TrackerKey(party, participant, authorizationEvent).submissionId + ), + party = Some(party), + typ = JdbcLedgerDao.acceptType, + rejection_reason = None, + is_local = Some(participant == participantId), + ) + ) + .iterator + eventPartyToParticipant ++ partyEntry + } + + // TransactionMeta DTO must come last in this sequence + // because in a later stage the preceding events + // will be assigned consecutive event sequential ids + // and transaction meta is assigned sequential ids of its first and last event + events ++ 
Seq(transactionMeta) + } + + private def transactionAcceptedToDbDto( + translation: LfValueSerialization, + compressionStrategy: CompressionStrategy, + metrics: LedgerApiServerMetrics, + offset: Offset, + serializedTraceContext: Array[Byte], + transactionAccepted: TransactionAccepted, + )(implicit mc: MetricsContext): Iterator[DbDto] = { + withOptionalMetricLabels( + IndexerMetrics.Labels.userId -> transactionAccepted.completionInfoO.map( + _.userId + ) + ) { implicit mc: MetricsContext => + incrementCounterForEvent( + metrics.indexer, + IndexerMetrics.Labels.eventType.transaction, + IndexerMetrics.Labels.status.accepted, + ) + } + + val transactionMeta = DbDto.TransactionMeta( + update_id = transactionAccepted.updateId.toProtoPrimitive.toByteArray, + event_offset = offset.unwrap, + publication_time = 0, // this is filled later + record_time = transactionAccepted.recordTime.toMicros, + synchronizer_id = transactionAccepted.synchronizerId, + event_sequential_id_first = 0, // this is filled later + event_sequential_id_last = 0, // this is filled later + ) + + val events: Iterator[DbDto] = TransactionTraversalUtils + .executionOrderTraversalForIngestion( + transactionAccepted.transaction.transaction + ) + .iterator + .flatMap { + case NodeInfo(nodeId, create: Create, _) => + createNodeToDbDto( + compressionStrategy = compressionStrategy, + translation = translation, + offset = offset, + serializedTraceContext = serializedTraceContext, + transactionAccepted = transactionAccepted, + nodeId = nodeId, + create = create, + ) + + case NodeInfo(nodeId, exercise: Exercise, lastDescendantNodeId) => + exerciseNodeToDbDto( + compressionStrategy = compressionStrategy, + translation = translation, + offset = offset, + serializedTraceContext = serializedTraceContext, + transactionAccepted = transactionAccepted, + nodeId = nodeId, + exercise = exercise, + lastDescendantNodeId = lastDescendantNodeId, + ) + + case _ => + Iterator.empty // It is okay to collect: blinding info is already there, we are free at hand to filter out the fetch and lookup nodes here already + } + + val completions = + for { + completionInfo <- transactionAccepted.completionInfoO + } yield commandCompletion( + offset = offset, + recordTime = transactionAccepted.recordTime.toLf, + updateId = Some(transactionAccepted.updateId), + completionInfo = completionInfo, + synchronizerId = transactionAccepted.synchronizerId, + messageUuid = None, + serializedTraceContext = serializedTraceContext, + isTransaction = true, + ) + + // TransactionMeta DTO must come last in this sequence + // because in a later stage the preceding events + // will be assigned consecutive event sequential ids + // and transaction meta is assigned sequential ids of its first and last event + events ++ completions ++ Seq(transactionMeta) + } + + def templateIdWithPackageName(node: Action): String = + node.templateId.copy(pkg = PackageRef.Name(node.packageName)).toString + + def templateIdWithPackageName(reassignment: Reassignment): String = + reassignment.templateId.copy(pkg = PackageRef.Name(reassignment.packageName)).toString + + private def createNodeToDbDto( + compressionStrategy: CompressionStrategy, + translation: LfValueSerialization, + offset: Offset, + serializedTraceContext: Array[Byte], + transactionAccepted: TransactionAccepted, + nodeId: NodeId, + create: Create, + ): Iterator[DbDto] = { + val templateId = templateIdWithPackageName(create) + val flatWitnesses: Set[String] = + if (transactionAccepted.isAcsDelta(create.coid)) + 
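+ // only creates that are part of the ACS delta expose their stakeholders as flat event witnesses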
create.stakeholders.map(_.toString) + else + Set.empty + val (createArgument, createKeyValue) = translation.serialize(create) + val treeWitnesses = + transactionAccepted.blindingInfo.disclosure.getOrElse(nodeId, Set.empty).map(_.toString) + val treeWitnessesWithoutFlatWitnesses = treeWitnesses.diff(flatWitnesses) + val representativePackageId = transactionAccepted.representativePackageIds match { + case RepresentativePackageIds.SameAsContractPackageId => + create.templateId.packageId + case RepresentativePackageIds.DedicatedRepresentativePackageIds(representativePackageIds) => + representativePackageIds.getOrElse( + create.coid, + throw new IllegalStateException( + s"Missing representative package id for contract ${create.coid}" + ), + ) + } + Iterator( + DbDto.EventCreate( + event_offset = offset.unwrap, + update_id = transactionAccepted.updateId.toProtoPrimitive.toByteArray, + ledger_effective_time = transactionAccepted.transactionMeta.ledgerEffectiveTime.micros, + command_id = transactionAccepted.completionInfoO.map(_.commandId), + workflow_id = transactionAccepted.transactionMeta.workflowId, + user_id = transactionAccepted.completionInfoO.map(_.userId), + submitters = transactionAccepted.completionInfoO.map(_.actAs.toSet), + node_id = nodeId.index, + contract_id = create.coid, + template_id = templateId, + package_id = create.templateId.packageId.toString, + representative_package_id = representativePackageId.toString, + flat_event_witnesses = flatWitnesses, + tree_event_witnesses = treeWitnesses, + create_argument = + compressionStrategy.createArgumentCompressionLegacy.compress(createArgument), + create_signatories = create.signatories.map(_.toString), + create_observers = create.stakeholders.diff(create.signatories).map(_.toString), + create_key_value = createKeyValue + .map(compressionStrategy.createKeyValueCompressionLegacy.compress), + create_key_maintainers = create.keyOpt.map(_.maintainers.map(_.toString)), + create_key_hash = create.keyOpt.map(_.globalKey.hash.bytes.toHexString), + create_argument_compression = compressionStrategy.createArgumentCompressionLegacy.id, + create_key_value_compression = + compressionStrategy.createKeyValueCompressionLegacy.id.filter(_ => + createKeyValue.isDefined + ), + event_sequential_id = 0, // this is filled later + authentication_data = transactionAccepted.contractAuthenticationData + .get(create.coid) + .map(_.toByteArray) + .getOrElse( + throw new IllegalStateException( + s"missing authentication data for contract ${create.coid}" + ) + ), + synchronizer_id = transactionAccepted.synchronizerId, + trace_context = serializedTraceContext, + record_time = transactionAccepted.recordTime.toMicros, + external_transaction_hash = + transactionAccepted.externalTransactionHash.map(_.unwrap.toByteArray), + internal_contract_id = transactionAccepted.internalContractIds.getOrElse( + create.coid, + throw new IllegalStateException( + s"missing internal contract id for contract ${create.coid}" + ), + ), + ) + ) ++ withFirstMarked( + flatWitnesses, + (party, first) => + DbDto.IdFilterCreateStakeholder( + event_sequential_id = 0, // this is filled later + template_id = templateId, + party_id = party, + first_per_sequential_id = first, + ), + ) ++ withFirstMarked( + treeWitnessesWithoutFlatWitnesses, + (party, first) => + DbDto.IdFilterCreateNonStakeholderInformee( + event_sequential_id = 0, // this is filled later + template_id = templateId, + party_id = party, + first_per_sequential_id = first, + ), + ) + } + + private def exerciseNodeToDbDto( 
compressionStrategy: CompressionStrategy, + translation: LfValueSerialization, + offset: Offset, + serializedTraceContext: Array[Byte], + transactionAccepted: TransactionAccepted, + nodeId: NodeId, + exercise: Exercise, + lastDescendantNodeId: NodeId, + ): Iterator[DbDto] = { + val (exerciseArgument, exerciseResult, createKeyValue) = + translation.serialize(exercise) + val stakeholders = exercise.stakeholders.map(_.toString) + val treeWitnesses = + transactionAccepted.blindingInfo.disclosure.getOrElse(nodeId, Set.empty).map(_.toString) + val flatWitnesses = + if (exercise.consuming && transactionAccepted.isAcsDelta(exercise.targetCoid)) + stakeholders + else + Set.empty[String] + val treeWitnessesWithoutFlatWitnesses = treeWitnesses.diff(flatWitnesses) + val templateId = templateIdWithPackageName(exercise) + Iterator( + DbDto.EventExercise( + consuming = exercise.consuming, + event_offset = offset.unwrap, + update_id = transactionAccepted.updateId.toProtoPrimitive.toByteArray, + ledger_effective_time = transactionAccepted.transactionMeta.ledgerEffectiveTime.micros, + command_id = transactionAccepted.completionInfoO.map(_.commandId), + workflow_id = transactionAccepted.transactionMeta.workflowId, + user_id = transactionAccepted.completionInfoO.map(_.userId), + submitters = transactionAccepted.completionInfoO.map(_.actAs.toSet), + node_id = nodeId.index, + contract_id = exercise.targetCoid, + template_id = templateId, + package_id = exercise.templateId.packageId.toString, + flat_event_witnesses = flatWitnesses, + tree_event_witnesses = treeWitnesses, + exercise_choice = exercise.qualifiedChoiceName.choiceName, + exercise_choice_interface_id = exercise.qualifiedChoiceName.interfaceId.map(_.toString), + exercise_argument = + compressionStrategy.consumingExerciseArgumentCompression.compress(exerciseArgument), + exercise_result = exerciseResult + .map(compressionStrategy.consumingExerciseResultCompression.compress), + exercise_actors = exercise.actingParties.map(_.toString), + exercise_last_descendant_node_id = lastDescendantNodeId.index, + exercise_argument_compression = compressionStrategy.consumingExerciseArgumentCompression.id, + exercise_result_compression = compressionStrategy.consumingExerciseResultCompression.id, + event_sequential_id = 0, // this is filled later + synchronizer_id = transactionAccepted.synchronizerId, + trace_context = serializedTraceContext, + record_time = transactionAccepted.recordTime.toMicros, + external_transaction_hash = + transactionAccepted.externalTransactionHash.map(_.unwrap.toByteArray), + deactivated_event_sequential_id = None, // this is filled later + ) + ) ++ { + if (exercise.consuming) { + withFirstMarked( + flatWitnesses, + (party, first) => + DbDto.IdFilterConsumingStakeholder( + event_sequential_id = 0, // this is filled later + template_id = templateId, + party_id = party, + first_per_sequential_id = first, + ), + ) ++ withFirstMarked( + treeWitnessesWithoutFlatWitnesses, + (party, first) => + DbDto.IdFilterConsumingNonStakeholderInformee( + event_sequential_id = 0, // this is filled later + template_id = templateId, + party_id = party, + first_per_sequential_id = first, + ), + ) + } else { + withFirstMarked( + treeWitnesses, + (informee, first) => + DbDto.IdFilterNonConsumingInformee( + event_sequential_id = 0, // this is filled later + template_id = templateId, + party_id = informee, + first_per_sequential_id = first, + ), + ) + } + } + } + + private def reassignmentAcceptedToDbDto( + translation: LfValueSerialization, + compressionStrategy: 
CompressionStrategy, + metrics: LedgerApiServerMetrics, + offset: Offset, + serializedTraceContext: Array[Byte], + reassignmentAccepted: ReassignmentAccepted, + )(implicit mc: MetricsContext): Iterator[DbDto] = { + withOptionalMetricLabels( + IndexerMetrics.Labels.userId -> reassignmentAccepted.optCompletionInfo.map( + _.userId + ) + ) { implicit mc: MetricsContext => + incrementCounterForEvent( + metrics.indexer, + IndexerMetrics.Labels.eventType.reassignment, + IndexerMetrics.Labels.status.accepted, + ) + } + + val events: Iterator[DbDto] = reassignmentAccepted.reassignment.iterator.flatMap { + case unassign: Reassignment.Unassign => + unassignToDbDto( + offset = offset, + serializedTraceContext = serializedTraceContext, + reassignmentAccepted = reassignmentAccepted, + unassign = unassign, + ) + + case assign: Reassignment.Assign => + assignToDbDto( + translation = translation, + compressionStrategy = compressionStrategy, + offset = offset, + serializedTraceContext = serializedTraceContext, + reassignmentAccepted = reassignmentAccepted, + assign = assign, + ) + } + + val completions: Option[DbDto] = + for { + completionInfo <- reassignmentAccepted.optCompletionInfo + } yield commandCompletion( + offset = offset, + recordTime = reassignmentAccepted.recordTime.toLf, + updateId = Some(reassignmentAccepted.updateId), + completionInfo = completionInfo, + synchronizerId = reassignmentAccepted.synchronizerId, + messageUuid = None, + serializedTraceContext = serializedTraceContext, + isTransaction = false, + ) + + val transactionMeta = DbDto.TransactionMeta( + update_id = reassignmentAccepted.updateId.toProtoPrimitive.toByteArray, + event_offset = offset.unwrap, + publication_time = 0, // this is filled later + record_time = reassignmentAccepted.recordTime.toMicros, + synchronizer_id = reassignmentAccepted.synchronizerId, + event_sequential_id_first = 0, // this is filled later + event_sequential_id_last = 0, // this is filled later + ) + + // TransactionMeta DTO must come last in this sequence + // because in a later stage the preceding events + // will be assigned consecutive event sequential ids + // and transaction meta is assigned sequential ids of its first and last event + events ++ completions.iterator ++ Iterator(transactionMeta) + } + + private def unassignToDbDto( + offset: Offset, + serializedTraceContext: Array[Byte], + reassignmentAccepted: ReassignmentAccepted, + unassign: Reassignment.Unassign, + ): Iterator[DbDto] = { + val flatEventWitnesses = unassign.stakeholders.map(_.toString) + Iterator( + DbDto.EventUnassign( + event_offset = offset.unwrap, + update_id = reassignmentAccepted.updateId.toProtoPrimitive.toByteArray, + command_id = reassignmentAccepted.optCompletionInfo.map(_.commandId), + workflow_id = reassignmentAccepted.workflowId, + submitter = reassignmentAccepted.reassignmentInfo.submitter, + node_id = unassign.nodeId, + contract_id = unassign.contractId, + template_id = templateIdWithPackageName(unassign), + package_id = unassign.templateId.packageId.toString, + flat_event_witnesses = flatEventWitnesses.toSet, + event_sequential_id = 0L, // this is filled later + source_synchronizer_id = reassignmentAccepted.reassignmentInfo.sourceSynchronizer.unwrap, + target_synchronizer_id = reassignmentAccepted.reassignmentInfo.targetSynchronizer.unwrap, + reassignment_id = reassignmentAccepted.reassignmentInfo.reassignmentId.toBytes.toByteArray, + reassignment_counter = unassign.reassignmentCounter, + assignment_exclusivity = unassign.assignmentExclusivity.map(_.micros), + 
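+ // like the other timestamps in this DTO, assignment exclusivity is persisted in microseconds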
trace_context = serializedTraceContext, + record_time = reassignmentAccepted.recordTime.toMicros, + deactivated_event_sequential_id = None, // this is filled later + ) + ) ++ withFirstMarked( + flatEventWitnesses, + (party, first) => + DbDto.IdFilterUnassignStakeholder( + 0L, // this is filled later + templateIdWithPackageName(unassign), + party, + first_per_sequential_id = first, + ), + ) + } + + private def assignToDbDto( + translation: LfValueSerialization, + compressionStrategy: CompressionStrategy, + offset: Offset, + serializedTraceContext: Array[Byte], + reassignmentAccepted: ReassignmentAccepted, + assign: Reassignment.Assign, + ): Iterator[DbDto] = { + val (createArgument, createKeyValue) = translation.serialize(assign.createNode) + val templateId = templateIdWithPackageName(assign) + val flatEventWitnesses = assign.createNode.stakeholders.map(_.toString) + Iterator( + DbDto.EventAssign( + event_offset = offset.unwrap, + update_id = reassignmentAccepted.updateId.toProtoPrimitive.toByteArray, + command_id = reassignmentAccepted.optCompletionInfo.map(_.commandId), + workflow_id = reassignmentAccepted.workflowId, + submitter = reassignmentAccepted.reassignmentInfo.submitter, + node_id = assign.nodeId, + contract_id = assign.createNode.coid, + template_id = templateId, + package_id = assign.createNode.templateId.packageId.toString, + flat_event_witnesses = flatEventWitnesses, + create_argument = createArgument, + create_signatories = assign.createNode.signatories.map(_.toString), + create_observers = assign.createNode.stakeholders + .diff(assign.createNode.signatories) + .map(_.toString), + create_key_value = createKeyValue + .map(compressionStrategy.createKeyValueCompressionLegacy.compress), + create_key_maintainers = assign.createNode.keyOpt.map(_.maintainers.map(_.toString)), + create_key_hash = assign.createNode.keyOpt.map(_.globalKey.hash.bytes.toHexString), + create_argument_compression = compressionStrategy.createArgumentCompressionLegacy.id, + create_key_value_compression = + compressionStrategy.createKeyValueCompressionLegacy.id.filter(_ => + createKeyValue.isDefined + ), + event_sequential_id = 0L, // this is filled later + ledger_effective_time = assign.ledgerEffectiveTime.micros, + authentication_data = assign.contractAuthenticationData.toByteArray, + source_synchronizer_id = reassignmentAccepted.reassignmentInfo.sourceSynchronizer.unwrap, + target_synchronizer_id = reassignmentAccepted.reassignmentInfo.targetSynchronizer.unwrap, + reassignment_id = reassignmentAccepted.reassignmentInfo.reassignmentId.toBytes.toByteArray, + reassignment_counter = assign.reassignmentCounter, + trace_context = serializedTraceContext, + record_time = reassignmentAccepted.recordTime.toMicros, + internal_contract_id = reassignmentAccepted.internalContractIds.getOrElse( + assign.createNode.coid, + throw new IllegalStateException( + s"missing internal contract id for contract ${assign.createNode.coid}" + ), + ), + ) + ) ++ withFirstMarked( + flatEventWitnesses, + (party, first) => + DbDto.IdFilterAssignStakeholder( + 0L, // this is filled later + templateId, + party, + first_per_sequential_id = first, + ), + ) + } + + private def incrementCounterForEvent( + metrics: IndexerMetrics, + eventType: String, + status: String, + )(implicit + mc: MetricsContext + ): Unit = + withExtraMetricLabels( + IndexerMetrics.Labels.eventType.key -> eventType, + IndexerMetrics.Labels.status.key -> status, + ) { implicit mc => + metrics.eventsMeter.mark() + } + + private def commandCompletion( + offset: Offset, + 
recordTime: Time.Timestamp, + updateId: Option[UpdateId], + completionInfo: CompletionInfo, + synchronizerId: SynchronizerId, + messageUuid: Option[UUID], + isTransaction: Boolean, + serializedTraceContext: Array[Byte], + ): DbDto.CommandCompletion = { + val (deduplicationOffset, deduplicationDurationSeconds, deduplicationDurationNanos) = + completionInfo.optDeduplicationPeriod + .map { + case DeduplicationOffset(offset) => + ( + Some(offset.fold(0L)(_.unwrap)), + None, + None, + ) + case DeduplicationDuration(duration) => + (None, Some(duration.getSeconds), Some(duration.getNano)) + } + .getOrElse((None, None, None)) + + DbDto.CommandCompletion( + completion_offset = offset.unwrap, + record_time = recordTime.micros, + publication_time = 0L, // will be filled later + user_id = completionInfo.userId, + submitters = completionInfo.actAs.toSet, + command_id = completionInfo.commandId, + update_id = updateId.map(_.toProtoPrimitive.toByteArray), + rejection_status_code = None, + rejection_status_message = None, + rejection_status_details = None, + submission_id = completionInfo.submissionId, + deduplication_offset = deduplicationOffset, + deduplication_duration_seconds = deduplicationDurationSeconds, + deduplication_duration_nanos = deduplicationDurationNanos, + synchronizer_id = synchronizerId, + message_uuid = messageUuid.map(_.toString), + is_transaction = isTransaction, + trace_context = serializedTraceContext, + ) + } + + private def withFirstMarked( + parties: Set[String], + create: (String, Boolean) => DbDto, + ): Seq[DbDto] = + parties.iterator.zipWithIndex.map { case (party, idx) => + create(party, idx == 0) + }.toSeq +} diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/CompletionStorageBackendTemplate.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/CompletionStorageBackendTemplate.scala index b31d6abf78..cf20bbe7a2 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/CompletionStorageBackendTemplate.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/CompletionStorageBackendTemplate.scala @@ -14,12 +14,15 @@ import com.digitalasset.canton.platform.store.CompletionFromTransaction import com.digitalasset.canton.platform.store.backend.CompletionStorageBackend import com.digitalasset.canton.platform.store.backend.Conversions.{ offset, + parties, timestampFromMicros, traceContextOption, + updateId, } import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.SqlStringInterpolation import com.digitalasset.canton.platform.store.interning.StringInterning import com.digitalasset.canton.platform.{Party, UserId} +import com.digitalasset.canton.protocol.UpdateId import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.daml.lf.data.Ref import com.digitalasset.daml.lf.data.Time.Timestamp @@ -43,66 +46,68 @@ class CompletionStorageBackendTemplate( limit: Int, )(connection: Connection): Vector[CompletionStreamResponse] = { import ComposableQuery.* - import com.digitalasset.canton.platform.store.backend.Conversions.userIdToStatement import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.* - val internedParties = - parties.view.map(stringInterning.party.tryInternalize).flatMap(_.toList).toSet - if (internedParties.isEmpty) { + if (parties.isEmpty) { Vector.empty } 
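+ // a user id that was never interned cannot occur in any persisted completion, so the None branch below returns no rows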
else { - val rows = SQL""" - SELECT - submitters, - completion_offset, - record_time, - command_id, - update_id, - rejection_status_code, - rejection_status_message, - rejection_status_details, - user_id, - submission_id, - deduplication_offset, - deduplication_duration_seconds, - deduplication_duration_nanos, - synchronizer_id, - trace_context - FROM - lapi_command_completions - WHERE - ${QueryStrategy.offsetIsBetween( - nonNullableColumn = "completion_offset", - startInclusive = startInclusive, - endInclusive = endInclusive, - )} AND - user_id = $userId - ORDER BY completion_offset ASC - ${QueryStrategy.limitClause(Some(limit))}""" - .asVectorOf(completionParser(internedParties))(connection) - rows.collect { - case (submitters, response) if submitters.exists(internedParties) => response + stringInterning.userId.tryInternalize(userId) match { + case Some(internedUserId) => + val rows = SQL""" + SELECT + submitters, + completion_offset, + record_time, + command_id, + update_id, + rejection_status_code, + rejection_status_message, + rejection_status_details, + user_id, + submission_id, + deduplication_offset, + deduplication_duration_seconds, + deduplication_duration_nanos, + synchronizer_id, + trace_context + FROM + lapi_command_completions + WHERE + ${QueryStrategy.offsetIsBetween( + nonNullableColumn = "completion_offset", + startInclusive = startInclusive, + endInclusive = endInclusive, + )} AND + user_id = $internedUserId + ORDER BY completion_offset ASC + ${QueryStrategy.limitClause(Some(limit))}""" + .asVectorOf(completionParser(parties))(connection) + rows.collect { + case (submitters, response) if submitters.exists(parties) => + response + } + case None => Vector.empty } } } private val sharedColumns: RowParser[ - Array[Int] ~ Offset ~ Timestamp ~ String ~ String ~ Option[String] ~ Int ~ TraceContext + Seq[Party] ~ Offset ~ Timestamp ~ String ~ Int ~ Option[String] ~ Int ~ TraceContext ] = - array[Int]("submitters") ~ + parties(stringInterning)("submitters") ~ offset("completion_offset") ~ timestampFromMicros("record_time") ~ str("command_id") ~ - str("user_id") ~ + int("user_id") ~ str("submission_id").? ~ int("synchronizer_id") ~ traceContextOption("trace_context")(noTracingLogger) private val acceptedCommandSharedColumns: RowParser[ - Array[Int] ~ Offset ~ Timestamp ~ String ~ String ~ Option[ + Seq[Party] ~ Offset ~ Timestamp ~ String ~ Int ~ Option[ String - ] ~ Int ~ TraceContext ~ String + ] ~ Int ~ TraceContext ~ UpdateId ] = - sharedColumns ~ str("update_id") + sharedColumns ~ updateId("update_id") private val deduplicationOffsetColumn: RowParser[Option[Long]] = long("deduplication_offset").? @@ -112,23 +117,20 @@ class CompletionStorageBackendTemplate( int("deduplication_duration_nanos").? 
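
The user-id handling above follows a guard pattern worth noting: stringInterning.userId.tryInternalize is consulted before any SQL is issued, because a user id that was never interned cannot match any stored row, so the whole query can be skipped. A minimal sketch of that short-circuit, where MiniInterning and runQuery are hypothetical stand-ins for the real StringInterning and the SQL lookup:

final case class MiniInterning(toId: Map[String, Int]) {
  // Mirrors StringInterning#tryInternalize: None for a string the indexer has never interned.
  def tryInternalize(s: String): Option[Int] = toId.get(s)
}

def completionsFor(userId: String, interning: MiniInterning)(
    runQuery: Int => Vector[String] // stands in for the SQL lookup keyed by the interned id
): Vector[String] =
  interning.tryInternalize(userId) match {
    case Some(internedUserId) => runQuery(internedUserId) // known id: query by integer key
    case None => Vector.empty // unknown user id: no stored row can match, skip the database
  }

The parties guard at the top of the method applies the same idea: an empty requested-party set yields Vector.empty before the query is even built.
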
private def acceptedCommandParser( - internedParties: Set[Int] - ): RowParser[(Array[Int], CompletionStreamResponse)] = + parties: Set[Party] + ): RowParser[(Seq[Party], CompletionStreamResponse)] = acceptedCommandSharedColumns ~ deduplicationOffsetColumn ~ deduplicationDurationSecondsColumn ~ deduplicationDurationNanosColumn map { - case submitters ~ offset ~ recordTime ~ commandId ~ userId ~ submissionId ~ internedSynchronizerId ~ traceContext ~ updateId ~ + case submitters ~ offset ~ recordTime ~ commandId ~ internedUserId ~ submissionId ~ internedSynchronizerId ~ traceContext ~ updateId ~ deduplicationOffset ~ deduplicationDurationSeconds ~ deduplicationDurationNanos => submitters -> CompletionFromTransaction.acceptedCompletion( - submitters = submitters.iterator - .filter(internedParties) - .map(stringInterning.party.unsafe.externalize) - .toSet, + submitters = submitters.filter(parties).toSet, recordTime = recordTime, offset = offset, commandId = commandId, updateId = updateId, - userId = userId, + userId = stringInterning.userId.unsafe.externalize(internedUserId), optSubmissionId = submissionId, optDeduplicationOffset = deduplicationOffset, optDeduplicationDurationSeconds = deduplicationDurationSeconds, @@ -145,29 +147,26 @@ class CompletionStorageBackendTemplate( byteArray("rejection_status_details").? private def rejectedCommandParser( - internedParties: Set[Int] - ): RowParser[(Array[Int], CompletionStreamResponse)] = + internedParties: Set[Party] + ): RowParser[(Seq[Party], CompletionStreamResponse)] = sharedColumns ~ deduplicationOffsetColumn ~ deduplicationDurationSecondsColumn ~ deduplicationDurationNanosColumn ~ rejectionStatusCodeColumn ~ rejectionStatusMessageColumn ~ rejectionStatusDetailsColumn map { - case submitters ~ offset ~ recordTime ~ commandId ~ userId ~ submissionId ~ internedSynchronizerId ~ traceContext ~ + case submitters ~ offset ~ recordTime ~ commandId ~ internedUserId ~ submissionId ~ internedSynchronizerId ~ traceContext ~ deduplicationOffset ~ deduplicationDurationSeconds ~ deduplicationDurationNanos ~ rejectionStatusCode ~ rejectionStatusMessage ~ rejectionStatusDetails => val status = buildStatusProto(rejectionStatusCode, rejectionStatusMessage, rejectionStatusDetails) submitters -> CompletionFromTransaction.rejectedCompletion( - submitters = submitters.iterator - .filter(internedParties) - .map(stringInterning.party.unsafe.externalize) - .toSet, + submitters = submitters.filter(internedParties).toSet, recordTime = recordTime, offset = offset, commandId = commandId, status = status, - userId = userId, + userId = stringInterning.userId.unsafe.externalize(internedUserId), optSubmissionId = submissionId, optDeduplicationOffset = deduplicationOffset, optDeduplicationDurationSeconds = deduplicationDurationSeconds, @@ -179,24 +178,24 @@ class CompletionStorageBackendTemplate( } private def completionParser( - internedParties: Set[Int] - ): RowParser[(Array[Int], CompletionStreamResponse)] = + internedParties: Set[Party] + ): RowParser[(Seq[Party], CompletionStreamResponse)] = acceptedCommandParser(internedParties) | rejectedCommandParser(internedParties) private val postPublishDataParser: RowParser[Option[PostPublishData]] = int("synchronizer_id") ~ str("message_uuid").? ~ long("record_time") ~ - str("user_id") ~ + int("user_id") ~ str("command_id") ~ - array[Int]("submitters") ~ + parties(stringInterning)("submitters") ~ offset("completion_offset") ~ long("publication_time") ~ str("submission_id").? ~ - str("update_id").? ~ + updateId("update_id").? 
~ traceContextOption("trace_context")(noTracingLogger) ~ bool("is_transaction") map { - case internedSynchronizerId ~ messageUuidString ~ recordTimeMicros ~ userId ~ + case internedSynchronizerId ~ messageUuidString ~ recordTimeMicros ~ internedUserId ~ commandId ~ submitters ~ offset ~ publicationTimeMicros ~ submissionId ~ updateIdOpt ~ traceContext ~ true => // note: for acceptance we only collect completions for transactions; for rejection we collect completions for both transactions and reassignments (is_transaction will be true in the rejected-reassignment case as well) Some( @@ -211,9 +210,9 @@ class CompletionStorageBackendTemplate( CantonTimestamp.ofEpochMicro(recordTimeMicros) ) ), - userId = Ref.UserId.assertFromString(userId), + userId = stringInterning.userId.externalize(internedUserId), commandId = Ref.CommandId.assertFromString(commandId), - actAs = submitters.view.map(stringInterning.party.externalize).toSet, + actAs = submitters.toSet, offset = offset, publicationTime = CantonTimestamp.ofEpochMicro(publicationTimeMicros), submissionId = submissionId.map(Ref.SubmissionId.assertFromString), diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/ContractStorageBackendTemplate.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/ContractStorageBackendTemplate.scala index beed78c455..8b96d066d1 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/ContractStorageBackendTemplate.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/ContractStorageBackendTemplate.scala @@ -3,20 +3,13 @@ package com.digitalasset.canton.platform.store.backend.common -import anorm.SqlParser.{array, byteArray, int} +import anorm.SqlParser.{bool, long} import anorm.{RowParser, ~} -import com.digitalasset.canton.data.Offset -import com.digitalasset.canton.platform.store.backend.ContractStorageBackend -import com.digitalasset.canton.platform.store.backend.ContractStorageBackend.{ - RawArchivedContract, - RawCreatedContract, -} -import com.digitalasset.canton.platform.store.backend.Conversions.{ - OffsetToStatement, - contractId, - timestampFromMicros, -} +import com.digitalasset.canton.platform.store.backend.Conversions.contractId import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.SqlStringInterpolation +import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.`SimpleSql ops` +import com.digitalasset.canton.platform.store.backend.{ContractStorageBackend, PersistentEventType} +import com.digitalasset.canton.platform.store.cache.LedgerEndCache import com.digitalasset.canton.platform.store.interfaces.LedgerDaoContractsReader.{ KeyAssigned, KeyState, @@ -24,143 +17,190 @@ import com.digitalasset.canton.platform.store.interfaces.LedgerDaoContractsReade } import com.digitalasset.canton.platform.store.interning.StringInterning import com.digitalasset.canton.platform.{ContractId, Key} +import com.digitalasset.canton.topology.SynchronizerId import java.sql.Connection class ContractStorageBackendTemplate( queryStrategy: QueryStrategy, stringInterning: StringInterning, + ledgerEndCache: LedgerEndCache, ) extends ContractStorageBackend { + /** Batch lookup of key states. + * + * If the backend does not support batch lookups, the implementation will fall back to sequential + * lookups. + */ + override def keyStatesNew(keys: Seq[Key], 
validAtEventSeqId: Long)( + connection: Connection + ): Map[Key, Long] = + keys.iterator + .flatMap(key => + keyStateNew(key, validAtEventSeqId)(connection) + .map(key -> _) + ) + .toMap + + /** Sequential lookup of key states */ + override def keyStateNew(key: Key, validAtEventSeqId: Long)( + connection: Connection + ): Option[Long] = { + import com.digitalasset.canton.platform.store.backend.Conversions.HashToStatement + SQL""" + WITH last_contract_key_create AS ( + SELECT lapi_events_activate_contract.* + FROM lapi_events_activate_contract + WHERE create_key_hash = ${key.hash} + AND event_sequential_id <= $validAtEventSeqId + ORDER BY event_sequential_id DESC + FETCH NEXT 1 ROW ONLY + ) + SELECT internal_contract_id + FROM last_contract_key_create + WHERE NOT EXISTS + (SELECT 1 + FROM lapi_events_deactivate_contract + WHERE + internal_contract_id = last_contract_key_create.internal_contract_id + AND event_sequential_id <= $validAtEventSeqId + AND event_type = ${PersistentEventType.ConsumingExercise.asInt} + )""" + .as(long("internal_contract_id").singleOpt)(connection) + } + + override def activeContractsNew(internalContractIds: Seq[Long], beforeEventSeqId: Long)( + connection: Connection + ): Map[Long, Boolean] = + if (internalContractIds.isEmpty) Map.empty + else { + SQL""" + SELECT + internal_contract_id, + NOT EXISTS ( + SELECT 1 + FROM lapi_events_deactivate_contract + WHERE + internal_contract_id = lapi_events_activate_contract.internal_contract_id + AND event_sequential_id <= $beforeEventSeqId + AND event_type = ${PersistentEventType.ConsumingExercise.asInt} + LIMIT 1 + ) active + FROM lapi_events_activate_contract + WHERE + internal_contract_id ${queryStrategy.anyOf(internalContractIds)} + AND event_sequential_id <= $beforeEventSeqId""" + .asVectorOf(long("internal_contract_id") ~ bool("active"))(connection) + .view + .map { case internalContractId ~ active => + internalContractId -> active + } + .toMap + } + + override def lastActivationsNew(synchronizerContracts: Iterable[(SynchronizerId, Long)])( + connection: Connection + ): Map[(SynchronizerId, Long), Long] = + ledgerEndCache() + .map { ledgerEnd => + synchronizerContracts.iterator.flatMap { case (synchronizerId, internalContractId) => + val internedSynchronizerId = stringInterning.synchronizerId.internalize(synchronizerId) + SQL""" + SELECT event_sequential_id + FROM lapi_events_activate_contract as activate + WHERE + internal_contract_id = $internalContractId AND + event_sequential_id <= ${ledgerEnd.lastEventSeqId} AND + EXISTS ( -- subquery for triggering (event_sequential_id) INCLUDE (synchronizer_id) index usage + SELECT 1 + FROM lapi_events_activate_contract as activate2 + WHERE + activate2.event_sequential_id = activate.event_sequential_id AND + activate2.synchronizer_id = $internedSynchronizerId + ) + ORDER BY event_sequential_id DESC + LIMIT 1""" + .as(long("event_sequential_id").singleOpt)(connection) + .map((synchronizerId, internalContractId) -> _) + }.toMap + } + .getOrElse(Map.empty) + override def supportsBatchKeyStateLookups: Boolean = false - override def keyStates(keys: Seq[Key], validAt: Offset)( + override def keyStates(keys: Seq[Key], validAtEventSeqId: Long)( connection: Connection - ): Map[Key, KeyState] = keys.map(key => key -> keyState(key, validAt)(connection)).toMap + ): Map[Key, KeyState] = keys.map(key => key -> keyState(key, validAtEventSeqId)(connection)).toMap - override def keyState(key: Key, validAt: Offset)(connection: Connection): KeyState = { - val resultParser = 
(contractId("contract_id") ~ array[Int]("flat_event_witnesses")).map { - case cId ~ stakeholders => - KeyAssigned(cId, stakeholders.view.map(stringInterning.party.externalize).toSet) - }.singleOpt + override def keyState(key: Key, validAtEventSeqId: Long)(connection: Connection): KeyState = { + val resultParser = contractId("contract_id").map(KeyAssigned.apply).singleOpt import com.digitalasset.canton.platform.store.backend.Conversions.HashToStatement SQL""" WITH last_contract_key_create AS ( SELECT lapi_events_create.* FROM lapi_events_create WHERE create_key_hash = ${key.hash} - AND event_offset <= $validAt - AND cardinality(flat_event_witnesses) > 0 -- exclude participant divulgence and transients + AND event_sequential_id <= $validAtEventSeqId + AND length(flat_event_witnesses) > 1 -- exclude participant divulgence and transients ORDER BY event_sequential_id DESC FETCH NEXT 1 ROW ONLY ) - SELECT contract_id, flat_event_witnesses + SELECT contract_id FROM last_contract_key_create WHERE NOT EXISTS (SELECT 1 FROM lapi_events_consuming_exercise WHERE contract_id = last_contract_key_create.contract_id - AND event_offset <= $validAt + AND event_sequential_id <= $validAtEventSeqId )""" .as(resultParser)(connection) .getOrElse(KeyUnassigned) } - private val archivedContractRowParser: RowParser[(ContractId, RawArchivedContract)] = - (contractId("contract_id") ~ array[Int]("flat_event_witnesses")) - .map { case coid ~ flatEventWitnesses => - coid -> RawArchivedContract( - flatEventWitnesses = flatEventWitnesses.view - .map(stringInterning.party.externalize) - .toSet - ) - } + private val contractIdRowParser: RowParser[ContractId] = + contractId("contract_id") - override def archivedContracts(contractIds: Seq[ContractId], before: Offset)( + override def archivedContracts(contractIds: Seq[ContractId], beforeEventSeqId: Long)( connection: Connection - ): Map[ContractId, RawArchivedContract] = - if (contractIds.isEmpty) Map.empty + ): Set[ContractId] = + if (contractIds.isEmpty) Set.empty else { SQL""" - SELECT contract_id, flat_event_witnesses + SELECT contract_id FROM lapi_events_consuming_exercise WHERE contract_id ${queryStrategy.anyOfBinary(contractIds.map(_.toBytes.toByteArray))} - AND event_offset <= $before - AND cardinality(flat_event_witnesses) > 0 -- exclude participant divulgence and transients""" - .as(archivedContractRowParser.*)(connection) - .toMap + AND event_sequential_id <= $beforeEventSeqId + AND length(flat_event_witnesses) > 1 -- exclude participant divulgence and transients""" + .as(contractIdRowParser.*)(connection) + .toSet } - private val rawCreatedContractRowParser - : RowParser[(ContractId, ContractStorageBackend.RawCreatedContract)] = - (contractId("contract_id") - ~ int("template_id") - ~ int("package_id") - ~ array[Int]("flat_event_witnesses") - ~ byteArray("create_argument") - ~ int("create_argument_compression").? - ~ timestampFromMicros("ledger_effective_time") - ~ array[Int]("create_signatories") - ~ byteArray("create_key_value").? - ~ int("create_key_value_compression").? - ~ array[Int]("create_key_maintainers").? 
- ~ byteArray("authentication_data")) - .map { - case coid ~ internedTemplateId ~ internedPackageId ~ flatEventWitnesses ~ createArgument ~ createArgumentCompression ~ ledgerEffectiveTime ~ signatories ~ createKey ~ createKeyCompression ~ keyMaintainers ~ authenticationData => - coid -> RawCreatedContract( - templateId = stringInterning.templateId.unsafe.externalize(internedTemplateId), - packageId = stringInterning.packageId.unsafe.externalize(internedPackageId), - flatEventWitnesses = - flatEventWitnesses.view.map(stringInterning.party.externalize).toSet, - createArgument = createArgument, - createArgumentCompression = createArgumentCompression, - ledgerEffectiveTime = ledgerEffectiveTime, - signatories = signatories.view.map(i => stringInterning.party.externalize(i)).toSet, - createKey = createKey, - createKeyCompression = createKeyCompression, - keyMaintainers = - keyMaintainers.map(_.view.map(i => stringInterning.party.externalize(i)).toSet), - authenticationData = authenticationData, - ) - } - - override def createdContracts(contractIds: Seq[ContractId], before: Offset)( + override def createdContracts(contractIds: Seq[ContractId], beforeEventSeqId: Long)( connection: Connection - ): Map[ContractId, RawCreatedContract] = - if (contractIds.isEmpty) Map.empty + ): Set[ContractId] = + if (contractIds.isEmpty) Set.empty else { SQL""" SELECT - contract_id, - template_id, - package_id, - flat_event_witnesses, - create_argument, - create_argument_compression, - ledger_effective_time, - create_signatories, - create_key_value, - create_key_value_compression, - create_key_maintainers, - authentication_data + contract_id FROM lapi_events_create WHERE contract_id ${queryStrategy.anyOfBinary(contractIds.map(_.toBytes.toByteArray))} - AND event_offset <= $before - AND cardinality(flat_event_witnesses) > 0 -- exclude participant divulgence and transients""" - .as(rawCreatedContractRowParser.*)(connection) - .toMap + AND event_sequential_id <= $beforeEventSeqId + AND length(flat_event_witnesses) > 1 -- exclude participant divulgence and transients""" + .as(contractIdRowParser.*)(connection) + .toSet } override def assignedContracts( contractIds: Seq[ContractId], - before: Offset, + beforeEventSeqId: Long, )( connection: Connection - ): Map[ContractId, RawCreatedContract] = - if (contractIds.isEmpty) Map.empty + ): Set[ContractId] = + if (contractIds.isEmpty) Set.empty else { SQL""" WITH min_event_sequential_ids_of_assign AS ( @@ -168,26 +208,49 @@ class ContractStorageBackendTemplate( FROM lapi_events_assign WHERE contract_id ${queryStrategy.anyOfBinary(contractIds.map(_.toBytes.toByteArray))} - AND event_offset <= $before + AND event_sequential_id <= $beforeEventSeqId GROUP BY contract_id ) SELECT - contract_id, - template_id, - package_id, - flat_event_witnesses, - create_argument, - create_argument_compression, - ledger_effective_time, - create_signatories, - create_key_value, - create_key_value_compression, - create_key_maintainers, - authentication_data + contract_id FROM lapi_events_assign, min_event_sequential_ids_of_assign WHERE event_sequential_id = min_event_sequential_ids_of_assign.min_event_sequential_id""" - .as(rawCreatedContractRowParser.*)(connection) - .toMap + .as(contractIdRowParser.*)(connection) + .toSet + } + + override def lastActivations( + synchronizerContracts: Iterable[(SynchronizerId, ContractId)] + )( + connection: Connection + ): Map[(SynchronizerId, ContractId), Long] = ledgerEndCache() + .map { ledgerEnd => + synchronizerContracts.iterator.flatMap { case 
(synchronizerId, contractId) => + val internedSynchronizerId = stringInterning.synchronizerId.internalize(synchronizerId) + val createEventSeqId = SQL""" + SELECT event_sequential_id + FROM lapi_events_create + WHERE + contract_id = ${contractId.toBytes.toByteArray} AND + synchronizer_id = $internedSynchronizerId AND + event_sequential_id <= ${ledgerEnd.lastEventSeqId} + -- activation (flat_event_witnesses) is not checked here: a non-divulged deactivation of a non-divulged create is invalid, and transients are never searched for in the first place + LIMIT 1""" + .as(long("event_sequential_id").singleOpt)(connection) + val assignEventSeqId = SQL""" + SELECT event_sequential_id + FROM lapi_events_assign + WHERE + contract_id = ${contractId.toBytes.toByteArray} AND + target_synchronizer_id = $internedSynchronizerId AND + event_sequential_id <= ${ledgerEnd.lastEventSeqId} + ORDER BY event_sequential_id DESC + LIMIT 1""" + .as(long("event_sequential_id").singleOpt)(connection) + List(createEventSeqId, assignEventSeqId).flatten.maxOption + .map((synchronizerId, contractId) -> _) + }.toMap } + .getOrElse(Map.empty) } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/EventReaderQueries.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/EventReaderQueries.scala index ae8c1a5ed5..a720f0278c 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/EventReaderQueries.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/EventReaderQueries.scala @@ -6,11 +6,17 @@ package com.digitalasset.canton.platform.store.backend.common import anorm.RowParser import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{ Entry, + RawAcsDeltaEventLegacy, RawArchivedEvent, - RawCreatedEvent, - RawFlatEvent, + RawArchivedEventLegacy, + RawCreatedEventLegacy, + RawThinCreatedEvent, +} +import com.digitalasset.canton.platform.store.backend.PersistentEventType +import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.{ + CompositeSql, + SqlStringInterpolation, } -import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.SqlStringInterpolation import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.* import com.digitalasset.canton.platform.store.interning.StringInterning import com.digitalasset.daml.lf.data.Ref.Party @@ -27,12 +33,117 @@ class EventReaderQueries(stringInterning: StringInterning) { case class SelectTable(tableName: String, selectColumns: String) def fetchContractIdEvents( + internalContractId: Long, + requestingParties: Option[Set[Party]], + endEventSequentialId: EventSequentialId, + )( + connection: Connection + ): (Option[RawThinCreatedEvent], Option[RawArchivedEvent]) = { + def queryByInternalContractId( + tableName: String, + eventType: PersistentEventType, + ascending: Boolean, + )(columns: CompositeSql) = + SQL""" + SELECT $columns + FROM #$tableName + WHERE + internal_contract_id = $internalContractId + AND event_sequential_id <= $endEventSequentialId + AND event_type = ${eventType.asInt} + ORDER BY event_sequential_id #${if (ascending) "ASC" else "DESC"} + LIMIT 1 + """ + + def lookupActivateCreated: Option[RawThinCreatedEvent] = + RowDefs + .rawThinCreatedEventParser( + stringInterning = stringInterning, + allQueryingPartiesO = 
requestingParties, + witnessIsAcsDelta = true, + eventIsAcsDelta = true, + ) + .queryMultipleRows( + queryByInternalContractId( + tableName = "lapi_events_activate_contract", + eventType = PersistentEventType.Create, + ascending = true, + ) + )(connection) + .headOption + + def lookupDeactivateArchived: Option[RawArchivedEvent] = + RowDefs + .rawArchivedEventParser( + stringInterning = stringInterning, + allQueryingPartiesO = requestingParties, + acsDelta = true, + ) + .queryMultipleRows( + queryByInternalContractId( + tableName = "lapi_events_deactivate_contract", + eventType = PersistentEventType.ConsumingExercise, + ascending = false, + ) + )(connection) + .headOption + + def lookupWitnessedCreated: Option[RawThinCreatedEvent] = + RowDefs + .rawThinCreatedEventParser( + stringInterning = stringInterning, + allQueryingPartiesO = requestingParties, + witnessIsAcsDelta = false, + eventIsAcsDelta = false, + ) + .queryMultipleRows( + queryByInternalContractId( + tableName = "lapi_events_various_witnessed", + eventType = PersistentEventType.WitnessedCreate, + ascending = true, + ) + )(connection) + .headOption + + def lookupTransientArchived(createOffset: Long): Option[RawArchivedEvent] = + RowDefs + .rawArchivedEventParser( + stringInterning = stringInterning, + allQueryingPartiesO = requestingParties, + acsDelta = false, + ) + .queryMultipleRows(columns => SQL""" + SELECT $columns + FROM lapi_events_various_witnessed + WHERE + internal_contract_id = $internalContractId + AND event_sequential_id <= $endEventSequentialId + AND event_type = ${PersistentEventType.WitnessedConsumingExercise.asInt} + AND event_offset = $createOffset + ORDER BY event_sequential_id + LIMIT 1 + """)(connection) + .headOption + + lookupActivateCreated + .map(create => Some(create) -> lookupDeactivateArchived) + .orElse( + lookupWitnessedCreated.flatMap(create => + lookupTransientArchived( + create.transactionProperties.commonEventProperties.offset + ).map(transientArchive => Some(create) -> Some(transientArchive)) + ) + ) + .getOrElse(None -> None) + } + + def fetchContractIdEventsLegacy( + contractId: ContractId, + requestingParties: Option[Set[Party]], + endEventSequentialId: EventSequentialId, + )( + connection: Connection - ): Vector[Entry[RawFlatEvent]] = { + ): Vector[Entry[RawAcsDeltaEventLegacy]] = { val witnessesColumn = "flat_event_witnesses" @@ -87,18 +198,18 @@ class EventReaderQueries(stringInterning: StringInterning) { private def selectLatestKeyCreateEvent( keyHash: String, - intRequestingParties: Set[Int], + requestingParties: Set[Party], extRequestingParties: Set[String], lastExclusiveSeqId: EventSequentialId, maxIterations: Int, )( conn: Connection - ): (Option[Entry[RawCreatedEvent]], Option[EventSequentialId]) = { + ): (Option[Entry[RawCreatedEventLegacy]], Option[EventSequentialId]) = { @tailrec def go( endExclusiveSeqId: EventSequentialId, iterations: Int, - ): (Option[Entry[RawCreatedEvent]], Option[EventSequentialId]) = { + ): (Option[Entry[RawCreatedEventLegacy]], Option[EventSequentialId]) = { val query = SQL""" WITH max_event AS ( @@ -113,7 +224,7 @@ class EventReaderQueries(stringInterning: StringInterning) { FROM max_event JOIN lapi_events_create c on c.event_sequential_id = max_event.sequential_id """ - query.as(createdEventParser(Some(intRequestingParties), stringInterning).singleOpt)( + query.as(createdEventParser(Some(requestingParties), stringInterning).singleOpt)( conn ) match { case Some(c) if c.event.witnessParties.exists(extRequestingParties) => @@ -126,9 +237,9 @@ class 
EventReaderQueries(stringInterning: StringInterning) { go(lastExclusiveSeqId, 1) } - private def selectArchivedEvent(contractId: Array[Byte], intRequestingParties: Set[Int])( + private def selectArchivedEvent(contractId: Array[Byte], requestingParties: Set[Party])( conn: Connection - ): Option[Entry[RawArchivedEvent]] = { + ): Option[Entry[RawArchivedEventLegacy]] = { val query = SQL""" SELECT #$selectColumnsForFlatTransactionsExercise, @@ -138,7 +249,7 @@ class EventReaderQueries(stringInterning: StringInterning) { FROM lapi_events_consuming_exercise WHERE contract_id = $contractId """ - query.as(archivedEventParser(Some(intRequestingParties), stringInterning).singleOpt)(conn) + query.as(archivedEventParser(Some(requestingParties), stringInterning).singleOpt)(conn) } def fetchNextKeyEvents( @@ -148,21 +259,18 @@ class EventReaderQueries(stringInterning: StringInterning) { maxIterations: Int, )( conn: Connection - ): (Option[RawCreatedEvent], Option[RawArchivedEvent], Option[EventSequentialId]) = { - - val intRequestingParties = - requestingParties.iterator.map(stringInterning.party.tryInternalize).flatMap(_.iterator).toSet + ): (Option[RawCreatedEventLegacy], Option[RawArchivedEventLegacy], Option[EventSequentialId]) = { val (createEvent, continuationToken) = selectLatestKeyCreateEvent( keyHash, - intRequestingParties, + requestingParties, requestingParties.map(identity), endExclusiveSeqId, maxIterations, )(conn) val archivedEvent = createEvent.flatMap(c => - selectArchivedEvent(c.event.contractId.toBytes.toByteArray, intRequestingParties)(conn) + selectArchivedEvent(c.event.contractId.toBytes.toByteArray, requestingParties)(conn) ) (createEvent.map(_.event), archivedEvent.map(_.event), continuationToken) @@ -170,14 +278,9 @@ class EventReaderQueries(stringInterning: StringInterning) { private def eventParser( requestingParties: Option[Set[Party]] - ): RowParser[Entry[RawFlatEvent]] = + ): RowParser[Entry[RawAcsDeltaEventLegacy]] = rawAcsDeltaEventParser( - requestingParties.map( - _.iterator - .map(stringInterning.party.tryInternalize) - .flatMap(_.iterator) - .toSet - ), + requestingParties, stringInterning, ) diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/EventStorageBackendTemplate.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/EventStorageBackendTemplate.scala index dd8ed1af42..8d7efec7ed 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/EventStorageBackendTemplate.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/EventStorageBackendTemplate.scala @@ -7,44 +7,44 @@ import anorm.SqlParser.* import anorm.{Row, RowParser, SimpleSql, ~} import com.digitalasset.canton.data.{CantonTimestamp, Offset} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.platform.Party import com.digitalasset.canton.platform.store.backend.Conversions.{ authorizationEventParser, contractId, hashFromHexString, offset, + parties, timestampFromMicros, + updateId, } -import com.digitalasset.canton.platform.store.backend.EventStorageBackend -import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{ - Entry, - RawActiveContract, - RawArchivedEvent, - RawAssignEvent, - RawCreatedEvent, - RawExercisedEvent, - RawFlatEvent, - RawReassignmentEvent, - RawTreeEvent, - 
RawUnassignEvent, - SequentialIdBatch, - SynchronizerOffset, - UnassignProperties, -} +import com.digitalasset.canton.platform.store.backend.EventStorageBackend.* +import com.digitalasset.canton.platform.store.backend.RowDef.* import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.{ CompositeSql, SqlStringInterpolation, } import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.* +import com.digitalasset.canton.platform.store.backend.{ + EventStorageBackend, + PersistentEventType, + RowDef, +} import com.digitalasset.canton.platform.store.cache.LedgerEndCache +import com.digitalasset.canton.platform.store.dao.PaginatingAsyncStream.PaginationInput import com.digitalasset.canton.platform.store.interning.StringInterning +import com.digitalasset.canton.platform.{ContractId, Party} +import com.digitalasset.canton.protocol.{ReassignmentId, UpdateId} import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.daml.lf.crypto.Hash import com.digitalasset.daml.lf.data.Ref -import com.digitalasset.daml.lf.data.Ref.{NameTypeConRef, NameTypeConRefConverter} +import com.digitalasset.daml.lf.data.Ref.{ + ChoiceName, + FullIdentifier, + Identifier, + NameTypeConRef, + NameTypeConRefConverter, +} import com.digitalasset.daml.lf.data.Time.Timestamp -import com.digitalasset.daml.lf.value.Value.ContractId import java.sql.Connection import scala.util.Using @@ -53,6 +53,274 @@ object EventStorageBackendTemplate { private val MaxBatchSizeOfIncompleteReassignmentOffsetTempTablePopulation: Int = 500 + object RowDefs { + // update related + val workflowId: RowDef[Option[String]] = + column("workflow_id", str(_).?) + def genSynchronizerId(columnName: String)(stringInterning: StringInterning): RowDef[String] = + column(columnName, int(_).map(stringInterning.synchronizerId.unsafe.externalize)) + def synchronizerId(stringInterning: StringInterning): RowDef[String] = + genSynchronizerId("synchronizer_id")(stringInterning) + def sourceSynchronizerId(stringInterning: StringInterning): RowDef[String] = + genSynchronizerId("source_synchronizer_id")(stringInterning) + def targetSynchronizerId(stringInterning: StringInterning): RowDef[String] = + genSynchronizerId("target_synchronizer_id")(stringInterning) + val eventOffset: RowDef[Long] = + column("event_offset", long) + val updateIdDef: RowDef[String] = + column("update_id", updateId(_).map(_.toHexString)) + def commandId( + stringInterning: StringInterning, + allQueryingPartiesO: Option[Set[Party]], + ): RowDef[Option[String]] = + combine( + column("command_id", str(_).?), + column("submitters", parties(stringInterning)(_).?), + )(filteredCommandId(_, _, allQueryingPartiesO)) + val traceContext: RowDef[Array[Byte]] = + column("trace_context", byteArray(_)) + val recordTime: RowDef[Timestamp] = + column("record_time", timestampFromMicros) + val externalTransactionHash: RowDef[Option[Array[Byte]]] = + column("external_transaction_hash", byteArray(_).?) 
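
The column/combine helpers used throughout RowDefs follow a small applicative pattern: each definition pairs the column names it selects with a parser for those columns, and combine merges several definitions into one case-class constructor. A rough sketch of the idea under assumed semantics (MiniRowDef and these helpers are illustrative stand-ins; the real RowDef additionally drives which columns get spliced into the SELECT clause by callers such as queryMultipleRows):

import java.sql.ResultSet

// Illustrative stand-in for RowDef: the selected column names plus a row parser.
final case class MiniRowDef[A](columns: Seq[String], parse: ResultSet => A) {
  def map[B](f: A => B): MiniRowDef[B] = MiniRowDef(columns, rs => f(parse(rs)))
}

def column[A](name: String, read: (ResultSet, String) => A): MiniRowDef[A] =
  MiniRowDef(Seq(name), rs => read(rs, name))

def static[A](value: A): MiniRowDef[A] =
  MiniRowDef(Seq.empty, _ => value) // constant definition, selects no columns

def combine[A, B, C](fa: MiniRowDef[A], fb: MiniRowDef[B])(f: (A, B) => C): MiniRowDef[C] =
  MiniRowDef(fa.columns ++ fb.columns, rs => f(fa.parse(rs), fb.parse(rs)))

// Example: compose two columns into a small properties record, as the defs above do.
final case class MiniProps(eventOffset: Long, workflowId: Option[String])
val miniPropsDef: MiniRowDef[MiniProps] =
  combine(
    column("event_offset", (rs, n) => rs.getLong(n)),
    column("workflow_id", (rs, n) => Option(rs.getString(n))),
  )(MiniProps.apply)

Keeping the column list inside the definition is what lets a caller interpolate exactly the selected columns into SELECT $columns, as the queries above do.
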
+ + // event related + val nodeId: RowDef[Int] = + column("node_id", int) + val eventSequentialId: RowDef[Long] = + column("event_sequential_id", long) + def filteredAdditionalWitnesses( + stringInterning: StringInterning, + allQueryingPartiesO: Option[Set[Party]], + )(witnessIsAcsDelta: Boolean): RowDef[Set[String]] = + if (witnessIsAcsDelta) + static(Set.empty) + else + column("additional_witnesses", parties(stringInterning)(_)) + .map(filterWitnesses(allQueryingPartiesO, _)) + val eventType: RowDef[PersistentEventType] = + column("event_type", int(_).map(PersistentEventType.fromInt)) + + // contract related + def representativePackageId(stringInterning: StringInterning): RowDef[String] = + column("representative_package_id", int(_).map(stringInterning.packageId.unsafe.externalize)) + val contractIdDef: RowDef[ContractId] = + column("contract_id", contractId) + val internalContractId: RowDef[Long] = + column("internal_contract_id", long) + val reassignmentCounter: RowDef[Long] = + column("reassignment_counter", long(_).?.map(_.getOrElse(0L))) + val ledgerEffectiveTime: RowDef[Timestamp] = + column("ledger_effective_time", timestampFromMicros) + def templateId(stringInterning: StringInterning): RowDef[FullIdentifier] = + combine( + column("template_id", int(_).map(stringInterning.templateId.externalize)), + column("package_id", int(_).map(stringInterning.packageId.externalize)), + )(_ toFullIdentifier _) + def filteredStakeholderParties( + stringInterning: StringInterning, + allQueryingPartiesO: Option[Set[Party]], + ): RowDef[Set[String]] = + // stakeholders are not present in various_witnessed, but exercises and transient/divulged contracts retrieved from there + column("stakeholders", parties(stringInterning)(_).?) + .map(_.getOrElse(Seq.empty)) + .map(filterWitnesses(allQueryingPartiesO, _)) + + // reassignment related + val reassignmentId: RowDef[String] = + column( + "reassignment_id", + byteArray(_).map(ReassignmentId.assertFromBytes(_).toProtoPrimitive), + ) + def submitter(stringInterning: StringInterning): RowDef[Option[String]] = + column("submitters", parties(stringInterning)(_).?.map(_.getOrElse(Seq.empty).headOption)) + val assignmentExclusivity: RowDef[Option[Timestamp]] = + column("assignment_exclusivity", timestampFromMicros(_).?) + + // exercise related + val consuming: RowDef[Boolean] = + column("consuming", bool(_)) + def exerciseChoice(stringInterning: StringInterning): RowDef[ChoiceName] = + column("exercise_choice", int(_).map(stringInterning.choiceName.externalize)) + def exerciseChoiceInterface(stringInterning: StringInterning): RowDef[Option[Identifier]] = + column( + "exercise_choice_interface", + int(_).?.map(_.map(stringInterning.interfaceId.externalize)), + ) + val exerciseArgument: RowDef[Array[Byte]] = + column("exercise_argument", byteArray(_)) + val exerciseArgumentCompression: RowDef[Option[Int]] = + column("exercise_argument_compression", int(_).?) + val exerciseResult: RowDef[Option[Array[Byte]]] = + column("exercise_result", byteArray(_).?) + val exerciseResultCompression: RowDef[Option[Int]] = + column("exercise_result_compression", int(_).?) 
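
Several of the definitions above (filteredAdditionalWitnesses, filteredStakeholderParties) funnel through filterWitnesses, whose contract the parsers rely on: no requesting-party filter means a wildcard query that keeps every witness, while an explicit filter intersects the witness set with the querying parties. A hedged reconstruction of that rule from its call sites (this body is an assumption for illustration, using plain String in place of Party; the real helper is defined elsewhere in the backend):

// Assumed semantics of filterWitnesses, reconstructed from the call sites above.
def filterWitnesses(
    allQueryingPartiesO: Option[Set[String]], // None encodes the wildcard (party-unfiltered) query
    witnesses: Seq[String],
): Set[String] =
  allQueryingPartiesO match {
    case None => witnesses.toSet // wildcard: every witness stays visible
    case Some(queryingParties) => witnesses.iterator.filter(queryingParties).toSet
  }

// e.g. filterWitnesses(Some(Set("alice")), Seq("alice", "bob")) == Set("alice")
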
+ def exerciseActors(stringInterning: StringInterning): RowDef[Set[String]] = + column("exercise_actors", parties(stringInterning)(_).map(_.map(_.toString).toSet)) + val exerciseLastDescendantNodeId: RowDef[Int] = + column("exercise_last_descendant_node_id", int) + + // properties + def commonEventPropertiesParser( + stringInterning: StringInterning + ): RowDef[CommonEventProperties] = + combine( + eventSequentialId, + eventOffset, + nodeId, + workflowId, + synchronizerId(stringInterning), + )(CommonEventProperties.apply) + + def commonUpdatePropertiesParser( + stringInterning: StringInterning, + allQueryingPartiesO: Option[Set[Party]], + ): RowDef[CommonUpdateProperties] = + combine( + updateIdDef, + commandId(stringInterning, allQueryingPartiesO), + traceContext, + recordTime, + )(CommonUpdateProperties.apply) + + def transactionPropertiesParser( + stringInterning: StringInterning, + allQueryingPartiesO: Option[Set[Party]], + ): RowDef[TransactionProperties] = + combine( + commonEventPropertiesParser(stringInterning), + commonUpdatePropertiesParser(stringInterning, allQueryingPartiesO), + externalTransactionHash, + )(TransactionProperties.apply) + + def reassignmentPropertiesParser( + stringInterning: StringInterning, + allQueryingPartiesO: Option[Set[Party]], + ): RowDef[ReassignmentProperties] = + combine( + commonEventPropertiesParser(stringInterning), + commonUpdatePropertiesParser(stringInterning, allQueryingPartiesO), + reassignmentId, + submitter(stringInterning), + reassignmentCounter, + )(ReassignmentProperties.apply) + + def thinCreatedEventPropertiesParser( + stringInterning: StringInterning, + allQueryingPartiesO: Option[Set[Party]], + witnessIsAcsDelta: Boolean, + eventIsAcsDelta: Boolean, + ): RowDef[ThinCreatedEventProperties] = + combine( + representativePackageId(stringInterning), + filteredAdditionalWitnesses(stringInterning, allQueryingPartiesO)(witnessIsAcsDelta), + internalContractId, + static(allQueryingPartiesO.map(_.map(_.toString))), + if (eventIsAcsDelta) reassignmentCounter else static(0L), + static(eventIsAcsDelta), + )(ThinCreatedEventProperties.apply) + + // raws + def rawThinActiveContractParser( + stringInterning: StringInterning, + allQueryingPartiesO: Option[Set[Party]], + ): RowDef[RawThinActiveContract] = + combine( + commonEventPropertiesParser(stringInterning), + thinCreatedEventPropertiesParser( + stringInterning = stringInterning, + allQueryingPartiesO = allQueryingPartiesO, + witnessIsAcsDelta = true, + eventIsAcsDelta = true, + ), + )(RawThinActiveContract.apply) + + def rawThinCreatedEventParser( + stringInterning: StringInterning, + allQueryingPartiesO: Option[Set[Party]], + witnessIsAcsDelta: Boolean, + eventIsAcsDelta: Boolean, + ): RowDef[RawThinCreatedEvent] = + combine( + transactionPropertiesParser(stringInterning, allQueryingPartiesO), + thinCreatedEventPropertiesParser( + stringInterning = stringInterning, + allQueryingPartiesO = allQueryingPartiesO, + witnessIsAcsDelta = witnessIsAcsDelta, + eventIsAcsDelta = eventIsAcsDelta, + ), + )(RawThinCreatedEvent.apply) + + def rawThinAssignEventParser( + stringInterning: StringInterning, + allQueryingPartiesO: Option[Set[Party]], + ): RowDef[RawThinAssignEvent] = + combine( + reassignmentPropertiesParser(stringInterning, allQueryingPartiesO), + thinCreatedEventPropertiesParser( + stringInterning = stringInterning, + allQueryingPartiesO = allQueryingPartiesO, + witnessIsAcsDelta = true, + eventIsAcsDelta = true, + ), + sourceSynchronizerId(stringInterning), + )(RawThinAssignEvent.apply) + + def 
rawArchivedEventParser( + stringInterning: StringInterning, + allQueryingPartiesO: Option[Set[Party]], + acsDelta: Boolean, + ): RowDef[RawArchivedEvent] = + combine( + transactionPropertiesParser(stringInterning, allQueryingPartiesO), + contractIdDef, + templateId(stringInterning), + if (acsDelta) filteredStakeholderParties(stringInterning, allQueryingPartiesO) + else static(Set.empty[String]), + ledgerEffectiveTime, + )(RawArchivedEvent.apply) + + def rawExercisedEventParser( + stringInterning: StringInterning, + allQueryingPartiesO: Option[Set[Party]], + eventIsAcsDelta: Boolean, + ): RowDef[RawExercisedEvent] = + combine( + transactionPropertiesParser(stringInterning, allQueryingPartiesO), + contractIdDef, + templateId(stringInterning), + if (eventIsAcsDelta) static(true) else consuming, + exerciseChoice(stringInterning), + exerciseChoiceInterface(stringInterning), + exerciseArgument, + exerciseArgumentCompression, + exerciseResult, + exerciseResultCompression, + exerciseActors(stringInterning), + exerciseLastDescendantNodeId, + filteredAdditionalWitnesses(stringInterning, allQueryingPartiesO)(witnessIsAcsDelta = + false + ), + if (eventIsAcsDelta) filteredStakeholderParties(stringInterning, allQueryingPartiesO) + else static(Set.empty[String]), + ledgerEffectiveTime, + static(eventIsAcsDelta), + )(RawExercisedEvent.apply) + + def rawUnassignEventParser( + stringInterning: StringInterning, + allQueryingPartiesO: Option[Set[Party]], + ): RowDef[RawUnassignEvent] = + combine( + reassignmentPropertiesParser(stringInterning, allQueryingPartiesO), + contractIdDef, + templateId(stringInterning), + filteredStakeholderParties(stringInterning, allQueryingPartiesO), + assignmentExclusivity, + targetSynchronizerId(stringInterning), + )(RawUnassignEvent.apply) + } + private val baseColumnsForFlatTransactionsCreate = Seq( "event_offset", @@ -64,6 +332,7 @@ object EventStorageBackendTemplate { "contract_id", "template_id", "package_id", + "representative_package_id", "create_argument", "create_argument_compression", "create_signatories", @@ -79,6 +348,7 @@ object EventStorageBackendTemplate { "record_time", "external_transaction_hash", "flat_event_witnesses", + "internal_contract_id", ) private val baseColumnsForFlatTransactionsExercise = @@ -92,6 +362,7 @@ object EventStorageBackendTemplate { "contract_id", "template_id", "package_id", + "NULL as representative_package_id", "NULL as create_argument", "NULL as create_argument_compression", "NULL as create_signatories", @@ -107,6 +378,7 @@ object EventStorageBackendTemplate { "record_time", "external_transaction_hash", "flat_event_witnesses", + "NULL as internal_contract_id", ) val selectColumnsForFlatTransactionsCreate: String = @@ -116,13 +388,13 @@ object EventStorageBackendTemplate { baseColumnsForFlatTransactionsExercise.mkString(", ") private type SharedRow = - Long ~ String ~ Int ~ Long ~ ContractId ~ Timestamp ~ Int ~ Int ~ Option[String] ~ - Option[String] ~ Array[Int] ~ Option[Array[Int]] ~ Int ~ Option[Array[Byte]] ~ Timestamp ~ + Long ~ UpdateId ~ Int ~ Long ~ ContractId ~ Timestamp ~ Int ~ Int ~ Option[String] ~ + Option[String] ~ Seq[Party] ~ Option[Seq[Party]] ~ Int ~ Option[Array[Byte]] ~ Timestamp ~ Option[Array[Byte]] - private val sharedRow: RowParser[SharedRow] = + private def sharedRow(stringInterning: StringInterning): RowParser[SharedRow] = long("event_offset") ~ - str("update_id") ~ + updateId("update_id") ~ int("node_id") ~ long("event_sequential_id") ~ contractId("contract_id") ~ @@ -131,58 +403,62 @@ object 
EventStorageBackendTemplate { int("package_id") ~ str("command_id").? ~ str("workflow_id").? ~ - array[Int]("event_witnesses") ~ - array[Int]("submitters").? ~ + parties(stringInterning)("event_witnesses") ~ + parties(stringInterning)("submitters").? ~ int("synchronizer_id") ~ byteArray("trace_context").? ~ timestampFromMicros("record_time") ~ byteArray("external_transaction_hash").? private type CreatedEventRow = - SharedRow ~ Array[Byte] ~ Option[Int] ~ Array[Int] ~ Array[Int] ~ - Option[Array[Byte]] ~ Option[Hash] ~ Option[Int] ~ Option[Array[Int]] ~ - Array[Byte] ~ Array[Int] + SharedRow ~ Array[Byte] ~ Option[Int] ~ Seq[Party] ~ Seq[Party] ~ + Option[Array[Byte]] ~ Option[Hash] ~ Option[Int] ~ Option[Seq[Party]] ~ + Array[Byte] ~ Seq[Party] ~ Int ~ Long - private val createdEventRow: RowParser[CreatedEventRow] = - sharedRow ~ + private def createdEventRow(stringInterning: StringInterning): RowParser[CreatedEventRow] = + sharedRow(stringInterning) ~ byteArray("create_argument") ~ int("create_argument_compression").? ~ - array[Int]("create_signatories") ~ - array[Int]("create_observers") ~ + parties(stringInterning)("create_signatories") ~ + parties(stringInterning)("create_observers") ~ byteArray("create_key_value").? ~ hashFromHexString("create_key_hash").? ~ int("create_key_value_compression").? ~ - array[Int]("create_key_maintainers").? ~ + parties(stringInterning)("create_key_maintainers").? ~ byteArray("authentication_data") ~ - array[Int]("flat_event_witnesses") + parties(stringInterning)("flat_event_witnesses") ~ + int("representative_package_id") ~ + long("internal_contract_id") private type ExercisedEventRow = - SharedRow ~ Boolean ~ String ~ Array[Byte] ~ Option[Int] ~ Option[Array[Byte]] ~ Option[Int] ~ - Array[Int] ~ Int ~ Option[Array[Int]] + SharedRow ~ Boolean ~ Int ~ Option[Int] ~ Array[Byte] ~ Option[Int] ~ + Option[Array[Byte]] ~ Option[Int] ~ Seq[Party] ~ Int ~ Option[Seq[Party]] - private val exercisedEventRow: RowParser[ExercisedEventRow] = { + private def exercisedEventRow(stringInterning: StringInterning): RowParser[ExercisedEventRow] = { import com.digitalasset.canton.platform.store.backend.Conversions.bigDecimalColumnToBoolean - sharedRow ~ + sharedRow(stringInterning) ~ bool("exercise_consuming") ~ - str("exercise_choice") ~ + int("exercise_choice") ~ + int("exercise_choice_interface").? ~ byteArray("exercise_argument") ~ int("exercise_argument_compression").? ~ byteArray("exercise_result").? ~ int("exercise_result_compression").? ~ - array[Int]("exercise_actors") ~ + parties(stringInterning)("exercise_actors") ~ int("exercise_last_descendant_node_id") ~ - array[Int]("flat_event_witnesses").? + parties(stringInterning)("flat_event_witnesses").? 
} private type ArchiveEventRow = SharedRow - private val archivedEventRow: RowParser[ArchiveEventRow] = sharedRow + private def archivedEventRow(stringInterning: StringInterning): RowParser[ArchiveEventRow] = + sharedRow(stringInterning) private[common] def createdEventParser( - allQueryingPartiesO: Option[Set[Int]], + allQueryingPartiesO: Option[Set[Party]], stringInterning: StringInterning, - ): RowParser[Entry[RawCreatedEvent]] = - createdEventRow map { + ): RowParser[Entry[RawCreatedEventLegacy]] = + createdEventRow(stringInterning) map { case offset ~ updateId ~ nodeId ~ @@ -193,7 +469,6 @@ object EventStorageBackendTemplate { packageId ~ commandId ~ workflowId ~ - eventWitnesses ~ submitters ~ internedSynchronizerId ~ @@ -209,45 +484,43 @@ object EventStorageBackendTemplate { createKeyValueCompression ~ createKeyMaintainers ~ authenticationData ~ - flatEventWitnesses => + flatEventWitnesses ~ + representativePackageId ~ + internalContractId => Entry( offset = offset, - updateId = updateId, + nodeId = nodeId, + updateId = updateId.toHexString, eventSequentialId = eventSequentialId, - ledgerEffectiveTime = ledgerEffectiveTime, + ledgerEffectiveTime = Some(ledgerEffectiveTime), commandId = filteredCommandId(commandId, submitters, allQueryingPartiesO), workflowId = workflowId, - event = RawCreatedEvent( - updateId = updateId, - offset = offset, - nodeId = nodeId, + event = RawCreatedEventLegacy( contractId = contractId, templateId = stringInterning.templateId .externalize(templateId) .toFullIdentifier(stringInterning.packageId.externalize(packageId)), - witnessParties = filterAndExternalizeWitnesses( + representativePackageId = + stringInterning.packageId.externalize(representativePackageId), + witnessParties = filterWitnesses( allQueryingPartiesO, eventWitnesses, - stringInterning, ), - flatEventWitnesses = filterAndExternalizeWitnesses( + flatEventWitnesses = filterWitnesses( allQueryingPartiesO, flatEventWitnesses, - stringInterning, ), - signatories = - createSignatories.view.map(stringInterning.party.unsafe.externalize).toSet, - observers = createObservers.view.map(stringInterning.party.unsafe.externalize).toSet, + signatories = createSignatories.toSet, + observers = createObservers.toSet, createArgument = createArgument, createArgumentCompression = createArgumentCompression, createKeyValue = createKeyValue, createKeyValueCompression = createKeyValueCompression, - createKeyMaintainers = createKeyMaintainers - .map(_.map(stringInterning.party.unsafe.externalize).toSet) - .getOrElse(Set.empty), + createKeyMaintainers = createKeyMaintainers.map(_.toSet[String]).getOrElse(Set.empty), ledgerEffectiveTime = ledgerEffectiveTime, createKeyHash = createKeyHash, authenticationData = authenticationData, + internalContractId = internalContractId, ), synchronizerId = stringInterning.synchronizerId.unsafe.externalize(internedSynchronizerId), @@ -258,10 +531,10 @@ object EventStorageBackendTemplate { } private[common] def archivedEventParser( - allQueryingPartiesO: Option[Set[Int]], + allQueryingPartiesO: Option[Set[Party]], stringInterning: StringInterning, - ): RowParser[Entry[RawArchivedEvent]] = - archivedEventRow map { + ): RowParser[Entry[RawArchivedEventLegacy]] = + archivedEventRow(stringInterning) map { case eventOffset ~ updateId ~ nodeId ~ @@ -280,45 +553,48 @@ object EventStorageBackendTemplate { externalTransactionHash => Entry( offset = eventOffset, - updateId = updateId, + nodeId = nodeId, + updateId = updateId.toHexString, eventSequentialId = eventSequentialId, - 
ledgerEffectiveTime = ledgerEffectiveTime, - commandId = filteredCommandId(commandId, submitters, allQueryingPartiesO), + ledgerEffectiveTime = Some(ledgerEffectiveTime), + commandId = filteredCommandId( + commandId, + submitters, + allQueryingPartiesO, + ), workflowId = workflowId, synchronizerId = stringInterning.synchronizerId.unsafe.externalize(internedSynchronizerId), traceContext = traceContext, recordTime = recordTime, externalTransactionHash = externalTransactionHash, - event = RawArchivedEvent( - updateId = updateId, - offset = eventOffset, - nodeId = nodeId, + event = RawArchivedEventLegacy( contractId = contractId, templateId = stringInterning.templateId .externalize(templateId) .toFullIdentifier(stringInterning.packageId.externalize(packageId)), - witnessParties = filterAndExternalizeWitnesses( + witnessParties = filterWitnesses( allQueryingPartiesO, flatEventWitnesses, - stringInterning, ), ), ) } def rawAcsDeltaEventParser( - allQueryingParties: Option[Set[Int]], + allQueryingParties: Option[Set[Party]], stringInterning: StringInterning, - ): RowParser[Entry[RawFlatEvent]] = - (createdEventParser(allQueryingParties, stringInterning): RowParser[Entry[RawFlatEvent]]) | + ): RowParser[Entry[RawAcsDeltaEventLegacy]] = + (createdEventParser(allQueryingParties, stringInterning): RowParser[ + Entry[RawAcsDeltaEventLegacy] + ]) | archivedEventParser(allQueryingParties, stringInterning) private def exercisedEventParser( - allQueryingPartiesO: Option[Set[Int]], + allQueryingPartiesO: Option[Set[Party]], stringInterning: StringInterning, - ): RowParser[Entry[RawExercisedEvent]] = - exercisedEventRow map { + ): RowParser[Entry[RawExercisedEventLegacy]] = + exercisedEventRow(stringInterning) map { case eventOffset ~ updateId ~ nodeId ~ @@ -336,7 +612,8 @@ object EventStorageBackendTemplate { recordTime ~ externalTransactionHash ~ exerciseConsuming ~ - choice ~ + choiceName ~ + choiceInterface ~ exerciseArgument ~ exerciseArgumentCompression ~ exerciseResult ~ @@ -346,52 +623,56 @@ object EventStorageBackendTemplate { flatEventWitnesses => Entry( offset = eventOffset, - updateId = updateId, + nodeId = nodeId, + updateId = updateId.toHexString, eventSequentialId = eventSequentialId, - ledgerEffectiveTime = ledgerEffectiveTime, - commandId = filteredCommandId(commandId, submitters, allQueryingPartiesO), + ledgerEffectiveTime = Some(ledgerEffectiveTime), + commandId = filteredCommandId( + commandId, + submitters, + allQueryingPartiesO, + ), workflowId = workflowId, synchronizerId = stringInterning.synchronizerId.unsafe.externalize(internedSynchronizerId), traceContext = traceContext, recordTime = recordTime, externalTransactionHash = externalTransactionHash, - event = RawExercisedEvent( - updateId = updateId, - offset = eventOffset, - nodeId = nodeId, + event = RawExercisedEventLegacy( contractId = contractId, templateId = stringInterning.templateId .externalize(templateId) .toFullIdentifier(stringInterning.packageId.externalize(packageId)), exerciseConsuming = exerciseConsuming, - exerciseChoice = choice, + exerciseChoice = stringInterning.choiceName.externalize(choiceName), + exerciseChoiceInterface = choiceInterface.map { interfaceId => + stringInterning.interfaceId.externalize(interfaceId) + }, exerciseArgument = exerciseArgument, exerciseArgumentCompression = exerciseArgumentCompression, exerciseResult = exerciseResult, exerciseResultCompression = exerciseResultCompression, - exerciseActors = - exerciseActors.view.map(stringInterning.party.unsafe.externalize).toSeq, + exerciseActors = 
exerciseActors, exerciseLastDescendantNodeId = exerciseLastDescendantNodeId, - witnessParties = filterAndExternalizeWitnesses( + witnessParties = filterWitnesses( allQueryingPartiesO, treeEventWitnesses, - stringInterning, ), - flatEventWitnesses = filterAndExternalizeWitnesses( + flatEventWitnesses = filterWitnesses( allQueryingPartiesO, - flatEventWitnesses.getOrElse(Array.empty), - stringInterning, + flatEventWitnesses.getOrElse(Seq.empty), ), ), ) } - def rawTreeEventParser( - allQueryingParties: Option[Set[Int]], + def rawLedgerEffectsEventParser( + allQueryingParties: Option[Set[Party]], stringInterning: StringInterning, - ): RowParser[Entry[RawTreeEvent]] = - (createdEventParser(allQueryingParties, stringInterning): RowParser[Entry[RawTreeEvent]]) | + ): RowParser[Entry[RawLedgerEffectsEventLegacy]] = + (createdEventParser(allQueryingParties, stringInterning): RowParser[ + Entry[RawLedgerEffectsEventLegacy] + ]) | exercisedEventParser(allQueryingParties, stringInterning) val selectColumnsForTransactionTreeCreate: String = Seq( @@ -403,6 +684,7 @@ object EventStorageBackendTemplate { "ledger_effective_time", "template_id", "package_id", + "representative_package_id", "workflow_id", "create_argument", "create_argument_compression", @@ -413,6 +695,7 @@ object EventStorageBackendTemplate { "create_key_value_compression", "create_key_maintainers", "NULL as exercise_choice", + "NULL as exercise_choice_interface", "NULL as exercise_argument", "NULL as exercise_argument_compression", "NULL as exercise_result", @@ -426,6 +709,7 @@ object EventStorageBackendTemplate { "record_time", "external_transaction_hash", "flat_event_witnesses", + "internal_contract_id", ).mkString(", ") def selectColumnsForTransactionTreeExercise(includeFlatEventWitnesses: Boolean): String = @@ -438,6 +722,7 @@ object EventStorageBackendTemplate { "ledger_effective_time", "template_id", "package_id", + "NULL as representative_package_id", "workflow_id", "NULL as create_argument", "NULL as create_argument_compression", @@ -448,6 +733,7 @@ object EventStorageBackendTemplate { "NULL as create_key_value_compression", "NULL as create_key_maintainers", "exercise_choice", + "exercise_choice_interface", "exercise_argument", "exercise_argument_compression", "exercise_result", @@ -461,6 +747,7 @@ object EventStorageBackendTemplate { "record_time", "external_transaction_hash", (if (includeFlatEventWitnesses) "" else "NULL as ") + "flat_event_witnesses", + "NULL as internal_contract_id", ).mkString(", ") val EventSequentialIdFirstLast: RowParser[(Long, Long)] = @@ -472,9 +759,9 @@ object EventStorageBackendTemplate { val partyToParticipantEventRow = long("event_sequential_id") ~ offset("event_offset") ~ - str("update_id") ~ + updateId("update_id") ~ int("party_id") ~ - str("participant_id") ~ + int("participant_id") ~ authorizationEventParser("participant_permission", "participant_authorization_event") ~ int("synchronizer_id") ~ timestampFromMicros("record_time") ~ @@ -495,9 +782,9 @@ object EventStorageBackendTemplate { traceContext => EventStorageBackend.RawParticipantAuthorization( offset = eventOffset, - updateId = updateId, + updateId = updateId.toHexString, partyId = stringInterning.party.unsafe.externalize(partyId), - participantId = participantId, + participantId = stringInterning.participantId.unsafe.externalize(participantId), authorizationEvent = authorizationEvent, recordTime = recordTime, synchronizerId = stringInterning.synchronizerId.unsafe.externalize(synchronizerId), @@ -505,40 +792,41 @@ object 
EventStorageBackendTemplate { ) } - val assignEventRow = + def assignEventRow(stringInterning: StringInterning) = str("command_id").? ~ str("workflow_id").? ~ long("event_offset") ~ int("source_synchronizer_id") ~ int("target_synchronizer_id") ~ - str("reassignment_id") ~ + byteArray("reassignment_id") ~ int("submitter").? ~ long("reassignment_counter") ~ - str("update_id") ~ + updateId("update_id") ~ contractId("contract_id") ~ int("template_id") ~ int("package_id") ~ - array[Int]("flat_event_witnesses") ~ - array[Int]("create_signatories") ~ - array[Int]("create_observers") ~ + parties(stringInterning)("flat_event_witnesses") ~ + parties(stringInterning)("create_signatories") ~ + parties(stringInterning)("create_observers") ~ byteArray("create_argument") ~ int("create_argument_compression").? ~ byteArray("create_key_value").? ~ int("create_key_value_compression").? ~ - array[Int]("create_key_maintainers").? ~ + parties(stringInterning)("create_key_maintainers").? ~ timestampFromMicros("ledger_effective_time") ~ hashFromHexString("create_key_hash").? ~ byteArray("authentication_data") ~ byteArray("trace_context").? ~ timestampFromMicros("record_time") ~ long("event_sequential_id") ~ - int("node_id") + int("node_id") ~ + long("internal_contract_id") private def assignEventParser( - allQueryingPartiesO: Option[Set[Int]], + allQueryingPartiesO: Option[Set[Party]], stringInterning: StringInterning, - ): RowParser[Entry[RawAssignEvent]] = - assignEventRow map { + ): RowParser[Entry[RawAssignEventLegacy]] = + assignEventRow(stringInterning) map { case commandId ~ workflowId ~ offset ~ @@ -565,54 +853,55 @@ object EventStorageBackendTemplate { traceContext ~ recordTime ~ eventSequentialId ~ - nodeId => - val witnessParties = filterAndExternalizeWitnesses( + nodeId ~ + internalContractId => + val witnessParties = filterWitnesses( allQueryingPartiesO, flatEventWitnesses, - stringInterning, ) Entry( offset = offset, - updateId = updateId, + nodeId = nodeId, + updateId = updateId.toHexString, eventSequentialId = eventSequentialId, - ledgerEffectiveTime = Timestamp.MinValue, // Not applicable - commandId = - filteredCommandId(commandId, submitter.map(Array[Int](_)), allQueryingPartiesO), + ledgerEffectiveTime = None, // Not applicable + commandId = filteredCommandId( + commandId, + submitter.map(s => Seq(stringInterning.party.externalize(s))), + allQueryingPartiesO, + ), workflowId = workflowId, synchronizerId = stringInterning.synchronizerId.unsafe.externalize(targetSynchronizerId), traceContext = traceContext, recordTime = recordTime, - event = RawAssignEvent( + event = RawAssignEventLegacy( sourceSynchronizerId = stringInterning.synchronizerId.unsafe.externalize(sourceSynchronizerId), targetSynchronizerId = stringInterning.synchronizerId.unsafe.externalize(targetSynchronizerId), - reassignmentId = reassignmentId, + reassignmentId = ReassignmentId.assertFromBytes(reassignmentId).toProtoPrimitive, submitter = submitter.map(stringInterning.party.unsafe.externalize), reassignmentCounter = reassignmentCounter, - rawCreatedEvent = RawCreatedEvent( - updateId = updateId, - offset = offset, - nodeId = nodeId, + rawCreatedEvent = RawCreatedEventLegacy( contractId = contractId, templateId = stringInterning.templateId .externalize(templateId) .toFullIdentifier(stringInterning.packageId.externalize(packageId)), + // TODO(#27872): Use the assignment representative package ID when available + representativePackageId = stringInterning.packageId.externalize(packageId), witnessParties = witnessParties, 
flatEventWitnesses = witnessParties, - signatories = - createSignatories.view.map(stringInterning.party.unsafe.externalize).toSet, - observers = createObservers.view.map(stringInterning.party.unsafe.externalize).toSet, + signatories = createSignatories.toSet, + observers = createObservers.toSet, createArgument = createArgument, createArgumentCompression = createArgumentCompression, - createKeyMaintainers = createKeyMaintainers - .map(_.view.map(stringInterning.party.unsafe.externalize).toSet) - .getOrElse(Set.empty), + createKeyMaintainers = createKeyMaintainers.map(_.toSet[String]).getOrElse(Set.empty), createKeyValue = createKeyValue, createKeyValueCompression = createKeyValueCompression, ledgerEffectiveTime = ledgerEffectiveTime, createKeyHash = createKeyHash, authenticationData = authenticationData, + internalContractId = internalContractId, ), ), // TODO(i26562) Assignments are not externally signed @@ -620,20 +909,20 @@ object EventStorageBackendTemplate { ) } - val unassignEventRow = + def unassignEventRow(stringInterning: StringInterning) = str("command_id").? ~ str("workflow_id").? ~ long("event_offset") ~ int("source_synchronizer_id") ~ int("target_synchronizer_id") ~ - str("reassignment_id") ~ + byteArray("reassignment_id") ~ int("submitter").? ~ long("reassignment_counter") ~ - str("update_id") ~ + updateId("update_id") ~ contractId("contract_id") ~ int("template_id") ~ int("package_id") ~ - array[Int]("flat_event_witnesses") ~ + parties(stringInterning)("flat_event_witnesses") ~ timestampFromMicros("assignment_exclusivity").? ~ byteArray("trace_context").? ~ timestampFromMicros("record_time") ~ @@ -641,10 +930,10 @@ object EventStorageBackendTemplate { int("node_id") private def unassignEventParser( - allQueryingPartiesO: Option[Set[Int]], + allQueryingPartiesO: Option[Set[Party]], stringInterning: StringInterning, - ): RowParser[Entry[RawUnassignEvent]] = - unassignEventRow map { + ): RowParser[Entry[RawUnassignEventLegacy]] = + unassignEventRow(stringInterning) map { case commandId ~ workflowId ~ offset ~ @@ -665,77 +954,71 @@ object EventStorageBackendTemplate { nodeId => Entry( offset = offset, - updateId = updateId, + nodeId = nodeId, + updateId = updateId.toHexString, eventSequentialId = eventSequentialId, - ledgerEffectiveTime = Timestamp.MinValue, // Not applicable - commandId = - filteredCommandId(commandId, submitter.map(Array[Int](_)), allQueryingPartiesO), + ledgerEffectiveTime = None, // Not applicable + commandId = filteredCommandId( + commandId, + submitter.map(s => Seq(stringInterning.party.externalize(s))), + allQueryingPartiesO, + ), workflowId = workflowId, synchronizerId = stringInterning.synchronizerId.unsafe.externalize(sourceSynchronizerId), traceContext = traceContext, recordTime = recordTime, - event = RawUnassignEvent( + event = RawUnassignEventLegacy( sourceSynchronizerId = stringInterning.synchronizerId.unsafe.externalize(sourceSynchronizerId), targetSynchronizerId = stringInterning.synchronizerId.unsafe.externalize(targetSynchronizerId), - reassignmentId = reassignmentId, + reassignmentId = ReassignmentId.assertFromBytes(reassignmentId).toProtoPrimitive, submitter = submitter.map(stringInterning.party.unsafe.externalize), reassignmentCounter = reassignmentCounter, contractId = contractId, templateId = stringInterning.templateId .externalize(templateId) .toFullIdentifier(stringInterning.packageId.externalize(packageId)), - witnessParties = filterAndExternalizeWitnesses( + witnessParties = filterWitnesses( allQueryingPartiesO, flatEventWitnesses, - 
stringInterning, ), assignmentExclusivity = assignmentExclusivity, - nodeId = nodeId, ), // TODO(i26562) Unassignments are not externally signed externalTransactionHash = None, ) } - def rawReassignmentEventParser( - allQueryingParties: Option[Set[Int]], - stringInterning: StringInterning, - ): RowParser[Entry[RawReassignmentEvent]] = - (assignEventParser(allQueryingParties, stringInterning): RowParser[ - Entry[RawReassignmentEvent] - ]) | - unassignEventParser(allQueryingParties, stringInterning) - - val assignActiveContractRow = + def assignActiveContractRow(stringInterning: StringInterning) = str("workflow_id").? ~ int("target_synchronizer_id") ~ long("reassignment_counter") ~ - str("update_id") ~ + updateId("update_id") ~ long("event_offset") ~ contractId("contract_id") ~ int("template_id") ~ int("package_id") ~ - array[Int]("flat_event_witnesses") ~ - array[Int]("create_signatories") ~ - array[Int]("create_observers") ~ + parties(stringInterning)("flat_event_witnesses") ~ + parties(stringInterning)("create_signatories") ~ + parties(stringInterning)("create_observers") ~ byteArray("create_argument") ~ int("create_argument_compression").? ~ byteArray("create_key_value").? ~ int("create_key_value_compression").? ~ - array[Int]("create_key_maintainers").? ~ + parties(stringInterning)("create_key_maintainers").? ~ timestampFromMicros("ledger_effective_time") ~ hashFromHexString("create_key_hash").? ~ byteArray("authentication_data") ~ long("event_sequential_id") ~ - int("node_id") + int("node_id") ~ + long("internal_contract_id") private def assignActiveContractParser( - allQueryingPartiesO: Option[Set[Int]], + allQueryingPartiesO: Option[Set[Party]], stringInterning: StringInterning, - ): RowParser[RawActiveContract] = - assignActiveContractRow map { + ): RowParser[RawActiveContractLegacy] = + assignActiveContractRow(stringInterning) map { case workflowId ~ targetSynchronizerId ~ reassignmentCounter ~ @@ -756,71 +1039,72 @@ object EventStorageBackendTemplate { createKeyHash ~ authenticationData ~ eventSequentialId ~ - nodeId => - val witnessParties = filterAndExternalizeWitnesses( + nodeId ~ + internalContractId => + val witnessParties = filterWitnesses( allQueryingPartiesO, flatEventWitnesses, - stringInterning, ) - RawActiveContract( + RawActiveContractLegacy( workflowId = workflowId, synchronizerId = stringInterning.synchronizerId.unsafe.externalize(targetSynchronizerId), reassignmentCounter = reassignmentCounter, - rawCreatedEvent = RawCreatedEvent( - updateId = updateId, - offset = offset, - nodeId = nodeId, + rawCreatedEvent = RawCreatedEventLegacy( contractId = contractId, templateId = stringInterning.templateId .externalize(templateId) .toFullIdentifier(stringInterning.packageId.externalize(packageId)), + // TODO(#27872): Use the assignment representative package ID when available + representativePackageId = stringInterning.packageId.externalize(packageId), witnessParties = witnessParties, flatEventWitnesses = witnessParties, - signatories = - createSignatories.view.map(stringInterning.party.unsafe.externalize).toSet, - observers = createObservers.view.map(stringInterning.party.unsafe.externalize).toSet, + signatories = createSignatories.toSet, + observers = createObservers.toSet, createArgument = createArgument, createArgumentCompression = createArgumentCompression, createKeyValue = createKeyValue, createKeyValueCompression = createKeyValueCompression, - createKeyMaintainers = createKeyMaintainers - .map(_.map(stringInterning.party.unsafe.externalize).toSet) - 
.getOrElse(Set.empty), + createKeyMaintainers = createKeyMaintainers.map(_.toSet[String]).getOrElse(Set.empty), ledgerEffectiveTime = ledgerEffectiveTime, createKeyHash = createKeyHash, authenticationData = authenticationData, + internalContractId = internalContractId, ), eventSequentialId = eventSequentialId, + nodeId = nodeId, + offset = offset, ) } - val createActiveContractRow = + def createActiveContractRow(stringInterning: StringInterning) = str("workflow_id").? ~ int("synchronizer_id") ~ - str("update_id") ~ + updateId("update_id") ~ long("event_offset") ~ contractId("contract_id") ~ int("template_id") ~ int("package_id") ~ - array[Int]("flat_event_witnesses") ~ - array[Int]("create_signatories") ~ - array[Int]("create_observers") ~ + int("representative_package_id") ~ + parties(stringInterning)("flat_event_witnesses") ~ + parties(stringInterning)("create_signatories") ~ + parties(stringInterning)("create_observers") ~ byteArray("create_argument") ~ int("create_argument_compression").? ~ byteArray("create_key_value").? ~ int("create_key_value_compression").? ~ - array[Int]("create_key_maintainers").? ~ + parties(stringInterning)("create_key_maintainers").? ~ timestampFromMicros("ledger_effective_time") ~ hashFromHexString("create_key_hash").? ~ byteArray("authentication_data") ~ long("event_sequential_id") ~ - int("node_id") + int("node_id") ~ + long("internal_contract_id") private def createActiveContractParser( - allQueryingPartiesO: Option[Set[Int]], + allQueryingPartiesO: Option[Set[Party]], stringInterning: StringInterning, - ): RowParser[RawActiveContract] = - createActiveContractRow map { + ): RowParser[RawActiveContractLegacy] = + createActiveContractRow(stringInterning) map { case workflowId ~ targetSynchronizerId ~ updateId ~ @@ -828,6 +1112,7 @@ object EventStorageBackendTemplate { contractId ~ templateId ~ packageId ~ + representativePackageId ~ flatEventWitnesses ~ createSignatories ~ createObservers ~ @@ -840,65 +1125,62 @@ object EventStorageBackendTemplate { createKeyHash ~ authenticationData ~ eventSequentialId ~ - nodeId => - val witnessParties = filterAndExternalizeWitnesses( + nodeId ~ + internalContractId => + val witnessParties = filterWitnesses( allQueryingPartiesO, flatEventWitnesses, - stringInterning, ) - RawActiveContract( + RawActiveContractLegacy( workflowId = workflowId, synchronizerId = stringInterning.synchronizerId.unsafe.externalize(targetSynchronizerId), reassignmentCounter = 0L, // zero for create - rawCreatedEvent = RawCreatedEvent( - updateId = updateId, - offset = offset, - nodeId = nodeId, + rawCreatedEvent = RawCreatedEventLegacy( contractId = contractId, templateId = stringInterning.templateId .externalize(templateId) .toFullIdentifier(stringInterning.packageId.externalize(packageId)), + representativePackageId = + stringInterning.packageId.externalize(representativePackageId), witnessParties = witnessParties, flatEventWitnesses = witnessParties, - signatories = - createSignatories.view.map(stringInterning.party.unsafe.externalize).toSet, - observers = createObservers.view.map(stringInterning.party.unsafe.externalize).toSet, + signatories = createSignatories.toSet, + observers = createObservers.toSet, createArgument = createArgument, createArgumentCompression = createArgumentCompression, - createKeyMaintainers = createKeyMaintainers - .map(_.map(stringInterning.party.unsafe.externalize).toSet) - .getOrElse(Set.empty), + createKeyMaintainers = createKeyMaintainers.map(_.toSet[String]).getOrElse(Set.empty), createKeyValue = createKeyValue, 
createKeyValueCompression = createKeyValueCompression, ledgerEffectiveTime = ledgerEffectiveTime, createKeyHash = createKeyHash, authenticationData = authenticationData, + internalContractId = internalContractId, ), eventSequentialId = eventSequentialId, + nodeId = nodeId, + offset = offset, ) } - private def filterAndExternalizeWitnesses( - allQueryingPartiesO: Option[Set[Int]], - flatEventWitnesses: Array[Int], - stringInterning: StringInterning, + private def filterWitnesses( + allQueryingPartiesO: Option[Set[Party]], + witnesses: Seq[Party], ): Set[String] = allQueryingPartiesO - .fold(flatEventWitnesses)(allQueryingParties => - flatEventWitnesses + .fold(witnesses)(allQueryingParties => + witnesses .filter(allQueryingParties) ) - .map(stringInterning.party.unsafe.externalize) .toSet private def filteredCommandId( commandId: Option[String], - submitters: Option[Array[Int]], - allQueryingPartiesO: Option[Set[Int]], + submitters: Option[Seq[Party]], + allQueryingPartiesO: Option[Set[Party]], ): Option[String] = { def submittersInQueryingParties: Boolean = allQueryingPartiesO match { case Some(allQueryingParties) => - submitters.getOrElse(Array.empty).exists(allQueryingParties) + submitters.getOrElse(Seq.empty).exists(allQueryingParties) case None => submitters.nonEmpty } commandId.filter(_ != "" && submittersInQueryingParties) @@ -930,6 +1212,7 @@ object EventStorageBackendTemplate { stringInterning: StringInterning ): RowParser[SynchronizerOffset] = synchronizerOffsetParser("event_offset", stringInterning) + } abstract class EventStorageBackendTemplate( @@ -943,15 +1226,10 @@ abstract class EventStorageBackendTemplate( import com.digitalasset.canton.platform.store.backend.Conversions.OffsetToStatement override def updatePointwiseQueries: UpdatePointwiseQueries = - new UpdatePointwiseQueries( - ledgerEndCache = ledgerEndCache, - stringInterning = stringInterning, - ) + new UpdatePointwiseQueries(ledgerEndCache) override def updateStreamingQueries: UpdateStreamingQueries = - new UpdateStreamingQueries( - stringInterning = stringInterning - ) + new UpdateStreamingQueries(stringInterning, queryStrategy) override def eventReaderQueries: EventReaderQueries = new EventReaderQueries(stringInterning) @@ -960,9 +1238,9 @@ abstract class EventStorageBackendTemplate( /** Deletes a subset of the indexed data (up to the pruning offset) in the following order and in * the manner specified: * 1. entries from filter for create stakeholders for which there is an archive for the - * corresponding create event or the corresponding create event is immediatly divulged, + * corresponding create event or the corresponding create event is immediately divulged, * 1. entries from filter for create non-stakeholder informees for which there is an archive - * for the corresponding create event or the corresponding create event is immediatly + * for the corresponding create event or the corresponding create event is immediately * divulged, * 1. all entries from filter for consuming stakeholders, * 1. all entries from filter for consuming non-stakeholders informees, @@ -973,7 +1251,7 @@ abstract class EventStorageBackendTemplate( * 1. all non-consuming events, * 1. transaction meta entries for which there exists at least one create event. 
*/ - override def pruneEvents( + override def pruneEventsLegacy( pruneUpToInclusive: Offset, incompleteReassignmentOffsets: Vector[Offset], )(implicit connection: Connection, traceContext: TraceContext): Unit = { @@ -1069,7 +1347,7 @@ abstract class EventStorageBackendTemplate( where -- do not prune incomplete ${reassignmentIsNotIncomplete("delete_events")} - -- do not prune if it is preceeded in the same synchronizer by an incomplete assign + -- do not prune if it is preceded in the same synchronizer by an incomplete assign -- this is needed so that incomplete assign is not resulting in an active contract and ${deactivationIsNotDirectlyPrecededByIncompleteAssign( "delete_events", @@ -1102,7 +1380,7 @@ abstract class EventStorageBackendTemplate( // Improvement idea: // In order to prune an id filter table we query an events table to discover // the event offset corresponding. - // This query can simplified not to query the events table at all + // This query can be simplified not to query the events table at all // if we were to prune by the sequential id rather than by the offset. def pruneIdFilterConsuming( idFilterTableName: String @@ -1198,7 +1476,7 @@ abstract class EventStorageBackendTemplate( createEventTableName, "synchronizer_id", pruneUpToInclusive, - )}) or cardinality(#$createEventTableName.flat_event_witnesses) = 0) + )}) or length(#$createEventTableName.flat_event_witnesses) <= 1) """ private def assignIsPrunable( @@ -1340,18 +1618,10 @@ abstract class EventStorageBackendTemplate( ) - 1 } - override def assignEventBatch( + override def assignEventBatchLegacy( eventSequentialIds: SequentialIdBatch, allFilterParties: Option[Set[Party]], - )(connection: Connection): Vector[Entry[RawAssignEvent]] = { - val allInternedFilterParties = - allFilterParties - .map( - _.iterator - .map(stringInterning.party.tryInternalize) - .flatMap(_.iterator) - .toSet - ) + )(connection: Connection): Vector[Entry[RawAssignEventLegacy]] = SQL""" SELECT * FROM lapi_events_assign assign_evs @@ -1359,20 +1629,12 @@ abstract class EventStorageBackendTemplate( ORDER BY assign_evs.event_sequential_id -- deliver in index order """ .withFetchSize(Some(fetchSize(eventSequentialIds))) - .asVectorOf(assignEventParser(allInternedFilterParties, stringInterning))(connection) - } + .asVectorOf(assignEventParser(allFilterParties, stringInterning))(connection) - override def unassignEventBatch( + override def unassignEventBatchLegacy( eventSequentialIds: SequentialIdBatch, allFilterParties: Option[Set[Party]], - )(connection: Connection): Vector[Entry[RawUnassignEvent]] = { - val allInternedFilterParties = allFilterParties - .map( - _.iterator - .map(stringInterning.party.tryInternalize) - .flatMap(_.iterator) - .toSet - ) + )(connection: Connection): Vector[Entry[RawUnassignEventLegacy]] = SQL""" SELECT * FROM lapi_events_unassign unassign_evs @@ -1380,126 +1642,96 @@ abstract class EventStorageBackendTemplate( ORDER BY unassign_evs.event_sequential_id -- deliver in index order """ .withFetchSize(Some(fetchSize(eventSequentialIds))) - .asVectorOf(unassignEventParser(allInternedFilterParties, stringInterning))(connection) - } + .asVectorOf(unassignEventParser(allFilterParties, stringInterning))(connection) - override def activeContractAssignEventBatch( + override def activeContractBatch( eventSequentialIds: Iterable[Long], allFilterParties: Option[Set[Party]], endInclusive: Long, - )(connection: Connection): Vector[RawActiveContract] = { - val allInternedFilterParties = - allFilterParties - .map( - _.iterator - 
.map(stringInterning.party.tryInternalize) - .flatMap(_.iterator) - .toSet - ) + )(connection: Connection): Vector[RawThinActiveContract] = + RowDefs + .rawThinActiveContractParser(stringInterning, allFilterParties) + .queryMultipleRows(columns => + SQL""" + SELECT $columns + FROM lapi_events_activate_contract + WHERE + event_sequential_id ${queryStrategy.anyOf(eventSequentialIds)} + ORDER BY event_sequential_id -- deliver in index order + """ + .withFetchSize(Some(eventSequentialIds.size)) + )(connection) + + override def activeContractAssignEventBatchLegacy( + eventSequentialIds: Iterable[Long], + allFilterParties: Option[Set[Party]], + endInclusive: Long, + )(connection: Connection): Vector[RawActiveContractLegacy] = SQL""" SELECT * FROM lapi_events_assign assign_evs WHERE assign_evs.event_sequential_id ${queryStrategy.anyOf(eventSequentialIds)} - AND NOT EXISTS ( -- check not archived as of snapshot in the same synchronizer - SELECT 1 - FROM lapi_events_consuming_exercise consuming_evs - WHERE - assign_evs.contract_id = consuming_evs.contract_id - AND assign_evs.target_synchronizer_id = consuming_evs.synchronizer_id - AND consuming_evs.event_sequential_id <= $endInclusive - ) - AND NOT EXISTS ( -- check not unassigned after as of snapshot in the same synchronizer - SELECT 1 - FROM lapi_events_unassign unassign_evs - WHERE - assign_evs.contract_id = unassign_evs.contract_id - AND assign_evs.target_synchronizer_id = unassign_evs.source_synchronizer_id - AND unassign_evs.event_sequential_id > assign_evs.event_sequential_id - AND unassign_evs.event_sequential_id <= $endInclusive - ${QueryStrategy.limitClause(Some(1))} - ) ORDER BY assign_evs.event_sequential_id -- deliver in index order """ .withFetchSize(Some(eventSequentialIds.size)) - .asVectorOf(assignActiveContractParser(allInternedFilterParties, stringInterning))(connection) - } + .asVectorOf(assignActiveContractParser(allFilterParties, stringInterning))(connection) - override def activeContractCreateEventBatch( + override def activeContractCreateEventBatchLegacy( eventSequentialIds: Iterable[Long], allFilterParties: Option[Set[Party]], endInclusive: Long, - )(connection: Connection): Vector[RawActiveContract] = { - val allInternedFilterParties = allFilterParties.map( - _.iterator - .map(stringInterning.party.tryInternalize) - .flatMap(_.iterator) - .toSet - ) + )(connection: Connection): Vector[RawActiveContractLegacy] = SQL""" SELECT * FROM lapi_events_create create_evs WHERE create_evs.event_sequential_id ${queryStrategy.anyOf(eventSequentialIds)} - AND NOT EXISTS ( -- check not archived as of snapshot in the same synchronizer - SELECT 1 - FROM lapi_events_consuming_exercise consuming_evs - WHERE - create_evs.contract_id = consuming_evs.contract_id - AND create_evs.synchronizer_id = consuming_evs.synchronizer_id - AND consuming_evs.event_sequential_id <= $endInclusive - ) - AND NOT EXISTS ( -- check not unassigned as of snapshot in the same synchronizer - SELECT 1 - FROM lapi_events_unassign unassign_evs - WHERE - create_evs.contract_id = unassign_evs.contract_id - AND create_evs.synchronizer_id = unassign_evs.source_synchronizer_id - AND unassign_evs.event_sequential_id <= $endInclusive - ${QueryStrategy.limitClause(Some(1))} - ) ORDER BY create_evs.event_sequential_id -- deliver in index order """ .withFetchSize(Some(eventSequentialIds.size)) - .asVectorOf(createActiveContractParser(allInternedFilterParties, stringInterning))(connection) - } + .asVectorOf(createActiveContractParser(allFilterParties, stringInterning))(connection) 
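For reference, a minimal, self-contained sketch of the visibility filtering that the legacy parsers above apply through filterWitnesses and filteredCommandId: witnesses are kept only when they intersect the querying-party set (a party-wildcard query passes None and keeps everything), and the command ID is surfaced only when one of its submitters is among the querying parties. The object and main method below are illustrative only, not part of this patch.

object VisibilityFilterSketch {
  type Party = String

  // Keep only witnesses visible to the querying parties; None means party-wildcard.
  def filterWitnesses(
      allQueryingPartiesO: Option[Set[Party]],
      witnesses: Seq[Party],
  ): Set[Party] =
    allQueryingPartiesO.fold(witnesses)(qs => witnesses.filter(qs)).toSet

  // Surface the command ID only if a submitter is among the querying parties
  // (for wildcard queries: only if there is any submitter at all).
  def filteredCommandId(
      commandId: Option[String],
      submitters: Option[Seq[Party]],
      allQueryingPartiesO: Option[Set[Party]],
  ): Option[String] = {
    def submitterVisible: Boolean = allQueryingPartiesO match {
      case Some(qs) => submitters.getOrElse(Seq.empty).exists(qs)
      case None => submitters.nonEmpty
    }
    commandId.filter(_.nonEmpty && submitterVisible)
  }

  def main(args: Array[String]): Unit = {
    assert(filterWitnesses(Some(Set("alice")), Seq("alice", "bob")) == Set("alice"))
    assert(filteredCommandId(Some("cmd"), Some(Seq("bob")), Some(Set("alice"))).isEmpty)
    assert(filteredCommandId(Some("cmd"), Some(Seq("alice")), None).contains("cmd"))
  }
}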
- override def fetchAssignEventIdsForStakeholder( + override def fetchAssignEventIdsForStakeholderLegacy( stakeholderO: Option[Party], templateId: Option[NameTypeConRef], - startExclusive: Long, - endInclusive: Long, - limit: Int, - )(connection: Connection): Vector[Long] = + )(connection: Connection): PaginationInput => Vector[Long] = UpdateStreamingQueries.fetchEventIds( tableName = "lapi_pe_assign_id_filter_stakeholder", witnessO = stakeholderO, templateIdO = templateId, - startExclusive = startExclusive, - endInclusive = endInclusive, - limit = limit, + idFilter = None, stringInterning = stringInterning, + hasFirstPerSequentialId = true, )(connection) - override def fetchUnassignEventIdsForStakeholder( + override def fetchUnassignEventIdsForStakeholderLegacy( stakeholderO: Option[Party], templateId: Option[NameTypeConRef], - startExclusive: Long, - endInclusive: Long, - limit: Int, - )(connection: Connection): Vector[Long] = + )(connection: Connection): PaginationInput => Vector[Long] = UpdateStreamingQueries.fetchEventIds( tableName = "lapi_pe_reassignment_id_filter_stakeholder", witnessO = stakeholderO, templateIdO = templateId, - startExclusive = startExclusive, - endInclusive = endInclusive, - limit = limit, + idFilter = None, stringInterning = stringInterning, + hasFirstPerSequentialId = true, )(connection) override def lookupAssignSequentialIdByOffset( offsets: Iterable[Long] + )(connection: Connection): Vector[Long] = + SQL""" + SELECT event_sequential_id + FROM lapi_events_activate_contract + WHERE + event_offset ${queryStrategy.anyOf(offsets)} + ORDER BY event_sequential_id -- deliver in index order + """ + .asVectorOf(long("event_sequential_id"))(connection) + + override def lookupAssignSequentialIdByOffsetLegacy( + offsets: Iterable[Long] )(connection: Connection): Vector[Long] = SQL""" SELECT event_sequential_id @@ -1512,6 +1744,18 @@ abstract class EventStorageBackendTemplate( override def lookupUnassignSequentialIdByOffset( offsets: Iterable[Long] + )(connection: Connection): Vector[Long] = + SQL""" + SELECT event_sequential_id + FROM lapi_events_deactivate_contract + WHERE + event_offset ${queryStrategy.anyOf(offsets)} + ORDER BY event_sequential_id -- deliver in index order + """ + .asVectorOf(long("event_sequential_id"))(connection) + + override def lookupUnassignSequentialIdByOffsetLegacy( + offsets: Iterable[Long] )(connection: Connection): Vector[Long] = SQL""" SELECT event_sequential_id @@ -1526,7 +1770,7 @@ abstract class EventStorageBackendTemplate( // it finds the sequential id of the assign that has the same contract and synchronizer ids and has the largest // sequential id < sequential id given // it returns the mapping from the tuple of the search parameters to the corresponding sequential id (if exists) - override def lookupAssignSequentialIdBy( + override def lookupAssignSequentialIdByLegacy( unassignProperties: Iterable[UnassignProperties] )(connection: Connection): Map[UnassignProperties, Long] = unassignProperties.flatMap { @@ -1550,7 +1794,7 @@ abstract class EventStorageBackendTemplate( } }.toMap - override def lookupCreateSequentialIdByContractId( + override def lookupCreateSequentialIdByContractIdLegacy( contractIds: Iterable[ContractId] )(connection: Connection): Vector[Long] = SQL""" @@ -1749,7 +1993,39 @@ abstract class EventStorageBackendTemplate( .headOption } - override def archivals(fromExclusive: Option[Offset], toInclusive: Offset)( + override def prunableContracts(fromExclusive: Option[Offset], toInclusive: Offset)( + connection: Connection 
+ ): Set[Long] = { + val fromExclusiveSeqId = + fromExclusive + .map(from => maxEventSequentialId(Some(from))(connection)) + .getOrElse(-1L) + val toInclusiveSeqId = maxEventSequentialId(Some(toInclusive))(connection) + val archivals = SQL""" + SELECT internal_contract_id + FROM lapi_events_deactivate_contract + WHERE + event_sequential_id > $fromExclusiveSeqId AND + event_sequential_id <= $toInclusiveSeqId AND + event_type = ${PersistentEventType.ConsumingExercise.asInt} + """ + .asVectorOf(long("internal_contract_id").?)(connection) + val divulgedAndTransientContracts = SQL""" + SELECT internal_contract_id + FROM lapi_events_various_witnessed + WHERE + event_sequential_id > $fromExclusiveSeqId AND + event_sequential_id <= $toInclusiveSeqId AND + event_type = ${PersistentEventType.WitnessedCreate.asInt} + """ + .asVectorOf(long("internal_contract_id").?)(connection) + archivals.iterator + .++(divulgedAndTransientContracts.iterator) + .flatten + .toSet + } + + override def archivalsLegacy(fromExclusive: Option[Offset], toInclusive: Offset)( connection: Connection ): Set[ContractId] = { val fromExclusiveSeqId = @@ -1768,19 +2044,15 @@ abstract class EventStorageBackendTemplate( .toSet } override def fetchTopologyPartyEventIds( - party: Option[Party], - startExclusive: Long, - endInclusive: Long, - limit: Int, - )(connection: Connection): Vector[Long] = + party: Option[Party] + )(connection: Connection): PaginationInput => Vector[Long] = UpdateStreamingQueries.fetchEventIds( tableName = "lapi_events_party_to_participant", witnessO = party, templateIdO = None, - startExclusive = startExclusive, - endInclusive = endInclusive, - limit = limit, + idFilter = None, stringInterning = stringInterning, + hasFirstPerSequentialId = false, )(connection) override def topologyPartyEventBatch( @@ -1815,19 +2087,141 @@ abstract class EventStorageBackendTemplate( .filter(offset => Option(offset) <= ledgerEndCache().map(_.lastOffset)) ) + private def fetchByEventSequentialIds( + tableName: String, + eventSequentialIds: SequentialIdBatch, + )(columns: CompositeSql): SimpleSql[Row] = + SQL""" + SELECT $columns + FROM #$tableName + WHERE ${queryStrategy.inBatch("event_sequential_id", eventSequentialIds)} + ORDER BY event_sequential_id + """.withFetchSize(Some(fetchSize(eventSequentialIds))) + + override def fetchEventPayloadsAcsDelta(target: EventPayloadSourceForUpdatesAcsDelta)( + eventSequentialIds: SequentialIdBatch, + requestingParties: Option[Set[Party]], + )(connection: Connection): Vector[RawThinAcsDeltaEvent] = + target match { + case EventPayloadSourceForUpdatesAcsDelta.Activate => + RowDefs.eventType + .branch( + PersistentEventType.Create -> RowDefs.rawThinCreatedEventParser( + stringInterning = stringInterning, + allQueryingPartiesO = requestingParties, + witnessIsAcsDelta = true, + eventIsAcsDelta = true, + ), + PersistentEventType.Assign -> RowDefs.rawThinAssignEventParser( + stringInterning = stringInterning, + allQueryingPartiesO = requestingParties, + ), + ) + .queryMultipleRows( + fetchByEventSequentialIds( + tableName = "lapi_events_activate_contract", + eventSequentialIds = eventSequentialIds, + ) + )(connection) + case EventPayloadSourceForUpdatesAcsDelta.Deactivate => + RowDefs.eventType + .branch( + PersistentEventType.ConsumingExercise -> RowDefs.rawArchivedEventParser( + stringInterning = stringInterning, + allQueryingPartiesO = requestingParties, + acsDelta = true, + ), + PersistentEventType.Unassign -> RowDefs.rawUnassignEventParser( + stringInterning = stringInterning, + 
allQueryingPartiesO = requestingParties, + ), + ) + .queryMultipleRows( + fetchByEventSequentialIds( + tableName = "lapi_events_deactivate_contract", + eventSequentialIds = eventSequentialIds, + ) + )(connection) + } + + override def fetchEventPayloadsLedgerEffects(target: EventPayloadSourceForUpdatesLedgerEffects)( + eventSequentialIds: SequentialIdBatch, + requestingParties: Option[Set[Party]], + )(connection: Connection): Vector[RawThinLedgerEffectsEvent] = + target match { + case EventPayloadSourceForUpdatesLedgerEffects.Activate => + RowDefs.eventType + .branch( + PersistentEventType.Create -> RowDefs.rawThinCreatedEventParser( + stringInterning = stringInterning, + allQueryingPartiesO = requestingParties, + witnessIsAcsDelta = false, + eventIsAcsDelta = true, + ), + PersistentEventType.Assign -> RowDefs.rawThinAssignEventParser( + stringInterning = stringInterning, + allQueryingPartiesO = requestingParties, + ), + ) + .queryMultipleRows( + fetchByEventSequentialIds( + tableName = "lapi_events_activate_contract", + eventSequentialIds = eventSequentialIds, + ) + )(connection) + case EventPayloadSourceForUpdatesLedgerEffects.Deactivate => + RowDefs.eventType + .branch( + PersistentEventType.ConsumingExercise -> RowDefs.rawExercisedEventParser( + stringInterning = stringInterning, + allQueryingPartiesO = requestingParties, + eventIsAcsDelta = true, + ), + PersistentEventType.Unassign -> RowDefs.rawUnassignEventParser( + stringInterning = stringInterning, + allQueryingPartiesO = requestingParties, + ), + ) + .queryMultipleRows( + fetchByEventSequentialIds( + tableName = "lapi_events_deactivate_contract", + eventSequentialIds = eventSequentialIds, + ) + )(connection) + case EventPayloadSourceForUpdatesLedgerEffects.VariousWitnessed => + RowDefs.eventType + .branch( + PersistentEventType.WitnessedCreate -> RowDefs.rawThinCreatedEventParser( + stringInterning = stringInterning, + allQueryingPartiesO = requestingParties, + witnessIsAcsDelta = false, + eventIsAcsDelta = false, + ), + PersistentEventType.WitnessedConsumingExercise -> RowDefs.rawExercisedEventParser( + stringInterning = stringInterning, + allQueryingPartiesO = requestingParties, + eventIsAcsDelta = false, + ), + PersistentEventType.NonConsumingExercise -> RowDefs.rawExercisedEventParser( + stringInterning = stringInterning, + allQueryingPartiesO = requestingParties, + eventIsAcsDelta = false, + ), + ) + .queryMultipleRows( + fetchByEventSequentialIds( + tableName = "lapi_events_various_witnessed", + eventSequentialIds = eventSequentialIds, + ) + )(connection) + } + private def fetchAcsDeltaEvents( tableName: String, selectColumns: String, eventSequentialIds: SequentialIdBatch, allFilterParties: Option[Set[Ref.Party]], - )(connection: Connection): Vector[Entry[RawFlatEvent]] = { - val internedAllParties: Option[Set[Int]] = allFilterParties - .map( - _.iterator - .map(stringInterning.party.tryInternalize) - .flatMap(_.iterator) - .toSet - ) + )(connection: Connection): Vector[Entry[RawAcsDeltaEventLegacy]] = SQL""" SELECT #$selectColumns, @@ -1841,22 +2235,21 @@ abstract class EventStorageBackendTemplate( event_sequential_id """ .withFetchSize(Some(fetchSize(eventSequentialIds))) - .asVectorOf(rawAcsDeltaEventParser(internedAllParties, stringInterning))(connection) - } + .asVectorOf(rawAcsDeltaEventParser(allFilterParties, stringInterning))(connection) - override def fetchEventPayloadsAcsDelta(target: EventPayloadSourceForUpdatesAcsDelta)( + override def fetchEventPayloadsAcsDeltaLegacy(target: 
EventPayloadSourceForUpdatesAcsDeltaLegacy)( eventSequentialIds: SequentialIdBatch, requestingParties: Option[Set[Ref.Party]], - )(connection: Connection): Vector[Entry[RawFlatEvent]] = + )(connection: Connection): Vector[Entry[RawAcsDeltaEventLegacy]] = target match { - case EventPayloadSourceForUpdatesAcsDelta.Consuming => + case EventPayloadSourceForUpdatesAcsDeltaLegacy.Consuming => fetchAcsDeltaEvents( tableName = "lapi_events_consuming_exercise", selectColumns = selectColumnsForFlatTransactionsExercise, eventSequentialIds = eventSequentialIds, allFilterParties = requestingParties, )(connection) - case EventPayloadSourceForUpdatesAcsDelta.Create => + case EventPayloadSourceForUpdatesAcsDeltaLegacy.Create => fetchAcsDeltaEvents( tableName = "lapi_events_create", selectColumns = selectColumnsForFlatTransactionsCreate, @@ -1870,14 +2263,7 @@ abstract class EventStorageBackendTemplate( selectColumns: String, eventSequentialIds: SequentialIdBatch, allFilterParties: Option[Set[Ref.Party]], - )(connection: Connection): Vector[Entry[RawTreeEvent]] = { - val internedAllParties: Option[Set[Int]] = allFilterParties - .map( - _.iterator - .map(stringInterning.party.tryInternalize) - .flatMap(_.iterator) - .toSet - ) + )(connection: Connection): Vector[Entry[RawLedgerEffectsEventLegacy]] = SQL""" SELECT #$selectColumns, @@ -1891,15 +2277,16 @@ abstract class EventStorageBackendTemplate( event_sequential_id """ .withFetchSize(Some(fetchSize(eventSequentialIds))) - .asVectorOf(rawTreeEventParser(internedAllParties, stringInterning))(connection) - } + .asVectorOf(rawLedgerEffectsEventParser(allFilterParties, stringInterning))(connection) - override def fetchEventPayloadsLedgerEffects(target: EventPayloadSourceForUpdatesLedgerEffects)( + override def fetchEventPayloadsLedgerEffectsLegacy( + target: EventPayloadSourceForUpdatesLedgerEffectsLegacy + )( eventSequentialIds: SequentialIdBatch, requestingParties: Option[Set[Ref.Party]], - )(connection: Connection): Vector[Entry[RawTreeEvent]] = + )(connection: Connection): Vector[Entry[RawLedgerEffectsEventLegacy]] = target match { - case EventPayloadSourceForUpdatesLedgerEffects.Consuming => + case EventPayloadSourceForUpdatesLedgerEffectsLegacy.Consuming => fetchLedgerEffectsEvents( tableName = "lapi_events_consuming_exercise", selectColumns = @@ -1908,7 +2295,7 @@ abstract class EventStorageBackendTemplate( eventSequentialIds = eventSequentialIds, allFilterParties = requestingParties, )(connection) - case EventPayloadSourceForUpdatesLedgerEffects.Create => + case EventPayloadSourceForUpdatesLedgerEffectsLegacy.Create => fetchLedgerEffectsEvents( tableName = "lapi_events_create", selectColumns = @@ -1916,7 +2303,7 @@ abstract class EventStorageBackendTemplate( eventSequentialIds = eventSequentialIds, allFilterParties = requestingParties, )(connection) - case EventPayloadSourceForUpdatesLedgerEffects.NonConsuming => + case EventPayloadSourceForUpdatesLedgerEffectsLegacy.NonConsuming => fetchLedgerEffectsEvents( tableName = "lapi_events_non_consuming_exercise", selectColumns = diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/Field.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/Field.scala index 8c98b82850..cf24fdbc29 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/Field.scala +++ 
b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/Field.scala @@ -98,6 +98,9 @@ private[backend] final case class BigintOptional[FROM]( override def convert: Option[Long] => java.lang.Long = _.map(Long.box).orNull } +private[backend] final case class Smallint[FROM](extract: StringInterning => FROM => Int) + extends TrivialField[FROM, Int] + private[backend] final case class SmallintOptional[FROM]( extract: StringInterning => FROM => Option[Int] ) extends Field[FROM, Option[Int], java.lang.Integer] { @@ -121,15 +124,3 @@ private[backend] final case class StringArray[FROM]( ) extends Field[FROM, Iterable[String], Array[String]] { override def convert: Iterable[String] => Array[String] = _.toArray } - -private[backend] final case class IntArray[FROM](extract: StringInterning => FROM => Iterable[Int]) - extends Field[FROM, Iterable[Int], Array[Int]] { - override def convert: Iterable[Int] => Array[Int] = _.toArray -} - -private[backend] final case class IntArrayOptional[FROM]( - extract: StringInterning => FROM => Option[Iterable[Int]] -) extends Field[FROM, Option[Iterable[Int]], Array[Int]] { - @SuppressWarnings(Array("org.wartremover.warts.Null")) - override def convert: Option[Iterable[Int]] => Array[Int] = _.map(_.toArray).orNull -} diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/IngestionStorageBackendTemplate.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/IngestionStorageBackendTemplate.scala index a25de4dd78..ed07963a1e 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/IngestionStorageBackendTemplate.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/IngestionStorageBackendTemplate.scala @@ -28,6 +28,22 @@ private[backend] class IngestionStorageBackendTemplate( List( SQL"DELETE FROM lapi_command_completions WHERE ${QueryStrategy .offsetIsGreater("completion_offset", ledgerOffset)}", + SQL"DELETE FROM lapi_events_activate_contract WHERE ${QueryStrategy + .eventSeqIdIsGreater("event_sequential_id", lastEventSequentialId)}", + SQL"DELETE FROM lapi_filter_activate_stakeholder WHERE ${QueryStrategy + .eventSeqIdIsGreater("event_sequential_id", lastEventSequentialId)}", + SQL"DELETE FROM lapi_filter_activate_witness WHERE ${QueryStrategy + .eventSeqIdIsGreater("event_sequential_id", lastEventSequentialId)}", + SQL"DELETE FROM lapi_events_deactivate_contract WHERE ${QueryStrategy + .eventSeqIdIsGreater("event_sequential_id", lastEventSequentialId)}", + SQL"DELETE FROM lapi_filter_deactivate_stakeholder WHERE ${QueryStrategy + .eventSeqIdIsGreater("event_sequential_id", lastEventSequentialId)}", + SQL"DELETE FROM lapi_filter_deactivate_witness WHERE ${QueryStrategy + .eventSeqIdIsGreater("event_sequential_id", lastEventSequentialId)}", + SQL"DELETE FROM lapi_events_various_witnessed WHERE ${QueryStrategy + .eventSeqIdIsGreater("event_sequential_id", lastEventSequentialId)}", + SQL"DELETE FROM lapi_filter_various_witness WHERE ${QueryStrategy + .eventSeqIdIsGreater("event_sequential_id", lastEventSequentialId)}", SQL"DELETE FROM lapi_events_create WHERE ${QueryStrategy.offsetIsGreater("event_offset", ledgerOffset)}", SQL"DELETE FROM lapi_events_consuming_exercise WHERE ${QueryStrategy .offsetIsGreater("event_offset", ledgerOffset)}", @@ -37,7 +53,7 @@ 
private[backend] class IngestionStorageBackendTemplate( SQL"DELETE FROM lapi_events_assign WHERE ${QueryStrategy.offsetIsGreater("event_offset", ledgerOffset)}", SQL"DELETE FROM lapi_party_entries WHERE ${QueryStrategy.offsetIsGreater("ledger_offset", ledgerOffset)}", SQL"DELETE FROM lapi_events_party_to_participant WHERE ${QueryStrategy - .offsetIsGreater("event_offset", ledgerOffset)}", + .eventSeqIdIsGreater("event_sequential_id", lastEventSequentialId)}", lastStringInterningIdO match { case None => SQL"DELETE FROM lapi_string_interning" case Some(lastStringInterningId) => diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/IntegrityStorageBackendImpl.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/IntegrityStorageBackendImpl.scala index 57929e8dda..a969a51204 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/IntegrityStorageBackendImpl.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/IntegrityStorageBackendImpl.scala @@ -3,13 +3,14 @@ package com.digitalasset.canton.platform.store.backend.common -import anorm.SqlParser.{array, int, long, str} +import anorm.SqlParser.{byteArray, int, long, str} import anorm.{RowParser, ~} import com.digitalasset.canton.data.{CantonTimestamp, Offset} import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.platform.store.backend.IntegrityStorageBackend import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.SqlStringInterpolation import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.`SimpleSql ops` +import com.digitalasset.canton.protocol.UpdateId import com.digitalasset.canton.topology.SynchronizerId import com.google.common.annotations.VisibleForTesting @@ -186,10 +187,10 @@ private[backend] object IntegrityStorageBackendImpl extends IntegrityStorageBack meta1.event_offset != meta2.event_offset FETCH NEXT 1 ROWS ONLY """ - .asSingleOpt(str("uId") ~ offset("offset1") ~ offset("offset2"))(connection) + .asSingleOpt(updateId("uId") ~ offset("offset1") ~ offset("offset2"))(connection) .foreach { case uId ~ offset1 ~ offset2 => throw new RuntimeException( - s"occurrence of duplicate update ID [$uId] found for offsets $offset1, $offset2" + s"occurrence of duplicate update ID [${uId.toHexString}] found for offsets $offset1, $offset2" ) } @@ -251,10 +252,10 @@ private[backend] object IntegrityStorageBackendImpl extends IntegrityStorageBack """ .asVectorOf( offset("completion_offset") ~ - str("user_id") ~ - array[Int]("submitters") ~ + int("user_id") ~ + byteArray("submitters") ~ str("command_id") ~ - str("update_id").? ~ + updateId("update_id").? ~ str("submission_id").? ~ str("message_uuid").? 
~ long("record_time") ~ @@ -262,7 +263,7 @@ private[backend] object IntegrityStorageBackendImpl extends IntegrityStorageBack case offset ~ userId ~ submitters ~ commandId ~ updateId ~ submissionId ~ messageUuid ~ recordTimeLong ~ synchronizerId => CompletionEntry( userId, - submitters.toList, + IntArrayDBSerialization.decodeFromByteArray(submitters).toList, commandId, updateId, submissionId, @@ -302,6 +303,24 @@ private[backend] object IntegrityStorageBackendImpl extends IntegrityStorageBack ) ) + // Verify no duplicate completion entry + val internalContractIds = SQL""" + SELECT + internal_contract_id + FROM par_contracts + """ + .asVectorOf(long("internal_contract_id"))(connection) + val firstTenDuplicatedInternalIds = internalContractIds + .groupMap(identity)(identity) + .iterator + .filter(_._2.sizeIs > 1) + .take(10) + .map(_._1) + .toSeq + if (firstTenDuplicatedInternalIds.nonEmpty) + throw new RuntimeException( + s"duplicate internal_contract_id-s found in table par_contracts (first 10 shown) $firstTenDuplicatedInternalIds" + ) } catch { case t: Throwable if !failForEmptyDB => val failure = t.getMessage @@ -339,7 +358,9 @@ private[backend] object IntegrityStorageBackendImpl extends IntegrityStorageBack */ @VisibleForTesting override def moveLedgerEndBackToScratch()(connection: Connection): Unit = { - SQL"DELETE FROM lapi_parameters".executeUpdate()(connection).discard + SQL"UPDATE lapi_parameters SET ledger_end = 1, ledger_end_sequential_id = 0" + .executeUpdate()(connection) + .discard SQL"DELETE FROM lapi_post_processing_end".executeUpdate()(connection).discard SQL"DELETE FROM lapi_ledger_end_synchronizer_index".executeUpdate()(connection).discard SQL"DELETE FROM par_command_deduplication".executeUpdate()(connection).discard @@ -347,10 +368,10 @@ private[backend] object IntegrityStorageBackendImpl extends IntegrityStorageBack } private final case class CompletionEntry( - userId: String, + userId: Int, submitters: List[Int], commandId: String, - updateId: Option[String], + updateId: Option[UpdateId], submissionId: Option[String], messageUuid: Option[String], recordTimeLong: Long, diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/QueryStrategy.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/QueryStrategy.scala index f557e4a999..2f6c8a6dd2 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/QueryStrategy.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/QueryStrategy.scala @@ -129,6 +129,14 @@ trait QueryStrategy { cSQL"= ANY($longArray)" } + /** ANY SQL clause generation for a number of smallint values + */ + def anyOfSmallInts(ints: Iterable[Int]): CompositeSql = { + val intArray: Array[java.lang.Integer] = + ints.view.map(Int.box).toArray + cSQL"= ANY($intArray)" + } + /** ANY SQL clause generation for a number of String values */ def anyOfStrings(strings: Iterable[String]): CompositeSql = { diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/Schema.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/Schema.scala index 67ec3062ef..2eae3123c6 100644 --- 
a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/Schema.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/Schema.scala @@ -3,6 +3,7 @@ package com.digitalasset.canton.platform.store.backend.common +import com.digitalasset.canton.platform.store.backend.Conversions.IntArrayDBSerialization.encodeToByteArray import com.digitalasset.canton.platform.store.backend.DbDto import com.digitalasset.canton.platform.store.interning.StringInterning @@ -32,16 +33,6 @@ private[backend] object AppendOnlySchema { ): Field[FROM, Iterable[String], _] = StringArray(extractor) - def intArray[FROM]( - extractor: StringInterning => FROM => Iterable[Int] - ): Field[FROM, Iterable[Int], _] = - IntArray(extractor) - - def intArrayOptional[FROM]( - extractor: StringInterning => FROM => Option[Iterable[Int]] - ): Field[FROM, Option[Iterable[Int]], _] = - IntArrayOptional(extractor) - def bytea[FROM]( extractor: StringInterning => FROM => Array[Byte] ): Field[FROM, Array[Byte], _] = @@ -52,6 +43,27 @@ private[backend] object AppendOnlySchema { ): Field[FROM, Option[Array[Byte]], _] = ByteaOptional(extractor) + def parties[FROM]( + extractor: FROM => Set[String] + ): Field[FROM, Array[Byte], _] = + bytea(stringInterning => + from => + encodeToByteArray( + extractor(from) + .map(stringInterning.party.unsafe.internalize) + ) + ) + + def partiesOptional[FROM]( + extractor: FROM => Option[Set[String]] + ): Field[FROM, Option[Array[Byte]], _] = + byteaOptional(stringInterning => + from => + extractor(from) + .map(_.map(stringInterning.party.unsafe.internalize)) + .map(encodeToByteArray) + ) + def bigint[FROM](extractor: StringInterning => FROM => Long): Field[FROM, Long, _] = Bigint(extractor) @@ -65,6 +77,11 @@ private[backend] object AppendOnlySchema { ): Field[FROM, Option[Int], _] = SmallintOptional(extractor) + def smallint[FROM]( + extractor: StringInterning => FROM => Int + ): Field[FROM, Int, _] = + Smallint(extractor) + def int[FROM](extractor: StringInterning => FROM => Int): Field[FROM, Int, _] = Integer(extractor) @@ -84,47 +101,216 @@ private[backend] object AppendOnlySchema { BooleanMandatory(extractor) def insert[FROM](tableName: String)(fields: (String, Field[FROM, _, _])*): Table[FROM] - def idempotentInsert[FROM](tableName: String, keyFieldIndex: Int, ordering: Ordering[FROM])( - fields: (String, Field[FROM, _, _])* - ): Table[FROM] } def apply(fieldStrategy: FieldStrategy): Schema[DbDto] = { - val eventsCreate: Table[DbDto.EventCreate] = - fieldStrategy.insert("lapi_events_create")( + def idFilter[T <: DbDto.IdFilterDbDto](tableName: String): Table[T] = + fieldStrategy.insert(tableName)( + "event_sequential_id" -> fieldStrategy.bigint(_ => _.idFilter.event_sequential_id), + "template_id" -> fieldStrategy.int(stringInterning => + dto => stringInterning.templateId.unsafe.internalize(dto.idFilter.template_id) + ), + "party_id" -> fieldStrategy.int(stringInterning => + dto => stringInterning.party.unsafe.internalize(dto.idFilter.party_id) + ), + "first_per_sequential_id" -> fieldStrategy.booleanOptional(_ => + dto => Option.when(dto.idFilter.first_per_sequential_id)(true) + ), + ) + + val eventActivate: Table[DbDto.EventActivate] = + fieldStrategy.insert("lapi_events_activate_contract")( + // update related columns "event_offset" -> fieldStrategy.bigint(_ => _.event_offset), - "update_id" -> fieldStrategy.string(_ => _.update_id), - "ledger_effective_time" -> 
fieldStrategy.bigint(_ => _.ledger_effective_time), + "update_id" -> fieldStrategy.bytea(_ => _.update_id), + "workflow_id" -> fieldStrategy.stringOptional(_ => _.workflow_id), "command_id" -> fieldStrategy.stringOptional(_ => _.command_id), + "submitters" -> fieldStrategy.partiesOptional(_.submitters), + "record_time" -> fieldStrategy.bigint(_ => _.record_time), + "synchronizer_id" -> fieldStrategy.int(stringInterning => + dbDto => stringInterning.synchronizerId.internalize(dbDto.synchronizer_id) + ), + "trace_context" -> fieldStrategy.bytea(_ => _.trace_context), + "external_transaction_hash" -> fieldStrategy.byteaOptional(_ => + _.external_transaction_hash + ), + + // event related columns + "event_type" -> fieldStrategy.smallint(_ => _.event_type), + "event_sequential_id" -> fieldStrategy.bigint(_ => _.event_sequential_id), + "node_id" -> fieldStrategy.int(_ => _.node_id), + "additional_witnesses" -> fieldStrategy.partiesOptional(_.additional_witnesses), + "source_synchronizer_id" -> fieldStrategy.intOptional(stringInterning => + _.source_synchronizer_id.map(stringInterning.synchronizerId.internalize) + ), + "reassignment_counter" -> fieldStrategy.bigintOptional(_ => _.reassignment_counter), + "reassignment_id" -> fieldStrategy.byteaOptional(_ => _.reassignment_id), + "representative_package_id" -> fieldStrategy.int(stringInterning => + dbDto => stringInterning.packageId.unsafe.internalize(dbDto.representative_package_id) + ), + + // contract related columns + "internal_contract_id" -> fieldStrategy.bigint(_ => _.internal_contract_id), + "create_key_hash" -> fieldStrategy.stringOptional(_ => _.create_key_hash), + ) + val idFilterActivateStakeholder: Table[DbDto.IdFilterActivateStakeholder] = + idFilter("lapi_filter_activate_stakeholder") + val idFilterActivateWitness: Table[DbDto.IdFilterActivateWitness] = + idFilter("lapi_filter_activate_witness") + + val eventDeactivate: Table[DbDto.EventDeactivate] = + fieldStrategy.insert("lapi_events_deactivate_contract")( + // update related columns + "event_offset" -> fieldStrategy.bigint(_ => _.event_offset), + "update_id" -> fieldStrategy.bytea(_ => _.update_id), "workflow_id" -> fieldStrategy.stringOptional(_ => _.workflow_id), - "submitters" -> fieldStrategy.intArrayOptional(stringInterning => - _.submitters.map(_.map(stringInterning.party.unsafe.internalize)) + "command_id" -> fieldStrategy.stringOptional(_ => _.command_id), + "submitters" -> fieldStrategy.partiesOptional(_.submitters), + "record_time" -> fieldStrategy.bigint(_ => _.record_time), + "synchronizer_id" -> fieldStrategy.int(stringInterning => + dbDto => stringInterning.synchronizerId.internalize(dbDto.synchronizer_id) ), + "trace_context" -> fieldStrategy.bytea(_ => _.trace_context), + "external_transaction_hash" -> fieldStrategy.byteaOptional(_ => + _.external_transaction_hash + ), + + // event related columns + "event_type" -> fieldStrategy.smallint(_ => _.event_type), + "event_sequential_id" -> fieldStrategy.bigint(_ => _.event_sequential_id), "node_id" -> fieldStrategy.int(_ => _.node_id), - "contract_id" -> fieldStrategy.bytea(_ => _.contract_id), + "deactivated_event_sequential_id" -> fieldStrategy.bigintOptional(_ => + _.deactivated_event_sequential_id + ), + "additional_witnesses" -> fieldStrategy.partiesOptional(_.additional_witnesses), + "exercise_choice" -> fieldStrategy.intOptional(stringInterning => + _.exercise_choice.map(stringInterning.choiceName.unsafe.internalize) + ), + "exercise_choice_interface" -> fieldStrategy.intOptional(stringInterning => + 
_.exercise_choice_interface_id.map(stringInterning.interfaceId.unsafe.internalize) + ), + "exercise_argument" -> fieldStrategy.byteaOptional(_ => _.exercise_argument), + "exercise_result" -> fieldStrategy.byteaOptional(_ => _.exercise_result), + "exercise_actors" -> fieldStrategy.partiesOptional(_.exercise_actors), + "exercise_last_descendant_node_id" -> fieldStrategy.intOptional(_ => + _.exercise_last_descendant_node_id + ), + "exercise_argument_compression" -> fieldStrategy.smallintOptional(_ => + _.exercise_argument_compression + ), + "exercise_result_compression" -> fieldStrategy.smallintOptional(_ => + _.exercise_result_compression + ), + "reassignment_id" -> fieldStrategy.byteaOptional(_ => _.reassignment_id), + "assignment_exclusivity" -> fieldStrategy.bigintOptional(_ => _.assignment_exclusivity), + "target_synchronizer_id" -> fieldStrategy.intOptional(stringInterning => + _.target_synchronizer_id.map(stringInterning.synchronizerId.internalize) + ), + "reassignment_counter" -> fieldStrategy.bigintOptional(_ => _.reassignment_counter), + + // contract related columns + "contract_id" -> fieldStrategy.bytea(_ => _.contract_id.toBytes.toByteArray), + "internal_contract_id" -> fieldStrategy.bigintOptional(_ => _.internal_contract_id), "template_id" -> fieldStrategy.int(stringInterning => dbDto => stringInterning.templateId.unsafe.internalize(dbDto.template_id) ), "package_id" -> fieldStrategy.int(stringInterning => dbDto => stringInterning.packageId.unsafe.internalize(dbDto.package_id) ), - "flat_event_witnesses" -> fieldStrategy.intArray(stringInterning => - _.flat_event_witnesses.map(stringInterning.party.unsafe.internalize) + "stakeholders" -> fieldStrategy.parties(_.stakeholders), + "ledger_effective_time" -> fieldStrategy.bigintOptional(_ => _.ledger_effective_time), + ) + val idFilterDeactivateStakeholder: Table[DbDto.IdFilterDeactivateStakeholder] = + idFilter("lapi_filter_deactivate_stakeholder") + val idFilterDeactivateWitness: Table[DbDto.IdFilterDeactivateWitness] = + idFilter("lapi_filter_deactivate_witness") + + val eventVariousWitnessed: Table[DbDto.EventVariousWitnessed] = + fieldStrategy.insert("lapi_events_various_witnessed")( + // update related columns + "event_offset" -> fieldStrategy.bigint(_ => _.event_offset), + "update_id" -> fieldStrategy.bytea(_ => _.update_id), + "workflow_id" -> fieldStrategy.stringOptional(_ => _.workflow_id), + "command_id" -> fieldStrategy.stringOptional(_ => _.command_id), + "submitters" -> fieldStrategy.partiesOptional(_.submitters), + "record_time" -> fieldStrategy.bigint(_ => _.record_time), + "synchronizer_id" -> fieldStrategy.int(stringInterning => + dbDto => stringInterning.synchronizerId.internalize(dbDto.synchronizer_id) ), - "tree_event_witnesses" -> fieldStrategy.intArray(stringInterning => - _.tree_event_witnesses.map(stringInterning.party.unsafe.internalize) + "trace_context" -> fieldStrategy.bytea(_ => _.trace_context), + "external_transaction_hash" -> fieldStrategy.byteaOptional(_ => + _.external_transaction_hash ), - "create_argument" -> fieldStrategy.bytea(_ => _.create_argument), - "create_signatories" -> fieldStrategy.intArray(stringInterning => - _.create_signatories.map(stringInterning.party.unsafe.internalize) + + // event related columns + "event_type" -> fieldStrategy.smallint(_ => _.event_type), + "event_sequential_id" -> fieldStrategy.bigint(_ => _.event_sequential_id), + "node_id" -> fieldStrategy.int(_ => _.node_id), + "additional_witnesses" -> fieldStrategy.parties(_.additional_witnesses), + "consuming" -> 
fieldStrategy.booleanOptional(_ => _.consuming), + "exercise_choice" -> fieldStrategy.intOptional(stringInterning => + _.exercise_choice.map(stringInterning.choiceName.unsafe.internalize) ), - "create_observers" -> fieldStrategy.intArray(stringInterning => - _.create_observers.map(stringInterning.party.unsafe.internalize) + "exercise_choice_interface" -> fieldStrategy.intOptional(stringInterning => + _.exercise_choice_interface_id.map(stringInterning.interfaceId.unsafe.internalize) ), - "create_key_value" -> fieldStrategy.byteaOptional(_ => _.create_key_value), - "create_key_maintainers" -> fieldStrategy.intArrayOptional(stringInterning => - _.create_key_maintainers.map(_.map(stringInterning.party.unsafe.internalize)) + "exercise_argument" -> fieldStrategy.byteaOptional(_ => _.exercise_argument), + "exercise_result" -> fieldStrategy.byteaOptional(_ => _.exercise_result), + "exercise_actors" -> fieldStrategy.partiesOptional(_.exercise_actors), + "exercise_last_descendant_node_id" -> fieldStrategy.intOptional(_ => + _.exercise_last_descendant_node_id + ), + "exercise_argument_compression" -> fieldStrategy.smallintOptional(_ => + _.exercise_argument_compression + ), + "exercise_result_compression" -> fieldStrategy.smallintOptional(_ => + _.exercise_result_compression + ), + "representative_package_id" -> fieldStrategy.intOptional(stringInterning => + _.representative_package_id.map(stringInterning.packageId.unsafe.internalize) + ), + + // contract related columns + "contract_id" -> fieldStrategy.byteaOptional(_ => _.contract_id.map(_.toBytes.toByteArray)), + "internal_contract_id" -> fieldStrategy.bigintOptional(_ => _.internal_contract_id), + "template_id" -> fieldStrategy.intOptional(stringInterning => + _.template_id.map(stringInterning.templateId.unsafe.internalize) ), + "package_id" -> fieldStrategy.intOptional(stringInterning => + _.package_id.map(stringInterning.packageId.unsafe.internalize) + ), + "ledger_effective_time" -> fieldStrategy.bigintOptional(_ => _.ledger_effective_time), + ) + val idFilterVariousWitness: Table[DbDto.IdFilterVariousWitness] = + idFilter("lapi_filter_various_witness") + + // TODO(#28008) remove + val eventsCreate: Table[DbDto.EventCreate] = + fieldStrategy.insert("lapi_events_create")( + "event_offset" -> fieldStrategy.bigint(_ => _.event_offset), + "update_id" -> fieldStrategy.bytea(_ => _.update_id), + "ledger_effective_time" -> fieldStrategy.bigint(_ => _.ledger_effective_time), + "command_id" -> fieldStrategy.stringOptional(_ => _.command_id), + "workflow_id" -> fieldStrategy.stringOptional(_ => _.workflow_id), + "submitters" -> fieldStrategy.partiesOptional(_.submitters), + "node_id" -> fieldStrategy.int(_ => _.node_id), + "contract_id" -> fieldStrategy.bytea(_ => _.contract_id.toBytes.toByteArray), + "template_id" -> fieldStrategy.int(stringInterning => + dbDto => stringInterning.templateId.unsafe.internalize(dbDto.template_id) + ), + "package_id" -> fieldStrategy.int(stringInterning => + dbDto => stringInterning.packageId.unsafe.internalize(dbDto.package_id) + ), + "representative_package_id" -> fieldStrategy.int(stringInterning => + dbDto => stringInterning.packageId.unsafe.internalize(dbDto.representative_package_id) + ), + "flat_event_witnesses" -> fieldStrategy.parties(_.flat_event_witnesses), + "tree_event_witnesses" -> fieldStrategy.parties(_.tree_event_witnesses), + "create_argument" -> fieldStrategy.bytea(_ => _.create_argument), + "create_signatories" -> fieldStrategy.parties(_.create_signatories), + "create_observers" -> 
fieldStrategy.parties(_.create_observers), + "create_key_value" -> fieldStrategy.byteaOptional(_ => _.create_key_value), + "create_key_maintainers" -> fieldStrategy.partiesOptional(_.create_key_maintainers), "create_key_hash" -> fieldStrategy.stringOptional(_ => _.create_key_hash), "event_sequential_id" -> fieldStrategy.bigint(_ => _.event_sequential_id), "create_argument_compression" -> fieldStrategy.smallintOptional(_ => @@ -135,31 +321,36 @@ private[backend] object AppendOnlySchema { ), "authentication_data" -> fieldStrategy.bytea(_ => _.authentication_data), "synchronizer_id" -> fieldStrategy.int(stringInterning => - dbDto => stringInterning.synchronizerId.unsafe.internalize(dbDto.synchronizer_id) + dbDto => stringInterning.synchronizerId.internalize(dbDto.synchronizer_id) ), "trace_context" -> fieldStrategy.bytea(_ => _.trace_context), "record_time" -> fieldStrategy.bigint(_ => _.record_time), - "external_transaction_hash" -> fieldStrategy.byteaOptional(_ => _.external_transaction_hash), + "external_transaction_hash" -> fieldStrategy.byteaOptional(_ => + _.external_transaction_hash + ), + "internal_contract_id" -> fieldStrategy.bigint(_ => _.internal_contract_id), ) + // TODO(#28008) remove val exerciseFields: Vector[(String, Field[DbDto.EventExercise, _, _])] = Vector[(String, Field[DbDto.EventExercise, _, _])]( "event_offset" -> fieldStrategy.bigint(_ => _.event_offset), - "contract_id" -> fieldStrategy.bytea(_ => _.contract_id), - "update_id" -> fieldStrategy.string(_ => _.update_id), + "contract_id" -> fieldStrategy.bytea(_ => _.contract_id.toBytes.toByteArray), + "update_id" -> fieldStrategy.bytea(_ => _.update_id), "ledger_effective_time" -> fieldStrategy.bigint(_ => _.ledger_effective_time), "node_id" -> fieldStrategy.int(_ => _.node_id), "command_id" -> fieldStrategy.stringOptional(_ => _.command_id), "workflow_id" -> fieldStrategy.stringOptional(_ => _.workflow_id), - "submitters" -> fieldStrategy.intArrayOptional(stringInterning => - _.submitters.map(_.map(stringInterning.party.unsafe.internalize)) + "submitters" -> fieldStrategy.partiesOptional(_.submitters), + "exercise_choice" -> fieldStrategy.int(stringInterning => + dto => stringInterning.choiceName.unsafe.internalize(dto.exercise_choice) + ), + "exercise_choice_interface" -> fieldStrategy.intOptional(stringInterning => + _.exercise_choice_interface_id.map(stringInterning.interfaceId.unsafe.internalize) ), - "exercise_choice" -> fieldStrategy.string(_ => _.exercise_choice), "exercise_argument" -> fieldStrategy.bytea(_ => _.exercise_argument), "exercise_result" -> fieldStrategy.byteaOptional(_ => _.exercise_result), - "exercise_actors" -> fieldStrategy.intArray(stringInterning => - _.exercise_actors.map(stringInterning.party.unsafe.internalize) - ), + "exercise_actors" -> fieldStrategy.parties(_.exercise_actors), "exercise_last_descendant_node_id" -> fieldStrategy.int(_ => _.exercise_last_descendant_node_id ), @@ -169,9 +360,7 @@ private[backend] object AppendOnlySchema { "package_id" -> fieldStrategy.int(stringInterning => dbDto => stringInterning.packageId.unsafe.internalize(dbDto.package_id) ), - "tree_event_witnesses" -> fieldStrategy.intArray(stringInterning => - _.tree_event_witnesses.map(stringInterning.party.unsafe.internalize) - ), + "tree_event_witnesses" -> fieldStrategy.parties(_.tree_event_witnesses), "event_sequential_id" -> fieldStrategy.bigint(_ => _.event_sequential_id), "exercise_argument_compression" -> fieldStrategy.smallintOptional(_ => _.exercise_argument_compression @@ -180,100 +369,99 @@ 
private[backend] object AppendOnlySchema { _.exercise_result_compression ), "synchronizer_id" -> fieldStrategy.int(stringInterning => - dbDto => stringInterning.synchronizerId.unsafe.internalize(dbDto.synchronizer_id) + dbDto => stringInterning.synchronizerId.internalize(dbDto.synchronizer_id) ), "trace_context" -> fieldStrategy.bytea(_ => _.trace_context), "record_time" -> fieldStrategy.bigint(_ => _.record_time), "external_transaction_hash" -> fieldStrategy.byteaOptional(_ => _.external_transaction_hash), ) + // TODO(#28008) remove val consumingExerciseFields: Vector[(String, Field[DbDto.EventExercise, _, _])] = exerciseFields ++ Vector[(String, Field[DbDto.EventExercise, _, _])]( - "flat_event_witnesses" -> fieldStrategy.intArray(stringInterning => - _.flat_event_witnesses.map(stringInterning.party.unsafe.internalize) - ) + "flat_event_witnesses" -> fieldStrategy.parties(_.flat_event_witnesses), + "deactivated_event_sequential_id" -> fieldStrategy.bigintOptional(_ => + _.deactivated_event_sequential_id + ), ) + // TODO(#28008) remove val eventsConsumingExercise: Table[DbDto.EventExercise] = fieldStrategy.insert("lapi_events_consuming_exercise")(consumingExerciseFields*) + // TODO(#28008) remove val eventsNonConsumingExercise: Table[DbDto.EventExercise] = fieldStrategy.insert("lapi_events_non_consuming_exercise")(exerciseFields*) + // TODO(#28008) remove val eventsUnassign: Table[DbDto.EventUnassign] = fieldStrategy.insert("lapi_events_unassign")( "event_sequential_id" -> fieldStrategy.bigint(_ => _.event_sequential_id), "event_offset" -> fieldStrategy.bigint(_ => _.event_offset), - "update_id" -> fieldStrategy.string(_ => _.update_id), + "update_id" -> fieldStrategy.bytea(_ => _.update_id), "workflow_id" -> fieldStrategy.stringOptional(_ => _.workflow_id), "command_id" -> fieldStrategy.stringOptional(_ => _.command_id), "submitter" -> fieldStrategy.intOptional(stringInterning => _.submitter.map(stringInterning.party.unsafe.internalize) ), "node_id" -> fieldStrategy.int(_ => _.node_id), - "contract_id" -> fieldStrategy.bytea(_ => u => u.contract_id.toArray), + "contract_id" -> fieldStrategy.bytea(_ => u => u.contract_id.toBytes.toByteArray), "template_id" -> fieldStrategy.int(stringInterning => dbDto => stringInterning.templateId.unsafe.internalize(dbDto.template_id) ), "package_id" -> fieldStrategy.int(stringInterning => dbDto => stringInterning.packageId.unsafe.internalize(dbDto.package_id) ), - "flat_event_witnesses" -> fieldStrategy.intArray(stringInterning => - _.flat_event_witnesses.map(stringInterning.party.unsafe.internalize) - ), + "flat_event_witnesses" -> fieldStrategy.parties(_.flat_event_witnesses), "source_synchronizer_id" -> fieldStrategy.int(stringInterning => - dbDto => stringInterning.synchronizerId.unsafe.internalize(dbDto.source_synchronizer_id) + dbDto => stringInterning.synchronizerId.internalize(dbDto.source_synchronizer_id) ), "target_synchronizer_id" -> fieldStrategy.int(stringInterning => - dbDto => stringInterning.synchronizerId.unsafe.internalize(dbDto.target_synchronizer_id) + dbDto => stringInterning.synchronizerId.internalize(dbDto.target_synchronizer_id) ), - "reassignment_id" -> fieldStrategy.string(_ => _.reassignment_id), + "reassignment_id" -> fieldStrategy.bytea(_ => _.reassignment_id), "reassignment_counter" -> fieldStrategy.bigint(_ => _.reassignment_counter), "assignment_exclusivity" -> fieldStrategy.bigintOptional(_ => _.assignment_exclusivity), "trace_context" -> fieldStrategy.bytea(_ => _.trace_context), "record_time" -> fieldStrategy.bigint(_ 
=> _.record_time), + "deactivated_event_sequential_id" -> fieldStrategy.bigintOptional(_ => + _.deactivated_event_sequential_id + ), ) + // TODO(#28008) remove val eventsAssign: Table[DbDto.EventAssign] = fieldStrategy.insert("lapi_events_assign")( "event_sequential_id" -> fieldStrategy.bigint(_ => _.event_sequential_id), "event_offset" -> fieldStrategy.bigint(_ => _.event_offset), - "update_id" -> fieldStrategy.string(_ => _.update_id), + "update_id" -> fieldStrategy.bytea(_ => _.update_id), "workflow_id" -> fieldStrategy.stringOptional(_ => _.workflow_id), "command_id" -> fieldStrategy.stringOptional(_ => _.command_id), "submitter" -> fieldStrategy.intOptional(stringInterning => _.submitter.map(stringInterning.party.unsafe.internalize) ), "node_id" -> fieldStrategy.int(_ => _.node_id), - "contract_id" -> fieldStrategy.bytea(_ => _.contract_id), + "contract_id" -> fieldStrategy.bytea(_ => _.contract_id.toBytes.toByteArray), "template_id" -> fieldStrategy.int(stringInterning => dbDto => stringInterning.templateId.unsafe.internalize(dbDto.template_id) ), "package_id" -> fieldStrategy.int(stringInterning => dbDto => stringInterning.packageId.unsafe.internalize(dbDto.package_id) ), - "flat_event_witnesses" -> fieldStrategy.intArray(stringInterning => - _.flat_event_witnesses.map(stringInterning.party.unsafe.internalize) - ), + "flat_event_witnesses" -> fieldStrategy.parties(_.flat_event_witnesses), "source_synchronizer_id" -> fieldStrategy.int(stringInterning => - dbDto => stringInterning.synchronizerId.unsafe.internalize(dbDto.source_synchronizer_id) + dbDto => stringInterning.synchronizerId.internalize(dbDto.source_synchronizer_id) ), "target_synchronizer_id" -> fieldStrategy.int(stringInterning => - dbDto => stringInterning.synchronizerId.unsafe.internalize(dbDto.target_synchronizer_id) + dbDto => stringInterning.synchronizerId.internalize(dbDto.target_synchronizer_id) ), - "reassignment_id" -> fieldStrategy.string(_ => _.reassignment_id), + "reassignment_id" -> fieldStrategy.bytea(_ => _.reassignment_id), "reassignment_counter" -> fieldStrategy.bigint(_ => _.reassignment_counter), "create_argument" -> fieldStrategy.bytea(_ => _.create_argument), - "create_signatories" -> fieldStrategy.intArray(stringInterning => - _.create_signatories.map(stringInterning.party.unsafe.internalize) - ), - "create_observers" -> fieldStrategy.intArray(stringInterning => - _.create_observers.map(stringInterning.party.unsafe.internalize) - ), + "create_signatories" -> fieldStrategy.parties(_.create_signatories), + "create_observers" -> fieldStrategy.parties(_.create_observers), "create_key_value" -> fieldStrategy.byteaOptional(_ => _.create_key_value), - "create_key_maintainers" -> fieldStrategy.intArrayOptional(stringInterning => - _.create_key_maintainers.map(_.map(stringInterning.party.unsafe.internalize)) - ), + "create_key_maintainers" -> fieldStrategy.partiesOptional(_.create_key_maintainers), "create_key_hash" -> fieldStrategy.stringOptional(_ => _.create_key_hash), "create_argument_compression" -> fieldStrategy.smallintOptional(_ => _.create_argument_compression @@ -285,6 +473,7 @@ private[backend] object AppendOnlySchema { "authentication_data" -> fieldStrategy.bytea(_ => _.authentication_data), "trace_context" -> fieldStrategy.bytea(_ => _.trace_context), "record_time" -> fieldStrategy.bigint(_ => _.record_time), + "internal_contract_id" -> fieldStrategy.bigint(_ => _.internal_contract_id), ) val partyEntries: Table[DbDto.PartyEntry] = @@ -305,18 +494,19 @@ private[backend] object AppendOnlySchema { 
fieldStrategy.insert("lapi_events_party_to_participant")( "event_sequential_id" -> fieldStrategy.bigint(_ => _.event_sequential_id), "event_offset" -> fieldStrategy.bigint(_ => _.event_offset), - "update_id" -> fieldStrategy.string(_ => _.update_id), + "update_id" -> fieldStrategy.bytea(_ => _.update_id), "party_id" -> fieldStrategy.int(stringInterning => dto => stringInterning.party.unsafe.internalize(dto.party_id) ), - // TODO(i21859) Implement interning for participant ids - "participant_id" -> fieldStrategy.string(_ => _.participant_id), + "participant_id" -> fieldStrategy.int(stringInterning => + dto => stringInterning.participantId.unsafe.internalize(dto.participant_id) + ), "participant_permission" -> fieldStrategy.int(_ => _.participant_permission), "participant_authorization_event" -> fieldStrategy.int(_ => _.participant_authorization_event ), "synchronizer_id" -> fieldStrategy.int(stringInterning => - dbDto => stringInterning.synchronizerId.unsafe.internalize(dbDto.synchronizer_id) + dbDto => stringInterning.synchronizerId.internalize(dbDto.synchronizer_id) ), "record_time" -> fieldStrategy.bigint(_ => _.record_time), "trace_context" -> fieldStrategy.bytea(_ => _.trace_context), @@ -327,12 +517,12 @@ private[backend] object AppendOnlySchema { "completion_offset" -> fieldStrategy.bigint(_ => _.completion_offset), "record_time" -> fieldStrategy.bigint(_ => _.record_time), "publication_time" -> fieldStrategy.bigint(_ => _.publication_time), - "user_id" -> fieldStrategy.string(_ => _.user_id), - "submitters" -> fieldStrategy.intArray(stringInterning => - _.submitters.map(stringInterning.party.unsafe.internalize) + "user_id" -> fieldStrategy.int(stringInterning => + dbDto => stringInterning.userId.unsafe.internalize(dbDto.user_id) ), + "submitters" -> fieldStrategy.parties(_.submitters), "command_id" -> fieldStrategy.string(_ => _.command_id), - "update_id" -> fieldStrategy.stringOptional(_ => _.update_id), + "update_id" -> fieldStrategy.byteaOptional(_ => _.update_id), "rejection_status_code" -> fieldStrategy.intOptional(_ => _.rejection_status_code), "rejection_status_message" -> fieldStrategy.stringOptional(_ => _.rejection_status_message), "rejection_status_details" -> fieldStrategy.byteaOptional(_ => _.rejection_status_details), @@ -345,7 +535,7 @@ private[backend] object AppendOnlySchema { _.deduplication_duration_nanos ), "synchronizer_id" -> fieldStrategy.int(stringInterning => - dbDto => stringInterning.synchronizerId.unsafe.internalize(dbDto.synchronizer_id) + dbDto => stringInterning.synchronizerId.internalize(dbDto.synchronizer_id) ), "message_uuid" -> fieldStrategy.stringOptional(_ => _.message_uuid), "is_transaction" -> fieldStrategy.boolean(_ => _.is_transaction), @@ -358,6 +548,7 @@ private[backend] object AppendOnlySchema { "external_string" -> fieldStrategy.string(_ => _.externalString), ) + // TODO(#28008) remove val idFilterCreateStakeholderTable: Table[DbDto.IdFilterCreateStakeholder] = fieldStrategy.insert("lapi_pe_create_id_filter_stakeholder")( "event_sequential_id" -> fieldStrategy.bigint(_ => _.event_sequential_id), @@ -367,8 +558,12 @@ private[backend] object AppendOnlySchema { "party_id" -> fieldStrategy.int(stringInterning => dto => stringInterning.party.unsafe.internalize(dto.party_id) ), + "first_per_sequential_id" -> fieldStrategy.booleanOptional(_ => + dto => Option.when(dto.first_per_sequential_id)(true) + ), ) + // TODO(#28008) remove val idFilterCreateNonStakeholderInformeeTable : Table[DbDto.IdFilterCreateNonStakeholderInformee] = 
fieldStrategy.insert("lapi_pe_create_id_filter_non_stakeholder_informee")( @@ -379,8 +574,12 @@ private[backend] object AppendOnlySchema { "party_id" -> fieldStrategy.int(stringInterning => dto => stringInterning.party.unsafe.internalize(dto.party_id) ), + "first_per_sequential_id" -> fieldStrategy.booleanOptional(_ => + dto => Option.when(dto.first_per_sequential_id)(true) + ), ) + // TODO(#28008) remove val idFilterConsumingStakeholderTable: Table[DbDto.IdFilterConsumingStakeholder] = fieldStrategy.insert("lapi_pe_consuming_id_filter_stakeholder")( "event_sequential_id" -> fieldStrategy.bigint(_ => _.event_sequential_id), @@ -390,8 +589,12 @@ private[backend] object AppendOnlySchema { "party_id" -> fieldStrategy.int(stringInterning => dto => stringInterning.party.unsafe.internalize(dto.party_id) ), + "first_per_sequential_id" -> fieldStrategy.booleanOptional(_ => + dto => Option.when(dto.first_per_sequential_id)(true) + ), ) + // TODO(#28008) remove val idFilterConsumingNonStakeholderInformeeTable : Table[DbDto.IdFilterConsumingNonStakeholderInformee] = fieldStrategy.insert("lapi_pe_consuming_id_filter_non_stakeholder_informee")( @@ -402,8 +605,12 @@ private[backend] object AppendOnlySchema { "party_id" -> fieldStrategy.int(stringInterning => dto => stringInterning.party.unsafe.internalize(dto.party_id) ), + "first_per_sequential_id" -> fieldStrategy.booleanOptional(_ => + dto => Option.when(dto.first_per_sequential_id)(true) + ), ) + // TODO(#28008) remove val idFilterNonConsumingInformeeTable: Table[DbDto.IdFilterNonConsumingInformee] = fieldStrategy.insert("lapi_pe_non_consuming_id_filter_informee")( "event_sequential_id" -> fieldStrategy.bigint(_ => _.event_sequential_id), @@ -413,8 +620,12 @@ private[backend] object AppendOnlySchema { "party_id" -> fieldStrategy.int(stringInterning => dto => stringInterning.party.unsafe.internalize(dto.party_id) ), + "first_per_sequential_id" -> fieldStrategy.booleanOptional(_ => + dto => Option.when(dto.first_per_sequential_id)(true) + ), ) + // TODO(#28008) remove val idFilterUnassignStakeholderTable: Table[DbDto.IdFilterUnassignStakeholder] = fieldStrategy.insert("lapi_pe_reassignment_id_filter_stakeholder")( "event_sequential_id" -> fieldStrategy.bigint(_ => _.event_sequential_id), @@ -424,8 +635,12 @@ private[backend] object AppendOnlySchema { "party_id" -> fieldStrategy.int(stringInterning => dto => stringInterning.party.unsafe.internalize(dto.party_id) ), + "first_per_sequential_id" -> fieldStrategy.booleanOptional(_ => + dto => Option.when(dto.first_per_sequential_id)(true) + ), ) + // TODO(#28008) remove val idFilterAssignStakeholderTable: Table[DbDto.IdFilterAssignStakeholder] = fieldStrategy.insert("lapi_pe_assign_id_filter_stakeholder")( "event_sequential_id" -> fieldStrategy.bigint(_ => _.event_sequential_id), @@ -435,22 +650,33 @@ private[backend] object AppendOnlySchema { "party_id" -> fieldStrategy.int(stringInterning => dto => stringInterning.party.unsafe.internalize(dto.party_id) ), + "first_per_sequential_id" -> fieldStrategy.booleanOptional(_ => + dto => Option.when(dto.first_per_sequential_id)(true) + ), ) val transactionMeta: Table[DbDto.TransactionMeta] = fieldStrategy.insert("lapi_update_meta")( - "update_id" -> fieldStrategy.string(_ => _.update_id), + "update_id" -> fieldStrategy.bytea(_ => _.update_id), "event_offset" -> fieldStrategy.bigint(_ => _.event_offset), "publication_time" -> fieldStrategy.bigint(_ => _.publication_time), "record_time" -> fieldStrategy.bigint(_ => _.record_time), "synchronizer_id" -> 
fieldStrategy.int(stringInterning => - dbDto => stringInterning.synchronizerId.unsafe.internalize(dbDto.synchronizer_id) + dbDto => stringInterning.synchronizerId.internalize(dbDto.synchronizer_id) ), "event_sequential_id_first" -> fieldStrategy.bigint(_ => _.event_sequential_id_first), "event_sequential_id_last" -> fieldStrategy.bigint(_ => _.event_sequential_id_last), ) val executes: Seq[Array[Array[_]] => Connection => Unit] = List( + eventActivate.executeUpdate, + idFilterActivateStakeholder.executeUpdate, + idFilterActivateWitness.executeUpdate, + eventDeactivate.executeUpdate, + idFilterDeactivateStakeholder.executeUpdate, + idFilterDeactivateWitness.executeUpdate, + eventVariousWitnessed.executeUpdate, + idFilterVariousWitness.executeUpdate, eventsCreate.executeUpdate, eventsConsumingExercise.executeUpdate, eventsNonConsumingExercise.executeUpdate, @@ -480,6 +706,17 @@ private[backend] object AppendOnlySchema { def collect[T <: DbDto: ClassTag]: Vector[T] = collectWithFilter[T](_ => true) import DbDto.* Array( + eventActivate.prepareData(collect[EventActivate], stringInterning), + idFilterActivateStakeholder + .prepareData(collect[IdFilterActivateStakeholder], stringInterning), + idFilterActivateWitness.prepareData(collect[IdFilterActivateWitness], stringInterning), + eventDeactivate.prepareData(collect[EventDeactivate], stringInterning), + idFilterDeactivateStakeholder + .prepareData(collect[IdFilterDeactivateStakeholder], stringInterning), + idFilterDeactivateWitness + .prepareData(collect[IdFilterDeactivateWitness], stringInterning), + eventVariousWitnessed.prepareData(collect[EventVariousWitnessed], stringInterning), + idFilterVariousWitness.prepareData(collect[IdFilterVariousWitness], stringInterning), eventsCreate.prepareData(collect[EventCreate], stringInterning), eventsConsumingExercise .prepareData(collectWithFilter[EventExercise](_.consuming), stringInterning), diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/SimpleSqlExtensions.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/SimpleSqlExtensions.scala index f9302887e8..47e868a53e 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/SimpleSqlExtensions.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/SimpleSqlExtensions.scala @@ -32,19 +32,24 @@ private[backend] object SimpleSqlExtensions { @throws[Throwable] def asVectorOf[A](parser: RowParser[A])(implicit conn: Connection): Vector[A] = { + val resultBuilder = Vector.newBuilder[A] + @annotation.tailrec - def go(results: Vector[A])(cursor: Option[Cursor]): Try[Vector[A]] = + def go(cursor: Option[Cursor]): Try[Vector[A]] = cursor match { case Some(cursor) => cursor.row.as(parser) match { - case Success(value) => go(results :+ value)(cursor.next) + case Success(value) => + resultBuilder.addOne(value) + go(cursor.next) + case Failure(f) => Failure(f) } - case _ => Try(results) + case _ => Try(resultBuilder.result()) } sql - .withResult(go(Vector.empty)) + .withResult(go) .fold( _.headOption.fold(throw new NoSuchElementException("empty list of errors"))(throw _), _.fold(throw _, identity), diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/UpdatePointwiseQueries.scala 
b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/UpdatePointwiseQueries.scala index 632b40dcec..0c1fbedbbd 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/UpdatePointwiseQueries.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/UpdatePointwiseQueries.scala @@ -3,25 +3,21 @@ package com.digitalasset.canton.platform.store.backend.common -import anorm.RowParser import com.digitalasset.canton.data import com.digitalasset.canton.data.Offset -import com.digitalasset.canton.platform.Party -import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{Entry, RawTreeEvent} +import com.digitalasset.canton.platform.store.backend.Conversions.* import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.{ CompositeSql, SqlStringInterpolation, } -import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.* import com.digitalasset.canton.platform.store.backend.common.UpdatePointwiseQueries.LookupKey import com.digitalasset.canton.platform.store.cache.LedgerEndCache -import com.digitalasset.canton.platform.store.interning.StringInterning +import com.digitalasset.canton.protocol.UpdateId import java.sql.Connection class UpdatePointwiseQueries( - ledgerEndCache: LedgerEndCache, - stringInterning: StringInterning, + ledgerEndCache: LedgerEndCache ) { import EventStorageBackendTemplate.* @@ -29,7 +25,6 @@ class UpdatePointwiseQueries( def fetchIdsFromUpdateMeta( lookupKey: LookupKey )(connection: Connection): Option[(Long, Long)] = { - import com.digitalasset.canton.platform.store.backend.Conversions.ledgerStringToStatement import com.digitalasset.canton.platform.store.backend.Conversions.OffsetToStatement // 1. Checking whether "event_offset <= ledgerEndOffset" is needed because during indexing // the events and transaction_meta tables are written to prior to the ledger end being updated. 
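A minimal sketch of the guard that the comment above describes, using the hypothetical names metaRowOffset and ledgerEndOffsetO: because lapi_update_meta rows become visible before the ledger end advances, a row only counts once the cached ledger end has caught up to its offset.

    // a meta row may be trusted only once the ledger end has reached its offset;
    // if no ledger end is known yet, nothing is visible
    def visibleAtLedgerEnd(metaRowOffset: Long, ledgerEndOffsetO: Option[Long]): Boolean =
      ledgerEndOffsetO.exists(metaRowOffset <= _)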
@@ -38,9 +33,9 @@ class UpdatePointwiseQueries( ledgerEndOffsetO.flatMap { ledgerEndOffset => val lookupKeyClause: CompositeSql = lookupKey match { - case LookupKey.UpdateId(updateId) => + case LookupKey.ByUpdateId(updateId) => cSQL"t.update_id = $updateId" - case LookupKey.Offset(offset) => + case LookupKey.ByOffset(offset) => cSQL"t.event_offset = $offset" } @@ -57,108 +52,12 @@ class UpdatePointwiseQueries( """.as(EventSequentialIdFirstLast.singleOpt)(connection) } } - - // TODO(#23504) remove when transaction trees legacy endpoints are removed - def fetchTreeTransactionEvents( - firstEventSequentialId: Long, - lastEventSequentialId: Long, - requestingParties: Option[Set[Party]], - )(connection: Connection): Vector[Entry[RawTreeEvent]] = - fetchEventsForTransactionPointWiseLookup( - firstEventSequentialId = firstEventSequentialId, - lastEventSequentialId = lastEventSequentialId, - witnessesColumn = "tree_event_witnesses", - tables = List( - SelectTable( - tableName = "lapi_events_create", - selectColumns = - s"$selectColumnsForTransactionTreeCreate, ${QueryStrategy.constBooleanSelect(false)} as exercise_consuming", - ), - SelectTable( - tableName = "lapi_events_consuming_exercise", - selectColumns = - s"${selectColumnsForTransactionTreeExercise(includeFlatEventWitnesses = true)}, ${QueryStrategy - .constBooleanSelect(true)} as exercise_consuming", - ), - SelectTable( - tableName = "lapi_events_non_consuming_exercise", - selectColumns = - s"${selectColumnsForTransactionTreeExercise(includeFlatEventWitnesses = false)}, ${QueryStrategy - .constBooleanSelect(false)} as exercise_consuming", - ), - ), - requestingParties = requestingParties, - filteringRowParser = rawTreeEventParser(_, stringInterning), - )(connection) - - case class SelectTable(tableName: String, selectColumns: String) - - private def fetchEventsForTransactionPointWiseLookup[T]( - firstEventSequentialId: Long, - lastEventSequentialId: Long, - witnessesColumn: String, - tables: List[SelectTable], - requestingParties: Option[Set[Party]], - filteringRowParser: Option[Set[Int]] => RowParser[Entry[T]], - )(connection: Connection): Vector[Entry[T]] = { - val allInternedParties: Option[Set[Int]] = requestingParties.map( - _.iterator - .map(stringInterning.party.tryInternalize) - .flatMap(_.iterator) - .toSet - ) - // Improvement idea: Add support for `fetchSizeHint` and `limit`. 
- def selectFrom(tableName: String, selectColumns: String) = cSQL""" - ( - SELECT - #$selectColumns, - event_witnesses, - command_id - FROM - ( - SELECT - #$selectColumns, - #$witnessesColumn as event_witnesses, - e.command_id - FROM - #$tableName e - JOIN lapi_parameters p - ON - p.participant_pruned_up_to_inclusive IS NULL - OR - e.event_offset > p.participant_pruned_up_to_inclusive - WHERE - e.event_sequential_id >= $firstEventSequentialId - AND - e.event_sequential_id <= $lastEventSequentialId - ORDER BY - e.event_sequential_id - ) x - ) - """ - val unionQuery = tables - .map(table => - selectFrom( - tableName = table.tableName, - selectColumns = table.selectColumns, - ) - ) - .mkComposite("", " UNION ALL", "") - val parsedRows: Vector[Entry[T]] = SQL""" - $unionQuery - ORDER BY event_sequential_id""" - .asVectorOf( - parser = filteringRowParser(allInternedParties) - )(connection) - parsedRows - } - } object UpdatePointwiseQueries { sealed trait LookupKey object LookupKey { - final case class UpdateId(updateId: data.UpdateId) extends LookupKey - final case class Offset(offset: data.Offset) extends LookupKey + final case class ByUpdateId(updateId: UpdateId) extends LookupKey + final case class ByOffset(offset: data.Offset) extends LookupKey } } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/UpdateStreamingQueries.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/UpdateStreamingQueries.scala index 5b8a238376..85cec8b19b 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/UpdateStreamingQueries.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/UpdateStreamingQueries.scala @@ -5,8 +5,18 @@ package com.digitalasset.canton.platform.store.backend.common import anorm.SqlParser.long import com.digitalasset.canton.platform.Party -import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.SqlStringInterpolation +import com.digitalasset.canton.platform.store.backend.PersistentEventType +import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.{ + CompositeSql, + SqlStringInterpolation, +} import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.* +import com.digitalasset.canton.platform.store.dao.PaginatingAsyncStream.{ + IdFilterInput, + IdFilterPaginationInput, + PaginationInput, + PaginationLastOnlyInput, +} import com.digitalasset.canton.platform.store.interning.StringInterning import com.digitalasset.daml.lf.data.Ref import com.digitalasset.daml.lf.data.Ref.NameTypeConRef @@ -15,119 +25,264 @@ import java.sql.Connection sealed trait EventIdSource object EventIdSource { - object CreateStakeholder extends EventIdSource - object CreateNonStakeholder extends EventIdSource - object ConsumingStakeholder extends EventIdSource - object ConsumingNonStakeholder extends EventIdSource - object NonConsumingInformee extends EventIdSource + object ActivateStakeholder extends EventIdSource + object ActivateWitnesses extends EventIdSource + object DeactivateStakeholder extends EventIdSource + object DeactivateWitnesses extends EventIdSource + object VariousWitnesses extends EventIdSource +} +sealed trait EventIdSourceLegacy +object EventIdSourceLegacy { + object CreateStakeholder extends EventIdSourceLegacy + object CreateNonStakeholder extends EventIdSourceLegacy + 
object ConsumingStakeholder extends EventIdSourceLegacy + object ConsumingNonStakeholder extends EventIdSourceLegacy + object NonConsumingInformee extends EventIdSourceLegacy } sealed trait EventPayloadSourceForUpdatesAcsDelta object EventPayloadSourceForUpdatesAcsDelta { - object Create extends EventPayloadSourceForUpdatesAcsDelta - object Consuming extends EventPayloadSourceForUpdatesAcsDelta + object Activate extends EventPayloadSourceForUpdatesAcsDelta + object Deactivate extends EventPayloadSourceForUpdatesAcsDelta +} +sealed trait EventPayloadSourceForUpdatesAcsDeltaLegacy +object EventPayloadSourceForUpdatesAcsDeltaLegacy { + object Create extends EventPayloadSourceForUpdatesAcsDeltaLegacy + object Consuming extends EventPayloadSourceForUpdatesAcsDeltaLegacy } sealed trait EventPayloadSourceForUpdatesLedgerEffects object EventPayloadSourceForUpdatesLedgerEffects { - object Create extends EventPayloadSourceForUpdatesLedgerEffects - object Consuming extends EventPayloadSourceForUpdatesLedgerEffects - object NonConsuming extends EventPayloadSourceForUpdatesLedgerEffects + object Activate extends EventPayloadSourceForUpdatesLedgerEffects + object Deactivate extends EventPayloadSourceForUpdatesLedgerEffects + object VariousWitnessed extends EventPayloadSourceForUpdatesLedgerEffects +} +sealed trait EventPayloadSourceForUpdatesLedgerEffectsLegacy +object EventPayloadSourceForUpdatesLedgerEffectsLegacy { + object Create extends EventPayloadSourceForUpdatesLedgerEffectsLegacy + object Consuming extends EventPayloadSourceForUpdatesLedgerEffectsLegacy + object NonConsuming extends EventPayloadSourceForUpdatesLedgerEffectsLegacy } class UpdateStreamingQueries( - stringInterning: StringInterning + stringInterning: StringInterning, + queryStrategy: QueryStrategy, ) { def fetchEventIds(target: EventIdSource)( + witnessO: Option[Party], + templateIdO: Option[NameTypeConRef], + eventTypes: Set[PersistentEventType], + )(connection: Connection): IdFilterPaginationInput => Vector[Long] = { + def idFilter(tableName: String): Option[CompositeSql] = Option.when(eventTypes.nonEmpty)( + cSQL""" + EXISTS ( + SELECT 1 + FROM #$tableName data_table + WHERE + filters.event_sequential_id = data_table.event_sequential_id + AND data_table.event_type ${queryStrategy.anyOfSmallInts(eventTypes.map(_.asInt))} + )""" + ) + target match { + case EventIdSource.ActivateStakeholder => + UpdateStreamingQueries.fetchEventIds( + tableName = "lapi_filter_activate_stakeholder", + witnessO = witnessO, + templateIdO = templateIdO, + idFilter = idFilter("lapi_events_activate_contract"), + stringInterning = stringInterning, + hasFirstPerSequentialId = true, + )(connection) + case EventIdSource.ActivateWitnesses => + UpdateStreamingQueries.fetchEventIds( + tableName = "lapi_filter_activate_witness", + witnessO = witnessO, + templateIdO = templateIdO, + idFilter = idFilter("lapi_events_activate_contract"), + stringInterning = stringInterning, + hasFirstPerSequentialId = true, + )(connection) + case EventIdSource.DeactivateStakeholder => + UpdateStreamingQueries.fetchEventIds( + tableName = "lapi_filter_deactivate_stakeholder", + witnessO = witnessO, + templateIdO = templateIdO, + idFilter = idFilter("lapi_events_deactivate_contract"), + stringInterning = stringInterning, + hasFirstPerSequentialId = true, + )(connection) + case EventIdSource.DeactivateWitnesses => + UpdateStreamingQueries.fetchEventIds( + tableName = "lapi_filter_deactivate_witness", + witnessO = witnessO, + templateIdO = templateIdO, + idFilter = 
idFilter("lapi_events_deactivate_contract"), + stringInterning = stringInterning, + hasFirstPerSequentialId = true, + )(connection) + case EventIdSource.VariousWitnesses => + UpdateStreamingQueries.fetchEventIds( + tableName = "lapi_filter_various_witness", + witnessO = witnessO, + templateIdO = templateIdO, + idFilter = idFilter("lapi_events_various_witnessed"), + stringInterning = stringInterning, + hasFirstPerSequentialId = true, + )(connection) + } + } + + def fetchEventIdsLegacy(target: EventIdSourceLegacy)( stakeholderO: Option[Party], templateIdO: Option[NameTypeConRef], - startExclusive: Long, - endInclusive: Long, - limit: Int, - )(connection: Connection): Vector[Long] = target match { - case EventIdSource.ConsumingStakeholder => + )(connection: Connection): PaginationInput => Vector[Long] = target match { + case EventIdSourceLegacy.ConsumingStakeholder => fetchIdsOfConsumingEventsForStakeholder( stakeholder = stakeholderO, templateIdO = templateIdO, - startExclusive = startExclusive, - endInclusive = endInclusive, - limit = limit, )( connection ) - case EventIdSource.ConsumingNonStakeholder => + case EventIdSourceLegacy.ConsumingNonStakeholder => UpdateStreamingQueries.fetchEventIds( tableName = "lapi_pe_consuming_id_filter_non_stakeholder_informee", witnessO = stakeholderO, templateIdO = templateIdO, - startExclusive = startExclusive, - endInclusive = endInclusive, - limit = limit, + idFilter = None, stringInterning = stringInterning, + hasFirstPerSequentialId = true, )(connection) - case EventIdSource.CreateStakeholder => + case EventIdSourceLegacy.CreateStakeholder => fetchIdsOfCreateEventsForStakeholder( stakeholderO = stakeholderO, templateIdO = templateIdO, - startExclusive = startExclusive, - endInclusive = endInclusive, - limit = limit, )( connection ) - case EventIdSource.CreateNonStakeholder => + case EventIdSourceLegacy.CreateNonStakeholder => UpdateStreamingQueries.fetchEventIds( tableName = "lapi_pe_create_id_filter_non_stakeholder_informee", witnessO = stakeholderO, templateIdO = templateIdO, - startExclusive = startExclusive, - endInclusive = endInclusive, - limit = limit, + idFilter = None, stringInterning = stringInterning, + hasFirstPerSequentialId = true, )(connection) - case EventIdSource.NonConsumingInformee => + case EventIdSourceLegacy.NonConsumingInformee => UpdateStreamingQueries.fetchEventIds( tableName = "lapi_pe_non_consuming_id_filter_informee", witnessO = stakeholderO, templateIdO = templateIdO, - startExclusive = startExclusive, - endInclusive = endInclusive, - limit = limit, + idFilter = None, stringInterning = stringInterning, + hasFirstPerSequentialId = true, )(connection) } - def fetchIdsOfCreateEventsForStakeholder( + private def fetchIdsOfCreateEventsForStakeholder( + stakeholderO: Option[Ref.Party], + templateIdO: Option[NameTypeConRef], + )(connection: Connection): PaginationInput => Vector[Long] = + UpdateStreamingQueries.fetchEventIds( + tableName = "lapi_pe_create_id_filter_stakeholder", + witnessO = stakeholderO, + templateIdO = templateIdO, + idFilter = None, + stringInterning = stringInterning, + hasFirstPerSequentialId = true, + )(connection) + + def fetchActiveIds( + stakeholderO: Option[Ref.Party], + templateIdO: Option[NameTypeConRef], + activeAtEventSeqId: Long, + )(connection: Connection): IdFilterPaginationInput => Vector[Long] = + UpdateStreamingQueries.fetchEventIds( + tableName = "lapi_filter_activate_stakeholder", + witnessO = stakeholderO, + templateIdO = templateIdO, + idFilter = Some( + cSQL""" + NOT EXISTS ( + SELECT 1 + 
FROM lapi_events_deactivate_contract deactivate_evs + WHERE + filters.event_sequential_id = deactivate_evs.deactivated_event_sequential_id + AND deactivate_evs.event_sequential_id <= $activeAtEventSeqId + )""" + ), + stringInterning = stringInterning, + hasFirstPerSequentialId = true, + )(connection) + + def fetchActiveIdsOfCreateEventsForStakeholderLegacy( stakeholderO: Option[Ref.Party], templateIdO: Option[NameTypeConRef], - startExclusive: Long, - endInclusive: Long, - limit: Int, - )(connection: Connection): Vector[Long] = + activeAtEventSeqId: Long, + )(connection: Connection): IdFilterPaginationInput => Vector[Long] = UpdateStreamingQueries.fetchEventIds( tableName = "lapi_pe_create_id_filter_stakeholder", witnessO = stakeholderO, templateIdO = templateIdO, - startExclusive = startExclusive, - endInclusive = endInclusive, - limit = limit, + idFilter = Some( + cSQL""" + NOT EXISTS ( + SELECT 1 + FROM lapi_events_consuming_exercise consuming_evs + WHERE + filters.event_sequential_id = consuming_evs.deactivated_event_sequential_id + AND consuming_evs.event_sequential_id <= $activeAtEventSeqId + ) AND NOT EXISTS ( + SELECT 1 + FROM lapi_events_unassign unassign_evs + WHERE + filters.event_sequential_id = unassign_evs.deactivated_event_sequential_id + AND unassign_evs.event_sequential_id <= $activeAtEventSeqId + )""" + ), stringInterning = stringInterning, + hasFirstPerSequentialId = true, + )(connection) + + def fetchActiveIdsOfAssignEventsForStakeholderLegacy( + stakeholderO: Option[Ref.Party], + templateIdO: Option[NameTypeConRef], + activeAtEventSeqId: Long, + )(connection: Connection): IdFilterPaginationInput => Vector[Long] = + UpdateStreamingQueries.fetchEventIds( + tableName = "lapi_pe_assign_id_filter_stakeholder", + witnessO = stakeholderO, + templateIdO = templateIdO, + idFilter = Some( + cSQL""" + NOT EXISTS ( + SELECT 1 + FROM lapi_events_consuming_exercise consuming_evs + WHERE + filters.event_sequential_id = consuming_evs.deactivated_event_sequential_id + AND consuming_evs.event_sequential_id <= $activeAtEventSeqId + ) AND NOT EXISTS ( + SELECT 1 + FROM lapi_events_unassign unassign_evs + WHERE + filters.event_sequential_id = unassign_evs.deactivated_event_sequential_id + AND unassign_evs.event_sequential_id <= $activeAtEventSeqId + )""" + ), + stringInterning = stringInterning, + hasFirstPerSequentialId = true, )(connection) private def fetchIdsOfConsumingEventsForStakeholder( stakeholder: Option[Ref.Party], templateIdO: Option[NameTypeConRef], - startExclusive: Long, - endInclusive: Long, - limit: Int, - )(connection: Connection): Vector[Long] = + )(connection: Connection): PaginationInput => Vector[Long] = UpdateStreamingQueries.fetchEventIds( tableName = "lapi_pe_consuming_id_filter_stakeholder", witnessO = stakeholder, templateIdO = templateIdO, - startExclusive = startExclusive, - endInclusive = endInclusive, - limit = limit, + idFilter = None, stringInterning = stringInterning, + hasFirstPerSequentialId = true, )(connection) } @@ -136,22 +291,31 @@ object UpdateStreamingQueries { // TODO(i22416): Rename the arguments of this function, as witnessO and templateIdO are inadequate for party topology events. 
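The NOT EXISTS filters above all encode the same activeness rule: an activation counts as active at activeAtEventSeqId exactly when no deactivation row points back at it via deactivated_event_sequential_id at or before that sequential id. A minimal in-memory sketch of the predicate; ActivationRow and DeactivationRow are hypothetical stand-ins for the table rows:

    final case class ActivationRow(eventSeqId: Long)
    final case class DeactivationRow(eventSeqId: Long, deactivatedEventSeqId: Long)

    // mirrors the SQL: activated at or before the cutoff, and no deactivation
    // referencing this activation happened at or before the cutoff
    def isActive(
        activation: ActivationRow,
        deactivations: Seq[DeactivationRow],
        activeAtEventSeqId: Long,
    ): Boolean =
      activation.eventSeqId <= activeAtEventSeqId &&
        !deactivations.exists(d =>
          d.deactivatedEventSeqId == activation.eventSeqId &&
            d.eventSeqId <= activeAtEventSeqId
        )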
/** @param tableName - * one of filter tables for create, consuming or non-consuming events + * one of the filter tables for create, consuming or non-consuming events * @param witnessO * the party for which to fetch the event ids, if None the event ids for all the parties should * be fetched * @param templateIdO * NOTE: this parameter is not applicable for tree tx stream only oriented filters + * @param idFilter + * Inside the composable SQL, the event_sequential_id of each candidate must be referred to + * as ''filters.event_sequential_id''. EXISTS and NOT EXISTS expressions are recommended, as they + * trigger pointwise iteration on the target indexes. + * @param stringInterning + * the string interning instance to use for internalizing the party and template id + * @param hasFirstPerSequentialId + * true if the table has the first_per_sequential_id column, false otherwise. If true and + * witnessO is None, only a single row per event_sequential_id will be fetched and thus only + * unique event ids will be returned */ def fetchEventIds( tableName: String, witnessO: Option[Ref.Party], templateIdO: Option[NameTypeConRef], - startExclusive: Long, - endInclusive: Long, - limit: Int, + idFilter: Option[CompositeSql], stringInterning: StringInterning, - )(connection: Connection): Vector[Long] = { + hasFirstPerSequentialId: Boolean, + )(connection: Connection): IdFilterPaginationInput => Vector[Long] = { val partyIdFilterO = witnessO match { case Some(witness) => stringInterning.party @@ -180,29 +344,89 @@ object UpdateStreamingQueries { } } + // if we do not filter by party and the table has the first_per_sequential_id column, we fetch only a single row per event_sequential_id + val firstPerSequentialIdClause = witnessO match { + case None if hasFirstPerSequentialId => + cSQL"AND filters.first_per_sequential_id = true" + case _ => cSQL"" + } + (partyIdFilterO, templateIdFilterO) match { case ( Some((partyIdFilterClause, partyIdOrderingClause)), Some((templateIdFilterClause, templateIdOrderingClause)), ) => - SQL""" - SELECT filters.event_sequential_id - FROM - #$tableName filters - WHERE - $startExclusive < event_sequential_id - AND event_sequential_id <= $endInclusive - $partyIdFilterClause - $templateIdFilterClause - ORDER BY - $partyIdOrderingClause - $templateIdOrderingClause - filters.event_sequential_id -- deliver in index order - ${QueryStrategy.limitClause(Some(limit))} - """ - .asVectorOf(long("event_sequential_id"))(connection) - case _ => Vector.empty + def filterTableSelect( + startExclusive: Long, + endInclusive: Long, + limit: Option[Int], + idFilter: Option[CompositeSql], + ): CompositeSql = + cSQL""" + SELECT filters.event_sequential_id event_sequential_id + FROM + #$tableName filters + WHERE + $startExclusive < filters.event_sequential_id + AND filters.event_sequential_id <= $endInclusive + $partyIdFilterClause + $templateIdFilterClause + $firstPerSequentialIdClause + ${idFilter.map(f => cSQL"AND $f").getOrElse(cSQL"")} + ORDER BY + $partyIdOrderingClause + $templateIdOrderingClause + filters.event_sequential_id -- deliver in index order + ${limit.map(l => cSQL"LIMIT $l").getOrElse(cSQL"")}""" + idPaginationInput => + val sql = idPaginationInput match { + case PaginationInput(startExclusive, endInclusive, limit) => + filterTableSelect( + startExclusive = startExclusive, + endInclusive = endInclusive, + limit = Some(limit), + idFilter = + None, // disable unconditionally: here we reuse the query for the population case without an ID filter + ) + + case IdFilterInput(_, _) if idFilter.isEmpty => + throw new IllegalStateException( + "Using a non-ID-filter-compliant query for ID filtration: the ID filter must be defined in this case" + ) + + case IdFilterInput(startExclusive, endInclusive) => + filterTableSelect( + startExclusive = startExclusive, + endInclusive = endInclusive, + limit = None, + idFilter = idFilter, + ) + + case PaginationLastOnlyInput(_, _, _) if idFilter.isEmpty => + throw new IllegalStateException( + "Using a non-ID-filter-compliant query for ID filtration: the ID filter must be defined in this case" + ) + + case PaginationLastOnlyInput(startExclusive, endInclusive, limit) => + val filterTableSQL = filterTableSelect( + startExclusive = startExclusive, + endInclusive = endInclusive, + limit = Some(limit), + idFilter = + None, // disable unconditionally: here we reuse the query as the paginated query of an ID-filter population + ) + cSQL""" + WITH unfiltered_ids AS ( + $filterTableSQL + ) + SELECT unfiltered_ids.event_sequential_id event_sequential_id + FROM unfiltered_ids + ORDER BY event_sequential_id DESC + LIMIT 1""" + } + SQL"$sql".asVectorOf(long("event_sequential_id"))(connection) + + case _ => _ => Vector.empty } } - } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2Field.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2Field.scala index 7d7b065d9e..85680d71fc 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2Field.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2Field.scala @@ -8,19 +8,6 @@ import com.digitalasset.canton.platform.store.interning.StringInterning import java.io.{ByteArrayInputStream, InputStream} -private[h2] final case class IntArray[FROM](extract: StringInterning => FROM => Iterable[Int]) - extends Field[FROM, Iterable[Int], Array[java.lang.Integer]] { - override def convert: Iterable[Int] => Array[java.lang.Integer] = _.view.map(Int.box).toArray -} - -private[h2] final case class IntArrayOptional[FROM]( - extract: StringInterning => FROM => Option[Iterable[Int]] -) extends Field[FROM, Option[Iterable[Int]], Array[java.lang.Integer]] { - @SuppressWarnings(Array("org.wartremover.warts.Null")) - override def convert: Option[Iterable[Int]] => Array[java.lang.Integer] = - _.map(_.view.map(Int.box).toArray).orNull -} - private[h2] final case class H2Bytea[FROM](extract: StringInterning => FROM => Array[Byte]) extends Field[FROM, Array[Byte], InputStream] { override def convert: Array[Byte] => InputStream = new ByteArrayInputStream(_) diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2ResetStorageBackend.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2ResetStorageBackend.scala index ed074fecb7..6d2d43f274 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2ResetStorageBackend.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2ResetStorageBackend.scala @@ -17,6 +17,14 @@ object H2ResetStorageBackend extends ResetStorageBackend { truncate table lapi_parameters; truncate table lapi_ledger_end_synchronizer_index; truncate table lapi_command_completions; + truncate table
lapi_events_activate_contract; + truncate table lapi_filter_activate_stakeholder; + truncate table lapi_filter_activate_witness; + truncate table lapi_events_deactivate_contract; + truncate table lapi_filter_deactivate_stakeholder; + truncate table lapi_filter_deactivate_witness; + truncate table lapi_events_various_witnessed; + truncate table lapi_filter_various_witness; truncate table lapi_events_create; truncate table lapi_events_consuming_exercise; truncate table lapi_events_non_consuming_exercise; diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2Schema.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2Schema.scala index 3bc03760ca..6368334e53 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2Schema.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2Schema.scala @@ -15,16 +15,6 @@ import com.digitalasset.canton.platform.store.interning.StringInterning private[h2] object H2Schema { private val H2FieldStrategy = new FieldStrategy { - override def intArray[FROM]( - extractor: StringInterning => FROM => Iterable[Int] - ): Field[FROM, Iterable[Int], _] = - IntArray(extractor) - - override def intArrayOptional[FROM]( - extractor: StringInterning => FROM => Option[Iterable[Int]] - ): Field[FROM, Option[Iterable[Int]], _] = - IntArrayOptional(extractor) - override def bytea[FROM]( extractor: StringInterning => FROM => Array[Byte] ): Field[FROM, Array[Byte], _] = @@ -39,15 +29,6 @@ private[h2] object H2Schema { fields: (String, Field[FROM, _, _])* ): Table[FROM] = Table.batchedInsert(tableName)(fields*) - - override def idempotentInsert[FROM]( - tableName: String, - keyFieldIndex: Int, - ordering: Ordering[FROM], - )( - fields: (String, Field[FROM, _, _])* - ): Table[FROM] = - H2Table.idempotentBatchedInsert(tableName, keyFieldIndex, ordering)(fields*) } val schema: Schema[DbDto] = AppendOnlySchema(H2FieldStrategy) diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2StorageBackendFactory.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2StorageBackendFactory.scala index f531a805fe..ba012e7947 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2StorageBackendFactory.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2StorageBackendFactory.scala @@ -54,9 +54,10 @@ object H2StorageBackendFactory extends StorageBackendFactory with CommonStorageB new CompletionStorageBackendTemplate(stringInterning, loggerFactory) override def createContractStorageBackend( - stringInterning: StringInterning + stringInterning: StringInterning, + ledgerEndCache: LedgerEndCache, ): ContractStorageBackend = - new ContractStorageBackendTemplate(H2QueryStrategy, stringInterning) + new ContractStorageBackendTemplate(H2QueryStrategy, stringInterning, ledgerEndCache) override def createEventStorageBackend( ledgerEndCache: LedgerEndCache, diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2Table.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2Table.scala deleted 
file mode 100644 index d8c30c8980..0000000000 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2Table.scala +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.platform.store.backend.h2 - -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.platform.store.backend.common.{BaseTable, Field, Table} - -import java.sql.Connection -import scala.annotation.tailrec -import scala.util.{Failure, Success, Try} - -private[h2] object H2Table { - private def idempotentBatchedInsertBase[FROM]( - insertStatement: String, - keyFieldIndex: Int, - ordering: Ordering[FROM], - )(fields: Seq[(String, Field[FROM, _, _])]): Table[FROM] = - new BaseTable[FROM](fields, Some(ordering)) { - override def executeUpdate: Array[Array[_]] => Connection => Unit = - data => - connection => - Table.ifNonEmpty(data) { - val preparedStatement = connection.prepareStatement(insertStatement) - data(0).indices.foreach { dataIndex => - fields(keyFieldIndex)._2.prepareData( - preparedStatement, - 1, - data(keyFieldIndex)(dataIndex), - ) - fields.indices.foreach { fieldIndex => - fields(fieldIndex)._2.prepareData( - preparedStatement, - fieldIndex + 2, - data(fieldIndex)(dataIndex), - ) - } - preparedStatement.addBatch() - } - retry(10) { - preparedStatement.executeBatch() - }.discard - preparedStatement.close() - } - } - - @tailrec - private def retry[T](maxRetry: Int)(body: => T): T = Try(body) match { - case Success(result) => result - case Failure(t) if maxRetry <= 0 => throw t - case Failure(_) => retry(maxRetry - 1)(body) - } - - private def idempotentBatchedInsertStatement( - tableName: String, - fields: Seq[(String, Field[_, _, _])], - keyFieldIndex: Int, - ): String = { - def commaSeparatedOf(extractor: ((String, Field[_, _, _])) => String): String = - fields.view - .map(extractor) - .mkString(",") - val tableFields = commaSeparatedOf(_._1) - val selectFields = commaSeparatedOf { case (_, field) => - field.selectFieldExpression("?") - } - val keyFieldName = fields(keyFieldIndex)._1 - val keyFieldSelectExpression = fields(keyFieldIndex)._2.selectFieldExpression("?") - s"""MERGE INTO $tableName USING DUAL on $keyFieldName = $keyFieldSelectExpression - |WHEN NOT MATCHED THEN INSERT ($tableFields) - |VALUES ($selectFields) - |""".stripMargin - } - - def idempotentBatchedInsert[FROM]( - tableName: String, - keyFieldIndex: Int, - ordering: Ordering[FROM], - )( - fields: (String, Field[FROM, _, _])* - ): Table[FROM] = - idempotentBatchedInsertBase( - idempotentBatchedInsertStatement(tableName, fields, keyFieldIndex), - keyFieldIndex, - ordering, - )(fields) -} diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PGField.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PGField.scala index 7b76f2fc19..618efcd290 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PGField.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PGField.scala @@ -25,26 +25,13 @@ private[postgresql] final case class PGStringArray[FROM]( override def convert: Iterable[String] => String = convertBase } 
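For context on the PGIntArrayBase removal just below: it shipped an interned-integer array to Postgres as a single '|'-separated string and let the SELECT side rebuild the array with string_to_array. A self-contained sketch of that round trip, mirroring the deleted code:

    // Scala side: pack the int array into one text bind parameter
    def encodeIntArray(ints: Iterable[Int]): String = ints.mkString("|")

    // SQL side: the select expression splits the text back into an integer[]
    def selectFieldExpression(inputFieldName: String): String =
      s"string_to_array($inputFieldName, '|')::integer[]"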
-private[postgresql] trait PGIntArrayBase[FROM, TO] extends Field[FROM, TO, String] { +private[postgresql] final case class PGSmallint[FROM]( + extract: StringInterning => FROM => Int +) extends Field[FROM, Int, java.lang.Integer] { override def selectFieldExpression(inputFieldName: String): String = - s"string_to_array($inputFieldName, '|')::integer[]" - - protected def convertBase: Iterable[Int] => String = { in => - in.mkString("|") - } -} - -private[postgresql] final case class PGIntArray[FROM]( - extract: StringInterning => FROM => Iterable[Int] -) extends PGIntArrayBase[FROM, Iterable[Int]] { - override def convert: Iterable[Int] => String = convertBase -} + s"$inputFieldName::smallint" -private[postgresql] final case class PGIntArrayOptional[FROM]( - extract: StringInterning => FROM => Option[Iterable[Int]] -) extends PGIntArrayBase[FROM, Option[Iterable[Int]]] { - @SuppressWarnings(Array("org.wartremover.warts.Null")) - override def convert: Option[Iterable[Int]] => String = _.map(convertBase).orNull + override def convert: Int => Integer = Int.box } private[postgresql] final case class PGSmallintOptional[FROM]( diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PGSchema.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PGSchema.scala index 63b7806ae3..d91ed5cafe 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PGSchema.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PGSchema.scala @@ -20,15 +20,10 @@ private[postgresql] object PGSchema { ): Field[FROM, Iterable[String], _] = PGStringArray(extractor) - override def intArray[FROM]( - extractor: StringInterning => FROM => Iterable[Int] - ): Field[FROM, Iterable[Int], _] = - PGIntArray(extractor) - - override def intArrayOptional[FROM]( - extractor: StringInterning => FROM => Option[Iterable[Int]] - ): Field[FROM, Option[Iterable[Int]], _] = - PGIntArrayOptional(extractor) + override def smallint[FROM]( + extractor: StringInterning => FROM => Int + ): Field[FROM, Int, _] = + PGSmallint(extractor) override def smallintOptional[FROM]( extractor: StringInterning => FROM => Option[Int] @@ -39,15 +34,6 @@ private[postgresql] object PGSchema { fields: (String, Field[FROM, _, _])* ): Table[FROM] = PGTable.transposedInsert(tableName)(fields*) - - override def idempotentInsert[FROM]( - tableName: String, - keyFieldIndex: Int, - ordering: Ordering[FROM], - )( - fields: (String, Field[FROM, _, _])* - ): Table[FROM] = - PGTable.idempotentTransposedInsert(tableName, keyFieldIndex, ordering)(fields*) } val schema: Schema[DbDto] = AppendOnlySchema(PGFieldStrategy) diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PGTable.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PGTable.scala index 396a5dcf1a..a1016c3a4c 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PGTable.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PGTable.scala @@ -59,18 +59,4 @@ private[postgresql] object PGTable { fields: (String, Field[FROM, _, _])* ): Table[FROM] = 
transposedInsertBase(transposedInsertStatement(tableName, fields))(fields) - - def idempotentTransposedInsert[FROM]( - tableName: String, - keyFieldIndex: Int, - ordering: Ordering[FROM], - )( - fields: (String, Field[FROM, _, _])* - ): Table[FROM] = { - val insertSuffix = s"on conflict (${fields(keyFieldIndex)._1}) do nothing" - transposedInsertBase( - transposedInsertStatement(tableName, fields, insertSuffix), - Some(ordering), - )(fields) - } } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresContractStorageBackend.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresContractStorageBackend.scala index ea88463354..44e4fb832c 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresContractStorageBackend.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresContractStorageBackend.scala @@ -3,40 +3,128 @@ package com.digitalasset.canton.platform.store.backend.postgresql -import anorm.SqlParser.array +import anorm.SqlParser.{int, long} import anorm.{SqlStringInterpolation, ~} -import com.digitalasset.canton.data.Offset -import com.digitalasset.canton.platform.Key import com.digitalasset.canton.platform.store.backend.Conversions.{contractId, hashFromHexString} +import com.digitalasset.canton.platform.store.backend.PersistentEventType import com.digitalasset.canton.platform.store.backend.common.ContractStorageBackendTemplate +import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.* +import com.digitalasset.canton.platform.store.cache.LedgerEndCache import com.digitalasset.canton.platform.store.interfaces.LedgerDaoContractsReader.{ KeyAssigned, KeyState, KeyUnassigned, } import com.digitalasset.canton.platform.store.interning.StringInterning +import com.digitalasset.canton.platform.{ContractId, Key} +import com.digitalasset.canton.topology.SynchronizerId import java.sql.Connection class PostgresContractStorageBackend( - stringInterning: StringInterning -) extends ContractStorageBackendTemplate(PostgresQueryStrategy, stringInterning) { + stringInterning: StringInterning, + ledgerEndCache: LedgerEndCache, +) extends ContractStorageBackendTemplate(PostgresQueryStrategy, stringInterning, ledgerEndCache) { + + override def keyStatesNew(keys: Seq[Key], validAtEventSeqId: Long)( + connection: Connection + ): Map[Key, Long] = + if (keys.isEmpty) Map.empty + else { + val res = SQL""" + WITH last_contract_key_create AS ( + SELECT p.internal_contract_id, p.create_key_hash + FROM UNNEST(${keys.view + .map(_.hash.bytes.toHexString) + .toArray[String]}) AS k(create_key_hash) + CROSS JOIN LATERAL ( + SELECT * + FROM lapi_events_activate_contract p + WHERE + p.create_key_hash = k.create_key_hash AND + p.event_sequential_id <= $validAtEventSeqId + ORDER BY p.event_sequential_id DESC + LIMIT 1 + ) p + ) + SELECT internal_contract_id, create_key_hash + FROM last_contract_key_create + WHERE NOT EXISTS ( + SELECT 1 + FROM lapi_events_deactivate_contract + WHERE + internal_contract_id = last_contract_key_create.internal_contract_id AND + event_sequential_id <= $validAtEventSeqId AND + event_type = ${PersistentEventType.ConsumingExercise.asInt} + )""" + .asVectorOf( + long("internal_contract_id") ~ hashFromHexString("create_key_hash") map { + case internalContractId ~ hash => 
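+              // each result row pairs a requested create_key_hash with the latest internal contract id that created it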
hash -> internalContractId + } + )(connection) + .toMap + keys + .flatMap(key => res.get(key.hash).map(key -> _)) + .toMap + } + + override def lastActivationsNew(synchronizerContracts: Iterable[(SynchronizerId, Long)])( + connection: Connection + ): Map[(SynchronizerId, Long), Long] = + ledgerEndCache() + .map { ledgerEnd => + val inputWithIndex = synchronizerContracts.zipWithIndex + val inputIndexes: Array[java.lang.Integer] = inputWithIndex.iterator.map { + case (_, index) => + Int.box(index) + }.toArray + val inputSynchronizerIds: Array[java.lang.Integer] = inputWithIndex.iterator.map { + case ((synchronizerId, _), _) => + Int.box(stringInterning.synchronizerId.internalize(synchronizerId)) + }.toArray + val inputInternalContractIds: Array[java.lang.Long] = inputWithIndex.iterator.map { + case ((_, contractId), _) => Long.box(contractId) + }.toArray + val resultParser = int("result_index") ~ long("result_event_sequential_id") map { + case index ~ resultEventSeqId => index -> resultEventSeqId + } + val results = SQL""" + SELECT input.index as result_index, activate_evs.event_sequential_id as result_event_sequential_id + FROM UNNEST($inputIndexes, $inputSynchronizerIds, $inputInternalContractIds) AS input(index, synchronizer_id, internal_contract_id) + CROSS JOIN LATERAL ( + SELECT * + FROM lapi_events_activate_contract activate_evs + WHERE activate_evs.internal_contract_id = input.internal_contract_id + AND activate_evs.event_sequential_id <= ${ledgerEnd.lastEventSeqId} + AND EXISTS ( -- subquery for triggering (event_sequential_id) INCLUDE (synchronizer_id) index usage + SELECT 1 + FROM lapi_events_activate_contract as activate_evs2 + WHERE + activate_evs2.event_sequential_id = activate_evs.event_sequential_id AND + activate_evs2.synchronizer_id = input.synchronizer_id + ) + ORDER BY activate_evs.event_sequential_id DESC + LIMIT 1 + ) activate_evs""" + .asVectorOf(resultParser)(connection) + .toMap + inputWithIndex.iterator.flatMap { case (synCon, index) => + results.get(index).map(synCon -> _) + }.toMap + } + .getOrElse(Map.empty) override final def supportsBatchKeyStateLookups: Boolean = true - override def keyStates(keys: Seq[Key], validAt: Offset)( + override def keyStates(keys: Seq[Key], validAtEventSeqId: Long)( connection: Connection ): Map[Key, KeyState] = if (keys.isEmpty) Map() else { val resultParser = - (contractId("contract_id") ~ hashFromHexString("create_key_hash") ~ array[Int]( - "flat_event_witnesses" - )).map { case cId ~ hash ~ stakeholders => - hash -> KeyAssigned(cId, stakeholders.view.map(stringInterning.party.externalize).toSet) + (contractId("contract_id") ~ hashFromHexString("create_key_hash")).map { case cId ~ hash => + hash -> KeyAssigned(cId) }.* - import com.digitalasset.canton.platform.store.backend.Conversions.OffsetToStatement - // efficient adaption of the "single lookup query" using postgres specific syntax // the unnest will expand the keys into a temporary table. the cross join lateral runs the // efficient single key lookup query on each row in the temporary table. 
this efficient @@ -50,7 +138,7 @@ class PostgresContractStorageBackend( SELECT * FROM lapi_events_create p WHERE p.create_key_hash = k.create_key_hash - AND p.event_offset <= $validAt + AND p.event_sequential_id <= $validAtEventSeqId ORDER BY p.event_sequential_id DESC LIMIT 1 ) p @@ -61,7 +149,7 @@ class PostgresContractStorageBackend( SELECT 1 FROM lapi_events_consuming_exercise WHERE contract_id = last_contract_key_create.contract_id - AND event_offset <= $validAt + AND event_sequential_id <= $validAtEventSeqId )""" .as(resultParser)(connection) .toMap @@ -70,4 +158,62 @@ class PostgresContractStorageBackend( }.toMap } + override def lastActivations( + synchronizerContracts: Iterable[(SynchronizerId, ContractId)] + )( + connection: Connection + ): Map[(SynchronizerId, ContractId), Long] = ledgerEndCache() + .map { ledgerEnd => + val inputWithIndex = synchronizerContracts.zipWithIndex + val inputIndexes: Array[java.lang.Integer] = inputWithIndex.iterator.map { case (_, index) => + Int.box(index) + }.toArray + val inputSynchronizerIds: Array[java.lang.Integer] = inputWithIndex.iterator.map { + case ((synchronizerId, _), _) => + Int.box(stringInterning.synchronizerId.internalize(synchronizerId)) + }.toArray + // need to override the default to not allow anorm to guess incorrectly the array type + import PostgresQueryStrategy.ArrayByteaToStatement + val inputContractIds: Array[Array[Byte]] = inputWithIndex.iterator.map { + case ((_, contractId), _) => contractId.toBytes.toByteArray + }.toArray + val resultParser = int("result_index") ~ long("result_event_sequential_id") map { + case index ~ resultEventSeqId => index -> resultEventSeqId + } + val resultsFromAssign = SQL""" + SELECT input.index as result_index, assign_evs.event_sequential_id as result_event_sequential_id + FROM UNNEST($inputIndexes, $inputSynchronizerIds, $inputContractIds) AS input(index, synchronizer_id, contract_id) + CROSS JOIN LATERAL ( + SELECT * + FROM lapi_events_assign assign_evs + WHERE assign_evs.contract_id = input.contract_id + AND assign_evs.target_synchronizer_id = input.synchronizer_id + AND assign_evs.event_sequential_id <= ${ledgerEnd.lastEventSeqId} + ORDER BY assign_evs.event_sequential_id DESC + LIMIT 1 + ) assign_evs""" + .asVectorOf(resultParser)(connection) + .toMap + val resultsFromCreate = SQL""" + SELECT input.index as result_index, create_evs.event_sequential_id as result_event_sequential_id + FROM UNNEST($inputIndexes, $inputSynchronizerIds, $inputContractIds) AS input(index, synchronizer_id, contract_id) + CROSS JOIN LATERAL ( + SELECT * + FROM lapi_events_create create_evs + WHERE create_evs.contract_id = input.contract_id + AND create_evs.synchronizer_id = input.synchronizer_id + AND create_evs.event_sequential_id <= ${ledgerEnd.lastEventSeqId} + ORDER BY create_evs.event_sequential_id DESC + LIMIT 1 + ) create_evs""" + .asVectorOf(resultParser)(connection) + .toMap + inputWithIndex.iterator.flatMap { case (synCon, index) => + List(resultsFromAssign, resultsFromCreate) + .flatMap(_.get(index)) + .maxOption + .map(synCon -> _) + }.toMap + } + .getOrElse(Map.empty) } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresEventStorageBackend.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresEventStorageBackend.scala index 48b7251069..ff7a67d96b 100644 --- 
a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresEventStorageBackend.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresEventStorageBackend.scala @@ -27,7 +27,7 @@ class PostgresEventStorageBackend( loggerFactory = loggerFactory, ) { - override def lookupAssignSequentialIdBy( + override def lookupAssignSequentialIdByLegacy( unassignProperties: Iterable[UnassignProperties] )(connection: Connection): Map[UnassignProperties, Long] = if (unassignProperties.isEmpty) Map.empty diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresQueryStrategy.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresQueryStrategy.scala index 846749a8e3..7e0834deb9 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresQueryStrategy.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresQueryStrategy.scala @@ -25,6 +25,12 @@ object PostgresQueryStrategy extends QueryStrategy { cSQL"= ANY($longArray::bigint[])" } + override def anyOfSmallInts(ints: Iterable[Int]): CompositeSql = { + val intArray: Array[java.lang.Integer] = + ints.view.map(Int.box).toArray + cSQL"= ANY($intArray::smallint[])" + } + override def anyOfStrings(strings: Iterable[String]): CompositeSql = { val stringArray: Array[String] = strings.toArray diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresResetStorageBackend.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresResetStorageBackend.scala index a62546759b..b174f50c12 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresResetStorageBackend.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresResetStorageBackend.scala @@ -16,6 +16,14 @@ object PostgresResetStorageBackend extends ResetStorageBackend { delete from lapi_parameters cascade; delete from lapi_ledger_end_synchronizer_index cascade; delete from lapi_command_completions cascade; + delete from lapi_events_activate_contract cascade; + delete from lapi_filter_activate_stakeholder cascade; + delete from lapi_filter_activate_witness cascade; + delete from lapi_events_deactivate_contract cascade; + delete from lapi_filter_deactivate_stakeholder cascade; + delete from lapi_filter_deactivate_witness cascade; + delete from lapi_events_various_witnessed cascade; + delete from lapi_filter_various_witness cascade; delete from lapi_events_create cascade; delete from lapi_events_consuming_exercise cascade; delete from lapi_events_non_consuming_exercise cascade; diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresStorageBackendFactory.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresStorageBackendFactory.scala index a931c062b2..fa0a72857d 100644 --- 
a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresStorageBackendFactory.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresStorageBackendFactory.scala @@ -31,9 +31,10 @@ final case class PostgresStorageBackendFactory(loggerFactory: NamedLoggerFactory new CompletionStorageBackendTemplate(stringInterning, loggerFactory) override def createContractStorageBackend( - stringInterning: StringInterning + stringInterning: StringInterning, + ledgerEndCache: LedgerEndCache, ): ContractStorageBackend = - new PostgresContractStorageBackend(stringInterning) + new PostgresContractStorageBackend(stringInterning, ledgerEndCache) override def createEventStorageBackend( ledgerEndCache: LedgerEndCache, diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/ContractKeyStateCache.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/ContractKeyStateCache.scala index 371211231f..e4c1780cd1 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/ContractKeyStateCache.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/ContractKeyStateCache.scala @@ -4,7 +4,6 @@ package com.digitalasset.canton.platform.store.cache import com.digitalasset.canton.caching.SizedCache -import com.digitalasset.canton.data.Offset import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.metrics.LedgerApiServerMetrics import com.digitalasset.daml.lf.transaction.GlobalKey @@ -13,7 +12,7 @@ import scala.concurrent.ExecutionContext object ContractKeyStateCache { def apply( - initialCacheIndex: Option[Offset], + initialCacheEventSeqIdIndex: Long, cacheSize: Long, metrics: LedgerApiServerMetrics, loggerFactory: NamedLoggerFactory, @@ -21,7 +20,7 @@ object ContractKeyStateCache { ec: ExecutionContext ): StateCache[GlobalKey, ContractKeyStateValue] = StateCache( - initialCacheIndex = initialCacheIndex, + initialCacheEventSeqIdIndex = initialCacheEventSeqIdIndex, emptyLedgerState = ContractKeyStateValue.Unassigned, cache = SizedCache.from[GlobalKey, ContractKeyStateValue]( SizedCache.Configuration(cacheSize), @@ -36,8 +35,7 @@ sealed trait ContractKeyStateValue extends Product with Serializable object ContractKeyStateValue { - final case class Assigned(contractId: ContractId, createWitnesses: Set[Party]) - extends ContractKeyStateValue + final case class Assigned(contractId: ContractId) extends ContractKeyStateValue final case object Unassigned extends ContractKeyStateValue } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/ContractStateCaches.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/ContractStateCaches.scala index b861115dbd..2ddc5fcf73 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/ContractStateCaches.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/ContractStateCaches.scala @@ -4,16 +4,17 @@ package com.digitalasset.canton.platform.store.cache import cats.data.NonEmptyVector -import com.digitalasset.canton.data.Offset +import 
com.digitalasset.canton.ledger.participant.state.index.ContractStateStatus +import com.digitalasset.canton.ledger.participant.state.index.ContractStateStatus.{ + Active, + Archived, + ExistingContractStatus, +} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.LedgerApiServerMetrics import com.digitalasset.canton.platform.* +import com.digitalasset.canton.platform.store.backend.ParameterStorageBackend.LedgerEnd import com.digitalasset.canton.platform.store.cache.ContractKeyStateValue.{Assigned, Unassigned} -import com.digitalasset.canton.platform.store.cache.ContractStateValue.{ - Active, - Archived, - ExistingContractValue, -} import com.digitalasset.canton.platform.store.dao.events.ContractStateEvent import com.digitalasset.canton.platform.store.dao.events.ContractStateEvent.ReassignmentAccepted import com.digitalasset.canton.tracing.TraceContext @@ -34,7 +35,7 @@ import scala.concurrent.ExecutionContext */ class ContractStateCaches( private[cache] val keyState: StateCache[GlobalKey, ContractKeyStateValue], - private[cache] val contractState: StateCache[ContractId, ContractStateValue], + private[cache] val contractState: StateCache[ContractId, ContractStateStatus], val loggerFactory: NamedLoggerFactory, ) extends NamedLogging { @@ -45,49 +46,49 @@ class ContractStateCaches( * strictly increasing event sequential ids. */ def push( - eventsBatch: NonEmptyVector[ContractStateEvent] + eventsBatch: NonEmptyVector[ContractStateEvent], + lastEventSeqId: Long, )(implicit traceContext: TraceContext): Unit = { val keyMappingsBuilder = Map.newBuilder[Key, ContractKeyStateValue] - val contractMappingsBuilder = Map.newBuilder[ContractId, ExistingContractValue] + val contractMappingsBuilder = Map.newBuilder[ContractId, ExistingContractStatus] eventsBatch.toVector.foreach { case created: ContractStateEvent.Created => created.globalKey.foreach(key => keyMappingsBuilder.addOne( - key -> Assigned(created.contractId, created.contract.stakeholders) + key -> Assigned(created.contractId) ) ) - contractMappingsBuilder.addOne( - created.contractId -> Active(created.contract) - ) + contractMappingsBuilder.addOne(created.contractId -> Active) case archived: ContractStateEvent.Archived => archived.globalKey.foreach { key => keyMappingsBuilder.addOne(key -> Unassigned) } - contractMappingsBuilder.addOne(archived.contractId -> Archived(archived.stakeholders)) + contractMappingsBuilder.addOne(archived.contractId -> Archived) - case _: ReassignmentAccepted => () + case ReassignmentAccepted => () } val keyMappings = keyMappingsBuilder.result() val contractMappings = contractMappingsBuilder.result() - val validAt = eventsBatch.last.eventOffset + val validAt = lastEventSeqId keyState.putBatch(validAt, keyMappings) contractState.putBatch(validAt, contractMappings) } /** Reset the contract and key state caches to the specified offset. 
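+    * When no ledger end has been persisted yet, the caches are reset to event sequential id 0.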
*/ - def reset(lastPersistedLedgerEnd: Option[Offset]): Unit = { - keyState.reset(lastPersistedLedgerEnd) - contractState.reset(lastPersistedLedgerEnd) + def reset(lastPersistedLedgerEnd: Option[LedgerEnd]): Unit = { + val index = lastPersistedLedgerEnd.map(_.lastEventSeqId).getOrElse(0L) + keyState.reset(index) + contractState.reset(index) } } object ContractStateCaches { def build( - initialCacheIndex: Option[Offset], + initialCacheEventSeqIdIndex: Long, maxContractsCacheSize: Long, maxKeyCacheSize: Long, metrics: LedgerApiServerMetrics, @@ -96,9 +97,14 @@ object ContractStateCaches { executionContext: ExecutionContext ): ContractStateCaches = new ContractStateCaches( - contractState = - ContractsStateCache(initialCacheIndex, maxContractsCacheSize, metrics, loggerFactory), - keyState = ContractKeyStateCache(initialCacheIndex, maxKeyCacheSize, metrics, loggerFactory), + contractState = ContractsStateCache( + initialCacheEventSeqIdIndex, + maxContractsCacheSize, + metrics, + loggerFactory, + ), + keyState = + ContractKeyStateCache(initialCacheEventSeqIdIndex, maxKeyCacheSize, metrics, loggerFactory), loggerFactory = loggerFactory, ) } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/ContractsStateCache.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/ContractsStateCache.scala index 733ec75bad..3622d5acc0 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/ContractsStateCache.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/ContractsStateCache.scala @@ -4,7 +4,7 @@ package com.digitalasset.canton.platform.store.cache import com.digitalasset.canton.caching.SizedCache -import com.digitalasset.canton.data.Offset +import com.digitalasset.canton.ledger.participant.state.index.ContractStateStatus import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.metrics.LedgerApiServerMetrics @@ -12,17 +12,17 @@ import scala.concurrent.ExecutionContext object ContractsStateCache { def apply( - initialCacheIndex: Option[Offset], + initialCacheEventSeqIdIndex: Long, cacheSize: Long, metrics: LedgerApiServerMetrics, loggerFactory: NamedLoggerFactory, )(implicit ec: ExecutionContext - ): StateCache[ContractId, ContractStateValue] = + ): StateCache[ContractId, ContractStateStatus] = StateCache( - initialCacheIndex = initialCacheIndex, - emptyLedgerState = ContractStateValue.NotFound, - cache = SizedCache.from[ContractId, ContractStateValue]( + initialCacheEventSeqIdIndex = initialCacheEventSeqIdIndex, + emptyLedgerState = ContractStateStatus.NotFound, + cache = SizedCache.from[ContractId, ContractStateStatus]( SizedCache.Configuration(cacheSize), metrics.execution.cache.contractState.stateCache, ), @@ -30,15 +30,3 @@ object ContractsStateCache { loggerFactory = loggerFactory, ) } - -sealed trait ContractStateValue extends Product with Serializable - -object ContractStateValue { - final case object NotFound extends ContractStateValue - - sealed trait ExistingContractValue extends ContractStateValue - - final case class Active(contract: FatContract) extends ExistingContractValue - - final case class Archived(stakeholders: Set[Party]) extends ExistingContractValue -} diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/InMemoryFanoutBuffer.scala 
b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/InMemoryFanoutBuffer.scala index c7b3815b8e..940b8e449f 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/InMemoryFanoutBuffer.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/InMemoryFanoutBuffer.scala @@ -43,7 +43,7 @@ class InMemoryFanoutBuffer( @volatile private[cache] var _bufferLog = Vector.empty[(Offset, TransactionLogUpdate)] @volatile private[cache] var _lookupMap = - Map.empty[UpdateId, TransactionLogUpdate] + Map.empty[String, TransactionLogUpdate] private val bufferMetrics = metrics.services.index.inMemoryFanoutBuffer private val pushTimer = bufferMetrics.push @@ -125,7 +125,7 @@ class InMemoryFanoutBuffer( /** Lookup the accepted transaction update by transaction id. */ def lookupTransaction( - updateId: UpdateId + updateId: String ): Option[TransactionLogUpdate.TransactionAccepted] = _lookupMap.get(updateId).collect { case tx: TransactionLogUpdate.TransactionAccepted => tx } @@ -133,13 +133,13 @@ class InMemoryFanoutBuffer( def lookup( lookupKey: LookupKey ): Option[TransactionLogUpdate] = lookupKey match { - case LookupKey.UpdateId(updateId) => lookup(updateId) - case LookupKey.Offset(offset) => lookup(offset) + case LookupKey.ByUpdateId(updateId) => lookup(updateId.toHexString) + case LookupKey.ByOffset(offset) => lookup(offset) } /** Lookup the accepted transaction log update by update id. */ private def lookup( - updateId: UpdateId + updateId: String ): Option[TransactionLogUpdate] = _lookupMap.get(updateId) /** Lookup the accepted transaction log update by update offset. */ @@ -156,23 +156,6 @@ class InMemoryFanoutBuffer( } } - // TODO(#23504) remove - /** Lookup the accepted transaction update by transaction offset. */ - def lookupTransaction( - offset: Offset - ): Option[TransactionLogUpdate.TransactionAccepted] = { - val vectorSnapshot = _bufferLog - - val searchResult = vectorSnapshot.view.map(_._1).search(offset) - - val foundUpdate = searchResult match { - case Found(idx) => Some(vectorSnapshot(idx)._2) - case _ => None - } - - foundUpdate.collect { case tx: TransactionLogUpdate.TransactionAccepted => tx } - } - /** Removes entries starting from the buffer head up until `endInclusive`. 
* * @param endInclusive @@ -222,7 +205,7 @@ class InMemoryFanoutBuffer( private def dropOldest(dropCount: Int): Unit = blocking(synchronized { val (evicted, remainingBufferLog) = _bufferLog.splitAt(dropCount) - val lookupKeysToEvict: View[UpdateId] = + val lookupKeysToEvict: View[String] = evicted.view.map(_._2).flatMap(extractEntryFromMap).map(_._1) _bufferLog = remainingBufferLog @@ -231,7 +214,7 @@ class InMemoryFanoutBuffer( private def extractEntryFromMap( transactionLogUpdate: TransactionLogUpdate - ): Option[(UpdateId, TransactionLogUpdate)] = + ): Option[(String, TransactionLogUpdate)] = transactionLogUpdate match { case txAccepted: TransactionLogUpdate.TransactionAccepted => Some(txAccepted.updateId -> txAccepted) @@ -245,7 +228,6 @@ class InMemoryFanoutBuffer( } private[platform] object InMemoryFanoutBuffer { - type UpdateId = String /** Specialized slice representation of a Vector */ private[platform] sealed trait BufferSlice[+ELEM] extends Product with Serializable { diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/MutableCacheBackedContractStore.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/MutableCacheBackedContractStore.scala index 19cd6561fe..6bd1585bc1 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/MutableCacheBackedContractStore.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/MutableCacheBackedContractStore.scala @@ -4,18 +4,19 @@ package com.digitalasset.canton.platform.store.cache import com.digitalasset.canton.ledger.participant.state.index -import com.digitalasset.canton.ledger.participant.state.index.ContractStore +import com.digitalasset.canton.ledger.participant.state.index.ContractStateStatus.ExistingContractStatus +import com.digitalasset.canton.ledger.participant.state.index.{ + ContractState, + ContractStateStatus, + ContractStore, +} import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors +import com.digitalasset.canton.participant.store import com.digitalasset.canton.platform.store.cache.ContractKeyStateValue.* -import com.digitalasset.canton.platform.store.cache.ContractStateValue.* import com.digitalasset.canton.platform.store.interfaces.LedgerDaoContractsReader -import com.digitalasset.canton.platform.store.interfaces.LedgerDaoContractsReader.{ - ActiveContract, - ArchivedContract, - ContractState, - KeyState, -} +import com.digitalasset.canton.platform.store.interfaces.LedgerDaoContractsReader.KeyState import com.digitalasset.daml.lf.transaction.GlobalKey import scala.concurrent.{ExecutionContext, Future} @@ -24,6 +25,7 @@ private[platform] class MutableCacheBackedContractStore( contractsReader: LedgerDaoContractsReader, val loggerFactory: NamedLoggerFactory, private[cache] val contractStateCaches: ContractStateCaches, + contractStore: store.ContractStore, )(implicit executionContext: ExecutionContext) extends ContractStore with NamedLogging { @@ -31,26 +33,32 @@ private[platform] class MutableCacheBackedContractStore( override def lookupActiveContract(readers: Set[Party], contractId: ContractId)(implicit loggingContext: LoggingContextWithTrace ): Future[Option[FatContract]] = - 
lookupContractStateValue(contractId) - .flatMap(contractStateToResponse(readers)) + lookupContractState(contractId) + .map(contractStateToResponse(readers)) override def lookupContractState( contractId: ContractId - )(implicit loggingContext: LoggingContextWithTrace): Future[index.ContractState] = - lookupContractStateValue(contractId) - .map { - case active: Active => index.ContractState.Active(active.contract) - case _: Archived => index.ContractState.Archived - case NotFound => index.ContractState.NotFound - } - - private def lookupContractStateValue( - contractId: ContractId - )(implicit loggingContext: LoggingContextWithTrace): Future[ContractStateValue] = + )(implicit loggingContext: LoggingContextWithTrace): Future[ContractState] = contractStateCaches.contractState .get(contractId) .map(Future.successful) .getOrElse(readThroughContractsCache(contractId)) + .flatMap { + case ContractStateStatus.Active => + contractStore + .lookupPersisted(contractId) + .failOnShutdownTo(GrpcErrors.AbortedDueToShutdown.Error().asGrpcError) + .map { + case Some(persistedContract) => ContractState.Active(persistedContract.inst) + case None => + logger.error( + s"Contract $contractId marked as active in index (db or cache) but not found in participant's contract store" + ) + ContractState.NotFound + } + case ContractStateStatus.Archived => Future.successful(ContractState.Archived) + case ContractStateStatus.NotFound => Future.successful(ContractState.NotFound) + } override def lookupContractKey(readers: Set[Party], key: GlobalKey)(implicit loggingContext: LoggingContextWithTrace @@ -59,11 +67,11 @@ private[platform] class MutableCacheBackedContractStore( .get(key) .map(Future.successful) .getOrElse(readThroughKeyCache(key)) - .map(keyStateToResponse(_, readers)) + .flatMap(keyStateToResponse(_, readers)) private def readThroughContractsCache(contractId: ContractId)(implicit loggingContext: LoggingContextWithTrace - ): Future[ContractStateValue] = + ): Future[ContractStateStatus] = contractStateCaches.contractState .putAsync( contractId, @@ -73,31 +81,31 @@ private[platform] class MutableCacheBackedContractStore( private def keyStateToResponse( value: ContractKeyStateValue, readers: Set[Party], - ): Option[ContractId] = value match { - case Assigned(contractId, createWitnesses) if nonEmptyIntersection(readers, createWitnesses) => - Some(contractId) - case _: Assigned | Unassigned => Option.empty + )(implicit loggingContext: LoggingContextWithTrace): Future[Option[ContractId]] = value match { + case Assigned(contractId) => + lookupContractState(contractId).map( + contractStateToResponse(readers)(_).map(_.contractId) + ) + + case _: Assigned | Unassigned => Future.successful(None) } private def contractStateToResponse(readers: Set[Party])( - value: ContractStateValue - ): Future[Option[FatContract]] = + value: index.ContractState + ): Option[FatContract] = value match { - case Active(contract) if nonEmptyIntersection(contract.stakeholders, readers) => - Future.successful(Some(contract)) + case ContractState.Active(contract) if nonEmptyIntersection(contract.stakeholders, readers) => + Some(contract) case _ => - Future.successful(Option.empty) + None } - private val toContractCacheValue: Option[ContractState] => ContractStateValue = { - case Some(active: ActiveContract) => ContractStateValue.Active(active.contract) - case Some(ArchivedContract(stakeholders)) => ContractStateValue.Archived(stakeholders) - case None => ContractStateValue.NotFound - } + private val toContractCacheValue: 
Option[ExistingContractStatus] => ContractStateStatus = + _.getOrElse(ContractStateStatus.NotFound) private val toKeyCacheValue: KeyState => ContractKeyStateValue = { - case LedgerDaoContractsReader.KeyAssigned(contractId, stakeholders) => - Assigned(contractId, stakeholders) + case LedgerDaoContractsReader.KeyAssigned(contractId) => + Assigned(contractId) case LedgerDaoContractsReader.KeyUnassigned => Unassigned } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/StateCache.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/StateCache.scala index ed9ab1bdca..40dc4a6775 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/StateCache.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/StateCache.scala @@ -6,7 +6,6 @@ package com.digitalasset.canton.platform.store.cache import com.daml.metrics.Timed import com.daml.metrics.api.MetricHandle.Timer import com.digitalasset.canton.caching.Cache -import com.digitalasset.canton.data.Offset import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.platform.store.cache.StateCache.PendingUpdatesState @@ -27,7 +26,7 @@ import scala.concurrent.{ExecutionContext, Future, blocking} */ @SuppressWarnings(Array("org.wartremover.warts.FinalCaseClass")) // This class is mocked in tests private[platform] case class StateCache[K, V]( - initialCacheIndex: Option[Offset], + initialCacheEventSeqIdIndex: Long, emptyLedgerState: V, cache: Cache[K, V], registerUpdateTimer: Timer, @@ -36,7 +35,7 @@ private[platform] case class StateCache[K, V]( extends NamedLogging { private[cache] val pendingUpdates = mutable.Map.empty[K, PendingUpdatesState] @SuppressWarnings(Array("org.wartremover.warts.Var")) - @volatile private[cache] var cacheIndex = initialCacheIndex + @volatile private[cache] var cacheEventSeqIdIndex = initialCacheEventSeqIdIndex /** Fetch the corresponding value for an input key, if present. * @@ -58,12 +57,12 @@ private[platform] case class StateCache[K, V]( /** Synchronous cache updates evolve the cache ahead with the most recent Index DB entries. This * method increases the `cacheIndex` monotonically. * - * @param validAt + * @param validAtEventSeqId * ordering discriminator for pending updates for the same key * @param batch - * the batch of events updating the cache at `validAt` + * the batch of events updating the cache at `validAtEventSeqId` */ - def putBatch(validAt: Offset, batch: Map[K, V])(implicit + def putBatch(validAtEventSeqId: Long, batch: Map[K, V])(implicit traceContext: TraceContext ): Unit = Timed.value( registerUpdateTimer, blocking(pendingUpdates.synchronized { // The mutable contract state cache update stream should generally increase the cacheIndex strictly monotonically. // However, the most recent updates can be replayed in case of failure of the mutable contract state cache update stream. // In this case, we must ignore the already seen updates (i.e. that have `validAt` before or at the cacheIndex).
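+      // the cache index is now a plain event sequential id (Long), so the guard below compares Longs directly instead of optional offsets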
- if (Option(validAt) > cacheIndex) { + if (validAtEventSeqId > cacheEventSeqIdIndex) { batch.keySet.foreach { key => - pendingUpdates.updateWith(key)(_.map(_.withValidAt(validAt))).discard + pendingUpdates.updateWith(key)(_.map(_.withValidAt(validAtEventSeqId))).discard } - cacheIndex = Some(validAt) + cacheEventSeqIdIndex = validAtEventSeqId cache.putAll(batch) logger.debug( s"Updated cache with a batch of ${batch .map { case (k, v) => s"$k -> ${truncateValueForLogging(v)}" } - .mkString("[", ", ", "]")} at $validAt" + .mkString("[", ", ", "]")} at $validAtEventSeqId" ) } else logger.warn( - s"Ignoring incoming synchronous update at an index (${validAt.unwrap}) equal to or before the cache index (${cacheIndex - .fold(0L)(_.unwrap)})" + s"Ignoring incoming synchronous update at an event sequential ID ($validAtEventSeqId) equal to or before the cache index ($cacheEventSeqIdIndex)" ) }), ) @@ -103,52 +101,48 @@ private[platform] case class StateCache[K, V]( * fetches asynchronously the value for key `key` at the current cache index */ @SuppressWarnings(Array("com.digitalasset.canton.SynchronizedFuture")) - def putAsync(key: K, fetchAsync: Offset => Future[V])(implicit + def putAsync(key: K, fetchAsync: Long => Future[V])(implicit traceContext: TraceContext ): Future[V] = Timed.value( registerUpdateTimer, blocking(pendingUpdates.synchronized { - cacheIndex match { - case Some(validAt) => - val eventualValue = Future.delegate(fetchAsync(validAt)) - pendingUpdates.get(key) match { - case Some(freshPendingUpdate) if freshPendingUpdate.latestValidAt == validAt => - eventualValue - - case Some(freshPendingUpdate) if freshPendingUpdate.latestValidAt > validAt => - ErrorUtil.invalidState( - s"Pending update ($freshPendingUpdate) should never be later than the cacheIndex ($validAt)." - ) - - case outdatedOrNew => - pendingUpdates - .put( - key, - PendingUpdatesState( - outdatedOrNew.map(_.pendingCount).getOrElse(0L) + 1L, - validAt, - ), - ) - .discard - registerEventualCacheUpdate(key, eventualValue, validAt) - .flatMap(_ => eventualValue) - } - - case None => - Future.successful(emptyLedgerState) + // dereferencing the mutable var to a val here is critical as it will be used asynchronously below + val validAt = cacheEventSeqIdIndex + val eventualValue = Future.delegate(fetchAsync(validAt)) + pendingUpdates.get(key) match { + case Some(freshPendingUpdate) if freshPendingUpdate.latestValidAt == validAt => + eventualValue + + case Some(freshPendingUpdate) if freshPendingUpdate.latestValidAt > validAt => + ErrorUtil.invalidState( + s"Pending update ($freshPendingUpdate) should never be later than the cacheIndex ($validAt)." + ) + + case outdatedOrNew => + pendingUpdates + .put( + key, + PendingUpdatesState( + outdatedOrNew.map(_.pendingCount).getOrElse(0L) + 1L, + validAt, + ), + ) + .discard + registerEventualCacheUpdate(key, eventualValue, validAt) + .flatMap(_ => eventualValue) } }), ) - /** Resets the cache and cancels are pending asynchronous updates. + /** Resets the cache and cancels all pending asynchronous updates.
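+   * In-flight asynchronous fetches started before the reset are discarded and never written to the cache.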
* - * @param resetAtOffset - * The cache re-initialization offset + * @param resetAtEventSeqId + * The cache re-initialization event sequential ID */ - def reset(resetAtOffset: Option[Offset]): Unit = + def reset(resetAtEventSeqId: Long): Unit = blocking(pendingUpdates.synchronized { - cacheIndex = resetAtOffset + cacheEventSeqIdIndex = resetAtEventSeqId pendingUpdates.clear() cache.invalidateAll() }) @@ -156,7 +150,7 @@ private[platform] case class StateCache[K, V]( private def registerEventualCacheUpdate( key: K, eventualUpdate: Future[V], - validAt: Offset, + validAtEventSeqId: Long, )(implicit traceContext: TraceContext): Future[Unit] = eventualUpdate .map { (value: V) => @@ -169,10 +163,10 @@ private[platform] case class StateCache[K, V]( // sampled when initially dispatched in `putAsync`. // Otherwise we can assume that a more recent `putAsync` has an update in-flight // or that the entry has been updated synchronously with `put` with a recent Index DB entry. - if (pendingForKey.latestValidAt == validAt) { + if (pendingForKey.latestValidAt == validAtEventSeqId) { cache.put(key, value) logger.debug( - s"Updated cache for $key with ${truncateValueForLogging(value)} at $validAt" + s"Updated cache for $key with ${truncateValueForLogging(value)} at $validAtEventSeqId" ) } removeFromPending(key) @@ -227,9 +221,9 @@ object StateCache { */ private[cache] final case class PendingUpdatesState( pendingCount: Long, - latestValidAt: Offset, + latestValidAt: Long, ) { - def withValidAt(validAt: Offset): PendingUpdatesState = + def withValidAt(validAt: Long): PendingUpdatesState = this.copy(latestValidAt = validAt) def decPendingCount: PendingUpdatesState = this.copy(pendingCount = pendingCount - 1) } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/BufferedTransactionPointwiseReader.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/BufferedTransactionPointwiseReader.scala deleted file mode 100644 index d1be5e75df..0000000000 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/BufferedTransactionPointwiseReader.scala +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.platform.store.dao - -import com.digitalasset.canton.logging.LoggingContextWithTrace -import com.digitalasset.canton.platform.store.dao.BufferedTransactionByIdReader.{ - FetchTransactionPointwiseFromPersistence, - ToApiResponse, -} -import com.digitalasset.canton.platform.store.interfaces.TransactionLogUpdate.TransactionAccepted - -import scala.concurrent.Future - -/** Generic class that helps serving Ledger API point-wise lookups - * (UpdateService.{GetTransactionById, GetTransactionTreeById, GetTransactionByOffset, - * GetTransactionTreeByOffset}) from either the in-memory fan-out buffer or from persistence. - * - * @param fetchFromPersistence - * Fetch a transaction by offset or id from persistence. - * @param toApiResponse - * Convert a - * [[com.digitalasset.canton.platform.store.interfaces.TransactionLogUpdate.TransactionAccepted]] - * to a specific API response while also filtering for visibility. - * @tparam QUERY_PARAM_TYPE - * The query parameter type. - * @tparam API_RESPONSE - * The Ledger API response type. 
- */ -class BufferedTransactionPointwiseReader[QUERY_PARAM_TYPE, API_RESPONSE]( - fetchFromPersistence: FetchTransactionPointwiseFromPersistence[QUERY_PARAM_TYPE, API_RESPONSE], - fetchFromBuffer: QUERY_PARAM_TYPE => Option[TransactionAccepted], - toApiResponse: ToApiResponse[QUERY_PARAM_TYPE, API_RESPONSE], -) { - - /** Serves processed and filtered transaction from the buffer by the query parameter, with - * fallback to a persistence fetch if the transaction is not anymore in the buffer (i.e. it was - * evicted) - * - * @param queryParam - * The query parameter. - * @param loggingContext - * The logging context - * @return - * A future wrapping the API response if found. - */ - def fetch(queryParam: QUERY_PARAM_TYPE)(implicit - loggingContext: LoggingContextWithTrace - ): Future[Option[API_RESPONSE]] = - fetchFromBuffer(queryParam) match { - case Some(value) => toApiResponse(value, queryParam, loggingContext) - case None => - fetchFromPersistence(queryParam, loggingContext) - } -} - -object BufferedTransactionByIdReader { - trait FetchTransactionPointwiseFromPersistence[QUERY_PARAM_TYPE, API_RESPONSE] { - def apply( - queryParam: QUERY_PARAM_TYPE, - loggingContext: LoggingContextWithTrace, - ): Future[Option[API_RESPONSE]] - } - - trait ToApiResponse[QUERY_PARAM_TYPE, API_RESPONSE] { - def apply( - transactionAccepted: TransactionAccepted, - queryParam: QUERY_PARAM_TYPE, - loggingContext: LoggingContextWithTrace, - ): Future[Option[API_RESPONSE]] - } -} diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/CommandCompletionsReader.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/CommandCompletionsReader.scala index 00ec129631..9359176419 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/CommandCompletionsReader.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/CommandCompletionsReader.scala @@ -13,8 +13,6 @@ import com.digitalasset.canton.platform.{Party, UserId} import org.apache.pekko.NotUsed import org.apache.pekko.stream.scaladsl.Source -import java.sql.Connection - /** @param pageSize * a single DB fetch query is guaranteed to fetch no more than this many results. 
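+  * Ranges larger than a single page are served by follow-up queries that resume from the last returned offset.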
*/ @@ -44,7 +42,7 @@ private[dao] final class CommandCompletionsReader( loggingContext: LoggingContextWithTrace ): Source[(Offset, CompletionStreamResponse), NotUsed] = { val pruneSafeQuery = - (range: QueryRange[Offset]) => { implicit connection: Connection => + (range: QueryRange[Offset]) => queryValidRange.withRangeNotPruned[Vector[CompletionStreamResponse]]( minOffsetInclusive = startInclusive, maxOffsetInclusive = endInclusive, @@ -54,15 +52,16 @@ private[dao] final class CommandCompletionsReader( s"Command completions request from ${startInclusive.unwrap} to ${endInclusive.unwrap} is beyond ledger end offset ${ledgerEndOffset .fold(0L)(_.unwrap)}", ) { - storageBackend.commandCompletions( - startInclusive = range.startInclusive, - endInclusive = range.endInclusive, - userId = userId, - parties = parties, - limit = pageSize, - )(connection) + dispatcher.executeSql(metrics.index.db.getCompletions)( + storageBackend.commandCompletions( + startInclusive = range.startInclusive, + endInclusive = range.endInclusive, + userId = userId, + parties = parties, + limit = pageSize, + ) + ) } - } val initialRange = new QueryRange[Offset]( startInclusive = startInclusive, @@ -76,7 +75,7 @@ private[dao] final class CommandCompletionsReader( initialRange.copy(startInclusive = lastOffset.increment) }, ) { (subRange: QueryRange[Offset]) => - dispatcher.executeSql(metrics.index.db.getCompletions)(pruneSafeQuery(subRange)) + pruneSafeQuery(subRange) } source.map(response => offsetFor(response) -> response) } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDao.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDao.scala index 3c3e49d45a..c6d20a1891 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDao.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDao.scala @@ -14,10 +14,10 @@ import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.LedgerApiServerMetrics +import com.digitalasset.canton.participant.store.ContractStore import com.digitalasset.canton.platform.* import com.digitalasset.canton.platform.config.{ ActiveContractsServiceStreamsConfig, - TransactionTreeStreamsConfig, UpdatesStreamsConfig, } import com.digitalasset.canton.platform.store.* @@ -27,12 +27,15 @@ import com.digitalasset.canton.platform.store.cache.LedgerEndCache import com.digitalasset.canton.platform.store.dao.events.* import com.digitalasset.canton.platform.store.interning.StringInterning import com.digitalasset.canton.platform.store.utils.QueueBasedConcurrencyLimiter +import com.digitalasset.canton.protocol.{ContractInstance, ContractMetadata, UpdateId} import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.Thereafter.syntax.* import com.digitalasset.daml.lf.data.Time.Timestamp import com.digitalasset.daml.lf.data.{Bytes, Ref} -import com.digitalasset.daml.lf.transaction.CommittedTransaction +import com.digitalasset.daml.lf.transaction.CreationTime.CreatedAt +import com.digitalasset.daml.lf.transaction.{CommittedTransaction, Node} +import 
com.google.protobuf.ByteString import io.opentelemetry.api.trace.Tracer import scala.concurrent.{ExecutionContext, Future} @@ -51,7 +54,6 @@ private class JdbcLedgerDao( completionsPageSize: Int, activeContractsServiceStreamsConfig: ActiveContractsServiceStreamsConfig, updatesStreamsConfig: UpdatesStreamsConfig, - transactionTreeStreamsConfig: TransactionTreeStreamsConfig, globalMaxEventIdQueries: Int, globalMaxEventPayloadQueries: Int, tracer: Tracer, @@ -63,7 +65,10 @@ private class JdbcLedgerDao( ) => FutureUnlessShutdown[Vector[Offset]], contractLoader: ContractLoader, translation: LfValueTranslation, -) extends LedgerReadDao + contractStore: ContractStore, + pruningOffsetService: PruningOffsetService, +)(implicit ec: ExecutionContext) + extends LedgerReadDao with LedgerWriteDaoForTests with NamedLogging { @@ -121,7 +126,7 @@ private class JdbcLedgerDao( party = partyDetails.party, // HACK: the `PartyAddedToParticipant` transmits `participantId`s, while here we only have the information // whether the party is locally hosted or not. We use the `nonLocalParticipantId` to get the desired effect of - // the `isLocal = False` information to be transmitted via a `PartyAddedToParticpant` `Update`. + // the `isLocal = False` information to be transmitted via a `PartyAddedToParticipant` `Update`. // // This will be properly resolved once we move away from the `sandbox-classic` codebase. participantId = if (partyDetails.isLocal) participantId else NonLocalParticipantId, @@ -196,7 +201,7 @@ private class JdbcLedgerDao( dbDispatcher .executeSql(metrics.index.db.pruneDbMetrics) { conn => - readStorageBackend.eventStorageBackend.pruneEvents( + readStorageBackend.eventStorageBackend.pruneEventsLegacy( pruneUpToInclusive, incompleteReassignmentOffsets, )( @@ -225,11 +230,13 @@ private class JdbcLedgerDao( override def pruningOffset(implicit loggingContext: LoggingContextWithTrace ): Future[Option[Offset]] = - dbDispatcher.executeSql(metrics.index.db.fetchPruningOffsetsMetrics)( - parameterStorageBackend.prunedUpToInclusive - ) + pruningOffsetService.pruningOffset - private val queryValidRange = QueryValidRangeImpl(parameterStorageBackend, loggerFactory) + private val queryValidRange = QueryValidRangeImpl( + ledgerEndCache = ledgerEndCache, + pruningOffsetService = pruningOffsetService, + loggerFactory = loggerFactory, + ) private val globalIdQueriesLimiter = new QueueBasedConcurrencyLimiter( parallelism = globalMaxEventIdQueries, @@ -249,6 +256,7 @@ private class JdbcLedgerDao( queryValidRange = queryValidRange, eventStorageBackend = readStorageBackend.eventStorageBackend, lfValueTranslation = translation, + contractStore = contractStore, incompleteOffsets = incompleteOffsets, metrics = metrics, tracer = tracer, @@ -272,6 +280,7 @@ private class JdbcLedgerDao( queryValidRange = queryValidRange, eventStorageBackend = readStorageBackend.eventStorageBackend, lfValueTranslation = translation, + contractStore = contractStore, metrics = metrics, loggerFactory = loggerFactory, )(queryExecutionContext) @@ -284,6 +293,7 @@ private class JdbcLedgerDao( queryValidRange = queryValidRange, eventStorageBackend = readStorageBackend.eventStorageBackend, lfValueTranslation = translation, + contractStore = contractStore, metrics = metrics, tracer = tracer, topologyTransactionsStreamReader = topologyTransactionsStreamReader, @@ -291,25 +301,13 @@ private class JdbcLedgerDao( loggerFactory = loggerFactory, )(queryExecutionContext) - private val treeTransactionsStreamReader = new TransactionsTreeStreamReader( - config = 
transactionTreeStreamsConfig, - globalIdQueriesLimiter = globalIdQueriesLimiter, - globalPayloadQueriesLimiter = globalPayloadQueriesLimiter, - dbDispatcher = dbDispatcher, - queryValidRange = queryValidRange, - eventStorageBackend = readStorageBackend.eventStorageBackend, - lfValueTranslation = translation, - metrics = metrics, - tracer = tracer, - reassignmentStreamReader = reassignmentStreamReader, - loggerFactory = loggerFactory, - )(queryExecutionContext) - private val reassignmentPointwiseReader = new ReassignmentPointwiseReader( dbDispatcher = dbDispatcher, eventStorageBackend = readStorageBackend.eventStorageBackend, metrics = metrics, lfValueTranslation = translation, + queryValidRange = queryValidRange, + contractStore = contractStore, loggerFactory = loggerFactory, )(queryExecutionContext) @@ -318,6 +316,7 @@ private class JdbcLedgerDao( eventStorageBackend = readStorageBackend.eventStorageBackend, metrics = metrics, lfValueTranslation = translation, + queryValidRange = queryValidRange, loggerFactory = loggerFactory, )(queryExecutionContext) @@ -326,6 +325,8 @@ private class JdbcLedgerDao( eventStorageBackend = readStorageBackend.eventStorageBackend, metrics = metrics, lfValueTranslation = translation, + queryValidRange = queryValidRange, + contractStore = contractStore, loggerFactory = loggerFactory, )(queryExecutionContext) @@ -340,14 +341,6 @@ private class JdbcLedgerDao( loggerFactory = loggerFactory, )(queryExecutionContext) - private val treeTransactionPointwiseReader = new TransactionTreePointwiseReader( - dbDispatcher = dbDispatcher, - eventStorageBackend = readStorageBackend.eventStorageBackend, - metrics = metrics, - lfValueTranslation = translation, - loggerFactory = loggerFactory, - )(queryExecutionContext) - override val updateReader: UpdateReader = new UpdateReader( dispatcher = dbDispatcher, @@ -356,8 +349,6 @@ private class JdbcLedgerDao( metrics = metrics, updatesStreamReader = updatesStreamReader, updatePointwiseReader = updatePointwiseReader, - treeTransactionsStreamReader = treeTransactionsStreamReader, - treeTransactionPointwiseReader = treeTransactionPointwiseReader, acsReader = acsReader, )(queryExecutionContext) @@ -372,13 +363,14 @@ private class JdbcLedgerDao( override def eventsReader: LedgerDaoEventsReader = new EventsReader( - dbDispatcher, - readStorageBackend.eventStorageBackend, - parameterStorageBackend, - metrics, - translation, - ledgerEndCache, - loggerFactory, + dbDispatcher = dbDispatcher, + eventStorageBackend = readStorageBackend.eventStorageBackend, + parameterStorageBackend = parameterStorageBackend, + metrics = metrics, + lfValueTranslation = translation, + contractStore = contractStore, + ledgerEndCache = ledgerEndCache, + loggerFactory = loggerFactory, )(queryExecutionContext) override val completions: CommandCompletionsReader = @@ -405,9 +397,37 @@ private class JdbcLedgerDao( contractActivenessChanged: Boolean, )(implicit loggingContext: LoggingContextWithTrace - ): Future[PersistenceResponse] = { - logger.info("Storing transaction") - dbDispatcher + ): Future[PersistenceResponse] = for { + _ <- Future.successful(logger.info("Storing contracts into participant contract store")) + _ <- contractStore + .storeContracts( + transaction.nodes.values + .collect { case create: Node.Create => create } + .map(FatContract.fromCreateNode(_, CreatedAt(ledgerEffectiveTime), Bytes.Empty)) + .map( + ContractInstance + .ContractInstanceImpl( + _, + ContractMetadata.empty, + ByteString.EMPTY, + ) + ) + .toSeq + ) + .failOnShutdownTo(new 
IllegalStateException("Storing contracts was interrupted")) + + contractIds = + transaction.nodes.values + .collect { case create: Node.Create => create.coid } + + internalContractIds <- contractStore + .lookupBatchedNonCachedInternalIds(contractIds) + .failOnShutdownTo( + new IllegalStateException("Looking up internal contract ids was interrupted") + ) + + _ <- Future.successful(logger.info("Storing transaction")) + _ <- dbDispatcher .executeSql(metrics.index.db.storeTransactionDbMetrics) { implicit conn => sequentialIndexer.store( conn, @@ -444,11 +464,13 @@ private class JdbcLedgerDao( externalTransactionHash = None, acsChangeFactory = TestAcsChangeFactory(contractActivenessChanged = contractActivenessChanged), + internalContractIds = internalContractIds, ) ), ) - PersistenceResponse.Ok } + } yield { + PersistenceResponse.Ok } } @@ -458,9 +480,6 @@ private[platform] object JdbcLedgerDao { object Logging { def submissionId(id: String): LoggingEntry = "submissionId" -> id - - def updateId(id: UpdateId): LoggingEntry = - "updateId" -> id } def read( @@ -474,7 +493,6 @@ private[platform] object JdbcLedgerDao { completionsPageSize: Int, activeContractsServiceStreamsConfig: ActiveContractsServiceStreamsConfig, updatesStreamsConfig: UpdatesStreamsConfig, - transactionTreeStreamsConfig: TransactionTreeStreamsConfig, globalMaxEventIdQueries: Int, globalMaxEventPayloadQueries: Int, tracer: Tracer, @@ -486,7 +504,9 @@ private[platform] object JdbcLedgerDao { ) => FutureUnlessShutdown[Vector[Offset]], contractLoader: ContractLoader = ContractLoader.dummyLoader, lfValueTranslation: LfValueTranslation, - ): LedgerReadDao = + pruningOffsetService: PruningOffsetService, + contractStore: ContractStore, + )(implicit ec: ExecutionContext): LedgerReadDao = new JdbcLedgerDao( dbDispatcher = dbSupport.dbDispatcher, queryExecutionContext = queryExecutionContext, @@ -502,7 +522,6 @@ private[platform] object JdbcLedgerDao { completionsPageSize = completionsPageSize, activeContractsServiceStreamsConfig = activeContractsServiceStreamsConfig, updatesStreamsConfig = updatesStreamsConfig, - transactionTreeStreamsConfig = transactionTreeStreamsConfig, globalMaxEventIdQueries = globalMaxEventIdQueries, globalMaxEventPayloadQueries = globalMaxEventPayloadQueries, tracer = tracer, @@ -510,6 +529,8 @@ private[platform] object JdbcLedgerDao { incompleteOffsets = incompleteOffsets, contractLoader = contractLoader, translation = lfValueTranslation, + pruningOffsetService = pruningOffsetService, + contractStore = contractStore, ) def writeForTests( @@ -523,14 +544,15 @@ private[platform] object JdbcLedgerDao { completionsPageSize: Int, activeContractsServiceStreamsConfig: ActiveContractsServiceStreamsConfig, updatesStreamsConfig: UpdatesStreamsConfig, - transactionTreeStreamsConfig: TransactionTreeStreamsConfig, globalMaxEventIdQueries: Int, globalMaxEventPayloadQueries: Int, tracer: Tracer, loggerFactory: NamedLoggerFactory, contractLoader: ContractLoader = ContractLoader.dummyLoader, lfValueTranslation: LfValueTranslation, - ): LedgerReadDao with LedgerWriteDaoForTests = + pruningOffsetService: PruningOffsetService, + contractStore: ContractStore, + )(implicit ec: ExecutionContext): LedgerReadDao with LedgerWriteDaoForTests = new JdbcLedgerDao( dbDispatcher = dbSupport.dbDispatcher, queryExecutionContext = servicesExecutionContext, @@ -546,7 +568,6 @@ private[platform] object JdbcLedgerDao { completionsPageSize = completionsPageSize, activeContractsServiceStreamsConfig = activeContractsServiceStreamsConfig, 
updatesStreamsConfig = updatesStreamsConfig, - transactionTreeStreamsConfig = transactionTreeStreamsConfig, globalMaxEventIdQueries = globalMaxEventIdQueries, globalMaxEventPayloadQueries = globalMaxEventPayloadQueries, tracer = tracer, @@ -554,6 +575,8 @@ private[platform] object JdbcLedgerDao { incompleteOffsets = (_, _, _) => FutureUnlessShutdown.pure(Vector.empty), contractLoader = contractLoader, translation = lfValueTranslation, + pruningOffsetService = pruningOffsetService, + contractStore = contractStore, ) val acceptType = "accept" diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/LedgerDao.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/LedgerDao.scala index 6711ef3272..0755b6c26d 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/LedgerDao.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/LedgerDao.scala @@ -6,13 +6,7 @@ package com.digitalasset.canton.platform.store.dao import com.daml.ledger.api.v2.command_completion_service.CompletionStreamResponse import com.daml.ledger.api.v2.event_query_service.GetEventsByContractIdResponse import com.daml.ledger.api.v2.state_service.GetActiveContractsResponse -import com.daml.ledger.api.v2.update_service.{ - GetTransactionResponse, - GetTransactionTreeResponse, - GetUpdateResponse, - GetUpdateTreesResponse, - GetUpdatesResponse, -} +import com.daml.ledger.api.v2.update_service.{GetUpdateResponse, GetUpdatesResponse} import com.digitalasset.canton.data.Offset import com.digitalasset.canton.ledger.api.ParticipantId import com.digitalasset.canton.ledger.api.health.ReportsHealth @@ -23,12 +17,12 @@ import com.digitalasset.canton.platform.* import com.digitalasset.canton.platform.store.backend.ParameterStorageBackend.LedgerEnd import com.digitalasset.canton.platform.store.backend.common.UpdatePointwiseQueries.LookupKey import com.digitalasset.canton.platform.store.interfaces.LedgerDaoContractsReader +import com.digitalasset.canton.protocol.UpdateId import com.digitalasset.daml.lf.data.Time.Timestamp import com.digitalasset.daml.lf.transaction.CommittedTransaction import org.apache.pekko.NotUsed import org.apache.pekko.stream.scaladsl.Source -import scala.annotation.nowarn import scala.concurrent.Future private[platform] trait LedgerDaoUpdateReader { @@ -40,52 +34,11 @@ private[platform] trait LedgerDaoUpdateReader { loggingContext: LoggingContextWithTrace ): Source[(Offset, GetUpdatesResponse), NotUsed] - // TODO(#23504) remove when getTransactionById is removed - @nowarn("cat=deprecation") - def lookupTransactionById( - updateId: UpdateId, - internalTransactionFormat: InternalTransactionFormat, - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetTransactionResponse]] - - // TODO(#23504) remove when getTransactionByOffset is removed - @nowarn("cat=deprecation") - def lookupTransactionByOffset( - offset: Offset, - internalTransactionFormat: InternalTransactionFormat, - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetTransactionResponse]] - def lookupUpdateBy( lookupKey: LookupKey, internalUpdateFormat: InternalUpdateFormat, )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetUpdateResponse]] - // TODO(#23504) remove when getTransactionById is removed - @nowarn("cat=deprecation") - def getTransactionTrees( - startInclusive: Offset, - endInclusive: 
Offset, - requestingParties: Option[Set[Party]], - eventProjectionProperties: EventProjectionProperties, - )(implicit - loggingContext: LoggingContextWithTrace - ): Source[(Offset, GetUpdateTreesResponse), NotUsed] - - // TODO(#23504) remove when getTransactionById is removed - @nowarn("cat=deprecation") - def lookupTransactionTreeById( - updateId: UpdateId, - requestingParties: Set[Party], - eventProjectionProperties: EventProjectionProperties, - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetTransactionTreeResponse]] - - // TODO(#23504) remove when getTransactionById is removed - @nowarn("cat=deprecation") - def lookupTransactionTreeByOffset( - offset: Offset, - requestingParties: Set[Party], - eventProjectionProperties: EventProjectionProperties, - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetTransactionTreeResponse]] - def getActiveContracts( activeAt: Option[Offset], filter: TemplatePartiesFilter, diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/PaginatingAsyncStream.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/PaginatingAsyncStream.scala index 09c22b7c7a..456b4f3a31 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/PaginatingAsyncStream.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/PaginatingAsyncStream.scala @@ -6,10 +6,12 @@ package com.digitalasset.canton.platform.store.dao import com.digitalasset.canton.concurrent.DirectExecutionContext import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.platform.store.dao.events.IdPageSizing +import com.digitalasset.canton.tracing.TraceContext import org.apache.pekko.NotUsed import org.apache.pekko.stream.OverflowStrategy import org.apache.pekko.stream.scaladsl.Source +import java.sql.Connection import scala.concurrent.Future private[platform] class PaginatingAsyncStream( @@ -20,40 +22,6 @@ private[platform] class PaginatingAsyncStream( private val directEc = DirectExecutionContext(noTracingLogger) - /** Concatenates the results of multiple asynchronous calls into a single [[Source]], injecting - * the offset of the next page to retrieve for every call. - * - * This is designed to work with database limit/offset pagination and in particular to break down - * large queries intended to serve streams into smaller ones. The reason for this is that we are - * currently using simple blocking JDBC APIs and a long-running stream would end up occupying a - * thread in the DB pool, severely limiting the ability of keeping multiple, concurrent, - * long-running streams while serving lookup calls. 
- * - * This is not designed to page through results using the "seek method": - * https://use-the-index-luke.com/sql/partial-results/fetch-next-page - * - * @param pageSize - * number of items to retrieve per call - * @param queryPage - * takes the offset from which to start the next page and returns that page - * @tparam T - * the type of the items returned in each call - */ - def streamFromLimitOffsetPagination[T]( - pageSize: Int - )(queryPage: Long => Future[Vector[T]]): Source[T, NotUsed] = - Source - .unfoldAsync(Option(0L)) { - case None => Future.successful(None) - case Some(queryOffset) => - queryPage(queryOffset).map { result => - val resultSize = result.size.toLong - val newQueryOffset = if (resultSize < pageSize) None else Some(queryOffset + pageSize) - Some(newQueryOffset -> result) - }(directEc) - } - .flatMapConcat(Source(_)) - /** Concatenates the results of multiple asynchronous calls into a single [[Source]], passing the * last seen event's offset to the next iteration query, so it can continue reading events from * this point. @@ -90,21 +58,44 @@ private[platform] class PaginatingAsyncStream( } .flatMapConcat(Source(_)) - def streamIdsFromSeekPagination( + def streamIdsFromSeekPaginationWithoutIdFilter( + idStreamName: String, idPageSizing: IdPageSizing, idPageBufferSize: Int, initialFromIdExclusive: Long, + initialEndInclusive: Long, )( - fetchPage: IdPaginationState => Future[Vector[Long]] + fetchPageDbQuery: Connection => PaginationInput => Vector[Long] + )( + executeIdQuery: (Connection => Vector[Long]) => Future[Vector[Long]] + )(implicit + traceContext: TraceContext ): Source[Long, NotUsed] = { assert(idPageBufferSize > 0) + def wrapIdDbQuery(paginationInput: PaginationInput): Connection => Vector[Long] = { c => + val started = System.nanoTime() + val result = fetchPageDbQuery(c)(paginationInput) + def elapsedMillis: Long = (System.nanoTime() - started) / 1000000 + logger.debug( + s"ID query for $idStreamName for IDs returned: limit:${paginationInput.limit} from:${paginationInput.startExclusive} #IDs:${result.size} lastID:${result.lastOption} DB query took: ${elapsedMillis}ms" + ) + result + } val initialState = IdPaginationState( fromIdExclusive = initialFromIdExclusive, pageSize = idPageSizing.minPageSize, ) Source .unfoldAsync[IdPaginationState, Vector[Long]](initialState) { state => - fetchPage(state).map { ids => + executeIdQuery( + wrapIdDbQuery( + PaginationInput( + startExclusive = state.fromIdExclusive, + endInclusive = initialEndInclusive, + limit = state.pageSize, + ) + ) + ).map { ids => ids.lastOption.map { last => val nextState = IdPaginationState( fromIdExclusive = last, @@ -117,9 +108,109 @@ private[platform] class PaginatingAsyncStream( .buffer(idPageBufferSize, OverflowStrategy.backpressure) .mapConcat(identity) } + + def streamIdsFromSeekPaginationWithIdFilter( + idStreamName: String, + idPageSizing: IdPageSizing, + idPageBufferSize: Int, + initialFromIdExclusive: Long, + initialEndInclusive: Long, + )( + fetchPageDbQuery: Connection => IdFilterPaginationInput => Vector[Long] + )( + executeLastIdQuery: (Connection => Vector[Long]) => Future[Vector[Long]], + idFilterQueryParallelism: Int, + executeIdFilterQuery: (Connection => Vector[Long]) => Future[Vector[Long]], + )(implicit + traceContext: TraceContext + ): Source[Long, NotUsed] = { + assert(idPageBufferSize > 0) + def wrapIdDbQuery( + idFilterPaginationInput: IdFilterPaginationInput + )(debugLogMiddle: Vector[Long] => String): Connection => Vector[Long] = { c => + val started = 
System.nanoTime() + val result = fetchPageDbQuery(c)(idFilterPaginationInput) + def elapsedMillis: Long = (System.nanoTime() - started) / 1000000 + logger.debug( + s"ID query for $idStreamName ${debugLogMiddle(result)} DB query took: ${elapsedMillis}ms" + ) + result + } + def lastIdDbQuery( + paginationLastOnlyInput: PaginationLastOnlyInput + ): Connection => Vector[Long] = + wrapIdDbQuery(paginationLastOnlyInput)(result => + s"for next ID window returned: limit:${paginationLastOnlyInput.limit} from:${paginationLastOnlyInput.startExclusive} to:$result" + ) + def idFilterDbQuery(idFilterInput: IdFilterInput): Connection => Vector[Long] = + wrapIdDbQuery(idFilterInput)(result => + s"for filtered IDs returned: from:${idFilterInput.startExclusive} to:${idFilterInput.endInclusive} #IDs:${result.size}" + ) + val initialState = IdPaginationState( + fromIdExclusive = initialFromIdExclusive, + pageSize = idPageSizing.minPageSize, + ) + Source + .unfoldAsync[IdPaginationState, PaginationFromTo](initialState) { state => + executeLastIdQuery( + lastIdDbQuery( + PaginationLastOnlyInput( + startExclusive = state.fromIdExclusive, + endInclusive = initialEndInclusive, + limit = state.pageSize, + ) + ) + ).map { ids => + ids.lastOption.map { last => + val nextState = IdPaginationState( + fromIdExclusive = last, + pageSize = Math.min(state.pageSize * 4, idPageSizing.maxPageSize), + ) + nextState -> PaginationFromTo( + fromExclusive = state.fromIdExclusive, + toInclusive = last, + ) + } + }(directEc) + } + .mapAsync(idFilterQueryParallelism)(paginationFromTo => + executeIdFilterQuery( + idFilterDbQuery( + IdFilterInput( + startExclusive = paginationFromTo.fromExclusive, + endInclusive = paginationFromTo.toInclusive, + ) + ) + ) + ) + .buffer(idPageBufferSize, OverflowStrategy.backpressure) + .mapConcat(identity) + } } object PaginatingAsyncStream { final case class IdPaginationState(fromIdExclusive: Long, pageSize: Int) + + final case class PaginationFromTo( + fromExclusive: Long, + toInclusive: Long, + ) + + sealed trait IdFilterPaginationInput + final case class PaginationInput( + startExclusive: Long, + endInclusive: Long, + limit: Int, + ) extends IdFilterPaginationInput + final case class IdFilterInput( + startExclusive: Long, + endInclusive: Long, + ) extends IdFilterPaginationInput + final case class PaginationLastOnlyInput( + startExclusive: Long, + endInclusive: Long, + limit: Int, + ) extends IdFilterPaginationInput + } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/SequentialWriteDao.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/SequentialWriteDao.scala index 8de7829aea..76ed61b797 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/SequentialWriteDao.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/SequentialWriteDao.scala @@ -5,6 +5,7 @@ package com.digitalasset.canton.platform.store.dao import com.daml.metrics.api.MetricsContext import com.digitalasset.canton.data.{CantonTimestamp, Offset} +import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.ledger.participant.state.Update import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.metrics.LedgerApiServerMetrics @@ -14,7 +15,7 @@ import com.digitalasset.canton.platform.store.backend.{ DbDtoToStringsForInterning, IngestionStorageBackend, 
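
Both replacement variants in PaginatingAsyncStream share the seek-paginated core described by the remaining scaladoc: each iteration resumes strictly after the last id already emitted, and the page size grows by a factor of four up to the configured maximum, so no offset re-scanning is needed. A condensed, self-contained sketch of that loop (streamIds and fetchPage are illustrative, not the production signatures):

import org.apache.pekko.NotUsed
import org.apache.pekko.stream.scaladsl.Source

import scala.concurrent.{ExecutionContext, Future}

object SeekPaginationSketch {
  final case class IdPaginationState(fromIdExclusive: Long, pageSize: Int)

  // Seek pagination: the next query resumes strictly after the last id already seen,
  // so rows are neither skipped nor re-read, and the page size ramps up 4x per page
  // until it reaches maxPageSize. The stream completes on the first empty page.
  def streamIds(minPageSize: Int, maxPageSize: Int, startExclusive: Long)(
      fetchPage: IdPaginationState => Future[Vector[Long]]
  )(implicit ec: ExecutionContext): Source[Long, NotUsed] =
    Source
      .unfoldAsync(IdPaginationState(startExclusive, minPageSize)) { state =>
        fetchPage(state).map { ids =>
          ids.lastOption.map { last =>
            IdPaginationState(last, math.min(state.pageSize * 4, maxPageSize)) -> ids
          }
        }
      }
      .mapConcat(identity)
}

The WithIdFilter variant layers a second phase on the same loop: each (from, to] window produced by the last-id query is fed to a filtered id query via mapAsync with idFilterQueryParallelism, and only the filtered ids are emitted downstream.
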
ParameterStorageBackend, - UpdateToDbDto, + UpdateToDbDtoLegacy, } import com.digitalasset.canton.platform.store.cache.MutableLedgerEndCache import com.digitalasset.canton.platform.store.dao.events.{CompressionStrategy, LfValueTranslation} @@ -23,9 +24,12 @@ import com.digitalasset.canton.platform.store.interning.{ InternizingStringInterningView, StringInterning, } +import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.daml.lf.data.Ref +import com.digitalasset.daml.lf.value.Value.ContractId import java.sql.Connection +import scala.collection.mutable import scala.concurrent.{Future, blocking} import scala.util.chaining.scalaUtilChainingOps @@ -49,7 +53,7 @@ object SequentialWriteDao { ingestionStorageBackend = ingestionStorageBackend, parameterStorageBackend = parameterStorageBackend, updateToDbDtos = offset => - UpdateToDbDto( + UpdateToDbDtoLegacy( participantId = participantId, translation = new LfValueTranslation( metrics = metrics, @@ -92,6 +96,8 @@ private[dao] final case class SequentialWriteDaoImpl[DB_BATCH]( @SuppressWarnings(Array("org.wartremover.warts.Var")) private var previousTransactionMetaToEventSeqId: Long = _ + private val acs: mutable.HashMap[(SynchronizerId, ContractId), Long] = mutable.HashMap() + private def lazyInit(connection: Connection): Unit = if (!lastEventSeqIdInitialized) { val ledgerEnd = parameterStorageBackend.ledgerEnd(connection) @@ -108,8 +114,28 @@ private[dao] final case class SequentialWriteDaoImpl[DB_BATCH]( private def adaptEventSeqIds(dbDtos: Iterator[DbDto]): Vector[DbDto] = dbDtos.map { - case e: DbDto.EventCreate => e.copy(event_sequential_id = nextEventSeqId) - case e: DbDto.EventExercise => e.copy(event_sequential_id = nextEventSeqId) + case e: DbDto.EventCreate => + val eventSeqId = nextEventSeqId + if (e.flat_event_witnesses.nonEmpty) { + acs.put(e.synchronizer_id -> e.contract_id, eventSeqId).discard + } + e.copy(event_sequential_id = eventSeqId) + case e: DbDto.EventExercise => + val deactivatedEventSeqId = Option + .when(e.consuming && e.flat_event_witnesses.nonEmpty) { + acs.get(e.synchronizer_id -> e.contract_id) match { + case Some(deactivatedEventSeqId) => + acs.remove(e.synchronizer_id -> e.contract_id).discard + Some(deactivatedEventSeqId) + case None => + None + } + } + .flatten + e.copy( + event_sequential_id = nextEventSeqId, + deactivated_event_sequential_id = deactivatedEventSeqId, + ) case e: DbDto.IdFilterCreateStakeholder => e.copy(event_sequential_id = lastEventSeqId) case e: DbDto.IdFilterCreateNonStakeholderInformee => diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ACSReader.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ACSReader.scala index 87b5ceb33a..bb841d16cd 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ACSReader.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ACSReader.scala @@ -19,20 +19,19 @@ import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTr import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.LedgerApiServerMetrics import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors.AbortedDueToShutdown -import com.digitalasset.canton.platform.TemplatePartiesFilter +import 
com.digitalasset.canton.participant.store.ContractStore import com.digitalasset.canton.platform.config.ActiveContractsServiceStreamsConfig import com.digitalasset.canton.platform.store.backend.EventStorageBackend import com.digitalasset.canton.platform.store.backend.EventStorageBackend.SequentialIdBatch.Ids import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{ Entry, - RawActiveContract, - RawAssignEvent, - RawCreatedEvent, - RawUnassignEvent, + RawActiveContractLegacy, + RawAssignEventLegacy, + RawCreatedEventLegacy, + RawUnassignEventLegacy, UnassignProperties, } -import com.digitalasset.canton.platform.store.backend.common.EventPayloadSourceForUpdatesAcsDelta -import com.digitalasset.canton.platform.store.dao.PaginatingAsyncStream.IdPaginationState +import com.digitalasset.canton.platform.store.backend.common.EventPayloadSourceForUpdatesAcsDeltaLegacy import com.digitalasset.canton.platform.store.dao.events.UpdateReader.endSpanOnTermination import com.digitalasset.canton.platform.store.dao.{ DbDispatcher, @@ -44,8 +43,10 @@ import com.digitalasset.canton.platform.store.utils.{ QueueBasedConcurrencyLimiter, Telemetry, } +import com.digitalasset.canton.platform.{FatContract, TemplatePartiesFilter} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.PekkoUtil.syntax.* +import com.digitalasset.canton.util.Thereafter.syntax.ThereafterAsyncOps import com.digitalasset.daml.lf.data.Ref import com.digitalasset.daml.lf.data.Ref.FullIdentifier import com.digitalasset.daml.lf.value.Value.ContractId @@ -54,8 +55,8 @@ import org.apache.pekko.NotUsed import org.apache.pekko.stream.Attributes import org.apache.pekko.stream.scaladsl.Source -import java.sql.Connection import scala.concurrent.{ExecutionContext, Future} +import scala.util.Success import scala.util.chaining.* /** Streams ACS events (active contracts) in a two step process consisting of: @@ -76,6 +77,7 @@ class ACSReader( queryValidRange: QueryValidRange, eventStorageBackend: EventStorageBackend, lfValueTranslation: LfValueTranslation, + contractStore: ContractStore, incompleteOffsets: ( Offset, Option[Set[Ref.Party]], @@ -123,7 +125,7 @@ class ACSReader( loggingContext: LoggingContextWithTrace ): Source[GetActiveContractsResponse, NotUsed] = { val (activeAtOffset, activeAtEventSeqId) = activeAt - def withValidatedActiveAt[T](query: => T)(implicit connection: Connection) = + def withValidatedActiveAt[T](query: => Future[T]) = queryValidRange.withOffsetNotBeforePruning( activeAtOffset, pruned => @@ -157,106 +159,129 @@ class ACSReader( ) def fetchCreateIds(filter: DecomposedFilter): Source[Long, NotUsed] = - paginatingAsyncStream.streamIdsFromSeekPagination( + paginatingAsyncStream.streamIdsFromSeekPaginationWithIdFilter( + idStreamName = s"ActiveContractIds for create events $filter", idPageSizing = idQueryPageSizing, idPageBufferSize = config.maxPagesPerIdPagesBuffer, initialFromIdExclusive = 0L, - )((state: IdPaginationState) => - createIdQueriesLimiter.execute( - globalIdQueriesLimiter.execute( - dispatcher.executeSql(metrics.index.db.getActiveContractIdsForCreated) { connection => - val ids = - eventStorageBackend.updateStreamingQueries - .fetchIdsOfCreateEventsForStakeholder( - stakeholderO = filter.party, - templateIdO = filter.templateId, - startExclusive = state.fromIdExclusive, - endInclusive = activeAtEventSeqId, - limit = state.pageSize, - )(connection) - logger.debug( - s"ActiveContractIds for create events $filter returned #${ids.size} ${ids.lastOption - .map(last => 
s"until $last") - .getOrElse("")}" - ) - ids - } - ) + initialEndInclusive = activeAtEventSeqId, + )( + eventStorageBackend.updateStreamingQueries.fetchActiveIdsOfCreateEventsForStakeholderLegacy( + stakeholderO = filter.party, + templateIdO = filter.templateId, + activeAtEventSeqId = activeAtEventSeqId, ) + )( + executeLastIdQuery = f => + createIdQueriesLimiter.execute( + globalIdQueriesLimiter.execute( + dispatcher.executeSql(metrics.index.db.getActiveContractIdRangesForCreatedLegacy)(f) + ) + ), + idFilterQueryParallelism = config.idFilterQueryParallelism, + executeIdFilterQuery = f => + createIdQueriesLimiter.execute( + globalIdQueriesLimiter.execute( + dispatcher.executeSql( + metrics.index.db.getFilteredActiveContractIdsForCreatedLegacy + )(f) + ) + ), ) def fetchAssignIds(filter: DecomposedFilter): Source[Long, NotUsed] = - paginatingAsyncStream.streamIdsFromSeekPagination( + paginatingAsyncStream.streamIdsFromSeekPaginationWithIdFilter( + idStreamName = s"ActiveContractIds for assign events $filter", idPageSizing = idQueryPageSizing, idPageBufferSize = config.maxPagesPerIdPagesBuffer, initialFromIdExclusive = 0L, - )((state: IdPaginationState) => - assignIdQueriesLimiter.execute( - globalIdQueriesLimiter.execute( - dispatcher.executeSql(metrics.index.db.getActiveContractIdsForAssigned) { connection => - val ids = - eventStorageBackend.fetchAssignEventIdsForStakeholder( - stakeholderO = filter.party, - templateId = filter.templateId, - startExclusive = state.fromIdExclusive, - endInclusive = activeAtEventSeqId, - limit = state.pageSize, - )(connection) - logger.debug( - s"ActiveContractIds for assign events $filter returned #${ids.size} ${ids.lastOption - .map(last => s"until $last") - .getOrElse("")}" - ) - ids - } - ) + initialEndInclusive = activeAtEventSeqId, + )( + eventStorageBackend.updateStreamingQueries.fetchActiveIdsOfAssignEventsForStakeholderLegacy( + stakeholderO = filter.party, + templateIdO = filter.templateId, + activeAtEventSeqId = activeAtEventSeqId, ) + )( + executeLastIdQuery = f => + assignIdQueriesLimiter.execute( + globalIdQueriesLimiter.execute( + dispatcher.executeSql(metrics.index.db.getActiveContractIdRangesForAssignedLegacy)(f) + ) + ), + idFilterQueryParallelism = config.idFilterQueryParallelism, + executeIdFilterQuery = f => + assignIdQueriesLimiter.execute( + globalIdQueriesLimiter.execute( + dispatcher.executeSql( + metrics.index.db.getFilteredActiveContractIdsForAssignedLegacy + )(f) + ) + ), ) + def withFatContracts[T]( + internalContractId: T => Long + )(payloads: Vector[T]): Future[Vector[(T, Option[FatContract])]] = + for { + contractsM <- contractStore + .lookupBatchedNonCached( + payloads.map(internalContractId) + ) + .failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError) + } yield payloads + .map { payload => + val fatContractO = contractsM.get(internalContractId(payload)).map(_.inst) + (payload, fatContractO) + } + def fetchActiveCreatePayloads( ids: Iterable[Long] - ): Future[Vector[RawActiveContract]] = + ): Future[Vector[(RawActiveContractLegacy, Option[FatContract])]] = localPayloadQueriesLimiter.execute( globalPayloadQueriesLimiter.execute( - dispatcher.executeSql(metrics.index.db.getActiveContractBatchForCreated) { - implicit connection => - val result = withValidatedActiveAt( - eventStorageBackend.activeContractCreateEventBatch( + withValidatedActiveAt( + dispatcher + .executeSql(metrics.index.db.getActiveContractBatchForCreatedLegacy) { + eventStorageBackend.activeContractCreateEventBatchLegacy( eventSequentialIds = ids, 
allFilterParties = allFilterParties, endInclusive = activeAtEventSeqId, - )(connection) - ) - logger.debug( - s"getActiveContractBatch returned ${ids.size}/${result.size} ${ids.lastOption - .map(last => s"until $last") - .getOrElse("")}" - ) - result + ) + } + .flatMap(withFatContracts(_.rawCreatedEvent.internalContractId)) + ).thereafterP { case Success(result) => + logger.debug( + s"getActiveContractBatch returned ${result.size}/${ids.size} ${ids.lastOption + .map(last => s"until $last") + .getOrElse("")}" + ) } ) ) def fetchActiveAssignPayloads( ids: Iterable[Long] - ): Future[Vector[RawActiveContract]] = + ): Future[Vector[(RawActiveContractLegacy, Option[FatContract])]] = localPayloadQueriesLimiter.execute( globalPayloadQueriesLimiter.execute( - dispatcher.executeSql(metrics.index.db.getActiveContractBatchForAssigned) { - implicit connection => - val result = withValidatedActiveAt( - eventStorageBackend.activeContractAssignEventBatch( + withValidatedActiveAt( + dispatcher + .executeSql(metrics.index.db.getActiveContractBatchForAssignedLegacy)( + eventStorageBackend.activeContractAssignEventBatchLegacy( eventSequentialIds = ids, allFilterParties = allFilterParties, endInclusive = activeAtEventSeqId, - )(connection) - ) - logger.debug( - s"getActiveContractBatch returned ${ids.size}/${result.size} ${ids.lastOption - .map(last => s"until $last") - .getOrElse("")}" + ) ) - result + .flatMap(withFatContracts(_.rawCreatedEvent.internalContractId)) + ).thereafterP { case Success(result) => + logger.debug( + s"getActiveContractBatch returned ${result.size}/${ids.size} ${ids.lastOption + .map(last => s"until $last") + .getOrElse("")}" + ) + } ) ) @@ -268,7 +293,7 @@ class ACSReader( dispatcher.executeSql(metrics.index.db.getAssingIdsForOffsets) { connection => val ids = eventStorageBackend - .lookupAssignSequentialIdByOffset(offsets.map(_.unwrap))(connection) + .lookupAssignSequentialIdByOffsetLegacy(offsets.map(_.unwrap))(connection) logger.debug( s"Assign Ids for offsets returned #${ids.size} (from ${offsets.size}) ${ids.lastOption .map(last => s"until $last") @@ -285,7 +310,7 @@ class ACSReader( dispatcher.executeSql(metrics.index.db.getUnassingIdsForOffsets) { connection => val ids = eventStorageBackend - .lookupUnassignSequentialIdByOffset(offsets.map(_.unwrap))(connection) + .lookupUnassignSequentialIdByOffsetLegacy(offsets.map(_.unwrap))(connection) logger.debug( s"Unassign Ids for offsets returned #${ids.size} (from ${offsets.size}) ${ids.lastOption .map(last => s"until $last") @@ -297,50 +322,53 @@ class ACSReader( def fetchAssignPayloads( ids: Iterable[Long] - ): Future[Vector[Entry[RawAssignEvent]]] = + ): Future[Vector[(Entry[RawAssignEventLegacy], Option[FatContract])]] = if (ids.isEmpty) Future.successful(Vector.empty) else localPayloadQueriesLimiter.execute( globalPayloadQueriesLimiter.execute( - dispatcher.executeSql( - metrics.index.db.reassignmentStream.fetchEventAssignPayloads - ) { implicit connection => - val result = withValidatedActiveAt( - eventStorageBackend.assignEventBatch( - eventSequentialIds = Ids(ids), - allFilterParties = allFilterParties, - )(connection) - ) - logger.debug( - s"assignEventBatch returned ${ids.size}/${result.size} ${ids.lastOption - .map(last => s"until $last") - .getOrElse("")}" - ) - result - } + withValidatedActiveAt( + dispatcher + .executeSql( + metrics.index.db.reassignmentStream.fetchEventAssignPayloadsLegacy + )( + eventStorageBackend.assignEventBatchLegacy( + eventSequentialIds = Ids(ids), + allFilterParties = allFilterParties, + ) + ) 
+ .flatMap(withFatContracts(_.event.rawCreatedEvent.internalContractId)) + ) + .thereafterP { case Success(result) => + logger.debug( + s"assignEventBatch returned ${result.size}/${ids.size} ${ids.lastOption + .map(last => s"until $last") + .getOrElse("")}" + ) + } ) ) def fetchUnassignPayloads( ids: Iterable[Long] - ): Future[Vector[Entry[RawUnassignEvent]]] = + ): Future[Vector[Entry[RawUnassignEventLegacy]]] = localPayloadQueriesLimiter.execute( globalPayloadQueriesLimiter.execute( - dispatcher.executeSql( - metrics.index.db.reassignmentStream.fetchEventUnassignPayloads - ) { implicit connection => - val result = withValidatedActiveAt( - eventStorageBackend.unassignEventBatch( + withValidatedActiveAt( + dispatcher.executeSql( + metrics.index.db.reassignmentStream.fetchEventUnassignPayloadsLegacy + )( + eventStorageBackend.unassignEventBatchLegacy( eventSequentialIds = Ids(ids), allFilterParties = allFilterParties, - )(connection) + ) ) + ).thereafterP { case Success(result) => logger.debug( - s"unassignEventBatch returned ${ids.size}/${result.size} ${ids.lastOption + s"unassignEventBatch returned ${result.size}/${ids.size} ${ids.lastOption .map(last => s"until $last") .getOrElse("")}" ) - result } ) ) @@ -349,9 +377,9 @@ class ACSReader( contractIds: Iterable[ContractId] ): Future[Vector[Long]] = globalIdQueriesLimiter.execute( - dispatcher.executeSql(metrics.index.db.getCreateIdsForContractIds) { connection => + dispatcher.executeSql(metrics.index.db.getCreateIdsForContractIdsLegacy) { connection => val ids = - eventStorageBackend.lookupCreateSequentialIdByContractId(contractIds)( + eventStorageBackend.lookupCreateSequentialIdByContractIdLegacy(contractIds)( connection ) logger.debug( @@ -369,9 +397,9 @@ class ACSReader( if (unassignPropertiesSeq.isEmpty) Future.successful(Map.empty) else globalIdQueriesLimiter.execute( - dispatcher.executeSql(metrics.index.db.getAssignIdsForContractIds) { connection => + dispatcher.executeSql(metrics.index.db.getAssignIdsForContractIdsLegacy) { connection => val idForUnassignProperties: Map[UnassignProperties, Long] = - eventStorageBackend.lookupAssignSequentialIdBy( + eventStorageBackend.lookupAssignSequentialIdByLegacy( unassignPropertiesSeq )(connection) logger.debug( @@ -383,40 +411,48 @@ class ACSReader( def fetchCreatePayloads( ids: Iterable[Long] - ): Future[Vector[Entry[RawCreatedEvent]]] = + ): Future[Vector[(Entry[RawCreatedEventLegacy], Option[FatContract])]] = if (ids.isEmpty) Future.successful(Vector.empty) else globalPayloadQueriesLimiter.execute( - dispatcher - .executeSql(metrics.index.db.updatesAcsDeltaStream.fetchEventCreatePayloads) { - implicit connection => - val result = withValidatedActiveAt( - eventStorageBackend.fetchEventPayloadsAcsDelta( - EventPayloadSourceForUpdatesAcsDelta.Create - )( - eventSequentialIds = Ids(ids), - requestingParties = allFilterParties, - )(connection) - ) - logger.debug( - s"fetchEventPayloads for Create returned ${ids.size}/${result.size} ${ids.lastOption - .map(last => s"until $last") - .getOrElse("")}" + withValidatedActiveAt( + dispatcher + .executeSql( + metrics.index.db.updatesAcsDeltaStream.fetchEventCreatePayloadsLegacy + )( + eventStorageBackend.fetchEventPayloadsAcsDeltaLegacy( + EventPayloadSourceForUpdatesAcsDeltaLegacy.Create + )( + eventSequentialIds = Ids(ids), + requestingParties = allFilterParties, ) + ) + .map(result => result.view.collect { entry => entry.event match { - case created: RawCreatedEvent => + case created: RawCreatedEventLegacy => entry.copy(event = created) } 
}.toVector - } + ) + .flatMap(withFatContracts(_.event.internalContractId)) + .thereafterP { case Success(result) => + logger.debug( + s"fetchEventPayloads for Create returned ${result.size}/${ids.size} ${ids.lastOption + .map(last => s"until $last") + .getOrElse("")}" + ) + } + ) ) - def fetchCreatedEventsForUnassignedBatch(batch: Seq[Entry[RawUnassignEvent]]): Future[ - Seq[(Entry[RawUnassignEvent], RawCreatedEvent)] + def fetchCreatedEventsForUnassignedBatch(batch: Seq[Entry[RawUnassignEventLegacy]]): Future[ + Seq[(Entry[RawUnassignEventLegacy], (Entry[RawCreatedEventLegacy], Option[FatContract]))] ] = { - def extractUnassignProperties(unassignEntry: Entry[RawUnassignEvent]): UnassignProperties = + def extractUnassignProperties( + unassignEntry: Entry[RawUnassignEventLegacy] + ): UnassignProperties = UnassignProperties( contractId = unassignEntry.event.contractId, synchronizerId = unassignEntry.event.sourceSynchronizerId, @@ -434,18 +470,25 @@ class ACSReader( unassignPropertiesToAssignedIds: Map[UnassignProperties, Long] <- fetchAssignIdsFor( unassignPropertiesSeq ) - assignedPayloads: Seq[Entry[RawAssignEvent]] <- fetchAssignPayloads( - unassignPropertiesToAssignedIds.values - ) - assignedIdsToPayloads: Map[Long, Entry[RawAssignEvent]] = assignedPayloads - .map(payload => payload.eventSequentialId -> payload) - .toMap + assignedPayloads: Seq[(Entry[RawAssignEventLegacy], Option[FatContract])] <- + fetchAssignPayloads( + unassignPropertiesToAssignedIds.values + ) + assignedIdsToPayloads: Map[Long, (Entry[RawAssignEventLegacy], Option[FatContract])] = + assignedPayloads + .map(payload => payload._1.eventSequentialId -> payload) + .toMap // map the requested unassign event properties to the returned raw created events using the assign sequential id - rawCreatedFromAssignedResults: Map[UnassignProperties, RawCreatedEvent] = + rawCreatedFromAssignedResults: Map[ + UnassignProperties, + (Entry[RawCreatedEventLegacy], Option[FatContract]), + ] = unassignPropertiesToAssignedIds.flatMap { case (params, assignedId) => assignedIdsToPayloads .get(assignedId) - .map(assignEntry => (params, assignEntry.event.rawCreatedEvent)) + .map { case (assignEntry, fatContract) => + (params, (assignEntry.map(_.rawCreatedEvent), fatContract)) + } } // if not found in the assigned events, search the created events @@ -457,8 +500,10 @@ class ACSReader( .distinct createdIds <- fetchCreateIdsForContractIds(missingContractIds) createdPayloads <- fetchCreatePayloads(createdIds) - rawCreatedFromCreatedResults: Map[ContractId, Vector[Entry[RawCreatedEvent]]] = - createdPayloads.groupBy(_.event.contractId) + rawCreatedFromCreatedResults: Map[ContractId, Vector[ + (Entry[RawCreatedEventLegacy], Option[FatContract]) + ]] = + createdPayloads.groupBy(_._1.event.contractId) } yield batch.flatMap { rawUnassignEntry => val unassignProperties = extractUnassignProperties(rawUnassignEntry) rawCreatedFromAssignedResults @@ -473,9 +518,8 @@ class ACSReader( candidateCreateEntries .find(createdEntry => // the created event should match the synchronizer id of the unassign entry and have a lower sequential id than it - createdEntry.synchronizerId == unassignProperties.synchronizerId && createdEntry.eventSequentialId < unassignProperties.sequentialId + createdEntry._1.synchronizerId == unassignProperties.synchronizerId && createdEntry._1.eventSequentialId < unassignProperties.sequentialId ) - .map(_.event) } } .orElse { @@ -503,12 +547,12 @@ class ACSReader( } ) - def unassignMeetsConstraints(rawUnassignEntry: 
Entry[RawUnassignEvent]): Boolean = + def unassignMeetsConstraints(rawUnassignEntry: Entry[RawUnassignEventLegacy]): Boolean = eventMeetsConstraints( rawUnassignEntry.event.templateId, rawUnassignEntry.event.witnessParties, ) - def assignMeetsConstraints(rawAssignEntry: Entry[RawAssignEvent]): Boolean = + def assignMeetsConstraints(rawAssignEntry: Entry[RawAssignEventLegacy]): Boolean = eventMeetsConstraints( rawAssignEntry.event.rawCreatedEvent.templateId, rawAssignEntry.event.rawCreatedEvent.witnessParties, @@ -526,7 +570,6 @@ class ACSReader( maxBatchSize = config.maxPayloadsPerPayloadsPage, maxBatchCount = config.maxParallelPayloadCreateQueries + 1, ) - .async .addAttributes(Attributes.inputBuffer(initial = inputBufferSize, max = inputBufferSize)) .mapAsync(config.maxParallelPayloadCreateQueries)(fetchActiveCreatePayloads) .mapConcat(identity) @@ -538,16 +581,15 @@ class ACSReader( maxBatchSize = config.maxPayloadsPerPayloadsPage, maxBatchCount = config.maxParallelPayloadCreateQueries + 1, ) - .async .addAttributes(Attributes.inputBuffer(initial = inputBufferSize, max = inputBufferSize)) .mapAsync(config.maxParallelPayloadCreateQueries)(fetchActiveAssignPayloads) .mapConcat(identity) activeFromCreatePipe - .mergeSorted(activeFromAssignPipe)(Ordering.by(_.eventSequentialId)) - .mapAsync(config.contractProcessingParallelism)( - toApiResponseActiveContract(_, eventProjectionProperties) - ) + .mergeSorted(activeFromAssignPipe)(Ordering.by(_._1.eventSequentialId)) + .mapAsync(config.contractProcessingParallelism) { case (rawActiveContract, fatContractO) => + toApiResponseActiveContract(rawActiveContract, fatContractO, eventProjectionProperties) + } .concatLazy( // compute incomplete reassignments Source.lazyFutureSource(() => @@ -570,7 +612,7 @@ class ACSReader( .mapAsync(config.maxParallelPayloadCreateQueries)( fetchAssignPayloads ) - .mapConcat(_.filter(assignMeetsConstraints)) + .mapConcat(_.filter(entryPair => assignMeetsConstraints(entryPair._1))) .mapAsync(config.contractProcessingParallelism)( toApiResponseIncompleteAssigned(eventProjectionProperties) ) @@ -609,15 +651,25 @@ class ACSReader( } private def toApiResponseActiveContract( - rawActiveContract: RawActiveContract, + rawActiveContract: RawActiveContractLegacy, + fatContract: Option[FatContract], eventProjectionProperties: EventProjectionProperties, )(implicit lc: LoggingContextWithTrace): Future[GetActiveContractsResponse] = Timed.future( future = Future.delegate( lfValueTranslation - .deserializeRaw( - eventProjectionProperties, - rawActiveContract.rawCreatedEvent, + .toApiCreatedEvent( + eventProjectionProperties = eventProjectionProperties, + fatContractInstance = fatContract.getOrElse( + throw new IllegalStateException( + s"Contract for internal contract id ${rawActiveContract.rawCreatedEvent.internalContractId} was not found in the contract store." 
+ ) + ), + offset = rawActiveContract.offset, + nodeId = rawActiveContract.nodeId, + representativePackageId = rawActiveContract.rawCreatedEvent.representativePackageId, + witnesses = rawActiveContract.rawCreatedEvent.witnessParties, + acsDelta = true, ) .map(createdEvent => GetActiveContractsResponse( @@ -636,38 +688,66 @@ class ACSReader( ) private def toApiResponseIncompleteAssigned(eventProjectionProperties: EventProjectionProperties)( - rawAssignEntry: Entry[RawAssignEvent] + rawAssignEntryFatContract: (Entry[RawAssignEventLegacy], Option[FatContract]) )(implicit lc: LoggingContextWithTrace): Future[(Long, GetActiveContractsResponse)] = - Timed.future( - future = Future.delegate( - lfValueTranslation - .deserializeRaw( - eventProjectionProperties, - rawAssignEntry.event.rawCreatedEvent, - ) - .map(createdEvent => - rawAssignEntry.offset -> GetActiveContractsResponse( - workflowId = rawAssignEntry.workflowId.getOrElse(""), - contractEntry = GetActiveContractsResponse.ContractEntry.IncompleteAssigned( - IncompleteAssigned( - Some(UpdateReader.toAssignedEvent(rawAssignEntry.event, createdEvent)) + rawAssignEntryFatContract match { + case (rawAssignEntry, fatContract) => + Timed.future( + future = Future.delegate( + lfValueTranslation + .toApiCreatedEvent( + eventProjectionProperties = eventProjectionProperties, + fatContractInstance = fatContract.getOrElse( + throw new IllegalStateException( + s"Contract for internal contract id ${rawAssignEntry.event.rawCreatedEvent.internalContractId} was not found in the contract store." + ) + ), + offset = rawAssignEntry.offset, + nodeId = rawAssignEntry.nodeId, + representativePackageId = + rawAssignEntry.event.rawCreatedEvent.representativePackageId, + witnesses = rawAssignEntry.event.rawCreatedEvent.witnessParties, + acsDelta = true, + ) + .map(createdEvent => + rawAssignEntry.offset -> GetActiveContractsResponse( + workflowId = rawAssignEntry.workflowId.getOrElse(""), + contractEntry = GetActiveContractsResponse.ContractEntry.IncompleteAssigned( + IncompleteAssigned( + Some(UpdateReader.toAssignedEvent(rawAssignEntry.event, createdEvent)) + ) + ), ) - ), - ) - ) - ), - timer = dbMetrics.getActiveContracts.translationTimer, - ) + ) + ), + timer = dbMetrics.getActiveContracts.translationTimer, + ) + } private def toApiResponseIncompleteUnassigned( eventProjectionProperties: EventProjectionProperties )( - rawUnassignEntryWithCreate: (Entry[RawUnassignEvent], RawCreatedEvent) + rawUnassignEntryWithCreate: ( + Entry[RawUnassignEventLegacy], + (Entry[RawCreatedEventLegacy], Option[FatContract]), + ) )(implicit lc: LoggingContextWithTrace): Future[(Long, GetActiveContractsResponse)] = { - val (rawUnassignEntry, rawCreate) = rawUnassignEntryWithCreate + val (rawUnassignEntry, (rawCreate, fatContract)) = rawUnassignEntryWithCreate Timed.future( future = lfValueTranslation - .deserializeRaw(eventProjectionProperties, rawCreate) + .toApiCreatedEvent( + eventProjectionProperties = eventProjectionProperties, + fatContractInstance = fatContract.getOrElse( + throw new IllegalStateException( + s"Contract for internal contract id ${rawCreate.event.internalContractId} was not found in the contract store." 
+ ) + ), + offset = rawCreate.offset, + nodeId = rawCreate.nodeId, + representativePackageId = rawCreate.event.representativePackageId, + witnesses = rawCreate.event.witnessParties, + acsDelta = true, + ) .map(createdEvent => rawUnassignEntry.offset -> GetActiveContractsResponse( workflowId = rawUnassignEntry.workflowId.getOrElse(""), @@ -675,7 +755,7 @@ class ACSReader( IncompleteUnassigned( createdEvent = Some(createdEvent), unassignedEvent = Some( - UpdateReader.toUnassignedEvent(rawUnassignEntry.offset, rawUnassignEntry.event) + UpdateReader.toUnassignedEvent(rawUnassignEntry.offset, rawUnassignEntry) ), ) ), diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/BufferedUpdateReader.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/BufferedUpdateReader.scala index 71be9876ee..01a9c07aa3 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/BufferedUpdateReader.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/BufferedUpdateReader.scala @@ -4,13 +4,7 @@ package com.digitalasset.canton.platform.store.dao.events import com.daml.ledger.api.v2.state_service.GetActiveContractsResponse -import com.daml.ledger.api.v2.update_service.{ - GetTransactionResponse, - GetTransactionTreeResponse, - GetUpdateResponse, - GetUpdateTreesResponse, - GetUpdatesResponse, -} +import com.daml.ledger.api.v2.update_service.{GetUpdateResponse, GetUpdatesResponse} import com.digitalasset.canton.concurrent.DirectExecutionContext import com.digitalasset.canton.data.Offset import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory} @@ -18,52 +12,27 @@ import com.digitalasset.canton.metrics.LedgerApiServerMetrics import com.digitalasset.canton.platform.store.backend.common.UpdatePointwiseQueries.LookupKey import com.digitalasset.canton.platform.store.cache.InMemoryFanoutBuffer import com.digitalasset.canton.platform.store.dao.BufferedStreamsReader.FetchFromPersistence -import com.digitalasset.canton.platform.store.dao.events.TransactionLogUpdatesConversions.{ - ToFlatTransaction, - ToTransactionTree, -} +import com.digitalasset.canton.platform.store.dao.events.TransactionLogUpdatesConversions import com.digitalasset.canton.platform.store.dao.{ BufferedStreamsReader, - BufferedTransactionPointwiseReader, BufferedUpdatePointwiseReader, EventProjectionProperties, LedgerDaoUpdateReader, } import com.digitalasset.canton.platform.store.interfaces.TransactionLogUpdate -import com.digitalasset.canton.platform.{ - InternalTransactionFormat, - InternalUpdateFormat, - Party, - TemplatePartiesFilter, -} -import com.digitalasset.canton.{data, platform} +import com.digitalasset.canton.platform.{InternalUpdateFormat, TemplatePartiesFilter} import org.apache.pekko.NotUsed import org.apache.pekko.stream.scaladsl.Source -import scala.annotation.nowarn import scala.concurrent.{ExecutionContext, Future} -// TODO(#23504) remove TransactionTrees, getTransactionById and getTransactionByOffset related methods -@nowarn("cat=deprecation") private[events] class BufferedUpdateReader( delegate: LedgerDaoUpdateReader, bufferedUpdatesReader: BufferedStreamsReader[InternalUpdateFormat, GetUpdatesResponse], - bufferedTransactionTreesReader: BufferedStreamsReader[ - (Option[Set[Party]], EventProjectionProperties), - GetUpdateTreesResponse, - ], - 
bufferedTransactionTreeByIdReader: BufferedTransactionPointwiseReader[ - (String, Set[Party], EventProjectionProperties), - GetTransactionTreeResponse, - ], bufferedUpdateReader: BufferedUpdatePointwiseReader[ (LookupKey, InternalUpdateFormat), GetUpdateResponse, ], - bufferedTransactionTreeByOffsetReader: BufferedTransactionPointwiseReader[ - (Offset, Set[Party], EventProjectionProperties), - GetTransactionTreeResponse, - ], lfValueTranslation: LfValueTranslation, directEC: DirectExecutionContext, )(implicit executionContext: ExecutionContext) @@ -81,103 +50,21 @@ private[events] class BufferedUpdateReader( startInclusive = startInclusive, endInclusive = endInclusive, persistenceFetchArgs = internalUpdateFormat, - bufferFilter = ToFlatTransaction + bufferFilter = TransactionLogUpdatesConversions .filter(internalUpdateFormat), - toApiResponse = ToFlatTransaction + toApiResponse = TransactionLogUpdatesConversions .toGetUpdatesResponse(internalUpdateFormat, lfValueTranslation)( loggingContext, directEC, ), ) - override def getTransactionTrees( - startInclusive: Offset, - endInclusive: Offset, - requestingParties: Option[Set[Party]], - eventProjectionProperties: EventProjectionProperties, - )(implicit - loggingContext: LoggingContextWithTrace - ): Source[(Offset, GetUpdateTreesResponse), NotUsed] = - bufferedTransactionTreesReader - .stream( - startInclusive = startInclusive, - endInclusive = endInclusive, - persistenceFetchArgs = (requestingParties, eventProjectionProperties), - bufferFilter = ToTransactionTree - .filter(requestingParties), - toApiResponse = ToTransactionTree - .toGetTransactionTreesResponse( - requestingParties, - eventProjectionProperties, - lfValueTranslation, - )( - loggingContext, - directEC, - ), - ) - - override def lookupTransactionById( - updateId: data.UpdateId, - internalTransactionFormat: InternalTransactionFormat, - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetTransactionResponse]] = - Future.delegate( - bufferedUpdateReader - .fetch( - LookupKey.UpdateId(updateId) -> InternalUpdateFormat( - includeTransactions = Some(internalTransactionFormat), - includeReassignments = None, - includeTopologyEvents = None, - ) - ) - .map(_.flatMap(_.update.transaction)) - .map(_.map(tx => GetTransactionResponse(transaction = Some(tx)))) - ) - - override def lookupTransactionByOffset( - offset: data.Offset, - internalTransactionFormat: InternalTransactionFormat, - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetTransactionResponse]] = - Future.delegate( - bufferedUpdateReader - .fetch( - LookupKey.Offset(offset) -> InternalUpdateFormat( - includeTransactions = Some(internalTransactionFormat), - includeReassignments = None, - includeTopologyEvents = None, - ) - ) - .map(_.flatMap(_.update.transaction)) - .map(_.map(tx => GetTransactionResponse(transaction = Some(tx)))) - ) - def lookupUpdateBy( lookupKey: LookupKey, internalUpdateFormat: InternalUpdateFormat, )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetUpdateResponse]] = Future.delegate(bufferedUpdateReader.fetch(lookupKey -> internalUpdateFormat)) - override def lookupTransactionTreeById( - updateId: data.UpdateId, - requestingParties: Set[Party], - eventProjectionProperties: EventProjectionProperties, - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetTransactionTreeResponse]] = - Future.delegate( - bufferedTransactionTreeByIdReader.fetch( - (updateId, requestingParties, eventProjectionProperties) - ) - ) - - override def 
lookupTransactionTreeByOffset( - offset: Offset, - requestingParties: Set[Party], - eventProjectionProperties: EventProjectionProperties, - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetTransactionTreeResponse]] = - Future.delegate( - bufferedTransactionTreeByOffsetReader.fetch( - (offset, requestingParties, eventProjectionProperties) - ) - ) - override def getActiveContracts( activeAt: Option[Offset], filter: TemplatePartiesFilter, @@ -188,8 +75,6 @@ private[events] class BufferedUpdateReader( delegate.getActiveContracts(activeAt, filter, eventProjectionProperties) } -// TODO(#23504) remove TransactionTrees, getTransactionById and getTransactionByOffset related methods -@nowarn("cat=deprecation") private[platform] object BufferedUpdateReader { def apply( delegate: LedgerDaoUpdateReader, @@ -229,71 +114,6 @@ private[platform] object BufferedUpdateReader { loggerFactory, ) - val transactionTreesStreamReader = - new BufferedStreamsReader[ - (Option[Set[Party]], EventProjectionProperties), - GetUpdateTreesResponse, - ]( - inMemoryFanoutBuffer = updatesBuffer, - fetchFromPersistence = new FetchFromPersistence[ - (Option[Set[Party]], EventProjectionProperties), - GetUpdateTreesResponse, - ] { - override def apply( - startInclusive: Offset, - endInclusive: Offset, - filter: (Option[Set[Party]], EventProjectionProperties), - )(implicit - loggingContext: LoggingContextWithTrace - ): Source[(Offset, GetUpdateTreesResponse), NotUsed] = { - val (requestingParties, eventProjectionProperties) = filter - delegate - .getTransactionTrees( - startInclusive = startInclusive, - endInclusive = endInclusive, - requestingParties = requestingParties, - eventProjectionProperties = eventProjectionProperties, - ) - } - }, - bufferedStreamEventsProcessingParallelism = eventProcessingParallelism, - metrics = metrics, - streamName = "transaction_trees", - loggerFactory, - ) - - val bufferedTransactionTreeByIdReader = - new BufferedTransactionPointwiseReader[ - (String, Set[Party], EventProjectionProperties), - GetTransactionTreeResponse, - ]( - fetchFromPersistence = { - case ( - (updateId, parties, eventProjectionProperties), - loggingContext: LoggingContextWithTrace, - ) => - delegate.lookupTransactionTreeById( - updateId = platform.UpdateId.assertFromString(updateId), - requestingParties = parties, - eventProjectionProperties = eventProjectionProperties, - )(loggingContext) - }, - fetchFromBuffer = { case (updateId, _, _) => updatesBuffer.lookupTransaction(updateId) }, - toApiResponse = { - case ( - transactionAccepted: TransactionLogUpdate.TransactionAccepted, - (_updateId, parties, eventProjectionProperties), - loggingContext: LoggingContextWithTrace, - ) => - ToTransactionTree.toGetTransactionResponse( - transactionLogUpdate = transactionAccepted, - requestingParties = parties, - eventProjectionProperties = eventProjectionProperties, - lfValueTranslation = lfValueTranslation, - )(loggingContext, directEC) - }, - ) - val updatePointwiseReader = new BufferedUpdatePointwiseReader[ (LookupKey, InternalUpdateFormat), @@ -315,51 +135,18 @@ private[platform] object BufferedUpdateReader { queryParam: (LookupKey, InternalUpdateFormat), loggingContext: LoggingContextWithTrace, ) => - ToFlatTransaction.toGetUpdateResponse( + TransactionLogUpdatesConversions.toGetUpdateResponse( transactionLogUpdate, queryParam._2, lfValueTranslation, )(loggingContext, directEC), ) - val bufferedTransactionTreeByOffsetReader = - new BufferedTransactionPointwiseReader[ - (Offset, Set[Party], 
EventProjectionProperties), - GetTransactionTreeResponse, - ]( - fetchFromPersistence = { - case ( - (offset, parties, eventProjectionProperties), - loggingContext: LoggingContextWithTrace, - ) => - delegate.lookupTransactionTreeByOffset( - offset = offset, - requestingParties = parties, - eventProjectionProperties = eventProjectionProperties, - )(loggingContext) - }, - fetchFromBuffer = queryParam => updatesBuffer.lookupTransaction(queryParam._1), - toApiResponse = ( - transactionAccepted: TransactionLogUpdate.TransactionAccepted, - queryParam: (Offset, Set[Party], EventProjectionProperties), - loggingContext: LoggingContextWithTrace, - ) => - ToTransactionTree.toGetTransactionResponse( - transactionAccepted, - queryParam._2, - queryParam._3, - lfValueTranslation, - )(loggingContext, directEC), - ) - new BufferedUpdateReader( delegate = delegate, bufferedUpdatesReader = updatesStreamReader, bufferedUpdateReader = updatePointwiseReader, - bufferedTransactionTreesReader = transactionTreesStreamReader, lfValueTranslation = lfValueTranslation, - bufferedTransactionTreeByIdReader = bufferedTransactionTreeByIdReader, - bufferedTransactionTreeByOffsetReader = bufferedTransactionTreeByOffsetReader, directEC = directEC, ) } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/CompressionStrategy.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/CompressionStrategy.scala index ae3868a2d8..55f488612d 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/CompressionStrategy.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/CompressionStrategy.scala @@ -10,10 +10,12 @@ import com.digitalasset.canton.platform.store.serialization.Compression import java.io.ByteArrayOutputStream final case class CompressionStrategy( - createArgumentCompression: FieldCompressionStrategy, - createKeyValueCompression: FieldCompressionStrategy, - exerciseArgumentCompression: FieldCompressionStrategy, - exerciseResultCompression: FieldCompressionStrategy, + createArgumentCompressionLegacy: FieldCompressionStrategy, // TODO(i25857) not needed with new schema anymore + createKeyValueCompressionLegacy: FieldCompressionStrategy, // TODO(i25857) not needed with new schema anymore + consumingExerciseArgumentCompression: FieldCompressionStrategy, + consumingExerciseResultCompression: FieldCompressionStrategy, + nonConsumingExerciseArgumentCompression: FieldCompressionStrategy, + nonConsumingExerciseResultCompression: FieldCompressionStrategy, ) object CompressionStrategy { @@ -24,29 +26,59 @@ object CompressionStrategy { def allGZIP(metrics: LedgerApiServerMetrics): CompressionStrategy = buildUniform(Compression.Algorithm.GZIP, metrics) + def buildFromConfig( + metrics: LedgerApiServerMetrics + )(consumingExercise: Boolean, nonConsumingExercise: Boolean): CompressionStrategy = { + val consumingAlgorithm: Compression.Algorithm = + if (consumingExercise) Compression.Algorithm.GZIP else Compression.Algorithm.None + val nonConsumingAlgorithm: Compression.Algorithm = + if (nonConsumingExercise) Compression.Algorithm.GZIP else Compression.Algorithm.None + build( + Compression.Algorithm.None, + Compression.Algorithm.None, + consumingAlgorithm, + consumingAlgorithm, + nonConsumingAlgorithm, + nonConsumingAlgorithm, + metrics, + ) + } + def buildUniform( algorithm: Compression.Algorithm, 
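
buildFromConfig above selects one algorithm per exercise kind, GZIP when the corresponding flag is set and no compression otherwise, while the legacy create-argument and create-key fields remain uncompressed. A reduced sketch of that selection with stand-in types (Algorithm and Strategy are simplified placeholders, not the production Compression types):

object CompressionConfigSketch {
  sealed trait Algorithm
  case object GZIP extends Algorithm
  case object Uncompressed extends Algorithm

  final case class Strategy(
      consumingArgument: Algorithm,
      consumingResult: Algorithm,
      nonConsumingArgument: Algorithm,
      nonConsumingResult: Algorithm,
  )

  // One flag per exercise kind; the argument and the result of that kind
  // share the chosen algorithm, mirroring buildFromConfig.
  def fromConfig(consumingExercise: Boolean, nonConsumingExercise: Boolean): Strategy = {
    val consuming = if (consumingExercise) GZIP else Uncompressed
    val nonConsuming = if (nonConsumingExercise) GZIP else Uncompressed
    Strategy(consuming, consuming, nonConsuming, nonConsuming)
  }
}
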
metrics: LedgerApiServerMetrics, ): CompressionStrategy = - build(algorithm, algorithm, algorithm, algorithm, metrics) + build(algorithm, algorithm, algorithm, algorithm, algorithm, algorithm, metrics) def build( createArgumentAlgorithm: Compression.Algorithm, createKeyValueAlgorithm: Compression.Algorithm, - exerciseArgumentAlgorithm: Compression.Algorithm, - exerciseResultAlgorithm: Compression.Algorithm, + consumingExerciseArgumentAlgorithm: Compression.Algorithm, + consumingExerciseResultAlgorithm: Compression.Algorithm, + nonConsumingExerciseArgumentAlgorithm: Compression.Algorithm, + nonConsumingExerciseResultAlgorithm: Compression.Algorithm, metrics: LedgerApiServerMetrics, ): CompressionStrategy = CompressionStrategy( - createArgumentCompression = + createArgumentCompressionLegacy = FieldCompressionStrategy(createArgumentAlgorithm, CompressionMetrics.createArgument(metrics)), - createKeyValueCompression = + createKeyValueCompressionLegacy = FieldCompressionStrategy(createKeyValueAlgorithm, CompressionMetrics.createKeyValue(metrics)), - exerciseArgumentCompression = FieldCompressionStrategy( - exerciseArgumentAlgorithm, + consumingExerciseArgumentCompression = FieldCompressionStrategy( + consumingExerciseArgumentAlgorithm, + CompressionMetrics.exerciseArgument(metrics), + ), + consumingExerciseResultCompression = FieldCompressionStrategy( + consumingExerciseResultAlgorithm, + CompressionMetrics.exerciseResult(metrics), + ), + nonConsumingExerciseArgumentCompression = FieldCompressionStrategy( + nonConsumingExerciseArgumentAlgorithm, CompressionMetrics.exerciseArgument(metrics), ), - exerciseResultCompression = - FieldCompressionStrategy(exerciseResultAlgorithm, CompressionMetrics.exerciseResult(metrics)), + nonConsumingExerciseResultCompression = FieldCompressionStrategy( + nonConsumingExerciseResultAlgorithm, + CompressionMetrics.exerciseResult(metrics), + ), ) } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ContractLoader.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ContractLoader.scala index 479dd0fbe0..69f366de36 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ContractLoader.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ContractLoader.scala @@ -7,8 +7,12 @@ import com.daml.ledger.resources.ResourceOwner import com.daml.metrics.InstrumentedGraph import com.daml.metrics.api.MetricHandle.Histogram import com.daml.metrics.api.MetricsContext -import com.digitalasset.canton.data.Offset import com.digitalasset.canton.ledger.error.LedgerApiErrors +import com.digitalasset.canton.ledger.participant.state.index.ContractStateStatus.{ + Active, + Archived, + ExistingContractStatus, +} import com.digitalasset.canton.logging.{ ErrorLoggingContext, LoggingContextWithTrace, @@ -17,11 +21,6 @@ import com.digitalasset.canton.logging.{ } import com.digitalasset.canton.metrics.{BatchLoaderMetrics, LedgerApiServerMetrics} import com.digitalasset.canton.platform.store.backend.ContractStorageBackend -import com.digitalasset.canton.platform.store.backend.ContractStorageBackend.{ - RawArchivedContract, - RawContractState, - RawCreatedContract, -} import com.digitalasset.canton.platform.store.dao.DbDispatcher import com.digitalasset.canton.platform.store.interfaces.LedgerDaoContractsReader.{ KeyState, @@ -135,23 +134,23 @@ class 
PekkoStreamParallelBatchedLoader[KEY, VALUE]( * insertion). */ trait ContractLoader { - def contracts: Loader[(ContractId, Offset), RawContractState] - def keys: Loader[(GlobalKey, Offset), KeyState] + def contracts: Loader[(ContractId, Long), ExistingContractStatus] + def keys: Loader[(GlobalKey, Long), KeyState] } object ContractLoader { private[events] def maxOffsetAndContextFromBatch[T]( - batch: Seq[((T, Offset), LoggingContextWithTrace)], + batch: Seq[((T, Long), LoggingContextWithTrace)], histogram: Histogram, - ): (Offset, LoggingContextWithTrace) = { - val ((_, latestValidAtOffset), usedLoggingContext) = batch + ): (Long, LoggingContextWithTrace) = { + val ((_, latestValidAtEventSeqId), usedLoggingContext) = batch .maxByOption(_._1._2) .getOrElse( throw new IllegalStateException("A batch should never be empty") ) histogram.update(batch.size)(MetricsContext.Empty) - (latestValidAtOffset, usedLoggingContext) + (latestValidAtEventSeqId, usedLoggingContext) } private[events] def createQueue[K, V](maxQueueSize: Int, metrics: BatchLoaderMetrics)(implicit @@ -178,17 +177,17 @@ object ContractLoader { materializer: Materializer, executionContext: ExecutionContext, ): ResourceOwner[PekkoStreamParallelBatchedLoader[ - (ContractId, Offset), - RawContractState, + (ContractId, Long), + ExistingContractStatus, ]] = ResourceOwner .forReleasable(() => new PekkoStreamParallelBatchedLoader[ - (ContractId, Offset), - RawContractState, + (ContractId, Long), + ExistingContractStatus, ]( batchLoad = { batch => - val (latestValidAtOffset, usedLoggingContext) = maxOffsetAndContextFromBatch( + val (latestValidAtEventSeqId, usedLoggingContext) = maxOffsetAndContextFromBatch( batch, metrics.index.db.activeContracts.batchSize, ) @@ -198,7 +197,7 @@ object ContractLoader { .executeSql(metrics.index.db.lookupArchivedContractsDbMetrics)( contractStorageBackend.archivedContracts( contractIds = contractIds, - before = latestValidAtOffset, + beforeEventSeqId = latestValidAtEventSeqId, ) )(usedLoggingContext) val createdContractsF = @@ -206,23 +205,23 @@ object ContractLoader { .executeSql(metrics.index.db.lookupCreatedContractsDbMetrics)( contractStorageBackend.createdContracts( contractIds = contractIds, - before = latestValidAtOffset, + beforeEventSeqId = latestValidAtEventSeqId, ) )(usedLoggingContext) def additionalContractsF( - archivedContracts: Map[ContractId, RawArchivedContract], - createdContracts: Map[ContractId, RawCreatedContract], - ): Future[Map[ContractId, RawCreatedContract]] = { + archivedContracts: Set[ContractId], + createdContracts: Set[ContractId], + ): Future[Set[ContractId]] = { val notFoundContractIds = contractIds.view .filterNot(archivedContracts.contains) .filterNot(createdContracts.contains) .toSeq - if (notFoundContractIds.isEmpty) Future.successful(Map.empty) + if (notFoundContractIds.isEmpty) Future.successful(Set.empty) else dbDispatcher.executeSql(metrics.index.db.lookupAssignedContractsDbMetrics)( contractStorageBackend.assignedContracts( contractIds = notFoundContractIds, - before = latestValidAtOffset, + beforeEventSeqId = latestValidAtEventSeqId, ) )(usedLoggingContext) } @@ -234,10 +233,13 @@ object ContractLoader { createdContracts = createdContracts, ) } yield batch.view.flatMap { case ((contractId, offset), _) => - archivedContracts - .get(contractId) - .orElse(createdContracts.get(contractId): Option[RawContractState]) - .orElse(additionalContracts.get(contractId): Option[RawContractState]) + { + if (archivedContracts.contains(contractId)) Some(Archived) + else if ( + 
createdContracts.contains(contractId) || additionalContracts.contains(contractId) + ) Some(Active) + else None + } .map((contractId, offset) -> _) .toList }.toMap @@ -262,18 +264,18 @@ object ContractLoader { materializer: Materializer, executionContext: ExecutionContext, ): ResourceOwner[PekkoStreamParallelBatchedLoader[ - (GlobalKey, Offset), + (GlobalKey, Long), KeyState, ]] = ResourceOwner .forReleasable(() => new PekkoStreamParallelBatchedLoader[ - (GlobalKey, Offset), + (GlobalKey, Long), KeyState, ]( batchLoad = { batch => // we can use the latest offset as the API only requires us to not return a state older than the given offset - val (latestValidAtOffset, usedLoggingContext) = + val (latestValidAtEventSeqId, usedLoggingContext) = ContractLoader.maxOffsetAndContextFromBatch( batch, metrics.index.db.activeContracts.batchSize, @@ -284,12 +286,12 @@ object ContractLoader { .executeSql(metrics.index.db.lookupContractByKeyDbMetrics)( contractStorageBackend.keyStates( keys = contractKeys, - validAt = latestValidAtOffset, + validAtEventSeqId = latestValidAtEventSeqId, ) )(usedLoggingContext) contractKeysF.map { keys => - batch.view.map { case (key, _offset) => + batch.view.map { case (key, _eventSeqId) => (key) -> keys .getOrElse( key._1, { @@ -317,14 +319,14 @@ object ContractLoader { dbDispatcher: DbDispatcher, metrics: LedgerApiServerMetrics, )( - keyWithOffset: (GlobalKey, Offset) + keyWithOffset: (GlobalKey, Long) )(implicit loggingContext: LoggingContextWithTrace) = { val (key, offset) = keyWithOffset dbDispatcher .executeSql(metrics.index.db.lookupContractByKeyDbMetrics)( contractStorageBackend.keyState( key = key, - validAt = offset, + validAtEventSeqId = offset, ) )(loggingContext) } @@ -365,23 +367,23 @@ object ContractLoader { else ResourceOwner.successful(None) } yield { new ContractLoader { - override final val contracts: Loader[(ContractId, Offset), RawContractState] = - new Loader[(ContractId, Offset), RawContractState] { - override def load(key: (ContractId, Offset))(implicit + override final val contracts: Loader[(ContractId, Long), ExistingContractStatus] = + new Loader[(ContractId, Long), ExistingContractStatus] { + override def load(key: (ContractId, Long))(implicit loggingContext: LoggingContextWithTrace - ): Future[Option[RawContractState]] = contractsBatchLoader.load(key) + ): Future[Option[ExistingContractStatus]] = contractsBatchLoader.load(key) } - override final val keys: Loader[(GlobalKey, Offset), KeyState] = + override final val keys: Loader[(GlobalKey, Long), KeyState] = contractKeysBatchLoader match { case Some(batchLoader) => - new Loader[(GlobalKey, Offset), KeyState] { - override def load(key: (GlobalKey, Offset))(implicit + new Loader[(GlobalKey, Long), KeyState] { + override def load(key: (GlobalKey, Long))(implicit loggingContext: LoggingContextWithTrace ): Future[Option[KeyState]] = batchLoader.load(key) } case None => - new Loader[(GlobalKey, Offset), KeyState] { - override def load(key: (GlobalKey, Offset))(implicit + new Loader[(GlobalKey, Long), KeyState] { + override def load(key: (GlobalKey, Long))(implicit loggingContext: LoggingContextWithTrace ): Future[Option[KeyState]] = fetchOneKey(contractStorageBackend, dbDispatcher, metrics)(key).map(Some(_)) @@ -391,15 +393,15 @@ object ContractLoader { } val dummyLoader = new ContractLoader { - override final val contracts: Loader[(ContractId, Offset), RawContractState] = - new Loader[(ContractId, Offset), RawContractState] { - override def load(key: (ContractId, Offset))(implicit + override final 
val contracts: Loader[(ContractId, Long), ExistingContractStatus] = + new Loader[(ContractId, Long), ExistingContractStatus] { + override def load(key: (ContractId, Long))(implicit loggingContext: LoggingContextWithTrace - ): Future[Option[RawContractState]] = Future.successful(None) + ): Future[Option[ExistingContractStatus]] = Future.successful(None) } - override final val keys: Loader[(GlobalKey, Offset), KeyState] = - new Loader[(GlobalKey, Offset), KeyState] { - override def load(key: (GlobalKey, Offset))(implicit + override final val keys: Loader[(GlobalKey, Long), KeyState] = + new Loader[(GlobalKey, Long), KeyState] { + override def load(key: (GlobalKey, Long))(implicit loggingContext: LoggingContextWithTrace ): Future[Option[KeyState]] = Future.successful(None) } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ContractStateEvent.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ContractStateEvent.scala index 44aed0efb3..94ef15fece 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ContractStateEvent.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ContractStateEvent.scala @@ -3,29 +3,19 @@ package com.digitalasset.canton.platform.store.dao.events -import com.digitalasset.canton.data.Offset import com.digitalasset.canton.platform.* -sealed trait ContractStateEvent extends Product with Serializable { - def eventOffset: Offset -} +sealed trait ContractStateEvent extends Product with Serializable object ContractStateEvent { final case class Created( - contract: FatContract, - eventOffset: Offset, - ) extends ContractStateEvent { - def contractId: ContractId = contract.contractId - def globalKey: Option[Key] = contract.contractKeyWithMaintainers.map(_.globalKey) - } + contractId: ContractId, + globalKey: Option[Key], + ) extends ContractStateEvent final case class Archived( contractId: ContractId, globalKey: Option[Key], - stakeholders: Set[Party], - eventOffset: Offset, ) extends ContractStateEvent // This is merely an offset placeholder for now, sole purpose is to tick the StateCaches internal offset - final case class ReassignmentAccepted( - eventOffset: Offset - ) extends ContractStateEvent + case object ReassignmentAccepted extends ContractStateEvent } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ContractsReader.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ContractsReader.scala index 3a893fc68f..1e12295c58 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ContractsReader.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ContractsReader.scala @@ -4,27 +4,15 @@ package com.digitalasset.canton.platform.store.dao.events import com.daml.metrics.Timed -import com.daml.metrics.api.MetricHandle.Timer -import com.digitalasset.canton.data.Offset +import com.digitalasset.canton.ledger.participant.state.index.ContractStateStatus.ExistingContractStatus import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.LedgerApiServerMetrics import com.digitalasset.canton.platform.* import 
com.digitalasset.canton.platform.store.backend.ContractStorageBackend -import com.digitalasset.canton.platform.store.backend.ContractStorageBackend.{ - RawArchivedContract, - RawCreatedContract, -} import com.digitalasset.canton.platform.store.dao.DbDispatcher -import com.digitalasset.canton.platform.store.dao.events.ContractsReader.* import com.digitalasset.canton.platform.store.interfaces.LedgerDaoContractsReader import com.digitalasset.canton.platform.store.interfaces.LedgerDaoContractsReader.* -import com.digitalasset.canton.platform.store.serialization.{Compression, ValueSerializer} -import com.digitalasset.daml.lf.data.Bytes -import com.digitalasset.daml.lf.data.Ref.NameTypeConRef -import com.digitalasset.daml.lf.transaction.{CreationTime, GlobalKeyWithMaintainers, Node} -import com.digitalasset.daml.lf.value.Value.VersionedValue -import java.io.{ByteArrayInputStream, InputStream} import scala.concurrent.{ExecutionContext, Future} private[dao] sealed class ContractsReader( @@ -41,13 +29,13 @@ private[dao] sealed class ContractsReader( * * Used to unit test the SQL queries for key lookups. Does not use batching. */ - override def lookupKeyStatesFromDb(keys: Seq[Key], notEarlierThanOffset: Offset)(implicit + override def lookupKeyStatesFromDb(keys: Seq[Key], notEarlierThanEventSeqId: Long)(implicit loggingContext: LoggingContextWithTrace ): Future[Map[Key, KeyState]] = Timed.future( metrics.index.db.lookupKey, dispatcher.executeSql(metrics.index.db.lookupContractByKeyDbMetrics)( - storageBackend.keyStates(keys, notEarlierThanOffset) + storageBackend.keyStates(keys, notEarlierThanEventSeqId) ), ) @@ -55,97 +43,35 @@ private[dao] sealed class ContractsReader( * * @param key * the contract key - * @param notEarlierThanOffset + * @param notEarlierThanEventSeqId * the lower bound offset of the ledger for which to query for the key state * @return * the key state. 
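 * Example (editor's illustrative sketch; `reader`, `k` and `lastEventSeqId` are
 * hypothetical):
 * {{{
 *   // The returned state is never older than the given lower bound.
 *   val state: Future[KeyState] =
 *     reader.lookupKeyState(k, notEarlierThanEventSeqId = lastEventSeqId)
 * }}}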
*/ - override def lookupKeyState(key: Key, notEarlierThanOffset: Offset)(implicit + override def lookupKeyState(key: Key, notEarlierThanEventSeqId: Long)(implicit loggingContext: LoggingContextWithTrace ): Future[KeyState] = Timed.future( metrics.index.db.lookupKey, - contractLoader.keys.load(key -> notEarlierThanOffset).map { + contractLoader.keys.load(key -> notEarlierThanEventSeqId).map { case Some(value) => value case None => logger - .error(s"Key $key resulted in an invalid empty load at offset $notEarlierThanOffset")( + .error( + s"Key $key resulted in an invalid empty load at offset $notEarlierThanEventSeqId" + )( loggingContext.traceContext ) KeyUnassigned }, ) - override def lookupContractState(contractId: ContractId, notEarlierThanOffset: Offset)(implicit + override def lookupContractState(contractId: ContractId, notEarlierThanEventSeqId: Long)(implicit loggingContext: LoggingContextWithTrace - ): Future[Option[ContractState]] = + ): Future[Option[ExistingContractStatus]] = Timed.future( metrics.index.db.lookupActiveContract, - contractLoader.contracts - .load(contractId -> notEarlierThanOffset) - .map(_.map { - case raw: RawCreatedContract => - val decompressionTimer = - metrics.index.db.lookupCreatedContractsDbMetrics.compressionTimer - val deserializationTimer = - metrics.index.db.lookupCreatedContractsDbMetrics.translationTimer - - val packageId = PackageId.assertFromString(raw.packageId) - val templateId = NameTypeConRef.assertFromString(raw.templateId) - val createArg = { - val argCompression = Compression.Algorithm.assertLookup(raw.createArgumentCompression) - val decompressed = decompress(raw.createArgument, argCompression, decompressionTimer) - deserializeValue( - decompressed, - deserializationTimer, - s"Failed to deserialize create argument for contract ${contractId.coid}", - ) - } - - val keyOpt: Option[KeyWithMaintainers] = (raw.createKey, raw.keyMaintainers) match { - case (None, None) => - None - case (Some(key), Some(maintainers)) => - val keyCompression = Compression.Algorithm.assertLookup(raw.createKeyCompression) - val decompressed = decompress(key, keyCompression, decompressionTimer) - val value = deserializeValue( - decompressed, - deserializationTimer, - s"Failed to deserialize create key for contract ${contractId.coid}", - ) - Some( - GlobalKeyWithMaintainers.assertBuild( - templateId = templateId.copy(pkg = packageId), - value = value.unversioned, - maintainers = maintainers, - packageName = templateId.pkg.name, - ) - ) - case (keyOpt, _) => - val msg = - s"contract ${contractId.coid} has " + - (if (keyOpt.isDefined) "a key but no maintainers" else "maintainers but no key") - logger.error(msg)(loggingContext.traceContext) - sys.error(msg) - } - ActiveContract( - FatContract.fromCreateNode( - Node.Create( - coid = contractId, - packageName = templateId.pkg.name, - templateId = templateId.copy(pkg = packageId), - arg = createArg.unversioned, - signatories = raw.signatories, - stakeholders = raw.flatEventWitnesses, - keyOpt = keyOpt, - version = createArg.version, - ), - createTime = CreationTime.CreatedAt(raw.ledgerEffectiveTime), - authenticationData = Bytes.fromByteArray(raw.authenticationData), - ) - ) - case raw: RawArchivedContract => ArchivedContract(raw.flatEventWitnesses) - }), + contractLoader.contracts.load(contractId -> notEarlierThanEventSeqId), ) } @@ -166,24 +92,4 @@ private[dao] object ContractsReader { loggerFactory = loggerFactory, ) - private def decompress( - data: Array[Byte], - algorithm: Compression.Algorithm, - timer: Timer, - ): 
InputStream = - Timed.value( - timer, - value = algorithm.decompress(new ByteArrayInputStream(data)), - ) - - private def deserializeValue( - decompressed: InputStream, - timer: Timer, - errorContext: String, - ): VersionedValue = - Timed.value( - timer, - value = ValueSerializer.deserializeValue(decompressed, errorContext), - ) - } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/EventsReader.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/EventsReader.scala index b948e26571..87ecf254ad 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/EventsReader.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/EventsReader.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton.platform.store.dao.events import com.daml.ledger.api.v2.event_query_service.{Archived, Created, GetEventsByContractIdResponse} import com.digitalasset.canton.concurrent.DirectExecutionContext import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors +import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext import com.digitalasset.canton.logging.{ ErrorLoggingContext, LoggingContextWithTrace, @@ -13,12 +14,14 @@ import com.digitalasset.canton.logging.{ NamedLogging, } import com.digitalasset.canton.metrics.LedgerApiServerMetrics +import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors.AbortedDueToShutdown +import com.digitalasset.canton.participant.store.ContractStore import com.digitalasset.canton.platform.InternalEventFormat import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{ Entry, - RawArchivedEvent, - RawCreatedEvent, - RawFlatEvent, + RawAcsDeltaEventLegacy, + RawArchivedEventLegacy, + RawCreatedEventLegacy, } import com.digitalasset.canton.platform.store.backend.{EventStorageBackend, ParameterStorageBackend} import com.digitalasset.canton.platform.store.cache.LedgerEndCache @@ -35,6 +38,7 @@ private[dao] sealed class EventsReader( val parameterStorageBackend: ParameterStorageBackend, val metrics: LedgerApiServerMetrics, val lfValueTranslation: LfValueTranslation, + val contractStore: ContractStore, val ledgerEndCache: LedgerEndCache, override val loggerFactory: NamedLoggerFactory, )(implicit ec: ExecutionContext) @@ -53,22 +57,24 @@ private[dao] sealed class EventsReader( .map { internalEventFormat => for { rawEvents <- dbDispatcher.executeSql(dbMetrics.getEventsByContractId)( - eventStorageBackend.eventReaderQueries.fetchContractIdEvents( + eventStorageBackend.eventReaderQueries.fetchContractIdEventsLegacy( contractId, requestingParties = internalEventFormat.templatePartiesFilter.allFilterParties, endEventSequentialId = ledgerEndCache().map(_.lastEventSeqId).getOrElse(0L), ) ) - rawCreatedEvent: Option[Entry[RawCreatedEvent]] = rawEvents.view.collectFirst { entry => - entry.event match { - case created: RawCreatedEvent => - entry.copy(event = created) - } + rawCreatedEvent: Option[Entry[RawCreatedEventLegacy]] = rawEvents.view.collectFirst { + entry => + entry.event match { + case created: RawCreatedEventLegacy => + entry.copy(event = created) + } } - rawArchivedEvent: Option[Entry[RawArchivedEvent]] = rawEvents.view.collectFirst { entry => - entry.event match { - case archived: RawArchivedEvent => entry.copy(event = archived) - } + rawArchivedEvent: 
Option[Entry[RawArchivedEventLegacy]] = rawEvents.view.collectFirst { + entry => + entry.event match { + case archived: RawArchivedEventLegacy => entry.copy(event = archived) + } } rawEventsRestoredWitnesses = restoreWitnessesForTransient( @@ -78,19 +84,31 @@ private[dao] sealed class EventsReader( rawCreatedEventRestoredWitnesses = rawEventsRestoredWitnesses.view .map(_.event) - .collectFirst { case created: RawCreatedEvent => + .collectFirst { case created: RawCreatedEventLegacy => created } + contractsM <- contractStore + .lookupBatchedNonCached( + // we only need the internal contract id for the created event, if it exists + rawCreatedEventRestoredWitnesses.map(_.internalContractId).toList + ) + .failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError) + deserialized <- Future.delegate { implicit val ec: ExecutionContext = directEC // Scala 2 implicit scope override: shadow the outer scope's implicit by name MonadUtil.sequentialTraverse(rawEventsRestoredWitnesses) { event => + val fatContractO = event.event match { + case created: RawCreatedEventLegacy => + contractsM.get(created.internalContractId).map(_.inst) + case _ => None + } UpdateReader - .deserializeRawFlatEvent( + .deserializeRawAcsDeltaEvent( internalEventFormat.eventProjectionProperties, lfValueTranslation, - )(event) + )(event -> fatContractO) .map(_ -> event.synchronizerId) } } @@ -140,17 +158,17 @@ private[dao] sealed class EventsReader( // transient events have empty witnesses, so we need to restore them from the created event private def restoreWitnessesForTransient( - createdEventO: Option[Entry[RawCreatedEvent]], - archivedEventO: Option[Entry[RawArchivedEvent]], - ): Seq[Entry[RawFlatEvent]] = + createdEventO: Option[Entry[RawCreatedEventLegacy]], + archivedEventO: Option[Entry[RawArchivedEventLegacy]], + ): Seq[Entry[RawAcsDeltaEventLegacy]] = (createdEventO, archivedEventO) match { case (Some(created), Some(archived)) if created.offset == archived.offset => val witnesses = created.event.signatories ++ created.event.observers val newCreated = created.copy(event = created.event.copy(witnessParties = witnesses)) val newArchived = archived.copy(event = archived.event.copy(witnessParties = witnesses)) - Seq(newCreated: Entry[RawFlatEvent], newArchived) - case _ => (createdEventO.toList: Seq[Entry[RawFlatEvent]]) ++ archivedEventO.toList + Seq(newCreated: Entry[RawAcsDeltaEventLegacy], newArchived) + case _ => (createdEventO.toList: Seq[Entry[RawAcsDeltaEventLegacy]]) ++ archivedEventO.toList } // TODO(i16065): Re-enable getEventsByContractKey tests diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/EventsTable.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/EventsTable.scala index 7190dc8f45..4563f1e63a 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/EventsTable.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/EventsTable.scala @@ -14,16 +14,8 @@ import com.daml.ledger.api.v2.topology_transaction.{ TopologyTransaction, } import com.daml.ledger.api.v2.trace_context.TraceContext as DamlTraceContext -import com.daml.ledger.api.v2.transaction.{ - Transaction as ApiTransaction, - TransactionTree as ApiTransactionTree, - TreeEvent, -} -import com.daml.ledger.api.v2.update_service.{ - GetTransactionTreeResponse, - GetUpdateTreesResponse, - GetUpdatesResponse, 
-} +import com.daml.ledger.api.v2.transaction.Transaction as ApiTransaction +import com.daml.ledger.api.v2.update_service.GetUpdatesResponse import com.digitalasset.canton.data.Offset import com.digitalasset.canton.ledger.api.TransactionShape import com.digitalasset.canton.ledger.api.TransactionShape.{AcsDelta, LedgerEffects} @@ -38,11 +30,8 @@ import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{ Entry, RawParticipantAuthorization, } -import com.digitalasset.canton.platform.store.utils.EventOps.TreeEventOps import com.google.protobuf.ByteString -import scala.annotation.nowarn - object EventsTable { object TransactionConversions { @@ -90,7 +79,16 @@ object EventsTable { ApiTransaction( updateId = first.updateId, commandId = first.commandId.getOrElse(""), - effectiveAt = Some(TimestampConversion.fromLf(first.ledgerEffectiveTime)), + effectiveAt = Some( + first.event.event.created + .flatMap(_.createdAt) + .orElse(first.ledgerEffectiveTime.map(TimestampConversion.fromLf)) + .getOrElse( + throw new IllegalStateException( + "Either newly created contracts or the Entry for exercises should provide the ledgerEffectiveTime." + ) + ) + ), workflowId = first.workflowId.getOrElse(""), offset = first.offset, events = events, @@ -169,62 +167,6 @@ object EventsTable { recordTime = Some(TimestampConversion.fromLf(first.recordTime)), ) } - - // TODO(#23504) remove when TreeEvent is removed - @nowarn("cat=deprecation") - private def treeOf( - events: Seq[Entry[TreeEvent]] - ): (Map[Int, TreeEvent], Option[DamlTraceContext]) = { - - // All events in this transaction by their identifier, with their children - // filtered according to those visible for this request - val eventsById = - events.iterator - .map(_.event) - .map(e => e.nodeId -> e) - .toMap - - (eventsById, extractTraceContext(events)) - - } - - // TODO(#23504) remove when TreeEvent is removed - @nowarn("cat=deprecation") - private def transactionTree( - events: Seq[Entry[TreeEvent]] - ): Option[ApiTransactionTree] = - events.headOption.map { first => - val (eventsById, traceContext) = treeOf(events) - ApiTransactionTree( - updateId = first.updateId, - commandId = first.commandId.getOrElse(""), - workflowId = first.workflowId.getOrElse(""), - effectiveAt = Some(TimestampConversion.fromLf(first.ledgerEffectiveTime)), - offset = first.offset, - eventsById = eventsById, - synchronizerId = first.synchronizerId, - traceContext = traceContext, - recordTime = Some(TimestampConversion.fromLf(first.recordTime)), - ) - } - - // TODO(#23504) remove when TreeEvent is removed - @nowarn("cat=deprecation") - def toGetTransactionTreesResponse( - events: Seq[Entry[TreeEvent]] - ): List[(Long, GetUpdateTreesResponse)] = - transactionTree(events).toList.map(tx => - tx.offset -> GetUpdateTreesResponse(GetUpdateTreesResponse.Update.TransactionTree(tx)) - .withPrecomputedSerializedSize() - ) - - // TODO(#23504) remove when TreeEvent is removed - @nowarn("cat=deprecation") - def toGetTransactionTreeResponse( - events: Seq[Entry[TreeEvent]] - ): Option[GetTransactionTreeResponse] = - transactionTree(events).map(tx => GetTransactionTreeResponse(Some(tx))) - } } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/InputContractPackages.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/InputContractPackages.scala new file mode 100644 index 0000000000..0b50dd5f75 --- /dev/null +++ 
b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/InputContractPackages.scala @@ -0,0 +1,57 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.platform.store.dao.events + +import cats.implicits.toFunctorOps +import com.digitalasset.canton.LfPackageId +import com.digitalasset.canton.protocol.GenContractInstance +import com.digitalasset.daml.lf.data +import com.digitalasset.daml.lf.data.Relation +import com.digitalasset.daml.lf.transaction.{FatContractInstance, Node, Transaction} +import com.digitalasset.daml.lf.value.Value.ContractId + +object InputContractPackages { + + /** Returns a mapping from all contract ids referenced in the transaction to their package ids, + * excluding those that are created within the transaction. + */ + def forTransaction(tx: Transaction): data.Relation[ContractId, LfPackageId] = + tx.fold(data.Relation.empty[ContractId, LfPackageId]) { + case ( + acc, + (_, Node.Exercise(coid, _, templateId, _, _, _, _, _, _, _, _, _, _, _, _, _, _)), + ) => + Relation.update(acc, coid, templateId.packageId) + case (acc, (_, Node.Fetch(coid, _, templateId, _, _, _, _, _, _, _))) => + Relation.update(acc, coid, templateId.packageId) + case (acc, (_, Node.LookupByKey(_, templateId, _, Some(coid), _))) => + Relation.update(acc, coid, templateId.packageId) + case (acc, _) => acc + } -- tx.localContracts.keySet + + /** Merges two maps, returning an error if their key sets differ. */ + private[events] def strictZipByKey[K, V1, V2]( + m1: Map[K, V1], + m2: Map[K, V2], + ): Either[Set[K], Map[K, (V1, V2)]] = { + val keys1 = m1.keySet + val keys2 = m2.keySet + Either.cond( + keys1 == keys2, + keys1.view.map(k => k -> (m1(k), m2(k))).toMap, + (keys1 union keys2) -- (keys1 intersect keys2), + ) + } + + /** Returns a mapping from all contract ids referenced in the transaction to their (contract + * instance, package id), excluding those that are created within the transaction. Fails if the + * set of contract ids in the transaction and in the provided contracts differ. 
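+   * Editor's sketch (illustrative values): key-set mismatches surface as the
+   * symmetric difference of the two key sets, via [[strictZipByKey]]:
+   * {{{
+   *   strictZipByKey(Map(1 -> "a"), Map(1 -> true)) // Right(Map(1 -> ("a", true)))
+   *   strictZipByKey(Map(1 -> "a"), Map(2 -> true)) // Left(Set(1, 2))
+   * }}}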
+ */ + def forTransactionWithContracts( + tx: Transaction, + contracts: Map[ContractId, GenContractInstance], + ): Either[Set[ContractId], Map[ContractId, (FatContractInstance, Set[LfPackageId])]] = + strictZipByKey(contracts.fmap(_.inst), forTransaction(tx)) + +} diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/LfValueTranslation.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/LfValueTranslation.scala index 27933bfcb5..fdd6c6ddfe 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/LfValueTranslation.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/LfValueTranslation.scala @@ -3,7 +3,6 @@ package com.digitalasset.canton.platform.store.dao.events -import cats.implicits.toTraverseOps import com.daml.ledger.api.v2.event.{ArchivedEvent, CreatedEvent, ExercisedEvent, InterfaceView} import com.daml.ledger.api.v2.value import com.daml.ledger.api.v2.value.{Record as ApiRecord, Value as ApiValue} @@ -19,9 +18,9 @@ import com.digitalasset.canton.metrics.LedgerApiServerMetrics import com.digitalasset.canton.platform.apiserver.services.{ErrorCause, RejectionGenerators} import com.digitalasset.canton.platform.packages.DeduplicatingPackageLoader import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{ - RawArchivedEvent, - RawCreatedEvent, - RawExercisedEvent, + Entry, + RawArchivedEventLegacy, + RawExercisedEventLegacy, } import com.digitalasset.canton.platform.store.dao.EventProjectionProperties import com.digitalasset.canton.platform.store.dao.events.LfValueTranslation.ApiContractData @@ -35,8 +34,8 @@ import com.digitalasset.canton.platform.{ Value as LfValue, } import com.digitalasset.canton.util.MonadUtil -import com.digitalasset.daml.lf.data.Ref.{FullIdentifier, Identifier, Party} -import com.digitalasset.daml.lf.data.{Bytes, Ref} +import com.digitalasset.daml.lf.data.Ref +import com.digitalasset.daml.lf.data.Ref.{FullIdentifier, Identifier} import com.digitalasset.daml.lf.engine as LfEngine import com.digitalasset.daml.lf.engine.Engine import com.digitalasset.daml.lf.transaction.* @@ -233,13 +232,14 @@ final class LfValueTranslation( ) ) - def deserializeRaw( + def deserializeRawExercised( eventProjectionProperties: EventProjectionProperties, - rawExercisedEvent: RawExercisedEvent, + rawExercisedEventEntry: Entry[RawExercisedEventLegacy], )(implicit ec: ExecutionContext, loggingContext: LoggingContextWithTrace, - ): Future[ExercisedEvent] = + ): Future[ExercisedEvent] = { + val rawExercisedEvent = rawExercisedEventEntry.event for { // Deserialize contract argument and contract key // This returns the values in Daml-LF format. @@ -260,7 +260,10 @@ final class LfValueTranslation( ) ) Ref.QualifiedChoiceId(interfaceId, choiceName) = - Ref.QualifiedChoiceId.assertFromString(rawExercisedEvent.exerciseChoice) + Ref.QualifiedChoiceId( + rawExercisedEvent.exerciseChoiceInterface, + rawExercisedEvent.exerciseChoice, + ) // Convert Daml-LF values to ledger API values. // In verbose mode, this involves loading Daml-LF packages and filling in missing type information. 
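// Editor's note (illustrative sketch, hypothetical values): with verbose = true the
// enricher restores labels, so an unlabeled LF record such as
// ValueRecord(None, ImmArray((None, ValueParty("alice")))) comes back with its
// field names filled in; with verbose = false labels stay empty and no package
// loading is triggered.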
choiceArgument <- toApiValue( @@ -292,8 +295,8 @@ final class LfValueTranslation( case None => Future.successful(None) } } yield ExercisedEvent( - offset = rawExercisedEvent.offset, - nodeId = rawExercisedEvent.nodeId, + offset = rawExercisedEventEntry.offset, + nodeId = rawExercisedEventEntry.nodeId, contractId = rawExercisedEvent.contractId.coid, templateId = Some( LfEngineToApi.toApiIdentifier(rawExercisedEvent.templateId.toIdentifier) @@ -319,14 +322,16 @@ final class LfValueTranslation( else Nil, acsDelta = rawExercisedEvent.flatEventWitnesses.nonEmpty, ) + } - def deserializeRaw( + def deserializeRawArchived( eventProjectionProperties: EventProjectionProperties, - rawArchivedEvent: RawArchivedEvent, - ): ArchivedEvent = + rawArchivedEventEntry: Entry[RawArchivedEventLegacy], + ): ArchivedEvent = { + val rawArchivedEvent = rawArchivedEventEntry.event ArchivedEvent( - offset = rawArchivedEvent.offset, - nodeId = rawArchivedEvent.nodeId, + offset = rawArchivedEventEntry.offset, + nodeId = rawArchivedEventEntry.nodeId, contractId = rawArchivedEvent.contractId.coid, templateId = Some( LfEngineToApi.toApiIdentifier(rawArchivedEvent.templateId.toIdentifier) @@ -339,137 +344,105 @@ final class LfValueTranslation( rawArchivedEvent.templateId, ), ) + } - def deserializeRaw( + def toApiCreatedEvent( eventProjectionProperties: EventProjectionProperties, - rawCreatedEvent: RawCreatedEvent, + fatContractInstance: FatContractInstance, + offset: Long, + nodeId: Int, + representativePackageId: LfPackageId, + witnesses: Set[String], + acsDelta: Boolean, )(implicit ec: ExecutionContext, loggingContext: LoggingContextWithTrace, ): Future[CreatedEvent] = { - def getFatContractInstance( - createArgument: VersionedValue, - createKey: Option[VersionedValue], - ): Either[String, FatContractInstance] = - for { - signatories <- rawCreatedEvent.signatories.toList.traverse(Party.fromString).map(_.toSet) - observers <- rawCreatedEvent.observers.toList.traverse(Party.fromString).map(_.toSet) - maintainers <- rawCreatedEvent.createKeyMaintainers.toList - .traverse(Party.fromString) - .map(_.toSet) - globalKey <- createKey - .traverse(key => - GlobalKey - .build( - rawCreatedEvent.templateId.toIdentifier, - key.unversioned, - rawCreatedEvent.templateId.pkgName, - ) - .left - .map(_.msg) - ) - } yield FatContractInstance.fromCreateNode( - Node.Create( - coid = rawCreatedEvent.contractId, - templateId = rawCreatedEvent.templateId.toIdentifier, - packageName = rawCreatedEvent.templateId.pkgName, - arg = createArgument.unversioned, - signatories = signatories, - stakeholders = signatories ++ observers, - keyOpt = globalKey.map(GlobalKeyWithMaintainers(_, maintainers)), - version = createArgument.version, - ), - createTime = CreationTime.CreatedAt(rawCreatedEvent.ledgerEffectiveTime), - authenticationData = Bytes.fromByteArray(rawCreatedEvent.authenticationData), - ) + val createArgument = fatContractInstance.createArg + val createKey = fatContractInstance.contractKeyWithMaintainers.map(_.globalKey.key) + + val representativeTemplateId = + fatContractInstance.templateId + .toFullIdentifier(fatContractInstance.packageName) + .copy(pkgId = representativePackageId) for { - createKey <- Future( - rawCreatedEvent.createKeyValue - .map( - decompressAndDeserialize( - Compression.Algorithm - .assertLookup(rawCreatedEvent.createKeyValueCompression), - _, - ) - ) - ) - createArgument <- Future( - decompressAndDeserialize( - Compression.Algorithm - .assertLookup(rawCreatedEvent.createArgumentCompression), - 
rawCreatedEvent.createArgument, - ) - ) apiContractData <- toApiContractData( value = createArgument, - key = createKey, - templateId = rawCreatedEvent.templateId, - witnesses = rawCreatedEvent.witnessParties, + keyO = createKey, + representativeTemplateId = representativeTemplateId, + witnesses = witnesses, eventProjectionProperties = eventProjectionProperties, - fatContractInstance = getFatContractInstance(createArgument, createKey), + fatContractInstance = fatContractInstance, ) } yield CreatedEvent( - offset = rawCreatedEvent.offset, - nodeId = rawCreatedEvent.nodeId, - contractId = rawCreatedEvent.contractId.coid, + offset = offset, + nodeId = nodeId, + contractId = fatContractInstance.contractId.coid, templateId = Some( - LfEngineToApi.toApiIdentifier(rawCreatedEvent.templateId.toIdentifier) + LfEngineToApi.toApiIdentifier(fatContractInstance.templateId) ), contractKey = apiContractData.contractKey, createArguments = Some(apiContractData.createArguments), createdEventBlob = apiContractData.createdEventBlob.getOrElse(ByteString.EMPTY), interfaceViews = apiContractData.interfaceViews, - witnessParties = rawCreatedEvent.witnessParties.toList, - signatories = rawCreatedEvent.signatories.toList, - observers = rawCreatedEvent.observers.toList, - createdAt = Some(TimestampConversion.fromLf(rawCreatedEvent.ledgerEffectiveTime)), - packageName = rawCreatedEvent.templateId.pkgName, - acsDelta = rawCreatedEvent.flatEventWitnesses.nonEmpty, + witnessParties = witnesses.toSeq, + signatories = fatContractInstance.signatories.toSeq, + observers = fatContractInstance.stakeholders.diff(fatContractInstance.signatories).toSeq, + createdAt = fatContractInstance.createdAt match { + case CreationTime.CreatedAt(t) => Some(TimestampConversion.fromLf(t)) + case _ => None + }, + packageName = fatContractInstance.packageName, + acsDelta = acsDelta, + representativePackageId = representativePackageId, ) } - def toApiContractData( - value: LfValue, - key: Option[VersionedValue], - templateId: FullIdentifier, + private def toApiContractData( + value: Value, + keyO: Option[Value], + representativeTemplateId: FullIdentifier, witnesses: Set[String], eventProjectionProperties: EventProjectionProperties, - fatContractInstance: => Either[String, FatContractInstance], + fatContractInstance: FatContractInstance, )(implicit ec: ExecutionContext, loggingContext: LoggingContextWithTrace, ): Future[ApiContractData] = { val renderResult = - eventProjectionProperties.render(witnesses, templateId.toNameTypeConRef) + eventProjectionProperties.render(witnesses, representativeTemplateId.toNameTypeConRef) val verbose = eventProjectionProperties.verbose def asyncContractArguments = enrichAsync( - verbose, - value.unversioned, - enricher.enrichContractValue(templateId.toIdentifier, _), + verbose = verbose, + value = value, + enrich = enricher.enrichContractValue(representativeTemplateId.toIdentifier, _), ) .map(toContractArgumentApi(verbose)) - @SuppressWarnings(Array("org.wartremover.warts.OptionPartial")) - def asyncContractKey = condFuture(key.isDefined)( - enrichAsync( - verbose, - key.get.unversioned, - enricher.enrichContractKey(templateId.toIdentifier, _), - ) - .map(toContractKeyApi(verbose)) - ) + def asyncContractKey = keyO match { + case None => Future.successful(None) + case Some(key) => + enrichAsync( + verbose = verbose, + value = key, + enrich = enricher.enrichContractKey(representativeTemplateId.toIdentifier, _), + ) + .map(toContractKeyApi(verbose)) + .map(Some(_)) + } + def asyncInterfaceViews = 
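// Editor's note (sketch, not part of this diff): representativeTemplateId keeps the
// contract's qualified name and package-name but swaps in the representative
// package id, so a contract created as pkgA:Mod:T rendered under
// representativePackageId pkgB is enriched and upgraded against pkgB:Mod:T.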
MonadUtil.sequentialTraverse(renderResult.interfaces.toList)(interfaceId => for { upgradedInstanceIdentifierResultE <- eventProjectionProperties.interfaceViewPackageUpgrade - .upgrade(interfaceId.toIdentifier, templateId.toIdentifier) + .upgrade(interfaceId.toIdentifier, representativeTemplateId.toIdentifier) viewResult <- upgradedInstanceIdentifierResultE.fold( failureStatus => Future.successful(Left(failureStatus)), upgradedInstanceIdentifier => computeInterfaceView( templateId = upgradedInstanceIdentifier, - value = value.unversioned, + value = value, interfaceId = interfaceId.toIdentifier, ), ) @@ -483,9 +456,8 @@ final class LfValueTranslation( def asyncCreatedEventBlob = condFuture(renderResult.createdEventBlob) { (for { - fatInstance <- fatContractInstance encoded <- TransactionCoder - .encodeFatContractInstance(fatInstance) + .encodeFatContractInstance(fatContractInstance) .left .map(_.errorMessage) } yield encoded).fold( diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/QueryValidRange.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/QueryValidRange.scala index bfa615a421..f732593bf3 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/QueryValidRange.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/QueryValidRange.scala @@ -5,15 +5,19 @@ package com.digitalasset.canton.platform.store.dao.events import com.digitalasset.canton.data.Offset import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors +import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext import com.digitalasset.canton.logging.{ ErrorLoggingContext, LoggingContextWithTrace, NamedLoggerFactory, NamedLogging, } -import com.digitalasset.canton.platform.store.backend.ParameterStorageBackend +import com.digitalasset.canton.platform.store.PruningOffsetService +import com.digitalasset.canton.platform.store.cache.LedgerEndCache +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.Thereafter.syntax.* -import java.sql.Connection +import scala.concurrent.{ExecutionContext, Future} trait QueryValidRange { def withRangeNotPruned[T]( @@ -21,25 +25,33 @@ trait QueryValidRange { maxOffsetInclusive: Offset, errorPruning: Offset => String, errorLedgerEnd: Option[Offset] => String, - )(query: => T)(implicit - conn: Connection, - loggingContext: LoggingContextWithTrace, - ): T + )(query: => Future[T])(implicit + loggingContext: LoggingContextWithTrace + ): Future[T] def withOffsetNotBeforePruning[T]( offset: Offset, errorPruning: Offset => String, errorLedgerEnd: Option[Offset] => String, - )(query: => T)(implicit - conn: Connection, - loggingContext: LoggingContextWithTrace, - ): T + )(query: => Future[T])(implicit + loggingContext: LoggingContextWithTrace + ): Future[T] + + def filterPrunedEvents[T](offset: T => Offset)( + events: Seq[T] + )(implicit + errorLoggingContext: ErrorLoggingContext, + traceContext: TraceContext, + ): Future[Seq[T]] } final case class QueryValidRangeImpl( - storageBackend: ParameterStorageBackend, - val loggerFactory: NamedLoggerFactory, + ledgerEndCache: LedgerEndCache, + pruningOffsetService: PruningOffsetService, + loggerFactory: NamedLoggerFactory, +)(implicit + ec: ExecutionContext ) extends QueryValidRange with NamedLogging { @@ -72,49 +84,49 @@ final case 
class QueryValidRangeImpl( maxOffsetInclusive: Offset, errorPruning: Offset => String, errorLedgerEnd: Option[Offset] => String, - )(query: => T)(implicit - conn: Connection, - loggingContext: LoggingContextWithTrace, - ): T = { + )(query: => Future[T])(implicit + loggingContext: LoggingContextWithTrace + ): Future[T] = { assert(Option(maxOffsetInclusive) >= minOffsetInclusive.decrement) - val result = query - val params = storageBackend.prunedUpToInclusiveAndLedgerEnd(conn) - - params.pruneUptoInclusive - .filter(_ >= minOffsetInclusive) - .foreach(pruningOffsetUpToInclusive => - throw RequestValidationErrors.ParticipantPrunedDataAccessed + val ledgerEnd = ledgerEndCache().map(_.lastOffset) + if (Option(maxOffsetInclusive) > ledgerEnd) { + Future.failed( + RequestValidationErrors.ParticipantDataAccessedAfterLedgerEnd .Reject( - cause = errorPruning(pruningOffsetUpToInclusive), - earliestOffset = pruningOffsetUpToInclusive.unwrap, + cause = errorLedgerEnd(ledgerEnd), + latestOffset = ledgerEnd.fold(0L)(_.unwrap), )( ErrorLoggingContext(logger, loggingContext) ) .asGrpcError ) - - if (Option(maxOffsetInclusive) > params.ledgerEnd) { - throw RequestValidationErrors.ParticipantDataAccessedAfterLedgerEnd - .Reject( - cause = errorLedgerEnd(params.ledgerEnd), - latestOffset = params.ledgerEnd.fold(0L)(_.unwrap), - )( - ErrorLoggingContext(logger, loggingContext) - ) - .asGrpcError - } - - result + } else + query.thereafterF(_ => + pruningOffsetService.pruningOffset + .map(pruningOffsetO => + pruningOffsetO + .filter(_ >= minOffsetInclusive) + .foreach(pruningOffsetUpToInclusive => + throw RequestValidationErrors.ParticipantPrunedDataAccessed + .Reject( + cause = errorPruning(pruningOffsetUpToInclusive), + earliestOffset = pruningOffsetUpToInclusive.unwrap, + )( + ErrorLoggingContext(logger, loggingContext) + ) + .asGrpcError + ) + ) + ) } override def withOffsetNotBeforePruning[T]( offset: Offset, errorPruning: Offset => String, errorLedgerEnd: Option[Offset] => String, - )(query: => T)(implicit - conn: Connection, - loggingContext: LoggingContextWithTrace, - ): T = + )(query: => Future[T])(implicit + loggingContext: LoggingContextWithTrace + ): Future[T] = withRangeNotPruned( // as the range not pruned forms a condition that the minOffsetInclusive is greater than the pruning offset, // by setting this to the offset + 1 we ensure that the offset is greater than or equal to the pruning offset. @@ -123,4 +135,43 @@ final case class QueryValidRangeImpl( errorPruning = errorPruning, errorLedgerEnd = errorLedgerEnd, )(query) + + /** Filters out events that are at or below the participant's pruning offset. 
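+   * Editor's sketch (illustrative numbers): for events at offsets [3, 7, 12], a
+   * pruning offset of 7 and a ledger end of 20, only the event at offset 12 is
+   * kept; an event with an offset beyond the ledger end fails the whole call
+   * instead of being silently dropped.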
+ * + * @param offset + * function to extract the offset from an event + * @param events + * the events to filter + * @tparam T + * the type of the events + * @return + * a future of the filtered events + */ + def filterPrunedEvents[T](offset: T => Offset)( + events: Seq[T] + )(implicit + errorLoggingContext: ErrorLoggingContext, + traceContext: TraceContext, + ): Future[Seq[T]] = { + val ledgerEnd = ledgerEndCache().map(_.lastOffset) + val beyondLedgerEndO = events.find(event => Option(offset(event)) > ledgerEnd) + beyondLedgerEndO match { + case Some(event) => + Future.failed( + RequestValidationErrors.ParticipantDataAccessedAfterLedgerEnd + .Reject( + cause = + s"Offset of event to be filtered ${offset(event)} is beyond ledger end $ledgerEnd", + latestOffset = ledgerEnd.fold(0L)(_.unwrap), + )(errorLoggingContext) + .asGrpcError + ) + case None => + pruningOffsetService.pruningOffset + .map(participantPrunedUpTo => + events.filter(event => Option(offset(event)) > participantPrunedUpTo) + ) + } + } + } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ReassignmentPointwiseReader.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ReassignmentPointwiseReader.scala index 78e9bad1bf..59e0802fb2 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ReassignmentPointwiseReader.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ReassignmentPointwiseReader.scala @@ -6,19 +6,28 @@ package com.digitalasset.canton.platform.store.dao.events import com.daml.ledger.api.v2.reassignment.Reassignment import com.daml.metrics.Timed import com.digitalasset.canton.concurrent.DirectExecutionContext +import com.digitalasset.canton.data.Offset +import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.LedgerApiServerMetrics +import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors.AbortedDueToShutdown +import com.digitalasset.canton.participant.store.ContractStore import com.digitalasset.canton.platform.store.backend.EventStorageBackend import com.digitalasset.canton.platform.store.backend.EventStorageBackend.SequentialIdBatch.IdRange import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{ Entry, - RawAssignEvent, - RawEvent, - RawReassignmentEvent, - RawUnassignEvent, + RawAssignEventLegacy, + RawReassignmentEventLegacy, + RawUnassignEventLegacy, } import com.digitalasset.canton.platform.store.dao.{DbDispatcher, EventProjectionProperties} -import com.digitalasset.canton.platform.{InternalEventFormat, Party, TemplatePartiesFilter} +import com.digitalasset.canton.platform.{ + FatContract, + InternalEventFormat, + Party, + TemplatePartiesFilter, +} +import com.digitalasset.canton.tracing.TraceContext import scala.concurrent.{ExecutionContext, Future} @@ -27,6 +36,8 @@ final class ReassignmentPointwiseReader( val eventStorageBackend: EventStorageBackend, val metrics: LedgerApiServerMetrics, val lfValueTranslation: LfValueTranslation, + val queryValidRange: QueryValidRange, + val contractStore: ContractStore, val loggerFactory: NamedLoggerFactory, )(implicit val ec: ExecutionContext) extends NamedLogging { @@ -40,22 +51,22 @@ final class
ReassignmentPointwiseReader( requestingParties: Option[Set[Party]], )(implicit loggingContext: LoggingContextWithTrace - ): Future[Vector[Entry[RawReassignmentEvent]]] = for { - assignEvents: Vector[Entry[RawReassignmentEvent]] <- + ): Future[Vector[Entry[RawReassignmentEventLegacy]]] = for { + assignEvents: Vector[Entry[RawReassignmentEventLegacy]] <- dbDispatcher.executeSql( - dbMetrics.reassignmentPointwise.fetchEventAssignPayloads + dbMetrics.reassignmentPointwise.fetchEventAssignPayloadsLegacy )( - eventStorageBackend.assignEventBatch( + eventStorageBackend.assignEventBatchLegacy( eventSeqIdRange, requestingParties, ) ) - unassignEvents: Vector[Entry[RawReassignmentEvent]] <- + unassignEvents: Vector[Entry[RawReassignmentEventLegacy]] <- dbDispatcher.executeSql( - dbMetrics.reassignmentPointwise.fetchEventUnassignPayloads + dbMetrics.reassignmentPointwise.fetchEventUnassignPayloadsLegacy )( - eventStorageBackend.unassignEventBatch( + eventStorageBackend.unassignEventBatchLegacy( eventSeqIdRange, requestingParties, ) @@ -66,7 +77,7 @@ final class ReassignmentPointwiseReader( } private def toApiAssigned(eventProjectionProperties: EventProjectionProperties)( - rawAssignEntries: Seq[Entry[RawAssignEvent]] + rawAssignEntries: Seq[(Entry[RawAssignEventLegacy], Option[FatContract])] )(implicit lc: LoggingContextWithTrace): Future[Option[Reassignment]] = Timed.future( future = Future.delegate { @@ -79,8 +90,8 @@ final class ReassignmentPointwiseReader( def entriesToReassignment( eventProjectionProperties: EventProjectionProperties - )( - rawReassignmentEntries: Seq[Entry[RawReassignmentEvent]] + )(rawReassignmentEntries: Seq[Entry[RawReassignmentEventLegacy]])( + contractsM: Map[Long, FatContract] )(implicit loggingContext: LoggingContextWithTrace, ec: ExecutionContext, @@ -88,35 +99,52 @@ final class ReassignmentPointwiseReader( assignO <- toApiAssigned(eventProjectionProperties)( rawReassignmentEntries.collect(entry => entry.event match { - case rawAssign: RawAssignEvent => entry.copy(event = rawAssign) + case rawAssign: RawAssignEventLegacy => + val fatContractO = contractsM.get(rawAssign.rawCreatedEvent.internalContractId) + entry.copy(event = rawAssign) -> fatContractO } ) ) unassignO = UpdateReader.toApiUnassigned( rawReassignmentEntries.collect(entry => entry.event match { - case rawUnassign: RawUnassignEvent => entry.copy(event = rawUnassign) + case rawUnassign: RawUnassignEventLegacy => entry.copy(event = rawUnassign) } ) ) } yield assignO.orElse(unassignO) - private def fetchAndFilterEvents[T <: RawEvent]( + private def fetchAndFilterEvents[T <: RawReassignmentEventLegacy]( fetchRawEvents: Future[Vector[Entry[T]]], templatePartiesFilter: TemplatePartiesFilter, - toResponse: Seq[Entry[T]] => Future[Option[Reassignment]], - ): Future[Option[Reassignment]] = - for { - // Fetching all events from the event sequential id range - rawEvents <- fetchRawEvents + toResponse: Seq[Entry[T]] => Map[Long, FatContract] => Future[Option[Reassignment]], + )(implicit traceContext: TraceContext): Future[Option[Reassignment]] = + // Fetching all events from the event sequential id range + fetchRawEvents // Filtering by template filters - filteredRawEvents = UpdateReader.filterRawEvents(templatePartiesFilter)(rawEvents) - // Deserialization of lf values - deserialized <- toResponse(filteredRawEvents) - } yield { - deserialized - } + .map(UpdateReader.filterRawEvents(templatePartiesFilter)) + // Checking if events are not pruned + .flatMap( + queryValidRange.filterPrunedEvents[Entry[T]](entry => 
Offset.tryFromLong(entry.offset)) + ) + .flatMap(rawPrunedEvents => + for { + // Fetching all contracts for the filtered assigned events + fatInstancesM <- contractStore + .lookupBatchedNonCached( + rawPrunedEvents.collect(_.event match { + case assign: RawAssignEventLegacy => assign.rawCreatedEvent.internalContractId + }) + ) + .map(_.view.mapValues(_.inst).toMap) + .failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError) + // Deserialization of lf values + deserialized <- toResponse(rawPrunedEvents)(fatInstancesM) + } yield { + deserialized + } + ) def lookupReassignmentBy( eventSeqIdRange: (Long, Long), @@ -129,7 +157,7 @@ final class ReassignmentPointwiseReader( val templatePartiesFilter = internalEventFormat.templatePartiesFilter val (firstEventSeqId, lastEventSeqId) = eventSeqIdRange - fetchAndFilterEvents[RawReassignmentEvent]( + fetchAndFilterEvents[RawReassignmentEventLegacy]( fetchRawEvents = fetchRawReassignmentEvents( eventSeqIdRange = IdRange(firstEventSeqId, lastEventSeqId), requestingParties = requestingParties, diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ReassignmentStreamReader.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ReassignmentStreamReader.scala index a8b11a8065..02bc01c6b6 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ReassignmentStreamReader.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ReassignmentStreamReader.scala @@ -10,16 +10,18 @@ import com.digitalasset.canton.data.Offset import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.LedgerApiServerMetrics -import com.digitalasset.canton.platform.TemplatePartiesFilter +import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors.AbortedDueToShutdown +import com.digitalasset.canton.participant.store.ContractStore import com.digitalasset.canton.platform.store.backend.EventStorageBackend import com.digitalasset.canton.platform.store.backend.EventStorageBackend.SequentialIdBatch.Ids import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{ Entry, - RawAssignEvent, - RawUnassignEvent, + RawAssignEventLegacy, + RawReassignmentEventLegacy, + RawUnassignEventLegacy, SequentialIdBatch, } -import com.digitalasset.canton.platform.store.dao.PaginatingAsyncStream.IdPaginationState +import com.digitalasset.canton.platform.store.dao.PaginatingAsyncStream.PaginationInput import com.digitalasset.canton.platform.store.dao.events.ReassignmentStreamReader.{ IdDbQuery, PayloadDbQuery, @@ -34,6 +36,7 @@ import com.digitalasset.canton.platform.store.utils.{ ConcurrencyLimiter, QueueBasedConcurrencyLimiter, } +import com.digitalasset.canton.platform.{FatContract, TemplatePartiesFilter} import com.digitalasset.canton.util.PekkoUtil.syntax.* import com.digitalasset.daml.lf.data.Ref import com.digitalasset.daml.lf.data.Ref.{NameTypeConRef, Party} @@ -52,6 +55,7 @@ class ReassignmentStreamReader( queryValidRange: QueryValidRange, eventStorageBackend: EventStorageBackend, lfValueTranslation: LfValueTranslation, + contractStore: ContractStore, metrics: LedgerApiServerMetrics, val loggerFactory: NamedLoggerFactory, )(implicit executionContext: ExecutionContext) 
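// Editor's note (sketch, not part of this diff): the new contractStore dependency
// hydrates assign-event payloads, which now carry only an internal contract id:
//
//   val ids = payloads.map(_.event).collect { case a: RawAssignEventLegacy =>
//     a.rawCreatedEvent.internalContractId
//   }
//   // lookupBatchedNonCached(ids) returns the stored contract instances; their
//   // .inst (a FatContract) is paired with each assign payload for deserialization.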
@@ -77,6 +81,7 @@ class ReassignmentStreamReader( new QueueBasedConcurrencyLimiter(maxParallelIdUnassignQueries, executionContext) def fetchIds( + streamName: String, maxParallelIdQueriesLimiter: QueueBasedConcurrencyLimiter, maxOutputBatchCount: Int, metric: DatabaseMetrics, @@ -84,26 +89,24 @@ ): Source[Iterable[Long], NotUsed] = decomposedFilters .map { filter => - paginatingAsyncStream.streamIdsFromSeekPagination( + paginatingAsyncStream.streamIdsFromSeekPaginationWithoutIdFilter( + idStreamName = s"Update IDs for $streamName $filter", idPageSizing = idPageSizing, idPageBufferSize = maxPagesPerIdPagesBuffer, initialFromIdExclusive = queryRange.startInclusiveEventSeqId, + initialEndInclusive = queryRange.endInclusiveEventSeqId, )( - fetchPage = (state: IdPaginationState) => { + idDbQuery.fetchIds( + stakeholder = filter.party, + templateIdO = filter.templateId, + ) + )( + executeIdQuery = f => maxParallelIdQueriesLimiter.execute { globalIdQueriesLimiter.execute { - dbDispatcher.executeSql(metric) { - idDbQuery.fetchIds( - stakeholder = filter.party, - templateIdO = filter.templateId, - startExclusive = state.fromIdExclusive, - endInclusive = queryRange.endInclusiveEventSeqId, - limit = state.pageSize, - ) - } + dbDispatcher.executeSql(metric)(f) } } - } ) } .pipe(EventIdsUtils.sortAndDeduplicateIds) @@ -112,12 +115,12 @@ maxBatchCount = maxOutputBatchCount, ) - def fetchPayloads[T]( + def fetchPayloads[T <: RawReassignmentEventLegacy]( ids: Source[Iterable[Long], NotUsed], maxParallelPayloadQueries: Int, dbMetric: DatabaseMetrics, payloadDbQuery: PayloadDbQuery[Entry[T]], - deserialize: Seq[Entry[T]] => Future[Option[Reassignment]], + deserialize: Seq[(Entry[T], Option[FatContract])] => Future[Option[Reassignment]], ): Source[Reassignment, NotUsed] = { // Pekko requires this buffer's size to be a power of two.
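// A self-contained sketch of the rounding this relies on (illustrative only;
// the real helper lives in Utils):
//
//   def largestSmallerOrEqualPowerOfTwo(n: Int): Int =
//     if (n < 1) 1 else Integer.highestOneBit(n)
//
//   largestSmallerOrEqualPowerOfTwo(48) // == 32, a valid Pekko buffer size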
val inputBufferSize = Utils.largestSmallerOrEqualPowerOfTwo(maxParallelPayloadQueries) @@ -126,28 +129,49 @@ class ReassignmentStreamReader( .mapAsync(maxParallelPayloadQueries)(ids => payloadQueriesLimiter.execute { globalPayloadQueriesLimiter.execute { - dbDispatcher.executeSql(dbMetric) { implicit connection => - queryValidRange.withRangeNotPruned( - minOffsetInclusive = queryRange.startInclusiveOffset, - maxOffsetInclusive = queryRange.endInclusiveOffset, - errorPruning = (prunedOffset: Offset) => - s"Reassignment request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} precedes pruned offset ${prunedOffset.unwrap}", - errorLedgerEnd = (ledgerEndOffset: Option[Offset]) => - s"Reassignment request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} is beyond ledger end offset ${ledgerEndOffset - .fold(0L)(_.unwrap)}", - ) { - payloadDbQuery.fetchPayloads( - eventSequentialIds = Ids(ids), - allFilterParties = filteringConstraints.allFilterParties, - )(connection) - } + queryValidRange.withRangeNotPruned( + minOffsetInclusive = queryRange.startInclusiveOffset, + maxOffsetInclusive = queryRange.endInclusiveOffset, + errorPruning = (prunedOffset: Offset) => + s"Reassignment request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} precedes pruned offset ${prunedOffset.unwrap}", + errorLedgerEnd = (ledgerEndOffset: Option[Offset]) => + s"Reassignment request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} is beyond ledger end offset ${ledgerEndOffset + .fold(0L)(_.unwrap)}", + ) { + dbDispatcher + .executeSql(dbMetric)( + payloadDbQuery.fetchPayloads( + eventSequentialIds = Ids(ids), + allFilterParties = filteringConstraints.allFilterParties, + ) + ) + .flatMap { payloads => + val internalContractIds = + payloads.map(_.event).collect { case assign: RawAssignEventLegacy => + assign.rawCreatedEvent.internalContractId + } + for { + contractsM <- contractStore + .lookupBatchedNonCached(internalContractIds) + .failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError) + } yield payloads.map { payload => + payload.event match { + case assign: RawAssignEventLegacy => + payload -> contractsM + .get(assign.rawCreatedEvent.internalContractId) + .map(_.inst) + case _: RawUnassignEventLegacy => + payload -> None + } + } + } } } } ) .mapConcat(identity) UpdateReader - .groupContiguous(serializedPayloads)(by = _.updateId) + .groupContiguous(serializedPayloads)(by = _._1.updateId) .mapAsync(deserializationProcessingParallelism)(t => deserializationQueriesLimiter.execute( deserialize(t) @@ -158,32 +182,34 @@ class ReassignmentStreamReader( val idsAssign = fetchIds( + streamName = "assigned events", maxParallelIdQueriesLimiter = assignedEventIdQueriesLimiter, maxOutputBatchCount = maxParallelPayloadAssignQueries + 1, - metric = dbMetrics.reassignmentStream.fetchEventAssignIdsStakeholder, - idDbQuery = eventStorageBackend.fetchAssignEventIdsForStakeholder, + metric = dbMetrics.reassignmentStream.fetchEventAssignIdsStakeholderLegacy, + idDbQuery = eventStorageBackend.fetchAssignEventIdsForStakeholderLegacy, ) val idsUnassign = fetchIds( + streamName = "unassigned events", maxParallelIdQueriesLimiter = unassignedEventIdQueriesLimiter, maxOutputBatchCount = maxParallelPayloadUnassignQueries + 1, - metric = dbMetrics.reassignmentStream.fetchEventUnassignIdsStakeholder, - idDbQuery = eventStorageBackend.fetchUnassignEventIdsForStakeholder, + metric = 
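// Illustrative sketch (not the actual Pekko-based UpdateReader helper) of the
// contiguous grouping applied above, which clusters adjacent
// (payload, contract) pairs sharing an update id so each update is
// deserialized as one unit:
//
//   def groupContiguous[A, K](xs: Vector[A])(by: A => K): Vector[Vector[A]] =
//     xs.foldRight(Vector.empty[Vector[A]]) {
//       case (a, group +: rest) if by(group.head) == by(a) => (a +: group) +: rest
//       case (a, acc)                                      => Vector(a) +: acc
//     }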
dbMetrics.reassignmentStream.fetchEventUnassignIdsStakeholderLegacy, + idDbQuery = eventStorageBackend.fetchUnassignEventIdsForStakeholderLegacy, ) val payloadsAssign = fetchPayloads( ids = idsAssign, maxParallelPayloadQueries = maxParallelPayloadAssignQueries, - dbMetric = dbMetrics.reassignmentStream.fetchEventAssignPayloads, - payloadDbQuery = eventStorageBackend.assignEventBatch, + dbMetric = dbMetrics.reassignmentStream.fetchEventAssignPayloadsLegacy, + payloadDbQuery = eventStorageBackend.assignEventBatchLegacy, deserialize = toApiAssigned(eventProjectionProperties), ) val payloadsUnassign = fetchPayloads( ids = idsUnassign, maxParallelPayloadQueries = maxParallelPayloadUnassignQueries, - dbMetric = dbMetrics.reassignmentStream.fetchEventUnassignPayloads, - payloadDbQuery = eventStorageBackend.unassignEventBatch, + dbMetric = dbMetrics.reassignmentStream.fetchEventUnassignPayloadsLegacy, + payloadDbQuery = eventStorageBackend.unassignEventBatchLegacy, deserialize = toApiUnassigned, ) @@ -193,15 +219,15 @@ class ReassignmentStreamReader( } private def toApiUnassigned( - rawUnassignEntries: Seq[Entry[RawUnassignEvent]] + rawUnassignEntries: Seq[(Entry[RawUnassignEventLegacy], Option[FatContract])] ): Future[Option[Reassignment]] = Timed.future( - future = Future.successful(UpdateReader.toApiUnassigned(rawUnassignEntries)), + future = Future.successful(UpdateReader.toApiUnassigned(rawUnassignEntries.map(_._1))), timer = dbMetrics.reassignmentStream.translationTimer, ) private def toApiAssigned(eventProjectionProperties: EventProjectionProperties)( - rawAssignEntries: Seq[Entry[RawAssignEvent]] + rawAssignEntries: Seq[(Entry[RawAssignEventLegacy], Option[FatContract])] )(implicit lc: LoggingContextWithTrace): Future[Option[Reassignment]] = Timed.future( future = Future.delegate { @@ -237,10 +263,7 @@ object ReassignmentStreamReader { def fetchIds( stakeholder: Option[Party], templateIdO: Option[NameTypeConRef], - startExclusive: Long, - endInclusive: Long, - limit: Int, - ): Connection => Vector[Long] + ): Connection => PaginationInput => Vector[Long] } @FunctionalInterface diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TopologyTransactionPointwiseReader.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TopologyTransactionPointwiseReader.scala index 82fbb61047..63359f468c 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TopologyTransactionPointwiseReader.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TopologyTransactionPointwiseReader.scala @@ -5,6 +5,7 @@ package com.digitalasset.canton.platform.store.dao.events import com.daml.ledger.api.v2.topology_transaction.TopologyTransaction import com.digitalasset.canton.ledger.api.TopologyFormat +import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.LedgerApiServerMetrics import com.digitalasset.canton.platform.Party @@ -13,6 +14,7 @@ import com.digitalasset.canton.platform.store.backend.EventStorageBackend.RawPar import com.digitalasset.canton.platform.store.backend.EventStorageBackend.SequentialIdBatch.IdRange import com.digitalasset.canton.platform.store.dao.DbDispatcher import 
com.digitalasset.canton.platform.store.dao.events.EventsTable.TransactionConversions +import com.digitalasset.canton.tracing.TraceContext import scala.concurrent.{ExecutionContext, Future} @@ -21,6 +23,7 @@ final class TopologyTransactionPointwiseReader( val eventStorageBackend: EventStorageBackend, val metrics: LedgerApiServerMetrics, val lfValueTranslation: LfValueTranslation, + val queryValidRange: QueryValidRange, val loggerFactory: NamedLoggerFactory, )(implicit val ec: ExecutionContext) extends NamedLogging { @@ -45,19 +48,19 @@ final class TopologyTransactionPointwiseReader( fetchRawEvents: Future[Vector[RawParticipantAuthorization]], requestingParties: Option[Set[Party]], // None is a party-wildcard toResponse: Vector[RawParticipantAuthorization] => Future[Option[TopologyTransaction]], - ): Future[Option[TopologyTransaction]] = - for { - // Fetching all events from the event sequential id range - rawEvents <- fetchRawEvents + )(implicit traceContext: TraceContext): Future[Option[TopologyTransaction]] = + // Fetching all events from the event sequential id range + fetchRawEvents // Filter out events that do not include the parties - filteredEvents = rawEvents.filter(event => - requestingParties.fold(true)(parties => parties.map(_.toString).contains(event.partyId)) + .map( + _.filter(event => + requestingParties.fold(true)(parties => parties.map(_.toString).contains(event.partyId)) + ) ) + // Checking if events are not pruned + .flatMap(queryValidRange.filterPrunedEvents[RawParticipantAuthorization](_.offset)) // Convert to api response - response <- toResponse(filteredEvents) - } yield { - response - } + .flatMap(filteredEventsPruned => toResponse(filteredEventsPruned.toVector)) def lookupTopologyTransaction( eventSeqIdRange: (Long, Long), diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TopologyTransactionsStreamReader.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TopologyTransactionsStreamReader.scala index e9c11e276f..7e0255ca7a 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TopologyTransactionsStreamReader.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TopologyTransactionsStreamReader.scala @@ -7,6 +7,7 @@ import com.daml.ledger.api.v2.topology_transaction.TopologyTransaction import com.daml.metrics.DatabaseMetrics import com.digitalasset.canton.data.Offset import com.digitalasset.canton.ledger.api.ParticipantAuthorizationFormat +import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.LedgerApiServerMetrics import com.digitalasset.canton.platform.Party @@ -16,7 +17,7 @@ import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{ RawParticipantAuthorization, SequentialIdBatch, } -import com.digitalasset.canton.platform.store.dao.PaginatingAsyncStream.IdPaginationState +import com.digitalasset.canton.platform.store.dao.PaginatingAsyncStream.PaginationInput import com.digitalasset.canton.platform.store.dao.events.EventsTable.TransactionConversions import com.digitalasset.canton.platform.store.dao.events.TopologyTransactionsStreamReader.{ IdDbQuery, @@ -77,25 +78,23 @@ class TopologyTransactionsStreamReader( } partiesO .map { 
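// The refactor just below (and the matching one in ReassignmentStreamReader)
// moves the pagination cursor out of the id queries: an IdDbQuery now closes
// over its filter and receives a PaginationInput per page. Sketch of the
// assumed shape (field names are hypothetical, inferred from the parameters
// the old fetchIds signature used to take):
//
//   final case class PaginationInput(startExclusive: Long, endInclusive: Long, limit: Int)
//
//   def fetchIds(stakeholder: Option[Party]): Connection => PaginationInput => Vector[Long]
//
// so streamIdsFromSeekPaginationWithoutIdFilter can advance startExclusive
// itself between pages, while the caller only supplies the SQL execution
// (executeIdQuery) wrapped in its concurrency limiters.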
partyO => - paginatingAsyncStream.streamIdsFromSeekPagination( + paginatingAsyncStream.streamIdsFromSeekPaginationWithoutIdFilter( + idStreamName = s"Update IDs for topology transaction events for partyO:$partyO", idPageSizing = idPageSizing, idPageBufferSize = maxPagesPerIdPagesBuffer, initialFromIdExclusive = queryRange.startInclusiveEventSeqId, + initialEndInclusive = queryRange.endInclusiveEventSeqId, )( - fetchPage = (state: IdPaginationState) => { + idDbQuery.fetchIds( + stakeholder = partyO + ) + )( + executeIdQuery = f => maxParallelIdQueriesLimiter.execute { globalIdQueriesLimiter.execute { - dbDispatcher.executeSql(metric) { - idDbQuery.fetchIds( - stakeholder = partyO, - startExclusive = state.fromIdExclusive, - endInclusive = queryRange.endInclusiveEventSeqId, - limit = state.pageSize, - ) - } + dbDispatcher.executeSql(metric)(f) } } - } ) } .pipe(EventIdsUtils.sortAndDeduplicateIds) @@ -118,18 +117,19 @@ class TopologyTransactionsStreamReader( .mapAsync(maxParallelPayloadQueries)(ids => payloadQueriesLimiter.execute { globalPayloadQueriesLimiter.execute { - dbDispatcher.executeSql(dbMetric) { implicit connection => - queryValidRange.withRangeNotPruned( - minOffsetInclusive = queryRange.startInclusiveOffset, - maxOffsetInclusive = queryRange.endInclusiveOffset, - errorPruning = (prunedOffset: Offset) => - s"Topology events request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} precedes pruned offset ${prunedOffset.unwrap}", - errorLedgerEnd = (ledgerEndOffset: Option[Offset]) => - s"Topology events request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} is beyond ledger end offset ${ledgerEndOffset - .fold(0L)(_.unwrap)}", - ) { - payloadDbQuery.fetchPayloads(eventSequentialIds = Ids(ids))(connection) - } + queryValidRange.withRangeNotPruned( + minOffsetInclusive = queryRange.startInclusiveOffset, + maxOffsetInclusive = queryRange.endInclusiveOffset, + errorPruning = (prunedOffset: Offset) => + s"Topology events request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} precedes pruned offset ${prunedOffset.unwrap}", + errorLedgerEnd = (ledgerEndOffset: Option[Offset]) => + s"Topology events request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} is beyond ledger end offset ${ledgerEndOffset + .fold(0L)(_.unwrap)}", + ) { + dbDispatcher.executeSql(dbMetric)( + payloadDbQuery.fetchPayloads(eventSequentialIds = Ids(ids)) + ) + } } } @@ -174,11 +174,8 @@ object TopologyTransactionsStreamReader { @FunctionalInterface trait IdDbQuery { def fetchIds( - stakeholder: Option[Party], - startExclusive: Long, - endInclusive: Long, - limit: Int, - ): Connection => Vector[Long] + stakeholder: Option[Party] + ): Connection => PaginationInput => Vector[Long] } @FunctionalInterface diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionLogUpdatesConversions.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionLogUpdatesConversions.scala index 0882c4da55..9be0e332b9 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionLogUpdatesConversions.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionLogUpdatesConversions.scala @@ -15,17 +15,8 @@ 
import com.daml.ledger.api.v2.reassignment.{ UnassignedEvent as ApiUnassignedEvent, } import com.daml.ledger.api.v2.topology_transaction.TopologyTransaction -import com.daml.ledger.api.v2.transaction.{ - Transaction as FlatTransaction, - TransactionTree, - TreeEvent, -} -import com.daml.ledger.api.v2.update_service.{ - GetTransactionTreeResponse, - GetUpdateResponse, - GetUpdateTreesResponse, - GetUpdatesResponse, -} +import com.daml.ledger.api.v2.transaction.Transaction as FlatTransaction +import com.daml.ledger.api.v2.update_service.{GetUpdateResponse, GetUpdatesResponse} import com.daml.nonempty.NonEmpty import com.digitalasset.canton.data.Offset import com.digitalasset.canton.ledger.api.TransactionShape.{AcsDelta, LedgerEffects} @@ -41,10 +32,10 @@ import com.digitalasset.canton.platform.store.interfaces.TransactionLogUpdate.{ CreatedEvent, ExercisedEvent, } -import com.digitalasset.canton.platform.store.utils.EventOps.TreeEventOps import com.digitalasset.canton.platform.{ InternalTransactionFormat, InternalUpdateFormat, + PackageId as LfPackageId, TemplatePartiesFilter, Value, } @@ -59,665 +50,415 @@ import com.digitalasset.daml.lf.transaction.{ FatContractInstance, GlobalKeyWithMaintainers, Node, - Versioned, } -import com.google.protobuf.ByteString -import scala.annotation.nowarn import scala.concurrent.{ExecutionContext, Future} private[events] object TransactionLogUpdatesConversions { - // TODO(i23504) flatten to the main object - object ToFlatTransaction { - def filter( - internalUpdateFormat: InternalUpdateFormat - ): TransactionLogUpdate => Option[TransactionLogUpdate] = { - case transaction: TransactionLogUpdate.TransactionAccepted => - internalUpdateFormat.includeTransactions.flatMap { transactionFormat => - val transactionEvents = transaction.events.collect { - case createdEvent: TransactionLogUpdate.CreatedEvent => createdEvent - case exercisedEvent: TransactionLogUpdate.ExercisedEvent - if exercisedEvent.consuming || transactionFormat.transactionShape == LedgerEffects => - exercisedEvent - } - val filteredEvents = transactionEvents - .filter(transactionPredicate(transactionFormat)) - - transactionFormat.transactionShape match { - case AcsDelta => - val commandId = getCommandId( - filteredEvents, - transactionFormat.internalEventFormat.templatePartiesFilter.allFilterParties, - ) - Option.when(filteredEvents.nonEmpty)( - transaction.copy( - commandId = commandId, - events = filteredEvents, - )(transaction.traceContext) - ) + def filter( + internalUpdateFormat: InternalUpdateFormat + ): TransactionLogUpdate => Option[TransactionLogUpdate] = { + case transaction: TransactionLogUpdate.TransactionAccepted => + internalUpdateFormat.includeTransactions.flatMap { transactionFormat => + val transactionEvents = transaction.events.collect { + case createdEvent: TransactionLogUpdate.CreatedEvent => createdEvent + case exercisedEvent: TransactionLogUpdate.ExercisedEvent + if exercisedEvent.consuming || transactionFormat.transactionShape == LedgerEffects => + exercisedEvent + } + val filteredEvents = transactionEvents + .filter(transactionPredicate(transactionFormat)) + + transactionFormat.transactionShape match { + case AcsDelta => + val commandId = getCommandId( + filteredEvents, + transactionFormat.internalEventFormat.templatePartiesFilter.allFilterParties, + ) + Option.when(filteredEvents.nonEmpty)( + transaction.copy( + commandId = commandId, + events = filteredEvents, + )(transaction.traceContext) + ) - case LedgerEffects => - Option.when(filteredEvents.nonEmpty)( - 
transaction.copy( - events = filteredEvents - )(transaction.traceContext) - ) - } + case LedgerEffects => + Option.when(filteredEvents.nonEmpty)( + transaction.copy( + events = filteredEvents + )(transaction.traceContext) + ) } + } - case _: TransactionLogUpdate.TransactionRejected => None - - case u: TransactionLogUpdate.ReassignmentAccepted => - internalUpdateFormat.includeReassignments.flatMap { reassignmentFormat => - val filteredReassignments = u.reassignment.iterator.filter { r => - partiesMatchFilter( - reassignmentFormat.templatePartiesFilter, - u.reassignment.iterator - .map(r => r.templateId.toFullIdentifier(r.packageName).toNameTypeConRef) - .toSet, - )(r.stakeholders) - } - NonEmpty - .from(filteredReassignments.toSeq) - .map(rs => u.copy(reassignment = Reassignment.Batch(rs))(u.traceContext)) + case _: TransactionLogUpdate.TransactionRejected => None + + case u: TransactionLogUpdate.ReassignmentAccepted => + internalUpdateFormat.includeReassignments.flatMap { reassignmentFormat => + val filteredReassignments = u.reassignment.iterator.filter { r => + partiesMatchFilter( + reassignmentFormat.templatePartiesFilter, + u.reassignment.iterator + .map(r => r.templateId.toFullIdentifier(r.packageName).toNameTypeConRef) + .toSet, + )(r.stakeholders) } + NonEmpty + .from(filteredReassignments.toSeq) + .map(rs => u.copy(reassignment = Reassignment.Batch(rs))(u.traceContext)) + } - case u: TransactionLogUpdate.TopologyTransactionEffective => - internalUpdateFormat.includeTopologyEvents - .flatMap(_.participantAuthorizationFormat) - .flatMap { participantAuthorizationFormat => - val filteredEvents = - u.events.filter(topologyEventPredicate(participantAuthorizationFormat)) - Option.when(filteredEvents.nonEmpty)( - u.copy(events = filteredEvents)(u.traceContext) - ) - } - } + case u: TransactionLogUpdate.TopologyTransactionEffective => + internalUpdateFormat.includeTopologyEvents + .flatMap(_.participantAuthorizationFormat) + .flatMap { participantAuthorizationFormat => + val filteredEvents = + u.events.filter(topologyEventPredicate(participantAuthorizationFormat)) + Option.when(filteredEvents.nonEmpty)( + u.copy(events = filteredEvents)(u.traceContext) + ) + } + } - def toGetUpdatesResponse( - internalUpdateFormat: InternalUpdateFormat, - lfValueTranslation: LfValueTranslation, - )(implicit - loggingContext: LoggingContextWithTrace, - executionContext: ExecutionContext, - ): TransactionLogUpdate => Future[GetUpdatesResponse] = { - case transactionAccepted: TransactionLogUpdate.TransactionAccepted => - val internalTransactionFormat = internalUpdateFormat.includeTransactions - .getOrElse( - throw new IllegalStateException( - "Transaction cannot be converted as there is no transaction format specified in update format" - ) + def toGetUpdatesResponse( + internalUpdateFormat: InternalUpdateFormat, + lfValueTranslation: LfValueTranslation, + )(implicit + loggingContext: LoggingContextWithTrace, + executionContext: ExecutionContext, + ): TransactionLogUpdate => Future[GetUpdatesResponse] = { + case transactionAccepted: TransactionLogUpdate.TransactionAccepted => + val internalTransactionFormat = internalUpdateFormat.includeTransactions + .getOrElse( + throw new IllegalStateException( + "Transaction cannot be converted as there is no transaction format specified in update format" ) - toTransaction( - transactionAccepted, - internalTransactionFormat, - lfValueTranslation, - transactionAccepted.traceContext, ) - .map(transaction => - 
GetUpdatesResponse(GetUpdatesResponse.Update.Transaction(transaction)) - .withPrecomputedSerializedSize() - ) + toTransaction( + transactionAccepted, + internalTransactionFormat, + lfValueTranslation, + transactionAccepted.traceContext, + ) + .map(transaction => + GetUpdatesResponse(GetUpdatesResponse.Update.Transaction(transaction)) + .withPrecomputedSerializedSize() + ) - case reassignmentAccepted: TransactionLogUpdate.ReassignmentAccepted => - val reassignmentInternalEventFormat = internalUpdateFormat.includeReassignments - .getOrElse( - throw new IllegalStateException( - "Reassignment cannot be converted as there is no reassignment specified in update format" - ) + case reassignmentAccepted: TransactionLogUpdate.ReassignmentAccepted => + val reassignmentInternalEventFormat = internalUpdateFormat.includeReassignments + .getOrElse( + throw new IllegalStateException( + "Reassignment cannot be converted as there is no reassignment specified in update format" ) - toReassignment( - reassignmentAccepted, - reassignmentInternalEventFormat.templatePartiesFilter.allFilterParties, - reassignmentInternalEventFormat.eventProjectionProperties, - lfValueTranslation, - reassignmentAccepted.traceContext, ) - .map(reassignment => - GetUpdatesResponse(GetUpdatesResponse.Update.Reassignment(reassignment)) - .withPrecomputedSerializedSize() - ) - - case topologyTransaction: TransactionLogUpdate.TopologyTransactionEffective => - toTopologyTransaction(topologyTransaction).map(transaction => - GetUpdatesResponse(GetUpdatesResponse.Update.TopologyTransaction(transaction)) + toReassignment( + reassignmentAccepted, + reassignmentInternalEventFormat.templatePartiesFilter.allFilterParties, + reassignmentInternalEventFormat.eventProjectionProperties, + lfValueTranslation, + reassignmentAccepted.traceContext, + ) + .map(reassignment => + GetUpdatesResponse(GetUpdatesResponse.Update.Reassignment(reassignment)) .withPrecomputedSerializedSize() ) - case illegal => throw new IllegalStateException(s"$illegal is not expected here") - } + case topologyTransaction: TransactionLogUpdate.TopologyTransactionEffective => + toTopologyTransaction(topologyTransaction).map(transaction => + GetUpdatesResponse(GetUpdatesResponse.Update.TopologyTransaction(transaction)) + .withPrecomputedSerializedSize() + ) - def toGetUpdateResponse( - transactionLogUpdate: TransactionLogUpdate, - internalUpdateFormat: InternalUpdateFormat, - lfValueTranslation: LfValueTranslation, - )(implicit - loggingContext: LoggingContextWithTrace, - executionContext: ExecutionContext, - ): Future[Option[GetUpdateResponse]] = - filter(internalUpdateFormat)(transactionLogUpdate) - .collect { - case transactionAccepted: TransactionLogUpdate.TransactionAccepted => - val internalTransactionFormat = internalUpdateFormat.includeTransactions - .getOrElse( - throw new IllegalStateException( - "Transaction cannot be converted as there is no transaction format specified in update format" - ) - ) - toTransaction( - transactionAccepted, - internalTransactionFormat, - lfValueTranslation, - transactionAccepted.traceContext, - ) - .map(transaction => - GetUpdateResponse(GetUpdateResponse.Update.Transaction(transaction)) - .withPrecomputedSerializedSize() - ) + case illegal => throw new IllegalStateException(s"$illegal is not expected here") + } - case reassignmentAccepted: TransactionLogUpdate.ReassignmentAccepted => - val reassignmentInternalEventFormat = internalUpdateFormat.includeReassignments - .getOrElse( - throw new IllegalStateException( - "Reassignment cannot be 
converted as there is no reassignment specified in update format" - ) + def toGetUpdateResponse( + transactionLogUpdate: TransactionLogUpdate, + internalUpdateFormat: InternalUpdateFormat, + lfValueTranslation: LfValueTranslation, + )(implicit + loggingContext: LoggingContextWithTrace, + executionContext: ExecutionContext, + ): Future[Option[GetUpdateResponse]] = + filter(internalUpdateFormat)(transactionLogUpdate) + .collect { + case transactionAccepted: TransactionLogUpdate.TransactionAccepted => + val internalTransactionFormat = internalUpdateFormat.includeTransactions + .getOrElse( + throw new IllegalStateException( + "Transaction cannot be converted as there is no transaction format specified in update format" ) - toReassignment( - reassignmentAccepted, - reassignmentInternalEventFormat.templatePartiesFilter.allFilterParties, - reassignmentInternalEventFormat.eventProjectionProperties, - lfValueTranslation, - reassignmentAccepted.traceContext, ) - .map(reassignment => - GetUpdateResponse(GetUpdateResponse.Update.Reassignment(reassignment)) - .withPrecomputedSerializedSize() - ) - - case topologyTransaction: TransactionLogUpdate.TopologyTransactionEffective => - toTopologyTransaction(topologyTransaction).map(transaction => - GetUpdateResponse(GetUpdateResponse.Update.TopologyTransaction(transaction)) + toTransaction( + transactionAccepted, + internalTransactionFormat, + lfValueTranslation, + transactionAccepted.traceContext, + ) + .map(transaction => + GetUpdateResponse(GetUpdateResponse.Update.Transaction(transaction)) .withPrecomputedSerializedSize() ) - } - .map(_.map(Some(_))) - .getOrElse(Future.successful(None)) - - private def toTransaction( - transactionAccepted: TransactionLogUpdate.TransactionAccepted, - internalTransactionFormat: InternalTransactionFormat, - lfValueTranslation: LfValueTranslation, - traceContext: TraceContext, - )(implicit - loggingContext: LoggingContextWithTrace, - executionContext: ExecutionContext, - ): Future[FlatTransaction] = - Future.delegate { - MonadUtil - .sequentialTraverse(transactionAccepted.events)(event => - toEvent( - event, - internalTransactionFormat, - lfValueTranslation, + + case reassignmentAccepted: TransactionLogUpdate.ReassignmentAccepted => + val reassignmentInternalEventFormat = internalUpdateFormat.includeReassignments + .getOrElse( + throw new IllegalStateException( + "Reassignment cannot be converted as there is no reassignment specified in update format" + ) ) + toReassignment( + reassignmentAccepted, + reassignmentInternalEventFormat.templatePartiesFilter.allFilterParties, + reassignmentInternalEventFormat.eventProjectionProperties, + lfValueTranslation, + reassignmentAccepted.traceContext, ) - .map(events => - FlatTransaction( - updateId = transactionAccepted.updateId, - commandId = transactionAccepted.commandId, - workflowId = transactionAccepted.workflowId, - effectiveAt = Some(TimestampConversion.fromLf(transactionAccepted.effectiveAt)), - events = events, - offset = transactionAccepted.offset.unwrap, - synchronizerId = transactionAccepted.synchronizerId, - traceContext = SerializableTraceContext(traceContext).toDamlProtoOpt, - recordTime = Some(TimestampConversion.fromLf(transactionAccepted.recordTime)), - externalTransactionHash = transactionAccepted.externalTransactionHash.map(_.unwrap), + .map(reassignment => + GetUpdateResponse(GetUpdateResponse.Update.Reassignment(reassignment)) + .withPrecomputedSerializedSize() ) + + case topologyTransaction: TransactionLogUpdate.TopologyTransactionEffective => + 
toTopologyTransaction(topologyTransaction).map(transaction => + GetUpdateResponse(GetUpdateResponse.Update.TopologyTransaction(transaction)) + .withPrecomputedSerializedSize() ) } + .map(_.map(Some(_))) + .getOrElse(Future.successful(None)) - private def transactionPredicate( - transactionFormat: InternalTransactionFormat - )(event: TransactionLogUpdate.Event): Boolean = - partiesMatchFilter( - transactionFormat.internalEventFormat.templatePartiesFilter, - Set(event.templateId.toFullIdentifier(event.packageName).toNameTypeConRef), - )(event.witnesses(transactionFormat.transactionShape)) - - private def topologyEventPredicate( - participantAuthorizationFormat: ParticipantAuthorizationFormat - )(event: TransactionLogUpdate.PartyToParticipantAuthorization): Boolean = - matchPartyInSet(event.party)(participantAuthorizationFormat.parties) - - private def toEvent( - event: TransactionLogUpdate.Event, - internalTransactionFormat: InternalTransactionFormat, - lfValueTranslation: LfValueTranslation, - )(implicit - loggingContext: LoggingContextWithTrace, - executionContext: ExecutionContext, - ): Future[apiEvent.Event] = { - val requestingParties = - internalTransactionFormat.internalEventFormat.templatePartiesFilter.allFilterParties - - event match { - case createdEvent: TransactionLogUpdate.CreatedEvent => - createdToApiCreatedEvent( - requestingParties, - internalTransactionFormat.internalEventFormat.eventProjectionProperties, - lfValueTranslation, - createdEvent, - _.witnesses(internalTransactionFormat.transactionShape), - ).map(apiCreatedEvent => apiEvent.Event(apiEvent.Event.Event.Created(apiCreatedEvent))) - - case exercisedEvent: TransactionLogUpdate.ExercisedEvent => - exercisedToEvent( - requestingParties, - exercisedEvent, - internalTransactionFormat.transactionShape, - internalTransactionFormat.internalEventFormat.eventProjectionProperties, + private def toTransaction( + transactionAccepted: TransactionLogUpdate.TransactionAccepted, + internalTransactionFormat: InternalTransactionFormat, + lfValueTranslation: LfValueTranslation, + traceContext: TraceContext, + )(implicit + loggingContext: LoggingContextWithTrace, + executionContext: ExecutionContext, + ): Future[FlatTransaction] = + Future.delegate { + MonadUtil + .sequentialTraverse(transactionAccepted.events)(event => + toEvent( + event, + internalTransactionFormat, lfValueTranslation, ) - } + ) + .map(events => + FlatTransaction( + updateId = transactionAccepted.updateId, + commandId = transactionAccepted.commandId, + workflowId = transactionAccepted.workflowId, + effectiveAt = Some(TimestampConversion.fromLf(transactionAccepted.effectiveAt)), + events = events, + offset = transactionAccepted.offset.unwrap, + synchronizerId = transactionAccepted.synchronizerId, + traceContext = SerializableTraceContext(traceContext).toDamlProtoOpt, + recordTime = Some(TimestampConversion.fromLf(transactionAccepted.recordTime)), + externalTransactionHash = transactionAccepted.externalTransactionHash.map(_.unwrap), + ) + ) } - private def partiesMatchFilter( - filter: TemplatePartiesFilter, - templateIds: Set[NameTypeConRef], - )(parties: Set[Party]) = { - val matchesByWildcard: Boolean = - filter.templateWildcardParties match { - case Some(include) => parties.exists(p => include(p)) - case None => parties.nonEmpty // the witnesses should not be empty - } + private def transactionPredicate( + transactionFormat: InternalTransactionFormat + )(event: TransactionLogUpdate.Event): Boolean = + partiesMatchFilter( + 
transactionFormat.internalEventFormat.templatePartiesFilter, + Set(event.templateId.toFullIdentifier(event.packageName).toNameTypeConRef), + )(event.witnesses(transactionFormat.transactionShape)) + + private def topologyEventPredicate( + participantAuthorizationFormat: ParticipantAuthorizationFormat + )(event: TransactionLogUpdate.PartyToParticipantAuthorization): Boolean = + matchPartyInSet(event.party)(participantAuthorizationFormat.parties) + + private def toEvent( + event: TransactionLogUpdate.Event, + internalTransactionFormat: InternalTransactionFormat, + lfValueTranslation: LfValueTranslation, + )(implicit + loggingContext: LoggingContextWithTrace, + executionContext: ExecutionContext, + ): Future[apiEvent.Event] = { + val requestingParties = + internalTransactionFormat.internalEventFormat.templatePartiesFilter.allFilterParties - def matchesByTemplateId(templateId: NameTypeConRef): Boolean = - filter.relation.get(templateId) match { - case Some(Some(include)) => parties.exists(include) - case Some(None) => parties.nonEmpty // party wildcard - case None => false // templateId is not in the filter - } + event match { + case createdEvent: TransactionLogUpdate.CreatedEvent => + createdToApiCreatedEvent( + requestingParties, + internalTransactionFormat.internalEventFormat.eventProjectionProperties, + lfValueTranslation, + createdEvent, + _.witnesses(internalTransactionFormat.transactionShape), + ).map(apiCreatedEvent => apiEvent.Event(apiEvent.Event.Event.Created(apiCreatedEvent))) - matchesByWildcard || templateIds.exists(matchesByTemplateId) + case exercisedEvent: TransactionLogUpdate.ExercisedEvent => + exercisedToEvent( + requestingParties, + exercisedEvent, + internalTransactionFormat.transactionShape, + internalTransactionFormat.internalEventFormat.eventProjectionProperties, + lfValueTranslation, + ) } + } - private def exercisedToEvent( - requestingParties: Option[Set[Party]], - exercisedEvent: ExercisedEvent, - transactionShape: TransactionShape, - eventProjectionProperties: EventProjectionProperties, - lfValueTranslation: LfValueTranslation, - )(implicit - loggingContext: LoggingContextWithTrace, - executionContext: ExecutionContext, - ): Future[apiEvent.Event] = - transactionShape match { - case AcsDelta if !exercisedEvent.consuming => - Future.failed( - new IllegalStateException( - "Non consuming exercise cannot be rendered for ACS delta shape" - ) - ) + private def partiesMatchFilter( + filter: TemplatePartiesFilter, + templateIds: Set[NameTypeConRef], + )(parties: Set[Party]) = { + val matchesByWildcard: Boolean = + filter.templateWildcardParties match { + case Some(include) => parties.exists(p => include(p)) + case None => parties.nonEmpty // the witnesses should not be empty + } - case AcsDelta => - val witnessParties = requestingParties match { - case Some(parties) => - parties.iterator.filter(exercisedEvent.flatEventWitnesses).toSeq - // party-wildcard - case None => exercisedEvent.flatEventWitnesses.toSeq - } - Future.successful( - apiEvent.Event( - apiEvent.Event.Event.Archived( - apiEvent.ArchivedEvent( - offset = exercisedEvent.eventOffset.unwrap, - nodeId = exercisedEvent.nodeId, - contractId = exercisedEvent.contractId.coid, - templateId = Some(LfEngineToApi.toApiIdentifier(exercisedEvent.templateId)), - packageName = exercisedEvent.packageName, - witnessParties = witnessParties, - implementedInterfaces = lfValueTranslation.implementedInterfaces( - eventProjectionProperties, - witnessParties.toSet, - exercisedEvent.templateId.toFullIdentifier( - 
exercisedEvent.packageName - ), - ), - ) - ) - ) - ) + def matchesByTemplateId(templateId: NameTypeConRef): Boolean = + filter.relation.get(templateId) match { + case Some(Some(include)) => parties.exists(include) + case Some(None) => parties.nonEmpty // party wildcard + case None => false // templateId is not in the filter + } - case LedgerEffects => - val choiceArgumentEnricher = (value: Value) => - lfValueTranslation.enricher - .enrichChoiceArgument( - exercisedEvent.templateId, - exercisedEvent.interfaceId, - Ref.Name.assertFromString(exercisedEvent.choice), - value.unversioned, - ) + matchesByWildcard || templateIds.exists(matchesByTemplateId) + } - val eventualChoiceArgument = lfValueTranslation.toApiValue( - exercisedEvent.exerciseArgument, - eventProjectionProperties.verbose, - "exercise argument", - choiceArgumentEnricher, + private def exercisedToEvent( + requestingParties: Option[Set[Party]], + exercisedEvent: ExercisedEvent, + transactionShape: TransactionShape, + eventProjectionProperties: EventProjectionProperties, + lfValueTranslation: LfValueTranslation, + )(implicit + loggingContext: LoggingContextWithTrace, + executionContext: ExecutionContext, + ): Future[apiEvent.Event] = + transactionShape match { + case AcsDelta if !exercisedEvent.consuming => + Future.failed( + new IllegalStateException( + "Non consuming exercise cannot be rendered for ACS delta shape" ) + ) - val eventualExerciseResult = exercisedEvent.exerciseResult - .map { exerciseResult => - val choiceResultEnricher = (value: Value) => - lfValueTranslation.enricher.enrichChoiceResult( - exercisedEvent.templateId, - exercisedEvent.interfaceId, - Ref.Name.assertFromString(exercisedEvent.choice), - value.unversioned, - ) - - lfValueTranslation - .toApiValue( - value = exerciseResult, - verbose = eventProjectionProperties.verbose, - attribute = "exercise result", - enrich = choiceResultEnricher, - ) - .map(Some(_)) - } - .getOrElse(Future.successful(None)) - - for { - choiceArgument <- eventualChoiceArgument - maybeExerciseResult <- eventualExerciseResult - witnessParties = requestingParties - .fold(exercisedEvent.treeEventWitnesses)( - _.filter(exercisedEvent.treeEventWitnesses) - ) - .toSeq - flatEventWitnesses = requestingParties - .fold(exercisedEvent.flatEventWitnesses)( - _.filter(exercisedEvent.flatEventWitnesses) - ) - .toSeq - } yield apiEvent.Event( - apiEvent.Event.Event.Exercised( - apiEvent.ExercisedEvent( + case AcsDelta => + val witnessParties = requestingParties match { + case Some(parties) => + parties.iterator.filter(exercisedEvent.flatEventWitnesses).toSeq + // party-wildcard + case None => exercisedEvent.flatEventWitnesses.toSeq + } + Future.successful( + apiEvent.Event( + apiEvent.Event.Event.Archived( + apiEvent.ArchivedEvent( offset = exercisedEvent.eventOffset.unwrap, nodeId = exercisedEvent.nodeId, contractId = exercisedEvent.contractId.coid, templateId = Some(LfEngineToApi.toApiIdentifier(exercisedEvent.templateId)), packageName = exercisedEvent.packageName, - interfaceId = exercisedEvent.interfaceId.map(LfEngineToApi.toApiIdentifier), - choice = exercisedEvent.choice, - choiceArgument = Some(choiceArgument), - actingParties = exercisedEvent.actingParties.toSeq, - consuming = exercisedEvent.consuming, witnessParties = witnessParties, - lastDescendantNodeId = exercisedEvent.lastDescendantNodeId, - exerciseResult = maybeExerciseResult, - implementedInterfaces = - if (exercisedEvent.consuming) - lfValueTranslation.implementedInterfaces( - eventProjectionProperties, - witnessParties.toSet, - 
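// Worked example of the partiesMatchFilter rules above (parties and template
// ids are placeholders): with templateWildcardParties = Some(Set(alice)) and
// relation = Map(tplA -> Some(Set(bob)), tplB -> None):
//
//   witnesses {alice}, any template  -> matched via the wildcard parties
//   witnesses {bob},   ids {tplA}    -> matched via the relation entry
//   witnesses {bob},   ids {tplB}    -> matched (party wildcard for tplB)
//   witnesses {carol}, ids {tplA}    -> not matched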
exercisedEvent.templateId.toFullIdentifier( - exercisedEvent.packageName - ), - ) - else Nil, - acsDelta = flatEventWitnesses.nonEmpty, + implementedInterfaces = lfValueTranslation.implementedInterfaces( + eventProjectionProperties, + witnessParties.toSet, + exercisedEvent.templateId.toFullIdentifier( + exercisedEvent.packageName + ), + ), ) ) ) - } - } - - // TODO(i23504) remove - @nowarn("cat=deprecation") - object ToTransactionTree { - def filter( - requestingParties: Option[Set[Party]] - ): TransactionLogUpdate => Option[TransactionLogUpdate] = { - case transaction: TransactionLogUpdate.TransactionAccepted => - val filteredForVisibility = - transaction.events.filter(transactionTreePredicate(requestingParties)) - - Option.when(filteredForVisibility.nonEmpty)( - transaction.copy(events = filteredForVisibility)(transaction.traceContext) - ) - case _: TransactionLogUpdate.TransactionRejected => None - case u: TransactionLogUpdate.ReassignmentAccepted => - Option.when( - requestingParties.fold(true)(u.stakeholders.exists(_)) - )(u) - case _: TransactionLogUpdate.TopologyTransactionEffective => None - } - - def toGetTransactionResponse( - transactionLogUpdate: TransactionLogUpdate, - requestingParties: Set[Party], - eventProjectionProperties: EventProjectionProperties, - lfValueTranslation: LfValueTranslation, - )(implicit - loggingContext: LoggingContextWithTrace, - executionContext: ExecutionContext, - ): Future[Option[GetTransactionTreeResponse]] = - filter(Some(requestingParties))(transactionLogUpdate) - .collect { case tx: TransactionLogUpdate.TransactionAccepted => - toTransactionTree( - transactionAccepted = tx, - Some(requestingParties), - eventProjectionProperties = eventProjectionProperties, - lfValueTranslation = lfValueTranslation, - traceContext = tx.traceContext, - ) - } - .map(_.map(transactionTree => Some(GetTransactionTreeResponse(Some(transactionTree))))) - .getOrElse(Future.successful(None)) - - def toGetTransactionTreesResponse( - requestingParties: Option[Set[Party]], - eventProjectionProperties: EventProjectionProperties, - lfValueTranslation: LfValueTranslation, - )(implicit - loggingContext: LoggingContextWithTrace, - executionContext: ExecutionContext, - ): TransactionLogUpdate => Future[GetUpdateTreesResponse] = { - case transactionAccepted: TransactionLogUpdate.TransactionAccepted => - toTransactionTree( - transactionAccepted, - requestingParties, - eventProjectionProperties, - lfValueTranslation, - transactionAccepted.traceContext, - ) - .map(txTree => - GetUpdateTreesResponse(GetUpdateTreesResponse.Update.TransactionTree(txTree)) - .withPrecomputedSerializedSize() - ) - case reassignmentAccepted: TransactionLogUpdate.ReassignmentAccepted => - toReassignment( - reassignmentAccepted, - requestingParties, - eventProjectionProperties, - lfValueTranslation, - reassignmentAccepted.traceContext, ) - .map(reassignment => - GetUpdateTreesResponse(GetUpdateTreesResponse.Update.Reassignment(reassignment)) - .withPrecomputedSerializedSize() - ) - - case illegal => throw new IllegalStateException(s"$illegal is not expected here") - } - private def toTransactionTree( - transactionAccepted: TransactionLogUpdate.TransactionAccepted, - requestingParties: Option[Set[Party]], - eventProjectionProperties: EventProjectionProperties, - lfValueTranslation: LfValueTranslation, - traceContext: TraceContext, - )(implicit - loggingContext: LoggingContextWithTrace, - executionContext: ExecutionContext, - ): Future[TransactionTree] = - Future.delegate { - MonadUtil - 
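// MonadUtil.sequentialTraverse is used throughout these conversions to keep
// event deserialization strictly ordered. Illustrative sketch of such a
// sequential traversal (assumed behaviour, not the actual implementation):
//
//   def sequentialTraverse[A, B](xs: Seq[A])(f: A => Future[B])(
//       implicit ec: ExecutionContext
//   ): Future[Vector[B]] =
//     xs.foldLeft(Future.successful(Vector.empty[B])) { (acc, a) =>
//       acc.flatMap(bs => f(a).map(bs :+ _))
//     }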
.sequentialTraverse(transactionAccepted.events)(event => - toTransactionTreeEvent( - requestingParties, - eventProjectionProperties, - lfValueTranslation, - )(event) - ) - .map { treeEvents => - val eventsById = treeEvents.iterator - .map(e => e.nodeId -> e) - .toMap - - TransactionTree( - updateId = transactionAccepted.updateId, - commandId = getCommandId(transactionAccepted.events, requestingParties), - workflowId = transactionAccepted.workflowId, - effectiveAt = Some(TimestampConversion.fromLf(transactionAccepted.effectiveAt)), - offset = transactionAccepted.offset.unwrap, - eventsById = eventsById, - synchronizerId = transactionAccepted.synchronizerId, - traceContext = SerializableTraceContext(traceContext).toDamlProtoOpt, - recordTime = Some(TimestampConversion.fromLf(transactionAccepted.recordTime)), - ) - } - } - - private def toTransactionTreeEvent( - requestingParties: Option[Set[Party]], - eventProjectionProperties: EventProjectionProperties, - lfValueTranslation: LfValueTranslation, - )(event: TransactionLogUpdate.Event)(implicit - loggingContext: LoggingContextWithTrace, - executionContext: ExecutionContext, - ): Future[TreeEvent] = - event match { - case createdEvent: TransactionLogUpdate.CreatedEvent => - createdToApiCreatedEvent( - requestingParties, - eventProjectionProperties, - lfValueTranslation, - createdEvent, - _.treeEventWitnesses, - ).map(apiCreatedEvent => TreeEvent(TreeEvent.Kind.Created(apiCreatedEvent))) - - case exercisedEvent: TransactionLogUpdate.ExercisedEvent => - exercisedToTransactionTreeEvent( - requestingParties, - eventProjectionProperties, - lfValueTranslation, - exercisedEvent, - ) - } - - private def exercisedToTransactionTreeEvent( - requestingParties: Option[Set[Party]], - eventProjectionProperties: EventProjectionProperties, - lfValueTranslation: LfValueTranslation, - exercisedEvent: ExercisedEvent, - )(implicit - loggingContext: LoggingContextWithTrace, - executionContext: ExecutionContext, - ) = { - val choiceArgumentEnricher = (value: Value) => - lfValueTranslation.enricher - .enrichChoiceArgument( - exercisedEvent.templateId, - exercisedEvent.interfaceId, - Ref.Name.assertFromString(exercisedEvent.choice), - value.unversioned, - ) - - val eventualChoiceArgument = lfValueTranslation.toApiValue( - exercisedEvent.exerciseArgument, - eventProjectionProperties.verbose, - "exercise argument", - choiceArgumentEnricher, - ) - - val eventualExerciseResult = exercisedEvent.exerciseResult - .map { exerciseResult => - val choiceResultEnricher = (value: Value) => - lfValueTranslation.enricher.enrichChoiceResult( + case LedgerEffects => + val choiceArgumentEnricher = (value: Value) => + lfValueTranslation.enricher + .enrichChoiceArgument( exercisedEvent.templateId, exercisedEvent.interfaceId, Ref.Name.assertFromString(exercisedEvent.choice), value.unversioned, ) - lfValueTranslation - .toApiValue( - value = exerciseResult, - verbose = eventProjectionProperties.verbose, - attribute = "exercise result", - enrich = choiceResultEnricher, + val eventualChoiceArgument = lfValueTranslation.toApiValue( + exercisedEvent.exerciseArgument, + eventProjectionProperties.verbose, + "exercise argument", + choiceArgumentEnricher, + ) + + val eventualExerciseResult = exercisedEvent.exerciseResult + .map { exerciseResult => + val choiceResultEnricher = (value: Value) => + lfValueTranslation.enricher.enrichChoiceResult( + exercisedEvent.templateId, + exercisedEvent.interfaceId, + Ref.Name.assertFromString(exercisedEvent.choice), + value.unversioned, + ) + + lfValueTranslation 
+ .toApiValue( + value = exerciseResult, + verbose = eventProjectionProperties.verbose, + attribute = "exercise result", + enrich = choiceResultEnricher, + ) + .map(Some(_)) + } + .getOrElse(Future.successful(None)) + + for { + choiceArgument <- eventualChoiceArgument + maybeExerciseResult <- eventualExerciseResult + witnessParties = requestingParties + .fold(exercisedEvent.treeEventWitnesses)( + _.filter(exercisedEvent.treeEventWitnesses) + ) + .toSeq + flatEventWitnesses = requestingParties + .fold(exercisedEvent.flatEventWitnesses)( + _.filter(exercisedEvent.flatEventWitnesses) + ) + .toSeq + } yield apiEvent.Event( + apiEvent.Event.Event.Exercised( + apiEvent.ExercisedEvent( + offset = exercisedEvent.eventOffset.unwrap, + nodeId = exercisedEvent.nodeId, + contractId = exercisedEvent.contractId.coid, + templateId = Some(LfEngineToApi.toApiIdentifier(exercisedEvent.templateId)), + packageName = exercisedEvent.packageName, + interfaceId = exercisedEvent.interfaceId.map(LfEngineToApi.toApiIdentifier), + choice = exercisedEvent.choice, + choiceArgument = Some(choiceArgument), + actingParties = exercisedEvent.actingParties.toSeq, + consuming = exercisedEvent.consuming, + witnessParties = witnessParties, + lastDescendantNodeId = exercisedEvent.lastDescendantNodeId, + exerciseResult = maybeExerciseResult, + implementedInterfaces = + if (exercisedEvent.consuming) + lfValueTranslation.implementedInterfaces( + eventProjectionProperties, + witnessParties.toSet, + exercisedEvent.templateId.toFullIdentifier( + exercisedEvent.packageName + ), + ) + else Nil, + acsDelta = flatEventWitnesses.nonEmpty, ) - .map(Some(_)) - } - .getOrElse(Future.successful(None)) - - for { - choiceArgument <- eventualChoiceArgument - maybeExerciseResult <- eventualExerciseResult - witnessParties = requestingParties - .fold(exercisedEvent.treeEventWitnesses)( - _.filter(exercisedEvent.treeEventWitnesses) - ) - .toSeq - flatEventWitnesses = requestingParties - .fold(exercisedEvent.treeEventWitnesses)( - _.filter(exercisedEvent.treeEventWitnesses) - ) - .toSeq - } yield TreeEvent( - TreeEvent.Kind.Exercised( - apiEvent.ExercisedEvent( - offset = exercisedEvent.eventOffset.unwrap, - nodeId = exercisedEvent.nodeId, - contractId = exercisedEvent.contractId.coid, - templateId = Some(LfEngineToApi.toApiIdentifier(exercisedEvent.templateId)), - packageName = exercisedEvent.packageName, - interfaceId = exercisedEvent.interfaceId.map(LfEngineToApi.toApiIdentifier), - choice = exercisedEvent.choice, - choiceArgument = Some(choiceArgument), - actingParties = exercisedEvent.actingParties.toSeq, - consuming = exercisedEvent.consuming, - witnessParties = witnessParties, - lastDescendantNodeId = exercisedEvent.lastDescendantNodeId, - exerciseResult = maybeExerciseResult, - implementedInterfaces = - if (exercisedEvent.consuming) - lfValueTranslation.implementedInterfaces( - eventProjectionProperties, - witnessParties.toSet, - exercisedEvent.templateId.toFullIdentifier(exercisedEvent.packageName), - ) - else Nil, - acsDelta = flatEventWitnesses.nonEmpty, ) ) - ) } - private def transactionTreePredicate( - requestingPartiesO: Option[Set[Party]] - ): TransactionLogUpdate.Event => Boolean = { - case createdEvent: CreatedEvent => - requestingPartiesO.fold(true)(_.exists(createdEvent.treeEventWitnesses)) - case exercised: ExercisedEvent => - requestingPartiesO.fold(true)(_.exists(exercised.treeEventWitnesses)) - case _ => false - } - } - private def createdToApiCreatedEvent( requestingPartiesO: Option[Set[Party]], eventProjectionProperties: 
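// In the hunk below, createdToApiCreatedEvent stops assembling the API event
// field by field: it builds the fat contract once and delegates rendering
// (arguments, key, interface views, created-event blob) to
// lfValueTranslation.toApiCreatedEvent. The construction it keeps:
//
//   val fatContractInstance: FatContractInstance =
//     FatContractInstance.fromCreateNode(
//       create,                                      // the create node
//       CreationTime.CreatedAt(ledgerEffectiveTime), // creation time
//       authenticationData,                          // contract auth data
//     )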
EventProjectionProperties, @@ -749,6 +490,7 @@ private[events] object TransactionLogUpdatesConversions { offset = createdEvent.eventOffset, nodeId = createdEvent.nodeId, authenticationData = createdEvent.authenticationData, + representativePackageId = createdEvent.representativePackageId, createdEventWitnesses = createdWitnesses(createdEvent), flatEventWitnesses = createdEvent.flatEventWitnesses, ) @@ -763,6 +505,7 @@ private[events] object TransactionLogUpdatesConversions { offset: Offset, nodeId: Int, authenticationData: Bytes, + representativePackageId: LfPackageId, createdEventWitnesses: Set[Party], flatEventWitnesses: Set[Party], )(implicit @@ -770,13 +513,11 @@ private[events] object TransactionLogUpdatesConversions { executionContext: ExecutionContext, ): Future[apiEvent.CreatedEvent] = { - def getFatContractInstance: Right[Nothing, FatContractInstance] = - Right( - FatContractInstance.fromCreateNode( - create, - CreationTime.CreatedAt(ledgerEffectiveTime), - authenticationData, - ) + val fatContractInstance: FatContractInstance = + FatContractInstance.fromCreateNode( + create, + CreationTime.CreatedAt(ledgerEffectiveTime), + authenticationData, ) val witnesses = requestingPartiesO @@ -786,33 +527,15 @@ private[events] object TransactionLogUpdatesConversions { val acsDelta = requestingPartiesO.fold(flatEventWitnesses.view)(_.view.filter(flatEventWitnesses)).nonEmpty - lfValueTranslation - .toApiContractData( - value = Versioned(create.version, create.arg), - key = create.keyOpt.map(k => Versioned(create.version, k.value)), - templateId = create.templateId.toFullIdentifier(create.packageName), - witnesses = witnesses, - eventProjectionProperties = eventProjectionProperties, - fatContractInstance = getFatContractInstance, - ) - .map(apiContractData => - apiEvent.CreatedEvent( - offset = offset.unwrap, - nodeId = nodeId, - contractId = create.coid.coid, - templateId = Some(LfEngineToApi.toApiIdentifier(create.templateId)), - packageName = create.packageName, - contractKey = apiContractData.contractKey, - createArguments = Some(apiContractData.createArguments), - createdEventBlob = apiContractData.createdEventBlob.getOrElse(ByteString.EMPTY), - interfaceViews = apiContractData.interfaceViews, - witnessParties = witnesses.toSeq, - signatories = create.signatories.toSeq, - observers = create.stakeholders.diff(create.signatories).toSeq, - createdAt = Some(TimestampConversion.fromLf(ledgerEffectiveTime)), - acsDelta = acsDelta, - ) - ) + lfValueTranslation.toApiCreatedEvent( + eventProjectionProperties = eventProjectionProperties, + fatContractInstance = fatContractInstance, + offset = offset.unwrap, + nodeId = nodeId, + representativePackageId = representativePackageId, + witnesses = witnesses, + acsDelta = acsDelta, + ) } private def matchPartyInSet(party: Party)(optSet: Option[Set[Party]]) = @@ -857,6 +580,8 @@ private[events] object TransactionLogUpdatesConversions { offset = reassignmentAccepted.offset, nodeId = assigned.nodeId, authenticationData = assigned.contractAuthenticationData, + // TODO(#27872): Use the assignment representative package ID when available + representativePackageId = assigned.createNode.templateId.packageId, createdEventWitnesses = assigned.createNode.stakeholders, flatEventWitnesses = assigned.createNode.stakeholders, ).map(createdEvent => diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionPointwiseReader.scala 
b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionPointwiseReader.scala index 0f32a0b4eb..2864f8baf3 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionPointwiseReader.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionPointwiseReader.scala @@ -4,171 +4,50 @@ package com.digitalasset.canton.platform.store.dao.events import com.daml.ledger.api.v2.event.Event -import com.daml.ledger.api.v2.transaction.{Transaction, TreeEvent} -import com.daml.ledger.api.v2.update_service.GetTransactionTreeResponse +import com.daml.ledger.api.v2.transaction.Transaction +import com.daml.metrics.Timed import com.daml.metrics.api.MetricHandle -import com.daml.metrics.{DatabaseMetrics, Timed} import com.digitalasset.canton.concurrent.DirectExecutionContext +import com.digitalasset.canton.data.Offset import com.digitalasset.canton.ledger.api.TransactionShape +import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.LedgerApiServerMetrics +import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors.AbortedDueToShutdown +import com.digitalasset.canton.participant.store.ContractStore import com.digitalasset.canton.platform.store.backend.EventStorageBackend import com.digitalasset.canton.platform.store.backend.EventStorageBackend.SequentialIdBatch.IdRange import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{ Entry, - RawEvent, - RawFlatEvent, - RawTreeEvent, + RawAcsDeltaEventLegacy, + RawCreatedEventLegacy, + RawEventLegacy, + RawLedgerEffectsEventLegacy, } -import com.digitalasset.canton.platform.store.backend.common.UpdatePointwiseQueries.LookupKey import com.digitalasset.canton.platform.store.backend.common.{ - EventPayloadSourceForUpdatesAcsDelta, - EventPayloadSourceForUpdatesLedgerEffects, + EventPayloadSourceForUpdatesAcsDeltaLegacy, + EventPayloadSourceForUpdatesLedgerEffectsLegacy, } -import com.digitalasset.canton.platform.store.dao.events.EventsTable.TransactionConversions import com.digitalasset.canton.platform.store.dao.events.EventsTable.TransactionConversions.toTransaction import com.digitalasset.canton.platform.store.dao.{DbDispatcher, EventProjectionProperties} -import com.digitalasset.canton.platform.{InternalTransactionFormat, Party, TemplatePartiesFilter} +import com.digitalasset.canton.platform.{ + FatContract, + InternalTransactionFormat, + Party, + TemplatePartiesFilter, +} +import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.MonadUtil -import java.sql.Connection -import scala.annotation.nowarn import scala.concurrent.{ExecutionContext, Future} -// TODO(#23504) cleanup -sealed trait TransactionPointwiseReaderLegacy { - type EventT - type RawEventT <: RawEvent - type RespT - - def dbDispatcher: DbDispatcher - def eventStorageBackend: EventStorageBackend - def lfValueTranslation: LfValueTranslation - val metrics: LedgerApiServerMetrics - val dbMetric: DatabaseMetrics - val directEC: DirectExecutionContext - implicit def ec: ExecutionContext - - protected val dbMetrics: metrics.index.db.type = metrics.index.db - - protected def fetchTransaction( - firstEventSequentialId: Long, - lastEventSequentialId: Long, - requestingParties: Set[Party], 
- )(connection: Connection): Vector[EventStorageBackend.Entry[RawEventT]] - - protected def deserializeEntry( - eventProjectionProperties: EventProjectionProperties, - lfValueTranslation: LfValueTranslation, - )( - entry: Entry[RawEventT] - )(implicit - loggingContext: LoggingContextWithTrace, - ec: ExecutionContext, - ): Future[Entry[EventT]] - - protected def toTransactionResponse( - events: Seq[Entry[EventT]] - ): Option[RespT] - - final def lookupTransactionBy( - lookupKey: LookupKey, - requestingParties: Set[Party], - eventProjectionProperties: EventProjectionProperties, - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[RespT]] = { - val requestingPartiesStrings: Set[String] = requestingParties.toSet[String] - for { - // Fetching event sequential id range corresponding to the requested transaction id - eventSeqIdRangeO <- dbDispatcher.executeSql(dbMetric)( - eventStorageBackend.updatePointwiseQueries - .fetchIdsFromUpdateMeta( - lookupKey = lookupKey - ) - ) - response <- eventSeqIdRangeO match { - case Some((firstEventSeqId, lastEventSeqId)) => - for { - // Fetching all events from the event sequential id range - rawEvents <- dbDispatcher.executeSql(dbMetric)( - fetchTransaction( - firstEventSequentialId = firstEventSeqId, - lastEventSequentialId = lastEventSeqId, - requestingParties = requestingParties, - ) - ) - // Filtering by requesting parties - filteredRawEvents = rawEvents.filter( - _.event.witnessParties.exists(requestingPartiesStrings) - ) - // Deserialization of lf values - deserialized <- Timed.value( - timer = dbMetric.translationTimer, - value = Future.delegate { - implicit val ec: ExecutionContext = - directEC // Scala 2 implicit scope override: shadow the outer scope's implicit by name - MonadUtil.sequentialTraverse(filteredRawEvents)( - deserializeEntry(eventProjectionProperties, lfValueTranslation) - ) - }, - ) - } yield { - // Conversion to API response type - toTransactionResponse(deserialized) - } - case None => Future.successful[Option[RespT]](None) - } - } yield response - } -} - -// TODO(#23504) remove when TransactionTrees are removed -@nowarn("cat=deprecation") -final class TransactionTreePointwiseReader( - override val dbDispatcher: DbDispatcher, - override val eventStorageBackend: EventStorageBackend, - override val metrics: LedgerApiServerMetrics, - override val lfValueTranslation: LfValueTranslation, - override val loggerFactory: NamedLoggerFactory, -)(implicit val ec: ExecutionContext) - extends TransactionPointwiseReaderLegacy - with NamedLogging { - - override type EventT = TreeEvent - override type RawEventT = RawTreeEvent - override type RespT = GetTransactionTreeResponse - - override val dbMetric: DatabaseMetrics = dbMetrics.lookupTransactionTreeById - override val directEC: DirectExecutionContext = DirectExecutionContext(logger) - - override protected def fetchTransaction( - firstEventSequentialId: Long, - lastEventSequentialId: Long, - requestingParties: Set[Party], - )(connection: Connection): Vector[Entry[RawEventT]] = - eventStorageBackend.updatePointwiseQueries.fetchTreeTransactionEvents( - firstEventSequentialId = firstEventSequentialId, - lastEventSequentialId = lastEventSequentialId, - requestingParties = Some(requestingParties), - )(connection) - - override protected def toTransactionResponse(events: Seq[Entry[EventT]]): Option[RespT] = - TransactionConversions.toGetTransactionTreeResponse(events) - - override protected def deserializeEntry( - eventProjectionProperties: EventProjectionProperties, - lfValueTranslation: 
LfValueTranslation, - )(entry: Entry[RawTreeEvent])(implicit - loggingContext: LoggingContextWithTrace, - ec: ExecutionContext, - ): Future[Entry[TreeEvent]] = - UpdateReader.deserializeTreeEvent(eventProjectionProperties, lfValueTranslation)(entry) -} - final class TransactionPointwiseReader( val dbDispatcher: DbDispatcher, val eventStorageBackend: EventStorageBackend, val metrics: LedgerApiServerMetrics, val lfValueTranslation: LfValueTranslation, + val queryValidRange: QueryValidRange, + val contractStore: ContractStore, val loggerFactory: NamedLoggerFactory, )(implicit val ec: ExecutionContext) extends NamedLogging { @@ -177,18 +56,18 @@ final class TransactionPointwiseReader( val directEC: DirectExecutionContext = DirectExecutionContext(logger) - private def fetchRawFlatEvents( + private def fetchRawAcsDeltaEvents( firstEventSequentialId: Long, lastEventSequentialId: Long, requestingParties: Option[Set[Party]], )(implicit loggingContext: LoggingContextWithTrace - ): Future[Vector[EventStorageBackend.Entry[RawFlatEvent]]] = for { + ): Future[Vector[EventStorageBackend.Entry[RawAcsDeltaEventLegacy]]] = for { createEvents <- dbDispatcher.executeSql( - dbMetrics.updatesAcsDeltaPointwise.fetchEventCreatePayloads + dbMetrics.updatesAcsDeltaPointwise.fetchEventCreatePayloadsLegacy )( - eventStorageBackend.fetchEventPayloadsAcsDelta(target = - EventPayloadSourceForUpdatesAcsDelta.Create + eventStorageBackend.fetchEventPayloadsAcsDeltaLegacy(target = + EventPayloadSourceForUpdatesAcsDeltaLegacy.Create )( eventSequentialIds = IdRange(firstEventSequentialId, lastEventSequentialId), requestingParties = requestingParties, @@ -197,10 +76,10 @@ final class TransactionPointwiseReader( consumingEvents <- dbDispatcher.executeSql( - dbMetrics.updatesAcsDeltaPointwise.fetchEventConsumingPayloads + dbMetrics.updatesAcsDeltaPointwise.fetchEventConsumingPayloadsLegacy )( - eventStorageBackend.fetchEventPayloadsAcsDelta(target = - EventPayloadSourceForUpdatesAcsDelta.Consuming + eventStorageBackend.fetchEventPayloadsAcsDeltaLegacy(target = + EventPayloadSourceForUpdatesAcsDeltaLegacy.Consuming )( eventSequentialIds = (IdRange(firstEventSequentialId, lastEventSequentialId)), requestingParties = requestingParties, @@ -211,19 +90,19 @@ final class TransactionPointwiseReader( (createEvents ++ consumingEvents).sortBy(_.eventSequentialId) } - private def fetchRawTreeEvents( + private def fetchRawLedgerEffectsEvents( firstEventSequentialId: Long, lastEventSequentialId: Long, requestingParties: Option[Set[Party]], )(implicit loggingContext: LoggingContextWithTrace - ): Future[Vector[EventStorageBackend.Entry[RawTreeEvent]]] = for { + ): Future[Vector[EventStorageBackend.Entry[RawLedgerEffectsEventLegacy]]] = for { createEvents <- dbDispatcher.executeSql( - dbMetrics.updatesAcsDeltaPointwise.fetchEventConsumingPayloads + dbMetrics.updatesAcsDeltaPointwise.fetchEventConsumingPayloadsLegacy )( - eventStorageBackend.fetchEventPayloadsLedgerEffects(target = - EventPayloadSourceForUpdatesLedgerEffects.Create + eventStorageBackend.fetchEventPayloadsLedgerEffectsLegacy(target = + EventPayloadSourceForUpdatesLedgerEffectsLegacy.Create )( eventSequentialIds = IdRange(firstEventSequentialId, lastEventSequentialId), requestingParties = requestingParties, @@ -232,10 +111,10 @@ final class TransactionPointwiseReader( consumingEvents <- dbDispatcher.executeSql( - dbMetrics.updatesAcsDeltaPointwise.fetchEventConsumingPayloads + dbMetrics.updatesAcsDeltaPointwise.fetchEventConsumingPayloadsLegacy )( - 
eventStorageBackend.fetchEventPayloadsLedgerEffects(target = - EventPayloadSourceForUpdatesLedgerEffects.Consuming + eventStorageBackend.fetchEventPayloadsLedgerEffectsLegacy(target = + EventPayloadSourceForUpdatesLedgerEffectsLegacy.Consuming )( eventSequentialIds = IdRange(firstEventSequentialId, lastEventSequentialId), requestingParties = requestingParties, @@ -244,10 +123,10 @@ nonConsumingEvents <- dbDispatcher.executeSql( - dbMetrics.updatesAcsDeltaPointwise.fetchEventConsumingPayloads + dbMetrics.updatesAcsDeltaPointwise.fetchEventConsumingPayloadsLegacy )( - eventStorageBackend.fetchEventPayloadsLedgerEffects(target = - EventPayloadSourceForUpdatesLedgerEffects.NonConsuming + eventStorageBackend.fetchEventPayloadsLedgerEffectsLegacy(target = + EventPayloadSourceForUpdatesLedgerEffectsLegacy.NonConsuming )( eventSequentialIds = IdRange(firstEventSequentialId, lastEventSequentialId), requestingParties = requestingParties, @@ -261,44 +140,70 @@ private def deserializeEntryAcsDelta( eventProjectionProperties: EventProjectionProperties, lfValueTranslation: LfValueTranslation, - )(entry: Entry[RawFlatEvent])(implicit + )(entry: (Entry[RawAcsDeltaEventLegacy], Option[FatContract]))(implicit loggingContext: LoggingContextWithTrace, ec: ExecutionContext, ): Future[Entry[Event]] = - UpdateReader.deserializeRawFlatEvent(eventProjectionProperties, lfValueTranslation)(entry) + UpdateReader.deserializeRawAcsDeltaEvent(eventProjectionProperties, lfValueTranslation)(entry) private def deserializeEntryLedgerEffects( eventProjectionProperties: EventProjectionProperties, lfValueTranslation: LfValueTranslation, - )(entry: Entry[RawTreeEvent])(implicit + )(entry: (Entry[RawLedgerEffectsEventLegacy], Option[FatContract]))(implicit loggingContext: LoggingContextWithTrace, ec: ExecutionContext, ): Future[Entry[Event]] = - UpdateReader.deserializeRawTreeEvent(eventProjectionProperties, lfValueTranslation)(entry) + UpdateReader.deserializeRawLedgerEffectsEvent(eventProjectionProperties, lfValueTranslation)( + entry + ) - private def fetchAndFilterEvents[T <: RawEvent]( + private def fetchAndFilterEvents[T <: RawEventLegacy]( fetchRawEvents: Future[Vector[Entry[T]]], templatePartiesFilter: TemplatePartiesFilter, - deserializeEntry: Entry[T] => Future[Entry[Event]], + deserializeEntry: ((Entry[T], Option[FatContract])) => Future[Entry[Event]], timer: MetricHandle.Timer, - ): Future[Seq[Entry[Event]]] = - for { - // Fetching all events from the event sequential id range - rawEvents <- fetchRawEvents + )(implicit traceContext: TraceContext): Future[Seq[Entry[Event]]] = + // Fetching all events from the event sequential id range + fetchRawEvents // Filtering by template filters - filteredRawEvents = UpdateReader.filterRawEvents(templatePartiesFilter)(rawEvents) - // Deserialization of lf values - deserialized <- Timed.future( - timer = timer, - future = Future.delegate { - implicit val ec: ExecutionContext = - directEC // Scala 2 implicit scope override: shadow the outer scope's implicit by name - MonadUtil.sequentialTraverse(filteredRawEvents)(deserializeEntry) - }, + .map(UpdateReader.filterRawEvents(templatePartiesFilter)) + // Filtering out pruned events + .flatMap( + queryValidRange.filterPrunedEvents[Entry[T]](entry => Offset.tryFromLong(entry.offset)) + ) + .flatMap(rawEventsPruned => + for { + // Fetching all contracts for the filtered created events + contractsM <- contractStore + .lookupBatchedNonCached(
rawEventsPruned.collect(_.event match { + case created: RawCreatedEventLegacy => created.internalContractId + }) + ) + .map(_.view.mapValues(_.inst).toMap) + .failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError) + // Deserialization of lf values + deserialized <- + Timed.future( + timer = timer, + future = Future.delegate { + implicit val ec: ExecutionContext = + directEC // Scala 2 implicit scope override: shadow the outer scope's implicit by name + MonadUtil.sequentialTraverse(rawEventsPruned)(entry => + entry.event match { + case created: RawCreatedEventLegacy => + val fatContractO = contractsM.get(created.internalContractId) + deserializeEntry(entry -> fatContractO) + case _ => + deserializeEntry(entry -> None) + } + ) + }, + ) + } yield { + deserialized + } ) - } yield { - deserialized - } def lookupTransactionBy( eventSeqIdRange: (Long, Long), @@ -316,7 +221,7 @@ final class TransactionPointwiseReader( val events = txShape match { case TransactionShape.AcsDelta => fetchAndFilterEvents( - fetchRawEvents = fetchRawFlatEvents( + fetchRawEvents = fetchRawAcsDeltaEvents( firstEventSequentialId = firstEventSeqId, lastEventSequentialId = lastEventSeqId, requestingParties = requestingParties, @@ -328,7 +233,7 @@ final class TransactionPointwiseReader( ) case TransactionShape.LedgerEffects => fetchAndFilterEvents( - fetchRawEvents = fetchRawTreeEvents( + fetchRawEvents = fetchRawLedgerEffectsEvents( firstEventSequentialId = firstEventSeqId, lastEventSequentialId = lastEventSeqId, requestingParties = requestingParties, diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionsTreeStreamReader.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionsTreeStreamReader.scala deleted file mode 100644 index a71d7488e2..0000000000 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionsTreeStreamReader.scala +++ /dev/null @@ -1,376 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
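The reworked fetchAndFilterEvents above no longer deserializes raw rows directly: it filters by template, drops entries at pruned offsets via queryValidRange.filterPrunedEvents, resolves the internalContractId of every created event against the participant ContractStore in one batched lookupBatchedNonCached call, and only then hands each entry to deserializeEntry together with its optional FatContract. A minimal sketch of that pairing step, with simplified placeholder types (RawEvent, Created, Contract, and lookupBatched stand in for the Canton types and are not the real APIs):

import scala.concurrent.{ExecutionContext, Future}

object ContractPairingSketch {
  sealed trait RawEvent
  final case class Created(internalContractId: Long) extends RawEvent
  final case class Archived(contractId: String) extends RawEvent
  final case class Contract(internalContractId: Long, payload: String)

  def pairWithContracts(
      events: Vector[RawEvent],
      lookupBatched: Seq[Long] => Future[Map[Long, Contract]],
  )(implicit ec: ExecutionContext): Future[Vector[(RawEvent, Option[Contract])]] = {
    // One store round trip for the whole batch: only created events carry an
    // internal contract id that needs resolving.
    val ids = events.collect { case c: Created => c.internalContractId }
    lookupBatched(ids).map { byId =>
      events.map {
        case c: Created => c -> byId.get(c.internalContractId)
        case other => other -> None // non-created events need no contract payload
      }
    }
  }
}

Batching is the point of the change: one contract-store lookup per transaction instead of one per created event.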
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.platform.store.dao.events - -import com.daml.ledger.api.v2.transaction.TreeEvent -import com.daml.ledger.api.v2.update_service.GetUpdateTreesResponse -import com.daml.metrics.{DatabaseMetrics, Timed} -import com.daml.nameof.NameOf.qualifiedNameOfCurrentFunc -import com.daml.tracing -import com.daml.tracing.Spans -import com.digitalasset.canton.concurrent.DirectExecutionContext -import com.digitalasset.canton.data.Offset -import com.digitalasset.canton.ledger.api.TraceIdentifiers -import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext -import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.metrics.LedgerApiServerMetrics -import com.digitalasset.canton.platform.config.TransactionTreeStreamsConfig -import com.digitalasset.canton.platform.store.backend.EventStorageBackend -import com.digitalasset.canton.platform.store.backend.EventStorageBackend.SequentialIdBatch.Ids -import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{Entry, RawTreeEvent} -import com.digitalasset.canton.platform.store.backend.common.{ - EventIdSource, - EventPayloadSourceForUpdatesLedgerEffects, -} -import com.digitalasset.canton.platform.store.dao.PaginatingAsyncStream.IdPaginationState -import com.digitalasset.canton.platform.store.dao.events.EventsTable.TransactionConversions -import com.digitalasset.canton.platform.store.dao.events.ReassignmentStreamReader.ReassignmentStreamQueryParams -import com.digitalasset.canton.platform.store.dao.{ - DbDispatcher, - EventProjectionProperties, - PaginatingAsyncStream, -} -import com.digitalasset.canton.platform.store.utils.{ - ConcurrencyLimiter, - QueueBasedConcurrencyLimiter, - Telemetry, -} -import com.digitalasset.canton.platform.{Party, TemplatePartiesFilter} -import com.digitalasset.canton.util.MonadUtil -import com.digitalasset.canton.util.PekkoUtil.syntax.* -import io.opentelemetry.api.trace.Tracer -import org.apache.pekko.NotUsed -import org.apache.pekko.stream.Attributes -import org.apache.pekko.stream.scaladsl.Source - -import scala.annotation.nowarn -import scala.concurrent.{ExecutionContext, Future} -import scala.util.chaining.* - -// TODO(#23504) remove when TransctionTrees are removed -@nowarn("cat=deprecation") -class TransactionsTreeStreamReader( - config: TransactionTreeStreamsConfig, - globalIdQueriesLimiter: ConcurrencyLimiter, - globalPayloadQueriesLimiter: ConcurrencyLimiter, - dbDispatcher: DbDispatcher, - queryValidRange: QueryValidRange, - eventStorageBackend: EventStorageBackend, - lfValueTranslation: LfValueTranslation, - metrics: LedgerApiServerMetrics, - tracer: Tracer, - reassignmentStreamReader: ReassignmentStreamReader, - val loggerFactory: NamedLoggerFactory, -)(implicit executionContext: ExecutionContext) - extends NamedLogging { - import UpdateReader.* - import config.* - - private val dbMetrics = metrics.index.db - - private val orderBySequentialEventId = - Ordering.by[Entry[RawTreeEvent], Long](_.eventSequentialId) - - private val paginatingAsyncStream = new PaginatingAsyncStream(loggerFactory) - - private val directEC = DirectExecutionContext(logger) - - def streamTreeTransaction( - queryRange: EventsRange, - requestingParties: Option[Set[Party]], - eventProjectionProperties: EventProjectionProperties, - )(implicit - loggingContext: LoggingContextWithTrace - ): Source[(Offset, GetUpdateTreesResponse), NotUsed] = { - val span = - 
Telemetry.Updates.createSpan( - tracer, - queryRange.startInclusiveOffset, - queryRange.endInclusiveOffset, - )( - qualifiedNameOfCurrentFunc - ) - logger.debug( - s"streamTreeTransaction(${queryRange.startInclusiveOffset}, ${queryRange.endInclusiveOffset}, $requestingParties, $eventProjectionProperties)" - ) - val sourceOfTreeTransactions = doStreamTreeTransaction( - queryRange, - requestingParties, - eventProjectionProperties, - ) - sourceOfTreeTransactions - .wireTap(_ match { - case (_, response) => - response.update match { - case GetUpdateTreesResponse.Update.TransactionTree(txn) => - Spans.addEventToSpan( - tracing.Event("transaction", TraceIdentifiers.fromTransactionTree(txn)), - span, - ) - case GetUpdateTreesResponse.Update.Reassignment(reassignment) => - Spans.addEventToSpan( - tracing.Event("transaction", TraceIdentifiers.fromReassignment(reassignment)), - span, - ) - case _ => () - } - }) - .watchTermination()(endSpanOnTermination(span)) - } - - private def doStreamTreeTransaction( - queryRange: EventsRange, - requestingParties: Option[Set[Party]], - eventProjectionProperties: EventProjectionProperties, - )(implicit - loggingContext: LoggingContextWithTrace - ): Source[(Offset, GetUpdateTreesResponse), NotUsed] = { - val createEventIdQueriesLimiter = - new QueueBasedConcurrencyLimiter(maxParallelIdCreateQueries, executionContext) - val consumingEventIdQueriesLimiter = - new QueueBasedConcurrencyLimiter(maxParallelIdConsumingQueries, executionContext) - val nonConsumingEventIdQueriesLimiter = - new QueueBasedConcurrencyLimiter(maxParallelIdNonConsumingQueries, executionContext) - val payloadQueriesLimiter = - new QueueBasedConcurrencyLimiter(maxParallelPayloadQueries, executionContext) - val deserializationQueriesLimiter = - new QueueBasedConcurrencyLimiter(transactionsProcessingParallelism, executionContext) - val filterParties: Vector[Option[Party]] = - requestingParties.fold(Vector(None: Option[Party]))(_.map(Some(_)).toVector) - val idPageSizing = IdPageSizing.calculateFrom( - maxIdPageSize = maxIdsPerIdPage, - // The ids for tree transactions are retrieved from seven separate id tables: - // * Create stakeholder - // * Create non-stakeholder - // * Exercise consuming stakeholder - // * Exercise consuming non-stakeholder - // * Exercise non-consuming - // * Assign - // * Unassign - // To account for that we assign a seventh of the working memory to each table. 
- workingMemoryInBytesForIdPages = maxWorkingMemoryInBytesForIdPages / 7, - numOfDecomposedFilters = filterParties.size, - numOfPagesInIdPageBuffer = maxPagesPerIdPagesBuffer, - loggerFactory = loggerFactory, - ) - - def fetchIds( - filterParty: Option[Party], - target: EventIdSource, - maxParallelIdQueriesLimiter: QueueBasedConcurrencyLimiter, - metric: DatabaseMetrics, - ): Source[Long, NotUsed] = - paginatingAsyncStream.streamIdsFromSeekPagination( - idPageSizing = idPageSizing, - idPageBufferSize = maxPagesPerIdPagesBuffer, - initialFromIdExclusive = queryRange.startInclusiveEventSeqId, - )( - fetchPage = (state: IdPaginationState) => { - maxParallelIdQueriesLimiter.execute { - globalIdQueriesLimiter.execute { - dbDispatcher.executeSql(metric) { connection => - eventStorageBackend.updateStreamingQueries.fetchEventIds( - target = target - )( - stakeholderO = filterParty, - templateIdO = None, - startExclusive = state.fromIdExclusive, - endInclusive = queryRange.endInclusiveEventSeqId, - limit = state.pageSize, - )(connection) - } - } - } - } - ) - - def fetchPayloads( - ids: Source[Iterable[Long], NotUsed], - target: EventPayloadSourceForUpdatesLedgerEffects, - maxParallelPayloadQueries: Int, - metric: DatabaseMetrics, - ): Source[Entry[RawTreeEvent], NotUsed] = { - // Pekko requires for this buffer's size to be a power of two. - val inputBufferSize = Utils.largestSmallerOrEqualPowerOfTwo(maxParallelPayloadQueries) - ids.async - .addAttributes(Attributes.inputBuffer(initial = inputBufferSize, max = inputBufferSize)) - .mapAsync(maxParallelPayloadQueries)(ids => - payloadQueriesLimiter.execute { - globalPayloadQueriesLimiter.execute { - dbDispatcher.executeSql(metric) { implicit connection => - queryValidRange.withRangeNotPruned( - minOffsetInclusive = queryRange.startInclusiveOffset, - maxOffsetInclusive = queryRange.endInclusiveOffset, - errorPruning = (prunedOffset: Offset) => - s"Transactions request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} precedes pruned offset ${prunedOffset.unwrap}", - errorLedgerEnd = (ledgerEndOffset: Option[Offset]) => - s"Transactions request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} is beyond ledger end offset ${ledgerEndOffset - .fold(0L)(_.unwrap)}", - ) { - eventStorageBackend.fetchEventPayloadsLedgerEffects( - target = target - )( - eventSequentialIds = Ids(ids), - requestingParties = requestingParties, - )(connection) - } - } - } - } - ) - .mapConcat(identity) - } - - val idsCreate = - (filterParties.map(filter => - fetchIds( - filter, - EventIdSource.CreateStakeholder, - createEventIdQueriesLimiter, - dbMetrics.updatesLedgerEffectsStream.fetchEventCreateIdsStakeholder, - ) - ) ++ filterParties.map(filter => - fetchIds( - filter, - EventIdSource.CreateNonStakeholder, - createEventIdQueriesLimiter, - dbMetrics.updatesLedgerEffectsStream.fetchEventCreateIdsNonStakeholder, - ) - )).pipe( - mergeSortAndBatch( - maxOutputBatchSize = maxPayloadsPerPayloadsPage, - maxOutputBatchCount = maxParallelPayloadCreateQueries + 1, - ) - ) - val idsConsuming = - (filterParties.map(filter => - fetchIds( - filter, - EventIdSource.ConsumingStakeholder, - consumingEventIdQueriesLimiter, - dbMetrics.updatesLedgerEffectsStream.fetchEventConsumingIdsStakeholder, - ) - ) ++ filterParties.map(filter => - fetchIds( - filter, - EventIdSource.ConsumingNonStakeholder, - consumingEventIdQueriesLimiter, - dbMetrics.updatesLedgerEffectsStream.fetchEventConsumingIdsNonStakeholder, - ) - 
)).pipe( - mergeSortAndBatch( - maxOutputBatchSize = maxPayloadsPerPayloadsPage, - maxOutputBatchCount = maxParallelPayloadConsumingQueries + 1, - ) - ) - val idsNonConsuming = filterParties - .map(filter => - fetchIds( - filter, - EventIdSource.NonConsumingInformee, - nonConsumingEventIdQueriesLimiter, - dbMetrics.updatesLedgerEffectsStream.fetchEventNonConsumingIds, - ) - ) - .pipe( - mergeSortAndBatch( - maxOutputBatchSize = maxPayloadsPerPayloadsPage, - maxOutputBatchCount = maxParallelPayloadNonConsumingQueries + 1, - ) - ) - val payloadsCreate = fetchPayloads( - idsCreate, - EventPayloadSourceForUpdatesLedgerEffects.Create, - maxParallelPayloadCreateQueries, - dbMetrics.updatesLedgerEffectsStream.fetchEventCreatePayloads, - ) - val payloadsConsuming = fetchPayloads( - idsConsuming, - EventPayloadSourceForUpdatesLedgerEffects.Consuming, - maxParallelPayloadConsumingQueries, - dbMetrics.updatesLedgerEffectsStream.fetchEventConsumingPayloads, - ) - val payloadsNonConsuming = fetchPayloads( - idsNonConsuming, - EventPayloadSourceForUpdatesLedgerEffects.NonConsuming, - maxParallelPayloadNonConsumingQueries, - dbMetrics.updatesLedgerEffectsStream.fetchEventNonConsumingPayloads, - ) - val allSortedPayloads = payloadsConsuming - .mergeSorted(payloadsCreate)(orderBySequentialEventId) - .mergeSorted(payloadsNonConsuming)(orderBySequentialEventId) - val sourceOfTreeTransactions = UpdateReader - .groupContiguous(allSortedPayloads)(by = _.updateId) - .mapAsync(transactionsProcessingParallelism)(rawEvents => - deserializationQueriesLimiter.execute( - deserializeLfValues(rawEvents, eventProjectionProperties) - ) - ) - .mapConcat { events => - val responses = TransactionConversions.toGetTransactionTreesResponse(events) - responses.map { case (offset, response) => Offset.tryFromLong(offset) -> response } - } - - val reassignments = - reassignmentStreamReader - .streamReassignments( - ReassignmentStreamQueryParams( - queryRange = queryRange, - filteringConstraints = TemplatePartiesFilter( - relation = Map.empty, - templateWildcardParties = requestingParties, - ), - eventProjectionProperties = eventProjectionProperties, - payloadQueriesLimiter = payloadQueriesLimiter, - deserializationQueriesLimiter = deserializationQueriesLimiter, - idPageSizing = idPageSizing, - decomposedFilters = filterParties.map(DecomposedFilter(_, None)), - maxParallelIdAssignQueries = maxParallelIdAssignQueries, - maxParallelIdUnassignQueries = maxParallelIdUnassignQueries, - maxPagesPerIdPagesBuffer = maxPagesPerIdPagesBuffer, - maxPayloadsPerPayloadsPage = maxPayloadsPerPayloadsPage, - maxParallelPayloadAssignQueries = maxParallelPayloadAssignQueries, - maxParallelPayloadUnassignQueries = maxParallelPayloadUnassignQueries, - deserializationProcessingParallelism = transactionsProcessingParallelism, - ) - ) - .map { case (offset, reassignment) => - offset -> GetUpdateTreesResponse( - GetUpdateTreesResponse.Update.Reassignment(reassignment) - ) - } - - sourceOfTreeTransactions - .mergeSorted(reassignments.map { case (offset, response) => - offset -> response - })(Ordering.by(_._1)) - } - - private def mergeSortAndBatch( - maxOutputBatchSize: Int, - maxOutputBatchCount: Int, - )(sourcesOfIds: Vector[Source[Long, NotUsed]]): Source[Iterable[Long], NotUsed] = - EventIdsUtils - .sortAndDeduplicateIds(sourcesOfIds) - .batchN( - maxBatchSize = maxOutputBatchSize, - maxBatchCount = maxOutputBatchCount, - ) - - private def deserializeLfValues( - rawEvents: Vector[Entry[RawTreeEvent]], - eventProjectionProperties: 
EventProjectionProperties, - )(implicit lc: LoggingContextWithTrace): Future[Seq[Entry[TreeEvent]]] = - Timed.future( - future = Future.delegate { - implicit val executionContext: ExecutionContext = - directEC // Scala 2 implicit scope override: shadow the outer scope's implicit by name - MonadUtil.sequentialTraverse(rawEvents)( - UpdateReader.deserializeTreeEvent(eventProjectionProperties, lfValueTranslation) - ) - }, - timer = dbMetrics.updatesLedgerEffectsStream.translationTimer, - ) - -} diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdatePointwiseReader.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdatePointwiseReader.scala index 5b9f8cfab0..82472163f7 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdatePointwiseReader.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdatePointwiseReader.scala @@ -12,7 +12,6 @@ import com.digitalasset.canton.platform.InternalUpdateFormat import com.digitalasset.canton.platform.store.backend.common.UpdatePointwiseQueries.LookupKey import com.digitalasset.canton.platform.store.backend.{EventStorageBackend, ParameterStorageBackend} import com.digitalasset.canton.platform.store.dao.DbDispatcher -import com.digitalasset.canton.platform.store.dao.events.UpdatePointwiseReader.getOffset import scala.concurrent.{ExecutionContext, Future} @@ -88,24 +87,9 @@ final class UpdatePointwiseReader( ) .map(_.flatten) - prunedUpToInclusive <- dbDispatcher.executeSql(metrics.index.db.fetchPruningOffsetsMetrics)( - parameterStorageBackend.prunedUpToInclusive - ) - - notPruned = agg.filter(update => getOffset(update) > prunedUpToInclusive.fold(0L)(_.unwrap)) - } yield { // only a single update should exist for a specific offset or update id - notPruned.headOption.map(GetUpdateResponse.apply) + agg.headOption.map(GetUpdateResponse.apply) } } - -object UpdatePointwiseReader { - private def getOffset(update: Update): Long = update match { - case Update.Empty => throw new RuntimeException("The update was unexpectedly empty.") - case Update.Transaction(tx) => tx.offset - case Update.Reassignment(reassignment) => reassignment.offset - case Update.TopologyTransaction(topologyTx) => topologyTx.offset - } -} diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdateReader.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdateReader.scala index 4384efc6c0..d354cc6f24 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdateReader.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdateReader.scala @@ -12,15 +12,7 @@ import com.daml.ledger.api.v2.reassignment.{ } import com.daml.ledger.api.v2.state_service.GetActiveContractsResponse import com.daml.ledger.api.v2.trace_context.TraceContext as DamlTraceContext -import com.daml.ledger.api.v2.transaction.TreeEvent -import com.daml.ledger.api.v2.update_service.{ - GetTransactionResponse, - GetTransactionTreeResponse, - GetUpdateResponse, - GetUpdateTreesResponse, - GetUpdatesResponse, -} -import com.digitalasset.canton.data +import com.daml.ledger.api.v2.update_service.{GetUpdateResponse, 
GetUpdatesResponse} import com.digitalasset.canton.data.Offset import com.digitalasset.canton.ledger.api.util.{LfEngineToApi, TimestampConversion} import com.digitalasset.canton.logging.LoggingContextWithTrace @@ -28,14 +20,14 @@ import com.digitalasset.canton.metrics.LedgerApiServerMetrics import com.digitalasset.canton.platform.store.backend.EventStorageBackend import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{ Entry, - RawArchivedEvent, - RawAssignEvent, - RawCreatedEvent, - RawEvent, - RawExercisedEvent, - RawFlatEvent, - RawTreeEvent, - RawUnassignEvent, + RawAcsDeltaEventLegacy, + RawArchivedEventLegacy, + RawAssignEventLegacy, + RawCreatedEventLegacy, + RawEventLegacy, + RawExercisedEventLegacy, + RawLedgerEffectsEventLegacy, + RawUnassignEventLegacy, } import com.digitalasset.canton.platform.store.backend.common.UpdatePointwiseQueries.LookupKey import com.digitalasset.canton.platform.store.dao.{ @@ -43,18 +35,12 @@ import com.digitalasset.canton.platform.store.dao.{ EventProjectionProperties, LedgerDaoUpdateReader, } -import com.digitalasset.canton.platform.{ - InternalTransactionFormat, - InternalUpdateFormat, - Party, - TemplatePartiesFilter, -} +import com.digitalasset.canton.platform.{FatContract, InternalUpdateFormat, TemplatePartiesFilter} import com.digitalasset.canton.util.MonadUtil import io.opentelemetry.api.trace.Span import org.apache.pekko.stream.scaladsl.Source import org.apache.pekko.{Done, NotUsed} -import scala.annotation.nowarn import scala.concurrent.{ExecutionContext, Future} import scala.util.{Failure, Success} @@ -82,8 +68,6 @@ import scala.util.{Failure, Success} private[dao] final class UpdateReader( updatesStreamReader: UpdatesStreamReader, updatePointwiseReader: UpdatePointwiseReader, - treeTransactionsStreamReader: TransactionsTreeStreamReader, - treeTransactionPointwiseReader: TransactionTreePointwiseReader, dispatcher: DbDispatcher, queryValidRange: QueryValidRange, eventStorageBackend: EventStorageBackend, @@ -114,42 +98,6 @@ private[dao] final class UpdateReader( .mapMaterializedValue((_: Future[NotUsed]) => NotUsed) } - // TODO(#23504) remove when getTransactionById is removed - @nowarn("cat=deprecation") - override def lookupTransactionById( - updateId: data.UpdateId, - internalTransactionFormat: InternalTransactionFormat, - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetTransactionResponse]] = - updatePointwiseReader - .lookupUpdateBy( - lookupKey = LookupKey.UpdateId(updateId), - internalUpdateFormat = InternalUpdateFormat( - includeTransactions = Some(internalTransactionFormat), - includeReassignments = None, - includeTopologyEvents = None, - ), - ) - .map(_.flatMap(_.update.transaction)) - .map(_.map(tx => GetTransactionResponse(transaction = Some(tx)))) - - // TODO(#23504) remove when getTransactionByOffset is removed - @nowarn("cat=deprecation") - override def lookupTransactionByOffset( - offset: data.Offset, - internalTransactionFormat: InternalTransactionFormat, - )(implicit loggingContext: LoggingContextWithTrace): Future[Option[GetTransactionResponse]] = - updatePointwiseReader - .lookupUpdateBy( - lookupKey = LookupKey.Offset(offset), - internalUpdateFormat = InternalUpdateFormat( - includeTransactions = Some(internalTransactionFormat), - includeReassignments = None, - includeTopologyEvents = None, - ), - ) - .map(_.flatMap(_.update.transaction)) - .map(_.map(tx => GetTransactionResponse(transaction = Some(tx)))) - override def lookupUpdateBy( lookupKey: LookupKey, internalUpdateFormat: 
InternalUpdateFormat, @@ -159,60 +107,6 @@ private[dao] final class UpdateReader( internalUpdateFormat = internalUpdateFormat, ) - // TODO(#23504) remove when getTransactionByOffset is removed - @nowarn("cat=deprecation") - override def lookupTransactionTreeById( - updateId: data.UpdateId, - requestingParties: Set[Party], - eventProjectionProperties: EventProjectionProperties, - )(implicit - loggingContext: LoggingContextWithTrace - ): Future[Option[GetTransactionTreeResponse]] = - treeTransactionPointwiseReader.lookupTransactionBy( - lookupKey = LookupKey.UpdateId(updateId), - requestingParties = requestingParties, - eventProjectionProperties = eventProjectionProperties, - ) - - // TODO(#23504) remove when getTransactionByOffset is removed - @nowarn("cat=deprecation") - override def lookupTransactionTreeByOffset( - offset: data.Offset, - requestingParties: Set[Party], - eventProjectionProperties: EventProjectionProperties, - )(implicit - loggingContext: LoggingContextWithTrace - ): Future[Option[GetTransactionTreeResponse]] = - treeTransactionPointwiseReader.lookupTransactionBy( - lookupKey = LookupKey.Offset(offset), - requestingParties = requestingParties, - eventProjectionProperties = eventProjectionProperties, - ) - - // TODO(#23504) remove when getTransactionByOffset is removed - @nowarn("cat=deprecation") - override def getTransactionTrees( - startInclusive: Offset, - endInclusive: Offset, - requestingParties: Option[Set[Party]], - eventProjectionProperties: EventProjectionProperties, - )(implicit - loggingContext: LoggingContextWithTrace - ): Source[(Offset, GetUpdateTreesResponse), NotUsed] = { - val futureSource = - getEventSeqIdRange(startInclusive, endInclusive) - .map(queryRange => - treeTransactionsStreamReader.streamTreeTransaction( - queryRange = queryRange, - requestingParties = requestingParties, - eventProjectionProperties = eventProjectionProperties, - ) - ) - Source - .futureSource(futureSource) - .mapMaterializedValue((_: Future[NotUsed]) => NotUsed) - } - override def getActiveContracts( activeAt: Option[Offset], filter: TemplatePartiesFilter, @@ -239,50 +133,46 @@ private[dao] final class UpdateReader( private def getMaxAcsEventSeqId(activeAt: Offset)(implicit loggingContext: LoggingContextWithTrace ): Future[Long] = - dispatcher - .executeSql(dbMetrics.getAcsEventSeqIdRange)(implicit connection => - queryValidRange.withOffsetNotBeforePruning( - offset = activeAt, - errorPruning = pruned => - ACSReader.acsBeforePruningErrorReason( - acsOffset = activeAt, - prunedUpToOffset = pruned, - ), - errorLedgerEnd = ledgerEnd => - ACSReader.acsAfterLedgerEndErrorReason( - acsOffset = activeAt, - ledgerEndOffset = ledgerEnd, - ), - )( - eventStorageBackend.maxEventSequentialId(Some(activeAt))(connection) - ) + queryValidRange.withOffsetNotBeforePruning( + offset = activeAt, + errorPruning = pruned => + ACSReader.acsBeforePruningErrorReason( + acsOffset = activeAt, + prunedUpToOffset = pruned, + ), + errorLedgerEnd = ledgerEnd => + ACSReader.acsAfterLedgerEndErrorReason( + acsOffset = activeAt, + ledgerEndOffset = ledgerEnd, + ), + )( + dispatcher.executeSql(dbMetrics.getAcsEventSeqIdRange)( + eventStorageBackend.maxEventSequentialId(Some(activeAt)) ) + ) private def getEventSeqIdRange( startInclusive: Offset, endInclusive: Offset, )(implicit loggingContext: LoggingContextWithTrace): Future[EventsRange] = - dispatcher - .executeSql(dbMetrics.getEventSeqIdRange)(implicit connection => - queryValidRange.withRangeNotPruned( - minOffsetInclusive = startInclusive, - 
maxOffsetInclusive = endInclusive, - errorPruning = (prunedOffset: Offset) => - s"Transactions request from ${startInclusive.unwrap} to ${endInclusive.unwrap} precedes pruned offset ${prunedOffset.unwrap}", - errorLedgerEnd = (ledgerEndOffset: Option[Offset]) => - s"Transactions request from ${startInclusive.unwrap} to ${endInclusive.unwrap} is beyond ledger end offset ${ledgerEndOffset - .fold(0L)(_.unwrap)}", - ) { - EventsRange( - startInclusiveOffset = startInclusive, - startInclusiveEventSeqId = - eventStorageBackend.maxEventSequentialId(startInclusive.decrement)(connection), - endInclusiveOffset = endInclusive, - endInclusiveEventSeqId = - eventStorageBackend.maxEventSequentialId(Some(endInclusive))(connection), - ) - } + queryValidRange.withRangeNotPruned( + minOffsetInclusive = startInclusive, + maxOffsetInclusive = endInclusive, + errorPruning = (prunedOffset: Offset) => + s"Transactions request from ${startInclusive.unwrap} to ${endInclusive.unwrap} precedes pruned offset ${prunedOffset.unwrap}", + errorLedgerEnd = (ledgerEndOffset: Option[Offset]) => + s"Transactions request from ${startInclusive.unwrap} to ${endInclusive.unwrap} is beyond ledger end offset ${ledgerEndOffset + .fold(0L)(_.unwrap)}", + )(dispatcher.executeSql(dbMetrics.getEventSeqIdRange) { connection => + EventsRange( + startInclusiveOffset = startInclusive, + startInclusiveEventSeqId = + eventStorageBackend.maxEventSequentialId(startInclusive.decrement)(connection), + endInclusiveOffset = endInclusive, + endInclusiveEventSeqId = + eventStorageBackend.maxEventSequentialId(Some(endInclusive))(connection), ) + }) } @@ -330,25 +220,29 @@ private[dao] object UpdateReader { .fold(Vector.empty[A])(_ :+ _) .concatSubstreams - def toUnassignedEvent(offset: Long, rawUnassignEvent: RawUnassignEvent): UnassignedEvent = + def toUnassignedEvent( + offset: Long, + rawUnassignEvent: Entry[RawUnassignEventLegacy], + ): UnassignedEvent = UnassignedEvent( offset = offset, - reassignmentId = rawUnassignEvent.reassignmentId, - contractId = rawUnassignEvent.contractId.coid, - templateId = Some(LfEngineToApi.toApiIdentifier(rawUnassignEvent.templateId.toIdentifier)), - packageName = rawUnassignEvent.templateId.pkgName, - source = rawUnassignEvent.sourceSynchronizerId, - target = rawUnassignEvent.targetSynchronizerId, - submitter = rawUnassignEvent.submitter.getOrElse(""), - reassignmentCounter = rawUnassignEvent.reassignmentCounter, + reassignmentId = rawUnassignEvent.event.reassignmentId, + contractId = rawUnassignEvent.event.contractId.coid, + templateId = + Some(LfEngineToApi.toApiIdentifier(rawUnassignEvent.event.templateId.toIdentifier)), + packageName = rawUnassignEvent.event.templateId.pkgName, + source = rawUnassignEvent.event.sourceSynchronizerId, + target = rawUnassignEvent.event.targetSynchronizerId, + submitter = rawUnassignEvent.event.submitter.getOrElse(""), + reassignmentCounter = rawUnassignEvent.event.reassignmentCounter, assignmentExclusivity = - rawUnassignEvent.assignmentExclusivity.map(TimestampConversion.fromLf), - witnessParties = rawUnassignEvent.witnessParties.toSeq, + rawUnassignEvent.event.assignmentExclusivity.map(TimestampConversion.fromLf), + witnessParties = rawUnassignEvent.event.witnessParties.toSeq, nodeId = rawUnassignEvent.nodeId, ) def toApiUnassigned( - rawUnassignEntries: Seq[Entry[RawUnassignEvent]] + rawUnassignEntries: Seq[Entry[RawUnassignEventLegacy]] ): Option[Reassignment] = rawUnassignEntries.headOption map { first => Reassignment( @@ -359,7 +253,7 @@ private[dao] object UpdateReader { 
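Note the inversion in getMaxAcsEventSeqId and getEventSeqIdRange above: queryValidRange.withOffsetNotBeforePruning and withRangeNotPruned now wrap the Future returned by dispatcher.executeSql rather than being evaluated inside the borrowed SQL connection. A rough sketch of what such a Future-level guard can look like; the signature and error handling are assumptions for illustration, not the real QueryValidRange API (which also checks the ledger end and raises proper gRPC errors):

import scala.concurrent.{ExecutionContext, Future}

object ValidRangeSketch {
  // Hypothetical guard: reject the request if its lower bound is at or below
  // the pruning offset, otherwise run the wrapped database work.
  def withRangeNotPruned[A](
      minOffsetInclusive: Long,
      prunedUpTo: () => Future[Long],
  )(body: => Future[A])(implicit ec: ExecutionContext): Future[A] =
    prunedUpTo().flatMap { pruned =>
      if (minOffsetInclusive <= pruned)
        Future.failed(
          new IllegalStateException(
            s"request from $minOffsetInclusive precedes pruned offset $pruned"
          )
        )
      else body // the SQL work runs as its own Future; no connection is held across the check
    }
}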
events = rawUnassignEntries.map(entry => ReassignmentEvent( ReassignmentEvent.Event.Unassigned( - UpdateReader.toUnassignedEvent(first.offset, entry.event) + UpdateReader.toUnassignedEvent(first.offset, entry) ) ) ), @@ -370,7 +264,7 @@ private[dao] object UpdateReader { } def toAssignedEvent( - rawAssignEvent: RawAssignEvent, + rawAssignEvent: RawAssignEventLegacy, createdEvent: CreatedEvent, ): AssignedEvent = AssignedEvent( @@ -386,24 +280,34 @@ private[dao] object UpdateReader { eventProjectionProperties: EventProjectionProperties, lfValueTranslation: LfValueTranslation, )( - rawAssignEntries: Seq[Entry[RawAssignEvent]] + rawAssignEntries: Seq[(Entry[RawAssignEventLegacy], Option[FatContract])] )(implicit lc: LoggingContextWithTrace, ec: ExecutionContext): Future[Option[Reassignment]] = MonadUtil - .sequentialTraverse(rawAssignEntries) { rawAssignEntry => + .sequentialTraverse(rawAssignEntries) { case (rawAssignEntry, fatContractO) => lfValueTranslation - .deserializeRaw( - eventProjectionProperties, - rawAssignEntry.event.rawCreatedEvent, + .toApiCreatedEvent( + eventProjectionProperties = eventProjectionProperties, + fatContractInstance = fatContractO.getOrElse( + throw new IllegalStateException( + s"Contract for internal contract id ${rawAssignEntry.event.rawCreatedEvent.internalContractId} was not found in the contract store." + ) + ), + offset = rawAssignEntry.offset, + nodeId = rawAssignEntry.nodeId, + representativePackageId = rawAssignEntry.event.rawCreatedEvent.representativePackageId, + witnesses = rawAssignEntry.event.rawCreatedEvent.witnessParties, + acsDelta = rawAssignEntry.event.rawCreatedEvent.flatEventWitnesses.nonEmpty, ) + } .map(createdEvents => - rawAssignEntries.headOption.map(first => + rawAssignEntries.headOption.map { case (first, _) => Reassignment( updateId = first.updateId, commandId = first.commandId.getOrElse(""), workflowId = first.workflowId.getOrElse(""), offset = first.offset, - events = rawAssignEntries.zip(createdEvents).map { case (entry, created) => + events = rawAssignEntries.zip(createdEvents).map { case ((entry, _), created) => ReassignmentEvent( ReassignmentEvent.Event.Assigned( UpdateReader.toAssignedEvent(entry.event, created) @@ -414,94 +318,101 @@ private[dao] object UpdateReader { traceContext = first.traceContext.map(DamlTraceContext.parseFrom), synchronizerId = first.synchronizerId, ) - ) + } ) - def deserializeRawFlatEvent( + def deserializeRawAcsDeltaEvent( eventProjectionProperties: EventProjectionProperties, lfValueTranslation: LfValueTranslation, )( - rawFlatEntry: Entry[RawFlatEvent] + rawFlatEntryFatContract: (Entry[RawAcsDeltaEventLegacy], Option[FatContract]) )(implicit loggingContext: LoggingContextWithTrace, ec: ExecutionContext, - ): Future[Entry[Event]] = rawFlatEntry.event match { - case rawCreated: RawCreatedEvent => - lfValueTranslation - .deserializeRaw(eventProjectionProperties, rawCreated) - .map(createdEvent => rawFlatEntry.copy(event = Event(Event.Event.Created(createdEvent)))) - - case rawArchived: RawArchivedEvent => - Future.successful( - rawFlatEntry.copy( - event = Event( - Event.Event.Archived( - lfValueTranslation.deserializeRaw(eventProjectionProperties, rawArchived) + ): Future[Entry[Event]] = + rawFlatEntryFatContract match { + case (rawFlatEntry, fatContractO) => + rawFlatEntry.event match { + case rawCreated: RawCreatedEventLegacy => + lfValueTranslation + .toApiCreatedEvent( + eventProjectionProperties = eventProjectionProperties, + fatContractInstance = fatContractO.getOrElse( + throw new 
IllegalStateException( + s"Contract for internal contract id ${rawCreated.internalContractId} was not found in the contract store." + ) + ), + offset = rawFlatEntry.offset, + nodeId = rawFlatEntry.nodeId, + representativePackageId = rawCreated.representativePackageId, + witnesses = rawCreated.witnessParties, + acsDelta = rawCreated.flatEventWitnesses.nonEmpty, + ) + .map(createdEvent => rawFlatEntry.withEvent(Event(Event.Event.Created(createdEvent)))) + + case rawArchived: RawArchivedEventLegacy => + Future.successful( + rawFlatEntry.withEvent( + Event( + Event.Event.Archived( + lfValueTranslation.deserializeRawArchived( + eventProjectionProperties, + rawFlatEntry.withEvent(rawArchived), + ) + ) + ) + ) ) - ) - ) - ) - } - - // TODO(#23504) cleanup - @nowarn("cat=deprecation") - def deserializeTreeEvent( - eventProjectionProperties: EventProjectionProperties, - lfValueTranslation: LfValueTranslation, - )( - rawTreeEntry: Entry[RawTreeEvent] - )(implicit - loggingContext: LoggingContextWithTrace, - ec: ExecutionContext, - ): Future[Entry[TreeEvent]] = rawTreeEntry.event match { - case rawCreated: RawCreatedEvent => - lfValueTranslation - .deserializeRaw(eventProjectionProperties, rawCreated) - .map(createdEvent => - rawTreeEntry.copy( - event = TreeEvent(TreeEvent.Kind.Created(createdEvent)) - ) - ) - - case rawExercised: RawExercisedEvent => - lfValueTranslation - .deserializeRaw(eventProjectionProperties, rawExercised) - .map(exercisedEvent => - rawTreeEntry.copy( - event = TreeEvent(TreeEvent.Kind.Exercised(exercisedEvent)) - ) - ) - } + } + } - def deserializeRawTreeEvent( + def deserializeRawLedgerEffectsEvent( eventProjectionProperties: EventProjectionProperties, lfValueTranslation: LfValueTranslation, )( - rawTreeEntry: Entry[RawTreeEvent] + rawTreeEntryFatContract: (Entry[RawLedgerEffectsEventLegacy], Option[FatContract]) )(implicit loggingContext: LoggingContextWithTrace, ec: ExecutionContext, - ): Future[Entry[Event]] = rawTreeEntry.event match { - case rawCreated: RawCreatedEvent => - lfValueTranslation - .deserializeRaw(eventProjectionProperties, rawCreated) - .map(createdEvent => - rawTreeEntry.copy( - event = Event(Event.Event.Created(createdEvent)) - ) - ) - - case rawExercised: RawExercisedEvent => - lfValueTranslation - .deserializeRaw(eventProjectionProperties, rawExercised) - .map(exercisedEvent => - rawTreeEntry.copy( - event = Event(Event.Event.Exercised(exercisedEvent)) - ) - ) - } + ): Future[Entry[Event]] = + rawTreeEntryFatContract match { + case (rawTreeEntry, fatContractO) => + rawTreeEntry.event match { + case rawCreated: RawCreatedEventLegacy => + lfValueTranslation + .toApiCreatedEvent( + eventProjectionProperties = eventProjectionProperties, + fatContractInstance = fatContractO.getOrElse( + throw new IllegalStateException( + s"Contract for internal contract id ${rawCreated.internalContractId} was not found in the contract store." 
+ ) + ), + offset = rawTreeEntry.offset, + nodeId = rawTreeEntry.nodeId, + representativePackageId = rawCreated.representativePackageId, + witnesses = rawCreated.witnessParties, + acsDelta = rawCreated.flatEventWitnesses.nonEmpty, + ) + .map(createdEvent => + rawTreeEntry.withEvent( + Event(Event.Event.Created(createdEvent)) + ) + ) - def filterRawEvents[T <: RawEvent](templatePartiesFilter: TemplatePartiesFilter)( + case rawExercised: RawExercisedEventLegacy => + lfValueTranslation + .deserializeRawExercised( + eventProjectionProperties, + rawTreeEntry.withEvent(rawExercised), + ) + .map(exercisedEvent => + rawTreeEntry.copy( + event = Event(Event.Event.Exercised(exercisedEvent)) + ) + ) + } + } + def filterRawEvents[T <: RawEventLegacy](templatePartiesFilter: TemplatePartiesFilter)( rawEvents: Seq[Entry[T]] ): Seq[Entry[T]] = { val templateWildcardPartiesO = templatePartiesFilter.templateWildcardParties diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdatesStreamReader.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdatesStreamReader.scala index efcfaa820d..96f78f2b1e 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdatesStreamReader.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdatesStreamReader.scala @@ -16,20 +16,24 @@ import com.digitalasset.canton.ledger.api.{TraceIdentifiers, TransactionShape} import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.LedgerApiServerMetrics +import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors.AbortedDueToShutdown +import com.digitalasset.canton.participant.store.ContractStore import com.digitalasset.canton.platform.config.UpdatesStreamsConfig import com.digitalasset.canton.platform.store.backend.EventStorageBackend import com.digitalasset.canton.platform.store.backend.EventStorageBackend.SequentialIdBatch.Ids import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{ Entry, - RawFlatEvent, - RawTreeEvent, + RawAcsDeltaEventLegacy, + RawArchivedEventLegacy, + RawCreatedEventLegacy, + RawExercisedEventLegacy, + RawLedgerEffectsEventLegacy, } import com.digitalasset.canton.platform.store.backend.common.{ - EventIdSource, - EventPayloadSourceForUpdatesAcsDelta, - EventPayloadSourceForUpdatesLedgerEffects, + EventIdSourceLegacy, + EventPayloadSourceForUpdatesAcsDeltaLegacy, + EventPayloadSourceForUpdatesLedgerEffectsLegacy, } -import com.digitalasset.canton.platform.store.dao.PaginatingAsyncStream.IdPaginationState import com.digitalasset.canton.platform.store.dao.events.EventsTable.TransactionConversions import com.digitalasset.canton.platform.store.dao.events.ReassignmentStreamReader.ReassignmentStreamQueryParams import com.digitalasset.canton.platform.store.dao.events.TopologyTransactionsStreamReader.TopologyTransactionsStreamQueryParams @@ -44,6 +48,7 @@ import com.digitalasset.canton.platform.store.utils.{ Telemetry, } import com.digitalasset.canton.platform.{ + FatContract, InternalEventFormat, InternalTransactionFormat, InternalUpdateFormat, @@ -68,6 +73,7 @@ class UpdatesStreamReader( queryValidRange: QueryValidRange, eventStorageBackend: EventStorageBackend, 
lfValueTranslation: LfValueTranslation, + contractStore: ContractStore, metrics: LedgerApiServerMetrics, tracer: Tracer, topologyTransactionsStreamReader: TopologyTransactionsStreamReader, @@ -81,10 +87,12 @@ private val dbMetrics = metrics.index.db private val orderBySequentialEventIdFlat = - Ordering.by[Entry[RawFlatEvent], Long](_.eventSequentialId) + Ordering.by[(Entry[RawAcsDeltaEventLegacy], Option[FatContract]), Long](_._1.eventSequentialId) private val orderBySequentialEventIdTree = - Ordering.by[Entry[RawTreeEvent], Long](_.eventSequentialId) + Ordering.by[(Entry[RawLedgerEffectsEventLegacy], Option[FatContract]), Long]( + _._1.eventSequentialId + ) private val paginatingAsyncStream = new PaginatingAsyncStream(loggerFactory) @@ -289,7 +297,7 @@ def fetchIdsSorted( txDecomposedFilters: Vector[DecomposedFilter], - target: EventIdSource, + target: EventIdSourceLegacy, maxParallelIdQueriesLimiter: QueueBasedConcurrencyLimiter, maxOutputBatchCount: Int, metric: DatabaseMetrics, @@ -308,7 +316,7 @@ val idsCreate = fetchIdsSorted( txDecomposedFilters = txDecomposedFilters, - target = EventIdSource.CreateStakeholder, + target = EventIdSourceLegacy.CreateStakeholder, maxParallelIdQueriesLimiter = createEventIdQueriesLimiter, maxOutputBatchCount = maxParallelPayloadCreateQueries + 1, metric = dbMetrics.updatesAcsDeltaStream.fetchEventCreateIdsStakeholder, @@ -316,25 +324,36 @@ val idsConsuming = fetchIdsSorted( txDecomposedFilters = txDecomposedFilters, - target = EventIdSource.ConsumingStakeholder, + target = EventIdSourceLegacy.ConsumingStakeholder, maxParallelIdQueriesLimiter = consumingEventIdQueriesLimiter, maxOutputBatchCount = maxParallelPayloadConsumingQueries + 1, metric = dbMetrics.updatesAcsDeltaStream.fetchEventConsumingIdsStakeholder, ) + + def getInternalContractIdFromCreated(event: RawAcsDeltaEventLegacy): Long = event match { + case created: RawCreatedEventLegacy => created.internalContractId + case _: RawArchivedEventLegacy => + throw new IllegalStateException( + s"archived event should not be used to look up a contract" + ) + } + val payloadsCreate = fetchPayloads( queryRange = queryRange, ids = idsCreate, fetchEvents = (ids, connection) => - eventStorageBackend.fetchEventPayloadsAcsDelta(target = - EventPayloadSourceForUpdatesAcsDelta.Create + eventStorageBackend.fetchEventPayloadsAcsDeltaLegacy(target = + EventPayloadSourceForUpdatesAcsDeltaLegacy.Create )( eventSequentialIds = Ids(ids), requestingParties = txFilteringConstraints.allFilterParties, )(connection), maxParallelPayloadQueries = maxParallelPayloadCreateQueries, - dbMetric = dbMetrics.updatesAcsDeltaStream.fetchEventCreatePayloads, + dbMetric = dbMetrics.updatesAcsDeltaStream.fetchEventCreatePayloadsLegacy, payloadQueriesLimiter = payloadQueriesLimiter, + contractStore = contractStore, + getInternalContractIdO = Some(getInternalContractIdFromCreated), ) val payloadsConsuming = fetchPayloads( @@ -342,18 +361,22 @@
dbMetrics.updatesAcsDeltaStream.fetchEventConsumingPayloads, + dbMetric = dbMetrics.updatesAcsDeltaStream.fetchEventConsumingPayloadsLegacy, payloadQueriesLimiter = payloadQueriesLimiter, + contractStore = contractStore, + getInternalContractIdO = None, ) val allSortedPayloads = payloadsConsuming.mergeSorted(payloadsCreate)(orderBySequentialEventIdFlat) UpdateReader - .groupContiguous(allSortedPayloads)(by = _.updateId) + .groupContiguous(allSortedPayloads)(by = _._1.updateId) .mapAsync(transactionsProcessingParallelism)(rawEvents => deserializationQueriesLimiter.execute( deserializeLfValues(rawEvents, internalEventFormat.eventProjectionProperties) @@ -392,18 +415,18 @@ class UpdatesStreamReader( queryRange = queryRange, filter = filter, idPageSizing = idPageSizing, - target = EventIdSource.CreateStakeholder, + target = EventIdSourceLegacy.CreateStakeholder, maxParallelIdQueriesLimiter = createEventIdQueriesLimiter, - metric = dbMetrics.updatesLedgerEffectsStream.fetchEventCreateIdsStakeholder, + metric = dbMetrics.updatesLedgerEffectsStream.fetchEventCreateIdsStakeholderLegacy, ) ) ++ txDecomposedFilters.map(filter => fetchIds( queryRange = queryRange, filter = filter, idPageSizing = idPageSizing, - target = EventIdSource.CreateNonStakeholder, + target = EventIdSourceLegacy.CreateNonStakeholder, maxParallelIdQueriesLimiter = createEventIdQueriesLimiter, - metric = dbMetrics.updatesLedgerEffectsStream.fetchEventCreateIdsNonStakeholder, + metric = dbMetrics.updatesLedgerEffectsStream.fetchEventCreateIdsNonStakeholderLegacy, ) )).pipe( mergeSortAndBatch( @@ -416,19 +439,19 @@ class UpdatesStreamReader( fetchIds( queryRange = queryRange, filter = filter, - target = EventIdSource.ConsumingStakeholder, + target = EventIdSourceLegacy.ConsumingStakeholder, idPageSizing = idPageSizing, maxParallelIdQueriesLimiter = consumingEventIdQueriesLimiter, - metric = dbMetrics.updatesLedgerEffectsStream.fetchEventConsumingIdsStakeholder, + metric = dbMetrics.updatesLedgerEffectsStream.fetchEventConsumingIdsStakeholderLegacy, ) ) ++ txDecomposedFilters.map(filter => fetchIds( queryRange = queryRange, filter = filter, - target = EventIdSource.ConsumingNonStakeholder, + target = EventIdSourceLegacy.ConsumingNonStakeholder, idPageSizing = idPageSizing, maxParallelIdQueriesLimiter = consumingEventIdQueriesLimiter, - metric = dbMetrics.updatesLedgerEffectsStream.fetchEventConsumingIdsNonStakeholder, + metric = dbMetrics.updatesLedgerEffectsStream.fetchEventConsumingIdsNonStakeholderLegacy, ) )).pipe( mergeSortAndBatch( @@ -441,10 +464,10 @@ class UpdatesStreamReader( fetchIds( queryRange = queryRange, filter = filter, - target = EventIdSource.NonConsumingInformee, + target = EventIdSourceLegacy.NonConsumingInformee, idPageSizing = idPageSizing, maxParallelIdQueriesLimiter = nonConsumingEventIdQueriesLimiter, - metric = dbMetrics.updatesLedgerEffectsStream.fetchEventNonConsumingIds, + metric = dbMetrics.updatesLedgerEffectsStream.fetchEventNonConsumingIdsLegacy, ) ) .pipe( @@ -455,47 +478,62 @@ class UpdatesStreamReader( ) def fetchEventPayloadsLedgerEffects( - target: EventPayloadSourceForUpdatesLedgerEffects - )(ids: Iterable[Long], connection: Connection): Vector[Entry[RawTreeEvent]] = - eventStorageBackend.fetchEventPayloadsLedgerEffects( + target: EventPayloadSourceForUpdatesLedgerEffectsLegacy + )(ids: Iterable[Long], connection: Connection): Vector[Entry[RawLedgerEffectsEventLegacy]] = + eventStorageBackend.fetchEventPayloadsLedgerEffectsLegacy( target = target )( eventSequentialIds = Ids(ids), 
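Since every payload stream now emits (Entry[T], Option[FatContract]) pairs rather than bare entries, the orderings and the grouping key above are lifted to the first tuple element: each per-source stream is already sorted by event sequential id, mergeSorted interleaves them into one globally ordered stream, and groupContiguous then batches by _._1.updateId. A small Pekko sketch of the merge, using simplified element types (Entry and the String contract stand-in are illustrative, not the Canton types):

import org.apache.pekko.NotUsed
import org.apache.pekko.stream.scaladsl.Source

object MergeSortedSketch {
  final case class Entry(eventSequentialId: Long, updateId: String)
  type Paired = (Entry, Option[String]) // stands in for (Entry[T], Option[FatContract])

  val bySeqId: Ordering[Paired] = Ordering.by[Paired, Long](_._1.eventSequentialId)

  // Both inputs must already be sorted by event sequential id; mergeSorted
  // preserves that order globally, so contiguous grouping by update id is safe.
  def mergeSortedPayloads(
      creates: Source[Paired, NotUsed],
      consuming: Source[Paired, NotUsed],
  ): Source[Paired, NotUsed] =
    consuming.mergeSorted(creates)(bySeqId)
}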
requestingParties = txFilteringConstraints.allFilterParties, )(connection) + def getInternalContractIdFromCreated(event: RawLedgerEffectsEventLegacy): Long = event match { + case created: RawCreatedEventLegacy => created.internalContractId + case _: RawExercisedEventLegacy => + throw new IllegalStateException( + "exercised event should not be used to look up a contract" + ) + } + val payloadsCreate = fetchPayloads( queryRange = queryRange, ids = idsCreate, fetchEvents = - fetchEventPayloadsLedgerEffects(EventPayloadSourceForUpdatesLedgerEffects.Create), + fetchEventPayloadsLedgerEffects(EventPayloadSourceForUpdatesLedgerEffectsLegacy.Create), maxParallelPayloadQueries = maxParallelPayloadCreateQueries, dbMetric = dbMetrics.updatesLedgerEffectsStream.fetchEventCreatePayloads, payloadQueriesLimiter = payloadQueriesLimiter, + contractStore = contractStore, + getInternalContractIdO = Some(getInternalContractIdFromCreated), ) val payloadsConsuming = fetchPayloads( queryRange = queryRange, ids = idsConsuming, fetchEvents = - fetchEventPayloadsLedgerEffects(EventPayloadSourceForUpdatesLedgerEffects.Consuming), + fetchEventPayloadsLedgerEffects(EventPayloadSourceForUpdatesLedgerEffectsLegacy.Consuming), maxParallelPayloadQueries = maxParallelPayloadConsumingQueries, dbMetric = dbMetrics.updatesLedgerEffectsStream.fetchEventConsumingPayloads, payloadQueriesLimiter = payloadQueriesLimiter, + contractStore = contractStore, + getInternalContractIdO = None, ) val payloadsNonConsuming = fetchPayloads( queryRange = queryRange, ids = idsNonConsuming, - fetchEvents = - fetchEventPayloadsLedgerEffects(EventPayloadSourceForUpdatesLedgerEffects.NonConsuming), + fetchEvents = fetchEventPayloadsLedgerEffects( + EventPayloadSourceForUpdatesLedgerEffectsLegacy.NonConsuming + ), maxParallelPayloadQueries = maxParallelPayloadNonConsumingQueries, dbMetric = dbMetrics.updatesLedgerEffectsStream.fetchEventNonConsumingPayloads, payloadQueriesLimiter = payloadQueriesLimiter, + contractStore = contractStore, + getInternalContractIdO = None, ) val allSortedPayloads = payloadsConsuming .mergeSorted(payloadsCreate)(orderBySequentialEventIdTree) .mergeSorted(payloadsNonConsuming)(orderBySequentialEventIdTree) UpdateReader - .groupContiguous(allSortedPayloads)(by = _.updateId) + .groupContiguous(allSortedPayloads)(by = _._1.updateId) .mapAsync(transactionsProcessingParallelism)(rawEvents => deserializationQueriesLimiter.execute( deserializeLfValuesTree(rawEvents, internalEventFormat.eventProjectionProperties) @@ -514,35 +552,33 @@ class UpdatesStreamReader( private def fetchIds( queryRange: EventsRange, filter: DecomposedFilter, - target: EventIdSource, + target: EventIdSourceLegacy, idPageSizing: IdPageSizing, maxParallelIdQueriesLimiter: QueueBasedConcurrencyLimiter, metric: DatabaseMetrics, )(implicit loggingContext: LoggingContextWithTrace ): Source[Long, NotUsed] = - paginatingAsyncStream.streamIdsFromSeekPagination( + paginatingAsyncStream.streamIdsFromSeekPaginationWithoutIdFilter( + idStreamName = s"Update IDs for $target $filter", idPageSizing = idPageSizing, idPageBufferSize = maxPagesPerIdPagesBuffer, initialFromIdExclusive = queryRange.startInclusiveEventSeqId, + initialEndInclusive = queryRange.endInclusiveEventSeqId, )( - fetchPage = (state: IdPaginationState) => { + eventStorageBackend.updateStreamingQueries.fetchEventIdsLegacy( + target = target + )( + stakeholderO = filter.party, + templateIdO = filter.templateId, + ) + )( + executeIdQuery = f => maxParallelIdQueriesLimiter.execute {
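`fetchIds` now hands `streamIdsFromSeekPaginationWithoutIdFilter` the backend id query and the limiter-wrapped SQL executor separately instead of assembling each page query inline. The underlying pattern is keyset ("seek") pagination: each page starts strictly after the highest id already delivered, so every query is a plain range scan from the last id. A self-contained sketch, assuming only a page-fetching function:

```scala
// Sketch only: keyset pagination as a Source, assuming a page fetcher
// fetchPage(fromExclusive, endInclusive, pageSize) backed by the id query.
import org.apache.pekko.NotUsed
import org.apache.pekko.stream.scaladsl.Source
import scala.concurrent.{ExecutionContext, Future}

def streamIdsBySeek(
    startExclusive: Long,
    endInclusive: Long,
    pageSize: Int,
)(fetchPage: (Long, Long, Int) => Future[Vector[Long]])(implicit
    ec: ExecutionContext
): Source[Long, NotUsed] =
  Source
    .unfoldAsync(startExclusive) { fromExclusive =>
      fetchPage(fromExclusive, endInclusive, pageSize).map {
        case page if page.isEmpty => None // range exhausted
        case page => Some(page.last -> page) // next page seeks past the last id
      }
    }
    .mapConcat(identity)
```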
globalIdQueriesLimiter.execute { - dbDispatcher.executeSql(metric) { connection => - eventStorageBackend.updateStreamingQueries.fetchEventIds( - target = target - )( - stakeholderO = filter.party, - templateIdO = filter.templateId, - startExclusive = state.fromIdExclusive, - endInclusive = queryRange.endInclusiveEventSeqId, - limit = state.pageSize, - )(connection) - } + dbDispatcher.executeSql(metric)(f) } } - } ) private def mergeSortAndBatch( @@ -563,28 +599,49 @@ class UpdatesStreamReader( maxParallelPayloadQueries: Int, dbMetric: DatabaseMetrics, payloadQueriesLimiter: ConcurrencyLimiter, + contractStore: ContractStore, + getInternalContractIdO: Option[T => Long], )(implicit loggingContext: LoggingContextWithTrace - ): Source[Entry[T], NotUsed] = { + ): Source[(Entry[T], Option[FatContract]), NotUsed] = { // Pekko requires for this buffer's size to be a power of two. val inputBufferSize = Utils.largestSmallerOrEqualPowerOfTwo(maxParallelPayloadQueries) - ids.async + ids .addAttributes(Attributes.inputBuffer(initial = inputBufferSize, max = inputBufferSize)) .mapAsync(maxParallelPayloadQueries)(ids => payloadQueriesLimiter.execute { globalPayloadQueriesLimiter.execute { - dbDispatcher.executeSql(dbMetric) { implicit connection => - queryValidRange.withRangeNotPruned( - minOffsetInclusive = queryRange.startInclusiveOffset, - maxOffsetInclusive = queryRange.endInclusiveOffset, - errorPruning = (prunedOffset: Offset) => - s"Updates request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} precedes pruned offset ${prunedOffset.unwrap}", - errorLedgerEnd = (ledgerEndOffset: Option[Offset]) => - s"Updates request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} is beyond ledger end offset ${ledgerEndOffset - .fold(0L)(_.unwrap)}", - ) { - fetchEvents(ids, connection) - } + queryValidRange.withRangeNotPruned( + minOffsetInclusive = queryRange.startInclusiveOffset, + maxOffsetInclusive = queryRange.endInclusiveOffset, + errorPruning = (prunedOffset: Offset) => + s"Updates request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} precedes pruned offset ${prunedOffset.unwrap}", + errorLedgerEnd = (ledgerEndOffset: Option[Offset]) => + s"Updates request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} is beyond ledger end offset ${ledgerEndOffset + .fold(0L)(_.unwrap)}", + ) { + dbDispatcher + .executeSql(dbMetric) { connection => + fetchEvents(ids, connection) + } + .flatMap(events => + getInternalContractIdO match { + case Some(getInternalContractId) => + val internalContractIds = + events.map(entry => getInternalContractId(entry.event)) + for { + contractsM <- contractStore + .lookupBatchedNonCached(internalContractIds) + .failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError) + } yield events.map { entry => + entry -> contractsM + .get(getInternalContractId(entry.event)) + .map(_.inst) + } + case None => + Future.successful(events.map(_ -> None)) + } + ) } } } @@ -593,7 +650,7 @@ class UpdatesStreamReader( } private def deserializeLfValuesTree( - rawEvents: Vector[Entry[RawTreeEvent]], + rawEvents: Vector[(Entry[RawLedgerEffectsEventLegacy], Option[FatContract])], eventProjectionProperties: EventProjectionProperties, )(implicit lc: LoggingContextWithTrace): Future[Seq[Entry[Event]]] = Timed.future( @@ -601,14 +658,15 @@ class UpdatesStreamReader( implicit val executionContext: ExecutionContext = directEC // Scala 2 implicit scope 
override: shadow the outer scope's implicit by name MonadUtil.sequentialTraverse(rawEvents)( - UpdateReader.deserializeRawTreeEvent(eventProjectionProperties, lfValueTranslation) + UpdateReader + .deserializeRawLedgerEffectsEvent(eventProjectionProperties, lfValueTranslation) ) }, timer = dbMetrics.updatesLedgerEffectsStream.translationTimer, ) private def deserializeLfValues( - rawEvents: Vector[Entry[RawFlatEvent]], + rawEvents: Vector[(Entry[RawAcsDeltaEventLegacy], Option[FatContract])], eventProjectionProperties: EventProjectionProperties, )(implicit lc: LoggingContextWithTrace): Future[Seq[Entry[Event]]] = Timed.future( @@ -616,7 +674,7 @@ class UpdatesStreamReader( implicit val executionContext: ExecutionContext = directEC // Scala 2 implicit scope override: shadow the outer scope's implicit by name MonadUtil.sequentialTraverse(rawEvents)( - UpdateReader.deserializeRawFlatEvent(eventProjectionProperties, lfValueTranslation) + UpdateReader.deserializeRawAcsDeltaEvent(eventProjectionProperties, lfValueTranslation) ) }, timer = dbMetrics.updatesAcsDeltaStream.translationTimer, diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interfaces/LedgerDaoContractsReader.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interfaces/LedgerDaoContractsReader.scala index 66623bf227..08e31bed2b 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interfaces/LedgerDaoContractsReader.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interfaces/LedgerDaoContractsReader.scala @@ -3,11 +3,9 @@ package com.digitalasset.canton.platform.store.interfaces -import com.digitalasset.canton.data.Offset +import com.digitalasset.canton.ledger.participant.state.index.ContractStateStatus.ExistingContractStatus import com.digitalasset.canton.logging.LoggingContextWithTrace -import com.digitalasset.canton.platform.Party import com.digitalasset.canton.platform.store.interfaces.LedgerDaoContractsReader.* -import com.digitalasset.canton.protocol.LfFatContractInst import com.digitalasset.daml.lf.transaction.GlobalKey import com.google.common.annotations.VisibleForTesting @@ -22,14 +20,15 @@ private[platform] trait LedgerDaoContractsReader { * * @param contractId * the contract id to query - * @param notEarlierThanOffset + * @param notEarlierThanEventSeqId * the offset threshold to resolve the contract state (state can be newer, but not older) * @return - * the optional [[ContractState]] + * the optional [[ExistingContractStatus]] indicating whether the contract is active or + * archived; None if the contract is not found.
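With the new signature, callers resolve a contract's fate against an event sequential id rather than an `Offset`, and they get back only the existence status instead of a full `ContractState`. A hypothetical caller illustrating that contract; the status type is reduced here to a two-case stand-in for the imported `ContractStateStatus.ExistingContractStatus`:

```scala
// Sketch only: `lookup` stands in for LedgerDaoContractsReader.lookupContractState,
// and ExistingContractStatus is a simplified stand-in for the real type.
import scala.concurrent.{ExecutionContext, Future}

sealed trait ExistingContractStatus
object ExistingContractStatus {
  case object Active extends ExistingContractStatus
  case object Archived extends ExistingContractStatus
}

def describeContract(
    lookup: (String, Long) => Future[Option[ExistingContractStatus]]
)(contractId: String, notEarlierThanEventSeqId: Long)(implicit
    ec: ExecutionContext
): Future[String] =
  lookup(contractId, notEarlierThanEventSeqId).map {
    case Some(ExistingContractStatus.Active) => s"$contractId is active"
    case Some(ExistingContractStatus.Archived) => s"$contractId is archived"
    case None => s"$contractId is unknown as of the given event sequential id"
  }
```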
*/ - def lookupContractState(contractId: ContractId, notEarlierThanOffset: Offset)(implicit + def lookupContractState(contractId: ContractId, notEarlierThanEventSeqId: Long)(implicit loggingContext: LoggingContextWithTrace - ): Future[Option[ContractState]] + ): Future[Option[ExistingContractStatus]] /** Looks up the state of a contract key * @@ -38,12 +37,12 @@ private[platform] trait LedgerDaoContractsReader { * * @param key * the contract key to query - * @param notEarlierThanOffset + * @param notEarlierThanEventSeqId * the offset threshold to resolve the key state (state can be newer, but not older) * @return * the [[KeyState]] */ - def lookupKeyState(key: GlobalKey, notEarlierThanOffset: Offset)(implicit + def lookupKeyState(key: GlobalKey, notEarlierThanEventSeqId: Long)(implicit loggingContext: LoggingContextWithTrace ): Future[KeyState] @@ -52,7 +51,7 @@ private[platform] trait LedgerDaoContractsReader { * Used to unit test the SQL queries for key lookups. Does not use batching. */ @VisibleForTesting - def lookupKeyStatesFromDb(keys: Seq[GlobalKey], notEarlierThanOffset: Offset)(implicit + def lookupKeyStatesFromDb(keys: Seq[GlobalKey], notEarlierThanEventSeqId: Long)(implicit loggingContext: LoggingContextWithTrace ): Future[Map[GlobalKey, KeyState]] @@ -61,22 +60,10 @@ private[platform] trait LedgerDaoContractsReader { object LedgerDaoContractsReader { import com.digitalasset.daml.lf.value.Value as lfval private type ContractId = lfval.ContractId - private type Contract = LfFatContractInst - - sealed trait ContractState extends Product with Serializable { - def stakeholders: Set[Party] - } - - // Note that for TransactionVersion <= V15 maintainers may not be populated even where globalKey is - final case class ActiveContract(contract: Contract) extends ContractState { - override def stakeholders: Set[Party] = contract.stakeholders - } - - final case class ArchivedContract(stakeholders: Set[Party]) extends ContractState sealed trait KeyState extends Product with Serializable - final case class KeyAssigned(contractId: ContractId, stakeholders: Set[Party]) extends KeyState + final case class KeyAssigned(contractId: ContractId) extends KeyState final case object KeyUnassigned extends KeyState } diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interfaces/TransactionLogUpdate.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interfaces/TransactionLogUpdate.scala index 60a51d46ae..719b7a4d7f 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interfaces/TransactionLogUpdate.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interfaces/TransactionLogUpdate.scala @@ -33,7 +33,7 @@ object TransactionLogUpdate { /** Complete view of a ledger transaction. * * @param updateId - * The transaction it. + * The transaction id. * @param workflowId * The workflow id. 
* @param effectiveAt @@ -124,6 +124,7 @@ object TransactionLogUpdate { contractId: ContractId, ledgerEffectiveTime: Timestamp, templateId: Identifier, + representativePackageId: Ref.PackageId, packageName: PackageName, packageVersion: Option[Ref.PackageVersion], commandId: String, diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interning/StringInterning.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interning/StringInterning.scala index f4917b2903..1c524a2d7e 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interning/StringInterning.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interning/StringInterning.scala @@ -5,7 +5,14 @@ package com.digitalasset.canton.platform.store.interning import com.digitalasset.canton.platform.Party import com.digitalasset.canton.topology.SynchronizerId -import com.digitalasset.daml.lf.data.Ref.{NameTypeConRef, PackageId} +import com.digitalasset.daml.lf.data.Ref.{ + ChoiceName, + Identifier, + NameTypeConRef, + PackageId, + ParticipantId, + UserId, +} /** The facade for all supported string-interning domains * @@ -18,6 +25,10 @@ trait StringInterning { def packageId: StringInterningDomain[PackageId] def party: StringInterningDomain[Party] def synchronizerId: StringInterningDomain[SynchronizerId] + def userId: StringInterningDomain[UserId] + def participantId: StringInterningDomain[ParticipantId] + def choiceName: StringInterningDomain[ChoiceName] + def interfaceId: StringInterningDomain[Identifier] } /** Composes a StringInterningAccessor for the domain-string type and an unsafe diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interning/StringInterningView.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interning/StringInterningView.scala index ea673d8660..ebad74ddef 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interning/StringInterningView.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interning/StringInterningView.scala @@ -7,15 +7,26 @@ import com.digitalasset.canton.concurrent.DirectExecutionContext import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.platform.Party import com.digitalasset.canton.topology.SynchronizerId -import com.digitalasset.daml.lf.data.Ref.{NameTypeConRef, PackageId} +import com.digitalasset.daml.lf.data.Ref.{ + ChoiceName, + Identifier, + NameTypeConRef, + PackageId, + ParticipantId, + UserId, +} import scala.concurrent.{Future, blocking} class DomainStringIterators( val parties: Iterator[String], val templateIds: Iterator[String], - val synchronizerIds: Iterator[String], + val synchronizerIds: Iterator[SynchronizerId], val packageIds: Iterator[String], + val userIds: Iterator[String], + val participantIds: Iterator[String], + val choiceNames: Iterator[String], + val interfaceIds: Iterator[String], ) trait InternizingStringInterningView { @@ -98,6 +109,10 @@ class StringInterningView(override protected val loggerFactory: NamedLoggerFacto private val PartyPrefix = "p|" private val SynchronizerIdPrefix = "d|" private val PackageIdPrefix = "i|" + private val UserIdPrefix = "u|" + private val ParticipantIdPrefix = "n|" + private 
val ChoicePrefix = "c|" + private val InterfacePrefix = "f|" override val templateId: StringInterningDomain[NameTypeConRef] = StringInterningDomain.prefixing( @@ -131,13 +146,51 @@ class StringInterningView(override protected val loggerFactory: NamedLoggerFacto from = identity, ) + override val userId: StringInterningDomain[UserId] = + StringInterningDomain.prefixing( + prefix = UserIdPrefix, + prefixedAccessor = rawAccessor, + to = UserId.assertFromString, + from = identity, + ) + + override def participantId: StringInterningDomain[ParticipantId] = + StringInterningDomain.prefixing( + prefix = ParticipantIdPrefix, + prefixedAccessor = rawAccessor, + to = ParticipantId.assertFromString, + from = identity, + ) + + override val choiceName: StringInterningDomain[ChoiceName] = + StringInterningDomain.prefixing( + prefix = ChoicePrefix, + prefixedAccessor = rawAccessor, + to = ChoiceName.assertFromString, + from = identity, + ) + + override val interfaceId: StringInterningDomain[Identifier] = + StringInterningDomain.prefixing( + prefix = InterfacePrefix, + prefixedAccessor = rawAccessor, + to = Identifier.assertFromString, + from = _.toString, + ) + override def internize(domainStringIterators: DomainStringIterators): Iterable[(Int, String)] = blocking(synchronized { val allPrefixedStrings = domainStringIterators.parties.map(PartyPrefix + _) ++ domainStringIterators.templateIds.map(TemplatePrefix + _) ++ - domainStringIterators.synchronizerIds.map(SynchronizerIdPrefix + _) ++ - domainStringIterators.packageIds.map(PackageIdPrefix + _) + domainStringIterators.synchronizerIds + .map(_.toProtoPrimitive) + .map(SynchronizerIdPrefix + _) ++ + domainStringIterators.packageIds.map(PackageIdPrefix + _) ++ + domainStringIterators.userIds.map(UserIdPrefix + _) ++ + domainStringIterators.participantIds.map(ParticipantIdPrefix + _) ++ + domainStringIterators.choiceNames.map(ChoicePrefix + _) ++ + domainStringIterators.interfaceIds.map(InterfacePrefix + _) val newEntries = RawStringInterning.newEntries( strings = allPrefixedStrings, diff --git a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/utils/EventOps.scala b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/utils/EventOps.scala index fbe667958c..6601bcb468 100644 --- a/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/utils/EventOps.scala +++ b/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/utils/EventOps.scala @@ -3,17 +3,10 @@ package com.digitalasset.canton.platform.store.utils +import com.daml.ledger.api.v2.event.Event import com.daml.ledger.api.v2.event.Event.Event.{Archived, Created, Empty, Exercised} -import com.daml.ledger.api.v2.event.{CreatedEvent, Event, ExercisedEvent} -import com.daml.ledger.api.v2.transaction.TreeEvent -import com.daml.ledger.api.v2.transaction.TreeEvent.Kind.{ - Created as TreeCreated, - Exercised as TreeExercised, -} import com.daml.ledger.api.v2.value.Identifier -import scala.annotation.nowarn - object EventOps { implicit class EventOps(val event: Event) extends AnyVal { @@ -87,28 +80,4 @@ object EventOps { } } - - implicit final class TreeEventKindOps(val kind: TreeEvent.Kind) extends AnyVal { - def fold[T](exercise: ExercisedEvent => T, create: CreatedEvent => T): T = - kind match { - case TreeExercised(value) => exercise(value) - case TreeCreated(value) => create(value) - case tk => throw new IllegalArgumentException(s"Unknown TreeEvent 
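All interning domains share a single raw string table; the one-character prefixes (`p|`, `d|`, `i|`, `u|`, `n|`, `c|`, `f|`) keep equal strings from different domains apart inside it. A toy sketch of the scheme, with `StringInterningDomain.prefixing` and the accessor plumbing collapsed into two small classes (illustrative only):

```scala
// Sketch only: one shared raw table plus per-domain prefixes.
final class RawInterningTable {
  private var idByString = Map.empty[String, Int]
  private var stringById = Vector.empty[String]

  def internize(s: String): Int =
    idByString.getOrElse(s, {
      stringById :+= s
      idByString += s -> (stringById.size - 1)
      stringById.size - 1
    })

  def externalize(id: Int): String = stringById(id)
}

// "u|alice" (user id) and "p|alice" (party) intern to different ids even
// though the unprefixed strings are equal.
final class PrefixingDomain[T](
    prefix: String,
    raw: RawInterningTable,
    to: String => T,
    from: T => String,
) {
  def internalize(value: T): Int = raw.internize(prefix + from(value))
  def externalize(id: Int): T = to(raw.externalize(id).stripPrefix(prefix))
}
```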
type: $tk") - } - } - - // TODO(#23504) remove when TreeEvent is removed - @nowarn("cat=deprecation") - implicit final class TreeEventOps(val event: TreeEvent) extends AnyVal { - def nodeId: Int = event.kind.fold(_.nodeId, _.nodeId) - def witnessParties: Seq[String] = event.kind.fold(_.witnessParties, _.witnessParties) - def modifyWitnessParties(f: Seq[String] => Seq[String]): TreeEvent = - event.kind.fold( - exercise => - TreeEvent(TreeExercised(exercise.copy(witnessParties = f(exercise.witnessParties)))), - create => TreeEvent(TreeCreated(create.copy(witnessParties = f(create.witnessParties)))), - ) - def templateId: Option[Identifier] = event.kind.fold(_.templateId, _.templateId) - } - } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/auth/AuthorizerSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/auth/AuthorizerSpec.scala index 7c0afed7df..a1bccb60f0 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/auth/AuthorizerSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/auth/AuthorizerSpec.scala @@ -10,6 +10,7 @@ import com.digitalasset.daml.lf.data.Ref import io.grpc.{Status, StatusRuntimeException} import org.mockito.MockitoSugar import org.scalatest.Assertion +import org.scalatest.Assertions.succeed import org.scalatest.flatspec.AsyncFlatSpec import org.scalatest.matchers.should.Matchers import scalapb.lenses.Lens @@ -349,96 +350,390 @@ class AuthorizerSpec ), ).foreach(generateAuthorizationTest) - behavior of s"$className.authorize for RequiredClaim.MatchUserId" + val userId1 = "userId1" + val userId2 = "userId2" - it should "authorize for resolvedFromUser and user ID matching user ID" in { - val userL = Lens[Long, String](_.toString)((_, s) => s.toLong) - contextWithClaims( + def matchUserIdTestDef( + claims: Seq[Claim], + userId: Option[String], + req: String, + expectedResult: ExpectedResult, + descSuffix: String, + resolvedFromUser: Boolean = true, + ): TestDefinition = + TestDefinition( + RequiredClaim.MatchUserIdForUserManagement(simpleLens), ClaimSet.Claims.Empty.copy( - claims = Nil, - userId = Some(dummyRequest.toString), - resolvedFromUser = true, - ) - ) { - authorizer().rpc(dummyReqRes)(RequiredClaim.MatchUserIdForUserManagement(userL))(dummyRequest) - }.map(_ shouldBe expectedSuccessfulResponse) - } + claims = claims, + userId = userId, + resolvedFromUser = resolvedFromUser, + ), + expectedResult, + req = req, + descSuffix = descSuffix, + resultAssert = _ shouldBe userId1, + ) - it should "return permission denied for resolvedFromUser and user ID not matching user ID" in { - val userL = Lens[Long, String](_.toString)((_, s) => s.toLong) - contextWithClaims( - ClaimSet.Claims.Empty.copy( - claims = Nil, - userId = Some("x"), - resolvedFromUser = true, - ) - ) { - authorizer().rpc(dummyReqRes)(RequiredClaim.MatchUserIdForUserManagement(userL))(dummyRequest) - } - .transform( - assertExpectedFailure(Status.PERMISSION_DENIED.getCode) - ) - } + behavior of s"$className.authorize for RequiredClaim.MatchUserIdForUserManagement without Admin rights" - it should "return permission denied for not resolvedFromUser" in { - val userL = Lens[Long, String](_.toString)((_, s) => s.toLong) - contextWithClaims( - ClaimSet.Claims.Empty.copy( - claims = Nil, - resolvedFromUser = false, - ) - ) { - authorizer().rpc(dummyReqRes)(RequiredClaim.MatchUserIdForUserManagement(userL))(dummyRequest) - } - .transform( - 
assertExpectedFailure(Status.PERMISSION_DENIED.getCode) - ) - } + List( + matchUserIdTestDef( + Nil, + Some(userId1), + userId1, + ExpectedSuccess, + "when authorized and request user IDs match", + ), + matchUserIdTestDef(Nil, Some(userId1), "", ExpectedSuccess, "when missing request user ID"), + matchUserIdTestDef( + Nil, + Some(userId2), + userId1, + expectedPermissionDenied, + "when authorized and request user IDs don't match", + ), + matchUserIdTestDef(Nil, None, userId1, expectedInternal, "when undefined authorized user ID"), + ).foreach(generateAuthorizationTest) - it should "return permission denied for resolvedFromUser and not defined user ID" in { - val userL = Lens[Long, String](_.toString)((_, s) => s.toLong) - contextWithClaims( - ClaimSet.Claims.Empty.copy( - claims = Nil, - resolvedFromUser = false, - ) - ) { - authorizer().rpc(dummyReqRes)(RequiredClaim.MatchUserIdForUserManagement(userL))(dummyRequest) - } - .transform( - assertExpectedFailure(Status.PERMISSION_DENIED.getCode) - ) - } + behavior of s"$className.authorize for RequiredClaim.MatchUserIdForUserManagement with Admin rights" + List( + matchUserIdTestDef( + Seq(ClaimAdmin), + Some(userId1), + userId1, + ExpectedSuccess, + "when authorized and request user IDs match", + ), + matchUserIdTestDef( + Seq(ClaimAdmin), + Some(userId1), + "", + ExpectedSuccess, + "when missing request user ID", + ), + matchUserIdTestDef( + Seq(ClaimAdmin), + Some(userId2), + userId1, + ExpectedSuccess, + "when authorized and request user IDs don't match", + ), + matchUserIdTestDef( + Seq(ClaimAdmin), + None, + userId1, + expectedInternal, + "when undefined authorized user ID", + ), + ).foreach(generateAuthorizationTest) - it should "authorize for resolvedFromUser and user ID not matching user ID if Admin rights available" in { - val userL = Lens[Long, String](_.toString)((_, s) => s.toLong) - contextWithClaims( - ClaimSet.Claims.Empty.copy( - claims = Seq(ClaimAdmin), - userId = Some("x"), - resolvedFromUser = true, - ) - ) { - authorizer().rpc(dummyReqRes)(RequiredClaim.MatchUserIdForUserManagement(userL))(dummyRequest) - }.map(_ shouldBe expectedSuccessfulResponse) - } + behavior of s"$className.authorize for RequiredClaim.MatchUserIdForUserManagement with IDP Admin rights" + List( + matchUserIdTestDef( + Seq(ClaimIdentityProviderAdmin), + Some(userId1), + userId1, + ExpectedSuccess, + "when authorized and request user IDs match", + ), + matchUserIdTestDef( + Seq(ClaimIdentityProviderAdmin), + Some(userId1), + "", + ExpectedSuccess, + "when missing request user ID", + ), + matchUserIdTestDef( + Seq(ClaimIdentityProviderAdmin), + Some(userId2), + userId1, + ExpectedSuccess, + "when authorized and request user IDs don't match", + ), + matchUserIdTestDef( + Seq(ClaimIdentityProviderAdmin), + None, + userId1, + expectedInternal, + "when undefined authorized user ID", + ), + ).foreach(generateAuthorizationTest) - it should "authorize for resolvedFromUser and user ID not matching user ID if IDP Admin rights available" in { - val userL = Lens[Long, String](_.toString)((_, s) => s.toLong) - contextWithClaims( + behavior of s"$className.authorize for RequiredClaim.MatchUserIdForUserManagement not resolvedFromUser" + + List( + matchUserIdTestDef( + Nil, + Some(userId1), + userId1, + expectedPermissionDenied, + "when authorized and request user IDs match", + resolvedFromUser = false, + ), + matchUserIdTestDef( + Nil, + None, + userId1, + expectedPermissionDenied, + "when undefined authenticated user ID", + resolvedFromUser = false, + ), + 
matchUserIdTestDef( + Nil, + Some(userId2), + userId1, + expectedPermissionDenied, + "when authorized and request user IDs don't match", + resolvedFromUser = false, + ), + matchUserIdTestDef( + Seq(ClaimAdmin), + Some(userId1), + userId1, + ExpectedSuccess, + "when authorized and request user IDs match", + resolvedFromUser = false, + ), + matchUserIdTestDef( + Seq(ClaimAdmin), + None, + userId1, + ExpectedSuccess, + "when undefined authenticated user ID", + resolvedFromUser = false, + ), + matchUserIdTestDef( + Seq(ClaimAdmin), + Some(userId2), + userId1, + ExpectedSuccess, + "when authorized and request user IDs don't match", + resolvedFromUser = false, + ), + ).foreach(generateAuthorizationTest) + + def anyAdminTestDef( + claims: Seq[Claim], + userId: Option[String], + req: String, + expectedResult: ExpectedResult, + descSuffix: String, + resolvedFromUser: Boolean = true, + ): TestDefinition = + TestDefinition( + RequiredClaim.AdminOrIdpAdminOrSelfAdmin(simpleLens), ClaimSet.Claims.Empty.copy( - claims = Seq(ClaimIdentityProviderAdmin), - userId = Some("x"), - resolvedFromUser = true, - ) - ) { - authorizer().rpc(dummyReqRes)(RequiredClaim.MatchUserIdForUserManagement(userL))(dummyRequest) - }.map(_ shouldBe expectedSuccessfulResponse) - } + claims = claims, + userId = userId, + resolvedFromUser = resolvedFromUser, + ), + expectedResult, + req = req, + descSuffix = descSuffix, + ) + + behavior of s"$className.authorize for AdminOrIdpAdminOrSelfAdmin with Admin claims" + List( + anyAdminTestDef( + Seq(ClaimAdmin), + Some(userId2), + userId2, + ExpectedSuccess, + "when authorized and request user IDs match", + ), + anyAdminTestDef( + Seq(ClaimAdmin), + Some(userId1), + userId2, + ExpectedSuccess, + "when authorized and request user IDs don't match", + ), + anyAdminTestDef( + Seq(ClaimAdmin), + Some(userId1), + "", + ExpectedSuccess, + "when request user ID missing", + ), + anyAdminTestDef( + Seq(ClaimAdmin), + None, + userId2, + ExpectedSuccess, + "when authorized user ID missing", + ), + ).foreach(generateAuthorizationTest) + + behavior of s"$className.authorize for AdminOrIdpAdminOrSelfAdmin with Idp Admin claims" + List( + anyAdminTestDef( + Seq(ClaimIdentityProviderAdmin), + Some(userId2), + userId2, + ExpectedSuccess, + "when authorized and request user IDs match", + ), + anyAdminTestDef( + Seq(ClaimIdentityProviderAdmin), + Some(userId1), + userId2, + ExpectedSuccess, + "when authorized and request user IDs don't match", + ), + anyAdminTestDef( + Seq(ClaimIdentityProviderAdmin), + Some(userId1), + "", + ExpectedSuccess, + "when request user ID missing", + ), + anyAdminTestDef( + Seq(ClaimIdentityProviderAdmin), + None, + userId2, + ExpectedSuccess, + "when authorized user ID missing", + ), + ) + .foreach(generateAuthorizationTest) + + behavior of s"$className.authorize for AdminOrIdpAdminOrSelfAdmin without Admin claims" + List( + anyAdminTestDef( + Nil, + Some(userId2), + userId2, + ExpectedSuccess, + "when authorized and request user IDs match", + ), + anyAdminTestDef( + Nil, + Some(userId1), + userId2, + expectedPermissionDenied, + "when authorized and request user IDs don't match", + ), + anyAdminTestDef( + Nil, + Some(userId1), + "", + expectedPermissionDenied, + "when request user ID missing", + ), + anyAdminTestDef(Nil, None, userId2, expectedInternal, "when authorized user ID missing"), + ).foreach(generateAuthorizationTest) + + behavior of s"$className.authorize for AdminOrIdpAdminOrSelfAdmin with Admin claims when not resolvedFromUser" + List( + anyAdminTestDef( + 
Seq(ClaimAdmin), + Some(userId2), + userId2, + ExpectedSuccess, + "when authorized and request user IDs match", + resolvedFromUser = false, + ), + anyAdminTestDef( + Seq(ClaimAdmin), + Some(userId1), + userId2, + ExpectedSuccess, + "when authorized and request user IDs don't match", + resolvedFromUser = false, + ), + anyAdminTestDef( + Seq(ClaimAdmin), + Some(userId1), + "", + ExpectedSuccess, + "when request user ID missing", + resolvedFromUser = false, + ), + anyAdminTestDef( + Seq(ClaimAdmin), + None, + userId2, + ExpectedSuccess, + "when authorized user ID missing", + resolvedFromUser = false, + ), + ).foreach(generateAuthorizationTest) + + behavior of s"$className.authorize for AdminOrIdpAdminOrSelfAdmin with Idp Admin claims when not resolvedFromUser" + List( + anyAdminTestDef( + Seq(ClaimIdentityProviderAdmin), + Some(userId2), + userId2, + ExpectedSuccess, + "when authorized and request user IDs match", + resolvedFromUser = false, + ), + anyAdminTestDef( + Seq(ClaimIdentityProviderAdmin), + Some(userId1), + userId2, + ExpectedSuccess, + "when authorized and request user IDs don't match", + resolvedFromUser = false, + ), + anyAdminTestDef( + Seq(ClaimIdentityProviderAdmin), + Some(userId1), + "", + ExpectedSuccess, + "when request user ID missing", + resolvedFromUser = false, + ), + anyAdminTestDef( + Seq(ClaimIdentityProviderAdmin), + None, + userId2, + ExpectedSuccess, + "when authorized user ID missing", + resolvedFromUser = false, + ), + ).foreach(generateAuthorizationTest) + + behavior of s"$className.authorize for AdminOrIdpAdminOrSelfAdmin without Admin claims when not resolvedFromUser" + List( + anyAdminTestDef( + Nil, + Some(userId2), + userId2, + expectedPermissionDenied, + "when authorized and request user IDs match", + resolvedFromUser = false, + ), + anyAdminTestDef( + Nil, + Some(userId1), + userId2, + expectedPermissionDenied, + "when authorized and request user IDs don't match", + resolvedFromUser = false, + ), + anyAdminTestDef( + Nil, + Some(userId1), + "", + expectedPermissionDenied, + "when request user ID missing", + resolvedFromUser = false, + ), + anyAdminTestDef( + Nil, + None, + userId2, + expectedPermissionDenied, + "when authorized user ID missing", + resolvedFromUser = false, + ), + ).foreach(generateAuthorizationTest) behavior of s"$className.authorize for RequiredClaim.MatchUserId" - it should "authorize for user ID matching user ID" in { + it should "authorize for authenticated user ID matching request user ID" in { val userIdL = Lens[Long, String](_.toString)((_, s) => s.toLong) contextWithClaims( ClaimSet.Claims.Empty.copy( @@ -450,7 +745,7 @@ class AuthorizerSpec }.map(_ shouldBe expectedSuccessfulResponse) } - it should "authorize for no user ID specified but available in the claims" in { + it should "authorize for authenticated user ID when request user ID is missing" in { val userIdL = Lens[Long, String](l => if (l == 0) "" else l.toString)((_, s) => s.toLong) contextWithClaims( ClaimSet.Claims.Empty.copy( @@ -462,7 +757,7 @@ class AuthorizerSpec }.map(_ shouldBe expectedSuccessfulResponse) } - it should "return permission denied for user ID not matching user ID" in { + it should "return permission denied for authenticated user ID not matching request user ID" in { val userIdL = Lens[Long, String](_.toString)((_, s) => s.toLong) contextWithClaims( ClaimSet.Claims.Empty.copy( @@ -477,7 +772,7 @@ class AuthorizerSpec ) } - it should "return permission denied for user ID not matching user ID if skipUserIdValidationForAnyPartyReaders" in { + it 
should "return permission denied for authenticated user ID not matching request user ID if skipUserIdValidationForAnyPartyReaders" in { val userIdL = Lens[Long, String](_.toString)((_, s) => s.toLong) contextWithClaims( ClaimSet.Claims.Empty.copy( @@ -497,7 +792,7 @@ class AuthorizerSpec ) } - it should "authorize for user ID not matching user ID for AnyPartyReaders-s if skipUserIdValidationForAnyPartyReaders" in { + it should "authorize for authenticated user ID not matching request user ID for AnyPartyReaders-s if skipUserIdValidationForAnyPartyReaders" in { val userIdL = Lens[Long, String](_.toString)((_, s) => s.toLong) contextWithClaims( ClaimSet.Claims.Empty.copy( @@ -514,7 +809,7 @@ class AuthorizerSpec }.map(_ shouldBe expectedSuccessfulResponse) } - it should "return invalid argument for no user ID in claims, and none provided in the request" in { + it should "return invalid argument for no authenticated user ID, and no request user ID" in { val userIdL = Lens[Long, String](l => if (l == 0) "" else l.toString)((_, s) => s.toLong) contextWithClaims( ClaimSet.Claims.Empty.copy( @@ -624,14 +919,14 @@ class AuthorizerSpec val prettyClaims = s"Claims(${td.suppliedClaim.claims.map(_.toString).mkString(",")})" val testDescription = td.expectedResult match { case ExpectedSuccess => - s"authorize for $prettyClaims" + s"authorize for $prettyClaims ${td.descSuffix}" case ExpectedFailure(code) => - s"return $code for $prettyClaims" + s"return $code for $prettyClaims ${td.descSuffix}" } it should testDescription in { contextWithClaims(td.suppliedClaim) { - authorizer().rpc(dummyReqRes)(td.requiredClaim)(dummyRequest) - }.transform { + authorizer().rpc(simpleReqRes)(td.requiredClaim)(td.req) + }.map(td.resultAssert).transform { case Success(_) => td.expectedResult match { case ExpectedSuccess => @@ -659,10 +954,18 @@ object AuthorizerSpec { final case class ExpectedFailure(code: Status.Code) extends ExpectedResult val expectedPermissionDenied: ExpectedFailure = ExpectedFailure(Status.PERMISSION_DENIED.getCode) + val expectedInternal: ExpectedFailure = ExpectedFailure(Status.INTERNAL.getCode) + + val simpleRequest: String = "simpleRequest" + val simpleReqRes: String => Future[String] = s => Future.successful(s) + val simpleLens: Lens[String, String] = Lens[String, String](s => s)((_, s) => s) final case class TestDefinition( - requiredClaim: RequiredClaim[Long], + requiredClaim: RequiredClaim[String], suppliedClaim: ClaimSet.Claims, expectedResult: ExpectedResult, + req: String = simpleRequest, + descSuffix: String = "", + resultAssert: String => Assertion = _ => succeed, ) } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/TraceIdentifiersTest.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/TraceIdentifiersTest.scala index 405cd88edb..60217eb056 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/TraceIdentifiersTest.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/TraceIdentifiersTest.scala @@ -3,15 +3,11 @@ package com.digitalasset.canton.ledger.api -import com.daml.ledger.api.v2.transaction.{Transaction, TransactionTree} +import com.daml.ledger.api.v2.transaction.Transaction import com.daml.tracing.SpanAttribute import org.scalatest.matchers.should.Matchers.* import org.scalatest.wordspec.AnyWordSpec -import scala.annotation.nowarn - -// TODO(#23504) remove tests for TransactionTrees 
when TransactionTree is removed -@nowarn("cat=deprecation") class TraceIdentifiersTest extends AnyWordSpec { val expected = Map( (SpanAttribute.TransactionId, "transaction-id"), @@ -46,28 +42,4 @@ class TraceIdentifiersTest extends AnyWordSpec { } } - "extract identifiers from TransactionTree" should { - "set non-empty values" in { - val observed = TraceIdentifiers.fromTransactionTree( - TransactionTree( - "transaction-id", - "command-id", - "workflow-id", - None, - 12345678L, - Map(), - "", - None, - None, - ) - ) - observed shouldEqual expected - } - - "not set empty values" in { - val observed = - TraceIdentifiers.fromTransaction(Transaction.defaultInstance) - observed shouldBe empty - } - } } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/RequiredClaimsSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/RequiredClaimsSpec.scala index 27bb212460..1127050d74 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/RequiredClaimsSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/RequiredClaimsSpec.scala @@ -9,7 +9,6 @@ import com.daml.ledger.api.v2.transaction_filter.{ Filters, ParticipantAuthorizationTopologyFormat, TopologyFormat, - TransactionFilter, TransactionFormat, UpdateFormat, } @@ -19,10 +18,6 @@ import org.scalatest.flatspec.AsyncFlatSpec import org.scalatest.matchers.should.Matchers import scalapb.lenses.Lens -import scala.annotation.nowarn - -// TODO(#23504) remove TransactionFilter related when TransactionFilter is removed -@nowarn("cat=deprecation") class RequiredClaimsSpec extends AsyncFlatSpec with BaseTest with Matchers { behavior of "submissionClaims" @@ -113,52 +108,6 @@ class RequiredClaimsSpec extends AsyncFlatSpec with BaseTest with Matchers { ) } - behavior of "transactionFilterClaims" - - it should "compute the correct claims in the happy path" in { - RequiredClaims.transactionFilterClaims[String]( - TransactionFilter( - filtersByParty = Map( - "a" -> Filters(Nil), - "b" -> Filters(Nil), - "c" -> Filters(Nil), - ), - filtersForAnyParty = Some(Filters(Nil)), - ) - ) should contain theSameElementsAs RequiredClaims[String]( - RequiredClaim.ReadAs("a"), - RequiredClaim.ReadAs("b"), - RequiredClaim.ReadAs("c"), - RequiredClaim.ReadAsAnyParty(), - ) - } - - it should "compute the correct claims if no any party filters" in { - RequiredClaims.transactionFilterClaims[String]( - TransactionFilter( - filtersByParty = Map( - "a" -> Filters(Nil), - "b" -> Filters(Nil), - "c" -> Filters(Nil), - ), - filtersForAnyParty = None, - ) - ) should contain theSameElementsAs RequiredClaims[String]( - RequiredClaim.ReadAs("a"), - RequiredClaim.ReadAs("b"), - RequiredClaim.ReadAs("c"), - ) - } - - it should "compute the correct claims if no any party filters and no party filters" in { - RequiredClaims.transactionFilterClaims[String]( - TransactionFilter( - filtersByParty = Map.empty, - filtersForAnyParty = None, - ) - ) shouldBe Nil - } - behavior of "readAsForAllParties" it should "compute the correct claims in the happy path" in { diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/StreamAuthorizationComponentSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/StreamAuthorizationComponentSpec.scala index 6f370ada71..e8e307dd3b 100644 --- 
a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/StreamAuthorizationComponentSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/StreamAuthorizationComponentSpec.scala @@ -5,7 +5,13 @@ package com.digitalasset.canton.ledger.api.auth import com.daml.grpc.adapter.client.pekko.ClientAdapter import com.daml.ledger.api.testing.utils.PekkoBeforeAndAfterAll -import com.daml.ledger.api.v2.transaction_filter.{Filters, TransactionFilter} +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_ACS_DELTA +import com.daml.ledger.api.v2.transaction_filter.{ + EventFormat, + Filters, + TransactionFormat, + UpdateFormat, +} import com.daml.ledger.api.v2.update_service.* import com.daml.ledger.api.v2.update_service.UpdateServiceGrpc.{UpdateService, UpdateServiceStub} import com.daml.ledger.resources.{ResourceContext, ResourceOwner} @@ -44,13 +50,10 @@ import org.scalatest.matchers.should.Matchers import java.time.Instant import java.util.concurrent.atomic.AtomicReference -import scala.annotation.nowarn import scala.concurrent.duration.FiniteDuration import scala.concurrent.{ExecutionContextExecutor, Future, Promise} import scala.util.Try -// TODO(#23504) remove suppressions when deprecated methods are removed -@nowarn("cat=deprecation") class StreamAuthorizationComponentSpec extends AsyncFlatSpec with BaseTest @@ -270,27 +273,6 @@ class StreamAuthorizationComponentSpec def notSupported = throw new UnsupportedOperationException() - override def getUpdateTrees( - request: GetUpdatesRequest, - responseObserver: StreamObserver[GetUpdateTreesResponse], - ): Unit = notSupported - - override def getTransactionTreeByOffset( - request: GetTransactionByOffsetRequest - ): Future[GetTransactionTreeResponse] = notSupported - - override def getTransactionTreeById( - request: GetTransactionByIdRequest - ): Future[GetTransactionTreeResponse] = notSupported - - override def getTransactionByOffset( - request: GetTransactionByOffsetRequest - ): Future[GetTransactionResponse] = notSupported - - override def getTransactionById( - request: GetTransactionByIdRequest - ): Future[GetTransactionResponse] = notSupported - override def getUpdateByOffset( request: GetUpdateByOffsetRequest ): Future[GetUpdateResponse] = notSupported @@ -346,17 +328,27 @@ class StreamAuthorizationComponentSpec GetUpdatesRequest( beginExclusive = 0, endInclusive = None, - filter = Some( - TransactionFilter( - Map( - partyId1 -> Filters(Nil), - partyId2 -> Filters(Nil), + updateFormat = Some( + UpdateFormat( + includeTransactions = Some( + TransactionFormat( + eventFormat = Some( + EventFormat( + filtersByParty = Map( + partyId1 -> Filters(Nil), + partyId2 -> Filters(Nil), + ), + filtersForAnyParty = None, + verbose = false, + ) + ), + transactionShape = TRANSACTION_SHAPE_ACS_DELTA, + ) ), - None, + includeReassignments = None, + includeTopologyEvents = None, ) ), - verbose = false, - updateFormat = None, ), ) } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/services/ApiServicesRequiredClaimSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/services/ApiServicesRequiredClaimSpec.scala index 4ca87d35c7..e7a4909942 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/services/ApiServicesRequiredClaimSpec.scala +++ 
b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/services/ApiServicesRequiredClaimSpec.scala @@ -26,7 +26,6 @@ import com.daml.ledger.api.v2.transaction_filter.{ Filters, ParticipantAuthorizationTopologyFormat, TopologyFormat, - TransactionFilter, TransactionFormat, UpdateFormat, } @@ -46,10 +45,7 @@ import org.scalatest.matchers.should.Matchers import scalapb.lenses.Lens import java.util.UUID -import scala.annotation.nowarn -// TODO(#23504) remove TransactionFilter once all usages are migrated to EventFormat -@nowarn("cat=deprecation") class ApiServicesRequiredClaimSpec extends AsyncFlatSpec with BaseTest with Matchers { behavior of "CommandCompletionServiceAuthorization.completionStreamClaims" @@ -127,8 +123,6 @@ class ApiServicesRequiredClaimSpec extends AsyncFlatSpec with BaseTest with Matc it should "compute the correct claims in the happy path" in { StateServiceAuthorization.getActiveContractsClaims( GetActiveContractsRequest( - filter = None, - verbose = false, activeAtOffset = 15, eventFormat = Some( EventFormat( @@ -153,8 +147,6 @@ class ApiServicesRequiredClaimSpec extends AsyncFlatSpec with BaseTest with Matc it should "compute the correct claims if no filtersForAnyParty" in { StateServiceAuthorization.getActiveContractsClaims( GetActiveContractsRequest( - filter = None, - verbose = false, activeAtOffset = 15, eventFormat = Some( EventFormat( @@ -178,8 +170,6 @@ class ApiServicesRequiredClaimSpec extends AsyncFlatSpec with BaseTest with Matc it should "compute the correct claims if no filtersByParty" in { StateServiceAuthorization.getActiveContractsClaims( GetActiveContractsRequest( - filter = None, - verbose = false, activeAtOffset = 15, eventFormat = Some( EventFormat( @@ -195,148 +185,12 @@ class ApiServicesRequiredClaimSpec extends AsyncFlatSpec with BaseTest with Matc it should "compute the correct claims if no eventFormat" in { StateServiceAuthorization.getActiveContractsClaims( GetActiveContractsRequest( - filter = None, - verbose = true, activeAtOffset = 15, eventFormat = None, ) ) shouldBe Nil } - // TODO(i23504) Remove - it should "compute the aggregated claims if both legacy and new usage" in { - StateServiceAuthorization.getActiveContractsClaims( - GetActiveContractsRequest( - filter = Some( - TransactionFilter( - filtersByParty = Map( - "1" -> Filters(Nil), - "2" -> Filters(Nil), - "3" -> Filters(Nil), - ), - filtersForAnyParty = Some(Filters(Nil)), - ) - ), - verbose = true, - activeAtOffset = 15, - eventFormat = Some( - EventFormat( - filtersByParty = Map( - "a" -> Filters(Nil), - "b" -> Filters(Nil), - "c" -> Filters(Nil), - ), - filtersForAnyParty = Some(Filters(Nil)), - verbose = true, - ) - ), - ) - ) should contain theSameElementsAs RequiredClaims[GetActiveContractsRequest]( - RequiredClaim.ReadAs("a"), - RequiredClaim.ReadAs("b"), - RequiredClaim.ReadAs("c"), - RequiredClaim.ReadAsAnyParty(), - RequiredClaim.ReadAs("1"), - RequiredClaim.ReadAs("2"), - RequiredClaim.ReadAs("3"), - RequiredClaim.ReadAsAnyParty(), - ) - } - - // TODO(i23504) Remove - it should "compute the aggregated claims if both legacy and new usage, if one of the any party filters missing" in { - StateServiceAuthorization.getActiveContractsClaims( - GetActiveContractsRequest( - filter = Some( - TransactionFilter( - filtersByParty = Map( - "1" -> Filters(Nil), - "2" -> Filters(Nil), - "3" -> Filters(Nil), - ), - filtersForAnyParty = None, - ) - ), - verbose = true, - activeAtOffset = 15, - eventFormat = Some( - EventFormat( - filtersByParty = 
Map( - "a" -> Filters(Nil), - "b" -> Filters(Nil), - "c" -> Filters(Nil), - ), - filtersForAnyParty = Some(Filters(Nil)), - verbose = true, - ) - ), - ) - ) should contain theSameElementsAs RequiredClaims[GetActiveContractsRequest]( - RequiredClaim.ReadAs("a"), - RequiredClaim.ReadAs("b"), - RequiredClaim.ReadAs("c"), - RequiredClaim.ReadAsAnyParty(), - RequiredClaim.ReadAs("1"), - RequiredClaim.ReadAs("2"), - RequiredClaim.ReadAs("3"), - ) - } - - // TODO(i23504) Remove - it should "compute the aggregated claims if both legacy and new usage, if only one any party filters, and one by party filters" in { - StateServiceAuthorization.getActiveContractsClaims( - GetActiveContractsRequest( - filter = Some( - TransactionFilter( - filtersByParty = Map( - "1" -> Filters(Nil), - "2" -> Filters(Nil), - "3" -> Filters(Nil), - ), - filtersForAnyParty = None, - ) - ), - verbose = true, - activeAtOffset = 15, - eventFormat = Some( - EventFormat( - filtersByParty = Map.empty, - filtersForAnyParty = Some(Filters(Nil)), - verbose = true, - ) - ), - ) - ) should contain theSameElementsAs RequiredClaims[GetActiveContractsRequest]( - RequiredClaim.ReadAsAnyParty(), - RequiredClaim.ReadAs("1"), - RequiredClaim.ReadAs("2"), - RequiredClaim.ReadAs("3"), - ) - } - - // TODO(i23504) Remove - it should "compute the aggregated claims if both legacy and new usage, if all empty" in { - StateServiceAuthorization.getActiveContractsClaims( - GetActiveContractsRequest( - filter = Some( - TransactionFilter( - filtersByParty = Map.empty, - filtersForAnyParty = None, - ) - ), - verbose = true, - activeAtOffset = 15, - eventFormat = Some( - EventFormat( - filtersByParty = Map.empty, - filtersForAnyParty = None, - verbose = true, - ) - ), - ) - ) shouldBe Nil - } - behavior of "InteractiveSubmissionServiceAuthorization.prepareSubmission" it should "compute the correct claims in the happy path" in { @@ -495,8 +349,6 @@ class ApiServicesRequiredClaimSpec extends AsyncFlatSpec with BaseTest with Matc GetUpdatesRequest( beginExclusive = 10, endInclusive = Some(15), - filter = None, - verbose = true, updateFormat = Some( UpdateFormat( includeTransactions = Some( @@ -546,8 +398,6 @@ class ApiServicesRequiredClaimSpec extends AsyncFlatSpec with BaseTest with Matc GetUpdatesRequest( beginExclusive = 10, endInclusive = Some(15), - filter = None, - verbose = true, updateFormat = Some( UpdateFormat( includeTransactions = Some( @@ -603,8 +453,6 @@ class ApiServicesRequiredClaimSpec extends AsyncFlatSpec with BaseTest with Matc GetUpdatesRequest( beginExclusive = 10, endInclusive = Some(15), - filter = None, - verbose = true, updateFormat = Some( UpdateFormat( includeTransactions = Some( @@ -638,8 +486,6 @@ class ApiServicesRequiredClaimSpec extends AsyncFlatSpec with BaseTest with Matc GetUpdatesRequest( beginExclusive = 10, endInclusive = Some(15), - filter = None, - verbose = true, updateFormat = Some( UpdateFormat( includeTransactions = Some( @@ -663,8 +509,6 @@ class ApiServicesRequiredClaimSpec extends AsyncFlatSpec with BaseTest with Matc GetUpdatesRequest( beginExclusive = 10, endInclusive = Some(15), - filter = None, - verbose = false, updateFormat = None, ) ) shouldBe Nil @@ -675,8 +519,6 @@ class ApiServicesRequiredClaimSpec extends AsyncFlatSpec with BaseTest with Matc GetUpdatesRequest( beginExclusive = 10, endInclusive = Some(15), - filter = None, - verbose = true, updateFormat = Some( UpdateFormat( includeTransactions = None, @@ -698,8 +540,6 @@ class ApiServicesRequiredClaimSpec extends AsyncFlatSpec with BaseTest with Matc 
GetUpdatesRequest( beginExclusive = 10, endInclusive = Some(15), - filter = None, - verbose = true, updateFormat = Some( UpdateFormat( includeTransactions = Some( @@ -729,8 +569,6 @@ class ApiServicesRequiredClaimSpec extends AsyncFlatSpec with BaseTest with Matc GetUpdatesRequest( beginExclusive = 10, endInclusive = Some(15), - filter = None, - verbose = true, updateFormat = Some( UpdateFormat( includeReassignments = Some( @@ -755,8 +593,6 @@ class ApiServicesRequiredClaimSpec extends AsyncFlatSpec with BaseTest with Matc GetUpdatesRequest( beginExclusive = 10, endInclusive = Some(15), - filter = None, - verbose = true, updateFormat = Some( UpdateFormat( includeTransactions = None, @@ -772,79 +608,6 @@ class ApiServicesRequiredClaimSpec extends AsyncFlatSpec with BaseTest with Matc ) } - // TODO(i23504) Remove - it should "compute the aggregated claims if legacy, happy path" in { - UpdateServiceAuthorization.getUpdatesClaims( - GetUpdatesRequest( - beginExclusive = 10, - endInclusive = Some(15), - filter = Some( - TransactionFilter( - filtersByParty = Map( - "1" -> Filters(Nil), - "2" -> Filters(Nil), - "3" -> Filters(Nil), - ), - filtersForAnyParty = Some(Filters(Nil)), - ) - ), - verbose = true, - updateFormat = None, - ) - ) should contain theSameElementsAs RequiredClaims[GetActiveContractsRequest]( - RequiredClaim.ReadAs("1"), - RequiredClaim.ReadAs("2"), - RequiredClaim.ReadAs("3"), - RequiredClaim.ReadAsAnyParty(), - ) - } - - // TODO(i23504) Remove - it should "compute the aggregated claims if legacy, if the any party filters are missing" in { - UpdateServiceAuthorization.getUpdatesClaims( - GetUpdatesRequest( - beginExclusive = 10, - endInclusive = Some(15), - filter = Some( - TransactionFilter( - filtersByParty = Map( - "1" -> Filters(Nil), - "2" -> Filters(Nil), - "3" -> Filters(Nil), - ), - filtersForAnyParty = None, - ) - ), - verbose = true, - updateFormat = None, - ) - ) should contain theSameElementsAs RequiredClaims[GetActiveContractsRequest]( - RequiredClaim.ReadAs("1"), - RequiredClaim.ReadAs("2"), - RequiredClaim.ReadAs("3"), - ) - } - - // TODO(i23504) Remove - it should "compute the aggregated claims if legacy, if only any party filters exist" in { - UpdateServiceAuthorization.getUpdatesClaims( - GetUpdatesRequest( - beginExclusive = 10, - endInclusive = Some(15), - filter = Some( - TransactionFilter( - filtersByParty = Map.empty, - filtersForAnyParty = Some(Filters(Nil)), - ) - ), - verbose = true, - updateFormat = None, - ) - ) should contain theSameElementsAs RequiredClaims[GetActiveContractsRequest]( - RequiredClaim.ReadAsAnyParty() - ) - } - behavior of "UserManagementServiceAuthorization.userReaderClaims" it should "compute the correct claims in the happy path" in { @@ -920,6 +683,7 @@ object ApiServicesRequiredClaimSpec { packageIdSelectionPreference = Seq.empty, verboseHashing = true, prefetchContractKeys = Seq.empty, + maxRecordTime = Option.empty, ) val preparedTransaction = PreparedTransaction( @@ -940,6 +704,7 @@ object ApiServicesRequiredClaimSpec { minLedgerEffectiveTime = None, maxLedgerEffectiveTime = None, globalKeyMapping = Seq.empty, + maxRecordTime = Option.empty, ) ), ) diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/SubmitRequestValidatorTest.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/SubmitRequestValidatorTest.scala index 0fa7f0bf96..7d33fb6df3 100644 --- 
a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/SubmitRequestValidatorTest.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/SubmitRequestValidatorTest.scala @@ -29,8 +29,12 @@ import com.digitalasset.daml.lf.command.{ } import com.digitalasset.daml.lf.data.* import com.digitalasset.daml.lf.data.Ref.TypeConRef -import com.digitalasset.daml.lf.language.LanguageVersion -import com.digitalasset.daml.lf.transaction.{CreationTime, FatContractInstance, Node as LfNode} +import com.digitalasset.daml.lf.transaction.{ + CreationTime, + FatContractInstance, + Node as LfNode, + SerializationVersion as LfSerializationVersion, +} import com.digitalasset.daml.lf.value.Value as Lf import com.digitalasset.daml.lf.value.Value.ValueRecord import com.google.protobuf.duration.Duration @@ -144,7 +148,7 @@ class SubmitRequestValidatorTest signatories = Set(Ref.Party.assertFromString("party")), stakeholders = Set(Ref.Party.assertFromString("party")), keyOpt = None, - version = LanguageVersion.v2_dev, + version = LfSerializationVersion.VDev, ), createTime = CreationTime.CreatedAt(Time.Timestamp.now()), authenticationData = Bytes.Empty, @@ -203,7 +207,7 @@ class SubmitRequestValidatorTest private val testedCommandValidator = { val validateDisclosedContractsMock = mock[ValidateDisclosedContracts] - when(validateDisclosedContractsMock(any[Commands])(any[ErrorLoggingContext])) + when(validateDisclosedContractsMock.validateCommands(any[Commands])(any[ErrorLoggingContext])) .thenReturn(Right(internal.disclosedContracts)) new CommandsValidator( @@ -484,7 +488,7 @@ class SubmitRequestValidatorTest internal.maxDeduplicationDuration, ) inside(result) { case Right(valid) => - valid.deduplicationPeriod shouldBe (expectedDeduplication) + valid.deduplicationPeriod shouldBe expectedDeduplication } } } @@ -555,7 +559,9 @@ class SubmitRequestValidatorTest "fail when disclosed contracts validation fails" in { val validateDisclosedContractsMock = mock[ValidateDisclosedContracts] - when(validateDisclosedContractsMock(any[Commands])(any[ErrorLoggingContext])) + when( + validateDisclosedContractsMock.validateCommands(any[Commands])(any[ErrorLoggingContext]) + ) .thenReturn( Left( RequestValidationErrors.InvalidField @@ -565,8 +571,8 @@ class SubmitRequestValidatorTest ) val failingDisclosedContractsValidator = new CommandsValidator( - validateDisclosedContracts = validateDisclosedContractsMock, validateUpgradingPackageResolutions = ValidateUpgradingPackageResolutions.Empty, + validateDisclosedContracts = validateDisclosedContractsMock, ) requestMustFailWith( @@ -587,7 +593,9 @@ class SubmitRequestValidatorTest "when upgrading" should { val validateDisclosedContractsMock = mock[ValidateDisclosedContracts] - when(validateDisclosedContractsMock(any[Commands])(any[ErrorLoggingContext])) + when( + validateDisclosedContractsMock.validateCommands(any[Commands])(any[ErrorLoggingContext]) + ) .thenReturn(Right(internal.disclosedContracts)) val packageMap = diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/UpdateServiceRequestValidatorTest.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/UpdateServiceRequestValidatorTest.scala index 6986879e68..83334bee86 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/UpdateServiceRequestValidatorTest.scala 
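The migrated requests in these test files all repeat one nesting: per-party `Filters` inside an `EventFormat`, inside a `TransactionFormat`, inside an `UpdateFormat`. A small helper capturing that shape; the constructors and field names are exactly those used in this diff, while the helper itself is illustrative, not library API:

```scala
// Sketch only: builds the UpdateFormat nesting used throughout these tests.
import com.daml.ledger.api.v2.transaction_filter.{
  EventFormat,
  Filters,
  TransactionFormat,
  TransactionShape,
  UpdateFormat,
}

def acsDeltaUpdateFormat(parties: Seq[String]): UpdateFormat =
  UpdateFormat(
    includeTransactions = Some(
      TransactionFormat(
        eventFormat = Some(
          EventFormat(
            filtersByParty = parties.map(_ -> Filters(Nil)).toMap,
            filtersForAnyParty = None,
            verbose = false,
          )
        ),
        transactionShape = TransactionShape.TRANSACTION_SHAPE_ACS_DELTA,
      )
    ),
    includeReassignments = None,
    includeTopologyEvents = None,
  )
```

For example, `acsDeltaUpdateFormat(Seq(partyId1, partyId2))` reproduces the `updateFormat` built inline in the StreamAuthorizationComponentSpec hunk above.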
+++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/UpdateServiceRequestValidatorTest.scala @@ -11,8 +11,8 @@ import com.daml.ledger.api.v2.transaction_filter.{ *, } import com.daml.ledger.api.v2.update_service.{ - GetTransactionByIdRequest, - GetTransactionByOffsetRequest, + GetUpdateByIdRequest, + GetUpdateByOffsetRequest, GetUpdatesRequest, } import com.daml.ledger.api.v2.value.Identifier @@ -24,10 +24,6 @@ import io.grpc.Status.Code.* import org.mockito.MockitoSugar import org.scalatest.wordspec.AnyWordSpec -import scala.annotation.nowarn - -// TODO(#23504) remove suppressions when deprecated methods are removed -@nowarn("cat=deprecation") class UpdateServiceRequestValidatorTest extends AnyWordSpec with ValidatorTestUtils @@ -36,49 +32,6 @@ class UpdateServiceRequestValidatorTest private val templateId = Identifier(packageId, includedModule, includedTemplate) - private def txReqBuilderLegacy(templateIdsForParty: Seq[Identifier]) = GetUpdatesRequest( - beginExclusive = 0L, - endInclusive = Some(offsetLong), - filter = Some( - TransactionFilter( - Map( - party -> - Filters( - templateIdsForParty - .map(tId => - ProtoCumulativeFilter( - IdentifierFilter.TemplateFilter( - ProtoTemplateFilter(Some(tId), includeCreatedEventBlob = false) - ) - ) - ) - ++ - Seq( - ProtoCumulativeFilter( - IdentifierFilter.InterfaceFilter( - ProtoInterfaceFilter( - interfaceId = Some( - Identifier( - packageId, - moduleName = includedModule, - entityName = includedTemplate, - ) - ), - includeInterfaceView = true, - includeCreatedEventBlob = true, - ) - ) - ) - ) - ) - ), - None, - ) - ), - verbose = verbose, - updateFormat = None, - ) - private def getFiltersByParty(templateIdsForParty: Seq[Identifier]): Map[String, Filters] = Map( party -> @@ -119,8 +72,6 @@ class UpdateServiceRequestValidatorTest GetUpdatesRequest( beginExclusive = 0L, endInclusive = Some(offsetLong), - filter = None, - verbose = false, updateFormat = Some( UpdateFormat( includeTransactions = transactionTemplateIdsO @@ -151,11 +102,6 @@ class UpdateServiceRequestValidatorTest ), ) - private val txReqLegacy = txReqBuilderLegacy(Seq(templateId)) - private val txReqLegacyWithPackageNameScoping = txReqBuilderLegacy( - Seq(templateId.copy(packageId = Ref.PackageRef.Name(packageName).toString)) - ) - private val txReq = updatesReqBuilder(Some(Seq(templateId))) private val reassignmentsReq = updatesReqBuilder( transactionTemplateIdsO = None, @@ -165,293 +111,54 @@ class UpdateServiceRequestValidatorTest Some(Seq(templateId.copy(packageId = Ref.PackageRef.Name(packageName).toString))) ) - private val txByOffsetReqLegacy = - GetTransactionByOffsetRequest(offsetLong, Seq(party), None) private val txByOffsetReq = - GetTransactionByOffsetRequest( + GetUpdateByOffsetRequest( offset = offsetLong, - requestingParties = Nil, - transactionFormat = Some( - TransactionFormat( - eventFormat = Some( - EventFormat( - filtersByParty = Map(party -> Filters(Nil)), - filtersForAnyParty = None, - verbose = false, + updateFormat = Some( + UpdateFormat( + includeTransactions = Some( + TransactionFormat( + eventFormat = Some( + EventFormat( + filtersByParty = Map(party -> Filters(Nil)), + filtersForAnyParty = None, + verbose = false, + ) + ), + transactionShape = TransactionShape.TRANSACTION_SHAPE_ACS_DELTA, ) ), - transactionShape = TransactionShape.TRANSACTION_SHAPE_ACS_DELTA, + includeReassignments = None, + includeTopologyEvents = None, ) ), ) - private val txByIdReqLegacy = - GetTransactionByIdRequest(updateId, 
Seq(party), None) private val txByIdReq = - GetTransactionByIdRequest( - updateId = updateId, - requestingParties = Nil, - transactionFormat = Some( - TransactionFormat( - eventFormat = Some( - EventFormat( - filtersByParty = Map(party -> Filters(Nil)), - filtersForAnyParty = None, - verbose = false, + GetUpdateByIdRequest( + updateId = updateId.toHexString, + updateFormat = Some( + UpdateFormat( + includeTransactions = Some( + TransactionFormat( + eventFormat = Some( + EventFormat( + filtersByParty = Map(party -> Filters(Nil)), + filtersForAnyParty = None, + verbose = false, + ) + ), + transactionShape = TransactionShape.TRANSACTION_SHAPE_ACS_DELTA, ) ), - transactionShape = TransactionShape.TRANSACTION_SHAPE_ACS_DELTA, + includeReassignments = None, + includeTopologyEvents = None, ) ), ) "UpdateRequestValidation" when { - // TODO(#23504) remove - "validating regular legacy requests" should { - - "accept simple requests" in { - inside(UpdateServiceRequestValidator.validate(txReqLegacy, ledgerEnd)) { case Right(req) => - req.startExclusive shouldBe None - req.endInclusive shouldBe offset - val filtersByParty = - req.updateFormat.includeTransactions.map(_.eventFormat.filtersByParty).value - filtersByParty should have size 1 - hasExpectedFilters(req) - req.updateFormat.includeTransactions.value.eventFormat.verbose shouldEqual verbose - } - - } - - "return the correct error on missing filter" in { - requestMustFailWith( - UpdateServiceRequestValidator - .validateForTrees(txReqLegacy.update(_.optionalFilter := None), ledgerEnd), - code = INVALID_ARGUMENT, - description = - "MISSING_FIELD(8,0): The submitted command is missing a mandatory field: filter", - metadata = Map.empty, - ) - } - - "return the correct error on missing filter/verbose and update_format" in { - requestMustFailWith( - UpdateServiceRequestValidator - .validate(txReqLegacy.update(_.optionalFilter := None), ledgerEnd), - code = INVALID_ARGUMENT, - description = - "INVALID_ARGUMENT(8,0): The submitted request has invalid arguments: Either filter/verbose or update_format is required. Please use either backwards compatible arguments (filter and verbose) or update_format, but not both.", - metadata = Map.empty, - ) - } - - "return the correct error on defining both filter and update_format" in { - requestMustFailWith( - UpdateServiceRequestValidator - .validate( - txReqLegacy.update(_.optionalUpdateFormat := txReq.updateFormat, _.verbose := false), - ledgerEnd, - ), - code = INVALID_ARGUMENT, - description = - "INVALID_ARGUMENT(8,0): The submitted request has invalid arguments: Both filter/verbose and update_format is specified. Please use either backwards compatible arguments (filter and verbose) or update_format, but not both.", - metadata = Map.empty, - ) - } - - "return the correct error on defining both verbose and update_format" in { - requestMustFailWith( - UpdateServiceRequestValidator - .validate( - txReq.update(_.verbose := true), - ledgerEnd, - ), - code = INVALID_ARGUMENT, - description = - "INVALID_ARGUMENT(8,0): The submitted request has invalid arguments: Both filter/verbose and update_format is specified. 
Please use either backwards compatible arguments (filter and verbose) or update_format, but not both.", - metadata = Map.empty, - ) - } - - "return the correct error on empty filter" in { - requestMustFailWith( - request = UpdateServiceRequestValidator.validate( - txReqLegacy.update(_.filter.filtersByParty := Map.empty), - ledgerEnd, - ), - code = INVALID_ARGUMENT, - description = - "INVALID_ARGUMENT(8,0): The submitted request has invalid arguments: filtersByParty and filtersForAnyParty cannot be empty simultaneously", - metadata = Map.empty, - ) - } - - "return the correct error on empty interfaceId in interfaceFilter" in { - requestMustFailWith( - request = UpdateServiceRequestValidator.validate( - txReqLegacy.update(_.filter.filtersByParty.modify(_.map { case (p, f) => - p -> f.update( - _.cumulative := Seq( - ProtoCumulativeFilter( - IdentifierFilter.InterfaceFilter( - ProtoInterfaceFilter( - None, - includeInterfaceView = true, - includeCreatedEventBlob = false, - ) - ) - ) - ) - ) - })), - ledgerEnd, - ), - code = INVALID_ARGUMENT, - description = - "MISSING_FIELD(8,0): The submitted command is missing a mandatory field: interfaceId", - metadata = Map.empty, - ) - } - - "tolerate empty filters_inclusive" in { - inside( - UpdateServiceRequestValidator.validate( - txReqLegacy.update(_.filter.filtersByParty.modify(_.map { case (p, f) => - p -> f.update(_.cumulative := Seq(ProtoCumulativeFilter.defaultInstance)) - })), - ledgerEnd, - ) - ) { case Right(req) => - req.startExclusive shouldEqual None - req.endInclusive shouldEqual offset - val filtersByParty = - req.updateFormat.includeTransactions.map(_.eventFormat.filtersByParty).value - filtersByParty should have size 1 - inside(filtersByParty.headOption.value) { case (p, filters) => - p shouldEqual party - filters shouldEqual CumulativeFilter.templateWildcardFilter() - } - req.updateFormat.includeTransactions.value.eventFormat.verbose shouldEqual verbose - } - } - - "tolerate missing filters_inclusive" in { - inside( - UpdateServiceRequestValidator.validate( - txReqLegacy.update(_.filter.filtersByParty.modify(_.map { case (p, f) => - p -> f.update(_.cumulative := Seq()) - })), - ledgerEnd, - ) - ) { case Right(req) => - req.startExclusive shouldEqual None - req.endInclusive shouldEqual offset - val filtersByParty = - req.updateFormat.includeTransactions.map(_.eventFormat.filtersByParty).value - filtersByParty should have size 1 - inside(filtersByParty.headOption.value) { case (p, filters) => - p shouldEqual party - filters shouldEqual CumulativeFilter.templateWildcardFilter() - } - req.updateFormat.includeTransactions.value.eventFormat.verbose shouldEqual verbose - } - } - - "tolerate all fields filled out" in { - inside(UpdateServiceRequestValidator.validate(txReqLegacy, ledgerEnd)) { case Right(req) => - req.startExclusive shouldEqual None - req.endInclusive shouldEqual offset - hasExpectedFilters(req) - req.updateFormat.includeTransactions.value.eventFormat.verbose shouldEqual verbose - } - } - - "allow package-name scoped templates" in { - inside( - UpdateServiceRequestValidator.validate(txReqLegacyWithPackageNameScoping, ledgerEnd) - ) { case Right(req) => - req.startExclusive shouldEqual None - req.endInclusive shouldEqual offset - hasExpectedFilters( - req, - expectedTemplates = - Set(Ref.TypeConRef(Ref.PackageRef.Name(packageName), templateQualifiedName)), - ) - req.updateFormat.includeTransactions.value.eventFormat.verbose shouldEqual verbose - } - } - - "still allow populated packageIds in templateIds (for backwards 
compatibility)" in { - inside(UpdateServiceRequestValidator.validate(txReqLegacy, ledgerEnd)) { case Right(req) => - req.startExclusive shouldEqual None - req.endInclusive shouldEqual offset - hasExpectedFilters(req) - req.updateFormat.includeTransactions.value.eventFormat.verbose shouldEqual verbose - } - } - - "current definition populate the right api request" in { - val result = UpdateServiceRequestValidator.validate( - txReqBuilderLegacy(Seq.empty).copy( - filter = Some( - TransactionFilter( - Map( - party -> Filters( - Seq( - ProtoCumulativeFilter( - IdentifierFilter.InterfaceFilter( - ProtoInterfaceFilter( - interfaceId = Some(templateId), - includeInterfaceView = true, - includeCreatedEventBlob = true, - ) - ) - ) - ) - ++ - Seq( - ProtoCumulativeFilter( - IdentifierFilter.TemplateFilter( - ProtoTemplateFilter(Some(templateId), includeCreatedEventBlob = true) - ) - ) - ) - ) - ), - None, - ) - ) - ), - ledgerEnd, - ) - result.map( - _.updateFormat.includeTransactions.value.eventFormat.filtersByParty - ) shouldBe Right( - Map( - party -> - CumulativeFilter( - templateFilters = Set( - TemplateFilter( - TypeConRef.assertFromString("packageId:includedModule:includedTemplate"), - includeCreatedEventBlob = true, - ) - ), - interfaceFilters = Set( - InterfaceFilter( - interfaceTypeRef = Ref.TypeConRef.assertFromString( - "packageId:includedModule:includedTemplate" - ), - includeView = true, - includeCreatedEventBlob = true, - ) - ), - templateWildcardFilter = None, - ) - ) - ) - } - } - "validating regular requests" should { "accept simple requests" in { @@ -845,8 +552,7 @@ class UpdateServiceRequestValidatorTest "fail on empty transactionId" in { requestMustFailWith( - request = - UpdateServiceRequestValidator.validateTransactionById(txByIdReqLegacy.withUpdateId("")), + request = UpdateServiceRequestValidator.validateUpdateById(txByIdReq.withUpdateId("")), code = INVALID_ARGUMENT, description = "MISSING_FIELD(8,0): The submitted command is missing a mandatory field: update_id", @@ -854,84 +560,24 @@ class UpdateServiceRequestValidatorTest ) } - // TODO(#23504) enable the test -// "fail on empty transaction format" in { -// requestMustFailWith( -// request = UpdateServiceRequestValidator.validateTransactionById( -// txByIdReq.clearTransactionFormat -// ), -// code = INVALID_ARGUMENT, -// description = -// "MISSING_FIELD(8,0): The submitted command is missing a mandatory field: transaction_format", -// metadata = Map.empty, -// ) -// } - - } - - // TODO(#23504) remove - "validating transaction by id legacy requests" should { - - "fail on empty transactionId" in { - requestMustFailWith( - request = - UpdateServiceRequestValidator.validateTransactionById(txByIdReqLegacy.withUpdateId("")), - code = INVALID_ARGUMENT, - description = - "MISSING_FIELD(8,0): The submitted command is missing a mandatory field: update_id", - metadata = Map.empty, - ) - } - - "fail on empty requesting parties" in { + "fail on empty update format" in { requestMustFailWith( - request = UpdateServiceRequestValidator.validateTransactionByIdForTrees( - txByIdReqLegacy.withRequestingParties(Nil) + request = UpdateServiceRequestValidator.validateUpdateById( + txByIdReq.clearUpdateFormat ), code = INVALID_ARGUMENT, description = - "MISSING_FIELD(8,0): The submitted command is missing a mandatory field: requesting_parties", - metadata = Map.empty, - ) - } - - "return the correct error on missing requesting parties and transaction_format" in { - requestMustFailWith( - UpdateServiceRequestValidator - .validateTransactionById( 
- txByIdReq.update(_.optionalTransactionFormat := None) - ), - code = INVALID_ARGUMENT, - description = - "INVALID_ARGUMENT(8,0): The submitted request has invalid arguments: Either requesting_parties or " + - "transaction_format is required. Please use either backwards compatible arguments (requesting_parties) or " + - "transaction_format, but not both.", + "MISSING_FIELD(8,0): The submitted command is missing a mandatory field: update_format", metadata = Map.empty, ) } - - "return the correct error on defining both requesting_parties and transaction_format" in { - requestMustFailWith( - UpdateServiceRequestValidator - .validateTransactionById( - txByIdReqLegacy.update(_.optionalTransactionFormat := txByIdReq.transactionFormat) - ), - code = INVALID_ARGUMENT, - description = - "INVALID_ARGUMENT(8,0): The submitted request has invalid arguments: Both requesting_parties and " + - "transaction_format are specified. Please use either backwards compatible arguments (requesting_parties) " + - "or transaction_format, but not both.", - metadata = Map.empty, - ) - } - } "validating transaction by offset requests" should { "fail on zero offset" in { requestMustFailWith( - request = UpdateServiceRequestValidator.validateTransactionByOffset( + request = UpdateServiceRequestValidator.validateUpdateByOffset( txByOffsetReq.withOffset(0) ), code = INVALID_ARGUMENT, @@ -943,7 +589,7 @@ class UpdateServiceRequestValidatorTest "fail on negative offset" in { requestMustFailWith( - request = UpdateServiceRequestValidator.validateTransactionByOffset( + request = UpdateServiceRequestValidator.validateUpdateByOffset( txByOffsetReq.withOffset(-21) ), code = INVALID_ARGUMENT, @@ -953,64 +599,14 @@ class UpdateServiceRequestValidatorTest ) } - // TODO(#23504) enable the test -// "fail on empty transaction format" in { -// requestMustFailWith( -// request = UpdateServiceRequestValidator.validateTransactionByOffset( -// txByOffsetReq.clearTransactionFormat -// ), -// code = INVALID_ARGUMENT, -// description = -// "MISSING_FIELD(8,0): The submitted command is missing a mandatory field: transaction_format", -// metadata = Map.empty, -// ) -// } - - } - - // TODO(#23504) remove - "validating transaction by offset legacy requests" should { - - "fail on empty requesting parties" in { + "fail on empty update format" in { requestMustFailWith( - request = UpdateServiceRequestValidator.validateTransactionByOffsetForTrees( - txByOffsetReqLegacy.withRequestingParties(Nil) + request = UpdateServiceRequestValidator.validateUpdateByOffset( + txByOffsetReq.clearUpdateFormat ), code = INVALID_ARGUMENT, description = - "MISSING_FIELD(8,0): The submitted command is missing a mandatory field: requesting_parties", - metadata = Map.empty, - ) - } - - "return the correct error on missing requesting parties and transaction_format" in { - requestMustFailWith( - UpdateServiceRequestValidator - .validateTransactionByOffset( - txByOffsetReq.update(_.optionalTransactionFormat := None) - ), - code = INVALID_ARGUMENT, - description = - "INVALID_ARGUMENT(8,0): The submitted request has invalid arguments: Either requesting_parties or " + - "transaction_format is required. 
Please use either backwards compatible arguments (requesting_parties) or " + - "transaction_format, but not both.", - metadata = Map.empty, - ) - } - - "return the correct error on defining both requesting_parties and transaction_format" in { - requestMustFailWith( - UpdateServiceRequestValidator - .validateTransactionByOffset( - txByOffsetReqLegacy.update( - _.optionalTransactionFormat := txByOffsetReq.transactionFormat - ) - ), - code = INVALID_ARGUMENT, - description = - "INVALID_ARGUMENT(8,0): The submitted request has invalid arguments: Both requesting_parties and " + - "transaction_format are specified. Please use either backwards compatible arguments (requesting_parties) " + - "or transaction_format, but not both.", + "MISSING_FIELD(8,0): The submitted command is missing a mandatory field: update_format", metadata = Map.empty, ) } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/ValidateDisclosedContractsTest.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/ValidateDisclosedContractsTest.scala index 503646a5d6..aef1ef4351 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/ValidateDisclosedContractsTest.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/ValidateDisclosedContractsTest.scala @@ -15,17 +15,10 @@ import com.digitalasset.canton.ledger.api.validation.ValidateDisclosedContractsT api, lf, lfContractId, - validateDisclosedContracts, + underTest, } import com.digitalasset.canton.logging.{ErrorLoggingContext, NoLogging} -import com.digitalasset.canton.platform.apiserver.execution.ContractAuthenticators.ContractAuthenticatorFn -import com.digitalasset.canton.protocol.{ - AuthenticatedContractIdVersionV11, - ContractAuthenticationDataV1, - LfFatContractInst, - LfTransactionVersion, - Unicum, -} +import com.digitalasset.canton.protocol.* import com.digitalasset.canton.{DefaultDamlValues, LfValue} import com.digitalasset.daml.lf.data.{Bytes, ImmArray, Ref, Time} import com.digitalasset.daml.lf.transaction.* @@ -47,7 +40,7 @@ class ValidateDisclosedContractsTest behavior of classOf[ValidateDisclosedContracts].getSimpleName it should "validate the disclosed contracts when enabled" in { - validateDisclosedContracts.apply(api.protoCommands) shouldBe Right( + underTest.validateCommands(api.protoCommands) shouldBe Right( lf.expectedDisclosedContracts ) } @@ -63,7 +56,7 @@ class ValidateDisclosedContractsTest ) requestMustFailWith( - request = validateDisclosedContracts(withMissingBlob), + request = underTest.validateCommands(withMissingBlob), code = Status.Code.INVALID_ARGUMENT, description = "MISSING_FIELD(8,0): The submitted command is missing a mandatory field: DisclosedContract.createdEventBlob", @@ -71,22 +64,9 @@ class ValidateDisclosedContractsTest ) } - it should "fail validation if contract fails authentication" in { - - val underTest = new ValidateDisclosedContracts((_, _) => Left("Auth failure!")) - - requestMustFailWith( - request = underTest(api.protoCommands), - code = Status.Code.INVALID_ARGUMENT, - description = - s"INVALID_ARGUMENT(8,0): The submitted request has invalid arguments: Contract authentication failed for attached disclosed contract with id (${api.contractId}): Auth failure!", - metadata = Map.empty, - ) - } - - it should "fail validation on absent contract_id" in { - requestMustFailWith( - request = 
validateDisclosedContracts( + it should "support absent contract_id" in { + underTest + .validateCommands( api.protoCommands.copy( disclosedContracts = scala.Seq( api.protoDisclosedContract @@ -97,35 +77,27 @@ class ValidateDisclosedContractsTest .copy(contractId = "") ) ) - ), - code = Status.Code.INVALID_ARGUMENT, - description = - "MISSING_FIELD(8,0): The submitted command is missing a mandatory field: DisclosedContract.contract_id", - metadata = Map.empty, - ) + ) + .value shouldBe lf.expectedDisclosedContracts } - it should "fail validation on absent template_id" in { - requestMustFailWith( - request = validateDisclosedContracts( + it should "support absent template_id" in { + underTest + .validateCommands( api.protoCommands.copy( disclosedContracts = scala.Seq( api.protoDisclosedContract .copy(templateId = None) ) ) - ), - code = Status.Code.INVALID_ARGUMENT, - description = - "MISSING_FIELD(8,0): The submitted command is missing a mandatory field: DisclosedContract.template_id", - metadata = Map.empty, - ) + ) + .value shouldBe lf.expectedDisclosedContracts } it should "fail validation on invalid contract_id" in { val invalidContractId = "invalidContractId" requestMustFailWith( - request = validateDisclosedContracts( + request = underTest.validateCommands( api.protoCommands.copy( disclosedContracts = scala.Seq( api.protoDisclosedContract @@ -147,7 +119,7 @@ class ValidateDisclosedContractsTest it should "fail validation on invalid template_id" in { val invalidTemplateId = ProtoIdentifier("pkgId", "", "entity") requestMustFailWith( - request = validateDisclosedContracts( + request = underTest.validateCommands( api.protoCommands.copy( disclosedContracts = scala.Seq( api.protoDisclosedContract @@ -169,7 +141,7 @@ class ValidateDisclosedContractsTest it should "fail validation when provided contract_id mismatches the one decoded from the created_event_blob" in { val otherContractId = "00" + "00" * 31 + "ff" requestMustFailWith( - request = validateDisclosedContracts( + request = underTest.validateCommands( api.protoCommands.copy( disclosedContracts = scala.Seq( api.protoDisclosedContract @@ -191,7 +163,7 @@ class ValidateDisclosedContractsTest it should "fail validation when provided template_id mismatches the one decoded from the created_event_blob" in { val otherTemplateId = ProtoIdentifier("otherPkgId", "otherModule", "otherEntity") requestMustFailWith( - request = validateDisclosedContracts( + request = underTest.validateCommands( api.protoCommands.copy( disclosedContracts = scala.Seq( api.protoDisclosedContract @@ -212,7 +184,7 @@ class ValidateDisclosedContractsTest it should "fail validation if decoding the created_event_blob fails" in { requestMustFailWith( - request = validateDisclosedContracts( + request = underTest.validateCommands( api.protoCommands.copy( disclosedContracts = scala.Seq( api.protoDisclosedContract @@ -231,7 +203,7 @@ class ValidateDisclosedContractsTest it should "fail validation on invalid synchronizer_id" in { requestMustFailWith( - request = validateDisclosedContracts( + request = underTest.validateCommands( ProtoCommands.defaultInstance.copy(disclosedContracts = scala.Seq(api.protoDisclosedContract.copy(synchronizerId = "cantBe!")) ) @@ -242,16 +214,48 @@ class ValidateDisclosedContractsTest metadata = Map.empty, ) } + + it should "fail validation on duplicate contract ids" in { + val commandsWithDuplicateDisclosedContracts = + ProtoCommands.defaultInstance.copy(disclosedContracts = + scala.Seq( + api.protoDisclosedContract, + api.protoDisclosedContract, 
+ ) + ) + requestMustFailWith( + request = underTest.validateCommands(commandsWithDuplicateDisclosedContracts), + code = Status.Code.INVALID_ARGUMENT, + description = + s"INVALID_ARGUMENT(8,0): The submitted request has invalid arguments: Disclosed contracts contain duplicate contract id (${api.contractId})", + metadata = Map.empty, + ) + } + + it should "fail validation on duplicate contract keys" in { + val commandsWithDuplicateDisclosedContracts = + ProtoCommands.defaultInstance.copy(disclosedContracts = + scala.Seq( + api.protoDisclosedContract, + api.dupKeyProtoDisclosedContract, + ) + ) + requestMustFailWith( + request = underTest.validateCommands(commandsWithDuplicateDisclosedContracts), + code = Status.Code.INVALID_ARGUMENT, + description = + s"INVALID_ARGUMENT(8,0): The submitted request has invalid arguments: Disclosed contracts contain duplicate contract key (${lf.keyWithMaintainers})", + metadata = Map.empty, + ) + } + } object ValidateDisclosedContractsTest { - private val dummyContractIdAuthenticator: ContractAuthenticatorFn = (_, _) => Right(()) + private val underTest = ValidateDisclosedContracts - private val validateDisclosedContracts = - new ValidateDisclosedContracts(dummyContractIdAuthenticator) - - val lfContractId: ContractId.V1 = AuthenticatedContractIdVersionV11.fromDiscriminator( + val lfContractId: ContractId.V1 = CantonContractIdVersion.maxV1.fromDiscriminator( DefaultDamlValues.lfhash(3), Unicum(TestHash.digest(4)), ) @@ -281,6 +285,17 @@ object ValidateDisclosedContractsTest { synchronizerId = "", ) + val dupKeyProtoDisclosedContract: ProtoDisclosedContract = protoDisclosedContract.copy( + contractId = lf.dupKeyFatContractInstance.contractId.coid, + createdEventBlob = TransactionCoder + .encodeFatContractInstance(lf.dupKeyFatContractInstance) + .fold( + err => + throw new RuntimeException(s"Cannot serialize createdEventBlob: ${err.errorMessage}"), + identity, + ), + ) + val protoCommands: ProtoCommands = ProtoCommands.defaultInstance.copy(disclosedContracts = scala.Seq(api.protoDisclosedContract)) } @@ -300,9 +315,9 @@ object ValidateDisclosedContractsTest { private val salt = Salt.tryDeriveSalt(seedSalt, 0, new SymbolicPureCrypto()) private val authenticationDataBytes: Bytes = - ContractAuthenticationDataV1(salt)(AuthenticatedContractIdVersionV11).toLfBytes + ContractAuthenticationDataV1(salt)(CantonContractIdVersion.maxV1).toLfBytes - private val keyWithMaintainers: GlobalKeyWithMaintainers = GlobalKeyWithMaintainers.assertBuild( + val keyWithMaintainers: GlobalKeyWithMaintainers = GlobalKeyWithMaintainers.assertBuild( lf.templateId, LfValue.ValueRecord( None, @@ -323,16 +338,24 @@ object ValidateDisclosedContractsTest { signatories = api.signatories, stakeholders = api.stakeholders, keyOpt = Some(lf.keyWithMaintainers), - version = LfTransactionVersion.StableVersions.max, + version = LfSerializationVersion.StableVersions.max, ) - val fatContractInstance: LfFatContractInst = FatContractInstance.fromCreateNode( + private val dupKeyCreateNode = createNode.copy(ExampleContractFactory.buildContractId()) + + def fatContractInstance: LfFatContractInst = FatContractInstance.fromCreateNode( create = createNode, createTime = CreationTime.CreatedAt(Time.Timestamp.assertFromLong(api.createdAtSeconds * 1000000L)), authenticationData = lf.authenticationDataBytes, ) + def dupKeyFatContractInstance: LfFatContractInst = FatContractInstance.fromCreateNode( + create = dupKeyCreateNode, + createTime = fatContractInstance.createdAt, + authenticationData = 
fatContractInstance.authenticationData, + ) + val expectedDisclosedContracts: ImmArray[DisclosedContract] = ImmArray( DisclosedContract(fatContractInstance, None) ) diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/ValidateUpgradingPackageResolutionsTest.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/ValidateUpgradingPackageResolutionsTest.scala index 82636c37fe..c28601aeb4 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/ValidateUpgradingPackageResolutionsTest.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/ValidateUpgradingPackageResolutionsTest.scala @@ -6,8 +6,8 @@ package com.digitalasset.canton.ledger.api.validation import com.daml.nonempty.NonEmpty import com.digitalasset.canton.ledger.api.validation.ValidateUpgradingPackageResolutions.ValidatedCommandPackageResolutionsSnapshot import com.digitalasset.canton.logging.{ErrorLoggingContext, NoLogging} -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata.{ +import com.digitalasset.canton.store.packagemeta.PackageMetadata +import com.digitalasset.canton.store.packagemeta.PackageMetadata.{ LocalPackagePreference, PackageResolution, } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/ValidatorTestUtils.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/ValidatorTestUtils.scala index 4e074a8ea0..17655f5fc8 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/ValidatorTestUtils.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/ValidatorTestUtils.scala @@ -7,6 +7,7 @@ import com.daml.grpc.GrpcStatus import com.digitalasset.canton.data.Offset import com.digitalasset.canton.ledger.api.messages.update import com.digitalasset.canton.ledger.api.{CumulativeFilter, InterfaceFilter, TemplateFilter} +import com.digitalasset.canton.protocol.TestUpdateId import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.daml.lf.data.Ref import com.digitalasset.daml.lf.value.Value.ContractId @@ -36,7 +37,7 @@ trait ValidatorTestUtils extends Matchers with Inside with OptionValues with Eit protected val party = Ref.Party.assertFromString("party") protected val party2 = Ref.Party.assertFromString("party2") protected val verbose = false - protected val updateId = "42" + protected val updateId = TestUpdateId("42") protected val ledgerEnd = Some(Offset.tryFromLong(1000)) protected val contractId = ContractId.V1.assertFromString("00" * 32 + "0001") protected val moduleName = Ref.ModuleName.assertFromString(includedModule) diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/runner/common/ArbitraryConfig.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/runner/common/ArbitraryConfig.scala index c4e5cc1f99..b7f18f5181 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/runner/common/ArbitraryConfig.scala +++ 
b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/runner/common/ArbitraryConfig.scala @@ -216,39 +216,9 @@ object ArbitraryConfig { transactionsProcessingParallelism = transactionsProcessingParallelism, ) - def genTransactionTreeStreams: Gen[TransactionTreeStreamsConfig] = - for { - maxIdsPerIdPage <- Gen.chooseNum(0, Int.MaxValue) - maxPayloadsPerPayloadsPage <- Gen.chooseNum(0, Int.MaxValue) - maxPagesPerIdPagesBuffer <- Gen.chooseNum(0, Int.MaxValue) - maxWorkingMemoryInBytesForIdPages <- Gen.chooseNum(0, Int.MaxValue) - maxParallelIdCreateQueries <- Gen.chooseNum(0, Int.MaxValue) - maxParallelPayloadCreateQueries <- Gen.chooseNum(0, Int.MaxValue) - maxParallelIdConsumingQueries <- Gen.chooseNum(0, Int.MaxValue) - maxParallelPayloadConsumingQueries <- Gen.chooseNum(0, Int.MaxValue) - maxParallelPayloadQueries <- Gen.chooseNum(0, Int.MaxValue) - transactionsProcessingParallelism <- Gen.chooseNum(0, Int.MaxValue) - maxParallelIdNonConsumingQueries <- Gen.chooseNum(0, Int.MaxValue) - maxParallelPayloadNonConsumingQueries <- Gen.chooseNum(0, Int.MaxValue) - } yield TransactionTreeStreamsConfig( - maxIdsPerIdPage = maxIdsPerIdPage, - maxPagesPerIdPagesBuffer = maxPayloadsPerPayloadsPage, - maxWorkingMemoryInBytesForIdPages = maxPagesPerIdPagesBuffer, - maxPayloadsPerPayloadsPage = maxWorkingMemoryInBytesForIdPages, - maxParallelIdCreateQueries = maxParallelIdCreateQueries, - maxParallelIdConsumingQueries = maxParallelPayloadCreateQueries, - maxParallelPayloadCreateQueries = maxParallelIdConsumingQueries, - maxParallelPayloadConsumingQueries = maxParallelPayloadConsumingQueries, - maxParallelPayloadQueries = maxParallelPayloadQueries, - transactionsProcessingParallelism = transactionsProcessingParallelism, - maxParallelIdNonConsumingQueries = maxParallelIdNonConsumingQueries, - maxParallelPayloadNonConsumingQueries = maxParallelPayloadNonConsumingQueries, - ) - val indexServiceConfig: Gen[IndexServiceConfig] = for { activeContractsServiceStreamsConfig <- genActiveContractsServiceStreamConfig transactionFlatStreams <- genTransactionFlatStreams - transactionTreeStreams <- genTransactionTreeStreams eventsProcessingParallelism <- Gen.chooseNum(0, Int.MaxValue) bufferedStreamsPageSize <- Gen.chooseNum(0, Int.MaxValue) maxContractStateCacheSize <- Gen.long @@ -264,7 +234,6 @@ object ArbitraryConfig { apiStreamShutdownTimeout, activeContractsServiceStreams = activeContractsServiceStreamsConfig, updatesStreams = transactionFlatStreams, - transactionTreeStreams = transactionTreeStreams, ) } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/runner/common/PureConfigReaderWriterSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/runner/common/PureConfigReaderWriterSpec.scala index f8522700e7..dd603854b6 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/runner/common/PureConfigReaderWriterSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/runner/common/PureConfigReaderWriterSpec.scala @@ -388,7 +388,8 @@ class PureConfigReaderWriterSpec | contract-processing-parallelism=8 | max-ids-per-id-page=20000 | max-pages-per-id-pages-buffer=1 - | max-parallel-id-create-queries=2 + | max-parallel-id-create-queries=4 + | id-filter-query-parallelism=2 | max-parallel-payload-create-queries=2 | max-payloads-per-payloads-page=1000 | max-working-memory-in-bytes-for-id-pages=104857600 @@ 
-415,20 +416,6 @@ class PureConfigReaderWriterSpec | max-payloads-per-payloads-page=1000 | max-working-memory-in-bytes-for-id-pages=104857600 | transactions-processing-parallelism=8 - |} - |transaction-tree-streams { - | max-ids-per-id-page=20000 - | max-pages-per-id-pages-buffer=1 - | max-parallel-id-consuming-queries=8 - | max-parallel-id-create-queries=8 - | max-parallel-id-non-consuming-queries=4 - | max-parallel-payload-consuming-queries=2 - | max-parallel-payload-create-queries=2 - | max-parallel-payload-non-consuming-queries=2 - | max-parallel-payload-queries=2 - | max-payloads-per-payloads-page=1000 - | max-working-memory-in-bytes-for-id-pages=104857600 - | transactions-processing-parallelism=8 |}""".stripMargin it should "support current defaults" in { diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/InMemoryStateSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/InMemoryStateSpec.scala index 38a7287bef..05ade69e08 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/InMemoryStateSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/InMemoryStateSpec.scala @@ -78,7 +78,7 @@ class InMemoryStateSpec extends AsyncFlatSpec with MockitoSugar with Matchers wi // ASSERT STATE INITIALIZED inOrder.verify(dispatcherState).stopDispatcher() - inOrder.verify(contractStateCaches).reset(Some(initOffset)) + inOrder.verify(contractStateCaches).reset(Some(initLedgerEnd)) inOrder.verify(inMemoryFanoutBuffer).flush() inOrder .verify(mutableLedgerEndCache) @@ -136,7 +136,7 @@ class InMemoryStateSpec extends AsyncFlatSpec with MockitoSugar with Matchers wi when(dispatcherState.isRunning).thenReturn(false) inMemoryState.initialized shouldBe false - inOrder.verify(contractStateCaches).reset(Some(reInitOffset)) + inOrder.verify(contractStateCaches).reset(Some(reInitLedgerEnd)) inOrder.verify(inMemoryFanoutBuffer).flush() inOrder .verify(mutableLedgerEndCache) diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/IndexComponentLoadTest.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/IndexComponentLoadTest.scala new file mode 100644 index 0000000000..2f1a17c3ae --- /dev/null +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/IndexComponentLoadTest.scala @@ -0,0 +1,573 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.platform + +import com.daml.ledger.api.v2.update_service.GetUpdateResponse +import com.digitalasset.canton +import com.digitalasset.canton.crypto.HashAlgorithm.Sha256 +import com.digitalasset.canton.crypto.{Hash, HashPurpose} +import com.digitalasset.canton.data.{CantonTimestamp, LedgerTimeBoundaries} +import com.digitalasset.canton.ledger.api.{ + CumulativeFilter, + EventFormat, + TemplateWildcardFilter, + TransactionFormat, + TransactionShape, + UpdateFormat, +} +import com.digitalasset.canton.ledger.participant.state.{ + Reassignment, + ReassignmentInfo, + TestAcsChangeFactory, + TransactionMeta, + Update, +} +import com.digitalasset.canton.logging.LoggingContextWithTrace +import com.digitalasset.canton.platform +import com.digitalasset.canton.platform.store.backend.common.UpdatePointwiseQueries.LookupKey +import com.digitalasset.canton.protocol.{ReassignmentId, TestUpdateId, UpdateId} +import com.digitalasset.canton.topology.SynchronizerId +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ReassignmentTag +import com.digitalasset.daml.lf.data.{Bytes, ImmArray, Ref, Time} +import com.digitalasset.daml.lf.transaction.{CommittedTransaction, Node} +import com.digitalasset.daml.lf.value.Value +import com.google.protobuf.ByteString +import org.apache.pekko.stream.scaladsl.{Sink, Source} +import org.scalatest.Ignore +import org.scalatest.concurrent.PatienceConfiguration +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.time.Span + +import java.util.concurrent.atomic.{AtomicLong, AtomicReference} +import scala.collection.mutable +import scala.concurrent.duration.{Duration, FiniteDuration} + +/** The goal of this test is to provide a light-weight way to ingest synthetic Index DB data for + * load-testing and benchmarking purposes. This test is not supposed to run in CI (which is why it + * is ignored and logs at WARN level). + */
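+// Running this test locally presumably requires a Postgres instance matching the jdbcUrl below
+// (port 5433, database `load_test`, user `postgres`); this is inferred from the connection
+// string rather than from documented setup, so adjust the URL to your environment.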
+@Ignore +class IndexComponentLoadTest extends AnyFlatSpec with IndexComponentTest { + + override val jdbcUrl: String = s"jdbc:postgresql://localhost:5433/load_test?user=postgres" + + private val synchronizer1 = SynchronizerId.tryFromString("x::synchronizer1") + private val synchronizer2 = SynchronizerId.tryFromString("x::synchronizer2") + private val packageName: Ref.PackageName = Ref.PackageName.assertFromString("-package-name-") + private val dsoParty = Ref.Party.assertFromString("dsoParty") // sees all + private lazy val parties = + (1 to 10000).view.map(index => Ref.Party.assertFromString(s"party$index")).toVector + private lazy val templates = + (1 to 300).view.map(index => Ref.Identifier.assertFromString(s"P:M:T$index")).toVector + private val wildcardTemplates = CumulativeFilter( + templateFilters = Set.empty, + interfaceFilters = Set.empty, + templateWildcardFilter = Some(TemplateWildcardFilter(includeCreatedEventBlob = false)), + ) + private def eventFormat(party: Ref.Party) = EventFormat( + filtersByParty = Map( + party -> wildcardTemplates + ), + filtersForAnyParty = None, + verbose = false, + ) + private val allPartyEventFormat = EventFormat( + filtersByParty = Map.empty, + filtersForAnyParty = Some(wildcardTemplates), + verbose = false, + ) + private val someLFHash = com.digitalasset.daml.lf.crypto.Hash + .assertFromString("01cf85cfeb36d628ca2e6f583fa2331be029b6b28e877e1008fb3f862306c086") + override implicit val traceContext: TraceContext = TraceContext.createNew("load-test") + + private val builder = TxBuilder() + private val testAcsChangeFactory = TestAcsChangeFactory() + + it should "Index assign/unassign updates" ignore { + val nextRecordTime = nextRecordTimeFactory() + logger.warn(s"start preparing updates...") + val passes = 1 + val batchesPerPasses = 50 // this is doubled: first all assign batches then all unassign batches + val eventsPerBatch = 10 + val allUpdates = 1 + .to(passes) + .toVector + .flatMap(_ => + allAssignsThenAllUnassigns( + nextRecordTime = nextRecordTime, + assignPayloadLength = 400, + unassignPayloadLength = 150, + batchSize = eventsPerBatch, + batches = batchesPerPasses, + ) + ) :+ assigns(nextRecordTime(), 400)(Vector(builder.newCid, builder.newCid)) + val allUpdateSize = allUpdates.size + logger.warn(s"prepared $allUpdateSize updates") + indexUpdates(allUpdates) + } + + it should "Index CN ACS Export NFR fixture updates" ignore { + val nextRecordTime: () => CantonTimestamp = nextRecordTimeFactory() + def ingestionIteration(): Unit = { + logger.warn(s"start preparing updates...") + if (1 == "1".toInt) + fail( + "WARNING! Please check that you really want to do this! The following parameters result in a fixture that probably will not fit in memory. Please verify the parameters. Bigger workloads are possible by doing multiple iterations."
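+          // The guard above (`1 == "1".toInt`) is true at runtime but not a compile-time
+          // constant, so the compiler cannot prove the fail() is always taken; whoever runs this
+          // fixture has to consciously delete the guard and size the workload first.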
+ ) + val passes = 4320 + val txSize = 5 + // 2000*5 = 10'000 contracts created then archived in a pass, + // 10'000*4320 = 43'200'000 contracts created and archived at the end + val txsCreatedAndArchivedPerPass = 2000 + // 23*5 = 115 contracts staying active per pass, + // 115*4320 = 496'800 contracts staying alive at the end + val txsStayingActivePerPass = 23 + val createPayloadLength = 300 + val archiveArgumentPayloadLengthFromTo = (13, 38) + val archiveResultPayloadLengthFromTo = (13, 58) + val allUpdates = (1 to passes).toVector + .flatMap(_ => + createsAndArchives( + nextRecordTime = nextRecordTime, + txSize = txSize, + txsCreatedThenArchived = txsCreatedAndArchivedPerPass, + txsCreatedNotArchived = txsStayingActivePerPass, + createPayloadLength = createPayloadLength, + archiveArgumentPayloadLengthFromTo = archiveArgumentPayloadLengthFromTo, + archiveResultPayloadLengthFromTo = archiveResultPayloadLengthFromTo, + ) + ) + val allUpdateSize = allUpdates.size + logger.warn(s"prepared $allUpdateSize updates") + indexUpdates(allUpdates) + } + + (1 to 1).foreach { i => + logger.warn(s"ingestion iteration: $i started") + ingestionIteration() + logger.warn(s"ingestion iteration: $i finished") + } + } + + it should "Fetch ACS" ignore TraceContext.withNewTraceContext("ACS fetch") { + implicit traceContext => + val ledgerEndOffset = index.currentLedgerEnd().futureValue + implicit val loggingContextWithTrace: LoggingContextWithTrace = + LoggingContextWithTrace(loggerFactory) + logger.warn("start fetching acs...") + val startTime = System.currentTimeMillis() + index + .getActiveContracts( + eventFormat = eventFormat(dsoParty), + activeAt = ledgerEndOffset, + ) + .zipWithIndex + .runWith(Sink.last) + .map { case (last, lastIndex) => + val totalMillis = System.currentTimeMillis() - startTime + logger.warn( + s"finished fetching acs in $totalMillis ms, ${lastIndex + 1} active contracts returned." 
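+            // Sink.last forces the whole ACS stream to be consumed, so totalMillis measures
+            // end-to-end streaming of all active contracts rather than time-to-first-element.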
+ ) + logger.warn(s"last active contract acs: $last") + } + .futureValue( + PatienceConfiguration.Timeout(Span.convertDurationToSpan(Duration(2000, "seconds"))) + ) + } + + private def nextRecordTimeFactory(): () => CantonTimestamp = { + logger.warn(s"looking up base record time") + val ledgerEnd = index.currentLedgerEnd().futureValue + val baseRecordTime: CantonTimestamp = ledgerEnd match { + case Some(offset) => + // derive the baseline record time from the last indexed update + logger.warn(s"looks like ledger not empty, getting record time from last update") + val lastUpdate: GetUpdateResponse.Update = index + .getUpdateBy( + LookupKey.ByOffset(offset), + UpdateFormat( + includeTransactions = Some( + TransactionFormat( + eventFormat = allPartyEventFormat, + transactionShape = TransactionShape.LedgerEffects, + ) + ), + includeReassignments = Some(allPartyEventFormat), + includeTopologyEvents = None, + ), + ) + .futureValue + .value + .update + lastUpdate.reassignment + .flatMap(_.recordTime) + .orElse(lastUpdate.transaction.flatMap(_.recordTime)) + .map(CantonTimestamp.fromProtoTimestamp(_).value) + .getOrElse(fail("At the ledger end a reassignment or transaction is expected")) + case None => + // empty ledger: use the current time as the baseline + logger.warn(s"looks like ledger is empty, using now as a baseline record time") + CantonTimestamp.now() + } + val recordTime = new AtomicReference(baseRecordTime) + () => recordTime.updateAndGet(_.immediateSuccessor) + } + + private def indexUpdates(updates: Vector[Update]): Unit = { + val numOfUpdates = updates.size + val startTime = System.currentTimeMillis() + val state = new AtomicLong(0L) + val ledgerEndLongBefore = index.currentLedgerEnd().futureValue.map(_.positive).getOrElse(0L) + logger.warn(s"start ingesting $numOfUpdates updates...") + val reportingSeconds = 5 + val reporter = system.scheduler.scheduleAtFixedRate( + initialDelay = FiniteDuration(reportingSeconds, "seconds"), + interval = FiniteDuration(reportingSeconds, "seconds"), + )(new Runnable { + val lastState = new AtomicLong(0L) + override def run(): Unit = { + val current = state.get() + val last = lastState.get() + lastState.set(current) + val reportRate = (current - last) / reportingSeconds + val avgRate = current * 1000 / (System.currentTimeMillis() - startTime) + val minutesLeft = (numOfUpdates - current) / avgRate / 60 + logger.warn( + s"ingesting $current/$numOfUpdates, ${100 * current / numOfUpdates}% (since last: ${current - last}, $reportRate updates/second) (avg: $avgRate updates/second, estimated minutes left: $minutesLeft)..." + ) + } + }) + Source + .fromIterator(() => updates.iterator) + .async + .mapAsync(1)(ingestUpdateAsync) + .async + .foreach(_ => state.incrementAndGet()) + .run() + .map { _ => + reporter.cancel() + logger.warn( + s"finished pushing $numOfUpdates updates to indexer, waiting for all to be indexed..."
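+        // Offering to the indexer queue is asynchronous: at this point all updates have merely
+        // been pushed, so the eventually-block below polls the ledger end until the indexed
+        // update count catches up.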
+ ) + eventually( + timeUntilSuccess = FiniteDuration(1000, "seconds"), + maxPollInterval = FiniteDuration(100, "milliseconds"), + ) { + (index + .currentLedgerEnd() + .futureValue + .map(_.positive) + .getOrElse(0L) - ledgerEndLongBefore) shouldBe numOfUpdates + } + val avgRate = numOfUpdates * 1000 / (System.currentTimeMillis() - startTime) + logger.warn( + s"finished ingesting $numOfUpdates updates with average rate $avgRate updates/second" + ) + } + .futureValue( + PatienceConfiguration.Timeout(Span.convertDurationToSpan(Duration(200000, "seconds"))) + ) + } + + private val random = new scala.util.Random + private def randomString(length: Int) = { + val sb = new mutable.StringBuilder() + for (i <- 1 to length) { + sb.append(random.alphanumeric.head) + } + sb.toString + } + + private def randomTemplate = templates(random.nextInt(templates.size)) + private def randomParty = parties(random.nextInt(parties.size)) + private def randomHash: Hash = Hash.digest( + HashPurpose.PreparedSubmission, + ByteString.copyFromUtf8(s"${random.nextLong()}"), + Sha256, + ) + private def randomUpdateId: UpdateId = TestUpdateId(randomHash.toHexString) + private def randomLength(lengthFromToInclusive: (Int, Int)) = { + val (from, to) = lengthFromToInclusive + val randomDistance = to - from + 1 + assert(randomDistance > 1) + from + random.nextInt(randomDistance) + } + + private def allAssignsThenAllUnassigns( + nextRecordTime: () => CantonTimestamp, + assignPayloadLength: Int, + unassignPayloadLength: Int, + batchSize: Int, + batches: Int, + ): Vector[Update.SequencedReassignmentAccepted] = { + val cidBatches = + 1.to(batches).map(_ => 1.to(batchSize).map(_ => builder.newCid).toVector).toVector + cidBatches + .map(cids => assigns(nextRecordTime(), assignPayloadLength)(cids)) + .++(cidBatches.map(cids => unassigns(nextRecordTime(), unassignPayloadLength)(cids))) + } + + private def createsAndArchives( + nextRecordTime: () => CantonTimestamp, + txSize: Int, + txsCreatedThenArchived: Int, + txsCreatedNotArchived: Int, + createPayloadLength: Int, + archiveArgumentPayloadLengthFromTo: (Int, Int), + archiveResultPayloadLengthFromTo: (Int, Int), + ): Vector[Update.SequencedTransactionAccepted] = { + val (createTxs, createNodes) = + (1 to txsCreatedThenArchived + txsCreatedNotArchived).iterator + .map(_ => + creates( + recordTime = nextRecordTime, + payloadLength = createPayloadLength, + )( + (1 to txSize).map(_ => builder.newCid).toVector + ) + ) + .toVector + .unzip + val archivingTxs = createNodes.iterator + .take(txsCreatedThenArchived) + .map( + archives( + recordTime = nextRecordTime, + argumentLength = randomLength(archiveArgumentPayloadLengthFromTo), + resultLength = randomLength(archiveResultPayloadLengthFromTo), + ) + ) + .toVector + createTxs ++ archivingTxs + } + + private def assigns(recordTime: CantonTimestamp, payloadLength: Int)( + coids: Seq[ContractId] + ): Update.SequencedReassignmentAccepted = + reassignment( + sourceSynchronizerId = synchronizer2, + targetSynchronizerId = synchronizer1, + synchronizerId = synchronizer1, + recordTime = recordTime, + workflowId = None, + )(coids.zipWithIndex.map { case (coid, index) => + assign( + coid = coid, + nodeId = index, + ledgerEffectiveTime = recordTime.underlying, + argumentPayload = randomString(payloadLength), + ) + }) + + private def unassigns(recordTime: CantonTimestamp, payloadLength: Int)( + coids: Seq[ContractId] + ): Update.SequencedReassignmentAccepted = + reassignment( + sourceSynchronizerId = synchronizer1, + targetSynchronizerId = 
synchronizer2, + synchronizerId = synchronizer1, + recordTime = recordTime, + workflowId = Some( + WorkflowId.assertFromString(randomString(payloadLength)) + ), // mimic the unassign payload with the workflowId. This is also stored with all events. + )(coids.zipWithIndex.map { case (coid, index) => + unassign( + coid = coid, + nodeId = index, + ) + }) + + private def creates(recordTime: () => CantonTimestamp, payloadLength: Int)( + coids: Seq[ContractId] + ): (Update.SequencedTransactionAccepted, Vector[Create]) = { + val txBuilder = TxBuilder() + val creates = coids.iterator + .map(coid => + create( + coid = coid, + argumentPayload = randomString(payloadLength), + template = randomTemplate, + signatories = Set( + dsoParty, + randomParty, + randomParty, + randomParty, + ), + ) + ) + .toVector + creates.foreach(txBuilder.add) + val tx = txBuilder.buildCommitted() + val contractAuthenticationData = coids.iterator + .map( + _ -> Bytes.fromByteString(ByteString.copyFromUtf8(randomString(42))) + ) + .toMap + transaction( + synchronizerId = synchronizer1, + recordTime = recordTime(), + )(tx, contractAuthenticationData) -> creates + } + + private def archives( + recordTime: () => CantonTimestamp, + argumentLength: Int, + resultLength: Int, + )( + creates: Seq[Node.Create] + ): Update.SequencedTransactionAccepted = { + val txBuilder = TxBuilder() + val archives = creates.iterator + .map(create => + archive( + create = create, + actingParties = Set( + randomParty, + randomParty, + randomParty, + ), + argumentPayload = randomString(argumentLength), + resultPayload = randomString(resultLength), + ) + ) + .toVector + archives.foreach(txBuilder.add) + val tx = txBuilder.buildCommitted() + transaction( + synchronizerId = synchronizer1, + recordTime = recordTime(), + )(tx) + } + + private def create( + coid: ContractId, + argumentPayload: String, + template: Ref.Identifier, + signatories: Set[Party], + ): canton.platform.Create = + builder.create( + id = coid, + templateId = template, + argument = Value.ValueRecord( + tycon = None, + fields = ImmArray(None -> Value.ValueText(argumentPayload)), + ), + signatories = signatories, + observers = Set.empty, + packageName = packageName, + ) + + private def archive( + create: Node.Create, + actingParties: Set[Ref.Party], + argumentPayload: String, + resultPayload: String, + ): platform.Exercise = + builder.exercise( + contract = create, + choice = Ref.Name.assertFromString("archivingarchivingarchivingarchivingarchivingarchiving"), + consuming = true, + actingParties = actingParties, + argument = Value.ValueRecord( + tycon = None, + fields = ImmArray(None -> Value.ValueText(argumentPayload)), + ), + byKey = false, + interfaceId = None, + result = Some( + Value.ValueRecord( + tycon = None, + fields = ImmArray(None -> Value.ValueText(resultPayload)), + ) + ), + ) + + private def assign( + coid: ContractId, + nodeId: Int, + ledgerEffectiveTime: Time.Timestamp, + argumentPayload: String, + ): Reassignment.Assign = + Reassignment.Assign( + ledgerEffectiveTime = ledgerEffectiveTime, + createNode = create( + coid = coid, + argumentPayload = argumentPayload, + template = templates(0), + signatories = Set(dsoParty), + ), + contractAuthenticationData = Bytes.Empty, + reassignmentCounter = 10L, + nodeId = nodeId, + ) + + private def unassign( + coid: ContractId, + nodeId: Int, + ): Reassignment.Unassign = + Reassignment.Unassign( + contractId = coid, + templateId = templates(0), + packageName = packageName, + stakeholders = Set(dsoParty), + assignmentExclusivity = None, +
reassignmentCounter = 11L, + nodeId = nodeId, + ) + + private def reassignment( + sourceSynchronizerId: SynchronizerId, + targetSynchronizerId: SynchronizerId, + synchronizerId: SynchronizerId, + recordTime: CantonTimestamp, + workflowId: Option[WorkflowId], + )(reassignments: Seq[Reassignment]): Update.SequencedReassignmentAccepted = + Update.SequencedReassignmentAccepted( + optCompletionInfo = None, + workflowId = workflowId, + updateId = randomUpdateId, + reassignmentInfo = ReassignmentInfo( + sourceSynchronizer = ReassignmentTag.Source(sourceSynchronizerId), + targetSynchronizer = ReassignmentTag.Target(targetSynchronizerId), + submitter = Some(dsoParty), + reassignmentId = ReassignmentId.tryCreate("000123"), + isReassigningParticipant = false, + ), + reassignment = Reassignment.Batch(reassignments.head, reassignments.tail*), + recordTime = recordTime, + synchronizerId = synchronizerId, + acsChangeFactory = testAcsChangeFactory, + internalContractIds = Map.empty, + ) + + private def transaction( + synchronizerId: SynchronizerId, + recordTime: CantonTimestamp, + )( + transaction: CommittedTransaction, + contractAuthenticationData: Map[ContractId, Bytes] = Map.empty, + ): Update.SequencedTransactionAccepted = + Update.SequencedTransactionAccepted( + completionInfoO = None, + transactionMeta = TransactionMeta( + ledgerEffectiveTime = recordTime.underlying, + workflowId = None, + preparationTime = recordTime.underlying, + submissionSeed = someLFHash, + timeBoundaries = LedgerTimeBoundaries.unconstrained, + optUsedPackages = None, + optNodeSeeds = None, + optByKeyNodes = None, + ), + transaction = transaction, + updateId = randomUpdateId, + contractAuthenticationData = contractAuthenticationData, + synchronizerId = synchronizerId, + recordTime = recordTime, + acsChangeFactory = testAcsChangeFactory, + externalTransactionHash = None, + internalContractIds = Map.empty, + ) +} diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/IndexComponentTest.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/IndexComponentTest.scala index ef417b0569..2e2974c571 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/IndexComponentTest.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/IndexComponentTest.scala @@ -12,7 +12,9 @@ import com.digitalasset.canton.ledger.participant.state.index.IndexService import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.LoggingContextWithTrace import com.digitalasset.canton.metrics.LedgerApiServerMetrics +import com.digitalasset.canton.participant.store.memory.InMemoryContractStore import com.digitalasset.canton.platform.IndexComponentTest.TestServices +import com.digitalasset.canton.platform.LedgerApiServerInternals import com.digitalasset.canton.platform.apiserver.execution.CommandProgressTracker import com.digitalasset.canton.platform.config.{IndexServiceConfig, ServerRole} import com.digitalasset.canton.platform.index.IndexServiceOwner @@ -23,8 +25,8 @@ import com.digitalasset.canton.platform.store.DbSupport.{ConnectionPoolConfig, D import com.digitalasset.canton.platform.store.cache.MutableLedgerEndCache import com.digitalasset.canton.platform.store.dao.events.{ContractLoader, LfValueTranslation} import com.digitalasset.canton.platform.store.interning.StringInterningView -import 
com.digitalasset.canton.platform.store.packagemeta.PackageMetadata -import com.digitalasset.canton.platform.store.{DbSupport, FlywayMigrations} +import com.digitalasset.canton.platform.store.{DbSupport, FlywayMigrations, PruningOffsetService} +import com.digitalasset.canton.store.packagemeta.PackageMetadata import com.digitalasset.canton.time.WallClock import com.digitalasset.canton.tracing.NoReportingTracerProvider import com.digitalasset.canton.util.PekkoUtil.{FutureQueue, IndexingFutureQueue} @@ -50,7 +52,13 @@ trait IndexComponentTest extends PekkoBeforeAndAfterAll with BaseTest with HasEx LoggingContextWithTrace.ForTesting // if we ever need multi-db support, polymorphism can come in here; look for JdbcLedgerDaoBackend - private val jdbcUrl = s"jdbc:h2:mem:${getClass.getSimpleName.toLowerCase};db_close_delay=-1" + protected val jdbcUrl = s"jdbc:h2:mem:${getClass.getSimpleName.toLowerCase};db_close_delay=-1" + + protected val indexerConfig: IndexerConfig = IndexerConfig() + + protected val indexServiceConfig: IndexServiceConfig = IndexServiceConfig() + + protected val indexReadConnectionPoolSize: Int = 10 private val testServicesRef: AtomicReference[TestServices] = new AtomicReference() @@ -70,8 +78,14 @@ trait IndexComponentTest extends PekkoBeforeAndAfterAll with BaseTest with HasEx } } + protected def ingestUpdateAsync(update: Update): Future[Unit] = + testServices.indexer.offer(update).map(_ => ()) + protected def index: IndexService = testServices.index + protected def participantContractStore: InMemoryContractStore = + testServices.participantContractStore + protected def sequentialPostProcessor: Update => Unit = _ => () override protected def beforeAll(): Unit = { @@ -79,14 +93,14 @@ trait IndexComponentTest extends PekkoBeforeAndAfterAll with BaseTest with HasEx // We use the dispatcher here because the default Scalatest execution context is too slow. 
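// Hedged, illustrative sketch (not part of this patch): the members made `protected`
// above are override hooks, so a concrete spec can tune the fixture without copying it.
// `SmallPoolIndexTest` and the values below are assumptions, not code from the repo.
class SmallPoolIndexTest extends IndexComponentTest {
  // assumption: two read connections suffice for a single-stream test
  override protected val indexReadConnectionPoolSize: Int = 2
  // assumption: default indexer settings, shown only to illustrate the hook;
  // a throughput test might raise ingestionParallelism here instead
  override protected val indexerConfig: IndexerConfig = IndexerConfig()
  // a test body can then feed synthetic updates via ingestUpdateAsync(update)
}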
implicit val resourceContext: ResourceContext = ResourceContext(system.dispatcher) - val indexerConfig = IndexerConfig() - val engine = new Engine( EngineConfig(LanguageVersion.StableVersions(LanguageMajorVersion.V2)) ) val mutableLedgerEndCache = MutableLedgerEndCache() val stringInterningView = new StringInterningView(loggerFactory) val participantId = Ref.ParticipantId.assertFromString("index-component-test-participant-id") + val participantContractStore = new InMemoryContractStore(timeouts, loggerFactory) + val pruningOffsetService = mock[PruningOffsetService] val indexResourceOwner = for { @@ -108,7 +122,7 @@ trait IndexComponentTest extends PekkoBeforeAndAfterAll with BaseTest with HasEx dbConfig = DbConfig( jdbcUrl = jdbcUrl, connectionPool = ConnectionPoolConfig( - connectionPoolSize = 10, + connectionPoolSize = indexReadConnectionPoolSize, connectionTimeout = 250.millis, ), ), @@ -124,9 +138,7 @@ trait IndexComponentTest extends PekkoBeforeAndAfterAll with BaseTest with HasEx executionContext = ec, tracer = NoReportingTracerProvider.tracer, loggerFactory = loggerFactory, - dataSourceProperties = IndexerConfig.createDataSourcePropertiesForTesting( - indexerConfig.ingestionParallelism.unwrap - ), + dataSourceProperties = IndexerConfig.createDataSourcePropertiesForTesting(indexerConfig), highAvailability = HaConfig(), indexSericeDbDispatcher = Some(dbSupport.dbDispatcher), clock = clock, @@ -143,7 +155,8 @@ trait IndexComponentTest extends PekkoBeforeAndAfterAll with BaseTest with HasEx } contractLoader <- ContractLoader.create( contractStorageBackend = dbSupport.storageBackendFactory.createContractStorageBackend( - inMemoryState.stringInterningView + inMemoryState.stringInterningView, + inMemoryState.ledgerEndCache, ), dbDispatcher = dbSupport.dbDispatcher, metrics = LedgerApiServerMetrics.ForTesting, @@ -154,7 +167,7 @@ trait IndexComponentTest extends PekkoBeforeAndAfterAll with BaseTest with HasEx ) indexService <- new IndexServiceOwner( dbSupport = dbSupport, - config = IndexServiceConfig(), + config = indexServiceConfig, participantId = Ref.ParticipantId.assertFromString(IndexComponentTest.TestParticipantId), metrics = LedgerApiServerMetrics.ForTesting, inMemoryState = inMemoryState, @@ -178,6 +191,8 @@ trait IndexComponentTest extends PekkoBeforeAndAfterAll with BaseTest with HasEx _: String, _: LoggingContextWithTrace, ) => FutureUnlessShutdown.pure(Left("not used")), + participantContractStore = participantContractStore, + pruningOffsetService = pruningOffsetService, ) } yield indexService -> indexer @@ -189,6 +204,7 @@ trait IndexComponentTest extends PekkoBeforeAndAfterAll with BaseTest with HasEx indexResource = indexResource, index = index, indexer = indexer, + participantContractStore = participantContractStore, ) ) } @@ -208,11 +224,10 @@ object IndexComponentTest { val TestParticipantId = "index-component-test-participant-id" - val maxUpdateCount = 1000000 - final case class TestServices( indexResource: Resource[Any], index: IndexService, indexer: FutureQueue[Update], + participantContractStore: InMemoryContractStore, ) } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/FatContractInstanceHelper.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/FatContractInstanceHelper.scala index a7a366503d..80ea7e73a5 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/FatContractInstanceHelper.scala +++ 
b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/FatContractInstanceHelper.scala @@ -5,12 +5,12 @@ package com.digitalasset.canton.platform.apiserver import com.digitalasset.canton.protocol.LfFatContractInst import com.digitalasset.daml.lf.data.{Bytes, Ref, Time} -import com.digitalasset.daml.lf.language.LanguageVersion import com.digitalasset.daml.lf.transaction.{ CreationTime, FatContractInstance, GlobalKeyWithMaintainers, Node, + SerializationVersion as LfSerializationVersion, } import com.digitalasset.daml.lf.value.Value @@ -26,7 +26,7 @@ object FatContractInstanceHelper { signatories: Set[Ref.Party], stakeholders: Set[Ref.Party], keyOpt: Option[GlobalKeyWithMaintainers], - version: LanguageVersion, + version: LfSerializationVersion, ): LfFatContractInst = { val create = Node.Create( templateId = templateId, diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/execution/LedgerTimeAwareCommandExecutorSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/execution/LedgerTimeAwareCommandExecutorSpec.scala index f549bdc027..a621205029 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/execution/LedgerTimeAwareCommandExecutorSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/execution/LedgerTimeAwareCommandExecutorSpec.scala @@ -20,7 +20,7 @@ import com.digitalasset.canton.metrics.LedgerApiServerMetrics import com.digitalasset.canton.platform.apiserver.FatContractInstanceHelper import com.digitalasset.canton.platform.apiserver.services.ErrorCause import com.digitalasset.canton.platform.apiserver.services.ErrorCause.LedgerTime -import com.digitalasset.canton.protocol.LfTransactionVersion +import com.digitalasset.canton.protocol.LfSerializationVersion import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.{BaseTest, FailOnShutdown} import com.digitalasset.daml.lf.command.ApiCommands as LfCommands @@ -80,7 +80,7 @@ class LedgerTimeAwareCommandExecutorSpec signatories = Set(alice), stakeholders = Set(alice), keyOpt = None, - version = LfTransactionVersion.minVersion, + version = LfSerializationVersion.V1, ) ) private val synchronizerRank = diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/execution/ResolveMaximumLedgerTimeSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/execution/ResolveMaximumLedgerTimeSpec.scala index 83dee9c953..9d1290e4d0 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/execution/ResolveMaximumLedgerTimeSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/execution/ResolveMaximumLedgerTimeSpec.scala @@ -9,7 +9,7 @@ import com.digitalasset.canton.ledger.participant.state.index.{ } import com.digitalasset.canton.logging.LoggingContextWithTrace import com.digitalasset.canton.platform.apiserver.FatContractInstanceHelper -import com.digitalasset.canton.protocol.LfTransactionVersion +import com.digitalasset.canton.protocol.LfSerializationVersion import com.digitalasset.canton.{BaseTest, HasExecutionContext} import com.digitalasset.daml.lf.crypto.Hash import com.digitalasset.daml.lf.data.Ref.{Identifier, PackageName} @@ 
-90,7 +90,7 @@ class ResolveMaximumLedgerTimeSpec signatories = Set(alice), stakeholders = Set(alice), keyOpt = None, - version = LfTransactionVersion.minVersion, + version = LfSerializationVersion.V1, ) private def contractId(id: Int): ContractId = diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/execution/StoreBackedCommandInterpreterSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/execution/StoreBackedCommandInterpreterSpec.scala index 22577208aa..7452d0fec5 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/execution/StoreBackedCommandInterpreterSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/execution/StoreBackedCommandInterpreterSpec.scala @@ -29,7 +29,7 @@ import com.digitalasset.daml.lf.data.{Bytes, ImmArray, Ref, Time} import com.digitalasset.daml.lf.engine import com.digitalasset.daml.lf.engine.* import com.digitalasset.daml.lf.transaction.test.TransactionBuilder -import com.digitalasset.daml.lf.transaction.{CreationTime, Node as LfNode} +import com.digitalasset.daml.lf.transaction.{CreationTime, FatContractInstance, Node as LfNode} import com.digitalasset.daml.lf.value.Value import com.google.protobuf.ByteString import monocle.Monocle.toAppliedFocusOps @@ -54,10 +54,14 @@ class StoreBackedCommandInterpreterSpec private val createCycleApiCommand: Commands = testEngine.validateCommand(new Cycle("id", alice).create().commands.loneElement, alice) - private def repeatCycleApiCommand(cid: ContractId): Commands = + private def repeatCycleApiCommand( + cid: ContractId, + disclosedContracts: Seq[FatContractInstance] = Seq.empty, + ): Commands = testEngine.validateCommand( new Cycle.ContractId(cid.coid).exerciseRepeat().commands().loneElement, alice, + disclosedContracts, ) private def createCycleContract() = { @@ -70,7 +74,7 @@ class StoreBackedCommandInterpreterSpec } private val salt: Bytes = ContractAuthenticationDataV1(TestSalt.generateSalt(36))( - AuthenticatedContractIdVersionV11 + CantonContractIdVersion.maxV1 ).toLfBytes private val identifier: Identifier = Ref.Identifier(Ref.PackageId.assertFromString("p"), Ref.QualifiedName.assertFromString("m:n")) @@ -95,7 +99,7 @@ class StoreBackedCommandInterpreterSpec packageName, ) ), - version = LfTransactionVersion.StableVersions.max, + version = LfSerializationVersion.StableVersions.max, ) private val disclosedCreateNode = mkCreateNode() private val disclosedContractCreateTime = Time.Timestamp.now() @@ -309,85 +313,98 @@ class StoreBackedCommandInterpreterSpec } - "complete if contract authentication passes" in { - - val (_, _, contract) = createCycleContract() - val inst: LfFatContractInst = contract.inst - - val contractStore = mock[ContractStore] - - when( - contractStore.lookupContractState( - contractId = any[ContractId] - )(any[LoggingContextWithTrace]) - ).thenReturn(Future.successful(ContractState.NotFound)) // prefetch only - - when( - contractStore.lookupActiveContract( - readers = any[Set[Ref.Party]], - contractId = eqTo(inst.contractId), - )(any[LoggingContextWithTrace]) - ).thenReturn(Future.successful(Some(inst))) - - val commands = repeatCycleApiCommand(inst.contractId) + forAll(Seq(true, false)) { disclosed => + val contractType = if (disclosed) "disclosed contract" else "local contract" + s"complete if $contractType authentication passes" in { + + val (_, _, contract) = 
createCycleContract() + val inst: LfFatContractInst = contract.inst + + val contractStore = mock[ContractStore] + + when( + contractStore.lookupContractState( + contractId = any[ContractId] + )(any[LoggingContextWithTrace]) + ).thenReturn(Future.successful(ContractState.NotFound)) // prefetch only + + // When a disclosed contract should be used, the mock is deliberately left unstubbed so that any + // store lookup fails, which verifies that the disclosed contract lookup takes precedence. + if (!disclosed) { + when( + contractStore.lookupActiveContract( + readers = any[Set[Ref.Party]], + contractId = eqTo(inst.contractId), + )(any[LoggingContextWithTrace]) + ).thenReturn(Future.successful(Some(inst))) + } - val sut = mkSut( - testEngine.engine, - contractStore = contractStore, - contractAuthenticator = (_, _) => Either.unit, - ) + val commands = + repeatCycleApiCommand(inst.contractId, if (disclosed) Seq(inst) else Seq.empty) - sut - .interpret(commands, submissionSeed)( - LoggingContextWithTrace(loggerFactory), - executionContext, + val sut = mkSut( + testEngine.engine, + contractStore = contractStore, + contractAuthenticator = (_, _) => Either.unit, ) - .map { - case Right(_) => succeed - case other => fail(s"Expected success, got $other") - } - } - - "error if contract authentication fails" in { + sut + .interpret(commands, submissionSeed)( + LoggingContextWithTrace(loggerFactory), + executionContext, + ) + .map { + case Right(_) => succeed + case other => fail(s"Expected success, got $other") + } + } - val (_, _, contract) = createCycleContract() - val inst: LfFatContractInst = contract.inst + s"error if $contractType authentication fails" in { - val contractStore = mock[ContractStore] + val (_, _, contract) = createCycleContract() + val inst: LfFatContractInst = contract.inst - when( - contractStore.lookupContractState( - contractId = any[ContractId] - )(any[LoggingContextWithTrace]) - ).thenReturn(Future.successful(ContractState.NotFound)) // prefetch only + val contractStore = mock[ContractStore] - when( - contractStore.lookupActiveContract( - readers = any[Set[Ref.Party]], - contractId = eqTo(inst.contractId), - )(any[LoggingContextWithTrace]) - ).thenReturn(Future.successful(Some(inst))) + when( + contractStore.lookupContractState( + contractId = any[ContractId] + )(any[LoggingContextWithTrace]) + ).thenReturn(Future.successful(ContractState.NotFound)) // prefetch only - val commands = repeatCycleApiCommand(inst.contractId) + // When a disclosed contract should be used, the mock is deliberately left unstubbed so that any + // store lookup fails, which verifies that the disclosed contract lookup takes precedence. 
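// The conditional-stubbing idiom used in these tests, reduced to a hedged standalone
// sketch; `store`, `cid`, and `inst` stand in for the values built in the test above:
def stubStoreUnlessDisclosed(
    store: ContractStore,
    disclosed: Boolean,
    cid: ContractId,
    inst: LfFatContractInst,
): Unit =
  if (!disclosed) {
    when(
      store.lookupActiveContract(
        readers = any[Set[Ref.Party]],
        contractId = eqTo(cid),
      )(any[LoggingContextWithTrace])
    ).thenReturn(Future.successful(Some(inst)))
  }
// An unstubbed Mockito mock returns null for lookupActiveContract, so any store access
// in the disclosed case fails the test immediately; a passing run therefore proves the
// engine resolved the contract from the disclosed set. The code below applies this pattern: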
+ if (!disclosed) { + when( + contractStore.lookupActiveContract( + readers = any[Set[Ref.Party]], + contractId = eqTo(inst.contractId), + )(any[LoggingContextWithTrace]) + ).thenReturn(Future.successful(Some(inst))) + } - val sut = mkSut( - testEngine.engine, - contractStore = contractStore, - contractAuthenticator = (_, _) => Left("Not authorized"), - ) + val commands = + repeatCycleApiCommand(inst.contractId, if (disclosed) Seq(inst) else Seq.empty) - sut - .interpret(commands, submissionSeed)( - LoggingContextWithTrace(loggerFactory), - executionContext, + val sut = mkSut( + testEngine.engine, + contractStore = contractStore, + contractAuthenticator = (_, _) => Left("Not authorized"), ) - .map { - case Left(ErrorCause.DamlLf(engine.Error.Interpretation(_, _))) => succeed - case other => fail(s"Did not expect: $other") - } + sut + .interpret(commands, submissionSeed)( + LoggingContextWithTrace(loggerFactory), + executionContext, + ) + .map { + case Left(ErrorCause.DamlLf(engine.Error.Interpretation(_, _))) => succeed + case other => fail(s"Did not expect: $other") + } + + } } + } } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandServiceSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandServiceSpec.scala index 761f4655d4..b8470c0b70 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandServiceSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandServiceSpec.scala @@ -8,7 +8,6 @@ import com.daml.ledger.api.v2.command_service.{ SubmitAndWaitForReassignmentResponse, SubmitAndWaitForTransactionRequest, SubmitAndWaitForTransactionResponse, - SubmitAndWaitForTransactionTreeResponse, SubmitAndWaitRequest, SubmitAndWaitResponse, } @@ -20,7 +19,6 @@ import com.digitalasset.canton.ledger.api.MockMessages.* import com.digitalasset.canton.ledger.api.services.CommandService import com.digitalasset.canton.ledger.api.validation.{ CommandsValidator, - ValidateDisclosedContracts, ValidateUpgradingPackageResolutions, } import com.digitalasset.canton.logging.LoggingContextWithTrace @@ -32,11 +30,8 @@ import org.scalatest.wordspec.AsyncWordSpec import java.time.{Duration, Instant} import java.util.concurrent.atomic.AtomicInteger -import scala.annotation.nowarn import scala.concurrent.Future -// TODO(#23504) remove TransactionTree related methods when TransactionTree is removed from the API -@nowarn("cat=deprecation") class ApiCommandServiceSpec extends AsyncWordSpec with MockitoSugar @@ -71,9 +66,6 @@ class ApiCommandServiceSpec _ <- grpcCommandService.submitAndWaitForTransaction( aSubmitAndWaitForTransactionRequestWithNoSubmissionId ) - _ <- grpcCommandService.submitAndWaitForTransactionTree( - aSubmitAndWaitRequestWithNoSubmissionId - ) _ <- grpcCommandService.submitAndWaitForReassignment( aSubmitAndWaitForReassignmentRequestWithNoSubmissionId ) @@ -110,17 +102,13 @@ class ApiCommandServiceSpec ) requestCaptorSubmitAndWaitForTransaction.value shouldBe expectedSubmitAndWaitForTransactionRequest("2") - verify(mockCommandService).submitAndWaitForTransactionTree( - requestCaptorSubmitAndWait.capture - )(any[LoggingContextWithTrace]) - requestCaptorSubmitAndWait.value shouldBe expectedSubmitAndWaitRequest("3") verify(mockCommandService).submitAndWaitForReassignment( 
requestCaptorSubmitAndWaitForReassignment.capture )( any[LoggingContextWithTrace] ) requestCaptorSubmitAndWaitForReassignment.value shouldBe - expectedSubmitAndWaitForReassignmentRequest("4") + expectedSubmitAndWaitForReassignmentRequest("3") succeed } } @@ -152,7 +140,6 @@ class ApiCommandServiceSpec _ <- grpcCommandService.submitAndWaitForTransaction( submissionWithDisclosedContractsForTransaction ) - _ <- grpcCommandService.submitAndWaitForTransactionTree(submissionWithDisclosedContracts) } yield { succeed } @@ -161,8 +148,6 @@ class ApiCommandServiceSpec } } -// TODO(#23504) remove TransactionTree related methods when TransactionTree is removed from the API -@nowarn("cat=deprecation") object ApiCommandServiceSpec { private val aCommand = Command.of( Command.Command.Create( @@ -193,8 +178,7 @@ object ApiCommandServiceSpec { private val submissionIdPrefix = "submissionId-" private val commandsValidator = new CommandsValidator( - validateUpgradingPackageResolutions = ValidateUpgradingPackageResolutions.Empty, - validateDisclosedContracts = ValidateDisclosedContracts.WithContractIdVerificationDisabled, + validateUpgradingPackageResolutions = ValidateUpgradingPackageResolutions.Empty ) def createMockCommandService: CommandService & AutoCloseable = { @@ -217,12 +201,6 @@ object ApiCommandServiceSpec { ) ) .thenReturn(Future.successful(SubmitAndWaitForReassignmentResponse.defaultInstance)) - when( - mockCommandService.submitAndWaitForTransactionTree(any[SubmitAndWaitRequest])( - any[LoggingContextWithTrace] - ) - ) - .thenReturn(Future.successful(SubmitAndWaitForTransactionTreeResponse.defaultInstance)) mockCommandService } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandSubmissionServiceSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandSubmissionServiceSpec.scala index 8d8a0fe32e..e720ddd7e6 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandSubmissionServiceSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandSubmissionServiceSpec.scala @@ -12,7 +12,6 @@ import com.digitalasset.canton.ledger.api.messages.command.submission.SubmitRequ import com.digitalasset.canton.ledger.api.services.CommandSubmissionService import com.digitalasset.canton.ledger.api.validation.{ CommandsValidator, - ValidateDisclosedContracts, ValidateUpgradingPackageResolutions, } import com.digitalasset.canton.lifecycle.FutureUnlessShutdown @@ -131,8 +130,7 @@ class ApiCommandSubmissionServiceSpec new ApiCommandSubmissionService( commandSubmissionService = commandSubmissionService, commandsValidator = new CommandsValidator( - validateUpgradingPackageResolutions = ValidateUpgradingPackageResolutions.Empty, - validateDisclosedContracts = ValidateDisclosedContracts.WithContractIdVerificationDisabled, + validateUpgradingPackageResolutions = ValidateUpgradingPackageResolutions.Empty ), submissionSyncService = null, currentLedgerTime = () => Instant.EPOCH, diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/DisclosedContractCreator.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/DisclosedContractCreator.scala index bfc0a4738d..6c2605f148 100644 --- 
a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/DisclosedContractCreator.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/DisclosedContractCreator.scala @@ -6,7 +6,7 @@ package com.digitalasset.canton.platform.apiserver.services import com.daml.ledger.api.v2.commands.DisclosedContract import com.daml.ledger.api.v2.value.Identifier import com.digitalasset.canton.LfValue -import com.digitalasset.canton.protocol.{ExampleContractFactory, LfTransactionVersion} +import com.digitalasset.canton.protocol.{ExampleContractFactory, LfSerializationVersion} import com.digitalasset.daml.lf.data.{ImmArray, Ref, Time} import com.digitalasset.daml.lf.transaction.{ CreationTime, @@ -18,7 +18,7 @@ import com.digitalasset.daml.lf.value.Value.{ValueRecord, ValueTrue} object DisclosedContractCreator { - private val testTxVersion = LfTransactionVersion.minVersion + private val testTxVersion = LfSerializationVersion.minVersion private object api { val templateId: Identifier = diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPackageManagementServiceSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPackageManagementServiceSpec.scala index 111a0609d3..51828417bc 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPackageManagementServiceSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPackageManagementServiceSpec.scala @@ -15,9 +15,17 @@ import com.daml.nonempty.NonEmpty import com.daml.tracing.DefaultOpenTelemetry import com.daml.tracing.TelemetrySpecBase.* import com.digitalasset.base.error.ErrorsAssertions +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.crypto.HashOps import com.digitalasset.canton.data.{CantonTimestamp, Offset} import com.digitalasset.canton.error.{TransactionError, TransactionRoutingError} import com.digitalasset.canton.ledger.api.health.HealthStatus +import com.digitalasset.canton.ledger.api.{ + EnrichedVettedPackage, + ListVettedPackagesOpts, + UpdateVettedPackagesOpts, + UploadDarVettingChange, +} import com.digitalasset.canton.ledger.participant.state import com.digitalasset.canton.ledger.participant.state.{ InternalIndexService, @@ -32,9 +40,16 @@ import com.digitalasset.canton.ledger.participant.state.{ import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.SuppressionRule import com.digitalasset.canton.protocol.{LfContractId, LfFatContractInst, LfSubmittedTransaction} -import com.digitalasset.canton.topology.{PhysicalSynchronizerId, SynchronizerId} +import com.digitalasset.canton.topology.{ + DefaultTestIdentities, + ExternalPartyOnboardingDetails, + ParticipantId, + PhysicalSynchronizerId, + SynchronizerId, +} import com.digitalasset.canton.tracing.{TestTelemetrySetup, TraceContext} import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.canton.{BaseTest, LfKeyResolver, LfPackageId, LfPartyId} import com.digitalasset.daml.lf.data.Ref.{CommandId, Party, SubmissionId, UserId, WorkflowId} import com.digitalasset.daml.lf.data.{ImmArray, Ref} @@ -80,7 +95,14 @@ class 
ApiPackageManagementServiceSpec val span = testTelemetrySetup.anEmptySpan() val scope = span.makeCurrent() apiService - .uploadDarFile(UploadDarFileRequest(ByteString.EMPTY, aSubmissionId)) + .uploadDarFile( + UploadDarFileRequest( + ByteString.EMPTY, + aSubmissionId, + UploadDarFileRequest.VettingChange.VETTING_CHANGE_VET_ALL_PACKAGES, + synchronizerId = "", + ) + ) .thereafter { _ => scope.close() span.end() @@ -99,7 +121,14 @@ class ApiPackageManagementServiceSpec loggerFactory.assertLogsSeq(SuppressionRule.LevelAndAbove(DEBUG))( within = { apiService - .uploadDarFile(UploadDarFileRequest(ByteString.EMPTY, aSubmissionId)) + .uploadDarFile( + UploadDarFileRequest( + ByteString.EMPTY, + aSubmissionId, + UploadDarFileRequest.VettingChange.VETTING_CHANGE_VET_ALL_PACKAGES, + synchronizerId = "", + ) + ) .map(_ => succeed) }, { logEntries => @@ -114,7 +143,7 @@ class ApiPackageManagementServiceSpec "validate a dar" in { val apiService = createApiService() apiService - .validateDarFile(ValidateDarFileRequest(ByteString.EMPTY, aSubmissionId)) + .validateDarFile(ValidateDarFileRequest(ByteString.EMPTY, aSubmissionId, "")) .map { case ValidateDarFileResponse() => succeed } } } @@ -134,6 +163,8 @@ object ApiPackageManagementServiceSpec { override def uploadDar( dar: Seq[ByteString], submissionId: Ref.SubmissionId, + vettingChange: UploadDarVettingChange, + synchronizerId: Option[SynchronizerId], )(implicit traceContext: TraceContext ): Future[SubmissionResult] = { @@ -145,7 +176,11 @@ object ApiPackageManagementServiceSpec { Future.successful(state.SubmissionResult.Acknowledged) } - override def validateDar(dar: ByteString, darName: String)(implicit + override def validateDar( + dar: ByteString, + darName: String, + synchronizerId: Option[SynchronizerId], + )(implicit traceContext: TraceContext ): Future[SubmissionResult] = { val telemetryContext = traceContext.toDamlTelemetryContext(tracer) @@ -168,6 +203,8 @@ object ApiPackageManagementServiceSpec { override def currentHealth(): HealthStatus = throw new UnsupportedOperationException() + override def hashOps: HashOps = throw new UnsupportedOperationException() + override def submitTransaction( transaction: SubmittedTransaction, synchronizerRank: SynchronizerRank, @@ -197,6 +234,7 @@ object ApiPackageManagementServiceSpec { hint: Party, submissionId: SubmissionId, synchronizerIdO: Option[SynchronizerId], + externalPartyOnboardingDetails: Option[ExternalPartyOnboardingDetails], )(implicit traceContext: TraceContext): FutureUnlessShutdown[SubmissionResult] = throw new UnsupportedOperationException() @@ -246,5 +284,26 @@ object ApiPackageManagementServiceSpec { traceContext: TraceContext ): RoutingSynchronizerState = throw new UnsupportedOperationException() + + override def listVettedPackages( + opts: ListVettedPackagesOpts + )(implicit + traceContext: TraceContext + ): Future[Seq[(Seq[EnrichedVettedPackage], SynchronizerId, PositiveInt)]] = + throw new UnsupportedOperationException() + + override def updateVettedPackages( + opts: UpdateVettedPackagesOpts + )(implicit + traceContext: TraceContext + ): Future[(Seq[EnrichedVettedPackage], Seq[EnrichedVettedPackage])] = + throw new UnsupportedOperationException() + + override def protocolVersionForSynchronizerId( + synchronizerId: SynchronizerId + ): Option[ProtocolVersion] = + throw new UnsupportedOperationException() + + override def participantId: ParticipantId = DefaultTestIdentities.participant1 } } diff --git 
a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPartyManagementServiceSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPartyManagementServiceSpec.scala index a73f8e2eb9..00afe0dd40 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPartyManagementServiceSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPartyManagementServiceSpec.scala @@ -3,18 +3,33 @@ package com.digitalasset.canton.platform.apiserver.services.admin +import cats.syntax.traverse.* import com.daml.ledger.api.testing.utils.PekkoBeforeAndAfterAll +import com.daml.ledger.api.v2.admin.party_management_service.AllocateExternalPartyRequest.SignedTransaction import com.daml.ledger.api.v2.admin.party_management_service.{ + AllocateExternalPartyRequest, AllocatePartyRequest, + GenerateExternalPartyTopologyRequest, + GenerateExternalPartyTopologyResponse, PartyDetails as ProtoPartyDetails, } +import com.daml.ledger.api.v2.crypto.SignatureFormat.SIGNATURE_FORMAT_RAW +import com.daml.ledger.api.v2.{crypto, crypto as lapicrypto} +import com.daml.nonempty.NonEmpty import com.daml.tracing.TelemetrySpecBase.* import com.daml.tracing.{DefaultOpenTelemetry, NoOpTelemetry} import com.digitalasset.base.error.ErrorsAssertions import com.digitalasset.base.error.utils.ErrorDetails import com.digitalasset.base.error.utils.ErrorDetails.RetryInfoDetail -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.crypto.v30.SigningKeyScheme.SIGNING_KEY_SCHEME_UNSPECIFIED +import com.digitalasset.canton.crypto.{ + Fingerprint, + HashOps, + SigningKeyUsage, + SigningPublicKey, + TestHash, +} import com.digitalasset.canton.ledger.api.{IdentityProviderId, ObjectMeta} import com.digitalasset.canton.ledger.localstore.api.{ PartyRecord, @@ -22,30 +37,67 @@ import com.digitalasset.canton.ledger.localstore.api.{ UserManagementStore, } import com.digitalasset.canton.ledger.participant.state +import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective +import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective.AuthorizationEvent.Added +import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective.AuthorizationLevel.Submission import com.digitalasset.canton.ledger.participant.state.index.{ IndexPartyManagementService, IndexerPartyDetails, } import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory} -import com.digitalasset.canton.platform.apiserver.services.admin.ApiPartyManagementService.blindAndConvertToProto +import com.digitalasset.canton.logging.{ + LoggingContextWithTrace, + NamedLoggerFactory, + SuppressionRule, +} +import com.digitalasset.canton.platform.apiserver.services.admin.ApiPartyManagementService.{ + CreateSubmissionId, + blindAndConvertToProto, +} import com.digitalasset.canton.platform.apiserver.services.admin.ApiPartyManagementServiceSpec.* import com.digitalasset.canton.platform.apiserver.services.admin.PartyAllocation import com.digitalasset.canton.platform.apiserver.services.tracking.{InFlight, 
StreamTracker} -import com.digitalasset.canton.topology.SynchronizerId +import com.digitalasset.canton.topology.transaction.TopologyChangeOp.Replace +import com.digitalasset.canton.topology.transaction.{ + DecentralizedNamespaceDefinition, + DelegationRestriction, + HostingParticipant, + NamespaceDelegation, + ParticipantPermission, + PartyHostingLimits, + PartyToKeyMapping, + PartyToParticipant, + TopologyChangeOp, + TopologyTransaction, +} +import com.digitalasset.canton.topology.{ + DefaultTestIdentities, + ExternalPartyOnboardingDetails, + Namespace, + ParticipantId, + PartyId, + SynchronizerId, +} import com.digitalasset.canton.tracing.{TestTelemetrySetup, TraceContext} import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.version.ProtocolVersion +import com.digitalasset.canton.{BaseTest, HasExecutorService} import com.digitalasset.daml.lf.data.Ref +import com.google.protobuf.ByteString import io.grpc.Status.Code import io.grpc.StatusRuntimeException import io.opentelemetry.api.trace.Tracer import io.opentelemetry.sdk.OpenTelemetrySdk +import io.scalaland.chimney.dsl.* import org.mockito.{ArgumentMatchers, ArgumentMatchersSugar, MockitoSugar} import org.scalatest.BeforeAndAfterEach import org.scalatest.concurrent.ScalaFutures import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AsyncWordSpec +import org.slf4j.event.Level +import scalapb.lenses.{Lens, Mutation} +import java.security.{KeyPair, KeyPairGenerator, Signature} import scala.concurrent.duration.{DurationInt, FiniteDuration} import scala.concurrent.{ExecutionContext, Future} import scala.util.{Failure, Success} @@ -59,12 +111,43 @@ class ApiPartyManagementServiceSpec with PekkoBeforeAndAfterAll with ErrorsAssertions with BaseTest - with BeforeAndAfterEach { + with BeforeAndAfterEach + with HasExecutorService { var testTelemetrySetup: TestTelemetrySetup = _ val partiesPageSize = PositiveInt.tryCreate(100) - val aSubmissionId = Ref.SubmissionId.assertFromString("aSubmissionId") + val aPartyAllocationTracker = + PartyAllocation.TrackerKey("aParty", DefaultTestIdentities.participant1.toLf, Added(Submission)) + val createSubmissionId = new CreateSubmissionId { + override def apply( + partyIdHint: String, + authorizationLevel: TopologyTransactionEffective.AuthorizationLevel, + ): PartyAllocation.TrackerKey = aPartyAllocationTracker + } + + lazy val ( + _mockIndexTransactionsService, + mockIdentityProviderExists, + mockIndexPartyManagementService, + mockPartyRecordStore, + ) = mockedServices() + val partyAllocationTracker = makePartyAllocationTracker(loggerFactory) + + lazy val apiService = ApiPartyManagementService.createApiService( + mock[IndexPartyManagementService], + mock[UserManagementStore], + mock[IdentityProviderExists], + partiesPageSize, + NonNegativeInt.tryCreate(0), + mock[PartyRecordStore], + TestPartySyncService(testTelemetrySetup.tracer), + oneHour, + createSubmissionId, + NoOpTelemetry, + mock[PartyAllocation.Tracker], + loggerFactory = loggerFactory, + ) override def beforeEach(): Unit = testTelemetrySetup = new TestTelemetrySetup() @@ -74,6 +157,10 @@ class ApiPartyManagementServiceSpec private implicit val ec: ExecutionContext = directExecutionContext + val ApiPartyManagementServiceSuppressionRule: SuppressionRule = + SuppressionRule.LoggerNameContains("ApiPartyManagementService") && + SuppressionRule.Level(Level.ERROR) + "ApiPartyManagementService" should { def blind( idpId: IdentityProviderId, @@ -108,6 +195,430 @@ class ApiPartyManagementServiceSpec 
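// Hedged, self-contained sketch of the plain-JDK Ed25519 flow that the helpers added in
// this hunk (`createSigningKey`, `sign`) build on; requires JDK 15+ and everything here
// is illustrative, not code from the patch.
import java.security.{KeyPairGenerator, Signature}
val keyPair = KeyPairGenerator.getInstance("Ed25519").generateKeyPair()
val signer = Signature.getInstance("Ed25519")
signer.initSign(keyPair.getPrivate)
signer.update("multi-hash bytes".getBytes("UTF-8")) // placeholder payload
val signatureBytes: Array[Byte] = signer.sign()
// The verifying side replays the same bytes and checks the signature:
val verifier = Signature.getInstance("Ed25519")
verifier.initVerify(keyPair.getPublic)
verifier.update("multi-hash bytes".getBytes("UTF-8"))
assert(verifier.verify(signatureBytes))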
.copy(isLocal = false) } + def createSigningKey: (Option[crypto.SigningPublicKey], KeyPair) = { + val keyGen = KeyPairGenerator.getInstance("Ed25519") + val keyPair = keyGen.generateKeyPair() + val protoKey = Some( + lapicrypto.SigningPublicKey( + format = lapicrypto.CryptoKeyFormat.CRYPTO_KEY_FORMAT_DER_X509_SUBJECT_PUBLIC_KEY_INFO, + keyData = ByteString.copyFrom(keyPair.getPublic.getEncoded), + keySpec = lapicrypto.SigningKeySpec.SIGNING_KEY_SPEC_EC_CURVE25519, + ) + ) + (protoKey, keyPair) + } + + def cantonSigningPublicKey(publicKey: crypto.SigningPublicKey) = + SigningPublicKey + .fromProtoV30( + com.digitalasset.canton.crypto.v30.SigningPublicKey( + format = + publicKey.format.transformInto[com.digitalasset.canton.crypto.v30.CryptoKeyFormat], + publicKey = publicKey.keyData, + // Deprecated field + scheme = SIGNING_KEY_SCHEME_UNSPECIFIED, + usage = Seq(SigningKeyUsage.Namespace.toProtoEnum), + keySpec = + publicKey.keySpec.transformInto[com.digitalasset.canton.crypto.v30.SigningKeySpec], + ) + ) + .value + + def sign(keyPair: KeyPair, data: ByteString, signedBy: Fingerprint) = { + val signatureInstance = Signature.getInstance("Ed25519") + signatureInstance.initSign(keyPair.getPrivate) + signatureInstance.update(data.toByteArray) + lapicrypto.Signature( + format = SIGNATURE_FORMAT_RAW, + signature = ByteString.copyFrom(signatureInstance.sign()), + signedBy = signedBy.toProtoPrimitive, + signingAlgorithmSpec = lapicrypto.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_ED25519, + ) + } + + "validate allocateExternalParty request" when { + def testAllocateExternalPartyValidation( + requestTransform: Lens[ + AllocateExternalPartyRequest, + AllocateExternalPartyRequest, + ] => Mutation[AllocateExternalPartyRequest], + expectedFailure: PartyId => Option[String], + ) = { + val (publicKey, keyPair) = createSigningKey + val cantonPublicKey = cantonSigningPublicKey(publicKey.value) + val partyId = PartyId.tryCreate("alice", cantonPublicKey.fingerprint) + for { + generatedTransactions <- apiService.generateExternalPartyTopology( + GenerateExternalPartyTopologyRequest( + synchronizer = DefaultTestIdentities.synchronizerId.toProtoPrimitive, + partyHint = "alice", + publicKey = publicKey, + localParticipantObservationOnly = false, + otherConfirmingParticipantUids = + Seq(DefaultTestIdentities.participant2.uid.toProtoPrimitive), + confirmationThreshold = 1, + observingParticipantUids = + Seq(DefaultTestIdentities.participant3.uid.toProtoPrimitive), + ) + ) + signature = sign(keyPair, generatedTransactions.multiHash, partyId.fingerprint) + request = AllocateExternalPartyRequest( + synchronizer = DefaultTestIdentities.synchronizerId.toProtoPrimitive, + onboardingTransactions = generatedTransactions.topologyTransactions.map(tx => + AllocateExternalPartyRequest.SignedTransaction(tx, Seq.empty) + ), + multiHashSignatures = Seq(signature), + identityProviderId = "", + ).update(requestTransform) + result <- apiService + .allocateExternalParty(request) + .transform { + case Failure(e: io.grpc.StatusRuntimeException) => + expectedFailure(partyId) match { + case Some(value) => + e.getStatus.getCode.value() shouldBe io.grpc.Status.INVALID_ARGUMENT.getCode + .value() + e.getStatus.getDescription should include(value) + Success(succeed) + case None => + fail(s"Expected success but allocation failed with $e") + } + case Failure(other) => fail(s"expected a gRPC exception but got $other") + case Success(_) if expectedFailure(partyId).isDefined => + fail("Expected a failure but got a success") + case Success(_) => 
Success(succeed) + } + } yield result + } + + val (bobKey, bobKeyPair) = { + val (publicKey, keyPair) = createSigningKey + (cantonSigningPublicKey(publicKey.value), keyPair) + } + val bobParty = PartyId.tryCreate("bob", bobKey.fingerprint) + + def mkDecentralizedTx(ownerSize: Int): (SignedTransaction, Namespace) = { + val ownersKeys = Seq.fill(ownerSize)(createSigningKey).map { case (publicKey, keyPair) => + (cantonSigningPublicKey(publicKey.value), keyPair) + } + val namespaceOwners = ownersKeys.map(_._1.fingerprint).toSet.map(Namespace(_)) + val decentralizedNamespace = + DecentralizedNamespaceDefinition.computeNamespace(namespaceOwners) + val decentralizedTx = TopologyTransaction( + Replace, + PositiveInt.one, + DecentralizedNamespaceDefinition.tryCreate( + decentralizedNamespace = decentralizedNamespace, + threshold = PositiveInt.one, + owners = NonEmpty.from(namespaceOwners).value, + ), + testedProtocolVersion, + ) + val signatures = ownersKeys.map { case (publicKey, keyPair) => + sign(keyPair, decentralizedTx.getCryptographicEvidence, publicKey.fingerprint) + } + ( + SignedTransaction( + decentralizedTx.toByteString, + signatures, + ), + decentralizedNamespace, + ) + } + + "fail if missing synchronizerId" in { + testAllocateExternalPartyValidation( + _.synchronizer.modify(_ => ""), + _ => Some("The submitted command is missing a mandatory field: synchronizer"), + ) + } + + "fail if missing a party to participant" in { + testAllocateExternalPartyValidation( + _.onboardingTransactions.modify( + _.filterNot(tx => + TopologyTransaction + .fromByteString(testedProtocolVersion, tx.transaction) + .value + .selectMapping[PartyToParticipant] + .nonEmpty + ) + ), + _ => Some("One transaction of type PartyToParticipant must be provided, got 0"), + ) + } + + "allow a single P2P" in { + testAllocateExternalPartyValidation( + _.onboardingTransactions.modify( + _.filter(tx => + TopologyTransaction + .fromByteString(testedProtocolVersion, tx.transaction) + .value + .selectMapping[PartyToParticipant] + .nonEmpty + ) + ), + _ => None, + ) + } + + "refuse a P2P with Submission rights" in { + testAllocateExternalPartyValidation( + _.onboardingTransactions.modify( + _.map(tx => + TopologyTransaction + .fromByteString(testedProtocolVersion, tx.transaction) + .value + .selectMapping[PartyToParticipant] + .map { p2p => + TopologyTransaction( + p2p.operation, + p2p.serial, + PartyToParticipant.tryCreate( + p2p.mapping.partyId, + p2p.mapping.threshold, + Seq(HostingParticipant(participantId, ParticipantPermission.Submission)), + ), + testedProtocolVersion, + ) + } + .map { updatedTx => + SignedTransaction(updatedTx.toByteString, tx.signatures) + } + .getOrElse(tx) + ) + ), + _ => + Some( + "The PartyToParticipant transaction must not contain any node with Submission permission. Nodes with submission permission: PAR::participant1::participant1..." 
+ ), + ) + } + + "refuse a non multi-hosted party submitted to another node" in { + testAllocateExternalPartyValidation( + _.onboardingTransactions.modify( + _.map(tx => + TopologyTransaction + .fromByteString(testedProtocolVersion, tx.transaction) + .value + .selectMapping[PartyToParticipant] + .map { p2p => + TopologyTransaction( + p2p.operation, + p2p.serial, + PartyToParticipant.tryCreate( + p2p.mapping.partyId, + p2p.mapping.threshold, + Seq( + HostingParticipant( + DefaultTestIdentities.participant2, + ParticipantPermission.Confirmation, + ) + ), + ), + testedProtocolVersion, + ) + } + .map { updatedTx => + SignedTransaction(updatedTx.toByteString, tx.signatures) + } + .getOrElse(tx) + ) + ), + _ => + Some( + "The party is to be hosted on a single participant (PAR::participant2::participant2...) that is not this participant (PAR::participant1::participant1...). Submit the allocation request on PAR::participant2::participant2... instead." + ), + ) + } + + "refuse a multi-hosted party with no confirming node" in { + testAllocateExternalPartyValidation( + _.onboardingTransactions.modify( + _.map(tx => + TopologyTransaction + .fromByteString(testedProtocolVersion, tx.transaction) + .value + .selectMapping[PartyToParticipant] + .map { p2p => + TopologyTransaction( + p2p.operation, + p2p.serial, + PartyToParticipant.tryCreate( + p2p.mapping.partyId, + p2p.mapping.threshold, + Seq( + HostingParticipant( + participantId, + ParticipantPermission.Observation, + ), + HostingParticipant( + DefaultTestIdentities.participant2, + ParticipantPermission.Observation, + ), + ), + ), + testedProtocolVersion, + ) + } + .map { updatedTx => + SignedTransaction(updatedTx.toByteString, tx.signatures) + } + .getOrElse(tx) + ) + ), + _ => + Some( + "The PartyToParticipant transaction must contain at least one node with Confirmation permission" + ), + ) + } + + "refuse mismatching party namespace and p2p namespace" in { + val updatedTransaction = TopologyTransaction( + Replace, + PositiveInt.one, + NamespaceDelegation.tryCreate( + namespace = bobParty.namespace, + target = bobKey, + restriction = DelegationRestriction.CanSignAllMappings, + ), + testedProtocolVersion, + ) + val signature = sign( + bobKeyPair, + updatedTransaction.hash.hash.getCryptographicEvidence, + bobParty.fingerprint, + ) + testAllocateExternalPartyValidation( + _.onboardingTransactions.modify( + _.map(tx => + TopologyTransaction + .fromByteString(testedProtocolVersion, tx.transaction) + .value + .selectMapping[NamespaceDelegation] + .map(_ => updatedTransaction) + .map { updatedTx => + SignedTransaction( + updatedTx.toByteString, + Seq(signature), + ) + } + .getOrElse(tx) + ) + ), + partyId => + Some( + s"The Party namespace (${bobParty.namespace}) does not match the PartyToParticipant namespace (${partyId.namespace})" + ), + ) + } + + "refuse mismatching p2k namespace and p2p namespace" in { + def updatedTransaction(signingKeys: NonEmpty[Seq[SigningPublicKey]]) = TopologyTransaction( + Replace, + PositiveInt.one, + PartyToKeyMapping.tryCreate( + partyId = bobParty, + threshold = PositiveInt.one, + signingKeys = signingKeys, + ), + testedProtocolVersion, + ) + testAllocateExternalPartyValidation( + _.onboardingTransactions.modify( + _.map(tx => + TopologyTransaction + .fromByteString(testedProtocolVersion, tx.transaction) + .value + .selectMapping[PartyToKeyMapping] + .map(p2k => updatedTransaction(p2k.mapping.signingKeys)) + .map { updatedTx => + SignedTransaction(updatedTx.toByteString, tx.signatures) + } + .getOrElse(tx) + ) + ), + 
partyId => + Some( + s"The PartyToKeyMapping namespace (${bobParty.namespace}) does not match the PartyToParticipant namespace (${partyId.namespace})" + ), + ) + } + + "refuse mismatching decentralized namespace and p2p namespace" in { + val (decentralizedNamespaceTx, namespace) = mkDecentralizedTx(1) + testAllocateExternalPartyValidation( + _.onboardingTransactions.modify( + // Remove the Namespace delegation generated by default + _.filterNot(tx => + TopologyTransaction + .fromByteString(testedProtocolVersion, tx.transaction) + .value + .selectMapping[NamespaceDelegation] + .isDefined + ) + // replace it with a decentralized namespace + .appended(decentralizedNamespaceTx) + ), + partyId => + Some( + s"The Party namespace ($namespace) does not match the PartyToParticipant namespace (${partyId.namespace})" + ), + ) + } + + "refuse decentralized namespace with too many owners" in { + val max = ExternalPartyOnboardingDetails.maxDecentralizedOwnersSize + val (decentralizedNamespaceTx, namespace) = mkDecentralizedTx(max.increment.value) + testAllocateExternalPartyValidation( + _.onboardingTransactions.modify( + // Remove the Namespace delegation generated by default + _.filterNot(tx => + TopologyTransaction + .fromByteString(testedProtocolVersion, tx.transaction) + .value + .selectMapping[NamespaceDelegation] + .isDefined + ) + // replace it with a decentralized namespace with too many owners + .appended(decentralizedNamespaceTx) + ), + partyId => + Some( + s"The Party namespace ($namespace) does not match the PartyToParticipant namespace (${partyId.namespace})" + ), + ) + } + + "refuse unwanted transactions" in { + testAllocateExternalPartyValidation( + _.onboardingTransactions.modify( + _.appended( + SignedTransaction( + TopologyTransaction( + TopologyChangeOp.Replace, + PositiveInt.one, + PartyHostingLimits.apply( + DefaultTestIdentities.synchronizerId, + DefaultTestIdentities.party1, + ), + testedProtocolVersion, + ).toByteString, + Seq.empty, + ) + ) + ), + _ => + Some( + s"Unsupported transactions found: PartyHostingLimits. 
Supported transactions are: NamespaceDelegation, DecentralizedNamespaceDefinition, PartyToParticipant, PartyToKeyMapping" + ), + ) + } + } + "propagate trace context" in { val ( mockIdentityProviderExists, @@ -116,44 +627,49 @@ class ApiPartyManagementServiceSpec mockPartyRecordStore, ) = mockedServices() val partyAllocationTracker = makePartyAllocationTracker(loggerFactory) - val apiService = ApiPartyManagementService.createApiService( mockIndexPartyManagementService, mockUserManagementStore, mockIdentityProviderExists, partiesPageSize, + NonNegativeInt.tryCreate(0), mockPartyRecordStore, TestPartySyncService(testTelemetrySetup.tracer), oneHour, - ApiPartyManagementService.CreateSubmissionId.fixedForTests(aSubmissionId), + createSubmissionId, new DefaultOpenTelemetry(OpenTelemetrySdk.builder().build()), partyAllocationTracker, loggerFactory = loggerFactory, ) - val span = testTelemetrySetup.anEmptySpan() - val scope = span.makeCurrent() + loggerFactory.suppress( + ApiPartyManagementServiceSuppressionRule + ) { - // Kick the interaction off - val future = apiService - .allocateParty(AllocatePartyRequest("aParty", None, "", "", "")) - .thereafter { _ => - scope.close() - span.end() - } + val span = testTelemetrySetup.anEmptySpan() + val scope = span.makeCurrent() - // Allow the tracker to complete - partyAllocationTracker.onStreamItem( - PartyAllocation.Completed( - PartyAllocation.TrackerKey.forTests(aSubmissionId), - IndexerPartyDetails(aParty, isLocal = true), + // Kick the interaction off + val future = apiService + .allocateParty(AllocatePartyRequest("aParty", None, "", "", "")) + .thereafter { _ => + scope.close() + span.end() + } + + // Allow the tracker to complete + partyAllocationTracker.onStreamItem( + PartyAllocation.Completed( + aPartyAllocationTracker, + IndexerPartyDetails(aParty, isLocal = true), + ) ) - ) - // Wait for tracker to complete - future.futureValue + // Wait for tracker to complete + future.futureValue - testTelemetrySetup.reportedSpanAttributes should contain(anUserIdSpanAttribute) + testTelemetrySetup.reportedSpanAttributes should contain(anUserIdSpanAttribute) + } } "close while allocating party" in { @@ -164,56 +680,335 @@ class ApiPartyManagementServiceSpec mockPartyRecordStore, ) = mockedServices() val partyAllocationTracker = makePartyAllocationTracker(loggerFactory) - val apiPartyManagementService = ApiPartyManagementService.createApiService( mockIndexPartyManagementService, mockUserManagementStore, mockIdentityProviderExists, partiesPageSize, + NonNegativeInt.tryCreate(0), mockPartyRecordStore, TestPartySyncService(testTelemetrySetup.tracer), oneHour, - ApiPartyManagementService.CreateSubmissionId.fixedForTests(aSubmissionId.toString), + createSubmissionId, NoOpTelemetry, partyAllocationTracker, loggerFactory = loggerFactory, ) - // Kick the interaction off - val future = - apiPartyManagementService.allocateParty(AllocatePartyRequest("aParty", None, "", "", "")) - - // Close the service - apiPartyManagementService.close() - - // Assert that it caused the appropriate failure - future - .transform { - case Success(_) => - fail("Expected a failure, but received success") - case Failure(err: StatusRuntimeException) => - assertError( - actual = err, - expectedStatusCode = Code.UNAVAILABLE, - expectedMessage = "ABORTED_DUE_TO_SHUTDOWN(1,0): request aborted due to shutdown", - expectedDetails = List( - ErrorDetails.ErrorInfoDetail( - "ABORTED_DUE_TO_SHUTDOWN", - Map( - "parties" -> "['aParty']", - "category" -> "1", - "test" -> s"'${getClass.getSimpleName}'", + 
loggerFactory.suppress( + ApiPartyManagementServiceSuppressionRule + ) { + // Kick the interaction off + val future = + apiPartyManagementService.allocateParty(AllocatePartyRequest("aParty", None, "", "", "")) + + // Close the service + apiPartyManagementService.close() + + // Assert that it caused the appropriate failure + future + .transform { + case Success(_) => + fail("Expected a failure, but received success") + case Failure(err: StatusRuntimeException) => + assertError( + actual = err, + expectedStatusCode = Code.UNAVAILABLE, + expectedMessage = "ABORTED_DUE_TO_SHUTDOWN(1,0): request aborted due to shutdown", + expectedDetails = List( + ErrorDetails.ErrorInfoDetail( + "ABORTED_DUE_TO_SHUTDOWN", + Map( + "parties" -> "['aParty']", + "category" -> "1", + "test" -> s"'${getClass.getSimpleName}'", + ), ), + RetryInfoDetail(10.seconds), ), - RetryInfoDetail(10.seconds), - ), - verifyEmptyStackTrace = true, + verifyEmptyStackTrace = true, + ) + Success(succeed) + case Failure(other) => + fail("Unexpected error", other) + } + } + } + + "generate-external-topology" when { + def getMappingsFromResponse(response: GenerateExternalPartyTopologyResponse) = { + response.topologyTransactions should have length (3) + val txs = response.topologyTransactions.toList + .traverse(tx => + TopologyTransaction + .fromByteString(ProtocolVersion.latest, tx) + ) + .valueOrFail("unable to parse topology txs") + .map(_.mapping) + txs match { + case (nd: NamespaceDelegation) :: (pk: PartyToKeyMapping) :: (pp: PartyToParticipant) :: Nil => + (nd, pk, pp) + case other => fail("unexpected mappings: " + other) + } + } + "correctly pass through all fields" in { + val (publicKey, _) = createSigningKey + val syncId = DefaultTestIdentities.synchronizerId + + for { + response <- apiService.generateExternalPartyTopology( + GenerateExternalPartyTopologyRequest( + synchronizer = syncId.toProtoPrimitive, + partyHint = "alice", + publicKey = publicKey, + localParticipantObservationOnly = false, + otherConfirmingParticipantUids = + Seq(DefaultTestIdentities.participant2.uid.toProtoPrimitive), + confirmationThreshold = 2, + observingParticipantUids = + Seq(DefaultTestIdentities.participant3.uid.toProtoPrimitive), ) - Success(succeed) - case Failure(other) => - fail("Unexpected error", other) + ) + } yield { + val (nd, pk, pp) = getMappingsFromResponse(response) + pk.party shouldBe pp.partyId + pk.party.namespace shouldBe nd.namespace + nd.namespace.fingerprint shouldBe nd.target.fingerprint + pk.threshold.value shouldBe 1 + pp.participants.toSet shouldBe Set( + HostingParticipant( + DefaultTestIdentities.participant1, + ParticipantPermission.Confirmation, + ), + HostingParticipant( + DefaultTestIdentities.participant2, + ParticipantPermission.Confirmation, + ), + HostingParticipant( + DefaultTestIdentities.participant3, + ParticipantPermission.Observation, + ), + ) + pp.threshold.value shouldBe 2 + + } + } + "correctly interpret local observer" in { + val (publicKey, _) = createSigningKey + val syncId = DefaultTestIdentities.synchronizerId + + for { + response <- apiService.generateExternalPartyTopology( + GenerateExternalPartyTopologyRequest( + synchronizer = syncId.toProtoPrimitive, + partyHint = "alice", + publicKey = publicKey, + localParticipantObservationOnly = true, + otherConfirmingParticipantUids = + Seq(DefaultTestIdentities.participant2.uid.toProtoPrimitive), + confirmationThreshold = 1, + observingParticipantUids = Seq(), + ) + ) + } yield { + val (_, _, pp) = getMappingsFromResponse(response) + 
pp.participants.toSet shouldBe Set( + HostingParticipant( + DefaultTestIdentities.participant1, + ParticipantPermission.Observation, + ), + HostingParticipant( + DefaultTestIdentities.participant2, + ParticipantPermission.Confirmation, + ), + ) + pp.threshold.value shouldBe 1 + } + } + "correctly reject invalid threshold" in { + val (publicKey, _) = createSigningKey + val syncId = DefaultTestIdentities.synchronizerId + + for { + response <- apiService + .generateExternalPartyTopology( + GenerateExternalPartyTopologyRequest( + synchronizer = syncId.toProtoPrimitive, + partyHint = "alice", + publicKey = publicKey, + localParticipantObservationOnly = false, + otherConfirmingParticipantUids = + Seq(DefaultTestIdentities.participant2.uid.toProtoPrimitive), + confirmationThreshold = 3, + observingParticipantUids = Seq(), + ) + ) + .failed + } yield { + response.getMessage should include( + "Confirmation threshold exceeds number of confirming participants" + ) + } + } + "fail gracefully on invalid synchronizer-ids" in { + val (publicKey, _) = createSigningKey + for { + response1 <- apiService + .generateExternalPartyTopology( + GenerateExternalPartyTopologyRequest( + synchronizer = "", + partyHint = "alice", + publicKey = publicKey, + localParticipantObservationOnly = false, + otherConfirmingParticipantUids = Seq(), + confirmationThreshold = 1, + observingParticipantUids = Seq(), + ) + ) + .failed + response2 <- apiService + .generateExternalPartyTopology( + GenerateExternalPartyTopologyRequest( + synchronizer = SynchronizerId.tryFromString("not::valid").toProtoPrimitive, + partyHint = "alice", + publicKey = publicKey, + localParticipantObservationOnly = false, + otherConfirmingParticipantUids = Seq(), + confirmationThreshold = 1, + observingParticipantUids = Seq(), + ) + ) + .failed + } yield { + response1.getMessage should include("Empty string is not a valid unique identifier") + response2.getMessage should include("Unknown or not connected synchronizer not::valid") + } + } + "fail gracefully on invalid party hints" in { + val (publicKey, _) = createSigningKey + val syncId = DefaultTestIdentities.synchronizerId + for { + response1 <- apiService + .generateExternalPartyTopology( + GenerateExternalPartyTopologyRequest( + synchronizer = syncId.toProtoPrimitive, + partyHint = "", + publicKey = publicKey, + localParticipantObservationOnly = false, + otherConfirmingParticipantUids = Seq(), + confirmationThreshold = 1, + observingParticipantUids = Seq(), + ) + ) + .failed + response2 <- apiService + .generateExternalPartyTopology( + GenerateExternalPartyTopologyRequest( + synchronizer = syncId.toProtoPrimitive, + partyHint = + "Aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + publicKey = publicKey, + localParticipantObservationOnly = false, + otherConfirmingParticipantUids = Seq(), + confirmationThreshold = 1, + observingParticipantUids = Seq(), + ) + ) + .failed + } yield { + response1.getMessage should include("Party hint is empty") + response2.getMessage should include("is too long") + } + } + "fail gracefully on empty keys" in { + val syncId = DefaultTestIdentities.synchronizerId + for { + response1 <- apiService + .generateExternalPartyTopology( 
+ GenerateExternalPartyTopologyRequest( + synchronizer = syncId.toProtoPrimitive, + partyHint = "alice", + publicKey = None, + localParticipantObservationOnly = false, + otherConfirmingParticipantUids = Seq(), + confirmationThreshold = 1, + observingParticipantUids = Seq(), + ) + ) + .failed + } yield { + response1.getMessage should include("Field `public_key` is not set") + } + } + "fail gracefully on duplicate participant ids" in { + val (publicKey, _) = createSigningKey + val syncId = DefaultTestIdentities.synchronizerId + for { + response1 <- apiService + .generateExternalPartyTopology( + GenerateExternalPartyTopologyRequest( + synchronizer = syncId.toProtoPrimitive, + partyHint = "alice", + publicKey = publicKey, + localParticipantObservationOnly = false, + otherConfirmingParticipantUids = + Seq(DefaultTestIdentities.participant1.uid.toProtoPrimitive), + confirmationThreshold = 1, + observingParticipantUids = Seq(), + ) + ) + .failed + response2 <- apiService + .generateExternalPartyTopology( + GenerateExternalPartyTopologyRequest( + synchronizer = syncId.toProtoPrimitive, + partyHint = "alice", + publicKey = publicKey, + localParticipantObservationOnly = false, + otherConfirmingParticipantUids = Seq(), + confirmationThreshold = 1, + observingParticipantUids = + Seq(DefaultTestIdentities.participant1.uid.toProtoPrimitive), + ) + ) + .failed + response3 <- apiService + .generateExternalPartyTopology( + GenerateExternalPartyTopologyRequest( + synchronizer = syncId.toProtoPrimitive, + partyHint = "alice", + publicKey = publicKey, + localParticipantObservationOnly = false, + otherConfirmingParticipantUids = + Seq(DefaultTestIdentities.participant2.uid.toProtoPrimitive), + confirmationThreshold = 1, + observingParticipantUids = + Seq(DefaultTestIdentities.participant2.uid.toProtoPrimitive), + ) + ) + .failed + } yield { + response1.getMessage should include( + s"This participant node ($participantId) is also listed in 'otherConfirmingParticipantUids'." + + s" By sending the request to this node, it is de facto a hosting node" + + s" and must not be listed in 'otherConfirmingParticipantUids'." + ) + response2.getMessage should include( + "This participant node (PAR::participant1::participant1...) is also listed in 'observingParticipantUids'." + + " By sending the request to this node, it is de facto a hosting node" + + " and must not be listed in 'observingParticipantUids'." + ) + response3.getMessage should include( + "The following participant IDs are referenced multiple times in the request:" + + " participant2::participant2.... " + + "Please ensure all IDs are referenced only once across" + + " 'otherConfirmingParticipantUids' and 'observingParticipantUids' fields."
+ ) } + } + } + } private def makePartyAllocationTracker( @@ -221,7 +1016,7 @@ class ApiPartyManagementServiceSpec ): PartyAllocation.Tracker = StreamTracker.withTimer[PartyAllocation.TrackerKey, PartyAllocation.Completed]( timer = new java.util.Timer("test-timer"), - itemKey = (_ => Some(PartyAllocation.TrackerKey.forTests(aSubmissionId))), + itemKey = (_ => Some(aPartyAllocationTracker)), inFlightCounter = InFlight.Limited(100, mock[com.daml.metrics.api.MetricHandle.Counter]), loggerFactory, ) @@ -267,7 +1062,7 @@ class ApiPartyManagementServiceSpec object ApiPartyManagementServiceSpec { - val participantId = Ref.ParticipantId.assertFromString("participant1") + val participantId = DefaultTestIdentities.participant1 val partyDetails: IndexerPartyDetails = IndexerPartyDetails( party = Ref.Party.assertFromString("Bob"), @@ -294,6 +1089,7 @@ object ApiPartyManagementServiceSpec { hint: Ref.Party, submissionId: Ref.SubmissionId, synchronizerIdO: Option[SynchronizerId], + externalPartyOnboardingDetails: Option[ExternalPartyOnboardingDetails], )(implicit traceContext: TraceContext ): FutureUnlessShutdown[state.SubmissionResult] = { @@ -304,5 +1100,14 @@ object ApiPartyManagementServiceSpec { ) FutureUnlessShutdown.pure(state.SubmissionResult.Acknowledged) } + + override def protocolVersionForSynchronizerId( + synchronizerId: SynchronizerId + ): Option[ProtocolVersion] = + Option.when(synchronizerId == DefaultTestIdentities.synchronizerId)(ProtocolVersion.latest) + + override def participantId: ParticipantId = DefaultTestIdentities.participant1 + + override def hashOps: HashOps = TestHash } } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/PackageUpgradeValidatorSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/PackageUpgradeValidatorSpec.scala index 415ee03d89..61ffe8f0d0 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/PackageUpgradeValidatorSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/PackageUpgradeValidatorSpec.scala @@ -59,39 +59,38 @@ class PackageUpgradeValidatorSpec ) "validate empty lineage" in { - val res = packageUpgradeValidator.validateUpgrade(List.empty) + val res = validateUpgrade(List.empty, List.empty) res shouldBe Right(()) } "validate compatible lineage" in { - packageUpgradeValidator.validateUpgrade(List(v1)) shouldBe Right(()) - packageUpgradeValidator.validateUpgrade(List(v1, v2Compatible)) shouldBe Right(()) + validateUpgrade(List(v1), List.empty) shouldBe Right(()) + validateUpgrade(List(v2Compatible), List(v1)) shouldBe Right(()) + validateUpgrade(List(v1, v2Compatible), List.empty) shouldBe Right(()) } "fail validation of incompatible lineage" in { - inside(packageUpgradeValidator.validateUpgrade(List(v1, v2Compatible, v3Incompatible))) { + inside(validateUpgrade(List(v3Incompatible), List(v1, v2Compatible))) { case Left(error: Upgradeability.Error) => error.newPackage shouldBe Util.PkgIdWithNameAndVersion(v3Incompatible) error.oldPackage shouldBe Util.PkgIdWithNameAndVersion(v2Compatible) } - // it does not depend on the order of input packages - inside(packageUpgradeValidator.validateUpgrade(List(v1, v3Incompatible, v2Compatible))) { + // it does not depend on the vetting order + inside(validateUpgrade(List(v2Compatible), List(v1, v3Incompatible))) { case 
Left(error: Upgradeability.Error) => error.newPackage shouldBe Util.PkgIdWithNameAndVersion(v3Incompatible) error.oldPackage shouldBe Util.PkgIdWithNameAndVersion(v2Compatible) } - inside(packageUpgradeValidator.validateUpgrade(List(v1, v11Incompatible, v2Compatible))) { + inside(validateUpgrade(List(v2Compatible), List(v1, v11Incompatible))) { case Left(error: Upgradeability.Error) => error.newPackage shouldBe Util.PkgIdWithNameAndVersion(v11Incompatible) error.oldPackage shouldBe Util.PkgIdWithNameAndVersion(v1) } inside( - packageUpgradeValidator.validateUpgrade( - List(v1, v2Compatible, v3Incompatible, v11Incompatible) - ) + validateUpgrade(List(v11Incompatible), List(v1, v2Compatible, v3Incompatible)) ) { case Left(error: Upgradeability.Error) => error.newPackage shouldBe Util.PkgIdWithNameAndVersion(v11Incompatible) error.oldPackage shouldBe Util.PkgIdWithNameAndVersion(v1) @@ -99,7 +98,7 @@ class PackageUpgradeValidatorSpec } "fail validation because of packages with same name and version" in { - inside(packageUpgradeValidator.validateUpgrade(List(v1, v3Incompatible, v3Compatible))) { + inside(validateUpgrade(List(v3Incompatible), List(v1, v3Compatible))) { case Left(error: UpgradeVersion.Error) => Set(error.firstPackage, error.secondPackage) shouldBe Set( Util.PkgIdWithNameAndVersion(v3Incompatible), @@ -108,6 +107,16 @@ class PackageUpgradeValidatorSpec } } + private def validateUpgrade( + newPackages: List[(Ref.PackageId, Ast.PackageSignature)], + existingPackages: List[(Ref.PackageId, Ast.PackageSignature)], + ) = + packageUpgradeValidator.validateUpgrade( + newPackages.map(_._1).toSet, + (newPackages ++ existingPackages).map(_._1).toSet, + (newPackages ++ existingPackages).toMap, + ) + private def samplePackageSig( packageId: String, packageName: String, diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/PendingPartyAllocationsSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/PendingPartyAllocationsSpec.scala new file mode 100644 index 0000000000..fe854f57d3 --- /dev/null +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/PendingPartyAllocationsSpec.scala @@ -0,0 +1,80 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.platform.apiserver.services.admin + +import com.digitalasset.canton.{BaseTest, HasExecutionContext} +import com.digitalasset.daml.lf.data.Ref +import org.scalatest.flatspec.AsyncFlatSpec +import org.scalatest.matchers.should.Matchers + +import java.util.concurrent.Semaphore +import scala.concurrent.Future + +class PendingPartyAllocationsSpec + extends AsyncFlatSpec + with Matchers + with BaseTest + with HasExecutionContext { + + private val className = classOf[PendingPartyAllocations].getSimpleName + + private val ken = Some(Ref.UserId.assertFromString("ken")) + behavior of s"$className.withUser" + + it should "not keep a tally when no user is provided" in { + val ppa = new PendingPartyAllocations + for { + outstanding <- ppa.withUser(None)(Future.successful) + } yield { + outstanding shouldBe 0 + } + } + + it should "give 1 as the number of operations running when sequential" in { + val ppa = new PendingPartyAllocations + for { + first <- ppa.withUser(ken)(Future.successful) + second <- ppa.withUser(ken)(Future.successful) + } yield { + first shouldBe 1 + second shouldBe 1 + } + } + + it should "keep tally when one of the operations throws" in { + val ppa = new PendingPartyAllocations + for { + first <- ppa.withUser(ken)(Future.successful) + _ <- ppa + .withUser(ken)(_ => Future.failed(new RuntimeException("deliberate throw"))) + .recover(_ => 1) + second <- ppa.withUser(ken)(Future.successful) + } yield { + first shouldBe 1 + second shouldBe 1 + } + } + + it should "keep tally across concurrent operations" in { + val semaphore = new Semaphore(0) + val elements = 3 + def waitAndReturn(count: Int) = + Future { + if (count < elements) + semaphore.acquire() + else + semaphore.release(3) + count + } + val ppa = new PendingPartyAllocations + val expected = (1 to elements).toList + val futures = expected.map(_ => ppa.withUser(ken)(waitAndReturn)) + for { + result <- Future.sequence(futures) + } yield { + result shouldBe expected + } + } + +} diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandServiceImplSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandServiceImplSpec.scala index 4574639bb9..1024e0f593 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandServiceImplSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandServiceImplSpec.scala @@ -18,7 +18,6 @@ import com.daml.ledger.resources.{ResourceContext, ResourceOwner} import com.daml.tracing.DefaultOpenTelemetry import com.digitalasset.canton.ledger.api.validation.{ CommandsValidator, - ValidateDisclosedContracts, ValidateUpgradingPackageResolutions, } import com.digitalasset.canton.lifecycle.FutureUnlessShutdown @@ -257,8 +256,7 @@ class CommandServiceImplSpec deadlineTicker: Deadline.Ticker = Deadline.getSystemTicker, ): ResourceOwner[CommandServiceGrpc.CommandServiceStub] = { val commandsValidator = new CommandsValidator( - validateUpgradingPackageResolutions = ValidateUpgradingPackageResolutions.Empty, - validateDisclosedContracts = ValidateDisclosedContracts.WithContractIdVerificationDisabled, + validateUpgradingPackageResolutions = ValidateUpgradingPackageResolutions.Empty ) val apiService = new ApiCommandService( service = service, @@
-289,9 +287,7 @@ class CommandServiceImplSpec object CommandServiceImplSpec { private val UnimplementedTransactionServices = new CommandServiceImpl.UpdateServices( - getTransactionTreeById = _ => - Future.failed(new RuntimeException("This should never be called.")), - getUpdateById = _ => Future.failed(new RuntimeException("This should never be called.")), + getUpdateById = _ => Future.failed(new RuntimeException("This should never be called.")) ) private val OkStatus = StatusProto.of(Status.Code.OK.value, "", Seq.empty) diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandSubmissionServiceImplSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandSubmissionServiceImplSpec.scala index bc0ac0eae9..65d0b80b28 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandSubmissionServiceImplSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandSubmissionServiceImplSpec.scala @@ -27,7 +27,7 @@ import com.digitalasset.canton.platform.apiserver.execution.{ } import com.digitalasset.canton.platform.apiserver.services.{ErrorCause, TimeProviderType} import com.digitalasset.canton.platform.apiserver.{FatContractInstanceHelper, SeedService} -import com.digitalasset.canton.protocol.LfTransactionVersion +import com.digitalasset.canton.protocol.LfSerializationVersion import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.{BaseTest, HasExecutionContext} @@ -237,7 +237,7 @@ class CommandSubmissionServiceImplSpec signatories = Set(alice), stakeholders = Set(alice), keyOpt = None, - version = LfTransactionVersion.minVersion, + version = LfSerializationVersion.V1, ) val disclosedContract = DisclosedContract( diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/GeneratorsInteractiveSubmission.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/GeneratorsInteractiveSubmission.scala index 3703657d0e..55e001de62 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/GeneratorsInteractiveSubmission.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/GeneratorsInteractiveSubmission.scala @@ -12,17 +12,17 @@ import com.digitalasset.canton.platform.apiserver.services.command.interactive.c import com.digitalasset.canton.platform.apiserver.services.command.interactive.codec.PrepareTransactionData import com.digitalasset.canton.protocol.LfFatContractInst import com.digitalasset.canton.topology.{GeneratorsTopology, SynchronizerId} -import com.digitalasset.canton.{GeneratorsLf, LedgerUserId, LfPackageId, LfPartyId} +import com.digitalasset.canton.{GeneratorsLf, LedgerUserId, LfPackageId, LfPartyId, LfTimestamp} import com.digitalasset.daml.lf.crypto import com.digitalasset.daml.lf.crypto.Hash import com.digitalasset.daml.lf.data.{Bytes, ImmArray, Time} -import com.digitalasset.daml.lf.language.LanguageVersion import com.digitalasset.daml.lf.transaction.{ CreationTime, FatContractInstance, 
GlobalKey, Node, NodeId, + SerializationVersion as LfSerializationVersion, SubmittedTransaction, Transaction, VersionedTransaction, @@ -49,7 +49,7 @@ final class GeneratorsInteractiveSubmission( case node: Node.Create => node .copy( - version = LanguageVersion.v2_1, + version = LfSerializationVersion.V1, keyOpt = None, // signatories should be a subset of stakeholders for the node to be valid // take a random size subset of stakeholders, but 1 minimum @@ -59,7 +59,7 @@ final class GeneratorsInteractiveSubmission( case node: Node.Exercise => node .copy( - version = LanguageVersion.v2_1, + version = LfSerializationVersion.V1, keyOpt = None, byKey = false, choiceAuthorizers = None, @@ -68,7 +68,7 @@ final class GeneratorsInteractiveSubmission( case node: Node.Fetch => node .copy( - version = LanguageVersion.v2_1, + version = LfSerializationVersion.V1, keyOpt = None, byKey = false, ) @@ -89,7 +89,7 @@ final class GeneratorsInteractiveSubmission( private val versionedTransactionGenerator = for { transaction <- noDanglingRefGenTransaction - } yield VersionedTransaction(LanguageVersion.v2_1, transaction.nodes, transaction.roots) + } yield VersionedTransaction(LfSerializationVersion.V1, transaction.nodes, transaction.roots) implicit val transactionArb: Arbitrary[VersionedTransaction] = Arbitrary( versionedTransactionGenerator @@ -190,7 +190,7 @@ final class GeneratorsInteractiveSubmission( private def inputContractsGen(overrideCid: Value.ContractId): Gen[LfFatContractInst] = for { create <- ValueGenerators - .malformedCreateNodeGenWithVersion(LanguageVersion.v2_1) + .malformedCreateNodeGenWithVersion(LfSerializationVersion.V1) .map(normalizeNodeForV1) createdAt <- Arbitrary.arbitrary[Time.Timestamp] authenticationData <- Arbitrary.arbitrary[Array[Byte]].map(Bytes.fromByteArray) @@ -211,6 +211,7 @@ final class GeneratorsInteractiveSubmission( enrichedInputContracts <- Gen.sequence(coids.map(inputContractsGen)) mediatorGroup <- Arbitrary.arbitrary[PositiveInt] transactionUUID <- Gen.uuid + maxRecordTime <- Arbitrary.arbitrary[Option[LfTimestamp]] } yield PrepareTransactionData( submitterInfo, transactionMeta, @@ -225,6 +226,7 @@ final class GeneratorsInteractiveSubmission( synchronizerId, mediatorGroup.value, transactionUUID, + maxRecordTime, ) implicit val preparedTransactionDataArb: Arbitrary[PrepareTransactionData] = Arbitrary( diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/PreparedTransactionCodecV1Spec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/PreparedTransactionCodecV1Spec.scala index ed169457f1..a099b00557 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/PreparedTransactionCodecV1Spec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/PreparedTransactionCodecV1Spec.scala @@ -14,8 +14,12 @@ import com.digitalasset.canton.topology.GeneratorsTopology import com.digitalasset.canton.{BaseTest, GeneratorsLf, HasExecutionContext} import com.digitalasset.daml.lf.crypto.Hash import com.digitalasset.daml.lf.data.ImmArray -import com.digitalasset.daml.lf.language.LanguageVersion -import com.digitalasset.daml.lf.transaction.{Node, NodeId, VersionedTransaction} +import com.digitalasset.daml.lf.transaction.{ + Node, + NodeId, + SerializationVersion 
as LfSerializationVersion, + VersionedTransaction, +} import com.digitalasset.daml.lf.value.test.ValueGenerators import org.scalacheck.Arbitrary import org.scalatest.matchers.should.Matchers @@ -69,7 +73,7 @@ class PreparedTransactionCodecV1Spec forAll { (node: Node.Exercise) => val encoded = - encoder.v1.exerciseTransformer(LanguageVersion.v2_1).transform(node).asEither.value + encoder.v1.exerciseTransformer(LfSerializationVersion.V1).transform(node).asEither.value decoder.v1.exerciseTransformer.transform(encoded).asEither.value shouldEqual node } } @@ -86,7 +90,7 @@ class PreparedTransactionCodecV1Spec forAll { (node: Node.Fetch) => val encoded = - encoder.v1.fetchTransformer(LanguageVersion.v2_1).transform(node).asEither.value + encoder.v1.fetchTransformer(LfSerializationVersion.V1).transform(node).asEither.value decoder.v1.fetchTransformer.transform(encoded).asEither.value shouldEqual node } } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/validation/ErrorFactoriesSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/validation/ErrorFactoriesSpec.scala index 874f01343a..b79263d074 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/validation/ErrorFactoriesSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/validation/ErrorFactoriesSpec.scala @@ -15,7 +15,6 @@ import com.digitalasset.canton.ledger.error.groups.{ } import com.digitalasset.canton.ledger.error.{CommonErrors, IndexErrors, LedgerApiErrors} import com.digitalasset.canton.logging.{ErrorLoggingContext, SuppressionRule} -import com.digitalasset.daml.lf.data.Ref import com.google.rpc.* import io.grpc.Status.Code import io.grpc.StatusRuntimeException @@ -296,29 +295,6 @@ class ErrorFactoriesSpec ) } - "return a transactionNotFound error" in { - val msg = - s"TRANSACTION_NOT_FOUND(11,$truncatedCorrelationId): Transaction not found, or not visible." - assertError( - RequestValidationErrors.NotFound.Transaction - .RejectWithTxId(Ref.TransactionId.assertFromString("tId"))(errorLoggingContext) - )( - code = Code.NOT_FOUND, - message = msg, - details = Seq[ErrorDetails.ErrorDetail]( - ErrorDetails.ErrorInfoDetail( - "TRANSACTION_NOT_FOUND", - Map("category" -> "11", "definite_answer" -> "false", "test" -> getClass.getSimpleName), - ), - expectedCorrelationIdRequestInfo, - ErrorDetails.ResourceInfoDetail(typ = "TRANSACTION_ID", name = "tId"), - ), - logLevel = Level.INFO, - logMessage = msg, - logErrorContextRegEx = expectedLocationRegex, - ) - } - "return an updateNotFound error" in { val msg = s"UPDATE_NOT_FOUND(11,$truncatedCorrelationId): Update not found, or not visible." 
diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/ContractStoreBasedMaximumLedgerTimeServiceSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/ContractStoreBasedMaximumLedgerTimeServiceSpec.scala index b1e71bcdb1..552ac9c7f8 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/ContractStoreBasedMaximumLedgerTimeServiceSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/ContractStoreBasedMaximumLedgerTimeServiceSpec.scala @@ -16,8 +16,12 @@ import com.digitalasset.daml.lf.crypto.Hash import com.digitalasset.daml.lf.data.Bytes import com.digitalasset.daml.lf.data.Ref.Party import com.digitalasset.daml.lf.data.Time.Timestamp -import com.digitalasset.daml.lf.language.LanguageMajorVersion -import com.digitalasset.daml.lf.transaction.{CreationTime, GlobalKey, Node} +import com.digitalasset.daml.lf.transaction.{ + CreationTime, + GlobalKey, + Node, + SerializationVersion as LfSerializationVersion, +} import com.digitalasset.daml.lf.value.Value import org.scalatest.flatspec.AsyncFlatSpec import org.scalatest.matchers.should.Matchers @@ -252,7 +256,7 @@ class ContractStoreBasedMaximumLedgerTimeServiceSpec signatories = Set(alice), stakeholders = Set(alice), keyOpt = None, - version = LanguageMajorVersion.V2.maxStableVersion, + version = LfSerializationVersion.V1, ) private def active(ledgerEffectiveTime: Timestamp): ContractState = diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdaterSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdaterSpec.scala index 6f05d03377..fb756e59d5 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdaterSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdaterSpec.scala @@ -48,23 +48,27 @@ import com.digitalasset.canton.platform.store.interfaces.TransactionLogUpdate.{ } import com.digitalasset.canton.platform.store.interning.StringInterningView import com.digitalasset.canton.platform.{DispatcherState, InMemoryState} -import com.digitalasset.canton.protocol.ReassignmentId +import com.digitalasset.canton.protocol.{ReassignmentId, TestUpdateId, UpdateId} import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ReassignmentTag -import com.digitalasset.canton.{BaseTest, HasExecutorServiceGeneric, TestEssentials, data} +import com.digitalasset.canton.{BaseTest, HasExecutorServiceGeneric, TestEssentials} import com.digitalasset.daml.lf.crypto import com.digitalasset.daml.lf.data.Ref.Identifier import com.digitalasset.daml.lf.data.Time.Timestamp import com.digitalasset.daml.lf.data.{Bytes, Ref} -import com.digitalasset.daml.lf.language.LanguageVersion -import com.digitalasset.daml.lf.transaction.test.TestNodeBuilder.CreateTransactionVersion +import com.digitalasset.daml.lf.transaction.test.TestNodeBuilder.CreateSerializationVersion import com.digitalasset.daml.lf.transaction.test.{ NodeIdTransactionBuilder, TestNodeBuilder, TransactionBuilder, } -import com.digitalasset.daml.lf.transaction.{CommittedTransaction, Node, NodeId} +import com.digitalasset.daml.lf.transaction.{ + 
CommittedTransaction, + Node, + NodeId, + SerializationVersion as LfSerializationVersion, +} import com.digitalasset.daml.lf.value.Value import com.google.protobuf.ByteString import com.google.rpc.status.Status @@ -104,8 +108,8 @@ class InMemoryStateUpdaterSpec ) runFlow( Seq( - (Vector(update1, metadataChangedUpdate), someLedgerEnd), - (Vector(update3, update4), secondLedgerEnd), + (Vector(update1, metadataChangedUpdate), someLedgerEnd, traceContext), + (Vector(update3, update4), secondLedgerEnd, traceContext), ) ) cacheUpdates should contain theSameElementsInOrderAs Seq( @@ -127,9 +131,9 @@ class InMemoryStateUpdaterSpec runFlow( Seq( // Empty input batch should have no effect - (Vector.empty, someLedgerEnd), - (Vector(update3), secondLedgerEnd), - (Vector(anotherMetadataChangedUpdate), thirdLedgerEnd), + (Vector.empty, someLedgerEnd, traceContext), + (Vector(update3), secondLedgerEnd, traceContext), + (Vector(anotherMetadataChangedUpdate), thirdLedgerEnd, traceContext), ) ) @@ -145,6 +149,7 @@ class InMemoryStateUpdaterSpec InMemoryStateUpdater.prepare( Vector.empty, someLedgerEnd, + traceContext, ) } } @@ -153,10 +158,12 @@ class InMemoryStateUpdaterSpec InMemoryStateUpdater.prepare( Vector(update1), someLedgerEnd, + traceContext, ) shouldBe PrepareResult( Vector(txLogUpdate1), someLedgerEnd, update1._2.traceContext, + traceContext, ) } @@ -172,6 +179,7 @@ class InMemoryStateUpdaterSpec val preparedWithHashResult = InMemoryStateUpdater.prepare( Vector(updateWithTransactionHash), someLedgerEnd, + traceContext, ) inside(preparedWithHashResult.updates.loneElement) { case transactionAccepted: TransactionAccepted => @@ -181,6 +189,7 @@ class InMemoryStateUpdaterSpec val preparedWithoutHashResult = InMemoryStateUpdater.prepare( Vector(update1), someLedgerEnd, + traceContext, ) inside(preparedWithoutHashResult.updates.loneElement) { case transactionAccepted: TransactionAccepted => @@ -192,10 +201,12 @@ class InMemoryStateUpdaterSpec InMemoryStateUpdater.prepare( Vector(update1, update7, update8), someLedgerEnd, + traceContext, ) shouldBe PrepareResult( Vector(txLogUpdate1, assignLogUpdate, unassignLogUpdate), someLedgerEnd, update1._2.traceContext, + traceContext, ) } @@ -203,10 +214,12 @@ class InMemoryStateUpdaterSpec InMemoryStateUpdater.prepare( Vector(update1, update9), someLedgerEnd, + traceContext, ) shouldBe PrepareResult( Vector(txLogUpdate1, topologyTransactionLogUpdate), someLedgerEnd, update1._2.traceContext, + traceContext, ) } @@ -214,10 +227,12 @@ class InMemoryStateUpdaterSpec InMemoryStateUpdater.prepare( Vector(update1, metadataChangedUpdate), someLedgerEnd, + traceContext, ) shouldBe PrepareResult( Vector(txLogUpdate1), someLedgerEnd, metadataChangedUpdate._2.traceContext, + traceContext, ) } @@ -253,6 +268,7 @@ class InMemoryStateUpdaterSpec .prepare( Vector(update), someLedgerEnd, + traceContext, ) .updates .collect { case txAccepted: TransactionLogUpdate.TransactionAccepted => txAccepted } @@ -287,18 +303,15 @@ class InMemoryStateUpdaterSpec .push( tx_accepted_withCompletionStreamResponse ) - inOrder - .verify(contractStateCaches) - .push(any[NonEmptyVector[ContractStateEvent]])(any[TraceContext]) inOrder .verify(inMemoryFanoutBuffer) .push( tx_accepted_withoutCompletionStreamResponse ) + inOrder.verify(inMemoryFanoutBuffer).push(tx_rejected) inOrder .verify(contractStateCaches) - .push(any[NonEmptyVector[ContractStateEvent]])(any[TraceContext]) - inOrder.verify(inMemoryFanoutBuffer).push(tx_rejected) + .push(any[NonEmptyVector[ContractStateEvent]], 
any[Long])(any[TraceContext]) inOrder .verify(ledgerEndCache) @@ -332,7 +345,7 @@ class InMemoryStateUpdaterSpec ) inOrder .verify(contractStateCaches) - .push(any[NonEmptyVector[ContractStateEvent]])(any[TraceContext]) + .push(any[NonEmptyVector[ContractStateEvent]], any[Long])(any[TraceContext]) inOrder .verify(ledgerEndCache) .set(Some(lastLedgerEnd)) @@ -348,15 +361,15 @@ class InMemoryStateUpdaterSpec .push( tx_accepted_withCompletionStreamResponse ) - inOrder - .verify(contractStateCaches) - .push(any[NonEmptyVector[ContractStateEvent]])(any[TraceContext]) inOrder .verify(inMemoryFanoutBuffer) .push( tx_accepted_withoutCompletionStreamResponse ) inOrder.verify(inMemoryFanoutBuffer).push(tx_rejected) + inOrder + .verify(contractStateCaches) + .push(any[NonEmptyVector[ContractStateEvent]], any[Long])(any[TraceContext]) inOrder .verify(transactionSubmissionTracker) @@ -382,18 +395,18 @@ class InMemoryStateUpdaterSpec inOrder .verify(inMemoryFanoutBuffer) .push(tx_accepted_withFlatEventWitnesses) + inOrder + .verify(inMemoryFanoutBuffer) + .push( + tx_accepted_withoutFlatEventWitnesses + ) inOrder .verify(contractStateCaches) - .push(any[NonEmptyVector[ContractStateEvent]])(any[TraceContext]) + .push(any[NonEmptyVector[ContractStateEvent]], any[Long])(any[TraceContext]) // the tx_accepted_withoutFlatEventWitnesses should not be pushed as it has empty flatEventWitnesses verifyNoMoreInteractions(contractStateCaches) - inOrder - .verify(inMemoryFanoutBuffer) - .push( - tx_accepted_withoutFlatEventWitnesses - ) inOrder .verify(ledgerEndCache) .set(Some(lastLedgerEnd.copy(lastOffset = tx_accepted_withoutFlatEventWitnesses.offset))) @@ -521,10 +534,10 @@ object InMemoryStateUpdaterSpec { import TraceContext.Implicits.Empty.* - private val txId1 = Ref.TransactionId.assertFromString("tx1") - private val txId2 = Ref.TransactionId.assertFromString("tx2") - private val txId3 = Ref.TransactionId.assertFromString("tx3") - private val txId4 = Ref.TransactionId.assertFromString("tx4") + private val txId1 = TestUpdateId("tx1") + private val txId2 = TestUpdateId("tx2") + private val txId3 = TestUpdateId("tx3") + private val txId4 = TestUpdateId("tx4") private val synchronizerId1 = SynchronizerId.tryFromString("x::synchronizerID1") private val synchronizerId2 = SynchronizerId.tryFromString("x::synchronizerID2") @@ -540,6 +553,8 @@ object InMemoryStateUpdaterSpec { private val participantId = Ref.ParticipantId.assertFromString("participant1") private val someContractMetadataBytes = Bytes.assertFromString("00aabb") private val workflowId: Ref.WorkflowId = Ref.WorkflowId.assertFromString("Workflow") + private val representativePackageId = + Ref.PackageId.assertFromString("some-representative-package-id") trait Scope extends Matchers @@ -556,7 +571,7 @@ object InMemoryStateUpdaterSpec { val txLogUpdate1 = TransactionLogUpdate.TransactionAccepted( - updateId = txId1, + updateId = txId1.toHexString, commandId = "", workflowId = workflowId, effectiveAt = Timestamp.Epoch, @@ -570,7 +585,7 @@ object InMemoryStateUpdaterSpec { val assignLogUpdate = TransactionLogUpdate.ReassignmentAccepted( - updateId = txId3, + updateId = txId3.toHexString, commandId = "", workflowId = workflowId, offset = offset(17L), @@ -597,7 +612,7 @@ object InMemoryStateUpdaterSpec { val unassignLogUpdate = TransactionLogUpdate.ReassignmentAccepted( - updateId = txId4, + updateId = txId4.toHexString, commandId = "", workflowId = workflowId, offset = offset(18L), @@ -626,7 +641,7 @@ object InMemoryStateUpdaterSpec { val 
topologyTransactionLogUpdate = TransactionLogUpdate.TopologyTransactionEffective( - updateId = txId3, + updateId = txId3.toHexString, synchronizerId = synchronizerId1.toProtoPrimitive, offset = offset(19L), effectiveTime = Timestamp.Epoch, @@ -689,7 +704,7 @@ object InMemoryStateUpdaterSpec { logger = logger, )( inMemoryState = inMemoryState, - prepare = (_, ledgerEnd) => result(ledgerEnd), + prepare = (_, ledgerEnd, _) => result(ledgerEnd), update = cachesUpdateCaptor, )(emptyTraceContext) @@ -739,7 +754,7 @@ object InMemoryStateUpdaterSpec { val tx_accepted_withCompletionStreamResponse: TransactionLogUpdate.TransactionAccepted = TransactionLogUpdate.TransactionAccepted( - updateId = tx_accepted_updateId, + updateId = TestUpdateId(tx_accepted_updateId).toHexString, commandId = tx_accepted_commandId, workflowId = "wAccepted", effectiveAt = Timestamp.assertFromLong(1L), @@ -749,7 +764,7 @@ object InMemoryStateUpdaterSpec { toCreatedEvent( genCreateNode, tx_accepted_withCompletionStreamResponse_offset, - Ref.TransactionId.assertFromString(tx_accepted_updateId), + TestUpdateId(tx_accepted_updateId), NodeId(i), ) ) @@ -773,7 +788,7 @@ object InMemoryStateUpdaterSpec { toCreatedEvent( genCreateNode, tx_accepted_withFlatEventWitnesses_offset, - Ref.TransactionId.assertFromString(tx_accepted_updateId), + TestUpdateId(tx_accepted_updateId), NodeId(0), ) ), @@ -786,7 +801,7 @@ object InMemoryStateUpdaterSpec { toCreatedEvent( genCreateNode, tx_accepted_withoutFlatEventWitnesses_offset, - Ref.TransactionId.assertFromString(tx_accepted_updateId), + TestUpdateId(tx_accepted_updateId), NodeId(0), ).copy( flatEventWitnesses = Set.empty @@ -820,11 +835,13 @@ object InMemoryStateUpdaterSpec { updates = updates, ledgerEnd = lastLedgerEnd, emptyTraceContext, + traceContext, ) val prepareResultOnlyReassignment: PrepareResult = PrepareResult( updates = Vector(assignLogUpdate), ledgerEnd = lastLedgerEnd, emptyTraceContext, + traceContext, ) val prepareResultWithEmptyFlatEventWitnesses: PrepareResult = PrepareResult( updates = Vector( @@ -833,6 +850,7 @@ object InMemoryStateUpdaterSpec { ), ledgerEnd = lastLedgerEnd.copy(lastOffset = tx_accepted_withoutFlatEventWitnesses.offset), emptyTraceContext, + traceContext, ) def result(ledgerEnd: LedgerEnd): PrepareResult = @@ -840,10 +858,11 @@ object InMemoryStateUpdaterSpec { Vector.empty, ledgerEnd, emptyTraceContext, + traceContext, ) def runFlow( - input: Seq[(Vector[(Offset, Update)], LedgerEnd)] + input: Seq[(Vector[(Offset, Update)], LedgerEnd, TraceContext)] )(implicit mat: Materializer): Done = Source(input) .via(inMemoryStateUpdater(false)) @@ -861,7 +880,7 @@ object InMemoryStateUpdaterSpec { argument = Value.ValueUnit, signatories = Set(party1), observers = Set(party2), - version = CreateTransactionVersion.Version(LanguageVersion.v2_dev), + version = CreateSerializationVersion.Version(LfSerializationVersion.VDev), ) } private val someCreateNode = genCreateNode @@ -869,12 +888,12 @@ object InMemoryStateUpdaterSpec { private def toCreatedEvent( createdNode: Node.Create, txOffset: Offset, - updateId: data.UpdateId, + updateId: UpdateId, nodeId: NodeId, ) = CreatedEvent( eventOffset = txOffset, - updateId = updateId, + updateId = updateId.toHexString, nodeId = nodeId.index, eventSequentialId = 0, contractId = createdNode.coid, @@ -896,6 +915,7 @@ object InMemoryStateUpdaterSpec { createKey = createdNode.keyOpt.map(_.globalKey), createKeyMaintainers = createdNode.keyOpt.map(_.maintainers), authenticationData = someContractMetadataBytes, + 
representativePackageId = representativePackageId, ) implicit val defaultValueProviderCreatedEvent @@ -907,7 +927,7 @@ object InMemoryStateUpdaterSpec { toCreatedEvent( genCreateNode, Offset.firstOffset, - Ref.TransactionId.assertFromString("yolo"), + TestUpdateId("yolo"), NodeId(0), ) ) @@ -944,6 +964,7 @@ object InMemoryStateUpdaterSpec { synchronizerId = SynchronizerId.tryFromString("da::default"), recordTime = CantonTimestamp.MinValue, acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map.empty, ) private val update4 = offset(14L) -> @@ -1065,7 +1086,7 @@ object InMemoryStateUpdaterSpec { var checkpoints: Seq[OffsetCheckpoint] = Seq.empty val output = sourceSomes - .map((_, someLedgerEnd)) + .map((_, someLedgerEnd, emptyTraceContext)) .via( InMemoryStateUpdaterFlow .updateOffsetCheckpointCacheFlowWithTickingSource( @@ -1102,6 +1123,7 @@ object InMemoryStateUpdaterSpec { recordTime = CantonTimestamp(Timestamp(t)), externalTransactionHash = externalTransactionHash, acsChangeFactory = TestAcsChangeFactory(contractActivenessChanged), + internalContractIds = Map.empty, ) private def assignmentAccepted( @@ -1132,6 +1154,7 @@ object InMemoryStateUpdaterSpec { recordTime = CantonTimestamp(Timestamp(t)), synchronizerId = target, acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map.empty, ) private def unassignmentAccepted( @@ -1164,6 +1187,7 @@ object InMemoryStateUpdaterSpec { recordTime = CantonTimestamp(Timestamp(t)), synchronizerId = source, acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map.empty, ) private def commandRejected(t: Long, synchronizerId: SynchronizerId): Update.CommandRejected = diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/IndexServiceImplSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/IndexServiceImplSpec.scala index b5a08fcd13..9c80905f7d 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/IndexServiceImplSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/IndexServiceImplSpec.scala @@ -27,18 +27,18 @@ import com.digitalasset.canton.platform.store.dao.EventProjectionProperties.{ Projection, UseOriginalViewPackageId, } -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata.Implicits.packageMetadataSemigroup -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata.{ - LocalPackagePreference, - PackageResolution, -} import com.digitalasset.canton.platform.{ InternalEventFormat, InternalTransactionFormat, InternalUpdateFormat, TemplatePartiesFilter, } +import com.digitalasset.canton.store.packagemeta.PackageMetadata +import com.digitalasset.canton.store.packagemeta.PackageMetadata.Implicits.packageMetadataSemigroup +import com.digitalasset.canton.store.packagemeta.PackageMetadata.{ + LocalPackagePreference, + PackageResolution, +} import com.digitalasset.daml.lf.data.Ref import com.digitalasset.daml.lf.data.Ref.{ FullIdentifier, diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/BatchingParallelIngestionPipeSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/BatchingParallelIngestionPipeSpec.scala index 6da8efe2b0..68e028764c 100644 --- 
a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/BatchingParallelIngestionPipeSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/BatchingParallelIngestionPipeSpec.scala @@ -192,6 +192,8 @@ class BatchingParallelIngestionPipeSpec (index + lastIndex + 1, value) } }, + dbPrepareParallelism = 2, + dbPrepare = inBatch => Future(inBatch), batchingParallelism = 2, batcher = inBatch => Future { diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/EventMetricsUpdaterSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/EventMetricsUpdaterSpec.scala index a1f9ea592c..8b8f1804c5 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/EventMetricsUpdaterSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/EventMetricsUpdaterSpec.scala @@ -8,16 +8,17 @@ import com.daml.metrics.api.{MetricHandle, MetricsContext} import com.digitalasset.canton.data.{CantonTimestamp, LedgerTimeBoundaries, Offset} import com.digitalasset.canton.ledger.participant.state import com.digitalasset.canton.ledger.participant.state.TestAcsChangeFactory +import com.digitalasset.canton.protocol.TestUpdateId import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.daml.lf.crypto.Hash import com.digitalasset.daml.lf.data.{ImmArray, Ref, Time} -import com.digitalasset.daml.lf.language.LanguageVersion import com.digitalasset.daml.lf.transaction.TransactionNodeStatistics.EmptyActions import com.digitalasset.daml.lf.transaction.test.{TestNodeBuilder, TransactionBuilder} import com.digitalasset.daml.lf.transaction.{ CommittedTransaction, NodeId, + SerializationVersion as LfSerializationVersion, TransactionNodeStatistics, VersionedTransaction, } @@ -92,11 +93,12 @@ class EventMetricsUpdaterSpec extends AnyWordSpec with MetricValues { ) ), ), - updateId = Ref.TransactionId.assertFromString("UpdateId"), + updateId = TestUpdateId("UpdateId"), Map.empty, synchronizerId = SynchronizerId.tryFromString("da::default"), CantonTimestamp.now(), acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map.empty, ) "extract transaction metering" in { @@ -151,7 +153,7 @@ class EventMetricsUpdaterSpec extends AnyWordSpec with MetricValues { val meter: MetricHandle.Meter = mock[MetricHandle.Meter] val txWithNoActionCount = someTransactionAccepted.copy( transaction = CommittedTransaction( - VersionedTransaction(LanguageVersion.v2_dev, Map.empty, ImmArray.empty) + VersionedTransaction(LfSerializationVersion.VDev, Map.empty, ImmArray.empty) ) ) diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/ParallelIndexerSubscriptionSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/ParallelIndexerSubscriptionSpec.scala index 07c8966c90..89f31e8875 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/ParallelIndexerSubscriptionSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/ParallelIndexerSubscriptionSpec.scala @@ -8,6 +8,7 @@ import 
com.daml.metrics.DatabaseMetrics import com.digitalasset.canton.RepairCounter import com.digitalasset.canton.data.{CantonTimestamp, LedgerTimeBoundaries, Offset} import com.digitalasset.canton.ledger.participant.state +import com.digitalasset.canton.ledger.participant.state.Update.TransactionAccepted.RepresentativePackageIds import com.digitalasset.canton.ledger.participant.state.Update.{ RepairTransactionAccepted, TopologyTransactionEffective, @@ -32,11 +33,18 @@ import com.digitalasset.canton.metrics.LedgerApiServerMetrics import com.digitalasset.canton.platform.indexer.ha.TestConnection import com.digitalasset.canton.platform.indexer.parallel.ParallelIndexerSubscription.{ Batch, + EmptyActiveContracts, ZeroLedgerEnd, } import com.digitalasset.canton.platform.store.backend.ParameterStorageBackend.LedgerEnd -import com.digitalasset.canton.platform.store.backend.{DbDto, ParameterStorageBackend} +import com.digitalasset.canton.platform.store.backend.{ + DbDto, + ParameterStorageBackend, + ScalatestEqualityHelpers, +} +import com.digitalasset.canton.platform.store.cache.MutableLedgerEndCache import com.digitalasset.canton.platform.store.dao.DbDispatcher +import com.digitalasset.canton.protocol.TestUpdateId import com.digitalasset.canton.time.SimClock import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.tracing.SerializableTraceContextConverter.SerializableTraceContextExtension @@ -68,6 +76,7 @@ class ParallelIndexerSubscriptionSpec with Matchers with NamedLogging { + implicit private val DbDtoEqual: org.scalactic.Equality[DbDto] = ScalatestEqualityHelpers.DbDtoEq implicit val traceContext: TraceContext = TraceContext.empty private val serializableTraceContext = SerializableTraceContext(traceContext).toDamlProto.toByteArray @@ -76,6 +85,7 @@ class ParallelIndexerSubscriptionSpec classOf[ParallelIndexerSubscriptionSpec].getSimpleName ) implicit val materializer: Materializer = Materializer(actorSystem) + val emptyByteArray = new Array[Byte](0) private val someParty = DbDto.PartyEntry( ledger_offset = 1, @@ -102,24 +112,117 @@ class ParallelIndexerSubscriptionSpec submissionId = Some(Ref.SubmissionId.assertFromString("abc")), ) + private val updateId = TestUpdateId("mock_hash") + private val updateIdByteArray = updateId.toProtoPrimitive.toByteArray + private def offset(l: Long): Offset = Offset.tryFromLong(l) private val metrics = LedgerApiServerMetrics.ForTesting private def hashCid(key: String): ContractId = ContractId.V1(Hash.hashPrivateKey(key)) + private val someEventActivate = DbDto.EventActivate( + event_offset = 1, + update_id = updateIdByteArray, + workflow_id = None, + command_id = None, + submitters = None, + record_time = 1, + synchronizer_id = someSynchronizerId, + trace_context = serializableTraceContext, + external_transaction_hash = None, + event_type = 1, + event_sequential_id = 15, + node_id = 3, + additional_witnesses = None, + source_synchronizer_id = None, + reassignment_counter = None, + reassignment_id = None, + representative_package_id = "", + notPersistedContractId = hashCid("1"), + internal_contract_id = 1, + create_key_hash = None, + ) + + private val someEventDeactivate = DbDto.EventDeactivate( + event_offset = 1, + update_id = updateIdByteArray, + workflow_id = None, + command_id = None, + submitters = None, + record_time = 1, + synchronizer_id = someSynchronizerId, + trace_context = serializableTraceContext, + external_transaction_hash = None, + event_type = 1, + event_sequential_id = 1, + node_id = 1, + 
deactivated_event_sequential_id = None, + additional_witnesses = None, + exercise_choice = None, + exercise_choice_interface_id = None, + exercise_argument = None, + exercise_result = None, + exercise_actors = None, + exercise_last_descendant_node_id = None, + exercise_argument_compression = None, + exercise_result_compression = None, + reassignment_id = None, + assignment_exclusivity = None, + target_synchronizer_id = None, + reassignment_counter = None, + contract_id = hashCid("1"), + internal_contract_id = None, + template_id = "", + package_id = "", + stakeholders = Set.empty, + ledger_effective_time = None, + ) + + private val someEventWitnessed = DbDto.EventVariousWitnessed( + event_offset = 1, + update_id = updateIdByteArray, + workflow_id = None, + command_id = None, + submitters = None, + record_time = 1, + synchronizer_id = someSynchronizerId, + trace_context = serializableTraceContext, + external_transaction_hash = None, + event_type = 1, + event_sequential_id = 1, + node_id = 1, + additional_witnesses = Set.empty, + consuming = None, + exercise_choice = None, + exercise_choice_interface_id = None, + exercise_argument = None, + exercise_result = None, + exercise_actors = None, + exercise_last_descendant_node_id = None, + exercise_argument_compression = None, + exercise_result_compression = None, + representative_package_id = None, + contract_id = None, + internal_contract_id = None, + template_id = None, + package_id = None, + ledger_effective_time = None, + ) + private val someEventCreated = DbDto.EventCreate( event_offset = 1, - update_id = "", + update_id = emptyByteArray, ledger_effective_time = 15, command_id = None, workflow_id = None, user_id = None, submitters = None, node_id = 3, - contract_id = hashCid("1").toBytes.toByteArray, + contract_id = hashCid("1"), template_id = "", package_id = "", + representative_package_id = "", flat_event_witnesses = Set.empty, tree_event_witnesses = Set.empty, create_argument = Array.empty, @@ -132,28 +235,30 @@ class ParallelIndexerSubscriptionSpec create_key_value_compression = None, event_sequential_id = 0, authentication_data = Array.empty, - synchronizer_id = "x::sourcesynchronizer", + synchronizer_id = someSynchronizerId, trace_context = serializableTraceContext, record_time = 0, external_transaction_hash = None, + internal_contract_id = 1, ) private val someEventExercise = DbDto.EventExercise( consuming = true, event_offset = 1, - update_id = "", + update_id = emptyByteArray, ledger_effective_time = 15, command_id = None, workflow_id = None, user_id = None, submitters = None, node_id = 3, - contract_id = hashCid("1").toBytes.toByteArray, + contract_id = hashCid("1"), template_id = "", package_id = "", flat_event_witnesses = Set.empty, tree_event_witnesses = Set.empty, exercise_choice = "", + exercise_choice_interface_id = None, exercise_argument = Array.empty, exercise_result = None, exercise_actors = Set.empty, @@ -161,20 +266,21 @@ class ParallelIndexerSubscriptionSpec exercise_argument_compression = None, exercise_result_compression = None, event_sequential_id = 0, - synchronizer_id = "", + synchronizer_id = someSynchronizerId, trace_context = serializableTraceContext, record_time = 0, external_transaction_hash = None, + deactivated_event_sequential_id = None, ) private val someEventAssign = DbDto.EventAssign( event_offset = 1, - update_id = "", + update_id = emptyByteArray, command_id = None, workflow_id = None, submitter = None, node_id = 0, - contract_id = hashCid("1").toBytes.toByteArray, + contract_id = hashCid("1"), 
template_id = "", package_id = "", flat_event_witnesses = Set.empty, @@ -189,33 +295,35 @@ class ParallelIndexerSubscriptionSpec event_sequential_id = 0, ledger_effective_time = 0, authentication_data = Array.empty, - source_synchronizer_id = "", - target_synchronizer_id = "", - reassignment_id = "", + source_synchronizer_id = someSynchronizerId, + target_synchronizer_id = someSynchronizerId, + reassignment_id = new Array[Byte](0), reassignment_counter = 0, trace_context = serializableTraceContext, record_time = 0, + internal_contract_id = 1, ) private val someEventUnassign = DbDto.EventUnassign( event_offset = 1, - update_id = "", + update_id = emptyByteArray, command_id = None, workflow_id = None, submitter = None, node_id = 1, - contract_id = hashCid("1").toBytes.toByteArray, + contract_id = hashCid("1"), template_id = "", package_id = "", flat_event_witnesses = Set.empty, event_sequential_id = 0, - source_synchronizer_id = "", - target_synchronizer_id = "", - reassignment_id = "", + source_synchronizer_id = someSynchronizerId, + target_synchronizer_id = someSynchronizerId, + reassignment_id = new Array[Byte](0), reassignment_counter = 0, assignment_exclusivity = None, trace_context = serializableTraceContext, record_time = 0, + deactivated_event_sequential_id = None, ) private val someCompletion = DbDto.CommandCompletion( @@ -233,7 +341,7 @@ class ParallelIndexerSubscriptionSpec deduplication_offset = None, deduplication_duration_seconds = None, deduplication_duration_nanos = None, - synchronizer_id = "x::sourcesynchronizer", + synchronizer_id = someSynchronizerId, message_uuid = None, is_transaction = true, trace_context = serializableTraceContext, @@ -272,7 +380,7 @@ class ParallelIndexerSubscriptionSpec lastStringInterningId = 0, lastPublicationTime = CantonTimestamp.MinValue, ), - lastTraceContext = TraceContext.empty, + batchTraceContext = TraceContext.empty, batch = Vector( someParty, someParty, @@ -283,8 +391,11 @@ class ParallelIndexerSubscriptionSpec ), batchSize = 3, offsetsUpdates = offsetsAndUpdates, + missingDeactivatedActivations = Map.empty, + activeContracts = ParallelIndexerSubscription.EmptyActiveContracts, ) - actual shouldBe expected + actual.copy(batchTraceContext = TraceContext.empty) shouldBe expected + actual.activeContracts eq ParallelIndexerSubscription.EmptyActiveContracts } behavior of "seqMapperZero" @@ -297,22 +408,28 @@ class ParallelIndexerSubscriptionSpec lastPublicationTime = CantonTimestamp.now(), ) - ParallelIndexerSubscription.seqMapperZero(Some(ledgerEnd)) shouldBe Batch( + val result = ParallelIndexerSubscription.seqMapperZero(Some(ledgerEnd)) + result shouldBe Batch( ledgerEnd = ledgerEnd, - lastTraceContext = TraceContext.empty, + batchTraceContext = TraceContext.empty, batch = Vector.empty, batchSize = 0, offsetsUpdates = Vector.empty, + missingDeactivatedActivations = Map.empty, + activeContracts = ParallelIndexerSubscription.EmptyActiveContracts, ) + result.activeContracts eq ParallelIndexerSubscription.EmptyActiveContracts } it should "provide required Batch in case starting from scratch" in { ParallelIndexerSubscription.seqMapperZero(None) shouldBe Batch( ledgerEnd = ZeroLedgerEnd, - lastTraceContext = TraceContext.empty, + batchTraceContext = TraceContext.empty, batch = Vector.empty, batchSize = 0, offsetsUpdates = Vector.empty, + missingDeactivatedActivations = Map.empty, + activeContracts = ParallelIndexerSubscription.EmptyActiveContracts, ) } @@ -331,51 +448,63 @@ class ParallelIndexerSubscriptionSpec lastStringInterningId = 26, 
lastPublicationTime = previousPublicationTime, ) + val ledgerEndCache = MutableLedgerEndCache() val result = ParallelIndexerSubscription.seqMapper( internize = _.zipWithIndex.map(x => x._2 -> x._2.toString).take(2), metrics, simClock, logger, + ledgerEndCache, )( previous = ParallelIndexerSubscription.seqMapperZero(Some(previousLedgerEnd)), current = Batch( ledgerEnd = ZeroLedgerEnd.copy(lastOffset = offset(2)), - lastTraceContext = TraceContext.empty, + batchTraceContext = TraceContext.empty, batch = Vector( someParty, someParty, someEventCreated, - DbDto.IdFilterCreateStakeholder(0L, "", ""), - DbDto.IdFilterCreateNonStakeholderInformee(0L, "", ""), - DbDto.IdFilterConsumingStakeholder(0L, "", ""), - DbDto.IdFilterConsumingNonStakeholderInformee(0L, "", ""), - DbDto.IdFilterNonConsumingInformee(0L, "", ""), + DbDto.IdFilterCreateStakeholder(0L, "", "", first_per_sequential_id = true), + DbDto.IdFilterCreateNonStakeholderInformee(0L, "", "", first_per_sequential_id = true), + DbDto.IdFilterConsumingStakeholder(0L, "", "", first_per_sequential_id = true), + DbDto.IdFilterConsumingNonStakeholderInformee(0L, "", "", first_per_sequential_id = true), + DbDto.IdFilterNonConsumingInformee(0L, "", "", first_per_sequential_id = true), someEventCreated, someEventCreated, - DbDto.TransactionMeta("", 1, 0L, 0L, "x::sourcesynchronizer", 0L, 0L), + DbDto.TransactionMeta(emptyByteArray, 1, 0L, 0L, someSynchronizerId, 0L, 0L), someParty, someEventExercise, - DbDto.TransactionMeta("", 1, 0L, 0L, "x::sourcesynchronizer", 0L, 0L), + DbDto.TransactionMeta(emptyByteArray, 1, 0L, 0L, someSynchronizerId, 0L, 0L), someParty, someEventAssign, - DbDto.IdFilterAssignStakeholder(0L, "", ""), - DbDto.IdFilterAssignStakeholder(0L, "", ""), - DbDto.TransactionMeta("", 1, 0L, 0L, "x::sourcesynchronizer", 0L, 0L), + DbDto.IdFilterAssignStakeholder(0L, "", "", first_per_sequential_id = true), + DbDto.IdFilterAssignStakeholder(0L, "", "", first_per_sequential_id = false), + DbDto.TransactionMeta(emptyByteArray, 1, 0L, 0L, someSynchronizerId, 0L, 0L), someParty, someEventUnassign, - DbDto.IdFilterUnassignStakeholder(0L, "", ""), - DbDto.IdFilterUnassignStakeholder(0L, "", ""), - DbDto.TransactionMeta("", 1, 0L, 0L, "x::sourcesynchronizer", 0L, 0L), + DbDto.IdFilterUnassignStakeholder(0L, "", "", first_per_sequential_id = true), + DbDto.IdFilterUnassignStakeholder(0L, "", "", first_per_sequential_id = false), + DbDto.TransactionMeta(emptyByteArray, 1, 0L, 0L, someSynchronizerId, 0L, 0L), someParty, someCompletion, + someEventActivate, + DbDto.IdFilter(0, "", "", first_per_sequential_id = false).activateStakeholder, + DbDto.IdFilter(0, "", "", first_per_sequential_id = false).activateWitness, + someEventDeactivate, + DbDto.IdFilter(0, "", "", first_per_sequential_id = false).deactivateStakeholder, + DbDto.IdFilter(0, "", "", first_per_sequential_id = false).deactivateWitness, + someEventWitnessed, + DbDto.IdFilter(0, "", "", first_per_sequential_id = false).variousWitness, ), batchSize = 3, offsetsUpdates = offsetsAndUpdates, + missingDeactivatedActivations = Map.empty, + activeContracts = ParallelIndexerSubscription.EmptyActiveContracts, ), ) import scala.util.chaining.* - result.ledgerEnd.lastEventSeqId shouldBe 21 + result.ledgerEnd.lastEventSeqId shouldBe 24 result.ledgerEnd.lastStringInterningId shouldBe 1 result.ledgerEnd.lastPublicationTime shouldBe currentPublicationTime result.ledgerEnd.lastOffset shouldBe offset(2) @@ -428,10 +557,38 @@ class ParallelIndexerSubscriptionSpec .batch(25) 
.asInstanceOf[DbDto.CommandCompletion] .publication_time shouldBe currentPublicationTime.toMicros - result.batch(26).asInstanceOf[DbDto.StringInterningDto].internalId shouldBe 0 - result.batch(26).asInstanceOf[DbDto.StringInterningDto].externalString shouldBe "0" - result.batch(27).asInstanceOf[DbDto.StringInterningDto].internalId shouldBe 1 - result.batch(27).asInstanceOf[DbDto.StringInterningDto].externalString shouldBe "1" + result.batch(26).asInstanceOf[DbDto.EventActivate].event_sequential_id shouldBe 22L + result + .batch(27) + .asInstanceOf[DbDto.IdFilterActivateStakeholder] + .idFilter + .event_sequential_id shouldBe 22L + result + .batch(28) + .asInstanceOf[DbDto.IdFilterActivateWitness] + .idFilter + .event_sequential_id shouldBe 22L + result.batch(29).asInstanceOf[DbDto.EventDeactivate].event_sequential_id shouldBe 23L + result + .batch(30) + .asInstanceOf[DbDto.IdFilterDeactivateStakeholder] + .idFilter + .event_sequential_id shouldBe 23L + result + .batch(31) + .asInstanceOf[DbDto.IdFilterDeactivateWitness] + .idFilter + .event_sequential_id shouldBe 23L + result.batch(32).asInstanceOf[DbDto.EventVariousWitnessed].event_sequential_id shouldBe 24L + result + .batch(33) + .asInstanceOf[DbDto.IdFilterVariousWitness] + .idFilter + .event_sequential_id shouldBe 24L + result.batch(34).asInstanceOf[DbDto.StringInterningDto].internalId shouldBe 0 + result.batch(34).asInstanceOf[DbDto.StringInterningDto].externalString shouldBe "0" + result.batch(35).asInstanceOf[DbDto.StringInterningDto].internalId shouldBe 1 + result.batch(35).asInstanceOf[DbDto.StringInterningDto].externalString shouldBe "1" } it should "preserve sequence id if nothing to assign" in { @@ -442,11 +599,17 @@ class ParallelIndexerSubscriptionSpec lastPublicationTime = CantonTimestamp.now(), ) val simClock = new SimClock(loggerFactory = loggerFactory) - val result = ParallelIndexerSubscription.seqMapper(_ => Nil, metrics, simClock, logger)( + val result = ParallelIndexerSubscription.seqMapper( + _ => Nil, + metrics, + simClock, + logger, + MutableLedgerEndCache(), + )( ParallelIndexerSubscription.seqMapperZero(Some(previousLedgerEnd)), Batch( ledgerEnd = ZeroLedgerEnd.copy(lastOffset = offset(2)), - lastTraceContext = TraceContext.empty, + batchTraceContext = TraceContext.empty, batch = Vector( someParty, someParty, @@ -455,6 +618,8 @@ class ParallelIndexerSubscriptionSpec ), batchSize = 3, offsetsUpdates = offsetsAndUpdates, + missingDeactivatedActivations = Map.empty, + activeContracts = ParallelIndexerSubscription.EmptyActiveContracts, ), ) result.ledgerEnd.lastEventSeqId shouldBe 15 @@ -462,25 +627,26 @@ class ParallelIndexerSubscriptionSpec result.ledgerEnd.lastOffset shouldBe offset(2) } + private val now = CantonTimestamp.now() + private val previous = now.plusSeconds(10) + private val previousLedgerEnd = LedgerEnd( + lastOffset = offset(1), + lastEventSeqId = 15, + lastStringInterningId = 25, + lastPublicationTime = previous, + ) + private val simClock = new SimClock(now, loggerFactory = loggerFactory) + it should "take the last publication time, if bigger than the current time, and log" in { - val now = CantonTimestamp.now() - val simClock = new SimClock(now, loggerFactory = loggerFactory) - val previous = now.plusSeconds(10) - val previousLedgerEnd = LedgerEnd( - lastOffset = offset(1), - lastEventSeqId = 15, - lastStringInterningId = 25, - lastPublicationTime = previous, - ) loggerFactory.assertLogs( LoggerNameContains("ParallelIndexerSubscription") && SuppressionRule.Level(Level.INFO) )( 
ParallelIndexerSubscription - .seqMapper(_ => Nil, metrics, simClock, logger)( + .seqMapper(_ => Nil, metrics, simClock, logger, MutableLedgerEndCache())( ParallelIndexerSubscription.seqMapperZero(Some(previousLedgerEnd)), Batch( ledgerEnd = ZeroLedgerEnd.copy(lastOffset = offset(2)), - lastTraceContext = TraceContext.empty, + batchTraceContext = TraceContext.empty, batch = Vector( someParty, someParty, @@ -489,6 +655,8 @@ class ParallelIndexerSubscriptionSpec ), batchSize = 3, offsetsUpdates = offsetsAndUpdates, + missingDeactivatedActivations = Map.empty, + activeContracts = ParallelIndexerSubscription.EmptyActiveContracts, ), ) .ledgerEnd @@ -497,15 +665,533 @@ class ParallelIndexerSubscriptionSpec ) } + it should "add activations to the ACS" in { + val simClock = new SimClock(now, loggerFactory = loggerFactory) + val zeroBatch = ParallelIndexerSubscription.seqMapperZero(Some(previousLedgerEnd)) + val ledgerEndCache = MutableLedgerEndCache() + val result = ParallelIndexerSubscription + .seqMapper(_ => Nil, metrics, simClock, logger, ledgerEndCache)( + zeroBatch, + Batch( + ledgerEnd = ZeroLedgerEnd.copy(lastOffset = offset(2)), + batchTraceContext = TraceContext.empty, + batch = Vector( + someEventCreated.copy( + synchronizer_id = someSynchronizerId, + contract_id = hashCid("A"), + flat_event_witnesses = Set("party"), + ), + someEventAssign.copy( + target_synchronizer_id = someSynchronizerId2, + contract_id = hashCid("B"), + ), + someEventActivate.copy( + synchronizer_id = someSynchronizerId2, + notPersistedContractId = hashCid("C"), + ), + ), + batchSize = 10, + offsetsUpdates = offsetsAndUpdates, + missingDeactivatedActivations = Map.empty, + activeContracts = ParallelIndexerSubscription.EmptyActiveContracts, + ), + ) + zeroBatch.activeContracts shouldBe Map( + (someSynchronizerId, hashCid("A")) -> 16L, + (someSynchronizerId2, hashCid("B")) -> 17L, + (someSynchronizerId2, hashCid("C")) -> 18L, + ) + result.missingDeactivatedActivations shouldBe Map.empty + } + + it should "report double activations as warnings" in { + val simClock = new SimClock(now, loggerFactory = loggerFactory) + val zeroBatch = ParallelIndexerSubscription.seqMapperZero(Some(previousLedgerEnd)) + val ledgerEndCache = MutableLedgerEndCache() + zeroBatch.activeContracts.addAll( + Seq( + (someSynchronizerId, hashCid("A")) -> 1L, + (someSynchronizerId2, hashCid("B")) -> 2L, + ) + ) + val result = loggerFactory.assertLogs( + LoggerNameContains("ParallelIndexerSubscription") && SuppressionRule.Level(Level.WARN) + )( + ParallelIndexerSubscription + .seqMapper(_ => Nil, metrics, simClock, logger, ledgerEndCache)( + zeroBatch, + Batch( + ledgerEnd = ZeroLedgerEnd.copy(lastOffset = offset(2)), + batchTraceContext = TraceContext.empty, + batch = Vector( + someEventCreated.copy( + synchronizer_id = someSynchronizerId, + contract_id = hashCid("A"), + flat_event_witnesses = Set("party"), + ), + someEventAssign.copy( + target_synchronizer_id = someSynchronizerId2, + contract_id = hashCid("B"), + ), + ), + batchSize = 10, + offsetsUpdates = offsetsAndUpdates, + missingDeactivatedActivations = Map.empty, + activeContracts = ParallelIndexerSubscription.EmptyActiveContracts, + ), + ), + _.warningMessage should include( + "Double activation at eventSeqId: 16. Previous at Some(1) This should not happen" + ), + _.warningMessage should include( + "Double activation at eventSeqId: 17.
Previous at Some(2) This should not happen" + ), + ) + zeroBatch.activeContracts shouldBe Map( + (someSynchronizerId, hashCid("A")) -> 16L, + (someSynchronizerId2, hashCid("B")) -> 17L, + ) + result.missingDeactivatedActivations shouldBe Map.empty + } + + it should "activations with no flat_event_witnesses are not added to the acs" in { + val simClock = new SimClock(now, loggerFactory = loggerFactory) + val zeroBatch = ParallelIndexerSubscription.seqMapperZero(Some(previousLedgerEnd)) + val ledgerEndCache = MutableLedgerEndCache() + val result = ParallelIndexerSubscription + .seqMapper(_ => Nil, metrics, simClock, logger, ledgerEndCache)( + zeroBatch, + Batch( + ledgerEnd = ZeroLedgerEnd.copy(lastOffset = offset(2)), + batchTraceContext = TraceContext.empty, + batch = Vector( + someEventCreated.copy( + synchronizer_id = someSynchronizerId, + contract_id = hashCid("A"), + flat_event_witnesses = Set(), + ) + ), + batchSize = 10, + offsetsUpdates = offsetsAndUpdates, + missingDeactivatedActivations = Map.empty, + activeContracts = ParallelIndexerSubscription.EmptyActiveContracts, + ), + ) + zeroBatch.activeContracts shouldBe Map.empty + result.missingDeactivatedActivations shouldBe Map.empty + } + + it should "deactivation is extending the missing activations if not found (but not for divulged or non-consumed contracts)" in { + val simClock = new SimClock(now, loggerFactory = loggerFactory) + val zeroBatch = ParallelIndexerSubscription.seqMapperZero(Some(previousLedgerEnd)) + val ledgerEndCache = MutableLedgerEndCache() + val result = ParallelIndexerSubscription + .seqMapper(_ => Nil, metrics, simClock, logger, ledgerEndCache)( + zeroBatch, + Batch( + ledgerEnd = ZeroLedgerEnd.copy(lastOffset = offset(2)), + batchTraceContext = TraceContext.empty, + batch = Vector( + someEventUnassign.copy( + source_synchronizer_id = someSynchronizerId, + contract_id = hashCid("A"), + ), + someEventDeactivate.copy( + synchronizer_id = someSynchronizerId2, + contract_id = hashCid("E"), + ), + someEventExercise.copy( + synchronizer_id = someSynchronizerId, + contract_id = hashCid("C"), + flat_event_witnesses = Set.empty, + ), + someEventExercise.copy( + consuming = false, + synchronizer_id = someSynchronizerId, + contract_id = hashCid("D"), + flat_event_witnesses = Set("party"), + ), + someEventExercise.copy( + synchronizer_id = someSynchronizerId, + contract_id = hashCid("B"), + flat_event_witnesses = Set("party"), + ), + ), + batchSize = 10, + offsetsUpdates = offsetsAndUpdates, + missingDeactivatedActivations = Map.empty, + activeContracts = ParallelIndexerSubscription.EmptyActiveContracts, + ), + ) + zeroBatch.activeContracts shouldBe Map.empty + result.missingDeactivatedActivations shouldBe Map( + (someSynchronizerId, hashCid("A")) -> None, + (someSynchronizerId, hashCid("B")) -> None, + (someSynchronizerId2, hashCid("E")) -> None, + ) + result.batch + .collect { case u: DbDto.EventDeactivate => + u.deactivated_event_sequential_id + } + .shouldBe( + Seq( + Some(0) + ) + ) + result.batch + .collect { case u: DbDto.EventUnassign => + u.deactivated_event_sequential_id + } + .shouldBe( + Seq( + Some(0) + ) + ) + result.batch + .collect { case u: DbDto.EventExercise => + u.deactivated_event_sequential_id + } + .shouldBe( + Seq( + None, + None, + Some(0), + ) + ) + } + + it should "deactivation is computed directly from the active contracts if it has it - and also removing activeness thereof" in { + val simClock = new SimClock(now, loggerFactory = loggerFactory) + val zeroBatch = 
ParallelIndexerSubscription.seqMapperZero(Some(previousLedgerEnd)) + val ledgerEndCache = MutableLedgerEndCache() + zeroBatch.activeContracts + .addAll( + Seq( + (someSynchronizerId, hashCid("A")) -> 1L, + (someSynchronizerId2, hashCid("B")) -> 2L, + (someSynchronizerId3, hashCid("A")) -> 3L, + (someSynchronizerId3, hashCid("B")) -> 4L, + (someSynchronizerId3, hashCid("C")) -> 5L, + (someSynchronizerId, hashCid("C")) -> 6L, + ) + ) + val result = ParallelIndexerSubscription + .seqMapper(_ => Nil, metrics, simClock, logger, ledgerEndCache)( + zeroBatch, + Batch( + ledgerEnd = ZeroLedgerEnd.copy(lastOffset = offset(2)), + batchTraceContext = TraceContext.empty, + batch = Vector( + someEventDeactivate.copy( + synchronizer_id = someSynchronizerId3, + contract_id = hashCid("C"), + ), + someEventUnassign.copy( + source_synchronizer_id = someSynchronizerId2, + contract_id = hashCid("A"), + ), + someEventExercise.copy( + synchronizer_id = someSynchronizerId, + contract_id = hashCid("B"), + flat_event_witnesses = Set("party"), + ), + someEventDeactivate.copy( + synchronizer_id = someSynchronizerId2, + contract_id = hashCid("C"), + ), + someEventUnassign.copy( + source_synchronizer_id = someSynchronizerId, + contract_id = hashCid("A"), + ), + someEventExercise.copy( + synchronizer_id = someSynchronizerId2, + contract_id = hashCid("B"), + flat_event_witnesses = Set("party"), + ), + ), + batchSize = 10, + offsetsUpdates = offsetsAndUpdates, + missingDeactivatedActivations = Map.empty, + activeContracts = ParallelIndexerSubscription.EmptyActiveContracts, + ), + ) + zeroBatch.activeContracts shouldBe Map( + (someSynchronizerId3, hashCid("A")) -> 3L, + (someSynchronizerId3, hashCid("B")) -> 4L, + (someSynchronizerId, hashCid("C")) -> 6L, + ) + result.missingDeactivatedActivations shouldBe Map( + (someSynchronizerId2, hashCid("A")) -> None, + (someSynchronizerId, hashCid("B")) -> None, + (someSynchronizerId2, hashCid("C")) -> None, + ) + result.batch + .collect { case u: DbDto.EventDeactivate => + u.deactivated_event_sequential_id + } + .shouldBe( + Seq( + Some(5L), + Some(0L), + ) + ) + result.batch + .collect { case u: DbDto.EventUnassign => + u.deactivated_event_sequential_id + } + .shouldBe( + Seq( + Some(0L), + Some(1L), + ) + ) + result.batch + .collect { case u: DbDto.EventExercise => + u.deactivated_event_sequential_id + } + .shouldBe( + Seq( + Some(0L), + Some(2L), + ) + ) + } + + it should "prune activations correctly based on the actual ledger end" in { + val simClock = new SimClock(now, loggerFactory = loggerFactory) + val zeroBatch = ParallelIndexerSubscription.seqMapperZero(Some(previousLedgerEnd)) + val ledgerEndCache = MutableLedgerEndCache() + zeroBatch.activeContracts + .addAll( + Seq( + (someSynchronizerId, hashCid("A")) -> 100L, + (someSynchronizerId2, hashCid("B")) -> 110L, + (someSynchronizerId3, hashCid("A")) -> 120L, + (someSynchronizerId3, hashCid("B")) -> 130L, + ) + ) + def processSeqMapper() = ParallelIndexerSubscription + .seqMapper(_ => Nil, metrics, simClock, logger, ledgerEndCache)( + zeroBatch, + Batch( + ledgerEnd = ZeroLedgerEnd.copy(lastOffset = offset(2)), + batchTraceContext = TraceContext.empty, + batch = Vector( + someParty + ), + batchSize = 10, + offsetsUpdates = offsetsAndUpdates, + missingDeactivatedActivations = Map.empty, + activeContracts = ParallelIndexerSubscription.EmptyActiveContracts, + ), + ) + zeroBatch.activeContracts shouldBe Map( + (someSynchronizerId, hashCid("A")) -> 100L, + (someSynchronizerId2, hashCid("B")) -> 110L, + (someSynchronizerId3, 
hashCid("A")) -> 120L, + (someSynchronizerId3, hashCid("B")) -> 130L, + ) + + // ledger end below + ledgerEndCache.set( + Some( + previousLedgerEnd.copy( + lastEventSeqId = 10 + ) + ) + ) + processSeqMapper() + zeroBatch.activeContracts shouldBe Map( + (someSynchronizerId, hashCid("A")) -> 100L, + (someSynchronizerId2, hashCid("B")) -> 110L, + (someSynchronizerId3, hashCid("A")) -> 120L, + (someSynchronizerId3, hashCid("B")) -> 130L, + ) + + // ledger end on first + ledgerEndCache.set( + Some( + previousLedgerEnd.copy( + lastEventSeqId = 100L + ) + ) + ) + processSeqMapper() + zeroBatch.activeContracts shouldBe Map( + (someSynchronizerId2, hashCid("B")) -> 110L, + (someSynchronizerId3, hashCid("A")) -> 120L, + (someSynchronizerId3, hashCid("B")) -> 130L, + ) + + // ledger end after third + ledgerEndCache.set( + Some( + previousLedgerEnd.copy( + lastEventSeqId = 125L + ) + ) + ) + processSeqMapper() + zeroBatch.activeContracts shouldBe Map( + (someSynchronizerId3, hashCid("B")) -> 130L + ) + } + + behavior of "refillMissingDeactivatiedActivations" + + it should "correctly refill the missing activations" in { + ParallelIndexerSubscription + .refillMissingDeactivatedActivations(logger)( + Batch( + ledgerEnd = previousLedgerEnd, + batch = Vector( + someEventUnassign.copy( + source_synchronizer_id = someSynchronizerId2, + contract_id = hashCid("A"), + deactivated_event_sequential_id = Some(0), + ), + someEventDeactivate.copy( + synchronizer_id = someSynchronizerId, + contract_id = hashCid("C"), + deactivated_event_sequential_id = Some(0), + ), + someEventExercise.copy( + synchronizer_id = someSynchronizerId, + contract_id = hashCid("B"), + flat_event_witnesses = Set("party"), + deactivated_event_sequential_id = Some(0), + ), + someEventExercise.copy( + synchronizer_id = someSynchronizerId, + contract_id = hashCid("B"), + flat_event_witnesses = Set("party"), + deactivated_event_sequential_id = None, + ), + someEventExercise.copy( + synchronizer_id = someSynchronizerId, + contract_id = hashCid("B"), + flat_event_witnesses = Set("party"), + deactivated_event_sequential_id = Some(10000), + ), + ), + batchSize = 1, + offsetsUpdates = Vector.empty, + activeContracts = EmptyActiveContracts, + missingDeactivatedActivations = Map( + (someSynchronizerId2, hashCid("A")) -> Some(123), + (someSynchronizerId, hashCid("B")) -> Some(1234), + (someSynchronizerId, hashCid("C")) -> Some(12345), + ), + batchTraceContext = TraceContext.empty, + ) + ) + .batch should contain theSameElementsInOrderAs Vector( + someEventUnassign.copy( + source_synchronizer_id = someSynchronizerId2, + contract_id = hashCid("A"), + deactivated_event_sequential_id = Some(123), + ), + someEventDeactivate.copy( + synchronizer_id = someSynchronizerId, + contract_id = hashCid("C"), + deactivated_event_sequential_id = Some(12345), + ), + someEventExercise.copy( + synchronizer_id = someSynchronizerId, + contract_id = hashCid("B"), + flat_event_witnesses = Set("party"), + deactivated_event_sequential_id = Some(1234), + ), + someEventExercise.copy( + synchronizer_id = someSynchronizerId, + contract_id = hashCid("B"), + flat_event_witnesses = Set("party"), + deactivated_event_sequential_id = None, + ), + someEventExercise.copy( + synchronizer_id = someSynchronizerId, + contract_id = hashCid("B"), + flat_event_witnesses = Set("party"), + deactivated_event_sequential_id = Some(10000), + ), + ) + } + + it should "report warning, but succeed, if activation is missing" in { + loggerFactory.assertLogs( + 
LoggerNameContains("ParallelIndexerSubscription") && SuppressionRule.Level(Level.WARN) + )( + ParallelIndexerSubscription + .refillMissingDeactivatedActivations(logger)( + Batch( + ledgerEnd = previousLedgerEnd, + batch = Vector( + someEventUnassign.copy( + source_synchronizer_id = someSynchronizerId2, + contract_id = hashCid("A"), + deactivated_event_sequential_id = Some(0), + ) + ), + batchSize = 1, + offsetsUpdates = Vector.empty, + activeContracts = EmptyActiveContracts, + missingDeactivatedActivations = Map( + (someSynchronizerId2, hashCid("A")) -> None, + (someSynchronizerId, hashCid("B")) -> Some(1234), + ), + batchTraceContext = TraceContext.empty, + ) + ) + .batch should contain theSameElementsInOrderAs Vector( + someEventUnassign.copy( + source_synchronizer_id = someSynchronizerId2, + contract_id = hashCid("A"), + deactivated_event_sequential_id = None, + ) + ), + _.warningMessage should include( + s"Activation is missing for a deactivation for unassign event with offset:1 nodeId:1 for synchronizerId:$someSynchronizerId2 contractId:${hashCid("A")}." + ), + ) + } + + it should "report error and fail, if activation was not even requested" in { + loggerFactory.assertInternalError[IllegalStateException]( + ParallelIndexerSubscription.refillMissingDeactivatedActivations(logger)( + Batch( + ledgerEnd = previousLedgerEnd, + batch = Vector( + someEventUnassign.copy( + source_synchronizer_id = someSynchronizerId2, + contract_id = hashCid("A"), + deactivated_event_sequential_id = Some(0), + ) + ), + batchSize = 1, + offsetsUpdates = Vector.empty, + activeContracts = EmptyActiveContracts, + missingDeactivatedActivations = Map( + (someSynchronizerId, hashCid("B")) -> Some(1234) + ), + batchTraceContext = TraceContext.empty, + ) + ), + _.getMessage should include( + s"Programming error: deactivation reference is missing for unassign event with offset:1 nodeId:1 for synchronizerId:$someSynchronizerId2 contractId:${hashCid("A")}, but lookup was not even initiated." 
+ ), + ) + } + behavior of "batcher" it should "batch correctly in happy path case" in { val result = ParallelIndexerSubscription.batcher( - batchF = _ => "bumm" + batchF = _ => "bumm", + logger = logger, )( Batch( ledgerEnd = ZeroLedgerEnd.copy(lastOffset = offset(2)), - lastTraceContext = TraceContext.empty, + batchTraceContext = TraceContext.empty, batch = Vector( someParty, someParty, @@ -514,14 +1200,18 @@ class ParallelIndexerSubscriptionSpec ), batchSize = 3, offsetsUpdates = offsetsAndUpdates, + missingDeactivatedActivations = Map.empty, + activeContracts = ParallelIndexerSubscription.EmptyActiveContracts, ) ) result shouldBe Batch( ledgerEnd = ZeroLedgerEnd.copy(lastOffset = offset(2)), - lastTraceContext = TraceContext.empty, + batchTraceContext = TraceContext.empty, batch = "bumm", batchSize = 3, offsetsUpdates = offsetsAndUpdates, + missingDeactivatedActivations = Map.empty, + activeContracts = ParallelIndexerSubscription.EmptyActiveContracts, ) } @@ -563,10 +1253,12 @@ class ParallelIndexerSubscriptionSpec ) val inBatch = Batch( ledgerEnd = ledgerEnd, - lastTraceContext = TraceContext.empty, + batchTraceContext = TraceContext.empty, batch = batchPayload, batchSize = 0, offsetsUpdates = Vector.empty, + missingDeactivatedActivations = Map.empty, + activeContracts = ParallelIndexerSubscription.EmptyActiveContracts, ) val persistedTransferOffsets = new AtomicBoolean(false) @@ -586,8 +1278,6 @@ class ParallelIndexerSubscriptionSpec dbDispatcher, metrics, logger, - )( - traceContext )(inBatch) val outBatch = Await.result(outBatchF, 10.seconds) @@ -595,10 +1285,12 @@ class ParallelIndexerSubscriptionSpec outBatch shouldBe Batch( ledgerEnd = ledgerEnd, - lastTraceContext = TraceContext.empty, + batchTraceContext = TraceContext.empty, batch = zeroDbBatch, batchSize = 0, offsetsUpdates = Vector.empty, + missingDeactivatedActivations = Map.empty, + activeContracts = ParallelIndexerSubscription.EmptyActiveContracts, ) persistedTransferOffsets.get() shouldBe true } @@ -627,10 +1319,12 @@ class ParallelIndexerSubscriptionSpec val batch = Batch( ledgerEnd = ledgerEnd, - lastTraceContext = TraceContext.empty, + batchTraceContext = TraceContext.empty, batch = "Some batch payload", batchSize = 0, offsetsUpdates = Vector.empty, + missingDeactivatedActivations = Map.empty, + activeContracts = ParallelIndexerSubscription.EmptyActiveContracts, ) val batchOfBatches = Vector( @@ -784,7 +1478,7 @@ class ParallelIndexerSubscriptionSpec lastStringInterningId = 310, lastPublicationTime = CantonTimestamp.ofEpochMicro(15), ), - lastTraceContext = TraceContext.empty, + batchTraceContext = TraceContext.empty, batch = (), batchSize = 0, offsetsUpdates = Vector( @@ -799,6 +1493,8 @@ class ParallelIndexerSubscriptionSpec recordTime = someSequencerIndex1.sequencerTimestamp, ), ), + missingDeactivatedActivations = Map.empty, + activeContracts = ParallelIndexerSubscription.EmptyActiveContracts, ), Batch( ledgerEnd = LedgerEnd( @@ -807,7 +1503,7 @@ class ParallelIndexerSubscriptionSpec lastStringInterningId = 320, lastPublicationTime = CantonTimestamp.ofEpochMicro(25), ), - lastTraceContext = TraceContext.empty, + batchTraceContext = TraceContext.empty, batch = (), batchSize = 0, offsetsUpdates = Vector( @@ -822,6 +1518,8 @@ class ParallelIndexerSubscriptionSpec recordTime = someSequencerIndex2.sequencerTimestamp, ), ), + missingDeactivatedActivations = Map.empty, + activeContracts = ParallelIndexerSubscription.EmptyActiveContracts, ), ) @@ -1305,16 +2003,18 @@ class ParallelIndexerSubscriptionSpec optByKeyNodes = 
None, ), transaction = CommittedTransaction(TransactionBuilder.Empty), - updateId = Ref.TransactionId.fromLong(15000), + updateId = TestUpdateId("15000"), contractAuthenticationData = Map.empty, + representativePackageIds = RepresentativePackageIds.Empty, synchronizerId = SynchronizerId.tryFromString("x::synchronizer"), repairCounter = repairCounter, recordTime = recordTime, + internalContractIds = Map.empty, )(TraceContext.empty) def floatingUpdate(recordTime: CantonTimestamp): Update = TopologyTransactionEffective( - updateId = Ref.TransactionId.fromLong(16000), + updateId = TestUpdateId("16000"), events = Set.empty, synchronizerId = SynchronizerId.tryFromString("x::synchronizer"), effectiveTime = recordTime, diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/PostPublishDataSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/PostPublishDataSpec.scala index 84866e27f5..eca90bf56b 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/PostPublishDataSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/PostPublishDataSpec.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton.platform.indexer.parallel import com.digitalasset.canton.RepairCounter import com.digitalasset.canton.data.{CantonTimestamp, LedgerTimeBoundaries, Offset} import com.digitalasset.canton.ledger.participant.state.Update.CommandRejected.FinalReason +import com.digitalasset.canton.ledger.participant.state.Update.TransactionAccepted.RepresentativePackageIds import com.digitalasset.canton.ledger.participant.state.Update.{ RepairTransactionAccepted, SequencedCommandRejected, @@ -18,6 +19,7 @@ import com.digitalasset.canton.ledger.participant.state.{ TransactionMeta, } import com.digitalasset.canton.logging.{NamedLogging, SuppressingLogger} +import com.digitalasset.canton.protocol.TestUpdateId import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.daml.lf.crypto @@ -41,7 +43,7 @@ class PostPublishDataSpec extends AnyFlatSpec with Matchers with NamedLogging { private val commandId = Ref.CommandId.assertFromString(UUID.randomUUID().toString) private val offset = Offset.tryFromLong(15) private val submissionId = Some(Ref.SubmissionId.assertFromString(UUID.randomUUID().toString)) - private val updateId = Ref.TransactionId.fromLong(15000) + private val updateId = TestUpdateId("15000") private val someHash = crypto.Hash.assertFromString("01cf85cfeb36d628ca2e6f583fa2331be029b6b28e877e1008fb3f862306c086") private val transactionMeta = TransactionMeta( @@ -79,6 +81,7 @@ class PostPublishDataSpec extends AnyFlatSpec with Matchers with NamedLogging { synchronizerId = synchronizerId, recordTime = cantonTime2, acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map.empty, )(TraceContext.empty), offset = offset, publicationTime = cantonTime1, @@ -111,6 +114,7 @@ class PostPublishDataSpec extends AnyFlatSpec with Matchers with NamedLogging { synchronizerId = synchronizerId, recordTime = cantonTime2, acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map.empty, )(TraceContext.empty), offset = offset, publicationTime = cantonTime1, @@ -124,9 +128,11 @@ class PostPublishDataSpec extends AnyFlatSpec with Matchers with NamedLogging { transaction = 
CommittedTransaction(TransactionBuilder.Empty), updateId = updateId, contractAuthenticationData = Map.empty, + representativePackageIds = RepresentativePackageIds.Empty, synchronizerId = synchronizerId, repairCounter = RepairCounter(65), recordTime = cantonTime2, + internalContractIds = Map.empty, )(TraceContext.empty), offset = offset, publicationTime = cantonTime1, diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/multisynchronizer/MultiSynchronizerIndexComponentTest.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/multisynchronizer/MultiSynchronizerIndexComponentTest.scala index 050c5eac8c..57a34e59a9 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/multisynchronizer/MultiSynchronizerIndexComponentTest.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/multisynchronizer/MultiSynchronizerIndexComponentTest.scala @@ -11,8 +11,9 @@ import com.digitalasset.canton.ledger.participant.state.{ TestAcsChangeFactory, Update, } +import com.digitalasset.canton.participant.store.ContractStore import com.digitalasset.canton.platform.IndexComponentTest -import com.digitalasset.canton.protocol.ReassignmentId +import com.digitalasset.canton.protocol.{ExampleContractFactory, ReassignmentId, TestUpdateId} import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.util.ReassignmentTag.{Source, Target} import com.digitalasset.daml.lf.data.{Bytes, Ref, Time} @@ -21,6 +22,7 @@ import com.digitalasset.daml.lf.value.Value import org.scalatest.flatspec.AnyFlatSpec import scala.collection.mutable +import scala.concurrent.Future class MultiSynchronizerIndexComponentTest extends AnyFlatSpec with IndexComponentTest { behavior of "MultiSynchronizer contract lookup" @@ -33,13 +35,42 @@ class MultiSynchronizerIndexComponentTest extends AnyFlatSpec with IndexComponen it should "successfully look up contract, even if only the assigned event is visible" in { val party = Ref.Party.assertFromString("party1") - val (reassignmentAccepted1, cn1) = - mkReassignmentAccepted(party, "UpdateId1", withAcsChange = false) - val (reassignmentAccepted2, cn2) = - mkReassignmentAccepted(party, "UpdateId2", withAcsChange = true) - ingestUpdates(reassignmentAccepted1, reassignmentAccepted2) - + val c1 = + ExampleContractFactory.build( + stakeholders = Set(party), + signatories = Set(party), + templateId = Ref.Identifier.assertFromString("P:M:T"), + argument = Value.ValueUnit, + ) + val c2 = + ExampleContractFactory.build( + stakeholders = Set(party), + signatories = Set(party), + templateId = Ref.Identifier.assertFromString("P:M:T"), + argument = Value.ValueUnit, + ) (for { + // contracts should be stored in canton contract store before ingesting the updates to get the internal contract ids mapping + _ <- participantContractStore + .storeContracts(Seq(c1, c2)) + .failOnShutdown("failed to store contracts") + (reassignmentAccepted1, cn1) <- + mkReassignmentAccepted( + party, + "UpdateId1", + createNode = c1.inst.toCreateNode, + withAcsChange = false, + participantContractStore = participantContractStore, + ) + (reassignmentAccepted2, cn2) <- + mkReassignmentAccepted( + party, + "UpdateId2", + createNode = c2.inst.toCreateNode, + withAcsChange = true, + participantContractStore = participantContractStore, + ) + _ = ingestUpdates(reassignmentAccepted1, reassignmentAccepted2) activeContractO1 <- 
index.lookupActiveContract(Set(party), cn1.coid) activeContractO2 <- index.lookupActiveContract(Set(party), cn2.coid) } yield { @@ -64,22 +95,18 @@ class MultiSynchronizerIndexComponentTest extends AnyFlatSpec with IndexComponen party: Ref.Party, updateIdS: String, withAcsChange: Boolean, - ): (Update.ReassignmentAccepted, Node.Create) = { + createNode: Node.Create, + participantContractStore: ContractStore, + ): Future[(Update.ReassignmentAccepted, Node.Create)] = { val synchronizer1 = SynchronizerId.tryFromString("x::synchronizer1") val synchronizer2 = SynchronizerId.tryFromString("x::synchronizer2") - val builder = TxBuilder() - val contractId = builder.newCid - val createNode = builder - .create( - id = contractId, - templateId = Ref.Identifier.assertFromString("P:M:T"), - argument = Value.ValueUnit, - signatories = Set(party), - observers = Set.empty, - ) - val updateId = Ref.TransactionId.assertFromString(updateIdS) + val updateId = TestUpdateId(updateIdS) val recordTime = Time.Timestamp.now() - ( + for { + internalContractIds <- participantContractStore + .lookupBatchedNonCachedInternalIds(Seq(createNode.coid)) + .failOnShutdown + } yield ( if (withAcsChange) Update.OnPRReassignmentAccepted( workflowId = None, @@ -104,6 +131,7 @@ class MultiSynchronizerIndexComponentTest extends AnyFlatSpec with IndexComponen recordTime = CantonTimestamp(recordTime), synchronizerId = synchronizer2, acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = internalContractIds, ) else Update.RepairReassignmentAccepted( @@ -128,6 +156,7 @@ class MultiSynchronizerIndexComponentTest extends AnyFlatSpec with IndexComponen repairCounter = RepairCounter.Genesis, recordTime = CantonTimestamp(recordTime), synchronizerId = synchronizer2, + internalContractIds = internalContractIds, ), createNode, ) diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/CompletionFromTransactionSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/CompletionFromTransactionSpec.scala index e5db07c455..1b9d6563b8 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/CompletionFromTransactionSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/CompletionFromTransactionSpec.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton.platform.store import com.daml.ledger.api.v2.completion.Completion.DeduplicationPeriod import com.digitalasset.canton.TestEssentials import com.digitalasset.canton.data.Offset +import com.digitalasset.canton.protocol.TestUpdateId import com.digitalasset.daml.lf.data.Time import com.google.protobuf.duration.Duration import com.google.protobuf.timestamp.Timestamp @@ -73,7 +74,7 @@ class CompletionFromTransactionSpec Time.Timestamp.Epoch, Offset.firstOffset, "commandId", - "transactionId", + TestUpdateId("transactionId"), "userId", "synchronizer id", traceContext, @@ -88,7 +89,7 @@ class CompletionFromTransactionSpec completion.offset shouldBe 1L completion.commandId shouldBe "commandId" - completion.updateId shouldBe "transactionId" + completion.updateId shouldBe TestUpdateId("transactionId").toHexString completion.userId shouldBe "userId" completion.submissionId shouldBe expectedSubmissionId completion.deduplicationPeriod shouldBe expectedDeduplicationPeriod @@ -110,7 +111,7 @@ class CompletionFromTransactionSpec Time.Timestamp.Epoch, Offset.firstOffset, "commandId", - "transactionId", + 
TestUpdateId("transactionId"), "userId", "synchronizer id", traceContext, diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/DbDtoEq.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/DbDtoEq.scala deleted file mode 100644 index 5017cfa860..0000000000 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/DbDtoEq.scala +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.platform.store.backend - -import org.scalactic.Equality -import org.scalatest.matchers.should.Matchers - -import scala.annotation.nowarn - -// DbDto case classes contain serialized values in Arrays (sometimes wrapped in Options), -// because this representation can efficiently be passed to Jdbc. -// Using Arrays means DbDto instances are not comparable, so we have to define a custom equality operator. -object DbDtoEq extends Matchers { - - @nowarn("cat=lint-infer-any") - val DbDtoEq: org.scalactic.Equality[DbDto] = { - case (a: DbDto, b: DbDto) => - (a.productPrefix === b.productPrefix) && - (a.productArity == b.productArity) && - (a.productIterator zip b.productIterator).forall { - case (x: Array[_], y: Array[_]) => x sameElements y - case (Some(x: Array[_]), Some(y: Array[_])) => x sameElements y - case (x, y) => x === y - } - case (_, _) => false - } - - val DbDtoSeqEq: org.scalactic.Equality[Seq[DbDto]] = { - case (a: Seq[_], b: Seq[_]) => - a.sizeCompare(b) == 0 && a.zip(b).forall { case (x, y) => DbDtoEq.areEqual(x, y) } - case (_, _) => false - } - - implicit val eqOptArray: Equality[Option[Array[Byte]]] = (first: Option[Array[Byte]], b: Any) => { - val second = Option(b).getOrElse(Some[Array[Byte]]).asInstanceOf[Option[Array[Byte]]] - (first, second) match { - case (None, None) => true - case (None, Some(s)) => s.isEmpty - case (Some(f), None) => f.isEmpty - case (Some(f), Some(s)) => f === s - } - } - -} diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/DbDtoEqSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/DbDtoEqSpec.scala deleted file mode 100644 index 573ec15967..0000000000 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/DbDtoEqSpec.scala +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.platform.store.backend - -import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.AnyWordSpec - -class DbDtoEqSpec extends AnyWordSpec with Matchers { - - import DbDtoEq.* - - "DbDtoEq" should { - - "compare DbDto when used with `decided` keyword" in { - - val dto0 = DbDto.StringInterningDto( - internalId = 1337, - externalString = "leet", - ) - - val dto1 = dto0.copy() - val dto2 = dto0.copy() - - dto0 should equal(dto0) // Works due to object equality shortcut - dto1 shouldNot equal(dto2) // As equality is overridden to be false with DbDto - dto1 should equal(dto2)(decided by DbDtoEq) - List(dto1) should equal(List(dto2))(decided by DbDtoSeqEq) - - } - - } - -} diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/DbDtoSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/DbDtoSpec.scala new file mode 100644 index 0000000000..ef2bbab639 --- /dev/null +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/DbDtoSpec.scala @@ -0,0 +1,560 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.platform.store.backend + +import com.digitalasset.canton.platform.store.backend.DbDto.IdFilter +import com.digitalasset.canton.protocol.TestUpdateId +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpec + +class DbDtoSpec extends AnyWordSpec with Matchers { + import StorageBackendTestValues.* + implicit private val DbDtoEqual: org.scalactic.Equality[DbDto] = ScalatestEqualityHelpers.DbDtoEq + + val updateId = TestUpdateId("mock_hash") + val updateIdByteArray = updateId.toProtoPrimitive.toByteArray + + "DbDto.createDbDtos" should { + "populate correct DbDtos" in { + DbDto + .createDbDtos( + event_offset = 1, + update_id = updateIdByteArray, + workflow_id = Some("w"), + command_id = Some("c"), + submitters = Some(Set("party")), + record_time = 2, + synchronizer_id = someSynchronizerId, + trace_context = serializableTraceContext, + external_transaction_hash = Some(someExternalTransactionHashBinary), + event_sequential_id = 3, + node_id = 4, + additional_witnesses = Set("party2"), + representative_package_id = someRepresentativePackageId, + notPersistedContractId = hashCid("1"), + internal_contract_id = 3, + create_key_hash = Some("hash"), + )( + stakeholders = Set("party3", "party4"), + template_id = "template", + ) + .toList should contain theSameElementsInOrderAs List( + DbDto.EventActivate( + event_offset = 1, + update_id = updateIdByteArray, + workflow_id = Some("w"), + command_id = Some("c"), + submitters = Some(Set("party")), + record_time = 2, + synchronizer_id = someSynchronizerId, + trace_context = serializableTraceContext, + external_transaction_hash = Some(someExternalTransactionHashBinary), + event_type = PersistentEventType.Create.asInt, + event_sequential_id = 3, + node_id = 4, + additional_witnesses = Some(Set("party2")), + source_synchronizer_id = None, + reassignment_counter = None, + reassignment_id = None, + representative_package_id = someRepresentativePackageId, + notPersistedContractId = hashCid("1"), + internal_contract_id = 3, + create_key_hash = Some("hash"), + ), + DbDto.IdFilterActivateStakeholder( + IdFilter( + event_sequential_id = 3, + template_id = 
"template", + party_id = "party3", + first_per_sequential_id = true, + ) + ), + DbDto.IdFilterActivateStakeholder( + IdFilter( + event_sequential_id = 3, + template_id = "template", + party_id = "party4", + first_per_sequential_id = false, + ) + ), + DbDto.IdFilterActivateWitness( + IdFilter( + event_sequential_id = 3, + template_id = "template", + party_id = "party2", + first_per_sequential_id = true, + ) + ), + ) + } + } + + "DbDto.assignDbDtos" should { + "populate correct DbDtos" in { + DbDto + .assignDbDtos( + event_offset = 1, + update_id = updateIdByteArray, + workflow_id = Some("w"), + command_id = Some("c"), + submitter = Some("party"), + record_time = 2, + synchronizer_id = someSynchronizerId, + trace_context = serializableTraceContext, + event_sequential_id = 3, + node_id = 4, + source_synchronizer_id = someSynchronizerId2, + reassignment_counter = 19, + reassignment_id = Array(1, 2), + representative_package_id = someRepresentativePackageId, + notPersistedContractId = hashCid("1"), + internal_contract_id = 3, + )( + stakeholders = Set("party3", "party4"), + template_id = "template", + ) + .toList should contain theSameElementsInOrderAs List( + DbDto.EventActivate( + event_offset = 1, + update_id = updateIdByteArray, + workflow_id = Some("w"), + command_id = Some("c"), + submitters = Some(Set("party")), + record_time = 2, + synchronizer_id = someSynchronizerId, + trace_context = serializableTraceContext, + external_transaction_hash = None, + event_type = PersistentEventType.Assign.asInt, + event_sequential_id = 3, + node_id = 4, + additional_witnesses = None, + source_synchronizer_id = Some(someSynchronizerId2), + reassignment_counter = Some(19), + reassignment_id = Some(Array(1, 2)), + representative_package_id = someRepresentativePackageId, + notPersistedContractId = hashCid("1"), + internal_contract_id = 3, + create_key_hash = None, + ), + DbDto.IdFilterActivateStakeholder( + IdFilter( + event_sequential_id = 3, + template_id = "template", + party_id = "party3", + first_per_sequential_id = true, + ) + ), + DbDto.IdFilterActivateStakeholder( + IdFilter( + event_sequential_id = 3, + template_id = "template", + party_id = "party4", + first_per_sequential_id = false, + ) + ), + ) + } + } + + "DbDto.consumingExerciseDbDtos" should { + "populate correct DbDtos" in { + DbDto + .consumingExerciseDbDtos( + event_offset = 1, + update_id = updateIdByteArray, + workflow_id = Some("w"), + command_id = Some("c"), + submitters = Some(Set("party")), + record_time = 2, + synchronizer_id = someSynchronizerId, + trace_context = serializableTraceContext, + external_transaction_hash = Some(someExternalTransactionHashBinary), + event_sequential_id = 3, + node_id = 4, + deactivated_event_sequential_id = Some(10), + additional_witnesses = Set("party2"), + exercise_choice = "choice", + exercise_choice_interface_id = Some("interface"), + exercise_argument = Array(1, 2, 3), + exercise_result = Some(Array(1, 2, 3, 4)), + exercise_actors = Set("party5"), + exercise_last_descendant_node_id = 10, + exercise_argument_compression = Some(1), + exercise_result_compression = Some(2), + contract_id = hashCid("23"), + internal_contract_id = Some(3), + template_id = "template", + package_id = "package", + stakeholders = Set("1", "2", "3"), + ledger_effective_time = 13, + ) + .toList should contain theSameElementsInOrderAs List( + DbDto.EventDeactivate( + event_offset = 1, + update_id = updateIdByteArray, + workflow_id = Some("w"), + command_id = Some("c"), + submitters = Some(Set("party")), + record_time = 2, + 
synchronizer_id = someSynchronizerId, + trace_context = serializableTraceContext, + external_transaction_hash = Some(someExternalTransactionHashBinary), + event_type = PersistentEventType.ConsumingExercise.asInt, + event_sequential_id = 3, + node_id = 4, + deactivated_event_sequential_id = Some(10), + additional_witnesses = Some(Set("party2")), + exercise_choice = Some("choice"), + exercise_choice_interface_id = Some("interface"), + exercise_argument = Some(Array(1, 2, 3)), + exercise_result = Some(Array(1, 2, 3, 4)), + exercise_actors = Some(Set("party5")), + exercise_last_descendant_node_id = Some(10), + exercise_argument_compression = Some(1), + exercise_result_compression = Some(2), + reassignment_id = None, + assignment_exclusivity = None, + target_synchronizer_id = None, + reassignment_counter = None, + contract_id = hashCid("23"), + internal_contract_id = Some(3), + template_id = "template", + package_id = "package", + stakeholders = Set("1", "2", "3"), + ledger_effective_time = Some(13), + ), + DbDto.IdFilterDeactivateStakeholder( + IdFilter( + event_sequential_id = 3, + template_id = "template", + party_id = "1", + first_per_sequential_id = true, + ) + ), + DbDto.IdFilterDeactivateStakeholder( + IdFilter( + event_sequential_id = 3, + template_id = "template", + party_id = "2", + first_per_sequential_id = false, + ) + ), + DbDto.IdFilterDeactivateStakeholder( + IdFilter( + event_sequential_id = 3, + template_id = "template", + party_id = "3", + first_per_sequential_id = false, + ) + ), + DbDto.IdFilterDeactivateWitness( + IdFilter( + event_sequential_id = 3, + template_id = "template", + party_id = "party2", + first_per_sequential_id = true, + ) + ), + ) + } + } + + "DbDto.unassignDbDtos" should { + "populate correct DbDtos" in { + DbDto + .unassignDbDtos( + event_offset = 1, + update_id = updateIdByteArray, + workflow_id = Some("w"), + command_id = Some("c"), + submitter = Some("party"), + record_time = 2, + synchronizer_id = someSynchronizerId, + trace_context = serializableTraceContext, + event_sequential_id = 3, + node_id = 4, + deactivated_event_sequential_id = Some(10), + reassignment_id = Array(2, 3, 4), + assignment_exclusivity = Some(10), + target_synchronizer_id = someSynchronizerId2, + reassignment_counter = 234, + contract_id = hashCid("23"), + internal_contract_id = Some(3), + template_id = "template", + package_id = "package", + stakeholders = Set("1", "2", "3"), + ) + .toList should contain theSameElementsInOrderAs List( + DbDto.EventDeactivate( + event_offset = 1, + update_id = updateIdByteArray, + workflow_id = Some("w"), + command_id = Some("c"), + submitters = Some(Set("party")), + record_time = 2, + synchronizer_id = someSynchronizerId, + trace_context = serializableTraceContext, + external_transaction_hash = None, + event_type = PersistentEventType.Unassign.asInt, + event_sequential_id = 3, + node_id = 4, + deactivated_event_sequential_id = Some(10), + additional_witnesses = None, + exercise_choice = None, + exercise_choice_interface_id = None, + exercise_argument = None, + exercise_result = None, + exercise_actors = None, + exercise_last_descendant_node_id = None, + exercise_argument_compression = None, + exercise_result_compression = None, + reassignment_id = Some(Array(2, 3, 4)), + assignment_exclusivity = Some(10), + target_synchronizer_id = Some(someSynchronizerId2), + reassignment_counter = Some(234), + contract_id = hashCid("23"), + internal_contract_id = Some(3), + template_id = "template", + package_id = "package", + stakeholders = Set("1", "2", "3"), 
+ ledger_effective_time = None, + ), + DbDto.IdFilterDeactivateStakeholder( + IdFilter( + event_sequential_id = 3, + template_id = "template", + party_id = "1", + first_per_sequential_id = true, + ) + ), + DbDto.IdFilterDeactivateStakeholder( + IdFilter( + event_sequential_id = 3, + template_id = "template", + party_id = "2", + first_per_sequential_id = false, + ) + ), + DbDto.IdFilterDeactivateStakeholder( + IdFilter( + event_sequential_id = 3, + template_id = "template", + party_id = "3", + first_per_sequential_id = false, + ) + ), + ) + } + } + + "DbDto.witnessedExercisedDbDtos" should { + "populate correct DbDtos for witnessed consuming exercise" in { + DbDto + .witnessedExercisedDbDtos( + event_offset = 1, + update_id = updateIdByteArray, + workflow_id = Some("w"), + command_id = Some("c"), + submitters = Some(Set("party")), + record_time = 2, + synchronizer_id = someSynchronizerId, + trace_context = serializableTraceContext, + external_transaction_hash = Some(someExternalTransactionHashBinary), + event_sequential_id = 3, + node_id = 4, + additional_witnesses = Set("party2"), + consuming = true, + exercise_choice = "choice", + exercise_choice_interface_id = Some("interface"), + exercise_argument = Array(1, 2, 3), + exercise_result = Some(Array(1, 2, 3, 4)), + exercise_actors = Set("party5"), + exercise_last_descendant_node_id = 10, + exercise_argument_compression = Some(1), + exercise_result_compression = Some(2), + contract_id = hashCid("23"), + internal_contract_id = Some(3), + template_id = "template", + package_id = "package", + ledger_effective_time = 13, + ) + .toList should contain theSameElementsInOrderAs List( + DbDto.EventVariousWitnessed( + event_offset = 1, + update_id = updateIdByteArray, + workflow_id = Some("w"), + command_id = Some("c"), + submitters = Some(Set("party")), + record_time = 2, + synchronizer_id = someSynchronizerId, + trace_context = serializableTraceContext, + external_transaction_hash = Some(someExternalTransactionHashBinary), + event_type = PersistentEventType.WitnessedConsumingExercise.asInt, + event_sequential_id = 3, + node_id = 4, + additional_witnesses = Set("party2"), + consuming = Some(true), + exercise_choice = Some("choice"), + exercise_choice_interface_id = Some("interface"), + exercise_argument = Some(Array(1, 2, 3)), + exercise_result = Some(Array(1, 2, 3, 4)), + exercise_actors = Some(Set("party5")), + exercise_last_descendant_node_id = Some(10), + exercise_argument_compression = Some(1), + exercise_result_compression = Some(2), + representative_package_id = None, + contract_id = Some(hashCid("23")), + internal_contract_id = Some(3), + template_id = Some("template"), + package_id = Some("package"), + ledger_effective_time = Some(13), + ), + DbDto.IdFilterVariousWitness( + IdFilter( + event_sequential_id = 3, + template_id = "template", + party_id = "party2", + first_per_sequential_id = true, + ) + ), + ) + } + } + + "DbDto.witnessedExercisedDbDtos" should { + "populate correct DbDtos for witnessed non consuming exercise" in { + DbDto + .witnessedExercisedDbDtos( + event_offset = 1, + update_id = updateIdByteArray, + workflow_id = Some("w"), + command_id = Some("c"), + submitters = Some(Set("party")), + record_time = 2, + synchronizer_id = someSynchronizerId, + trace_context = serializableTraceContext, + external_transaction_hash = Some(someExternalTransactionHashBinary), + event_sequential_id = 3, + node_id = 4, + additional_witnesses = Set("party2"), + consuming = false, + exercise_choice = "choice", + exercise_choice_interface_id = 
Some("interface"), + exercise_argument = Array(1, 2, 3), + exercise_result = Some(Array(1, 2, 3, 4)), + exercise_actors = Set("party5"), + exercise_last_descendant_node_id = 10, + exercise_argument_compression = Some(1), + exercise_result_compression = Some(2), + contract_id = hashCid("23"), + internal_contract_id = Some(3), + template_id = "template", + package_id = "package", + ledger_effective_time = 13, + ) + .toList should contain theSameElementsInOrderAs List( + DbDto.EventVariousWitnessed( + event_offset = 1, + update_id = updateIdByteArray, + workflow_id = Some("w"), + command_id = Some("c"), + submitters = Some(Set("party")), + record_time = 2, + synchronizer_id = someSynchronizerId, + trace_context = serializableTraceContext, + external_transaction_hash = Some(someExternalTransactionHashBinary), + event_type = PersistentEventType.NonConsumingExercise.asInt, + event_sequential_id = 3, + node_id = 4, + additional_witnesses = Set("party2"), + consuming = Some(false), + exercise_choice = Some("choice"), + exercise_choice_interface_id = Some("interface"), + exercise_argument = Some(Array(1, 2, 3)), + exercise_result = Some(Array(1, 2, 3, 4)), + exercise_actors = Some(Set("party5")), + exercise_last_descendant_node_id = Some(10), + exercise_argument_compression = Some(1), + exercise_result_compression = Some(2), + representative_package_id = None, + contract_id = Some(hashCid("23")), + internal_contract_id = Some(3), + template_id = Some("template"), + package_id = Some("package"), + ledger_effective_time = Some(13), + ), + DbDto.IdFilterVariousWitness( + IdFilter( + event_sequential_id = 3, + template_id = "template", + party_id = "party2", + first_per_sequential_id = true, + ) + ), + ) + } + } + + "DbDto.witnessedCreateDbDtos" should { + "populate correct DbDtos" in { + DbDto + .witnessedCreateDbDtos( + event_offset = 1, + update_id = updateIdByteArray, + workflow_id = Some("w"), + command_id = Some("c"), + submitters = Some(Set("party")), + record_time = 2, + synchronizer_id = someSynchronizerId, + trace_context = serializableTraceContext, + external_transaction_hash = Some(someExternalTransactionHashBinary), + event_sequential_id = 3, + node_id = 4, + additional_witnesses = Set("party2"), + representative_package_id = someRepresentativePackageId, + internal_contract_id = 3, + )(template_id = "template") + .toList should contain theSameElementsInOrderAs List( + DbDto.EventVariousWitnessed( + event_offset = 1, + update_id = updateIdByteArray, + workflow_id = Some("w"), + command_id = Some("c"), + submitters = Some(Set("party")), + record_time = 2, + synchronizer_id = someSynchronizerId, + trace_context = serializableTraceContext, + external_transaction_hash = Some(someExternalTransactionHashBinary), + event_type = PersistentEventType.WitnessedCreate.asInt, + event_sequential_id = 3, + node_id = 4, + additional_witnesses = Set("party2"), + consuming = None, + exercise_choice = None, + exercise_choice_interface_id = None, + exercise_argument = None, + exercise_result = None, + exercise_actors = None, + exercise_last_descendant_node_id = None, + exercise_argument_compression = None, + exercise_result_compression = None, + representative_package_id = Some(someRepresentativePackageId), + contract_id = None, + internal_contract_id = Some(3), + template_id = None, + package_id = None, + ledger_effective_time = None, + ), + DbDto.IdFilterVariousWitness( + IdFilter( + event_sequential_id = 3, + template_id = "template", + party_id = "party2", + first_per_sequential_id = true, + ) + ), + ) + } 
+ } +} diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/DbDtoToStringsForInterningSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/DbDtoToStringsForInterningSpec.scala index 9ee8aefdb8..02c8b75a87 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/DbDtoToStringsForInterningSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/DbDtoToStringsForInterningSpec.scala @@ -5,8 +5,10 @@ package com.digitalasset.canton.platform.store.backend import com.digitalasset.canton.crypto.HashAlgorithm.Sha256 import com.digitalasset.canton.crypto.{Hash, HashPurpose} +import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.tracing.SerializableTraceContextConverter.SerializableTraceContextExtension import com.digitalasset.canton.tracing.{SerializableTraceContext, TraceContext} +import com.digitalasset.daml.lf.value.Value.ContractId import com.google.protobuf.ByteString import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers @@ -16,6 +18,8 @@ class DbDtoToStringsForInterningSpec extends AnyFlatSpec with Matchers { it should "select all relevant strings for interning" in { val iterators = DbDtoToStringsForInterning(fixture) iterators.templateIds.toList.sorted shouldBe List( + "08", + "09", "25", "50", "87", @@ -23,6 +27,23 @@ class DbDtoToStringsForInterningSpec extends AnyFlatSpec with Matchers { ).sorted iterators.parties.toList.sorted shouldBe List( "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10", + "11", + "12", + "13", + "14", + "15", + "16", + "17", + "18", + "19", "20", "21", "22", @@ -63,23 +84,51 @@ class DbDtoToStringsForInterningSpec extends AnyFlatSpec with Matchers { "s2", "95", "96", + "97", ).sorted - iterators.synchronizerIds.toList.sorted shouldBe List( - "synchronizer2", - "synchronizer3", - "synchronizer4", - "synchronizer5", - "synchronizer6", - "synchronizer7", - "synchronizer8", - "synchronizer9", + iterators.synchronizerIds.toList.map(_.toProtoPrimitive).sorted shouldBe List( + "x::synchronizer1", + "x::synchronizer1b", + "x::synchronizer1c", + "x::synchronizer1d", + "x::synchronizer1e", + "x::synchronizer2", + "x::synchronizer3", + "x::synchronizer4", + "x::synchronizer5", + "x::synchronizer6", + "x::synchronizer7", + "x::synchronizer8", + "x::synchronizer9", + "x::synchronizer10", ).sorted iterators.packageIds.toList.sorted shouldBe List( + "11.1", + "11.2", + "11.3", + "11.4", "25.1", + "25.2", "50.1", "87.1", "94.1", ).sorted + iterators.userIds.toList.sorted shouldBe List( + "65" + ).sorted + iterators.participantIds.toList.sorted shouldBe List( + "participant1" + ).sorted + iterators.choiceNames.toList.sorted shouldBe List( + "c_42", + "c_44", + "c_60", + ).sorted + iterators.interfaceIds.toList.sorted shouldBe List( + "43", + "45", + "61", + ).sorted } private val serializableTraceContext = @@ -105,18 +154,105 @@ class DbDtoToStringsForInterningSpec extends AnyFlatSpec with Matchers { internalId = 1, externalString = "5", ), + DbDto.EventActivate( + event_offset = 10, + update_id = updateId("10"), + workflow_id = Some("10"), + command_id = Some("11"), + submitters = Some(Set("3", "4", "5")), + record_time = 1, + synchronizer_id = SynchronizerId.tryFromString("x::synchronizer1"), + trace_context = serializableTraceContext, + external_transaction_hash = 
Some(externalTransactionHash), + event_type = 5, + event_sequential_id = 1, + node_id = 1, + additional_witnesses = Some(Set("6", "7")), + source_synchronizer_id = Some(SynchronizerId.tryFromString("x::synchronizer1b")), + reassignment_counter = None, + reassignment_id = None, + representative_package_id = "11.1", + notPersistedContractId = hashCid("24"), + internal_contract_id = 55, + create_key_hash = None, + ), + DbDto.EventDeactivate( + event_offset = 11, + update_id = updateId("11"), + workflow_id = Some("11"), + command_id = Some("12"), + submitters = Some(Set("8", "9", "10")), + record_time = 1, + synchronizer_id = SynchronizerId.tryFromString("x::synchronizer1c"), + trace_context = serializableTraceContext, + external_transaction_hash = Some(externalTransactionHash), + event_type = 5, + event_sequential_id = 1, + node_id = 1, + deactivated_event_sequential_id = None, + additional_witnesses = Some(Set("11", "12")), + exercise_choice = Some("c_42"), + exercise_choice_interface_id = Some("43"), + exercise_argument = None, + exercise_result = None, + exercise_actors = Some(Set("13")), + exercise_last_descendant_node_id = None, + exercise_argument_compression = None, + exercise_result_compression = None, + reassignment_id = None, + assignment_exclusivity = None, + target_synchronizer_id = Some(SynchronizerId.tryFromString("x::synchronizer1d")), + reassignment_counter = None, + contract_id = hashCid("56"), + internal_contract_id = Some(57), + template_id = "08", + package_id = "11.2", + stakeholders = Set("14"), + ledger_effective_time = None, + ), + DbDto.EventVariousWitnessed( + event_offset = 12, + update_id = updateId("12"), + workflow_id = Some("12"), + command_id = Some("13"), + submitters = Some(Set("15", "16")), + record_time = 1, + synchronizer_id = SynchronizerId.tryFromString("x::synchronizer1e"), + trace_context = serializableTraceContext, + external_transaction_hash = Some(externalTransactionHash), + event_type = 5, + event_sequential_id = 1, + node_id = 1, + additional_witnesses = Set("17", "18"), + consuming = Some(false), + exercise_choice = Some("c_44"), + exercise_choice_interface_id = Some("45"), + exercise_argument = None, + exercise_result = None, + exercise_actors = Some(Set("19")), + exercise_last_descendant_node_id = None, + exercise_argument_compression = None, + exercise_result_compression = None, + representative_package_id = Some("11.3"), + contract_id = Some(hashCid("57")), + internal_contract_id = Some(58), + template_id = Some("09"), + package_id = Some("11.4"), + ledger_effective_time = None, + ), DbDto.EventCreate( event_offset = 15, - update_id = "16", + update_id = updateId("16"), ledger_effective_time = 1, command_id = Some("17"), workflow_id = Some("18"), user_id = Some("19"), submitters = Some(Set("20", "21", "22")), node_id = 1, - contract_id = Array(24), + contract_id = hashCid("24"), template_id = "25", package_id = "25.1", + representative_package_id = "25.2", flat_event_witnesses = Set("26", "27", "28"), tree_event_witnesses = Set("29", "30", "31"), create_argument = Array.empty, @@ -129,22 +265,23 @@ class DbDtoToStringsForInterningSpec extends AnyFlatSpec with Matchers { create_key_value_compression = Some(1), event_sequential_id = 1, authentication_data = Array.empty, - synchronizer_id = "synchronizer2", + synchronizer_id = SynchronizerId.tryFromString("x::synchronizer2"), trace_context = serializableTraceContext, record_time = 1, external_transaction_hash = Some(externalTransactionHash), + internal_contract_id = 101, ), DbDto.EventExercise( 
consuming = true, event_offset = 40, - update_id = "41", + update_id = updateId("41"), ledger_effective_time = 1, command_id = Some("42"), workflow_id = Some("43"), user_id = Some("44"), submitters = Some(Set("45", "46", "47")), node_id = 1, - contract_id = Array(49), + contract_id = hashCid("49"), template_id = "50", package_id = "50.1", flat_event_witnesses = Set("51", "52", "53"), @@ -153,14 +290,16 @@ class DbDtoToStringsForInterningSpec extends AnyFlatSpec with Matchers { exercise_actors = Set("57", "58", "59"), exercise_argument_compression = Some(1), event_sequential_id = 1, - exercise_choice = "60", + exercise_choice = "c_60", + exercise_choice_interface_id = Some("61"), exercise_result = None, exercise_last_descendant_node_id = 63, exercise_result_compression = Some(1), - synchronizer_id = "synchronizer3", + synchronizer_id = SynchronizerId.tryFromString("x::synchronizer3"), trace_context = serializableTraceContext, record_time = 1, external_transaction_hash = Some(externalTransactionHash), + deactivated_event_sequential_id = None, ), DbDto.CommandCompletion( completion_offset = 64, @@ -169,7 +308,7 @@ class DbDtoToStringsForInterningSpec extends AnyFlatSpec with Matchers { user_id = "65", submitters = Set("66", "67", "68"), command_id = "69", - update_id = Some("70"), + update_id = Some(updateId("70")), rejection_status_code = Some(1), rejection_status_message = Some("71"), rejection_status_details = None, @@ -177,19 +316,19 @@ class DbDtoToStringsForInterningSpec extends AnyFlatSpec with Matchers { deduplication_offset = Some(73), deduplication_duration_seconds = Some(1), deduplication_duration_nanos = Some(1), - synchronizer_id = "synchronizer4", + synchronizer_id = SynchronizerId.tryFromString("x::synchronizer4"), message_uuid = None, is_transaction = true, trace_context = serializableTraceContext, ), DbDto.EventAssign( event_offset = 1, - update_id = "", + update_id = updateId(""), command_id = None, workflow_id = None, submitter = Option("s1"), node_id = 0, - contract_id = Array(114), + contract_id = hashCid("114"), template_id = "87", package_id = "87.1", flat_event_witnesses = Set("88", "89"), @@ -204,34 +343,52 @@ class DbDtoToStringsForInterningSpec extends AnyFlatSpec with Matchers { event_sequential_id = 0, ledger_effective_time = 0, authentication_data = Array.empty, - source_synchronizer_id = "synchronizer5", - target_synchronizer_id = "synchronizer6", - reassignment_id = "", + source_synchronizer_id = SynchronizerId.tryFromString("x::synchronizer5"), + target_synchronizer_id = SynchronizerId.tryFromString("x::synchronizer6"), + reassignment_id = new Array[Byte](0), reassignment_counter = 0, trace_context = serializableTraceContext, record_time = 0, + internal_contract_id = 102, ), DbDto.EventUnassign( event_offset = 1, - update_id = "", + update_id = updateId(""), command_id = None, workflow_id = None, submitter = Option("s2"), node_id = 0, - contract_id = Array(115), + contract_id = hashCid("115"), template_id = "94", package_id = "94.1", flat_event_witnesses = Set("95", "96"), event_sequential_id = 0, - source_synchronizer_id = "synchronizer7", - target_synchronizer_id = "synchronizer8", - reassignment_id = "", + source_synchronizer_id = SynchronizerId.tryFromString("x::synchronizer7"), + target_synchronizer_id = SynchronizerId.tryFromString("x::synchronizer8"), + reassignment_id = new Array[Byte](0), reassignment_counter = 0, assignment_exclusivity = None, trace_context = serializableTraceContext, record_time = 0, + deactivated_event_sequential_id = None, + ), + 
DbDto.SequencerIndexMoved(SynchronizerId.tryFromString("x::synchronizer9")), + DbDto.EventPartyToParticipant( + event_sequential_id = 0, + event_offset = 1, + update_id = updateId(""), + party_id = "97", + participant_id = "participant1", + participant_permission = 1, + participant_authorization_event = 2, + synchronizer_id = SynchronizerId.tryFromString("x::synchronizer10"), + record_time = 0, + trace_context = Array.empty, ), - DbDto.SequencerIndexMoved("synchronizer9"), ) + private def hashCid(key: String): ContractId = + ContractId.V1(com.digitalasset.daml.lf.crypto.Hash.hashPrivateKey(key)) + + private def updateId(key: String): Array[Byte] = key.getBytes } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/ScalatestEqualityHelpers.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/ScalatestEqualityHelpers.scala new file mode 100644 index 0000000000..52275aa35b --- /dev/null +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/ScalatestEqualityHelpers.scala @@ -0,0 +1,60 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.platform.store.backend + +import org.scalactic.Equality +import org.scalatest.matchers.should.Matchers + +import scala.annotation.nowarn + +// DbDto case classes contain serialized values in Arrays (sometimes wrapped in Options), +// because this representation can efficiently be passed to Jdbc. +// Using Arrays means DbDto instances are not comparable, so we have to define a custom equality operator. +object ScalatestEqualityHelpers extends Matchers { + + @nowarn("cat=lint-infer-any") + val DbDtoEq: org.scalactic.Equality[DbDto] = { + case (a: DbDto, b: DbDto) => + (a.productPrefix === b.productPrefix) && + (a.productArity == b.productArity) && + (a.productIterator zip b.productIterator).forall { + case (x: Array[_], y: Array[_]) => x sameElements y + case (Some(x: Array[_]), Some(y: Array[_])) => x sameElements y + case (x, y) => x === y + } + case (_, _) => false + } + + @nowarn("cat=lint-infer-any") + @SuppressWarnings(Array("org.wartremover.warts.Product")) + def caseClassArrayEq[T <: Product]: org.scalactic.Equality[T] = { + case (a: Product, b: Product) => + (a.productPrefix === b.productPrefix) && + (a.productArity == b.productArity) && + (a.productIterator zip b.productIterator).forall { + case (x: Array[_], y: Array[_]) => x sameElements y + case (Some(x: Array[_]), Some(y: Array[_])) => x sameElements y + case (p1: Product, p2: Product) => caseClassArrayEq.areEquivalent(p1, p2) + case (x, y) => x === y + } + case (_, _) => false + } + + val DbDtoSeqEq: org.scalactic.Equality[Seq[DbDto]] = { + case (a: Seq[_], b: Seq[_]) => + a.sizeCompare(b) == 0 && a.zip(b).forall { case (x, y) => DbDtoEq.areEqual(x, y) } + case (_, _) => false + } + + implicit val eqOptArray: Equality[Option[Array[Byte]]] = (first: Option[Array[Byte]], b: Any) => { + val second = Option(b).getOrElse(None).asInstanceOf[Option[Array[Byte]]] + (first, second) match { + case (None, None) => true + case (None, Some(s)) => s.isEmpty + case (Some(f), None) => f.isEmpty + case (Some(f), Some(s)) => f === s + } + } + +} diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/ScalatestEqualityHelpersSpec.scala
b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/ScalatestEqualityHelpersSpec.scala new file mode 100644 index 0000000000..81eaceb752 --- /dev/null +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/ScalatestEqualityHelpersSpec.scala @@ -0,0 +1,34 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.platform.store.backend + +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpec + +class ScalatestEqualityHelpersSpec extends AnyWordSpec with Matchers { + + import ScalatestEqualityHelpers.* + + "DbDtoEq" should { + + "compare DbDto when used with `decided` keyword" in { + + val dto0 = DbDto.StringInterningDto( + internalId = 1337, + externalString = "leet", + ) + + val dto1 = dto0.copy() + val dto2 = dto0.copy() + + dto0 should equal(dto0) // Works due to object equality shortcut + dto1 shouldNot equal(dto2) // As equality is overridden to be false with DbDto + dto1 should equal(dto2)(decided by DbDtoEq) + List(dto1) should equal(List(dto2))(decided by DbDtoSeqEq) + + } + + } + +} diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendProvider.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendProvider.scala index a7851a471a..71b634a8a8 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendProvider.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendProvider.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton.platform.store.backend import com.digitalasset.canton.BaseTest import com.digitalasset.canton.data.{CantonTimestamp, Offset} import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.platform.store.PruningOffsetService import com.digitalasset.canton.platform.store.backend.ParameterStorageBackend.LedgerEnd import com.digitalasset.canton.platform.store.backend.h2.H2StorageBackendFactory import com.digitalasset.canton.platform.store.backend.localstore.{ @@ -17,6 +18,7 @@ import com.digitalasset.canton.platform.store.backend.postgresql.PostgresStorage import com.digitalasset.canton.platform.store.cache.MutableLedgerEndCache import com.digitalasset.canton.platform.store.interning.MockStringInterning import com.digitalasset.canton.platform.store.testing.postgresql.PostgresAroundAll +import org.mockito.MockitoSugar.mock import org.scalatest.Suite import java.sql.Connection @@ -87,6 +89,7 @@ trait StorageBackendProviderH2 extends StorageBackendProvider with BaseTest { th final case class TestBackend( ingestion: IngestionStorageBackend[_], parameter: ParameterStorageBackend, + pruningOffsetService: PruningOffsetService, party: PartyStorageBackend, completion: CompletionStorageBackend, contract: ContractStorageBackend, @@ -115,10 +118,12 @@ object TestBackend { TestBackend( ingestion = storageBackendFactory.createIngestionStorageBackend, parameter = storageBackendFactory.createParameterStorageBackend(stringInterning), + pruningOffsetService = mock[PruningOffsetService], party = storageBackendFactory.createPartyStorageBackend(ledgerEndCache), completion = 
storageBackendFactory.createCompletionStorageBackend(stringInterning, loggerFactory), - contract = storageBackendFactory.createContractStorageBackend(stringInterning), + contract = + storageBackendFactory.createContractStorageBackend(stringInterning, ledgerEndCache), event = storageBackendFactory .createEventStorageBackend(ledgerEndCache, stringInterning, loggerFactory), dataSource = storageBackendFactory.createDataSourceStorageBackend, diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendSuite.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendSuite.scala index 4481459853..97c840f99a 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendSuite.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendSuite.scala @@ -8,6 +8,7 @@ import org.scalatest.flatspec.AnyFlatSpec trait StorageBackendSuite extends StorageBackendTestsInitialization with StorageBackendTestsInitializeIngestion + with StorageBackendTestsConversions with StorageBackendTestsParties with StorageBackendTestsEvents with StorageBackendTestsTransactionStreamsEvents diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestValues.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestValues.scala index af76c51df6..3fb61a1848 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestValues.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestValues.scala @@ -3,7 +3,8 @@ package com.digitalasset.canton.platform.store.backend -import com.digitalasset.canton.data +import com.digitalasset.canton.crypto.HashAlgorithm.Sha256 +import com.digitalasset.canton.crypto.{Hash as CantonHash, HashPurpose} import com.digitalasset.canton.data.{CantonTimestamp, Offset} import com.digitalasset.canton.ledger.api.ParticipantId import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective.AuthorizationEvent.Added @@ -16,12 +17,13 @@ import com.digitalasset.canton.platform.store.backend.Conversions.{ participantPermissionInt, } import com.digitalasset.canton.platform.store.dao.JdbcLedgerDao +import com.digitalasset.canton.protocol.{ReassignmentId, TestUpdateId, UpdateId} import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.tracing.SerializableTraceContextConverter.SerializableTraceContextExtension import com.digitalasset.canton.tracing.{SerializableTraceContext, TraceContext} import com.digitalasset.daml.lf.archive.DamlLf import com.digitalasset.daml.lf.crypto.Hash -import com.digitalasset.daml.lf.data.Ref.{NameTypeConRef, NameTypeConRefConverter} +import com.digitalasset.daml.lf.data.Ref.{Identifier, NameTypeConRef, NameTypeConRefConverter} import com.digitalasset.daml.lf.data.Time.Timestamp import com.digitalasset.daml.lf.data.{Bytes, Ref} import com.digitalasset.daml.lf.value.Value.ContractId @@ -41,8 +43,10 @@ private[store] object StorageBackendTestValues { def offset(x: Long): Offset = Offset.tryFromLong(x) def ledgerEnd(o: Long, e: Long): ParameterStorageBackend.LedgerEnd = 
ParameterStorageBackend.LedgerEnd(offset(o), e, 0, CantonTimestamp.now()) - def updateIdFromOffset(x: Offset): Ref.LedgerString = - Ref.LedgerString.assertFromString(x.toDecimalString) + def updateIdFromOffset(x: Offset): UpdateId = TestUpdateId(x.toDecimalString) + def updateIdArrayFromOffset(x: Offset): Array[Byte] = updateIdFromOffset( + x + ).toProtoPrimitive.toByteArray def timestampFromInstant(i: Instant): Timestamp = Timestamp.assertFromInstant(i) val someTime: Timestamp = timestampFromInstant(Instant.now()) @@ -52,7 +56,10 @@ private[store] object StorageBackendTestValues { ) val somePackageId: Ref.PackageId = Ref.PackageId.assertFromString("pkg") val someTemplateId: NameTypeConRef = NameTypeConRef.assertFromString("#pkg-name:Mod:Template") + val someInterfaceId: Identifier = Identifier.assertFromString("0abc:Mod:Template") val someTemplateIdFull: Ref.FullIdentifier = someTemplateId.toFullIdentifier(somePackageId) + val someRepresentativePackageId: Ref.PackageId = + Ref.PackageId.assertFromString("representative-pkg") val someTemplateId2: NameTypeConRef = NameTypeConRef.assertFromString("#pkg-name:Mod:Template2") val someIdentityParams: ParameterStorageBackend.IdentityParams = ParameterStorageBackend.IdentityParams(someParticipantId) @@ -71,11 +78,18 @@ private[store] object StorageBackendTestValues { .build val someSerializedDamlLfValue: Array[Byte] = Array.empty[Byte] - val someSynchronizerId: SynchronizerId = SynchronizerId.tryFromString("x::somesynchronizer") - val someSynchronizerId2: SynchronizerId = SynchronizerId.tryFromString("x::somesynchronizer2") + val someSynchronizerId: SynchronizerId = SynchronizerId.tryFromString("x::sourcesynchronizer") + val someSynchronizerId2: SynchronizerId = SynchronizerId.tryFromString("x::targetsynchronizer") - private val serializableTraceContext: Array[Byte] = + val serializableTraceContext: Array[Byte] = SerializableTraceContext(TraceContext.empty).toDamlProto.toByteArray + val someExternalTransactionHash: CantonHash = + CantonHash + .digest(HashPurpose.PreparedSubmission, ByteString.copyFromUtf8("mock_hash"), Sha256) + val someExternalTransactionHashBinary: Array[Byte] = + someExternalTransactionHash.getCryptographicEvidence.toByteArray + val reassignmentId: Array[Byte] = + ReassignmentId.create("0012345678").toOption.get.toBytes.toByteArray def dtoPartyEntry( offset: Offset, @@ -93,9 +107,327 @@ private[store] object StorageBackendTestValues { is_local = Some(isLocal), ) + def dtosCreate( + // update related columns + event_offset: Long = 10L, + update_id: Array[Byte] = TestUpdateId("update").toProtoPrimitive.toByteArray, + workflow_id: Option[String] = Some("workflow-id"), + command_id: Option[String] = Some("command-id"), + submitters: Option[Set[String]] = Some(Set("submitter1", "submitter2")), + record_time: Long = 100L, + synchronizer_id: SynchronizerId = someSynchronizerId, + trace_context: Array[Byte] = serializableTraceContext, + external_transaction_hash: Option[Array[Byte]] = Some(someExternalTransactionHashBinary), + + // event related columns + event_sequential_id: Long = 500L, + node_id: Int = 15, + additional_witnesses: Set[String] = Set("witness1", "witness2"), + representative_package_id: String = "representativepackage", + + // contract related columns + notPersistedContractId: ContractId = hashCid("c1"), + internal_contract_id: Long = 10, + create_key_hash: Option[String] = Some("keyhash"), + )( + stakeholders: Set[String] = Set("stakeholder1", "stakeholder2"), + template_id: String = "tem:pl:ate", + ): Seq[DbDto] = 
DbDto + .createDbDtos( + event_offset = event_offset, + update_id = update_id, + workflow_id = workflow_id, + command_id = command_id, + submitters = submitters, + record_time = record_time, + synchronizer_id = synchronizer_id, + trace_context = trace_context, + external_transaction_hash = external_transaction_hash, + event_sequential_id = event_sequential_id, + node_id = node_id, + additional_witnesses = additional_witnesses, + representative_package_id = representative_package_id, + notPersistedContractId = notPersistedContractId, + internal_contract_id = internal_contract_id, + create_key_hash = create_key_hash, + )( + stakeholders = stakeholders, + template_id = template_id, + ) + .toSeq + + def dtosAssign( + // update related columns + event_offset: Long = 10L, + update_id: Array[Byte] = TestUpdateId("update").toProtoPrimitive.toByteArray, + workflow_id: Option[String] = Some("workflow-id"), + command_id: Option[String] = Some("command-id"), + submitter: Option[String] = Some("submitter1"), + record_time: Long = 100L, + synchronizer_id: SynchronizerId = someSynchronizerId, + trace_context: Array[Byte] = serializableTraceContext, + + // event related columns + event_sequential_id: Long = 500L, + node_id: Int = 15, + source_synchronizer_id: SynchronizerId = someSynchronizerId2, + reassignment_counter: Long = 345, + reassignment_id: Array[Byte] = reassignmentId, + representative_package_id: String = "representativepackage", + + // contract related columns + notPersistedContractId: ContractId = hashCid("c1"), + internal_contract_id: Long = 10, + )( + stakeholders: Set[String] = Set("stakeholder1", "stakeholder2"), + template_id: String = "tem:pl:ate", + ): Seq[DbDto] = DbDto + .assignDbDtos( + event_offset = event_offset, + update_id = update_id, + workflow_id = workflow_id, + command_id = command_id, + submitter = submitter, + record_time = record_time, + synchronizer_id = synchronizer_id, + trace_context = trace_context, + event_sequential_id = event_sequential_id, + node_id = node_id, + source_synchronizer_id = source_synchronizer_id, + reassignment_counter = reassignment_counter, + reassignment_id = reassignment_id, + representative_package_id = representative_package_id, + notPersistedContractId = notPersistedContractId, + internal_contract_id = internal_contract_id, + )( + stakeholders = stakeholders, + template_id = template_id, + ) + .toSeq + + def dtosConsumingExercise( + // update related columns + event_offset: Long = 10L, + update_id: Array[Byte] = TestUpdateId("update").toProtoPrimitive.toByteArray, + workflow_id: Option[String] = Some("workflow-id"), + command_id: Option[String] = Some("command-id"), + submitters: Option[Set[String]] = Some(Set("submitter1", "submitter2")), + record_time: Long = 100L, + synchronizer_id: SynchronizerId = someSynchronizerId, + trace_context: Array[Byte] = serializableTraceContext, + external_transaction_hash: Option[Array[Byte]] = Some(someExternalTransactionHashBinary), + + // event related columns + event_sequential_id: Long = 500L, + node_id: Int = 15, + deactivated_event_sequential_id: Option[Long] = Some(600L), + additional_witnesses: Set[String] = Set("witness1", "witness2"), + exercise_choice: String = "choice", + exercise_choice_interface_id: Option[String] = Some("in:ter:face"), + exercise_argument: Array[Byte] = Array(1, 2, 3), + exercise_result: Option[Array[Byte]] = Some(Array(2, 3, 4)), + exercise_actors: Set[String] = Set("actor1", "actor2"), + exercise_last_descendant_node_id: Int = 3, + exercise_argument_compression: 
Option[Int] = Some(1), + exercise_result_compression: Option[Int] = Some(2), + + // contract related columns + contract_id: ContractId = hashCid("c1"), + internal_contract_id: Option[Long] = Some(10), + template_id: String = "#tem:pl:ate", + package_id: String = "package", + stakeholders: Set[String] = Set("stakeholder1", "stakeholder2"), + ledger_effective_time: Long = 123456, + ): Seq[DbDto] = DbDto + .consumingExerciseDbDtos( + event_offset = event_offset, + update_id = update_id, + workflow_id = workflow_id, + command_id = command_id, + submitters = submitters, + record_time = record_time, + synchronizer_id = synchronizer_id, + trace_context = trace_context, + external_transaction_hash = external_transaction_hash, + event_sequential_id = event_sequential_id, + node_id = node_id, + deactivated_event_sequential_id = deactivated_event_sequential_id, + additional_witnesses = additional_witnesses, + exercise_choice = exercise_choice, + exercise_choice_interface_id = exercise_choice_interface_id, + exercise_argument = exercise_argument, + exercise_result = exercise_result, + exercise_actors = exercise_actors, + exercise_last_descendant_node_id = exercise_last_descendant_node_id, + exercise_argument_compression = exercise_argument_compression, + exercise_result_compression = exercise_result_compression, + contract_id = contract_id, + internal_contract_id = internal_contract_id, + template_id = template_id, + package_id = package_id, + stakeholders = stakeholders, + ledger_effective_time = ledger_effective_time, + ) + .toSeq + + def dtosUnassign( + // update related columns + event_offset: Long = 10L, + update_id: Array[Byte] = TestUpdateId("update").toProtoPrimitive.toByteArray, + workflow_id: Option[String] = Some("workflow-id"), + command_id: Option[String] = Some("command-id"), + submitter: Option[String] = Some("submitter1"), + record_time: Long = 100L, + synchronizer_id: SynchronizerId = someSynchronizerId, + trace_context: Array[Byte] = serializableTraceContext, + + // event related columns + event_sequential_id: Long = 500L, + node_id: Int = 15, + deactivated_event_sequential_id: Option[Long] = Some(67), + reassignment_id: Array[Byte] = reassignmentId, + assignment_exclusivity: Option[Long] = Some(111333), + target_synchronizer_id: SynchronizerId = someSynchronizerId2, + reassignment_counter: Long = 345, + + // contract related columns + contract_id: ContractId = hashCid("c1"), + internal_contract_id: Option[Long] = Some(10), + template_id: String = "#tem:pl:ate", + package_id: String = "package", + stakeholders: Set[String] = Set("stakeholder1", "stakeholder2"), + ): Seq[DbDto] = DbDto + .unassignDbDtos( + event_offset = event_offset, + update_id = update_id, + workflow_id = workflow_id, + command_id = command_id, + submitter = submitter, + record_time = record_time, + synchronizer_id = synchronizer_id, + trace_context = trace_context, + event_sequential_id = event_sequential_id, + node_id = node_id, + deactivated_event_sequential_id = deactivated_event_sequential_id, + reassignment_id = reassignment_id, + assignment_exclusivity = assignment_exclusivity, + target_synchronizer_id = target_synchronizer_id, + reassignment_counter = reassignment_counter, + contract_id = contract_id, + internal_contract_id = internal_contract_id, + template_id = template_id, + package_id = package_id, + stakeholders = stakeholders, + ) + .toSeq + + def dtosWitnessedCreate( + // update related columns + event_offset: Long = 10L, + update_id: Array[Byte] = TestUpdateId("update").toProtoPrimitive.toByteArray, 
+ workflow_id: Option[String] = Some("workflow-id"), + command_id: Option[String] = Some("command-id"), + submitters: Option[Set[String]] = Some(Set("submitter1", "submitter2")), + record_time: Long = 100L, + synchronizer_id: SynchronizerId = someSynchronizerId, + trace_context: Array[Byte] = serializableTraceContext, + external_transaction_hash: Option[Array[Byte]] = Some(someExternalTransactionHashBinary), + + // event related columns + event_sequential_id: Long = 500L, + node_id: Int = 15, + additional_witnesses: Set[String] = Set("witness1", "witness2"), + representative_package_id: String = "representativepackage", + + // contract related columns + internal_contract_id: Long = 10, + )(template_id: String = "tem:pl:ate"): Seq[DbDto] = DbDto + .witnessedCreateDbDtos( + event_offset = event_offset, + update_id = update_id, + workflow_id = workflow_id, + command_id = command_id, + submitters = submitters, + record_time = record_time, + synchronizer_id = synchronizer_id, + trace_context = trace_context, + external_transaction_hash = external_transaction_hash, + event_sequential_id = event_sequential_id, + node_id = node_id, + additional_witnesses = additional_witnesses, + representative_package_id = representative_package_id, + internal_contract_id = internal_contract_id, + )( + template_id = template_id + ) + .toSeq + + def dtosWitnessedExercised( + // update related columns + event_offset: Long = 10L, + update_id: Array[Byte] = TestUpdateId("update").toProtoPrimitive.toByteArray, + workflow_id: Option[String] = Some("workflow-id"), + command_id: Option[String] = Some("command-id"), + submitters: Option[Set[String]] = Some(Set("submitter1", "submitter2")), + record_time: Long = 100L, + synchronizer_id: SynchronizerId = someSynchronizerId, + trace_context: Array[Byte] = serializableTraceContext, + external_transaction_hash: Option[Array[Byte]] = Some(someExternalTransactionHashBinary), + + // event related columns + event_sequential_id: Long = 500L, + node_id: Int = 15, + additional_witnesses: Set[String] = Set("witness1", "witness2"), + consuming: Boolean = true, + exercise_choice: String = "choice", + exercise_choice_interface_id: Option[String] = Some("in:ter:face"), + exercise_argument: Array[Byte] = Array(1, 2, 3), + exercise_result: Option[Array[Byte]] = Some(Array(2, 3, 4)), + exercise_actors: Set[String] = Set("actor1", "actor2"), + exercise_last_descendant_node_id: Int = 3, + exercise_argument_compression: Option[Int] = Some(1), + exercise_result_compression: Option[Int] = Some(2), + + // contract related columns + contract_id: ContractId = hashCid("c1"), + internal_contract_id: Option[Long] = Some(10), + template_id: String = "#tem:pl:ate", + package_id: String = "package", + ledger_effective_time: Long = 123456, + ): Seq[DbDto] = DbDto + .witnessedExercisedDbDtos( + event_offset = event_offset, + update_id = update_id, + workflow_id = workflow_id, + command_id = command_id, + submitters = submitters, + record_time = record_time, + synchronizer_id = synchronizer_id, + trace_context = trace_context, + external_transaction_hash = external_transaction_hash, + event_sequential_id = event_sequential_id, + node_id = node_id, + additional_witnesses = additional_witnesses, + consuming = consuming, + exercise_choice = exercise_choice, + exercise_choice_interface_id = exercise_choice_interface_id, + exercise_argument = exercise_argument, + exercise_result = exercise_result, + exercise_actors = exercise_actors, + exercise_last_descendant_node_id = exercise_last_descendant_node_id, + 
exercise_argument_compression = exercise_argument_compression, + exercise_result_compression = exercise_result_compression, + contract_id = contract_id, + internal_contract_id = internal_contract_id, + template_id = template_id, + package_id = package_id, + ledger_effective_time = ledger_effective_time, + ) + .toSeq + /** A simple create event. Corresponds to a transaction with a single create node. */ - def dtoCreate( + def dtoCreateLegacy( offset: Offset, eventSequentialId: Long, contractId: ContractId, @@ -106,15 +438,17 @@ private[store] object StorageBackendTestValues { ledgerEffectiveTime: Timestamp = someTime, authenticationData: Array[Byte] = Array.empty, keyHash: Option[String] = None, - synchronizerId: String = "x::sourcesynchronizer", + synchronizerId: SynchronizerId = someSynchronizerId, createKey: Option[Array[Byte]] = None, createKeyMaintainer: Option[String] = None, traceContext: Array[Byte] = serializableTraceContext, recordTime: Timestamp = someTime, externalTransactionHash: Option[Array[Byte]] = None, emptyFlatEventWitnesses: Boolean = false, + representativePackageId: Ref.PackageId = somePackageId, + internalContractId: Long = 0, ): DbDto.EventCreate = { - val updateId = updateIdFromOffset(offset) + val updateId = updateIdArrayFromOffset(offset) val stakeholders = Set(signatory, observer) val informees = stakeholders ++ nonStakeholderInformees DbDto.EventCreate( @@ -126,7 +460,7 @@ private[store] object StorageBackendTestValues { user_id = Some(someUserId), submitters = None, node_id = 0, - contract_id = contractId.toBytes.toByteArray, + contract_id = contractId, template_id = someTemplateId.toString, package_id = somePackageId.toString, flat_event_witnesses = if (!emptyFlatEventWitnesses) stakeholders else Set.empty, @@ -145,6 +479,8 @@ private[store] object StorageBackendTestValues { trace_context = traceContext, record_time = recordTime.micros, external_transaction_hash = externalTransactionHash, + representative_package_id = representativePackageId, + internal_contract_id = internalContractId, ) } @@ -155,7 +491,7 @@ private[store] object StorageBackendTestValues { * @param actor * The choice actor, who is also the submitter */ - def dtoExercise( + def dtoExerciseLegacy( offset: Offset, eventSequentialId: Long, consuming: Boolean, @@ -163,13 +499,14 @@ private[store] object StorageBackendTestValues { signatory: String = "signatory", actor: String = "actor", commandId: String = UUID.randomUUID().toString, - synchronizerId: String = "x::sourcesynchronizer", + synchronizerId: SynchronizerId = someSynchronizerId, traceContext: Array[Byte] = serializableTraceContext, recordTime: Timestamp = someTime, externalTransactionHash: Option[Array[Byte]] = None, emptyFlatEventWitnesses: Boolean = false, + deactivatedEventSeqId: Option[Long] = None, ): DbDto.EventExercise = { - val updateId = updateIdFromOffset(offset) + val updateId = updateIdArrayFromOffset(offset) DbDto.EventExercise( consuming = consuming, event_offset = offset.unwrap, @@ -180,13 +517,14 @@ private[store] object StorageBackendTestValues { user_id = Some(someUserId), submitters = Some(Set(actor)), node_id = 0, - contract_id = contractId.toBytes.toByteArray, + contract_id = contractId, template_id = someTemplateId.toString, package_id = somePackageId, flat_event_witnesses = if (consuming && !emptyFlatEventWitnesses) Set(signatory) else Set.empty, tree_event_witnesses = Set(signatory, actor), exercise_choice = "exercise_choice", + exercise_choice_interface_id = Some(someInterfaceId.toString), exercise_argument = 
someSerializedDamlLfValue, exercise_result = Some(someSerializedDamlLfValue), exercise_actors = Set(actor), @@ -198,10 +536,11 @@ private[store] object StorageBackendTestValues { trace_context = traceContext, record_time = recordTime.micros, external_transaction_hash = externalTransactionHash, + deactivated_event_sequential_id = deactivatedEventSeqId, ) } - def dtoAssign( + def dtoAssignLegacy( offset: Offset, eventSequentialId: Long, contractId: ContractId, @@ -209,13 +548,14 @@ private[store] object StorageBackendTestValues { observer: String = "observer", commandId: String = UUID.randomUUID().toString, authenticationData: Bytes = someAuthenticationData, - sourceSynchronizerId: String = "x::sourcesynchronizer", - targetSynchronizerId: String = "x::targetsynchronizer", + sourceSynchronizerId: SynchronizerId = someSynchronizerId, + targetSynchronizerId: SynchronizerId = someSynchronizerId2, traceContext: Array[Byte] = serializableTraceContext, recordTime: Timestamp = someTime, nodeId: Int = 0, + internalContractId: Long = 0, ): DbDto.EventAssign = { - val updateId = updateIdFromOffset(offset) + val updateId = updateIdArrayFromOffset(offset) DbDto.EventAssign( event_offset = offset.unwrap, update_id = updateId, @@ -223,7 +563,7 @@ private[store] object StorageBackendTestValues { workflow_id = Some("workflow_id"), submitter = Option(someParty), node_id = nodeId, - contract_id = contractId.toBytes.toByteArray, + contract_id = contractId, template_id = someTemplateId.toString, package_id = somePackageId.toString, flat_event_witnesses = Set(signatory, observer), @@ -240,27 +580,29 @@ private[store] object StorageBackendTestValues { authentication_data = authenticationData.toByteArray, source_synchronizer_id = sourceSynchronizerId, target_synchronizer_id = targetSynchronizerId, - reassignment_id = "123456789", + reassignment_id = reassignmentId, reassignment_counter = 1000L, trace_context = traceContext, record_time = recordTime.micros, + internal_contract_id = internalContractId, ) } - def dtoUnassign( + def dtoUnassignLegacy( offset: Offset, eventSequentialId: Long, contractId: ContractId, signatory: String = "signatory", observer: String = "observer", commandId: String = UUID.randomUUID().toString, - sourceSynchronizerId: String = "x::sourcesynchronizer", - targetSynchronizerId: String = "x::targetsynchronizer", + sourceSynchronizerId: SynchronizerId = someSynchronizerId, + targetSynchronizerId: SynchronizerId = someSynchronizerId2, traceContext: Array[Byte] = serializableTraceContext, recordTime: Timestamp = someTime, nodeId: Int = 0, + deactivatedEventSeqId: Option[Long] = None, ): DbDto.EventUnassign = { - val updateId = updateIdFromOffset(offset) + val updateId = updateIdArrayFromOffset(offset) DbDto.EventUnassign( event_offset = offset.unwrap, update_id = updateId, @@ -268,18 +610,19 @@ private[store] object StorageBackendTestValues { workflow_id = Some("workflow_id"), submitter = Option(someParty), node_id = nodeId, - contract_id = contractId.toBytes.toByteArray, + contract_id = contractId, template_id = someTemplateId.toString, package_id = somePackageId, flat_event_witnesses = Set(signatory, observer), event_sequential_id = eventSequentialId, source_synchronizer_id = sourceSynchronizerId, target_synchronizer_id = targetSynchronizerId, - reassignment_id = "123456789", + reassignment_id = reassignmentId, reassignment_counter = 1000L, assignment_exclusivity = Some(11111), trace_context = traceContext, record_time = recordTime.micros, + deactivated_event_sequential_id = 
deactivatedEventSeqId, ) } @@ -289,11 +632,11 @@ private[store] object StorageBackendTestValues { party: String = someParty, participant: String = someParticipantId.toString, authorizationEvent: AuthorizationEvent = Added(AuthorizationLevel.Submission), - synchronizerId: String = "x::sourcesynchronizer", + synchronizerId: SynchronizerId = someSynchronizerId, recordTime: Timestamp = someTime, traceContext: Array[Byte] = serializableTraceContext, ): DbDto.EventPartyToParticipant = { - val updateId = updateIdFromOffset(offset) + val updateId = updateIdArrayFromOffset(offset) DbDto.EventPartyToParticipant( event_sequential_id = eventSequentialId, event_offset = offset.unwrap, @@ -317,11 +660,11 @@ private[store] object StorageBackendTestValues { deduplicationOffset: Option[Long] = None, deduplicationDurationSeconds: Option[Long] = None, deduplicationDurationNanos: Option[Int] = None, - synchronizerId: String = "x::sourcesynchronizer", + synchronizerId: SynchronizerId = someSynchronizerId, traceContext: Array[Byte] = serializableTraceContext, recordTime: Timestamp = someTime, messageUuid: Option[String] = None, - updateId: Option[String] = Some(""), + updateId: Option[Array[Byte]] = Some(new Array[Byte](0)), publicationTime: Timestamp = someTime, isTransaction: Boolean = true, ): DbDto.CommandCompletion = @@ -332,7 +675,7 @@ private[store] object StorageBackendTestValues { user_id = userId, submitters = submitters, command_id = commandId, - update_id = updateId.filter(_ == "").map(_ => updateIdFromOffset(offset)), + update_id = updateId.filter(_.isEmpty).map(_ => updateIdArrayFromOffset(offset)), rejection_status_code = None, rejection_status_message = None, rejection_status_details = None, @@ -351,11 +694,11 @@ private[store] object StorageBackendTestValues { event_sequential_id_first: Long, event_sequential_id_last: Long, recordTime: Timestamp = someTime, - udpateId: Option[String] = None, - synchronizerId: String = someSynchronizerId.toProtoPrimitive, + udpateId: Option[Array[Byte]] = None, + synchronizerId: SynchronizerId = someSynchronizerId, publicationTime: Timestamp = someTime, ): DbDto.TransactionMeta = DbDto.TransactionMeta( - update_id = udpateId.getOrElse(updateIdFromOffset(offset)), + update_id = udpateId.getOrElse(updateIdArrayFromOffset(offset)), event_offset = offset.unwrap, publication_time = publicationTime.micros, record_time = recordTime.micros, @@ -368,8 +711,14 @@ private[store] object StorageBackendTestValues { event_sequential_id: Long, template_id: NameTypeConRef, party_id: String, + first_per_sequential_id: Boolean, ): DbDto.IdFilterCreateStakeholder = - DbDto.IdFilterCreateStakeholder(event_sequential_id, template_id.toString, party_id) + DbDto.IdFilterCreateStakeholder( + event_sequential_id, + template_id.toString, + party_id, + first_per_sequential_id, + ) def dtoInterning( internal: Int, @@ -379,12 +728,12 @@ private[store] object StorageBackendTestValues { externalString = external, ) - def dtoTransactionId(dto: DbDto): data.UpdateId = + def dtoTransactionId(dto: DbDto): UpdateId = dto match { - case e: DbDto.EventCreate => Ref.TransactionId.assertFromString(e.update_id) - case e: DbDto.EventExercise => Ref.TransactionId.assertFromString(e.update_id) - case e: DbDto.EventAssign => Ref.TransactionId.assertFromString(e.update_id) - case e: DbDto.EventUnassign => Ref.TransactionId.assertFromString(e.update_id) + case e: DbDto.EventCreate => UpdateId.tryFromByteArray(e.update_id) + case e: DbDto.EventExercise => UpdateId.tryFromByteArray(e.update_id) + case e: 
DbDto.EventAssign => UpdateId.tryFromByteArray(e.update_id) + case e: DbDto.EventUnassign => UpdateId.tryFromByteArray(e.update_id) case _ => sys.error(s"$dto does not have a transaction id") } @@ -417,11 +766,11 @@ private[store] object StorageBackendTestValues { } def metaFromSingle(dbDto: DbDto): DbDto.TransactionMeta = DbDto.TransactionMeta( - update_id = dtoTransactionId(dbDto), + update_id = dtoTransactionId(dbDto).toProtoPrimitive.toByteArray, event_offset = dtoOffset(dbDto), publication_time = someTime.micros, record_time = someTime.micros, - synchronizer_id = someSynchronizerId.toProtoPrimitive, + synchronizer_id = someSynchronizerId, event_sequential_id_first = dtoEventSeqId(dbDto), event_sequential_id_last = dtoEventSeqId(dbDto), ) diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsCompletions.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsCompletions.scala index df21bf7c8a..b064115dca 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsCompletions.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsCompletions.scala @@ -354,6 +354,7 @@ private[backend] trait StorageBackendTestsCompletions val publicationTime = Timestamp.now() val recordTime = Timestamp.now().addMicros(15) val submissionId = UUID.randomUUID().toString + val synchronizerId = SynchronizerId.tryFromString("x::synchronizer1") val dtos = Vector( dtoCompletion( offset(1) @@ -364,7 +365,7 @@ private[backend] trait StorageBackendTestsCompletions commandId = commandId, userId = "userid1", submissionId = Some(submissionId), - synchronizerId = "x::synchronizer1", + synchronizerId = synchronizerId, messageUuid = Some(messageUuid.toString), publicationTime = publicationTime, isTransaction = true, @@ -375,7 +376,7 @@ private[backend] trait StorageBackendTestsCompletions commandId = commandId, userId = "userid1", submissionId = Some(submissionId), - synchronizerId = "x::synchronizer1", + synchronizerId = synchronizerId, messageUuid = Some(messageUuid.toString), publicationTime = publicationTime, isTransaction = false, @@ -386,7 +387,7 @@ private[backend] trait StorageBackendTestsCompletions commandId = commandId, userId = "userid1", submissionId = Some(submissionId), - synchronizerId = "x::synchronizer1", + synchronizerId = synchronizerId, recordTime = recordTime, messageUuid = None, updateId = None, diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsContracts.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsContracts.scala index 47b5ec18ef..81d426f9e9 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsContracts.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsContracts.scala @@ -3,8 +3,11 @@ package com.digitalasset.canton.platform.store.backend +import com.digitalasset.canton.platform.store.dao.PaginatingAsyncStream import com.digitalasset.daml.lf.data.Ref -import com.digitalasset.daml.lf.value.Value.ContractId +import com.digitalasset.daml.lf.data.Ref.Identifier +import 
com.digitalasset.daml.lf.transaction.GlobalKey +import com.digitalasset.daml.lf.value.Value.{ValueText, ValueUnit} import org.scalatest.Inside import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers @@ -15,19 +18,406 @@ private[backend] trait StorageBackendTestsContracts with StorageBackendSpec { this: AnyFlatSpec => + import StorageBackendTestValues.* + behavior of "StorageBackend (contracts)" - import StorageBackendTestValues.* + it should "correctly find key states" in { + val key1 = GlobalKey.assertBuild( + Identifier.assertFromString("A:B:C"), + ValueUnit, + someTemplateId.pkg.name, + ) + val key2 = GlobalKey.assertBuild( + Identifier.assertFromString("A:B:C"), + ValueText("value"), + someTemplateId.pkg.name, + ) + val internalContractId = 123L + val internalContractId2 = 223L + val internalContractId3 = 323L + val internalContractId4 = 423L + val signatory = Ref.Party.assertFromString("signatory") + + val dtos: Vector[DbDto] = Vector( + dtosCreate( + event_offset = 1L, + event_sequential_id = 1L, + internal_contract_id = internalContractId4, + create_key_hash = Some(key2.hash.toHexString), + )( + stakeholders = Set(signatory), + template_id = someTemplateId.toString(), + ), + dtosCreate( + event_offset = 2L, + event_sequential_id = 2L, + internal_contract_id = internalContractId, + create_key_hash = Some(key1.hash.toHexString), + )( + stakeholders = Set(signatory), + template_id = someTemplateId.toString(), + ), + dtosCreate( + event_offset = 3L, + event_sequential_id = 3L, + internal_contract_id = internalContractId2, + create_key_hash = Some(key1.hash.toHexString), + )( + stakeholders = Set(signatory), + template_id = someTemplateId.toString(), + ), + dtosConsumingExercise( + event_offset = 4L, + event_sequential_id = 4L, + internal_contract_id = Some(internalContractId2), + stakeholders = Set(signatory), + template_id = someTemplateId.toString(), + ), + dtosCreate( + event_offset = 5L, + event_sequential_id = 5L, + internal_contract_id = internalContractId3, + create_key_hash = Some(key1.hash.toHexString), + )( + stakeholders = Set(signatory), + template_id = someTemplateId.toString(), + ), + ).flatten + + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(ingest(dtos, _)) + executeSql( + updateLedgerEnd(offset(5), 5L) + ) + val keyStates2 = executeSql( + backend.contract.keyStatesNew( + List( + key1, + key2, + ), + 2L, + ) + ) + val keyStateKey1_2 = executeSql( + backend.contract.keyStateNew(key1, 2L) + ) + val keyStateKey2_2 = executeSql( + backend.contract.keyStateNew(key2, 2L) + ) + val keyStates3 = executeSql( + backend.contract.keyStatesNew( + List( + key1, + key2, + ), + 3L, + ) + ) + val keyStateKey1_3 = executeSql( + backend.contract.keyStateNew(key1, 3L) + ) + val keyStateKey2_3 = executeSql( + backend.contract.keyStateNew(key2, 3L) + ) + val keyStates4 = executeSql( + backend.contract.keyStatesNew( + List( + key1, + key2, + ), + 4L, + ) + ) + val keyStateKey1_4 = executeSql( + backend.contract.keyStateNew(key1, 4L) + ) + val keyStateKey2_4 = executeSql( + backend.contract.keyStateNew(key2, 4L) + ) + val keyStates5 = executeSql( + backend.contract.keyStatesNew( + List( + key1, + key2, + ), + 5L, + ) + ) + val keyStateKey1_5 = executeSql( + backend.contract.keyStateNew(key1, 5L) + ) + val keyStateKey2_5 = executeSql( + backend.contract.keyStateNew(key2, 5L) + ) + + keyStates2 shouldBe Map( + key1 -> internalContractId, + key2 -> internalContractId4, + ) + keyStateKey1_2 shouldBe 
Some(internalContractId) + keyStateKey2_2 shouldBe Some(internalContractId4) + keyStates3 shouldBe Map( + key1 -> internalContractId2, + key2 -> internalContractId4, + ) + keyStateKey1_3 shouldBe Some(internalContractId2) + keyStateKey2_3 shouldBe Some(internalContractId4) + keyStates4 shouldBe Map( + key2 -> internalContractId4 + ) + keyStateKey1_4 shouldBe None + keyStateKey2_4 shouldBe Some(internalContractId4) + keyStates5 shouldBe Map( + key1 -> internalContractId3, + key2 -> internalContractId4, + ) + keyStateKey1_5 shouldBe Some(internalContractId3) + keyStateKey2_5 shouldBe Some(internalContractId4) + } + + it should "correctly find active contracts" in { + val internalContractId = 123L + val internalContractId2 = 223L + val internalContractId3 = 323L + val signatory = Ref.Party.assertFromString("signatory") + + val dtos: Vector[DbDto] = Vector( + dtosCreate( + event_offset = 1L, + event_sequential_id = 1L, + internal_contract_id = internalContractId, + )( + stakeholders = Set(signatory), + template_id = someTemplateId.toString(), + ), + dtosAssign( + event_offset = 2L, + event_sequential_id = 2L, + internal_contract_id = internalContractId2, + synchronizer_id = someSynchronizerId2, + )( + stakeholders = Set(signatory), + template_id = someTemplateId.toString(), + ), + dtosAssign( + event_offset = 3L, + event_sequential_id = 3L, + internal_contract_id = internalContractId3, + synchronizer_id = someSynchronizerId2, + )( + stakeholders = Set(signatory), + template_id = someTemplateId.toString(), + ), + dtosAssign( + event_offset = 4L, + event_sequential_id = 4L, + internal_contract_id = internalContractId3, + synchronizer_id = someSynchronizerId, + )( + stakeholders = Set(signatory), + template_id = someTemplateId.toString(), + ), + ).flatten + + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(ingest(dtos, _)) + executeSql( + updateLedgerEnd(offset(3), 3L) + ) + val activeContracts2 = executeSql( + backend.contract.activeContractsNew( + List( + internalContractId, + internalContractId2, + internalContractId3, + ), + 2L, + ) + ) + val activeContracts3 = executeSql( + backend.contract.activeContractsNew( + List( + internalContractId, + internalContractId2, + internalContractId3, + ), + 3L, + ) + ) + val activeIds = executeSql( + backend.event.updateStreamingQueries.fetchActiveIds( + stakeholderO = Some(signatory), + templateIdO = None, + activeAtEventSeqId = 1000, + )(_)( + PaginatingAsyncStream.IdFilterInput( + startExclusive = 0L, + endInclusive = 1000L, + ) + ) + ) + val lastActivations = executeSql( + backend.contract.lastActivationsNew( + List( + someSynchronizerId -> internalContractId, + someSynchronizerId -> internalContractId3, + someSynchronizerId2 -> internalContractId2, + someSynchronizerId2 -> internalContractId3, + ) + ) + ) + + activeContracts2 shouldBe Map( + internalContractId -> true, + internalContractId2 -> true, + ) + activeContracts3 shouldBe Map( + internalContractId -> true, + internalContractId2 -> true, + internalContractId3 -> true, + ) + activeIds shouldBe Vector(1L, 2L, 3L, 4L) + lastActivations shouldBe Map( + (someSynchronizerId, internalContractId) -> 1L, + (someSynchronizerId2, internalContractId2) -> 2L, + (someSynchronizerId2, internalContractId3) -> 3L, + ) + } + + it should "correctly find deactivated contracts" in { + val internalContractId = 123L + val internalContractId2 = 223L + val internalContractId3 = 323L + val signatory = Ref.Party.assertFromString("signatory") + + val dtos: Vector[DbDto] = 
Vector( + dtosCreate( + event_offset = 1L, + event_sequential_id = 1L, + internal_contract_id = internalContractId, + )( + stakeholders = Set(signatory), + template_id = someTemplateId.toString(), + ), + dtosAssign( + event_offset = 2L, + event_sequential_id = 2L, + internal_contract_id = internalContractId2, + synchronizer_id = someSynchronizerId2, + )( + stakeholders = Set(signatory), + template_id = someTemplateId.toString(), + ), + dtosUnassign( + event_offset = 3L, + event_sequential_id = 3L, + internal_contract_id = Some(internalContractId), + deactivated_event_sequential_id = Some(1L), + synchronizer_id = someSynchronizerId, + stakeholders = Set(signatory), + template_id = someTemplateId.toString(), + ), + dtosConsumingExercise( + event_offset = 4L, + event_sequential_id = 4L, + internal_contract_id = Some(internalContractId2), + deactivated_event_sequential_id = Some(2L), + synchronizer_id = someSynchronizerId2, + stakeholders = Set(signatory), + template_id = someTemplateId.toString(), + ), + ).flatten + + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(ingest(dtos, _)) + executeSql( + updateLedgerEnd(offset(4), 4L) + ) + val activeContracts2 = executeSql( + backend.contract.activeContractsNew( + List( + internalContractId, + internalContractId2, + ), + 2L, + ) + ) + val activeContracts4 = executeSql( + backend.contract.activeContractsNew( + List( + internalContractId, + internalContractId2, + internalContractId3, + ), + 4L, + ) + ) + val activeIds2 = executeSql( + backend.event.updateStreamingQueries.fetchActiveIds( + stakeholderO = Some(signatory), + templateIdO = None, + activeAtEventSeqId = 2L, + )(_)( + PaginatingAsyncStream.IdFilterInput( + startExclusive = 0L, + endInclusive = 2L, + ) + ) + ) + val activeIds4 = executeSql( + backend.event.updateStreamingQueries.fetchActiveIds( + stakeholderO = Some(signatory), + templateIdO = None, + activeAtEventSeqId = 4, + )(_)( + PaginatingAsyncStream.IdFilterInput( + startExclusive = 0L, + endInclusive = 4L, + ) + ) + ) + val lastActivations = executeSql( + backend.contract.lastActivationsNew( + List( + someSynchronizerId -> internalContractId, + someSynchronizerId2 -> internalContractId2, + ) + ) + ) + + activeContracts2 shouldBe Map( + internalContractId -> true, + internalContractId2 -> true, + ) + activeContracts4 shouldBe Map( + internalContractId -> true, // although deactivated, this logic only cares about archivals + internalContractId2 -> false, + ) + activeIds2 shouldBe Vector(1L, 2L) + activeIds4 shouldBe Vector.empty + // lastActivation does not care about deactivations + lastActivations shouldBe Map( + (someSynchronizerId, internalContractId) -> 1L, + (someSynchronizerId2, internalContractId2) -> 2L, + ) + } + + behavior of "StorageBackend (contracts) legacy" it should "correctly find an active contract" in { val contractId = hashCid("#1") val signatory = Ref.Party.assertFromString("signatory") - val observer = Ref.Party.assertFromString("observer") val dtos: Vector[DbDto] = Vector( // 1: transaction with create node - dtoCreate(offset(1), 1L, contractId = contractId, signatory = signatory), - DbDto.IdFilterCreateStakeholder(1L, someTemplateId.toString, signatory), + dtoCreateLegacy(offset(1), 1L, contractId = contractId, signatory = signatory), + DbDto.IdFilterCreateStakeholder( + event_sequential_id = 1L, + template_id = someTemplateId.toString, + party_id = signatory, + first_per_sequential_id = true, + ), dtoCompletion(offset(1)), ) @@ -37,19 +427,37 @@ 
private[backend] trait StorageBackendTestsContracts updateLedgerEnd(offset(1), 1L) ) val createdContracts = executeSql( - backend.contract.createdContracts(contractId :: Nil, offset(1)) + backend.contract.createdContracts(contractId :: Nil, 1) ) val archivedContracts = executeSql( - backend.contract.archivedContracts(contractId :: Nil, offset(1)) + backend.contract.archivedContracts(contractId :: Nil, 1) + ) + val activeCreateIds = executeSql( + backend.event.updateStreamingQueries.fetchActiveIdsOfCreateEventsForStakeholderLegacy( + stakeholderO = Some(signatory), + templateIdO = None, + activeAtEventSeqId = 1000, + )(_)( + PaginatingAsyncStream.IdFilterInput( + startExclusive = 0L, + endInclusive = 1000L, + ) + ) + ) + val lastActivations = executeSql( + backend.contract.lastActivations( + List( + someSynchronizerId -> contractId + ) + ) ) - createdContracts.contains(contractId) shouldBe true - createdContracts.get(contractId).foreach { c => - c.templateId shouldBe someTemplateId.toString - c.createArgumentCompression shouldBe None - c.flatEventWitnesses shouldBe Set(signatory, observer) - } + createdContracts should contain(contractId) archivedContracts shouldBe empty + activeCreateIds shouldBe Vector(1L) + lastActivations shouldBe Map( + (someSynchronizerId, contractId) -> 1L + ) } it should "not find an active contract with empty flat event witnesses" in { @@ -58,14 +466,19 @@ private[backend] trait StorageBackendTestsContracts val dtos: Vector[DbDto] = Vector( // 1: transaction with create node with no flat event witnesses - dtoCreate( + dtoCreateLegacy( offset(1), 1L, contractId = contractId, signatory = signatory, emptyFlatEventWitnesses = true, ), - DbDto.IdFilterCreateNonStakeholderInformee(1L, someTemplateId.toString, signatory), + DbDto.IdFilterCreateNonStakeholderInformee( + 1L, + someTemplateId.toString, + signatory, + first_per_sequential_id = true, + ), dtoCompletion(offset(1)), ) @@ -75,29 +488,52 @@ private[backend] trait StorageBackendTestsContracts updateLedgerEnd(offset(1), 1L) ) val createdContracts = executeSql( - backend.contract.createdContracts(contractId :: Nil, offset(1)) + backend.contract.createdContracts(contractId :: Nil, 1) ) val archivedContracts = executeSql( - backend.contract.archivedContracts(contractId :: Nil, offset(1)) + backend.contract.archivedContracts(contractId :: Nil, 1) + ) + val activeCreateIds = executeSql( + backend.event.updateStreamingQueries.fetchActiveIdsOfCreateEventsForStakeholderLegacy( + stakeholderO = Some(signatory), + templateIdO = None, + activeAtEventSeqId = 1000, + )(_)( + PaginatingAsyncStream.IdFilterInput( + startExclusive = 0L, + endInclusive = 1000L, + ) + ) + ) + val lastActivations = executeSql( + backend.contract.lastActivations( + List( + someSynchronizerId -> contractId + ) + ) ) createdContracts shouldBe empty archivedContracts shouldBe empty + activeCreateIds shouldBe Vector.empty + // Last activation for divulged contracts can be looked up with this query, but we ensure in code that we won't do this: + // only divulged deactivation can have a divulged activation pair. 
+ lastActivations shouldBe Map( + (someSynchronizerId, contractId) -> 1L + ) } it should "correctly find a contract from assigned table" in { val contractId1 = hashCid("#1") val contractId2 = hashCid("#2") val contractId3 = hashCid("#3") - val signatory = Ref.Party.assertFromString("signatory") - val observer = Ref.Party.assertFromString("observer") val observer2 = Ref.Party.assertFromString("observer2") val dtos: Vector[DbDto] = Vector( - dtoAssign(offset(1), 1L, contractId1), - dtoAssign(offset(2), 2L, contractId1, observer = observer2), - dtoAssign(offset(3), 3L, contractId2), - dtoAssign(offset(4), 4L, contractId2, observer = observer2), + dtoAssignLegacy(offset(1), 1L, contractId1), + dtoAssignLegacy(offset(2), 2L, contractId1, observer = observer2), + dtoAssignLegacy(offset(3), 3L, contractId2), + dtoAssignLegacy(offset(4), 4L, contractId2, observer = observer2), ) executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) @@ -107,50 +543,42 @@ private[backend] trait StorageBackendTestsContracts ) val assignedContracts1 = executeSql( backend.contract - .assignedContracts(Seq(contractId1, contractId2, contractId3), offset(4)) + .assignedContracts(Seq(contractId1, contractId2, contractId3), 4) ) val assignedContracts2 = executeSql( backend.contract - .assignedContracts(Seq(contractId1, contractId2, contractId3), offset(2)) + .assignedContracts(Seq(contractId1, contractId2, contractId3), 2) ) assignedContracts1.size shouldBe 2 - assignedContracts1.contains(contractId1) shouldBe true - assignedContracts1.get(contractId1).foreach { raw => - raw.templateId shouldBe someTemplateId.toString - raw.createArgumentCompression shouldBe Some(123) - raw.flatEventWitnesses shouldBe Set(signatory, observer) - raw.signatories shouldBe Set(signatory) - } - assignedContracts1.contains(contractId2) shouldBe true - assignedContracts1.get(contractId2).foreach { raw => - raw.templateId shouldBe someTemplateId.toString - raw.createArgumentCompression shouldBe Some(123) - raw.flatEventWitnesses shouldBe Set(signatory, observer) - raw.signatories shouldBe Set(signatory) - } + assignedContracts1 should contain(contractId1) + assignedContracts1 should contain(contractId2) assignedContracts2.size shouldBe 1 - assignedContracts2.contains(contractId1) shouldBe true - assignedContracts2.get(contractId1).foreach { raw => - raw.templateId shouldBe someTemplateId.toString - raw.createArgumentCompression shouldBe Some(123) - raw.flatEventWitnesses shouldBe Set(signatory, observer) - raw.signatories shouldBe Set(signatory) - } + assignedContracts2 should contain(contractId1) assignedContracts2.contains(contractId2) shouldBe false } it should "not find an archived contract" in { val contractId = hashCid("#1") val signatory = Ref.Party.assertFromString("signatory") - val observer = Ref.Party.assertFromString("observer") val dtos: Vector[DbDto] = Vector( // 1: transaction with create node - dtoCreate(offset(1), 1L, contractId = contractId, signatory = signatory), - DbDto.IdFilterCreateStakeholder(1L, someTemplateId.toString, signatory), + dtoCreateLegacy(offset(1), 1L, contractId = contractId, signatory = signatory), + DbDto.IdFilterCreateStakeholder( + 1L, + someTemplateId.toString, + signatory, + first_per_sequential_id = true, + ), dtoCompletion(offset(1)), // 2: transaction that archives the contract - dtoExercise(offset(2), 2L, consuming = true, contractId), + dtoExerciseLegacy( + offset(2), + 2L, + consuming = true, + contractId, + deactivatedEventSeqId = Some(1L), + ), 
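// (Editorial note: `deactivatedEventSeqId = Some(1L)` appears to link this consuming exercise
// back to the event_sequential_id of the create it deactivates, mirroring the
// `deactivated_event_sequential_id` column populated by the non-legacy dtos* helpers above;
// the value 1L here simply points at the create ingested at sequential id 1.)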
dtoCompletion(offset(2)), ) @@ -160,53 +588,75 @@ private[backend] trait StorageBackendTestsContracts updateLedgerEnd(offset(2), 2L) ) val createdContracts1 = executeSql( - backend.contract.createdContracts(contractId :: Nil, offset(1)) + backend.contract.createdContracts(contractId :: Nil, 1) ) val archivedContracts1 = executeSql( - backend.contract.archivedContracts(contractId :: Nil, offset(1)) + backend.contract.archivedContracts(contractId :: Nil, 1) ) val createdContracts2 = executeSql( - backend.contract.createdContracts(contractId :: Nil, offset(2)) + backend.contract.createdContracts(contractId :: Nil, 2) ) val archivedContracts2 = executeSql( - backend.contract.archivedContracts(contractId :: Nil, offset(2)) - ) - - createdContracts1.contains(contractId) shouldBe true - createdContracts1.get(contractId).foreach { c => - c.templateId shouldBe someTemplateId.toString - c.createArgumentCompression shouldBe None - c.flatEventWitnesses shouldBe Set(signatory, observer) - } - archivedContracts1.get(contractId) shouldBe None - createdContracts2.contains(contractId) shouldBe true - createdContracts2.get(contractId).foreach { c => - c.templateId shouldBe someTemplateId.toString - c.createArgumentCompression shouldBe None - c.flatEventWitnesses shouldBe Set(signatory, observer) - } - archivedContracts2.contains(contractId) shouldBe true - archivedContracts2.get(contractId).foreach { c => - c.flatEventWitnesses shouldBe Set(signatory) - } + backend.contract.archivedContracts(contractId :: Nil, 2) + ) + val activeCreateIds = executeSql( + backend.event.updateStreamingQueries.fetchActiveIdsOfCreateEventsForStakeholderLegacy( + stakeholderO = Some(signatory), + templateIdO = None, + activeAtEventSeqId = 1000, + )(_)( + PaginatingAsyncStream.IdFilterInput( + startExclusive = 0L, + endInclusive = 1000L, + ) + ) + ) + val lastActivations = executeSql( + backend.contract.lastActivations( + List( + someSynchronizerId -> contractId + ) + ) + ) + + createdContracts1 should contain(contractId) + archivedContracts1 should not contain contractId + createdContracts2 should contain(contractId) + archivedContracts2 should contain(contractId) + activeCreateIds shouldBe Vector.empty + lastActivations shouldBe Map( + (someSynchronizerId, contractId) -> 1L + ) } + it should "not find an archived contract with empty flat event witnesses" in { val contractId = hashCid("#1") val signatory = Ref.Party.assertFromString("signatory") val dtos: Vector[DbDto] = Vector( // 1: transaction with create node - dtoCreate( + dtoCreateLegacy( offset(1), 1L, contractId = contractId, signatory = signatory, emptyFlatEventWitnesses = true, ), - DbDto.IdFilterCreateNonStakeholderInformee(1L, someTemplateId.toString, signatory), + DbDto.IdFilterCreateNonStakeholderInformee( + 1L, + someTemplateId.toString, + signatory, + first_per_sequential_id = true, + ), dtoCompletion(offset(1)), // 2: transaction that archives the contract - dtoExercise(offset(2), 2L, consuming = true, contractId, emptyFlatEventWitnesses = true), + dtoExerciseLegacy( + offset(2), + 2L, + consuming = true, + contractId, + emptyFlatEventWitnesses = true, + ), dtoCompletion(offset(2)), ) @@ -216,16 +666,16 @@ private[backend] trait StorageBackendTestsContracts updateLedgerEnd(offset(2), 2L) ) val createdContracts1 = executeSql( - backend.contract.createdContracts(contractId :: Nil, offset(1)) + backend.contract.createdContracts(contractId :: Nil, 1) ) val archivedContracts1 = executeSql( - backend.contract.archivedContracts(contractId :: Nil, offset(1)) + 
backend.contract.archivedContracts(contractId :: Nil, 1) ) val createdContracts2 = executeSql( - backend.contract.createdContracts(contractId :: Nil, offset(2)) + backend.contract.createdContracts(contractId :: Nil, 2) ) val archivedContracts2 = executeSql( - backend.contract.archivedContracts(contractId :: Nil, offset(2)) + backend.contract.archivedContracts(contractId :: Nil, 2) ) createdContracts1 shouldBe empty @@ -241,18 +691,17 @@ private[backend] trait StorageBackendTestsContracts val contractId4 = hashCid("#4") val contractId5 = hashCid("#5") val signatory = Ref.Party.assertFromString("signatory") - val observer = Ref.Party.assertFromString("observer") val dtos: Vector[DbDto] = Vector( // 1: transaction with create nodes - dtoCreate(offset(1), 1L, contractId = contractId1, signatory = signatory), - dtoCreate(offset(1), 2L, contractId = contractId2, signatory = signatory), - dtoCreate(offset(1), 3L, contractId = contractId3, signatory = signatory), - dtoCreate(offset(1), 4L, contractId = contractId4, signatory = signatory), + dtoCreateLegacy(offset(1), 1L, contractId = contractId1, signatory = signatory), + dtoCreateLegacy(offset(1), 2L, contractId = contractId2, signatory = signatory), + dtoCreateLegacy(offset(1), 3L, contractId = contractId3, signatory = signatory), + dtoCreateLegacy(offset(1), 4L, contractId = contractId4, signatory = signatory), // 2: transaction that archives the contract - dtoExercise(offset(2), 5L, consuming = true, contractId1), + dtoExerciseLegacy(offset(2), 5L, consuming = true, contractId1), // 3: transaction that creates one more contract - dtoCreate(offset(3), 6L, contractId = contractId5, signatory = signatory), + dtoCreateLegacy(offset(3), 6L, contractId = contractId5, signatory = signatory), ) executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) @@ -269,7 +718,7 @@ private[backend] trait StorageBackendTestsContracts contractId4, contractId5, ), - offset(2), + 5L, ) ) val archivedContracts = executeSql( @@ -281,32 +730,19 @@ private[backend] trait StorageBackendTestsContracts contractId4, contractId5, ), - offset(2), + 5L, ) ) - createdContracts.keySet shouldBe Set( + createdContracts shouldBe Set( contractId1, contractId2, contractId3, contractId4, ) - def assertContract( - contractId: ContractId, - witnesses: Set[Ref.Party] = Set(signatory, observer), - ) = { - createdContracts(contractId).templateId shouldBe someTemplateId.toString - createdContracts(contractId).createArgumentCompression shouldBe None - createdContracts(contractId).flatEventWitnesses shouldBe witnesses - } - assertContract(contractId1) - assertContract(contractId2) - assertContract(contractId3) - assertContract(contractId4) - archivedContracts.keySet shouldBe Set( + archivedContracts shouldBe Set( contractId1 ) - archivedContracts(contractId1).flatEventWitnesses shouldBe Set(signatory) } it should "be able to query with 1000 contract ids" in { @@ -317,17 +753,17 @@ private[backend] trait StorageBackendTestsContracts val createdContracts = executeSql( backend.contract.createdContracts( 1.to(1000).map(n => hashCid(s"#$n")), - offset(2), + 2, ) ) val archivedContracts = executeSql( backend.contract.archivedContracts( 1.to(1000).map(n => hashCid(s"#$n")), - offset(2), + 2, ) ) - createdContracts shouldBe Map.empty - archivedContracts shouldBe Map.empty + createdContracts shouldBe empty + archivedContracts shouldBe empty } } diff --git 
a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsConversions.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsConversions.scala new file mode 100644 index 0000000000..24ca780d6a --- /dev/null +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsConversions.scala @@ -0,0 +1,38 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.platform.store.backend + +import com.digitalasset.canton.platform.store.backend.Conversions.IntArrayDBSerialization.{ + decodeFromByteArray, + encodeToByteArray, +} +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers +import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks + +private[backend] trait StorageBackendTestsConversions + extends Matchers + with ScalaCheckDrivenPropertyChecks { this: AnyFlatSpec => + + behavior of "StorageBackend (conversions)" + + it should "serialize and deserialize sets of ints to bytes correctly" in { + import org.scalacheck.Gen + + val setGen: Gen[Set[Int]] = + Gen.containerOf[Set, Int](Gen.choose(Int.MinValue, Int.MaxValue)) + + forAll(setGen) { set => + val encoded = encodeToByteArray(set) + + decodeFromByteArray(encoded) should contain theSameElementsAs set + if (set.isEmpty) { + encoded shouldBe empty + } else { + encoded(0) shouldBe 1 + encoded.length shouldBe set.size * 4 + 1 // version byte + 4 bytes per int + } + } + } +} diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsEvents.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsEvents.scala index 18c59447cd..e6cdef73b9 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsEvents.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsEvents.scala @@ -4,16 +4,50 @@ package com.digitalasset.canton.platform.store.backend import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.platform.store.backend.EventStorageBackend.SequentialIdBatch.IdRange import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{ - RawCreatedEvent, - RawTreeEvent, + CommonEventProperties, + CommonUpdateProperties, + RawArchivedEvent, + RawCreatedEventLegacy, + RawExercisedEvent, + RawLedgerEffectsEventLegacy, + RawThinAcsDeltaEvent, + RawThinAssignEvent, + RawThinCreatedEvent, + RawThinLedgerEffectsEvent, + RawUnassignEvent, + ReassignmentProperties, + SequentialIdBatch, SynchronizerOffset, + ThinCreatedEventProperties, + TransactionProperties, } +import com.digitalasset.canton.platform.store.backend.common.{ + EventIdSource, + EventIdSourceLegacy, + EventPayloadSourceForUpdatesAcsDelta, + EventPayloadSourceForUpdatesLedgerEffects, + EventPayloadSourceForUpdatesLedgerEffectsLegacy, +} +import com.digitalasset.canton.platform.store.dao.PaginatingAsyncStream.{ + IdFilterInput, + PaginationInput, + PaginationLastOnlyInput, +} +import com.digitalasset.canton.protocol.TestUpdateId import 
com.digitalasset.canton.tracing.SerializableTraceContextConverter.SerializableTraceContextExtension import com.digitalasset.canton.tracing.{SerializableTraceContext, TraceContext} import com.digitalasset.daml.lf.data.Ref -import com.digitalasset.daml.lf.data.Ref.NameTypeConRef +import com.digitalasset.daml.lf.data.Ref.{ + ChoiceName, + Identifier, + NameTypeConRef, + PackageName, + Party, +} import com.digitalasset.daml.lf.data.Time.Timestamp +import org.scalactic.Equality import org.scalatest.Inside import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers @@ -27,217 +61,265 @@ private[backend] trait StorageBackendTestsEvents behavior of "StorageBackend (events)" import StorageBackendTestValues.* - import DbDtoEq.* + import ScalatestEqualityHelpers.* - it should "find contracts by party" in { + it should "find contracts by party legacy" in { val partySignatory = Ref.Party.assertFromString("signatory") val partyObserver1 = Ref.Party.assertFromString("observer1") val partyObserver2 = Ref.Party.assertFromString("observer2") val dtos = Vector( - dtoCreate( + dtoCreateLegacy( offset(1), 1L, hashCid("#1"), signatory = partySignatory, observer = partyObserver1, ), - dtoCreateFilter(1L, someTemplateId, partySignatory), - dtoCreateFilter(1L, someTemplateId, partyObserver1), - dtoCreate( + dtoCreateFilter(1L, someTemplateId, partySignatory, first_per_sequential_id = true), + dtoCreateFilter(1L, someTemplateId, partyObserver1, first_per_sequential_id = false), + dtoCreateLegacy( offset(2), 2L, hashCid("#2"), signatory = partySignatory, observer = partyObserver2, ), - dtoCreateFilter(2L, someTemplateId, partySignatory), - dtoCreateFilter(2L, someTemplateId, partyObserver2), + dtoCreateFilter(2L, someTemplateId, partySignatory, first_per_sequential_id = true), + dtoCreateFilter(2L, someTemplateId, partyObserver2, first_per_sequential_id = false), ) executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(ingest(dtos, _)) executeSql(updateLedgerEnd(offset(2), 2L)) val resultSignatory = executeSql( - backend.event.updateStreamingQueries.fetchIdsOfCreateEventsForStakeholder( - stakeholderO = Some(partySignatory), - templateIdO = None, - startExclusive = 0L, - endInclusive = 10L, - limit = 10, - ) + backend.event.updateStreamingQueries + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateStakeholder)( + stakeholderO = Some(partySignatory), + templateIdO = None, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) ) val resultObserver1 = executeSql( - backend.event.updateStreamingQueries.fetchIdsOfCreateEventsForStakeholder( - stakeholderO = Some(partyObserver1), - templateIdO = None, - startExclusive = 0L, - endInclusive = 10L, - limit = 10, - ) + backend.event.updateStreamingQueries + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateStakeholder)( + stakeholderO = Some(partyObserver1), + templateIdO = None, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) ) val resultObserver2 = executeSql( - backend.event.updateStreamingQueries.fetchIdsOfCreateEventsForStakeholder( - stakeholderO = Some(partyObserver2), - templateIdO = None, - startExclusive = 0L, - endInclusive = 10L, - limit = 10, - ) + backend.event.updateStreamingQueries + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateStakeholder)( + stakeholderO = Some(partyObserver2), + templateIdO = None, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) ) val 
resultSuperReader = executeSql( - backend.event.updateStreamingQueries.fetchIdsOfCreateEventsForStakeholder( - stakeholderO = None, - templateIdO = None, - startExclusive = 0L, - endInclusive = 10L, - limit = 10, - ) + backend.event.updateStreamingQueries + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateStakeholder)( + stakeholderO = None, + templateIdO = None, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) ) resultSignatory should contain theSameElementsAs Vector(1L, 2L) resultObserver1 should contain theSameElementsAs Vector(1L) resultObserver2 should contain theSameElementsAs Vector(2L) - resultSuperReader should contain theSameElementsAs Vector(1L, 1L, 2L, 2L) + resultSuperReader should contain theSameElementsAs Vector(1L, 2L) } - it should "find contracts by party and template" in { + it should "find contracts by party and template legacy" in { val partySignatory = Ref.Party.assertFromString("signatory") val partyObserver1 = Ref.Party.assertFromString("observer1") val partyObserver2 = Ref.Party.assertFromString("observer2") val dtos = Vector( - dtoCreate( + dtoCreateLegacy( offset(1), 1L, hashCid("#1"), signatory = partySignatory, observer = partyObserver1, ), - dtoCreateFilter(1L, someTemplateId, partySignatory), - dtoCreateFilter(1L, someTemplateId, partyObserver1), - dtoCreate( + dtoCreateFilter(1L, someTemplateId, partySignatory, first_per_sequential_id = true), + dtoCreateFilter(1L, someTemplateId, partyObserver1, first_per_sequential_id = false), + dtoCreateLegacy( offset(2), 2L, hashCid("#2"), signatory = partySignatory, observer = partyObserver2, ), - dtoCreateFilter(2L, someTemplateId, partySignatory), - dtoCreateFilter(2L, someTemplateId, partyObserver2), + dtoCreateFilter(2L, someTemplateId, partySignatory, first_per_sequential_id = true), + dtoCreateFilter(2L, someTemplateId, partyObserver2, first_per_sequential_id = false), ) executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(ingest(dtos, _)) executeSql(updateLedgerEnd(offset(2), 2L)) val resultSignatory = executeSql( - backend.event.updateStreamingQueries.fetchIdsOfCreateEventsForStakeholder( - stakeholderO = Some(partySignatory), - templateIdO = Some(someTemplateId), - startExclusive = 0L, - endInclusive = 10L, - limit = 10, - ) + backend.event.updateStreamingQueries + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateStakeholder)( + stakeholderO = Some(partySignatory), + templateIdO = Some(someTemplateId), + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) ) val resultObserver1 = executeSql( - backend.event.updateStreamingQueries.fetchIdsOfCreateEventsForStakeholder( - stakeholderO = Some(partyObserver1), - templateIdO = Some(someTemplateId), - startExclusive = 0L, - endInclusive = 10L, - limit = 10, - ) + backend.event.updateStreamingQueries + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateStakeholder)( + stakeholderO = Some(partyObserver1), + templateIdO = Some(someTemplateId), + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) ) val resultObserver2 = executeSql( - backend.event.updateStreamingQueries.fetchIdsOfCreateEventsForStakeholder( - stakeholderO = Some(partyObserver2), - templateIdO = Some(someTemplateId), - startExclusive = 0L, - endInclusive = 10L, - limit = 10, - ) + backend.event.updateStreamingQueries + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateStakeholder)( + stakeholderO = Some(partyObserver2), + templateIdO = 
Some(someTemplateId), + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) ) val resultSuperReader = executeSql( - backend.event.updateStreamingQueries.fetchIdsOfCreateEventsForStakeholder( - stakeholderO = None, - templateIdO = Some(someTemplateId), - startExclusive = 0L, - endInclusive = 10L, - limit = 10, - ) + backend.event.updateStreamingQueries + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateStakeholder)( + stakeholderO = None, + templateIdO = Some(someTemplateId), + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) ) resultSignatory should contain theSameElementsAs Vector(1L, 2L) resultObserver1 should contain theSameElementsAs Vector(1L) resultObserver2 should contain theSameElementsAs Vector(2L) - resultSuperReader should contain theSameElementsAs Vector(1L, 1L, 2L, 2L) + resultSuperReader should contain theSameElementsAs Vector(1L, 2L) } - it should "not find contracts when the template doesn't match" in { + it should "not find contracts when the template doesn't match legacy" in { val partySignatory = Ref.Party.assertFromString("signatory") val partyObserver1 = Ref.Party.assertFromString("observer1") val partyObserver2 = Ref.Party.assertFromString("observer2") val otherTemplate = NameTypeConRef.assertFromString("#pkg-name:Mod:Template2") val dtos = Vector( - dtoCreate( + dtoCreateLegacy( offset(1), 1L, hashCid("#1"), signatory = partySignatory, observer = partyObserver1, ), - dtoCreateFilter(1L, someTemplateId, partySignatory), - dtoCreateFilter(1L, someTemplateId, partyObserver1), - dtoCreate( + dtoCreateFilter(1L, someTemplateId, partySignatory, first_per_sequential_id = true), + dtoCreateFilter(1L, someTemplateId, partyObserver1, first_per_sequential_id = false), + dtoCreateLegacy( offset(2), 2L, hashCid("#2"), signatory = partySignatory, observer = partyObserver2, ), - dtoCreateFilter(2L, someTemplateId, partySignatory), - dtoCreateFilter(2L, someTemplateId, partyObserver2), + dtoCreateFilter(2L, someTemplateId, partySignatory, first_per_sequential_id = true), + dtoCreateFilter(2L, someTemplateId, partyObserver2, first_per_sequential_id = false), ) executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(ingest(dtos, _)) executeSql(updateLedgerEnd(offset(2), 2L)) val resultSignatory = executeSql( - backend.event.updateStreamingQueries.fetchIdsOfCreateEventsForStakeholder( - stakeholderO = Some(partySignatory), - templateIdO = Some(otherTemplate), - startExclusive = 0L, - endInclusive = 10L, - limit = 10, - ) + backend.event.updateStreamingQueries + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateStakeholder)( + stakeholderO = Some(partySignatory), + templateIdO = Some(otherTemplate), + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) ) val resultObserver1 = executeSql( - backend.event.updateStreamingQueries.fetchIdsOfCreateEventsForStakeholder( - stakeholderO = Some(partyObserver1), - templateIdO = Some(otherTemplate), - startExclusive = 0L, - endInclusive = 10L, - limit = 10, - ) + backend.event.updateStreamingQueries + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateStakeholder)( + stakeholderO = Some(partyObserver1), + templateIdO = Some(otherTemplate), + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) ) val resultObserver2 = executeSql( - backend.event.updateStreamingQueries.fetchIdsOfCreateEventsForStakeholder( - stakeholderO = Some(partyObserver2), - templateIdO = 
Some(otherTemplate), - startExclusive = 0L, - endInclusive = 10L, - limit = 10, - ) + backend.event.updateStreamingQueries + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateStakeholder)( + stakeholderO = Some(partyObserver2), + templateIdO = Some(otherTemplate), + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) ) val resultSuperReader = executeSql( - backend.event.updateStreamingQueries.fetchIdsOfCreateEventsForStakeholder( - stakeholderO = None, - templateIdO = Some(otherTemplate), - startExclusive = 0L, - endInclusive = 10L, - limit = 10, - ) + backend.event.updateStreamingQueries + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateStakeholder)( + stakeholderO = None, + templateIdO = Some(otherTemplate), + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) ) resultSignatory shouldBe empty @@ -246,62 +328,78 @@ private[backend] trait StorageBackendTestsEvents resultSuperReader shouldBe empty } - it should "not find contracts when unknown names are used" in { + it should "not find contracts when unknown names are used legacy" in { val partySignatory = Ref.Party.assertFromString("signatory") val partyObserver = Ref.Party.assertFromString("observer") val partyUnknown = Ref.Party.assertFromString("unknown") val unknownTemplate = NameTypeConRef.assertFromString("#unknown:unknown:unknown") val dtos = Vector( - dtoCreate( + dtoCreateLegacy( offset(1), 1L, hashCid("#1"), signatory = partySignatory, observer = partyObserver, ), - dtoCreateFilter(1L, someTemplateId, partySignatory), - dtoCreateFilter(1L, someTemplateId, partyObserver), + dtoCreateFilter(1L, someTemplateId, partySignatory, first_per_sequential_id = true), + dtoCreateFilter(1L, someTemplateId, partyObserver, first_per_sequential_id = false), ) executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(ingest(dtos, _)) executeSql(updateLedgerEnd(offset(1), 1L)) val resultUnknownParty = executeSql( - backend.event.updateStreamingQueries.fetchIdsOfCreateEventsForStakeholder( - stakeholderO = Some(partyUnknown), - templateIdO = None, - startExclusive = 0L, - endInclusive = 10L, - limit = 10, - ) + backend.event.updateStreamingQueries + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateStakeholder)( + stakeholderO = Some(partyUnknown), + templateIdO = None, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) ) val resultUnknownTemplate = executeSql( - backend.event.updateStreamingQueries.fetchIdsOfCreateEventsForStakeholder( - stakeholderO = Some(partySignatory), - templateIdO = Some(unknownTemplate), - startExclusive = 0L, - endInclusive = 10L, - limit = 10, - ) + backend.event.updateStreamingQueries + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateStakeholder)( + stakeholderO = Some(partySignatory), + templateIdO = Some(unknownTemplate), + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) ) val resultUnknownPartyAndTemplate = executeSql( - backend.event.updateStreamingQueries.fetchIdsOfCreateEventsForStakeholder( - stakeholderO = Some(partyUnknown), - templateIdO = Some(unknownTemplate), - startExclusive = 0L, - endInclusive = 10L, - limit = 10, - ) + backend.event.updateStreamingQueries + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateStakeholder)( + stakeholderO = Some(partyUnknown), + templateIdO = Some(unknownTemplate), + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) ) val 
resultUnknownTemplateSuperReader = executeSql( - backend.event.updateStreamingQueries.fetchIdsOfCreateEventsForStakeholder( - stakeholderO = None, - templateIdO = Some(unknownTemplate), - startExclusive = 0L, - endInclusive = 10L, - limit = 10, - ) + backend.event.updateStreamingQueries + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateStakeholder)( + stakeholderO = None, + templateIdO = Some(unknownTemplate), + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) ) resultUnknownParty shouldBe empty @@ -310,70 +408,652 @@ private[backend] trait StorageBackendTestsEvents resultUnknownTemplateSuperReader shouldBe empty } - it should "respect bounds and limits" in { + it should "respect bounds and limits legacy" in { val partySignatory = Ref.Party.assertFromString("signatory") val partyObserver1 = Ref.Party.assertFromString("observer1") val partyObserver2 = Ref.Party.assertFromString("observer2") val dtos = Vector( - dtoCreate( + dtoCreateLegacy( offset(1), 1L, hashCid("#1"), signatory = partySignatory, observer = partyObserver1, ), - dtoCreateFilter(1L, someTemplateId, partySignatory), - dtoCreateFilter(1L, someTemplateId, partyObserver1), - dtoCreate( + dtoCreateFilter(1L, someTemplateId, partySignatory, first_per_sequential_id = true), + dtoCreateFilter(1L, someTemplateId, partyObserver1, first_per_sequential_id = false), + dtoCreateLegacy( offset(2), 2L, hashCid("#2"), signatory = partySignatory, observer = partyObserver2, ), - dtoCreateFilter(2L, someTemplateId, partySignatory), - dtoCreateFilter(2L, someTemplateId, partyObserver2), + dtoCreateFilter(2L, someTemplateId, partySignatory, first_per_sequential_id = true), + dtoCreateFilter(2L, someTemplateId, partyObserver2, first_per_sequential_id = false), ) executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(ingest(dtos, _)) executeSql(updateLedgerEnd(offset(2), 2L)) val result01L2 = executeSql( - backend.event.updateStreamingQueries.fetchIdsOfCreateEventsForStakeholder( - stakeholderO = Some(partySignatory), - templateIdO = None, - startExclusive = 0L, - endInclusive = 1L, - limit = 2, - ) + backend.event.updateStreamingQueries + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateStakeholder)( + stakeholderO = Some(partySignatory), + templateIdO = None, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 1L, + limit = 2, + ) + ) ) val result12L2 = executeSql( - backend.event.updateStreamingQueries.fetchIdsOfCreateEventsForStakeholder( - stakeholderO = Some(partySignatory), - templateIdO = None, - startExclusive = 1L, - endInclusive = 2L, - limit = 2, - ) + backend.event.updateStreamingQueries + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateStakeholder)( + stakeholderO = Some(partySignatory), + templateIdO = None, + )(_)( + PaginationInput( + startExclusive = 1L, + endInclusive = 2L, + limit = 2, + ) + ) ) val result02L1 = executeSql( - backend.event.updateStreamingQueries.fetchIdsOfCreateEventsForStakeholder( - stakeholderO = Some(partySignatory), - templateIdO = None, - startExclusive = 0L, - endInclusive = 2L, - limit = 1, - ) + backend.event.updateStreamingQueries + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateStakeholder)( + stakeholderO = Some(partySignatory), + templateIdO = None, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 2L, + limit = 1, + ) + ) ) val result02L2 = executeSql( - backend.event.updateStreamingQueries.fetchIdsOfCreateEventsForStakeholder( - stakeholderO = Some(partySignatory), - templateIdO = None, 
- startExclusive = 0L, - endInclusive = 2L, - limit = 2, + backend.event.updateStreamingQueries + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateStakeholder)( + stakeholderO = Some(partySignatory), + templateIdO = None, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 2L, + limit = 2, + ) + ) + ) + + result01L2 should contain theSameElementsAs Vector(1L) + result12L2 should contain theSameElementsAs Vector(2L) + result02L1 should contain theSameElementsAs Vector(1L) + result02L2 should contain theSameElementsAs Vector(1L, 2L) + } + + it should "find contracts by party" in { + val partySignatory = Ref.Party.assertFromString("signatory") + val partyObserver1 = Ref.Party.assertFromString("observer1") + val partyObserver2 = Ref.Party.assertFromString("observer2") + + val dtos = Vector( + dtosCreate( + event_offset = 1, + event_sequential_id = 1L, + notPersistedContractId = hashCid("#1"), + )( + stakeholders = Set(partySignatory, partyObserver1) + ), + dtosCreate( + event_offset = 2, + event_sequential_id = 2L, + notPersistedContractId = hashCid("#2"), + )( + stakeholders = Set(partySignatory, partyObserver2) + ), + ).flatten + + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(ingest(dtos, _)) + executeSql(updateLedgerEnd(offset(2), 2L)) + val resultSignatory = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partySignatory), + templateIdO = None, + eventTypes = Set.empty, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) + ) + val resultObserver1 = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partyObserver1), + templateIdO = None, + eventTypes = Set.empty, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) + ) + val resultObserver2 = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partyObserver2), + templateIdO = None, + eventTypes = Set.empty, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) + ) + val resultSuperReader = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = None, + templateIdO = None, + eventTypes = Set.empty, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) + ) + + resultSignatory should contain theSameElementsAs Vector(1L, 2L) + resultObserver1 should contain theSameElementsAs Vector(1L) + resultObserver2 should contain theSameElementsAs Vector(2L) + resultSuperReader should contain theSameElementsAs Vector(1L, 2L) + } + + it should "find contracts by party and by event_type" in { + val partySignatory = Ref.Party.assertFromString("signatory") + val partyObserver1 = Ref.Party.assertFromString("observer1") + val partyObserver2 = Ref.Party.assertFromString("observer2") + + val dtos = Vector( + dtosCreate( + event_offset = 1, + event_sequential_id = 1L, + notPersistedContractId = hashCid("#1"), + )( + stakeholders = Set(partySignatory, partyObserver1) + ), + dtosAssign( + event_offset = 2, + event_sequential_id = 2L, + notPersistedContractId = hashCid("#2"), + )( + stakeholders = Set(partySignatory, partyObserver2) + ), + ).flatten + + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(ingest(dtos, _)) + 
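// (Editorial note on the queries below: judging by the expected results, only IdFilterInput
// honours the `eventTypes` filter — the WitnessedCreate filter yields no ids there, while the
// same filter under PaginationInput still returns both ids. PaginationLastOnlyInput appears to
// return just the last id of the paged range, again irrespective of event type; this reading
// is inferred from the assertions, not from the query implementation itself.)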
executeSql(updateLedgerEnd(offset(2), 2L)) + val resultCreate = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partySignatory), + templateIdO = None, + eventTypes = Set(PersistentEventType.Create), + )(_)( + IdFilterInput( + startExclusive = 0L, + endInclusive = 10L, + ) + ) + ) + val resultAssign = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partySignatory), + templateIdO = None, + eventTypes = Set(PersistentEventType.Assign), + )(_)( + IdFilterInput( + startExclusive = 0L, + endInclusive = 10L, + ) + ) + ) + val resultBoth = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partySignatory), + templateIdO = None, + eventTypes = Set(PersistentEventType.Assign, PersistentEventType.Create), + )(_)( + IdFilterInput( + startExclusive = 0L, + endInclusive = 10L, + ) + ) + ) + val resultForeign = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partySignatory), + templateIdO = None, + eventTypes = Set(PersistentEventType.WitnessedCreate), + )(_)( + IdFilterInput( + startExclusive = 0L, + endInclusive = 10L, + ) + ) + ) + val resultForeignPaginationInput = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partySignatory), + templateIdO = None, + eventTypes = Set(PersistentEventType.WitnessedCreate), + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 100, + ) + ) + ) + val resultBothLast = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partySignatory), + templateIdO = None, + eventTypes = Set(PersistentEventType.Assign, PersistentEventType.Create), + )(_)( + PaginationLastOnlyInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 100, + ) + ) + ) + val resultCreateLast = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partySignatory), + templateIdO = None, + eventTypes = Set(PersistentEventType.Create), + )(_)( + PaginationLastOnlyInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 100, + ) + ) + ) + + resultBoth should contain theSameElementsAs Vector(1L, 2L) + resultCreate should contain theSameElementsAs Vector(1L) + resultAssign should contain theSameElementsAs Vector(2L) + resultForeign should contain theSameElementsAs Vector.empty + resultForeignPaginationInput should contain theSameElementsAs Vector(1L, 2L) + resultBothLast should contain theSameElementsAs Vector(2L) + resultCreateLast should contain theSameElementsAs Vector(2L) + } + + it should "find contracts by party and template" in { + val partySignatory = Ref.Party.assertFromString("signatory") + val partyObserver1 = Ref.Party.assertFromString("observer1") + val partyObserver2 = Ref.Party.assertFromString("observer2") + + val dtos = Vector( + dtosCreate( + event_offset = 1, + event_sequential_id = 1L, + notPersistedContractId = hashCid("#1"), + )( + stakeholders = Set(partySignatory, partyObserver1), + template_id = someTemplateId.toString(), + ), + dtosCreate( + event_offset = 2, + event_sequential_id = 2L, + notPersistedContractId = hashCid("#2"), + )( + stakeholders = Set(partySignatory, partyObserver2), + template_id = someTemplateId.toString(), + ), + ).flatten + + 
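// (Editorial note: unlike the legacy dtoCreate/dtoCreateFilter pairs earlier in this file, each
// dtos* helper appears to emit the event row together with its id-filter rows as one
// Vector[DbDto], which is why these test fixtures are assembled with `.flatten` instead of
// listing the filter rows by hand.)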
executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(ingest(dtos, _)) + executeSql(updateLedgerEnd(offset(2), 2L)) + val resultSignatory = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partySignatory), + templateIdO = Some(someTemplateId), + eventTypes = Set.empty, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) + ) + val resultObserver1 = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partyObserver1), + templateIdO = Some(someTemplateId), + eventTypes = Set.empty, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) + ) + val resultObserver2 = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partyObserver2), + templateIdO = Some(someTemplateId), + eventTypes = Set.empty, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) + ) + val resultSuperReader = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = None, + templateIdO = Some(someTemplateId), + eventTypes = Set.empty, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) + ) + + resultSignatory should contain theSameElementsAs Vector(1L, 2L) + resultObserver1 should contain theSameElementsAs Vector(1L) + resultObserver2 should contain theSameElementsAs Vector(2L) + resultSuperReader should contain theSameElementsAs Vector(1L, 2L) + } + + it should "not find contracts when the template doesn't match" in { + val partySignatory = Ref.Party.assertFromString("signatory") + val partyObserver1 = Ref.Party.assertFromString("observer1") + val partyObserver2 = Ref.Party.assertFromString("observer2") + val otherTemplate = NameTypeConRef.assertFromString("#pkg-name:Mod:Template2") + + val dtos = Vector( + dtosCreate( + event_offset = 1, + event_sequential_id = 1L, + notPersistedContractId = hashCid("#1"), + )( + stakeholders = Set(partySignatory, partyObserver1) + ), + dtosCreate( + event_offset = 2, + event_sequential_id = 2L, + notPersistedContractId = hashCid("#2"), + )( + stakeholders = Set(partySignatory, partyObserver2) + ), + ).flatten + + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(ingest(dtos, _)) + executeSql(updateLedgerEnd(offset(2), 2L)) + val resultSignatory = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partySignatory), + templateIdO = Some(otherTemplate), + eventTypes = Set.empty, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) + ) + val resultObserver1 = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partyObserver1), + templateIdO = Some(otherTemplate), + eventTypes = Set.empty, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) + ) + val resultObserver2 = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partyObserver2), + templateIdO = Some(otherTemplate), + eventTypes = Set.empty, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) 
+ ) + val resultSuperReader = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = None, + templateIdO = Some(otherTemplate), + eventTypes = Set.empty, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) + ) + + resultSignatory shouldBe empty + resultObserver1 shouldBe empty + resultObserver2 shouldBe empty + resultSuperReader shouldBe empty + } + + it should "not find contracts when unknown names are used" in { + val partySignatory = Ref.Party.assertFromString("signatory") + val partyObserver = Ref.Party.assertFromString("observer") + val partyUnknown = Ref.Party.assertFromString("unknown") + val unknownTemplate = NameTypeConRef.assertFromString("#unknown:unknown:unknown") + + val dtos = Vector( + dtosCreate( + event_offset = 1, + event_sequential_id = 1L, + notPersistedContractId = hashCid("#1"), + )( + stakeholders = Set(partySignatory, partyObserver) ) + ).flatten + + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(ingest(dtos, _)) + executeSql(updateLedgerEnd(offset(1), 1L)) + val resultUnknownParty = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partyUnknown), + templateIdO = None, + eventTypes = Set.empty, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) + ) + val resultUnknownTemplate = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partySignatory), + templateIdO = Some(unknownTemplate), + eventTypes = Set.empty, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) + ) + val resultUnknownPartyAndTemplate = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partyUnknown), + templateIdO = Some(unknownTemplate), + eventTypes = Set.empty, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) + ) + val resultUnknownTemplateSuperReader = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = None, + templateIdO = Some(unknownTemplate), + eventTypes = Set.empty, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) + ) + ) + + resultUnknownParty shouldBe empty + resultUnknownTemplate shouldBe empty + resultUnknownPartyAndTemplate shouldBe empty + resultUnknownTemplateSuperReader shouldBe empty + } + + it should "respect bounds and limits" in { + val partySignatory = Ref.Party.assertFromString("signatory") + val partyObserver1 = Ref.Party.assertFromString("observer1") + val partyObserver2 = Ref.Party.assertFromString("observer2") + + val dtos = Vector( + dtosCreate( + event_offset = 1, + event_sequential_id = 1L, + notPersistedContractId = hashCid("#1"), + )( + stakeholders = Set(partySignatory, partyObserver1) + ), + dtosCreate( + event_offset = 2, + event_sequential_id = 2L, + notPersistedContractId = hashCid("#2"), + )( + stakeholders = Set(partySignatory, partyObserver2) + ), + ).flatten + + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(ingest(dtos, _)) + executeSql(updateLedgerEnd(offset(2), 2L)) + val result01L2 = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = 
Some(partySignatory), + templateIdO = None, + eventTypes = Set.empty, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 1L, + limit = 2, + ) + ) + ) + val result12L2 = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partySignatory), + templateIdO = None, + eventTypes = Set.empty, + )(_)( + PaginationInput( + startExclusive = 1L, + endInclusive = 2L, + limit = 2, + ) + ) + ) + val result02L1 = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partySignatory), + templateIdO = None, + eventTypes = Set.empty, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 2L, + limit = 1, + ) + ) + ) + val result02L2 = executeSql( + backend.event.updateStreamingQueries + .fetchEventIds(EventIdSource.ActivateStakeholder)( + witnessO = Some(partySignatory), + templateIdO = None, + eventTypes = Set.empty, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 2L, + limit = 2, + ) + ) ) result01L2 should contain theSameElementsAs Vector(1L) @@ -427,40 +1107,40 @@ private[backend] trait StorageBackendTestsEvents .flatMap(_ => List(TraceContext.empty, TraceContext.withNewTraceContext("test")(identity))) .map(SerializableTraceContext(_).toDamlProto.toByteArray) val dbDtos = Vector( - dtoCreate( + dtoCreateLegacy( offset = offset(1), eventSequentialId = 1L, contractId = hashCid("#1"), traceContext = traceContexts(0), ), - dtoCreate( + dtoCreateLegacy( offset = offset(2), eventSequentialId = 2L, contractId = hashCid("#2"), traceContext = traceContexts(1), ), - dtoExercise( + dtoExerciseLegacy( offset = offset(3), eventSequentialId = 3L, consuming = false, contractId = hashCid("#1"), traceContext = traceContexts(2), ), - dtoExercise( + dtoExerciseLegacy( offset = offset(4), eventSequentialId = 4L, consuming = false, contractId = hashCid("#2"), traceContext = traceContexts(3), ), - dtoExercise( + dtoExerciseLegacy( offset = offset(5), eventSequentialId = 5L, consuming = true, contractId = hashCid("#1"), traceContext = traceContexts(4), ), - dtoExercise( + dtoExerciseLegacy( offset = offset(6), eventSequentialId = 6L, consuming = true, @@ -475,8 +1155,20 @@ private[backend] trait StorageBackendTestsEvents executeSql(updateLedgerEnd(offset(2), 2L)) val transactionTrees = executeSql( - backend.event.updatePointwiseQueries.fetchTreeTransactionEvents(1L, 6L, Some(Set.empty)) - ) + backend.event.fetchEventPayloadsLedgerEffectsLegacy( + EventPayloadSourceForUpdatesLedgerEffectsLegacy.Create + )(eventSequentialIds = IdRange(1L, 6L), Some(Set.empty)) + ) ++ + executeSql( + backend.event.fetchEventPayloadsLedgerEffectsLegacy( + EventPayloadSourceForUpdatesLedgerEffectsLegacy.NonConsuming + )(eventSequentialIds = IdRange(1L, 6L), Some(Set.empty)) + ) ++ + executeSql( + backend.event.fetchEventPayloadsLedgerEffectsLegacy( + EventPayloadSourceForUpdatesLedgerEffectsLegacy.Consuming + )(eventSequentialIds = IdRange(1L, 6L), Some(Set.empty)) + ) for (i <- traceContexts.indices) yield transactionTrees(i).traceContext should equal(Some(traceContexts(i))) @@ -487,14 +1179,14 @@ private[backend] trait StorageBackendTestsEvents val someMaintainer = Some("maintainer") val someMaintainers = Array("maintainer") val dbDtos = Vector( - dtoCreate( + dtoCreateLegacy( offset = offset(1), eventSequentialId = 1L, contractId = hashCid("#1"), createKey = someKey, createKeyMaintainer = someMaintainer, ), - dtoCreate( + dtoCreateLegacy( offset = offset(2), 
eventSequentialId = 2L, contractId = hashCid("#2"), @@ -508,15 +1200,17 @@ private[backend] trait StorageBackendTestsEvents executeSql(updateLedgerEnd(offset(2), 2L)) val transactionTrees = executeSql( - backend.event.updatePointwiseQueries.fetchTreeTransactionEvents(1L, 6L, Some(Set.empty)) + backend.event.fetchEventPayloadsLedgerEffectsLegacy( + EventPayloadSourceForUpdatesLedgerEffectsLegacy.Create + )(eventSequentialIds = IdRange(1L, 4L), Some(Set.empty)) ) def checkKeyAndMaintainersInTrees( - event: RawTreeEvent, + event: RawLedgerEffectsEventLegacy, createKey: Option[Array[Byte]], createKeyMaintainers: Array[String], ) = event match { - case created: RawCreatedEvent => + case created: RawCreatedEventLegacy => created.createKeyValue should equal(createKey) created.createKeyMaintainers should equal(createKeyMaintainers.toSet) case _ => fail() @@ -534,13 +1228,13 @@ private[backend] trait StorageBackendTestsEvents val dbDtos = Vector( dtoCompletion( offset = offset(1), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = startRecordTimeSynchronizer.addMicros(500), publicationTime = startPublicationTime.addMicros(500), ), dtoTransactionMeta( offset = offset(3), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, recordTime = startRecordTimeSynchronizer2.addMicros(500), publicationTime = startPublicationTime.addMicros(500), event_sequential_id_first = 1, @@ -548,7 +1242,7 @@ private[backend] trait StorageBackendTestsEvents ), dtoTransactionMeta( offset = offset(5), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = startRecordTimeSynchronizer.addMicros(1000), publicationTime = startPublicationTime.addMicros(1000), event_sequential_id_first = 1, @@ -556,19 +1250,19 @@ private[backend] trait StorageBackendTestsEvents ), dtoCompletion( offset = offset(7), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, recordTime = startRecordTimeSynchronizer2.addMicros(1000), publicationTime = startPublicationTime.addMicros(1000), ), dtoCompletion( offset = offset(9), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = startRecordTimeSynchronizer.addMicros(2000), publicationTime = startPublicationTime.addMicros(1000), ), dtoTransactionMeta( offset = offset(11), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, recordTime = startRecordTimeSynchronizer2.addMicros(2000), publicationTime = startPublicationTime.addMicros(1000), event_sequential_id_first = 1, @@ -576,13 +1270,13 @@ private[backend] trait StorageBackendTestsEvents ), dtoCompletion( offset = offset(13), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = startRecordTimeSynchronizer.addMicros(3000), publicationTime = startPublicationTime.addMicros(2000), ), dtoTransactionMeta( offset = offset(15), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, recordTime = startRecordTimeSynchronizer2.addMicros(3000), publicationTime = startPublicationTime.addMicros(2000), event_sequential_id_first = 1, @@ -1126,7 +1820,7 @@ private[backend] trait StorageBackendTestsEvents val dbDtos = Vector( dtoTransactionMeta( offset = offset(3), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = 
startRecordTimeSynchronizer.addMicros(500), publicationTime = startPublicationTime.addMicros(500), event_sequential_id_first = 1, @@ -1134,7 +1828,7 @@ private[backend] trait StorageBackendTestsEvents ), dtoTransactionMeta( offset = offset(7), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = startRecordTimeSynchronizer.addMicros(550), publicationTime = startPublicationTime.addMicros(700), event_sequential_id_first = 1, @@ -1142,7 +1836,7 @@ private[backend] trait StorageBackendTestsEvents ), dtoTransactionMeta( offset = offset(9), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = startRecordTimeSynchronizer.addMicros(550), publicationTime = startPublicationTime.addMicros(800), event_sequential_id_first = 1, @@ -1151,7 +1845,7 @@ private[backend] trait StorageBackendTestsEvents // insertion is out of order for this entry, for testing result is not reliant on insertion order, but rather on index order (regression for bug #26434) dtoTransactionMeta( offset = offset(5), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = startRecordTimeSynchronizer.addMicros(550), publicationTime = startPublicationTime.addMicros(600), event_sequential_id_first = 1, @@ -1159,7 +1853,7 @@ private[backend] trait StorageBackendTestsEvents ), dtoTransactionMeta( offset = offset(11), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = startRecordTimeSynchronizer.addMicros(600), publicationTime = startPublicationTime.addMicros(900), event_sequential_id_first = 1, @@ -1193,32 +1887,32 @@ private[backend] trait StorageBackendTestsEvents val dbDtos = Vector( dtoCompletion( offset = offset(3), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = startRecordTimeSynchronizer.addMicros(500), publicationTime = startPublicationTime.addMicros(500), ), dtoCompletion( offset = offset(7), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = startRecordTimeSynchronizer.addMicros(550), publicationTime = startPublicationTime.addMicros(700), ), dtoCompletion( offset = offset(9), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = startRecordTimeSynchronizer.addMicros(550), publicationTime = startPublicationTime.addMicros(800), ), // insertion is out of order for this entry, for testing result is not reliant on insertion order, but rather on index order (regression for bug #26434) dtoCompletion( offset = offset(5), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = startRecordTimeSynchronizer.addMicros(550), publicationTime = startPublicationTime.addMicros(600), ), dtoCompletion( offset = offset(11), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = startRecordTimeSynchronizer.addMicros(600), publicationTime = startPublicationTime.addMicros(900), ), @@ -1244,15 +1938,15 @@ private[backend] trait StorageBackendTestsEvents ).value.offset shouldBe offset(5) } - it should "work properly for archivals query" in { + it should "work properly for archivals query legacy" in { val dbDtos = Vector( - dtoExercise( + dtoExerciseLegacy( offset = offset(5), eventSequentialId = 14, consuming = true, contractId = hashCid("#1"), ), - dtoExercise( + 
dtoExerciseLegacy( offset = offset(5), eventSequentialId = 18, consuming = true, @@ -1260,17 +1954,17 @@ private[backend] trait StorageBackendTestsEvents ), dtoTransactionMeta( offset = offset(5), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, event_sequential_id_first = 10, event_sequential_id_last = 20, ), - dtoExercise( + dtoExerciseLegacy( offset = offset(15), eventSequentialId = 118, consuming = true, contractId = hashCid("#3"), ), - dtoExercise( + dtoExerciseLegacy( offset = offset(15), eventSequentialId = 119, consuming = true, @@ -1278,23 +1972,23 @@ private[backend] trait StorageBackendTestsEvents ), dtoTransactionMeta( offset = offset(15), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, event_sequential_id_first = 110, event_sequential_id_last = 120, ), - dtoExercise( + dtoExerciseLegacy( offset = offset(25), eventSequentialId = 211, consuming = true, contractId = hashCid("#5"), ), - dtoExercise( + dtoExerciseLegacy( offset = offset(25), eventSequentialId = 212, consuming = false, contractId = hashCid("#55"), ), - dtoExercise( + dtoExerciseLegacy( offset = offset(25), eventSequentialId = 214, consuming = true, @@ -1302,11 +1996,11 @@ private[backend] trait StorageBackendTestsEvents ), dtoTransactionMeta( offset = offset(25), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, event_sequential_id_first = 210, event_sequential_id_last = 220, ), - dtoExercise( + dtoExerciseLegacy( offset = offset(35), eventSequentialId = 315, consuming = true, @@ -1314,7 +2008,7 @@ private[backend] trait StorageBackendTestsEvents ), dtoTransactionMeta( offset = offset(35), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, event_sequential_id_first = 310, event_sequential_id_last = 320, ), @@ -1399,7 +2093,174 @@ private[backend] trait StorageBackendTestsEvents s"test $index archivals($fromExclusive,$toInclusive)" ) { executeSql( - backend.event.archivals( + backend.event.archivalsLegacy( + fromExclusive = fromExclusive, + toInclusive = toInclusive, + ) + ) shouldBe expectation + } + } + } + + it should "work properly for prunableContract" in { + val dbDtos = Vector( + dtosConsumingExercise( + event_offset = 5, + event_sequential_id = 14, + internal_contract_id = Some(1), + ), + dtosConsumingExercise( + event_offset = 5, + event_sequential_id = 18, + internal_contract_id = Some(2), + ), + Vector( + dtoTransactionMeta( + offset = offset(5), + synchronizerId = someSynchronizerId2, + event_sequential_id_first = 10, + event_sequential_id_last = 20, + ) + ), + dtosConsumingExercise( + event_offset = 15, + event_sequential_id = 118, + internal_contract_id = Some(3), + ), + dtosConsumingExercise( + event_offset = 15, + event_sequential_id = 119, + internal_contract_id = Some(4), + ), + Vector( + dtoTransactionMeta( + offset = offset(15), + synchronizerId = someSynchronizerId2, + event_sequential_id_first = 110, + event_sequential_id_last = 120, + ) + ), + dtosConsumingExercise( + event_offset = 25, + event_sequential_id = 211, + internal_contract_id = Some(5), + ), + dtosUnassign( + event_offset = 25, + event_sequential_id = 212, + internal_contract_id = Some(55), + ), + dtosConsumingExercise( + event_offset = 25, + event_sequential_id = 214, + internal_contract_id = Some(6), + ), + dtosCreate( + event_offset = 25, + event_sequential_id = 215, + internal_contract_id = 61, + )(), + dtosAssign( + event_offset = 25, + 
event_sequential_id = 216, + internal_contract_id = 62, + )(), + dtosWitnessedCreate( + event_offset = 25, + event_sequential_id = 217, + internal_contract_id = 63, + )(), + dtosWitnessedExercised( + event_offset = 25, + event_sequential_id = 218, + internal_contract_id = Some(64), + ), + dtosWitnessedExercised( + event_offset = 25, + consuming = false, + event_sequential_id = 219, + internal_contract_id = Some(65), + ), + Vector( + dtoTransactionMeta( + offset = offset(25), + synchronizerId = someSynchronizerId2, + event_sequential_id_first = 210, + event_sequential_id_last = 220, + ) + ), + dtosConsumingExercise( + event_offset = 35, + event_sequential_id = 315, + internal_contract_id = Some(7), + ), + Vector( + dtoTransactionMeta( + offset = offset(35), + synchronizerId = someSynchronizerId2, + event_sequential_id_first = 310, + event_sequential_id_last = 320, + ) + ), + ).flatten + + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(ingest(dbDtos, _)) + executeSql( + updateLedgerEnd(offset(25), 220L) + ) + + Vector( + None -> offset(4) -> Set(), + None -> offset(5) -> Set( + 1, + 2, + ), + None -> offset(10) -> Set( + 1, + 2, + ), + None -> offset(15) -> Set( + 1, + 2, + 3, + 4, + ), + None -> offset(25) -> Set( + 1, 2, 3, 4, 5, 6, 63, + ), + None -> offset(1000) -> Set( + 1, 2, 3, 4, 5, 6, 63, + ), + Some(offset(4)) -> offset(1000) -> Set( + 1, 2, 3, 4, 5, 6, 63, + ), + Some(offset(5)) -> offset(1000) -> Set( + 3, 4, 5, 6, 63, + ), + Some(offset(6)) -> offset(1000) -> Set( + 3, 4, 5, 6, 63, + ), + Some(offset(15)) -> offset(1000) -> Set( + 5, + 6, + 63, + ), + Some(offset(15)) -> offset(15) -> Set( + ), + Some(offset(6)) -> offset(25) -> Set( + 3, 4, 5, 6, 63, + ), + Some(offset(6)) -> offset(24) -> Set( + 3, + 4, + ), + ).zipWithIndex.foreach { case (((fromExclusive, toInclusive), expectation), index) => + withClue( + s"test $index archivals($fromExclusive,$toInclusive)" + ) { + executeSql( + backend.event.prunableContracts( fromExclusive = fromExclusive, toInclusive = toInclusive, ) @@ -1407,4 +2268,615 @@ private[backend] trait StorageBackendTestsEvents } } } + + it should "fetch correctly AcsDelta and LedgerEffects Raw events" in { + implicit val eq: Equality[RawThinAcsDeltaEvent] = caseClassArrayEq + implicit val eq2: Equality[RawThinLedgerEffectsEvent] = caseClassArrayEq + + val dbDtos = Vector( + dtosCreate(event_sequential_id = 1L)(), + dtosAssign(event_sequential_id = 2L)(), + dtosConsumingExercise(event_sequential_id = 3L), + dtosUnassign(event_sequential_id = 4L), + dtosWitnessedCreate(event_sequential_id = 5L)(), + dtosWitnessedExercised(event_sequential_id = 6L), + dtosWitnessedExercised(event_sequential_id = 7L, consuming = false), + ).flatten + + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(ingest(dbDtos, _)) + executeSql( + updateLedgerEnd(offset(10000), 10000L) + ) + + executeSql( + backend.event.fetchEventPayloadsAcsDelta(EventPayloadSourceForUpdatesAcsDelta.Activate)( + eventSequentialIds = SequentialIdBatch.IdRange(0, 100), + requestingParties = + Some(Set("witness1", "stakeholder1", "submitter1", "actor1").map(Party.assertFromString)), + ) + ).toList should contain theSameElementsInOrderAs List( + RawThinCreatedEvent( + transactionProperties = TransactionProperties( + commonEventProperties = CommonEventProperties( + eventSequentialId = 1L, + offset = 10L, + nodeId = 15, + workflowId = Some("workflow-id"), + synchronizerId = someSynchronizerId.toProtoPrimitive, + 
), + commonUpdateProperties = CommonUpdateProperties( + updateId = TestUpdateId("update").toHexString, + commandId = Some("command-id"), + traceContext = serializableTraceContext, + recordTime = Timestamp.assertFromLong(100L), + ), + externalTransactionHash = Some(someExternalTransactionHashBinary), + ), + thinCreatedEventProperties = ThinCreatedEventProperties( + representativePackageId = "representativepackage", + filteredAdditionalWitnessParties = Set.empty, + internalContractId = 10L, + requestingParties = Some(Set("witness1", "stakeholder1", "submitter1", "actor1")), + reassignmentCounter = 0L, + acsDelta = true, + ), + ), + RawThinAssignEvent( + reassignmentProperties = ReassignmentProperties( + commonEventProperties = CommonEventProperties( + eventSequentialId = 2L, + offset = 10L, + nodeId = 15, + workflowId = Some("workflow-id"), + synchronizerId = someSynchronizerId.toProtoPrimitive, + ), + commonUpdateProperties = CommonUpdateProperties( + updateId = TestUpdateId("update").toHexString, + commandId = Some("command-id"), + traceContext = serializableTraceContext, + recordTime = Timestamp.assertFromLong(100L), + ), + reassignmentId = "0012345678", + submitter = Some("submitter1"), + reassignmentCounter = 345, + ), + thinCreatedEventProperties = ThinCreatedEventProperties( + representativePackageId = "representativepackage", + filteredAdditionalWitnessParties = Set.empty, + internalContractId = 10L, + requestingParties = Some(Set("witness1", "stakeholder1", "submitter1", "actor1")), + reassignmentCounter = 345, + acsDelta = true, + ), + sourceSynchronizerId = someSynchronizerId2.toProtoPrimitive, + ), + ) + executeSql( + backend.event.fetchEventPayloadsAcsDelta(EventPayloadSourceForUpdatesAcsDelta.Deactivate)( + eventSequentialIds = SequentialIdBatch.IdRange(0, 100), + requestingParties = + Some(Set("witness1", "stakeholder1", "submitter1", "actor1").map(Party.assertFromString)), + ) + ).toList should contain theSameElementsInOrderAs List( + RawArchivedEvent( + transactionProperties = TransactionProperties( + commonEventProperties = CommonEventProperties( + eventSequentialId = 3L, + offset = 10L, + nodeId = 15, + workflowId = Some("workflow-id"), + synchronizerId = someSynchronizerId.toProtoPrimitive, + ), + commonUpdateProperties = CommonUpdateProperties( + updateId = TestUpdateId("update").toHexString, + commandId = Some("command-id"), + traceContext = serializableTraceContext, + recordTime = Timestamp.assertFromLong(100L), + ), + externalTransactionHash = Some(someExternalTransactionHashBinary), + ), + contractId = hashCid("c1"), + templateId = Identifier + .assertFromString("package:pl:ate") + .toFullIdentifier(PackageName.assertFromString("tem")), + filteredStakeholderParties = Set("stakeholder1"), + ledgerEffectiveTime = Timestamp.assertFromLong(123456), + ), + RawUnassignEvent( + reassignmentProperties = ReassignmentProperties( + commonEventProperties = CommonEventProperties( + eventSequentialId = 4L, + offset = 10L, + nodeId = 15, + workflowId = Some("workflow-id"), + synchronizerId = someSynchronizerId.toProtoPrimitive, + ), + commonUpdateProperties = CommonUpdateProperties( + updateId = TestUpdateId("update").toHexString, + commandId = Some("command-id"), + traceContext = serializableTraceContext, + recordTime = Timestamp.assertFromLong(100L), + ), + reassignmentId = "0012345678", + submitter = Some("submitter1"), + reassignmentCounter = 345, + ), + contractId = hashCid("c1"), + templateId = Identifier + .assertFromString("package:pl:ate") + 
.toFullIdentifier(PackageName.assertFromString("tem")), + filteredStakeholderParties = Set("stakeholder1"), + assignmentExclusivity = Some(Timestamp.assertFromLong(111333)), + targetSynchronizerId = someSynchronizerId2.toProtoPrimitive, + ), + ) + + executeSql( + backend.event.fetchEventPayloadsLedgerEffects( + EventPayloadSourceForUpdatesLedgerEffects.Activate + )( + eventSequentialIds = SequentialIdBatch.IdRange(0, 100), + requestingParties = + Some(Set("witness1", "stakeholder1", "submitter1", "actor1").map(Party.assertFromString)), + ) + ).toList should contain theSameElementsInOrderAs List( + RawThinCreatedEvent( + transactionProperties = TransactionProperties( + commonEventProperties = CommonEventProperties( + eventSequentialId = 1L, + offset = 10L, + nodeId = 15, + workflowId = Some("workflow-id"), + synchronizerId = someSynchronizerId.toProtoPrimitive, + ), + commonUpdateProperties = CommonUpdateProperties( + updateId = TestUpdateId("update").toHexString, + commandId = Some("command-id"), + traceContext = serializableTraceContext, + recordTime = Timestamp.assertFromLong(100L), + ), + externalTransactionHash = Some(someExternalTransactionHashBinary), + ), + thinCreatedEventProperties = ThinCreatedEventProperties( + representativePackageId = "representativepackage", + filteredAdditionalWitnessParties = Set("witness1"), + internalContractId = 10L, + requestingParties = Some(Set("witness1", "stakeholder1", "submitter1", "actor1")), + reassignmentCounter = 0L, + acsDelta = true, + ), + ), + RawThinAssignEvent( + reassignmentProperties = ReassignmentProperties( + commonEventProperties = CommonEventProperties( + eventSequentialId = 2L, + offset = 10L, + nodeId = 15, + workflowId = Some("workflow-id"), + synchronizerId = someSynchronizerId.toProtoPrimitive, + ), + commonUpdateProperties = CommonUpdateProperties( + updateId = TestUpdateId("update").toHexString, + commandId = Some("command-id"), + traceContext = serializableTraceContext, + recordTime = Timestamp.assertFromLong(100L), + ), + reassignmentId = "0012345678", + submitter = Some("submitter1"), + reassignmentCounter = 345, + ), + thinCreatedEventProperties = ThinCreatedEventProperties( + representativePackageId = "representativepackage", + filteredAdditionalWitnessParties = Set.empty, + internalContractId = 10L, + requestingParties = Some(Set("witness1", "stakeholder1", "submitter1", "actor1")), + reassignmentCounter = 345, + acsDelta = true, + ), + sourceSynchronizerId = someSynchronizerId2.toProtoPrimitive, + ), + ) + executeSql( + backend.event.fetchEventPayloadsLedgerEffects( + EventPayloadSourceForUpdatesLedgerEffects.Deactivate + )( + eventSequentialIds = SequentialIdBatch.IdRange(0, 100), + requestingParties = + Some(Set("witness1", "stakeholder1", "submitter1", "actor1").map(Party.assertFromString)), + ) + ).toList should contain theSameElementsInOrderAs List( + RawExercisedEvent( + transactionProperties = TransactionProperties( + commonEventProperties = CommonEventProperties( + eventSequentialId = 3L, + offset = 10L, + nodeId = 15, + workflowId = Some("workflow-id"), + synchronizerId = someSynchronizerId.toProtoPrimitive, + ), + commonUpdateProperties = CommonUpdateProperties( + updateId = TestUpdateId("update").toHexString, + commandId = Some("command-id"), + traceContext = serializableTraceContext, + recordTime = Timestamp.assertFromLong(100L), + ), + externalTransactionHash = Some(someExternalTransactionHashBinary), + ), + contractId = hashCid("c1"), + templateId = Identifier + .assertFromString("package:pl:ate") + 
.toFullIdentifier(PackageName.assertFromString("tem")), + exerciseConsuming = true, + exerciseChoice = ChoiceName.assertFromString("choice"), + exerciseChoiceInterface = Option(Identifier.assertFromString("in:ter:face")), + exerciseArgument = Array(1, 2, 3), + exerciseArgumentCompression = Some(1), + exerciseResult = Some(Array(2, 3, 4)), + exerciseResultCompression = Some(2), + exerciseActors = Set("actor1", "actor2"), + exerciseLastDescendantNodeId = 3, + filteredAdditionalWitnessParties = Set("witness1"), + filteredStakeholderParties = Set("stakeholder1"), + ledgerEffectiveTime = Timestamp.assertFromLong(123456), + acsDelta = true, + ), + RawUnassignEvent( + reassignmentProperties = ReassignmentProperties( + commonEventProperties = CommonEventProperties( + eventSequentialId = 4L, + offset = 10L, + nodeId = 15, + workflowId = Some("workflow-id"), + synchronizerId = someSynchronizerId.toProtoPrimitive, + ), + commonUpdateProperties = CommonUpdateProperties( + updateId = TestUpdateId("update").toHexString, + commandId = Some("command-id"), + traceContext = serializableTraceContext, + recordTime = Timestamp.assertFromLong(100L), + ), + reassignmentId = "0012345678", + submitter = Some("submitter1"), + reassignmentCounter = 345, + ), + contractId = hashCid("c1"), + templateId = Identifier + .assertFromString("package:pl:ate") + .toFullIdentifier(PackageName.assertFromString("tem")), + filteredStakeholderParties = Set("stakeholder1"), + assignmentExclusivity = Some(Timestamp.assertFromLong(111333)), + targetSynchronizerId = someSynchronizerId2.toProtoPrimitive, + ), + ) + executeSql( + backend.event.fetchEventPayloadsLedgerEffects( + EventPayloadSourceForUpdatesLedgerEffects.VariousWitnessed + )( + eventSequentialIds = SequentialIdBatch.IdRange(0, 100), + requestingParties = + Some(Set("witness1", "stakeholder1", "submitter1", "actor1").map(Party.assertFromString)), + ) + ).toList should contain theSameElementsInOrderAs List( + RawThinCreatedEvent( + transactionProperties = TransactionProperties( + commonEventProperties = CommonEventProperties( + eventSequentialId = 5L, + offset = 10L, + nodeId = 15, + workflowId = Some("workflow-id"), + synchronizerId = someSynchronizerId.toProtoPrimitive, + ), + commonUpdateProperties = CommonUpdateProperties( + updateId = TestUpdateId("update").toHexString, + commandId = Some("command-id"), + traceContext = serializableTraceContext, + recordTime = Timestamp.assertFromLong(100L), + ), + externalTransactionHash = Some(someExternalTransactionHashBinary), + ), + thinCreatedEventProperties = ThinCreatedEventProperties( + representativePackageId = "representativepackage", + filteredAdditionalWitnessParties = Set("witness1"), + internalContractId = 10L, + requestingParties = Some(Set("witness1", "stakeholder1", "submitter1", "actor1")), + reassignmentCounter = 0L, + acsDelta = false, + ), + ), + RawExercisedEvent( + transactionProperties = TransactionProperties( + commonEventProperties = CommonEventProperties( + eventSequentialId = 6L, + offset = 10L, + nodeId = 15, + workflowId = Some("workflow-id"), + synchronizerId = someSynchronizerId.toProtoPrimitive, + ), + commonUpdateProperties = CommonUpdateProperties( + updateId = TestUpdateId("update").toHexString, + commandId = Some("command-id"), + traceContext = serializableTraceContext, + recordTime = Timestamp.assertFromLong(100L), + ), + externalTransactionHash = Some(someExternalTransactionHashBinary), + ), + contractId = hashCid("c1"), + templateId = Identifier + .assertFromString("package:pl:ate") + 
.toFullIdentifier(PackageName.assertFromString("tem")), + exerciseConsuming = true, + exerciseChoice = ChoiceName.assertFromString("choice"), + exerciseChoiceInterface = Option(Identifier.assertFromString("in:ter:face")), + exerciseArgument = Array(1, 2, 3), + exerciseArgumentCompression = Some(1), + exerciseResult = Some(Array(2, 3, 4)), + exerciseResultCompression = Some(2), + exerciseActors = Set("actor1", "actor2"), + exerciseLastDescendantNodeId = 3, + filteredAdditionalWitnessParties = Set("witness1"), + filteredStakeholderParties = Set.empty, + ledgerEffectiveTime = Timestamp.assertFromLong(123456), + acsDelta = false, + ), + RawExercisedEvent( + transactionProperties = TransactionProperties( + commonEventProperties = CommonEventProperties( + eventSequentialId = 7L, + offset = 10L, + nodeId = 15, + workflowId = Some("workflow-id"), + synchronizerId = someSynchronizerId.toProtoPrimitive, + ), + commonUpdateProperties = CommonUpdateProperties( + updateId = TestUpdateId("update").toHexString, + commandId = Some("command-id"), + traceContext = serializableTraceContext, + recordTime = Timestamp.assertFromLong(100L), + ), + externalTransactionHash = Some(someExternalTransactionHashBinary), + ), + contractId = hashCid("c1"), + templateId = Identifier + .assertFromString("package:pl:ate") + .toFullIdentifier(PackageName.assertFromString("tem")), + exerciseConsuming = false, + exerciseChoice = ChoiceName.assertFromString("choice"), + exerciseChoiceInterface = Option(Identifier.assertFromString("in:ter:face")), + exerciseArgument = Array(1, 2, 3), + exerciseArgumentCompression = Some(1), + exerciseResult = Some(Array(2, 3, 4)), + exerciseResultCompression = Some(2), + exerciseActors = Set("actor1", "actor2"), + exerciseLastDescendantNodeId = 3, + filteredAdditionalWitnessParties = Set("witness1"), + filteredStakeholderParties = Set.empty, + ledgerEffectiveTime = Timestamp.assertFromLong(123456), + acsDelta = false, + ), + ) + } + + it should "fetch correctly EventQueryService results" in { + implicit val eq: Equality[RawThinCreatedEvent] = caseClassArrayEq + implicit val eq2: Equality[RawArchivedEvent] = caseClassArrayEq + + val createOnlyInternalContractId = 10L + val createAndArchiveInternalContractId = 11L + val transientInternalContractId = 12L + val divulgedInternalContractId = 13L + + val dbDtos = Vector( + dtosCreate( + event_sequential_id = 1L, + internal_contract_id = createOnlyInternalContractId, + event_offset = 116, + )(), + dtosCreate( + event_sequential_id = 2L, + internal_contract_id = createAndArchiveInternalContractId, + event_offset = 136, + )(), + dtosConsumingExercise( + event_sequential_id = 3L, + internal_contract_id = Some(createAndArchiveInternalContractId), + event_offset = 146, + ), + dtosWitnessedCreate( + event_sequential_id = 4L, + internal_contract_id = transientInternalContractId, + event_offset = 156, + )(), + dtosWitnessedExercised( + event_sequential_id = 5L, + internal_contract_id = Some(transientInternalContractId), + event_offset = 156, + ), + dtosWitnessedCreate( + event_sequential_id = 6L, + internal_contract_id = divulgedInternalContractId, + event_offset = 160, + )(), + ).flatten + + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(ingest(dbDtos, _)) + executeSql( + updateLedgerEnd(offset(10000), 10000L) + ) + + val createOnly = executeSql( + backend.event.eventReaderQueries.fetchContractIdEvents( + internalContractId = createOnlyInternalContractId, + requestingParties = + Some(Set("witness1", 
"stakeholder1", "submitter1", "actor1").map(Party.assertFromString)), + endEventSequentialId = 10000L, + ) + ) + createOnly._1.value should equal( + RawThinCreatedEvent( + transactionProperties = TransactionProperties( + commonEventProperties = CommonEventProperties( + eventSequentialId = 1L, + offset = 116L, + nodeId = 15, + workflowId = Some("workflow-id"), + synchronizerId = someSynchronizerId.toProtoPrimitive, + ), + commonUpdateProperties = CommonUpdateProperties( + updateId = TestUpdateId("update").toHexString, + commandId = Some("command-id"), + traceContext = serializableTraceContext, + recordTime = Timestamp.assertFromLong(100L), + ), + externalTransactionHash = Some(someExternalTransactionHashBinary), + ), + thinCreatedEventProperties = ThinCreatedEventProperties( + representativePackageId = "representativepackage", + filteredAdditionalWitnessParties = Set.empty, + internalContractId = createOnlyInternalContractId, + requestingParties = Some(Set("witness1", "stakeholder1", "submitter1", "actor1")), + reassignmentCounter = 0L, + acsDelta = true, + ), + ) + ) + createOnly._2 shouldBe None + + val createAndArchiveOnly = executeSql( + backend.event.eventReaderQueries.fetchContractIdEvents( + internalContractId = createAndArchiveInternalContractId, + requestingParties = + Some(Set("witness1", "stakeholder1", "submitter1", "actor1").map(Party.assertFromString)), + endEventSequentialId = 10000L, + ) + ) + createAndArchiveOnly._1.value should equal( + RawThinCreatedEvent( + transactionProperties = TransactionProperties( + commonEventProperties = CommonEventProperties( + eventSequentialId = 2L, + offset = 136L, + nodeId = 15, + workflowId = Some("workflow-id"), + synchronizerId = someSynchronizerId.toProtoPrimitive, + ), + commonUpdateProperties = CommonUpdateProperties( + updateId = TestUpdateId("update").toHexString, + commandId = Some("command-id"), + traceContext = serializableTraceContext, + recordTime = Timestamp.assertFromLong(100L), + ), + externalTransactionHash = Some(someExternalTransactionHashBinary), + ), + thinCreatedEventProperties = ThinCreatedEventProperties( + representativePackageId = "representativepackage", + filteredAdditionalWitnessParties = Set.empty, + internalContractId = createAndArchiveInternalContractId, + requestingParties = Some(Set("witness1", "stakeholder1", "submitter1", "actor1")), + reassignmentCounter = 0L, + acsDelta = true, + ), + ) + ) + createAndArchiveOnly._2.value should equal( + RawArchivedEvent( + transactionProperties = TransactionProperties( + commonEventProperties = CommonEventProperties( + eventSequentialId = 3L, + offset = 146L, + nodeId = 15, + workflowId = Some("workflow-id"), + synchronizerId = someSynchronizerId.toProtoPrimitive, + ), + commonUpdateProperties = CommonUpdateProperties( + updateId = TestUpdateId("update").toHexString, + commandId = Some("command-id"), + traceContext = serializableTraceContext, + recordTime = Timestamp.assertFromLong(100L), + ), + externalTransactionHash = Some(someExternalTransactionHashBinary), + ), + contractId = hashCid("c1"), + templateId = Identifier + .assertFromString("package:pl:ate") + .toFullIdentifier(PackageName.assertFromString("tem")), + filteredStakeholderParties = Set("stakeholder1"), + ledgerEffectiveTime = Timestamp.assertFromLong(123456), + ) + ) + + val transient = executeSql( + backend.event.eventReaderQueries.fetchContractIdEvents( + internalContractId = transientInternalContractId, + requestingParties = + Some(Set("witness1", "stakeholder1", "submitter1", 
"actor1").map(Party.assertFromString)), + endEventSequentialId = 10000L, + ) + ) + transient._1.value should equal( + RawThinCreatedEvent( + transactionProperties = TransactionProperties( + commonEventProperties = CommonEventProperties( + eventSequentialId = 4L, + offset = 156L, + nodeId = 15, + workflowId = Some("workflow-id"), + synchronizerId = someSynchronizerId.toProtoPrimitive, + ), + commonUpdateProperties = CommonUpdateProperties( + updateId = TestUpdateId("update").toHexString, + commandId = Some("command-id"), + traceContext = serializableTraceContext, + recordTime = Timestamp.assertFromLong(100L), + ), + externalTransactionHash = Some(someExternalTransactionHashBinary), + ), + thinCreatedEventProperties = ThinCreatedEventProperties( + representativePackageId = "representativepackage", + filteredAdditionalWitnessParties = Set("witness1"), + internalContractId = transientInternalContractId, + requestingParties = Some(Set("witness1", "stakeholder1", "submitter1", "actor1")), + reassignmentCounter = 0L, + acsDelta = false, + ), + ) + ) + transient._2.value should equal( + RawArchivedEvent( + transactionProperties = TransactionProperties( + commonEventProperties = CommonEventProperties( + eventSequentialId = 5L, + offset = 156L, + nodeId = 15, + workflowId = Some("workflow-id"), + synchronizerId = someSynchronizerId.toProtoPrimitive, + ), + commonUpdateProperties = CommonUpdateProperties( + updateId = TestUpdateId("update").toHexString, + commandId = Some("command-id"), + traceContext = serializableTraceContext, + recordTime = Timestamp.assertFromLong(100L), + ), + externalTransactionHash = Some(someExternalTransactionHashBinary), + ), + contractId = hashCid("c1"), + templateId = Identifier + .assertFromString("package:pl:ate") + .toFullIdentifier(PackageName.assertFromString("tem")), + filteredStakeholderParties = Set.empty, + ledgerEffectiveTime = Timestamp.assertFromLong(123456), + ) + ) + + val divulged = executeSql( + backend.event.eventReaderQueries.fetchContractIdEvents( + internalContractId = divulgedInternalContractId, + requestingParties = + Some(Set("witness1", "stakeholder1", "submitter1", "actor1").map(Party.assertFromString)), + endEventSequentialId = 10000L, + ) + ) + divulged shouldBe (None -> None) + } } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsInitializeIngestion.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsInitializeIngestion.scala index 874b4aab71..725d93de1c 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsInitializeIngestion.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsInitializeIngestion.scala @@ -6,8 +6,10 @@ package com.digitalasset.canton.platform.store.backend import com.digitalasset.canton.data.Offset import com.digitalasset.canton.logging.SuppressingLogger import com.digitalasset.canton.platform.store.backend.EventStorageBackend.SequentialIdBatch.IdRange -import com.digitalasset.canton.platform.store.backend.common.EventIdSource +import com.digitalasset.canton.platform.store.backend.common.EventIdSourceLegacy import com.digitalasset.canton.platform.store.backend.common.UpdatePointwiseQueries.LookupKey +import com.digitalasset.canton.platform.store.dao.PaginatingAsyncStream.PaginationInput +import 
com.digitalasset.canton.protocol.UpdateId import com.digitalasset.daml.lf.data.Ref import org.scalatest.Inside import org.scalatest.compatible.Assertion @@ -69,9 +71,19 @@ private[backend] trait StorageBackendTestsInitializeIngestion { val dtos = Vector( // 1: transaction with a create node - dtoCreate(offset(1), 1L, hashCid("#101"), signatory = signatory), - DbDto.IdFilterCreateStakeholder(1L, someTemplateId.toString, someParty), - DbDto.IdFilterCreateNonStakeholderInformee(1L, someTemplateId.toString, someParty), + dtoCreateLegacy(offset(1), 1L, hashCid("#101"), signatory = signatory), + DbDto.IdFilterCreateStakeholder( + 1L, + someTemplateId.toString, + someParty, + first_per_sequential_id = true, + ), + DbDto.IdFilterCreateNonStakeholderInformee( + 1L, + someTemplateId.toString, + someParty, + first_per_sequential_id = true, + ), dtoTransactionMeta( offset(1), event_sequential_id_first = 1L, @@ -79,11 +91,26 @@ private[backend] trait StorageBackendTestsInitializeIngestion ), dtoCompletion(offset(41)), // 2: transaction with exercise node - dtoExercise(offset(2), 2L, false, hashCid("#101")), - DbDto.IdFilterNonConsumingInformee(2L, someTemplateId.toString, someParty), - dtoExercise(offset(2), 3L, true, hashCid("#102")), - DbDto.IdFilterConsumingStakeholder(3L, someTemplateId.toString, someParty), - DbDto.IdFilterConsumingNonStakeholderInformee(3L, someTemplateId.toString, someParty), + dtoExerciseLegacy(offset(2), 2L, false, hashCid("#101")), + DbDto.IdFilterNonConsumingInformee( + 2L, + someTemplateId.toString, + someParty, + first_per_sequential_id = true, + ), + dtoExerciseLegacy(offset(2), 3L, true, hashCid("#102")), + DbDto.IdFilterConsumingStakeholder( + 3L, + someTemplateId.toString, + someParty, + first_per_sequential_id = true, + ), + DbDto.IdFilterConsumingNonStakeholderInformee( + 3L, + someTemplateId.toString, + someParty, + first_per_sequential_id = true, + ), dtoTransactionMeta( offset(2), event_sequential_id_first = 2L, @@ -91,21 +118,41 @@ private[backend] trait StorageBackendTestsInitializeIngestion ), dtoCompletion(offset(2)), // 3: assign - dtoAssign( + dtoAssignLegacy( offset(3), eventSequentialId = 4, contractId = hashCid("#103"), ), - DbDto.IdFilterAssignStakeholder(4, someTemplateId.toString, someParty), - DbDto.IdFilterAssignStakeholder(4, someTemplateId.toString, someParty2), + DbDto.IdFilterAssignStakeholder( + 4, + someTemplateId.toString, + someParty, + first_per_sequential_id = true, + ), + DbDto.IdFilterAssignStakeholder( + 4, + someTemplateId.toString, + someParty2, + first_per_sequential_id = false, + ), // 4: unassign - dtoUnassign( + dtoUnassignLegacy( offset(4), eventSequentialId = 5, contractId = hashCid("#103"), ), - DbDto.IdFilterUnassignStakeholder(5, someTemplateId.toString, someParty), - DbDto.IdFilterUnassignStakeholder(5, someTemplateId.toString, someParty2), + DbDto.IdFilterUnassignStakeholder( + 5, + someTemplateId.toString, + someParty, + first_per_sequential_id = true, + ), + DbDto.IdFilterUnassignStakeholder( + 5, + someTemplateId.toString, + someParty2, + first_per_sequential_id = false, + ), // 5: topology transactions dtoPartyToParticipant( offset(5), @@ -124,9 +171,19 @@ private[backend] trait StorageBackendTestsInitializeIngestion it should "delete overspill entries - events, transaction meta, completions" in { val dtos2 = Vector( // 6: transaction with create node - dtoCreate(offset(6), 8L, hashCid("#201"), signatory = signatory), - DbDto.IdFilterCreateStakeholder(8L, someTemplateId.toString, someParty), - 
DbDto.IdFilterCreateNonStakeholderInformee(8L, someTemplateId.toString, someParty), + dtoCreateLegacy(offset(6), 8L, hashCid("#201"), signatory = signatory), + DbDto.IdFilterCreateStakeholder( + 8L, + someTemplateId.toString, + someParty, + first_per_sequential_id = true, + ), + DbDto.IdFilterCreateNonStakeholderInformee( + 8L, + someTemplateId.toString, + someParty, + first_per_sequential_id = true, + ), dtoTransactionMeta( offset(6), event_sequential_id_first = 8L, @@ -134,11 +191,26 @@ private[backend] trait StorageBackendTestsInitializeIngestion ), dtoCompletion(offset(6)), // 7: transaction with exercise node - dtoExercise(offset(7), 9L, false, hashCid("#201")), - DbDto.IdFilterNonConsumingInformee(9L, someTemplateId.toString, someParty), - dtoExercise(offset(7), 10L, true, hashCid("#202")), - DbDto.IdFilterConsumingStakeholder(10L, someTemplateId.toString, someParty), - DbDto.IdFilterConsumingNonStakeholderInformee(10L, someTemplateId.toString, someParty), + dtoExerciseLegacy(offset(7), 9L, false, hashCid("#201")), + DbDto.IdFilterNonConsumingInformee( + 9L, + someTemplateId.toString, + someParty, + first_per_sequential_id = true, + ), + dtoExerciseLegacy(offset(7), 10L, true, hashCid("#202")), + DbDto.IdFilterConsumingStakeholder( + 10L, + someTemplateId.toString, + someParty, + first_per_sequential_id = true, + ), + DbDto.IdFilterConsumingNonStakeholderInformee( + 10L, + someTemplateId.toString, + someParty, + first_per_sequential_id = true, + ), dtoTransactionMeta( offset(7), event_sequential_id_first = 9L, @@ -146,21 +218,41 @@ private[backend] trait StorageBackendTestsInitializeIngestion ), dtoCompletion(offset(7)), // 8: assign - dtoAssign( + dtoAssignLegacy( offset(8), eventSequentialId = 11, contractId = hashCid("#203"), ), - DbDto.IdFilterAssignStakeholder(11, someTemplateId.toString, someParty), - DbDto.IdFilterAssignStakeholder(11, someTemplateId.toString, someParty2), + DbDto.IdFilterAssignStakeholder( + 11, + someTemplateId.toString, + someParty, + first_per_sequential_id = true, + ), + DbDto.IdFilterAssignStakeholder( + 11, + someTemplateId.toString, + someParty2, + first_per_sequential_id = false, + ), // 9: unassign - dtoUnassign( + dtoUnassignLegacy( offset(9), eventSequentialId = 12, contractId = hashCid("#203"), ), - DbDto.IdFilterUnassignStakeholder(12, someTemplateId.toString, someParty), - DbDto.IdFilterUnassignStakeholder(12, someTemplateId.toString, someParty2), + DbDto.IdFilterUnassignStakeholder( + 12, + someTemplateId.toString, + someParty, + first_per_sequential_id = true, + ), + DbDto.IdFilterUnassignStakeholder( + 12, + someTemplateId.toString, + someParty2, + first_per_sequential_id = false, + ), // 10: topology transactions dtoPartyToParticipant( offset(10), @@ -187,36 +279,36 @@ private[backend] trait StorageBackendTestsInitializeIngestion val contractsCreated = executeSql( backend.contract - .createdContracts(List(hashCid("#101"), hashCid("#201")), offset(1000)) + .createdContracts(List(hashCid("#101"), hashCid("#201")), 1000) ) val contractsArchived = executeSql( backend.contract - .archivedContracts(List(hashCid("#101"), hashCid("#201")), offset(1000)) + .archivedContracts(List(hashCid("#101"), hashCid("#201")), 1000) ) val contractsAssigned = executeSql( backend.contract - .assignedContracts(List(hashCid("#103"), hashCid("#203")), offset(1000)) + .assignedContracts(List(hashCid("#103"), hashCid("#203")), 1000) ) val assignedEvents = executeSql( - backend.event.assignEventBatch(IdRange(1L, 100L), Some(Set.empty)) + 
backend.event.assignEventBatchLegacy(IdRange(1L, 100L), Some(Set.empty)) ).map(_.event.rawCreatedEvent.contractId) val unassignedEvents = executeSql( - backend.event.unassignEventBatch(IdRange(1L, 100L), Some(Set.empty)) + backend.event.unassignEventBatchLegacy(IdRange(1L, 100L), Some(Set.empty)) ).map(_.event.contractId) val topologyPartyEvents = executeSql( backend.event.topologyPartyEventBatch(IdRange(1L, 100L)) ).map(_.partyId) - contractsCreated.get(hashCid("#101")) should not be empty - contractsCreated.get(hashCid("#201")) should not be empty - contractsArchived.get(hashCid("#101")) shouldBe empty - contractsArchived.get(hashCid("#201")) shouldBe empty - contractsAssigned.get(hashCid("#103")) should not be empty - contractsAssigned.get(hashCid("#203")) should not be empty + contractsCreated should contain(hashCid("#101")) + contractsCreated should contain(hashCid("#201")) + contractsArchived should not contain hashCid("#101") + contractsArchived should not contain hashCid("#201") + contractsAssigned should contain(hashCid("#103")) + contractsAssigned should contain(hashCid("#203")) assignedEvents shouldBe List( hashCid("#103"), hashCid("#203"), @@ -256,36 +348,36 @@ private[backend] trait StorageBackendTestsInitializeIngestion val contractsCreated = executeSql( backend.contract - .createdContracts(List(hashCid("#101"), hashCid("#201")), offset(1000)) + .createdContracts(List(hashCid("#101"), hashCid("#201")), 1000) ) val contractsArchived = executeSql( backend.contract - .archivedContracts(List(hashCid("#101"), hashCid("#201")), offset(1000)) + .archivedContracts(List(hashCid("#101"), hashCid("#201")), 1000) ) val contractsAssigned = executeSql( backend.contract - .assignedContracts(List(hashCid("#103"), hashCid("#203")), offset(1000)) + .assignedContracts(List(hashCid("#103"), hashCid("#203")), 1000) ) val assignedEvents = executeSql( - backend.event.assignEventBatch(IdRange(1L, 100L), Some(Set.empty)) + backend.event.assignEventBatchLegacy(IdRange(1L, 100L), Some(Set.empty)) ).map(_.event.rawCreatedEvent.contractId) val unassignedEvents = executeSql( - backend.event.unassignEventBatch(IdRange(1L, 100L), Some(Set.empty)) + backend.event.unassignEventBatchLegacy(IdRange(1L, 100L), Some(Set.empty)) ).map(_.event.contractId) val topologyPartyEvents = executeSql( backend.event.topologyPartyEventBatch(IdRange(1L, 100L)) ).map(_.partyId) - contractsCreated.get(hashCid("#101")) should not be empty - contractsCreated.get(hashCid("#201")) shouldBe empty - contractsArchived.get(hashCid("#101")) shouldBe empty - contractsArchived.get(hashCid("#201")) shouldBe empty - contractsAssigned.get(hashCid("#103")) should not be empty - contractsAssigned.get(hashCid("#203")) shouldBe empty + contractsCreated should contain(hashCid("#101")) + contractsCreated should not contain hashCid("#201") + contractsArchived should not contain hashCid("#101") + contractsArchived should not contain hashCid("#201") + contractsAssigned should contain(hashCid("#103")) + contractsAssigned should not contain hashCid("#203") assignedEvents shouldBe List(hashCid("#103")) // not constrained by ledger end unassignedEvents shouldBe List(hashCid("#103")) // not constrained by ledger end topologyPartyEvents shouldBe List( @@ -322,28 +414,28 @@ private[backend] trait StorageBackendTestsInitializeIngestion val contractsCreated = executeSql( backend.contract - .createdContracts(List(hashCid("#101"), hashCid("#201")), offset(1000)) + .createdContracts(List(hashCid("#101"), hashCid("#201")), 1000) ) val contractsAssigned = 
executeSql( backend.contract - .assignedContracts(List(hashCid("#103"), hashCid("#203")), offset(1000)) + .assignedContracts(List(hashCid("#103"), hashCid("#203")), 1000) ) val assignedEvents = executeSql( - backend.event.assignEventBatch(IdRange(1L, 100L), Some(Set.empty)) + backend.event.assignEventBatchLegacy(IdRange(1L, 100L), Some(Set.empty)) ).map(_.event.rawCreatedEvent.contractId) val unassignedEvents = executeSql( - backend.event.unassignEventBatch(IdRange(1L, 100L), Some(Set.empty)) + backend.event.unassignEventBatchLegacy(IdRange(1L, 100L), Some(Set.empty)) ).map(_.event.contractId) val topologyPartyEvents = executeSql( backend.event.topologyPartyEventBatch(IdRange(1L, 100L)) ).map(_.partyId) - contractsCreated.get(hashCid("#101")) shouldBe None - contractsAssigned.get(hashCid("#103")) shouldBe empty - contractsAssigned.get(hashCid("#203")) shouldBe empty + contractsCreated should not contain hashCid("#101") + contractsAssigned should not contain hashCid("#103") + contractsAssigned should not contain hashCid("#203") assignedEvents shouldBe empty unassignedEvents shouldBe empty topologyPartyEvents shouldBe empty @@ -368,105 +460,131 @@ private[backend] trait StorageBackendTestsInitializeIngestion private def fetchIdsNonConsuming(): Vector[Long] = executeSql( - backend.event.updateStreamingQueries.fetchEventIds( - EventIdSource.NonConsumingInformee + backend.event.updateStreamingQueries.fetchEventIdsLegacy( + EventIdSourceLegacy.NonConsumingInformee )( stakeholderO = Some(someParty), templateIdO = None, - startExclusive = 0, - endInclusive = 1000, - limit = 1000, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 1000, + limit = 1000, + ) ) ) private def fetchIdsConsumingNonStakeholder(): Vector[Long] = executeSql( backend.event.updateStreamingQueries - .fetchEventIds(EventIdSource.ConsumingNonStakeholder)( + .fetchEventIdsLegacy(EventIdSourceLegacy.ConsumingNonStakeholder)( stakeholderO = Some(someParty), templateIdO = None, - startExclusive = 0, - endInclusive = 1000, - limit = 1000, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 1000, + limit = 1000, + ) ) ) private def fetchIdsConsumingStakeholder(): Vector[Long] = executeSql( backend.event.updateStreamingQueries - .fetchEventIds(EventIdSource.ConsumingStakeholder)( + .fetchEventIdsLegacy(EventIdSourceLegacy.ConsumingStakeholder)( stakeholderO = Some(someParty), templateIdO = None, - startExclusive = 0, - endInclusive = 1000, - limit = 1000, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 1000, + limit = 1000, + ) ) ) private def fetchIdsCreateNonStakeholder(): Vector[Long] = executeSql( backend.event.updateStreamingQueries - .fetchEventIds(EventIdSource.CreateNonStakeholder)( + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateNonStakeholder)( stakeholderO = Some(someParty), templateIdO = None, - startExclusive = 0, - endInclusive = 1000, - limit = 1000, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 1000, + limit = 1000, + ) ) ) private def fetchIdsCreateStakeholder(): Vector[Long] = executeSql( backend.event.updateStreamingQueries - .fetchEventIds(EventIdSource.CreateStakeholder)( + .fetchEventIdsLegacy(EventIdSourceLegacy.CreateStakeholder)( stakeholderO = Some(someParty), templateIdO = None, - startExclusive = 0, - endInclusive = 1000, - limit = 1000, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 1000, + limit = 1000, + ) ) ) private def fetchIdsAssignStakeholder(): Vector[Long] = executeSql( - 
backend.event.fetchAssignEventIdsForStakeholder( + backend.event.fetchAssignEventIdsForStakeholderLegacy( stakeholderO = Some(someParty), templateId = None, - startExclusive = 0, - endInclusive = 1000, - limit = 1000, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 1000, + limit = 1000, + ) ) ) private def fetchIdsUnassignStakeholder(): Vector[Long] = executeSql( - backend.event.fetchUnassignEventIdsForStakeholder( + backend.event.fetchUnassignEventIdsForStakeholderLegacy( stakeholderO = Some(someParty), templateId = None, - startExclusive = 0, - endInclusive = 1000, - limit = 1000, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 1000, + limit = 1000, + ) ) ) private def fetchTopologyParty(): Vector[Long] = executeSql( backend.event.fetchTopologyPartyEventIds( - party = Some(someParty), - startExclusive = 0, - endInclusive = 1000, - limit = 1000, + party = Some(someParty) + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 1000, + limit = 1000, + ) ) ) - private def fetchIdsFromTransactionMetaUpdateIds(udpateIds: Seq[String]): Set[(Long, Long)] = { + private def fetchIdsFromTransactionMetaUpdateIds( + updateIds: Seq[Array[Byte]] + ): Set[(Long, Long)] = { val txPointwiseQueries = backend.event.updatePointwiseQueries - udpateIds - .map(Ref.TransactionId.assertFromString) + updateIds + .map(UpdateId.tryFromByteArray) .map { updateId => executeSql( txPointwiseQueries.fetchIdsFromUpdateMeta( - lookupKey = LookupKey.UpdateId(updateId) + lookupKey = LookupKey.ByUpdateId(updateId) ) ) } @@ -481,7 +599,7 @@ private[backend] trait StorageBackendTestsInitializeIngestion .map { offset => executeSql( txPointwiseQueries.fetchIdsFromUpdateMeta( - lookupKey = LookupKey.Offset(offset) + lookupKey = LookupKey.ByOffset(offset) ) ) } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsIntegrity.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsIntegrity.scala index 8f78a3c929..e434f55666 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsIntegrity.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsIntegrity.scala @@ -9,6 +9,7 @@ import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransacti Revoked, } import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective.AuthorizationLevel +import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.daml.lf.data.Time.Timestamp import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers @@ -32,8 +33,8 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag it should "find duplicate event ids" in { val updates = Vector( - dtoCreate(offset(7), 7L, hashCid("#7")), - dtoCreate(offset(7), 7L, hashCid("#7")), // duplicate id + dtoCreateLegacy(offset(7), 7L, hashCid("#7")), + dtoCreateLegacy(offset(7), 7L, hashCid("#7")), // duplicate id ) executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) @@ -48,8 +49,8 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag it should "find duplicate event ids with different offsets" in { val updates = Vector( - dtoCreate(offset(6), 7L, hashCid("#7")), - dtoCreate(offset(7), 7L, 
hashCid("#7")), // duplicate id + dtoCreateLegacy(offset(6), 7L, hashCid("#7")), + dtoCreateLegacy(offset(7), 7L, hashCid("#7")), // duplicate id ) executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) @@ -64,8 +65,8 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag it should "find non-consecutive event ids" in { val updates = Vector( - dtoCreate(offset(1), 1L, hashCid("#1")), - dtoCreate(offset(3), 3L, hashCid("#3")), // non-consecutive id + dtoCreateLegacy(offset(1), 1L, hashCid("#1")), + dtoCreateLegacy(offset(3), 3L, hashCid("#3")), // non-consecutive id ) executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) @@ -80,13 +81,13 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag it should "not find non-consecutive event ids if those gaps are before the pruning offset" in { val updates = Vector( - dtoCreate(offset(1), 1L, hashCid("#1")), - dtoCreate( + dtoCreateLegacy(offset(1), 1L, hashCid("#1")), + dtoCreateLegacy( offset(3), 3L, hashCid("#3"), ), // non-consecutive id but after pruning offset - dtoCreate(offset(4), 4L, hashCid("#4")), + dtoCreateLegacy(offset(4), 4L, hashCid("#4")), ) executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) @@ -98,39 +99,39 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag it should "detect monotonicity violation of record times for one synchronizer in created table" in { val updates = Vector( - dtoCreate( + dtoCreateLegacy( offset(1), 1L, hashCid("#1"), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = time5, ), - dtoCreate( + dtoCreateLegacy( offset(2), 2L, hashCid("#2"), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, recordTime = time1, ), - dtoCreate( + dtoCreateLegacy( offset(3), 3L, hashCid("#3"), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = time7, ), - dtoCreate( + dtoCreateLegacy( offset(4), 4L, hashCid("#4"), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, recordTime = time3, ), - dtoCreate( + dtoCreateLegacy( offset(5), 5L, hashCid("#5"), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = time6, ), ) @@ -147,40 +148,40 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag it should "detect monotonicity violation of record times for one synchronizer in consuming exercise table" in { val updates = Vector( - dtoCreate( + dtoCreateLegacy( offset(1), 1L, hashCid("#1"), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = time5, ), - dtoCreate( + dtoCreateLegacy( offset(2), 2L, hashCid("#2"), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, recordTime = time1, ), - dtoExercise( + dtoExerciseLegacy( offset(3), 3L, consuming = true, hashCid("#3"), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = time7, ), - dtoCreate( + dtoCreateLegacy( offset(4), 4L, hashCid("#4"), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, recordTime = time3, ), - dtoCreate( + dtoCreateLegacy( offset(5), 5L, hashCid("#5"), - synchronizerId = someSynchronizerId.toProtoPrimitive, + 
synchronizerId = someSynchronizerId, recordTime = time6, ), ) @@ -197,40 +198,40 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag it should "detect monotonicity violation of record times for one synchronizer in non-consuming exercise table" in { val updates = Vector( - dtoCreate( + dtoCreateLegacy( offset(1), 1L, hashCid("#1"), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = time5, ), - dtoCreate( + dtoCreateLegacy( offset(2), 2L, hashCid("#2"), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, recordTime = time1, ), - dtoExercise( + dtoExerciseLegacy( offset(3), 3L, consuming = false, hashCid("#3"), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = time7, ), - dtoCreate( + dtoCreateLegacy( offset(4), 4L, hashCid("#4"), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, recordTime = time3, ), - dtoCreate( + dtoCreateLegacy( offset(5), 5L, hashCid("#5"), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = time6, ), ) @@ -247,39 +248,39 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag it should "detect monotonicity violation of record times for one synchronizer in assign table" in { val updates = Vector( - dtoCreate( + dtoCreateLegacy( offset(1), 1L, hashCid("#1"), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = time5, ), - dtoCreate( + dtoCreateLegacy( offset(2), 2L, hashCid("#2"), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, recordTime = time1, ), - dtoAssign( + dtoAssignLegacy( offset(3), 3L, hashCid("#3"), - targetSynchronizerId = someSynchronizerId.toProtoPrimitive, + targetSynchronizerId = someSynchronizerId, recordTime = time7, ), - dtoCreate( + dtoCreateLegacy( offset(4), 4L, hashCid("#4"), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, recordTime = time3, ), - dtoCreate( + dtoCreateLegacy( offset(5), 5L, hashCid("#5"), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = time6, ), ) @@ -296,39 +297,39 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag it should "detect monotonicity violation of record times for one synchronizer in unassign table" in { val updates = Vector( - dtoCreate( + dtoCreateLegacy( offset(1), 1L, hashCid("#1"), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = time5, ), - dtoCreate( + dtoCreateLegacy( offset(2), 2L, hashCid("#2"), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, recordTime = time1, ), - dtoUnassign( + dtoUnassignLegacy( offset(3), 3L, hashCid("#3"), - sourceSynchronizerId = someSynchronizerId.toProtoPrimitive, + sourceSynchronizerId = someSynchronizerId, recordTime = time7, ), - dtoCreate( + dtoCreateLegacy( offset(4), 4L, hashCid("#4"), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, recordTime = time3, ), - dtoCreate( + dtoCreateLegacy( offset(5), 5L, hashCid("#5"), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = time6, ), ) @@ -345,37 +346,37 @@ 
private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag it should "detect monotonicity violation of record times for one synchronizer in completions table" in { val updates = Vector( - dtoCreate( + dtoCreateLegacy( offset(1), 1L, hashCid("#1"), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = time5, ), - dtoCreate( + dtoCreateLegacy( offset(2), 2L, hashCid("#2"), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, recordTime = time1, ), dtoCompletion( offset(3), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = time7, ), - dtoCreate( + dtoCreateLegacy( offset(4), 3L, hashCid("#4"), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, recordTime = time3, ), - dtoCreate( + dtoCreateLegacy( offset(5), 4L, hashCid("#5"), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = time6, ), ) @@ -392,38 +393,38 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag it should "detect monotonicity violation of record times for one synchronizer in completions table, if it is a timely-reject going backwards" in { val updates = Vector( - dtoCreate( + dtoCreateLegacy( offset(1), 1L, hashCid("#1"), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = time5, ), - dtoCreate( + dtoCreateLegacy( offset(2), 2L, hashCid("#2"), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, recordTime = time1, ), dtoCompletion( offset(3), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = time7, messageUuid = Some("message uuid"), ), - dtoCreate( + dtoCreateLegacy( offset(4), 3L, hashCid("#4"), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, recordTime = time3, ), - dtoCreate( + dtoCreateLegacy( offset(5), 4L, hashCid("#5"), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = time6, ), ) @@ -446,7 +447,7 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag someParty, someParticipantId.toString, Added(AuthorizationLevel.Submission), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = time5, ), dtoPartyToParticipant( @@ -455,7 +456,7 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag someParty, someParticipantId.toString, ChangedTo(AuthorizationLevel.Confirmation), - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, recordTime = time1, ), dtoPartyToParticipant( @@ -464,7 +465,7 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag someParty, someParticipantId.toString, ChangedTo(AuthorizationLevel.Observation), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = time7, ), dtoPartyToParticipant( @@ -473,7 +474,7 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag someParty, someParticipantId.toString, Revoked, - synchronizerId = someSynchronizerId2.toProtoPrimitive, + synchronizerId = someSynchronizerId2, recordTime = time3, ), dtoPartyToParticipant( @@ -482,7 +483,7 @@ private[backend] 
trait StorageBackendTestsIntegrity extends Matchers with Storag someParty, someParticipantId.toString, Added(AuthorizationLevel.Submission), - synchronizerId = someSynchronizerId.toProtoPrimitive, + synchronizerId = someSynchronizerId, recordTime = time6, ), ) @@ -503,25 +504,25 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag offset(1), 1L, 4L, - udpateId = Some(updateIdFromOffset(offset(1))), + udpateId = Some(updateIdArrayFromOffset(offset(1))), ), dtoTransactionMeta( offset(2), 1L, 4L, - udpateId = Some(updateIdFromOffset(offset(2))), + udpateId = Some(updateIdArrayFromOffset(offset(2))), ), dtoTransactionMeta( offset(3), 1L, 4L, - udpateId = Some(updateIdFromOffset(offset(2))), + udpateId = Some(updateIdArrayFromOffset(offset(2))), ), dtoTransactionMeta( offset(4), 1L, 4L, - udpateId = Some(updateIdFromOffset(offset(4))), + udpateId = Some(updateIdArrayFromOffset(offset(4))), ), ) @@ -530,8 +531,9 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag executeSql(updateLedgerEnd(offset(5), 4L)) val failure = intercept[RuntimeException](executeSql(backend.integrity.verifyIntegrity())) + val hashForOffset2 = updateIdFromOffset(offset(2)).toHexString failure.getMessage should include( - "occurrence of duplicate update ID [2] found for offsets Offset(2), Offset(3)" + s"occurrence of duplicate update ID [$hashForOffset2] found for offsets Offset(2), Offset(3)" ) } @@ -567,13 +569,13 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag offset(2), commandId = "commandid", submissionId = Some("submissionid"), - updateId = Some(updateIdFromOffset(offset(2))), + updateId = Some(updateIdArrayFromOffset(offset(2))), ), dtoCompletion( offset(3), commandId = "commandid", submissionId = Some("submissionid"), - updateId = Some(updateIdFromOffset(offset(2))), + updateId = Some(updateIdArrayFromOffset(offset(2))), ), ) @@ -597,14 +599,14 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag offset(2), commandId = "commandid1", submissionId = Some("submissionid1"), - updateId = Some(updateIdFromOffset(offset(2))), + updateId = Some(updateIdArrayFromOffset(offset(2))), messageUuid = messageUuid, ), dtoCompletion( offset(3), commandId = "commandid", submissionId = Some("submissionid"), - updateId = Some(updateIdFromOffset(offset(3))), + updateId = Some(updateIdArrayFromOffset(offset(3))), messageUuid = messageUuid, ), ) @@ -628,14 +630,14 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag offset(2), commandId = "commandid", submissionId = Some("submissionid"), - updateId = Some(updateIdFromOffset(offset(2))), + updateId = Some(updateIdArrayFromOffset(offset(2))), ), dtoCompletion( offset(3), commandId = "commandid", submissionId = Some("submissionid"), - updateId = Some(updateIdFromOffset(offset(2))), - synchronizerId = "x::othersynchronizerid", + updateId = Some(updateIdArrayFromOffset(offset(2))), + synchronizerId = SynchronizerId.tryFromString("x::othersynchronizerid"), ), ) @@ -647,11 +649,11 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag it should "not find errors beyond the ledger end" in { val updates = Vector( - dtoCreate(offset(1), 1L, hashCid("#1")), - dtoCreate(offset(2), 2L, hashCid("#2")), - dtoCreate(offset(7), 7L, hashCid("#7")), // beyond the ledger end - dtoCreate(offset(7), 7L, hashCid("#7")), // duplicate id (beyond ledger end) - dtoCreate(offset(9), 9L, hashCid("#9")), // non-consecutive id 
(beyond ledger end) + dtoCreateLegacy(offset(1), 1L, hashCid("#1")), + dtoCreateLegacy(offset(2), 2L, hashCid("#2")), + dtoCreateLegacy(offset(7), 7L, hashCid("#7")), // beyond the ledger end + dtoCreateLegacy(offset(7), 7L, hashCid("#7")), // duplicate id (beyond ledger end) + dtoCreateLegacy(offset(9), 9L, hashCid("#9")), // non-consecutive id (beyond ledger end) ) executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsPartyToParticipant.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsPartyToParticipant.scala index 152b4b0469..43fd300087 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsPartyToParticipant.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsPartyToParticipant.scala @@ -22,6 +22,8 @@ import com.digitalasset.canton.platform.store.backend.EventStorageBackend.Sequen IdRange, Ids, } +import com.digitalasset.canton.platform.store.dao.PaginatingAsyncStream.PaginationInput +import com.digitalasset.canton.protocol.UpdateId import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.daml.lf.data.Ref import com.digitalasset.daml.lf.data.Time.Timestamp @@ -77,13 +79,13 @@ private[backend] trait StorageBackendTestsPartyToParticipant def toRaw(dbDto: DbDto.EventPartyToParticipant): RawParticipantAuthorization = RawParticipantAuthorization( offset = Offset.tryFromLong(dbDto.event_offset), - updateId = dbDto.update_id, + updateId = UpdateId.tryFromByteArray(dbDto.update_id).toHexString, partyId = dbDto.party_id, participantId = dbDto.participant_id, authorizationEvent = Conversions .authorizationEvent(dbDto.participant_authorization_event, dbDto.participant_permission), recordTime = Timestamp.assertFromLong(dbDto.record_time), - synchronizerId = dbDto.synchronizer_id, + synchronizerId = dbDto.synchronizer_id.toProtoPrimitive, traceContext = Some(dbDto.trace_context), ) @@ -95,10 +97,13 @@ private[backend] trait StorageBackendTestsPartyToParticipant executeSql(ingest(singleDto, _)) val eventsForAll = executeSql( backend.event.fetchTopologyPartyEventIds( - party = None, - startExclusive = 0L, - endInclusive = 10L, - limit = 10, + party = None + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) ) ) executeSql( @@ -106,10 +111,13 @@ private[backend] trait StorageBackendTestsPartyToParticipant ) val eventsForSomeParty = executeSql( backend.event.fetchTopologyPartyEventIds( - party = Some(someParty), - startExclusive = 0L, - endInclusive = 10L, - limit = 10, + party = Some(someParty) + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) ) ) @@ -122,10 +130,13 @@ private[backend] trait StorageBackendTestsPartyToParticipant executeSql(ingest(multipleDtos, _)) val eventsForAll = executeSql( backend.event.fetchTopologyPartyEventIds( - party = None, - startExclusive = 0L, - endInclusive = 10L, - limit = 10, + party = None + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) ) ) executeSql( @@ -133,10 +144,13 @@ private[backend] trait StorageBackendTestsPartyToParticipant ) val eventsForSomeParty = executeSql( backend.event.fetchTopologyPartyEventIds( - party = 
Some(someParty), - startExclusive = 0L, - endInclusive = 10L, - limit = 10, + party = Some(someParty) + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 10L, + limit = 10, + ) ) ) @@ -205,19 +219,19 @@ private[backend] trait StorageBackendTestsPartyToParticipant dtoPartyToParticipant( offset(1), 1L, - synchronizerId = synchronizerId1.toProtoPrimitive, + synchronizerId = synchronizerId1, recordTime = Timestamp.assertFromLong(1504), ), dtoPartyToParticipant( offset(2), 2L, - synchronizerId = synchronizerId1.toProtoPrimitive, + synchronizerId = synchronizerId1, recordTime = Timestamp.assertFromLong(1505), ), dtoPartyToParticipant( offset(3), 3L, - synchronizerId = synchronizerId1.toProtoPrimitive, + synchronizerId = synchronizerId1, recordTime = Timestamp.assertFromLong(1506), ), ), @@ -246,13 +260,13 @@ private[backend] trait StorageBackendTestsPartyToParticipant dtoPartyToParticipant( offset(1), 1L, - synchronizerId = synchronizerId1.toProtoPrimitive, + synchronizerId = synchronizerId1, recordTime = Timestamp.assertFromLong(1504), ), dtoPartyToParticipant( offset(3), 3L, - synchronizerId = synchronizerId1.toProtoPrimitive, + synchronizerId = synchronizerId1, recordTime = Timestamp.assertFromLong(1506), ), ), @@ -279,19 +293,19 @@ private[backend] trait StorageBackendTestsPartyToParticipant dtoPartyToParticipant( offset(1), 1L, - synchronizerId = synchronizerId1.toProtoPrimitive, + synchronizerId = synchronizerId1, recordTime = Timestamp.assertFromLong(1504), ), dtoPartyToParticipant( offset(2), 2L, - synchronizerId = synchronizerId2.toProtoPrimitive, + synchronizerId = synchronizerId2, recordTime = Timestamp.assertFromLong(1505), ), dtoPartyToParticipant( offset(3), 3L, - synchronizerId = synchronizerId1.toProtoPrimitive, + synchronizerId = synchronizerId1, recordTime = Timestamp.assertFromLong(1506), ), ), @@ -318,19 +332,19 @@ private[backend] trait StorageBackendTestsPartyToParticipant dtoPartyToParticipant( offset(1), 1L, - synchronizerId = synchronizerId1.toProtoPrimitive, + synchronizerId = synchronizerId1, recordTime = Timestamp.assertFromLong(1504), ), dtoPartyToParticipant( offset(2), 2L, - synchronizerId = synchronizerId1.toProtoPrimitive, + synchronizerId = synchronizerId1, recordTime = Timestamp.assertFromLong(1505), ), dtoPartyToParticipant( offset(3), 3L, - synchronizerId = synchronizerId1.toProtoPrimitive, + synchronizerId = synchronizerId1, recordTime = Timestamp.assertFromLong(1506), ), ), diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsPruning.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsPruning.scala index ca8bcf3ba9..3793a44662 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsPruning.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsPruning.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton.platform.store.backend import com.daml.scalautil.Statement import com.digitalasset.canton.data.Offset import com.digitalasset.canton.platform.store.backend.PruningDto.* +import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.daml.lf.data.Ref import org.scalatest.flatspec.AnyFlatSpec @@ -36,7 +37,7 @@ private[backend] trait StorageBackendTestsPruning ): Unit = 
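
The PartyToParticipant hunks above show a pagination refactor that recurs through the rest of this diff: the flat `startExclusive`/`endInclusive`/`limit` arguments are folded into a single `PaginationInput` (imported from `PaginatingAsyncStream`), and the backend calls are curried so the filter is fixed before the connection and the page are supplied. A minimal, self-contained sketch of that call shape, using assumed stand-in names rather than the real Canton signatures:

```scala
// Sketch only: assumed shapes, not the real Canton signatures.
final case class PaginationInput(startExclusive: Long, endInclusive: Long, limit: Int)

object PaginationSketch {
  // Old shape: paging parameters passed flat, next to the filter.
  def fetchIdsOld(
      party: Option[String],
      startExclusive: Long,
      endInclusive: Long,
      limit: Int,
  ): Vector[Long] = Vector.empty // stand-in for the SQL query

  // New shape: fix the filter first; the page arrives later as one value,
  // so a paginating caller can reuse the same prepared filter per page.
  def fetchIdsNew(party: Option[String])(page: PaginationInput): Vector[Long] =
    Vector.empty // stand-in for the SQL query

  def main(args: Array[String]): Unit = {
    // Mirrors the call shape in the tests: filter first, then the page.
    val firstPage = PaginationInput(startExclusive = 0L, endInclusive = 10L, limit = 10)
    val ids = fetchIdsNew(party = None)(firstPage)
    assert(ids.isEmpty)
  }
}
```

In the tests the middle parameter list carries the database connection (the `(_)`), which is why the calls read `fetchTopologyPartyEventIds(party = None)(_)(PaginationInput(...))`.
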
executeSql { conn => conn.setAutoCommit(false) - backend.event.pruneEvents( + backend.event.pruneEventsLegacy( pruneUpToInclusive, incompleteReassignmentOffsets, )( @@ -77,15 +78,20 @@ private[backend] trait StorageBackendTestsPruning executeSql( ingest( Vector( - dtoExercise( + dtoExerciseLegacy( offset = offset(3), eventSequentialId = 5L, contractId = hashCid("#1"), consuming = false, signatory = signatoryParty, ), - DbDto.IdFilterNonConsumingInformee(5L, someTemplateId.toString, signatoryParty), - dtoExercise( + DbDto.IdFilterNonConsumingInformee( + 5L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), + dtoExerciseLegacy( offset = offset(4), eventSequentialId = 6L, contractId = hashCid("#1"), @@ -93,26 +99,46 @@ private[backend] trait StorageBackendTestsPruning signatory = signatoryParty, actor = actorParty, ), - DbDto.IdFilterConsumingStakeholder(6L, someTemplateId.toString, signatoryParty), - DbDto.IdFilterConsumingNonStakeholderInformee(6L, someTemplateId.toString, actorParty), - dtoUnassign( + DbDto.IdFilterConsumingStakeholder( + 6L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), + DbDto.IdFilterConsumingNonStakeholderInformee( + 6L, + someTemplateId.toString, + actorParty, + first_per_sequential_id = true, + ), + dtoUnassignLegacy( offset = offset(5), eventSequentialId = 7L, contractId = hashCid("#1"), signatory = signatoryParty, ), - DbDto.IdFilterUnassignStakeholder(7L, someTemplateId.toString, signatoryParty), + DbDto.IdFilterUnassignStakeholder( + 7L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), ) ++ Vector( - dtoExercise( + dtoExerciseLegacy( offset = offset(6), eventSequentialId = 8L, contractId = hashCid("#2"), consuming = false, signatory = signatoryParty, ), - DbDto.IdFilterNonConsumingInformee(8L, someTemplateId.toString, signatoryParty), - dtoExercise( + DbDto.IdFilterNonConsumingInformee( + 8L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), + dtoExerciseLegacy( offset = offset(7), eventSequentialId = 9L, contractId = hashCid("#2"), @@ -120,15 +146,30 @@ private[backend] trait StorageBackendTestsPruning signatory = signatoryParty, actor = actorParty, ), - DbDto.IdFilterConsumingStakeholder(9L, someTemplateId.toString, signatoryParty), - DbDto.IdFilterConsumingNonStakeholderInformee(9L, someTemplateId.toString, actorParty), - dtoUnassign( + DbDto.IdFilterConsumingStakeholder( + 9L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), + DbDto.IdFilterConsumingNonStakeholderInformee( + 9L, + someTemplateId.toString, + actorParty, + first_per_sequential_id = true, + ), + dtoUnassignLegacy( offset = offset(8), eventSequentialId = 10L, contractId = hashCid("#1"), signatory = signatoryParty, ), - DbDto.IdFilterUnassignStakeholder(10L, someTemplateId.toString, signatoryParty), + DbDto.IdFilterUnassignStakeholder( + 10L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), ), _, ) @@ -138,13 +179,13 @@ private[backend] trait StorageBackendTestsPruning def assertAllDataPresent(): Assertion = assertIndexDbDataSql( consuming = Vector(EventConsuming(6), EventConsuming(9)), consumingFilterStakeholder = - Vector(FilterConsumingStakeholder(6, 4), FilterConsumingStakeholder(9, 4)), + Vector(FilterConsumingStakeholder(6, 6), FilterConsumingStakeholder(9, 6)), consumingFilterNonStakeholder = Vector(FilterConsumingNonStakeholder(6, 1), FilterConsumingNonStakeholder(9, 1)), 
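
The pruning hunks here and below thread a new `first_per_sequential_id` flag through every `DbDto.IdFilter*` row: of the rows fanned out for one event, the first (e.g. the signatory's) carries `true` and the rest (e.g. the observer's) carry `false`. A self-contained sketch of that flagging and of the per-id deduplication it appears to enable (compare the later `Vector(1, 1, 2)` → `Vector(1, 2)` expectation changes); the DTO shape is simplified here:

```scala
// Sketch only: a simplified id-filter row, not the real DbDto definitions.
final case class IdFilterRow(
    eventSequentialId: Long,
    templateId: String,
    party: String,
    firstPerSequentialId: Boolean,
)

object FirstPerSeqIdSketch {
  // One event fans out into one filter row per informee party; exactly the
  // first row per event sequential id carries the flag.
  def fanOut(eventSequentialId: Long, templateId: String, parties: Seq[String]): Seq[IdFilterRow] =
    parties.zipWithIndex.map { case (party, idx) =>
      IdFilterRow(eventSequentialId, templateId, party, firstPerSequentialId = idx == 0)
    }

  def main(args: Array[String]): Unit = {
    val rows = fanOut(1L, "pkg:Mod:Tmpl", Seq("signatory", "observer"))
    assert(rows.map(_.firstPerSequentialId) == Seq(true, false))
    // A party-agnostic id query can key on the flag to emit each event
    // sequential id exactly once, instead of once per stakeholder row.
    assert(rows.filter(_.firstPerSequentialId).map(_.eventSequentialId) == Seq(1L))
  }
}
```
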
nonConsuming = Vector(EventNonConsuming(5), EventNonConsuming(8)), - nonConsumingFilter = Vector(FilterNonConsuming(5, 4), FilterNonConsuming(8, 4)), + nonConsumingFilter = Vector(FilterNonConsuming(5, 6), FilterNonConsuming(8, 6)), unassign = Vector(EventUnassign(7), EventUnassign(10)), - unassignFilter = Vector(FilterUnassign(7, 4), FilterUnassign(10, 4)), + unassignFilter = Vector(FilterUnassign(7, 6), FilterUnassign(10, 6)), ) assertAllDataPresent() @@ -155,18 +196,18 @@ private[backend] trait StorageBackendTestsPruning pruneEventsSql(offset(5)) assertIndexDbDataSql( consuming = Vector(EventConsuming(9)), - consumingFilterStakeholder = Vector(FilterConsumingStakeholder(9, 4)), + consumingFilterStakeholder = Vector(FilterConsumingStakeholder(9, 6)), consumingFilterNonStakeholder = Vector(FilterConsumingNonStakeholder(9, 1)), nonConsuming = Vector(EventNonConsuming(8)), - nonConsumingFilter = Vector(FilterNonConsuming(8, 4)), + nonConsumingFilter = Vector(FilterNonConsuming(8, 6)), unassign = Vector(EventUnassign(10)), - unassignFilter = Vector(FilterUnassign(10, 4)), + unassignFilter = Vector(FilterUnassign(10, 6)), ) // Prune at the ledger end, but setting the unassign incomplete pruneEventsSql(endOffset, Vector(offset(8))) assertIndexDbDataSql( unassign = Vector(EventUnassign(10)), - unassignFilter = Vector(FilterUnassign(10, 4)), + unassignFilter = Vector(FilterUnassign(10, 6)), ) // Prune at the ledger end pruneEventsSql(endOffset) @@ -175,23 +216,23 @@ private[backend] trait StorageBackendTestsPruning it should "prune an archived contract" in { // a create event in its own transaction - val create = dtoCreate( + val create = dtoCreateLegacy( offset = offset(10), eventSequentialId = 1L, contractId = hashCid("#1"), signatory = signatoryParty, observer = observerParty, nonStakeholderInformees = Set(nonStakeholderInformeeParty), - synchronizerId = "x::sourcesynchronizer", + synchronizerId = someSynchronizerId, ) // a consuming event in its own transaction - val archive = dtoExercise( + val archive = dtoExerciseLegacy( offset = offset(11), eventSequentialId = 2L, consuming = true, contractId = hashCid("#1"), signatory = signatoryParty, - synchronizerId = "x::sourcesynchronizer", + synchronizerId = someSynchronizerId, ) executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) // Ingest a create and archive event @@ -205,17 +246,38 @@ private[backend] trait StorageBackendTestsPruning ) ++ Vector( create, - DbDto.IdFilterCreateStakeholder(1L, someTemplateId.toString, signatoryParty), - DbDto.IdFilterCreateStakeholder(1L, someTemplateId.toString, observerParty), + DbDto.IdFilterCreateStakeholder( + 1L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), + DbDto.IdFilterCreateStakeholder( + 1L, + someTemplateId.toString, + observerParty, + first_per_sequential_id = false, + ), DbDto.IdFilterCreateNonStakeholderInformee( 1L, someTemplateId.toString, nonStakeholderInformeeParty, + first_per_sequential_id = true, ), metaFromSingle(create), archive, - DbDto.IdFilterConsumingStakeholder(2L, someTemplateId.toString, signatoryParty), - DbDto.IdFilterConsumingStakeholder(2L, someTemplateId.toString, observerParty), + DbDto.IdFilterConsumingStakeholder( + 2L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), + DbDto.IdFilterConsumingStakeholder( + 2L, + someTemplateId.toString, + observerParty, + first_per_sequential_id = false, + ), metaFromSingle(archive), ), _, @@ -254,17 +316,17 @@ private[backend] 
trait StorageBackendTestsPruning it should "prune a contract which was unassigned later" in { // a create event in its own transaction - val create = dtoCreate( + val create = dtoCreateLegacy( offset = offset(10), eventSequentialId = 1L, contractId = hashCid("#1"), signatory = signatoryParty, observer = observerParty, nonStakeholderInformees = Set(nonStakeholderInformeeParty), - synchronizerId = "x::sourcesynchronizer", + synchronizerId = someSynchronizerId, ) // an unassign event in its own transaction - val unassign = dtoUnassign( + val unassign = dtoUnassignLegacy( offset = offset(11), eventSequentialId = 2L, contractId = hashCid("#1"), @@ -282,16 +344,32 @@ private[backend] trait StorageBackendTestsPruning ) ++ Vector( create, - DbDto.IdFilterCreateStakeholder(1L, someTemplateId.toString, signatoryParty), - DbDto.IdFilterCreateStakeholder(1L, someTemplateId.toString, observerParty), + DbDto.IdFilterCreateStakeholder( + 1L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), + DbDto.IdFilterCreateStakeholder( + 1L, + someTemplateId.toString, + observerParty, + first_per_sequential_id = false, + ), DbDto.IdFilterCreateNonStakeholderInformee( 1L, someTemplateId.toString, nonStakeholderInformeeParty, + first_per_sequential_id = true, ), metaFromSingle(create), unassign, - DbDto.IdFilterUnassignStakeholder(2L, someTemplateId.toString, signatoryParty), + DbDto.IdFilterUnassignStakeholder( + 2L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), metaFromSingle(unassign), ), _, @@ -324,61 +402,61 @@ private[backend] trait StorageBackendTestsPruning } it should "not prune an active contract" in { - val create = dtoCreate( + val create = dtoCreateLegacy( offset = offset(2), eventSequentialId = 1L, contractId = hashCid("#1"), signatory = signatoryParty, nonStakeholderInformees = Set(nonStakeholderInformeeParty), - synchronizerId = "x::sourcesynchronizer", + synchronizerId = someSynchronizerId, ) - val archiveDifferentSynchronizer = dtoExercise( + val archiveDifferentSynchronizer = dtoExerciseLegacy( offset = offset(3), eventSequentialId = 2L, consuming = true, contractId = hashCid("#1"), signatory = signatoryParty, - synchronizerId = "x::targetsynchronizer", + synchronizerId = someSynchronizerId2, ) - val archiveDifferentContractId = dtoExercise( + val archiveDifferentContractId = dtoExerciseLegacy( offset = offset(4), eventSequentialId = 3L, consuming = true, contractId = hashCid("#2"), signatory = signatoryParty, - synchronizerId = "x::sourcesynchronizer", + synchronizerId = someSynchronizerId, ) - val unassignDifferentSynchronizer = dtoUnassign( + val unassignDifferentSynchronizer = dtoUnassignLegacy( offset = offset(5), eventSequentialId = 4L, contractId = hashCid("#1"), signatory = signatoryParty, - sourceSynchronizerId = "x::targetsynchronizer", - targetSynchronizerId = "x::sourcesynchronizer", + sourceSynchronizerId = someSynchronizerId2, + targetSynchronizerId = someSynchronizerId, ) - val unassignDifferentContractId = dtoUnassign( + val unassignDifferentContractId = dtoUnassignLegacy( offset = offset(6), eventSequentialId = 5L, contractId = hashCid("#2"), signatory = signatoryParty, - sourceSynchronizerId = "x::sourcesynchronizer", - targetSynchronizerId = "x::targetsynchronizer", + sourceSynchronizerId = someSynchronizerId, + targetSynchronizerId = someSynchronizerId2, ) - val archiveAfter = dtoExercise( + val archiveAfter = dtoExerciseLegacy( offset = offset(7), eventSequentialId = 6L, consuming = true, contractId =
hashCid("#1"), signatory = signatoryParty, - synchronizerId = "x::sourcesynchronizer", + synchronizerId = someSynchronizerId, ) - val unassignAfter = dtoUnassign( + val unassignAfter = dtoUnassignLegacy( offset = offset(8), eventSequentialId = 7L, contractId = hashCid("#1"), signatory = signatoryParty, - sourceSynchronizerId = "x::sourcesynchronizer", - targetSynchronizerId = "x::targetsynchronizer", + sourceSynchronizerId = someSynchronizerId, + targetSynchronizerId = someSynchronizerId2, ) executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) // Ingest a create and archive event @@ -387,31 +465,72 @@ private[backend] trait StorageBackendTestsPruning Vector( dtoPartyEntry(offset(1), signatoryParty), create, - DbDto.IdFilterCreateStakeholder(1L, someTemplateId.toString, signatoryParty), - DbDto.IdFilterCreateStakeholder(1L, someTemplateId.toString, observerParty), + DbDto.IdFilterCreateStakeholder( + 1L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), + DbDto.IdFilterCreateStakeholder( + 1L, + someTemplateId.toString, + observerParty, + first_per_sequential_id = false, + ), DbDto.IdFilterCreateNonStakeholderInformee( 1L, someTemplateId.toString, nonStakeholderInformeeParty, + first_per_sequential_id = true, ), metaFromSingle(create), archiveDifferentSynchronizer, - DbDto.IdFilterConsumingStakeholder(2L, someTemplateId.toString, signatoryParty), + DbDto.IdFilterConsumingStakeholder( + 2L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), metaFromSingle(archiveDifferentSynchronizer), archiveDifferentContractId, - DbDto.IdFilterConsumingStakeholder(3L, someTemplateId.toString, signatoryParty), + DbDto.IdFilterConsumingStakeholder( + 3L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), metaFromSingle(archiveDifferentContractId), unassignDifferentSynchronizer, - DbDto.IdFilterUnassignStakeholder(4L, someTemplateId.toString, signatoryParty), + DbDto.IdFilterUnassignStakeholder( + 4L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), metaFromSingle(unassignDifferentSynchronizer), unassignDifferentContractId, - DbDto.IdFilterUnassignStakeholder(5L, someTemplateId.toString, signatoryParty), + DbDto.IdFilterUnassignStakeholder( + 5L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), metaFromSingle(unassignDifferentContractId), archiveAfter, - DbDto.IdFilterConsumingStakeholder(6L, someTemplateId.toString, signatoryParty), + DbDto.IdFilterConsumingStakeholder( + 6L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), metaFromSingle(archiveAfter), unassignAfter, - DbDto.IdFilterUnassignStakeholder(7L, someTemplateId.toString, signatoryParty), + DbDto.IdFilterUnassignStakeholder( + 7L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), metaFromSingle(unassignAfter), ), _, @@ -515,23 +634,23 @@ private[backend] trait StorageBackendTestsPruning it should "prune an assign if archived in the same synchronizer" in { // an assign event in its own transaction - val assign = dtoAssign( + val assign = dtoAssignLegacy( offset = offset(10), eventSequentialId = 1L, contractId = hashCid("#1"), signatory = signatoryParty, observer = observerParty, - sourceSynchronizerId = "x::sourcesynchronizer", - targetSynchronizerId = "x::targetsynchronizer", + sourceSynchronizerId = someSynchronizerId, + targetSynchronizerId = someSynchronizerId2, ) // a consuming 
event in its own transaction - val archive = dtoExercise( + val archive = dtoExerciseLegacy( offset = offset(11), eventSequentialId = 2L, consuming = true, contractId = hashCid("#1"), signatory = signatoryParty, - synchronizerId = "x::targetsynchronizer", + synchronizerId = someSynchronizerId2, ) executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) // Ingest an assign and an archive event @@ -544,11 +663,26 @@ private[backend] trait StorageBackendTestsPruning ) ++ Vector( assign, - DbDto.IdFilterAssignStakeholder(1L, someTemplateId.toString, signatoryParty), + DbDto.IdFilterAssignStakeholder( + 1L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), metaFromSingle(assign), archive, - DbDto.IdFilterConsumingStakeholder(2L, someTemplateId.toString, signatoryParty), - DbDto.IdFilterConsumingStakeholder(2L, someTemplateId.toString, observerParty), + DbDto.IdFilterConsumingStakeholder( + 2L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), + DbDto.IdFilterConsumingStakeholder( + 2L, + someTemplateId.toString, + observerParty, + first_per_sequential_id = false, + ), metaFromSingle(archive), ), _, @@ -557,11 +691,11 @@ private[backend] trait StorageBackendTestsPruning def assertAllDataPresent(txMeta: Vector[TxMeta]): Assertion = assertIndexDbDataSql( assign = Vector(EventAssign(1)), - assignFilter = Vector(FilterAssign(1, 4)), + assignFilter = Vector(FilterAssign(1, 6)), consuming = Vector(EventConsuming(2)), consumingFilterStakeholder = Vector( - FilterConsumingStakeholder(2, 4), - FilterConsumingStakeholder(2, 7), + FilterConsumingStakeholder(2, 6), + FilterConsumingStakeholder(2, 9), ), txMeta = txMeta, ) @@ -581,24 +715,24 @@ private[backend] trait StorageBackendTestsPruning it should "prune an assign which was unassigned in the same synchronizer later" in { // an assign event in its own transaction - val assign = dtoAssign( + val assign = dtoAssignLegacy( offset = offset(10), eventSequentialId = 1L, contractId = hashCid("#1"), signatory = signatoryParty, observer = observerParty, - sourceSynchronizerId = "x::sourcesynchronizer", - targetSynchronizerId = "x::targetsynchronizer", + sourceSynchronizerId = someSynchronizerId, + targetSynchronizerId = someSynchronizerId2, ) // an unassign event in its own transaction - val unassign = dtoUnassign( + val unassign = dtoUnassignLegacy( offset = offset(11), eventSequentialId = 2L, contractId = hashCid("#1"), signatory = signatoryParty, - sourceSynchronizerId = "x::targetsynchronizer", - targetSynchronizerId = "x::sourcesynchronizer", + sourceSynchronizerId = someSynchronizerId2, + targetSynchronizerId = someSynchronizerId, ) executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) // Ingest the assign and unassign event @@ -611,10 +745,20 @@ private[backend] trait StorageBackendTestsPruning ) ++ Vector( assign, - DbDto.IdFilterAssignStakeholder(1L, someTemplateId.toString, signatoryParty), + DbDto.IdFilterAssignStakeholder( + 1L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), metaFromSingle(assign), unassign, - DbDto.IdFilterUnassignStakeholder(2L, someTemplateId.toString, signatoryParty), + DbDto.IdFilterUnassignStakeholder( + 2L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), metaFromSingle(unassign), ), _, @@ -647,9 +791,9 @@ private[backend] trait StorageBackendTestsPruning offsetInt: Int, eventSequentialId: Long, hashCidString: String = "#1", - 
synchronizerId: String = "x::targetsynchronizer", + synchronizerId: SynchronizerId = someSynchronizerId2, ): Vector[DbDto] = { - val archive = dtoExercise( + val archive = dtoExerciseLegacy( offset = offset(offsetInt.toLong), eventSequentialId = eventSequentialId, consuming = true, @@ -660,7 +804,12 @@ private[backend] trait StorageBackendTestsPruning Vector( archive, DbDto - .IdFilterConsumingStakeholder(eventSequentialId, someTemplateId.toString, signatoryParty), + .IdFilterConsumingStakeholder( + eventSequentialId, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), metaFromSingle(archive), ) } @@ -668,20 +817,25 @@ private[backend] trait StorageBackendTestsPruning offsetInt: Int, eventSequentialId: Long, hashCidString: String = "#1", - synchronizerId: String = "x::targetsynchronizer", + synchronizerId: SynchronizerId = someSynchronizerId2, ): Vector[DbDto] = { - val unassign = dtoUnassign( + val unassign = dtoUnassignLegacy( offset = offset(offsetInt.toLong), eventSequentialId = eventSequentialId, contractId = hashCid(hashCidString), signatory = signatoryParty, sourceSynchronizerId = synchronizerId, - targetSynchronizerId = "x::thirdsynchronizer", + targetSynchronizerId = SynchronizerId.tryFromString("x::thirdsynchronizer"), ) Vector( unassign, DbDto - .IdFilterUnassignStakeholder(eventSequentialId, someTemplateId.toString, signatoryParty), + .IdFilterUnassignStakeholder( + eventSequentialId, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), metaFromSingle(unassign), ) } @@ -689,34 +843,39 @@ private[backend] trait StorageBackendTestsPruning val archiveDifferentSynchronizerEarlierThanAssing = archive( offsetInt = 2, eventSequentialId = 1, - synchronizerId = "x::sourcesynchronizer", + synchronizerId = someSynchronizerId, ) val unassignDifferentSynchronizerEarlierThanAssing = unassign( offsetInt = 3, eventSequentialId = 2, - synchronizerId = "x::sourcesynchronizer", + synchronizerId = someSynchronizerId, ) val unassignEarlierThanAssing = unassign( offsetInt = 4, eventSequentialId = 3, ) - val assign = dtoAssign( + val assign = dtoAssignLegacy( offset = offset(5), eventSequentialId = 4L, contractId = hashCid("#1"), signatory = signatoryParty, - sourceSynchronizerId = "x::sourcesynchronizer", - targetSynchronizerId = "x::targetsynchronizer", + sourceSynchronizerId = someSynchronizerId, + targetSynchronizerId = someSynchronizerId2, ) val assignEvents = Vector( assign, - DbDto.IdFilterAssignStakeholder(4L, someTemplateId.toString, signatoryParty), + DbDto.IdFilterAssignStakeholder( + 4L, + someTemplateId.toString, + signatoryParty, + first_per_sequential_id = true, + ), metaFromSingle(assign), ) val archiveDifferentSynchronizerEarlierThanPruning = archive( offsetInt = 6, eventSequentialId = 5, - synchronizerId = "x::sourcesynchronizer", + synchronizerId = someSynchronizerId, ) val archiveDifferentCidEarlierThanPruning = archive( offsetInt = 7, @@ -726,7 +885,7 @@ private[backend] trait StorageBackendTestsPruning val unassignDifferentSynchronizerEarlierThanPruning = unassign( offsetInt = 8, eventSequentialId = 7, - synchronizerId = "x::sourcesynchronizer", + synchronizerId = someSynchronizerId, ) val unassignDifferentCidEarlierThanPruning = unassign( offsetInt = 9, // pruning offset @@ -764,14 +923,14 @@ private[backend] trait StorageBackendTestsPruning def assertAllDataPresent(txMeta: Seq[TxMeta]): Assertion = assertIndexDbDataSql( assign = Vector(EventAssign(4)), - assignFilter = Vector(FilterAssign(4, 4)), + assignFilter = 
Vector(FilterAssign(4, 6)), consuming = Vector(EventConsuming(1), EventConsuming(5), EventConsuming(6), EventConsuming(9)), consumingFilterStakeholder = Vector( - FilterConsumingStakeholder(1, 4), - FilterConsumingStakeholder(5, 4), - FilterConsumingStakeholder(6, 4), - FilterConsumingStakeholder(9, 4), + FilterConsumingStakeholder(1, 6), + FilterConsumingStakeholder(5, 6), + FilterConsumingStakeholder(6, 6), + FilterConsumingStakeholder(9, 6), ), unassign = Vector( EventUnassign(2), @@ -781,11 +940,11 @@ private[backend] trait StorageBackendTestsPruning EventUnassign(10), ), unassignFilter = Vector( - FilterUnassign(2, 4), - FilterUnassign(3, 4), - FilterUnassign(7, 4), - FilterUnassign(8, 4), - FilterUnassign(10, 4), + FilterUnassign(2, 6), + FilterUnassign(3, 6), + FilterUnassign(7, 6), + FilterUnassign(8, 6), + FilterUnassign(10, 6), ), txMeta = txMeta, ) @@ -823,12 +982,12 @@ private[backend] trait StorageBackendTestsPruning pruneEventsSql(offset(5)) assertIndexDbDataSql( assign = Vector(EventAssign(4)), - assignFilter = Vector(FilterAssign(4, 4)), + assignFilter = Vector(FilterAssign(4, 6)), consuming = Vector(EventConsuming(5), EventConsuming(6), EventConsuming(9)), consumingFilterStakeholder = Vector( - FilterConsumingStakeholder(5, 4), - FilterConsumingStakeholder(6, 4), - FilterConsumingStakeholder(9, 4), + FilterConsumingStakeholder(5, 6), + FilterConsumingStakeholder(6, 6), + FilterConsumingStakeholder(9, 6), ), unassign = Vector( EventUnassign(7), @@ -836,9 +995,9 @@ private[backend] trait StorageBackendTestsPruning EventUnassign(10), ), unassignFilter = Vector( - FilterUnassign(7, 4), - FilterUnassign(8, 4), - FilterUnassign(10, 4), + FilterUnassign(7, 6), + FilterUnassign(8, 6), + FilterUnassign(10, 6), ), txMeta = Vector( TxMeta(6), @@ -853,16 +1012,16 @@ private[backend] trait StorageBackendTestsPruning pruneEventsSql(offset(9)) assertIndexDbDataSql( assign = Vector(EventAssign(4)), - assignFilter = Vector(FilterAssign(4, 4)), + assignFilter = Vector(FilterAssign(4, 6)), consuming = Vector(EventConsuming(9)), consumingFilterStakeholder = Vector( - FilterConsumingStakeholder(9, 4) + FilterConsumingStakeholder(9, 6) ), unassign = Vector( EventUnassign(10) ), unassignFilter = Vector( - FilterUnassign(10, 4) + FilterUnassign(10, 6) ), txMeta = Vector( TxMeta(10), @@ -877,16 +1036,16 @@ private[backend] trait StorageBackendTestsPruning ) assertIndexDbDataSql( assign = Vector(EventAssign(4)), - assignFilter = Vector(FilterAssign(4, 4)), + assignFilter = Vector(FilterAssign(4, 6)), consuming = Vector(EventConsuming(9)), consumingFilterStakeholder = Vector( - FilterConsumingStakeholder(9, 4) + FilterConsumingStakeholder(9, 6) ), unassign = Vector( EventUnassign(10) ), unassignFilter = Vector( - FilterUnassign(10, 4) + FilterUnassign(10, 6) ), txMeta = Vector.empty, ) @@ -898,12 +1057,12 @@ private[backend] trait StorageBackendTestsPruning ) assertIndexDbDataSql( assign = Vector(EventAssign(4)), - assignFilter = Vector(FilterAssign(4, 4)), + assignFilter = Vector(FilterAssign(4, 6)), unassign = Vector( EventUnassign(10) ), unassignFilter = Vector( - FilterUnassign(10, 4) + FilterUnassign(10, 6) ), txMeta = Vector.empty, ) @@ -917,14 +1076,14 @@ private[backend] trait StorageBackendTestsPruning val divulgee = Ref.Party.assertFromString(partyName) val contract1_id = hashCid("#1") val contract2_id = hashCid("#2") - val contract1_immediateDivulgence = dtoCreate( + val contract1_immediateDivulgence = dtoCreateLegacy( offset = offset(1), eventSequentialId = 1L, contractId = contract1_id, 
signatory = divulgee, emptyFlatEventWitnesses = true, ) - val contract2_createWithLocalStakeholder = dtoCreate( + val contract2_createWithLocalStakeholder = dtoCreateLegacy( offset = offset(2), eventSequentialId = 2L, contractId = contract2_id, @@ -940,12 +1099,14 @@ private[backend] trait StorageBackendTestsPruning 1L, someTemplateId.toString, divulgee, + first_per_sequential_id = true, ), contract2_createWithLocalStakeholder, DbDto.IdFilterCreateStakeholder( 2L, someTemplateId.toString, divulgee, + first_per_sequential_id = true, ), ), _, diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsQueryValidRange.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsQueryValidRange.scala index 2f65bae6ab..4bf2234bbc 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsQueryValidRange.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsQueryValidRange.scala @@ -17,143 +17,166 @@ import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers import org.slf4j.event.Level +import scala.concurrent.{ExecutionContext, Future} + private[backend] trait StorageBackendTestsQueryValidRange extends Matchers with StorageBackendSpec { this: AnyFlatSpec => implicit val loggingContextWithTrace: LoggingContextWithTrace = new LoggingContextWithTrace(LoggingEntries.empty, TraceContext.empty) + implicit val ec: ExecutionContext = directExecutionContext + behavior of "QueryValidRange.withRangeNotPruned" it should "allow valid range if no pruning and before ledger end" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(None)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withRangeNotPruned( - minOffsetInclusive = offset(3), - maxOffsetInclusive = offset(8), - errorPruning = _ => "", - errorLedgerEnd = _ => "", - )(()) - ) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withRangeNotPruned( + minOffsetInclusive = offset(3), + maxOffsetInclusive = offset(8), + errorPruning = _ => "", + errorLedgerEnd = _ => "", + )(Future.unit) + .futureValue } it should "allow valid range if no pruning and before ledger end and start from ledger begin" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(None)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withRangeNotPruned( - minOffsetInclusive = Offset.firstOffset, - maxOffsetInclusive = offset(8), - errorPruning = _ => "", - errorLedgerEnd = _ => "", - )(()) - ) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withRangeNotPruned( + minOffsetInclusive = Offset.firstOffset, + maxOffsetInclusive = offset(8), + errorPruning = _ => "", + errorLedgerEnd = _ => 
"", + )(Future.unit) + .futureValue } it should "allow valid range after pruning and before ledger end" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(backend.parameter.updatePrunedUptoInclusive(offset(3))) - executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withRangeNotPruned( - minOffsetInclusive = offset(6), - maxOffsetInclusive = offset(8), - errorPruning = _ => "", - errorLedgerEnd = _ => "", - )(()) - ) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withRangeNotPruned( + minOffsetInclusive = offset(6), + maxOffsetInclusive = offset(8), + errorPruning = _ => "", + errorLedgerEnd = _ => "", + )(Future.unit) + .futureValue } it should "allow valid range boundary case" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(backend.parameter.updatePrunedUptoInclusive(offset(3))) - executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withRangeNotPruned( - minOffsetInclusive = offset(4), - maxOffsetInclusive = offset(10), - errorPruning = _ => "", - errorLedgerEnd = _ => "", - )(()) - ) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withRangeNotPruned( + minOffsetInclusive = offset(4), + maxOffsetInclusive = offset(10), + errorPruning = _ => "", + errorLedgerEnd = _ => "", + )(Future.unit) + .futureValue } it should "deny in-valid range: earlier than pruning" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(backend.parameter.updatePrunedUptoInclusive(offset(3))) - loggerFactory.assertThrowsAndLogsSuppressing[StatusRuntimeException]( - SuppressionRule.Level(Level.INFO) - )( - within = executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withRangeNotPruned( + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + loggerFactory + .assertThrowsAndLogsSuppressingAsync[StatusRuntimeException]( + SuppressionRule.Level(Level.INFO) + )( + within = QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withRangeNotPruned( minOffsetInclusive = offset(3), maxOffsetInclusive = offset(10), errorPruning = pruningOffset => s"pruning issue: ${pruningOffset.unwrap}", errorLedgerEnd = _ => "", - )(()) - ), - assertions = _.infoMessage should include( - "PARTICIPANT_PRUNED_DATA_ACCESSED(9,0): pruning issue: 3" - ), - ) + )(Future.unit), + assertions = _.infoMessage should include( + "PARTICIPANT_PRUNED_DATA_ACCESSED(9,0): pruning issue: 3" + ), + ) + .futureValue } it should "deny in-valid range: later than ledger end when ledger is not empty" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) 
executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(backend.parameter.updatePrunedUptoInclusive(offset(3))) - loggerFactory.assertThrowsAndLogsSuppressing[StatusRuntimeException]( - SuppressionRule.Level(Level.INFO) - )( - within = executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withRangeNotPruned( + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + loggerFactory + .assertThrowsAndLogsSuppressingAsync[StatusRuntimeException]( + SuppressionRule.Level(Level.INFO) + )( + within = QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withRangeNotPruned( minOffsetInclusive = offset(4), maxOffsetInclusive = offset(11), errorPruning = _ => "", errorLedgerEnd = ledgerEndOffset => s"ledger-end issue: ${ledgerEndOffset.fold(0L)(_.unwrap)}", - )(()) - ), - assertions = _.infoMessage should include( - "PARTICIPANT_DATA_ACCESSED_AFTER_LEDGER_END(9,0): ledger-end issue: 10" - ), - ) + )(Future.unit), + assertions = _.infoMessage should include( + "PARTICIPANT_DATA_ACCESSED_AFTER_LEDGER_END(9,0): ledger-end issue: 10" + ), + ) } it should "deny in-valid range: later than ledger end when ledger end is none" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) - loggerFactory.assertThrowsAndLogsSuppressing[StatusRuntimeException]( - SuppressionRule.Level(Level.INFO) - )( - within = executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withRangeNotPruned( + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + loggerFactory + .assertThrowsAndLogsSuppressingAsync[StatusRuntimeException]( + SuppressionRule.Level(Level.INFO) + )( + within = QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withRangeNotPruned( minOffsetInclusive = offset(1), maxOffsetInclusive = offset(1), errorPruning = _ => "", errorLedgerEnd = ledgerEndOffset => s"ledger-end issue: ${ledgerEndOffset.fold(0L)(_.unwrap)}", - )(()) - ), - assertions = _.infoMessage should include( - "PARTICIPANT_DATA_ACCESSED_AFTER_LEDGER_END(9,0): ledger-end issue: 0" - ), - ) - } - - it should "execute query before reading parameters from the db" in { - executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withRangeNotPruned( - minOffsetInclusive = offset(3), - maxOffsetInclusive = offset(8), - errorPruning = _ => "", - errorLedgerEnd = _ => "", - ) { - backend.parameter.initializeParameters(someIdentityParams, loggerFactory)(connection) - updateLedgerEnd(offset(10), 10L)(connection) - } - ) + )(Future.unit), + assertions = _.infoMessage should include( + "PARTICIPANT_DATA_ACCESSED_AFTER_LEDGER_END(9,0): ledger-end issue: 0" + ), + ) + .futureValue } behavior of "QueryValidRange.withOffsetNotBeforePruning" @@ -161,92 +184,196 @@ private[backend] trait StorageBackendTestsQueryValidRange extends Matchers with it should "allow offset in the valid range" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(backend.parameter.updatePrunedUptoInclusive(offset(3))) - executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, 
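
From this file onward the QueryValidRange tests stop writing the pruning offset via `backend.parameter.updatePrunedUptoInclusive` and instead stub an async `pruningOffsetService`; `QueryValidRangeImpl` is now built from a `ledgerEndCache` plus that service and validates around a `Future`-returning query. A minimal sketch of the two checks these tests exercise, with simplified stand-ins for the service and cache:

```scala
// Sketch only: simplified stand-ins, not the real Canton QueryValidRangeImpl.
import scala.concurrent.{ExecutionContext, Future}

object RangeCheckSketch {
  // Mirrors withRangeNotPruned: the range must start strictly after the
  // pruning offset and must not extend past the ledger end.
  def withRangeNotPruned[T](
      pruningOffset: () => Future[Option[Long]], // stand-in for the pruning offset service
      ledgerEnd: Option[Long],                   // stand-in for the ledger end cache
      minInclusive: Long,
      maxInclusive: Long,
  )(query: => Future[T])(implicit ec: ExecutionContext): Future[T] =
    pruningOffset().flatMap { pruned =>
      if (pruned.exists(_ >= minInclusive))
        Future.failed(new IllegalStateException(s"range pruned up to $pruned"))
      else if (!ledgerEnd.exists(_ >= maxInclusive))
        Future.failed(new IllegalStateException(s"range beyond ledger end $ledgerEnd"))
      else query
    }

  // Mirrors filterPrunedEvents: drop events at or below the pruning offset,
  // fail if any event offset lies beyond the ledger end.
  def filterPruned(
      pruningOffset: () => Future[Option[Long]],
      ledgerEnd: Option[Long],
      offsets: Seq[Long],
  )(implicit ec: ExecutionContext): Future[Seq[Long]] =
    pruningOffset().map { pruned =>
      if (offsets.exists(o => !ledgerEnd.exists(_ >= o)))
        throw new IllegalStateException("event offset beyond ledger end")
      offsets.filter(o => pruned.forall(o > _))
    }
}
```

The boundary cases line up with the expectations in these tests: a range starting at the pruning offset is denied, one starting just after it is allowed, and a missing ledger end denies everything.
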
this.loggerFactory).withOffsetNotBeforePruning( - offset = offset(5), - errorPruning = _ => "", - errorLedgerEnd = _ => "", - )(()) - ) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withOffsetNotBeforePruning( + offset = offset(5), + errorPruning = _ => "", + errorLedgerEnd = _ => "", + )(Future.unit) + .futureValue } it should "allow offset in the valid range if no pruning before" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withOffsetNotBeforePruning( - offset = offset(5), - errorPruning = _ => "", - errorLedgerEnd = _ => "", - )(()) - ) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(None)) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withOffsetNotBeforePruning( + offset = offset(5), + errorPruning = _ => "", + errorLedgerEnd = _ => "", + )(Future.unit) + .futureValue } it should "allow offset in the valid range lower boundary" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(backend.parameter.updatePrunedUptoInclusive(offset(3))) - executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withOffsetNotBeforePruning( - offset = offset(3), - errorPruning = _ => "", - errorLedgerEnd = _ => "", - )(()) - ) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withOffsetNotBeforePruning( + offset = offset(3), + errorPruning = _ => "", + errorLedgerEnd = _ => "", + )(Future.unit) + .futureValue } it should "allow offset in the valid range higher boundary" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(backend.parameter.updatePrunedUptoInclusive(offset(3))) - executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withOffsetNotBeforePruning( - offset = offset(10), - errorPruning = _ => "", - errorLedgerEnd = _ => "", - )(()) - ) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withOffsetNotBeforePruning( + offset = offset(10), + errorPruning = _ => "", + errorLedgerEnd = _ => "", + )(Future.unit) + .futureValue } it should "deny in-valid range: earlier than pruning" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(backend.parameter.updatePrunedUptoInclusive(offset(3))) - loggerFactory.assertThrowsAndLogsSuppressing[StatusRuntimeException]( - SuppressionRule.Level(Level.INFO) - )( 
- within = executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withOffsetNotBeforePruning( + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + loggerFactory + .assertThrowsAndLogsSuppressingAsync[StatusRuntimeException]( + SuppressionRule.Level(Level.INFO) + )( + within = QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withOffsetNotBeforePruning( offset = offset(2), errorPruning = pruningOffset => s"pruning issue: ${pruningOffset.unwrap}", errorLedgerEnd = _ => "", - )(()) - ), - assertions = _.infoMessage should include( - "PARTICIPANT_PRUNED_DATA_ACCESSED(9,0): pruning issue: 3" - ), - ) + )(Future.unit), + assertions = _.infoMessage should include( + "PARTICIPANT_PRUNED_DATA_ACCESSED(9,0): pruning issue: 3" + ), + ) + .futureValue } it should "deny in-valid range: later than ledger end" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(backend.parameter.updatePrunedUptoInclusive(offset(3))) - loggerFactory.assertThrowsAndLogsSuppressing[StatusRuntimeException]( - SuppressionRule.Level(Level.INFO) - )( - within = executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withOffsetNotBeforePruning( + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + loggerFactory + .assertThrowsAndLogsSuppressingAsync[StatusRuntimeException]( + SuppressionRule.Level(Level.INFO) + )( + within = QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withOffsetNotBeforePruning( offset = offset(11), errorPruning = _ => "", errorLedgerEnd = ledgerEndOffset => s"ledger-end issue: ${ledgerEndOffset.fold(0L)(_.unwrap)}", - )(()) - ), - assertions = _.infoMessage should include( - "PARTICIPANT_DATA_ACCESSED_AFTER_LEDGER_END(9,0): ledger-end issue: 10" - ), - ) + )(Future.unit), + assertions = _.infoMessage should include( + "PARTICIPANT_DATA_ACCESSED_AFTER_LEDGER_END(9,0): ledger-end issue: 10" + ), + ) + .futureValue + } + + behavior of "QueryValidRange.filterPrunedEvents" + + it should "return all events if no pruning" in { + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(updateLedgerEnd(offset(10), 10L)) + val events = (1L to 5L).map(offset) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(None)) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).filterPrunedEvents[Offset](identity)(events).futureValue shouldBe events + } + + it should "filter out events at or below the pruning offset" in { + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(updateLedgerEnd(offset(10), 10L)) + val events = (1L to 5L).map(offset) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + 
).filterPrunedEvents[Offset](identity)(events).futureValue shouldBe (4L to 5L).map(offset) + } + + it should "return empty if all events are pruned" in { + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(updateLedgerEnd(offset(10), 10L)) + val events = (1L to 3L).map(offset) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).filterPrunedEvents[Offset](identity)(events).futureValue shouldBe empty + } + + it should "return all events if pruning offset is before all events" in { + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(updateLedgerEnd(offset(10), 10L)) + val events = (5L to 7L).map(offset) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).filterPrunedEvents[Offset](identity)(events).futureValue shouldBe events + } + + it should "fail if any event offset is beyond ledger end" in { + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(updateLedgerEnd(offset(2), 2L)) + val events = (1L to 5L).map(offset) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(None)) + loggerFactory + .assertThrowsAndLogsSuppressingAsync[StatusRuntimeException]( + SuppressionRule.Level(Level.INFO) + )( + within = QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).filterPrunedEvents[Offset](identity)(events), + assertions = _.infoMessage should include( + "PARTICIPANT_DATA_ACCESSED_AFTER_LEDGER_END(9,0): Offset of event to be filtered Offset(3) is beyond ledger end" + ), + ) + .futureValue } } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsReassignmentEvents.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsReassignmentEvents.scala index 5bc2795c64..9d22154fab 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsReassignmentEvents.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsReassignmentEvents.scala @@ -8,14 +8,19 @@ import com.digitalasset.canton.platform.store.backend.EventStorageBackend.Sequen Ids, } import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{ + CommonEventProperties, Entry, - RawCreatedEvent, + RawCreatedEventLegacy, + ThinCreatedEventProperties, UnassignProperties, } +import com.digitalasset.canton.platform.store.dao.PaginatingAsyncStream.PaginationInput +import com.digitalasset.canton.protocol.TestUpdateId +import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.tracing.SerializableTraceContextConverter.SerializableTraceContextExtension import com.digitalasset.canton.tracing.{SerializableTraceContext, TraceContext} -import 
com.digitalasset.daml.lf.data.Time.Timestamp import com.digitalasset.daml.lf.data.{Ref, Time} +import org.scalactic.Equality import org.scalatest.OptionValues import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers @@ -27,7 +32,9 @@ private[backend] trait StorageBackendTestsReassignmentEvents this: AnyFlatSpec => import StorageBackendTestValues.* - import DbDtoEq.* + import ScalatestEqualityHelpers.eqOptArray + + implicit val dbDtoEq: Equality[DbDto] = ScalatestEqualityHelpers.DbDtoEq private val emptyTraceContext = SerializableTraceContext(TraceContext.empty).toDamlProto.toByteArray @@ -36,9 +43,24 @@ private[backend] trait StorageBackendTestsReassignmentEvents it should "return the correct event ids for assign event stakeholder" in { val dbDtos = Vector( - DbDto.IdFilterAssignStakeholder(1, someTemplateId.toString, someParty), - DbDto.IdFilterAssignStakeholder(1, someTemplateId.toString, someParty2), - DbDto.IdFilterAssignStakeholder(2, someTemplateId2.toString, someParty), + DbDto.IdFilterAssignStakeholder( + 1, + someTemplateId.toString, + someParty, + first_per_sequential_id = true, + ), + DbDto.IdFilterAssignStakeholder( + 1, + someTemplateId.toString, + someParty2, + first_per_sequential_id = false, + ), + DbDto.IdFilterAssignStakeholder( + 2, + someTemplateId2.toString, + someParty, + first_per_sequential_id = true, + ), ) executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) @@ -46,92 +68,119 @@ private[backend] trait StorageBackendTestsReassignmentEvents executeSql(updateLedgerEnd(offset(2), 2L)) executeSql( - backend.event.fetchAssignEventIdsForStakeholder( + backend.event.fetchAssignEventIdsForStakeholderLegacy( stakeholderO = Some(someParty), templateId = None, - startExclusive = 0, - endInclusive = 2, - limit = 10, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 2, + limit = 10, + ) ) ) shouldBe Vector(1, 2) executeSql( - backend.event.fetchAssignEventIdsForStakeholder( + backend.event.fetchAssignEventIdsForStakeholderLegacy( stakeholderO = Some(someParty2), templateId = None, - startExclusive = 0, - endInclusive = 2, - limit = 10, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 2, + limit = 10, + ) ) ) shouldBe Vector(1) executeSql( - backend.event.fetchAssignEventIdsForStakeholder( + backend.event.fetchAssignEventIdsForStakeholderLegacy( stakeholderO = None, templateId = None, - startExclusive = 0, - endInclusive = 2, - limit = 10, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 2, + limit = 10, + ) ) - ) shouldBe Vector(1, 1, 2) + ) shouldBe Vector(1, 2) executeSql( - backend.event.fetchAssignEventIdsForStakeholder( + backend.event.fetchAssignEventIdsForStakeholderLegacy( stakeholderO = Some(someParty), templateId = None, - startExclusive = 0, - endInclusive = 2, - limit = 1, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 2, + limit = 1, + ) ) ) shouldBe Vector(1) executeSql( - backend.event.fetchAssignEventIdsForStakeholder( + backend.event.fetchAssignEventIdsForStakeholderLegacy( stakeholderO = Some(someParty), templateId = Some(someTemplateId), - startExclusive = 0, - endInclusive = 2, - limit = 10, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 2, + limit = 10, + ) ) ) shouldBe Vector(1) executeSql( - backend.event.fetchAssignEventIdsForStakeholder( + backend.event.fetchAssignEventIdsForStakeholderLegacy( stakeholderO = Some(someParty), templateId = Some(someTemplateId2), - startExclusive = 0, - endInclusive = 2, - 
limit = 10, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 2, + limit = 10, + ) ) ) shouldBe Vector(2) executeSql( - backend.event.fetchAssignEventIdsForStakeholder( + backend.event.fetchAssignEventIdsForStakeholderLegacy( stakeholderO = Some(someParty), templateId = Some(someTemplateId), - startExclusive = 0, - endInclusive = 1, - limit = 10, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 1, + limit = 10, + ) ) ) shouldBe Vector(1) executeSql( - backend.event.fetchAssignEventIdsForStakeholder( + backend.event.fetchAssignEventIdsForStakeholderLegacy( stakeholderO = None, templateId = Some(someTemplateId), - startExclusive = 0, - endInclusive = 1, - limit = 10, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 1, + limit = 10, + ) ) - ) shouldBe Vector(1, 1) + ) shouldBe Vector(1) executeSql( - backend.event.fetchAssignEventIdsForStakeholder( + backend.event.fetchAssignEventIdsForStakeholderLegacy( stakeholderO = None, templateId = Some(someTemplateId2), - startExclusive = 0, - endInclusive = 2, - limit = 10, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 2, + limit = 10, + ) ) ) shouldBe Vector(2) @@ -139,9 +188,24 @@ private[backend] trait StorageBackendTestsReassignmentEvents it should "return the correct event ids for unassign event stakeholder" in { val dbDtos = Vector( - DbDto.IdFilterUnassignStakeholder(1, someTemplateId.toString, someParty), - DbDto.IdFilterUnassignStakeholder(1, someTemplateId.toString, someParty2), - DbDto.IdFilterUnassignStakeholder(2, someTemplateId2.toString, someParty), + DbDto.IdFilterUnassignStakeholder( + 1, + someTemplateId.toString, + someParty, + first_per_sequential_id = true, + ), + DbDto.IdFilterUnassignStakeholder( + 1, + someTemplateId.toString, + someParty2, + first_per_sequential_id = false, + ), + DbDto.IdFilterUnassignStakeholder( + 2, + someTemplateId2.toString, + someParty, + first_per_sequential_id = true, + ), ) executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) @@ -149,112 +213,145 @@ private[backend] trait StorageBackendTestsReassignmentEvents executeSql(updateLedgerEnd(offset(2), 2L)) executeSql( - backend.event.fetchUnassignEventIdsForStakeholder( + backend.event.fetchUnassignEventIdsForStakeholderLegacy( stakeholderO = Some(someParty), templateId = None, - startExclusive = 0, - endInclusive = 2, - limit = 10, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 2, + limit = 10, + ) ) ) shouldBe Vector(1, 2) executeSql( - backend.event.fetchUnassignEventIdsForStakeholder( + backend.event.fetchUnassignEventIdsForStakeholderLegacy( stakeholderO = Some(someParty2), templateId = None, - startExclusive = 0, - endInclusive = 2, - limit = 10, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 2, + limit = 10, + ) ) ) shouldBe Vector(1) executeSql( - backend.event.fetchUnassignEventIdsForStakeholder( + backend.event.fetchUnassignEventIdsForStakeholderLegacy( stakeholderO = None, templateId = None, - startExclusive = 0, - endInclusive = 2, - limit = 10, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 2, + limit = 10, + ) ) - ) shouldBe Vector(1, 1, 2) + ) shouldBe Vector(1, 2) executeSql( - backend.event.fetchUnassignEventIdsForStakeholder( + backend.event.fetchUnassignEventIdsForStakeholderLegacy( stakeholderO = Some(someParty), templateId = None, - startExclusive = 0, - endInclusive = 2, - limit = 1, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 2, + limit = 1, + ) ) ) 
shouldBe Vector(1) executeSql( - backend.event.fetchUnassignEventIdsForStakeholder( + backend.event.fetchUnassignEventIdsForStakeholderLegacy( stakeholderO = Some(someParty), templateId = Some(someTemplateId), - startExclusive = 0, - endInclusive = 2, - limit = 10, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 2, + limit = 10, + ) ) ) shouldBe Vector(1) executeSql( - backend.event.fetchUnassignEventIdsForStakeholder( + backend.event.fetchUnassignEventIdsForStakeholderLegacy( stakeholderO = Some(someParty), templateId = None, - startExclusive = 0, - endInclusive = 2, - limit = 10, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 2, + limit = 10, + ) ) ) shouldBe Vector(1, 2) executeSql( - backend.event.fetchUnassignEventIdsForStakeholder( + backend.event.fetchUnassignEventIdsForStakeholderLegacy( stakeholderO = Some(someParty), templateId = Some(someTemplateId2), - startExclusive = 0, - endInclusive = 2, - limit = 10, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 2, + limit = 10, + ) ) ) shouldBe Vector(2) executeSql( - backend.event.fetchUnassignEventIdsForStakeholder( + backend.event.fetchUnassignEventIdsForStakeholderLegacy( stakeholderO = None, templateId = Some(someTemplateId2), - startExclusive = 0, - endInclusive = 2, - limit = 10, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 2, + limit = 10, + ) ) ) shouldBe Vector(2) executeSql( - backend.event.fetchUnassignEventIdsForStakeholder( + backend.event.fetchUnassignEventIdsForStakeholderLegacy( stakeholderO = Some(someParty), templateId = Some(someTemplateId), - startExclusive = 0, - endInclusive = 1, - limit = 10, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 1, + limit = 10, + ) ) ) shouldBe Vector(1) executeSql( - backend.event.fetchUnassignEventIdsForStakeholder( + backend.event.fetchUnassignEventIdsForStakeholderLegacy( stakeholderO = None, templateId = Some(someTemplateId), - startExclusive = 0, - endInclusive = 1, - limit = 10, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 1, + limit = 10, + ) ) - ) shouldBe Vector(1, 1) + ) shouldBe Vector(1) executeSql( - backend.event.fetchUnassignEventIdsForStakeholder( + backend.event.fetchUnassignEventIdsForStakeholderLegacy( stakeholderO = None, templateId = Some(someTemplateId), - startExclusive = 0, - endInclusive = 1, - limit = 1, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 1, + limit = 1, + ) ) ) shouldBe Vector(1) } @@ -263,19 +360,21 @@ private[backend] trait StorageBackendTestsReassignmentEvents it should "return the correct assign events" in { val dbDtos = Vector( - dtoAssign( + dtoAssignLegacy( offset = offset(1), eventSequentialId = 1L, contractId = hashCid("#1"), commandId = "command id 1", nodeId = 24, + internalContractId = 42L, ), - dtoAssign( + dtoAssignLegacy( offset = offset(2), eventSequentialId = 2L, contractId = hashCid("#2"), commandId = "command id 2", nodeId = 42, + internalContractId = 43L, ), ) @@ -284,7 +383,7 @@ private[backend] trait StorageBackendTestsReassignmentEvents executeSql(updateLedgerEnd(offset(2), 2L)) val result = executeSql( - backend.event.assignEventBatch( + backend.event.assignEventBatchLegacy( eventSequentialIds = Ids(List(1L, 2L)), allFilterParties = Some(Set(Ref.Party.assertFromString("signatory"), someParty)), ) @@ -296,22 +395,20 @@ private[backend] trait StorageBackendTestsReassignmentEvents commandId = Some("command id 1"), workflowId = Some("workflow_id"), offset = 1, + nodeId = 24, traceContext = 
Some(emptyTraceContext), recordTime = someTime, - updateId = offset(1).toDecimalString, + updateId = TestUpdateId(offset(1).toDecimalString).toHexString, eventSequentialId = 0L, - ledgerEffectiveTime = Timestamp.MinValue, + ledgerEffectiveTime = None, synchronizerId = "x::targetsynchronizer", - event = EventStorageBackend.RawAssignEvent( + event = EventStorageBackend.RawAssignEventLegacy( sourceSynchronizerId = "x::sourcesynchronizer", targetSynchronizerId = "x::targetsynchronizer", - reassignmentId = "123456789", + reassignmentId = "0012345678", submitter = Option(someParty), reassignmentCounter = 1000L, - rawCreatedEvent = RawCreatedEvent( - updateId = offset(1).toDecimalString, - offset = 1, - nodeId = 24, + rawCreatedEvent = RawCreatedEventLegacy( contractId = hashCid("#1"), templateId = someTemplateIdFull, witnessParties = Set("signatory"), @@ -326,6 +423,8 @@ private[backend] trait StorageBackendTestsReassignmentEvents ledgerEffectiveTime = someTime, createKeyHash = None, authenticationData = someAuthenticationDataBytes, + representativePackageId = someTemplateIdFull.pkgId, + internalContractId = 42L, ), ), externalTransactionHash = None, @@ -334,22 +433,20 @@ private[backend] trait StorageBackendTestsReassignmentEvents commandId = Some("command id 2"), workflowId = Some("workflow_id"), offset = 2, + nodeId = 42, traceContext = Some(emptyTraceContext), recordTime = someTime, - updateId = offset(2).toDecimalString, + updateId = TestUpdateId(offset(2).toDecimalString).toHexString, eventSequentialId = 0L, - ledgerEffectiveTime = Timestamp.MinValue, + ledgerEffectiveTime = None, synchronizerId = "x::targetsynchronizer", - event = EventStorageBackend.RawAssignEvent( + event = EventStorageBackend.RawAssignEventLegacy( sourceSynchronizerId = "x::sourcesynchronizer", targetSynchronizerId = "x::targetsynchronizer", - reassignmentId = "123456789", + reassignmentId = "0012345678", submitter = Option(someParty), reassignmentCounter = 1000L, - rawCreatedEvent = RawCreatedEvent( - updateId = offset(2).toDecimalString, - offset = 2, - nodeId = 42, + rawCreatedEvent = RawCreatedEventLegacy( contractId = hashCid("#2"), templateId = someTemplateIdFull, witnessParties = Set("signatory"), @@ -364,6 +461,8 @@ private[backend] trait StorageBackendTestsReassignmentEvents ledgerEffectiveTime = someTime, createKeyHash = None, authenticationData = someAuthenticationDataBytes, + representativePackageId = someTemplateIdFull.pkgId, + internalContractId = 43L, ), ), externalTransactionHash = None, @@ -372,7 +471,7 @@ private[backend] trait StorageBackendTestsReassignmentEvents ) val resultRange = executeSql( - backend.event.assignEventBatch( + backend.event.assignEventBatchLegacy( eventSequentialIds = IdRange(1L, 2L), allFilterParties = Some(Set(Ref.Party.assertFromString("signatory"), someParty)), ) @@ -383,14 +482,14 @@ private[backend] trait StorageBackendTestsReassignmentEvents it should "return the correct unassign events" in { val dbDtos = Vector( - dtoUnassign( + dtoUnassignLegacy( offset = offset(1), eventSequentialId = 1L, contractId = hashCid("#1"), commandId = "command id 1", nodeId = 24, ), - dtoUnassign( + dtoUnassignLegacy( offset = offset(2), eventSequentialId = 2L, contractId = hashCid("#2"), @@ -404,7 +503,7 @@ private[backend] trait StorageBackendTestsReassignmentEvents executeSql(updateLedgerEnd(offset(2), 2L)) val result = executeSql( - backend.event.unassignEventBatch( + backend.event.unassignEventBatchLegacy( eventSequentialIds = Ids(List(1L, 2L)), allFilterParties = 
Some(Set(Ref.Party.assertFromString("signatory"), someParty)), ) @@ -419,23 +518,23 @@ private[backend] trait StorageBackendTestsReassignmentEvents commandId = Some("command id 1"), workflowId = Some("workflow_id"), offset = 1, + nodeId = 24, traceContext = Some(emptyTraceContext), recordTime = someTime, - updateId = offset(1).toDecimalString, + updateId = TestUpdateId(offset(1).toDecimalString).toHexString, eventSequentialId = 0L, - ledgerEffectiveTime = Timestamp.MinValue, + ledgerEffectiveTime = None, synchronizerId = "x::sourcesynchronizer", - event = EventStorageBackend.RawUnassignEvent( + event = EventStorageBackend.RawUnassignEventLegacy( sourceSynchronizerId = "x::sourcesynchronizer", targetSynchronizerId = "x::targetsynchronizer", - reassignmentId = "123456789", + reassignmentId = "0012345678", submitter = Option(someParty), reassignmentCounter = 1000L, contractId = hashCid("#1"), templateId = someTemplateIdFull, witnessParties = Set("signatory"), assignmentExclusivity = Some(Time.Timestamp.assertFromLong(11111)), - nodeId = 24, ), externalTransactionHash = None, ), @@ -443,30 +542,30 @@ private[backend] trait StorageBackendTestsReassignmentEvents commandId = Some("command id 2"), workflowId = Some("workflow_id"), offset = 2, + nodeId = 42, traceContext = Some(emptyTraceContext), recordTime = someTime, - updateId = offset(2).toDecimalString, + updateId = TestUpdateId(offset(2).toDecimalString).toHexString, eventSequentialId = 0L, - ledgerEffectiveTime = Timestamp.MinValue, + ledgerEffectiveTime = None, synchronizerId = "x::sourcesynchronizer", - event = EventStorageBackend.RawUnassignEvent( + event = EventStorageBackend.RawUnassignEventLegacy( sourceSynchronizerId = "x::sourcesynchronizer", targetSynchronizerId = "x::targetsynchronizer", - reassignmentId = "123456789", + reassignmentId = "0012345678", submitter = Option(someParty), reassignmentCounter = 1000L, contractId = hashCid("#2"), templateId = someTemplateIdFull, witnessParties = Set("signatory"), assignmentExclusivity = Some(Time.Timestamp.assertFromLong(11111)), - nodeId = 42, ), externalTransactionHash = None, ), ) val resultRange = executeSql( - backend.event.unassignEventBatch( + backend.event.unassignEventBatchLegacy( eventSequentialIds = IdRange(1L, 2L), allFilterParties = Some(Set(Ref.Party.assertFromString("signatory"), someParty)), ) @@ -484,14 +583,14 @@ private[backend] trait StorageBackendTestsReassignmentEvents TraceContext.withNewTraceContext("test") { aTraceContext => val serializableTraceContext = SerializableTraceContext(aTraceContext).toDamlProto.toByteArray val dbDtos = Vector( - dtoAssign( + dtoAssignLegacy( offset = offset(1), eventSequentialId = 1L, contractId = hashCid("#1"), commandId = "command id 1", traceContext = emptyTraceContext, ), - dtoAssign( + dtoAssignLegacy( offset = offset(2), eventSequentialId = 2L, contractId = hashCid("#2"), @@ -505,7 +604,7 @@ private[backend] trait StorageBackendTestsReassignmentEvents executeSql(updateLedgerEnd(offset(2), 2L)) val assignments = executeSql( - backend.event.assignEventBatch( + backend.event.assignEventBatchLegacy( eventSequentialIds = Ids(List(1L, 2L)), allFilterParties = Some(Set(Ref.Party.assertFromString("signatory"), someParty)), ) @@ -519,14 +618,14 @@ private[backend] trait StorageBackendTestsReassignmentEvents TraceContext.withNewTraceContext("test") { aTraceContext => val serializableTraceContext = SerializableTraceContext(aTraceContext).toDamlProto.toByteArray val dbDtos = Vector( - dtoUnassign( + dtoUnassignLegacy( offset = offset(1), 
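// (Two expectations recurring in the entries above, both inferences from this
//  diff rather than documented behaviour: update ids are now persisted as the
//  hex rendering of a TestUpdateId built from the decimal offset, and
//  reassignment entries no longer fake a ledger-effective time, so the field
//  is None rather than the old Timestamp.MinValue placeholder.)
//
//   val expectedUpdateId: String = TestUpdateId(offset(1).toDecimalString).toHexString
//   val expectedLedgerEffectiveTime: Option[Time.Timestamp] = None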
eventSequentialId = 1L, contractId = hashCid("#1"), commandId = "command id 1", traceContext = emptyTraceContext, ), - dtoUnassign( + dtoUnassignLegacy( offset = offset(2), eventSequentialId = 2L, contractId = hashCid("#2"), @@ -540,7 +639,7 @@ private[backend] trait StorageBackendTestsReassignmentEvents executeSql(updateLedgerEnd(offset(2), 2L)) val unassignments = executeSql( - backend.event.unassignEventBatch( + backend.event.unassignEventBatchLegacy( eventSequentialIds = Ids(List(1L, 2L)), allFilterParties = Some(Set(Ref.Party.assertFromString("signatory"), someParty)), ) @@ -552,49 +651,52 @@ private[backend] trait StorageBackendTestsReassignmentEvents behavior of "active contract batch lookup for contracts" - it should "return the correct active contracts from create events, and only if not archived/unassigned" in { + it should "return the correct active contracts from create events, and only if not archived/unassigned legacy" in { val dbDtos = Vector( - dtoCreate( + dtoCreateLegacy( offset = offset(1), eventSequentialId = 1L, contractId = hashCid("#1"), commandId = "command id 1", - synchronizerId = "x::synchronizer1", + synchronizerId = someSynchronizerId, authenticationData = someAuthenticationDataBytes, + internalContractId = 42L, ), - dtoCreate( + dtoCreateLegacy( offset = offset(2), eventSequentialId = 2L, contractId = hashCid("#2"), commandId = "command id 2", - synchronizerId = "x::synchronizer1", + synchronizerId = someSynchronizerId, authenticationData = someAuthenticationDataBytes, + representativePackageId = someRepresentativePackageId, + internalContractId = 43L, ), - dtoExercise( + dtoExerciseLegacy( offset = offset(3), eventSequentialId = 3L, consuming = true, contractId = hashCid("#2"), - synchronizerId = "x::synchronizer2", + synchronizerId = someSynchronizerId2, ), - dtoUnassign( + dtoUnassignLegacy( offset = offset(4), eventSequentialId = 4L, contractId = hashCid("#2"), - sourceSynchronizerId = "x::synchronizer2", + sourceSynchronizerId = someSynchronizerId2, ), - dtoExercise( + dtoExerciseLegacy( offset = offset(10), eventSequentialId = 10L, consuming = true, contractId = hashCid("#2"), - synchronizerId = "x::synchronizer1", + synchronizerId = someSynchronizerId, ), - dtoUnassign( + dtoUnassignLegacy( offset = offset(11), eventSequentialId = 11L, contractId = hashCid("#1"), - sourceSynchronizerId = "x::synchronizer1", + sourceSynchronizerId = someSynchronizerId, ), ) @@ -603,7 +705,7 @@ private[backend] trait StorageBackendTestsReassignmentEvents executeSql(updateLedgerEnd(offset(11), 11L)) executeSql( - backend.event.activeContractCreateEventBatch( + backend.event.activeContractCreateEventBatchLegacy( eventSequentialIds = List(1, 2), allFilterParties = Some(Set(Ref.Party.assertFromString("observer"))), endInclusive = 6, @@ -617,14 +719,12 @@ private[backend] trait StorageBackendTestsReassignmentEvents ) ) ) shouldBe Vector( - EventStorageBackend.RawActiveContract( + EventStorageBackend.RawActiveContractLegacy( workflowId = Some("workflow_id"), - synchronizerId = "x::synchronizer1", + synchronizerId = someSynchronizerId.toProtoPrimitive, reassignmentCounter = 0L, - rawCreatedEvent = RawCreatedEvent( - updateId = offset(1).toDecimalString, - offset = 1, - nodeId = 0, + offset = 1, + rawCreatedEvent = RawCreatedEventLegacy( contractId = hashCid("#1"), templateId = someTemplateIdFull, witnessParties = Set("observer"), @@ -639,17 +739,19 @@ private[backend] trait StorageBackendTestsReassignmentEvents ledgerEffectiveTime = someTime, createKeyHash = None, 
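// (A comment sketch of the activeness rule the expectations here encode, as
//  far as it can be read off these fixtures: a create or assign stays active
//  at a given endInclusive only while no consuming exercise and no unassign
//  for the same contract has been ingested on the same synchronizer at or
//  below that sequential id; deactivations on other synchronizers are ignored.)
//
//   def activeAt(sameSynchronizerDeactivationIds: Seq[Long], endInclusive: Long): Boolean =
//     sameSynchronizerDeactivationIds.forall(_ > endInclusive)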
authenticationData = someAuthenticationDataBytes, + representativePackageId = someTemplateIdFull.pkgId, + internalContractId = 42L, ), eventSequentialId = 1L, + nodeId = 0, ), - EventStorageBackend.RawActiveContract( + EventStorageBackend.RawActiveContractLegacy( workflowId = Some("workflow_id"), - synchronizerId = "x::synchronizer1", + synchronizerId = someSynchronizerId.toProtoPrimitive, reassignmentCounter = 0L, - rawCreatedEvent = RawCreatedEvent( - updateId = offset(2).toDecimalString, - offset = 2, - nodeId = 0, + offset = 2, + nodeId = 0, + rawCreatedEvent = RawCreatedEventLegacy( contractId = hashCid("#2"), templateId = someTemplateIdFull, witnessParties = Set("observer"), @@ -664,86 +766,128 @@ private[backend] trait StorageBackendTestsReassignmentEvents ledgerEffectiveTime = someTime, createKeyHash = None, authenticationData = someAuthenticationDataBytes, + representativePackageId = someRepresentativePackageId, + internalContractId = 43L, ), eventSequentialId = 2L, ), ) + } - // same query as first to double check equality predicate - executeSql( - backend.event.activeContractCreateEventBatch( - eventSequentialIds = List(1, 2), - allFilterParties = Some(Set(Ref.Party.assertFromString("observer"))), - endInclusive = 6, - ) - ).map(_.rawCreatedEvent.updateId) shouldBe List(1L, 2L).map(x => offset(x).toDecimalString) + it should "return the correct active contracts from create events" in { + val dbDtos = Vector( + dtosCreate( + event_offset = 1, + event_sequential_id = 1L, + notPersistedContractId = hashCid("#1"), + command_id = Some("command id 1"), + synchronizer_id = someSynchronizerId, + )(), + dtosAssign( + event_offset = 2, + event_sequential_id = 2L, + notPersistedContractId = hashCid("#2"), + command_id = Some("command id 2"), + )(), + ).flatten - // archive in the same synchronizer renders it inactive - executeSql( - backend.event.activeContractCreateEventBatch( - eventSequentialIds = List(1, 2), - allFilterParties = Some(Set(Ref.Party.assertFromString("observer"))), - endInclusive = 10, - ) - ).map(_.rawCreatedEvent.updateId) shouldBe List(1L).map(x => offset(x).toDecimalString) + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(ingest(dbDtos, _)) + executeSql(updateLedgerEnd(offset(11), 11L)) - // unassignment in the same synchronizer renders it inactive executeSql( - backend.event.activeContractCreateEventBatch( + backend.event.activeContractBatch( eventSequentialIds = List(1, 2), - allFilterParties = Some(Set(Ref.Party.assertFromString("observer"))), - endInclusive = 11, + allFilterParties = Some(Set(Ref.Party.assertFromString("stakeholder1"))), + endInclusive = 6, ) - ).map(_.rawCreatedEvent.updateId) shouldBe Nil + ) should contain theSameElementsInOrderAs Vector( + EventStorageBackend.RawThinActiveContract( + commonEventProperties = CommonEventProperties( + eventSequentialId = 1L, + offset = 1, + nodeId = 15, + workflowId = Some("workflow-id"), + synchronizerId = someSynchronizerId.toProtoPrimitive, + ), + thinCreatedEventProperties = ThinCreatedEventProperties( + representativePackageId = "representativepackage", + filteredAdditionalWitnessParties = Set.empty, + internalContractId = 10L, + requestingParties = Some(Set("stakeholder1")), + reassignmentCounter = 0L, + acsDelta = true, + ), + ), + EventStorageBackend.RawThinActiveContract( + commonEventProperties = CommonEventProperties( + eventSequentialId = 2L, + offset = 2, + nodeId = 15, + workflowId = Some("workflow-id"), + synchronizerId = 
someSynchronizerId.toProtoPrimitive, + ), + thinCreatedEventProperties = ThinCreatedEventProperties( + representativePackageId = "representativepackage", + filteredAdditionalWitnessParties = Set.empty, + internalContractId = 10L, + requestingParties = Some(Set("stakeholder1")), + reassignmentCounter = 345L, + acsDelta = true, + ), + ), + ) } it should "return the correct active contracts from assign events, and only if not archived/unassigned" in { val dbDtos = Vector( - dtoUnassign( + dtoUnassignLegacy( offset = offset(1), eventSequentialId = 1L, contractId = hashCid("#1"), - sourceSynchronizerId = "x::synchronizer1", + sourceSynchronizerId = someSynchronizerId, ), - dtoAssign( + dtoAssignLegacy( offset = offset(2), eventSequentialId = 2L, contractId = hashCid("#1"), commandId = "command id 1", - targetSynchronizerId = "x::synchronizer1", + targetSynchronizerId = someSynchronizerId, + internalContractId = 42L, ), - dtoAssign( + dtoAssignLegacy( offset = offset(3), eventSequentialId = 3L, contractId = hashCid("#2"), commandId = "command id 2", - targetSynchronizerId = "x::synchronizer1", + targetSynchronizerId = someSynchronizerId, + internalContractId = 43L, ), - dtoExercise( + dtoExerciseLegacy( offset = offset(4), eventSequentialId = 4L, consuming = true, contractId = hashCid("#2"), - synchronizerId = "x::synchronizer2", + synchronizerId = someSynchronizerId2, ), - dtoUnassign( + dtoUnassignLegacy( offset = offset(5), eventSequentialId = 5L, contractId = hashCid("#2"), - sourceSynchronizerId = "x::synchronizer2", + sourceSynchronizerId = someSynchronizerId2, ), - dtoExercise( + dtoExerciseLegacy( offset = offset(10), eventSequentialId = 10L, consuming = true, contractId = hashCid("#2"), - synchronizerId = "x::synchronizer1", + synchronizerId = someSynchronizerId, ), - dtoUnassign( + dtoUnassignLegacy( offset = offset(11), eventSequentialId = 11L, contractId = hashCid("#1"), - sourceSynchronizerId = "x::synchronizer1", + sourceSynchronizerId = someSynchronizerId, ), ) @@ -752,7 +896,7 @@ private[backend] trait StorageBackendTestsReassignmentEvents executeSql(updateLedgerEnd(offset(11), 11L)) executeSql( - backend.event.activeContractAssignEventBatch( + backend.event.activeContractAssignEventBatchLegacy( eventSequentialIds = List(2, 3), allFilterParties = Some(Set(Ref.Party.assertFromString("observer"))), endInclusive = 6, @@ -766,14 +910,13 @@ private[backend] trait StorageBackendTestsReassignmentEvents ) ) ) shouldBe Vector( - EventStorageBackend.RawActiveContract( + EventStorageBackend.RawActiveContractLegacy( workflowId = Some("workflow_id"), - synchronizerId = "x::synchronizer1", + synchronizerId = someSynchronizerId.toProtoPrimitive, reassignmentCounter = 1000L, - rawCreatedEvent = RawCreatedEvent( - updateId = offset(2).toDecimalString, - offset = 2, - nodeId = 0, + offset = 2, + nodeId = 0, + rawCreatedEvent = RawCreatedEventLegacy( contractId = hashCid("#1"), templateId = someTemplateIdFull, witnessParties = Set("observer"), @@ -788,17 +931,18 @@ private[backend] trait StorageBackendTestsReassignmentEvents ledgerEffectiveTime = someTime, createKeyHash = None, authenticationData = someAuthenticationDataBytes, + representativePackageId = someTemplateIdFull.pkgId, + internalContractId = 42L, ), eventSequentialId = 2L, ), - EventStorageBackend.RawActiveContract( + EventStorageBackend.RawActiveContractLegacy( workflowId = Some("workflow_id"), - synchronizerId = "x::synchronizer1", + synchronizerId = someSynchronizerId.toProtoPrimitive, reassignmentCounter = 1000L, - rawCreatedEvent = 
RawCreatedEvent( - updateId = offset(3).toDecimalString, - offset = 3, - nodeId = 0, + offset = 3, + nodeId = 0, + rawCreatedEvent = RawCreatedEventLegacy( contractId = hashCid("#2"), templateId = someTemplateIdFull, witnessParties = Set("observer"), @@ -813,37 +957,12 @@ private[backend] trait StorageBackendTestsReassignmentEvents ledgerEffectiveTime = someTime, createKeyHash = None, authenticationData = someAuthenticationDataBytes, + representativePackageId = someTemplateIdFull.pkgId, + internalContractId = 43L, ), eventSequentialId = 3L, ), ) - - // same query as first to double check equality predicate - executeSql( - backend.event.activeContractAssignEventBatch( - eventSequentialIds = List(2, 3), - allFilterParties = Some(Set(Ref.Party.assertFromString("observer"))), - endInclusive = 6, - ) - ).map(_.rawCreatedEvent.updateId) shouldBe List(2L, 3L).map(x => offset(x).toDecimalString) - - // archive in the same synchronizer renders it inactive - executeSql( - backend.event.activeContractAssignEventBatch( - eventSequentialIds = List(2, 3), - allFilterParties = Some(Set(Ref.Party.assertFromString("observer"))), - endInclusive = 10, - ) - ).map(_.rawCreatedEvent.updateId) shouldBe List(2L).map(x => offset(x).toDecimalString) - - // unassignment in the same synchronizer renders it inactive - executeSql( - backend.event.activeContractAssignEventBatch( - eventSequentialIds = List(2, 3), - allFilterParties = Some(Set(Ref.Party.assertFromString("observer"))), - endInclusive = 11, - ) - ).map(_.rawCreatedEvent.updateId) shouldBe Nil } behavior of "incomplete lookup related event_sequential_id lookup queries" @@ -859,93 +978,98 @@ private[backend] trait StorageBackendTestsReassignmentEvents ) } + val synchronizerId1 = SynchronizerId.tryFromString("x::synchronizer1") + val synchronizerId2 = SynchronizerId.tryFromString("x::synchronizer2") + val synchronizerId3 = SynchronizerId.tryFromString("x::synchronizer3") + val synchronizerId4 = SynchronizerId.tryFromString("x::synchronizer4") + val dbDtos = Vector( - dtoCreate( + dtoCreateLegacy( offset = offset(1), eventSequentialId = 1L, contractId = hashCid("#1"), commandId = "command id 1", - synchronizerId = "x::synchronizer1", + synchronizerId = synchronizerId1, authenticationData = someAuthenticationDataBytes, ), - dtoCreate( + dtoCreateLegacy( offset = offset(2), eventSequentialId = 2L, contractId = hashCid("#2"), commandId = "command id 2", - synchronizerId = "x::synchronizer1", + synchronizerId = synchronizerId1, authenticationData = someAuthenticationDataBytes, ), - dtoExercise( + dtoExerciseLegacy( offset = offset(3), eventSequentialId = 3L, consuming = true, contractId = hashCid("#2"), - synchronizerId = "x::synchronizer2", + synchronizerId = synchronizerId2, ), - dtoUnassign( + dtoUnassignLegacy( offset = offset(4), eventSequentialId = 4L, contractId = hashCid("#2"), - sourceSynchronizerId = "x::synchronizer2", - targetSynchronizerId = "x::synchronizer1", + sourceSynchronizerId = synchronizerId2, + targetSynchronizerId = synchronizerId1, ), - dtoAssign( + dtoAssignLegacy( offset = offset(5), eventSequentialId = 5L, contractId = hashCid("#2"), - sourceSynchronizerId = "x::synchronizer2", - targetSynchronizerId = "x::synchronizer1", + sourceSynchronizerId = synchronizerId2, + targetSynchronizerId = synchronizerId1, ), - dtoAssign( + dtoAssignLegacy( offset = offset(6), eventSequentialId = 6L, contractId = hashCid("#2"), - sourceSynchronizerId = "x::synchronizer3", - targetSynchronizerId = "x::synchronizer4", + sourceSynchronizerId = 
synchronizerId3, + targetSynchronizerId = synchronizerId4, ), - dtoExercise( + dtoExerciseLegacy( offset = offset(10), eventSequentialId = 10L, consuming = true, contractId = hashCid("#2"), - synchronizerId = "x::synchronizer1", + synchronizerId = synchronizerId1, ), - dtoUnassign( + dtoUnassignLegacy( offset = offset(11), eventSequentialId = 11L, contractId = hashCid("#1"), - sourceSynchronizerId = "x::synchronizer1", + sourceSynchronizerId = synchronizerId1, ), - dtoUnassign( + dtoUnassignLegacy( offset = offset(12), eventSequentialId = 12L, contractId = hashCid("#1"), - targetSynchronizerId = "x::synchronizer2", + targetSynchronizerId = synchronizerId2, ), - dtoAssign( + dtoAssignLegacy( offset = offset(13), eventSequentialId = 13L, contractId = hashCid("#2"), - targetSynchronizerId = "x::synchronizer2", + targetSynchronizerId = synchronizerId2, ), - dtoUnassign( + dtoUnassignLegacy( offset = offset(14), eventSequentialId = 14L, contractId = hashCid("#2"), - sourceSynchronizerId = "x::synchronizer2", + sourceSynchronizerId = synchronizerId2, ), - dtoAssign( + dtoAssignLegacy( offset = offset(15), eventSequentialId = 15L, contractId = hashCid("#2"), - targetSynchronizerId = "x::synchronizer2", + targetSynchronizerId = synchronizerId2, ), - dtoCreate( + dtoCreateLegacy( offset = offset(16), eventSequentialId = 16L, contractId = hashCid("#3"), - synchronizerId = "x::synchronizer4", + synchronizerId = synchronizerId4, ), ) @@ -954,7 +1078,7 @@ private[backend] trait StorageBackendTestsReassignmentEvents executeSql(updateLedgerEnd(offset(16), 16L)) executeSql( - backend.event.lookupAssignSequentialIdByOffset( + backend.event.lookupAssignSequentialIdByOffsetLegacy( List( 1L, 5L, @@ -964,14 +1088,14 @@ private[backend] trait StorageBackendTestsReassignmentEvents ) ) shouldBe Vector(5L, 6L) executeSql( - backend.event.lookupUnassignSequentialIdByOffset( + backend.event.lookupUnassignSequentialIdByOffsetLegacy( List( 1L, 4L, 6L, 7L, 11L, ) ) ) shouldBe Vector(4L, 11L) executeSql( - backend.event.lookupAssignSequentialIdBy( + backend.event.lookupAssignSequentialIdByLegacy( List( // (contractId, synchronizerId, sequentialId) (1, 2, 16L), // not found @@ -984,7 +1108,7 @@ private[backend] trait StorageBackendTestsReassignmentEvents // check that the last assign event is preferred over the earlier executeSql( - backend.event.lookupAssignSequentialIdBy( + backend.event.lookupAssignSequentialIdByLegacy( List( // (contractId, synchronizerId, sequentialId) (1, 2, 16L), // not found @@ -1000,7 +1124,7 @@ private[backend] trait StorageBackendTestsReassignmentEvents ).map { case (tuple, id) => (toDbValues(tuple), id) } // check that sequential id is taken into account executeSql( - backend.event.lookupAssignSequentialIdBy( + backend.event.lookupAssignSequentialIdByLegacy( List( // (contractId, synchronizerId, sequentialId) (2, 2, 15L), // last <15 found at 13 @@ -1015,7 +1139,7 @@ private[backend] trait StorageBackendTestsReassignmentEvents executeSql( // test that we will not find the create event if we use the correct contract id but a wrong synchronizer id - backend.event.lookupCreateSequentialIdByContractId( + backend.event.lookupCreateSequentialIdByContractIdLegacy( List( 1, // found at 1 2, // found at 2 @@ -1025,10 +1149,10 @@ private[backend] trait StorageBackendTestsReassignmentEvents } def rawCreatedEventHasExpectedCreateArgumentAndAuthenticationData( - rawCreatedEvent: RawCreatedEvent, + rawCreatedEvent: RawCreatedEventLegacy, createArgument: Array[Byte], authenticationData: Array[Byte], - ): 
RawCreatedEvent = { + ): RawCreatedEventLegacy = { rawCreatedEvent.createArgument.toList shouldBe createArgument.toList rawCreatedEvent.authenticationData.toList shouldBe authenticationData.toList rawCreatedEvent.copy( @@ -1038,8 +1162,8 @@ private[backend] trait StorageBackendTestsReassignmentEvents } private def sanitize( - original: Entry[EventStorageBackend.RawAssignEvent] - ): Entry[EventStorageBackend.RawAssignEvent] = + original: Entry[EventStorageBackend.RawAssignEventLegacy] + ): Entry[EventStorageBackend.RawAssignEventLegacy] = original.copy( event = original.event.copy( rawCreatedEvent = rawCreatedEventHasExpectedCreateArgumentAndAuthenticationData( @@ -1056,7 +1180,7 @@ private[backend] trait StorageBackendTestsReassignmentEvents actual: Option[Array[Byte]], expected: Option[Array[Byte]], ): Option[Array[Byte]] = { - actual should equal(expected) + actual shouldEqual expected expected } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsReset.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsReset.scala index 8366167515..4c5485b8c9 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsReset.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsReset.scala @@ -4,7 +4,9 @@ package com.digitalasset.canton.platform.store.backend import com.digitalasset.canton.platform.store.backend.EventStorageBackend.SequentialIdBatch.Ids -import com.digitalasset.canton.platform.store.backend.common.EventPayloadSourceForUpdatesLedgerEffects +import com.digitalasset.canton.platform.store.backend.common.EventIdSourceLegacy.CreateStakeholder +import com.digitalasset.canton.platform.store.backend.common.EventPayloadSourceForUpdatesLedgerEffectsLegacy +import com.digitalasset.canton.platform.store.dao.PaginatingAsyncStream.PaginationInput import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers @@ -41,18 +43,33 @@ private[backend] trait StorageBackendTestsReset extends Matchers with StorageBac // 1: party allocation dtoPartyEntry(offset(1)), // 2: transaction with create node - dtoCreate(offset(2), 1L, hashCid("#3")), - DbDto.IdFilterCreateStakeholder(1L, someTemplateId.toString, someParty.toString), + dtoCreateLegacy(offset(2), 1L, hashCid("#3")), + DbDto.IdFilterCreateStakeholder( + 1L, + someTemplateId.toString, + someParty.toString, + first_per_sequential_id = true, + ), dtoCompletion(offset(2)), // 3: transaction with exercise node and retroactive divulgence - dtoExercise(offset(3), 2L, true, hashCid("#3")), + dtoExerciseLegacy(offset(3), 2L, true, hashCid("#3")), dtoCompletion(offset(3)), // 4: assign event - dtoAssign(offset(4), 4L, hashCid("#4")), - DbDto.IdFilterAssignStakeholder(4L, someTemplateId.toString, someParty.toString), + dtoAssignLegacy(offset(4), 4L, hashCid("#4")), + DbDto.IdFilterAssignStakeholder( + 4L, + someTemplateId.toString, + someParty, + first_per_sequential_id = true, + ), // 5: unassign event - dtoUnassign(offset(5), 5L, hashCid("#5")), - DbDto.IdFilterUnassignStakeholder(5L, someTemplateId.toString, someParty.toString), + dtoUnassignLegacy(offset(5), 5L, hashCid("#5")), + DbDto.IdFilterUnassignStakeholder( + 5L, + someTemplateId.toString, + someParty, + first_per_sequential_id = true, + ), // 6: topology transaction 
dtoPartyToParticipant(offset(6), 6L), // String interning @@ -71,13 +88,13 @@ private[backend] trait StorageBackendTestsReset extends Matchers with StorageBac def events = executeSql( - backend.event.fetchEventPayloadsLedgerEffects( - EventPayloadSourceForUpdatesLedgerEffects.Create + backend.event.fetchEventPayloadsLedgerEffectsLegacy( + EventPayloadSourceForUpdatesLedgerEffectsLegacy.Create )(Ids(List(1L)), Some(Set.empty)) ) ++ executeSql( - backend.event.fetchEventPayloadsLedgerEffects( - EventPayloadSourceForUpdatesLedgerEffects.Consuming + backend.event.fetchEventPayloadsLedgerEffectsLegacy( + EventPayloadSourceForUpdatesLedgerEffectsLegacy.Consuming )(Ids(List(2L)), Some(Set.empty)) ) @@ -88,46 +105,55 @@ private[backend] trait StorageBackendTestsReset extends Matchers with StorageBac ) def filterIds = executeSql( - backend.event.updateStreamingQueries.fetchIdsOfCreateEventsForStakeholder( + backend.event.updateStreamingQueries.fetchEventIdsLegacy(CreateStakeholder)( stakeholderO = Some(someParty), templateIdO = None, - startExclusive = 0, - endInclusive = 1000, - limit = 1000, + )(_)( + PaginationInput( + startExclusive = 0, + endInclusive = 1000, + limit = 1000, + ) ) ) def assignEvents = executeSql( - backend.event.assignEventBatch( + backend.event.assignEventBatchLegacy( eventSequentialIds = Ids(List(4)), allFilterParties = Some(Set.empty), ) ) def unassignEvents = executeSql( - backend.event.unassignEventBatch( + backend.event.unassignEventBatchLegacy( eventSequentialIds = Ids(List(5)), allFilterParties = Some(Set.empty), ) ) def assignIds = executeSql( - backend.event.fetchAssignEventIdsForStakeholder( + backend.event.fetchAssignEventIdsForStakeholderLegacy( stakeholderO = Some(someParty), templateId = None, - startExclusive = 0L, - endInclusive = 1000L, - 1000, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 1000L, + 1000, + ) ) ) def reassignmentIds = executeSql( - backend.event.fetchUnassignEventIdsForStakeholder( + backend.event.fetchUnassignEventIdsForStakeholderLegacy( stakeholderO = Some(someParty), templateId = None, - startExclusive = 0L, - endInclusive = 1000L, - 1000, + )(_)( + PaginationInput( + startExclusive = 0L, + endInclusive = 1000L, + 1000, + ) ) ) diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsTimestamps.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsTimestamps.scala index 79b12b5e3d..c7e7b868b5 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsTimestamps.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsTimestamps.scala @@ -21,7 +21,7 @@ private[backend] trait StorageBackendTestsTimestamps extends Matchers with Stora it should "correctly read ledger effective time using rawEvents" in { val let = timestampFromInstant(Instant.now) val cid = hashCid("#1") - val create = dtoCreate( + val create = dtoCreateLegacy( offset = offset(1), eventSequentialId = 1L, contractId = cid, @@ -33,7 +33,7 @@ private[backend] trait StorageBackendTestsTimestamps extends Matchers with Stora executeSql(ingest(Vector(create), _)) executeSql(updateLedgerEnd(offset(1), 1L)) - val events = backend.event.activeContractCreateEventBatch( + val events = backend.event.activeContractCreateEventBatchLegacy( List(1L), 
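// (Sketch of the id-filter convention visible in the DTOs above; the rationale
//  is an inference: every filter-table row now carries first_per_sequential_id,
//  set on exactly one row per event, presumably so that party-wildcard queries
//  return each event once instead of once per stakeholder; this is also why
//  the wildcard assertions earlier in this diff changed from Vector(1, 1) to
//  Vector(1).)
//
//   def idFilterRows(seqId: Long, tpl: String, parties: Seq[String]) =
//     parties.zipWithIndex.map { case (party, i) =>
//       DbDto.IdFilterCreateStakeholder(seqId, tpl, party, first_per_sequential_id = i == 0)
//     }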
Some(Set(Ref.Party.assertFromString("signatory"))), 1L, diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsTransactionStreamsEvents.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsTransactionStreamsEvents.scala index 9e28b420a1..b3bb862a12 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsTransactionStreamsEvents.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsTransactionStreamsEvents.scala @@ -10,13 +10,13 @@ import com.digitalasset.canton.platform.store.backend.EventStorageBackend.Sequen Ids, } import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{ - RawCreatedEvent, - RawFlatEvent, - RawTreeEvent, + RawAcsDeltaEventLegacy, + RawCreatedEventLegacy, + RawLedgerEffectsEventLegacy, } import com.digitalasset.canton.platform.store.backend.common.{ - EventPayloadSourceForUpdatesAcsDelta, - EventPayloadSourceForUpdatesLedgerEffects, + EventPayloadSourceForUpdatesAcsDeltaLegacy, + EventPayloadSourceForUpdatesLedgerEffectsLegacy, } import com.digitalasset.daml.lf.data.{Ref, Time} import com.google.protobuf.ByteString @@ -45,7 +45,7 @@ private[backend] trait StorageBackendTestsTransactionStreamsEvents it should "return the correct created_at" in { - val create = dtoCreate( + val create = dtoCreateLegacy( offset = offset(1), eventSequentialId = 1L, contractId = contractId1, @@ -67,14 +67,14 @@ private[backend] trait StorageBackendTestsTransactionStreamsEvents def testExternalTransactionHash(hash: Option[Array[Byte]]) = { val creates = Vector( - dtoCreate( + dtoCreateLegacy( offset(1), 1L, contractId = contractId1, signatory = signatory, externalTransactionHash = hash, ), - dtoExercise( + dtoExerciseLegacy( offset(1), 2L, consuming = true, @@ -82,7 +82,7 @@ private[backend] trait StorageBackendTestsTransactionStreamsEvents signatory = signatory, externalTransactionHash = hash, ), - dtoExercise( + dtoExerciseLegacy( offset(1), 2L, consuming = false, @@ -96,13 +96,13 @@ private[backend] trait StorageBackendTestsTransactionStreamsEvents val someParty = Ref.Party.assertFromString(signatory) val filterParties = Some(Set(someParty)) - def flatTransactionEvents(target: EventPayloadSourceForUpdatesAcsDelta) = executeSql( - backend.event.fetchEventPayloadsAcsDelta( + def flatTransactionEvents(target: EventPayloadSourceForUpdatesAcsDeltaLegacy) = executeSql( + backend.event.fetchEventPayloadsAcsDeltaLegacy( target )(eventSequentialIds = Ids(Seq(1L, 2L, 3L, 4L)), filterParties) ) - def transactionTreeEvents(target: EventPayloadSourceForUpdatesLedgerEffects) = executeSql( - backend.event.fetchEventPayloadsLedgerEffects( + def transactionTreeEvents(target: EventPayloadSourceForUpdatesLedgerEffectsLegacy) = executeSql( + backend.event.fetchEventPayloadsLedgerEffectsLegacy( target )(eventSequentialIds = Ids(Seq(1L, 2L, 3L, 4L)), filterParties) ) @@ -111,31 +111,31 @@ private[backend] trait StorageBackendTestsTransactionStreamsEvents val expectedHash = hash.map(byteArrayToHash) - flatTransactionEvents(EventPayloadSourceForUpdatesAcsDelta.Create) + flatTransactionEvents(EventPayloadSourceForUpdatesAcsDeltaLegacy.Create) .map( _.externalTransactionHash ) .loneElement .map(byteArrayToHash) shouldBe expectedHash - 
flatTransactionEvents(EventPayloadSourceForUpdatesAcsDelta.Consuming) + flatTransactionEvents(EventPayloadSourceForUpdatesAcsDeltaLegacy.Consuming) .map( _.externalTransactionHash ) .loneElement .map(byteArrayToHash) shouldBe expectedHash - transactionTreeEvents(EventPayloadSourceForUpdatesLedgerEffects.Create) + transactionTreeEvents(EventPayloadSourceForUpdatesLedgerEffectsLegacy.Create) .map( _.externalTransactionHash ) .loneElement .map(byteArrayToHash) shouldBe expectedHash - transactionTreeEvents(EventPayloadSourceForUpdatesLedgerEffects.Consuming) + transactionTreeEvents(EventPayloadSourceForUpdatesLedgerEffectsLegacy.Consuming) .map( _.externalTransactionHash ) .loneElement .map(byteArrayToHash) shouldBe expectedHash - transactionTreeEvents(EventPayloadSourceForUpdatesLedgerEffects.NonConsuming) + transactionTreeEvents(EventPayloadSourceForUpdatesLedgerEffectsLegacy.NonConsuming) .map( _.externalTransactionHash ) @@ -160,10 +160,10 @@ private[backend] trait StorageBackendTestsTransactionStreamsEvents it should "return the correct stream contents for acs" in { val creates = Vector( - dtoCreate(offset(1), 1L, contractId = contractId1, signatory = signatory), - dtoCreate(offset(1), 2L, contractId = contractId2, signatory = signatory), - dtoCreate(offset(1), 3L, contractId = contractId3, signatory = signatory), - dtoCreate(offset(1), 4L, contractId = contractId4, signatory = signatory), + dtoCreateLegacy(offset(1), 1L, contractId = contractId1, signatory = signatory), + dtoCreateLegacy(offset(1), 2L, contractId = contractId2, signatory = signatory), + dtoCreateLegacy(offset(1), 3L, contractId = contractId3, signatory = signatory), + dtoCreateLegacy(offset(1), 4L, contractId = contractId4, signatory = signatory), ) ingestDtos(creates) @@ -174,17 +174,16 @@ private[backend] trait StorageBackendTestsTransactionStreamsEvents flatTransactionEventsRange, transactionTreeEvents, transactionTreeEventsRange, - _transactionTree, acs, ) = fetch(Some(Set(someParty))) flatTransactionEvents.map(_.eventSequentialId) shouldBe Vector(1L, 2L, 3L, 4L) - flatTransactionEvents.map(_.event).collect { case created: RawCreatedEvent => + flatTransactionEvents.map(_.event).collect { case created: RawCreatedEventLegacy => created.contractId } shouldBe Vector(contractId1, contractId2, contractId3, contractId4) transactionTreeEvents.map(_.eventSequentialId) shouldBe Vector(1L, 2L, 3L, 4L) - transactionTreeEvents.map(_.event).collect { case created: RawCreatedEvent => + transactionTreeEvents.map(_.event).collect { case created: RawCreatedEventLegacy => created.contractId } shouldBe Vector(contractId1, contractId2, contractId3, contractId4) @@ -201,17 +200,16 @@ private[backend] trait StorageBackendTestsTransactionStreamsEvents flatTransactionEventsSuperReaderRange, transactionTreeEventsSuperReader, transactionTreeEventsSuperReaderRange, - _, acsSuperReader, ) = fetch(None) flatTransactionEventsSuperReader.map(_.eventSequentialId) shouldBe Vector(1L, 2L, 3L, 4L) - flatTransactionEventsSuperReader.map(_.event).collect { case created: RawCreatedEvent => + flatTransactionEventsSuperReader.map(_.event).collect { case created: RawCreatedEventLegacy => created.contractId } shouldBe Vector(contractId1, contractId2, contractId3, contractId4) transactionTreeEventsSuperReader.map(_.eventSequentialId) shouldBe Vector(1L, 2L, 3L, 4L) - transactionTreeEventsSuperReader.map(_.event).collect { case created: RawCreatedEvent => + transactionTreeEventsSuperReader.map(_.event).collect { case created: RawCreatedEventLegacy => 
created.contractId } shouldBe Vector(contractId1, contractId2, contractId3, contractId4) @@ -233,38 +231,33 @@ private[backend] trait StorageBackendTestsTransactionStreamsEvents private def fetch(filterParties: Option[Set[Ref.Party]]) = { val flatTransactionEvents = executeSql( - backend.event.fetchEventPayloadsAcsDelta( - EventPayloadSourceForUpdatesAcsDelta.Create + backend.event.fetchEventPayloadsAcsDeltaLegacy( + EventPayloadSourceForUpdatesAcsDeltaLegacy.Create )(eventSequentialIds = Ids(Seq(1L, 2L, 3L, 4L)), filterParties) ) val flatTransactionEventsRange = executeSql( - backend.event.fetchEventPayloadsAcsDelta( - EventPayloadSourceForUpdatesAcsDelta.Create + backend.event.fetchEventPayloadsAcsDeltaLegacy( + EventPayloadSourceForUpdatesAcsDeltaLegacy.Create )(eventSequentialIds = IdRange(1L, 4L), filterParties) ) val transactionTreeEvents = executeSql( - backend.event.fetchEventPayloadsLedgerEffects( - EventPayloadSourceForUpdatesLedgerEffects.Create + backend.event.fetchEventPayloadsLedgerEffectsLegacy( + EventPayloadSourceForUpdatesLedgerEffectsLegacy.Create )(eventSequentialIds = Ids(Seq(1L, 2L, 3L, 4L)), filterParties) ) val transactionTreeEventsRange = executeSql( - backend.event.fetchEventPayloadsLedgerEffects( - EventPayloadSourceForUpdatesLedgerEffects.Create + backend.event.fetchEventPayloadsLedgerEffectsLegacy( + EventPayloadSourceForUpdatesLedgerEffectsLegacy.Create )(eventSequentialIds = IdRange(1L, 4L), filterParties) ) - val transactionTree = executeSql( - backend.event.updatePointwiseQueries - .fetchTreeTransactionEvents(1L, 1L, filterParties) - ) val acs = executeSql( - backend.event.activeContractCreateEventBatch(Seq(1L, 2L, 3L, 4L), filterParties, 4L) + backend.event.activeContractCreateEventBatchLegacy(Seq(1L, 2L, 3L, 4L), filterParties, 4L) ) ( flatTransactionEvents, flatTransactionEventsRange, transactionTreeEvents, transactionTreeEventsRange, - transactionTree, acs, ) } @@ -278,25 +271,19 @@ private[backend] trait StorageBackendTestsTransactionStreamsEvents flatTransactionEventsRange, transactionTreeEvents, transactionTreeEventsRange, - transactionTree, acs, ) = fetch(partiesO) - extractCreatedAtFrom[RawCreatedEvent, RawFlatEvent]( + extractCreatedAtFrom[RawCreatedEventLegacy, RawAcsDeltaEventLegacy]( in = flatTransactionEvents, createdAt = _.ledgerEffectiveTime, ) shouldBe expectedCreatedAt - extractCreatedAtFrom[RawCreatedEvent, RawTreeEvent]( + extractCreatedAtFrom[RawCreatedEventLegacy, RawLedgerEffectsEventLegacy]( in = transactionTreeEvents, createdAt = _.ledgerEffectiveTime, ) shouldBe expectedCreatedAt - extractCreatedAtFrom[RawCreatedEvent, RawTreeEvent]( - in = transactionTree, - createdAt = _.ledgerEffectiveTime, - ) shouldBe expectedCreatedAt - acs.head.rawCreatedEvent.ledgerEffectiveTime shouldBe expectedCreatedAt flatTransactionEventsRange.map(_.eventSequentialId) shouldBe diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoLegacySpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoLegacySpec.scala new file mode 100644 index 0000000000..5f8472591e --- /dev/null +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoLegacySpec.scala @@ -0,0 +1,2119 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.platform.store.backend + +import com.daml.metrics.api.MetricsContext +import com.daml.platform.v1.index.StatusDetails +import com.digitalasset.canton.RepairCounter +import com.digitalasset.canton.data.DeduplicationPeriod.{DeduplicationDuration, DeduplicationOffset} +import com.digitalasset.canton.data.{CantonTimestamp, LedgerTimeBoundaries, Offset} +import com.digitalasset.canton.ledger.participant.state +import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective.AuthorizationEvent.{ + Added, + ChangedTo, + Revoked, +} +import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective.AuthorizationLevel.* +import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective.TopologyEvent.PartyToParticipantAuthorization +import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective.{ + AuthorizationEvent, + TopologyEvent, +} +import com.digitalasset.canton.ledger.participant.state.Update.TransactionAccepted.RepresentativePackageIds +import com.digitalasset.canton.ledger.participant.state.{ + Reassignment, + ReassignmentInfo, + TestAcsChangeFactory, + Update, +} +import com.digitalasset.canton.metrics.LedgerApiServerMetrics +import com.digitalasset.canton.platform.store.backend.Conversions.{ + authorizationEventInt, + participantPermissionInt, +} +import com.digitalasset.canton.platform.store.backend.StorageBackendTestValues.someExternalTransactionHash +import com.digitalasset.canton.platform.store.backend.UpdateToDbDtoLegacy.templateIdWithPackageName +import com.digitalasset.canton.platform.store.dao.JdbcLedgerDao +import com.digitalasset.canton.platform.store.dao.events.{ + CompressionStrategy, + FieldCompressionStrategy, + LfValueSerialization, +} +import com.digitalasset.canton.platform.{ContractId, Create, Exercise} +import com.digitalasset.canton.protocol.{ReassignmentId, TestUpdateId} +import com.digitalasset.canton.topology.SynchronizerId +import com.digitalasset.canton.tracing.SerializableTraceContextConverter.SerializableTraceContextExtension +import com.digitalasset.canton.tracing.TraceContext.Implicits.Empty.emptyTraceContext +import com.digitalasset.canton.tracing.{SerializableTraceContext, TraceContext} +import com.digitalasset.canton.util.ReassignmentTag.{Source, Target} +import com.digitalasset.daml.lf.crypto +import com.digitalasset.daml.lf.data.{Bytes, Ref, Time} +import com.digitalasset.daml.lf.transaction.GlobalKey +import com.digitalasset.daml.lf.transaction.test.TestNodeBuilder.CreateKey +import com.digitalasset.daml.lf.transaction.test.{ + NodeIdTransactionBuilder, + TestNodeBuilder, + TransactionBuilder, +} +import com.digitalasset.daml.lf.value.Value +import com.google.rpc.status.Status as StatusProto +import io.grpc.Status +import org.scalatest.matchers.should.Matchers +import org.scalatest.prop.TableDrivenPropertyChecks.* +import org.scalatest.wordspec.AnyWordSpec + +import java.time.{Duration, Instant} +import java.util.UUID + +// Note: this suite contains hand-crafted updates that are impossible to produce on some ledgers +// (e.g., because the ledger removes rollback nodes before sending them to the index database). +// Should you ever consider replacing this suite by something else, make sure all functionality is still covered. 
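A hedged orientation note for the cases below: the helper name updateToDtos is
taken from its call sites later in this spec, and its exact signature is an
assumption, but each accepted update is asserted to map to an ordered batch of
DbDto rows, with the event rows first, then their id-filter rows, then a
CommandCompletion where the update carries completion info, and one
TransactionMeta row at the end.

  // Hypothetical helper shape, for orientation only:
  // private def updateToDtos(update: state.Update): List[DbDto]

  // A minimal, assumed-name sanity check of the ordering described above,
  // valid for the accepted-update cases in this suite:
  private def endsWithTransactionMeta(dtos: List[DbDto]): Boolean =
    dtos.lastOption.exists(_.isInstanceOf[DbDto.TransactionMeta])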
+class UpdateToDbDtoLegacySpec extends AnyWordSpec with Matchers { + + import TraceContext.Implicits.Empty.* + import TransactionBuilder.Implicits.* + import UpdateToDbDtoLegacySpec.* + + object TxBuilder { + def apply(): NodeIdTransactionBuilder & TestNodeBuilder = new NodeIdTransactionBuilder + with TestNodeBuilder + } + + "UpdateToDbDto" should { + + "handle PartyAddedToParticipant (local party)" in { + val update = state.Update.PartyAddedToParticipant( + someParty, + someParticipantId, + someRecordTime, + Some(someSubmissionId), + ) + val dtos = updateToDtos(update) + + dtos should contain theSameElementsInOrderAs List( + DbDto.PartyEntry( + ledger_offset = someOffset.unwrap, + recorded_at = someRecordTime.toMicros, + submission_id = Some(someSubmissionId), + party = Some(someParty), + typ = JdbcLedgerDao.acceptType, + rejection_reason = None, + is_local = Some(true), + ) + ) + } + + "handle PartyAddedToParticipant (remote party)" in { + val update = state.Update.PartyAddedToParticipant( + someParty, + otherParticipantId, + someRecordTime, + None, + ) + val dtos = updateToDtos(update) + + dtos should contain theSameElementsInOrderAs List( + DbDto.PartyEntry( + ledger_offset = someOffset.unwrap, + recorded_at = someRecordTime.toMicros, + submission_id = None, + party = Some(someParty), + typ = JdbcLedgerDao.acceptType, + rejection_reason = None, + is_local = Some(false), + ) + ) + } + + "handle CommandRejected (sequenced rejection)" in { + val status = StatusProto.of(Status.Code.ABORTED.value(), "test reason", Seq.empty) + val completionInfo = someCompletionInfo + val update = state.Update.SequencedCommandRejected( + completionInfo, + state.Update.CommandRejected.FinalReason(status), + someSynchronizerId1, + CantonTimestamp.ofEpochMicro(1234567), + ) + val dtos = updateToDtos(update) + + dtos should contain theSameElementsInOrderAs List( + DbDto.CommandCompletion( + completion_offset = someOffset.unwrap, + record_time = 1234567L, + publication_time = 0, + user_id = someUserId, + submitters = Set(someParty), + command_id = someCommandId, + update_id = None, + rejection_status_code = Some(status.code), + rejection_status_message = Some(status.message), + rejection_status_details = Some(StatusDetails.of(status.details).toByteArray), + submission_id = Some(someSubmissionId), + deduplication_offset = None, + deduplication_duration_seconds = None, + deduplication_duration_nanos = None, + synchronizer_id = someSynchronizerId1, + message_uuid = None, + is_transaction = true, + trace_context = serializedEmptyTraceContext, + ) + ) + } + + "handle CommandRejected (local rejection)" in { + val status = StatusProto.of(Status.Code.ABORTED.value(), "test reason", Seq.empty) + val messageUuid = UUID.randomUUID() + val completionInfo = someCompletionInfo + val update = state.Update.UnSequencedCommandRejected( + completionInfo, + state.Update.CommandRejected.FinalReason(status), + someSynchronizerId1, + someRecordTime, + messageUuid, + ) + val dtos = updateToDtos(update) + + dtos should contain theSameElementsInOrderAs List( + DbDto.CommandCompletion( + completion_offset = someOffset.unwrap, + record_time = someRecordTime.toMicros, + publication_time = 0, + user_id = someUserId, + submitters = Set(someParty), + command_id = someCommandId, + update_id = None, + rejection_status_code = Some(status.code), + rejection_status_message = Some(status.message), + rejection_status_details = Some(StatusDetails.of(status.details).toByteArray), + submission_id = Some(someSubmissionId), + deduplication_offset = None, 
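// (Sketch of the sequenced/local rejection split these two cases pin down,
//  inferred from the expected completions: a sequenced rejection records the
//  sequencer's record time and no message UUID, while a local, unsequenced
//  rejection is correlated by the message UUID it was submitted with.
//  isSequenced below is a hypothetical flag, for illustration only.)
//
//   val messageUuidColumn: Option[String] =
//     if (isSequenced) None else Some(messageUuid.toString)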
+ deduplication_duration_seconds = None, + deduplication_duration_nanos = None, + synchronizer_id = someSynchronizerId1, + message_uuid = Some(messageUuid.toString), + is_transaction = true, + trace_context = serializedEmptyTraceContext, + ) + ) + } + + val updateId = TestUpdateId("mock_hash") + val updateIdByteArray = updateId.toProtoPrimitive.toByteArray + + // We only care about distinguishing between repair and sequencer transactions for create nodes + // since for create nodes the representative package-id assignment policies are different between the two + def handleTransactionAcceptedSingleCreateNode( + isAcsDelta: Boolean, + isRepairTransaction: Boolean, + ): Unit = { + assert( + isRepairTransaction && isAcsDelta || !isRepairTransaction, + "Repair transaction is implicitly an ACS delta", + ) + val updateName = + if (isRepairTransaction) classOf[state.Update.RepairTransactionAccepted].getSimpleName + else classOf[state.Update.SequencedTransactionAccepted].getSimpleName + s"handle $updateName (single create node, isAcsDelta = $isAcsDelta)" in { + val completionInfo = someCompletionInfo + val transactionMeta = someTransactionMeta + val externalTransactionHash = someExternalTransactionHash + val builder = TxBuilder() + val contractId = builder.newCid + val internalContractIds = Map(contractId -> 0L) + val contractTemplate = Ref.Identifier.assertFromString("P:M:T") + val keyValue = Value.ValueUnit + val createNode = builder + .create( + id = contractId, + templateId = contractTemplate, + argument = Value.ValueUnit, + signatories = Set("signatory1", "signatory2", "signatory3"), + observers = Set("observer"), + key = CreateKey.KeyWithMaintainers(keyValue, Set("signatory2", "signatory3")), + ) + val createNodeId = builder.add(createNode) + val transaction = builder.buildCommitted() + val update = + if (isRepairTransaction) + state.Update.RepairTransactionAccepted( + transactionMeta = transactionMeta, + transaction = transaction, + updateId = updateId, + contractAuthenticationData = Map(contractId -> someContractAuthenticationData), + representativePackageIds = RepresentativePackageIds.DedicatedRepresentativePackageIds( + Map(contractId -> someRepresentativePackageId) + ), + synchronizerId = someSynchronizerId1, + recordTime = someRecordTime, + repairCounter = RepairCounter(1337), + internalContractIds = internalContractIds, + ) + else + state.Update.SequencedTransactionAccepted( + completionInfoO = Some(completionInfo), + transactionMeta = transactionMeta, + transaction = transaction, + updateId = updateId, + contractAuthenticationData = Map(contractId -> someContractAuthenticationData), + synchronizerId = someSynchronizerId1, + recordTime = someRecordTime, + externalTransactionHash = Some(externalTransactionHash), + acsChangeFactory = TestAcsChangeFactory(contractActivenessChanged = isAcsDelta), + internalContractIds = internalContractIds, + ) + val dtos = updateToDtos(update) + + val dtoCreate = DbDto.EventCreate( + event_offset = someOffset.unwrap, + update_id = updateIdByteArray, + ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, + command_id = Option.when(!isRepairTransaction)(completionInfo.commandId), + workflow_id = transactionMeta.workflowId, + user_id = Option.when(!isRepairTransaction)(completionInfo.userId), + submitters = Option.when(!isRepairTransaction)(completionInfo.actAs.toSet), + node_id = createNodeId.index, + contract_id = createNode.coid, + template_id = templateIdWithPackageName(createNode), + package_id = createNode.templateId.packageId, + 
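// (Sketch of the representative package-id policy this case exercises, read
//  off the expected EventCreate: repair transactions supply an explicit
//  per-contract representative package-id via
//  RepresentativePackageIds.DedicatedRepresentativePackageIds, while sequenced
//  transactions fall back to the create node's own template package-id.)
//
//   val representativePackageId: Ref.PackageId =
//     if (isRepairTransaction) someRepresentativePackageId
//     else createNode.templateId.packageId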
flat_event_witnesses = + if (isAcsDelta) Set("signatory1", "signatory2", "signatory3", "observer") + else Set.empty, // stakeholders + tree_event_witnesses = + Set("signatory1", "signatory2", "signatory3", "observer"), // informees + create_argument = emptyArray, + create_signatories = Set("signatory1", "signatory2", "signatory3"), + create_observers = Set("observer"), + create_key_value = Some(emptyArray), + create_key_maintainers = Some(Set("signatory2", "signatory3")), + create_key_hash = Some( + GlobalKey + .assertBuild(contractTemplate, keyValue, createNode.packageName) + .hash + .bytes + .toHexString + ), + create_argument_compression = compressionAlgorithmId, + create_key_value_compression = compressionAlgorithmId, + event_sequential_id = 0, + authentication_data = someContractAuthenticationData.toByteArray, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, + record_time = someRecordTime.toMicros, + external_transaction_hash = + Option.when(!isRepairTransaction)(externalTransactionHash.unwrap.toByteArray), + representative_package_id = + if (isRepairTransaction) someRepresentativePackageId + else createNode.templateId.packageId, + internal_contract_id = 0L, + ) + val dtoCompletion = DbDto.CommandCompletion( + completion_offset = someOffset.unwrap, + record_time = someRecordTime.toMicros, + publication_time = 0, + user_id = completionInfo.userId, + submitters = completionInfo.actAs.toSet, + command_id = completionInfo.commandId, + update_id = Some(updateIdByteArray), + rejection_status_code = None, + rejection_status_message = None, + rejection_status_details = None, + submission_id = completionInfo.submissionId, + deduplication_offset = None, + deduplication_duration_nanos = None, + deduplication_duration_seconds = None, + synchronizer_id = someSynchronizerId1, + message_uuid = None, + is_transaction = true, + trace_context = serializedEmptyTraceContext, + ) + val dtoTransactionMeta = DbDto.TransactionMeta( + update_id = updateIdByteArray, + event_offset = someOffset.unwrap, + publication_time = 0, + record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + event_sequential_id_first = 0, + event_sequential_id_last = 0, + ) + + dtos.head shouldEqual dtoCreate + if (!isRepairTransaction) { + dtos(5) shouldEqual dtoCompletion + dtos(6) shouldEqual dtoTransactionMeta + } else { + dtos(5) shouldEqual dtoTransactionMeta + } + Set(dtos(1), dtos(2), dtos(3), dtos(4)) should contain theSameElementsAs + (if (isAcsDelta) + Set( + DbDto.IdFilterCreateStakeholder( + 0L, + templateIdWithPackageName(createNode), + "signatory1", + first_per_sequential_id = true, + ), + DbDto.IdFilterCreateStakeholder( + 0L, + templateIdWithPackageName(createNode), + "signatory2", + first_per_sequential_id = false, + ), + DbDto.IdFilterCreateStakeholder( + 0L, + templateIdWithPackageName(createNode), + "signatory3", + first_per_sequential_id = false, + ), + DbDto + .IdFilterCreateStakeholder( + 0L, + templateIdWithPackageName(createNode), + "observer", + first_per_sequential_id = false, + ), + ) + else + Set( + DbDto.IdFilterCreateNonStakeholderInformee( + 0L, + templateIdWithPackageName(createNode), + "signatory1", + first_per_sequential_id = true, + ), + DbDto.IdFilterCreateNonStakeholderInformee( + 0L, + templateIdWithPackageName(createNode), + "signatory2", + first_per_sequential_id = false, + ), + DbDto.IdFilterCreateNonStakeholderInformee( + 0L, + templateIdWithPackageName(createNode), + "signatory3", + first_per_sequential_id = false, + ), + 
DbDto.IdFilterCreateNonStakeholderInformee( + 0L, + templateIdWithPackageName(createNode), + "observer", + first_per_sequential_id = false, + ), + )) + + if (isRepairTransaction) + dtos.size shouldEqual 6 + else + dtos.size shouldEqual 7 + } + } + + handleTransactionAcceptedSingleCreateNode(isAcsDelta = false, isRepairTransaction = false) + handleTransactionAcceptedSingleCreateNode(isAcsDelta = true, isRepairTransaction = false) + handleTransactionAcceptedSingleCreateNode(isAcsDelta = true, isRepairTransaction = true) + + def handleTransactionAcceptedSingleConsumingExerciseNode(isAcsDelta: Boolean): Unit = { + s"handle TransactionAccepted (single consuming exercise node, isAcsDelta = $isAcsDelta)" in { + val completionInfo = someCompletionInfo + val transactionMeta = someTransactionMeta + val externalTransactionHash = someExternalTransactionHash + val builder = TxBuilder() + val exerciseNode = { + val createNode = builder.create( + id = builder.newCid, + templateId = "M:T", + argument = Value.ValueUnit, + signatories = List("signatory"), + observers = List("observer"), + ) + builder.exercise( + contract = createNode, + choice = "someChoice", + consuming = true, + actingParties = Set("signatory"), + argument = Value.ValueUnit, + result = Some(Value.ValueUnit), + choiceObservers = Set.empty, + byKey = false, + ) + } + val exerciseNodeId = builder.add(exerciseNode) + val transaction = builder.buildCommitted() + val update = state.Update.SequencedTransactionAccepted( + completionInfoO = Some(completionInfo), + transactionMeta = transactionMeta, + transaction = transaction, + updateId = updateId, + contractAuthenticationData = Map.empty, + synchronizerId = someSynchronizerId1, + recordTime = CantonTimestamp.ofEpochMicro(120), + externalTransactionHash = Some(externalTransactionHash), + acsChangeFactory = TestAcsChangeFactory(contractActivenessChanged = isAcsDelta), + internalContractIds = Map.empty, + ) + val dtos = updateToDtos(update) + + dtos.head shouldEqual + DbDto.EventExercise( + consuming = true, + event_offset = someOffset.unwrap, + update_id = updateIdByteArray, + ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, + command_id = Some(completionInfo.commandId), + workflow_id = transactionMeta.workflowId, + user_id = Some(completionInfo.userId), + submitters = Some(completionInfo.actAs.toSet), + node_id = exerciseNodeId.index, + contract_id = exerciseNode.targetCoid, + template_id = templateIdWithPackageName(exerciseNode), + package_id = exerciseNode.templateId.packageId, + flat_event_witnesses = + if (isAcsDelta) Set("signatory", "observer") else Set.empty, // stakeholders + tree_event_witnesses = Set("signatory", "observer"), // informees + exercise_choice = exerciseNode.choiceId, + exercise_choice_interface_id = None, + exercise_argument = emptyArray, + exercise_result = Some(emptyArray), + exercise_actors = Set("signatory"), + exercise_last_descendant_node_id = exerciseNodeId.index, + exercise_argument_compression = compressionAlgorithmId, + exercise_result_compression = compressionAlgorithmId, + event_sequential_id = 0, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, + record_time = 120, + external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + deactivated_event_sequential_id = None, + ) + dtos(3) shouldEqual + DbDto.CommandCompletion( + completion_offset = someOffset.unwrap, + record_time = 120, + publication_time = 0, + user_id = completionInfo.userId, + submitters = completionInfo.actAs.toSet, + 
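// (Sketch of the filter-table routing checked in the create case above and in
//  the consuming-exercise case below, read off the expected DTO sets: when the
//  update changes the ACS (isAcsDelta), witnesses are written to the
//  stakeholder filter tables; otherwise the same parties go to the
//  non-stakeholder-informee tables.)
//
//   def createFilterRow(party: String, first: Boolean): DbDto =
//     if (isAcsDelta)
//       DbDto.IdFilterCreateStakeholder(
//         0L, templateIdWithPackageName(createNode), party, first_per_sequential_id = first)
//     else
//       DbDto.IdFilterCreateNonStakeholderInformee(
//         0L, templateIdWithPackageName(createNode), party, first_per_sequential_id = first)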
command_id = completionInfo.commandId, + update_id = Some(updateIdByteArray), + rejection_status_code = None, + rejection_status_message = None, + rejection_status_details = None, + submission_id = completionInfo.submissionId, + deduplication_offset = None, + deduplication_duration_nanos = None, + deduplication_duration_seconds = None, + synchronizer_id = someSynchronizerId1, + message_uuid = None, + is_transaction = true, + trace_context = serializedEmptyTraceContext, + ) + dtos(4) shouldEqual + DbDto.TransactionMeta( + update_id = updateIdByteArray, + event_offset = someOffset.unwrap, + publication_time = 0, + record_time = 120, + synchronizer_id = someSynchronizerId1, + event_sequential_id_first = 0, + event_sequential_id_last = 0, + ) + + Set(dtos(1), dtos(2)) should contain theSameElementsAs + (if (isAcsDelta) + Set( + DbDto.IdFilterConsumingStakeholder( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "signatory", + first_per_sequential_id = true, + ), + DbDto.IdFilterConsumingStakeholder( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "observer", + first_per_sequential_id = false, + ), + ) + else + Set( + DbDto.IdFilterConsumingNonStakeholderInformee( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "signatory", + first_per_sequential_id = true, + ), + DbDto.IdFilterConsumingNonStakeholderInformee( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "observer", + first_per_sequential_id = false, + ), + )) + + dtos.size shouldEqual 5 + + } + } + + handleTransactionAcceptedSingleConsumingExerciseNode(isAcsDelta = true) + handleTransactionAcceptedSingleConsumingExerciseNode(isAcsDelta = false) + + "handle TransactionAccepted (single non-consuming exercise node)" in { + val completionInfo = someCompletionInfo + val transactionMeta = someTransactionMeta + val externalTransactionHash = someExternalTransactionHash + val builder = TxBuilder() + val exerciseNode = { + val createNode = builder.create( + id = builder.newCid, + templateId = "M:T", + argument = Value.ValueUnit, + signatories = List("signatory"), + observers = List("observer"), + ) + builder.exercise( + contract = createNode, + choice = "someChoice", + consuming = false, + actingParties = Set("signatory"), + argument = Value.ValueUnit, + result = Some(Value.ValueUnit), + choiceObservers = Set.empty, + byKey = false, + ) + } + val exerciseNodeId = builder.add(exerciseNode) + val transaction = builder.buildCommitted() + val update = state.Update.SequencedTransactionAccepted( + completionInfoO = Some(completionInfo), + transactionMeta = transactionMeta, + transaction = transaction, + updateId = updateId, + contractAuthenticationData = Map.empty, + synchronizerId = someSynchronizerId1, + recordTime = someRecordTime, + externalTransactionHash = Some(externalTransactionHash), + acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map.empty, + ) + val dtos = updateToDtos(update) + + dtos should contain theSameElementsInOrderAs List( + DbDto.EventExercise( + consuming = false, + event_offset = someOffset.unwrap, + update_id = updateIdByteArray, + ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, + command_id = Some(completionInfo.commandId), + workflow_id = transactionMeta.workflowId, + user_id = Some(completionInfo.userId), + submitters = Some(completionInfo.actAs.toSet), + node_id = exerciseNodeId.index, + 
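The two parametrisations of the consuming-exercise test above differ only in whether the event is an ACS delta; per its assertions, stakeholders populate `flat_event_witnesses` only in that case, while `tree_event_witnesses` always carries the informees. A sketch of that gate (hypothetical helper, mirroring the assertions):

    object WitnessGateSketch extends App {
      // Stakeholders become flat witnesses only when contract activeness changes.
      def flatWitnesses(stakeholders: Set[String], isAcsDelta: Boolean): Set[String] =
        if (isAcsDelta) stakeholders else Set.empty

      assert(flatWitnesses(Set("signatory", "observer"), isAcsDelta = true) == Set("signatory", "observer"))
      assert(flatWitnesses(Set("signatory", "observer"), isAcsDelta = false).isEmpty)
    }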
contract_id = exerciseNode.targetCoid, + template_id = templateIdWithPackageName(exerciseNode), + package_id = exerciseNode.templateId.packageId, + flat_event_witnesses = Set.empty, // stakeholders + tree_event_witnesses = Set("signatory"), // informees + exercise_choice = exerciseNode.choiceId, + exercise_choice_interface_id = None, + exercise_argument = emptyArray, + exercise_result = Some(emptyArray), + exercise_actors = Set("signatory"), + exercise_last_descendant_node_id = exerciseNodeId.index, + exercise_argument_compression = compressionAlgorithmId, + exercise_result_compression = compressionAlgorithmId, + event_sequential_id = 0, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, + record_time = someRecordTime.toMicros, + external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + deactivated_event_sequential_id = None, + ), + DbDto.IdFilterNonConsumingInformee( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "signatory", + first_per_sequential_id = true, + ), + DbDto.CommandCompletion( + completion_offset = someOffset.unwrap, + record_time = someRecordTime.toMicros, + publication_time = 0, + user_id = completionInfo.userId, + submitters = completionInfo.actAs.toSet, + command_id = completionInfo.commandId, + update_id = Some(updateIdByteArray), + rejection_status_code = None, + rejection_status_message = None, + rejection_status_details = None, + submission_id = completionInfo.submissionId, + deduplication_offset = None, + deduplication_duration_nanos = None, + deduplication_duration_seconds = None, + synchronizer_id = someSynchronizerId1, + message_uuid = None, + is_transaction = true, + trace_context = serializedEmptyTraceContext, + ), + DbDto.TransactionMeta( + update_id = updateIdByteArray, + event_offset = someOffset.unwrap, + publication_time = 0, + record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + event_sequential_id_first = 0, + event_sequential_id_last = 0, + ), + ) + } + + "handle TransactionAccepted (nested exercise nodes)" in { + // Previous transaction + // └─ #1 Create + // Transaction + // └─ #2 Exercise (choice A) + // ├─ #3 Exercise (choice B) + // └─ #4 Exercise (choice C) + // └─ #5 Exercise (choice D) + val completionInfo = someCompletionInfo + val transactionMeta = someTransactionMeta + val externalTransactionHash = someExternalTransactionHash + val builder = TxBuilder() + val createNode = builder.create( + id = builder.newCid, + templateId = "M:T", + argument = Value.ValueUnit, + signatories = List("signatory"), + observers = List("observer"), + ) + val exerciseNodeA = builder.exercise( + contract = createNode, + choice = "A", + consuming = false, + actingParties = Set("signatory"), + argument = Value.ValueUnit, + result = Some(Value.ValueUnit), + choiceObservers = Set.empty, + byKey = false, + ) + val exerciseNodeB = builder.exercise( + contract = createNode, + choice = "B", + consuming = false, + actingParties = Set("signatory"), + argument = Value.ValueUnit, + result = Some(Value.ValueUnit), + choiceObservers = Set.empty, + byKey = false, + ) + val exerciseNodeC = builder.exercise( + contract = createNode, + choice = "C", + consuming = false, + actingParties = Set("signatory"), + argument = Value.ValueUnit, + result = Some(Value.ValueUnit), + choiceObservers = Set.empty, + byKey = false, + ) + val exerciseNodeD = builder.exercise( + contract = createNode, + choice = "D", + consuming = false, + actingParties = 
Set("signatory"), + argument = Value.ValueUnit, + result = Some(Value.ValueUnit), + choiceObservers = Set.empty, + byKey = false, + ) + val exerciseNodeAId = builder.add(exerciseNodeA) + val exerciseNodeBId = builder.add(exerciseNodeB, exerciseNodeAId) + val exerciseNodeCId = builder.add(exerciseNodeC, exerciseNodeAId) + val exerciseNodeDId = builder.add(exerciseNodeD, exerciseNodeCId) + val transaction = builder.buildCommitted() + val update = state.Update.SequencedTransactionAccepted( + completionInfoO = Some(completionInfo), + transactionMeta = transactionMeta, + transaction = transaction, + updateId = updateId, + contractAuthenticationData = Map.empty, + synchronizerId = someSynchronizerId1, + recordTime = someRecordTime, + externalTransactionHash = Some(externalTransactionHash), + acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map.empty, + ) + val dtos = updateToDtos(update) + + dtos should contain theSameElementsInOrderAs List( + DbDto.EventExercise( + consuming = false, + event_offset = someOffset.unwrap, + update_id = updateIdByteArray, + ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, + command_id = Some(completionInfo.commandId), + workflow_id = transactionMeta.workflowId, + user_id = Some(completionInfo.userId), + submitters = Some(completionInfo.actAs.toSet), + node_id = exerciseNodeAId.index, + contract_id = exerciseNodeA.targetCoid, + template_id = templateIdWithPackageName(exerciseNodeA), + package_id = exerciseNodeA.templateId.packageId, + flat_event_witnesses = Set.empty, // stakeholders + tree_event_witnesses = Set("signatory"), // informees + exercise_choice = exerciseNodeA.choiceId, + exercise_choice_interface_id = None, + exercise_argument = emptyArray, + exercise_result = Some(emptyArray), + exercise_actors = Set("signatory"), + exercise_last_descendant_node_id = exerciseNodeDId.index, + exercise_argument_compression = compressionAlgorithmId, + exercise_result_compression = compressionAlgorithmId, + event_sequential_id = 0, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, + record_time = someRecordTime.toMicros, + external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + deactivated_event_sequential_id = None, + ), + DbDto.IdFilterNonConsumingInformee( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNodeA), + party_id = "signatory", + first_per_sequential_id = true, + ), + DbDto.EventExercise( + consuming = false, + event_offset = someOffset.unwrap, + update_id = updateIdByteArray, + ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, + command_id = Some(completionInfo.commandId), + workflow_id = transactionMeta.workflowId, + user_id = Some(completionInfo.userId), + submitters = Some(completionInfo.actAs.toSet), + node_id = exerciseNodeBId.index, + contract_id = exerciseNodeB.targetCoid, + template_id = templateIdWithPackageName(exerciseNodeB), + package_id = exerciseNodeB.templateId.packageId, + flat_event_witnesses = Set.empty, // stakeholders + tree_event_witnesses = Set("signatory"), // informees + exercise_choice = exerciseNodeB.choiceId, + exercise_choice_interface_id = None, + exercise_argument = emptyArray, + exercise_result = Some(emptyArray), + exercise_actors = Set("signatory"), + exercise_last_descendant_node_id = exerciseNodeBId.index, + exercise_argument_compression = compressionAlgorithmId, + exercise_result_compression = compressionAlgorithmId, + event_sequential_id = 0, + synchronizer_id = 
someSynchronizerId1, + trace_context = serializedEmptyTraceContext, + record_time = someRecordTime.toMicros, + external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + deactivated_event_sequential_id = None, + ), + DbDto.IdFilterNonConsumingInformee( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNodeB), + party_id = "signatory", + first_per_sequential_id = true, + ), + DbDto.EventExercise( + consuming = false, + event_offset = someOffset.unwrap, + update_id = updateIdByteArray, + ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, + command_id = Some(completionInfo.commandId), + workflow_id = transactionMeta.workflowId, + user_id = Some(completionInfo.userId), + submitters = Some(completionInfo.actAs.toSet), + node_id = exerciseNodeCId.index, + contract_id = exerciseNodeC.targetCoid, + template_id = templateIdWithPackageName(exerciseNodeC), + package_id = exerciseNodeC.templateId.packageId, + flat_event_witnesses = Set.empty, // stakeholders + tree_event_witnesses = Set("signatory"), // informees + exercise_choice = exerciseNodeC.choiceId, + exercise_choice_interface_id = None, + exercise_argument = emptyArray, + exercise_result = Some(emptyArray), + exercise_actors = Set("signatory"), + exercise_last_descendant_node_id = exerciseNodeDId.index, + exercise_argument_compression = compressionAlgorithmId, + exercise_result_compression = compressionAlgorithmId, + event_sequential_id = 0, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, + record_time = someRecordTime.toMicros, + external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + deactivated_event_sequential_id = None, + ), + DbDto.IdFilterNonConsumingInformee( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNodeC), + party_id = "signatory", + first_per_sequential_id = true, + ), + DbDto.EventExercise( + consuming = false, + event_offset = someOffset.unwrap, + update_id = updateIdByteArray, + ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, + command_id = Some(completionInfo.commandId), + workflow_id = transactionMeta.workflowId, + user_id = Some(completionInfo.userId), + submitters = Some(completionInfo.actAs.toSet), + node_id = exerciseNodeDId.index, + contract_id = exerciseNodeD.targetCoid, + template_id = templateIdWithPackageName(exerciseNodeD), + package_id = exerciseNodeD.templateId.packageId, + flat_event_witnesses = Set.empty, // stakeholders + tree_event_witnesses = Set("signatory"), // informees + exercise_choice = exerciseNodeD.choiceId, + exercise_choice_interface_id = None, + exercise_argument = emptyArray, + exercise_result = Some(emptyArray), + exercise_actors = Set("signatory"), + exercise_last_descendant_node_id = exerciseNodeDId.index, + exercise_argument_compression = compressionAlgorithmId, + exercise_result_compression = compressionAlgorithmId, + event_sequential_id = 0, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, + record_time = someRecordTime.toMicros, + external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + deactivated_event_sequential_id = None, + ), + DbDto.IdFilterNonConsumingInformee( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNodeD), + party_id = "signatory", + first_per_sequential_id = true, + ), + DbDto.CommandCompletion( + completion_offset = someOffset.unwrap, + record_time = someRecordTime.toMicros, + publication_time = 0, + 
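The four `exercise_last_descendant_node_id` expectations above follow the tree drawn at the top of this test: A's subtree ends at D, B is a leaf, and C's subtree also ends at D. A small sketch of that recursion (node ids 0..3 assumed from `add()` order, which the diff does not spell out):

    object LastDescendantSketch extends App {
      // Assumed ids from add() order: A=0, B=1, C=2, D=3; A -> (B, C), C -> (D).
      val children = Map(0 -> List(1, 2), 2 -> List(3))

      def lastDescendant(node: Int): Int =
        children.getOrElse(node, Nil).map(lastDescendant).foldLeft(node)(math.max)

      assert(lastDescendant(0) == 3) // A's subtree ends at D
      assert(lastDescendant(1) == 1) // B is a leaf
      assert(lastDescendant(2) == 3) // C's subtree ends at D
    }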
user_id = completionInfo.userId, + submitters = completionInfo.actAs.toSet, + command_id = completionInfo.commandId, + update_id = Some(updateIdByteArray), + rejection_status_code = None, + rejection_status_message = None, + rejection_status_details = None, + submission_id = completionInfo.submissionId, + deduplication_offset = None, + deduplication_duration_nanos = None, + deduplication_duration_seconds = None, + synchronizer_id = someSynchronizerId1, + message_uuid = None, + is_transaction = true, + trace_context = serializedEmptyTraceContext, + ), + DbDto.TransactionMeta( + update_id = updateIdByteArray, + event_offset = someOffset.unwrap, + publication_time = 0, + record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + event_sequential_id_first = 0, + event_sequential_id_last = 0, + ), + ) + } + + "handle TransactionAccepted (fetch and lookup nodes)" in { + // Previous transaction + // └─ #1 Create + // Transaction + // ├─ #2 Fetch + // ├─ #3 Fetch by key + // └─ #4 Lookup by key + val completionInfo = someCompletionInfo + val transactionMeta = someTransactionMeta + val builder = TxBuilder() + val createNode = builder.create( + id = builder.newCid, + templateId = "M:T", + argument = Value.ValueUnit, + signatories = List("signatory"), + observers = List("observer"), + key = CreateKey.SignatoryMaintainerKey(Value.ValueUnit), + ) + val fetchNode = builder.fetch( + contract = createNode, + byKey = false, + ) + val fetchByKeyNode = builder.fetch( + contract = createNode, + byKey = true, + ) + val lookupByKeyNode = builder.lookupByKey( + contract = createNode + ) + builder.add(fetchNode) + builder.add(fetchByKeyNode) + builder.add(lookupByKeyNode) + val transaction = builder.buildCommitted() + val update = state.Update.SequencedTransactionAccepted( + completionInfoO = Some(completionInfo), + transactionMeta = transactionMeta, + transaction = transaction, + updateId = updateId, + contractAuthenticationData = Map.empty, + synchronizerId = someSynchronizerId1, + recordTime = someRecordTime, + acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map.empty, + ) + val dtos = updateToDtos(update) + + // Note: fetch and lookup nodes are not indexed + dtos should contain theSameElementsInOrderAs List( + DbDto.CommandCompletion( + completion_offset = someOffset.unwrap, + record_time = someRecordTime.toMicros, + publication_time = 0, + user_id = completionInfo.userId, + submitters = completionInfo.actAs.toSet, + command_id = completionInfo.commandId, + update_id = Some(updateIdByteArray), + rejection_status_code = None, + rejection_status_message = None, + rejection_status_details = None, + submission_id = completionInfo.submissionId, + deduplication_offset = None, + deduplication_duration_nanos = None, + deduplication_duration_seconds = None, + synchronizer_id = someSynchronizerId1, + message_uuid = None, + is_transaction = true, + trace_context = serializedEmptyTraceContext, + ), + DbDto.TransactionMeta( + update_id = updateIdByteArray, + event_offset = someOffset.unwrap, + publication_time = 0, + record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + event_sequential_id_first = 0, + event_sequential_id_last = 0, + ), + ) + } + + "handle TransactionAccepted (single exercise node with divulgence)" in { + // Previous transaction + // └─ #1 Create + // Transaction + // └─ #2 Exercise (divulges #1 to 'divulgee') + val completionInfo = someCompletionInfo + val transactionMeta = someTransactionMeta + val externalTransactionHash =
someExternalTransactionHash + + val builder = TxBuilder() + val createNode = builder.create( + id = builder.newCid, + templateId = "M:T", + argument = Value.ValueUnit, + signatories = List("signatory"), + observers = List("observer"), + ) + val exerciseNode = builder.exercise( + contract = createNode, + choice = "someChoice", + consuming = true, + actingParties = Set("signatory"), + argument = Value.ValueUnit, + result = Some(Value.ValueUnit), + choiceObservers = Set("divulgee"), + byKey = false, + ) + val exerciseNodeId = builder.add(exerciseNode) + val transaction = builder.buildCommitted() + val update = state.Update.SequencedTransactionAccepted( + completionInfoO = Some(completionInfo), + transactionMeta = transactionMeta, + transaction = transaction, + updateId = updateId, + contractAuthenticationData = Map.empty, + synchronizerId = someSynchronizerId1, + recordTime = someRecordTime, + externalTransactionHash = Some(externalTransactionHash), + acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map.empty, + ) + val dtos = updateToDtos(update) + + dtos should contain theSameElementsInOrderAs List( + DbDto.EventExercise( + consuming = true, + event_offset = someOffset.unwrap, + update_id = updateIdByteArray, + ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, + command_id = Some(completionInfo.commandId), + workflow_id = transactionMeta.workflowId, + user_id = Some(completionInfo.userId), + submitters = Some(completionInfo.actAs.toSet), + node_id = exerciseNodeId.index, + contract_id = exerciseNode.targetCoid, + template_id = templateIdWithPackageName(exerciseNode), + package_id = exerciseNode.templateId.packageId, + flat_event_witnesses = Set("signatory", "observer"), + tree_event_witnesses = Set("signatory", "observer", "divulgee"), + exercise_choice = exerciseNode.choiceId, + exercise_choice_interface_id = None, + exercise_argument = emptyArray, + exercise_result = Some(emptyArray), + exercise_actors = Set("signatory"), + exercise_last_descendant_node_id = exerciseNodeId.index, + exercise_argument_compression = compressionAlgorithmId, + exercise_result_compression = compressionAlgorithmId, + event_sequential_id = 0, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, + record_time = someRecordTime.toMicros, + external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + deactivated_event_sequential_id = None, + ), + DbDto.IdFilterConsumingStakeholder( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "signatory", + first_per_sequential_id = true, + ), + DbDto.IdFilterConsumingStakeholder( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "observer", + first_per_sequential_id = false, + ), + DbDto.IdFilterConsumingNonStakeholderInformee( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "divulgee", + first_per_sequential_id = true, + ), + DbDto.CommandCompletion( + completion_offset = someOffset.unwrap, + record_time = someRecordTime.toMicros, + publication_time = 0, + user_id = completionInfo.userId, + submitters = completionInfo.actAs.toSet, + command_id = completionInfo.commandId, + update_id = Some(updateIdByteArray), + rejection_status_code = None, + rejection_status_message = None, + rejection_status_details = None, + submission_id = completionInfo.submissionId, + deduplication_offset = None, + deduplication_duration_nanos = None, + 
deduplication_duration_seconds = None, + synchronizer_id = someSynchronizerId1, + message_uuid = None, + is_transaction = true, + trace_context = serializedEmptyTraceContext, + ), + DbDto.TransactionMeta( + update_id = updateIdByteArray, + event_offset = someOffset.unwrap, + publication_time = 0, + record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + event_sequential_id_first = 0, + event_sequential_id_last = 0, + ), + ) + } + + "handle TransactionAccepted (transaction with local divulgence)" in { + // Transaction + // ├─ #1 Create + // └─ #2 Exercise (divulges #1 to 'divulgee') + val completionInfo = someCompletionInfo + val transactionMeta = someTransactionMeta + val externalTransactionHash = someExternalTransactionHash + val builder = TxBuilder() + val contractId = builder.newCid + val interfaceId = toIdentifier("M:I") + val createNode = builder.create( + id = contractId, + templateId = "M:T", + argument = Value.ValueUnit, + signatories = List("signatory"), + observers = List("observer"), + ) + val exerciseNode = builder.exercise( + contract = createNode, + choice = "someChoice", + consuming = true, + actingParties = Set("signatory"), + argument = Value.ValueUnit, + result = Some(Value.ValueUnit), + choiceObservers = Set("divulgee"), + byKey = false, + interfaceId = Some(interfaceId), + ) + val createNodeId = builder.add(createNode) + val exerciseNodeId = builder.add(exerciseNode) + val transaction = builder.buildCommitted() + val update = state.Update.SequencedTransactionAccepted( + completionInfoO = Some(completionInfo), + transactionMeta = transactionMeta, + transaction = transaction, + updateId = updateId, + contractAuthenticationData = Map(contractId -> someContractAuthenticationData), + synchronizerId = someSynchronizerId1, + recordTime = someRecordTime, + externalTransactionHash = Some(externalTransactionHash), + acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map(contractId -> 42L), + ) + val dtos = updateToDtos(update) + + dtos.head shouldEqual DbDto.EventCreate( + event_offset = someOffset.unwrap, + update_id = updateIdByteArray, + ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, + command_id = Some(completionInfo.commandId), + workflow_id = transactionMeta.workflowId, + user_id = Some(completionInfo.userId), + submitters = Some(completionInfo.actAs.toSet), + node_id = createNodeId.index, + contract_id = createNode.coid, + template_id = templateIdWithPackageName(createNode), + package_id = createNode.templateId.packageId.toString, + flat_event_witnesses = Set("signatory", "observer"), + tree_event_witnesses = Set("signatory", "observer"), + create_argument = emptyArray, + create_signatories = Set("signatory"), + create_observers = Set("observer"), + create_key_value = None, + create_key_maintainers = None, + create_key_hash = None, + create_argument_compression = compressionAlgorithmId, + create_key_value_compression = None, + event_sequential_id = 0, + authentication_data = someContractAuthenticationData.toByteArray, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, + record_time = someRecordTime.toMicros, + external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + representative_package_id = createNode.templateId.packageId, + internal_contract_id = 42L, + ) + Set(dtos(1), dtos(2)) should contain theSameElementsAs Set( + DbDto.IdFilterCreateStakeholder( + 0L, + templateIdWithPackageName(createNode), + "signatory", + first_per_sequential_id = true, + 
), + DbDto.IdFilterCreateStakeholder( + 0L, + templateIdWithPackageName(createNode), + "observer", + first_per_sequential_id = false, + ), + ) + dtos(3) shouldEqual DbDto.EventExercise( + consuming = true, + event_offset = someOffset.unwrap, + update_id = updateIdByteArray, + ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, + command_id = Some(completionInfo.commandId), + workflow_id = transactionMeta.workflowId, + user_id = Some(completionInfo.userId), + submitters = Some(completionInfo.actAs.toSet), + node_id = exerciseNodeId.index, + contract_id = exerciseNode.targetCoid, + template_id = templateIdWithPackageName(exerciseNode), + package_id = exerciseNode.templateId.packageId, + flat_event_witnesses = Set("signatory", "observer"), + tree_event_witnesses = Set("signatory", "observer", "divulgee"), + exercise_choice = exerciseNode.choiceId, + exercise_choice_interface_id = Some(interfaceId.toString), + exercise_argument = emptyArray, + exercise_result = Some(emptyArray), + exercise_actors = Set("signatory"), + exercise_last_descendant_node_id = exerciseNodeId.index, + exercise_argument_compression = compressionAlgorithmId, + exercise_result_compression = compressionAlgorithmId, + event_sequential_id = 0, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, + record_time = someRecordTime.toMicros, + external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + deactivated_event_sequential_id = None, + ) + dtos(4) shouldEqual DbDto.IdFilterConsumingStakeholder( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "signatory", + first_per_sequential_id = true, + ) + dtos(5) shouldEqual DbDto.IdFilterConsumingStakeholder( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "observer", + first_per_sequential_id = false, + ) + dtos(6) shouldEqual DbDto.IdFilterConsumingNonStakeholderInformee( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "divulgee", + first_per_sequential_id = true, + ) + dtos(7) shouldEqual DbDto.CommandCompletion( + completion_offset = someOffset.unwrap, + record_time = someRecordTime.toMicros, + publication_time = 0, + user_id = completionInfo.userId, + submitters = completionInfo.actAs.toSet, + command_id = completionInfo.commandId, + update_id = Some(updateIdByteArray), + rejection_status_code = None, + rejection_status_message = None, + rejection_status_details = None, + submission_id = completionInfo.submissionId, + deduplication_offset = None, + deduplication_duration_nanos = None, + deduplication_duration_seconds = None, + synchronizer_id = someSynchronizerId1, + message_uuid = None, + is_transaction = true, + trace_context = serializedEmptyTraceContext, + ) + dtos(8) shouldEqual DbDto.TransactionMeta( + update_id = updateIdByteArray, + event_offset = someOffset.unwrap, + publication_time = 0, + record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + event_sequential_id_first = 0, + event_sequential_id_last = 0, + ) + dtos.size shouldEqual 9 + } + + "handle TransactionAccepted (rollback node)" in { + // Transaction + // └─ #1 Rollback + // ├─ #2 Create + // └─ #3 Exercise (divulges #2 to divulgee) + // - Create and Exercise events must not be visible + val completionInfo = someCompletionInfo + val transactionMeta = someTransactionMeta + val builder = TxBuilder() + val rollbackNode = builder.rollback() + val createNode = builder.create( 
+ id = builder.newCid, + templateId = "M:T", + argument = Value.ValueUnit, + signatories = List("signatory"), + observers = List("observer"), + ) + val exerciseNode = builder.exercise( + contract = createNode, + choice = "someChoice", + consuming = true, + actingParties = Set("signatory"), + argument = Value.ValueUnit, + result = Some(Value.ValueUnit), + choiceObservers = Set("divulgee"), + byKey = false, + ) + val rollbackNodeId = builder.add(rollbackNode) + builder.add(createNode, rollbackNodeId) + builder.add(exerciseNode, rollbackNodeId) + val transaction = builder.buildCommitted() + val update = state.Update.SequencedTransactionAccepted( + completionInfoO = Some(completionInfo), + transactionMeta = transactionMeta, + transaction = transaction, + updateId = updateId, + contractAuthenticationData = Map.empty, + synchronizerId = someSynchronizerId1, + recordTime = someRecordTime, + acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map.empty, + ) + val dtos = updateToDtos(update) + + dtos should contain theSameElementsInOrderAs List( + DbDto.CommandCompletion( + completion_offset = someOffset.unwrap, + record_time = someRecordTime.toMicros, + publication_time = 0, + user_id = completionInfo.userId, + submitters = completionInfo.actAs.toSet, + command_id = completionInfo.commandId, + update_id = Some(updateIdByteArray), + rejection_status_code = None, + rejection_status_message = None, + rejection_status_details = None, + submission_id = completionInfo.submissionId, + deduplication_offset = None, + deduplication_duration_nanos = None, + deduplication_duration_seconds = None, + synchronizer_id = someSynchronizerId1, + message_uuid = None, + is_transaction = true, + trace_context = serializedEmptyTraceContext, + ), + DbDto.TransactionMeta( + update_id = updateIdByteArray, + event_offset = someOffset.unwrap, + publication_time = 0, + record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + event_sequential_id_first = 0, + event_sequential_id_last = 0, + ), + ) + } + + "handle TransactionAccepted (no submission info)" in { + // Transaction that is missing a SubmitterInfo + // This happens if a transaction was submitted through a different participant + val transactionMeta = someTransactionMeta + val externalTransactionHash = someExternalTransactionHash + val builder = TxBuilder() + val contractId = builder.newCid + val createNode = builder.create( + id = contractId, + templateId = "M:T", + argument = Value.ValueUnit, + signatories = List("signatory"), + observers = List("observer"), + ) + val createNodeId = builder.add(createNode) + val transaction = builder.buildCommitted() + val update = state.Update.SequencedTransactionAccepted( + completionInfoO = None, + transactionMeta = transactionMeta, + transaction = transaction, + updateId = updateId, + contractAuthenticationData = Map(contractId -> someContractAuthenticationData), + synchronizerId = someSynchronizerId1, + recordTime = someRecordTime, + externalTransactionHash = Some(externalTransactionHash), + acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map(contractId -> 42L), + ) + val dtos = updateToDtos(update) + + dtos.head shouldEqual DbDto.EventCreate( + event_offset = someOffset.unwrap, + update_id = updateIdByteArray, + ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, + command_id = None, + workflow_id = transactionMeta.workflowId, + user_id = None, + submitters = None, + node_id = createNodeId.index, + contract_id = createNode.coid, + template_id = 
templateIdWithPackageName(createNode), + package_id = createNode.templateId.packageId.toString, + flat_event_witnesses = Set("signatory", "observer"), + tree_event_witnesses = Set("signatory", "observer"), + create_argument = emptyArray, + create_signatories = Set("signatory"), + create_observers = Set("observer"), + create_key_value = None, + create_key_maintainers = None, + create_key_hash = None, + create_argument_compression = compressionAlgorithmId, + create_key_value_compression = None, + event_sequential_id = 0, + authentication_data = someContractAuthenticationData.toByteArray, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, + record_time = someRecordTime.toMicros, + external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + representative_package_id = createNode.templateId.packageId, + internal_contract_id = 42L, + ) + Set(dtos(1), dtos(2)) should contain theSameElementsAs Set( + DbDto.IdFilterCreateStakeholder( + 0L, + templateIdWithPackageName(createNode), + "signatory", + first_per_sequential_id = true, + ), + DbDto.IdFilterCreateStakeholder( + 0L, + templateIdWithPackageName(createNode), + "observer", + first_per_sequential_id = false, + ), + ) + dtos.size shouldEqual 4 + } + + val deduplicationPeriods = Table( + ( + "Deduplication period", + "Expected deduplication offset", + "Expected deduplication duration seconds", + "Expected deduplication duration nanos", + ), + (None, None, None, None), + ( + Some(DeduplicationOffset(None)), + Some(0L), + None, + None, + ), + ( + Some(DeduplicationDuration(Duration.ofDays(1L).plusNanos(100 * 1000))), + None, + Some(Duration.ofDays(1L).toMinutes * 60L), + Some(100 * 1000), + ), + ) + + "handle CommandRejected (all deduplication data)" in { + val status = StatusProto.of(Status.Code.ABORTED.value(), "test reason", Seq.empty) + forAll(deduplicationPeriods) { + case ( + deduplicationPeriod, + expectedDeduplicationOffset, + expectedDeduplicationDurationSeconds, + expectedDeduplicationDurationNanos, + ) => + val completionInfo = someCompletionInfo.copy(optDeduplicationPeriod = deduplicationPeriod) + val update = state.Update.SequencedCommandRejected( + completionInfo, + state.Update.CommandRejected.FinalReason(status), + someSynchronizerId1, + someRecordTime, + ) + val dtos = updateToDtos(update) + + dtos should contain theSameElementsInOrderAs List( + DbDto.CommandCompletion( + completion_offset = someOffset.unwrap, + record_time = someRecordTime.toMicros, + publication_time = 0, + user_id = someUserId, + submitters = Set(someParty), + command_id = someCommandId, + update_id = None, + rejection_status_code = Some(status.code), + rejection_status_message = Some(status.message), + rejection_status_details = Some(StatusDetails.of(status.details).toByteArray), + submission_id = Some(someSubmissionId), + deduplication_offset = expectedDeduplicationOffset, + deduplication_duration_seconds = expectedDeduplicationDurationSeconds, + deduplication_duration_nanos = expectedDeduplicationDurationNanos, + synchronizer_id = someSynchronizerId1, + message_uuid = None, + is_transaction = true, + trace_context = serializedEmptyTraceContext, + ) + ) + } + } + + "handle TransactionAccepted (all deduplication data)" in { + val transactionMeta = someTransactionMeta + val externalTransactionHash = someExternalTransactionHash + val builder = TxBuilder() + val contractId = builder.newCid + val createNode = builder.create( + id = contractId, + templateId = "M:T", + argument = Value.ValueUnit, + signatories = 
List("signatory"), + observers = List("observer"), + ) + val createNodeId = builder.add(createNode) + val transaction = builder.buildCommitted() + + forAll(deduplicationPeriods) { + case ( + deduplicationPeriod, + expectedDeduplicationOffset, + expectedDeduplicationDurationSeconds, + expectedDeduplicationDurationNanos, + ) => + val completionInfo = someCompletionInfo.copy(optDeduplicationPeriod = deduplicationPeriod) + val update = state.Update.SequencedTransactionAccepted( + completionInfoO = Some(completionInfo), + transactionMeta = transactionMeta, + transaction = transaction, + updateId = updateId, + contractAuthenticationData = Map(contractId -> someContractAuthenticationData), + synchronizerId = someSynchronizerId1, + recordTime = someRecordTime, + externalTransactionHash = Some(externalTransactionHash), + acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map(contractId -> 42L), + ) + val dtos = updateToDtos(update) + + dtos.head shouldEqual DbDto.EventCreate( + event_offset = someOffset.unwrap, + update_id = updateIdByteArray, + ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, + command_id = Some(completionInfo.commandId), + workflow_id = transactionMeta.workflowId, + user_id = Some(completionInfo.userId), + submitters = Some(completionInfo.actAs.toSet), + node_id = createNodeId.index, + contract_id = createNode.coid, + template_id = templateIdWithPackageName(createNode), + package_id = createNode.templateId.packageId.toString, + flat_event_witnesses = Set("signatory", "observer"), // stakeholders + tree_event_witnesses = Set("signatory", "observer"), // informees + create_argument = emptyArray, + create_signatories = Set("signatory"), + create_observers = Set("observer"), + create_key_value = None, + create_key_maintainers = None, + create_key_hash = None, + create_argument_compression = compressionAlgorithmId, + create_key_value_compression = None, + event_sequential_id = 0, + authentication_data = someContractAuthenticationData.toByteArray, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, + record_time = someRecordTime.toMicros, + external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + representative_package_id = createNode.templateId.packageId, + internal_contract_id = 42L, + ) + Set(dtos(1), dtos(2)) should contain theSameElementsAs Set( + DbDto.IdFilterCreateStakeholder( + 0L, + templateIdWithPackageName(createNode), + "signatory", + first_per_sequential_id = true, + ), + DbDto.IdFilterCreateStakeholder( + 0L, + templateIdWithPackageName(createNode), + "observer", + first_per_sequential_id = false, + ), + ) + dtos(3) shouldEqual DbDto.CommandCompletion( + completion_offset = someOffset.unwrap, + record_time = someRecordTime.toMicros, + publication_time = 0, + user_id = completionInfo.userId, + submitters = completionInfo.actAs.toSet, + command_id = completionInfo.commandId, + update_id = Some(updateIdByteArray), + rejection_status_code = None, + rejection_status_message = None, + rejection_status_details = None, + submission_id = Some(someSubmissionId), + deduplication_offset = expectedDeduplicationOffset, + deduplication_duration_seconds = expectedDeduplicationDurationSeconds, + deduplication_duration_nanos = expectedDeduplicationDurationNanos, + synchronizer_id = someSynchronizerId1, + message_uuid = None, + is_transaction = true, + trace_context = serializedEmptyTraceContext, + ) + dtos(4) shouldEqual DbDto.TransactionMeta( + update_id = updateIdByteArray, + event_offset = 
someOffset.unwrap, + publication_time = 0, + record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + event_sequential_id_first = 0, + event_sequential_id_last = 0, + ) + dtos.size shouldEqual 5 + } + } + + "handle ReassignmentAccepted - Assign" in { + val completionInfo = someCompletionInfo + val builder = TxBuilder() + val contractId = builder.newCid + val createNode = builder + .create( + id = contractId, + templateId = "M:T", + argument = Value.ValueUnit, + signatories = Set("signatory"), + observers = Set("observer", "observer2"), + ) + + val targetSynchronizerId = Target(SynchronizerId.tryFromString("x::synchronizer2")) + val update = state.Update.SequencedReassignmentAccepted( + optCompletionInfo = Some(completionInfo), + workflowId = Some(someWorkflowId), + updateId = updateId, + reassignmentInfo = ReassignmentInfo( + sourceSynchronizer = Source(SynchronizerId.tryFromString("x::synchronizer1")), + targetSynchronizer = targetSynchronizerId, + submitter = Option(someParty), + reassignmentId = ReassignmentId.tryCreate("001000000000"), + isReassigningParticipant = true, + ), + reassignment = Reassignment.Batch( + Reassignment.Assign( + ledgerEffectiveTime = Time.Timestamp.assertFromLong(17000000), + createNode = createNode, + contractAuthenticationData = someContractAuthenticationData, + reassignmentCounter = 1500L, + nodeId = 0, + ) + ), + recordTime = someRecordTime, + synchronizerId = targetSynchronizerId.unwrap, + acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map(contractId -> 42L), + ) + + val dtos = updateToDtos(update) + + dtos.head shouldEqual DbDto.EventAssign( + event_offset = someOffset.unwrap, + update_id = update.updateId.toProtoPrimitive.toByteArray, + command_id = Some(completionInfo.commandId), + workflow_id = Some(someWorkflowId), + submitter = Option(someParty), + node_id = 0, + contract_id = createNode.coid, + template_id = templateIdWithPackageName(createNode), + package_id = createNode.templateId.packageId.toString, + flat_event_witnesses = Set("signatory", "observer", "observer2"), + create_argument = emptyArray, + create_signatories = Set("signatory"), + create_observers = Set("observer", "observer2"), + create_key_value = None, + create_key_maintainers = None, + create_key_hash = None, + create_argument_compression = compressionAlgorithmId, + create_key_value_compression = None, + event_sequential_id = 0, + ledger_effective_time = 17000000, + authentication_data = someContractAuthenticationData.toByteArray, + source_synchronizer_id = SynchronizerId.tryFromString("x::synchronizer1"), + target_synchronizer_id = SynchronizerId.tryFromString("x::synchronizer2"), + reassignment_id = ReassignmentId.tryCreate("001000000000").toBytes.toByteArray, + reassignment_counter = 1500L, + trace_context = serializedEmptyTraceContext, + record_time = someRecordTime.toMicros, + internal_contract_id = 42L, + ) + dtos(4) shouldEqual DbDto.CommandCompletion( + completion_offset = someOffset.unwrap, + record_time = someRecordTime.toMicros, + publication_time = 0, + user_id = completionInfo.userId, + submitters = completionInfo.actAs.toSet, + command_id = completionInfo.commandId, + update_id = Some(updateIdByteArray), + rejection_status_code = None, + rejection_status_message = None, + rejection_status_details = None, + submission_id = completionInfo.submissionId, + deduplication_offset = None, + deduplication_duration_nanos = None, + deduplication_duration_seconds = None, + synchronizer_id = 
SynchronizerId.tryFromString("x::synchronizer2"), + message_uuid = None, + is_transaction = false, + trace_context = serializedEmptyTraceContext, + ) + dtos(5) shouldEqual DbDto.TransactionMeta( + update_id = updateIdByteArray, + event_offset = someOffset.unwrap, + publication_time = 0, + record_time = someRecordTime.toMicros, + synchronizer_id = SynchronizerId.tryFromString("x::synchronizer2"), + event_sequential_id_first = 0, + event_sequential_id_last = 0, + ) + Set(dtos(1), dtos(2), dtos(3)) should contain theSameElementsAs Set( + DbDto.IdFilterAssignStakeholder( + 0L, + templateIdWithPackageName(createNode), + "signatory", + first_per_sequential_id = true, + ), + DbDto.IdFilterAssignStakeholder( + 0L, + templateIdWithPackageName(createNode), + "observer", + first_per_sequential_id = false, + ), + DbDto.IdFilterAssignStakeholder( + 0L, + templateIdWithPackageName(createNode), + "observer2", + first_per_sequential_id = false, + ), + ) + dtos.size shouldEqual 6 + } + + "handle ReassignmentAccepted - Unassign" in { + val completionInfo = someCompletionInfo + val builder = TxBuilder() + val contractId = builder.newCid + val createNode = builder + .create( + id = contractId, + templateId = "M:T", + argument = Value.ValueUnit, + signatories = Set("signatory"), + observers = Set("observer"), + ) + + val sourceSynchronizerId = Source(SynchronizerId.tryFromString("x::synchronizer1")) + val update = state.Update.SequencedReassignmentAccepted( + optCompletionInfo = Some(completionInfo), + workflowId = Some(someWorkflowId), + updateId = updateId, + reassignmentInfo = ReassignmentInfo( + sourceSynchronizer = sourceSynchronizerId, + targetSynchronizer = Target(SynchronizerId.tryFromString("x::synchronizer2")), + submitter = Option(someParty), + reassignmentId = ReassignmentId.tryCreate("001000000000"), + isReassigningParticipant = true, + ), + reassignment = Reassignment.Batch( + Reassignment.Unassign( + contractId = contractId, + templateId = createNode.templateId, + packageName = createNode.packageName, + stakeholders = + List("signatory12", "observer23", "asdasdasd").map(Ref.Party.assertFromString), + assignmentExclusivity = Some(Time.Timestamp.assertFromLong(123456)), + reassignmentCounter = 1500L, + nodeId = 0, + ) + ), + recordTime = CantonTimestamp.ofEpochMicro(120), + synchronizerId = sourceSynchronizerId.unwrap, + acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map.empty, + ) + + val dtos = updateToDtos(update) + + dtos.head shouldEqual DbDto.EventUnassign( + event_offset = someOffset.unwrap, + update_id = update.updateId.toProtoPrimitive.toByteArray, + command_id = Some(completionInfo.commandId), + workflow_id = Some(someWorkflowId), + submitter = someParty, + node_id = 0, + contract_id = createNode.coid, + template_id = templateIdWithPackageName(createNode), + package_id = createNode.templateId.packageId, + flat_event_witnesses = Set("signatory12", "observer23", "asdasdasd"), + event_sequential_id = 0, + source_synchronizer_id = SynchronizerId.tryFromString("x::synchronizer1"), + target_synchronizer_id = SynchronizerId.tryFromString("x::synchronizer2"), + reassignment_id = ReassignmentId.tryCreate("001000000000").toBytes.toByteArray, + reassignment_counter = 1500L, + assignment_exclusivity = Some(123456L), + trace_context = serializedEmptyTraceContext, + record_time = 120L, + deactivated_event_sequential_id = None, + ) + dtos(4) shouldEqual DbDto.CommandCompletion( + completion_offset = someOffset.unwrap, + record_time = 120L, + publication_time = 0, + user_id = 
completionInfo.userId, + submitters = completionInfo.actAs.toSet, + command_id = completionInfo.commandId, + update_id = Some(updateIdByteArray), + rejection_status_code = None, + rejection_status_message = None, + rejection_status_details = None, + submission_id = completionInfo.submissionId, + deduplication_offset = None, + deduplication_duration_nanos = None, + deduplication_duration_seconds = None, + synchronizer_id = SynchronizerId.tryFromString("x::synchronizer1"), + message_uuid = None, + is_transaction = false, + trace_context = serializedEmptyTraceContext, + ) + dtos(5) shouldEqual DbDto.TransactionMeta( + update_id = updateIdByteArray, + event_offset = someOffset.unwrap, + publication_time = 0, + record_time = 120L, + synchronizer_id = SynchronizerId.tryFromString("x::synchronizer1"), + event_sequential_id_first = 0, + event_sequential_id_last = 0, + ) + Set(dtos(1), dtos(2), dtos(3)) should contain theSameElementsAs Set( + DbDto.IdFilterUnassignStakeholder( + 0L, + templateIdWithPackageName(createNode), + "signatory12", + first_per_sequential_id = true, + ), + DbDto.IdFilterUnassignStakeholder( + 0L, + templateIdWithPackageName(createNode), + "observer23", + first_per_sequential_id = false, + ), + DbDto.IdFilterUnassignStakeholder( + 0L, + templateIdWithPackageName(createNode), + "asdasdasd", + first_per_sequential_id = false, + ), + ) + dtos.size shouldEqual 6 + } + + "handle TopologyTransactionEffective - PartyToParticipantAuthorization" in { + val submissionParty = Ref.Party.assertFromString("SubmissionParty") + val confirmationParty = Ref.Party.assertFromString("ConfirmationParty") + val observationParty = Ref.Party.assertFromString("ObservationParty") + + val events = Set[TopologyEvent]( + PartyToParticipantAuthorization( + party = submissionParty, + participant = someParticipantId, + authorizationEvent = Added(Submission), + ), + PartyToParticipantAuthorization( + party = confirmationParty, + participant = someParticipantId, + authorizationEvent = Added(Confirmation), + ), + PartyToParticipantAuthorization( + party = observationParty, + participant = someParticipantId, + authorizationEvent = Added(Observation), + ), + PartyToParticipantAuthorization( + party = submissionParty, + participant = otherParticipantId, + authorizationEvent = ChangedTo(Submission), + ), + PartyToParticipantAuthorization( + party = confirmationParty, + participant = otherParticipantId, + authorizationEvent = ChangedTo(Confirmation), + ), + PartyToParticipantAuthorization( + party = observationParty, + participant = otherParticipantId, + authorizationEvent = ChangedTo(Observation), + ), + PartyToParticipantAuthorization( + party = someParty, + participant = someParticipantId, + authorizationEvent = Revoked, + ), + ) + + val update = state.Update.TopologyTransactionEffective( + updateId = updateId, + events = events, + synchronizerId = someSynchronizerId1, + effectiveTime = someRecordTime, + ) + + def eventPartyToParticipant( + partyId: String, + participantId: String, + authorizationEvent: AuthorizationEvent, + ) = + DbDto.EventPartyToParticipant( + event_sequential_id = 0, + event_offset = someOffset.unwrap, + update_id = update.updateId.toProtoPrimitive.toByteArray, + party_id = partyId, + participant_id = participantId, + participant_permission = participantPermissionInt(authorizationEvent), + participant_authorization_event = authorizationEventInt(authorizationEvent), + synchronizer_id = someSynchronizerId1, + record_time = someRecordTime.toMicros, + trace_context = serializedEmptyTraceContext, 
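Note that the Unassign test above deliberately uses stakeholder names that differ from the create node's parties: `EventUnassign.flat_event_witnesses` and the three `IdFilterUnassignStakeholder` rows are driven by the `Reassignment.Unassign` payload, not by the contract's original signatories and observers. A minimal illustration of that point (my own sketch):

    object UnassignWitnessSketch extends App {
      // The Unassign payload, not the original create node, supplies the parties.
      val createNodeParties    = Set("signatory", "observer")
      val unassignStakeholders = Set("signatory12", "observer23", "asdasdasd")

      assert(unassignStakeholders.intersect(createNodeParties).isEmpty)
    }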
+ ) + + val dtos = updateToDtos(update) + + dtos should contain( + eventPartyToParticipant( + partyId = submissionParty, + participantId = someParticipantId, + authorizationEvent = Added(Submission), + ) + ) + dtos should contain( + eventPartyToParticipant( + partyId = confirmationParty, + participantId = someParticipantId, + authorizationEvent = Added(Confirmation), + ) + ) + dtos should contain( + eventPartyToParticipant( + partyId = observationParty, + participantId = someParticipantId, + authorizationEvent = Added(Observation), + ) + ) + dtos should contain( + eventPartyToParticipant( + partyId = submissionParty, + participantId = otherParticipantId, + authorizationEvent = ChangedTo(Submission), + ) + ) + dtos should contain( + eventPartyToParticipant( + partyId = confirmationParty, + participantId = otherParticipantId, + authorizationEvent = ChangedTo(Confirmation), + ) + ) + dtos should contain( + eventPartyToParticipant( + partyId = observationParty, + participantId = otherParticipantId, + authorizationEvent = ChangedTo(Observation), + ) + ) + dtos should contain( + eventPartyToParticipant( + partyId = someParty, + participantId = someParticipantId, + authorizationEvent = Revoked, + ) + ) + dtos should contain( + DbDto.TransactionMeta( + update_id = updateIdByteArray, + event_offset = someOffset.unwrap, + publication_time = 0, + record_time = someRecordTime.toMicros, + synchronizer_id = SynchronizerId.tryFromString("x::synchronizer1"), + event_sequential_id_first = 0, + event_sequential_id_last = 0, + ) + ) + } + + "handle SequencerIndexMoved" in { + val update = state.Update.SequencerIndexMoved( + synchronizerId = someSynchronizerId1, + recordTime = CantonTimestamp.ofEpochMicro(2000), + ) + val dtos = updateToDtos(update) + + dtos.head shouldEqual DbDto.SequencerIndexMoved( + synchronizerId = someSynchronizerId1 + ) + dtos.size shouldEqual 1 + } + + } + + private def updateToDtos(update: Update) = + UpdateToDbDtoLegacy( + someParticipantId, + valueSerialization, + compressionStrategy, + LedgerApiServerMetrics.ForTesting, + )( + MetricsContext.Empty + )( + someOffset + )(update).toList +} + +object UpdateToDbDtoLegacySpec { + private val emptyArray = Array.emptyByteArray + + // These tests do not check the correctness of the LF value serialization. + // All LF values are serialized into empty arrays in this suite. + private val valueSerialization = new LfValueSerialization { + override def serialize( + contractId: ContractId, + contractArgument: Value.VersionedValue, + ): Array[Byte] = emptyArray + + /** Returns (contract argument, contract key) */ + override def serialize(create: Create): (Array[Byte], Option[Array[Byte]]) = + (emptyArray, create.keyOpt.map(_ => emptyArray)) + + /** Returns (choice argument, exercise result, contract key) */ + override def serialize( + exercise: Exercise + ): (Array[Byte], Option[Array[Byte]], Option[Array[Byte]]) = + ( + emptyArray, + exercise.exerciseResult.map(_ => emptyArray), + exercise.keyOpt.map(_ => emptyArray), + ) + } + + // These tests do not check the correctness of compression. + // All values are compressed using a dummy (identity) algorithm in this suite.
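The comment above describes the dummy strategy declared right after it: bytes pass through unchanged while a fixed algorithm id is still recorded, which is why the event assertions throughout this spec expect `*_compression = Some(123)`. A stand-alone sketch of that behaviour (my own illustration, not code from the patch):

    object IdentityCompressionSketch extends App {
      val algorithmId: Option[Int] = Some(123)            // recorded in *_compression columns
      val compress: Array[Byte] => Array[Byte] = identity // payload is left untouched

      val payload = Array[Byte](1, 2, 3)
      assert(compress(payload).sameElements(payload))
      assert(algorithmId.contains(123))
    }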
+ private val compressionAlgorithmId = Some(123) + private val compressionStrategy: CompressionStrategy = { + val noCompression = new FieldCompressionStrategy(compressionAlgorithmId, x => x) + CompressionStrategy( + noCompression, + noCompression, + noCompression, + noCompression, + noCompression, + noCompression, + ) + } + + private val someParticipantId = + Ref.ParticipantId.assertFromString("UpdateToDbDtoSpecParticipant") + private val otherParticipantId = + Ref.ParticipantId.assertFromString("UpdateToDbDtoSpecRemoteParticipant") + private val someOffset = Offset.tryFromLong(12345678L) + private val someRecordTime = + CantonTimestamp( + Time.Timestamp.assertFromInstant(Instant.parse(("2000-01-01T00:00:00.000000Z"))) + ) + private val someUserId = + Ref.UserId.assertFromString("UpdateToDbDtoSpecUserId") + private val someCommandId = Ref.CommandId.assertFromString("UpdateToDbDtoSpecCommandId") + private val someSubmissionId = + Ref.SubmissionId.assertFromString("UpdateToDbDtoSpecSubmissionId") + private val someWorkflowId = Ref.WorkflowId.assertFromString("UpdateToDbDtoSpecWorkflowId") + private val someParty = Ref.Party.assertFromString("UpdateToDbDtoSpecParty") + private val someHash = + crypto.Hash.assertFromString("01cf85cfeb36d628ca2e6f583fa2331be029b6b28e877e1008fb3f862306c086") + private val someCompletionInfo = state.CompletionInfo( + actAs = List(someParty), + userId = someUserId, + commandId = someCommandId, + optDeduplicationPeriod = None, + submissionId = Some(someSubmissionId), + ) + private val someSynchronizerId1 = SynchronizerId.tryFromString("x::synchronizer1") + private val someTransactionMeta = state.TransactionMeta( + ledgerEffectiveTime = Time.Timestamp.assertFromLong(2), + workflowId = Some(someWorkflowId), + preparationTime = Time.Timestamp.assertFromLong(3), + submissionSeed = someHash, + timeBoundaries = LedgerTimeBoundaries.unconstrained, + optUsedPackages = None, + optNodeSeeds = None, + optByKeyNodes = None, + ) + private val someContractAuthenticationData = Bytes.assertFromString("00abcd") + private val someRepresentativePackageId = Ref.PackageId.assertFromString("rp-id") + + implicit private val DbDtoEqual: org.scalactic.Equality[DbDto] = ScalatestEqualityHelpers.DbDtoEq + + private val serializedEmptyTraceContext = + SerializableTraceContext(emptyTraceContext).toDamlProto.toByteArray +} diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoSpec.scala index d20dd2dc51..12f6d7c736 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoSpec.scala @@ -5,8 +5,7 @@ package com.digitalasset.canton.platform.store.backend import com.daml.metrics.api.MetricsContext import com.daml.platform.v1.index.StatusDetails -import com.digitalasset.canton.crypto.HashAlgorithm.Sha256 -import com.digitalasset.canton.crypto.{Hash, HashPurpose} +import com.digitalasset.canton.RepairCounter import com.digitalasset.canton.data.DeduplicationPeriod.{DeduplicationDuration, DeduplicationOffset} import com.digitalasset.canton.data.{CantonTimestamp, LedgerTimeBoundaries, Offset} import com.digitalasset.canton.ledger.participant.state @@ -21,6 +20,7 @@ import 
com.digitalasset.canton.ledger.participant.state.Update.TopologyTransacti AuthorizationEvent, TopologyEvent, } +import com.digitalasset.canton.ledger.participant.state.Update.TransactionAccepted.RepresentativePackageIds import com.digitalasset.canton.ledger.participant.state.{ Reassignment, ReassignmentInfo, @@ -32,6 +32,8 @@ import com.digitalasset.canton.platform.store.backend.Conversions.{ authorizationEventInt, participantPermissionInt, } +import com.digitalasset.canton.platform.store.backend.DbDto.IdFilter +import com.digitalasset.canton.platform.store.backend.StorageBackendTestValues.someExternalTransactionHash import com.digitalasset.canton.platform.store.backend.UpdateToDbDto.templateIdWithPackageName import com.digitalasset.canton.platform.store.dao.JdbcLedgerDao import com.digitalasset.canton.platform.store.dao.events.{ @@ -40,7 +42,7 @@ import com.digitalasset.canton.platform.store.dao.events.{ LfValueSerialization, } import com.digitalasset.canton.platform.{ContractId, Create, Exercise} -import com.digitalasset.canton.protocol.ReassignmentId +import com.digitalasset.canton.protocol.{ReassignmentId, TestUpdateId} import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.tracing.SerializableTraceContextConverter.SerializableTraceContextExtension import com.digitalasset.canton.tracing.TraceContext.Implicits.Empty.emptyTraceContext @@ -56,7 +58,6 @@ import com.digitalasset.daml.lf.transaction.test.{ TransactionBuilder, } import com.digitalasset.daml.lf.value.Value -import com.google.protobuf.ByteString import com.google.rpc.status.Status as StatusProto import io.grpc.Status import org.scalatest.matchers.should.Matchers @@ -153,7 +154,7 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { deduplication_offset = None, deduplication_duration_seconds = None, deduplication_duration_nanos = None, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, + synchronizer_id = someSynchronizerId1, message_uuid = None, is_transaction = true, trace_context = serializedEmptyTraceContext, @@ -190,7 +191,7 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { deduplication_offset = None, deduplication_duration_seconds = None, deduplication_duration_nanos = None, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, + synchronizer_id = someSynchronizerId1, message_uuid = Some(messageUuid.toString), is_transaction = true, trace_context = serializedEmptyTraceContext, @@ -198,15 +199,29 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { ) } - val updateId = Ref.TransactionId.assertFromString("UpdateId") - - def handleTransactionAcceptedSingleCreateNode(isAcsDelta: Boolean): Unit = { - s"handle TransactionAccepted (single create node, isAcsDelta = $isAcsDelta)" in { + val updateId = TestUpdateId("mock_hash") + val updateIdByteArray = updateId.toProtoPrimitive.toByteArray + + // We only care about distinguishing between repair and sequencer transactions for create nodes + // since for create nodes the representative package-id assignment policies are different between the two + def handleAcsDeltaTransactionAcceptedWithSingleCreateNode( + isAcsDelta: Boolean, + isRepairTransaction: Boolean, + ): Unit = { + assert( + isRepairTransaction && isAcsDelta || !isRepairTransaction, + "Repair transaction is implicitly an ACS delta", + ) + val updateName = + if (isRepairTransaction) classOf[state.Update.RepairTransactionAccepted].getSimpleName + else classOf[state.Update.SequencedTransactionAccepted].getSimpleName + s"handle $updateName (single create 
node, isAcsDelta = $isAcsDelta)" in { val completionInfo = someCompletionInfo val transactionMeta = someTransactionMeta val externalTransactionHash = someExternalTransactionHash val builder = TxBuilder() val contractId = builder.newCid + val internalContractIds = Map(contractId -> 42L) val contractTemplate = Ref.Identifier.assertFromString("P:M:T") val keyValue = Value.ValueUnit val createNode = builder @@ -220,41 +235,62 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { ) val createNodeId = builder.add(createNode) val transaction = builder.buildCommitted() - val update = state.Update.SequencedTransactionAccepted( - completionInfoO = Some(completionInfo), - transactionMeta = transactionMeta, - transaction = transaction, - updateId = updateId, - contractAuthenticationData = Map(contractId -> someContractAuthenticationData), - synchronizerId = someSynchronizerId1, - recordTime = someRecordTime, - externalTransactionHash = Some(externalTransactionHash), - acsChangeFactory = TestAcsChangeFactory(contractActivenessChanged = isAcsDelta), - ) + val update = + if (isRepairTransaction) + state.Update.RepairTransactionAccepted( + transactionMeta = transactionMeta, + transaction = transaction, + updateId = updateId, + contractAuthenticationData = Map(contractId -> someContractAuthenticationData), + representativePackageIds = RepresentativePackageIds.DedicatedRepresentativePackageIds( + Map(contractId -> someRepresentativePackageId) + ), + synchronizerId = someSynchronizerId1, + recordTime = someRecordTime, + repairCounter = RepairCounter(1337), + internalContractIds = internalContractIds, + ) + else + state.Update.SequencedTransactionAccepted( + completionInfoO = Some(completionInfo), + transactionMeta = transactionMeta, + transaction = transaction, + updateId = updateId, + contractAuthenticationData = Map(contractId -> someContractAuthenticationData), + synchronizerId = someSynchronizerId1, + recordTime = someRecordTime, + externalTransactionHash = Some(externalTransactionHash), + acsChangeFactory = TestAcsChangeFactory(contractActivenessChanged = isAcsDelta), + internalContractIds = internalContractIds, + ) val dtos = updateToDtos(update) - dtos.head shouldEqual DbDto.EventCreate( + val dtoCreate = DbDto.EventActivate( event_offset = someOffset.unwrap, - update_id = updateId, - ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, - command_id = Some(completionInfo.commandId), + update_id = updateIdByteArray, workflow_id = transactionMeta.workflowId, - user_id = Some(completionInfo.userId), - submitters = Some(completionInfo.actAs.toSet), + command_id = Option.when(!isRepairTransaction)(completionInfo.commandId), + submitters = Option.when(!isRepairTransaction)(completionInfo.actAs.toSet), + record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, + external_transaction_hash = + Option.when(!isRepairTransaction)(externalTransactionHash.unwrap.toByteArray), + event_type = PersistentEventType.Create.asInt, + event_sequential_id = 0, node_id = createNodeId.index, - contract_id = createNode.coid.toBytes.toByteArray, - template_id = templateIdWithPackageName(createNode), - package_id = createNode.templateId.packageId.toString, - flat_event_witnesses = - if (isAcsDelta) Set("signatory1", "signatory2", "signatory3", "observer") - else Set.empty, // stakeholders - tree_event_witnesses = - Set("signatory1", "signatory2", "signatory3", "observer"), // informees - create_argument = emptyArray, - create_signatories 
= Set("signatory1", "signatory2", "signatory3"), - create_observers = Set("observer"), - create_key_value = Some(emptyArray), - create_key_maintainers = Some(Set("signatory2", "signatory3")), + additional_witnesses = Some( + if (isAcsDelta) Set.empty + else Set("signatory1", "signatory2", "signatory3", "observer") + ), + source_synchronizer_id = None, + reassignment_counter = None, + reassignment_id = None, + representative_package_id = + if (isRepairTransaction) someRepresentativePackageId + else createNode.templateId.packageId, + notPersistedContractId = createNode.coid, + internal_contract_id = 42L, create_key_hash = Some( GlobalKey .assertBuild(contractTemplate, keyValue, createNode.packageName) @@ -262,23 +298,15 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { .bytes .toHexString ), - create_argument_compression = compressionAlgorithmId, - create_key_value_compression = compressionAlgorithmId, - event_sequential_id = 0, - authentication_data = someContractAuthenticationData.toByteArray, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, - trace_context = serializedEmptyTraceContext, - record_time = someRecordTime.toMicros, - external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), ) - dtos(5) shouldEqual DbDto.CommandCompletion( + val dtoCompletion = DbDto.CommandCompletion( completion_offset = someOffset.unwrap, record_time = someRecordTime.toMicros, publication_time = 0, user_id = completionInfo.userId, submitters = completionInfo.actAs.toSet, command_id = completionInfo.commandId, - update_id = Some(updateId), + update_id = Some(updateIdByteArray), rejection_status_code = None, rejection_status_message = None, rejection_status_details = None, @@ -286,209 +314,510 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { deduplication_offset = None, deduplication_duration_nanos = None, deduplication_duration_seconds = None, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, + synchronizer_id = someSynchronizerId1, message_uuid = None, is_transaction = true, trace_context = serializedEmptyTraceContext, ) - dtos(6) shouldEqual DbDto.TransactionMeta( - update_id = updateId, + val dtoTransactionMeta = DbDto.TransactionMeta( + update_id = updateIdByteArray, event_offset = someOffset.unwrap, publication_time = 0, record_time = someRecordTime.toMicros, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, + synchronizer_id = someSynchronizerId1, event_sequential_id_first = 0, event_sequential_id_last = 0, ) + + dtos.head shouldEqual dtoCreate + if (!isRepairTransaction) { + dtos(5) shouldEqual dtoCompletion + dtos(6) shouldEqual dtoTransactionMeta + } else { + dtos(5) shouldEqual dtoTransactionMeta + } Set(dtos(1), dtos(2), dtos(3), dtos(4)) should contain theSameElementsAs (if (isAcsDelta) Set( - DbDto.IdFilterCreateStakeholder( - 0L, - templateIdWithPackageName(createNode), - "signatory1", + DbDto.IdFilterActivateStakeholder( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "signatory1", + first_per_sequential_id = true, + ) ), - DbDto.IdFilterCreateStakeholder( - 0L, - templateIdWithPackageName(createNode), - "signatory2", + DbDto.IdFilterActivateStakeholder( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "signatory2", + first_per_sequential_id = false, + ) ), - DbDto.IdFilterCreateStakeholder( - 0L, - templateIdWithPackageName(createNode), - "signatory3", + DbDto.IdFilterActivateStakeholder( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "signatory3", + first_per_sequential_id = false, + 
) ), DbDto - .IdFilterCreateStakeholder(0L, templateIdWithPackageName(createNode), "observer"), + .IdFilterActivateStakeholder( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "observer", + first_per_sequential_id = false, + ) + ), ) else Set( - DbDto.IdFilterCreateNonStakeholderInformee( - 0L, - templateIdWithPackageName(createNode), - "signatory1", + DbDto.IdFilterActivateWitness( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "signatory1", + first_per_sequential_id = true, + ) ), - DbDto.IdFilterCreateNonStakeholderInformee( - 0L, - templateIdWithPackageName(createNode), - "signatory2", + DbDto.IdFilterActivateWitness( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "signatory2", + first_per_sequential_id = false, + ) ), - DbDto.IdFilterCreateNonStakeholderInformee( - 0L, - templateIdWithPackageName(createNode), - "signatory3", + DbDto.IdFilterActivateWitness( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "signatory3", + first_per_sequential_id = false, + ) ), - DbDto.IdFilterCreateNonStakeholderInformee( - 0L, - templateIdWithPackageName(createNode), - "observer", + DbDto.IdFilterActivateWitness( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "observer", + first_per_sequential_id = false, + ) ), )) - dtos.size shouldEqual 7 + if (isRepairTransaction) + dtos.size shouldEqual 6 + else + dtos.size shouldEqual 7 } } - handleTransactionAcceptedSingleCreateNode(isAcsDelta = true) - handleTransactionAcceptedSingleCreateNode(isAcsDelta = false) + handleAcsDeltaTransactionAcceptedWithSingleCreateNode( + isAcsDelta = true, + isRepairTransaction = false, + ) + handleAcsDeltaTransactionAcceptedWithSingleCreateNode( + isAcsDelta = true, + isRepairTransaction = true, + ) - def handleTransactionAcceptedSingleConsumingExerciseNode(isAcsDelta: Boolean): Unit = { - s"handle TransactionAccepted (single consuming exercise node, isAcsDelta = $isAcsDelta)" in { - val completionInfo = someCompletionInfo - val transactionMeta = someTransactionMeta - val externalTransactionHash = someExternalTransactionHash - val builder = TxBuilder() - val exerciseNode = { - val createNode = builder.create( - id = builder.newCid, - templateId = "M:T", - argument = Value.ValueUnit, - signatories = List("signatory"), - observers = List("observer"), - ) - builder.exercise( - contract = createNode, - choice = "someChoice", - consuming = true, - actingParties = Set("signatory"), - argument = Value.ValueUnit, - result = Some(Value.ValueUnit), - choiceObservers = Set.empty, - byKey = false, - ) - } - val exerciseNodeId = builder.add(exerciseNode) - val transaction = builder.buildCommitted() - val update = state.Update.SequencedTransactionAccepted( + "handle SequencedTransactionAccepted (single create node, isAcsDelta = false)" in { + val completionInfo = someCompletionInfo + val transactionMeta = someTransactionMeta + val externalTransactionHash = someExternalTransactionHash + val builder = TxBuilder() + val contractId = builder.newCid + val contractTemplate = Ref.Identifier.assertFromString("P:M:T") + val keyValue = Value.ValueUnit + val createNode = builder + .create( + id = contractId, + templateId = contractTemplate, + argument = Value.ValueUnit, + signatories = Set("signatory1", "signatory2", "signatory3"), + observers = Set("observer"), + key = CreateKey.KeyWithMaintainers(keyValue, Set("signatory2", "signatory3")), + ) + val createNodeId = builder.add(createNode) + val transaction = builder.buildCommitted() + val update = + 
state.Update.SequencedTransactionAccepted( completionInfoO = Some(completionInfo), transactionMeta = transactionMeta, transaction = transaction, updateId = updateId, - contractAuthenticationData = Map.empty, + contractAuthenticationData = Map(contractId -> someContractAuthenticationData), synchronizerId = someSynchronizerId1, - recordTime = CantonTimestamp.ofEpochMicro(120), + recordTime = someRecordTime, externalTransactionHash = Some(externalTransactionHash), - acsChangeFactory = TestAcsChangeFactory(contractActivenessChanged = isAcsDelta), + acsChangeFactory = TestAcsChangeFactory(false), + internalContractIds = Map(contractId -> 42L), ) - val dtos = updateToDtos(update) + val dtos = updateToDtos(update) - dtos.head shouldEqual - DbDto.EventExercise( - consuming = true, - event_offset = someOffset.unwrap, - update_id = updateId, - ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, - command_id = Some(completionInfo.commandId), - workflow_id = transactionMeta.workflowId, - user_id = Some(completionInfo.userId), - submitters = Some(completionInfo.actAs.toSet), - node_id = exerciseNodeId.index, - contract_id = exerciseNode.targetCoid.toBytes.toByteArray, - template_id = templateIdWithPackageName(exerciseNode), - package_id = exerciseNode.templateId.packageId, - flat_event_witnesses = - if (isAcsDelta) Set("signatory", "observer") else Set.empty, // stakeholders - tree_event_witnesses = Set("signatory", "observer"), // informees - exercise_choice = exerciseNode.choiceId, - exercise_argument = emptyArray, - exercise_result = Some(emptyArray), - exercise_actors = Set("signatory"), - exercise_last_descendant_node_id = exerciseNodeId.index, - exercise_argument_compression = compressionAlgorithmId, - exercise_result_compression = compressionAlgorithmId, - event_sequential_id = 0, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, - trace_context = serializedEmptyTraceContext, - record_time = 120, - external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), - ) - dtos(3) shouldEqual - DbDto.CommandCompletion( - completion_offset = someOffset.unwrap, - record_time = 120, - publication_time = 0, - user_id = completionInfo.userId, - submitters = completionInfo.actAs.toSet, - command_id = completionInfo.commandId, - update_id = Some(updateId), - rejection_status_code = None, - rejection_status_message = None, - rejection_status_details = None, - submission_id = completionInfo.submissionId, - deduplication_offset = None, - deduplication_duration_nanos = None, - deduplication_duration_seconds = None, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, - message_uuid = None, - is_transaction = true, - trace_context = serializedEmptyTraceContext, - ) - dtos(4) shouldEqual - DbDto.TransactionMeta( - update_id = updateId, - event_offset = someOffset.unwrap, - publication_time = 0, - record_time = 120, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, - event_sequential_id_first = 0, - event_sequential_id_last = 0, - ) + val dtoCreate = DbDto.EventVariousWitnessed( + event_offset = someOffset.unwrap, + update_id = updateIdByteArray, + workflow_id = transactionMeta.workflowId, + command_id = Some(completionInfo.commandId), + submitters = Some(completionInfo.actAs.toSet), + record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, + external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + event_type = PersistentEventType.WitnessedCreate.asInt, + 
event_sequential_id = 0, + node_id = createNodeId.index, + additional_witnesses = Set("signatory1", "signatory2", "signatory3", "observer"), + consuming = None, + exercise_choice = None, + exercise_choice_interface_id = None, + exercise_argument = None, + exercise_result = None, + exercise_actors = None, + exercise_last_descendant_node_id = None, + exercise_argument_compression = None, + exercise_result_compression = None, + representative_package_id = Some(createNode.templateId.packageId), + contract_id = None, + internal_contract_id = Some(42L), + template_id = None, + package_id = None, + ledger_effective_time = None, + ) + val dtoCompletion = DbDto.CommandCompletion( + completion_offset = someOffset.unwrap, + record_time = someRecordTime.toMicros, + publication_time = 0, + user_id = completionInfo.userId, + submitters = completionInfo.actAs.toSet, + command_id = completionInfo.commandId, + update_id = Some(updateIdByteArray), + rejection_status_code = None, + rejection_status_message = None, + rejection_status_details = None, + submission_id = completionInfo.submissionId, + deduplication_offset = None, + deduplication_duration_nanos = None, + deduplication_duration_seconds = None, + synchronizer_id = someSynchronizerId1, + message_uuid = None, + is_transaction = true, + trace_context = serializedEmptyTraceContext, + ) + val dtoTransactionMeta = DbDto.TransactionMeta( + update_id = updateIdByteArray, + event_offset = someOffset.unwrap, + publication_time = 0, + record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + event_sequential_id_first = 0, + event_sequential_id_last = 0, + ) - Set(dtos(1), dtos(2)) should contain theSameElementsAs - (if (isAcsDelta) - Set( - DbDto.IdFilterConsumingStakeholder( - event_sequential_id = 0, - template_id = templateIdWithPackageName(exerciseNode), - party_id = "signatory", - ), - DbDto.IdFilterConsumingStakeholder( - event_sequential_id = 0, - template_id = templateIdWithPackageName(exerciseNode), - party_id = "observer", - ), - ) - else - Set( - DbDto.IdFilterConsumingNonStakeholderInformee( - event_sequential_id = 0, - template_id = templateIdWithPackageName(exerciseNode), - party_id = "signatory", - ), - DbDto.IdFilterConsumingNonStakeholderInformee( - event_sequential_id = 0, - template_id = templateIdWithPackageName(exerciseNode), - party_id = "observer", - ), - )) + dtos.head shouldEqual dtoCreate + dtos(5) shouldEqual dtoCompletion + dtos(6) shouldEqual dtoTransactionMeta + Set(dtos(1), dtos(2), dtos(3), dtos(4)) should contain theSameElementsAs + Set( + DbDto.IdFilterVariousWitness( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "signatory1", + first_per_sequential_id = true, + ) + ), + DbDto.IdFilterVariousWitness( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "signatory2", + first_per_sequential_id = false, + ) + ), + DbDto.IdFilterVariousWitness( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "signatory3", + first_per_sequential_id = false, + ) + ), + DbDto.IdFilterVariousWitness( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "observer", + first_per_sequential_id = false, + ) + ), + ) - dtos.size shouldEqual 5 + dtos.size shouldEqual 7 + } + s"handle TransactionAccepted (single consuming exercise node, isAcsDelta = true)" in { + val completionInfo = someCompletionInfo + val transactionMeta = someTransactionMeta + val externalTransactionHash = someExternalTransactionHash + val builder = TxBuilder() + val exerciseNode = { + val createNode = builder.create( + 
id = builder.newCid, + templateId = "M:T", + argument = Value.ValueUnit, + signatories = List("signatory"), + observers = List("observer"), + ) + builder.exercise( + contract = createNode, + choice = "someChoice", + consuming = true, + actingParties = Set("signatory"), + argument = Value.ValueUnit, + result = Some(Value.ValueUnit), + choiceObservers = Set.empty, + byKey = false, + ) } + val exerciseNodeId = builder.add(exerciseNode) + val transaction = builder.buildCommitted() + val update = state.Update.SequencedTransactionAccepted( + completionInfoO = Some(completionInfo), + transactionMeta = transactionMeta, + transaction = transaction, + updateId = updateId, + contractAuthenticationData = Map.empty, + synchronizerId = someSynchronizerId1, + recordTime = CantonTimestamp.ofEpochMicro(120), + externalTransactionHash = Some(externalTransactionHash), + acsChangeFactory = TestAcsChangeFactory(true), + internalContractIds = Map.empty, + ) + val dtos = updateToDtos(update) + + dtos.head shouldEqual + DbDto.EventDeactivate( + event_offset = someOffset.unwrap, + update_id = updateIdByteArray, + workflow_id = transactionMeta.workflowId, + command_id = Some(completionInfo.commandId), + submitters = Some(completionInfo.actAs.toSet), + record_time = 120, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, + external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + event_type = PersistentEventType.ConsumingExercise.asInt, + event_sequential_id = 0, + node_id = exerciseNodeId.index, + deactivated_event_sequential_id = None, + additional_witnesses = Some(Set.empty), + exercise_choice = exerciseNode.choiceId, + exercise_choice_interface_id = None, + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdConsumingArg, emptyArray)), + exercise_result = Some(compressArrayWith(compressionAlgorithmIdConsumingRes, emptyArray)), + exercise_actors = Some(Set("signatory")), + exercise_last_descendant_node_id = Some(exerciseNodeId.index), + exercise_argument_compression = compressionAlgorithmIdConsumingArg, + exercise_result_compression = compressionAlgorithmIdConsumingRes, + reassignment_id = None, + assignment_exclusivity = None, + target_synchronizer_id = None, + reassignment_counter = None, + contract_id = exerciseNode.targetCoid, + internal_contract_id = None, + template_id = templateIdWithPackageName(exerciseNode), + package_id = exerciseNode.templateId.packageId, + stakeholders = Set("signatory", "observer"), + ledger_effective_time = Some(transactionMeta.ledgerEffectiveTime.micros), + ) + dtos(3) shouldEqual + DbDto.CommandCompletion( + completion_offset = someOffset.unwrap, + record_time = 120, + publication_time = 0, + user_id = completionInfo.userId, + submitters = completionInfo.actAs.toSet, + command_id = completionInfo.commandId, + update_id = Some(updateIdByteArray), + rejection_status_code = None, + rejection_status_message = None, + rejection_status_details = None, + submission_id = completionInfo.submissionId, + deduplication_offset = None, + deduplication_duration_nanos = None, + deduplication_duration_seconds = None, + synchronizer_id = someSynchronizerId1, + message_uuid = None, + is_transaction = true, + trace_context = serializedEmptyTraceContext, + ) + dtos(4) shouldEqual + DbDto.TransactionMeta( + update_id = updateIdByteArray, + event_offset = someOffset.unwrap, + publication_time = 0, + record_time = 120, + synchronizer_id = someSynchronizerId1, + event_sequential_id_first = 0, + event_sequential_id_last = 0, + ) + + 
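// A minimal sketch of the DTO layout these assertions encode (an editorial note,
// not produced by this spec): for a single consuming exercise node with an ACS
// delta, UpdateToDbDto is expected to emit five DTOs — the deactivation event at
// the head, two stakeholder id-filter rows, then the completion and the
// transaction metadata. The two id-filter rows are compared via
// `theSameElementsAs` below because their relative order at indices 1 and 2 is
// not fixed:
//
//   dtos(0) -> DbDto.EventDeactivate               (event_type = ConsumingExercise)
//   dtos(1) -> DbDto.IdFilterDeactivateStakeholder (one of "signatory" / "observer")
//   dtos(2) -> DbDto.IdFilterDeactivateStakeholder (the other stakeholder)
//   dtos(3) -> DbDto.CommandCompletion
//   dtos(4) -> DbDto.TransactionMeta
//
// e.g. a quick structural check one could add (hypothetical, not part of this spec):
// dtos.collect { case d: DbDto.IdFilterDeactivateStakeholder => d }.size shouldEqual 2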
Set(dtos(1), dtos(2)) should contain theSameElementsAs + Set( + DbDto.IdFilterDeactivateStakeholder( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "signatory", + first_per_sequential_id = true, + ) + ), + DbDto.IdFilterDeactivateStakeholder( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "observer", + first_per_sequential_id = false, + ) + ), + ) + + dtos.size shouldEqual 5 } - handleTransactionAcceptedSingleConsumingExerciseNode(isAcsDelta = true) - handleTransactionAcceptedSingleConsumingExerciseNode(isAcsDelta = false) + s"handle TransactionAccepted (single consuming exercise node, isAcsDelta = false)" in { + val completionInfo = someCompletionInfo + val transactionMeta = someTransactionMeta + val externalTransactionHash = someExternalTransactionHash + val builder = TxBuilder() + val exerciseNode = { + val createNode = builder.create( + id = builder.newCid, + templateId = "M:T", + argument = Value.ValueUnit, + signatories = List("signatory"), + observers = List("observer"), + ) + builder.exercise( + contract = createNode, + choice = "someChoice", + consuming = true, + actingParties = Set("signatory"), + argument = Value.ValueUnit, + result = Some(Value.ValueUnit), + choiceObservers = Set.empty, + byKey = false, + ) + } + val exerciseNodeId = builder.add(exerciseNode) + val transaction = builder.buildCommitted() + val update = state.Update.SequencedTransactionAccepted( + completionInfoO = Some(completionInfo), + transactionMeta = transactionMeta, + transaction = transaction, + updateId = updateId, + contractAuthenticationData = Map.empty, + synchronizerId = someSynchronizerId1, + recordTime = CantonTimestamp.ofEpochMicro(120), + externalTransactionHash = Some(externalTransactionHash), + acsChangeFactory = TestAcsChangeFactory(false), + internalContractIds = Map.empty, + ) + val dtos = updateToDtos(update) + + dtos.head shouldEqual + DbDto.EventVariousWitnessed( + event_offset = someOffset.unwrap, + update_id = updateIdByteArray, + workflow_id = transactionMeta.workflowId, + command_id = Some(completionInfo.commandId), + submitters = Some(completionInfo.actAs.toSet), + record_time = 120, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, + external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + event_type = PersistentEventType.WitnessedConsumingExercise.asInt, + event_sequential_id = 0, + node_id = exerciseNodeId.index, + additional_witnesses = Set("signatory", "observer"), + consuming = Some(true), + exercise_choice = exerciseNode.choiceId, + exercise_choice_interface_id = None, + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdConsumingArg, emptyArray)), + exercise_result = Some(compressArrayWith(compressionAlgorithmIdConsumingRes, emptyArray)), + exercise_actors = Some(Set("signatory")), + exercise_last_descendant_node_id = Some(exerciseNodeId.index), + exercise_argument_compression = compressionAlgorithmIdConsumingArg, + exercise_result_compression = compressionAlgorithmIdConsumingRes, + representative_package_id = None, + contract_id = Some(exerciseNode.targetCoid), + internal_contract_id = None, + template_id = Some(templateIdWithPackageName(exerciseNode)), + package_id = Some(exerciseNode.templateId.packageId), + ledger_effective_time = Some(transactionMeta.ledgerEffectiveTime.micros), + ) + dtos(3) shouldEqual + DbDto.CommandCompletion( + completion_offset = someOffset.unwrap, + 
record_time = 120, + publication_time = 0, + user_id = completionInfo.userId, + submitters = completionInfo.actAs.toSet, + command_id = completionInfo.commandId, + update_id = Some(updateIdByteArray), + rejection_status_code = None, + rejection_status_message = None, + rejection_status_details = None, + submission_id = completionInfo.submissionId, + deduplication_offset = None, + deduplication_duration_nanos = None, + deduplication_duration_seconds = None, + synchronizer_id = someSynchronizerId1, + message_uuid = None, + is_transaction = true, + trace_context = serializedEmptyTraceContext, + ) + dtos(4) shouldEqual + DbDto.TransactionMeta( + update_id = updateIdByteArray, + event_offset = someOffset.unwrap, + publication_time = 0, + record_time = 120, + synchronizer_id = someSynchronizerId1, + event_sequential_id_first = 0, + event_sequential_id_last = 0, + ) + + Set(dtos(1), dtos(2)) should contain theSameElementsAs + Set( + DbDto.IdFilterVariousWitness( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "signatory", + first_per_sequential_id = true, + ) + ), + DbDto.IdFilterVariousWitness( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "observer", + first_per_sequential_id = false, + ) + ), + ) + + dtos.size shouldEqual 5 + } "handle TransactionAccepted (single non-consuming exercise node)" in { val completionInfo = someCompletionInfo @@ -525,43 +854,498 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { synchronizerId = someSynchronizerId1, recordTime = someRecordTime, externalTransactionHash = Some(externalTransactionHash), - acsChangeFactory = TestAcsChangeFactory(), + acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map.empty, + ) + val dtos = updateToDtos(update) + + dtos should contain theSameElementsInOrderAs List( + DbDto.EventVariousWitnessed( + event_offset = someOffset.unwrap, + update_id = updateIdByteArray, + workflow_id = transactionMeta.workflowId, + command_id = Some(completionInfo.commandId), + submitters = Some(completionInfo.actAs.toSet), + record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, + external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + event_type = PersistentEventType.NonConsumingExercise.asInt, + event_sequential_id = 0, + node_id = exerciseNodeId.index, + additional_witnesses = Set("signatory"), + consuming = Some(false), + exercise_choice = exerciseNode.choiceId, + exercise_choice_interface_id = None, + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingArg, emptyArray)), + exercise_result = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingRes, emptyArray)), + exercise_actors = Some(Set("signatory")), + exercise_last_descendant_node_id = Some(exerciseNodeId.index), + exercise_argument_compression = compressionAlgorithmIdNonConsumingArg, + exercise_result_compression = compressionAlgorithmIdNonConsumingRes, + representative_package_id = None, + contract_id = Some(exerciseNode.targetCoid), + internal_contract_id = None, + template_id = Some(templateIdWithPackageName(exerciseNode)), + package_id = Some(exerciseNode.templateId.packageId), + ledger_effective_time = Some(transactionMeta.ledgerEffectiveTime.micros), + ), + DbDto.IdFilterVariousWitness( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "signatory", + 
first_per_sequential_id = true, + ) + ), + DbDto.CommandCompletion( + completion_offset = someOffset.unwrap, + record_time = someRecordTime.toMicros, + publication_time = 0, + user_id = completionInfo.userId, + submitters = completionInfo.actAs.toSet, + command_id = completionInfo.commandId, + update_id = Some(updateIdByteArray), + rejection_status_code = None, + rejection_status_message = None, + rejection_status_details = None, + submission_id = completionInfo.submissionId, + deduplication_offset = None, + deduplication_duration_nanos = None, + deduplication_duration_seconds = None, + synchronizer_id = someSynchronizerId1, + message_uuid = None, + is_transaction = true, + trace_context = serializedEmptyTraceContext, + ), + DbDto.TransactionMeta( + update_id = updateIdByteArray, + event_offset = someOffset.unwrap, + publication_time = 0, + record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + event_sequential_id_first = 0, + event_sequential_id_last = 0, + ), + ) + } + + "handle TransactionAccepted (create node divulged)" in { + // Previous transaction + // └─ #1 Create + // Transaction + // └─ #2 Exercise (choice A) + // ├─ #3 Exercise (choice B) + // └─ #4 Create (C) + val completionInfo = someCompletionInfo + val transactionMeta = someTransactionMeta + val externalTransactionHash = someExternalTransactionHash + val builder = TxBuilder() + val createNode = builder.create( + id = builder.newCid, + templateId = "M:T", + argument = Value.ValueUnit, + signatories = List("signatory"), + observers = List("observer"), + ) + val exerciseNodeA = builder.exercise( + contract = createNode, + choice = "A", + consuming = false, + actingParties = Set("signatory"), + argument = Value.ValueUnit, + result = Some(Value.ValueUnit), + choiceObservers = Set.empty, + byKey = false, + ) + val exerciseNodeB = builder.exercise( + contract = createNode, + choice = "B", + consuming = false, + actingParties = Set("signatory"), + argument = Value.ValueUnit, + result = Some(Value.ValueUnit), + choiceObservers = Set.empty, + byKey = false, + ) + val createNodeC = builder.create( + id = builder.newCid, + templateId = "M:T2", + argument = Value.ValueUnit, + signatories = List("signatory2"), + observers = Set.empty, + ) + val exerciseNodeAId = builder.add(exerciseNodeA) + val exerciseNodeBId = builder.add(exerciseNodeB, exerciseNodeAId) + val createNodeCId = builder.add(createNodeC, exerciseNodeAId) + val transaction = builder.buildCommitted() + val update = state.Update.SequencedTransactionAccepted( + completionInfoO = Some(completionInfo), + transactionMeta = transactionMeta, + transaction = transaction, + updateId = updateId, + contractAuthenticationData = Map.empty, + synchronizerId = someSynchronizerId1, + recordTime = someRecordTime, + externalTransactionHash = Some(externalTransactionHash), + acsChangeFactory = TestAcsChangeFactory(false), + internalContractIds = Map(createNodeC.coid -> 42L), + ) + val dtos = updateToDtos(update) + + dtos should contain theSameElementsInOrderAs List( + DbDto.EventVariousWitnessed( + event_offset = someOffset.unwrap, + update_id = updateIdByteArray, + workflow_id = transactionMeta.workflowId, + command_id = Some(completionInfo.commandId), + submitters = Some(completionInfo.actAs.toSet), + record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, + external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + event_type = PersistentEventType.NonConsumingExercise.asInt, 
+ event_sequential_id = 0, + node_id = exerciseNodeAId.index, + additional_witnesses = Set("signatory"), + consuming = Some(false), + exercise_choice = exerciseNodeA.choiceId, + exercise_choice_interface_id = None, + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingArg, emptyArray)), + exercise_result = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingRes, emptyArray)), + exercise_actors = Some(Set("signatory")), + exercise_last_descendant_node_id = Some(createNodeCId.index), + exercise_argument_compression = compressionAlgorithmIdNonConsumingArg, + exercise_result_compression = compressionAlgorithmIdNonConsumingRes, + representative_package_id = None, + contract_id = Some(exerciseNodeA.targetCoid), + internal_contract_id = None, + template_id = Some(templateIdWithPackageName(exerciseNodeA)), + package_id = Some(exerciseNodeA.templateId.packageId), + ledger_effective_time = Some(transactionMeta.ledgerEffectiveTime.micros), + ), + DbDto.IdFilterVariousWitness( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNodeA), + party_id = "signatory", + first_per_sequential_id = true, + ) + ), + DbDto.EventVariousWitnessed( + event_offset = someOffset.unwrap, + update_id = updateIdByteArray, + workflow_id = transactionMeta.workflowId, + command_id = Some(completionInfo.commandId), + submitters = Some(completionInfo.actAs.toSet), + record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, + external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + event_type = PersistentEventType.NonConsumingExercise.asInt, + event_sequential_id = 0, + node_id = exerciseNodeBId.index, + additional_witnesses = Set("signatory"), + consuming = Some(false), + exercise_choice = exerciseNodeB.choiceId, + exercise_choice_interface_id = None, + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingArg, emptyArray)), + exercise_result = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingRes, emptyArray)), + exercise_actors = Some(Set("signatory")), + exercise_last_descendant_node_id = Some(exerciseNodeBId.index), + exercise_argument_compression = compressionAlgorithmIdNonConsumingArg, + exercise_result_compression = compressionAlgorithmIdNonConsumingRes, + representative_package_id = None, + contract_id = Some(exerciseNodeB.targetCoid), + internal_contract_id = None, + template_id = Some(templateIdWithPackageName(exerciseNodeB)), + package_id = Some(exerciseNodeB.templateId.packageId), + ledger_effective_time = Some(transactionMeta.ledgerEffectiveTime.micros), + ), + DbDto.IdFilterVariousWitness( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNodeB), + party_id = "signatory", + first_per_sequential_id = true, + ) + ), + DbDto.EventVariousWitnessed( + event_offset = someOffset.unwrap, + update_id = updateIdByteArray, + workflow_id = transactionMeta.workflowId, + command_id = Some(completionInfo.commandId), + submitters = Some(completionInfo.actAs.toSet), + record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, + external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + event_type = PersistentEventType.WitnessedCreate.asInt, + event_sequential_id = 0, + node_id = createNodeCId.index, + additional_witnesses = Set("signatory", "signatory2"), + consuming = None, + exercise_choice = None, + 
exercise_choice_interface_id = None, + exercise_argument = None, + exercise_result = None, + exercise_actors = None, + exercise_last_descendant_node_id = None, + exercise_argument_compression = None, + exercise_result_compression = None, + representative_package_id = Some(createNode.templateId.packageId), + contract_id = None, + internal_contract_id = Some(42L), + template_id = None, + package_id = None, + ledger_effective_time = None, + ), + DbDto.IdFilterVariousWitness( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(createNodeC), + party_id = "signatory", + first_per_sequential_id = true, + ) + ), + DbDto.IdFilterVariousWitness( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(createNodeC), + party_id = "signatory2", + first_per_sequential_id = false, + ) + ), + DbDto.CommandCompletion( + completion_offset = someOffset.unwrap, + record_time = someRecordTime.toMicros, + publication_time = 0, + user_id = completionInfo.userId, + submitters = completionInfo.actAs.toSet, + command_id = completionInfo.commandId, + update_id = Some(updateIdByteArray), + rejection_status_code = None, + rejection_status_message = None, + rejection_status_details = None, + submission_id = completionInfo.submissionId, + deduplication_offset = None, + deduplication_duration_nanos = None, + deduplication_duration_seconds = None, + synchronizer_id = someSynchronizerId1, + message_uuid = None, + is_transaction = true, + trace_context = serializedEmptyTraceContext, + ), + DbDto.TransactionMeta( + update_id = updateIdByteArray, + event_offset = someOffset.unwrap, + publication_time = 0, + record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + event_sequential_id_first = 0, + event_sequential_id_last = 0, + ), + ) + } + + "handle TransactionAccepted (nested create node, ACSDelta = true)" in { + // Previous transaction + // └─ #1 Create + // Transaction + // └─ #2 Exercise (choice A) + // ├─ #3 Exercise (choice B) + // └─ #4 Create (C) + val completionInfo = someCompletionInfo + val transactionMeta = someTransactionMeta + val externalTransactionHash = someExternalTransactionHash + val builder = TxBuilder() + val createNode = builder.create( + id = builder.newCid, + templateId = "M:T", + argument = Value.ValueUnit, + signatories = List("signatory"), + observers = List("observer"), + ) + val exerciseNodeA = builder.exercise( + contract = createNode, + choice = "A", + consuming = false, + actingParties = Set("signatory"), + argument = Value.ValueUnit, + result = Some(Value.ValueUnit), + choiceObservers = Set.empty, + byKey = false, + ) + val exerciseNodeB = builder.exercise( + contract = createNode, + choice = "B", + consuming = false, + actingParties = Set("signatory"), + argument = Value.ValueUnit, + result = Some(Value.ValueUnit), + choiceObservers = Set.empty, + byKey = false, + ) + val createNodeC = builder.create( + id = builder.newCid, + templateId = "M:T2", + argument = Value.ValueUnit, + signatories = List("signatory2"), + observers = Set.empty, + key = CreateKey.KeyWithMaintainers(Value.ValueUnit, Set("signatory2")), + ) + val exerciseNodeAId = builder.add(exerciseNodeA) + val exerciseNodeBId = builder.add(exerciseNodeB, exerciseNodeAId) + val createNodeCId = builder.add(createNodeC, exerciseNodeAId) + val transaction = builder.buildCommitted() + val update = state.Update.SequencedTransactionAccepted( + completionInfoO = Some(completionInfo), + transactionMeta = transactionMeta, + transaction = transaction, + updateId = 
updateId, + contractAuthenticationData = Map.empty, + synchronizerId = someSynchronizerId1, + recordTime = someRecordTime, + externalTransactionHash = Some(externalTransactionHash), + acsChangeFactory = TestAcsChangeFactory(true), + internalContractIds = Map(createNodeC.coid -> 42L), ) val dtos = updateToDtos(update) dtos should contain theSameElementsInOrderAs List( - DbDto.EventExercise( - consuming = false, + DbDto.EventVariousWitnessed( event_offset = someOffset.unwrap, - update_id = updateId, - ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, - command_id = Some(completionInfo.commandId), + update_id = updateIdByteArray, workflow_id = transactionMeta.workflowId, - user_id = Some(completionInfo.userId), + command_id = Some(completionInfo.commandId), submitters = Some(completionInfo.actAs.toSet), - node_id = exerciseNodeId.index, - contract_id = exerciseNode.targetCoid.toBytes.toByteArray, - template_id = templateIdWithPackageName(exerciseNode), - package_id = exerciseNode.templateId.packageId, - flat_event_witnesses = Set.empty, // stakeholders - tree_event_witnesses = Set("signatory"), // informees - exercise_choice = exerciseNode.choiceId, - exercise_argument = emptyArray, - exercise_result = Some(emptyArray), - exercise_actors = Set("signatory"), - exercise_last_descendant_node_id = exerciseNodeId.index, - exercise_argument_compression = compressionAlgorithmId, - exercise_result_compression = compressionAlgorithmId, - event_sequential_id = 0, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, + record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, trace_context = serializedEmptyTraceContext, + external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + event_type = PersistentEventType.NonConsumingExercise.asInt, + event_sequential_id = 0, + node_id = exerciseNodeAId.index, + additional_witnesses = Set("signatory"), + consuming = Some(false), + exercise_choice = exerciseNodeA.choiceId, + exercise_choice_interface_id = None, + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingArg, emptyArray)), + exercise_result = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingRes, emptyArray)), + exercise_actors = Some(Set("signatory")), + exercise_last_descendant_node_id = Some(createNodeCId.index), + exercise_argument_compression = compressionAlgorithmIdNonConsumingArg, + exercise_result_compression = compressionAlgorithmIdNonConsumingRes, + representative_package_id = None, + contract_id = Some(exerciseNodeA.targetCoid), + internal_contract_id = None, + template_id = Some(templateIdWithPackageName(exerciseNodeA)), + package_id = Some(exerciseNodeA.templateId.packageId), + ledger_effective_time = Some(transactionMeta.ledgerEffectiveTime.micros), + ), + DbDto.IdFilterVariousWitness( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNodeA), + party_id = "signatory", + first_per_sequential_id = true, + ) + ), + DbDto.EventVariousWitnessed( + event_offset = someOffset.unwrap, + update_id = updateIdByteArray, + workflow_id = transactionMeta.workflowId, + command_id = Some(completionInfo.commandId), + submitters = Some(completionInfo.actAs.toSet), record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + event_type = PersistentEventType.NonConsumingExercise.asInt, + event_sequential_id = 0, + node_id = 
exerciseNodeBId.index, + additional_witnesses = Set("signatory"), + consuming = Some(false), + exercise_choice = exerciseNodeB.choiceId, + exercise_choice_interface_id = None, + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingArg, emptyArray)), + exercise_result = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingRes, emptyArray)), + exercise_actors = Some(Set("signatory")), + exercise_last_descendant_node_id = Some(exerciseNodeBId.index), + exercise_argument_compression = compressionAlgorithmIdNonConsumingArg, + exercise_result_compression = compressionAlgorithmIdNonConsumingRes, + representative_package_id = None, + contract_id = Some(exerciseNodeB.targetCoid), + internal_contract_id = None, + template_id = Some(templateIdWithPackageName(exerciseNodeB)), + package_id = Some(exerciseNodeB.templateId.packageId), + ledger_effective_time = Some(transactionMeta.ledgerEffectiveTime.micros), + ), + DbDto.IdFilterVariousWitness( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNodeB), + party_id = "signatory", + first_per_sequential_id = true, + ) ), - DbDto.IdFilterNonConsumingInformee( + DbDto.EventActivate( + event_offset = someOffset.unwrap, + update_id = updateIdByteArray, + workflow_id = transactionMeta.workflowId, + command_id = Some(completionInfo.commandId), + submitters = Some(completionInfo.actAs.toSet), + record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, + external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + event_type = PersistentEventType.Create.asInt, event_sequential_id = 0, - template_id = templateIdWithPackageName(exerciseNode), - party_id = "signatory", + node_id = createNodeCId.index, + additional_witnesses = Some(Set("signatory")), + source_synchronizer_id = None, + reassignment_counter = None, + reassignment_id = None, + representative_package_id = createNodeC.templateId.packageId, + notPersistedContractId = createNodeC.coid, + internal_contract_id = 42L, + create_key_hash = Some( + GlobalKey + .assertBuild( + Ref.Identifier.assertFromString("P:M:T2"), + Value.ValueUnit, + createNodeC.packageName, + ) + .hash + .bytes + .toHexString + ), + ), + DbDto.IdFilterActivateStakeholder( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(createNodeC), + party_id = "signatory2", + first_per_sequential_id = true, + ) + ), + DbDto.IdFilterActivateWitness( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(createNodeC), + party_id = "signatory", + first_per_sequential_id = true, + ) ), DbDto.CommandCompletion( completion_offset = someOffset.unwrap, @@ -570,7 +1354,7 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { user_id = completionInfo.userId, submitters = completionInfo.actAs.toSet, command_id = completionInfo.commandId, - update_id = Some(updateId), + update_id = Some(updateIdByteArray), rejection_status_code = None, rejection_status_message = None, rejection_status_details = None, @@ -578,17 +1362,17 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { deduplication_offset = None, deduplication_duration_nanos = None, deduplication_duration_seconds = None, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, + synchronizer_id = someSynchronizerId1, message_uuid = None, is_transaction = true, trace_context = serializedEmptyTraceContext, ), DbDto.TransactionMeta( - update_id = updateId, + update_id = 
updateIdByteArray, event_offset = someOffset.unwrap, publication_time = 0, record_time = someRecordTime.toMicros, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, + synchronizer_id = someSynchronizerId1, event_sequential_id_first = 0, event_sequential_id_last = 0, ), @@ -669,141 +1453,170 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { recordTime = someRecordTime, externalTransactionHash = Some(externalTransactionHash), acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map.empty, ) val dtos = updateToDtos(update) dtos should contain theSameElementsInOrderAs List( - DbDto.EventExercise( - consuming = false, + DbDto.EventVariousWitnessed( event_offset = someOffset.unwrap, - update_id = updateId, - ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, - command_id = Some(completionInfo.commandId), + update_id = updateIdByteArray, workflow_id = transactionMeta.workflowId, - user_id = Some(completionInfo.userId), + command_id = Some(completionInfo.commandId), submitters = Some(completionInfo.actAs.toSet), - node_id = exerciseNodeAId.index, - contract_id = exerciseNodeA.targetCoid.toBytes.toByteArray, - template_id = templateIdWithPackageName(exerciseNodeA), - package_id = exerciseNodeA.templateId.packageId, - flat_event_witnesses = Set.empty, // stakeholders - tree_event_witnesses = Set("signatory"), // informees - exercise_choice = exerciseNodeA.choiceId, - exercise_argument = emptyArray, - exercise_result = Some(emptyArray), - exercise_actors = Set("signatory"), - exercise_last_descendant_node_id = exerciseNodeDId.index, - exercise_argument_compression = compressionAlgorithmId, - exercise_result_compression = compressionAlgorithmId, - event_sequential_id = 0, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, - trace_context = serializedEmptyTraceContext, record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), - ), - DbDto.IdFilterNonConsumingInformee( + event_type = PersistentEventType.NonConsumingExercise.asInt, event_sequential_id = 0, - template_id = templateIdWithPackageName(exerciseNodeA), - party_id = "signatory", + node_id = exerciseNodeAId.index, + additional_witnesses = Set("signatory"), + consuming = Some(false), + exercise_choice = exerciseNodeA.choiceId, + exercise_choice_interface_id = None, + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingArg, emptyArray)), + exercise_result = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingRes, emptyArray)), + exercise_actors = Some(Set("signatory")), + exercise_last_descendant_node_id = Some(exerciseNodeDId.index), + exercise_argument_compression = compressionAlgorithmIdNonConsumingArg, + exercise_result_compression = compressionAlgorithmIdNonConsumingRes, + representative_package_id = None, + contract_id = Some(exerciseNodeA.targetCoid), + internal_contract_id = None, + template_id = Some(templateIdWithPackageName(exerciseNodeA)), + package_id = Some(exerciseNodeA.templateId.packageId), + ledger_effective_time = Some(transactionMeta.ledgerEffectiveTime.micros), ), - DbDto.EventExercise( - consuming = false, + DbDto.IdFilterVariousWitness( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNodeA), + party_id = "signatory", + first_per_sequential_id = true, + ) + ), + DbDto.EventVariousWitnessed( event_offset = someOffset.unwrap, - update_id = updateId, - 
ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, - command_id = Some(completionInfo.commandId), + update_id = updateIdByteArray, workflow_id = transactionMeta.workflowId, - user_id = Some(completionInfo.userId), + command_id = Some(completionInfo.commandId), submitters = Some(completionInfo.actAs.toSet), - node_id = exerciseNodeBId.index, - contract_id = exerciseNodeB.targetCoid.toBytes.toByteArray, - template_id = templateIdWithPackageName(exerciseNodeB), - package_id = exerciseNodeB.templateId.packageId, - flat_event_witnesses = Set.empty, // stakeholders - tree_event_witnesses = Set("signatory"), // informees - exercise_choice = exerciseNodeB.choiceId, - exercise_argument = emptyArray, - exercise_result = Some(emptyArray), - exercise_actors = Set("signatory"), - exercise_last_descendant_node_id = exerciseNodeBId.index, - exercise_argument_compression = compressionAlgorithmId, - exercise_result_compression = compressionAlgorithmId, - event_sequential_id = 0, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, - trace_context = serializedEmptyTraceContext, record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), - ), - DbDto.IdFilterNonConsumingInformee( + event_type = PersistentEventType.NonConsumingExercise.asInt, event_sequential_id = 0, - template_id = templateIdWithPackageName(exerciseNodeB), - party_id = "signatory", + node_id = exerciseNodeBId.index, + additional_witnesses = Set("signatory"), + consuming = Some(false), + exercise_choice = exerciseNodeB.choiceId, + exercise_choice_interface_id = None, + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingArg, emptyArray)), + exercise_result = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingRes, emptyArray)), + exercise_actors = Some(Set("signatory")), + exercise_last_descendant_node_id = Some(exerciseNodeBId.index), + exercise_argument_compression = compressionAlgorithmIdNonConsumingArg, + exercise_result_compression = compressionAlgorithmIdNonConsumingRes, + representative_package_id = None, + contract_id = Some(exerciseNodeB.targetCoid), + internal_contract_id = None, + template_id = Some(templateIdWithPackageName(exerciseNodeB)), + package_id = Some(exerciseNodeB.templateId.packageId), + ledger_effective_time = Some(transactionMeta.ledgerEffectiveTime.micros), ), - DbDto.EventExercise( - consuming = false, + DbDto.IdFilterVariousWitness( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNodeB), + party_id = "signatory", + first_per_sequential_id = true, + ) + ), + DbDto.EventVariousWitnessed( event_offset = someOffset.unwrap, - update_id = updateId, - ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, - command_id = Some(completionInfo.commandId), + update_id = updateIdByteArray, workflow_id = transactionMeta.workflowId, - user_id = Some(completionInfo.userId), + command_id = Some(completionInfo.commandId), submitters = Some(completionInfo.actAs.toSet), - node_id = exerciseNodeCId.index, - contract_id = exerciseNodeC.targetCoid.toBytes.toByteArray, - template_id = templateIdWithPackageName(exerciseNodeC), - package_id = exerciseNodeC.templateId.packageId, - flat_event_witnesses = Set.empty, // stakeholders - tree_event_witnesses = Set("signatory"), // informees - exercise_choice = exerciseNodeC.choiceId, - exercise_argument = emptyArray, - exercise_result = 
Some(emptyArray), - exercise_actors = Set("signatory"), - exercise_last_descendant_node_id = exerciseNodeDId.index, - exercise_argument_compression = compressionAlgorithmId, - exercise_result_compression = compressionAlgorithmId, - event_sequential_id = 0, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, - trace_context = serializedEmptyTraceContext, record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), - ), - DbDto.IdFilterNonConsumingInformee( + event_type = PersistentEventType.NonConsumingExercise.asInt, event_sequential_id = 0, - template_id = templateIdWithPackageName(exerciseNodeC), - party_id = "signatory", + node_id = exerciseNodeCId.index, + additional_witnesses = Set("signatory"), + consuming = Some(false), + exercise_choice = exerciseNodeC.choiceId, + exercise_choice_interface_id = None, + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingArg, emptyArray)), + exercise_result = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingRes, emptyArray)), + exercise_actors = Some(Set("signatory")), + exercise_last_descendant_node_id = Some(exerciseNodeDId.index), + exercise_argument_compression = compressionAlgorithmIdNonConsumingArg, + exercise_result_compression = compressionAlgorithmIdNonConsumingRes, + representative_package_id = None, + contract_id = Some(exerciseNodeC.targetCoid), + internal_contract_id = None, + template_id = Some(templateIdWithPackageName(exerciseNodeC)), + package_id = Some(exerciseNodeC.templateId.packageId), + ledger_effective_time = Some(transactionMeta.ledgerEffectiveTime.micros), ), - DbDto.EventExercise( - consuming = false, + DbDto.IdFilterVariousWitness( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNodeC), + party_id = "signatory", + first_per_sequential_id = true, + ) + ), + DbDto.EventVariousWitnessed( event_offset = someOffset.unwrap, - update_id = updateId, - ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, - command_id = Some(completionInfo.commandId), + update_id = updateIdByteArray, workflow_id = transactionMeta.workflowId, - user_id = Some(completionInfo.userId), + command_id = Some(completionInfo.commandId), submitters = Some(completionInfo.actAs.toSet), - node_id = exerciseNodeDId.index, - contract_id = exerciseNodeD.targetCoid.toBytes.toByteArray, - template_id = templateIdWithPackageName(exerciseNodeD), - package_id = exerciseNodeD.templateId.packageId, - flat_event_witnesses = Set.empty, // stakeholders - tree_event_witnesses = Set("signatory"), // informees - exercise_choice = exerciseNodeD.choiceId, - exercise_argument = emptyArray, - exercise_result = Some(emptyArray), - exercise_actors = Set("signatory"), - exercise_last_descendant_node_id = exerciseNodeDId.index, - exercise_argument_compression = compressionAlgorithmId, - exercise_result_compression = compressionAlgorithmId, - event_sequential_id = 0, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, - trace_context = serializedEmptyTraceContext, record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), - ), - DbDto.IdFilterNonConsumingInformee( + event_type = PersistentEventType.NonConsumingExercise.asInt, event_sequential_id = 0, - template_id = 
templateIdWithPackageName(exerciseNodeD), - party_id = "signatory", + node_id = exerciseNodeDId.index, + additional_witnesses = Set("signatory"), + consuming = Some(false), + exercise_choice = exerciseNodeD.choiceId, + exercise_choice_interface_id = None, + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingArg, emptyArray)), + exercise_result = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingRes, emptyArray)), + exercise_actors = Some(Set("signatory")), + exercise_last_descendant_node_id = Some(exerciseNodeDId.index), + exercise_argument_compression = compressionAlgorithmIdNonConsumingArg, + exercise_result_compression = compressionAlgorithmIdNonConsumingRes, + representative_package_id = None, + contract_id = Some(exerciseNodeD.targetCoid), + internal_contract_id = None, + template_id = Some(templateIdWithPackageName(exerciseNodeD)), + package_id = Some(exerciseNodeD.templateId.packageId), + ledger_effective_time = Some(transactionMeta.ledgerEffectiveTime.micros), + ), + DbDto.IdFilterVariousWitness( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNodeD), + party_id = "signatory", + first_per_sequential_id = true, + ) ), DbDto.CommandCompletion( completion_offset = someOffset.unwrap, @@ -812,7 +1625,7 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { user_id = completionInfo.userId, submitters = completionInfo.actAs.toSet, command_id = completionInfo.commandId, - update_id = Some(updateId), + update_id = Some(updateIdByteArray), rejection_status_code = None, rejection_status_message = None, rejection_status_details = None, @@ -820,17 +1633,17 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { deduplication_offset = None, deduplication_duration_nanos = None, deduplication_duration_seconds = None, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, + synchronizer_id = someSynchronizerId1, message_uuid = None, is_transaction = true, trace_context = serializedEmptyTraceContext, ), DbDto.TransactionMeta( - update_id = updateId, + update_id = updateIdByteArray, event_offset = someOffset.unwrap, publication_time = 0, record_time = someRecordTime.toMicros, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, + synchronizer_id = someSynchronizerId1, event_sequential_id_first = 0, event_sequential_id_last = 0, ), @@ -879,6 +1692,7 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { synchronizerId = someSynchronizerId1, recordTime = someRecordTime, acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map.empty, ) val dtos = updateToDtos(update) @@ -891,7 +1705,7 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { user_id = completionInfo.userId, submitters = completionInfo.actAs.toSet, command_id = completionInfo.commandId, - update_id = Some(updateId), + update_id = Some(updateIdByteArray), rejection_status_code = None, rejection_status_message = None, rejection_status_details = None, @@ -899,17 +1713,17 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { deduplication_offset = None, deduplication_duration_nanos = None, deduplication_duration_seconds = None, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, + synchronizer_id = someSynchronizerId1, message_uuid = None, is_transaction = true, trace_context = serializedEmptyTraceContext, ), DbDto.TransactionMeta( - update_id = updateId, + update_id = updateIdByteArray, event_offset = someOffset.unwrap, publication_time = 0, record_time = someRecordTime.toMicros, - synchronizer_id = 
someSynchronizerId1.toProtoPrimitive, + synchronizer_id = someSynchronizerId1, event_sequential_id_first = 0, event_sequential_id_last = 0, ), @@ -955,52 +1769,69 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { recordTime = someRecordTime, externalTransactionHash = Some(externalTransactionHash), acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map.empty, ) val dtos = updateToDtos(update) dtos should contain theSameElementsInOrderAs List( - DbDto.EventExercise( - consuming = true, + DbDto.EventDeactivate( event_offset = someOffset.unwrap, - update_id = updateId, - ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, - command_id = Some(completionInfo.commandId), + update_id = updateIdByteArray, workflow_id = transactionMeta.workflowId, - user_id = Some(completionInfo.userId), + command_id = Some(completionInfo.commandId), submitters = Some(completionInfo.actAs.toSet), - node_id = exerciseNodeId.index, - contract_id = exerciseNode.targetCoid.toBytes.toByteArray, - template_id = templateIdWithPackageName(exerciseNode), - package_id = exerciseNode.templateId.packageId, - flat_event_witnesses = Set("signatory", "observer"), - tree_event_witnesses = Set("signatory", "observer", "divulgee"), - exercise_choice = exerciseNode.choiceId, - exercise_argument = emptyArray, - exercise_result = Some(emptyArray), - exercise_actors = Set("signatory"), - exercise_last_descendant_node_id = exerciseNodeId.index, - exercise_argument_compression = compressionAlgorithmId, - exercise_result_compression = compressionAlgorithmId, - event_sequential_id = 0, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, - trace_context = serializedEmptyTraceContext, record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), - ), - DbDto.IdFilterConsumingStakeholder( + event_type = PersistentEventType.ConsumingExercise.asInt, event_sequential_id = 0, + node_id = exerciseNodeId.index, + deactivated_event_sequential_id = None, + additional_witnesses = Some(Set("divulgee")), + exercise_choice = exerciseNode.choiceId, + exercise_choice_interface_id = None, + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdConsumingArg, emptyArray)), + exercise_result = Some(compressArrayWith(compressionAlgorithmIdConsumingRes, emptyArray)), + exercise_actors = Some(Set("signatory")), + exercise_last_descendant_node_id = Some(exerciseNodeId.index), + exercise_argument_compression = compressionAlgorithmIdConsumingArg, + exercise_result_compression = compressionAlgorithmIdConsumingRes, + reassignment_id = None, + assignment_exclusivity = None, + target_synchronizer_id = None, + reassignment_counter = None, + contract_id = exerciseNode.targetCoid, + internal_contract_id = None, template_id = templateIdWithPackageName(exerciseNode), - party_id = "signatory", + package_id = exerciseNode.templateId.packageId, + stakeholders = Set("signatory", "observer"), + ledger_effective_time = Some(transactionMeta.ledgerEffectiveTime.micros), ), - DbDto.IdFilterConsumingStakeholder( - event_sequential_id = 0, - template_id = templateIdWithPackageName(exerciseNode), - party_id = "observer", + DbDto.IdFilterDeactivateStakeholder( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "signatory", + first_per_sequential_id = true, + ) ), - DbDto.IdFilterConsumingNonStakeholderInformee( - 
event_sequential_id = 0, - template_id = templateIdWithPackageName(exerciseNode), - party_id = "divulgee", + DbDto.IdFilterDeactivateStakeholder( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "observer", + first_per_sequential_id = false, + ) + ), + DbDto.IdFilterDeactivateWitness( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "divulgee", + first_per_sequential_id = true, + ) ), DbDto.CommandCompletion( completion_offset = someOffset.unwrap, @@ -1009,7 +1840,7 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { user_id = completionInfo.userId, submitters = completionInfo.actAs.toSet, command_id = completionInfo.commandId, - update_id = Some(updateId), + update_id = Some(updateIdByteArray), rejection_status_code = None, rejection_status_message = None, rejection_status_details = None, @@ -1017,17 +1848,17 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { deduplication_offset = None, deduplication_duration_nanos = None, deduplication_duration_seconds = None, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, + synchronizer_id = someSynchronizerId1, message_uuid = None, is_transaction = true, trace_context = serializedEmptyTraceContext, ), DbDto.TransactionMeta( - update_id = updateId, + update_id = updateIdByteArray, event_offset = someOffset.unwrap, publication_time = 0, record_time = someRecordTime.toMicros, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, + synchronizer_id = someSynchronizerId1, event_sequential_id_first = 0, event_sequential_id_last = 0, ), @@ -1043,6 +1874,7 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { val externalTransactionHash = someExternalTransactionHash val builder = TxBuilder() val contractId = builder.newCid + val interfaceId = toIdentifier("M:I") val createNode = builder.create( id = contractId, templateId = "M:T", @@ -1059,6 +1891,7 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { result = Some(Value.ValueUnit), choiceObservers = Set("divulgee"), byKey = false, + interfaceId = Some(interfaceId), ) val createNodeId = builder.add(createNode) val exerciseNodeId = builder.add(exerciseNode) @@ -1073,84 +1906,107 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { recordTime = someRecordTime, externalTransactionHash = Some(externalTransactionHash), acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map(contractId -> 42L), ) val dtos = updateToDtos(update) - dtos.head shouldEqual DbDto.EventCreate( + dtos.head shouldEqual DbDto.EventActivate( event_offset = someOffset.unwrap, - update_id = updateId, - ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, - command_id = Some(completionInfo.commandId), + update_id = updateIdByteArray, workflow_id = transactionMeta.workflowId, - user_id = Some(completionInfo.userId), + command_id = Some(completionInfo.commandId), submitters = Some(completionInfo.actAs.toSet), - node_id = createNodeId.index, - contract_id = createNode.coid.toBytes.toByteArray, - template_id = templateIdWithPackageName(createNode), - package_id = createNode.templateId.packageId.toString, - flat_event_witnesses = Set("signatory", "observer"), - tree_event_witnesses = Set("signatory", "observer"), - create_argument = emptyArray, - create_signatories = Set("signatory"), - create_observers = Set("observer"), - create_key_value = None, - create_key_maintainers = None, - create_key_hash = None, - create_argument_compression = 
compressionAlgorithmId, - create_key_value_compression = None, - event_sequential_id = 0, - authentication_data = someContractAuthenticationData.toByteArray, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, - trace_context = serializedEmptyTraceContext, record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + event_type = PersistentEventType.Create.asInt, + event_sequential_id = 0, + node_id = createNodeId.index, + additional_witnesses = Some(Set.empty), + source_synchronizer_id = None, + reassignment_counter = None, + reassignment_id = None, + representative_package_id = createNode.templateId.packageId, + notPersistedContractId = createNode.coid, + internal_contract_id = 42L, + create_key_hash = None, ) Set(dtos(1), dtos(2)) should contain theSameElementsAs Set( - DbDto.IdFilterCreateStakeholder(0L, templateIdWithPackageName(createNode), "signatory"), - DbDto.IdFilterCreateStakeholder(0L, templateIdWithPackageName(createNode), "observer"), + DbDto.IdFilterActivateStakeholder( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "signatory", + first_per_sequential_id = true, + ) + ), + DbDto.IdFilterActivateStakeholder( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "observer", + first_per_sequential_id = false, + ) + ), ) - dtos(3) shouldEqual DbDto.EventExercise( - consuming = true, + dtos(3) shouldEqual DbDto.EventDeactivate( event_offset = someOffset.unwrap, - update_id = updateId, - ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, - command_id = Some(completionInfo.commandId), + update_id = updateIdByteArray, workflow_id = transactionMeta.workflowId, - user_id = Some(completionInfo.userId), + command_id = Some(completionInfo.commandId), submitters = Some(completionInfo.actAs.toSet), - node_id = exerciseNodeId.index, - contract_id = exerciseNode.targetCoid.toBytes.toByteArray, - template_id = templateIdWithPackageName(exerciseNode), - package_id = exerciseNode.templateId.packageId, - flat_event_witnesses = Set("signatory", "observer"), - tree_event_witnesses = Set("signatory", "observer", "divulgee"), - exercise_choice = exerciseNode.choiceId, - exercise_argument = emptyArray, - exercise_result = Some(emptyArray), - exercise_actors = Set("signatory"), - exercise_last_descendant_node_id = exerciseNodeId.index, - exercise_argument_compression = compressionAlgorithmId, - exercise_result_compression = compressionAlgorithmId, - event_sequential_id = 0, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, - trace_context = serializedEmptyTraceContext, record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), - ) - dtos(4) shouldEqual DbDto.IdFilterConsumingStakeholder( + event_type = PersistentEventType.ConsumingExercise.asInt, event_sequential_id = 0, + node_id = exerciseNodeId.index, + deactivated_event_sequential_id = None, + additional_witnesses = Some(Set("divulgee")), + exercise_choice = Some(exerciseNode.choiceId), + exercise_choice_interface_id = Some(interfaceId.toString), + exercise_argument = Some(compressArrayWith(compressionAlgorithmIdConsumingArg, emptyArray)), + exercise_result = Some(compressArrayWith(compressionAlgorithmIdConsumingRes, emptyArray)), + exercise_actors = Some(Set("signatory")), + exercise_last_descendant_node_id = 
Some(exerciseNodeId.index), + exercise_argument_compression = compressionAlgorithmIdConsumingArg, + exercise_result_compression = compressionAlgorithmIdConsumingRes, + reassignment_id = None, + assignment_exclusivity = None, + target_synchronizer_id = None, + reassignment_counter = None, + contract_id = exerciseNode.targetCoid, + internal_contract_id = None, template_id = templateIdWithPackageName(exerciseNode), - party_id = "signatory", + package_id = exerciseNode.templateId.packageId, + stakeholders = Set("signatory", "observer"), + ledger_effective_time = Some(transactionMeta.ledgerEffectiveTime.micros), ) - dtos(5) shouldEqual DbDto.IdFilterConsumingStakeholder( - event_sequential_id = 0, - template_id = templateIdWithPackageName(exerciseNode), - party_id = "observer", + dtos(4) shouldEqual DbDto.IdFilterDeactivateStakeholder( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "signatory", + first_per_sequential_id = true, + ) ) - dtos(6) shouldEqual DbDto.IdFilterConsumingNonStakeholderInformee( - event_sequential_id = 0, - template_id = templateIdWithPackageName(exerciseNode), - party_id = "divulgee", + dtos(5) shouldEqual DbDto.IdFilterDeactivateStakeholder( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "observer", + first_per_sequential_id = false, + ) + ) + dtos(6) shouldEqual DbDto.IdFilterDeactivateWitness( + IdFilter( + event_sequential_id = 0, + template_id = templateIdWithPackageName(exerciseNode), + party_id = "divulgee", + first_per_sequential_id = true, + ) ) dtos(7) shouldEqual DbDto.CommandCompletion( completion_offset = someOffset.unwrap, @@ -1159,7 +2015,7 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { user_id = completionInfo.userId, submitters = completionInfo.actAs.toSet, command_id = completionInfo.commandId, - update_id = Some(updateId), + update_id = Some(updateIdByteArray), rejection_status_code = None, rejection_status_message = None, rejection_status_details = None, @@ -1167,17 +2023,17 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { deduplication_offset = None, deduplication_duration_nanos = None, deduplication_duration_seconds = None, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, + synchronizer_id = someSynchronizerId1, message_uuid = None, is_transaction = true, trace_context = serializedEmptyTraceContext, ) dtos(8) shouldEqual DbDto.TransactionMeta( - update_id = updateId, + update_id = updateIdByteArray, event_offset = someOffset.unwrap, publication_time = 0, record_time = someRecordTime.toMicros, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, + synchronizer_id = someSynchronizerId1, event_sequential_id_first = 0, event_sequential_id_last = 0, ) @@ -1224,6 +2080,7 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { synchronizerId = someSynchronizerId1, recordTime = someRecordTime, acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map.empty, ) val dtos = updateToDtos(update) @@ -1235,7 +2092,7 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { user_id = completionInfo.userId, submitters = completionInfo.actAs.toSet, command_id = completionInfo.commandId, - update_id = Some(updateId), + update_id = Some(updateIdByteArray), rejection_status_code = None, rejection_status_message = None, rejection_status_details = None, @@ -1243,17 +2100,17 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { deduplication_offset = None, 
deduplication_duration_nanos = None, deduplication_duration_seconds = None, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, + synchronizer_id = someSynchronizerId1, message_uuid = None, is_transaction = true, trace_context = serializedEmptyTraceContext, ), DbDto.TransactionMeta( - update_id = updateId, + update_id = updateIdByteArray, event_offset = someOffset.unwrap, publication_time = 0, record_time = someRecordTime.toMicros, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, + synchronizer_id = someSynchronizerId1, event_sequential_id_first = 0, event_sequential_id_last = 0, ), @@ -1280,47 +2137,55 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { completionInfoO = None, transactionMeta = transactionMeta, transaction = transaction, - updateId = Ref.TransactionId.assertFromString("UpdateId"), + updateId = updateId, contractAuthenticationData = Map(contractId -> someContractAuthenticationData), synchronizerId = someSynchronizerId1, recordTime = someRecordTime, externalTransactionHash = Some(externalTransactionHash), acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map(contractId -> 42L), ) val dtos = updateToDtos(update) - dtos.head shouldEqual DbDto.EventCreate( + dtos.head shouldEqual DbDto.EventActivate( event_offset = someOffset.unwrap, - update_id = updateId, - ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, - command_id = None, + update_id = updateIdByteArray, workflow_id = transactionMeta.workflowId, - user_id = None, + command_id = None, submitters = None, - node_id = createNodeId.index, - contract_id = createNode.coid.toBytes.toByteArray, - template_id = templateIdWithPackageName(createNode), - package_id = createNode.templateId.packageId.toString, - flat_event_witnesses = Set("signatory", "observer"), - tree_event_witnesses = Set("signatory", "observer"), - create_argument = emptyArray, - create_signatories = Set("signatory"), - create_observers = Set("observer"), - create_key_value = None, - create_key_maintainers = None, - create_key_hash = None, - create_argument_compression = compressionAlgorithmId, - create_key_value_compression = None, - event_sequential_id = 0, - authentication_data = someContractAuthenticationData.toByteArray, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, - trace_context = serializedEmptyTraceContext, record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + event_type = PersistentEventType.Create.asInt, + event_sequential_id = 0, + node_id = createNodeId.index, + additional_witnesses = Some(Set.empty), + source_synchronizer_id = None, + reassignment_counter = None, + reassignment_id = None, + representative_package_id = createNode.templateId.packageId, + notPersistedContractId = createNode.coid, + internal_contract_id = 42L, + create_key_hash = None, ) Set(dtos(1), dtos(2)) should contain theSameElementsAs Set( - DbDto.IdFilterCreateStakeholder(0L, templateIdWithPackageName(createNode), "signatory"), - DbDto.IdFilterCreateStakeholder(0L, templateIdWithPackageName(createNode), "observer"), + DbDto.IdFilterActivateStakeholder( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "signatory", + first_per_sequential_id = true, + ) + ), + DbDto.IdFilterActivateStakeholder( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "observer", + first_per_sequential_id = false, + ) + ), ) dtos.size shouldEqual 4 } @@ -1381,7 
+2246,7 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { deduplication_offset = expectedDeduplicationOffset, deduplication_duration_seconds = expectedDeduplicationDurationSeconds, deduplication_duration_nanos = expectedDeduplicationDurationNanos, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, + synchronizer_id = someSynchronizerId1, message_uuid = None, is_transaction = true, trace_context = serializedEmptyTraceContext, @@ -1423,41 +2288,49 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { recordTime = someRecordTime, externalTransactionHash = Some(externalTransactionHash), acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map(contractId -> 42L), ) val dtos = updateToDtos(update) - dtos.head shouldEqual DbDto.EventCreate( + dtos.head shouldEqual DbDto.EventActivate( event_offset = someOffset.unwrap, - update_id = updateId, - ledger_effective_time = transactionMeta.ledgerEffectiveTime.micros, - command_id = Some(completionInfo.commandId), + update_id = updateIdByteArray, workflow_id = transactionMeta.workflowId, - user_id = Some(completionInfo.userId), + command_id = Some(completionInfo.commandId), submitters = Some(completionInfo.actAs.toSet), - node_id = createNodeId.index, - contract_id = createNode.coid.toBytes.toByteArray, - template_id = templateIdWithPackageName(createNode), - package_id = createNode.templateId.packageId.toString, - flat_event_witnesses = Set("signatory", "observer"), // stakeholders - tree_event_witnesses = Set("signatory", "observer"), // informees - create_argument = emptyArray, - create_signatories = Set("signatory"), - create_observers = Set("observer"), - create_key_value = None, - create_key_maintainers = None, - create_key_hash = None, - create_argument_compression = compressionAlgorithmId, - create_key_value_compression = None, - event_sequential_id = 0, - authentication_data = someContractAuthenticationData.toByteArray, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, - trace_context = serializedEmptyTraceContext, record_time = someRecordTime.toMicros, + synchronizer_id = someSynchronizerId1, + trace_context = serializedEmptyTraceContext, external_transaction_hash = Some(externalTransactionHash.unwrap.toByteArray), + event_type = PersistentEventType.Create.asInt, + event_sequential_id = 0, + node_id = createNodeId.index, + additional_witnesses = Some(Set.empty), + source_synchronizer_id = None, + reassignment_counter = None, + reassignment_id = None, + representative_package_id = createNode.templateId.packageId, + notPersistedContractId = createNode.coid, + internal_contract_id = 42L, + create_key_hash = None, ) Set(dtos(1), dtos(2)) should contain theSameElementsAs Set( - DbDto.IdFilterCreateStakeholder(0L, templateIdWithPackageName(createNode), "signatory"), - DbDto.IdFilterCreateStakeholder(0L, templateIdWithPackageName(createNode), "observer"), + DbDto.IdFilterActivateStakeholder( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "signatory", + first_per_sequential_id = true, + ) + ), + DbDto.IdFilterActivateStakeholder( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "observer", + first_per_sequential_id = false, + ) + ), ) dtos(3) shouldEqual DbDto.CommandCompletion( completion_offset = someOffset.unwrap, @@ -1466,7 +2339,7 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { user_id = completionInfo.userId, submitters = completionInfo.actAs.toSet, command_id = completionInfo.commandId, - update_id = Some(updateId), + update_id = Some(updateIdByteArray), 
rejection_status_code = None, rejection_status_message = None, rejection_status_details = None, @@ -1474,17 +2347,17 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { deduplication_offset = expectedDeduplicationOffset, deduplication_duration_seconds = expectedDeduplicationDurationSeconds, deduplication_duration_nanos = expectedDeduplicationDurationNanos, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, + synchronizer_id = someSynchronizerId1, message_uuid = None, is_transaction = true, trace_context = serializedEmptyTraceContext, ) dtos(4) shouldEqual DbDto.TransactionMeta( - update_id = updateId, + update_id = updateIdByteArray, event_offset = someOffset.unwrap, publication_time = 0, record_time = someRecordTime.toMicros, - synchronizer_id = someSynchronizerId1.toProtoPrimitive, + synchronizer_id = someSynchronizerId1, event_sequential_id_first = 0, event_sequential_id_last = 0, ) @@ -1529,38 +2402,32 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { recordTime = someRecordTime, synchronizerId = targetSynchronizerId.unwrap, acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map(contractId -> 42L), ) val dtos = updateToDtos(update) - dtos.head shouldEqual DbDto.EventAssign( + dtos.head shouldEqual DbDto.EventActivate( event_offset = someOffset.unwrap, - update_id = update.updateId, - command_id = Some(completionInfo.commandId), + update_id = update.updateId.toProtoPrimitive.toByteArray, workflow_id = Some(someWorkflowId), - submitter = Option(someParty), + command_id = Some(completionInfo.commandId), + submitters = Option(Set(someParty)), + record_time = someRecordTime.toMicros, + synchronizer_id = SynchronizerId.tryFromString("x::synchronizer2"), + trace_context = serializedEmptyTraceContext, + external_transaction_hash = None, + event_type = PersistentEventType.Assign.asInt, + event_sequential_id = 0, node_id = 0, - contract_id = createNode.coid.toBytes.toByteArray, - template_id = templateIdWithPackageName(createNode), - package_id = createNode.templateId.packageId.toString, - flat_event_witnesses = Set("signatory", "observer", "observer2"), - create_argument = emptyArray, - create_signatories = Set("signatory"), - create_observers = Set("observer", "observer2"), - create_key_value = None, - create_key_maintainers = None, + additional_witnesses = None, + source_synchronizer_id = Some(SynchronizerId.tryFromString("x::synchronizer1")), + reassignment_counter = Some(1500L), + reassignment_id = Some(ReassignmentId.tryCreate("001000000000").toBytes.toByteArray), + representative_package_id = createNode.templateId.packageId, + notPersistedContractId = createNode.coid, + internal_contract_id = 42L, create_key_hash = None, - create_argument_compression = compressionAlgorithmId, - create_key_value_compression = None, - event_sequential_id = 0, - ledger_effective_time = 17000000, - authentication_data = someContractAuthenticationData.toByteArray, - source_synchronizer_id = "x::synchronizer1", - target_synchronizer_id = "x::synchronizer2", - reassignment_id = "001000000000", - reassignment_counter = 1500L, - trace_context = serializedEmptyTraceContext, - record_time = someRecordTime.toMicros, ) dtos(4) shouldEqual DbDto.CommandCompletion( completion_offset = someOffset.unwrap, @@ -1569,7 +2436,7 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { user_id = completionInfo.userId, submitters = completionInfo.actAs.toSet, command_id = completionInfo.commandId, - update_id = Some(updateId), + update_id = Some(updateIdByteArray), 
rejection_status_code = None, rejection_status_message = None, rejection_status_details = None, @@ -1577,24 +2444,45 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { deduplication_offset = None, deduplication_duration_nanos = None, deduplication_duration_seconds = None, - synchronizer_id = "x::synchronizer2", + synchronizer_id = SynchronizerId.tryFromString("x::synchronizer2"), message_uuid = None, is_transaction = false, trace_context = serializedEmptyTraceContext, ) dtos(5) shouldEqual DbDto.TransactionMeta( - update_id = updateId, + update_id = updateIdByteArray, event_offset = someOffset.unwrap, publication_time = 0, record_time = someRecordTime.toMicros, - synchronizer_id = "x::synchronizer2", + synchronizer_id = SynchronizerId.tryFromString("x::synchronizer2"), event_sequential_id_first = 0, event_sequential_id_last = 0, ) Set(dtos(1), dtos(2), dtos(3)) should contain theSameElementsAs Set( - DbDto.IdFilterAssignStakeholder(0L, templateIdWithPackageName(createNode), "signatory"), - DbDto.IdFilterAssignStakeholder(0L, templateIdWithPackageName(createNode), "observer"), - DbDto.IdFilterAssignStakeholder(0L, templateIdWithPackageName(createNode), "observer2"), + DbDto.IdFilterActivateStakeholder( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "signatory", + first_per_sequential_id = true, + ) + ), + DbDto.IdFilterActivateStakeholder( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "observer", + first_per_sequential_id = false, + ) + ), + DbDto.IdFilterActivateStakeholder( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "observer2", + first_per_sequential_id = false, + ) + ), ) dtos.size shouldEqual 6 } @@ -1639,29 +2527,44 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { recordTime = CantonTimestamp.ofEpochMicro(120), synchronizerId = sourceSynchronizerId.unwrap, acsChangeFactory = TestAcsChangeFactory(), + internalContractIds = Map.empty, ) val dtos = updateToDtos(update) - dtos.head shouldEqual DbDto.EventUnassign( + dtos.head shouldEqual DbDto.EventDeactivate( event_offset = someOffset.unwrap, - update_id = update.updateId, - command_id = Some(completionInfo.commandId), + update_id = update.updateId.toProtoPrimitive.toByteArray, workflow_id = Some(someWorkflowId), - submitter = someParty, + command_id = Some(completionInfo.commandId), + submitters = Some(Set(someParty)), + record_time = 120L, + synchronizer_id = SynchronizerId.tryFromString("x::synchronizer1"), + trace_context = serializedEmptyTraceContext, + external_transaction_hash = None, + event_type = PersistentEventType.Unassign.asInt, + event_sequential_id = 0, node_id = 0, - contract_id = createNode.coid.toBytes.toByteArray, + deactivated_event_sequential_id = None, + additional_witnesses = None, + exercise_choice = None, + exercise_choice_interface_id = None, + exercise_argument = None, + exercise_result = None, + exercise_actors = None, + exercise_last_descendant_node_id = None, + exercise_argument_compression = None, + exercise_result_compression = None, + reassignment_id = Some(ReassignmentId.tryCreate("001000000000").toBytes.toByteArray), + assignment_exclusivity = Some(123456L), + target_synchronizer_id = Some(SynchronizerId.tryFromString("x::synchronizer2")), + reassignment_counter = Some(1500L), + contract_id = createNode.coid, + internal_contract_id = None, template_id = templateIdWithPackageName(createNode), package_id = createNode.templateId.packageId, - flat_event_witnesses = Set("signatory12", "observer23", "asdasdasd"), - event_sequential_id = 
0, - source_synchronizer_id = "x::synchronizer1", - target_synchronizer_id = "x::synchronizer2", - reassignment_id = "001000000000", - reassignment_counter = 1500L, - assignment_exclusivity = Some(123456L), - trace_context = serializedEmptyTraceContext, - record_time = 120L, + stakeholders = Set("signatory12", "observer23", "asdasdasd"), + ledger_effective_time = None, ) dtos(4) shouldEqual DbDto.CommandCompletion( completion_offset = someOffset.unwrap, @@ -1670,7 +2573,7 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { user_id = completionInfo.userId, submitters = completionInfo.actAs.toSet, command_id = completionInfo.commandId, - update_id = Some(updateId), + update_id = Some(updateIdByteArray), rejection_status_code = None, rejection_status_message = None, rejection_status_details = None, @@ -1678,24 +2581,45 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { deduplication_offset = None, deduplication_duration_nanos = None, deduplication_duration_seconds = None, - synchronizer_id = "x::synchronizer1", + synchronizer_id = SynchronizerId.tryFromString("x::synchronizer1"), message_uuid = None, is_transaction = false, trace_context = serializedEmptyTraceContext, ) dtos(5) shouldEqual DbDto.TransactionMeta( - update_id = updateId, + update_id = updateIdByteArray, event_offset = someOffset.unwrap, publication_time = 0, record_time = 120L, - synchronizer_id = "x::synchronizer1", + synchronizer_id = SynchronizerId.tryFromString("x::synchronizer1"), event_sequential_id_first = 0, event_sequential_id_last = 0, ) Set(dtos(1), dtos(2), dtos(3)) should contain theSameElementsAs Set( - DbDto.IdFilterUnassignStakeholder(0L, templateIdWithPackageName(createNode), "signatory12"), - DbDto.IdFilterUnassignStakeholder(0L, templateIdWithPackageName(createNode), "observer23"), - DbDto.IdFilterUnassignStakeholder(0L, templateIdWithPackageName(createNode), "asdasdasd"), + DbDto.IdFilterDeactivateStakeholder( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "signatory12", + first_per_sequential_id = true, + ) + ), + DbDto.IdFilterDeactivateStakeholder( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "observer23", + first_per_sequential_id = false, + ) + ), + DbDto.IdFilterDeactivateStakeholder( + IdFilter( + 0L, + templateIdWithPackageName(createNode), + "asdasdasd", + first_per_sequential_id = false, + ) + ), ) dtos.size shouldEqual 6 } @@ -1758,12 +2682,12 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { DbDto.EventPartyToParticipant( event_sequential_id = 0, event_offset = someOffset.unwrap, - update_id = update.updateId, + update_id = update.updateId.toProtoPrimitive.toByteArray, party_id = partyId, participant_id = participantId, participant_permission = participantPermissionInt(authorizationEvent), participant_authorization_event = authorizationEventInt(authorizationEvent), - synchronizer_id = someSynchronizerId1.toProtoPrimitive, + synchronizer_id = someSynchronizerId1, record_time = someRecordTime.toMicros, trace_context = serializedEmptyTraceContext, ) @@ -1821,11 +2745,11 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { ) dtos should contain( DbDto.TransactionMeta( - update_id = updateId, + update_id = updateIdByteArray, event_offset = someOffset.unwrap, publication_time = 0, record_time = someRecordTime.toMicros, - synchronizer_id = "x::synchronizer1", + synchronizer_id = SynchronizerId.tryFromString("x::synchronizer1"), event_sequential_id_first = 0, event_sequential_id_last = 0, ) @@ -1840,7 +2764,7 @@ class 
UpdateToDbDtoSpec extends AnyWordSpec with Matchers { val dtos = updateToDtos(update) dtos.head shouldEqual DbDto.SequencerIndexMoved( - synchronizerId = someSynchronizerId1.toProtoPrimitive + synchronizerId = someSynchronizerId1 ) dtos.size shouldEqual 1 } @@ -1888,11 +2812,34 @@ object UpdateToDbDtoSpec { // These tests do not check the correctness of compression. // All values are compressed using a dummy (identity) algorithm in this suite. - private val compressionAlgorithmId = Some(123) - private val compressionStrategy: CompressionStrategy = { - val noCompression = new FieldCompressionStrategy(compressionAlgorithmId, x => x) - CompressionStrategy(noCompression, noCompression, noCompression, noCompression) - } + private val compressionAlgorithmIdInvalid = Some(12) + private val compressionAlgorithmIdConsumingArg = Some(13) + private val compressionAlgorithmIdConsumingRes = Some(14) + private val compressionAlgorithmIdNonConsumingArg = Some(15) + private val compressionAlgorithmIdNonConsumingRes = Some(16) + private val compressionStrategy: CompressionStrategy = CompressionStrategy( + new FieldCompressionStrategy(compressionAlgorithmIdInvalid, x => x), + new FieldCompressionStrategy(compressionAlgorithmIdInvalid, x => x), + new FieldCompressionStrategy( + compressionAlgorithmIdConsumingArg, + compressArrayWith(compressionAlgorithmIdConsumingArg, _), + ), + new FieldCompressionStrategy( + compressionAlgorithmIdConsumingRes, + compressArrayWith(compressionAlgorithmIdConsumingRes, _), + ), + new FieldCompressionStrategy( + compressionAlgorithmIdNonConsumingArg, + compressArrayWith(compressionAlgorithmIdNonConsumingArg, _), + ), + new FieldCompressionStrategy( + compressionAlgorithmIdNonConsumingRes, + compressArrayWith(compressionAlgorithmIdNonConsumingRes, _), + ), + ) + + private def compressArrayWith(id: Option[Int], x: Array[Byte]) = + x ++ Array(id.getOrElse(-1).toByte) private val someParticipantId = Ref.ParticipantId.assertFromString("UpdateToDbDtoSpecParticipant") @@ -1930,11 +2877,10 @@ object UpdateToDbDtoSpec { optNodeSeeds = None, optByKeyNodes = None, ) - private val someExternalTransactionHash = - Hash.digest(HashPurpose.PreparedSubmission, ByteString.copyFromUtf8("mock_hash"), Sha256) private val someContractAuthenticationData = Bytes.assertFromString("00abcd") + private val someRepresentativePackageId = Ref.PackageId.assertFromString("rp-id") - implicit private val DbDtoEqual: org.scalactic.Equality[DbDto] = DbDtoEq.DbDtoEq + implicit private val DbDtoEqual: org.scalactic.Equality[DbDto] = ScalatestEqualityHelpers.DbDtoEq private val serializedEmptyTraceContext = SerializableTraceContext(emptyTraceContext).toDamlProto.toByteArray diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/ContractStateCachesSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/ContractStateCachesSpec.scala index c3715bbf62..6cd51bf9d5 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/ContractStateCachesSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/ContractStateCachesSpec.scala @@ -4,16 +4,16 @@ package com.digitalasset.canton.platform.store.cache import cats.data.NonEmptyVector -import com.digitalasset.canton.data.Offset +import com.digitalasset.canton.data.{CantonTimestamp, Offset} +import
com.digitalasset.canton.ledger.participant.state.index.ContractStateStatus import com.digitalasset.canton.metrics.LedgerApiServerMetrics import com.digitalasset.canton.platform.* +import com.digitalasset.canton.platform.store.backend.ParameterStorageBackend.LedgerEnd import com.digitalasset.canton.platform.store.dao.events.ContractStateEvent import com.digitalasset.canton.{HasExecutionContext, TestEssentials} import com.digitalasset.daml.lf.crypto.Hash -import com.digitalasset.daml.lf.data.{Bytes, ImmArray, Ref, Time} -import com.digitalasset.daml.lf.language.LanguageVersion -import com.digitalasset.daml.lf.transaction.{CreationTime, Node as LfNode} -import com.digitalasset.daml.lf.value.Value.{ValueInt64, ValueRecord} +import com.digitalasset.daml.lf.data.Ref +import com.digitalasset.daml.lf.value.Value.ValueInt64 import org.mockito.MockitoSugar import org.scalatest.OptionValues import org.scalatest.flatspec.AnyFlatSpec @@ -31,72 +31,69 @@ class ContractStateCachesSpec behavior of classOf[ContractStateCaches].getSimpleName "build" should "set the cache index to the initialization index" in { - val cacheInitializationOffset = offset(1337) + val cacheInitializationEventSeqId = 1337L @SuppressWarnings(Array("com.digitalasset.canton.GlobalExecutionContext")) val contractStateCaches = ContractStateCaches.build( - Some(cacheInitializationOffset), + cacheInitializationEventSeqId, maxContractsCacheSize = 1L, maxKeyCacheSize = 1L, metrics = LedgerApiServerMetrics.ForTesting, loggerFactory, ) - contractStateCaches.keyState.cacheIndex shouldBe Some(cacheInitializationOffset) - contractStateCaches.contractState.cacheIndex shouldBe Some(cacheInitializationOffset) + contractStateCaches.keyState.cacheEventSeqIdIndex shouldBe cacheInitializationEventSeqId + contractStateCaches.contractState.cacheEventSeqIdIndex shouldBe cacheInitializationEventSeqId } "push" should "update the caches with a batch of events" in new TestScope { - val previousCreate = createEvent(offset = offset(1), withKey = true) + val previousCreate = createEvent(withKey = true) - val create1 = createEvent(offset = offset(2), withKey = false) - val create2 = createEvent(offset = offset(3), withKey = true) - val archive1 = archiveEvent(create1, offset(3)) - val archivedPrevious = archiveEvent(previousCreate, offset(4)) + val create1 = createEvent(withKey = false) + val create2 = createEvent(withKey = true) + val archive1 = archiveEvent(create1) + val archivedPrevious = archiveEvent(previousCreate) val batch = NonEmptyVector.of(create1, create2, archive1, archivedPrevious) val expectedContractStateUpdates = Map( - create1.contractId -> contractArchived(create1), - create2.contractId -> contractActive(create2), - previousCreate.contractId -> contractArchived(previousCreate), + create1.contractId -> ContractStateStatus.Archived, + create2.contractId -> ContractStateStatus.Active, + previousCreate.contractId -> ContractStateStatus.Archived, ) val expectedKeyStateUpdates = Map( create2.globalKey.value -> keyAssigned(create2), previousCreate.globalKey.value -> ContractKeyStateValue.Unassigned, ) - contractStateCaches.push(batch) - verify(contractStateCache).putBatch(offset(4), expectedContractStateUpdates) - verify(keyStateCache).putBatch(offset(4), expectedKeyStateUpdates) + contractStateCaches.push(batch, 4) + verify(contractStateCache).putBatch(4, expectedContractStateUpdates) + verify(keyStateCache).putBatch(4, expectedKeyStateUpdates) } "push" should "update the key state cache even if no key updates" in new TestScope { - val create1 = 
createEvent(offset = offset(2), withKey = false) + val create1 = createEvent(withKey = false) val batch = NonEmptyVector.of(create1) - val expectedContractStateUpdates = Map(create1.contractId -> contractActive(create1)) + val expectedContractStateUpdates = Map(create1.contractId -> ContractStateStatus.Active) - contractStateCaches.push(batch) - verify(contractStateCache).putBatch(offset(2), expectedContractStateUpdates) - verify(keyStateCache).putBatch(offset(2), Map.empty) - } - - "push" should "update the key state cache even if only reassignment updates" in new TestScope { - val assign1 = ContractStateEvent.ReassignmentAccepted(offset(2)) - - val batch = NonEmptyVector.of(assign1) - - contractStateCaches.push(batch) - verify(contractStateCache).putBatch(offset(2), Map.empty) - verify(keyStateCache).putBatch(offset(2), Map.empty) + contractStateCaches.push(batch, 2) + verify(contractStateCache).putBatch(2, expectedContractStateUpdates) + verify(keyStateCache).putBatch(2, Map.empty) } "reset" should "reset the caches on `reset`" in new TestScope { - private val someOffset = Some(Offset.tryFromLong(112233L)) + private val someOffset = Some( + LedgerEnd( + lastOffset = Offset.tryFromLong(112243L), + lastEventSeqId = 125, + lastStringInterningId = 0, + lastPublicationTime = CantonTimestamp.MinValue, + ) + ) contractStateCaches.reset(someOffset) - verify(keyStateCache).reset(someOffset) - verify(contractStateCache).reset(someOffset) + verify(keyStateCache).reset(125) + verify(contractStateCache).reset(125) } private trait TestScope { @@ -105,8 +102,8 @@ class ContractStateCachesSpec val keyStateCache: StateCache[Key, ContractKeyStateValue] = mock[StateCache[Key, ContractKeyStateValue]] - val contractStateCache: StateCache[ContractId, ContractStateValue] = - mock[StateCache[ContractId, ContractStateValue]] + val contractStateCache: StateCache[ContractId, ContractStateStatus] = + mock[StateCache[ContractId, ContractStateStatus]] val contractStateCaches = new ContractStateCaches( keyStateCache, @@ -115,78 +112,33 @@ class ContractStateCachesSpec ) def createEvent( - offset: Offset, - withKey: Boolean, + withKey: Boolean ): ContractStateEvent.Created = { - val cId = contractIdx.incrementAndGet() - + val cId = contractId(contractIdx.incrementAndGet()) val templateId = Identifier.assertFromString(s"some:template:name") val packageName = Ref.PackageName.assertFromString("pkg-name") - val contractArgument = ValueRecord( - Some(templateId), - ImmArray(None -> ValueInt64(cId)), - ) - val signatories = Set(Ref.Party.assertFromString(s"party-$cId")) - val stakeholders = signatories - - val key = - if (withKey) - Some( - KeyWithMaintainers.assertBuild( - templateId, - ValueInt64(keyIdx.incrementAndGet()), - Set.empty, - packageName, - ) - ) - else - None - - val contractInstance = - FatContract.fromCreateNode( - LfNode.Create( - coid = contractId(cId), - packageName = Ref.PackageName.assertFromString("pkg-name"), - templateId = Identifier.assertFromString(s"some:template:name"), - arg = contractArgument, - signatories = signatories, - stakeholders = stakeholders, - keyOpt = key, - version = LanguageVersion.Major.V2.maxStableVersion, - ), - createTime = CreationTime.CreatedAt(Time.Timestamp(cId)), - authenticationData = Bytes.Empty, + val key = Option.when(withKey)( + Key.assertBuild( + templateId, + ValueInt64(keyIdx.incrementAndGet()), + packageName, ) - - ContractStateEvent.Created(contractInstance, offset) + ) + ContractStateEvent.Created(cId, key) } def archiveEvent( - create: 
ContractStateEvent.Created, - offset: Offset, + create: ContractStateEvent.Created ): ContractStateEvent.Archived = ContractStateEvent.Archived( contractId = create.contractId, globalKey = create.globalKey, - stakeholders = create.contract.stakeholders, - eventOffset = offset, ) } - private def contractActive(create: ContractStateEvent.Created) = - ContractStateValue.Active(create.contract) - - private def contractArchived(create: ContractStateEvent.Created) = - ContractStateValue.Archived(create.contract.stakeholders) - private def keyAssigned(create: ContractStateEvent.Created) = - ContractKeyStateValue.Assigned( - create.contractId, - create.contract.stakeholders, - ) + ContractKeyStateValue.Assigned(create.contractId) private def contractId(id: Long): ContractId = ContractId.V1(Hash.hashPrivateKey(id.toString)) - - private def offset(idx: Int) = Offset.tryFromLong(idx.toLong) } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/InMemoryFanoutBufferSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/InMemoryFanoutBufferSpec.scala index d2b7dacab8..80c6e77a1f 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/InMemoryFanoutBufferSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/InMemoryFanoutBufferSpec.scala @@ -16,10 +16,10 @@ import com.digitalasset.canton.platform.store.cache.InMemoryFanoutBuffer.{ UnorderedException, } import com.digitalasset.canton.platform.store.interfaces.TransactionLogUpdate -import com.digitalasset.canton.protocol.ReassignmentId +import com.digitalasset.canton.protocol.{ReassignmentId, TestUpdateId, UpdateId} import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.util.ReassignmentTag -import com.digitalasset.daml.lf.data.{Ref, Time} +import com.digitalasset.daml.lf.data.Time import org.scalatest.Succeeded import org.scalatest.compatible.Assertion import org.scalatest.matchers.should.Matchers @@ -545,7 +545,7 @@ class InMemoryFanoutBufferSpec private def txAccepted(idx: Long, offset: Offset) = TransactionLogUpdate.TransactionAccepted( - updateId = s"tx-$idx", + updateId = TestUpdateId(s"tx-$idx").toHexString, workflowId = s"workflow-$idx", effectiveAt = Time.Timestamp.Epoch, offset = offset, @@ -569,7 +569,7 @@ class InMemoryFanoutBufferSpec private def reassignmentAccepted(idx: Long, offset: Offset) = TransactionLogUpdate.ReassignmentAccepted( - updateId = s"reassignment-$idx", + updateId = TestUpdateId(s"reassignment-$idx").toHexString, workflowId = s"workflow-$idx", offset = offset, completionStreamResponse = None, @@ -588,7 +588,7 @@ class InMemoryFanoutBufferSpec private def topologyTxAccepted(idx: Long, offset: Offset) = TransactionLogUpdate.TopologyTransactionEffective( - updateId = s"topology-tx-$idx", + updateId = TestUpdateId(s"topology-tx-$idx").toHexString, offset = offset, effectiveTime = Time.Timestamp.Epoch, synchronizerId = someSynchronizerId.toProtoPrimitive, @@ -602,9 +602,9 @@ class InMemoryFanoutBufferSpec txs.foldLeft(succeed) { case (Succeeded, tx) => buffer.lookup( - LookupKey.UpdateId(Ref.TransactionId.assertFromString(getUpdateId(tx))) + LookupKey.ByUpdateId(getUpdateId(tx)) ) shouldBe Some(tx) - buffer.lookup(LookupKey.Offset(tx.offset)) shouldBe Some(tx) + buffer.lookup(LookupKey.ByOffset(tx.offset)) shouldBe Some(tx) case (failed, _) => failed } @@ -615,19 
+615,22 @@ class InMemoryFanoutBufferSpec txs.foldLeft(succeed) { case (Succeeded, tx) => buffer.lookup( - LookupKey.UpdateId(Ref.TransactionId.assertFromString(getUpdateId(tx))) + LookupKey.ByUpdateId(getUpdateId(tx)) ) shouldBe None - buffer.lookup(LookupKey.Offset(tx.offset)) shouldBe None + buffer.lookup(LookupKey.ByOffset(tx.offset)) shouldBe None case (failed, _) => failed } - private def getUpdateId(tx: TransactionLogUpdate): String = tx match { - case txAccepted: TransactionLogUpdate.TransactionAccepted => txAccepted.updateId - case _: TransactionLogUpdate.TransactionRejected => - throw new RuntimeException("did not expect a TransactionRejected") - case reassignment: TransactionLogUpdate.ReassignmentAccepted => reassignment.updateId - case topologyTransaction: TransactionLogUpdate.TopologyTransactionEffective => - topologyTransaction.updateId + private def getUpdateId(tx: TransactionLogUpdate): UpdateId = { + val updateStr = tx match { + case txAccepted: TransactionLogUpdate.TransactionAccepted => txAccepted.updateId + case _: TransactionLogUpdate.TransactionRejected => + throw new RuntimeException("did not expect a TransactionRejected") + case reassignment: TransactionLogUpdate.ReassignmentAccepted => reassignment.updateId + case topologyTransaction: TransactionLogUpdate.TopologyTransactionEffective => + topologyTransaction.updateId + } + UpdateId.fromLedgerString(updateStr).valueOrFail("invalid update id") } } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/MutableCacheBackedContractStoreRaceTests.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/MutableCacheBackedContractStoreRaceTests.scala index a1f5489499..0e64526deb 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/MutableCacheBackedContractStoreRaceTests.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/MutableCacheBackedContractStoreRaceTests.scala @@ -6,10 +6,17 @@ package com.digitalasset.canton.platform.store.cache import cats.data.NonEmptyVector import com.daml.ledger.api.testing.utils.PekkoBeforeAndAfterAll import com.digitalasset.canton.TestEssentials -import com.digitalasset.canton.data.Offset +import com.digitalasset.canton.ledger.participant.state.index.ContractStateStatus +import com.digitalasset.canton.ledger.participant.state.index.ContractStateStatus.{ + Active, + Archived, + ExistingContractStatus, +} import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory} import com.digitalasset.canton.metrics.LedgerApiServerMetrics +import com.digitalasset.canton.participant.store.memory.InMemoryContractStore +import com.digitalasset.canton.participant.store.{ContractStore, PersistedContractInstance} import com.digitalasset.canton.platform.* import com.digitalasset.canton.platform.store.cache.MutableCacheBackedContractStoreRaceTests.{ IndexViewContractsReader, @@ -22,16 +29,16 @@ import com.digitalasset.canton.platform.store.cache.MutableCacheBackedContractSt import com.digitalasset.canton.platform.store.dao.events.ContractStateEvent import com.digitalasset.canton.platform.store.interfaces.LedgerDaoContractsReader import com.digitalasset.canton.platform.store.interfaces.LedgerDaoContractsReader.* -import com.digitalasset.daml.lf.crypto.Hash -import 
com.digitalasset.daml.lf.data.{Bytes, Ref, Time} -import com.digitalasset.daml.lf.language.LanguageMajorVersion -import com.digitalasset.daml.lf.transaction.{CreationTime, Node, Versioned} +import com.digitalasset.canton.protocol.{ExampleContractFactory, ExampleTransactionFactory} +import com.digitalasset.daml.lf.data.Ref +import com.digitalasset.daml.lf.transaction.GlobalKey import com.digitalasset.daml.lf.value.Value import com.digitalasset.daml.lf.value.Value.ValueInt64 import org.apache.pekko.Done import org.apache.pekko.stream.Materializer import org.apache.pekko.stream.scaladsl.Source import org.scalatest.Assertions.fail +import org.scalatest.concurrent.ScalaFutures.convertScalaFuture import org.scalatest.flatspec.AsyncFlatSpec import java.util.concurrent.Executors @@ -52,11 +59,25 @@ class MutableCacheBackedContractStoreRaceTests it should "preserve causal monotonicity under contention for key state" in { val workload = generateWorkload(keysCount = 10L, contractsCount = 1000L) val indexViewContractsReader = IndexViewContractsReader()(unboundedExecutionContext) + val participantContractStore = new InMemoryContractStore( + timeouts = timeouts, + loggerFactory = loggerFactory, + )(unboundedExecutionContext) val contractStore = - buildContractStore(indexViewContractsReader, unboundedExecutionContext, loggerFactory) + buildContractStore( + indexViewContractsReader, + unboundedExecutionContext, + loggerFactory, + participantContractStore, + ) for { - _ <- test(indexViewContractsReader, workload, unboundedExecutionContext) { ec => event => + _ <- test( + indexViewContractsReader, + participantContractStore, + workload, + unboundedExecutionContext, + ) { ec => event => assert_sync_vs_async_race_key(contractStore)(event)(ec) } } yield succeed @@ -65,11 +86,25 @@ class MutableCacheBackedContractStoreRaceTests it should "preserve causal monotonicity under contention for contract state" in { val workload = generateWorkload(keysCount = 10L, contractsCount = 1000L) val indexViewContractsReader = IndexViewContractsReader()(unboundedExecutionContext) + val participantContractStore = new InMemoryContractStore( + timeouts = timeouts, + loggerFactory = loggerFactory, + )(unboundedExecutionContext) val contractStore = - buildContractStore(indexViewContractsReader, unboundedExecutionContext, loggerFactory) + buildContractStore( + indexViewContractsReader, + unboundedExecutionContext, + loggerFactory, + participantContractStore, + ) for { - _ <- test(indexViewContractsReader, workload, unboundedExecutionContext) { ec => event => + _ <- test( + indexViewContractsReader, + participantContractStore, + workload, + unboundedExecutionContext, + ) { ec => event => assert_sync_vs_async_race_contract(contractStore)(event)(ec) } } yield succeed @@ -82,7 +117,8 @@ private object MutableCacheBackedContractStoreRaceTests { private def test( indexViewContractsReader: IndexViewContractsReader, - workload: Seq[Offset => SimplifiedContractStateEvent], + participantContractStore: ContractStore, + workload: Seq[Long => SimplifiedContractStateEvent], unboundedExecutionContext: ExecutionContext, )( assert: ExecutionContext => SimplifiedContractStateEvent => Future[Unit] @@ -94,11 +130,12 @@ private object MutableCacheBackedContractStoreRaceTests { eventCtor => { counter += 1 - Iterator(eventCtor(offset(counter))) + Iterator(eventCtor(counter)) } } .map { event => indexViewContractsReader.update(event) + update(participantContractStore, event)(unboundedExecutionContext).futureValue event } .mapAsync(1)( @@ -117,7 +154,7 @@ 
private object MutableCacheBackedContractStoreRaceTests { // Use Future.delegate here to ensure immediate control handover to the next statement val keyLookupF = Future.delegate(contractStore.lookupContractKey(stakeholders, event.key)) // Update the mutable contract state cache synchronously - contractStore.contractStateCaches.push(NonEmptyVector.of(contractStateEvent)) + contractStore.contractStateCaches.push(NonEmptyVector.of(contractStateEvent), event.eventSeqId) for { // Lookup after synchronous update @@ -141,7 +178,7 @@ private object MutableCacheBackedContractStoreRaceTests { val keyLookupF = Future.delegate(contractStore.lookupActiveContract(stakeholders, event.contractId)) // Update the mutable contract state cache synchronously - contractStore.contractStateCaches.push(NonEmptyVector.of(contractStateEvent)) + contractStore.contractStateCaches.push(NonEmptyVector.of(contractStateEvent), event.eventSeqId) for { // Lookup after synchronous update @@ -178,11 +215,11 @@ private object MutableCacheBackedContractStoreRaceTests { assignment: Option[FatContract] )(event: SimplifiedContractStateEvent): Unit = assignment match { - case Some(actualContract) if (event.contract != toThin(actualContract)) || !event.created => + case Some(actualContract) if (event.contract != actualContract) || !event.created => fail(message = s"Contract state corruption for ${event.contractId}: " + s"expected ${if (event.created) s"active contract (${event.contract})" - else "non-active contract"}, but got assignment to ${toThin(actualContract)}" + else "non-active contract"}, but got assignment to $actualContract" ) case None if event.created => fail(message = @@ -198,9 +235,9 @@ private object MutableCacheBackedContractStoreRaceTests { )(implicit ec: ExecutionContext) = for { _ <- indexViewContractsReader - .lookupKeyState(event.key, event.offset) + .lookupKeyState(event.key, event.eventSeqId) .map { - case KeyAssigned(contractId, _) if contractId == event.contractId && event.created => + case KeyAssigned(contractId) if contractId == event.contractId && event.created => case KeyUnassigned if !event.created => case actual => fail( @@ -209,11 +246,10 @@ private object MutableCacheBackedContractStoreRaceTests { ) } _ <- indexViewContractsReader - .lookupContractState(event.contractId, event.offset) + .lookupContractState(event.contractId, event.eventSeqId) .map { - case Some(ActiveContract(actualContract)) - if event.created && event.contract == toThin(actualContract) => - case Some(ArchivedContract(_)) if !event.created => + case Some(Active) if event.created => + case Some(Archived) if !event.created => case actual => fail( s"Test bug: actual $actual after event $event: index view: ${indexViewContractsReader.contractStateStore @@ -225,7 +261,7 @@ private object MutableCacheBackedContractStoreRaceTests { private def generateWorkload( keysCount: Long, contractsCount: Long, - ): Seq[Offset => SimplifiedContractStateEvent] = { + ): Seq[Long => SimplifiedContractStateEvent] = { val keys = (0L until keysCount).map { keyIdx => keyIdx -> Key.assertBuild( Identifier.assertFromString("pkgId:module:entity"), @@ -239,31 +275,28 @@ private object MutableCacheBackedContractStoreRaceTests { key -> (0L until contractLifecyclesForKey) .map { contractIdx => val globalContractIdx = keyIdx * contractLifecyclesForKey + contractIdx - val contractId = ContractId.V1(Hash.hashPrivateKey(globalContractIdx.toString)) - val contractRef = contract(globalContractIdx) - (contractId, contractRef) + val contractRef = 
contract(globalContractIdx, key) + (contractRef.contractId, contractRef) } - .foldLeft(VectorMap.empty[ContractId, ThinContract]) { case (r, (k, v)) => + .foldLeft(VectorMap.empty[ContractId, FatContract]) { case (r, (k, v)) => r.updated(k, v) } } val updates = keysToContracts.map { case (key, contracts) => - contracts.flatMap { case (contractId, contractRef) => + contracts.flatMap { case (_, contractRef) => Vector( - (offset: Offset) => + (eventSeqId: Long) => SimplifiedContractStateEvent( - offset = offset, - contractId = contractId, + eventSeqId = eventSeqId, contract = contractRef, created = true, key = key, ), - (offset: Offset) => + (eventSeqId: Long) => SimplifiedContractStateEvent( - offset = offset, - contractId = contractId, + eventSeqId = eventSeqId, contract = contractRef, created = false, key = key, @@ -276,8 +309,8 @@ private object MutableCacheBackedContractStoreRaceTests { } private def interleaveRandom( - indexContractsUpdates: Iterable[Iterable[Offset => SimplifiedContractStateEvent]] - ): Seq[Offset => SimplifiedContractStateEvent] = { + indexContractsUpdates: Iterable[Iterable[Long => SimplifiedContractStateEvent]] + ): Seq[Long => SimplifiedContractStateEvent] = { @tailrec def interleaveIteratorsRandom[T](acc: Vector[T], col: Set[Iterator[T]]): Vector[T] = if (col.isEmpty) acc @@ -290,89 +323,76 @@ private object MutableCacheBackedContractStoreRaceTests { } interleaveIteratorsRandom( - Vector.empty[Offset => SimplifiedContractStateEvent], + Vector.empty[Long => SimplifiedContractStateEvent], indexContractsUpdates.map(_.iterator).toSet, ) } final case class SimplifiedContractStateEvent( - offset: Offset, - contractId: ContractId, - contract: ThinContract, + eventSeqId: Long, + contract: FatContract, created: Boolean, key: Key, - ) + ) { + val contractId: ContractId = contract.contractId + } - private def contract(idx: Long): ThinContract = { + private def contract(idx: Long, key: GlobalKey): FatContract = { val templateId = Identifier.assertFromString("pkgId:module:entity") val packageName = Ref.PackageName.assertFromString("pkg-name") val contractArgument = Value.ValueInt64(idx) - ThinContract( - packageName = packageName, - template = templateId, - arg = Versioned(LanguageMajorVersion.V2.maxStableVersion, contractArgument), - ) + ExampleContractFactory + .build( + packageName = packageName, + templateId = templateId, + argument = contractArgument, + signatories = stakeholders, + stakeholders = stakeholders, + keyOpt = Some(KeyWithMaintainers(key, Set.empty)), + overrideContractId = Some(ExampleTransactionFactory.suffixedId(idx.toInt, 0)), + ) + .inst } private def buildContractStore( indexViewContractsReader: IndexViewContractsReader, ec: ExecutionContext, loggerFactory: NamedLoggerFactory, + participantContractStore: ContractStore, ) = { val metrics = LedgerApiServerMetrics.ForTesting new MutableCacheBackedContractStore( contractsReader = indexViewContractsReader, contractStateCaches = ContractStateCaches.build( - initialCacheIndex = None, + initialCacheEventSeqIdIndex = 0L, maxContractsCacheSize = 1L, maxKeyCacheSize = 1L, metrics = metrics, loggerFactory = loggerFactory, )(ec), + contractStore = participantContractStore, loggerFactory = loggerFactory, )(ec) } private val toContractStateEvent: SimplifiedContractStateEvent => ContractStateEvent = { - case SimplifiedContractStateEvent(offset, contractId, contract, created, key) => + case SimplifiedContractStateEvent(_eventSeqId, contract, created, key) => if (created) - ContractStateEvent.Created( - 
FatContract.fromCreateNode( - create = Node.Create( - coid = contractId, - packageName = contract.unversioned.packageName, - templateId = contract.unversioned.template, - arg = contract.unversioned.arg, - signatories = stakeholders, - stakeholders = stakeholders, // Not used - keyOpt = Some(KeyWithMaintainers(key, Set.empty)), - version = contract.version, - ), - createTime = CreationTime.CreatedAt(Time.Timestamp.MinValue), // Not used, - authenticationData = Bytes.Empty, - ), - offset, - ) + ContractStateEvent.Created(contract.contractId, Some(key)) else - ContractStateEvent.Archived( - contractId = contractId, - globalKey = Some(key), - stakeholders = stakeholders, // Not used - eventOffset = offset, - ) + ContractStateEvent.Archived(contract.contractId, Some(key)) } final case class ContractLifecycle( - contractId: ContractId, - contract: ThinContract, - createdAt: Offset, - archivedAt: Option[Offset], + contract: FatContract, + createdAt: Long, + archivedAt: Option[Long], ) // Simplified view of the index which models the evolution of the key and contracts state private final case class IndexViewContractsReader()(implicit ec: ExecutionContext) extends LedgerDaoContractsReader { - private type CreatedAt = Offset + private type CreatedAt = Long @volatile private[cache] var contractStateStore = Map.empty[ContractId, ContractLifecycle] @volatile private[cache] var keyStateStore = Map.empty[Key, TreeMap[CreatedAt, ContractId]] @@ -385,9 +405,8 @@ private object MutableCacheBackedContractStoreRaceTests { case None => Some( ContractLifecycle( - contractId = event.contractId, contract = event.contract, - createdAt = event.offset, + createdAt = event.eventSeqId, archivedAt = None, ) ) @@ -396,11 +415,11 @@ } keyStateStore = keyStateStore.updatedWith(event.key) { - case None => Some(TreeMap(event.offset -> event.contractId)) + case None => Some(TreeMap(event.eventSeqId -> event.contractId)) case Some(assignments) => val (lastContractAssignedAt, currentContractId) = assignments.last val lastContract = contractStateStore(currentContractId) - val createdAt = event.offset + val createdAt = event.eventSeqId if (lastContractAssignedAt < createdAt && lastContract.archivedAt.exists(_ < createdAt)) Some(assignments + (createdAt -> event.contractId)) else fail(s"Key state update conflict: last state $lastContract vs event $event") @@ -408,9 +427,9 @@ } else { // On archive contractStateStore = contractStateStore.updatedWith(event.contractId) { - case Some(contractLifecycle @ ContractLifecycle(contractId, _, createdAt, None)) - if event.offset > createdAt && event.contractId == contractId => - Some(contractLifecycle.copy(archivedAt = Some(event.offset))) + case Some(contractLifecycle @ ContractLifecycle(contract, createdAt, None)) + if event.eventSeqId > createdAt && event.contractId == contract.contractId => + Some(contractLifecycle.copy(archivedAt = Some(event.eventSeqId))) case lastState => fail(s"Contract state update conflict: last state $lastState vs event $event") } @@ -419,7 +438,7 @@ case Some(assignments) => val (currentCreatedAt, currentContractId) = assignments.last val lastContractAssignment = contractStateStore(currentContractId) - val archivedAt = event.offset + val archivedAt = event.eventSeqId if (currentCreatedAt < archivedAt && lastContractAssignment.archivedAt.nonEmpty) Some(assignments + (archivedAt ->
event.contractId)) else @@ -429,72 +448,51 @@ private object MutableCacheBackedContractStoreRaceTests { } } - override def lookupContractState(contractId: ContractId, validAt: Offset)(implicit - loggingContext: LoggingContextWithTrace - ): Future[Option[ContractState]] = + override def lookupContractState(contractId: ContractId, notEarlierThanEventSeqId: Long)( + implicit loggingContext: LoggingContextWithTrace + ): Future[Option[ExistingContractStatus]] = Future { val _ = loggingContext contractStateStore .get(contractId) - .flatMap { case ContractLifecycle(_, contract, createdAt, maybeArchivedAt) => - if (validAt < createdAt) None - else if (maybeArchivedAt.forall(_ > validAt)) - Some( - ActiveContract( - FatContract.fromCreateNode( - create = Node.Create( - coid = contractId, - packageName = contract.unversioned.packageName, - templateId = contract.unversioned.template, - arg = contract.unversioned.arg, - signatories = stakeholders, - stakeholders = stakeholders, - keyOpt = None, - version = contract.version, - ), - createTime = CreationTime.CreatedAt(Time.Timestamp.MinValue), - authenticationData = Bytes.Empty, - ) - ) - ) - else Some(ArchivedContract(stakeholders)) + .flatMap { case ContractLifecycle(_, createdAt, maybeArchivedAt) => + if (notEarlierThanEventSeqId < createdAt) None + else if (maybeArchivedAt.forall(_ > notEarlierThanEventSeqId)) + Some(ContractStateStatus.Active) + else Some(ContractStateStatus.Archived) } }(ec) - override def lookupKeyState(key: Key, validAt: Offset)(implicit + override def lookupKeyState(key: Key, notEarlierThanEventSeqId: Long)(implicit loggingContext: LoggingContextWithTrace ): Future[KeyState] = Future { val _ = loggingContext keyStateStore .get(key) - .map(_.maxBefore(nextAfter(validAt)) match { + .map(_.maxBefore(notEarlierThanEventSeqId + 1) match { case Some((_, contractId)) => contractStateStore(contractId).archivedAt match { - case Some(archivedAt) if archivedAt <= validAt => KeyUnassigned - case _ => KeyAssigned(contractId, stakeholders) + case Some(archivedAt) if archivedAt <= notEarlierThanEventSeqId => KeyUnassigned + case _ => KeyAssigned(contractId) } case None => KeyUnassigned }) .getOrElse(KeyUnassigned) }(ec) - override def lookupKeyStatesFromDb(keys: Seq[Key], notEarlierThanOffset: CreatedAt)(implicit + override def lookupKeyStatesFromDb(keys: Seq[Key], notEarlierThanEventSeqId: Long)(implicit loggingContext: LoggingContextWithTrace ): Future[Map[Key, KeyState]] = ??? 
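// Editor's note (illustrative comments, not part of the PR): lookupKeyState above
// resolves a point-in-time key state via assignments.maxBefore(notEarlierThanEventSeqId + 1),
// i.e. the latest assignment whose event sequential ID is at or before the requested one,
// and then reports KeyUnassigned if that contract was already archived by that point.
// For example, with assignments = TreeMap(1L -> cidA, 5L -> cidB) and cidA archived at 3L,
// a lookup at 4L selects cidA via maxBefore(5L) but yields KeyUnassigned since 3L <= 4L.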
// not used in this test } - private def offset(idx: Long) = { - val base = BigInt(1L) << 32 - Offset.tryFromLong((base + idx).toLong) - } - - private def nextAfter(currentOffset: Offset) = currentOffset.increment - - def toThin(contract: FatContract): ThinContract = - ThinContract( - packageName = contract.packageName, - template = contract.templateId, - arg = Versioned(LanguageMajorVersion.V2.maxStableVersion, contract.createArg), - ) - + def update(contractStore: ContractStore, event: SimplifiedContractStateEvent)(implicit + ec: ExecutionContext + ): Future[Unit] = + if (event.created) { + val contract = PersistedContractInstance(event.contract) + contractStore + .storeContracts(Seq(contract.asContractInstance)) + .map(_ => ()) + .failOnShutdownToAbortException("test") + } else Future.unit } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/MutableCacheBackedContractStoreSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/MutableCacheBackedContractStoreSpec.scala index 80d285b032..069f0470b5 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/MutableCacheBackedContractStoreSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/MutableCacheBackedContractStoreSpec.scala @@ -5,10 +5,14 @@ package com.digitalasset.canton.platform.store.cache import cats.data.NonEmptyVector import com.daml.ledger.resources.Resource -import com.digitalasset.canton.data.Offset -import com.digitalasset.canton.ledger.participant.state.index.ContractState +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.ledger.participant.state.index.ContractStateStatus.ExistingContractStatus +import com.digitalasset.canton.ledger.participant.state.index.{ContractState, ContractStateStatus} import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory} import com.digitalasset.canton.metrics.LedgerApiServerMetrics +import com.digitalasset.canton.participant.store +import com.digitalasset.canton.participant.store.memory.InMemoryContractStore import com.digitalasset.canton.platform.* import com.digitalasset.canton.platform.store.cache.MutableCacheBackedContractStoreSpec.* import com.digitalasset.canton.platform.store.dao.events.ContractStateEvent @@ -18,12 +22,13 @@ import com.digitalasset.canton.platform.store.interfaces.LedgerDaoContractsReade KeyState, KeyUnassigned, } +import com.digitalasset.canton.protocol.ExampleContractFactory +import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.{HasExecutionContext, TestEssentials} import com.digitalasset.daml.lf.crypto.Hash -import com.digitalasset.daml.lf.data.{Bytes, ImmArray, Ref, Time} -import com.digitalasset.daml.lf.language.LanguageMajorVersion -import com.digitalasset.daml.lf.transaction.{CreationTime, Node, Versioned} -import com.digitalasset.daml.lf.value.Value.{ValueInt64, ValueRecord, ValueText} +import com.digitalasset.daml.lf.data.{Ref, Time} +import com.digitalasset.daml.lf.transaction.CreationTime +import com.digitalasset.daml.lf.value.Value.{ContractId, ValueText} import org.mockito.MockitoSugar import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AsyncWordSpec @@ -47,19 +52,18 @@ class MutableCacheBackedContractStoreSpec contractsReader = 
mock[LedgerDaoContractsReader], contractStateCaches = contractStateCaches, loggerFactory = loggerFactory, + contractStore = mock[store.ContractStore], ) val event1 = ContractStateEvent.Archived( contractId = ContractId.V1(Hash.hashPrivateKey("cid")), globalKey = None, - stakeholders = Set.empty, - eventOffset = Offset.firstOffset, ) val event2 = event1 val updateBatch = NonEmptyVector.of(event1, event2) - contractStore.contractStateCaches.push(updateBatch) - verify(contractStateCaches).push(updateBatch) + contractStore.contractStateCaches.push(updateBatch, 10) + verify(contractStateCaches).push(updateBatch, 10) succeed } @@ -75,24 +79,24 @@ class MutableCacheBackedContractStoreSpec loggerFactory = loggerFactory, spyContractsReader, ).asFuture - _ = store.contractStateCaches.contractState.cacheIndex = Some(offset1) + _ = store.contractStateCaches.contractState.cacheEventSeqIdIndex = eventSeqId1 cId2_lookup <- store.lookupActiveContract(Set(charlie), cId_2) another_cId2_lookup <- store.lookupActiveContract(Set(charlie), cId_2) - _ = store.contractStateCaches.contractState.cacheIndex = Some(offset2) + _ = store.contractStateCaches.contractState.cacheEventSeqIdIndex = eventSeqId2 cId3_lookup <- store.lookupActiveContract(Set(bob), cId_3) another_cId3_lookup <- store.lookupActiveContract(Set(bob), cId_3) - _ = store.contractStateCaches.contractState.cacheIndex = Some(offset3) - nonExistentCId = contractId(5) + _ = store.contractStateCaches.contractState.cacheEventSeqIdIndex = eventSeqId3 + nonExistentCId = cId_5 nonExistentCId_lookup <- store.lookupActiveContract(Set.empty, nonExistentCId) another_nonExistentCId_lookup <- store.lookupActiveContract(Set.empty, nonExistentCId) } yield { cId2_lookup shouldBe Option.empty another_cId2_lookup shouldBe Option.empty - cId3_lookup.map(_.templateId) shouldBe Some(contract3.unversioned.template) - another_cId3_lookup.map(_.templateId) shouldBe Some(contract3.unversioned.template) + cId3_lookup.map(_.templateId) shouldBe Some(contract3.inst.templateId) + another_cId3_lookup.map(_.templateId) shouldBe Some(contract3.inst.templateId) nonExistentCId_lookup shouldBe Option.empty another_nonExistentCId_lookup shouldBe Option.empty @@ -101,9 +105,9 @@ class MutableCacheBackedContractStoreSpec // So even though a read-through populates missing entries, // they can be immediately evicted by GCs and lead to subsequent misses. 
// Hence, verify atLeastOnce for LedgerDaoContractsReader.lookupContractState - verify(spyContractsReader, atLeastOnce).lookupContractState(cId_2, offset1) - verify(spyContractsReader, atLeastOnce).lookupContractState(cId_3, offset2) - verify(spyContractsReader, atLeastOnce).lookupContractState(nonExistentCId, offset3) + verify(spyContractsReader, atLeastOnce).lookupContractState(cId_2, eventSeqId1) + verify(spyContractsReader, atLeastOnce).lookupContractState(cId_3, eventSeqId2) + verify(spyContractsReader, atLeastOnce).lookupContractState(nonExistentCId, eventSeqId3) succeed } } @@ -116,7 +120,7 @@ class MutableCacheBackedContractStoreSpec loggerFactory = loggerFactory, spyContractsReader, ).asFuture - _ = store.contractStateCaches.contractState.cacheIndex = Some(offset1) + _ = store.contractStateCaches.contractState.cacheEventSeqIdIndex = eventSeqId1 negativeLookup_cId6 <- store.lookupActiveContract(Set(alice), cId_6) positiveLookup_cId6 <- store.lookupActiveContract(Set(alice), cId_6) } yield { @@ -124,7 +128,7 @@ class MutableCacheBackedContractStoreSpec positiveLookup_cId6 shouldBe Option.empty verify(spyContractsReader, times(wantedNumberOfInvocations = 1)) - .lookupContractState(cId_6, offset1) + .lookupContractState(cId_6, eventSeqId1) succeed } } @@ -135,22 +139,22 @@ class MutableCacheBackedContractStoreSpec cId1_lookup0 <- store.lookupActiveContract(Set(alice), cId_1) cId2_lookup0 <- store.lookupActiveContract(Set(bob), cId_2) - _ = store.contractStateCaches.contractState.cacheIndex = Some(offset1) + _ = store.contractStateCaches.contractState.cacheEventSeqIdIndex = eventSeqId1 cId1_lookup1 <- store.lookupActiveContract(Set(alice), cId_1) cid1_lookup1_archivalNotDivulged <- store.lookupActiveContract(Set(charlie), cId_1) - _ = store.contractStateCaches.contractState.cacheIndex = Some(offset2) + _ = store.contractStateCaches.contractState.cacheEventSeqIdIndex = eventSeqId2 cId2_lookup2 <- store.lookupActiveContract(Set(bob), cId_2) cid2_lookup2_divulged <- store.lookupActiveContract(Set(charlie), cId_2) cid2_lookup2_nonVisible <- store.lookupActiveContract(Set(charlie), cId_2) } yield { - cId1_lookup0.map(_.templateId) shouldBe Some(contract1.unversioned.template) + cId1_lookup0.map(_.templateId) shouldBe Some(contract1.inst.templateId) cId2_lookup0 shouldBe Option.empty cId1_lookup1 shouldBe Option.empty cid1_lookup1_archivalNotDivulged shouldBe None - cId2_lookup2.map(_.templateId) shouldBe Some(contract2.unversioned.template) + cId2_lookup2.map(_.templateId) shouldBe Some(contract2.inst.templateId) cid2_lookup2_divulged shouldBe None cid2_lookup2_nonVisible shouldBe Option.empty } @@ -171,7 +175,7 @@ class MutableCacheBackedContractStoreSpec assigned_firstLookup <- store.lookupContractKey(Set(alice), someKey) assigned_secondLookup <- store.lookupContractKey(Set(alice), someKey) - _ = store.contractStateCaches.keyState.cacheIndex = Some(offset1) + _ = store.contractStateCaches.keyState.cacheEventSeqIdIndex = eventSeqId1 unassigned_firstLookup <- store.lookupContractKey(Set(alice), unassignedKey) unassigned_secondLookup <- store.lookupContractKey(Set(alice), unassignedKey) } yield { @@ -181,9 +185,11 @@ class MutableCacheBackedContractStoreSpec unassigned_firstLookup shouldBe Option.empty unassigned_secondLookup shouldBe Option.empty - verify(spyContractsReader).lookupKeyState(someKey, offset0)(loggingContext) - // looking up the key state will not prefetch the contract state - verify(spyContractsReader).lookupKeyState(unassignedKey, offset1)(loggingContext) + 
verify(spyContractsReader).lookupKeyState(someKey, eventSeqId0)(loggingContext) + // looking up the key state will prefetch and use the contract state + verify(spyContractsReader).lookupContractState(cId_1, eventSeqId0)(loggingContext) + verify(spyContractsReader).lookupContractState(cId_1, eventSeqId0)(loggingContext) + verify(spyContractsReader).lookupKeyState(unassignedKey, eventSeqId1)(loggingContext) verifyNoMoreInteractions(spyContractsReader) succeed } @@ -194,14 +200,17 @@ class MutableCacheBackedContractStoreSpec store <- contractStore(cachesSize = 0L, loggerFactory).asFuture key_lookup0 <- store.lookupContractKey(Set(alice), someKey) - _ = store.contractStateCaches.keyState.cacheIndex = Some(offset1) + _ = store.contractStateCaches.keyState.cacheEventSeqIdIndex = eventSeqId1 + _ = store.contractStateCaches.contractState.cacheEventSeqIdIndex = eventSeqId1 key_lookup1 <- store.lookupContractKey(Set(alice), someKey) - _ = store.contractStateCaches.keyState.cacheIndex = Some(offset2) + _ = store.contractStateCaches.keyState.cacheEventSeqIdIndex = eventSeqId2 + _ = store.contractStateCaches.contractState.cacheEventSeqIdIndex = eventSeqId2 key_lookup2 <- store.lookupContractKey(Set(bob), someKey) key_lookup2_notVisible <- store.lookupContractKey(Set(charlie), someKey) - _ = store.contractStateCaches.keyState.cacheIndex = Some(offset3) + _ = store.contractStateCaches.keyState.cacheEventSeqIdIndex = eventSeqId3 + _ = store.contractStateCaches.contractState.cacheEventSeqIdIndex = eventSeqId3 key_lookup3 <- store.lookupContractKey(Set(bob), someKey) } yield { key_lookup0 shouldBe Some(cId_1) @@ -215,36 +224,23 @@ class MutableCacheBackedContractStoreSpec "lookupContractStateWithoutDivulgence" should { - lazy val contract = fatContract( - contractId = cId_4, - thinContract = contract4, - createLedgerEffectiveTime = t4, - stakeholders = exStakeholders, - signatories = exSignatories, - key = Some(KeyWithMaintainers(someKey, exMaintainers)), - authenticationData = exAuthenticationData, - ) - - val stateValueActive = ContractStateValue.Active(contract) - val stateActive = ContractState.Active(contract) - "resolve lookup from cache" in { for { store <- contractStore(cachesSize = 2L, loggerFactory).asFuture _ = store.contractStateCaches.contractState.putBatch( - offset2, + eventSeqId2, Map( // Populate the cache with an active contract - cId_4 -> stateValueActive, + cId_4 -> ContractStateStatus.Active, // Populate the cache with an archived contract - cId_5 -> ContractStateValue.Archived(Set.empty), + cId_5 -> ContractStateStatus.Archived, ), ) activeContractLookupResult <- store.lookupContractState(cId_4) archivedContractLookupResult <- store.lookupContractState(cId_5) nonExistentContractLookupResult <- store.lookupContractState(cId_7) } yield { - activeContractLookupResult shouldBe stateActive + activeContractLookupResult shouldBe ContractState.Active(contract4.inst) archivedContractLookupResult shouldBe ContractState.Archived nonExistentContractLookupResult shouldBe ContractState.NotFound } @@ -257,7 +253,7 @@ class MutableCacheBackedContractStoreSpec archivedContractLookupResult <- store.lookupContractState(cId_5) nonExistentContractLookupResult <- store.lookupContractState(cId_7) } yield { - activeContractLookupResult shouldBe stateActive + activeContractLookupResult shouldBe ContractState.Active(contract4.inst) archivedContractLookupResult shouldBe ContractState.Archived nonExistentContractLookupResult shouldBe ContractState.NotFound } @@ -267,41 +263,53 @@ class 
MutableCacheBackedContractStoreSpec @nowarn("msg=match may not be exhaustive") object MutableCacheBackedContractStoreSpec { - private val offset0 = Offset.tryFromLong(1L) - private val offset1 = Offset.tryFromLong(2L) - private val offset2 = Offset.tryFromLong(3L) - private val offset3 = Offset.tryFromLong(4L) + private val eventSeqId0 = 1L + private val eventSeqId1 = 2L + private val eventSeqId2 = 3L + private val eventSeqId3 = 4L private val Seq(alice, bob, charlie) = Seq("alice", "bob", "charlie").map(party) - private val ( - Seq(cId_1, cId_2, cId_3, cId_4, cId_5, cId_6, cId_7), - Seq(contract1, contract2, contract3, contract4, _, contract6, _), - Seq(t1, t2, t3, t4, _, t6, _), - ) = - (1 to 7).map { id => - (contractId(id), thinContract(id), Time.Timestamp.assertFromLong(id.toLong * 1000L)) - }.unzip3 private val someKey = globalKey("key1") private val exStakeholders = Set(bob, alice) private val exSignatories = Set(alice) private val exMaintainers = Set(alice) - private val exAuthenticationData = Bytes.fromByteArray("meta".getBytes) private val someKeyWithMaintainers = KeyWithMaintainers(someKey, exMaintainers) + private val timeouts = ProcessingTimeout() + + private val Seq(t1, t2, t3, t4, t5, t6, t7) = (1 to 7).map { id => + Time.Timestamp.assertFromLong(id.toLong * 1000L) + } + + private val ( + Seq(cId_1, cId_2, cId_3, cId_4, cId_5, cId_6, cId_7), + Seq(contract1, contract2, contract3, contract4, _, contract6, _), + ) = + Seq( + contract(Set(alice), t1), + contract(exStakeholders, t2), + contract(exStakeholders, t3), + contract(exStakeholders, t4), + contract(exStakeholders, t5), + contract(Set(alice), t6), + contract(exStakeholders, t7), + ).map(c => c.contractId -> c).unzip + private def contractStore( cachesSize: Long, loggerFactory: NamedLoggerFactory, readerFixture: LedgerDaoContractsReader = ContractsReaderFixture(), - )(implicit ec: ExecutionContext) = { + )(implicit ec: ExecutionContext, traceContext: TraceContext) = { val metrics = LedgerApiServerMetrics.ForTesting - val startIndexExclusive = Some(offset0) + val startIndexExclusive = eventSeqId0 val contractStore = new MutableCacheBackedContractStore( readerFixture, contractStateCaches = ContractStateCaches .build(startIndexExclusive, cachesSize, cachesSize, metrics, loggerFactory), loggerFactory = loggerFactory, + contractStore = inMemoryContractStore(loggerFactory), ) Resource.successful(contractStore) @@ -310,113 +318,77 @@ object MutableCacheBackedContractStoreSpec { @SuppressWarnings(Array("org.wartremover.warts.FinalCaseClass")) // This class is spied in tests case class ContractsReaderFixture() extends LedgerDaoContractsReader { @volatile private var initialResultForCid6 = - Future.successful(Option.empty[LedgerDaoContractsReader.ContractState]) + Future.successful(Option.empty[ExistingContractStatus]) - override def lookupKeyState(key: Key, validAt: Offset)(implicit + override def lookupKeyState(key: Key, notEarlierThanEventSeqId: Long)(implicit loggingContext: LoggingContextWithTrace - ): Future[LedgerDaoContractsReader.KeyState] = (key, validAt) match { - case (`someKey`, `offset0`) => Future.successful(KeyAssigned(cId_1, Set(alice))) - case (`someKey`, `offset2`) => Future.successful(KeyAssigned(cId_2, Set(bob))) + ): Future[LedgerDaoContractsReader.KeyState] = (key, notEarlierThanEventSeqId) match { + case (`someKey`, `eventSeqId0`) => Future.successful(KeyAssigned(cId_1)) + case (`someKey`, `eventSeqId2`) => Future.successful(KeyAssigned(cId_2)) case _ => Future.successful(KeyUnassigned) } - override def 
lookupKeyStatesFromDb(keys: Seq[Key], notEarlierThanOffset: Offset)(implicit + override def lookupKeyStatesFromDb(keys: Seq[Key], notEarlierThanOffset: Long)(implicit loggingContext: LoggingContextWithTrace ): Future[Map[Key, KeyState]] = ??? // not used in this test - override def lookupContractState(contractId: ContractId, validAt: Offset)(implicit - loggingContext: LoggingContextWithTrace - ): Future[Option[LedgerDaoContractsReader.ContractState]] = - (contractId, validAt) match { - case (`cId_1`, `offset0`) => activeContract(cId_1, contract1, Set(alice), t1) - case (`cId_1`, validAt) if validAt > offset0 => archivedContract(Set(alice)) - case (`cId_2`, validAt) if validAt >= offset1 => - activeContract(cId_2, contract2, exStakeholders, t2) - case (`cId_3`, _) => activeContract(cId_3, contract3, exStakeholders, t3) - case (`cId_4`, _) => activeContract(cId_4, contract4, exStakeholders, t4) - case (`cId_5`, _) => archivedContract(Set(bob)) + override def lookupContractState(contractId: ContractId, notEarlierThanEventSeqId: Long)( + implicit loggingContext: LoggingContextWithTrace + ): Future[Option[ExistingContractStatus]] = + (contractId, notEarlierThanEventSeqId) match { + case (`cId_1`, `eventSeqId0`) => activeContract + case (`cId_1`, validAt) if validAt > eventSeqId0 => archivedContract + case (`cId_2`, validAt) if validAt >= eventSeqId1 => + activeContract + case (`cId_3`, _) => activeContract + case (`cId_4`, _) => activeContract + case (`cId_5`, _) => archivedContract case (`cId_6`, _) => // Simulate store being populated from one query to another val result = initialResultForCid6 - initialResultForCid6 = activeContract(cId_6, contract6, Set(alice), t6) + initialResultForCid6 = activeContract result case _ => Future.successful(Option.empty) } } - private def activeContract( - contractId: ContractId, - contract: ThinContract, - stakeholders: Set[Party], - ledgerEffectiveTime: Time.Timestamp, - signatories: Set[Party] = exSignatories, - key: Option[KeyWithMaintainers] = Some(someKeyWithMaintainers), - authenticationData: Bytes = exAuthenticationData, - ): Future[Option[LedgerDaoContractsReader.ActiveContract]] = - Future.successful( - Some( - LedgerDaoContractsReader.ActiveContract( - contract = fatContract( - contractId = contractId, - thinContract = contract, - createLedgerEffectiveTime = ledgerEffectiveTime, - stakeholders = stakeholders, - signatories = signatories, - key = key, - authenticationData = authenticationData, - ) - ) - ) - ) - - private def archivedContract( - parties: Set[Party] - ): Future[Option[LedgerDaoContractsReader.ArchivedContract]] = - Future.successful(Some(LedgerDaoContractsReader.ArchivedContract(parties))) - - private def party(name: String): Party = Party.assertFromString(name) - - private def thinContract(idx: Int): ThinContract = { - val templateId = Identifier.assertFromString("some:template:name") - val packageName = Ref.PackageName.assertFromString("pkg-name") - - val contractArgument = ValueRecord( - Some(templateId), - ImmArray(None -> ValueInt64(idx.toLong)), - ) - ThinContract( - packageName = packageName, - template = templateId, - arg = Versioned(LanguageMajorVersion.V2.maxStableVersion, contractArgument), + def inMemoryContractStore( + loggerFactory: NamedLoggerFactory + )(implicit + executionContext: ExecutionContext, + traceContext: TraceContext, + ): InMemoryContractStore = { + val store = new InMemoryContractStore(timeouts, loggerFactory) + val contracts = Seq( + contract1, + contract2, + contract3, + contract4, + contract6, ) + 
store.storeContracts(contracts).discard + store } - private def fatContract( - contractId: ContractId, - thinContract: ThinContract, - createLedgerEffectiveTime: Time.Timestamp, + private def contract( stakeholders: Set[Party], - signatories: Set[Party], - key: Option[KeyWithMaintainers], - authenticationData: Bytes, + ledgerEffectiveTime: Time.Timestamp, + key: Option[KeyWithMaintainers] = Some(someKeyWithMaintainers), ) = - FatContract.fromCreateNode( - Node.Create( - coid = contractId, - packageName = thinContract.unversioned.packageName, - templateId = thinContract.unversioned.template, - arg = thinContract.unversioned.arg, - signatories = signatories, - stakeholders = stakeholders, - keyOpt = key, - version = thinContract.version, - ), - createTime = CreationTime.CreatedAt(createLedgerEffectiveTime), - authenticationData = authenticationData, + ExampleContractFactory.build( + createdAt = CreationTime.CreatedAt(ledgerEffectiveTime), + signatories = exSignatories, + stakeholders = stakeholders, + keyOpt = key, ) - private def contractId(id: Int): ContractId = - ContractId.V1(Hash.hashPrivateKey(id.toString)) + private val activeContract: Future[Option[ExistingContractStatus]] = + Future.successful(Some(ContractStateStatus.Active)) + + private val archivedContract: Future[Option[ExistingContractStatus]] = + Future.successful(Some(ContractStateStatus.Archived)) + + private def party(name: String): Party = Party.assertFromString(name) private def globalKey(desc: String): Key = Key.assertBuild( diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/StateCacheSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/StateCacheSpec.scala index 89711a9eb4..9ffb3607c7 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/StateCacheSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/StateCacheSpec.scala @@ -6,7 +6,6 @@ package com.digitalasset.canton.platform.store.cache import com.daml.metrics.api.noop.{NoOpMetricsFactory, NoOpTimer} import com.daml.metrics.api.{MetricInfo, MetricName, MetricQualification} import com.digitalasset.canton.caching.{CaffeineCache, ConcurrentCache, SizedCache} -import com.digitalasset.canton.data.Offset import com.digitalasset.canton.metrics.CacheMetrics import com.digitalasset.canton.{BaseTest, HasExecutionContext} import com.github.benmanes.caffeine.cache.Caffeine @@ -39,9 +38,9 @@ class StateCacheSpec it should "asynchronously store the update" in { val cache = mock[ConcurrentCache[String, String]] - val someOffset = offset(0L) + val someEventSeqId = 0L val stateCache = StateCache[String, String]( - initialCacheIndex = Some(someOffset), + initialCacheEventSeqIdIndex = someEventSeqId, emptyLedgerState = "", cache = cache, registerUpdateTimer = cacheUpdateTimer, @@ -53,7 +52,7 @@ class StateCacheSpec stateCache.putAsync( "key", { - case `someOffset` => asyncUpdatePromise.future + case `someEventSeqId` => asyncUpdatePromise.future case _ => fail() }, ) @@ -123,9 +122,9 @@ class StateCacheSpec it should "synchronously update the cache in front of older asynchronous updates" in { val cache = mock[ConcurrentCache[String, String]] - val initialOffset = offset(0L) + val initialEventSeqId = 0L val stateCache = StateCache[String, String]( - initialCacheIndex = Some(initialOffset), + initialCacheEventSeqIdIndex = initialEventSeqId, 
emptyLedgerState = "", cache = cache, registerUpdateTimer = cacheUpdateTimer, @@ -137,12 +136,12 @@ class StateCacheSpec stateCache.putAsync( "key", { - case `initialOffset` => asyncUpdatePromise.future + case `initialEventSeqId` => asyncUpdatePromise.future case _ => fail() }, ) stateCache.putBatch( - offset(2L), + 2L, Map("key" -> "value", "key2" -> "value2"), ) asyncUpdatePromise.completeWith(Future.successful("should not update the cache")) @@ -159,21 +158,21 @@ class StateCacheSpec it should "not update the cache if called with a non-increasing `validAt`" in { val cache = mock[ConcurrentCache[String, String]] - val stateCache = StateCache[String, String](None, "", cache, cacheUpdateTimer, loggerFactory) + val stateCache = StateCache[String, String](0L, "", cache, cacheUpdateTimer, loggerFactory) - stateCache.putBatch(offset(2L), Map("key" -> "value")) + stateCache.putBatch(2L, Map("key" -> "value")) loggerFactory.assertLogs( within = { // `Put` at a decreasing validAt - stateCache.putBatch(offset(1L), Map("key" -> "earlier value")) + stateCache.putBatch(1L, Map("key" -> "earlier value")) stateCache - .putBatch(offset(2L), Map("key" -> "value at same validAt")) + .putBatch(2L, Map("key" -> "value at same validAt")) }, assertions = _.warningMessage should include( - "Ignoring incoming synchronous update at an index (1000000001) equal to or before the cache index (1000000002)" + "Ignoring incoming synchronous update at an index at event sequential ID(1) equal to or before the cache index (2)" ), _.warningMessage should include( - "Ignoring incoming synchronous update at an index (1000000002) equal to or before the cache index (1000000002)" + "Ignoring incoming synchronous update at an index at event sequential ID(2) equal to or before the cache index (2)" ), ) @@ -187,7 +186,7 @@ class StateCacheSpec it should "correctly reset the state cache" in { val stateCache = new StateCache[String, String]( - initialCacheIndex = Some(offset(1L)), + initialCacheEventSeqIdIndex = 1L, emptyLedgerState = "", cache = SizedCache.from( SizedCache.Configuration(2), @@ -205,7 +204,7 @@ class StateCacheSpec // Add eagerly an entry into the cache stateCache.putBatch( - offset(2L), + 2L, Map(syncUpdateKey -> "some initial value"), ) stateCache.get(syncUpdateKey) shouldBe Some("some initial value") @@ -216,7 +215,7 @@ class StateCacheSpec loggerFactory.assertLogs( within = stateCache.putAsync( asyncUpdateKey, - Map(offset(2L) -> asyncUpdatePromise.future), + Map(2L -> asyncUpdatePromise.future), ), assertions = _.warningMessage should include( "Pending updates tracker for other_key not registered. This could be due to a transient error causing a restart in the index service." 
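Editor's note: the StateCacheSpec hunks above all exercise the same index-guard contract, now keyed by event sequential IDs instead of Offsets. The Scala sketch below is a hypothetical, simplified model (IndexGuardedCache and its methods are invented for illustration and are not the real StateCache API): a synchronous putBatch applies only when strictly newer than the cache index, and an asynchronous update registered at an older index must not overwrite a newer synchronous value.

final class IndexGuardedCache[V](initialIndex: Long) {
  private var index: Long = initialIndex
  private var value: Option[V] = None

  // Synchronous update: applied only when strictly newer than the cache index,
  // mirroring the "Ignoring incoming synchronous update ..." warnings asserted above.
  def putBatch(at: Long, v: V): Unit = synchronized {
    if (at > index) { index = at; value = Some(v) }
  }

  // Asynchronous completion: the result is kept only if no newer synchronous
  // update advanced the index while the future was in flight.
  def completeAsync(startedAt: Long, v: V): Unit = synchronized {
    if (startedAt == index) value = Some(v)
  }

  def get: Option[V] = synchronized(value)
}

// For instance, starting at index 0: putBatch(2L, "value") then completeAsync(0L, "stale")
// leaves "value" in place, which is the behavior the "synchronously update the cache in
// front of older asynchronous updates" test above locks in.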
@@ -224,13 +223,13 @@ class StateCacheSpec ) // Reset the cache - stateCache.reset(Some(offset(1L))) + stateCache.reset(1L) // Complete async update asyncUpdatePromise.completeWith(Future.successful("some value")) // Assert the cache is empty after completion of the async update putAsyncF.map { _ => - stateCache.cacheIndex shouldBe Some(offset(1L)) + stateCache.cacheEventSeqIdIndex shouldBe 1L stateCache.get(syncUpdateKey) shouldBe None stateCache.get(asyncUpdateKey) shouldBe None } @@ -238,7 +237,7 @@ class StateCacheSpec private def buildStateCache(cacheSize: Long): StateCache[String, String] = StateCache[String, String]( - initialCacheIndex = None, + initialCacheEventSeqIdIndex = 0L, emptyLedgerState = "", cache = CaffeineCache[String, String]( Caffeine @@ -281,14 +280,14 @@ class StateCacheSpec var cacheIdx = 0L insertions.map { case (key, (promise, _)) => cacheIdx += 1L - val validAt = offset(cacheIdx) - stateCache.cacheIndex = Some(validAt) + val validAt = cacheIdx + stateCache.cacheEventSeqIdIndex = validAt stateCache .putAsync( key, { case `validAt` => promise.future - case _ => fail() + case incorrect => fail(s"expected $validAt but was $incorrect") }, ) .map(_ => ()) @@ -301,9 +300,4 @@ class StateCacheSpec val duration = FiniteDuration((System.nanoTime() - start) / 1000000L, TimeUnit.MILLISECONDS) (r, duration) } - - private def offset(idx: Long): Offset = { - val base = 1000000000L - Offset.tryFromLong(base + idx) - } } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/BufferedStreamsReaderSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/BufferedStreamsReaderSpec.scala index 18ecd13019..f38011d311 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/BufferedStreamsReaderSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/BufferedStreamsReaderSpec.scala @@ -11,6 +11,7 @@ import com.digitalasset.canton.platform.store.cache.InMemoryFanoutBuffer import com.digitalasset.canton.platform.store.dao.BufferedStreamsReader.FetchFromPersistence import com.digitalasset.canton.platform.store.dao.BufferedStreamsReaderSpec.* import com.digitalasset.canton.platform.store.interfaces.TransactionLogUpdate +import com.digitalasset.canton.protocol.TestUpdateId import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.{BaseTest, HasExecutionContext, HasExecutorServiceGeneric} @@ -41,10 +42,12 @@ class BufferedStreamsReaderSpec transactionsBuffer = inMemoryFanoutBuffer, startInclusive = offset2, endInclusive = offset3, - bufferSliceFilter = noFilterBufferSlice(_).filterNot(_.updateId == "tx-3"), + bufferSliceFilter = noFilterBufferSlice(_).filterNot( + _.updateId == TestUpdateId("tx-3").toHexString + ), ) streamElements should contain theSameElementsInOrderAs Seq( - offset2 -> "tx-2" + offset2 -> TestUpdateId("tx-2").toHexString ) } } @@ -57,8 +60,8 @@ class BufferedStreamsReaderSpec endInclusive = offset3, ) streamElements should contain theSameElementsInOrderAs Seq( - offset2 -> "tx-2", - offset3 -> "tx-3", + offset2 -> TestUpdateId("tx-2").toHexString, + offset3 -> TestUpdateId("tx-3").toHexString, ) } } @@ -72,8 +75,8 @@ class BufferedStreamsReaderSpec ) streamElements should contain theSameElementsInOrderAs Seq( - offset2 -> "tx-2", - offset3 -> "tx-3", + offset2 -> 
TestUpdateId("tx-2").toHexString, + offset3 -> TestUpdateId("tx-3").toHexString, ) } } @@ -106,7 +109,7 @@ class BufferedStreamsReaderSpec streamElements should contain theSameElementsInOrderAs Seq( offset1 -> anotherResponseForOffset1, offset2 -> anotherResponseForOffset2, - offset3 -> "tx-3", + offset3 -> TestUpdateId("tx-3").toHexString, ) } @@ -127,12 +130,14 @@ class BufferedStreamsReaderSpec endInclusive = offset3, fetchFromPersistence = fetchFromPersistence, persistenceFetchArgs = filterMock, - bufferSliceFilter = noFilterBufferSlice(_).filterNot(_.updateId == "tx-3"), + bufferSliceFilter = noFilterBufferSlice(_).filterNot( + _.updateId == TestUpdateId("tx-3").toHexString + ), ) streamElements should contain theSameElementsInOrderAs Seq( offset1 -> anotherResponseForOffset1, - offset2 -> "tx-2", + offset2 -> TestUpdateId("tx-2").toHexString, ) } } @@ -446,7 +451,7 @@ object BufferedStreamsReaderSpec { result.size shouldBe endInclusiveIdx - startInclusiveIdx + 1 } val expectedElements = (startInclusiveIdx.toLong to endInclusiveIdx.toLong) map { idx => - offset(idx) -> s"tx-$idx" + offset(idx) -> TestUpdateId(s"tx-$idx").toHexString } result should contain theSameElementsInOrderAs expectedElements } @@ -471,7 +476,7 @@ object BufferedStreamsReaderSpec { private def transaction(i: Long) = TransactionLogUpdate.TransactionAccepted( - updateId = s"tx-$i", + updateId = TestUpdateId(s"tx-$i").toHexString, commandId = "", workflowId = "", effectiveAt = Timestamp.Epoch, diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/BufferedTransactionPointwiseReaderSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/BufferedTransactionPointwiseReaderSpec.scala deleted file mode 100644 index 7537d4099a..0000000000 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/BufferedTransactionPointwiseReaderSpec.scala +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.platform.store.dao - -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.data.Offset -import com.digitalasset.canton.logging.LoggingContextWithTrace -import com.digitalasset.canton.platform.store.cache.InMemoryFanoutBuffer -import com.digitalasset.canton.platform.store.dao.BufferedTransactionByIdReader.{ - FetchTransactionPointwiseFromPersistence, - ToApiResponse, -} -import com.digitalasset.canton.platform.store.interfaces.TransactionLogUpdate -import com.digitalasset.canton.topology.SynchronizerId -import com.digitalasset.daml.lf.data.Ref -import com.digitalasset.daml.lf.data.Ref.Party -import com.digitalasset.daml.lf.data.Time.Timestamp -import org.mockito.MockitoSugar -import org.scalatest.flatspec.AsyncFlatSpec - -import scala.concurrent.Future - -class BufferedTransactionPointwiseReaderSpec extends AsyncFlatSpec with MockitoSugar with BaseTest { - private val className = classOf[BufferedTransactionPointwiseReader[_, _]].getSimpleName - - private implicit val loggingContext: LoggingContextWithTrace = LoggingContextWithTrace( - loggerFactory - ) - - private val requestingParties = Set("p1", "p2").map(Ref.Party.assertFromString) - private val someSynchronizerId = SynchronizerId.tryFromString("some::synchronizer id") - - private val bufferedUpdateId1 = "bufferedTid_1" - private val bufferedUpdateId2 = "bufferedTid_2" - private val notBufferedUpdateId = "notBufferedTid" - private val unknownUpdateId = "unknownUpdateId" - - private val bufferedOffset1 = Offset.firstOffset - private val bufferedOffset2 = bufferedOffset1.increment - private val notBufferedOffset = bufferedOffset2.increment - private val unknownOffset = notBufferedOffset.increment - - private val bufferedTransaction1 = tx(bufferedUpdateId1, bufferedOffset1) - private val bufferedTransaction2 = tx(bufferedUpdateId2, bufferedOffset2) - - private val inMemoryFanout = mock[InMemoryFanoutBuffer] - when(inMemoryFanout.lookupTransaction(bufferedUpdateId1)).thenReturn(Some(bufferedTransaction1)) - when(inMemoryFanout.lookupTransaction(bufferedUpdateId2)).thenReturn(Some(bufferedTransaction2)) - when(inMemoryFanout.lookupTransaction(notBufferedUpdateId)).thenReturn(None) - when(inMemoryFanout.lookupTransaction(unknownUpdateId)).thenReturn(None) - - when(inMemoryFanout.lookupTransaction(bufferedOffset1)).thenReturn(Some(bufferedTransaction1)) - when(inMemoryFanout.lookupTransaction(bufferedOffset2)).thenReturn(Some(bufferedTransaction2)) - when(inMemoryFanout.lookupTransaction(notBufferedOffset)).thenReturn(None) - when(inMemoryFanout.lookupTransaction(unknownOffset)).thenReturn(None) - - private val toApiResponse = mock[ToApiResponse[Set[Party], String]] - when(toApiResponse.apply(bufferedTransaction1, requestingParties, loggingContext)) - .thenReturn(Future.successful(Some(bufferedUpdateId1))) - when(toApiResponse.apply(bufferedTransaction2, requestingParties, loggingContext)) - .thenReturn(Future.successful(None)) - - private val fetchFromPersistenceById = - new FetchTransactionPointwiseFromPersistence[(String, Set[Party]), String] { - override def apply( - queryParam: (String, Set[Party]), - loggingContext: LoggingContextWithTrace, - ): Future[Option[String]] = - queryParam._1 match { - case `notBufferedUpdateId` => Future.successful(Some(notBufferedUpdateId)) - case `unknownUpdateId` => Future.successful(None) - case other => fail(s"Unexpected $other transactionId") - } - } - - private val bufferedTransactionByIdReader = - new 
BufferedTransactionPointwiseReader[(String, Set[Party]), String]( - fetchFromPersistence = fetchFromPersistenceById, - fetchFromBuffer = queryParam => inMemoryFanout.lookupTransaction(queryParam._1), - toApiResponse = (tx, queryParam, lc) => toApiResponse(tx, queryParam._2, lc), - ) - - private val fetchFromPersistenceByOffset = - new FetchTransactionPointwiseFromPersistence[(Offset, Set[Party]), String] { - override def apply( - queryParam: (Offset, Set[Party]), - loggingContext: LoggingContextWithTrace, - ): Future[Option[String]] = - queryParam._1 match { - case `notBufferedOffset` => Future.successful(Some(notBufferedUpdateId)) - case `unknownOffset` => Future.successful(None) - case other => fail(s"Unexpected offset $other") - } - } - - private val bufferedTransactionByOffsetReader = - new BufferedTransactionPointwiseReader[(Offset, Set[Party]), String]( - fetchFromPersistence = fetchFromPersistenceByOffset, - fetchFromBuffer = queryParam => inMemoryFanout.lookupTransaction(queryParam._1), - toApiResponse = (tx, queryParam, lc) => toApiResponse(tx, queryParam._2, lc), - ) - - s"$className.fetch" should "convert to API response and return if transaction buffered" in { - for { - response1 <- bufferedTransactionByIdReader.fetch(bufferedUpdateId1 -> requestingParties) - response2 <- bufferedTransactionByIdReader.fetch(bufferedUpdateId2 -> requestingParties) - response3 <- bufferedTransactionByOffsetReader.fetch(bufferedOffset1 -> requestingParties) - response4 <- bufferedTransactionByOffsetReader.fetch(bufferedOffset2 -> requestingParties) - } yield { - response1 shouldBe Some(bufferedUpdateId1) - response2 shouldBe None - response3 shouldBe response1 - response4 shouldBe response2 - verify(toApiResponse, times(2)).apply(bufferedTransaction1, requestingParties, loggingContext) - verify(toApiResponse, times(2)).apply(bufferedTransaction2, requestingParties, loggingContext) - succeed - } - } - - s"$className.fetch" should "delegate to persistence fetch if transaction not buffered" in { - for { - response1 <- bufferedTransactionByIdReader.fetch(notBufferedUpdateId -> requestingParties) - response2 <- bufferedTransactionByIdReader.fetch(unknownUpdateId -> requestingParties) - response3 <- bufferedTransactionByOffsetReader.fetch(notBufferedOffset -> requestingParties) - response4 <- bufferedTransactionByOffsetReader.fetch(unknownOffset -> requestingParties) - } yield { - response1 shouldBe Some(notBufferedUpdateId) - response2 shouldBe None - response3 shouldBe response1 - response4 shouldBe response2 - verifyZeroInteractions(toApiResponse) - succeed - } - } - - private def tx(discriminator: String, offset: Offset) = - TransactionLogUpdate.TransactionAccepted( - updateId = discriminator, - workflowId = "", - commandId = "", - effectiveAt = Timestamp.Epoch, - offset = offset, - events = Vector(null), - completionStreamResponse = None, - synchronizerId = someSynchronizerId.toProtoPrimitive, - recordTime = Timestamp.Epoch, - externalTransactionHash = None, - ) -} diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/BufferedUpdatePointwiseReaderSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/BufferedUpdatePointwiseReaderSpec.scala index 4ce16141d8..89292af32b 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/BufferedUpdatePointwiseReaderSpec.scala +++ 
b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/BufferedUpdatePointwiseReaderSpec.scala @@ -13,6 +13,7 @@ import com.digitalasset.canton.platform.store.dao.BufferedUpdatePointwiseReader. ToApiResponse, } import com.digitalasset.canton.platform.store.interfaces.TransactionLogUpdate +import com.digitalasset.canton.protocol.{TestUpdateId, UpdateId} import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.daml.lf.data.Ref import com.digitalasset.daml.lf.data.Ref.Party @@ -24,7 +25,7 @@ import scala.concurrent.Future import scala.language.implicitConversions class BufferedUpdatePointwiseReaderSpec extends AsyncFlatSpec with MockitoSugar with BaseTest { - private val className = classOf[BufferedTransactionPointwiseReader[_, _]].getSimpleName + private val className = classOf[BufferedUpdatePointwiseReader[_, _]].getSimpleName private implicit val loggingContext: LoggingContextWithTrace = LoggingContextWithTrace( loggerFactory @@ -33,10 +34,10 @@ class BufferedUpdatePointwiseReaderSpec extends AsyncFlatSpec with MockitoSugar private val requestingParties = Set("p1", "p2").map(Ref.Party.assertFromString) private val someSynchronizerId = SynchronizerId.tryFromString("some::synchronizer id") - private val bufferedUpdateId1 = "bufferedTid_1" - private val bufferedUpdateId2 = "bufferedTid_2" - private val notBufferedUpdateId = "notBufferedTid" - private val unknownUpdateId = "unknownUpdateId" + private val bufferedUpdateId1 = TestUpdateId("bufferedTid_1") + private val bufferedUpdateId2 = TestUpdateId("bufferedTid_2") + private val notBufferedUpdateId = TestUpdateId("notBufferedTid") + private val unknownUpdateId = TestUpdateId("unknownUpdateId") private val bufferedOffset1 = Offset.firstOffset private val bufferedOffset2 = bufferedOffset1.increment @@ -57,29 +58,30 @@ class BufferedUpdatePointwiseReaderSpec extends AsyncFlatSpec with MockitoSugar when(inMemoryFanout.lookup(toLookupKey(notBufferedOffset))).thenReturn(None) when(inMemoryFanout.lookup(toLookupKey(unknownOffset))).thenReturn(None) - private val toApiResponse = mock[ToApiResponse[Set[Party], String]] + private val toApiResponse = mock[ToApiResponse[Set[Party], UpdateId]] when(toApiResponse.apply(bufferedTransaction1, requestingParties, loggingContext)) .thenReturn(Future.successful(Some(bufferedUpdateId1))) when(toApiResponse.apply(bufferedTransaction2, requestingParties, loggingContext)) .thenReturn(Future.successful(None)) private val fetchFromPersistence = - new FetchUpdatePointwiseFromPersistence[(LookupKey, Set[Party]), String] { + new FetchUpdatePointwiseFromPersistence[(LookupKey, Set[Party]), UpdateId] { override def apply( queryParam: (LookupKey, Set[Party]), loggingContext: LoggingContextWithTrace, - ): Future[Option[String]] = + ): Future[Option[UpdateId]] = queryParam._1 match { - case LookupKey.UpdateId(`notBufferedUpdateId`) | LookupKey.Offset(`notBufferedOffset`) => + case LookupKey.ByUpdateId(`notBufferedUpdateId`) | + LookupKey.ByOffset(`notBufferedOffset`) => Future.successful(Some(notBufferedUpdateId)) - case LookupKey.UpdateId(`unknownUpdateId`) | LookupKey.Offset(`unknownOffset`) => + case LookupKey.ByUpdateId(`unknownUpdateId`) | LookupKey.ByOffset(`unknownOffset`) => Future.successful(None) case other => fail(s"Unexpected $other lookup key") } } private val bufferedUpdateReader = - new BufferedUpdatePointwiseReader[(LookupKey, Set[Party]), String]( + new BufferedUpdatePointwiseReader[(LookupKey, Set[Party]), UpdateId]( fetchFromPersistence = 
fetchFromPersistence, fetchFromBuffer = queryParam => inMemoryFanout.lookup(queryParam._1), toApiResponse = (tx, queryParam, lc) => toApiResponse(tx, queryParam._2, lc), @@ -118,9 +120,9 @@ class BufferedUpdatePointwiseReaderSpec extends AsyncFlatSpec with MockitoSugar } } - private def tx(discriminator: String, offset: Offset) = + private def tx(discriminator: UpdateId, offset: Offset) = TransactionLogUpdate.TransactionAccepted( - updateId = discriminator, + updateId = discriminator.toHexString, workflowId = "", commandId = "", effectiveAt = Timestamp.Epoch, @@ -135,7 +137,7 @@ class BufferedUpdatePointwiseReaderSpec extends AsyncFlatSpec with MockitoSugar protected implicit def toLedgerString(s: String): Ref.LedgerString = Ref.LedgerString.assertFromString(s) - private def toLookupKey(str: String): LookupKey = LookupKey.UpdateId(str) + private def toLookupKey(str: UpdateId): LookupKey = LookupKey.ByUpdateId(str) - private def toLookupKey(offset: Offset): LookupKey = LookupKey.Offset(offset) + private def toLookupKey(offset: Offset): LookupKey = LookupKey.ByOffset(offset) } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoBackend.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoBackend.scala index afbecafec7..9610b427f4 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoBackend.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoBackend.scala @@ -13,10 +13,11 @@ import com.digitalasset.canton.ledger.api.ParticipantId import com.digitalasset.canton.logging.LoggingContextWithTrace.withNewLoggingContext import com.digitalasset.canton.logging.SuppressingLogger import com.digitalasset.canton.metrics.{LedgerApiServerHistograms, LedgerApiServerMetrics} +import com.digitalasset.canton.participant.store.ContractStore +import com.digitalasset.canton.participant.store.memory.InMemoryContractStore import com.digitalasset.canton.platform.config.{ ActiveContractsServiceStreamsConfig, ServerRole, - TransactionTreeStreamsConfig, UpdatesStreamsConfig, } import com.digitalasset.canton.platform.store.DbSupport.{ConnectionPoolConfig, DbConfig} @@ -29,7 +30,12 @@ import com.digitalasset.canton.platform.store.dao.events.{ LfValueTranslation, } import com.digitalasset.canton.platform.store.interning.StringInterningView -import com.digitalasset.canton.platform.store.{DbSupport, DbType, FlywayMigrations} +import com.digitalasset.canton.platform.store.{ + DbSupport, + DbType, + FlywayMigrations, + PruningOffsetService, +} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.daml.lf.archive.DamlLf.Archive import com.digitalasset.daml.lf.data.Ref @@ -104,7 +110,8 @@ private[dao] trait JdbcLedgerDaoBackend extends PekkoBeforeAndAfterAll with Base ) contractLoader <- ContractLoader.create( contractStorageBackend = dbSupport.storageBackendFactory.createContractStorageBackend( - stringInterningView + stringInterningView, + ledgerEndCache, ), dbDispatcher = dbSupport.dbDispatcher, metrics = metrics, @@ -147,7 +154,6 @@ private[dao] trait JdbcLedgerDaoBackend extends PekkoBeforeAndAfterAll with Base contractProcessingParallelism = eventsProcessingParallelism, ), updatesStreamsConfig = UpdatesStreamsConfig.default, - transactionTreeStreamsConfig = TransactionTreeStreamsConfig.default, globalMaxEventIdQueries = 20, 
globalMaxEventPayloadQueries = 10, tracer = OpenTelemetry.noop().getTracer("test"), @@ -159,6 +165,8 @@ private[dao] trait JdbcLedgerDaoBackend extends PekkoBeforeAndAfterAll with Base loadPackage = (packageId, _) => loadPackage(packageId), loggerFactory = loggerFactory, ), + pruningOffsetService = pruningOffsetService, + contractStore = contractStore, ) } } @@ -167,7 +175,11 @@ private[dao] trait JdbcLedgerDaoBackend extends PekkoBeforeAndAfterAll with Base protected final var ledgerDao: LedgerDao = _ protected var ledgerEndCache: MutableLedgerEndCache = _ + protected var contractStore: ContractStore = _ protected var stringInterningView: StringInterningView = _ + protected val pruningOffsetService: PruningOffsetService = mock[PruningOffsetService] + when(pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(None)) // `dbDispatcher` and `ledgerDao` depend on the `postgresFixture` which is in turn initialized `beforeAll` private var resource: Resource[LedgerDao] = _ @@ -177,6 +189,7 @@ private[dao] trait JdbcLedgerDaoBackend extends PekkoBeforeAndAfterAll with Base // We use the dispatcher here because the default Scalatest execution context is too slow. implicit val resourceContext: ResourceContext = ResourceContext(system.dispatcher) ledgerEndCache = MutableLedgerEndCache() + contractStore = new InMemoryContractStore(timeouts, loggerFactory) stringInterningView = new StringInterningView(loggerFactory) resource = withNewLoggingContext() { implicit loggingContext => for { diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoCompletionsSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoCompletionsSpec.scala index d9755c93ad..e0dead3f3a 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoCompletionsSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoCompletionsSpec.scala @@ -43,7 +43,7 @@ private[dao] trait JdbcLedgerDaoCompletionsSpec extends OptionValues with LoneEl val completion = response.completionResponse.completion.toList.head - completion.updateId shouldBe tx.updateId + completion.updateId shouldBe tx.updateId.toHexString completion.commandId shouldBe tx.commandId.value completion.status.value.code shouldBe io.grpc.Status.Code.OK.value() } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoContractsSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoContractsSpec.scala index 4786554103..f835a0efbe 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoContractsSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoContractsSpec.scala @@ -4,7 +4,7 @@ package com.digitalasset.canton.platform.store.dao import cats.syntax.parallel.* -import com.digitalasset.canton.data.Offset +import com.digitalasset.canton.ledger.participant.state.index.ContractStateStatus.{Active, Archived} import com.digitalasset.canton.platform.store.interfaces.LedgerDaoContractsReader import com.digitalasset.canton.platform.store.interfaces.LedgerDaoContractsReader.{ KeyAssigned, @@ -12,7 +12,6 @@ import 
com.digitalasset.canton.platform.store.interfaces.LedgerDaoContractsReade KeyUnassigned, } import com.digitalasset.canton.util.FutureInstances.* -import com.digitalasset.daml.lf.language.LanguageMajorVersion import com.digitalasset.daml.lf.transaction.{GlobalKey, GlobalKeyWithMaintainers} import com.digitalasset.daml.lf.value.Value.{ContractId, ValueText} import org.scalatest.flatspec.AsyncFlatSpec @@ -36,45 +35,29 @@ private[dao] trait JdbcLedgerDaoContractsSpec extends LoneElement with Inside wi stakeholders = Set(alice, bob), key = None, ) + ledgerEnd <- ledgerDao.lookupLedgerEnd() result <- contractsReader.lookupContractState( nonTransient(tx).loneElement, - offset, + ledgerEnd.map(_.lastEventSeqId).getOrElse(0L), ) } yield { - result.collect { case active: LedgerDaoContractsReader.ActiveContract => - ( - active.contract.version, - active.contract.packageName, - active.contract.templateId, - active.contract.createArg, - active.stakeholders, - active.contract.signatories, - ) - } shouldEqual Some( - ( - LanguageMajorVersion.V2.maxStableVersion, - somePackageName, - someTemplateId, - someContractArgument, - Set(alice, bob), - Set(alice, bob), - ) - ) + result shouldBe Some(Active) } } it should "store contracts with a transient contract in the global divulgence and do not fetch it" in { for { - (offset, tx) <- store(fullyTransientWithChildren, contractActivenessChanged = false) + (_, tx) <- store(fullyTransientWithChildren, contractActivenessChanged = false) + ledgerEnd <- ledgerDao.lookupLedgerEnd() contractId1 = created(tx).head contractId2 = created(tx).tail.loneElement result1 <- contractsReader.lookupContractState( contractId1, - offset, + ledgerEnd.map(_.lastEventSeqId).getOrElse(0L), ) result2 <- contractsReader.lookupContractState( contractId2, - offset, + ledgerEnd.map(_.lastEventSeqId).getOrElse(0L), ) } yield { result1 shouldBe empty @@ -92,29 +75,15 @@ private[dao] trait JdbcLedgerDaoContractsSpec extends LoneElement with Inside wi Some(ledgerEndAfterArchive) <- ledgerDao.lookupLedgerEnd() queryAfterCreate <- contractsReader.lookupContractState( contractId, - ledgerEndAtCreate.lastOffset, + ledgerEndAtCreate.lastEventSeqId, ) queryAfterArchive <- contractsReader.lookupContractState( contractId, - ledgerEndAfterArchive.lastOffset, + ledgerEndAfterArchive.lastEventSeqId, ) } yield { - queryAfterCreate.value match { - case LedgerDaoContractsReader.ActiveContract(contract) => - contract.version shouldBe LanguageMajorVersion.V2.maxStableVersion - contract.packageName shouldBe somePackageName - contract.templateId shouldBe someTemplateId - contract.createArg shouldBe someContractArgument - contract.stakeholders should contain theSameElementsAs Set(alice) - case LedgerDaoContractsReader.ArchivedContract(_) => - fail("Contract should appear as active") - } - queryAfterArchive.value match { - case _: LedgerDaoContractsReader.ActiveContract => - fail("Contract should appear as archived") - case LedgerDaoContractsReader.ArchivedContract(stakeholders) => - stakeholders should contain theSameElementsAs Set(alice) - } + queryAfterCreate.value shouldBe Active + queryAfterArchive.value shouldBe Archived } } @@ -142,17 +111,16 @@ private[dao] trait JdbcLedgerDaoContractsSpec extends LoneElement with Inside wi ledgerEndAfterArchive <- ledgerDao.lookupLedgerEnd() queryAfterCreate <- contractsReader.lookupKeyState( key.globalKey, - ledgerEndAtCreate.value.lastOffset, + ledgerEndAtCreate.value.lastEventSeqId, ) queryAfterArchive <- contractsReader.lookupKeyState( key.globalKey, - 
ledgerEndAfterArchive.value.lastOffset, + ledgerEndAfterArchive.value.lastEventSeqId, ) } yield { queryAfterCreate match { - case LedgerDaoContractsReader.KeyAssigned(fetchedContractId, stakeholders) => + case LedgerDaoContractsReader.KeyAssigned(fetchedContractId) => fetchedContractId shouldBe contractId - stakeholders shouldBe Set(alice, bob) case _ => fail("Key should be assigned") } queryAfterArchive shouldBe LedgerDaoContractsReader.KeyUnassigned @@ -182,14 +150,14 @@ private[dao] trait JdbcLedgerDaoContractsSpec extends LoneElement with Inside wi def fetchAll( keys: Seq[GlobalKey], - offset: Offset, + eventSeqId: Long, ): Future[(Map[GlobalKey, KeyState], Map[GlobalKey, KeyState])] = { val oneByOneF = keys .parTraverse { key => - contractsReader.lookupKeyState(key, offset).map(state => key -> state) + contractsReader.lookupKeyState(key, eventSeqId).map(state => key -> state) } .map(_.toMap) - val togetherF = contractsReader.lookupKeyStatesFromDb(keys, offset) + val togetherF = contractsReader.lookupKeyStatesFromDb(keys, eventSeqId) for { oneByOne <- oneByOneF together <- togetherF @@ -203,44 +171,51 @@ private[dao] trait JdbcLedgerDaoContractsSpec extends LoneElement with Inside wi val (oneByOne, together) = results oneByOne shouldBe together oneByOne.map { - case (k, KeyAssigned(cid, _)) => (k, Some(cid)) + case (k, KeyAssigned(cid)) => (k, Some(cid)) case (k, KeyUnassigned) => (k, None) } shouldBe expected.map { case (k, v) => (k.globalKey, v) } } for { // have AA at offsetA - (textA, keyA, cidA, offsetA) <- genContractWithKey() + (textA, keyA, cidA, _) <- genContractWithKey() + eventSeqIdA <- ledgerDao.lookupLedgerEnd().map(_.value.lastEventSeqId) _ <- store(singleNonConsumingExercise(cidA)) // have AA,BB at offsetB - (_, keyB, cidB, offsetB) <- genContractWithKey() + (_, keyB, cidB, _) <- genContractWithKey() + eventSeqIdB <- ledgerDao.lookupLedgerEnd().map(_.value.lastEventSeqId) // have BB at offsetPreC - (offsetPreC, _) <- store(txArchiveContract(alice, (cidA, None))) + (_, _) <- store(txArchiveContract(alice, (cidA, None))) + eventSeqIdPreC <- ledgerDao.lookupLedgerEnd().map(_.value.lastEventSeqId) // have BB, CC at offsetC - (_, keyC, cidC, offsetC) <- genContractWithKey() + (_, keyC, cidC, _) <- genContractWithKey() + eventSeqIdC <- ledgerDao.lookupLedgerEnd().map(_.value.lastEventSeqId) // have AA, BB, CC at offsetA2 - (_, keyA2, cidA2, offsetA2) <- genContractWithKey(textA.value) + (_, keyA2, cidA2, _) <- genContractWithKey(textA.value) + eventSeqIdA2 <- ledgerDao.lookupLedgerEnd().map(_.value.lastEventSeqId) // have AA, BB at offset D - (offsetD, _) <- store(txArchiveContract(alice, (cidC, None))) + (_, _) <- store(txArchiveContract(alice, (cidC, None))) + eventSeqIdD <- ledgerDao.lookupLedgerEnd().map(_.value.lastEventSeqId) // have AA at offsetE - (offsetE, _) <- store(txArchiveContract(alice, (cidB, None))) + (_, _) <- store(txArchiveContract(alice, (cidB, None))) + eventSeqIdE <- ledgerDao.lookupLedgerEnd().map(_.value.lastEventSeqId) allKeys = Seq(keyA, keyB, keyC).map(_.globalKey) - atOffsetA <- fetchAll(allKeys, offsetA) - atOffsetB <- fetchAll(allKeys, offsetB) - atOffsetPreC <- fetchAll(allKeys, offsetPreC) - atOffsetC <- fetchAll(allKeys, offsetC) - atOffsetA2 <- fetchAll(allKeys, offsetA2) - atOffsetD <- fetchAll(allKeys, offsetD) - atOffsetE <- fetchAll(allKeys, offsetE) + atEventSeqIdA <- fetchAll(allKeys, eventSeqIdA) + atEventSeqIdB <- fetchAll(allKeys, eventSeqIdB) + atEventSeqIdPreC <- fetchAll(allKeys, eventSeqIdPreC) + atEventSeqIdC <- 
fetchAll(allKeys, eventSeqIdC) + atEventSeqIdA2 <- fetchAll(allKeys, eventSeqIdA2) + atEventSeqIdD <- fetchAll(allKeys, eventSeqIdD) + atEventSeqIdE <- fetchAll(allKeys, eventSeqIdE) } yield { keyA shouldBe keyA2 - verifyMatch(atOffsetA, Map(keyA -> Some(cidA), keyB -> None, keyC -> None)) - verifyMatch(atOffsetB, Map(keyA -> Some(cidA), keyB -> Some(cidB), keyC -> None)) - verifyMatch(atOffsetPreC, Map(keyA -> None, keyB -> Some(cidB), keyC -> None)) - verifyMatch(atOffsetC, Map(keyA -> None, keyB -> Some(cidB), keyC -> Some(cidC))) - verifyMatch(atOffsetA2, Map(keyA -> Some(cidA2), keyB -> Some(cidB), keyC -> Some(cidC))) - verifyMatch(atOffsetD, Map(keyA -> Some(cidA2), keyB -> Some(cidB), keyC -> None)) - verifyMatch(atOffsetE, Map(keyA -> Some(cidA2), keyB -> None, keyC -> None)) + verifyMatch(atEventSeqIdA, Map(keyA -> Some(cidA), keyB -> None, keyC -> None)) + verifyMatch(atEventSeqIdB, Map(keyA -> Some(cidA), keyB -> Some(cidB), keyC -> None)) + verifyMatch(atEventSeqIdPreC, Map(keyA -> None, keyB -> Some(cidB), keyC -> None)) + verifyMatch(atEventSeqIdC, Map(keyA -> None, keyB -> Some(cidB), keyC -> Some(cidC))) + verifyMatch(atEventSeqIdA2, Map(keyA -> Some(cidA2), keyB -> Some(cidB), keyC -> Some(cidC))) + verifyMatch(atEventSeqIdD, Map(keyA -> Some(cidA2), keyB -> Some(cidB), keyC -> None)) + verifyMatch(atEventSeqIdE, Map(keyA -> Some(cidA2), keyB -> None, keyC -> None)) } } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoExceptionSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoExceptionSpec.scala index d0d9b65dac..6b9f2d9b59 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoExceptionSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoExceptionSpec.scala @@ -3,7 +3,7 @@ package com.digitalasset.canton.platform.store.dao -import com.digitalasset.canton.platform.store.interfaces.LedgerDaoContractsReader +import com.digitalasset.canton.ledger.participant.state.index.ContractStateStatus.Active import com.digitalasset.daml.lf.transaction.test.TreeTransactionBuilder.* import com.digitalasset.daml.lf.transaction.test.{ TestIdFactory, @@ -45,12 +45,13 @@ private[dao] trait JdbcLedgerDaoExceptionSpec val offsetAndEntry = fromTransaction(tx) for { - (offset, _) <- store(offsetAndEntry) - result1 <- contractsReader.lookupContractState(cid1, offset) - result2 <- contractsReader.lookupContractState(cid2, offset) + (_, _) <- store(offsetAndEntry) + eventSeqId <- ledgerDao.lookupLedgerEnd().map(_.value.lastEventSeqId) + result1 <- contractsReader.lookupContractState(cid1, eventSeqId) + result2 <- contractsReader.lookupContractState(cid2, eventSeqId) } yield { result1 shouldBe None - result2.value shouldBe a[LedgerDaoContractsReader.ActiveContract] + result2.value shouldBe Active } } } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoSpecH2.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoSpecH2.scala index 939208d4ff..d88f26fe9d 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoSpecH2.scala +++ 
b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoSpecH2.scala @@ -18,5 +18,4 @@ final class JdbcLedgerDaoSpecH2 with JdbcLedgerDaoExceptionSpec with JdbcLedgerDaoPartiesSpec with JdbcLedgerDaoTransactionsSpec - with JdbcLedgerDaoTransactionTreesSpec with JdbcLedgerDaoTransactionsWriterSpec diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoSpecPostgres.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoSpecPostgres.scala index 7ec003683f..bf4cdbd1c5 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoSpecPostgres.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoSpecPostgres.scala @@ -19,5 +19,4 @@ final class JdbcLedgerDaoSpecPostgres with JdbcLedgerDaoExceptionSpec with JdbcLedgerDaoPartiesSpec with JdbcLedgerDaoTransactionsSpec - with JdbcLedgerDaoTransactionTreesSpec with JdbcLedgerDaoTransactionsWriterSpec diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoSuite.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoSuite.scala index 817f7ee1eb..0e01f06cf1 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoSuite.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoSuite.scala @@ -8,6 +8,7 @@ import com.digitalasset.canton.ledger.api.TemplateFilter import com.digitalasset.canton.ledger.participant.state import com.digitalasset.canton.logging.LoggingContextWithTrace import com.digitalasset.canton.platform.store.entries.LedgerEntry +import com.digitalasset.canton.protocol.TestUpdateId import com.digitalasset.canton.testing.utils.TestModels import com.digitalasset.canton.util.JarResourceUtils import com.digitalasset.daml.lf.archive.{DamlLf, DarParser, Decode} @@ -23,8 +24,8 @@ import com.digitalasset.daml.lf.data.Ref.{ import com.digitalasset.daml.lf.data.Time.Timestamp import com.digitalasset.daml.lf.data.{Bytes, FrontStack, ImmArray, Ref, Time} import com.digitalasset.daml.lf.language.LanguageVersion -import com.digitalasset.daml.lf.transaction.* import com.digitalasset.daml.lf.transaction.test.{NodeIdTransactionBuilder, TransactionBuilder} +import com.digitalasset.daml.lf.transaction.{SerializationVersion as LfSerializationVersion, *} import com.digitalasset.daml.lf.value.Value as LfValue import com.digitalasset.daml.lf.value.Value.{ContractId, ThinContractInstance, ValueText} import org.apache.pekko.stream.scaladsl.Sink @@ -151,7 +152,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa ), ) - private[this] val txVersion = LanguageVersion.Major.V2.maxStableVersion + private[this] val txVersion = LfSerializationVersion.V1 private[this] def newBuilder(): NodeIdTransactionBuilder = new NodeIdTransactionBuilder protected final val someContractInstance = @@ -233,7 +234,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa key: Option[GlobalKeyWithMaintainers] = None, templateId: Identifier = someTemplateId, contractArgument: LfValue = someContractArgument, - transactionVersion: LanguageVersion = 
LanguageVersion.v2_1, + serializationVersion: LfSerializationVersion = LfSerializationVersion.V1, ): Node.Create = Node.Create( coid = absCid, @@ -243,7 +244,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa signatories = signatories, stakeholders = stakeholders, keyOpt = key, - version = transactionVersion, + version = serializationVersion, ) protected final def exerciseNode( @@ -326,7 +327,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa val let = Timestamp.now() offset -> LedgerEntry.Transaction( commandId = Some(s"commandId$id"), - updateId = s"trId$id", + updateId = TestUpdateId(s"trId$id"), userId = Some("userId1"), submissionId = Some(s"submissionId$id"), actAs = actAs, @@ -352,7 +353,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa val let = Timestamp.now() offset -> LedgerEntry.Transaction( commandId = Some(s"commandId$id"), - updateId = s"trId$id", + updateId = TestUpdateId(s"trId$id"), userId = Some("userId1"), submissionId = Some(s"submissionId$id"), actAs = actAs, @@ -406,7 +407,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa val let = Timestamp.now() offset -> LedgerEntry.Transaction( commandId = Some(s"commandId$id"), - updateId = s"trId$id", + updateId = TestUpdateId(s"trId$id"), userId = Some("userId1"), submissionId = Some(s"submissionId$id"), actAs = List("Alice"), @@ -428,7 +429,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa val let = Timestamp.now() offset -> LedgerEntry.Transaction( commandId = Some(s"commandId$id"), - updateId = s"trId$id", + updateId = TestUpdateId(s"trId$id"), userId = Some("userId1"), submissionId = Some(s"submissionId$id"), actAs = List(alice, bob, charlie), @@ -450,7 +451,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa val let = Timestamp.now() offset -> LedgerEntry.Transaction( commandId = Some(s"commandId$id"), - updateId = s"trId$id", + updateId = TestUpdateId(s"trId$id"), userId = Some("userId1"), submissionId = Some(s"submissionId$id"), actAs = List("Alice"), @@ -475,7 +476,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa val let = Timestamp.now() offset -> LedgerEntry.Transaction( commandId = Some(s"commandId$id"), - updateId = txId, + updateId = TestUpdateId(txId), userId = Some("userId1"), submissionId = Some(s"submissionId$id"), actAs = List("Alice"), @@ -497,7 +498,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa val let = Timestamp.now() nextOffset() -> LedgerEntry.Transaction( commandId = Some(UUID.randomUUID().toString), - updateId = UUID.randomUUID().toString, + updateId = TestUpdateId(UUID.randomUUID().toString), userId = Some("userId1"), submissionId = Some(UUID.randomUUID.toString), actAs = List(alice), @@ -525,7 +526,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa val let = Timestamp.now() nextOffset() -> LedgerEntry.Transaction( commandId = Some(UUID.randomUUID.toString), - updateId = UUID.randomUUID().toString, + updateId = TestUpdateId(UUID.randomUUID().toString), userId = Some("userId1"), submissionId = Some(UUID.randomUUID.toString), actAs = List(alice), @@ -582,7 +583,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa val let = Timestamp.now() nextOffset() -> LedgerEntry.Transaction( commandId = Some(UUID.randomUUID().toString), - updateId = 
UUID.randomUUID().toString, + updateId = TestUpdateId(UUID.randomUUID().toString), userId = Some("userId1"), submissionId = Some(UUID.randomUUID().toString), actAs = List(charlie), @@ -622,7 +623,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa } yield nodeId -> parties nextOffset() -> LedgerEntry.Transaction( commandId = Some(UUID.randomUUID().toString), - updateId = UUID.randomUUID.toString, + updateId = TestUpdateId(UUID.randomUUID.toString), userId = Some("userId1"), submissionId = Some(UUID.randomUUID.toString), actAs = List(operator), @@ -695,7 +696,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa nextOffset() -> LedgerEntry.Transaction( commandId = Some(UUID.randomUUID().toString), - updateId = txUuid.getOrElse(UUID.randomUUID.toString), + updateId = TestUpdateId(txUuid.getOrElse(UUID.randomUUID.toString)), userId = Some(defaultUserId), submissionId = Some(UUID.randomUUID().toString), actAs = List(party), @@ -740,7 +741,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa ) nextOffset() -> LedgerEntry.Transaction( commandId = Some(UUID.randomUUID().toString), - updateId = UUID.randomUUID.toString, + updateId = TestUpdateId(UUID.randomUUID.toString), userId = Some(defaultUserId), submissionId = Some(UUID.randomUUID().toString), actAs = List(party), @@ -771,7 +772,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa ) nextOffset() -> LedgerEntry.Transaction( commandId = Some(UUID.randomUUID().toString), - updateId = UUID.randomUUID.toString, + updateId = TestUpdateId(UUID.randomUUID.toString), userId = Some(defaultUserId), submissionId = Some(UUID.randomUUID().toString), actAs = List(party), @@ -804,7 +805,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa ) nextOffset() -> LedgerEntry.Transaction( commandId = Some(UUID.randomUUID().toString), - updateId = UUID.randomUUID.toString, + updateId = TestUpdateId(UUID.randomUUID.toString), userId = Some(defaultUserId), submissionId = Some(UUID.randomUUID().toString), actAs = List(party), @@ -819,7 +820,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa protected final def emptyTransaction(party: Party): (Offset, LedgerEntry.Transaction) = nextOffset() -> LedgerEntry.Transaction( commandId = Some(UUID.randomUUID().toString), - updateId = UUID.randomUUID.toString, + updateId = TestUpdateId(UUID.randomUUID.toString), userId = Some(defaultUserId), submissionId = Some(UUID.randomUUID().toString), actAs = List(party), diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoTransactionTreesSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoTransactionTreesSpec.scala deleted file mode 100644 index c10f9f6008..0000000000 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoTransactionTreesSpec.scala +++ /dev/null @@ -1,446 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.platform.store.dao - -import com.daml.ledger.api.v2.transaction.TransactionTree -import com.daml.ledger.api.v2.update_service.GetUpdateTreesResponse -import com.digitalasset.canton.data.Offset -import com.digitalasset.canton.ledger.api.util.TimestampConversion -import com.digitalasset.canton.platform.store.dao.EventProjectionProperties.UseOriginalViewPackageId -import com.digitalasset.canton.platform.store.entries.LedgerEntry -import com.digitalasset.canton.platform.store.utils.EventOps.TreeEventOps -import com.digitalasset.daml.lf.data.Ref -import com.digitalasset.daml.lf.transaction.Node -import org.apache.pekko.NotUsed -import org.apache.pekko.stream.scaladsl.{Sink, Source} -import org.scalatest.* -import org.scalatest.flatspec.AsyncFlatSpec -import org.scalatest.matchers.should.Matchers - -import scala.annotation.nowarn -import scala.concurrent.Future - -// TODO(#23504) remove TransactionTree related methods when TransactionTree is removed from the API -@nowarn("cat=deprecation") -private[dao] trait JdbcLedgerDaoTransactionTreesSpec - extends OptionValues - with Inside - with LoneElement { - this: AsyncFlatSpec with Matchers with JdbcLedgerDaoSuite => - - behavior of "JdbcLedgerDao (lookupTransactionTreeById, lookupTransactionTreeByOffset)" - - it should "return nothing for a mismatching transaction id" in { - for { - (_, tx) <- store(singleCreate) - result <- ledgerDao.updateReader - .lookupTransactionTreeById( - updateId = "WRONG", - tx.actAs.toSet, - EventProjectionProperties( - verbose = true - )(interfaceViewPackageUpgrade = UseOriginalViewPackageId), - ) - } yield { - result shouldBe None - } - } - - it should "return nothing for a mismatching offset" in { - for { - (_, tx) <- store(singleCreate) - result <- ledgerDao.updateReader - .lookupTransactionTreeByOffset( - offset = Offset.tryFromLong(12345678L), - tx.actAs.toSet, - EventProjectionProperties( - verbose = true - )(interfaceViewPackageUpgrade = UseOriginalViewPackageId), - ) - } yield { - result shouldBe None - } - } - - it should "return nothing for a mismatching party" in { - for { - (offset, tx) <- store(singleCreate) - resultById <- ledgerDao.updateReader - .lookupTransactionTreeById( - tx.updateId, - Set("WRONG"), - EventProjectionProperties( - verbose = true - )(interfaceViewPackageUpgrade = UseOriginalViewPackageId), - ) - resultByOffset <- ledgerDao.updateReader - .lookupTransactionTreeByOffset( - offset, - Set("WRONG"), - EventProjectionProperties( - verbose = true - )(interfaceViewPackageUpgrade = UseOriginalViewPackageId), - ) - } yield { - resultById shouldBe None - resultByOffset shouldBe resultById - } - } - - it should "return the expected transaction tree for a correct request (create)" in { - for { - (offset, tx) <- store(singleCreate) - resultById <- ledgerDao.updateReader - .lookupTransactionTreeById( - tx.updateId, - tx.actAs.toSet, - EventProjectionProperties( - verbose = true - )(interfaceViewPackageUpgrade = UseOriginalViewPackageId), - ) - resultByOffset <- ledgerDao.updateReader - .lookupTransactionTreeByOffset( - offset, - tx.actAs.toSet, - EventProjectionProperties( - verbose = true - )(interfaceViewPackageUpgrade = UseOriginalViewPackageId), - ) - } yield { - inside(resultById.value.transaction) { case Some(transaction) => - inside(tx.transaction.nodes.headOption) { case Some((nodeId, createNode: Node.Create)) => - transaction.commandId shouldBe tx.commandId.value - transaction.offset shouldBe offset.unwrap - 
TimestampConversion.toLf( - transaction.effectiveAt.value, - TimestampConversion.ConversionMode.Exact, - ) shouldBe tx.ledgerEffectiveTime - transaction.updateId shouldBe tx.updateId - transaction.workflowId shouldBe tx.workflowId.getOrElse("") - val created = transaction.eventsById.values.loneElement.getCreated - created.offset shouldBe offset.unwrap - created.nodeId shouldBe nodeId.index - created.witnessParties should contain only (tx.actAs*) - created.contractKey shouldBe None - created.createArguments shouldNot be(None) - created.signatories should contain theSameElementsAs createNode.signatories - created.observers should contain theSameElementsAs createNode.stakeholders.diff( - createNode.signatories - ) - created.templateId shouldNot be(None) - } - } - resultByOffset shouldBe resultById - } - } - - it should "return the expected transaction tree for a correct request (exercise)" in { - for { - (_, create) <- store(singleCreate) - (offset, exercise) <- store(singleExercise(nonTransient(create).loneElement)) - resultById <- ledgerDao.updateReader - .lookupTransactionTreeById( - exercise.updateId, - exercise.actAs.toSet, - EventProjectionProperties( - verbose = true - )(interfaceViewPackageUpgrade = UseOriginalViewPackageId), - ) - resultByOffset <- ledgerDao.updateReader - .lookupTransactionTreeByOffset( - offset, - exercise.actAs.toSet, - EventProjectionProperties( - verbose = true - )(interfaceViewPackageUpgrade = UseOriginalViewPackageId), - ) - } yield { - inside(resultById.value.transaction) { case Some(transaction) => - inside(exercise.transaction.nodes.headOption) { - case Some((nodeId, exerciseNode: Node.Exercise)) => - transaction.commandId shouldBe exercise.commandId.value - transaction.offset shouldBe offset.unwrap - TimestampConversion.toLf( - transaction.effectiveAt.value, - TimestampConversion.ConversionMode.Exact, - ) shouldBe exercise.ledgerEffectiveTime - transaction.updateId shouldBe exercise.updateId - transaction.workflowId shouldBe exercise.workflowId.getOrElse("") - val exercised = transaction.eventsById.values.loneElement.getExercised - exercised.offset shouldBe offset.unwrap - exercised.nodeId shouldBe nodeId.index - exercised.witnessParties should contain only (exercise.actAs*) - exercised.contractId shouldBe exerciseNode.targetCoid.coid - exercised.templateId shouldNot be(None) - exercised.actingParties should contain theSameElementsAs exerciseNode.actingParties - exercised.lastDescendantNodeId shouldBe nodeId.index - exercised.choice shouldBe exerciseNode.choiceId - exercised.choiceArgument shouldNot be(None) - exercised.consuming shouldBe true - exercised.exerciseResult shouldNot be(None) - } - } - resultByOffset shouldBe resultById - } - } - - it should "return the expected transaction tree for a correct request (create, exercise)" in { - for { - (offset, tx) <- store(fullyTransient()) - resultById <- ledgerDao.updateReader - .lookupTransactionTreeById( - tx.updateId, - tx.actAs.toSet, - EventProjectionProperties( - verbose = true - )(interfaceViewPackageUpgrade = UseOriginalViewPackageId), - ) - resultByOffset <- ledgerDao.updateReader - .lookupTransactionTreeByOffset( - offset, - tx.actAs.toSet, - EventProjectionProperties( - verbose = true - )(interfaceViewPackageUpgrade = UseOriginalViewPackageId), - ) - } yield { - inside(resultById.value.transaction) { case Some(transaction) => - val (createNodeId, createNode) = - tx.transaction.nodes.collectFirst { case (nodeId, node: Node.Create) => - nodeId -> node - }.value - val (exerciseNodeId, exerciseNode) = 
- tx.transaction.nodes.collectFirst { case (nodeId, node: Node.Exercise) => - nodeId -> node - }.value - - transaction.commandId shouldBe tx.commandId.value - transaction.offset shouldBe offset.unwrap - transaction.updateId shouldBe tx.updateId - transaction.workflowId shouldBe tx.workflowId.getOrElse("") - TimestampConversion.toLf( - transaction.effectiveAt.value, - TimestampConversion.ConversionMode.Exact, - ) shouldBe tx.ledgerEffectiveTime - - val created = transaction - .eventsById(createNodeId.index) - .getCreated - val exercised = transaction - .eventsById(exerciseNodeId.index) - .getExercised - - created.offset shouldBe offset.unwrap - created.nodeId shouldBe createNodeId.index - created.witnessParties should contain only (tx.actAs*) - created.contractKey shouldBe None - created.createArguments shouldNot be(None) - created.signatories should contain theSameElementsAs createNode.signatories - created.observers should contain theSameElementsAs createNode.stakeholders.diff( - createNode.signatories - ) - created.templateId shouldNot be(None) - - exercised.offset shouldBe offset.unwrap - exercised.nodeId shouldBe exerciseNodeId.index - exercised.witnessParties should contain only (tx.actAs*) - exercised.contractId shouldBe exerciseNode.targetCoid.coid - exercised.templateId shouldNot be(None) - exercised.actingParties should contain theSameElementsAs exerciseNode.actingParties - exercised.lastDescendantNodeId shouldBe exerciseNodeId.index - exercised.choice shouldBe exerciseNode.choiceId - exercised.choiceArgument shouldNot be(None) - exercised.consuming shouldBe true - exercised.exerciseResult shouldNot be(None) - } - resultByOffset shouldBe resultById - } - } - - it should "return a transaction tree with the expected shape for a partially visible transaction" in { - for { - (offset, tx) <- store(partiallyVisible) - resultById <- ledgerDao.updateReader - .lookupTransactionTreeById( - tx.updateId, - Set(alice), - EventProjectionProperties( - verbose = true - )(interfaceViewPackageUpgrade = UseOriginalViewPackageId), - ) // only two children are visible to Alice - resultByOffset <- ledgerDao.updateReader - .lookupTransactionTreeByOffset( - offset, - Set(alice), - EventProjectionProperties( - verbose = true - )(interfaceViewPackageUpgrade = UseOriginalViewPackageId), - ) - } yield { - inside(resultById.value.transaction) { case Some(transaction) => - transaction.eventsById should have size 2 - } - resultByOffset shouldBe resultById - } - } - - behavior of "JdbcLedgerDao (getTransactionTrees)" - - it should "match the results of lookupTransactionTreeById" in { - for { - (from, to, transactions) <- storeTestFixture() - lookups <- lookupIndividually(transactions, Set(alice, bob, charlie)) - result <- transactionsOf( - ledgerDao.updateReader - .getTransactionTrees( - startInclusive = from, - endInclusive = to, - requestingParties = Some(Set(alice, bob, charlie)), - eventProjectionProperties = EventProjectionProperties( - verbose = true - )(interfaceViewPackageUpgrade = UseOriginalViewPackageId), - ) - ) - } yield { - comparable(result) should contain theSameElementsInOrderAs comparable(lookups) - } - } - - it should "work correctly for party-wildcard" in { - for { - (from, to, _) <- storeTestFixture() - result <- transactionsOf( - ledgerDao.updateReader - .getTransactionTrees( - startInclusive = from, - endInclusive = to, - requestingParties = Some(Set(alice, bob, charlie)), - eventProjectionProperties = EventProjectionProperties( - verbose = true - )(interfaceViewPackageUpgrade = 
UseOriginalViewPackageId), - ) - ) - resultPartyWildcard <- transactionsOf( - ledgerDao.updateReader - .getTransactionTrees( - startInclusive = from, - endInclusive = to, - requestingParties = None, - eventProjectionProperties = EventProjectionProperties( - verbose = true - )(interfaceViewPackageUpgrade = UseOriginalViewPackageId), - ) - ) - } yield { - comparable(result) should contain theSameElementsInOrderAs comparable(resultPartyWildcard) - } - } - - it should "filter correctly by party" in { - for { - from <- ledgerDao.lookupLedgerEnd() - (_, tx) <- store( - multipleCreates( - charlie, - Seq( - (alice, someTemplateId, someContractArgument), - (bob, someTemplateId, someContractArgument), - ), - ) - ) - to <- ledgerDao.lookupLedgerEnd() - individualLookupForAlice <- lookupIndividually(Seq(tx), as = Set(alice)) - individualLookupForBob <- lookupIndividually(Seq(tx), as = Set(bob)) - individualLookupForCharlie <- lookupIndividually(Seq(tx), as = Set(charlie)) - resultForAlice <- transactionsOf( - ledgerDao.updateReader - .getTransactionTrees( - startInclusive = from.fold(Offset.firstOffset)(_.lastOffset.increment), - endInclusive = to.value.lastOffset, - requestingParties = Some(Set(alice)), - eventProjectionProperties = EventProjectionProperties( - verbose = true - )(interfaceViewPackageUpgrade = UseOriginalViewPackageId), - ) - ) - resultForBob <- transactionsOf( - ledgerDao.updateReader - .getTransactionTrees( - startInclusive = from.fold(Offset.firstOffset)(_.lastOffset.increment), - endInclusive = to.value.lastOffset, - requestingParties = Some(Set(bob)), - eventProjectionProperties = EventProjectionProperties( - verbose = true - )(interfaceViewPackageUpgrade = UseOriginalViewPackageId), - ) - ) - resultForCharlie <- transactionsOf( - ledgerDao.updateReader - .getTransactionTrees( - startInclusive = from.fold(Offset.firstOffset)(_.lastOffset.increment), - endInclusive = to.value.lastOffset, - requestingParties = Some(Set(charlie)), - eventProjectionProperties = EventProjectionProperties( - verbose = true - )(interfaceViewPackageUpgrade = UseOriginalViewPackageId), - ) - ) - } yield { - individualLookupForAlice should contain theSameElementsInOrderAs resultForAlice - individualLookupForBob should contain theSameElementsInOrderAs resultForBob - individualLookupForCharlie should contain theSameElementsInOrderAs resultForCharlie - } - } - - private def storeTestFixture(): Future[(Offset, Offset, Seq[LedgerEntry.Transaction])] = - for { - from <- ledgerDao.lookupLedgerEnd() - (_, t1) <- store(singleCreate) - (_, t2) <- store(singleCreate) - (_, t3) <- store(singleExercise(nonTransient(t2).loneElement)) - (_, t4) <- store(fullyTransient()) - to <- ledgerDao.lookupLedgerEnd() - } yield ( - from.fold(Offset.firstOffset)(_.lastOffset.increment), - to.value.lastOffset, - Seq(t1, t2, t3, t4), - ) - - private def lookupIndividually( - transactions: Seq[LedgerEntry.Transaction], - as: Set[Ref.Party], - ): Future[Seq[TransactionTree]] = - Future - .sequence( - transactions.map(tx => - ledgerDao.updateReader - .lookupTransactionTreeById( - tx.updateId, - as, - EventProjectionProperties( - verbose = true - )(interfaceViewPackageUpgrade = UseOriginalViewPackageId), - ) - ) - ) - .map(_.flatMap(_.toList.flatMap(_.transaction.toList))) - - private def transactionsOf( - source: Source[(Offset, GetUpdateTreesResponse), NotUsed] - ): Future[Seq[TransactionTree]] = - source - .map(_._2) - .runWith(Sink.seq) - .map(_.flatMap(_.update match { - case GetUpdateTreesResponse.Update.TransactionTree(txTree) => 
Seq(txTree) - case _ => Nil - })) - - // Ensure two sequences of transaction trees are comparable: - // - witnesses do not have to appear in a specific order - private def comparable(txs: Seq[TransactionTree]): Seq[TransactionTree] = - txs.map(tx => - tx.copy(eventsById = tx.eventsById.view.mapValues(_.modifyWitnessParties(_.sorted)).toMap) - ) -} diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoTransactionsSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoTransactionsSpec.scala index 8b6c5a460a..73ef96b464 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoTransactionsSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoTransactionsSpec.scala @@ -12,7 +12,6 @@ import com.digitalasset.canton.ledger.api.TransactionShape.AcsDelta import com.digitalasset.canton.ledger.api.util.{LfEngineToApi, TimestampConversion} import com.digitalasset.canton.ledger.participant.state.index.IndexerPartyDetails import com.digitalasset.canton.platform.store.backend.common.UpdatePointwiseQueries.LookupKey -import com.digitalasset.canton.platform.store.dao.* import com.digitalasset.canton.platform.store.dao.EventProjectionProperties.UseOriginalViewPackageId import com.digitalasset.canton.platform.store.entries.LedgerEntry import com.digitalasset.canton.platform.store.utils.EventOps.EventOps @@ -22,6 +21,7 @@ import com.digitalasset.canton.platform.{ InternalUpdateFormat, TemplatePartiesFilter, } +import com.digitalasset.canton.protocol.TestUpdateId import com.digitalasset.daml.lf.data.Ref.Party import com.digitalasset.daml.lf.data.Time.Timestamp import com.digitalasset.daml.lf.transaction.Node @@ -39,210 +39,6 @@ private[dao] trait JdbcLedgerDaoTransactionsSpec extends OptionValues with Insid import JdbcLedgerDaoTransactionsSpec.* - // TODO(#23504) remove the test when the rpc methods are removed - behavior of "JdbcLedgerDao (lookupFlatTransactionById, lookupFlatTransactionByOffset)" - - it should "return nothing for a mismatching transaction id" in { - for { - (_, tx) <- store(singleCreate) - result <- ledgerDao.updateReader - .lookupTransactionById( - updateId = "WRONG", - internalTransactionFormat = transactionFormatForWildcardParties(tx.actAs.toSet), - ) - } yield { - result shouldBe None - } - } - - it should "return nothing for a mismatching offset" in { - for { - (_, tx) <- store(singleCreate) - result <- ledgerDao.updateReader - .lookupTransactionByOffset( - offset = Offset.tryFromLong(12345678L), - internalTransactionFormat = transactionFormatForWildcardParties(tx.actAs.toSet), - ) - } yield { - result shouldBe None - } - } - - it should "return nothing for a mismatching party" in { - for { - (offset, tx) <- store(singleCreate) - resultById <- ledgerDao.updateReader - .lookupTransactionById(tx.updateId, transactionFormatForWildcardParties(Set("WRONG"))) - resultByOffset <- ledgerDao.updateReader - .lookupTransactionByOffset(offset, transactionFormatForWildcardParties(Set("WRONG"))) - } yield { - resultById shouldBe None - resultByOffset shouldBe resultById - } - } - - it should "return the expected flat transaction for a correct request (create)" in { - for { - (offset, tx) <- store(singleCreate) - resultById <- ledgerDao.updateReader - .lookupTransactionById(tx.updateId, 
transactionFormatForWildcardParties(tx.actAs.toSet)) - resultByOffset <- ledgerDao.updateReader - .lookupTransactionByOffset(offset, transactionFormatForWildcardParties(tx.actAs.toSet)) - } yield { - inside(resultById.value.transaction) { case Some(transaction) => - transaction.commandId shouldBe tx.commandId.value - transaction.offset shouldBe offset.unwrap - TimestampConversion.toLf( - transaction.effectiveAt.value, - TimestampConversion.ConversionMode.Exact, - ) shouldBe tx.ledgerEffectiveTime - transaction.updateId shouldBe tx.updateId - transaction.workflowId shouldBe tx.workflowId.getOrElse("") - inside(transaction.events.loneElement.event.created) { case Some(created) => - inside(tx.transaction.nodes.headOption) { case Some((nodeId, createNode: Node.Create)) => - created.offset shouldBe offset.unwrap - created.nodeId shouldBe nodeId.index - created.witnessParties should contain only (tx.actAs*) - created.contractKey shouldBe None - created.createArguments shouldNot be(None) - created.signatories should contain theSameElementsAs createNode.signatories - created.observers should contain theSameElementsAs createNode.stakeholders.diff( - createNode.signatories - ) - created.templateId shouldNot be(None) - } - } - } - resultByOffset shouldBe resultById - } - } - - it should "return the expected flat transaction for a correct request (exercise)" in { - for { - (_, create) <- store(singleCreate) - (offset, exercise) <- store(singleExercise(nonTransient(create).loneElement)) - resultById <- ledgerDao.updateReader - .lookupTransactionById( - updateId = exercise.updateId, - internalTransactionFormat = transactionFormatForWildcardParties(exercise.actAs.toSet), - ) - resultByOffset <- ledgerDao.updateReader - .lookupTransactionByOffset( - offset = offset, - internalTransactionFormat = transactionFormatForWildcardParties(exercise.actAs.toSet), - ) - } yield { - inside(resultById.value.transaction) { case Some(transaction) => - transaction.commandId shouldBe exercise.commandId.value - transaction.offset shouldBe offset.unwrap - transaction.updateId shouldBe exercise.updateId - TimestampConversion.toLf( - transaction.effectiveAt.value, - TimestampConversion.ConversionMode.Exact, - ) shouldBe exercise.ledgerEffectiveTime - transaction.workflowId shouldBe exercise.workflowId.getOrElse("") - inside(transaction.events.loneElement.event.archived) { case Some(archived) => - inside(exercise.transaction.nodes.headOption) { - case Some((nodeId, exerciseNode: Node.Exercise)) => - archived.offset shouldBe offset.unwrap - archived.nodeId shouldBe nodeId.index - archived.witnessParties should contain only (exercise.actAs*) - archived.contractId shouldBe exerciseNode.targetCoid.coid - archived.templateId shouldNot be(None) - } - } - } - resultByOffset shouldBe resultById - } - } - - it should "show command IDs to the original submitters (lookupFlatTransactionById)" in { - val signatories = Set(alice, bob) - val stakeholders = Set(alice, bob, charlie) // Charlie is only stakeholder - val actAs = List(alice, bob, david) // David is submitter but not signatory - for { - (_, tx) <- store(singleCreate(createNode(_, signatories, stakeholders), actAs)) - // Response 1: querying as all submitters - result1 <- ledgerDao.updateReader - .lookupTransactionById( - updateId = tx.updateId, - internalTransactionFormat = transactionFormatForWildcardParties(Set(alice, bob, david)), - ) - // Response 2: querying as a proper subset of all submitters - result2 <- ledgerDao.updateReader - .lookupTransactionById(tx.updateId, 
transactionFormatForWildcardParties(Set(alice, david))) - // Response 3: querying as a proper superset of all submitters - result3 <- ledgerDao.updateReader - .lookupTransactionById( - updateId = tx.updateId, - internalTransactionFormat = - transactionFormatForWildcardParties(Set(alice, bob, charlie, david)), - ) - } yield { - result1.value.transaction.value.commandId shouldBe tx.commandId.value - result2.value.transaction.value.commandId shouldBe tx.commandId.value - result3.value.transaction.value.commandId shouldBe tx.commandId.value - } - } - - it should "show command IDs to the original submitters (lookupFlatTransactionByOffset)" in { - val signatories = Set(alice, bob) - val stakeholders = Set(alice, bob, charlie) // Charlie is only stakeholder - val actAs = List(alice, bob, david) // David is submitter but not signatory - for { - (offset, tx) <- store(singleCreate(createNode(_, signatories, stakeholders), actAs)) - // Response 1: querying as all submitters - result1 <- ledgerDao.updateReader - .lookupTransactionByOffset( - offset = offset, - internalTransactionFormat = transactionFormatForWildcardParties(Set(alice, bob, david)), - ) - // Response 2: querying as a proper subset of all submitters - result2 <- ledgerDao.updateReader - .lookupTransactionByOffset(offset, transactionFormatForWildcardParties(Set(alice, david))) - // Response 3: querying as a proper superset of all submitters - result3 <- ledgerDao.updateReader - .lookupTransactionByOffset( - offset = offset, - internalTransactionFormat = - transactionFormatForWildcardParties(Set(alice, bob, charlie, david)), - ) - } yield { - result1.value.transaction.value.commandId shouldBe tx.commandId.value - result2.value.transaction.value.commandId shouldBe tx.commandId.value - result3.value.transaction.value.commandId shouldBe tx.commandId.value - } - } - - it should "hide command IDs from non-submitters" in { - val signatories = Set(alice, bob) - val stakeholders = Set(alice, bob, charlie) // Charlie is only stakeholder - val actAs = List(alice, bob, david) // David is submitter but not signatory - for { - (offset, tx) <- store(singleCreate(createNode(_, signatories, stakeholders), actAs)) - resultById <- ledgerDao.updateReader - .lookupTransactionById(tx.updateId, transactionFormatForWildcardParties(Set(charlie))) - resultByOffset <- ledgerDao.updateReader - .lookupTransactionByOffset(offset, transactionFormatForWildcardParties(Set(charlie))) - } yield { - resultById.value.transaction.value.commandId shouldBe "" - resultByOffset shouldBe resultById - } - } - - it should "hide transactions with transient contracts" in { - for { - (offset, tx) <- store(fullyTransient(), contractActivenessChanged = false) - resultById <- ledgerDao.updateReader - .lookupTransactionById(tx.updateId, transactionFormatForWildcardParties(tx.actAs.toSet)) - resultByOffset <- ledgerDao.updateReader - .lookupTransactionByOffset(offset, transactionFormatForWildcardParties(tx.actAs.toSet)) - } yield { - resultById shouldBe empty - resultByOffset shouldBe empty - } - } - behavior of "JdbcLedgerDao (lookupUpdateById, lookupUpdateByOffset)" it should "return nothing for a mismatching update id" in { @@ -250,7 +46,7 @@ private[dao] trait JdbcLedgerDaoTransactionsSpec extends OptionValues with Insid (_, tx) <- store(singleCreate) result <- ledgerDao.updateReader .lookupUpdateBy( - lookupKey = LookupKey.UpdateId("WRONG"), + lookupKey = LookupKey.ByUpdateId(TestUpdateId("WRONG")), internalUpdateFormat = updateFormatForWildcardParties(tx.actAs.toSet), ) } yield { @@ 
-263,7 +59,7 @@ private[dao] trait JdbcLedgerDaoTransactionsSpec extends OptionValues with Insid (_, tx) <- store(singleCreate) result <- ledgerDao.updateReader .lookupUpdateBy( - lookupKey = LookupKey.Offset(Offset.tryFromLong(12345678L)), + lookupKey = LookupKey.ByOffset(Offset.tryFromLong(12345678L)), internalUpdateFormat = updateFormatForWildcardParties(tx.actAs.toSet), ) } yield { @@ -276,12 +72,12 @@ private[dao] trait JdbcLedgerDaoTransactionsSpec extends OptionValues with Insid (offset, tx) <- store(singleCreate) resultById <- ledgerDao.updateReader .lookupUpdateBy( - lookupKey = LookupKey.UpdateId(tx.updateId), + lookupKey = LookupKey.ByUpdateId(tx.updateId), internalUpdateFormat = updateFormatForWildcardParties(Set("WRONG")), ) resultByOffset <- ledgerDao.updateReader .lookupUpdateBy( - lookupKey = LookupKey.Offset(offset), + lookupKey = LookupKey.ByOffset(offset), internalUpdateFormat = updateFormatForWildcardParties(Set("WRONG")), ) } yield { @@ -295,11 +91,11 @@ private[dao] trait JdbcLedgerDaoTransactionsSpec extends OptionValues with Insid (offset, tx) <- store(singleCreate) resultById <- ledgerDao.updateReader .lookupUpdateBy( - lookupKey = LookupKey.UpdateId(tx.updateId), + lookupKey = LookupKey.ByUpdateId(tx.updateId), internalUpdateFormat = updateFormatForWildcardParties(tx.actAs.toSet), ) resultByOffset <- ledgerDao.updateReader - .lookupUpdateBy(LookupKey.Offset(offset), updateFormatForWildcardParties(tx.actAs.toSet)) + .lookupUpdateBy(LookupKey.ByOffset(offset), updateFormatForWildcardParties(tx.actAs.toSet)) } yield { inside(resultById.value.update.transaction) { case Some(transaction) => transaction.commandId shouldBe tx.commandId.value @@ -308,7 +104,7 @@ private[dao] trait JdbcLedgerDaoTransactionsSpec extends OptionValues with Insid transaction.effectiveAt.value, TimestampConversion.ConversionMode.Exact, ) shouldBe tx.ledgerEffectiveTime - transaction.updateId shouldBe tx.updateId + transaction.updateId shouldBe tx.updateId.toHexString transaction.workflowId shouldBe tx.workflowId.getOrElse("") inside(transaction.events.loneElement.event.created) { case Some(created) => inside(tx.transaction.nodes.headOption) { case Some((nodeId, createNode: Node.Create)) => @@ -335,19 +131,19 @@ private[dao] trait JdbcLedgerDaoTransactionsSpec extends OptionValues with Insid (offset, exercise) <- store(singleExercise(nonTransient(create).loneElement)) resultById <- ledgerDao.updateReader .lookupUpdateBy( - lookupKey = LookupKey.UpdateId(exercise.updateId), + lookupKey = LookupKey.ByUpdateId(exercise.updateId), internalUpdateFormat = updateFormatForWildcardParties(exercise.actAs.toSet), ) resultByOffset <- ledgerDao.updateReader .lookupUpdateBy( - lookupKey = LookupKey.Offset(offset), + lookupKey = LookupKey.ByOffset(offset), internalUpdateFormat = updateFormatForWildcardParties(exercise.actAs.toSet), ) } yield { inside(resultById.value.update.transaction) { case Some(transaction) => transaction.commandId shouldBe exercise.commandId.value transaction.offset shouldBe offset.unwrap - transaction.updateId shouldBe exercise.updateId + transaction.updateId shouldBe exercise.updateId.toHexString TimestampConversion.toLf( transaction.effectiveAt.value, TimestampConversion.ConversionMode.Exact, @@ -377,19 +173,19 @@ private[dao] trait JdbcLedgerDaoTransactionsSpec extends OptionValues with Insid // Response 1: querying as all submitters result1 <- ledgerDao.updateReader .lookupUpdateBy( - lookupKey = LookupKey.UpdateId(tx.updateId), + lookupKey = LookupKey.ByUpdateId(tx.updateId), 
internalUpdateFormat = updateFormatForWildcardParties(Set(alice, bob, david)), ) // Response 2: querying as a proper subset of all submitters result2 <- ledgerDao.updateReader .lookupUpdateBy( - lookupKey = LookupKey.UpdateId(tx.updateId), + lookupKey = LookupKey.ByUpdateId(tx.updateId), internalUpdateFormat = updateFormatForWildcardParties(Set(alice, david)), ) // Response 3: querying as a proper superset of all submitters result3 <- ledgerDao.updateReader .lookupUpdateBy( - LookupKey.UpdateId(tx.updateId), + LookupKey.ByUpdateId(tx.updateId), internalUpdateFormat = updateFormatForWildcardParties(Set(alice, bob, charlie, david)), ) } yield { @@ -408,19 +204,19 @@ private[dao] trait JdbcLedgerDaoTransactionsSpec extends OptionValues with Insid // Response 1: querying as all submitters result1 <- ledgerDao.updateReader .lookupUpdateBy( - LookupKey.Offset(offset), + LookupKey.ByOffset(offset), internalUpdateFormat = updateFormatForWildcardParties(Set(alice, bob, david)), ) // Response 2: querying as a proper subset of all submitters result2 <- ledgerDao.updateReader .lookupUpdateBy( - LookupKey.Offset(offset), + LookupKey.ByOffset(offset), internalUpdateFormat = updateFormatForWildcardParties(Set(alice, david)), ) // Response 3: querying as a proper superset of all submitters result3 <- ledgerDao.updateReader .lookupUpdateBy( - LookupKey.Offset(offset), + LookupKey.ByOffset(offset), internalUpdateFormat = updateFormatForWildcardParties(Set(alice, bob, charlie, david)), ) } yield { @@ -438,11 +234,11 @@ private[dao] trait JdbcLedgerDaoTransactionsSpec extends OptionValues with Insid (offset, tx) <- store(singleCreate(createNode(_, signatories, stakeholders), actAs)) resultById <- ledgerDao.updateReader .lookupUpdateBy( - LookupKey.UpdateId(tx.updateId), + LookupKey.ByUpdateId(tx.updateId), updateFormatForWildcardParties(Set(charlie)), ) resultByOffset <- ledgerDao.updateReader - .lookupUpdateBy(LookupKey.Offset(offset), updateFormatForWildcardParties(Set(charlie))) + .lookupUpdateBy(LookupKey.ByOffset(offset), updateFormatForWildcardParties(Set(charlie))) } yield { resultById.value.update.transaction.value.commandId shouldBe "" resultByOffset shouldBe resultById @@ -454,11 +250,11 @@ private[dao] trait JdbcLedgerDaoTransactionsSpec extends OptionValues with Insid (offset, tx) <- store(fullyTransient(), contractActivenessChanged = false) resultById <- ledgerDao.updateReader .lookupUpdateBy( - LookupKey.UpdateId(tx.updateId), + LookupKey.ByUpdateId(tx.updateId), updateFormatForWildcardParties(tx.actAs.toSet), ) resultByOffset <- ledgerDao.updateReader - .lookupUpdateBy(LookupKey.Offset(offset), updateFormatForWildcardParties(tx.actAs.toSet)) + .lookupUpdateBy(LookupKey.ByOffset(offset), updateFormatForWildcardParties(tx.actAs.toSet)) } yield { resultById shouldBe empty resultByOffset shouldBe empty @@ -813,8 +609,8 @@ private[dao] trait JdbcLedgerDaoTransactionsSpec extends OptionValues with Insid val txs = extractAllTransactions(result) inside(txs) { case Vector(tx1, tx2) => - tx1.updateId shouldBe create.updateId - tx2.updateId shouldBe exercise.updateId + tx1.updateId shouldBe create.updateId.toHexString + tx2.updateId shouldBe exercise.updateId.toHexString inside(tx1.events) { case Seq(Event(Created(createdEvent))) => createdEvent.contractId shouldBe firstContractId.coid } @@ -848,7 +644,7 @@ private[dao] trait JdbcLedgerDaoTransactionsSpec extends OptionValues with Insid import com.daml.ledger.api.v2.event.Event.Event.Created inside(extractAllTransactions(result)) { case Vector(tx) => - 
tx.updateId shouldBe create2.updateId + tx.updateId shouldBe create2.updateId.toHexString inside(tx.events) { case Seq(Event(Created(createdEvent))) => createdEvent.contractId shouldBe nonTransient(create2).loneElement.coid } @@ -1046,10 +842,10 @@ private[dao] trait JdbcLedgerDaoTransactionsSpec extends OptionValues with Insid .sequence( transactions.map(tx => ledgerDao.updateReader - .lookupTransactionById(tx.updateId, transactionFormatForWildcardParties(as)) + .lookupUpdateBy(LookupKey.ByUpdateId(tx.updateId), updateFormatForWildcardParties(as)) ) ) - .map(_.flatMap(_.toList.flatMap(_.transaction.toList))) + .map(_.flatMap(_.toList.flatMap(_.update.transaction.toList))) private def transactionsOf( source: Source[(Offset, GetUpdatesResponse), NotUsed] diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoTransactionsWriterSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoTransactionsWriterSpec.scala index 5ed9f55c5c..4c77f9d402 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoTransactionsWriterSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoTransactionsWriterSpec.scala @@ -4,7 +4,6 @@ package com.digitalasset.canton.platform.store.dao import com.digitalasset.canton.data.Offset -import com.digitalasset.canton.platform.store.interfaces.LedgerDaoContractsReader import org.scalatest.flatspec.AsyncFlatSpec import org.scalatest.matchers.should.Matchers import org.scalatest.{LoneElement, OptionValues} @@ -62,17 +61,4 @@ private[dao] trait JdbcLedgerDaoTransactionsWriterSpec extends LoneElement with } } - it should "prefer stakeholder info" in { - for { - (offset, tx) <- store(offsetAndTx = singleCreate) - result <- ledgerDao.contractsReader.lookupContractState( - nonTransient(tx).loneElement, - offset, - ) - } yield { - result.collect { case active: LedgerDaoContractsReader.ActiveContract => - active.stakeholders -> active.contract.signatories - } shouldBe Some(Set(alice, bob) -> Set(alice, bob)) - } - } } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/SequentialWriteDaoSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/SequentialWriteDaoSpec.scala index ccf927a9df..43e2519dfc 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/SequentialWriteDaoSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/SequentialWriteDaoSpec.scala @@ -26,7 +26,8 @@ import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.tracing.SerializableTraceContextConverter.SerializableTraceContextExtension import com.digitalasset.canton.tracing.{SerializableTraceContext, TraceContext} import com.digitalasset.daml.lf.data.Ref -import com.digitalasset.daml.lf.data.Ref.{NameTypeConRef, PackageId, Party} +import com.digitalasset.daml.lf.data.Ref.{NameTypeConRef, PackageId, Party, UserId} +import com.digitalasset.daml.lf.value.Value.ContractId import com.google.protobuf.ByteString import org.mockito.MockitoSugar.mock import org.scalatest.flatspec.AnyFlatSpec @@ -242,6 +243,9 @@ object SequentialWriteDaoSpec { private def offset(l: Long): Offset = Offset.tryFromLong(l) + 
private def hashCid(key: String): ContractId = + ContractId.V1(com.digitalasset.daml.lf.crypto.Hash.hashPrivateKey(key)) + private def someUpdate(key: String) = Some( Update.PartyAddedToParticipant( party = Ref.Party.assertFromString(key), @@ -263,16 +267,17 @@ object SequentialWriteDaoSpec { private val someEventCreated = DbDto.EventCreate( event_offset = 1, - update_id = "", + update_id = new Array[Byte](0), ledger_effective_time = 3, command_id = None, workflow_id = None, user_id = None, submitters = None, node_id = 3, - contract_id = Array(24), + contract_id = hashCid("24"), template_id = "", package_id = "2", + representative_package_id = "3", flat_event_witnesses = Set.empty, tree_event_witnesses = Set.empty, create_argument = Array.empty, @@ -285,28 +290,30 @@ object SequentialWriteDaoSpec { create_key_value_compression = None, event_sequential_id = 0, authentication_data = Array.empty, - synchronizer_id = "x::synchronizer", + synchronizer_id = SynchronizerId.tryFromString("x::synchronizer"), trace_context = serializableTraceContext, record_time = 0, external_transaction_hash = Some(externalTransactionHash), + internal_contract_id = 42L, ) private val someEventExercise = DbDto.EventExercise( consuming = true, event_offset = 1, - update_id = "", + update_id = new Array[Byte](0), ledger_effective_time = 3, command_id = None, workflow_id = None, user_id = None, submitters = None, node_id = 3, - contract_id = Array(24), + contract_id = hashCid("24"), template_id = "", package_id = "2", flat_event_witnesses = Set.empty, tree_event_witnesses = Set.empty, exercise_choice = "", + exercise_choice_interface_id = None, exercise_argument = Array.empty, exercise_result = None, exercise_actors = Set.empty, @@ -314,10 +321,11 @@ object SequentialWriteDaoSpec { exercise_argument_compression = None, exercise_result_compression = None, event_sequential_id = 0, - synchronizer_id = "x::synchronizer", + synchronizer_id = SynchronizerId.tryFromString("x::synchronizer"), trace_context = serializableTraceContext, record_time = 0, external_transaction_hash = Some(externalTransactionHash), + deactivated_event_sequential_id = None, ) val singlePartyFixture: Option[Update.PartyAddedToParticipant] = @@ -333,8 +341,8 @@ object SequentialWriteDaoSpec { partyAndCreateFixture.get.party -> List(someParty, someEventCreated), allEventsFixture.get.party -> List( someEventCreated, - DbDto.IdFilterCreateStakeholder(0L, "", ""), - DbDto.IdFilterCreateStakeholder(0L, "", ""), + DbDto.IdFilterCreateStakeholder(0L, "", "", first_per_sequential_id = true), + DbDto.IdFilterCreateStakeholder(0L, "", "", first_per_sequential_id = false), someEventExercise, ), ) @@ -353,6 +361,10 @@ object SequentialWriteDaoSpec { templateIds = List("1").iterator, synchronizerIds = Iterator.empty, packageIds = Iterator("2"), + userIds = Iterator.empty, + participantIds = Iterator.empty, + choiceNames = Iterator.empty, + interfaceIds = Iterator.empty, ) case _ => new DomainStringIterators( @@ -360,6 +372,10 @@ object SequentialWriteDaoSpec { templateIds = Iterator.empty, synchronizerIds = Iterator.empty, packageIds = Iterator.empty, + userIds = Iterator.empty, + participantIds = Iterator.empty, + choiceNames = Iterator.empty, + interfaceIds = Iterator.empty, ) } @@ -376,6 +392,17 @@ object SequentialWriteDaoSpec { override def synchronizerId: StringInterningDomain[SynchronizerId] = throw new NotImplementedException + override def userId: StringInterningDomain[UserId] = throw new NotImplementedException + + override def participantId: 
StringInterningDomain[Ref.ParticipantId] = + throw new NotImplementedException + + override def choiceName: StringInterningDomain[Ref.ChoiceName] = + throw new NotImplementedException + + override def interfaceId: StringInterningDomain[Ref.Identifier] = + throw new NotImplementedException + override def internize( domainStringIterators: DomainStringIterators ): Iterable[(Int, String)] = diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/events/ACSReaderSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/events/ACSReaderSpec.scala index 76749e9b8c..770d35de44 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/events/ACSReaderSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/events/ACSReaderSpec.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.platform.store.dao.events import com.digitalasset.canton.BaseTest import com.digitalasset.canton.platform.store.dao.PaginatingAsyncStream -import com.digitalasset.canton.platform.store.dao.PaginatingAsyncStream.IdPaginationState +import com.digitalasset.canton.platform.store.dao.PaginatingAsyncStream.PaginationInput import com.digitalasset.canton.platform.store.dao.events.EventIdsUtils.* import org.apache.pekko.actor.ActorSystem import org.apache.pekko.stream.Materializer @@ -13,6 +13,7 @@ import org.apache.pekko.stream.scaladsl.{Sink, Source} import org.scalatest.flatspec.AsyncFlatSpec import org.scalatest.{Assertion, BeforeAndAfterAll} +import java.sql.Connection import scala.concurrent.duration.DurationInt import scala.concurrent.{Await, ExecutionContext, Future} @@ -131,13 +132,13 @@ class ACSReaderSpec extends AsyncFlatSpec with BaseTest with BeforeAndAfterAll { Range(1, 70).map(_.toLong).toVector, ).map( _ shouldBe Vector( - IdPaginationState(0, 1), - IdPaginationState(1, 4), - IdPaginationState(5, 16), - IdPaginationState(21, 20), - IdPaginationState(41, 20), - IdPaginationState(61, 20), - IdPaginationState(69, 20), + PaginationInput(0, 69, 1), + PaginationInput(1, 69, 4), + PaginationInput(5, 69, 16), + PaginationInput(21, 69, 20), + PaginationInput(41, 69, 20), + PaginationInput(61, 69, 20), + PaginationInput(69, 69, 20), ) ) } @@ -151,11 +152,11 @@ class ACSReaderSpec extends AsyncFlatSpec with BaseTest with BeforeAndAfterAll { Range(1, 70).map(_.toLong).toVector, ).map( _ shouldBe Vector( - IdPaginationState(0, 20), - IdPaginationState(20, 20), - IdPaginationState(40, 20), - IdPaginationState(60, 20), - IdPaginationState(69, 20), + PaginationInput(0, 69, 20), + PaginationInput(20, 69, 20), + PaginationInput(40, 69, 20), + PaginationInput(60, 69, 20), + PaginationInput(69, 69, 20), ) ) } @@ -169,9 +170,9 @@ class ACSReaderSpec extends AsyncFlatSpec with BaseTest with BeforeAndAfterAll { Range(1, 6).map(_.toLong).toVector, ).map( _ shouldBe Vector( - IdPaginationState(0, 1), - IdPaginationState(1, 4), - IdPaginationState(5, 16), + PaginationInput(0, 5, 1), + PaginationInput(1, 5, 4), + PaginationInput(5, 5, 16), ) ) } @@ -185,7 +186,7 @@ class ACSReaderSpec extends AsyncFlatSpec with BaseTest with BeforeAndAfterAll { Vector.empty, ).map( _ shouldBe Vector( - IdPaginationState(0, 1) + PaginationInput(0, 0, 1) ) ) } @@ -298,17 +299,23 @@ class ACSReaderSpec extends AsyncFlatSpec with BaseTest with BeforeAndAfterAll { private def testIdSource( idQueryConfiguration: IdPageSizing, ids: Vector[Long], - ): 
Future[Vector[IdPaginationState]] = { - val queries = Vector.newBuilder[IdPaginationState] + ): Future[Vector[PaginationInput]] = { + val queries = Vector.newBuilder[PaginationInput] paginatingAsyncStream - .streamIdsFromSeekPagination(idQueryConfiguration, 1, 0L) { idQuery => - queries.addOne(idQuery) - Future.successful( + .streamIdsFromSeekPaginationWithoutIdFilter( + idStreamName = "test-stream", + idPageSizing = idQueryConfiguration, + idPageBufferSize = 1, + initialFromIdExclusive = 0L, + initialEndInclusive = ids.lastOption.getOrElse(0), + )(_ => + input => { + queries.addOne(input) ids - .dropWhile(_ <= idQuery.fromIdExclusive) - .take(idQuery.pageSize) - ) - } + .dropWhile(_ <= input.startExclusive) + .take(input.limit) + } + )(f => Future.successful(f(mock[Connection]))) .runWith(Sink.seq[Long]) .map { result => result shouldBe ids diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/events/InputContractPackagesTest.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/events/InputContractPackagesTest.scala new file mode 100644 index 0000000000..804fad57a7 --- /dev/null +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/events/InputContractPackagesTest.scala @@ -0,0 +1,100 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.platform.store.dao.events + +import com.digitalasset.canton.protocol.ExampleTransactionFactory.{ + exerciseNode, + fetchNode, + lookupByKeyNode, + templateId, +} +import com.digitalasset.canton.protocol.{ExampleContractFactory, LfGlobalKey, LfTemplateId} +import com.digitalasset.canton.util.LfTransactionBuilder.defaultPackageName +import com.digitalasset.canton.{BaseTest, LfPackageId} +import com.digitalasset.daml.lf.transaction.test.TestIdFactory +import com.digitalasset.daml.lf.transaction.test.TreeTransactionBuilder.{ + NodeOps, + toVersionedTransaction, +} +import com.digitalasset.daml.lf.value.Value +import org.scalatest.wordspec.AnyWordSpec + +class InputContractPackagesTest extends AnyWordSpec with BaseTest with TestIdFactory { + + import InputContractPackages.* + + val (cid1, cid2, cid3) = (newCid, newCid, newCid) + val (p1, p2, p3) = (newPackageId, newPackageId, newPackageId) + def t(pId: LfPackageId): LfTemplateId = LfTemplateId(pId, templateId.qualifiedName) + + "InputContractPackages.forTransaction" should { + + "extract package ids associated with nodes" in { + + val globalKey = LfGlobalKey.assertBuild( + t(p2), + Value.ValueUnit, + defaultPackageName, + ) + + val example = toVersionedTransaction( + exerciseNode(cid1, templateId = t(p1)).withChildren( + lookupByKeyNode(globalKey, resolution = Some(cid2)), + fetchNode(cid3, templateId = t(p3)), + ) + ).transaction + + forTransaction(example) shouldBe Map( + cid1 -> Set(p1), + cid2 -> Set(p2), + cid3 -> Set(p3), + ) + } + + "return multiple packages where the same contract is bound to different packages" in { + + val example = toVersionedTransaction( + exerciseNode(cid1, templateId = t(p1)).withChildren( + exerciseNode(cid1, templateId = t(p2)), + exerciseNode(cid1, templateId = t(p3)), + ) + ).transaction + + forTransaction(example) shouldBe Map( + cid1 -> Set(p1, p2, p3) + ) + } + + } + + "InputContractPackages.strictZipByKey" should { + "work where both maps have identical keys" in { + strictZipByKey(Map(1 ->
"a", 2 -> "b"), Map(1 -> 3.0, 2 -> 4.0)) shouldBe Right( + Map(1 -> ("a", 3.0), 2 -> ("b", 4.0)) + ) + } + "fail where the key sets are unequal" in { + inside(strictZipByKey(Map(1 -> "a", 2 -> "b"), Map(2 -> 4.0, 3 -> 5.0))) { + case Left(mismatch) => mismatch shouldBe Set(1, 3) + } + } + + } + + "InputContractPackages.forTransactionWithContracts" should { + + val cid = newCid + val inst = ExampleContractFactory.build() + val tx = toVersionedTransaction( + exerciseNode(cid, templateId = t(p1)) + ).transaction + + "combine transaction contracts with the contract instances map" in { + forTransactionWithContracts(tx, Map(cid -> inst)) shouldBe Right( + Map(cid -> (inst.inst, Set(p1))) + ) + } + } + +} diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/entries/LedgerEntry.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/entries/LedgerEntry.scala index dd39e944cb..5a2b758d36 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/entries/LedgerEntry.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/entries/LedgerEntry.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.platform.store.entries import com.digitalasset.canton.platform.* +import com.digitalasset.canton.protocol.UpdateId import com.digitalasset.daml.lf.data.Relation import com.digitalasset.daml.lf.data.Time.Timestamp import com.digitalasset.daml.lf.transaction.{CommittedTransaction, NodeId} diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/interning/MockStringInterning.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/interning/MockStringInterning.scala index 4d81dedb89..5348a44170 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/interning/MockStringInterning.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/interning/MockStringInterning.scala @@ -99,6 +99,66 @@ class MockStringInterning extends StringInterning { rawStringInterning.tryExternalize(id).map(SynchronizerId.tryFromString) } + override val userId: StringInterningDomain[Ref.UserId] = + new StringInterningDomain[Ref.UserId] { + override val unsafe: StringInterningAccessor[String] = rawStringInterning + + override def internalize(t: Ref.UserId): Int = tryInternalize(t).get + + override def tryInternalize(t: Ref.UserId): Option[Int] = + rawStringInterning.tryInternalize(t.toString) + + override def externalize(id: Int): Ref.UserId = tryExternalize(id).get + + override def tryExternalize(id: Int): Option[Ref.UserId] = + rawStringInterning.tryExternalize(id).map(Ref.UserId.assertFromString) + } + + override val participantId: StringInterningDomain[Ref.ParticipantId] = + new StringInterningDomain[Ref.ParticipantId] { + override val unsafe: StringInterningAccessor[String] = rawStringInterning + + override def internalize(t: Ref.ParticipantId): Int = tryInternalize(t).get + + override def tryInternalize(t: Ref.ParticipantId): Option[Int] = + rawStringInterning.tryInternalize(t.toString) + + override def externalize(id: Int): Ref.ParticipantId = tryExternalize(id).get + + override def tryExternalize(id: Int): Option[Ref.ParticipantId] = + rawStringInterning.tryExternalize(id).map(Ref.ParticipantId.assertFromString) + } + + override val choiceName:
StringInterningDomain[Ref.ChoiceName] = + new StringInterningDomain[Ref.ChoiceName] { + override val unsafe: StringInterningAccessor[String] = rawStringInterning + + override def internalize(t: Ref.ChoiceName): Int = tryInternalize(t).get + + override def tryInternalize(t: Ref.ChoiceName): Option[Int] = + rawStringInterning.tryInternalize(t.toString) + + override def externalize(id: Int): Ref.ChoiceName = tryExternalize(id).get + + override def tryExternalize(id: Int): Option[Ref.ChoiceName] = + rawStringInterning.tryExternalize(id).map(Ref.ChoiceName.assertFromString) + } + + override val interfaceId: StringInterningDomain[Ref.Identifier] = + new StringInterningDomain[Ref.Identifier] { + override val unsafe: StringInterningAccessor[String] = rawStringInterning + + override def internalize(t: Ref.Identifier): Int = tryInternalize(t).get + + override def tryInternalize(t: Ref.Identifier): Option[Int] = + rawStringInterning.tryInternalize(t.toString) + + override def externalize(id: Int): Ref.Identifier = tryExternalize(id).get + + override def tryExternalize(id: Int): Option[Ref.Identifier] = + rawStringInterning.tryExternalize(id).map(Ref.Identifier.assertFromString) + } + private[store] def reset(): Unit = blocking(synchronized { idToString = Map.empty stringToId = Map.empty diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/interning/StringInterningViewSpec.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/interning/StringInterningViewSpec.scala index 5069b95fd2..019b561101 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/interning/StringInterningViewSpec.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/interning/StringInterningViewSpec.scala @@ -29,13 +29,24 @@ class StringInterningViewSpec extends AsyncFlatSpec with Matchers with BaseTest synchronizerIdAbsent(testee, "22::same:name") packageIdAbsent(testee, "pkg-1") packageIdAbsent(testee, "pkg-2") + userIdAbsent(testee, "usr1") + userIdAbsent(testee, "usr2") + participantIdAbsent(testee, "pn-1") + participantIdAbsent(testee, "pn-2") + choiceNameAbsent(testee, "ChoiceName") + interfaceIdAbsent(testee, "pkg:inter:face") testee.internize( new DomainStringIterators( parties = List("p1", "p2", "22::same:name").iterator, templateIds = List("#22:t:a", "#22:t:b").iterator, - synchronizerIds = List("22::same:name", "x::synchronizer1", "x::synchronizer2").iterator, + synchronizerIds = List("22::same:name", "x::synchronizer1", "x::synchronizer2").iterator + .map(SynchronizerId.tryFromString), packageIds = List("pkg-1", "pkg-2").iterator, + userIds = List("usr1", "usr2").iterator, + participantIds = List("pn-1", "pn-2").iterator, + choiceNames = List("ChoiceName").iterator, + interfaceIds = List("pkg:inter:face").iterator, ) ) shouldBe Vector( 1 -> "p|p1", @@ -48,6 +59,12 @@ class StringInterningViewSpec extends AsyncFlatSpec with Matchers with BaseTest 8 -> "d|x::synchronizer2", 9 -> "i|pkg-1", 10 -> "i|pkg-2", + 11 -> "u|usr1", + 12 -> "u|usr2", + 13 -> "n|pn-1", + 14 -> "n|pn-2", + 15 -> "c|ChoiceName", + 16 -> "f|pkg:inter:face", ) partyPresent(testee, "p1", 1) partyPresent(testee, "p2", 2) @@ -63,6 +80,16 @@ class StringInterningViewSpec extends AsyncFlatSpec with Matchers with BaseTest packageIdPresent(testee, "pkg-1", 9) packageIdPresent(testee, "pkg-2", 10) packageIdAbsent(testee, "pkg-unknown") + userIdPresent(testee, 
"usr1", 11) + userIdPresent(testee, "usr2", 12) + userIdAbsent(testee, "usr-unknown") + participantIdPresent(testee, "pn-1", 13) + participantIdPresent(testee, "pn-2", 14) + participantIdAbsent(testee, "pn-unknown") + choiceNamePresent(testee, "ChoiceName", 15) + choiceNameAbsent(testee, "CnUnknown") + interfaceIdPresent(testee, "pkg:inter:face", 16) + interfaceIdAbsent(testee, "inter:face:unknown") } it should "extend working view correctly" in { @@ -77,12 +104,21 @@ class StringInterningViewSpec extends AsyncFlatSpec with Matchers with BaseTest synchronizerIdAbsent(testee, "x::synchronizer2") packageIdAbsent(testee, "pkg-1") packageIdAbsent(testee, "pkg-2") + userIdAbsent(testee, "usr1") + userIdAbsent(testee, "usr2") + choiceNameAbsent(testee, "ChoiceName") + interfaceIdAbsent(testee, "pkg:inter:face") testee.internize( new DomainStringIterators( parties = List("p1", "p2", "22::same:name").iterator, templateIds = List("#22:t:a").iterator, - synchronizerIds = List("x::synchronizer1", "x::synchronizer2").iterator, + synchronizerIds = + List("x::synchronizer1", "x::synchronizer2").iterator.map(SynchronizerId.tryFromString), packageIds = List("pkg-1").iterator, + userIds = List("usr1").iterator, + participantIds = List("pn-1", "pn-2").iterator, + choiceNames = List("ChoiceName").iterator, + interfaceIds = List("pkg:inter:face").iterator, ) ) shouldBe Vector( 1 -> "p|p1", @@ -92,6 +128,11 @@ class StringInterningViewSpec extends AsyncFlatSpec with Matchers with BaseTest 5 -> "d|x::synchronizer1", 6 -> "d|x::synchronizer2", 7 -> "i|pkg-1", + 8 -> "u|usr1", + 9 -> "n|pn-1", + 10 -> "n|pn-2", + 11 -> "c|ChoiceName", + 12 -> "f|pkg:inter:face", ) partyPresent(testee, "p1", 1) partyPresent(testee, "p2", 2) @@ -107,34 +148,60 @@ class StringInterningViewSpec extends AsyncFlatSpec with Matchers with BaseTest packageIdPresent(testee, "pkg-1", 7) packageIdAbsent(testee, "pkg-2") packageIdAbsent(testee, "pkg-unknown") + userIdPresent(testee, "usr1", 8) + userIdAbsent(testee, "usr2") + choiceNamePresent(testee, "ChoiceName", 11) + choiceNameAbsent(testee, "CnUnknown") + interfaceIdPresent(testee, "pkg:inter:face", 12) + interfaceIdAbsent(testee, "inter:face:unknown") testee.internize( new DomainStringIterators( parties = List("p1", "p2").iterator, templateIds = List("#22:t:a", "#22:t:b").iterator, - synchronizerIds = List("22::same:name", "x::synchronizer1", "x::synchronizer3").iterator, + synchronizerIds = List("22::same:name", "x::synchronizer1", "x::synchronizer3").iterator + .map(SynchronizerId.tryFromString), packageIds = List("pkg-1", "pkg-2").iterator, + userIds = List("usr1", "usr2").iterator, + participantIds = List("pn-1", "pn-2", "pn-3").iterator, + choiceNames = List("ChoiceName").iterator, + interfaceIds = List("pkg:inter:face", "pkg:inter:face2").iterator, ) ) shouldBe Vector( - 8 -> "t|#22:t:b", - 9 -> "d|22::same:name", - 10 -> "d|x::synchronizer3", - 11 -> "i|pkg-2", + 13 -> "t|#22:t:b", + 14 -> "d|22::same:name", + 15 -> "d|x::synchronizer3", + 16 -> "i|pkg-2", + 17 -> "u|usr2", + 18 -> "n|pn-3", + 19 -> "f|pkg:inter:face2", ) partyPresent(testee, "p1", 1) partyPresent(testee, "p2", 2) partyPresent(testee, "22::same:name", 3) partyAbsent(testee, "unknown") templatePresent(testee, "#22:t:a", 4) - templatePresent(testee, "#22:t:b", 8) + templatePresent(testee, "#22:t:b", 13) templateAbsent(testee, "#22:unkno:wn") synchronizerIdPresent(testee, "x::synchronizer1", 5) synchronizerIdPresent(testee, "x::synchronizer2", 6) - synchronizerIdPresent(testee, "22::same:name", 9) - 
synchronizerIdPresent(testee, "x::synchronizer3", 10) + synchronizerIdPresent(testee, "22::same:name", 14) + synchronizerIdPresent(testee, "x::synchronizer3", 15) synchronizerIdAbsent(testee, "x::synchronizerunknown") packageIdPresent(testee, "pkg-1", 7) - packageIdPresent(testee, "pkg-2", 11) + packageIdPresent(testee, "pkg-2", 16) packageIdAbsent(testee, "pkg-unknown") + userIdPresent(testee, "usr1", 8) + userIdPresent(testee, "usr2", 17) + userIdAbsent(testee, "usr-unknown") + participantIdPresent(testee, "pn-1", 9) + participantIdPresent(testee, "pn-2", 10) + participantIdPresent(testee, "pn-3", 18) + participantIdAbsent(testee, "pn-unknown") + choiceNamePresent(testee, "ChoiceName", 11) + interfaceIdPresent(testee, "pkg:inter:face2", 19) + choiceNameAbsent(testee, "CnUnknown") + interfaceIdPresent(testee, "pkg:inter:face", 12) + interfaceIdAbsent(testee, "inter:face:unknown") } it should "correctly load prefixing entries in the view on `update`" in { @@ -190,8 +257,12 @@ class StringInterningViewSpec extends AsyncFlatSpec with Matchers with BaseTest new DomainStringIterators( parties = List("p1", "p2").iterator, templateIds = List().iterator, - synchronizerIds = List("x::synchronizer1").iterator, + synchronizerIds = List("x::synchronizer1").iterator.map(SynchronizerId.tryFromString), packageIds = List("pkg-1").iterator, + userIds = List().iterator, + participantIds = List().iterator, + choiceNames = List().iterator, + interfaceIds = List().iterator, ) ) partyPresent(testee, "p1", 1) @@ -237,8 +308,13 @@ class StringInterningViewSpec extends AsyncFlatSpec with Matchers with BaseTest new DomainStringIterators( parties = List("p1", "p2", "22::same:name").iterator, templateIds = List("#22:t:a", "#22:t:b").iterator, - synchronizerIds = List("22::same:name", "x::synchronizer1", "x::synchronizer2").iterator, + synchronizerIds = List("22::same:name", "x::synchronizer1", "x::synchronizer2").iterator + .map(SynchronizerId.tryFromString), packageIds = List("pkg-1").iterator, + userIds = List().iterator, + participantIds = List().iterator, + choiceNames = List().iterator, + interfaceIds = List().iterator, ) ) shouldBe Vector( 1 -> "p|p1", @@ -309,6 +385,30 @@ class StringInterningViewSpec extends AsyncFlatSpec with Matchers with BaseTest private def packageIdAbsent(view: StringInterning, packageId: String) = interningEntryAbsent(view.packageId, packageId, Ref.PackageId.assertFromString) + private def userIdPresent(view: StringInterning, userId: String, id: Int) = + interningEntryPresent(view.userId, userId, id, Ref.UserId.assertFromString) + + private def userIdAbsent(view: StringInterning, userId: String) = + interningEntryAbsent(view.userId, userId, Ref.UserId.assertFromString) + + private def participantIdPresent(view: StringInterning, participantId: String, id: Int) = + interningEntryPresent(view.participantId, participantId, id, Ref.ParticipantId.assertFromString) + + private def participantIdAbsent(view: StringInterning, participantId: String) = + interningEntryAbsent(view.participantId, participantId, Ref.ParticipantId.assertFromString) + + private def choiceNamePresent(view: StringInterning, choiceName: String, id: Int) = + interningEntryPresent(view.choiceName, choiceName, id, Ref.ChoiceName.assertFromString) + + private def choiceNameAbsent(view: StringInterning, choiceName: String) = + interningEntryAbsent(view.choiceName, choiceName, Ref.ChoiceName.assertFromString) + + private def interfaceIdPresent(view: StringInterning, interfaceId: String, id: Int) = + 
interningEntryPresent(view.interfaceId, interfaceId, id, Ref.Identifier.assertFromString) + + private def interfaceIdAbsent(view: StringInterning, interfaceId: String) = + interningEntryAbsent(view.interfaceId, interfaceId, Ref.Identifier.assertFromString) + private def interningEntryPresent[T]( interningDomain: StringInterningDomain[T], stringValue: String, diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/migration/DbDataTypes.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/migration/DbDataTypes.scala index 4c0ab7d113..8bcfeecbb1 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/migration/DbDataTypes.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/migration/DbDataTypes.scala @@ -64,23 +64,4 @@ class DbDataTypes(dbType: DbType) { } } - case object IntArray extends DbDataType { - override def get(resultSet: ResultSet, index: Int): Any = - resultSet - .getArray(index) - .getArray - .asInstanceOf[Array[java.lang.Integer]] - .toVector - .map(_.intValue()) - - override def put(value: Any): String = { - val array = value - .asInstanceOf[Vector[Int]] - .map(_.toString) - dbType match { - case DbType.Postgres => array.mkString("ARRAY[", ", ", "]::INTEGER[]") - case other => sys.error(s"Unsupported db type: $other") - } - } - } } diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/util/ContractValidatorTest.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/util/ContractValidatorTest.scala new file mode 100644 index 0000000000..7029cc1948 --- /dev/null +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/util/ContractValidatorTest.scala @@ -0,0 +1,286 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import cats.syntax.either.* +import com.daml.logging.LoggingContext +import com.digitalasset.canton.crypto.TestSalt +import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto +import com.digitalasset.canton.examples.java.cycle.Cycle +import com.digitalasset.canton.protocol.{CantonContractIdV1Version, *} +import com.digitalasset.canton.{ + BaseTest, + FailOnShutdown, + HasExecutionContext, + LfPackageName, + LfPartyId, +} +import com.digitalasset.daml.lf.data.ImmArray +import com.digitalasset.daml.lf.transaction.CreationTime.CreatedAt +import com.digitalasset.daml.lf.transaction.{FatContractInstance, Versioned} +import com.digitalasset.daml.lf.value.Value +import com.digitalasset.daml.lf.value.Value.ValueText +import org.scalatest.Assertion +import org.scalatest.wordspec.AsyncWordSpec + +import java.time.Duration +import scala.concurrent.Future + +class ContractValidatorTest + extends AsyncWordSpec + with BaseTest + with HasExecutionContext + with FailOnShutdown { + + implicit private val loggingContext: LoggingContext = LoggingContext.empty + + private val alice = LfPartyId.assertFromString("Alice") + + private val pureCrypto = new SymbolicPureCrypto() + + val authContractIdVersion: CantonContractIdV1Version = CantonContractIdVersion.maxV1 + + private val testEngine = + new TestEngine( + packagePaths = Seq(CantonExamplesPath), + iterationsBetweenInterruptions = 10, + cantonContractIdVersion = authContractIdVersion, + ) + + private val underTest = + ContractValidator(pureCrypto, testEngine.engine, testEngine.packageResolver) + + private def assertAuthenticationError(invalid: FatContractInstance): Future[Assertion] = + assertErrorRegex(invalid, s"AuthenticationError.*${invalid.contractId.coid}") + + private def assertTypeMismatch(invalid: FatContractInstance): Future[Assertion] = + assertErrorRegex(invalid, s"TranslationError.*TypeMismatch") + + private def assertValidationFailure(invalid: FatContractInstance): Future[Assertion] = + assertErrorRegex(invalid, s"ValidationFailed.*${invalid.contractId.coid}.*") + + private def assertErrorRegex( + invalid: FatContractInstance, + errorRegex: String, + ): Future[Assertion] = + underTest + .authenticate(invalid, invalid.templateId.packageId) + .value + .map(e => + inside(e) { case Left(error) => + error should include regex errorRegex + } + ) + + s"ContractValidator with $authContractIdVersion" when { + + val (createTx, _) = + testEngine.submitAndConsume(new Cycle("id", alice).create().commands.loneElement, alice) + val createNode = createTx.nodes.values.collect { case c: LfNodeCreate => c }.loneElement + val contractInstance = ContractInstance.create(testEngine.suffix(createNode)).value + val targetPackageId = contractInstance.templateId.packageId + + "using a valid contract id" should { + "correctly authenticate the contract" in { + underTest + .authenticate(contractInstance.inst, targetPackageId) + .value + .map(_ shouldBe Either.unit) + } + } + + "using un-normalized values" should { + if (authContractIdVersion > AuthenticatedContractIdVersionV10) { + + val unNormalizedArg = Value.ValueRecord( + None, + contractInstance.inst.createArg + .asInstanceOf[Value.ValueRecord] + .fields + .slowAppend(ImmArray.from(Seq((None, Value.ValueOptional(None))))), + ) + + val unNormalizedContract = + ExampleContractFactory.modify(contractInstance, arg = Some(unNormalizedArg)) + + "correctly authenticate the contract" in { + underTest
.authenticate(unNormalizedContract.inst, unNormalizedContract.templateId.packageId) + .value + .map(_ shouldBe Either.unit) + } + } + } + + "using an invalid contract id" should { + "fail authentication" in { + val invalidContractId = ExampleContractFactory.buildContractId() + val invalid: FatContractInstance = ExampleContractFactory + .modify[CreatedAt](contractInstance, contractId = Some(invalidContractId)) + .inst + assertAuthenticationError(invalid) + } + } + + "using a changed salt/authentication data" should { + "fail authentication" in { + val authenticationData = ContractAuthenticationDataV1(TestSalt.generateSalt(42))( + authContractIdVersion + ).toLfBytes + val invalid: FatContractInstance = ExampleContractFactory + .modify[CreatedAt](contractInstance, authenticationData = Some(authenticationData)) + .inst + assertAuthenticationError(invalid) + } + } + + "using a changed ledger time" should { + "fail authentication" in { + val changedTime = + CreatedAt(contractInstance.inst.createdAt.time.add(Duration.ofDays(1L))) + val invalid: FatContractInstance = ExampleContractFactory + .modify[CreatedAt](contractInstance, createdAt = Some(changedTime)) + .inst + assertAuthenticationError(invalid) + } + } + + "using a changed contract argument" should { + "fail authentication" in { + val invalid: FatContractInstance = ExampleContractFactory + .modify[CreatedAt](contractInstance, arg = Some(ValueText("changed"))) + .inst + assertTypeMismatch(invalid) + } + } + + "using a changed template-id" should { + import com.digitalasset.canton.examples.java.iou.Iou + "fail authentication" in { + val invalid: FatContractInstance = ExampleContractFactory + .modify[CreatedAt]( + contractInstance, + templateId = Some(testEngine.toRefIdentifier(Iou.TEMPLATE_ID_WITH_PACKAGE_ID)), + ) + .inst + assertTypeMismatch(invalid) + } + } + + "using a changed package-name" should { + "fail authentication" ignore { + val invalid: FatContractInstance = ExampleContractFactory + .modify[CreatedAt]( + contractInstance, + packageName = Some(LfPackageName.assertFromString("definitely-changed-package-name")), + ) + .inst + assertAuthenticationError(invalid) + } + } + + "using changed signatories" should { + "fail authentication" in { + val changedSignatory: LfPartyId = + LfPartyId.assertFromString("changed::signatory") + val invalid: FatContractInstance = ExampleContractFactory + .modify[CreatedAt]( + contractInstance, + metadata = Some( + ContractMetadata.tryCreate( + signatories = contractInstance.metadata.signatories + changedSignatory, + stakeholders = contractInstance.metadata.stakeholders + changedSignatory, + maybeKeyWithMaintainersVersioned = + contractInstance.metadata.maybeKeyWithMaintainersVersioned, + ) + ), + ) + .inst + assertValidationFailure(invalid) + } + } + + "using changed observers" should { + "fail authentication" in { + val changedObserver: LfPartyId = + LfPartyId.assertFromString("changed::observer") + val invalid: FatContractInstance = ExampleContractFactory + .modify[CreatedAt]( + contractInstance, + metadata = Some( + ContractMetadata.tryCreate( + signatories = contractInstance.metadata.signatories, + stakeholders = contractInstance.metadata.stakeholders + changedObserver, + maybeKeyWithMaintainersVersioned = + contractInstance.metadata.maybeKeyWithMaintainersVersioned, + ) + ), + ) + .inst + assertValidationFailure(invalid) + } + } + + } + + // TODO(i16065): Re-enable contract key tests + private val keyEnabledContractIdVersions = Seq.empty[CantonContractIdV1Version] + + 
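// Note: while `keyEnabledContractIdVersions` is empty, the `forEvery` block below + // performs zero iterations, so the key-validation cases that follow compile but are + // skipped until contract keys are re-enabled per the TODO above. +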
forEvery(keyEnabledContractIdVersions) { authContractIdVersion => + s"Contract key validations" when { + + val keyWithMaintainers = ExampleContractFactory.buildKeyWithMaintainers() + val contractInstanceWithKey = ExampleContractFactory.build[CreatedAt]( + cantonContractIdVersion = authContractIdVersion, + keyOpt = Some(keyWithMaintainers), + ) + + "using a changed key value" should { + "fail authentication" in { + val changeKey = keyWithMaintainers.copy(globalKey = + LfGlobalKey.assertBuild( + contractInstanceWithKey.templateId, + ValueText("changed"), + contractInstanceWithKey.inst.packageName, + ) + ) + val invalid: FatContractInstance = ExampleContractFactory + .modify[CreatedAt]( + contractInstanceWithKey, + metadata = Some( + ContractMetadata.tryCreate( + signatories = contractInstanceWithKey.metadata.signatories, + stakeholders = contractInstanceWithKey.metadata.stakeholders, + maybeKeyWithMaintainersVersioned = + Some(Versioned(contractInstanceWithKey.inst.version, changeKey)), + ) + ), + ) + .inst + assertAuthenticationError(invalid) + } + } + + "using changed key maintainers" should { + "fail authentication" ignore { + val changeKey = keyWithMaintainers.copy(maintainers = Set.empty) + val invalid: FatContractInstance = ExampleContractFactory + .modify[CreatedAt]( + contractInstanceWithKey, + metadata = Some( + ContractMetadata.tryCreate( + signatories = contractInstanceWithKey.metadata.signatories, + stakeholders = contractInstanceWithKey.metadata.stakeholders, + maybeKeyWithMaintainersVersioned = + Some(Versioned(contractInstanceWithKey.inst.version, changeKey)), + ) + ), + ) + .inst + assertAuthenticationError(invalid) + } + } + } + } +} diff --git a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/util/TestEngine.scala b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/util/TestEngine.scala index 89d798d04c..fe7a38ef01 100644 --- a/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/util/TestEngine.scala +++ b/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/util/TestEngine.scala @@ -11,7 +11,6 @@ import com.digitalasset.canton.crypto.{HashOps, HmacOps, Salt, TestSalt} import com.digitalasset.canton.ledger.api.validation.ValidateUpgradingPackageResolutions.ValidatedCommandPackageResolutionsSnapshot import com.digitalasset.canton.ledger.api.validation.{ CommandsValidator, - ValidateDisclosedContracts, ValidateUpgradingPackageResolutions, } import com.digitalasset.canton.lifecycle.FutureUnlessShutdown @@ -47,6 +46,7 @@ class TestEngine( userId: String = "TestUserId", commandId: String = "TestCmdId", iterationsBetweenInterruptions: Long = 1000, + cantonContractIdVersion: CantonContractIdV1Version = CantonContractIdVersion.maxV1, ) extends EitherValues with OptionValues { @@ -63,8 +63,7 @@ private val commandsValidator = new CommandsValidator( - validateUpgradingPackageResolutions = validateUpgradingPackageResolutions, - validateDisclosedContracts = ValidateDisclosedContracts.WithContractIdVerificationDisabled, + validateUpgradingPackageResolutions = validateUpgradingPackageResolutions ) val packageResolver: PackageId => TraceContext => FutureUnlessShutdown[Option[Package]] = @@ -92,7 +91,6 @@ private val testInstant = Instant.now private val testTimestamp = Time.Timestamp.assertFromInstant(testInstant) private val maxDeduplicationDuration = Duration.ZERO - private val cantonContractIdVersion = AuthenticatedContractIdVersionV11
val engine = new Engine( EngineConfig( @@ -101,6 +99,12 @@ class TestEngine( ) ) + def hashAndConsume( + c: LfNodeCreate, + method: Hash.HashingMethod = cantonContractIdVersion.contractHashingMethod, + ): LfHash = + consume(engine.hashCreateNode(c, identity, method)) + private val valueEnricher = new Enricher(engine) def consume[T]( @@ -187,7 +191,6 @@ class TestEngine( packagePreference = engineCommands.packagePreferenceSet, submitters = Set(Ref.Party.assertFromString(actAs)), cmds = engineCommands.commands, - disclosures = engineCommands.disclosedContracts.map(_.fatContractInstance), participantId = participantId, submissionSeed = randomHash(), readAs = Set.empty, @@ -213,8 +216,7 @@ class TestEngine( create.stakeholders, create.keyOpt.map(Versioned(create.version, _)), ), - contractHash = LegacyContractHash - .tryThinContractHash(create.coinst, cantonContractIdVersion.useUpgradeFriendlyHashing), + contractHash = hashAndConsume(create), ) .value @@ -225,7 +227,7 @@ class TestEngine( val suffixed = create.mapCid(_ => contractId) val authenticationData = - ContractAuthenticationDataV1(salt)(AuthenticatedContractIdVersionV11).toLfBytes + ContractAuthenticationDataV1(salt)(cantonContractIdVersion).toLfBytes FatContractInstance.fromCreateNode( suffixed, @@ -238,10 +240,7 @@ class TestEngine( fat: FatContractInstance, recomputeIdVersion: CantonContractIdV1Version, ): Unicum = { - - val contractHash = - LegacyContractHash.tryFatContractHash(fat, recomputeIdVersion.useUpgradeFriendlyHashing) - + val contractHash = hashAndConsume(fat.toCreateNode, recomputeIdVersion.contractHashingMethod) unicumGenerator.recomputeUnicum(fat, recomputeIdVersion, contractHash).value } diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml b/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml index b107a502b4..c062b0407d 100644 --- a/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml @@ -1,9 +1,9 @@ sdk-version: 3.3.0-snapshot.20250502.13767.0.v2fc6c7e2 -build-options: +build-options: - --enable-interfaces=yes name: carbonv1-tests source: . version: 3.1.0 -dependencies: +dependencies: - daml-prim - daml-stdlib diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml b/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml index 7f70479c87..a6348eeb20 100644 --- a/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml @@ -1,11 +1,11 @@ sdk-version: 3.3.0-snapshot.20250502.13767.0.v2fc6c7e2 -build-options: +build-options: - --enable-interfaces=yes name: carbonv2-tests -data-dependencies: +data-dependencies: - ../../../../scala-2.13/resource_managed/main/carbonv1-tests-3.1.0.dar source: . version: 3.1.0 -dependencies: +dependencies: - daml-prim - daml-stdlib diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml b/canton/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml index cf8defc8cb..4600baa7c4 100644 --- a/canton/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml @@ -2,6 +2,6 @@ sdk-version: 3.3.0-snapshot.20250502.13767.0.v2fc6c7e2 name: experimental-tests source: . 
version: 3.1.0 -dependencies: +dependencies: - daml-prim - daml-stdlib diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml b/canton/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml index 58088d64c2..9e58c79263 100644 --- a/canton/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml @@ -1,9 +1,9 @@ sdk-version: 3.3.0-snapshot.20250502.13767.0.v2fc6c7e2 -build-options: +build-options: - --enable-interfaces=yes name: model-tests source: . version: 3.1.0 -dependencies: +dependencies: - daml-prim - daml-stdlib diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/model_iface/daml.yaml b/canton/community/ledger/ledger-common-dars/src/main/daml/model_iface/daml.yaml new file mode 100644 index 0000000000..c821d43f05 --- /dev/null +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/model_iface/daml.yaml @@ -0,0 +1,9 @@ +sdk-version: 3.3.0-snapshot.20250502.13767.0.v2fc6c7e2 +build-options: +- --enable-interfaces=yes +name: model-iface-tests +source: . +version: 3.1.0 +dependencies: +- daml-prim +- daml-stdlib diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/ongoing_stream_package_upload/daml.yaml b/canton/community/ledger/ledger-common-dars/src/main/daml/ongoing_stream_package_upload/daml.yaml index 3fad614bfe..dc45ced390 100644 --- a/canton/community/ledger/ledger-common-dars/src/main/daml/ongoing_stream_package_upload/daml.yaml +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/ongoing_stream_package_upload/daml.yaml @@ -2,6 +2,6 @@ sdk-version: 3.3.0-snapshot.20250502.13767.0.v2fc6c7e2 name: ongoing-stream-package-upload-tests source: . version: 3.1.0 -dependencies: +dependencies: - daml-prim - daml-stdlib diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml b/canton/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml index 692d64890b..5223cec0f0 100644 --- a/canton/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml @@ -2,6 +2,6 @@ sdk-version: 3.3.0-snapshot.20250502.13767.0.v2fc6c7e2 name: package-management-tests source: . version: 3.1.0 -dependencies: +dependencies: - daml-prim - daml-stdlib diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml b/canton/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml index 4aca2eb146..ed2d816174 100644 --- a/canton/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml @@ -1,9 +1,9 @@ sdk-version: 3.3.0-snapshot.20250502.13767.0.v2fc6c7e2 -build-options: +build-options: - --enable-interfaces=yes name: semantic-tests source: . 
version: 3.1.0 -dependencies: +dependencies: - daml-prim - daml-stdlib diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml b/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml index 63fcf60584..52172f6720 100644 --- a/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml @@ -1,9 +1,9 @@ sdk-version: 3.3.0-snapshot.20250502.13767.0.v2fc6c7e2 name: upgrade-tests -data-dependencies: +data-dependencies: - ../../../../../scala-2.13/resource_managed/main/upgrade-iface-tests-3.1.0.dar source: . version: 1.0.0 -dependencies: +dependencies: - daml-prim - daml-stdlib diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml b/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml index a75551bc96..7dfc6243c5 100644 --- a/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml @@ -1,9 +1,9 @@ sdk-version: 3.3.0-snapshot.20250502.13767.0.v2fc6c7e2 name: upgrade-tests -data-dependencies: +data-dependencies: - ../../../../../scala-2.13/resource_managed/main/upgrade-iface-tests-3.1.0.dar source: . version: 2.0.0 -dependencies: +dependencies: - daml-prim - daml-stdlib diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml b/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml index b559a5d75b..55bb73ceb7 100644 --- a/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml @@ -1,9 +1,9 @@ sdk-version: 3.3.0-snapshot.20250502.13767.0.v2fc6c7e2 name: upgrade-tests -data-dependencies: +data-dependencies: - ../../../../../scala-2.13/resource_managed/main/upgrade-iface-tests-3.1.0.dar source: . version: 3.0.0 -dependencies: +dependencies: - daml-prim - daml-stdlib diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_fetch/1.0.0/daml.yaml b/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_fetch/1.0.0/daml.yaml index 7f59142a67..bbc706273a 100644 --- a/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_fetch/1.0.0/daml.yaml +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_fetch/1.0.0/daml.yaml @@ -2,6 +2,6 @@ sdk-version: 3.3.0-snapshot.20250502.13767.0.v2fc6c7e2 name: upgrade-fetch-tests source: . version: 1.0.0 -dependencies: +dependencies: - daml-prim - daml-stdlib diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_fetch/2.0.0/daml.yaml b/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_fetch/2.0.0/daml.yaml index 8088167265..3a3dae510c 100644 --- a/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_fetch/2.0.0/daml.yaml +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_fetch/2.0.0/daml.yaml @@ -2,6 +2,6 @@ sdk-version: 3.3.0-snapshot.20250502.13767.0.v2fc6c7e2 name: upgrade-fetch-tests source: . 
version: 2.0.0 -dependencies: +dependencies: - daml-prim - daml-stdlib diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_iface/daml.yaml b/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_iface/daml.yaml index e77a75a5f3..2a8498e0b0 100644 --- a/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_iface/daml.yaml +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_iface/daml.yaml @@ -1,9 +1,9 @@ -sdk-version: 3.3.0-snapshot.20250502.13767.0.v2fc6c7e2 -build-options: +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 +build-options: - --enable-interfaces=yes name: upgrade-iface-tests source: . version: 3.1.0 -dependencies: +dependencies: - daml-prim - daml-stdlib diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_alt/Alt.daml b/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_alt/Alt.daml new file mode 100644 index 0000000000..713e01f301 --- /dev/null +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_alt/Alt.daml @@ -0,0 +1,9 @@ +-- Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +-- SPDX-License-Identifier: Apache-2.0 + +module Alt where + +template AltT with + p: Party + where + signatory p diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_alt/daml.yaml b/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_alt/daml.yaml new file mode 100644 index 0000000000..c9cf3c21d8 --- /dev/null +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_alt/daml.yaml @@ -0,0 +1,7 @@ +sdk-version: 3.3.0-snapshot.20250502.13767.0.v2fc6c7e2 +name: vetting-alt +source: . +version: 1.0.0 +dependencies: +- daml-prim +- daml-stdlib diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_dep/Dep.daml b/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_dep/Dep.daml new file mode 100644 index 0000000000..73927037b9 --- /dev/null +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_dep/Dep.daml @@ -0,0 +1,9 @@ +-- Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +-- SPDX-License-Identifier: Apache-2.0 + +module Dep where + +template DepT with + p: Party + where + signatory p diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_dep/daml.yaml b/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_dep/daml.yaml new file mode 100644 index 0000000000..71f2d3f4ba --- /dev/null +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_dep/daml.yaml @@ -0,0 +1,7 @@ +sdk-version: 3.3.0-snapshot.20250502.13767.0.v2fc6c7e2 +name: vetting-dep +source: . +version: 1.0.0 +dependencies: +- daml-prim +- daml-stdlib diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/1.0.0/Main.daml b/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/1.0.0/Main.daml new file mode 100644 index 0000000000..2ce620867f --- /dev/null +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/1.0.0/Main.daml @@ -0,0 +1,12 @@ +-- Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+-- SPDX-License-Identifier: Apache-2.0 + +module Main where + +import Dep + +template MainT with + p: Party + dep: DepT + where + signatory p, dep.p diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/1.0.0/daml.yaml b/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/1.0.0/daml.yaml new file mode 100644 index 0000000000..b9703ae202 --- /dev/null +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/1.0.0/daml.yaml @@ -0,0 +1,9 @@ +sdk-version: 3.3.0-snapshot.20250502.13767.0.v2fc6c7e2 +name: vetting-main +data-dependencies: +- ../../../../../scala-2.13/resource_managed/main/vetting-dep-1.0.0.dar +source: . +version: 1.0.0 +dependencies: +- daml-prim +- daml-stdlib diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/2.0.0/Main.daml b/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/2.0.0/Main.daml new file mode 100644 index 0000000000..2ce620867f --- /dev/null +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/2.0.0/Main.daml @@ -0,0 +1,12 @@ +-- Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +-- SPDX-License-Identifier: Apache-2.0 + +module Main where + +import Dep + +template MainT with + p: Party + dep: DepT + where + signatory p, dep.p diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/2.0.0/daml.yaml b/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/2.0.0/daml.yaml new file mode 100644 index 0000000000..c9cf8a9db2 --- /dev/null +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/2.0.0/daml.yaml @@ -0,0 +1,9 @@ +sdk-version: 3.3.0-snapshot.20250502.13767.0.v2fc6c7e2 +name: vetting-main +data-dependencies: +- ../../../../../scala-2.13/resource_managed/main/vetting-dep-1.0.0.dar +source: . +version: 2.0.0 +dependencies: +- daml-prim +- daml-stdlib diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/split-lineage-2.0.0/Main.daml b/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/split-lineage-2.0.0/Main.daml new file mode 100644 index 0000000000..7cc518f8de --- /dev/null +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/split-lineage-2.0.0/Main.daml @@ -0,0 +1,12 @@ +-- Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +-- SPDX-License-Identifier: Apache-2.0 + +module Main where + +import Dep + +template DifferentMainT with + p: Party + dep: DepT + where + signatory p, dep.p diff --git a/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/split-lineage-2.0.0/daml.yaml b/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/split-lineage-2.0.0/daml.yaml new file mode 100644 index 0000000000..c8f620ac9d --- /dev/null +++ b/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/split-lineage-2.0.0/daml.yaml @@ -0,0 +1,10 @@ +sdk-version: 3.3.0-snapshot.20250502.13767.0.v2fc6c7e2 +canton-daml-plugin-name-suffix: split-lineage +name: vetting-main +data-dependencies: +- ../../../../../scala-2.13/resource_managed/main/vetting-dep-1.0.0.dar +source: . 
+version: 2.0.0 +dependencies: +- daml-prim +- daml-stdlib diff --git a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/package.scala b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/package.scala index 2b87058eff..c153c50bc3 100644 --- a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/package.scala +++ b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/package.scala @@ -136,20 +136,26 @@ package api { identityProviderId: IdentityProviderId, ) - sealed abstract class UserRight extends Product with Serializable + sealed abstract class UserRight extends Product with Serializable { + def getParty: Option[Ref.Party] = None + } + + sealed abstract class UserRightForParty(party: Ref.Party) extends UserRight { + override def getParty: Option[Ref.Party] = Some(party) + } object UserRight { final case object ParticipantAdmin extends UserRight final case object IdentityProviderAdmin extends UserRight - final case class CanActAs(party: Ref.Party) extends UserRight + final case class CanActAs(party: Ref.Party) extends UserRightForParty(party) - final case class CanReadAs(party: Ref.Party) extends UserRight + final case class CanReadAs(party: Ref.Party) extends UserRightForParty(party) final case object CanReadAsAnyParty extends UserRight - final case class CanExecuteAs(party: Ref.Party) extends UserRight + final case class CanExecuteAs(party: Ref.Party) extends UserRightForParty(party) final case object CanExecuteAsAnyParty extends UserRight } diff --git a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/util/TransactionTreeOps.scala b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/util/TransactionTreeOps.scala deleted file mode 100644 index 411ac3fd1e..0000000000 --- a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/util/TransactionTreeOps.scala +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.ledger.api.util - -import com.daml.ledger.api.v2.event.ExercisedEvent -import com.daml.ledger.api.v2.transaction.TransactionTree - -import scala.annotation.{nowarn, tailrec} - -//TODO(#23504) remove when transaction trees are removed -@nowarn("cat=deprecation") -object TransactionTreeOps { - - implicit class TransactionTreeOps(val tree: TransactionTree) extends AnyVal { - - /** Computes the node ids of the root nodes (i.e. the nodes that do not have any ancestors). A - * node can be considered a root if there are not any ancestors of it. There is no guarantee - * that the root node was also a root in the original transaction tree (i.e. before filtering - * out events from the original transaction tree). 
- * - * @return - * the root node ids - */ - def rootNodeIds(): List[Int] = { - val lastDescendantById = tree.eventsById.view - .mapValues(_.kind.exercised) - .map { case (nodeId, exercisedO) => - (nodeId, exercisedO.fold(nodeId)(_.lastDescendantNodeId)) - } - .toMap - - val sortedNodeIds = tree.eventsById.keys.toList.sorted - - @tailrec - def go(remainingNodes: List[Int], roots: List[Int]): List[Int] = - remainingNodes match { - case Nil => roots.reverse - case nodeId :: tail => - val lastDescendant = lastDescendantById.getOrElse( - nodeId, - throw new RuntimeException( - s"The last descendant for the node with id $nodeId was unexpectedly not found!" - ), - ) - val newRoots = nodeId :: roots - // skip all nodes within the range of the last descendant - val remaining = tail.dropWhile(_ <= lastDescendant) - go(remaining, newRoots) - } - - go(sortedNodeIds, List.empty) - } - - /** Computes the children nodes of an exercised event. It finds the candidate nodes that could - * be children of the event given (i.e. its descendants). Then it repeatedly finds from the - * candidates the one with the lowest id and adds it to its children and removes the child's - * descendants from the list with the candidates. A node can be considered a child of another - * node if there are not any intermediate descendants between its parent and itself. There is - * no guarantee that the child was a child of its parent in the original transaction tree (i.e. - * before filtering out events from the original transaction tree). - * - * @param exercised - * the exercised event - * @return - * the children's node ids - */ - def childNodeIds(exercised: ExercisedEvent): List[Int] = { - val nodeId = exercised.nodeId - val lastDescendant = exercised.lastDescendantNodeId - - val candidatesMap = - tree.eventsById.view.filter { case (id, _) => id > nodeId && id <= lastDescendant } - - val candidates = candidatesMap.keys.toList.sorted - - val lastDescendantById = candidatesMap - .mapValues(_.kind.exercised) - .map { case (nodeId, exercisedO) => - (nodeId, exercisedO.fold(nodeId)(_.lastDescendantNodeId)) - } - .toMap - - @tailrec - def go(remainingNodes: List[Int], children: List[Int]): List[Int] = - remainingNodes match { - case Nil => children.reverse - // first candidate will always be a child since it is not a descendant of another intermediate node - case child :: restCandidates => - val lastDescendant = lastDescendantById.getOrElse( - child, - throw new RuntimeException( - s"The node with id $child was unexpectedly not found!" 
- ), - ) - // add child to children and skip its descendants - val remainingCandidates = restCandidates.dropWhile(_ <= lastDescendant) - go(remainingCandidates, child :: children) - } - - go(candidates, List.empty) - } - } -} diff --git a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/admin/PackageManagementClient.scala b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/admin/PackageManagementClient.scala index dc08e9e409..146ce3b2d3 100644 --- a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/admin/PackageManagementClient.scala +++ b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/admin/PackageManagementClient.scala @@ -7,6 +7,8 @@ import com.daml.ledger.api.v2.admin.package_management_service.PackageManagement import com.daml.ledger.api.v2.admin.package_management_service.{ ListKnownPackagesRequest, PackageDetails, + UpdateVettedPackagesRequest, + UpdateVettedPackagesResponse, UploadDarFileRequest, ValidateDarFileRequest, } @@ -40,6 +42,8 @@ final class PackageManagementClient( def uploadDarFile( darFile: ByteString, token: Option[String] = None, + vetAllPackages: Boolean = true, + synchronizerId: Option[String] = None, )(implicit traceContext: TraceContext): Future[Unit] = LedgerClient .stubWithTracing(service, token.orElse(getDefaultToken())) @@ -47,6 +51,12 @@ final class PackageManagementClient( UploadDarFileRequest( darFile = darFile, submissionId = "", + vettingChange = + if (vetAllPackages) + UploadDarFileRequest.VettingChange.VETTING_CHANGE_VET_ALL_PACKAGES + else + UploadDarFileRequest.VettingChange.VETTING_CHANGE_DONT_VET_ANY_PACKAGES, + synchronizerId = synchronizerId.getOrElse(""), ) ) .map(_ => ()) @@ -54,6 +64,7 @@ final class PackageManagementClient( def validateDarFile( darFile: ByteString, token: Option[String] = None, + synchronizerId: Option[String] = None, )(implicit traceContext: TraceContext): Future[Unit] = LedgerClient .stubWithTracing(service, token.orElse(getDefaultToken())) @@ -61,7 +72,16 @@ final class PackageManagementClient( ValidateDarFileRequest( darFile = darFile, submissionId = "", + synchronizerId = synchronizerId.getOrElse(""), ) ) .map(_ => ()) + + def updateVettedPackages( + request: UpdateVettedPackagesRequest, + token: Option[String] = None, + )(implicit traceContext: TraceContext): Future[UpdateVettedPackagesResponse] = + LedgerClient + .stubWithTracing(service, token) + .updateVettedPackages(request) } diff --git a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/commands/CommandServiceClient.scala b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/commands/CommandServiceClient.scala index 5024f0a0af..9f31114055 100644 --- a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/commands/CommandServiceClient.scala +++ b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/commands/CommandServiceClient.scala @@ -7,7 +7,6 @@ import com.daml.ledger.api.v2.command_service.CommandServiceGrpc.CommandServiceS import com.daml.ledger.api.v2.command_service.{ SubmitAndWaitForTransactionRequest, SubmitAndWaitForTransactionResponse, - SubmitAndWaitForTransactionTreeResponse, SubmitAndWaitRequest, SubmitAndWaitResponse, } @@ -59,17 +58,6 @@ class 
CommandServiceClient( getSubmitAndWaitForTransactionRequest(request.commands) ) - // TODO(#23504) remove when json/v1 api is removed - @deprecated("TransactionTrees are deprecated", "3.3.0") - def deprecatedSubmitAndWaitForTransactionTreeForJsonApi( - request: SubmitAndWaitRequest, - timeout: Option[Duration] = None, - token: Option[String] = None, - )(implicit traceContext: TraceContext): Future[SubmitAndWaitForTransactionTreeResponse] = - serviceWithTokenAndDeadline(timeout, token).submitAndWaitForTransactionTree( - request - ) - def submitAndWaitForTransaction( commands: Commands, transactionShape: TransactionShape = TRANSACTION_SHAPE_ACS_DELTA, @@ -88,23 +76,6 @@ class CommandServiceClient( ), ) - // TODO(#23504) remove method - @deprecated("TransactionTrees are deprecated", "3.3.0") - def submitAndWaitForTransactionTree( - commands: Commands, - timeout: Option[Duration] = None, - token: Option[String] = None, - )(implicit - traceContext: TraceContext - ): Future[Either[Status, SubmitAndWaitForTransactionTreeResponse]] = - submitAndHandle( - timeout, - token, - withTraceContextInjectedIntoOpenTelemetryContext( - _.submitAndWaitForTransactionTree(SubmitAndWaitRequest(commands = Some(commands))) - ), - ) - def submitAndWait( commands: Commands, timeout: Option[Duration] = None, diff --git a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/pkg/PackageClient.scala b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/pkg/PackageClient.scala index 85d77a5cd2..533320ed08 100644 --- a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/pkg/PackageClient.scala +++ b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/pkg/PackageClient.scala @@ -11,6 +11,8 @@ import com.daml.ledger.api.v2.package_service.{ GetPackageStatusResponse, ListPackagesRequest, ListPackagesResponse, + ListVettedPackagesRequest, + ListVettedPackagesResponse, } import com.digitalasset.canton.ledger.client.LedgerClient import com.digitalasset.canton.tracing.TraceContext @@ -46,4 +48,12 @@ final class PackageClient( .getPackageStatus( GetPackageStatusRequest(packageId = packageId) ) + + def listVettedPackages( + request: ListVettedPackagesRequest, + token: Option[String] = None, + )(implicit traceContext: TraceContext): Future[ListVettedPackagesResponse] = + LedgerClient + .stubWithTracing(service, token) + .listVettedPackages(request) } diff --git a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/state/StateServiceClient.scala b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/state/StateServiceClient.scala index d9249c7908..d54816eadf 100644 --- a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/state/StateServiceClient.scala +++ b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/state/StateServiceClient.scala @@ -16,7 +16,7 @@ import com.daml.ledger.api.v2.state_service.{ GetLedgerEndRequest, GetLedgerEndResponse, } -import com.daml.ledger.api.v2.transaction_filter.{EventFormat, TransactionFilter} +import com.daml.ledger.api.v2.transaction_filter.EventFormat import com.digitalasset.canton.ledger.client.LedgerClient import com.digitalasset.canton.tracing.TraceContext import org.apache.pekko.NotUsed @@ -33,29 +33,6 @@ class 
StateServiceClient( esf: ExecutionSequencerFactory, ) { - /** Returns a stream of GetActiveContractsResponse messages. */ - // TODO(#23504) remove when TransactionFilter is removed - @deprecated( - "Use getActiveContractsSource with EventFormat instead", - "3.4.0", - ) - def getActiveContractsSource( - filter: TransactionFilter, - validAtOffset: Long, - verbose: Boolean = false, - token: Option[String] = None, - )(implicit traceContext: TraceContext): Source[GetActiveContractsResponse, NotUsed] = - ClientAdapter - .serverStreaming( - GetActiveContractsRequest( - filter = Some(filter), - verbose = verbose, - activeAtOffset = validAtOffset, - eventFormat = None, - ), - LedgerClient.stubWithTracing(service, token.orElse(getDefaultToken())).getActiveContracts, - ) - /** Returns a stream of GetActiveContractsResponse messages. */ def getActiveContractsSource( eventFormat: EventFormat, @@ -65,38 +42,12 @@ class StateServiceClient( ClientAdapter .serverStreaming( GetActiveContractsRequest( - filter = None, - verbose = false, activeAtOffset = validAtOffset, eventFormat = Some(eventFormat), ), LedgerClient.stubWithTracing(service, token.orElse(getDefaultToken())).getActiveContracts, ) - /** Returns the resulting active contract set */ - // TODO(#23504) remove when TransactionFilter is removed - @deprecated( - "Use getActiveContracts with EventFormat instead", - "3.4.0", - ) - def getActiveContracts( - filter: TransactionFilter, - validAtOffset: Long, - verbose: Boolean, - token: Option[String], - )(implicit - materializer: Materializer, - traceContext: TraceContext, - ): Future[Seq[ActiveContract]] = - for { - contracts <- getActiveContractsSource(filter, validAtOffset, verbose, token).runWith(Sink.seq) - active = contracts - .map(_.contractEntry) - .collect { case ContractEntry.ActiveContract(value) => - value - } - } yield active - /** Returns the resulting active contract set */ def getActiveContracts( eventFormat: EventFormat, diff --git a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/updates/UpdateServiceClient.scala b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/updates/UpdateServiceClient.scala index d8fe26765a..0078adc199 100644 --- a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/updates/UpdateServiceClient.scala +++ b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/updates/UpdateServiceClient.scala @@ -6,12 +6,7 @@ package com.digitalasset.canton.ledger.client.services.updates import com.daml.grpc.adapter.ExecutionSequencerFactory import com.daml.grpc.adapter.client.pekko.ClientAdapter import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_ACS_DELTA -import com.daml.ledger.api.v2.transaction_filter.{ - EventFormat, - TransactionFilter, - TransactionFormat, - UpdateFormat, -} +import com.daml.ledger.api.v2.transaction_filter.{EventFormat, TransactionFormat, UpdateFormat} import com.daml.ledger.api.v2.update_service.UpdateServiceGrpc.UpdateServiceStub import com.daml.ledger.api.v2.update_service.{GetUpdatesRequest, GetUpdatesResponse} import com.digitalasset.canton.ledger.client.LedgerClient @@ -25,30 +20,6 @@ class UpdateServiceClient( )(implicit esf: ExecutionSequencerFactory ) { - // TODO(#23504) remove when TransactionFilter is removed - @deprecated( - "Use getUpdatesSource with EventFormat instead", - "3.4.0", - ) - def getUpdatesSource( - begin: 
Long, - filter: TransactionFilter, - verbose: Boolean, - end: Option[Long], - token: Option[String], - )(implicit traceContext: TraceContext): Source[GetUpdatesResponse, NotUsed] = - ClientAdapter - .serverStreaming( - GetUpdatesRequest( - beginExclusive = begin, - endInclusive = end, - filter = Some(filter), - verbose = verbose, - updateFormat = None, - ), - LedgerClient.stubWithTracing(service, token.orElse(getDefaultToken())).getUpdates, - ) - def getUpdatesSource( begin: Long, eventFormat: EventFormat, @@ -60,8 +31,6 @@ class UpdateServiceClient( GetUpdatesRequest( beginExclusive = begin, endInclusive = end, - filter = None, - verbose = false, updateFormat = Some( UpdateFormat( includeTransactions = Some( diff --git a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/version/VersionClient.scala b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/version/VersionClient.scala index 09ce10108b..b5c1eb008f 100644 --- a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/version/VersionClient.scala +++ b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/client/services/version/VersionClient.scala @@ -39,7 +39,7 @@ object VersionClient { featuresDescriptor match { // Note that we do not expose experimental features here, as they are used for internal testing only // and do not have backwards compatibility guarantees. (They should probably be named 'internalFeatures' ;-) - case FeaturesDescriptor(userManagement, _, _, _) => + case FeaturesDescriptor(userManagement, _, _, _, _) => userManagement.toList map (_ => Feature.UserManagement) } } diff --git a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/CommonErrors.scala b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/CommonErrors.scala index 23d3ec8e06..6009e8eef8 100644 --- a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/CommonErrors.scala +++ b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/CommonErrors.scala @@ -52,9 +52,10 @@ object CommonErrors extends CommonErrorGroup { id = "REQUEST_ALREADY_IN_FLIGHT", ErrorCategory.ContentionOnSharedResources, ) { - final case class Reject(requestId: String)(implicit errorLogger: ErrorLoggingContext) - extends DamlErrorWithDefiniteAnswer( - cause = s"The request $requestId is already in flight" + final case class Reject(requestId: String, details: String)(implicit + errorLogger: ErrorLoggingContext + ) extends DamlErrorWithDefiniteAnswer( + cause = s"Request with ID $requestId is already in flight: $details" ) } diff --git a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/CommandExecutionErrors.scala b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/CommandExecutionErrors.scala index 3f3c8fde07..f56f3ccce8 100644 --- a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/CommandExecutionErrors.scala +++ b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/CommandExecutionErrors.scala @@ -21,7 +21,11 @@ import com.digitalasset.daml.lf.data.Ref.{Identifier, PackageId} import com.digitalasset.daml.lf.engine.Error as LfError import com.digitalasset.daml.lf.interpretation.Error 
as LfInterpretationError import com.digitalasset.daml.lf.language.{Ast, LanguageVersion, Reference} -import com.digitalasset.daml.lf.transaction.{GlobalKey, TransactionVersion} +import com.digitalasset.daml.lf.transaction.{ + GlobalKey, + GlobalKeyWithMaintainers, + SerializationVersion, +} import com.digitalasset.daml.lf.value.Value.ContractId import com.digitalasset.daml.lf.value.{Value, ValueCoder} import com.digitalasset.daml.lf.{VersionRange, language} @@ -36,22 +40,25 @@ import scala.concurrent.duration.DurationInt object CommandExecutionErrors extends CommandExecutionErrorGroup { def encodeValue(v: Value): Either[ValueCoder.EncodeError, String] = ValueCoder - .encodeValue(valueVersion = TransactionVersion.VDev, v0 = v) + .encodeValue(valueVersion = SerializationVersion.VDev, v0 = v) .map(bs => BaseEncoding.base64().encode(bs.toByteArray)) - def withEncodedValue( - v: Value - )( - f: String => Seq[(ErrorResource, String)] - )(implicit loggingContext: ErrorLoggingContext): Seq[(ErrorResource, String)] = + def tryEncodeValue(v: Value)(implicit loggingContext: ErrorLoggingContext): Option[String] = encodeValue(v).fold( { case ValueCoder.EncodeError(msg) => loggingContext.error(msg) - Seq.empty + None }, - f, + Some(_), ) + def withEncodedValue( + v: Value + )( + f: String => Seq[(ErrorResource, String)] + )(implicit loggingContext: ErrorLoggingContext): Seq[(ErrorResource, String)] = + tryEncodeValue(v).fold(Seq.empty[(ErrorResource, String)])(f) + def encodeParties(parties: Set[Ref.Party]): Seq[(ErrorResource, String)] = Seq((ErrorResource.Parties, parties.mkString(","))) @@ -838,76 +845,32 @@ object CommandExecutionErrors extends CommandExecutionErrorGroup { ) { override def resources: Seq[(ErrorResource, String)] = { - val optKeyResources = err.keyOpt.fold(Seq.empty[(ErrorResource, String)])(key => - withEncodedValue(key.globalKey.key) { encodedKey => - Seq( - (ErrorResource.ContractKey, encodedKey), - (ErrorResource.PackageName, key.globalKey.packageName), - ) ++ encodeParties(key.maintainers) - } - ) + def optKeyResources( + keyOpt: Option[GlobalKeyWithMaintainers] + ): Seq[(ErrorResource, String)] = + Seq( + ( + ErrorResource.ContractKey.nullable, + keyOpt.flatMap(key => tryEncodeValue(key.globalKey.key)).getOrElse("NULL"), + ), + ( + ErrorResource.PackageName.nullable, + keyOpt.map(_.globalKey.packageName).getOrElse("NULL"), + ), + ( + ErrorResource.Parties.nullable, + keyOpt.map(_.maintainers.mkString(",")).getOrElse("NULL"), + ), + ) Seq( (ErrorResource.ContractId, err.coid.coid), (ErrorResource.TemplateId, err.srcTemplateId.toString), (ErrorResource.TemplateId, err.dstTemplateId.toString), - ) ++ encodeParties(err.signatories) ++ encodeParties(err.observers) ++ optKeyResources + ) ++ encodeParties(err.originalSignatories) ++ encodeParties(err.originalObservers) ++ optKeyResources(err.originalKeyOpt) ++ encodeParties(err.recomputedSignatories) ++ encodeParties(err.recomputedObservers) ++ optKeyResources(err.recomputedKeyOpt) } } } - - @Explanation( - "An optional contract field with a value of Some may not be dropped during downgrading" - ) - @Resolution( - "There is data that is newer than the implementation using it, and thus is not compatible. Ensure new data (i.e. 
those with additional fields as `Some`) is only used with new/compatible choices" - ) - object DowngradeDropDefinedField - extends ErrorCode( - id = "INTERPRETATION_UPGRADE_ERROR_DOWNGRADE_DROP_DEFINED_FIELD", - ErrorCategory.InvalidGivenCurrentSystemStateOther, - ) { - final case class Reject( - override val cause: String, - err: LfInterpretationError.Upgrade.DowngradeDropDefinedField, - )(implicit - loggingContext: ErrorLoggingContext - ) extends DamlErrorWithDefiniteAnswer( - cause = cause - ) { - override def resources: Seq[(ErrorResource, String)] = - Seq( - (ErrorResource.ExpectedType, err.expectedType.pretty), - (ErrorResource.FieldIndex, err.fieldIndex.toString), - ) - } - } - - @Explanation( - "An optional contract field with a value of Some may not be dropped during downgrading" - ) - @Resolution( - "There is data that is newer than the implementation using it, and thus is not compatible. Ensure new data (i.e. those with additional fields as `Some`) is only used with new/compatible choices" - ) - object DowngradeFailed - extends ErrorCode( - id = "INTERPRETATION_UPGRADE_ERROR_DOWNGRADE_FAILED", - ErrorCategory.InvalidGivenCurrentSystemStateOther, - ) { - final case class Reject( - override val cause: String, - err: LfInterpretationError.Upgrade.DowngradeFailed, - )(implicit - loggingContext: ErrorLoggingContext - ) extends DamlErrorWithDefiniteAnswer( - cause = cause - ) { - override def resources: Seq[(ErrorResource, String)] = - Seq( - (ErrorResource.ExpectedType, err.expectedType.pretty) - ) - } - } } @Explanation("Errors that occur when using cyptography primitives") diff --git a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/RequestValidationErrors.scala b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/RequestValidationErrors.scala index 28152d4ca2..9d6bbd5a7d 100644 --- a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/RequestValidationErrors.scala +++ b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/RequestValidationErrors.scala @@ -59,36 +59,6 @@ object RequestValidationErrors extends RequestValidationErrorGroup { ) } - // TODO(#23504) remove the error when it is no longer used - @Explanation( - "The transaction does not exist or the requesting set of parties are not authorized to fetch it." - ) - @Resolution( - "Check the transaction id or offset and verify that the requested transaction is visible to the requesting parties." - ) - object Transaction - extends ErrorCode( - id = "TRANSACTION_NOT_FOUND", - ErrorCategory.InvalidGivenCurrentSystemStateResourceMissing, - ) { - - final case class RejectWithTxId(transactionId: String)(implicit - loggingContext: ErrorLoggingContext - ) extends DamlErrorWithDefiniteAnswer(cause = "Transaction not found, or not visible.") { - override def resources: Seq[(ErrorResource, String)] = Seq( - (ErrorResource.TransactionId, transactionId) - ) - } - - final case class RejectWithOffset(offset: Long)(implicit - loggingContext: ErrorLoggingContext - ) extends DamlErrorWithDefiniteAnswer(cause = "Transaction not found, or not visible.") { - override def resources: Seq[(ErrorResource, String)] = Seq( - (ErrorResource.Offset, offset.toString) - ) - } - } - @Explanation( "The update does not exist or the update format specified filters it out." 
) diff --git a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/UserManagementServiceErrors.scala b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/UserManagementServiceErrors.scala index e57974d9de..34429e3c7d 100644 --- a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/UserManagementServiceErrors.scala +++ b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/UserManagementServiceErrors.scala @@ -87,7 +87,7 @@ object UserManagementServiceErrors extends UserManagementServiceErrorGroup { } } - @Explanation("The user referred to by the request was not found.") + @Explanation("The user / idp combination referred to by the request was not found.") @Resolution( "Check that you are connecting to the right participant node and the user-id is spelled correctly, if yes, create the user." ) diff --git a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/IndexDBMetrics.scala b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/IndexDBMetrics.scala index c194f0223a..0984f0b9ad 100644 --- a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/IndexDBMetrics.scala +++ b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/IndexDBMetrics.scala @@ -77,8 +77,10 @@ trait UpdateStreamsDbMetrics { val fetchEventConsumingIdsStakeholder: DatabaseMetrics = createDbMetrics( "fetch_event_consuming_ids_stakeholder" ) - val fetchEventCreatePayloads: DatabaseMetrics = createDbMetrics("fetch_event_create_payloads") - val fetchEventConsumingPayloads: DatabaseMetrics = createDbMetrics( + val fetchEventCreatePayloadsLegacy: DatabaseMetrics = createDbMetrics( + "fetch_event_create_payloads" + ) + val fetchEventConsumingPayloadsLegacy: DatabaseMetrics = createDbMetrics( "fetch_event_consuming_payloads" ) @@ -91,19 +93,19 @@ trait UpdateStreamsDbMetrics { // Private constructor to avoid being instantiated multiple times by accident final class UpdatesLedgerEffectsStreamMetrics private[UpdateStreamsDbMetrics] { - val fetchEventCreateIdsStakeholder: DatabaseMetrics = createDbMetrics( + val fetchEventCreateIdsStakeholderLegacy: DatabaseMetrics = createDbMetrics( "fetch_event_create_ids_stakeholder" ) - val fetchEventCreateIdsNonStakeholder: DatabaseMetrics = createDbMetrics( + val fetchEventCreateIdsNonStakeholderLegacy: DatabaseMetrics = createDbMetrics( "fetch_event_create_ids_non_stakeholder" ) - val fetchEventConsumingIdsStakeholder: DatabaseMetrics = createDbMetrics( + val fetchEventConsumingIdsStakeholderLegacy: DatabaseMetrics = createDbMetrics( "fetch_event_consuming_ids_stakeholder" ) - val fetchEventConsumingIdsNonStakeholder: DatabaseMetrics = createDbMetrics( + val fetchEventConsumingIdsNonStakeholderLegacy: DatabaseMetrics = createDbMetrics( "fetch_event_consuming_ids_non_stakeholder" ) - val fetchEventNonConsumingIds: DatabaseMetrics = createDbMetrics( + val fetchEventNonConsumingIdsLegacy: DatabaseMetrics = createDbMetrics( "fetch_event_non_consuming_ids_informee" ) val fetchEventCreatePayloads: DatabaseMetrics = createDbMetrics("fetch_event_create_payloads") @@ -125,14 +127,16 @@ trait UpdateStreamsDbMetrics { // Private constructor to avoid being instantiated multiple times by accident final class ReassignmentStreamMetrics private[UpdateStreamsDbMetrics] { - val 
fetchEventAssignIdsStakeholder: DatabaseMetrics = createDbMetrics( + val fetchEventAssignIdsStakeholderLegacy: DatabaseMetrics = createDbMetrics( "fetch_event_assign_ids_stakeholder" ) - val fetchEventUnassignIdsStakeholder: DatabaseMetrics = createDbMetrics( + val fetchEventUnassignIdsStakeholderLegacy: DatabaseMetrics = createDbMetrics( "fetch_event_unassign_ids_stakeholder" ) - val fetchEventAssignPayloads: DatabaseMetrics = createDbMetrics("fetch_event_assign_payloads") - val fetchEventUnassignPayloads: DatabaseMetrics = createDbMetrics( + val fetchEventAssignPayloadsLegacy: DatabaseMetrics = createDbMetrics( + "fetch_event_assign_payloads" + ) + val fetchEventUnassignPayloadsLegacy: DatabaseMetrics = createDbMetrics( "fetch_event_unassign_payloads" ) @@ -166,8 +170,10 @@ trait UpdatePointwiseDbMetrics { // Private constructor to avoid being instantiated multiple times by accident final class UpdatesAcsDeltaPointwiseMetrics private[UpdatePointwiseDbMetrics] { - val fetchEventCreatePayloads: DatabaseMetrics = createDbMetrics("fetch_event_create_payloads") - val fetchEventConsumingPayloads: DatabaseMetrics = createDbMetrics( + val fetchEventCreatePayloadsLegacy: DatabaseMetrics = createDbMetrics( + "fetch_event_create_payloads" + ) + val fetchEventConsumingPayloadsLegacy: DatabaseMetrics = createDbMetrics( "fetch_event_consuming_payloads" ) @@ -199,8 +205,10 @@ trait UpdatePointwiseDbMetrics { // Private constructor to avoid being instantiated multiple times by accident final class ReassignmentPointwiseMetrics private[UpdatePointwiseDbMetrics] { - val fetchEventAssignPayloads: DatabaseMetrics = createDbMetrics("fetch_event_assign_payloads") - val fetchEventUnassignPayloads: DatabaseMetrics = createDbMetrics( + val fetchEventAssignPayloadsLegacy: DatabaseMetrics = createDbMetrics( + "fetch_event_assign_payloads" + ) + val fetchEventUnassignPayloadsLegacy: DatabaseMetrics = createDbMetrics( "fetch_event_unassign_payloads" ) @@ -440,6 +448,9 @@ class MainIndexDBMetrics( val lookupContractByKeyDbMetrics: DatabaseMetrics = createDbMetrics( "lookup_contract_by_key" ) + val lookupLastActivationsDbMetrics: DatabaseMetrics = createDbMetrics( + "lookup_last_activations" + ) val lookupPointwiseUpdateFetchEventIds: DatabaseMetrics = createDbMetrics( "fetch_event_ids" @@ -450,16 +461,31 @@ class MainIndexDBMetrics( ) val getEventsByContractId: DatabaseMetrics = createDbMetrics("get_events_by_contract_id") val getActiveContracts: DatabaseMetrics = createDbMetrics("get_active_contracts") - val getActiveContractIdsForCreated: DatabaseMetrics = createDbMetrics( - "get_active_contract_ids_for_created" + val getActiveContractIdRanges: DatabaseMetrics = createDbMetrics( + "get_active_contract_id_ranges" + ) + val getActiveContractIdRangesForCreatedLegacy: DatabaseMetrics = createDbMetrics( + "get_active_contract_id_ranges_for_created" + ) + val getFilteredActiveContractIds: DatabaseMetrics = createDbMetrics( + "get_filtered_active_contract_ids" + ) + val getFilteredActiveContractIdsForCreatedLegacy: DatabaseMetrics = createDbMetrics( + "get_filtered_active_contract_ids_for_created" + ) + val getActiveContractIdRangesForAssignedLegacy: DatabaseMetrics = createDbMetrics( + "get_active_contract_id_ranges_for_assigned" + ) + val getFilteredActiveContractIdsForAssignedLegacy: DatabaseMetrics = createDbMetrics( + "get_filtered_active_contract_ids_for_assigned" ) - val getActiveContractIdsForAssigned: DatabaseMetrics = createDbMetrics( - "get_active_contract_ids_for_assigned" + val getActiveContractBatch: 
DatabaseMetrics = createDbMetrics( + "get_active_contract_batch" ) - val getActiveContractBatchForCreated: DatabaseMetrics = createDbMetrics( + val getActiveContractBatchForCreatedLegacy: DatabaseMetrics = createDbMetrics( "get_active_contract_batch_for_created" ) - val getActiveContractBatchForAssigned: DatabaseMetrics = createDbMetrics( + val getActiveContractBatchForAssignedLegacy: DatabaseMetrics = createDbMetrics( "get_active_contract_batch_for_assigned" ) val getEventSeqIdRange: DatabaseMetrics = createDbMetrics("get_event_sequential_id_range") @@ -475,10 +501,10 @@ class MainIndexDBMetrics( val getUnassingIdsForOffsets: DatabaseMetrics = createDbMetrics( "get_unassign_ids_for_offsets" ) - val getCreateIdsForContractIds: DatabaseMetrics = createDbMetrics( + val getCreateIdsForContractIdsLegacy: DatabaseMetrics = createDbMetrics( "get_create_ids_for_contract_ids" ) - val getAssignIdsForContractIds: DatabaseMetrics = createDbMetrics( + val getAssignIdsForContractIdsLegacy: DatabaseMetrics = createDbMetrics( "get_assign_ids_for_contract_ids" ) @@ -499,7 +525,7 @@ class MainIndexDBMetrics( "last_synchronizer_offset_before_or_at_record_time" ) - val archivals: DatabaseMetrics = createDbMetrics("archivals") + val archivalsLegacy: DatabaseMetrics = createDbMetrics("archivals") object translation { val getLfPackage: Timer = openTelemetryMetricsFactory.timer(inventory.getLfPackage.info) diff --git a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/ServicesMetrics.scala b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/ServicesMetrics.scala index bcc453ef3c..dab3a08909 100644 --- a/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/ServicesMetrics.scala +++ b/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/ServicesMetrics.scala @@ -82,15 +82,6 @@ private[metrics] final class ServicesHistograms(val prefix: MetricName)(implicit private[metrics] val transactionTrees: Item = extend("transaction_trees", baseInfo) private[metrics] val getUpdateByOffset: Item = extend("get_update_by_offset", baseInfo) private[metrics] val getUpdateById: Item = extend("get_update_by_id", baseInfo) - // TODO(#23504) remove when corresponding grpc method has been removed - private[metrics] val getTransactionById: Item = extend("get_transaction_by_id", baseInfo) - // TODO(#23504) remove when corresponding grpc method has been removed - private[metrics] val getTransactionTreeById: Item = extend("get_transaction_tree_by_id", baseInfo) - // TODO(#23504) remove when corresponding grpc method has been removed - private[metrics] val getTransactionByOffset: Item = extend("get_transaction_by_offset", baseInfo) - // TODO(#23504) remove when corresponding grpc method has been removed - private[metrics] val getTransactionTreeByOffset: Item = - extend("get_transaction_tree_by_offset", baseInfo) private[metrics] val getActiveContracts: Item = extend("get_active_contracts", baseInfo) private[metrics] val lookupActiveContract: Item = extend("lookup_active_contract", baseInfo) private[metrics] val lookupContractState: Item = extend("lookup_contract_state", baseInfo) @@ -161,6 +152,7 @@ private[metrics] final class ServicesHistograms(val prefix: MetricName)(implicit private[metrics] val readListLfPackages: Item = extend("list_lf_packages", readBaseInfo) private[metrics] val readGetLfArchive: Item = extend("get_lf_archive", readBaseInfo) private[metrics] val readValidateDar: Item = 
extend("validate_dar", readBaseInfo) + private[metrics] val readListVettedPackages: Item = extend("list_vetted_packages", readBaseInfo) private[metrics] val writeBaseInfo = MetricInfo( indexPrefix :+ "write", @@ -182,6 +174,9 @@ private[metrics] final class ServicesHistograms(val prefix: MetricName)(implicit private[metrics] val writePrune: Item = extend("prune", writeBaseInfo) + private[metrics] val writeUpdateVettedPackages: Item = + extend("update_vetted_packages", writeBaseInfo) + } // Private constructor to avoid being instantiated multiple times by accident @@ -205,18 +200,6 @@ final class ServicesMetrics private[metrics] ( val getCompletions: Timer = openTelemetryMetricsFactory.timer(inventory.getCompletions.info) val transactions: Timer = openTelemetryMetricsFactory.timer(inventory.transactions.info) val transactionTrees: Timer = openTelemetryMetricsFactory.timer(inventory.transactionTrees.info) - // TODO(#23504) remove when corresponding grpc method has been removed - val getTransactionById: Timer = - openTelemetryMetricsFactory.timer(inventory.getTransactionById.info) - // TODO(#23504) remove when corresponding grpc method has been removed - val getTransactionTreeById: Timer = - openTelemetryMetricsFactory.timer(inventory.getTransactionTreeById.info) - // TODO(#23504) remove when corresponding grpc method has been removed - val getTransactionByOffset: Timer = - openTelemetryMetricsFactory.timer(inventory.getTransactionByOffset.info) - // TODO(#23504) remove when corresponding grpc method has been removed - val getTransactionTreeByOffset: Timer = - openTelemetryMetricsFactory.timer(inventory.getTransactionTreeByOffset.info) val getUpdateByOffset: Timer = openTelemetryMetricsFactory.timer(inventory.getUpdateByOffset.info) val getUpdateById: Timer = @@ -331,6 +314,8 @@ final class ServicesMetrics private[metrics] ( val listLfPackages: Timer = openTelemetryMetricsFactory.timer(inventory.readListLfPackages.info) val getLfArchive: Timer = openTelemetryMetricsFactory.timer(inventory.readGetLfArchive.info) val validateDar: Timer = openTelemetryMetricsFactory.timer(inventory.readValidateDar.info) + val listVettedPackages: Timer = + openTelemetryMetricsFactory.timer(inventory.readListVettedPackages.info) } val read: ReadMetrics = new ReadMetrics @@ -360,6 +345,9 @@ final class ServicesMetrics private[metrics] ( val allocateParty: Timer = openTelemetryMetricsFactory.timer(inventory.writeAllocateParty.info) val prune: Timer = openTelemetryMetricsFactory.timer(inventory.writePrune.info) + + val updateVettedPackages: Timer = + openTelemetryMetricsFactory.timer(inventory.writeUpdateVettedPackages.info) } val write: WriteMetrics = new WriteMetrics diff --git a/canton/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/api/util/TransactionTreeOpsSpec.scala b/canton/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/api/util/TransactionTreeOpsSpec.scala deleted file mode 100644 index 01d0fe60b2..0000000000 --- a/canton/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/api/util/TransactionTreeOpsSpec.scala +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0
-
-package com.digitalasset.canton.ledger.api.util
-
-import com.daml.ledger.api.v2.transaction.TransactionTree
-import com.daml.ledger.api.v2.transaction.TreeEvent.Kind
-import com.daml.ledger.javaapi.data.Generators.*
-import com.daml.ledger.javaapi.data.{
-  TransactionTree as TransactionTreeJava,
-  TransactionTreeUtils,
-  TreeEvent,
-}
-import org.scalatest.OptionValues
-import org.scalatest.flatspec.AnyFlatSpec
-import org.scalatest.matchers.should.Matchers
-import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
-
-import scala.annotation.nowarn
-import scala.jdk.CollectionConverters.*
-
-//TODO(#23504) remove when TransactionTree is removed
-@nowarn("cat=deprecation")
-class TransactionTreeOpsSpec
-    extends AnyFlatSpec
-    with Matchers
-    with OptionValues
-    with ScalaCheckDrivenPropertyChecks {
-
-  "rootNodeIds and childNodeIds" should "find the root and children node ids of the transaction tree" in forAll(
-    transactionTreeGenWithIdsInPreOrder
-  ) { transactionTreeOuter =>
-    import TransactionTreeOps.*
-
-    val transactionTree = TransactionTree.fromJavaProto(transactionTreeOuter)
-
-    case class WrappedEvent(nodeId: Int, children: List[WrappedEvent]) {
-      def descendants(): Seq[WrappedEvent] =
-        Seq(this) ++ children ++ children.flatMap(_.descendants())
-    }
-
-    val wrappedTree: Seq[WrappedEvent] = TransactionTreeUtils
-      .buildTree(
-        TransactionTreeJava.fromProto(transactionTreeOuter),
-        (treeEvent: TreeEvent, children: java.util.List[WrappedEvent]) =>
-          WrappedEvent(treeEvent.getNodeId, children.asScala.toList),
-      )
-      .asScala
-      .toSeq
-
-    transactionTree.rootNodeIds() shouldBe wrappedTree.map(_.nodeId)
-
-    val wrappedEventsById =
-      wrappedTree.flatMap(_.descendants()).map(event => event.nodeId -> event).toMap
-
-    val events = transactionTree.eventsById.values
-
-    val exercisedEvents = events.collect(e =>
-      e.kind match {
-        case Kind.Exercised(exercised) => exercised
-      }
-    )
-
-    exercisedEvents.foreach { event =>
-      transactionTree
-        .childNodeIds(event) shouldBe wrappedEventsById
-        .get(event.nodeId)
-        .value
-        .children
-        .map(_.nodeId)
-    }
-
-  }
-}
diff --git a/canton/community/ledger/ledger-json-api/CONTRIBUTING.md b/canton/community/ledger/ledger-json-api/CONTRIBUTING.md
new file mode 100644
index 0000000000..49957ea663
--- /dev/null
+++ b/canton/community/ledger/ledger-json-api/CONTRIBUTING.md
@@ -0,0 +1,151 @@
+# Hints for developers extending the Ledger JSON API
+## also known as JSON API v2
+
+
+## Adding a new endpoint to an existing service
+For example: service `JsUserManagementService`, endpoint: list user rights.
+
+1. First you need a working corresponding gRPC service.
+In the example:
+`UserManagementServiceGrpc#listUserRights(request: com.daml.ledger.api.v2.admin.user_management_service.ListUserRightsRequest): scala.concurrent.Future[com.daml.ledger.api.v2.admin.user_management_service.ListUserRightsResponse]`
+2. Decide on the URL and parameters for the endpoint.
+In the example: `GET /v2/users//rights`
+
+Naming conventions:
+ - prefer `kebab-case` for paths and query parameters,
+ - use `camelCase` for JSON fields (to match the generated gRPC stub classes),
+ - when creating mirror classes (see below), try to keep names as close to gRPC as possible (avoid prefixes such as `Js`).
+
+
+
+3. Encode this endpoint as a Tapir endpoint inside the service object.
+```scala
+  val listUserRightsEndpoint =
+    users.get
+      .in(path[String](userIdPath))
+      .in("rights")
+      .out(jsonBody[user_management_service.ListUserRightsResponse])
+      .description("List user rights.")
+```
+
+Notice that no business logic has been added yet (only the endpoint definition).
+
+4. Compile and add codecs.
+
+When you try to compile, you will probably get errors about missing Circe codecs for the types used by the endpoint.
+
+The typical solution is to add a codec to the service's codecs object,
+in the example `JsUserManagementCodecs`:
+```scala
+  implicit val listUserRightsResponseRW: Codec[user_management_service.ListUserRightsResponse] =
+    deriveRelaxedCodec
+```
+
+Sometimes you need to add codecs for nested types as well.
+
+5. Add the endpoint to the list of documented endpoints:
+
+```scala
+override def documentation: Seq[AnyEndpoint] = List(
+    ...
+    revokeUserRightsEndpoint,
++   listUserRightsEndpoint,
+    updateUserIdentityProviderEndpoint,
+  )
+}
+```
+
+6. Add the logic for the endpoint.
+
+Go to the `endpoints` method in the service and add the logic there.
+Typically you first add a method:
+```scala
+private def listUserRights(
+      callerContext: CallerContext
+  ): TracedInput[String] => Future[
+    Either[JsCantonError, user_management_service.ListUserRightsResponse]
+  ] = req =>
+    UserId.fromString(req.in) match {
+      case Right(userId) =>
+        userManagementClient
+          .serviceStub(callerContext.token())(req.traceContext)
+          .listUserRights(
+            new user_management_service.ListUserRightsRequest(
+              userId = userId,
+              identityProviderId = "",
+            )
+          )
+          .resultToRight
+      case Left(error) => malformedUserId(error)(req.traceContext)
+    }
+```
+
+Then you add the endpoint to the list of handled endpoints:
+```scala
+withServerLogic(
+  JsUserManagementService.listUserRightsEndpoint,
+  listUserRights,
+),
+```
+
+7. If the JSON structure returned or consumed by the endpoint contains a Daml record, it needs special handling by the so-called `transcode`.
+   You also need to add a "mirror" class for the existing gRPC stub, where the Daml record is replaced by the `Json` type.
+
+   See `JsCommand.CreateCommand` for an example.
+```scala
+object JsCommand {
+  sealed trait Command
+  final case class CreateCommand(
+      templateId: Identifier,
+      createArguments: Json,
+  ) extends Command
+```
+Finally, you need to write the mapping between the gRPC stub and the JSON API class.
+Put your mapping method inside the `ProtocolConverters` object.
+See `ProtocolConverters.Command` for an example.
+
+If the Daml record is nested, you need to create mirror classes for the outer classes as well (this is sometimes quite tedious).
+
+
+## Writing a new service
+
+1. Write the `JsMYSERVICE` object - define the base endpoint:
+```scala
+  private val myservice = v2Endpoint.in(sttp.tapir.stringToPath("myservice"))
+```
+2. Write the `JsMYSERVICECodecs` object - define codecs for the types used in the service.
+
+3. Write the `JsMYSERVICE` class - implement the service logic and the `endpoints` method.
+
+A copy of an existing small service such as `JsVersionService` is a good starting point.
+
+## Final steps / testing
+
+1. Typically you need to regenerate the OpenAPI documentation.
+2. Run the object `GenerateJSONApiDocs` or use the sbt task `sbt packageJsonApiDocsArtifacts`.
+3. Check that the `openapi.yml` file is updated and analyze whether the changes look correct.
+4. There is an `OpenapiTypesTest` which checks that the OpenAPI-generated classes (Java) match the actual JSON API classes (Scala).
+Usually you need to add missing mappings (for newly used types).
+5. If you added a new ProtocolConverter, add a test for it in `ProtocolConvertersTest`.
+6. The best way to test an endpoint is to use our IT test framework: add a test for your new endpoint (such as in `PartyManagementServiceIT`); these tests run both via gRPC and JSON/HTTP.
+
+## Design decisions - explanations
+
+1. The Ledger JSON API is designed to be a mirror of gRPC, but we do not automatically generate "URLs" from the gRPC API,
+in order to keep the API clean and understandable.
+
+2. We use Tapir to define endpoints, which gives us a lot of flexibility and power.
+
+3. We use Circe, but with semi-automated codec generation. This means it is necessary to add codecs for new types,
+but it saves us from unintended changes in the Circe encoding of more complex types.
+
+4. We have some custom Circe encoders such as `deriveRelaxedCodec` and `stringEncoderForEnum`;
+check the code for their uses.
+
+5. In the Tapir version we use (for Scala 2), the Circe encoding and the actual documentation schema (for OpenAPI) are generated
+independently, which means they sometimes diverge (the documentation does not match the actually expected types).
+Such cases should be detected automatically in tests. You might need to use a custom Circe encoder and/or a Schema wrapper.
+See the code for examples. ADTs and enums are the usual trouble spots.
+
+6. We write mirror classes and use transcode for inputs/outputs with Daml records to keep the JSON "readable".
diff --git a/canton/community/ledger/ledger-json-api/src/main/resources/ledger-api/proto-data.yml b/canton/community/ledger/ledger-json-api/src/main/resources/ledger-api/proto-data.yml
index 41dd60aa35..861d3ab6d6 100644
--- a/canton/community/ledger/ledger-json-api/src/main/resources/ledger-api/proto-data.yml
+++ b/canton/community/ledger/ledger-json-api/src/main/resources/ledger-api/proto-data.yml
@@ -20,6 +20,38 @@ messages:
           This field will be the reassignment_counter of the latest observable activation event on this synchronizer, which is before the active_at_offset.
           Required
+  AllocateExternalPartyRequest:
+    message:
+      comments: 'Required authorization: ``HasRight(ParticipantAdmin) OR IsAuthenticatedIdentityProviderAdmin(identity_provider_id)``'
+      fieldComments:
+        synchronizer: |-
+          TODO(#27670) support synchronizer aliases
+          Synchronizer ID on which to onboard the party
+          Required
+        onboarding_transactions: |-
+          TopologyTransactions to onboard the external party
+          Can contain:
+          - A namespace for the party.
+            This can be either a single NamespaceDelegation,
+            or DecentralizedNamespaceDefinition along with its authorized namespace owners in the form of NamespaceDelegations.
+            May be provided, if so it must be fully authorized by the signatures in this request combined with the existing topology state.
+          - A PartyToKeyMapping to register the party's signing keys.
+            May be provided, if so it must be fully authorized by the signatures in this request combined with the existing topology state.
+          - A PartyToParticipant to register the hosting relationship of the party.
+            Must be provided.
+          Required
+        multi_hash_signatures: |-
+          Optional signatures of the combined hash of all onboarding_transactions
+          This may be used instead of providing signatures on each individual transaction
+        identity_provider_id: |-
+          The id of the ``Identity Provider``
+          If not set, assume the party is managed by the default identity provider.
+ Optional + AllocateExternalPartyResponse: + message: + comments: null + fieldComments: + party_id: '' AllocatePartyRequest: message: comments: 'Required authorization: ``HasRight(ParticipantAdmin) OR IsAuthenticatedIdentityProviderAdmin(identity_provider_id)``' @@ -513,6 +545,14 @@ messages: Required, it is a valid absolute offset (positive integer) package_name: |- The package name of the created contract. + Required + representative_package_id: |- + A package-id present in the participant package store that typechecks the contract's argument. + This may differ from the package-id of the template used to create the contract. + For contracts created before Canton 3.4, this field matches the contract's creation package-id. + + NOTE: Experimental, server internal concept, not for client consumption. Subject to change without notice. + Required acs_delta: |- Whether this event would be part of respective ACS_DELTA shaped stream, @@ -569,7 +609,7 @@ messages: fieldComments: version: |- [docs-entry-end: DamlTransaction.Node] - Transaction version, will be >= max(nodes version) + serialization version, will be >= max(nodes version) roots: Root nodes of the transaction nodes: List of nodes in the transaction node_seeds: Node seeds are values associated with certain nodes used for generating @@ -609,10 +649,13 @@ messages: The template id of the contract. The identifier uses the package-id reference format. - Required + If provided, used to validate the template id of the contract serialized in the created_event_blob. + Optional contract_id: |- The contract id - Required + + If provided, used to validate the contract id of the contract serialized in the created_event_blob. + Optional created_event_blob: |- Opaque byte string containing the complete payload required by the Daml engine to reconstruct a contract not known to the receiving participant. @@ -990,16 +1033,20 @@ messages: for ledger implementation testing purposes only. Daml applications SHOULD not depend on these in production. - user_management: |- - If set, then the Ledger API server supports user management. - It is recommended that clients query this field to gracefully adjust their behavior for - ledgers that do not support user management. + package_feature: |- + If set, then the Ledger API server supports package listing + configurability. It is recommended that clients query this field to + gracefully adjust their behavior to maximum package listing page size. + offset_checkpoint: It contains the timeouts related to the periodic offset + checkpoint emission party_management: |- If set, then the Ledger API server supports party management configurability. It is recommended that clients query this field to gracefully adjust their behavior to maximum party page size. - offset_checkpoint: It contains the timeouts related to the periodic offset - checkpoint emission + user_management: |- + If set, then the Ledger API server supports user management. + It is recommended that clients query this field to gracefully adjust their behavior for + ledgers that do not support user management. Fetch: message: comments: Fetch node @@ -1025,6 +1072,38 @@ messages: A wildcard filter SHOULD NOT be defined more than once in the accumulative field. Optional, if no ``CumulativeFilter`` defined, the default of a single ``WildcardFilter`` with include_created_event_blob unset is used. 
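With `TransactionFilter` removed elsewhere in this diff, the `Filters`/`CumulativeFilter` documentation above is what client code now builds an `EventFormat` from. A minimal sketch of a template-wildcard `EventFormat` using the ScalaPB-generated classes (the party id is a placeholder, and the `includeCreatedEventBlob`/`verbose` choices are illustrative):

```scala
import com.daml.ledger.api.v2.transaction_filter.{
  CumulativeFilter,
  EventFormat,
  Filters,
  WildcardFilter,
}

// Wildcard filter for a single party, replacing the removed
// TransactionFilter/verbose parameters on the client methods.
def wildcardEventFormat(party: String): EventFormat =
  EventFormat(
    filtersByParty = Map(
      party -> Filters(
        cumulative = Seq(
          CumulativeFilter(
            identifierFilter = CumulativeFilter.IdentifierFilter.WildcardFilter(
              WildcardFilter(includeCreatedEventBlob = false)
            )
          )
        )
      )
    ),
    filtersForAnyParty = None,
    verbose = true,
  )
```

This is the shape the updated `StateServiceClient.getActiveContractsSource(eventFormat, ...)` and `UpdateServiceClient.getUpdatesSource(begin, eventFormat, ...)` overloads in this diff expect.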
+ GenerateExternalPartyTopologyRequest: + message: + comments: null + fieldComments: + synchronizer: |- + TODO(#27670) support synchronizer aliases + Required: synchronizer-id for which we are building this request. + other_confirming_participant_uids: 'Optional: other participant ids which + should be confirming for this party' + public_key: 'Required: public key' + observing_participant_uids: 'Optional: other observing participant ids for + this party' + local_participant_observation_only: 'Optional: if true, then the local participant + will only be observing, not confirming. Default false.' + confirmation_threshold: 'Optional: Confirmation threshold >= 1 for the party. + Defaults to all available confirmers (or if set to 0).' + party_hint: 'Required: the actual party id will be constructed from this hint + and a fingerprint of the public key' + GenerateExternalPartyTopologyResponse: + message: + comments: Response message with topology transactions and the multi-hash to + be signed. + fieldComments: + party_id: the generated party id + public_key_fingerprint: the fingerprint of the supplied public key + topology_transactions: |- + The serialized topology transactions which need to be signed and submitted as part of the allocate party process + Note that the serialization includes the versioning information. Therefore, the transaction here is serialized + as an `UntypedVersionedMessage` which in turn contains the serialized `TopologyTransaction` in the version + supported by the synchronizer. + multi_hash: the multi-hash which may be signed instead of each individual + transaction GetActiveContractsRequest: message: comments: |- @@ -1033,15 +1112,6 @@ messages: Note that it is ok to request acs snapshots for party migration with offsets other than ledger end, because party migration is not concerned with incomplete (un)assignments. fieldComments: - filter: |- - Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. - Templates to include in the served snapshot, per party. - Optional, if specified event_format must be unset, if not specified event_format must be set. - verbose: |- - Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. - If enabled, values served over the API will contain more information than strictly necessary to interpret the data. - In particular, setting the verbose flag to true triggers the ledger to include labels for record fields. - Optional, if specified event_format must be unset. active_at_offset: |- The offset at which the snapshot of the active contracts will be computed. Must be no greater than the current ledger end offset. @@ -1098,7 +1168,8 @@ messages: party: |- The party of interest Must be a valid PartyIdString (as described in ``value.proto``). - Required + If empty, all synchronizers this node is connected to will be returned + Optional participant_id: |- The id of a participant whose mapping of a party to connected synchronizers is requested. Must be a valid participant-id retrieved through a prior call to getParticipantId. @@ -1334,68 +1405,6 @@ messages: comments: null fieldComments: current_time: The current time according to the ledger server. - GetTransactionByIdRequest: - message: - comments: Provided for backwards compatibility, it will be removed in the Canton - version 3.4.0. - fieldComments: - update_id: |- - The ID of a particular transaction. - Must be a valid LedgerString (as described in ``value.proto``). 
- Required - requesting_parties: |- - Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. - The parties whose events the client expects to see. - Events that are not visible for the parties in this collection will not be present in the response. - Each element must be a valid PartyIdString (as described in ``value.proto``). - Must be set for GetTransactionTreeById request. - Optional for backwards compatibility for GetTransactionById request: if defined transaction_format must be - unset (falling back to defaults). - transaction_format: |- - Must be unset for GetTransactionTreeById request. - Optional for GetTransactionById request for backwards compatibility: defaults to a transaction_format, where: - - - event_format.filters_by_party will have template-wildcard filters for all the requesting_parties - - event_format.filters_for_any_party is unset - - event_format.verbose = true - - transaction_shape = TRANSACTION_SHAPE_ACS_DELTA - GetTransactionByOffsetRequest: - message: - comments: Provided for backwards compatibility, it will be removed in the Canton - version 3.4.0. - fieldComments: - offset: |- - The offset of the transaction being looked up. - Must be a valid absolute offset (positive integer). - Required - requesting_parties: |- - Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. - The parties whose events the client expects to see. - Events that are not visible for the parties in this collection will not be present in the response. - Each element must be a valid PartyIdString (as described in ``value.proto``). - Must be set for GetTransactionTreeByOffset request. - Optional for backwards compatibility for GetTransactionByOffset request: if defined transaction_format must be - unset (falling back to defaults). - transaction_format: |- - Must be unset for GetTransactionTreeByOffset request. - Optional for GetTransactionByOffset request for backwards compatibility: defaults to a TransactionFormat, where: - - - event_format.filters_by_party will have template-wildcard filters for all the requesting_parties - - event_format.filters_for_any_party is unset - - event_format.verbose = true - - transaction_shape = TRANSACTION_SHAPE_ACS_DELTA - GetTransactionResponse: - message: - comments: Provided for backwards compatibility, it will be removed in the Canton - version 3.4.0. - fieldComments: - transaction: Required - GetTransactionTreeResponse: - message: - comments: Provided for backwards compatibility, it will be removed in the Canton - version 3.4.0. - fieldComments: - transaction: Required GetUpdateByIdRequest: message: comments: null @@ -1425,35 +1434,21 @@ messages: transaction: '' reassignment: '' topology_transaction: '' - GetUpdateTreesResponse: - message: - comments: Provided for backwards compatibility, it will be removed in the Canton - version 3.4.0. - fieldComments: - transaction_tree: '' - reassignment: '' - offset_checkpoint: '' GetUpdatesRequest: message: comments: null fieldComments: - verbose: |- - Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. - If enabled, values served over the API will contain more information than strictly necessary to interpret the data. - In particular, setting the verbose flag to true triggers the ledger to include labels, record and variant type ids - for record fields. - Optional for backwards compatibility, if defined update_format must be unset begin_exclusive: |- Beginning of the requested ledger section (non-negative integer). 
The response will only contain transactions whose offset is strictly greater than this. If zero, the stream will start from the beginning of the ledger. If positive, the streaming will start after this absolute offset. If the ledger has been pruned, this parameter must be specified and be greater than the pruning offset. - filter: |- - Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. - Requesting parties with template filters. - Template filters must be empty for GetUpdateTrees requests. - Optional for backwards compatibility, if defined update_format must be unset + end_inclusive: |- + End of the requested ledger section. + The response will only contain transactions whose offset is less than or equal to this. + Optional, if empty, the stream will not terminate. + If specified, the stream will terminate after this absolute offset (positive integer) is reached. update_format: |- Must be unset for GetUpdateTrees request. Optional for backwards compatibility for GetUpdates request: defaults to an UpdateFormat where: @@ -1465,11 +1460,6 @@ messages: - include_reassignments.filter = the same filter specified on this request - include_reassignments.verbose = the same flag specified on this request - include_topology_events.include_participant_authorization_events.parties = all the parties specified in filter - end_inclusive: |- - End of the requested ledger section. - The response will only contain transactions whose offset is less than or equal to this. - Optional, if empty, the stream will not terminate. - If specified, the stream will terminate after this absolute offset (positive integer) is reached. GetUpdatesResponse: message: comments: null @@ -1715,12 +1705,67 @@ messages: next_page_token: |- Pagination token to retrieve the next page. Empty, if there are no further results. + ListVettedPackagesRequest: + message: + comments: null + fieldComments: + package_metadata_filter: |- + The package metadata filter the returned vetted packages set must satisfy. + Optional + topology_state_filter: |- + The topology filter the returned vetted packages set must satisfy. + Optional + page_token: |- + Pagination token to determine the specific page to fetch. Using the token + guarantees that ``VettedPackages`` on a subsequent page are all greater + (``VettedPackages`` are sorted by synchronizer ID then participant ID) than + the last ``VettedPackages`` on a previous page. + + The server does not store intermediate results between calls chained by a + series of page tokens. As a consequence, if new vetted packages are being + added and a page is requested twice using the same token, more packages can + be returned on the second call. + + Leave unspecified (i.e. as empty string) to fetch the first page. + + Optional + page_size: |- + Maximum number of ``VettedPackages`` results to return in a single page. + + If the page_size is unspecified (i.e. left as 0), the server will decide + the number of results to be returned. + + If the page_size exceeds the maximum supported by the server, an + error will be returned. + + To obtain the server's maximum consult the PackageService descriptor + available in the VersionService. + + Optional + ListVettedPackagesResponse: + message: + comments: null + fieldComments: + vetted_packages: |- + All ``VettedPackages`` that contain at least one ``VettedPackage`` matching + both a ``PackageMetadataFilter`` and a ``TopologyStateFilter``. + Sorted by synchronizer_id then participant_id. 
+        next_page_token: |-
+          Pagination token to retrieve the next page.
+          Empty string if there are no further results.
   Metadata:
     message:
       comments: |-
         Transaction Metadata
         Refer to the hashing documentation for information on how it should be hashed.
       fieldComments:
+        max_record_time: |-
+          Maximum timestamp at which the transaction can be recorded onto the ledger via the synchronizer `synchronizer_id`.
+          If submitted after that time, it will be rejected even if otherwise valid, in which case it needs to be prepared and signed again
+          with a new valid max_record_time.
+          Unsigned in 3.3 to avoid a breaking protocol change
+          Will be signed in 3.4+
+          Set max_record_time in the PreparedTransactionRequest to get this field set accordingly
         synchronizer_id: ''
         preparation_time: ''
         min_ledger_effective_time: ''
@@ -1840,6 +1885,30 @@ messages:
         known_since: |-
           Indicates since when the package is known to the backing participant.
           Required
+  PackageFeature:
+    message:
+      comments: null
+      fieldComments:
+        max_vetted_packages_page_size: |-
+          The maximum number of vetted packages the server can return in a single
+          response (page) when listing them.
+  PackageMetadataFilter:
+    message:
+      comments: |-
+        Filter the VettedPackages by package metadata.
+
+        A PackageMetadataFilter without package_ids and without package_name_prefixes
+        matches any vetted package.
+
+        Non-empty fields specify candidate values of which at least one must match.
+        If both fields are set, then a candidate is returned if it matches one of the fields.
+      fieldComments:
+        package_ids: |-
+          If this list is non-empty, any vetted package with a package ID in this
+          list will match the filter.
+        package_name_prefixes: |-
+          If this list is non-empty, any vetted package with a name matching at least
+          one prefix in this list will match the filter.
   PackagePreference:
     message:
       comments: null
@@ -1956,9 +2025,19 @@ messages:
           The change ID can be used for matching the intended ledger changes with all their completions.
           Must be a valid LedgerString (as described in ``value.proto``).
           Required
+        max_record_time: |-
+          Maximum timestamp at which the transaction can be recorded onto the ledger via the synchronizer specified in the `PrepareSubmissionResponse`.
+          If submitted after that time, it will be rejected even if otherwise valid, in which case it needs to be prepared and signed again
+          with a new valid max_record_time.
+          Use this to limit the time-to-live of a prepared transaction;
+          it lets you know when the transaction can definitely no longer be accepted,
+          at which point preparing another transaction for the same
+          intent is safe again.
+          Optional
         synchronizer_id: |-
           Must be a valid synchronizer id
-          Required
+          If not set, a suitable synchronizer that this node is connected to will be chosen
+          Optional
         package_id_selection_preference: |-
           The package-id selection preference of the client for resolving
           package names and interface instances in command submission and interpretation
@@ -2029,6 +2108,14 @@ messages:
         transaction: Daml Transaction representing the ledger effect if executed. See below
         metadata: Metadata context necessary to execute the transaction
+  PriorTopologySerial:
+    message:
+      comments: |-
+        The serial of the last ``VettedPackages`` topology transaction on a given
+        participant and synchronizer.
+      fieldComments:
+        prior: Previous transaction's serial.
+        no_prior: No previous transaction exists.
   PruneRequest:
     message:
       comments: null
@@ -2202,6 +2289,13 @@ messages:
           and needed to verify.
signing_algorithm_spec: The signing algorithm specification used to produce this signature + SigningPublicKey: + message: + comments: null + fieldComments: + format: The serialization format of the public key + key_data: Serialized public key in the format specified above + key_spec: The key specification SinglePartySignatures: message: comments: Signatures provided by a single party @@ -2251,15 +2345,6 @@ messages: The transaction that resulted from the submitted command. The transaction might contain no events (request conditions result in filtering out all of them). Required - SubmitAndWaitForTransactionTreeResponse: - message: - comments: Provided for backwards compatibility, it will be removed in the Canton - version 3.4.0. - fieldComments: - transaction: |- - The transaction tree that resulted from the submitted command. - The transaction might contain no events (request conditions result in filtering out all of them). - Required SubmitAndWaitRequest: message: comments: These commands are executed as a single atomic transaction. @@ -2344,6 +2429,28 @@ messages: include_participant_authorization_events: |- Include participant authorization topology events in streams. Optional, if unset no participant authorization topology events are emitted in the stream. + TopologyStateFilter: + message: + comments: |- + Filter the vetted packages by the participant and synchronizer that they are + hosted on. + + Empty fields are ignored, such that a ``TopologyStateFilter`` without + participant_ids and without synchronizer_ids matches a vetted package hosted + on any participant and synchronizer. + + Non-empty fields specify candidate values of which at least one must match. + If both fields are set then at least one candidate value must match from each + field. + fieldComments: + participant_ids: |- + If this list is non-empty, only vetted packages hosted on participants + listed in this field match the filter. + Query the current Ledger API's participant's ID via the public + ``GetParticipantId`` command in ``PartyManagementService``. + synchronizer_ids: |- + If this list is non-empty, only vetted packages from the topology state of + the synchronizers in this list match the filter. TopologyTransaction: message: comments: null @@ -2435,29 +2542,6 @@ messages: The time at which the transaction was recorded. The record time refers to the synchronizer which synchronized the transaction. Required - TransactionFilter: - message: - comments: |- - Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. - Used both for filtering create and archive events as well as for filtering transaction trees. - fieldComments: - filters_by_party: |- - Each key must be a valid PartyIdString (as described in ``value.proto``). - The interpretation of the filter depends on the transaction-shape being filtered: - - 1. For **transaction trees** (used in GetUpdateTreesResponse for backwards compatibility) all party keys used as - wildcard filters, and all subtrees whose root has one of the listed parties as an informee are returned. - If there are ``CumulativeFilter``s, those will control returned ``CreatedEvent`` fields where applicable, but will - not be used for template/interface filtering. - 2. For **ledger-effects** create and exercise events are returned, for which the witnesses include at least one of - the listed parties and match the per-party filter. - 3. 
For **transaction and active-contract-set streams** create and archive events are returned for all contracts whose - stakeholders include at least one of the listed parties and match the per-party filter. - - Required - filters_for_any_party: |- - Wildcard filters that apply to all the parties existing on the participant. The interpretation of the filters is the same - with the per-party filter as described above. TransactionFormat: message: comments: |- @@ -2468,68 +2552,6 @@ messages: transaction_shape: |- What transaction shape to use for interpreting the filters of the event format. Required - TransactionTree: - message: - comments: |- - Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. - Complete view of an on-ledger transaction. - fieldComments: - command_id: |- - The ID of the command which resulted in this transaction. Missing for everyone except the submitting party. - Must be a valid LedgerString (as described in ``value.proto``). - Optional - trace_context: |- - Optional; ledger API trace context - - The trace context transported in this message corresponds to the trace context supplied - by the client application in a HTTP2 header of the original command submission. - We typically use a header to transfer this type of information. Here we use message - body, because it is used in gRPC streams which do not support per message headers. - This field will be populated with the trace context contained in the original submission. - If that was not provided, a unique ledger-api-server generated trace context will be used - instead. - synchronizer_id: |- - A valid synchronizer id. - Identifies the synchronizer that synchronized the transaction. - Required - update_id: |- - Assigned by the server. Useful for correlating logs. - Must be a valid LedgerString (as described in ``value.proto``). - Required - events_by_id: |- - Changes to the ledger that were caused by this transaction. Nodes of the transaction tree. - Each key must be a valid node ID (non-negative integer). - Required - offset: |- - The absolute offset. The details of this field are described in ``community/ledger-api/README.md``. - Required, it is a valid absolute offset (positive integer). - effective_at: |- - Ledger effective time. - Required - workflow_id: |- - The workflow ID used in command submission. Only set if the ``workflow_id`` for the command was set. - Must be a valid LedgerString (as described in ``value.proto``). - Optional - record_time: |- - The time at which the transaction was recorded. The record time refers to the synchronizer - which synchronized the transaction. - Required - TreeEvent: - message: - comments: |- - Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. - Each tree event message type below contains a ``witness_parties`` field which - indicates the subset of the requested parties that can see the event - in question. - - Note that transaction trees might contain events with - _no_ witness parties, which were included simply because they were - children of events which have witnesses. - fieldComments: - created: |- - The event as it appeared in the context of its original daml transaction on this participant node. - In particular, the offset, node_id pair of the daml transaction are preserved. 
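One subtlety in the two new vetted-package filters documented above is that they compose differently: within `PackageMetadataFilter` the non-empty fields are alternatives (matching either one is enough), while within `TopologyStateFilter` every non-empty field must be matched. A self-contained sketch of the documented rules, using plain case classes with illustrative names rather than the generated proto types:

```scala
// Illustrative types only; the real messages are the generated proto classes.
final case class PkgMetadataFilter(packageIds: Seq[String], packageNamePrefixes: Seq[String])
final case class TopologyFilter(participantIds: Seq[String], synchronizerIds: Seq[String])

// PackageMetadataFilter: an empty filter matches everything; otherwise matching
// EITHER list is sufficient (union semantics).
def matchesMetadata(f: PkgMetadataFilter, pkgId: String, pkgName: String): Boolean =
  (f.packageIds.isEmpty && f.packageNamePrefixes.isEmpty) ||
    f.packageIds.contains(pkgId) ||
    f.packageNamePrefixes.exists(prefix => pkgName.startsWith(prefix))

// TopologyStateFilter: empty fields are ignored, but every non-empty field must
// be matched (intersection semantics).
def matchesTopology(f: TopologyFilter, participant: String, synchronizer: String): Boolean =
  (f.participantIds.isEmpty || f.participantIds.contains(participant)) &&
    (f.synchronizerIds.isEmpty || f.synchronizerIds.contains(synchronizer))
```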
- exercised: '' UnassignCommand: message: comments: Unassign a contract @@ -2732,6 +2754,49 @@ messages: comments: null fieldComments: user: Updated user + UpdateVettedPackagesRequest: + message: + comments: null + fieldComments: + changes: |- + Changes to apply to the current vetting state of the participant on the + specified synchronizer. The changes are applied in order. + Any package not changed will keep its previous vetting state. + dry_run: |- + If dry_run is true, then the changes are only prepared, but not applied. If + a request would trigger an error when run (e.g. TOPOLOGY_DEPENDENCIES_NOT_VETTED), + it will also trigger an error when dry_run is set. + + Use this flag to preview a change before applying it. + synchronizer_id: |- + If set, the requested changes will take place on the specified + synchronizer. If synchronizer_id is unset and the participant is only + connected to a single synchronizer, that synchronizer will be used by + default. If synchronizer_id is unset and the participant is connected to + multiple synchronizers, the request will error out with + PACKAGE_SERVICE_CANNOT_AUTODETECT_SYNCHRONIZER. + + Optional + expected_topology_serial: |- + The serial of the last ``VettedPackages`` topology transaction of this + participant and on this synchronizer. + + Execution of the request fails if this is not correct. Use this to guard + against concurrent changes. + + If left unspecified, no validation is done against the last transaction's + serial. + + Optional + UpdateVettedPackagesResponse: + message: + comments: null + fieldComments: + past_vetted_packages: |- + All vetted packages on this participant and synchronizer, before the + specified changes. Empty if no vetting state existed beforehand. + new_vetted_packages: All vetted packages on this participant and synchronizer, + after the specified changes. UploadDarFileRequest: message: comments: null @@ -2744,6 +2809,18 @@ messages: submission_id: |- Unique submission identifier. Optional, defaults to a random identifier. + vetting_change: How to vet packages in the DAR being uploaded + synchronizer_id: |- + Only used if VettingChange is set to VETTING_CHANGE_VET_ALL_PACKAGES, in + order to specify which synchronizer to vet on. + + If synchronizer_id is set, the synchronizer with this ID will be used. If + synchronizer_id is unset and the participant is only connected to a single + synchronizer, that synchronizer will be used by default. If synchronizer_id + is unset and the participant is connected to multiple synchronizers, the + request will error out with PACKAGE_SERVICE_CANNOT_AUTODETECT_SYNCHRONIZER. + + Optional UploadDarFileResponse: message: comments: A message that is received when the upload operation succeeded. @@ -2809,10 +2886,86 @@ messages: submission_id: |- Unique submission identifier. Optional, defaults to a random identifier. + synchronizer_id: |- + If synchronizer_id is set, the synchronizer with this ID will be used. If + synchronizer_id is unset and the participant is only connected to a single + synchronizer, that synchronizer will be used by default. If synchronizer_id + is unset and the participant is connected to multiple synchronizers, the + request will error out with PACKAGE_SERVICE_CANNOT_AUTODETECT_SYNCHRONIZER.
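The `expected_topology_serial` and `dry_run` fields documented above combine into an optimistic-concurrency workflow: read the current serial, preview the change, then apply it guarded by that serial. A hedged sketch of building such a request; the message names follow the YAML above, but the Scala import path, and the field representations (e.g. the serial as `Option[Long]`) are assumptions.

```scala
// Sketch only: generated bindings and field types are assumed, not confirmed.
import com.daml.ledger.api.v2.admin.package_management_service.{
  UpdateVettedPackagesRequest,
  VettedPackagesChange,
}

def guardedVettingRequest(
    change: VettedPackagesChange,
    synchronizerId: String,
    lastKnownSerial: Long,
    preview: Boolean,
): UpdateVettedPackagesRequest =
  UpdateVettedPackagesRequest(
    changes = Seq(change),
    synchronizerId = synchronizerId,
    // Fails instead of silently racing if another VettedPackages topology
    // transaction landed after we read lastKnownSerial.
    expectedTopologySerial = Some(lastKnownSerial),
    // With dry_run set, errors such as TOPOLOGY_DEPENDENCIES_NOT_VETTED
    // surface without changing any vetting state.
    dryRun = preview,
  )
```

A natural usage is to send the request once with `preview = true`, inspect the returned `new_vetted_packages`, and then resend it with `preview = false` and the same serial.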
+ + Optional ValidateDarFileResponse: message: comments: null fieldComments: {} + VettedPackage: + message: + comments: |- + A package that is vetted on a given participant and synchronizer, + modelled after ``VettedPackage`` in `topology.proto `_, + enriched with the package name and version. + fieldComments: + valid_until_exclusive: |- + The time until which this package is vetted. Empty if vetting time has no + upper bound. + package_version: |- + Version of this package. + Only available if the package has been uploaded to the current participant. + If unavailable, this is the empty string. + package_name: |- + Name of this package. + Only available if the package has been uploaded to the current participant. + If unavailable, this is the empty string. + package_id: Package ID of this package. Always present. + valid_from_inclusive: |- + The time from which this package is vetted. Empty if vetting time has no + lower bound. + VettedPackages: + message: + comments: |- + The list of packages vetted on a given participant and synchronizer, modelled + after ``VettedPackages`` in `topology.proto `_. + The list only contains packages that matched a filter in the query that + originated it. + fieldComments: + packages: |- + Sorted by package_name and package_version where known, and package_id as a + last resort. + participant_id: Participant on which these packages are vetted. Always present. + synchronizer_id: Synchronizer on which these packages are vetted. Always present. + topology_serial: |- + Serial of the last ``VettedPackages`` topology transaction of this participant + and on this synchronizer. Always present. + VettedPackagesChange: + message: + comments: A change to the set of vetted packages. + fieldComments: + vet: Add packages to or update packages in the set of vetted packages. + unvet: Remove packages from the set of vetted packages. + VettedPackagesRef: + message: + comments: |- + A reference to identify one or more packages. + + A reference matches a package if its ``package_id`` matches the package's ID, + its ``package_name`` matches the package's name, and its ``package_version`` + matches the package's version. If an attribute in the reference is left + unspecified (i.e. as an empty string), that attribute is treated as a + wildcard. At a minimum, ``package_id`` or ``package_name`` must be + specified. + + If a reference does not match any package, the reference is considered + unresolved and the entire update request is rejected. + fieldComments: + package_id: |- + Package's package id must be the same as this field. + Optional + package_name: |- + Package's name must be the same as this field. + Optional + package_version: |- + Package's version must be the same as this field. + Optional WildcardFilter: message: comments: This filter matches all templates.
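The wildcard semantics of `VettedPackagesRef` documented above are easy to illustrate: an empty attribute matches anything, but a reference with all attributes empty is invalid. A short sketch; the import path of the generated class is an assumption, and "my-app-model" is a made-up package name.

```scala
// Sketch only: import path assumed; "my-app-model" is a hypothetical package name.
import com.daml.ledger.api.v2.admin.package_management_service.VettedPackagesRef

// Matches every vetted version of the "my-app-model" package:
val everyVersionOfMyApp = VettedPackagesRef(
  packageId = "",               // empty string = wildcard over package IDs
  packageName = "my-app-model", // must equal the package's name
  packageVersion = "",          // empty string = wildcard over versions
)
// A reference with all three attributes empty would be rejected: at least
// package_id or package_name must be specified, and a reference that matches
// no package fails the entire update request as unresolved.
```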
@@ -2824,6 +2977,8 @@ messages: Optional oneOfs: ActiveContract: {} + AllocateExternalPartyRequest: {} + AllocateExternalPartyResponse: {} AllocatePartyRequest: {} AllocatePartyResponse: {} Archived: {} @@ -2988,6 +3143,8 @@ oneOfs: FeaturesDescriptor: {} Fetch: {} Filters: {} + GenerateExternalPartyTopologyRequest: {} + GenerateExternalPartyTopologyResponse: {} GetActiveContractsRequest: {} GetActiveContractsResponse: contract_entry: @@ -3041,10 +3198,6 @@ oneOfs: GetPreferredPackagesResponse: {} GetTimeRequest: {} GetTimeResponse: {} - GetTransactionByIdRequest: {} - GetTransactionByOffsetRequest: {} - GetTransactionResponse: {} - GetTransactionTreeResponse: {} GetUpdateByIdRequest: {} GetUpdateByOffsetRequest: {} GetUpdateResponse: @@ -3055,14 +3208,6 @@ oneOfs: transaction: '' reassignment: '' topology_transaction: '' - GetUpdateTreesResponse: - update: - message: - comments: The update that matches the filter in the request. - fieldComments: - transaction_tree: '' - reassignment: '' - offset_checkpoint: '' GetUpdatesRequest: {} GetUpdatesResponse: update: @@ -3095,6 +3240,8 @@ oneOfs: ListUserRightsResponse: {} ListUsersRequest: {} ListUsersResponse: {} + ListVettedPackagesRequest: {} + ListVettedPackagesResponse: {} Metadata: {} MinLedgerTime: time: @@ -3129,6 +3276,8 @@ oneOfs: OffsetCheckpoint: {} OffsetCheckpointFeature: {} PackageDetails: {} + PackageFeature: {} + PackageMetadataFilter: {} PackagePreference: {} PackageReference: {} PackageVettingRequirement: {} @@ -3143,6 +3292,13 @@ oneOfs: PrepareSubmissionRequest: {} PrepareSubmissionResponse: {} PreparedTransaction: {} + PriorTopologySerial: + serial: + message: + comments: null + fieldComments: + prior: Previous transaction's serial. + no_prior: No previous transaction exists. PruneRequest: {} PruneResponse: {} Reassignment: {} @@ -3182,12 +3338,12 @@ oneOfs: Rollback: {} SetTimeRequest: {} Signature: {} + SigningPublicKey: {} SinglePartySignatures: {} SubmitAndWaitForReassignmentRequest: {} SubmitAndWaitForReassignmentResponse: {} SubmitAndWaitForTransactionRequest: {} SubmitAndWaitForTransactionResponse: {} - SubmitAndWaitForTransactionTreeResponse: {} SubmitAndWaitRequest: {} SubmitAndWaitResponse: {} SubmitReassignmentRequest: {} @@ -3205,21 +3361,11 @@ oneOfs: participant_authorization_revoked: '' participant_authorization_added: '' TopologyFormat: {} + TopologyStateFilter: {} TopologyTransaction: {} TraceContext: {} Transaction: {} - TransactionFilter: {} TransactionFormat: {} - TransactionTree: {} - TreeEvent: - kind: - message: - comments: null - fieldComments: - created: |- - The event as it appeared in the context of its original daml transaction on this participant node. - In particular, the offset, node_id pair of the daml transaction are preserved. - exercised: '' UnassignCommand: {} UnassignedEvent: {} UpdateFormat: {} @@ -3233,10 +3379,22 @@ oneOfs: UpdateUserIdentityProviderIdResponse: {} UpdateUserRequest: {} UpdateUserResponse: {} + UpdateVettedPackagesRequest: {} + UpdateVettedPackagesResponse: {} UploadDarFileRequest: {} UploadDarFileResponse: {} User: {} UserManagementFeature: {} ValidateDarFileRequest: {} ValidateDarFileResponse: {} + VettedPackage: {} + VettedPackages: {} + VettedPackagesChange: + operation: + message: + comments: null + fieldComments: + vet: Add packages to or update packages in the set of vetted packages. + unvet: Remove packages from the set of vetted packages. 
+ VettedPackagesRef: {} WildcardFilter: {} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/AcsTxStreams.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/AcsTxStreams.scala deleted file mode 100644 index ba12b8256d..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/AcsTxStreams.scala +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.fetchcontracts - -import com.daml.ledger.api.v2 as lav2 -import com.daml.ledger.api.v2.transaction.Transaction -import com.daml.ledger.api.v2.transaction_filter.CumulativeFilter.IdentifierFilter -import com.daml.ledger.api.v2.transaction_filter.{CumulativeFilter, TemplateFilter} -import com.daml.scalautil.Statement.discard -import com.digitalasset.canton.fetchcontracts.util.GraphExtensions.* -import com.digitalasset.canton.fetchcontracts.util.IdentifierConverters.apiIdentifier -import com.digitalasset.canton.http.{ContractTypeId, ResolvedQuery} -import com.digitalasset.canton.logging.TracedLogger -import com.digitalasset.canton.tracing.NoTracing -import org.apache.pekko.NotUsed -import org.apache.pekko.stream.scaladsl.{Broadcast, Concat, Flow, GraphDSL, Source} -import org.apache.pekko.stream.{FanOutShape2, Graph} - -import scala.annotation.nowarn - -import util.{ - AbsoluteBookmark, - BeginBookmark, - ContractStreamStep, - InsertDeleteStep, - ParticipantBegin, -} - -object AcsTxStreams extends NoTracing { - import util.PekkoStreamsUtils.{last, max, project2} - - /** Plan inserts, deletes from an in-order batch of create/archive events. */ - private[this] def partitionInsertsDeletes( - txes: Iterable[lav2.event.Event] - ): InsertDeleteStep.LAV1 = { - val csb = Vector.newBuilder[lav2.event.CreatedEvent] - val asb = Map.newBuilder[String, lav2.event.ArchivedEvent] - import lav2.event.Event - import Event.Event.* - txes foreach { - case Event(Created(c)) => discard(csb += c) - case Event(Archived(a)) => discard(asb += ((a.contractId, a))) - case Event(Exercised(_)) => () // nonsense - case Event(Empty) => () // nonsense - } - val as = asb.result() - InsertDeleteStep(csb.result() filter (ce => !as.contains(ce.contractId)), as) - } - - /** Like `acsAndBoundary`, but also include the events produced by `transactionsSince` after the - * ACS's last offset, terminating with the last offset of the last transaction, or the ACS's last - * offset if there were no transactions. 
- */ - def acsFollowingAndBoundary( - transactionsSince: String => Source[Transaction, NotUsed], - logger: TracedLogger, - )(implicit - ec: concurrent.ExecutionContext, - lc: com.daml.logging.LoggingContextOf[Any], - ): Graph[FanOutShape2[ - Either[Long, lav2.state_service.GetActiveContractsResponse], - ContractStreamStep.LAV1, - BeginBookmark[Offset], - ], NotUsed] = - GraphDSL.create() { implicit b => - import ContractStreamStep.{Acs, LiveBegin} - import GraphDSL.Implicits.* - type Off = BeginBookmark[Offset] - val acs = b add acsAndBoundary - val dupOff = b add Broadcast[Off](2, eagerCancel = false) - val liveStart = Flow fromFunction { (off: Off) => - LiveBegin(off) - } - val txns = b add transactionsFollowingBoundary(transactionsSince, logger) - val allSteps = b add Concat[ContractStreamStep.LAV1](3) - // format: off - discard {dupOff <~ acs.out1} - discard {acs.out0.map(ces => Acs(ces.toVector)) ~> allSteps} - discard {dupOff ~> liveStart ~> allSteps} - discard {txns.out0 ~> allSteps} - discard {dupOff ~> txns.in} - // format: on - new FanOutShape2(acs.in, allSteps.out, txns.out1) - } - - /** Split a series of ACS responses into two channels: one with contracts, the other with a single - * result, the last offset. - */ - private[this] def acsAndBoundary - : Graph[FanOutShape2[Either[Long, lav2.state_service.GetActiveContractsResponse], Seq[ - lav2.event.CreatedEvent, - ], BeginBookmark[Offset]], NotUsed] = - GraphDSL.create() { implicit b => - import GraphDSL.Implicits.* - import lav2.state_service.GetActiveContractsResponse as GACR - val dup = b add Broadcast[Either[Long, GACR]](2, eagerCancel = true) - val acs = b add (Flow fromFunction ((_: Either[Long, GACR]).toSeq.flatMap( - _.contractEntry.activeContract - .flatMap(_.createdEvent) - .toList - ))) - val off = b add Flow[Either[Long, GACR]] - .collect { case Left(offset) => - AbsoluteBookmark(Offset(offset)) - } - .via(last(ParticipantBegin: BeginBookmark[Offset])) - discard(dup ~> acs) - discard(dup ~> off) - new FanOutShape2(dup.in, acs.out, off.out) - } - - /** Interpreting the transaction stream so it conveniently depends on the ACS graph, if desired. - * Deliberately matching output shape to `acsFollowingAndBoundary`. 
- */ - def transactionsFollowingBoundary( - transactionsSince: String => Source[Transaction, NotUsed], - logger: TracedLogger, - )(implicit - ec: concurrent.ExecutionContext, - lc: com.daml.logging.LoggingContextOf[Any], - ): Graph[FanOutShape2[ - BeginBookmark[Offset], - ContractStreamStep.Txn.LAV1, - BeginBookmark[Offset], - ], NotUsed] = - GraphDSL.create() { implicit b => - import GraphDSL.Implicits.* - type Off = BeginBookmark[Offset] - val dupOff = b add Broadcast[Off](2) - val mergeOff = b add Concat[Off](2) - val txns = Flow[Off] - .flatMapConcat(off => transactionsSince(off.toLedgerApi)) - .map(transactionToInsertsAndDeletes) - val txnSplit = b add project2[ContractStreamStep.Txn.LAV1, Offset] - import Offset.`Offset ordering` - val lastTxOff = b add last(ParticipantBegin: Off) - val maxOff = b add max(ParticipantBegin: Off) - val logTxnOut = - b add logTermination[ContractStreamStep.Txn.LAV1](logger, "first branch of tx stream split") - // format: off - discard {txnSplit.in <~ txns <~ dupOff} - discard {dupOff ~> mergeOff ~> maxOff} - discard {txnSplit.out1.map(off => AbsoluteBookmark(off)) ~> lastTxOff ~> mergeOff} - discard {txnSplit.out0 ~> logTxnOut} - // format: on - new FanOutShape2(dupOff.in, logTxnOut.out, maxOff.out) - } - - private[this] def transactionToInsertsAndDeletes( - tx: lav2.transaction.Transaction - ): (ContractStreamStep.Txn.LAV1, Offset) = { - val offset = Offset.fromLedgerApi(tx) - (ContractStreamStep.Txn(partitionInsertsDeletes(tx.events), offset), offset) - } - - // TODO(#23504) use EventFormat - @nowarn("cat=deprecation") - def transactionFilter[Pkg]( - parties: PartySet, - contractTypeIds: List[ContractTypeId.Definite[Pkg]], - ): lav2.transaction_filter.TransactionFilter = { - import lav2.transaction_filter.{Filters, InterfaceFilter} - - val (templateIds, interfaceIds) = ResolvedQuery.partition(contractTypeIds) - val filters = Filters( - templateIds - .map(templateId => - CumulativeFilter( - IdentifierFilter.TemplateFilter( - TemplateFilter( - templateId = Some(apiIdentifier[Pkg](templateId)), - includeCreatedEventBlob = false, - ) - ) - ) - ) - ++ - interfaceIds - .map(interfaceId => - CumulativeFilter( - IdentifierFilter.InterfaceFilter( - InterfaceFilter( - interfaceId = Some(apiIdentifier(interfaceId)), - includeInterfaceView = true, - includeCreatedEventBlob = false, - ) - ) - ) - ) - ) - - lav2.transaction_filter.TransactionFilter( - filtersByParty = Party.unsubst((parties: Set[Party]).toVector).map(_ -> filters).toMap, - filtersForAnyParty = None, - ) - } - -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/package.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/package.scala deleted file mode 100644 index 720c32b975..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/package.scala +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton - -import com.daml.ledger.api.v2 as lav2 -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.ledger.api.refinements.ApiTypes as lar -import com.digitalasset.daml.lf -import scalaz.std.option.* -import scalaz.std.string.* -import scalaz.syntax.std.option.* -import scalaz.syntax.traverse.* -import scalaz.{@@, Applicative, Order, Semigroup, Show, Tag, Tags, Traverse, \/} - -import fetchcontracts.util.ClientUtil.boxedRecord - -package object fetchcontracts { - type LfValue = lf.value.Value - - type ContractId = lar.ContractId - val ContractId = lar.ContractId - - type Party = lar.Party - val Party = lar.Party - - type PartySet = NonEmpty[Set[Party]] - - type Offset = String @@ OffsetTag - - implicit final class `fc ErrorOps`[A](private val o: Option[A]) extends AnyVal { - def required(label: String): Error \/ A = - o toRightDisjunction Error(Symbol("ErrorOps_required"), s"Missing required field $label") - } -} - -package fetchcontracts { - - import com.digitalasset.canton.data.Offset as CoreOffset - import com.digitalasset.canton.http.{ContractTypeId, ResolvedQuery} - import com.digitalasset.daml.lf.data.{Bytes, Ref} - import com.google.protobuf.ByteString - import scalaz.-\/ - - import java.nio.{ByteBuffer, ByteOrder} - - final case class Error(id: Symbol, message: String) - - object Error { - implicit val errorShow: Show[Error] = Show shows { e => - s"fetchcontracts.Error, ${e.id: Symbol}: ${e.message: String}" - } - } - - sealed trait OffsetTag - - object Offset { - val tag = Tag.of[OffsetTag] - - // TODO(#22819) remove as json v1 is removed and offsets are no longer HexStrings - def apply(s: String): Offset = tag(s) - def apply(l: Long): Offset = tag(fromLong(l)) - - def unwrap(x: Offset): String = tag.unwrap(x) - - def tryToLong(x: Offset): Long = java.lang.Long.parseUnsignedLong(unwrap(x), 16) - - def fromLedgerApi(tx: lav2.transaction.Transaction): Offset = Offset(tx.offset) - - implicit val semigroup: Semigroup[Offset] = Tag.unsubst(Semigroup[Offset @@ Tags.LastVal]) - implicit val `Offset ordering`: Order[Offset] = Order.orderBy[Offset, String](Offset.unwrap) - - // TODO(#22819) remove as json v1 is removed and offsets are no longer HexStrings - private def fromOldHexString(s: Ref.HexString): Long = { - val bytes = Bytes.fromHexString(s) - // first byte was the version byte, so we must remove it - ByteBuffer.wrap(bytes.toByteArray).getLong(1) - } - - // TODO(#22819) remove as json v1 is removed and offsets are no longer HexStrings - def assertFromStringToLong(s: String): Long = - if (s.isEmpty) 0L - else { - val hex = Ref.HexString.assertFromString(s) - fromOldHexString(hex) - } - - // TODO(#22819) remove as json v1 is removed and offsets are no longer HexStrings - private def toOldHexString(offset: Long): Ref.HexString = { - val longBasedByteLength: Int = 9 // One byte for the version plus 8 bytes for Long - val versionUpstreamOffsetsAsLong: Byte = 0 - - com.digitalasset.daml.lf.data.Bytes - .fromByteString( - ByteString.copyFrom( - ByteBuffer - .allocate(longBasedByteLength) - .order(ByteOrder.BIG_ENDIAN) - .put(0, versionUpstreamOffsetsAsLong) - .putLong(1, offset) - ) - ) - .toHexString - } - - // TODO(#22819) remove as json v1 is removed and offsets are no longer HexStrings - def fromLong(l: Long): String = - toOldHexString(CoreOffset.tryFromLong(l).unwrap) - } - - final case class ActiveContract[+CtTyId, +LfV]( - contractId: ContractId, - templateId: CtTyId, - key: Option[LfV], - payload: 
LfV, - signatories: Seq[Party], - observers: Seq[Party], - ) - - object ActiveContract { - type ResolvedCtTyId[+LfV] = ActiveContract[ContractTypeId.ResolvedPkgId, LfV] - - def fromLedgerApi[CtTyId[T] <: ContractTypeId.Definite[T]]( - extractor: ExtractAs[CtTyId], - in: lav2.event.CreatedEvent, - ): Error \/ ActiveContract[ContractTypeId.ResolvedPkgIdOf[CtTyId], lav2.value.Value] = - extractor.getIdKeyPayload(in).map { case (id, key, payload) => - ActiveContract( - contractId = ContractId(in.contractId), - templateId = id, - key = key, - payload = boxedRecord(payload), - signatories = Party.subst(in.signatories), - observers = Party.subst(in.observers), - ) - } - - // Strategy for extracting data from the created event, - // depending on the kind of thing we were expecting, i.e. template or interface view. - sealed trait ExtractAs[+CtTyId[T] <: ContractTypeId.Definite[T]] { - def getIdKeyPayload(in: lav2.event.CreatedEvent): ExtractAs.IdKeyPayload[CtTyId] - } - - object ExtractAs { - type IdKeyPayload[+CtId[_]] = - Error \/ (ContractTypeId.ResolvedPkgIdOf[CtId], Option[lav2.value.Value], lav2.value.Record) - - def apply(id: ContractTypeId.Definite[_]): ExtractAs[ContractTypeId.Definite] = id match { - case ContractTypeId.Interface(_, mod, entity) => ExtractAs.InterfaceView(mod, entity) - case ContractTypeId.Template(_, _, _) => ExtractAs.Template - } - - def apply(resolvedQuery: ResolvedQuery): ExtractAs[ContractTypeId.Definite] = - resolvedQuery match { - case ResolvedQuery.ByInterfaceId(intfId) => - ExtractAs.InterfaceView(intfId.original.moduleName, intfId.original.entityName) - case ResolvedQuery.ByTemplateId(_) => ExtractAs.Template - case ResolvedQuery.ByTemplateIds(_) => ExtractAs.Template - } - - // For interfaces we need to find the correct view and extract the payload and id from that. - final case class InterfaceView(module: String, entity: String) - extends ExtractAs[ContractTypeId.Interface] { - import com.google.rpc.Code - import com.google.rpc.status.Status - - def getIdKeyPayload(in: lav2.event.CreatedEvent): IdKeyPayload[ContractTypeId.Interface] = { - val view = in.interfaceViews.find( - // We ignore the package id when matching views. - // The search result should have already selected the correct packages, - // and if the query used a package name, multiple different corresponding - // package ids may be returned. - _.interfaceId.exists(id => id.moduleName == module && id.entityName == entity) - ) - view match { - case None => - val msg = s"Missing view with id matching '$module:$entity' in $in" - -\/(Error(Symbol("ErrorOps_view_missing"), msg)) - case Some(v) => - viewError(v) match { - case Some(s) => -\/(Error(Symbol("ErrorOps_view_eval"), s.toString)) - case None => - for { - id <- v.interfaceId.required("interfaceId") - payload <- v.viewValue.required("interviewView") - } yield (ContractTypeId.Interface.fromLedgerApi(id), None, payload) - } - } - } - - private def viewError(view: lav2.event.InterfaceView): Option[Status] = - view.viewStatus.filter(_.code != Code.OK_VALUE) - } - - // For templates we can get the data more directly. 
- final case object Template extends ExtractAs[ContractTypeId.Template] { - def getIdKeyPayload( - in: lav2.event.CreatedEvent - ): IdKeyPayload[ContractTypeId.Template] = for { - id <- in.templateId.required("templateId") - payload <- in.createArguments.required("createArguments") - } yield (ContractTypeId.Template.fromLedgerApi(id), in.contractKey, payload) - } - } - - implicit def covariant[C]: Traverse[ActiveContract[C, *]] = new Traverse[ActiveContract[C, *]] { - override def map[A, B](fa: ActiveContract[C, A])(f: A => B): ActiveContract[C, B] = - fa.copy(key = fa.key map f, payload = f(fa.payload)) - - override def traverseImpl[G[_]: Applicative, A, B]( - fa: ActiveContract[C, A] - )(f: A => G[B]): G[ActiveContract[C, B]] = { - import scalaz.syntax.apply.* - val gk: G[Option[B]] = fa.key traverse f - val ga: G[B] = f(fa.payload) - ^(gk, ga)((k, a) => fa.copy(key = k, payload = a)) - } - } - } -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/BeginBookmark.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/BeginBookmark.scala deleted file mode 100644 index d74aadc394..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/BeginBookmark.scala +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.fetchcontracts.util - -import com.digitalasset.canton.fetchcontracts.Offset -import scalaz.Liskov.<~< -import scalaz.Order -import scalaz.syntax.order.* -import spray.json.{JsNull, JsonWriter} - -sealed abstract class BeginBookmark[+Off] extends Product with Serializable { - def toLedgerApi(implicit ev: Off <~< Offset): String = - this match { - case AbsoluteBookmark(offset) => Offset.unwrap(ev(offset)) - case ParticipantBegin => - "" - } - - def map[B](f: Off => B): BeginBookmark[B] = this match { - case AbsoluteBookmark(offset) => AbsoluteBookmark(f(offset)) - case ParticipantBegin => ParticipantBegin - } - - def toOption: Option[Off] = this match { - case AbsoluteBookmark(offset) => Some(offset) - case ParticipantBegin => None - } -} -final case class AbsoluteBookmark[+Off](offset: Off) extends BeginBookmark[Off] -case object ParticipantBegin extends BeginBookmark[Nothing] - -object BeginBookmark { - implicit def jsonWriter[Off: JsonWriter]: JsonWriter[BeginBookmark[Off]] = - (obj: BeginBookmark[Off]) => { - val ev = implicitly[JsonWriter[Off]] - obj match { - case AbsoluteBookmark(offset) => ev.write(offset) - case ParticipantBegin => JsNull - } - } - - import scalaz.Ordering.{EQ, LT, GT} - - implicit def `BeginBookmark order`[Off: Order]: Order[BeginBookmark[Off]] = { - case (AbsoluteBookmark(a), AbsoluteBookmark(b)) => a ?|? 
b - case (ParticipantBegin, ParticipantBegin) => EQ - case (ParticipantBegin, AbsoluteBookmark(_)) => LT - case (AbsoluteBookmark(_), ParticipantBegin) => GT - } -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/ClientUtil.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/ClientUtil.scala deleted file mode 100644 index 0e2cf17a11..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/ClientUtil.scala +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.fetchcontracts.util - -import com.daml.ledger.api.v2 as lav2 - -object ClientUtil { - def boxedRecord(a: lav2.value.Record): lav2.value.Value = - lav2.value.Value(lav2.value.Value.Sum.Record(a)) -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/ContractStreamStep.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/ContractStreamStep.scala deleted file mode 100644 index aa957e9b56..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/ContractStreamStep.scala +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.fetchcontracts.util - -import com.digitalasset.canton.fetchcontracts.Offset -import scalaz.std.tuple.* -import scalaz.syntax.functor.* -import scalaz.{Semigroup, \/} - -import InsertDeleteStep.{Cid, Inserts} - -sealed abstract class ContractStreamStep[+D, +C] extends Product with Serializable { - import ContractStreamStep.* - - def toInsertDelete: InsertDeleteStep[D, C] = this match { - case Acs(inserts) => InsertDeleteStep(inserts, Map.empty) - case LiveBegin(_) => InsertDeleteStep.Empty - case Txn(step, _) => step - } - - def append[DD >: D, CC >: C: Cid](o: ContractStreamStep[DD, CC]): ContractStreamStep[DD, CC] = - (this, o) match { - case (Acs(inserts), Acs(oinserts)) => Acs(inserts ++ oinserts) - case (Acs(_), LiveBegin(AbsoluteBookmark(off))) => - Txn(toInsertDelete, off) - case (Acs(_) | Txn(_, _), Txn(ostep, off)) => - Txn(toInsertDelete append ostep, off) - case (LiveBegin(_), Txn(_, _)) => o - // the following cases should never happen in a real stream; we attempt to - // provide definitions that make `append` totally associative, anyway - case (Acs(_) | LiveBegin(_), LiveBegin(ParticipantBegin)) => this - case (LiveBegin(ParticipantBegin), Acs(_) | LiveBegin(_)) | - (LiveBegin(AbsoluteBookmark(_)), LiveBegin(AbsoluteBookmark(_))) => - o - case (LiveBegin(AbsoluteBookmark(off)), Acs(_)) => Txn(o.toInsertDelete, off) - case (Txn(step, off), Acs(_) | LiveBegin(ParticipantBegin)) => - Txn(step append o.toInsertDelete, off) - case (Txn(step, _), LiveBegin(AbsoluteBookmark(off))) => Txn(step, off) - } - - def mapPreservingIds[CC](f: C => CC): ContractStreamStep[D, CC] = - mapInserts(_ map f) - - def partitionBimap[LD, DD, LC, CC, LDS](f: D => (LD \/ DD), g: C => (LC \/ CC))(implicit - LDS: collection.Factory[LD, LDS] - ): (LDS, Inserts[LC], ContractStreamStep[DD, CC]) = - this match { - case Acs(inserts) => - val (lcs, ins) = inserts partitionMap (x => g(x).toEither) - 
(LDS.newBuilder.result(), lcs, Acs(ins)) - case lb @ LiveBegin(_) => (LDS.newBuilder.result(), Inserts.empty, lb) - case Txn(step, off) => step.partitionBimap(f, g)(LDS).map(Txn(_, off)) - } - - def mapInserts[CC](f: Inserts[C] => Inserts[CC]): ContractStreamStep[D, CC] = this match { - case Acs(inserts) => Acs(f(inserts)) - case lb @ LiveBegin(_) => lb - case Txn(step, off) => Txn(step.copy(inserts = f(step.inserts)), off) - } - - def mapDeletes[DD](f: Map[String, D] => Map[String, DD]): ContractStreamStep[DD, C] = - this match { - case acs @ Acs(_) => acs - case lb @ LiveBegin(_) => lb - case Txn(step, off) => Txn(step.copy(deletes = f(step.deletes)), off) - } - - def nonEmpty: Boolean = this match { - case Acs(inserts) => inserts.nonEmpty - case LiveBegin(_) => true // unnatural wrt `toInsertDelete`, but what nonEmpty is used for here - case Txn(step, _) => step.nonEmpty - } - - def bookmark: Option[BeginBookmark[Offset]] = this match { - case Acs(_) => Option.empty - case LiveBegin(bookmark) => Some(bookmark) - case Txn(_, offset) => Some(AbsoluteBookmark(offset)) - } -} - -object ContractStreamStep extends WithLAV1[ContractStreamStep] { - final case class Acs[+C](inserts: Inserts[C]) extends ContractStreamStep[Nothing, C] - final case class LiveBegin(offset: BeginBookmark[Offset]) - extends ContractStreamStep[Nothing, Nothing] - final case class Txn[+D, +C](step: InsertDeleteStep[D, C], offsetAfter: Offset) - extends ContractStreamStep[D, C] - object Txn extends WithLAV1[Txn] - - implicit def `CSS semigroup`[D, C: Cid]: Semigroup[ContractStreamStep[D, C]] = - Semigroup instance (_ append _) -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/GraphExtensions.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/GraphExtensions.scala deleted file mode 100644 index 8e2e4ed5e6..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/GraphExtensions.scala +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.fetchcontracts.util - -import com.daml.logging.LoggingContextOf -import com.digitalasset.canton.logging.TracedLogger -import com.digitalasset.canton.tracing.NoTracing -import org.apache.pekko.NotUsed -import org.apache.pekko.stream.scaladsl.{Flow, GraphDSL, Keep, Sink} -import org.apache.pekko.stream.{FanOutShape2, FlowShape, Graph} -import scalaz.Liskov.<~< - -import scala.concurrent.{ExecutionContext, Future} - -object GraphExtensions extends NoTracing { - implicit final class `Graph FOS2 funs`[A, Y, Z, M]( - private val g: Graph[FanOutShape2[A, Y, Z], M] - ) extends AnyVal { - private def divertToMat[N, O](oz: Sink[Z, N])(mat: (M, N) => O): Flow[A, Y, O] = - Flow fromGraph GraphDSL.createGraph(g, oz)(mat) { implicit b => (gs, zOut) => - import GraphDSL.Implicits.* - gs.out1 ~> zOut - new FlowShape(gs.in, gs.out0) - } - - /** Several of the graphs here have a second output guaranteed to deliver only one value. This - * turns such a graph into a flow with the value materialized. 
- */ - def divertToHead(implicit noM: M <~< NotUsed): Flow[A, Y, Future[Z]] = { - type CK[-T] = (T, Future[Z]) => Future[Z] - divertToMat(Sink.head)(noM.subst[CK](Keep.right[NotUsed, Future[Z]])) - } - } - - def logTermination[A]( - logger: TracedLogger, - extraMessage: String, - )(implicit ec: ExecutionContext, lc: LoggingContextOf[Any]): Flow[A, A, NotUsed] = - if (logger.underlying.isTraceEnabled) { - Flow[A].watchTermination() { (mat, fd) => - fd.onComplete( - _.fold( - t => - logger.trace( - s"stream-abort [$extraMessage] trying to abort ${t.getMessage}, ${lc.makeString}" - ), - _ => logger.trace(s"stream-stop [$extraMessage] trying to shutdown, ${lc.makeString}"), - ) - ) - mat - } - } else - Flow[A] - -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/IdentifierConverters.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/IdentifierConverters.scala deleted file mode 100644 index 6641acf2e1..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/IdentifierConverters.scala +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.fetchcontracts.util - -import com.daml.ledger.api.v2 as lav2 -import com.digitalasset.canton.http.ContractTypeId -import com.digitalasset.daml.lf -import com.digitalasset.daml.lf.data.Ref.{DottedName, ModuleName, PackageId, QualifiedName} - -object IdentifierConverters { - def apiIdentifier(a: lf.data.Ref.Identifier): lav2.value.Identifier = - lav2.value.Identifier( - packageId = a.packageId, - moduleName = a.qualifiedName.module.dottedName, - entityName = a.qualifiedName.name.dottedName, - ) - - def lfIdentifier(a: com.daml.ledger.api.v2.value.Identifier): lf.data.Ref.Identifier = - lf.data.Ref.Identifier( - pkg = PackageId.assertFromString(a.packageId), - qualifiedName = QualifiedName( - module = ModuleName.assertFromString(a.moduleName), - name = DottedName.assertFromString(a.entityName), - ), - ) - - def apiIdentifier[Pkg](a: ContractTypeId[Pkg]): lav2.value.Identifier = - lav2.value.Identifier( - packageId = a.packageId.toString, - moduleName = a.moduleName, - entityName = a.entityName, - ) -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/InsertDeleteStep.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/InsertDeleteStep.scala deleted file mode 100644 index 32f61d0d41..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/InsertDeleteStep.scala +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.fetchcontracts.util - -import com.daml.ledger.api.v2.event as evv1 -import com.digitalasset.canton.fetchcontracts.ActiveContract -import scalaz.syntax.tag.* -import scalaz.{Monoid, \/, \/-} - -import scala.runtime.AbstractFunction1 - -final case class InsertDeleteStep[+D, +C]( - inserts: InsertDeleteStep.Inserts[C], - deletes: Map[String, D], -) { - import InsertDeleteStep.* - - def append[DD >: D, CC >: C: Cid](o: InsertDeleteStep[DD, CC]): InsertDeleteStep[DD, CC] = - InsertDeleteStep( - appendForgettingDeletes(inserts, o), - deletes ++ o.deletes, - ) - - /** NB: This is ''not'' distributive across `append`. */ - def size: Int = inserts.length + deletes.size - - def nonEmpty: Boolean = inserts.nonEmpty || deletes.nonEmpty - - def leftMap[DD](f: D => DD): InsertDeleteStep[DD, C] = - copy(deletes = deletes transform ((_, d) => f(d))) - - /** Results undefined if cid(d) != cid(c) */ - def mapPreservingIds[CC](f: C => CC): InsertDeleteStep[D, CC] = copy(inserts = inserts map f) - - /** Results undefined if cid(d) != cid(c) */ - def partitionMapPreservingIds[LC, CC]( - f: C => (LC \/ CC) - ): (Inserts[LC], InsertDeleteStep[D, CC]) = { - val (_, lcs, step) = partitionBimap(\/-(_), f)(List) - (lcs, step) - } - - /** Results undefined if cid(cc) != cid(c) */ - def partitionBimap[LD, DD, LC, CC, LDS](f: D => (LD \/ DD), g: C => (LC \/ CC))(implicit - LDS: collection.Factory[LD, LDS] - ): (LDS, Inserts[LC], InsertDeleteStep[DD, CC]) = { - import scalaz.std.tuple.*, scalaz.std.either.*, scalaz.syntax.traverse.* - val (lcs, ins) = inserts partitionMap (x => g(x).toEither) - val (lds, del) = deletes.toList.partitionMap(_.traverse(x => f(x).toEither)) - (LDS.fromSpecific(lds), lcs, copy(inserts = ins, deletes = del.toMap)) - } -} - -object InsertDeleteStep extends WithLAV1[InsertDeleteStep] { - type Inserts[+C] = Vector[C] - val Inserts: Vector.type = Vector - - val Empty: InsertDeleteStep[Nothing, Nothing] = apply(Vector.empty, Map.empty) - - abstract class Cid[-C] extends (C AbstractFunction1 String) - - @SuppressWarnings(Array("org.wartremover.warts.Any")) - object Cid { -// implicit val ofDBC: Cid[DBContract[Any, Any, Any, Any]] = _.contractId - implicit val ofAC: Cid[ActiveContract[Any, Any]] = _.contractId.unwrap - implicit def ofFst[L](implicit L: Cid[L]): Cid[(L, Any)] = la => L(la._1) - // ofFst and ofSnd should *not* both be defined, being incoherent together - } - - // we always use the Last semigroup for D - implicit def `IDS monoid`[D, C: Cid]: Monoid[InsertDeleteStep[D, C]] = - Monoid.instance(_ append _, Empty) - - def appendForgettingDeletes[D, C](leftInserts: Inserts[C], right: InsertDeleteStep[Any, C])( - implicit cid: Cid[C] - ): Inserts[C] = - (if (right.deletes.isEmpty) leftInserts - else leftInserts.filter(c => !right.deletes.isDefinedAt(cid(c)))) ++ right.inserts -} - -trait WithLAV1[F[_, _]] { - type LAV1 = F[evv1.ArchivedEvent, evv1.CreatedEvent] -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/PekkoStreamsUtils.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/PekkoStreamsUtils.scala deleted file mode 100644 index 9ddcb29ac7..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/PekkoStreamsUtils.scala +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. 
All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.fetchcontracts.util - -import com.daml.scalautil.Statement.discard -import org.apache.pekko.NotUsed -import org.apache.pekko.stream.scaladsl.{Broadcast, Flow, GraphDSL, Partition} -import org.apache.pekko.stream.{FanOutShape2, Graph} -import scalaz.syntax.order.* -import scalaz.{-\/, Order, \/, \/-} - -// Generic utilities for pekko-streams and doobie. -object PekkoStreamsUtils { - def partition[A, B]: Graph[FanOutShape2[A \/ B, A, B], NotUsed] = - GraphDSL.create() { implicit b => - import GraphDSL.Implicits.* - val split = b.add( - Partition[A \/ B]( - 2, - { - case -\/(_) => 0 - case \/-(_) => 1 - }, - ) - ) - val as = b.add(Flow[A \/ B].collect { case -\/(a) => a }) - val bs = b.add(Flow[A \/ B].collect { case \/-(b) => b }) - discard(split ~> as) - discard(split ~> bs) - new FanOutShape2(split.in, as.out, bs.out) - } - - private[fetchcontracts] def project2[A, B]: Graph[FanOutShape2[(A, B), A, B], NotUsed] = - GraphDSL.create() { implicit b => - import GraphDSL.Implicits.* - val split = b add Broadcast[(A, B)](2, eagerCancel = true) - val left = b add Flow.fromFunction((_: (A, B))._1) - val right = b add Flow.fromFunction((_: (A, B))._2) - discard(split ~> left) - discard(split ~> right) - new FanOutShape2(split.in, left.out, right.out) - } - - private[fetchcontracts] def last[A](ifEmpty: A): Flow[A, A, NotUsed] = - Flow[A].fold(ifEmpty)((_, later) => later) - - private[fetchcontracts] def max[A: Order](ifEmpty: A): Flow[A, A, NotUsed] = - Flow[A].fold(ifEmpty)(_ max _) -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/Endpoints.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/Endpoints.scala index 35bae9870a..7aa432131a 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/Endpoints.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/Endpoints.scala @@ -5,9 +5,7 @@ package com.digitalasset.canton.http import com.daml.logging.LoggingContextOf import com.daml.logging.LoggingContextOf.withEnrichedLoggingContext -import com.digitalasset.canton.http.json.v1.{CommandService, ContractsService, V1Routes} import com.digitalasset.canton.http.json.v2.V2Routes -import com.digitalasset.canton.http.metrics.HttpApiMetrics import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.tracing.NoTracing import org.apache.pekko.http.scaladsl.model.* @@ -15,8 +13,7 @@ import org.apache.pekko.http.scaladsl.server import org.apache.pekko.http.scaladsl.server.Directives.{extractClientIP, *} import org.apache.pekko.http.scaladsl.server.RouteResult.* import org.apache.pekko.http.scaladsl.server.{Directive, Directive0, PathMatcher, Route} -import scalaz.syntax.std.option.* -import scalaz.{-\/, EitherT, \/-} +import scalaz.EitherT import spray.json.* import scala.concurrent.{ExecutionContext, Future} @@ -29,7 +26,6 @@ import util.Logging.{InstanceUUID, RequestID, extendWithRequestIdLogCtx} class Endpoints( healthService: HealthService, v2Routes: V2Routes, - v1Routes: V1Routes, shouldLogHttpBodies: Boolean, val loggerFactory: NamedLoggerFactory, )(implicit ec: ExecutionContext) @@ -163,8 +159,7 @@ class Endpoints( logRequestAndResultFn(lc) def all(implicit - lc0: LoggingContextOf[InstanceUUID], - metrics: HttpApiMetrics, + lc0: LoggingContextOf[InstanceUUID] ): Route = extractRequest apply { _ => 
implicit val lc: LoggingContextOf[InstanceUUID with RequestID] = extendWithRequestIdLogCtx(identity)(lc0) @@ -180,7 +175,6 @@ class Endpoints( def path[L](pm: PathMatcher[L]) = server.Directives.path(pm) & markThroughputAndLogProcessingTime & logRequestAndResult concat( - v1Routes.v1Routes, path("livez") apply responseToRoute(Future.successful(HttpResponse(status = StatusCodes.OK))), path("readyz") apply responseToRoute(healthService.ready().map(_.toHttpResponse)), v2Routes.v2Routes, @@ -191,34 +185,4 @@ class Endpoints( object Endpoints { type ET[A] = EitherT[Future, Error, A] - - final class IntoEndpointsError[-A](val run: A => Error) extends AnyVal - object IntoEndpointsError { - import com.digitalasset.canton.http.json.v1.LedgerClientJwt.Grpc.Category - - implicit val id: IntoEndpointsError[Error] = new IntoEndpointsError(identity) - - implicit val fromCommands: IntoEndpointsError[CommandService.Error] = new IntoEndpointsError({ - case CommandService.InternalError(id, reason) => - ServerError( - new Exception( - s"command service error, ${id.cata(sym => s"${sym.name}: ", "")}${reason.getMessage}", - reason, - ) - ) - case CommandService.GrpcError(status) => - ParticipantServerError(status) - case CommandService.ClientError(-\/(Category.PermissionDenied), message) => - Unauthorized(message) - case CommandService.ClientError(\/-(Category.InvalidArgument), message) => - InvalidUserInput(message) - }) - - implicit val fromContracts: IntoEndpointsError[ContractsService.Error] = - new IntoEndpointsError({ case ContractsService.InternalError(id, msg) => - ServerError.fromMsg(s"contracts service error, ${id.name}: $msg") - }) - } - - private final case class MkHttpResponse[-T](run: T => Future[HttpResponse]) } diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/EndpointsCompanion.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/EndpointsCompanion.scala index 004307dd5b..499ad59627 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/EndpointsCompanion.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/EndpointsCompanion.scala @@ -3,13 +3,6 @@ package com.digitalasset.canton.http -import com.daml.jwt.{ - AuthServiceJWTCodec, - AuthServiceJWTPayload, - DecodedJwt, - Jwt, - StandardJWTPayload, -} import com.daml.logging.LoggingContextOf import com.digitalasset.base.error.utils.ErrorDetails import com.digitalasset.base.error.utils.ErrorDetails.ErrorDetail @@ -19,33 +12,24 @@ import com.digitalasset.canton.http.util.Logging.{ RequestID, extendWithRequestIdLogCtx, } -import com.digitalasset.canton.http.{JwtPayload, JwtWritePayload, LedgerApiError} -import com.digitalasset.canton.ledger.api.UserRight -import com.digitalasset.canton.ledger.api.refinements.ApiTypes as lar import com.digitalasset.canton.ledger.service.Grpc.StatusEnvelope import com.digitalasset.canton.logging.TracedLogger import com.digitalasset.canton.tracing.NoTracing -import com.digitalasset.daml.lf.data.Ref.UserId import com.google.rpc.{Code as GrpcCode, Status} import org.apache.pekko.http.scaladsl.model.* import org.apache.pekko.http.scaladsl.server.RouteResult.Complete import org.apache.pekko.http.scaladsl.server.{RequestContext, Route} import org.apache.pekko.util.ByteString -import scalaz.syntax.std.either.* -import scalaz.{-\/, EitherT, Monad, NonEmptyList, Show, \/, \/-} +import scalaz.Show import spray.json.JsValue import scala.concurrent.Future 
import scala.util.control.NonFatal import util.GrpcHttpErrorCodes.* -import UserRight.{CanActAs, CanReadAs} object EndpointsCompanion extends NoTracing { - type ValidateJwt = Jwt => Unauthorized \/ DecodedJwt[String] - type ResolveUser = Jwt => UserId => Future[Seq[UserRight]] - sealed abstract class Error extends Product with Serializable final case class InvalidUserInput(message: String) extends Error @@ -71,11 +55,6 @@ object EndpointsCompanion extends NoTracing { final case class NotFound(message: String) extends Error - object ServerError { - // We want stack traces also in the case of simple error messages. - def fromMsg(message: String): ServerError = ServerError(new Exception(message)) - } - object Error { implicit val ShowInstance: Show[Error] = Show shows { case InvalidUserInput(e) => s"Endpoints.InvalidUserInput: ${e: String}" @@ -92,90 +71,6 @@ object EndpointsCompanion extends NoTracing { } } - trait CreateFromUserToken[A] { - def apply( - jwt: StandardJWTPayload, - listUserRights: UserId => Future[Seq[UserRight]], - ): EitherT[Future, Unauthorized, A] - } - - object CreateFromUserToken { - - import com.digitalasset.canton.http.util.FutureUtil.either - - trait FromUser[A, B] { - def apply(userId: String, actAs: List[String], readAs: List[String]): A \/ B - } - - def userIdFromToken( - jwt: StandardJWTPayload - ): Unauthorized \/ UserId = - UserId - .fromString(jwt.userId) - .disjunction - .leftMap(Unauthorized.apply) - - private def transformUserTokenTo[B]( - jwt: StandardJWTPayload, - listUserRights: UserId => Future[Seq[UserRight]], - )( - fromUser: FromUser[Unauthorized, B] - )(implicit - mf: Monad[Future] - ): EitherT[Future, Unauthorized, B] = - for { - userId <- either(userIdFromToken(jwt)) - rights <- EitherT.rightT(listUserRights(userId)) - - actAs = rights.collect { case CanActAs(party) => - party - } - readAs = rights.collect { case CanReadAs(party) => - party - } - res <- either(fromUser(userId, actAs.toList, readAs.toList)) - } yield res - - @SuppressWarnings(Array("org.wartremover.warts.IterableOps")) - implicit def jwtWritePayloadFromUserToken(implicit - mf: Monad[Future] - ): CreateFromUserToken[JwtWritePayload] = - ( - jwt, - listUserRights, - ) => - transformUserTokenTo(jwt, listUserRights)((userId, actAs, readAs) => - for { - actAsNonEmpty <- - if (actAs.isEmpty) - -\/ apply Unauthorized( - "ActAs list of user was empty, this is an invalid state for converting it to a JwtWritePayload" - ) - else \/-(NonEmptyList(actAs.head: String, actAs.tail*)) - } yield JwtWritePayload( - lar.UserId(userId), - lar.Party.subst(actAsNonEmpty), - lar.Party.subst(readAs), - ) - ) - - implicit def jwtPayloadFromUserToken(implicit - mf: Monad[Future] - ): CreateFromUserToken[JwtPayload] = - ( - jwt, - listUserRights, - ) => - transformUserTokenTo(jwt, listUserRights)((userId, actAs, readAs) => - \/ fromEither JwtPayload( - lar.UserId(userId), - actAs = lar.Party.subst(actAs), - readAs = lar.Party.subst(readAs), - ).toRight(Unauthorized("Unable to convert user token into a set of claims")) - ) - - } - def notFound( logger: TracedLogger )(implicit lc: LoggingContextOf[InstanceUUID]): Route = (ctx: RequestContext) => @@ -207,7 +102,6 @@ object EndpointsCompanion extends NoTracing { ) = ErrorResponse( errors = List(error), - warnings = None, status = status, ledgerApiError = ledgerApiError, ) @@ -240,36 +134,4 @@ object EndpointsCompanion extends NoTracing { ) def format(a: JsValue): ByteString = ByteString(a.compactPrint) - - def decodeAndParseJwt( - jwt: Jwt, - decodeJwt: 
ValidateJwt, - ): Error \/ AuthServiceJWTPayload = - decodeJwt(jwt) - .flatMap(a => - AuthServiceJWTCodec - .readFromString(a.payload) - .toEither - .disjunction - .leftMap(Error.fromThrowable) - ) - - def decodeAndParsePayload[A]( - jwt: Jwt, - decodeJwt: ValidateJwt, - resolveUser: ResolveUser, - )(implicit - createFromUserToken: CreateFromUserToken[A], - fm: Monad[Future], - ): EitherT[Future, Error, (Jwt, A)] = - for { - token <- EitherT.either(decodeAndParseJwt(jwt, decodeJwt)) - p <- token match { - case standardToken: StandardJWTPayload => - createFromUserToken( - standardToken, - resolveUser(jwt), - ).leftMap(identity[Error]) - } - } yield (jwt, p: A) } diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/ErrorMessages.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/ErrorMessages.scala deleted file mode 100644 index 20abc8e5e0..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/ErrorMessages.scala +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http - -object ErrorMessages { - def cannotResolveTemplateId(t: ContractTypeId[_]): String = - s"Cannot resolve template ID, given: ${t.toString}" - - def cannotResolveAnyTemplateId: String = - "Cannot resolve any template ID from request" - - def cannotResolveTemplateId(a: ContractLocator[_]): String = - s"Cannot resolve templateId, given: $a" -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/HttpApiServer.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/HttpApiServer.scala index 5716dccda6..17b42dd1fd 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/HttpApiServer.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/HttpApiServer.scala @@ -61,8 +61,7 @@ object HttpApiServer extends NoTracing { } yield { logger.info( s"HTTP JSON API Server started with (address=${config.server.address: String}" + - s", configured httpPort=${config.server.port.getOrElse(0)}" + - s", assigned httpPort=${serverBinding.localAddress.getPort}" + + s", port=${config.server.port}" + s", portFile=${config.server.portFile: Option[Path]}" + s", pathPrefix=${config.server.pathPrefix}" + s", wsConfig=${config.websocketConfig.shows}" + diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/HttpServerConfig.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/HttpServerConfig.scala index af8bc01d41..7227287bcf 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/HttpServerConfig.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/HttpServerConfig.scala @@ -3,6 +3,8 @@ package com.digitalasset.canton.http +import com.digitalasset.canton.config.RequireTypes.Port + import java.nio.file.Path import scala.concurrent.duration.{DurationInt, FiniteDuration} @@ -24,11 +26,16 @@ import scala.concurrent.duration.{DurationInt, FiniteDuration} */ final case class HttpServerConfig( address: String = HttpServerConfig.defaultAddress, - port: Option[Int] = None, + internalPort: Option[Port] = None, portFile: Option[Path] = None, pathPrefix: 
Option[String] = None, requestTimeout: FiniteDuration = HttpServerConfig.defaultRequestTimeout, -) +) { + def port: Port = + internalPort.getOrElse( + throw new IllegalStateException("Accessing server port before default was set") + ) +} object HttpServerConfig { private val defaultAddress: String = java.net.InetAddress.getLoopbackAddress.getHostAddress diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/HttpService.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/HttpService.scala index fd8797fb0e..13cad2b901 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/HttpService.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/HttpService.scala @@ -4,9 +4,9 @@ package com.digitalasset.canton.http import com.daml.grpc.adapter.ExecutionSequencerFactory -import com.daml.jwt.JwtDecoder import com.daml.ledger.resources.{Resource, ResourceContext, ResourceOwner} import com.daml.logging.LoggingContextOf +import com.daml.metrics.api.MetricHandle.Gauge.CloseableGauge import com.daml.metrics.pekkohttp.HttpMetricsInterceptor import com.daml.ports.{Port, PortFiles} import com.daml.tls.TlsVersion @@ -16,7 +16,7 @@ import com.digitalasset.canton.config.{ TlsClientConfig, TlsServerConfig, } -import com.digitalasset.canton.http.json.v1.V1Routes +import com.digitalasset.canton.http.HttpService.HttpServiceHandle import com.digitalasset.canton.http.json.v2.V2Routes import com.digitalasset.canton.http.metrics.HttpApiMetrics import com.digitalasset.canton.http.util.FutureUtil.* @@ -27,10 +27,6 @@ import com.digitalasset.canton.ledger.client.configuration.{ CommandClientConfiguration, LedgerClientConfiguration, } -import com.digitalasset.canton.ledger.client.services.admin.{ - IdentityProviderConfigClient, - UserManagementClient, -} import com.digitalasset.canton.ledger.participant.state.PackageSyncService import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger} import com.digitalasset.canton.platform.PackagePreferenceBackend @@ -40,6 +36,7 @@ import io.grpc.health.v1.health.{HealthCheckRequest, HealthGrpc} import org.apache.pekko.actor.ActorSystem import org.apache.pekko.http.scaladsl.Http.ServerBinding import org.apache.pekko.http.scaladsl.model.Uri +import org.apache.pekko.http.scaladsl.server.Directives.{concat, pathPrefix} import org.apache.pekko.http.scaladsl.server.{PathMatcher, Route} import org.apache.pekko.http.scaladsl.settings.ServerSettings import org.apache.pekko.http.scaladsl.{ConnectionContext, Http, HttpsConnectionContext} @@ -51,11 +48,11 @@ import java.io.InputStream import java.nio.file.{Files, Path} import java.security.{Key, KeyStore} import javax.net.ssl.SSLContext -import scala.concurrent.{ExecutionContext, Future} +import scala.concurrent.Future import scala.util.Using class HttpService( - startSettings: StartSettings, + startSettings: JsonApiConfig, httpsConfiguration: Option[TlsServerConfig], channel: Channel, packageSyncService: PackageSyncService, @@ -68,181 +65,125 @@ class HttpService( lc: LoggingContextOf[InstanceUUID], metrics: HttpApiMetrics, authInterceptor: AuthInterceptor, -) extends ResourceOwner[ServerBinding] +) extends ResourceOwner[HttpServiceHandle] with NamedLogging with NoTracing { private type ET[A] = EitherT[Future, HttpService.Error, A] - @SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements")) - def acquire()(implicit context: ResourceContext): 
Resource[ServerBinding] = - Resource({ - logger.info(s"Starting JSON API server, ${lc.makeString}") + def acquire()(implicit context: ResourceContext): Resource[HttpServiceHandle] = { + logger.info(s"Starting JSON API server, ${lc.makeString}") - import startSettings.* - val DummyUserId: UserId = UserId("HTTP-JSON-API-Gateway") + val DummyUserId: UserId = UserId("HTTP-JSON-API-Gateway") - val settings: ServerSettings = ServerSettings(asys) - .withTransparentHeadRequests(true) - .mapTimeouts(_.withRequestTimeout(startSettings.server.requestTimeout)) + val clientConfig = LedgerClientConfiguration( + userId = UserId.unwrap(DummyUserId), + commandClient = CommandClientConfiguration.default, + ) - implicit val wsConfig = startSettings.websocketConfig.getOrElse(WebsocketConfig()) + val ledgerClient: DamlLedgerClient = + DamlLedgerClient.withoutToken(channel, clientConfig, loggerFactory) - val clientConfig = LedgerClientConfiguration( - userId = UserId.unwrap(DummyUserId), - commandClient = CommandClientConfiguration.default, - ) + val ledgerHealthService = HealthGrpc.stub(channel) + val healthService = new HealthService(() => ledgerHealthService.check(HealthCheckRequest())) - val ledgerClient: DamlLedgerClient = - DamlLedgerClient.withoutToken(channel, clientConfig, loggerFactory) + for { + gauge <- Resource(Future { + metrics.health.registerHealthGauge( + HttpApiMetrics.ComponentName, + () => healthService.ready().map(_.checks.forall(_.result)), + ) + })(gauge => Future(gauge.close())) + binding <- Resource(serverBinding(ledgerClient, healthService)) { binding => + logger.info(s"Stopping JSON API server..., ${lc.makeString}") + binding.unbind().void + } + } yield HttpServiceHandle(binding, gauge) + } - val resolveUser: EndpointsCompanion.ResolveUser = - if (startSettings.userManagementWithoutAuthorization) - HttpService.resolveUserWithIdp( - ledgerClient.userManagementClient, - ledgerClient.identityProviderConfigClient, - ) - else - HttpService.resolveUser(ledgerClient.userManagementClient) + private def serverBinding( + ledgerClient: DamlLedgerClient, + healthService: HealthService, + )(implicit context: ResourceContext): Future[ServerBinding] = { + import startSettings.* - import org.apache.pekko.http.scaladsl.server.Directives.* - val bindingEt: EitherT[Future, HttpService.Error, ServerBinding] = - for { - _ <- eitherT(Future.successful(\/-(ledgerClient))) - ledgerHealthService = HealthGrpc.stub(channel) + val settings: ServerSettings = ServerSettings(asys) + .withTransparentHeadRequests(true) + .mapTimeouts(_.withRequestTimeout(startSettings.server.requestTimeout)) - healthService = new HealthService(() => ledgerHealthService.check(HealthCheckRequest())) + implicit val wsConfig = startSettings.websocketConfig.getOrElse(WebsocketConfig()) - _ = metrics.health.registerHealthGauge( - HttpApiMetrics.ComponentName, - () => healthService.ready().map(_.checks.forall(_.result)), - ) + val bindingEt: EitherT[Future, HttpService.Error, ServerBinding] = + for { + _ <- eitherT(Future.successful(\/-(ledgerClient))) + + v2Routes = V2Routes( + ledgerClient, + metadataServiceEnabled = startSettings.damlDefinitionsServiceEnabled, + packageSyncService, + packagePreferenceBackend, + mat.executionContext, + loggerFactory, + ) - v2Routes = V2Routes( - ledgerClient, - metadataServiceEnabled = startSettings.damlDefinitionsServiceEnabled, - packageSyncService, - packagePreferenceBackend, - mat.executionContext, - loggerFactory, - ) + jsonEndpoints = new Endpoints( + healthService, + v2Routes, + 
debugLoggingOfHttpBodies, + loggerFactory, + ) - v1Routes = V1Routes( - ledgerClient, - httpsConfiguration.isEmpty, - HttpService.decodeJwt, - debugLoggingOfHttpBodies, - resolveUser, - ledgerClient.userManagementClient, - loggerFactory, - websocketConfig, - ) + rateDurationSizeMetrics = HttpMetricsInterceptor.rateDurationSizeMetrics(metrics.http) - jsonEndpoints = new Endpoints( - healthService, - v2Routes, - v1Routes, - debugLoggingOfHttpBodies, - loggerFactory, - ) + defaultEndpoints = + rateDurationSizeMetrics apply jsonEndpoints.all - rateDurationSizeMetrics = HttpMetricsInterceptor.rateDurationSizeMetrics(metrics.http) + allEndpoints: Route = concat( + defaultEndpoints, + EndpointsCompanion.notFound(logger), + ) + prefixedEndpoints = server.pathPrefix + .map(_.split("/").toList.dropWhile(_.isEmpty)) + .collect { case head :: tl => + val joinedPrefix = tl.foldLeft(PathMatcher(Uri.Path(head), ()))(_ slash _) + pathPrefix(joinedPrefix)(allEndpoints) + } + .getOrElse(allEndpoints) - defaultEndpoints = - rateDurationSizeMetrics apply jsonEndpoints.all + binding <- liftET[HttpService.Error] { + val serverBuilder = Http() + .newServerAt(server.address, server.port.unwrap) + .withSettings(settings) - allEndpoints: Route = concat( - defaultEndpoints, - EndpointsCompanion.notFound(logger), - ) - prefixedEndpoints = server.pathPrefix - .map(_.split("/").toList.dropWhile(_.isEmpty)) - .collect { case head :: tl => - val joinedPrefix = tl.foldLeft(PathMatcher(Uri.Path(head), ()))(_ slash _) - pathPrefix(joinedPrefix)(allEndpoints) + httpsConfiguration + .fold(serverBuilder) { config => + logger.info(s"Enabling HTTPS with $config") + serverBuilder.enableHttps(HttpService.httpsConnectionContext(config)(logger)) } - .getOrElse(allEndpoints) - - binding <- liftET[HttpService.Error] { - val serverBuilder = Http() - .newServerAt(server.address, server.port.getOrElse(0)) - .withSettings(settings) - - httpsConfiguration - .fold(serverBuilder) { config => - logger.info(s"Enabling HTTPS with $config") - serverBuilder.enableHttps(HttpService.httpsConnectionContext(config)(logger)) - } - .bind(prefixedEndpoints) - } + .bind(prefixedEndpoints) + } - _ <- either( - server.portFile.cata(f => HttpService.createPortFile(f, binding), \/-(())) - ): ET[Unit] + _ <- either( + server.portFile.cata(f => HttpService.createPortFile(f, binding), \/-(())) + ): ET[Unit] - } yield binding + } yield binding - (bindingEt.run: Future[HttpService.Error \/ ServerBinding]).flatMap { - case -\/(error) => Future.failed(new RuntimeException(error.message)) - case \/-(binding) => Future.successful(binding) - } - }) { binding => - logger.info(s"Stopping JSON API server..., ${lc.makeString}") - binding.unbind().void + (bindingEt.run: Future[HttpService.Error \/ ServerBinding]).flatMap { + case -\/(error) => Future.failed(new RuntimeException(error.message)) + case \/-(binding) => Future.successful(binding) } + } } object HttpService extends NoTracing { + + final case class HttpServiceHandle(binding: ServerBinding, gauge: CloseableGauge) // if no minimumServerProtocolVersion is set `config.protocols` returns an empty list // but we still want to setup some protocols private val allowedProtocols = Set[TlsVersion.TlsVersion](TlsVersion.V1_2, TlsVersion.V1_3).map(_.version) - def resolveUser(userManagementClient: UserManagementClient): EndpointsCompanion.ResolveUser = - jwt => userId => userManagementClient.listUserRights(userId = userId, token = Some(jwt.value)) - - def resolveUserWithIdp( - userManagementClient: UserManagementClient, - 
idpClient: IdentityProviderConfigClient, - )(implicit ec: ExecutionContext): EndpointsCompanion.ResolveUser = jwt => - userId => { - for { - idps <- idpClient - .listIdentityProviderConfigs(token = Some(jwt.value)) - .map(_.map(_.identityProviderId.value)) - userWithIdp <- Future - .traverse("" +: idps)(idp => - userManagementClient - .listUsers( - token = Some(jwt.value), - identityProviderId = idp, - pageToken = "", - // Hardcoded limit for users within any idp. This is enough for the limited usage - // of this functionality in the transition phase from json-api v1 to v2. - pageSize = 1000, - ) - .map(_._1) - ) - .map(_.flatten.filter(_.id == userId)) - userRight <- Future.traverse(userWithIdp)(user => - userManagementClient.listUserRights( - token = Some(jwt.value), - userId = userId, - identityProviderId = user.identityProviderId.toRequestString, - ) - ) - } yield userRight.flatten - - } - // TODO(#13303) Check that this is intended to be used as ValidateJwt in prod code - // and inline. - // Decode JWT without any validation - private val decodeJwt: EndpointsCompanion.ValidateJwt = - jwt => - \/.fromEither( - JwtDecoder.decode(jwt).leftMap(e => EndpointsCompanion.Unauthorized(e.prettyPrint)) - ) - private[http] def createPortFile( file: Path, binding: org.apache.pekko.http.scaladsl.Http.ServerBinding, diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/JsonApiConfig.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/JsonApiConfig.scala index 2493cea90f..80496c9725 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/JsonApiConfig.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/JsonApiConfig.scala @@ -13,19 +13,18 @@ import scala.concurrent.duration.* // The internal transient scopt structure *and* StartSettings; external `start` // users should extend StartSettings or DefaultStartSettings themselves final case class JsonApiConfig( - server: HttpServerConfig, + enabled: Boolean = true, + server: HttpServerConfig = HttpServerConfig(), websocketConfig: Option[WebsocketConfig] = None, - debugLoggingOfHttpBodies: Boolean = false, // v1 only + debugLoggingOfHttpBodies: Boolean = false, damlDefinitionsServiceEnabled: Boolean = false, - userManagementWithoutAuthorization: Boolean = false, // v1 only -) extends StartSettings +) object JsonApiConfig { implicit val jsonApiConfigCantonConfigValidator: CantonConfigValidator[JsonApiConfig] = CantonConfigValidator.validateAll // Do not recurse as there are no enterprise features on the JSON API } -// It is public for Daml Hub final case class WebsocketConfig( maxDuration: FiniteDuration = WSC.DefaultMaxDuration, // v1 only throttleElem: Int = WSC.DefaultThrottleElem, // v1 only @@ -39,7 +38,7 @@ final case class WebsocketConfig( object WebsocketConfig { implicit val showInstance: Show[WebsocketConfig] = Show.shows(c => - s"WebsocketConfig(maxDuration=${c.maxDuration}, heartBeatPer=${c.heartbeatPeriod})" + s"WebsocketConfig(httpListMaxElementsLimit=${c.httpListMaxElementsLimit}, httpListWaitTime=${c.httpListWaitTime})" ) val DefaultMaxDuration: FiniteDuration = 120.minutes diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/StartSettings.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/StartSettings.scala deleted file mode 100644 index e947f02043..0000000000 --- 
a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/StartSettings.scala +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http - -trait StartSettings { - val server: HttpServerConfig - val websocketConfig: Option[WebsocketConfig] - val debugLoggingOfHttpBodies: Boolean - val damlDefinitionsServiceEnabled: Boolean - val userManagementWithoutAuthorization: Boolean -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/ApiJsonDecoder.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/ApiJsonDecoder.scala deleted file mode 100644 index 05cbc4308b..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/ApiJsonDecoder.scala +++ /dev/null @@ -1,284 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json - -import com.daml.jwt.Jwt -import com.daml.ledger.api.v2 as lav2 -import com.daml.logging.LoggingContextOf -import com.digitalasset.canton.http.ErrorMessages.cannotResolveTemplateId -import com.digitalasset.canton.http.json.v1.PackageService -import com.digitalasset.canton.http.util.FutureUtil.either -import com.digitalasset.canton.http.util.Logging.InstanceUUID -import com.digitalasset.canton.http.{ - CommandMeta, - ContractLocator, - ContractTypeId, - ContractTypeRef, - CreateAndExerciseCommand, - CreateCommand, - EnrichedContractId, - EnrichedContractKey, - ExerciseCommand, - HasTemplateId, - LfType, - LfValue, -} -import com.digitalasset.daml.lf.data.Ref -import scalaz.EitherT.eitherT -import scalaz.std.option.* -import scalaz.std.scalaFuture.* -import scalaz.syntax.applicative.{ToFunctorOps as _, *} -import scalaz.syntax.bitraverse.* -import scalaz.syntax.std.option.* -import scalaz.syntax.traverse.* -import scalaz.{-\/, EitherT, Foldable, Traverse, \/} -import spray.json.{JsValue, JsonReader} - -import scala.concurrent.{ExecutionContext, Future} - -import JsValueToApiValueConverter.mustBeApiRecord - -class ApiJsonDecoder( - resolveContractTypeId: PackageService.ResolveContractTypeId, - resolveTemplateRecordType: PackageService.ResolveTemplateRecordType, - resolveChoiceArgType: PackageService.ResolveChoiceArgType, - resolveKeyType: PackageService.ResolveKeyType, - jsValueToApiValue: (LfType, JsValue) => JsonError \/ lav2.value.Value, - jsValueToLfValue: (LfType, JsValue) => JsonError \/ LfValue, -) { - - import com.digitalasset.canton.http.util.ErrorOps.* - type ET[A] = EitherT[Future, JsonError, A] - - def decodeCreateCommand(a: JsValue, jwt: Jwt)(implicit - ev1: JsonReader[ - CreateCommand[JsValue, ContractTypeId.Template.RequiredPkg] - ], - ec: ExecutionContext, - lc: LoggingContextOf[InstanceUUID], - ): ET[CreateCommand[lav2.value.Record, ContractTypeId.Template.RequiredPkg]] = { - val err = "ApiJsonDecoder_decodeCreateCommand" - for { - fj <- either( - SprayJson - .decode[CreateCommand[JsValue, ContractTypeId.Template.RequiredPkg]](a) - .liftErrS(err)(JsonError) - ) - - tmplId <- templateId_(fj.templateId, jwt) - payloadT <- either(templateRecordType(tmplId.latestPkgId)) - - fv <- either( - fj - .copy(templateId = tmplId.original) - .traversePayload(x => jsValueToApiValue(payloadT, x).flatMap(mustBeApiRecord)) - ) - } 
yield fv - } - - def decodeUnderlyingValues[F[_]: Traverse: HasTemplateId.Compat]( - fa: F[JsValue], - jwt: Jwt, - )(implicit - ec: ExecutionContext, - lc: LoggingContextOf[InstanceUUID], - ): ET[F[lav2.value.Value]] = - for { - damlLfId <- lookupLfType(fa, jwt) - apiValue <- either(fa.traverse(jsValue => jsValueToApiValue(damlLfId, jsValue))) - } yield apiValue - - def decodeUnderlyingValuesToLf[F[_]: Traverse: HasTemplateId.Compat]( - fa: F[JsValue], - jwt: Jwt, - )(implicit - ec: ExecutionContext, - lc: LoggingContextOf[InstanceUUID], - ): ET[F[LfValue]] = - for { - lfType <- lookupLfType(fa, jwt) - lfValue <- either(fa.traverse(jsValue => jsValueToLfValue(lfType, jsValue))) - } yield lfValue - - private def lookupLfType[F[_]](fa: F[_], jwt: Jwt)(implicit - ec: ExecutionContext, - lc: LoggingContextOf[InstanceUUID], - H: HasTemplateId[F], - ): ET[H.TypeFromCtId] = - for { - tId <- templateId_(H.templateId(fa), jwt) - lfType <- either( - H - .lfType( - fa, - tId.latestPkgId, - resolveTemplateRecordType, - resolveChoiceArgType, - resolveKeyType, - ) - .liftErrS("ApiJsonDecoder_lookupLfType")(JsonError) - ) - } yield lfType - - def decodeContractLocatorKey( - a: ContractLocator[JsValue], - jwt: Jwt, - )(implicit - ec: ExecutionContext, - lc: LoggingContextOf[InstanceUUID], - ): ET[ContractLocator[LfValue]] = - a match { - case k: EnrichedContractKey[JsValue] => - decodeUnderlyingValuesToLf[EnrichedContractKey](k, jwt).map(_.widen) - case c: EnrichedContractId => - (c: ContractLocator[LfValue]).pure[ET] - } - - def decodeExerciseCommand(a: JsValue, jwt: Jwt)(implicit - ev1: JsonReader[ExerciseCommand.RequiredPkg[JsValue, ContractLocator[JsValue]]], - ec: ExecutionContext, - lc: LoggingContextOf[InstanceUUID], - ): ET[ - ExerciseCommand.RequiredPkg[LfValue, ContractLocator[LfValue]] - ] = - for { - cmd0 <- either( - SprayJson - .decode[ExerciseCommand.RequiredPkg[JsValue, ContractLocator[JsValue]]](a) - .liftErrS("ApiJsonDecoder_decodeExerciseCommand")(JsonError) - ) - - ifIdlfType <- lookupLfType[ - ExerciseCommand.RequiredPkg[+*, ContractLocator[_]] - ]( - cmd0, - jwt, - ) - (oIfaceId, argLfType) = ifIdlfType - // treat an inferred iface ID as a user-specified one - choiceIfaceOverride <- - if (oIfaceId.isDefined) - oIfaceId.map(i => i.map(p => Ref.PackageRef.Id(p): Ref.PackageRef)).pure[ET] - else cmd0.choiceInterfaceId.traverse(templateId_(_, jwt).map(_.original)) - - lfArgument <- either(jsValueToLfValue(argLfType, cmd0.argument)) - metaWithResolvedIds <- cmd0.meta.traverse(resolveMetaTemplateIds(_, jwt)) - - cmd1 <- - cmd0 - .copy( - argument = lfArgument, - choiceInterfaceId = choiceIfaceOverride, - meta = metaWithResolvedIds, - ) - .bitraverse( - _.point[ET], - ref => decodeContractLocatorKey(ref, jwt), - ): ET[ExerciseCommand.RequiredPkg[LfValue, ContractLocator[ - LfValue - ]]] - - } yield cmd1 - - def decodeCreateAndExerciseCommand(a: JsValue, jwt: Jwt)(implicit - ev1: JsonReader[ - CreateAndExerciseCommand[ - JsValue, - JsValue, - ContractTypeId.Template.RequiredPkg, - ContractTypeId.RequiredPkg, - ] - ], - ec: ExecutionContext, - lc: LoggingContextOf[InstanceUUID], - ): EitherT[Future, JsonError, CreateAndExerciseCommand.LAVResolved] = { - val err = "ApiJsonDecoder_decodeCreateAndExerciseCommand" - for { - fjj <- either( - SprayJson - .decode[CreateAndExerciseCommand[ - JsValue, - JsValue, - ContractTypeId.Template.RequiredPkg, - ContractTypeId.RequiredPkg, - ]](a) - .liftErrS(err)(JsonError) - ).flatMap(_.bitraverse(templateId_(_, jwt), templateId_(_, jwt))) - - tId = 
fjj.templateId.latestPkgId - ciId = fjj.choiceInterfaceId.map(_.latestPkgId) - - payloadT <- either(resolveTemplateRecordType(tId).liftErr(JsonError)) - oIfIdArgT <- either(resolveChoiceArgType(ciId getOrElse tId, fjj.choice).liftErr(JsonError)) - (oIfaceId, argT) = oIfIdArgT - - payload <- either(jsValueToApiRecord(payloadT, fjj.payload)) - argument <- either(jsValueToApiValue(argT, fjj.argument)) - - choiceIfaceOverride = - if (oIfaceId.isDefined) oIfaceId.map(i => i.map(p => Ref.PackageRef.Id(p): Ref.PackageRef)) - else fjj.choiceInterfaceId.map(_.original) - - cmd <- fjj.bitraverse(_.original.pure[ET], _.pure[ET]) - } yield cmd.copy( - payload = payload, - argument = argument, - choiceInterfaceId = choiceIfaceOverride, - ) - } - - private[this] def jsValueToApiRecord(t: LfType, v: JsValue) = - jsValueToApiValue(t, v) flatMap mustBeApiRecord - - private[this] def resolveMetaTemplateIds[ - U, - CtId[T] <: ContractTypeId[T] with ContractTypeId.Ops[CtId, T], - ]( - meta: CommandMeta[U with ContractTypeId.RequiredPkg], - jwt: Jwt, - )(implicit - ec: ExecutionContext, - lc: LoggingContextOf[InstanceUUID], - resolveOverload: PackageService.ResolveContractTypeId.Overload[U, CtId], - ): ET[CommandMeta[CtId[Ref.PackageRef]]] = for { - // resolve as few template IDs as possible - tpidToResolved <- { - import scalaz.std.vector.* - val inputTpids = Foldable[CommandMeta].toSet(meta) - inputTpids.toVector - .traverse(ot => templateId_(ot, jwt).map(_.original) strengthL ot) - .map(_.toMap) - } - } yield meta map tpidToResolved - - private def templateId_[U, CtId[T] <: ContractTypeId[T] with ContractTypeId.Ops[CtId, T]]( - id: U with ContractTypeId.RequiredPkg, - jwt: Jwt, - )(implicit - ec: ExecutionContext, - lc: LoggingContextOf[InstanceUUID], - resolveOverload: PackageService.ResolveContractTypeId.Overload[U, CtId], - ): ET[ContractTypeRef[CtId]] = - eitherT( - resolveContractTypeId(jwt)(id) - .map(_.toOption.flatten.toRightDisjunction(JsonError(cannotResolveTemplateId(id)))) - ) - - def templateRecordType( - id: ContractTypeId.Template.RequiredPkgId - ): JsonError \/ LfType = - resolveTemplateRecordType(id).liftErr(JsonError) - - def keyType(id: ContractTypeId.Template.RequiredPkg)(implicit - ec: ExecutionContext, - lc: LoggingContextOf[InstanceUUID], - jwt: Jwt, - ): ET[LfType] = - templateId_(id, jwt).map(_.latestPkgId).flatMap { - case it: ContractTypeId.Template.ResolvedPkgId => - either(resolveKeyType(it: ContractTypeId.Template.ResolvedPkgId).liftErr(JsonError)) - case other => - either(-\/(JsonError(s"Expect contract type Id to be template Id, got otherwise: $other"))) - } -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/ApiJsonEncoder.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/ApiJsonEncoder.scala deleted file mode 100644 index 6ffb9702fe..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/ApiJsonEncoder.scala +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
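Note on the `HttpServerConfig` hunk earlier in this diff: `port` changes from `Option[Int]` (where the old `getOrElse(0)` meant "let the OS pick a free port") to a validated `Port` behind an accessor that fails fast when no value has been populated; `server.port.unwrap` in `HttpService.serverBinding` relies on that contract. A minimal, self-contained sketch of the pattern, where `SimplePort` is a hypothetical stand-in for `com.digitalasset.canton.config.RequireTypes.Port`:

// SimplePort is an illustrative stand-in, not Canton's actual Port type.
final case class SimplePort(unwrap: Int) {
  require(0 <= unwrap && unwrap <= 65535, s"invalid port: $unwrap")
}

final case class ServerConfigSketch(internalPort: Option[SimplePort] = None) {
  // Mirrors HttpServerConfig.port: reading the port before configuration
  // loading has populated internalPort is treated as a programming error.
  def port: SimplePort =
    internalPort.getOrElse(
      throw new IllegalStateException("Accessing server port before default was set")
    )
}

object ServerConfigSketchDemo extends App {
  val cfg = ServerConfigSketch(internalPort = Some(SimplePort(7575)))
  println(cfg.port.unwrap) // prints 7575; ServerConfigSketch().port would throw
}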
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json - -import com.daml.ledger.api.v2 as lav2 -import com.digitalasset.canton.http -import scalaz.\/ -import scalaz.syntax.bitraverse.* -import scalaz.syntax.show.* -import scalaz.syntax.traverse.* -import spray.json.{JsObject, JsValue, JsonWriter} - -class ApiJsonEncoder( - val apiRecordToJsObject: lav2.value.Record => JsonError \/ JsObject, - val apiValueToJsValue: lav2.value.Value => JsonError \/ JsValue, -) { - - import com.digitalasset.canton.http.util.ErrorOps.* - - def encodeExerciseCommand( - cmd: http.ExerciseCommand.RequiredPkg[lav2.value.Value, http.ContractLocator[ - lav2.value.Value - ]] - )(implicit - ev: JsonWriter[http.ExerciseCommand.RequiredPkg[JsValue, http.ContractLocator[JsValue]]] - ): JsonError \/ JsValue = - for { - x <- cmd.bitraverse( - arg => apiValueToJsValue(arg), - ref => ref.traverse(a => apiValueToJsValue(a)), - ): JsonError \/ http.ExerciseCommand.RequiredPkg[JsValue, http.ContractLocator[JsValue]] - - y <- SprayJson.encode(x).liftErr(JsonError) - - } yield y - - def encodeCreateCommand[CtId]( - cmd: http.CreateCommand[lav2.value.Record, CtId] - )(implicit - ev: JsonWriter[http.CreateCommand[JsValue, CtId]] - ): JsonError \/ JsValue = - for { - x <- cmd.traversePayload( - apiRecordToJsObject(_) - ): JsonError \/ http.CreateCommand[JsValue, CtId] - y <- SprayJson.encode(x).liftErr(JsonError) - - } yield y - - def encodeCreateAndExerciseCommand[CtId, IfceId]( - cmd: http.CreateAndExerciseCommand[ - lav2.value.Record, - lav2.value.Value, - CtId, - IfceId, - ] - )(implicit - ev: JsonWriter[http.CreateAndExerciseCommand[JsValue, JsValue, CtId, IfceId]] - ): JsonError \/ JsValue = - for { - jsCmd <- cmd.traversePayloadsAndArgument(apiRecordToJsObject, apiValueToJsValue) - y <- SprayJson - .encode(jsCmd: http.CreateAndExerciseCommand[JsValue, JsValue, CtId, IfceId]) - .liftErr(JsonError) - - } yield y - - object implicits { - implicit val ApiValueJsonWriter: JsonWriter[lav2.value.Value] = (obj: lav2.value.Value) => - apiValueToJsValue(obj).valueOr(e => spray.json.serializationError(e.shows)) - - implicit val ApiRecordJsonWriter: JsonWriter[lav2.value.Record] = (obj: lav2.value.Record) => - apiRecordToJsObject(obj).valueOr(e => spray.json.serializationError(e.shows)) - } -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/ExtraFormats.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/ExtraFormats.scala deleted file mode 100644 index 21b7a9171e..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/ExtraFormats.scala +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json - -import com.digitalasset.canton.topology.SynchronizerId -import scalaz.{@@, Tag} -import spray.json.{JsNumber, JsString, JsValue, JsonFormat, deserializationError} - -import java.time.Instant - -trait ExtraFormats { - - def taggedJsonFormat[A: JsonFormat, T]: JsonFormat[A @@ T] = Tag.subst(implicitly[JsonFormat[A]]) - - implicit val InstantFormat: JsonFormat[java.time.Instant] = new JsonFormat[Instant] { - override def write(obj: Instant): JsValue = JsNumber(obj.toEpochMilli) - - override def read(json: JsValue): Instant = json match { - case JsNumber(a) => java.time.Instant.ofEpochMilli(a.toLongExact) - case _ => deserializationError("java.time.Instant must be epoch millis") - } - } - - implicit val synchronizerIdFormat: JsonFormat[SynchronizerId] = new JsonFormat[SynchronizerId] { - override def write(obj: SynchronizerId): JsValue = JsString(obj.toProtoPrimitive) - - override def read(json: JsValue): SynchronizerId = json match { - case JsString(stringSynchronizerId) => - SynchronizerId.fromString(stringSynchronizerId) match { - case Left(err) => deserializationError(err) - case Right(synchronizerId) => synchronizerId - } - case _ => deserializationError("Synchronizer id must be a string") - } - } -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/JsValueToApiValueConverter.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/JsValueToApiValueConverter.scala deleted file mode 100644 index a704d01e95..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/JsValueToApiValueConverter.scala +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
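Note on the deleted `ExtraFormats` trait just above: its `InstantFormat` pinned the wire format of `java.time.Instant` to a JSON number of epoch milliseconds. For readers tracking what the deletion removes, here is the same codec as a standalone spray-json format; a minimal sketch, assuming only the public spray-json API (`InstantEpochMillisFormat` and `roundTrip` are illustrative names):

import java.time.Instant
import spray.json.{JsNumber, JsValue, JsonFormat, deserializationError, enrichAny}

object InstantEpochMillisFormat {
  // Same wire format as the removed ExtraFormats.InstantFormat:
  // an Instant is written as a JSON number holding epoch milliseconds.
  implicit val instantFormat: JsonFormat[Instant] = new JsonFormat[Instant] {
    override def write(obj: Instant): JsValue = JsNumber(obj.toEpochMilli)
    override def read(json: JsValue): Instant = json match {
      case JsNumber(millis) => Instant.ofEpochMilli(millis.toLongExact)
      case other => deserializationError(s"java.time.Instant must be epoch millis, got: $other")
    }
  }

  // Round-tripping loses sub-millisecond precision, matching the old behavior.
  def roundTrip(i: Instant): Instant = i.toJson.convertTo[Instant]
}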
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json - -import com.daml.ledger.api.v2 as lav2 -import com.digitalasset.canton.http -import com.digitalasset.canton.ledger.api.util.LfEngineToApi -import com.digitalasset.daml.lf -import com.digitalasset.daml.lf.typesig -import scalaz.std.string.* -import scalaz.{-\/, \/, \/-} -import spray.json.JsValue - -import JsValueToApiValueConverter.LfTypeLookup -import JsonProtocol.LfValueCodec - -class JsValueToApiValueConverter(lfTypeLookup: LfTypeLookup) { - import com.digitalasset.canton.http.util.ErrorOps.* - - def jsValueToLfValue( - lfId: lf.data.Ref.Identifier, - jsValue: JsValue, - ): JsonError \/ lf.value.Value = - \/.attempt( - LfValueCodec.jsValueToApiValue(jsValue, lfId, lfTypeLookup) - )(identity).liftErr(JsonError) - - def jsValueToLfValue( - lfType: typesig.Type, - jsValue: JsValue, - ): JsonError \/ lf.value.Value = - \/.attempt( - LfValueCodec.jsValueToApiValue(jsValue, lfType, lfTypeLookup) - )(identity).liftErr(JsonError) - - def jsValueToApiValue(lfType: http.LfType, jsValue: JsValue): JsonError \/ lav2.value.Value = - for { - lfValue <- jsValueToLfValue(lfType, jsValue) - apiValue <- JsValueToApiValueConverter.lfValueToApiValue(lfValue) - } yield apiValue -} - -object JsValueToApiValueConverter { - import com.digitalasset.canton.http.util.ErrorOps.* - - type LfTypeLookup = lf.data.Ref.Identifier => Option[lf.typesig.DefDataType.FWT] - - def lfValueToApiValue(lfValue: http.LfValue): JsonError \/ lav2.value.Value = - \/.fromEither(LfEngineToApi.lfValueToApiValue(verbose = true, lfValue)).liftErr(JsonError) - - def mustBeApiRecord(a: lav2.value.Value): JsonError \/ lav2.value.Record = a.sum match { - case lav2.value.Value.Sum.Record(b) => \/-(b) - case _ => -\/(JsonError(s"Expected ${classOf[lav2.value.Value.Sum.Record]}, got: $a")) - } -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/JsonError.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/JsonError.scala deleted file mode 100644 index c7022e7270..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/JsonError.scala +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
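Note ahead of the `JsonProtocol.scala` hunk below: the deleted `baseNFormat` helpers and the surviving `hexStringFormat` are both built on `xemapStringJsonFormat`, a string-backed `JsonFormat` whose reader can reject invalid input. Its signature is visible at the end of that hunk; the body here is an assumed, illustrative re-implementation of the pattern, not the Canton source:

import spray.json.{JsString, JsValue, JsonFormat, deserializationError}

object StringBackedFormats {
  // Serialize as a JSON string, validate on read, fail with a decode error.
  def xemapStringJsonFormat[A](readFn: String => Either[String, A])(
      writeFn: A => String
  ): JsonFormat[A] = new JsonFormat[A] {
    override def write(a: A): JsValue = JsString(writeFn(a))
    override def read(json: JsValue): A = json match {
      case JsString(s) => readFn(s).fold(deserializationError(_), identity)
      case other => deserializationError(s"expected a JSON string, got: $other")
    }
  }

  // Hypothetical usage in the spirit of the deleted Base16Format.
  final case class Hex(value: String)
  implicit val hexFormat: JsonFormat[Hex] = xemapStringJsonFormat { s =>
    if (s.matches("(?i)[0-9a-f]*")) Right(Hex(s.toLowerCase)) else Left(s"not hex: $s")
  }(_.value)
}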
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json - -import scalaz.Show - -final case class JsonError(message: String) - -object JsonError extends (String => JsonError) { - implicit val ShowInstance: Show[JsonError] = Show shows { f => - s"JsonError: ${f.message}" - } -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/JsonProtocol.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/JsonProtocol.scala index 4eabb11992..efc5a0c49a 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/JsonProtocol.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/JsonProtocol.scala @@ -3,67 +3,26 @@ package com.digitalasset.canton.http.json -import com.daml.nonempty.NonEmpty import com.daml.struct.spray.StructJsonFormat -import com.digitalasset.canton.daml.lf.value.json.ApiCodecCompressed import com.digitalasset.canton.http import com.digitalasset.canton.http.* -import com.digitalasset.canton.ledger.api.refinements.ApiTypes as lar import com.digitalasset.daml.lf.data.Ref -import com.digitalasset.daml.lf.value.Value.ContractId -import com.google.protobuf.ByteString import com.google.protobuf.struct.Struct import org.apache.pekko.http.scaladsl.model.StatusCode -import scalaz.syntax.std.option.* import scalaz.syntax.tag.* -import scalaz.{-\/, @@, NonEmptyList, Tag, \/-} +import scalaz.{@@, NonEmptyList, Tag} import spray.json.* import scala.reflect.ClassTag object JsonProtocol extends JsonProtocolLow { - implicit val UserIdFormat: JsonFormat[lar.UserId] = - taggedJsonFormat[String, lar.UserIdTag] + private def taggedJsonFormat[A: JsonFormat, T]: JsonFormat[A @@ T] = + Tag.subst(implicitly[JsonFormat[A]]) implicit val PartyFormat: JsonFormat[http.Party] = taggedJsonFormat - implicit val CommandIdFormat: JsonFormat[lar.CommandId] = - taggedJsonFormat[String, lar.CommandIdTag] - - implicit val ChoiceFormat: JsonFormat[lar.Choice] = taggedJsonFormat[String, lar.ChoiceTag] - - implicit val HttpContractIdFormat: JsonFormat[http.ContractId] = - taggedJsonFormat - - implicit val ContractIdFormat: JsonFormat[ContractId] = - new JsonFormat[ContractId] { - override def write(obj: ContractId) = - JsString(obj.coid) - override def read(json: JsValue) = json match { - case JsString(s) => - ContractId.fromString(s).fold(deserializationError(_), identity) - case _ => deserializationError("ContractId must be a string") - } - } - - implicit val OffsetFormat: JsonFormat[http.Offset] = - taggedJsonFormat - - implicit def NonEmptyListFormat[A: JsonReader: JsonWriter]: JsonFormat[NonEmptyList[A]] = - jsonFormatFromReaderWriter(NonEmptyListReader, NonEmptyListWriter) - - // Do not design your own open typeclasses like JsonFormat was designed. - private[this] def jsonFormatFromReaderWriter[A](implicit - R: JsonReader[_ <: A], - W: JsonWriter[_ >: A], - ): JsonFormat[A] = - new JsonFormat[A] { - override def read(json: JsValue) = R read json - override def write(obj: A) = W write obj - } - /** This intuitively pointless extra type is here to give it specificity so this instance will * beat CollectionFormats#listFormat. 
You would normally achieve the conflict resolution by * putting this instance in a parent of @@ -79,80 +38,6 @@ object JsonProtocol extends JsonProtocolLow { implicit def `List reader only`[A: JsonReader]: JsonReaderList[A] = new JsonReaderList - private[this] def baseNFormat[Tag]( - strToBytesThrowsIAE: String => Array[Byte], - bytesToStrTotal: Array[Byte] => String, - ): JsonFormat[ByteString @@ Tag] = - Tag subst xemapStringJsonFormat { s => - for { - arr <- - try Right(strToBytesThrowsIAE(s)) - catch { - case e: IllegalArgumentException => Left(e.getMessage) - } - } yield ByteString copyFrom arr - }(bytes => bytesToStrTotal(bytes.toByteArray)) - - implicit val Base64Format: JsonFormat[http.Base64] = { - import java.util.Base64.{getDecoder, getEncoder} - baseNFormat(getDecoder.decode, getEncoder.encodeToString) - } - - implicit val Base16Format: JsonFormat[http.Base16] = { - import com.google.common.io.BaseEncoding.base16 - - import java.util.Locale.US - baseNFormat(s => base16.decode(s toUpperCase US), ba => base16.encode(ba) toLowerCase US) - } - - implicit val userDetails: JsonFormat[http.UserDetails] = - jsonFormat2(http.UserDetails.apply) - - implicit val participantAdmin: JsonFormat[ParticipantAdmin.type] = - jsonFormat0(() => ParticipantAdmin) - implicit val identityProviderAdmin: JsonFormat[IdentityProviderAdmin.type] = - jsonFormat0(() => IdentityProviderAdmin) - implicit val canReadAsAnyParty: JsonFormat[CanReadAsAnyParty.type] = - jsonFormat0(() => CanReadAsAnyParty) - implicit val canExecuteAsAnyParty: JsonFormat[CanExecuteAsAnyParty.type] = - jsonFormat0(() => CanExecuteAsAnyParty) - implicit val canActAs: JsonFormat[CanActAs] = jsonFormat1(CanActAs.apply) - implicit val canReadAs: JsonFormat[CanReadAs] = jsonFormat1(CanReadAs.apply) - implicit val canExecuteAs: JsonFormat[CanExecuteAs] = jsonFormat1(CanExecuteAs.apply) - - implicit val userRight: JsonFormat[UserRight] = { - val participantAdminTypeName = ParticipantAdmin.getClass.getSimpleName.replace("$", "") - val identityProviderAdminTypeName = - IdentityProviderAdmin.getClass.getSimpleName.replace("$", "") - val canActAsTypeName = classOf[CanActAs].getSimpleName - val canReadAsTypeName = classOf[CanReadAs].getSimpleName - val canExecuteAsTypeName = classOf[CanExecuteAs].getSimpleName - val canReadAsAnyPartyTypeName = CanReadAsAnyParty.getClass.getSimpleName.replace("$", "") - val canExecuteAsAnyPartyTypeName = CanExecuteAsAnyParty.getClass.getSimpleName.replace("$", "") - - jsonFormatFromADT[UserRight]( - { - case `participantAdminTypeName` => _ => ParticipantAdmin - case `identityProviderAdminTypeName` => _ => IdentityProviderAdmin - case `canActAsTypeName` => canActAs.read(_) - case `canReadAsTypeName` => canReadAs.read(_) - case `canExecuteAsTypeName` => canExecuteAs.read(_) - case `canReadAsAnyPartyTypeName` => _ => CanReadAsAnyParty - case `canExecuteAsAnyPartyTypeName` => _ => CanExecuteAsAnyParty - case typeName => deserializationError(s"Unknown user right type: $typeName") - }, - { - case ParticipantAdmin => participantAdmin.write(ParticipantAdmin) - case IdentityProviderAdmin => identityProviderAdmin.write(IdentityProviderAdmin) - case canActAsObj: CanActAs => canActAs.write(canActAsObj) - case canReadAsObj: CanReadAs => canReadAs.write(canReadAsObj) - case CanReadAsAnyParty => canReadAsAnyParty.write(CanReadAsAnyParty) - case canExecuteAsObj: CanExecuteAs => canExecuteAs.write(canExecuteAsObj) - case CanExecuteAsAnyParty => canExecuteAsAnyParty.write(CanExecuteAsAnyParty) - }, - ) - } - private def 
jsonFormatFromADT[T: ClassTag]( fromJs: String => JsObject => T, toJs: T => JsValue, @@ -186,401 +71,9 @@ object JsonProtocol extends JsonProtocolLow { } } - implicit val PartyDetails: JsonFormat[http.PartyDetails] = - jsonFormat2(http.PartyDetails.apply) - - implicit val CreateUserRequest: JsonFormat[http.CreateUserRequest] = - jsonFormat3(http.CreateUserRequest.apply) - - implicit val ListUserRightsRequest: JsonFormat[http.ListUserRightsRequest] = - jsonFormat1(http.ListUserRightsRequest.apply) - - implicit val GrantUserRightsRequest: JsonFormat[http.GrantUserRightsRequest] = - jsonFormat2(http.GrantUserRightsRequest.apply) - - implicit val RevokeUserRightsRequest: JsonFormat[http.RevokeUserRightsRequest] = - jsonFormat2(http.RevokeUserRightsRequest.apply) - - implicit val GetUserRequest: JsonFormat[http.GetUserRequest] = - jsonFormat1(http.GetUserRequest.apply) - - implicit val DeleteUserRequest: JsonFormat[http.DeleteUserRequest] = - jsonFormat1(http.DeleteUserRequest.apply) - - implicit val AllocatePartyRequest: JsonFormat[http.AllocatePartyRequest] = - jsonFormat2(http.AllocatePartyRequest.apply) - - object LfValueCodec - extends ApiCodecCompressed( - encodeDecimalAsString = true, - encodeInt64AsString = true, - ) - - implicit def TemplateIdRequiredPkgIdFormat[CtId[T] <: http.ContractTypeId[T]](implicit - CtId: http.ContractTypeId.Like[CtId] - ): RootJsonFormat[CtId[Ref.PackageId]] = new TemplateIdFormat(CtId, Ref.PackageId.fromString) - - implicit def TemplateIdRequiredPkgFormat[CtId[T] <: http.ContractTypeId[T]](implicit - CtId: http.ContractTypeId.Like[CtId] - ): RootJsonFormat[CtId[Ref.PackageRef]] = new TemplateIdFormat(CtId, Ref.PackageRef.fromString) - - class TemplateIdFormat[P, CtId[T] <: http.ContractTypeId[T]]( - CtId: http.ContractTypeId.Like[CtId], - readPkg: (String => Either[String, P]), - ) extends RootJsonFormat[CtId[P]] { - override def write(a: CtId[P]) = - JsString(s"${a.packageId.toString: String}:${a.moduleName: String}:${a.entityName: String}") - - override def read(json: JsValue) = json match { - case JsString(str) => - str.split(':') match { - case Array(p, m, e) => - readPkg(p) match { - case Left(reason) => error(json, reason) - case Right(pkgRef) => CtId(pkgRef, m, e) - } - case _ => error(json, "did not have two ':' chars") - } - case _ => error(json, "not JsString") - } - - private def error(json: JsValue, reason: String): Nothing = - deserializationError(s"Expected JsString(<packageId>:<moduleName>:<entityName>), got: $json. 
$reason") - } - - private[this] def decodeContractRef( - fields: Map[String, JsValue], - what: String, - ): http.InputContractRef[JsValue] = - (fields get "templateId", fields get "key", fields get "contractId") match { - case (Some(templateId), Some(key), None) => - -\/((templateId.convertTo[http.ContractTypeId.Template.RequiredPkg], key)) - case (otid, None, Some(contractId)) => - val a = otid map (_.convertTo[http.ContractTypeId.RequiredPkg]) - val b = contractId.convertTo[http.ContractId] - \/-((a, b)) - case (None, Some(_), None) => - deserializationError(s"$what requires key to be accompanied by a templateId") - case (_, None, None) | (_, Some(_), Some(_)) => - deserializationError(s"$what requires either key or contractId field") - } - - implicit val EnrichedContractKeyFormat: RootJsonFormat[http.EnrichedContractKey[JsValue]] = - jsonFormat2(http.EnrichedContractKey.apply[JsValue]) - - implicit val EnrichedContractIdFormat: RootJsonFormat[http.EnrichedContractId] = - jsonFormat2(http.EnrichedContractId.apply) - - private[this] val contractIdAtOffsetKey = "contractIdAtOffset" - - implicit val InitialContractKeyStreamRequest - : RootJsonReader[http.ContractKeyStreamRequest[Unit, JsValue]] = { jsv => - val ekey = jsv.convertTo[http.EnrichedContractKey[JsValue]] - jsv match { - case JsObject(fields) if fields contains contractIdAtOffsetKey => - deserializationError( - s"$contractIdAtOffsetKey is not allowed for WebSocket streams starting at the beginning" - ) - case _ => - } - http.ContractKeyStreamRequest((), ekey) - } - - implicit val ResumingContractKeyStreamRequest: RootJsonReader[ - http.ContractKeyStreamRequest[Option[Option[http.ContractId]], JsValue] - ] = { jsv => - val off = jsv match { - case JsObject(fields) => fields get contractIdAtOffsetKey map (_.convertTo[Option[String]]) - case _ => None - } - val ekey = jsv.convertTo[http.EnrichedContractKey[JsValue]] - type OO[+A] = Option[Option[A]] - http.ContractKeyStreamRequest(http.ContractId.subst[OO, String](off), ekey) - } - - val ReadersKey = "readers" - - implicit val FetchRequestFormat: RootJsonReader[http.FetchRequest[JsValue]] = - new RootJsonFormat[http.FetchRequest[JsValue]] { - override def write(obj: http.FetchRequest[JsValue]): JsValue = { - val http.FetchRequest(locator, readAs) = obj - val lj = locator.toJson - readAs.cata(rl => JsObject(lj.asJsObject.fields.updated(ReadersKey, rl.toJson)), lj) - } - - override def read(json: JsValue): http.FetchRequest[JsValue] = { - val jo = json.asJsObject("fetch request must be a JSON object").fields - http.FetchRequest( - JsObject(jo - ReadersKey).convertTo[http.ContractLocator[JsValue]], - jo.get(ReadersKey).flatMap(_.convertTo[Option[NonEmptyList[http.Party]]]), - ) - } - } - - implicit val ContractLocatorFormat: RootJsonFormat[http.ContractLocator[JsValue]] = - new RootJsonFormat[http.ContractLocator[JsValue]] { - override def write(obj: http.ContractLocator[JsValue]): JsValue = obj match { - case a: http.EnrichedContractKey[JsValue] => EnrichedContractKeyFormat.write(a) - case b: http.EnrichedContractId => EnrichedContractIdFormat.write(b) - } - - override def read(json: JsValue): http.ContractLocator[JsValue] = json match { - case JsObject(fields) => - http.ContractLocator.structure.from(decodeContractRef(fields, "ContractLocator")) - case _ => - deserializationError(s"Cannot read ContractLocator from json: $json") - } - } - - implicit val ContractFormat: RootJsonFormat[http.Contract[JsValue]] = - new RootJsonFormat[http.Contract[JsValue]] { - private val archivedKey = 
"archived" - private val activeKey = "created" - - override def read(json: JsValue): http.Contract[JsValue] = json match { - case JsObject(fields) => - fields.toList match { - case List((`archivedKey`, archived)) => - http.Contract[JsValue](-\/(ArchivedContractFormat.read(archived))) - case List((`activeKey`, active)) => - http.Contract[JsValue](\/-(ActiveContractFormat.read(active))) - case _ => - deserializationError( - s"Contract must be either {$archivedKey: obj} or {$activeKey: obj}, got: $fields" - ) - } - case _ => deserializationError("Contract must be an object") - } - - override def write(obj: http.Contract[JsValue]): JsValue = obj.value match { - case -\/(archived) => JsObject(archivedKey -> ArchivedContractFormat.write(archived)) - case \/-(active) => JsObject(activeKey -> ActiveContractFormat.write(active)) - } - } - - implicit val ActiveContractFormat: RootJsonFormat[http.ActiveContract.ResolvedCtTyId[JsValue]] = { - implicit val `ctid resolved fmt`: JsonFormat[http.ContractTypeId.ResolvedPkgId] = - jsonFormatFromReaderWriter( - TemplateIdRequiredPkgIdFormat[http.ContractTypeId.Template], - // we only write (below) in main, but read ^ in tests. For ^, getting - // the proper contract type ID right doesn't matter - TemplateIdRequiredPkgIdFormat[http.ContractTypeId], - ) - jsonFormat6(http.ActiveContract.apply[ContractTypeId.ResolvedPkgId, JsValue]) - } - - implicit val ArchivedContractFormat: RootJsonFormat[http.ArchivedContract] = - jsonFormat2(http.ArchivedContract.apply) - - // Like requestJsonReader, but suitable for exactly one extra field, simply - // parsing it to the supplied extra type if present. - // Can generalize to >1 field with singleton types and hlists, if you like - private def requestJsonReaderPlusOne[Extra: JsonReader, TpId: JsonFormat, Request]( - validExtraField: String - )( - toRequest: ( - NonEmpty[Set[TpId]], - Option[Extra], - ) => Request - ): RootJsonReader[Request] = - requestJsonReader(Set(validExtraField)) { (tids: NonEmpty[Set[TpId]], extra) => - toRequest(tids, extra get validExtraField map (_.convertTo[Extra])) - } - - /** Derived from autogenerated with 2 extra features: - * 1. template IDs are required - * 1. 
error out on unsupported request fields - */ - private[this] def requestJsonReader[TpId: JsonFormat, Request](validExtraFields: Set[String])( - toRequest: ( - NonEmpty[Set[TpId]], - Map[String, JsValue], - ) => Request - ): RootJsonReader[Request] = { - final case class BaseRequest(templateIds: Set[TpId]) - val validKeys = Set("templateIds") ++ validExtraFields - implicit val primitive: JsonReader[BaseRequest] = jsonFormat1(BaseRequest.apply) - jsv => { - val BaseRequest(tids) = jsv.convertTo[BaseRequest] - val unsupported = jsv.asJsObject.fields.keySet diff validKeys - if (unsupported.nonEmpty) deserializationError(s"unsupported request fields $unsupported") - val extraFields = jsv.asJsObject.fields.filter { case (fieldName, _) => - validExtraFields(fieldName) - } - val nonEmptyTids = NonEmpty from tids getOrElse { - deserializationError("search requires at least one item in 'templateIds'") - } - toRequest(nonEmptyTids, extraFields) - } - } - - implicit val GetActiveContractsRequestFormat: RootJsonReader[http.GetActiveContractsRequest] = - requestJsonReaderPlusOne(ReadersKey)(http.GetActiveContractsRequest.apply) - - implicit val SearchForeverQueryFormat: RootJsonReader[http.SearchForeverQuery] = { - val OffsetKey = "offset" - requestJsonReaderPlusOne(OffsetKey)(http.SearchForeverQuery.apply) - } - - implicit val SearchForeverRequestFormat: RootJsonReader[http.SearchForeverRequest] = { - case multi @ JsArray(_) => - val queriesWithPos = multi.convertTo[NonEmptyList[http.SearchForeverQuery]].zipWithIndex - http.SearchForeverRequest(queriesWithPos) - case single => - http.SearchForeverRequest(NonEmptyList((single.convertTo[http.SearchForeverQuery], 0))) - } - - implicit def DisclosedContractFormat[TmplId: JsonFormat] - : JsonFormat[http.DisclosedContract[TmplId]] = { - val rawJsonFormat = jsonFormat3(http.DisclosedContract[TmplId].apply) - - new JsonFormat[DisclosedContract[TmplId]] { - override def read(json: JsValue): DisclosedContract[TmplId] = { - val raw = rawJsonFormat.read(json) - if ((Base64 unwrap raw.createdEventBlob).isEmpty) - deserializationError("DisclosedContract.createdEventBlob must not be empty") - else raw - } - - override def write(obj: DisclosedContract[TmplId]): JsValue = rawJsonFormat.write(obj) - } - } - implicit val hexStringFormat: JsonFormat[Ref.HexString] = xemapStringJsonFormat(Ref.HexString.fromString)(identity) - implicit val PackageIdFormat: JsonFormat[Ref.PackageId] = - xemapStringJsonFormat(Ref.PackageId.fromString)(identity) - - implicit val deduplicationPeriodOffset: JsonFormat[DeduplicationPeriod.Offset] = jsonFormat1( - DeduplicationPeriod.Offset.apply - ) - implicit val deduplicationPeriodDuration: JsonFormat[DeduplicationPeriod.Duration] = jsonFormat1( - DeduplicationPeriod.Duration.apply - ) - - implicit val DeduplicationPeriodFormat: JsonFormat[DeduplicationPeriod] = { - val deduplicationPeriodOffsetTypeName = - classOf[DeduplicationPeriod.Offset].getSimpleName - val deduplicationPeriodDurationTypeName = - classOf[DeduplicationPeriod.Duration].getSimpleName - - jsonFormatFromADT( - { - case `deduplicationPeriodOffsetTypeName` => deduplicationPeriodOffset.read(_) - case `deduplicationPeriodDurationTypeName` => deduplicationPeriodDuration.read(_) - case typeName => deserializationError(s"Unknown deduplication period type: $typeName") - }, - { - case obj: DeduplicationPeriod.Offset => deduplicationPeriodOffset.write(obj) - case obj: DeduplicationPeriod.Duration => deduplicationPeriodDuration.write(obj) - }, - ) - } - - implicit val SubmissionIdFormat: 
JsonFormat[http.SubmissionId] = taggedJsonFormat - - implicit val WorkflowIdFormat: JsonFormat[http.WorkflowId] = taggedJsonFormat - - implicit def CommandMetaFormat[TmplId: JsonFormat]: JsonFormat[http.CommandMeta[TmplId]] = - jsonFormat9(http.CommandMeta.apply[TmplId]) - - // exposed for testing - private[json] implicit val CommandMetaNoDisclosedFormat - : RootJsonFormat[http.CommandMeta.NoDisclosed] = { - type NeverDC = http.DisclosedContract[Nothing] - implicit object alwaysEmptyList extends JsonFormat[List[NeverDC]] { - override def write(obj: List[NeverDC]): JsValue = JsArray() - override def read(json: JsValue): List[NeverDC] = List.empty - } - implicit object noDisclosed extends OptionFormat[List[NeverDC]] { - override def write(obj: Option[List[NeverDC]]): JsValue = JsNull - override def read(json: JsValue): Option[List[NeverDC]] = None - override def readSome(value: JsValue): Some[List[NeverDC]] = Some(List.empty) - } - jsonFormat9(http.CommandMeta.apply) - } - - implicit val CreateCommandFormat: RootJsonFormat[ - http.CreateCommand[JsValue, http.ContractTypeId.Template.RequiredPkg] - ] = - jsonFormat3( - http.CreateCommand[JsValue, http.ContractTypeId.Template.RequiredPkg] - ) - - implicit val ExerciseCommandFormat: RootJsonFormat[ - http.ExerciseCommand.RequiredPkg[JsValue, http.ContractLocator[JsValue]] - ] = - new RootJsonFormat[ - http.ExerciseCommand.RequiredPkg[JsValue, http.ContractLocator[JsValue]] - ] { - override def write( - obj: http.ExerciseCommand.RequiredPkg[JsValue, http.ContractLocator[JsValue]] - ): JsValue = { - - val reference: JsObject = - ContractLocatorFormat.write(obj.reference).asJsObject("reference must be an object") - - val fields = - reference.fields ++ - Iterable("choice" -> obj.choice.toJson, "argument" -> obj.argument.toJson) ++ - Iterable( - "meta" -> obj.meta.map(_.toJson), - "choiceInterfaceId" -> obj.choiceInterfaceId.map(_.toJson), - ).collect { case (k, Some(v)) => (k, v) } - - JsObject(fields) - } - - override def read( - json: JsValue - ): http.ExerciseCommand.RequiredPkg[JsValue, http.ContractLocator[JsValue]] = { - val reference = ContractLocatorFormat.read(json) - val choice = fromField[http.Choice](json, "choice") - val argument = fromField[JsValue](json, "argument") - val meta = - fromField[Option[http.CommandMeta[ContractTypeId.Template.RequiredPkg]]]( - json, - "meta", - ) - - http.ExerciseCommand( - reference = reference, - choice = choice, - argument = argument, - choiceInterfaceId = - fromField[Option[http.ContractTypeId.RequiredPkg]](json, "choiceInterfaceId"), - meta = meta, - ) - } - } - - implicit val CreateAndExerciseCommandFormat: RootJsonFormat[ - http.CreateAndExerciseCommand[ - JsValue, - JsValue, - http.ContractTypeId.Template.RequiredPkg, - http.ContractTypeId.RequiredPkg, - ] - ] = - jsonFormat6( - http.CreateAndExerciseCommand[ - JsValue, - JsValue, - http.ContractTypeId.Template.RequiredPkg, - http.ContractTypeId.RequiredPkg, - ] - ) - - implicit val CompletionOffsetFormat: JsonFormat[http.CompletionOffset] = - taggedJsonFormat[String, http.CompletionOffsetTag] - - implicit val ExerciseResponseFormat: RootJsonFormat[http.ExerciseResponse[JsValue]] = - jsonFormat3(http.ExerciseResponse[JsValue]) - - implicit val CreateCommandResponseFormat: RootJsonFormat[http.CreateCommandResponse[JsValue]] = - jsonFormat7(http.CreateCommandResponse[JsValue]) - implicit val StatusCodeFormat: RootJsonFormat[StatusCode] = new RootJsonFormat[StatusCode] { override def read(json: JsValue): StatusCode = json match { @@ -591,39 +84,6 @@ 
object JsonProtocol extends JsonProtocolLow { override def write(obj: StatusCode): JsValue = JsNumber(obj.intValue) } - implicit val ServiceWarningFormat: RootJsonFormat[http.ServiceWarning] = - new RootJsonFormat[http.ServiceWarning] { - override def read(json: JsValue): http.ServiceWarning = json match { - case JsObject(fields) if fields.contains("unknownTemplateIds") => - UnknownTemplateIdsFormat.read(json) - case JsObject(fields) if fields.contains("unknownParties") => - UnknownPartiesFormat.read(json) - case _ => - deserializationError( - s"Expected JsObject(unknownTemplateIds | unknownParties -> JsArray(...)), got: $json" - ) - } - - override def write(obj: http.ServiceWarning): JsValue = obj match { - case x: http.UnknownTemplateIds => UnknownTemplateIdsFormat.write(x) - case x: http.UnknownParties => UnknownPartiesFormat.write(x) - } - } - - implicit val AsyncWarningsWrapperFormat: RootJsonFormat[http.AsyncWarningsWrapper] = - jsonFormat1(http.AsyncWarningsWrapper.apply) - - implicit val UnknownTemplateIdsFormat: RootJsonFormat[http.UnknownTemplateIds] = jsonFormat1( - http.UnknownTemplateIds.apply - ) - - implicit val UnknownPartiesFormat: RootJsonFormat[http.UnknownParties] = jsonFormat1( - http.UnknownParties.apply - ) - - implicit def OkResponseFormat[R: JsonFormat]: RootJsonFormat[http.OkResponse[R]] = - jsonFormat3(http.OkResponse[R]) - implicit val ResourceInfoDetailFormat: RootJsonFormat[http.ResourceInfoDetail] = jsonFormat2( http.ResourceInfoDetail.apply ) @@ -679,33 +139,10 @@ object JsonProtocol extends JsonProtocolLow { jsonFormat3(http.LedgerApiError.apply) implicit val ErrorResponseFormat: RootJsonFormat[http.ErrorResponse] = - jsonFormat4(http.ErrorResponse.apply) + jsonFormat3(http.ErrorResponse.apply) implicit val StructFormat: RootJsonFormat[Struct] = StructJsonFormat - implicit def SyncResponseFormat[R: JsonFormat]: RootJsonFormat[http.SyncResponse[R]] = - new RootJsonFormat[http.SyncResponse[R]] { - private val resultKey = "result" - private val errorsKey = "errors" - private val errorMsg = - s"Invalid ${http.SyncResponse.getClass.getSimpleName} format, expected a JSON object with either $resultKey or $errorsKey field" - - override def write(obj: http.SyncResponse[R]): JsValue = obj match { - case a: http.OkResponse[_] => OkResponseFormat[R].write(a) - case b: http.ErrorResponse => ErrorResponseFormat.write(b) - } - - override def read(json: JsValue): http.SyncResponse[R] = json match { - case JsObject(fields) => - (fields get resultKey, fields get errorsKey) match { - case (Some(_), None) => OkResponseFormat[R].read(json) - case (None, Some(_)) => ErrorResponseFormat.read(json) - case _ => deserializationError(errorMsg) - } - case _ => deserializationError(errorMsg) - } - } - // xmap with an error case for StringJsonFormat def xemapStringJsonFormat[A](readFn: String => Either[String, A])( writeFn: A => String @@ -717,7 +154,7 @@ object JsonProtocol extends JsonProtocolLow { } } -sealed abstract class JsonProtocolLow extends DefaultJsonProtocol with ExtraFormats { +sealed abstract class JsonProtocolLow extends DefaultJsonProtocol { implicit def NonEmptyListReader[A: JsonReader]: JsonReader[NonEmptyList[A]] = { case JsArray(hd +: tl) => NonEmptyList(hd.convertTo[A], tl map (_.convertTo[A]): _*) diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/ResponseFormats.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/ResponseFormats.scala index 0c607e398a..0b74ccd57e 100644 
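Note on the `JsonProtocol.scala` hunks above: the now-private `taggedJsonFormat` is what keeps tagged strings such as `Party` serializing as bare JSON strings; `Tag.subst` substitutes the tag into an existing `JsonFormat` with no runtime wrapping. A minimal sketch of the pattern (`PartyTag` and the demo are illustrative, not part of the codebase):

import scalaz.{@@, Tag}
import spray.json.{DefaultJsonProtocol, JsonFormat, enrichAny}

object TaggedFormatSketch extends DefaultJsonProtocol {
  sealed trait PartyTag
  type Party = String @@ PartyTag

  // Tag.subst rewrites JsonFormat[String] into JsonFormat[String @@ PartyTag];
  // the tagged value shares the plain string representation on the wire.
  private def taggedJsonFormat[A: JsonFormat, T]: JsonFormat[A @@ T] =
    Tag.subst(implicitly[JsonFormat[A]])

  implicit val partyFormat: JsonFormat[Party] = taggedJsonFormat

  def demo: String = Tag.of[PartyTag]("alice").toJson.compactPrint // "alice", quoted
}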
--- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/ResponseFormats.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/ResponseFormats.scala @@ -16,12 +16,6 @@ import spray.json.* import scala.concurrent.{ExecutionContext, Future} object ResponseFormats { - def resultJsObject[A: JsonWriter](a: A): JsObject = - resultJsObject(a.toJson) - - def resultJsObject(a: JsValue): JsObject = - JsObject(("status", JsNumber(OK.intValue)), ("result", a)) - def resultJsObject[E: Show]( jsVals: Source[E \/ JsValue, NotUsed], warnings: Option[JsValue], diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/SprayJson.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/SprayJson.scala index a03ec34de1..5fb60c62ef 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/SprayJson.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/SprayJson.scala @@ -4,16 +4,8 @@ package com.digitalasset.canton.http.json import com.daml.scalautil.ExceptionOps.* -import scalaz.syntax.traverse.* -import scalaz.{-\/, Show, Traverse, \/, \/-} -import spray.json.{ - JsObject, - JsValue, - JsonParser, - JsonReader, - JsonWriter, - enrichAny as `sj enrichAny`, -} +import scalaz.{Show, \/} +import spray.json.{JsValue, JsonParser, JsonWriter, enrichAny as `sj enrichAny`} object SprayJson { sealed abstract class Error extends Product with Serializable @@ -42,50 +34,7 @@ object SprayJson { def parse(str: String): JsonReaderError \/ JsValue = \/.attempt(JsonParser(str))(e => JsonReaderError(str, e.description)) - def decode[A: JsonReader](str: String): JsonReaderError \/ A = - for { - jsValue <- parse(str) - a <- decode(jsValue) - } yield a - - def decode[A: JsonReader](a: JsValue): JsonReaderError \/ A = - \/.attempt(a.convertTo[A])(e => JsonReaderError(a.toString, e.description)) - - def decode1[F[_], A](str: String)(implicit - ev1: JsonReader[F[JsValue]], - ev2: Traverse[F], - ev3: JsonReader[A], - ): JsonReaderError \/ F[A] = - parse(str).flatMap(decode1[F, A]) - - def decode1[F[_], A](a: JsValue)(implicit - ev1: JsonReader[F[JsValue]], - ev2: Traverse[F], - ev3: JsonReader[A], - ): JsonReaderError \/ F[A] = - for { - fj <- decode[F[JsValue]](a) - fa <- fj.traverse(decode[A](_)) - } yield fa - - def encode[A: JsonWriter](a: A): JsonWriterError \/ JsValue = - \/.attempt(a.toJson)(e => JsonWriterError(a, e.description)) - def encodeUnsafe[A: JsonWriter](a: A): JsValue = a.toJson - def encode1[F[_], A](fa: F[A])(implicit - ev1: JsonWriter[F[JsValue]], - ev2: Traverse[F], - ev3: JsonWriter[A], - ): JsonWriterError \/ JsValue = - for { - fj <- fa.traverse(encode[A](_)) - jsVal <- encode[F[JsValue]](fj) - } yield jsVal - - def mustBeJsObject(a: JsValue): JsonError \/ JsObject = a match { - case b: JsObject => \/-(b) - case _ => -\/(JsonError(s"Expected JsObject, got: ${a: JsValue}")) - } } diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/ApiValueToJsValueConverter.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/ApiValueToJsValueConverter.scala deleted file mode 100644 index 49cbb9b9f8..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/ApiValueToJsValueConverter.scala +++ /dev/null @@ -1,31 +0,0 @@ -// 
Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json.v1 - -import com.daml.ledger.api.v2 as lav2 -import com.digitalasset.canton.http.json.JsonError -import com.digitalasset.canton.http.json.JsonProtocol.LfValueCodec -import com.digitalasset.canton.http.util.ApiValueToLfValueConverter -import scalaz.std.list.* -import scalaz.syntax.show.* -import scalaz.syntax.traverse.* -import scalaz.{\/, \/-} -import spray.json.{JsObject, JsValue} - -class ApiValueToJsValueConverter(apiToLf: ApiValueToLfValueConverter.ApiValueToLfValue) { - - def apiValueToJsValue(a: lav2.value.Value): JsonError \/ JsValue = - apiToLf(a) - .map(LfValueCodec.apiValueToJsValue) - .leftMap(x => JsonError(x.shows)) - - def apiRecordToJsObject[O >: JsObject](a: lav2.value.Record): JsonError \/ O = - a.fields.toList.traverse(convertField).map(fs => JsObject(fs.toMap)) - - private def convertField(field: lav2.value.RecordField): JsonError \/ (String, JsValue) = - field.value match { - case None => \/-(field.label -> JsObject.empty) - case Some(v) => apiValueToJsValue(v).map(field.label -> _) - } -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/CommandService.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/CommandService.scala deleted file mode 100644 index f377dec8cb..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/CommandService.scala +++ /dev/null @@ -1,390 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json.v1 - -import com.daml.jwt.Jwt -import com.daml.ledger.api.v2 as lav2 -import com.daml.ledger.api.v2.commands.Commands.DeduplicationPeriod -import com.daml.logging.LoggingContextOf -import com.daml.logging.LoggingContextOf.{label, withEnrichedLoggingContext} -import com.digitalasset.canton.http.util.ClientUtil.uniqueCommandId -import com.digitalasset.canton.http.util.FutureUtil.* -import com.digitalasset.canton.http.util.IdentifierConverters.refApiIdentifier -import com.digitalasset.canton.http.util.Logging.{InstanceUUID, RequestID} -import com.digitalasset.canton.http.util.{Commands, Transactions} -import com.digitalasset.canton.http.{ - ActiveContract, - Choice, - CommandMeta, - CompletionOffset, - Contract, - ContractTypeId, - CreateAndExerciseCommand, - CreateCommand, - CreateCommandResponse, - ExerciseCommand, - ExerciseResponse, - JwtWritePayload, - Offset, - ResolvedContractRef, -} -import com.digitalasset.canton.ledger.api.refinements.ApiTypes as lar -import com.digitalasset.canton.ledger.api.util.TransactionTreeOps.TransactionTreeOps -import com.digitalasset.canton.ledger.service.Grpc.StatusEnvelope -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.tracing.{NoTracing, TraceContext} -import com.digitalasset.daml.lf.data.ImmArray.ImmArraySeq -import scalaz.std.scalaFuture.* -import scalaz.syntax.show.* -import scalaz.syntax.std.option.* -import scalaz.syntax.traverse.* -import scalaz.{-\/, EitherT, \/, \/-} - -import scala.annotation.nowarn -import scala.concurrent.{ExecutionContext, Future} -import scala.util.{Failure, Success} - -import LedgerClientJwt.Grpc - -// TODO(#23504) remove when TransactionTrees are removed from the 
API -@nowarn("cat=deprecation") -class CommandService( - submitAndWaitForTransaction: LedgerClientJwt.SubmitAndWaitForTransaction, - submitAndWaitForTransactionTree: LedgerClientJwt.SubmitAndWaitForTransactionTree, - val loggerFactory: NamedLoggerFactory, -)(implicit ec: ExecutionContext) - extends NamedLogging - with NoTracing { - - import CommandService.* - - private def withTemplateLoggingContext[CtId <: ContractTypeId.RequiredPkg, T]( - templateId: CtId - )(implicit lc: LoggingContextOf[T]): withEnrichedLoggingContext[CtId, T] = - withEnrichedLoggingContext( - label[CtId], - "template_id" -> templateId.toString, - ) - - private def withTemplateChoiceLoggingContext[CtId <: ContractTypeId.RequiredPkg, T]( - templateId: CtId, - choice: Choice, - )(implicit lc: LoggingContextOf[T]): withEnrichedLoggingContext[Choice, CtId with T] = - withTemplateLoggingContext(templateId).run( - withEnrichedLoggingContext( - label[Choice], - "choice" -> choice.toString, - )(_) - ) - - def create( - jwt: Jwt, - jwtPayload: JwtWritePayload, - input: CreateCommand[lav2.value.Record, ContractTypeId.Template.RequiredPkg], - )(implicit - lc: LoggingContextOf[InstanceUUID with RequestID], - traceContext: TraceContext, - ): Future[Error \/ CreateCommandResponse[lav2.value.Value]] = - withTemplateLoggingContext(input.templateId).run { implicit lc => - logger.trace(s"sending create command to ledger, ${lc.makeString}") - val command = createCommand(input) - val request = submitAndWaitRequest(jwtPayload, input.meta, command, "create") - val et: ET[CreateCommandResponse[lav2.value.Value]] = for { - response <- logResult( - Symbol("create"), - submitAndWaitForTransaction(jwt, request)(traceContext)(lc), - ) - contract <- either(exactlyOneActiveContract(response)) - } yield CreateCommandResponse( - contract.contractId, - contract.templateId, - contract.key, - contract.payload, - contract.signatories, - contract.observers, - CompletionOffset( - response.transaction.map(_.offset).map(Offset.fromLong).getOrElse("") - ), - ) - et.run - } - - def exercise( - jwt: Jwt, - jwtPayload: JwtWritePayload, - input: ExerciseCommand.RequiredPkg[lav2.value.Value, ExerciseCommandRef], - )(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): Future[Error \/ ExerciseResponse[lav2.value.Value]] = - withEnrichedLoggingContext( - label[lav2.value.Value], - "contract_id" -> input.argument.getContractId, - ).run(implicit lc => - withTemplateChoiceLoggingContext(input.reference.fold(_._1, _._1), input.choice) - .run { implicit lc => - logger.trace(s"sending exercise command to ledger, ${lc.makeString}") - val command = exerciseCommand(input) - - val request = submitAndWaitRequest(jwtPayload, input.meta, command, "exercise") - - val et: ET[ExerciseResponse[lav2.value.Value]] = - for { - response <- - logResult(Symbol("exercise"), submitAndWaitForTransactionTree(jwt, request)(lc)) - exerciseResult <- either(exerciseResult(response)) - contracts <- either(contracts(response)) - } yield ExerciseResponse( - exerciseResult, - contracts, - CompletionOffset( - response.transaction.map(_.offset).map(Offset.fromLong).getOrElse("") - ), - ) - - et.run - } - ) - - def createAndExercise( - jwt: Jwt, - jwtPayload: JwtWritePayload, - input: CreateAndExerciseCommand.LAVResolved, - )(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): Future[Error \/ ExerciseResponse[lav2.value.Value]] = - withTemplateChoiceLoggingContext(input.templateId, input.choice).run { implicit lc => - logger.trace(s"sending create and exercise command to 
ledger, ${lc.makeString}") - val command = createAndExerciseCommand(input) - val request = submitAndWaitRequest(jwtPayload, input.meta, command, "createAndExercise") - val et: ET[ExerciseResponse[lav2.value.Value]] = for { - response <- logResult( - Symbol("createAndExercise"), - submitAndWaitForTransactionTree(jwt, request)(lc), - ) - exerciseResult <- either(exerciseResult(response)) - contracts <- either(contracts(response)) - } yield ExerciseResponse( - exerciseResult, - contracts, - CompletionOffset( - response.transaction.map(_.offset).map(Offset.fromLong).getOrElse("") - ), - ) - et.run - } - - private def logResult[A]( - op: Symbol, - fa: Grpc.EFuture[Grpc.Category.SubmitError, A], - )(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): ET[A] = { - val opName = op.name - EitherT { - fa.transformWith { - case Failure(e) => - Future.successful(-\/(e match { - case StatusEnvelope(status) => GrpcError(status) - case _ => InternalError(Some(op), e) - })) - case Success(-\/(e)) => - import Grpc.Category.* - val tagged = e.e match { - case PermissionDenied => -\/(PermissionDenied) - case InvalidArgument => \/-(InvalidArgument) - } - Future.successful(-\/(ClientError(tagged, e.message))) - case Success(\/-(a)) => - logger.debug(s"$opName success: $a, ${lc.makeString}") - Future.successful(\/-(a)) - } - } - } - - private def createCommand( - input: CreateCommand[lav2.value.Record, ContractTypeId.Template.RequiredPkg] - ): lav2.commands.Command.Command.Create = - Commands.create(refApiIdentifier(input.templateId), input.payload) - - private def exerciseCommand( - input: ExerciseCommand.RequiredPkg[lav2.value.Value, ExerciseCommandRef] - ): lav2.commands.Command.Command = { - val choiceSource = - input.choiceInterfaceId getOrElse input.reference.fold(_._1, _._1) - input.reference match { - case -\/((templateId, contractKey)) => - Commands.exerciseByKey( - templateId = refApiIdentifier(templateId), - // TODO(#13303) Re-adapted from Daml repo: daml-14549 somehow pass choiceSource - contractKey = contractKey, - choice = input.choice, - argument = input.argument, - ) - case \/-((_, contractId)) => - Commands.exercise( - templateId = refApiIdentifier(choiceSource), - contractId = contractId, - choice = input.choice, - argument = input.argument, - ) - } - } - - // TODO(#13303) Re-adapted from Daml repo: daml-14549 somehow use the choiceInterfaceId - private def createAndExerciseCommand( - input: CreateAndExerciseCommand.LAVResolved - ): lav2.commands.Command.Command.CreateAndExercise = - Commands - .createAndExercise( - refApiIdentifier(input.templateId), - input.payload, - input.choice, - input.argument, - ) - - private def submitAndWaitRequest( - jwtPayload: JwtWritePayload, - meta: Option[CommandMeta.LAV], - command: lav2.commands.Command.Command, - commandKind: String, - )(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): lav2.command_service.SubmitAndWaitRequest = { - val commandId: lar.CommandId = meta.flatMap(_.commandId).getOrElse(uniqueCommandId()) - val actAs = meta.flatMap(_.actAs) getOrElse jwtPayload.submitter - val readAs = meta.flatMap(_.readAs) getOrElse jwtPayload.readAs - withEnrichedLoggingContext( - label[lar.CommandId], - "command_id" -> commandId.toString, - ) - .run { implicit lc => - logger.info( - s"Submitting $commandKind command, ${lc.makeString}" - ) - Commands.submitAndWaitRequest( - jwtPayload.userId, - commandId, - actAs, - readAs, - command, - meta - .flatMap(_.deduplicationPeriod) - .map(_.toProto) - 
.getOrElse(DeduplicationPeriod.Empty), - submissionId = meta.flatMap(_.submissionId), - workflowId = meta.flatMap(_.workflowId), - meta.flatMap(_.disclosedContracts) getOrElse Seq.empty, - meta.flatMap(_.synchronizerId), - meta.flatMap(_.packageIdSelectionPreference) getOrElse Seq.empty, - ) - } - } - - private def exactlyOneActiveContract( - response: lav2.command_service.SubmitAndWaitForTransactionResponse - ): Error \/ ActiveContract[ContractTypeId.Template.ResolvedPkgId, lav2.value.Value] = - activeContracts(response).flatMap { - case Seq(x) => \/-(x) - case xs @ _ => - -\/( - InternalError( - Some(Symbol("exactlyOneActiveContract")), - s"Expected exactly one active contract, got: $xs", - ) - ) - } - - private def activeContracts( - response: lav2.command_service.SubmitAndWaitForTransactionResponse - ): Error \/ ImmArraySeq[ActiveContract[ContractTypeId.Template.ResolvedPkgId, lav2.value.Value]] = - response.transaction - .toRightDisjunction( - InternalError( - Some(Symbol("activeContracts")), - s"Received response without transaction: $response", - ) - ) - .flatMap(activeContracts) - - private def activeContracts( - tx: lav2.transaction.Transaction - ): Error \/ ImmArraySeq[ActiveContract[ContractTypeId.Template.ResolvedPkgId, lav2.value.Value]] = - Transactions - .allCreatedEvents(tx) - .traverse(ActiveContract.fromLedgerApi(ActiveContract.ExtractAs.Template, _)) - .leftMap(e => InternalError(Some(Symbol("activeContracts")), e.shows)) - - // TODO(#23504) remove when SubmitAndWaitForTransactionTreeResponse is removed from the API - @nowarn("cat=deprecation") - private def contracts( - response: lav2.command_service.SubmitAndWaitForTransactionTreeResponse - ): Error \/ List[Contract[lav2.value.Value]] = - response.transaction - .toRightDisjunction( - InternalError( - Some(Symbol("contracts")), - s"Received response without transaction: $response", - ) - ) - .flatMap(contracts) - - // TODO(#23504) remove when TransactionTree is removed from the API - @nowarn("cat=deprecation") - private def contracts( - tx: lav2.transaction.TransactionTree - ): Error \/ List[Contract[lav2.value.Value]] = - Contract - .fromTransactionTree(tx) - .leftMap(e => InternalError(Some(Symbol("contracts")), e.shows)) - .map(_.toList) - - // TODO(#23504) remove when TransactionTree is removed from the API - @nowarn("cat=deprecation") - private def exerciseResult( - a: lav2.command_service.SubmitAndWaitForTransactionTreeResponse - ): Error \/ lav2.value.Value = { - val result: Option[lav2.value.Value] = for { - transaction <- a.transaction: Option[lav2.transaction.TransactionTree] - exercised <- firstExercisedEvent(transaction): Option[lav2.event.ExercisedEvent] - exResult <- exercised.exerciseResult: Option[lav2.value.Value] - } yield exResult - - result.toRightDisjunction( - InternalError( - Some(Symbol("choiceArgument")), - s"Cannot get exerciseResult from the first ExercisedEvent of gRPC response: ${a.toString}", - ) - ) - } - - // TODO(#23504) remove when TransactionTree is removed from the API - @nowarn("cat=deprecation") - private def firstExercisedEvent( - tx: lav2.transaction.TransactionTree - ): Option[lav2.event.ExercisedEvent] = { - val lookup: Int => Option[lav2.event.ExercisedEvent] = id => - tx.eventsById.get(id).flatMap(_.kind.exercised) - tx.rootNodeIds().collectFirst(Function unlift lookup) - } -} - -object CommandService { - sealed abstract class Error extends Product with Serializable - final case class ClientError( - id: Grpc.Category.PermissionDenied \/ Grpc.Category.InvalidArgument, - 
message: String, - ) extends Error - final case class GrpcError(status: com.google.rpc.Status) extends Error - final case class InternalError(id: Option[Symbol], error: Throwable) extends Error - object InternalError { - def apply(id: Option[Symbol], message: String): InternalError = - InternalError(id, new Exception(message)) - def apply(id: Option[Symbol], error: Throwable): InternalError = - InternalError(id, error) - } - - private type ET[A] = EitherT[Future, Error, A] - - type ExerciseCommandRef = ResolvedContractRef[lav2.value.Value] -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/ContractList.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/ContractList.scala deleted file mode 100644 index 126ddb34d7..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/ContractList.scala +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.endpoints - -import com.daml.jwt.Jwt -import com.daml.logging.LoggingContextOf -import com.daml.logging.LoggingContextOf.withEnrichedLoggingContext -import com.digitalasset.canton.http.Endpoints.{ET, IntoEndpointsError} -import com.digitalasset.canton.http.EndpointsCompanion.* -import com.digitalasset.canton.http.json.* -import com.digitalasset.canton.http.json.v1.ContractsService.SearchResult -import com.digitalasset.canton.http.json.v1.{ContractsService, RouteSetup} -import com.digitalasset.canton.http.metrics.HttpApiMetrics -import com.digitalasset.canton.http.util.FutureUtil.{either, eitherT} -import com.digitalasset.canton.http.util.JwtParties.* -import com.digitalasset.canton.http.util.Logging.{InstanceUUID, RequestID} -import com.digitalasset.canton.http.{ - ActiveContract, - FetchRequest, - GetActiveContractsRequest, - JwtPayload, - OkResponse, - SyncResponse, - json, -} -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.tracing.NoTracing -import com.digitalasset.daml.lf.value.Value as LfValue -import org.apache.pekko.NotUsed -import org.apache.pekko.http.scaladsl.model.* -import org.apache.pekko.stream.scaladsl.{Flow, Source} -import scalaz.std.scalaFuture.* -import scalaz.syntax.std.option.* -import scalaz.syntax.traverse.* -import scalaz.{-\/, EitherT, \/, \/-} -import spray.json.* - -import scala.concurrent.{ExecutionContext, Future} - -private[http] final class ContractList( - routeSetup: RouteSetup, - decoder: ApiJsonDecoder, - contractsService: ContractsService, - val loggerFactory: NamedLoggerFactory, -)(implicit ec: ExecutionContext) - extends NamedLogging - with NoTracing { - import ContractList.* - import routeSetup.*, RouteSetup.* - import com.digitalasset.canton.http.json.JsonProtocol.* - import com.digitalasset.canton.http.util.ErrorOps.* - - def fetch(req: HttpRequest)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID], - ec: ExecutionContext, - metrics: HttpApiMetrics, - ): ET[SyncResponse[JsValue]] = - for { - parseAndDecodeTimer <- getParseAndDecodeTimerCtx() - input <- inputJsValAndJwtPayload(req): ET[(Jwt, JwtPayload, JsValue)] - - (jwt, jwtPayload, reqBody) = input - - jsVal <- withJwtPayloadLoggingContext(jwtPayload) { _ => implicit lc => - logger.debug(s"/v1/fetch reqBody: $reqBody, ${lc.makeString}") - for { - fr <- - either( - SprayJson - 
.decode[FetchRequest[JsValue]](reqBody) - .liftErr[Error](InvalidUserInput.apply) - ) - .flatMap( - _.traverseLocator( - decoder - .decodeContractLocatorKey(_, jwt) - .liftErr(InvalidUserInput.apply) - ) - ): ET[FetchRequest[LfValue]] - _ <- EitherT.pure(parseAndDecodeTimer.stop()) - _ = logger.debug(s"/v1/fetch fr: $fr, ${lc.makeString}") - - _ <- either(ensureReadAsAllowedByJwt(fr.readAs, jwtPayload)) - ac <- contractsService.lookup(jwt, jwtPayload, fr) - - jsVal <- either( - ac.cata(x => toJsValue(x), \/-(JsNull)) - ): ET[JsValue] - } yield jsVal - } - } yield OkResponse(jsVal) - - def retrieveAll(req: HttpRequest)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID], - metrics: HttpApiMetrics, - ): Future[Error \/ SearchResult[Error \/ JsValue]] = for { - parseAndDecodeTimer <- Future( - metrics.incomingJsonParsingAndValidationTimer.startAsync() - ) - res <- inputAndJwtPayload[JwtPayload](req).run.map { - _.map { case (jwt, jwtPayload, _) => - parseAndDecodeTimer.stop() - withJwtPayloadLoggingContext(jwtPayload) { _ => implicit lc => - val result: SearchResult[ - ContractsService.Error \/ ActiveContract.ResolvedCtTyId[LfValue] - ] = - contractsService.retrieveAll(jwt, jwtPayload) - - SyncResponse.covariant.map(result) { source => - source - .via(handleSourceFailure) - .map(_.flatMap(lfAcToJsValue)): Source[Error \/ JsValue, NotUsed] - } - } - } - } - } yield res - - def query(req: HttpRequest)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID], - metrics: HttpApiMetrics, - ): Future[Error \/ SearchResult[Error \/ JsValue]] = { - for { - it <- inputAndJwtPayload[JwtPayload](req).leftMap(identity[Error]) - (jwt, jwtPayload, reqBody) = it - res <- withJwtPayloadLoggingContext(jwtPayload) { _ => implicit lc => - val res = for { - cmd <- SprayJson - .decode[GetActiveContractsRequest](reqBody) - .liftErr[Error](InvalidUserInput.apply) - _ <- ensureReadAsAllowedByJwt(cmd.readAs, jwtPayload) - } yield withEnrichedLoggingContext( - LoggingContextOf.label[GetActiveContractsRequest], - "cmd" -> cmd.toString, - ).run { implicit lc => - logger.debug(s"Processing a query request, ${lc.makeString}") - contractsService - .search(jwt, jwtPayload, cmd) - .map( - SyncResponse.covariant.map(_)( - _.via(handleSourceFailure) - .map(_.flatMap(toJsValue[ActiveContract.ResolvedCtTyId[JsValue]](_))) - ) - ) - } - eitherT(res.sequence) - } - } yield res - }.run - - private def handleSourceFailure[E, A](implicit - E: IntoEndpointsError[E] - ): Flow[E \/ A, Error \/ A, NotUsed] = - Flow - .fromFunction((_: E \/ A).leftMap(E.run)) - .recover(Error.fromThrowable andThen (-\/(_))) -} - -private[endpoints] object ContractList { - import json.JsonProtocol.* - import com.digitalasset.canton.http.util.ErrorOps.* - - private def lfValueToJsValue(a: LfValue): Error \/ JsValue = - \/.attempt(LfValueCodec.apiValueToJsValue(a))(identity).liftErr(ServerError.fromMsg) - - private def lfAcToJsValue(a: ActiveContract.ResolvedCtTyId[LfValue]): Error \/ JsValue = - for { - b <- a.traverse(lfValueToJsValue): Error \/ ActiveContract.ResolvedCtTyId[JsValue] - c <- toJsValue(b) - } yield c - - private def toJsValue[A: JsonWriter](a: A): Error \/ JsValue = - SprayJson.encode(a).liftErr(ServerError.fromMsg) -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/ContractsService.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/ContractsService.scala deleted file mode 100644 index 0fffb9c769..0000000000 --- 
a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/ContractsService.scala +++ /dev/null @@ -1,599 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json.v1 - -import com.daml.jwt.Jwt -import com.daml.ledger.api.v2 as lav2 -import com.daml.ledger.api.v2.event_query_service.GetEventsByContractIdResponse -import com.daml.ledger.api.v2.state_service.GetActiveContractsResponse -import com.daml.ledger.api.v2.transaction_filter.TransactionFilter -import com.daml.logging.LoggingContextOf -import com.daml.metrics.Timed -import com.daml.metrics.api.MetricHandle -import com.daml.nonempty.NonEmpty -import com.daml.nonempty.NonEmptyReturningOps.* -import com.daml.scalautil.ExceptionOps.* -import com.digitalasset.canton.fetchcontracts.AcsTxStreams.transactionFilter -import com.digitalasset.canton.fetchcontracts.util.ContractStreamStep.{Acs, LiveBegin} -import com.digitalasset.canton.fetchcontracts.util.GraphExtensions.* -import com.digitalasset.canton.fetchcontracts.util.{ - AbsoluteBookmark, - ContractStreamStep, - InsertDeleteStep, -} -import com.digitalasset.canton.http.Endpoints.ET -import com.digitalasset.canton.http.EndpointsCompanion.NotFound -import com.digitalasset.canton.http.json.JsonProtocol.LfValueCodec -import com.digitalasset.canton.http.metrics.HttpApiMetrics -import com.digitalasset.canton.http.util.ApiValueToLfValueConverter -import com.digitalasset.canton.http.util.FutureUtil.{either, eitherT} -import com.digitalasset.canton.http.util.Logging.{InstanceUUID, RequestID} -import com.digitalasset.canton.http.{ - ActiveContract, - ContractId, - ContractLocator, - ContractTypeId, - ContractTypeRef, - EndpointsCompanion, - EnrichedContractId, - EnrichedContractKey, - ErrorResponse, - FetchRequest, - GetActiveContractsRequest, - JwtPayload, - Offset, - OkResponse, - Party, - PartySet, - ResolvedContractRef, - ResolvedQuery, - StartingOffset, - SyncResponse, - UnknownTemplateIds, -} -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.tracing.NoTracing -import com.digitalasset.daml.lf -import org.apache.pekko.NotUsed -import org.apache.pekko.http.scaladsl.model.StatusCodes -import org.apache.pekko.stream.scaladsl.* -import scalaz.std.scalaFuture.* -import scalaz.syntax.show.* -import scalaz.syntax.std.option.* -import scalaz.syntax.traverse.* -import scalaz.{-\/, EitherT, Show, \/, \/-} -import spray.json.JsValue - -import scala.annotation.nowarn -import scala.concurrent.{ExecutionContext, Future} - -import LedgerClientJwt.Terminates -import PackageService.ResolveContractTypeId.Overload - -class ContractsService( - resolveContractTypeId: PackageService.ResolveContractTypeId, - allTemplateIds: PackageService.AllTemplateIds, - getContractByContractId: LedgerClientJwt.GetContractByContractId, - getActiveContracts: LedgerClientJwt.GetActiveContracts, - getCreatesAndArchivesSince: LedgerClientJwt.GetCreatesAndArchivesSince, - getLedgerEnd: LedgerClientJwt.GetLedgerEnd, - val loggerFactory: NamedLoggerFactory, -)(implicit ec: ExecutionContext) - extends NamedLogging - with NoTracing { - import ContractsService.* - - private type ActiveContractO = Option[ActiveContract.ResolvedCtTyId[JsValue]] - - def resolveContractReference( - jwt: Jwt, - parties: PartySet, - contractLocator: ContractLocator[LfValue], - )(implicit - lc: LoggingContextOf[InstanceUUID with RequestID], - 
metrics: HttpApiMetrics, - ): ET[ResolvedContractRef[LfValue]] = { - import Overload.Template - contractLocator match { - case EnrichedContractKey(templateId, key) => - _resolveContractTypeId(jwt, templateId).map(x => -\/(x.original -> key)) - case EnrichedContractId(Some(templateId), contractId) => - _resolveContractTypeId(jwt, templateId).map(x => \/-(x.original -> contractId)) - case EnrichedContractId(None, contractId) => - findByContractId(jwt, parties, contractId) - .flatMap { - case Some(value) => - EitherT.pure(value): ET[ActiveContract.ResolvedCtTyId[JsValue]] - case None => - EitherT.pureLeft( - invalidUserInput( - s"Could not resolve contract reference for contract id $contractId" - ) - ): ET[ - ActiveContract.ResolvedCtTyId[JsValue] - ] - } - .map(a => \/-(a.templateId.map(lf.data.Ref.PackageRef.Id.apply) -> a.contractId)) - } - } - - def lookup( - jwt: Jwt, - jwtPayload: JwtPayload, - req: FetchRequest[LfValue], - )(implicit - lc: LoggingContextOf[InstanceUUID with RequestID], - metrics: HttpApiMetrics, - ): ET[ActiveContractO] = { - val readAs = req.readAs.cata(_.toSet1, jwtPayload.parties) - req.locator match { - case EnrichedContractKey(_templateId, _contractKey) => - either(-\/(NotFound("lookup by contract key is not supported"))) - // TODO(#16065) - // findByContractKey(jwt, readAs, templateId, contractKey) - case EnrichedContractId(_templateId, contractId) => - findByContractId(jwt, readAs, contractId) - } - } - -// TODO(#16065) -// private[this] def findByContractKey( -// jwt: Jwt, -// parties: PartySet, -// templateId: ContractTypeId.Template.RequiredPkg, -// contractKey: LfValue, -// )(implicit -// lc: LoggingContextOf[InstanceUUID with RequestID], -// metrics: HttpApiMetrics, -// ): ET[ActiveContractO] = -// timedETFuture(metrics.dbFindByContractKey)( -// for { -// resolvedTemplateId <- _resolveContractTypeId( -// jwt, -// templateId, -// ) -// keyApiValue <- -// EitherT( -// Future.successful( -// \/.fromEither( -// LfEngineToApi.lfValueToApiValue(verbose = true, contractKey) -// ).leftMap { err => -// serverError(s"Cannot convert key $contractKey from LF value to API value: $err") -// } -// ) -// ) -// response <- EitherT( -// getContractByContractKey( -// jwt, -// keyApiValue, -// IdentifierConverters.apiIdentifier(resolvedTemplateId), -// parties, -// "", -// )(lc).map(_.leftMap { err => -// unauthorized( -// s"Unauthorized access for fetching contract with key $contractKey for parties $parties: $err" -// ) -// }) -// ) -// result <- response match { -// case GetEventsByContractKeyResponse(None, Some(_), _) => -// EitherT.pureLeft( -// serverError( -// s"Found archived event in response without a matching create for key $contractKey" -// ) -// ): ET[ActiveContractO] -// case GetEventsByContractKeyResponse(_, Some(_), _) | -// GetEventsByContractKeyResponse(None, None, _) => -// logger.debug(s"Contract archived for contract key $contractKey") -// EitherT.pure(None): ET[ActiveContractO] -// case GetEventsByContractKeyResponse(Some(createdEvent), None, _) => -// EitherT.either( -// ActiveContract -// .fromLedgerApi(ActiveContract.IgnoreInterface, createdEvent) -// .leftMap(_.shows) -// .flatMap(apiAcToLfAc(_).leftMap(_.shows)) -// .flatMap(_.traverse(lfValueToJsValue(_).leftMap(_.shows))) -// .leftMap { err => -// serverError(s"Error processing create event for active contract: $err") -// } -// .map(Some(_)) -// ): ET[ActiveContractO] -// } -// } yield result -// ) - - private[this] def findByContractId( - jwt: Jwt, - parties: PartySet, - contractId: ContractId, - 
)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID], - metrics: HttpApiMetrics, - ): ET[ActiveContractO] = - timedETFuture(metrics.dbFindByContractId)( - eitherT( - getContractByContractId(jwt, contractId, parties: Set[Party])(lc) - .map(_.leftMap { err => - unauthorized( - s"Unauthorized access for fetching contract with id $contractId for parties $parties: $err" - ) - }) - .map(_.flatMap { - case GetEventsByContractIdResponse(_, Some(_)) => - logger.debug(s"Contract archived for contract id $contractId") - \/-(Option.empty) - case GetEventsByContractIdResponse(Some(created), None) - if created.createdEvent.nonEmpty => - ActiveContract - .fromLedgerApi( - ActiveContract.ExtractAs.Template, - created.createdEvent.getOrElse( - throw new RuntimeException("unreachable since created.createdEvent is nonEmpty") - ), - ) - .leftMap(_.shows) - .flatMap(apiAcToLfAc(_).leftMap(_.shows)) - .flatMap(_.traverse(lfValueToJsValue(_).leftMap(_.shows))) - .leftMap { err => - serverError(s"Error processing create event for active contract: $err") - } - .map(Some(_)) - case GetEventsByContractIdResponse(_, None) => - logger.debug(s"Contract with id $contractId not found") - \/-(Option.empty) - }) - ) - ) - - private def serverError( - errorMessage: String - )(implicit lc: LoggingContextOf[InstanceUUID with RequestID]): EndpointsCompanion.Error = { - logger.error(s"$errorMessage, ${lc.makeString}") - EndpointsCompanion.ServerError(new RuntimeException(errorMessage)) - } - - private def invalidUserInput( - message: String - )(implicit lc: LoggingContextOf[InstanceUUID with RequestID]): EndpointsCompanion.Error = { - logger.info(s"$message, ${lc.makeString}") - EndpointsCompanion.InvalidUserInput(message) - } - - private def unauthorized( - message: String - )(implicit lc: LoggingContextOf[InstanceUUID with RequestID]): EndpointsCompanion.Error = { - logger.info(s"$message, ${lc.makeString}") - EndpointsCompanion.Unauthorized(message) - } - - private def _resolveContractTypeId[U, R[T] <: ContractTypeId[T]]( - jwt: Jwt, - templateId: U with ContractTypeId.RequiredPkg, - )(implicit - lc: LoggingContextOf[InstanceUUID with RequestID], - overload: Overload[U, R], - ): ET[ContractTypeRef[R]] = - eitherT( - resolveContractTypeId[U, R](jwt)(templateId) - .map(_.leftMap { - case PackageService.InputError(message) => - invalidUserInput( - s"Invalid user input detected when resolving contract type id for templateId $templateId: $message" - ) - case PackageService.ServerError(message) => - serverError( - s"Error encountered when resolving contract type id for templateId $templateId: $message" - ) - }.flatMap(_.toRightDisjunction { - invalidUserInput(s"Template for id $templateId not found") - })) - ) - - private def timedETFuture[R](timer: MetricHandle.Timer)(f: ET[R]): ET[R] = - EitherT.eitherT(Timed.future(timer, f.run)) - - private[this] def search: Search = SearchInMemory - - private object SearchInMemory extends Search { - type LfV = LfValue - override val lfvToJsValue = SearchValueFormat(lfValueToJsValue) - - override def search(ctx: SearchContext.QueryLang)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID], - metrics: HttpApiMetrics, - ) = { - import ctx.{jwt, parties, templateIds} - searchInMemory( - jwt, - parties, - templateIds, - ) - } - } - - def retrieveAll( - jwt: Jwt, - jwtPayload: JwtPayload, - )(implicit - lc: LoggingContextOf[InstanceUUID] - ): SearchResult[Error \/ ActiveContract.ResolvedCtTyId[LfValue]] = - OkResponse( - Source - .future(allTemplateIds(lc)(jwt)) - .flatMapConcat( 
- Source(_).flatMapConcat(templateId => - searchInMemory(jwt, jwtPayload.parties, ResolvedQuery(templateId)) - ) - ) - ) - - def search( - jwt: Jwt, - jwtPayload: JwtPayload, - request: GetActiveContractsRequest, - )(implicit - lc: LoggingContextOf[InstanceUUID with RequestID], - metrics: HttpApiMetrics, - ): Future[SearchResult[Error \/ ActiveContract.ResolvedCtTyId[JsValue]]] = - search( - jwt, - request.readAs.cata((_.toSet1), jwtPayload.parties), - request.templateIds, - ) - - def search( - jwt: Jwt, - parties: PartySet, - templateIds: NonEmpty[Set[ContractTypeId.RequiredPkg]], - )(implicit - lc: LoggingContextOf[InstanceUUID with RequestID], - metrics: HttpApiMetrics, - ): Future[SearchResult[Error \/ ActiveContract.ResolvedCtTyId[JsValue]]] = for { - res <- resolveContractTypeIds(jwt)(templateIds) - (resolvedContractTypeIds, unresolvedContractTypeIds) = res - - warnings: Option[UnknownTemplateIds] = - if (unresolvedContractTypeIds.isEmpty) None - else Some(UnknownTemplateIds(unresolvedContractTypeIds.toList)) - } yield { - ResolvedQuery(resolvedContractTypeIds) - .leftMap(handleResolvedQueryErrors(warnings)) - .map { resolvedQuery => - val searchCtx = SearchContext(jwt, parties, resolvedQuery) - val source = search.toFinal.search(searchCtx) - OkResponse(source, warnings) - } - .merge - } - - private def handleResolvedQueryErrors( - warnings: Option[UnknownTemplateIds] - ): ResolvedQuery.Unsupported => ErrorResponse = unsuppoerted => - mkErrorResponse(unsuppoerted.errorMsg, warnings) - - private def mkErrorResponse(errorMessage: String, warnings: Option[UnknownTemplateIds]) = - ErrorResponse( - errors = List(errorMessage), - warnings = warnings, - status = StatusCodes.BadRequest, - ) - - private[this] def searchInMemory( - jwt: Jwt, - parties: PartySet, - resolvedQuery: ResolvedQuery, - )(implicit - lc: LoggingContextOf[InstanceUUID] - ): Source[InternalError \/ ActiveContract.ResolvedCtTyId[LfValue], NotUsed] = { - logger.debug( - s"Searching in memory, parties: $parties, resolvedQuery: $resolvedQuery" - ) - - type Ac = ActiveContract.ResolvedCtTyId[LfValue] - val empty = (Vector.empty[Error], Vector.empty[Ac]) - import InsertDeleteStep.appendForgettingDeletes - - insertDeleteStepSource(jwt, buildTransactionFilter(parties, resolvedQuery)) - .map { step => - step.toInsertDelete.partitionMapPreservingIds { apiEvent => - ActiveContract - .fromLedgerApi(ActiveContract.ExtractAs(resolvedQuery), apiEvent) - .leftMap(e => InternalError(Symbol("searchInMemory"), e.shows)) - .flatMap(apiAcToLfAc): Error \/ Ac - } - } - .fold(empty) { case ((errL, stepL), (errR, stepR)) => - (errL ++ errR, appendForgettingDeletes(stepL, stepR)) - } - .mapConcat { case (err, inserts) => - inserts.map(\/-(_)) ++ err.map(-\/(_)) - } - } - - // TODO(#23504) replace TransactionFilter with EventFormat if json v1 is not removed - @nowarn("cat=deprecation") - def liveAcsAsInsertDeleteStepSource( - jwt: Jwt, - txnFilter: TransactionFilter, - )(implicit lc: LoggingContextOf[InstanceUUID]): Source[ContractStreamStep.LAV1, NotUsed] = - getLedgerEnd(jwt)(lc) - .flatMapConcat { offset => - getActiveContracts(jwt, txnFilter, offset, true)(lc) - .map { case GetActiveContractsResponse(_, contractEntry) => - if (contractEntry.isActiveContract) { - val createdEvent = contractEntry.activeContract - .getOrElse( - throw new RuntimeException( - "unreachable, activeContract should not have been empty since contract is checked to be an active contract" - ) - ) - .createdEvent - Acs(createdEvent.toList.toVector) - } else 
LiveBegin(AbsoluteBookmark(Offset(offset))) - } - .concat(Source.single(LiveBegin(AbsoluteBookmark(Offset(offset))))) - } - - /** An ACS ++ transaction stream of `templateIds`, starting at `startOffset` and ending at - * `terminates`. - */ - // TODO(#23504) use EventFormat instead of TransactionFilter - @nowarn("cat=deprecation") - def insertDeleteStepSource( - jwt: Jwt, - txnFilter: TransactionFilter, - startOffset: Option[StartingOffset] = None, - terminates: Terminates = Terminates.AtParticipantEnd, - )(implicit - lc: LoggingContextOf[InstanceUUID] - ): Source[ContractStreamStep.LAV1, NotUsed] = { - def source = - (getLedgerEnd(jwt)(lc) - .flatMapConcat(offset => - getActiveContracts(jwt, txnFilter, offset, true)(lc) - .map(Right(_)) - .concat(Source.single(Left(offset))) - ) - via logTermination(logger, "ACS upstream")) - - val transactionsSince: String => Source[ - lav2.transaction.Transaction, - NotUsed, - ] = off => - getCreatesAndArchivesSince( - jwt, - txnFilter, - Offset.assertFromStringToLong(off), - terminates, - )(lc) via logTermination(logger, "transactions upstream") - - import com.digitalasset.canton.fetchcontracts.AcsTxStreams.{ - acsFollowingAndBoundary, - transactionsFollowingBoundary, - } - import com.digitalasset.canton.fetchcontracts.util.GraphExtensions.* - val contractsAndBoundary = startOffset - .cata( - so => - Source - .single(AbsoluteBookmark(so.offset)) - .viaMat(transactionsFollowingBoundary(transactionsSince, logger).divertToHead)( - Keep.right - ), - source.viaMat(acsFollowingAndBoundary(transactionsSince, logger).divertToHead)( - Keep.right - ), - ) - .via(logTermination(logger, "ACS+tx or tx stream")) - contractsAndBoundary mapMaterializedValue { fob => - fob.foreach(a => - logger.debug(s"contracts fetch completed at: ${a.toString}, ${lc.makeString}") - ) - NotUsed - } - } - - private def apiAcToLfAc( - ac: ActiveContract.ResolvedCtTyId[ApiValue] - ): Error \/ ActiveContract.ResolvedCtTyId[LfValue] = - ac.traverse(ApiValueToLfValueConverter.apiValueToLfValue) - .leftMap(e => InternalError(Symbol("apiAcToLfAc"), e.shows)) - - private def lfValueToJsValue(a: LfValue): Error \/ JsValue = - \/.attempt(LfValueCodec.apiValueToJsValue(a))(e => - InternalError(Symbol("lfValueToJsValue"), e.description) - ) - - def resolveContractTypeIds[Tid <: ContractTypeId.RequiredPkg]( - jwt: Jwt - )( - xs: NonEmpty[Set[Tid]] - )(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): Future[(Set[ContractTypeRef.Resolved], Set[Tid])] = { - import scalaz.syntax.traverse.* - import scalaz.std.list.*, scalaz.std.scalaFuture.* - - xs.toList.toNEF - .traverse { x => - resolveContractTypeId(jwt)(x) - .map(_.toOption.flatten.toLeft(x)): Future[ - Either[ContractTypeRef.Resolved, Tid] - ] - } - .map(_.toSet.partitionMap(a => a)) - } -} - -object ContractsService { - private type ApiValue = lav2.value.Value - - private type LfValue = lf.value.Value - - private final case class SearchValueFormat[-T](encode: T => (Error \/ JsValue)) - - private final case class SearchContext[+TpIds]( - jwt: Jwt, - parties: PartySet, - templateIds: TpIds, - ) - - private object SearchContext { - - type QueryLang = SearchContext[ - ResolvedQuery - ] - type ById = SearchContext[Option[ContractTypeId.RequiredPkg]] - type Key = SearchContext[ContractTypeId.Template.RequiredPkg] - } - - // A prototypical abstraction over the in-memory/in-DB split, accounting for - // the fact that in-memory works with ADT-encoded LF values, - // whereas in-DB works with JsValues - private sealed abstract class Search 
{ self => - type LfV - val lfvToJsValue: SearchValueFormat[LfV] - - final def toFinal: Search { type LfV = JsValue } = { - val SearchValueFormat(convert) = lfvToJsValue - new Search { - type LfV = JsValue - override val lfvToJsValue = SearchValueFormat(\/.right) - - override def search( - ctx: SearchContext.QueryLang - )(implicit - lc: LoggingContextOf[InstanceUUID with RequestID], - metrics: HttpApiMetrics, - ): Source[Error \/ ActiveContract.ResolvedCtTyId[LfV], NotUsed] = - self.search(ctx) map (_ flatMap (_ traverse convert)) - } - } - - def search(ctx: SearchContext.QueryLang)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID], - metrics: HttpApiMetrics, - ): Source[Error \/ ActiveContract.ResolvedCtTyId[LfV], NotUsed] - } - - final case class Error(id: Symbol, message: String) - private type InternalError = Error - val InternalError: Error.type = Error - - object Error { - implicit val errorShow: Show[Error] = Show shows { e => - s"ContractService Error, ${e.id: Symbol}: ${e.message: String}" - } - } - - type SearchResult[A] = SyncResponse[Source[A, NotUsed]] - - // TODO(#23504) use EventFormat instead of TransactionFilter - @nowarn("cat=deprecation") - def buildTransactionFilter( - parties: PartySet, - resolvedQuery: ResolvedQuery, - ): TransactionFilter = - transactionFilter(parties, resolvedQuery.resolved.map(_.original).toList) -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/CreateAndExercise.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/CreateAndExercise.scala deleted file mode 100644 index 8aef0ebf44..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/CreateAndExercise.scala +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
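(An aside on the in-memory search deleted above: ContractsService.searchInMemory folds the whole stream of insert/delete steps into a single snapshot and only then emits results. A toy Pekko Streams sketch of that fold-then-emit shape follows; Step and the string-typed contracts are assumed simplifications, and error accumulation is elided.)

import org.apache.pekko.NotUsed
import org.apache.pekko.stream.scaladsl.Source

final case class Step(inserts: Vector[String], deletes: Set[String])

// Collapse the step stream into one active-contract snapshot, dropping
// anything archived along the way, then emit the survivors one by one.
def snapshot(steps: Source[Step, NotUsed]): Source[String, NotUsed] =
  steps
    .fold(Vector.empty[String]) { (acs, step) =>
      acs.filterNot(step.deletes) ++ step.inserts
    }
    .mapConcat(identity)

(Because fold emits only on upstream completion, the full ACS is buffered before anything is sent downstream, which matches the fold-then-mapConcat structure of the deleted code.)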
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json.v1 - -import com.daml.jwt.Jwt -import com.daml.ledger.api.v2 as lav2 -import com.daml.logging.LoggingContextOf -import com.daml.metrics.Timed -import com.digitalasset.canton.http.ContractTypeId.Template -import com.digitalasset.canton.http.Endpoints.ET -import com.digitalasset.canton.http.EndpointsCompanion.* -import com.digitalasset.canton.http.json.* -import com.digitalasset.canton.http.metrics.HttpApiMetrics -import com.digitalasset.canton.http.util.FutureUtil.{either, eitherT} -import com.digitalasset.canton.http.util.JwtParties.* -import com.digitalasset.canton.http.util.Logging.{InstanceUUID, RequestID} -import com.digitalasset.canton.http.{ - CommandMeta, - ContractLocator, - CreateAndExerciseCommand, - CreateCommand, - CreateCommandResponse, - ExerciseCommand, - ExerciseResponse, - JwtPayloadTag, - JwtWritePayload, - ResolvedContractRef, - SyncResponse, -} -import com.digitalasset.daml.lf.value.Value as LfValue -import org.apache.pekko.http.scaladsl.model.* -import scalaz.std.scalaFuture.* -import scalaz.{-\/, EitherT, \/, \/-} -import spray.json.* - -import scala.concurrent.ExecutionContext - -import lav2.value.{Record as ApiRecord, Value as ApiValue} - -private[http] final class CreateAndExercise( - routeSetup: RouteSetup, - decoder: ApiJsonDecoder, - commandService: CommandService, - contractsService: ContractsService, -)(implicit ec: ExecutionContext) { - import CreateAndExercise.* - import RouteSetup.* - import com.digitalasset.canton.http.json.JsonProtocol.* - import com.digitalasset.canton.http.util.ErrorOps.* - import routeSetup.* - - def create(req: HttpRequest)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID], - ec: ExecutionContext, - metrics: HttpApiMetrics, - ): ET[SyncResponse[JsValue]] = - handleCommand(req) { - (jwt, jwtPayload, reqBody, parseAndDecodeTimer) => implicit tc => implicit lc => - for { - cmd <- - decoder - .decodeCreateCommand(reqBody, jwt) - .liftErr(InvalidUserInput.apply): ET[ - CreateCommand[ApiRecord, Template.RequiredPkg] - ] - _ <- EitherT.pure(parseAndDecodeTimer.stop()) - - response <- eitherT( - Timed.future( - metrics.commandSubmissionLedgerTimer, - handleFutureEitherFailure(commandService.create(jwt, jwtPayload, cmd)), - ) - ): ET[CreateCommandResponse[ApiValue]] - } yield response - } - - def exercise(req: HttpRequest)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID], - ec: ExecutionContext, - metrics: HttpApiMetrics, - ): ET[SyncResponse[JsValue]] = - handleCommand(req) { (jwt, jwtPayload, reqBody, parseAndDecodeTimer) => _ => implicit lc => - for { - cmd <- - decoder - .decodeExerciseCommand(reqBody, jwt) - .liftErr(InvalidUserInput.apply): ET[ - ExerciseCommand.RequiredPkg[LfValue, ContractLocator[LfValue]] - ] - _ <- EitherT.pure(parseAndDecodeTimer.stop()) - resolvedRef <- resolveReference(jwt, jwtPayload, cmd.meta, cmd.reference) - - apiArg <- either(lfValueToApiValue(cmd.argument)): ET[ApiValue] - - resolvedCmd = cmd.copy(argument = apiArg, reference = resolvedRef, meta = cmd.meta) - - resp <- eitherT( - Timed.future( - metrics.commandSubmissionLedgerTimer, - handleFutureEitherFailure( - commandService.exercise(jwt, jwtPayload, resolvedCmd) - ), - ) - ): ET[ExerciseResponse[ApiValue]] - - } yield resp - } - - def createAndExercise(req: HttpRequest)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID], - metrics: HttpApiMetrics, - ): ET[SyncResponse[JsValue]] = - handleCommand(req) { (jwt, jwtPayload, reqBody, 
parseAndDecodeTimer) => _ => implicit lc => - for { - cmd <- - decoder - .decodeCreateAndExerciseCommand(reqBody, jwt) - .liftErr(InvalidUserInput.apply): ET[ - CreateAndExerciseCommand.LAVResolved - ] - _ <- EitherT.pure(parseAndDecodeTimer.stop()) - - resp <- eitherT( - Timed.future( - metrics.commandSubmissionLedgerTimer, - handleFutureEitherFailure( - commandService.createAndExercise(jwt, jwtPayload, cmd) - ), - ) - ): ET[ExerciseResponse[ApiValue]] - } yield resp - } - - private def resolveReference( - jwt: Jwt, - jwtPayload: JwtWritePayload, - meta: Option[CommandMeta.IgnoreDisclosed], - reference: ContractLocator[LfValue], - )(implicit - lc: LoggingContextOf[JwtPayloadTag with InstanceUUID with RequestID], - metrics: HttpApiMetrics, - ): ET[ResolvedContractRef[ApiValue]] = - contractsService - .resolveContractReference( - jwt, - resolveRefParties(meta, jwtPayload), - reference, - ) - .flatMap { - case -\/((tpId, key)) => EitherT.either(lfValueToApiValue(key).map(k => -\/(tpId -> k))) - case a @ \/-((_, _)) => EitherT.pure(a) - } -} - -object CreateAndExercise { - import com.digitalasset.canton.http.util.ErrorOps.* - - private def lfValueToApiValue(a: LfValue): Error \/ ApiValue = - JsValueToApiValueConverter.lfValueToApiValue(a).liftErr(ServerError.fromMsg) -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/GetPackageResponse.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/GetPackageResponse.scala deleted file mode 100644 index bccef34346..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/GetPackageResponse.scala +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json.v1 - -import com.daml.ledger.api.v2.package_service -import com.google.protobuf - -sealed abstract class HashFunction extends Product with Serializable -case object SHA256 extends HashFunction -final case class Unrecognized(value: Int) extends HashFunction - -object HashFunction { - def fromLedgerApi(a: package_service.HashFunction): HashFunction = a match { - case package_service.HashFunction.HASH_FUNCTION_SHA256 => SHA256 - case package_service.HashFunction.Unrecognized(x) => Unrecognized(x) - } -} - -final case class GetPackageResponse( - hashFunction: HashFunction, - hash: String, - archivePayload: protobuf.ByteString, -) - -object GetPackageResponse { - def fromLedgerApi(a: package_service.GetPackageResponse): GetPackageResponse = - GetPackageResponse( - hashFunction = HashFunction.fromLedgerApi(a.hashFunction), - archivePayload = a.archivePayload, - hash = a.hash, - ) -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/LedgerClientJwt.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/LedgerClientJwt.scala deleted file mode 100644 index 0643f05bc4..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/LedgerClientJwt.scala +++ /dev/null @@ -1,500 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
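(The LedgerClientJwt wrapper deleted below threads a bearer token into every gRPC call and, when debug logging is enabled, times each call via a logFuture helper. A standalone sketch of that timing pattern follows; the debugEnabled and debug parameters are placeholders standing in for the Canton logging API, not part of it.)

import scala.concurrent.{ExecutionContext, Future}

def logFuture[T](label: String, debugEnabled: Boolean, debug: String => Unit)(
    call: => Future[T]
)(implicit ec: ExecutionContext): Future[T] =
  if (!debugEnabled) call // skip the clock entirely when debug logging is off
  else {
    val start = System.nanoTime()
    call.andThen { case _ =>
      debug(s"$label took ${(System.nanoTime() - start) / 1000000L} ms")
    }
  }

(Checking debugEnabled before taking the first timestamp keeps the wrapper cost-free on the hot path, the same trade-off the deleted code makes by guarding on logger.underlying.isDebugEnabled.)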
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json.v1 - -import com.daml.jwt.Jwt -import com.daml.ledger.api.v2.command_service.{ - SubmitAndWaitForTransactionResponse, - SubmitAndWaitForTransactionTreeResponse, - SubmitAndWaitRequest, -} -import com.daml.ledger.api.v2.event_query_service.GetEventsByContractIdResponse -import com.daml.ledger.api.v2.package_service -import com.daml.ledger.api.v2.state_service.GetActiveContractsResponse -import com.daml.ledger.api.v2.transaction.Transaction -import com.daml.ledger.api.v2.transaction_filter.TransactionFilter -import com.daml.ledger.api.v2.update_service.GetUpdatesResponse.Update -import com.daml.logging.LoggingContextOf -import com.digitalasset.canton.fetchcontracts.Offset -import com.digitalasset.canton.http.json.v1.LedgerClientJwt.Grpc -import com.digitalasset.canton.http.util.Logging.{InstanceUUID, RequestID} -import com.digitalasset.canton.http.{ContractId, Party} -import com.digitalasset.canton.ledger.api.PartyDetails as apiPartyDetails -import com.digitalasset.canton.ledger.client.LedgerClient as DamlLedgerClient -import com.digitalasset.canton.ledger.client.services.EventQueryServiceClient -import com.digitalasset.canton.ledger.client.services.admin.{ - PackageManagementClient, - PartyManagementClient, -} -import com.digitalasset.canton.ledger.client.services.commands.CommandServiceClient -import com.digitalasset.canton.ledger.client.services.pkg.PackageClient -import com.digitalasset.canton.ledger.client.services.state.StateServiceClient -import com.digitalasset.canton.ledger.client.services.updates.UpdateServiceClient -import com.digitalasset.canton.ledger.service.Grpc.StatusEnvelope -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.daml.lf.data.Ref -import com.google.protobuf -import com.google.rpc.Code -import org.apache.pekko.NotUsed -import org.apache.pekko.stream.scaladsl.Source -import scalaz.syntax.tag.* -import scalaz.{-\/, OneAnd, \/} - -import scala.annotation.nowarn -import scala.concurrent.{ExecutionContext as EC, Future} - -final case class LedgerClientJwt(loggerFactory: NamedLoggerFactory) extends NamedLogging { - import Grpc.Category.* - import LedgerClientJwt.* - import LedgerClientRequestTimeLogger.* - - private def bearer(jwt: Jwt): Some[String] = Some(jwt.value: String) - - def submitAndWaitForTransaction( - client: DamlLedgerClient - )(implicit ec: EC): SubmitAndWaitForTransaction = - (jwt, req) => - implicit traceContext => - implicit lc => { - logFuture(SubmitAndWaitForTransactionLog) { - client.commandService - .submitAndWaitForTransactionForJsonApi(req, token = bearer(jwt)) - } - .requireHandling(submitErrors) - } - - // TODO(#23504) remove this method once the deprecated submitAndWaitForTransactionTree is removed - @deprecated("Use submitAndWaitForTransaction instead", "3.3.0") - def submitAndWaitForTransactionTree( - client: DamlLedgerClient - )(implicit ec: EC, traceContext: TraceContext): SubmitAndWaitForTransactionTree = - (jwt, req) => - implicit lc => { - logFuture(SubmitAndWaitForTransactionTreeLog) { - client.commandService - .deprecatedSubmitAndWaitForTransactionTreeForJsonApi(req, token = bearer(jwt)) - } - .requireHandling(submitErrors) - } - - // TODO(#13364) test this function with a token or do not pass the token to getActiveContractsSource if it is not needed - // TODO(#23504) use EventFormat instead of TransactionFilter once TransactionFilter is removed - 
@nowarn("cat=deprecation") - def getActiveContracts(client: DamlLedgerClient)(implicit - traceContext: TraceContext - ): GetActiveContracts = - (jwt, filter, offset, verbose) => - implicit lc => { - log(GetActiveContractsLog) { - client.stateService - .getActiveContractsSource( - filter = filter, - token = bearer(jwt), - verbose = verbose, - validAtOffset = offset, - ) - } - } - - // TODO(#23504) use EventFormat instead of TransactionFilter once TransactionFilter is removed - @nowarn("cat=deprecation") - def getCreatesAndArchivesSince( - client: DamlLedgerClient - )(implicit traceContext: TraceContext): GetCreatesAndArchivesSince = - (jwt, filter, offset, terminates) => { implicit lc => - val endSource: Source[Option[Long], NotUsed] = terminates match { - case Terminates.AtParticipantEnd => - Source - .future(client.stateService.getLedgerEnd(token = bearer(jwt))) - .map(_.offset) - .map(Some(_)) - case Terminates.Never => Source.single(None) - case Terminates.AtAbsolute(off) => - Source.single(Some(Offset.assertFromStringToLong(off))) - } - endSource.flatMapConcat { end => - if (skipRequest(offset, end)) - Source.empty[Transaction] - else { - log(GetUpdatesLog) { - client.updateService - .getUpdatesSource( - begin = offset, - filter = filter, - verbose = true, - end = end, - token = bearer(jwt), - ) - .collect { response => - response.update match { - case Update.Transaction(t) => t - } - } - } - } - } - } - - def getByContractId( - client: DamlLedgerClient - )(implicit ec: EC, traceContext: TraceContext): GetContractByContractId = { - (jwt, contractId, requestingParties) => implicit lc => - logFuture(GetContractByContractIdLog) { - client.eventQueryService.getEventsByContractId( - contractId = contractId.unwrap, - requestingParties = requestingParties.view.map(_.unwrap).toSeq, - token = bearer(jwt), - ) - } - .requireHandling { case Code.PERMISSION_DENIED => - PermissionDenied - } - } - - // TODO(#16065) - // def getByContractKey(client: DamlLedgerClient)(implicit ec: EC): GetContractByContractKey = { - // (jwt, key, templateId, requestingParties, continuationToken) => - // { implicit lc => - // logFuture(GetContractByContractKeyLog) { - // client.eventQueryServiceClient.getEventsByContractKey( - // token = bearer(jwt), - // contractKey = key, - // templateId = templateId, - // requestingParties = requestingParties.view.map(_.unwrap).toSeq, - // continuationToken = continuationToken, - // ) - // } - // .requireHandling { case Code.PERMISSION_DENIED => - // PermissionDenied - // } - // } - // } - - private def skipRequest(start: Long, endO: Option[Long]): Boolean = - (start, endO) match { - case (_, Some(end)) => start >= end - case (_, None) => false - } - - // TODO(#13303): Replace all occurrences of EC for logging purposes in this file - // (preferrably with DirectExecutionContext) - def listKnownParties(client: DamlLedgerClient)(implicit - ec: EC, - traceContext: TraceContext, - ): ListKnownParties = - (jwt, pageToken, pageSize) => - implicit lc => { - logFuture(ListKnownPartiesLog) { - client.partyManagementClient.listKnownParties(bearer(jwt), pageToken, pageSize) - } - .requireHandling { case Code.PERMISSION_DENIED => - PermissionDenied - } - } - - def getParties(client: DamlLedgerClient)(implicit - ec: EC, - traceContext: TraceContext, - ): GetParties = - (jwt, partyIds) => - implicit lc => { - logFuture(GetPartiesLog) { - client.partyManagementClient.getParties(partyIds, bearer(jwt)) - } - .requireHandling { case Code.PERMISSION_DENIED => - PermissionDenied - } - } - - def 
allocateParty(client: DamlLedgerClient)(implicit - ec: EC, - traceContext: TraceContext, - ): AllocateParty = - (jwt, identifierHint, synchronizerIdO) => - implicit lc => { - logFuture(AllocatePartyLog) { - client.partyManagementClient.allocateParty( - hint = identifierHint, - token = bearer(jwt), - synchronizerId = synchronizerIdO, - ) - } - } - - def listPackages(client: DamlLedgerClient)(implicit - ec: EC, - traceContext: TraceContext, - ): ListPackages = - jwt => - implicit lc => { - logger.trace(s"sending list packages request to ledger, ${lc.makeString}") - logFuture(ListPackagesLog) { - client.packageService.listPackages(bearer(jwt)) - } - } - - def getPackage(client: DamlLedgerClient)(implicit - ec: EC, - traceContext: TraceContext, - ): GetPackage = - (jwt, packageId) => - implicit lc => { - logger.trace(s"sending get packages request to ledger, ${lc.makeString}") - logFuture(GetPackageLog) { - client.packageService.getPackage(packageId, token = bearer(jwt)) - } - } - - def uploadDar(client: DamlLedgerClient)(implicit - ec: EC, - traceContext: TraceContext, - ): UploadDarFile = - (jwt, byteString) => - implicit lc => { - logger.trace(s"sending upload dar request to ledger, ${lc.makeString}") - logFuture(UploadDarFileLog) { - client.packageManagementClient.uploadDarFile(darFile = byteString, token = bearer(jwt)) - } - } - - def getLedgerEnd(client: DamlLedgerClient)(implicit - traceContext: TraceContext - ): GetLedgerEnd = - jwt => - implicit lc => { - Source.future( - log(GetLedgerEndLog) { - client.stateService.getLedgerEndOffset(token = bearer(jwt)) - } - ) - } - - private def logFuture[T, C]( - requestLog: RequestLog - )( - block: => Future[T] - )(implicit ec: EC, lc: LoggingContextOf[C], traceContext: TraceContext): Future[T] = if ( - logger.underlying.isDebugEnabled - ) { - val start = System.nanoTime() - val futureResult = block - futureResult.andThen { case _ => - logger.debug(s"${logMessage(start, requestLog)}, ${lc.makeString}") - } - } else block - - private def log[T, C]( - requestLog: RequestLog - )(block: => T)(implicit lc: LoggingContextOf[C], traceContext: TraceContext): T = if ( - logger.underlying.isDebugEnabled - ) { - val start = System.nanoTime() - val result = block - logger.debug(s"${logMessage(start, requestLog)}, ${lc.makeString}") - result - } else block - -} -object LedgerClientJwt { - import Grpc.Category.* - import Grpc.EFuture - - // there are other error categories of interest if we wish to propagate - // different 5xx errors, but PermissionDenied and InvalidArgument are the only - // "client" errors here - type SubmitAndWaitForTransaction = - ( - Jwt, - SubmitAndWaitRequest, - ) => TraceContext => LoggingContextOf[InstanceUUID with RequestID] => EFuture[ - SubmitError, - SubmitAndWaitForTransactionResponse, - ] - - // TODO(#23504) remove this method once the deprecated SubmitAndWaitForTransactionTreeResponse is removed - @deprecated("Use SubmitAndWaitForTransaction instead", "3.3.0") - type SubmitAndWaitForTransactionTree = - ( - Jwt, - SubmitAndWaitRequest, - ) => LoggingContextOf[InstanceUUID with RequestID] => EFuture[ - SubmitError, - SubmitAndWaitForTransactionTreeResponse, - ] - - // TODO(#23504) use EventFormat instead of TransactionFilter once TransactionFilter is removed - @nowarn("cat=deprecation") - type GetActiveContracts = - ( - Jwt, - TransactionFilter, - Long, - Boolean, - ) => LoggingContextOf[InstanceUUID] => Source[ - GetActiveContractsResponse, - NotUsed, - ] - - type GetLedgerEnd = - Jwt => LoggingContextOf[InstanceUUID] => 
Source[Long, NotUsed] - - // TODO(#23504) use EventFormat instead of TransactionFilter once TransactionFilter is removed - @nowarn("cat=deprecation") - type GetCreatesAndArchivesSince = - ( - Jwt, - TransactionFilter, - Long, - Terminates, - ) => LoggingContextOf[InstanceUUID] => Source[Transaction, NotUsed] - - type GetContractByContractId = - ( - Jwt, - ContractId, - Set[Party], - ) => LoggingContextOf[InstanceUUID] => EFuture[PermissionDenied, GetEventsByContractIdResponse] - - // TODO(#16065) - // type ContinuationToken = String - // type GetContractByContractKey = - // ( - // Jwt, - // com.daml.ledger.api.v2.value.Value, - // Identifier, - // Set[Party], - // ContinuationToken, - // ) => LoggingContextOf[InstanceUUID] => EFuture[PermissionDenied, GetEventsByContractKeyResponse] - - type ListKnownParties = - ( - Jwt, - String, - Int, - ) => LoggingContextOf[InstanceUUID with RequestID] => EFuture[ - PermissionDenied, - ( - List[ - apiPartyDetails - ], - String, - ), - ] - - type GetParties = - ( - Jwt, - OneAnd[Set, Ref.Party], - ) => LoggingContextOf[InstanceUUID with RequestID] => EFuture[PermissionDenied, List[ - apiPartyDetails - ]] - - type AllocateParty = - ( - Jwt, - Option[Ref.Party], - Option[String], - ) => LoggingContextOf[InstanceUUID with RequestID] => Future[apiPartyDetails] - - type ListPackages = - Jwt => LoggingContextOf[InstanceUUID with RequestID] => Future[ - package_service.ListPackagesResponse - ] - - type GetPackage = - ( - Jwt, - String, - ) => LoggingContextOf[InstanceUUID with RequestID] => Future[ - package_service.GetPackageResponse - ] - - type UploadDarFile = - ( - Jwt, - protobuf.ByteString, - ) => LoggingContextOf[InstanceUUID with RequestID] => Future[Unit] - - sealed abstract class Terminates extends Product with Serializable - - object Terminates { - case object AtParticipantEnd extends Terminates - case object Never extends Terminates - final case class AtAbsolute(off: String) extends Terminates - } - - // a shim error model to stand in for https://github.com/digital-asset/daml/issues/9834 - object Grpc { - type EFuture[E, A] = Future[Error[E] \/ A] - - final case class Error[+E](e: E, message: String) - - // like Code but with types - // only needs to contain types that may be reported to the json-api user; - // if it is an "internal error" there is no need to call it out for handling - // e.g. 
Unauthenticated never needs to be specially handled, because we should - // have caught that the jwt token was missing and reported that to client already - object Category { - sealed trait SubmitError - // TODO(i13378) we might be able to assign singleton types to the Codes instead in 2.13+ - type PermissionDenied = PermissionDenied.type - case object PermissionDenied extends SubmitError - type InvalidArgument = InvalidArgument.type - case object InvalidArgument extends SubmitError - // not *every* singleton here should be a subtype of SubmitError; - // think of it more like a Venn diagram - - private[LedgerClientJwt] val submitErrors: Code PartialFunction SubmitError = { - case Code.PERMISSION_DENIED => PermissionDenied - case Code.INVALID_ARGUMENT => InvalidArgument - } - - private[LedgerClientJwt] implicit final class `Future Status Category ops`[A]( - private val fa: Future[A] - ) extends AnyVal { - def requireHandling[E](c: Code PartialFunction E)(implicit ec: EC): EFuture[E, A] = - fa map \/.right[Error[E], A] recover Function.unlift { - case StatusEnvelope(status) => - c.lift(Code.forNumber(status.getCode)) map (e => -\/(Error(e, status.getMessage))) - case _ => None - } - } - } - } - - object LedgerClientRequestTimeLogger { - sealed abstract class RequestLog(klass: Class[_], val requestName: String) - extends Product - with Serializable { - final def className: String = klass.getSimpleName - } - - case object SubmitAndWaitForTransactionLog - extends RequestLog(classOf[CommandServiceClient], "submitAndWaitForTransaction") - case object SubmitAndWaitForTransactionTreeLog - extends RequestLog(classOf[CommandServiceClient], "submitAndWaitForTransactionTree") - case object ListKnownPartiesLog - extends RequestLog(classOf[PartyManagementClient], "listKnownParties") - case object GetPartiesLog extends RequestLog(classOf[PartyManagementClient], "getParties") - case object AllocatePartyLog extends RequestLog(classOf[PartyManagementClient], "allocateParty") - case object ListPackagesLog extends RequestLog(classOf[PackageClient], "listPackages") - case object GetPackageLog extends RequestLog(classOf[PackageClient], "getPackages") - case object UploadDarFileLog - extends RequestLog(classOf[PackageManagementClient], "uploadDarFile") - case object GetActiveContractsLog - extends RequestLog(classOf[StateServiceClient], "getActiveContracts") - case object GetLedgerEndLog extends RequestLog(classOf[StateServiceClient], "getLedgerEnd") - case object GetUpdatesLog extends RequestLog(classOf[UpdateServiceClient], "getUpdates") - case object GetContractByContractIdLog - extends RequestLog(classOf[EventQueryServiceClient], "getContractByContractId") -// TODO(#16065) -// case object GetContractByContractKeyLog -// extends RequestLog(classOf[EventQueryServiceClient], "getContractByContractKey") - - private[LedgerClientJwt] def logMessage(startTime: Long, requestLog: RequestLog): String = - s"Ledger client request ${requestLog.className} ${requestLog.requestName} executed, elapsed time: " + - s"${(System.nanoTime() - startTime) / 1000000L} ms" - } -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/LedgerReader.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/LedgerReader.scala deleted file mode 100644 index d23f52239e..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/LedgerReader.scala +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright (c) 2025 
Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json.v1 - -import com.daml.ledger.api.v2.package_service.GetPackageResponse -import com.daml.logging.LoggingContextOf -import com.daml.scalautil.TraverseFMSyntax.* -import com.daml.timer.RetryStrategy -import com.digitalasset.canton.ledger.client.services.pkg.PackageClient -import com.digitalasset.canton.ledger.service.Grpc -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.tracing.NoTracing -import com.digitalasset.daml.lf.archive -import com.digitalasset.daml.lf.archive.DamlLf -import com.digitalasset.daml.lf.data.ImmArray.ImmArraySeq -import com.digitalasset.daml.lf.data.Ref.{Identifier, PackageId} -import com.digitalasset.daml.lf.language.{Ast, Util} -import com.digitalasset.daml.lf.typesig.reader.{DamlLfArchiveReader, SignatureReader} -import com.digitalasset.daml.lf.typesig.{DefDataType, PackageSignature} -import scalaz.* -import scalaz.Scalaz.* - -import scala.concurrent.{ExecutionContext, Future} - -final case class LedgerReader(loggerFactory: NamedLoggerFactory) - extends NamedLogging - with NoTracing { - import LedgerReader.* - - /** @return - * [[LedgerReader.UpToDate]] if packages did not change - */ - def loadPackageStoreUpdates( - client: PackageClient, - loadCache: LoadCache, - token: Option[String], - )( - loadedPackageIds: Set[String] - )(implicit - ec: ExecutionContext, - lc: LoggingContextOf[Any], - ): Future[Error \/ Option[PackageStore]] = - for { - newPackageIds <- client.listPackages(token).map(_.packageIds.toList) - diffIds = newPackageIds.filterNot(loadedPackageIds): List[String] // keeping the order - result <- - if (diffIds.isEmpty) UpToDate - else load[Option[PackageStore]](client, loadCache, diffIds, token) - } yield result - - private def load[PS >: Some[PackageStore]]( - client: PackageClient, - loadCache: LoadCache, - packageIds: List[String], - token: Option[String], - )(implicit ec: ExecutionContext, lc: LoggingContextOf[Any]): Future[Error \/ PS] = - util.Random - .shuffle(packageIds.grouped(loadCache.ParallelLoadFactor).toList) - .traverseFM { - _.traverse(getPackage(client, loadCache, token)(_)) - } - .map(groups => createPackageStoreFromArchives(groups.flatten).map(Some(_))) - - private def getPackage( - client: PackageClient, - loadCache: LoadCache, - token: Option[String], - )( - pkid: String - )(implicit ec: ExecutionContext, lc: LoggingContextOf[Any]): Future[Error \/ Signatures] = { - import loadCache.cache - retryLoop { - cache - .getIfPresent(pkid) - .cata( - { v => - logger - .trace(s"detected redundant package load before starting: $pkid, ${lc.makeString}") - Future successful v - }, - client.getPackage(pkid, token).map { pkresp => - cache - .getIfPresent(pkid) - .cata( - { decoded => - logger - .trace(s"detected redundant package load after gRPC: $pkid, ${lc.makeString}") - decoded - }, { - val decoded = decodeInterfaceFromPackageResponse(pkresp) - if (logger.underlying.isTraceEnabled && cache.getIfPresent(pkid).isDefined) - logger.trace( - s"detected redundant package load after decoding: $pkid, ${lc.makeString}" - ) - cache.put(pkid, decoded) - decoded - }, - ) - }, - ) - } - } - - private def retryLoop[A]( - fa: => Future[A] - )(implicit ec: ExecutionContext, lc: LoggingContextOf[Any]): Future[A] = - packageRetry.apply((_, _) => fa) - - private def packageRetry(implicit lc: LoggingContextOf[Any]): RetryStrategy = { - import 
com.google.rpc.Code - - import scala.concurrent.duration.* - RetryStrategy.constant( - Some(20), - 250.millis, - ) { case Grpc.StatusEnvelope(status) => - val retry = Code.ABORTED == (Code forNumber status.getCode) && - (status.getMessage startsWith "THREADPOOL_OVERLOADED") - if (retry) - logger.trace(s"package load failed with THREADPOOL_OVERLOADED; retrying, ${lc.makeString}") - retry - } - } - -} - -object LedgerReader { - - type Error = String - - final case class Signatures(typesig: PackageSignature, pack: Ast.PackageSignature) - // PackageId -> PackageSignature - type PackageStore = Map[String, Signatures] - - val UpToDate: Future[Error \/ Option[PackageStore]] = - Future.successful(\/-(None)) - final class LoadCache private () { - import com.digitalasset.canton.caching.CaffeineCache - import com.github.benmanes.caffeine.cache.Caffeine - - // This cache serves *concurrent* load requests, not *subsequent* requests; - // once a request is complete, its records shouldn't be touched at all for - // any requests that follow for the rest of the server lifetime, hence the - // short timeout. The timeout is chosen to allow concurrent contention to - // resolve even in unideal execution situations with large package sets, but - // short enough not to pointlessly cache for pkg reqs that do not overlap at - // all. - // - // A hit indicates concurrent contention, so we actually want to *maximize - // misses, not hits*, but the hitrate is really determined by the client's - // request pattern, so there isn't anything you can really do about it on - // the server configuration. 100% miss rate means no redundant work is - // happening; it does not mean the server is being slower. - private[LedgerReader] val cache = CaffeineCache[String, Error \/ Signatures]( - Caffeine - .newBuilder() - .softValues() - .expireAfterWrite(60, java.util.concurrent.TimeUnit.SECONDS), - None, - ) - - private[LedgerReader] val ParallelLoadFactor = 8 - } - - object LoadCache { - def freshCache(): LoadCache = new LoadCache() - } - - private def createPackageStoreFromArchives( - packageResponses: List[Error \/ Signatures] - ): Error \/ PackageStore = - packageResponses.sequence - .map(_.groupMapReduce(_.typesig.packageId: String)(identity)((_, sig) => sig)) - - private def decodeInterfaceFromPackageResponse( - packageResponse: GetPackageResponse - ): Error \/ Signatures = { - import packageResponse.* - \/.attempt { - val payload: DamlLf.ArchivePayload = - archive.ArchivePayloadParser.assertFromByteString(archivePayload) - val pck = DamlLfArchiveReader.readPackage(PackageId.assertFromString(hash), payload) - - val (errors, out) = - SignatureReader.readPackageSignature(() => pck) - - (if (!errors.empty) -\/("Errors reading LF archive:\n" + errors.toString) - else - \/-(out).flatMap(x => - pck.map(p => Signatures(x, Util.toSignature(p._2))) - )): Error \/ Signatures - }(_.getLocalizedMessage).join - } - - def damlLfTypeLookup( - packageStore: () => PackageStore - )(id: Identifier): Option[DefDataType.FWT] = { - val store = packageStore() - - store.get(id.packageId).flatMap { packageSignature => - packageSignature.typesig.typeDecls.get(id.qualifiedName).map(_.`type`).orElse { - for { - interface <- packageSignature.typesig.interfaces.get(id.qualifiedName) - viewTypeId <- interface.viewType - viewType <- PackageSignature - .resolveInterfaceViewType(store.view.mapValues(_.typesig)) - .lift(viewTypeId) - } yield DefDataType(ImmArraySeq(), viewType) - } - } - } -} diff --git 
a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/PackageManagementService.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/PackageManagementService.scala deleted file mode 100644 index 19bd43b052..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/PackageManagementService.scala +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json.v1 - -import com.daml.jwt.Jwt -import com.daml.logging.LoggingContextOf -import com.digitalasset.canton.http.util.Logging.{InstanceUUID, RequestID} -import com.digitalasset.canton.http.util.ProtobufByteStrings -import org.apache.pekko.NotUsed -import org.apache.pekko.stream.Materializer -import org.apache.pekko.stream.scaladsl.Source -import org.apache.pekko.util.ByteString - -import scala.concurrent.{ExecutionContext, Future} - -class PackageManagementService( - listKnownPackagesFn: LedgerClientJwt.ListPackages, - getPackageFn: LedgerClientJwt.GetPackage, - uploadDarFileFn: LedgerClientJwt.UploadDarFile, -)(implicit ec: ExecutionContext, mat: Materializer) { - - def listPackages(jwt: Jwt)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): Future[Seq[String]] = - listKnownPackagesFn(jwt)(lc).map(_.packageIds) - - def getPackage(jwt: Jwt, packageId: String)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): Future[GetPackageResponse] = - getPackageFn(jwt, packageId)(lc).map(GetPackageResponse.fromLedgerApi) - - def uploadDarFile( - jwt: Jwt, - source: Source[ByteString, NotUsed], - )(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): Future[Unit] = - uploadDarFileFn(jwt, ProtobufByteStrings.readFrom(source))(lc) -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/PackageService.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/PackageService.scala deleted file mode 100644 index 36234813a5..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/PackageService.scala +++ /dev/null @@ -1,610 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
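// Aside: a standalone sketch (not from this codebase) of the upload plumbing above.
// PackageManagementService hands uploadDarFile a pekko-streams Source of ByteString
// chunks, which ProtobufByteStrings.readFrom turns into the single protobuf
// ByteString the gRPC call expects. readFrom itself is not shown in this diff, so
// the fold below is an assumption about one way to do it, not the helper's actual
// implementation:

import org.apache.pekko.NotUsed
import org.apache.pekko.stream.Materializer
import org.apache.pekko.stream.scaladsl.Source
import org.apache.pekko.util.ByteString
import scala.concurrent.Future

def collectToProto(
    source: Source[ByteString, NotUsed]
)(implicit mat: Materializer): Future[com.google.protobuf.ByteString] =
  source
    .runFold(ByteString.empty)(_ ++ _) // concatenate all incoming chunks
    .map(all => com.google.protobuf.ByteString.copyFrom(all.toByteBuffer))(
      mat.executionContext
    )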
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json.v1 - -import com.daml.jwt.Jwt -import com.daml.logging.LoggingContextOf -import com.daml.nonempty.{NonEmpty, Singleton} -import com.digitalasset.canton.http.ContractTypeId.ResolvedOf -import com.digitalasset.canton.http.util.IdentifierConverters -import com.digitalasset.canton.http.util.Logging.InstanceUUID -import com.digitalasset.canton.http.{Choice, ContractTypeId, ContractTypeRef} -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.tracing.NoTracing -import com.digitalasset.daml.lf.data.ImmArray.ImmArraySeq -import com.digitalasset.daml.lf.data.Ref -import com.digitalasset.daml.lf.typesig -import scalaz.std.option.none -import scalaz.std.scalaFuture.* -import scalaz.syntax.apply.* -import scalaz.syntax.std.option.* -import scalaz.{EitherT, Show, \/, \/-} - -import java.time.* -import scala.collection.MapView -import scala.concurrent.{ExecutionContext, Future} - -import LedgerReader.{PackageStore, Signatures} - -class PackageService( - reloadPackageStoreIfChanged: Jwt => PackageService.ReloadPackageStore, - val loggerFactory: NamedLoggerFactory, - timeoutInSeconds: Long = 60L, -) extends NamedLogging - with NoTracing { - - import PackageService.* - private type ET[A] = EitherT[Future, Error, A] - - private case class State( - packageIds: Set[String], - interfaceIdMap: InterfaceIdMap, - templateIdMap: TemplateIdMap, - choiceTypeMap: ChoiceTypeMap, - keyTypeMap: KeyTypeMap, - packageStore: PackageStore, - ) { - - def append(diff: PackageStore): State = { - val newPackageStore = this.packageStore ++ resolveChoicesIn(diff) - - val (tpIdMap, ifaceIdMap) = getTemplateIdInterfaceMaps(newPackageStore) - State( - packageIds = newPackageStore.keySet, - interfaceIdMap = ifaceIdMap, - templateIdMap = tpIdMap, - choiceTypeMap = getChoiceTypeMap(newPackageStore), - keyTypeMap = getKeyTypeMap(newPackageStore), - packageStore = newPackageStore, - ) - } - - // `diff` but with interface-inherited choices resolved - private[this] def resolveChoicesIn(diff: PackageStore): PackageStore = { - def lookupIf(pkgId: Ref.PackageId) = (packageStore get pkgId) orElse (diff get pkgId) - val findIface = - typesig.PackageSignature.findInterface((Function unlift lookupIf).andThen(_.typesig)) - diff.transform((_, iface) => - Signatures(iface.typesig.resolveChoicesAndIgnoreUnresolvedChoices(findIface), iface.pack) - ) - } - - } - - @SuppressWarnings(Array("org.wartremover.warts.Var")) - private class StateCache private { - // volatile, reading threads don't need synchronization - @volatile private var _state: State = - State( - Set.empty, - TemplateIdMap.Empty, - TemplateIdMap.Empty, - Map.empty, - Map.empty, - Map.empty, - ) - - private def updateState(diff: PackageStore): Unit = synchronized { - this._state = this._state.append(diff) - } - - @volatile private var lastUpdated = Instant.MIN - - private def updateInstant(instant: Instant): Unit = synchronized { - if (lastUpdated.isBefore(instant)) - lastUpdated = instant - } - - def state: State = _state - - // Regular updates should happen regardless of the current state every minute. 
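// Aside: a standalone illustration (not from this codebase) of the gate defined
// just below this comment. `Instant.until` measures the seconds elapsed since the
// last successful update, so name-based lookups trigger at most roughly one reload
// per `timeoutInSeconds` window even under heavy concurrent resolution traffic.
// The class name Staleness is illustrative:

import java.time.{Instant, temporal}

final class Staleness(timeoutInSeconds: Long) {
  @volatile private var lastUpdated: Instant = Instant.MIN

  // called after every reload attempt; keeps only the newest timestamp
  def markUpdated(): Unit = synchronized {
    val now = Instant.now()
    if (lastUpdated.isBefore(now)) lastUpdated = now
  }

  // true once at least `timeoutInSeconds` have elapsed since the last update;
  // starting from Instant.MIN makes the very first call stale by construction
  def isStale: Boolean =
    lastUpdated.until(Instant.now(), temporal.ChronoUnit.SECONDS) >= timeoutInSeconds
}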
- def packagesShouldBeFetchedAgain: Boolean = - lastUpdated.until(Instant.now(), temporal.ChronoUnit.SECONDS) >= timeoutInSeconds - - def reload(jwt: Jwt)(implicit - ec: ExecutionContext, - lc: LoggingContextOf[InstanceUUID], - ): Future[Error \/ Unit] = - EitherT - .eitherT( - Future( - logger.debug(s"Trying to execute a package update, ${lc.makeString}") - ) *> reloadPackageStoreIfChanged(jwt)(_state.packageIds) - ) - .map { - case Some(diff) => - // this is not a perfect reduction, but is never less efficient - // and often more efficient in concurrent loading. - // - // But how can we just drop half of the packages on the floor? - // Because if a package is in _state already, then by definition - // it cannot depend on any of the packages that remain in - // loadsSinceReloading; therefore, loadsSinceReloading is the valid - // diff we would have seen had we started the reload *now*. - val loadsSinceReloading = diff -- _state.packageIds - if (diff.sizeIs > loadsSinceReloading.size) - logger.debug( - s"discarding ${diff.size - loadsSinceReloading.size} redundant loaded packages, ${lc.makeString}" - ) - if (loadsSinceReloading.isEmpty) - logger.debug(s"new package IDs not found, ${lc.makeString}") - else { - updateState(loadsSinceReloading) - logger.info( - s"new package IDs loaded: ${loadsSinceReloading.keySet.mkString(", ")}, ${lc.makeString}" - ) - logger.debug( - s"loaded diff: $loadsSinceReloading, ${lc.makeString}" - .take(1000) /* truncate output */ - ) - } - case None => logger.debug(s"new package IDs not found, ${lc.makeString}") - } - .map { res => - updateInstant(Instant.now()) - res - } - .run - } - - private object StateCache { - def apply() = new StateCache() - } - - private val cache = StateCache() - private def state: State = cache.state - - @inline - def reload(jwt: Jwt)(implicit - ec: ExecutionContext, - lc: LoggingContextOf[InstanceUUID], - ): Future[Error \/ Unit] = cache.reload(jwt) - - def packageStore: PackageStore = state.packageStore - - def resolveContractTypeId(implicit ec: ExecutionContext): ResolveContractTypeId = - resolveContractTypeIdFromState { () => - val st = state - (st.templateIdMap, st.interfaceIdMap) - } - - private[this] def resolveContractTypeIdFromState( - latestMaps: () => (TemplateIdMap, InterfaceIdMap) - )(implicit ec: ExecutionContext): ResolveContractTypeId = new ResolveContractTypeId { - import ResolveContractTypeId.Overload as O - import com.digitalasset.canton.http.ContractTypeId as C - - override def apply[U, R[T] <: ContractTypeId[T]](jwt: Jwt)( - x: U with ContractTypeId.RequiredPkg - )(implicit - lc: LoggingContextOf[InstanceUUID], - overload: O[U, R], - ): Future[Error \/ Option[ContractTypeRef[R]]] = { - type ResultType = Option[ContractTypeRef[R]] - // we use a different resolution strategy depending on the static type - // determined by 'overload', as well as the class of 'x'. 
We figure the - // strategy exactly once so the reload is cheaper - val doSearch: ((TemplateIdMap, InterfaceIdMap)) => ResultType = overload match { - case O.Template => { case (tids, _) => tids resolve x } - case O.Top => - (x: C.RequiredPkg) match { - // only search the template or interface map, if that is the origin - // class, since searching the other map would convert template IDs - // to interface IDs and vice versa - case x: C.Template.RequiredPkg => { case (tids, _) => tids resolve x } - case x: C.Interface.RequiredPkg => { case (_, iids) => iids resolve x } - case x: C.Unknown.RequiredPkg => { case (tids, iids) => - (tids resolve x, iids resolve x) match { - case (tid @ Some(_), None) => tid - case (None, iid @ Some(_)) => iid - // presence in both means the ID is ambiguous - case (None, None) | (Some(_), Some(_)) => None - } - } - } - } - def doReloadAndSearchAgain() = EitherT(reload(jwt)).map(_ => doSearch(latestMaps())) - def keep(it: ResultType) = EitherT.pure(it): ET[ResultType] - for { - result <- EitherT.pure(doSearch(latestMaps())): ET[ResultType] - _ = logger.trace(s"Result: $result, ${lc.makeString}") - finalResult <- ((x: C.RequiredPkg).packageId match { - case Ref.PackageRef.Name(_) => // Used package name, not package id - if (result.isDefined) - // no package id and we do have the package, refresh if timeout - if (cache.packagesShouldBeFetchedAgain) { - logger.trace( - s"no package id and we do have the package, refresh because of timeout, ${lc.makeString}" - ) - doReloadAndSearchAgain() - } else { - logger.trace( - s"no package id and we do have the package, -no timeout- no refresh, ${lc.makeString}" - ) - keep(result) - } - // no package id and we don’t have the package, always refresh - else { - logger.trace( - s"no package id and we don’t have the package, always refresh, ${lc.makeString}" - ) - doReloadAndSearchAgain() - } - case Ref.PackageRef.Id(packageId) => - if (result.isDefined) { - logger.trace( - s"package id defined & template id found, no refresh necessary, ${lc.makeString}" - ) - keep(result) - } else { - // package id and we have the package, never refresh - if (state.packageIds.contains(packageId)) { - logger.trace(s"package id and we have the package, never refresh, ${lc.makeString}") - keep(result) - } - // package id and we don’t have the package, always refresh - else { - logger.trace( - s"package id and we don’t have the package, always refresh, ${lc.makeString}" - ) - doReloadAndSearchAgain() - } - } - }): ET[ResultType] - _ = logger.trace(s"Final result: $finalResult, ${lc.makeString}") - } yield finalResult - }.run - } - - def resolveTemplateRecordType: ResolveTemplateRecordType = - templateId => - \/-( - typesig - .TypeCon( - typesig.TypeConId(IdentifierConverters.lfIdentifier(templateId)), - ImmArraySeq(), - ) - ) - - def allTemplateIds(implicit ec: ExecutionContext): AllTemplateIds = { implicit lc => jwt => - val f = - if (cache.packagesShouldBeFetchedAgain) { - logger.trace( - s"no package id and we do have the package, refresh because of timeout, ${lc.makeString}" - ) - reload(jwt) - } else Future.successful(()) - f.map(_ => state.templateIdMap.allIds) - } - -// See the above comment on resolveTemplateId - def resolveChoiceArgType: ResolveChoiceArgType = - (ctid, c) => PackageService.resolveChoiceArgType(state.choiceTypeMap)(ctid, c) - -// See the above comment on resolveTemplateId - def resolveKeyType: ResolveKeyType = - x => PackageService.resolveKey(state.keyTypeMap)(x) -} - -object PackageService { - sealed trait Error - final 
case class InputError(message: String) extends Error - final case class ServerError(message: String) extends Error - - object Error { - implicit val errorShow: Show[Error] = Show shows { - case InputError(m) => s"PackageService input error: ${m: String}" - case ServerError(m) => s"PackageService server error: ${m: String}" - } - } - - type ReloadPackageStore = - Set[String] => Future[PackageService.Error \/ Option[LedgerReader.PackageStore]] - - sealed abstract class ResolveContractTypeId { - import ResolveContractTypeId.Overload - def apply[U, R[T] <: ContractTypeId[T]](jwt: Jwt)( - x: U with ContractTypeId.RequiredPkg - )(implicit lc: LoggingContextOf[InstanceUUID], overload: Overload[U, R]): Future[ - PackageService.Error \/ Option[ContractTypeRef[R]] - ] - } - - object ResolveContractTypeId { - sealed abstract class Overload[-Unresolved, +Resolved[_]] - - import com.digitalasset.canton.http.ContractTypeId as C - - object Overload extends LowPriority { - /* TODO(#13303) Re-adapted from Daml repo #15293: see below note about Top - implicit case object Unknown - extends Overload[C.Unknown.RequiredPkg, C.ResolvedId[C.Definite[String]]] - */ - implicit case object Template extends Overload[C.Template.RequiredPkg, C.Template] - case object Top extends Overload[C.RequiredPkg, C.Definite] - } - - // TODO(#13303) Re-adapted from Daml repo #15293: - // if the request model has .Unknown included, then LowPriority and Top are - // no longer needed and can be replaced with Overload.Unknown above - sealed abstract class LowPriority { this: Overload.type => - // needs to be low priority so it doesn't win against Template - implicit def `fallback Top`: Overload[C.RequiredPkg, C.Definite] = Top - } - } - - type ResolveTemplateRecordType = - ContractTypeId.Template.RequiredPkgId => Error \/ typesig.Type - - type AllTemplateIds = - LoggingContextOf[ - InstanceUUID - ] => Jwt => Future[Set[ContractTypeRef[ContractTypeId.Template]]] - - type ResolveChoiceArgType = - ( - ContractTypeId.ResolvedPkgId, - Choice, - ) => Error \/ (Option[ContractTypeId.Interface.ResolvedPkgId], typesig.Type) - - type ResolveKeyType = - ContractTypeId.Template.RequiredPkgId => Error \/ typesig.Type - - final case class ContractTypeIdMap[CtId[T] <: ContractTypeId[T]]( - all: Map[CtId[Ref.PackageRef], ResolvedOf[CtId]], - nameIds: Map[Ref.PackageName, NonEmpty[Seq[Ref.PackageId]]], - idNames: PackageNameMap, - ) { - private[http] def toContractTypeRef( - id: ContractTypeId.ResolvedOf[CtId] - ): Option[ContractTypeRef[CtId]] = - id.packageId match { - case Ref.PackageRef.Name(pname) => - for { - pkgIdsForName <- nameIds.get(pname) - pkgIdsForCtId <- NonEmpty.from( - pkgIdsForName.filter(pId => all.contains(id.copy(packageId = Ref.PackageRef.Id(pId)))) - ) - (name, _) <- idNames.get(Ref.PackageRef.Name(pname)) - } yield ContractTypeRef(id, pkgIdsForCtId, Some(name)) - case Ref.PackageRef.Id(pid) => - Some(ContractTypeRef.unnamed[CtId](id.copy(packageId = pid))) - } - - def allIds: Set[ContractTypeRef[CtId]] = - all.values.flatMap { case id => - // If the package has a name, use the package name instead of package id. 
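// Aside: a standalone illustration (not from this codebase) of the preference the
// `allIds` mapping here (continued just below) applies: a package *name* reference
// keeps resolving across package upgrades, while a package id pins one version,
// and `buildTemplateIdMap` further down orders the ids behind each name
// newest-version-first. Plain Strings and Seq[Int] stand in for the Ref.* types:

import scala.math.Ordering.Implicits.seqOrdering

val idsByVersion: Map[String, Seq[Int]] = Map(
  "pkgid-a" -> Seq(1, 0, 0),
  "pkgid-b" -> Seq(1, 2, 0),
  "pkgid-c" -> Seq(1, 1, 5),
)

// newest version first, mirroring the descending sort in buildTemplateIdMap,
// so a name-based lookup lands on the most recent upload
val preferredOrder: Seq[String] =
  idsByVersion.toSeq.sortBy(_._2)(implicitly[Ordering[Seq[Int]]].reverse).map(_._1)
// == Seq("pkgid-b", "pkgid-c", "pkgid-a")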
- val useId = idNames - .get(id.packageId) - .fold(id) { case (name, _) => id.copy(packageId = Ref.PackageRef.Name(name)) } - toContractTypeRef(useId) - }.toSet - - def resolve( - a: ContractTypeId.RequiredPkg - )(implicit makeKey: ContractTypeId.Like[CtId]): Option[ContractTypeRef[CtId]] = - (all get makeKey(a.packageId, a.moduleName, a.entityName)).flatMap(toContractTypeRef) - } - - type TemplateIdMap = ContractTypeIdMap[ContractTypeId.Template] - private type InterfaceIdMap = ContractTypeIdMap[ContractTypeId.Interface] - - object TemplateIdMap { - def Empty[CtId[T] <: ContractTypeId.Definite[T]]: ContractTypeIdMap[CtId] = - ContractTypeIdMap(Map.empty, Map.empty, PackageNameMap.empty) - } - - private type ChoiceTypeMap = Map[ContractTypeId.ResolvedPkgId, NonEmpty[ - Map[Choice, NonEmpty[Map[Option[ContractTypeId.Interface.ResolvedPkgId], typesig.Type]]] - ]] - - type KeyTypeMap = Map[ContractTypeId.Template.ResolvedPkgId, typesig.Type] - - final case class PackageNameMap( - private val mapView: MapView[Ref.PackageRef, (Ref.PackageName, Ref.PackageVersion)] - ) { - def get(pkgId: Ref.PackageRef) = mapView.get(pkgId) - override def toString() = s"PackageNameMap(${mapView.toMap})" - } - object PackageNameMap { - val empty = PackageNameMap(MapView.empty) - } - - private def buildPackageNameMap(packageStore: PackageStore): PackageNameMap = - PackageNameMap( - packageStore.view - .flatMap { case ((pkgId, p)) => - // We make two entries per package: one by package id and another by package name. - val meta = p.typesig.metadata - val pId = Ref.PackageId.assertFromString(pkgId) - val pName = Ref.PackageName.assertFromString(meta.name) - val nameInfo = (pName, meta.version) - List( - (Ref.PackageRef.Id(pId), nameInfo), - (Ref.PackageRef.Name(pName), nameInfo), - ) - } - .toMap[Ref.PackageRef, (Ref.PackageName, Ref.PackageVersion)] - .view - ) - - private def getTemplateIdInterfaceMaps( - packageStore: PackageStore - ): (TemplateIdMap, InterfaceIdMap) = { - import TemplateIds.{getInterfaceIds, getTemplateIds} - val packageSigs = packageStore.values.toSet - val idName = buildPackageNameMap(packageStore) - ( - buildTemplateIdMap( - idName, - getTemplateIds(packageSigs.map(_.typesig)) map ContractTypeId.Template.fromLedgerApi, - ), - buildTemplateIdMap( - idName, - getInterfaceIds(packageSigs.map(_.typesig)) map ContractTypeId.Interface.fromLedgerApi, - ), - ) - } - - def buildTemplateIdMap[CtId[T] <: ContractTypeId.Definite[T] with ContractTypeId.Ops[CtId, T]]( - idName: PackageNameMap, - ids: Set[CtId[Ref.PackageId]], - ): ContractTypeIdMap[CtId] = { - import com.daml.nonempty.NonEmptyReturningOps.* - val all: Map[CtId[Ref.PackageRef], ResolvedOf[CtId]] = ids.view.map { id => - val k = id.copy(packageId = Ref.PackageRef.Id(id.packageId): Ref.PackageRef) - (k, k) - }.toMap - - val idPkgNamePkgVer: Set[(CtId[Ref.PackageId], Ref.PackageName, Ref.PackageVersion)] = - ids - .flatMap { id => - idName - .get(Ref.PackageRef.Id(id.packageId)) - .map { case (nm, ver) => (id, nm, ver) } - } - - val nameIds: Map[Ref.PackageName, NonEmpty[Seq[Ref.PackageId]]] = idPkgNamePkgVer - .groupBy1(_._2) // group by package name - .map { - case (name, idNameVers) => { - // Sort the package ids by version, descending - val orderedPkgIds: NonEmpty[Seq[Ref.PackageId]] = idNameVers - .map { case (id, _, ver) => (id.packageId, ver) } - .toSeq - .sorted( - Ordering.by((pkgIdVer: (Ref.PackageId, Ref.PackageVersion)) => pkgIdVer._2).reverse - ) - .map(_._1) - (name, orderedPkgIds) - } - } - .toMap - - val allByPkgName: 
Map[CtId[Ref.PackageRef], ResolvedOf[CtId]] = idPkgNamePkgVer.map { - case (id, name, _) => - val idWithPkgName = id.copy(packageId = Ref.PackageRef.Name(name): Ref.PackageRef) - (idWithPkgName, idWithPkgName) - }.toMap - - ContractTypeIdMap(all ++ allByPkgName, nameIds, idName) - } - - private def resolveChoiceArgType( - choiceIdMap: ChoiceTypeMap - )( - ctId: ContractTypeId.ResolvedPkgId, - choice: Choice, - ): Error \/ (Option[ContractTypeId.Interface.ResolvedPkgId], typesig.Type) = { - // TODO(#13303) Re-adapted from Daml repo #14727: skip indirect resolution if ctId is an interface ID - val resolution = for { - choices <- choiceIdMap get ctId - overloads <- choices get choice - onlyChoice <- Singleton.unapply(overloads) orElse (overloads get None map ((None, _))) - } yield onlyChoice - resolution.toRightDisjunction( - InputError(s"Cannot resolve Choice Argument type, given: ($ctId, $choice)") - ) - } - - def resolveKey( - keyTypeMap: KeyTypeMap - )(templateId: ContractTypeId.Template.RequiredPkgId): Error \/ typesig.Type = - keyTypeMap - .get(templateId) - .toRightDisjunction( - InputError(s"Cannot resolve Template Key type, given: ${templateId.toString}") - ) - - // assert that the given identifier is resolved - private[this] def fromIdentifier[CtId[T] <: ContractTypeId.Definite[T]]( - b: ContractTypeId.Like[CtId], - id: Ref.Identifier, - ): b.ResolvedPkgId = - fromQualifiedName(b, id.packageId, id.qualifiedName) - - // assert that the given identifier is resolved - private[this] def fromQualifiedName[CtId[T] <: ContractTypeId.Definite[T]]( - b: ContractTypeId.Like[CtId], - pkgId: Ref.PackageId, - qn: Ref.QualifiedName, - ): b.ResolvedPkgId = - b(pkgId, qn.module.dottedName, qn.name.dottedName) - - private def getChoiceTypeMap(packageStore: PackageStore): ChoiceTypeMap = - packageStore.values.view.map(_.typesig).flatMap(getChoices).toMap - - private def getChoices( - signature: typesig.PackageSignature - ): IterableOnce[(ContractTypeId.ResolvedPkgId, NonEmpty[ChoicesByInterface[typesig.Type]])] = - signature.typeDecls.iterator.collect(joinPF { - case ( - qn, - typesig.PackageSignature.TypeDecl.Template(_, typesig.DefTemplate(choices, _, _)), - ) => - NonEmpty from getTChoices(choices.resolvedChoices) map (( - fromQualifiedName(ContractTypeId.Template, signature.packageId, qn), - _, - )) - }) ++ signature.interfaces.iterator.collect(Function unlift { case (qn, defIf) => - NonEmpty from getIChoices(defIf.choices) map (( - fromQualifiedName(ContractTypeId.Interface, signature.packageId, qn), - _, - )) - }) - - private[this] type ChoicesByInterface[Ty] = - Map[Choice, NonEmpty[Map[Option[ContractTypeId.Interface.ResolvedPkgId], Ty]]] - - private def getTChoices[Ty]( - choices: Map[Ref.ChoiceName, NonEmpty[ - Map[Option[Ref.TypeConId], typesig.TemplateChoice[Ty]] - ]] - ): ChoicesByInterface[Ty] = { - import typesig.* - choices.map { case (name, resolvedChoices) => - ( - Choice(name: String), - resolvedChoices.map { case (oIface, TemplateChoice(pTy, _, _)) => - (oIface map (fromIdentifier(ContractTypeId.Interface, _)), pTy) - }.toMap, - ) - } - } - - private def getIChoices[Ty]( - choices: Map[Ref.ChoiceName, typesig.TemplateChoice[Ty]] - ): ChoicesByInterface[Ty] = - choices.map { case (name, typesig.TemplateChoice(pTy, _, _)) => - (Choice(name: String), NonEmpty(Map, none[ContractTypeId.Interface.ResolvedPkgId] -> pTy)) - } - - // flatten two levels of partiality into one - private[this] def joinPF[T, R](f: T PartialFunction Option[R]): T PartialFunction R = - new PartialFunction[T, R] 
{ - override def applyOrElse[A1 <: T, B1 >: R](x: A1, default: A1 => B1): B1 = - f.applyOrElse(x, Function const None) getOrElse default(x) - - override def isDefinedAt(x: T): Boolean = f.applyOrElse(x, Function const None).isDefined - - override def apply(v1: T): R = f(v1) getOrElse (throw new MatchError(v1)) - } - - private def getKeyTypeMap(packageStore: PackageStore): KeyTypeMap = - packageStore.flatMap { case (_, interface) => getKeys(interface.typesig) } - - private def getKeys( - interface: typesig.PackageSignature - ): Map[ContractTypeId.Template.ResolvedPkgId, typesig.Type] = - interface.typeDecls.collect { - case ( - qn, - typesig.PackageSignature.TypeDecl - .Template(_, typesig.DefTemplate(_, Some(keyType), _)), - ) => - val templateId = - ContractTypeId.Template(interface.packageId, qn.module.dottedName, qn.name.dottedName) - (templateId, keyType) - } -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/PackagesAndDars.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/PackagesAndDars.scala deleted file mode 100644 index 546c8d7152..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/PackagesAndDars.scala +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json.v1 - -import com.daml.jwt.Jwt -import com.daml.logging.LoggingContextOf -import com.digitalasset.canton.http.Endpoints.ET -import com.digitalasset.canton.http.metrics.HttpApiMetrics -import com.digitalasset.canton.http.util.FutureUtil.{either, eitherT, rightT} -import com.digitalasset.canton.http.util.Logging.{InstanceUUID, RequestID} -import com.digitalasset.canton.http.util.ProtobufByteStrings -import com.digitalasset.canton.http.{OkResponse, SyncResponse} -import org.apache.pekko.NotUsed -import org.apache.pekko.http.scaladsl.model.* -import scalaz.EitherT -import scalaz.std.scalaFuture.* - -import scala.concurrent.{ExecutionContext, Future} - -class PackagesAndDars(routeSetup: RouteSetup, packageManagementService: PackageManagementService)( - implicit ec: ExecutionContext -) { - import RouteSetup.* - import routeSetup.* - - def uploadDarFile(httpRequest: HttpRequest)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID], - metrics: HttpApiMetrics, - ): ET[SyncResponse[Unit]] = - for { - parseAndDecodeTimer <- getParseAndDecodeTimerCtx() - t2 <- either(routeSetup.inputSource(httpRequest)) - (jwt, source) = t2 - _ <- EitherT.pure(parseAndDecodeTimer.stop()) - - _ <- eitherT( - handleFutureFailure( - packageManagementService.uploadDarFile( - jwt, - source.mapMaterializedValue(_ => NotUsed), - ) - ) - ): ET[Unit] - } yield OkResponse(()) - - def listPackages(jwt: Jwt)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): ET[SyncResponse[Seq[String]]] = - rightT(packageManagementService.listPackages(jwt)).map(OkResponse(_)) - - def downloadPackage(jwt: Jwt, packageId: String)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): Future[HttpResponse] = { - val pkgResp: Future[GetPackageResponse] = - packageManagementService.getPackage(jwt, packageId) - pkgResp.map { x => - HttpResponse( - entity = HttpEntity.apply( - ContentTypes.`application/octet-stream`, - ProtobufByteStrings.toSource(x.archivePayload), - ) - ) - } - } -} diff --git 
a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/Parties.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/Parties.scala deleted file mode 100644 index b4d4886acf..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/Parties.scala +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json.v1 - -import com.daml.jwt.Jwt -import com.daml.logging.LoggingContextOf -import com.digitalasset.canton.http.Endpoints.ET -import com.digitalasset.canton.http.util.Collections.toNonEmptySet -import com.digitalasset.canton.http.util.FutureUtil.eitherT -import com.digitalasset.canton.http.util.Logging.{InstanceUUID, RequestID} -import com.digitalasset.canton.http.{ - AllocatePartyRequest, - OkResponse, - Party, - PartyDetails, - SyncResponse, - UnknownParties, -} -import org.apache.pekko.stream.Materializer -import scalaz.NonEmptyList -import scalaz.std.scalaFuture.* - -import scala.concurrent.ExecutionContext - -private[http] final class Parties(partiesService: PartiesService)(implicit ec: ExecutionContext) { - import Parties.* - - def allParties(jwt: Jwt)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID], - mat: Materializer, - ): ET[SyncResponse[List[PartyDetails]]] = for { - res <- eitherT(partiesService.allParties(jwt)) - } yield OkResponse(res) - - def parties(jwt: Jwt, parties: NonEmptyList[Party])(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): ET[SyncResponse[List[PartyDetails]]] = - for { - ps <- eitherT(partiesService.parties(jwt, toNonEmptySet(parties))) - } yield partiesResponse(parties = ps._1.toList, unknownParties = ps._2.toList) - - def allocateParty(jwt: Jwt, request: AllocatePartyRequest)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): ET[SyncResponse[PartyDetails]] = - for { - res <- eitherT(partiesService.allocate(jwt, request)) - } yield OkResponse(res) -} - -private[v1] object Parties { - private def partiesResponse( - parties: List[PartyDetails], - unknownParties: List[Party], - ): SyncResponse[List[PartyDetails]] = { - - val warnings: Option[UnknownParties] = - if (unknownParties.isEmpty) None - else Some(UnknownParties(unknownParties)) - - OkResponse(parties, warnings) - } -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/PartiesService.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/PartiesService.scala deleted file mode 100644 index c4bba20fe6..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/PartiesService.scala +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
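// Aside: a standalone illustration (not from this codebase) of the response shape
// built by `partiesResponse` in Parties.scala above: parties the ledger knows
// become the OK payload, and the unknown ones are attached as a warning instead of
// failing the whole request. Plain Strings stand in for Party/PartyDetails, and
// splitKnown is a hypothetical helper name:

def splitKnown(
    requested: Set[String],
    detailsByParty: Map[String, String],
): (List[String], List[String]) = {
  val (found, unknown) = requested.partition(detailsByParty.contains)
  // details for every party the ledger knew, plus the identifiers it did not
  (found.toList.map(detailsByParty), unknown.toList)
}

// splitKnown(Set("alice", "bob"), Map("alice" -> "alice-details"))
//   == (List("alice-details"), List("bob"))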
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json.v1 - -import com.daml.jwt.Jwt -import com.daml.logging.LoggingContextOf -import com.daml.nonempty.* -import com.digitalasset.canton.http.EndpointsCompanion.{Error, InvalidUserInput, Unauthorized} -import com.digitalasset.canton.http.util.FutureUtil.* -import com.digitalasset.canton.http.util.Logging.{InstanceUUID, RequestID} -import com.digitalasset.canton.http.{AllocatePartyRequest, Party, PartyDetails, PartySet} -import com.digitalasset.daml.lf.data.Ref -import org.apache.pekko.stream.Materializer -import org.apache.pekko.stream.scaladsl.{Keep, Sink, Source} -import scalaz.std.option.* -import scalaz.std.scalaFuture.* -import scalaz.std.string.* -import scalaz.syntax.traverse.* -import scalaz.{-\/, EitherT, OneAnd, \/, \/-} - -import scala.concurrent.{ExecutionContext, Future} - -import LedgerClientJwt.Grpc - -class PartiesService( - listAllParties: LedgerClientJwt.ListKnownParties, - getParties: LedgerClientJwt.GetParties, - allocateParty: LedgerClientJwt.AllocateParty, -)(implicit ec: ExecutionContext) { - - import PartiesService.* - - def allocate( - jwt: Jwt, - request: AllocatePartyRequest, - )(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): Future[Error \/ PartyDetails] = { - val et: ET[PartyDetails] = for { - idHint <- either( - request.identifierHint.traverse(toLedgerApi) - ): ET[Option[Ref.Party]] - - apiParty <- rightT( - allocateParty(jwt, idHint, request.synchronizerId)(lc) - ): ET[com.digitalasset.canton.ledger.api.PartyDetails] - - httpParty = PartyDetails.fromLedgerApi(apiParty) - - } yield httpParty - - et.run - } - - private type AllPartiesRet = Error \/ List[PartyDetails] - - def allParties(jwt: Jwt)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID], - mat: Materializer, - ): Future[AllPartiesRet] = { - import scalaz.std.option.* - Source - .unfoldAsync(some("")) { - _ traverse { pageToken => - listAllParties(jwt, pageToken, 0)(lc) - .map { - case -\/(e) => (None, -\/(handleGrpcError(e))) - case \/-((parties, "")) => (None, \/-(parties)) - case \/-((parties, pageToken)) => (Some(pageToken), \/-(parties)) - } - // if the listAllParties call fails, stop the stream and emit the error as a "warning" - .recover(Error.fromThrowable andThen (e => (None, -\/(e)))) - } - } - .toMat(Sink.fold(\/-(List.empty): AllPartiesRet) { - case (-\/(e), _) => -\/(e) - case (_, -\/(e)) => -\/(e) - case (\/-(acc), \/-(more)) => \/-(acc ++ more.map(PartyDetails.fromLedgerApi)) - })(Keep.right) - .run() - } - - def parties( - jwt: Jwt, - identifiers: PartySet, - )(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): Future[Error \/ (Set[PartyDetails], Set[Party])] = { - val et: ET[(Set[PartyDetails], Set[Party])] = for { - apiPartyIds <- either(toLedgerApiPartySet(identifiers)): ET[OneAnd[Set, Ref.Party]] - apiPartyDetails <- eitherT(getParties(jwt, apiPartyIds)(lc)) - .leftMap(handleGrpcError): ET[List[com.digitalasset.canton.ledger.api.PartyDetails]] - httpPartyDetails = apiPartyDetails.iterator - .map(PartyDetails.fromLedgerApi) - .toSet: Set[PartyDetails] - } yield (httpPartyDetails, findUnknownParties(httpPartyDetails, identifiers)) - - et.run - } - - private def findUnknownParties( - found: Set[PartyDetails], - requested: PartySet, - ): Set[Party] = - if (found.sizeIs == requested.size) Set.empty - else requested -- found.map(_.identifier) -} - -object PartiesService { - import com.digitalasset.canton.http.util.ErrorOps.* - - private type ET[A] = EitherT[Future, 
Error, A] - - private def handleGrpcError(e: Grpc.Error[Grpc.Category.PermissionDenied]): Error = - Unauthorized(e.message) - - def toLedgerApiPartySet( - ps: PartySet - ): InvalidUserInput \/ OneAnd[Set, Ref.Party] = { - import scalaz.std.list.* - val enel: InvalidUserInput \/ NonEmptyF[List, Ref.Party] = ps.toList.toNEF traverse toLedgerApi - enel.map { case x +-: xs => OneAnd(x, xs.toSet) } - } - - def toLedgerApi(p: Party): InvalidUserInput \/ Ref.Party = - \/.fromEither(Ref.Party.fromString(Party.unwrap(p))) - .liftErrS("PartiesService.toLedgerApi")(InvalidUserInput.apply) - -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/RouteSetup.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/RouteSetup.scala deleted file mode 100644 index 21e8ab5d8f..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/RouteSetup.scala +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json.v1 - -import com.daml.jwt.Jwt -import com.daml.ledger.api.v2 as lav2 -import com.daml.logging.LoggingContextOf -import com.daml.logging.LoggingContextOf.withEnrichedLoggingContext -import com.daml.metrics.api.MetricHandle.Timer.TimerHandle -import com.daml.scalautil.Statement.discard -import com.digitalasset.canton.http.Endpoints.ET -import com.digitalasset.canton.http.EndpointsCompanion.* -import com.digitalasset.canton.http.json.* -import com.digitalasset.canton.http.metrics.HttpApiMetrics -import com.digitalasset.canton.http.util.FutureUtil.{either, eitherT} -import com.digitalasset.canton.http.util.Logging.{InstanceUUID, RequestID} -import com.digitalasset.canton.http.{ - Endpoints, - EndpointsCompanion, - JwtPayloadG, - JwtPayloadTag, - JwtWritePayload, - OkResponse, - SyncResponse, -} -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.tracing.{NoTracing, TraceContext, W3CTraceContext} -import org.apache.pekko.http.scaladsl.model.* -import org.apache.pekko.http.scaladsl.model.headers.{ - Authorization, - ModeledCustomHeader, - ModeledCustomHeaderCompanion, - OAuth2BearerToken, - `X-Forwarded-Proto`, -} -import org.apache.pekko.stream.Materializer -import org.apache.pekko.stream.scaladsl.Source -import org.apache.pekko.util.ByteString -import scalaz.std.scalaFuture.* -import scalaz.syntax.std.option.* -import scalaz.{-\/, EitherT, Traverse, \/, \/-} -import spray.json.* - -import scala.concurrent.duration.FiniteDuration -import scala.concurrent.{ExecutionContext, Future} -import scala.util.Try - -import lav2.value.Value as ApiValue - -private[http] final class RouteSetup( - allowNonHttps: Boolean, - decodeJwt: EndpointsCompanion.ValidateJwt, - encoder: ApiJsonEncoder, - resolveUser: ResolveUser, - maxTimeToCollectRequest: FiniteDuration, - val loggerFactory: NamedLoggerFactory, -)(implicit ec: ExecutionContext, mat: Materializer) - extends NamedLogging - with NoTracing { - import RouteSetup.* - import encoder.implicits.* - import com.digitalasset.canton.http.util.ErrorOps.* - - private[v1] def handleCommand[T[_]](req: HttpRequest)( - fn: ( - Jwt, - JwtWritePayload, - JsValue, - TimerHandle, - ) => TraceContext => LoggingContextOf[JwtPayloadTag with InstanceUUID with RequestID] => ET[ - T[ApiValue] - ] - )(implicit - lc: 
LoggingContextOf[InstanceUUID with RequestID], - ev1: JsonWriter[T[JsValue]], - ev2: Traverse[T], - metrics: HttpApiMetrics, - ): ET[SyncResponse[JsValue]] = { - val traceContextOption = - W3CTraceContext.fromHeaders(req.headers.map(header => (header.name(), header.value())).toMap) - implicit val traceContext = - traceContextOption.map(_.toTraceContext).getOrElse(TraceContext.empty) - for { - parseAndDecodeTimerCtx <- getParseAndDecodeTimerCtx() - t3 <- inputJsValAndJwtPayload(req): ET[(Jwt, JwtWritePayload, JsValue)] - (jwt, jwtPayload, reqBody) = t3 - resp <- withJwtPayloadLoggingContext(jwtPayload)( - fn(jwt, jwtPayload, reqBody, parseAndDecodeTimerCtx) - ) - jsVal <- either(SprayJson.encode1(resp).liftErr(ServerError.fromMsg)): ET[JsValue] - } yield OkResponse(jsVal) - } - - def inputJsValAndJwtPayload[P](req: HttpRequest)(implicit - createFromUserToken: CreateFromUserToken[P], - lc: LoggingContextOf[InstanceUUID with RequestID], - ): EitherT[Future, Error, (Jwt, P, JsValue)] = - inputJsVal(req).flatMap(x => withJwtPayload[JsValue, P](x).leftMap(it => it: Error)) - - def withJwtPayload[A, P](fa: (Jwt, A))(implicit - createFromUserToken: CreateFromUserToken[P] - ): EitherT[Future, Error, (Jwt, P, A)] = - decodeAndParsePayload[P](fa._1, decodeJwt, resolveUser).map(t2 => (t2._1, t2._2, fa._2)) - - def inputAndJwtPayload[P]( - req: HttpRequest - )(implicit - createFromUserToken: CreateFromUserToken[P], - lc: LoggingContextOf[InstanceUUID with RequestID], - ): EitherT[Future, Error, (Jwt, P, String)] = - eitherT(input(req)).flatMap(it => withJwtPayload[String, P](it)) - - def getParseAndDecodeTimerCtx()(implicit - metrics: HttpApiMetrics - ): ET[TimerHandle] = - EitherT.pure(metrics.incomingJsonParsingAndValidationTimer.startAsync()) - - def input(req: HttpRequest)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): Future[Error \/ (Jwt, String)] = - findJwt(req) match { - case e @ -\/(_) => - discard(req.entity.discardBytes(mat)) - Future.successful(e) - case \/-(j) => - data(req.entity).map(d => \/-((j, d))) - } - - def inputSource(req: HttpRequest)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): Error \/ (Jwt, Source[ByteString, Any]) = - findJwt(req) match { - case e @ -\/(_) => - discard(req.entity.discardBytes(mat)) - e - case \/-(j) => - \/.right((j, req.entity.dataBytes)) - - } - - private[this] def data(entity: RequestEntity): Future[String] = - entity.toStrict(maxTimeToCollectRequest).map(_.data.utf8String) - - def inputJsVal(req: HttpRequest)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): ET[(Jwt, JsValue)] = - for { - t2 <- eitherT(input(req)): ET[(Jwt, String)] - jsVal <- either(SprayJson.parse(t2._2).liftErr(InvalidUserInput.apply)): ET[JsValue] - } yield (t2._1, jsVal) - - def findJwt(req: HttpRequest)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): Unauthorized \/ Jwt = - ensureHttpsForwarded(req) flatMap { _ => - req.headers - .collectFirst { case Authorization(OAuth2BearerToken(token)) => - Jwt(token) - } - .toRightDisjunction( - Unauthorized("missing Authorization header with OAuth 2.0 Bearer Token") - ) - } - - private def isHttps(req: HttpRequest): Boolean = req.uri.scheme == "https" - - private[this] def ensureHttpsForwarded(req: HttpRequest)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): Unauthorized \/ Unit = - if (allowNonHttps || isForwardedForHttps(req.headers) || isHttps(req)) \/-(()) - else { - logger.warn(s"$nonHttpsErrorMessage, ${lc.makeString}") - \/-(()) - } -} - -object 
RouteSetup { - import Endpoints.IntoEndpointsError - - private val nonHttpsErrorMessage = - "missing HTTPS reverse-proxy request headers" - - def withJwtPayloadLoggingContext[A](jwtPayload: JwtPayloadG)( - fn: TraceContext => LoggingContextOf[JwtPayloadTag with InstanceUUID with RequestID] => A - )(implicit lc: LoggingContextOf[InstanceUUID with RequestID], traceContext: TraceContext): A = - withEnrichedLoggingContext( - LoggingContextOf.label[JwtPayloadTag], - "act_as" -> jwtPayload.actAs.toString, - "user_id" -> jwtPayload.userId.toString, - "read_as" -> jwtPayload.readAs.toString, - ).run(fn(traceContext)) - - def handleFutureFailure[A](fa: Future[A])(implicit - ec: ExecutionContext - ): Future[Error \/ A] = - fa.map(a => \/-(a)).recover(Error.fromThrowable andThen (-\/(_))) - - def handleFutureEitherFailure[A, B](fa: Future[A \/ B])(implicit - ec: ExecutionContext, - A: IntoEndpointsError[A], - ): Future[Error \/ B] = - fa.map(_ leftMap A.run).recover(Error.fromThrowable andThen (-\/(_))) - - private def isForwardedForHttps(headers: Seq[HttpHeader]): Boolean = - headers exists { - case `X-Forwarded-Proto`(protocol) => protocol equalsIgnoreCase "https" - // the whole "custom headers" thing in pekko-http is a mishmash of - // actually using the ModeledCustomHeaderCompanion stuff (which works) - // and "just use ClassTag YOLO" (which won't work) - case Forwarded(value) => Forwarded(value).proto contains "https" - case _ => false - } - - // avoid case class to avoid using the wrong unapply in isForwardedForHttps - private[v1] final class Forwarded(override val value: String) - extends ModeledCustomHeader[Forwarded] { - override def companion = Forwarded - override def renderInRequests = true - override def renderInResponses = false - // per discussion https://github.com/digital-asset/daml/pull/5660#discussion_r412539107 - def proto: Option[String] = - Forwarded.re findFirstMatchIn value map (_.group(1).toLowerCase) - } - - private[v1] object Forwarded extends ModeledCustomHeaderCompanion[Forwarded] { - override val name = "Forwarded" - override def parse(value: String) = Try(new Forwarded(value)) - private val re = raw"""(?i)proto\s*=\s*"?(https?)""".r - } -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/TemplateIds.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/TemplateIds.scala deleted file mode 100644 index eaf25f3153..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/TemplateIds.scala +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
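// Aside: a standalone check (not from this codebase) of the regex used by the
// Forwarded companion in RouteSetup.scala above. It extracts the protocol with one
// case-insensitive pattern instead of a full RFC 7239 parser: the first `proto=`
// pair wins, with optional whitespace and a leading quote tolerated. forwardedProto
// is a hypothetical helper name:

val protoRe = raw"""(?i)proto\s*=\s*"?(https?)""".r

def forwardedProto(headerValue: String): Option[String] =
  protoRe.findFirstMatchIn(headerValue).map(_.group(1).toLowerCase)

// forwardedProto("for=192.0.2.60; proto=https; by=203.0.113.43") == Some("https")
// forwardedProto("""Proto = "HTTPS", for=192.0.2.60""")          == Some("https")
// forwardedProto("for=192.0.2.60")                               == None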
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json.v1 - -import com.daml.ledger.api.v2.value.Identifier -import com.digitalasset.daml.lf.data.Ref -import com.digitalasset.daml.lf.typesig.PackageSignature -import com.digitalasset.daml.lf.typesig.PackageSignature.TypeDecl.Template - -object TemplateIds { - def getTemplateIds(packages: Set[PackageSignature]): Set[Identifier] = - packages.flatMap { pkg => - getTemplateIds( - pkg, - pkg.typeDecls.iterator.collect { case (qn, _: Template) => qn }, - ) - } - - def getInterfaceIds(packages: Set[PackageSignature]): Set[Identifier] = - packages.flatMap { pkg => - getTemplateIds(pkg, pkg.interfaces.keysIterator) - } - - private def getTemplateIds( - pkg: PackageSignature, - qns: IterableOnce[Ref.QualifiedName], - ): Set[Identifier] = - qns.iterator.map { qn => - Identifier( - packageId = pkg.packageId, - moduleName = qn.module.dottedName, - entityName = qn.name.dottedName, - ) - }.toSet -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/UserManagement.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/UserManagement.scala deleted file mode 100644 index 4a5400fa23..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/UserManagement.scala +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json.v1 - -import com.daml.jwt.{Jwt, StandardJWTPayload} -import com.digitalasset.canton.http.Endpoints.ET -import com.digitalasset.canton.http.EndpointsCompanion.* -import com.digitalasset.canton.http.EndpointsCompanion.CreateFromUserToken.userIdFromToken -import com.digitalasset.canton.http.util.FutureUtil.either -import com.digitalasset.canton.http.{ - CreateUserRequest, - DeleteUserRequest, - EndpointsCompanion, - GetUserRequest, - GrantUserRightsRequest, - ListUserRightsRequest, - OkResponse, - RevokeUserRightsRequest, - SyncResponse, - UserDetails, - UserRight, - UserRights, -} -import com.digitalasset.canton.ledger.api.{User, UserRight as ApiUserRight} -import com.digitalasset.canton.ledger.client.services.admin.UserManagementClient -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.daml.lf.data.Ref.UserId -import org.apache.pekko.NotUsed -import org.apache.pekko.stream.scaladsl.Source -import scalaz.std.scalaFuture.* -import scalaz.syntax.traverse.* -import scalaz.{-\/, EitherT, Monad, \/, \/-} - -import scala.concurrent.{ExecutionContext, Future} - -final class UserManagement( - decodeJwt: EndpointsCompanion.ValidateJwt, - userManagementClient: UserManagementClient, -)(implicit - ec: ExecutionContext -) { - import UserManagement.* - - def getUser(jwt: Jwt, req: GetUserRequest)(implicit - traceContext: TraceContext - ): ET[SyncResponse[UserDetails]] = - for { - userId <- parseUserId(req.userId) - user <- EitherT.rightT(userManagementClient.getUser(userId = userId, token = Some(jwt.value))) - } yield OkResponse( - UserDetails(user.id, user.primaryParty) - ): SyncResponse[UserDetails] - - def createUser( - jwt: Jwt, - createUserRequest: CreateUserRequest, - )(implicit traceContext: TraceContext): ET[SyncResponse[spray.json.JsObject]] = { - import com.digitalasset.daml.lf.data.Ref - import scalaz.std.option.* - import scalaz.syntax.std.either.* - import scalaz.syntax.traverse.* - val 
input = - for { - username <- UserId.fromString(createUserRequest.userId).disjunction - primaryParty <- createUserRequest.primaryParty.traverse(it => - Ref.Party.fromString(it).disjunction - ) - rights <- UserRights.toLedgerUserRights( - createUserRequest.rights.getOrElse(List.empty) - ) - } yield (username, primaryParty, rights) - for { - info <- EitherT.either(input.leftMap(InvalidUserInput.apply)): ET[ - (UserId, Option[Ref.Party], List[ApiUserRight]) - ] - (username, primaryParty, initialRights) = info - _ <- EitherT.rightT( - userManagementClient.createUser( - User(username, primaryParty), - initialRights, - Some(jwt.value), - ) - ) - } yield emptyObjectResponse - } - - def deleteUser( - jwt: Jwt, - deleteUserRequest: DeleteUserRequest, - )(implicit traceContext: TraceContext): ET[SyncResponse[spray.json.JsObject]] = - for { - userId <- parseUserId(deleteUserRequest.userId) - _ <- EitherT.rightT(userManagementClient.deleteUser(userId = userId, token = Some(jwt.value))) - } yield emptyObjectResponse - - def listUserRights( - jwt: Jwt, - listUserRightsRequest: ListUserRightsRequest, - )(implicit traceContext: TraceContext): ET[SyncResponse[List[UserRight]]] = - for { - userId <- parseUserId(listUserRightsRequest.userId) - rights <- EitherT.rightT( - userManagementClient.listUserRights(userId = userId, token = Some(jwt.value)) - ) - } yield OkResponse(UserRights.fromLedgerUserRights(rights)): SyncResponse[List[ - UserRight - ]] - - def grantUserRights( - jwt: Jwt, - grantUserRightsRequest: GrantUserRightsRequest, - )(implicit traceContext: TraceContext): ET[SyncResponse[List[UserRight]]] = - for { - userId <- parseUserId(grantUserRightsRequest.userId) - rights <- either( - UserRights.toLedgerUserRights(grantUserRightsRequest.rights) - ).leftMap(InvalidUserInput.apply): ET[List[ApiUserRight]] - grantedUserRights <- EitherT.rightT( - userManagementClient.grantUserRights( - userId = userId, - rights = rights, - token = Some(jwt.value), - ) - ) - } yield OkResponse( - UserRights.fromLedgerUserRights(grantedUserRights) - ): SyncResponse[List[UserRight]] - - def revokeUserRights( - jwt: Jwt, - revokeUserRightsRequest: RevokeUserRightsRequest, - )(implicit traceContext: TraceContext): ET[SyncResponse[List[UserRight]]] = - for { - userId <- parseUserId(revokeUserRightsRequest.userId) - rights <- either( - UserRights.toLedgerUserRights(revokeUserRightsRequest.rights) - ).leftMap(InvalidUserInput.apply): ET[List[ApiUserRight]] - revokedUserRights <- EitherT.rightT( - userManagementClient.revokeUserRights( - userId = userId, - rights = rights, - token = Some(jwt.value), - ) - ) - } yield OkResponse( - UserRights.fromLedgerUserRights(revokedUserRights) - ): SyncResponse[List[UserRight]] - - def getAuthenticatedUser( - jwt: Jwt - )(implicit traceContext: TraceContext): ET[SyncResponse[UserDetails]] = - for { - userId <- getUserIdFromToken(jwt) - user <- EitherT.rightT(userManagementClient.getUser(userId = userId, token = Some(jwt.value))) - } yield OkResponse(UserDetails(user.id, user.primaryParty)) - - def listAuthenticatedUserRights( - jwt: Jwt - )(implicit traceContext: TraceContext): ET[SyncResponse[List[UserRight]]] = - for { - userId <- getUserIdFromToken(jwt) - rights <- EitherT.rightT( - userManagementClient.listUserRights(userId = userId, token = Some(jwt.value)) - ) - } yield OkResponse(UserRights.fromLedgerUserRights(rights)): SyncResponse[List[ - UserRight - ]] - - def listUsers( - jwt: Jwt - )(implicit - traceContext: TraceContext - ): ET[SyncResponse[Source[Error \/ UserDetails, 
NotUsed]]] = { - val users = aggregateListUserPages(Some(jwt.value)) - val userDetails = users.map(_ map UserDetails.fromUser) - EitherT.rightT(Future.successful(OkResponse(userDetails))) - } - - private def aggregateListUserPages( - token: Option[String], - pageSize: Int = 1000, - )(implicit traceContext: TraceContext): Source[Error \/ User, NotUsed] = { - import scalaz.std.option.* - Source.unfoldAsync(some("")) { - _ traverse { pageToken => - userManagementClient - .listUsers(token = token, pageToken = pageToken, pageSize = pageSize) - .map { - case (users, "") => (None, \/-(users)) - case (users, pageToken) => (Some(pageToken), \/-(users)) - } - // if a listUsers call fails, stop the stream and emit the error as a "warning" - .recover(Error.fromThrowable andThen (e => (None, -\/(e)))) - } - } mapConcat { - case e @ -\/(_) => Seq(e) - case \/-(users) => users.view.map(\/-(_)) - } - } - - private def getUserIdFromToken(jwt: Jwt): ET[UserId] = - decodeAndParseUserIdFromToken(jwt, decodeJwt).leftMap(identity[Error]) -} - -object UserManagement { - private def parseUserId(rawUserId: String)(implicit - ec: ExecutionContext - ): ET[UserId] = { - import scalaz.syntax.std.either.* - either( - UserId.fromString(rawUserId).disjunction.leftMap(InvalidUserInput.apply) - ) - } - - private def decodeAndParseUserIdFromToken(rawJwt: Jwt, decodeJwt: ValidateJwt)(implicit - mf: Monad[Future] - ): ET[UserId] = - EitherT.either(decodeAndParseJwt(rawJwt, decodeJwt).flatMap { - case token: StandardJWTPayload => userIdFromToken(token) - case _ => - -\/(Unauthorized("A user token was expected but a custom token was given"): Error) - }) - - private val emptyObjectResponse: SyncResponse[spray.json.JsObject] = - OkResponse(spray.json.JsObject()) -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/V1Routes.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/V1Routes.scala deleted file mode 100644 index 6f61e432fa..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/V1Routes.scala +++ /dev/null @@ -1,637 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json.v1 - -import com.daml.jwt.Jwt -import com.daml.logging.LoggingContextOf -import com.daml.logging.LoggingContextOf.withEnrichedLoggingContext -import com.daml.metrics.Timed -import com.digitalasset.canton.concurrent.DirectExecutionContext -import com.digitalasset.canton.http.EndpointsCompanion.* -import com.digitalasset.canton.http.json.* -import com.digitalasset.canton.http.metrics.HttpApiMetrics -import com.digitalasset.canton.http.util.ApiValueToLfValueConverter -import com.digitalasset.canton.http.util.FutureUtil.{either, rightT} -import com.digitalasset.canton.http.util.Logging.{ - InstanceUUID, - RequestID, - extendWithRequestIdLogCtx, -} -import com.digitalasset.canton.http.{ - EndpointsCompanion, - ErrorResponse, - OkResponse, - SyncResponse, - WebsocketConfig, - endpoints, - json, - util, -} -import com.digitalasset.canton.ledger.client.LedgerClient as DamlLedgerClient -import com.digitalasset.canton.ledger.client.services.admin.UserManagementClient -import com.digitalasset.canton.ledger.client.services.pkg.PackageClient -import com.digitalasset.canton.logging.NoLogging.noTracingLogger -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.tracing.NoTracing -import org.apache.pekko.NotUsed -import org.apache.pekko.http.scaladsl.model.* -import org.apache.pekko.http.scaladsl.model.headers.`Content-Type` -import org.apache.pekko.http.scaladsl.server -import org.apache.pekko.http.scaladsl.server.Directives.* -import org.apache.pekko.http.scaladsl.server.RouteResult.* -import org.apache.pekko.http.scaladsl.server.{Directive, Directive0, PathMatcher, Route} -import org.apache.pekko.stream.Materializer -import org.apache.pekko.stream.scaladsl.{Flow, Source} -import org.apache.pekko.util.ByteString -import scalaz.EitherT.eitherT -import scalaz.Scalaz.some -import scalaz.std.scalaFuture.* -import scalaz.syntax.std.option.* -import scalaz.syntax.traverse.* -import scalaz.{-\/, EitherT, \/, \/-} -import spray.json.* - -import scala.annotation.nowarn -import scala.concurrent.duration.FiniteDuration -import scala.concurrent.{ExecutionContext, Future} -import scala.util.control.NonFatal - -import LedgerReader.PackageStore -import ContractsService.SearchResult - -class V1Routes( - allowNonHttps: Boolean, - decodeJwt: EndpointsCompanion.ValidateJwt, - commandService: CommandService, - contractsService: ContractsService, - partiesService: PartiesService, - packageManagementService: PackageManagementService, - websocketEndpoints: WebsocketEndpoints, - encoder: ApiJsonEncoder, - decoder: ApiJsonDecoder, - shouldLogHttpBodies: Boolean, - resolveUser: ResolveUser, - userManagementClient: UserManagementClient, - val loggerFactory: NamedLoggerFactory, - maxTimeToCollectRequest: FiniteDuration = FiniteDuration(5, "seconds"), -)(implicit ec: ExecutionContext, mat: Materializer) - extends NamedLogging - with NoTracing { - - private[this] val routeSetup: RouteSetup = new RouteSetup( - allowNonHttps = allowNonHttps, - decodeJwt = decodeJwt, - encoder = encoder, - resolveUser, - maxTimeToCollectRequest = maxTimeToCollectRequest, - loggerFactory = loggerFactory, - ) - - private[this] val commandsHelper: CreateAndExercise = - new CreateAndExercise(routeSetup, decoder, commandService, contractsService) - import commandsHelper.* - - private[this] val userManagement: UserManagement = new UserManagement( - decodeJwt = decodeJwt, - userManagementClient, - ) - import 
userManagement.* - - private[this] val packagesDars: PackagesAndDars = - new PackagesAndDars(routeSetup, packageManagementService) - import packagesDars.* - - private[this] val contractList: endpoints.ContractList = - new endpoints.ContractList(routeSetup, decoder, contractsService, loggerFactory) - import contractList.* - - private[this] val partiesEP: Parties = new Parties(partiesService) - import partiesEP.* - - // Limit logging of bodies to content smaller than 10 KiB. - // A char of a UTF-8 string takes 1 to 4 bytes, so this limit corresponds to - // between 2560 and 10240 chars, which already fits the whole cascade of - // import statements in this file, itself a very big string to log. - private final val maxBodySizeForLogging = Math.pow(2, 10) * 10 - - import V1Routes.* - import json.JsonProtocol.* - import util.ErrorOps.* - - private def responseToRoute(res: Future[HttpResponse]): Route = _ => res map Complete.apply - private def toRoute[T: MkHttpResponse](res: => T)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): Route = - responseToRoute(httpResponse(res)) - - private def toPostRoute[Req: JsonReader, Res: JsonWriter]( - httpRequest: HttpRequest, - fn: (Jwt, Req) => ET[SyncResponse[Res]], - )(implicit - lc: LoggingContextOf[InstanceUUID with RequestID], - metrics: HttpApiMetrics, - ): Route = { - val res = for { - t <- routeSetup.inputJsVal(httpRequest): ET[(Jwt, JsValue)] - (jwt, reqBody) = t - req <- either(SprayJson.decode[Req](reqBody).liftErr(InvalidUserInput.apply)): ET[Req] - res <- eitherT(RouteSetup.handleFutureEitherFailure(fn(jwt, req).run)): ET[ - SyncResponse[Res] - ] - } yield res - responseToRoute(httpResponse(res)) - } - - private def toGetRoute[Res]( - httpRequest: HttpRequest, - fn: Jwt => ET[SyncResponse[Res]], - )(implicit - lc: LoggingContextOf[InstanceUUID with RequestID], - mkHttpResponse: MkHttpResponse[ET[SyncResponse[Res]]], - ): Route = { - val res = for { - t <- eitherT(routeSetup.input(httpRequest)): ET[(Jwt, String)] - (jwt, _) = t - res <- eitherT(RouteSetup.handleFutureEitherFailure(fn(jwt).run)): ET[ - SyncResponse[Res] - ] - } yield res - responseToRoute(httpResponse(res)) - } - - private def toDownloadPackageRoute( - httpRequest: HttpRequest, - packageId: String, - fn: (Jwt, String) => Future[HttpResponse], - )(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): Route = - responseToRoute( - httpResponse( - extractJwt(httpRequest).flatMap { jwt => - rightT(fn(jwt, packageId)) - } - ) - ) - - private def extractJwt( - httpRequest: HttpRequest - )(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): ET[Jwt] = for { - t <- eitherT(routeSetup.input(httpRequest)): ET[(Jwt, String)] - (jwt, _) = t - } yield jwt - - private def mkRequestLogMsg(request: HttpRequest, remoteAddress: RemoteAddress) = - s"Incoming ${request.method.value} request on ${request.uri} from $remoteAddress" - - private def mkResponseLogMsg(statusCode: StatusCode) = - s"Responding to client with HTTP $statusCode" - - // Always put this directive after a path to ensure - // that you don't log request bodies multiple times (simply because a matching test was made multiple times).
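// For example (an illustrative sketch, not the original route tree): prefer
//   path("create") & logRequestAndResult apply toRoute(create(req))
// over
//   logRequestAndResult & path("create") apply toRoute(create(req))
// since in the second form every sibling route that attempts and fails the
// path test runs the logging directive again, duplicating request-body logs.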
- // TL;DR JUST PUT THIS THING AFTER YOUR FINAL PATH MATCHING - private def logRequestResponseHelper( - logIncomingRequest: (HttpRequest, RemoteAddress) => HttpRequest, - logResponse: HttpResponse => HttpResponse, - ): Directive0 = - extractClientIP flatMap { remoteAddress => - mapRequest(request => logIncomingRequest(request, remoteAddress)) & mapRouteResultFuture { - responseF => - for { - response <- responseF - transformedResponse <- response match { - case Complete(httpResponse) => - Future.successful(Complete(logResponse(httpResponse))) - case _ => - Future.failed( - new RuntimeException( - """Logging the request & response should never happen on routes which get rejected. - |Make sure to place the directive only at places where a match is guaranteed (e.g. after the path directive).""".stripMargin - ) - ) - } - } yield transformedResponse - } - } - - private def logJsonRequestAndResult(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): Directive0 = { - def logWithHttpMessageBodyIfAvailable( - httpMessage: HttpMessage, - msg: String, - kind: String, - ): httpMessage.Self = - if ( - httpMessage - .header[`Content-Type`] - .map(_.contentType) - .contains(ContentTypes.`application/json`) - ) { - def logWithBodyInCtx(body: com.daml.logging.entries.LoggingValue) = - withEnrichedLoggingContext( - LoggingContextOf.label[RequestEntity], - s"${kind}_body" -> body, - ) - .run(implicit lc => logger.info(s"$msg, ${lc.makeString}")) - httpMessage.entity.contentLengthOption match { - case Some(length) if length < maxBodySizeForLogging => - import org.apache.pekko.stream.scaladsl.* - httpMessage - .transformEntityDataBytes( - Flow.fromFunction { it => - try logWithBodyInCtx(it.utf8String.parseJson) - catch { - case NonFatal(ex) => - logger.error(s"Failed to log message body, ${lc.makeString}: ", ex) - } - it - } - ) - case other => - val reason = other - .map(length => s"size of $length is too big for logging") - .getOrElse { - if (httpMessage.entity.isChunked()) - "is chunked & overall size is unknown" - else - "size is unknown" - } - logWithBodyInCtx(s"omitted because $kind body $reason") - httpMessage.self - } - } else { - logger.info(s"$msg, ${lc.makeString}") - httpMessage.self - } - logRequestResponseHelper( - (request, remoteAddress) => - logWithHttpMessageBodyIfAvailable( - request, - mkRequestLogMsg(request, remoteAddress), - "request", - ), - httpResponse => - logWithHttpMessageBodyIfAvailable( - httpResponse, - mkResponseLogMsg(httpResponse.status), - "response", - ), - ) - } - - def logRequestAndResultSimple(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): Directive0 = - logRequestResponseHelper( - (request, remoteAddress) => { - logger.info(s"${mkRequestLogMsg(request, remoteAddress)}, ${lc.makeString}") - request - }, - httpResponse => { - logger.info(s"${mkResponseLogMsg(httpResponse.status)}, ${lc.makeString}") - httpResponse - }, - ) - val logRequestAndResultFn: LoggingContextOf[InstanceUUID with RequestID] => Directive0 = - if (shouldLogHttpBodies) lc => logJsonRequestAndResult(lc) - else lc => logRequestAndResultSimple(lc) - - def logRequestAndResult(implicit lc: LoggingContextOf[InstanceUUID with RequestID]): Directive0 = - logRequestAndResultFn(lc) - - def v1Routes(implicit - lc0: LoggingContextOf[InstanceUUID], - metrics: HttpApiMetrics, - ): Route = extractRequest apply { req => - implicit val lc: LoggingContextOf[InstanceUUID with RequestID] = - extendWithRequestIdLogCtx(identity)(lc0) - val markThroughputAndLogProcessingTime: Directive0 = 
Directive { (fn: Unit => Route) => - val t0 = System.nanoTime - fn(()).andThen { res => - res.onComplete(_ => - logger.trace(s"Processed request after ${System.nanoTime() - t0}ns, ${lc.makeString}") - ) - res - } - } - def path[L](pm: PathMatcher[L]) = - server.Directives.path(pm) & markThroughputAndLogProcessingTime & logRequestAndResult - - concat( - pathPrefix("v1") apply concat( - post apply concat( - path("create") apply toRoute(create(req)), - path("exercise") apply toRoute(exercise(req)), - path("create-and-exercise") apply toRoute( - createAndExercise(req) - ), - path("query").apply(toRoute(query(req))), - path("fetch") apply toRoute(fetch(req)), - path("user") apply toPostRoute(req, getUser), - path("user" / "create") apply toPostRoute(req, createUser), - path("user" / "delete") apply toPostRoute(req, deleteUser), - path("user" / "rights") apply toPostRoute(req, listUserRights), - path("user" / "rights" / "grant") apply toPostRoute(req, grantUserRights), - path("user" / "rights" / "revoke") apply toPostRoute(req, revokeUserRights), - path("parties") apply toPostRoute(req, parties), - path("parties" / "allocate") apply toPostRoute( - req, - allocateParty, - ), - path("packages") apply toRoute(uploadDarFile(req)), - ), - get apply concat( - path("query") apply toRoute(retrieveAll(req)), - path("user") apply toGetRoute(req, getAuthenticatedUser), - path("user" / "rights") apply toGetRoute(req, listAuthenticatedUserRights), - path("users") apply toGetRoute(req, listUsers), - path("parties") apply toGetRoute(req, allParties), - path("packages") apply toGetRoute(req, listPackages), - path("packages" / ".+".r)(packageId => - extractRequest apply (req => toDownloadPackageRoute(req, packageId, downloadPackage)) - ), - ), - ), - websocketEndpoints.transactionWebSocket, - ) - } - - private def httpResponse[T](output: T)(implicit - T: MkHttpResponse[T], - lc: LoggingContextOf[InstanceUUID with RequestID], - ): Future[HttpResponse] = - T.run(output) - .recover(Error.fromThrowable andThen (httpResponseError(_, logger))) - - private implicit def sourceStreamSearchResults[A: JsonWriter](implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): MkHttpResponse[ET[SyncResponse[Source[Error \/ A, NotUsed]]]] = - MkHttpResponse { output => - implicitly[MkHttpResponse[Future[Error \/ SearchResult[Error \/ JsValue]]]] - .run(output.map(_ map (_ map (_ map ((_: A).toJson)))).run) - } - - private implicit def searchResults(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): MkHttpResponse[Future[Error \/ SearchResult[Error \/ JsValue]]] = - MkHttpResponse { output => - output.flatMap(_.fold(e => Future(httpResponseError(e, logger)), searchHttpResponse)) - } - - private implicit def mkHttpResponseEitherT(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): MkHttpResponse[ET[HttpResponse]] = - MkHttpResponse { output => - implicitly[MkHttpResponse[Future[Error \/ HttpResponse]]].run(output.run) - } - - private implicit def mkHttpResponse(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): MkHttpResponse[Future[Error \/ HttpResponse]] = - MkHttpResponse { output => - output.map(_.fold(httpResponseError(_, logger), identity)) - } - - private def searchHttpResponse( - searchResult: SearchResult[Error \/ JsValue] - )(implicit lc: LoggingContextOf[RequestID]): Future[HttpResponse] = { - import json.JsonProtocol.* - - (searchResult match { - case OkResponse(result, warnings, _) => - val warningsJsVal: Option[JsValue] = warnings.map(SprayJson.encodeUnsafe(_)) - 
ResponseFormats.resultJsObject(result via filterStreamErrors, warningsJsVal) - case error: ErrorResponse => - val jsVal: JsValue = SprayJson.encodeUnsafe(error) - Future((Source.single(ByteString(jsVal.compactPrint)), StatusCodes.InternalServerError)) - }).map { case (response: Source[ByteString, NotUsed], statusCode: StatusCode) => - HttpResponse( - status = statusCode, - entity = HttpEntity - .Chunked(ContentTypes.`application/json`, response.map(HttpEntity.ChunkStreamPart(_))), - ) - } - } - - private[this] def filterStreamErrors[A](implicit - lc: LoggingContextOf[RequestID] - ): Flow[Error \/ A, Error \/ A, NotUsed] = - Flow[Error \/ A].map { - case -\/(ServerError(t)) => - val hideMsg = "internal server error" - logger.error( - s"hiding internal error details from response, responding '$hideMsg' instead, ${lc.makeString}", - t, - ) - -\/(ServerError.fromMsg(hideMsg)) - case o => o - } - - private implicit def fullySync[A: JsonWriter](implicit - metrics: HttpApiMetrics, - lc: LoggingContextOf[InstanceUUID with RequestID], - ): MkHttpResponse[ET[SyncResponse[A]]] = MkHttpResponse { result => - Timed.future( - metrics.responseCreationTimer, - result - .flatMap { x => - either(SprayJson.encode1(x).map(y => (y, x.status)).liftErr(ServerError.fromMsg)) - } - .run - .map { - case -\/(e) => - httpResponseError(e, logger) - case \/-((jsVal, status)) => - HttpResponse( - entity = HttpEntity.Strict(ContentTypes.`application/json`, format(jsVal)), - status = status, - ) - }, - ) - } - -} - -object V1Routes extends NoTracing { - type ET[A] = EitherT[Future, Error, A] - - final class IntoEndpointsError[-A](val run: A => Error) extends AnyVal - object IntoEndpointsError { - import LedgerClientJwt.Grpc.Category - - implicit val id: IntoEndpointsError[Error] = new IntoEndpointsError(identity) - - implicit val fromCommands: IntoEndpointsError[CommandService.Error] = new IntoEndpointsError({ - case CommandService.InternalError(id, reason) => - ServerError( - new Exception( - s"command service error, ${id.cata(sym => s"${sym.name}: ", "")}${reason.getMessage}", - reason, - ) - ) - case CommandService.GrpcError(status) => - ParticipantServerError(status) - case CommandService.ClientError(-\/(Category.PermissionDenied), message) => - Unauthorized(message) - case CommandService.ClientError(\/-(Category.InvalidArgument), message) => - InvalidUserInput(message) - }) - - implicit val fromContracts: IntoEndpointsError[ContractsService.Error] = - new IntoEndpointsError({ case ContractsService.InternalError(id, msg) => - ServerError.fromMsg(s"contracts service error, ${id.name}: $msg") - }) - } - - private final case class MkHttpResponse[-T](run: T => Future[HttpResponse]) - - def doLoad( - packageClient: PackageClient, - ledgerReader: LedgerReader, - loadCache: LedgerReader.LoadCache, - )(jwt: Jwt)(ids: Set[String])(implicit - ec: ExecutionContext, - lc: LoggingContextOf[InstanceUUID], - ): Future[PackageService.ServerError \/ Option[PackageStore]] = - ledgerReader - .loadPackageStoreUpdates( - packageClient, - loadCache, - some(jwt.value), - )(ids) - .map(_.leftMap(e => PackageService.ServerError(e))) - - def buildJsonCodecs( - packageService: PackageService - )(implicit ec: ExecutionContext): (ApiJsonEncoder, ApiJsonDecoder) = { - - val lfTypeLookup = LedgerReader.damlLfTypeLookup(() => packageService.packageStore) _ - val jsValueToApiValueConverter = new JsValueToApiValueConverter(lfTypeLookup) - - val apiValueToJsValueConverter = new ApiValueToJsValueConverter( - ApiValueToLfValueConverter.apiValueToLfValue 
- ) - - val encoder = new ApiJsonEncoder( - apiValueToJsValueConverter.apiRecordToJsObject, - apiValueToJsValueConverter.apiValueToJsValue, - ) - - val decoder = new ApiJsonDecoder( - packageService.resolveContractTypeId, - packageService.resolveTemplateRecordType, - packageService.resolveChoiceArgType, - packageService.resolveKeyType, - jsValueToApiValueConverter.jsValueToApiValue, - jsValueToApiValueConverter.jsValueToLfValue, - ) - - (encoder, decoder) - } - - // TODO(#23504) remove submitAndWaitForTransactionTree as it is deprecated - @nowarn("cat=deprecation") - def apply( - ledgerClient: DamlLedgerClient, - allowNonHttps: Boolean, - decodeJwt: EndpointsCompanion.ValidateJwt, - shouldLogHttpBodies: Boolean, - resolveUser: ResolveUser, - userManagementClient: UserManagementClient, - loggerFactory: NamedLoggerFactory, - websocketConfig: Option[WebsocketConfig], - maxTimeToCollectRequest: FiniteDuration = FiniteDuration(5, "seconds"), - )(implicit - ec: ExecutionContext, - lc: LoggingContextOf[InstanceUUID], - mat: Materializer, - ): V1Routes = { - - val directEc = DirectExecutionContext(noTracingLogger) - - val packageCache = LedgerReader.LoadCache.freshCache() - - val packageService = new PackageService( - reloadPackageStoreIfChanged = - doLoad(ledgerClient.packageService, LedgerReader(loggerFactory), packageCache), - loggerFactory = loggerFactory, - ) - - val (encoder, decoder) = buildJsonCodecs(packageService) - - val ledgerClientJwt = LedgerClientJwt(loggerFactory) - - val commandService = new CommandService( - ledgerClientJwt.submitAndWaitForTransaction(ledgerClient), - ledgerClientJwt.submitAndWaitForTransactionTree(ledgerClient), - loggerFactory, - ) - - val contractsService = new ContractsService( - packageService.resolveContractTypeId, - packageService.allTemplateIds, - ledgerClientJwt.getByContractId(ledgerClient), - ledgerClientJwt.getActiveContracts(ledgerClient), - ledgerClientJwt.getCreatesAndArchivesSince(ledgerClient), - ledgerClientJwt.getLedgerEnd(ledgerClient), - loggerFactory, - ) - - val partiesService = new PartiesService( - ledgerClientJwt.listKnownParties(ledgerClient), - ledgerClientJwt.getParties(ledgerClient), - ledgerClientJwt.allocateParty(ledgerClient), - ) - - val packageManagementService = new PackageManagementService( - ledgerClientJwt.listPackages(ledgerClient), - ledgerClientJwt.getPackage(ledgerClient), - { case (jwt, byteString) => - implicit lc => - ledgerClientJwt - .uploadDar(ledgerClient)(directEc, traceContext)( - jwt, - byteString, - )(lc) - .flatMap(_ => packageService.reload(jwt)) - .map(_ => ()) - }, - ) - - val websocketService = new WebSocketService( - contractsService, - packageService.resolveContractTypeId, - decoder, - websocketConfig, - loggerFactory, - ) - - val websocketEndpoints = new WebsocketEndpoints( - decodeJwt, - websocketService, - resolveUser, - loggerFactory, - ) - - new V1Routes( - allowNonHttps, - decodeJwt, - commandService, - contractsService, - partiesService, - packageManagementService, - websocketEndpoints, - encoder, - decoder, - shouldLogHttpBodies, - resolveUser, - userManagementClient, - loggerFactory, - maxTimeToCollectRequest, - ) - } -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/ValuePredicate.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/ValuePredicate.scala deleted file mode 100644 index b2d9ddb34b..0000000000 --- 
a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/ValuePredicate.scala +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json.v1 - -import com.digitalasset.daml.lf.value.Value as V - -object ValuePredicate { - type LfV = V -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/WebSocketService.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/WebSocketService.scala deleted file mode 100644 index 72be73b795..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/WebSocketService.scala +++ /dev/null @@ -1,1041 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json.v1 - -import com.daml.jwt.Jwt -import com.daml.logging.LoggingContextOf -import com.daml.nonempty.NonEmpty -import com.daml.nonempty.NonEmptyColl.foldable1 -import com.digitalasset.canton.fetchcontracts.util.ContractStreamStep.{Acs, LiveBegin, Txn} -import com.digitalasset.canton.fetchcontracts.util.GraphExtensions.* -import com.digitalasset.canton.fetchcontracts.util.{ - AbsoluteBookmark, - BeginBookmark, - ContractStreamStep, - InsertDeleteStep, -} -import com.digitalasset.canton.http.ContractTypeId.RequiredPkg -import com.digitalasset.canton.http.EndpointsCompanion.* -import com.digitalasset.canton.http.ResolvedQuery.Unsupported -import com.digitalasset.canton.http.json.JsonProtocol.LfValueCodec.apiValueToJsValue as lfValueToJsValue -import com.digitalasset.canton.http.json.{ApiJsonDecoder, JsonProtocol, SprayJson} -import com.digitalasset.canton.http.metrics.HttpApiMetrics -import com.digitalasset.canton.http.util.ApiValueToLfValueConverter.apiValueToLfValue -import com.digitalasset.canton.http.util.FlowUtil.allowOnlyFirstInput -import com.digitalasset.canton.http.util.Logging.{ - InstanceUUID, - RequestID, - extendWithRequestIdLogCtx, -} -import com.digitalasset.canton.http.{ - ActiveContract, - ArchivedContract, - AsyncWarningsWrapper, - ContractId, - ContractKeyStreamRequest, - ContractTypeId, - ContractTypeRef, - JwtPayload, - LfValue, - Offset, - PartySet, - ResolvedQuery, - SearchForeverQuery, - SearchForeverRequest, - StartingOffset, - UnknownTemplateIds, - WebsocketConfig, - util, -} -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.tracing.NoTracing -import org.apache.pekko.NotUsed -import org.apache.pekko.http.scaladsl.model.ws.{BinaryMessage, Message, TextMessage} -import org.apache.pekko.stream.Materializer -import org.apache.pekko.stream.scaladsl.{Flow, Sink, Source} -import scalaz.EitherT.{either, eitherT, rightT} -import scalaz.Liskov.<~< -import scalaz.std.list.* -import scalaz.std.map.* -import scalaz.std.option.* -import scalaz.std.scalaFuture.* -import scalaz.std.tuple.* -import scalaz.syntax.bifunctor.* -import scalaz.syntax.std.boolean.* -import scalaz.syntax.std.option.* -import scalaz.syntax.traverse.* -import scalaz.{-\/, Foldable, Liskov, NonEmptyList, Tag, \/, \/-} -import spray.json.{JsArray, JsObject, JsValue, JsonReader, JsonWriter, enrichAny as `sj enrichAny`} - -import scala.concurrent.{ExecutionContext, Future} - -import LedgerClientJwt.Terminates 
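Stepping back to the `MkHttpResponse` machinery deleted above: it is a small contravariant typeclass, resolved implicitly, so a single `toRoute` entry point can render many result shapes. A self-contained sketch of the idea (all names are illustrative):

// A contravariant "renderer" typeclass: Render[T] knows how to turn a T into output.
final case class Render[-T](run: T => String)

object Render {
  implicit val renderInt: Render[Int] = Render(i => s"int:$i")
  // Derived instance: render a List[A] given a renderer for A.
  implicit def renderList[A](implicit r: Render[A]): Render[List[A]] =
    Render(_.map(r.run).mkString("[", ",", "]"))
}

def respond[T](t: T)(implicit R: Render[T]): String = R.run(t)

// respond(3) == "int:3"; respond(List(1, 2)) == "[int:1,int:2]"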
-import ValuePredicate.LfV - -object WebSocketService extends NoTracing { - import com.digitalasset.canton.http.util.ErrorOps.* - - final case class StreamPredicate[+Positive]( - resolvedQuery: ResolvedQuery, - unresolved: Set[ContractTypeId.RequiredPkg], - fn: (ActiveContract.ResolvedCtTyId[LfV], Option[Offset]) => Option[Positive], - ) - - /** If an element satisfies `prefix`, consume it and emit the result alongside the next element - * (which is not similarly tested); otherwise, emit it. - * - * {{{ - * withOptPrefix(_ => None) - * = Flow[I].map((None, _)) - * - * Source(Seq(1, 2, 3, 4)) via withOptPrefix(Some(_)) - * = Source(Seq((Some(1), 2), (Some(3), 4))) - * }}} - */ - private def withOptPrefix[I, L](prefix: I => Option[L]): Flow[I, (Option[L], I), NotUsed] = - Flow[I] - .scan(none[L \/ (Option[L], I)]) { (s, i) => - s match { - case Some(-\/(l)) => Some(\/-((some(l), i))) - case None | Some(\/-(_)) => Some(prefix(i) toLeftDisjunction ((none, i))) - } - } - .collect { case Some(\/-(oli)) => oli } - - private final case class StepAndErrors[+Pos, +LfVT]( - errors: Seq[ServerError], - step: ContractStreamStep[ - ArchivedContract, - (ActiveContract.ResolvedCtTyId[LfVT], Pos), - ], - loggerFactory: NamedLoggerFactory, - ) extends NamedLogging { - import JsonProtocol.* - - def logHiddenErrors()(implicit lc: LoggingContextOf[InstanceUUID]): Unit = - errors foreach { case ServerError(reason) => - logger.error(s"while rendering contract, ${lc.makeString}", reason) - } - - def render(implicit lfv: LfVT <~< JsValue, pos: Pos <~< Map[String, JsValue]): JsObject = { - - def inj[V: JsonWriter](ctor: String, v: V) = JsObject(ctor -> v.toJson) - - val InsertDeleteStep(inserts, deletes) = - Liskov - .lift2[StepAndErrors, Pos, Map[String, JsValue], LfVT, JsValue](pos, lfv)(this) - .step - .toInsertDelete - - val events = (deletes.valuesIterator.map(inj("archived", _)).toVector - ++ inserts.map { case (ac, pos) => - val acj = inj("created", ac) - acj.copy(fields = acj.fields ++ pos) - } ++ errors.map(_ => inj("error", "error rendering contract"))) - // TODO(i13377) ^ all useful information is now hidden; - // can replace with an error count in later API version - - val offsetAfter = step.bookmark.map(_.toJson) - - renderEvents(events, offsetAfter) - } - - def append[P >: Pos, A >: LfVT](o: StepAndErrors[P, A]): StepAndErrors[P, A] = - StepAndErrors(errors ++ o.errors, step append o.step, loggerFactory) - - def mapLfv[A](f: LfVT => A): StepAndErrors[Pos, A] = - copy(step = step mapPreservingIds (_ leftMap (_ map f))) - - def mapPos[P](f: Pos => P): StepAndErrors[P, LfVT] = - copy(step = step mapPreservingIds (_ rightMap f)) - - def nonEmpty: Boolean = errors.nonEmpty || step.nonEmpty - } - - private def renderEvents(events: Vector[JsObject], offset: Option[JsValue]): JsObject = - JsObject(Map("events" -> JsArray(events)) ++ offset.map("offset" -> _).toList) - - private def readStartingOffset(jv: JsValue): Option[Error \/ StartingOffset] = - jv match { - case JsObject(fields) => - fields get "offset" map { offJv => - import JsonProtocol.* - if (fields.sizeIs > 1) - -\/(InvalidUserInput("offset must be specified as a leading, separate object message")) - else - SprayJson - .decode[Offset](offJv) - .liftErr[Error](InvalidUserInput.apply) - .map(offset => StartingOffset(offset)) - } - case _ => None - } - - private def conflation[P, A]: Flow[StepAndErrors[P, A], StepAndErrors[P, A], NotUsed] = { - val maxCost = 200L - Flow[StepAndErrors[P, A]] - .batchWeighted( - max = maxCost, - costFn = { - case 
StepAndErrors(errors, ContractStreamStep.LiveBegin(_), _) => - 1L + errors.length - case StepAndErrors(errors, step, _) => - val InsertDeleteStep(inserts, deletes) = step.toInsertDelete - errors.length.toLong + (inserts.length * 2) + deletes.size - }, - identity, - )(_ append _) - } - - final case class ResolvedQueryRequest[R](q: R, alg: StreamQuery[R]) - - sealed abstract class StreamRequestParser[A] { - case class QueryRequest[Q](request: Q, resolver: RequestResolver[Q]) - def parse( - resumingAtOffset: Boolean, - decoder: ApiJsonDecoder, - jv: JsValue, - jwt: Jwt, - )(implicit - lc: LoggingContextOf[InstanceUUID] - ): Future[Error \/ (_ <: QueryRequest[_])] - } - - trait RequestResolver[Q] { - def resolve( - req: Q, - resolveContractTypeId: PackageService.ResolveContractTypeId, - jwt: Jwt, - )(implicit - lc: LoggingContextOf[InstanceUUID] - ): Future[Error \/ ResolvedQueryRequest[_]] - } - - sealed trait StreamQuery[R] { - - /** Extra data on success of a predicate. */ - type Positive - - def removePhantomArchives(resolvedRequest: R): Option[Set[ContractId]] - - private[WebSocketService] def predicate( - resolvedRequest: R, - resolveContractTypeId: PackageService.ResolveContractTypeId, - jwt: Jwt, - )(implicit - lc: LoggingContextOf[InstanceUUID] - ): Future[StreamPredicate[Positive]] - - def renderCreatedMetadata(p: Positive): Map[String, JsValue] - - def acsRequest( - maybePrefix: Option[StartingOffset], - resolvedRequest: R, - ): Option[R] - - /** Perform any necessary adjustment to the request based on the prefix - */ - def adjustRequest( - prefix: Option[StartingOffset], - resolvedRequest: R, - ): R - - /** Specify the offset from which the live part of the query should start - */ - def liveStartingOffset( - prefix: Option[StartingOffset], - resolvedRequest: R, - ): Option[StartingOffset] - } - - final case class ResolvedSearchForeverRequest( - resolvedQuery: ResolvedQuery, - queriesWithPos: NonEmpty[List[(ResolvedSearchForeverQuery, Int)]], - unresolved: Set[ContractTypeId.RequiredPkg], - ) - - final case class ResolvedSearchForeverQuery( - resolvedQuery: ResolvedQuery, - offset: Option[Offset], - ) - - implicit def SearchForeverRequestWithStreamQuery(implicit - ec: ExecutionContext - ): StreamRequestParser[SearchForeverRequest] = - new StreamRequestParser[SearchForeverRequest] - with StreamQuery[ResolvedSearchForeverRequest] - with RequestResolver[SearchForeverRequest] { - type Positive = NonEmptyList[Int] - - override def parse( - resumingAtOffset: Boolean, - decoder: ApiJsonDecoder, - jv: JsValue, - jwt: Jwt, - )(implicit - lc: LoggingContextOf[InstanceUUID] - ) = { - import JsonProtocol.* - Future.successful( - SprayJson - .decode[SearchForeverRequest](jv) - .liftErr[Error](InvalidUserInput.apply) - .map(QueryRequest(_, this)) - ) - } - - override def resolve( - req: SearchForeverRequest, - resolveContractTypeId: PackageService.ResolveContractTypeId, - jwt: Jwt, - )(implicit - lc: LoggingContextOf[InstanceUUID] - ): Future[Error \/ ResolvedQueryRequest[_]] = { - import scalaz.syntax.foldable.* - - def resolveIds( - sfq: SearchForeverQuery - ): Future[(Set[ContractTypeRef.Resolved], Set[ContractTypeId.RequiredPkg])] = - sfq.templateIds.toList.toNEF - .traverse(x => resolveContractTypeId(jwt)(x).map(_.toOption.flatten.toLeft(x))) - .map( - _.toSet.partitionMap( - identity[ - Either[ContractTypeRef.Resolved, ContractTypeId.RequiredPkg] - ] - ) - ) - - def query( - sfq: SearchForeverQuery, - pos: Int, - ): Future[ - Unsupported \/ ( - ResolvedSearchForeverQuery, - Int, - 
Set[ContractTypeId.RequiredPkg], - ) - ] = for { - partitionedResolved <- resolveIds(sfq) - (resolved, unresolved) = partitionedResolved - res = ResolvedQuery(resolved).map { rq => - (ResolvedSearchForeverQuery(rq, sfq.offset), pos, unresolved) - } - } yield res - - Future - .sequence( - req.queriesWithPos - .map((query _).tupled) - .toList - ) - .map { l => - val ( - err: List[Unsupported], - ok: List[ - ( - ResolvedSearchForeverQuery, - Int, - Set[ContractTypeId.RequiredPkg], - ) - ], - ) = - l.partitionMap(_.toEither) - if (err.nonEmpty) -\/(InvalidUserInput(err.map(_.errorMsg).mkString)) - else if (ok.isEmpty) -\/(InvalidUserInput(ResolvedQuery.CannotBeEmpty.errorMsg)) - else { - val queriesWithPos = ok.map { case (q, p, _) => (q, p) } - val unresolved = ok.flatMap { case (_, _, unresolved) => unresolved }.toSet - val resolvedQuery = - ResolvedQuery(ok.flatMap { case (q, _, _) => q.resolvedQuery.resolved }.toSet) - .leftMap(unsupported => InvalidUserInput(unsupported.errorMsg)) - resolvedQuery.flatMap { rq => - queriesWithPos match { - case NonEmpty(list) => - \/-( - ResolvedQueryRequest( - ResolvedSearchForeverRequest(rq, list, unresolved), - this, - ) - ) - case _ => -\/(InvalidUserInput(ResolvedQuery.CannotBeEmpty.errorMsg)) - } - } - } - } - } - - override def removePhantomArchives(request: ResolvedSearchForeverRequest) = None - - override private[WebSocketService] def predicate( - request: ResolvedSearchForeverRequest, - resolveContractTypeId: PackageService.ResolveContractTypeId, - jwt: Jwt, - )(implicit - lc: LoggingContextOf[InstanceUUID] - ): Future[StreamPredicate[Positive]] = { - - import util.Collections.* - - val indexedOffsets: Vector[Option[Offset]] = - request.queriesWithPos.map { case (q, _) => q.offset }.toVector - - def matchesOffset(queryIndex: Int, maybeEventOffset: Option[Offset]): Boolean = { - import Offset.`Offset ordering` - import scalaz.syntax.order.* - val matches = - for { - queryOffset <- indexedOffsets(queryIndex) - eventOffset <- maybeEventOffset - } yield eventOffset > queryOffset - matches.getOrElse(true) - } - - def fn( - q: Map[ContractTypeId.ResolvedPkgId, NonEmptyList[(Int, Int)]] - )( - a: ActiveContract.ResolvedCtTyId[LfV], - o: Option[Offset], - ): Option[Positive] = - q.get(a.templateId).flatMap { preds => - preds.collect(Function unlift { case (ix, pos) => - matchesOffset(ix, o).option(pos) - }) - } - - def query( - rsfq: ResolvedSearchForeverQuery, - pos: Int, - ix: Int, - ): NonEmpty[Map[ContractTypeId.ResolvedPkgId, NonEmptyList[ - (Int, Int) - ]]] = - rsfq.resolvedQuery.resolved.flatMap(_.allPkgIds).map(_ -> NonEmptyList(ix -> pos)).toMap - - val q = { - import scalaz.syntax.foldable1.* - request.queriesWithPos.zipWithIndex // index is used to ensure matchesOffset works properly - .map { case ((q, pos), ix) => (q, pos, ix) } - .toNEF - .foldMap1((query _).tupled) - } - - Future.successful( - StreamPredicate( - request.resolvedQuery, - request.unresolved, - fn(q), - ) - ) - } - - override def renderCreatedMetadata(p: Positive) = - Map { - import JsonProtocol.* - "matchedQueries" -> p.toJson - } - - override def acsRequest( - maybePrefix: Option[StartingOffset], - request: ResolvedSearchForeverRequest, - ): Option[ResolvedSearchForeverRequest] = { - val withoutOffset = - NonEmpty.from(request.queriesWithPos.filter { case (q, _) => q.offset.isEmpty }) - - withoutOffset.map( - ResolvedSearchForeverRequest(request.resolvedQuery, _, request.unresolved) - ) - } - - override def adjustRequest( - prefix: Option[StartingOffset], - request: 
ResolvedSearchForeverRequest, - ): ResolvedSearchForeverRequest = - prefix.fold(request)(prefix => - request.copy( - queriesWithPos = request.queriesWithPos.map { - _ leftMap (q => q.copy(offset = q.offset.orElse(Some(prefix.offset)))) - } - ) - ) - - import Offset.`Offset ordering` - import scalaz.std.option.optionOrder - import scalaz.syntax.foldable1.* - - // This is called after `adjustRequest` already filled in the blank offsets - override def liveStartingOffset( - prefix: Option[StartingOffset], - request: ResolvedSearchForeverRequest, - ): Option[StartingOffset] = - request.queriesWithPos - .map { case (q, _) => q.offset } - .toNEF - .minimumBy1(identity) - .map(StartingOffset(_)) - - } - - final case class ResolvedContractKeyStreamRequest[C, V]( - resolvedQuery: ResolvedQuery, - list: NonEmptyList[ContractKeyStreamRequest[C, V]], - q: NonEmpty[Map[ContractTypeRef.Resolved, NonEmpty[Set[V]]]], - unresolved: Set[ContractTypeId.RequiredPkg], - ) - - implicit def EnrichedContractKeyWithStreamQuery(implicit - ec: ExecutionContext - ): StreamRequestParser[ContractKeyStreamRequest[_, _]] = - new StreamRequestParser[ContractKeyStreamRequest[_, _]] { - import JsonProtocol.* - - override def parse( - resumingAtOffset: Boolean, - decoder: ApiJsonDecoder, - jv: JsValue, - jwt: Jwt, - )(implicit - lc: LoggingContextOf[InstanceUUID] - ) = { - type NelCKRH[Hint, V] = NonEmptyList[ContractKeyStreamRequest[Hint, V]] - def go[Hint]( - resolver: RequestResolver[NelCKRH[Hint, LfV]] - )(implicit ev: JsonReader[NelCKRH[Hint, JsValue]]) = - for { - as <- either[Future, Error, NelCKRH[Hint, JsValue]]( - SprayJson - .decode[NelCKRH[Hint, JsValue]](jv) - .liftErr[Error](InvalidUserInput.apply) - ) - bs <- rightT { - as.map(a => decodeWithFallback(decoder, a, jwt)).sequence - } - } yield QueryRequest(bs, resolver) - if (resumingAtOffset) go(ResumingEnrichedContractKeyWithStreamQuery()) - else go(InitialEnrichedContractKeyWithStreamQuery()) - }.run - - private def decodeWithFallback[Hint]( - decoder: ApiJsonDecoder, - a: ContractKeyStreamRequest[Hint, JsValue], - jwt: Jwt, - )(implicit - lc: LoggingContextOf[InstanceUUID] - ): Future[ContractKeyStreamRequest[Hint, LfValue]] = - decoder - .decodeUnderlyingValuesToLf(a, jwt) - .run - .map( - _.valueOr(_ => a.map(_ => com.digitalasset.daml.lf.value.Value.ValueUnit)) - ) // unit will not match any key - } - - private[this] sealed abstract class EnrichedContractKeyWithStreamQuery[Cid](implicit - ec: ExecutionContext - ) extends StreamQuery[ResolvedContractKeyStreamRequest[Cid, LfV]] - with RequestResolver[NonEmptyList[ContractKeyStreamRequest[Cid, LfV]]] { - type Positive = Unit - - protected type CKR[+V] = ContractKeyStreamRequest[Cid, V] - - override def resolve( - request: NonEmptyList[ContractKeyStreamRequest[Cid, LfV]], - resolveContractTypeId: PackageService.ResolveContractTypeId, - jwt: Jwt, - )(implicit - lc: LoggingContextOf[InstanceUUID] - ): Future[Error \/ ResolvedQueryRequest[_]] = { - def getQ[K, V](resolvedWithKey: NonEmpty[Set[(K, V)]]): NonEmpty[Map[K, NonEmpty[Set[V]]]] = - resolvedWithKey.groupMap(_._1)(_._2) - - request.toList - .traverse { (x: CKR[LfV]) => - resolveContractTypeId(jwt)(x.ekey.templateId) - .map(_.toOption.flatten.map((_, x.ekey.key)).toLeft(x.ekey.templateId)) - } - .map { resolveTries => - val (resolvedWithKey, unresolved) = resolveTries - .toSet[Either[(ContractTypeRef.Resolved, LfV), RequiredPkg]] - .partitionMap(identity) - for { - resolvedWithKey <- (NonEmpty from resolvedWithKey - toRightDisjunction 
InvalidUserInput(ResolvedQuery.CannotBeEmpty.errorMsg)) - q = getQ(resolvedWithKey) - rq <- ResolvedQuery(q.keySet) - .leftMap(unsupported => InvalidUserInput(unsupported.errorMsg)) - } yield ResolvedQueryRequest( - ResolvedContractKeyStreamRequest(rq, request, q, unresolved), - this, - ) - } - } - - override private[WebSocketService] def predicate( - resolvedRequest: ResolvedContractKeyStreamRequest[Cid, LfV], - resolveContractTypeId: PackageService.ResolveContractTypeId, - jwt: Jwt, - )(implicit - lc: LoggingContextOf[InstanceUUID] - ): Future[StreamPredicate[Positive]] = { - def fn( - q: Map[ContractTypeId.ResolvedPkgId, NonEmpty[Set[LfV]]] - ): (ActiveContract.ResolvedCtTyId[LfV], Option[Offset]) => Option[Positive] = { (a, _) => - a.key match { - case None => None - case Some(k) => - if (q.get(a.templateId).exists(_ contains k)) Some(()) else None - } - } - - Future.successful( - StreamPredicate[Positive]( - resolvedRequest.resolvedQuery, - resolvedRequest.unresolved, - fn(resolvedRequest.q.flatMap { case (k, v) => k.allPkgIds.map(_ -> v) }.forgetNE.toMap), - ) - ) - } - - override def renderCreatedMetadata(p: Unit) = Map.empty - - override def adjustRequest( - prefix: Option[StartingOffset], - request: ResolvedContractKeyStreamRequest[Cid, LfV], - ): ResolvedContractKeyStreamRequest[Cid, LfV] = request - - override def acsRequest( - maybePrefix: Option[StartingOffset], - request: ResolvedContractKeyStreamRequest[Cid, LfV], - ): Option[ResolvedContractKeyStreamRequest[Cid, LfV]] = - maybePrefix.cata(_ => None, Some(request)) - - override def liveStartingOffset( - prefix: Option[StartingOffset], - request: ResolvedContractKeyStreamRequest[Cid, LfV], - ): Option[StartingOffset] = prefix - - } - - private[WebSocketService] final class InitialEnrichedContractKeyWithStreamQuery private ()( - implicit ec: ExecutionContext - ) extends EnrichedContractKeyWithStreamQuery[Unit] { - override def removePhantomArchives(request: ResolvedContractKeyStreamRequest[Unit, LfV]) = - Some(Set.empty) - } - - object InitialEnrichedContractKeyWithStreamQuery { - def apply()(implicit ec: ExecutionContext) = new InitialEnrichedContractKeyWithStreamQuery() - } - - private[WebSocketService] final class ResumingEnrichedContractKeyWithStreamQuery private ()( - implicit ec: ExecutionContext - ) extends EnrichedContractKeyWithStreamQuery[Option[Option[ContractId]]] { - override def removePhantomArchives( - request: ResolvedContractKeyStreamRequest[Option[Option[ContractId]], LfV] - ) = { - val NelO = Foldable[NonEmptyList].compose[Option] - request.list traverse (_.contractIdAtOffset) map NelO.toSet - } - } - - object ResumingEnrichedContractKeyWithStreamQuery { - def apply()(implicit ec: ExecutionContext) = new ResumingEnrichedContractKeyWithStreamQuery() - } - - private abstract sealed class TickTriggerOrStep[+A] extends Product with Serializable - private final case object TickTrigger extends TickTriggerOrStep[Nothing] - private final case class Step[A](payload: StepAndErrors[A, JsValue]) extends TickTriggerOrStep[A] -} - -class WebSocketService( - contractsService: ContractsService, - resolveContractTypeId: PackageService.ResolveContractTypeId, - decoder: ApiJsonDecoder, - wsConfig: Option[WebsocketConfig], - val loggerFactory: NamedLoggerFactory, -)(implicit mat: Materializer, ec: ExecutionContext) - extends NamedLogging - with NoTracing { - - import ContractsService.buildTransactionFilter - import WebSocketService.* - import com.daml.scalautil.Statement.discard - import util.ErrorOps.* - import 
com.digitalasset.canton.http.json.JsonProtocol.* - - private val config = wsConfig.getOrElse(WebsocketConfig()) - - private val numConns = new java.util.concurrent.atomic.AtomicInteger(0) - - def transactionMessageHandler[A: StreamRequestParser]( - jwt: Jwt, - jwtPayload: JwtPayload, - )(implicit - lc: LoggingContextOf[InstanceUUID], - metrics: HttpApiMetrics, - ): Flow[Message, Message, _] = - wsMessageHandler[A](jwt, jwtPayload) - .via(applyConfig) - .via(connCounter) - - private def applyConfig[A]: Flow[A, A, NotUsed] = - Flow[A] - .takeWithin(config.maxDuration) - .throttle(config.throttleElem, config.throttlePer, config.maxBurst, config.mode) - - private def connCounter[A](implicit - lc: LoggingContextOf[InstanceUUID], - metrics: HttpApiMetrics, - ): Flow[A, A, NotUsed] = - Flow[A] - .watchTermination() { (_, future) => - val afterInc = numConns.incrementAndGet() - metrics.websocketRequestCounter.inc() - logger.info( - s"New websocket client has connected, current number of clients:$afterInc, ${lc.makeString}" - ) - future onComplete { td => - def msg = td.fold( - ex => s"interrupted on Failure: ${ex.getMessage}. remaining", - _ => "has disconnected. Current", - ) - val afterDec = numConns.decrementAndGet() - metrics.websocketRequestCounter.dec() - logger.info(s"Websocket client $msg number of clients: $afterDec, ${lc.makeString}") - } - NotUsed - } - - private def wsMessageHandler[A: StreamRequestParser]( - jwt: Jwt, - jwtPayload: JwtPayload, - )(implicit - ec: ExecutionContext, - lc: LoggingContextOf[InstanceUUID], - ): Flow[Message, Message, NotUsed] = { - val Q = implicitly[StreamRequestParser[A]] - Flow[Message] - .mapAsync(1)(parseJson) - .via(withOptPrefix(ejv => ejv.toOption flatMap readStartingOffset)) - .mapAsync(1) { case (oeso, ejv) => - (for { - offPrefix <- either[Future, Error, Option[StartingOffset]](oeso.sequence) - jv <- either[Future, Error, JsValue](ejv) - a <- eitherT( - Q.parse( - resumingAtOffset = offPrefix.isDefined, - decoder, - jv, - jwt, - ): Future[ - Error \/ Q.QueryRequest[_] - ] - ) - } yield (offPrefix, a: Q.QueryRequest[_])).run - } - .mapAsync(1) { - _.map { case (offPrefix, qq: Q.QueryRequest[q]) => - qq.resolver - .resolve( - qq.request, - resolveContractTypeId, - jwt, - ) - .map(_.map(resolved => (offPrefix, resolved))) - } - .fold(e => Future.successful(-\/(e)), identity) - } - .via( - allowOnlyFirstInput( - InvalidUserInput("Multiple requests over the same WebSocket connection are not allowed.") - ) - ) - .flatMapMerge( - 2, // 2 streams max, the 2nd is to be able to send an error back - _.map { case (offPrefix, rq: ResolvedQueryRequest[q]) => - implicit val SQ: StreamQuery[q] = rq.alg - getTransactionSourceForParty[q]( - jwt, - jwtPayload.parties, - offPrefix, - rq.q: q, - ) via logTermination(logger, "getTransactionSourceForParty") - }.valueOr(e => Source.single(-\/(e))): Source[Error \/ Message, NotUsed], - ) - .takeWhile(_.isRight, inclusive = true) // stop after emitting 1st error - .map( - _.fold(e => extendWithRequestIdLogCtx(implicit lc1 => wsErrorMessage(e)), identity): Message - ) - } - - private def parseJson(x: Message): Future[InvalidUserInput \/ JsValue] = x match { - case msg: TextMessage => - msg.toStrict(config.maxDuration).map { m => - SprayJson.parse(m.text).liftErr(InvalidUserInput.apply) - } - case bm: BinaryMessage => - // ignore binary messages but drain content to avoid the stream being clogged - discard(bm.dataStream.runWith(Sink.ignore)) - Future successful -\/( - InvalidUserInput( - "Invalid request. 
Expected a single TextMessage with JSON payload, got BinaryMessage" - ) - ) - } - - private[this] def fetchAndPreFilterAcs[Positive]( - predicate: StreamPredicate[Positive], - jwt: Jwt, - parties: PartySet, - )(implicit - lc: LoggingContextOf[InstanceUUID] - ): Future[Source[StepAndErrors[Positive, JsValue], NotUsed]] = - Future.successful { - contractsService - .liveAcsAsInsertDeleteStepSource( - jwt, - buildTransactionFilter(parties, predicate.resolvedQuery), - ) - .via( - convertFilterContracts( - predicate.resolvedQuery, - predicate.fn, - ) - ) - } - - // simple alias to avoid passing in the class parameters - private[this] def queryPredicate[A]( - request: A, - jwt: Jwt, - )(implicit - lc: LoggingContextOf[InstanceUUID], - Q: StreamQuery[A], - ): Future[StreamPredicate[Q.Positive]] = - Q.predicate(request, resolveContractTypeId, jwt) - - private def getTransactionSourceForParty[A]( - jwt: Jwt, - parties: PartySet, - offPrefix: Option[StartingOffset], - rawRequest: A, - )(implicit - lc: LoggingContextOf[InstanceUUID], - Q: StreamQuery[A], - ): Source[Error \/ Message, NotUsed] = { - // If there is a prefix, replace the empty offsets in the request with it - val request = Q.adjustRequest(offPrefix, rawRequest) - - // Take all remaining queries without offset; these will be the ones for which an ACS request is needed - val acsRequest = Q.acsRequest(offPrefix, request) - - // Stream predicates specific to the ACS part - val acsPred = - acsRequest - .map(queryPredicate(_, jwt)) - .sequence - - def liveFrom(resolvedQuery: ResolvedQuery)( - acsEnd: Option[StartingOffset] - ): Future[Source[StepAndErrors[Q.Positive, JsValue], NotUsed]] = { - val shiftedRequest = Q.adjustRequest(acsEnd, request) - val liveStartingOffset = Q.liveStartingOffset(acsEnd, shiftedRequest) - - // Produce the predicate that is going to be applied to the incoming transaction stream - // We need to apply this to the request with all the offsets shifted so that each stream - // can filter out anything from liveStartingOffset to the query-specific offset - queryPredicate(shiftedRequest, jwt).map { case StreamPredicate(_, _, fn) => - contractsService - .insertDeleteStepSource( - jwt, - buildTransactionFilter(parties, resolvedQuery), - liveStartingOffset, - Terminates.Never, - ) - .via(logTermination(logger, "insertDeleteStepSource with ACS")) - .via( - convertFilterContracts( - resolvedQuery, - fn, - ) - ) - .via(emitOffsetTicksAndFilterOutEmptySteps(liveStartingOffset)) - } - } - - def processResolved( - resolvedQuery: ResolvedQuery, - unresolved: Set[RequiredPkg], - fn: (ActiveContract.ResolvedCtTyId[LfV], Option[Offset]) => Option[Q.Positive], - ) = - acsPred - .flatMap( - _.flatMap { (vp: StreamPredicate[Q.Positive]) => - Some(fetchAndPreFilterAcs(vp, jwt, parties)) - }.cata( - _.map { acsAndLiveMarker => - acsAndLiveMarker - .mapAsync(1) { - case acs @ StepAndErrors(_, Acs(_), _) if acs.nonEmpty => - Future.successful(Source.single(acs)) - case StepAndErrors(_, Acs(_), _) => - Future.successful(Source.empty) - case liveBegin @ StepAndErrors(_, LiveBegin(offset), _) => - val acsEnd = offset.toOption.map(StartingOffset(_)) - liveFrom(resolvedQuery)(acsEnd).map(it => Source.single(liveBegin) ++ it) - case txn @ StepAndErrors(_, Txn(_, offset), _) => - val acsEnd = Some(StartingOffset(offset)) - liveFrom(resolvedQuery)(acsEnd).map(it => Source.single(txn) ++ it) - } - .flatMapConcat(it => it) - }, { - // This is the case where we made no ACS request because everything had an offset - // Get the earliest available offset
from where to start from - val liveStartingOffset = Q.liveStartingOffset(offPrefix, request) - Future.successful( - contractsService - .insertDeleteStepSource( - jwt, - buildTransactionFilter(parties, resolvedQuery), - liveStartingOffset, - Terminates.Never, - ) - .via(logTermination(logger, "insertDeleteStepSource without ACS")) - .via( - convertFilterContracts( - resolvedQuery, - fn, - ) - ) - .via(emitOffsetTicksAndFilterOutEmptySteps(liveStartingOffset)) - ) - }, - ).map( - _.via(removePhantomArchives(remove = Q.removePhantomArchives(request))) - .map { sae => - sae.logHiddenErrors() - sae.mapPos(Q.renderCreatedMetadata).render - } - .prepend(reportUnresolvedTemplateIds(unresolved)) - .map(jsv => \/-(wsMessage(jsv))) - ) - ) - - Source - .lazyFutureSource { () => - queryPredicate(request, jwt).flatMap { pred => - processResolved(pred.resolvedQuery, pred.unresolved, pred.fn) - } - } - .mapMaterializedValue(_ => NotUsed) - } - - private def emitOffsetTicksAndFilterOutEmptySteps[Pos]( - offset: Option[StartingOffset] - ): Flow[StepAndErrors[Pos, JsValue], StepAndErrors[Pos, JsValue], NotUsed] = { - - val zero = ( - offset.map(o => AbsoluteBookmark(o.offset)): Option[BeginBookmark[Offset]], - TickTrigger: TickTriggerOrStep[Pos], - ) - - Flow[StepAndErrors[Pos, JsValue]] - .map(a => Step(a)) - .keepAlive(config.heartbeatPeriod, () => TickTrigger) - .scan(zero) { - case ((None, _), TickTrigger) => - // skip all ticks we don't have the offset yet - (None, TickTrigger) - case ((Some(offset), _), TickTrigger) => - // emit an offset tick - (Some(offset), Step(offsetTick(offset))) - case ((_, _), msg @ Step(_)) => - // capture the new offset and emit the current step - val newOffset = msg.payload.step.bookmark - (newOffset, msg) - } - // filter non-empty Steps, we don't want to spam client with empty events - .collect { case (_, Step(x)) if x.nonEmpty => x } - } - - private def offsetTick[Pos](offset: BeginBookmark[Offset]) = - StepAndErrors[Pos, JsValue](Seq.empty, LiveBegin(offset), loggerFactory) - - private def removePhantomArchives[A, B](remove: Option[Set[ContractId]]) = - remove.cata(removePhantomArchives_[A, B], Flow[StepAndErrors[A, B]]) - - private def removePhantomArchives_[A, B]( - initialState: Set[ContractId] - ): Flow[StepAndErrors[A, B], StepAndErrors[A, B], NotUsed] = { - import ContractStreamStep.{Acs, LiveBegin, Txn} - Flow[StepAndErrors[A, B]] - .scan((Tag unsubst initialState, Option.empty[StepAndErrors[A, B]])) { - case ((s0, _), a0 @ StepAndErrors(_, Txn(idstep, _), _)) => - val newInserts: Vector[String] = - ContractId.unsubst(idstep.inserts.map(_._1.contractId)) - val (deletesToEmit, deletesToHold) = s0 partition idstep.deletes.keySet - val s1: Set[String] = deletesToHold ++ newInserts - val a1 = a0.copy(step = a0.step.mapDeletes(_.view.filterKeys(deletesToEmit).toMap)) - - (s1, if (a1.nonEmpty) Some(a1) else None) - - case ((deletesToHold, _), a0 @ StepAndErrors(_, Acs(inserts), _)) => - val newInserts: Vector[String] = ContractId.unsubst(inserts.map(_._1.contractId)) - val s1: Set[String] = deletesToHold ++ newInserts - (s1, Some(a0)) - - case ((s0, _), a0 @ StepAndErrors(_, LiveBegin(_), loggerFactory)) => - (s0, Some(a0)) - } - .collect { case (_, Some(x)) => x } - } - - def wsErrorMessage(error: Error)(implicit - lc: LoggingContextOf[InstanceUUID with RequestID] - ): TextMessage.Strict = - wsMessage(SprayJson.encodeUnsafe(errorResponse(error, logger))) - - def wsMessage(jsVal: JsValue): TextMessage.Strict = - TextMessage(jsVal.compactPrint) - - private def 
convertFilterContracts[Pos]( - resolvedQuery: ResolvedQuery, - fn: (ActiveContract.ResolvedCtTyId[LfV], Option[Offset]) => Option[Pos], - ): Flow[ContractStreamStep.LAV1, StepAndErrors[Pos, JsValue], NotUsed] = - Flow - .fromFunction { (step: ContractStreamStep.LAV1) => - val (aerrors, errors, dstep) = step.partitionBimap( - ae => - ArchivedContract - .fromLedgerApi(resolvedQuery, ae) - .liftErr(ServerError.fromMsg), - ce => - ActiveContract - .fromLedgerApi(ActiveContract.ExtractAs(resolvedQuery), ce) - .liftErr(ServerError.fromMsg) - .flatMap(_.traverse(apiValueToLfValue).liftErr(ServerError.fromMsg)), - )(Seq) - StepAndErrors( - errors ++ aerrors, - dstep mapInserts { (inserts: Vector[ActiveContract.ResolvedCtTyId[LfV]]) => - inserts.flatMap { ac => - fn(ac, dstep.bookmark.flatMap(_.toOption)).map((ac, _)).toList - } - }, - loggerFactory, - ) - } - .via(conflation) - .map(_ mapLfv lfValueToJsValue) - - private def reportUnresolvedTemplateIds( - unresolved: Set[ContractTypeId.RequiredPkg] - ): Source[JsValue, NotUsed] = - if (unresolved.isEmpty) Source.empty - else { - Source.single( - AsyncWarningsWrapper(UnknownTemplateIds(unresolved.toList)).toJson - ) - } -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/WebsocketEndpoints.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/WebsocketEndpoints.scala deleted file mode 100644 index faf2454856..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v1/WebsocketEndpoints.scala +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json.v1 - -import com.daml.jwt.Jwt -import com.daml.logging.LoggingContextOf -import com.daml.metrics.api.MetricsContext -import com.daml.metrics.pekkohttp.{MetricLabelsExtractor, WebSocketMetricsInterceptor} -import com.digitalasset.canton.http.EndpointsCompanion.* -import com.digitalasset.canton.http.metrics.HttpApiMetrics -import com.digitalasset.canton.http.util.Logging.{ - InstanceUUID, - RequestID, - extendWithRequestIdLogCtx, -} -import com.digitalasset.canton.http.{ContractKeyStreamRequest, JwtPayload, SearchForeverRequest} -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.tracing.NoTracing -import org.apache.pekko.http.scaladsl.model.* -import org.apache.pekko.http.scaladsl.model.HttpMethods.* -import org.apache.pekko.http.scaladsl.model.ws.{Message, WebSocketUpgrade} -import org.apache.pekko.http.scaladsl.server.RouteResult.{Complete, Rejected} -import org.apache.pekko.http.scaladsl.server.{Rejection, RequestContext, Route, RouteResult} -import org.apache.pekko.stream.scaladsl.Flow -import scalaz.std.scalaFuture.* -import scalaz.syntax.std.boolean.* -import scalaz.syntax.std.option.* -import scalaz.{EitherT, \/} - -import scala.concurrent.{ExecutionContext, Future} - -object WebsocketEndpoints { - val tokenPrefix: String = "jwt.token." 
- val wsProtocol: String = "daml.ws.auth" - - private def findJwtFromSubProtocol[Err >: Unauthorized]( - upgradeToWebSocket: WebSocketUpgrade - ): Err \/ Jwt = - upgradeToWebSocket.requestedProtocols - .collectFirst { - case p if p startsWith tokenPrefix => Jwt(p drop tokenPrefix.length) - } - .toRightDisjunction(Unauthorized(s"Missing required $tokenPrefix.[token] in subprotocol")) - - private def preconnect( - decodeJwt: ValidateJwt, - req: WebSocketUpgrade, - subprotocol: String, - resolveUser: ResolveUser, - )(implicit ec: ExecutionContext): EitherT[Future, Error, (Jwt, JwtPayload)] = - for { - _ <- EitherT.either( - req.requestedProtocols.contains(subprotocol) either (()) or (Unauthorized( - s"Missing required $tokenPrefix.[token] or $wsProtocol subprotocol" - ): Error) - ) - jwt0 <- EitherT.either(findJwtFromSubProtocol[Error](req)) - payload <- decodeAndParsePayload[JwtPayload]( - jwt0, - decodeJwt, - resolveUser, - ).leftMap(it => it: Error) - } yield payload -} - -class WebsocketEndpoints( - decodeJwt: ValidateJwt, - webSocketService: WebSocketService, - resolveUser: ResolveUser, - val loggerFactory: NamedLoggerFactory, -)(implicit ec: ExecutionContext) - extends NamedLogging - with NoTracing { - - import WebsocketEndpoints.* - - def transactionWebSocket(implicit - lc: LoggingContextOf[InstanceUUID], - metrics: HttpApiMetrics, - ): Route = { (ctx: RequestContext) => - val dispatch: PartialFunction[HttpRequest, LoggingContextOf[ - InstanceUUID with RequestID - ] => Future[HttpResponse]] = { - case req @ HttpRequest(GET, Uri.Path("/v1/stream/query"), _, _, _) => - ( - implicit lc => - (for { - upgradeReq <- EitherT.either( - req.attribute(AttributeKeys.webSocketUpgrade) \/> (InvalidUserInput( - "Cannot upgrade client's connection to websocket" - ): Error) - ) - _ = logger.info(s"GOT $wsProtocol ${lc.makeString}") - - payload <- preconnect( - decodeJwt, - upgradeReq, - wsProtocol, - resolveUser, - ) - (jwt, jwtPayload) = payload - } yield { - MetricsContext.withMetricLabels(MetricLabelsExtractor.labelsFromRequest(req)*) { - implicit mc: MetricsContext => - handleWebsocketRequest[SearchForeverRequest]( - jwt, - jwtPayload, - upgradeReq, - wsProtocol, - ) - } - }) - .valueOr(httpResponseError(_, logger)) - ) - - case req @ HttpRequest(GET, Uri.Path("/v1/stream/fetch"), _, _, _) => - ( - implicit lc => - (for { - upgradeReq <- EitherT.either( - req.attribute(AttributeKeys.webSocketUpgrade) \/> (InvalidUserInput( - s"Cannot upgrade client's connection to websocket" - ): Error) - ) - payload <- preconnect( - decodeJwt, - upgradeReq, - wsProtocol, - resolveUser, - ) - (jwt, jwtPayload) = payload - } yield { - MetricsContext.withMetricLabels(MetricLabelsExtractor.labelsFromRequest(req)*) { - implicit mc: MetricsContext => - handleWebsocketRequest[ContractKeyStreamRequest[_, _]]( - jwt, - jwtPayload, - upgradeReq, - wsProtocol, - ) - } - }) - .valueOr(httpResponseError(_, logger)) - ) - } - import scalaz.std.partialFunction.* - import scalaz.syntax.arrow.* - dispatch - .&&& { case r => r } - .andThen { case (lcFhr, req) => - extendWithRequestIdLogCtx { implicit lc => - logger.trace(s"Incoming request on ${req.uri}, ${lc.makeString}") - lcFhr(lc) map Complete.apply - } - } - .applyOrElse[HttpRequest, Future[RouteResult]]( - ctx.request, - _ => Future(Rejected(Seq.empty[Rejection])), - ) - } - - def handleWebsocketRequest[A: WebSocketService.StreamRequestParser]( - jwt: Jwt, - jwtPayload: JwtPayload, - req: WebSocketUpgrade, - protocol: String, - )(implicit - lc: LoggingContextOf[InstanceUUID 
with RequestID], - metrics: HttpApiMetrics, - mc: MetricsContext, - ): HttpResponse = { - val handler: Flow[Message, Message, _] = - WebSocketMetricsInterceptor.withRateSizeMetrics( - metrics.websocket, - webSocketService.transactionMessageHandler[A](jwt, jwtPayload), - ) - req.handleMessages(handler, Some(protocol)) - } -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/CirceRelaxedCodec.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/CirceRelaxedCodec.scala index d1f1bec7e6..bf30716145 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/CirceRelaxedCodec.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/CirceRelaxedCodec.scala @@ -22,6 +22,18 @@ import scala.reflect.runtime.universe.* * - for JS mirrors of gRPC classes * - for proto base lib classes (not generated), (they have default value) * - for gRPC scalaPb generated roots of ADT + * + * For scalar types that should have default values, you can use the extension + * deriveRelaxedCodecWithDefaults, which takes a map of default values. + * + * DO NOT USE deriveRelaxedCodec FOR Protobuf ENUM. + * + * You will likely get: 'JSON decoding to CNil should never happen'. In this case, create separate + * Encoder / Decoder using stringDecoderForEnum / stringEncoderForEnum. + * + * Do not forget to also create a Schema for the openapi.yml encoding. Otherwise, your enum will be + * translated into objects and you will likely end up with inconsistencies between Tapir and the + * openapi.yml. */ object CirceRelaxedCodec { @@ -30,7 +42,6 @@ object CirceRelaxedCodec { c: Lazy[ConfiguredAsObjectCodec[T]] ): Codec.AsObject[T] = { val codec = deriveConfiguredCodec[T] - new Codec.AsObject[T] { override def encodeObject(value: T): JsonObject = codec.encodeObject(value) @@ -62,7 +73,6 @@ object CirceRelaxedCodec { } override def apply(c: HCursor): Result[T] = { - val updatedJson = c.value.mapObject { jsonObj => val objMap: Map[String, Json] = jsonObj.toMap val resultMap = initiallyEmptyProperties ++ objMap @@ -72,4 +82,22 @@ } } } + + /** derived codec that supports using default scalar values */ + def deriveRelaxedCodecWithDefaults[T: TypeTag](defaults: Map[String, Json])(implicit + c: Lazy[ConfiguredAsObjectCodec[T]] + ): Codec.AsObject[T] = { + val codec = deriveRelaxedCodec[T] + new Codec.AsObject[T] { + override def encodeObject(value: T): JsonObject = codec.encodeObject(value) + override def apply(c: HCursor): Result[T] = + codec.decodeJson(c.value.mapObject { jsonObj => + val objMap = jsonObj.toMap + val resultMap = defaults ++ objMap + JsonObject.fromMap(resultMap) + }) + } + + } + } diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/Endpoints.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/Endpoints.scala index 42a575034d..efc983e596 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/Endpoints.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/Endpoints.scala @@ -288,10 +288,6 @@ trait Endpoints extends NamedLogging { .map(Right(_)) .recover(handleError.andThen(_.left.map(_._2))) - def resultWithStatusToRight: Future[Either[CustomError, R]] = - future - .map(Right(_)) - .recover(handleError) } /** 
Utility to prepare flow from a gRPC method with an observer. @@ -323,15 +319,29 @@ trait Endpoints extends NamedLogging { Left( ( sre.getStatus.getCode.asSttpStatus, - JsCantonError.fromDecodedCantonError( - DecodedCantonError - .fromStatusRuntimeException(sre) - .getOrElse( - throw new RuntimeException( - "Failed to convert response to JsCantonError." - ) + DecodedCantonError + .fromStatusRuntimeException(sre) + .map(JsCantonError.fromDecodedCantonError) + .getOrElse { + // TODO (#27556) we should log these errors / locations and clean all of them up + // CantonErrors are logged on creation (normally ...). + logger.info( + s"Request failed with legacy error ${sre.getStatus} / ${sre.getMessage}", + sre.getCause, ) - ), + JsCantonError( + code = sre.getStatus.getDescription, + cause = sre.getMessage, + correlationId = None, + traceId = None, + context = Map(), + resources = Seq(), + errorCategory = -1, + grpcCodeValue = Some(sre.getStatus.getCode.value()), + retryInfo = None, + definiteAnswer = None, + ) + }, ) ) case unexpected: UnexpectedFieldsException => diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsCommandService.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsCommandService.scala index 7230a60e51..f6e9f832f0 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsCommandService.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsCommandService.scala @@ -6,11 +6,19 @@ package com.digitalasset.canton.http.json.v2 import com.daml.grpc.adapter.ExecutionSequencerFactory import com.daml.ledger.api.v2.command_service.{ CommandServiceGrpc, + SubmitAndWaitForTransactionRequest, SubmitAndWaitRequest, SubmitAndWaitResponse, } import com.daml.ledger.api.v2.commands.Commands.DeduplicationPeriod -import com.daml.ledger.api.v2.transaction_filter.TransactionFormat +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_LEDGER_EFFECTS +import com.daml.ledger.api.v2.transaction_filter.{ + CumulativeFilter, + EventFormat, + Filters, + TransactionFormat, + WildcardFilter, +} import com.daml.ledger.api.v2.value.Identifier import com.daml.ledger.api.v2.{ command_completion_service, @@ -31,6 +39,7 @@ import com.digitalasset.canton.http.json.v2.JsSchema.{ JsTransaction, JsTransactionTree, } +import com.digitalasset.canton.http.json.v2.LegacyDTOs.toTransactionTree import com.digitalasset.canton.http.json.v2.damldefinitionsservice.Schema.Codecs.* import com.digitalasset.canton.ledger.client.LedgerClient import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} @@ -46,7 +55,6 @@ import sttp.tapir.generic.auto.* import sttp.tapir.json.circe.* import sttp.tapir.{AnyEndpoint, CodecFormat, Schema, webSocketBody} -import scala.annotation.nowarn import scala.concurrent.{ExecutionContext, Future} class JsCommandService( @@ -141,8 +149,6 @@ class JsCommandService( } yield result } - // TODO(#23504) remove when TransactionTree is removed from the API - @nowarn("cat=deprecation") def submitAndWaitForTransactionTree( callerContext: CallerContext ): TracedInput[JsCommands] => Future[ @@ -152,15 +158,48 @@ class JsCommandService( for { commands <- protocolConverters.Commands.fromJson(req.in) - submitAndWaitRequest = - SubmitAndWaitRequest(commands = Some(commands)) + submitAndWaitForTransactionRequest = + SubmitAndWaitForTransactionRequest( + commands = 
Some(commands), + transactionFormat = Some( + TransactionFormat( + eventFormat = Some( + EventFormat( + filtersByParty = commands.actAs + .map(party => + party -> Filters( + Seq( + CumulativeFilter.defaultInstance + .withWildcardFilter(WildcardFilter.defaultInstance) + ) + ) + ) + .toMap, + filtersForAnyParty = None, + verbose = true, + ) + ), + transactionShape = TRANSACTION_SHAPE_LEDGER_EFFECTS, + ) + ), + ) result <- commandServiceClient(callerContext.token()) - .submitAndWaitForTransactionTree(submitAndWaitRequest) - .flatMap(r => protocolConverters.SubmitAndWaitTransactionTreeResponse.toJson(r)) + .submitAndWaitForTransaction(submitAndWaitForTransactionRequest) + .flatMap(r => + protocolConverters.SubmitAndWaitTransactionTreeResponseLegacy + .toJson(toSubmitAndWaitTransactionTreeResponse(r)) + ) .resultToRight } yield result } + private def toSubmitAndWaitTransactionTreeResponse( + response: command_service.SubmitAndWaitForTransactionResponse + ): LegacyDTOs.SubmitAndWaitForTransactionTreeResponse = + LegacyDTOs.SubmitAndWaitForTransactionTreeResponse( + response.transaction.map(toTransactionTree) + ) + def submitAndWaitForTransaction( callerContext: CallerContext ): TracedInput[JsSubmitAndWaitForTransactionRequest] => Future[ @@ -300,7 +339,10 @@ object JsCommandService extends DocumentationEndpoints { .in(sttp.tapir.stringToPath("submit-and-wait-for-transaction-tree")) .in(jsonBody[JsCommands]) .out(jsonBody[JsSubmitAndWaitForTransactionTreeResponse]) - .description("Submit a batch of commands and wait for the transaction trees response") + .deprecated() + .description( + "Submit a batch of commands and wait for the transaction trees response. Provided for backwards compatibility; it will be removed in Canton version 3.5.0. Use submit-and-wait-for-transaction instead." 
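+ // Note: the deprecated tree endpoint above no longer calls a dedicated gRPC method.
+ // Its handler rebuilds the request as a SubmitAndWaitForTransactionRequest with a
+ // wildcard EventFormat over commands.actAs and TRANSACTION_SHAPE_LEDGER_EFFECTS, and
+ // converts the response back to the legacy tree shape via
+ // toSubmitAndWaitTransactionTreeResponse / LegacyDTOs.toTransactionTree.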
+ ) val submitAndWait = commands.post .in(sttp.tapir.stringToPath("submit-and-wait")) diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsInteractiveSubmissionService.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsInteractiveSubmissionService.scala index 2842c7e122..cbcf81829d 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsInteractiveSubmissionService.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsInteractiveSubmissionService.scala @@ -10,8 +10,8 @@ import com.daml.ledger.api.v2.interactive.interactive_submission_service.{ InteractiveSubmissionServiceGrpc, MinLedgerTime, } -import com.daml.ledger.api.v2.package_reference import com.daml.ledger.api.v2.transaction_filter.TransactionFormat +import com.daml.ledger.api.v2.{crypto as lapicrypto, package_reference} import com.digitalasset.canton.auth.AuthInterceptor import com.digitalasset.canton.http.json.v2.CirceRelaxedCodec.deriveRelaxedCodec import com.digitalasset.canton.http.json.v2.Endpoints.{CallerContext, TracedInput, v2Endpoint} @@ -184,6 +184,7 @@ final case class JsPrepareSubmissionRequest( packageIdSelectionPreference: Seq[String], verboseHashing: Boolean = false, prefetchContractKeys: Seq[js.PrefetchContractKey] = Seq.empty, + maxRecordTime: Option[com.google.protobuf.timestamp.Timestamp], ) final case class JsPrepareSubmissionResponse( @@ -386,19 +387,17 @@ object JsInteractiveSubmissionServiceCodecs { : Codec[interactive_submission_service.SinglePartySignatures] = deriveRelaxedCodec - implicit val signatureRW: Codec[interactive_submission_service.Signature] = + implicit val signatureRW: Codec[lapicrypto.Signature] = deriveRelaxedCodec - implicit val signingAlgorithmSpecEncoder - : Encoder[interactive_submission_service.SigningAlgorithmSpec] = + implicit val signingAlgorithmSpecEncoder: Encoder[lapicrypto.SigningAlgorithmSpec] = stringEncoderForEnum() - implicit val signingAlgorithmSpecDecoder - : Decoder[interactive_submission_service.SigningAlgorithmSpec] = + implicit val signingAlgorithmSpecDecoder: Decoder[lapicrypto.SigningAlgorithmSpec] = stringDecoderForEnum() - implicit val signatureFormatDecoder: Decoder[interactive_submission_service.SignatureFormat] = + implicit val signatureFormatDecoder: Decoder[lapicrypto.SignatureFormat] = stringDecoderForEnum() - implicit val signatureFormatEncoder: Encoder[interactive_submission_service.SignatureFormat] = + implicit val signatureFormatEncoder: Encoder[lapicrypto.SignatureFormat] = stringEncoderForEnum() implicit val jsExecuteSubmissionRequestRW: Codec[JsExecuteSubmissionRequest] = @@ -432,10 +431,10 @@ object JsInteractiveSubmissionServiceCodecs { deriveRelaxedCodec // Schema mappings are added to align generated tapir docs with a circe mapping of ADTs - implicit val signatureFormatSchema: Schema[interactive_submission_service.SignatureFormat] = + implicit val signatureFormatSchema: Schema[lapicrypto.SignatureFormat] = stringSchemaForEnum() - implicit val signingAlgorithmSpec: Schema[interactive_submission_service.SigningAlgorithmSpec] = + implicit val signingAlgorithmSpec: Schema[lapicrypto.SigningAlgorithmSpec] = stringSchemaForEnum() implicit val timeSchema: Schema[interactive_submission_service.MinLedgerTime.Time] = diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsPackageService.scala 
b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsPackageService.scala index 2497eeb5ca..d878272467 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsPackageService.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsPackageService.scala @@ -4,10 +4,11 @@ package com.digitalasset.canton.http.json.v2 import com.daml.ledger.api.v2.admin.package_management_service -import com.daml.ledger.api.v2.package_service +import com.daml.ledger.api.v2.{package_reference, package_service} import com.digitalasset.canton.auth.AuthInterceptor import com.digitalasset.canton.http.json.v2.CirceRelaxedCodec.deriveRelaxedCodec import com.digitalasset.canton.http.json.v2.Endpoints.{CallerContext, TracedInput} +import com.digitalasset.canton.http.json.v2.JsSchema.DirectScalaPbRwImplicits.* import com.digitalasset.canton.http.json.v2.JsSchema.{ JsCantonError, stringDecoderForEnum, @@ -19,6 +20,7 @@ import com.digitalasset.canton.ledger.client.services.pkg.PackageClient import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.tracing.TraceContext import com.google.protobuf +import io.circe.generic.extras.semiauto.deriveConfiguredCodec import io.circe.{Codec, Decoder, Encoder} import org.apache.pekko.stream.Materializer import org.apache.pekko.stream.scaladsl.{Source, StreamConverters} @@ -26,7 +28,7 @@ import org.apache.pekko.util import sttp.capabilities.pekko.PekkoStreams import sttp.tapir.generic.auto.* import sttp.tapir.json.circe.jsonBody -import sttp.tapir.{AnyEndpoint, CodecFormat, Schema, path, streamBinaryBody} +import sttp.tapir.{AnyEndpoint, CodecFormat, Schema, SchemaType, path, query, streamBinaryBody} import scala.annotation.unused import scala.concurrent.{ExecutionContext, Future} @@ -43,8 +45,6 @@ class JsPackageService( materializer: Materializer, val authInterceptor: AuthInterceptor, ) extends Endpoints { - import JsPackageService.* - @SuppressWarnings(Array("org.wartremover.warts.Product", "org.wartremover.warts.Serializable")) def endpoints() = List( @@ -57,13 +57,21 @@ class JsPackageService( getPackage, ), withServerLogic( - uploadDar, + JsPackageService.uploadDar, upload, ), withServerLogic( JsPackageService.packageStatusEndpoint, status, ), + withServerLogic( + JsPackageService.listVettedPackagesEndpoint, + listVettedPackages, + ), + withServerLogic( + JsPackageService.updateVettedPackagesEndpoint, + updateVettedPackages, + ), ) private def list( caller: CallerContext @@ -78,18 +86,39 @@ class JsPackageService( Either[JsCantonError, package_service.GetPackageStatusResponse] ] = req => packageClient.getPackageStatus(req.in, caller.token())(req.traceContext).resultToRight + private def listVettedPackages( + @unused caller: CallerContext + ): TracedInput[package_service.ListVettedPackagesRequest] => Future[ + Either[JsCantonError, package_service.ListVettedPackagesResponse] + ] = req => + packageClient.listVettedPackages(req.in, caller.token())(req.traceContext).resultToRight + + private def updateVettedPackages( + @unused caller: CallerContext + ): TracedInput[package_management_service.UpdateVettedPackagesRequest] => Future[ + Either[JsCantonError, package_management_service.UpdateVettedPackagesResponse] + ] = req => + packageManagementClient + .updateVettedPackages(req.in, caller.token())(req.traceContext) + .resultToRight + private def upload(caller: CallerContext) = { - (tracedInput: 
TracedInput[Source[util.ByteString, Any]]) => + (tracedInput: TracedInput[(Source[util.ByteString, Any], Option[Boolean], Option[String])]) => implicit val traceContext: TraceContext = tracedInput.traceContext - val inputStream = tracedInput.in.runWith(StreamConverters.asInputStream())(materializer) + val (bytesSource, vetAllPackagesO, synchronizerIdO) = tracedInput.in + val inputStream = bytesSource.runWith(StreamConverters.asInputStream())(materializer) val bs = protobuf.ByteString.readFrom(inputStream) packageManagementClient - .uploadDarFile(bs, caller.token()) + .uploadDarFile( + darFile = bs, + token = caller.token(), + vetAllPackages = vetAllPackagesO.getOrElse(true), + synchronizerId = synchronizerIdO, + ) .map { _ => package_management_service.UploadDarFileResponse() } .resultToRight - } private def getPackage(caller: CallerContext) = { (tracedInput: TracedInput[String]) => @@ -114,14 +143,16 @@ class JsPackageService( object JsPackageService extends DocumentationEndpoints { import Endpoints.* lazy val packages = v2Endpoint.in(sttp.tapir.stringToPath("packages")) + lazy val packageVetting = v2Endpoint.in(sttp.tapir.stringToPath("package-vetting")) private val packageIdPath = "package-id" val uploadDar = packages.post .in(streamBinaryBody(PekkoStreams)(CodecFormat.OctetStream()).toEndpointIO) + .in(query[Option[Boolean]]("vetAllPackages")) + .in(query[Option[String]]("synchronizerId")) .out(jsonBody[package_management_service.UploadDarFileResponse]) .description("Upload a DAR to the participant node") - val listPackagesEndpoint = packages.get .out(jsonBody[package_service.ListPackagesResponse]) @@ -143,8 +174,27 @@ object JsPackageService extends DocumentationEndpoints { .out(jsonBody[package_service.GetPackageStatusResponse]) .description("Get package status") + val listVettedPackagesEndpoint = + packageVetting.get + .in(jsonBody[package_service.ListVettedPackagesRequest]) + .out(jsonBody[package_service.ListVettedPackagesResponse]) + .description("List vetted packages") + + val updateVettedPackagesEndpoint = + packageVetting.post + .in(jsonBody[package_management_service.UpdateVettedPackagesRequest]) + .out(jsonBody[package_management_service.UpdateVettedPackagesResponse]) + .description("Update vetted packages") + override def documentation: Seq[AnyEndpoint] = - Seq(uploadDar, listPackagesEndpoint, downloadPackageEndpoint, packageStatusEndpoint) + Seq( + uploadDar, + listPackagesEndpoint, + downloadPackageEndpoint, + packageStatusEndpoint, + listVettedPackagesEndpoint, + updateVettedPackagesEndpoint, + ) } @@ -155,6 +205,66 @@ object JsPackageCodecs { deriveRelaxedCodec implicit val getPackageStatusResponse: Codec[package_service.GetPackageStatusResponse] = deriveRelaxedCodec + implicit val vettedPackages: Codec[package_reference.VettedPackages] = + deriveRelaxedCodec + implicit val vettedPackage: Codec[package_reference.VettedPackage] = + deriveRelaxedCodec + implicit val updateVettedPackagesResponse + : Codec[package_management_service.UpdateVettedPackagesResponse] = + deriveRelaxedCodec + implicit val vettedPackagesChangeRef: Codec[package_management_service.VettedPackagesRef] = + deriveRelaxedCodec + implicit val vettedPackagesChangeUnvet + : Codec[package_management_service.VettedPackagesChange.Unvet] = + deriveRelaxedCodec + implicit val vettedPackagesChangeVet: Codec[package_management_service.VettedPackagesChange.Vet] = + deriveRelaxedCodec + implicit val vettedPackagesChangeOperation + : Codec[package_management_service.VettedPackagesChange.Operation] = + 
deriveConfiguredCodec + implicit val vettedPackagesChangeOperationSchema + : Schema[package_management_service.VettedPackagesChange.Operation] = + Schema.oneOfWrapped + + implicit val topologySerial: Codec[package_reference.PriorTopologySerial] = + deriveRelaxedCodec + + implicit val topologySerialSerial: Codec[package_reference.PriorTopologySerial.Serial] = + deriveConfiguredCodec + + implicit val topologySerialSerialPriorOneOf + : Codec[package_reference.PriorTopologySerial.Serial.Prior] = + deriveRelaxedCodec + + implicit val topologySerialSerialNoPriorOneOf + : Codec[package_reference.PriorTopologySerial.Serial.NoPrior] = + Codec.from( + Decoder.decodeUnit.map(_ => + package_reference.PriorTopologySerial.Serial.NoPrior(com.google.protobuf.empty.Empty()) + ), + Encoder.encodeUnit.contramap[package_reference.PriorTopologySerial.Serial.NoPrior](_ => ()), + ) + + implicit val vettedPackagesChange: Codec[package_management_service.VettedPackagesChange] = + deriveRelaxedCodec + implicit val updateVettedPackagesRequest + : Codec[package_management_service.UpdateVettedPackagesRequest] = + deriveRelaxedCodec + implicit val packageMetadataFilter: Codec[package_service.PackageMetadataFilter] = + deriveRelaxedCodec + implicit val topologyStateFilter: Codec[package_service.TopologyStateFilter] = + deriveRelaxedCodec + implicit val listVettedPackagesRequest: Codec[package_service.ListVettedPackagesRequest] = + deriveRelaxedCodec + implicit val listVettedPackagesResponse: Codec[package_service.ListVettedPackagesResponse] = + deriveRelaxedCodec + + implicit val vettedPackagesChangeUnvetOneOf + : Codec[package_management_service.VettedPackagesChange.Operation.Unvet] = + deriveRelaxedCodec + implicit val vettedPackagesChangeVetOneOf + : Codec[package_management_service.VettedPackagesChange.Operation.Vet] = + deriveRelaxedCodec implicit val uploadDarFileResponseRW: Codec[package_management_service.UploadDarFileResponse] = deriveRelaxedCodec @@ -169,4 +279,15 @@ object JsPackageCodecs { implicit val packageStatusSchema: Schema[package_service.PackageStatus] = stringSchemaForEnum() + implicit val topologySerialSerialNoPriorSchema + : Schema[package_reference.PriorTopologySerial.Serial.NoPrior] = + Schema( + schemaType = + SchemaType.SProduct[package_reference.PriorTopologySerial.Serial.NoPrior](List.empty), + name = Some(Schema.SName("NoPrior")), + ) + + implicit val topologySerialSerialSchema: Schema[package_reference.PriorTopologySerial.Serial] = + Schema.oneOfWrapped + } diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsPartyManagementService.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsPartyManagementService.scala index 0f6ac8a366..db20be9f8d 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsPartyManagementService.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsPartyManagementService.scala @@ -4,8 +4,13 @@ package com.digitalasset.canton.http.json.v2 import com.daml.ledger.api.v2.admin.party_management_service +import com.daml.ledger.api.v2.admin.party_management_service.GenerateExternalPartyTopologyRequest +import com.daml.ledger.api.v2.crypto as lapicrypto import com.digitalasset.canton.auth.AuthInterceptor -import com.digitalasset.canton.http.json.v2.CirceRelaxedCodec.deriveRelaxedCodec +import com.digitalasset.canton.http.json.v2.CirceRelaxedCodec.{ + deriveRelaxedCodec, + 
deriveRelaxedCodecWithDefaults, +} import com.digitalasset.canton.http.json.v2.Endpoints.{CallerContext, TracedInput} import com.digitalasset.canton.http.json.v2.JsSchema.DirectScalaPbRwImplicits.* import com.digitalasset.canton.http.json.v2.JsSchema.JsCantonError @@ -13,12 +18,12 @@ import com.digitalasset.canton.ledger.client.services.admin.PartyManagementClien import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors.InvalidArgument import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.tracing.TraceContext -import io.circe.Codec import io.circe.generic.extras.semiauto.deriveConfiguredCodec +import io.circe.{Codec, Json} import sttp.tapir.generic.auto.* import sttp.tapir.json.circe.jsonBody import sttp.tapir.server.ServerEndpoint -import sttp.tapir.{AnyEndpoint, path, query} +import sttp.tapir.{AnyEndpoint, Schema, path, query} import scala.concurrent.{ExecutionContext, Future} @@ -43,6 +48,10 @@ class JsPartyManagementService( allocatePartyEndpoint, allocateParty, ), + withServerLogic( + allocateExternalPartyEndpoint, + allocateExternalParty, + ), withServerLogic( JsPartyManagementService.getParticipantIdEndpoint, getParticipantId, @@ -52,6 +61,10 @@ class JsPartyManagementService( JsPartyManagementService.updatePartyEndpoint, updateParty, ), + withServerLogic( + JsPartyManagementService.externalPartyGenerateTopologyEndpoint, + externalPartyGenerateTopology, + ), ) private val listKnownParties: CallerContext => TracedInput[PagedList[Unit]] => Future[ @@ -109,6 +122,18 @@ class JsPartyManagementService( } yield response } + private val allocateExternalParty: CallerContext => TracedInput[ + party_management_service.AllocateExternalPartyRequest + ] => Future[ + Either[JsCantonError, party_management_service.AllocateExternalPartyResponse] + ] = + caller => + req => + partyManagementClient + .serviceStub(caller.token())(req.traceContext) + .allocateExternalParty(req.in) + .resultToRight + private val updateParty: CallerContext => TracedInput[ (String, party_management_service.UpdatePartyDetailsRequest) ] => Future[Either[JsCantonError, party_management_service.UpdatePartyDetailsResponse]] = @@ -129,13 +154,29 @@ class JsPartyManagementService( ) ) } + + private val externalPartyGenerateTopology: CallerContext => TracedInput[ + party_management_service.GenerateExternalPartyTopologyRequest + ] => Future[ + Either[JsCantonError, party_management_service.GenerateExternalPartyTopologyResponse] + ] = + caller => + request => { + partyManagementClient + .serviceStub(caller.token())(request.traceContext) + .generateExternalPartyTopology(request.in) + .resultToRight + } + } object JsPartyManagementService extends DocumentationEndpoints { import Endpoints.* import JsPartyManagementCodecs.* + import JsSchema.Crypto.* private val parties = v2Endpoint.in(sttp.tapir.stringToPath("parties")) + private val external = sttp.tapir.stringToPath("external") private val partyPath = "party" val allocatePartyEndpoint = parties.post @@ -143,6 +184,13 @@ object JsPartyManagementService extends DocumentationEndpoints { .out(jsonBody[party_management_service.AllocatePartyResponse]) .description("Allocate a new party to the participant node") + val allocateExternalPartyEndpoint = parties + .in(external / sttp.tapir.stringToPath("allocate")) + .post + .in(jsonBody[party_management_service.AllocateExternalPartyRequest]) + .out(jsonBody[party_management_service.AllocateExternalPartyResponse]) + .description("Allocate a new external party") + val 
listKnownPartiesEndpoint = parties.get .out(jsonBody[party_management_service.ListKnownPartiesResponse]) @@ -168,17 +216,36 @@ object JsPartyManagementService extends DocumentationEndpoints { .in(jsonBody[party_management_service.UpdatePartyDetailsRequest]) .out(jsonBody[party_management_service.UpdatePartyDetailsResponse]) .description("Allocate a new party to the participant node") + + val externalPartyGenerateTopologyEndpoint = parties + .in(external / sttp.tapir.stringToPath("generate-topology")) + .post + .in(jsonBody[party_management_service.GenerateExternalPartyTopologyRequest]) + .out(jsonBody[party_management_service.GenerateExternalPartyTopologyResponse]) + .description("Generate a topology for an external party") + override def documentation: Seq[AnyEndpoint] = Seq( listKnownPartiesEndpoint, allocatePartyEndpoint, + allocateExternalPartyEndpoint, getParticipantIdEndpoint, getPartyEndpoint, updatePartyEndpoint, + externalPartyGenerateTopologyEndpoint, ) } object JsPartyManagementCodecs { + import JsSchema.config + import JsInteractiveSubmissionServiceCodecs.signatureRW + import JsSchema.Crypto.* + + implicit val signatureFormatSchema: Schema[lapicrypto.SignatureFormat] = + Schema.string + + implicit val signingAlgorithmSpec: Schema[lapicrypto.SigningAlgorithmSpec] = + Schema.string implicit val partyDetails: Codec[party_management_service.PartyDetails] = deriveRelaxedCodec implicit val listKnownPartiesResponse: Codec[party_management_service.ListKnownPartiesResponse] = @@ -189,6 +256,18 @@ object JsPartyManagementCodecs { implicit val allocatePartyResponse: Codec[party_management_service.AllocatePartyResponse] = deriveRelaxedCodec + implicit val signedTransaction + : Codec[party_management_service.AllocateExternalPartyRequest.SignedTransaction] = + deriveRelaxedCodec + + implicit val allocateExternalPartyRequest + : Codec[party_management_service.AllocateExternalPartyRequest] = + deriveRelaxedCodecWithDefaults(Map("identityProviderId" -> Json.fromString(""))) + + implicit val allocateExternalPartyResponse + : Codec[party_management_service.AllocateExternalPartyResponse] = + deriveRelaxedCodec + implicit val getPartiesRequest: Codec[party_management_service.GetPartiesRequest] = deriveRelaxedCodec implicit val getPartiesResponse: Codec[party_management_service.GetPartiesResponse] = @@ -203,4 +282,21 @@ object JsPartyManagementCodecs { implicit val getParticipantIdResponse: Codec[party_management_service.GetParticipantIdResponse] = deriveRelaxedCodec + + implicit val generateExternalPartyTopologyRequest + : Codec[party_management_service.GenerateExternalPartyTopologyRequest] = { + import io.circe.Json + deriveRelaxedCodecWithDefaults[GenerateExternalPartyTopologyRequest]( + Map( + "localParticipantObservationOnly" -> Json.False, + "confirmationThreshold" -> Json.fromInt(0), + ) + ) + + } + + implicit val generateExternalPartyTopologyResponse + : Codec[party_management_service.GenerateExternalPartyTopologyResponse] = + deriveRelaxedCodec + } diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsSchema.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsSchema.scala index bd7638380f..4fc8f46e22 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsSchema.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsSchema.scala @@ -8,7 +8,13 @@ import 
com.daml.ledger.api.v2.admin.object_meta.ObjectMeta import com.daml.ledger.api.v2.trace_context.TraceContext import com.daml.ledger.api.v2.transaction_filter.TransactionShape import com.daml.ledger.api.v2.value.Identifier -import com.daml.ledger.api.v2.{offset_checkpoint, reassignment, state_service, transaction_filter} +import com.daml.ledger.api.v2.{ + crypto, + offset_checkpoint, + reassignment, + state_service, + transaction_filter, +} import com.digitalasset.base.error.utils.DecodedCantonError import com.digitalasset.base.error.{DamlErrorWithDefiniteAnswer, RpcError} import com.digitalasset.canton.http.json.v2.CirceRelaxedCodec.deriveRelaxedCodec @@ -32,7 +38,6 @@ import sttp.tapir.{DecodeResult, Schema, SchemaType, Validator} import java.time.Instant import java.util.Base64 -import scala.annotation.nowarn import scala.concurrent.duration.Duration import scala.util.Try @@ -49,12 +54,17 @@ object JsSchema { def stringDecoderForEnum[T <: scalapb.GeneratedEnum]()(implicit enumCompanion: GeneratedEnumCompanion[T] ): Decoder[T] = - Decoder.decodeString.emap { v => - enumCompanion - .fromName(v) - .toRight( - s"Unrecognized enum value $v. Supported values: ${enumCompanion.values.map(_.name).mkString("[", ", ", "]")}" - ) + Decoder.decodeString.emap { + case "UNRECOGNIZED" => + // Map UNRECOGNIZED to -1 in order to have the enum companion return the Unrecognized instance + // which is automatically generated by protobuf + Right(enumCompanion.fromValue(-1)) + case v => + enumCompanion + .fromName(v) + .toRight { + s"Unrecognized enum value $v. Supported values: ${enumCompanion.values.map(_.name).mkString("[", ", ", "]")}" + } } def stringSchemaForEnum[T <: scalapb.GeneratedEnum]()(implicit @@ -155,10 +165,6 @@ object JsSchema { deriveRelaxedCodec implicit val filtersRW: Codec[transaction_filter.Filters] = deriveRelaxedCodec - // TODO(#23504) remove - @nowarn("cat=deprecation") - implicit val transactionFilterRW: Codec[transaction_filter.TransactionFilter] = - deriveRelaxedCodec implicit val transactionFilterLegacyRW: Codec[LegacyDTOs.TransactionFilter] = deriveRelaxedCodec implicit val eventFormatRW: Codec[transaction_filter.EventFormat] = deriveRelaxedCodec @@ -251,6 +257,7 @@ object JsSchema { observers: Seq[String], createdAt: protobuf.timestamp.Timestamp, packageName: String, + representativePackageId: String, acsDelta: Boolean, ) extends Event @@ -402,6 +409,12 @@ object JsSchema { } } + implicit val emptyCodec: Codec[com.google.protobuf.empty.Empty] = + Codec.from( + Decoder.decodeUnit.map(_ => com.google.protobuf.empty.Empty()), + Encoder.encodeUnit.contramap[com.google.protobuf.empty.Empty](_ => ()), + ) + implicit val encodeIdentifier: Encoder[com.daml.ledger.api.v2.value.Identifier] = Encoder.encodeString.contramap { identifier => IdentifierConverter.toJson(identifier) @@ -474,6 +487,12 @@ object JsSchema { implicit val structSchema: Schema[protobuf.struct.Struct] = Schema.any + implicit val emptySchema: Schema[com.google.protobuf.empty.Empty] = + Schema( + schemaType = SchemaType.SProduct[com.google.protobuf.empty.Empty](List.empty), + name = Some(Schema.SName("Empty")), + ) + implicit val anySchema: Schema[com.google.protobuf.any.Any] = Schema.derived.name(Some(SName("ProtoAny"))) @@ -518,4 +537,24 @@ object JsSchema { implicit val valueSchema: Schema[com.google.protobuf.struct.Value] = Schema.any } + + object Crypto { + + implicit val signingKeySpecSchema: Schema[crypto.SigningKeySpec] = Schema.string + .encodedExample("SIGNING_KEY_SPEC_EC_CURVE25519") + .description("Specifies 
the key schema used for the signature.") + + implicit val signingKeySpecEncoder: Encoder[crypto.SigningKeySpec] = stringEncoderForEnum() + implicit val signingKeySpecDecoder: Decoder[crypto.SigningKeySpec] = stringDecoderForEnum() + + implicit val cryptoKeyFormatSchema: Schema[crypto.CryptoKeyFormat] = Schema.string + .encodedExample("CRYPTO_KEY_FORMAT_DER_X509_SUBJECT_PUBLIC_KEY_INFO") + .description("Specifies the format of the cryptographic key.") + implicit val cryptoKeyFormatEncoder: Encoder[crypto.CryptoKeyFormat] = stringEncoderForEnum() + implicit val cryptoKeyFormatDecoder: Decoder[crypto.CryptoKeyFormat] = stringDecoderForEnum() + implicit val signingPublicKey: Codec[crypto.SigningPublicKey] = + deriveRelaxedCodec[crypto.SigningPublicKey] + + } + } diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsStateService.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsStateService.scala index 0c0b0bee8d..0f2432af7b 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsStateService.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsStateService.scala @@ -4,7 +4,8 @@ package com.digitalasset.canton.http.json.v2 import com.daml.grpc.adapter.ExecutionSequencerFactory -import com.daml.ledger.api.v2.{reassignment, state_service, transaction_filter} +import com.daml.ledger.api.v2.transaction_filter.EventFormat +import com.daml.ledger.api.v2.{reassignment, state_service} import com.digitalasset.canton.auth.AuthInterceptor import com.digitalasset.canton.http.WebsocketConfig import com.digitalasset.canton.http.json.v2.CirceRelaxedCodec.deriveRelaxedCodec @@ -18,6 +19,7 @@ import com.digitalasset.canton.http.json.v2.JsContractEntry.{ import com.digitalasset.canton.http.json.v2.JsSchema.DirectScalaPbRwImplicits.* import com.digitalasset.canton.http.json.v2.JsSchema.{JsCantonError, JsEvent} import com.digitalasset.canton.ledger.client.LedgerClient +import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.tracing.TraceContext import io.circe.Codec @@ -30,11 +32,8 @@ import sttp.tapir.generic.auto.* import sttp.tapir.json.circe.* import sttp.tapir.{AnyEndpoint, CodecFormat, Schema, query, webSocketBody} -import scala.annotation.nowarn import scala.concurrent.{ExecutionContext, Future} -// TODO(#23504) remove deprecation suppression -@nowarn("cat=deprecation") class JsStateService( ledgerClient: LedgerClient, protocolConverters: ProtocolConverters, @@ -77,16 +76,16 @@ class JsStateService( private def getConnectedSynchronizers( callerContext: CallerContext - ): TracedInput[(String, Option[String], Option[String])] => Future[ + ): TracedInput[(Option[String], Option[String], Option[String])] => Future[ Either[JsCantonError, state_service.GetConnectedSynchronizersResponse] ] = req => stateServiceClient(callerContext.token())(req.traceContext) .getConnectedSynchronizers( state_service .GetConnectedSynchronizersRequest( - party = req.in._1, + party = req.in._1.getOrElse(""), participantId = req.in._2.getOrElse(""), - identityProviderId = req.in._2.getOrElse(""), + identityProviderId = req.in._3.getOrElse(""), ) ) .resultToRight @@ -118,18 +117,8 @@ class JsStateService( ] = req => { implicit val tc = req.traceContext - Flow[LegacyDTOs.GetActiveContractsRequest].map { request 
=> - state_service.GetActiveContractsRequest( - filter = request.filter.map(f => - transaction_filter.TransactionFilter( - filtersByParty = f.filtersByParty, - filtersForAnyParty = f.filtersForAnyParty, - ) - ), - verbose = request.verbose, - activeAtOffset = request.activeAtOffset, - eventFormat = request.eventFormat, - ) + Flow[LegacyDTOs.GetActiveContractsRequest].map { + toGetActiveContractsRequest } via prepareSingleWsStream( stateServiceClient(caller.token())(TraceContext.empty).getActiveContracts, @@ -138,6 +127,46 @@ class JsStateService( ) } + private def toGetActiveContractsRequest( + req: LegacyDTOs.GetActiveContractsRequest + )(implicit traceContext: TraceContext): state_service.GetActiveContractsRequest = + (req.eventFormat, req.filter, req.verbose) match { + case (Some(_), Some(_), _) => + throw RequestValidationErrors.InvalidArgument + .Reject( + "Both event_format and filter are set. Please use either backwards compatible arguments (filter and verbose) or event_format, but not both." + ) + .asGrpcError + case (Some(_), _, true) => + throw RequestValidationErrors.InvalidArgument + .Reject( + "Both event_format and verbose are set. Please use either backwards compatible arguments (filter and verbose) or event_format, but not both." + ) + .asGrpcError + case (Some(_), None, false) => + state_service.GetActiveContractsRequest( + activeAtOffset = req.activeAtOffset, + eventFormat = req.eventFormat, + ) + case (None, None, _) => + throw RequestValidationErrors.InvalidArgument + .Reject( + "Either filter/verbose or event_format is required. Please use either backwards compatible arguments (filter and verbose) or event_format." + ) + .asGrpcError + case (None, Some(filter), verbose) => + state_service.GetActiveContractsRequest( + activeAtOffset = req.activeAtOffset, + eventFormat = Some( + EventFormat( + filtersByParty = filter.filtersByParty, + filtersForAnyParty = filter.filtersForAnyParty, + verbose = verbose, + ) + ), + ) + } + } object JsStateService extends DocumentationEndpoints { @@ -174,7 +203,7 @@ object JsStateService extends DocumentationEndpoints { val getConnectedSynchronizersEndpoint = state.get .in(sttp.tapir.stringToPath("connected-synchronizers")) - .in(query[String]("party")) + .in(query[Option[String]]("party")) .in(query[Option[String]]("participantId")) .in(query[Option[String]]("identityProviderId")) .out(jsonBody[state_service.GetConnectedSynchronizersResponse]) @@ -230,8 +259,6 @@ final case class JsGetActiveContractsResponse( contractEntry: JsContractEntry, ) -// TODO(#23504) remove deprecation suppression -@nowarn("cat=deprecation") object JsStateServiceCodecs { import JsSchema.* @@ -259,6 +286,7 @@ object JsStateServiceCodecs { implicit val getConnectedSynchronizersResponseRW : Codec[state_service.GetConnectedSynchronizersResponse] = deriveRelaxedCodec + implicit val connectedSynchronizerRW : Codec[state_service.GetConnectedSynchronizersResponse.ConnectedSynchronizer] = deriveRelaxedCodec diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsUpdateService.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsUpdateService.scala index 918e113a78..e185f8adcd 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsUpdateService.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsUpdateService.scala @@ -5,16 +5,18 @@ package 
com.digitalasset.canton.http.json.v2 import com.daml.grpc.adapter.ExecutionSequencerFactory import com.daml.ledger.api.v2 as lapi -import com.daml.ledger.api.v2.event.Event.Event -import com.daml.ledger.api.v2.transaction.Transaction import com.daml.ledger.api.v2.transaction_filter.CumulativeFilter.IdentifierFilter.WildcardFilter -import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_LEDGER_EFFECTS +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.{ + TRANSACTION_SHAPE_ACS_DELTA, + TRANSACTION_SHAPE_LEDGER_EFFECTS, +} import com.daml.ledger.api.v2.transaction_filter.{ CumulativeFilter, EventFormat, Filters, ParticipantAuthorizationTopologyFormat, TransactionFormat, + TransactionShape, UpdateFormat, } import com.daml.ledger.api.v2.update_service.GetUpdatesResponse @@ -31,6 +33,7 @@ import com.digitalasset.canton.http.json.v2.JsSchema.{ JsTransactionTree, } import com.digitalasset.canton.http.json.v2.JsUpdateServiceConverters.toUpdateFormat +import com.digitalasset.canton.http.json.v2.LegacyDTOs.toTransactionTree import com.digitalasset.canton.http.json.v2.damldefinitionsservice.Schema.Codecs.* import com.digitalasset.canton.ledger.client.LedgerClient import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors @@ -47,11 +50,8 @@ import sttp.tapir.generic.auto.* import sttp.tapir.json.circe.* import sttp.tapir.{AnyEndpoint, CodecFormat, Schema, path, query, webSocketBody} -import scala.annotation.nowarn import scala.concurrent.{ExecutionContext, Future} -// TODO(#23504) remove deprecated methods -@nowarn("cat=deprecation") class JsUpdateService( ledgerClient: LedgerClient, protocolConverters: ProtocolConverters, @@ -131,30 +131,120 @@ class JsUpdateService( ] = { req => implicit val tc: TraceContext = req.traceContext updateServiceClient(caller.token())(req.traceContext) - .getTransactionTreeByOffset( - update_service.GetTransactionByOffsetRequest( + .getUpdateByOffset( + update_service.GetUpdateByOffsetRequest( offset = req.in._1, - requestingParties = req.in._2, - transactionFormat = None, + updateFormat = Some( + getUpdateFormatForPointwiseQueries( + requestingParties = req.in._2, + transactionShape = TRANSACTION_SHAPE_LEDGER_EFFECTS, + ) + ), ) ) - .flatMap(protocolConverters.GetTransactionTreeResponse.toJson(_)) + .flatMap((r: update_service.GetUpdateResponse) => + protocolConverters.GetTransactionTreeResponseLegacy.toJson(toGetTransactionTreeResponse(r)) + ) .resultToRight } + private def getUpdateFormatForPointwiseQueries( + requestingParties: Seq[String], + transactionShape: TransactionShape, + ): UpdateFormat = { + val eventFormat = EventFormat( + filtersByParty = requestingParties + .map(party => + party -> Filters( + cumulative = List( + CumulativeFilter( + WildcardFilter( + transaction_filter.WildcardFilter.defaultInstance + ) + ) + ) + ) + ) + .toMap, + filtersForAnyParty = None, + verbose = true, + ) + val transactionFormat = TransactionFormat( + transactionShape = transactionShape, + eventFormat = Some(eventFormat), + ) + UpdateFormat( + includeTransactions = Some(transactionFormat), + includeReassignments = None, + includeTopologyEvents = None, + ) + } + + private def toGetTransactionTreeResponse( + update: update_service.GetUpdateResponse + ): LegacyDTOs.GetTransactionTreeResponse = + LegacyDTOs.GetTransactionTreeResponse(update.update.transaction.map(toTransactionTree)) + private def getTransactionByOffset( caller: CallerContext - ): TracedInput[update_service.GetTransactionByOffsetRequest] => Future[ + ): 
TracedInput[LegacyDTOs.GetTransactionByOffsetRequest] => Future[ Either[JsCantonError, JsGetTransactionResponse] ] = req => { implicit val tc = req.traceContext updateServiceClient(caller.token())(req.traceContext) - .getTransactionByOffset(req.in) - .flatMap(protocolConverters.GetTransactionResponse.toJson(_)) + .getUpdateByOffset( + update_service.GetUpdateByOffsetRequest( + offset = req.in.offset, + updateFormat = Some( + getUpdateFormatForFlatQueries( + requestingParties = req.in.requestingParties, + transactionFormat = req.in.transactionFormat, + ) + ), + ) + ) + .flatMap((r: update_service.GetUpdateResponse) => + protocolConverters.GetTransactionResponseLegacy.toJson(toGetTransactionResponse(r)) + ) .resultToRight } + private def getUpdateFormatForFlatQueries( + requestingParties: Seq[String], + transactionFormat: Option[TransactionFormat], + )(implicit traceContext: TraceContext): UpdateFormat = + (requestingParties, transactionFormat) match { + case (Nil, Some(format)) => + UpdateFormat( + includeTransactions = Some(format), + includeReassignments = None, + includeTopologyEvents = None, + ) + case (Nil, None) => + throw RequestValidationErrors.InvalidArgument + .Reject( + "Either transaction_format or requesting_parties is required. Please use either backwards compatible arguments (requesting_parties) or transaction_format." + ) + .asGrpcError + case (_, Some(_)) => + throw RequestValidationErrors.InvalidArgument + .Reject( + "Both transaction_format and requesting_parties are set. Please use either backwards compatible arguments (requesting_parties) or transaction_format but not both." + ) + .asGrpcError + case (requestingParties, None) => + getUpdateFormatForPointwiseQueries( + requestingParties = requestingParties, + transactionShape = TRANSACTION_SHAPE_ACS_DELTA, + ) + } + + private def toGetTransactionResponse( + update: update_service.GetUpdateResponse + ): LegacyDTOs.GetTransactionResponse = + LegacyDTOs.GetTransactionResponse(update.update.transaction) + private def getUpdateByOffset( caller: CallerContext ): TracedInput[update_service.GetUpdateByOffsetRequest] => Future[ @@ -183,13 +273,25 @@ class JsUpdateService( private def getTransactionById( caller: CallerContext - ): TracedInput[update_service.GetTransactionByIdRequest] => Future[ + ): TracedInput[LegacyDTOs.GetTransactionByIdRequest] => Future[ Either[JsCantonError, JsGetTransactionResponse] ] = { req => implicit val tc = req.traceContext updateServiceClient(caller.token())(req.traceContext) - .getTransactionById(req.in) - .flatMap(protocolConverters.GetTransactionResponse.toJson(_)) + .getUpdateById( + update_service.GetUpdateByIdRequest( + updateId = req.in.updateId, + updateFormat = Some( + getUpdateFormatForFlatQueries( + requestingParties = req.in.requestingParties, + transactionFormat = req.in.transactionFormat, + ) + ), + ) + ) + .flatMap((r: update_service.GetUpdateResponse) => + protocolConverters.GetTransactionResponseLegacy.toJson(toGetTransactionResponse(r)) + ) .resultToRight } @@ -201,15 +303,21 @@ class JsUpdateService( req => { implicit val tc = req.traceContext updateServiceClient(caller.token())(req.traceContext) - .getTransactionTreeById( - update_service.GetTransactionByIdRequest( + .getUpdateById( + update_service.GetUpdateByIdRequest( updateId = req.in._1, - requestingParties = req.in._2, - transactionFormat = None, + updateFormat = Some( + getUpdateFormatForPointwiseQueries( + requestingParties = req.in._2, + transactionShape = TRANSACTION_SHAPE_LEDGER_EFFECTS, + ) + ), ) ) - .flatMap { tr => 
- protocolConverters.GetTransactionTreeResponse.toJson(tr) + .flatMap { (r: update_service.GetUpdateResponse) => + protocolConverters.GetTransactionTreeResponseLegacy.toJson( + toGetTransactionTreeResponse(r) + ) } .resultToRight } @@ -220,18 +328,7 @@ class JsUpdateService( req => { implicit val tc = req.traceContext Flow[LegacyDTOs.GetUpdatesRequest].map { request => - update_service.GetUpdatesRequest( - beginExclusive = request.beginExclusive, - endInclusive = request.endInclusive, - filter = request.filter.map(f => - transaction_filter.TransactionFilter( - filtersByParty = f.filtersByParty, - filtersForAnyParty = f.filtersForAnyParty, - ) - ), - verbose = request.verbose, - updateFormat = request.updateFormat, - ) + toGetUpdatesRequest(request, forTrees = false) } via prepareSingleWsStream( updateServiceClient(caller.token())(TraceContext.empty).getUpdates, @@ -293,8 +390,6 @@ class JsUpdateService( update_service.GetUpdatesRequest( beginExclusive = req.beginExclusive, endInclusive = req.endInclusive, - filter = None, - verbose = false, updateFormat = req.updateFormat, ) case (None, None, _) => @@ -307,8 +402,6 @@ class JsUpdateService( update_service.GetUpdatesRequest( beginExclusive = req.beginExclusive, endInclusive = req.endInclusive, - filter = None, - verbose = false, updateFormat = Some(toUpdateFormat(filter, verbose, forTrees)), ) } @@ -330,33 +423,8 @@ class JsUpdateService( } ) - private def toTransactionTree(tx: Transaction): LegacyDTOs.TransactionTree = - LegacyDTOs.TransactionTree( - updateId = tx.updateId, - commandId = tx.commandId, - workflowId = tx.workflowId, - effectiveAt = tx.effectiveAt, - offset = tx.offset, - eventsById = tx.events - .collect(e => - e.event match { - case Event.Created(created) => - created.nodeId -> LegacyDTOs.TreeEvent(LegacyDTOs.TreeEvent.Kind.Created(created)) - case Event.Exercised(exercised) => - exercised.nodeId -> LegacyDTOs.TreeEvent( - LegacyDTOs.TreeEvent.Kind.Exercised(exercised) - ) - } - ) - .toMap, - synchronizerId = tx.synchronizerId, - traceContext = tx.traceContext, - recordTime = tx.recordTime, - ) } -// TODO(#23504) remove deprecated methods -@nowarn("cat=deprecation") object JsUpdateService extends DocumentationEndpoints { import Endpoints.* import JsUpdateServiceCodecs.* @@ -392,15 +460,19 @@ object JsUpdateService extends DocumentationEndpoints { CodecFormat.Json, ](PekkoStreams) ) - .description("Get flat transactions update stream (deprecated: use v2/updates instead)") + .deprecated() + .description( + "Get flat transactions update stream. Provided for backwards compatibility; it will be removed in Canton version 3.5.0. Use v2/updates instead." + ) val getUpdatesFlatListEndpoint = updates.post .in(sttp.tapir.stringToPath("flats")) .in(jsonBody[LegacyDTOs.GetUpdatesRequest]) .out(jsonBody[Seq[JsGetUpdatesResponse]]) + .deprecated() .description( - "Query flat transactions update list (blocking call, deprecated: use v2/updates instead)" + "Query flat transactions update list (blocking call). Provided for backwards compatibility; it will be removed in Canton version 3.5.0. Use v2/updates instead." ) .inStreamListParamsAndDescription() @@ -414,15 +486,19 @@ object JsUpdateService extends DocumentationEndpoints { CodecFormat.Json, ](PekkoStreams) ) - .description("Get update transactions tree stream (deprecated: use v2/updates instead)") + .deprecated() + .description( + "Get update transactions tree stream. Provided for backwards compatibility; it will be removed in Canton version 3.5.0. Use v2/updates instead."
+ ) val getUpdatesTreeListEndpoint = updates.post .in(sttp.tapir.stringToPath("trees")) .in(jsonBody[LegacyDTOs.GetUpdatesRequest]) .out(jsonBody[Seq[JsGetUpdateTreesResponse]]) + .deprecated() .description( - "Query update transactions tree list (blocking call, deprecated: use v2/updates instead)" + "Query update transactions tree list (blocking call). Provided for backwards compatibility; it will be removed in Canton version 3.5.0. Use v2/updates instead." ) .inStreamListParamsAndDescription() @@ -431,28 +507,40 @@ .in(path[Long]("offset")) .in(query[List[String]]("parties")) .out(jsonBody[JsGetTransactionTreeResponse]) - .description("Get transaction tree by offset") + .deprecated() + .description( + "Get transaction tree by offset. Provided for backwards compatibility; it will be removed in Canton version 3.5.0. Use v2/updates/update-by-offset instead." + ) val getTransactionTreeByIdEndpoint = updates.get .in(sttp.tapir.stringToPath("transaction-tree-by-id")) .in(path[String]("update-id")) .in(query[List[String]]("parties")) .out(jsonBody[JsGetTransactionTreeResponse]) - .description("Get transaction tree by id") + .deprecated() + .description( + "Get transaction tree by id. Provided for backwards compatibility; it will be removed in Canton version 3.5.0. Use v2/updates/update-by-id instead." + ) val getTransactionByIdEndpoint = updates.post .in(sttp.tapir.stringToPath("transaction-by-id")) - .in(jsonBody[update_service.GetTransactionByIdRequest]) + .in(jsonBody[LegacyDTOs.GetTransactionByIdRequest]) .out(jsonBody[JsGetTransactionResponse]) - .description("Get transaction by id") + .deprecated() + .description( + "Get transaction by id. Provided for backwards compatibility; it will be removed in Canton version 3.5.0. Use v2/updates/update-by-id instead." + ) val getTransactionByOffsetEndpoint = updates.post .in(sttp.tapir.stringToPath("transaction-by-offset")) - .in(jsonBody[update_service.GetTransactionByOffsetRequest]) + .in(jsonBody[LegacyDTOs.GetTransactionByOffsetRequest]) .out(jsonBody[JsGetTransactionResponse]) - .description("Get transaction by offset") + .deprecated() + .description( + "Get transaction by offset. Provided for backwards compatibility; it will be removed in Canton version 3.5.0. Use v2/updates/update-by-offset instead."
+ ) val getUpdateByOffsetEndpoint = updates.post @@ -514,8 +602,6 @@ final case class JsGetUpdateTreesResponse( update: JsUpdateTree.Update ) -// TODO(#23504) remove suppression of deprecation warnings -@nowarn("cat=deprecation") object JsUpdateServiceCodecs { import JsSchema.config import JsSchema.JsServicesCommonCodecs.* @@ -526,10 +612,10 @@ object JsUpdateServiceCodecs { implicit val updateFormatRW: Codec[transaction_filter.UpdateFormat] = deriveRelaxedCodec implicit val getUpdatesRequestRW: Codec[update_service.GetUpdatesRequest] = deriveRelaxedCodec implicit val getUpdatesRequestLegacyRW: Codec[LegacyDTOs.GetUpdatesRequest] = deriveRelaxedCodec - implicit val getTransactionByIdRequestRW: Codec[update_service.GetTransactionByIdRequest] = + implicit val getTransactionByIdRequestLegacyRW: Codec[LegacyDTOs.GetTransactionByIdRequest] = deriveRelaxedCodec - implicit val getTransactionByOffsetRequestRW - : Codec[update_service.GetTransactionByOffsetRequest] = + implicit val getTransactionByOffsetRequestLegacyRW + : Codec[LegacyDTOs.GetTransactionByOffsetRequest] = deriveRelaxedCodec implicit val getUpdateByIdRequestRW: Codec[update_service.GetUpdateByIdRequest] = deriveRelaxedCodec diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsVersionService.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsVersionService.scala index be5ff2f5ab..81b08c3360 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsVersionService.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsVersionService.scala @@ -66,6 +66,7 @@ object JsVersionServiceCodecs { implicit val umf: Codec[version_service.UserManagementFeature] = deriveRelaxedCodec implicit val pmf: Codec[version_service.PartyManagementFeature] = deriveRelaxedCodec implicit val ocf: Codec[version_service.OffsetCheckpointFeature] = deriveRelaxedCodec + implicit val pf: Codec[version_service.PackageFeature] = deriveRelaxedCodec implicit val fd: Codec[version_service.FeaturesDescriptor] = deriveRelaxedCodec implicit val glavr: Codec[version_service.GetLedgerApiVersionResponse] = deriveRelaxedCodec } diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/LegacyDTOs.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/LegacyDTOs.scala index b612054c46..667635ba09 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/LegacyDTOs.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/LegacyDTOs.scala @@ -3,6 +3,8 @@ package com.digitalasset.canton.http.json.v2 +import com.daml.ledger.api.v2.event.Event.Event +import com.daml.ledger.api.v2.transaction.Transaction import com.daml.ledger.api.v2.{offset_checkpoint, reassignment} /** Data structures that replicate legacy gRPC messages for backwards compatibility */ @@ -62,15 +64,63 @@ object LegacyDTOs { beginExclusive: Long, endInclusive: Option[Long], filter: Option[LegacyDTOs.TransactionFilter], - verbose: Boolean, + verbose: Boolean = false, updateFormat: Option[com.daml.ledger.api.v2.transaction_filter.UpdateFormat], ) final case class GetActiveContractsRequest( filter: Option[LegacyDTOs.TransactionFilter], - verbose: Boolean, + verbose: Boolean = false, activeAtOffset: Long, eventFormat: 
Option[com.daml.ledger.api.v2.transaction_filter.EventFormat], ) + final case class SubmitAndWaitForTransactionTreeResponse( + transaction: Option[TransactionTree] + ) + + def toTransactionTree(tx: Transaction): LegacyDTOs.TransactionTree = + LegacyDTOs.TransactionTree( + updateId = tx.updateId, + commandId = tx.commandId, + workflowId = tx.workflowId, + effectiveAt = tx.effectiveAt, + offset = tx.offset, + eventsById = tx.events + .collect(e => + e.event match { + case Event.Created(created) => + created.nodeId -> LegacyDTOs.TreeEvent(LegacyDTOs.TreeEvent.Kind.Created(created)) + case Event.Exercised(exercised) => + exercised.nodeId -> LegacyDTOs.TreeEvent( + LegacyDTOs.TreeEvent.Kind.Exercised(exercised) + ) + } + ) + .toMap, + synchronizerId = tx.synchronizerId, + traceContext = tx.traceContext, + recordTime = tx.recordTime, + ) + + final case class GetTransactionTreeResponse( + transaction: Option[TransactionTree] + ) + + final case class GetTransactionByIdRequest( + updateId: String, + requestingParties: Seq[String], + transactionFormat: Option[com.daml.ledger.api.v2.transaction_filter.TransactionFormat], + ) + + final case class GetTransactionByOffsetRequest( + offset: Long, + requestingParties: Seq[String], + transactionFormat: Option[com.daml.ledger.api.v2.transaction_filter.TransactionFormat], + ) + + final case class GetTransactionResponse( + transaction: Option[com.daml.ledger.api.v2.transaction.Transaction] + ) + } diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/ProtoInfo.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/ProtoInfo.scala index fe9ed50dfe..2f8956051f 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/ProtoInfo.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/ProtoInfo.scala @@ -31,10 +31,27 @@ final case class ProtoInfo(protoComments: ExtractedProtoComments) { .flatMap(_.get(camelToSnake(normalizeName(msgName)))) ) // Special case for GetUpdatesRequest, where we want to document the verbose and filter fields - // TODO(#27734) remove this hack when json legacy is removed + // and legacy requests for which we want to preserve the documentation val getUpdatesRequestName = LegacyDTOs.GetUpdatesRequest.getClass.getSimpleName.replace("$", "") val transactionFilterName = LegacyDTOs.TransactionFilter.getClass.getSimpleName.replace("$", "") - msgName match { + val submitAndWaitForTransactionTreeResponseName = + LegacyDTOs.SubmitAndWaitForTransactionTreeResponse.getClass.getSimpleName.replace("$", "") + val transactionTreeName = LegacyDTOs.TransactionTree.getClass.getSimpleName.replace("$", "") + val getTransactionByIdName = + LegacyDTOs.GetTransactionByIdRequest.getClass.getSimpleName.replace("$", "") + val getTransactionByOffsetName = + LegacyDTOs.GetTransactionByOffsetRequest.getClass.getSimpleName.replace("$", "") + val getTransactionResponseName = + LegacyDTOs.GetTransactionResponse.getClass.getSimpleName.replace("$", "") + val getTransactionTreeResponseName = + LegacyDTOs.GetTransactionTreeResponse.getClass.getSimpleName.replace("$", "") + val getActiveContractsRequestName = + LegacyDTOs.GetActiveContractsRequest.getClass.getSimpleName.replace("$", "") + val getUpdateTreesResponseName = + LegacyDTOs.GetUpdateTreesResponse.getClass.getSimpleName.replace("$", "") + val treeEventName = + LegacyDTOs.TreeEvent.getClass.getSimpleName.replace("$", "") + 
normalizeName(msgName) match { case `getUpdatesRequestName` => original.map(info => info.copy(message = @@ -82,6 +99,207 @@ final case class ProtoInfo(protoComments: ExtractedProtoComments) { ) ) ) + case `submitAndWaitForTransactionTreeResponseName` => + Some( + MessageInfo( + FieldData( + comments = Some( + "Provided for backwards compatibility; it will be removed in Canton version 3.5.0." + ), + fieldComments = Map.empty, + ) + ) + ) + case `transactionTreeName` => + Some( + MessageInfo( + FieldData( + comments = Some( + """Provided for backwards compatibility; it will be removed in Canton version 3.5.0. + |Complete view of an on-ledger transaction.""".stripMargin + ), + fieldComments = Map( + "updateId" -> + """Assigned by the server. Useful for correlating logs. + |Must be a valid LedgerString (as described in ``value.proto``). + |Required""".stripMargin, + "commandId" -> + """The ID of the command which resulted in this transaction. Missing for everyone except the submitting party. + |Must be a valid LedgerString (as described in ``value.proto``). + |Optional""".stripMargin, + "workflowId" -> + """The workflow ID used in command submission. Only set if the ``workflow_id`` for the command was set. + |Must be a valid LedgerString (as described in ``value.proto``). + |Optional""".stripMargin, + "effectiveAt" -> + """Ledger effective time. + |Required""".stripMargin, + "offset" -> + """The absolute offset. The details of this field are described in ``community/ledger-api/README.md``. + |Required, it is a valid absolute offset (positive integer).""".stripMargin, + "eventsById" -> + """Changes to the ledger that were caused by this transaction. Nodes of the transaction tree. + |Each key must be a valid node ID (non-negative integer). + |Required""".stripMargin, + "synchronizerId" -> + """A valid synchronizer id. + |Identifies the synchronizer that synchronized the transaction. + |Required""".stripMargin, + "traceContext" -> + """Optional; ledger API trace context + | + |The trace context transported in this message corresponds to the trace context supplied + |by the client application in a HTTP2 header of the original command submission. + |We typically use a header to transfer this type of information. Here we use message + |body, because it is used in gRPC streams which do not support per message headers. + |This field will be populated with the trace context contained in the original submission. + |If that was not provided, a unique ledger-api-server generated trace context will be used + |instead.""".stripMargin, + "recordTime" -> + """The time at which the transaction was recorded. The record time refers to the synchronizer + |which synchronized the transaction. + |Required""".stripMargin, + ), + ) + ) + ) + case `getTransactionResponseName` => + Some( + MessageInfo( + FieldData( + comments = Some( + "Provided for backwards compatibility; it will be removed in Canton version 3.5.0." + ), + fieldComments = Map( + "transaction" -> "Required" + ), + ) + ) + ) + case `getTransactionTreeResponseName` => + Some( + MessageInfo( + FieldData( + comments = Some( + "Provided for backwards compatibility; it will be removed in Canton version 3.5.0." + ), + fieldComments = Map( + "transaction" -> "Required" + ), + ) + ) + ) + case `getTransactionByIdName` => + Some( + MessageInfo( + FieldData( + comments = Some( + "Provided for backwards compatibility; it will be removed in Canton version 3.5.0."
+ ), + fieldComments = Map( + "updateId" -> + """The ID of a particular transaction. + |Must be a valid LedgerString (as described in ``value.proto``). + |Required""".stripMargin, + "requestingParties" -> + """Provided for backwards compatibility; it will be removed in Canton version 3.5.0. + |The parties whose events the client expects to see. + |Events that are not visible for the parties in this collection will not be present in the response. + |Each element must be a valid PartyIdString (as described in ``value.proto``). + |Optional for backwards compatibility for GetTransactionById request: if defined, transaction_format must be + |unset (falling back to defaults).""".stripMargin, + "transactionFormat" -> + """Optional for GetTransactionById request for backwards compatibility: defaults to a TransactionFormat, where: + | + |- event_format.filters_by_party will have template-wildcard filters for all the requesting_parties + |- event_format.filters_for_any_party is unset + |- event_format.verbose = true + |- transaction_shape = TRANSACTION_SHAPE_ACS_DELTA""".stripMargin, + ), + ) + ) + ) + case `getTransactionByOffsetName` => + Some( + MessageInfo( + FieldData( + comments = Some( + "Provided for backwards compatibility; it will be removed in Canton version 3.5.0." + ), + fieldComments = Map( + "offset" -> + """The offset of the transaction being looked up. + |Must be a valid absolute offset (positive integer). + |Required""".stripMargin, + "requestingParties" -> + """Provided for backwards compatibility; it will be removed in Canton version 3.5.0. + |The parties whose events the client expects to see. + |Events that are not visible for the parties in this collection will not be present in the response. + |Each element must be a valid PartyIdString (as described in ``value.proto``). + |Optional for backwards compatibility for GetTransactionByOffset request: if defined, transaction_format must be + |unset (falling back to defaults).""".stripMargin, + "transactionFormat" -> + """Optional for GetTransactionByOffset request for backwards compatibility: defaults to a TransactionFormat, where: + | + |- event_format.filters_by_party will have template-wildcard filters for all the requesting_parties + |- event_format.filters_for_any_party is unset + |- event_format.verbose = true + |- transaction_shape = TRANSACTION_SHAPE_ACS_DELTA""".stripMargin, + ), + ) + ) + ) + case `getActiveContractsRequestName` => + original.map(info => + info.copy(message = + info.message.copy(fieldComments = + info.message.fieldComments + + ("verbose" -> + """Provided for backwards compatibility; it will be removed in Canton version 3.5.0. + |If enabled, values served over the API will contain more information than strictly necessary to interpret the data. + |In particular, setting the verbose flag to true triggers the ledger to include labels for record fields. + |Optional; if specified, event_format must be unset.""".stripMargin) + + ("filter" -> + """Provided for backwards compatibility; it will be removed in Canton version 3.5.0. + |Templates to include in the served snapshot, per party. + |Optional; if specified, event_format must be unset; if not specified, event_format must be set.""".stripMargin) + ) + ) + ) + case `getUpdateTreesResponseName` => + Some( + MessageInfo( + FieldData( + comments = Some( + "Provided for backwards compatibility; it will be removed in Canton version 3.5.0."
+ ), + fieldComments = Map.empty, + ) + ) + ) + case `treeEventName` => + Some( + MessageInfo( + FieldData( + comments = Some( + """Provided for backwards compatibility; it will be removed in Canton version 3.5.0. + |Each tree event message type below contains a ``witness_parties`` field which + |indicates the subset of the requested parties that can see the event + |in question. + | + |Note that transaction trees might contain events with + |_no_ witness parties, which were included simply because they were + |children of events which have witnesses.""".stripMargin + ), + fieldComments = Map( + "created" -> + """The event as it appeared in the context of its original daml transaction on this participant node. + |In particular, the offset, node_id pair of the daml transaction are preserved. + |Required""".stripMargin + ), + ) + ) + ) case _ => original } } diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/ProtocolConverters.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/ProtocolConverters.scala index 36cc984a4a..eb218dece4 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/ProtocolConverters.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/ProtocolConverters.scala @@ -12,7 +12,6 @@ import com.daml.ledger.api.v2.interactive.interactive_submission_service.{ PreparedTransaction, } import com.daml.ledger.api.v2.reassignment.AssignedEvent -import com.daml.ledger.api.v2.transaction.TreeEvent import com.digitalasset.canton.http.json.v2.IdentifierConverter.illegalValue import com.digitalasset.canton.http.json.v2.JsContractEntry.JsContractEntry import com.digitalasset.canton.http.json.v2.JsSchema.JsReassignmentEvent.JsReassignmentEvent @@ -35,7 +34,6 @@ import io.scalaland.chimney.dsl.* import ujson.StringRenderer import ujson.circe.CirceJson -import scala.annotation.nowarn import scala.concurrent.{ExecutionContext, Future} import scala.language.implicitConversions @@ -444,38 +442,6 @@ class ProtocolConverters( .transform } - // TODO(#23504) remove when TransactionTreeEvent is removed - @nowarn("cat=deprecation") - object TransactionTreeEvent - extends ProtocolConverter[lapi.transaction.TreeEvent.Kind, JsTreeEvent.TreeEvent] { - - override def fromJson( - jsObj: JsTreeEvent.TreeEvent - )(implicit traceContext: TraceContext): Future[TreeEvent.Kind] = - jsObj match { - case JsTreeEvent.CreatedTreeEvent(created) => - CreatedEvent - .fromJson(created) - .map(ev => lapi.transaction.TreeEvent.Kind.Created(ev)) - - case JsTreeEvent.ExercisedTreeEvent(exercised) => - ExercisedEvent - .fromJson(exercised) - .map(ev => lapi.transaction.TreeEvent.Kind.Exercised(ev)) - } - - override def toJson(lapiObj: TreeEvent.Kind)(implicit - traceContext: TraceContext - ): Future[JsTreeEvent.TreeEvent] = lapiObj match { - case lapi.transaction.TreeEvent.Kind.Empty => - illegalValue(lapi.transaction.TreeEvent.Kind.Empty.toString()) - case lapi.transaction.TreeEvent.Kind.Created(created) => - CreatedEvent.toJson(created).map(JsTreeEvent.CreatedTreeEvent(_)) - case lapi.transaction.TreeEvent.Kind.Exercised(exercised) => - ExercisedEvent.toJson(exercised).map(JsTreeEvent.ExercisedTreeEvent(_)) - } - } - object TransactionTreeEventLegacy extends ProtocolConverter[LegacyDTOs.TreeEvent.Kind, JsTreeEvent.TreeEvent] { @@ -506,40 +472,6 @@ } } - // TODO(#23504) remove when TransactionTree 
is removed - @nowarn("cat=deprecation") - object TransactionTree - extends ProtocolConverter[lapi.transaction.TransactionTree, JsTransactionTree] { - def toJson( - lapiTransactionTree: lapi.transaction.TransactionTree - )(implicit - traceContext: TraceContext - ): Future[JsTransactionTree] = - for { - eventsById <- lapiTransactionTree.eventsById.toSeq.map { case (k, v) => - TransactionTreeEvent.toJson(v.kind).map(newVal => (k, newVal)) - }.sequence - - } yield lapiTransactionTree - .into[JsTransactionTree] - .withFieldConst(_.eventsById, eventsById.toMap) - .transform - - def fromJson( - jsTransactionTree: JsTransactionTree - )(implicit - traceContext: TraceContext - ): Future[lapi.transaction.TransactionTree] = - for { - eventsById <- jsTransactionTree.eventsById.toSeq.map { case (k, v) => - TransactionTreeEvent.fromJson(v).map(newVal => (k, lapi.transaction.TreeEvent(newVal))) - }.sequence - } yield jsTransactionTree - .into[lapi.transaction.TransactionTree] - .withFieldConst(_.eventsById, eventsById.toMap) - .transform - } - object TransactionTreeLegacy extends ProtocolConverter[LegacyDTOs.TransactionTree, JsTransactionTree] { def toJson( @@ -574,21 +506,19 @@ class ProtocolConverters( .transform } - // TODO(#23504) remove when SubmitAndWaitForTransactionTreeResponse is removed - @nowarn("cat=deprecation") - object SubmitAndWaitTransactionTreeResponse + object SubmitAndWaitTransactionTreeResponseLegacy extends ProtocolConverter[ - lapi.command_service.SubmitAndWaitForTransactionTreeResponse, + LegacyDTOs.SubmitAndWaitForTransactionTreeResponse, JsSubmitAndWaitForTransactionTreeResponse, ] { def toJson( - response: lapi.command_service.SubmitAndWaitForTransactionTreeResponse + response: LegacyDTOs.SubmitAndWaitForTransactionTreeResponse )(implicit traceContext: TraceContext ): Future[JsSubmitAndWaitForTransactionTreeResponse] = - TransactionTree - .toJson(response.getTransaction) + TransactionTreeLegacy + .toJson(response.transaction.getOrElse(invalidArgument("empty", "non-empty transaction"))) .map(tree => JsSubmitAndWaitForTransactionTreeResponse( transactionTree = tree @@ -599,11 +529,11 @@ class ProtocolConverters( response: JsSubmitAndWaitForTransactionTreeResponse )(implicit traceContext: TraceContext - ): Future[lapi.command_service.SubmitAndWaitForTransactionTreeResponse] = - TransactionTree + ): Future[LegacyDTOs.SubmitAndWaitForTransactionTreeResponse] = + TransactionTreeLegacy .fromJson(response.transactionTree) .map(tree => - lapi.command_service.SubmitAndWaitForTransactionTreeResponse( + LegacyDTOs.SubmitAndWaitForTransactionTreeResponse( transaction = Some(tree) ) ) @@ -767,26 +697,16 @@ class ProtocolConverters( object CreatedEvent extends ProtocolConverter[lapi.event.CreatedEvent, JsEvent.CreatedEvent] { def toJson(created: lapi.event.CreatedEvent)(implicit traceContext: TraceContext - ): Future[JsEvent.CreatedEvent] = + ): Future[JsEvent.CreatedEvent] = { + val representativeTemplateId = + created.getTemplateId.copy(packageId = created.representativePackageId) + for { contractKey <- created.contractKey - .map(ck => - schemaProcessors - .keyArgFromProtoToJson( - created.getTemplateId, - ck, - ) - ) - .sequence - createdArgs <- created.createArguments - .map(ca => - schemaProcessors - .contractArgFromProtoToJson( - created.getTemplateId, - ca, - ) - ) - .sequence + .traverse(schemaProcessors.keyArgFromProtoToJson(representativeTemplateId, _)) + createdArgs <- created.createArguments.traverse( + schemaProcessors.contractArgFromProtoToJson(representativeTemplateId, _) + ) 
interfaceViews <- Future.sequence(created.interfaceViews.map(InterfaceView.toJson)) } yield created .into[JsEvent.CreatedEvent] @@ -794,6 +714,7 @@ class ProtocolConverters( .withFieldConst(_.contractKey, contractKey.map(toCirce(_))) .withFieldConst(_.createArgument, createdArgs.map(toCirce(_))) .transform + } def fromJson(createdEvent: JsEvent.CreatedEvent)(implicit traceContext: TraceContext @@ -1172,50 +1093,6 @@ class ProtocolConverters( }).map(lapi.update_service.GetUpdateResponse(_)) } - // TODO(#23504) remove when GetUpdateTreesResponse is removed - @nowarn("cat=deprecation") - object GetUpdateTreesResponse - extends ProtocolConverter[ - lapi.update_service.GetUpdateTreesResponse, - JsGetUpdateTreesResponse, - ] { - def toJson( - value: lapi.update_service.GetUpdateTreesResponse - )(implicit - traceContext: TraceContext - ): Future[JsGetUpdateTreesResponse] = - ((value.update match { - case lapi.update_service.GetUpdateTreesResponse.Update.Empty => - illegalValue(lapi.update_service.GetUpdateTreesResponse.Update.Empty.toString()) - case lapi.update_service.GetUpdateTreesResponse.Update.OffsetCheckpoint(value) => - Future(JsUpdateTree.OffsetCheckpoint(value)) - case lapi.update_service.GetUpdateTreesResponse.Update.TransactionTree(value) => - TransactionTree.toJson(value).map(JsUpdateTree.TransactionTree.apply) - case lapi.update_service.GetUpdateTreesResponse.Update.Reassignment(value) => - Reassignment.toJson(value).map(JsUpdateTree.Reassignment.apply) - }): Future[JsUpdateTree.Update]).map(update => JsGetUpdateTreesResponse(update)) - - def fromJson( - jsObj: JsGetUpdateTreesResponse - )(implicit - traceContext: TraceContext - ): Future[lapi.update_service.GetUpdateTreesResponse] = - (jsObj.update match { - case JsUpdateTree.OffsetCheckpoint(value) => - Future.successful( - lapi.update_service.GetUpdateTreesResponse.Update.OffsetCheckpoint(value) - ) - case JsUpdateTree.Reassignment(value) => - Reassignment - .fromJson(value) - .map(lapi.update_service.GetUpdateTreesResponse.Update.Reassignment.apply) - case JsUpdateTree.TransactionTree(value) => - TransactionTree - .fromJson(value) - .map(lapi.update_service.GetUpdateTreesResponse.Update.TransactionTree.apply) - }).map(lapi.update_service.GetUpdateTreesResponse(_)) - } - object GetUpdateTreesResponseLegacy extends ProtocolConverter[ LegacyDTOs.GetUpdateTreesResponse, @@ -1258,47 +1135,47 @@ class ProtocolConverters( }).map((u: LegacyDTOs.GetUpdateTreesResponse.Update) => LegacyDTOs.GetUpdateTreesResponse(u)) } - // TODO(#23504) remove when GetTransactionTreeResponse is removed - @nowarn("cat=deprecation") - object GetTransactionTreeResponse + object GetTransactionTreeResponseLegacy extends ProtocolConverter[ - lapi.update_service.GetTransactionTreeResponse, + LegacyDTOs.GetTransactionTreeResponse, JsGetTransactionTreeResponse, ] { def toJson( - obj: lapi.update_service.GetTransactionTreeResponse + obj: LegacyDTOs.GetTransactionTreeResponse )(implicit traceContext: TraceContext ): Future[JsGetTransactionTreeResponse] = - TransactionTree.toJson(obj.getTransaction).map(JsGetTransactionTreeResponse.apply) + TransactionTreeLegacy + .toJson(obj.transaction.getOrElse(invalidArgument("empty", "non-empty transaction"))) + .map(JsGetTransactionTreeResponse.apply) def fromJson(treeResponse: JsGetTransactionTreeResponse)(implicit traceContext: TraceContext - ): Future[lapi.update_service.GetTransactionTreeResponse] = - TransactionTree + ): Future[LegacyDTOs.GetTransactionTreeResponse] = + TransactionTreeLegacy 
.fromJson(treeResponse.transaction) - .map(tree => lapi.update_service.GetTransactionTreeResponse(Some(tree))) + .map(tree => LegacyDTOs.GetTransactionTreeResponse(Some(tree))) } - // TODO(#23504) remove when GetTransactionResponse is removed - @nowarn("cat=deprecation") - object GetTransactionResponse + object GetTransactionResponseLegacy extends ProtocolConverter[ - lapi.update_service.GetTransactionResponse, + LegacyDTOs.GetTransactionResponse, JsGetTransactionResponse, ] { - def toJson(obj: lapi.update_service.GetTransactionResponse)(implicit + def toJson(obj: LegacyDTOs.GetTransactionResponse)(implicit traceContext: TraceContext ): Future[JsGetTransactionResponse] = - Transaction.toJson(obj.getTransaction).map(JsGetTransactionResponse.apply) + Transaction + .toJson(obj.transaction.getOrElse(invalidArgument("empty", "non-empty transaction"))) + .map(JsGetTransactionResponse.apply) def fromJson(obj: JsGetTransactionResponse)(implicit traceContext: TraceContext - ): Future[lapi.update_service.GetTransactionResponse] = + ): Future[LegacyDTOs.GetTransactionResponse] = Transaction .fromJson(obj.transaction) - .map(tr => lapi.update_service.GetTransactionResponse(Some(tr))) + .map(tr => LegacyDTOs.GetTransactionResponse(Some(tr))) } object PrepareSubmissionRequest diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/SchemaProcessors.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/SchemaProcessors.scala index 781a970556..0b44e93402 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/SchemaProcessors.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/SchemaProcessors.scala @@ -66,7 +66,7 @@ trait SchemaProcessors { def exerciseResultFromJsonToProto( template: v2.value.Identifier, choiceName: Ref.IdString.Name, - value: ujson.Value, + jvalue: ujson.Value, )(implicit traceContext: TraceContext ): Future[scala.Option[v2.value.Value]] diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/SchemaProcessorsImpl.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/SchemaProcessorsImpl.scala index 426e7b4091..e17d21adde 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/SchemaProcessorsImpl.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/SchemaProcessorsImpl.scala @@ -4,14 +4,7 @@ package com.digitalasset.canton.http.json.v2 import com.daml.ledger.api.v2.value -import com.daml.ledger.api.v2.value.{Identifier, Value} -import com.digitalasset.canton.fetchcontracts.util.IdentifierConverters -import com.digitalasset.canton.http.json.v2.SchemaProcessorsImpl.{ - JsonDict, - PackageSignatures, - ProtoDict, - ResultOps, -} +import com.digitalasset.canton.http.json.v2.SchemaProcessorsImpl.* import com.digitalasset.canton.ledger.api.validation.ValidationErrors.invalidField import com.digitalasset.canton.ledger.error.LedgerApiErrors import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors @@ -20,7 +13,14 @@ import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.Thereafter.syntax.ThereafterAsyncOps import com.digitalasset.daml.lf.data.Ref -import 
com.digitalasset.daml.lf.data.Ref.{IdString, PackageRef} +import com.digitalasset.daml.lf.data.Ref.{ + DottedName, + IdString, + ModuleName, + PackageId, + PackageRef, + QualifiedName, +} import com.digitalasset.daml.lf.language.Ast import com.digitalasset.transcode.codec.json.JsonCodec import com.digitalasset.transcode.codec.proto.GrpcValueCodec @@ -45,7 +45,7 @@ class SchemaProcessorsImpl( jsonArgsValue: ujson.Value, )(implicit traceContext: TraceContext - ): Future[Value] = + ): Future[value.Value] = for { templateId <- resolveIdentifier(template).toFuture protoDict <- prepareProtoDict @@ -70,7 +70,7 @@ class SchemaProcessorsImpl( jsonArgsValue: ujson.Value, )(implicit traceContext: TraceContext - ): Future[Value] = + ): Future[value.Value] = for { templateId <- resolveIdentifier(template).toFuture protoDict <- prepareProtoDict @@ -113,7 +113,7 @@ class SchemaProcessorsImpl( protoArgs: ujson.Value, )(implicit traceContext: TraceContext - ): Future[Value] = + ): Future[value.Value] = for { templateId <- resolveIdentifier(template).toFuture protoDict <- prepareProtoDict @@ -137,12 +137,12 @@ class SchemaProcessorsImpl( } yield choiceResultConverter.convert(v) override def exerciseResultFromJsonToProto( - template: Identifier, + template: value.Identifier, choiceName: IdString.Name, - value: ujson.Value, + jvalue: ujson.Value, )(implicit traceContext: TraceContext - ): Future[Option[Value]] = value match { + ): Future[Option[value.Value]] = jvalue match { case ujson.Null => Future(None) case _ => for { @@ -152,7 +152,7 @@ class SchemaProcessorsImpl( .get(templateId -> choiceName) .toRight(invalidChoiceException(templateId, choiceName)) .toFuture - } yield Some(choiceResultConverter.convert(value)) + } yield Some(choiceResultConverter.convert(jvalue)) } private def invalidChoiceException(templateId: Ref.Identifier, choiceName: IdString.Name)(implicit @@ -180,11 +180,11 @@ class SchemaProcessorsImpl( ) case PackageRef.Id(_) => Right(template) } - .map(IdentifierConverters.lfIdentifier) + .map(lfIdentifier) private def prepareProtoDict(implicit traceContext: TraceContext - ): Future[Dictionary[Converter[ujson.Value, Value]]] = + ): Future[Dictionary[Converter[ujson.Value, value.Value]]] = memoizedDictionaries(errorLoggingContext).map(_._1) private def prepareJsonDict(implicit @@ -194,7 +194,7 @@ class SchemaProcessorsImpl( private def computeProtoDict( signatures: PackageSignatures - ): Dictionary[Converter[ujson.Value, Value]] = { + ): Dictionary[Converter[ujson.Value, value.Value]] = { val visitor: SchemaVisitor { type Type = (Codec[ujson.Value], Codec[value.Value]) } = SchemaVisitor.compose(new JsonCodec(), GrpcValueCodec) val collector = @@ -208,7 +208,7 @@ class SchemaProcessorsImpl( private def computeJsonDict( signatures: PackageSignatures - ): Dictionary[Converter[Value, ujson.Value]] = { + ): Dictionary[Converter[value.Value, ujson.Value]] = { val visitor: SchemaVisitor { type Type = (Codec[value.Value], Codec[ujson.Value]) } = SchemaVisitor.compose(GrpcValueCodec, new JsonCodec()) val collector = @@ -279,8 +279,8 @@ class SchemaProcessorsImpl( } object SchemaProcessorsImpl { - type JsonDict = Dictionary[Converter[Value, ujson.Value]] - type ProtoDict = Dictionary[Converter[ujson.Value, Value]] + type JsonDict = Dictionary[Converter[value.Value, ujson.Value]] + type ProtoDict = Dictionary[Converter[ujson.Value, value.Value]] type PackageSignatures = Map[Ref.PackageId, Ast.PackageSignature] implicit class ResultOps[T](val result: Either[StatusRuntimeException, T]) extends AnyVal { def 
toFuture: Future[T] = result match { @@ -288,4 +288,13 @@ object SchemaProcessorsImpl { case Right(value) => Future.successful(value) } } + + def lfIdentifier(a: value.Identifier): Ref.Identifier = + Ref.Identifier( + pkg = PackageId.assertFromString(a.packageId), + qualifiedName = QualifiedName( + module = ModuleName.assertFromString(a.moduleName), + name = DottedName.assertFromString(a.entityName), + ), + ) } diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/TranscodePackageIdResolver.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/TranscodePackageIdResolver.scala index 2dc75e0485..3bd1964cb3 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/TranscodePackageIdResolver.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/TranscodePackageIdResolver.scala @@ -16,7 +16,7 @@ import com.digitalasset.canton.ledger.error.JsonApiErrors import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.platform.PackagePreferenceBackend -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata +import com.digitalasset.canton.store.packagemeta.PackageMetadata import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ShowUtil.* @@ -307,7 +307,6 @@ private class TranscodePackageMetadataBackedResolver( (for { userPreferences <- - // TODO(#27499): Support conflicting preferences (two package-ids with the same package-name) TranscodePackageIdResolver.resolvePackageNames(userPreferences, packageMetadataSnapshot) localPreferences <- packageNames.forgetNE.toList.traverse { packageName => packageNameMap @@ -366,6 +365,7 @@ object TranscodePackageIdResolver { .toRight(show"Package-id $packageId not known") .map(_ -> packageId) } + // TODO(#27500): support multiple preferences per package-name .map(_.toMap) } diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/damldefinitionsservice/DamlDefinitionsView.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/damldefinitionsservice/DamlDefinitionsView.scala index 04dbfc8a47..c54f8dfb38 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/damldefinitionsservice/DamlDefinitionsView.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/damldefinitionsservice/DamlDefinitionsView.scala @@ -9,7 +9,7 @@ import com.digitalasset.canton.http.json.v2.damldefinitionsservice.Schema.{ TypeSig, } import com.digitalasset.canton.logging.{ErrorLoggingContext, NoLogging} -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata +import com.digitalasset.canton.store.packagemeta.PackageMetadata import com.digitalasset.daml.lf.data.Ref class DamlDefinitionsView( diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/metrics/HttpApiMetrics.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/metrics/HttpApiMetrics.scala index 1537ef4e3b..1a66d1646e 100644 --- 
a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/metrics/HttpApiMetrics.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/metrics/HttpApiMetrics.scala @@ -4,10 +4,8 @@ package com.digitalasset.canton.http.metrics import com.daml.metrics.HealthMetrics -import com.daml.metrics.api.HistogramInventory.Item -import com.daml.metrics.api.MetricHandle.{Counter, LabeledMetricsFactory, Timer} -import com.daml.metrics.api.noop.NoOpMetricsFactory -import com.daml.metrics.api.{HistogramInventory, MetricInfo, MetricName, MetricQualification} +import com.daml.metrics.api.MetricHandle.LabeledMetricsFactory +import com.daml.metrics.api.{HistogramInventory, MetricName} import com.daml.metrics.http.{ DamlHttpHistograms, DamlHttpMetrics, @@ -18,12 +16,6 @@ import com.daml.metrics.http.{ import scala.annotation.unused object HttpApiMetrics { - lazy val ForTesting = - new HttpApiMetrics( - new HttpApiHistograms(MetricName("test"))(new HistogramInventory), - NoOpMetricsFactory, - ) - final val ComponentName = "json_api" } @@ -37,70 +29,16 @@ class HttpApiHistograms(parent: MetricName)(implicit private val _webSockets: DamlWebSocketsHistograms = new DamlWebSocketsHistograms() val prefix: MetricName = parent :+ "http_json_api" - - // Meters how long parsing and decoding of an incoming json payload takes - val incomingJsonParsingAndValidationTimer: Item = - Item( - prefix :+ "incoming_json_parsing_and_validation_timing", - "", - MetricQualification.Debug, - ) - - // Meters how long the construction of the response json payload takes - val responseCreationTimer: Item = - Item( - prefix :+ "response_creation_timing", - "", - MetricQualification.Debug, - ) - // Meters how long a find by contract id database operation takes - val dbFindByContractId: Item = - Item(prefix :+ "db_find_by_contract_id_timing", "", MetricQualification.Debug) - - // Meters how long processing of the command submission request takes on the ledger - val commandSubmissionLedgerTimer: Item = - Item(prefix :+ "command_submission_ledger_timing", "", MetricQualification.Debug) - } -// TODO(#13303) Clean up metrics class HttpApiMetrics( parent: HttpApiHistograms, labeledMetricsFactory: LabeledMetricsFactory, ) { import HttpApiMetrics.* - import com.daml.metrics.api.MetricsContext.Implicits.empty val prefix: MetricName = parent.prefix - // Meters how long parsing and decoding of an incoming json payload takes - val incomingJsonParsingAndValidationTimer: Timer = - labeledMetricsFactory.timer( - parent.incomingJsonParsingAndValidationTimer.info - ) - - // Meters how long the construction of the response json payload takes - val responseCreationTimer: Timer = - labeledMetricsFactory.timer( - parent.responseCreationTimer.info - ) - // Meters how long a find by contract id database operation takes - val dbFindByContractId: Timer = - labeledMetricsFactory.timer( - parent.dbFindByContractId.info - ) - // Meters how long processing of the command submission request takes on the ledger - val commandSubmissionLedgerTimer: Timer = - labeledMetricsFactory.timer( - parent.commandSubmissionLedgerTimer.info - ) - // Meters http requests throughput - // Meters how many websocket connections are currently active - val websocketRequestCounter: Counter = - labeledMetricsFactory.counter( - MetricInfo(prefix :+ "websocket_request_count", "", MetricQualification.Debug) - ) - val http = new DamlHttpMetrics(labeledMetricsFactory, ComponentName) val websocket = new 
DamlWebSocketMetrics(labeledMetricsFactory, ComponentName) diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/package.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/package.scala index f9d02e018e..4fbd71c8a0 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/package.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/package.scala @@ -3,799 +3,40 @@ package com.digitalasset.canton -import com.daml.ledger.api.v2 as lav2 import com.daml.nonempty.NonEmpty -import com.daml.nonempty.NonEmptyReturningOps.* -import com.digitalasset.canton.fetchcontracts.util.IdentifierConverters.apiIdentifier -import com.digitalasset.canton.ledger.api.User import com.digitalasset.canton.ledger.api.refinements.ApiTypes as lar -import com.digitalasset.daml.lf.typesig +import com.digitalasset.daml.lf import com.google.protobuf.ByteString -import org.apache.pekko.http.scaladsl.model.{StatusCode, StatusCodes} -import scalaz.Isomorphism.{<~>, IsoFunctorTemplate} -import scalaz.std.list.* -import scalaz.std.option.* -import scalaz.syntax.apply.{^, ^^} -import scalaz.syntax.show.* -import scalaz.syntax.tag.* -import scalaz.syntax.traverse.* -import scalaz.{-\/, Applicative, Bitraverse, Functor, NonEmptyList, Traverse, \/, \/-} - -import scala.annotation.tailrec +import org.apache.pekko.http.scaladsl.model.StatusCode package object http { - import com.digitalasset.canton.fetchcontracts as here import scalaz.{@@, Tag} - type Error = here.Error - final val Error = here.Error - type LfValue = here.LfValue - type ContractId = here.ContractId - final val ContractId = here.ContractId - type Party = here.Party - final val Party = here.Party - type PartySet = here.PartySet - type Offset = here.Offset - final val Offset = here.Offset - type ActiveContract[+CtTyId, +LfV] = here.ActiveContract[CtTyId, LfV] - final val ActiveContract = here.ActiveContract - - type InputContractRef[LfV] = - (ContractTypeId.Template.RequiredPkg, LfV) \/ (Option[ContractTypeId.RequiredPkg], ContractId) + type LfValue = lf.value.Value + type Party = lar.Party + final val Party = lar.Party + type PartySet = NonEmpty[Set[Party]] - type ResolvedContractRef[LfV] = - (ContractTypeId.Template.RequiredPkg, LfV) \/ (ContractTypeId.RequiredPkg, ContractId) - - type UserIdTag = lar.UserIdTag type UserId = lar.UserId val UserId = lar.UserId - type Choice = lar.Choice - val Choice = lar.Choice - - type CommandIdTag = lar.CommandIdTag - type CommandId = lar.CommandId - val CommandId = lar.CommandId - - type SubmissionId = String @@ SubmissionIdTag - val SubmissionId = Tag.of[SubmissionIdTag] - - type WorkflowIdTag = lar.WorkflowIdTag - type WorkflowId = String @@ WorkflowIdTag - val WorkflowId = Tag.of[WorkflowIdTag] - - type LfType = typesig.Type - type RetryInfoDetailDuration = scala.concurrent.duration.Duration @@ RetryInfoDetailDurationTag val RetryInfoDetailDuration = Tag.of[RetryInfoDetailDurationTag] - type CompletionOffset = String @@ CompletionOffsetTag - val CompletionOffset = Tag.of[CompletionOffsetTag] - type Base64 = ByteString @@ Base64Tag val Base64 = Tag.of[Base64Tag] - type Base16 = ByteString @@ Base16Tag - val Base16 = Tag.of[Base16Tag] } package http { - import com.daml.ledger.api.v2.commands.Commands - import com.digitalasset.daml.lf.data.Ref.{HexString, PackageId, PackageRef} - import com.digitalasset.canton.fetchcontracts.`fc ErrorOps` - import 
com.digitalasset.canton.http.json.v1.PackageService - import com.digitalasset.canton.topology.SynchronizerId - - import scala.annotation.nowarn - - sealed trait SubmissionIdTag - - sealed trait CompletionOffsetTag - sealed trait Base64Tag - sealed trait Base16Tag - - trait JwtPayloadTag - - trait JwtPayloadG { - val userId: UserId - val readAs: List[Party] - val actAs: List[Party] - val parties: PartySet - } - - // write endpoints require at least one party in actAs - // (only the first one is used for pre-multiparty ledgers) - // but we can have multiple parties in readAs. - final case class JwtWritePayload( - userId: UserId, - submitter: NonEmptyList[Party], - readAs: List[Party], - ) extends JwtPayloadG { - override val actAs: List[Party] = submitter.toList - override val parties: PartySet = - submitter.toSet1 ++ readAs - } - -// As with JwtWritePayload, but supports empty `actAs`. At least one of -// `actAs` or `readAs` must be non-empty. - sealed abstract case class JwtPayload private ( - userId: UserId, - readAs: List[Party], - actAs: List[Party], - parties: PartySet, - ) extends JwtPayloadG {} - - object JwtPayload { - def apply( - userId: UserId, - readAs: List[Party], - actAs: List[Party], - ): Option[JwtPayload] = - (readAs ++ actAs) match { - case NonEmpty(ps) => - Some( - new JwtPayload(userId, readAs, actAs, ps.toSet) {} - ) - case _ => None - } - } - - final case class Contract[LfV](value: ArchivedContract \/ ActiveContract.ResolvedCtTyId[LfV]) - - final case class ArchivedContract( - contractId: ContractId, - templateId: ContractTypeId.RequiredPkgId, - ) - - final case class FetchRequest[+LfV]( - locator: ContractLocator[LfV], - readAs: Option[NonEmptyList[Party]], - ) { - def traverseLocator[F[_]: Functor, OV]( - f: ContractLocator[LfV] => F[ContractLocator[OV]] - ): F[FetchRequest[OV]] = f(locator) map (l => copy(locator = l)) - } - - sealed abstract class ContractLocator[+LfV] extends Product with Serializable - - final case class EnrichedContractKey[+LfV]( - templateId: ContractTypeId.Template.RequiredPkg, - key: LfV, - ) extends ContractLocator[LfV] - - final case class EnrichedContractId( - templateId: Option[ContractTypeId.RequiredPkg], - contractId: http.ContractId, - ) extends ContractLocator[Nothing] - - final case class ContractKeyStreamRequest[+Cid, +LfV]( - contractIdAtOffset: Cid, - ekey: EnrichedContractKey[LfV], - ) - - final case class GetActiveContractsRequest( - templateIds: NonEmpty[Set[ContractTypeId.RequiredPkg]], - readAs: Option[NonEmptyList[Party]], - ) - - final case class SearchForeverRequest( - queriesWithPos: NonEmptyList[(SearchForeverQuery, Int)] - ) - - final case class SearchForeverQuery( - templateIds: NonEmpty[Set[ContractTypeId.RequiredPkg]], - offset: Option[http.Offset], - ) - - final case class PartyDetails(identifier: Party, isLocal: Boolean) - - // Important note: when changing this ADT, adapt the custom associated JsonFormat codec in JsonProtocol - sealed abstract class UserRight extends Product with Serializable - final case object ParticipantAdmin extends UserRight - final case object IdentityProviderAdmin extends UserRight - final case class CanActAs(party: Party) extends UserRight - final case class CanReadAs(party: Party) extends UserRight - final case class CanExecuteAs(party: Party) extends UserRight - final case object CanReadAsAnyParty extends UserRight - final case object CanExecuteAsAnyParty extends UserRight - - object UserRights { - import com.digitalasset.daml.lf.data.Ref - import 
com.digitalasset.canton.ledger.api.UserRight as LedgerUserRight - import scalaz.syntax.std.either.* - import scalaz.syntax.traverse.* - - def toLedgerUserRights(input: List[UserRight]): String \/ List[LedgerUserRight] = - input.traverse { - case ParticipantAdmin => \/.right(LedgerUserRight.ParticipantAdmin) - case IdentityProviderAdmin => \/.right(LedgerUserRight.IdentityProviderAdmin) - case CanActAs(party) => - Ref.Party.fromString(party.unwrap).map(LedgerUserRight.CanActAs.apply).disjunction - case CanReadAs(party) => - Ref.Party.fromString(party.unwrap).map(LedgerUserRight.CanReadAs.apply).disjunction - case CanReadAsAnyParty => \/.right(LedgerUserRight.CanReadAsAnyParty) - case CanExecuteAs(party) => - Ref.Party.fromString(party.unwrap).map(LedgerUserRight.CanExecuteAs.apply).disjunction - case CanExecuteAsAnyParty => \/.right(LedgerUserRight.CanExecuteAsAnyParty) - } - - def fromLedgerUserRights(input: Seq[LedgerUserRight]): List[UserRight] = input - .map[http.UserRight] { - case LedgerUserRight.ParticipantAdmin => ParticipantAdmin - case LedgerUserRight.IdentityProviderAdmin => IdentityProviderAdmin - case LedgerUserRight.CanReadAsAnyParty => CanReadAsAnyParty - case LedgerUserRight.CanExecuteAsAnyParty => CanExecuteAsAnyParty - case LedgerUserRight.CanActAs(party) => - CanActAs(Party(party: String)) - case LedgerUserRight.CanReadAs(party) => - CanReadAs(Party(party: String)) - case LedgerUserRight.CanExecuteAs(party) => - CanExecuteAs(Party(party: String)) - } - .toList - } - - final case class UserDetails(userId: String, primaryParty: Option[String]) - - object UserDetails { - def fromUser(user: User) = - UserDetails(user.id, user.primaryParty) - } - - final case class CreateUserRequest( - userId: String, - primaryParty: Option[String], - rights: Option[List[UserRight]], - ) - - final case class ListUserRightsRequest(userId: String) - - final case class GrantUserRightsRequest( - userId: String, - rights: List[UserRight], - ) - - final case class RevokeUserRightsRequest( - userId: String, - rights: List[UserRight], - ) - - final case class GetUserRequest(userId: String) - - final case class DeleteUserRequest(userId: String) - - final case class AllocatePartyRequest( - identifierHint: Option[Party], - synchronizerId: Option[String], - ) - - // Important note: when changing this ADT, adapt the custom associated JsonFormat codec in JsonProtocol - sealed abstract class DeduplicationPeriod extends Product with Serializable { - def toProto: Commands.DeduplicationPeriod = - this match { - case DeduplicationPeriod.Duration(millis) => - Commands.DeduplicationPeriod.DeduplicationDuration( - com.google.protobuf.duration.Duration(java.time.Duration.ofMillis(millis)) - ) - case DeduplicationPeriod.Offset(offset) => - Commands.DeduplicationPeriod - .DeduplicationOffset(Offset.assertFromStringToLong(offset)) - } - } - - object DeduplicationPeriod { - final case class Duration(durationInMillis: Long) extends http.DeduplicationPeriod - final case class Offset(offset: HexString) extends http.DeduplicationPeriod - } - - final case class DisclosedContract[+TmplId]( - contractId: ContractId, - templateId: TmplId, - createdEventBlob: Base64, - ) { - def toLedgerApi(implicit - TmplId: TmplId <:< ContractTypeId.Template.RequiredPkg - ): lav2.commands.DisclosedContract = - lav2.commands.DisclosedContract( - templateId = Some(apiIdentifier(templateId)), - contractId = ContractId unwrap contractId, - createdEventBlob = Base64 unwrap createdEventBlob, - synchronizerId = "", - ) - } - - object 
DisclosedContract { - type LAV = DisclosedContract[ContractTypeId.Template.RequiredPkg] - - implicit val covariant: Traverse[DisclosedContract] = - new Traverse[DisclosedContract] { - override def traverseImpl[G[_]: Applicative, A, B]( - fab: DisclosedContract[A] - )(f: A => G[B]): G[DisclosedContract[B]] = - f(fab.templateId).map(tId => fab.copy(templateId = tId)) - } - } - - /** @tparam TmplId - * disclosed contracts' template ID - */ - final case class CommandMeta[+TmplId]( - commandId: Option[CommandId], - actAs: Option[NonEmptyList[Party]], - readAs: Option[List[Party]], - submissionId: Option[SubmissionId], - workflowId: Option[WorkflowId], - deduplicationPeriod: Option[http.DeduplicationPeriod], - disclosedContracts: Option[List[DisclosedContract[TmplId]]], - synchronizerId: Option[SynchronizerId], - packageIdSelectionPreference: Option[List[PackageId]], - ) - - object CommandMeta { - type NoDisclosed = CommandMeta[Nothing] - type IgnoreDisclosed = CommandMeta[Any] - type LAV = CommandMeta[ContractTypeId.Template.RequiredPkg] - - implicit val covariant: Traverse[CommandMeta] = new Traverse[CommandMeta] { - override def traverseImpl[G[_]: Applicative, A, B]( - fab: CommandMeta[A] - )(f: A => G[B]): G[CommandMeta[B]] = - fab.disclosedContracts - .traverse(_.traverse(_.traverse(f))) - .map(dc => fab.copy(disclosedContracts = dc)) - } - } - - final case class CreateCommand[+LfV, +TmplId]( - templateId: TmplId, - payload: LfV, - meta: Option[CommandMeta.NoDisclosed], - ) { - def traversePayload[G[_]: Applicative, LfVB, TmplId0 >: TmplId]( - f: LfV => G[LfVB] - ): G[CreateCommand[LfVB, TmplId0]] = - Bitraverse[CreateCommand].leftTraverse[TmplId0].traverse(this)(f) - } - - final case class ExerciseCommand[+PkgId, +LfV, +Ref]( - reference: Ref, - choice: http.Choice, - argument: LfV, - // passing a template ID is allowed; we distinguish internally - choiceInterfaceId: Option[ContractTypeId[PkgId]], - meta: Option[CommandMeta[ContractTypeId.Template[PkgId]]], - ) - - final case class CreateAndExerciseCommand[+Payload, +Arg, +TmplId, +IfceId]( - templateId: TmplId, - payload: Payload, - choice: http.Choice, - argument: Arg, - // passing a template ID is allowed; we distinguish internally - choiceInterfaceId: Option[IfceId], - meta: Option[CommandMeta[TmplId]], - ) - - final case class CreateCommandResponse[+LfV]( - contractId: ContractId, - templateId: ContractTypeId.Template.RequiredPkgId, - key: Option[LfV], - payload: LfV, - signatories: Seq[Party], - observers: Seq[Party], - completionOffset: CompletionOffset, - ) - - final case class ExerciseResponse[LfV]( - exerciseResult: LfV, - events: List[Contract[LfV]], - completionOffset: CompletionOffset, - ) - - object PartyDetails { - def fromLedgerApi(p: com.digitalasset.canton.ledger.api.PartyDetails): PartyDetails = - PartyDetails(Party(p.party), p.isLocal) - } - - final case class StartingOffset(offset: Offset) - - object Contract { - - // TODO(#23504) remove when TransactionTree is removed from the API - @nowarn("cat=deprecation") - def fromTransactionTree( - tx: lav2.transaction.TransactionTree - ): Error \/ Vector[Contract[lav2.value.Value]] = { - val events = tx.eventsById.toSeq.view.sortBy(_._1).map(_._2).toVector - fromTreeEvents(events) - } - - // TODO(#23504) remove when TreeEvent is removed from the API - @nowarn("cat=deprecation") - private[this] def fromTreeEvents( - events: Vector[lav2.transaction.TreeEvent] - ): Error \/ Vector[Contract[lav2.value.Value]] = { - @tailrec - def loop( - events: Vector[lav2.transaction.TreeEvent], 
- acc: Error \/ Vector[Contract[lav2.value.Value]], - ): Error \/ Vector[Contract[lav2.value.Value]] = events match { - case head +: tail => - head.kind match { - case lav2.transaction.TreeEvent.Kind.Created(created) => - val a = - ActiveContract - .fromLedgerApi(http.ActiveContract.ExtractAs.Template, created) - .map(a => Contract[lav2.value.Value](\/-(a))) - val newAcc = ^(acc, a)(_ :+ _) - loop(tail, newAcc) - case lav2.transaction.TreeEvent.Kind.Exercised(exercised) => - val a = ArchivedContract - .fromLedgerApi(exercised) - .map(_.map(a => Contract[lav2.value.Value](-\/(a)))) - val newAcc = ^(acc, a)(_ ++ _.toVector) - loop(tail, newAcc) - case lav2.transaction.TreeEvent.Kind.Empty => - val errorMsg = s"Expected either Created or Exercised event, got: Empty" - -\/(Error(Symbol("Contract_fromTreeEvent"), errorMsg)) - } - // Wildcard to make the exhaustiveness checker happy. - case _ => - acc - } - - loop(events, \/-(Vector())) - - } - - implicit val covariant: Traverse[Contract] = new Traverse[Contract] { - - override def map[A, B](fa: Contract[A])(f: A => B): Contract[B] = { - val valueB: ArchivedContract \/ ActiveContract.ResolvedCtTyId[B] = - fa.value.map(a => a.map(f)) - Contract(valueB) - } - - override def traverseImpl[G[_]: Applicative, A, B]( - fa: Contract[A] - )(f: A => G[B]): G[Contract[B]] = { - val valueB: G[ArchivedContract \/ ActiveContract.ResolvedCtTyId[B]] = - fa.value.traverse(a => a.traverse(f)) - valueB.map(x => Contract[B](x)) - } - } - } - - object ActiveContractExtras { - // only used in integration tests - implicit val `AcC hasTemplateId`: HasTemplateId.Compat[ActiveContract.ResolvedCtTyId] = - new HasTemplateId[ActiveContract.ResolvedCtTyId] { - override def templateId(fa: ActiveContract.ResolvedCtTyId[_]): ContractTypeId.RequiredPkg = - fa.templateId.map(PackageRef.Id(_)) - - type TypeFromCtId = LfType - - override def lfType( - fa: ActiveContract.ResolvedCtTyId[_], - templateId: ContractTypeId.ResolvedPkgId, - f: PackageService.ResolveTemplateRecordType, - g: PackageService.ResolveChoiceArgType, - h: PackageService.ResolveKeyType, - ): Error \/ LfType = - templateId match { - case tid @ ContractTypeId.Template(_, _, _) => - f(tid: ContractTypeId.Template.ResolvedPkgId) - .leftMap(e => Error(Symbol("ActiveContract_hasTemplateId_lfType"), e.shows)) - case other => - val errorMsg = s"Expect contract type Id to be template Id, got otherwise: $other" - -\/(Error(Symbol("ActiveContract_hasTemplateId_lfType"), errorMsg)) - } - } - } - - object ArchivedContract { - def fromLedgerApi( - resolvedQuery: http.ResolvedQuery, - in: lav2.event.ArchivedEvent, - ): Error \/ ArchivedContract = { - val resolvedTemplateId = resolvedQuery match { - case ResolvedQuery.ByInterfaceId(interfaceId) => - (in.templateId required "templateId") - .map(ContractTypeId.Interface.fromLedgerApi) - // Use the interface id that was queried for, but with the package id returned. 
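- // (`latestPkgId` carries the queried interface's qualified name, i.e. the newest - // package id sharing its package name, per ContractTypeRef further below; only its - // package id is swapped for the one reported by the event.)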
- .map(gotId => interfaceId.latestPkgId.copy(packageId = gotId.packageId)) - case _ => - (in.templateId required "templateId").map(ContractTypeId.Template.fromLedgerApi) - } - for { - templateId <- resolvedTemplateId - } yield ArchivedContract( - contractId = ContractId(in.contractId), - templateId = templateId, - ) - } - - def fromLedgerApi(in: lav2.event.ExercisedEvent): Error \/ Option[ArchivedContract] = - if (in.consuming) { - for { - templateId <- in.templateId.required("templateId") - } yield Some( - ArchivedContract( - contractId = ContractId(in.contractId), - templateId = ContractTypeId.Template fromLedgerApi templateId, - ) - ) - } else { - \/-(None) - } - } - - object ContractLocator { - implicit val covariant: Traverse[ContractLocator] = new Traverse[ContractLocator] { - - override def map[A, B](fa: ContractLocator[A])(f: A => B): ContractLocator[B] = fa match { - case ka: EnrichedContractKey[A] => EnrichedContractKey(ka.templateId, f(ka.key)) - case c: EnrichedContractId => c - } - - override def traverseImpl[G[_]: Applicative, A, B]( - fa: ContractLocator[A] - )(f: A => G[B]): G[ContractLocator[B]] = - fa match { - case ka: EnrichedContractKey[A] => - f(ka.key).map(b => EnrichedContractKey(ka.templateId, b)) - case c: EnrichedContractId => - val G: Applicative[G] = implicitly - G.point(c) - } - } - - val structure: ContractLocator <~> InputContractRef = - new IsoFunctorTemplate[ContractLocator, InputContractRef] { - override def from[A](ga: InputContractRef[A]) = - ga.fold((EnrichedContractKey[A] _).tupled, (EnrichedContractId.apply _).tupled) - - override def to[A](fa: ContractLocator[A]) = fa match { - case EnrichedContractId(otid, cid) => \/-((otid, cid)) - case EnrichedContractKey(tid, key) => -\/((tid, key)) - } - } - } - - object EnrichedContractKey { - implicit val covariant: Traverse[EnrichedContractKey] = new Traverse[EnrichedContractKey] { - override def traverseImpl[G[_]: Applicative, A, B]( - fa: EnrichedContractKey[A] - )(f: A => G[B]): G[EnrichedContractKey[B]] = - f(fa.key).map(b => EnrichedContractKey(fa.templateId, b)) - } - - implicit val hasTemplateId: HasTemplateId.Compat[EnrichedContractKey] = - new HasTemplateId[EnrichedContractKey] { - - override def templateId(fa: EnrichedContractKey[_]): ContractTypeId.RequiredPkg = - fa.templateId - - type TypeFromCtId = LfType - - override def lfType( - fa: EnrichedContractKey[_], - templateId: ContractTypeId.ResolvedPkgId, - f: PackageService.ResolveTemplateRecordType, - g: PackageService.ResolveChoiceArgType, - h: PackageService.ResolveKeyType, - ): Error \/ LfType = - templateId match { - case tid @ ContractTypeId.Template(_, _, _) => - h(tid: ContractTypeId.Template.ResolvedPkgId) - .leftMap(e => Error(Symbol("EnrichedContractKey_hasTemplateId_lfType"), e.shows)) - case other => - val errorMsg = s"Expect contract type Id to be template Id, got otherwise: $other" - -\/(Error(Symbol("EnrichedContractKey_hasTemplateId_lfType"), errorMsg)) - } - } - } - - object ContractKeyStreamRequest { - implicit def covariantR[Off]: Traverse[ContractKeyStreamRequest[Off, *]] = { - type F[A] = ContractKeyStreamRequest[Off, A] - new Traverse[F] { - override def traverseImpl[G[_]: Applicative, A, B](fa: F[A])(f: A => G[B]): G[F[B]] = - fa.ekey traverse f map (ekey => fa.copy(ekey = ekey)) - } - } - - implicit def hasTemplateId[Off]: HasTemplateId.Compat[ContractKeyStreamRequest[Off, *]] = - HasTemplateId.by[ContractKeyStreamRequest[Off, *]](_.ekey) - } - - trait HasTemplateId[-F[_]] { - protected[this] type FHuh = F[_] // how to 
pronounce "F[?]" or "F huh?" - - def templateId(fa: F[_]): ContractTypeId.RequiredPkg - - type TypeFromCtId - - def lfType( - fa: F[_], - templateId: ContractTypeId.ResolvedPkgId, - f: PackageService.ResolveTemplateRecordType, - g: PackageService.ResolveChoiceArgType, - h: PackageService.ResolveKeyType, - ): Error \/ TypeFromCtId - } - - object HasTemplateId { - type Compat[-F[_]] = Aux[F, LfType] - type Aux[-F[_], TFC0] = HasTemplateId[F] { type TypeFromCtId = TFC0 } - - def by[F[_]]: By[F] = new By[F](0) - - final class By[F[_]](private val ign: Int) extends AnyVal { - def apply[G[_]]( - nt: F[_] => G[_] - )(implicit basis: HasTemplateId[G]): Aux[F, basis.TypeFromCtId] = - new HasTemplateId[F] { - override def templateId(fa: F[_]) = basis templateId nt(fa) - - type TypeFromCtId = basis.TypeFromCtId - - override def lfType( - fa: F[_], - templateId: ContractTypeId.ResolvedPkgId, - f: PackageService.ResolveTemplateRecordType, - g: PackageService.ResolveChoiceArgType, - h: PackageService.ResolveKeyType, - ): Error \/ TypeFromCtId = basis.lfType(nt(fa), templateId, f, g, h) - } - } - } - - object CreateCommand { - type RequiredPkg[+LfV] = CreateCommand[LfV, ContractTypeId.Template.RequiredPkg] - - implicit val bitraverseInstance: Bitraverse[CreateCommand] = new Bitraverse[CreateCommand] { - override def bitraverseImpl[G[_]: Applicative, A, B, C, D]( - fab: CreateCommand[A, B] - )(f: A => G[C], g: B => G[D]): G[CreateCommand[C, D]] = - ^(f(fab.payload), g(fab.templateId))((c, d) => fab.copy(payload = c, templateId = d)) - } - } - - object ExerciseCommand { - type RequiredPkg[+LfV, +R] = ExerciseCommand[PackageRef, LfV, R] - - implicit def bitraverseInstance[PkgId]: Bitraverse[ExerciseCommand[PkgId, *, *]] = - new Bitraverse[ExerciseCommand[PkgId, *, *]] { - override def bitraverseImpl[G[_]: Applicative, A, B, C, D]( - fab: ExerciseCommand[PkgId, A, B] - )(f: A => G[C], g: B => G[D]): G[ExerciseCommand[PkgId, C, D]] = - ^(f(fab.argument), g(fab.reference))((argument, reference) => - fab.copy( - argument = argument, - reference = reference, - ) - ) - } - - implicit val leftTraverseInstance: Traverse[RequiredPkg[+*, Nothing]] = - bitraverseInstance[PackageRef].leftTraverse - - implicit val hasTemplateId: HasTemplateId.Aux[RequiredPkg[ - +*, - http.ContractLocator[_], - ], (Option[http.ContractTypeId.Interface.ResolvedPkgId], LfType)] = - new HasTemplateId[RequiredPkg[+*, http.ContractLocator[_]]] { - override def templateId(fab: FHuh): ContractTypeId.RequiredPkg = - fab.choiceInterfaceId getOrElse (fab.reference match { - case EnrichedContractKey(templateId, _) => templateId - case EnrichedContractId(Some(templateId), _) => templateId - case EnrichedContractId(None, _) => - throw new IllegalArgumentException( - "Please specify templateId, optional templateId is not supported yet!" 
- ) - }) - - type TypeFromCtId = (Option[http.ContractTypeId.Interface.ResolvedPkgId], LfType) - - override def lfType( - fa: FHuh, - templateId: ContractTypeId.ResolvedPkgId, - f: PackageService.ResolveTemplateRecordType, - g: PackageService.ResolveChoiceArgType, - h: PackageService.ResolveKeyType, - ) = - g(templateId, fa.choice) - .leftMap(e => Error(Symbol("ExerciseCommand_hasTemplateId_lfType"), e.shows)) - } - } - - object CreateAndExerciseCommand { - type LAVUnresolved = CreateAndExerciseCommand[ - lav2.value.Record, - lav2.value.Value, - http.ContractTypeId.Template.RequiredPkg, - http.ContractTypeId.RequiredPkg, - ] - - type LAVResolved = CreateAndExerciseCommand[ - lav2.value.Record, - lav2.value.Value, - http.ContractTypeId.Template.RequiredPkg, - http.ContractTypeId.RequiredPkg, - ] - - implicit final class `CAEC traversePayloadArg`[P, Ar, T, I]( - private val self: CreateAndExerciseCommand[P, Ar, T, I] - ) extends AnyVal { - def traversePayloadsAndArgument[G[_]: Applicative, P2, Ar2]( - f: P => G[P2], - g: Ar => G[Ar2], - ): G[CreateAndExerciseCommand[P2, Ar2, T, I]] = - ^(f(self.payload), g(self.argument))((p, a) => self.copy(payload = p, argument = a)) - } - - implicit def covariant[P, Ar]: Bitraverse[CreateAndExerciseCommand[P, Ar, *, *]] = - new Bitraverse[CreateAndExerciseCommand[P, Ar, *, *]] { - override def bitraverseImpl[G[_]: Applicative, A, B, C, D]( - fa: CreateAndExerciseCommand[P, Ar, A, B] - )(f: A => G[C], g: B => G[D]): G[CreateAndExerciseCommand[P, Ar, C, D]] = - ^^( - f(fa.templateId), - fa.choiceInterfaceId traverse g, - fa.meta traverse (_ traverse f), - ) { (tId, ciId, meta) => - fa.copy(templateId = tId, choiceInterfaceId = ciId, meta = meta) - } - } - } - - object ExerciseResponse { - implicit val traverseInstance: Traverse[ExerciseResponse] = new Traverse[ExerciseResponse] { - override def traverseImpl[G[_]: Applicative, A, B]( - fa: ExerciseResponse[A] - )(f: A => G[B]): G[ExerciseResponse[B]] = { - val gb: G[B] = f(fa.exerciseResult) - val gbs: G[List[Contract[B]]] = fa.events.traverse(_.traverse(f)) - ^(gb, gbs) { (exerciseResult, events) => - fa.copy( - exerciseResult = exerciseResult, - events = events, - ) - } - } - } - } - object CreateCommandResponse { - implicit val covariant: Traverse[CreateCommandResponse] = new Traverse[CreateCommandResponse] { - - override def map[A, B](fa: CreateCommandResponse[A])(f: A => B): CreateCommandResponse[B] = - fa.copy(key = fa.key map f, payload = f(fa.payload)) - - override def traverseImpl[G[_]: Applicative, A, B]( - fa: CreateCommandResponse[A] - )(f: A => G[B]): G[CreateCommandResponse[B]] = { - import scalaz.syntax.apply.* - val gk: G[Option[B]] = fa.key traverse f - val ga: G[B] = f(fa.payload) - ^(gk, ga)((k, a) => fa.copy(key = k, payload = a)) - } - } - } sealed abstract class SyncResponse[+R] extends Product with Serializable { def status: StatusCode } - final case class OkResponse[+R]( - result: R, - warnings: Option[ServiceWarning] = None, - status: StatusCode = StatusCodes.OK, - ) extends SyncResponse[R] - sealed trait RetryInfoDetailDurationTag // Important note: when changing this ADT, adapt the custom associated JsonFormat codec in JsonProtocol @@ -829,354 +70,7 @@ package http { final case class ErrorResponse( errors: List[String], - warnings: Option[ServiceWarning], status: StatusCode, ledgerApiError: Option[LedgerApiError] = None, ) extends SyncResponse[Nothing] - - object OkResponse { - implicit val covariant: Traverse[OkResponse] = new Traverse[OkResponse] { - override def 
traverseImpl[G[_]: Applicative, A, B](fa: OkResponse[A])( - f: A => G[B] - ): G[OkResponse[B]] = - f(fa.result).map(b => fa.copy(result = b)) - } - } - - object SyncResponse { - implicit val covariant: Traverse[SyncResponse] = new Traverse[SyncResponse] { - override def traverseImpl[G[_]: Applicative, A, B]( - fa: SyncResponse[A] - )(f: A => G[B]): G[SyncResponse[B]] = { - val G = implicitly[Applicative[G]] - fa match { - case err: ErrorResponse => G.point[SyncResponse[B]](err) - case ok: OkResponse[A] => OkResponse.covariant.traverse(ok)(f).widen - } - } - } - } - - sealed abstract class ServiceWarning extends Serializable with Product - - final case class UnknownTemplateIds(unknownTemplateIds: List[ContractTypeId.RequiredPkg]) - extends ServiceWarning - - final case class UnknownParties(unknownParties: List[http.Party]) extends ServiceWarning - - final case class AsyncWarningsWrapper(warnings: ServiceWarning) - - import com.digitalasset.daml.lf.data.Ref - - import scala.collection.IterableOps - - /** A contract type ID that may be either a template or an interface ID. A - * [[ContractTypeId.ResolvedPkg]] ID will always be either [[ContractTypeId.Template]] or - * [[ContractTypeId.Interface]]; an unresolved ID may be one of those, which indicates an - * expectation of what the resolved ID will be, or neither, which indicates that resolving what - * kind of ID this is will be part of the resolution. - * - * Built-in equality is solely determined by the triple of package name, module name, entity - * name. This is because there are likely insidious expectations that this be true dating to - * before contract type IDs were distinguished at all, and we are only interested in - * distinguishing them statically, which these types do, and by pattern-matching, which does - * work. - * - * {{{ - * val selector: ContractTypeId[Unit] = Template((), "M", "E") - * selector match { - * case ContractTypeId.Unknown(p, m, e) => // this will not match - * case ContractTypeId.Interface(p, m, e) => // this will not match - * case ContractTypeId.Template(p, m, e) => // this will match - * } - * }}} - */ - sealed abstract class ContractTypeId[+PkgId] - extends Product3[PkgId, String, String] - with Serializable - with ContractTypeId.Ops[ContractTypeId, PkgId] { - val packageId: PkgId - val moduleName: String - val entityName: String - - override def _1 = packageId - - override def _2 = moduleName - - override def _3 = entityName - - // the only way we want to tell the difference dynamically is when - // pattern-matching. If we didn't need that for query, we wouldn't even - // bother with different classes, we would just use different newtypes. - // Which would yield exactly the following dynamic equality behavior. 
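- // (Concretely: Template("p", "M", "E") == Interface("p", "M", "E") under this - // equality, and hashCode stays consistent because MurmurHash3.productHash is - // called with ignorePrefix = true, which leaves the product prefix, i.e. the - // class name, out of the hash.)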
- override final def equals(o: Any) = o match { - case o: ContractTypeId[_] => - (this eq o) || { - packageId == o.packageId && moduleName == o.moduleName && entityName == o.entityName - } - case _ => false - } - - override final def hashCode = { - import scala.util.hashing.MurmurHash3 as H - H.productHash(this, H.productSeed, ignorePrefix = true) - } - - def fqn: String = s"${packageId.toString}:$moduleName:$entityName" - } - - object ResolvedQuery { - def apply[CtId[T] <: ContractTypeId[T]](resolved: ContractTypeRef[CtId]): ResolvedQuery = - resolved match { - case t: ContractTypeRef.TemplateRef => ByTemplateId(t) - case i: ContractTypeRef.InterfaceRef => ByInterfaceId(i) - } - - def apply(resolved: Set[_ <: ContractTypeRef[ContractTypeId]]): Unsupported \/ ResolvedQuery = { - import com.daml.nonempty.{NonEmpty, Singleton} - val (templateIds, interfaceIds) = partitionRefs(resolved) - templateIds match { - case NonEmpty(templateIds) => - interfaceIds match { - case NonEmpty(_) => -\/(CannotQueryBothTemplateIdsAndInterfaceIds) - case _ => \/-(ByTemplateIds(templateIds)) - } - case _ => - interfaceIds match { - case NonEmpty(Singleton(interfaceId)) => \/-(ByInterfaceId(interfaceId)) - case NonEmpty(_) => -\/(CannotQueryManyInterfaceIds) - case _ => -\/(CannotBeEmpty) - } - } - } - - def partition[CC[_], C, Pkg]( - resolved: IterableOps[ContractTypeId.Definite[Pkg], CC, C] - ): (CC[ContractTypeId.Template[Pkg]], CC[ContractTypeId.Interface[Pkg]]) = - resolved.partitionMap { - case t @ ContractTypeId.Template(_, _, _) => Left(t) - case i @ ContractTypeId.Interface(_, _, _) => Right(i) - } - - def partitionRefs[CC[_], C]( - resolved: IterableOps[ContractTypeRef[ContractTypeId], CC, C] - ): (CC[ContractTypeRef.TemplateRef], CC[ContractTypeRef.InterfaceRef]) = - resolved.partitionMap { - case t: ContractTypeRef.TemplateRef => Left(t) - case i: ContractTypeRef.InterfaceRef => Right(i) - } - - sealed abstract class Unsupported(val errorMsg: String) extends Product with Serializable - - final case object CannotQueryBothTemplateIdsAndInterfaceIds - extends Unsupported("Cannot query both templates IDs and interface IDs") - - final case object CannotQueryManyInterfaceIds - extends Unsupported("Cannot query more than one interface ID") - - final case object CannotBeEmpty - extends Unsupported("Cannot resolve any template ID from request") - - final case class ByTemplateIds(templateIds: NonEmpty[Set[ContractTypeRef.TemplateRef]]) - extends ResolvedQuery { - def resolved = templateIds - } - - final case class ByTemplateId(templateId: ContractTypeRef.TemplateRef) extends ResolvedQuery { - def resolved = NonEmpty(Set, templateId) - } - - final case class ByInterfaceId(interfaceId: ContractTypeRef.InterfaceRef) - extends ResolvedQuery { - def resolved = NonEmpty(Set, interfaceId) - } - } - - sealed abstract class ResolvedQuery extends Product with Serializable { - def resolved: NonEmpty[Set[_ <: ContractTypeRef.Resolved]] - } - - object ContractTypeId extends ContractTypeIdLike[ContractTypeId] { - final case class Unknown[+PkgId]( - packageId: PkgId, - moduleName: String, - entityName: String, - ) extends ContractTypeId[PkgId] - with Ops[Unknown, PkgId] { - override def productPrefix = "ContractTypeId" - - override def copy[PkgId0]( - packageId: PkgId0 = packageId, - moduleName: String = moduleName, - entityName: String = entityName, - ) = Unknown(packageId, moduleName, entityName) - } - - sealed abstract class Definite[+PkgId] extends ContractTypeId[PkgId] with Ops[Definite, PkgId] - - /** A contract type 
ID known to be a template, not an interface. When resolved, it indicates - * that the LF environment associates this ID with a template. When unresolved, it indicates - * that the intent is to search only template IDs for resolution, and that resolving to an - * interface ID should be an error. - */ - final case class Template[+PkgId](packageId: PkgId, moduleName: String, entityName: String) - extends Definite[PkgId] - with Ops[Template, PkgId] { - override def productPrefix = "TemplateId" - - override def copy[PkgId0]( - packageId: PkgId0 = packageId, - moduleName: String = moduleName, - entityName: String = entityName, - ) = Template(packageId, moduleName, entityName) - } - - /** A contract type ID known to be an interface, not a template. When resolved, it indicates - * that the LF environment associates this ID with an interface. When unresolved, it indicates - * that the intent is to search only interface IDs for resolution, and that resolving to a - * template ID should be an error. - */ - final case class Interface[+PkgId](packageId: PkgId, moduleName: String, entityName: String) - extends Definite[PkgId] - with Ops[Interface, PkgId] { - override def productPrefix = "InterfaceId" - - override def copy[PkgId0]( - packageId: PkgId0 = packageId, - moduleName: String = moduleName, - entityName: String = entityName, - ) = Interface(packageId, moduleName, entityName) - } - - override def apply[PkgId]( - packageId: PkgId, - moduleName: String, - entityName: String, - ): ContractTypeId[PkgId] = - Unknown(packageId, moduleName, entityName) - - // Product3 makes custom unapply really cheap - def unapply[PkgId](ctId: ContractTypeId[PkgId]): Some[ContractTypeId[PkgId]] = Some(ctId) - - // belongs in ultimate parent `object` - implicit def `ContractTypeId covariant`[F[T] <: ContractTypeId[T] with ContractTypeId.Ops[F, T]] - : Traverse[F] = - new Traverse[F] { - override def map[A, B](fa: F[A])(f: A => B): F[B] = - fa.copy(packageId = f(fa.packageId)) - - override def traverseImpl[G[_]: Applicative, A, B](fa: F[A])(f: A => G[B]): G[F[B]] = - f(fa.packageId) map (p2 => fa.copy(packageId = p2)) - } - - object Unknown extends Like[Unknown] - - object Template extends Like[Template] - - object Interface extends Like[Interface] - - // TODO(#13303) Re-adapted from Daml repo #14727: make an opaque subtype, produced by PackageService on - // confirmed-present IDs only. Can probably start by adding - // `with Definite[Any]` here and seeing what happens - /** A resolved [[ContractTypeId]], typed `CtTyId`. */ - type WithDefiniteOps[+CtId[_], Pkg] = CtId[Pkg] with Definite[Pkg] with Ops[CtId, Pkg] - - type ResolvedOf[+CtId[_]] = WithDefiniteOps[CtId, Ref.PackageRef] - type ResolvedPkgIdOf[+CtId[_]] = WithDefiniteOps[CtId, Ref.PackageId] - - type Like[CtId[T] <: ContractTypeId[T]] = ContractTypeIdLike[CtId] - - // CtId serves the same role as `CC` on scala.collection.IterableOps - sealed trait Ops[+CtId[_], +PkgId] { - this: ContractTypeId[PkgId] => - def copy[PkgId0]( - packageId: PkgId0 = packageId, - moduleName: String = moduleName, - entityName: String = entityName, - ): CtId[PkgId0] with Ops[CtId, PkgId0] - } - - def withPkgRef[CtId[T] <: ContractTypeId[T]]( - id: CtId[Ref.PackageId] with Ops[CtId, Ref.PackageId] - ): CtId[Ref.PackageRef] = - id.copy(packageId = Ref.PackageRef.Id(id.packageId): Ref.PackageRef) - } - - /** A contract type ID companion. 
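- * In the aliases below, `RequiredPkg` carries a `Ref.PackageRef` (a concrete package - * id or a `#package-name` reference, cf. the note on `ContractTypeRef` further down), - * while `RequiredPkgId` pins a concrete `Ref.PackageId`; the `Resolved*` forms are the - * corresponding resolution-confirmed variants.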
*/ - sealed abstract class ContractTypeIdLike[CtId[T] <: ContractTypeId[T]] { - type RequiredPkg = CtId[Ref.PackageRef] - type RequiredPkgId = CtId[Ref.PackageId] - type ResolvedPkg = ContractTypeId.ResolvedOf[CtId] - type ResolvedPkgId = ContractTypeId.ResolvedPkgIdOf[CtId] - - // treat the companion like a typeclass instance - implicit def `ContractTypeIdLike companion`: this.type = this - - def apply[PkgId]( - packageId: PkgId, - moduleName: String, - entityName: String, - ): CtId[PkgId] with ContractTypeId.Ops[CtId, PkgId] - - final def fromLedgerApi(in: lav2.value.Identifier): RequiredPkgId = - apply(Ref.PackageId.assertFromString(in.packageId), in.moduleName, in.entityName) - - } - - // Represents information about a contract type id that may use a name as the package reference. - // i.e. `orig` may have a form or either "#foo:Bar:Baz" or "123:Bar:Baz". - // If a package name is provided, then `allIds` may resolve to multiple values, for the relevant - // template in each package id that shares the same package name. - sealed abstract class ContractTypeRef[+CtTyId[T] <: ContractTypeId[T]]( - orig: ContractTypeId.ResolvedOf[CtTyId], - ids: NonEmpty[Seq[ContractTypeId.ResolvedPkgIdOf[CtTyId]]], - val name: Option[Ref.PackageName], - ) { - def allPkgIds: NonEmpty[Set[_ <: ContractTypeId.ResolvedPkgIdOf[CtTyId]]] = ids.toSet - @SuppressWarnings(Array("org.wartremover.warts.IterableOps")) - def latestPkgId: ContractTypeId.ResolvedPkgIdOf[CtTyId] = ids.head - def original: ContractTypeId.ResolvedOf[CtTyId] = orig - } - - object ContractTypeRef { - type Resolved = ContractTypeRef[ContractTypeId] - - def unnamed[CtTyId[T] <: ContractTypeId[T]]( - id: ContractTypeId.ResolvedPkgIdOf[CtTyId] - ): ContractTypeRef[CtTyId] = { - val idWithRef = id.copy(packageId = Ref.PackageRef.Id(id.packageId): Ref.PackageRef) - apply[CtTyId](idWithRef, NonEmpty(Seq, id.packageId), None) - } - - @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) - def apply[CtId[T] <: ContractTypeId[T]]( - id: ContractTypeId.ResolvedOf[CtId], // The original template/interface id. 
- pkgIds: NonEmpty[Seq[Ref.PackageId]], // Package ids with same name, by version, descending - name: Option[Ref.PackageName], // The package name info - ): ContractTypeRef[CtId] = - ((id: ContractTypeId.Definite[Ref.PackageRef]) match { - case t @ ContractTypeId.Template(_, _, _) => TemplateRef(t, pkgIds, name) - case i @ ContractTypeId.Interface(_, _, _) => InterfaceRef(i, pkgIds, name) - }).asInstanceOf[ContractTypeRef[CtId]] // CtId <: Definite, because id is resolved - - final case class TemplateRef( - orig: ContractTypeId.Template.ResolvedPkg, - pkgIds: NonEmpty[Seq[Ref.PackageId]], - override val name: Option[Ref.PackageName], - ) extends ContractTypeRef[ContractTypeId.Template]( - orig, - pkgIds.map(pkgId => orig.copy(packageId = pkgId)), - name, - ) - - final case class InterfaceRef( - orig: ContractTypeId.Interface.ResolvedPkg, - pkgIds: NonEmpty[Seq[Ref.PackageId]], - override val name: Option[Ref.PackageName], - ) extends ContractTypeRef[ContractTypeId.Interface]( - orig, - pkgIds.map(pkgId => orig.copy(packageId = pkgId)), - name, - ) - } } diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/ClientUtil.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/ClientUtil.scala index f6a5918d56..b1adb25ede 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/ClientUtil.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/ClientUtil.scala @@ -3,18 +3,8 @@ package com.digitalasset.canton.http.util -import com.daml.ledger.api.v2 as lav2 -import com.digitalasset.canton.ledger.api.refinements.ApiTypes.CommandId - import java.util.UUID object ClientUtil { def uniqueId(): String = UUID.randomUUID.toString - - def uniqueCommandId(): CommandId = CommandId(uniqueId()) - - import com.digitalasset.canton.fetchcontracts.util.ClientUtil as FC - - def boxedRecord(a: lav2.value.Record): lav2.value.Value = - FC.boxedRecord(a) } diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/Collections.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/Collections.scala deleted file mode 100644 index e0acd04c88..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/Collections.scala +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.util - -import com.daml.nonempty.NonEmpty -import com.daml.nonempty.NonEmptyReturningOps.* -import scalaz.NonEmptyList - -private[http] object Collections { - implicit final class `cdhuc Nel Ops`[A](private val self: NonEmptyList[A]) extends AnyVal { - def collect[B](f: A PartialFunction B): Option[NonEmptyList[B]] = - self.list.collect(f).toNel - } - - def toNonEmptySet[A](as: NonEmptyList[A]): NonEmpty[Set[A]] = { - import scalaz.syntax.foldable.* - as.tail.toSet incl1 as.head - } -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/Commands.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/Commands.scala deleted file mode 100644 index 6ed7341ee7..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/Commands.scala +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.util - -import com.daml.ledger.api.v2 as lav2 -import com.digitalasset.canton.http -import com.digitalasset.canton.ledger.api.refinements.ApiTypes as lar -import com.digitalasset.canton.topology.SynchronizerId -import com.digitalasset.daml.lf.data.Ref -import scalaz.NonEmptyList -import scalaz.syntax.foldable.* -import scalaz.syntax.tag.* - -import lav2.commands.Commands.DeduplicationPeriod - -object Commands { - def create( - templateId: lar.TemplateId, - payload: lav2.value.Record, - ): lav2.commands.Command.Command.Create = - lav2.commands.Command.Command.Create( - lav2.commands - .CreateCommand(templateId = Some(templateId.unwrap), createArguments = Some(payload)) - ) - - def exercise( - templateId: lar.TemplateId, - contractId: lar.ContractId, - choice: lar.Choice, - argument: lav2.value.Value, - ): lav2.commands.Command.Command.Exercise = - lav2.commands.Command.Command.Exercise( - lav2.commands.ExerciseCommand( - templateId = Some(templateId.unwrap), - contractId = contractId.unwrap, - choice = choice.unwrap, - choiceArgument = Some(argument), - ) - ) - - def exerciseByKey( - templateId: lar.TemplateId, - contractKey: lav2.value.Value, - choice: lar.Choice, - argument: lav2.value.Value, - ): lav2.commands.Command.Command.ExerciseByKey = - lav2.commands.Command.Command.ExerciseByKey( - lav2.commands.ExerciseByKeyCommand( - templateId = Some(templateId.unwrap), - contractKey = Some(contractKey), - choice = choice.unwrap, - choiceArgument = Some(argument), - ) - ) - - def createAndExercise( - templateId: lar.TemplateId, - payload: lav2.value.Record, - choice: lar.Choice, - argument: lav2.value.Value, - ): lav2.commands.Command.Command.CreateAndExercise = - lav2.commands.Command.Command.CreateAndExercise( - lav2.commands.CreateAndExerciseCommand( - templateId = Some(templateId.unwrap), - createArguments = Some(payload), - choice = choice.unwrap, - choiceArgument = Some(argument), - ) - ) - - def submitAndWaitRequest( - userId: lar.UserId, - commandId: lar.CommandId, - actAs: NonEmptyList[lar.Party], - readAs: List[lar.Party], - command: lav2.commands.Command.Command, - deduplicationPeriod: DeduplicationPeriod, - submissionId: Option[http.SubmissionId], - workflowId: Option[http.WorkflowId], - disclosedContracts: Seq[http.DisclosedContract.LAV], - synchronizerId: Option[SynchronizerId], - packageIdSelectionPreference: Seq[Ref.PackageId], - 
): lav2.command_service.SubmitAndWaitRequest = { - val commands = lav2.commands.Commands( - userId = userId.unwrap, - commandId = commandId.unwrap, - actAs = lar.Party.unsubst(actAs.toList), - readAs = lar.Party.unsubst(readAs), - deduplicationPeriod = deduplicationPeriod, - disclosedContracts = disclosedContracts map (_.toLedgerApi), - synchronizerId = synchronizerId.map(_.toProtoPrimitive).getOrElse(""), - packageIdSelectionPreference = packageIdSelectionPreference, - commands = Seq(lav2.commands.Command(command)), - workflowId = workflowId.map(_.toString).getOrElse(""), - submissionId = submissionId.map(_.toString).getOrElse(""), - minLedgerTimeAbs = None, - minLedgerTimeRel = None, - prefetchContractKeys = Nil, - ) - val commandsWithSubmissionId = - http.SubmissionId.unsubst(submissionId).map(commands.withSubmissionId).getOrElse(commands) - val commandsWithWorkflowId = - http.WorkflowId - .unsubst(workflowId) - .map(commandsWithSubmissionId.withWorkflowId) - .getOrElse(commandsWithSubmissionId) - lav2.command_service.SubmitAndWaitRequest(Some(commandsWithWorkflowId)) - } -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/FlowUtil.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/FlowUtil.scala deleted file mode 100644 index 14a3f228a6..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/FlowUtil.scala +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.util - -import org.apache.pekko.NotUsed -import org.apache.pekko.stream.scaladsl.Flow -import scalaz.{-\/, \/} - -object FlowUtil { - def allowOnlyFirstInput[E, A](error: => E): Flow[E \/ A, E \/ A, NotUsed] = - Flow[E \/ A] - .scan(Option.empty[E \/ A]) { (s0, x) => - s0 match { - case Some(_) => - Some(-\/(error)) - case None => - Some(x) - } - } - .collect { case Some(x) => - x - } -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/FutureUtil.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/FutureUtil.scala index 5e83c7d110..816f5a8fca 100644 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/FutureUtil.scala +++ b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/FutureUtil.scala @@ -3,27 +3,11 @@ package com.digitalasset.canton.http.util -import scalaz.syntax.show.* -import scalaz.{Applicative, EitherT, Functor, Show, \/} +import scalaz.{Applicative, EitherT, Functor, \/} import scala.concurrent.Future -import scala.util.Try object FutureUtil { - def toFuture[A](o: Option[A]): Future[A] = - o.fold(Future.failed[A](new IllegalStateException(s"Empty option: $o")))(a => - Future.successful(a) - ) - - def toFuture[A](a: Try[A]): Future[A] = - a.fold(e => Future.failed(e), a => Future.successful(a)) - - def toFuture[A: Show, B](a: A \/ B): Future[B] = - a.fold(e => Future.failed(new IllegalStateException(e.shows)), a => Future.successful(a)) - - def toFuture[A](a: Throwable \/ A): Future[A] = - a.fold(e => Future.failed(new IllegalStateException(e)), a => Future.successful(a)) - def liftET[E]: LiftET[E] = new LiftET(0) final class LiftET[E](private val ignore: Int) extends AnyVal { def apply[F[_]: Functor, A](fa: F[A]): EitherT[F, E, A] = 
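- // (The intermediate `LiftET` value class lets callers supply only `E` explicitly, - // as in `liftET[Error](fa)`, while `F` and `A` are inferred; the dummy `Int` keeps - // `LiftET` an allocation-free `AnyVal`.)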
EitherT.rightT(fa) @@ -34,10 +18,4 @@ object FutureUtil { def either[A, B](d: A \/ B)(implicit ev: Applicative[Future]): EitherT[Future, A, B] = EitherT.either[Future, A, B](d) - - def rightT[A, B](fa: Future[B])(implicit ev: Functor[Future]): EitherT[Future, A, B] = - EitherT.rightT(fa) - - def leftT[A, B](fa: Future[A])(implicit ev: Functor[Future]): EitherT[Future, A, B] = - EitherT.leftT(fa) } diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/IdentifierConverters.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/IdentifierConverters.scala deleted file mode 100644 index f75207c5cd..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/IdentifierConverters.scala +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.util - -import com.daml.ledger.api.v2 as lav2 -import com.digitalasset.canton.fetchcontracts.util.IdentifierConverters as FC -import com.digitalasset.canton.http -import com.digitalasset.canton.ledger.api.refinements.ApiTypes as lar -import com.digitalasset.daml.lf - -object IdentifierConverters { - - def lfIdentifier(a: lar.TemplateId): lf.data.Ref.Identifier = - lfIdentifier(lar.TemplateId.unwrap(a)) - - def lfIdentifier(a: lav2.value.Identifier): lf.data.Ref.Identifier = { - import lf.data.Ref - Ref.Identifier( - Ref.PackageId.assertFromString(a.packageId), - Ref.QualifiedName( - Ref.ModuleName.assertFromString(a.moduleName), - Ref.DottedName.assertFromString(a.entityName), - ), - ) - } - - def lfIdentifier(a: http.ContractTypeId.RequiredPkgId): lf.data.Ref.Identifier = { - import lf.data.Ref - Ref.Identifier( - a.packageId, - Ref.QualifiedName( - Ref.ModuleName.assertFromString(a.moduleName), - Ref.DottedName.assertFromString(a.entityName), - ), - ) - } - - def refApiIdentifier(a: http.ContractTypeId.RequiredPkg): lar.TemplateId = - lar.TemplateId(FC.apiIdentifier(a)) - -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/JwtParties.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/JwtParties.scala deleted file mode 100644 index cbe5ff4efd..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/JwtParties.scala +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.util - -import com.daml.nonempty.NonEmptyReturningOps.* -import com.digitalasset.canton.http -import com.digitalasset.canton.http.{JwtPayload, JwtWritePayload} -import scalaz.syntax.foldable.* -import scalaz.syntax.std.option.* -import scalaz.{-\/, NonEmptyList, \/, \/-} - -private[http] object JwtParties { - import com.digitalasset.canton.http.EndpointsCompanion.{Error, Unauthorized} - - // security check for readAs; we delegate the remainder to - // the participant's check that the JWT itself is valid - def ensureReadAsAllowedByJwt( - readAs: Option[NonEmptyList[http.Party]], - jwtPayload: JwtPayload, - ): Error \/ Unit = { - val disallowedParties: Set[http.Party] = - readAs.cata((_.toSet.filterNot(jwtPayload.parties)), Set.empty) - if (disallowedParties.isEmpty) \/-(()) - else { - val err = - s"$EnsureReadAsDisallowedError: ${disallowedParties mkString ", "}" - -\/(Unauthorized(err)) - } - } - - private[util] val EnsureReadAsDisallowedError = "Queried parties not allowed by given JWT token" - - def resolveRefParties( - meta: Option[http.CommandMeta.IgnoreDisclosed], - jwtPayload: JwtWritePayload, - ): http.PartySet = { - val actAs = meta.flatMap(_.actAs) getOrElse jwtPayload.submitter - val readAs = meta.flatMap(_.readAs) getOrElse jwtPayload.readAs - actAs.toSet1 ++ readAs - } -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/ProtobufByteStrings.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/ProtobufByteStrings.scala deleted file mode 100644 index 7429df2626..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/ProtobufByteStrings.scala +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.util - -import com.google.protobuf -import org.apache.pekko.NotUsed -import org.apache.pekko.stream.Materializer -import org.apache.pekko.stream.scaladsl.{Source, StreamConverters} - -import scala.jdk.CollectionConverters.* - -object ProtobufByteStrings { - - def readFrom( - source: Source[org.apache.pekko.util.ByteString, NotUsed] - )(implicit mat: Materializer): protobuf.ByteString = { - val inputStream = source.runWith(StreamConverters.asInputStream()) - protobuf.ByteString.readFrom(inputStream) - } - - def toSource(a: protobuf.ByteString): Source[org.apache.pekko.util.ByteString, NotUsed] = - Source.fromIterator(() => - a.asReadOnlyByteBufferList().iterator.asScala.map(x => org.apache.pekko.util.ByteString(x)) - ) -} diff --git a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/Transactions.scala b/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/Transactions.scala deleted file mode 100644 index 059e5f7d68..0000000000 --- a/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/Transactions.scala +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.util - -import com.daml.ledger.api.v2.event.CreatedEvent -import com.daml.ledger.api.v2.transaction.Transaction -import com.digitalasset.daml.lf.data.ImmArray.ImmArraySeq - -object Transactions { - @SuppressWarnings(Array("org.wartremover.warts.Any")) - def allCreatedEvents(transaction: Transaction): ImmArraySeq[CreatedEvent] = - transaction.events.iterator.flatMap(_.event.created.toList).to(ImmArraySeq) -} diff --git a/canton/community/ledger/ledger-json-api/src/test/resources/json-api-docs/asyncapi.yaml b/canton/community/ledger/ledger-json-api/src/test/resources/json-api-docs/asyncapi.yaml index 83925151a7..8c3721db6d 100644 --- a/canton/community/ledger/ledger-json-api/src/test/resources/json-api-docs/asyncapi.yaml +++ b/canton/community/ledger/ledger-json-api/src/test/resources/json-api-docs/asyncapi.yaml @@ -49,36 +49,36 @@ channels: ws: method: GET /v2/updates/flats: - description: 'Get flat transactions update stream (deprecated: use v2/updates - instead)' + description: Get flat transactions update stream. Provided for backwards compatibility, + it will be removed in the Canton version 3.5.0, use v2/updates instead. subscribe: operationId: onV2UpdatesFlats - description: 'Get flat transactions update stream (deprecated: use v2/updates - instead)' + description: Get flat transactions update stream. Provided for backwards compatibility, + it will be removed in the Canton version 3.5.0, use v2/updates instead. message: $ref: '#/components/messages/Either_JsCantonError_JsGetUpdatesResponse' publish: operationId: sendV2UpdatesFlats - description: 'Get flat transactions update stream (deprecated: use v2/updates - instead)' + description: Get flat transactions update stream. Provided for backwards compatibility, + it will be removed in the Canton version 3.5.0, use v2/updates instead. message: $ref: '#/components/messages/GetUpdatesRequest' bindings: ws: method: GET /v2/updates/trees: - description: 'Get update transactions tree stream (deprecated: use v2/updates - instead)' + description: Get update transactions tree stream. Provided for backwards compatibility, + it will be removed in the Canton version 3.5.0, use v2/updates instead. subscribe: operationId: onV2UpdatesTrees - description: 'Get update transactions tree stream (deprecated: use v2/updates - instead)' + description: Get update transactions tree stream. Provided for backwards compatibility, + it will be removed in the Canton version 3.5.0, use v2/updates instead. message: $ref: '#/components/messages/Either_JsCantonError_JsGetUpdateTreesResponse' publish: operationId: sendV2UpdatesTrees - description: 'Get update transactions tree stream (deprecated: use v2/updates - instead)' + description: Get update transactions tree stream. Provided for backwards compatibility, + it will be removed in the Canton version 3.5.0, use v2/updates instead. message: $ref: '#/components/messages/GetUpdatesRequest' bindings: @@ -498,12 +498,12 @@ components: filter: $ref: '#/components/schemas/TransactionFilter' description: |- - Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. + Provided for backwards compatibility, it will be removed in the Canton version 3.5.0. Templates to include in the served snapshot, per party. Optional, if specified event_format must be unset, if not specified event_format must be set. verbose: description: |- - Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. 
+ Provided for backwards compatibility, it will be removed in the Canton version 3.5.0. If enabled, values served over the API will contain more information than strictly necessary to interpret the data. In particular, setting the verbose flag to true triggers the ledger to include labels for record fields. Optional, if specified event_format must be unset. @@ -846,6 +846,7 @@ components: - createdEventBlob - createdAt - packageName + - representativePackageId - acsDelta properties: offset: @@ -959,6 +960,16 @@ components: packageName: description: |- The package name of the created contract. + Required + type: string + representativePackageId: + description: |- + A package-id present in the participant package store that typechecks the contract's argument. + This may differ from the package-id of the template used to create the contract. + For contracts created before Canton 3.4, this field matches the contract's creation package-id. + + NOTE: Experimental, server internal concept, not for client consumption. Subject to change without notice. + Required type: string acsDelta: @@ -1996,7 +2007,7 @@ components: JsGetUpdateTreesResponse: title: JsGetUpdateTreesResponse description: Provided for backwards compatibility, it will be removed in the - Canton version 3.4.0. + Canton version 3.5.0. type: object required: - update @@ -2049,7 +2060,7 @@ components: TransactionTree: title: TransactionTree description: |- - Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. + Provided for backwards compatibility, it will be removed in the Canton version 3.5.0. Complete view of an on-ledger transaction. type: object required: @@ -2060,7 +2071,7 @@ components: JsTransactionTree: title: JsTransactionTree description: |- - Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. + Provided for backwards compatibility, it will be removed in the Canton version 3.5.0. Complete view of an on-ledger transaction. type: object required: @@ -2139,7 +2150,7 @@ components: TreeEvent: title: TreeEvent description: |- - Provided for backwards compatibility, it will be removed in the Canton version 3.4.0. + Provided for backwards compatibility, it will be removed in the Canton version 3.5.0. Each tree event message type below contains a ``witness_parties`` field which indicates the subset of the requested parties that can see the event in question. diff --git a/canton/community/ledger/ledger-json-api/src/test/resources/json-api-docs/openapi.yaml b/canton/community/ledger/ledger-json-api/src/test/resources/json-api-docs/openapi.yaml index a462ecf002..48658572a9 100644 --- a/canton/community/ledger/ledger-json-api/src/test/resources/json-api-docs/openapi.yaml +++ b/canton/community/ledger/ledger-json-api/src/test/resources/json-api-docs/openapi.yaml @@ -102,8 +102,9 @@ paths: - apiKeyAuth: [] /v2/commands/submit-and-wait-for-transaction-tree: post: - description: 'Submit a batch of commands and wait for the transaction trees - response (deprecated: use submit-and-wait-for-transaction instead)' + description: Submit a batch of commands and wait for the transaction trees response. + Provided for backwards compatibility, it will be removed in the Canton version + 3.5.0, use submit-and-wait-for-transaction instead. 
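For readers tracking this deprecation, a minimal client-side migration sketch: the same command submission moves from the removed tree endpoint to `/v2/commands/submit-and-wait-for-transaction`. The base URL, token, and payload below are placeholders rather than values from this spec, and the new endpoint's request schema may require additional fields (such as a transaction format), so consult the request schema referenced by the new path before adopting this.

```scala
import java.net.URI
import java.net.http.{HttpClient, HttpRequest, HttpResponse}

object SubmitAndWaitMigration {
  private val client = HttpClient.newHttpClient()

  // The path changes relative to the deprecated endpoint:
  // /v2/commands/submit-and-wait-for-transaction-tree (removed in 3.5.0)
  // becomes /v2/commands/submit-and-wait-for-transaction; the JSON body may
  // additionally need the new endpoint's transaction-format fields.
  def submitAndWait(baseUrl: String, jwt: String, commandsJson: String): String = {
    val request = HttpRequest
      .newBuilder()
      .uri(URI.create(s"$baseUrl/v2/commands/submit-and-wait-for-transaction"))
      .header("Authorization", s"Bearer $jwt")
      .header("Content-Type", "application/json")
      .POST(HttpRequest.BodyPublishers.ofString(commandsJson))
      .build()
    client.send(request, HttpResponse.BodyHandlers.ofString()).body()
  }
}
```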
operationId: postV2CommandsSubmit-and-wait-for-transaction-tree requestBody: content: @@ -130,6 +131,7 @@ paths: application/json: schema: $ref: '#/components/schemas/JsCantonError' + deprecated: true security: - httpAuth: [] - apiKeyAuth: [] @@ -349,6 +351,11 @@ paths: required: false schema: type: boolean + - name: synchronizerId + in: query + required: false + schema: + type: string requestBody: content: application/octet-stream: @@ -365,7 +372,8 @@ paths: $ref: '#/components/schemas/UploadDarFileResponse' '400': description: 'Invalid value for: body, Invalid value for: query parameter - vetAllPackages, Invalid value for: headers' + vetAllPackages, Invalid value for: query parameter synchronizerId, Invalid + value for: headers' content: text/plain: schema: @@ -586,6 +594,38 @@ paths: security: - httpAuth: [] - apiKeyAuth: [] + /v2/parties/external/allocate: + post: + description: Allocate a new external party + operationId: postV2PartiesExternalAllocate + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/AllocateExternalPartyRequest' + required: true + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/AllocateExternalPartyResponse' + '400': + description: 'Invalid value for: body, Invalid value for: headers' + content: + text/plain: + schema: + type: string + default: + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/JsCantonError' + security: + - httpAuth: [] + - apiKeyAuth: [] /v2/parties/participant-id: get: description: Get participant id @@ -694,6 +734,38 @@ paths: security: - httpAuth: [] - apiKeyAuth: [] + /v2/parties/external/generate-topology: + post: + description: Generate a topology for an external party + operationId: postV2PartiesExternalGenerate-topology + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/GenerateExternalPartyTopologyRequest' + required: true + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/GenerateExternalPartyTopologyResponse' + '400': + description: 'Invalid value for: body, Invalid value for: headers' + content: + text/plain: + schema: + type: string + default: + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/JsCantonError' + security: + - httpAuth: [] + - apiKeyAuth: [] /v2/state/active-contracts: post: description: |- @@ -765,7 +837,7 @@ paths: parameters: - name: party in: query - required: true + required: false schema: type: string - name: participantId @@ -916,7 +988,7 @@ paths: /v2/updates/flats: post: description: |- - Query flat transactions update list (blocking call, deprecated: use v2/updates instead) + Query flat transactions update list (blocking call). Provided for backwards compatibility, it will be removed in the Canton version 3.5.0, use v2/updates instead. Notice: This endpoint should be used for small results set. When number of results exceeded node configuration limit (`http-list-max-elements-limit`) there will be an error (`413 Content Too Large`) returned. @@ -969,13 +1041,14 @@ paths: application/json: schema: $ref: '#/components/schemas/JsCantonError' + deprecated: true security: - httpAuth: [] - apiKeyAuth: [] /v2/updates/trees: post: description: |- - Query update transactions tree list (blocking call, deprecated: use v2/updates instead) + Query update transactions tree list (blocking call). 
Provided for backwards compatibility, it will be removed in the Canton version 3.5.0, use v2/updates instead. Notice: This endpoint should be used for small results set. When number of results exceeded node configuration limit (`http-list-max-elements-limit`) there will be an error (`413 Content Too Large`) returned. @@ -1028,13 +1101,15 @@ paths: application/json: schema: $ref: '#/components/schemas/JsCantonError' + deprecated: true security: - httpAuth: [] - apiKeyAuth: [] /v2/updates/transaction-tree-by-offset/{offset}: get: - description: 'Get transaction tree by offset (deprecated: use v2/updates/update-by-offset - instead)' + description: Get transaction tree by offset. Provided for backwards compatibility, + it will be removed in the Canton version 3.5.0, use v2/updates/update-by-offset + instead. operationId: getV2UpdatesTransaction-tree-by-offsetOffset parameters: - name: offset @@ -1070,13 +1145,15 @@ paths: application/json: schema: $ref: '#/components/schemas/JsCantonError' + deprecated: true security: - httpAuth: [] - apiKeyAuth: [] /v2/updates/transaction-by-offset: post: - description: 'Get transaction by offset (deprecated: use v2/updates/update-by-offset - instead)' + description: Get transaction by offset. Provided for backwards compatibility, + it will be removed in the Canton version 3.5.0, use v2/updates/update-by-offset + instead. operationId: postV2UpdatesTransaction-by-offset requestBody: content: @@ -1103,6 +1180,7 @@ paths: application/json: schema: $ref: '#/components/schemas/JsCantonError' + deprecated: true security: - httpAuth: [] - apiKeyAuth: [] @@ -1140,8 +1218,8 @@ paths: - apiKeyAuth: [] /v2/updates/transaction-by-id: post: - description: 'Get transaction by id (deprecated: use v2/updates/update-by-id - instead)' + description: Get transaction by id. Provided for backwards compatibility, it + will be removed in the Canton version 3.5.0, use v2/updates/update-by-id instead. operationId: postV2UpdatesTransaction-by-id requestBody: content: @@ -1168,6 +1246,7 @@ paths: application/json: schema: $ref: '#/components/schemas/JsCantonError' + deprecated: true security: - httpAuth: [] - apiKeyAuth: [] @@ -1205,8 +1284,9 @@ paths: - apiKeyAuth: [] /v2/updates/transaction-tree-by-id/{update-id}: get: - description: 'Get transaction tree by id (deprecated: use v2/updates/update-by-id - instead)' + description: Get transaction tree by id. Provided for backwards compatibility, + it will be removed in the Canton version 3.5.0, use v2/updates/update-by-id + instead. operationId: getV2UpdatesTransaction-tree-by-idUpdate-id parameters: - name: update-id @@ -1241,6 +1321,7 @@ paths: application/json: schema: $ref: '#/components/schemas/JsCantonError' + deprecated: true security: - httpAuth: [] - apiKeyAuth: [] @@ -1974,6 +2055,58 @@ paths: - apiKeyAuth: [] components: schemas: + AllocateExternalPartyRequest: + title: AllocateExternalPartyRequest + description: 'Required authorization: ``HasRight(ParticipantAdmin) OR IsAuthenticatedIdentityProviderAdmin(identity_provider_id)``' + type: object + required: + - synchronizer + - identityProviderId + properties: + synchronizer: + description: |- + TODO(#27670) support synchronizer aliases + Synchronizer ID on which to onboard the party + Required + type: string + onboardingTransactions: + description: |- + TopologyTransactions to onboard the external party + Can contain: + - A namespace for the party. 
+ This can be either a single NamespaceDelegation, + or DecentralizedNamespaceDefinition along with its authorized namespace owners in the form of NamespaceDelegations. + May be provided, if so it must be fully authorized by the signatures in this request combined with the existing topology state. + - A PartyToKeyMapping to register the party's signing keys. + May be provided, if so it must be fully authorized by the signatures in this request combined with the existing topology state. + - A PartyToParticipant to register the hosting relationship of the party. + Must be provided. + Required + type: array + items: + $ref: '#/components/schemas/SignedTransaction' + multiHashSignatures: + description: |- + Optional signatures of the combined hash of all onboarding_transactions + This may be used instead of providing signatures on each individual transaction + type: array + items: + $ref: '#/components/schemas/Signature' + identityProviderId: + description: |- + The id of the ``Identity Provider`` + If not set, assume the party is managed by the default identity provider. + Optional + type: string + AllocateExternalPartyResponse: + title: AllocateExternalPartyResponse + type: object + required: + - partyId + properties: + partyId: + description: '' + type: string AllocatePartyRequest: title: AllocatePartyRequest description: 'Required authorization: ``HasRight(ParticipantAdmin) OR IsAuthenticatedIdentityProviderAdmin(identity_provider_id)``' @@ -2810,7 +2943,7 @@ components: - Empty properties: Empty: - $ref: '#/components/schemas/Empty9' + $ref: '#/components/schemas/Empty10' DeleteIdentityProviderConfigResponse: title: DeleteIdentityProviderConfigResponse description: Does not (yet) contain any data. @@ -2831,12 +2964,15 @@ components: The template id of the contract. The identifier uses the package-id reference format. - Required + If provided, used to validate the template id of the contract serialized in the created_event_blob. + Optional type: string contractId: description: |- The contract id - Required + + If provided, used to validate the contract id of the contract serialized in the created_event_blob. + Optional type: string createdEventBlob: description: |- @@ -2872,6 +3008,9 @@ components: Empty1: title: Empty type: object + Empty10: + title: Empty + type: object Empty2: title: Empty type: object @@ -3317,6 +3456,76 @@ components: type: array items: $ref: '#/components/schemas/CumulativeFilter' + GenerateExternalPartyTopologyRequest: + title: GenerateExternalPartyTopologyRequest + type: object + required: + - synchronizer + - partyHint + - localParticipantObservationOnly + - confirmationThreshold + properties: + synchronizer: + description: |- + TODO(#27670) support synchronizer aliases + Required: synchronizer-id for which we are building this request. + type: string + partyHint: + description: 'Required: the actual party id will be constructed from this + hint and a fingerprint of the public key' + type: string + publicKey: + $ref: '#/components/schemas/SigningPublicKey' + description: 'Required: public key' + localParticipantObservationOnly: + description: 'Optional: if true, then the local participant will only be + observing, not confirming. Default false.' + type: boolean + otherConfirmingParticipantUids: + description: 'Optional: other participant ids which should be confirming + for this party' + type: array + items: + type: string + confirmationThreshold: + description: 'Optional: Confirmation threshold >= 1 for the party. 
Defaults + to all available confirmers when unset or set to 0.' + type: integer + format: int32 + observingParticipantUids: + description: 'Optional: other observing participant ids for this party' + type: array + items: + type: string + GenerateExternalPartyTopologyResponse: + title: GenerateExternalPartyTopologyResponse + description: Response message with topology transactions and the multi-hash + to be signed. + type: object + required: + - partyId + - publicKeyFingerprint + - multiHash + properties: + partyId: + description: the generated party id + type: string + publicKeyFingerprint: + description: the fingerprint of the supplied public key + type: string + topologyTransactions: + description: |- + The serialized topology transactions which need to be signed and submitted as part of the allocate party process. + Note that the serialization includes the versioning information. Therefore, the transaction here is serialized + as an `UntypedVersionedMessage` which in turn contains the serialized `TopologyTransaction` in the version + supported by the synchronizer. + type: array + items: + type: string + multiHash: + description: the multi-hash which may be signed instead of each individual + transaction + type: string GetActiveContractsRequest: title: GetActiveContractsRequest description: |- @@ -4613,7 +4822,8 @@ components: synchronizerId: description: |- Must be a valid synchronizer id - Required + If not set, a suitable synchronizer that this node is connected to will be chosen + Optional type: string packageIdSelectionPreference: description: |- @@ -4639,6 +4849,17 @@ components: type: array items: $ref: '#/components/schemas/PrefetchContractKey' + maxRecordTime: + description: |- + Maximum timestamp at which the transaction can be recorded onto the ledger via the synchronizer specified in the `PrepareSubmissionResponse`. + If submitted after this time, it will be rejected even if otherwise valid, in which case it needs to be prepared and signed again + with a new valid max_record_time. + Use this to limit the time-to-live of a prepared transaction: + once it has passed, the transaction can definitely no longer be accepted, + and preparing another transaction for the same intent is safe again. + Optional + type: string JsPrepareSubmissionResponse: title: JsPrepareSubmissionResponse description: '[docs-entry-end: HashingSchemeVersion]' @@ -5079,7 +5300,7 @@ components: - Empty properties: Empty: - $ref: '#/components/schemas/Empty7' + $ref: '#/components/schemas/Empty8' - type: object required: - IdentityProviderAdmin @@ -5160,17 +5381,20 @@ components: ListVettedPackagesRequest: title: ListVettedPackagesRequest type: object + required: + - pageToken + - pageSize properties: packageMetadataFilter: $ref: '#/components/schemas/PackageMetadataFilter' description: |- The package metadata filter the returned vetted packages set must satisfy. - Optional. + Optional topologyStateFilter: $ref: '#/components/schemas/TopologyStateFilter' description: |- The topology filter the returned vetted packages set must satisfy. - Optional. + Optional pageToken: description: |- Pagination token to determine the specific page to fetch. Using the token @@ -5183,7 +5407,7 @@ added and a page is requested twice using the same token, more packages can be returned on the second call. - Leave empty to fetch the first page. + Leave unspecified (i.e. as an empty string) to fetch the first page. Optional type: string
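The pagination contract spelled out here and in `ListVettedPackagesResponse` below (an empty `pageToken` fetches the first page, an empty `nextPageToken` signals the last one) reduces to a short client-side loop. A minimal sketch, assuming a hypothetical `fetch` helper that performs the actual HTTP call and decodes the response; all names are illustrative:

```scala
import scala.annotation.tailrec

object VettedPackagesPager {
  // Hypothetical decoded form of ListVettedPackagesResponse; field names follow the spec.
  final case class Page(vettedPackages: Vector[String], nextPageToken: String)

  // `fetch(pageToken, pageSize)` stands in for the actual HTTP call;
  // pageSize 0 lets the server pick the page size, per the field description.
  def listAll(fetch: (String, Int) => Page): Vector[String] = {
    @tailrec
    def go(token: String, acc: Vector[String]): Vector[String] = {
      val page = fetch(token, 0)
      val all = acc ++ page.vettedPackages
      // An empty nextPageToken means there are no further results.
      if (page.nextPageToken.isEmpty) all else go(page.nextPageToken, all)
    }
    go("", Vector.empty) // the empty token fetches the first page
  }
}
```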
@@ -5191,8 +5415,8 @@ description: |- Maximum number of ``VettedPackages`` results to return in a single page. - If the page_size is unspecified, the server will decide the number of - results to be returned. + If the page_size is unspecified (i.e. left as 0), the server will decide + the number of results to be returned. If the page_size exceeds the maximum supported by the server, an error will be returned. @@ -5206,6 +5430,8 @@ ListVettedPackagesResponse: title: ListVettedPackagesResponse type: object + required: + - nextPageToken properties: vettedPackages: description: |- @@ -5218,7 +5444,7 @@ nextPageToken: description: |- Pagination token to retrieve the next page. - Empty, if there are no further results. + Empty string if there are no further results. type: string Map_Filters: title: Map_Filters @@ -5264,6 +5490,9 @@ properties: value: $ref: '#/components/schemas/Duration' + NoPrior: + title: NoPrior + type: object ObjectMeta: title: ObjectMeta description: |- @@ -5393,7 +5622,7 @@ - Empty properties: Empty: - $ref: '#/components/schemas/Empty5' + $ref: '#/components/schemas/Empty6' - type: object required: - Unvet @@ -5679,6 +5908,26 @@ description: |- The key of the contract the client wants to prefetch. Required + Prior: + title: Prior + type: object + required: + - value + properties: + value: + type: integer + format: int32 + PriorTopologySerial: + title: PriorTopologySerial + description: |- + The serial of the last ``VettedPackages`` topology transaction on a given + participant and synchronizer. + type: object + required: + - serial + properties: + serial: + $ref: '#/components/schemas/Serial' ProtoAny: title: ProtoAny type: object @@ -5819,6 +6068,27 @@ properties: kind: $ref: '#/components/schemas/Kind' + Serial: + title: Serial + oneOf: + - type: object + required: + - Empty + properties: + Empty: + $ref: '#/components/schemas/Empty5' + - type: object + required: + - NoPrior + properties: + NoPrior: + $ref: '#/components/schemas/NoPrior' + - type: object + required: + - Prior + properties: + Prior: + $ref: '#/components/schemas/Prior' Signature: title: Signature type: object @@ -5831,12 +6101,6 @@ format: description: '' type: string - enum: - - SIGNATURE_FORMAT_UNSPECIFIED - - SIGNATURE_FORMAT_RAW - - SIGNATURE_FORMAT_DER - - SIGNATURE_FORMAT_CONCAT - - SIGNATURE_FORMAT_SYMBOLIC signature: description: '' type: string @@ -5847,11 +6111,37 @@ signingAlgorithmSpec: description: The signing algorithm specification used to produce this signature type: string - enum: - - SIGNING_ALGORITHM_SPEC_UNSPECIFIED - - SIGNING_ALGORITHM_SPEC_ED25519 - - SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_256 - - SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_384 + SignedTransaction: + title: SignedTransaction + type: object + required: + - transaction + properties: + transaction: + type: string + signatures: + type: array + items: + $ref: '#/components/schemas/Signature' + SigningPublicKey: + title: SigningPublicKey + type: object + required: + - format + - keyData + - keySpec + properties: + format: + description: The serialization format of the public key + example: CRYPTO_KEY_FORMAT_DER_X509_SUBJECT_PUBLIC_KEY_INFO + type: string + keyData: + description: Serialized public key in the format specified above + type: string + keySpec: + description: The key specification + example: SIGNING_KEY_SPEC_EC_CURVE25519 + type: string
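Taken together with `GenerateExternalPartyTopologyRequest`/`Response` and `AllocateExternalPartyRequest` above, the signing schemas here suggest the intended flow: generate the topology transactions, sign the returned `multiHash` with the party's external key, and submit the signed transactions. A minimal sketch of that sequencing, assuming hypothetical `generate`, `signMultiHash`, and `allocate` helpers for the two HTTP calls and the external key; only the field wiring follows the spec:

```scala
object ExternalPartyOnboarding {
  // Hypothetical decoded forms of the schemas above; field names follow the spec.
  final case class SigningPublicKey(format: String, keyData: String, keySpec: String)
  final case class Signature(format: String, signature: String, signedBy: String,
      signingAlgorithmSpec: String)
  final case class GenerateExternalPartyTopologyResponse(
      partyId: String,
      publicKeyFingerprint: String,
      topologyTransactions: Vector[String],
      multiHash: String,
  )

  def onboardExternalParty(
      synchronizer: String,
      partyHint: String,
      publicKey: SigningPublicKey,
      generate: (String, String, SigningPublicKey) => GenerateExternalPartyTopologyResponse,
      signMultiHash: String => Signature,
      allocate: (String, Vector[String], Vector[Signature]) => String,
  ): String = {
    val generated = generate(synchronizer, partyHint, publicKey)
    // Sign the combined multi-hash once instead of each transaction individually,
    // as permitted by `multiHashSignatures` on AllocateExternalPartyRequest.
    val signature = signMultiHash(generated.multiHash)
    // The allocate helper is assumed to wrap each serialized transaction into a
    // SignedTransaction (with empty per-transaction signatures) and to return
    // AllocateExternalPartyResponse.partyId.
    allocate(synchronizer, generated.topologyTransactions, Vector(signature))
  }
}
```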
SinglePartySignatures: title: SinglePartySignatures description: Signatures provided by a single party @@ -5978,7 +6268,7 @@ components: - Empty properties: Empty: - $ref: '#/components/schemas/Empty8' + $ref: '#/components/schemas/Empty9' - type: object required: - MinLedgerTimeAbs @@ -6007,7 +6297,7 @@ components: - Empty properties: Empty: - $ref: '#/components/schemas/Empty6' + $ref: '#/components/schemas/Empty7' - type: object required: - ParticipantAuthorizationAdded @@ -6373,7 +6663,6 @@ components: $ref: '#/components/schemas/Transaction' Update1: title: Update - description: The update that matches the filter in the request. oneOf: - type: object required: @@ -6573,20 +6862,28 @@ components: type: boolean synchronizerId: description: |- - The sychronizer on which the ``VettedPackages`` of this participant node - should be changed. + If set, the requested changes will take place on the specified + synchronizer. If synchronizer_id is unset and the participant is only + connected to a single synchronizer, that synchronizer will be used by + default. If synchronizer_id is unset and the participant is connected to + multiple synchronizers, the request will error out with + PACKAGE_SERVICE_CANNOT_AUTODETECT_SYNCHRONIZER. + + Optional type: string expectedTopologySerial: + $ref: '#/components/schemas/PriorTopologySerial' description: |- The serial of the last ``VettedPackages`` topology transaction of this participant and on this synchronizer. - Execution of the request fails if this is not correct. If the serial is - left unspecified, the request always succeeds. + Execution of the request fails if this is not correct. Use this to guard + against concurrent changes. - Use this to guard against concurrent changes. - type: integer - format: int32 + If left unspecified, no validation is done against the last transaction's + serial. + + Optional UpdateVettedPackagesResponse: title: UpdateVettedPackagesResponse type: object @@ -6709,6 +7006,8 @@ type: object required: - packageId + - packageName + - packageVersion properties: packageId: description: Package ID of this package. Always present. @@ -6727,11 +7026,13 @@ description: |- Name of this package. Only available if the package has been uploaded to the current participant. + If unavailable, this is the empty string. type: string packageVersion: description: |- Version of this package. Only available if the package has been uploaded to the current participant. + If unavailable, this is the empty string. type: string VettedPackages: title: VettedPackages @@ -6744,7 +7045,6 @@ required: - participantId - synchronizerId - - topologySerial properties: packages: description: |- @@ -6760,11 +7060,10 @@ description: Synchronizer on which these packages are vetted. Always present. type: string topologySerial: + $ref: '#/components/schemas/PriorTopologySerial' description: |- Serial of the last ``VettedPackages`` topology transaction of this participant - and on this synchronizer. - type: integer - format: int32 + and on this synchronizer. Always present. VettedPackagesChange: title: VettedPackagesChange description: A change to the set of vetted packages. @@ -6781,22 +7080,33 @@ A reference matches a package if its ``package_id`` matches the package's ID, its ``package_name`` matches the package's name, and its ``package_version`` - matches the package's version.
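Stepping back to the two serial fields just introduced: `VettedPackages.topologySerial` (read side) and `UpdateVettedPackagesRequest.expectedTopologySerial` (write side) together support an optimistic-concurrency loop. A minimal retry sketch, assuming hypothetical `readSerial` and `update` helpers over the list/update endpoints and a distinct failure outcome for a stale serial; the guard semantics follow the descriptions above:

```scala
object VettedPackagesGuard {
  sealed trait UpdateOutcome
  case object Updated extends UpdateOutcome
  // The participant rejected the request because expectedTopologySerial was stale.
  case object SerialMismatch extends UpdateOutcome

  @annotation.tailrec
  def updateWithGuard(
      readSerial: () => Int, // current VettedPackages.topologySerial (Prior.value)
      update: Int => UpdateOutcome, // UpdateVettedPackages with expectedTopologySerial set
      retriesLeft: Int = 3,
  ): Boolean =
    update(readSerial()) match {
      case Updated => true
      case SerialMismatch if retriesLeft > 0 =>
        // A concurrent vetting change advanced the serial; re-read and retry.
        updateWithGuard(readSerial, update, retriesLeft - 1)
      case SerialMismatch => false
    }
}
```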
If an attribute in the reference is left + unspecified (i.e. as an empty string), that attribute is treated as a + wildcard. At a minimum, ``package_id`` or the ``package_name`` must be + specified. If a reference does not match any package, the reference is considered unresolved and the entire update request is rejected. type: object + required: + - packageId + - packageName + - packageVersion properties: packageId: - description: Package's package id must be the same as this field. + description: |- + Package's package id must be the same as this field. + Optional type: string packageName: - description: Package's name must be the same as this field. + description: |- + Package's name must be the same as this field. + Optional type: string packageVersion: - description: Package's version must be the same as this field. + description: |- + Package's version must be the same as this field. + Optional type: string WildcardFilter: title: WildcardFilter diff --git a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/daml/lf/value/json/ApiCodecCompressedSpec.scala b/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/daml/lf/value/json/ApiCodecCompressedSpec.scala deleted file mode 100644 index f1656da06d..0000000000 --- a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/daml/lf/value/json/ApiCodecCompressedSpec.scala +++ /dev/null @@ -1,588 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.daml.lf.value.json - -import com.digitalasset.canton.daml.lf.value.json.NavigatorModelAliases as model -import com.digitalasset.canton.ledger.service.MetadataReader -import com.digitalasset.canton.util.JarResourceUtils -import com.digitalasset.daml.lf.data.{ImmArray, Numeric, Ref, SortedLookupList, Time} -import com.digitalasset.daml.lf.value.Value.ContractId -import com.digitalasset.daml.lf.value.test.TypedValueGenerators.{ - ValueAddend as VA, - genAddend, - genTypeAndValue, -} -import com.digitalasset.daml.lf.value.test.ValueGenerators.coidGen -import org.scalacheck.Arbitrary -import org.scalactic.source -import org.scalatest.Inside -import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.AnyWordSpec -import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks -import scalaz.syntax.show.* -import shapeless.record.Record as HRecord -import shapeless.{Coproduct as HSum, HNil} -import spray.json.* - -import java.time.Instant -import scala.annotation.nowarn -import scala.util.{Success, Try} - -import ApiCodecCompressed.{apiValueToJsValue, jsValueToApiValue} - -abstract class ApiCodecCompressedSpec - extends AnyWordSpec - with Matchers - with ScalaCheckPropertyChecks - with Inside { - - def darPath: String - - import C.typeLookup - - protected implicit val cidArb: Arbitrary[ContractId] = Arbitrary(coidGen) - - private val dar = JarResourceUtils.resourceFile(darPath) - require(dar.exists()) - - protected val darMetadata: MetadataReader.LfMetadata = - MetadataReader - .readFromDar(dar) - .valueOr(e => fail(s"Cannot read metadata from $dar, error:" + e.shows)) - - protected val darTypeLookup: NavigatorModelAliases.DamlLfTypeLookup = - MetadataReader.typeLookup(darMetadata) - - /** Serializes the API value to JSON, then parses it back to an API value */ - protected def serializeAndParse( - value: model.ApiValue, - typ: model.DamlLfType, - ): Try[model.ApiValue] = { - import 
ApiCodecCompressed.JsonImplicits.* - - for { - serialized <- Try(value.toJson.prettyPrint) - json <- Try(serialized.parseJson) - parsed <- Try(jsValueToApiValue(json, typ, typeLookup)) - } yield parsed - } - - protected def roundtrip(va: VA)(v: va.Inj): Option[va.Inj] = - va.prj(jsValueToApiValue(apiValueToJsValue(va.inj(v)), va.t, typeLookup)) - - protected val decimalScale = Numeric.Scale.assertFromInt(10) - - protected object C /* based on navigator DamlConstants */ { - import shapeless.syntax.singleton.* - val packageId0 = Ref.PackageId assertFromString "hash" - val moduleName0 = Ref.ModuleName assertFromString "Module" - def defRef(name: String) = - Ref.Identifier( - packageId0, - Ref.QualifiedName(moduleName0, Ref.DottedName assertFromString name), - ) - val emptyRecordId = defRef("EmptyRecord") - val (emptyRecordDDT, emptyRecordT) = VA.record(emptyRecordId, HNil) - val simpleRecordId = defRef("SimpleRecord") - val simpleRecordVariantSpec = HRecord(fA = VA.text, fB = VA.int64) - val (simpleRecordDDT, simpleRecordT) = - VA.record(simpleRecordId, simpleRecordVariantSpec) - val simpleRecordV: simpleRecordT.Inj = HRecord(fA = "foo", fB = 100L) - - val simpleVariantId = defRef("SimpleVariant") - val (simpleVariantDDT, simpleVariantT) = - VA.variant(simpleVariantId, simpleRecordVariantSpec) - val simpleVariantV = HSum[simpleVariantT.Inj](Symbol("fA") ->> "foo") - - val complexRecordId = defRef("ComplexRecord") - val (complexRecordDDT, complexRecordT) = - VA.record( - complexRecordId, - HRecord( - fText = VA.text, - fBool = VA.bool, - fDecimal = VA.numeric(decimalScale), - fUnit = VA.unit, - fInt64 = VA.int64, - fParty = VA.party, - fContractId = VA.contractId, - fListOfText = VA.list(VA.text), - fListOfUnit = VA.list(VA.unit), - fDate = VA.date, - fTimestamp = VA.timestamp, - fOptionalText = VA.optional(VA.text), - fOptionalUnit = VA.optional(VA.unit), - fOptOptText = VA.optional(VA.optional(VA.text)), - fMap = VA.map(VA.int64), - fVariant = simpleVariantT, - fRecord = simpleRecordT, - ), - ) - @nowarn("msg=dubious usage of method asInstanceOf with unit value") - val complexRecordV: complexRecordT.Inj = - HRecord( - fText = "foo", - fBool = true, - fDecimal = Numeric assertFromString "100.0000000000", - fUnit = (), - fInt64 = 100L, - fParty = Ref.Party assertFromString "BANK1", - fContractId = ContractId.assertFromString("00" + "00" * 32 + "c0"), - fListOfText = Vector("foo", "bar"), - fListOfUnit = Vector((), ()), - fDate = Time.Date assertFromString "2019-01-28", - fTimestamp = Time.Timestamp.assertFromInstant(Instant.parse("2019-01-28T12:44:33.22Z")), - fOptionalText = None, - fOptionalUnit = Some(()), - fOptOptText = Some(Some("foo")), - fMap = SortedLookupList(Map("1" -> 1L, "2" -> 2L, "3" -> 3L)), - fVariant = simpleVariantV, - fRecord = simpleRecordV, - ) - - val colorId = defRef("Color") - val (colorGD, colorGT) = - VA.enumeration(colorId, Seq("Red", "Green", "Blue") map Ref.Name.assertFromString) - - val typeLookup: NavigatorModelAliases.DamlLfTypeLookup = - Map( - emptyRecordId -> emptyRecordDDT, - simpleRecordId -> simpleRecordDDT, - simpleVariantId -> simpleVariantDDT, - complexRecordId -> complexRecordDDT, - colorId -> colorGD, - ).lift - } - - protected def mustBeOne[A](as: Seq[A]): A = as match { - case Seq(x) => x - case xs @ _ => sys.error(s"Expected exactly one element, got: $xs") - } -} - -class ApiCodecCompressedSpecStable extends ApiCodecCompressedSpec { - - import C.typeLookup - - override def darPath: String = "JsonEncodingTest.dar" - - "API compressed JSON codec" 
when { - - "serializing and parsing a value" should { - - "work for arbitrary reference-free types" in forAll( - genTypeAndValue(coidGen), - minSuccessful(100), - ) { case (typ, value) => - serializeAndParse(value, typ) shouldBe Success(value) - } - - "work for many, many values in raw format" in forAll(genAddend, minSuccessful(100)) { va => - import va.injshrink - implicit val arbInj: Arbitrary[va.Inj] = va.injarb - forAll(minSuccessful(20)) { (v: va.Inj) => - roundtrip(va)(v) should ===(Some(v)) - } - } - - "handle nested optionals" in { - val va = VA.optional(VA.optional(VA.int64)) - val cases = Table( - "value", - None, - Some(None), - Some(Some(42L)), - ) - forEvery(cases) { ool => - roundtrip(va)(ool) should ===(Some(ool)) - } - } - - "handle lists of optionals" in { - val va = VA.optional(VA.optional(VA.list(VA.optional(VA.optional(VA.int64))))) - import va.injshrink - implicit val arbInj: Arbitrary[va.Inj] = va.injarb - forAll(minSuccessful(1000)) { (v: va.Inj) => - roundtrip(va)(v) should ===(Some(v)) - } - } - - def cr(typ: VA)(v: typ.Inj) = - (typ, v: Any, typ.inj(v)) - - val roundtrips = Table( - ("type", "original value", "Daml value"), - cr(C.emptyRecordT)(HRecord()), - cr(C.simpleRecordT)(C.simpleRecordV), - cr(C.simpleVariantT)(C.simpleVariantV), - cr(C.complexRecordT)(C.complexRecordV), - ) - "work for records and variants" in forAll(roundtrips) { (typ, origValue, damlValue) => - typ.prj(jsValueToApiValue(apiValueToJsValue(damlValue), typ.t, typeLookup)) should ===( - Some(origValue) - ) - } - /* - "work for Tree" in { - serializeAndParse(C.treeV, C.treeTC) shouldBe Success(C.treeV) - } - "work for Enum" in { - serializeAndParse(C.redV, C.redTC) shouldBe Success(C.redV) - } - */ - } - - def cn(canonical: String, numerically: String, typ: VA)( - expected: typ.Inj, - alternates: String* - )(implicit pos: source.Position) = - (pos.lineNumber, canonical, numerically, typ, expected, alternates) - - def c(canonical: String, typ: VA)(expected: typ.Inj, alternates: String*)(implicit - pos: source.Position - ) = - cn(canonical, canonical, typ)(expected, alternates*)(pos) - - object VAs { - val ooi = VA.optional(VA.optional(VA.int64)) - val oooi = VA.optional(ooi) - } - - val numCodec = ApiCodecCompressed.copy(false, false) - - @nowarn("cat=lint-infer-any") - val successes = Table( - ("line#", "serialized", "serializedNumerically", "type", "parsed", "alternates"), - c( - "\"0000000000000000000000000000000000000000000000000000000000000000000123\"", - VA.contractId, - )( - ContractId.assertFromString( - "0000000000000000000000000000000000000000000000000000000000000000000123" - ) - ), - cn("\"42.0\"", "42.0", VA.numeric(decimalScale))( - Numeric assertFromString "42.0000000000", - "\"42\"", - "42", - "42.0", - "\"+42\"", - ), - cn("\"2000.0\"", "2000", VA.numeric(decimalScale))( - Numeric assertFromString "2000.0000000000", - "\"2000\"", - "2000", - "2e3", - ), - cn("\"0.3\"", "0.3", VA.numeric(decimalScale))( - Numeric assertFromString "0.3000000000", - "\"0.30000000000000004\"", - "0.30000000000000004", - ), - cn( - "\"9999999999999999999999999999.9999999999\"", - "9999999999999999999999999999.9999999999", - VA.numeric(decimalScale), - )(Numeric assertFromString "9999999999999999999999999999.9999999999"), - cn("\"0.1234512346\"", "0.1234512346", VA.numeric(decimalScale))( - Numeric assertFromString "0.1234512346", - "0.12345123455", - "0.12345123465", - "\"0.12345123455\"", - "\"0.12345123465\"", - ), - cn("\"0.1234512345\"", "0.1234512345", VA.numeric(decimalScale))( - Numeric 
assertFromString "0.1234512345", - "0.123451234549", - "0.12345123445001", - "\"0.123451234549\"", - "\"0.12345123445001\"", - ), - c("\"1990-11-09T04:30:23.123456Z\"", VA.timestamp)( - Time.Timestamp.assertFromInstant(Instant.parse("1990-11-09T04:30:23.123456Z")), - "\"1990-11-09T04:30:23.1234569Z\"", - ), - c("\"1970-01-01T00:00:00Z\"", VA.timestamp)(Time.Timestamp assertFromLong 0), - // Ensure ISO 8601 timestamps with offsets are successfully parsed by comparing to (epoch - 1 hour) - c("\"1969-12-31T23:00:00Z\"", VA.timestamp)( - Time.Timestamp.assertFromLong(-3600000000L), - "\"1970-01-01T00:00:00+01:00\"", - ), - cn("\"42\"", "42", VA.int64)(42, "\"+42\""), - cn("\"0\"", "0", VA.int64)(0, "-0", "\"+0\"", "\"-0\""), - c("\"Alice\"", VA.party)(Ref.Party assertFromString "Alice"), - c("{}", VA.unit)(()), - c("\"2019-06-18\"", VA.date)(Time.Date assertFromString "2019-06-18"), - c("\"9999-12-31\"", VA.date)(Time.Date assertFromString "9999-12-31"), - c("\"0001-01-01\"", VA.date)(Time.Date assertFromString "0001-01-01"), - c("\"abc\"", VA.text)("abc"), - c("true", VA.bool)(true), - cn("""["1", "2", "3"]""", "[1, 2, 3]", VA.list(VA.int64))(Vector(1, 2, 3)), - c("""{"a": "b", "c": "d"}""", VA.map(VA.text))(SortedLookupList(Map("a" -> "b", "c" -> "d"))), - c("""[["a", "b"], ["c", "d"]]""", VA.genMap(VA.text, VA.text))(Map("a" -> "b", "c" -> "d")), - cn("\"42\"", "42", VA.optional(VA.int64))(Some(42)), - c("null", VA.optional(VA.int64))(None), - c("null", VAs.ooi)(None), - c("[]", VAs.ooi)(Some(None), "[null]"), - cn("""["42"]""", "[42]", VAs.ooi)(Some(Some(42))), - c("null", VAs.oooi)(None), - c("[]", VAs.oooi)(Some(None), "[null]"), - c("[[]]", VAs.oooi)(Some(Some(None)), "[[null]]"), - cn("""[["42"]]""", "[[42]]", VAs.oooi)(Some(Some(Some(42)))), - cn("""{"fA": "foo", "fB": "100"}""", """{"fA": "foo", "fB": 100}""", C.simpleRecordT)( - C.simpleRecordV - ), - c("""{"tag": "fA", "value": "foo"}""", C.simpleVariantT)(C.simpleVariantV), - c("\"Green\"", C.colorGT)( - C.colorGT get Ref.Name.assertFromString("Green") getOrElse sys.error("impossible") - ), - ) - - val failures = Table( - ("JSON", "type", "errorSubstring"), - ("42.3", VA.int64, ""), - ("\"42.3\"", VA.int64, ""), - ("9223372036854775808", VA.int64, ""), - ("-9223372036854775809", VA.int64, ""), - ("\"garbage\"", VA.int64, ""), - ("\" 42 \"", VA.int64, ""), - ("\"1970-01-01T00:00:00\"", VA.timestamp, ""), - ("\"1970-01-01T00:00:00+01:00[Europe/Paris]\"", VA.timestamp, ""), - ("\"0000-01-01\"", VA.date, "Invalid date: 0000-01-01"), - ("\"9999-99-99\"", VA.date, "Invalid date: 9999-99-99"), - ("\"9999-12-32\"", VA.date, "Invalid date: 9999-12-32"), - ("\"9999-13-31\"", VA.date, "Invalid date: 9999-13-31"), - ("\"10000-01-01\"", VA.date, "Invalid date: 10000-01-01"), - ("\"1-01-01\"", VA.date, "Invalid date: 1-01-01"), - ("\"0001-02-29\"", VA.date, "Invalid date: 0001-02-29"), - ("\"not-a-date\"", VA.date, "Invalid date: not-a-date"), - ("""{"a": "b", "c": "d"}""", VA.genMap(VA.text, VA.text), ""), - ("\"\"", VA.party, "Daml-LF Party is empty"), - (List.fill(256)('a').mkString("\"", "", "\""), VA.party, "Daml-LF Party is too long"), - ) - - "dealing with particular formats" should { - "succeed in cases" in forEvery(successes) { - (_, serialized, serializedNumerically, typ, expected, alternates) => - val json = serialized.parseJson - val numJson = serializedNumerically.parseJson - val parsed = jsValueToApiValue(json, typ.t, typeLookup) - jsValueToApiValue(numJson, typ.t, typeLookup) should ===(parsed) - typ.prj(parsed) should 
===(Some(expected)) - apiValueToJsValue(parsed) should ===(json) - numCodec.apiValueToJsValue(parsed) should ===(numJson) - val tAlternates = Table("alternate", alternates*) - forEvery(tAlternates) { alternate => - val aJson = alternate.parseJson - typ.prj(jsValueToApiValue(aJson, typ.t, typeLookup)) should ===(Some(expected)) - } - } - - "fail in cases" in forEvery(failures) { (serialized, typ, errorSubstring) => - val json = serialized.parseJson // we don't test *the JSON decoder* - val exception = the[DeserializationException] thrownBy { - jsValueToApiValue(json, typ.t, typeLookup) - } - exception.getMessage should include(errorSubstring) - } - } - - import com.digitalasset.daml.lf.value.Value as LfValue - import ApiCodecCompressed.JsonImplicits.* - - val packageId: Ref.PackageId = mustBeOne( - MetadataReader.typeByName(darMetadata)( - Ref.QualifiedName.assertFromString("JsonEncodingTest:Foo") - ) - )._1 - - val bazRecord = LfValue.ValueRecord( - None, - ImmArray(Some(Ref.Name.assertFromString("baz")) -> LfValue.ValueText("text abc")), - ) - - val bazVariant = LfValue.ValueVariant( - None, - Ref.Name.assertFromString("Baz"), - bazRecord, - ) - - val quxVariant = LfValue.ValueVariant( - None, - Ref.Name.assertFromString("Qux"), - LfValue.ValueUnit, - ) - - val fooId = - Ref.Identifier(packageId, Ref.QualifiedName.assertFromString("JsonEncodingTest:Foo")) - - val bazRecordId = - Ref.Identifier(packageId, Ref.QualifiedName.assertFromString("JsonEncodingTest:BazRecord")) - - "dealing with LF Record" should { - val lfType = (n: String) => - Ref.Identifier(packageId, Ref.QualifiedName.assertFromString("JsonEncodingTest:" + n)) - val decode = (typeId: Ref.Identifier, json: String) => - jsValueToApiValue(json.parseJson, typeId, darTypeLookup) - val person = (name: String, age: Long, address: String) => { - val attr = (n: String) => Some(Ref.Name.assertFromString(n)) - LfValue.ValueRecord( - Some(lfType("Person")), - ImmArray( - (attr("name"), LfValue.ValueText(name)), - (attr("age"), LfValue.ValueInt64(age)), - (attr("address"), LfValue.ValueText(address)), - ), - ) - } - "decode a JSON array of the right length" in { - decode(lfType("Person"), """["Joe Smith", 20, "1st Street"]""") - .shouldBe(person("Joe Smith", 20, "1st Street")) - } - "fail to decode if missing fields" in { - the[DeserializationException].thrownBy { - decode(lfType("Person"), """["Joe Smith", 21]""") - }.getMessage should include("expected 3, found 2") - } - "fail to decode if extra fields" in { - the[DeserializationException].thrownBy { - decode(lfType("Person"), """["Joe Smith", 21, "1st Street", "Arizona"]""") - }.getMessage should include("expected 3, found 4") - } - } - - "dealing with LF Variant" should { - "encode Foo/Baz to JSON" in { - val writer = implicitly[spray.json.JsonWriter[LfValue]] - (writer.write( - bazVariant - ): JsValue) shouldBe ("""{"tag":"Baz", "value":{"baz":"text abc"}}""".parseJson: JsValue) - } - - "decode Foo/Baz from JSON" in { - val actualValue: LfValue = jsValueToApiValue( - """{"tag":"Baz", "value":{"baz":"text abc"}}""".parseJson, - fooId, - darTypeLookup, - ) - - val expectedValueWithIds: LfValue.ValueVariant = - bazVariant.copy(tycon = Some(fooId), value = bazRecord.copy(tycon = Some(bazRecordId))) - - actualValue shouldBe expectedValueWithIds - } - - "encode Foo/Qux to JSON" in { - val writer = implicitly[spray.json.JsonWriter[LfValue]] - (writer.write( - quxVariant - ): JsValue) shouldBe ("""{"tag":"Qux", "value":{}}""".parseJson: JsValue) - } - - "fail decoding Foo/Qux from JSON if 
'value' field is missing" in { - assertThrows[spray.json.DeserializationException] { - jsValueToApiValue( - """{"tag":"Qux"}""".parseJson, - fooId, - darTypeLookup, - ) - } - } - - "decode Foo/Qux (empty value) from JSON" in { - val actualValue: LfValue = jsValueToApiValue( - """{"tag":"Qux", "value":{}}""".parseJson, - fooId, - darTypeLookup, - ) - - val expectedValueWithIds: LfValue.ValueVariant = - quxVariant.copy(tycon = Some(fooId)) - - actualValue shouldBe expectedValueWithIds - } - } - } -} - -class ApiCodecCompressedSpecDev extends ApiCodecCompressedSpec { - override def darPath: String = "JsonEncodingTestDev.dar" - - import com.digitalasset.daml.lf.value.Value as LfValue - - "API compressed JSON codec" when { - "dealing with Contract Key" should { - import com.digitalasset.daml.lf.typesig.PackageSignature.TypeDecl.Template as TDTemplate - - "decode type Key = Party from JSON" in { - val templateDef: TDTemplate = mustBeOne( - MetadataReader.templateByName(darMetadata)( - Ref.QualifiedName.assertFromString("JsonEncodingTest:KeyedByParty") - ) - )._2 - - val keyType = templateDef.template.key.getOrElse(fail("Expected a key, got None")) - val expectedValue: LfValue = LfValue.ValueParty(Ref.Party.assertFromString("Alice")) - - jsValueToApiValue(JsString("Alice"), keyType, darTypeLookup) shouldBe expectedValue - } - - "decode type Key = (Party, Int) from JSON" in { - val templateDef: TDTemplate = mustBeOne( - MetadataReader.templateByName(darMetadata)( - Ref.QualifiedName.assertFromString("JsonEncodingTest:KeyedByPartyInt") - ) - )._2 - - val tuple2Name = Ref.QualifiedName.assertFromString("DA.Types:Tuple2") - val daTypesPackageId: Ref.PackageId = - mustBeOne(MetadataReader.typeByName(darMetadata)(tuple2Name))._1 - - val keyType = templateDef.template.key.getOrElse(fail("Expected a key, got None")) - - val expectedValue: LfValue = LfValue.ValueRecord( - Some(Ref.Identifier(daTypesPackageId, tuple2Name)), - ImmArray( - Some(Ref.Name.assertFromString("_1")) -> LfValue.ValueParty( - Ref.Party.assertFromString("Alice") - ), - Some(Ref.Name.assertFromString("_2")) -> LfValue.ValueInt64(123), - ), - ) - - jsValueToApiValue( - """["Alice", 123]""".parseJson, - keyType, - darTypeLookup, - ) shouldBe expectedValue - } - - "decode type Key = (Party, (Int, Foo, BazRecord)) from JSON" in { - val templateDef: TDTemplate = mustBeOne( - MetadataReader.templateByName(darMetadata)( - Ref.QualifiedName.assertFromString("JsonEncodingTest:KeyedByVariantAndRecord") - ) - )._2 - - val keyType = templateDef.template.key.getOrElse(fail("Expected a key, got None")) - - val actual: LfValue = jsValueToApiValue( - """["Alice", [11, {"tag": "Bar", "value": 123}, {"baz": "baz text"}]]""".parseJson, - keyType, - darTypeLookup, - ) - - inside(actual) { case LfValue.ValueRecord(Some(id2), ImmArray((_, party), (_, record2))) => - id2.qualifiedName.name shouldBe Ref.DottedName.assertFromString("Tuple2") - party shouldBe LfValue.ValueParty(Ref.Party.assertFromString("Alice")) - - inside(record2) { case LfValue.ValueRecord(Some(id3), ImmArray((_, age), _, _)) => - id3.qualifiedName.name shouldBe Ref.DottedName.assertFromString("Tuple3") - age shouldBe LfValue.ValueInt64(11) - } - } - } - } - } -} diff --git a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/fetchcontracts/AcsTxStreamsTest.scala b/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/fetchcontracts/AcsTxStreamsTest.scala deleted file mode 100644 index 7a02fc4ee6..0000000000 --- 
a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/fetchcontracts/AcsTxStreamsTest.scala +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.fetchcontracts - -import com.daml.ledger.api.testing.utils.PekkoBeforeAndAfterAll -import com.daml.ledger.api.v2.transaction.Transaction -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.logging.TracedLogger -import org.scalatest.wordspec.AsyncWordSpec - -import scala.concurrent.Future - -@SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements")) -class AcsTxStreamsTest extends AsyncWordSpec with BaseTest with PekkoBeforeAndAfterAll { - import AcsTxStreamsTest.* - - "acsFollowingAndBoundary" when { - "ACS is active" should { - "cancel the ACS on output cancel" in { - val (acs, futx, out, _) = probeAcsFollowingAndBoundary(logger) - out.cancel() - acs.expectCancellation() - futx.isCompleted should ===(false) - } - } - - "ACS is past liveBegin" should { - "not start tx until ACS is complete" in { - val (acs, futx, _, _) = probeAcsFollowingAndBoundary(logger) - acs.sendNext(liveBegin) - futx.isCompleted should ===(false) - } - - "propagate cancellation of tx stream" in { - val (acs, futx, out, off) = probeAcsFollowingAndBoundary(logger) - acs.sendNext(liveBegin).sendComplete() - off.expectSubscription() - out.cancel() - futx.map { tx => - tx.expectCancellation() - succeed - } - } - } - } -} - -object AcsTxStreamsTest { - import org.apache.pekko.actor.ActorSystem - import org.apache.pekko.{NotUsed, stream as aks} - import aks.scaladsl.{GraphDSL, RunnableGraph, Source} - import aks.testkit as tk - import com.daml.logging.LoggingContextOf - import tk.TestPublisher.Probe as InProbe - import tk.TestSubscriber.Probe as OutProbe - import tk.scaladsl.{TestSink, TestSource} - - private val liveBegin: Left[Long, Nothing] = Left(42L) - - private implicit val `log ctx`: LoggingContextOf[Any] = - LoggingContextOf.newLoggingContext(LoggingContextOf.label[Any])(identity) - - private def probeAcsFollowingAndBoundary(logger: TracedLogger)(implicit - ec: concurrent.ExecutionContext, - as: ActorSystem, - ) = - probeFOS2PlusContinuation( - AcsTxStreams.acsFollowingAndBoundary( - _: String => Source[Transaction, NotUsed], - logger, - ) - ).run() - - @SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements")) - private def probeFOS2PlusContinuation[K, I0, I1, O0, O1]( - part: (Any => Source[I1, NotUsed]) => aks.Graph[aks.FanOutShape2[I0, O0, O1], NotUsed] - )(implicit - as: ActorSystem - ): RunnableGraph[(InProbe[I0], Future[InProbe[I1]], OutProbe[O0], OutProbe[O1])] = { - val i1 = concurrent.Promise[InProbe[I1]]() - // filling in i1 like this is terribly hacky but is well enough for a test - probeAll( - part(_ => - TestSource.probe[I1].mapMaterializedValue { i1p => - i1.success(i1p) - NotUsed - } - ) - ) - .mapMaterializedValue { case (i0, o0, o1) => (i0, i1.future, o0, o1) } - } - - private def probeAll[I, O0, O1]( - part: aks.Graph[aks.FanOutShape2[I, O0, O1], NotUsed] - )(implicit as: ActorSystem): RunnableGraph[(InProbe[I], OutProbe[O0], OutProbe[O1])] = - RunnableGraph fromGraph GraphDSL.createGraph( - TestSource.probe[I], - TestSink.probe[O0], - TestSink.probe[O1], - )((_, _, _)) { implicit b => (i, o0, o1) => - import GraphDSL.Implicits.* - val here = b add part - // format: off - i ~> here.in - o0 <~ here.out0 - o1 <~ here.out1 - // format: on - 
aks.ClosedShape - } -} diff --git a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/fetchcontracts/util/ContractStreamStepTest.scala b/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/fetchcontracts/util/ContractStreamStepTest.scala deleted file mode 100644 index 6101dd7a0b..0000000000 --- a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/fetchcontracts/util/ContractStreamStepTest.scala +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.fetchcontracts.util - -import com.daml.scalatest.FlatSpecCheckLaws -import com.digitalasset.canton.Generators.boundedListGen -import com.digitalasset.canton.fetchcontracts.Offset -import org.scalatest.flatspec.AnyFlatSpec -import org.scalatest.matchers.should.Matchers -import org.scalatest.prop.TableDrivenPropertyChecks -import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks -import scalaz.scalacheck.ScalaCheckBinding.* -import scalaz.scalacheck.ScalazProperties -import scalaz.syntax.apply.* -import scalaz.syntax.semigroup.* -import scalaz.{@@, Equal, Tag} - -import scala.annotation.nowarn - -@nowarn("msg=match may not be exhaustive") -class ContractStreamStepTest - extends AnyFlatSpec - with FlatSpecCheckLaws - with Matchers - with ScalaCheckDrivenPropertyChecks - with TableDrivenPropertyChecks { - - import ContractStreamStepTest.*, ContractStreamStep.* - import InsertDeleteStepTest.* - - override implicit val generatorDrivenConfig: PropertyCheckConfiguration = - PropertyCheckConfiguration(minSuccessful = 100) - - behavior of "append" - - it should "be associative for valid streams" in forAll(validStreamGen) { csses => - whenever(csses.sizeIs >= 3) { - forEvery( - Table(("a", "b", "c"), csses.sliding(3).map { case Seq(a, b, c) => (a, b, c) }.toSeq*) - ) { case (a, b, c) => - (a |+| (b |+| c)) should ===((a |+| b) |+| c) - } - } - } - - it should "report the last offset" in forAll { (a: CSS, b: CSS) => - def off(css: ContractStreamStep[_, _]) = css match { - case Acs(_) => None - case LiveBegin(off) => off.toOption - case Txn(_, off) => Some(off) - } - - off(a |+| b) should ===(off(b) orElse off(a)) - } - - it should "preserve append across toInsertDelete" in forAll { (a: CSS, b: CSS) => - (a |+| b).toInsertDelete should ===(a.toInsertDelete |+| b.toInsertDelete) - } - - behavior of "append semigroup" - - checkLaws(ScalazProperties.semigroup.laws[CSS]) -} - -object ContractStreamStepTest { - import InsertDeleteStepTest.*, InsertDeleteStep.Inserts, ContractStreamStep.* - import org.scalacheck.{Arbitrary, Gen} - import Arbitrary.arbitrary - - type CSS = ContractStreamStep[Unit, Cid] - - private val offGen: Gen[Offset] = Tag subst Tag.unsubst(arbitrary[String @@ Alpha]) - private val acsGen = arbitrary[Inserts[Cid]] map (Acs(_)) - private val noAcsLBGen = Gen const LiveBegin(ParticipantBegin) - private val postAcsGen = offGen map (o => LiveBegin(AbsoluteBookmark(o))) - private val txnGen = ^(arbitrary[IDS], offGen)(Txn(_, _)) - - private val validStreamGen: Gen[Seq[CSS]] = for { - beforeAfter <- Gen.zip( - boundedListGen(acsGen), - boundedListGen(txnGen), - ) - (acsSeq, txnSeq) = beforeAfter - liveBegin <- if (acsSeq.isEmpty) noAcsLBGen else postAcsGen - } yield (acsSeq :+ liveBegin) ++ txnSeq - - private implicit val `CSS eq`: Equal[CSS] = Equal.equalA - - private implicit val `anyCSS arb`: Arbitrary[CSS] 
= - Arbitrary(Gen.frequency((4, acsGen), (1, noAcsLBGen), (1, postAcsGen), (4, txnGen))) -} diff --git a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/fetchcontracts/util/InsertDeleteStepTest.scala b/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/fetchcontracts/util/InsertDeleteStepTest.scala deleted file mode 100644 index a5b004239f..0000000000 --- a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/fetchcontracts/util/InsertDeleteStepTest.scala +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.fetchcontracts.util - -import com.daml.scalatest.FlatSpecCheckLaws -import org.scalatest.flatspec.AnyFlatSpec -import org.scalatest.matchers.should.Matchers -import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks -import scalaz.scalacheck.ScalazProperties -import scalaz.syntax.semigroup.* -import scalaz.{@@, Equal, Tag} - -class InsertDeleteStepTest - extends AnyFlatSpec - with Matchers - with FlatSpecCheckLaws - with ScalaCheckDrivenPropertyChecks { - import InsertDeleteStepTest.* - - override implicit val generatorDrivenConfig: PropertyCheckConfiguration = - PropertyCheckConfiguration(minSuccessful = 100) - - behavior of "append monoid" - - checkLaws(ScalazProperties.monoid.laws[IDS]) - - behavior of "append" - - it should "never insert a deleted item" in forAll { (x: IDS, y: IDS) => - val xy = x |+| y.copy(inserts = y.inserts filterNot Cid.subst(x.deletes.keySet)) - xy.inserts.toSet intersect Cid.subst(xy.deletes.keySet) shouldBe empty - } - - it should "preserve every left delete" in forAll { (x: IDS, y: IDS) => - val xy = x |+| y - xy.deletes.keySet should contain allElementsOf x.deletes.keySet - } - - it should "preserve at least right deletes absent in left inserts" in forAll { (x: IDS, y: IDS) => - val xy = x |+| y - // xy.deletes _may_ contain x.inserts; it is semantically irrelevant - xy.deletes.keySet should contain allElementsOf (y.deletes.keySet -- Cid.unsubst(x.inserts)) - } - - it should "preserve append absent deletes" in forAll { (x: Vector[Cid], y: Vector[Cid]) => - val xy = InsertDeleteStep(x, Map.empty[String, Unit]) |+| InsertDeleteStep(y, Map.empty) - xy.inserts should ===(x ++ y) - } -} - -object InsertDeleteStepTest { - import org.scalacheck.{Arbitrary, Gen, Shrink} - import Arbitrary.arbitrary - - type IDS = InsertDeleteStep[Unit, Cid] - sealed trait Alpha - type Cid = String @@ Alpha - val Cid = Tag.of[Alpha] - - implicit val `Alpha arb`: Arbitrary[Cid] = - Cid subst Arbitrary(Gen.alphaUpperChar map (_.toString)) - - private[util] implicit val `test Cid`: InsertDeleteStep.Cid[Cid] = Cid.unwrap(_) - - implicit val `IDS arb`: Arbitrary[IDS] = - Arbitrary(arbitrary[(Vector[Cid], Map[Cid, Unit])] map { case (is, ds) => - InsertDeleteStep(is filterNot ds.keySet, Cid.unsubst[Map[*, Unit], String](ds)) - }) - - implicit val `IDS shr`: Shrink[IDS] = - Shrink.xmap[(Vector[Cid], Map[Cid, Unit]), IDS]( - { case (is, ds) => InsertDeleteStep(is, Cid.unsubst[Map[*, Unit], String](ds)) }, - step => (step.inserts, Cid.subst[Map[*, Unit], String](step.deletes)), - ) - - implicit val `IDS eq`: Equal[IDS] = Equal.equalA -} diff --git a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/CommandServiceTest.scala 
b/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/CommandServiceTest.scala deleted file mode 100644 index 496387cd41..0000000000 --- a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/CommandServiceTest.scala +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http - -import com.daml.jwt.{ - AuthServiceJWTCodec, - AuthServiceJWTPayload, - DecodedJwt, - Jwt, - JwtSigner, - StandardJWTPayload, - StandardJWTTokenFormat, -} -import com.daml.ledger.api.v2 as lav2 -import com.daml.logging.LoggingContextOf -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.http.json.v1.CommandService -import com.digitalasset.canton.http.util.Logging as HLogging -import com.digitalasset.canton.tracing.NoTracing -import org.scalatest.Inside -import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.AsyncWordSpec -import scalaz.syntax.foldable.* -import scalaz.syntax.tag.* -import scalaz.{NonEmptyList, \/-} -import spray.json.* - -import java.util.concurrent.CopyOnWriteArrayList -import scala.annotation.nowarn -import scala.collection as sc -import scala.concurrent.{ExecutionContext as EC, Future} -import scala.jdk.CollectionConverters.* - -import lav2.command_service.{ - SubmitAndWaitForTransactionResponse, - SubmitAndWaitForTransactionTreeResponse, - SubmitAndWaitRequest, -} -import lav2.transaction.{Transaction, TransactionTree} -import LoggingContextOf.{label, newLoggingContext} - -@SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements")) -class CommandServiceTest extends AsyncWordSpec with Matchers with Inside with NoTracing { - import CommandServiceTest.* - - "create" should { - // exercise and createAndExercise use the exact same party-handling code - "let CommandMeta parties override JWT" in { - val (cs, txns, trees) = simpleCommand() - val specialActAs = NonEmptyList("bar") - val specialReadAs = List("quux") - def create(meta: Option[CommandMeta.NoDisclosed]) = - CreateCommand(tplId, lav2.value.Record(), meta) - for { - normal <- cs.create(jwtForParties, multiPartyJwp, create(None)) - overridden <- cs.create( - jwtForParties, - multiPartyJwp, - create( - Some( - util.JwtPartiesTest.partiesOnlyMeta( - actAs = Party subst specialActAs, - readAs = Party subst specialReadAs, - ) - ) - ), - ) - } yield { - normal shouldBe a[\/-[_]] - overridden shouldBe a[\/-[_]] - inside(txns) { - case sc.Seq( - SubmitAndWaitRequest(Some(normalC)), - SubmitAndWaitRequest(Some(overriddenC)), - ) => - normalC.actAs should ===(multiPartyJwp.actAs) - normalC.readAs should ===(multiPartyJwp.readAs) - overriddenC.actAs should ===(specialActAs.toList) - overriddenC.readAs should ===(specialReadAs) - } - trees shouldBe empty - } - } - } -} - -// TODO(#23504) remove suppression of deprecation warnings -@nowarn("cat=deprecation") -object CommandServiceTest extends BaseTest { - private val multiPartyJwp = JwtWritePayload( - UserId("myapp"), - submitter = Party subst NonEmptyList("foo", "bar"), - readAs = Party subst List("baz", "quux"), - ) - private val tplId = - ContractTypeId.Template( - com.digitalasset.daml.lf.data.Ref.PackageRef.assertFromString("Foo"), - "Bar", - "Baz", - ) - - private[http] val userId: UserId = UserId("test") - - implicit private val ignoredLoggingContext - : LoggingContextOf[HLogging.InstanceUUID with HLogging.RequestID] = - 
newLoggingContext(label[HLogging.InstanceUUID with HLogging.RequestID])(identity) - - lazy val jwtForParties: Jwt = { - import AuthServiceJWTCodec.JsonImplicits.* - val payload: JsValue = { - val standardJwtPayload: AuthServiceJWTPayload = - StandardJWTPayload( - issuer = None, - userId = userId.unwrap, - participantId = None, - exp = None, - format = StandardJWTTokenFormat.Scope, - audiences = List.empty, - scope = Some(AuthServiceJWTCodec.scopeLedgerApiFull), - ) - standardJwtPayload.toJson - } - JwtSigner.HMAC256 - .sign( - DecodedJwt( - """{"alg": "HS256", "typ": "JWT"}""", - payload.prettyPrint, - ), - "secret", - ) - .fold( - e => throw new IllegalArgumentException(s"cannot sign a JWT: ${e.prettyPrint}"), - identity, - ) - } - - @SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements")) - private def simpleCommand()(implicit - ec: EC - ): (CommandService, sc.Seq[SubmitAndWaitRequest], sc.Seq[SubmitAndWaitRequest]) = { - val txns = new CopyOnWriteArrayList[SubmitAndWaitRequest]() - val trees = new CopyOnWriteArrayList[SubmitAndWaitRequest]() - ( - new CommandService( - submitAndWaitForTransaction = (_, req) => - _ => - _ => - Future { - txns.add(req) - import lav2.event.{CreatedEvent, Event}, Event.Event.Created - import com.digitalasset.canton.fetchcontracts.util.IdentifierConverters.apiIdentifier - val creation = Event( - Created( - CreatedEvent.defaultInstance.copy( - templateId = Some(apiIdentifier(tplId)), - createArguments = Some(lav2.value.Record()), - ) - ) - ) - \/-( - SubmitAndWaitForTransactionResponse( - Some(Transaction.defaultInstance.copy(events = Seq(creation), offset = 1)) - ) - ) - }, - submitAndWaitForTransactionTree = (_, req) => - _ => - Future { - trees.add(req) - \/-(SubmitAndWaitForTransactionTreeResponse(Some(TransactionTree.defaultInstance))) - }, - loggerFactory = loggerFactory, - ), - txns.asScala, - trees.asScala, - ) - } -} diff --git a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/Generators.scala b/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/Generators.scala deleted file mode 100644 index 3108103b26..0000000000 --- a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/Generators.scala +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http - -import com.daml.ledger.api.v2 as lav2 -import com.digitalasset.canton.http.ContractTypeId -import com.digitalasset.canton.topology.SynchronizerId -import com.digitalasset.daml.lf.data.Ref -import org.scalacheck.Gen -import scalaz.{-\/, \/, \/-} -import spray.json.{JsNumber, JsObject, JsString, JsValue} - -object Generators { - def genApiIdentifier: Gen[lav2.value.Identifier] = - for { - p <- genPkgIdentifier - m <- Gen.identifier - e <- Gen.identifier - } yield lav2.value.Identifier(packageId = p, moduleName = m, entityName = e) - - def genHttpTemplateId: Gen[ContractTypeId.Template.RequiredPkg] = - genHttpTemplateIdO[ContractTypeId.Template, Ref.PackageRef] - - def genHttpTemplateIdPkgId: Gen[ContractTypeId.Template.RequiredPkgId] = - genHttpTemplateIdO[ContractTypeId.Template, Ref.PackageId] - - def genHttpTemplateIdO[CtId[T] <: ContractTypeId[T], A](implicit - CtId: ContractTypeId.Like[CtId], - ev: PackageIdGen[A], - ): Gen[CtId[A]] = - for { - p <- ev.gen - m <- Gen.identifier - e <- Gen.identifier - } yield CtId(p, m, e) - - def nonEmptySetOf[A](gen: Gen[A]): Gen[Set[A]] = Gen.nonEmptyListOf(gen).map(_.toSet) - - // Generate Identifiers with unique packageId values, but the same moduleName and entityName. - def genDuplicateModuleEntityApiIdentifiers: Gen[Set[lav2.value.Identifier]] = - for { - id0 <- genApiIdentifier - otherPackageIds <- nonEmptySetOf(genPkgIdentifier.filter(x => x != id0.packageId)) - } yield Set(id0) ++ otherPackageIds.map(a => id0.copy(packageId = a)) - - def genDuplicateModuleEntityTemplateIds: Gen[Set[ContractTypeId.Template.RequiredPkgId]] = - genDuplicateModuleEntityApiIdentifiers.map(xs => xs.map(ContractTypeId.Template.fromLedgerApi)) - - private val genPkgIdentifier = Gen.identifier.map(s => s.substring(0, 64 min s.length)) - - final case class PackageIdGen[A](gen: Gen[A]) - - implicit val RequiredPackageIdGen: PackageIdGen[Ref.PackageId] = PackageIdGen( - genPkgIdentifier.map(Ref.PackageId.assertFromString) - ) - implicit val RequiredPackageRefGen: PackageIdGen[Ref.PackageRef] = PackageIdGen( - Gen.oneOf( - genPkgIdentifier.map(s => Ref.PackageRef.Id(Ref.PackageId.assertFromString(s))), - genPkgIdentifier.map(s => Ref.PackageRef.Name(Ref.PackageName.assertFromString(s))), - ) - ) - - def contractIdGen: Gen[ContractId] = ContractId subst Gen.identifier - def partyGen: Gen[Party] = Party subst Gen.identifier - - def scalazEitherGen[A, B](a: Gen[A], b: Gen[B]): Gen[A \/ B] = - Gen.oneOf(a.map(-\/(_)), b.map(\/-(_))) - - def inputContractRefGen[LfV](lfv: Gen[LfV]): Gen[InputContractRef[LfV]] = - scalazEitherGen( - Gen.zip(genHttpTemplateIdO[ContractTypeId.Template, Ref.PackageRef], lfv), - Gen.zip( - Gen.option(genHttpTemplateIdO: Gen[ContractTypeId.RequiredPkg]), - contractIdGen, - ), - ) - - def contractLocatorGen[LfV](lfv: Gen[LfV]): Gen[ContractLocator[LfV]] = - inputContractRefGen(lfv) map (ContractLocator.structure.from(_)) - - def contractGen: Gen[Contract[JsValue]] = - scalazEitherGen(archivedContractGen, activeContractGen).map(Contract(_)) - - def activeContractGen: Gen[ActiveContract.ResolvedCtTyId[JsValue]] = - for { - contractId <- contractIdGen - templateId <- Gen.oneOf( - genHttpTemplateIdO[ContractTypeId.Template, Ref.PackageId], - genHttpTemplateIdO[ContractTypeId.Interface, Ref.PackageId], - ) - key <- Gen.option(Gen.identifier.map(JsString(_))) - argument <- Gen.identifier.map(JsString(_)) - signatories <- Gen.listOf(partyGen) - observers <- Gen.listOf(partyGen) 
- } yield ActiveContract[ContractTypeId.ResolvedPkgId, JsValue]( - contractId = contractId, - templateId = templateId, - key = key, - payload = argument, - signatories = signatories, - observers = observers, - ) - - def archivedContractGen: Gen[ArchivedContract] = - for { - contractId <- contractIdGen - templateId <- Generators.genHttpTemplateIdO: Gen[ContractTypeId.RequiredPkgId] - } yield ArchivedContract( - contractId = contractId, - templateId = templateId, - ) - - def contractLocatorGen: Gen[ContractLocator[JsObject]] = - Gen.oneOf(enrichedContractIdGen, enrichedContractKeyGen) - - def enrichedContractKeyGen: Gen[EnrichedContractKey[JsObject]] = - for { - templateId <- genHttpTemplateIdO[ContractTypeId.Template, Ref.PackageRef] - key <- genJsObj - } yield EnrichedContractKey(templateId, key) - - def enrichedContractIdGen: Gen[EnrichedContractId] = - for { - templateId <- Gen.option(genHttpTemplateIdO: Gen[ContractTypeId.RequiredPkg]) - contractId <- contractIdGen - } yield EnrichedContractId(templateId, contractId) - - def exerciseCmdGen: Gen[ExerciseCommand.RequiredPkg[JsValue, ContractLocator[JsValue]]] = - for { - ref <- contractLocatorGen - arg <- genJsObj - cIfId <- Gen.option(genHttpTemplateIdO: Gen[ContractTypeId.RequiredPkg]) - choice <- Gen.identifier.map(Choice(_)) - meta <- Gen.option(metaGen) - } yield ExerciseCommand( - reference = ref, - choice = choice, - choiceInterfaceId = cIfId, - argument = arg, - meta = meta, - ) - - def metaGen: Gen[CommandMeta.NoDisclosed] = - for { - commandId <- Gen.option(Gen.identifier.map(CommandId(_))) - synchronizerId <- Gen.option(Gen.const(SynchronizerId.tryFromString("some::synchronizerid"))) - } yield CommandMeta(commandId, None, None, None, None, None, None, synchronizerId, None) - - private def genJsObj: Gen[JsObject] = - Gen.listOf(genJsValPair).map(xs => JsObject(xs.toMap)) - - private def genJsValPair: Gen[(String, JsValue)] = - for { - k <- Gen.identifier - v <- genJsValue - } yield (k, v) - - private def genJsValue: Gen[JsValue] = Gen.oneOf( - Gen.identifier.map(JsString(_): JsValue), - Gen.posNum[Int].map(JsNumber(_): JsValue), - ) - - def genUnknownTemplateIds: Gen[UnknownTemplateIds] = - Gen - .listOf(genHttpTemplateIdO: Gen[ContractTypeId.RequiredPkg]) - .map(UnknownTemplateIds.apply) - - def genUnknownParties: Gen[UnknownParties] = - Gen.listOf(partyGen).map(UnknownParties.apply) - - def genServiceWarning: Gen[ServiceWarning] = - Gen.oneOf(genUnknownTemplateIds, genUnknownParties) - - def genWarningsWrapper: Gen[AsyncWarningsWrapper] = - genServiceWarning.map(AsyncWarningsWrapper.apply) -} diff --git a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/GeneratorsTest.scala b/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/GeneratorsTest.scala deleted file mode 100644 index 5c017f5164..0000000000 --- a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/GeneratorsTest.scala +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http - -import org.scalatest.flatspec.AnyFlatSpec -import org.scalatest.matchers.should.Matchers -import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks - -import Generators.genDuplicateModuleEntityApiIdentifiers - -@SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements")) -class GeneratorsTest extends AnyFlatSpec with Matchers with ScalaCheckDrivenPropertyChecks { - implicit override val generatorDrivenConfig: PropertyCheckConfiguration = - PropertyCheckConfiguration(minSuccessful = 10000) - - import org.scalacheck.Shrink.shrinkAny - - "Generators.genDuplicateApiIdentifiers" should "generate API Identifiers with the same moduleName and entityName" in - forAll(genDuplicateModuleEntityApiIdentifiers) { ids => - ids.size should be >= 2 - val (packageIds, moduleNames, entityNames) = - ids.foldLeft((Set.empty[String], Set.empty[String], Set.empty[String])) { (b, a) => - (b._1 + a.packageId, b._2 + a.moduleName, b._3 + a.entityName) - } - - packageIds.size shouldBe ids.size - moduleNames.size shouldBe 1 - entityNames.size shouldBe 1 - } -} diff --git a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/HttpSpec.scala b/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/HttpSpec.scala deleted file mode 100644 index 34b099311f..0000000000 --- a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/HttpSpec.scala +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http - -import com.daml.nonempty.NonEmpty -import com.daml.scalatest.FreeSpecCheckLaws -import com.digitalasset.canton.http.{DisclosedContract, JwtPayload, JwtWritePayload} -import org.scalatest.freespec.AnyFreeSpec -import org.scalatest.matchers.should.Matchers -import scalaz.NonEmptyList - -final class HttpSpec extends AnyFreeSpec with Matchers with FreeSpecCheckLaws { - import HttpSpec.* - - private val userId = UserId("myUserId") - private val alice = Party("Alice") - private val bob = Party("Bob") - "JwtWritePayload" - { - "parties deduplicates between actAs/submitter and readAs" in { - val payload = - JwtWritePayload(userId, submitter = NonEmptyList(alice), readAs = List(alice, bob)) - payload.parties should ===(NonEmpty(Set, alice, bob)) - } - } - "JwtPayload" - { - "parties deduplicates between actAs and readAs" in { - val payload = JwtPayload(userId, actAs = List(alice), readAs = List(alice, bob)) - payload.map(_.parties) should ===(Some(NonEmpty(Set, alice, bob))) - } - "returns None if readAs and actAs are empty" in { - val payload = JwtPayload(userId, actAs = List(), readAs = List()) - payload shouldBe None - } - } - - "DisclosedContract" - { - import json.JsonProtocolTest.* - import scalaz.scalacheck.ScalazProperties as SZP - - "bitraverse" - { - checkLaws(SZP.traverse.laws[DisclosedContract]) - } - } -} - -object HttpSpec { - import scalaz.Equal - - implicit val eqDisclosedContract: Equal[DisclosedContract[Int]] = - Equal.equalA -} diff --git a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/PackageServiceTest.scala b/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/PackageServiceTest.scala deleted file mode 100644 index efe50779d2..0000000000 --- 
a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/PackageServiceTest.scala +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http - -import com.daml.ledger.api.v2 as lav2 -import com.digitalasset.canton.http.Generators.{ - genDuplicateModuleEntityTemplateIds, - genHttpTemplateIdPkgId, - nonEmptySetOf, -} -import com.digitalasset.canton.http.json.v1.PackageService -import com.digitalasset.canton.http.json.v1.PackageService.TemplateIdMap -import com.digitalasset.daml.lf.data.Ref -import org.scalacheck.Shrink -import org.scalatest.freespec.AnyFreeSpec -import org.scalatest.matchers.should.Matchers -import org.scalatest.{Inside, OptionValues} -import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks - -@SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements")) -class PackageServiceTest - extends AnyFreeSpec - with Matchers - with Inside - with ScalaCheckDrivenPropertyChecks - with OptionValues { - - import Shrink.shrinkAny - import ContractTypeId.withPkgRef - - def pkgRefName(s: String): Ref.PackageRef = - Ref.PackageRef.Name(Ref.PackageName.assertFromString(s)) - - implicit override val generatorDrivenConfig: PropertyCheckConfiguration = - PropertyCheckConfiguration(minSuccessful = 100) - - "PackageService.buildTemplateIdMap" - { - - "pass one specific test case that was failing" in { - val id0 = ContractTypeId.Template.fromLedgerApi(lav2.value.Identifier("a", "f4", "x")) - val id1 = ContractTypeId.Template.fromLedgerApi(lav2.value.Identifier("b", "f4", "x")) - val map = PackageService.buildTemplateIdMap(noPackageNames, Set(id0, id1)) - map.all.keySet shouldBe Set(id0, id1).map(withPkgRef) - } - - "TemplateIdMap.all should contain dups and unique identifiers" in - forAll( - nonEmptySetOf(genHttpTemplateIdPkgId), - genDuplicateModuleEntityTemplateIds, - ) { (xs, dups) => - val map = PackageService.buildTemplateIdMap(noPackageNames, (xs ++ dups)) - map.all.keySet should ===((xs ++ dups).map(withPkgRef)) - map.all.keySet should contain allElementsOf dups.map(withPkgRef) - map.all.keySet should contain allElementsOf xs.map(withPkgRef) - } - } - - "PackageService.resolveContractTypeId" - { - - "should resolve fully qualified Template ID" in forAll( - nonEmptySetOf(genHttpTemplateIdPkgId) - ) { ids => - val map = PackageService.buildTemplateIdMap(noPackageNames, ids) - ids.foreach { id => - val unresolvedId: ContractTypeId.Template.RequiredPkg = withPkgRef(id) - val resolved = (map resolve unresolvedId).value - resolved.original shouldBe withPkgRef(id) - resolved.latestPkgId shouldBe id - resolved.allPkgIds shouldBe Set(id) - } - } - - "should resolve by package name when single package id" in forAll( - nonEmptySetOf(genHttpTemplateIdPkgId) - ) { ids => - def pkgNameForPkgId(pkgId: String) = pkgId + "_name" - val idName = buildPackageNameMap(pkgNameForPkgId)(ids) // package_id:package_name is 1:1 - val map = PackageService.buildTemplateIdMap(idName, ids) - ids.foreach { id => - val pkgName = pkgRefName(pkgNameForPkgId(id.packageId)) - val unresolvedId: ContractTypeId.Template.RequiredPkg = id.copy(packageId = pkgName) - val resolved = (map resolve unresolvedId).value - resolved.original shouldBe id.copy(packageId = pkgName) - resolved.latestPkgId shouldBe id - resolved.allPkgIds shouldBe Set(id) - } - } - - "should resolve by package name when multiple package ids" in forAll( - 
genDuplicateModuleEntityTemplateIds
- ) { ids =>
- val idName = buildPackageNameMap(_ => "foo")(ids) // package_id:package_name is n:1
- val idWithMaxVer = ids.maxBy(id => packageVersionForId(id.packageId))
- val map = PackageService.buildTemplateIdMap(idName, ids)
- ids.foreach { id =>
- val unresolvedId: ContractTypeId.Template.RequiredPkg =
- id.copy(packageId = pkgRefName("foo"))
- val resolved = (map resolve unresolvedId).value
- resolved.original shouldBe id.copy(packageId = pkgRefName("foo"))
- resolved.latestPkgId shouldBe idWithMaxVer
- resolved.allPkgIds shouldBe ids
- }
- }
-
- "should return None for unknown Template ID" in forAll(
- Generators.genHttpTemplateIdO: org.scalacheck.Gen[ContractTypeId.RequiredPkg]
- ) { (templateId: ContractTypeId.RequiredPkg) =>
- val map = TemplateIdMap.Empty[ContractTypeId.Template]
- map resolve templateId shouldBe None
- }
- }
-
- "PackageService.allTemplateIds" - {
- "when no package names, should resolve to input ids" in forAll(
- nonEmptySetOf(genHttpTemplateIdPkgId)
- ) { ids =>
- val map = PackageService.buildTemplateIdMap(noPackageNames, ids)
- map.allIds.size shouldBe ids.size
- map.allIds.map(_.original) shouldBe ids.map(withPkgRef)
- map.allIds.map(_.latestPkgId) shouldBe ids
- map.allIds.flatMap(_.allPkgIds) shouldBe ids
- }
-
- "when each package id has a single package name, each has its own item" in forAll(
- nonEmptySetOf(genHttpTemplateIdPkgId)
- ) { ids =>
- def pkgNameForPkgId(pkgId: String) = pkgId + "_name"
- val idName = buildPackageNameMap(pkgNameForPkgId)(ids) // package_id:package_name is 1:1
- val map = PackageService.buildTemplateIdMap(idName, ids)
-
- map.allIds.size shouldBe ids.size
- map.allIds.map(_.original) shouldBe ids.map { id =>
- id.copy(packageId = pkgRefName(pkgNameForPkgId(id.packageId)))
- }
- map.allIds.map(_.latestPkgId) shouldBe ids
- map.allIds.map(_.allPkgIds) shouldBe ids.map(Set(_))
- }
-
- "when multiple package ids share a package name, they are collapsed into a single item" in forAll(
- genDuplicateModuleEntityTemplateIds
- ) { ids =>
- val idName = buildPackageNameMap(_ => "foo")(ids) // package_id:package_name is n:1
- val idWithMaxVer = ids.maxBy(id => packageVersionForId(id.packageId))
- val map = PackageService.buildTemplateIdMap(idName, ids)
-
- map.allIds.size shouldBe 1
- map.allIds.head.original shouldBe ids.head.copy(packageId = pkgRefName("foo"))
- map.allIds.head.latestPkgId shouldBe idWithMaxVer
- map.allIds.head.allPkgIds shouldBe ids
- }
- }
-
- // Arbitrary but deterministic assignment of package version to package id.
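// Editorial aside, not part of the diff: the "latest package id" behaviour the deleted
// tests assert (several package ids sharing one package name resolve to the id with the
// highest version) can be sketched independently. PkgEntry and latestFor are hypothetical
// illustrations, not Canton APIs.
final case class PkgEntry(pkgId: String, version: Int)

def latestFor(entries: Set[PkgEntry]): Option[PkgEntry] =
  entries.maxByOption(_.version) // mirrors `resolved.latestPkgId shouldBe idWithMaxVer`

// latestFor(Set(PkgEntry("a", 1), PkgEntry("b", 3), PkgEntry("c", 2))) == Some(PkgEntry("b", 3))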
- private def packageVersionForId(pkgId: String) = - Ref.PackageVersion.assertFromString(s"0.0.${pkgId.hashCode.abs}") - - private def buildPackageNameMap( - pkgNameForPkgId: (String => String) - )(ids: Set[_ <: ContractTypeId.RequiredPkgId]): PackageService.PackageNameMap = - PackageService.PackageNameMap( - ids - .flatMap { (id: ContractTypeId.RequiredPkgId) => - val pkgName = Ref.PackageName.assertFromString(pkgNameForPkgId(id.packageId.toString)) - val pkgVersion = packageVersionForId(id.packageId) - Set( - (Ref.PackageRef.Id(id.packageId), (pkgName, pkgVersion)), - (Ref.PackageRef.Name(pkgName), (pkgName, pkgVersion)), - ) - } - .toMap - .view - ) - - private val noPackageNames: PackageService.PackageNameMap = PackageService.PackageNameMap.empty -} diff --git a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/json/JsonProtocolTest.scala b/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/json/JsonProtocolTest.scala deleted file mode 100644 index 5521e736f9..0000000000 --- a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/json/JsonProtocolTest.scala +++ /dev/null @@ -1,374 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json - -import com.daml.scalautil.Statement.discard -import com.digitalasset.canton.http -import com.digitalasset.canton.http.Generators.{ - contractGen, - contractIdGen, - contractLocatorGen, - exerciseCmdGen, - genHttpTemplateId, - genServiceWarning, - genUnknownParties, - genUnknownTemplateIds, - genWarningsWrapper, -} -import com.digitalasset.canton.http.json.SprayJson.JsonReaderError -import com.digitalasset.canton.topology.SynchronizerId -import com.digitalasset.daml.lf.data.Ref -import org.apache.pekko.http.scaladsl.model.StatusCodes -import org.scalacheck.Arbitrary -import org.scalacheck.Arbitrary.arbitrary -import org.scalacheck.Gen.{identifier, listOf} -import org.scalatest.freespec.AnyFreeSpec -import org.scalatest.matchers.should.Matchers -import org.scalatest.{Assertion, Inside, Succeeded} -import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks -import scalaz.syntax.functor.* -import scalaz.syntax.tag.* -import scalaz.{-\/, \/, \/-} - -class JsonProtocolTest - extends AnyFreeSpec - with Matchers - with Inside - with ScalaCheckDrivenPropertyChecks { - - import JsonProtocol.* - import JsonProtocolTest.* - import spray.json.* - - implicit override val generatorDrivenConfig: PropertyCheckConfiguration = - PropertyCheckConfiguration(minSuccessful = 100) - - "http.ContractTypeId.RequiredPkg" - { - "can be serialized to JSON" in forAll(genHttpTemplateId) { - (a: http.ContractTypeId.RequiredPkg) => - inside(a.toJson) { case JsString(str) => - str should ===(s"${a.packageId}:${a.moduleName}:${a.entityName}") - } - } - "roundtrips" in forAll(genHttpTemplateId) { (a: http.ContractTypeId.RequiredPkg) => - val b = a.toJson.convertTo[http.ContractTypeId.RequiredPkg] - b should ===(a) - } - } - - "http.Base16" - { - "is case-insensitive" in forAll { (b16: http.Base16) => - val str = b16.toJson.convertTo[String] - all( - Seq(str.toUpperCase, str.toLowerCase) - .map(_.toJson.convertTo[http.Base16]) - ) should ===(b16) - } - } - - "http.Contract" - { - "can be serialized to JSON" in forAll(contractGen) { contract => - inside(SprayJson.encode(contract)) { case \/-(JsObject(fields)) => - inside(fields.toList) { - case 
List(("archived", JsObject(_))) => - case List(("created", JsObject(_))) => - } - } - } - "can be serialized and deserialized back to the same object" in forAll(contractGen) { - contract0 => - val actual: SprayJson.Error \/ http.Contract[JsValue] = for { - jsValue <- SprayJson.encode(contract0) - contract <- SprayJson.decode[http.Contract[JsValue]](jsValue) - } yield contract - - inside(actual) { case \/-(contract1) => - contract1 shouldBe contract0 - } - } - } - - "http.ContractLocator" - { - type Loc = http.ContractLocator[JsValue] - "roundtrips" in forAll(contractLocatorGen(arbitrary[Int] map (JsNumber(_)))) { (locator: Loc) => - locator.toJson.convertTo[Loc] should ===(locator) - } - } - - "http.DeduplicationPeriod" - { - @SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements")) - def roundtrip(p: http.DeduplicationPeriod, expected: JsValue) = { - SprayJson.encode(p) should ===(\/-(expected)) - SprayJson.decode[http.DeduplicationPeriod](expected) should ===(\/-(p)) - } - - "encodes durations" in { - roundtrip( - http.DeduplicationPeriod.Duration(10000L), - Map("type" -> "Duration".toJson, "durationInMillis" -> 10000L.toJson).toJson, - ) - } - - "encodes offsets" in { - roundtrip( - http.DeduplicationPeriod.Offset(Ref.HexString assertFromString "0123579236ab"), - Map("type" -> "Offset", "offset" -> "0123579236ab").toJson, - ) - } - } - - "http.ServiceWarning" - { - "UnknownTemplateIds serialization" in forAll(genUnknownTemplateIds) { x => - val expectedTemplateIds: Vector[JsValue] = x.unknownTemplateIds.view.map(_.toJson).toVector - val expected = JsObject("unknownTemplateIds" -> JsArray(expectedTemplateIds)) - x.toJson.asJsObject shouldBe expected - } - "UnknownParties serialization" in forAll(genUnknownParties) { x => - val expectedParties: Vector[JsValue] = x.unknownParties.view.map(_.toJson).toVector - val expected = JsObject("unknownParties" -> JsArray(expectedParties)) - x.toJson.asJsObject shouldBe expected - } - "roundtrips" in forAll(genServiceWarning) { x => - x.toJson.convertTo[http.ServiceWarning] === x - } - } - - "http.WarningsWrapper" - { - "serialization" in forAll(genWarningsWrapper) { x => - inside(x.toJson) { - case JsObject(fields) if fields.contains("warnings") && fields.sizeIs == 1 => - Succeeded - } - } - "roundtrips" in forAll(genWarningsWrapper) { x => - x.toJson.convertTo[http.AsyncWarningsWrapper] === x - } - } - - "http.OkResponse" - { - - "response with warnings" in forAll(listOf(genHttpTemplateId)) { - (templateIds: List[http.ContractTypeId.RequiredPkg]) => - val response: http.OkResponse[Int] = - http.OkResponse(result = 100, warnings = Some(http.UnknownTemplateIds(templateIds))) - - val responseJsVal: http.OkResponse[JsValue] = response.map(_.toJson) - - discard { - responseJsVal.toJson shouldBe JsObject( - "result" -> JsNumber(100), - "warnings" -> JsObject("unknownTemplateIds" -> templateIds.toJson), - "status" -> JsNumber(200), - ) - } - } - - "response without warnings" in forAll(identifier) { str => - val response: http.OkResponse[String] = - http.OkResponse(result = str, warnings = None) - - val responseJsVal: http.OkResponse[JsValue] = response.map(_.toJson) - - discard { - responseJsVal.toJson shouldBe JsObject( - "result" -> JsString(str), - "status" -> JsNumber(200), - ) - } - } - } - - "http.SyncResponse" - { - "Ok response parsed" in { - import SprayJson.decode1 - - val str = - """{"warnings":{"unknownTemplateIds":["ZZZ:AAA:BBB"]},"result":[],"status":200}""" - - inside(decode1[http.SyncResponse, List[JsValue]](str)) { - case 
\/-(http.OkResponse(List(), Some(warning), StatusCodes.OK)) => - warning shouldBe http.UnknownTemplateIds( - List(http.ContractTypeId(Ref.PackageRef.assertFromString("ZZZ"), "AAA", "BBB")) - ) - } - } - } - - "ErrorDetail" - { - "Encoding and decoding ResourceInfoDetail should result in the same object" in { - val resourceInfoDetail: http.ErrorDetail = http.ResourceInfoDetail("test", "test") - resourceInfoDetail shouldBe resourceInfoDetail.toJson.convertTo[http.ErrorDetail] - } - - "Encoding and decoding RetryInfoDetail should result in the same object" in { - val retryInfoDetail: http.ErrorDetail = - http.RetryInfoDetail( - http.RetryInfoDetailDuration( - scala.concurrent.duration.Duration.Zero: scala.concurrent.duration.Duration - ) - ) - retryInfoDetail shouldBe retryInfoDetail.toJson.convertTo[http.ErrorDetail] - } - - "Encoding and decoding RequestInfoDetail should result in the same object" in { - val requestInfoDetail: http.ErrorDetail = http.RequestInfoDetail("test") - requestInfoDetail shouldBe requestInfoDetail.toJson.convertTo[http.ErrorDetail] - } - - "Encoding and decoding ErrorInfoDetail should result in the same object" in { - val errorInfoDetail: http.ErrorDetail = - http.ErrorInfoDetail("test", Map("test" -> "test1", "test2" -> "test3")) - errorInfoDetail shouldBe errorInfoDetail.toJson.convertTo[http.ErrorDetail] - } - } - - "UserRight" - { - def testIsomorphic[T <: http.UserRight](original: T): Assertion = - (original: http.UserRight).toJson.convertTo[http.UserRight] shouldBe original - - "Encoding and decoding ParticipantAdmin should result in the same object" in { - testIsomorphic(http.ParticipantAdmin) - } - "Encoding and decoding IdentityProviderAdmin should result in the same object" in { - testIsomorphic(http.IdentityProviderAdmin) - } - "Encoding and decoding CanActAs should result in the same object" in { - testIsomorphic(http.CanActAs(http.Party("canActAs"))) - } - "Encoding and decoding CanReadAs should result in the same object" in { - testIsomorphic(http.CanReadAs(http.Party("canReadAs"))) - } - "Encoding and decoding CanExecuteAs should result in the same object" in { - testIsomorphic(http.CanExecuteAs(http.Party("canExecuteAs"))) - } - "Encoding and decoding CanReadAsAnyParty should result in the same object" in { - testIsomorphic(http.CanReadAsAnyParty) - } - "Encoding and decoding CanExecuteAsAnyParty should result in the same object" in { - testIsomorphic(http.CanExecuteAsAnyParty) - } - } - - "http.ExerciseCommand" - { - "should serialize to a JSON object with flattened reference fields" in forAll(exerciseCmdGen) { - cmd => - val actual: JsValue = cmd.toJson - val referenceFields: Map[String, JsValue] = cmd.reference.toJson.asJsObject.fields - val expectedFields: Map[String, JsValue] = referenceFields ++ Map[String, JsValue]( - "choice" -> JsString(cmd.choice.unwrap), - "argument" -> cmd.argument, - ) ++ Iterable( - cmd.choiceInterfaceId.map(x => "choiceInterfaceId" -> x.toJson), - cmd.meta.map(x => "meta" -> x.toJson), - ).collect { case Some(x) => x } - - actual shouldBe JsObject(expectedFields) - } - - "roundtrips" in forAll(exerciseCmdGen) { a => - val b = a.toJson - .convertTo[http.ExerciseCommand.RequiredPkg[JsValue, http.ContractLocator[JsValue]]] - b should ===(a) - } - } - - "http.CommandMeta" - { - "is entirely optional" in { - "{}".parseJson.convertTo[http.CommandMeta[JsValue]] should ===( - http.CommandMeta(None, None, None, None, None, None, None, None, None) - ) - } - - "is entirely optional when NoDisclosed" in { - 
"{}".parseJson.convertTo[http.CommandMeta.NoDisclosed] should ===( - http.CommandMeta(None, None, None, None, None, None, None, None, None) - ) - } - - "successfully parsed with synchronizerId" in { - """{"synchronizerId":"x::synchronizer"}""".parseJson - .convertTo[http.CommandMeta[JsValue]] should ===( - http.CommandMeta( - None, - None, - None, - None, - None, - None, - None, - Some(SynchronizerId.tryFromString("x::synchronizer")), - None, - ) - ) - } - } - - "http.DisclosedContract" - { - import http.DisclosedContract - type DC = DisclosedContract[Int] - - "roundtrips" in forAll { (a: DC) => - val b = a.toJson.convertTo[DC] - b should ===(a) - } - - "decodes a hand-written sample" in { - import com.google.protobuf.ByteString - val utf8 = java.nio.charset.Charset forName "UTF-8" - val expected = DisclosedContract( - contractId = http.ContractId("abcd"), - templateId = - http.ContractTypeId.Template(Ref.PackageRef.assertFromString("Pkg"), "Mod", "Tmpl"), - createdEventBlob = http.Base64(ByteString.copyFrom("some create event payload", utf8)), - ) - val encoded = - s"""{ - "contractId": "abcd", - "templateId": "Pkg:Mod:Tmpl", - "createdEventBlob": "c29tZSBjcmVhdGUgZXZlbnQgcGF5bG9hZA==" - }""".parseJson - val _ = expected.toJson should ===(encoded) - val decoded = - encoded.convertTo[DisclosedContract[http.ContractTypeId.Template.RequiredPkg]] - decoded should ===(expected) - } - - "fails to decode with an empty createdEventBlob" in { - val encoded = - s"""{ - "contractId": "abcd", - "templateId": "Pkg:Mod:Tmpl", - "createdEventBlob": "" - }""".parseJson - - val result = - SprayJson.decode[DisclosedContract[http.ContractTypeId.Template.RequiredPkg]](encoded) - inside(result) { case -\/(JsonReaderError(_, message)) => - message shouldBe "spray.json.DeserializationException: DisclosedContract.createdEventBlob must not be empty" - } - } - } -} - -object JsonProtocolTest { - // like Arbitrary(arbitrary[T].map(f)) but with inferred `T` - private[this] def arbArg[T: Arbitrary, R]( - f: T => R, - filterExpr: R => Boolean = (_: R) => true, - ): Arbitrary[R] = - Arbitrary(arbitrary[T] map f filter filterExpr) - - private[this] implicit val arbBase64: Arbitrary[http.Base64] = - http.Base64 subst arbArg(com.google.protobuf.ByteString.copyFrom(_: Array[Byte])) - - private implicit val arbBase16: Arbitrary[http.Base16] = - http.Base16 subst arbArg(com.google.protobuf.ByteString.copyFrom(_: Array[Byte])) - - private[this] implicit val arbCid: Arbitrary[http.ContractId] = - Arbitrary(contractIdGen) - - private[http] implicit def arbDisclosedCt[TpId: Arbitrary] - : Arbitrary[http.DisclosedContract[TpId]] = - arbArg((http.DisclosedContract.apply[TpId] _).tupled, !_.createdEventBlob.unwrap.isEmpty) -} diff --git a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/json/ProtocolConvertersTest.scala b/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/json/ProtocolConvertersTest.scala index 38d1c8ebf2..d949d71588 100644 --- a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/json/ProtocolConvertersTest.scala +++ b/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/json/ProtocolConvertersTest.scala @@ -30,12 +30,9 @@ import magnolify.scalacheck.semiauto.ArbitraryDerivation import org.scalacheck.{Arbitrary, Gen} import org.scalatest.wordspec.AnyWordSpec -import scala.annotation.nowarn import scala.concurrent.{ExecutionContext, Future} import scala.reflect.ClassTag -// TODO(#23504) 
remove -@nowarn("cat=deprecation") class ProtocolConvertersTest extends AnyWordSpec with BaseTest with HasExecutionContext { import StdGenerators.* import Arbitraries.* @@ -70,8 +67,7 @@ class ProtocolConvertersTest extends AnyWordSpec with BaseTest with HasExecution JsMapping(converters.InterfaceView), JsMapping(converters.Event), JsMapping(converters.Transaction), - JsMapping(converters.TransactionTree), - JsMapping(converters.SubmitAndWaitTransactionTreeResponse), + JsMapping(converters.SubmitAndWaitTransactionTreeResponseLegacy), JsMapping(converters.SubmitAndWaitTransactionResponse), JsMapping(converters.SubmitAndWaitForReassignmentResponse), JsMapping(converters.SubmitAndWaitForTransactionRequest), @@ -84,7 +80,7 @@ class ProtocolConvertersTest extends AnyWordSpec with BaseTest with HasExecution JsMapping(converters.Reassignment), JsMapping(converters.GetUpdatesResponse), JsMapping(converters.GetUpdateTreesResponseLegacy), - JsMapping(converters.GetTransactionResponse), + JsMapping(converters.GetTransactionResponseLegacy), // JsMapping(converters.PrepareSubmissionRequest),//we only need toJson // JsMapping(converters.PrepareSubmissionResponse), // we only need toJson // JsMapping(converters.ExecuteSubmissionRequest), // we only need fromJson @@ -148,10 +144,6 @@ object Arbitraries { nonEmptyScalaPbOneOf( ArbitraryDerivation[lapi.topology_transaction.TopologyEvent.Event] ) - implicit val arbTreeEventKind: Arbitrary[lapi.transaction.TreeEvent.Kind] = - nonEmptyScalaPbOneOf( - ArbitraryDerivation[lapi.transaction.TreeEvent.Kind] - ) implicit val arbTreeEventLegacyKind: Arbitrary[LegacyDTOs.TreeEvent.Kind] = { val arb = ArbitraryDerivation[LegacyDTOs.TreeEvent.Kind] Arbitrary { @@ -181,6 +173,29 @@ object Arbitraries { ) } } + implicit val arbSubmitAndWaitTransactionTreeResponseLegacy + : Arbitrary[LegacyDTOs.SubmitAndWaitForTransactionTreeResponse] = { + val arb = ArbitraryDerivation[LegacyDTOs.SubmitAndWaitForTransactionTreeResponse] + Arbitrary { + retryUntilSome( + arb.arbitrary.sample.filter(_.transaction.isDefined) + ).getOrElse( + throw new RuntimeException( + "Failed to generate non-empty SubmitAndWaitForTransactionTreeResponse" + ) + ) + } + } + implicit val arbGetTransactionResponseLegacy: Arbitrary[LegacyDTOs.GetTransactionResponse] = { + val arb = ArbitraryDerivation[LegacyDTOs.GetTransactionResponse] + Arbitrary { + retryUntilSome( + arb.arbitrary.sample.filter(_.transaction.isDefined) + ).getOrElse( + throw new RuntimeException("Failed to generate non-empty GetTransactionResponse") + ) + } + } } class MockSchemaProcessor()(implicit val executionContext: ExecutionContext) diff --git a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/json/TranscodePackageIdResolverTest.scala b/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/json/TranscodePackageIdResolverTest.scala index 412f32e394..b77e21dd6b 100644 --- a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/json/TranscodePackageIdResolverTest.scala +++ b/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/json/TranscodePackageIdResolverTest.scala @@ -19,8 +19,8 @@ import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors.{ import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.platform.PackagePreferenceBackend import com.digitalasset.canton.platform.PackagePreferenceBackend.PackageFilterRestriction -import 
com.digitalasset.canton.platform.store.packagemeta.PackageMetadata -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata.{ +import com.digitalasset.canton.store.packagemeta.PackageMetadata +import com.digitalasset.canton.store.packagemeta.PackageMetadata.{ LocalPackagePreference, PackageResolution, } diff --git a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/json/v1/RouteSetupTest.scala b/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/json/v1/RouteSetupTest.scala deleted file mode 100644 index f1e73aadd0..0000000000 --- a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/json/v1/RouteSetupTest.scala +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.json.v1 - -import org.scalatest.freespec.AnyFreeSpec -import org.scalatest.matchers.should.Matchers - -class RouteSetupTest extends AnyFreeSpec with Matchers { - "Forwarded" - { - import com.digitalasset.canton.http.json.v1.RouteSetup.Forwarded - "can 'parse' sample" in { - Forwarded("for=192.168.0.1;proto=http;by=192.168.0.42").proto should ===(Some("http")) - } - - "can 'parse' quoted sample" in { - Forwarded("for=192.168.0.1;proto = \"https\" ;by=192.168.0.42").proto should ===( - Some("https") - ) - } - } -} diff --git a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/util/FlowUtilTest.scala b/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/util/FlowUtilTest.scala deleted file mode 100644 index 9a9d80c43a..0000000000 --- a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/util/FlowUtilTest.scala +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
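// Editorial aside, not part of the diff: the deleted RouteSetupTest above exercises
// extraction of the `proto` directive from an RFC 7239 Forwarded header, tolerating
// whitespace and optional quoting. A hedged sketch of that parsing; `protoOf` is a
// hypothetical helper, not Canton's RouteSetup.Forwarded implementation.
object ForwardedSketch {
  private val Proto = """(?i)proto\s*=\s*"?([A-Za-z]+)"?""".r.unanchored

  def protoOf(forwarded: String): Option[String] =
    forwarded match {
      case Proto(p) => Some(p.toLowerCase) // first proto= directive, unquoted if needed
      case _ => None
    }
}
// ForwardedSketch.protoOf("for=192.168.0.1;proto = \"https\" ;by=192.168.0.42")
// returns Some("https"), matching the quoted sample in the deleted test.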
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.util - -import org.apache.pekko.NotUsed -import org.apache.pekko.actor.ActorSystem -import org.apache.pekko.stream.Materializer -import org.apache.pekko.stream.scaladsl.Source -import org.scalacheck.{Arbitrary, Gen} -import org.scalatest.concurrent.ScalaFutures -import org.scalatest.flatspec.AnyFlatSpec -import org.scalatest.matchers.should.Matchers -import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks -import scalaz.{-\/, \/, \/-} - -import scala.concurrent.Future -import scala.concurrent.duration.* - -class FlowUtilTest - extends AnyFlatSpec - with ScalaFutures - with Matchers - with ScalaCheckDrivenPropertyChecks { - import com.digitalasset.canton.http.util.FlowUtil.* - - implicit val asys: ActorSystem = ActorSystem(this.getClass.getSimpleName) - implicit val materializer: Materializer = Materializer(asys) - - "allowOnlyFirstInput" should "pass 1st message through and replace all others with errors" in forAll( - nonEmptyVectorOfInts - ) { (xs: Vector[Int]) => - val error = "Error" - val errorNum = Math.max(xs.size - 1, 0) - val expected: Vector[String \/ Int] = - xs.take(1).map(\/-(_)) ++ Vector.fill(errorNum)(-\/(error)) - val input: Source[String \/ Int, NotUsed] = - Source.fromIterator(() => xs.iterator).map(\/-(_)) - - val actualF: Future[Vector[String \/ Int]] = - input - .via(allowOnlyFirstInput[String, Int](error)) - .runFold(Vector.empty[String \/ Int])(_ :+ _) - - whenReady(actualF, timeout(5.seconds), interval(100.milliseconds)) { actual => - actual shouldBe expected - } - } - - private val nonEmptyVectorOfInts: Gen[Vector[Int]] = - Gen.nonEmptyBuildableOf[Vector[Int], Int](Arbitrary.arbitrary[Int]) -} diff --git a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/util/JwtPartiesTest.scala b/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/util/JwtPartiesTest.scala deleted file mode 100644 index 6d2e0cfefe..0000000000 --- a/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/util/JwtPartiesTest.scala +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
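// Editorial aside, not part of the diff: the deleted FlowUtilTest above checks a flow
// that lets the first element through and turns every later element into an error. A
// minimal Pekko Streams sketch of that shape, using Either in place of the scalaz
// disjunction; `firstOnly` is hypothetical, not FlowUtil.allowOnlyFirstInput itself.
import org.apache.pekko.NotUsed
import org.apache.pekko.stream.scaladsl.Flow

def firstOnly[E, A](error: E): Flow[Either[E, A], Either[E, A], NotUsed] =
  Flow[Either[E, A]].statefulMapConcat { () =>
    var seenFirst = false // fresh state per materialization
    elem => {
      val out = if (seenFirst) Left(error) else elem
      seenFirst = true
      out :: Nil
    }
  }
// Source(List(Right(1), Right(2), Right(3))).via(firstOnly("Error")) emits
// Right(1), Left("Error"), Left("Error"), as the deleted property expects.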
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.http.util - -import com.daml.nonempty.NonEmpty -import com.daml.nonempty.NonEmptyReturningOps.* -import com.daml.scalautil.Statement.discard -import com.digitalasset.canton.http -import com.digitalasset.canton.http.EndpointsCompanion.Unauthorized -import com.digitalasset.canton.http.{JwtPayload, JwtWritePayload} -import com.digitalasset.daml.lf.value.test.ValueGenerators.party as partyGen -import org.scalacheck.{Arbitrary, Gen} -import org.scalatest.OptionValues -import org.scalatest.concurrent.ScalaFutures -import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.AnyWordSpec -import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks -import scalaz.scalacheck.ScalazArbitrary.* -import scalaz.std.set.* -import scalaz.syntax.foldable1.* -import scalaz.{-\/, NonEmptyList, \/-} - -import Arbitrary.arbitrary - -class JwtPartiesTest - extends AnyWordSpec - with ScalaFutures - with Matchers - with ScalaCheckDrivenPropertyChecks - with OptionValues { - import JwtPartiesTest.* - - "ensureReadAsAllowedByJwt" should { - import JwtParties.{ensureReadAsAllowedByJwt, EnsureReadAsDisallowedError} - - "always allow missing readAs" in forAll { (jp: JwtPayload) => - ensureReadAsAllowedByJwt(None, jp) should ===(\/-(())) - } - - "allow any subset" in forAll { (jp: JwtPayload) => - val half = NonEmpty.from(jp.parties take (1 max (jp.parties.size / 2))).value - ensureReadAsAllowedByJwt(Some(half.toNEF.toNel), jp) should ===(\/-(())) - } - - "disallow any party not in jwt" in forAll { (p: http.Party, jp: JwtPayload) => - whenever(!jp.parties(p)) { - ensureReadAsAllowedByJwt(Some(NonEmptyList(p)), jp) should ===( - -\/(Unauthorized(s"$EnsureReadAsDisallowedError: $p")) - ) - } - } - } - - "resolveRefParties" should { - import JwtParties.resolveRefParties - - // ensures compatibility with old behavior - "use Jwt if explicit spec is absent" in forAll { (jwp: JwtWritePayload) => - discard(resolveRefParties(None, jwp) should ===(jwp.parties)) - resolveRefParties( - Some(http.CommandMeta(None, None, None, None, None, None, None, None, None)), - jwp, - ) should ===( - jwp.parties - ) - } - - "ignore Jwt if full explicit spec is present" in forAll { - (actAs: NonEmptyList[http.Party], readAs: List[http.Party], jwp: JwtWritePayload) => - resolveRefParties( - Some(partiesOnlyMeta(actAs = actAs, readAs = readAs)), - jwp, - ) should ===(actAs.toSet1 ++ readAs) - } - } -} - -object JwtPartiesTest { - private val irrelevantUserId = http.UserId("bar") - - private implicit val arbParty: Arbitrary[http.Party] = Arbitrary( - http.Party.subst(partyGen: Gen[String]) - ) - - private implicit val arbJwtR: Arbitrary[JwtPayload] = - Arbitrary(arbitrary[(Boolean, http.Party, List[http.Party], List[http.Party])].map { - case (neAct, extra, actAs, readAs) => - http - .JwtPayload( - irrelevantUserId, - actAs = if (neAct) extra :: actAs else actAs, - readAs = if (!neAct) extra :: readAs else readAs, - ) - .getOrElse(sys.error("should have satisfied JwtPayload invariant")) - }) - - private implicit val arbJwtW: Arbitrary[JwtWritePayload] = - Arbitrary( - arbitrary[(NonEmptyList[http.Party], List[http.Party])].map { case (submitter, readAs) => - JwtWritePayload( - irrelevantUserId, - submitter = submitter, - readAs = readAs, - ) - } - ) - - private[http] def partiesOnlyMeta(actAs: NonEmptyList[http.Party], readAs: List[http.Party]) = - http.CommandMeta( - commandId = None, - actAs = Some(actAs), - readAs = Some(readAs), - submissionId = 
None,
- workflowId = None,
- deduplicationPeriod = None,
- disclosedContracts = None,
- synchronizerId = None,
- packageIdSelectionPreference = None,
- )
-}
diff --git a/canton/community/ledger/ledger-json-client/src/test/scala/com/digitalasset/canton/openapi/CantonGenerators.scala b/canton/community/ledger/ledger-json-client/src/test/scala/com/digitalasset/canton/openapi/CantonGenerators.scala
index d396844cd6..1d9e383a7b 100644
--- a/canton/community/ledger/ledger-json-client/src/test/scala/com/digitalasset/canton/openapi/CantonGenerators.scala
+++ b/canton/community/ledger/ledger-json-client/src/test/scala/com/digitalasset/canton/openapi/CantonGenerators.scala
@@ -16,13 +16,11 @@ object CantonGenerators {
 }

 // We define custom generators for the enums here, so that UNRECOGNIZED values are not generated
- implicit val arbSignatureFormat
- : Arbitrary[lapi.interactive.interactive_submission_service.SignatureFormat] =
- enumArbitrary(lapi.interactive.interactive_submission_service.SignatureFormat.enumCompanion)
- implicit val arbSigningAlgorithmSpec
- : Arbitrary[lapi.interactive.interactive_submission_service.SigningAlgorithmSpec] =
+ implicit val arbSignatureFormat: Arbitrary[lapi.crypto.SignatureFormat] =
+ enumArbitrary(lapi.crypto.SignatureFormat.enumCompanion)
+ implicit val arbSigningAlgorithmSpec: Arbitrary[lapi.crypto.SigningAlgorithmSpec] =
 enumArbitrary(
- lapi.interactive.interactive_submission_service.SigningAlgorithmSpec.enumCompanion
+ lapi.crypto.SigningAlgorithmSpec.enumCompanion
 )
 implicit val arbHashingSchemeVersion
 : Arbitrary[lapi.interactive.interactive_submission_service.HashingSchemeVersion] =
diff --git a/canton/community/ledger/ledger-json-client/src/test/scala/com/digitalasset/canton/openapi/OpenapiTypesTest.scala b/canton/community/ledger/ledger-json-client/src/test/scala/com/digitalasset/canton/openapi/OpenapiTypesTest.scala
index fa3334be65..ee5d43769a 100644
--- a/canton/community/ledger/ledger-json-client/src/test/scala/com/digitalasset/canton/openapi/OpenapiTypesTest.scala
+++ b/canton/community/ledger/ledger-json-client/src/test/scala/com/digitalasset/canton/openapi/OpenapiTypesTest.scala
@@ -15,8 +15,8 @@ import org.scalatest.Assertion
 import org.scalatest.Inspectors.forAll
 import org.scalatest.matchers.should.Matchers
 import org.scalatest.wordspec.AnyWordSpec
+import org.slf4j.LoggerFactory

-import scala.annotation.nowarn
 import scala.jdk.CollectionConverters.*
 import scala.reflect.ClassTag
 import scala.util.Using
@@ -27,20 +27,32 @@ import scala.util.control.NonFatal
 * The reason for this test is that tapir can generate openapi that is not in sync with the circe
 * codec; we try to detect such cases and fix them.
 *
+ * To recap:
+ * - Codec = a Circe Encoder + Decoder pair that translates a Scala object to JSON and back
+ * - Schema = information that tells Tapir how to represent a Scala object in OpenAPI. The schema
+ * is then used by Tapir to generate the OpenAPI spec (openapi.yaml).
+ *
 * Also, existing code generators are often buggy -> we try to detect such cases, at least for
 * Java (here).
 *
 * The test generates multiple samples; unfortunately no seed is used, so the examples will be
 * different on every run. (Introducing a seed would complicate the code a lot.)
+ *
+ * If you have issues:
+ * - check how the type is represented in openapi.yaml
+ * - try to serialize an example instance
+ * - inspect the string
+ * - try to deserialize the string with the openapi-generated class
+ *
+ * Do not forget to regenerate the openapi definitions (openapi.yaml).
See GenerateJSONApiDocs. */ -// TODO(#23504) remove suppression of deprecation warnings -@nowarn("cat=deprecation") class OpenapiTypesTest extends AnyWordSpec with Matchers { // this can be increased locally // with 100 examples tests take 5 minutes on my machine // 20 is a modest value to ensure CI is not overloaded private val randomSamplesPerMappedClass = 20 private val allMappingExamples = Mappings.allMappings + private val logger = LoggerFactory.getLogger(getClass) def checkType[T, V]( fromJson: (String) => V @@ -52,12 +64,16 @@ class OpenapiTypesTest extends AnyWordSpec with Matchers { ): Assertion = { val sample = arb.arbitrary.sample - val initialCirceJson = sample.map(encoder(_)).map(_.toString()).toRight("-- no sample --") + val initialCirceJson = sample.map(encoder(_)).map(x => x.toString()).toRight("-- no sample --") val javaObject = try { initialCirceJson.map(fromJson) } catch { case NonFatal(error) => + logger.error( + s"Parse error detected for class $classTag when attempting to parse the generated json.\n json-error: $error\n sample: $sample\n encoded-json: $initialCirceJson", + error, + ) throw new RuntimeException( s"parse error, class $classTag json: $error\n $initialCirceJson", error, @@ -141,12 +157,13 @@ class OpenapiTypesTest extends AnyWordSpec with Matchers { import com.digitalasset.canton.http.json.v2.JsInteractiveSubmissionServiceCodecs.* import com.digitalasset.canton.http.json.v2.JsIdentityProviderCodecs.* import com.digitalasset.canton.http.json.v2.JsVersionServiceCodecs.* + import com.digitalasset.canton.http.json.v2.JsSchema.Crypto.* import magnolify.scalacheck.auto.* // as stated above this split is needed to ensure that mappings initialization do not exceed max 64kB method size val allMappings = - JsMappings1.value ++ JsMappings2.value ++ GrpcMappings1.value ++ GrpcMappings2.value ++ GrpcMappings3.value + JsMappings1.value ++ JsMappings2.value ++ GrpcMappings1.value ++ GrpcMappings2.value ++ GrpcMappings3.value ++ GrpcMappings4.value object GrpcMappings1 { val value: Seq[Mapping[_, _]] = Seq( @@ -339,7 +356,7 @@ class OpenapiTypesTest extends AnyWordSpec with Matchers { Mapping[v2.transaction_filter.Filters, openapi.Filters]( openapi.Filters.fromJson ), - Mapping[v2.state_service.GetActiveContractsRequest, openapi.GetActiveContractsRequest]( + Mapping[LegacyDTOs.GetActiveContractsRequest, openapi.GetActiveContractsRequest]( openapi.GetActiveContractsRequest.fromJson ), Mapping[ @@ -416,11 +433,11 @@ class OpenapiTypesTest extends AnyWordSpec with Matchers { object GrpcMappings2 { val value: Seq[Mapping[_, _]] = Seq( - Mapping[v2.update_service.GetTransactionByIdRequest, openapi.GetTransactionByIdRequest]( + Mapping[LegacyDTOs.GetTransactionByIdRequest, openapi.GetTransactionByIdRequest]( openapi.GetTransactionByIdRequest.fromJson ), Mapping[ - v2.update_service.GetTransactionByOffsetRequest, + LegacyDTOs.GetTransactionByOffsetRequest, openapi.GetTransactionByOffsetRequest, ]( openapi.GetTransactionByOffsetRequest.fromJson @@ -431,7 +448,7 @@ class OpenapiTypesTest extends AnyWordSpec with Matchers { Mapping[v2.update_service.GetUpdateByOffsetRequest, openapi.GetUpdateByOffsetRequest]( openapi.GetUpdateByOffsetRequest.fromJson ), - Mapping[v2.update_service.GetUpdatesRequest, openapi.GetUpdatesRequest]( + Mapping[LegacyDTOs.GetUpdatesRequest, openapi.GetUpdatesRequest]( openapi.GetUpdatesRequest.fromJson ), Mapping[v2.admin.user_management_service.GetUserResponse, openapi.GetUserResponse]( @@ -636,7 +653,7 @@ class OpenapiTypesTest extends AnyWordSpec with 
Matchers { Mapping[v2.admin.user_management_service.Right, openapi.Right]( openapi.Right.fromJson ), - Mapping[v2.interactive.interactive_submission_service.Signature, openapi.Signature]( + Mapping[v2.crypto.Signature, openapi.Signature]( openapi.Signature.fromJson ), Mapping[ @@ -694,7 +711,7 @@ class OpenapiTypesTest extends AnyWordSpec with Matchers { Mapping[v2.trace_context.TraceContext, openapi.TraceContext]( openapi.TraceContext.fromJson ), - Mapping[v2.transaction_filter.TransactionFilter, openapi.TransactionFilter]( + Mapping[LegacyDTOs.TransactionFilter, openapi.TransactionFilter]( openapi.TransactionFilter.fromJson ), Mapping[v2.transaction_filter.TransactionFormat, openapi.TransactionFormat]( @@ -784,8 +801,143 @@ class OpenapiTypesTest extends AnyWordSpec with Matchers { ]( openapi.ExecuteSubmissionAndWaitResponse.fromJson ), + Mapping[v2.package_reference.VettedPackages, openapi.VettedPackages]( + openapi.VettedPackages.fromJson + ), + Mapping[v2.package_reference.VettedPackage, openapi.VettedPackage]( + openapi.VettedPackage.fromJson + ), + Mapping[ + v2.package_service.PackageMetadataFilter, + openapi.PackageMetadataFilter, + ]( + openapi.PackageMetadataFilter.fromJson + ), + Mapping[ + v2.package_service.TopologyStateFilter, + openapi.TopologyStateFilter, + ]( + openapi.TopologyStateFilter.fromJson + ), + Mapping[ + v2.package_service.ListVettedPackagesResponse, + openapi.ListVettedPackagesResponse, + ]( + openapi.ListVettedPackagesResponse.fromJson + ), + Mapping[ + v2.package_service.ListVettedPackagesRequest, + openapi.ListVettedPackagesRequest, + ]( + openapi.ListVettedPackagesRequest.fromJson + ), + Mapping[ + v2.admin.package_management_service.UpdateVettedPackagesResponse, + openapi.UpdateVettedPackagesResponse, + ]( + openapi.UpdateVettedPackagesResponse.fromJson + ), + Mapping[ + v2.admin.package_management_service.UpdateVettedPackagesRequest, + openapi.UpdateVettedPackagesRequest, + ]( + openapi.UpdateVettedPackagesRequest.fromJson + ), + Mapping[ + v2.admin.package_management_service.UpdateVettedPackagesRequest, + openapi.UpdateVettedPackagesRequest, + ]( + openapi.UpdateVettedPackagesRequest.fromJson + ), + Mapping[ + v2.admin.package_management_service.VettedPackagesRef, + openapi.VettedPackagesRef, + ]( + openapi.VettedPackagesRef.fromJson + ), + Mapping[v2.version_service.PackageFeature, openapi.PackageFeature]( + openapi.PackageFeature.fromJson + ), + Mapping[ + v2.admin.package_management_service.VettedPackagesChange, + openapi.VettedPackagesChange, + ]( + openapi.VettedPackagesChange.fromJson + ), + Mapping[v2.admin.package_management_service.VettedPackagesChange.Vet, openapi.Vet1]( + openapi.Vet1.fromJson + ), + Mapping[ + v2.admin.package_management_service.VettedPackagesChange.Operation.Vet, + openapi.Vet, + ]( + openapi.Vet.fromJson + ), + Mapping[v2.admin.package_management_service.VettedPackagesChange.Unvet, openapi.Unvet1]( + openapi.Unvet1.fromJson + ), + Mapping[ + v2.admin.package_management_service.VettedPackagesChange.Operation.Unvet, + openapi.Unvet, + ]( + openapi.Unvet.fromJson + ), + Mapping[ + v2.package_reference.PriorTopologySerial, + openapi.PriorTopologySerial, + ]( + openapi.PriorTopologySerial.fromJson + ), + Mapping[ + v2.package_reference.PriorTopologySerial.Serial.Prior, + openapi.Prior, + ]( + openapi.Prior.fromJson + ), + Mapping[ + v2.admin.party_management_service.AllocateExternalPartyRequest.SignedTransaction, + openapi.SignedTransaction, + ]( + openapi.SignedTransaction.fromJson + ), + Mapping[ + 
v2.admin.party_management_service.AllocateExternalPartyRequest, + openapi.AllocateExternalPartyRequest, + ]( + openapi.AllocateExternalPartyRequest.fromJson + ), + Mapping[ + v2.admin.party_management_service.AllocateExternalPartyResponse, + openapi.AllocateExternalPartyResponse, + ]( + openapi.AllocateExternalPartyResponse.fromJson + ), + ) + } + + object GrpcMappings4 { + val value: Seq[Mapping[_, _]] = Seq( + Mapping[ + v2.crypto.SigningPublicKey, + openapi.SigningPublicKey, + ]( + openapi.SigningPublicKey.fromJson + ), + Mapping[ + v2.admin.party_management_service.GenerateExternalPartyTopologyRequest, + openapi.GenerateExternalPartyTopologyRequest, + ]( + openapi.GenerateExternalPartyTopologyRequest.fromJson + ), + Mapping[ + v2.admin.party_management_service.GenerateExternalPartyTopologyResponse, + openapi.GenerateExternalPartyTopologyResponse, + ]( + openapi.GenerateExternalPartyTopologyResponse.fromJson + ), ) } + object JsMappings1 { val value: Seq[Mapping[_, _]] = Seq( diff --git a/canton/community/lib/magnolify/src/main/scala/com/digitalasset/canton/config/CantonConfigPrevalidator.scala b/canton/community/lib/magnolify/src/main/scala/com/digitalasset/canton/config/CantonConfigPrevalidator.scala index b2eb227be8..bf074165a8 100644 --- a/canton/community/lib/magnolify/src/main/scala/com/digitalasset/canton/config/CantonConfigPrevalidator.scala +++ b/canton/community/lib/magnolify/src/main/scala/com/digitalasset/canton/config/CantonConfigPrevalidator.scala @@ -3,6 +3,8 @@ package com.digitalasset.canton.config +import scala.annotation.implicitNotFound + /** Type class for the validation checks that are specific for a particular configuration class. * Unlike [[CantonConfigValidator]], the checks performed by instances of this type class do not * recurse into subconfigurations. @@ -19,6 +21,9 @@ package com.digitalasset.canton.config * @tparam A * The configuration class to validate */ +@implicitNotFound( + "Could not find a suitable CantonConfigPrevalidator for ${A} to determine the necessary validations. In typical cases, ${A} should extend UniformCantonConfigValidation, EnterpriseOnlyCantonConfigValidation, or CommunityOnlyCantonConfigValidation." 
+) trait CantonConfigPrevalidator[-A] { /** Checks the configuration `config` for validity in the given [[CantonEdition]] and returns the diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNode.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNode.scala index a25b3369f3..b9b96755e4 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNode.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNode.scala @@ -14,7 +14,7 @@ import com.digitalasset.canton.auth.CantonAdminTokenDispenser import com.digitalasset.canton.common.sequencer.grpc.SequencerInfoLoader import com.digitalasset.canton.concurrent.ExecutionContextIdlenessExecutorService import com.digitalasset.canton.config.AdminTokenConfig -import com.digitalasset.canton.config.RequireTypes.{Port, PositiveInt} +import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.connection.GrpcApiInfoService import com.digitalasset.canton.connection.v30.ApiInfoServiceGrpc import com.digitalasset.canton.crypto.{ @@ -51,10 +51,7 @@ import com.digitalasset.canton.participant.pruning.{AcsCommitmentProcessor, Prun import com.digitalasset.canton.participant.scheduler.ParticipantPruningScheduler import com.digitalasset.canton.participant.store.* import com.digitalasset.canton.participant.store.SynchronizerConnectionConfigStore.Active -import com.digitalasset.canton.participant.store.memory.{ - MutablePackageMetadataViewImpl, - PackageMetadataView, -} +import com.digitalasset.canton.participant.store.memory.MutablePackageMetadataViewImpl import com.digitalasset.canton.participant.sync.* import com.digitalasset.canton.participant.sync.ConnectedSynchronizer.SubmissionReady import com.digitalasset.canton.participant.synchronizer.SynchronizerAliasManager @@ -68,14 +65,12 @@ import com.digitalasset.canton.resource.* import com.digitalasset.canton.scheduler.{Schedulers, SchedulersImpl} import com.digitalasset.canton.sequencing.client.{RecordingConfig, ReplayConfig, SequencerClient} import com.digitalasset.canton.store.IndexedStringStore +import com.digitalasset.canton.store.packagemeta.PackageMetadata import com.digitalasset.canton.time.* import com.digitalasset.canton.time.admin.v30.SynchronizerTimeServiceGrpc import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.admin.grpc.PSIdLookup -import com.digitalasset.canton.topology.client.{ - StoreBasedTopologySnapshot, - SynchronizerTopologyClient, -} +import com.digitalasset.canton.topology.client.SynchronizerTopologyClient import com.digitalasset.canton.topology.store.TopologyStoreId.{AuthorizedStore, SynchronizerStore} import com.digitalasset.canton.topology.store.{PartyMetadataStore, TopologyStore, TopologyStoreId} import com.digitalasset.canton.topology.transaction.HostingParticipant @@ -120,7 +115,8 @@ class ParticipantNodeBootstrap( ](arguments) { private val cantonSyncService = new SingleUseCell[CantonSyncService] - private val packageDependencyResolver = new SingleUseCell[PackageDependencyResolver] + private val mutablePackageMetadataView = new SingleUseCell[MutablePackageMetadataViewImpl] + private val packageDependencyResolver = new SingleUseCell[PackageDependencyResolver.Impl] private val packageUpgradeValidator = new PackageUpgradeValidator( arguments.parameterConfig.general.cachingConfigs.packageUpgradeCache, loggerFactory, @@ -130,7 +126,12 @@ 
class ParticipantNodeBootstrap( override protected val adminTokenConfig: AdminTokenConfig = config.ledgerApi.adminTokenConfig.merge(config.adminApi.adminTokenConfig) - private def tryGetPackageDependencyResolver(): PackageDependencyResolver = + private def tryGetMutablePackageMetadataView(): MutablePackageMetadataViewImpl = + mutablePackageMetadataView.getOrElse( + sys.error("mutablePackageMetadataView should be defined") + ) + + private def tryGetPackageDependencyResolver(): PackageDependencyResolver.Impl = packageDependencyResolver.getOrElse( sys.error("packageDependencyResolver should be defined") ) @@ -183,7 +184,29 @@ class ParticipantNodeBootstrap( authorizedStore: TopologyStore[AuthorizedStore], storage: Storage, ): AuthorizedTopologyManager = { - val resolver = new PackageDependencyResolver( + val store = DamlPackageStore( + storage, + arguments.futureSupervisor, + arguments.parameterConfig, + exitOnFatalFailures = parameters.exitOnFatalFailures, + loggerFactory, + ) + + val packageMetadataView = new MutablePackageMetadataViewImpl( + clock, + store, + packageUpgradeValidator, + loggerFactory, + config.parameters.packageMetadataView, + timeouts, + arguments.futureSupervisor, + exitOnFatalFailures = parameters.exitOnFatalFailures, + ) + + mutablePackageMetadataView.putIfAbsent(packageMetadataView).discard + + val resolver = new PackageDependencyResolver.Impl( + participantId = ParticipantId(nodeId), damlPackageStore = DamlPackageStore( storage, arguments.futureSupervisor, @@ -214,14 +237,6 @@ class ParticipantNodeBootstrap( cantonSyncService.get .traverse(_.ledgerApiIndexer.asEval.value.ledgerApiStore.value.ledgerEnd) .map(_.flatten) - - def getPackageMetadataView(): Option[PackageMetadataView] = - // In some rare cases, it is possible to vet packages before the package service is created. - // For instance, in a major upgrade, we import a topology snapshot as soon as the node - // topology is ready, and before the participant services are created. - // In such case, we cannot get a proper PackageMetadata snapshot and we bypass the upgrade checks. 
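// Editorial aside, not part of the diff: the bootstrap code above stores late-created
// services in single-use cells and fails loudly when they are read too early. A hedged
// sketch of that set-once pattern with an AtomicReference; MiniCell is a hypothetical
// stand-in for Canton's SingleUseCell, not its actual implementation.
import java.util.concurrent.atomic.AtomicReference

final class MiniCell[A] {
  private val ref = new AtomicReference[Option[A]](None)
  // Succeeds only for the first writer; the reference CAS works because None is a singleton.
  def putIfAbsent(a: A): Boolean = ref.compareAndSet(None, Some(a))
  def get: Option[A] = ref.get()
}

// tryGet-style accessor mirroring tryGetMutablePackageMetadataView above:
def tryGet[A](cell: MiniCell[A], name: String): A =
  cell.get.getOrElse(sys.error(s"$name should be defined"))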
- cantonSyncService.get.map(_.getPackageMetadataView) - val topologyManager = new AuthorizedTopologyManager( nodeId, clock, @@ -232,9 +247,14 @@ class ParticipantNodeBootstrap( futureSupervisor, bootstrapStageCallback.loggerFactory, ) with ParticipantTopologyValidation { + override def initialize(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = + // initialize the package metadata view before we start vetting any package + packageMetadataView.refreshState + override def validatePackageVetting( currentlyVettedPackages: Set[LfPackageId], nextPackageIds: Set[LfPackageId], + dryRunSnapshot: Option[PackageMetadata], forceFlags: ForceFlags, )(implicit traceContext: TraceContext @@ -242,8 +262,8 @@ class ParticipantNodeBootstrap( validatePackageVetting( currentlyVettedPackages, nextPackageIds, - getPackageMetadataView(), - resolver, + packageMetadataView, + dryRunSnapshot, acsInspections = () => acsInspectionPerSynchronizer(), forceFlags, disableUpgradeValidation = parameters.disableUpgradeValidation, @@ -350,17 +370,16 @@ class ParticipantNodeBootstrap( } private def createPackageOps(manager: SyncPersistentStateManager): PackageOps = { - val authorizedTopologyStoreClient = new StoreBasedTopologySnapshot( - CantonTimestamp.MaxValue, - topologyManager.store, - tryGetPackageDependencyResolver(), - loggerFactory, - ) val packageOps = new PackageOpsImpl( participantId = participantId, - headAuthorizedTopologySnapshot = authorizedTopologyStoreClient, stateManager = manager, - topologyManager = topologyManager, + topologyManagerLookup = new TopologyManagerLookup( + lookupByPsid = psid => + cantonSyncService.get + .flatMap(_.syncPersistentStateManager.get(psid)) + .map(_.topologyManager), + lookupActivePsidByLsid = lookupActivePSId, + ), nodeId = nodeId, initialProtocolVersion = ProtocolVersion.latest, loggerFactory = ParticipantNodeBootstrap.this.loggerFactory, @@ -379,7 +398,7 @@ class ParticipantNodeBootstrap( storage: Storage, engine: Engine, authorizedTopologyManager: AuthorizedTopologyManager, - packageDependencyResolver: PackageDependencyResolver, + packageDependencyResolver: PackageDependencyResolver.Impl, )(implicit executionSequencerFactory: ExecutionSequencerFactory): EitherT[ FutureUnlessShutdown, String, @@ -448,22 +467,7 @@ class ParticipantNodeBootstrap( _ <- EitherT.right(persistentStateContainer.initializeNext()) persistentState = persistentStateContainer.asEval - mutablePackageMetadataViewContainer = new LifeCycleContainer[ - MutablePackageMetadataViewImpl - ]( - stateName = "mutable-package-metadata-view", - create = () => - MutablePackageMetadataViewImpl.createAndInitialize( - clock, - packageDependencyResolver.damlPackageStore, - packageUpgradeValidator, - loggerFactory, - config.parameters.packageMetadataView, - parameters.processingTimeouts, - ), - loggerFactory = loggerFactory, - ) - _ <- EitherT.right(mutablePackageMetadataViewContainer.initializeNext()) + mutablePackageMetadataView = tryGetMutablePackageMetadataView() syncPersistentStateManager = new SyncPersistentStateManager( participantId, @@ -476,10 +480,9 @@ class ParticipantNodeBootstrap( (staticSynchronizerParameters: StaticSynchronizerParameters) => SynchronizerCrypto(crypto, staticSynchronizerParameters), clock, - tryGetPackageDependencyResolver(), + mutablePackageMetadataView, persistentState.map(_.ledgerApiStore), persistentState.map(_.contractStore), - mutablePackageMetadataViewContainer.asEval, futureSupervisor, loggerFactory, ) @@ -583,11 +586,11 @@ class ParticipantNodeBootstrap( packageService 
= PackageService( clock = clock, engine = engine, + mutablePackageMetadataView = mutablePackageMetadataView, packageDependencyResolver = packageDependencyResolver, enableStrictDarValidation = parameters.enableStrictDarValidation, loggerFactory = loggerFactory, metrics = arguments.metrics, - mutablePackageMetadataView = mutablePackageMetadataViewContainer.asEval, packageOps = createPackageOps(syncPersistentStateManager), timeouts = parameters.processingTimeouts, ) @@ -623,6 +626,7 @@ class ParticipantNodeBootstrap( topologyDispatcher, syncCryptoSignerWithSessionKeys, config.crypto, + config.topology, clock, parameters, synchronizerAliasManager, @@ -883,7 +887,6 @@ class ParticipantNodeBootstrap( addCloseable(partyMetadataStore) persistentState.map(addCloseable).discard addCloseable(packageService) - addCloseable(mutablePackageMetadataViewContainer.currentAutoCloseable()) addCloseable(indexedStringStore) addCloseable(partyNotifier) addCloseable(ephemeralState.participantEventPublisher) @@ -903,11 +906,12 @@ class ParticipantNodeBootstrap( }) addCloseable(ledgerApiDependentServices) addCloseable(packageDependencyResolver) + addCloseable(mutablePackageMetadataView) // return values ParticipantServices( persistentStateContainer = persistentStateContainer, - mutablePackageMetadataViewContainer = mutablePackageMetadataViewContainer, + mutablePackageMetadataView = mutablePackageMetadataView, ledgerApiIndexerContainer = ledgerApiIndexerContainer, cantonSyncService = sync, schedulers = schedulers, @@ -990,7 +994,7 @@ object ParticipantNodeBootstrap { final case class ParticipantServices( persistentStateContainer: LifeCycleContainer[ParticipantNodePersistentState], - mutablePackageMetadataViewContainer: LifeCycleContainer[MutablePackageMetadataViewImpl], + mutablePackageMetadataView: MutablePackageMetadataViewImpl, ledgerApiIndexerContainer: LifeCycleContainer[LedgerApiIndexer], cantonSyncService: CantonSyncService, schedulers: Schedulers, @@ -1036,7 +1040,7 @@ class ParticipantNode( override def status: ParticipantStatus = { val ports = Map("ledger" -> config.ledgerApi.port, "admin" -> config.adminApi.port) ++ - config.httpLedgerApi.flatMap(_.server.port).flatMap(Port.create(_).toOption).map("json" -> _) + Option.when(config.httpLedgerApi.enabled)("json" -> config.httpLedgerApi.server.port) val synchronizers = readySynchronizers val topologyQueues = identityPusher.queueStatus diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeBootstrapFactory.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeBootstrapFactory.scala index 3caaca77db..5d85f179bd 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeBootstrapFactory.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeBootstrapFactory.scala @@ -8,8 +8,7 @@ import cats.syntax.either.* import com.daml.grpc.adapter.ExecutionSequencerFactory import com.digitalasset.canton.admin.participant.v30 import com.digitalasset.canton.concurrent.ExecutionContextIdlenessExecutorService -import com.digitalasset.canton.crypto.kms.CommunityKmsFactory -import com.digitalasset.canton.crypto.store.CommunityCryptoPrivateStoreFactory +import com.digitalasset.canton.crypto.store.CryptoPrivateStoreFactory import com.digitalasset.canton.environment.{ CantonNodeBootstrapCommonArguments, NodeFactoryArguments, @@ -22,7 +21,7 @@ import 
com.digitalasset.canton.participant.metrics.ParticipantMetrics import com.digitalasset.canton.participant.store.ParticipantSettingsStore import com.digitalasset.canton.participant.sync.CantonSyncService import com.digitalasset.canton.participant.util.DAMLe -import com.digitalasset.canton.resource.CommunityStorageFactory +import com.digitalasset.canton.resource.StorageSingleFactory import com.digitalasset.canton.time.TestingTimeService import com.digitalasset.daml.lf.engine.Engine import io.grpc.ServerServiceDefinition @@ -126,18 +125,17 @@ object CommunityParticipantNodeBootstrapFactory extends ParticipantNodeBootstrap ): Either[String, ParticipantNodeBootstrap] = arguments .toCantonNodeBootstrapCommonArguments( - new CommunityStorageFactory(arguments.config.storage), - new CommunityCryptoPrivateStoreFactory( + new StorageSingleFactory(arguments.config.storage), + new CryptoPrivateStoreFactory( arguments.config.crypto.provider, arguments.config.crypto.kms, - CommunityKmsFactory, arguments.config.parameters.caching.kmsMetadataCache, arguments.config.crypto.privateKeyStore, + replicaManager = None, arguments.futureSupervisor, arguments.clock, arguments.executionContext, ), - CommunityKmsFactory, ) .map { arguments => val engine = createEngine(arguments) diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeParameters.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeParameters.scala index 08f14065d8..5e58b5c00d 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeParameters.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeParameters.scala @@ -11,7 +11,7 @@ import com.digitalasset.canton.participant.admin.AdminWorkflowConfig import com.digitalasset.canton.participant.config.* import com.digitalasset.canton.participant.sync.CommandProgressTrackerConfig import com.digitalasset.canton.sequencing.client.SequencerClientConfig -import com.digitalasset.canton.time.NonNegativeFiniteDuration +import com.digitalasset.canton.time import com.digitalasset.canton.tracing.TracingConfig import com.digitalasset.canton.version.ProtocolVersion import com.google.common.annotations.VisibleForTesting @@ -22,16 +22,18 @@ final case class ParticipantNodeParameters( adminWorkflow: AdminWorkflowConfig, maxUnzippedDarSize: Int, stores: ParticipantStoreConfig, - reassignmentTimeProofFreshnessProportion: NonNegativeInt, protocolConfig: ParticipantProtocolConfig, ledgerApiServerParameters: LedgerApiServerParametersConfig, engine: CantonEngineConfig, - journalGarbageCollectionDelay: NonNegativeFiniteDuration, + journalGarbageCollectionDelay: time.NonNegativeFiniteDuration, disableUpgradeValidation: Boolean, enableStrictDarValidation: Boolean, commandProgressTracking: CommandProgressTrackerConfig, unsafeOnlinePartyReplication: Option[UnsafeOnlinePartyReplicationConfig], automaticallyPerformLogicalSynchronizerUpgrade: Boolean, + reassignmentsConfig: ReassignmentsConfig, + doNotAwaitOnCheckingIncomingCommitments: Boolean, + disableOptionalTopologyChecks: Boolean, ) extends CantonNodeParameters with HasGeneralCantonNodeParameters { override def dontWarnOnDeprecatedPV: Boolean = protocolConfig.dontWarnOnDeprecatedPV @@ -44,7 +46,7 @@ object ParticipantNodeParameters { def forTestingOnly(testedProtocolVersion: ProtocolVersion) = ParticipantNodeParameters( general = CantonNodeParameters.General.Impl( tracing = 
TracingConfig(TracingConfig.Propagation.Disabled), - delayLoggingThreshold = NonNegativeFiniteDuration.tryOfMillis(5000), + delayLoggingThreshold = time.NonNegativeFiniteDuration.tryOfMillis(5000), enableAdditionalConsistencyChecks = true, loggingConfig = LoggingConfig(api = ApiLoggingConfig(messagePayloads = true)), processingTimeouts = DefaultProcessingTimeouts.testing, @@ -68,7 +70,6 @@ object ParticipantNodeParameters { ), maxUnzippedDarSize = 10, stores = ParticipantStoreConfig(), - reassignmentTimeProofFreshnessProportion = NonNegativeInt.tryCreate(3), protocolConfig = ParticipantProtocolConfig( Some(testedProtocolVersion), // TODO(i15561): Revert back to `false` once there is a stable Daml 3 protocol version @@ -78,11 +79,16 @@ object ParticipantNodeParameters { ), ledgerApiServerParameters = LedgerApiServerParametersConfig(), engine = CantonEngineConfig(), - journalGarbageCollectionDelay = NonNegativeFiniteDuration.Zero, + journalGarbageCollectionDelay = time.NonNegativeFiniteDuration.Zero, disableUpgradeValidation = false, enableStrictDarValidation = false, commandProgressTracking = CommandProgressTrackerConfig(), unsafeOnlinePartyReplication = None, automaticallyPerformLogicalSynchronizerUpgrade = true, + reassignmentsConfig = ReassignmentsConfig( + targetTimestampForwardTolerance = NonNegativeFiniteDuration.ofSeconds(30) + ), + doNotAwaitOnCheckingIncomingCommitments = false, + disableOptionalTopologyChecks = false, ) } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/AdminWorkflowServices.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/AdminWorkflowServices.scala index 3f2ac71195..8b59665d82 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/AdminWorkflowServices.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/AdminWorkflowServices.scala @@ -43,7 +43,7 @@ import com.digitalasset.canton.tracing.TraceContext.withNewTraceContext import com.digitalasset.canton.tracing.{Spanning, TraceContext, Traced, TracerProvider} import com.digitalasset.canton.util.FutureInstances.* import com.digitalasset.canton.util.ResourceUtil.withResource -import com.digitalasset.canton.util.{DamlPackageLoader, EitherTUtil, FutureUtil} +import com.digitalasset.canton.util.{DamlPackageLoader, EitherTUtil, FutureUtil, MonadUtil} import com.digitalasset.daml.lf.data.Ref.PackageId import com.digitalasset.daml.lf.language.Ast import com.google.protobuf.ByteString @@ -52,7 +52,7 @@ import org.apache.pekko.actor.ActorSystem import org.apache.pekko.stream.scaladsl.Flow import java.io.InputStream -import scala.concurrent.{ExecutionContextExecutor, Future} +import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future} /** Manages our admin workflow applications (ping, party management). Currently, each is an * individual application with their own ledger connection and acting independently. 
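The `MonadUtil` import added in the hunk above is used in the hunks below to run the DAR-loading effect only when the admin workflow packages are not yet present (`MonadUtil.when(!isAlreadyLoaded)(...)`). A minimal sketch of such a combinator's shape, assuming it behaves like cats' `Applicative.whenA`; this `when` is illustrative only, not Canton's actual implementation:

import cats.Monad
import cats.syntax.all.*

// Illustrative sketch (assumed semantics): run the effect only when the
// condition holds, otherwise succeed with unit, discarding the result.
def when[M[_]: Monad, A](cond: Boolean)(fa: => M[A]): M[Unit] =
  if (cond) fa.void else Monad[M].unit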
@@ -189,27 +189,6 @@ class AdminWorkflowServices( pkgRes <- pkgs.keys.toList.parTraverse(lc.packageService.getPackageStatus(_)) } yield pkgRes.forall(pkgResponse => pkgResponse.packageStatus.isPackageStatusRegistered) - private def handleDamlErrorDuringPackageLoading(adminWorkflow: String)( - res: EitherT[FutureUnlessShutdown, RpcError, Unit] - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, IllegalStateException, Unit] = - EitherTUtil - .leftSubflatMap(res) { - case CantonPackageServiceError.IdentityManagerParentError( - ParticipantTopologyManagerError.IdentityManagerParentError( - NoAppropriateSigningKeyInStore.Failure(_, _) | SecretKeyNotInStore.Failure(_) - ) - ) => - // Log error by creating error object, but continue processing. - AdminWorkflowServices.CanNotAutomaticallyVetAdminWorkflowPackage - .Error(adminWorkflow) - .discard - Either.unit - case err => - Left(new IllegalStateException(CantonError.stringFromContext(err))) - } - private def adminWorkflowsAreLoaded()(implicit traceContext: TraceContext ): UnlessShutdown[Boolean] = @@ -232,7 +211,7 @@ class AdminWorkflowServices( } /** Parses dar and checks if all contained packages are already loaded and recorded in the - * indexer. If not, loads the dar. + * indexer. If not, loads the dar. Does NOT vet the dar. * @throws java.lang.IllegalStateException * if the daml archive cannot be found on the classpath */ @@ -244,23 +223,11 @@ class AdminWorkflowServices( val packages = AdminWorkflowServices.getDarPackages(darName) FutureUnlessShutdown .outcomeF(checkPackagesStatus(packages, conn)) - .flatMap { isAlreadyLoaded => - if (!isAlreadyLoaded) + .flatMap(isAlreadyLoaded => + MonadUtil.when(!isAlreadyLoaded)( EitherTUtil.toFutureUnlessShutdown(loadDamlArchiveResource(darName)) - else { - logger.debug("Admin workflow packages are already present. 
Skipping loading.") - // vet any packages that have not yet been vetted - EitherTUtil.toFutureUnlessShutdown( - handleDamlErrorDuringPackageLoading(darName)( - packageService - .vetPackages( - packages.keys.toSeq, - synchronizeVetting = PackageVettingSynchronization.NoSync, - ) - ) - ) - } - } + ) + ) } val resultUS = for { @@ -297,14 +264,13 @@ class AdminWorkflowServices( ): EitherT[FutureUnlessShutdown, IllegalStateException, Unit] = { val bytes = withResource(AdminWorkflowServices.getDarInputStream(darName))(ByteString.readFrom) - handleDamlErrorDuringPackageLoading(darName)( + AdminWorkflowServices.handleDamlErrorDuringPackageLoading(darName)( packageService .upload( darBytes = bytes, description = Some("System package"), submissionIdO = None, - vetAllPackages = true, - synchronizeVetting = PackageVettingSynchronization.NoSync, + vettingInfo = None, expectedMainPackageId = None, ) .void @@ -394,7 +360,7 @@ object AdminWorkflowServices extends AdminWorkflowServicesErrorGroup { val PingDarResourceName: String = "canton-builtin-admin-workflow-ping" val PingDarResourceFileName: String = s"$PingDarResourceName.dar" - private val PartyReplicationDarResourceName: String = + val PartyReplicationDarResourceName: String = "canton-builtin-admin-workflow-party-replication-alpha" private val PartyReplicationDarResourceFileName: String = s"$PartyReplicationDarResourceName.dar" @@ -411,15 +377,41 @@ object AdminWorkflowServices extends AdminWorkflowServicesErrorGroup { ) } - private def getDarPackages(darName: String): Map[PackageId, Ast.Package] = + private[participant] def getDarPackages(darName: String): Map[PackageId, Ast.Package] = DamlPackageLoader .getPackagesFromInputStream(darName, getDarInputStream(darName)) .valueOr(err => throw new IllegalStateException(s"Unable to load admin workflow packages: $err") ) - lazy val AdminWorkflowPackages: Map[PackageId, Ast.Package] = - getDarPackages(PingDarResourceFileName) ++ getDarPackages(PartyReplicationDarResourceFileName) + private[participant] def handleDamlErrorDuringPackageLoading(adminWorkflow: String)( + res: EitherT[FutureUnlessShutdown, RpcError, Unit] + )(implicit + ec: ExecutionContext, + loggingContext: ErrorLoggingContext, + ): EitherT[FutureUnlessShutdown, IllegalStateException, Unit] = + EitherTUtil + .leftSubflatMap(res) { + case CantonPackageServiceError.IdentityManagerParentError( + ParticipantTopologyManagerError.IdentityManagerParentError( + NoAppropriateSigningKeyInStore.Failure(_, _) | SecretKeyNotInStore.Failure(_) + ) + ) => + // Log error by creating error object, but continue processing. + AdminWorkflowServices.CanNotAutomaticallyVetAdminWorkflowPackage + .Error(adminWorkflow) + .discard + Either.unit + case err => + Left(new IllegalStateException(CantonError.stringFromContext(err))) + } + + lazy val PingPackages: Map[PackageId, Ast.Package] = getDarPackages(PingDarResourceFileName) + lazy val PartyReplicationPackages: Map[PackageId, Ast.Package] = getDarPackages( + PartyReplicationDarResourceFileName + ) + lazy val AllBuiltInPackages: Map[PackageId, Ast.Package] = + PingPackages ++ PartyReplicationPackages @Explanation( """This error indicates that the admin workflow package could not be vetted. 
The admin workflows is diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/CantonPackageServiceError.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/CantonPackageServiceError.scala index aac1e8a0fe..2b3c1db136 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/CantonPackageServiceError.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/CantonPackageServiceError.scala @@ -11,8 +11,10 @@ import com.digitalasset.base.error.{ Explanation, Resolution, } +import com.digitalasset.canton.LfPackageId import com.digitalasset.canton.error.CantonErrorGroups.ParticipantErrorGroup.PackageServiceErrorGroup import com.digitalasset.canton.error.{CantonError, ContextualizedCantonError, ParentCantonError} +import com.digitalasset.canton.ledger.api.VettedPackagesRef import com.digitalasset.canton.ledger.error.PackageServiceErrors import com.digitalasset.canton.logging.ErrorLoggingContext import com.digitalasset.canton.participant.admin.PackageService.DarDescription @@ -157,6 +159,43 @@ object CantonPackageServiceError extends PackageServiceErrorGroup { } + @Resolution("Connect to a synchronizer before vetting a package.") + object NotConnectedToSynchronizer + extends ErrorCode( + id = "PACKAGE_SERVICE_NOT_CONNECTED_TO_SYNCHRONIZER", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) { + final case class Error(synchronizerId: String)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + s"Cannot upload a dar because the participant is not connected to synchronizer '$synchronizerId'." + ) + } + + @Resolution( + """If the node is not connected to any synchronizer, connect to a synchronizer first. + |If the participant is connected to more than one synchronizer, explicitly specify the synchronizer id.""" + ) + object CannotAutodetectSynchronizer + extends ErrorCode( + id = "PACKAGE_SERVICE_CANNOT_AUTODETECT_SYNCHRONIZER", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) { + final case class Failure(synchronizers: Seq[SynchronizerId])(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = { + val connectedSynchronizersMsg = + if (synchronizers.isEmpty) + "no synchronizers currently connected" + else + "currently connected synchronizers " + synchronizers.mkString(", ") + s"Cannot autodetect synchronizer: $connectedSynchronizersMsg" + } + ) + + } @Explanation( """An operation failed with an internal error.""" ) @@ -169,4 +208,41 @@ object CantonPackageServiceError extends PackageServiceErrorGroup { ) extends CantonError.Impl(_reason)(PackageServiceErrors.InternalError) } + @Explanation("Package vetting errors") + object Vetting extends ErrorGroup { + + @Explanation( + """The vetted package reference does not match any package in the package store.""" + ) + @Resolution("""Check the provided vetted package reference and re-try the operation.""") + object VettingReferenceEmpty + extends ErrorCode( + id = "UNRESOLVED_VETTING_REFERENCE", + ErrorCategory.InvalidGivenCurrentSystemStateResourceMissing, + ) { + final case class Reject(reason: String, reference: VettedPackagesRef)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + s"The vetted package reference $reference does not match any package in the package store. 
Reason: $reason" + ) + } + + @Explanation( + """The vetted package reference matches more than one package in the package store.""" + ) + @Resolution("""Check the provided vetted package reference and re-try the operation.""") + object VettingReferenceMoreThanOne + extends ErrorCode( + id = "AMBIGUOUS_VETTING_REFERENCE", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) { + final case class Reject(reference: VettedPackagesRef, matchingPackages: Set[LfPackageId])( + implicit val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + show"The package reference $reference matches multiple packages: ${matchingPackages.toSeq}" + ) + } + } } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageDependencyResolver.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageDependencyResolver.scala index ce307639a5..e99d2042e4 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageDependencyResolver.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageDependencyResolver.scala @@ -5,7 +5,6 @@ package com.digitalasset.canton.participant.admin import cats.data.{EitherT, OptionT} import cats.syntax.either.* -import cats.syntax.parallel.* import com.digitalasset.canton.caching.ScaffeineCache import com.digitalasset.canton.caching.ScaffeineCache.TracedAsyncLoadingCache import com.digitalasset.canton.config @@ -15,110 +14,108 @@ import com.digitalasset.canton.ledger.participant.state.PackageDescription import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown, LifeCycle} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.participant.store.DamlPackageStore -import com.digitalasset.canton.topology.store.PackageDependencyResolverUS +import com.digitalasset.canton.topology.ParticipantId +import com.digitalasset.canton.topology.store.PackageDependencyResolver import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.MonadUtil import com.digitalasset.daml.lf.data.Ref.PackageId import scala.concurrent.ExecutionContext -class PackageDependencyResolver( - val damlPackageStore: DamlPackageStore, - override protected val timeouts: ProcessingTimeout, - protected val loggerFactory: NamedLoggerFactory, - fetchPackageParallelism: PositiveInt = PositiveInt.tryCreate(8), - packageDependencyCacheConfig: CacheConfig = CacheConfig( - maximumSize = PositiveLong.tryCreate(10000), - expireAfterAccess = config.NonNegativeFiniteDuration.ofMinutes(15L), - ), -)(implicit - ec: ExecutionContext -) extends NamedLogging - with FlagCloseable - with PackageDependencyResolverUS { +object PackageDependencyResolver { - private val dependencyCache: TracedAsyncLoadingCache[ - EitherT[FutureUnlessShutdown, PackageId, *], - PackageId, - Set[PackageId], - ] = ScaffeineCache - .buildTracedAsync[EitherT[FutureUnlessShutdown, PackageId, *], PackageId, Set[PackageId]]( - cache = packageDependencyCacheConfig.buildScaffeine(), - loader = implicit tc => loadPackageDependencies _, - allLoader = None, - )(logger, "dependencyCache") + class Impl( + participantId: ParticipantId, + val damlPackageStore: DamlPackageStore, + override protected val timeouts: ProcessingTimeout, + protected val loggerFactory: NamedLoggerFactory, + fetchPackageParallelism: PositiveInt = PositiveInt.tryCreate(8), + packageDependencyCacheConfig: 
CacheConfig = CacheConfig( + maximumSize = PositiveLong.tryCreate(10000), + expireAfterAccess = config.NonNegativeFiniteDuration.ofMinutes(15L), + ), + )(implicit + ec: ExecutionContext + ) extends NamedLogging + with FlagCloseable + with PackageDependencyResolver { - def packageDependencies(packageId: PackageId)(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, PackageId, Set[PackageId]] = - dependencyCache.get(packageId).map(_ - packageId) + private val dependencyCache: TracedAsyncLoadingCache[ + EitherT[FutureUnlessShutdown, PackageId, *], + PackageId, + Set[PackageId], + ] = ScaffeineCache + .buildTracedAsync[EitherT[FutureUnlessShutdown, PackageId, *], PackageId, Set[PackageId]]( + cache = packageDependencyCacheConfig.buildScaffeine(), + loader = implicit tc => loadPackageDependencies _, + allLoader = None, + )(logger, "dependencyCache") - def packageDependencies(packages: List[PackageId])(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, PackageId, Set[PackageId]] = - packages - .parTraverse(packageDependencies) - .map(_.flatten.toSet -- packages) + def packageDependencies(packageId: PackageId)(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, (PackageId, ParticipantId), Set[PackageId]] = + dependencyCache.get(packageId).leftMap(p => (p, participantId)).map(_ - packageId) - def getPackageDescription(packageId: PackageId)(implicit - traceContext: TraceContext - ): OptionT[FutureUnlessShutdown, PackageDescription] = - damlPackageStore.getPackageDescription(packageId) + def getPackageDescription(packageId: PackageId)(implicit + traceContext: TraceContext + ): OptionT[FutureUnlessShutdown, PackageDescription] = + damlPackageStore.getPackageDescription(packageId) - private def loadPackageDependencies(packageId: PackageId)(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, PackageId, Set[PackageId]] = { - def computeDirectDependencies( - packageIds: List[PackageId] - ): EitherT[FutureUnlessShutdown, PackageId, Set[PackageId]] = - for { - directDependenciesByPackage <- MonadUtil.parTraverseWithLimit( - fetchPackageParallelism - )(packageIds) { packageId => - for { - pckg <- OptionT(damlPackageStore.getPackage(packageId)).toRight(packageId) - directDependencies <- EitherT.fromEither[FutureUnlessShutdown]( - com.digitalasset.daml.lf.archive.Decode - .decodeArchive(pckg) - .map { case (_, packageAst) => packageAst.directDeps } - .leftMap { e => - logger.error( - s"Failed to decode package with id $packageId while trying to determine dependencies", - e, - ) - packageId - } - ) - } yield directDependencies + private def loadPackageDependencies(packageId: PackageId)(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, PackageId, Set[PackageId]] = { + def computeDirectDependencies( + packageIds: List[PackageId] + ): EitherT[FutureUnlessShutdown, PackageId, Set[PackageId]] = + for { + directDependenciesByPackage <- MonadUtil.parTraverseWithLimit( + fetchPackageParallelism + )(packageIds) { packageId => + for { + pckg <- OptionT(damlPackageStore.getPackage(packageId)).toRight(packageId) + directDependencies <- EitherT.fromEither[FutureUnlessShutdown]( + com.digitalasset.daml.lf.archive.Decode + .decodeArchive(pckg) + .map { case (_, packageAst) => packageAst.directDeps } + .leftMap { e => + logger.error( + s"Failed to decode package with id $packageId while trying to determine dependencies", + e, + ) + packageId + } + ) + } yield directDependencies + } + } yield { + 
directDependenciesByPackage.reduceLeftOption(_ ++ _).getOrElse(Set.empty) } - } yield { - directDependenciesByPackage.reduceLeftOption(_ ++ _).getOrElse(Set.empty) - } - def go( - packageIds: List[PackageId], - knownDependencies: Set[PackageId], - ): EitherT[FutureUnlessShutdown, PackageId, Set[PackageId]] = - if (isClosing) - EitherT[FutureUnlessShutdown, PackageId, Set[PackageId]]( - FutureUnlessShutdown.abortedDueToShutdown - ) - else if (packageIds.isEmpty) EitherT.rightT(knownDependencies) - else { - for { - directDependencies <- computeDirectDependencies(packageIds) - newlyDiscovered = directDependencies -- knownDependencies - packageId - allDependencies <- go(newlyDiscovered.toList, knownDependencies ++ newlyDiscovered) - } yield allDependencies - } + def go( + packageIds: List[PackageId], + knownDependencies: Set[PackageId], + ): EitherT[FutureUnlessShutdown, PackageId, Set[PackageId]] = + if (isClosing) + EitherT[FutureUnlessShutdown, PackageId, Set[PackageId]]( + FutureUnlessShutdown.abortedDueToShutdown + ) + else if (packageIds.isEmpty) EitherT.rightT(knownDependencies) + else { + for { + directDependencies <- computeDirectDependencies(packageIds) + newlyDiscovered = directDependencies -- knownDependencies - packageId + allDependencies <- go(newlyDiscovered.toList, knownDependencies ++ newlyDiscovered) + } yield allDependencies + } - go(List(packageId), Set()) + go(List(packageId), Set()) - } + } - override def onClosed(): Unit = { - dependencyCache.invalidateAll() - dependencyCache.cleanUp() - LifeCycle.close(damlPackageStore)(logger) + override def onClosed(): Unit = { + dependencyCache.invalidateAll() + dependencyCache.cleanUp() + LifeCycle.close(damlPackageStore)(logger) + } } } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageService.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageService.scala index 114615bbc8..49e87734e2 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageService.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageService.scala @@ -3,7 +3,6 @@ package com.digitalasset.canton.participant.admin -import cats.Eval import cats.data.{EitherT, OptionT} import cats.syntax.bifunctor.* import cats.syntax.foldable.* @@ -12,6 +11,14 @@ import cats.syntax.parallel.* import com.digitalasset.base.error.RpcError import com.digitalasset.canton.config.CantonRequireTypes.String255 import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.ledger.api.{ + EnrichedVettedPackage, + ListVettedPackagesOpts, + SinglePackageTargetVetting, + UpdateVettedPackagesOpts, + VettedPackagesRef, +} import com.digitalasset.canton.ledger.error.PackageServiceErrors import com.digitalasset.canton.ledger.participant.state.PackageDescription import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown, LifeCycle} @@ -24,6 +31,10 @@ import com.digitalasset.canton.participant.admin.CantonPackageServiceError.Packa PackageRemovalError, PackageVetted, } +import com.digitalasset.canton.participant.admin.CantonPackageServiceError.Vetting.{ + VettingReferenceEmpty, + VettingReferenceMoreThanOne, +} import com.digitalasset.canton.participant.admin.PackageService.* import com.digitalasset.canton.participant.admin.data.UploadDarData import 
com.digitalasset.canton.participant.metrics.ParticipantMetrics @@ -34,7 +45,15 @@ import com.digitalasset.canton.participant.store.memory.{ } import com.digitalasset.canton.participant.topology.PackageOps import com.digitalasset.canton.platform.packages.DeduplicatingPackageLoader +import com.digitalasset.canton.store.packagemeta.PackageMetadata import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.topology.transaction.VettedPackage +import com.digitalasset.canton.topology.{ + ForceFlag, + ForceFlags, + PhysicalSynchronizerId, + SynchronizerId, +} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.{EitherTUtil, MonadUtil} import com.digitalasset.canton.{LedgerSubmissionId, LfPackageId, ProtoDeserializationError} @@ -54,8 +73,7 @@ trait DarService { def upload( dars: Seq[UploadDarData], submissionIdO: Option[LedgerSubmissionId], - vetAllPackages: Boolean, - synchronizeVetting: PackageVettingSynchronization, + vettingInfo: Option[(PhysicalSynchronizerId, PackageVettingSynchronization)], )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, RpcError, Seq[DarMainPackageId]] @@ -63,6 +81,7 @@ trait DarService { def validateDar( payload: ByteString, filename: String, + synchronizerId: PhysicalSynchronizerId, )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, RpcError, DarMainPackageId] @@ -81,7 +100,7 @@ trait DarService { } class PackageService( - val packageDependencyResolver: PackageDependencyResolver, + val packageDependencyResolver: PackageDependencyResolver.Impl, protected val loggerFactory: NamedLoggerFactory, metrics: ParticipantMetrics, packageOps: PackageOps, @@ -95,7 +114,7 @@ class PackageService( private val packageLoader = new DeduplicatingPackageLoader() private val packagesDarsStore = packageDependencyResolver.damlPackageStore - def getPackageMetadataView: PackageMetadataView = packageUploader.getPackageMetadataView + def getPackageMetadataView: PackageMetadataView = packageUploader.packageMetadataView def getLfArchive(packageId: PackageId)(implicit traceContext: TraceContext @@ -153,34 +172,141 @@ class PackageService( } yield () } - def removeDar(mainPackageId: DarMainPackageId)(implicit + def removeDar(mainPackageId: DarMainPackageId, psids: Set[PhysicalSynchronizerId])(implicit tc: TraceContext ): EitherT[FutureUnlessShutdown, RpcError, Unit] = - ifDarExists(mainPackageId)(removeDarLf(_, _))(ifNotExistsOperationFailed = + ifDarExists(mainPackageId)(removeDarLf(_, _, psids))(ifNotExistsOperationFailed = "DAR archive removal" ) def vetDar( mainPackageId: DarMainPackageId, synchronizeVetting: PackageVettingSynchronization, + psid: PhysicalSynchronizerId, )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, RpcError, Unit] = ifDarExists(mainPackageId) { (_, darLf) => packageOps - .vetPackages(darLf.all.map(readPackageId), synchronizeVetting) + .vetPackages(darLf.all.map(readPackageId), synchronizeVetting, psid) .leftWiden[RpcError] }(ifNotExistsOperationFailed = "DAR archive vetting") - def unvetDar(mainPackageId: DarMainPackageId)(implicit + def unvetDar(mainPackageId: DarMainPackageId, psid: PhysicalSynchronizerId)(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, RpcError, Unit] = ifDarExists(mainPackageId) { (descriptor, lfArchive) => val packages = lfArchive.all.map(readPackageId) val mainPkg = readPackageId(lfArchive.main) - revokeVettingForDar(mainPkg, packages, descriptor) + revokeVettingForDar(mainPkg, packages, descriptor, psid) 
}(ifNotExistsOperationFailed = "DAR archive unvetting") + def resolveTargetVettingReferences( + targetState: SinglePackageTargetVetting[VettedPackagesRef], + snapshot: PackageMetadata, + )(implicit + traceContext: TraceContext + ): EitherT[ + FutureUnlessShutdown, + RpcError, + List[SinglePackageTargetVetting[PackageId]], + ] = + targetState.ref.findMatchingPackages(snapshot) match { + // When vetting, we expect every reference to give exactly one package. + // With unvetting, we are more lenient to allow the use case where an + // app developer provides a script that unvets an old, deprecated package + // "just in case". + case Left(msg) => + val err = VettingReferenceEmpty.Reject(msg, targetState.ref) + if (targetState.isUnvetting) { + logger.debug(err.cause) + EitherT.rightT[FutureUnlessShutdown, RpcError](List()) + } else { + EitherT.leftT[FutureUnlessShutdown, List[SinglePackageTargetVetting[PackageId]]]( + err + ) + } + + // When vetting, we expect every reference to give exactly one package in + // order to rule out the case where two versions of the same package are + // vetted. On the other hand, when unvetting, it is safe to unvet all + // versions of a package. + case Right(matchingPackages) => + if (targetState.isVetting && matchingPackages.sizeIs >= 2) { + EitherT.leftT[FutureUnlessShutdown, List[SinglePackageTargetVetting[PackageId]]]( + VettingReferenceMoreThanOne.Reject(targetState.ref, matchingPackages) + ) + } else { + EitherT.rightT[FutureUnlessShutdown, RpcError]( + matchingPackages.toList.map(SinglePackageTargetVetting(_, targetState.bounds)) + ) + } + } + + def updateVettedPackages( + opts: UpdateVettedPackagesOpts, + synchronizerId: PhysicalSynchronizerId, + synchronizeVetting: PackageVettingSynchronization, + )(implicit + traceContext: TraceContext + ): EitherT[ + FutureUnlessShutdown, + RpcError, + (Seq[EnrichedVettedPackage], Seq[EnrichedVettedPackage]), + ] = { + val snapshot = getPackageMetadataView.getSnapshot + val targetStates = opts.toTargetStates + val dryRunSnapshot = Option.when(opts.dryRun)(snapshot) + for { + resolvedTargetStates <- targetStates.parTraverse(resolveTargetVettingReferences(_, snapshot)) + preAndPost <- packageOps + .updateVettedPackages( + resolvedTargetStates.flatten, + synchronizerId, + synchronizeVetting, + dryRunSnapshot, + ) + .leftWiden[RpcError] + } yield { + val (pre, post) = preAndPost + val enrichedPre = pre.map(enrichVettedPackage) + val enrichedPost = post.map(enrichVettedPackage) + (enrichedPre, enrichedPost) + } + } + + def listVettedPackages( + opts: ListVettedPackagesOpts + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, RpcError, Seq[ + (Seq[EnrichedVettedPackage], SynchronizerId, PositiveInt) + ]] = { + val snapshot = getPackageMetadataView.getSnapshot + val filterPredicates = opts.toPredicate(snapshot) + packageOps + .getVettedPackages(opts.topologyStateFilter.map(_.synchronizerIds.toSet)) + .leftWiden[RpcError] + .map(_.map { case (allVettedPackages, synchronizerId, serial) => + val matching = allVettedPackages.filter((v: VettedPackage) => filterPredicates(v.packageId)) + val enriched = matching.map(enrichVettedPackage) + (enriched, synchronizerId, serial) + }) + } + + private def enrichVettedPackage(vetted: VettedPackage)(implicit + traceContext: TraceContext + ): EnrichedVettedPackage = + getPackageMetadataView.getSnapshot.packageIdVersionMap + .get(vetted.packageId) + .fold(EnrichedVettedPackage(vetted, None, None)) { case (name, version) => + EnrichedVettedPackage( + vetted, + Some(name), 
Some(version), + ) + } + private def ifDarExists(mainPackageId: DarMainPackageId)( action: ( DarDescription, @@ -213,13 +339,14 @@ elc: ErrorLoggingContext ): EitherT[FutureUnlessShutdown, PackageRemovalError, Unit] = EitherTUtil.condUnitET( - !AdminWorkflowServices.AdminWorkflowPackages.keySet.contains(packageId), + !AdminWorkflowServices.AllBuiltInPackages.keySet.contains(packageId), new PackageRemovalErrorCode.CannotRemoveAdminWorkflowPackage(packageId), ) private def removeDarLf( darDescriptor: DarDescription, dar: archive.Dar[DamlLf.Archive], + psids: Set[PhysicalSynchronizerId], )(implicit tc: TraceContext ): EitherT[FutureUnlessShutdown, RpcError, Unit] = { @@ -268,10 +395,13 @@ class PackageService( packagesThatCanBeRemoved = packagesThatCanBeRemoved_.toList - _unit <- revokeVettingForDar( - mainPkg, - packagesThatCanBeRemoved, - darDescriptor, + _ <- MonadUtil.sequentialTraverse(psids.toSeq)(psid => + revokeVettingForDar( + mainPkg, + packagesThatCanBeRemoved, + darDescriptor, + psid, + ) ) // TODO(#26078): update documentation to reflect main package dependency removal changes @@ -293,6 +423,7 @@ class PackageService( mainPkg: PackageId, packages: List[PackageId], darDescriptor: DarDescription, + psid: PhysicalSynchronizerId, )(implicit tc: TraceContext ): EitherT[FutureUnlessShutdown, RpcError, Unit] = @@ -306,7 +437,18 @@ class PackageService( ) ) else - packageOps.revokeVettingForPackages(mainPkg, packages, darDescriptor).leftWiden + packageOps + .revokeVettingForPackages( + mainPkg, + packages, + darDescriptor, + psid, + // Unvetting a DAR requires AllowUnvettedDependencies because it is going to unvet all + // packages from the DAR, even the utility packages. UnvetDar is an experimental + // operation that requires expert-level knowledge. + ForceFlags(ForceFlag.AllowUnvettedDependencies), + ) + .leftWiden } /** Performs the upload DAR flow: @@ -317,9 +459,9 @@ * 1. Updates the * [[com.digitalasset.canton.participant.store.memory.MutablePackageMetadataView]] which is * used for subsequent DAR upload validations and incoming Ledger API queries - * 1. Issues a package vetting topology transaction for all uploaded packages (if - * `vetAllPackages` is enabled) and waits for for its completion (if `synchronizeVetting` is - * enabled). + * 1. Issues a package vetting topology transaction for all uploaded packages (if `vettingInfo` + * is nonempty) and waits for its completion using the synchronization provided by the + * `PackageVettingSynchronization` instance in `vettingInfo`. * * @param darBytes * The DAR payload to store. @@ -327,21 +469,17 @@ * A description of the DAR. * @param submissionIdO * upstream submissionId for ledger api server to recognize previous package upload requests - * @param vetAllPackages - * if true, then the packages will be vetted automatically - * @param synchronizeVetting - * a value of PackageVettingSynchronization, that checks that the packages have been vetted on - * all connected synchronizers. The Future returned by the check will complete once all - * synchronizers have observed the vetting for the new packages. The caller may also pass be a - * no-op implementation that immediately returns, depending no the caller's needs for - * synchronization. + * @param vettingInfo + * If set, checks that the packages have been vetted on the specified synchronizer. The Future + * returned by the check will complete once the synchronizer has observed the vetting for the + * new packages. 
The caller may also pass a no-op implementation that immediately returns, + * depending on the caller's needs for synchronization. */ final def upload( darBytes: ByteString, description: Option[String], submissionIdO: Option[LedgerSubmissionId], - vetAllPackages: Boolean, - synchronizeVetting: PackageVettingSynchronization, + vettingInfo: Option[(PhysicalSynchronizerId, PackageVettingSynchronization)], expectedMainPackageId: Option[LfPackageId], )(implicit traceContext: TraceContext @@ -349,8 +487,7 @@ class PackageService( upload( Seq(UploadDarData(darBytes, description, expectedMainPackageId)), submissionIdO, - vetAllPackages, - synchronizeVetting, + vettingInfo, ).subflatMap { case Seq(mainPackageId) => Right(mainPackageId) case Seq() => @@ -375,28 +512,23 @@ * 1. Updates the * [[com.digitalasset.canton.participant.store.memory.MutablePackageMetadataView]] which is * used for subsequent DAR upload validations and incoming Ledger API queries - * 1. Issues a package vetting topology transaction for all uploaded packages (if - * `vetAllPackages` is enabled) and waits for for its completion (if `synchronizeVetting` is - * enabled). + * 1. Issues a package vetting topology transaction for all uploaded packages (if `vettingInfo` + * is set). * * @param dars * The DARs (bytes, description, expected main package) to upload. * @param submissionIdO * upstream submissionId for ledger api server to recognize previous package upload requests - * @param vetAllPackages - * if true, then the packages will be vetted automatically - * @param synchronizeVetting - * a value of PackageVettingSynchronization, that checks that the packages have been vetted on - * all connected synchronizers. The Future returned by the check will complete once all - * synchronizers have observed the vetting to be effective for the new packages. The caller may - * also pass be a no-op implementation that immediately returns, depending no the caller's - * needs for synchronization. + * @param vettingInfo + * If set, checks that the packages have been vetted on the specified synchronizer. The Future + * returned by the check will complete once the synchronizer has observed the vetting for the + * new packages. The caller may also pass a no-op implementation that immediately returns, + * depending on the caller's needs for synchronization. 
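+ * A hedged usage sketch (argument values are illustrative; `somePsid` is assumed to be the
+ * PhysicalSynchronizerId of a connected synchronizer, `darBytes` an already-loaded payload):
+ * {{{
+ * packageService.upload(
+ *   dars = Seq(UploadDarData(darBytes, Some("example DAR"), None)),
+ *   submissionIdO = None,
+ *   vettingInfo = Some(somePsid -> PackageVettingSynchronization.NoSync),
+ * )
+ * }}}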
*/ def upload( dars: Seq[UploadDarData], submissionIdO: Option[LedgerSubmissionId], - vetAllPackages: Boolean, - synchronizeVetting: PackageVettingSynchronization, + vettingInfo: Option[(PhysicalSynchronizerId, PackageVettingSynchronization)], )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, RpcError, Seq[DarMainPackageId]] = { @@ -415,9 +547,9 @@ class PackageService( (mainPkgs, allPackages) = uploadResult.foldMap { case (mainPkg, dependencies) => (List(DarMainPackageId.tryCreate(mainPkg)), mainPkg +: dependencies) } - _ <- EitherTUtil.ifThenET(vetAllPackages)( - vetPackages(allPackages, synchronizeVetting) - ) + _ <- vettingInfo.traverse_ { case (synchronizerId, synchronizeVetting) => + vetPackages(allPackages, synchronizeVetting, synchronizerId) + } } yield mainPkgs // try is okay as we get this package-id from the uploader } @@ -430,10 +562,34 @@ class PackageService( def validateDar( payload: ByteString, darName: String, + synchronizerId: PhysicalSynchronizerId, )(implicit traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, RpcError, DarMainPackageId] = - packageUploader.validateDar(payload, darName) + ): EitherT[FutureUnlessShutdown, RpcError, DarMainPackageId] = { + import cats.implicits.catsSyntaxSemigroup + import PackageMetadata.Implicits.packageMetadataSemigroup + for { + darPkgs <- packageUploader.validateDar(payload, darName) + (mainPackageId, allPackages) = darPkgs + targetVettingState = allPackages.map { case (packageId, _) => + SinglePackageTargetVetting(packageId, bounds = Some((None, None))) + } + dryRunSnapshot = + allPackages + .map { case (packageId, packageAst) => PackageMetadata.from(packageId, packageAst) } + .foldLeft(getPackageMetadataView.getSnapshot)(_ |+| _) + + // TODO(#27750): all requests are of the authorized store instead of a synchronizer + _ <- packageOps + .updateVettedPackages( + targetVettingState, + synchronizerId, + PackageVettingSynchronization.NoSync, + Some(dryRunSnapshot), + ) + .leftWiden[RpcError] + } yield DarMainPackageId.tryCreate(mainPackageId) + } override def getDar(mainPackageId: DarMainPackageId)(implicit traceContext: TraceContext @@ -453,11 +609,12 @@ class PackageService( def vetPackages( packages: Seq[PackageId], synchronizeVetting: PackageVettingSynchronization, + psid: PhysicalSynchronizerId, )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, RpcError, Unit] = packageOps - .vetPackages(packages, synchronizeVetting) + .vetPackages(packages, synchronizeVetting, psid) .leftMap { err => implicit val code = err.code CantonPackageServiceError.IdentityManagerParentError(err) @@ -471,11 +628,11 @@ object PackageService { def apply( clock: Clock, engine: Engine, - packageDependencyResolver: PackageDependencyResolver, + mutablePackageMetadataView: MutablePackageMetadataView, + packageDependencyResolver: PackageDependencyResolver.Impl, enableStrictDarValidation: Boolean, loggerFactory: NamedLoggerFactory, metrics: ParticipantMetrics, - mutablePackageMetadataView: Eval[MutablePackageMetadataView], packageOps: PackageOps, timeouts: ProcessingTimeout, )(implicit diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageUploader.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageUploader.scala index 770a7f6c62..d2fb0cf548 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageUploader.scala +++ 
b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageUploader.scala @@ -3,7 +3,6 @@ package com.digitalasset.canton.participant.admin -import cats.Eval import cats.data.EitherT import cats.implicits.{catsSyntaxParallelTraverse1, toBifunctorOps, toTraverseOps} import com.digitalasset.base.error.RpcError @@ -19,12 +18,9 @@ import com.digitalasset.canton.participant.admin.PackageService.{ DarMainPackageId, catchUpstreamErrors, } -import com.digitalasset.canton.participant.store.memory.{ - MutablePackageMetadataView, - PackageMetadataView, -} +import com.digitalasset.canton.participant.store.memory.MutablePackageMetadataView import com.digitalasset.canton.participant.store.{DamlPackageStore, PackageInfo} -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata +import com.digitalasset.canton.store.packagemeta.PackageMetadata import com.digitalasset.canton.time.Clock import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.Thereafter.syntax.ThereafterOps @@ -43,21 +39,19 @@ class PackageUploader( packageStore: DamlPackageStore, engine: Engine, enableStrictDarValidation: Boolean, - packageMetadataView: Eval[MutablePackageMetadataView], + val packageMetadataView: MutablePackageMetadataView, protected val timeouts: ProcessingTimeout, protected val loggerFactory: NamedLoggerFactory, )(implicit executionContext: ExecutionContext) extends NamedLogging with FlagCloseable { - def getPackageMetadataView: PackageMetadataView = packageMetadataView.value - def validateDar( payload: ByteString, darName: String, )(implicit traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, RpcError, DarMainPackageId] = + ): EitherT[FutureUnlessShutdown, RpcError, (LfPackageId, List[(LfPackageId, Ast.Package)])] = synchronizeWithClosing("validate DAR") { val stream = new ZipInputStream(payload.newInput()) for { @@ -69,7 +63,7 @@ class PackageUploader( ) lfDar = LfDar(mainPackage, dependencies) _ <- validateLfDarPackages(lfDar) - } yield DarMainPackageId.tryCreate(mainPackage._1) + } yield (mainPackage._1, mainPackage +: dependencies) } /** Uploads dar into dar store @@ -137,9 +131,10 @@ class PackageUploader( _ = logger.debug( s"Managed to upload one or more archives for submissionId $submissionId" ) - _ = allPackages.foreach { case (_, (pkgId, pkg)) => - packageMetadataView.value.update(PackageMetadata.from(pkgId, pkg)) + darPackageMetadata = allPackages.map { case (_, (pkgId, pkg)) => + PackageMetadata.from(pkgId, pkg) } + _ <- packageMetadataView.updateMany(darPackageMetadata) } yield () val uploadTime = clock.monotonicTime() @@ -189,7 +184,7 @@ class PackageUploader( ) // If JDBC insertion call failed, we don't know whether the DB was updated or not // hence ensure the package metadata view stays in sync by re-initializing it from the DB. 
- packageMetadataView.value.refreshState.transformWith(_ => FutureUnlessShutdown.failed(e)) + packageMetadataView.refreshState.transformWith(_ => FutureUnlessShutdown.failed(e)) case success: Success[UnlessShutdown[Unit]] => FutureUnlessShutdown.lift(success.value) } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageVettingSynchronization.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageVettingSynchronization.scala index 20a82bde3c..27a1d7c738 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageVettingSynchronization.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageVettingSynchronization.scala @@ -5,22 +5,23 @@ package com.digitalasset.canton.participant.admin import cats.data.EitherT import com.digitalasset.canton.participant.topology.ParticipantTopologyManagerError +import com.digitalasset.canton.topology.PhysicalSynchronizerId +import com.digitalasset.canton.topology.transaction.VettedPackage import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.EitherTUtil -import com.digitalasset.daml.lf.data.Ref.PackageId import scala.concurrent.Future // TODO(i25076) remove this synchronization logic once topology events are published on the ledger api trait PackageVettingSynchronization { - def sync(packages: Set[PackageId])(implicit + def sync(packages: Set[VettedPackage], psid: PhysicalSynchronizerId)(implicit traceContext: TraceContext ): EitherT[Future, ParticipantTopologyManagerError, Unit] } object PackageVettingSynchronization { object NoSync extends PackageVettingSynchronization { - override def sync(packages: Set[PackageId])(implicit + override def sync(packages: Set[VettedPackage], psid: PhysicalSynchronizerId)(implicit traceContext: TraceContext ): EitherT[Future, ParticipantTopologyManagerError, Unit] = EitherTUtil.unit } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/ActiveContractOld.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/ActiveContractOld.scala index 797a924f1e..56704dbbc2 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/ActiveContractOld.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/ActiveContractOld.scala @@ -42,7 +42,14 @@ final case class ActiveContractOld( contract.ledgerCreateTime, contract.authenticationData.toLfBytes, ) - RepairContract(synchronizerId, inst, reassignmentCounter) + RepairContract( + synchronizerId = synchronizerId, + contract = inst, + reassignmentCounter = reassignmentCounter, + // Fine to have the same representative package id as in the original contract since + // exports created using ExportAcsOld are not meant to support forgetting the creation package + representativePackageId = inst.templateId.packageId, + ) } } @@ -108,5 +115,4 @@ object ActiveContractOld extends VersioningCompanion[ActiveContractOld] { source, ActiveContractOld, ) - .map(_.toList) } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/ContractIdImportMode.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/ContractIdImportMode.scala deleted file mode 100644 index 5d4103dd0c..0000000000 --- 
a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/ContractIdImportMode.scala +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.participant.admin.data - -import com.digitalasset.canton.ProtoDeserializationError.{FieldNotSet, UnrecognizedEnum} -import com.digitalasset.canton.admin.participant.v30 -import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} -import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult - -/** Represents the contract ID processing mode that should be applied on contract IDs found in an - * ACS import. - */ -sealed trait ContractIdImportMode extends Product with Serializable with PrettyPrinting { - def toProtoV30: v30.ContractIdImportMode -} - -object ContractIdImportMode { - - case object Accept extends ContractIdImportMode { - override def toProtoV30: v30.ContractIdImportMode = - v30.ContractIdImportMode.CONTRACT_ID_IMPORT_MODE_ACCEPT - - override def pretty: Pretty[Accept.type] = prettyOfObject[Accept.type] - } - - case object Validation extends ContractIdImportMode { - override def toProtoV30: v30.ContractIdImportMode = - v30.ContractIdImportMode.CONTRACT_ID_IMPORT_MODE_VALIDATION - - override def pretty: Pretty[Validation.type] = prettyOfObject[Validation.type] - } - - case object Recomputation extends ContractIdImportMode { - override def toProtoV30: v30.ContractIdImportMode = - v30.ContractIdImportMode.CONTRACT_ID_IMPORT_MODE_RECOMPUTATION - - override def pretty: Pretty[Recomputation.type] = prettyOfObject[Recomputation.type] - } - - def fromProtoV30( - contractIdReComputationModeP: v30.ContractIdImportMode - ): ParsingResult[ContractIdImportMode] = - contractIdReComputationModeP match { - case v30.ContractIdImportMode.CONTRACT_ID_IMPORT_MODE_UNSPECIFIED => - Left(FieldNotSet(contractIdReComputationModeP.name)) - case v30.ContractIdImportMode.CONTRACT_ID_IMPORT_MODE_ACCEPT => - Right(Accept) - case v30.ContractIdImportMode.CONTRACT_ID_IMPORT_MODE_VALIDATION => - Right(Validation) - case v30.ContractIdImportMode.CONTRACT_ID_IMPORT_MODE_RECOMPUTATION => - Right(Recomputation) - case v30.ContractIdImportMode.Unrecognized(value) => - Left(UnrecognizedEnum(contractIdReComputationModeP.name, value)) - } -} diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/ContractImportMode.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/ContractImportMode.scala new file mode 100644 index 0000000000..86fe39bdd3 --- /dev/null +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/ContractImportMode.scala @@ -0,0 +1,56 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.participant.admin.data + +import com.digitalasset.canton.ProtoDeserializationError.{FieldNotSet, UnrecognizedEnum} +import com.digitalasset.canton.admin.participant.v30 +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult + +/** Represents the contract processing mode that should be applied on contracts found in an ACS + * import. 
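+ * A usage sketch (illustrative only; the mapping follows `fromProtoV30` defined below):
+ * {{{
+ * ContractImportMode.fromProtoV30(v30.ContractImportMode.CONTRACT_IMPORT_MODE_VALIDATION)
+ * // == Right(ContractImportMode.Validation)
+ * }}}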
+ */ +sealed trait ContractImportMode extends Product with Serializable with PrettyPrinting { + def toProtoV30: v30.ContractImportMode +} + +object ContractImportMode { + + case object Accept extends ContractImportMode { + override def toProtoV30: v30.ContractImportMode = + v30.ContractImportMode.CONTRACT_IMPORT_MODE_ACCEPT + + override def pretty: Pretty[Accept.type] = prettyOfObject[Accept.type] + } + + case object Validation extends ContractImportMode { + override def toProtoV30: v30.ContractImportMode = + v30.ContractImportMode.CONTRACT_IMPORT_MODE_VALIDATION + + override def pretty: Pretty[Validation.type] = prettyOfObject[Validation.type] + } + + case object Recomputation extends ContractImportMode { + override def toProtoV30: v30.ContractImportMode = + v30.ContractImportMode.CONTRACT_IMPORT_MODE_RECOMPUTATION + + override def pretty: Pretty[Recomputation.type] = prettyOfObject[Recomputation.type] + } + + def fromProtoV30( + contractIdReComputationModeP: v30.ContractImportMode + ): ParsingResult[ContractImportMode] = + contractIdReComputationModeP match { + case v30.ContractImportMode.CONTRACT_IMPORT_MODE_UNSPECIFIED => + Left(FieldNotSet(contractIdReComputationModeP.name)) + case v30.ContractImportMode.CONTRACT_IMPORT_MODE_ACCEPT => + Right(Accept) + case v30.ContractImportMode.CONTRACT_IMPORT_MODE_VALIDATION => + Right(Validation) + case v30.ContractImportMode.CONTRACT_IMPORT_MODE_RECOMPUTATION => + Right(Recomputation) + case v30.ContractImportMode.Unrecognized(value) => + Left(UnrecognizedEnum(contractIdReComputationModeP.name, value)) + } +} diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/RepairContract.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/RepairContract.scala index 2ce8091806..c75a82d6cf 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/RepairContract.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/RepairContract.scala @@ -5,11 +5,11 @@ package com.digitalasset.canton.participant.admin.data import cats.implicits.* import com.daml.ledger.api.v2.state_service.ActiveContract as LapiActiveContract -import com.digitalasset.canton.ReassignmentCounter import com.digitalasset.canton.data.Counter import com.digitalasset.canton.protocol.* import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.util.{ByteStringUtil, GrpcStreamingUtils, ResourceUtil} +import com.digitalasset.canton.{LfPackageId, ReassignmentCounter} import com.digitalasset.daml.lf.transaction.{CreationTime, TransactionCoder} import com.google.protobuf.ByteString @@ -21,6 +21,7 @@ final case class RepairContract( synchronizerId: SynchronizerId, contract: LfFatContractInst, reassignmentCounter: ReassignmentCounter, + representativePackageId: LfPackageId, ) { def contractId: LfContractId = contract.contractId @@ -69,6 +70,13 @@ object RepairContract { s"Unable to decode contract event payload: ${decodeError.errorMessage}" ) + // TODO(#25385): Assume populated representativePackageId starting with 3.4 + representativePackageId <- Option(event.representativePackageId) + .filter(_.nonEmpty) + .traverse(LfPackageId.fromString) + .leftMap(err => s"Unable to parse representative package id: $err") + .map(_.getOrElse(fattyContract.templateId.packageId)) + fatContractInstance <- fattyContract.traverseCreateAt { case absolute: CreationTime.CreatedAt => Right(absolute) case _ => 
Left("Unable to determine create time.") @@ -80,9 +88,10 @@ object RepairContract { s"Unable to deserialize synchronized id from ${contract.synchronizerId}: $deserializationError" ) } yield RepairContract( - synchronizerId, - fatContractInstance, - Counter(contract.reassignmentCounter), + synchronizerId = synchronizerId, + contract = fatContractInstance, + reassignmentCounter = Counter(contract.reassignmentCounter), + representativePackageId = representativePackageId, ) } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/RepresentativePackageIdOverride.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/RepresentativePackageIdOverride.scala new file mode 100644 index 0000000000..dc4c226654 --- /dev/null +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/RepresentativePackageIdOverride.scala @@ -0,0 +1,71 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.participant.admin.data + +import cats.implicits.toTraverseOps +import com.digitalasset.canton.admin.participant.v30 +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.{LfPackageId, LfPackageName} +import com.digitalasset.daml.lf.value.Value.ContractId + +/** Defines override mappings for assigning representative package IDs to contracts upon ACS import. + * See [[com.digitalasset.canton.admin.participant.v30.RepresentativePackageIdOverride]] + */ +final case class RepresentativePackageIdOverride( + contractOverride: Map[ContractId, LfPackageId], + packageIdOverride: Map[LfPackageId, LfPackageId], + packageNameOverride: Map[LfPackageName, LfPackageId], +) extends PrettyPrinting { + def toProtoV30: v30.RepresentativePackageIdOverride = + v30.RepresentativePackageIdOverride( + // Apparently structurally trivial mappings, but upcasting from domain types to Strings + contractOverride = contractOverride.map { case (k, v) => k.coid -> v }, + packageIdOverride = packageIdOverride.map { case (k, v) => k -> v }, + packageNameOverride = packageNameOverride.map { case (k, v) => k -> v }, + ) + + override protected def pretty: Pretty[RepresentativePackageIdOverride] = + prettyOfClass( + paramIfNonEmpty("contractOverride", _.contractOverride), + paramIfNonEmpty("packageIdOverride", _.packageIdOverride), + paramIfNonEmpty("packageNameOverride", _.packageNameOverride), + ) +} + +object RepresentativePackageIdOverride { + val NoOverride: RepresentativePackageIdOverride = + RepresentativePackageIdOverride(Map.empty, Map.empty, Map.empty) + + def fromProtoV30( + representativePackageIdOverrideP: v30.RepresentativePackageIdOverride + ): ParsingResult[RepresentativePackageIdOverride] = for { + contractOverride <- representativePackageIdOverrideP.contractOverride.toSeq.traverse { + case (contractId, packageId) => + for { + cid <- ProtoConverter.parseLfContractId(contractId) + pkgId <- ProtoConverter.parsePackageId(packageId) + } yield cid -> pkgId + } + packageIdOverride <- representativePackageIdOverrideP.packageIdOverride.toSeq.traverse { + case (from, to) => + for { + fromPkgId <- ProtoConverter.parsePackageId(from) + toPkgId <- ProtoConverter.parsePackageId(to) + } yield fromPkgId -> toPkgId + } + 
packageNameOverride <- representativePackageIdOverrideP.packageNameOverride.toSeq.traverse { + case (pkgName, toPkgIdProto) => + for { + fromPkgName <- ProtoConverter.parsePackageName(pkgName) + toPkgId <- ProtoConverter.parsePackageId(toPkgIdProto) + } yield fromPkgName -> toPkgId + } + } yield new RepresentativePackageIdOverride( + contractOverride = contractOverride.toMap, + packageIdOverride = packageIdOverride.toMap, + packageNameOverride = packageNameOverride.toMap, + ) +} diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcPackageService.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcPackageService.scala index f9fe288d4e..6b36d51276 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcPackageService.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcPackageService.scala @@ -30,6 +30,7 @@ import com.digitalasset.canton.participant.admin.{ PackageService, PackageVettingSynchronization, } +import com.digitalasset.canton.topology.{PhysicalSynchronizerId, SynchronizerId} import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} import com.digitalasset.canton.util.{EitherTUtil, MonadUtil, OptionUtil} import com.digitalasset.daml.lf.data.Ref.ModuleName @@ -44,6 +45,7 @@ import scala.concurrent.{ExecutionContext, Future} class GrpcPackageService( service: PackageService, synchronizeVetting: PackageVettingSynchronization, + connectedSynchronizers: () => Set[PhysicalSynchronizerId], protected val loggerFactory: NamedLoggerFactory, )(implicit ec: ExecutionContext) extends v30.PackageServiceGrpc.PackageService @@ -73,12 +75,16 @@ class GrpcPackageService( override def validateDar(request: v30.ValidateDarRequest): Future[v30.ValidateDarResponse] = { implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext val ret = - service - .validateDar(request.data, request.filename) - .map(mainPackageId => v30.ValidateDarResponse(mainPackageId = mainPackageId.unwrap)) + for { + psid <- findSynchronizerId(request.synchronizerId).mapK(FutureUnlessShutdown.outcomeK) + result <- service + .validateDar(request.data, request.filename, psid) + .map(mainPackageId => v30.ValidateDarResponse(mainPackageId = mainPackageId.unwrap)) + .leftMap(_.asGrpcError) + } yield result + EitherTUtil.toFuture( ret - .leftMap(_.asGrpcError) .onShutdown(Left(GrpcErrors.AbortedDueToShutdown.Error().asGrpcError)) ) } @@ -89,6 +95,7 @@ class GrpcPackageService( uploadDarDataP, vetAllPackages, synchronizeVettingP, + synchronizerIdP, ) = request val ret = @@ -102,14 +109,17 @@ class GrpcPackageService( ) ) .leftMap(_.asGrpcError) + psidO <- Option + .when(vetAllPackages)(synchronizerIdP) + .traverse(findSynchronizerId(_).mapK(FutureUnlessShutdown.outcomeK)) darIds <- service .upload( - uploadDarData, + dars = uploadDarData, submissionIdO = None, - vetAllPackages = vetAllPackages, - synchronizeVetting = - if (synchronizeVettingP) synchronizeVetting - else PackageVettingSynchronization.NoSync, + vettingInfo = psidO.map(psid => + psid -> (if (synchronizeVettingP) synchronizeVetting + else PackageVettingSynchronization.NoSync) + ), ) .leftMap(_.asGrpcError) } yield v30.UploadDarResponse(darIds = darIds.map(_.unwrap)) @@ -151,14 +161,57 @@ class GrpcPackageService( EitherTUtil.toFuture(ret) } + // Given a synchronizer ID as a raw string, return the physical synchronizer + // ID. 
If no synchronizer ID is provided and only one synchronizer is + // connected, return that instead of erroring out. If multiple synchronizers + // are connected, error out. + private def findSynchronizerId( + synchronizerIdRaw: Option[String] + )(implicit + traceContext: TraceContext + ): EitherT[Future, StatusRuntimeException, PhysicalSynchronizerId] = + for { + synchronizerIdO <- EitherT + .fromEither[Future]( + synchronizerIdRaw.traverse(SynchronizerId.fromProtoPrimitive(_, "synchronizer_id")) + ) + .leftMap(ProtoDeserializationFailure.Wrap(_).asGrpcError) + + connected = connectedSynchronizers() + validatedSpecifiedSynchronizerIdO = synchronizerIdO.map(synchronizerId => + connected + .find(_.logical == synchronizerId) + .toRight( + CantonPackageServiceError.NotConnectedToSynchronizer.Error(synchronizerId.toString) + ) + ) + singleConnectedSynchronizer = connected.toSeq match { + case Seq() => Left(CantonPackageServiceError.CannotAutodetectSynchronizer.Failure(Seq())) + case Seq(onlySynchronizerId) => Right(onlySynchronizerId) + case multiple => + Left( + CantonPackageServiceError.CannotAutodetectSynchronizer.Failure(multiple.map(_.logical)) + ) + } + synchronizerId <- EitherT + .fromEither[Future]( + validatedSpecifiedSynchronizerIdO + .map(_.leftMap(_.asGrpcError)) + .getOrElse(singleConnectedSynchronizer.leftMap(_.asGrpcError)) + ) + + } yield synchronizerId + override def vetDar(request: v30.VetDarRequest): Future[v30.VetDarResponse] = { implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext val ret = for { + psid <- findSynchronizerId(request.synchronizerId) hash <- EitherT.fromEither[Future](extractMainPackageId(request.mainPackageId)) _unit <- service .vetDar( hash, if (request.synchronize) synchronizeVetting else PackageVettingSynchronization.NoSync, + psid, ) .leftMap(_.asGrpcError) .onShutdown(Left(GrpcErrors.AbortedDueToShutdown.Error().asGrpcError)) @@ -171,9 +224,10 @@ class GrpcPackageService( implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext val ret = for { + synchronizerId <- findSynchronizerId(request.synchronizerId) hash <- EitherT.fromEither[Future](extractMainPackageId(request.mainPackageId)) _unit <- service - .unvetDar(hash) + .unvetDar(hash, synchronizerId) .leftMap(_.asGrpcError) .onShutdown(Left(GrpcErrors.AbortedDueToShutdown.Error().asGrpcError)) } yield v30.UnvetDarResponse() @@ -188,7 +242,7 @@ class GrpcPackageService( for { hash <- EitherT.fromEither[Future](hashE) _unit <- service - .removeDar(hash) + .removeDar(hash, connectedSynchronizers()) .leftMap(_.asGrpcError) .onShutdown(Left(GrpcErrors.AbortedDueToShutdown.Error().asGrpcError)) } yield v30.RemoveDarResponse() diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcParticipantRepairService.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcParticipantRepairService.scala index f4a31cefc9..1ffd7b1b57 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcParticipantRepairService.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcParticipantRepairService.scala @@ -18,14 +18,18 @@ import com.digitalasset.canton.networking.grpc.CantonGrpcUtil import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.* import com.digitalasset.canton.participant.ParticipantNodeParameters import 
com.digitalasset.canton.participant.admin.data.ActiveContractOld.loadFromByteString -import com.digitalasset.canton.participant.admin.data.{ContractIdImportMode, RepairContract} +import com.digitalasset.canton.participant.admin.data.{ + ContractImportMode, + RepairContract, + RepresentativePackageIdOverride, +} import com.digitalasset.canton.participant.admin.grpc.GrpcParticipantRepairService.{ ValidExportAcsOldRequest, ValidExportAcsRequest, } import com.digitalasset.canton.participant.admin.repair.RepairServiceError.ImportAcsError import com.digitalasset.canton.participant.admin.repair.{ - ContractIdsImportProcessor, + ContractAuthenticationImportProcessor, RepairServiceError, } import com.digitalasset.canton.participant.sync.CantonSyncService @@ -50,7 +54,6 @@ import com.digitalasset.canton.util.{ OptionUtil, ResourceUtil, } -import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.canton.{ LfPartyId, ReassignmentCounter, @@ -136,9 +139,7 @@ final class GrpcParticipantRepairService( )(implicit traceContext: TraceContext): Future[Unit] = { val gzipOut = new GZIPOutputStream(out) val res = for { - validRequest <- EitherT.fromEither[FutureUnlessShutdown]( - ValidExportAcsOldRequest(request, sync.stateInspection.allProtocolVersions) - ) + validRequest <- EitherT.fromEither[FutureUnlessShutdown](ValidExportAcsOldRequest(request)) timestampAsString = validRequest.timestamp.fold("head")(ts => s"at $ts") _ = logger.info( s"Exporting active contract set ($timestampAsString) for parties ${validRequest.parties}" @@ -151,7 +152,6 @@ final class GrpcParticipantRepairService( _.filterString.startsWith(request.filterSynchronizerId), validRequest.parties, validRequest.timestamp, - validRequest.contractSynchronizerRenames, skipCleanTimestampCheck = validRequest.force, partiesOffboarding = validRequest.partiesOffboarding, ) @@ -221,7 +221,7 @@ final class GrpcParticipantRepairService( override def onCompleted(): Unit = { val (workflowIdPrefix, allowContractIdSuffixRecomputation) = tryArgs - val res = importAcsSnapshot( + val res = importAcsSnapshotOld( data = ByteString.copyFrom(outputStream.toByteArray), workflowIdPrefix = workflowIdPrefix, allowContractIdSuffixRecomputation = allowContractIdSuffixRecomputation, @@ -238,16 +238,16 @@ final class GrpcParticipantRepairService( } } - private def importAcsSnapshot( + private def importAcsSnapshotOld( data: ByteString, workflowIdPrefix: String, allowContractIdSuffixRecomputation: Boolean, )(implicit traceContext: TraceContext): Future[Map[String, String]] = - importAcsContracts( + importAcsContractsOld( loadFromByteString(data).map(contracts => contracts.map(_.toRepairContract)), workflowIdPrefix, - if (allowContractIdSuffixRecomputation) ContractIdImportMode.Recomputation - else ContractIdImportMode.Validation, + if (allowContractIdSuffixRecomputation) ContractImportMode.Recomputation + else ContractImportMode.Validation, ) override def exportAcs( @@ -348,6 +348,10 @@ final class GrpcParticipantRepairService( parsingResult.leftMap(error => RepairServiceError.InvalidArgument.Error(error.message)) } + /* + Note that `responseObserver` originates from `GrpcStreamingUtils.streamToServer` which is + a wrapper that turns the responses into a promise/future. This is not a true bidirectional stream. 
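+
+  As a hedged illustration (the client-side names here are hypothetical), a caller
+  streams the snapshot in chunks through the standard async-stub pattern and awaits
+  the single aggregated response:
+
+    val requestObserver = repairServiceStub.importAcs(responseObserver)
+    snapshotChunks.foreach(chunk =>
+      requestObserver.onNext(ImportAcsRequest(acsSnapshot = chunk, workflowIdPrefix = prefix))
+    )
+    requestObserver.onCompleted() // the server answers once with the contract-id remapping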
+   */
  override def importAcs(
      responseObserver: StreamObserver[ImportAcsResponse]
  ): StreamObserver[ImportAcsRequest] = {
@@ -355,137 +359,158 @@ final class GrpcParticipantRepairService(
    implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext
    // TODO(#23818): This buffer will contain the whole ACS snapshot.
    val outputStream = new ByteArrayOutputStream()
-    // (workflowIdPrefix, ContractIdImportMode)
-    val args = new AtomicReference[Option[(String, ContractIdImportMode, Set[PartyId])]](None)
-    def tryArgs: (String, ContractIdImportMode, Set[PartyId]) =
+
+    // (workflowIdPrefix, ContractImportMode, excludedStakeholders, representativePackageIdOverride)
+    type ImportArgs = (String, ContractImportMode, Set[PartyId], RepresentativePackageIdOverride)
+
+    val args = new AtomicReference[Option[ImportArgs]](None)
+
+    def recordedArgs: Either[String, ImportArgs] =
      args
        .get()
-        .getOrElse(throw new IllegalStateException("The import ACS request fields are not set"))
+        .toRight("The import ACS request fields are not set")
+
+    def setOrCheck(
+        workflowIdPrefix: String,
+        contractImportMode: ContractImportMode,
+        excludeStakeholders: Set[PartyId],
+        representativePackageIdOverride: RepresentativePackageIdOverride,
+    ): Either[String, Unit] = {
+      val newOrMatchingValue = Some(
+        (workflowIdPrefix, contractImportMode, excludeStakeholders, representativePackageIdOverride)
+      )
+      if (args.compareAndSet(None, newOrMatchingValue)) {
+        Right(()) // This was the first message, success, set.
+      } else {
+        recordedArgs.flatMap {
+          case (oldWorkflowIdPrefix, _, _, _) if oldWorkflowIdPrefix != workflowIdPrefix =>
+            Left(
+              s"Workflow ID prefix cannot be changed from $oldWorkflowIdPrefix to $workflowIdPrefix"
+            )
+          case (_, oldContractImportMode, _, _) if oldContractImportMode != contractImportMode =>
+            Left(
+              s"Contract import mode cannot be changed from $oldContractImportMode to $contractImportMode"
+            )
+          case (_, _, oldExcludedStakeholders, _)
+              if oldExcludedStakeholders != excludeStakeholders =>
+            Left(
+              s"Excluded stakeholders cannot be changed from $oldExcludedStakeholders to $excludeStakeholders"
+            )
+          case (_, _, _, oldRepresentativePackageIdOverride)
+              if oldRepresentativePackageIdOverride != representativePackageIdOverride =>
+            Left(
+              s"Representative package ID override cannot be changed from $oldRepresentativePackageIdOverride to $representativePackageIdOverride"
+            )

-    new StreamObserver[ImportAcsRequest] {
-      def setOrCheck(
-          workflowIdPrefix: String,
-          contractIdImportMode: ContractIdImportMode,
-          excludeStakeholders: Set[PartyId],
-      ): Try[Unit] =
-        Try {
-          val newOrMatchingValue =
-            Some((workflowIdPrefix, contractIdImportMode, excludeStakeholders))
-          if (!args.compareAndSet(None, newOrMatchingValue)) {
-            val (oldWorkflowIdPrefix, oldContractIdImportMode, oldExcludedStakeholders) = tryArgs
-            if (workflowIdPrefix != oldWorkflowIdPrefix) {
-              throw new IllegalArgumentException(
-                s"Workflow ID prefix cannot be changed from $oldWorkflowIdPrefix to $workflowIdPrefix"
-              )
-            } else if (oldContractIdImportMode != contractIdImportMode) {
-              throw new IllegalArgumentException(
-                s"Contract ID import mode cannot be changed from $oldContractIdImportMode to $contractIdImportMode"
-              )
-            } else if (oldExcludedStakeholders != excludeStakeholders) {
-              throw new IllegalArgumentException(
-                s"Exclude parties cannot be changed from $oldExcludedStakeholders to $excludeStakeholders"
-              )
-            }
-          }
+          case _ => Right(()) // All arguments matched successfully
        }
+      }
+    }

-      override def onNext(request: ImportAcsRequest): Unit = {
+    new StreamObserver[ImportAcsRequest] {

-        val processRequest =
-          for {
-            contractIdRecomputationMode <- ContractIdImportMode
-              .fromProtoV30(
-                request.contractIdSuffixRecomputationMode
-              )
-              .fold(
-                left => Failure(new IllegalArgumentException(left.message)),
-                right => Success(right),
-              )
-            excludedStakeholders <- request.excludedStakeholderIds
-              .traverse(party =>
-                UniqueIdentifier
-                  .fromProtoPrimitive(party, "excluded_stakeholder_ids")
-                  .map(PartyId(_))
-              )
-              .fold(
-                left => Failure(new IllegalArgumentException(left.message)),
-                right => Success(right),
-              )
-            _ <- setOrCheck(
-              request.workflowIdPrefix,
-              contractIdRecomputationMode,
-              excludedStakeholders.toSet,
+      override def onNext(request: ImportAcsRequest): Unit = {
+        val processRequest: Either[String, Unit] = for {
+          contractImportMode <- ContractImportMode
+            .fromProtoV30(request.contractImportMode)
+            .leftMap(_.message)
+          excludedStakeholders <- request.excludedStakeholderIds
+            .traverse(party =>
+              UniqueIdentifier
+                .fromProtoPrimitive(party, "excluded_stakeholder_ids")
+                .map(PartyId(_))
            )
-            _ <- Try(outputStream.write(request.acsSnapshot.toByteArray))
-          } yield ()
+            .leftMap(_.message)
+          representativePackageIdOverrideO <- request.representativePackageIdOverride
+            .traverse(RepresentativePackageIdOverride.fromProtoV30)
+            .leftMap(_.message)
+          _ <- setOrCheck(
+            request.workflowIdPrefix,
+            contractImportMode,
+            excludedStakeholders.toSet,
+            representativePackageIdOverrideO.getOrElse(RepresentativePackageIdOverride.NoOverride),
+          )
+        } yield ()

-        processRequest match {
-          case Failure(exception) =>
-            outputStream.close()
-            responseObserver.onError(exception)
-          case Success(_) =>
-            () // Nothing to do, just move on to the next request
-        }
-      }
+        processRequest.fold(
+          // On failure: signal the error via `responseObserver.onError`.
+          // The observer's top-level onError will handle cleanup.
+ errorMessage => responseObserver.onError(new IllegalArgumentException(errorMessage)), + _ => outputStream.write(request.acsSnapshot.toByteArray), + ) - override def onError(t: Throwable): Unit = { - responseObserver.onError(t) - outputStream.close() } + override def onError(t: Throwable): Unit = + try { + responseObserver.onError(t) + } finally { + outputStream.close() + } + override def onCompleted(): Unit = { - val (workflowIdPrefix, contractIdImportMode, excludedStakeholders) = tryArgs - val res = importAcsNewSnapshot( - acsSnapshot = ByteString.copyFrom(outputStream.toByteArray), - workflowIdPrefix = workflowIdPrefix, - contractIdImportMode = contractIdImportMode, - excludedStakeholders = excludedStakeholders, - ) + val result: EitherT[Future, Throwable, Map[String, String]] = for { - Try(Await.result(res, processingTimeout.unbounded.duration)) match { - case Failure(exception) => responseObserver.onError(exception) - case Success(contractIdRemapping) => - responseObserver.onNext(ImportAcsResponse(contractIdRemapping)) - responseObserver.onCompleted() - } - outputStream.close() - } - } - } + argsTuple <- EitherT.fromEither[Future]( + recordedArgs.leftMap(new IllegalStateException(_)) + ) + ( + workflowIdPrefix, + contractImportMode, + excludedStakeholders, + representativePackageIdOverride, + ) = argsTuple + + acsSnapshot <- EitherT.fromEither[Future]( + Try(ByteString.copyFrom(outputStream.toByteArray)).toEither + ) - private def importAcsNewSnapshot( - acsSnapshot: ByteString, - workflowIdPrefix: String, - contractIdImportMode: ContractIdImportMode, - excludedStakeholders: Set[PartyId], - )(implicit traceContext: TraceContext): Future[Map[String, String]] = { + contractIdRemapping <- EitherT.liftF[Future, Throwable, Map[String, String]]( + ParticipantCommon.importAcsNewSnapshot( + acsSnapshot = acsSnapshot, + workflowIdPrefix = workflowIdPrefix, + contractImportMode = contractImportMode, + excludedStakeholders = excludedStakeholders, + representativePackageIdOverride = representativePackageIdOverride, + sync = sync, + batching = batching, + loggerFactory = loggerFactory, + ) + ) + } yield contractIdRemapping - val contractsE = if (excludedStakeholders.isEmpty) { - RepairContract.loadAcsSnapshot(acsSnapshot) - } else { - RepairContract - .loadAcsSnapshot(acsSnapshot) - .map( - _.filter(_.contract.stakeholders.intersect(excludedStakeholders.map(_.toLf)).isEmpty) - ) + result + .thereafter { _ => + outputStream.close() + } + .value // Get the underlying Future[Either[...]] + .onComplete { + // The Future itself failed (e.g., a fatal error in `thereafter`) + case Failure(exception) => + responseObserver.onError(exception) + + case Success(result) => + result match { + case Left(exception) => + responseObserver.onError(exception) + case Right(contractIdRemapping) => + responseObserver.onNext(ImportAcsResponse(contractIdRemapping)) + responseObserver.onCompleted() + } + } + } } - - importAcsContracts( - contractsE, - workflowIdPrefix, - contractIdImportMode, - ) } - private def importAcsContracts( + private def importAcsContractsOld( contracts: Either[String, List[RepairContract]], workflowIdPrefix: String, - contractIdImportMode: ContractIdImportMode, + contractImportMode: ContractImportMode, )(implicit traceContext: TraceContext): Future[Map[String, String]] = { val resultET = for { - repairContracts <- EitherT - .fromEither[Future](contracts) + repairContracts <- contracts + .toEitherT[FutureUnlessShutdown] .ensure( // TODO(#23073) - Remove this restriction once #27325 has been 
re-implemented "Found at least one contract with a non-zero reassignment counter. ACS import does not yet support it." )(_.forall(_.reassignmentCounter == ReassignmentCounter.Genesis)) @@ -493,11 +518,13 @@ final class GrpcParticipantRepairService( workflowIdPrefixO = Option.when(workflowIdPrefix != "")(workflowIdPrefix) activeContractsWithRemapping <- - ContractIdsImportProcessor( + ContractAuthenticationImportProcessor( loggerFactory, sync.syncPersistentStateManager, sync.pureCryptoApi, - contractIdImportMode, + sync.contractHasher, + sync.contractValidator, + contractImportMode, )(repairContracts) (activeContractsWithValidContractIds, contractIdRemapping) = activeContractsWithRemapping @@ -508,22 +535,23 @@ final class GrpcParticipantRepairService( batching.parallelism, batching.maxAcsImportBatchSize, )(contracts)( - writeContractsBatch(workflowIdPrefixO)(synchronizerId, _) + writeContractsBatchOld(workflowIdPrefixO)(synchronizerId, _) + .mapK(FutureUnlessShutdown.outcomeK) ) } } yield contractIdRemapping resultET.value.flatMap { - case Left(error) => Future.failed(ImportAcsError.Error(error).asGrpcError) + case Left(error) => FutureUnlessShutdown.failed(ImportAcsError.Error(error).asGrpcError) case Right(contractIdRemapping) => - Future.successful( + FutureUnlessShutdown.pure( contractIdRemapping.map { case (oldCid, newCid) => (oldCid.coid, newCid.coid) } ) - } + }.asGrpcFuture } - private def writeContractsBatch( + private def writeContractsBatchOld( workflowIdPrefixO: Option[String] )(synchronizerId: SynchronizerId, contracts: Seq[RepairContract])(implicit traceContext: TraceContext @@ -846,48 +874,8 @@ object GrpcParticipantRepairService { // TODO(#24610) - remove, used by ExportAcsOldRequest only private object ValidExportAcsOldRequest { - - private def validateContractSynchronizerRenames( - contractSynchronizerRenames: Map[String, ExportAcsOldRequest.TargetSynchronizer], - allProtocolVersions: Map[SynchronizerId, ProtocolVersion], - ): Either[String, List[(SynchronizerId, (SynchronizerId, ProtocolVersion))]] = - contractSynchronizerRenames.toList.traverse { - case ( - source, - ExportAcsOldRequest.TargetSynchronizer(targetSynchronizer, targetProtocolVersionRaw), - ) => - for { - sourceId <- SynchronizerId - .fromProtoPrimitive(source, "source synchronizer id") - .leftMap(_.message) - - targetSynchronizerId <- SynchronizerId - .fromProtoPrimitive(targetSynchronizer, "target synchronizer id") - .leftMap(_.message) - targetProtocolVersion <- ProtocolVersion - .fromProtoPrimitive(targetProtocolVersionRaw) - .leftMap(_.toString) - - /* - The `targetProtocolVersion` should be the one running on the corresponding synchronizer. 
- */ - _ <- allProtocolVersions - .get(targetSynchronizerId) - .map { foundProtocolVersion => - Either.cond( - foundProtocolVersion == targetProtocolVersion, - (), - s"Inconsistent protocol versions for synchronizer $targetSynchronizerId: found version is $foundProtocolVersion, passed is $targetProtocolVersion", - ) - } - .getOrElse(Either.unit) - - } yield (sourceId, (targetSynchronizerId, targetProtocolVersion)) - } - private def validateRequestOld( - request: ExportAcsOldRequest, - allProtocolVersions: Map[SynchronizerId, ProtocolVersion], + request: ExportAcsOldRequest ): Either[String, ValidExportAcsOldRequest] = for { parties <- request.parties.traverse(party => @@ -896,37 +884,29 @@ object GrpcParticipantRepairService { timestamp <- request.timestamp .traverse(CantonTimestamp.fromProtoTimestamp) .leftMap(_.message) - contractSynchronizerRenames <- validateContractSynchronizerRenames( - request.contractSynchronizerRenames, - allProtocolVersions, - ) } yield ValidExportAcsOldRequest( parties.toSet, timestamp, - contractSynchronizerRenames.toMap, force = request.force, partiesOffboarding = request.partiesOffboarding, ) def apply( - request: ExportAcsOldRequest, - allProtocolVersions: Map[SynchronizerId, ProtocolVersion], + request: ExportAcsOldRequest )(implicit elc: ErrorLoggingContext ): Either[RepairServiceError, ValidExportAcsOldRequest] = for { - validRequest <- validateRequestOld(request, allProtocolVersions).leftMap( + validRequest <- validateRequestOld(request).leftMap( RepairServiceError.InvalidArgument.Error(_) ) } yield validRequest - } // TODO(#24610) - remove, used by ExportAcsOldRequest only private final case class ValidExportAcsOldRequest private ( parties: Set[LfPartyId], timestamp: Option[CantonTimestamp], - contractSynchronizerRenames: Map[SynchronizerId, (SynchronizerId, ProtocolVersion)], force: Boolean, // if true, does not check whether `timestamp` is clean partiesOffboarding: Boolean, ) diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcPartyManagementService.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcPartyManagementService.scala index 89c1104c06..d74892fafc 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcPartyManagementService.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcPartyManagementService.scala @@ -4,12 +4,12 @@ package com.digitalasset.canton.participant.admin.grpc import cats.data.EitherT -import cats.implicits.toTraverseOps -import cats.syntax.either.* +import cats.syntax.all.* import com.daml.ledger.api.v2.topology_transaction.TopologyTransaction as LapiTopologyTransaction import com.digitalasset.canton.ProtoDeserializationError.OtherError import com.digitalasset.canton.admin.participant.v30 -import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.admin.participant.v30.* +import com.digitalasset.canton.config.{BatchingConfig, ProcessingTimeout} import com.digitalasset.canton.crypto.Hash import com.digitalasset.canton.data.{CantonTimestamp, Offset} import com.digitalasset.canton.ledger.participant.state.{InternalIndexService, SynchronizerIndex} @@ -17,9 +17,15 @@ import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} import 
com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors.AbortedDueToShutdown import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.mapErrNewEUS +import com.digitalasset.canton.participant.ParticipantNodeParameters +import com.digitalasset.canton.participant.admin.data.{ + ContractImportMode, + RepresentativePackageIdOverride, +} import com.digitalasset.canton.participant.admin.party.PartyReplicationAdminWorkflow.PartyReplicationArguments import com.digitalasset.canton.participant.admin.party.{ PartyManagementServiceError, + PartyOnboardingCompletion, PartyParticipantPermission, PartyReplicationAdminWorkflow, } @@ -27,20 +33,33 @@ import com.digitalasset.canton.participant.sync.CantonSyncService import com.digitalasset.canton.platform.store.backend.EventStorageBackend.SynchronizerOffset import com.digitalasset.canton.platform.store.backend.ParameterStorageBackend.LedgerEnd import com.digitalasset.canton.serialization.ProtoConverter -import com.digitalasset.canton.time.NonNegativeFiniteDuration +import com.digitalasset.canton.time.{NonNegativeFiniteDuration, SynchronizerTimeTracker} import com.digitalasset.canton.topology.client.SynchronizerTopologyClientWithInit +import com.digitalasset.canton.topology.store.TopologyStore +import com.digitalasset.canton.topology.store.TopologyStoreId.SynchronizerStore import com.digitalasset.canton.topology.transaction.ParticipantPermission -import com.digitalasset.canton.topology.{ParticipantId, PartyId, SynchronizerId, UniqueIdentifier} +import com.digitalasset.canton.topology.{ + ParticipantId, + PartyId, + SynchronizerId, + SynchronizerTopologyManager, + UniqueIdentifier, +} import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} +import com.digitalasset.canton.util.Thereafter.syntax.ThereafterAsyncOps import com.digitalasset.canton.util.{EitherTUtil, GrpcStreamingUtils} +import com.google.protobuf.ByteString +import com.google.protobuf.duration.Duration import io.grpc.stub.StreamObserver import io.grpc.{Status, StatusRuntimeException} import org.apache.pekko.actor.ActorSystem import org.apache.pekko.stream.scaladsl.Sink -import java.io.OutputStream +import java.io.{ByteArrayOutputStream, OutputStream} +import java.util.UUID import java.util.zip.GZIPOutputStream import scala.concurrent.{ExecutionContextExecutor, Future} +import scala.util.control.NonFatal import scala.util.{Failure, Success} /** grpc service to allow modifying party hosting on participants @@ -49,6 +68,7 @@ class GrpcPartyManagementService( adminWorkflowO: Option[PartyReplicationAdminWorkflow], processingTimeout: ProcessingTimeout, sync: CantonSyncService, + parameters: ParticipantNodeParameters, protected val loggerFactory: NamedLoggerFactory, )(implicit ec: ExecutionContextExecutor, @@ -56,6 +76,8 @@ class GrpcPartyManagementService( ) extends v30.PartyManagementServiceGrpc.PartyManagementService with NamedLogging { + private val batching: BatchingConfig = parameters.batchingConfig + override def addPartyAsync( request: v30.AddPartyAsyncRequest ): Future[v30.AddPartyAsyncResponse] = { @@ -188,7 +210,7 @@ class GrpcPartyManagementService( allLogicalSynchronizerIds = sync.syncPersistentStateManager.getAllLatest.keySet validRequest <- validateExportPartyAcsRequest(request, ledgerEnd, allLogicalSynchronizerIds) - ValidExportPartyAcsRequest( + ValidPartyReplicationCommonRequestParams( party, synchronizerId, targetParticipant, @@ -217,6 +239,27 @@ class GrpcPartyManagementService( .fromEither[FutureUnlessShutdown](findTopologyClient(synchronizerId, sync)) 
.leftMap(PartyManagementServiceError.InvalidState.Error(_)) + snapshot <- EitherT.right( + client.awaitSnapshot(activationTimestamp.immediateSuccessor) + ) + // TODO(#28208) - Indirection because LAPI topology transaction does not include the onboarding flag + activeParticipants <- EitherT.right( + snapshot.activeParticipantsOf(party.toLf) + ) + _ <- + EitherT.cond[FutureUnlessShutdown]( + activeParticipants.exists { case (participantId, participantAttributes) => + participantId == targetParticipant && + participantAttributes.onboarding + }, + (), + PartyManagementServiceError.InvalidState + .AbortAcsExportForMissingOnboardingFlag( + party, + targetParticipant, + ): PartyManagementServiceError, + ) + partiesHostedByTargetParticipant <- EitherT.right( client .awaitSnapshot(activationTimestamp) @@ -231,7 +274,7 @@ class GrpcPartyManagementService( otherPartiesHostedByTargetParticipant = partiesHostedByTargetParticipant excl party excl targetParticipant.adminParty - snapshot <- ParticipantCommon + _ <- ParticipantCommon .writeAcsSnapshot( indexService, Set(party), @@ -243,7 +286,7 @@ class GrpcPartyManagementService( .leftMap(msg => PartyManagementServiceError.IOStream.Error(msg): PartyManagementServiceError ) - } yield snapshot + } yield () mapErrNewEUS(res.leftMap(_.toCantonRpcError)) } @@ -254,13 +297,41 @@ class GrpcPartyManagementService( synchronizerIds: Set[SynchronizerId], )(implicit elc: ErrorLoggingContext - ): EitherT[FutureUnlessShutdown, PartyManagementServiceError, ValidExportPartyAcsRequest] = { + ): EitherT[ + FutureUnlessShutdown, + PartyManagementServiceError, + ValidPartyReplicationCommonRequestParams, + ] = + validatePartyReplicationCommonRequestParams( + request.partyId, + request.synchronizerId, + request.targetParticipantUid, + request.beginOffsetExclusive, + request.waitForActivationTimeout, + )(ledgerEnd, synchronizerIds) + + private def validatePartyReplicationCommonRequestParams( + partyId: String, + synchronizerId: String, + targetParticipantUid: String, + beginOffsetExclusive: Long, + waitForActivationTimeout: Option[Duration], + )( + ledgerEnd: Offset, + synchronizerIds: Set[SynchronizerId], + )(implicit + elc: ErrorLoggingContext + ): EitherT[ + FutureUnlessShutdown, + PartyManagementServiceError, + ValidPartyReplicationCommonRequestParams, + ] = { val parsingResult = for { party <- UniqueIdentifier - .fromProtoPrimitive(request.partyId, "party_id") + .fromProtoPrimitive(partyId, "party_id") .map(PartyId(_)) parsedSynchronizerId <- SynchronizerId.fromProtoPrimitive( - request.synchronizerId, + synchronizerId, "synchronizer_id", ) synchronizerId <- Either.cond( @@ -270,12 +341,12 @@ class GrpcPartyManagementService( ) targetParticipantId <- UniqueIdentifier .fromProtoPrimitive( - request.targetParticipantUid, + targetParticipantUid, "target_participant_uid", ) .map(ParticipantId(_)) parsedBeginOffsetExclusive <- ProtoConverter - .parseOffset("begin_offset_exclusive", request.beginOffsetExclusive) + .parseOffset("begin_offset_exclusive", beginOffsetExclusive) beginOffsetExclusive <- Either.cond( parsedBeginOffsetExclusive <= ledgerEnd, parsedBeginOffsetExclusive, @@ -283,10 +354,10 @@ class GrpcPartyManagementService( s"Begin ledger offset $parsedBeginOffsetExclusive needs to be smaller or equal to the ledger end $ledgerEnd" ), ) - waitForActivationTimeout <- request.waitForActivationTimeout.traverse( + waitForActivationTimeout <- waitForActivationTimeout.traverse( NonNegativeFiniteDuration.fromProtoPrimitive("wait_for_activation_timeout")(_) ) - } yield 
ValidExportPartyAcsRequest( + } yield ValidPartyReplicationCommonRequestParams( party, synchronizerId, targetParticipantId, @@ -300,6 +371,7 @@ class GrpcPartyManagementService( ) } + // TODO(#24065) - There may be multiple party on- and offboarding transactions which may break this method private def findSinglePartyActivationTopologyTransaction( indexService: InternalIndexService, party: PartyId, @@ -361,6 +433,87 @@ class GrpcPartyManagementService( topoClient <- sync.lookupTopologyClient(psid).toRight("Absent topology client") } yield topoClient + private def findTopologyServices( + synchronizerId: SynchronizerId, + sync: CantonSyncService, + ): Either[ + String, + (SynchronizerTopologyManager, TopologyStore[SynchronizerStore], SynchronizerTimeTracker), + ] = + for { + psid <- sync.syncPersistentStateManager + .latestKnownPSId(synchronizerId) + .toRight(s"Undefined physical synchronizer ID for given $synchronizerId") + topologyStore <- sync.syncPersistentStateManager + .get(psid) + .map(_.topologyStore) + .toRight("Cannot get topology store") + topologyManager <- sync.syncPersistentStateManager + .get(psid) + .map(_.topologyManager) + .toRight("Cannot get topology manager") + timeTracker <- sync.lookupSynchronizerTimeTracker(synchronizerId) + } yield (topologyManager, topologyStore, timeTracker) + + /* + Note that `responseObserver` originates from `GrpcStreamingUtils.streamToServer` which is + a wrapper that turns the responses into a promise/future. This is not a true bidirectional stream. + */ + override def importPartyAcs( + responseObserver: StreamObserver[ImportPartyAcsResponse] + ): StreamObserver[ImportPartyAcsRequest] = { + implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext + + // TODO(#23818): This buffer will contain the whole ACS snapshot. 
+ val outputStream = new ByteArrayOutputStream() + + new StreamObserver[ImportPartyAcsRequest] { + + override def onNext(request: ImportPartyAcsRequest): Unit = + outputStream.write(request.acsSnapshot.toByteArray) + + override def onError(t: Throwable): Unit = + try { + outputStream.close() + } finally { + responseObserver.onError(t) + } + + override def onCompleted(): Unit = { + // Synchronously try to get the snapshot and start the import + val importFuture = + try { + ParticipantCommon.importAcsNewSnapshot( + acsSnapshot = ByteString.copyFrom(outputStream.toByteArray), + batching = batching, + contractImportMode = ContractImportMode.Validation, + excludedStakeholders = Set.empty, + loggerFactory = loggerFactory, + // TODO(#27872): Consider allowing package-id overrides for party imports + representativePackageIdOverride = RepresentativePackageIdOverride.NoOverride, + sync = sync, + workflowIdPrefix = s"import-party-acs-${UUID.randomUUID}", + ) + } catch { + // If toByteArray or importAcsNewSnapshot fails + case NonFatal(e) => Future.failed(e) + } + + importFuture + .thereafter { _ => + outputStream.close() + } + .onComplete { + case Failure(exception) => + responseObserver.onError(exception) + case Success(_) => + responseObserver.onNext(ImportPartyAcsResponse()) + responseObserver.onCompleted() + } + } + } + } + override def getHighestOffsetByTimestamp( request: v30.GetHighestOffsetByTimestampRequest ): Future[v30.GetHighestOffsetByTimestampResponse] = { @@ -450,6 +603,127 @@ class GrpcPartyManagementService( mapErrNewEUS(res.leftMap(_.toCantonRpcError)) } + override def completePartyOnboarding( + request: v30.CompletePartyOnboardingRequest + ): Future[v30.CompletePartyOnboardingResponse] = { + implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext + val res = for { + r <- processPartyOnboardingRequest(request) + (onboarded, safeTime) = r + } yield v30.CompletePartyOnboardingResponse(onboarded, safeTime.map(x => x.toProtoTimestamp)) + mapErrNewEUS(res.leftMap(_.toCantonRpcError)) + } + + private def processPartyOnboardingRequest( + request: v30.CompletePartyOnboardingRequest + )(implicit + traceContext: TraceContext + ): EitherT[ + FutureUnlessShutdown, + PartyManagementServiceError, + (Boolean, Option[CantonTimestamp]), + ] = + for { + ledgerEnd <- EitherT + .fromEither[FutureUnlessShutdown](ParticipantCommon.findLedgerEnd(sync)) + .leftMap(PartyManagementServiceError.InvalidState.Error(_)) + allLogicalSynchronizerIds = sync.syncPersistentStateManager.getAllLatest.keySet + + validRequest <- validateCompletePartyOnboardingRequest( + request, + ledgerEnd, + allLogicalSynchronizerIds, + ) + ValidPartyReplicationCommonRequestParams( + party, + synchronizerId, + targetParticipant, + beginOffsetExclusive, + waitForActivationTimeout, + ) = validRequest + + indexService <- EitherT.fromOption[FutureUnlessShutdown]( + sync.internalIndexService, + PartyManagementServiceError.InvalidState.Error("Unavailable internal index service"), + ) + + topologyTx <- + findSinglePartyActivationTopologyTransaction( + indexService, + party, + beginOffsetExclusive, + synchronizerId, + targetParticipant, + waitForActivationTimeout, + ) + + (_activationOffset, activationTimestamp) = extractOffsetAndTimestamp(topologyTx) + + client <- EitherT + .fromEither[FutureUnlessShutdown](findTopologyClient(synchronizerId, sync)) + .leftMap(PartyManagementServiceError.InvalidState.Error(_)) + + snapshot <- EitherT.right( + client.awaitSnapshot(activationTimestamp.immediateSuccessor) + ) + // TODO(#28208) 
- Indirection because LAPI topology transaction does not include the onboarding flag + activeParticipants <- EitherT.right( + snapshot.activeParticipantsOf(party.toLf) + ) + _ <- EitherT.cond[FutureUnlessShutdown]( + activeParticipants.exists { case (participantId, participantAttributes) => + participantId == targetParticipant && + participantAttributes.onboarding + }, + (), + PartyManagementServiceError.InvalidState.MissingOnboardingFlagCannotCompleteOnboarding( + party, + targetParticipant, + ): PartyManagementServiceError, + ) + + topoServices <- EitherT + .fromEither[FutureUnlessShutdown](findTopologyServices(synchronizerId, sync)) + .leftMap(PartyManagementServiceError.InvalidState.Error(_): PartyManagementServiceError) + + (topologyManager, topologyStore, timeTracker) = topoServices + + onboarding = new PartyOnboardingCompletion( + party, + synchronizerId, + targetParticipant, + timeTracker, + topologyManager, + topologyStore, + client, + loggerFactory, + ) + + onboardingCompletionOutcome <- onboarding + .attemptCompletion(activationTimestamp) + .leftMap(PartyManagementServiceError.InvalidState.Error(_): PartyManagementServiceError) + + } yield (onboardingCompletionOutcome) + + private def validateCompletePartyOnboardingRequest( + request: v30.CompletePartyOnboardingRequest, + ledgerEnd: Offset, + synchronizerIds: Set[SynchronizerId], + )(implicit + elc: ErrorLoggingContext + ): EitherT[ + FutureUnlessShutdown, + PartyManagementServiceError, + ValidPartyReplicationCommonRequestParams, + ] = + validatePartyReplicationCommonRequestParams( + request.partyId, + request.synchronizerId, + request.targetParticipantUid, + request.beginOffsetExclusive, + request.waitForActivationTimeout, + )(ledgerEnd, synchronizerIds) + } object GrpcPartyManagementService { @@ -512,7 +786,7 @@ object GrpcPartyManagementService { } } -private final case class ValidExportPartyAcsRequest( +private final case class ValidPartyReplicationCommonRequestParams( party: PartyId, synchronizerId: SynchronizerId, targetParticipant: ParticipantId, diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/ParticipantCommon.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/ParticipantCommon.scala index 07589d1d83..b786eae91e 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/ParticipantCommon.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/ParticipantCommon.scala @@ -4,14 +4,30 @@ package com.digitalasset.canton.participant.admin.grpc import cats.data.EitherT +import cats.implicits.catsSyntaxParallelTraverse_ +import cats.syntax.either.* +import com.digitalasset.canton.ReassignmentCounter +import com.digitalasset.canton.config.BatchingConfig import com.digitalasset.canton.data.Offset import com.digitalasset.canton.ledger.participant.state.InternalIndexService import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.participant.admin.data.ActiveContract as ActiveContractValueClass +import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory} +import com.digitalasset.canton.participant.admin.data.{ + ActiveContract as ActiveContractValueClass, + ContractImportMode, + RepairContract, + RepresentativePackageIdOverride, +} +import com.digitalasset.canton.participant.admin.repair.RepairServiceError.ImportAcsError +import com.digitalasset.canton.participant.admin.repair.{ + 
ContractAuthenticationImportProcessor, + SelectRepresentativePackageIds, +} import com.digitalasset.canton.participant.sync.CantonSyncService import com.digitalasset.canton.topology.{PartyId, SynchronizerId} import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.ResourceUtil +import com.digitalasset.canton.util.{MonadUtil, ResourceUtil} +import com.google.protobuf.ByteString import org.apache.pekko.actor.ActorSystem import java.io.OutputStream @@ -104,4 +120,143 @@ private[admin] object ParticipantCommon { .mapK(FutureUnlessShutdown.outcomeK) } yield () + private[grpc] def importAcsNewSnapshot( + acsSnapshot: ByteString, + batching: BatchingConfig, + contractImportMode: ContractImportMode, + excludedStakeholders: Set[PartyId], + loggerFactory: NamedLoggerFactory, + representativePackageIdOverride: RepresentativePackageIdOverride, + sync: CantonSyncService, + workflowIdPrefix: String, + )(implicit + ec: ExecutionContext, + elc: ErrorLoggingContext, + traceContext: TraceContext, + ): Future[Map[String, String]] = { + + val packageMetadataSnapshot = sync.getPackageMetadataSnapshot + val selectRepresentativePackageIds = new SelectRepresentativePackageIds( + representativePackageIdOverride = representativePackageIdOverride, + knownPackages = packageMetadataSnapshot.packages.keySet, + packageNameMap = packageMetadataSnapshot.packageNameMap, + contractImportMode = contractImportMode, + loggerFactory = loggerFactory, + ) + val importer = new AcsImporter( + sync, + batching, + loggerFactory, + workflowIdPrefix, + contractImportMode, + selectRepresentativePackageIds, + ) + + importer.runImport(acsSnapshot, excludedStakeholders) + } + + private final class AcsImporter( + sync: CantonSyncService, + batching: BatchingConfig, + loggerFactory: NamedLoggerFactory, + workflowIdPrefix: String, + contractImportMode: ContractImportMode, + selectRepresentativePackageIds: SelectRepresentativePackageIds, + )(implicit + ec: ExecutionContext, + elc: ErrorLoggingContext, + traceContext: TraceContext, + ) { + + private val workflowIdPrefixO: Option[String] = + Option.when(workflowIdPrefix.nonEmpty)(workflowIdPrefix) + + def runImport( + acsSnapshot: ByteString, + excludedStakeholders: Set[PartyId], + ): Future[Map[String, String]] = { + + val contractsE = if (excludedStakeholders.isEmpty) { + RepairContract.loadAcsSnapshot(acsSnapshot) + } else { + RepairContract + .loadAcsSnapshot(acsSnapshot) + .map( + _.filter(_.contract.stakeholders.intersect(excludedStakeholders.map(_.toLf)).isEmpty) + ) + } + + importAcsContracts(contractsE) + } + + private def importAcsContracts( + contracts: Either[String, List[RepairContract]] + ): Future[Map[String, String]] = { + val resultET = for { + repairContracts <- contracts + .toEitherT[FutureUnlessShutdown] + .ensure( // TODO(#23073) - Remove this restriction once #27325 has been re-implemented + "Found at least one contract with a non-zero reassignment counter. ACS import does not yet support it." 
+ )(_.forall(_.reassignmentCounter == ReassignmentCounter.Genesis)) + + contractsWithOverriddenRpId <- selectRepresentativePackageIds(repairContracts) + .toEitherT[FutureUnlessShutdown] + + activeContractsWithRemapping <- + ContractAuthenticationImportProcessor( + loggerFactory, + sync.syncPersistentStateManager, + sync.pureCryptoApi, + sync.contractHasher, + sync.contractValidator, + contractImportMode, + )(contractsWithOverriddenRpId) + (activeContractsWithValidContractIds, contractIdRemapping) = + activeContractsWithRemapping + + _ <- activeContractsWithValidContractIds.groupBy(_.synchronizerId).toSeq.parTraverse_ { + case (synchronizerId, contracts) => + MonadUtil.batchedSequentialTraverse_( + batching.parallelism, + batching.maxAcsImportBatchSize, + )(contracts)( + writeContractsBatch(synchronizerId, _).mapK(FutureUnlessShutdown.outcomeK) + ) + } + + } yield contractIdRemapping + + resultET.value.flatMap { + case Left(error) => FutureUnlessShutdown.failed(ImportAcsError.Error(error).asGrpcError) + case Right(contractIdRemapping) => + FutureUnlessShutdown.pure( + contractIdRemapping.map { case (oldCid, newCid) => (oldCid.coid, newCid.coid) } + ) + }.asGrpcFuture + } + + private def writeContractsBatch( + synchronizerId: SynchronizerId, + contracts: Seq[RepairContract], + ): EitherT[Future, String, Unit] = + for { + alias <- EitherT.fromEither[Future]( + sync.aliasManager + .aliasForSynchronizerId(synchronizerId) + .toRight(s"Not able to find synchronizer alias for ${synchronizerId.toString}") + ) + + _ <- EitherT.fromEither[Future]( + sync.repairService.addContracts( + alias, + contracts, + ignoreAlreadyAdded = true, + ignoreStakeholderCheck = true, + workflowIdPrefix = workflowIdPrefixO, + ) + ) + } yield () + + } + } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspection.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspection.scala index f881aec9ae..d819c3687e 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspection.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspection.scala @@ -266,109 +266,103 @@ final class SyncStateInspection( EitherT.right(disabledCleaningF) } - // TODO(#26061) Fix this computation - def allProtocolVersions: Map[SynchronizerId, ProtocolVersion] = - syncPersistentStateManager.getAll.keySet - .map(id => id.logical -> id.protocolVersion) - .toMap - - /* - TODO(#26061) If this method cannot be removed, ensure this is correct. - In particular, it does not make sense to do every step for each PS. 
- */ def exportAcsDumpActiveContracts( outputStream: OutputStream, filterSynchronizerId: SynchronizerId => Boolean, parties: Set[LfPartyId], timestamp: Option[CantonTimestamp], - contractSynchronizerRenames: Map[SynchronizerId, (SynchronizerId, ProtocolVersion)], skipCleanTimestampCheck: Boolean, partiesOffboarding: Boolean, )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, AcsInspectionError, Unit] = { - val allSynchronizers = syncPersistentStateManager.getAll + // To disable/re-enable background pruning + val allSynchronizers: Map[PhysicalSynchronizerId, SyncPersistentState] = + syncPersistentStateManager.getAll + + // For the ACS export + val latestSynchronizers = syncPersistentStateManager.getAllLatest + + def writeACSToStream(synchronizerId: SynchronizerId, state: SyncPersistentState) = { + val pv = state.staticSynchronizerParameters.protocolVersion + + val acsInspection = state.acsInspection + val timeOfSnapshotO = timestamp.map(TimeOfChange.apply) + for { + result <- acsInspection + .forEachVisibleActiveContract( + synchronizerId.logical, + parties, + timeOfSnapshotO, + skipCleanTocCheck = skipCleanTimestampCheck, + ) { case (contractInst, reassignmentCounter) => + (for { + contract <- SerializableContract.fromLfFatContractInst(contractInst.inst) + activeContract = ActiveContractOld.create( + synchronizerId, + contract, + reassignmentCounter, + )(pv) + + _ <- activeContract.writeDelimitedTo(outputStream) + } yield ()) match { + case Left(errorMessage) => + Left( + AcsInspectionError.SerializationIssue( + synchronizerId.logical, + contractInst.contractId, + errorMessage, + ) + ) + case Right(_) => + outputStream.flush() + Either.unit + } + } - // disable journal cleaning for the duration of the dump - disableJournalCleaningForFilter(allSynchronizers, filterSynchronizerId) - .mapK(FutureUnlessShutdown.outcomeK) - .flatMap { _ => - MonadUtil.sequentialTraverse_(allSynchronizers) { - case (synchronizerId, state) if filterSynchronizerId(synchronizerId.logical) => - val (synchronizerIdForExport, protocolVersion) = - contractSynchronizerRenames.getOrElse( - synchronizerId.logical, - (synchronizerId.logical, state.staticSynchronizerParameters.protocolVersion), - ) - val acsInspection = state.acsInspection - val timeOfSnapshotO = timestamp.map(TimeOfChange.apply) - val ret = for { - result <- acsInspection - .forEachVisibleActiveContract( + _ <- result match { + case Some((allStakeholders, snapshotToc)) if partiesOffboarding => + for { + connectedSynchronizer <- EitherT.fromOption[FutureUnlessShutdown]( + connectedSynchronizersLookup.get(synchronizerId), + AcsInspectionError.OffboardingParty( synchronizerId.logical, - parties, - timeOfSnapshotO, - skipCleanTocCheck = skipCleanTimestampCheck, - ) { case (contractInst, reassignmentCounter) => - (for { - contract <- SerializableContract.fromLfFatContractInst(contractInst.inst) - activeContract = - ActiveContractOld.create( - synchronizerIdForExport, - contract, - reassignmentCounter, - )( - protocolVersion - ) - _ <- activeContract.writeDelimitedTo(outputStream) - } yield ()) match { - case Left(errorMessage) => - Left( - AcsInspectionError.SerializationIssue( - synchronizerId.logical, - contractInst.contractId, - errorMessage, - ) - ) - case Right(_) => - outputStream.flush() - Either.unit - } - } - - _ <- result match { - case Some((allStakeholders, snapshotToc)) if partiesOffboarding => - for { - connectedSynchronizer <- EitherT.fromOption[FutureUnlessShutdown]( - connectedSynchronizersLookup.get(synchronizerId), - 
AcsInspectionError.OffboardingParty( - synchronizerId.logical, - s"Unable to get topology client for synchronizer $synchronizerId; check synchronizer connectivity.", - ), - ) + s"Unable to get topology client for synchronizer $synchronizerId; check synchronizer connectivity.", + ), + ) - _ <- acsInspection.checkOffboardingSnapshot( - participantId, - offboardedParties = parties, - allStakeholders = allStakeholders, - snapshotToc = snapshotToc, - topologyClient = connectedSynchronizer.topologyClient, - ) - } yield () + _ <- acsInspection.checkOffboardingSnapshot( + participantId, + offboardedParties = parties, + allStakeholders = allStakeholders, + snapshotToc = snapshotToc, + topologyClient = connectedSynchronizer.topologyClient, + ) + } yield () - // Snapshot is empty or partiesOffboarding is false - case _ => EitherTUtil.unitUS[AcsInspectionError] - } + // Snapshot is empty or partiesOffboarding is false + case _ => EitherTUtil.unitUS[AcsInspectionError] + } + } yield () + } - } yield () - // re-enable journal cleaning after the dump - ret.thereafter { _ => - journalCleaningControl.enable(synchronizerId) - } - case _ => - EitherTUtil.unitUS + // disable journal cleaning for the duration of the dump + val res: EitherT[FutureUnlessShutdown, AcsInspectionError, Unit] = + disableJournalCleaningForFilter(allSynchronizers, filterSynchronizerId) + .mapK(FutureUnlessShutdown.outcomeK) + .flatMap { _ => + MonadUtil.sequentialTraverse_(latestSynchronizers) { + case (synchronizerId, state) if filterSynchronizerId(synchronizerId) => + writeACSToStream(synchronizerId, state) + case _ => + EitherTUtil.unitUS + } } - } + + // re-enable journal cleaning after the dump + res.thereafter { _ => + allSynchronizers.keys.foreach(journalCleaningControl.enable) + } } def contractCount(implicit traceContext: TraceContext): FutureUnlessShutdown[Int] = diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyManagementServiceError.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyManagementServiceError.scala index 7dffe6d76f..4b74a8a00b 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyManagementServiceError.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyManagementServiceError.scala @@ -8,7 +8,7 @@ import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.error.CantonErrorGroups.ParticipantErrorGroup.PartyManagementServiceErrorGroup import com.digitalasset.canton.error.{CantonBaseError, CantonError} import com.digitalasset.canton.logging.ErrorLoggingContext -import com.digitalasset.canton.topology.SynchronizerId +import com.digitalasset.canton.topology.{ParticipantId, PartyId, SynchronizerId} sealed trait PartyManagementServiceError extends Product with Serializable with CantonBaseError @@ -32,6 +32,28 @@ object PartyManagementServiceError extends PartyManagementServiceErrorGroup { final case class Error(reason: String)(implicit val loggingContext: ErrorLoggingContext) extends CantonError.Impl(reason) with PartyManagementServiceError + + final case class AbortAcsExportForMissingOnboardingFlag( + party: PartyId, + targetParticipant: ParticipantId, + )(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = s"Aborted to export ACS for party $party. 
" + + s"To enable export, the party must be activated on the target participant $targetParticipant with the onboarding flag set" + ) + with PartyManagementServiceError + + final case class MissingOnboardingFlagCannotCompleteOnboarding( + party: PartyId, + targetParticipant: ParticipantId, + )(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + s"Aborted to complete party onboarding because the activation for $party on the target participant $targetParticipant is missing the onboarding flag" + ) + with PartyManagementServiceError } object IOStream diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyOnboardingCompletion.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyOnboardingCompletion.scala new file mode 100644 index 0000000000..6d4e815673 --- /dev/null +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyOnboardingCompletion.scala @@ -0,0 +1,60 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.participant.admin.party + +import cats.data.EitherT +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.time.SynchronizerTimeTracker +import com.digitalasset.canton.topology.client.SynchronizerTopologyClient +import com.digitalasset.canton.topology.store.TopologyStore +import com.digitalasset.canton.topology.store.TopologyStoreId.SynchronizerStore +import com.digitalasset.canton.topology.{ + ParticipantId, + PartyId, + SynchronizerId, + SynchronizerTopologyManager, +} +import com.digitalasset.canton.tracing.TraceContext + +import scala.concurrent.ExecutionContext + +final class PartyOnboardingCompletion( + partyId: PartyId, + synchronizerId: SynchronizerId, + targetParticipantId: ParticipantId, + synchronizerTimeTracker: SynchronizerTimeTracker, + topologyManager: SynchronizerTopologyManager, + topologyStore: TopologyStore[SynchronizerStore], + topologyClient: SynchronizerTopologyClient, + protected val loggerFactory: NamedLoggerFactory, +)(implicit ec: ExecutionContext) + extends NamedLogging { + + private val topologyWorkflow = + new PartyReplicationTopologyWorkflow( + targetParticipantId, + timeouts = ProcessingTimeout(), + loggerFactory, + ) + + def attemptCompletion( + onboardingEffectiveAt: CantonTimestamp + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, String, (Boolean, Option[CantonTimestamp])] = + topologyWorkflow.authorizeOnboardedTopology( + partyId, + synchronizerId, + targetParticipantId, + onboardingEffectiveAt, + synchronizerTimeTracker, + topologyManager, + topologyStore, + topologyClient, + ) + +} diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyReplicationAdminWorkflow.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyReplicationAdminWorkflow.scala index 9900c04d5f..9c76364219 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyReplicationAdminWorkflow.scala +++ 
b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyReplicationAdminWorkflow.scala @@ -376,6 +376,7 @@ class PartyReplicationAdminWorkflow( )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, String, Unit] = { + logger.info(s"Marking agreement done for ${ap.requestId} on target participant") val commandId = s"agreement-done-${ap.requestId}" val agreementCid = new M.partyreplication.PartyReplicationAgreement.ContractId(damlAgreementCid.coid) diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyReplicationTestInterceptor.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyReplicationTestInterceptor.scala index adeb81634f..ad3c4484a4 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyReplicationTestInterceptor.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyReplicationTestInterceptor.scala @@ -23,7 +23,7 @@ trait PartyReplicationTestInterceptor { */ def onSourceParticipantProgress(store: SourceParticipantStore)(implicit traceContext: TraceContext - ): ProceedOrWait + ): ProceedOrWait = Proceed /** Specifies whether the TP proceeds or waits depending on the contents of the store. * @return @@ -31,7 +31,7 @@ trait PartyReplicationTestInterceptor { */ def onTargetParticipantProgress(store: TargetParticipantStore)(implicit traceContext: TraceContext - ): ProceedOrWait + ): ProceedOrWait = Proceed } object PartyReplicationTestInterceptor { @@ -42,12 +42,5 @@ object PartyReplicationTestInterceptor { /** In production, always proceed as any type of disruptive or stalling behavior is to be used in * integration tests only. 
*/ - object AlwaysProceed extends PartyReplicationTestInterceptor { - override def onSourceParticipantProgress(store: SourceParticipantStore)(implicit - traceContext: TraceContext - ): ProceedOrWait = Proceed - override def onTargetParticipantProgress(store: TargetParticipantStore)(implicit - traceContext: TraceContext - ): ProceedOrWait = Proceed - } + object AlwaysProceed extends PartyReplicationTestInterceptor } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyReplicationTopologyWorkflow.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyReplicationTopologyWorkflow.scala index ef06ba634e..2f52026e2b 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyReplicationTopologyWorkflow.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyReplicationTopologyWorkflow.scala @@ -5,10 +5,15 @@ package com.digitalasset.canton.participant.admin.party import cats.data.EitherT import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.crypto.Hash import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.protocol.DynamicSynchronizerParametersHistory +import com.digitalasset.canton.time.SynchronizerTimeTracker import com.digitalasset.canton.topology.TopologyManagerError.NoAppropriateSigningKeyInStore +import com.digitalasset.canton.topology.client.SynchronizerTopologyClient import com.digitalasset.canton.topology.store.TopologyStoreId.SynchronizerStore import com.digitalasset.canton.topology.store.{StoredTopologyTransaction, TimeQuery, TopologyStore} import com.digitalasset.canton.topology.transaction.TopologyChangeOp.Replace @@ -22,6 +27,7 @@ import com.digitalasset.canton.topology.{ ForceFlags, ParticipantId, PartyId, + SynchronizerId, SynchronizerTopologyManager, } import com.digitalasset.canton.tracing.TraceContext @@ -322,6 +328,202 @@ class PartyReplicationTopologyWorkflow( } yield () } + /** Attempt to authorize the onboarded topology for the party replication request on the target + * participant. Once the onboarded topology is authorized, verify the topology transaction, e.g. + * that the party has a hosting permission on the target participant without the onboarding flag. Do + * so in an idempotent way such that this function can be retried. + * + * @param params + * party replication parameters + * @param onboardingEffectiveAt + * effective time of the onboarding topology transaction needed to determine the safe time to + * clear the onboarding flag. + * @param synchronizerTimeTracker + * synchronizer time tracker to find the latest synchronizer timestamp observed by the + * participant + * @param topologyManager + * synchronizer topology manager to use for authorizing and TP-signature checking + * @param topologyStore + * synchronizer topology store + * @param topologyClient + * synchronizer topology client to look up the synchronizer dynamic parameter history in order to + * determine the safe time to clear onboarding flags with respect to decision timeouts of historic + * transactions. 
+ * @return + * whether the onboarded topology has been authorized + */ + private[party] def authorizeOnboardedTopology( + params: PartyReplicationStatus.ReplicationParams, + onboardingEffectiveAt: CantonTimestamp, + synchronizerTimeTracker: SynchronizerTimeTracker, + topologyManager: SynchronizerTopologyManager, + topologyStore: TopologyStore[SynchronizerStore], + topologyClient: SynchronizerTopologyClient, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, String, Boolean] = { + val PartyReplicationStatus + .ReplicationParams( + requestId, + partyId, + synchronizerId, + _, + targetParticipantId, + _, + _, + ) = params + val res = authorizeOnboardedTopology( + partyId, + synchronizerId, + targetParticipantId, + onboardingEffectiveAt, + synchronizerTimeTracker, + topologyManager, + topologyStore, + topologyClient, + Some(requestId), + ) + res.map { case (partyHasBeenOnboarded, _) => partyHasBeenOnboarded } + } + + private[party] def authorizeOnboardedTopology( + partyId: PartyId, + synchronizerId: SynchronizerId, + targetParticipantId: ParticipantId, + onboardingEffectiveAt: CantonTimestamp, + synchronizerTimeTracker: SynchronizerTimeTracker, + topologyManager: SynchronizerTopologyManager, + topologyStore: TopologyStore[SynchronizerStore], + topologyClient: SynchronizerTopologyClient, + requestId: Option[Hash] = None, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, String, (Boolean, Option[CantonTimestamp])] = { + require( + synchronizerId == topologyManager.psid.logical, + s"party replication synchronizer id $synchronizerId does not match topology manager synchronizer id ${topologyManager.psid.logical}", + ) + require( + synchronizerId == topologyStore.storeId.psid.logical, + s"party replication synchronizer id $synchronizerId does not match topology store synchronizer id ${topologyStore.storeId.psid.logical}", + ) + val requestIdLogPart = if (requestId.nonEmpty) s"For request $requestId: " else "" + for { + ptpHeadTxn <- EitherT( + partyToParticipantTopologyHeadO(partyId, topologyStore).map(txO => + txO + .filter(_.mapping.participants.exists(_.participantId == targetParticipantId)) + .toRight( + s"${requestIdLogPart}Party $partyId is not hosted by target participant $targetParticipantId" + ) + ) + ): EitherT[ + FutureUnlessShutdown, + String, + StoredTopologyTransaction[Replace, PartyToParticipant], + ] + onboardedPtpProposalO = Option.when( + ptpHeadTxn.mapping.participants.exists(p => + p.participantId == targetParticipantId && p.onboarding + ) + )( + ( + PartyToParticipant.tryCreate( + ptpHeadTxn.mapping.partyId, + ptpHeadTxn.mapping.threshold, + ptpHeadTxn.mapping.participants.map { + case HostingParticipant(`targetParticipantId`, permission, true) => + HostingParticipant(targetParticipantId, permission, onboarding = false) + case otherParticipant => otherParticipant + }, + ), + ptpHeadTxn.serial.increment, + ) + ) + partyHasBeenOnboarded = true + noDecisionDeadline = None + latestSynchronizerTimestampObservedO = synchronizerTimeTracker.latestTime + isPartyVerifiedOnboarded <- onboardedPtpProposalO match { + case None => + EitherT.rightT[FutureUnlessShutdown, String]((partyHasBeenOnboarded, noDecisionDeadline)) + case Some((ptpProposal, serial)) + if participantId == targetParticipantId && latestSynchronizerTimestampObservedO.isDefined => + logger.info( + s"${requestIdLogPart}About to mark party $partyId as onboarded on target participant" + ) + + for { + _ <- EitherT.cond[FutureUnlessShutdown]( + 
topologyClient.snapshotAvailable(onboardingEffectiveAt), + (), + s"Synchronizer $synchronizerId does not have a snapshot at onboarding effective time $onboardingEffectiveAt", + ) + onboardingTsSnapshot <- EitherT.right[String]( + topologyClient.snapshot(onboardingEffectiveAt) + ) + synchronizerParameterHistory <- EitherT.right[String]( + onboardingTsSnapshot.listDynamicSynchronizerParametersChanges() + ) + decisionDeadline = DynamicSynchronizerParametersHistory + .latestDecisionDeadlineEffectiveAt( + synchronizerParameterHistory, + onboardingEffectiveAt, + ) + _ = if (logger.underlying.isDebugEnabled) { + logger.debug( + s"safe timestamp: $decisionDeadline compared to latest synchronizer ts $latestSynchronizerTimestampObservedO" + + s" with onboardingEffectiveAt $onboardingEffectiveAt" + ) + } + isSafeToOnboard = latestSynchronizerTimestampObservedO.exists(_ > decisionDeadline) + _ <- + if (isSafeToOnboard) { + topologyManager + .proposeAndAuthorize( + op = TopologyChangeOp.Replace, + mapping = ptpProposal, + serial = Some(serial), + signingKeys = + Seq.empty, // Rely on topology manager to use the right TP signing keys + protocolVersion = topologyManager.managerVersion.serialization, + expectFullAuthorization = + true, // expect full authorization when onboarding is done + forceChanges = ForceFlags.none, + waitToBecomeEffective = None, + ) + .map(_ => ()) + .recover { case err @ NoAppropriateSigningKeyInStore.Failure(_, _) => + // See the note above on the possible race condition between the existingProposal and the topology manager call. + logger.info( + s"${requestIdLogPart}No appropriate key response to proposing topology change for $partyId indicates race with proposal authorization: $err" + ) + } + .leftMap { err => + val exception = err.asGrpcError + logger.warn( + s"${requestIdLogPart}Error proposing party to participant topology change on $participantId for $partyId", + exception, + ) + exception.getMessage + } + } else { + // If it is not yet safe to onboard, ask for a time proof in case the synchronizer does not + // serve any load, so that the party does not stay in the onboarding state until the next + // "minObservationDuration" (24 hours by default). 
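// --- Illustrative sketch (annotation, not part of the patch): the safety gate described by the
// --- comment above, with CantonTimestamp simplified to Long and the time tracker reduced to a
// --- callback. Clearing the onboarding flag is only safe once a synchronizer timestamp strictly
// --- after the decision deadline has been observed; otherwise a tick is requested so that an idle
// --- synchronizer still advances past the deadline.
object OnboardingSafetyGateSketch {
  final case class Outcome(safeToClear: Boolean, tickRequestedAt: Option[Long])

  def evaluate(
      latestObservedTs: Option[Long],
      decisionDeadline: Long,
      requestTick: Long => Unit,
  ): Outcome =
    if (latestObservedTs.exists(_ > decisionDeadline)) Outcome(safeToClear = true, None)
    else {
      val tickAt = decisionDeadline + 1 // immediate successor of the deadline
      requestTick(tickAt)
      Outcome(safeToClear = false, Some(tickAt))
    }
}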
+ logger.info( + s"Requesting time proof to advance synchronizer time to the safe onboarding timestamp $decisionDeadline" + ) + synchronizerTimeTracker.requestTick(decisionDeadline.immediateSuccessor).discard + EitherTUtil.unitUS[String] + } + } yield (!partyHasBeenOnboarded, Some(decisionDeadline)) + case Some((_, _)) => + // on any participant other than the target participant (or before any synchronizer time has been observed), report that onboarding is not yet complete + EitherT.rightT[FutureUnlessShutdown, String]((!partyHasBeenOnboarded, noDecisionDeadline)) + } + } yield isPartyVerifiedOnboarded + } + private def partyToParticipantTopologyHeadO( partyId: PartyId, topologyStore: TopologyStore[SynchronizerStore], diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyReplicator.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyReplicator.scala index 765ec1324d..23fd135b90 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyReplicator.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/party/PartyReplicator.scala @@ -804,19 +804,33 @@ final class PartyReplicator( ): EitherT[FutureUnlessShutdown, String, Unit] = ensureParticipantStateAndSynchronizerConnected[ PartyReplicationStatus.FullyReplicatedAcs - ](requestId) { case (status, _, _) => - EitherT.right[String]( - markOnPRAgreementDone( - PartyReplicationAgreementParams.fromAgreedReplicationStatus(status), - status.damlAgreementCid, - traceContext, - ).map(isAgreementArchived => - if (isAgreementArchived) { - logger.info(s"Party replication $requestId has completed") - partyReplications.put(requestId, PartyReplicationStatus.Completed(status)).discard - } + ](requestId) { case (status, connectedSynchronizer, _) => + for { + isAgreementArchived <- EitherT.right[String]( + markOnPRAgreementDone( + PartyReplicationAgreementParams.fromAgreedReplicationStatus(status), + status.damlAgreementCid, + traceContext, + ) ) - ) + isPartyOnboarded <- topologyWorkflow.authorizeOnboardedTopology( + status.params, + status.effectiveAt, + connectedSynchronizer.ephemeral.timeTracker, + connectedSynchronizer.synchronizerHandle.syncPersistentState.topologyManager, + connectedSynchronizer.synchronizerHandle.syncPersistentState.topologyStore, + connectedSynchronizer.synchronizerHandle.topologyClient, + ) + } yield { + if (isAgreementArchived && isPartyOnboarded) { + logger.info(s"Party replication $requestId has completed") + partyReplications.put(requestId, PartyReplicationStatus.Completed(status)).discard + } else { + logger.debug( + s"Party replication $requestId not yet completed. AgreementArchived $isAgreementArchived, PartyOnboarded $isPartyOnboarded." 
+ ) + } + } } private def recordError(requestId: AddPartyRequestId, tc: TraceContext)(error: String): Unit = { diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/ChangeAssignation.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/ChangeAssignation.scala index 3c2c7f6058..23262b39f1 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/ChangeAssignation.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/ChangeAssignation.scala @@ -74,6 +74,9 @@ private final class ChangeAssignation( } unassignedContracts <- readContracts(contractIdCounters) _ <- persistContracts(unassignedContracts) + internalContractIdsForUnassignedContracts <- EitherT.right( + getInternalContractIds(unassignedContracts) + ) _ <- targetPersistentState.unwrap.reassignmentStore .completeReassignment( unassignmentData.payload.reassignmentId, @@ -90,6 +93,7 @@ private final class ChangeAssignation( publishAssignmentEvent( unassignmentData.payload.unassignmentTs, unassignmentData.map(_ => unassignedContracts), + internalContractIdsForUnassignedContracts, ) ) } yield () @@ -131,7 +135,10 @@ private final class ChangeAssignation( _ <- persistContracts(changeBatch) newChanges = changes.map(_ => changeBatch) _ <- persistUnassignAndAssign(newChanges).toEitherT - _ <- EitherT.right(publishReassignmentEvents(repairSource.unwrap.timestamp, newChanges)) + internalContractIds <- EitherT.right(getInternalContractIds(changeBatch)) + _ <- EitherT.right( + publishReassignmentEvents(repairSource.unwrap.timestamp, newChanges, internalContractIds) + ) } yield () } @@ -332,6 +339,20 @@ private final class ChangeAssignation( EitherT.right(batches.parTraverse_(contractStore.storeContracts)) } + /** Get the internal contract ids by the contract id mapping of [[ContractStore]] + */ + private def getInternalContractIds(changes: Changes)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Map[LfContractId, Long]] = { + val batches = changes.batches.map { batch => + batch.contracts.map(_.contract.contractId) + } + + batches + .parTraverse(contractStore.lookupBatchedNonCachedInternalIds(_)) + .map(_.foldLeft(Map.empty[LfContractId, Long])(_ ++ _)) + } + private def persistAssignments( contracts: Iterable[(LfContractId, ReassignmentCounter)], timeOfRepair: Target[TimeOfRepair], @@ -377,8 +398,9 @@ private final class ChangeAssignation( private def publishAssignmentEvent( unassignmentTs: CantonTimestamp, changes: ChangeAssignation.Data[Changes], + internalContractIds: Map[LfContractId, Long], )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - val updates = assignment(unassignmentTs, changes) + val updates = assignment(unassignmentTs, changes, internalContractIds) for { _ <- FutureUnlessShutdown.outcomeF( @@ -390,10 +412,13 @@ private final class ChangeAssignation( private def publishReassignmentEvents( unassignmentTs: CantonTimestamp, changes: ChangeAssignation.Data[Changes], + internalContractIds: Map[LfContractId, Long], )(implicit traceContext: TraceContext ): FutureUnlessShutdown[Unit] = { - val updates = unassignment(unassignmentTs, changes) ++ assignment(unassignmentTs, changes) + val updates = + unassignment(unassignmentTs, changes) ++ + assignment(unassignmentTs, changes, internalContractIds) for { _ <- FutureUnlessShutdown.outcomeF( MonadUtil.sequentialTraverse(updates)(repairIndexer.offer(_)) @@ -416,7 
+441,7 @@ private final class ChangeAssignation( ) Update.RepairReassignmentAccepted( workflowId = None, - updateId = randomTransactionId(syncCrypto).tryAsLedgerTransactionId, + updateId = randomTransactionId(syncCrypto), reassignmentInfo = ReassignmentInfo( sourceSynchronizer = sourceSynchronizerId, targetSynchronizer = targetSynchronizerId, @@ -438,11 +463,17 @@ private final class ChangeAssignation( repairCounter = changes.sourceTimeOfRepair.unwrap.repairCounter, recordTime = changes.sourceTimeOfRepair.unwrap.timestamp, synchronizerId = sourceSynchronizerId.unwrap, + // no internal contract ids since no create nodes are involved + internalContractIds = Map.empty, ) } - private def assignment(unassignmentTs: CantonTimestamp, changes: ChangeAssignation.Data[Changes])( - implicit traceContext: TraceContext + private def assignment( + unassignmentTs: CantonTimestamp, + changes: ChangeAssignation.Data[Changes], + internalContractIds: Map[LfContractId, Long], + )(implicit + traceContext: TraceContext ): Seq[RepairUpdate] = changes.payload.batches.map { case batch => val reassignmentId = ReassignmentId( @@ -453,7 +484,7 @@ private final class ChangeAssignation( ) Update.RepairReassignmentAccepted( workflowId = None, - updateId = randomTransactionId(syncCrypto).tryAsLedgerTransactionId, + updateId = randomTransactionId(syncCrypto), reassignmentInfo = ReassignmentInfo( sourceSynchronizer = sourceSynchronizerId, targetSynchronizer = targetSynchronizerId, @@ -476,6 +507,7 @@ private final class ChangeAssignation( repairCounter = changes.targetTimeOfRepair.unwrap.repairCounter, recordTime = changes.targetTimeOfRepair.unwrap.timestamp, synchronizerId = targetSynchronizerId.unwrap, + internalContractIds = internalContractIds, ) } } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/ContractAuthenticationImportProcessor.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/ContractAuthenticationImportProcessor.scala new file mode 100644 index 0000000000..be8b00a55f --- /dev/null +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/ContractAuthenticationImportProcessor.scala @@ -0,0 +1,334 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.participant.admin.repair + +import cats.Eval +import cats.data.EitherT +import cats.syntax.either.* +import cats.syntax.parallel.* +import cats.syntax.traverse.* +import com.daml.logging.LoggingContext +import com.digitalasset.canton.concurrent.Threading +import com.digitalasset.canton.crypto.{HashOps, HmacOps} +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.participant.admin.data.* +import com.digitalasset.canton.participant.sync.StaticSynchronizerParametersGetter +import com.digitalasset.canton.protocol.* +import com.digitalasset.canton.topology.SynchronizerId +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.{ContractHasher, ContractValidator, MonadUtil} +import com.digitalasset.daml.lf.crypto.Hash +import com.digitalasset.daml.lf.transaction.Versioned + +import scala.collection.concurrent.TrieMap +import scala.concurrent.{ExecutionContext, Future} + +sealed abstract class ContractAuthenticationImportProcessor( + staticParametersGetter: StaticSynchronizerParametersGetter +) extends NamedLogging { + protected implicit def executionContext: ExecutionContext + + def process(contracts: Seq[RepairContract])(implicit + tc: TraceContext + ): EitherT[FutureUnlessShutdown, String, (Seq[RepairContract], Map[LfContractId, LfContractId])] + + /* + In the context of a migration combining ACS import and synchronizer change (such as the one we perform + as part of a major upgrade for early mainnet), the `contract.protocolVersion` and the protocol + version of the synchronizer will be different. Hence, we need to query it using the getter. + */ + protected def getMaximumSupportedContractIdVersion( + synchronizerId: SynchronizerId + ): Either[String, CantonContractIdVersion] = + staticParametersGetter + .latestKnownProtocolVersion(synchronizerId) + .toRight( + s"Protocol version for synchronizer with ID $synchronizerId cannot be resolved" + ) + .flatMap(CantonContractIdVersion.maximumSupportedVersion) +} + +object ContractAuthenticationImportProcessor { + + /** Verify that all contract IDs have a version greater than or equal to the contract ID version + * associated with the protocol version of the synchronizer to which the contract is assigned. + * Furthermore, perform full contract validation. If these checks fail for any contract, the + * whole import is aborted. 
+ */ + private final class ValidateContracts( + staticParametersGetter: StaticSynchronizerParametersGetter, + contractValidator: ContractValidator, + override val loggerFactory: NamedLoggerFactory, + )(protected implicit val executionContext: ExecutionContext) + extends ContractAuthenticationImportProcessor(staticParametersGetter) { + private val processParallelism = Threading.detectNumberOfThreads(noTracingLogger) + + private def validateContract( + contract: RepairContract + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, String, RepairContract] = { + val validatedContractIdVersionE = for { + maxSynchronizerVersion <- getMaximumSupportedContractIdVersion(contract.synchronizerId) + activeContractVersion <- CantonContractIdVersion + .extractCantonContractIdVersion(contract.contract.contractId) + _ <- Either.cond( + maxSynchronizerVersion >= activeContractVersion, + (), + s"Contract ID ${contract.contract.contractId} has version $activeContractVersion but synchronizer ${contract.synchronizerId.toProtoPrimitive} only supports up to $maxSynchronizerVersion", + ) + } yield contract + + for { + _ <- validatedContractIdVersionE.toEitherT[FutureUnlessShutdown] + _ <- { + implicit val loggingContext: LoggingContext = LoggingContext.empty + contractValidator + .authenticate(contract.contract, contract.representativePackageId) + .leftMap { e => + s"Failed to authenticate contract with id: ${contract.contract.contractId}: $e" + } + } + } yield contract + } + + override def process(contracts: Seq[RepairContract])(implicit + tc: TraceContext + ): EitherT[ + FutureUnlessShutdown, + String, + (Seq[RepairContract], Map[LfContractId, LfContractId]), + ] = MonadUtil + .parTraverseWithLimit(processParallelism)(contracts)(validateContract) + .map((_, Map.empty)) + } + + private final case class DiscriminatorWithContractId( + discriminator: Hash, + contractId: LfContractId, + ) + + /** Recompute the contract IDs of all contracts using the provided cryptoOps. The whole + * preprocessing will fail if any of the following conditions apply to any contract: + * - the contract ID discriminator version is unknown + * - any contract ID referenced in a payload is missing from the import + * - any contract is referenced by two different IDs (e.g. the ID in the payload is fine but + * the one in the contract is not) + */ + private final class RecomputeContractIdSuffixes( + staticParametersGetter: StaticSynchronizerParametersGetter, + cryptoOps: HashOps & HmacOps, + hasher: ContractHasher, + override val loggerFactory: NamedLoggerFactory, + )(implicit protected val executionContext: ExecutionContext) + extends ContractAuthenticationImportProcessor(staticParametersGetter) { + + private val unicumGenerator = new UnicumGenerator(cryptoOps) + + private val fullRemapping = + TrieMap.empty[LfContractId, Eval[EitherT[Future, String, RepairContract]]] + + private def getDiscriminator(c: LfFatContractInst): Either[String, Hash] = + c.contractId match { + case LfContractId.V1(discriminator, _) => + Right(discriminator) + case _ => + Left(s"Unknown LF contract ID version, cannot recompute contract ID ${c.contractId.coid}") + } + + // Recompute the contract ID of a single contract. Any dependency is taken from the `fullRemapping`, + // which is pre-populated with a lazy reference to the contract ID recomputed here. 
The evaluation + // of the `Eval` as part of resolving the (recomputed) contract ID for dependencies will cause the + // immediate retrieval of the dependency, possibly triggering recomputation, limiting throughput in + // the presence of dependencies but preventing deadlocks while being stack-safe (`Eval` employs + // trampolining). If a contract ID is reached for which there is no instance, the recomputation + // cannot be performed. This is normal, as the dependency might have been archived and pruned. Still, + // we issue a warning out of caution. + private def recomputeContractIdSuffix( + repairContract: RepairContract, + contractIdVersion: CantonContractIdVersion, + )(implicit tc: TraceContext): EitherT[Future, String, RepairContract] = { + val contract = repairContract.contract + + for { + discriminator <- EitherT.fromEither[Future](getDiscriminator(contract)) + depsRemapping <- contract.createArg.cids.toSeq + // parTraverse use is fine because computation is in-memory only + .parTraverse { contractId => + fullRemapping + .get(contractId) + .fold { + logger.warn( + s"Missing dependency with contract ID '${contractId.coid}'. The contract might have been archived. Its contract ID cannot be recomputed." + ) + EitherT.rightT[Future, String](contractId -> contractId) + }(_.value.map(contract => contractId -> contract.contract.contractId)) + } + .map(_.toMap) + + newCreate = contract.toCreateNode.copy( + packageName = contract.packageName, + templateId = contract.templateId, + arg = contract.createArg.mapCid(depsRemapping), + ) + newThinContractInstance = newCreate.coinst + contractIdV1Version <- EitherT.fromEither[Future](contractIdVersion match { + case v1: CantonContractIdV1Version => Right(v1) + case _ => + // TODO(#23971) implement this if possible + Left( + s"Contract ID version $contractIdVersion is not supported for recomputation, only V1 versions are supported" + ) + }) + authenticationData <- EitherT.fromEither[Future]( + ContractAuthenticationData + .fromLfBytes(contractIdV1Version, contract.authenticationData) + .leftMap(err => + s"Could not parse contract authentication data for contract ID ${contract.contractId}: $err" + ) + ) + metadata <- EitherT.fromEither[Future]( + ContractMetadata.create( + signatories = contract.signatories, + stakeholders = contract.stakeholders, + maybeKeyWithMaintainersVersioned = + contract.contractKeyWithMaintainers.map(Versioned(contract.version, _)), + ) + ) + contractHash <- EitherT.apply({ + hasher.hash(newCreate, contractIdV1Version.contractHashingMethod).value.unwrap.map { + _.onShutdown[Either[String, LfHash]]( + "Shutdown during contract hashing".asLeft[LfHash] + ) + } + }) + + unicum <- EitherT.fromEither[Future] { + unicumGenerator.recomputeUnicum( + authenticationData.salt, + contract.createdAt, + metadata, + contractHash, + ) + } + newContractId = contractIdV1Version.fromDiscriminator(discriminator, unicum) + newFatContractInstance = LfFatContractInst.fromCreateNode( + contract.toCreateNode.copy(coid = newContractId, arg = newThinContractInstance.arg), + contract.createdAt, + contract.authenticationData, + ) + } yield repairContract.withContractInstance(newFatContractInstance) + } + + // If the contract ID is already valid return the contract as is, eagerly and synchronously. + // If the contract ID is not valid it will recompute it, lazily and asynchronously. 
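// --- Illustrative sketch (annotation, not part of the patch): the TrieMap + Eval pattern the
// --- comment above describes, reduced to plain String ids with the recomputation supplied as a
// --- function. Eval.later memoizes each entry; like the real code, the sketch assumes the
// --- dependency graph is acyclic and that missing dependencies are kept as-is.
import cats.Eval
import scala.collection.concurrent.TrieMap

object LazyRemappingSketch {
  def remapAll(
      dependencies: Map[String, List[String]],
      fix: (String, Map[String, String]) => String,
  ): Map[String, String] = {
    val table = TrieMap.empty[String, Eval[String]]
    // pre-populate the table so every id has an entry before any evaluation starts
    dependencies.keys.foreach { id =>
      table
        .put(
          id,
          Eval.later {
            // resolving a dependency forces its own (memoized) recomputation
            val resolvedDeps = dependencies(id).map { dep =>
              dep -> table.get(dep).fold(dep)(_.value)
            }.toMap
            fix(id, resolvedDeps)
          },
        )
  }
    table.iterator.map { case (id, ev) => id -> ev.value }.toMap
  }
}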
+ private def recomputeBrokenContractIdSuffix(contract: RepairContract)(implicit + tc: TraceContext + ): Eval[EitherT[Future, String, RepairContract]] = + getMaximumSupportedContractIdVersion(contract.synchronizerId).fold( + error => Eval.now(EitherT.leftT[Future, RepairContract](error)), + maxContractIdVersion => { + val contractId = contract.contract.contractId + val valid = CantonContractIdVersion + .extractCantonContractIdVersion(contractId) + .exists(_ <= maxContractIdVersion) + if (valid) { + logger.debug(s"Contract ID '${contractId.coid}' is already valid") + Eval.now(EitherT.rightT[Future, String](contract)) + } else { + logger.debug(s"Contract ID '${contractId.coid}' needs to be recomputed") + Eval.later(recomputeContractIdSuffix(contract, maxContractIdVersion)) + } + }, + ) + + private def ensureDiscriminatorUniqueness( + contracts: Seq[RepairContract] + ): Either[String, Unit] = { + val allContractIds = contracts.map(_.contract.contractId) + val allDependencies = contracts.flatMap(_.contract.createArg.cids) + (allContractIds ++ allDependencies) + .traverse { + case contractId @ LfContractId.V1(discriminator, _) => + Right(DiscriminatorWithContractId(discriminator, contractId)) + case unknown => + Left(s"Unknown LF contract ID version, cannot recompute contract ID ${unknown.coid}") + } + .map(_.groupMapReduce(_.discriminator)(cid => Set(cid.contractId))(_ ++ _)) + .flatMap( + _.collectFirst { case cid @ (_, contractIds) if contractIds.sizeIs > 1 => cid } + .toLeft(()) + .leftMap { case (discriminator, contractIds) => + s"Duplicate discriminator '${discriminator.bytes.toHexString}' is used by ${contractIds.size} contract IDs, including (showing up to 10): ${contractIds.take(10).map(_.coid).mkString(", ")}..." + } + ) + } + + private def recomputeBrokenContractIdSuffixes(contracts: Seq[RepairContract])(implicit + tc: TraceContext + ): EitherT[Future, String, (Seq[RepairContract], Map[LfContractId, LfContractId])] = { + // Associate every contract ID with a lazy deferred computation that will recompute the contract ID if necessary. + // It's lazy so that every single contract ID is associated with a computation, before the first one finishes. + // The assumptions are that every contract ID referenced in any payload has an associated `RepairContract` in + // the import, and that there are no cycles in the contract ID references. + for (contract <- contracts) { + fullRemapping + .put( + contract.contract.contractId, + recomputeBrokenContractIdSuffix(contract), + ) + .discard + } + for { + // parTraverse use is fine because computation is in-memory only + completedRemapping <- fullRemapping.view.valuesIterator.toSeq.parTraverse(_.value) + contractIdRemapping <- fullRemapping.toSeq.parTraverseFilter { case (cid, v) => + v.value.map(c => Option.when(cid != c.contract.contractId)(cid -> c.contract.contractId)) + } + } yield completedRemapping -> contractIdRemapping.toMap + } + + override def process(contracts: Seq[RepairContract])(implicit + tc: TraceContext + ): EitherT[ + FutureUnlessShutdown, + String, + (Seq[RepairContract], Map[LfContractId, LfContractId]), + ] = + for { + _ <- ensureDiscriminatorUniqueness(contracts).toEitherT[FutureUnlessShutdown] + completedRemapping <- recomputeBrokenContractIdSuffixes(contracts).mapK( + FutureUnlessShutdown.outcomeK + ) + } yield completedRemapping + } + + /** Ensures that all contracts are validated and their contract IDs comply with the scheme + * associated to the synchronizer where the contracts are assigned. 
+ */ + def apply( + loggerFactory: NamedLoggerFactory, + staticParametersGetter: StaticSynchronizerParametersGetter, + cryptoOps: HashOps & HmacOps, + hasher: ContractHasher, + contractValidator: ContractValidator, + contractImportMode: ContractImportMode, + )(contracts: Seq[RepairContract])(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[FutureUnlessShutdown, String, (Seq[RepairContract], Map[LfContractId, LfContractId])] = + contractImportMode match { + // Accept contracts as they are. + case ContractImportMode.Accept => EitherT.rightT((contracts, Map.empty)) + case ContractImportMode.Validation => + new ValidateContracts(staticParametersGetter, contractValidator, loggerFactory) + .process(contracts) + case ContractImportMode.Recomputation => + new RecomputeContractIdSuffixes(staticParametersGetter, cryptoOps, hasher, loggerFactory) + .process(contracts) + } + +} diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/ContractIdsImportProcessor.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/ContractIdsImportProcessor.scala deleted file mode 100644 index 8b42138eed..0000000000 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/ContractIdsImportProcessor.scala +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.participant.admin.repair - -import cats.Eval -import cats.data.EitherT -import cats.syntax.either.* -import cats.syntax.parallel.* -import cats.syntax.traverse.* -import com.digitalasset.canton.crypto.{HashOps, HmacOps} -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.participant.admin.data.* -import com.digitalasset.canton.participant.sync.StaticSynchronizerParametersGetter -import com.digitalasset.canton.protocol.* -import com.digitalasset.canton.topology.SynchronizerId -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.daml.lf.crypto.Hash -import com.digitalasset.daml.lf.transaction.Versioned -import com.digitalasset.daml.lf.value.Value.ThinContractInstance - -import scala.collection.concurrent.TrieMap -import scala.concurrent.{ExecutionContext, Future} - -sealed abstract class ContractIdsImportProcessor( - staticParametersGetter: StaticSynchronizerParametersGetter -) extends NamedLogging { - def process(contracts: Seq[RepairContract])(implicit - ec: ExecutionContext, - tc: TraceContext, - ): EitherT[Future, String, (Seq[RepairContract], Map[LfContractId, LfContractId])] - - /* - In the context of a migration combining ACS import and synchronizer change (such as the one we perform - as part a major upgrade for early mainnet), the `contract.protocolVersion` and the protocol - version of the synchronizer will be different. Hence, we need to query it using the getter. 
- */ - protected def getMaximumSupportedContractIdVersion( - synchronizerId: SynchronizerId - ): Either[String, CantonContractIdVersion] = - staticParametersGetter - .latestKnownProtocolVersion(synchronizerId) - .toRight( - s"Protocol version for synchronizer with ID $synchronizerId cannot be resolved" - ) - .flatMap(CantonContractIdVersion.maximumSupportedVersion) -} - -object ContractIdsImportProcessor { - - /** Verify that all contract IDs have a version greater or equal to the contract ID version - * associated with the protocol version of the synchronizer to which the contract is assigned. If - * any contract ID fails, the whole process fails. - */ - private final class VerifyContractIdSuffixes( - staticParametersGetter: StaticSynchronizerParametersGetter, - override val loggerFactory: NamedLoggerFactory, - ) extends ContractIdsImportProcessor(staticParametersGetter) { - - private def verifyContractIdSuffix( - contract: RepairContract - ): Either[String, RepairContract] = - for { - maxSynchronizerVersion <- getMaximumSupportedContractIdVersion(contract.synchronizerId) - activeContractVersion <- CantonContractIdVersion - .extractCantonContractIdVersion(contract.contract.contractId) - .leftMap(_.toString) - _ <- Either.cond( - maxSynchronizerVersion >= activeContractVersion, - (), - s"Contract ID ${contract.contract.contractId} has version $activeContractVersion but synchronizer ${contract.synchronizerId.toProtoPrimitive} only supports up to $maxSynchronizerVersion", - ) - } yield contract - - override def process(contracts: Seq[RepairContract])(implicit - ec: ExecutionContext, - tc: TraceContext, - ): EitherT[Future, String, (Seq[RepairContract], Map[LfContractId, LfContractId])] = - EitherT - .fromEither[Future](contracts.traverse(verifyContractIdSuffix)) - .map((_, Map.empty)) - } - - private final case class DiscriminatorWithContractId( - discriminator: Hash, - contractId: LfContractId, - ) - - /** Recompute the contract IDs of all contracts using the provided cryptoOps. The whole - * preprocessing will fail if any of the following conditions apply to any contract: - * - the contract ID discriminator version is unknown - * - any contract ID referenced in a payload is missing from the import - * - any contract is referenced by two different IDs (e.g. the ID in the payload is fine but - * the one in the contract is not) - */ - private final class RecomputeContractIdSuffixes( - staticParametersGetter: StaticSynchronizerParametersGetter, - cryptoOps: HashOps & HmacOps, - override val loggerFactory: NamedLoggerFactory, - ) extends ContractIdsImportProcessor(staticParametersGetter) { - - private val unicumGenerator = new UnicumGenerator(cryptoOps) - - private val fullRemapping = - TrieMap.empty[LfContractId, Eval[EitherT[Future, String, RepairContract]]] - - private def getDiscriminator(c: LfFatContractInst): Either[String, Hash] = - c.contractId match { - case LfContractId.V1(discriminator, _) => - Right(discriminator) - case _ => - Left(s"Unknown LF contract ID version, cannot recompute contract ID ${c.contractId.coid}") - } - - // Recompute the contract ID of a single contract. Any dependency is taken from the `fullRemapping`, - // which is pre-populated with a lazy reference to the contract ID recomputed here. 
The evaluation - // of the `Eval` as part of resolving the (recomputed) contract ID for dependencies will cause the - // immediate retrieval of the dependency, possibly triggering recomputation, limiting throughput in - // the presence of dependencies but preventing deadlocks while being stack-safe (`Eval` employs - // trampolining). If a contract ID is reached for which there is no instance, the recomputation - // cannot be performed. This is normal, as the dependency might have been archived and pruned. Still, - // we issue a warning out of caution. - private def recomputeContractIdSuffix( - repairContract: RepairContract, - contractIdVersion: CantonContractIdVersion, - )(implicit tc: TraceContext, ec: ExecutionContext): EitherT[Future, String, RepairContract] = { - val contract = repairContract.contract - - for { - discriminator <- EitherT.fromEither[Future](getDiscriminator(contract)) - depsRemapping <- contract.createArg.cids.toSeq - // parTraverse use is fine because computation is in-memory only - .parTraverse { contractId => - fullRemapping - .get(contractId) - .fold { - logger.warn( - s"Missing dependency with contract ID '${contractId.coid}'. The contract might have been archived. Its contract ID cannot be recomputed." - ) - EitherT.rightT[Future, String](contractId -> contractId) - }(_.value.map(contract => contractId -> contract.contract.contractId)) - } - .map(_.toMap) - newThinContractInstance = ThinContractInstance( - contract.packageName, - contract.templateId, - contract.createArg.mapCid(depsRemapping), - ) - contractIdV1Version <- EitherT.fromEither[Future](contractIdVersion match { - case v1: CantonContractIdV1Version => Right(v1) - case _ => - // TODO(#23971) implement this if possible - Left( - s"Contract ID version $contractIdVersion is not supported for recomputation, only V1 versions are supported" - ) - }) - authenticationData <- EitherT.fromEither[Future]( - ContractAuthenticationData - .fromLfBytes(contractIdV1Version, contract.authenticationData) - .leftMap(err => - s"Could not parse contract authentication data for contract ID ${contract.contractId}: $err" - ) - ) - metadata <- EitherT.fromEither[Future]( - ContractMetadata.create( - signatories = contract.signatories, - stakeholders = contract.stakeholders, - maybeKeyWithMaintainersVersioned = - contract.contractKeyWithMaintainers.map(Versioned(contract.version, _)), - ) - ) - unicum <- EitherT.fromEither[Future] { - unicumGenerator.recomputeUnicum( - authenticationData.salt, - contract.createdAt, - metadata, - newThinContractInstance, - contractIdV1Version, - ) - } - newContractId = contractIdV1Version.fromDiscriminator(discriminator, unicum) - newFatContractInstance = LfFatContractInst.fromCreateNode( - contract.toCreateNode.copy(coid = newContractId, arg = newThinContractInstance.arg), - contract.createdAt, - contract.authenticationData, - ) - } yield repairContract.withContractInstance(newFatContractInstance) - } - - // If the contract ID is already valid return the contract as is, eagerly and synchronously. - // If the contract ID is not valid it will recompute it, lazily and asynchronously. 
- private def recomputeBrokenContractIdSuffix(contract: RepairContract)(implicit - ec: ExecutionContext, - tc: TraceContext, - ): Eval[EitherT[Future, String, RepairContract]] = - getMaximumSupportedContractIdVersion(contract.synchronizerId).fold( - error => Eval.now(EitherT.leftT[Future, RepairContract](error)), - maxContractIdVersion => { - val contractId = contract.contract.contractId - val valid = CantonContractIdVersion - .extractCantonContractIdVersion(contractId) - .exists(_ <= maxContractIdVersion) - if (valid) { - logger.debug(s"Contract ID '${contractId.coid}' is already valid") - Eval.now(EitherT.rightT[Future, String](contract)) - } else { - logger.debug(s"Contract ID '${contractId.coid}' needs to be recomputed") - Eval.later(recomputeContractIdSuffix(contract, maxContractIdVersion)) - } - }, - ) - - private def ensureDiscriminatorUniqueness( - contracts: Seq[RepairContract] - ): Either[String, Unit] = { - val allContractIds = contracts.map(_.contract.contractId) - val allDependencies = contracts.flatMap(_.contract.createArg.cids) - (allContractIds ++ allDependencies) - .traverse { - case contractId @ LfContractId.V1(discriminator, _) => - Right(DiscriminatorWithContractId(discriminator, contractId)) - case unknown => - Left(s"Unknown LF contract ID version, cannot recompute contract ID ${unknown.coid}") - } - .map(_.groupMapReduce(_.discriminator)(cid => Set(cid.contractId))(_ ++ _)) - .flatMap( - _.collectFirst { case cid @ (_, contractIds) if contractIds.sizeIs > 1 => cid } - .toLeft(()) - .leftMap { case (discriminator, contractIds) => - s"Duplicate discriminator '${discriminator.bytes.toHexString}' is used by ${contractIds.size} contract IDs, including (showing up to 10): ${contractIds.take(10).map(_.coid).mkString(", ")}..." - } - ) - } - - private def recomputeBrokenContractIdSuffixes(contracts: Seq[RepairContract])(implicit - ec: ExecutionContext, - tc: TraceContext, - ): EitherT[Future, String, (Seq[RepairContract], Map[LfContractId, LfContractId])] = { - // Associate every contract ID with a lazy deferred computation that will recompute the contract ID if necessary - // It's lazy so that every single contract ID is associated with a computation, before the first one finishes. - // The assumptions are that every contract ID references in any payload has an associated `RepairContract` in - // the import, and that there are no cycles in the contract ID references. - for (contract <- contracts) { - fullRemapping - .put( - contract.contract.contractId, - recomputeBrokenContractIdSuffix(contract), - ) - .discard - } - for { - // parTraverse use is fine because computation is in-memory only - completedRemapping <- fullRemapping.view.valuesIterator.toSeq.parTraverse(_.value) - contractIdRemapping <- fullRemapping.toSeq.parTraverseFilter { case (cid, v) => - v.value.map(c => Option.when(cid != c.contract.contractId)(cid -> c.contract.contractId)) - } - } yield completedRemapping -> contractIdRemapping.toMap - } - - override def process(contracts: Seq[RepairContract])(implicit - ec: ExecutionContext, - tc: TraceContext, - ): EitherT[Future, String, (Seq[RepairContract], Map[LfContractId, LfContractId])] = - for { - _ <- EitherT.fromEither[Future](ensureDiscriminatorUniqueness(contracts)) - completedRemapping <- recomputeBrokenContractIdSuffixes(contracts) - } yield completedRemapping - } - - /** Ensures that all contract IDs comply with the scheme associated to the synchronizer where the - * contracts are assigned. 
- */ - def apply( - loggerFactory: NamedLoggerFactory, - staticParametersGetter: StaticSynchronizerParametersGetter, - cryptoOps: HashOps & HmacOps, - contractIdImportMode: ContractIdImportMode, - )(contracts: Seq[RepairContract])(implicit - ec: ExecutionContext, - tc: TraceContext, - ): EitherT[Future, String, (Seq[RepairContract], Map[LfContractId, LfContractId])] = - contractIdImportMode match { - // Accept contract IDs as they are. - case ContractIdImportMode.Accept => EitherT.rightT((contracts, Map.empty)) - case ContractIdImportMode.Validation => - new VerifyContractIdSuffixes(staticParametersGetter, loggerFactory) - .process(contracts) - case ContractIdImportMode.Recomputation => - new RecomputeContractIdSuffixes(staticParametersGetter, cryptoOps, loggerFactory) - .process(contracts) - } - -} diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairRequest.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairRequest.scala index f77bd8b1c9..87a0b1aa7d 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairRequest.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairRequest.scala @@ -7,14 +7,14 @@ import com.daml.nonempty.NonEmpty import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.ledger.participant.state.SynchronizerIndex import com.digitalasset.canton.participant.store.SyncPersistentState -import com.digitalasset.canton.protocol.{StaticSynchronizerParameters, TransactionId} +import com.digitalasset.canton.protocol.{StaticSynchronizerParameters, UpdateId} import com.digitalasset.canton.topology.PhysicalSynchronizerId import com.digitalasset.canton.topology.client.TopologySnapshot import com.digitalasset.canton.{RepairCounter, SynchronizerAlias} private[repair] final case class RepairRequest( synchronizer: RepairRequest.SynchronizerData, - transactionId: TransactionId, + updateId: UpdateId, repairCounters: NonEmpty[Seq[RepairCounter]], ) { diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairService.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairService.scala index d8d6e2c296..e60c3d9065 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairService.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairService.scala @@ -16,6 +16,7 @@ import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.crypto.SyncCryptoApiParticipantProvider import com.digitalasset.canton.data.{CantonTimestamp, LedgerTimeBoundaries} import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.ledger.participant.state.Update.TransactionAccepted.RepresentativePackageIds import com.digitalasset.canton.ledger.participant.state.{RepairUpdate, TransactionMeta, Update} import com.digitalasset.canton.lifecycle.{ FlagCloseable, @@ -81,8 +82,7 @@ import scala.concurrent.{ExecutionContext, Future} final class RepairService( participantId: ParticipantId, syncCrypto: SyncCryptoApiParticipantProvider, - packageDependencyResolver: PackageDependencyResolver, - contractAuthenticator: ContractAuthenticator, + packageDependencyResolver: PackageDependencyResolver.Impl, contractStore: 
Eval[ContractStore], ledgerApiIndexer: Eval[LedgerApiIndexer], aliasManager: SynchronizerAliasManager, @@ -132,7 +132,12 @@ final class RepairService( ContractInstance.create(repairContract.contract) ) } yield Option( - ContractToAdd(contractInstance, repairContract.reassignmentCounter, reassigningFrom) + ContractToAdd( + contract = contractInstance, + reassignmentCounter = repairContract.reassignmentCounter, + reassigningFrom = reassigningFrom, + representativePackageId = repairContract.representativePackageId, + ) ) acsState match { @@ -171,7 +176,7 @@ final class RepairService( case Some(ActiveContractStore.Purged) => addContract(reassigningFrom = None) case Some(ActiveContractStore.ReassignedAway(targetSynchronizer, reassignmentCounter)) => log( - s"Marking contract ${repairContract.contract.contractId} previously unassigned targetting $targetSynchronizer as " + + s"Marking contract ${repairContract.contract.contractId} previously unassigned targeting $targetSynchronizer as " + s"assigned from $targetSynchronizer (even though contract may have been reassigned to yet another synchronizer since)." ).discard @@ -193,38 +198,6 @@ final class RepairService( } } - /** Prepare contract for add, including re-computing metadata - * @param repairContract - * Contract to be added - * @param acsState - * If the contract is known, its status - * @param storedContract - * If the contract already exist in the ContractStore, the stored copy - */ - private def readRepairContractCurrentState( - repairContract: RepairContract, - acsState: Option[ActiveContractStore.Status], - storedContract: Option[ContractInstance], - ignoreAlreadyAdded: Boolean, - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, String, Option[ContractToAdd]] = - for { - _ <- EitherT - .fromEither[FutureUnlessShutdown]( - contractAuthenticator.legacyAuthenticate(repairContract.contract) - ) - .leftMap(e => - log(s"Failed to authenticate contract with id: ${repairContract.contract.contractId}: $e") - ) - contractToAdd <- contractToAdd( - repairContract, - ignoreAlreadyAdded = ignoreAlreadyAdded, - acsState = acsState, - storedContract = storedContract, - ) - } yield contractToAdd - // The repair request gets inserted at the reprocessing starting point. // We use the prenextTimestamp such that a regular request is always the first request for a given timestamp. // This is needed for causality tracking, which cannot use a tie breaker on timestamps. @@ -338,11 +311,11 @@ final class RepairService( storedContracts = contractInstances.map(c => c.contractId -> c).toMap filteredContracts <- contracts.zip(contractStates).parTraverseFilter { case (contract, acsState) => - readRepairContractCurrentState( + contractToAdd( repairContract = contract, + ignoreAlreadyAdded = ignoreAlreadyAdded, acsState = acsState, storedContract = storedContracts.get(contract.contract.contractId), - ignoreAlreadyAdded = ignoreAlreadyAdded, ) } @@ -397,11 +370,20 @@ final class RepairService( storedContracts = storedContracts, ) + internalContractIdsForContractsAdded <- + logOnFailureWithInfoLevel( + contractStore.value.lookupBatchedNonCachedInternalIds( + contractsWithTimeOfChange.map(_._1.contract.contractId) + ), + "Unable to lookup internal contract ids in contract store", + ) + // Commit and publish added contracts via the indexer to the ledger api. 
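// --- Illustrative sketch (annotation, not part of the patch): folding batched internal-id
// --- lookups into a single map, mirroring the lookupBatchedNonCachedInternalIds usage above with
// --- the contract store replaced by a plain function over String contract ids.
object BatchedInternalIdLookupSketch {
  def lookupAll(
      batches: Seq[Seq[String]],
      lookupBatch: Seq[String] => Map[String, Long],
  ): Map[String, Long] =
    // later batches win on (unexpected) duplicate keys, as with Map ++
    batches.map(lookupBatch).foldLeft(Map.empty[String, Long])(_ ++ _)
}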
_ <- EitherT.right[String]( writeContractsAddedEvents( repair, contractsToAdd, + internalContractIdsForContractsAdded, workflowIds, repairIndexer, ) @@ -739,9 +721,9 @@ final class RepairService( contracts: Seq[ContractToAdd], )(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, String, Unit] = for { - // All referenced templates known and vetted - _packagesVetted <- contracts - .map(_.contract.templateId.packageId) + // Check that the representative package-id is known + _ <- contracts + .map(_.representativePackageId) .distinct .parTraverse_(packageKnown) @@ -986,11 +968,15 @@ final class RepairService( roots = ImmArray.from(nodeIds.take(txNodes.size)), ) ), - updateId = repair.transactionId.tryAsLedgerTransactionId, + updateId = repair.updateId, contractAuthenticationData = Map.empty, + // No create nodes so no representative package IDs + representativePackageIds = RepresentativePackageIds.Empty, synchronizerId = repair.synchronizer.psid.logical, repairCounter = repair.tryExactlyOneRepairCounter, recordTime = repair.timestamp, + // no need to pass the internal contract ids since no create nodes are involved + internalContractIds = Map.empty, ) // not waiting for Update.persisted, since CommitRepair anyway will be waited for at the end repairIndexer.offer(update).map(_ => ()) @@ -1001,11 +987,15 @@ final class RepairService( repairCounter: RepairCounter, ledgerCreateTime: CreationTime.CreatedAt, contractsAdded: Seq[ContractToAdd], + internalContractIdsForContractsAdded: Map[LfContractId, Long], workflowIdProvider: () => Option[LfWorkflowId], )(implicit traceContext: TraceContext): RepairUpdate = { val contractAuthenticationData = contractsAdded.view.map { c => c.contract.contractId -> c.authenticationData }.toMap + val representativePackageIds = contractsAdded.view + .map(c => c.contract.contractId -> c.representativePackageId) + .toMap val nodeIds = LazyList.from(0).map(LfNodeId) val txNodes = nodeIds.zip(contractsAdded.map(_.contract.toLf)).toMap Update.RepairTransactionAccepted( @@ -1025,17 +1015,20 @@ final class RepairService( roots = ImmArray.from(nodeIds.take(txNodes.size)), ) ), - updateId = randomTransactionId(syncCrypto).tryAsLedgerTransactionId, + updateId = randomTransactionId(syncCrypto), contractAuthenticationData = contractAuthenticationData, + representativePackageIds = RepresentativePackageIds.from(representativePackageIds), synchronizerId = repair.synchronizer.psid.logical, repairCounter = repairCounter, recordTime = repair.timestamp, + internalContractIds = internalContractIdsForContractsAdded, ) } private def writeContractsAddedEvents( repair: RepairRequest, contractsAdded: Seq[(TimeOfRepair, (CreationTime.CreatedAt, Seq[ContractToAdd]))], + internalContractIdsForContractsAdded: Map[LfContractId, Long], workflowIds: Iterator[Option[LfWorkflowId]], repairIndexer: FutureQueue[RepairUpdate], )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = @@ -1045,11 +1038,12 @@ final class RepairService( repairIndexer .offer( prepareAddedEvents( - repair, - timeOfChange.repairCounter, - timestamp, - contractsToAdd, - () => workflowIds.next(), + repair = repair, + repairCounter = timeOfChange.repairCounter, + ledgerCreateTime = timestamp, + contractsAdded = contractsToAdd, + internalContractIdsForContractsAdded = internalContractIdsForContractsAdded, + workflowIdProvider = () => workflowIds.next(), ) ) .map(_ => ()) @@ -1268,6 +1262,7 @@ object RepairService { contract: ContractInstance, reassignmentCounter: ReassignmentCounter, reassigningFrom: 
Option[Source[SynchronizerId]], + representativePackageId: LfPackageId, ) { def cid: LfContractId = contract.contractId diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/SelectRepresentativePackageIds.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/SelectRepresentativePackageIds.scala new file mode 100644 index 0000000000..567f3edd1e --- /dev/null +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/SelectRepresentativePackageIds.scala @@ -0,0 +1,111 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.participant.admin.repair + +import cats.implicits.toTraverseOps +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.participant.admin.data.ContractImportMode.Validation +import com.digitalasset.canton.participant.admin.data.{ + ContractImportMode, + RepairContract, + RepresentativePackageIdOverride, +} +import com.digitalasset.canton.store.packagemeta.PackageMetadata.PackageResolution +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.{LfPackageId, LfPackageName} + +/** Select new representative package IDs for ACS import contracts based on the precedence rules + * defined in [[com.digitalasset.canton.admin.participant.v30.ImportAcsRequest]] + * + * @param representativePackageIdOverride + * The representative package-id overrides to be applied to the imported contracts + * @param knownPackages + * All packages known to the participant at the time of the ACS import request + * @param packageNameMap + * Mapping of package-names to package-resolutions known to the participant at the time of the + * ACS import request + */ +private[admin] class SelectRepresentativePackageIds( + representativePackageIdOverride: RepresentativePackageIdOverride, + knownPackages: Set[LfPackageId], + packageNameMap: Map[LfPackageName, PackageResolution], + contractImportMode: ContractImportMode, + val loggerFactory: NamedLoggerFactory, +) extends NamedLogging { + + def apply( + contracts: List[RepairContract] + )(implicit traceContext: TraceContext): Either[String, Seq[RepairContract]] = + contracts.traverse(selectRpId) + + private def selectRpId( + repairContract: RepairContract + )(implicit traceContext: TraceContext): Either[String, RepairContract] = { + import representativePackageIdOverride.* + + val rpIdSelectionPrecedence = Seq( + RPIdCandidate( + contractOverride.get(repairContract.contract.contractId), + "Contract ID override", + ), + RPIdCandidate( + packageIdOverride.get(repairContract.representativePackageId), + "Override the exported representative package ID", + ), + RPIdCandidate( + packageIdOverride.get(repairContract.contract.templateId.packageId), + "Override the creation package ID", + ), + RPIdCandidate( + Option(repairContract.representativePackageId), + "Representative package ID from export", + ), + RPIdCandidate( + Option(repairContract.contract.templateId.packageId), + "Contract creation package ID", + ), + RPIdCandidate( + packageNameOverride.get(repairContract.contract.packageName), + "Package-name override", + ), + // TODO(#28075): Add package vetting-based override + RPIdCandidate( + packageNameMap.get(repairContract.contract.packageName).map(_.preference.packageId), + "Highest versioned (package store) package-id for the 
contract's package-name", + ), + ) + + rpIdSelectionPrecedence.view + .map(_.evaluated) + .collectFirst { case Some(RPIdSelection(selectedRpId, sourceDescription)) => + logger.debug( + show"Selected representative package-id $selectedRpId for ${repairContract.contract.contractId} ($sourceDescription)" + ) + selectedRpId + } + .toRight( + show"Could not select a representative package-id for contract with id ${repairContract.contract.contractId}. No package in store for the contract's package-name '${repairContract.contract.packageName}'." + ) + .flatMap { selectedPkgId => + Either.cond( + selectedPkgId == repairContract.representativePackageId || contractImportMode == Validation || contractImportMode == ContractImportMode.Recomputation, + selectedPkgId, + show"Contract import mode is '$contractImportMode' but the selected representative package-id $selectedPkgId " + + show"for contract with id ${repairContract.contract.contractId} differs from the exported representative package-id ${repairContract.representativePackageId}. " + + show"Please use contract import mode '${ContractImportMode.Validation}' or '${ContractImportMode.Recomputation}' to change the representative package-id.", + ) + } + .map(selectedRPId => repairContract.copy(representativePackageId = selectedRPId)) + } + + private case class RPIdSelection(packageId: LfPackageId, selectionSourceDescription: String) + + private case class RPIdCandidate( + candidate: Option[LfPackageId], + selectionSourceDescription: String, + ) { + val evaluated: Option[RPIdSelection] = + candidate.filter(knownPackages.contains).map(RPIdSelection(_, selectionSourceDescription)) + } +} diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/package.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/package.scala index fdf450db24..63dd7ec2be 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/package.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/package.scala @@ -4,7 +4,7 @@ package com.digitalasset.canton.participant.admin import com.digitalasset.canton.crypto.{HashPurpose, SyncCryptoApiParticipantProvider} -import com.digitalasset.canton.protocol.TransactionId +import com.digitalasset.canton.protocol.UpdateId package object repair { @@ -20,6 +20,6 @@ package object repair { // We don't have to worry about clashes with ordinary transaction IDs as the hash purpose is different. 
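// (A sketch of why this works: the hash purpose is part of the digest input and acts as a domain separator, so for the same input bytes, digest(HashPurpose.RepairTransactionId, bytes) and, say, digest(HashPurpose.TopologyUpdateId, bytes) differ except with negligible probability.)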
val randomness = syncCrypto.pureCrypto.generateRandomByteString(16) val hash = syncCrypto.pureCrypto.digest(HashPurpose.RepairTransactionId, randomness) - TransactionId(hash) + UpdateId(hash) } } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/config/DeclarativeParticipantConfig.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/config/DeclarativeParticipantConfig.scala index dd1bc8a80f..12580296ef 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/config/DeclarativeParticipantConfig.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/config/DeclarativeParticipantConfig.scala @@ -21,11 +21,12 @@ import com.digitalasset.canton.networking.Endpoint import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig import com.digitalasset.canton.sequencing.{ GrpcSequencerConnection, + SequencerConnectionPoolDelays, SequencerConnections, SubmissionRequestAmplification, } import com.digitalasset.canton.topology.transaction.ParticipantPermission -import com.digitalasset.canton.topology.{Namespace, UniqueIdentifier} +import com.digitalasset.canton.topology.{Namespace, PartyId, UniqueIdentifier} import com.digitalasset.canton.util.BinaryFileUtil import com.digitalasset.canton.{SequencerAlias, SynchronizerAlias} import com.google.protobuf.ByteString @@ -86,6 +87,7 @@ final case class DeclarativeDarConfig( location: String, requestHeaders: Map[String, String] = Map(), expectedMainPackage: Option[String] = None, + synchronizers: Seq[String] = Seq.empty, ) extends UniformCantonConfigValidation sealed trait ParticipantPermissionConfig extends UniformCantonConfigValidation { @@ -191,25 +193,36 @@ final case class DeclarativeUserConfig( )(val resourceVersion: String = "") extends UniformCantonConfigValidation { - /** map party names to namespace and filter out parties that are not yet registered - * - * the ledger api server needs to know the parties that we add to a user. that requires a - * synchronizer connection. 
therefore, we filter here the parties that are not yet registered as - otherwise the ledger api server will throw errors + */ def mapPartiesToNamespace( - namespace: Namespace, - filterParty: String => Boolean, + namespace: Namespace ): DeclarativeUserConfig = { def mapParty(party: String): String = if (party.contains(UniqueIdentifier.delimiter)) party else UniqueIdentifier.tryCreate(party, namespace).toProtoPrimitive copy( - primaryParty = primaryParty.map(mapParty).filter(filterParty), + primaryParty = primaryParty.map(mapParty), + rights = rights.copy( + actAs = rights.actAs.map(mapParty), + readAs = rights.readAs.map(mapParty), + ), + )(resourceVersion) + } + + def referencedParties: Set[PartyId] = + rights.readAs.map(PartyId.tryFromProtoPrimitive) ++ rights.actAs.map( + PartyId.tryFromProtoPrimitive + ) ++ primaryParty.map(PartyId.tryFromProtoPrimitive) + + def removeParties(unknown: Set[PartyId]): DeclarativeUserConfig = { + val unknownStrings = unknown.map(_.toProtoPrimitive) + copy( + primaryParty = primaryParty.filterNot(unknownStrings.contains), rights = rights.copy( - actAs = rights.actAs.map(mapParty).filter(filterParty), - readAs = rights.readAs.map(mapParty).filter(filterParty), + actAs = rights.actAs.diff(unknownStrings), + readAs = rights.readAs.diff(unknownStrings), ), )(resourceVersion) } @@ -305,6 +318,7 @@ final case class DeclarativeConnectionConfig( sequencerTrustThreshold = trustThreshold, sequencerLivenessMargin = livenessMargin, submissionRequestAmplification = SubmissionRequestAmplification.NoAmplification, + sequencerConnectionPoolDelays = SequencerConnectionPoolDelays.default, ) sequencerConnectionsE.map { sequencerConnections => diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/config/ParticipantNodeConfig.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/config/ParticipantNodeConfig.scala index 167c840200..bcc5be59c9 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/config/ParticipantNodeConfig.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/config/ParticipantNodeConfig.scala @@ -80,7 +80,7 @@ final case class ParticipantNodeConfig( override val init: ParticipantInitConfig = ParticipantInitConfig(), override val crypto: CryptoConfig = CryptoConfig(), ledgerApi: LedgerApiServerConfig = LedgerApiServerConfig(), - httpLedgerApi: Option[JsonApiConfig] = None, + httpLedgerApi: JsonApiConfig = JsonApiConfig(), override val adminApi: AdminServerConfig = AdminServerConfig(), override val storage: StorageConfig = StorageConfig.Memory(), testingTime: Option[TestingTimeServiceConfig] = None, @@ -112,6 +112,8 @@ final case class ParticipantNodeConfig( .modify(ports.participantAdminApiPort.setDefaultPort) .focus(_.replication) .modify(ReplicationConfig.withDefaultO(storage, _, edition)) + .focus(_.httpLedgerApi.server.internalPort) + .modify(ports.jsonLedgerApiPort.setDefaultPort) } object ParticipantNodeConfig { @@ -247,6 +249,9 @@ final case class LedgerApiServerConfig( ) extends ServerConfig // We can't currently expose enterprise server features at the ledger api anyway { + // The LAPI server does not use the canonical server builder, so it does not support stream limits via the stream limit config + override val stream: Option[StreamLimitConfig] = None + lazy val clientConfig: FullClientConfig = FullClientConfig(address, port, tls.map(_.clientConfig)) @@ -314,12 +319,6 @@
object TestingTimeServiceConfig { * * The following specialized participant node performance tuning parameters may be grouped once a * more final set of configs emerges. - * @param reassignmentTimeProofFreshnessProportion - * Proportion of the target synchronizer exclusivity timeout that is used as a freshness bound - * when requesting a time proof. Setting to 3 means we'll take a 1/3 of the target synchronizer - * exclusivity timeout and potentially we reuse a recent timeout if one exists within that bound, - * otherwise a new time proof will be requested. Setting to zero will disable reusing recent time - * proofs and will instead always fetch a new proof. * @param minimumProtocolVersion * The minimum protocol version that this participant will speak when connecting to a * synchronizer @@ -350,6 +349,9 @@ object TestingTimeServiceConfig { * [[com.digitalasset.canton.config.CantonParameters.enableAdditionalConsistencyChecks]] being * enabled are logged, measured in the number of contract activations during a single connection * to a synchronizer. Used only for database storage. + * @param doNotAwaitOnCheckingIncomingCommitments + * Enable fully asynchronous checking of incoming commitments. This may result in some incoming + * commitments not being checked in case of crashes or HA failovers. */ final case class ParticipantNodeParameterConfig( adminWorkflow: AdminWorkflowConfig = AdminWorkflowConfig(), @@ -357,7 +359,6 @@ final case class ParticipantNodeParameterConfig( batching: BatchingConfig = BatchingConfig(), caching: CachingConfigs = CachingConfigs(), stores: ParticipantStoreConfig = ParticipantStoreConfig(), - reassignmentTimeProofFreshnessProportion: NonNegativeInt = NonNegativeInt.tryCreate(3), minimumProtocolVersion: Option[ParticipantProtocolVersion] = Some( ParticipantProtocolVersion(ProtocolVersion.v34) ), @@ -384,15 +385,15 @@ final case class ParticipantNodeParameterConfig( // TODO(#25344): check whether this should be removed automaticallyPerformLogicalSynchronizerUpgrade: Boolean = true, activationFrequencyForWarnAboutConsistencyChecks: Long = 1000, + reassignmentsConfig: ReassignmentsConfig = ReassignmentsConfig(), + doNotAwaitOnCheckingIncomingCommitments: Boolean = false, ) extends LocalNodeParametersConfig with UniformCantonConfigValidation object ParticipantNodeParameterConfig { implicit val participantNodeParameterConfigCantonConfigValidator - : CantonConfigValidator[ParticipantNodeParameterConfig] = { - import CantonConfigValidatorInstances.* + : CantonConfigValidator[ParticipantNodeParameterConfig] = CantonConfigValidatorDerivation[ParticipantNodeParameterConfig] - } } /** Parameters for the participant node's stores diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/event/RecordOrderPublisher.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/event/RecordOrderPublisher.scala index 0f3771a1a6..946f984ff5 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/event/RecordOrderPublisher.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/event/RecordOrderPublisher.scala @@ -38,6 +38,7 @@ import com.digitalasset.canton.{RequestCounter, SequencerCounter} import java.util.concurrent.atomic.AtomicReference import scala.concurrent.{ExecutionContext, Future} +import scala.util.chaining.* import scala.util.{Failure, Success} /** Helper trait for Online Party Replication event publishing. 
Refer to methods in the @@ -124,7 +125,7 @@ class RecordOrderPublisher private ( } } { store.put( - updateId = transactionAccepted.updateId, + updateId = transactionAccepted.updateId.toHexString, lfVersionedTransaction = transactionAccepted.transaction, ) } @@ -351,9 +352,8 @@ class RecordOrderPublisher private ( } } - // TODO(#26580) More validation and setting should be done in case of cancelled upgrade (and when attempting the next one) - def setSuccessor(successor: SynchronizerSuccessor): Unit = - synchronizerSuccessor.set(Some(successor)) + def setSuccessor(successor: Option[SynchronizerSuccessor]): Unit = + synchronizerSuccessor.set(successor) private def scheduleBufferingEventTaskImmediately( perform: CantonTimestamp => FutureUnlessShutdown[Unit] @@ -553,8 +553,8 @@ object RecordOrderPublisher { loggerFactory: NamedLoggerFactory, futureSupervisor: FutureSupervisor, clock: Clock, - )(implicit executionContextForPublishing: ExecutionContext): RecordOrderPublisher = { - val rop = new RecordOrderPublisher( + )(implicit executionContextForPublishing: ExecutionContext): RecordOrderPublisher = + new RecordOrderPublisher( psid, initSc, initTimestamp, @@ -565,10 +565,5 @@ object RecordOrderPublisher { loggerFactory, futureSupervisor, clock, - ) - - synchronizerSuccessor.foreach(rop.setSuccessor) - - rop - } + ).tap(_.setSuccessor(synchronizerSuccessor)) } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiIndexer.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiIndexer.scala index b1c97c8aca..076cc52bc6 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiIndexer.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiIndexer.scala @@ -145,8 +145,7 @@ object LedgerApiIndexer { DbSupport.DataSourceProperties( connectionPool = IndexerConfig .createConnectionPoolConfig( - ingestionParallelism = - ledgerApiIndexerConfig.indexerConfig.ingestionParallelism.unwrap, + indexerConfig = ledgerApiIndexerConfig.indexerConfig, connectionTimeout = ledgerApiIndexerConfig.serverConfig.databaseConnectionTimeout.underlying, ), diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiServer.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiServer.scala index 174bc26b9c..68c86544e8 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiServer.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiServer.scala @@ -42,7 +42,12 @@ import com.digitalasset.canton.participant.config.{ ParticipantNodeConfig, TestingTimeServiceConfig, } -import com.digitalasset.canton.participant.store.ParticipantNodePersistentState +import com.digitalasset.canton.participant.store.{ + ContractStore, + ParticipantNodePersistentState, + ParticipantPruningStore, + PruningOffsetServiceImpl, +} import com.digitalasset.canton.participant.sync.CantonSyncService import com.digitalasset.canton.participant.{ LedgerApiServerBootstrapUtils, @@ -76,8 +81,9 @@ import com.digitalasset.canton.platform.{ } import com.digitalasset.canton.time.{Clock, RemoteClock, SimClock} import com.digitalasset.canton.tracing.{TraceContext, TracerProvider} -import 
com.digitalasset.canton.util.ContractAuthenticator -import com.digitalasset.canton.{LedgerParticipantId, LfPartyId, config} +import com.digitalasset.canton.util.ContractValidator +import com.digitalasset.canton.util.PackageConsumer.PackageResolver +import com.digitalasset.canton.{LedgerParticipantId, LfPackageId, LfPartyId, config} import com.digitalasset.daml.lf.data.Ref import com.digitalasset.daml.lf.data.Ref.Party import com.digitalasset.daml.lf.engine.Engine @@ -93,7 +99,7 @@ import scala.concurrent.Future class LedgerApiServer( serverConfig: LedgerApiServerConfig, - jsonApiConfig: Option[JsonApiConfig], + jsonApiConfig: JsonApiConfig, participantId: LedgerParticipantId, adminParty: Party, adminTokenConfig: AdminTokenConfig, @@ -102,6 +108,8 @@ class LedgerApiServer( cantonParameterConfig: ParticipantNodeParameters, testingTimeService: Option[TimeServiceBackend], adminTokenDispenser: CantonAdminTokenDispenser, + participantContractStore: Eval[ContractStore], + participantPruningStore: Eval[ParticipantPruningStore], enableCommandInspection: Boolean, tracerProvider: TracerProvider, grpcApiMetrics: LedgerApiServerMetrics, @@ -192,7 +200,8 @@ class LedgerApiServer( ContractLoader .create( contractStorageBackend = dbSupport.storageBackendFactory.createContractStorageBackend( - inMemoryState.stringInterningView + inMemoryState.stringInterningView, + inMemoryState.ledgerEndCache, ), dbDispatcher = dbSupport.dbDispatcher, metrics = grpcApiMetrics, @@ -254,6 +263,9 @@ class LedgerApiServer( )( loggingContext ), + participantContractStore = participantContractStore.value, + pruningOffsetService = + PruningOffsetServiceImpl(participantPruningStore.value, loggerFactory), ) _ = timedSyncService.registerInternalIndexService(new InternalIndexService { override def activeContracts( @@ -261,7 +273,7 @@ class LedgerApiServer( validAt: Option[Offset], )(implicit traceContext: TraceContext): Source[GetActiveContractsResponse, NotUsed] = indexService.getActiveContracts( - filter = EventFormat( + eventFormat = EventFormat( filtersByParty = partyIds.view.map(_ -> CumulativeFilter.templateWildcardFilter(true)).toMap, filtersForAnyParty = None, @@ -302,26 +314,27 @@ class LedgerApiServer( executionContext = executionContext, loggerFactory = loggerFactory, ) - contractAuthenticator = ContractAuthenticator( - syncService.pureCryptoApi - ) + + packageLoader = new DeduplicatingPackageLoader() + packageResolver: PackageResolver = (packageId: LfPackageId) => + (traceContext: TraceContext) => + FutureUnlessShutdown.outcomeF( + packageLoader.loadPackage( + packageId = packageId, + delegate = packageId => timedSyncService.getLfArchive(packageId)(traceContext), + metric = grpcApiMetrics.index.db.translation.getLfPackage, + ) + ) + + contractValidator = ContractValidator(syncService.pureCryptoApi, engine, packageResolver) // TODO(i21582) The prepare endpoint of the interactive submission service does not suffix // contract IDs of the transaction yet. This means enrichment of the transaction may fail // when processing unsuffixed contract IDs. For that reason we disable this requirement via the flag below. 
// When CIDs are suffixed, we can re-use the LfValueTranslation from the index service created above - packageLoader = new DeduplicatingPackageLoader() interactiveSubmissionEnricher = new InteractiveSubmissionEnricher( new Engine(engine.config.copy(forbidLocalContractIds = false)), - packageResolver = packageId => - implicit traceContext => - FutureUnlessShutdown.outcomeF( - packageLoader.loadPackage( - packageId = packageId, - delegate = packageId => timedSyncService.getLfArchive(packageId)(traceContext), - metric = grpcApiMetrics.index.db.translation.getLfPackage, - ) - ), + packageResolver = packageResolver, ) (_, authInterceptor) <- ApiServiceOwner( @@ -372,7 +385,7 @@ class LedgerApiServer( engineLoggingConfig = cantonParameterConfig.engine.submissionPhaseLogging, telemetry = telemetry, loggerFactory = loggerFactory, - contractAuthenticator = contractAuthenticator.authenticate, + contractAuthenticator = contractValidator.authenticateHash, dynParamGetter = syncService.dynamicSynchronizerParameterGetter, interactiveSubmissionServiceConfig = serverConfig.interactiveSubmissionService, interactiveSubmissionEnricher = interactiveSubmissionEnricher, @@ -494,34 +507,34 @@ class LedgerApiServer( authInterceptor: AuthInterceptor, packagePreferenceBackend: PackagePreferenceBackend, ): ResourceOwner[Unit] = - jsonApiConfig - .fold(ResourceOwner.unit) { jsonApiConfig => - for { - channel <- ResourceOwner - .forReleasable(() => - InProcessChannelBuilder - .forName(InProcessGrpcName.forPort(serverConfig.clientConfig.port)) - .executor(executionContext.execute(_)) - .build() - )(channel => - Future( - new FastCloseableChannel(channel, logger, "JSON-API").close() - ) + if (!jsonApiConfig.enabled) + ResourceOwner.unit + else + for { + channel <- ResourceOwner + .forReleasable(() => + InProcessChannelBuilder + .forName(InProcessGrpcName.forPort(serverConfig.clientConfig.port)) + .executor(executionContext.execute(_)) + .build() + )(channel => + Future( + new FastCloseableChannel(channel, logger, "JSON-API").close() ) - .afterReleased(noTracingLogger.info("JSON-API gRPC channel is released")) - _ <- HttpApiServer( - jsonApiConfig, - serverConfig.tls, - channel, - packageSyncService, - loggerFactory, - authInterceptor, - packagePreferenceBackend = packagePreferenceBackend, - )( - jsonApiMetrics - ).afterReleased(noTracingLogger.info("JSON-API HTTP Server is released")) - } yield () - } + ) + .afterReleased(noTracingLogger.info("JSON-API gRPC channel is released")) + _ <- HttpApiServer( + jsonApiConfig, + serverConfig.tls, + channel, + packageSyncService, + loggerFactory, + authInterceptor, + packagePreferenceBackend = packagePreferenceBackend, + )( + jsonApiMetrics + ).afterReleased(noTracingLogger.info("JSON-API HTTP Server is released")) + } yield () } object LedgerApiServer { @@ -575,6 +588,8 @@ object LedgerApiServer { cantonParameterConfig = parameters, testingTimeService = ledgerTestingTimeService, adminTokenDispenser = adminTokenDispenser, + participantContractStore = participantNodePersistentState.map(_.contractStore), + participantPruningStore = participantNodePersistentState.map(_.pruningStore), enableCommandInspection = config.ledgerApi.enableCommandInspection, tracerProvider = tracerProvider, grpcApiMetrics = metrics, diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiStore.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiStore.scala index da3dc71421..d1c018eb64 100644 --- 
a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiStore.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiStore.scala @@ -211,8 +211,8 @@ class LedgerApiStore( traceContext: TraceContext, ec: ExecutionContext, ): FutureUnlessShutdown[Set[LfContractId]] = - executeSqlUS(metrics.index.db.archivals)( - eventStorageBackend.archivals(fromExclusive, toInclusive) + executeSqlUS(metrics.index.db.archivalsLegacy)( + eventStorageBackend.archivalsLegacy(fromExclusive, toInclusive) ) private[api] def initializeInMemoryState(implicit diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/StartableStoppableLedgerApiDependentServices.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/StartableStoppableLedgerApiDependentServices.scala index 70569ed9de..e48b0d4fe0 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/StartableStoppableLedgerApiDependentServices.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/StartableStoppableLedgerApiDependentServices.scala @@ -113,7 +113,8 @@ class StartableStoppableLedgerApiDependentServices( .bindService( new GrpcPackageService( packageService, - syncService.synchronizeVettingOnConnectedSynchronizers, + syncService.synchronizeVettingOnSynchronizer, + () => syncService.readySynchronizers.values.map(_._1).toSet, loggerFactory, ), ec, @@ -150,6 +151,7 @@ class StartableStoppableLedgerApiDependentServices( partyReplicationAdminWorkflowO, parameters.processingTimeouts, syncService, + parameters, loggerFactory, ), ec, diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/client/JavaDecodeUtil.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/client/JavaDecodeUtil.scala index ef069f301b..d087726b44 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/client/JavaDecodeUtil.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/client/JavaDecodeUtil.scala @@ -19,6 +19,7 @@ import com.daml.ledger.javaapi.data.{ } import com.digitalasset.daml.lf.data.Ref +import java.util.Optional import scala.jdk.CollectionConverters.* /** Java event decoders @@ -140,10 +141,10 @@ object JavaDecodeUtil { ) else new DisclosedContract( - create.getTemplateId, - create.getContractId, createdEventBlob, synchronizerId, + Optional.of(create.getTemplateId), + Optional.of(create.getContractId), ) } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/LedgerEffectAbsolutizer.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/LedgerEffectAbsolutizer.scala new file mode 100644 index 0000000000..b9e193c0ac --- /dev/null +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/LedgerEffectAbsolutizer.scala @@ -0,0 +1,109 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.participant.protocol + +import cats.syntax.traverse.* +import com.digitalasset.canton.data.{ + AssignedKey, + FreeKey, + SerializableKeyResolution, + ViewParticipantData, +} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.participant.protocol.LedgerEffectAbsolutizer.ViewAbsoluteLedgerEffect +import com.digitalasset.canton.protocol.{ + ContractIdAbsolutizer, + CreatedContract, + InputContract, + LfContractId, + LfGlobalKey, +} +import com.digitalasset.canton.{LfPartyId, LfVersioned} + +class LedgerEffectAbsolutizer(absolutizer: ContractIdAbsolutizer) { + + def absoluteViewEffects( + vpd: ViewParticipantData, + informees: Set[LfPartyId], + ): Either[String, ViewAbsoluteLedgerEffect] = + for { + coreInputs <- vpd.coreInputs.toSeq.traverse { case (cid, input) => + if (input.contractId.isAbsolute) Right(cid -> input) + else + absolutizeInputContract(input).map(absolutizedInstance => + // The ViewParticipantData ensures that each key of the core inputs map equals the contract ID of the referenced input contract, + // so we can skip absolutizing the map keys. + (absolutizedInstance.contractId, absolutizedInstance) + ) + } + createdCore <- vpd.createdCore.traverse(absolutizeCreatedContract) + createdInSubviewArchivedInCore <- vpd.createdInSubviewArchivedInCore.toSeq + .traverse(absolutizer.absolutizeContractId) + .map(_.toSet) + resolvedKeys <- vpd.resolvedKeys.toSeq.traverse { case (gkey, resolution) => + for { + absolutizedKey <- gkey.key.traverseCid(absolutizer.absolutizeContractId) + absolutizedResolution <- absolutizeKeyResolution(resolution.unversioned) + } yield ( + LfGlobalKey.assertBuild(gkey.templateId, absolutizedKey, gkey.packageName), + resolution.copy(unversioned = absolutizedResolution), + ) + } + } yield ViewAbsoluteLedgerEffect( + coreInputs = coreInputs.toMap, + createdCore = createdCore, + createdInSubviewArchivedInCore = createdInSubviewArchivedInCore, + resolvedKeys = resolvedKeys.toMap, + inRollback = vpd.rollbackContext.inRollback, + informees = informees, + ) + + def absolutizeInputContract(input: InputContract): Either[String, InputContract] = + for { + absolutizedInstance <- absolutizer.absolutizeContractInstance(input.contract) + } yield input.copy(contract = absolutizedInstance) + + def absolutizeCreatedContract(created: CreatedContract): Either[String, CreatedContract] = + for { + absolutizedInstance <- absolutizer.absolutizeContractInstance(created.contract) + absolutizedCreated <- CreatedContract.create( + contract = absolutizedInstance, + consumedInCore = created.consumedInCore, + rolledBack = created.rolledBack, + ) + } yield absolutizedCreated + + def absolutizeKeyResolution( + resolution: SerializableKeyResolution + ): Either[String, SerializableKeyResolution] = + resolution match { + case AssignedKey(cid) => + absolutizer.absolutizeContractId(cid).map(AssignedKey.apply) + case free: FreeKey => Right(free) + } +} + +object LedgerEffectAbsolutizer { + + /** Projection of [[com.digitalasset.canton.data.ViewParticipantData]] to relevant fields with + * absolutized contract IDs + */ + final case class ViewAbsoluteLedgerEffect( + coreInputs: Map[LfContractId, InputContract], + createdCore: Seq[CreatedContract], + createdInSubviewArchivedInCore: Set[LfContractId], + resolvedKeys: Map[LfGlobalKey, LfVersioned[SerializableKeyResolution]], + inRollback: Boolean, + informees: Set[LfPartyId], + ) extends PrettyPrinting { + override protected def pretty:
Pretty[ViewAbsoluteLedgerEffect] = prettyOfClass( + paramIfNonEmpty("core inputs", _.coreInputs), + paramIfNonEmpty("created core", _.createdCore), + paramIfNonEmpty("created in subview, archived in core", _.createdInSubviewArchivedInCore), + paramIfNonEmpty("resolved keys", _.resolvedKeys), + paramIfTrue("in rollback", _.inRollback), + param("informees", _.informees), + ) + } +} diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ParticipantTopologyTerminateProcessing.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ParticipantTopologyTerminateProcessing.scala index 49311b8d7d..50b48fac26 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ParticipantTopologyTerminateProcessing.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ParticipantTopologyTerminateProcessing.scala @@ -100,12 +100,21 @@ class ParticipantTopologyTerminateProcessing( override def notifyUpgradeAnnouncement( successor: SynchronizerSuccessor )(implicit traceContext: TraceContext): Unit = { - logger.debug( + logger.info( s"Node is notified about the upgrade of $psid to ${successor.psid} scheduled at ${successor.upgradeTime}" ) lsuCallback.registerCallback(successor) - recordOrderPublisher.setSuccessor(successor) + recordOrderPublisher.setSuccessor(Some(successor)) + } + + override def notifyUpgradeCancellation()(implicit traceContext: TraceContext): Unit = { + logger.info( + s"Node is notified about the cancellation of the upgrade" + ) + + lsuCallback.unregisterCallback() + recordOrderPublisher.setSuccessor(None) + } private def scheduleEvent( diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ProcessingSteps.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ProcessingSteps.scala index 930c925f66..c813d76187 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ProcessingSteps.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ProcessingSteps.scala @@ -16,6 +16,7 @@ import com.digitalasset.canton.logging.ErrorLoggingContext import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.participant.protocol.EngineController.EngineAbortStatus import com.digitalasset.canton.participant.protocol.ProcessingSteps.{ + InternalContractIds, ParsedRequest, WrapsProcessorError, } @@ -102,6 +103,8 @@ trait ProcessingSteps[ type ViewSubmitterMetadata = RequestViewType#ViewSubmitterMetadata + type ViewAbsoluteLedgerEffects + /** Type of a request that has been parsed and contains at least one well-formed view. */ type ParsedRequestType <: ParsedRequest[ViewSubmitterMetadata] @@ -336,7 +339,7 @@ trait ProcessingSteps[ // Phase 3: Request processing - /** Phase 3, step 1: + /** Phase 3, step 1a: * * @param batch * The batch of messages in the request to be processed @@ -378,16 +381,45 @@ trait ProcessingSteps[ } /** Phase 3, step 1b + * + * Extracts the ledger effects from the decrypted view and makes them absolute. When + * absolutization fails for a view, say because an unknown contract ID version appears in an + * input contract value, the view should be considered malformed. + *
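+ * A minimal sketch of the intended wiring (as instantiated by [[TransactionProcessingSteps.absolutizeLedgerEffects]]; error handling elided): + * {{{ + * val absolutizer = new LedgerEffectAbsolutizer( + * new ContractIdAbsolutizer(crypto.pureCrypto, ContractIdAbsolutizationDataV1) + * ) + * // a Left marks the view as malformed + * val effectsE: Either[String, ViewAbsoluteLedgerEffect] = + * absolutizer.absoluteViewEffects(view.viewParticipantData, view.informees) + * }}} + *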
+ * Conflict detection needs absolutized contract IDs. Since conflict detection runs in + * parallel with model conformance checking, absolutization cannot be deferred until the + * model conformance check. + * + * @param viewsWithCorrectRootHashAndRecipientsAndSignature + * The list of decrypted views, as returned by [[decryptViews]], after the additional checks + * for the root hash and the recipients. + */ + def absolutizeLedgerEffects( + viewsWithCorrectRootHashAndRecipientsAndSignature: Seq[ + (WithRecipients[DecryptedView], Option[Signature]) + ] + ): ( + Seq[(WithRecipients[DecryptedView], Option[Signature], ViewAbsoluteLedgerEffects)], + Seq[MalformedPayload], + ) + + type FullViewAbsoluteLedgerEffects + + /** Phase 3, step 1c * * Converts the decrypted (possible light-weight) view trees to the corresponding full view * trees. Views that cannot be converted are mapped to [[ProtocolProcessor.MalformedPayload]] * errors. */ def computeFullViews( - decryptedViewsWithSignatures: Seq[(WithRecipients[DecryptedView], Option[Signature])] - ): (Seq[(WithRecipients[FullView], Option[Signature])], Seq[MalformedPayload]) + decryptedViewsWithSignatures: Seq[ + (WithRecipients[DecryptedView], Option[Signature], ViewAbsoluteLedgerEffects) + ] + ): ( + Seq[(WithRecipients[FullView], Option[Signature], FullViewAbsoluteLedgerEffects)], + Seq[MalformedPayload], + ) - /** Phase 3, step 1c + /** Phase 3, step 1d * * Create a ParsedRequest out of the data computed so far. */ @@ -396,7 +428,7 @@ trait ProcessingSteps[ ts: CantonTimestamp, sc: SequencerCounter, rootViewsWithMetadata: NonEmpty[ - Seq[(WithRecipients[FullView], Option[Signature])] + Seq[(WithRecipients[FullView], Option[Signature], FullViewAbsoluteLedgerEffects)] ], submitterMetadataO: Option[ViewSubmitterMetadata], isFreshOwnTimelyRequest: Boolean, @@ -566,7 +598,7 @@ trait ProcessingSteps[ case class CommitAndStoreContractsAndPublishEvent( commitSet: Option[FutureUnlessShutdown[CommitSet]], contractsToBeStored: Seq[ContractInstance], - maybeEvent: Option[AcsChangeFactory => SequencedUpdate], + maybeEvent: Option[AcsChangeFactory => InternalContractIds => SequencedUpdate], ) /** Phase 7, step 4: @@ -593,7 +625,7 @@ trait ProcessingSteps[ object ProcessingSteps { def getAssignmentExclusivity( topologySnapshot: Target[TopologySnapshot], - ts: CantonTimestamp, + ts: Target[CantonTimestamp], )(implicit ec: ExecutionContext, traceContext: TraceContext, @@ -602,7 +634,9 @@ object ProcessingSteps { synchronizerParameters <- EitherT(topologySnapshot.unwrap.findDynamicSynchronizerParameters()) assignmentExclusivity <- EitherT - .fromEither[FutureUnlessShutdown](synchronizerParameters.assignmentExclusivityLimitFor(ts)) + .fromEither[FutureUnlessShutdown]( + synchronizerParameters.assignmentExclusivityLimitFor(ts.unwrap) + ) } yield Target(assignmentExclusivity) def getDecisionTime( @@ -793,4 +827,8 @@ object ProcessingSteps { override def cancelDecisionTimeTickRequest(): Unit = () } + + // TODO(#27996) remove this type when internal contract ids are no longer fetched from ProtocolProcessor + type InternalContractIds = Map[LfContractId, Long] + } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessor.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessor.scala index 92aef28c4e..324764a824 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessor.scala +++
b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessor.scala @@ -783,18 +783,27 @@ abstract class ProtocolProcessor[ ) (incorrectRecipients, viewsWithCorrectRootHashAndRecipients) = checkRecipientsResult - // TODO(#23971) Absolutize ledger effects before passing them to ExtractUsedAndCreated + ( + viewsWithCorrectRootHashAndRecipientsAndAbsoluteLedgerEffects, + incorrectAbsolutizationViews, + ) = steps.absolutizeLedgerEffects( + viewsWithCorrectRootHashAndRecipients + ) - (fullViewsWithCorrectRootHashAndRecipients, incorrectDecryptedViews) = - steps.computeFullViews(viewsWithCorrectRootHashAndRecipients) + ( + fullViewsWithCorrectRootHashAndRecipientsAndAbsoluteLedgerEffects, + incorrectFullViews, + ) = steps.computeFullViews(viewsWithCorrectRootHashAndRecipientsAndAbsoluteLedgerEffects) malformedPayloads = - decryptionErrors ++ incorrectRootHashes ++ incorrectRecipients ++ incorrectDecryptedViews + decryptionErrors ++ incorrectRootHashes ++ incorrectRecipients ++ incorrectAbsolutizationViews ++ incorrectFullViews - _ <- NonEmpty.from(fullViewsWithCorrectRootHashAndRecipients) match { + _ <- NonEmpty.from( + fullViewsWithCorrectRootHashAndRecipientsAndAbsoluteLedgerEffects + ) match { case None => /* - If fullViewsWithCorrectRootHashAndRecipients is empty, it does not necessarily mean that we have a + If fullViewsWithCorrectRootHashAndRecipientsAndAbsoluteLedgerEffects is empty, it does not necessarily mean that we have a malicious submitter (e.g., if there is concurrent topology change). Hence, if we have a submission data, then we will aim to generate a command completion. */ @@ -848,7 +857,7 @@ abstract class ProtocolProcessor[ case Some(goodViewsWithSignatures) => // All views with the same correct root hash declare the same mediator, so it's enough to look at the head - val (firstView, _) = goodViewsWithSignatures.head1 + val (firstView, _, _) = goodViewsWithSignatures.head1 val observeFUS = submissionDataForTrackerO match { case Some(submissionDataForTracker) => @@ -1591,7 +1600,15 @@ abstract class ProtocolProcessor[ ) for { commitSet <- EitherT.right[steps.ResultError](commitSetF) - eventO = eventFactoryO.map(_(AcsChangeSupport.fromCommitSet(commitSet))) + // TODO(#27996) getting the internal contract ids will not be done here and will be part of indexing + internalContractIdsForStoredContracts <- EitherT.right[steps.ResultError]( + ephemeral.contractStore.lookupBatchedNonCachedInternalIds( + contractsToBeStored.map(_.contractId) + ) + ) + eventO = eventFactoryO.map( + _(AcsChangeSupport.fromCommitSet(commitSet))(internalContractIdsForStoredContracts) + ) _ = logger.info(show"About to wrap up request $requestId with event $eventO") requestTimestamp = requestId.unwrap _unit <- EitherT.right[steps.ResultError]( diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TopologyTransactionDiff.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TopologyTransactionDiff.scala index c98952b6bb..0bf8b215e5 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TopologyTransactionDiff.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TopologyTransactionDiff.scala @@ -15,10 +15,11 @@ import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransacti AuthorizationLevel, TopologyEvent, } +import com.digitalasset.canton.protocol.UpdateId import 
com.digitalasset.canton.topology.transaction.* import com.digitalasset.canton.topology.transaction.SignedTopologyTransactions.PositiveSignedTopologyTransactions import com.digitalasset.canton.topology.{ParticipantId, PhysicalSynchronizerId} -import com.digitalasset.canton.{LedgerParticipantId, LedgerTransactionId, LfPartyId} +import com.digitalasset.canton.{LedgerParticipantId, LfPartyId} private[protocol] object TopologyTransactionDiff { @@ -90,7 +91,7 @@ private[protocol] object TopologyTransactionDiff { synchronizerId: PhysicalSynchronizerId, oldRelevantState: Seq[SignedTopologyTransaction[TopologyChangeOp, TopologyMapping]], currentRelevantState: Seq[SignedTopologyTransaction[TopologyChangeOp, TopologyMapping]], - ): LedgerTransactionId = { + ): UpdateId = { val builder = Hash.build(HashPurpose.TopologyUpdateId, HashAlgorithm.Sha256) def addToBuilder( @@ -110,7 +111,7 @@ private[protocol] object TopologyTransactionDiff { val hash = builder.finish() - LedgerTransactionId.assertFromString(hash.toHexString) + UpdateId(hash) } private def partyToParticipant( @@ -150,6 +151,6 @@ private[protocol] object TopologyTransactionDiff { private[protocol] final case class TopologyTransactionDiff( topologyEvents: NonEmpty[Set[TopologyEvent]], - transactionId: LedgerTransactionId, + transactionId: UpdateId, requiresLocalParticipantPartyReplication: Boolean, ) diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingSteps.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingSteps.scala index ec5a6be431..172a019a48 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingSteps.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingSteps.scala @@ -5,7 +5,6 @@ package com.digitalasset.canton.participant.protocol import cats.data.{EitherT, OptionT} import cats.syntax.either.* -import cats.syntax.foldable.* import cats.syntax.functor.* import cats.syntax.option.* import cats.syntax.parallel.* @@ -15,8 +14,9 @@ import com.daml.nonempty.catsinstances.* import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.crypto.* import com.digitalasset.canton.data.* +import com.digitalasset.canton.data.LightTransactionViewTree.ToFullViewTreesResult import com.digitalasset.canton.data.ViewType.TransactionViewType -import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.discard.Implicits.* import com.digitalasset.canton.error.TransactionError import com.digitalasset.canton.ledger.participant.state.* import com.digitalasset.canton.lifecycle.UnlessShutdown.{AbortedDueToShutdown, Outcome} @@ -29,15 +29,16 @@ import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, NamedL import com.digitalasset.canton.metrics.* import com.digitalasset.canton.participant.metrics.TransactionProcessingMetrics import com.digitalasset.canton.participant.protocol.EngineController.EngineAbortStatus +import com.digitalasset.canton.participant.protocol.LedgerEffectAbsolutizer.ViewAbsoluteLedgerEffect import com.digitalasset.canton.participant.protocol.ProcessingSteps.ParsedRequest import com.digitalasset.canton.participant.protocol.ProtocolProcessor.{ MalformedPayload, NoMediatorError, + ViewMessageError, } import com.digitalasset.canton.participant.protocol.TransactionProcessingSteps.* import 
com.digitalasset.canton.participant.protocol.TransactionProcessor.* import com.digitalasset.canton.participant.protocol.TransactionProcessor.SubmissionErrors.{ - ContractAuthenticationFailed, SequencerRequest, SubmissionDuringShutdown, SubmissionInternalError, @@ -72,15 +73,17 @@ import com.digitalasset.canton.participant.protocol.validation.TimeValidator.Tim import com.digitalasset.canton.participant.store.* import com.digitalasset.canton.participant.sync.* import com.digitalasset.canton.participant.sync.SyncServiceError.SyncServiceAlarm -import com.digitalasset.canton.participant.util.DAMLe.{CreateNodeEnricher, TransactionEnricher} +import com.digitalasset.canton.participant.util.DAMLe.{ContractEnricher, TransactionEnricher} import com.digitalasset.canton.platform.apiserver.execution.CommandProgressTracker import com.digitalasset.canton.protocol.* +import com.digitalasset.canton.protocol.ContractIdAbsolutizer.ContractIdAbsolutizationDataV1 import com.digitalasset.canton.protocol.WellFormedTransaction.{ WithSuffixesAndMerged, WithoutSuffixes, } import com.digitalasset.canton.protocol.messages.* import com.digitalasset.canton.protocol.messages.EncryptedViewMessage.computeRandomnessLength +import com.digitalasset.canton.protocol.messages.EncryptedViewMessageError.InvalidContractIdInView import com.digitalasset.canton.resource.DbStorage.PassiveInstanceException import com.digitalasset.canton.sequencing.client.SendAsyncClientError import com.digitalasset.canton.sequencing.protocol.* @@ -92,24 +95,21 @@ import com.digitalasset.canton.topology.client.TopologySnapshot import com.digitalasset.canton.topology.{ParticipantId, PhysicalSynchronizerId} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ShowUtil.* -import com.digitalasset.canton.util.{ContractAuthenticator, EitherTUtil, ErrorUtil} +import com.digitalasset.canton.util.{EitherTUtil, ErrorUtil, RoseTree} import com.digitalasset.canton.{ LedgerSubmissionId, LfKeyResolver, LfPartyId, - LfVersioned, RequestCounter, SequencerCounter, WorkflowId, checked, } import com.digitalasset.daml.lf.transaction.CreationTime -import com.google.common.annotations.VisibleForTesting import com.google.protobuf.ByteString import monocle.PLens import java.util.concurrent.ConcurrentHashMap -import scala.None import scala.collection.immutable.SortedMap import scala.concurrent.{ExecutionContext, Future} import scala.jdk.CollectionConverters.* @@ -129,14 +129,14 @@ class TransactionProcessingSteps( staticSynchronizerParameters: StaticSynchronizerParameters, crypto: SynchronizerCryptoClient, metrics: TransactionProcessingMetrics, - serializableContractAuthenticator: ContractAuthenticator, transactionEnricher: TransactionEnricher, - createNodeEnricher: CreateNodeEnricher, + createNodeEnricher: ContractEnricher, authorizationValidator: AuthorizationValidator, internalConsistencyChecker: InternalConsistencyChecker, tracker: CommandProgressTracker, protected val loggerFactory: NamedLoggerFactory, futureSupervisor: FutureSupervisor, + messagePayloadLoggingEnabled: Boolean, )(implicit val ec: ExecutionContext) extends ProcessingSteps[ SubmissionParam, @@ -152,6 +152,11 @@ class TransactionProcessingSteps( override type PendingSubmissionId = Unit override type PendingSubmissionData = None.type + override type ViewAbsoluteLedgerEffects = LedgerEffectAbsolutizer.ViewAbsoluteLedgerEffect + + /** The rose tree of the subviews' [[ViewAbsoluteLedgerEffects]] following the view structure */ + override type FullViewAbsoluteLedgerEffects = 
RoseTree[ViewAbsoluteLedgerEffects] + override type ParsedRequestType = ParsedTransactionRequest override type RejectionArgs = TransactionProcessingSteps.RejectionArgs @@ -308,8 +313,13 @@ class TransactionProcessingSteps( override def maxSequencingTimeO: OptionT[FutureUnlessShutdown, CantonTimestamp] = OptionT.liftF( recentSnapshot.ipsSnapshot.findDynamicSynchronizerParametersOrDefault(protocolVersion).map { synchronizerParameters => - CantonTimestamp(transactionMeta.ledgerEffectiveTime) + val maxSequencingTimeFromLET = CantonTimestamp(transactionMeta.ledgerEffectiveTime) .add(synchronizerParameters.ledgerTimeRecordTimeTolerance.unwrap) + submitterInfo.externallySignedSubmission + .flatMap(_.maxRecordTimeO) + .map(CantonTimestamp.apply) + .map(_.min(maxSequencingTimeFromLET)) + .getOrElse(maxSequencingTimeFromLET) } ) @@ -345,7 +355,7 @@ class TransactionProcessingSteps( disclosedContracts .get(contractId) .map(contract => - EitherT.rightT[Future, TransactionTreeFactory.ContractLookupError]( + EitherT.rightT[FutureUnlessShutdown, TransactionTreeFactory.ContractLookupError]( contract: GenContractInstance ) ) @@ -694,34 +704,84 @@ class TransactionProcessingSteps( } } + override def absolutizeLedgerEffects( + viewsWithCorrectRootHashAndRecipientsAndSignature: Seq[ + (WithRecipients[DecryptedView], Option[Signature]) + ] + ): ( + Seq[(WithRecipients[DecryptedView], Option[Signature], ViewAbsoluteLedgerEffect)], + Seq[MalformedPayload], + ) = + NonEmpty.from(viewsWithCorrectRootHashAndRecipientsAndSignature) match { + case Some(viewsNE) => + // All views have the same root hash, so we can take the first one + val firstView = viewsNE.head1._1.unwrap + val transactionId = firstView.transactionId + val ledgerTime = firstView.ledgerTime + // TODO(#23971) Generate absolutization data based on the protocol version + val absolutizationData = { + transactionId.discard + ledgerTime.discard + ContractIdAbsolutizationDataV1 + } + val contractAbsolutizer = new ContractIdAbsolutizer(crypto.pureCrypto, absolutizationData) + val absolutizer = new LedgerEffectAbsolutizer(contractAbsolutizer) + + viewsNE.partitionMap { case (withRecipients @ WithRecipients(view, _), sig) => + val vpd = view.viewParticipantData + absolutizer + .absoluteViewEffects(vpd, view.informees) + .bimap( + err => + ViewMessageError( + InvalidContractIdInView( + s"Failed to absolutize view at position ${view.viewPosition}: $err" + ) + ), + effects => (withRecipients, sig, effects), + ) + .swap + } + case None => (Seq.empty, Seq.empty) + } + override def computeFullViews( - decryptedViewsWithSignatures: Seq[(WithRecipients[DecryptedView], Option[Signature])] - ): (Seq[(WithRecipients[FullView], Option[Signature])], Seq[MalformedPayload]) = { + decryptedViewsWithSignatures: Seq[ + (WithRecipients[DecryptedView], Option[Signature], ViewAbsoluteLedgerEffects) + ] + ): ( + Seq[(WithRecipients[FullView], Option[Signature], FullViewAbsoluteLedgerEffects)], + Seq[MalformedPayload], + ) = { val lens = PLens[ - (WithRecipients[LightTransactionViewTree], Option[Signature]), - (WithRecipients[FullTransactionViewTree], Option[Signature]), - LightTransactionViewTree, - FullTransactionViewTree, - ](_._1.unwrap)(tvt => { case (WithRecipients(_, rec), sig) => - (WithRecipients(tvt, rec), sig) - }) - - val (fullViews, incompleteLightViewTrees, duplicateLightViewTrees) = + (WithRecipients[LightTransactionViewTree], Option[Signature], ViewAbsoluteLedgerEffects), + (WithRecipients[FullTransactionViewTree], Option[Signature], FullViewAbsoluteLedgerEffects), + 
(LightTransactionViewTree, ViewAbsoluteLedgerEffects), + (FullTransactionViewTree, FullViewAbsoluteLedgerEffects), + ] { case (viewWithRecipients, _, effect) => (viewWithRecipients.unwrap, effect) }( + tvtAndEffects => { case (WithRecipients(_, rec), sig, _) => + val (tvt, effects) = tvtAndEffects + (WithRecipients(tvt, rec), sig, effects) + } + ) + + val ToFullViewTreesResult(fullViews, incompleteLightViewTrees, duplicateLightViewTrees) = LightTransactionViewTree.toFullViewTrees( lens, protocolVersion, crypto.pureCrypto, topLevelOnly = true, - )(decryptedViewsWithSignatures) + decryptedViewsWithSignatures, + ) val incompleteLightViewTreeErrors = incompleteLightViewTrees.map { - case (WithRecipients(vt, _), _) => + case (WithRecipients(vt, _), _, _) => ProtocolProcessor.IncompleteLightViewTree(vt.viewPosition) } val duplicateLightViewTreeErrors = duplicateLightViewTrees.map { - case (WithRecipients(vt, _), _) => + case (WithRecipients(vt, _), _, _) => ProtocolProcessor.DuplicateLightViewTree(vt.viewPosition) } @@ -733,7 +793,13 @@ class TransactionProcessingSteps( ts: CantonTimestamp, sc: SequencerCounter, rootViewsWithMetadata: NonEmpty[ - Seq[(WithRecipients[FullTransactionViewTree], Option[Signature])] + Seq[ + ( + WithRecipients[FullTransactionViewTree], + Option[Signature], + FullViewAbsoluteLedgerEffects, + ) + ] ], submitterMetadataO: Option[SubmitterMetadata], isFreshOwnTimelyRequest: Boolean, @@ -742,11 +808,14 @@ class TransactionProcessingSteps( snapshot: SynchronizerSnapshotSyncCryptoApi, synchronizerParameters: DynamicSynchronizerParametersWithValidity, )(implicit traceContext: TraceContext): FutureUnlessShutdown[ParsedTransactionRequest] = { - val rootViewTrees = rootViewsWithMetadata.map { case (WithRecipients(view, _), _) => view } + val workflowId = rootViewsWithMetadata.head1._1.unwrap.workflowIdO + val effects = rootViewsWithMetadata.forgetNE.flatMap { case (_, _, effects) => + effects.preorder.toVector + } for { usedAndCreated <- ExtractUsedAndCreated( participantId, - rootViewTrees.map(_.view), + effects, snapshot.ipsSnapshot, loggerFactory, ) @@ -760,7 +829,7 @@ class TransactionProcessingSteps( malformedPayloads, mediator, usedAndCreated, - rootViewTrees.head1.workflowIdO, + workflowId, snapshot, synchronizerParameters, ) @@ -775,14 +844,13 @@ class TransactionProcessingSteps( // Also, check that all the view's informees received the derived randomness Right(parsedRequest.usedAndCreated.activenessSet) - def authenticateInputContracts( + override def authenticateInputContracts( parsedRequest: ParsedTransactionRequest )(implicit traceContext: TraceContext ): EitherT[Future, TransactionProcessorError, Unit] = - authenticateInputContractsInternal( - parsedRequest.usedAndCreated.contracts.used - ) + // For transaction processing contract authentication is done as part of model conformance + EitherT.pure(()) override def constructPendingDataAndResponse( parsedRequest: ParsedTransactionRequest, @@ -854,6 +922,7 @@ class TransactionProcessingSteps( transactionEnricher, createNodeEnricher, logger, + messagePayloadLoggingEnabled, ) consistencyResultE = ContractConsistencyChecker @@ -890,7 +959,7 @@ class TransactionProcessingSteps( // mediator verdict. 
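// Note: the check below consumes the absolutized effects (rootViewTreesWithEffects) rather than the raw root view trees, so model conformance and conflict detection agree on absolute contract IDs; authentication of the input contracts happens inside this check (cf. authenticateInputContracts above).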
conformanceResultET = modelConformanceChecker .check( - parsedRequest.rootViewTrees, + parsedRequest.rootViewTreesWithEffects, keyResolverFor(_), ipsSnapshot, commonData, @@ -1127,21 +1196,6 @@ class TransactionProcessingSteps( Right(updateO) } - @VisibleForTesting - private[protocol] def authenticateInputContractsInternal( - inputContracts: Map[LfContractId, GenContractInstance] - )(implicit - traceContext: TraceContext - ): EitherT[Future, TransactionProcessorError, Unit] = - EitherT.fromEither( - inputContracts.toList - .traverse_ { case (contractId, contract) => - serializableContractAuthenticator - .legacyAuthenticate(contract.inst) - .leftMap(message => ContractAuthenticationFailed.Error(contractId, message).reported()) - } - ) - private def completionInfoFromSubmitterMetadataO( meta: SubmitterMetadata, freshOwnTimelyTx: Boolean, @@ -1201,11 +1255,7 @@ class TransactionProcessingSteps( modelConformanceResult: ModelConformanceChecker.Result, )(implicit traceContext: TraceContext - ): EitherT[ - FutureUnlessShutdown, - TransactionProcessorError, - CommitAndStoreContractsAndPublishEvent, - ] = { + ): CommitAndStoreContractsAndPublishEvent = { val txValidationResult = pendingRequestData.transactionValidationResult val commitSet = txValidationResult.commitSet(pendingRequestData.requestId) @@ -1225,7 +1275,7 @@ class TransactionProcessingSteps( private def computeCommitAndContractsAndEvent( requestTime: CantonTimestamp, - txId: TransactionId, + txId: UpdateId, workflowIdO: Option[WorkflowId], commitSet: CommitSet, createdContracts: Map[LfContractId, NewContractInstance], @@ -1235,30 +1285,25 @@ class TransactionProcessingSteps( externalTransactionHash: Option[Hash], )(implicit traceContext: TraceContext - ): EitherT[ - FutureUnlessShutdown, - TransactionProcessorError, - CommitAndStoreContractsAndPublishEvent, - ] = { + ): CommitAndStoreContractsAndPublishEvent = { val commitSetF = FutureUnlessShutdown.pure(commitSet) val ledgerEffectiveTime = lfTx.metadata.ledgerTime val contractsToBeStored = - createdContracts.values.map(ContractInstance.assignCreationTime(_, ledgerEffectiveTime)).toSeq - - for { - lfTxId <- EitherT - .fromEither[FutureUnlessShutdown](txId.asLedgerTransactionId) - .leftMap[TransactionProcessorError](FieldConversionError("Transaction Id", _)) - - contractAuthenticationData = - // We deliberately do not forward the authentication data - // for divulged contracts since they are not visible on the Ledger API - (createdContracts ++ witnessed).view.map { case (contractId, contract) => - contractId -> contract.inst.authenticationData - }.toMap - - acceptedEvent = - (acsChangeFactory: AcsChangeFactory) => + (createdContracts ++ witnessed).values + .map(ContractInstance.assignCreationTime(_, ledgerEffectiveTime)) + .toSeq + + val contractAuthenticationData = + // We deliberately do not forward the authentication data + // for retroactively divulged contracts since they are not visible on the Ledger API + // For immediately divulged contracts we populate this as those are visible. 
+ (createdContracts ++ witnessed).view.map { case (contractId, contract) => + contractId -> contract.inst.authenticationData + }.toMap + + val acceptedEvent = + (acsChangeFactory: AcsChangeFactory) => + (internalContractIds: Map[LfContractId, Long]) => Update.SequencedTransactionAccepted( completionInfoO = completionInfoO, transactionMeta = TransactionMeta( @@ -1274,14 +1319,15 @@ class TransactionProcessingSteps( optByKeyNodes = None, // optByKeyNodes is unused by the indexer ), transaction = LfCommittedTransaction(lfTx.unwrap), - updateId = lfTxId, + updateId = txId, contractAuthenticationData = contractAuthenticationData, synchronizerId = psid.logical, recordTime = requestTime, externalTransactionHash = externalTransactionHash, acsChangeFactory = acsChangeFactory, + internalContractIds = internalContractIds, ) - } yield CommitAndStoreContractsAndPublishEvent( + CommitAndStoreContractsAndPublishEvent( Some(commitSetF), contractsToBeStored, Some(acceptedEvent), @@ -1292,7 +1338,7 @@ class TransactionProcessingSteps( pendingRequestData: RequestType#PendingRequestData, completionInfoO: Option[CompletionInfo], validSubTransaction: WellFormedTransaction[WithSuffixesAndMerged], - validSubViewsNE: NonEmpty[Seq[TransactionView]], + validSubViewEffectsNE: NonEmpty[Seq[ViewAbsoluteLedgerEffect]], topologySnapshot: TopologySnapshot, )(implicit traceContext: TraceContext @@ -1306,7 +1352,7 @@ class TransactionProcessingSteps( .right( ExtractUsedAndCreated( participantId, - validSubViewsNE, + validSubViewEffectsNE, topologySnapshot, loggerFactory, ) @@ -1322,7 +1368,7 @@ class TransactionProcessingSteps( createdContracts = createdContracts, ) - commitAndContractsAndEvent <- computeCommitAndContractsAndEvent( + commitAndContractsAndEvent = computeCommitAndContractsAndEvent( requestTime = pendingRequestData.requestTime, txId = pendingRequestData.transactionValidationResult.transactionId, workflowIdO = pendingRequestData.transactionValidationResult.workflowIdO, @@ -1412,13 +1458,13 @@ class TransactionProcessingSteps( case ErrorWithSubTransaction( _errors, Some(validSubTransaction), - NonEmpty(validSubViewsNE), + NonEmpty(validSubViewEffectsNE), ) => getCommitSetAndContractsToBeStoredAndEventApprovePartlyConform( pendingRequestData, completionInfoO, validSubTransaction, - validSubViewsNE, + validSubViewEffectsNE, topologySnapshot, ) @@ -1428,14 +1474,20 @@ class TransactionProcessingSteps( rejectedWithModelConformanceError(error) }, modelConformanceResult => - getCommitSetAndContractsToBeStoredAndEventApproveConform( - pendingRequestData, - completionInfoO, - modelConformanceResult, + EitherT.pure( + getCommitSetAndContractsToBeStoredAndEventApproveConform( + pendingRequestData, + completionInfoO, + modelConformanceResult, + ) ), ) - def rejectedWithModelConformanceError(error: ErrorWithSubTransaction) = { + def rejectedWithModelConformanceError(error: ErrorWithSubTransaction[?]): EitherT[ + FutureUnlessShutdown, + TransactionProcessorError, + CommitAndStoreContractsAndPublishEvent, + ] = { val localVerdict = LocalRejectError.MalformedRejects.ModelConformance .Reject(error.errors.head1.toString) rejected(ErrorDetails(localVerdict.reason(), localVerdict.isMalformed)) @@ -1453,7 +1505,7 @@ class TransactionProcessingSteps( } yield CommitAndStoreContractsAndPublishEvent( None, Seq(), - eventO.map(event => _ => event), + eventO.map(event => _ => _ => event), )).mapK(FutureUnlessShutdown.outcomeK) for { @@ -1527,22 +1579,18 @@ object TransactionProcessingSteps { disclosedContracts: Map[LfContractId, 
ContractInstance], ) - /** Projection of [[com.digitalasset.canton.data.ViewParticipantData]] to relevant fields with - * absolutized contract IDs - */ - final case class ViewAbsoluteLedgerEffect( - coreInputs: Map[LfContractId, InputContract], - createdCore: Seq[CreatedContract], - createdInSubviewArchivedInCore: Set[LfContractId], - resolvedKeys: Map[LfGlobalKey, LfVersioned[SerializableKeyResolution]], - ) - final case class ParsedTransactionRequest( override val rc: RequestCounter, override val requestTimestamp: CantonTimestamp, override val sc: SequencerCounter, rootViewTreesWithMetadata: NonEmpty[ - Seq[(WithRecipients[FullTransactionViewTree], Option[Signature])] + Seq[ + ( + WithRecipients[FullTransactionViewTree], + Option[Signature], + RoseTree[ViewAbsoluteLedgerEffect], + ) + ] ], override val submitterMetadataO: Option[SubmitterMetadata], override val isFreshOwnTimelyRequest: Boolean, @@ -1554,19 +1602,25 @@ object TransactionProcessingSteps { override val synchronizerParameters: DynamicSynchronizerParametersWithValidity, ) extends ParsedRequest[SubmitterMetadata] { + lazy val rootViewTreesWithEffects + : NonEmpty[Seq[(FullTransactionViewTree, RoseTree[ViewAbsoluteLedgerEffect])]] = + rootViewTreesWithMetadata.map { case (WithRecipients(rootViewTree, _), _, effects) => + (rootViewTree, effects) + } + lazy val rootViewTrees: NonEmpty[Seq[FullTransactionViewTree]] = rootViewTreesWithMetadata.map { - case (WithRecipients(rootViewTree, _), _) => rootViewTree + case (WithRecipients(rootViewTree, _), _, _) => rootViewTree } lazy val rootViewTreesWithSignatures: NonEmpty[ Seq[(FullTransactionViewTree, Option[Signature])] - ] = rootViewTreesWithMetadata.map { case (WithRecipients(rootViewTree, _), signature) => + ] = rootViewTreesWithMetadata.map { case (WithRecipients(rootViewTree, _), signature, _) => (rootViewTree, signature) } override def rootHash: RootHash = rootViewTrees.head1.rootHash - def transactionId: TransactionId = rootViewTrees.head1.transactionId + def transactionId: UpdateId = rootViewTrees.head1.transactionId def ledgerTime: CantonTimestamp = rootViewTrees.head1.ledgerTime @@ -1579,7 +1633,7 @@ object TransactionProcessingSteps { authorizationResult: Map[ViewPosition, String], conformanceResultET: EitherT[ FutureUnlessShutdown, - ModelConformanceChecker.ErrorWithSubTransaction, + ModelConformanceChecker.ErrorWithSubTransaction[ViewAbsoluteLedgerEffect], ModelConformanceChecker.Result, ], internalConsistencyResultE: Either[ErrorWithInternalConsistencyCheck, Unit], @@ -1615,7 +1669,7 @@ object TransactionProcessingSteps { } final case class CommonData( - transactionId: TransactionId, + transactionId: UpdateId, ledgerTime: CantonTimestamp, preparationTime: CantonTimestamp, ) diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessor.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessor.scala index bb37b55e14..cfc9f85a5d 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessor.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessor.scala @@ -53,7 +53,7 @@ import com.digitalasset.canton.sequencing.protocol.MediatorGroupRecipient import com.digitalasset.canton.topology.client.TopologySnapshot import com.digitalasset.canton.topology.{ParticipantId, PhysicalSynchronizerId, SynchronizerId} import 
com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.ContractAuthenticator +import com.digitalasset.canton.util.ContractValidator import com.digitalasset.canton.util.ShowUtil.* import org.slf4j.event.Level @@ -67,6 +67,7 @@ class TransactionProcessor( damle: DAMLe, staticSynchronizerParameters: StaticSynchronizerParameters, crypto: SynchronizerCryptoClient, + contractValidator: ContractValidator, sequencerClient: SequencerClient, inFlightSubmissionSynchronizerTracker: InFlightSubmissionSynchronizerTracker, ephemeral: SyncEphemeralState, @@ -78,6 +79,7 @@ class TransactionProcessor( packageResolver: PackageResolver, override val testingConfig: TestingConfigInternal, promiseFactory: PromiseUnlessShutdownFactory, + messagePayloadLoggingEnabled: Boolean, )(implicit val ec: ExecutionContext) extends ProtocolProcessor[ TransactionProcessingSteps.SubmissionParam, @@ -97,7 +99,7 @@ class TransactionProcessor( ModelConformanceChecker( damle, confirmationRequestFactory.transactionTreeFactory, - ContractAuthenticator(crypto.pureCrypto), + contractValidator, participantId, packageResolver, crypto.pureCrypto, @@ -106,9 +108,8 @@ class TransactionProcessor( staticSynchronizerParameters, crypto, metrics, - ContractAuthenticator(crypto.pureCrypto), damle.enrichTransaction, - damle.enrichCreateNode, + damle.enrichContract, new AuthorizationValidator(participantId), new InternalConsistencyChecker( loggerFactory @@ -116,6 +117,7 @@ class TransactionProcessor( commandProgressTracker, loggerFactory, futureSupervisor, + messagePayloadLoggingEnabled, ), inFlightSubmissionSynchronizerTracker, ephemeral, @@ -476,7 +478,7 @@ object TransactionProcessor { } final case class ViewParticipantDataError( - transactionId: TransactionId, + transactionId: UpdateId, viewHash: ViewHash, error: String, ) extends TransactionProcessorError { diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/party/InMemoryProcessorStore.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/party/InMemoryProcessorStore.scala index 174fa08eeb..edc18f1d1b 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/party/InMemoryProcessorStore.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/party/InMemoryProcessorStore.scala @@ -119,15 +119,9 @@ final class TargetParticipantStore extends PartyReplicationProcessorStore { ) private[party] def requestedContractsCount: NonNegativeInt = state.get().requestedContractsCount - private[party] def increaseRequestedContractsCount(delta: PositiveInt): NonNegativeInt = + private[party] def setRequestedContractsCount(count: NonNegativeInt): Unit = state - .updateAndGet(state => - state.copy(requestedContractsCount = state.requestedContractsCount.map(_ + delta.unwrap)) - ) - .requestedContractsCount - private[party] def resetRequestedContractsCount(reset: NonNegativeInt): Unit = - state - .updateAndGet(_.copy(requestedContractsCount = reset)) + .updateAndGet(_.copy(requestedContractsCount = count)) .discard def processedContractsCount: NonNegativeInt = state.get().processedContractsCount diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/party/PartyReplicationProcessor.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/party/PartyReplicationProcessor.scala index db5bb3f56b..4260361d69 100644 --- 
a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/party/PartyReplicationProcessor.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/party/PartyReplicationProcessor.scala @@ -11,6 +11,7 @@ import com.digitalasset.canton.participant.admin.party.PartyReplicationTestInter import com.digitalasset.canton.sequencing.client.channel.SequencerChannelProtocolProcessor import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.{FutureUnlessShutdownUtil, SimpleExecutionQueue} +import com.google.common.annotations.VisibleForTesting import scala.util.chaining.scalaUtilChainingOps @@ -35,6 +36,9 @@ trait PartyReplicationProcessor extends SequencerChannelProtocolProcessor { crashOnFailure = exitOnFatalFailures, ) + @VisibleForTesting + private[party] def isExecutionQueueEmpty: Boolean = executionQueue.isEmpty + protected def testOnlyInterceptor: PartyReplicationTestInterceptor protected def onAcsFullyReplicated: TraceContext => Unit @@ -45,6 +49,16 @@ trait PartyReplicationProcessor extends SequencerChannelProtocolProcessor { */ def progressPartyReplication()(implicit traceContext: TraceContext): Unit + protected def notifyCounterParticipantAndPartyReplicatorOnError( + code: => EitherT[FutureUnlessShutdown, String, Unit] + )(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, String, Unit] = + code.leftSemiflatMap { error => + // Let the PartyReplicator know there has been an error. + onError(error) + // Let the counter participant know there has been an error. + sendError(error).value.map(_ => error) + } + final protected def executeAsync(operation: String)( code: => EitherT[FutureUnlessShutdown, String, Unit] )(implicit traceContext: TraceContext): Unit = { @@ -55,7 +69,7 @@ trait PartyReplicationProcessor extends SequencerChannelProtocolProcessor { Unit, ] ): FutureUnlessShutdown[Unit] = - eitherT.valueOr(err => logger.warn(s"\"$operation\" failed with $err")) + eitherT.valueOr(err => logger.warn(s"\"$operation\" failed with \"$err\"")) logger.debug(s"About to $operation") FutureUnlessShutdownUtil.doNotAwaitUnlessShutdown( diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/party/PartyReplicationSourceParticipantProcessor.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/party/PartyReplicationSourceParticipantProcessor.scala index 8d7b7787bd..3718771343 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/party/PartyReplicationSourceParticipantProcessor.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/party/PartyReplicationSourceParticipantProcessor.scala @@ -80,6 +80,8 @@ final class PartyReplicationSourceParticipantProcessor private ( )(implicit override val executionContext: ExecutionContext) extends PartyReplicationProcessor { protected val processorStore: SourceParticipantStore = InMemoryProcessorStore.sourceParticipant() + + // TODO(#22251): Make this configurable. 
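// A minimal sketch, using plain Scala futures and hypothetical names, of the error
// hook hoisted into the shared trait above: when a step fails, notify the local
// PartyReplicator first, then the counter participant, and propagate the original
// error unchanged.
import scala.concurrent.{ExecutionContext, Future}

object NotifyOnErrorSketch {
  def notifyOnError(
      step: Future[Either[String, Unit]]
  )(onError: String => Unit, sendError: String => Future[Unit])(implicit
      ec: ExecutionContext
  ): Future[Either[String, Unit]] =
    step.flatMap {
      case Left(error) =>
        onError(error) // local notification to the PartyReplicator
        sendError(error).map(_ => Left(error)) // remote notification; keep the error
      case ok => Future.successful(ok)
    }
}
// Keeping the error on the left means callers such as executeAsync still observe
// and log the failure after both notifications have been sent.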
private val contractsPerBatch = PositiveInt.two override def replicatedContractsCount: NonNegativeInt = processorStore.sentContractsCount @@ -91,8 +93,7 @@ final class PartyReplicationSourceParticipantProcessor private ( override def onConnected()(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, String, Unit] = execute("handle connect to TP") { - processorStore.clearInitialContractOrdinalInclusive() - EitherTUtil.unitUS + super.onConnected().map(_ => processorStore.clearInitialContractOrdinalInclusive()) } /** Handle instructions from the target participant @@ -210,16 +211,6 @@ final class PartyReplicationSourceParticipantProcessor private ( } yield () } - private def notifyCounterParticipantAndPartyReplicatorOnError( - code: => EitherT[FutureUnlessShutdown, String, Unit] - )(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, String, Unit] = - code.leftSemiflatMap { error => - // Let the PartyReplicator know there has been an error. - onError(error) - // Let the target participant know there has been an error. - sendError(error).value.map(_ => error) - } - /** Reads contract batches from the ACS in a brute-force fashion via AcsInspection until * TODO(#24326) reads the ACS via the Ledger API. */ diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/party/PartyReplicationTargetParticipantProcessor.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/party/PartyReplicationTargetParticipantProcessor.scala index 8d5136cae8..be6bcb912e 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/party/PartyReplicationTargetParticipantProcessor.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/party/PartyReplicationTargetParticipantProcessor.scala @@ -13,7 +13,7 @@ import com.digitalasset.canton.RepairCounter import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} -import com.digitalasset.canton.crypto.HashPurpose +import com.digitalasset.canton.crypto.{CryptoPureApi, HashPurpose} import com.digitalasset.canton.data.{CantonTimestamp, ContractReassignment} import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.ledger.participant.state.{Reassignment, ReassignmentInfo, Update} @@ -22,19 +22,29 @@ import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.participant.admin.data.ActiveContractOld import com.digitalasset.canton.participant.admin.party.PartyReplicationTestInterceptor import com.digitalasset.canton.participant.admin.party.PartyReplicator.AddPartyRequestId -import com.digitalasset.canton.participant.event.AcsChangeSupport -import com.digitalasset.canton.participant.protocol.conflictdetection.CommitSet +import com.digitalasset.canton.participant.event.{AcsChangeSupport, RecordOrderPublisher} +import com.digitalasset.canton.participant.protocol.conflictdetection.{CommitSet, RequestTracker} +import com.digitalasset.canton.participant.protocol.party.PartyReplicationTargetParticipantProcessor.{ + GetInternalContractIds, + PersistContracts, + contractsToRequestEachTime, +} import com.digitalasset.canton.participant.store.ParticipantNodePersistentState import com.digitalasset.canton.participant.sync.ConnectedSynchronizer import 
com.digitalasset.canton.participant.util.TimeOfChange -import com.digitalasset.canton.protocol.{ContractInstance, ReassignmentId, TransactionId} +import com.digitalasset.canton.protocol.{ + ContractInstance, + LfContractId, + ReassignmentId, + SerializableContract, + UpdateId, +} import com.digitalasset.canton.topology.{PartyId, PhysicalSynchronizerId} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.{EitherTUtil, ReassignmentTag} import com.google.protobuf.ByteString import scala.concurrent.ExecutionContext -import scala.util.chaining.scalaUtilChainingOps /** The target participant processor ingests a party's active contracts on a specific synchronizer * and timestamp from a source participant as part of Online Party Replication. @@ -52,6 +62,8 @@ import scala.util.chaining.scalaUtilChainingOps * The party id of the party to replicate active contracts for. * @param requestId * The "add party" request id that this replication is associated with. + * @param psid + * The physical id of the synchronizer to replicate active contracts in. * @param partyToParticipantEffectiveAt * The timestamp immediately on which the ACS snapshot is based. * @param onAcsFullyReplicated @@ -63,15 +75,19 @@ import scala.util.chaining.scalaUtilChainingOps * @param testOnlyInterceptor * Test interceptor only alters behavior in integration tests. */ -final class PartyReplicationTargetParticipantProcessor( +class PartyReplicationTargetParticipantProcessor( partyId: PartyId, requestId: AddPartyRequestId, + protected val psid: PhysicalSynchronizerId, partyToParticipantEffectiveAt: CantonTimestamp, protected val onAcsFullyReplicated: TraceContext => Unit, protected val onError: String => Unit, protected val onDisconnect: (String, TraceContext) => Unit, - participantNodePersistentState: Eval[ParticipantNodePersistentState], - connectedSynchronizer: ConnectedSynchronizer, + persistContracts: PersistContracts, + getInternalContractIds: GetInternalContractIds, + recordOrderPublisher: RecordOrderPublisher, + requestTracker: RequestTracker, + pureCrypto: CryptoPureApi, protected val futureSupervisor: FutureSupervisor, protected val exitOnFatalFailures: Boolean, protected val timeouts: ProcessingTimeout, @@ -81,12 +97,6 @@ final class PartyReplicationTargetParticipantProcessor( extends PartyReplicationProcessor { protected val processorStore: TargetParticipantStore = InMemoryProcessorStore.targetParticipant() - private val contractsToRequestEachTime = PositiveInt.tryCreate(10) - - override protected val psid: PhysicalSynchronizerId = connectedSynchronizer.psid - - private val pureCrypto = - connectedSynchronizer.synchronizerHandle.syncPersistentState.pureCryptoApi // The base hash for all indexer UpdateIds to avoid repeating this for all ACS batches. private lazy val indexerUpdateIdBaseHash = pureCrypto @@ -103,10 +113,11 @@ final class PartyReplicationTargetParticipantProcessor( override def onConnected()(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, String, Unit] = execute("handle connect to SP") { - // Upon connecting or reconnecting, clear the initial contract ordinal. - processorStore.clearInitialContractOrdinalInclusive() - progressPartyReplication() - EitherTUtil.unitUS + super.onConnected().map { _ => + // Upon connecting or reconnecting, clear the initial contract ordinal. + processorStore.clearInitialContractOrdinalInclusive() + progressPartyReplication() + } } /** Consume status updates and ACS batches from the source participant. 
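// A rough sketch, using the JDK's SHA-256 as a stand-in for Canton's hash builder,
// of the update-id scheme around this code: hash the per-replication identifiers
// once into a base, then derive one deterministic UpdateId per ACS batch from that
// base plus the batch's repair counter and contract ids. Names are illustrative only.
import java.security.MessageDigest

object RepairUpdateIdSketch {
  private def sha256(parts: Seq[String]): Array[Byte] = {
    val digest = MessageDigest.getInstance("SHA-256")
    parts.foreach(part => digest.update(part.getBytes("UTF-8")))
    digest.digest()
  }

  private def hex(bytes: Array[Byte]): String = bytes.map(b => f"$b%02x").mkString

  // Computed once per replication (e.g. party, request id, synchronizer).
  def baseHash(partyId: String, requestId: String, synchronizerId: String): String =
    hex(sha256(Seq(partyId, requestId, synchronizerId)))

  // Derived per batch, so reprocessing the same batch yields the same update id.
  def updateIdForBatch(base: String, repairCounter: Long, contractIds: Seq[String]): String =
    hex(sha256(base +: repairCounter.toString +: contractIds))
}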
@@ -114,12 +125,13 @@ final class PartyReplicationTargetParticipantProcessor( override def handlePayload(payload: ByteString)(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, String, Unit] = execute("handle payload from SP") { - val recordOrderPublisher = connectedSynchronizer.ephemeral.recordOrderPublisher - (for { + notifyCounterParticipantAndPartyReplicatorOnError(for { messageFromSP <- EitherT.fromEither[FutureUnlessShutdown]( PartyReplicationSourceParticipantMessage .fromByteString(protocolVersion, payload) - .leftMap(_.message) + .leftMap(deserializationError => + s"Failed to parse payload message from SP: ${deserializationError.message}" + ) ) _ <- EitherTUtil.condUnitET[FutureUnlessShutdown]( processorStore.initialContractOrdinalInclusiveO.isDefined, @@ -133,15 +145,18 @@ final class PartyReplicationTargetParticipantProcessor( .map(_.contract.contractId) .mkString(", ")}" ) - val contractsToAdd = contracts.map(_.contract) for { - _ <- EitherT( - contractsToAdd.forgetNE - .traverse(ContractInstance.fromSerializable) - .traverse( - participantNodePersistentState.value.contractStore.storeContracts - ) + _ <- EitherTUtil.condUnitET[FutureUnlessShutdown]( + !processorStore.hasEndOfACSBeenReached, + s"Received ACS batch from SP after EndOfACS at $firstContractOrdinal", ) + _ <- EitherTUtil.condUnitET[FutureUnlessShutdown]( + processorStore.processedContractsCount.unwrap + contracts.size <= processorStore.requestedContractsCount.unwrap, + s"Received too many contracts from SP: processed ${processorStore.processedContractsCount.unwrap} + received ${contracts.size} > requested ${processorStore.requestedContractsCount.unwrap}", + ) + contractsToAdd = contracts.map(_.contract) + _ <- persistContracts(contractsToAdd)(executionContext)(traceContext) + .leftMap(err => s"Failed to persist contracts: $err") repairCounter = processorStore.getAndIncrementRepairCounter() toc = TimeOfChange(partyToParticipantEffectiveAt, Some(repairCounter)) contractAssignments = contracts.map { @@ -153,7 +168,7 @@ final class PartyReplicationTargetParticipantProcessor( toc, ) } - _ <- connectedSynchronizer.ephemeral.requestTracker + _ <- requestTracker .addReplicatedContracts(requestId, partyToParticipantEffectiveAt, contractAssignments) .leftMap(e => s"Failed to assign contracts $contractAssignments in ActiveContractStore: $e" @@ -164,9 +179,16 @@ final class PartyReplicationTargetParticipantProcessor( .fromSerializable(contract) .map(ContractReassignment(_, reassignmentCounter)) }) + internalContractIdsForActiveContracts <- EitherT.right[String]( + getInternalContractIds(reassignments.map(_.contract.contractId))(traceContext) + ) _ <- EitherT.rightT[FutureUnlessShutdown, String]( recordOrderPublisher.schedulePublishAddContracts( - repairEventFromSerializedContract(repairCounter, reassignments) + repairEventFromSerializedContract( + repairCounter = repairCounter, + activeContracts = reassignments, + internalContractIds = internalContractIdsForActiveContracts, + ) ) ) _ = processorStore @@ -180,13 +202,7 @@ final class PartyReplicationTargetParticipantProcessor( processorStore.setHasEndOfACSBeenReached() EitherT.rightT[FutureUnlessShutdown, String](()) } - } yield ()).bimap( - _.tap { error => - logger.warn(s"Error while processing payload: $error") - onError(error) - }, - _ => progressPartyReplication(), - ) + } yield ()).map(_ => progressPartyReplication()) } override def progressPartyReplication()(implicit traceContext: TraceContext): Unit = @@ -206,7 +222,6 @@ final class 
PartyReplicationTargetParticipantProcessor( private def respondToSourceParticipant()(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, String, Unit] = if (hasEndOfACSBeenReached) { - val recordOrderPublisher = connectedSynchronizer.ephemeral.recordOrderPublisher onAcsFullyReplicated(traceContext) EitherT( FutureUnlessShutdown @@ -231,7 +246,7 @@ final class PartyReplicationTargetParticipantProcessor( // Once the SP initialize message has been sent, set the initial contract ordinal // and reset the requested contracts count to the processed contracts count. processorStore.setInitialContractOrdinalInclusive(initialContractOrdinalInclusive) - processorStore.resetRequestedContractsCount(processorStore.processedContractsCount) + processorStore.setRequestedContractsCount(processorStore.processedContractsCount) progressPartyReplication() } } else if (processorStore.processedContractsCount == processorStore.requestedContractsCount) { @@ -247,9 +262,9 @@ final class PartyReplicationTargetParticipantProcessor( private def requestNextSetOfContracts()(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, String, Unit] = { - val updatedContractOrdinalRequestedExclusive = - processorStore.increaseRequestedContractsCount(contractsToRequestEachTime) - val inclusiveContractOrdinal = updatedContractOrdinalRequestedExclusive.unwrap - 1 + val updatedContractOrdinalToRequestExclusive = + processorStore.requestedContractsCount.map(_ + contractsToRequestEachTime.unwrap) + val inclusiveContractOrdinal = updatedContractOrdinalToRequestExclusive.unwrap - 1 val instructionMessage = PartyReplicationTargetParticipantMessage( PartyReplicationTargetParticipantMessage.SendAcsUpTo( NonNegativeInt.tryCreate(inclusiveContractOrdinal) @@ -260,7 +275,7 @@ final class PartyReplicationTargetParticipantProcessor( sendPayload( s"request next set of contracts up to ordinal $inclusiveContractOrdinal", instructionMessage.toByteString, - ) + ).map(_ => processorStore.setRequestedContractsCount(updatedContractOrdinalToRequestExclusive)) } override protected def hasEndOfACSBeenReached: Boolean = processorStore.hasEndOfACSBeenReached @@ -268,6 +283,7 @@ final class PartyReplicationTargetParticipantProcessor( private def repairEventFromSerializedContract( repairCounter: RepairCounter, activeContracts: NonEmpty[Seq[ContractReassignment]], + internalContractIds: Map[LfContractId, Long], )( timestamp: CantonTimestamp )(implicit traceContext: TraceContext): Update.OnPRReassignmentAccepted = { @@ -285,7 +301,7 @@ final class PartyReplicationTargetParticipantProcessor( .add(contract.contractId.coid) } .finish() - TransactionId(hash).tryAsLedgerTransactionId + UpdateId(hash) } val contractIdCounters = activeContracts.map { @@ -331,6 +347,7 @@ final class PartyReplicationTargetParticipantProcessor( recordTime = timestamp, synchronizerId = psid.logical, acsChangeFactory = acsChangeFactory, + internalContractIds = internalContractIds, ) } } @@ -355,12 +372,16 @@ object PartyReplicationTargetParticipantProcessor { new PartyReplicationTargetParticipantProcessor( partyId, requestId, + connectedSynchronizer.psid, partyToParticipantEffectiveAt, onComplete, onError, onDisconnect, - participantNodePersistentState, - connectedSynchronizer, + persistContracts(participantNodePersistentState), + getInternalContractIds(participantNodePersistentState), + connectedSynchronizer.ephemeral.recordOrderPublisher, + connectedSynchronizer.ephemeral.requestTracker, + 
connectedSynchronizer.synchronizerHandle.syncPersistentState.pureCryptoApi, futureSupervisor, exitOnFatalFailures, timeouts, @@ -370,4 +391,37 @@ object PartyReplicationTargetParticipantProcessor { .append("requestId", requestId.toHexString), testInterceptor, ) + + // TODO(#22251): Make this configurable. + private[party] val contractsToRequestEachTime = PositiveInt.tryCreate(10) + + private[party] type PersistContracts = + NonEmpty[Seq[SerializableContract]] => (ExecutionContext) => ( + TraceContext + ) => EitherT[FutureUnlessShutdown, String, Unit] + + private def persistContracts( + participantNodePersistentState: Eval[ParticipantNodePersistentState] + ): PersistContracts = contracts => + implicit ec => + implicit tc => + EitherT( + contracts.forgetNE + .traverse(ContractInstance.fromSerializable) + .traverse( + participantNodePersistentState.value.contractStore.storeContracts(_) + ) + ) + + private[party] type GetInternalContractIds = + NonEmpty[Seq[LfContractId]] => TraceContext => FutureUnlessShutdown[ + Map[LfContractId, Long] + ] + + private def getInternalContractIds( + participantNodePersistentState: Eval[ParticipantNodePersistentState] + ): GetInternalContractIds = contracts => + implicit tc => + participantNodePersistentState.value.contractStore + .lookupBatchedNonCachedInternalIds(contracts.forgetNE) } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentProcessingSteps.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentProcessingSteps.scala index 2b3705fa97..7238bfc3e3 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentProcessingSteps.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentProcessingSteps.scala @@ -43,7 +43,7 @@ import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ReassignmentTag.{Source, Target} -import com.digitalasset.canton.util.{ContractAuthenticator, EitherTUtil} +import com.digitalasset.canton.util.{ContractValidator, EitherTUtil} import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.canton.{LfPartyId, RequestCounter, SequencerCounter, checked} @@ -56,7 +56,7 @@ private[reassignment] class AssignmentProcessingSteps( reassignmentCoordination: ReassignmentCoordination, targetCrypto: SynchronizerCryptoClient, seedGenerator: SeedGenerator, - override protected val contractAuthenticator: ContractAuthenticator, + override protected val contractValidator: ContractValidator, staticSynchronizerParameters: Target[StaticSynchronizerParameters], val protocolVersion: Target[ProtocolVersion], protected val loggerFactory: NamedLoggerFactory, @@ -94,7 +94,7 @@ private[reassignment] class AssignmentProcessingSteps( staticSynchronizerParameters, participantId, reassignmentCoordination, - contractAuthenticator, + contractValidator, loggerFactory, ) @@ -378,29 +378,14 @@ private[reassignment] class AssignmentProcessingSteps( logger.info( s"Sending an abstain verdict for ${assignmentValidationResult.hostedConfirmingReassigningParties} because unassignment data is not found in the reassignment store" ) - val confirmationResponses = checked( - ConfirmationResponses.tryCreate( - parsedRequest.requestId, - 
assignmentValidationResult.rootHash, - synchronizerId.unwrap, - participantId, - NonEmpty.mk( - Seq, - ConfirmationResponse - .tryCreate( - Some(ViewPosition.root), - LocalAbstainError.CannotPerformAllValidations - .Abstain( - s"Unassignment data not found when processing assignment $reassignmentId." - ) - .toLocalAbstain(protocolVersion.unwrap), - assignmentValidationResult.hostedConfirmingReassigningParties, - ), - ), - protocolVersion.unwrap, - ) + val confirmationResponses = createAbstainResponse( + parsedRequest.requestId, + assignmentValidationResult.rootHash, + s"Unassignment data not found when processing assignment $reassignmentId.", + assignmentValidationResult.hostedConfirmingReassigningParties, ) - FutureUnlessShutdown.pure(Some(confirmationResponses)) + + FutureUnlessShutdown.pure(confirmationResponses) } else { createConfirmationResponses( parsedRequest.requestId, @@ -489,7 +474,7 @@ private[reassignment] class AssignmentProcessingSteps( } yield CommitAndStoreContractsAndPublishEvent( None, Seq.empty, - eventO.map(event => _ => event), + eventO.map(event => _ => _ => event), ) EitherT.fromEither[FutureUnlessShutdown](commit) } @@ -552,13 +537,11 @@ private[reassignment] class AssignmentProcessingSteps( source = assignmentValidationResult.sourcePSId.map(_.logical), target = synchronizerId.map(_.logical), ) - } else EitherTUtil.unitUS - update <- EitherT.fromEither[FutureUnlessShutdown]( - assignmentValidationResult.createReassignmentAccepted( - synchronizerId.map(_.logical), - participantId, - requestId.unwrap, - ) + } else EitherTUtil.unitUS[ReassignmentProcessorError] + update = assignmentValidationResult.createReassignmentAccepted( + synchronizerId.map(_.logical), + participantId, + requestId.unwrap, ) } yield CommitAndStoreContractsAndPublishEvent( commitSetO, diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentProcessor.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentProcessor.scala index 8d0fe5026b..7662833523 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentProcessor.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentProcessor.scala @@ -20,7 +20,7 @@ import com.digitalasset.canton.participant.sync.SyncEphemeralState import com.digitalasset.canton.protocol.StaticSynchronizerParameters import com.digitalasset.canton.sequencing.client.SequencerClient import com.digitalasset.canton.topology.{ParticipantId, PhysicalSynchronizerId} -import com.digitalasset.canton.util.ContractAuthenticator +import com.digitalasset.canton.util.ContractValidator import com.digitalasset.canton.util.ReassignmentTag.Target import com.digitalasset.canton.version.ProtocolVersion @@ -34,6 +34,7 @@ class AssignmentProcessor( inFlightSubmissionSynchronizerTracker: InFlightSubmissionSynchronizerTracker, ephemeral: SyncEphemeralState, synchronizerCrypto: SynchronizerCryptoClient, + contractValidator: ContractValidator, seedGenerator: SeedGenerator, sequencerClient: SequencerClient, override protected val timeouts: ProcessingTimeout, @@ -55,7 +56,7 @@ class AssignmentProcessor( reassignmentCoordination, synchronizerCrypto, seedGenerator, - ContractAuthenticator(synchronizerCrypto.pureCrypto), + contractValidator, staticSynchronizerParameters, targetProtocolVersion, loggerFactory, diff --git 
a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentValidation.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentValidation.scala index 05732938d7..72d1f8231a 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentValidation.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentValidation.scala @@ -32,7 +32,7 @@ import com.digitalasset.canton.participant.store.ReassignmentStore.{ import com.digitalasset.canton.protocol.* import com.digitalasset.canton.topology.* import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.ContractAuthenticator +import com.digitalasset.canton.util.ContractValidator import com.digitalasset.canton.util.ReassignmentTag.Target import scala.concurrent.ExecutionContext @@ -42,15 +42,10 @@ private[reassignment] class AssignmentValidation( staticSynchronizerParameters: Target[StaticSynchronizerParameters], participantId: ParticipantId, reassignmentCoordination: ReassignmentCoordination, - contractAuthenticator: ContractAuthenticator, + contractValidator: ContractValidator, protected val loggerFactory: NamedLoggerFactory, )(implicit val ec: ExecutionContext) - extends ReassignmentValidation[ - FullAssignmentTree, - AssignmentValidationResult.CommonValidationResult, - AssignmentValidationResult.ReassigningParticipantValidationResult, - ] - with NamedLogging { + extends NamedLogging { /** Validate the assignment request */ @@ -128,7 +123,7 @@ private[reassignment] class AssignmentValidation( ) } - override def performCommonValidations( + def performCommonValidations( parsedRequest: ParsedReassignmentRequest[FullAssignmentTree], activenessF: FutureUnlessShutdown[ActivenessResult], )(implicit @@ -137,9 +132,9 @@ private[reassignment] class AssignmentValidation( val topologySnapshot = Target(parsedRequest.snapshot.ipsSnapshot) val assignmentRequest: FullAssignmentTree = parsedRequest.fullViewTree - val stakeholdersCheckResultET = - ReassignmentValidation.checkMetadata( - contractAuthenticator, + val contractAuthenticationResultF = + ReassignmentValidation.authenticateContractAndStakeholders( + contractValidator, assignmentRequest, ) @@ -167,15 +162,13 @@ private[reassignment] class AssignmentValidation( } yield AssignmentValidationResult.CommonValidationResult( activenessResult = activenessResult, participantSignatureVerificationResult = participantSignatureVerificationResult, - contractAuthenticationResultF = stakeholdersCheckResultET, + contractAuthenticationResultF = contractAuthenticationResultF, submitterCheckResult = submitterCheckResult, reassignmentIdResult = reassignmentIdResult, ) } - override type ReassigningParticipantValidationData = UnassignmentData - - override def performValidationForReassigningParticipants( + def performValidationForReassigningParticipants( parsedRequest: ParsedReassignmentRequest[FullAssignmentTree], unassignmentData: UnassignmentData, )(implicit @@ -189,7 +182,7 @@ private[reassignment] class AssignmentValidation( val assignmentRequestTs = parsedRequest.requestTimestamp for { - // TODO(i26479): Check that reassignmentData.unassignmentRequest.targetTimeProof.timestamp is in the past + // TODO(i26479): Check that reassignmentData.unassignmentRequest.targetTimestamp is in the past exclusivityTimeoutError <- 
AssignmentValidation.checkExclusivityTimeout( reassignmentCoordination, targetPSId, @@ -300,9 +293,9 @@ object AssignmentValidation { ): EitherT[FutureUnlessShutdown, ReassignmentProcessorError, Option[ ReassignmentValidationError ]] = { - val targetTimeProof = unassignmentData.targetTimestamp + val targetTimestamp = unassignmentData.targetTimestamp for { - // TODO(i26479): Check that reassignmentData.unassignmentRequest.targetTimeProof.timestamp is in the past + // TODO(i26479): Check that reassignmentData.unassignmentRequest.targetTimestamp is in the past cryptoSnapshotTargetTs <- reassignmentCoordination .cryptoSnapshot( /* @@ -311,14 +304,14 @@ object AssignmentValidation { */ targetPSId, staticSynchronizerParameters, - targetTimeProof, + targetTimestamp, ) .map(_.map(_.ipsSnapshot)) exclusivityLimit <- ProcessingSteps .getAssignmentExclusivity( cryptoSnapshotTargetTs, - targetTimeProof, + targetTimestamp, ) .leftMap[ReassignmentProcessorError]( ReassignmentParametersError(targetPSId.unwrap, _) diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentValidationResult.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentValidationResult.scala index a8a230659e..a8235f8653 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentValidationResult.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentValidationResult.scala @@ -4,7 +4,6 @@ package com.digitalasset.canton.participant.protocol.reassignment import cats.data.EitherT -import cats.syntax.either.* import cats.syntax.functor.* import com.digitalasset.canton.LfPartyId import com.digitalasset.canton.data.{ @@ -21,13 +20,10 @@ import com.digitalasset.canton.ledger.participant.state.{ Update, } import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.participant.protocol.ProcessingSteps.InternalContractIds import com.digitalasset.canton.participant.protocol.conflictdetection.{ActivenessResult, CommitSet} -import com.digitalasset.canton.participant.protocol.reassignment.ReassignmentProcessingSteps.{ - FieldConversionError, - ReassignmentProcessorError, -} import com.digitalasset.canton.participant.protocol.validation.AuthenticationError -import com.digitalasset.canton.protocol.{LfNodeCreate, ReassignmentId, RootHash} +import com.digitalasset.canton.protocol.{LfNodeCreate, ReassignmentId, RootHash, UpdateId} import com.digitalasset.canton.topology.{ParticipantId, PhysicalSynchronizerId, SynchronizerId} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ReassignmentTag.{Source, Target} @@ -74,7 +70,7 @@ final case class AssignmentValidationResult private[reassignment] ( recordTime: CantonTimestamp, )(implicit traceContext: TraceContext - ): Either[ReassignmentProcessorError, AcsChangeFactory => SequencedUpdate] = { + ): AcsChangeFactory => InternalContractIds => SequencedUpdate = { val reassignment = contracts.contracts.zipWithIndex.map { case (reassign, idx) => val contract = reassign.contract val contractInst = contract.inst @@ -96,39 +92,36 @@ final case class AssignmentValidationResult private[reassignment] ( nodeId = idx, ) } - for { - updateId <- - rootHash.asLedgerTransactionId.leftMap[ReassignmentProcessorError]( - FieldConversionError(reassignmentId, "Transaction id (root hash)", _) - ) - - 
completionInfo = - Option.when(participantId == submitterMetadata.submittingParticipant)( - CompletionInfo( - actAs = List(submitterMetadata.submitter), - userId = submitterMetadata.userId, - commandId = submitterMetadata.commandId, - optDeduplicationPeriod = None, - submissionId = submitterMetadata.submissionId, - ) + val updateId = rootHash + val completionInfo = + Option.when(participantId == submitterMetadata.submittingParticipant)( + CompletionInfo( + actAs = List(submitterMetadata.submitter), + userId = submitterMetadata.userId, + commandId = submitterMetadata.commandId, + optDeduplicationPeriod = None, + submissionId = submitterMetadata.submissionId, ) - } yield (acsChangeFactory: AcsChangeFactory) => - Update.SequencedReassignmentAccepted( - optCompletionInfo = completionInfo, - workflowId = submitterMetadata.workflowId, - updateId = updateId, - reassignmentInfo = ReassignmentInfo( - sourceSynchronizer = sourcePSId.map(_.logical), - targetSynchronizer = targetSynchronizer, - submitter = Option(submitterMetadata.submitter), - reassignmentId = reassignmentId, - isReassigningParticipant = isReassigningParticipant, - ), - reassignment = Reassignment.Batch(reassignment), - recordTime = recordTime, - synchronizerId = targetSynchronizer.unwrap, - acsChangeFactory = acsChangeFactory, ) + (acsChangeFactory: AcsChangeFactory) => + (internalContractIds: InternalContractIds) => + Update.SequencedReassignmentAccepted( + optCompletionInfo = completionInfo, + workflowId = submitterMetadata.workflowId, + updateId = UpdateId.fromRootHash(updateId), + reassignmentInfo = ReassignmentInfo( + sourceSynchronizer = sourcePSId.map(_.logical), + targetSynchronizer = targetSynchronizer, + submitter = Option(submitterMetadata.submitter), + reassignmentId = reassignmentId, + isReassigningParticipant = isReassigningParticipant, + ), + reassignment = Reassignment.Batch(reassignment), + recordTime = recordTime, + synchronizerId = targetSynchronizer.unwrap, + acsChangeFactory = acsChangeFactory, + internalContractIds = internalContractIds, + ) } } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/AutomaticAssignment.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/AutomaticAssignment.scala index 19bf8a4cf2..3f3205374c 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/AutomaticAssignment.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/AutomaticAssignment.scala @@ -38,7 +38,7 @@ private[participant] object AutomaticAssignment { stakeholders: Set[LfPartyId], unassignmentSubmitterMetadata: ReassignmentSubmitterMetadata, participantId: ParticipantId, - t0: CantonTimestamp, + targetTimestamp: Target[CantonTimestamp], )(implicit ec: ExecutionContext, elc: ErrorLoggingContext, @@ -62,13 +62,12 @@ private[participant] object AutomaticAssignment { def performAutoAssignmentOnce : EitherT[FutureUnlessShutdown, ReassignmentProcessorError, com.google.rpc.status.Status] = for { - targetIps <- reassignmentCoordination - .getTimeProofAndSnapshot( + targetTopology <- reassignmentCoordination + .getRecentTopologySnapshot( targetSynchronizer, targetStaticSynchronizerParameters, ) - .map(_._2) - possibleSubmittingParties <- EitherT.right(hostedStakeholders(targetIps.map(_.ipsSnapshot))) + possibleSubmittingParties <- EitherT.right(hostedStakeholders(targetTopology)) 
assignmentSubmitter <- EitherT.fromOption[FutureUnlessShutdown]( possibleSubmittingParties.headOption, AutomaticAssignmentError("No possible submitting party for automatic assignment"), @@ -85,6 +84,7 @@ private[participant] object AutomaticAssignment { workflowId = None, ), id, + targetTopology, )(TraceContext.empty) .mapK(FutureUnlessShutdown.outcomeK) AssignmentProcessingSteps.SubmissionResult(completionF) = submissionResult @@ -125,7 +125,8 @@ private[participant] object AutomaticAssignment { exclusivityLimit <- EitherT .fromEither[FutureUnlessShutdown]( targetSynchronizerParameters.unwrap - .assignmentExclusivityLimitFor(t0) + .assignmentExclusivityLimitFor(targetTimestamp.unwrap) + .map(Target(_)) .leftMap(ReassignmentParametersError(targetSynchronizer.unwrap, _)) ) .leftWiden[ReassignmentProcessorError] @@ -134,7 +135,7 @@ private[participant] object AutomaticAssignment { _ <- if (targetHostedStakeholders.nonEmpty) { logger.info( - s"Registering automatic submission of assignment with ID $id at time $exclusivityLimit, where base timestamp is $t0" + s"Registering automatic submission of assignment with ID $id at time $exclusivityLimit, where base timestamp is $targetTimestamp" ) for { _ <- reassignmentCoordination @@ -176,7 +177,7 @@ private[participant] object AutomaticAssignment { .cryptoSnapshot( targetSynchronizer, targetStaticSynchronizerParameters, - t0, + targetTimestamp, ) targetSnapshot = targetIps.map(_.ipsSnapshot) diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentCoordination.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentCoordination.scala index d6b4739a6e..8a49d59995 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentCoordination.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentCoordination.scala @@ -6,9 +6,10 @@ package com.digitalasset.canton.participant.protocol.reassignment import cats.data.EitherT import cats.instances.future.catsStdInstancesForFuture import cats.syntax.functor.* -import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.config.ReassignmentsConfig import com.digitalasset.canton.crypto.{ SyncCryptoApiParticipantProvider, + SynchronizerCryptoClient, SynchronizerSnapshotSyncCryptoApi, } import com.digitalasset.canton.data.{ @@ -34,8 +35,8 @@ import com.digitalasset.canton.participant.sync.{ SyncPersistentStateManager, } import com.digitalasset.canton.protocol.* -import com.digitalasset.canton.sequencing.protocol.TimeProof -import com.digitalasset.canton.time.SynchronizerTimeTracker +import com.digitalasset.canton.time.{NonNegativeFiniteDuration, SynchronizerTimeTracker} +import com.digitalasset.canton.topology.client.TopologySnapshot import com.digitalasset.canton.topology.{PhysicalSynchronizerId, SynchronizerId} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ReassignmentTag.{Source, Target} @@ -44,19 +45,51 @@ import com.digitalasset.canton.util.{ReassignmentTag, SameReassignmentType, Sing import scala.concurrent.{ExecutionContext, Future} +trait GetTopologyAtTimestamp { + + /** Will wait for the topology at the requested timestamp, unless it's too far in the future, in + * which case it'll return None. 
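   *
   * A simplified sketch of the tolerance check (toy java.time values, not the actual
   * implementation):
   * {{{
   * import java.time.{Duration, Instant}
   *
   * def maybeAwait(requested: Instant, local: Instant, tolerance: Duration): Option[Instant] =
   *   Option.when(!requested.isAfter(local.plus(tolerance)))(requested)
   *
   * val local = Instant.parse("2024-01-01T00:00:00Z")
   * // Within tolerance: the snapshot is awaited.
   * maybeAwait(local.plusSeconds(10), local, Duration.ofSeconds(30)) // Some(...)
   * // Too far ahead of the local topology time: nothing is returned.
   * maybeAwait(local.plusSeconds(300), local, Duration.ofSeconds(30)) // None
   * }}}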
+ */ + def maybeAwaitTopologySnapshot( + targetPSId: Target[PhysicalSynchronizerId], + requestedTimestamp: Target[CantonTimestamp], + )(implicit + traceContext: TraceContext + ): EitherT[ + FutureUnlessShutdown, + ReassignmentProcessorError, + Option[Target[TopologySnapshot]], + ] + /* + * TODO(i27585): After cleaning up the waiting routines, refactor to something like + * + * def getTopologySnapshot( + * targetPSId: PhysicalSynchronizerId, + * requestedTimestamp: CantonTimestamp, + * ): Either[UnknownPhysicalSynchronizer, TopologySnapshotResult] + * + * sealed trait TopologySnapshotResult + * case class TimestampTooFarInFuture(description: String) extends TopologySnapshotResult + * case class OK(await: FutureUnlessShutdown[TopologySnapshot]) extends TopologySnapshotResult + * + * This makes the various outcomes very clear, and isolates the Future into only the success case. + */ +} + class ReassignmentCoordination( reassignmentStoreFor: Target[SynchronizerId] => Either[ ReassignmentProcessorError, ReassignmentStore, ], - recentTimeProofFor: RecentTimeProofProvider, reassignmentSubmissionFor: PhysicalSynchronizerId => Option[ReassignmentSubmissionHandle], pendingUnassignments: Source[SynchronizerId] => Option[ReassignmentSynchronizer], staticSynchronizerParametersGetter: StaticSynchronizerParametersGetter, syncCryptoApi: SyncCryptoApiParticipantProvider, + targetTimestampForwardTolerance: NonNegativeFiniteDuration, override val loggerFactory: NamedLoggerFactory, )(implicit ec: ExecutionContext) - extends NamedLogging { + extends NamedLogging + with GetTopologyAtTimestamp { def addPendingUnassignment( reassignmentId: ReassignmentId, @@ -83,16 +116,16 @@ class ReassignmentCoordination( .getOrElse(Future.successful(())) ) - private[reassignment] def awaitSynchronizerTime( - psid: ReassignmentTag[PhysicalSynchronizerId], - timestamp: CantonTimestamp, + private[reassignment] def awaitSynchronizerTime[T[X] <: ReassignmentTag[X]: SameReassignmentType]( + psid: T[PhysicalSynchronizerId], + timestamp: T[CantonTimestamp], )(implicit traceContext: TraceContext ): EitherT[Future, UnknownPhysicalSynchronizer, Unit] = reassignmentSubmissionFor(psid.unwrap) match { case Some(handle) => - handle.timeTracker.requestTick(timestamp, immediately = true).discard - EitherT.right(handle.timeTracker.awaitTick(timestamp).getOrElse(Future.unit)) + handle.timeTracker.requestTick(timestamp.unwrap, immediately = true).discard + EitherT.right(handle.timeTracker.awaitTick(timestamp.unwrap).getOrElse(Future.unit)) case None => EitherT.leftT( UnknownPhysicalSynchronizer( @@ -110,7 +143,7 @@ class ReassignmentCoordination( private[reassignment] def awaitTimestamp[T[X] <: ReassignmentTag[X]: SameReassignmentType]( synchronizerId: T[PhysicalSynchronizerId], staticSynchronizerParameters: T[StaticSynchronizerParameters], - timestamp: CantonTimestamp, + timestamp: T[CantonTimestamp], )(implicit traceContext: TraceContext ): Either[ReassignmentProcessorError, Option[FutureUnlessShutdown[Unit]]] = @@ -121,8 +154,8 @@ class ReassignmentCoordination( ) handle <- reassignmentSubmissionFor(synchronizerId.unwrap) } yield { - handle.timeTracker.requestTick(timestamp, immediately = true).discard - cryptoApi.awaitTimestamp(timestamp) + handle.timeTracker.requestTick(timestamp.unwrap, immediately = true).discard + cryptoApi.awaitTimestamp(timestamp.unwrap) }).toRight(UnknownPhysicalSynchronizer(synchronizerId.unwrap, "When waiting for timestamp")) /** Similar to [[awaitTimestamp]] but lifted into an [[EitherT]] @@ -133,7 +166,7 @@ class 
ReassignmentCoordination( private[reassignment] def awaitTimestamp[T[X] <: ReassignmentTag[X]: SameReassignmentType]( synchronizerId: T[PhysicalSynchronizerId], staticSynchronizerParameters: T[StaticSynchronizerParameters], - timestamp: CantonTimestamp, + timestamp: T[CantonTimestamp], onImmediate: => FutureUnlessShutdown[Unit], )(implicit traceContext: TraceContext @@ -152,6 +185,7 @@ class ReassignmentCoordination( targetSynchronizerId: Target[PhysicalSynchronizerId], submitterMetadata: ReassignmentSubmitterMetadata, reassignmentId: ReassignmentId, + targetTopology: Target[TopologySnapshot], )(implicit traceContext: TraceContext ): EitherT[Future, ReassignmentProcessorError, AssignmentProcessingSteps.SubmissionResult] = { @@ -167,6 +201,7 @@ class ReassignmentCoordination( .submitAssignments( submitterMetadata, reassignmentId, + targetTopology, ) .mapK(FutureUnlessShutdown.outcomeK) .semiflatMap(Predef.identity) @@ -176,12 +211,13 @@ class ReassignmentCoordination( private[reassignment] def getStaticSynchronizerParameter[T[_]: SingletonTraverse]( psid: T[PhysicalSynchronizerId] - ): EitherT[FutureUnlessShutdown, UnknownPhysicalSynchronizer, T[StaticSynchronizerParameters]] = + ): Either[UnknownPhysicalSynchronizer, T[StaticSynchronizerParameters]] = psid.traverseSingleton { (_, synchronizerId) => - EitherT.fromOption[FutureUnlessShutdown]( - staticSynchronizerParametersGetter.staticSynchronizerParameters(synchronizerId), - UnknownPhysicalSynchronizer(synchronizerId, "getting static synchronizer parameters"), - ) + staticSynchronizerParametersGetter + .staticSynchronizerParameters(synchronizerId) + .toRight( + UnknownPhysicalSynchronizer(synchronizerId, "getting static synchronizer parameters") + ) } /** Returns a [[crypto.SynchronizerSnapshotSyncCryptoApi]] for the given `synchronizer` at the @@ -194,7 +230,7 @@ class ReassignmentCoordination( ]( psid: T[PhysicalSynchronizerId], staticSynchronizerParameters: T[StaticSynchronizerParameters], - timestamp: CantonTimestamp, + timestamp: T[CantonTimestamp], )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, ReassignmentProcessorError, T[ @@ -215,15 +251,15 @@ class ReassignmentCoordination( } ) .semiflatMap( - _.traverseSingleton((_, syncCrypto) => syncCrypto.snapshot(timestamp)) + _.traverseSingleton((_, syncCrypto) => syncCrypto.snapshot(timestamp.unwrap)) ) - private[reassignment] def awaitTimestampAndGetTaggedCryptoSnapshot[T[X] <: ReassignmentTag[ + private def awaitTimestampAndGetTaggedCryptoSnapshot[T[X] <: ReassignmentTag[ X ]: SameReassignmentType: SingletonTraverse]( targetSynchronizerId: T[PhysicalSynchronizerId], staticSynchronizerParameters: T[StaticSynchronizerParameters], - timestamp: CantonTimestamp, + timestamp: T[CantonTimestamp], )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, ReassignmentProcessorError, T[ @@ -243,7 +279,63 @@ class ReassignmentCoordination( ) } yield snapshot - private[reassignment] def getTimeProofAndSnapshot( + private def getRecentTopologyTimestamp[T[X] <: ReassignmentTag[ + X + ]: SameReassignmentType: SingletonTraverse]( + psid: T[PhysicalSynchronizerId] + )(implicit + traceContext: TraceContext + ): Either[UnknownPhysicalSynchronizer, T[CantonTimestamp]] = for { + staticSynchronizerParameters <- getStaticSynchronizerParameter(psid) + topoClient <- getTopologyClient(psid, staticSynchronizerParameters) + } yield topoClient.map(_.currentSnapshotApproximation.ipsSnapshot.timestamp) + + override def maybeAwaitTopologySnapshot( + targetPSId: 
Target[PhysicalSynchronizerId], + requestedTimestamp: Target[CantonTimestamp], + )(implicit + traceContext: TraceContext + ): EitherT[ + FutureUnlessShutdown, + ReassignmentProcessorError, + Option[Target[TopologySnapshot]], + ] = for { + staticSynchronizerParameters <- EitherT.fromEither[FutureUnlessShutdown]( + getStaticSynchronizerParameter(targetPSId) + ) + + recentTimestamp <- EitherT.fromEither[FutureUnlessShutdown]( + getRecentTopologyTimestamp(targetPSId) + ) + timestampUpperBound = recentTimestamp.map(_ + targetTimestampForwardTolerance) + topology <- + if (requestedTimestamp <= timestampUpperBound) { + awaitTimestampAndGetTaggedCryptoSnapshot( + targetPSId, + staticSynchronizerParameters, + requestedTimestamp, + ).map(_.map(_.ipsSnapshot)).map(Some(_)) + } else { + logger.info( + s"Not loading target topology at timestamp $requestedTimestamp because it is more than $targetTimestampForwardTolerance ahead of our local target timestamp of $recentTimestamp." + ) + EitherT.right[ReassignmentProcessorError](FutureUnlessShutdown.pure(None)) + } + } yield topology + + private def getTopologyClient[ + T[X] <: ReassignmentTag[X]: SameReassignmentType: SingletonTraverse + ]( + psid: T[PhysicalSynchronizerId], + staticSynchronizerParameters: T[StaticSynchronizerParameters], + ): Either[UnknownPhysicalSynchronizer, T[SynchronizerCryptoClient]] = + psid + .traverseSingleton { case (_, synchronizerId) => + syncCryptoApi.forSynchronizer(synchronizerId, staticSynchronizerParameters.unwrap) + } + .toRight(UnknownPhysicalSynchronizer(psid.unwrap, "when getting topology client")) + + private[reassignment] def getRecentTopologySnapshot( targetSynchronizerId: Target[PhysicalSynchronizerId], staticSynchronizerParameters: Target[StaticSynchronizerParameters], )(implicit @@ -251,17 +343,19 @@ class ReassignmentCoordination( ): EitherT[ FutureUnlessShutdown, ReassignmentProcessorError, - (TimeProof, Target[SynchronizerSnapshotSyncCryptoApi]), + Target[TopologySnapshot], ] = for { - timeProof <- recentTimeProofFor.get(targetSynchronizerId, staticSynchronizerParameters) + timestamp <- EitherT.fromEither[FutureUnlessShutdown]( + getRecentTopologyTimestamp(targetSynchronizerId) + ) // Since events are stored before they are processed, we wait just to be sure. targetCrypto <- awaitTimestampAndGetTaggedCryptoSnapshot( targetSynchronizerId, staticSynchronizerParameters, - timeProof.timestamp, + timestamp, ) - } yield (timeProof, targetCrypto) + } yield targetCrypto.map(_.ipsSnapshot) /** Stores the given reassignment data on the target synchronizer. 
*/ private[reassignment] def addUnassignmentRequest( @@ -311,7 +405,7 @@ class ReassignmentCoordination( object ReassignmentCoordination { def apply( - reassignmentTimeProofFreshnessProportion: NonNegativeInt, + reassignmentsConfig: ReassignmentsConfig, syncPersistentStateManager: SyncPersistentStateManager, submissionHandles: PhysicalSynchronizerId => Option[ReassignmentSubmissionHandle], pendingUnassignments: Source[SynchronizerId] => Option[ReassignmentSynchronizer], @@ -325,20 +419,14 @@ object ReassignmentCoordination { .reassignmentStore(synchronizerId.unwrap) .toRight(UnknownSynchronizer(synchronizerId.unwrap, "looking for reassignment store")) - val recentTimeProofProvider = new RecentTimeProofProvider( - submissionHandles, - syncCryptoApi, - loggerFactory, - reassignmentTimeProofFreshnessProportion, - ) - new ReassignmentCoordination( reassignmentStoreFor = reassignmentStoreFor, - recentTimeProofFor = recentTimeProofProvider, reassignmentSubmissionFor = submissionHandles, pendingUnassignments = pendingUnassignments, staticSynchronizerParametersGetter = syncPersistentStateManager, syncCryptoApi = syncCryptoApi, + targetTimestampForwardTolerance = + reassignmentsConfig.targetTimestampForwardTolerance.toInternal, loggerFactory = loggerFactory, ) } @@ -351,6 +439,7 @@ trait ReassignmentSubmissionHandle { submitterMetadata: ReassignmentSubmitterMetadata, contractIds: Seq[LfContractId], targetSynchronizer: Target[PhysicalSynchronizerId], + sourceTopology: Source[TopologySnapshot], )(implicit traceContext: TraceContext ): EitherT[Future, ReassignmentProcessorError, FutureUnlessShutdown[ @@ -360,6 +449,7 @@ trait ReassignmentSubmissionHandle { def submitAssignments( submitterMetadata: ReassignmentSubmitterMetadata, reassignmentId: ReassignmentId, + targetTopology: Target[TopologySnapshot], )(implicit traceContext: TraceContext ): EitherT[Future, ReassignmentProcessorError, FutureUnlessShutdown[ diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentProcessingSteps.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentProcessingSteps.scala index b933221a49..550950ef16 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentProcessingSteps.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentProcessingSteps.scala @@ -38,6 +38,7 @@ import com.digitalasset.canton.participant.protocol.ProtocolProcessor.{ MalformedPayload, NoMediatorError, ProcessorError, + ViewMessageError, } import com.digitalasset.canton.participant.protocol.reassignment.ReassignmentProcessingSteps.* import com.digitalasset.canton.participant.protocol.submission.EncryptedViewMessageFactory.EncryptedViewMessageCreationError @@ -46,6 +47,7 @@ import com.digitalasset.canton.participant.store.ReassignmentStore.ReassignmentS import com.digitalasset.canton.participant.sync.SyncServiceError.SyncServiceAlarm import com.digitalasset.canton.protocol.* import com.digitalasset.canton.protocol.messages.* +import com.digitalasset.canton.protocol.messages.EncryptedViewMessageError.InvalidContractIdInView import com.digitalasset.canton.protocol.messages.Verdict.{ Approve, MediatorReject, @@ -56,7 +58,7 @@ import com.digitalasset.canton.store.ConfirmationRequestSessionKeyStore import com.digitalasset.canton.time.SynchronizerTimeTracker import 
com.digitalasset.canton.topology.{ParticipantId, PhysicalSynchronizerId, SynchronizerId} import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.{ContractAuthenticator, EitherTUtil, ReassignmentTag} +import com.digitalasset.canton.util.{ContractValidator, EitherTUtil, ReassignmentTag} import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.canton.{LfPartyId, RequestCounter, SequencerCounter, checked} @@ -82,7 +84,7 @@ private[reassignment] trait ReassignmentProcessingSteps[ val protocolVersion: ReassignmentTag[ProtocolVersion] - protected def contractAuthenticator: ContractAuthenticator + protected def contractValidator: ContractValidator protected implicit def ec: ExecutionContext @@ -104,6 +106,10 @@ private[reassignment] trait ReassignmentProcessingSteps[ override val requestType: RequestType override type FullView <: FullReassignmentViewTree + + override type ViewAbsoluteLedgerEffects = Unit + override type FullViewAbsoluteLedgerEffects = Unit + override type ParsedRequestType = ParsedReassignmentRequest[FullView] protected def reassignmentId( @@ -212,9 +218,38 @@ private[reassignment] trait ReassignmentProcessingSteps[ EitherT.right(result) } + override def absolutizeLedgerEffects( + viewsWithCorrectRootHashAndRecipientsAndSignature: Seq[ + (WithRecipients[DecryptedView], Option[Signature]) + ] + ): ( + Seq[(WithRecipients[DecryptedView], Option[Signature], ViewAbsoluteLedgerEffects)], + Seq[MalformedPayload], + ) = + // Merely check that all reassigned contract IDs are absolute. + viewsWithCorrectRootHashAndRecipientsAndSignature.partitionMap { + case (withRecipients @ WithRecipients(view, _), sig) => + val invalidContractIds = + view.contracts.contracts.view.map(_.contract.contractId).filterNot(_.isAbsolute).toSeq + Either.cond( + invalidContractIds.nonEmpty, + ViewMessageError( + InvalidContractIdInView( + s"Invalid contract IDs in view at position ${view.viewPosition}: $invalidContractIds" + ) + ), + (withRecipients, sig, ()), + ) + } + override def computeFullViews( - decryptedViewsWithSignatures: Seq[(WithRecipients[DecryptedView], Option[Signature])] - ): (Seq[(WithRecipients[FullView], Option[Signature])], Seq[ProtocolProcessor.MalformedPayload]) = + decryptedViewsWithSignatures: Seq[ + (WithRecipients[DecryptedView], Option[Signature], ViewAbsoluteLedgerEffects) + ] + ): ( + Seq[(WithRecipients[FullView], Option[Signature], FullViewAbsoluteLedgerEffects)], + Seq[ProtocolProcessor.MalformedPayload], + ) = (decryptedViewsWithSignatures, Seq.empty) override def computeParsedRequest( @@ -222,7 +257,7 @@ private[reassignment] trait ReassignmentProcessingSteps[ ts: CantonTimestamp, sc: SequencerCounter, rootViewsWithMetadata: NonEmpty[ - Seq[(WithRecipients[FullView], Option[Signature])] + Seq[(WithRecipients[FullView], Option[Signature], FullViewAbsoluteLedgerEffects)] ], submitterMetadataO: Option[ViewSubmitterMetadata], isFreshOwnTimelyRequest: Boolean, @@ -246,7 +281,7 @@ private[reassignment] trait ReassignmentProcessingSteps[ .report() } - val (WithRecipients(viewTree, recipients), signature) = rootViewsWithMetadata.head1 + val (WithRecipients(viewTree, recipients), signature, ()) = rootViewsWithMetadata.head1 contractsMaybeUnknown(viewTree, snapshot).map(contractsMaybeUnknown => ParsedReassignmentRequest( @@ -405,6 +440,37 @@ private[reassignment] trait ReassignmentProcessingSteps[ ) } + protected def createAbstainResponse( + requestId: RequestId, + rootHash: RootHash, + msg: String, + 
hostedConfirmingReassigningParties: Set[LfPartyId], + ): Option[ConfirmationResponses] = + NonEmpty + .from(hostedConfirmingReassigningParties) + .map { parties => + checked( + ConfirmationResponses.tryCreate( + requestId, + rootHash, + synchronizerId.unwrap, + participantId, + NonEmpty.mk( + Seq, + ConfirmationResponse + .tryCreate( + Some(ViewPosition.root), + LocalAbstainError.CannotPerformAllValidations + .Abstain(msg) + .toLocalAbstain(protocolVersion.unwrap), + parties, + ), + ), + protocolVersion.unwrap, + ) + ) + } + private def responsesForWellformedPayloads( requestId: RequestId, protocolVersion: ProtocolVersion, diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentValidation.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentValidation.scala index 29cca8819b..4c662d4830 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentValidation.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentValidation.scala @@ -6,56 +6,19 @@ package com.digitalasset.canton.participant.protocol.reassignment import cats.data.EitherT import cats.syntax.either.* import cats.syntax.functor.* +import com.daml.logging.LoggingContext import com.digitalasset.canton.LfPartyId import com.digitalasset.canton.data.* import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.participant.protocol.conflictdetection.ActivenessResult -import com.digitalasset.canton.participant.protocol.reassignment.ReassignmentProcessingSteps.{ - ParsedReassignmentRequest, - ReassignmentProcessorError, -} -import com.digitalasset.canton.participant.protocol.reassignment.ReassignmentValidationResult.CommonValidationResult import com.digitalasset.canton.protocol.ReassignmentId import com.digitalasset.canton.sequencing.protocol.MediatorGroupRecipient import com.digitalasset.canton.topology.ParticipantId import com.digitalasset.canton.topology.client.TopologySnapshot import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.{ContractAuthenticator, EitherTUtil, MonadUtil, ReassignmentTag} +import com.digitalasset.canton.util.{ContractValidator, EitherTUtil, MonadUtil, ReassignmentTag} import scala.concurrent.ExecutionContext -private[reassignment] trait ReassignmentValidation[ - View <: FullReassignmentViewTree, - CommonResult <: ReassignmentValidationResult.CommonValidationResult, - ReassigningParticipantResult <: ReassignmentValidationResult.ReassigningParticipantValidationResult, -] { - type ReassigningParticipantValidationData - - /** The common validations that are performed on all participants (reassigning as well as - * non-reassigning) - */ - def performCommonValidations( - parsedRequest: ParsedReassignmentRequest[View], - activenessF: FutureUnlessShutdown[ActivenessResult], - )(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[CommonValidationResult] - - /** The validations that are performed only for reassigning participants. We need specific - * parameters depending on the type of reassignment request. 
- */ - def performValidationForReassigningParticipants( - parsedRequest: ParsedReassignmentRequest[View], - additionalParams: ReassigningParticipantValidationData, - )(implicit - traceContext: TraceContext - ): EitherT[ - FutureUnlessShutdown, - ReassignmentProcessorError, - ReassigningParticipantResult, - ] -} - object ReassignmentValidation { /** - check if the submitter is a stakeholder @@ -102,37 +65,46 @@ object ReassignmentValidation { ) } yield () - def checkMetadata( - contractAuthenticator: ContractAuthenticator, + def authenticateContractAndStakeholders( + contractValidator: ContractValidator, reassignmentRequest: FullReassignmentViewTree, )(implicit - ec: ExecutionContext + ec: ExecutionContext, + traceContext: TraceContext, ): EitherT[FutureUnlessShutdown, ReassignmentValidationError, Unit] = { val declaredViewStakeholders = reassignmentRequest.stakeholders val declaredContractStakeholders = reassignmentRequest.contracts.stakeholders - EitherT.fromEither(for { - _ <- Either.cond( - declaredViewStakeholders == declaredContractStakeholders, - (), - ReassignmentValidationError.StakeholdersMismatch( - reassignmentRequest.reassignmentRef, - declaredViewStakeholders = declaredViewStakeholders, - expectedStakeholders = declaredContractStakeholders, - ), + for { + _ <- EitherT.fromEither[FutureUnlessShutdown]( + Either.cond( + declaredViewStakeholders == declaredContractStakeholders, + (), + ReassignmentValidationError.StakeholdersMismatch( + reassignmentRequest.reassignmentRef, + declaredViewStakeholders = declaredViewStakeholders, + expectedStakeholders = declaredContractStakeholders, + ): ReassignmentValidationError, + ) ) - _ <- MonadUtil.sequentialTraverse(reassignmentRequest.contracts.contracts) { reassign => - contractAuthenticator - .legacyAuthenticate(reassign.contract.inst) - .leftMap(error => - ReassignmentValidationError.ContractIdAuthenticationFailure( - reassignmentRequest.reassignmentRef, - error, - reassign.contract.contractId, + + _ <- MonadUtil.sequentialTraverse(reassignmentRequest.contracts.contracts.forgetNE) { + reassign => + contractValidator + .authenticate(reassign.contract.inst, reassign.contract.templateId.packageId)( + ec, + traceContext, + LoggingContext.empty, ) - ) + .leftMap { reason => + ReassignmentValidationError.ContractAuthenticationFailure( + reassignmentRequest.reassignmentRef, + reason, + reassign.contract.contractId, + ): ReassignmentValidationError + } } - } yield ()) + } yield () } def ensureMediatorActive( diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentValidationError.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentValidationError.scala index 5bccd331dc..b3dec57291 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentValidationError.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentValidationError.scala @@ -57,7 +57,7 @@ object ReassignmentValidationError { s"Expected $expectedStakeholders, found $declaredViewStakeholders" } - final case class ContractIdAuthenticationFailure( + final case class ContractAuthenticationFailure( reassignmentRef: ReassignmentRef, reason: String, contractId: LfContractId, diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/RecentTimeProofProvider.scala 
b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/RecentTimeProofProvider.scala deleted file mode 100644 index dd18bc55ad..0000000000 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/RecentTimeProofProvider.scala +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.participant.protocol.reassignment - -import cats.data.EitherT -import cats.syntax.either.* -import com.digitalasset.canton.config.RequireTypes.NonNegativeInt -import com.digitalasset.canton.crypto.SyncCryptoApiParticipantProvider -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.participant.protocol.reassignment.ReassignmentProcessingSteps.{ - NoTimeProofFromSynchronizer, - ReassignmentProcessorError, -} -import com.digitalasset.canton.protocol.StaticSynchronizerParameters -import com.digitalasset.canton.sequencing.protocol.TimeProof -import com.digitalasset.canton.time.NonNegativeFiniteDuration -import com.digitalasset.canton.topology.PhysicalSynchronizerId -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.ReassignmentTag.Target - -import scala.concurrent.ExecutionContext - -/** Returns a recent time proof received from the given synchronizer. */ -private[reassignment] class RecentTimeProofProvider( - submissionHandles: PhysicalSynchronizerId => Option[ReassignmentSubmissionHandle], - syncCryptoApi: SyncCryptoApiParticipantProvider, - override val loggerFactory: NamedLoggerFactory, - reassignmentTimeProofFreshnessProportion: NonNegativeInt, -)(implicit ec: ExecutionContext) - extends NamedLogging { - - private def calculateFreshness( - exclusivityTimeout: NonNegativeFiniteDuration - ): NonNegativeFiniteDuration = - if (reassignmentTimeProofFreshnessProportion.unwrap == 0) - NonNegativeFiniteDuration.Zero // always fetch time proof - else - exclusivityTimeout / reassignmentTimeProofFreshnessProportion - - def get( - targetSynchronizerId: Target[PhysicalSynchronizerId], - staticSynchronizerParameters: Target[StaticSynchronizerParameters], - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, ReassignmentProcessorError, TimeProof] = { - val synchronizer = targetSynchronizerId.unwrap - - for { - handle <- EitherT.fromEither[FutureUnlessShutdown]( - submissionHandles(synchronizer).toRight( - NoTimeProofFromSynchronizer(synchronizer, "unknown synchronizer") - ) - ) - - crypto <- EitherT.fromEither[FutureUnlessShutdown]( - syncCryptoApi - .forSynchronizer(synchronizer, staticSynchronizerParameters.value) - .toRight(NoTimeProofFromSynchronizer(synchronizer, "getting the crypto client")) - ) - - parameters <- EitherT( - crypto.ips.currentSnapshotApproximation - .findDynamicSynchronizerParameters() - .map( - _.leftMap(err => - NoTimeProofFromSynchronizer( - synchronizer, - s"unable to find synchronizer parameters: $err", - ) - ) - ) - ) - exclusivityTimeout = parameters.assignmentExclusivityTimeout - desiredTimeProofFreshness = calculateFreshness(exclusivityTimeout) - timeProof <- EitherT.right(handle.timeTracker.fetchTimeProof(desiredTimeProofFreshness)) - } yield timeProof - } -} diff --git 
a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentProcessingSteps.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentProcessingSteps.scala index 01f0d7ec85..7421e85243 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentProcessingSteps.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentProcessingSteps.scala @@ -56,11 +56,10 @@ import com.digitalasset.canton.store.ConfirmationRequestSessionKeyStore import com.digitalasset.canton.time.SynchronizerTimeTracker import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex -import com.digitalasset.canton.topology.client.TopologySnapshot import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.EitherTUtil.{condUnitET, ifThenET} import com.digitalasset.canton.util.ReassignmentTag.{Source, Target} -import com.digitalasset.canton.util.{ContractAuthenticator, MonadUtil} +import com.digitalasset.canton.util.{ContractValidator, MonadUtil} import com.digitalasset.canton.version.{ProtocolVersion, ProtocolVersionValidation} import com.digitalasset.canton.{LfPartyId, RequestCounter, SequencerCounter, checked} @@ -73,7 +72,7 @@ private[reassignment] class UnassignmentProcessingSteps( sourceCrypto: SynchronizerCryptoClient, seedGenerator: SeedGenerator, staticSynchronizerParameters: Source[StaticSynchronizerParameters], - override protected val contractAuthenticator: ContractAuthenticator, + override protected val contractValidator: ContractValidator, val protocolVersion: Source[ProtocolVersion], protected val loggerFactory: NamedLoggerFactory, )(implicit val ec: ExecutionContext) @@ -141,15 +140,17 @@ private[reassignment] class UnassignmentProcessingSteps( TargetSynchronizerIsSourceSynchronizer(synchronizerId.unwrap, contractIds), ) - targetStaticSynchronizerParameters <- reassignmentCoordination - .getStaticSynchronizerParameter(targetSynchronizer) - timeProofAndSnapshot <- reassignmentCoordination - .getTimeProofAndSnapshot( + targetStaticSynchronizerParameters <- EitherT.fromEither[FutureUnlessShutdown]( + reassignmentCoordination + .getStaticSynchronizerParameter(targetSynchronizer) + ) + targetTopology <- reassignmentCoordination + .getRecentTopologySnapshot( targetSynchronizer, targetStaticSynchronizerParameters, ) - (timeProof, targetCrypto) = timeProofAndSnapshot - _ = logger.debug(withDetails(s"Picked time proof ${timeProof.timestamp}")) + targetTimestamp = targetTopology.map(_.timestamp) + _ = logger.debug(withDetails(s"Picked target timestamp $targetTimestamp")) contractStates <- EitherT( ephemeralState.tracker @@ -196,14 +197,13 @@ private[reassignment] class UnassignmentProcessingSteps( validated <- UnassignmentRequest .validated( participantId, - timeProof, contracts, submitterMetadata, synchronizerId, mediator, targetSynchronizer, Source(sourceRecentSnapshot.ipsSnapshot), - targetCrypto.map(_.ipsSnapshot), + targetTopology, ) .leftMap(_.toSubmissionValidationError) @@ -381,48 +381,6 @@ private[reassignment] class UnassignmentProcessingSteps( hostedStakeholders.nonEmpty && hostedStakeholders.values.forall(_.onboarding) ) - /** Wait until the participant has received and processed all topology transactions on the target - * synchronizer up to the target-synchronizer time proof 
timestamp. - * - * As we're not processing messages in parallel, delayed message processing on one synchronizer - * can block message processing on another synchronizer and thus breaks isolation across - * synchronizers. Even with parallel processing, the cursors in the request journal would not - * move forward, so event emission to the event log blocks, too. - * - * No deadlocks can arise under normal behaviour though. For a deadlock, we would need cyclic - * waiting, i.e., an unassignment request on one synchronizer D1 references a time proof on - * another synchronizer D2 and an earlier unassignment request on D2 references a time proof on - * D3 and so on to synchronizer Dn and an earlier unassignment request on Dn references a later - * time proof on D1. This, however, violates temporal causality of events. - * - * This argument breaks down for malicious participants because the participant cannot verify - * that the time proof is authentic without having processed all topology updates up to the - * declared timestamp as the sequencer's signing key might change. So a malicious participant - * could fake a time proof and set a timestamp in the future, which breaks causality. With - * unbounded parallel processing of messages, deadlocks cannot occur as this waiting runs in - * parallel with the request tracker, so time progresses on the target synchronizer and - * eventually reaches the timestamp. - */ - // TODO(i26479): Prevent deadlocks. Detect non-sensible timestamps. Verify sequencer signature on time proof. - private def getTopologySnapshotAtTimestamp( - synchronizerId: Target[PhysicalSynchronizerId], - timestamp: CantonTimestamp, - )(implicit - traceContext: TraceContext, - ec: ExecutionContext, - ): EitherT[FutureUnlessShutdown, ReassignmentProcessorError, Target[TopologySnapshot]] = - for { - targetStaticSynchronizerParameters <- reassignmentCoordination - .getStaticSynchronizerParameter(synchronizerId) - - snapshot <- reassignmentCoordination - .awaitTimestampAndGetTaggedCryptoSnapshot( - synchronizerId, - targetStaticSynchronizerParameters, - timestamp, - ) - } yield snapshot.map(_.ipsSnapshot) - override def constructPendingDataAndResponse( parsedRequest: ParsedReassignmentRequest[FullUnassignmentTree], reassignmentLookup: ReassignmentLookup, @@ -440,8 +398,6 @@ private[reassignment] class UnassignmentProcessingSteps( val requestCounter = parsedRequest.rc val isReassigningParticipant = fullTree.isReassigningParticipant(participantId) - val unassignmentValidation = new UnassignmentValidation(participantId, contractAuthenticator) - if (isReassigningParticipant) { reassignmentCoordination.addPendingUnassignment( parsedRequest.reassignmentId, @@ -449,28 +405,42 @@ private[reassignment] class UnassignmentProcessingSteps( ) } - for { - targetTopologyO <- - if (isReassigningParticipant) - getTopologySnapshotAtTimestamp( - fullTree.targetSynchronizer, - fullTree.targetTimeProof.timestamp, - ).map(Option(_)) - else EitherT.pure[FutureUnlessShutdown, ReassignmentProcessorError](None) - - unassignmentValidationResult <- unassignmentValidation.perform( - targetTopologyO, - activenessF, - )(parsedRequest) + val unassignmentValidation = UnassignmentValidation( + isReassigningParticipant, + participantId, + contractValidator, + activenessF, + reassignmentCoordination, + ) + for { + unassignmentValidationResult <- unassignmentValidation.perform(parsedRequest) } yield { + val confirmationResponseF = + if ( + 
unassignmentValidationResult.reassigningParticipantValidationResult.isTargetTsValidatable + ) { + createConfirmationResponses( + parsedRequest.requestId, + parsedRequest.malformedPayloads, + protocolVersion.unwrap, + unassignmentValidationResult, + ) + } else { + logger.info( + s"Sending an abstain verdict for ${unassignmentValidationResult.hostedConfirmingReassigningParties} because target timestamp is not validatable" + ) + FutureUnlessShutdown.pure( + createAbstainResponse( + parsedRequest.requestId, + unassignmentValidationResult.rootHash, + s"Non-validatable target timestamp when processing unassignment ${parsedRequest.reassignmentId}", + unassignmentValidationResult.hostedConfirmingReassigningParties, + ) + ) + } val responseF = - createConfirmationResponses( - parsedRequest.requestId, - parsedRequest.malformedPayloads, - protocolVersion.unwrap, - unassignmentValidationResult, - ).map(_.map((_, Recipients.cc(parsedRequest.mediator)))) + confirmationResponseF.map(_.map((_, Recipients.cc(parsedRequest.mediator)))) // We consider that we rejected if at least one of the responses is a "reject" val locallyRejectedF = responseF.map( @@ -558,7 +528,7 @@ private[reassignment] class UnassignmentProcessingSteps( } yield CommitAndStoreContractsAndPublishEvent( None, Seq.empty, - eventO.map(event => _ => event), + eventO.map(event => _ => _ => event), ) def mergeRejectionReasons( @@ -627,12 +597,11 @@ private[reassignment] class UnassignmentProcessingSteps( triggerAssignmentWhenExclusivityTimeoutExceeded(pendingRequestData) else EitherT.pure[FutureUnlessShutdown, ReassignmentProcessorError](()) - reassignmentAccepted <- EitherT.fromEither[FutureUnlessShutdown]( + reassignmentAccepted = unassignmentValidationResult.createReassignmentAccepted( participantId, requestId.unwrap, ) - ) } yield CommitAndStoreContractsAndPublishEvent( commitSetFO, Seq.empty, @@ -671,8 +640,10 @@ private[reassignment] class UnassignmentProcessingSteps( val t0 = pendingRequestData.unassignmentValidationResult.targetTimestamp for { - targetStaticSynchronizerParameters <- reassignmentCoordination - .getStaticSynchronizerParameter(targetSynchronizer) + targetStaticSynchronizerParameters <- EitherT.fromEither[FutureUnlessShutdown]( + reassignmentCoordination + .getStaticSynchronizerParameter(targetSynchronizer) + ) automaticAssignment <- AutomaticAssignment .perform( diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentProcessor.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentProcessor.scala index 1fc352c85b..80cb6fc424 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentProcessor.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentProcessor.scala @@ -20,7 +20,7 @@ import com.digitalasset.canton.participant.sync.SyncEphemeralState import com.digitalasset.canton.protocol.StaticSynchronizerParameters import com.digitalasset.canton.sequencing.client.SequencerClient import com.digitalasset.canton.topology.{ParticipantId, PhysicalSynchronizerId} -import com.digitalasset.canton.util.ContractAuthenticator +import com.digitalasset.canton.util.ContractValidator import com.digitalasset.canton.util.ReassignmentTag.Source import com.digitalasset.canton.version.ProtocolVersion @@ -34,6 +34,7 @@ class UnassignmentProcessor( 
inFlightSubmissionSynchronizerTracker: InFlightSubmissionSynchronizerTracker, ephemeral: SyncEphemeralState, synchronizerCrypto: SynchronizerCryptoClient, + contractValidator: ContractValidator, seedGenerator: SeedGenerator, sequencerClient: SequencerClient, override protected val timeouts: ProcessingTimeout, @@ -56,7 +57,7 @@ class UnassignmentProcessor( synchronizerCrypto, seedGenerator, staticSynchronizerParameters, - ContractAuthenticator(synchronizerCrypto.pureCrypto), + contractValidator, sourceProtocolVersion, loggerFactory, ), diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentRequest.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentRequest.scala index 8fbd721ea8..75f03df9a3 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentRequest.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentRequest.scala @@ -11,7 +11,7 @@ import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.participant.protocol.reassignment.UnassignmentValidationError.PackageIdUnknownOrUnvetted import com.digitalasset.canton.participant.protocol.submission.UsableSynchronizers import com.digitalasset.canton.protocol.ReassignmentId -import com.digitalasset.canton.sequencing.protocol.{MediatorGroupRecipient, TimeProof} +import com.digitalasset.canton.sequencing.protocol.MediatorGroupRecipient import com.digitalasset.canton.topology.client.TopologySnapshot import com.digitalasset.canton.topology.{ParticipantId, PhysicalSynchronizerId} import com.digitalasset.canton.tracing.TraceContext @@ -24,9 +24,8 @@ import scala.concurrent.ExecutionContext * * @param reassigningParticipants * The list of reassigning participants - * @param targetTimeProof - * a sequenced event that the submitter has recently observed on the target synchronizer. - * Determines the timestamp of the topology at the target synchronizer. + * @param targetTimestamp + * The timestamp of the topology at the target synchronizer to be used for validation. 
*/ final case class UnassignmentRequest( submitterMetadata: ReassignmentSubmitterMetadata, @@ -35,7 +34,7 @@ final case class UnassignmentRequest( sourceSynchronizer: Source[PhysicalSynchronizerId], sourceMediator: MediatorGroupRecipient, targetSynchronizer: Target[PhysicalSynchronizerId], - targetTimeProof: TimeProof, + targetTimestamp: Target[CantonTimestamp], ) { private val sourceProtocolVersion = sourceSynchronizer.map(_.protocolVersion) @@ -72,7 +71,7 @@ final case class UnassignmentRequest( viewSalt, contracts, targetSynchronizer, - targetTimeProof, + targetTimestamp, sourceProtocolVersion, ) @@ -84,7 +83,6 @@ object UnassignmentRequest { def validated( participantId: ParticipantId, - timeProof: TimeProof, contracts: ContractsReassignmentBatch, submitterMetadata: ReassignmentSubmitterMetadata, sourcePSId: Source[PhysicalSynchronizerId], @@ -146,7 +144,7 @@ object UnassignmentRequest { sourcePSId, sourceMediator, targetPSId, - timeProof, + targetTopology.map(_.timestamp), ) UnassignmentRequestValidated( diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentValidation.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentValidation.scala index 0494ed074b..7de291f120 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentValidation.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentValidation.scala @@ -5,101 +5,46 @@ package com.digitalasset.canton.participant.protocol.reassignment import cats.data.* import cats.syntax.functor.* -import cats.syntax.traverse.* -import com.digitalasset.canton.LfPartyId -import com.digitalasset.canton.data.{FullUnassignmentTree, ReassignmentRef, UnassignmentData} +import com.digitalasset.canton.data.{ + CantonTimestamp, + FullUnassignmentTree, + ReassignmentRef, + UnassignmentData, +} import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.participant.protocol.ProcessingSteps import com.digitalasset.canton.participant.protocol.conflictdetection.ActivenessResult +import com.digitalasset.canton.participant.protocol.reassignment.GetTopologyAtTimestamp import com.digitalasset.canton.participant.protocol.reassignment.ReassignmentProcessingSteps.* import com.digitalasset.canton.participant.protocol.reassignment.UnassignmentValidationResult.ReassigningParticipantValidationResult import com.digitalasset.canton.participant.protocol.validation.AuthenticationValidator import com.digitalasset.canton.topology.ParticipantId import com.digitalasset.canton.topology.client.TopologySnapshot import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.ContractAuthenticator +import com.digitalasset.canton.util.ContractValidator import com.digitalasset.canton.util.ReassignmentTag.{Source, Target} import scala.concurrent.ExecutionContext -private[reassignment] class UnassignmentValidation( - participantId: ParticipantId, - contractAuthenticator: ContractAuthenticator, -)(implicit val ec: ExecutionContext) - extends ReassignmentValidation[ - FullUnassignmentTree, - UnassignmentValidationResult.CommonValidationResult, - UnassignmentValidationResult.ReassigningParticipantValidationResult, - ] { - - /** @param targetTopology - * Defined if and only if the participant is reassigning - */ - def perform( - targetTopology: 
Option[Target[TopologySnapshot]], - activenessF: FutureUnlessShutdown[ActivenessResult], - )(parsedRequest: ParsedReassignmentRequest[FullUnassignmentTree])(implicit +sealed trait UnassignmentValidation { + + def perform(parsedRequest: ParsedReassignmentRequest[FullUnassignmentTree])(implicit ec: ExecutionContext, traceContext: TraceContext, - ): EitherT[FutureUnlessShutdown, ReassignmentProcessorError, UnassignmentValidationResult] = { - val fullTree = parsedRequest.fullViewTree - val sourceTopology = Source(parsedRequest.snapshot.ipsSnapshot) - val isReassigningParticipant = fullTree.isReassigningParticipant(participantId) - - for { - commonValidationResult <- EitherT.right( - performCommonValidations( - parsedRequest, - activenessF, - ) - ) - - // `targetTopology` is defined only for reassigning participants - reassigningParticipantValidationResult <- targetTopology match { - case Some(targetTopology) => - performValidationForReassigningParticipants(parsedRequest, targetTopology) - case None => - EitherT.right(FutureUnlessShutdown.pure(ReassigningParticipantValidationResult(Nil))) - } - - hostedConfirmingReassigningParties <- EitherT.right( - if (isReassigningParticipant) - sourceTopology.unwrap.canConfirm( - participantId, - parsedRequest.fullViewTree.confirmingParties, - ) - else - FutureUnlessShutdown.pure(Set.empty[LfPartyId]) - ) + ): EitherT[FutureUnlessShutdown, ReassignmentProcessorError, UnassignmentValidationResult] - assignmentExclusivity <- targetTopology.traverse { targetTopology => - ProcessingSteps - .getAssignmentExclusivity(targetTopology, fullTree.targetTimeProof.timestamp) - .leftMap[ReassignmentProcessorError]( - ReassignmentParametersError(fullTree.targetSynchronizer.unwrap, _) - ) - } - - } yield UnassignmentValidationResult( - unassignmentData = UnassignmentData(fullTree, parsedRequest.requestTimestamp), - rootHash = parsedRequest.rootHash, - hostedConfirmingReassigningParties = hostedConfirmingReassigningParties, - assignmentExclusivity = assignmentExclusivity, - commonValidationResult = commonValidationResult, - reassigningParticipantValidationResult = reassigningParticipantValidationResult, - ) - } - - override def performCommonValidations( + protected def performCommonValidations( parsedRequest: ParsedReassignmentRequest[FullUnassignmentTree], + contractValidator: ContractValidator, activenessF: FutureUnlessShutdown[ActivenessResult], )(implicit - traceContext: TraceContext + ec: ExecutionContext, + traceContext: TraceContext, ): FutureUnlessShutdown[UnassignmentValidationResult.CommonValidationResult] = { val fullTree = parsedRequest.fullViewTree val sourceTopologySnapshot = Source(parsedRequest.snapshot.ipsSnapshot) val authenticationResultET = - ReassignmentValidation.checkMetadata(contractAuthenticator, fullTree) + ReassignmentValidation.authenticateContractAndStakeholders(contractValidator, fullTree) for { activenessResult <- activenessF @@ -127,12 +72,133 @@ private[reassignment] class UnassignmentValidation( submitterCheckResult = submitterCheckResult, ) } - override type ReassigningParticipantValidationData = Target[TopologySnapshot] +} + +object UnassignmentValidation { + + def apply( + isReassigningParticipant: Boolean, + participantId: ParticipantId, + contractValidator: ContractValidator, + activenessF: FutureUnlessShutdown[ActivenessResult], + getTopologyAtTs: GetTopologyAtTimestamp, + ): UnassignmentValidation = + if (isReassigningParticipant) + ReassigningParticipantUnassignmentValidator( + participantId, + contractValidator, + activenessF, + 
getTopologyAtTs, + ) + else NonReassigningParticipantUnassignmentValidator(contractValidator, activenessF) +} + +private final case class NonReassigningParticipantUnassignmentValidator( + contractValidator: ContractValidator, + activenessF: FutureUnlessShutdown[ActivenessResult], +) extends UnassignmentValidation { + def perform(parsedRequest: ParsedReassignmentRequest[FullUnassignmentTree])(implicit + ec: ExecutionContext, + traceContext: TraceContext, + ): EitherT[FutureUnlessShutdown, ReassignmentProcessorError, UnassignmentValidationResult] = + EitherT.right( + performCommonValidations(parsedRequest, contractValidator, activenessF) + .map { commonValidationResult => + UnassignmentValidationResult( + unassignmentData = + UnassignmentData(parsedRequest.fullViewTree, parsedRequest.requestTimestamp), + rootHash = parsedRequest.rootHash, + commonValidationResult = commonValidationResult, + // The below fields all pertain to reassigning participants. + reassigningParticipantValidationResult = ReassigningParticipantValidationResult(Nil), + assignmentExclusivity = None, + hostedConfirmingReassigningParties = Set.empty, + ) + } + ) +} + +private final case class ReassigningParticipantUnassignmentValidator( + participantId: ParticipantId, + contractValidator: ContractValidator, + activenessF: FutureUnlessShutdown[ActivenessResult], + getTopologyAtTs: GetTopologyAtTimestamp, +) extends UnassignmentValidation { + + def perform(parsedRequest: ParsedReassignmentRequest[FullUnassignmentTree])(implicit + ec: ExecutionContext, + traceContext: TraceContext, + ): EitherT[FutureUnlessShutdown, ReassignmentProcessorError, UnassignmentValidationResult] = { + val fullTree = parsedRequest.fullViewTree + val sourceTopology = Source(parsedRequest.snapshot.ipsSnapshot) + + for { + commonValidationResult <- EitherT.right( + performCommonValidations( + parsedRequest, + contractValidator, + activenessF, + ) + ) + targetTopologyO <- getTopologyAtTs.maybeAwaitTopologySnapshot( + fullTree.targetSynchronizer, + fullTree.targetTimestamp, + ) + resultsWithTargetTopology <- targetTopologyO match { + case Some(targetTopology) => + performValidationWithTargetTopology(parsedRequest, targetTopology) + case None => + val results = (ReassigningParticipantValidationResult.TargetTimestampTooFarInFuture, None) + EitherT.right(FutureUnlessShutdown.pure(results)) + } + (reassigningParticipantValidationResult, assignmentExclusivity) = resultsWithTargetTopology + hostedConfirmingReassigningParties <- EitherT.right( + sourceTopology.unwrap + .canConfirm(participantId, parsedRequest.fullViewTree.confirmingParties) + ) + + } yield UnassignmentValidationResult( + unassignmentData = + UnassignmentData(parsedRequest.fullViewTree, parsedRequest.requestTimestamp), + rootHash = parsedRequest.rootHash, + commonValidationResult = commonValidationResult, + // The below fields all pertain to reassigning participants. 
+ reassigningParticipantValidationResult = reassigningParticipantValidationResult, + assignmentExclusivity = assignmentExclusivity, + hostedConfirmingReassigningParties = hostedConfirmingReassigningParties, + ) + } - override def performValidationForReassigningParticipants( + private def performValidationWithTargetTopology( parsedRequest: ParsedReassignmentRequest[FullUnassignmentTree], targetTopology: Target[TopologySnapshot], - )(implicit traceContext: TraceContext): EitherT[ + )(implicit + ec: ExecutionContext, + traceContext: TraceContext, + ): EitherT[ + FutureUnlessShutdown, + ReassignmentProcessorError, + (ReassigningParticipantValidationResult, Option[Target[CantonTimestamp]]), + ] = for { + reassigningParticipantValidationResult <- performValidationForReassigningParticipants( + parsedRequest, + targetTopology, + ) + fullTree = parsedRequest.fullViewTree + assignmentExclusivity <- ProcessingSteps + .getAssignmentExclusivity(targetTopology, fullTree.targetTimestamp) + .leftMap[ReassignmentProcessorError]( + ReassignmentParametersError(fullTree.targetSynchronizer.unwrap, _) + ) + } yield (reassigningParticipantValidationResult, Some(assignmentExclusivity)) + + private def performValidationForReassigningParticipants( + parsedRequest: ParsedReassignmentRequest[FullUnassignmentTree], + targetTopology: Target[TopologySnapshot], + )(implicit + ec: ExecutionContext, + traceContext: TraceContext, + ): EitherT[ FutureUnlessShutdown, ReassignmentProcessorError, ReassigningParticipantValidationResult, diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentValidationError.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentValidationError.scala index 1ed873632f..ccd6330cc8 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentValidationError.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentValidationError.scala @@ -16,4 +16,8 @@ object UnassignmentValidationError { override def message: String = s"Cannot unassign contracts `$contractIds`: ${unknownTo.mkString(", ")}" } + + case object TargetTimestampTooFarInFuture extends UnassignmentValidationError { + override def message: String = "Target timestamp is too far in the future" + } } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentValidationResult.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentValidationResult.scala index bdb1f610cb..d460422751 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentValidationResult.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentValidationResult.scala @@ -4,7 +4,6 @@ package com.digitalasset.canton.participant.protocol.reassignment import cats.data.EitherT -import cats.syntax.either.* import cats.syntax.functor.* import com.digitalasset.canton.LfPartyId import com.digitalasset.canton.data.{ @@ -21,13 +20,10 @@ import com.digitalasset.canton.ledger.participant.state.{ Update, } import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.participant.protocol.ProcessingSteps.InternalContractIds 
import com.digitalasset.canton.participant.protocol.conflictdetection.{ActivenessResult, CommitSet} -import com.digitalasset.canton.participant.protocol.reassignment.ReassignmentProcessingSteps.{ - FieldConversionError, - ReassignmentProcessorError, -} import com.digitalasset.canton.participant.protocol.validation.AuthenticationError -import com.digitalasset.canton.protocol.{ReassignmentId, RootHash} +import com.digitalasset.canton.protocol.{ReassignmentId, RootHash, UpdateId} import com.digitalasset.canton.topology.{ParticipantId, PhysicalSynchronizerId} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ReassignmentTag @@ -47,7 +43,7 @@ final case class UnassignmentValidationResult( unassignmentData.sourcePSId val targetSynchronizer: Target[PhysicalSynchronizerId] = unassignmentData.targetPSId val stakeholders: Set[LfPartyId] = unassignmentData.stakeholders.all - val targetTimestamp: CantonTimestamp = unassignmentData.targetTimestamp + val targetTimestamp: Target[CantonTimestamp] = unassignmentData.targetTimestamp override def reassignmentId: ReassignmentId = unassignmentData.reassignmentId @@ -82,54 +78,51 @@ final case class UnassignmentValidationResult( recordTime: CantonTimestamp, )(implicit traceContext: TraceContext - ): Either[ReassignmentProcessorError, AcsChangeFactory => Update.SequencedReassignmentAccepted] = - for { - updateId <- - rootHash.asLedgerTransactionId - .leftMap[ReassignmentProcessorError]( - FieldConversionError(reassignmentId, "Transaction Id", _) - ) - - completionInfo = - Option.when( - participantId == submitterMetadata.submittingParticipant - )( - CompletionInfo( - actAs = List(submitterMetadata.submitter), - userId = submitterMetadata.userId, - commandId = submitterMetadata.commandId, - optDeduplicationPeriod = None, - submissionId = submitterMetadata.submissionId, - ) + ): AcsChangeFactory => InternalContractIds => Update.SequencedReassignmentAccepted = { + val updateId = rootHash + val completionInfo = + Option.when( + participantId == submitterMetadata.submittingParticipant + )( + CompletionInfo( + actAs = List(submitterMetadata.submitter), + userId = submitterMetadata.userId, + commandId = submitterMetadata.commandId, + optDeduplicationPeriod = None, + submissionId = submitterMetadata.submissionId, ) - } yield (acsChangeFactory: AcsChangeFactory) => - Update.SequencedReassignmentAccepted( - optCompletionInfo = completionInfo, - workflowId = submitterMetadata.workflowId, - updateId = updateId, - reassignmentInfo = ReassignmentInfo( - sourceSynchronizer = sourceSynchronizer.map(_.logical), - targetSynchronizer = targetSynchronizer.map(_.logical), - submitter = Option(submitterMetadata.submitter), - reassignmentId = reassignmentId, - isReassigningParticipant = isReassigningParticipant, - ), - reassignment = - Reassignment.Batch(contracts.contracts.zipWithIndex.map { case (reassign, idx) => - Reassignment.Unassign( - contractId = reassign.contract.contractId, - templateId = reassign.templateId, - packageName = reassign.packageName, - stakeholders = contracts.stakeholders.all, - assignmentExclusivity = assignmentExclusivity.map(_.unwrap.toLf), - reassignmentCounter = reassign.counter.unwrap, - nodeId = idx, - ) - }), - recordTime = recordTime, - synchronizerId = sourceSynchronizer.unwrap.logical, - acsChangeFactory = acsChangeFactory, ) + (acsChangeFactory: AcsChangeFactory) => + (internalContractIds: InternalContractIds) => + Update.SequencedReassignmentAccepted( + optCompletionInfo = completionInfo, + workflowId = 
submitterMetadata.workflowId, + updateId = UpdateId.fromRootHash(updateId), + reassignmentInfo = ReassignmentInfo( + sourceSynchronizer = sourceSynchronizer.map(_.logical), + targetSynchronizer = targetSynchronizer.map(_.logical), + submitter = Option(submitterMetadata.submitter), + reassignmentId = reassignmentId, + isReassigningParticipant = isReassigningParticipant, + ), + reassignment = + Reassignment.Batch(contracts.contracts.zipWithIndex.map { case (reassign, idx) => + Reassignment.Unassign( + contractId = reassign.contract.contractId, + templateId = reassign.templateId, + packageName = reassign.packageName, + stakeholders = contracts.stakeholders.all, + assignmentExclusivity = assignmentExclusivity.map(_.unwrap.toLf), + reassignmentCounter = reassign.counter.unwrap, + nodeId = idx, + ) + }), + recordTime = recordTime, + synchronizerId = sourceSynchronizer.unwrap.logical, + acsChangeFactory = acsChangeFactory, + internalContractIds = internalContractIds, + ) + } } object UnassignmentValidationResult { @@ -151,5 +144,17 @@ object UnassignmentValidationResult { final case class ReassigningParticipantValidationResult( errors: Seq[ReassignmentValidationError] - ) extends ReassignmentValidationResult.ReassigningParticipantValidationResult + ) extends ReassignmentValidationResult.ReassigningParticipantValidationResult { + def isTargetTsValidatable: Boolean = !errors.exists { + case UnassignmentValidationError.TargetTimestampTooFarInFuture => true + case _ => false + } + } + + object ReassigningParticipantValidationResult { + val TargetTimestampTooFarInFuture: ReassigningParticipantValidationResult = + ReassigningParticipantValidationResult( + Seq(UnassignmentValidationError.TargetTimestampTooFarInFuture) + ) + } } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/TransactionConfirmationRequestFactory.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/TransactionConfirmationRequestFactory.scala index f706b2c941..8e8109af2f 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/TransactionConfirmationRequestFactory.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/TransactionConfirmationRequestFactory.scala @@ -46,7 +46,7 @@ import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.client.TopologySnapshot import com.digitalasset.canton.topology.transaction.ParticipantPermission.Submission import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.MonadUtil +import com.digitalasset.canton.util.{ContractHasher, MonadUtil} import com.digitalasset.canton.version.ProtocolVersion import scala.concurrent.ExecutionContext @@ -362,6 +362,7 @@ object TransactionConfirmationRequestFactory { synchronizerId: PhysicalSynchronizerId, )( cryptoOps: HashOps & HmacOps, + hasher: ContractHasher, seedGenerator: SeedGenerator, loggingConfig: LoggingConfig, loggerFactory: NamedLoggerFactory, @@ -372,8 +373,9 @@ object TransactionConfirmationRequestFactory { submitterNode, synchronizerId, // TODO(#23971): Make this dependent on the protocol version when introducing V2 contract IDs - AuthenticatedContractIdVersionV11, + AuthenticatedContractIdVersionV12, cryptoOps, + hasher, loggerFactory, ) diff --git 
a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/TransactionTreeFactory.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/TransactionTreeFactory.scala
index f61879db54..005aec8058 100644
--- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/TransactionTreeFactory.scala
+++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/TransactionTreeFactory.scala
@@ -32,7 +32,7 @@ import com.digitalasset.canton.tracing.TraceContext
 import com.digitalasset.daml.lf.transaction.TransactionErrors.KeyInputError
 
 import java.util.UUID
-import scala.concurrent.{ExecutionContext, Future}
+import scala.concurrent.ExecutionContext
 
 trait TransactionTreeFactory {
 
@@ -100,7 +100,7 @@ trait TransactionTreeFactory {
 object TransactionTreeFactory {
 
   type ContractInstanceOfId =
-    LfContractId => EitherT[Future, ContractLookupError, GenContractInstance]
+    LfContractId => EitherT[FutureUnlessShutdown, ContractLookupError, GenContractInstance]
 
   def contractInstanceLookup(
       contractStore: ContractLookup
@@ -110,7 +110,6 @@ object TransactionTreeFactory {
       contract <- contractStore
        .lookup(id)
        .toRight(ContractLookupError(id, "Unknown contract"))
-       .failOnShutdownToAbortException("TransactionTreeFactory.contractInstanceLookup")
     } yield contract
 
   /** Supertype for all errors than may arise during the conversion. */
@@ -149,8 +148,13 @@
   final case class ContractKeyResolutionError(error: KeyInputError)
       extends TransactionTreeConversionError {
-    override protected def pretty: Pretty[ContractKeyResolutionError] =
-      prettyOfClass(unnamedParam(_.error))
+    override protected def pretty: Pretty[ContractKeyResolutionError] = prettyOfClass(
+      unnamedParam(_.error)
+    )
+  }
+
+  final case class FailedToHashContract(error: String) extends TransactionTreeConversionError {
+    override protected def pretty: Pretty[FailedToHashContract] = prettyOfString(_.error)
   }
 
   /** Indicates that too few salts have been supplied for creating a view */
diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/TransactionTreeFactoryImpl.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/TransactionTreeFactoryImpl.scala
index f69e2a287f..70199581c8 100644
--- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/TransactionTreeFactoryImpl.scala
+++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/TransactionTreeFactoryImpl.scala
@@ -31,10 +31,9 @@ import com.digitalasset.canton.sequencing.protocol.MediatorGroupRecipient
 import com.digitalasset.canton.topology.client.TopologySnapshot
 import com.digitalasset.canton.topology.{ParticipantId, PhysicalSynchronizerId}
 import com.digitalasset.canton.tracing.TraceContext
-import com.digitalasset.canton.util.FutureInstances.*
 import com.digitalasset.canton.util.ShowUtil.*
 import com.digitalasset.canton.util.collection.MapsUtil
-import com.digitalasset.canton.util.{ErrorUtil, LfTransactionUtil, MonadUtil}
+import com.digitalasset.canton.util.{ContractHasher, ErrorUtil, LfTransactionUtil, MonadUtil}
 import com.digitalasset.daml.lf.data.Ref.PackageId
 import com.digitalasset.daml.lf.transaction.ContractStateMachine.KeyInactive
 import
com.digitalasset.daml.lf.transaction.Transaction.{ @@ -54,7 +53,7 @@ import java.util.UUID import scala.annotation.{nowarn, tailrec} import scala.collection.immutable.SortedSet import scala.collection.mutable -import scala.concurrent.{ExecutionContext, Future} +import scala.concurrent.ExecutionContext /** Factory class that can create the [[com.digitalasset.canton.data.GenTransactionTree]]s from a * [[com.digitalasset.canton.protocol.WellFormedTransaction]]. @@ -71,6 +70,7 @@ class TransactionTreeFactoryImpl( synchronizerId: PhysicalSynchronizerId, override val cantonContractIdVersion: CantonContractIdVersion, cryptoOps: HashOps & HmacOps, + hasher: ContractHasher, override protected val loggerFactory: NamedLoggerFactory, )(implicit ec: ExecutionContext) extends TransactionTreeFactory @@ -165,22 +165,17 @@ class TransactionTreeFactoryImpl( ) } - rootViews <- createRootViews(rootViewDecompositions, state, contractOfId).mapK( - FutureUnlessShutdown.outcomeK - ) + rootViews <- createRootViews(rootViewDecompositions, state, contractOfId) _ <- if (validatePackageVettings) { - val commandExecutionPackages = requiredPackagesByParty(rootViewDecompositions) - val inputContractPackages = inputContractPackagesByParty(rootViews) - val packagesByParty = - MapsUtil.mergeMapsOfSets(commandExecutionPackages, inputContractPackages) + val requiredPackageByParty = requiredPackagesByParty(rootViewDecompositions) UsableSynchronizers .checkPackagesVetted( synchronizerId = synchronizerId, snapshot = topologySnapshot, - requiredPackagesByParty = packagesByParty, - metadata.ledgerTime, + requiredPackagesByParty = requiredPackageByParty, + ledgerTime = metadata.ledgerTime, ) .leftMap[TransactionTreeConversionError](_.transformInto[UnknownPackageError]) } else EitherT.rightT[FutureUnlessShutdown, TransactionTreeConversionError](()) @@ -241,37 +236,13 @@ class TransactionTreeFactoryImpl( } } - /** @return set of packages required for input contract consistency checking, by party */ - private def inputContractPackagesByParty( - rootViews: Seq[TransactionView] - ): Map[LfPartyId, Set[PackageId]] = { - - def viewPartyPackages(view: TransactionView): Map[LfPartyId, Set[PackageId]] = { - val inputPackages = checked(view.viewParticipantData.tryUnwrap).coreInputs.values - .map(_.contract.templateId.packageId) - .toSet - val informees = checked(view.viewCommonData.tryUnwrap).viewConfirmationParameters.informees - val viewMap = informees.map(_ -> inputPackages).toMap - val subviewMap = viewsPartyPackages(view.subviews.unblindedElements) - MapsUtil.mergeMapsOfSets(subviewMap, viewMap) - } - - def viewsPartyPackages(views: Seq[TransactionView]): Map[LfPartyId, Set[PackageId]] = - views.foldLeft(Map.empty[LfPartyId, Set[PackageId]]) { case (acc, view) => - MapsUtil.mergeMapsOfSets(acc, viewPartyPackages(view)) - } - - viewsPartyPackages(rootViews) - - } - private def createRootViews( decompositions: Seq[TransactionViewDecomposition.NewView], state: State, contractOfId: ContractInstanceOfId, )(implicit traceContext: TraceContext - ): EitherT[Future, TransactionTreeConversionError, Seq[TransactionView]] = { + ): EitherT[FutureUnlessShutdown, TransactionTreeConversionError, Seq[TransactionView]] = { // collect all contract ids referenced val preloadCids = Set.newBuilder[LfContractId] @@ -302,9 +273,9 @@ class TransactionTreeFactoryImpl( .flatMap { preloaded => def fromPreloaded( cid: LfContractId - ): EitherT[Future, ContractLookupError, GenContractInstance] = + ): EitherT[FutureUnlessShutdown, ContractLookupError, 
GenContractInstance] = preloaded.get(cid) match { - case Some(value) => EitherT.fromEither(value) + case Some(value) => EitherT.fromEither[FutureUnlessShutdown](value) case None => // if we ever missed a contract during prefetching due to mistake, then we can // fallback to the original loader @@ -327,7 +298,7 @@ class TransactionTreeFactoryImpl( contractOfId: ContractInstanceOfId, )(implicit traceContext: TraceContext - ): EitherT[Future, TransactionTreeConversionError, TransactionView] = { + ): EitherT[FutureUnlessShutdown, TransactionTreeConversionError, TransactionView] = { state.signalRollbackScope(view.rbContext.rollbackScope) // reset to a fresh state with projected resolver before visiting the subtree @@ -355,15 +326,11 @@ class TransactionTreeFactoryImpl( val subviewIndex = TransactionSubviews.indices(nbSubViews).iterator val createdInView = mutable.Set.empty[LfContractId] - def fromEither[A <: TransactionTreeConversionError, B]( - either: Either[A, B] - ): EitherT[Future, TransactionTreeConversionError, B] = - EitherT.fromEither(either.leftWiden[TransactionTreeConversionError]) - for { // Compute salts - viewCommonDataSalt <- fromEither(state.nextSalt()) - viewParticipantDataSalt <- fromEither(state.nextSalt()) + viewCommonDataSalt <- EitherT.fromEither[FutureUnlessShutdown](state.nextSalt()) + viewParticipantDataSalt <- EitherT.fromEither[FutureUnlessShutdown](state.nextSalt()) + _ <- MonadUtil.sequentialTraverse_(view.allNodes) { case childView: TransactionViewDecomposition.NewView => // Compute subviews, recursively @@ -378,50 +345,57 @@ class TransactionTreeFactoryImpl( } } - case TransactionViewDecomposition.SameView(lfActionNode, nodeId, rbContext) => + case TransactionViewDecomposition.SameView(lfActionNode, nodeId, rbContext) => { + val rbScope = rbContext.rollbackScope - val suffixedNode = lfActionNode match { - case createNode: LfNodeCreate => - val suffixedNode = updateStateWithContractCreation( - nodeId, - createNode, - viewParticipantDataSalt, - viewPosition, - createIndex, - state, - ) - coreCreatedBuilder += (suffixedNode -> rbScope) - createdInView += suffixedNode.coid - createIndex += 1 - suffixedNode - case lfNode: LfActionNode => - val suffixedNode = trySuffixNode(state)(nodeId -> lfNode) - coreOtherBuilder += ((nodeId, lfNode) -> rbScope) - suffixedNode - } - suffixedNode.keyOpt.foreach { case LfGlobalKeyWithMaintainers(gkey, maintainers) => - state.keyVersionAndMaintainers += (gkey -> (suffixedNode.version -> maintainers)) - } + for { + + suffixedNode <- lfActionNode match { + case createNode: LfNodeCreate => + updateStateWithContractCreation( + nodeId, + createNode, + viewParticipantDataSalt, + viewPosition, + createIndex, + state, + ).map { suffixedNode => + coreCreatedBuilder += (suffixedNode -> rbScope) + createdInView += suffixedNode.coid + createIndex += 1 + suffixedNode + } + case lfNode: LfActionNode => + val suffixedNode = trySuffixNode(state)(nodeId -> lfNode) + coreOtherBuilder += ((nodeId, lfNode) -> rbScope) + EitherT.pure[FutureUnlessShutdown, TransactionTreeConversionError](suffixedNode) + } - state.signalRollbackScope(rbScope) + _ = suffixedNode.keyOpt.foreach { case LfGlobalKeyWithMaintainers(gkey, maintainers) => + state.keyVersionAndMaintainers += (gkey -> (suffixedNode.version -> maintainers)) + } - EitherT.fromEither[Future]({ - for { - resolutionForModeOff <- suffixedNode match { - case lookupByKey: LfNodeLookupByKey - if state.csmState.mode == ContractKeyUniquenessMode.Off => - val gkey = lookupByKey.key.globalKey - 
state.currentResolver.get(gkey).toRight(MissingContractKeyLookupError(gkey)) - case _ => Right(KeyInactive) // dummy value, as resolution is not used + _ = state.signalRollbackScope(rbScope) + + _ <- EitherT.fromEither[FutureUnlessShutdown]({ + for { + resolutionForModeOff <- suffixedNode match { + case lookupByKey: LfNodeLookupByKey + if state.csmState.mode == ContractKeyUniquenessMode.Off => + val gkey = lookupByKey.key.globalKey + state.currentResolver.get(gkey).toRight(MissingContractKeyLookupError(gkey)) + case _ => Right(KeyInactive) // dummy value, as resolution is not used + } + nextState <- state.csmState + .handleNode((), suffixedNode, resolutionForModeOff) + .leftMap(ContractKeyResolutionError.apply) + } yield { + state.csmState = nextState } - nextState <- state.csmState - .handleNode((), suffixedNode, resolutionForModeOff) - .leftMap(ContractKeyResolutionError.apply) - } yield { - state.csmState = nextState - } - }) + }) + } yield () + } } _ = state.signalRollbackScope(view.rbContext.rollbackScope) @@ -445,14 +419,14 @@ class TransactionTreeFactoryImpl( // Compute the parameters of the view seed = view.rootSeed - packagePreference <- EitherT.fromEither[Future](buildPackagePreference(view)) + packagePreference <- EitherT.fromEither[FutureUnlessShutdown](buildPackagePreference(view)) actionDescription = createActionDescription(suffixedRootNode, seed, packagePreference) viewCommonData = createViewCommonData(view, viewCommonDataSalt).fold( ErrorUtil.internalError, identity, ) viewKeyInputs = state.csmState.globalKeyInputs - resolvedK <- EitherT.fromEither[Future]( + resolvedK <- EitherT.fromEither[FutureUnlessShutdown]( resolvedKeys( viewKeyInputs, state.keyVersionAndMaintainers, @@ -472,7 +446,7 @@ class TransactionTreeFactoryImpl( ) // fast-forward the former state over the subtree - nextCsmState <- EitherT.fromEither[Future]( + nextCsmState <- EitherT.fromEither[FutureUnlessShutdown]( previousCsmState .advance( // advance ignores the resolver in mode Strict @@ -511,7 +485,9 @@ class TransactionTreeFactoryImpl( viewPosition: ViewPosition, createIndex: Int, state: State, - )(implicit traceContext: TraceContext): LfNodeCreate = { + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TransactionTreeConversionError, LfNodeCreate] = { val cantonContractInst = checked( LfTransactionUtil @@ -547,37 +523,45 @@ class TransactionTreeFactoryImpl( case _: CantonContractIdV2Version => CreationTime.Now } - val ContractIdSuffixer.RelativeSuffixResult( - suffixedCreateNode, - localContractId, - relativeSuffix, - authenticationData, - ) = contractIdSuffixer - .relativeSuffixForLocalContract( - contractSalt, - creationTime, - createNodeWithSuffixedArg, - ) - .valueOr(err => throw new IllegalArgumentException(s"Failed to compute contract ID: $err")) + hasher + .hash(createNodeWithSuffixedArg, contractIdSuffixer.contractHashingMethod) + .map { contractHash => + val ContractIdSuffixer.RelativeSuffixResult( + suffixedCreateNode, + localContractId, + relativeSuffix, + authenticationData, + ) = contractIdSuffixer + .relativeSuffixForLocalContract( + contractSalt, + creationTime, + createNodeWithSuffixedArg, + contractHash, + ) + .valueOr(err => + throw new IllegalArgumentException(s"Failed to compute contract ID: $err") + ) - state.setSuffixFor(localContractId, relativeSuffix) + state.setSuffixFor(localContractId, relativeSuffix) - val inst = LfFatContractInst.fromCreateNode( - create = suffixedCreateNode, - createTime = creationTime, - authenticationData = 
authenticationData.toLfBytes, - ) - val createdInfo = ContractInstance - .create(inst) - .valueOr(err => - throw new IllegalArgumentException( - s"Failed to create contract instance well formed instrument: $err" + val inst = LfFatContractInst.fromCreateNode( + create = suffixedCreateNode, + createTime = creationTime, + authenticationData = authenticationData.toLfBytes, ) - ) + val createdInfo = ContractInstance + .create(inst) + .valueOr(err => + throw new IllegalArgumentException( + s"Failed to create contract instance well formed instrument: $err" + ) + ) - state.setCreatedContractInfo(suffixedCreateNode.coid, createdInfo) - state.addSuffixedNode(nodeId, suffixedCreateNode) - suffixedCreateNode + state.setCreatedContractInfo(suffixedCreateNode.coid, createdInfo) + state.addSuffixedNode(nodeId, suffixedCreateNode) + suffixedCreateNode + } + .leftMap(error => FailedToHashContact(error)) } private def trySuffixNode( @@ -680,7 +664,10 @@ class TransactionTreeFactoryImpl( */ private def resolvedKeys( viewKeyInputs: Map[LfGlobalKey, KeyInput], - keyVersionAndMaintainers: collection.Map[LfGlobalKey, (LfLanguageVersion, Set[LfPartyId])], + keyVersionAndMaintainers: collection.Map[ + LfGlobalKey, + (LfSerializationVersion, Set[LfPartyId]), + ], subviewKeyResolutions: collection.Map[LfGlobalKey, LfVersioned[SerializableKeyResolution]], )(implicit traceContext: TraceContext @@ -726,7 +713,7 @@ class TransactionTreeFactoryImpl( salt: Salt, contractOfId: ContractInstanceOfId, rbContextCore: RollbackContext, - ): EitherT[Future, TransactionTreeConversionError, ViewParticipantData] = { + ): EitherT[FutureUnlessShutdown, TransactionTreeConversionError, ViewParticipantData] = { val consumedInCore = coreOtherNodes.mapFilter { case (an, rbScopeOther) => @@ -768,7 +755,7 @@ class TransactionTreeFactoryImpl( def withInstance( contractId: LfContractId - ): EitherT[Future, ContractLookupError, InputContract] = { + ): EitherT[FutureUnlessShutdown, ContractLookupError, InputContract] = { val cons = consumedInCore.contains(contractId) createdContractInfo.get(contractId) match { case Some(info) => @@ -784,7 +771,7 @@ class TransactionTreeFactoryImpl( .leftWiden[TransactionTreeConversionError] .map(_.toMap) viewParticipantData <- EitherT - .fromEither[Future]( + .fromEither[FutureUnlessShutdown]( ViewParticipantData.create(cryptoOps)( coreInputs = coreInputsWithInstances, createdCore = created, @@ -880,7 +867,6 @@ class TransactionTreeFactoryImpl( decompositions <- EitherT.right(decompositionsF) decomposition = checked(decompositions.head) view <- createView(decomposition, rootPosition, state, contractOfId) - .mapK(FutureUnlessShutdown.outcomeK) suffixedNodes = state.suffixedNodes() transform { // Recover the children case (nodeId, ne: LfNodeExercises) => @@ -958,6 +944,7 @@ object TransactionTreeFactoryImpl { synchronizerId: PhysicalSynchronizerId, cantonContractIdVersion: CantonContractIdVersion, cryptoOps: HashOps & HmacOps, + hasher: ContractHasher, loggerFactory: NamedLoggerFactory, )(implicit ex: ExecutionContext): TransactionTreeFactoryImpl = new TransactionTreeFactoryImpl( @@ -965,6 +952,7 @@ object TransactionTreeFactoryImpl { synchronizerId, cantonContractIdVersion, cryptoOps, + hasher, loggerFactory, ) @@ -1032,13 +1020,14 @@ object TransactionTreeFactoryImpl { var createdContractsInView: collection.Set[LfContractId] = Set.empty /** An [[com.digitalasset.canton.protocol.LfGlobalKey]] stores neither the - * [[com.digitalasset.canton.protocol.LfLanguageVersion]] to be used during serialization nor - * the 
maintainers, which we need to cache in case no contract is found. + * [[com.digitalasset.canton.protocol.LfSerializationVersion]] to be used during serialization + * nor the maintainers, which we need to cache in case no contract is found. * * Out parameter that stores version and maintainers for all keys that have been referenced by * an already-processed node. */ - val keyVersionAndMaintainers: mutable.Map[LfGlobalKey, (LfLanguageVersion, Set[LfPartyId])] = + val keyVersionAndMaintainers + : mutable.Map[LfGlobalKey, (LfSerializationVersion, Set[LfPartyId])] = mutable.Map.empty /** Out parameter for the [[com.digitalasset.daml.lf.transaction.ContractStateMachine.State]] diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/UsableSynchronizers.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/UsableSynchronizers.scala index 7b01c14436..fe2083219d 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/UsableSynchronizers.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/UsableSynchronizers.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.participant.protocol.submission import cats.data.EitherT +import cats.implicits.toFoldableOps import cats.syntax.alternative.* import cats.syntax.bifunctor.* import cats.syntax.parallel.* @@ -11,19 +12,23 @@ import com.daml.nonempty.NonEmpty import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.participant.protocol.submission.TransactionTreeFactory.PackageUnknownTo -import com.digitalasset.canton.protocol.{LfActionNode, LfLanguageVersion, LfVersionedTransaction} +import com.digitalasset.canton.protocol.{ + LfActionNode, + LfSerializationVersion, + LfVersionedTransaction, +} import com.digitalasset.canton.topology.client.TopologySnapshot +import com.digitalasset.canton.topology.store.UnknownOrUnvettedPackages import com.digitalasset.canton.topology.{ParticipantId, PhysicalSynchronizerId} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.{EitherTUtil, LfTransactionUtil} import com.digitalasset.canton.version.{ - DamlLfVersionToProtocolVersions, HashingSchemeVersion, + LfSerializationVersionToProtocolVersions, ProtocolVersion, } import com.digitalasset.canton.{LfPackageId, LfPartyId} import com.digitalasset.daml.lf.engine.Blinding -import com.digitalasset.daml.lf.transaction.TransactionVersion import scala.concurrent.ExecutionContext @@ -69,7 +74,7 @@ object UsableSynchronizers { ): EitherT[FutureUnlessShutdown, SynchronizerNotUsedReason, Unit] = { val requiredPackagesPerParty = Blinding.partyPackages(transaction) - val transactionVersion = transaction.version + val serializationVersion = transaction.version val packageVetted: EitherT[FutureUnlessShutdown, UnknownPackage, Unit] = checkPackagesVetted( @@ -85,7 +90,7 @@ object UsableSynchronizers { checkConfirmingParties(synchronizerId, transaction, snapshot) val compatibleProtocolVersion : EitherT[FutureUnlessShutdown, UnsupportedMinimumProtocolVersion, Unit] = - checkProtocolVersion(synchronizerId, transactionVersion) + checkProtocolVersion(synchronizerId, serializationVersion) val compatibleInteractiveSubmissionVersion : EitherT[FutureUnlessShutdown, SynchronizerNotUsedReason, Unit] = checkInteractiveSubmissionVersion(synchronizerId, 
interactiveSubmissionVersionO) @@ -166,13 +171,10 @@ object UsableSynchronizers { private def unknownPackages(snapshot: TopologySnapshot, ledgerTime: CantonTimestamp)( participantIdAndRequiredPackages: (ParticipantId, Set[LfPackageId]) )(implicit - ec: ExecutionContext, - tc: TraceContext, - ): FutureUnlessShutdown[List[PackageUnknownTo]] = { + tc: TraceContext + ): FutureUnlessShutdown[UnknownOrUnvettedPackages] = { val (participantId, required) = participantIdAndRequiredPackages - snapshot - .findUnvettedPackagesOrDependencies(participantId, required, ledgerTime) - .map(notVetted => notVetted.map(PackageUnknownTo(_, participantId)).toList) + snapshot.findUnvettedPackagesOrDependencies(participantId, required, ledgerTime) } private def resolveParticipants( @@ -239,19 +241,22 @@ object UsableSynchronizers { ): EitherT[FutureUnlessShutdown, UnknownPackage, Unit] = EitherT( requiredPackages.toList - .parFlatTraverse(unknownPackages(snapshot, ledgerTime)) - .map(NonEmpty.from(_).toLeft(())) - ).leftMap(unknownTo => UnknownPackage(synchronizerId, unknownTo)) + .parTraverse(unknownPackages(snapshot, ledgerTime)) + .map(_.combineAll.unknownOrUnvetted.toList.flatMap { case (participantId, packageIds) => + packageIds.toSeq.map(packageId => PackageUnknownTo(packageId, participantId)) + }) + .map(u => NonEmpty.from(u).map(UnknownPackage(synchronizerId, _)).toLeft(())) + ) private def checkProtocolVersion( synchronizerId: PhysicalSynchronizerId, - transactionVersion: TransactionVersion, + serializationVersion: LfSerializationVersion, )(implicit ec: ExecutionContext ): EitherT[FutureUnlessShutdown, UnsupportedMinimumProtocolVersion, Unit] = { val minimumPVForTransaction = - DamlLfVersionToProtocolVersions.getMinimumSupportedProtocolVersion( - transactionVersion + LfSerializationVersionToProtocolVersions.getMinimumSupportedProtocolVersion( + serializationVersion ) EitherTUtil.condUnitET( @@ -259,7 +264,7 @@ object UsableSynchronizers { UnsupportedMinimumProtocolVersion( synchronizerId, minimumPVForTransaction, - transactionVersion, + serializationVersion, ), ) } @@ -290,7 +295,7 @@ object UsableSynchronizers { final case class UnsupportedMinimumProtocolVersion( synchronizerId: PhysicalSynchronizerId, requiredPV: ProtocolVersion, - lfVersion: LfLanguageVersion, + lfVersion: LfSerializationVersion, ) extends SynchronizerNotUsedReason { val currentPV: ProtocolVersion = synchronizerId.protocolVersion diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/ContractsReassigner.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/ContractsReassigner.scala index accb3a579a..8ec5f250eb 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/ContractsReassigner.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/ContractsReassigner.scala @@ -10,11 +10,16 @@ import com.digitalasset.canton.LfPartyId import com.digitalasset.canton.data.ReassignmentSubmitterMetadata import com.digitalasset.canton.error.TransactionRoutingError import com.digitalasset.canton.error.TransactionRoutingError.AutomaticReassignmentForTransactionFailure -import com.digitalasset.canton.ledger.participant.state.{SubmitterInfo, SynchronizerRank} +import com.digitalasset.canton.ledger.participant.state.{ + RoutingSynchronizerState, + SubmitterInfo, + SynchronizerRank, +} import 
com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.participant.sync.ConnectedSynchronizersLookup import com.digitalasset.canton.protocol.* +import com.digitalasset.canton.topology.client.TopologySnapshot import com.digitalasset.canton.topology.{ParticipantId, PhysicalSynchronizerId} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ReassignmentTag.{Source, Target} @@ -30,12 +35,14 @@ private[routing] class ContractsReassigner( def reassign( synchronizerRankTarget: SynchronizerRank, submitterInfo: SubmitterInfo, + synchronizerState: RoutingSynchronizerState, )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, TransactionRoutingError, Unit] = if (synchronizerRankTarget.reassignments.nonEmpty) { + val targetPSId = synchronizerRankTarget.synchronizerId logger.info( - s"Automatic transaction reassignment to synchronizer ${synchronizerRankTarget.synchronizerId}" + s"Automatic transaction reassignment to synchronizer $targetPSId" ) def getStakeholders( @@ -74,20 +81,31 @@ private[routing] class ContractsReassigner( ((LfPartyId, PhysicalSynchronizerId, Stakeholders), Iterable[LfContractId]) ]) .parTraverse_ { case ((lfParty, sourceSynchronizerId, _), cids) => - perform( - Source(sourceSynchronizerId), - Target(synchronizerRankTarget.synchronizerId), - ReassignmentSubmitterMetadata( - submitter = lfParty, - submittingParticipant, - submitterInfo.commandId, - submitterInfo.submissionId, - submitterInfo.userId, - workflowId = None, - ), - cids.toSeq, - ) - .mapK(FutureUnlessShutdown.outcomeK) + for { + sourceTopology <- EitherT.fromEither[FutureUnlessShutdown]( + synchronizerState.getTopologySnapshotFor(sourceSynchronizerId) + ) + targetTopology <- EitherT.fromEither[FutureUnlessShutdown]( + synchronizerState.getTopologySnapshotFor(targetPSId) + ) + + _ <- perform( + Source(sourceSynchronizerId), + Source(sourceTopology), + Target(targetPSId), + Target(targetTopology), + ReassignmentSubmitterMetadata( + submitter = lfParty, + submittingParticipant, + submitterInfo.commandId, + submitterInfo.submissionId, + submitterInfo.userId, + workflowId = None, + ), + cids.toSeq, + ) + .mapK(FutureUnlessShutdown.outcomeK) + } yield () } } yield () } else { @@ -96,7 +114,9 @@ private[routing] class ContractsReassigner( private def perform( sourceSynchronizerId: Source[PhysicalSynchronizerId], + sourceTopology: Source[TopologySnapshot], targetSynchronizerId: Target[PhysicalSynchronizerId], + targetTopology: Target[TopologySnapshot], submitterMetadata: ReassignmentSubmitterMetadata, contractIds: Seq[LfContractId], )(implicit traceContext: TraceContext): EitherT[Future, TransactionRoutingError, Unit] = { @@ -121,7 +141,12 @@ private[routing] class ContractsReassigner( ) unassignmentResult <- sourceSynchronizer - .submitUnassignments(submitterMetadata, contractIds, targetSynchronizerId) + .submitUnassignments( + submitterMetadata, + contractIds, + targetSynchronizerId, + sourceTopology, + ) .mapK(FutureUnlessShutdown.outcomeK) .semiflatMap(Predef.identity) .leftMap(_.toString) @@ -144,6 +169,7 @@ private[routing] class ContractsReassigner( .submitAssignments( submitterMetadata, unassignmentResult.reassignmentId, + targetTopology, ) .leftMap[String](err => s"Assignment failed with error $err") .flatMap { s => diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/TransactionData.scala 
b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/TransactionData.scala index 30ef1972d4..8cad3472fa 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/TransactionData.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/TransactionData.scala @@ -13,7 +13,7 @@ import com.digitalasset.canton.ledger.participant.state.{RoutingSynchronizerStat import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.protocol.{ LfContractId, - LfLanguageVersion, + LfSerializationVersion, LfVersionedTransaction, Stakeholders, } @@ -55,7 +55,7 @@ private[routing] final case class TransactionData private ( prescribedSynchronizerIdO: Option[PhysicalSynchronizerId], ) { val informees: Set[LfPartyId] = requiredPackagesPerParty.keySet - val version: LfLanguageVersion = transaction.version + val version: LfSerializationVersion = transaction.version val readers: Set[LfPartyId] = actAs.union(readAs) } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/TransactionRoutingProcessor.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/TransactionRoutingProcessor.scala index 8d69d75f03..7a6e1ffa39 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/TransactionRoutingProcessor.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/TransactionRoutingProcessor.scala @@ -6,12 +6,10 @@ package com.digitalasset.canton.participant.protocol.submission.routing import cats.data.EitherT import cats.syntax.bifunctor.* import cats.syntax.either.* -import cats.syntax.foldable.* import cats.syntax.parallel.* import cats.syntax.traverse.* import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.crypto.CryptoPureApi import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.error.TransactionRoutingError import com.digitalasset.canton.error.TransactionRoutingError.ConfigurationErrors.{ @@ -47,7 +45,7 @@ import com.digitalasset.canton.protocol.* import com.digitalasset.canton.protocol.WellFormedTransaction.WithoutSuffixes import com.digitalasset.canton.topology.{ParticipantId, PhysicalSynchronizerId, SynchronizerId} import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.{ContractAuthenticator, EitherTUtil} +import com.digitalasset.canton.util.EitherTUtil import com.digitalasset.canton.{LfKeyResolver, LfPartyId, checked} import com.digitalasset.daml.lf.data.ImmArray import com.digitalasset.daml.lf.transaction.CreationTime @@ -62,7 +60,6 @@ import scala.concurrent.ExecutionContext class TransactionRoutingProcessor( contractsReassigner: ContractsReassigner, connectedSynchronizersLookup: ConnectedSynchronizersLookup, - serializableContractAuthenticator: ContractAuthenticator, synchronizerRankComputation: SynchronizerRankComputation, synchronizerSelectorFactory: SynchronizerSelectorFactory, override protected val timeouts: ProcessingTimeout, @@ -94,23 +91,15 @@ class TransactionRoutingProcessor( logger.debug(s"Routing the transaction to synchronizer $synchronizerId") for { - // TODO(#25385) Not needed anymore if we just authenticate all 
before interpretation - // and ensure we just forward the payload inputDisclosedContracts <- EitherT .fromEither[FutureUnlessShutdown]( - for { - _ <- explicitlyDisclosedContracts.toList - .traverse_(serializableContractAuthenticator.legacyAuthenticate) - .leftMap(MalformedInputErrors.DisclosedContractAuthenticationFailed.Error.apply) - inputDisclosedContracts <- - explicitlyDisclosedContracts.toList - .parTraverse(ContractInstance.create[CreationTime.CreatedAt]) - .leftMap(MalformedInputErrors.InvalidDisclosedContract.Error.apply) - - } yield inputDisclosedContracts + explicitlyDisclosedContracts.toList + .parTraverse(ContractInstance.create[CreationTime.CreatedAt]) + .leftMap(MalformedInputErrors.InvalidDisclosedContract.Error.apply) ) + _ <- contractsReassigner - .reassign(synchronizerRankTarget, submitterInfo) + .reassign(synchronizerRankTarget, submitterInfo, synchronizerState) topologySnapshot <- EitherT .fromEither[FutureUnlessShutdown] { @@ -378,7 +367,6 @@ object TransactionRoutingProcessor { def apply( connectedSynchronizersLookup: ConnectedSynchronizersLookup, synchronizerConnectionConfigStore: SynchronizerConnectionConfigStore, - cryptoPureApi: CryptoPureApi, participantId: ParticipantId, parameters: ParticipantNodeParameters, loggerFactory: NamedLoggerFactory, @@ -405,12 +393,9 @@ object TransactionRoutingProcessor { loggerFactory = loggerFactory, ) - val serializableContractAuthenticator = ContractAuthenticator(cryptoPureApi) - new TransactionRoutingProcessor( reassigner, connectedSynchronizersLookup, - serializableContractAuthenticator, synchronizerRankComputation, synchronizerSelectorFactory, parameters.processingTimeouts, diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/AuthenticationValidator.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/AuthenticationValidator.scala index 4653ed877a..4b22de7d7e 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/AuthenticationValidator.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/AuthenticationValidator.scala @@ -37,7 +37,8 @@ import com.digitalasset.canton.participant.protocol.validation.ModelConformanceC LazyAsyncReInterpretation, LazyAsyncReInterpretationMap, } -import com.digitalasset.canton.participant.util.DAMLe.{CreateNodeEnricher, TransactionEnricher} +import com.digitalasset.canton.participant.util.DAMLe.{ContractEnricher, TransactionEnricher} +import com.digitalasset.canton.protocol.hash.HashTracer import com.digitalasset.canton.protocol.{ExternalAuthorization, RequestId} import com.digitalasset.canton.topology.{ParticipantId, PhysicalSynchronizerId} import com.digitalasset.canton.tracing.TraceContext @@ -65,8 +66,9 @@ private[protocol] object AuthenticationValidator { reInterpretedTopLevelViewsEval: LazyAsyncReInterpretationMap, synchronizerId: PhysicalSynchronizerId, transactionEnricher: TransactionEnricher, - createNodeEnricher: CreateNodeEnricher, + createNodeEnricher: ContractEnricher, logger: TracedLogger, + messagePayloadLoggingEnabled: Boolean, )(implicit traceContext: TraceContext, executionContext: ExecutionContext, @@ -104,6 +106,7 @@ private[protocol] object AuthenticationValidator { transactionEnricher = transactionEnricher, createNodeEnricher = createNodeEnricher, logger = logger, + messagePayloadLoggingEnabled = messagePayloadLoggingEnabled, ) case None => // If 
we don't have the re-interpreted transaction for this view it's either a programming error @@ -266,9 +269,10 @@ private[protocol] object AuthenticationValidator { reInterpretationET: LazyAsyncReInterpretation, synchronizerId: PhysicalSynchronizerId, transactionEnricher: TransactionEnricher, - createNodeEnricher: CreateNodeEnricher, + createNodeEnricher: ContractEnricher, requestId: RequestId, logger: TracedLogger, + messagePayloadLoggingEnabled: Boolean, )(implicit traceContext: TraceContext, executionContext: ExecutionContext, @@ -289,6 +293,13 @@ private[protocol] object AuthenticationValidator { ) ) case Right(reInterpretedTopLevelView) => + // The trace contains detailed information about the transaction and is expensive to compute + // Only compute it if message payload logging is enabled and in debug level + val hashTracer = + if (messagePayloadLoggingEnabled && logger.underlying.isDebugEnabled) { + Some(HashTracer.StringHashTracer(traceSubNodes = true)) + } else None + reInterpretedTopLevelView .computeHash( externalAuthorization.hashingSchemeVersion, @@ -300,6 +311,7 @@ private[protocol] object AuthenticationValidator { protocolVersion, transactionEnricher, createNodeEnricher, + hashTracer.getOrElse[HashTracer](HashTracer.NoOp), ) // If Hash computation is successful, verify the signature is valid .flatMap { hash => @@ -308,7 +320,14 @@ private[protocol] object AuthenticationValidator { hash, externalAuthorization, submitterMetadata.actAs, - ).map(_.toLeft(Some(hash))) + ).map { + case error @ Some(_) => + hashTracer.map(_.result).foreach { trace => + logger.debug("Transaction hash computation trace:\n" + trace) + } + error + case None => None + }.map(_.toLeft(Some(hash))) ) } .map(res => diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ExtractUsedAndCreated.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ExtractUsedAndCreated.scala index 2bbdb81eba..5ebc6883a0 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ExtractUsedAndCreated.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ExtractUsedAndCreated.scala @@ -3,17 +3,15 @@ package com.digitalasset.canton.participant.protocol.validation -import com.daml.nonempty.NonEmpty import com.digitalasset.canton.LfPartyId import com.digitalasset.canton.data.* import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.participant.protocol.TransactionProcessingSteps.ViewAbsoluteLedgerEffect +import com.digitalasset.canton.participant.protocol.LedgerEffectAbsolutizer.ViewAbsoluteLedgerEffect import com.digitalasset.canton.participant.protocol.validation.ExtractUsedAndCreated.{ CreatedContractPrep, InputContractPrep, - ViewData, } import com.digitalasset.canton.protocol.* import com.digitalasset.canton.topology.ParticipantId @@ -28,66 +26,38 @@ import scala.concurrent.ExecutionContext */ object ExtractUsedAndCreated { - private[validation] final case class ViewData( - effects: ViewAbsoluteLedgerEffect, - inRollback: Boolean, - informees: Set[LfPartyId], - ) { - def transientContracts(): Seq[LfContractId] = - // Only track transient contracts outside of rollback scopes. 
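[Editorial aside on the transient-contract rule being relocated here: a contract counts as transient when it is both created and consumed within the view's core, outside any rollback scope, so it never becomes active for the participant. A minimal Scala sketch of the same filter over simplified types follows; `CreatedCoreEntry` and `transientIn` are hypothetical stand-ins for illustration, not Canton's actual created-core record.]

object TransientSketch {
  // Simplified stand-in for a created-core entry (hypothetical fields mirroring the diff).
  final case class CreatedCoreEntry(
      contractId: String,
      consumedInCore: Boolean,
      rolledBack: Boolean,
  )

  // Mirrors the filter above: transient = created and consumed in the core, not rolled back.
  def transientIn(createdCore: Seq[CreatedCoreEntry], inRollback: Boolean): Seq[String] =
    if (inRollback) Seq.empty // rollback scopes are excluded entirely
    else
      createdCore.collect {
        case c if c.consumedInCore && !c.rolledBack => c.contractId
      }
}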
- if (!inRollback) { - val transientCore = - effects.createdCore - .filter(x => x.consumedInCore && !x.rolledBack) - .map(_.contract.contractId) - - // The participant might host only an actor and not a stakeholder of the contract that is archived in the core. - // We nevertheless add all of them here because we will intersect this set with `createdContractsOfHostedStakeholdersB` later. - // This ensures that we only cover contracts of which the participant hosts a stakeholder. - transientCore ++ effects.createdInSubviewArchivedInCore - } else { - Seq.empty - } - } - - private[validation] object ViewData { - def tryFromView(v: TransactionView): ViewData = { - val vpd = v.viewParticipantData.tryUnwrap - val effects = ViewAbsoluteLedgerEffect( - vpd.coreInputs, - vpd.createdCore, - vpd.createdInSubviewArchivedInCore, - vpd.resolvedKeys, - ) - ViewData( - effects, - inRollback = vpd.rollbackContext.inRollback, - v.viewCommonData.tryUnwrap.viewConfirmationParameters.informees, - ) + def transientContracts(effects: ViewAbsoluteLedgerEffect): Seq[LfContractId] = + // Only track transient contracts outside of rollback scopes. + if (!effects.inRollback) { + val transientCore = + effects.createdCore + .filter(x => x.consumedInCore && !x.rolledBack) + .map(_.contract.contractId) + + // The participant might host only an actor and not a stakeholder of the contract that is archived in the core. + // We nevertheless add all of them here because we will intersect this set with `createdContractsOfHostedStakeholdersB` later. + // This ensures that we only cover contracts of which the participant hosts a stakeholder. + transientCore ++ effects.createdInSubviewArchivedInCore + } else { + Seq.empty } - } - private def viewDataInPreOrder(view: TransactionView): Seq[ViewData] = { - view.subviews.assertAllUnblinded(hash => - s"View ${view.viewHash} contains an unexpected blinded subview $hash" - ) - ViewData.tryFromView(view) +: view.subviews.unblindedElements.flatMap(viewDataInPreOrder) - } - - private[validation] def extractPartyIds(viewData: Seq[ViewData]): Set[LfPartyId] = { + private[validation] def extractPartyIds( + viewData: Seq[ViewAbsoluteLedgerEffect] + ): Set[LfPartyId] = { val parties = Set.newBuilder[LfPartyId] viewData.foreach { data => parties ++= data.informees - data.effects.coreInputs.values.foreach { c => + data.coreInputs.values.foreach { c => parties ++= c.stakeholders parties ++= c.maintainers } - data.effects.createdCore.foreach { c => + data.createdCore.foreach { c => // The object invariants of metadata enforce that every maintainer is also a stakeholder. // Therefore, we don't have to explicitly add maintainers. 
parties ++= c.contract.metadata.stakeholders } - data.effects.resolvedKeys.values + data.resolvedKeys.values .collect { case Versioned(_, FreeKey(maintainers)) => maintainers } .foreach(parties ++=) } @@ -108,24 +78,19 @@ object ExtractUsedAndCreated { .toMap } - private[validation] def viewDataFromRootViews( - rootViews: Seq[TransactionView] - ): Seq[ViewData] = rootViews.flatMap(viewDataInPreOrder) - def apply( participantId: ParticipantId, - rootViews: NonEmpty[Seq[TransactionView]], + viewAbsoluteLedgerEffect: Seq[ViewAbsoluteLedgerEffect], topologySnapshot: TopologySnapshot, loggerFactory: NamedLoggerFactory, )(implicit ec: ExecutionContext, traceContext: TraceContext, ): FutureUnlessShutdown[UsedAndCreated] = { - val dataViews = viewDataFromRootViews(rootViews) - val partyIds = extractPartyIds(dataViews) + val partyIds = extractPartyIds(viewAbsoluteLedgerEffect) fetchHostedParties(partyIds, participantId, topologySnapshot).map { hostedParties => new ExtractUsedAndCreated(hostedParties, loggerFactory) - .usedAndCreated(dataViews) + .usedAndCreated(viewAbsoluteLedgerEffect) } } @@ -151,7 +116,9 @@ private[validation] class ExtractUsedAndCreated( )(implicit traceContext: TraceContext) extends NamedLogging { - private[validation] def usedAndCreated(dataViews: Seq[ViewData]): UsedAndCreated = { + private[validation] def usedAndCreated( + dataViews: Seq[ViewAbsoluteLedgerEffect] + ): UsedAndCreated = { val createdContracts = createdContractPrep(dataViews) val inputContracts = inputContractPrep(dataViews) val transientContracts = transientContractsPrep(dataViews) @@ -161,7 +128,9 @@ private[validation] class ExtractUsedAndCreated( ) } - private[validation] def inputContractPrep(dataViews: Seq[ViewData]): InputContractPrep = { + private[validation] def inputContractPrep( + dataViews: Seq[ViewAbsoluteLedgerEffect] + ): InputContractPrep = { val usedB = Map.newBuilder[LfContractId, GenContractInstance] val contractIdsOfHostedInformeeStakeholderB = Set.newBuilder[LfContractId] val contractIdsAllowedToBeUnknownB = Set.newBuilder[LfContractId] @@ -169,8 +138,8 @@ private[validation] class ExtractUsedAndCreated( val divulgedB = Map.newBuilder[LfContractId, GenContractInstance] (for { - viewData <- dataViews: Seq[ViewData] - inputContractWithMetadata <- viewData.effects.coreInputs.values + viewData <- dataViews + inputContractWithMetadata <- viewData.coreInputs.values } yield { val informees = viewData.informees val contract = inputContractWithMetadata.contract @@ -210,7 +179,9 @@ private[validation] class ExtractUsedAndCreated( ) } - private[validation] def createdContractPrep(dataViews: Seq[ViewData]): CreatedContractPrep = { + private[validation] def createdContractPrep( + dataViews: Seq[ViewAbsoluteLedgerEffect] + ): CreatedContractPrep = { val createdContractsOfHostedInformeesB = Map.newBuilder[LfContractId, Option[NewContractInstance]] @@ -221,7 +192,7 @@ private[validation] class ExtractUsedAndCreated( (for { viewData <- dataViews createdAndHosts <- - viewData.effects.createdCore.map { cc => + viewData.createdCore.map { cc => (cc, hostsAny(cc.contract.metadata.stakeholders)) } (created, hosts) = createdAndHosts @@ -242,15 +213,17 @@ private[validation] class ExtractUsedAndCreated( ) } - private def transientContractsPrep(dataViews: Seq[ViewData]): Set[LfContractId] = { + private def transientContractsPrep( + dataViews: Seq[ViewAbsoluteLedgerEffect] + ): Set[LfContractId] = { val transientContractsB = Set.newBuilder[LfContractId] (for { - viewData <- dataViews: Seq[ViewData] + viewData <- 
dataViews if hostsAny(viewData.informees) } yield { - transientContractsB ++= viewData.transientContracts() + transientContractsB ++= ExtractUsedAndCreated.transientContracts(viewData) }).discard transientContractsB.result() diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceChecker.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceChecker.scala index 796f7fba97..2bfd2c4f9c 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceChecker.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceChecker.scala @@ -5,6 +5,7 @@ package com.digitalasset.canton.participant.protocol.validation import cats.Eval import cats.data.EitherT +import cats.implicits.{toFoldableOps, toFunctorOps} import cats.syntax.alternative.* import cats.syntax.bifunctor.* import cats.syntax.parallel.* @@ -26,6 +27,7 @@ import com.digitalasset.canton.participant.protocol.validation.ModelConformanceC import com.digitalasset.canton.participant.store.ExtendedContractLookup import com.digitalasset.canton.participant.util.DAMLe import com.digitalasset.canton.participant.util.DAMLe.* +import com.digitalasset.canton.platform.store.dao.events.InputContractPackages import com.digitalasset.canton.protocol.* import com.digitalasset.canton.protocol.ContractIdAbsolutizer.{ ContractIdAbsolutizationDataV1, @@ -36,17 +38,16 @@ import com.digitalasset.canton.protocol.WellFormedTransaction.{ WithSuffixesAndMerged, WithoutSuffixes, } -import com.digitalasset.canton.protocol.hash.HashTracer.NoOp +import com.digitalasset.canton.protocol.hash.HashTracer import com.digitalasset.canton.sequencing.protocol.MediatorGroupRecipient import com.digitalasset.canton.topology.client.TopologySnapshot import com.digitalasset.canton.topology.{ParticipantId, SynchronizerId} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.collection.MapsUtil -import com.digitalasset.canton.util.{ContractAuthenticator, ErrorUtil} +import com.digitalasset.canton.util.{ContractValidator, ErrorUtil, RoseTree} import com.digitalasset.canton.version.{HashingSchemeVersion, ProtocolVersion} import com.digitalasset.canton.{LfKeyResolver, LfPartyId, checked} import com.digitalasset.daml.lf.data.Ref.{CommandId, Identifier, PackageId, PackageName} -import com.digitalasset.daml.lf.transaction.{CreationTime, FatContractInstance} import java.util.UUID import scala.concurrent.ExecutionContext @@ -63,7 +64,7 @@ class ModelConformanceChecker( reinterpreter: HasReinterpret, transactionTreeFactory: TransactionTreeFactory, participantId: ParticipantId, - contractAuthenticator: ContractAuthenticator, + contractValidator: ContractValidator, packageResolver: PackageResolver, hashOps: HashOps & HmacOps, override protected val loggerFactory: NamedLoggerFactory, @@ -82,8 +83,8 @@ class ModelConformanceChecker( * @return * the resulting LfTransaction with [[com.digitalasset.canton.protocol.LfContractId]]s only */ - private[protocol] def check( - rootViewTrees: NonEmpty[Seq[FullTransactionViewTree]], + private[protocol] def check[ViewEffect]( + rootViewTrees: NonEmpty[Seq[(FullTransactionViewTree, RoseTree[ViewEffect])]], keyResolverFor: TransactionView => LfKeyResolver, topologySnapshot: TopologySnapshot, commonData: CommonData, @@ -91,24 +92,38 @@ class 
ModelConformanceChecker( reInterpretedTopLevelViews: LazyAsyncReInterpretationMap, )(implicit traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, ErrorWithSubTransaction, Result] = { + ): EitherT[FutureUnlessShutdown, ErrorWithSubTransaction[ViewEffect], Result] = { val CommonData(transactionId, ledgerTime, preparationTime) = commonData // Previous checks in Phase 3 ensure that all the root views are sent to the same // mediator, and that they all have the same correct root hash, and therefore the // same CommonMetadata (which contains the UUID). - val mediator = rootViewTrees.head1.mediator - val transactionUuid = rootViewTrees.head1.transactionUuid + val (headViewTree, _) = rootViewTrees.head1 + val mediator = headViewTree.mediator + val transactionUuid = headViewTree.transactionUuid def findValidSubtransactions( - views: Seq[(TransactionView, ViewPosition, Option[SubmitterMetadata])] + views: Seq[ + ( + TransactionView, + RoseTree[ViewEffect], + ViewPosition, + Option[SubmitterMetadata], + ) + ] ): FutureUnlessShutdown[ ( Seq[Error], - Seq[(TransactionView, WithRollbackScope[WellFormedTransaction[WithAbsoluteSuffixes]])], + Seq[ + ( + TransactionView, + RoseTree[ViewEffect], + WithRollbackScope[WellFormedTransaction[WithAbsoluteSuffixes]], + ) + ], ) ] = views - .parTraverse { case (view, viewPos, submittingParticipantO) => + .parTraverse { case (view, effects, viewPos, submittingParticipantO) => for { wfTxE <- checkView( transactionId, @@ -126,16 +141,23 @@ class ModelConformanceChecker( ).value errorsViewsTxs <- wfTxE match { - case Right(wfTx) => FutureUnlessShutdown.pure((Seq.empty, Seq((view, wfTx)))) + case Right(wfTx) => FutureUnlessShutdown.pure((Seq.empty, Seq((view, effects, wfTx)))) // There is no point in checking subviews if we have aborted case Left(error @ DAMLeError(DAMLe.EngineAborted(_), _)) => FutureUnlessShutdown.pure((Seq(error), Seq.empty)) case Left(error) => - val subviewsWithInfo = view.subviews.unblindedElementsWithIndex.map { - case (sv, svIndex) => (sv, svIndex +: viewPos, None) - } + val subviewsWithIndex = view.subviews.unblindedElementsWithIndex + val childEffects = effects.children + ErrorUtil.requireArgument( + subviewsWithIndex.sizeCompare(childEffects) == 0, + s"Number of subviews (${subviewsWithIndex.size}) and child effects (${childEffects.size}) do not match for view at position $viewPos", + ) + val subviewsWithInfo = + subviewsWithIndex.zip(childEffects).map { case ((sv, svIndex), svEffects) => + (sv, svEffects, svIndex +: viewPos, None) + } findValidSubtransactions(subviewsWithInfo).map { case (subErrors, subViewsTxs) => // If a view is not model conformant, all its ancestors are not either. 
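[Editorial aside on the size check just above: the refactored `check` threads a `RoseTree` of view effects alongside each transaction view, and the recursion is only sound if the unblinded subviews line up one-to-one with the effect tree's children. A minimal, self-contained sketch of that pairing discipline follows; `SimpleRoseTree` and `pairSubviews` are hypothetical stand-ins, not Canton's actual `RoseTree` API.]

object SubviewPairingSketch {
  // A simplified rose tree: one value plus an ordered sequence of child trees.
  final case class SimpleRoseTree[A](value: A, children: Seq[SimpleRoseTree[A]]) {
    def preorder: Seq[A] = value +: children.flatMap(_.preorder)
  }

  // Pair each subview positionally with the corresponding child effect tree,
  // failing fast on a size mismatch.
  def pairSubviews[V, E](
      subviews: Seq[V],
      effects: SimpleRoseTree[E],
  ): Either[String, Seq[(V, SimpleRoseTree[E])]] =
    Either.cond(
      subviews.sizeCompare(effects.children) == 0,
      subviews.zip(effects.children),
      s"Number of subviews (${subviews.size}) and child effects (${effects.children.size}) do not match",
    )
}

[The sketch returns the mismatch as a recoverable `Either`; the diff instead treats it as a programming error via `ErrorUtil.requireArgument`, which throws.]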
@@ -152,28 +174,25 @@ class ModelConformanceChecker( (errorsSeq.flatten, viewsTxsSeq.flatten) } - val rootViewsWithInfo = rootViewTrees.map { viewTree => - (viewTree.view, viewTree.viewPosition, viewTree.submitterMetadataO) + val rootViewsWithInfo = rootViewTrees.map { case (viewTree, effects) => + (viewTree.view, effects, viewTree.viewPosition, viewTree.submitterMetadataO) } val resultFE = findValidSubtransactions(rootViewsWithInfo).map { case (errors, viewsTxs) => - val (views, txs) = viewsTxs.separate - - val wftxO = NonEmpty.from(txs) match { - case Some(txsNE) => - WellFormedTransaction - .merge(txsNE) - .leftMap(mergeError => - // TODO(i14473): Handle failures to merge a transaction while preserving transparency - ErrorUtil.internalError( - new IllegalStateException( - s"Model conformance check failure when merging transaction $transactionId: $mergeError" - ) + val (views, effects, txs) = viewsTxs.unzip3 + + val wftxO = NonEmpty.from(txs).flatMap { txsNE => + WellFormedTransaction + .merge(txsNE) + .leftMap(mergeError => + // TODO(i14473): Handle failures to merge a transaction while preserving transparency + ErrorUtil.internalError( + new IllegalStateException( + s"Model conformance check failure when merging transaction $transactionId: $mergeError" ) ) - .toOption - - case None => None + ) + .toOption } NonEmpty.from(errors) match { @@ -187,7 +206,9 @@ class ModelConformanceChecker( ) ) } - case Some(errorsNE) => Left(ErrorWithSubTransaction(errorsNE, wftxO, views)) + case Some(errorsNE) => + val flatEffects = effects.flatMap(_.preorder) + Left(ErrorWithSubTransaction(errorsNE, wftxO, flatEffects)) } } @@ -237,10 +258,7 @@ class ModelConformanceChecker( val seed = viewParticipantData.actionDescription.seedOption - val inputContracts = view.tryFlattenToParticipantViews - .flatMap(_.viewParticipantData.coreInputs) - .map { case (cid, InputContract(contract, _)) => cid -> contract } - .toMap + val inputContracts = view.inputContracts.fmap(_.contract) val contractAndKeyLookup = new ExtendedContractLookup(inputContracts, resolverFromView) @@ -251,7 +269,7 @@ class ModelConformanceChecker( lfTxAndMetadata <- reinterpreter .reinterpret( contractAndKeyLookup, - contractAuthenticator.authenticate, + contractValidator.authenticateHash, authorizers, cmd, ledgerTime, @@ -271,7 +289,7 @@ class ModelConformanceChecker( } private def checkView( - transactionId: TransactionId, + transactionId: UpdateId, view: TransactionView, viewPosition: ViewPosition, mediator: MediatorGroupRecipient, @@ -386,12 +404,11 @@ class ModelConformanceChecker( .parTraverse(p => snapshot .findUnvettedPackagesOrDependencies(p, packageIds, ledgerTime) - .map(p -> _) ) } yield { - val unvettedPackages = unvetted.filter { case (_, packageIds) => packageIds.nonEmpty } - Either.cond(unvettedPackages.isEmpty, (), UnvettedPackages(unvettedPackages.toMap)) + val combined = unvetted.combineAll.unknownOrUnvetted + Either.cond(combined.isEmpty, (), UnvettedPackages(combined)) }) } @@ -402,7 +419,7 @@ object ModelConformanceChecker { def apply( damlE: DAMLe, transactionTreeFactory: TransactionTreeFactory, - contractAuthenticator: ContractAuthenticator, + contractValidator: ContractValidator, participantId: ParticipantId, packageResolver: PackageResolver, hashOps: HashOps & HmacOps, @@ -412,7 +429,7 @@ object ModelConformanceChecker { damlE, transactionTreeFactory, participantId, - contractAuthenticator, + contractValidator, packageResolver, hashOps, loggerFactory, @@ -445,27 +462,29 @@ object ModelConformanceChecker { 
synchronizerId: SynchronizerId, protocolVersion: ProtocolVersion, transactionEnricher: TransactionEnricher, - createNodeEnricher: CreateNodeEnricher, + contractEnricher: ContractEnricher, + hashTracer: HashTracer, )(implicit traceContext: TraceContext, ec: ExecutionContext, ): EitherT[FutureUnlessShutdown, String, Hash] = for { // Enrich the transaction... - enrichedTransaction <- transactionEnricher( - reInterpretationResult.transaction - )(traceContext) + enrichedTransaction <- transactionEnricher(reInterpretationResult.transaction)(traceContext) .leftMap(_.toString) + // ... and the input contracts so that labels and template identifiers are set and can be included in the hash - enrichedInputContracts <- viewInputContracts.toList - .parTraverse { case (cid, storedContract) => - createNodeEnricher(storedContract.toLf)(traceContext).map { enrichedNode => - cid -> FatContractInstance.fromCreateNode( - enrichedNode, - storedContract.inst.createdAt: CreationTime, - storedContract.inst.authenticationData, - ) - } + inputContracts <- EitherT.fromEither[FutureUnlessShutdown]( + InputContractPackages + .forTransactionWithContracts(enrichedTransaction.transaction, viewInputContracts) + .leftMap(mismatch => + s"The following input contract IDs were not found in both the transaction and the provided contracts: $mismatch" + ) + ) + + enrichedInputContracts <- inputContracts.toList + .parTraverse { case (cid, (inst, targetPackageIds)) => + contractEnricher((inst, targetPackageIds))(traceContext).map(cid -> _) } .map(_.toMap) .leftMap(_.toString) @@ -486,7 +505,7 @@ object ModelConformanceChecker { ), reInterpretationResult.metadata.seeds, protocolVersion, - hashTracer = NoOp, + hashTracer = hashTracer, ) .leftMap(_.message) ) @@ -498,19 +517,12 @@ object ModelConformanceChecker { /** Enriches a model conformance error with the valid subtransaction, if any. If there is a valid * subtransaction, the list of valid subview trees will not be empty. 
*/ - final case class ErrorWithSubTransaction( + final case class ErrorWithSubTransaction[+ViewEffect]( errors: NonEmpty[Seq[Error]], validSubTransactionO: Option[WellFormedTransaction[WithSuffixesAndMerged]], - validSubViews: Seq[TransactionView], - ) extends PrettyPrinting { - require(validSubTransactionO.isEmpty == validSubViews.isEmpty) - - override protected def pretty: Pretty[ErrorWithSubTransaction] = prettyOfClass( - param("valid subtransaction", _.validSubTransactionO.toString.unquoted), - param("valid subviews", _.validSubViews), - param("errors", _.errors), - param("engine abort status", _.engineAbortStatus), - ) + validSubViewEffects: Seq[ViewEffect], + ) { + require(validSubTransactionO.isEmpty == validSubViewEffects.isEmpty) // The request computation was aborted if any error is an abort lazy val (engineAbortStatus, nonAbortErrors) = { @@ -525,6 +537,20 @@ object ModelConformanceChecker { } } + object ErrorWithSubTransaction { + implicit def prettyErrorWithSubTransaction[ViewEffect: Pretty] + : Pretty[ErrorWithSubTransaction[ViewEffect]] = { + import com.digitalasset.canton.logging.pretty.PrettyUtil.* + import com.digitalasset.canton.util.ShowUtil.* + Pretty.prettyOfClass[ErrorWithSubTransaction[ViewEffect]]( + param("errors", _.errors), + param("engine abort status", _.engineAbortStatus), + paramIfDefined("valid subtransaction", _.validSubTransactionO.map(_.toString.unquoted)), + paramIfNonEmpty("valid subview effects", _.validSubViewEffects), + ) + } + } + final case class DAMLeError(cause: DAMLe.ReinterpretationError, viewHash: ViewHash) extends Error { override protected def pretty: Pretty[DAMLeError] = prettyOfClass( @@ -632,7 +658,7 @@ object ModelConformanceChecker { } final case class Result( - transactionId: TransactionId, + transactionId: UpdateId, suffixedTransaction: WellFormedTransaction[WithSuffixesAndMerged], ) diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/TransactionValidationResult.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/TransactionValidationResult.scala index 8259ba7f69..3f137ce5bc 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/TransactionValidationResult.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/TransactionValidationResult.scala @@ -8,6 +8,7 @@ import com.digitalasset.canton.crypto.Hash import com.digitalasset.canton.data.{SubmitterMetadata, ViewPosition} import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.ErrorLoggingContext +import com.digitalasset.canton.participant.protocol.LedgerEffectAbsolutizer.ViewAbsoluteLedgerEffect import com.digitalasset.canton.participant.protocol.conflictdetection.{ActivenessResult, CommitSet} import com.digitalasset.canton.participant.protocol.validation.ContractConsistencyChecker.ReferenceToFutureContractError import com.digitalasset.canton.participant.protocol.validation.InternalConsistencyChecker.ErrorWithInternalConsistencyCheck @@ -16,7 +17,7 @@ import com.digitalasset.canton.protocol.* import com.digitalasset.canton.{LfPartyId, WorkflowId} final case class TransactionValidationResult( - transactionId: TransactionId, + transactionId: UpdateId, submitterMetadataO: Option[SubmitterMetadata], workflowIdO: Option[WorkflowId], contractConsistencyResultE: Either[List[ReferenceToFutureContractError], Unit], @@ 
-24,7 +25,7 @@ final case class TransactionValidationResult( authorizationResult: Map[ViewPosition, String], modelConformanceResultET: EitherT[ FutureUnlessShutdown, - ModelConformanceChecker.ErrorWithSubTransaction, + ModelConformanceChecker.ErrorWithSubTransaction[ViewAbsoluteLedgerEffect], ModelConformanceChecker.Result, ], internalConsistencyResultE: Either[ErrorWithInternalConsistencyCheck, Unit], diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala index 653c46e819..86b5786b13 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala @@ -10,6 +10,7 @@ import cats.syntax.functor.* import cats.syntax.parallel.* import cats.syntax.validated.* import com.daml.metrics.api.MetricsContext +import com.daml.metrics.api.MetricsContext.withEmptyMetricsContext import com.daml.nameof.NameOf.functionFullName import com.daml.nonempty.{NonEmpty, NonEmptyUtil} import com.digitalasset.base.error.{ @@ -23,12 +24,13 @@ import com.digitalasset.base.error.{ } import com.digitalasset.canton.admin.participant.v30.{ReceivedCommitmentState, SentCommitmentState} import com.digitalasset.canton.concurrent.{FutureSupervisor, Threading} -import com.digitalasset.canton.config.RequireTypes.{ - NonNegativeInt, - NonNegativeLong, - PositiveNumeric, +import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt, PositiveNumeric} +import com.digitalasset.canton.config.{ + BatchingConfig, + CommitmentSendDelay, + ProcessingTimeout, + TestingConfigInternal, } -import com.digitalasset.canton.config.{BatchingConfig, ProcessingTimeout, TestingConfigInternal} import com.digitalasset.canton.crypto.* import com.digitalasset.canton.data.{ AcsCommitmentData, @@ -104,6 +106,7 @@ import java.util.concurrent.atomic.AtomicReference import scala.annotation.tailrec import scala.collection.concurrent.TrieMap import scala.collection.immutable.{Map, SortedSet} +import scala.collection.mutable import scala.concurrent.duration.* import scala.concurrent.{ExecutionContext, blocking} import scala.math.Ordering.Implicits.* @@ -172,16 +175,25 @@ import scala.math.Ordering.Implicits.* * mode, the participant skips sending and checking commitments for some reconciliation intervals. * The parameter governing catch-up mode is: * - * @param maxCommitmentSendDelayMillis - * Optional parameter to specify the maximum delay in milliseconds for sending out commitments. - * To avoid a spike in network activity at the end of each reconciliation period, commitment - * sending is delayed by default with a random amount uniformly distributed between 0 and max. - * Commitment sending should not be delayed by more than a reconciliation interval, for two - * reasons: (1) it'll overlap with the next round of commitment sends, defeating somewhat the - * purpose of delaying commitment sends; and (2) and might not interact well with the catch-up - * mode, depending on the parameters there. If this is not specified, the maximum delay is - * testingConfig.maxCommitmentSendDelayMillis if specified, otherwise the maximum delay is 2/3 of - * the reconciliation interval. 
+ * @param commitmentSendDelay
+ *   Optional parameter to specify the minimum and maximum delay, as fractions of the
+ *   reconciliation interval, for sending out commitments. To avoid a spike in network activity
+ *   at the end of each reconciliation period, commitment sending is by default delayed by a
+ *   random amount uniformly distributed between min and max. Commitment sending should not be
+ *   delayed by more than a reconciliation interval, for two reasons: (1) It'll overlap with the
+ *   next round of commitment sends, defeating somewhat the purpose of delaying commitment sends;
+ *   and (2) It might not interact well with the catch-up mode, depending on the parameters there.
+ *   If a commitment delay is not specified, the delay is testingConfig.commitmentSendDelay. If
+ *   testingConfig.commitmentSendDelay is not set either, commitment sending is delayed by a
+ *   random amount between the default bounds of 1/3 and 5/6. If either bound is not set, we take
+ *   the default value for that bound. If the maximum bound is smaller than the minimum bound,
+ *   both revert to the default bounds.
+ *
+ * @param increasePerceivedComputationTimeForCommitments
+ *   Optional parameter that artificially increases the measured computation time for commitments,
+ *   which is used to decide whether to trigger catch-up mode. This is useful for testing catch-up
+ *   mode without having to create a large number of commitments. This parameter influences
+ *   neither the actual time spent computing commitments nor the compute metrics.
 *
 * The constructor of this class is private. Instances of this class can only be created using
 * [[AcsCommitmentProcessor.apply]], which in turn uses the Factory method in the companion object
@@ -210,7 +222,9 @@ class AcsCommitmentProcessor private (
     clock: Clock,
     exitOnFatalFailures: Boolean,
     batchingConfig: BatchingConfig,
-    maxCommitmentSendDelayMillis: Option[NonNegativeInt],
+    commitmentSendDelay: Option[CommitmentSendDelay],
+    increasePerceivedComputationTimeForCommitments: Option[java.time.Duration],
+    doNotAwaitOnCheckingIncomingCommitments: Boolean,
 )(implicit ec: ExecutionContext)
     extends AcsChangeListener
     with FlagCloseable
@@ -248,6 +262,9 @@
   // used to generate randomized commitment sending delays
   private val rand = new scala.util.Random
+  // fractions of the reconciliation interval length
+  private lazy val defaultMinDelay = 1.0 / 3.0
+  private lazy val defaultMaxDelay = 5.0 / 6.0

   /** The sequencer timestamp for which we are ready to process remote commitments.
    *
@@ -329,6 +346,11 @@
   private val reinitializationEnqueued: AtomicReference[Boolean] =
     new AtomicReference[Boolean](false)
+  // initialize with a sequence whose length is the catch-up config's skip step, in order to even
+  // out the computation duration over the skip window
+  private lazy val lastCommitmentsComputeTimes: DurationResizableRingBuffer =
+    new DurationResizableRingBuffer(0)
+
   /** Queue to serialize the access to the DB, to avoid serialization failures at SERIALIZABLE level
     */
   private val dbQueue: SimpleExecutionQueue =
@@ -448,6 +470,24 @@
     } else None
   }
+  // initialize with a sequence whose length is the catch-up config's skip step, in order to even
+  // out the computation duration over the skip window
+  private def initLastCommitmentsComputeTimes(
+      lastCatchUpConfig: Option[AcsCommitmentsCatchUpParameters]
+  ) =
+    // we initialize with a sequence of skip-many high values if increasePerceivedComputationTimeForCommitments
+    // is non-empty; otherwise we leave the buffer empty, meaning there are no previous compute times since restart
+    lastCatchUpConfig.map(cfg =>
+      if (increasePerceivedComputationTimeForCommitments.nonEmpty) {
+        lastCommitmentsComputeTimes.setCapacity(cfg.catchUpIntervalSkip.value)
+        lastCommitmentsComputeTimes.addAll(
+          Seq.fill(cfg.catchUpIntervalSkip.value)(
+            java.time.Duration.ofSeconds(Long.MaxValue)
+          )
+        )
+      }
+    )
+
   def initializeTicksOnStartup(
       timestamps: List[EffectiveTime]
   )(implicit traceContext: TraceContext): Unit = {
@@ -649,6 +689,7 @@
   ): FutureUnlessShutdown[Unit] = {
     for {
       config <- catchUpConfig(completedPeriod.toInclusive.forgetRefinement)
+      _ = initLastCommitmentsComputeTimes(config)

       // Evaluate in the beginning the catch-up conditions for simplicity
       catchingUpInProgress <- catchUpInProgress(
@@ -714,6 +755,13 @@
             batchingConfig,
           )

+        reconIntervals <- getReconciliationIntervals(
+          completedPeriod.toInclusive.forgetRefinement
+        )
+        reconIntervalLength = reconIntervals.intervals.headOption.fold(0L)(
+          _.intervalLength.duration.toMillis
+        )
+
         _ <-
           if (!catchingUpInProgress || hasCaughtUpToBoundaryRes) {
             for {
@@ -728,7 +776,7 @@
               _ <- MarkOutstandingIfNonEmpty(completedPeriod, msgs.keySet)
               _ <- persistRunningCommitments(snapshotRes)
             } yield {
-              sendCommitmentMessages(completedPeriod, msgs.toSeq)
+              sendCommitmentMessages(completedPeriod, msgs.toSeq, reconIntervalLength)
             }
           } else FutureUnlessShutdown.unit
@@ -1002,7 +1050,7 @@
       batch: Seq[OpenEnvelope[SignedProtocolMessage[AcsCommitment]]],
   )(implicit traceContext: TraceContext): HandlerResult = {
-    if (batch.lengthCompare(1) != 0) {
+    if (batch.sizeIs != 1) {
       Errors.InternalError
         .MultipleCommitmentsInBatch(psid.logical, timestamp, batch.length)
         .discard
@@ -1308,8 +1356,8 @@
   private def checkCommitment(
       commitment: AcsCommitment
-  )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] =
-    dbQueue
+  )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = {
+    val fut = dbQueue
       .executeUS(
         {
           // Make sure that the ready-for-remote check is atomic with buffering the commitment
@@ -1326,6 +1374,15 @@
       )
       .flatten
+    if (doNotAwaitOnCheckingIncomingCommitments) {
+      FutureUnlessShutdownUtil.doNotAwaitUnlessShutdown(
+        fut,
+        s"check incoming commitment for ${commitment.period} by 
${commitment.sender}", + ) + FutureUnlessShutdown.unit + } else fut + } + private def indicateReadyForRemote(timestamp: CantonTimestampSecond)(implicit traceContext: TraceContext ): Unit = { @@ -1372,6 +1429,7 @@ class AcsCommitmentProcessor private ( local: Iterable[(CommitmentPeriod, AcsCommitment.HashedCommitmentType)], lastPruningTime: Option[CantonTimestamp], possibleCatchUp: Boolean, + intervalMillis: Long, )(implicit traceContext: TraceContext): Boolean = if (local.isEmpty) { if ( @@ -1392,7 +1450,7 @@ class AcsCommitmentProcessor private ( // end of the catch-up boundary, we then reply with an empty commitment. val msg = mkCommitment(remote.sender, AcsCommitmentProcessor.emptyCommitment, remote.period) - sendCommitmentMessages(remote.period, Seq(remote.sender -> msg)) + sendCommitmentMessages(remote.period, Seq(remote.sender -> msg), intervalMillis) logger.debug( s" ${remote.sender} send a non-empty ACS, but local ACS was empty. returned an empty ACS counter-commitment." ) @@ -1467,6 +1525,13 @@ class AcsCommitmentProcessor private ( completedPeriod.toInclusive.forgetRefinement, ) + reconIntervals <- getReconciliationIntervals( + completedPeriod.toInclusive.forgetRefinement + ) + reconIntervalLength = reconIntervals.intervals.headOption.fold(0L)( + _.intervalLength.duration.toMillis + ) + _ <- MonadUtil.parTraverseWithLimit_(threadCount)(computed.toList) { case (period, counterParticipant, cmt) => logger.debug( @@ -1500,6 +1565,7 @@ class AcsCommitmentProcessor private ( List((period, cmt)), lastPruningTime.map(_.timestamp), possibleCatchUp, + reconIntervalLength, ) ) @@ -1519,6 +1585,13 @@ class AcsCommitmentProcessor private ( } + reconIntervals <- getReconciliationIntervals( + completedPeriod.toInclusive.forgetRefinement + ) + reconIntervalLength = reconIntervals.intervals.headOption.fold(0L)( + _.intervalLength.duration.toMillis + ) + // if there is a mismatch, send all fine-grained commitments between `lastSentCatchUpCommitmentTimestamp` // and `lastProcessedCatchUpCommitmentTimestamp` _ <- @@ -1531,6 +1604,7 @@ class AcsCommitmentProcessor private ( lastSentCatchUpCommitmentTimestamp, lastProcessedCatchUpCommitmentTimestamp, filterOutParticipantId = matching.map(c => c.counterParticipant), + intervalMillis = reconIntervalLength, ) } else { // send to all counter-participants from whom I have cmts, but they don't match @@ -1539,6 +1613,7 @@ class AcsCommitmentProcessor private ( lastProcessedCatchUpCommitmentTimestamp, filterInParticipantId = mismatches.map(c => c.counterParticipant), filterOutParticipantId = matching.map(c => c.counterParticipant), + reconIntervalLength, ) } } yield res @@ -1576,6 +1651,7 @@ class AcsCommitmentProcessor private ( s"Computing commitments for $period, number of stakeholder sets: ${commitmentSnapshot.keySet.size}" ) for { + catchUpConfig <- catchUpConfig(period.toInclusive.forgetRefinement) cmts <- commitments( participantId, @@ -1585,6 +1661,10 @@ class AcsCommitmentProcessor private ( Some(metrics), threadCount, cachedCommitments.getOrElse(new CachedCommitments()), + lastCommitmentsComputeTimes = + catchUpConfig.map(cfg => (cfg.catchUpIntervalSkip, lastCommitmentsComputeTimes)), + increasePerceivedComputationTimeForCommitments = + increasePerceivedComputationTimeForCommitments, ) } yield cmts.collect { @@ -1609,6 +1689,7 @@ class AcsCommitmentProcessor private ( )(implicit traceContext: TraceContext): FutureUnlessShutdown[Option[CantonTimestamp]] = config.parFlatTraverse(cfg => for { + // compute what timestamp we would need to catch-up to 
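The `doNotAwaitOnCheckingIncomingCommitments` toggle above turns the incoming-commitment check into a fire-and-forget operation. A hedged sketch of the pattern, with plain `Future` standing in for `FutureUnlessShutdown` and illustrative names:

```scala
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

// When doNotAwait is set, the check is scheduled but the caller immediately
// gets a completed unit future; failures are only observed, never awaited.
def runCheck(doNotAwait: Boolean, check: () => Future[Unit])(implicit
    ec: ExecutionContext
): Future[Unit] =
  if (doNotAwait) {
    check().onComplete {
      case Failure(e) => println(s"incoming commitment check failed: $e") // real code logs instead
      case Success(_) => ()
    }
    Future.unit
  } else check()
```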
   private def indicateReadyForRemote(timestamp: CantonTimestampSecond)(implicit
       traceContext: TraceContext
   ): Unit = {
@@ -1372,6 +1429,7 @@ class AcsCommitmentProcessor private (
       local: Iterable[(CommitmentPeriod, AcsCommitment.HashedCommitmentType)],
       lastPruningTime: Option[CantonTimestamp],
       possibleCatchUp: Boolean,
+      intervalMillis: Long,
   )(implicit traceContext: TraceContext): Boolean =
     if (local.isEmpty) {
       if (
@@ -1392,7 +1450,7 @@ class AcsCommitmentProcessor private (
         // end of the catch-up boundary, we then reply with an empty commitment.
         val msg =
           mkCommitment(remote.sender, AcsCommitmentProcessor.emptyCommitment, remote.period)
-        sendCommitmentMessages(remote.period, Seq(remote.sender -> msg))
+        sendCommitmentMessages(remote.period, Seq(remote.sender -> msg), intervalMillis)
         logger.debug(
           s" ${remote.sender} send a non-empty ACS, but local ACS was empty. returned an empty ACS counter-commitment."
         )
@@ -1467,6 +1525,13 @@ class AcsCommitmentProcessor private (
         completedPeriod.toInclusive.forgetRefinement,
       )
 
+      reconIntervals <- getReconciliationIntervals(
+        completedPeriod.toInclusive.forgetRefinement
+      )
+      reconIntervalLength = reconIntervals.intervals.headOption.fold(0L)(
+        _.intervalLength.duration.toMillis
+      )
+
       _ <- MonadUtil.parTraverseWithLimit_(threadCount)(computed.toList) {
         case (period, counterParticipant, cmt) =>
           logger.debug(
@@ -1500,6 +1565,7 @@ class AcsCommitmentProcessor private (
               List((period, cmt)),
               lastPruningTime.map(_.timestamp),
               possibleCatchUp,
+              reconIntervalLength,
             )
           )
@@ -1519,6 +1585,13 @@ class AcsCommitmentProcessor private (
       }
 
+      reconIntervals <- getReconciliationIntervals(
+        completedPeriod.toInclusive.forgetRefinement
+      )
+      reconIntervalLength = reconIntervals.intervals.headOption.fold(0L)(
+        _.intervalLength.duration.toMillis
+      )
+
       // if there is a mismatch, send all fine-grained commitments between `lastSentCatchUpCommitmentTimestamp`
       // and `lastProcessedCatchUpCommitmentTimestamp`
       _ <-
@@ -1531,6 +1604,7 @@ class AcsCommitmentProcessor private (
             lastSentCatchUpCommitmentTimestamp,
             lastProcessedCatchUpCommitmentTimestamp,
             filterOutParticipantId = matching.map(c => c.counterParticipant),
+            intervalMillis = reconIntervalLength,
           )
         } else {
           // send to all counter-participants from whom I have cmts, but they don't match
@@ -1539,6 +1613,7 @@ class AcsCommitmentProcessor private (
             lastProcessedCatchUpCommitmentTimestamp,
             filterInParticipantId = mismatches.map(c => c.counterParticipant),
             filterOutParticipantId = matching.map(c => c.counterParticipant),
+            reconIntervalLength,
           )
         }
     } yield res
@@ -1576,6 +1651,7 @@ class AcsCommitmentProcessor private (
       s"Computing commitments for $period, number of stakeholder sets: ${commitmentSnapshot.keySet.size}"
     )
     for {
+      catchUpConfig <- catchUpConfig(period.toInclusive.forgetRefinement)
       cmts <- commitments(
         participantId,
@@ -1585,6 +1661,10 @@ class AcsCommitmentProcessor private (
         Some(metrics),
         threadCount,
         cachedCommitments.getOrElse(new CachedCommitments()),
+        lastCommitmentsComputeTimes =
+          catchUpConfig.map(cfg => (cfg.catchUpIntervalSkip, lastCommitmentsComputeTimes)),
+        increasePerceivedComputationTimeForCommitments =
+          increasePerceivedComputationTimeForCommitments,
       )
     } yield cmts.collect {
@@ -1609,6 +1689,7 @@ class AcsCommitmentProcessor private (
   )(implicit traceContext: TraceContext): FutureUnlessShutdown[Option[CantonTimestamp]] =
     config.parFlatTraverse(cfg =>
       for {
+        // compute what timestamp we would need to catch up to
        sortedReconciliationIntervals <- sortedReconciliationIntervalsProvider
          .reconciliationIntervals(completedPeriodTimestamp)
        catchUpTimestamp = sortedReconciliationIntervals.intervals.headOption.flatMap(interval =>
@@ -1637,13 +1718,31 @@ class AcsCommitmentProcessor private (
         }
       )
 
+        // if we already saw commitments from a counter-participant at the time we'd need to catch up to, then
+        // we can be in a situation where we need to catch up
        comm <- catchUpTimestamp.fold(FutureUnlessShutdown.pure(Seq.empty[BufferedAcsCommitment]))(
          ts => store.queue.peekThroughAtOrAfter(ts)
        )
     } yield {
-      if (comm.nonEmpty) catchUpTimestamp
-      else None
+      if (comm.nonEmpty) {
+        // It seems we might need to catch up, but only if the participant was actually lagging behind, i.e.,
+        // it was supposed to compute commitments at the time when the counter-participant computed commitments.
+        // This is not the case when the counter-participant sends a commitment because of activity on
+        // other contracts than the ones we share with it.
+        // Also, it's not the case when there are delays in the record order publisher that make the participant slow
+        // in observing time advance, but not actually slow in computing commitments.
+        // Thus, we only enter catch-up mode if the participant is actually falling behind, i.e., when the average
+        // commitment computation time (metrics.compute times the number of commitments per period) approaches
+        // the interval length.
+        sortedReconciliationIntervals.intervals.headOption.flatMap(interval =>
+          lastCommitmentsComputeTimes
+            .averageDuration()
+            .fold[Option[CantonTimestamp]](None)(avgDuration =>
+              if (avgDuration > interval.intervalLength.duration)
+                catchUpTimestamp
+              else None
+            )
+        )
+      } else None
     }
   )
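The gating above only enters catch-up mode when the rolling average of recent commitment computation times exceeds the reconciliation interval. A minimal standalone sketch of that decision, using a plain queue in place of `DurationResizableRingBuffer` (the names here are illustrative assumptions):

```scala
import java.time.Duration
import scala.collection.mutable

final class ComputeTimeWindow(skip: Int) {
  private val times = mutable.Queue.empty[Duration]

  // record one computation time, keeping only the most recent `skip` entries
  def record(d: Duration): Unit = {
    times.enqueue(d)
    if (times.size > skip) { val _ = times.dequeue() }
  }

  // exact average in nanoseconds, via BigInt to avoid overflow
  private def averageNanos: Option[BigInt] =
    if (times.isEmpty) None
    else {
      val billion = BigInt(1000000000)
      Some(times.iterator.map(d => BigInt(d.getSeconds) * billion + d.getNano).sum / times.size)
    }

  // catch up only if the participant is genuinely falling behind
  def shouldCatchUp(reconciliationInterval: Duration): Boolean =
    averageNanos.exists(
      _ > BigInt(reconciliationInterval.getSeconds) * 1000000000 + reconciliationInterval.getNano
    )
}
```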
@@ -1661,20 +1760,36 @@ class AcsCommitmentProcessor private (
   private def sendCommitmentMessages(
       period: CommitmentPeriod,
       msgs: Seq[(ParticipantId, AcsCommitment)],
+      intervalMillis: Long,
   )(implicit traceContext: TraceContext): Unit = {
-    // delay sending out commitments by at most (in this order): maxCommitmentSendDelayMillis, or
-    // testingConfig.maxCommitmentSendDelayMillis, or 2/3 of the reconciliation interval
-    val maxDelayMillis =
-      maxCommitmentSendDelayMillis.fold(testingConfig.maxCommitmentSendDelayMillis.fold({
-        val latestReconciliationInterval =
-          sortedReconciliationIntervalsProvider.getApproximateLatestReconciliationInterval
-            .map(_.intervalLength.toScala)
-            .getOrElse(Duration.Zero)
-        2 * latestReconciliationInterval.toMillis.toInt / 3
-      })(_.value))(_.value)
+    def contentsOrDefaults(delay: CommitmentSendDelay): (Double, Double) =
+      (
+        delay.minCommitmentSendDelay.map(_.n.value).getOrElse(defaultMinDelay),
+        delay.maxCommitmentSendDelay.map(_.n.value).getOrElse(defaultMaxDelay),
+      )
 
-    val delayMillis = if (maxDelayMillis > 0) rand.nextInt(maxDelayMillis) else 0
+    // delay sending out commitments by (in this order): commitmentSendDelay, or testingConfig.commitmentSendDelay,
+    // or at least defaultMinDelay and at most defaultMaxDelay of the reconciliation interval
+    val (minDelayFraction, maxDelayFraction) =
+      commitmentSendDelay.fold(
+        testingConfig.commitmentSendDelay.fold({
+          (defaultMinDelay, defaultMaxDelay)
+        })(delay => contentsOrDefaults(delay))
+      )(delay => contentsOrDefaults(delay))
+    val randDelayMillis = // if the max bound is smaller than the min bound, fall back to the default bounds
+      (if (maxDelayFraction < minDelayFraction) {
+         rand.between(
+           defaultMinDelay,
+           defaultMaxDelay,
+         ) * intervalMillis
+       } else if (maxDelayFraction == minDelayFraction) {
+         minDelayFraction * intervalMillis
+       } else
+         rand.between(
+           minDelayFraction,
+           maxDelayFraction,
+         ) * intervalMillis).toLong
 
     // filter the commitments to send based on the participants active at the "current time",
     // and not the ones active at the period end of the commitment
@@ -1761,7 +1876,7 @@ class AcsCommitmentProcessor private (
             val difference = deliver.timestamp.toMicros - period.toInclusive.toMicros
             // subtract the randomized sending delay to reflect the actual sequencing delay
             metrics.sequencingTime.updateValue(
-              difference - FiniteDuration(delayMillis, MILLISECONDS).toMicros
+              difference - FiniteDuration(randDelayMillis, MILLISECONDS).toMicros
             )
             FutureUnlessShutdown.pure(Right[CommitmentSendState, Unit](()).either)
           case notSequenced: SendResult.NotSequenced =>
@@ -1810,10 +1925,10 @@ class AcsCommitmentProcessor private (
           clock
             .scheduleAfter(
               _ => stubbornSendUnlessClosing(),
-              java.time.Duration.ofMillis(delayMillis.toLong),
+              java.time.Duration.ofMillis(randDelayMillis),
             ),
           s"Failed to schedule sending commitment message batch for period $period at time ${clock.now
-              .add(java.time.Duration.ofMillis(delayMillis.toLong))}",
+              .add(java.time.Duration.ofMillis(randDelayMillis))}",
           logPassiveInstanceAtInfo = true,
         )
         .discard
@@ -1835,6 +1950,7 @@ class AcsCommitmentProcessor private (
       toInclusive: Option[CantonTimestampSecond],
       filterInParticipantId: Seq[ParticipantId] = Seq.empty,
       filterOutParticipantId: Seq[ParticipantId],
+      intervalMillis: Long,
   )(implicit
       traceContext: TraceContext
   ): FutureUnlessShutdown[Unit] = {
@@ -1894,6 +2010,7 @@ class AcsCommitmentProcessor private (
       )
       for {
+        catchUpConfig <- catchUpConfig(period.toInclusive.forgetRefinement)
         cmts <- commitments(
           participantId,
@@ -1905,6 +2022,10 @@ class AcsCommitmentProcessor private (
           cachedCommitmentsForRetroactiveSends,
           filterInParticipantId,
           filterOutParticipantId,
+          lastCommitmentsComputeTimes =
+            catchUpConfig.map(cfg => (cfg.catchUpIntervalSkip, lastCommitmentsComputeTimes)),
+          increasePerceivedComputationTimeForCommitments =
+            increasePerceivedComputationTimeForCommitments,
         )
         _ = logger.debug(
@@ -1918,7 +2039,7 @@ class AcsCommitmentProcessor private (
         }
         _ <- storeCommitments(msgs)
         // TODO(i15333) batch more commitments and handle the case when we reach the maximum message limit.
-        _ = sendCommitmentMessages(period, msgs.toSeq)
+        _ = sendCommitmentMessages(period, msgs.toSeq, intervalMillis)
       } yield ()
     }
     .toSeq
@@ -1955,9 +2076,24 @@ class AcsCommitmentProcessor private (
   )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] =
     for {
       splitPeriods <- sortedReconciliationIntervalsProvider.splitCommitmentPeriod(cmt.period)
+      reconIntervals <- getReconciliationIntervals(
+        cmt.period.toInclusive.forgetRefinement
+      )
+      reconIntervalLength = reconIntervals.intervals.headOption.fold(0L)(
+        _.intervalLength.duration.toMillis
+      )
+
       _ <- splitPeriods match {
         case Some(periods) =>
-          if (matches(cmt, commitments, lastPruningTime.map(_.timestamp), possibleCatchUp)) {
+          if (
+            matches(
+              cmt,
+              commitments,
+              lastPruningTime.map(_.timestamp),
+              possibleCatchUp,
+              reconIntervalLength,
+            )
+          ) {
             for {
               _ <- multiHostMark(cmt.sender, periods)
               _ <-
@@ -2149,7 +2285,9 @@ object AcsCommitmentProcessor extends HasLoggerName {
       clock: Clock,
       exitOnFatalFailures: Boolean,
       batchingConfig: BatchingConfig,
-      maxCommitmentSendDelayMillis: Option[NonNegativeInt] = None,
+      maxCommitmentSendDelayMillis: Option[CommitmentSendDelay] = None,
+      increasePerceivedComputationTimeForCommitments: Option[java.time.Duration] = None,
+      doNotAwaitOnCheckingIncomingCommitments: Boolean,
   )(implicit
       ec: ExecutionContext,
       traceContext: TraceContext,
@@ -2208,6 +2346,8 @@ object AcsCommitmentProcessor extends HasLoggerName {
         exitOnFatalFailures,
         batchingConfig,
         maxCommitmentSendDelayMillis,
+        increasePerceivedComputationTimeForCommitments,
+        doNotAwaitOnCheckingIncomingCommitments,
       )
     // We trigger the processing of the buffered commitments, but we do not wait for it to complete here,
    // because, if processing buffered required topology updates that go through the same queue, we'd create a deadlock.
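A quick usage example for the `DurationResizableRingBuffer` introduced in the hunks below; the behavior is inferred from the added code (`averageDuration` computes an exact average via BigInt nanoseconds):

```scala
import java.time.Duration

// assumes the DurationResizableRingBuffer class added below
val buf = new DurationResizableRingBuffer(3)
buf.addAll(Seq(Duration.ofSeconds(1), Duration.ofSeconds(2), Duration.ofSeconds(3)))
buf.add(Duration.ofSeconds(4)) // full: the oldest entry (1s) is dropped
assert(buf.averageDuration().contains(Duration.ofSeconds(3)))   // (2 + 3 + 4) / 3
buf.setCapacity(2)             // shrinking drops the oldest entry (2s)
assert(buf.averageDuration().contains(Duration.ofMillis(3500))) // (3 + 4) / 2
```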
@@ -2469,11 +2609,14 @@ object AcsCommitmentProcessor extends HasLoggerName {
       filterInParticipantIds: Seq[ParticipantId] = Seq.empty,
       // exclude from computing commitments excludeCounterParticipantIds, if non-empty, otherwise do not exclude anyone
       filterOutParticipantIds: Seq[ParticipantId] = Seq.empty,
+      lastCommitmentsComputeTimes: Option[(PositiveInt, DurationResizableRingBuffer)] = None,
+      increasePerceivedComputationTimeForCommitments: Option[java.time.Duration] = None,
   )(implicit
       ec: ExecutionContext,
       loggingContext: ErrorLoggingContext,
   ): FutureUnlessShutdown[Map[ParticipantId, AcsCommitment.CommitmentType]] = {
-    val commitmentTimer = pruningMetrics.map(_.compute.startAsync())
+
+    val startedAtNano = System.nanoTime()
 
     for {
       byParticipant <- stakeholderCommitmentsPerParticipant(
@@ -2501,7 +2644,24 @@ object AcsCommitmentProcessor extends HasLoggerName {
         runningCommitments,
         byParticipant.fmap(m => m.map { case (stkhd, _cmt) => stkhd }.toSet),
       )
-      commitmentTimer.foreach(_.stop())
+
+      // record the duration of the last interval computation
+      val duration = java.time.Duration.ofNanos(System.nanoTime() - startedAtNano)
+
+      lastCommitmentsComputeTimes.foreach { case (catchUpSkip, computeTimes) =>
+        computeTimes.setCapacity(catchUpSkip.value)
+        computeTimes.add(
+          duration.plus(
+            increasePerceivedComputationTimeForCommitments.getOrElse(java.time.Duration.ZERO)
+          )
+        )
+      }
+
+      withEmptyMetricsContext { implicit metricsContext =>
+        pruningMetrics.foreach { m =>
+          m.compute.update(duration)
+        }
+      }
       res
     }
   }
@@ -3007,3 +3167,75 @@ object AcsCommitmentProcessor extends HasLoggerName {
     case object StopRetrying extends CommitmentSendState
   }
 }
+
+@SuppressWarnings(Array("org.wartremover.warts.Var"))
+final class DurationResizableRingBuffer(initialMaxSize: Int) {
+  require(initialMaxSize >= 0, s"max size must be >= 0, got $initialMaxSize")
+
+  private val buf = mutable.ArrayDeque.empty[java.time.Duration]
+  @volatile private var maxSize: Int = initialMaxSize
+
+  def capacity: Int = maxSize
+
+  def size: Int = blocking(this.synchronized(buf.size))
+
+  def isEmpty: Boolean = blocking(this.synchronized(buf.isEmpty))
+
+  /** Change capacity. Drops oldest items if shrinking below current size. */
+  def setCapacity(newMaxSize: Int): Unit = blocking {
+    this.synchronized {
+      require(newMaxSize >= 0, s"max size must be >= 0, got $newMaxSize")
+      maxSize = newMaxSize
+      if (buf.sizeIs > maxSize) {
+        val toDrop = buf.size - maxSize
+        buf.dropInPlace(toDrop)
+      }
+      ()
+    }
+  }
+
+  /** Append one element, dropping from front if full (or no-op if capacity=0). */
+  def add(elem: java.time.Duration): Unit = blocking {
+    this.synchronized {
+      if (maxSize > 0) {
+        if (buf.sizeIs >= maxSize) { val _ = buf.removeHead() }
+        buf.append(elem)
+      }
+      ()
+    }
+  }
+
+  /** Append many elements efficiently, dropping as needed. */
+  def addAll(elems: IterableOnce[java.time.Duration]): Unit = blocking {
+    this.synchronized {
+      if (maxSize > 0) {
+        buf.appendAll(elems)
+        // keep only the last `maxSize` elements
+        if (buf.sizeIs > maxSize) {
+          val toDrop = buf.size - maxSize
+          buf.dropInPlace(toDrop)
+        }
+      }
+      ()
+    }
+  }
+
+  /** Compute the average of the stored durations, if the buffer is non-empty.
*/ + def averageDuration(): Option[java.time.Duration] = blocking { + this.synchronized { + if (buf.isEmpty) None + else { + val billion = BigInt(1_000_000_000) + val avgNanos = buf.iterator + .map(d => BigInt(d.getSeconds) * billion + d.getNano) + .sum / buf.size + Some( + java.time.Duration.ofSeconds( + (avgNanos / billion).toLong, + (avgNanos % billion).toLong, + ) + ) + } + } + } +} diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/PruningProcessor.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/PruningProcessor.scala index 6ff60fe76f..f47d493f4b 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/PruningProcessor.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/PruningProcessor.scala @@ -350,6 +350,7 @@ class PruningProcessor( // We must prune the contract store even if the event log is empty, because there is not necessarily an // archival event reassigned-away contracts. _ = logger.debug("Pruning contract store...") + // TODO(#28005): not only archived but also divulged created contracts should be deleted (will be implemented later) _ <- participantNodePersistentState.value.contractStore.deleteIgnoringUnknown( archivedContracts ) diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/AcsInspection.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/AcsInspection.scala index 85a406ab80..3f31fa04ed 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/AcsInspection.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/AcsInspection.scala @@ -142,8 +142,9 @@ class AcsInspection( toc, ) } else EitherT.pure[FutureUnlessShutdown, AcsInspectionError](()) - snapshot <- EitherT - .right(activeContractStore.snapshot(toc)) + + snapshot <- EitherT.right(activeContractStore.snapshot(toc)) + // check after getting the snapshot in case a pruning was happening concurrently _ <- TimestampValidation.afterPruning( synchronizerId, @@ -170,15 +171,10 @@ class AcsInspection( val maybeSnapshotET: EitherT[FutureUnlessShutdown, AcsInspectionError, MaybeSnapshot] = timeOfSnapshotO match { case Some(toc) => - getSnapshotAt(synchronizerId)( - toc, - skipCleanTocCheck = skipCleanTocCheck, - ) - .map(Some(_)) + getSnapshotAt(synchronizerId)(toc, skipCleanTocCheck = skipCleanTocCheck).map(Some(_)) case None => - EitherT - .right[AcsInspectionError](getCurrentSnapshot()) + EitherT.right[AcsInspectionError](getCurrentSnapshot()) } maybeSnapshotET.map( @@ -188,9 +184,7 @@ class AcsInspection( cid -> reassignmentCounter } .toSeq - .grouped( - BatchSize.value - ) + .grouped(BatchSize.value) AcsSnapshot(groupedSnapshot, toc) } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ActiveContractStore.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ActiveContractStore.scala index 668aa51c57..559ba216d5 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ActiveContractStore.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ActiveContractStore.scala @@ -336,7 +336,7 @@ trait ActiveContractStore protected def getSynchronizerIndices( synchronizers: Seq[SynchronizerId] - ): 
CheckedT[FutureUnlessShutdown, AcsError, AcsWarning, Map[ + )(implicit traceContext: TraceContext): CheckedT[FutureUnlessShutdown, AcsError, AcsWarning, Map[ SynchronizerId, IndexedSynchronizer, ]] = diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ContractStore.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ContractStore.scala deleted file mode 100644 index d6ac3724d4..0000000000 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ContractStore.scala +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.participant.store - -import cats.instances.list.* -import cats.syntax.foldable.* -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown} -import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} -import com.digitalasset.canton.participant.ParticipantNodeParameters -import com.digitalasset.canton.participant.store.db.DbContractStore -import com.digitalasset.canton.participant.store.memory.InMemoryContractStore -import com.digitalasset.canton.protocol.{ContractInstance, LfContractId} -import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage} -import com.digitalasset.canton.store.Purgeable -import com.digitalasset.canton.topology.PartyId -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.daml.lf.transaction.CreationTime - -import scala.concurrent.ExecutionContext - -trait ContractStore extends ContractLookup with Purgeable with FlagCloseable { - - override type ContractsCreatedAtTime = CreationTime.CreatedAt - - /** Stores contracts created by a request. Assumes the contract data has been authenticated - * against the contract id using [[com.digitalasset.canton.util.ContractAuthenticator]]. - * - * @param contracts - * The created contracts to be stored - */ - def storeContracts(contracts: Seq[ContractInstance])(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Unit] - - def storeContract(contract: ContractInstance)(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Unit] = storeContracts(Seq(contract)) - - /** Debug find utility to search pcs - */ - def find( - exactId: Option[String], - filterPackage: Option[String], - filterTemplate: Option[String], - limit: Int, - )(implicit traceContext: TraceContext): FutureUnlessShutdown[List[ContractInstance]] - - /** Debug find utility to search pcs. Omits contracts that are not found. - */ - def findWithPayload( - contractIds: NonEmpty[Seq[LfContractId]] - )(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Map[LfContractId, ContractInstance]] - - /** Deletes multiple contracts from the contract store. - * - * Ignores errors due to a contract not being present in the store, fails on other errors. 
- */ - def deleteIgnoringUnknown(contractIds: Iterable[LfContractId])(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Unit] - - def contractCount()(implicit traceContext: TraceContext): FutureUnlessShutdown[Int] - - // TODO(i24535): implement this on db level - def hasActiveContracts( - partyId: PartyId, - contractIds: Iterator[LfContractId], - batchSize: Int = 10, - )(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Boolean] = { - val lfParty = partyId.toLf - - contractIds - .grouped(batchSize) - .toList - .findM(cids => - lookupStakeholders(cids.toSet).value.map { - case Right(x) => - x.exists { case (_, listParties) => listParties.contains(lfParty) } - case Left(_) => false - } - ) - .map(_.nonEmpty) - } - - // TODO(i24535): implement this on db level - def isSignatoryOnActiveContracts( - partyId: PartyId, - contractIds: Iterator[LfContractId], - batchSize: Int = 10, - )(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Boolean] = { - val lfParty = partyId.toLf - contractIds - .grouped(batchSize) - .toList - .findM(cids => - lookupSignatories(cids.toSet).value.map { - case Right(x) => - x.exists { case (_, listParties) => listParties.contains(lfParty) } - case Left(_) => false - } - ) - .map(_.nonEmpty) - } -} - -object ContractStore { - def create( - storage: Storage, - parameters: ParticipantNodeParameters, - loggerFactory: NamedLoggerFactory, - )(implicit executionContext: ExecutionContext): ContractStore = - storage match { - case _: MemoryStorage => - new InMemoryContractStore(parameters.processingTimeouts, loggerFactory) - - case dbStorage: DbStorage => - new DbContractStore( - dbStorage, - cacheConfig = parameters.cachingConfigs.contractStore, - dbQueryBatcherConfig = parameters.batchingConfig.aggregator, - insertBatchAggregatorConfig = parameters.batchingConfig.aggregator, - parameters.processingTimeouts, - loggerFactory, - ) - } -} - -sealed trait ContractStoreError extends Product with Serializable with PrettyPrinting - -sealed trait ContractLookupError extends ContractStoreError - -final case class UnknownContract(contractId: LfContractId) extends ContractLookupError { - override protected def pretty: Pretty[UnknownContract] = prettyOfClass(unnamedParam(_.contractId)) -} -final case class UnknownContracts(contractIds: Set[LfContractId]) extends ContractLookupError { - override protected def pretty: Pretty[UnknownContracts] = prettyOfClass( - unnamedParam(_.contractIds) - ) -} -final case class FailedConvert(contractId: LfContractId) extends ContractLookupError { - override protected def pretty: Pretty[FailedConvert] = prettyOfClass(unnamedParam(_.contractId)) -} diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ParticipantNodePersistentState.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ParticipantNodePersistentState.scala index 2293fa5a60..b17510cff2 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ParticipantNodePersistentState.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ParticipantNodePersistentState.scala @@ -103,7 +103,13 @@ object ParticipantNodePersistentState extends HasLoggerName { ) val contractStore = - ContractStore.create(storage, parameters, loggerFactory) + ContractStore.create( + storage = storage, + processingTimeouts = parameters.processingTimeouts, + cachingConfigs = parameters.cachingConfigs, + batchingConfig = 
parameters.batchingConfig, + loggerFactory = loggerFactory, + ) val pruningStore = ParticipantPruningStore(storage, timeouts, loggerFactory) diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/PruningOffsetServiceImpl.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/PruningOffsetServiceImpl.scala new file mode 100644 index 0000000000..13e9f5eaf9 --- /dev/null +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/PruningOffsetServiceImpl.scala @@ -0,0 +1,29 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.participant.store + +import com.digitalasset.canton.data.Offset +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors.AbortedDueToShutdown +import com.digitalasset.canton.platform.store.PruningOffsetService +import com.digitalasset.canton.tracing.TraceContext + +import scala.concurrent.{ExecutionContext, Future} + +final case class PruningOffsetServiceImpl( + participantPruningStore: ParticipantPruningStore, + loggerFactory: NamedLoggerFactory, +)(implicit ec: ExecutionContext) + extends PruningOffsetService + with NamedLogging { + + override def pruningOffset(implicit + traceContext: TraceContext + ): Future[Option[Offset]] = + participantPruningStore + .pruningStatus() + .map(_.startedO) + .failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError) + +} diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncPersistentState.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncPersistentState.scala index 708a62aafa..2204e8ee62 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncPersistentState.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncPersistentState.scala @@ -9,7 +9,6 @@ import com.digitalasset.canton.crypto.{CryptoPureApi, SynchronizerCrypto} import com.digitalasset.canton.lifecycle.LifeCycle import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.participant.ParticipantNodeParameters -import com.digitalasset.canton.participant.admin.PackageDependencyResolver import com.digitalasset.canton.participant.ledger.api.LedgerApiStore import com.digitalasset.canton.participant.store.db.{ DbLogicalSyncPersistentState, @@ -167,10 +166,9 @@ object PhysicalSyncPersistentState { clock: Clock, crypto: SynchronizerCrypto, parameters: ParticipantNodeParameters, - packageDependencyResolver: PackageDependencyResolver, + packageMetadataView: PackageMetadataView, ledgerApiStore: Eval[LedgerApiStore], logicalSyncPersistentState: LogicalSyncPersistentState, - packageMetadataView: Eval[PackageMetadataView], loggerFactory: NamedLoggerFactory, futureSupervisor: FutureSupervisor, )(implicit ec: ExecutionContext): PhysicalSyncPersistentState = @@ -182,12 +180,10 @@ object PhysicalSyncPersistentState { crypto, physicalSynchronizerIdx, staticSynchronizerParameters, - exitOnFatalFailures = parameters.exitOnFatalFailures, - disableUpgradeValidation = parameters.disableUpgradeValidation, - packageDependencyResolver, + parameters, + packageMetadataView, ledgerApiStore, logicalSyncPersistentState, - packageMetadataView, 
loggerFactory, parameters.processingTimeouts, futureSupervisor, @@ -201,10 +197,9 @@ object PhysicalSyncPersistentState { db, crypto, parameters, - packageDependencyResolver, + packageMetadataView, ledgerApiStore, logicalSyncPersistentState, - packageMetadataView, loggerFactory, futureSupervisor, ) diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbAcsCommitmentStore.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbAcsCommitmentStore.scala index 2ed05bb550..4d91385add 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbAcsCommitmentStore.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbAcsCommitmentStore.scala @@ -3,6 +3,7 @@ package com.digitalasset.canton.participant.store.db +import cats.Eval import cats.syntax.apply.* import cats.syntax.traverse.* import com.daml.nameof.NameOf @@ -27,6 +28,7 @@ import com.digitalasset.canton.participant.store.{ CommitmentQueue, IncrementalCommitmentStore, } +import com.digitalasset.canton.platform.store.interning.StringInterning import com.digitalasset.canton.protocol.messages.AcsCommitment.HashedCommitmentType import com.digitalasset.canton.protocol.messages.{ AcsCommitment, @@ -65,6 +67,7 @@ class DbAcsCommitmentStore( override val acsCounterParticipantConfigStore: AcsCounterParticipantConfigStore, override protected val timeouts: ProcessingTimeout, override protected val loggerFactory: NamedLoggerFactory, + stringInterningEval: Eval[StringInterning], )(implicit val ec: ExecutionContext) extends AcsCommitmentStore with DbPrunableByTimeSynchronizer[IndexedSynchronizer] @@ -456,6 +459,7 @@ class DbAcsCommitmentStore( indexedSynchronizer, timeouts, loggerFactory, + stringInterningEval, ) override val queue: DbCommitmentQueue = @@ -486,6 +490,7 @@ class DbIncrementalCommitmentStore( indexedSynchronizer: IndexedSynchronizer, override protected val timeouts: ProcessingTimeout, protected val loggerFactory: NamedLoggerFactory, + stringInterningEval: Eval[StringInterning], )(implicit val ec: ExecutionContext) extends IncrementalCommitmentStore with DbStore { @@ -493,17 +498,11 @@ class DbIncrementalCommitmentStore( import DbStorage.Implicits.* import storage.api.* - private implicit val getLfPartyIdSortedSet: GetResult[SortedSet[LfPartyId]] = - DbParameterUtils.getStringArrayResultsDb - .andThen(arr => SortedSet.from(arr.view.map(LfPartyId.assertFromString))) + private implicit val getLfPartyIdSortedSet: GetResult[Vector[Int]] = + DbParameterUtils.getIntArrayResultsDb.andThen(_.toVector) - private implicit val setParameterPartyIdSortedSet: SetParameter[SortedSet[LfPartyId]] = { - (parties, pp) => - DbParameterUtils.setArrayStringParameterDb( - parties, - pp, - ) - } + private implicit val setParameterPartyIdSortedSet: SetParameter[Vector[Int]] = + DbParameterUtils.setArrayIntParameterDb(_, _) override def get()(implicit traceContext: TraceContext @@ -515,9 +514,15 @@ class DbIncrementalCommitmentStore( sql"""select ts, tie_breaker from par_commitment_snapshot_time where synchronizer_idx = $indexedSynchronizer""" .as[(CantonTimestamp, Long)] .headOption - snapshot <- + snapshotWithInternedIds <- sql"""select stakeholders, commitment from par_commitment_snapshot where synchronizer_idx = $indexedSynchronizer""" - .as[(SortedSet[LfPartyId], AcsCommitment.CommitmentType)] + .as[(Vector[Int], AcsCommitment.CommitmentType)] + stringInterning = 
stringInterningEval.value + snapshot = snapshotWithInternedIds.map { case (internedParties, commitmentType) => + SortedSet.from( + internedParties.view.map(stringInterning.party.externalize) + ) -> commitmentType + } } yield (tsWithTieBreaker, snapshot)).transactionally .withTransactionIsolation(Serializable), operationName = "commitments: read commitments snapshot", @@ -568,13 +573,15 @@ class DbIncrementalCommitmentStore( def storeUpdates( updates: List[(SortedSet[LfPartyId], AcsCommitment.CommitmentType)] ): DbAction.All[Unit] = { + val stringInterning = stringInterningEval.value + def setParams( pp: PositionedParameters ): ((SortedSet[LfPartyId], AcsCommitment.CommitmentType)) => Unit = { case (stkhs, commitment) => pp >> indexedSynchronizer pp >> partySetHash(stkhs) - pp >> stkhs + pp >> stkhs.view.map(stringInterning.party.internalize).toVector pp >> commitment } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractStore.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractStore.scala deleted file mode 100644 index d48fa4792b..0000000000 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractStore.scala +++ /dev/null @@ -1,413 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.participant.store.db - -import cats.data.{EitherT, OptionT} -import cats.implicits.toTraverseOps -import cats.syntax.parallel.* -import com.daml.nameof.NameOf.functionFullName -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.caching.ScaffeineCache -import com.digitalasset.canton.config.CantonRequireTypes.String2066 -import com.digitalasset.canton.config.{ - BatchAggregatorConfig, - BatchingConfig, - CacheConfig, - ProcessingTimeout, -} -import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown} -import com.digitalasset.canton.logging.pretty.Pretty -import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, TracedLogger} -import com.digitalasset.canton.participant.store.* -import com.digitalasset.canton.protocol.* -import com.digitalasset.canton.resource.DbStorage.{DbAction, SQLActionBuilderChain} -import com.digitalasset.canton.resource.{DbStorage, DbStore} -import com.digitalasset.canton.store.db.{DbBulkUpdateProcessor, DbDeserializationException} -import com.digitalasset.canton.tracing.{TraceContext, Traced} -import com.digitalasset.canton.util.EitherUtil.RichEitherIterable -import com.digitalasset.canton.util.Thereafter.syntax.* -import com.digitalasset.canton.util.{BatchAggregator, ErrorUtil, MonadUtil, TryUtil} -import com.digitalasset.canton.{LfPartyId, checked} -import com.google.protobuf.ByteString -import slick.jdbc.{GetResult, PositionedParameters, SetParameter} - -import scala.collection.immutable -import scala.concurrent.ExecutionContext -import scala.util.{Failure, Try} - -class DbContractStore( - override protected val storage: DbStorage, - cacheConfig: CacheConfig, - dbQueryBatcherConfig: BatchAggregatorConfig, - insertBatchAggregatorConfig: BatchAggregatorConfig, - override protected val timeouts: ProcessingTimeout, - override protected val loggerFactory: NamedLoggerFactory, -)(protected implicit val ec: ExecutionContext) - extends ContractStore - with DbStore { self => - - import DbStorage.Implicits.* - import storage.api.* - import 
storage.converters.* - - private val profile = storage.profile - - override protected[store] def logger: TracedLogger = super.logger - - @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) - implicit def contractGetResult(implicit - getResultByteArray: GetResult[Array[Byte]] - ): GetResult[ContractInstance] = GetResult { r => - ContractInstance.decodeWithCreatedAt(ByteString.copyFrom(r.<<[Array[Byte]])) match { - case Right(contract) => contract - case Left(e) => throw new DbDeserializationException(s"Invalid contract instance: $e") - } - } - - implicit def contractSetParameter: SetParameter[ContractInstance] = (c, pp) => pp >> c.encoded - - private val cache: ScaffeineCache.TunnelledAsyncCache[LfContractId, Option[ContractInstance]] = - ScaffeineCache.buildMappedAsync[LfContractId, Option[ContractInstance]]( - cacheConfig.buildScaffeine() - )(logger, "DbContractStore.cache") - - private def invalidateCache(key: LfContractId): Unit = - cache.invalidate(key) - - // batch aggregator used for single point queries: damle will run many "lookups" - // during interpretation. they will hit the db like a nail gun. the batch - // aggregator will limit the number of parallel queries to the db and "batch them" - // together. so if there is high load with a lot of interpretation happening in parallel - // batching will kick in. - private val batchAggregatorLookup = { - val processor: BatchAggregator.Processor[LfContractId, Option[ContractInstance]] = - new BatchAggregator.Processor[LfContractId, Option[ContractInstance]] { - override val kind: String = "serializable contract" - override def logger: TracedLogger = DbContractStore.this.logger - - override def executeBatch(ids: NonEmpty[Seq[Traced[LfContractId]]])(implicit - traceContext: TraceContext, - callerCloseContext: CloseContext, - ): FutureUnlessShutdown[Iterable[Option[ContractInstance]]] = - lookupManyUncachedInternal(ids.map(_.value)) - - override def prettyItem: Pretty[LfContractId] = implicitly - } - BatchAggregator( - processor, - dbQueryBatcherConfig, - ) - } - - private val contractsBaseQuery = - sql"""select instance from par_contracts""" - - private def lookupQuery( - ids: NonEmpty[Seq[LfContractId]] - ): DbAction.ReadOnly[Seq[Option[ContractInstance]]] = { - import DbStorage.Implicits.BuilderChain.* - - val inClause = DbStorage.toInClause("contract_id", ids) - (contractsBaseQuery ++ sql" where " ++ inClause) - .as[ContractInstance] - .map { contracts => - val foundContracts = contracts - .map(contract => (contract.contractId, contract)) - .toMap - ids.map(foundContracts.get) - } - } - - private def bulkLookupQuery( - ids: NonEmpty[Seq[LfContractId]] - ): DbAction.ReadOnly[immutable.Iterable[ContractInstance]] = { - val inClause = DbStorage.toInClause("contract_id", ids) - import DbStorage.Implicits.BuilderChain.* - val query = - contractsBaseQuery ++ sql" where " ++ inClause - query.as[ContractInstance] - } - - def lookup( - id: LfContractId - )(implicit traceContext: TraceContext): OptionT[FutureUnlessShutdown, ContractInstance] = - OptionT(cache.getFuture(id, _ => batchAggregatorLookup.run(id))) - - override def lookupManyExistingUncached( - ids: Seq[LfContractId] - )(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, LfContractId, List[ContractInstance]] = - NonEmpty - .from(ids) - .map(ids => - EitherT(lookupManyUncachedInternal(ids).map(ids.toList.zip(_).traverse { - case (id, contract) => - contract.toRight(id) - })) - ) - .getOrElse(EitherT.rightT(List.empty)) - - private def 
lookupManyUncachedInternal( - ids: NonEmpty[Seq[LfContractId]] - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Seq[Option[ContractInstance]]] = - MonadUtil - .batchedSequentialTraverseNE( - parallelism = BatchingConfig().parallelism, - // chunk the ids to query to avoid hitting prepared statement limits - chunkSize = DbStorage.maxSqlParameters, - )( - ids - )(chunk => storage.query(lookupQuery(chunk), functionFullName)) - - override def find( - exactId: Option[String], - filterPackage: Option[String], - filterTemplate: Option[String], - limit: Int, - )(implicit traceContext: TraceContext): FutureUnlessShutdown[List[ContractInstance]] = { - - import DbStorage.Implicits.BuilderChain.* - - // If filter is set: returns a conjunctive (`and` prepended) constraint on attribute `name`. - // Otherwise empty sql action. - def createConjunctiveFilter( - name: String, - filter: Option[String], - ): Option[SQLActionBuilderChain] = - filter - .map { f => - sql" #$name " ++ (f match { - case rs if rs.startsWith("!") => sql"= ${rs.drop(1)}" // Must be equal - case rs if rs.startsWith("^") => sql"""like ${rs.drop(1) + "%"}""" // Starts with - case rs => sql"""like ${"%" + rs + "%"}""" // Contains - }) - } - - val pkgFilter = createConjunctiveFilter("package_id", filterPackage) - val templateFilter = createConjunctiveFilter("template_id", filterTemplate) - val coidFilter: Option[SQLActionBuilderChain] = exactId.map { stringContractId => - val lfContractId = LfContractId.assertFromString(stringContractId) - sql" contract_id = $lfContractId" - } - val limitFilter = sql" #${storage.limit(limit)}" - - val whereClause = - List(pkgFilter, templateFilter, coidFilter) - .foldLeft(Option.empty[SQLActionBuilderChain]) { - case (None, Some(filter)) => Some(sql" where " ++ filter) - case (acc, None) => acc - case (Some(acc), Some(filter)) => Some(acc ++ sql" and " ++ filter) - } - .getOrElse(toSQLActionBuilderChain(sql" ")) - - val contractsQuery = contractsBaseQuery ++ whereClause ++ limitFilter - - storage - .query(contractsQuery.as[ContractInstance], functionFullName) - .map(_.toList) - } - - override def findWithPayload( - contractIds: NonEmpty[Seq[LfContractId]] - )(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Map[LfContractId, ContractInstance]] = - storage - .query( - bulkLookupQuery(contractIds), - functionFullName, - ) - .map(_.map(c => c.contractId -> c).toMap) - - override def storeContracts(contracts: Seq[ContractInstance])(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Unit] = - contracts.parTraverse_(storeContract) - - private def storeContract(contract: ContractInstance)(implicit - ec: ExecutionContext, - traceContext: TraceContext, - ): FutureUnlessShutdown[Unit] = - batchAggregatorInsert.run(contract).flatMap(FutureUnlessShutdown.fromTry) - - private val batchAggregatorInsert = { - val processor = new DbBulkUpdateProcessor[ContractInstance, Unit] { - override protected implicit def executionContext: ExecutionContext = - DbContractStore.this.ec - override protected def storage: DbStorage = DbContractStore.this.storage - override def kind: String = "stored contract" - override def logger: TracedLogger = DbContractStore.this.logger - - override def executeBatch(items: NonEmpty[Seq[Traced[ContractInstance]]])(implicit - traceContext: TraceContext, - callerCloseContext: CloseContext, - ): FutureUnlessShutdown[Iterable[Try[Unit]]] = - bulkUpdateWithCheck(items, "DbContractStore.insert")(traceContext, self.closeContext) - - override protected def 
bulkUpdateAction(items: NonEmpty[Seq[Traced[ContractInstance]]])( - implicit batchTraceContext: TraceContext - ): DBIOAction[Array[Int], NoStream, Effect.All] = { - def setParams(pp: PositionedParameters)(contract: ContractInstance): Unit = { - - val packageId = contract.templateId.packageId - val templateId = checked(String2066.tryCreate(contract.templateId.qualifiedName.toString)) - - pp >> contract.contractId - pp >> packageId - pp >> templateId - pp >> contract - } - - // As we assume that the contract data has previously been authenticated against the contract id, - // we only update those fields that are not covered by the authentication. - val query = - profile match { - case _: DbStorage.Profile.Postgres => - """insert into par_contracts as c ( - contract_id, package_id, template_id, instance) - values (?, ?, ?, ?) - on conflict(contract_id) do nothing""" - case _: DbStorage.Profile.H2 => - """merge into par_contracts c - using (select cast(? as binary varying) contract_id, - cast(? as varchar) package_id, - cast(? as varchar) template_id, - cast(? as binary large object) instance - from dual) as input - on (c.contract_id = input.contract_id) - when not matched then - insert (contract_id, instance, package_id, template_id) - values (input.contract_id, input.instance, input.package_id, input.template_id)""" - } - DbStorage.bulkOperation(query, items.map(_.value), profile)(setParams) - - } - - override protected def onSuccessItemUpdate(item: Traced[ContractInstance]): Try[Unit] = - Try { - val contract = item.value - cache.put(contract.contractId, Option(contract)) - } - - private def failWith(message: String)(implicit - loggingContext: ErrorLoggingContext - ): Failure[Nothing] = - ErrorUtil.internalErrorTry(new IllegalStateException(message)) - - override protected type CheckData = ContractInstance - override protected type ItemIdentifier = LfContractId - override protected def itemIdentifier(item: ContractInstance): ItemIdentifier = - item.contractId - override protected def dataIdentifier(state: CheckData): ItemIdentifier = state.contractId - - override protected def checkQuery(itemsToCheck: NonEmpty[Seq[ItemIdentifier]])(implicit - batchTraceContext: TraceContext - ): DbAction.ReadOnly[immutable.Iterable[CheckData]] = - bulkLookupQuery(itemsToCheck) - - override protected def analyzeFoundData( - item: ContractInstance, - foundData: Option[ContractInstance], - )(implicit - traceContext: TraceContext - ): Try[Unit] = - foundData match { - case None => - // the contract is not in the db - invalidateCache(item.contractId) - failWith(s"Failed to insert contract ${item.contractId}") - case Some(data) => - if (data == item) { - cache.put(item.contractId, Some(item)) - TryUtil.unit - } else { - invalidateCache(data.contractId) - failWith( - s"Stored contracts are immutable, but found different contract ${item.contractId}" - ) - } - } - - override def prettyItem: Pretty[ContractInstance] = - ContractInstance.prettyGenContractInstance - } - - BatchAggregator(processor, insertBatchAggregatorConfig) - } - - override def deleteIgnoringUnknown( - contractIds: Iterable[LfContractId] - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - import DbStorage.Implicits.BuilderChain.* - NonEmpty.from(contractIds.toSeq) match { - case None => FutureUnlessShutdown.unit - case Some(cids) => - val inClause = DbStorage.toInClause("contract_id", cids) - storage - .update_( - (sql"""delete from par_contracts where """ ++ inClause).asUpdate, - functionFullName, - ) - .thereafter(_ => 
cache.invalidateAll(contractIds)) - } - } - - override def purge()(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Unit] = - storage - .update_( - sqlu"""delete from par_contracts""", - functionFullName, - ) - .thereafter(_ => cache.invalidateAll()) - - override def lookupStakeholders(ids: Set[LfContractId])(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, UnknownContracts, Map[LfContractId, Set[LfPartyId]]] = - lookupMetadata(ids).map(_.view.mapValues(_.stakeholders).toMap) - - override def lookupSignatories(ids: Set[LfContractId])(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, UnknownContracts, Map[LfContractId, Set[LfPartyId]]] = - lookupMetadata(ids).map(_.view.mapValues(_.signatories).toMap) - - def lookupMetadata(ids: Set[LfContractId])(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, UnknownContracts, Map[LfContractId, ContractMetadata]] = - NonEmpty.from(ids) match { - case None => EitherT.rightT(Map.empty) - - case Some(idsNel) => - EitherT( - MonadUtil - .parTraverseWithLimit(BatchAggregatorConfig.defaultMaximumInFlight)( - idsNel.forgetNE.toSeq - )(id => lookup(id).toRight(id).value) - .map(_.collectRight) - .map { contracts => - Either.cond( - contracts.sizeCompare(ids) == 0, - contracts - .map(contract => contract.contractId -> contract.metadata) - .toMap, - UnknownContracts(ids -- contracts.map(_.contractId).toSet), - ) - } - ) - } - - override def contractCount()(implicit traceContext: TraceContext): FutureUnlessShutdown[Int] = - storage.query( - sql"select count(*) from par_contracts".as[Int].head, - functionFullName, - ) - - override def onClosed(): Unit = { - cache.invalidateAll() - cache.cleanUp() - super.onClosed() - } -} diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbReassignmentStore.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbReassignmentStore.scala index 5a5744b7f0..f8b777314b 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbReassignmentStore.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbReassignmentStore.scala @@ -66,14 +66,14 @@ class DbReassignmentStore( private def indexedSynchronizerF[T[_]: SingletonTraverse]( synchronizerId: T[SynchronizerId] - ): FutureUnlessShutdown[T[IndexedSynchronizer]] = + )(implicit traceContext: TraceContext): FutureUnlessShutdown[T[IndexedSynchronizer]] = synchronizerId.traverseSingleton((_, synchronizerId) => IndexedSynchronizer.indexed(indexedStringStore)(synchronizerId) ) private def indexedSynchronizerET[E, T[_]: SingletonTraverse]( synchronizerId: T[SynchronizerId] - ): EitherT[FutureUnlessShutdown, E, T[IndexedSynchronizer]] = + )(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, E, T[IndexedSynchronizer]] = EitherT.right[E](indexedSynchronizerF(synchronizerId)) private def synchronizerIdF( diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbSyncPersistentState.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbSyncPersistentState.scala index e197724dcf..098016acde 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbSyncPersistentState.scala +++ 
b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbSyncPersistentState.scala @@ -12,7 +12,6 @@ import com.digitalasset.canton.crypto.{CryptoPureApi, SynchronizerCrypto} import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, LifeCycle} import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.participant.ParticipantNodeParameters -import com.digitalasset.canton.participant.admin.PackageDependencyResolver import com.digitalasset.canton.participant.ledger.api.LedgerApiStore import com.digitalasset.canton.participant.store.memory.PackageMetadataView import com.digitalasset.canton.participant.store.{ @@ -26,6 +25,7 @@ import com.digitalasset.canton.participant.topology.ParticipantTopologyValidatio import com.digitalasset.canton.protocol.StaticSynchronizerParameters import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.store.db.DbSequencedEventStore +import com.digitalasset.canton.store.packagemeta.PackageMetadata import com.digitalasset.canton.store.{ IndexedPhysicalSynchronizer, IndexedStringStore, @@ -86,6 +86,7 @@ class DbLogicalSyncPersistentState( acsCounterParticipantConfigStore, timeouts, loggerFactory, + ledgerApiStore.map(_.stringInterningView), ) override val acsInspection: AcsInspection = @@ -119,10 +120,9 @@ class DbPhysicalSyncPersistentState( storage: DbStorage, crypto: SynchronizerCrypto, parameters: ParticipantNodeParameters, - packageDependencyResolver: PackageDependencyResolver, + packageMetadataView: PackageMetadataView, ledgerApiStore: Eval[LedgerApiStore], logicalSyncPersistentState: LogicalSyncPersistentState, - packageMetadataView: Eval[PackageMetadataView], val loggerFactory: NamedLoggerFactory, val futureSupervisor: FutureSupervisor, )(implicit ec: ExecutionContext) @@ -187,6 +187,7 @@ class DbPhysicalSyncPersistentState( staticSynchronizerParameters = staticSynchronizerParameters, store = topologyStore, outboxQueue = synchronizerOutboxQueue, + disableOptionalTopologyChecks = parameters.disableOptionalTopologyChecks, exitOnFatalFailures = parameters.exitOnFatalFailures, timeouts = timeouts, futureSupervisor = futureSupervisor, @@ -196,6 +197,7 @@ class DbPhysicalSyncPersistentState( override def validatePackageVetting( currentlyVettedPackages: Set[LfPackageId], nextPackageIds: Set[LfPackageId], + dryRunSnapshot: Option[PackageMetadata], forceFlags: ForceFlags, )(implicit traceContext: TraceContext @@ -203,8 +205,8 @@ class DbPhysicalSyncPersistentState( validatePackageVetting( currentlyVettedPackages, nextPackageIds, - Some(packageMetadataView.value), - packageDependencyResolver, + packageMetadataView, + dryRunSnapshot, acsInspections = () => Map(logicalSyncPersistentState.lsid -> logicalSyncPersistentState.acsInspection), forceFlags, diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryActiveContractStore.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryActiveContractStore.scala index 63c735b44b..3b97beefc9 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryActiveContractStore.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryActiveContractStore.scala @@ -224,7 +224,7 @@ class InMemoryActiveContractStore( reassignments: Seq[ (LfContractId, ReassignmentTag[SynchronizerId], ReassignmentCounter, TimeOfChange) ] - ): 
CheckedT[FutureUnlessShutdown, AcsError, AcsWarning, Seq[ + )(implicit traceContext: TraceContext): CheckedT[FutureUnlessShutdown, AcsError, AcsWarning, Seq[ (LfContractId, Int, ReassignmentCounter, TimeOfChange) ]] = { val synchronizers = reassignments.map { case (_, synchronizer, _, _) => diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryContractStore.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryContractStore.scala deleted file mode 100644 index d2883ce563..0000000000 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryContractStore.scala +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.participant.store.memory - -import cats.data.{EitherT, OptionT} -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.LfPartyId -import com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger} -import com.digitalasset.canton.participant.store.* -import com.digitalasset.canton.protocol.* -import com.digitalasset.canton.tracing.TraceContext - -import scala.collection.concurrent.TrieMap -import scala.concurrent.ExecutionContext - -/** An in-memory contract store. This class is thread-safe. */ -class InMemoryContractStore( - override protected val timeouts: ProcessingTimeout, - protected val loggerFactory: NamedLoggerFactory, -)( - protected implicit val ec: ExecutionContext -) extends ContractStore - with NamedLogging { - - override protected[store] def logger: TracedLogger = super.logger - - /** Invariants: - * - Every [[LfFatContractInst]] is stored under [[LfFatContractInst.contractId]]. 
- */ - private[this] val contracts = TrieMap.empty[LfContractId, ContractInstance] - - /** Debug find utility to search pcs - */ - override def find( - filterId: Option[String], - filterPackage: Option[String], - filterTemplate: Option[String], - limit: Int, - )(implicit traceContext: TraceContext): FutureUnlessShutdown[List[ContractInstance]] = { - def search( - needle: String, - accessor: ContractInstance => String, - ): ContractInstance => Boolean = - needle match { - case rs if rs.startsWith("!") => accessor(_) == needle.drop(1) - case rs if rs.startsWith("^") => accessor(_).startsWith(needle.drop(1)) - case _ => accessor(_).contains(needle) - } - val flt1 = - filterPackage.map(search(_, _.templateId.packageId)) - val flt2 = filterTemplate.map( - search(_, _.templateId.qualifiedName.qualifiedName) - ) - val flt3 = filterId.map(search(_, _.contractId.coid)) - - def conjunctiveFilter(sc: ContractInstance): Boolean = - flt1.forall(_(sc)) && flt2.forall(_(sc)) && flt3.forall(_(sc)) - FutureUnlessShutdown.pure( - contracts.values.filter(conjunctiveFilter).take(limit).toList - ) - } - - def findWithPayload( - contractIds: NonEmpty[Seq[LfContractId]] - )(implicit - traceContext: TraceContext - ): FutureUnlessShutdown[Map[LfContractId, ContractInstance]] = - FutureUnlessShutdown.pure( - contractIds - .map(cid => cid -> contracts.get(cid)) - .collect { case (cid, Some(contract)) => cid -> contract } - .toMap - ) - - override def lookup( - id: LfContractId - )(implicit traceContext: TraceContext): OptionT[FutureUnlessShutdown, ContractInstance] = { - logger.debug(s"Looking up contract: $id") - OptionT(FutureUnlessShutdown.pure { - val result = contracts.get(id) - result.fold(logger.debug(s"Contract $id not found"))(contract => - logger.debug( - s"Found contract $id of type ${contract.templateId.qualifiedName.qualifiedName}" - ) - ) - result - }) - } - - override def storeContracts( - contracts: Seq[ContractInstance] - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - contracts.foreach(store) - FutureUnlessShutdown.unit - } - - private def store(storedContract: ContractInstance): Unit = - contracts - .putIfAbsent(storedContract.contractId, storedContract) - .discard[Option[ContractInstance]] - - override def deleteIgnoringUnknown( - ids: Iterable[LfContractId] - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - ids.foreach(id => contracts.remove(id).discard[Option[ContractInstance]]) - FutureUnlessShutdown.unit - } - - override def purge()(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - contracts.clear() - FutureUnlessShutdown.unit - } - - override def lookupStakeholders(ids: Set[LfContractId])(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, UnknownContracts, Map[LfContractId, Set[LfPartyId]]] = { - val res = contracts.filter { case (cid, _) => ids.contains(cid) }.map { case (cid, c) => - (cid, c.stakeholders) - } - EitherT.cond(res.sizeCompare(ids) == 0, res.toMap, UnknownContracts(ids -- res.keySet)) - } - - override def lookupSignatories(ids: Set[LfContractId])(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, UnknownContracts, Map[LfContractId, Set[LfPartyId]]] = { - val res = contracts.filter { case (cid, _) => ids.contains(cid) }.map { case (cid, c) => - (cid, c.inst.signatories) - } - EitherT.cond(res.sizeCompare(ids) == 0, res.toMap, UnknownContracts(ids -- res.keySet)) - } - - override def contractCount()(implicit traceContext: TraceContext): FutureUnlessShutdown[Int] = - 
FutureUnlessShutdown.pure(contracts.size) -} diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemorySyncPersistentState.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemorySyncPersistentState.scala index 9f5b9be7b9..ea3e69fe80 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemorySyncPersistentState.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemorySyncPersistentState.scala @@ -12,7 +12,7 @@ import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.crypto.{CryptoPureApi, SynchronizerCrypto} import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.participant.admin.PackageDependencyResolver +import com.digitalasset.canton.participant.ParticipantNodeParameters import com.digitalasset.canton.participant.ledger.api.LedgerApiStore import com.digitalasset.canton.participant.store.{ AcsCounterParticipantConfigStore, @@ -24,6 +24,7 @@ import com.digitalasset.canton.participant.store.{ import com.digitalasset.canton.participant.topology.ParticipantTopologyValidation import com.digitalasset.canton.protocol.StaticSynchronizerParameters import com.digitalasset.canton.store.memory.{InMemorySendTrackerStore, InMemorySequencedEventStore} +import com.digitalasset.canton.store.packagemeta.PackageMetadata import com.digitalasset.canton.store.{ IndexedPhysicalSynchronizer, IndexedStringStore, @@ -90,12 +91,10 @@ class InMemoryPhysicalSyncPersistentState( crypto: SynchronizerCrypto, override val physicalSynchronizerIdx: IndexedPhysicalSynchronizer, val staticSynchronizerParameters: StaticSynchronizerParameters, - exitOnFatalFailures: Boolean, - disableUpgradeValidation: Boolean, - packageDependencyResolver: PackageDependencyResolver, + parameters: ParticipantNodeParameters, + packageMetadataView: PackageMetadataView, ledgerApiStore: Eval[LedgerApiStore], logicalSyncPersistentState: LogicalSyncPersistentState, - packageMetadataView: Eval[PackageMetadataView], val loggerFactory: NamedLoggerFactory, val timeouts: ProcessingTimeout, val futureSupervisor: FutureSupervisor, @@ -126,7 +125,8 @@ class InMemoryPhysicalSyncPersistentState( staticSynchronizerParameters, topologyStore, synchronizerOutboxQueue, - exitOnFatalFailures = exitOnFatalFailures, + disableOptionalTopologyChecks = parameters.disableOptionalTopologyChecks, + exitOnFatalFailures = parameters.exitOnFatalFailures, timeouts, futureSupervisor, loggerFactory, @@ -135,6 +135,7 @@ class InMemoryPhysicalSyncPersistentState( override def validatePackageVetting( currentlyVettedPackages: Set[LfPackageId], nextPackageIds: Set[LfPackageId], + dryRunSnapshot: Option[PackageMetadata], forceFlags: ForceFlags, )(implicit traceContext: TraceContext @@ -142,12 +143,12 @@ class InMemoryPhysicalSyncPersistentState( validatePackageVetting( currentlyVettedPackages, nextPackageIds, - Some(packageMetadataView.value), - packageDependencyResolver, + packageMetadataView, + dryRunSnapshot, acsInspections = () => Map(logicalSyncPersistentState.lsid -> logicalSyncPersistentState.acsInspection), forceFlags, - disableUpgradeValidation = disableUpgradeValidation, + disableUpgradeValidation = parameters.disableUpgradeValidation, ) override def checkCannotDisablePartyWithActiveContracts( partyId: PartyId, diff --git 
a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/PackageMetadataView.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/PackageMetadataView.scala index a4f423b705..6e18d485a4 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/PackageMetadataView.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/PackageMetadataView.scala @@ -5,19 +5,21 @@ package com.digitalasset.canton.participant.store.memory import cats.implicits.catsSyntaxSemigroup import com.daml.timer.FutureCheck.* +import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config.{PackageMetadataViewConfig, ProcessingTimeout} import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.ledger.error.{CommonErrors, PackageServiceErrors} -import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown} +import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown, LifeCycle} import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors import com.digitalasset.canton.participant.admin.PackageService import com.digitalasset.canton.participant.store.DamlPackageStore import com.digitalasset.canton.platform.apiserver.services.admin.PackageUpgradeValidator -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata.Implicits.packageMetadataSemigroup +import com.digitalasset.canton.store.packagemeta.PackageMetadata +import com.digitalasset.canton.store.packagemeta.PackageMetadata.Implicits.packageMetadataSemigroup import com.digitalasset.canton.time.Clock import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.SimpleExecutionQueue import com.digitalasset.canton.{LfPackageId, LfPackageRef} import com.digitalasset.daml.lf.archive.{DamlLf, Decode} import org.apache.pekko.actor.ActorSystem @@ -27,8 +29,8 @@ import java.util.concurrent.atomic.AtomicReference import scala.concurrent.{ExecutionContext, Future} /** In-memory view of Daml-related package metadata (see - * [[com.digitalasset.canton.platform.store.packagemeta.PackageMetadata]]) for all packages stored - * on the current participant. + * [[com.digitalasset.canton.store.packagemeta.PackageMetadata]]) for all packages stored on the + * current participant. */ trait PackageMetadataView extends AutoCloseable { def getSnapshot(implicit errorLoggingContext: ErrorLoggingContext): PackageMetadata @@ -39,12 +41,15 @@ trait PackageMetadataView extends AutoCloseable { * initialization and on new package uploads. */ trait MutablePackageMetadataView extends PackageMetadataView { - def update(other: PackageMetadata)(implicit tc: TraceContext): Unit - /** Re-initialize the package metadata view state from the underlying Daml packages store. * - * Note: Not thread-safe! + /** Update the current package metadata view by merging each element of `newPackagesMetadata` + * into it, in order. */ + def updateMany(newPackagesMetadata: Seq[PackageMetadata])(implicit + tc: TraceContext + ): FutureUnlessShutdown[Unit] + + /** Re-initialize the package metadata view state from the underlying Daml packages store.
*/ def refreshState(implicit tc: TraceContext): FutureUnlessShutdown[Unit] } @@ -55,6 +60,8 @@ class MutablePackageMetadataViewImpl( val loggerFactory: NamedLoggerFactory, packageMetadataViewConfig: PackageMetadataViewConfig, val timeouts: ProcessingTimeout, + futureSupervisor: FutureSupervisor, + exitOnFatalFailures: Boolean, )(implicit val actorSystem: ActorSystem, executionContext: ExecutionContext) extends MutablePackageMetadataView with FlagCloseable @@ -63,58 +70,84 @@ class MutablePackageMetadataViewImpl( private val packageMetadataRef: AtomicReference[Option[PackageMetadata]] = new AtomicReference(None) - def update(other: PackageMetadata)(implicit tc: TraceContext): Unit = - packageMetadataRef.updateAndGet { - case Some(packageMetadata) => Some(packageMetadata |+| other) - case None => - throw CommonErrors.ServiceInternalError - .Generic(s"$loggingSubject is not initialized") - .asGrpcError - }.discard + private val mutatePackageMetadataExecutionQueue = new SimpleExecutionQueue( + "sequential-upload-dar-queue", + futureSupervisor, + timeouts, + loggerFactory, + crashOnFailure = exitOnFatalFailures, + ) + + def updateMany(newPackagesMetadata: Seq[PackageMetadata])(implicit + tc: TraceContext + ): FutureUnlessShutdown[Unit] = + mutatePackageMetadataExecutionQueue.execute( + execution = Future { + newPackagesMetadata.foreach(other => + packageMetadataRef.updateAndGet { + case Some(packageMetadata) => Some(packageMetadata |+| other) + case None => + throw CommonErrors.ServiceInternalError + .Generic(s"$loggingSubject is not initialized") + .asGrpcError + }.discard + ) + }, + description = s"update $loggingSubject", + ) def refreshState(implicit tc: TraceContext): FutureUnlessShutdown[Unit] = synchronizeWithClosing(s"Refreshing $loggingSubject") { - val startedTime = clock.now - def elapsedDurationMillis(): Long = (clock.now - startedTime).toMillis - - val initializationFUS = - damlPackageStore - .listPackages() - .flatMap(packages => - FutureUnlessShutdown.outcomeF( - Source(packages) - .mapAsyncUnordered(packageMetadataViewConfig.initLoadParallelism) { pkgDesc => - logger.debug(s"Fetching package ${pkgDesc.packageId}") - fetchPackage(pkgDesc.packageId).asGrpcFuture - } - .mapAsyncUnordered(packageMetadataViewConfig.initProcessParallelism) { archive => - logger.debug(s"Decoding archive ${archive.getHash} to package metadata") - decodePackageMetadata(archive) - } - .runFold(PackageMetadata())(_ |+| _) - .map(initialized => packageMetadataRef.set(Some(initialized))) - ) - ) - - FutureUnlessShutdown( - initializationFUS.unwrap - .checkIfComplete( - packageMetadataViewConfig.initTakesTooLongInitialDelay, - packageMetadataViewConfig.initTakesTooLongInterval, - ) { - logger.warn( - s"$loggingSubject initialization takes too long (${elapsedDurationMillis()} ms)" - ) - } - .map { result => - logger.info( - s"$loggingSubject has been initialized (${elapsedDurationMillis()} ms)" - ) - result - } + mutatePackageMetadataExecutionQueue.executeUS( + execution = refreshStateInternal, + description = s"refresh $loggingSubject", ) } + private def refreshStateInternal(implicit tc: TraceContext) = { + val startedTime = clock.now + + def elapsedDurationMillis(): Long = (clock.now - startedTime).toMillis + + val initializationFUS = + damlPackageStore + .listPackages() + .flatMap(packages => + FutureUnlessShutdown.outcomeF( + Source(packages) + .mapAsyncUnordered(packageMetadataViewConfig.initLoadParallelism) { pkgDesc => + logger.debug(s"Fetching package ${pkgDesc.packageId}") + 
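Both `updateMany` and `refreshState` now run through the same `SimpleExecutionQueue`, so incremental semigroup merges (`|+|`) and full rebuilds of the metadata snapshot can no longer interleave. A stand-in for that serialization pattern using only the standard library (a hedged sketch, not Canton's `SimpleExecutionQueue`):

```scala
import java.util.concurrent.atomic.AtomicReference
import scala.concurrent.{ExecutionContext, Future, Promise}

// Minimal sequential execution queue: each submitted task starts only after
// the previously submitted task has completed, regardless of which thread
// submits it.
final class SequentialQueue(implicit ec: ExecutionContext) {
  private val tail = new AtomicReference[Future[Any]](Future.successful(()))

  def execute[A](task: => A): Future[A] = {
    val next = Promise[A]()
    // Atomically append ourselves to the chain, then run once the
    // predecessor finishes (whether it succeeded or failed).
    val prev = tail.getAndSet(next.future)
    prev.onComplete(_ => next.completeWith(Future(task)))
    next.future
  }
}
```

With merges applied through such a queue, a refresh enqueued after a batch of updates is guaranteed to observe all of them.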
fetchPackage(pkgDesc.packageId).asGrpcFuture + } + .mapAsyncUnordered(packageMetadataViewConfig.initProcessParallelism) { archive => + logger.debug(s"Decoding archive ${archive.getHash} to package metadata") + decodePackageMetadata(archive) + } + .runFold(PackageMetadata())(_ |+| _) + .map(initialized => packageMetadataRef.set(Some(initialized))) + ) + ) + + FutureUnlessShutdown( + initializationFUS.unwrap + .checkIfComplete( + packageMetadataViewConfig.initTakesTooLongInitialDelay, + packageMetadataViewConfig.initTakesTooLongInterval, + ) { + logger.warn( + s"$loggingSubject initialization takes too long (${elapsedDurationMillis()} ms)" + ) + } + .map { result => + logger.info( + s"$loggingSubject has been initialized (${elapsedDurationMillis()} ms)" + ) + result + } + ) + + } + def getSnapshot(implicit errorLoggingContext: ErrorLoggingContext): PackageMetadata = packageMetadataRef .get() @@ -124,6 +157,9 @@ class MutablePackageMetadataViewImpl( .asGrpcError ) + override def onClosed(): Unit = + LifeCycle.close(mutatePackageMetadataExecutionQueue)(logger) + private def decodePackageMetadata( archive: DamlLf.Archive )(implicit tc: TraceContext): Future[PackageMetadata] = @@ -147,29 +183,3 @@ class MutablePackageMetadataViewImpl( ) } } - -object MutablePackageMetadataViewImpl { - def createAndInitialize( - clock: Clock, - damlPackageStore: DamlPackageStore, - packageUpgradeValidator: PackageUpgradeValidator, - loggerFactory: NamedLoggerFactory, - packageMetadataViewConfig: PackageMetadataViewConfig, - timeouts: ProcessingTimeout, - )(implicit - actorSystem: ActorSystem, - executionContext: ExecutionContext, - traceContext: TraceContext, - ): FutureUnlessShutdown[MutablePackageMetadataViewImpl] = { - val mutablePackageMetadataView = - new MutablePackageMetadataViewImpl( - clock, - damlPackageStore, - packageUpgradeValidator, - loggerFactory, - packageMetadataViewConfig, - timeouts, - ) - mutablePackageMetadataView.refreshState.map(_ => mutablePackageMetadataView) - } -} diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala index fd98c69ef9..bbde7253dc 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala @@ -9,14 +9,15 @@ import cats.implicits.toBifunctorOps import cats.syntax.either.* import cats.syntax.functor.* import cats.syntax.parallel.* +import cats.syntax.traverse.* import com.daml.nonempty.NonEmpty import com.digitalasset.base.error.RpcError import com.digitalasset.canton.* import com.digitalasset.canton.common.sequencer.grpc.SequencerInfoLoader import com.digitalasset.canton.concurrent.FutureSupervisor -import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} import com.digitalasset.canton.config.{ProcessingTimeout, TestingConfigInternal} -import com.digitalasset.canton.crypto.{CryptoPureApi, SyncCryptoApiParticipantProvider} +import com.digitalasset.canton.crypto.{CryptoPureApi, HashOps, SyncCryptoApiParticipantProvider} import com.digitalasset.canton.data.{ CantonTimestamp, Offset, @@ -31,6 +32,13 @@ import com.digitalasset.canton.error.TransactionRoutingError.{ } import com.digitalasset.canton.health.MutableHealthComponent import 
com.digitalasset.canton.ledger.api.health.HealthStatus +import com.digitalasset.canton.ledger.api.{ + EnrichedVettedPackage, + ListVettedPackagesOpts, + UpdateVettedPackagesOpts, + UploadDarVettingChange, + VetAllPackages, +} import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors import com.digitalasset.canton.ledger.participant.state import com.digitalasset.canton.ledger.participant.state.* @@ -80,24 +88,26 @@ import com.digitalasset.canton.participant.sync.SynchronizerConnectionsManager.{ import com.digitalasset.canton.participant.synchronizer.* import com.digitalasset.canton.participant.topology.* import com.digitalasset.canton.platform.apiserver.execution.CommandProgressTracker -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata import com.digitalasset.canton.protocol.* import com.digitalasset.canton.protocol.WellFormedTransaction.WithoutSuffixes import com.digitalasset.canton.resource.DbStorage.PassiveInstanceException import com.digitalasset.canton.resource.Storage import com.digitalasset.canton.scheduler.Schedulers import com.digitalasset.canton.sequencing.SequencerConnectionValidation +import com.digitalasset.canton.store.packagemeta.PackageMetadata import com.digitalasset.canton.time.{Clock, NonNegativeFiniteDuration, SynchronizerTimeTracker} import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.client.{ SynchronizerTopologyClientWithInit, TopologySnapshot, } +import com.digitalasset.canton.topology.transaction.VettedPackage import com.digitalasset.canton.tracing.{Spanning, TraceContext, Traced} import com.digitalasset.canton.util.* -import com.digitalasset.canton.util.FutureInstances.parallelFuture import com.digitalasset.canton.util.OptionUtils.OptionExtension +import com.digitalasset.canton.util.PackageConsumer.PackageResolver import com.digitalasset.canton.util.ReassignmentTag.{Source, Target} +import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.daml.lf.archive.DamlLf import com.digitalasset.daml.lf.data.Ref.{PackageId, Party, SubmissionId} import com.digitalasset.daml.lf.data.{ImmArray, Ref} @@ -231,28 +241,27 @@ class CantonSyncService( /** Validates that the provided packages are vetted on the currently connected synchronizers. */ // TODO(i25076) remove this waiting logic once topology events are published on the ledger api - val synchronizeVettingOnConnectedSynchronizers: PackageVettingSynchronization = + val synchronizeVettingOnSynchronizer: PackageVettingSynchronization = new PackageVettingSynchronization { - override def sync(packages: Set[PackageId])(implicit + override def sync(packages: Set[VettedPackage], psid: PhysicalSynchronizerId)(implicit traceContext: TraceContext ): EitherT[Future, ParticipantTopologyManagerError, Unit] = // wait for packages to be vetted on the currently connected synchronizers EitherT .right[ParticipantTopologyManagerError]( - connectedSynchronizersLookup.snapshot.toSeq.parTraverse { - case (psid, connectedSynchronizer) => - connectedSynchronizer.topologyClient - .await( - _.determinePackagesWithNoVettingEntry(participantId, packages) - .map(_.isEmpty) - .onShutdown(false), - timeouts.network.duration, - ) - // turn AbortedDuToShutdown into a verdict, as we don't want to turn - // the overall result into AbortedDueToShutdown, just because one of - // the synchronizers disconnected in the meantime. 
- .onShutdown(false) - .map(psid -> _) + connectedSynchronizersLookup.get(psid).traverse { connectedSynchronizer => + connectedSynchronizer.topologyClient + .await( + _.vettedPackages(participantId) + .map(_ == packages) + .onShutdown(false), + timeouts.network.duration, + ) + // turn AbortedDueToShutdown into a verdict, as we don't want to turn + // the overall result into AbortedDueToShutdown, just because one of + // the synchronizers disconnected in the meantime. + .onShutdown(false) + .map(connectedSynchronizer.psid -> _) } ) .map { result => @@ -267,6 +276,66 @@ class CantonSyncService( .void } + /** Vets the admin workflow DARs on the specified synchronizer */ + private def vetAdminWorkflowsOnSynchronizer( + lsid: SynchronizerId + )(implicit traceContext: TraceContext): Unit = { + def vetPackages( + packages: Set[PackageId], + psid: PhysicalSynchronizerId, + ): FutureUnlessShutdown[Unit] = + // vet any packages that have not yet been vetted + EitherTUtil.toFutureUnlessShutdown( + AdminWorkflowServices.handleDamlErrorDuringPackageLoading( + s"${AdminWorkflowServices.PingDarResourceName}__${AdminWorkflowServices.PartyReplicationDarResourceName}" + )( + packageService + .vetPackages( + packages.toSeq, + synchronizeVetting = synchronizeVettingOnSynchronizer, + psid, + ) + ) + ) + + val topologyClientO = connectedSynchronizersLookup.get(lsid).map(_.topologyClient) + val vettingF = topologyClientO match { + case Some(topologyClient) => + val partyReplicationPackagesIfShouldVet = + if (parameters.unsafeOnlinePartyReplication.isDefined) + AdminWorkflowServices.PartyReplicationPackages.keySet + else Set.empty + val packagesToVet = AdminWorkflowServices.PingPackages.keySet ++ + partyReplicationPackagesIfShouldVet + logger.debug("Checking whether admin workflows still need to be vetted.") + + topologyClient.headSnapshot + .determinePackagesWithNoVettingEntry(participantId, packagesToVet) + .flatMap { packagesNotVetted => + if (packagesNotVetted.nonEmpty) { + vetPackages(packagesNotVetted, topologyClient.psid) + } else { + logger.debug("Admin workflow packages are already present. Skipping loading.") + FutureUnlessShutdown.unit + } + } + + case None => + logger.info( + s"Unable to vet admin workflows on $lsid, because no active configuration was found." + ) + FutureUnlessShutdown.unit + } + parameters.processingTimeouts.unbounded.awaitUS_(s"Vet Admin Workflow packages")(vettingF) + } + + subscribeToConnections(_.withTraceContext { implicit traceContext => lsid => + if (parameters.adminWorkflow.autoLoadDar) { + logger.debug(s"Received connection notification of $lsid") + vetAdminWorkflowsOnSynchronizer(lsid) + } + }) + /** Return the active PSId corresponding to the given id, if any. Since at most one synchronizer * connection per LSId can be active, this is well-defined.
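The reworked vetting synchronization above no longer asks merely whether each package has some vetting entry across all connected synchronizers; it waits until the participant's vetted-package set on the one relevant synchronizer equals the expected set. A hedged sketch of that await-until-predicate shape, approximating the topology client's `await` with polling (all names below are illustrative):

```scala
import scala.concurrent.duration.*

object VettingAwait {
  // Poll a snapshot of the vetted-package set until it equals the expected
  // set or the timeout elapses; returns whether the target state was reached.
  // The real code delegates the waiting to the topology client instead.
  def awaitVetted(
      expected: Set[String],
      currentVettedPackages: () => Set[String],
      timeout: FiniteDuration,
      pollInterval: FiniteDuration = 100.millis,
  ): Boolean = {
    val deadline: Deadline = timeout.fromNow
    var ok = currentVettedPackages() == expected
    while (!ok && deadline.hasTimeLeft()) {
      Thread.sleep(pollInterval.toMillis)
      ok = currentVettedPackages() == expected
    }
    ok
  }
}
```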
*/ @@ -297,7 +366,6 @@ class CantonSyncService( private val transactionRoutingProcessor = TransactionRoutingProcessor( connectedSynchronizersLookup = connectedSynchronizersLookup, - cryptoPureApi = syncCrypto.pureCrypto, synchronizerConnectionConfigStore = synchronizerConnectionConfigStore, participantId = participantId, parameters = parameters, @@ -310,13 +378,17 @@ class CantonSyncService( } } - private val contractAuthenticator = ContractAuthenticator(syncCrypto.pureCrypto) + private val packageResolver: PackageResolver = packageId => + traceContext => packageService.getPackage(packageId)(traceContext) + + val contractValidator = ContractValidator(syncCrypto.pureCrypto, engine, packageResolver) + + val contractHasher = ContractHasher(engine, packageResolver) val repairService: RepairService = new RepairService( participantId, syncCrypto, packageService.packageDependencyResolver, - contractAuthenticator, participantNodePersistentState.map(_.contractStore), ledgerApiIndexer.asEval(TraceContext.empty), aliasManager, @@ -619,10 +691,18 @@ class CantonSyncService( } } + override def protocolVersionForSynchronizerId( + synchronizerId: SynchronizerId + ): Option[ProtocolVersion] = + connectedSynchronizersLookup + .get(synchronizerId) + .map(_.synchronizerHandle.staticParameters.protocolVersion) + override def allocateParty( hint: LfPartyId, rawSubmissionId: LedgerSubmissionId, synchronizerIdO: Option[SynchronizerId], + externalPartyOnboardingDetails: Option[ExternalPartyOnboardingDetails], )(implicit traceContext: TraceContext ): FutureUnlessShutdown[SubmissionResult] = { @@ -663,12 +743,17 @@ class CantonSyncService( specifiedSynchronizer.getOrElse(onlyConnectedSynchronizer) synchronizerIdOrDetectionError - .map(partyAllocation.allocate(hint, rawSubmissionId, _)) + .map(partyAllocation.allocate(hint, rawSubmissionId, _, externalPartyOnboardingDetails)) .leftMap(FutureUnlessShutdown.pure) .merge } - override def uploadDar(dars: Seq[ByteString], submissionId: Ref.SubmissionId)(implicit + override def uploadDar( + dars: Seq[ByteString], + submissionId: Ref.SubmissionId, + vettingChange: UploadDarVettingChange, + synchronizerIdO: Option[SynchronizerId], + )(implicit traceContext: TraceContext ): Future[SubmissionResult] = withSpan("CantonSyncService.uploadPackages") { implicit traceContext => span => @@ -677,20 +762,77 @@ class CantonSyncService( Future.successful(SyncServiceError.Synchronous.PassiveNode) } else { span.setAttribute("submission_id", submissionId) - packageService - .upload( - dars = dars.map(UploadDarData(_, Some("uploaded-via-ledger-api"), None)), - submissionIdO = Some(submissionId), - vetAllPackages = true, - synchronizeVetting = synchronizeVettingOnConnectedSynchronizers, - ) - .map(_ => SubmissionResult.Acknowledged) - .onShutdown(Left(GrpcErrors.AbortedDueToShutdown.Error())) - .valueOr(err => SubmissionResult.SynchronousError(err.asGrpcStatus)) + + val synchronizerIdOrError = + if (vettingChange == VetAllPackages) { + autoDetectSynchronizer(synchronizerIdO).map(Some(_)) + } else { // no packages should be vetted, therefore don't autodetect the synchronizer + Right(None) + } + + val resultET = + synchronizerIdOrError + .toEitherT[Future] + .flatMap(psidO => + packageService + .upload( + dars = dars.map(UploadDarData(_, Some("uploaded-via-ledger-api"), None)), + submissionIdO = Some(submissionId), + vettingInfo = psidO.map(_ -> synchronizeVettingOnSynchronizer), + ) + .onShutdown(Left(GrpcErrors.AbortedDueToShutdown.Error())) + .leftMap(err => 
SubmissionResult.SynchronousError(err.asGrpcStatus)) + ) + .map(_ => SubmissionResult.Acknowledged) + + EitherTUtil.toFuture(resultET.leftMap(_.exception)) } } - override def validateDar(dar: ByteString, darName: String)(implicit + private def autoDetectSynchronizer( + synchronizerIdO: Option[SynchronizerId] + )(implicit + traceContext: TraceContext + ): Either[SubmissionResult.SynchronousError, PhysicalSynchronizerId] = + synchronizerIdO match { + case Some(desiredSynchronizerId) => + readyConnectedSynchronizerById(desiredSynchronizerId) match { + case Some(connected) => Right(connected.psid) + case None => + Left( + SubmissionResult.SynchronousError( + CantonPackageServiceError.NotConnectedToSynchronizer + .Error( + desiredSynchronizerId.toProtoPrimitive + ) + .asGoogleGrpcStatus + ) + ) + } + case None => + // all packages should be vetted, but no synchronizer was specified, therefore automatically + // detect a single connected synchronizer + readySynchronizers.view.mapValues(_._1).values.toSeq match { + case Seq(singleSynchronizer) => Right(singleSynchronizer) + case synchronizers => + Left( + SubmissionResult.SynchronousError( + CantonPackageServiceError.CannotAutodetectSynchronizer + .Failure( + synchronizers.map(_.logical) + ) + .asGoogleGrpcStatus + ) + ) + + } + } + + override def validateDar( + dar: ByteString, + darName: String, + synchronizerId: Option[SynchronizerId], + )(implicit traceContext: TraceContext ): Future[SubmissionResult] = withSpan("CantonSyncService.validateDar") { implicit traceContext => _ => @@ -698,14 +840,46 @@ class CantonSyncService( logger.debug(s"Rejecting DAR validation request on passive replica.") Future.successful(SyncServiceError.Synchronous.PassiveNode) } else { - packageService - .validateDar(dar, darName) - .map(_ => SubmissionResult.Acknowledged) - .onShutdown(Left(GrpcErrors.AbortedDueToShutdown.Error())) - .valueOr(err => SubmissionResult.SynchronousError(err.asGrpcStatus)) + autoDetectSynchronizer(synchronizerId) match { + case Left(err) => Future(err) + case Right(psid) => + packageService + .validateDar(dar, darName, psid) + .map(_ => SubmissionResult.Acknowledged) + .onShutdown(Left(GrpcErrors.AbortedDueToShutdown.Error())) + .valueOr(err => SubmissionResult.SynchronousError(err.asGrpcStatus)) + } } } + override def updateVettedPackages( + opts: UpdateVettedPackagesOpts + )(implicit + traceContext: TraceContext + ): Future[(Seq[EnrichedVettedPackage], Seq[EnrichedVettedPackage])] = + EitherTUtil.toFuture( + EitherT + .fromEither[Future](autoDetectSynchronizer(opts.synchronizerIdO).leftMap(_.exception)) + .flatMap(synchronizerId => + packageService + .updateVettedPackages(opts, synchronizerId, synchronizeVettingOnSynchronizer) + .failOnShutdownTo(GrpcErrors.AbortedDueToShutdown.Error().asGrpcError) + .leftMap(_.asGrpcError) + ) + ) + + override def listVettedPackages( + opts: ListVettedPackagesOpts + )(implicit + traceContext: TraceContext + ): Future[Seq[(Seq[EnrichedVettedPackage], SynchronizerId, PositiveInt)]] = + EitherTUtil.toFuture( + packageService + .listVettedPackages(opts) + .failOnShutdownTo(GrpcErrors.AbortedDueToShutdown.Error().asGrpcError) + .leftMap(_.asGrpcError) + ) + override def getLfArchive(packageId: PackageId)(implicit traceContext: TraceContext ): Future[Option[DamlLf.Archive]] = @@ -1205,17 +1379,25 @@ class CantonSyncService( /* @param synchronizer For unassignment this should be the source synchronizer, for assignment this is the target synchronizer */ def doReassignment[E <: ReassignmentProcessorError, T]( - 
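The `autoDetectSynchronizer` helper above encodes a simple rule: an explicitly requested synchronizer must currently be connected, and when none is requested there must be exactly one connected synchronizer to pick. A condensed, self-contained restatement of that rule (error types and string ids here are placeholders, not Canton's):

```scala
// Placeholder restatement of the auto-detection rule for DAR upload/vetting.
object AutoDetect {
  sealed trait DetectionError
  final case class NotConnected(requested: String) extends DetectionError
  final case class CannotAutodetect(candidates: Seq[String]) extends DetectionError

  def autoDetect(
      requestedO: Option[String],
      connected: Seq[String],
  ): Either[DetectionError, String] =
    requestedO match {
      case Some(requested) =>
        // An explicitly named synchronizer must be among the connected ones.
        Either.cond(connected.contains(requested), requested, NotConnected(requested))
      case None =>
        // Without an explicit choice, exactly one candidate must exist.
        connected match {
          case Seq(single) => Right(single)
          case candidates => Left(CannotAutodetect(candidates))
        }
    }
}
```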
synchronizerId: SynchronizerId + psid: PhysicalSynchronizerId )( - reassign: ConnectedSynchronizer => EitherT[Future, E, FutureUnlessShutdown[T]] + reassign: ( + ConnectedSynchronizer, + TopologySnapshot, + ) => EitherT[Future, E, FutureUnlessShutdown[T]] )(implicit traceContext: TraceContext): Future[SubmissionResult] = { for { connectedSynchronizer <- EitherT.fromOption[Future]( - readyConnectedSynchronizerById(synchronizerId), + readyConnectedSynchronizerById(psid.logical), ifNone = RequestValidationErrors.InvalidArgument - .Reject(s"Synchronizer id not found: $synchronizerId"): RpcError, + .Reject(s"Synchronizer id not found: $psid"): RpcError, ) - _ <- reassign(connectedSynchronizer) + topologySnapshot <- EitherT.fromOption[Future]( + syncCrypto.ips.forSynchronizer(psid).map(_.currentSnapshotApproximation), + ifNone = RequestValidationErrors.InvalidArgument + .Reject(s"Synchronizer id not found: $psid"): RpcError, + ) + _ <- reassign(connectedSynchronizer, topologySnapshot) .leftMap(error => RequestValidationErrors.InvalidArgument .Reject( @@ -1230,14 +1412,32 @@ class CantonSyncService( .leftMap(error => SubmissionResult.SynchronousError(error.asGrpcStatus)) .merge + def lookupPSId( + synchronizerId: SynchronizerId + ): Either[RequestValidationErrors.InvalidArgument.Reject, PhysicalSynchronizerId] = + connectedSynchronizersLookup + .psidFor(synchronizerId) + .toRight( + RequestValidationErrors.InvalidArgument + .Reject(s"Unable to resolve $synchronizerId to a connected physical synchronizer id") + ) + + def lookupPSIds(source: Source[SynchronizerId], target: Target[SynchronizerId]): Either[ + RequestValidationErrors.InvalidArgument.Reject, + (Source[PhysicalSynchronizerId], Target[PhysicalSynchronizerId]), + ] = for { + sourcePSId <- lookupPSId(source.unwrap).map(Source(_)) + targetPSId <- lookupPSId(target.unwrap).map(Target(_)) + } yield (sourcePSId, targetPSId) + ReassignmentCommandsBatch.create(reassignmentCommands) match { case Right(unassigns: ReassignmentCommandsBatch.Unassignments) => - connectedSynchronizersLookup.psidFor(unassigns.target.unwrap) match { - case Some(targetPSId) => + lookupPSIds(unassigns.source, unassigns.target) match { + case Right((sourcePSId, targetPSId)) => doReassignment( - synchronizerId = unassigns.source.unwrap - )( - _.submitUnassignments( + psid = sourcePSId.unwrap + ) { case (sourceSynchronizer, sourceTopology) => + sourceSynchronizer.submitUnassignments( submitterMetadata = ReassignmentSubmitterMetadata( submitter = submitter, userId = userId, @@ -1247,34 +1447,35 @@ class CantonSyncService( workflowId = workflowId, ), contractIds = unassigns.contractIds, - targetSynchronizer = Target(targetPSId), + targetSynchronizer = targetPSId, + sourceTopology = Source(sourceTopology), ) - ) + } - case None => - Future.failed( - RequestValidationErrors.InvalidArgument - .Reject(s"Unable to resolve ${unassigns.target} to a connected synchronizer id") - .asGrpcError - ) + case Left(err) => Future.failed(err.asGrpcError) } case Right(assigns: ReassignmentCommandsBatch.Assignments) => - doReassignment( - synchronizerId = assigns.target.unwrap - )( - _.submitAssignments( - submitterMetadata = ReassignmentSubmitterMetadata( - submitter = submitter, - userId = userId, - submittingParticipant = participantId, - commandId = commandId, - submissionId = submissionId, - workflowId = workflowId, - ), - reassignmentId = assigns.reassignmentId, - ) - ) + lookupPSId(assigns.target.unwrap) match { + case Right(targetPSId) => + doReassignment( + psid = targetPSId + ) { case 
(targetSynchronizer, targetTopology) => + targetSynchronizer.submitAssignments( + submitterMetadata = ReassignmentSubmitterMetadata( + submitter = submitter, + userId = userId, + submittingParticipant = participantId, + commandId = commandId, + submissionId = submissionId, + workflowId = workflowId, + ), + reassignmentId = assigns.reassignmentId, + targetTopology = Target(targetTopology), + ) + } + case Left(err) => Future.failed(err.asGrpcError) + } case Left(invalidBatch) => Future.failed( RequestValidationErrors.InvalidArgument @@ -1309,19 +1510,35 @@ class CantonSyncService( case (synchronizerAlias, (synchronizerId, submissionReady)) if submissionReady.unwrap => for { topology <- getSnapshot(synchronizerAlias, synchronizerId) - partyWithAttributes <- topology.hostedOn( - Set(request.party), - participantId = request.participantId.getOrElse(participantId), + // Find the attributes for the party if one is passed in, and if we can find it in topology + attributesO <- request.party.parFlatTraverse(party => + topology + .hostedOn( + Set(party), + participantId = request.participantId.getOrElse(participantId), + ) + .map( + _.get(party) + ) ) - } yield partyWithAttributes - .get(request.party) + } yield attributesO .map(attributes => ConnectedSynchronizerResponse.ConnectedSynchronizer( synchronizerAlias, synchronizerId, - attributes.permission, + Some(attributes.permission), ) ) + .orElse( + // Return the connected synchronizer without party information only when no party was requested + Option.when(request.party.isEmpty) { + ConnectedSynchronizerResponse.ConnectedSynchronizer( + synchronizerAlias, + synchronizerId, + None, + ) + } + ) }.toSeq FutureUnlessShutdown.sequence(result).map(_.flatten).map(ConnectedSynchronizerResponse.apply) @@ -1439,6 +1656,9 @@ class CantonSyncService( routingState } + + override def hashOps: HashOps = this.syncCrypto.pureCrypto + } object CantonSyncService { diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/ConnectedSynchronizer.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/ConnectedSynchronizer.scala index b9def684a9..656fd7d6c9 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/ConnectedSynchronizer.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/ConnectedSynchronizer.scala @@ -92,7 +92,13 @@ import com.digitalasset.canton.topology.processing.{ import com.digitalasset.canton.topology.{ParticipantId, PhysicalSynchronizerId, SynchronizerId} import com.digitalasset.canton.tracing.{TraceContext, Traced} import com.digitalasset.canton.util.ReassignmentTag.{Source, Target} -import com.digitalasset.canton.util.{ErrorUtil, FutureUnlessShutdownUtil, MonadUtil} +import com.digitalasset.canton.util.{ + ContractHasher, + ContractValidator, + ErrorUtil, + FutureUnlessShutdownUtil, + MonadUtil, +} import com.digitalasset.daml.lf.engine.Engine import io.grpc.Status import io.opentelemetry.api.trace.Tracer @@ -126,7 +132,8 @@ class ConnectedSynchronizer( private[sync] val persistent: SyncPersistentState, val ephemeral: SyncEphemeralState, val packageService: PackageService, - synchronizerCrypto: SynchronizerCryptoClient, + val synchronizerCrypto: SynchronizerCryptoClient, + contractValidator: ContractValidator, identityPusher: ParticipantTopologyDispatcher, topologyProcessor: TopologyTransactionProcessor, missingKeysAlerter: MissingKeysAlerter, @@ -170,23 +177,26 @@ class 
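Reassignments now resolve both the source and the target logical synchronizer ids to connected physical ids up front, short-circuiting on the first failure. The shape of that resolution, reduced to plain `Either` with ids modelled as strings for illustration:

```scala
// Both ends of a reassignment must resolve to a connected physical
// synchronizer; the for-comprehension stops at the first missing one.
final case class InvalidArgument(reason: String)

object PSIdLookup {
  def lookupPSId(
      connected: Map[String, String]
  )(logicalId: String): Either[InvalidArgument, String] =
    connected
      .get(logicalId)
      .toRight(
        InvalidArgument(s"Unable to resolve $logicalId to a connected physical synchronizer id")
      )

  def lookupPSIds(
      connected: Map[String, String]
  )(source: String, target: String): Either[InvalidArgument, (String, String)] =
    for {
      sourcePSId <- lookupPSId(connected)(source)
      targetPSId <- lookupPSId(connected)(target)
    } yield (sourcePSId, targetPSId)
}
```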
ConnectedSynchronizer( private val seedGenerator = new SeedGenerator(synchronizerCrypto.crypto.pureCrypto) + private val packageResolver: PackageResolver = pkgId => + traceContext => packageService.getPackage(pkgId)(traceContext) + + private val contractHasher = ContractHasher(engine, packageResolver) + private[canton] val requestGenerator = TransactionConfirmationRequestFactory( participantId, psid, )( synchronizerCrypto.crypto.pureCrypto, + contractHasher, seedGenerator, parameters.loggingConfig, loggerFactory, ) - private val packageResolver: PackageResolver = pkgId => - traceContext => packageService.getPackage(pkgId)(traceContext) - private val damle = new DAMLe( - pkgId => traceContext => packageService.getPackage(pkgId)(traceContext), + packageResolver, engine, parameters.engine.validationPhaseLogging, loggerFactory, @@ -199,6 +209,7 @@ class ConnectedSynchronizer( damle, staticSynchronizerParameters, synchronizerCrypto, + contractValidator, sequencerClient, ephemeral.inFlightSubmissionSynchronizerTracker, ephemeral, @@ -210,6 +221,7 @@ class ConnectedSynchronizer( packageResolver = packageResolver, testingConfig = testingConfig, promiseUSFactory, + parameters.loggingConfig.api.messagePayloads, ) private val unassignmentProcessor: UnassignmentProcessor = new UnassignmentProcessor( @@ -220,6 +232,7 @@ class ConnectedSynchronizer( ephemeral.inFlightSubmissionSynchronizerTracker, ephemeral, synchronizerCrypto, + contractValidator, seedGenerator, sequencerClient, timeouts, @@ -238,6 +251,7 @@ class ConnectedSynchronizer( ephemeral.inFlightSubmissionSynchronizerTracker, ephemeral, synchronizerCrypto, + contractValidator, seedGenerator, sequencerClient, timeouts, @@ -436,7 +450,7 @@ class ConnectedSynchronizer( changes }) - def initializeClientAtCleanHead(): FutureUnlessShutdown[Unit] = { + def initializeClientAtCleanHead(): Unit = { // generally, the topology client will be initialised by the topology processor. however, // if there is nothing to be replayed, then the topology processor will only be initialised // once the first event is dispatched. @@ -451,25 +465,15 @@ class ConnectedSynchronizer( ApproximateTime(resubscriptionTs), potentialTopologyChange = true, ) - // now, compute epsilon at resubscriptionTs - topologyClient - .awaitSnapshot(resubscriptionTs) - .flatMap(snapshot => - snapshot.findDynamicSynchronizerParametersOrDefault( - staticSynchronizerParameters.protocolVersion, - warnOnUsingDefault = false, - ) - ) - .map(_.topologyChangeDelay) - .map { topologyChangeDelay => - // update client - topologyClient.updateHead( - SequencedTime(resubscriptionTs), - EffectiveTime(resubscriptionTs.plus(topologyChangeDelay.duration)), - ApproximateTime(resubscriptionTs), - potentialTopologyChange = true, - ) - } + // now, compute epsilon at resubscriptionTs and update client + topologyClient.updateHead( + SequencedTime(resubscriptionTs), + EffectiveTime( + resubscriptionTs.plus(staticSynchronizerParameters.topologyChangeDelay.duration) + ), + ApproximateTime(resubscriptionTs), + potentialTopologyChange = true, + ) } val startingPoints = ephemeral.startingPoints @@ -483,7 +487,7 @@ class ConnectedSynchronizer( _ <- EitherT.right(sequencerConnectionListener.init()) // Phase 0: Initialise topology client at current clean head - _ <- EitherT.right(initializeClientAtCleanHead()) + _ = initializeClientAtCleanHead() // Phase 2: Log so we know if any repairs have been applied. 
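`initializeClientAtCleanHead` becomes synchronous in this change because the topology change delay now comes from the static synchronizer parameters rather than from awaiting a dynamic-parameter snapshot; the head update itself is just arithmetic on the resubscription timestamp. Roughly (the types below are stand-ins for Canton's time wrappers):

```scala
import java.time.{Duration, Instant}

object CleanHead {
  // Stand-in for the clean-head client update: effective time leads sequenced
  // time by the static topology change delay; approximate time stays at the
  // resubscription timestamp.
  final case class HeadState(sequenced: Instant, effective: Instant, approximate: Instant)

  def cleanHeadUpdate(resubscriptionTs: Instant, topologyChangeDelay: Duration): HeadState =
    HeadState(
      sequenced = resubscriptionTs,
      effective = resubscriptionTs.plus(topologyChangeDelay),
      approximate = resubscriptionTs,
    )
}
```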
_ = logger.info(s"The next repair counter would be $nextRepairCounter") @@ -795,6 +799,7 @@ class ConnectedSynchronizer( submitterMetadata: ReassignmentSubmitterMetadata, contractIds: Seq[LfContractId], targetSynchronizer: Target[PhysicalSynchronizerId], + sourceTopology: Source[TopologySnapshot], )(implicit traceContext: TraceContext ): EitherT[Future, ReassignmentProcessorError, FutureUnlessShutdown[ @@ -824,13 +829,14 @@ class ConnectedSynchronizer( contractIds, targetSynchronizer, ), - synchronizerCrypto.currentSnapshotApproximation.ipsSnapshot, + sourceTopology.unwrap, ) } override def submitAssignments( submitterMetadata: ReassignmentSubmitterMetadata, reassignmentId: ReassignmentId, + targetTopology: Target[TopologySnapshot], )(implicit traceContext: TraceContext ): EitherT[Future, ReassignmentProcessorError, FutureUnlessShutdown[ @@ -854,7 +860,7 @@ class ConnectedSynchronizer( .submit( AssignmentProcessingSteps .SubmissionParam(submitterMetadata, reassignmentId), - synchronizerCrypto.currentSnapshotApproximation.ipsSnapshot, + targetTopology.unwrap, ) } @@ -1040,36 +1046,46 @@ object ConnectedSynchronizer { clock, exitOnFatalFailures = parameters.exitOnFatalFailures, parameters.batchingConfig, + doNotAwaitOnCheckingIncomingCommitments = + parameters.doNotAwaitOnCheckingIncomingCommitments, ) topologyProcessor <- topologyProcessorFactory.create( acsCommitmentProcessor.scheduleTopologyTick ) - } yield new ConnectedSynchronizer( - synchronizerHandle, - participantId, - engine, - parameters, - participantNodePersistentState, - persistentState, - ephemeralState, - packageService, - synchronizerCrypto, - identityPusher, - topologyProcessor, - missingKeysAlerter, - sequencerConnectionSuccessorListener, - reassignmentCoordination, - commandProgressTracker, - ParallelMessageDispatcherFactory, - journalGarbageCollector, - acsCommitmentProcessor, - clock, - promiseUSFactory, - connectedSynchronizerMetrics, - futureSupervisor, - loggerFactory, - testingConfig, - ) + } yield { + val contractValidator = ContractValidator( + synchronizerCrypto.pureCrypto, + engine, + packageId => traceContext => packageService.getPackage(packageId)(traceContext), + ) + new ConnectedSynchronizer( + synchronizerHandle, + participantId, + engine, + parameters, + participantNodePersistentState, + persistentState, + ephemeralState, + packageService, + synchronizerCrypto, + contractValidator, + identityPusher, + topologyProcessor, + missingKeysAlerter, + sequencerConnectionSuccessorListener, + reassignmentCoordination, + commandProgressTracker, + ParallelMessageDispatcherFactory, + journalGarbageCollector, + acsCommitmentProcessor, + clock, + promiseUSFactory, + connectedSynchronizerMetrics, + futureSupervisor, + loggerFactory, + testingConfig, + ) + } } } } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/LogicalSynchronizerUpgrade.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/LogicalSynchronizerUpgrade.scala index 677c8a0021..da0065613b 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/LogicalSynchronizerUpgrade.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/LogicalSynchronizerUpgrade.scala @@ -18,6 +18,10 @@ import com.digitalasset.canton.participant.store.{ } import com.digitalasset.canton.participant.sync.LogicalSynchronizerUpgrade.UpgradabilityCheckResult import com.digitalasset.canton.resource.DbExceptionRetryPolicy 
+import com.digitalasset.canton.topology.transaction.{ + SynchronizerUpgradeAnnouncement, + TopologyMapping, +} import com.digitalasset.canton.topology.{ KnownPhysicalSynchronizerId, PhysicalSynchronizerId, @@ -133,8 +137,48 @@ final class LogicalSynchronizerUpgrade( logger.info(s"Upgrade from $currentPSId to $successorPSId") + // Ensure the upgrade is not attempted if the announcement was revoked + def ensureUpgradeOngoing(): EitherT[FutureUnlessShutdown, String, Unit] = for { + topologyStore <- EitherT.fromOption[FutureUnlessShutdown]( + syncPersistentStateManager.get(currentPSId).map(_.topologyManager.store), + "Unable to find topology store", + ) + + announcements <- EitherT + .liftF( + topologyStore.findPositiveTransactions( + asOf = synchronizerSuccessor.upgradeTime, + asOfInclusive = false, + isProposal = false, + types = Seq(TopologyMapping.Code.SynchronizerUpgradeAnnouncement), + filterUid = None, + filterNamespace = None, + ) + ) + .map(_.collectOfMapping[SynchronizerUpgradeAnnouncement]) + .map(_.result.map(_.transaction.mapping)) + + _ <- announcements match { + case Seq() => EitherT.leftT[FutureUnlessShutdown, Unit]("No synchronizer upgrade ongoing") + case Seq(head) => + EitherT.cond[FutureUnlessShutdown]( + head.successor == synchronizerSuccessor, + (), + s"Expected synchronizer successor to be $synchronizerSuccessor but found ${head.successor} in topology state", + ) + case _more => + EitherT.liftF[FutureUnlessShutdown, String, Unit]( + FutureUnlessShutdown.failed( + new IllegalStateException("Found several SynchronizerUpgradeAnnouncements") + ) + ) + } + } yield () + performIfNotUpgradedYet(successorPSId)( for { + _ <- ensureUpgradeOngoing() + upgradabilityCheckResult <- EitherT[FutureUnlessShutdown, String, UpgradabilityCheckResult]( retryPolicy.unlessShutdown( upgradabilityCheck(alias, currentPSId, synchronizerSuccessor), diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/LogicalSynchronizerUpgradeCallback.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/LogicalSynchronizerUpgradeCallback.scala index 0850514dcb..876a2b7133 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/LogicalSynchronizerUpgradeCallback.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/LogicalSynchronizerUpgradeCallback.scala @@ -4,13 +4,13 @@ package com.digitalasset.canton.participant.sync import com.digitalasset.canton.data.SynchronizerSuccessor -import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.time.SynchronizerTimeTracker import com.digitalasset.canton.topology.* import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.FutureUnlessShutdownUtil -import java.util.concurrent.atomic.AtomicBoolean +import java.util.concurrent.atomic.AtomicReference import scala.concurrent.{ExecutionContext, Future} trait LogicalSynchronizerUpgradeCallback { @@ -22,6 +22,8 @@ trait LogicalSynchronizerUpgradeCallback { * - Successor is registered */ def registerCallback(successor: SynchronizerSuccessor)(implicit traceContext: TraceContext): Unit + + def unregisterCallback(): Unit } object LogicalSynchronizerUpgradeCallback { @@ -29,6 +31,8 @@ object LogicalSynchronizerUpgradeCallback { override def registerCallback(successor: SynchronizerSuccessor)(implicit traceContext: TraceContext ): Unit = () + +
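`ensureUpgradeOngoing` treats the number of active upgrade announcements as a three-way case: none means the upgrade was revoked, exactly one must name the expected successor, and more than one violates an invariant. The decision table, reduced to a plain function (a sketch; the real check runs against the topology store):

```scala
object UpgradeChecks {
  // Three-way check on the announcements found at the upgrade time.
  def checkAnnouncements[A](announcements: Seq[A], expectedSuccessor: A): Either[String, Unit] =
    announcements match {
      case Seq() =>
        Left("No synchronizer upgrade ongoing")
      case Seq(single) =>
        Either.cond(
          single == expectedSuccessor,
          (),
          s"Expected synchronizer successor to be $expectedSuccessor but found $single",
        )
      case _ =>
        // More than one announcement is an invariant violation, not a plain error.
        throw new IllegalStateException("Found several SynchronizerUpgradeAnnouncements")
    }
}
```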
override def unregisterCallback(): Unit = () } } @@ -41,22 +45,37 @@ class LogicalSynchronizerUpgradeCallbackImpl( extends LogicalSynchronizerUpgradeCallback with NamedLogging { - private val registered: AtomicBoolean = new AtomicBoolean(false) + private val registered: AtomicReference[Option[SynchronizerSuccessor]] = new AtomicReference(None) def registerCallback( successor: SynchronizerSuccessor )(implicit traceContext: TraceContext): Unit = - if (registered.compareAndSet(false, true)) { - logger.info(s"Registering callback for upgrade of $psid to $successor") + if (registered.compareAndSet(None, Some(successor))) { + logger.info(s"Registering callback for upgrade of $psid to ${successor.psid}") synchronizerTimeTracker .awaitTick(successor.upgradeTime) .getOrElse(Future.unit) .foreach { _ => - synchronizerConnectionsManager.upgradeSynchronizerTo(psid, successor).discard + if (registered.get().contains(successor)) { + val upgradeResultF = synchronizerConnectionsManager + .upgradeSynchronizerTo(psid, successor) + .value + .map( + _.fold(err => logger.error(s"Upgrade to ${successor.psid} failed: $err"), _ => ()) + ) + + FutureUnlessShutdownUtil.doNotAwaitUnlessShutdown( + upgradeResultF, + s"Failed to upgrade to ${successor.psid}", + ) + } else + logger.info(s"Upgrade to ${successor.psid} was cancelled, not executing the upgrade.") } } else logger.info( - s"Not registering callback for upgrade of $psid to $successor because it was already done" + s"Not registering callback for upgrade of $psid to ${successor.psid} because it was already done" ) + + override def unregisterCallback(): Unit = registered.set(None) } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/PartyAllocation.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/PartyAllocation.scala index 075272c57e..ee55f2cf8d 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/PartyAllocation.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/PartyAllocation.scala @@ -7,7 +7,7 @@ import cats.data.EitherT import cats.implicits.showInterpolator import cats.syntax.bifunctor.* import cats.syntax.either.* -import cats.syntax.traverse.* +import cats.syntax.parallel.* import com.digitalasset.canton.config.CantonRequireTypes.String255 import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.ledger.participant.state.* @@ -17,6 +17,7 @@ import com.digitalasset.canton.participant.topology.ParticipantTopologyManagerEr import com.digitalasset.canton.participant.topology.{LedgerServerPartyNotifier, PartyOps} import com.digitalasset.canton.topology.TopologyManagerError.MappingAlreadyExists import com.digitalasset.canton.topology.{ + ExternalPartyOnboardingDetails, ParticipantId, PartyId, PhysicalSynchronizerId, @@ -44,17 +45,19 @@ private[sync] class PartyAllocation( hint: LfPartyId, rawSubmissionId: LedgerSubmissionId, synchronizerId: PhysicalSynchronizerId, + externalPartyOnboardingDetails: Option[ExternalPartyOnboardingDetails], )(implicit traceContext: TraceContext): FutureUnlessShutdown[SubmissionResult] = withSpan("CantonSyncService.allocateParty") { implicit traceContext => span => span.setAttribute("submission_id", rawSubmissionId) - allocateInternal(hint, rawSubmissionId, synchronizerId) + allocateInternal(hint, rawSubmissionId, synchronizerId, externalPartyOnboardingDetails) } private def allocateInternal( partyName: LfPartyId, 
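The callback implementation above replaces the old `AtomicBoolean` latch with an `AtomicReference[Option[SynchronizerSuccessor]]`: registration wins only via `compareAndSet(None, Some(successor))`, the deferred action re-checks that the same registration is still in place before firing, and `unregisterCallback` clears it. The pattern in isolation (generic, hypothetical names):

```scala
import java.util.concurrent.atomic.AtomicReference

// One-shot, cancellable registration: only the first register() arms it,
// a later trigger fires only if that registration is still in place, and
// unregister() cancels a pending registration.
final class OneShotCallback[A] {
  private val registered = new AtomicReference[Option[A]](None)

  def register(value: A): Boolean =
    registered.compareAndSet(None, Some(value))

  def onTrigger(value: A)(action: A => Unit): Unit =
    // Fire only if the arming registration was not cancelled or replaced.
    if (registered.get().contains(value)) action(value)

  def unregister(): Unit = registered.set(None)
}
```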
rawSubmissionId: LedgerSubmissionId, synchronizerId: PhysicalSynchronizerId, + externalPartyOnboardingDetails: Option[ExternalPartyOnboardingDetails], )(implicit traceContext: TraceContext): FutureUnlessShutdown[SubmissionResult] = { import com.google.rpc.status.Status import io.grpc.Status.Code @@ -69,11 +72,18 @@ private[sync] class PartyAllocation( _ <- EitherT .cond[FutureUnlessShutdown](isActive(), (), SyncServiceError.Synchronous.PassiveNode) .leftWiden[SubmissionResult] - id <- UniqueIdentifier - .create(partyName, participantId.uid.namespace) - .leftMap(SyncServiceError.Synchronous.internalError) - .toEitherT[FutureUnlessShutdown] - partyId = PartyId(id) + // External parties have their own namespace + partyId <- externalPartyOnboardingDetails + .map(_.partyId) + .map(EitherT.pure[FutureUnlessShutdown, SubmissionResult](_)) + .getOrElse { + UniqueIdentifier + // local parties re-use the participant's namespace + .create(partyName, participantId.uid.namespace) + .map(id => PartyId(id)) + .leftMap(SyncServiceError.Synchronous.internalError) + .toEitherT[FutureUnlessShutdown] + } validatedSubmissionId <- EitherT.fromEither[FutureUnlessShutdown]( String255 .fromProtoPrimitive(rawSubmissionId, "LedgerSubmissionId") @@ -90,26 +100,33 @@ private[sync] class PartyAllocation( .rpcStatus() ), ) - _ <- partyNotifier - .expectPartyAllocationForNodes( - partyId, - participantId, - validatedSubmissionId, - ) - .leftMap[SubmissionResult] { err => - reject(err, Some(Code.ABORTED)) - } - .toEitherT[FutureUnlessShutdown] - _ <- partyOps - .allocateParty(partyId, participantId, synchronizerId) + _ <- + if (externalPartyOnboardingDetails.forall(_.fullyAllocatesParty)) { + partyNotifier + .expectPartyAllocationForNodes( + partyId, + participantId, + validatedSubmissionId, + ) + .leftMap[SubmissionResult] { err => + reject(err, Some(Code.ABORTED)) + } + .toEitherT[FutureUnlessShutdown] + } else EitherT.pure[FutureUnlessShutdown, SubmissionResult](()) + _ <- (externalPartyOnboardingDetails match { + case Some(details) => + partyOps.allocateExternalParty(participantId, details, synchronizerId) + case None => partyOps.allocateParty(partyId, participantId, synchronizerId) + }) .leftMap[SubmissionResult] { case IdentityManagerParentError(e) if e.code == MappingAlreadyExists => reject( - show"Party already exists: party $partyId is already allocated on this node", + show"Party already exists: party $partyId is already allocated${if (externalPartyOnboardingDetails.isEmpty) { " on this node" } + else ""}", e.code.category.grpcCode, ) case IdentityManagerParentError(e) => reject(e.cause, e.code.category.grpcCode) - case e => reject(e.toString, Some(Code.INTERNAL)) + case e => reject(e.cause, e.code.category.grpcCode) } .leftMap { x => partyNotifier.expireExpectedPartyAllocationForNodes( @@ -119,20 +136,22 @@ private[sync] class PartyAllocation( ) x } - // TODO(i25076) remove this waiting logic once topology events are published on the ledger api // wait for parties to be available on the currently connected synchronizers waitingSuccessful <- EitherT .right[SubmissionResult]( - connectedSynchronizersLookup.get(synchronizerId).traverse { connectedSynchronizer => - connectedSynchronizer.topologyClient - .awaitUS( - _.inspectKnownParties(partyId.filterString, participantId.filterString) - .map(_.nonEmpty), - timeouts.network.duration, - ) - .map(synchronizerId -> _) - } + if (externalPartyOnboardingDetails.forall(_.fullyAllocatesParty)) { + connectedSynchronizersLookup.snapshot.toSeq.parTraverse { + case 
(synchronizerId, connectedSynchronizer) => + connectedSynchronizer.topologyClient + .awaitUS( + _.inspectKnownParties(partyId.filterString, participantId.filterString) + .map(_.nonEmpty), + timeouts.network.duration, + ) + .map(synchronizerId -> _) + } + } else FutureUnlessShutdown.pure(Seq.empty) ) _ = waitingSuccessful.foreach { case (synchronizerId, successful) => if (!successful) diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncEphemeralStateFactory.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncEphemeralStateFactory.scala index e61e807d9d..950dfec2f7 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncEphemeralStateFactory.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncEphemeralStateFactory.scala @@ -99,7 +99,7 @@ class SyncEphemeralStateFactoryImpl( of the successor a second time. */ synchronizerSuccessorO <- synchronizerCrypto.ips.currentSnapshotApproximation - .isSynchronizerUpgradeOngoing() + .synchronizerUpgradeOngoing() recordOrderPublisher = RecordOrderPublisher( persistentState.psid, diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncPersistentStateManager.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncPersistentStateManager.scala index 63e40ab0bc..32ba7ea876 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncPersistentStateManager.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncPersistentStateManager.scala @@ -18,7 +18,6 @@ import com.digitalasset.canton.environment.{ import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, LifeCycle} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.participant.ParticipantNodeParameters -import com.digitalasset.canton.participant.admin.PackageDependencyResolver import com.digitalasset.canton.participant.ledger.api.LedgerApiStore import com.digitalasset.canton.participant.store.* import com.digitalasset.canton.participant.store.memory.PackageMetadataView @@ -123,10 +122,9 @@ class SyncPersistentStateManager( synchronizerConnectionConfigStore: SynchronizerConnectionConfigStore, synchronizerCryptoFactory: StaticSynchronizerParameters => SynchronizerCrypto, clock: Clock, - packageDependencyResolver: PackageDependencyResolver, + packageMetadataView: PackageMetadataView, ledgerApiStore: Eval[LedgerApiStore], val contractStore: Eval[ContractStore], - packageMetadataView: Eval[PackageMetadataView], futureSupervisor: FutureSupervisor, protected val loggerFactory: NamedLoggerFactory, )(implicit executionContext: ExecutionContext) @@ -202,12 +200,12 @@ class SyncPersistentStateManager( def getSynchronizerIdx( synchronizerId: SynchronizerId - ): FutureUnlessShutdown[IndexedSynchronizer] = + )(implicit traceContext: TraceContext): FutureUnlessShutdown[IndexedSynchronizer] = IndexedSynchronizer.indexed(this.indexedStringStore)(synchronizerId) def getPhysicalSynchronizerIdx( synchronizerId: PhysicalSynchronizerId - ): FutureUnlessShutdown[IndexedPhysicalSynchronizer] = + )(implicit traceContext: TraceContext): FutureUnlessShutdown[IndexedPhysicalSynchronizer] = IndexedPhysicalSynchronizer.indexed(this.indexedStringStore)(synchronizerId) /** Retrieves the 
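Party allocation above now branches on whether external-party onboarding details are present: an external party arrives with a party id in its own namespace, while a local party id is minted from the hint in the participant's namespace. Stripped of the topology plumbing, the derivation is roughly (types and names hypothetical):

```scala
// Hypothetical, simplified model of the party-id choice during allocation.
final case class PartyId(name: String, namespace: String)

object PartyIdDerivation {
  def derivePartyId(
      hint: String,
      participantNamespace: String,
      externalPartyIdO: Option[PartyId],
  ): PartyId =
    // External parties bring their own id (own namespace); local parties
    // reuse the participant's namespace.
    externalPartyIdO.getOrElse(PartyId(hint, participantNamespace))
}
```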
[[com.digitalasset.canton.participant.store.SyncPersistentState]] from the @@ -420,10 +418,9 @@ class SyncPersistentStateManager( clock, synchronizerCryptoFactory(staticSynchronizerParameters), parameters, - packageDependencyResolver, + packageMetadataView, ledgerApiStore, logicalSyncPersistentState, - packageMetadataView, psidLoggerFactory(physicalSynchronizerIdx.synchronizerId), futureSupervisor, ) diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SynchronizerConnectionsManager.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SynchronizerConnectionsManager.scala index f72c6689e2..65862455c1 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SynchronizerConnectionsManager.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SynchronizerConnectionsManager.scala @@ -154,8 +154,7 @@ private[sync] class SynchronizerConnectionsManager( private val reassignmentCoordination: ReassignmentCoordination = ReassignmentCoordination( - reassignmentTimeProofFreshnessProportion = - parameters.reassignmentTimeProofFreshnessProportion, + reassignmentsConfig = parameters.reassignmentsConfig, syncPersistentStateManager = syncPersistentStateManager, submissionHandles = connectedSynchronizers.get, synchronizerId => @@ -1256,19 +1255,35 @@ private[sync] class SynchronizerConnectionsManager( case (synchronizerAlias, (synchronizerId, submissionReady)) if submissionReady.unwrap => for { topology <- getSnapshot(synchronizerAlias, synchronizerId) - partyWithAttributes <- topology.hostedOn( - Set(request.party), - participantId = request.participantId.getOrElse(participantId), + // Find the attributes for the party if one is passed in, and if we can find it in topology + attributesO <- request.party.parFlatTraverse(party => + topology + .hostedOn( + Set(party), + participantId = request.participantId.getOrElse(participantId), + ) + .map( + _.get(party) + ) ) - } yield partyWithAttributes - .get(request.party) + } yield attributesO .map(attributes => ConnectedSynchronizerResponse.ConnectedSynchronizer( synchronizerAlias, synchronizerId, - attributes.permission, + Some(attributes.permission), ) ) + .orElse( + // Return the connected synchronizer without party information only when no party was requested + Option.when(request.party.isEmpty) { + ConnectedSynchronizerResponse.ConnectedSynchronizer( + synchronizerAlias, + synchronizerId, + None, + ) + } + ) }.toSeq FutureUnlessShutdown.sequence(result).map(_.flatten).map(ConnectedSynchronizerResponse.apply) diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/synchronizer/SynchronizerConnectionConfig.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/synchronizer/SynchronizerConnectionConfig.scala index 6e2b334926..659c56a383 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/synchronizer/SynchronizerConnectionConfig.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/synchronizer/SynchronizerConnectionConfig.scala @@ -15,6 +15,7 @@ import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.sequencing.{ GrpcSequencerConnection, SequencerConnection, + SequencerConnectionPoolDelays, SequencerConnections, SubmissionRequestAmplification, } @@ -99,6 +100,7 @@ final case class 
SynchronizerConnectionConfig( `sequencerConnections`.sequencerTrustThreshold, `sequencerConnections`.`sequencerLivenessMargin`, `sequencerConnections`.submissionRequestAmplification, + `sequencerConnections`.sequencerConnectionPoolDelays, ), `manualConnect`, otherSynchronizerId, @@ -156,6 +158,7 @@ final case class SynchronizerConnectionConfig( sequencerConnections.sequencerTrustThreshold, sequencerConnections.sequencerLivenessMargin, sequencerConnections.submissionRequestAmplification, + sequencerConnections.sequencerConnectionPoolDelays, ) } yield this.copy( synchronizerId = updatedSynchronizerId, @@ -319,6 +322,8 @@ object SynchronizerConnectionConfig sequencerLivenessMargin: NonNegativeInt = NonNegativeInt.zero, submissionRequestAmplification: SubmissionRequestAmplification = SubmissionRequestAmplification.NoAmplification, + sequencerConnectionPoolDelays: SequencerConnectionPoolDelays = + SequencerConnectionPoolDelays.default, ): SynchronizerConnectionConfig = { val sequencerConnections = SequencerConnections.tryMany( @@ -326,6 +331,7 @@ object SynchronizerConnectionConfig sequencerTrustThreshold = sequencerTrustThreshold, sequencerLivenessMargin = sequencerLivenessMargin, submissionRequestAmplification = submissionRequestAmplification, + sequencerConnectionPoolDelays = sequencerConnectionPoolDelays, ) SynchronizerConnectionConfig( diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/synchronizer/SynchronizerRegistryHelpers.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/synchronizer/SynchronizerRegistryHelpers.scala index 240acca9ae..4dc07bebaa 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/synchronizer/SynchronizerRegistryHelpers.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/synchronizer/SynchronizerRegistryHelpers.scala @@ -12,7 +12,7 @@ import com.digitalasset.canton.* import com.digitalasset.canton.common.sequencer.SequencerConnectClient import com.digitalasset.canton.common.sequencer.grpc.SequencerInfoLoader.SequencerAggregatedInfo import com.digitalasset.canton.concurrent.HasFutureSupervision -import com.digitalasset.canton.config.{ProcessingTimeout, TestingConfigInternal} +import com.digitalasset.canton.config.{ProcessingTimeout, TestingConfigInternal, TopologyConfig} import com.digitalasset.canton.crypto.{ SyncCryptoApiParticipantProvider, SynchronizerCrypto, @@ -44,7 +44,7 @@ import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.client.SynchronizerTopologyClientWithInit import com.digitalasset.canton.topology.processing.InitialTopologySnapshotValidator -import com.digitalasset.canton.topology.store.PackageDependencyResolverUS +import com.digitalasset.canton.topology.store.PackageDependencyResolver import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.Thereafter.syntax.* import com.digitalasset.canton.util.{EitherTUtil, ErrorUtil} @@ -79,10 +79,11 @@ trait SynchronizerRegistryHelpers extends FlagCloseable with NamedLogging with H cryptoApiProvider: SyncCryptoApiParticipantProvider, clock: Clock, testingConfig: TestingConfigInternal, + topologyConfig: TopologyConfig, recordSequencerInteractions: AtomicReference[Option[RecordingConfig]], replaySequencerConfig: AtomicReference[Option[ReplayConfig]], topologyDispatcher: ParticipantTopologyDispatcher, - packageDependencyResolver: PackageDependencyResolverUS, + 
packageDependencyResolver: PackageDependencyResolver, partyNotifier: LedgerServerPartyNotifier, metrics: SynchronizerAlias => ConnectedSynchronizerMetrics, )(implicit @@ -286,7 +287,7 @@ trait SynchronizerRegistryHelpers extends FlagCloseable with NamedLogging with H _ <- downloadSynchronizerTopologyStateForInitializationIfNeeded( syncPersistentStateManager, psid, - topologyFactory.createInitialTopologySnapshotValidator, + topologyFactory.createInitialTopologySnapshotValidator(topologyConfig), topologyClient, sequencerClient, partyNotifier, diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/synchronizer/grpc/GrpcSynchronizerRegistry.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/synchronizer/grpc/GrpcSynchronizerRegistry.scala index 811fd398fb..82d6434429 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/synchronizer/grpc/GrpcSynchronizerRegistry.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/synchronizer/grpc/GrpcSynchronizerRegistry.scala @@ -11,7 +11,12 @@ import com.digitalasset.canton.* import com.digitalasset.canton.common.sequencer.grpc.SequencerInfoLoader import com.digitalasset.canton.common.sequencer.grpc.SequencerInfoLoader.SequencerAggregatedInfo import com.digitalasset.canton.concurrent.{FutureSupervisor, HasFutureSupervision} -import com.digitalasset.canton.config.{CryptoConfig, ProcessingTimeout, TestingConfigInternal} +import com.digitalasset.canton.config.{ + CryptoConfig, + ProcessingTimeout, + TestingConfigInternal, + TopologyConfig, +} import com.digitalasset.canton.crypto.{ CryptoHandshakeValidator, SyncCryptoApiParticipantProvider, @@ -47,7 +52,7 @@ import com.digitalasset.canton.sequencing.{ import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.client.SynchronizerTopologyClientWithInit -import com.digitalasset.canton.topology.store.PackageDependencyResolverUS +import com.digitalasset.canton.topology.store.PackageDependencyResolver import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.Thereafter.syntax.ThereafterAsyncOps import com.digitalasset.canton.util.{EitherTUtil, ErrorUtil} @@ -74,13 +79,14 @@ class GrpcSynchronizerRegistry( topologyDispatcher: ParticipantTopologyDispatcher, cryptoApiProvider: SyncCryptoApiParticipantProvider, cryptoConfig: CryptoConfig, + topologyConfig: TopologyConfig, clock: Clock, val participantNodeParameters: ParticipantNodeParameters, aliasManager: SynchronizerAliasManager, testingConfig: TestingConfigInternal, recordSequencerInteractions: AtomicReference[Option[RecordingConfig]], replaySequencerConfig: AtomicReference[Option[ReplayConfig]], - packageDependencyResolver: PackageDependencyResolverUS, + packageDependencyResolver: PackageDependencyResolver, metrics: SynchronizerAlias => ConnectedSynchronizerMetrics, sequencerInfoLoader: SequencerInfoLoader, partyNotifier: LedgerServerPartyNotifier, @@ -146,6 +152,13 @@ class GrpcSynchronizerRegistry( val sequencerConnections: SequencerConnections = config.sequencerConnections + val useNewConnectionPool = participantNodeParameters.sequencerClient.useNewConnectionPool + + val synchronizerLoggerFactory = loggerFactory.append( + "synchronizerId", + config.synchronizerId.map(_.toString).getOrElse(config.synchronizerAlias.toString), + ) + val connectionPoolFactory = new GrpcSequencerConnectionXPoolFactory( 
clientProtocolVersions = ProtocolVersionCompatibility.supportedProtocols(participantNodeParameters), @@ -157,21 +170,20 @@ seedForRandomnessO = testingConfig.sequencerTransportSeed, futureSupervisor = futureSupervisor, timeouts = timeouts, - loggerFactory = loggerFactory, + loggerFactory = synchronizerLoggerFactory, ) val connectionPoolE = connectionPoolFactory .createFromOldConfig( - config.sequencerConnections, - config.synchronizerId, - participantNodeParameters.tracing, + sequencerConnections = config.sequencerConnections, + expectedPSIdO = config.synchronizerId, + tracingConfig = participantNodeParameters.tracing, + name = if (useNewConnectionPool) "main" else "dummy", ) .leftMap[SynchronizerRegistryError](error => SynchronizerRegistryError.SynchronizerRegistryInternalError.InvalidState(error.toString) ) - val useNewConnectionPool = participantNodeParameters.sequencerClient.useNewConnectionPool - val runE = for { connectionPool <- connectionPoolE.toEitherT[FutureUnlessShutdown] _ <- @@ -236,6 +248,7 @@ class GrpcSynchronizerRegistry( config.sequencerConnections.sequencerTrustThreshold, config.sequencerConnections.sequencerLivenessMargin, config.sequencerConnections.submissionRequestAmplification, + config.sequencerConnections.sequencerConnectionPoolDelays, ) .leftMap(error => SynchronizerRegistryError.ConnectionErrors.FailedToConnectToSequencers @@ -294,6 +307,7 @@ class GrpcSynchronizerRegistry( config.sequencerConnections.sequencerTrustThreshold, config.sequencerConnections.sequencerLivenessMargin, config.sequencerConnections.submissionRequestAmplification, + config.sequencerConnections.sequencerConnectionPoolDelays, ) .map(connections => config.copy(sequencerConnections = connections)) @@ -311,6 +325,7 @@ class GrpcSynchronizerRegistry( cryptoApiProvider, clock, testingConfig, + topologyConfig, recordSequencerInteractions, replaySequencerConfig, topologyDispatcher, diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifier.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifier.scala index c42bc93ed4..b152494e9f 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifier.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifier.scala @@ -155,7 +155,7 @@ class LedgerServerPartyNotifier( ) } // propagate admin parties - case SynchronizerTrustCertificate(participantId, _) => + case SynchronizerTrustCertificate(participantId, _, _) => Seq( ( participantId.adminParty, @@ -217,6 +217,11 @@ class LedgerServerPartyNotifier( logger.debug( s"Not applying duplicate party metadata update with submission ID $submissionId" ) + // The pending allocation entry is normally removed after we've stored the new metadata in the DB, + // but since there's nothing to store in this case, it won't happen, so remove it now + update.participantId.foreach(pid => + pendingAllocationData.remove((update.partyId, pid)).discard + ) None } } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/PackageOps.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/PackageOps.scala index eed07df044..298a30c9c6 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/PackageOps.scala +++
b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/PackageOps.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton.participant.topology import cats.data.EitherT import cats.syntax.bifunctor.* import cats.syntax.either.* +import cats.syntax.foldable.* import cats.syntax.functor.* import cats.syntax.parallel.* import com.daml.nameof.NameOf.functionFullName @@ -14,7 +15,9 @@ import com.digitalasset.base.error.RpcError import com.digitalasset.canton.LfPackageId import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.ledger.api.SinglePackageTargetVetting import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.participant.admin.CantonPackageServiceError.PackageRemovalErrorCode.PackageInUse @@ -22,8 +25,10 @@ import com.digitalasset.canton.participant.admin.PackageService.DarDescription import com.digitalasset.canton.participant.admin.PackageVettingSynchronization import com.digitalasset.canton.participant.sync.SyncPersistentStateManager import com.digitalasset.canton.participant.topology.ParticipantTopologyManagerError.IdentityManagerParentError +import com.digitalasset.canton.store.packagemeta.PackageMetadata import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.client.TopologySnapshot +import com.digitalasset.canton.topology.store.StoredTopologyTransaction import com.digitalasset.canton.topology.transaction.* import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.{ContinueAfterFailure, EitherTUtil, SimpleExecutionQueue} @@ -44,6 +49,7 @@ trait PackageOps extends NamedLogging { def vetPackages( packages: Seq[PackageId], synchronizeVetting: PackageVettingSynchronization, + psid: PhysicalSynchronizerId, )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, ParticipantTopologyManagerError, Unit] @@ -52,16 +58,46 @@ trait PackageOps extends NamedLogging { mainPkg: LfPackageId, packages: List[LfPackageId], darDescriptor: DarDescription, + psid: PhysicalSynchronizerId, + forceFlags: ForceFlags, )(implicit tc: TraceContext ): EitherT[FutureUnlessShutdown, RpcError, Unit] + + def updateVettedPackages( + targetStates: Seq[SinglePackageTargetVetting[PackageId]], + psid: PhysicalSynchronizerId, + synchronizeVetting: PackageVettingSynchronization, + dryRunSnapshot: Option[PackageMetadata], + )(implicit + tc: TraceContext + ): EitherT[ + FutureUnlessShutdown, + ParticipantTopologyManagerError, + (Seq[VettedPackage], Seq[VettedPackage]), + ] + + def getVettedPackages(synchronizerFilter: Option[Set[SynchronizerId]])(implicit + tc: TraceContext + ): EitherT[ + FutureUnlessShutdown, + ParticipantTopologyManagerError, + Seq[(Seq[VettedPackage], SynchronizerId, PositiveInt)], + ] + + def getVettedPackagesForSynchronizer( + topologyManager: SynchronizerTopologyManager + )(implicit tc: TraceContext): EitherT[ + FutureUnlessShutdown, + ParticipantTopologyManagerError, + Option[(Seq[VettedPackage], PositiveInt)], + ] } class PackageOpsImpl( val participantId: ParticipantId, - val headAuthorizedTopologySnapshot: TopologySnapshot, stateManager: SyncPersistentStateManager, - topologyManager: AuthorizedTopologyManager, + topologyManagerLookup: TopologyManagerLookup, nodeId: 
UniqueIdentifier, initialProtocolVersion: ProtocolVersion, val loggerFactory: NamedLoggerFactory, @@ -70,6 +106,7 @@ class PackageOpsImpl( )(implicit val ec: ExecutionContext) extends PackageOps with FlagCloseable { + import PackageOpsImpl.* private val vettingExecutionQueue = new SimpleExecutionQueue( "sequential-vetting-queue", @@ -119,7 +156,7 @@ class PackageOpsImpl( .flatMap(_.map(_.createHeadTopologySnapshot())) .toList - val packageHasVettingEntry = (headAuthorizedTopologySnapshot :: snapshotsForSynchronizers) + val packageHasVettingEntry = snapshotsForSynchronizers .parTraverse { snapshot => snapshot .determinePackagesWithNoVettingEntry(participantId, Set(packageId)) @@ -132,26 +169,30 @@ class PackageOpsImpl( override def vetPackages( packages: Seq[PackageId], synchronizeVetting: PackageVettingSynchronization, + psid: PhysicalSynchronizerId, )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, ParticipantTopologyManagerError, Unit] = vettingExecutionQueue.executeEUS( for { - newVettedPackagesCreated <- modifyVettedPackages { existingPackages => - val existingAndUpdatedPackages = existingPackages.map { existingVettedPackage => - // if a package to vet has been previously vetted, make sure it has no time bounds - if (packages.contains(existingVettedPackage.packageId)) - existingVettedPackage.asUnbounded - else existingVettedPackage - } - // now determine the actually new packages that need to be vetted - val actuallyNewPackages = - VettedPackage.unbounded(packages).toSet -- existingAndUpdatedPackages - existingAndUpdatedPackages ++ actuallyNewPackages + newVettedPackagesCreated <- modifyVettedPackages(psid, ForceFlags.none) { + existingPackages => + val existingAndUpdatedPackages = existingPackages.map { existingVettedPackage => + // if a package to vet has been previously vetted, make sure it has no time bounds + if (packages.contains(existingVettedPackage.packageId)) + existingVettedPackage.asUnbounded + else existingVettedPackage + } + // now determine the actually new packages that need to be vetted + val actuallyNewPackages = + VettedPackage.unbounded(packages).toSet -- existingAndUpdatedPackages + existingAndUpdatedPackages ++ actuallyNewPackages } // only synchronize with the connected synchronizers if a new VettedPackages transaction was actually issued - _ <- EitherTUtil.ifThenET(newVettedPackagesCreated) { - synchronizeVetting.sync(packages.toSet).mapK(FutureUnlessShutdown.outcomeK) + _ <- newVettedPackagesCreated.traverse_ { vettedPackages => + synchronizeVetting + .sync(vettedPackages, psid) + .mapK(FutureUnlessShutdown.outcomeK) } } yield (), "vet packages", @@ -161,51 +202,178 @@ class PackageOpsImpl( mainPkg: LfPackageId, packages: List[LfPackageId], darDescriptor: DarDescription, + psid: PhysicalSynchronizerId, + forceFlags: ForceFlags, )(implicit tc: TraceContext): EitherT[FutureUnlessShutdown, RpcError, Unit] = vettingExecutionQueue.executeEUS( { val packagesToUnvet = packages.toSet - modifyVettedPackages(_.filterNot(vp => packagesToUnvet(vp.packageId))) + modifyVettedPackages(psid, forceFlags)(_.filterNot(vp => packagesToUnvet(vp.packageId))) .leftWiden[RpcError] .void }, "revoke vetting", ) + override def updateVettedPackages( + targetStates: Seq[SinglePackageTargetVetting[PackageId]], + psid: PhysicalSynchronizerId, + synchronizeVetting: PackageVettingSynchronization, + dryRunSnapshot: Option[PackageMetadata], + )(implicit + tc: TraceContext + ): EitherT[ + FutureUnlessShutdown, + ParticipantTopologyManagerError, + (Seq[VettedPackage], 
Seq[VettedPackage]), + ] = + vettingExecutionQueue.executeEUS( + { + val targetStatesMap: Map[PackageId, SinglePackageTargetVetting[PackageId]] = + targetStates.map((x: SinglePackageTargetVetting[PackageId]) => x.ref -> x).toMap + + def toChange(previousState: VettedPackage): VettedPackageChange = + targetStatesMap.get(previousState.packageId) match { + case None => VettedPackageChange.Unchanged(previousState) + case Some(target) => + VettedPackageChange.Changed(Some(previousState), target.toVettedPackage) + } + + for { + topologyManager <- topologyManagerLookup.byPhysicalSynchronizerId(psid) + currentPackagesAndSerial <- getVettedPackagesForSynchronizer(topologyManager) + currentPackages = currentPackagesAndSerial.map(_._1).getOrElse(Seq()) + currentSerial = currentPackagesAndSerial.map(_._2) + + notInCurrentPackages = targetStatesMap -- currentPackages.map(_.packageId) + updateInstructions = + currentPackages.map(toChange) ++ notInCurrentPackages.values.map( + _.toFreshVettedPackageChange + ) + newAllPackages = updateInstructions.flatMap(_.newState) + newVettedPackagesCreated <- + if (dryRunSnapshot.isDefined) { + topologyManager + .validatePackageVetting( + currentlyVettedPackages = currentPackages.map(_.packageId).toSet, + nextPackageIds = newAllPackages.map(_.packageId).toSet, + dryRunSnapshot = dryRunSnapshot, + forceFlags = ForceFlags.none, + ) + .leftMap[ParticipantTopologyManagerError](IdentityManagerParentError(_)) + .map(_ => Option.empty[Set[VettedPackage]]) + } else { + // Fails if a new topology change is submitted between getVettedPackages + // above and this call to setVettedPackages, since currentSerial will no + // longer be valid. + setVettedPackages( + topologyManager, + currentPackages, + newAllPackages, + currentSerial, + ForceFlags.none, + ) + } + // only synchronize with the connected synchronizers if a new VettedPackages transaction was actually issued + _ <- newVettedPackagesCreated.traverse_ { newVettedPackages => + synchronizeVetting + .sync(newVettedPackages, psid) + .mapK(FutureUnlessShutdown.outcomeK) + } + } yield ( + currentPackages, + newAllPackages, + ) + }, + "update vetted packages", + ) + + override def getVettedPackages(synchronizerFilter: Option[Set[SynchronizerId]])(implicit + tc: TraceContext + ): EitherT[ + FutureUnlessShutdown, + ParticipantTopologyManagerError, + Seq[(Seq[VettedPackage], SynchronizerId, PositiveInt)], + ] = { + val synchronizers = synchronizerFilter.getOrElse(stateManager.getAllLogical.keySet) + synchronizers.toSeq + .parFlatTraverse { synchronizerId => + for { + topologyManager <- topologyManagerLookup.activeBySynchronizerId(synchronizerId) + resultO <- getVettedPackagesForSynchronizer(topologyManager) + } yield resultO.map { case (packages, serial) => (packages, synchronizerId, serial) }.toList + } + } + + override def getVettedPackagesForSynchronizer(topologyManager: SynchronizerTopologyManager)( + implicit tc: TraceContext + ): EitherT[ + FutureUnlessShutdown, + ParticipantTopologyManagerError, + Option[(Seq[VettedPackage], PositiveInt)], + ] = + EitherT.right( + synchronizeWithClosing(functionFullName)( + topologyManager.store + .findPositiveTransactions( + asOf = CantonTimestamp.MaxValue, + asOfInclusive = true, + isProposal = false, + types = Seq(VettedPackages.code), + filterUid = Some(NonEmpty(Seq, nodeId)), + filterNamespace = None, + ) + .map { result => + result + .collectOfMapping[VettedPackages] + .result + .lastOption + .map { + (currentMapping: StoredTopologyTransaction[ + TopologyChangeOp.Replace, + 
VettedPackages, + ]) => + (currentMapping.mapping.packages, currentMapping.serial) + } + } + ) + ) + /** Returns the new vetted package set if a new VettedPackages transaction was authorized, and None otherwise. modifyVettedPackages should * not be called concurrently */ - private def modifyVettedPackages( + private def modifyVettedPackages(psid: PhysicalSynchronizerId, forceFlags: ForceFlags)( action: Seq[VettedPackage] => Seq[VettedPackage] )(implicit tc: TraceContext - ): EitherT[FutureUnlessShutdown, ParticipantTopologyManagerError, Boolean] = + ): EitherT[FutureUnlessShutdown, ParticipantTopologyManagerError, Option[Set[VettedPackage]]] = for { - currentMapping <- EitherT.right( - synchronizeWithClosing(functionFullName)( - topologyManager.store - .findPositiveTransactions( - asOf = CantonTimestamp.MaxValue, - asOfInclusive = true, - isProposal = false, - types = Seq(VettedPackages.code), - filterUid = Some(NonEmpty(Seq, nodeId)), - filterNamespace = None, - ) - .map { result => - result - .collectOfMapping[VettedPackages] - .result - .lastOption - } - ) - ) - currentPackages = currentMapping - .map(_.mapping.packages) - .getOrElse(Seq.empty) - nextSerial = currentMapping.map(_.serial.increment) + topologyManager <- topologyManagerLookup.byPhysicalSynchronizerId(psid) + currentPackagesAndSerial <- getVettedPackagesForSynchronizer(topologyManager) + currentPackages = currentPackagesAndSerial.map(_._1).getOrElse(Seq()) + currentSerial = currentPackagesAndSerial.map(_._2) + newVettedPackagesState = action(currentPackages) + result <- setVettedPackages( + topologyManager, + currentPackages, + newVettedPackagesState, + currentSerial, + forceFlags, + ) + } yield result + + private def setVettedPackages( + topologyManager: SynchronizerTopologyManager, + currentPackages: Seq[VettedPackage], + newVettedPackagesState: Seq[VettedPackage], + currentSerial: Option[PositiveInt], + forceFlags: ForceFlags, + )(implicit + tc: TraceContext + ): EitherT[FutureUnlessShutdown, ParticipantTopologyManagerError, Option[Set[VettedPackage]]] = + for { mapping <- EitherT .fromEither[FutureUnlessShutdown]( VettedPackages.create( @@ -218,22 +386,47 @@ class PackageOpsImpl( TopologyManagerError.InvalidTopologyMapping.Reject(err) ) ) + newSerial = currentSerial.map(_.increment) _ <- EitherTUtil.ifThenET(newVettedPackagesState != currentPackages) { synchronizeWithClosing(functionFullName)( topologyManager .proposeAndAuthorize( op = TopologyChangeOp.Replace, mapping = mapping, - serial = nextSerial, + serial = newSerial, signingKeys = Seq.empty, protocolVersion = initialProtocolVersion, expectFullAuthorization = true, - forceChanges = ForceFlags(ForceFlag.AllowUnvetPackage), + forceChanges = forceFlags, waitToBecomeEffective = None, ) - .leftMap(IdentityManagerParentError(_): ParticipantTopologyManagerError) - .map(_ => ()) + .leftMap[ParticipantTopologyManagerError](IdentityManagerParentError(_)) ) } - } yield newVettedPackagesState != currentPackages + } yield Option.when(newVettedPackagesState != currentPackages)(newVettedPackagesState.toSet) +} + +object PackageOpsImpl { + sealed trait VettedPackageChange { + def newState: Option[VettedPackage] + } + + object VettedPackageChange { + final case class Unchanged(state: VettedPackage) extends VettedPackageChange { + override def newState = Some(state) + } + + final case class Changed( + previousState: Option[VettedPackage], + newState: Option[VettedPackage], + ) extends VettedPackageChange + } + + implicit class TargetVettingToVettedPackage(target: SinglePackageTargetVetting[PackageId]) { + def
toVettedPackage: Option[VettedPackage] = + target.bounds.map { case (lower, upper) => VettedPackage(target.ref, lower, upper) } + + def toFreshVettedPackageChange: VettedPackageChange.Changed = + VettedPackageChange.Changed(None, toVettedPackage) + } } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyDispatcher.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyDispatcher.scala index 184c3bcbbf..05e64c2dee 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyDispatcher.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyDispatcher.scala @@ -34,6 +34,7 @@ import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.Ge import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.* import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.version.ParticipantProtocolFeatureFlags import scala.collection.concurrent.TrieMap import scala.concurrent.duration.* @@ -157,8 +158,12 @@ class ParticipantTopologyDispatcher( traceContext: TraceContext ): EitherT[FutureUnlessShutdown, SynchronizerRegistryError, Unit] = { val logicalSynchronizerId = synchronizerId.logical + val featureFlagsForPV = ParticipantProtocolFeatureFlags.supportedFeatureFlagsByPV.getOrElse( + synchronizerId.protocolVersion, + Set.empty, + ) - def alreadyTrustedInStore( + def alreadyTrustedInStoreWithSupportedFeatures( store: TopologyStore[?] ): EitherT[FutureUnlessShutdown, SynchronizerRegistryError, Boolean] = EitherT.right( @@ -173,7 +178,13 @@ class ParticipantTopologyDispatcher( filterNamespace = None, ) .map(_.toTopologyState.exists { - case SynchronizerTrustCertificate(`participantId`, `logicalSynchronizerId`) => true + // If the certificate is missing feature flags, re-issue the trust certificate with them + case SynchronizerTrustCertificate( + `participantId`, + `logicalSynchronizerId`, + featureFlags, + ) => + featureFlagsForPV.diff(featureFlags.toSet).isEmpty case _ => false }) ) @@ -182,14 +193,17 @@ class ParticipantTopologyDispatcher( def trustSynchronizer( state: SyncPersistentState ): EitherT[FutureUnlessShutdown, SynchronizerRegistryError, Unit] = - synchronizeWithClosing(functionFullName) { - MonadUtil.unlessM(alreadyTrustedInStore(manager.store)) { + MonadUtil.unlessM( + alreadyTrustedInStoreWithSupportedFeatures(manager.store) + ) { + synchronizeWithClosing(functionFullName) { manager .proposeAndAuthorize( TopologyChangeOp.Replace, SynchronizerTrustCertificate( participantId, logicalSynchronizerId, + featureFlagsForPV.toSeq, ), serial = None, signingKeys = Seq.empty, @@ -204,9 +218,12 @@ class ParticipantTopologyDispatcher( ) } } + // check if cert already exists in the synchronizer store getState(synchronizerId).flatMap(state => - MonadUtil.unlessM(alreadyTrustedInStore(state.topologyStore))( + MonadUtil.unlessM( + alreadyTrustedInStoreWithSupportedFeatures(state.topologyStore) + )( trustSynchronizer(state) ) ) diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyValidation.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyValidation.scala index 05286d7fb6..40d2974596 100644 ---
a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyValidation.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyValidation.scala @@ -12,14 +12,13 @@ import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} import com.digitalasset.canton.data.Offset import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLogging} -import com.digitalasset.canton.participant.admin.PackageDependencyResolver import com.digitalasset.canton.participant.protocol.reassignment.IncompleteReassignmentData import com.digitalasset.canton.participant.store.memory.PackageMetadataView import com.digitalasset.canton.participant.store.{AcsInspection, ReassignmentStore} -import com.digitalasset.canton.platform.apiserver.services.admin.PackageUpgradeValidator import com.digitalasset.canton.platform.store.backend.ParameterStorageBackend -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata +import com.digitalasset.canton.store.packagemeta.PackageMetadata import com.digitalasset.canton.topology.TopologyManagerError.ParticipantTopologyManagerError +import com.digitalasset.canton.topology.TopologyManagerError.ParticipantTopologyManagerError.* import com.digitalasset.canton.topology.transaction.HostingParticipant import com.digitalasset.canton.topology.{ ForceFlag, @@ -31,7 +30,6 @@ import com.digitalasset.canton.topology.{ import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.MonadUtil import com.digitalasset.canton.util.ShowUtil.* -import com.digitalasset.daml.lf.language.Ast import scala.concurrent.ExecutionContext @@ -39,8 +37,8 @@ trait ParticipantTopologyValidation extends NamedLogging { def validatePackageVetting( currentlyVettedPackages: Set[LfPackageId], nextPackageIds: Set[LfPackageId], - packageMetadataView: Option[PackageMetadataView], - packageDependencyResolver: PackageDependencyResolver, + packageMetadataView: PackageMetadataView, + dryRunSnapshot: Option[PackageMetadata], acsInspections: () => Map[SynchronizerId, AcsInspection], forceFlags: ForceFlags, disableUpgradeValidation: Boolean, @@ -50,13 +48,16 @@ trait ParticipantTopologyValidation extends NamedLogging { ): EitherT[FutureUnlessShutdown, TopologyManagerError, Unit] = { val toBeAdded = nextPackageIds -- currentlyVettedPackages val toBeDeleted = currentlyVettedPackages -- nextPackageIds - val toBeKept = currentlyVettedPackages.intersect(nextPackageIds) + val packageMetadataSnapshot = dryRunSnapshot.getOrElse(packageMetadataView.getSnapshot) for { - _ <- checkPackageDependencies( - toBeKept, - toBeAdded, - packageDependencyResolver, - forceFlags, + _ <- EitherT.fromEither[FutureUnlessShutdown]( + checkPackageDependencies( + nextPackageIds, + toBeAdded, + toBeDeleted, + packageMetadataSnapshot, + forceFlags, + ) ) _ <- toBeDeleted.toList.parTraverse_(packageId => isPackageInUse(packageId, acsInspections, forceFlags) @@ -69,23 +70,12 @@ trait ParticipantTopologyValidation extends NamedLogging { show"Skipping upgrade validation for newly-added packages $toBeAdded because force flag ${ForceFlag.AllowVetIncompatibleUpgrades.toString} is set" ) Right(()) - } else { - // packageMetadata can be empty if the vetting happens before the package service is created - packageMetadataView match { - case Some(packageMetadataView) => - checkUpgrades( - nextPackageIds, - toBeAdded, - 
packageMetadataView.getSnapshot, - packageMetadataView.packageUpgradeValidator, - ) - case None => - logger.info( - show"Skipping upgrade checks on newly-added packages because package metadata is not available: $toBeAdded" - ) - Right(()) - } - } + } else + packageMetadataView.packageUpgradeValidator.validateUpgrade( + toBeAdded, + nextPackageIds, + packageMetadataSnapshot.packages, + )(LoggingContextWithTrace(loggerFactory)) } } yield () } @@ -107,8 +97,7 @@ trait ParticipantTopologyValidation extends NamedLogging { if !forceFlags .permits(ForceFlag.AllowInsufficientParticipantPermissionForSignatoryParty) => Left( - TopologyManagerError.ParticipantTopologyManagerError.InsufficientParticipantPermissionForSignatoryParty - .Reject(party, synchronizerId): TopologyManagerError + InsufficientParticipantPermissionForSignatoryParty.Reject(party, synchronizerId) ) case true => logger.info( @@ -137,8 +126,7 @@ trait ParticipantTopologyValidation extends NamedLogging { .map { case true if !forceFlags.permits(ForceFlag.DisablePartyWithActiveContracts) => Left( - TopologyManagerError.ParticipantTopologyManagerError.DisablePartyWithActiveContractsRequiresForce - .Reject(party, synchronizerId): TopologyManagerError + DisablePartyWithActiveContractsRequiresForce.Reject(party, synchronizerId) ) case true => logger.debug( @@ -211,27 +199,25 @@ trait ParticipantTopologyValidation extends NamedLogging { nextThresholdO match { case None => Left( - TopologyManagerError.ParticipantTopologyManagerError.InsufficientSignatoryAssigningParticipantsForParty - .RejectRemovingParty( - party, - synchronizerId, - reassignmentId, - ): TopologyManagerError + InsufficientSignatoryAssigningParticipantsForParty.RejectRemovingParty( + party, + synchronizerId, + reassignmentId, + ): TopologyManagerError ) case Some(nextThreshold) if nextThreshold > currentThreshold => Left( - TopologyManagerError.ParticipantTopologyManagerError.InsufficientSignatoryAssigningParticipantsForParty - .RejectThresholdIncrease( - party, - synchronizerId, - reassignmentId, - nextThreshold, - signatoryAssigningParticipants, - ): TopologyManagerError + InsufficientSignatoryAssigningParticipantsForParty.RejectThresholdIncrease( + party, + synchronizerId, + reassignmentId, + nextThreshold, + signatoryAssigningParticipants, + ): TopologyManagerError ) case _ => Left( - TopologyManagerError.ParticipantTopologyManagerError.InsufficientSignatoryAssigningParticipantsForParty + InsufficientSignatoryAssigningParticipantsForParty .RejectNotEnoughSignatoryAssigningParticipants( party, synchronizerId, @@ -253,37 +239,38 @@ trait ParticipantTopologyValidation extends NamedLogging { } private def checkPackageDependencies( - currentlyVettedPackages: Set[LfPackageId], + vettedPackagesTarget: Set[LfPackageId], toBeAdded: Set[LfPackageId], - packageDependencyResolver: PackageDependencyResolver, + toBeRemoved: Set[LfPackageId], + packageMetadataSnapshot: PackageMetadata, forceFlags: ForceFlags, )(implicit - traceContext: TraceContext, - ec: ExecutionContext, - ): EitherT[FutureUnlessShutdown, TopologyManagerError, Unit] = - for { - dependencies <- packageDependencyResolver - .packageDependencies(toBeAdded.toList) - .leftFlatMap[Set[LfPackageId], TopologyManagerError] { missing => - if (forceFlags.permits(ForceFlag.AllowUnknownPackage)) - EitherT.rightT(Set.empty) - else - EitherT.leftT( - ParticipantTopologyManagerError.CannotVetDueToMissingPackages - .Missing(missing): TopologyManagerError - ) - } - - // check that all dependencies are vetted. 
- unvetted = dependencies -- currentlyVettedPackages - _ <- EitherT - .cond[FutureUnlessShutdown]( - unvetted.isEmpty || forceFlags.permits(ForceFlag.AllowUnvettedDependencies), - (), - ParticipantTopologyManagerError.DependenciesNotVetted - .Reject(unvetted): TopologyManagerError, - ) - } yield () + traceContext: TraceContext + ): Either[TopologyManagerError, Unit] = { + def getDependencies(packageId: LfPackageId): Set[LfPackageId] = + packageMetadataSnapshot.packages.get(packageId) match { + case Some(pkg) => pkg.directDeps + case None => + logger.warn( + s"Package dependency checks will be ignored for $packageId as it could not be found in the package metadata. " + + s"This can happen if the package was previously vetted with ${ForceFlag.AllowUnknownPackage}. " + + s"If this was not the case, the participant has reached an inconsistent package vetting state and should be investigated." + ) + Set.empty + } + val (knownToBeAdded, unknownToBeAdded) = + toBeAdded.partition(packageMetadataSnapshot.packages.contains) + val dependenciesOfAdded = packageMetadataSnapshot.allDependenciesRecursively(knownToBeAdded) + val removedDeps = + if (toBeRemoved.nonEmpty) vettedPackagesTarget.flatMap(getDependencies).intersect(toBeRemoved) + else Set.empty + val unvettedDeps = (dependenciesOfAdded -- vettedPackagesTarget) ++ removedDeps + if (unknownToBeAdded.nonEmpty && !forceFlags.permits(ForceFlag.AllowUnknownPackage)) + Left(CannotVetDueToMissingPackages.Missing(unknownToBeAdded)) + else if (unvettedDeps.nonEmpty && !forceFlags.permits(ForceFlag.AllowUnvettedDependencies)) + Left(DependenciesNotVetted.Reject(unvettedDeps)) + else Right(()) + } private def isPackageInUse( packageId: LfPackageId, @@ -314,40 +301,4 @@ trait ParticipantTopologyValidation extends NamedLogging { } ) } - - private def checkUpgrades( - nextPackageIds: Set[LfPackageId], - toBeAdded: Set[LfPackageId], - packageMetadata: PackageMetadata, - packageUpgradeValidator: PackageUpgradeValidator, - )(implicit traceContext: TraceContext): Either[TopologyManagerError, Unit] = { - def getPackageSignature(packageId: LfPackageId): Ast.PackageSignature = - packageMetadata.packages.getOrElse( - packageId, - throw new IllegalStateException( - s"Missing package-id $packageId in the package metadata view" - ), - ) - - def isUpgradeable(packageId: LfPackageId): Boolean = - packageMetadata.packageUpgradabilityMap - .getOrElse( - packageId, - throw new IllegalStateException( - s"Missing package-id $packageId in the package upgradability map" - ), - ) - - // We need to check the entire lineage of newly added packages. - // Removing a package can not lead to incompatible upgrade relationships between the remaining - // packages in the lineage.
- val affectedPackageNames = toBeAdded.map(getPackageSignature(_).metadata.name) - val packagesToCheck = nextPackageIds.toList - .map(packageId => packageId -> getPackageSignature(packageId)) - .filter { case (packageId, pkg) => - affectedPackageNames.contains(pkg.metadata.name) && isUpgradeable(packageId) - } - packageUpgradeValidator - .validateUpgrade(packagesToCheck)(LoggingContextWithTrace(loggerFactory)) - } } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/PartyOps.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/PartyOps.scala index b879860090..fccda6813a 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/PartyOps.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/PartyOps.scala @@ -4,13 +4,18 @@ package com.digitalasset.canton.participant.topology import cats.data.EitherT +import cats.syntax.bifunctor.* import com.daml.nonempty.NonEmpty +import com.digitalasset.base.error.{ErrorCategory, ErrorCode, Explanation, Resolution} import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.error.* import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.participant.topology.ParticipantTopologyManagerError.IdentityManagerParentError +import com.digitalasset.canton.participant.topology.ParticipantTopologyManagerError.{ + ExternalPartyAlreadyExists, + IdentityManagerParentError, +} import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.TopologyManagerError.{ InconsistentTopologySnapshot, @@ -106,8 +111,6 @@ class PartyOps( ParticipantTopologyManagerError.IdentityManagerParentError( InvalidTopologyMapping.Reject(err) ), - // leaving serial to None, because in case of a REMOVE we let the serial - // auto detection mechanism figure out the correct next serial ptp => (Some(existingPtpTx.serial.increment), ptp), ) @@ -138,6 +141,97 @@ class PartyOps( .leftMap(IdentityManagerParentError(_): ParticipantTopologyManagerError) } yield () + def allocateExternalParty( + participantId: ParticipantId, + externalPartyOnboardingDetails: ExternalPartyOnboardingDetails, + synchronizerId: PhysicalSynchronizerId, + )(implicit + traceContext: TraceContext, + ec: ExecutionContext, + ): EitherT[FutureUnlessShutdown, ParticipantTopologyManagerError, Unit] = + for { + topologyManager <- EitherT.fromOption[FutureUnlessShutdown]( + topologyManagerLookup(synchronizerId), + ParticipantTopologyManagerError.IdentityManagerParentError( + TopologyManagerError.TopologyStoreUnknown.Failure(SynchronizerStore(synchronizerId)) + ), + ) + // If the party already has a fully authorized P2P mapping, then it is allocated. + // Since this function only supports allocation of fresh parties, we fail here. + // Further changes to the party topology should be handled via the admin API for now, + // or through party replication for hosting relationship updates. + // We can't rely on the topology manager failing with a MappingAlreadyExists here, + // because the "add" method simply ignores duplicate transactions.
+ // This is actually useful for us as it allows this endpoint to accept the same set of onboarding + // transactions being submitted on all hosting nodes and makes multi-hosted party onboarding easier from a client + // app. + // However, we still want to fail if the party is already allocated, hence this check. + existingAuthorizedP2Ps <- EitherT + .right( + topologyManager.store.findPositiveTransactions( + asOf = CantonTimestamp.MaxValue, + asOfInclusive = false, + isProposal = false, + types = Seq(PartyToParticipant.code), + filterUid = Some(NonEmpty(Seq, externalPartyOnboardingDetails.partyId.uid)), + filterNamespace = None, + ) + ) + _ <- EitherT + .cond[FutureUnlessShutdown]( + existingAuthorizedP2Ps.result.isEmpty, + (), + ExternalPartyAlreadyExists.Failure( + externalPartyOnboardingDetails.partyId, + synchronizerId.logical, + ), + ) + .leftWiden[ParticipantTopologyManagerError] + // Sign the party to participant tx with this participant + // Validation that this participant is a hosting node should already be done in ExternalPartyOnboardingDetails + // If somehow that's not done, authorization will fail in the topology manager + partyToParticipantSignedO <- + externalPartyOnboardingDetails.optionallySignedPartyToParticipant match { + // If it's already signed, extend the signature + case ExternalPartyOnboardingDetails.SignedPartyToParticipant(signed) => + topologyManager + .extendSignature( + signed, + Seq(participantId.fingerprint), + ForceFlags.none, + ) + .map(Some(_)) + .leftMap(IdentityManagerParentError(_): ParticipantTopologyManagerError) + case ExternalPartyOnboardingDetails.UnsignedPartyToParticipant(unsigned) => + // Otherwise add the mapping as a proposal + topologyManager + .proposeAndAuthorize( + op = TopologyChangeOp.Replace, + mapping = unsigned.mapping, + serial = Some(unsigned.serial), + signingKeys = Seq(participantId.fingerprint), + protocolVersion = synchronizerId.protocolVersion, + expectFullAuthorization = false, + waitToBecomeEffective = None, + ) + .map(_ => None) + .leftMap(IdentityManagerParentError(_): ParticipantTopologyManagerError) + } + // Add all transactions at once + _ <- + topologyManager + .add( + externalPartyOnboardingDetails.partyNamespace.toList + .flatMap(_.signedTransactions) ++ Seq( + externalPartyOnboardingDetails.signedPartyToKeyMappingTransaction, + partyToParticipantSignedO, + ).flatten, + ForceFlags.none, + expectFullAuthorization = externalPartyOnboardingDetails.fullyAllocatesParty, + ) + .leftMap(IdentityManagerParentError(_): ParticipantTopologyManagerError) + } yield () + } sealed trait ParticipantTopologyManagerError extends ContextualizedCantonError @@ -150,4 +244,24 @@ object ParticipantTopologyManagerError extends ParticipantErrorGroup { override def logOnCreation: Boolean = false } + @Explanation( + """This error occurs when a request to allocate an external party is made for a party that already exists.""" + ) + @Resolution( + """Allocate a new party with unique keys.
If you're trying to change the hosting nodes of the party, + follow the party replication procedure instead.""" + ) + object ExternalPartyAlreadyExists + extends ErrorCode( + id = "EXTERNAL_PARTY_ALREADY_EXISTS", + ErrorCategory.InvalidGivenCurrentSystemStateResourceExists, + ) { + final case class Failure(partyId: PartyId, synchronizerId: SynchronizerId)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = s"Party $partyId already exists on synchronizer $synchronizerId" + ) + with ParticipantTopologyManagerError + } + } diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/SequencerConnectionSuccessorListener.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/SequencerConnectionSuccessorListener.scala index 65e40c6d12..98592d1093 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/SequencerConnectionSuccessorListener.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/SequencerConnectionSuccessorListener.scala @@ -90,7 +90,7 @@ class SequencerConnectionSuccessorListener( }.toMap configuredSequencerIds = configuredSequencers.keySet - (synchronizerUpgradeOngoing, _) <- OptionT(snapshot.isSynchronizerUpgradeOngoing()) + (synchronizerUpgradeOngoing, _) <- OptionT(snapshot.synchronizerUpgradeOngoing()) SynchronizerSuccessor(successorPSId, upgradeTime) = synchronizerUpgradeOngoing _ = logger.debug( @@ -128,6 +128,7 @@ class SequencerConnectionSuccessorListener( activeConfig.config.sequencerConnections.sequencerTrustThreshold, activeConfig.config.sequencerConnections.sequencerLivenessMargin, activeConfig.config.sequencerConnections.submissionRequestAmplification, + activeConfig.config.sequencerConnections.sequencerConnectionPoolDelays, ) .toOption ) diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/TopologyComponentFactory.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/TopologyComponentFactory.scala index 2762242a8d..a472107d43 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/TopologyComponentFactory.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/TopologyComponentFactory.scala @@ -4,7 +4,12 @@ package com.digitalasset.canton.participant.topology import com.digitalasset.canton.concurrent.FutureSupervisor -import com.digitalasset.canton.config.{BatchingConfig, CachingConfigs, ProcessingTimeout} +import com.digitalasset.canton.config.{ + BatchingConfig, + CachingConfigs, + ProcessingTimeout, + TopologyConfig, +} import com.digitalasset.canton.crypto.SynchronizerCrypto import com.digitalasset.canton.data.{CantonTimestamp, SynchronizerPredecessor} import com.digitalasset.canton.lifecycle.FutureUnlessShutdown @@ -24,9 +29,10 @@ import com.digitalasset.canton.topology.processing.{ TopologyTransactionProcessor, } import com.digitalasset.canton.topology.store.TopologyStoreId.SynchronizerStore -import com.digitalasset.canton.topology.store.{PackageDependencyResolverUS, TopologyStore} +import com.digitalasset.canton.topology.store.{PackageDependencyResolver, TopologyStore} import com.digitalasset.canton.topology.{ParticipantId, PhysicalSynchronizerId} import com.digitalasset.canton.tracing.{TraceContext, Traced} +import org.apache.pekko.stream.Materializer import 
scala.concurrent.ExecutionContext @@ -109,18 +115,21 @@ class TopologyComponentFactory( } } - def createInitialTopologySnapshotValidator(implicit - executionContext: ExecutionContext + def createInitialTopologySnapshotValidator( + topologyConfig: TopologyConfig + )(implicit + executionContext: ExecutionContext, + materializer: Materializer, ): InitialTopologySnapshotValidator = new InitialTopologySnapshotValidator( crypto.pureCrypto, topologyStore, - timeouts, + validateInitialSnapshot = topologyConfig.validateInitialTopologySnapshot, loggerFactory, ) def createCachingTopologyClient( - packageDependencyResolver: PackageDependencyResolverUS, + packageDependencyResolver: PackageDependencyResolver, synchronizerPredecessor: Option[SynchronizerPredecessor], )(implicit executionContext: ExecutionContext, @@ -128,6 +137,7 @@ class TopologyComponentFactory( ): FutureUnlessShutdown[SynchronizerTopologyClientWithInit] = CachingSynchronizerTopologyClient.create( clock, + crypto.staticSynchronizerParameters, topologyStore, synchronizerPredecessor, packageDependencyResolver, @@ -140,7 +150,7 @@ class TopologyComponentFactory( def createTopologySnapshot( asOf: CantonTimestamp, - packageDependencyResolver: PackageDependencyResolverUS, + packageDependencyResolver: PackageDependencyResolver, preferCaching: Boolean, )(implicit executionContext: ExecutionContext): TopologySnapshot = { val snapshot = new StoreBasedTopologySnapshot( diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/TopologyManagerLookup.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/TopologyManagerLookup.scala new file mode 100644 index 0000000000..c90fe05faf --- /dev/null +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/TopologyManagerLookup.scala @@ -0,0 +1,55 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.participant.topology + +import cats.data.EitherT +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.ErrorLoggingContext +import com.digitalasset.canton.topology.admin.grpc.PSIdLookup +import com.digitalasset.canton.topology.store.TopologyStoreId.SynchronizerStore +import com.digitalasset.canton.topology.{ + PhysicalSynchronizerId, + SynchronizerId, + SynchronizerTopologyManager, + TopologyManagerError, +} + +import scala.concurrent.ExecutionContext + +class TopologyManagerLookup( + lookupByPsid: PhysicalSynchronizerId => Option[SynchronizerTopologyManager], + lookupActivePsidByLsid: PSIdLookup, +) { + + def byPhysicalSynchronizerId(psid: PhysicalSynchronizerId)(implicit + errorLoggingContext: ErrorLoggingContext, + ec: ExecutionContext, + ): EitherT[ + FutureUnlessShutdown, + ParticipantTopologyManagerError, + SynchronizerTopologyManager, + ] = + EitherT.fromOption[FutureUnlessShutdown]( + lookupByPsid(psid), + ParticipantTopologyManagerError.IdentityManagerParentError( + TopologyManagerError.TopologyStoreUnknown.Failure(SynchronizerStore(psid)) + ), + ) + + def activeBySynchronizerId(lsid: SynchronizerId)(implicit + errorLoggingContext: ErrorLoggingContext, + ec: ExecutionContext, + ): EitherT[ + FutureUnlessShutdown, + ParticipantTopologyManagerError, + SynchronizerTopologyManager, + ] = + EitherT.fromOption[FutureUnlessShutdown]( + lookupActivePsidByLsid.activePSIdFor(lsid).flatMap(lookupByPsid), + ParticipantTopologyManagerError.IdentityManagerParentError( + TopologyManagerError.TopologyStoreUnknown.NotFoundForSynchronizer(lsid) + ), + ) + +} diff --git a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/util/DAMLe.scala b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/util/DAMLe.scala index 6ff569c75e..af897a3176 100644 --- a/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/util/DAMLe.scala +++ b/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/util/DAMLe.scala @@ -13,7 +13,8 @@ import com.digitalasset.canton.logging.{LoggingContextUtil, NamedLoggerFactory, import com.digitalasset.canton.participant.protocol.EngineController.GetEngineAbortStatus import com.digitalasset.canton.participant.store.ContractAndKeyLookup import com.digitalasset.canton.participant.util.DAMLe.{ - CreateNodeEnricher, + ContractEnricher, + EnrichmentError, HasReinterpret, PackageResolver, ReInterpretationResult, @@ -24,7 +25,7 @@ import com.digitalasset.canton.platform.apiserver.execution.ContractAuthenticato import com.digitalasset.canton.protocol.* import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.Thereafter.syntax.ThereafterOps -import com.digitalasset.canton.{LfCommand, LfCreateCommand, LfKeyResolver, LfPartyId} +import com.digitalasset.canton.{LfCommand, LfCreateCommand, LfKeyResolver, LfPackageId, LfPartyId} import com.digitalasset.daml.lf.VersionRange import com.digitalasset.daml.lf.data.Ref.{PackageId, PackageName} import com.digitalasset.daml.lf.data.{ImmArray, Ref, Time} @@ -34,7 +35,7 @@ import com.digitalasset.daml.lf.interpretation.Error as LfInterpretationError import com.digitalasset.daml.lf.language.Ast.Package import com.digitalasset.daml.lf.language.LanguageVersion import com.digitalasset.daml.lf.language.LanguageVersion.v2_dev -import com.digitalasset.daml.lf.transaction.ContractKeyUniquenessMode +import 
com.digitalasset.daml.lf.transaction.{ContractKeyUniquenessMode, FatContractInstance} import java.nio.file.Path import scala.annotation.tailrec @@ -88,13 +89,13 @@ object DAMLe { * validation. */ type PackageResolver = PackageId => TraceContext => FutureUnlessShutdown[Option[Package]] - private type Enricher[A] = A => TraceContext => EitherT[ + private type Enricher[I, O] = I => TraceContext => EitherT[ FutureUnlessShutdown, ReinterpretationError, - A, + O, ] - type TransactionEnricher = Enricher[LfVersionedTransaction] - type CreateNodeEnricher = Enricher[LfNodeCreate] + type TransactionEnricher = Enricher[LfVersionedTransaction, LfVersionedTransaction] + type ContractEnricher = Enricher[(FatContractInstance, Set[LfPackageId]), FatContractInstance] sealed trait ReinterpretationError extends PrettyPrinting @@ -108,6 +109,10 @@ object DAMLe { ) } + final case class EnrichmentError(reason: String) extends ReinterpretationError { + override protected def pretty: Pretty[EnrichmentError] = adHocPrettyInstance + } + private val zeroSeed: LfHash = LfHash.assertFromByteArray(new Array[Byte](LfHash.underlyingHashLength)) @@ -180,8 +185,15 @@ class DAMLe( /** Enrich contract values by re-hydrating record labels and identifiers */ - val enrichCreateNode: CreateNodeEnricher = { createNode => implicit traceContext => - EitherT.liftF(interactiveSubmissionEnricher.enrichCreateNode(createNode)) + val enrichContract: ContractEnricher = { case (createNode, targetPackageIds) => + implicit traceContext => + interactiveSubmissionEnricher + .enrichContract(createNode, targetPackageIds) + .leftFlatMap(err => + EitherT.leftT[FutureUnlessShutdown, FatContractInstance]( + EnrichmentError(err): ReinterpretationError + ) + ) + } override def reinterpret( @@ -367,17 +379,11 @@ class DAMLe( case ResultNeedContract(acoid, resume) => (CantonContractIdVersion.extractCantonContractIdVersion(acoid) match { case Right(version) => - val hashingMethod = version match { - case v1: CantonContractIdV1Version => v1.contractHashingMethod - case _: CantonContractIdV2Version => - // TODO(#23971) - Add support for transforming the contract argument prior to hashing and switch to TypedNormalForm - LfHash.HashingMethod.UpgradeFriendly - } contracts.lookupFatContract(acoid).value.map[Response] { case Some(contract) => Response.ContractFound( contract, - hashingMethod, + version.contractHashingMethod, hash => contractAuthenticator(contract, hash).isRight, ) case None => Response.ContractNotFound diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageOpsForTesting.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageOpsForTesting.scala index 745ef21664..7128377f48 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageOpsForTesting.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageOpsForTesting.scala @@ -6,11 +6,21 @@ package com.digitalasset.canton.participant.admin import cats.data.EitherT import com.digitalasset.base.error.RpcError import com.digitalasset.canton.LfPackageId +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.ledger.api.SinglePackageTargetVetting import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.participant.admin.CantonPackageServiceError.PackageRemovalErrorCode.PackageInUse
import com.digitalasset.canton.participant.topology.{PackageOps, ParticipantTopologyManagerError} -import com.digitalasset.canton.topology.ParticipantId +import com.digitalasset.canton.store.packagemeta.PackageMetadata +import com.digitalasset.canton.topology.transaction.VettedPackage +import com.digitalasset.canton.topology.{ + ForceFlags, + ParticipantId, + PhysicalSynchronizerId, + SynchronizerId, + SynchronizerTopologyManager, +} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.daml.lf.data.Ref.PackageId @@ -37,14 +47,43 @@ class PackageOpsForTesting( mainPkg: LfPackageId, packages: List[LfPackageId], darDescriptor: PackageService.DarDescription, + psid: PhysicalSynchronizerId, + forceFlags: ForceFlags, )(implicit tc: TraceContext): EitherT[FutureUnlessShutdown, RpcError, Unit] = EitherT.rightT(()) override def vetPackages( packages: Seq[PackageId], synchronizeVetting: PackageVettingSynchronization, + psid: PhysicalSynchronizerId, )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, ParticipantTopologyManagerError, Unit] = EitherT.rightT(()) + + override def getVettedPackages( + synchronizerFilter: Option[Set[SynchronizerId]] + )(implicit tc: TraceContext): EitherT[FutureUnlessShutdown, ParticipantTopologyManagerError, Seq[ + (Seq[VettedPackage], SynchronizerId, PositiveInt) + ]] = EitherT.rightT(Seq()) + + override def getVettedPackagesForSynchronizer(topologyManager: SynchronizerTopologyManager)( + implicit tc: TraceContext + ): EitherT[FutureUnlessShutdown, ParticipantTopologyManagerError, Option[ + (Seq[VettedPackage], PositiveInt) + ]] = EitherT.rightT(None) + + override def updateVettedPackages( + targetStates: Seq[SinglePackageTargetVetting[PackageId]], + psid: PhysicalSynchronizerId, + synchronizeVetting: PackageVettingSynchronization, + dryRunSnapshot: Option[PackageMetadata], + )(implicit + tc: TraceContext + ): EitherT[ + FutureUnlessShutdown, + ParticipantTopologyManagerError, + (Seq[VettedPackage], Seq[VettedPackage]), + ] = + EitherT.rightT((Seq(), Seq())) } diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageOpsTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageOpsTest.scala index 4ebd861440..0a93019031 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageOpsTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageOpsTest.scala @@ -25,12 +25,13 @@ import com.digitalasset.canton.participant.topology.{ PackageOps, PackageOpsImpl, TopologyComponentFactory, + TopologyManagerLookup, } import com.digitalasset.canton.store.{IndexedPhysicalSynchronizer, IndexedSynchronizer} import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.client.TopologySnapshot import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} -import com.digitalasset.canton.topology.store.TopologyStoreId.AuthorizedStore +import com.digitalasset.canton.topology.store.TopologyStoreId.SynchronizerStore import com.digitalasset.canton.topology.store.{ StoredTopologyTransaction, StoredTopologyTransactions, @@ -54,29 +55,17 @@ trait PackageOpsTestBase extends AsyncWordSpec with BaseTest with ArgumentMatche s"$sutName.hasPackageVettingEntry" should { "return true" when { - "head authorized store has the package vetted" in withTestSetup { env => - import env.* - unvettedPackagesForSnapshots(Set.empty, Set(pkgId1)) - 
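// Editor's note: an illustrative sketch, not part of this patch. The test stubs above mirror
// the production change that package vetting is now scoped per synchronizer: vetDar/vetPackages
// take a PhysicalSynchronizerId, and PackageOpsImpl resolves the matching topology manager via
// the new TopologyManagerLookup added earlier in this diff. A simplified, self-contained
// analogue of that resolution logic, with hypothetical stand-in types:
object TopologyManagerLookupSketch {
  final case class PhysicalSynchronizerId(id: String) // stand-in
  final case class SynchronizerId(id: String) // stand-in for the logical id
  final class Manager(val psid: PhysicalSynchronizerId) // stand-in for SynchronizerTopologyManager

  final class Lookup(
      lookupByPsid: PhysicalSynchronizerId => Option[Manager],
      activePsidFor: SynchronizerId => Option[PhysicalSynchronizerId],
  ) {
    // Mirrors byPhysicalSynchronizerId: fail if no manager is registered for the store.
    def byPhysicalSynchronizerId(psid: PhysicalSynchronizerId): Either[String, Manager] =
      lookupByPsid(psid).toRight(s"topology store unknown for $psid")

    // Mirrors activeBySynchronizerId: resolve the active physical id first, then its manager.
    def activeBySynchronizerId(lsid: SynchronizerId): Either[String, Manager] =
      activePsidFor(lsid).flatMap(lookupByPsid).toRight(s"no active synchronizer for $lsid")
  }
}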
packageOps.hasVettedPackageEntry(pkgId1).failOnShutdown.map(_ shouldBe true) - } - "one synchronizer topology snapshot has the package vetted" in withTestSetup { env => import env.* - unvettedPackagesForSnapshots(Set(pkgId1), Set.empty) - packageOps.hasVettedPackageEntry(pkgId1).failOnShutdown.map(_ shouldBe true) - } - - "all topology snapshots have the package vetted" in withTestSetup { env => - import env.* - unvettedPackagesForSnapshots(Set.empty, Set.empty) + unvettedPackagesForSnapshots(Set.empty) packageOps.hasVettedPackageEntry(pkgId1).failOnShutdown.map(_ shouldBe true) } } "return false" when { - "all topology snapshots have the package unvetted" in withTestSetup { env => + "one synchronizer topology snapshot has the package unvetted" in withTestSetup { env => import env.* - unvettedPackagesForSnapshots(Set(pkgId1), Set(pkgId1)) + unvettedPackagesForSnapshots(Set(pkgId1)) packageOps.hasVettedPackageEntry(pkgId1).failOnShutdown.map(_ shouldBe false) } } @@ -117,7 +106,6 @@ trait PackageOpsTestBase extends AsyncWordSpec with BaseTest with ArgumentMatche val stateManager = mock[SyncPersistentStateManager] val participantId = ParticipantId(UniqueIdentifier.tryCreate("participant", "one")) - val headAuthorizedTopologySnapshot = mock[TopologySnapshot] private val anotherSynchronizerTopologySnapshot = mock[TopologySnapshot] val pkgId1 = LfPackageId.assertFromString("pkgId1") @@ -166,22 +154,14 @@ trait PackageOpsTestBase extends AsyncWordSpec with BaseTest with ArgumentMatche val mainPackageId = DarMainPackageId.tryCreate("darhash") def unvettedPackagesForSnapshots( - unvettedForAuthorizedSnapshot: Set[LfPackageId], - unvettedForSynchronizerSnapshot: Set[LfPackageId], - ): Unit = { - when( - headAuthorizedTopologySnapshot.determinePackagesWithNoVettingEntry( - participantId, - Set(pkgId1), - ) - ).thenReturn(FutureUnlessShutdown.pure(unvettedForAuthorizedSnapshot)) + unvettedForSynchronizerSnapshot: Set[LfPackageId] + ): Unit = when( anotherSynchronizerTopologySnapshot.determinePackagesWithNoVettingEntry( participantId, Set(pkgId1), ) ).thenReturn(FutureUnlessShutdown.pure(unvettedForSynchronizerSnapshot)) - } } } @@ -198,7 +178,7 @@ class PackageOpsTest extends PackageOpsTestBase { arrangeCurrentlyVetted(List(pkgId1)) expectNewVettingState(List(pkgId1, pkgId2)) packageOps - .vetPackages(Seq(pkgId1, pkgId2), PackageVettingSynchronization.NoSync) + .vetPackages(Seq(pkgId1, pkgId2), PackageVettingSynchronization.NoSync, psid) .value .unwrap .map(inside(_) { case UnlessShutdown.Outcome(Right(_)) => succeed }) @@ -212,7 +192,7 @@ class PackageOpsTest extends PackageOpsTestBase { // Not ordered to prove that we check set-equality not ordered arrangeCurrentlyVetted(List(pkgId2, pkgId1)) packageOps - .vetPackages(Seq(pkgId1, pkgId2), PackageVettingSynchronization.NoSync) + .vetPackages(Seq(pkgId1, pkgId2), PackageVettingSynchronization.NoSync, psid) .value .unwrap .map(inside(_) { case UnlessShutdown.Outcome(Right(_)) => @@ -246,6 +226,8 @@ class PackageOpsTest extends PackageOpsTestBase { pkgId1, List(pkgId1, pkgId2), DarDescription(mainPackageId, str, str, str), + psid, + ForceFlags.none, ) .value .unwrap @@ -265,6 +247,8 @@ class PackageOpsTest extends PackageOpsTestBase { pkgId3, List(pkgId3), DarDescription(mainPackageId, str, str, str), + psid, + ForceFlags.none, ) .value .unwrap @@ -287,14 +271,18 @@ class PackageOpsTest extends PackageOpsTestBase { } protected class TestSetup extends CommonTestSetup { - val topologyManager = mock[AuthorizedTopologyManager] + val topologyManager = 
mock[SynchronizerTopologyManager] + + val psid = SynchronizerId.tryFromString("test::synchronizer").toPhysical private val nodeId: UniqueIdentifier = UniqueIdentifier.tryCreate("node", "one") val packageOps = new PackageOpsImpl( participantId = participantId, - headAuthorizedTopologySnapshot = headAuthorizedTopologySnapshot, stateManager = stateManager, - topologyManager = topologyManager, + topologyManagerLookup = new TopologyManagerLookup( + lookupByPsid = _ => Some(topologyManager), + lookupActivePsidByLsid = _ => Some(topologyManager.psid), + ), nodeId = nodeId, initialProtocolVersion = testedProtocolVersion, loggerFactory = loggerFactory, @@ -302,7 +290,7 @@ class PackageOpsTest extends PackageOpsTestBase { futureSupervisor = futureSupervisor, ) - val topologyStore = mock[TopologyStore[AuthorizedStore]] + val topologyStore = mock[TopologyStore[SynchronizerStore]] when(topologyManager.store).thenReturn(topologyStore) val txSerial = PositiveInt.tryCreate(1) def arrangeCurrentlyVetted(currentlyVettedPackages: List[LfPackageId]) = @@ -331,7 +319,7 @@ class PackageOpsTest extends PackageOpsTestBase { eqTo(Seq.empty), eqTo(testedProtocolVersion), eqTo(true), - eqTo(ForceFlags(ForceFlag.AllowUnvetPackage)), + eqTo(ForceFlags.none), any[Option[NonNegativeFiniteDuration]], )(anyTraceContext) ).thenReturn( diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageServiceTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageServiceTest.scala index 6ff599fce0..c746699a6d 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageServiceTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageServiceTest.scala @@ -4,7 +4,6 @@ package com.digitalasset.canton.participant.admin import better.files.* -import cats.Eval import cats.data.EitherT import com.digitalasset.base.error.RpcError import com.digitalasset.canton.BaseTest.getResourcePath @@ -33,7 +32,7 @@ import com.digitalasset.canton.participant.store.memory.{ import com.digitalasset.canton.participant.util.DAMLe import com.digitalasset.canton.platform.apiserver.services.admin.PackageUpgradeValidator import com.digitalasset.canton.time.SimClock -import com.digitalasset.canton.topology.DefaultTestIdentities +import com.digitalasset.canton.topology.{DefaultTestIdentities, SynchronizerId} import com.digitalasset.canton.util.{BinaryFileUtil, MonadUtil} import com.digitalasset.canton.{BaseTest, HasActorSystem, HasExecutionContext, LfPackageId} import com.digitalasset.daml.lf.archive @@ -106,7 +105,12 @@ abstract class BasePackageServiceTest(enableStrictDarValidation: Boolean) val packageStore = new InMemoryDamlPackageStore(loggerFactory) private val processingTimeouts = ProcessingTimeout() val packageDependencyResolver = - new PackageDependencyResolver(packageStore, processingTimeouts, loggerFactory) + new PackageDependencyResolver.Impl( + participantId, + packageStore, + processingTimeouts, + loggerFactory, + ) private val engine = DAMLe.newEngine( enableLfDev = false, @@ -116,24 +120,25 @@ abstract class BasePackageServiceTest(enableStrictDarValidation: Boolean) ) val clock = new SimClock(start = now, loggerFactory = loggerFactory) - val mutablePackageMetadataView = MutablePackageMetadataViewImpl - .createAndInitialize( - clock, - packageDependencyResolver.damlPackageStore, - new PackageUpgradeValidator(CachingConfigs.defaultPackageUpgradeCache, 
loggerFactory), - loggerFactory, - PackageMetadataViewConfig(), - processingTimeouts, - ) - .futureValueUS + val mutablePackageMetadataView = new MutablePackageMetadataViewImpl( + clock, + packageDependencyResolver.damlPackageStore, + new PackageUpgradeValidator(CachingConfigs.defaultPackageUpgradeCache, loggerFactory), + loggerFactory, + PackageMetadataViewConfig(), + processingTimeouts, + futureSupervisor, + exitOnFatalFailures = false, + ) + mutablePackageMetadataView.refreshState.futureValueUS val sut: PackageService = PackageService( clock = clock, engine = engine, + mutablePackageMetadataView = mutablePackageMetadataView, packageDependencyResolver = packageDependencyResolver, enableStrictDarValidation = enableStrictDarValidation, loggerFactory = loggerFactory, metrics = ParticipantTestMetrics, - mutablePackageMetadataView = Eval.now(mutablePackageMetadataView), packageOps = new PackageOpsForTesting(participantId, loggerFactory), timeouts = processingTimeouts, ) @@ -193,8 +198,7 @@ abstract class BasePackageServiceTest(enableStrictDarValidation: Boolean) darBytes = payload, description = Some("CantonExamples"), submissionIdO = None, - vetAllPackages = false, - synchronizeVetting = PackageVettingSynchronization.NoSync, + vettingInfo = None, expectedMainPackageId = None, ) .value @@ -219,8 +223,7 @@ abstract class BasePackageServiceTest(enableStrictDarValidation: Boolean) darBytes = ByteString.copyFrom(bytes), description = Some("some/path/CantonExamples.dar"), submissionIdO = None, - vetAllPackages = false, - synchronizeVetting = PackageVettingSynchronization.NoSync, + vettingInfo = None, expectedMainPackageId = None, ) .value @@ -259,8 +262,7 @@ abstract class BasePackageServiceTest(enableStrictDarValidation: Boolean) .upload( Seq(examples, test), submissionIdO = None, - vetAllPackages = false, - synchronizeVetting = PackageVettingSynchronization.NoSync, + vettingInfo = None, ) .value .map(_.valueOrFail("upload multiple dars")) @@ -297,8 +299,7 @@ abstract class BasePackageServiceTest(enableStrictDarValidation: Boolean) darBytes = ByteString.copyFrom(bytes), description = Some("some/path/CantonExamples.dar"), submissionIdO = None, - vetAllPackages = false, - synchronizeVetting = PackageVettingSynchronization.NoSync, + vettingInfo = None, expectedMainPackageId = expected.map(LfPackageId.assertFromString), ) .value @@ -319,6 +320,8 @@ abstract class BasePackageServiceTest(enableStrictDarValidation: Boolean) } yield succeed } + val psid = SynchronizerId.tryFromString("test::synchronizer").toPhysical + "validate DAR and packages from bytes" in withEnvUS { env => import env.* @@ -327,6 +330,7 @@ abstract class BasePackageServiceTest(enableStrictDarValidation: Boolean) .validateDar( ByteString.copyFrom(bytes), "some/path/CantonExamples.dar", + psid, ) .value .map(_.valueOrFail("couldn't validate a dar file")) @@ -351,8 +355,7 @@ abstract class BasePackageServiceTest(enableStrictDarValidation: Boolean) darBytes = ByteString.copyFrom(bytes), description = Some("some/path/CantonExamples.dar"), submissionIdO = None, - vetAllPackages = false, - synchronizeVetting = PackageVettingSynchronization.NoSync, + vettingInfo = None, expectedMainPackageId = None, ) .valueOrFail("appending dar") } yield { // test for explicit dependencies deps match { - case Left(value) => fail(value) + case Left((value, _)) => fail(value) case Right(loaded) => // all direct dependencies should be part of this (dependencyIds --
loaded) shouldBe empty @@ -380,6 +383,7 @@ abstract class BasePackageServiceTest(enableStrictDarValidation: Boolean) sut.validateDar( payload, badDarPath, + psid, ) )("append illformed.dar").failOnShutdown } yield { @@ -401,11 +405,10 @@ abstract class BasePackageServiceTest(enableStrictDarValidation: Boolean) for { error <- leftOrFail( sut.upload( - payload, - Some(badDarPath), - None, - vetAllPackages = false, - synchronizeVetting = PackageVettingSynchronization.NoSync, + darBytes = payload, + description = Some(badDarPath), + submissionIdO = None, + vettingInfo = None, expectedMainPackageId = None, ) )("append illformed.dar").failOnShutdown @@ -439,17 +442,22 @@ abstract class BasePackageServiceTest(enableStrictDarValidation: Boolean) } val unknownDarId = DarMainPackageId.tryCreate("darid") + val psid = SynchronizerId.tryFromString("test::synchronizer").toPhysical "requested by PackageService.unvetDar" should { "reject the request with an error" in withEnv( - rejectOnMissingDar(_.unvetDar(unknownDarId), unknownDarId, "DAR archive unvetting") + rejectOnMissingDar( + _.unvetDar(unknownDarId, psid), + unknownDarId, + "DAR archive unvetting", + ) ) } "requested by PackageService.vetDar" should { "reject the request with an error" in withEnv( rejectOnMissingDar( - _.vetDar(unknownDarId, PackageVettingSynchronization.NoSync), + _.vetDar(unknownDarId, PackageVettingSynchronization.NoSync, psid), unknownDarId, "DAR archive vetting", ) @@ -458,7 +466,11 @@ abstract class BasePackageServiceTest(enableStrictDarValidation: Boolean) "requested by PackageService.removeDar" should { "reject the request with an error" in withEnv( - rejectOnMissingDar(_.removeDar(unknownDarId), unknownDarId, "DAR archive removal") + rejectOnMissingDar( + _.removeDar(unknownDarId, psids = Set.empty), + unknownDarId, + "DAR archive removal", + ) ) } @@ -477,8 +489,7 @@ abstract class BasePackageServiceTest(enableStrictDarValidation: Boolean) darBytes = payload, description = Some(darName), submissionIdO = None, - vetAllPackages = false, - synchronizeVetting = PackageVettingSynchronization.NoSync, + vettingInfo = None, expectedMainPackageId = None, ) ) @@ -571,8 +582,7 @@ class PackageServiceTestWithoutStrictDarValidation extends BasePackageServiceTes darBytes = payload, description = Some("missing-package.dar"), submissionIdO = None, - vetAllPackages = false, - synchronizeVetting = PackageVettingSynchronization.NoSync, + vettingInfo = None, expectedMainPackageId = None, ) )("uploading dar with missing packages").failOnShutdown @@ -598,8 +608,7 @@ class PackageServiceTestWithoutStrictDarValidation extends BasePackageServiceTes darBytes = payload, description = Some("extra-package.dar"), submissionIdO = None, - vetAllPackages = false, - synchronizeVetting = PackageVettingSynchronization.NoSync, + vettingInfo = None, expectedMainPackageId = None, ) .valueOrFail("uploading dar with extra package") @@ -630,8 +639,7 @@ class PackageServiceTestWithStrictDarValidation extends BasePackageServiceTest(t darBytes = payload, description = Some("missing-package.dar"), submissionIdO = None, - vetAllPackages = false, - synchronizeVetting = PackageVettingSynchronization.NoSync, + vettingInfo = None, expectedMainPackageId = None, ) )("uploading dar with missing packages").failOnShutdown @@ -653,8 +661,7 @@ class PackageServiceTestWithStrictDarValidation extends BasePackageServiceTest(t darBytes = payload, description = Some("extra-package.dar"), submissionIdO = None, - vetAllPackages = false, - synchronizeVetting = 
PackageVettingSynchronization.NoSync, + vettingInfo = None, expectedMainPackageId = None, ) )("uploading dar with extra package").failOnShutdown diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageUploaderTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageUploaderTest.scala index 86e4f21d5a..ec3bb12ad4 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageUploaderTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageUploaderTest.scala @@ -3,7 +3,6 @@ package com.digitalasset.canton.participant.admin -import cats.Eval import com.digitalasset.canton.config.CantonRequireTypes.String255 import com.digitalasset.canton.config.{CachingConfigs, PackageMetadataViewConfig, ProcessingTimeout} import com.digitalasset.canton.data.CantonTimestamp @@ -56,8 +55,8 @@ class PackageUploaderTest ) .futureValueUS - val darMainPackageId = DarMainPackageId.tryCreate(cantonExamplesMainPkgId) - validationResult.value shouldBe darMainPackageId + validationResult.value._1 shouldBe cantonExamplesMainPkgId + validationResult.value._2.size shouldBe 31 // Assert not persisted packageStore.listPackages().futureValueUS shouldBe empty @@ -274,6 +273,8 @@ class PackageUploaderTest loggerFactory = loggerFactory, packageMetadataViewConfig = PackageMetadataViewConfig(), timeouts = ProcessingTimeout(), + futureSupervisor = futureSupervisor, + exitOnFatalFailures = false, ) val packageUploader = new PackageUploader( clock = clock, @@ -285,7 +286,7 @@ class PackageUploaderTest paranoidMode = true, ), enableStrictDarValidation = enableStrictDarValidation, - packageMetadataView = Eval.now(mutablePackageMetadataViewImpl), + packageMetadataView = mutablePackageMetadataViewImpl, timeouts = ProcessingTimeout(), loggerFactory = loggerFactory, ) diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/inspection/AcsInspectionTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/inspection/AcsInspectionTest.scala index 1dbf4f13a6..604cb398c2 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/inspection/AcsInspectionTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/inspection/AcsInspectionTest.scala @@ -139,7 +139,7 @@ object AcsInspectionTest extends MockitoSugar with ArgumentMatchersSugar with Ba signatories = stakeholders.take(1), stakeholders = stakeholders, createdAt = CreationTime.CreatedAt(LfTimestamp.Epoch), - version = LfLanguageVersion.v2_dev, + version = LfSerializationVersion.VDev, packageName = Ref.PackageName.assertFromString("pkg-name"), templateId = Ref.Identifier.assertFromString("pkg:Mod:Template"), argument = LfValue.ValueNil, diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspectionTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspectionTest.scala index c8a8b2bef7..4044e7e8fd 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspectionTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspectionTest.scala @@ -30,6 +30,7 @@ import 
com.digitalasset.canton.participant.sync.{ ConnectedSynchronizersLookup, SyncPersistentStateManager, } +import com.digitalasset.canton.platform.store.interning.MockStringInterning import com.digitalasset.canton.protocol.messages.* import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.store.IndexedSynchronizer @@ -62,6 +63,8 @@ sealed trait SyncStateInspectionTest with HasExecutionContext { this: DbTest => + val mockStringInterning = new MockStringInterning + override def cleanDb( storage: DbStorage )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { @@ -145,6 +148,7 @@ sealed trait SyncStateInspectionTest acsCounterParticipantConfigStore, timeouts, loggerFactory, + Eval.now(mockStringInterning), ) when(syncStateInspection.syncPersistentStateManager.acsCommitmentStore(synchronizerId)) diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/party/PartyReplicationTopologyWorkflowTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/party/PartyReplicationTopologyWorkflowTest.scala index 446a6c2e64..63b18ce454 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/party/PartyReplicationTopologyWorkflowTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/party/PartyReplicationTopologyWorkflowTest.scala @@ -6,9 +6,12 @@ package com.digitalasset.canton.participant.admin.party import cats.data.EitherT import com.digitalasset.canton.config.DefaultProcessingTimeouts import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.crypto.{Hash, HashAlgorithm, TestHash} +import com.digitalasset.canton.crypto.{Fingerprint, Hash, HashAlgorithm, TestHash} import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.protocol.TestSynchronizerParameters +import com.digitalasset.canton.time.{NonNegativeFiniteDuration, SimClock, SynchronizerTimeTracker} +import com.digitalasset.canton.topology.client.StoreBasedSynchronizerTopologyClient import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} import com.digitalasset.canton.topology.store.TopologyStoreId.SynchronizerStore import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStore @@ -23,7 +26,9 @@ import com.digitalasset.canton.topology.transaction.{ ParticipantPermission, PartyToParticipant, SignedTopologyTransaction, + SynchronizerParametersState, TopologyChangeOp, + TopologyMapping, } import com.digitalasset.canton.topology.{ ForceFlags, @@ -34,7 +39,8 @@ import com.digitalasset.canton.topology.{ TopologyManager, TopologyManagerError, } -import com.digitalasset.canton.{BaseTest, HasExecutionContext} +import com.digitalasset.canton.version.ProtocolVersion +import com.digitalasset.canton.{BaseTest, HasExecutionContext, SequencerCounter} import org.scalatest.wordspec.AsyncWordSpec import scala.util.chaining.scalaUtilChainingOps @@ -51,6 +57,7 @@ class PartyReplicationTopologyWorkflowTest private val tp = ParticipantId("target-participant") private val serial = PositiveInt.tryCreate(17) private val serialBefore = PositiveInt.tryCreate(serial.unwrap - 1) + private val serialBefore2 = PositiveInt.tryCreate(serial.unwrap - 2) private val participantPermission = ParticipantPermission.Confirmation private val params = PartyReplicationStatus.ReplicationParams( requestId, @@ -62,6 +69,7 @@ 
class PartyReplicationTopologyWorkflowTest participantPermission, ) + private val tsSerialMinusTwo = CantonTimestamp.ofEpochSecond(-1L) private val tsSerialMinusOne = CantonTimestamp.Epoch private val tsSerial = CantonTimestamp.ofEpochSecond(1L) @@ -127,145 +135,263 @@ class PartyReplicationTopologyWorkflowTest DefaultProcessingTimeouts.testing, ) + private def mockSynchronizerTimeTracker(tsToReturnO: Option[CantonTimestamp]) = + mock[SynchronizerTimeTracker].tap { timeTracker => + when(timeTracker.requestTick(any[CantonTimestamp], any[Boolean])(anyTraceContext)) + .thenReturn(SynchronizerTimeTracker.DummyTickRequest) + when(timeTracker.latestTime).thenReturn(tsToReturnO) + } + private def add(topologyStore: TopologyStore[SynchronizerStore])( ts: CantonTimestamp, serial: PositiveInt, - ptp: PartyToParticipant, + mapping: TopologyMapping, proposal: Boolean = false, ) = { - val signedTx = topologyStoreTestData.makeSignedTx(ptp, serial = serial, isProposal = proposal)( - topologyStoreTestData.p1Key - ) + val signedTx = + topologyStoreTestData.makeSignedTx(mapping, serial = serial, isProposal = proposal)( + topologyStoreTestData.p1Key + ) topologyStore .update( SequencedTime(ts), EffectiveTime(ts), - removeMapping = if (proposal) Map.empty else Map(ptp.uniqueKey -> serial), + removeMapping = if (proposal) Map.empty else Map(mapping.uniqueKey -> serial), removeTxs = Set.empty, additions = Seq(ValidatedTopologyTransaction(signedTx)), ) .map(_ => signedTx) } - "PartyReplicationTopologyWorkflow" should { - "complete authorization when prerequisites are met" in { - val tw = topologyWorkflow() - val topologyManager = mockTopologyManager() - val topologyStore = newTopologyStore() + "PartyReplicationTopologyWorkflow" when { + "onboarding" should { + "complete authorization when prerequisites are met" in { + val tw = topologyWorkflow() + val topologyManager = mockTopologyManager() + val topologyStore = newTopologyStore() - when( - topologyManager.proposeAndAuthorize( - op = TopologyChangeOp.Replace, - mapping = ptpProposal, - serial = Some(serial), - signingKeys = Seq.empty, - protocolVersion = testedProtocolVersion, - expectFullAuthorization = false, - forceChanges = ForceFlags.none, - waitToBecomeEffective = None, - ) - ).thenReturn( - EitherT.rightT[FutureUnlessShutdown, TopologyManagerError]( - topologyStoreTestData.makeSignedTx(ptpProposal, serial = serial, isProposal = true)( - topologyStoreTestData.p1Key + when( + topologyManager.proposeAndAuthorize( + op = TopologyChangeOp.Replace, + mapping = ptpProposal, + serial = Some(serial), + signingKeys = Seq.empty, + protocolVersion = testedProtocolVersion, + expectFullAuthorization = false, + forceChanges = ForceFlags.none, + waitToBecomeEffective = None, + ) + ).thenReturn( + EitherT.rightT[FutureUnlessShutdown, TopologyManagerError]( + topologyStoreTestData.makeSignedTx(ptpProposal, serial = serial, isProposal = true)( + topologyStoreTestData.p1Key + ) ) ) - ) - for { - _ <- add(topologyStore)(tsSerialMinusOne, serialBefore, ptpBefore) - effectiveTsBeforeO <- tw - .authorizeOnboardingTopology(params, topologyManager, topologyStore) - .valueOrFail("expect authorization to succeed") - _ <- add(topologyStore)(tsSerial, serial, ptpProposal).map(tx => - Right(tx): Either[TopologyManagerError, GenericSignedTopologyTransaction] - ) - effectiveTsAfterO <- tw - .authorizeOnboardingTopology(params, topologyManager, topologyStore) - .valueOrFail("expect authorization to succeed") - } yield { - effectiveTsBeforeO shouldBe None - effectiveTsAfterO shouldBe 
Some(tsSerial) - } - }.failOnShutdown + for { + _ <- add(topologyStore)(tsSerialMinusOne, serialBefore, ptpBefore) + effectiveTsBeforeO <- tw + .authorizeOnboardingTopology(params, topologyManager, topologyStore) + .valueOrFail("expect authorization to succeed") + _ <- add(topologyStore)(tsSerial, serial, ptpProposal).map(tx => + Right(tx): Either[TopologyManagerError, GenericSignedTopologyTransaction] + ) + effectiveTsAfterO <- tw + .authorizeOnboardingTopology(params, topologyManager, topologyStore) + .valueOrFail("expect authorization to succeed") + } yield { + effectiveTsBeforeO shouldBe None + effectiveTsAfterO shouldBe Some(tsSerial) + } + }.failOnShutdown - "back off and wait when existing proposal already signed by TP" in { - val tw = topologyWorkflow() - val topologyManager = mockTopologyManager() - val topologyStore = newTopologyStore() + "back off and wait when existing proposal already signed by TP" in { + val tw = topologyWorkflow() + val topologyManager = mockTopologyManager() + val topologyStore = newTopologyStore() - when( - topologyManager.extendSignature( - any[SignedTopologyTransaction[TopologyChangeOp.Replace, PartyToParticipant]], - signingKeys = eqTo(Seq.empty), - eqTo(ForceFlags.none), - )(anyTraceContext) - ).thenReturn( - EitherT.rightT[FutureUnlessShutdown, TopologyManagerError]( - topologyStoreTestData.makeSignedTx(ptpProposal, serial = serial, isProposal = true)( - // returning the same transaction and number of keys indicates that that TP has already signed - // because signing again does not add a new signature - topologyStoreTestData.p1Key + when( + topologyManager.extendSignature( + any[SignedTopologyTransaction[TopologyChangeOp.Replace, PartyToParticipant]], + signingKeys = eqTo(Seq.empty), + eqTo(ForceFlags.none), + )(anyTraceContext) + ).thenReturn( + EitherT.rightT[FutureUnlessShutdown, TopologyManagerError]( + topologyStoreTestData.makeSignedTx(ptpProposal, serial = serial, isProposal = true)( + // returning the same transaction and number of keys indicates that the TP has already signed + // because signing again does not add a new signature + topologyStoreTestData.p1Key ) ) - ) - for { - _ <- add(topologyStore)(tsSerialMinusOne, serialBefore, ptpBefore) - _ <- add(topologyStore)(tsSerial, serial, ptpProposal, proposal = true) - effectiveTsBeforeO <- tw - .authorizeOnboardingTopology(params, topologyManager, topologyStore) - .valueOrFail("expect authorization to succeed") - _ <- add(topologyStore)(tsSerial, serial, ptpProposal).map(tx => - Right(tx): Either[TopologyManagerError, GenericSignedTopologyTransaction] - ) - effectiveTsAfterO <- tw + for { + _ <- add(topologyStore)(tsSerialMinusOne, serialBefore, ptpBefore) + _ <- add(topologyStore)(tsSerial, serial, ptpProposal, proposal = true) + effectiveTsBeforeO <- tw + .authorizeOnboardingTopology(params, topologyManager, topologyStore) + .valueOrFail("expect authorization to succeed") + _ <- add(topologyStore)(tsSerial, serial, ptpProposal).map(tx => + Right(tx): Either[TopologyManagerError, GenericSignedTopologyTransaction] + ) + effectiveTsAfterO <- tw + .authorizeOnboardingTopology(params,
topologyManager, topologyStore) - .valueOrFail("expect authorization to succeed") - } yield { - effectiveTsBeforeO shouldBe None - effectiveTsAfterO shouldBe Some(tsSerial) - } - }.failOnShutdown + .leftOrFail("expect failure") + .map(_ should include regex "Party .* not hosted by source participant") + }.failOnShutdown - "detect party not hosted on synchronizer" in { - val tw = topologyWorkflow() - val topologyManager = mockTopologyManager() - val topologyStore = newTopologyStore() - tw - .authorizeOnboardingTopology(params, topologyManager, topologyStore) - .leftOrFail("expect failure") - .map(_ should include regex "Party .* not hosted by source participant") - }.failOnShutdown + "detect party not hosted on source participant" in { + val tw = topologyWorkflow() + val topologyManager = mockTopologyManager() + val topologyStore = newTopologyStore() - "detect party not hosted on source participant" in { - val tw = topologyWorkflow() - val topologyManager = mockTopologyManager() - val topologyStore = newTopologyStore() + for { + _ <- add(topologyStore)(tsSerialMinusOne, serialBefore, ptpPartyMissingFromSP) + err <- tw + .authorizeOnboardingTopology(params, topologyManager, topologyStore) + .leftOrFail("expect failure") + } yield { + err should include regex "Party .* not hosted by source participant" + } + }.failOnShutdown - for { - _ <- add(topologyStore)(tsSerialMinusOne, serialBefore, ptpPartyMissingFromSP) - err <- tw - .authorizeOnboardingTopology(params, topologyManager, topologyStore) - .leftOrFail("expect failure") - } yield { - err should include regex "Party .* not hosted by source participant" - } - }.failOnShutdown + "detect party not hosted on target participant as onboarding after authorization at serial" in { + val tw = topologyWorkflow() + val topologyManager = mockTopologyManager() + val topologyStore = newTopologyStore() - "detect party not hosted on target participant as onboarding after authorization at serial" in { - val tw = topologyWorkflow() - val topologyManager = mockTopologyManager() - val topologyStore = newTopologyStore() + for { + _ <- add(topologyStore)(tsSerial, serial, ptpProposalMissingOnboardingFlag) + err <- tw + .authorizeOnboardingTopology(params, topologyManager, topologyStore) + .leftOrFail("expect failure") + } yield { + err should include regex "Target participant .* not authorized to onboard party .* even though just added" + } + }.failOnShutdown + } - for { - _ <- add(topologyStore)(tsSerial, serial, ptpProposalMissingOnboardingFlag) - err <- tw - .authorizeOnboardingTopology(params, topologyManager, topologyStore) - .leftOrFail("expect failure") - } yield { - err should include regex "Target participant .* not authorized to onboard party .* even though just added" - } - }.failOnShutdown + "onboarded" should { + "complete authorization only when prerequisites are met" in { + val tw = topologyWorkflow() + val topologyManager = mockTopologyManager() + val topologyStore = newTopologyStore() + val clock = new SimClock(loggerFactory = loggerFactory) + clock.advanceTo(tsSerial) + val topologyClient = new StoreBasedSynchronizerTopologyClient( + clock, + store = topologyStore, + packageDependenciesResolver = StoreBasedSynchronizerTopologyClient.NoPackageDependencies, + timeouts = timeouts, + futureSupervisor = futureSupervisor, + loggerFactory = loggerFactory, + staticSynchronizerParameters = defaultStaticSynchronizerParameters, + ) + val onboardingTs = tsSerialMinusOne + // unsafe time means less than the default one minute decision time + val 
synchronizerLatestTimeObservedUnsafe = Some(CantonTimestamp.ofEpochSecond(20L)) + val synchronizerLatestTimeObservedSafe = Some(CantonTimestamp.ofEpochSecond(3600L)) + + when( + topologyManager.proposeAndAuthorize( + op = TopologyChangeOp.Replace, + mapping = ptpProposalMissingOnboardingFlag, + serial = Some(serial), + signingKeys = Seq.empty, + protocolVersion = testedProtocolVersion, + expectFullAuthorization = true, + forceChanges = ForceFlags.none, + waitToBecomeEffective = None, + ) + ).thenAnswer[TopologyChangeOp, TopologyMapping, Option[PositiveInt], Seq[ + Fingerprint + ], ProtocolVersion, Boolean, ForceFlags, Option[NonNegativeFiniteDuration]] { + case (_, mapping, _, _, _, _, _, _) => + // Have the topology manager mock store the transaction in test topology store. + EitherT.right[TopologyManagerError]( + add(topologyStore)(tsSerial, serial, mapping) + ) + } + + for { + _ <- topologyClient.observed( + SequencedTime(tsSerial), + EffectiveTime(tsSerial), + SequencerCounter.Genesis, + Seq.empty, + ) + _ <- add(topologyStore)(tsSerialMinusTwo, serialBefore2, ptpBefore) + _ <- add(topologyStore)( + tsSerialMinusTwo, + serialBefore2, + SynchronizerParametersState( + synchronizerId, + TestSynchronizerParameters.defaultDynamic, + ), + ) + errTooEarly <- tw + .authorizeOnboardedTopology( + params, + tsSerialMinusTwo, + mockSynchronizerTimeTracker(synchronizerLatestTimeObservedSafe), + topologyManager, + topologyStore, + topologyClient, + ) + .leftOrFail("expect premature authorization to fail") + _ <- add(topologyStore)(onboardingTs, serialBefore, ptpProposal) + isOnboardedAfterUnsafeCall <- tw + .authorizeOnboardedTopology( + params, + onboardingTs, + mockSynchronizerTimeTracker(synchronizerLatestTimeObservedUnsafe), + topologyManager, + topologyStore, + topologyClient, + ) + .valueOrFail("expect authorization to not happen due to unsafe time") + isOnboardedAfterFirstSafeCall <- tw + .authorizeOnboardedTopology( + params, + onboardingTs, + mockSynchronizerTimeTracker(synchronizerLatestTimeObservedSafe), + topologyManager, + topologyStore, + topologyClient, + ) + .valueOrFail("expect authorization to succeed") + isOnboardedAfterSecondSafeCall <- tw + .authorizeOnboardedTopology( + params, + onboardingTs, + mockSynchronizerTimeTracker(synchronizerLatestTimeObservedSafe), + topologyManager, + topologyStore, + topologyClient, + ) + .valueOrFail("expect second call observe party onboarded") + } yield { + errTooEarly should include regex "Party .* is not hosted by target participant" + isOnboardedAfterUnsafeCall shouldBe false + isOnboardedAfterFirstSafeCall shouldBe false + isOnboardedAfterSecondSafeCall shouldBe true + } + }.failOnShutdown + } } } diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/repair/SelectRepresentativePackageIdsTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/repair/SelectRepresentativePackageIdsTest.scala new file mode 100644 index 0000000000..2df57d4123 --- /dev/null +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/repair/SelectRepresentativePackageIdsTest.scala @@ -0,0 +1,241 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.participant.admin.repair + +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.data.Counter +import com.digitalasset.canton.participant.admin.data.{ + ContractImportMode, + RepairContract, + RepresentativePackageIdOverride, +} +import com.digitalasset.canton.protocol.{ExampleTransactionFactory, LfContractId} +import com.digitalasset.canton.store.packagemeta.PackageMetadata.{ + LocalPackagePreference, + PackageResolution, +} +import com.digitalasset.canton.topology.SynchronizerId +import com.digitalasset.canton.util.ShowUtil.* +import com.digitalasset.canton.{BaseTest, LfPackageId, LfPackageName, LfPackageVersion} +import com.digitalasset.daml.lf.data.{Bytes, Ref, Time} +import com.digitalasset.daml.lf.transaction.test.TestNodeBuilder +import com.digitalasset.daml.lf.transaction.{CreationTime, FatContractInstance} +import com.digitalasset.daml.lf.value.Value +import com.digitalasset.daml.lf.value.Value.ValueUnit +import org.scalatest.Assertion +import org.scalatest.wordspec.AnyWordSpec + +private[repair] class SelectRepresentativePackageIdsTest extends AnyWordSpec with BaseTest { + + classOf[SelectRepresentativePackageIds].getSimpleName when { + "selecting representative package IDs" should { + "use the highest versioned local package for the contract's package name when only that package is available" in { + import TestValues.* + testPrecedence( + knownPackages = Set(highestVersionedLocalPackage), + expectation = Right(highestVersionedLocalPackage), + ) + } + + "use the package-name override as higher precedence than the highest versioned local package" in { + import TestValues.* + testPrecedence( + knownPackages = Set(highestVersionedLocalPackage, packageNameOverride), + expectation = Right(packageNameOverride), + ) + } + + "use the contract's creation package-id as higher precedence than the package-name override" in { + import TestValues.* + testPrecedence( + knownPackages = Set( + highestVersionedLocalPackage, + packageNameOverride, + creationPkgId, + ), + expectation = Right(creationPkgId), + ) + } + + "use the exported contract's representative package-id as higher precedence than the contract's creation package-id" in { + import TestValues.* + testPrecedence( + knownPackages = Set( + highestVersionedLocalPackage, + packageNameOverride, + creationPkgId, + rpId, + ), + expectation = Right(rpId), + ) + } + + "use the package-id override for the contract's creation package-id as higher precedence than the exported contract's representative package-id" in { + import TestValues.* + testPrecedence( + knownPackages = Set( + highestVersionedLocalPackage, + packageNameOverride, + creationPkgId, + rpId, + creationPackageIdOverride, + ), + expectation = Right(creationPackageIdOverride), + ) + } + + "use the package-id override for the exported contract's representative package-id as higher precedence than the package-id override for the contract's creation package-id" in { + import TestValues.* + testPrecedence( + knownPackages = Set( + highestVersionedLocalPackage, + packageNameOverride, + creationPkgId, + rpId, + creationPackageIdOverride, + rpIdPackageIdOverride, + ), + expectation = Right(rpIdPackageIdOverride), + ) + } + + "use the contract-id override as highest precedence" in { + import TestValues.* + testPrecedence( + knownPackages = Set( + highestVersionedLocalPackage, + packageNameOverride, + creationPkgId, + rpId, + creationPackageIdOverride, + contractRpIdOverride, + ), + expectation = 
Right(contractRpIdOverride), + ) + } + } + + "fail if no candidate representative package-id is known" in { + import TestValues.* + + testPrecedence( + knownPackages = Set(LfPackageId.assertFromString("unknown-package-id")), + expectation = Left( + s"Could not select a representative package-id for contract with id $contractId. No package in store for the contract's package-name 'somePkgName'." + ), + ) + } + + s"fail if the selected package-id differs from the exported representative package-id in ${ContractImportMode.Accept}" in { + import TestValues.* + + testPrecedence( + knownPackages = Set(contractRpIdOverride), + expectation = Left( + show"Contract import mode is 'Accept' but the selected representative package-id $contractRpIdOverride " + + show"for contract with id ${repairContract.contract.contractId} differs from the exported representative package-id ${repairContract.representativePackageId}. " + + show"Please use contract import mode '${ContractImportMode.Validation}' or '${ContractImportMode.Recomputation}' to change the representative package-id." + ), + contractImportMode = ContractImportMode.Accept, + ) + } + + s"succeed if the selected package-id differs from the exported representative package-id in ${ContractImportMode.Recomputation}" in { + import TestValues.* + + testPrecedence( + knownPackages = Set(contractRpIdOverride), + expectation = Right(contractRpIdOverride), + contractImportMode = ContractImportMode.Recomputation, + ) + } + } + + private def testPrecedence( + knownPackages: Set[LfPackageId], + expectation: Either[String, LfPackageId], + contractImportMode: ContractImportMode = ContractImportMode.Validation, + ): Assertion = { + import TestValues.* + inside( + new SelectRepresentativePackageIds( + representativePackageIdOverride = RepresentativePackageIdOverride( + contractOverride = Map(contractId -> contractRpIdOverride), + packageIdOverride = + Map(creationPkgId -> creationPackageIdOverride, rpId -> rpIdPackageIdOverride), + packageNameOverride = Map(packageName -> packageNameOverride), + ), + knownPackages = knownPackages, + packageNameMap = Map( + packageName -> PackageResolution( + LocalPackagePreference( + LfPackageVersion.assertFromString("0.0.0"), + highestVersionedLocalPackage, + ), + allPackageIdsForName = NonEmpty.mk(Set, highestVersionedLocalPackage), + ) + ), + contractImportMode = contractImportMode, + loggerFactory = loggerFactory, + ).apply(List(repairContract)) + ) { + case Right(Seq(updatedContract)) => + updatedContract.representativePackageId shouldBe expectation.valueOrFail( + "Right returned on expected Left" + ) + case Left(error) => + error shouldBe expectation.leftOrFail("Left returned on expected Right") + } + } + + private def mkContract( + contractId: Value.ContractId, + creationPkgId: LfPackageId, + rpId: LfPackageId, + pkgName: LfPackageName, + ) = { + val templateId = + Ref.TypeConId.apply(creationPkgId, Ref.QualifiedName.assertFromString("Mod:Template")) + + val signatory = Ref.Party.assertFromString("signatory") + val observer = Ref.Party.assertFromString("observer") + RepairContract( + synchronizerId = SynchronizerId.tryFromString("x::synchronizer"), + contract = FatContractInstance.fromCreateNode( + TestNodeBuilder.create( + id = contractId, + templateId = templateId, + argument = ValueUnit, + signatories = Set(signatory), + observers = Set(observer), + packageName = pkgName, + ), + createTime = CreationTime.CreatedAt(Time.Timestamp.now()), + authenticationData = Bytes.Empty, + ), + reassignmentCounter = Counter.MinValue, + 
representativePackageId = rpId, + ) + } + + private object TestValues { + val creationPkgId: LfPackageId = LfPackageId.assertFromString("some-original-pkg-id") + val rpId: LfPackageId = LfPackageId.assertFromString("some-rp-id") + val contractId: Value.ContractId = LfContractId.V1(ExampleTransactionFactory.lfHash(1337)) + val packageName: LfPackageName = Ref.PackageName.assertFromString("somePkgName") + + val repairContract: RepairContract = mkContract(contractId, creationPkgId, rpId, packageName) + + val contractRpIdOverride: LfPackageId = + LfPackageId.assertFromString("some-contract-rpid-override") + val creationPackageIdOverride: LfPackageId = + LfPackageId.assertFromString("some-creation-pkg-id-override") + val rpIdPackageIdOverride: LfPackageId = + LfPackageId.assertFromString("some-rpid-pkg-id-override") + val packageNameOverride: LfPackageId = + LfPackageId.assertFromString("some-package-name-pkg-id-override") + val highestVersionedLocalPackage: LfPackageId = + LfPackageId.assertFromString("some-highest-versioned-pkg-id-override") + } +} diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/DummyContractAuthenticator.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/DummyContractAuthenticator.scala deleted file mode 100644 index 5b8102cf96..0000000000 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/DummyContractAuthenticator.scala +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.participant.protocol - -import cats.syntax.either.* -import com.digitalasset.canton.protocol.LfHash -import com.digitalasset.canton.util.ContractAuthenticator -import com.digitalasset.daml.lf.transaction.FatContractInstance - -object DummyContractAuthenticator extends ContractAuthenticator { - override def authenticate( - contract: FatContractInstance, - contractHash: LfHash, - ): Either[String, Unit] = ??? 
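// Editor's note: an illustrative sketch, not part of this patch. The tests above pin down the
// precedence SelectRepresentativePackageIds applies, from lowest to highest: highest-versioned
// local package for the contract's package name, package-name override, creation package-id,
// exported representative package-id, package-id override keyed by the creation package-id,
// package-id override keyed by the exported representative package-id, and finally the
// per-contract override. A simplified analogue (names hypothetical; the real class also threads
// ContractImportMode and proper error reporting): candidates are tried from highest to lowest
// precedence, keeping the first one that is actually known to the package store.
object RpIdPrecedenceSketch {
  type PkgId = String

  def select(
      contractIdOverride: Option[PkgId], // 1. per-contract override (highest precedence)
      rpIdOverride: Option[PkgId], // 2. package-id override keyed by exported rp-id
      creationIdOverride: Option[PkgId], // 3. package-id override keyed by creation pkg-id
      exportedRpId: PkgId, // 4. exported representative package-id
      creationPkgId: PkgId, // 5. contract's creation package-id
      packageNameOverride: Option[PkgId], // 6. override keyed by the package-name
      highestLocalForName: Option[PkgId], // 7. highest versioned local package (lowest)
      known: Set[PkgId],
  ): Option[PkgId] =
    (contractIdOverride.toList ++ rpIdOverride ++ creationIdOverride ++
      List(exportedRpId, creationPkgId) ++ packageNameOverride ++ highestLocalForName)
      .find(known) // first known candidate wins; None mirrors the "no candidate" failure
}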
- override def legacyAuthenticate(contract: FatContractInstance): Either[String, Unit] = Either.unit -} diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessorTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessorTest.scala index c6d4ce7ea9..f720f734f7 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessorTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessorTest.scala @@ -32,7 +32,6 @@ import com.digitalasset.canton.lifecycle.{ UnlessShutdown, } import com.digitalasset.canton.logging.pretty.Pretty -import com.digitalasset.canton.participant.admin.PackageDependencyResolver import com.digitalasset.canton.participant.config.LedgerApiServerConfig import com.digitalasset.canton.participant.event.RecordOrderPublisher import com.digitalasset.canton.participant.ledger.api.LedgerApiIndexer @@ -73,7 +72,7 @@ import com.digitalasset.canton.sequencing.protocol.* import com.digitalasset.canton.sequencing.traffic.TrafficReceipt import com.digitalasset.canton.store.memory.InMemoryIndexedStringStore import com.digitalasset.canton.store.{IndexedPhysicalSynchronizer, IndexedSynchronizer} -import com.digitalasset.canton.time.{NonNegativeFiniteDuration, SynchronizerTimeTracker, WallClock} +import com.digitalasset.canton.time.{SynchronizerTimeTracker, WallClock} import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex import com.digitalasset.canton.topology.transaction.ParticipantPermission @@ -207,8 +206,7 @@ class ProtocolProcessorTest private val resultSc = SequencerCounter(1) private val rc = RequestCounter(0) private val parameters = DynamicSynchronizerParametersWithValidity( - DynamicSynchronizerParameters - .initialValues(NonNegativeFiniteDuration.Zero, testedProtocolVersion), + DynamicSynchronizerParameters.initialValues(testedProtocolVersion), CantonTimestamp.MinValue, None, ) @@ -251,8 +249,6 @@ class ProtocolProcessorTest SyncEphemeralState, ParticipantNodeEphemeralState, ) = { - - val packageDependencyResolver = mock[PackageDependencyResolver] val clock = new WallClock(timeouts, loggerFactory) val nodePersistentState = timeouts.default.await("creating node persistent state")( @@ -290,12 +286,10 @@ class ProtocolProcessorTest crypto.crypto, IndexedPhysicalSynchronizer.tryCreate(psid, 1), defaultStaticSynchronizerParameters, - exitOnFatalFailures = true, - disableUpgradeValidation = false, - packageDependencyResolver, + parameters = ParticipantNodeParameters.forTestingOnly(testedProtocolVersion), + mock[PackageMetadataView], Eval.now(nodePersistentState.ledgerApiStore), logical, - Eval.now(mock[PackageMetadataView]), loggerFactory, timeouts, futureSupervisor, diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/TestProcessingSteps.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/TestProcessingSteps.scala index f97f85b0ef..fb13813877 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/TestProcessingSteps.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/TestProcessingSteps.scala @@ -86,6 +86,8 @@ class TestProcessingSteps( override type RequestType = TestPendingRequestDataType override val 
requestType = TestPendingRequestDataType + override type ViewAbsoluteLedgerEffects = Unit + override type FullViewAbsoluteLedgerEffects = Unit override type ParsedRequestType = TestParsedRequest override def embedRequestError( @@ -190,16 +192,35 @@ class TestProcessingSteps( ) } + override def absolutizeLedgerEffects( + viewsWithCorrectRootHashAndRecipientsAndSignature: Seq[ + (WithRecipients[DecryptedView], Option[Signature]) + ] + ): ( + Seq[(WithRecipients[DecryptedView], Option[Signature], Unit)], + Seq[ProtocolProcessor.MalformedPayload], + ) = ( + viewsWithCorrectRootHashAndRecipientsAndSignature.map { case (view, sig) => (view, sig, ()) }, + Seq.empty, + ) + override def computeFullViews( - decryptedViewsWithSignatures: Seq[(WithRecipients[DecryptedView], Option[Signature])] - ): (Seq[(WithRecipients[FullView], Option[Signature])], Seq[ProtocolProcessor.MalformedPayload]) = + decryptedViewsWithSignatures: Seq[ + (WithRecipients[DecryptedView], Option[Signature], ViewAbsoluteLedgerEffects) + ] + ): ( + Seq[(WithRecipients[FullView], Option[Signature], FullViewAbsoluteLedgerEffects)], + Seq[ProtocolProcessor.MalformedPayload], + ) = (decryptedViewsWithSignatures, Seq.empty) override def computeParsedRequest( rc: RequestCounter, ts: CantonTimestamp, sc: SequencerCounter, - rootViewsWithMetadata: NonEmpty[Seq[(WithRecipients[FullView], Option[Signature])]], + rootViewsWithMetadata: NonEmpty[ + Seq[(WithRecipients[FullView], Option[Signature], FullViewAbsoluteLedgerEffects)] + ], submitterMetadataO: Option[ViewSubmitterMetadata], isFreshOwnTimelyRequest: Boolean, malformedPayloads: Seq[ProtocolProcessor.MalformedPayload], diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingStepsTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingStepsTest.scala deleted file mode 100644 index fabbb21547..0000000000 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingStepsTest.scala +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.participant.protocol - -import cats.data.EitherT -import cats.syntax.either.* -import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.concurrent.FutureSupervisor -import com.digitalasset.canton.crypto.SynchronizerCryptoClient -import com.digitalasset.canton.logging.LogEntry -import com.digitalasset.canton.participant.metrics.ParticipantTestMetrics -import com.digitalasset.canton.participant.protocol.TransactionProcessor.SubmissionErrors.ContractAuthenticationFailed -import com.digitalasset.canton.participant.protocol.TransactionProcessor.TransactionProcessorError -import com.digitalasset.canton.participant.protocol.submission.TransactionConfirmationRequestFactory -import com.digitalasset.canton.participant.protocol.validation.* -import com.digitalasset.canton.platform.apiserver.execution.CommandProgressTracker -import com.digitalasset.canton.protocol.{ExampleContractFactory, LfHash} -import com.digitalasset.canton.topology.{ParticipantId, SynchronizerId, UniqueIdentifier} -import com.digitalasset.canton.util.ContractAuthenticator -import com.digitalasset.daml.lf.transaction.FatContractInstance -import com.digitalasset.daml.lf.value.Value.ContractId -import org.scalatest.Assertion -import org.scalatest.wordspec.AsyncWordSpec - -class TransactionProcessingStepsTest extends AsyncWordSpec with BaseTest { - private val synchronizerId = SynchronizerId( - UniqueIdentifier.tryFromProtoPrimitive("the::synchronizer") - ).toPhysical - private val participantId: ParticipantId = ParticipantId("participant") - - private def buildTestInstance( - behaviors: Map[ContractId, Either[String, Unit]] - ) = new TransactionProcessingSteps( - psid = synchronizerId, - participantId = participantId, - confirmationRequestFactory = mock[TransactionConfirmationRequestFactory], - confirmationResponsesFactory = mock[TransactionConfirmationResponsesFactory], - modelConformanceChecker = mock[ModelConformanceChecker], - staticSynchronizerParameters = defaultStaticSynchronizerParameters, - crypto = mock[SynchronizerCryptoClient], - metrics = ParticipantTestMetrics.synchronizer.transactionProcessing, - serializableContractAuthenticator = new ContractAuthenticator { - override def legacyAuthenticate(contract: FatContractInstance): Either[String, Unit] = - behaviors.getOrElse( - contract.contractId, - fail(s"contract authentication did not find ${contract.contractId}"), - ) - override def authenticate( - instance: FatContractInstance, - contractHash: LfHash, - ): Either[String, Unit] = - behaviors.getOrElse( - instance.contractId, - fail(s"contract authentication did not find ${instance.contractId}"), - ) - }, - transactionEnricher = tx => _ => EitherT.pure(tx), - createNodeEnricher = node => _ => EitherT.pure(node), - new AuthorizationValidator(participantId), - new InternalConsistencyChecker( - loggerFactory - ), - CommandProgressTracker.NoOp, - loggerFactory = loggerFactory, - FutureSupervisor.Noop, - ) - - "authenticateInputContracts" when { - val c1 = ExampleContractFactory.build() - val c2 = ExampleContractFactory.build() - val inputContracts = Map(c1.contractId -> c1, c2.contractId -> c2) - - "provided with valid input contracts" should { - "succeed" in { - val testInstance = - buildTestInstance(Map(c1.contractId -> Either.unit, c2.contractId -> Either.unit)) - - val result = testInstance.authenticateInputContractsInternal(inputContracts) - result.value.map(_ shouldBe Right[TransactionProcessorError, Unit](())) - } - } - - 
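// Editor's note: an illustrative sketch, not part of this patch. TestProcessingSteps (updated
// above) implements the new phase ordering in the processing steps: decrypted views are first
// paired with their "absolutized ledger effects", full views are then computed from the
// (view, signature, effects) triples, and the parsed request carries the triples onward.
// A minimal self-contained analogue of the phase signatures (all types are stand-ins;
// TestProcessingSteps itself uses Unit for both effect types):
object ViewPipelineSketch {
  type DecryptedView = String
  type FullView = String
  type Signature = Long
  type Effects = Unit
  type Malformed = String

  // Phase 1: attach absolutized effects to each view; views may be rejected as malformed.
  def absolutizeLedgerEffects(
      views: Seq[(DecryptedView, Option[Signature])]
  ): (Seq[(DecryptedView, Option[Signature], Effects)], Seq[Malformed]) =
    (views.map { case (v, sig) => (v, sig, ()) }, Seq.empty)

  // Phase 2: compute full views, keeping the effects attached to each view.
  def computeFullViews(
      in: Seq[(DecryptedView, Option[Signature], Effects)]
  ): (Seq[(FullView, Option[Signature], Effects)], Seq[Malformed]) =
    (in, Seq.empty)
}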
"provided with contracts failing authentication" must { - "convert failure and raise alarm" in { - val testInstance = - buildTestInstance( - Map(c1.contractId -> Either.unit, c2.contractId -> Left("some authentication failure")) - ) - - val (expectedLog, expectedResult) = { - val expectedLog: LogEntry => Assertion = - _.shouldBeCantonError( - ContractAuthenticationFailed, - _ should include( - s"Contract with id (${c2.contractId.coid}) could not be authenticated: some authentication failure" - ), - ) - - val expectedError = - ContractAuthenticationFailed.Error(c2.contractId, "some authentication failure") - - Some(expectedLog) -> Left(expectedError) - } - - loggerFactory - .assertLogs( - testInstance.authenticateInputContractsInternal(inputContracts).value, - expectedLog.toList* - ) - .map(_ shouldBe expectedResult) - } - } - } -} diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/party/PartyReplicationProcessorTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/party/PartyReplicationProcessorTest.scala new file mode 100644 index 0000000000..3bac8aabdd --- /dev/null +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/party/PartyReplicationProcessorTest.scala @@ -0,0 +1,436 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.participant.protocol.party + +import cats.data.EitherT +import cats.syntax.either.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto +import com.digitalasset.canton.crypto.{Hash, TestHash} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.ledger.participant.state.Update +import com.digitalasset.canton.lifecycle.{ + AsyncOrSyncCloseable, + FlagCloseableAsync, + FutureUnlessShutdown, + SyncCloseable, + UnlessShutdown, +} +import com.digitalasset.canton.logging.TracedLogger +import com.digitalasset.canton.participant.admin.party.PartyReplicationTestInterceptor +import com.digitalasset.canton.participant.event.RecordOrderPublisher +import com.digitalasset.canton.participant.protocol.conflictdetection.RequestTracker +import com.digitalasset.canton.participant.util.{CreatesActiveContracts, TimeOfChange} +import com.digitalasset.canton.protocol.LfContractId +import com.digitalasset.canton.topology.{ + DefaultTestIdentities, + PartyId, + PhysicalSynchronizerId, + SynchronizerId, +} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.{EitherTUtil, ReassignmentTag} +import com.digitalasset.canton.version.ProtocolVersion +import com.digitalasset.canton.{ + BaseTest, + HasExecutionContext, + NeedsNewLfContractIds, + ProtocolVersionChecksFixtureAsyncWordSpec, + ReassignmentCounter, +} +import com.google.protobuf.ByteString +import org.scalatest.FutureOutcome +import org.scalatest.wordspec.FixtureAsyncWordSpec + +import scala.collection.mutable +import scala.util.chaining.scalaUtilChainingOps + +/** The PartyReplicationProcessorTest tests that the OnPR ACS replication protocol implemented by + * the source and target processors correctly handle expected and unexpected messages from the + * counter participant and local error conditions. 
+ */ +final class PartyReplicationProcessorTest + extends FixtureAsyncWordSpec + with BaseTest + with HasExecutionContext + with ProtocolVersionChecksFixtureAsyncWordSpec + with NeedsNewLfContractIds + with CreatesActiveContracts { + + override protected val testSymbolicCrypto = new SymbolicPureCrypto() + + class Env extends FlagCloseableAsync { + override val timeouts: ProcessingTimeout = PartyReplicationProcessorTest.this.timeouts + protected val logger: TracedLogger = PartyReplicationProcessorTest.this.logger + val tp: PartyReplicationTargetParticipantProcessor = mkTP() + + val messagesSentByTP = + mutable.Buffer[(String, PartyReplicationTargetParticipantMessage.Instruction)]() + + private var hasTestProcessorCompleted: Boolean = false + var tpProceedOrWait: PartyReplicationTestInterceptor.ProceedOrWait = + PartyReplicationTestInterceptor.Proceed + var tpSendErrorOverrides: Map[String, String] = Map.empty + var persistContractResult = EitherTUtil.unitUS[String] + var getInternalContractIdsResult = FutureUnlessShutdown.pure(Map.empty[LfContractId, Long]) + + def targetProcessor: PartyReplicationTargetParticipantProcessor = tp + + private def mkTP(): PartyReplicationTargetParticipantProcessor = { + val rop = mock[RecordOrderPublisher] + val requestTracker = mock[RequestTracker] + when(rop.schedulePublishAddContracts(any[CantonTimestamp => Update])(anyTraceContext)) + .thenReturn(UnlessShutdown.unit) + when(rop.publishBufferedEvents()).thenReturn(UnlessShutdown.unit) + when( + requestTracker.addReplicatedContracts( + any[Hash], + any[CantonTimestamp], + any[Seq[ + ( + LfContractId, + ReassignmentTag.Source[SynchronizerId], + ReassignmentCounter, + TimeOfChange, + ) + ]], + )(anyTraceContext) + ) + .thenReturn(EitherTUtil.unitUS) + + new PartyReplicationTargetParticipantProcessor( + partyId = alice, + requestId = addPartyRequestId, + psid = psid, + partyToParticipantEffectiveAt = CantonTimestamp.ofEpochSecond(10), + onAcsFullyReplicated = _ => (), + onError = logger.info(_), + onDisconnect = logger.info(_)(_), + persistContracts = _ => _ => _ => persistContractResult, + getInternalContractIds = _ => _ => getInternalContractIdsResult, + recordOrderPublisher = rop, + requestTracker = requestTracker, + pureCrypto = testSymbolicCrypto, + futureSupervisor = futureSupervisor, + exitOnFatalFailures = true, + timeouts = timeouts, + loggerFactory = loggerFactory, + testOnlyInterceptor = new PartyReplicationTestInterceptor { + override def onTargetParticipantProgress(store: TargetParticipantStore)(implicit + traceContext: TraceContext + ): PartyReplicationTestInterceptor.ProceedOrWait = tpProceedOrWait + }, + ) { + override protected def sendPayload(operation: String, payload: ByteString)(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, String, Unit] = + sendMaybeOverridden(operation)( + EitherT.fromEither[FutureUnlessShutdown]( + PartyReplicationTargetParticipantMessage + .fromByteString(protocolVersion, payload) + .bimap( + _.message, + msg => { + messagesSentByTP.append(operation -> msg.instruction) + this.logger.info(s"Sent message: $operation") + }, + ) + ) + ) + + override def sendCompleted(status: String)(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, String, Unit] = { + hasTestProcessorCompleted = true + sendMaybeOverridden("completed") { + this.logger.info(s"Sent completed: $status") + EitherTUtil.unitUS + } + } + + override def sendError(error: String)(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, String, Unit] = { 
+ hasTestProcessorCompleted = true + this.logger.info(s"Sent error: $error") + EitherTUtil.unitUS + } + + private def sendMaybeOverridden(operation: String)( + send: => EitherT[FutureUnlessShutdown, String, Unit] + ): EitherT[FutureUnlessShutdown, String, Unit] = + tpSendErrorOverrides + .get(operation) + .fold { + send + } { err => + this.logger.info(s"Send \"$operation\" overridden with error: $err") + EitherT.fromEither[FutureUnlessShutdown](Left(err)) + } + + override def hasChannelCompleted: Boolean = hasTestProcessorCompleted + } + } + + override protected def closeAsync(): Seq[AsyncOrSyncCloseable] = Seq( + SyncCloseable("tp-processor", tp.close()) + ) + } + + private def execUntilDone[P <: PartyReplicationProcessor, T](processor: P, clue: String)( + test: P => EitherT[FutureUnlessShutdown, String, Unit] + ): FutureUnlessShutdown[Unit] = + valueOrFail(test(processor))(clue) + .tap(_ => eventually()(processor.isExecutionQueueEmpty shouldBe true)) + + private def execUntilDone[P <: PartyReplicationProcessor, T](processor: P)( + test: P => Unit + ): Unit = { + test(processor) + eventually()(processor.isExecutionQueueEmpty shouldBe true) + } + + override type FixtureParam = Env + + override def withFixture(test: OneArgAsyncTest): FutureOutcome = { + val env = new Env() + + complete { + withFixture(test.toNoArgAsyncTest(env)) + } lastly { + env.close() + } + } + + private val alice = PartyId.tryFromProtoPrimitive("alice::default") + + private val addPartyRequestId = TestHash.digest(0) + + override protected val psid: PhysicalSynchronizerId = + PhysicalSynchronizerId( + DefaultTestIdentities.synchronizerId, + defaultStaticSynchronizerParameters, + ) + + "TargetParticipantPartyReplicationProcessor" when { + + "observing valid interaction" should { + "initialize" onlyRunWith ProtocolVersion.dev inUS { env => + import env.* + for { + _ <- execUntilDone(tp, "initialize tp")(_.onConnected()) + } yield { + val messagesSent = eventually()(env.messagesSentByTP.toList.tap(_.size shouldBe 2)) + messagesSent.head._2 shouldBe PartyReplicationTargetParticipantMessage.Initialize( + NonNegativeInt.zero + ) + val firstBatchEndOrdinal = + PartyReplicationTargetParticipantProcessor.contractsToRequestEachTime.decrement + messagesSent(1)._2 shouldBe PartyReplicationTargetParticipantMessage.SendAcsUpTo( + firstBatchEndOrdinal + ) + } + } + + "handle empty ACS" onlyRunWith ProtocolVersion.dev inUS { env => + import env.* + for { + _ <- execUntilDone(tp, "initialize tp")(_.onConnected()) + _ <- execUntilDone(tp, "handle empty acs")( + _.handlePayload(endOfAcs) + ) + _ = execUntilDone(tp)(_.progressPartyReplication()) + } yield tp.hasChannelCompleted shouldBe true + } + + "handle single ACS batch" onlyRunWith ProtocolVersion.dev inUS { env => + import env.* + val firstBatchSize = PartyReplicationTargetParticipantProcessor.contractsToRequestEachTime + for { + _ <- execUntilDone(tp, "initialize tp")(_.onConnected()) + _ <- execUntilDone(tp, "receive acs batch")( + _.handlePayload(createAcsBatch(firstBatchSize)) + ) + _ <- execUntilDone(tp, "end of acs")(_.handlePayload(endOfAcs)) + _ = execUntilDone(tp)(_.progressPartyReplication()) + } yield { + tp.hasChannelCompleted shouldBe true + tp.replicatedContractsCount shouldBe firstBatchSize.toNonNegative + } + } + } + + "encountering invalid messages from source participant (SP)" should { + "complain if SP sends malformed message" onlyRunWith ProtocolVersion.dev inUS { env => + import env.* + for { + _ <- execUntilDone(tp, "initialize tp")(_.onConnected()) + 
malformedMessage = ByteString.copyFromUtf8("not a valid message") + err <- leftOrFail(tp.handlePayload(malformedMessage))("fail on malformed message") + } yield { + err should include regex "Failed to parse payload message from SP: .*" + } + } + + "complain if SP sends message before channel initialization" onlyRunWith ProtocolVersion.dev inUS { + env => + import env.* + for { + errAcsBatch <- leftOrFail(tp.handlePayload(createAcsBatch(PositiveInt.one)))( + "fail on premature AcsBatch" + ) + errEndOfACS <- leftOrFail(tp.handlePayload(endOfAcs))("fail on premature EndOfACS") + } yield { + errAcsBatch should include regex "Received unexpected message from SP before initialized by TP: .*AcsBatch" + errEndOfACS should include regex "Received unexpected message from SP before initialized by TP: .*EndOfACS" + } + } + + "complain if SP sends more contract batches than requested" onlyRunWith ProtocolVersion.dev inUS { + env => + import env.* + val batchSize = PartyReplicationTargetParticipantProcessor.contractsToRequestEachTime + for { + _ <- execUntilDone(tp, "initialize tp")(_.onConnected()) + // Make the TP processor wait to prevent it from automatically requesting more contracts + _ = env.tpProceedOrWait = PartyReplicationTestInterceptor.Wait + _ <- execUntilDone(tp, "receive requested")(_.handlePayload(createAcsBatch(batchSize))) + // Sending another, unrequested batch should cause an error + errTooMany <- leftOrFail(tp.handlePayload(createAcsBatch(batchSize)))( + "fail on premature AcsBatch" + ) + } yield { + errTooMany should include regex "Received too many contracts from SP: processed .* received .* > requested" + tp.replicatedContractsCount shouldBe batchSize.toNonNegative + tp.hasChannelCompleted shouldBe true + } + } + + "complain if SP sends more contracts than requested" onlyRunWith ProtocolVersion.dev inUS { + env => + import env.* + val batchSizeTooLarge = + PartyReplicationTargetParticipantProcessor.contractsToRequestEachTime.increment + for { + _ <- execUntilDone(tp, "initialize tp")(_.onConnected()) + // Make the TP processor wait to prevent it from automatically requesting more contracts + _ = env.tpProceedOrWait = PartyReplicationTestInterceptor.Wait + errTooMany <- leftOrFail(tp.handlePayload(createAcsBatch(batchSizeTooLarge)))( + "fail on AcsBatch with more contracts than requested" + ) + } yield { + errTooMany should include regex "Received too many contracts from SP: processed .* received .* > requested" + tp.replicatedContractsCount shouldBe NonNegativeInt.zero + tp.hasChannelCompleted shouldBe true + } + } + + "complain if SP sends another message after EndOfACS" onlyRunWith ProtocolVersion.dev inUS { + env => + import env.* + for { + _ <- execUntilDone(tp, "initialize tp")(_.onConnected()) + _ <- execUntilDone(tp, "end of acs")(_.handlePayload(endOfAcs)) + errEndOfACSNotLast <- leftOrFail(tp.handlePayload(createAcsBatch(PositiveInt.one)))( + "fail on ACS batch after EndOfACS" + ) + } yield { + errEndOfACSNotLast should include("Received ACS batch from SP after EndOfACS at") + tp.hasChannelCompleted shouldBe true + } + } + } + + "encountering local problems on target participant (TP)" should { + "fail double initialize" onlyRunWith ProtocolVersion.dev inUS { env => + import env.* + for { + _ <- valueOrFail(tp.onConnected())("initialize tp") + err <- leftOrFail(tp.onConnected())("double initialize tp") + } yield { + err shouldBe "Channel already connected" + } + } + + "log upon problem sending message to SP" onlyRunWith ProtocolVersion.dev inUS { env => + import env.* + 
// Fail TP send on the second message to request more contracts + val acsBatchSize = PartyReplicationTargetParticipantProcessor.contractsToRequestEachTime + val secondRequestUpperBound = acsBatchSize.unwrap * 2 - 1 + tpSendErrorOverrides = Map( + s"request next set of contracts up to ordinal $secondRequestUpperBound" -> "simulated request batch error" + ) + loggerFactory.assertLogs( + for { + _ <- valueOrFail(tp.onConnected())("initialize tp") + _ <- execUntilDone(tp, "receive acs batch")( + _.handlePayload( + createAcsBatch( + PartyReplicationTargetParticipantProcessor.contractsToRequestEachTime + ) + ) + ) + } yield { + // send error observed via logged warning instead of here due to async send + succeed + }, + _.warningMessage should include regex "Respond to source participant .* failed .*simulated request batch error", + ) + } + + "log upon send completed notification failure" onlyRunWith ProtocolVersion.dev inUS { env => + import env.* + tpSendErrorOverrides = Map(s"completed" -> "simulated complete error") + loggerFactory.assertLogs( + for { + _ <- execUntilDone(tp, "initialize tp")(_.onConnected()) + _ <- execUntilDone(tp, "handle empty acs")( + _.handlePayload(endOfAcs) + ) + _ = execUntilDone(tp)(_.progressPartyReplication()) + } yield { tp.hasChannelCompleted shouldBe true }, + _.warningMessage should include regex "Respond to source participant .* failed .*simulated complete error", + ) + } + + "error when unable to persist contracts" onlyRunWith ProtocolVersion.dev inUS { env => + import env.* + + persistContractResult = + EitherT.fromEither[FutureUnlessShutdown](Left("simulated persist contracts error")) + + for { + _ <- execUntilDone(tp, "initialize tp")(_.onConnected()) + err <- leftOrFail(tp.handlePayload(createAcsBatch(PositiveInt.one)))( + "fail on persist contracts" + ) + } yield { + err should include regex "Failed to persist contracts:.* simulated persist contracts error" + // channel completed because of error + tp.hasChannelCompleted shouldBe true + } + } + } + } + + private def createAcsBatch(n: PositiveInt): ByteString = + PartyReplicationSourceParticipantMessage( + PartyReplicationSourceParticipantMessage.AcsBatch( + NonEmpty + .from( + (0 until n.unwrap).map(_ => createActiveContractOld()) + ) + .getOrElse(fail("should not be empty")) + ), + testedProtocolVersion, + ).toByteString + + private lazy val endOfAcs: ByteString = + PartyReplicationSourceParticipantMessage( + PartyReplicationSourceParticipantMessage.EndOfACS, + testedProtocolVersion, + ).toByteString +} diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentProcessingStepsTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentProcessingStepsTest.scala index 040fc19ba8..49441a77e7 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentProcessingStepsTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentProcessingStepsTest.scala @@ -5,7 +5,9 @@ package com.digitalasset.canton.participant.protocol.reassignment import cats.Eval import cats.data.EitherT +import cats.implicits.catsSyntaxEitherId import cats.syntax.functor.* +import com.daml.logging.LoggingContext import com.daml.nonempty.NonEmpty import com.digitalasset.canton.* import com.digitalasset.canton.concurrent.FutureSupervisor @@ -20,7 +22,7 @@ import 
com.digitalasset.canton.data.* import com.digitalasset.canton.data.ViewType.AssignmentViewType import com.digitalasset.canton.lifecycle.{DefaultPromiseUnlessShutdownFactory, FutureUnlessShutdown} import com.digitalasset.canton.logging.LogEntry -import com.digitalasset.canton.participant.admin.PackageDependencyResolver +import com.digitalasset.canton.participant.ParticipantNodeParameters import com.digitalasset.canton.participant.event.RecordOrderPublisher import com.digitalasset.canton.participant.ledger.api.{LedgerApiIndexer, LedgerApiStore} import com.digitalasset.canton.participant.metrics.ParticipantTestMetrics @@ -35,7 +37,7 @@ import com.digitalasset.canton.participant.protocol.reassignment.AssignmentValid import com.digitalasset.canton.participant.protocol.reassignment.AssignmentValidationResult.ReassigningParticipantValidationResult import com.digitalasset.canton.participant.protocol.reassignment.ReassignmentProcessingSteps.* import com.digitalasset.canton.participant.protocol.reassignment.ReassignmentValidationError.{ - ContractIdAuthenticationFailure, + ContractAuthenticationFailure, NotHostedOnParticipant, StakeholdersMismatch, SubmitterMustBeStakeholder, @@ -81,15 +83,18 @@ import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex import com.digitalasset.canton.topology.transaction.ParticipantPermission import com.digitalasset.canton.topology.transaction.ParticipantPermission.Confirmation +import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ReassignmentTag.{Source, Target} -import com.digitalasset.canton.util.{ContractAuthenticator, ResourceUtil} +import com.digitalasset.canton.util.{ContractValidator, ResourceUtil} import com.digitalasset.canton.version.HasTestCloseContext -import com.digitalasset.daml.lf.transaction.CreationTime +import com.digitalasset.daml.lf.data.Ref.PackageId +import com.digitalasset.daml.lf.transaction.{CreationTime, FatContractInstance} import monocle.macros.syntax.lens.* +import org.apache.commons.lang3.NotImplementedException import org.scalatest.wordspec.AsyncWordSpec import java.util.UUID -import scala.concurrent.Future +import scala.concurrent.{ExecutionContext, Future} final class AssignmentProcessingStepsTest extends AsyncWordSpec @@ -123,17 +128,6 @@ final class AssignmentProcessingStepsTest UniqueIdentifier.tryFromProtoPrimitive("bothsynchronizers::participant") ) - private def testMetadata( - signatories: Set[LfPartyId] = Set(party1), - stakeholders: Set[LfPartyId] = Set(party1), - maybeKeyWithMaintainersVersioned: Option[LfVersioned[LfGlobalKeyWithMaintainers]] = None, - ): ContractMetadata = - ContractMetadata.tryCreate( - stakeholders = stakeholders, - signatories = signatories, - maybeKeyWithMaintainersVersioned = maybeKeyWithMaintainersVersioned, - ) - private lazy val contract = ExampleContractFactory.build( signatories = Set(party1), stakeholders = Set(party1), @@ -175,8 +169,7 @@ final class AssignmentProcessingStepsTest private lazy val cryptoSnapshot = cryptoClient.currentSnapshotApproximation - private lazy val assignmentProcessingSteps = - testInstance(targetPSId, cryptoClient, None) + private lazy val assignmentProcessingSteps = testInstance(targetPSId, cryptoClient, None) private lazy val indexedStringStore = new InMemoryIndexedStringStore(minIndex = 1, maxIndex = 1) @@ -200,13 +193,11 @@ final class AssignmentProcessingStepsTest SynchronizerCrypto(crypto, defaultStaticSynchronizerParameters), 
IndexedPhysicalSynchronizer.tryCreate(targetPSId.unwrap, 1), defaultStaticSynchronizerParameters, - packageDependencyResolver = mock[PackageDependencyResolver], + packageMetadataView = mock[PackageMetadataView], ledgerApiStore = Eval.now(mock[LedgerApiStore]), logicalSyncPersistentState = logical, - packageMetadataView = Eval.now(mock[PackageMetadataView]), loggerFactory = loggerFactory, - exitOnFatalFailures = true, - disableUpgradeValidation = false, + parameters = ParticipantNodeParameters.forTestingOnly(testedProtocolVersion), timeouts = timeouts, futureSupervisor = futureSupervisor, ) @@ -552,6 +543,7 @@ final class AssignmentProcessingStepsTest val viewWithMetadata = ( WithRecipients(parsedRequest.fullViewTree, parsedRequest.recipients), parsedRequest.signatureO, + (), ) for { result <- @@ -628,137 +620,83 @@ final class AssignmentProcessingStepsTest } } - "fail when wrong metadata is given" in { - def test(testContract: ContractInstance) = - for { - deps <- statefulDependencies - (persistentState, ephemeralState) = deps + "fail when an invalid contract is given" in { - _ <- valueOrFail(persistentState.reassignmentStore.addUnassignmentData(unassignmentData))( - "add reassignment data failed" - ).failOnShutdown + val testContract = ExampleContractFactory.build() - fullAssignmentTree = makeFullAssignmentTree( - party1, - testContract, - targetPSId, - targetMediator, - reassigningParticipants = Set(participant), - ) + val expected = "bad-contract" - result <- - assignmentProcessingSteps - .constructPendingDataAndResponse( - mkParsedRequest(fullAssignmentTree), - ephemeralState.reassignmentCache, - FutureUnlessShutdown.pure(mkActivenessResult()), - engineController = - EngineController(participant, RequestId(CantonTimestamp.Epoch), loggerFactory), - DummyTickRequest, - ) - .failOnShutdown - confirmationResponse <- result.confirmationResponsesF.failOnShutdown + val contractValidator = new ContractValidator { + override def authenticate(contract: FatContractInstance, targetPackageId: PackageId)( + implicit + ec: ExecutionContext, + traceContext: TraceContext, + loggingContext: LoggingContext, + ): EitherT[FutureUnlessShutdown, String, Unit] = + EitherT.fromEither[FutureUnlessShutdown](expected.asLeft[Unit]) - } yield { - confirmationResponse.valueOrFail("no response")._1.responses should matchPattern { - case Seq(ConfirmationResponse(_, LocalAbstain(_), _)) => - } - val assignmentValidationResult = result.pendingData.assignmentValidationResult - val modelConformanceError = - assignmentValidationResult.commonValidationResult.contractAuthenticationResultF.value.futureValueUS - - modelConformanceError.left.value match { - case ContractIdAuthenticationFailure(ref, reason, contractId) => - ref shouldBe fullAssignmentTree.reassignmentRef - contractId shouldBe testContract.contractId - reason should startWith("Mismatching contract id suffixes") - case other => fail(s"Did not expect $other") - } + override def authenticateHash( + contract: FatContractInstance, + contractHash: LfHash, + ): Either[String, Unit] = throw new NotImplementedException() + } - assignmentValidationResult.reassigningParticipantValidationResult.errors should contain( - UnassignmentDataNotFound(fullAssignmentTree.reassignmentId) - ) - } + val assignmentProcessingSteps = + testInstance(targetPSId, cryptoClient, None, contractValidator) - val baseMetadata = testMetadata() - - // party2 is incorrectly registered as a stakeholder - val contractWrongStakeholders: ContractInstance = { - val fci = ExampleTransactionFactory - 
.authenticatedContractInstance(metadata = baseMetadata) - .inst: LfFatContractInst - ContractInstance - .create( - LfFatContractInst.fromCreateNode( - fci.toCreateNode - .focus(_.stakeholders) - .modify(_ incl party2), - fci.createdAt, - fci.authenticationData, - ) - ) - .value - } + for { + deps <- statefulDependencies + (persistentState, ephemeralState) = deps - // party2 is incorrectly registered as a signatory - val contractWrongSignatories: ContractInstance = { - val fci = ExampleTransactionFactory - .authenticatedContractInstance( - metadata = testMetadata(stakeholders = baseMetadata.stakeholders + party2) - ) - .inst: LfFatContractInst - - ContractInstance - .create( - LfFatContractInst.fromCreateNode( - fci.toCreateNode - .focus(_.signatories) - .modify(_ incl party2), - fci.createdAt, - fci.authenticationData, - ) - ) - .value - } + _ <- valueOrFail(persistentState.reassignmentStore.addUnassignmentData(unassignmentData))( + "add reassignment data failed" + ).failOnShutdown - val incorrectKey = ExampleTransactionFactory.globalKeyWithMaintainers( - ExampleTransactionFactory.defaultGlobalKey, - Set(party1), - ) + fullAssignmentTree = makeFullAssignmentTree( + party1, + testContract, + targetPSId, + targetMediator, + reassigningParticipants = Set(participant), + ) - // Metadata has incorrect key - val contractWrongKey: ContractInstance = { - val fci = ExampleTransactionFactory - .authenticatedContractInstance( - metadata = testMetadata(stakeholders = baseMetadata.stakeholders + party2) - ) - .inst: LfFatContractInst - ContractInstance - .create( - LfFatContractInst.fromCreateNode( - fci.toCreateNode - .focus(_.keyOpt) - .replace(Some(incorrectKey.unversioned)), - fci.createdAt, - fci.authenticationData, + result <- + assignmentProcessingSteps + .constructPendingDataAndResponse( + mkParsedRequest(fullAssignmentTree), + ephemeralState.reassignmentCache, + FutureUnlessShutdown.pure(mkActivenessResult()), + engineController = + EngineController(participant, RequestId(CantonTimestamp.Epoch), loggerFactory), + DummyTickRequest, ) - ) - .value + .failOnShutdown + confirmationResponse <- result.confirmationResponsesF.failOnShutdown + + } yield { + confirmationResponse.valueOrFail("no response")._1.responses should matchPattern { + case Seq(ConfirmationResponse(_, LocalAbstain(_), _)) => + } + val assignmentValidationResult = result.pendingData.assignmentValidationResult + val modelConformanceError = + assignmentValidationResult.commonValidationResult.contractAuthenticationResultF.value.futureValueUS + + modelConformanceError.left.value match { + case ContractAuthenticationFailure(ref, reason, contractId) => + ref shouldBe fullAssignmentTree.reassignmentRef + contractId shouldBe testContract.contractId + reason should include(expected) + case other => fail(s"Did not expect $other") + } + + assignmentValidationResult.reassigningParticipantValidationResult.errors should contain( + UnassignmentDataNotFound(fullAssignmentTree.reassignmentId) + ) } - for { - _ <- test(contractWrongStakeholders) - _ <- test(contractWrongSignatories) - _ <- test(contractWrongKey) - } yield succeed } "fail when inconsistent stakeholders are given" in { - /* - We construct in this test an inconsistent `inconsistentTree: FullAssignmentTree` : - - inconsistentTree.tree.commonData.stakeholders is incorrect - - inconsistentTree.view.contract.metadata is correct - */ val incorrectMetadata = ContractMetadata.tryCreate(Set(party1), Set(party1, party2), None) val incorrectStakeholders = Stakeholders(incorrectMetadata) @@ 
-972,6 +910,7 @@ final class AssignmentProcessingStepsTest targetSynchronizer: Target[PhysicalSynchronizerId], snapshotOverride: SynchronizerCryptoClient, awaitTimestampOverride: Option[Future[Unit]], + contractValidator: ContractValidator = ContractValidator.AllowAll, ) = { val pureCrypto = new SymbolicPureCrypto @@ -989,7 +928,7 @@ final class AssignmentProcessingStepsTest ), snapshotOverride, seedGenerator, - ContractAuthenticator(pureCrypto), + contractValidator, Target(defaultStaticSynchronizerParameters), Target(testedProtocolVersion), loggerFactory = loggerFactory, diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentValidationTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentValidationTest.scala index 09f71d30a7..ee822bc29e 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentValidationTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/AssignmentValidationTest.scala @@ -38,7 +38,7 @@ import com.digitalasset.canton.sequencing.protocol.{ import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex import com.digitalasset.canton.topology.transaction.ParticipantPermission -import com.digitalasset.canton.util.ContractAuthenticator +import com.digitalasset.canton.util.ContractValidator import com.digitalasset.canton.util.ReassignmentTag.{Source, Target} import org.scalatest.wordspec.AsyncWordSpec @@ -374,7 +374,7 @@ final class AssignmentValidationTest participantId: ParticipantId, ): AssignmentValidation = { - val contractAuthenticator = ContractAuthenticator(new SymbolicPureCrypto()) + val contractValidator = ContractValidator.AllowAll new AssignmentValidation( synchronizerId, @@ -388,7 +388,7 @@ final class AssignmentValidationTest loggerFactory, ), loggerFactory = loggerFactory, - contractAuthenticator = contractAuthenticator, + contractValidator = contractValidator, ) } diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentDataHelpers.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentDataHelpers.scala index 67a0ed0ce0..d2d8880dc7 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentDataHelpers.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentDataHelpers.scala @@ -14,7 +14,6 @@ import com.digitalasset.canton.data.{ import com.digitalasset.canton.participant.protocol.submission.SeedGenerator import com.digitalasset.canton.protocol.* import com.digitalasset.canton.sequencing.protocol.* -import com.digitalasset.canton.time.TimeProofTestUtil import com.digitalasset.canton.topology.* import com.digitalasset.canton.util.ReassignmentTag.{Source, Target} @@ -28,14 +27,9 @@ final case class ReassignmentDataHelpers( // mediatorCryptoClient and sequencerCryptoClient need to be defined for computation of the DeliveredUnassignmentResult mediatorCryptoClient: Option[SynchronizerCryptoClient] = None, sequencerCryptoClient: Option[SynchronizerCryptoClient] = None, - targetTime: CantonTimestamp = CantonTimestamp.Epoch, + targetTimestamp: Target[CantonTimestamp] = 
Target(CantonTimestamp.Epoch), ) { - private val targetTimeProof: TimeProof = TimeProofTestUtil.mkTimeProof( - timestamp = targetTime, - targetSynchronizer = targetSynchronizer, - ) - private val seedGenerator: SeedGenerator = new SeedGenerator(pureCrypto) @@ -66,7 +60,7 @@ final case class ReassignmentDataHelpers( sourceSynchronizer = sourceSynchronizer, sourceMediator = sourceMediator, targetSynchronizer = targetSynchronizer, - targetTimeProof = targetTimeProof, + targetTimestamp = targetTimestamp, ) def unassignmentData( @@ -98,6 +92,7 @@ object ReassignmentDataHelpers { sourceSynchronizer: Source[PhysicalSynchronizerId], targetSynchronizer: Target[PhysicalSynchronizerId], identityFactory: TestingIdentityFactory, + targetTimestamp: Target[CantonTimestamp], ): ReassignmentDataHelpers = { val pureCrypto = identityFactory .forOwnerAndSynchronizer(DefaultTestIdentities.mediatorId, sourceSynchronizer.unwrap) @@ -122,6 +117,21 @@ object ReassignmentDataHelpers { sourceSynchronizer.unwrap, ) ), + targetTimestamp = targetTimestamp, ) } + + def apply( + contract: ContractInstance, + sourceSynchronizer: Source[PhysicalSynchronizerId], + targetSynchronizer: Target[PhysicalSynchronizerId], + identityFactory: TestingIdentityFactory, + ): ReassignmentDataHelpers = + apply( + contract, + sourceSynchronizer, + targetSynchronizer, + identityFactory, + Target(CantonTimestamp.Epoch), + ) } diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/TestReassignmentCoordination.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/TestReassignmentCoordination.scala index 81c04d0ed2..8918624938 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/TestReassignmentCoordination.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/TestReassignmentCoordination.scala @@ -5,6 +5,7 @@ package com.digitalasset.canton.participant.protocol.reassignment import cats.data.EitherT import cats.syntax.functor.* +import com.digitalasset.canton.BaseTest.testedProtocolVersion import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.crypto.{ SyncCryptoApiParticipantProvider, @@ -22,7 +23,7 @@ import com.digitalasset.canton.participant.store.memory.InMemoryReassignmentStor import com.digitalasset.canton.participant.sync.StaticSynchronizerParametersGetter import com.digitalasset.canton.protocol.ExampleTransactionFactory.* import com.digitalasset.canton.protocol.StaticSynchronizerParameters -import com.digitalasset.canton.time.TimeProofTestUtil +import com.digitalasset.canton.time.NonNegativeFiniteDuration import com.digitalasset.canton.topology.transaction.ParticipantPermission.{ Confirmation, Observation, @@ -38,12 +39,11 @@ import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ReassignmentTag.{Source, Target} import com.digitalasset.canton.util.{ReassignmentTag, SameReassignmentType, SingletonTraverse} import com.digitalasset.canton.{BaseTest, LfPackageId} -import org.mockito.ArgumentMatchers.any -import org.mockito.Mockito.when -import org.mockito.MockitoSugar.mock import scala.collection.concurrent.TrieMap +import scala.concurrent.duration.{DurationInt, FiniteDuration} import scala.concurrent.{ExecutionContext, Future} +import scala.jdk.DurationConverters.* private[reassignment] object TestReassignmentCoordination { @@ -57,19 +57,9 
@@ private[reassignment] object TestReassignmentCoordination { awaitTimestampOverride: Option[Option[Future[Unit]]] = None, loggerFactory: NamedLoggerFactory, packages: Seq[LfPackageId] = Seq.empty, + targetTimestampForwardTolerance: FiniteDuration = 30.seconds, )(implicit ec: ExecutionContext): ReassignmentCoordination = { - val recentTimeProofProvider = mock[RecentTimeProofProvider] - when( - recentTimeProofProvider.get( - any[Target[PhysicalSynchronizerId]], - any[Target[StaticSynchronizerParameters]], - )( - any[TraceContext] - ) - ) - .thenReturn(EitherT.pure(TimeProofTestUtil.mkTimeProof(timeProofTimestamp))) - val reassignmentStores = synchronizers .map(synchronizer => @@ -84,7 +74,12 @@ private[reassignment] object TestReassignmentCoordination { val staticSynchronizerParametersGetter = new StaticSynchronizerParametersGetter { override def staticSynchronizerParameters( synchronizerId: PhysicalSynchronizerId - ): Option[StaticSynchronizerParameters] = Some(BaseTest.testedStaticSynchronizerParameters) + ): Option[StaticSynchronizerParameters] = Some( + BaseTest.defaultStaticSynchronizerParametersWith( + topologyChangeDelay = NonNegativeFiniteDuration.Zero, + testedProtocolVersion, + ) + ) override def latestKnownPSId(synchronizerId: SynchronizerId): Option[PhysicalSynchronizerId] = ??? @@ -104,19 +99,24 @@ private[reassignment] object TestReassignmentCoordination { new ReassignmentCoordination( reassignmentStoreFor = id => reassignmentStores.get(id).toRight(UnknownSynchronizer(id.unwrap, "not found")), - recentTimeProofFor = recentTimeProofProvider, reassignmentSubmissionFor = assignmentBySubmission, pendingUnassignments = reassignmentSynchronizer.map(Option(_)), staticSynchronizerParametersGetter = staticSynchronizerParametersGetter, - syncCryptoApi = - defaultSyncCryptoApi(synchronizers.toSeq.map(_.unwrap), packages, loggerFactory), + syncCryptoApi = defaultSyncCryptoApi( + synchronizers.toSeq.map(_.unwrap), + packages, + loggerFactory, + timeProofTimestamp, + ), + targetTimestampForwardTolerance = + NonNegativeFiniteDuration.tryCreate(targetTimestampForwardTolerance.toJava), loggerFactory, ) { override def awaitTimestamp[T[X] <: ReassignmentTag[X]: SameReassignmentType]( synchronizerId: T[PhysicalSynchronizerId], staticSynchronizerParameters: T[StaticSynchronizerParameters], - timestamp: CantonTimestamp, + timestamp: T[CantonTimestamp], )(implicit traceContext: TraceContext ): Either[ReassignmentProcessorError, Option[FutureUnlessShutdown[Unit]]] = @@ -132,7 +132,7 @@ private[reassignment] object TestReassignmentCoordination { ]( synchronizerId: T[PhysicalSynchronizerId], staticSynchronizerParameters: T[StaticSynchronizerParameters], - timestamp: CantonTimestamp, + timestamp: T[CantonTimestamp], )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, ReassignmentProcessorError, T[ @@ -152,12 +152,16 @@ private[reassignment] object TestReassignmentCoordination { synchronizers: Seq[PhysicalSynchronizerId], packages: Seq[LfPackageId], loggerFactory: NamedLoggerFactory, + approximateTimestamp: CantonTimestamp, ): SyncCryptoApiParticipantProvider = TestingTopology(synchronizers = synchronizers.toSet) .withReversedTopology(defaultTopology) .withPackages(defaultTopology.keys.map(_ -> packages).toMap) .build(loggerFactory) - .forOwner(submittingParticipant) + .forOwner( + owner = submittingParticipant, + currentSnapshotApproximationTimestamp = approximateTimestamp, + ) private val observerParticipant1: ParticipantId = ParticipantId("observerParticipant1") private val 
observerParticipant2: ParticipantId = ParticipantId("observerParticipant2") diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentProcessingStepsTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentProcessingStepsTest.scala index 0eaabc8b8f..9e771a7a05 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentProcessingStepsTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentProcessingStepsTest.scala @@ -27,7 +27,7 @@ import com.digitalasset.canton.data.* import com.digitalasset.canton.data.ViewType.UnassignmentViewType import com.digitalasset.canton.lifecycle.{DefaultPromiseUnlessShutdownFactory, FutureUnlessShutdown} import com.digitalasset.canton.logging.LogEntry -import com.digitalasset.canton.participant.admin.PackageDependencyResolver +import com.digitalasset.canton.participant.ParticipantNodeParameters import com.digitalasset.canton.participant.event.RecordOrderPublisher import com.digitalasset.canton.participant.ledger.api.{LedgerApiIndexer, LedgerApiStore} import com.digitalasset.canton.participant.metrics.ParticipantTestMetrics @@ -59,6 +59,7 @@ import com.digitalasset.canton.participant.protocol.validation.{ AuthenticationValidator, } import com.digitalasset.canton.participant.protocol.{EngineController, ProcessingStartingPoints} +import com.digitalasset.canton.participant.store.ActiveContractStore.Active import com.digitalasset.canton.participant.store.memory.* import com.digitalasset.canton.participant.store.{ AcsCounterParticipantConfigStore, @@ -78,7 +79,7 @@ import com.digitalasset.canton.store.{ SessionKeyStoreWithInMemoryCache, } import com.digitalasset.canton.time.SynchronizerTimeTracker.DummyTickRequest -import com.digitalasset.canton.time.{SynchronizerTimeTracker, TimeProofTestUtil, WallClock} +import com.digitalasset.canton.time.{SynchronizerTimeTracker, WallClock} import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex import com.digitalasset.canton.topology.client.TopologySnapshot @@ -90,7 +91,7 @@ import com.digitalasset.canton.topology.transaction.ParticipantPermission.{ } import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ReassignmentTag.{Source, Target} -import com.digitalasset.canton.util.{ContractAuthenticator, ResourceUtil} +import com.digitalasset.canton.util.{ContractValidator, ResourceUtil} import com.digitalasset.canton.version.HasTestCloseContext import com.digitalasset.canton.{ BaseTest, @@ -104,10 +105,14 @@ import com.digitalasset.canton.{ RequestCounter, SequencerCounter, } +import com.google.rpc.status.Status +import io.grpc.Status.Code.FAILED_PRECONDITION import org.scalatest.wordspec.AsyncWordSpec +import java.time.Instant import java.util.UUID import scala.annotation.nowarn +import scala.concurrent.duration.{DurationInt, FiniteDuration} import scala.concurrent.{ExecutionContext, Future} @nowarn("msg=match may not be exhaustive") @@ -182,12 +187,10 @@ final class UnassignmentProcessingStepsTest SynchronizerCrypto(crypto, defaultStaticSynchronizerParameters), IndexedPhysicalSynchronizer.tryCreate(sourceSynchronizer.unwrap, 1), defaultStaticSynchronizerParameters, - exitOnFatalFailures = true, - disableUpgradeValidation = false, - packageDependencyResolver = 
mock[PackageDependencyResolver], + parameters = ParticipantNodeParameters.forTestingOnly(testedProtocolVersion), + packageMetadataView = mock[PackageMetadataView], Eval.now(mock[LedgerApiStore]), logicalPersistentState, - Eval.now(mock[PackageMetadataView]), loggerFactory, timeouts, futureSupervisor, @@ -228,7 +231,7 @@ final class UnassignmentProcessingStepsTest sourceSynchronizer, sourceMediator, targetSynchronizer, - timeProof, + targetTs, ) private def createTestingIdentityFactory( @@ -285,15 +288,18 @@ final class UnassignmentProcessingStepsTest private lazy val seedGenerator = new SeedGenerator(crypto.pureCrypto) private def createReassignmentCoordination( - cryptoSnapshot: SynchronizerSnapshotSyncCryptoApi = cryptoSnapshot + cryptoSnapshot: SynchronizerSnapshotSyncCryptoApi = cryptoSnapshot, + approximateTimestamp: CantonTimestamp = CantonTimestamp.Epoch, + targetTimestampForwardTolerance: FiniteDuration = 30.seconds, ) = TestReassignmentCoordination( Set(Target(sourceSynchronizer.unwrap), targetSynchronizer), - CantonTimestamp.Epoch, + approximateTimestamp, Some(cryptoSnapshot), Some(None), loggerFactory, Seq(ExampleTransactionFactory.packageId), + targetTimestampForwardTolerance = targetTimestampForwardTolerance, )(directExecutionContext) private lazy val coordination: ReassignmentCoordination = @@ -310,7 +316,7 @@ final class UnassignmentProcessingStepsTest cryptoClient, seedGenerator, Source(defaultStaticSynchronizerParameters), - ContractAuthenticator(crypto.pureCrypto), + ContractValidator.AllowAll, Source(testedProtocolVersion), loggerFactory, )(executorService) @@ -331,11 +337,7 @@ final class UnassignmentProcessingStepsTest participant -> admin } - private lazy val timeProof = - TimeProofTestUtil.mkTimeProof( - timestamp = CantonTimestamp.Epoch, - targetSynchronizer = targetSynchronizer, - ) + private lazy val targetTs = Target(CantonTimestamp.Epoch) private lazy val contract = ExampleContractFactory.build( signatories = Set(submitter), @@ -343,6 +345,8 @@ final class UnassignmentProcessingStepsTest ) private lazy val contractId = contract.contractId + private val reassignmentId = ReassignmentId.tryCreate("00") + private def mkParsedRequest( view: FullUnassignmentTree, recipients: Recipients, @@ -361,7 +365,7 @@ final class UnassignmentProcessingStepsTest sourceMediator, cryptoSnapshot, cryptoSnapshot.ipsSnapshot.findDynamicSynchronizerParameters().futureValueUS.value, - ReassignmentId.tryCreate("00"), + reassignmentId, ) "UnassignmentRequest.validated" should { @@ -400,7 +404,6 @@ final class UnassignmentProcessingStepsTest UnassignmentRequest .validated( submittingParticipant, - timeProof, ContractsReassignmentBatch( updatedContract, initialReassignmentCounter, @@ -599,7 +602,7 @@ final class UnassignmentProcessingStepsTest sourceSynchronizer = sourceSynchronizer, sourceMediator = sourceMediator, targetSynchronizer = targetSynchronizer, - targetTimeProof = timeProof, + targetTimestamp = targetTs, ), Set(submittingParticipant, participant1, participant2), ) @@ -639,7 +642,7 @@ final class UnassignmentProcessingStepsTest sourceSynchronizer = sourceSynchronizer, sourceMediator = sourceMediator, targetSynchronizer = targetSynchronizer, - targetTimeProof = timeProof, + targetTimestamp = targetTs, ), Set(submittingParticipant, participant1, participant2, participant3, participant4), ) @@ -668,7 +671,7 @@ final class UnassignmentProcessingStepsTest sourceSynchronizer = sourceSynchronizer, sourceMediator = sourceMediator, targetSynchronizer = targetSynchronizer, - targetTimeProof 
= timeProof, + targetTimestamp = targetTs, ), Set(submittingParticipant, participant1), ) @@ -692,7 +695,7 @@ final class UnassignmentProcessingStepsTest _ <- persistentState.activeContractStore .markContractsCreated( Seq(contractId -> initialReassignmentCounter), - TimeOfChange(timeProof.timestamp), + TimeOfChange(targetTs.unwrap), ) .value _ <- @@ -776,7 +779,8 @@ final class UnassignmentProcessingStepsTest "construct pending data and response" should { def constructPendingDataAndResponseWith( - unassignmentProcessingSteps: UnassignmentProcessingSteps + unassignmentProcessingSteps: UnassignmentProcessingSteps, + requestTargetTs: Target[CantonTimestamp] = targetTs, ) = { val state = mkState val unassignmentRequest = UnassignmentRequest( @@ -784,12 +788,12 @@ final class UnassignmentProcessingStepsTest reassigningParticipants = Set(submittingParticipant), ContractsReassignmentBatch( contract, - initialReassignmentCounter, + ReassignmentCounter(1), ), sourceSynchronizer, sourceMediator, targetSynchronizer, - timeProof, + requestTargetTs, ) val fullUnassignmentTree = makeFullUnassignmentTree(unassignmentRequest) @@ -813,7 +817,11 @@ final class UnassignmentProcessingStepsTest signatureO = signature, ), state.reassignmentCache, - FutureUnlessShutdown.pure(mkActivenessResult()), + FutureUnlessShutdown.pure( + mkActivenessResult( + prior = Map(contract.contractId -> Some(Active(initialReassignmentCounter))) + ) + ), engineController = EngineController( submittingParticipant, RequestId(CantonTimestamp.Epoch), @@ -849,6 +857,45 @@ final class UnassignmentProcessingStepsTest PackageIdUnknownOrUnvetted ] } + + "with a requested target timestamp" should { + val localTs = CantonTimestamp.assertFromInstant(Instant.parse("2020-01-01T00:00:00Z")) + val forwardTolerance = 2.seconds + + def getConfirmationResponses(requestTargetTs: CantonTimestamp): Seq[ConfirmationResponse] = + constructPendingDataAndResponseWith( + createUnassignmentProcessingSteps( + createReassignmentCoordination( + approximateTimestamp = localTs, + targetTimestampForwardTolerance = forwardTolerance, + ) + ), + requestTargetTs = Target(requestTargetTs), + ).value.confirmationResponsesF.value.futureValueUS.value.value._1.responses + + "approve when less than forward-tolerance ahead of local timestamp" in { + getConfirmationResponses( + requestTargetTs = localTs.add(forwardTolerance).add(-1.millisecond) + ) should matchPattern { case Seq(ConfirmationResponse(_, _: LocalApprove, _)) => + } + } + "approve when exactly forward-tolerance ahead of local timestamp" in { + getConfirmationResponses( + requestTargetTs = localTs.add(forwardTolerance) + ) should matchPattern { case Seq(ConfirmationResponse(_, _: LocalApprove, _)) => + } + } + "abstain when more than forward-tolerance ahead of local timestamp" in { + getConfirmationResponses( + requestTargetTs = localTs.add(forwardTolerance).add(1.millisecond) + ) should matchPattern { + case Seq(ConfirmationResponse(_, LocalAbstain(Status(code, msg, _, _)), _)) + if code == FAILED_PRECONDITION.value && msg.contains( + s"Non-validatable target timestamp when processing unassignment $reassignmentId" + ) => + } + } + } } "get commit set and contracts to be stored and event" should { @@ -895,7 +942,7 @@ final class UnassignmentProcessingStepsTest testedProtocolVersion, ) val assignmentExclusivity = synchronizerParameters - .assignmentExclusivityLimitFor(timeProof.timestamp) + .assignmentExclusivityLimitFor(targetTs.unwrap) .value val fullUnassignmentTree = makeFullUnassignmentTree(unassignmentRequest) 
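// The three "requested target timestamp" cases above reduce to a single comparison: a requested
// target timestamp is accepted iff it is at most `targetTimestampForwardTolerance` ahead of the
// participant's local (approximate) timestamp. The following is a minimal, self-contained sketch
// of that predicate using plain java.time types; `isTargetTimestampValidatable` and the object
// name are illustrative assumptions, not the production API.
object TargetTimestampToleranceSketch {
  import java.time.{Duration, Instant}

  // Approve up to and including localTs + forwardTolerance; abstain strictly beyond it.
  def isTargetTimestampValidatable(
      requestedTargetTs: Instant,
      localTs: Instant,
      forwardTolerance: Duration,
  ): Boolean =
    !requestedTargetTs.isAfter(localTs.plus(forwardTolerance))

  def main(args: Array[String]): Unit = {
    // Mirrors the three tests: one millisecond under, exactly at, and one millisecond over
    // a two-second tolerance.
    val localTs = Instant.parse("2020-01-01T00:00:00Z")
    val tolerance = Duration.ofSeconds(2)
    assert(isTargetTimestampValidatable(localTs.plus(tolerance).minusMillis(1), localTs, tolerance)) // approve
    assert(isTargetTimestampValidatable(localTs.plus(tolerance), localTs, tolerance)) // approve
    assert(!isTargetTimestampValidatable(localTs.plus(tolerance).plusMillis(1), localTs, tolerance)) // abstain
  }
}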
diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentValidationTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentValidationTest.scala index 17a6c0e295..c142d059af 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentValidationTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/reassignment/UnassignmentValidationTest.scala @@ -4,6 +4,8 @@ package com.digitalasset.canton.participant.protocol.reassignment import cats.data.EitherT +import cats.implicits.catsSyntaxEitherId +import com.daml.logging.LoggingContext import com.digitalasset.canton.* import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto import com.digitalasset.canton.crypto.{Signature, SigningKeyUsage} @@ -20,7 +22,7 @@ import com.digitalasset.canton.participant.protocol.reassignment.ReassignmentPro ReassignmentProcessorError, } import com.digitalasset.canton.participant.protocol.reassignment.ReassignmentValidationError.{ - ContractIdAuthenticationFailure, + ContractAuthenticationFailure, ReassigningParticipantsMismatch, StakeholdersMismatch, SubmitterMustBeStakeholder, @@ -30,12 +32,18 @@ import com.digitalasset.canton.protocol.* import com.digitalasset.canton.sequencing.protocol.{MediatorGroupRecipient, Recipients} import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex +import com.digitalasset.canton.topology.client.TopologySnapshot import com.digitalasset.canton.topology.transaction.ParticipantPermission -import com.digitalasset.canton.util.ContractAuthenticator +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ContractValidator import com.digitalasset.canton.util.ReassignmentTag.{Source, Target} +import com.digitalasset.daml.lf.data.Ref.PackageId +import com.digitalasset.daml.lf.transaction.FatContractInstance +import org.apache.commons.lang3.NotImplementedException import org.scalatest.wordspec.AnyWordSpec import java.util.UUID +import scala.concurrent.ExecutionContext class UnassignmentValidationTest extends AnyWordSpec with BaseTest with HasExecutionContext { private val sourceSynchronizer = Source( @@ -124,48 +132,42 @@ class UnassignmentValidationTest extends AnyWordSpec with BaseTest with HasExecu validation.isSuccessful.futureValueUS shouldBe true } - "fail when wrong metadata is given" in { + "report non-validatable when target topology cannot be fetched" in { + val validation = performValidation(targetTopology = None).futureValueUS.value - def testBadMetadata(badMetadata: ContractMetadata): Unit = - test(badMetadata).left.value match { - case ContractIdAuthenticationFailure(ref, reason, contractId) => - ref shouldBe ReassignmentRef(contract.contractId) - contractId shouldBe contract.contractId - reason should startWith("Mismatching contract id suffixes") - case other => fail(s"Did not expect $other") - } + validation.reassigningParticipantValidationResult.isTargetTsValidatable shouldBe false + } - def test( - metadata: ContractMetadata - ): Either[ReassignmentValidationError, Unit] = { - val updatedContract = ExampleContractFactory.modify(contract, metadata = Some(metadata)) - performValidation( - updatedContract - 
).futureValueUS.value.commonValidationResult.contractAuthenticationResultF.futureValueUS + "fail if contract validation fails" in { + + val expected = "bad-contract" + + val failingContractValidator = new ContractValidator { + override def authenticate(contract: FatContractInstance, targetPackageId: PackageId)( + implicit + ec: ExecutionContext, + traceContext: TraceContext, + loggingContext: LoggingContext, + ): EitherT[FutureUnlessShutdown, String, Unit] = + EitherT.fromEither[FutureUnlessShutdown](expected.asLeft[Unit]) + override def authenticateHash( + contract: FatContractInstance, + contractHash: LfHash, + ): Either[String, Unit] = throw new NotImplementedException() } - val incorrectStakeholders = testMetadata( - stakeholders = baseMetadata.stakeholders + receiverParty2 - ) - - val incorrectSignatories = testMetadata( - stakeholders = baseMetadata.stakeholders + receiverParty2, - signatories = baseMetadata.signatories + receiverParty2, - ) - - val incorrectKey = testMetadata( - maybeKeyWithMaintainersVersioned = Some( - ExampleTransactionFactory.globalKeyWithMaintainers( - ExampleTransactionFactory.defaultGlobalKey, - baseMetadata.signatories, - ) - ) - ) - - test(testMetadata()).isRight shouldBe true - testBadMetadata(incorrectStakeholders) - testBadMetadata(incorrectSignatories) - testBadMetadata(incorrectKey) + inside( + performValidation( + contract, + contractValidator = failingContractValidator, + ).futureValueUS.value.commonValidationResult.contractAuthenticationResultF.futureValueUS.left.value + ) { + case ContractAuthenticationFailure(ref, reason, contractId) => + ref shouldBe ReassignmentRef(contract.contractId) + contractId shouldBe contract.contractId + reason should include(expected) + case other => fail(s"Did not expect $other") + } } @@ -204,7 +206,8 @@ class UnassignmentValidationTest extends AnyWordSpec with BaseTest with HasExecu validateUnassignmentTree( FullUnassignmentTree( UnassignmentViewTree(commonData, view, Source(testedProtocolVersion), pureCrypto) - ) + ), + Some(Target(identityFactory.topologySnapshot())), ).futureValueUS.value.commonValidationResult.contractAuthenticationResultF.futureValueUS } @@ -333,7 +336,8 @@ class UnassignmentValidationTest extends AnyWordSpec with BaseTest with HasExecu private def validateUnassignmentTree( fullUnassignmentTree: FullUnassignmentTree, - identityFactory: TestingIdentityFactory = identityFactory, + targetTopology: Option[Target[TopologySnapshot]], + contractValidator: ContractValidator = ContractValidator.AllowAll, ): EitherT[FutureUnlessShutdown, ReassignmentProcessorError, UnassignmentValidationResult] = { val recipients = Recipients.cc( reassigningParticipants.toSeq.head, @@ -345,16 +349,23 @@ class UnassignmentValidationTest extends AnyWordSpec with BaseTest with HasExecu .value val parsed = mkParsedRequest(fullUnassignmentTree, recipients, Some(signature)) - val contractAuthenticator = ContractAuthenticator(new SymbolicPureCrypto()) - - val unassignmentValidation = - new UnassignmentValidation(confirmingParticipant, contractAuthenticator) + val getTopologyAtTs = new GetTopologyAtTimestamp { + import com.digitalasset.canton.tracing.TraceContext + override def maybeAwaitTopologySnapshot( + targetPSId: Target[PhysicalSynchronizerId], + requestedTimestamp: Target[CantonTimestamp], + )(implicit tc: TraceContext) = EitherT.rightT(targetTopology) + } - unassignmentValidation.perform( - targetTopology = Some(Target(identityFactory.topologySnapshot())), + val unassignmentValidation = UnassignmentValidation( + 
isReassigningParticipant = true, + participantId = confirmingParticipant, + contractValidator = contractValidator, activenessF = FutureUnlessShutdown.pure(mkActivenessResult()), - )(parsedRequest = parsed) + getTopologyAtTs = getTopologyAtTs, + ) + unassignmentValidation.perform(parsedRequest = parsed) } private def performValidation( @@ -362,6 +373,10 @@ class UnassignmentValidationTest extends AnyWordSpec with BaseTest with HasExecu reassigningParticipantsOverride: Set[ParticipantId] = reassigningParticipants, submitter: LfPartyId = signatory, identityFactory: TestingIdentityFactory = identityFactory, + targetTopology: Option[Target[TopologySnapshot]] = Some( + Target(identityFactory.topologySnapshot()) + ), + contractValidator: ContractValidator = ContractValidator.AllowAll, ): EitherT[FutureUnlessShutdown, ReassignmentProcessorError, UnassignmentValidationResult] = { val unassignmentRequest = ReassignmentDataHelpers(contract, sourceSynchronizer, targetSynchronizer, identityFactory) @@ -381,7 +396,8 @@ class UnassignmentValidationTest extends AnyWordSpec with BaseTest with HasExecu validateUnassignmentTree( fullUnassignmentTree, - identityFactory, + targetTopology, + contractValidator, ) } } diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/SynchronizerSelectionFixture.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/SynchronizerSelectionFixture.scala index a61456920d..411726b637 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/SynchronizerSelectionFixture.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/SynchronizerSelectionFixture.scala @@ -3,19 +3,23 @@ package com.digitalasset.canton.participant.protocol.submission -import com.digitalasset.canton.protocol.{LfContractId, LfLanguageVersion, LfVersionedTransaction} +import com.digitalasset.canton.protocol.{ + LfContractId, + LfSerializationVersion, + LfVersionedTransaction, +} import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.client.TopologySnapshotLoader import com.digitalasset.canton.topology.transaction.* import com.digitalasset.canton.topology.transaction.ParticipantPermission.Submission -import com.digitalasset.canton.version.DamlLfVersionToProtocolVersions +import com.digitalasset.canton.version.LfSerializationVersionToProtocolVersions import com.digitalasset.canton.{BaseTest, LfPackageId, LfPartyId, LfValue} import com.digitalasset.daml.lf.data.Ref.QualifiedName import com.digitalasset.daml.lf.data.{ImmArray, Ref} import com.digitalasset.daml.lf.transaction.Node import com.digitalasset.daml.lf.transaction.test.TestNodeBuilder.{ CreateKey, - CreateTransactionVersion, + CreateSerializationVersion, } import com.digitalasset.daml.lf.transaction.test.TransactionBuilder.Implicits.* import com.digitalasset.daml.lf.transaction.test.TreeTransactionBuilder.* @@ -35,12 +39,12 @@ private[submission] object SynchronizerSelectionFixture extends TestIdFactory { ) /* - We cannot take the maximum transaction version available. The reason is that if the test is run + We cannot take the maximum serialization version available. The reason is that if the test is run with a low protocol version, then some filter will reject the transaction (because high transaction version needs high protocol version). 
*/ - lazy val fixtureTransactionVersion: LfLanguageVersion = - DamlLfVersionToProtocolVersions.damlLfVersionToMinimumProtocolVersions.collect { + lazy val fixtureSerializationVersion: LfSerializationVersion = + LfSerializationVersionToProtocolVersions.lfSerializationVersionToMinimumProtocolVersions.collect { case (txVersion, protocolVersion) if protocolVersion <= BaseTest.testedProtocolVersion => txVersion }.last @@ -84,11 +88,10 @@ private[submission] object SynchronizerSelectionFixture extends TestIdFactory { object Transactions { - private[this] val DefaultLfVersion = - LfLanguageVersion.StableVersions(LfLanguageVersion.Major.V2).max + private[this] val DefaultLfSerializationVersion = LfSerializationVersion.V1 def buildExerciseNode( - version: LfLanguageVersion, + version: LfSerializationVersion, inputContractId: LfContractId, signatory: LfPartyId, observer: LfPartyId, @@ -101,7 +104,7 @@ private[submission] object SynchronizerSelectionFixture extends TestIdFactory { signatories = List(signatory), observers = List(observer), key = CreateKey.NoKey, - version = CreateTransactionVersion.Version(version), + version = CreateSerializationVersion.Version(version), ) TestNodeBuilder.exercise( @@ -122,7 +125,7 @@ private[submission] object SynchronizerSelectionFixture extends TestIdFactory { val correctPackages: Seq[VettedPackage] = VettedPackage.unbounded(Seq(defaultPackageId)) def tx( - version: LfLanguageVersion = DefaultLfVersion + version: LfSerializationVersion = DefaultLfSerializationVersion ): LfVersionedTransaction = { import SimpleTopology.* TreeTransactionBuilder.toVersionedTransaction( @@ -133,14 +136,14 @@ private[submission] object SynchronizerSelectionFixture extends TestIdFactory { signatories = Seq(signatory), observers = Seq(observer), key = CreateKey.NoKey, - version = CreateTransactionVersion.Version(version), + version = CreateSerializationVersion.Version(version), ) ) } } final case class ThreeExercises( - version: LfLanguageVersion = DefaultLfVersion + version: LfSerializationVersion = DefaultLfSerializationVersion ) { import SimpleTopology.* @@ -168,7 +171,7 @@ private[submission] object SynchronizerSelectionFixture extends TestIdFactory { } final case class ExerciseByInterface( - version: LfLanguageVersion = DefaultLfVersion + version: LfSerializationVersion = DefaultLfSerializationVersion ) { import ExerciseByInterface.* import SimpleTopology.* diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/SynchronizersFilterTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/SynchronizersFilterTest.scala index 5932c23b53..9de98e68d9 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/SynchronizersFilterTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/SynchronizersFilterTest.scala @@ -8,11 +8,11 @@ import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.participant.protocol.submission.SynchronizerSelectionFixture.* import com.digitalasset.canton.participant.protocol.submission.SynchronizerSelectionFixture.Transactions.ExerciseByInterface import com.digitalasset.canton.participant.protocol.submission.SynchronizersFilterTest.* -import com.digitalasset.canton.protocol.{LfLanguageVersion, LfVersionedTransaction} +import com.digitalasset.canton.protocol.{LfSerializationVersion, LfVersionedTransaction} 
import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.transaction.VettedPackage import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.version.{DamlLfVersionToProtocolVersions, ProtocolVersion} +import com.digitalasset.canton.version.{LfSerializationVersionToProtocolVersions, ProtocolVersion} import com.digitalasset.canton.{BaseTest, FailOnShutdown, HasExecutionContext, LfPartyId} import com.digitalasset.daml.lf.transaction.test.TransactionBuilder.Implicits.* import org.scalatest.wordspec.AnyWordSpec @@ -30,7 +30,7 @@ class SynchronizersFilterTest val ledgerTime = CantonTimestamp.now() val filter = SynchronizersFilterForTx( - Transactions.Create.tx(fixtureTransactionVersion), + Transactions.Create.tx(fixtureSerializationVersion), ledgerTime, testedProtocolVersion, ) @@ -119,7 +119,7 @@ class SynchronizersFilterTest val currentSynchronizerPV = ProtocolVersion.v34 val filter = SynchronizersFilterForTx( - Transactions.Create.tx(LfLanguageVersion.v2_dev), + Transactions.Create.tx(LfSerializationVersion.VDev), ledgerTime, currentSynchronizerPV, ) @@ -128,14 +128,15 @@ class SynchronizersFilterTest filter .split(correctTopology, Transactions.Create.correctPackages) .futureValueUS - val requiredPV = DamlLfVersionToProtocolVersions.damlLfVersionToMinimumProtocolVersions - .get(LfLanguageVersion.v2_dev) - .value + val requiredPV = + LfSerializationVersionToProtocolVersions.lfSerializationVersionToMinimumProtocolVersions + .get(LfSerializationVersion.VDev) + .value unusableSynchronizers shouldBe List( UsableSynchronizers.UnsupportedMinimumProtocolVersion( synchronizerId = DefaultTestIdentities.physicalSynchronizerId, requiredPV = requiredPV, - lfVersion = LfLanguageVersion.v2_dev, + lfVersion = LfSerializationVersion.VDev, ) ) usableSynchronizers shouldBe empty @@ -144,7 +145,7 @@ class SynchronizersFilterTest "SynchronizersFilter (simple exercise by interface)" should { import SimpleTopology.* - val exerciseByInterface = Transactions.ExerciseByInterface(fixtureTransactionVersion) + val exerciseByInterface = Transactions.ExerciseByInterface(fixtureSerializationVersion) val ledgerTime = CantonTimestamp.now() val filter = SynchronizersFilterForTx(exerciseByInterface.tx, ledgerTime, testedProtocolVersion) diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/TransactionTreeFactoryImplTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/TransactionTreeFactoryImplTest.scala index 85e64a50a6..42a1369f90 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/TransactionTreeFactoryImplTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/TransactionTreeFactoryImplTest.scala @@ -15,9 +15,11 @@ import com.digitalasset.canton.protocol.ExampleTransactionFactory.{ defaultTestingTopology, } import com.digitalasset.canton.protocol.WellFormedTransaction.WithoutSuffixes +import com.digitalasset.canton.topology.ParticipantId import com.digitalasset.canton.topology.client.TopologySnapshot -import com.digitalasset.canton.topology.store.PackageDependencyResolverUS +import com.digitalasset.canton.topology.store.PackageDependencyResolver import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.TestContractHasher import com.digitalasset.daml.lf.data.Ref.{IdString, PackageId} import 
org.scalatest.wordspec.AsyncWordSpec @@ -52,6 +54,7 @@ final class TransactionTreeFactoryImplTest factory.psid, factory.cantonContractIdVersion, factory.cryptoOps, + TestContractHasher.Async, loggerFactory, ) @@ -197,12 +200,12 @@ final class TransactionTreeFactoryImplTest } } - object TestPackageDependencyResolver extends PackageDependencyResolverUS { + object TestPackageDependencyResolver extends PackageDependencyResolver { import cats.syntax.either.* val exampleDependency: IdString.PackageId = PackageId.assertFromString("example-dependency") override def packageDependencies(packageId: PackageId)(implicit traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, PackageId, Set[PackageId]] = + ): EitherT[FutureUnlessShutdown, (PackageId, ParticipantId), Set[PackageId]] = packageId match { case ExampleTransactionFactory.packageId => Right(Set(exampleDependency)).toEitherT[FutureUnlessShutdown] @@ -210,13 +213,15 @@ final class TransactionTreeFactoryImplTest } } - object MisconfiguredPackageDependencyResolver extends PackageDependencyResolverUS { + object MisconfiguredPackageDependencyResolver extends PackageDependencyResolver { import cats.syntax.either.* + val exampleParticipant: ParticipantId = ParticipantId("MisconfiguredPackageDependencyResolver") val exampleDependency: IdString.PackageId = PackageId.assertFromString("example-dependency") + override def packageDependencies(packageId: PackageId)(implicit traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, PackageId, Set[PackageId]] = - Left(packageId).toEitherT[FutureUnlessShutdown] + ): EitherT[FutureUnlessShutdown, (PackageId, ParticipantId), Set[PackageId]] = + Left(packageId -> exampleParticipant).toEitherT[FutureUnlessShutdown] } } diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/routing/SynchronizerSelectorTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/routing/SynchronizerSelectorTest.scala index d91df16eab..97ded2524e 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/routing/SynchronizerSelectorTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/routing/SynchronizerSelectorTest.scala @@ -23,7 +23,7 @@ import com.digitalasset.canton.participant.protocol.submission.UsableSynchronize } import com.digitalasset.canton.protocol.{ LfContractId, - LfLanguageVersion, + LfSerializationVersion, LfVersionedTransaction, Stakeholders, } @@ -31,7 +31,7 @@ import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.client.TopologySnapshotLoader import com.digitalasset.canton.topology.transaction.VettedPackage import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.version.{DamlLfVersionToProtocolVersions, ProtocolVersion} +import com.digitalasset.canton.version.{LfSerializationVersionToProtocolVersions, ProtocolVersion} import com.digitalasset.canton.{BaseTest, HasExecutionContext, LfPartyId} import com.digitalasset.daml.lf.transaction.test.TransactionBuilder.Implicits.* import org.scalatest.wordspec.AnyWordSpec @@ -151,13 +151,14 @@ class SynchronizerSelectorTest extends AnyWordSpec with BaseTest with HasExecuti "take minimum protocol version into account" ignore { val oldPV = ProtocolVersion.v34 - val transactionVersion = LfLanguageVersion.v2_dev - val newPV = 
DamlLfVersionToProtocolVersions.damlLfVersionToMinimumProtocolVersions - .get(transactionVersion) - .value + val serializationVersion = LfSerializationVersion.VDev + val newPV = + LfSerializationVersionToProtocolVersions.lfSerializationVersionToMinimumProtocolVersions + .get(serializationVersion) + .value val selectorOldPV = selectorForExerciseByInterface( - transactionVersion = transactionVersion, // requires protocol version dev + serializationVersion = serializationVersion, // requires protocol version dev connectedSynchronizers = Set(da.copy(protocolVersion = oldPV)), admissibleSynchronizers = NonEmpty.mk(Set, da.copy(protocolVersion = oldPV)), ) @@ -166,7 +167,7 @@ class SynchronizerSelectorTest extends AnyWordSpec with BaseTest with HasExecuti val expectedError = UnsupportedMinimumProtocolVersion( synchronizerId = da, requiredPV = newPV, - lfVersion = transactionVersion, + lfVersion = serializationVersion, ) selectorOldPV.forSingleSynchronizer.leftOrFailShutdown( @@ -184,7 +185,7 @@ class SynchronizerSelectorTest extends AnyWordSpec with BaseTest with HasExecuti // Happy path val selectorNewPV = selectorForExerciseByInterface( - transactionVersion = LfLanguageVersion.v2_dev, // requires protocol version dev + serializationVersion = LfSerializationVersion.VDev, // requires protocol version dev connectedSynchronizers = Set(da.copy(protocolVersion = newPV)), admissibleSynchronizers = NonEmpty.mk(Set, da.copy(protocolVersion = newPV)), ) @@ -369,7 +370,7 @@ class SynchronizerSelectorTest extends AnyWordSpec with BaseTest with HasExecuti import SimpleTopology.* "minimize the number of reassignments" in { - val threeExercises = ThreeExercises(fixtureTransactionVersion) + val threeExercises = ThreeExercises(fixtureSerializationVersion) val synchronizers = NonEmpty.mk(Set, acme, da, repair) @@ -521,7 +522,7 @@ private[routing] object SynchronizerSelectorTest { admissibleSynchronizers: NonEmpty[Set[PhysicalSynchronizerId]] = defaultAdmissibleSynchronizers, prescribedSynchronizerId: Option[PhysicalSynchronizerId] = defaultPrescribedSynchronizerId, - transactionVersion: LfLanguageVersion = fixtureTransactionVersion, + serializationVersion: LfSerializationVersion = fixtureSerializationVersion, vettedPackages: Seq[VettedPackage] = ExerciseByInterface.correctPackages, ledgerTime: CantonTimestamp = CantonTimestamp.now(), )(implicit @@ -530,7 +531,7 @@ private[routing] object SynchronizerSelectorTest { loggerFactory: NamedLoggerFactory, ): Selector = { - val exerciseByInterface = ExerciseByInterface(transactionVersion) + val exerciseByInterface = ExerciseByInterface(serializationVersion) val inputContractStakeholders = Map( exerciseByInterface.inputContractId -> Stakeholders.withSignatoriesAndObservers( diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/validation/ExtractUsedAndCreatedTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/validation/ExtractUsedAndCreatedTest.scala index 2dc9685389..b2cf0d1014 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/validation/ExtractUsedAndCreatedTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/validation/ExtractUsedAndCreatedTest.scala @@ -3,149 +3,208 @@ package com.digitalasset.canton.participant.protocol.validation +import com.digitalasset.canton.data.TransactionView +import com.digitalasset.canton.participant.protocol.LedgerEffectAbsolutizer 
+import com.digitalasset.canton.participant.protocol.LedgerEffectAbsolutizer.ViewAbsoluteLedgerEffect import com.digitalasset.canton.participant.protocol.validation.ExtractUsedAndCreated.{ CreatedContractPrep, InputContractPrep, - ViewData, } import com.digitalasset.canton.protocol.* +import com.digitalasset.canton.protocol.ContractIdAbsolutizer.{ + ContractIdAbsolutizationDataV1, + ContractIdAbsolutizationDataV2, +} import com.digitalasset.canton.topology.transaction.{ParticipantAttributes, ParticipantPermission} import com.digitalasset.canton.{BaseTestWordSpec, HasExecutionContext, LfPartyId} class ExtractUsedAndCreatedTest extends BaseTestWordSpec with HasExecutionContext { - val etf: ExampleTransactionFactory = new ExampleTransactionFactory()( - // TODO(#23971) Make this work with Contract ID V2 - cantonContractIdVersion = AuthenticatedContractIdVersionV11 - ) - - private val singleExercise = etf.SingleExercise(etf.deriveNodeSeed(1)) - private val singleCreate = etf.SingleCreate(etf.deriveNodeSeed(1)) - - private val informeeParties: Set[LfPartyId] = singleCreate.signatories ++ singleCreate.observers - private def buildUnderTest( hostedParties: Map[LfPartyId, Option[ParticipantAttributes]] ): ExtractUsedAndCreated = new ExtractUsedAndCreated(hostedParties, loggerFactory) - s"ExtractUsedAndCreated for version $AuthenticatedContractIdVersionV11" when { - val relevantExamples = etf.standardHappyCases.filter(_.rootViews.nonEmpty) - - forEvery(relevantExamples) { example => - s"checking $example" must { - - val dataViews = ExtractUsedAndCreated.viewDataFromRootViews(example.rootViews) - val parties = ExtractUsedAndCreated.extractPartyIds(dataViews) - val hostedParties = - parties.map(_ -> Some(ParticipantAttributes(ParticipantPermission.Observation))).toMap - val sut = buildUnderTest(hostedParties) - - "yield the correct result" in { - val expected = example.usedAndCreated - val usedAndCreated = sut.usedAndCreated(dataViews) - usedAndCreated.contracts shouldBe expected - usedAndCreated.hostedWitnesses shouldBe hostedParties.keySet - } - } - } + private def viewEffectsFromRootViews( + effectAbsolutizer: LedgerEffectAbsolutizer, + rootViews: Seq[TransactionView], + ): Seq[ViewAbsoluteLedgerEffect] = rootViews.flatMap(viewEffectsInPreOrder(effectAbsolutizer, _)) + + private def viewEffectsInPreOrder( + effectAbsolutizer: LedgerEffectAbsolutizer, + view: TransactionView, + ): Seq[ViewAbsoluteLedgerEffect] = { + view.subviews.assertAllUnblinded(hash => + s"View ${view.viewHash} contains an unexpected blinded subview $hash" + ) + tryEffectsFromView(effectAbsolutizer, view) +: view.subviews.unblindedElements.flatMap( + viewEffectsInPreOrder(effectAbsolutizer, _) + ) } - "Input contract prep" should { + private def tryEffectsFromView( + effectAbsolutizer: LedgerEffectAbsolutizer, + v: TransactionView, + ): ViewAbsoluteLedgerEffect = { + val vpd = v.viewParticipantData.tryUnwrap + val informees = v.viewCommonData.tryUnwrap.viewConfirmationParameters.informees + effectAbsolutizer + .absoluteViewEffects(vpd, informees) + .valueOrFail(s"absolutizing view effects for view ${v.viewHash}") + } - "Extract divulged contracts" in { + forAll(CantonContractIdVersion.all) { cantonContractIdVersion => + s"For version $cantonContractIdVersion" should { - val underTestWithNoHostedParties = buildUnderTest( - hostedParties = informeeParties.map(_ -> None).toMap + val etf: ExampleTransactionFactory = new ExampleTransactionFactory()( + cantonContractIdVersion = cantonContractIdVersion ) - val viewData = 
ViewData.tryFromView(singleExercise.view0) - val actual = underTestWithNoHostedParties.inputContractPrep(Seq(viewData)) + val singleExercise = etf.SingleExercise(etf.deriveNodeSeed(1)) + val singleCreate = etf.SingleCreate(etf.deriveNodeSeed(1)) - val serializedContract = singleExercise.used.head + val informeeParties: Set[LfPartyId] = singleCreate.signatories ++ singleCreate.observers - val expected = InputContractPrep( - used = Map(singleExercise.absolutizedContractId -> serializedContract), - divulged = Map(singleExercise.absolutizedContractId -> serializedContract), - consumedOfHostedStakeholders = Map.empty, - contractIdsOfHostedInformeeStakeholder = Set.empty, - contractIdsAllowedToBeUnknown = Set.empty, - ) + def effectAbsolutizer(example: ExampleTransaction) = { + val absolutizationData = etf.cantonContractIdVersion match { + case _: CantonContractIdV1Version => ContractIdAbsolutizationDataV1 + case _: CantonContractIdV2Version => + ContractIdAbsolutizationDataV2(example.transactionId, etf.ledgerTime) + } + val contractAbsolutizer = new ContractIdAbsolutizer(etf.cryptoOps, absolutizationData) + val effectAbsolutizer = new LedgerEffectAbsolutizer(contractAbsolutizer) + effectAbsolutizer + } - actual shouldBe expected - } + "ExtractUsedAndCreated" when { + val relevantExamples = etf.standardHappyCases.filter(_.rootViews.nonEmpty) + + forEvery(relevantExamples) { example => + s"checking $example" must { + + "yield the correct result" in { + val dataViews = + viewEffectsFromRootViews(effectAbsolutizer(example), example.rootViews) + val parties = ExtractUsedAndCreated.extractPartyIds(dataViews) + val hostedParties = + parties + .map(_ -> Some(ParticipantAttributes(ParticipantPermission.Observation))) + .toMap + val sut = buildUnderTest(hostedParties) + + val expected = example.usedAndCreated + val usedAndCreated = sut.usedAndCreated(dataViews) + usedAndCreated.contracts shouldBe expected + usedAndCreated.hostedWitnesses shouldBe hostedParties.keySet + } + } + } + } - "Onboarding" should { + "Input contract prep" should { - val viewData = ViewData.tryFromView(singleExercise.view0) - val serializedContract = singleExercise.used.head - val signatories = singleExercise.node.signatories - val observers = singleExercise.node.stakeholders -- signatories + "Extract divulged contracts" in { - "identify potentially unknown contracts" in { - val underTestOnlyOnboardingHostedParties = buildUnderTest( - hostedParties = (signatories.map(_ -> None) ++ observers.map( - _ -> Some(ParticipantAttributes(ParticipantPermission.Confirmation, onboarding = true)) - )).toMap - ) + val underTestWithNoHostedParties = buildUnderTest( + hostedParties = informeeParties.map(_ -> None).toMap + ) - val actual = underTestOnlyOnboardingHostedParties.inputContractPrep(Seq(viewData)) + val absolutizer = effectAbsolutizer(singleExercise) + val viewEffects = tryEffectsFromView(absolutizer, singleExercise.view0) + val actual = underTestWithNoHostedParties.inputContractPrep(Seq(viewEffects)) - val expected = InputContractPrep( - used = Map(singleExercise.absolutizedContractId -> serializedContract), - divulged = Map.empty, - consumedOfHostedStakeholders = - Map(singleExercise.absolutizedContractId -> informeeParties), - contractIdsOfHostedInformeeStakeholder = Set(singleExercise.absolutizedContractId), - contractIdsAllowedToBeUnknown = Set(singleExercise.absolutizedContractId), - ) + val serializedContract = singleExercise.used.head - actual shouldBe expected - } + val expected = InputContractPrep( + used = 
Map(singleExercise.absolutizedContractId -> serializedContract), + divulged = Map(singleExercise.absolutizedContractId -> serializedContract), + consumedOfHostedStakeholders = Map.empty, + contractIdsOfHostedInformeeStakeholder = Set.empty, + contractIdsAllowedToBeUnknown = Set.empty, + ) - "not mark unknown contracts if not all hosted stakeholders onboarding" in { - val underTestOnlyOnboardingHostedParties = buildUnderTest( - hostedParties = (signatories.map( - _ -> Some(ParticipantAttributes(ParticipantPermission.Observation)) - ) ++ observers.map( - _ -> Some(ParticipantAttributes(ParticipantPermission.Confirmation, onboarding = true)) - )).toMap - ) - - val actual = underTestOnlyOnboardingHostedParties.inputContractPrep(Seq(viewData)) - - val expected = InputContractPrep( - used = Map(singleExercise.absolutizedContractId -> serializedContract), - divulged = Map.empty, - consumedOfHostedStakeholders = - Map(singleExercise.absolutizedContractId -> informeeParties), - contractIdsOfHostedInformeeStakeholder = Set(singleExercise.absolutizedContractId), - contractIdsAllowedToBeUnknown = Set.empty, - ) - - actual shouldBe expected + actual shouldBe expected + } + + "Onboarding" should { + + lazy val absolutizer = effectAbsolutizer(singleExercise) + lazy val viewEffects = tryEffectsFromView(absolutizer, singleExercise.view0) + lazy val serializedContract = singleExercise.used.head + lazy val signatories = singleExercise.node.signatories + lazy val observers = singleExercise.node.stakeholders -- signatories + + "identify potentially unknown contracts" in { + val underTestOnlyOnboardingHostedParties = buildUnderTest( + hostedParties = (signatories.map(_ -> None) ++ observers.map( + _ -> Some( + ParticipantAttributes(ParticipantPermission.Confirmation, onboarding = true) + ) + )).toMap + ) + + val actual = underTestOnlyOnboardingHostedParties.inputContractPrep(Seq(viewEffects)) + + val expected = InputContractPrep( + used = Map(singleExercise.absolutizedContractId -> serializedContract), + divulged = Map.empty, + consumedOfHostedStakeholders = + Map(singleExercise.absolutizedContractId -> informeeParties), + contractIdsOfHostedInformeeStakeholder = Set(singleExercise.absolutizedContractId), + contractIdsAllowedToBeUnknown = Set(singleExercise.absolutizedContractId), + ) + + actual shouldBe expected + } + + "not mark unknown contracts if not all hosted stakeholders onboarding" in { + val underTestOnlyOnboardingHostedParties = buildUnderTest( + hostedParties = (signatories.map( + _ -> Some(ParticipantAttributes(ParticipantPermission.Observation)) + ) ++ observers.map( + _ -> Some( + ParticipantAttributes(ParticipantPermission.Confirmation, onboarding = true) + ) + )).toMap + ) + + val actual = underTestOnlyOnboardingHostedParties.inputContractPrep(Seq(viewEffects)) + + val expected = InputContractPrep( + used = Map(singleExercise.absolutizedContractId -> serializedContract), + divulged = Map.empty, + consumedOfHostedStakeholders = + Map(singleExercise.absolutizedContractId -> informeeParties), + contractIdsOfHostedInformeeStakeholder = Set(singleExercise.absolutizedContractId), + contractIdsAllowedToBeUnknown = Set.empty, + ) + + actual shouldBe expected + } + } } - } - } - "Created contract prep" should { + "Created contract prep" should { - "Extract witnessed contracts" in { + "Extract witnessed contracts" in { - val underTestWithNoHostedParties = buildUnderTest( - hostedParties = informeeParties.map(_ -> None).toMap - ) + val underTestWithNoHostedParties = buildUnderTest( + hostedParties = 
informeeParties.map(_ -> None).toMap + ) - val viewData = ViewData.tryFromView(singleCreate.view0) - val actual = underTestWithNoHostedParties.createdContractPrep(Seq(viewData)) + val absolutizer = effectAbsolutizer(singleCreate) + val viewEffects = tryEffectsFromView(absolutizer, singleCreate.view0) + val actual = underTestWithNoHostedParties.createdContractPrep(Seq(viewEffects)) - val expected = CreatedContractPrep( - createdContractsOfHostedInformees = Map.empty, - witnessed = Map(singleCreate.absolutizedContractId -> singleCreate.created.head), - ) + val expected = CreatedContractPrep( + createdContractsOfHostedInformees = Map.empty, + witnessed = singleCreate.createdAbsolute, + ) - actual shouldBe expected - } + actual shouldBe expected + } + } + } } } diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceCheckerTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceCheckerTest.scala index a85f5bf18c..3c08ca8d16 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceCheckerTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceCheckerTest.scala @@ -16,13 +16,10 @@ import com.digitalasset.canton.participant.protocol.EngineController.{ EngineAbortStatus, GetEngineAbortStatus, } +import com.digitalasset.canton.participant.protocol.TransactionProcessingSteps import com.digitalasset.canton.participant.protocol.submission.TransactionTreeFactoryImpl import com.digitalasset.canton.participant.protocol.validation.ModelConformanceChecker.* import com.digitalasset.canton.participant.protocol.validation.ModelConformanceCheckerTest.HashReInterpretationCounter -import com.digitalasset.canton.participant.protocol.{ - DummyContractAuthenticator, - TransactionProcessingSteps, -} import com.digitalasset.canton.participant.store.ContractAndKeyLookup import com.digitalasset.canton.participant.util.DAMLe import com.digitalasset.canton.participant.util.DAMLe.{ @@ -35,9 +32,9 @@ import com.digitalasset.canton.platform.apiserver.execution.ContractAuthenticato import com.digitalasset.canton.protocol.* import com.digitalasset.canton.protocol.ExampleTransactionFactory.* import com.digitalasset.canton.topology.client.TopologySnapshot -import com.digitalasset.canton.topology.store.PackageDependencyResolverUS import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.FutureInstances.* +import com.digitalasset.canton.util.{ContractValidator, TestContractHasher} import com.digitalasset.canton.{ BaseTest, LfCommand, @@ -49,7 +46,7 @@ import com.digitalasset.canton.{ import com.digitalasset.daml.lf.data.ImmArray import com.digitalasset.daml.lf.data.Ref.{PackageId, PackageName} import com.digitalasset.daml.lf.engine.Error as LfError -import com.digitalasset.daml.lf.language.Ast.{Expr, GenPackage, PackageMetadata} +import com.digitalasset.daml.lf.language.Ast.{DeclaredImports, Expr, GenPackage, PackageMetadata} import com.digitalasset.daml.lf.language.LanguageVersion import org.scalatest.wordspec.AsyncWordSpec import pprint.Tree @@ -71,10 +68,11 @@ class ModelConformanceCheckerTest extends AsyncWordSpec with BaseTest { val packageMetadata: PackageMetadata = PackageMetadata(packageName, packageVersion, None) val genPackage: GenPackage[Expr] = GenPackage( - Map.empty, - Set.empty, - 
LanguageVersion.default, - packageMetadata, + modules = Map.empty, + directDeps = Set.empty, + languageVersion = LanguageVersion.default, + metadata = packageMetadata, + imports = DeclaredImports(Set.empty), isUtilityPackage = true, ) val packageResolver: PackageResolver = _ => _ => FutureUnlessShutdown.pure(Some(genPackage)) @@ -172,6 +170,7 @@ class ModelConformanceCheckerTest extends AsyncWordSpec with BaseTest { factory.psid, factory.cantonContractIdVersion, factory.cryptoOps, + TestContractHasher.Async, loggerFactory, ) @@ -196,13 +195,15 @@ class ModelConformanceCheckerTest extends AsyncWordSpec with BaseTest { views: NonEmpty[Seq[(FullTransactionViewTree, Seq[(TransactionView, LfKeyResolver)])]], ips: TopologySnapshot = factory.topologySnapshot, reInterpretedTopLevelViews: ModelConformanceChecker.LazyAsyncReInterpretationMap = Map.empty, - ): EitherT[Future, ErrorWithSubTransaction, Result] = { + ): EitherT[Future, ErrorWithSubTransaction[Unit], Result] = { val rootViewTrees = views.map(_._1) val commonData = TransactionProcessingSteps.tryCommonData(rootViewTrees) val keyResolvers = views.forgetNE.flatMap { case (_, resolvers) => resolvers }.toMap + val rootViewTreesWithEffects = + rootViewTrees.map(tree => (tree, tree.view.allSubviews.map(_ => ()))) mcc .check( - rootViewTrees, + rootViewTreesWithEffects, keyResolvers, ips, commonData, @@ -217,7 +218,7 @@ class ModelConformanceCheckerTest extends AsyncWordSpec with BaseTest { reinterpretCommand, transactionTreeFactory, submittingParticipant, - DummyContractAuthenticator, + ContractValidator.AllowAll, packageResolver, pureCrypto, loggerFactory, @@ -543,16 +544,6 @@ class ModelConformanceCheckerTest extends AsyncWordSpec with BaseTest { } } } - - class TestPackageResolver(result: Either[PackageId, Set[PackageId]]) - extends PackageDependencyResolverUS { - import cats.syntax.either.* - override def packageDependencies(packageId: PackageId)(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, PackageId, Set[PackageId]] = - result.toEitherT - } - } object ModelConformanceCheckerTest { diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/validation/TimeValidatorTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/validation/TimeValidatorTest.scala index 28b190be48..000a78a1cc 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/validation/TimeValidatorTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/validation/TimeValidatorTest.scala @@ -11,7 +11,7 @@ import com.digitalasset.canton.participant.protocol.validation.TimeValidator.{ LedgerTimeRecordTimeDeltaTooLargeError, PreparationTimeRecordTimeDeltaTooLargeError, } -import com.digitalasset.canton.protocol.{ExampleTransactionFactory, TransactionId} +import com.digitalasset.canton.protocol.{ExampleTransactionFactory, UpdateId} import com.digitalasset.canton.time.NonNegativeFiniteDuration import org.scalatest.wordspec.AnyWordSpec @@ -22,7 +22,7 @@ class TimeValidatorTest extends AnyWordSpec with BaseTest { NonNegativeFiniteDuration.tryOfSeconds(10) private val preparationTimeRecordTimeTolerance: NonNegativeFiniteDuration = NonNegativeFiniteDuration.tryOfSeconds(60) - private val transactionId: TransactionId = ExampleTransactionFactory.transactionId(0) + private val transactionId: UpdateId = ExampleTransactionFactory.transactionId(0) private def checkTimestamps( 
ledgerTime: CantonTimestamp, diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessorTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessorTest.scala index 69f5d2a063..681f696df4 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessorTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessorTest.scala @@ -9,13 +9,14 @@ import cats.syntax.parallel.* import com.daml.nonempty.NonEmpty import com.digitalasset.canton.* import com.digitalasset.canton.config.RequireTypes.{ - NonNegativeInt, NonNegativeLong, + NonNegativeProportion, PositiveInt, PositiveNumeric, } import com.digitalasset.canton.config.{ BatchingConfig, + CommitmentSendDelay, DefaultProcessingTimeouts, NonNegativeDuration, TestingConfigInternal, @@ -271,8 +272,18 @@ sealed trait AcsCommitmentProcessorBaseTest ) } - // Create the processor, but return the changes instead of publishing them, such that the user can decide when - // to publish + /** Create the processor, but return the changes instead of publishing them, such that the user + * can decide when to publish + * + * @param warnOnAcsCommitmentDegradation + * Whether to warn on acs commitment degradation errors. Setting this to true is needed only + * when specifically testing acs commitment degradation. + * @param increasePerceivedComputationTimeForCommitments + * This parameter artificially increases the measured computation time for commitments, which + * is used to decide whether to trigger catch-up mode. This is useful to test catch-up mode + * without having to create a large number of commitments. This parameter influences neither + * the actual time spent to compute commitments nor the compute metrics. + */ protected def testSetupDontPublish( timeProofs: List[CantonTimestamp], contractSetup: Map[ @@ -288,9 +299,8 @@ sealed trait AcsCommitmentProcessorBaseTest synchronizerParametersUpdates: List[ SynchronizerParameters.WithValidity[DynamicSynchronizerParameters] ] = List.empty, - // Whether to warn on acs commitment degradation errors. - // Setting this to true is needed only when specifically testing acs commitment degradation.
warnOnAcsCommitmentDegradation: Boolean = false, + increasePerceivedComputationTimeForCommitments: Boolean = false, )(implicit ec: ExecutionContext): ( FutureUnlessShutdown[AcsCommitmentProcessor], AcsCommitmentStore, @@ -368,7 +378,11 @@ sealed trait AcsCommitmentProcessorBaseTest exitOnFatalFailures = true, BatchingConfig(), // do not delay sending commitments for testing, because tests often expect to see commitments after an interval - Some(NonNegativeInt.zero), + Some(CommitmentSendDelay(Some(NonNegativeProportion.zero), Some(NonNegativeProportion.zero))), + increasePerceivedComputationTimeForCommitments = Option.when( + increasePerceivedComputationTimeForCommitments + )(interval.duration.multipliedBy(2)), + doNotAwaitOnCheckingIncomingCommitments = false, ) (acsCommitmentProcessor, store, sequencerClient, changes, acsCommitmentConfigStore) } @@ -2106,6 +2120,7 @@ class AcsCommitmentProcessorTest topology, acsCommitmentsCatchUpModeEnabled = true, warnOnAcsCommitmentDegradation = true, + increasePerceivedComputationTimeForCommitments = true, ) val remoteCommitments = List( @@ -2323,6 +2338,7 @@ class AcsCommitmentProcessorTest acsCommitmentsCatchUpModeEnabled = true, synchronizerParametersUpdates = List(startConfigWithValidity), warnOnAcsCommitmentDegradation = true, + increasePerceivedComputationTimeForCommitments = true, ) (for { @@ -2402,6 +2418,7 @@ class AcsCommitmentProcessorTest acsCommitmentsCatchUpModeEnabled = true, synchronizerParametersUpdates = List(startConfigWithValidity), warnOnAcsCommitmentDegradation = true, + increasePerceivedComputationTimeForCommitments = true, ) (for { @@ -2486,6 +2503,7 @@ class AcsCommitmentProcessorTest topology, acsCommitmentsCatchUpModeEnabled = true, warnOnAcsCommitmentDegradation = true, + increasePerceivedComputationTimeForCommitments = true, ) val remoteCommitments = List( @@ -2593,6 +2611,7 @@ class AcsCommitmentProcessorTest topology, acsCommitmentsCatchUpModeEnabled = true, warnOnAcsCommitmentDegradation = true, + increasePerceivedComputationTimeForCommitments = true, ) val remoteCommitments = List( @@ -2742,6 +2761,7 @@ class AcsCommitmentProcessorTest synchronizerParametersUpdates = List(disabledConfigWithValidity, changedConfigWithValidity), warnOnAcsCommitmentDegradation = true, + increasePerceivedComputationTimeForCommitments = true, ) (for { @@ -2847,6 +2867,7 @@ class AcsCommitmentProcessorTest synchronizerParametersUpdates = List(startConfigWithValidity, disabledConfigWithValidity), warnOnAcsCommitmentDegradation = true, + increasePerceivedComputationTimeForCommitments = true, ) (for { @@ -2935,6 +2956,7 @@ class AcsCommitmentProcessorTest acsCommitmentsCatchUpModeEnabled = true, synchronizerParametersUpdates = List(startConfigWithValidity, changeConfigWithValidity), warnOnAcsCommitmentDegradation = true, + increasePerceivedComputationTimeForCommitments = true, ) (for { @@ -3021,6 +3043,7 @@ class AcsCommitmentProcessorTest topology, acsCommitmentsCatchUpModeEnabled = true, warnOnAcsCommitmentDegradation = true, + increasePerceivedComputationTimeForCommitments = true, ) (for { @@ -3175,6 +3198,7 @@ class AcsCommitmentProcessorTest topology, acsCommitmentsCatchUpModeEnabled = true, warnOnAcsCommitmentDegradation = true, + increasePerceivedComputationTimeForCommitments = true, ) val remoteCommitments = List( @@ -3297,6 +3321,7 @@ class AcsCommitmentProcessorTest topology, acsCommitmentsCatchUpModeEnabled = true, warnOnAcsCommitmentDegradation = true, + increasePerceivedComputationTimeForCommitments = true, ) val 
remoteCommitmentsFast = List( @@ -3465,6 +3490,7 @@ class AcsCommitmentProcessorTest topology, acsCommitmentsCatchUpModeEnabled = true, warnOnAcsCommitmentDegradation = true, + increasePerceivedComputationTimeForCommitments = true, ) val remoteCommitmentsFast = List( diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/SortedReconciliationIntervalsHelpers.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/SortedReconciliationIntervalsHelpers.scala index e1352794f8..50b21e7d69 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/SortedReconciliationIntervalsHelpers.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/SortedReconciliationIntervalsHelpers.scala @@ -34,7 +34,6 @@ trait SortedReconciliationIntervalsHelpers { ): DynamicSynchronizerParametersWithValidity = DynamicSynchronizerParametersWithValidity( DynamicSynchronizerParameters.tryInitialValues( - topologyChangeDelay = NonNegativeFiniteDuration.tryOfMillis(250), reconciliationInterval = PositiveSeconds.tryOfSeconds(reconciliationInterval), protocolVersion = protocolVersion, ), @@ -49,7 +48,6 @@ trait SortedReconciliationIntervalsHelpers { ): DynamicSynchronizerParametersWithValidity = DynamicSynchronizerParametersWithValidity( DynamicSynchronizerParameters.tryInitialValues( - topologyChangeDelay = NonNegativeFiniteDuration.tryOfMillis(250), reconciliationInterval = PositiveSeconds.tryOfSeconds(reconciliationInterval), protocolVersion = protocolVersion, ), diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ContractStoreTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ContractStoreTest.scala index c2f58e3de7..cce35304e7 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ContractStoreTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ContractStoreTest.scala @@ -5,11 +5,11 @@ package com.digitalasset.canton.participant.store import cats.syntax.parallel.* import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.protocol.ExampleContractFactory import com.digitalasset.canton.protocol.ExampleTransactionFactory.packageId +import com.digitalasset.canton.protocol.{ExampleContractFactory, GenContractInstance, LfContractId} import com.digitalasset.canton.{BaseTest, FailOnShutdown, LfPartyId, LfTimestamp} import com.digitalasset.daml.lf.data.Ref -import com.digitalasset.daml.lf.data.Ref.QualifiedName +import com.digitalasset.daml.lf.data.Ref.{IdString, PackageId, QualifiedName} import com.digitalasset.daml.lf.transaction.CreationTime import org.scalatest.wordspec.AsyncWordSpec @@ -20,33 +20,41 @@ trait ContractStoreTest extends FailOnShutdown { this: AsyncWordSpec & BaseTest protected val charlie: LfPartyId = LfPartyId.assertFromString("charlie") protected val david: LfPartyId = LfPartyId.assertFromString("david") - def contractStore(mk: () => ContractStore): Unit = { - - val contract = ExampleContractFactory.build() - val contractId = contract.contractId + protected val contract: GenContractInstance { type InstCreatedAtTime <: CreationTime.CreatedAt } = + ExampleContractFactory.build() + protected val contractId: LfContractId = contract.contractId - val let2 = CantonTimestamp.Epoch.plusSeconds(5) - val pkgId2 = 
Ref.PackageId.assertFromString("different_id") - val contract2 = ExampleContractFactory.build( + protected val let2: CantonTimestamp = CantonTimestamp.Epoch.plusSeconds(5) + protected val pkgId2: IdString.PackageId = Ref.PackageId.assertFromString("different_id") + protected val contract2 + : GenContractInstance { type InstCreatedAtTime <: CreationTime.CreatedAt } = + ExampleContractFactory.build( templateId = Ref.Identifier(pkgId2, QualifiedName.assertFromString("module:template")), createdAt = CreationTime.CreatedAt(let2.toLf), ) - val contractId2 = contract2.contractId - - val templateName3 = QualifiedName.assertFromString("Foo:Bar") - val templateId3 = Ref.Identifier(packageId, templateName3) - val contract3 = - ExampleContractFactory.build( - templateId = templateId3, - createdAt = CreationTime.CreatedAt(let2.toLf), - ) - val contractId3 = contract3.contractId + protected val contractId2: LfContractId = contract2.contractId + + protected val templateName3: QualifiedName = QualifiedName.assertFromString("Foo:Bar") + protected val templateId3: Ref.FullReference[PackageId] = Ref.Identifier(packageId, templateName3) + protected val contract3 + : GenContractInstance { type InstCreatedAtTime <: CreationTime.CreatedAt } = + ExampleContractFactory.build( + templateId = templateId3, + createdAt = CreationTime.CreatedAt(let2.toLf), + ) + protected val contractId3: LfContractId = contract3.contractId + + protected val contract4 + : GenContractInstance { type InstCreatedAtTime <: CreationTime.CreatedAt } = + ExampleContractFactory.build(templateId = Ref.Identifier(pkgId2, templateName3)) + protected val contractId4: LfContractId = contract4.contractId - val contract4 = ExampleContractFactory.build(templateId = Ref.Identifier(pkgId2, templateName3)) - val contractId4 = contract4.contractId + protected val contract5 + : GenContractInstance { type InstCreatedAtTime <: CreationTime.CreatedAt } = + ExampleContractFactory.build(templateId = Ref.Identifier(pkgId2, templateName3)) + protected val contractId5: LfContractId = contract5.contractId - val contract5 = ExampleContractFactory.build(templateId = Ref.Identifier(pkgId2, templateName3)) - val contractId5 = contract5.contractId + def contractStore(mk: () => ContractStore): Unit = { "store and retrieve a created contract" in { val store = mk() @@ -258,5 +266,29 @@ trait ContractStoreTest extends FailOnShutdown { this: AsyncWordSpec & BaseTest res shouldBe Left(UnknownContracts(Set(contractId2))) } } + + "store contracts and retrieve them by internal id" in { + val store = mk() + + val contracts = Seq(contract, contract2, contract3, contract4) + val contractIds = contracts.map(_.contractId) + + for { + _ <- store.storeContracts(contracts) + internalIdsMap <- store.lookupBatchedNonCachedInternalIds(contractIds) + persistedMap <- store.lookupBatchedNonCached(internalIdsMap.values) + } yield { + internalIdsMap.keys should contain theSameElementsAs contractIds + internalIdsMap.foreach { case (contractId, internalId) => + persistedMap.get(internalId) match { + case Some(persisted) => + persisted.inst.contractId shouldBe contractId + case None => + fail(s"No persisted contract found for internal id $internalId") + } + } + succeed + } + } } } diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/SynchronizerConnectionConfigStoreTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/SynchronizerConnectionConfigStoreTest.scala index 6ac5b3fa13..0ad702d5f2 100644 --- 
a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/SynchronizerConnectionConfigStoreTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/SynchronizerConnectionConfigStoreTest.scala @@ -28,6 +28,7 @@ import com.digitalasset.canton.participant.synchronizer.{ } import com.digitalasset.canton.sequencing.{ GrpcSequencerConnection, + SequencerConnectionPoolDelays, SequencerConnections, SubmissionRequestAmplification, } @@ -153,6 +154,7 @@ trait SynchronizerConnectionConfigStoreTest extends FailOnShutdown { sequencerTrustThreshold = PositiveInt.one, sequencerLivenessMargin = NonNegativeInt.zero, submissionRequestAmplification = SubmissionRequestAmplification.NoAmplification, + sequencerConnectionPoolDelays = SequencerConnectionPoolDelays.default, ) ) diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/SynchronizerParameterStoreTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/SynchronizerParameterStoreTest.scala index a93e5cd876..afd878a043 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/SynchronizerParameterStoreTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/SynchronizerParameterStoreTest.scala @@ -3,6 +3,7 @@ package com.digitalasset.canton.participant.store +import com.digitalasset.canton.protocol.StaticSynchronizerParameters import com.digitalasset.canton.topology.{PhysicalSynchronizerId, SynchronizerId, UniqueIdentifier} import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.canton.{BaseTest, FailOnShutdown} @@ -37,8 +38,9 @@ trait SynchronizerParameterStoreTest extends FailOnShutdown { this: AsyncWordSpe "be idempotent" in { val store = mk(synchronizerId) val params = - BaseTest.defaultStaticSynchronizerParametersWith(protocolVersion = - anotherProtocolVersion(testedProtocolVersion) + BaseTest.defaultStaticSynchronizerParametersWith( + topologyChangeDelay = StaticSynchronizerParameters.defaultTopologyChangeDelay, + protocolVersion = anotherProtocolVersion(testedProtocolVersion), ) for { _ <- store.setParameters(params) @@ -53,8 +55,9 @@ trait SynchronizerParameterStoreTest extends FailOnShutdown { this: AsyncWordSpe val store = mk(synchronizerId) val params = defaultStaticSynchronizerParameters val modified = - BaseTest.defaultStaticSynchronizerParametersWith(protocolVersion = - anotherProtocolVersion(testedProtocolVersion) + BaseTest.defaultStaticSynchronizerParametersWith( + topologyChangeDelay = StaticSynchronizerParameters.defaultTopologyChangeDelay, + protocolVersion = anotherProtocolVersion(testedProtocolVersion), ) for { _ <- store.setParameters(params) diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbAcsCommitmentStoreTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbAcsCommitmentStoreTest.scala index 948507d937..ce81fdf884 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbAcsCommitmentStoreTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbAcsCommitmentStoreTest.scala @@ -3,6 +3,7 @@ package com.digitalasset.canton.participant.store.db +import cats.Eval import com.daml.nameof.NameOf.functionFullName import 
com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.participant.store.{ @@ -10,6 +11,7 @@ import com.digitalasset.canton.participant.store.{ CommitmentQueueTest, IncrementalCommitmentStoreTest, } +import com.digitalasset.canton.platform.store.interning.MockStringInterning import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.store.IndexedSynchronizer import com.digitalasset.canton.store.db.{DbTest, H2Test, PostgresTest} @@ -19,6 +21,8 @@ import scala.concurrent.ExecutionContext trait DbAcsCommitmentStoreTest extends AcsCommitmentStoreTest { this: DbTest => + val mockStringInterning = new MockStringInterning + override def cleanDb( storage: DbStorage )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { @@ -43,12 +47,15 @@ trait DbAcsCommitmentStoreTest extends AcsCommitmentStoreTest { this: DbTest => new DbAcsCommitmentConfigStore(storage, timeouts, loggerFactory), timeouts, loggerFactory, + Eval.now(mockStringInterning), )(ec) ) } } trait DbIncrementalCommitmentStoreTest extends IncrementalCommitmentStoreTest { this: DbTest => + val mockStringInterning = new MockStringInterning + override def cleanDb( storage: DbStorage )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { @@ -69,6 +76,7 @@ trait DbIncrementalCommitmentStoreTest extends IncrementalCommitmentStoreTest { IndexedSynchronizer.tryCreate(synchronizerId, 1), timeouts, loggerFactory, + Eval.now(mockStringInterning), )(ec) ) } diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbContractStoreTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbContractStoreTest.scala index 147a6cb238..15f67d7b10 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbContractStoreTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbContractStoreTest.scala @@ -3,6 +3,7 @@ package com.digitalasset.canton.participant.store.db +import cats.syntax.parallel.* import com.daml.nameof.NameOf.functionFullName import com.digitalasset.canton.BaseTest import com.digitalasset.canton.config.{ @@ -12,8 +13,8 @@ import com.digitalasset.canton.config.{ } import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.participant.store.ContractStoreTest import com.digitalasset.canton.participant.store.db.DbContractStoreTest.createDbContractStoreForTesting +import com.digitalasset.canton.participant.store.{ContractStoreTest, UnknownContract} import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.store.db.{DbStorageIdempotency, DbTest, H2Test, PostgresTest} import com.digitalasset.canton.tracing.TraceContext @@ -45,6 +46,63 @@ trait DbContractStoreTest extends AsyncWordSpec with BaseTest with ContractStore ) ) } + + "store and retrieve a created contract with correct cache behavior" in { + val store = createDbContractStoreForTesting( + storage, + loggerFactory, + ) + + store.lookupPersistedIfCached(contractId) shouldBe None + store.lookupPersistedIfCached( + contractId + ) shouldBe None // should not cache IfCached lookup result + + for { + p0 <- store.lookupPersisted(contractId).failOnShutdown + _ <- store.lookupPersistedIfCached(contractId) shouldBe Some(None) + _ <- store.storeContract(contract).failOnShutdown + p <- 
store.lookupPersisted(contractId).failOnShutdown + c <- store.lookupE(contractId) + } yield { + p0 shouldBe None + c shouldEqual contract + p.value.asContractInstance shouldEqual contract + store.lookupPersistedIfCached(contractId).value.value.asContractInstance shouldEqual contract + } + } + + "delete a set of contracts as done by pruning with correct cache behavior" in { + val store = createDbContractStoreForTesting( + storage, + loggerFactory, + ) + store.lookupPersistedIfCached(contractId) shouldBe None + for { + _ <- List(contract, contract2, contract4, contract5) + .parTraverse(store.storeContract) + .failOnShutdown + _ = store.lookupPersistedIfCached(contractId).value.nonEmpty shouldBe true + _ <- store + .deleteIgnoringUnknown(Seq(contractId, contractId2, contractId3, contractId4)) + .failOnShutdown + _ = store.lookupPersistedIfCached(contractId) shouldBe None + notFounds <- List(contractId, contractId2, contractId3, contractId4).parTraverse( + store.lookupE(_).value + ) + notDeleted <- store.lookupE(contractId5).value + } yield { + notFounds shouldEqual List( + Left(UnknownContract(contractId)), + Left(UnknownContract(contractId2)), + Left(UnknownContract(contractId3)), + Left(UnknownContract(contractId4)), + ) + notDeleted shouldEqual Right(contract5) + store.lookupPersistedIfCached(contractId) shouldBe Some(None) // already tried to be looked up + store.lookupPersistedIfCached(contractId5).value.nonEmpty shouldBe true + } + } } object DbContractStoreTest { diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ContractStoreTestInMemory.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ContractStoreTestInMemory.scala index 5a266e424c..eeaab8ede7 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ContractStoreTestInMemory.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ContractStoreTestInMemory.scala @@ -3,6 +3,7 @@ package com.digitalasset.canton.participant.store.memory +import cats.syntax.parallel.* import com.digitalasset.canton.BaseTest import com.digitalasset.canton.participant.store.* import org.scalatest.wordspec.AsyncWordSpec @@ -13,4 +14,51 @@ class ContractStoreTestInMemory extends AsyncWordSpec with BaseTest with Contrac behave like contractStore(() => new InMemoryContractStore(timeouts, loggerFactory)) } + "store and retrieve a created contract with correct cache behavior" in { + val store = new InMemoryContractStore(timeouts, loggerFactory) + + store.lookupPersistedIfCached(contractId) shouldBe Some(None) + + for { + p0 <- store.lookupPersisted(contractId).failOnShutdown + _ <- store.lookupPersistedIfCached(contractId) shouldBe Some(None) + _ <- store.storeContract(contract).failOnShutdown + p <- store.lookupPersisted(contractId).failOnShutdown + c <- store.lookupE(contractId) + } yield { + p0 shouldBe None + c shouldEqual contract + p.value.asContractInstance shouldEqual contract + store.lookupPersistedIfCached(contractId).value.value.asContractInstance shouldEqual contract + } + } + + "delete a set of contracts as done by pruning with correct cache behavior" in { + val store = new InMemoryContractStore(timeouts, loggerFactory) + store.lookupPersistedIfCached(contractId) shouldBe Some(None) + for { + _ <- List(contract, contract2, contract4, contract5) + .parTraverse(store.storeContract) + .failOnShutdown + _ = 
store.lookupPersistedIfCached(contractId).value.nonEmpty shouldBe true + _ <- store + .deleteIgnoringUnknown(Seq(contractId, contractId2, contractId3, contractId4)) + .failOnShutdown + _ = store.lookupPersistedIfCached(contractId) shouldBe Some(None) + notFounds <- List(contractId, contractId2, contractId3, contractId4).parTraverse( + store.lookupE(_).value + ) + notDeleted <- store.lookupE(contractId5).value + } yield { + notFounds shouldEqual List( + Left(UnknownContract(contractId)), + Left(UnknownContract(contractId2)), + Left(UnknownContract(contractId3)), + Left(UnknownContract(contractId4)), + ) + notDeleted shouldEqual Right(contract5) + store.lookupPersistedIfCached(contractId) shouldBe Some(None) // already tried to be looked up + store.lookupPersistedIfCached(contractId5).value.nonEmpty shouldBe true + } + } } diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/QueueBasedSynchronizerOutboxTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/QueueBasedSynchronizerOutboxTest.scala index 45e31655fa..3073ef3de5 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/QueueBasedSynchronizerOutboxTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/QueueBasedSynchronizerOutboxTest.scala @@ -129,6 +129,7 @@ class QueueBasedSynchronizerOutboxTest defaultStaticSynchronizerParameters, target, queue, + disableOptionalTopologyChecks = false, // we don't need the validation logic to run, because we control the outcome of transactions manually exitOnFatalFailures = true, timeouts, @@ -137,6 +138,7 @@ class QueueBasedSynchronizerOutboxTest ) val client = new StoreBasedSynchronizerTopologyClient( clock, + defaultStaticSynchronizerParameters, store = target, packageDependenciesResolver = StoreBasedSynchronizerTopologyClient.NoPackageDependencies, timeouts = timeouts, @@ -444,7 +446,8 @@ class QueueBasedSynchronizerOutboxTest (target, manager, handle, client) <- mk( transactions.size, - rejections = Iterator.continually(Some(TopologyTransactionRejection.NotAuthorized)), + rejections = + Iterator.continually(Some(TopologyTransactionRejection.Authorization.NotAuthorized)), ) _ <- outboxConnected(manager, handle, client, target) res <- push(manager, transactions) diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/StoreBasedSynchronizerOutboxTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/StoreBasedSynchronizerOutboxTest.scala index 120afaa6bb..078a7b74c7 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/StoreBasedSynchronizerOutboxTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/StoreBasedSynchronizerOutboxTest.scala @@ -121,6 +121,7 @@ class StoreBasedSynchronizerOutboxTest ) val client = new StoreBasedSynchronizerTopologyClient( clock, + defaultStaticSynchronizerParameters, store = target, packageDependenciesResolver = StoreBasedSynchronizerTopologyClient.NoPackageDependencies, timeouts = timeouts, @@ -445,7 +446,8 @@ class StoreBasedSynchronizerOutboxTest val (source, target, manager, handle, client) = mk( transactions.size, - rejections = Iterator.continually(Some(TopologyTransactionRejection.NotAuthorized)), + rejections = + 
Iterator.continually(Some(TopologyTransactionRejection.Authorization.NotAuthorized)), ) for { _ <- outboxConnected(manager, handle, client, source, target) diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/util/CreatesActiveContracts.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/util/CreatesActiveContracts.scala new file mode 100644 index 0000000000..0e7259cd0d --- /dev/null +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/util/CreatesActiveContracts.scala @@ -0,0 +1,117 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.participant.util + +import com.daml.ledger +import com.digitalasset.canton.crypto.TestSalt +import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.examples.java.cycle as M +import com.digitalasset.canton.participant.admin.data.{ActiveContractOld, RepairContract} +import com.digitalasset.canton.platform.apiserver.FatContractInstanceHelper +import com.digitalasset.canton.protocol.* +import com.digitalasset.canton.topology.{DefaultTestIdentities, PhysicalSynchronizerId} +import com.digitalasset.canton.util.TestContractHasher +import com.digitalasset.canton.{BaseTest, NeedsNewLfContractIds, ReassignmentCounter} +import com.digitalasset.daml.lf + +/** Helper that allows unit tests to create active contracts for testing. + */ +private[participant] trait CreatesActiveContracts { + self: NeedsNewLfContractIds & BaseTest => + + protected def psid: PhysicalSynchronizerId + protected def testSymbolicCrypto: SymbolicPureCrypto + + protected def createActiveContract(): ledger.api.v2.state_service.ActiveContract = { + + // 1. Create the prerequisites for coming up with an authenticated LAPI active contract. + val cidUnauthenticated = newLfContractId() + val contractIdV1Version = CantonContractIdVersion.maxV1 + val unicumGenerator = new UnicumGenerator(testSymbolicCrypto) + val signatory = DefaultTestIdentities.party1 + + // Create an unauthenticated contract first, i.e. without an authenticated contract suffix + // as input for creating an authenticated contract. + val unauthenticatedLfFatContract = FatContractInstanceHelper.buildFatContractInstance( + templateId = lf.data.Ref.Identifier.assertFromString("some:pkg:identifier"), + packageName = lf.data.Ref.PackageName.assertFromString("pkg-name"), + contractId = cidUnauthenticated, + argument = lf.value.Value.ValueNil, + createdAt = CantonTimestamp.Epoch.underlying, + authenticationData = ContractAuthenticationDataV1(TestSalt.generateSalt(0))( + contractIdV1Version + ).toLfBytes, + signatories = Set(signatory.toLf), + stakeholders = Set(signatory.toLf), + keyOpt = None, + version = LfSerializationVersion.minVersion, + ) + + val contractId = valueOrFail( + unicumGenerator + .recomputeUnicum( + contractInstance = unauthenticatedLfFatContract, + cantonContractIdVersion = contractIdV1Version, + contractHash = TestContractHasher.Sync.hash( + unauthenticatedLfFatContract.toCreateNode, + contractIdV1Version.contractHashingMethod, + ), + ) + .map(contractIdV1Version.fromDiscriminator(cidUnauthenticated.discriminator, _)) + )("compute unicum and authenticated contract id") + + // Use the authenticated contract to come up with the contract proto serialization. 
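+    // mapCid rewrites every contract id embedded in the instance; since the create argument
+    // is ValueNil, only the top-level placeholder id gets swapped for the authenticated id
+    // before encoding.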
+ val authenticatedFatContractInstance = unauthenticatedLfFatContract.mapCid(_ => contractId) + val serialization = valueOrFail( + lf.transaction.TransactionCoder.encodeFatContractInstance(authenticatedFatContractInstance) + )("serialize contract instance") + + // 2. Create LAPI active contract + ledger.api.v2.state_service.ActiveContract( + createdEvent = Some( + ledger.api.v2.event.CreatedEvent( + offset = 0L, + nodeId = 0, + contractId = contractId.coid, + templateId = Some(ledger.api.v2.value.Identifier(M.Cycle.PACKAGE_ID, "Cycle", "Cycle")), + contractKey = None, + createArguments = None, + createdEventBlob = serialization, + interfaceViews = Seq.empty, + witnessParties = Seq.empty, + signatories = Seq(signatory.toProtoPrimitive), + observers = Seq.empty, + createdAt = Some(CantonTimestamp.Epoch.toProtoTimestamp), + packageName = M.Cycle.PACKAGE_ID, + acsDelta = false, + representativePackageId = M.Cycle.PACKAGE_ID, + ) + ), + synchronizerId = psid.logical.toProtoPrimitive, + reassignmentCounter = ReassignmentCounter.Genesis.unwrap, + ) + } + + // TODO(#24326): Remove once OnPR is based on LAPI active contracts + protected def createActiveContractOld(): ActiveContractOld = { + val lapiContract: ledger.api.v2.state_service.ActiveContract = createActiveContract() + + // Convert LAPI active contract to ActiveContractOld. + val repairContract = valueOrFail(RepairContract.toRepairContract(lapiContract))( + "convert lapi to repair contract" + ) + val serializableContract = + valueOrFail(SerializableContract.fromLfFatContractInst(repairContract.contract))( + "convert repair contract to serializable contract" + ) + ActiveContractOld + .create( + synchronizerId = psid.logical, + contract = serializableContract, + reassignmentCounter = repairContract.reassignmentCounter, + )(testedProtocolVersion) + } + +} diff --git a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/util/JavaCodegenUtilTest.scala b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/util/JavaCodegenUtilTest.scala index cb84239dcf..3a127df382 100644 --- a/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/util/JavaCodegenUtilTest.scala +++ b/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/util/JavaCodegenUtilTest.scala @@ -6,11 +6,7 @@ package com.digitalasset.canton.participant.util import com.daml.ledger.javaapi.data.codegen.ContractId import com.digitalasset.canton.BaseTest import com.digitalasset.canton.crypto.{Hash, HashAlgorithm, TestHash} -import com.digitalasset.canton.protocol.{ - AuthenticatedContractIdVersionV11, - ExampleTransactionFactory, - Unicum, -} +import com.digitalasset.canton.protocol.{CantonContractIdVersion, ExampleTransactionFactory, Unicum} import org.scalatest.wordspec.AsyncWordSpec class JavaCodegenUtilTest extends AsyncWordSpec with BaseTest { @@ -22,7 +18,7 @@ class JavaCodegenUtilTest extends AsyncWordSpec with BaseTest { val hash = Hash.build(TestHash.testHashPurpose, HashAlgorithm.Sha256).add(0).finish() val unicum = Unicum(hash) - val lfCid = AuthenticatedContractIdVersionV11.fromDiscriminator(discriminator, unicum) + val lfCid = CantonContractIdVersion.maxV1.fromDiscriminator(discriminator, unicum) val apiCid = new ContractId(lfCid.coid) val lfCid2 = apiCid.toLf diff --git a/canton/community/drivers/reference/src/main/protobuf/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/v1/reference_sequencer_block_events.proto
b/canton/community/reference-sequencer-driver/src/main/protobuf/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/v1/reference_sequencer_block_events.proto similarity index 100% rename from canton/community/drivers/reference/src/main/protobuf/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/v1/reference_sequencer_block_events.proto rename to canton/community/reference-sequencer-driver/src/main/protobuf/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/v1/reference_sequencer_block_events.proto diff --git a/canton/community/reference-sequencer-driver/src/main/resources/META-INF/services/com.digitalasset.canton.synchronizer.block.SequencerDriverFactory b/canton/community/reference-sequencer-driver/src/main/resources/META-INF/services/com.digitalasset.canton.synchronizer.block.SequencerDriverFactory new file mode 100644 index 0000000000..cb96fd1bc9 --- /dev/null +++ b/canton/community/reference-sequencer-driver/src/main/resources/META-INF/services/com.digitalasset.canton.synchronizer.block.SequencerDriverFactory @@ -0,0 +1 @@ +com.digitalasset.canton.synchronizer.sequencing.sequencer.reference.ReferenceSequencerDriverFactory diff --git a/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/ReferenceSequencerDriver.scala b/canton/community/reference-sequencer-driver/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/ReferenceSequencerDriver.scala similarity index 98% rename from canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/ReferenceSequencerDriver.scala rename to canton/community/reference-sequencer-driver/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/ReferenceSequencerDriver.scala index 7d361184f2..bcf886cbb1 100644 --- a/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/ReferenceSequencerDriver.scala +++ b/canton/community/reference-sequencer-driver/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/ReferenceSequencerDriver.scala @@ -115,6 +115,9 @@ class ReferenceSequencerDriver( blockOrdererBlockToRawLedgerBlock(logger)(block) } + override def sequencingTime(implicit traceContext: TraceContext): Future[Option[Long]] = + Future.successful(Some(timeProvider.nowInMicrosecondsSinceEpoch)) + override def send(request: ByteString, submissionId: String, senderId: String)(implicit traceContext: TraceContext ): Future[Unit] = diff --git a/canton/community/reference-sequencer-driver/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/ReferenceSequencerDriverFactory.scala b/canton/community/reference-sequencer-driver/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/ReferenceSequencerDriverFactory.scala new file mode 100644 index 0000000000..2e5bba5047 --- /dev/null +++ b/canton/community/reference-sequencer-driver/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/ReferenceSequencerDriverFactory.scala @@ -0,0 +1,140 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.synchronizer.sequencing.sequencer.reference + +import com.daml.metrics.api.MetricsContext +import com.digitalasset.canton.config.{DbConfig, ProcessingTimeout, StorageConfig} +import com.digitalasset.canton.lifecycle.{CloseContext, FlagCloseable} +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.resource.{Storage, StorageSingleSetup} +import com.digitalasset.canton.synchronizer.block.BlockFormat.DefaultFirstBlockHeight +import com.digitalasset.canton.synchronizer.block.{SequencerDriver, SequencerDriverFactory} +import com.digitalasset.canton.synchronizer.sequencing.sequencer.reference.store.ReferenceBlockOrderingStore +import com.digitalasset.canton.time.{Clock, TimeProvider, TimeProviderClock} +import com.digitalasset.canton.tracing.TraceContext +import monocle.macros.syntax.lens.* +import org.apache.pekko.stream.Materializer +import pureconfig.{ConfigReader, ConfigWriter} + +import scala.concurrent.ExecutionContext + +class ReferenceSequencerDriverFactory extends SequencerDriverFactory { + + override final type ConfigType = ReferenceSequencerDriver.Config[StorageConfig] + + override final def version: Int = 1 + + override final def usesTimeProvider: Boolean = true + + override def name: String = "reference" + + override def configParser: ConfigReader[ConfigType] = { + import pureconfig.generic.semiauto.* + import com.digitalasset.canton.config.BaseCantonConfig.Readers.* + implicit val communityMemoryStorageConfigReader: ConfigReader[StorageConfig.Memory] = + deriveReader[StorageConfig.Memory] + implicit val communityH2StorageConfigReader: ConfigReader[DbConfig.H2] = + deriveReader[DbConfig.H2] + implicit val communityPostgresStorageConfigReader: ConfigReader[DbConfig.Postgres] = + deriveReader[DbConfig.Postgres] + implicit val communityStorageConfigReader: ConfigReader[StorageConfig] = + deriveReader[StorageConfig] + + deriveReader[ConfigType] + } + + override def configWriter(confidential: Boolean): ConfigWriter[ConfigType] = { + import pureconfig.generic.semiauto.* + import com.digitalasset.canton.config.BaseCantonConfig.Writers.* + + implicit val communityMemoryStorageConfigWriter: ConfigWriter[StorageConfig.Memory] = + deriveWriter[StorageConfig.Memory] + implicit val communityH2StorageConfigWriter: ConfigWriter[DbConfig.H2] = + deriveWriter[DbConfig.H2] + implicit val communityPostgresStorageConfigWriter: ConfigWriter[DbConfig.Postgres] = + deriveWriter[DbConfig.Postgres] + implicit val communityStorageConfigWriter: ConfigWriter[StorageConfig] = + deriveWriter[StorageConfig] + + deriveWriter[ConfigType] + } + + private def createClock(timeProvider: TimeProvider, loggerFactory: NamedLoggerFactory) = + new TimeProviderClock(timeProvider, loggerFactory) + + override def create( + config: ConfigType, + nonStandardConfig: Boolean, + timeProvider: TimeProvider, + firstBlockHeight: Option[Long], + synchronizerId: String, + sequencerId: String, + loggerFactory: NamedLoggerFactory, + )(implicit executionContext: ExecutionContext, materializer: Materializer): SequencerDriver = { + val processingTimeout = ProcessingTimeout() + val closeable = flagCloseable(processingTimeout, loggerFactory) + val storage = + createStorage( + config, + createClock(timeProvider, loggerFactory), + processingTimeout, + loggerFactory, + )( + executionContext, + TraceContext.empty, + new CloseContext(closeable), + MetricsContext.Empty, + ) + val store = + ReferenceBlockOrderingStore(storage, processingTimeout,
loggerFactory) + new ReferenceSequencerDriver( + sequencerId, + store, + config, + timeProvider, + firstBlockHeight.getOrElse(DefaultFirstBlockHeight), + storage, + closeable, + loggerFactory, + processingTimeout, + ) + } + + private def flagCloseable( + processingTimeout: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + ): FlagCloseable = + FlagCloseable(loggerFactory.getTracedLogger(getClass), processingTimeout) + + protected def createStorage( + config: ReferenceSequencerDriver.Config[StorageConfig], + clock: Clock, + processingTimeout: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + )(implicit + executionContext: ExecutionContext, + traceContext: TraceContext, + closeContext: CloseContext, + metricsContext: MetricsContext, + ): Storage = + StorageSingleSetup.tryCreateAndMigrateStorage( + config.storage, + config.logQueryCost, + clock, + processingTimeout, + loggerFactory, + setMigrationsPath, + ) + + def setMigrationsPath(config: StorageConfig): StorageConfig = + config match { + case h2: DbConfig.H2 => + h2.focus(_.parameters.migrationsPaths) + .replace(Seq("classpath:db/migration/canton/h2/dev/reference/")) + case pg: DbConfig.Postgres => + pg.focus(_.parameters.migrationsPaths) + .replace(Seq("classpath:db/migration/canton/postgres/dev/reference/")) + case x => x + } +} diff --git a/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/DbReferenceBlockOrderingStore.scala b/canton/community/reference-sequencer-driver/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/DbReferenceBlockOrderingStore.scala similarity index 100% rename from canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/DbReferenceBlockOrderingStore.scala rename to canton/community/reference-sequencer-driver/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/DbReferenceBlockOrderingStore.scala diff --git a/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/ReferenceBlockOrderingStore.scala b/canton/community/reference-sequencer-driver/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/ReferenceBlockOrderingStore.scala similarity index 100% rename from canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/ReferenceBlockOrderingStore.scala rename to canton/community/reference-sequencer-driver/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/ReferenceBlockOrderingStore.scala diff --git a/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/ReferenceSequencerDriverStore.scala b/canton/community/reference-sequencer-driver/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/ReferenceSequencerDriverStore.scala similarity index 100% rename from canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/ReferenceSequencerDriverStore.scala rename to canton/community/reference-sequencer-driver/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/ReferenceSequencerDriverStore.scala diff --git 
a/canton/community/drivers/reference/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/ReferenceBlockOrderingStoreTest.scala b/canton/community/reference-sequencer-driver/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/ReferenceBlockOrderingStoreTest.scala similarity index 100% rename from canton/community/drivers/reference/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/ReferenceBlockOrderingStoreTest.scala rename to canton/community/reference-sequencer-driver/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/ReferenceBlockOrderingStoreTest.scala diff --git a/canton/community/drivers/reference/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/db/DbReferenceBlockOrderingStoreTest.scala b/canton/community/reference-sequencer-driver/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/db/DbReferenceBlockOrderingStoreTest.scala similarity index 100% rename from canton/community/drivers/reference/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/db/DbReferenceBlockOrderingStoreTest.scala rename to canton/community/reference-sequencer-driver/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/sequencer/reference/store/db/DbReferenceBlockOrderingStoreTest.scala diff --git a/canton/community/drivers/api-conformance-tests/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/BaseSequencerDriverApiTest.scala b/canton/community/sequencer-driver-api-conformance-tests/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/BaseSequencerDriverApiTest.scala similarity index 100% rename from canton/community/drivers/api-conformance-tests/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/BaseSequencerDriverApiTest.scala rename to canton/community/sequencer-driver-api-conformance-tests/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/BaseSequencerDriverApiTest.scala diff --git a/canton/community/drivers/api-conformance-tests/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/SequencerDriverApiConformanceTest.scala b/canton/community/sequencer-driver-api-conformance-tests/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/SequencerDriverApiConformanceTest.scala similarity index 100% rename from canton/community/drivers/api-conformance-tests/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/SequencerDriverApiConformanceTest.scala rename to canton/community/sequencer-driver-api-conformance-tests/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/SequencerDriverApiConformanceTest.scala diff --git a/canton/community/sequencer-driver/src/main/scala/com/digitalasset/canton/synchronizer/block/SequencerDriver.scala b/canton/community/sequencer-driver/src/main/scala/com/digitalasset/canton/synchronizer/block/SequencerDriver.scala index 993d57d951..8bd4d43fef 100644 --- a/canton/community/sequencer-driver/src/main/scala/com/digitalasset/canton/synchronizer/block/SequencerDriver.scala +++ b/canton/community/sequencer-driver/src/main/scala/com/digitalasset/canton/synchronizer/block/SequencerDriver.scala @@ -179,6 +179,14 @@ trait SequencerDriver extends AutoCloseable { traceContext: TraceContext ): Source[RawLedgerBlock, KillSwitch] + /** Return a "current" sequencing time such that, when a `send` operation is subsequently called, + * if sequenced, 
the sequencing time of the resulting event is guaranteed to be later than the + * time previously returned by this method. + */ + def sequencingTime(implicit + traceContext: TraceContext + ): Future[Option[Long]] + // Operability def health(implicit traceContext: TraceContext): Future[SequencerDriverHealthStatus] diff --git a/canton/community/synchronizer/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_administration_service.proto b/canton/community/synchronizer/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_administration_service.proto index 7433139aec..d698a5bf0d 100644 --- a/canton/community/synchronizer/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_administration_service.proto +++ b/canton/community/synchronizer/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_administration_service.proto @@ -34,6 +34,10 @@ service SequencerAdministrationService { // the returned bytestring can be used directly to initialize the given sequencer later on rpc OnboardingState(OnboardingStateRequest) returns (stream OnboardingStateResponse); + // Fetch the onboarding state for a given sequencer. + // the returned bytestring can be used directly to initialize the given sequencer later on + rpc OnboardingStateV2(OnboardingStateV2Request) returns (stream OnboardingStateV2Response); + // Disable members at the sequencer. Will prevent existing and new instances from connecting, and permit removing their data. rpc DisableMember(DisableMemberRequest) returns (DisableMemberResponse); } @@ -125,6 +129,29 @@ message OnboardingStateForSequencer { SequencerSnapshot sequencer_snapshot = 3; } +message OnboardingStateV2Request { + oneof request { + // The sequencer for which to fetch the onboarding state + string sequencer_uid = 1; + // The effective time that should be "contained" in the sequencer snapshot + google.protobuf.Timestamp timestamp = 2; + } +} + +message OnboardingStateV2Response { + // versioned OnboardingStateForSequencerV2 + bytes onboarding_state_for_sequencer = 1; +} + +message OnboardingStateForSequencerV2 { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + // versioned com.digitalasset.canton.topology.admin.v30.TopologyTransactions.Item + optional bytes topology_transaction = 1; + optional bytes static_synchronizer_parameters = 2; + SequencerSnapshot sequencer_snapshot = 3; +} + message PruningStatusRequest {} message PruningStatusResponse { diff --git a/canton/community/synchronizer/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_initialization_service.proto b/canton/community/synchronizer/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_initialization_service.proto index a03f4ee21f..d537e6451e 100644 --- a/canton/community/synchronizer/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_initialization_service.proto +++ b/canton/community/synchronizer/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_initialization_service.proto @@ -19,6 +19,9 @@ service SequencerInitializationService { rpc InitializeSequencerFromGenesisState(stream InitializeSequencerFromGenesisStateRequest) returns (InitializeSequencerFromGenesisStateResponse); rpc InitializeSequencerFromOnboardingState(stream InitializeSequencerFromOnboardingStateRequest) returns (InitializeSequencerFromOnboardingStateResponse); rpc InitializeSequencerFromPredecessor(stream
InitializeSequencerFromPredecessorRequest) returns (InitializeSequencerFromPredecessorResponse); + + rpc InitializeSequencerFromGenesisStateV2(stream InitializeSequencerFromGenesisStateV2Request) returns (InitializeSequencerFromGenesisStateV2Response); + rpc InitializeSequencerFromOnboardingStateV2(stream InitializeSequencerFromOnboardingStateV2Request) returns (InitializeSequencerFromOnboardingStateV2Response); } // Includes sufficient detail for: @@ -54,3 +57,27 @@ message InitializeSequencerFromOnboardingStateResponse { // Indicate if the initialized sequencer is replicated bool replicated = 1; } + +// Includes sufficient detail for: +// - topology and additional bootstrap information +message InitializeSequencerFromGenesisStateV2Request { + // a topology snapshot up to and including the point where this sequencer becomes active on the synchronizer + // the topology snapshot will be persisted in the synchronizer store of the sequencer + bytes topology_snapshot = 1; + + com.digitalasset.canton.protocol.v30.StaticSynchronizerParameters synchronizer_parameters = 2; +} + +message InitializeSequencerFromGenesisStateV2Response { + // Indicate if the initialized sequencer is replicated + bool replicated = 1; +} + +message InitializeSequencerFromOnboardingStateV2Request { + bytes onboarding_state = 1; +} + +message InitializeSequencerFromOnboardingStateV2Response { + // Indicate if the initialized sequencer is replicated + bool replicated = 1; +} diff --git a/canton/community/synchronizer/src/main/protobuf/com/digitalasset/canton/synchronizer/sequencing/sequencer/bftordering/standalone/v1/standalone_bft_ordering_service.proto b/canton/community/synchronizer/src/main/protobuf/com/digitalasset/canton/synchronizer/sequencing/sequencer/bftordering/standalone/v1/standalone_bft_ordering_service.proto new file mode 100644 index 0000000000..2141b0b633 --- /dev/null +++ b/canton/community/synchronizer/src/main/protobuf/com/digitalasset/canton/synchronizer/sequencing/sequencer/bftordering/standalone/v1/standalone_bft_ordering_service.proto @@ -0,0 +1,34 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.synchronizer.sequencing.sequencer.bftordering.standalone.v1; + +service StandaloneBftOrderingService { + rpc Send(SendRequest) returns (SendResponse); + rpc ReadOrdered(ReadOrderedRequest) returns (stream ReadOrderedResponse); +} + +message SendRequest { + string tag = 1; + bytes payload = 2; +} + +message SendResponse { + optional string rejection_reason = 1; +} + +message ReadOrderedRequest { + int64 start_height = 1; +} + +message ReadOrderedResponse { + int64 height = 1; + repeated Ordered block = 2; +} + +message Ordered { + string tag = 1; + bytes payload = 2; +} diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/BlockSequencerStateManager.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/BlockSequencerStateManager.scala index 15bd351806..417018f3a5 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/BlockSequencerStateManager.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/BlockSequencerStateManager.scala @@ -7,13 +7,25 @@ import cats.data.{EitherT, Nested} import com.daml.metrics.api.MetricsContext import com.daml.nonempty.NonEmpty import com.digitalasset.base.error.BaseAlarm +import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown, UnlessShutdown} +import com.digitalasset.canton.lifecycle.UnlessShutdown.Outcome +import com.digitalasset.canton.lifecycle.{ + CloseContext, + FlagCloseable, + FutureUnlessShutdown, + HasCloseContext, + PromiseUnlessShutdown, + UnlessShutdown, +} import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.InstrumentedGraph.BufferedFlow +import com.digitalasset.canton.sequencing.traffic.TrafficConsumed import com.digitalasset.canton.synchronizer.block +import com.digitalasset.canton.synchronizer.block.AsyncWriter.AsyncAppendWorkHandle import com.digitalasset.canton.synchronizer.block.BlockSequencerStateManager.HeadState import com.digitalasset.canton.synchronizer.block.data.{ BlockEphemeralState, @@ -26,14 +38,22 @@ import com.digitalasset.canton.synchronizer.metrics.BlockMetrics import com.digitalasset.canton.synchronizer.sequencer.{ BlockSequencerStreamInstrumentationConfig, DeliverableSubmissionOutcome, + InFlightAggregationUpdates, InFlightAggregations, SequencerIntegration, } import com.digitalasset.canton.synchronizer.sequencing.traffic.store.TrafficConsumedStore import com.digitalasset.canton.topology.{Member, PhysicalSynchronizerId} -import com.digitalasset.canton.tracing.{TraceContext, Traced} +import com.digitalasset.canton.tracing.{NoTracing, TraceContext, Traced} import com.digitalasset.canton.util.PekkoUtil.syntax.* -import com.digitalasset.canton.util.{ErrorUtil, LoggerUtil} +import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.util.{ + EitherTUtil, + ErrorUtil, + FutureUnlessShutdownUtil, + LoggerUtil, + MonadUtil, +} import org.apache.pekko.NotUsed import org.apache.pekko.stream.scaladsl.Flow @@ -41,6 +61,7 @@ import 
java.util.concurrent.atomic.AtomicReference import scala.collection.concurrent.TrieMap import scala.collection.immutable.SortedMap import scala.concurrent.{ExecutionContext, Future, Promise} +import scala.util.{Failure, Success} /** Thrown if the ephemeral state does not match what is expected in the persisted store. This is * not expected to be able to occur, but if it does likely means that the ephemeral state is @@ -76,21 +97,423 @@ trait BlockSequencerStateManagerBase extends FlagCloseable { ): Future[Unit] } +/** Async block sequencer writer control parameters + * + * @param enabled + * if true then the async writer is enabled + * @param trafficBatchSize + * the maximum number of traffic events to batch in a single write + * @param aggregationBatchSize + * the maximum number of inflight aggregations to batch in a single write + * @param blockInfoBatchSize + * the maximum number of block info updates to batch in a single write + */ +final case class AsyncWriterParameters( + enabled: Boolean = true, + trafficBatchSize: PositiveInt = PositiveInt.tryCreate(1000), + aggregationBatchSize: PositiveInt = PositiveInt.tryCreate(1000), + blockInfoBatchSize: PositiveInt = PositiveInt.tryCreate(1000), +) + +/** Async sequential writer for a single type of query + * + * @param addToQueue + * a function that adds new items to the queue + * @param writeQueue + * a function that writes out the queued items + */ +private[block] abstract class AsyncWriter[Q <: Iterable[?]]( + addToQueue: (Q, Q) => Q, + writeQueue: Q => FutureUnlessShutdown[Unit], + empty: => Q, + name: String, + futureSupervisor: FutureSupervisor, + protected override val loggerFactory: NamedLoggerFactory, +)(implicit executionContext: ExecutionContext, closeContext: CloseContext) + extends NamedLogging + with NoTracing { + + import AsyncWriter.* + + private val writeCompletedPromiseDesc = name + "-write-completed" + private val queueScheduledPromiseDesc = name + "-queue-scheduled" + + private val queuedDataState = + new AtomicReference[QueuedData[Q]](QueuedData.Idle) + + private def mkPromise[A](description: String) = + PromiseUnlessShutdown + .abortOnShutdown[A](description, closeContext.context, futureSupervisor) + + private def newQueueData(): QueuedData[Q] = + QueuedData.Running( + mkPromise(writeCompletedPromiseDesc), + mkPromise(queueScheduledPromiseDesc), + empty, + ) + + /** Append to the queue and schedule if necessary + * + * @param items + * new items to be added to the queue + * + * @return + * an async append result + */ + def appendAndSchedule(items: Q): AsyncAppendWorkHandle = + if (items.isEmpty) { + // if we are called to enqueue without any item, just return a completed work handle + AsyncAppendComplete + } else { + + def go( + newItems: Option[Q] // None means we completed a write + ): AsyncAppendWorkHandle = { + val currentQueueData = + queuedDataState.getAndUpdate { + + case QueuedData.Running(promiseCompleted, promiseSubmitted, pending) => + newItems.fold { + if (pending.isEmpty) + // we completed the write and no pending => stopped + QueuedData.Idle + else { + // we completed the write but there are pending writes => keep running and reset the queue + // as we'll dispatch the pending items + newQueueData() + } + } { newItems => + // we are already running and are just adding more items + QueuedData.Running( + promiseCompleted, + promiseSubmitted, + addToQueue(newItems, pending), + ) + } + // we are not running, and we are adding items => start as we'll dispatch the new items + case QueuedData.Idle => +
assert(newItems.isDefined) + newQueueData() + } + + currentQueueData match { + + case QueuedData.Running(queueCompleted, queueSubmitted, pending) => + newItems.fold { + // nothing left to do, so we finish here + if (pending.isEmpty) { + AsyncAppendComplete + } else { + // something left to do, so we pick up the queue and notify anyone who is waiting + // on the queue being picked up + queueSubmitted.outcome(()).discard + AsyncAppendWorkHandle( + dispatchQueue(pending, Some(queueCompleted)), + FutureUnlessShutdown.unit, + 0, + ) + } + } { newItems => + // we appended to an already running queue, therefore just return the current futures + AsyncAppendWorkHandle( + queueCompleted.futureUS, + queueSubmitted.futureUS, + pending.size + newItems.size, + ) + } + case QueuedData.Idle => + // we are not running and we are adding more items => start + AsyncAppendWorkHandle( + dispatchQueue(newItems.getOrElse(empty), None), + FutureUnlessShutdown.unit, + 0, + ) + } + } + + // Returns a future of the persisted queue + def dispatchQueue( + queue: Q, + completePromise: Option[PromiseUnlessShutdown[Unit]], + ): FutureUnlessShutdown[Unit] = + writeQueue(queue) + .thereafter { + case Success(Outcome(_)) => + // we completed the write, so we can complete the promise + completePromise.foreach(_.outcome(())) + // respawn if there are pending items + FutureUnlessShutdownUtil.doNotAwaitUnlessShutdown( + go(newItems = None).queuePersisted, + "background-writer-respawn-" + name, + )(this.errorLoggingContext(TraceContext.empty)) + case Success(UnlessShutdown.AbortedDueToShutdown) => + completePromise.foreach(_.shutdown()) + case Failure(exception) => + recordWriteError(name, exception) + completePromise.foreach(_.failure(exception).discard) + } + + go(Some(items)) + } + + protected def recordWriteError(name: String, exception: Throwable): Unit +} + +object AsyncWriter { + + private[block] final case class AsyncAppendWorkHandle( + queuePersisted: FutureUnlessShutdown[Unit], + queueSubmitted: FutureUnlessShutdown[Unit], + queueSize: Int, + ) { + def backpressureFU(maxQueueSize: Int): FutureUnlessShutdown[Unit] = + // if the number of items is larger than our queue, ensure that we wait until + // the queue has been picked up.
This provides some form of backpressure as it will + // halt adding more items to the queue until the next write has started + if (queueSize >= maxQueueSize) { + queueSubmitted + } else { + FutureUnlessShutdown.unit + } + } + + private[block] val AsyncAppendComplete = + AsyncAppendWorkHandle(FutureUnlessShutdown.unit, FutureUnlessShutdown.unit, 0) + + private sealed trait QueuedData[+T] extends Product with Serializable { + def queueSize: Int + } + private object QueuedData { + + /** Case class to store pending data in the queue + * + * @param queuePersisted + * a promise which will be completed once all elements of the queue have been persisted; this + * is used to synchronize the "blockInfo" writes + * @param queueSubmitted + * a promise which will be completed once the queue has been picked up for writing; this is + * used to apply backpressure to the Pekko pipeline if the write queue becomes too large + * @param queue + * the actual queue + */ + final case class Running[+T <: Iterable[?]]( + queuePersisted: PromiseUnlessShutdown[Unit], + queueSubmitted: PromiseUnlessShutdown[Unit], + queue: T, + ) extends QueuedData[T] { + override def queueSize: Int = queue.size + } + final case object Idle extends QueuedData[Nothing] { + override def queueSize: Int = 0 + } + } + +} + +/** Writes updates asynchronously into the database. + * + * To decouple the sequencer processing pipeline from the database writes, we schedule all writes + * in the background while the main pipeline keeps running. + * + * The block-info serves as the watermark and is only written once all previous writes have been + * persisted. + * + * The writes are still sequential, but sequential per "type" of write, batching writes of + * different blocks together if necessary. + * + * A further potential optimisation is to write everything in parallel, but this would require + * establishing the correct dependencies between the writes and ensuring that no read query + * reads dirty state. + * + * Before doing that, however, we should take a step back, look at the database storage schema + * we currently have, and decide whether it actually makes sense.
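+ *
+ * For intuition, the enqueue-or-spawn idiom used by [[AsyncWriter]] can be reduced to plain
+ * `scala.concurrent` primitives. This is a simplified sketch only (the `BatchingWriter` name is
+ * illustrative; promises, shutdown handling and backpressure are omitted): while a batch is being
+ * written, new items accumulate in an atomically swapped queue, and whatever accumulated is
+ * flushed as the next batch once the current write completes.
+ * {{{
+ * import java.util.concurrent.atomic.AtomicReference
+ * import scala.concurrent.{ExecutionContext, Future}
+ * import scala.util.{Failure, Success}
+ *
+ * final class BatchingWriter[A](write: Vector[A] => Future[Unit])(implicit ec: ExecutionContext) {
+ *   // None = idle; Some(pending) = a write is running and `pending` awaits the next batch
+ *   private val queued = new AtomicReference[Option[Vector[A]]](None)
+ *
+ *   def append(items: Vector[A]): Unit =
+ *     if (items.nonEmpty) // mirror the empty-items short-circuit of appendAndSchedule
+ *       queued.getAndUpdate {
+ *         case None          => Some(Vector.empty)     // become the writer
+ *         case Some(pending) => Some(pending ++ items) // a writer is running: just enqueue
+ *       } match {
+ *         case None    => flush(items) // we won the race, dispatch immediately
+ *         case Some(_) => ()           // the running writer picks the items up later
+ *       }
+ *
+ *   private def flush(batch: Vector[A]): Unit =
+ *     write(batch).onComplete {
+ *       case Success(_) =>
+ *         queued.getAndUpdate {
+ *           case Some(pending) if pending.nonEmpty => Some(Vector.empty) // keep writing
+ *           case _                                 => None               // go idle
+ *         } match {
+ *           case Some(pending) if pending.nonEmpty => flush(pending)
+ *           case _                                 => ()
+ *         }
+ *       case Failure(_) => () // park the writer, like recordWriteError: no further writes
+ *     }
+ * }
+ * }}}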
+ */ +private class BlockSequencerStateAsyncWriter( + store: SequencerBlockStore, + trafficConsumedStore: TrafficConsumedStore, + futureSupervisor: FutureSupervisor, + parameters: AsyncWriterParameters, + override val loggerFactory: NamedLoggerFactory, +)(implicit executionContext: ExecutionContext, closeContext: CloseContext) + extends NamedLogging { + + private def mkWriter[Q <: Iterable[?]]( + addToQueue: (Q, Q) => Q, + writeQueue: Q => FutureUnlessShutdown[Unit], + empty: => Q, + name: String, + ) = new AsyncWriter[Q]( + addToQueue, + writeQueue, + empty, + name, + futureSupervisor, + loggerFactory, + ) { + override protected def recordWriteError(name: String, exception: Throwable): Unit = { + noTracingLogger.error( + "Background write failed - no further writes to avoid inconsistent store", + exception, + ) + observedError.set( + Some( + new Exception( + s"Write $name failed - no further writes to avoid inconsistent store" + ).initCause(exception) + ) + ) + } + } + private def mkPromise[A](description: String) = + PromiseUnlessShutdown.abortOnShutdown[A](description, closeContext.context, futureSupervisor)( + errorLoggingContext(TraceContext.empty) + ) + + private val observedError = new AtomicReference[Option[Throwable]](None) + private val pendingWrites = + new AtomicReference[FutureUnlessShutdown[Unit]](FutureUnlessShutdown.unit) + + private val trafficWriter = mkWriter[Vector[TrafficConsumed]]( + (newItem, queue) => queue ++ newItem, + trafficConsumedStore.store(_)(TraceContext.empty), + empty = Vector.empty[TrafficConsumed], + "traffic-consumed-writer", + ) + private val aggregationWriter = mkWriter[InFlightAggregationUpdates]( + (newItem, queue) => + newItem.foldLeft(queue) { case (agg, (k, v)) => + agg.updatedWith(k) { + case Some(value) => Some(value.tryMerge(v)(this.errorLoggingContext(TraceContext.empty))) + case None => Some(v) + } + }, + store.storeInflightAggregations(_)(TraceContext.empty), + empty = Map.empty, + "in-flight-aggregation", + ) + private val blockInfoWriter = { + def writeQueue( + queued: Seq[(FutureUnlessShutdown[Unit], BlockInfo)] + ): FutureUnlessShutdown[Unit] = + // we only write once the pending dependent writes have completed + MonadUtil + .sequentialTraverse_(queued) { case (write, _) => write } + .flatMap(_ => store.finalizeBlockUpdates(queued.map(_._2))(TraceContext.empty)) + mkWriter[Vector[(FutureUnlessShutdown[Unit], BlockInfo)]]( + (newItem, queue) => queue ++ newItem, + writeQueue, + Vector.empty, + "block-info", + ) + } + + private def transformSync( + fut: FutureUnlessShutdown[Unit], + flatMapF: FutureUnlessShutdown[Unit], + ): FutureUnlessShutdown[Unit] = + // transform propagating errors synchronously + fut.transformWith { previous => + flatMapF.transform { + case Success(_) => previous + case e @ Failure(_) => e + } + } + + private def addPendingWrite( + writeAndQueue: AsyncAppendWorkHandle, + maxQueueSize: PositiveInt, + ): FutureUnlessShutdown[Unit] = { + val res = writeAndQueue + // update the pending writes to include this write + // use a promise as otherwise we might schedule the flatmap multiple times + val updated = mkPromise[Unit]("pending-writes-update") + val fut = pendingWrites.getAndSet(updated.futureUS) + // chain the previous writes with this write. 
If there is an error, it will be + // propagated into the outermost future which will be monitored via FutureUtil + transformSync(fut, res.queuePersisted).thereafter(updated.complete).discard + res.backpressureFU(maxQueueSize.value) + } + + def append( + trafficConsumedUpdates: Seq[TrafficConsumed], + inFlightAggregationUpdates: InFlightAggregationUpdates, + acknowledgementsET: EitherT[FutureUnlessShutdown, String, Unit], + ): EitherT[FutureUnlessShutdown, String, Unit] = + observedError.get() match { + // forward any background error + case Some(err) => + EitherT[FutureUnlessShutdown, String, Unit](FutureUnlessShutdown.failed(err)) + case None => + // we enqueue or start the writing in the background. The returned future will be used to + // sync the final writing of the block height. + // if we have more items queued than the limit, we'll wait for the current queue to be picked up + val backpressureF1 = addPendingWrite( + trafficWriter.appendAndSchedule(trafficConsumedUpdates.toVector), + parameters.trafficBatchSize, + ) + val backpressureF2 = addPendingWrite( + aggregationWriter.appendAndSchedule(inFlightAggregationUpdates), + parameters.aggregationBatchSize, + ) + // replace promise (don't flatmap in atomic reference to avoid duplicate scheduling) + val promise = mkPromise[Unit]("append-acknowledgements") + val fut = pendingWrites.getAndSet(promise.futureUS) + // chain the previous writes with this write. If there is an error, it will be + // propagated to the outermost future which will be monitored via FutureUtil + transformSync( + fut, + EitherTUtil.toFutureUnlessShutdown( + acknowledgementsET.leftMap(str => new Exception(str)) + ), + ).thereafter(promise.complete).discard + EitherT.right(transformSync(backpressureF1, backpressureF2)) + } + + def finalizeBlockUpdate(newBlock: BlockInfo): FutureUnlessShutdown[Unit] = + observedError.get() match { + // forward any background error + case Some(err) => FutureUnlessShutdown.failed(err) + case None => + // this is safe as we will be called sequentially + val writesF = pendingWrites.getAndSet(FutureUnlessShutdown.unit) + val result = blockInfoWriter.appendAndSchedule(Vector((writesF, newBlock))) + FutureUnlessShutdownUtil.doNotAwaitUnlessShutdown( + result.queuePersisted, + "finalize-block-update failed", + )(this.errorLoggingContext(TraceContext.empty)) + result.backpressureFU(parameters.blockInfoBatchSize.value) + } + +} class BlockSequencerStateManager( val store: SequencerBlockStore, val trafficConsumedStore: TrafficConsumedStore, + asyncWriterParameters: AsyncWriterParameters, enableInvariantCheck: Boolean, override protected val timeouts: ProcessingTimeout, + futureSupervisor: FutureSupervisor, protected val loggerFactory: NamedLoggerFactory, headState: AtomicReference[HeadState], streamInstrumentationConfig: BlockSequencerStreamInstrumentationConfig, blockMetrics: BlockMetrics, )(implicit executionContext: ExecutionContext) extends BlockSequencerStateManagerBase - with NamedLogging { + with NamedLogging + with HasCloseContext { import BlockSequencerStateManager.* + private val asyncWriter = Option.when(asyncWriterParameters.enabled)( + new BlockSequencerStateAsyncWriter( + store = store, + trafficConsumedStore = trafficConsumedStore, + futureSupervisor, + asyncWriterParameters, + loggerFactory, + ) + ) + private val memberAcknowledgementPromises = TrieMap[Member, NonEmpty[SortedMap[CantonTimestamp, Traced[Promise[Unit]]]]]()
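The finalize path above is a swap-and-chain watermark: every background write is chained onto an accumulator, and `finalizeBlockUpdate` swaps the accumulator out so the block info is only persisted after everything recorded so far has completed. A minimal sketch of the idiom with plain Futures (the names `BlockWatermark` and `writeBlockInfo` are illustrative, and error propagation is simplified compared to `transformSync`):

```scala
import java.util.concurrent.atomic.AtomicReference
import scala.concurrent.{ExecutionContext, Future, Promise}

// Swap-and-chain watermark: background writes are chained onto `pending`;
// finalizing a block swaps the chain out and writes the block info only
// after everything recorded so far has persisted.
final class BlockWatermark(writeBlockInfo: Long => Future[Unit])(implicit ec: ExecutionContext) {
  private val pending = new AtomicReference[Future[Unit]](Future.unit)

  def recordWrite(write: Future[Unit]): Unit = {
    // Swap in a fresh promise rather than flatMapping inside the atomic update,
    // mirroring the "don't flatmap in atomic reference" caveat above.
    val next = Promise[Unit]()
    val previous = pending.getAndSet(next.future)
    next.completeWith(previous.flatMap(_ => write))
  }

  def finalizeBlock(height: Long): Future[Unit] = {
    val writes = pending.getAndSet(Future.unit) // safe: callers are sequential
    writes.flatMap(_ => writeBlockInfo(height))
  }
}
```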
@@ -135,6 +558,7 @@ class BlockSequencerStateManager( @SuppressWarnings(Array("org.wartremover.warts.Var")) var currentBlockHeight = initialHeight blockEvents => { + val height = blockEvents.height // TODO(M98 Tech-Debt Collection): consider validating that blocks with the same block height have the same contents @@ -158,7 +582,6 @@ class BlockSequencerStateManager( TraceContext.ofBatch("check_block_height")(blockEvents.events)(logger) // Set the current block height to the new block's height instead of + 1 of the previous value // so that we support starting from an arbitrary block height - logger.debug( s"Processing block $height with ${blockEvents.events.size} block events.${blockEvents.events .map(_.value) @@ -277,40 +700,71 @@ class BlockSequencerStateManager( case _ => None } - val trafficConsumedFUS = EitherT.right[String]( - synchronizeWithClosing("trafficConsumedStore.store")( - trafficConsumedStore.store(trafficConsumedUpdates) + def writeSequential() = { + val trafficConsumedFUS = EitherT.right[String]( + synchronizeWithClosing("trafficConsumedStore.store")( + trafficConsumedStore.store(trafficConsumedUpdates) + ) ) - ) - val blockSequencerWritesFUS = - dbSequencerIntegration.blockSequencerWrites(update.submissionsOutcomes) - val blockSequencerAcknowledgementsFUS = EitherT.right[String]( - dbSequencerIntegration.blockSequencerAcknowledge(update.acknowledgements) - ) - val inFlightAggregationUpdatesFUS = EitherT.right[String]( - synchronizeWithClosing("partialBlockUpdate")( - store.partialBlockUpdate(inFlightAggregationUpdates = update.inFlightAggregationUpdates) + val blockSequencerWritesFUS = + dbSequencerIntegration.blockSequencerWrites(update.submissionsOutcomes) + val blockSequencerAcknowledgementsFUS = EitherT.right[String]( + dbSequencerIntegration.blockSequencerAcknowledge(update.acknowledgements) ) - ) + val inFlightAggregationUpdatesFUS = EitherT.right[String]( + synchronizeWithClosing("storeInflightAggregations")( + store.storeInflightAggregations(inFlightAggregationUpdates = + update.inFlightAggregationUpdates + ) + ) + ) + (for { + _ <- trafficConsumedFUS + _ <- blockSequencerWritesFUS + _ <- blockSequencerAcknowledgementsFUS + _ <- inFlightAggregationUpdatesFUS + } yield ()) + } - (for { - _ <- trafficConsumedFUS - _ <- blockSequencerWritesFUS - _ <- blockSequencerAcknowledgementsFUS - _ <- inFlightAggregationUpdatesFUS - } yield { - val newHead = priorHead.copy(chunk = newState) - updateHeadState(priorHead, newHead) - update.acknowledgements.foreach { case (member, timestamp) => - resolveAcknowledgements(member, timestamp) - } - update.invalidAcknowledgements.foreach { case (member, timestamp, error) => - invalidAcknowledgement(member, timestamp, error) + def writeAsync(asyncWriter: BlockSequencerStateAsyncWriter) = { + val acknowledgementsET = EitherT.right[String]( + dbSequencerIntegration.blockSequencerAcknowledge(update.acknowledgements) + ) + // the returned EitherT is only used to surface errors from the background writer + val asyncErrorET = asyncWriter.append( + trafficConsumedUpdates, + update.inFlightAggregationUpdates, + acknowledgementsET, + )
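+        // asyncErrorET fails fast if a previous background batch failed; otherwise it
+        // completes once backpressure admits more work, while the actual persistence
+        // happens later in the background and is awaited by the block-info watermark.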
+ (for { + // note: these writes are non-blocking; they are merely enqueued, applying backpressure when the queue is full + _ <- dbSequencerIntegration.blockSequencerWrites(update.submissionsOutcomes) + _ <- asyncErrorET + } yield ()) + } + + val writeET = asyncWriter match { + case None => + writeSequential() + case Some(asyncWriter) => + writeAsync(asyncWriter) + } + + writeET + .map { _ => + val newHead = priorHead.copy(chunk = newState) + updateHeadState(priorHead, newHead) + update.acknowledgements.foreach { case (member, timestamp) => + resolveAcknowledgements(member, timestamp) + } + update.invalidAcknowledgements.foreach { case (member, timestamp, error) => + invalidAcknowledgement(member, timestamp, error) + } + newHead } - newHead - }).valueOr(e => - ErrorUtil.internalError(new RuntimeException(s"handleChunkUpdate failed with error: $e")) - ) + .valueOr(e => + ErrorUtil.internalError(new RuntimeException(s"handleChunkUpdate failed with error: $e")) + ) } private def handleComplete(priorHead: HeadState, newBlock: BlockInfo)(implicit @@ -332,12 +786,18 @@ class BlockSequencerStateManager( ) checkInvariantIfEnabled(newState) val newHead = HeadState.fullyProcessed(newState) - for { - _ <- store.finalizeBlockUpdate(newBlock) - } yield { + + (asyncWriter match { + case Some(asyncWriter) => + // the write is async; the future is only forwarded to surface a failure if we cannot write + asyncWriter.finalizeBlockUpdate(newBlock) + case None => + store.finalizeBlockUpdates(Seq(newBlock)) + }).map { _ => updateHeadState(priorHead, newHead) newHead } + } private def updateHeadState(prior: HeadState, next: HeadState)(implicit @@ -427,6 +887,7 @@ class BlockSequencerStateManager( blockState: BlockEphemeralState )(implicit traceContext: TraceContext): Unit = if (enableInvariantCheck) blockState.checkInvariant() + } object BlockSequencerStateManager { @@ -435,8 +896,10 @@ object BlockSequencerStateManager { synchronizerId: PhysicalSynchronizerId, store: SequencerBlockStore, trafficConsumedStore: TrafficConsumedStore, + asyncWriterParameters: AsyncWriterParameters, enableInvariantCheck: Boolean, timeouts: ProcessingTimeout, + futureSupervisor: FutureSupervisor, loggerFactory: NamedLoggerFactory, streamInstrumentationConfig: BlockSequencerStreamInstrumentationConfig, blockMetrics: BlockMetrics, @@ -462,8 +925,10 @@ object BlockSequencerStateManager { new BlockSequencerStateManager( store = store, trafficConsumedStore = trafficConsumedStore, + asyncWriterParameters = asyncWriterParameters, enableInvariantCheck = enableInvariantCheck, timeouts = timeouts, + futureSupervisor = futureSupervisor, loggerFactory = loggerFactory, headState = headState, streamInstrumentationConfig = streamInstrumentationConfig, diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/data/SequencerBlockStore.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/data/SequencerBlockStore.scala index bb4ffef156..f18be11a9a 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/data/SequencerBlockStore.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/data/SequencerBlockStore.scala @@ -45,7 +45,7 @@ trait SequencerBlockStore extends AutoCloseable { /** The current state of the sequencer, which can be used when the node is restarted to * deterministically derive the following counters and timestamps. * - * The state excludes updates of unfinalized blocks added with [[partialBlockUpdate]].
+ * The state excludes updates of unfinalized blocks added with [[storeInflightAggregations]]. * * @return * `None` if no block has been written yet, `Some` otherwise. @@ -84,25 +84,25 @@ trait SequencerBlockStore extends AutoCloseable { /** Stores some updates that happen in a single block. May be called several times for the same * block and the same update may be contained in several of the calls. Before adding updates of a - * subsequent block, [[finalizeBlockUpdate]] must be called to wrap up the current block. + * subsequent block, [[finalizeBlockUpdates]] must be called to wrap up the current block. * - * This method must not be called concurrently with itself or [[finalizeBlockUpdate]]. + * This method must not be called concurrently with itself or [[finalizeBlockUpdates]]. */ - def partialBlockUpdate( + def storeInflightAggregations( inFlightAggregationUpdates: InFlightAggregationUpdates )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] /** Finalizes the current block whose updates have been added in the calls to - * [[partialBlockUpdate]] since the last call to [[finalizeBlockUpdate]]. + * [[storeInflightAggregations]] since the last call to [[finalizeBlockUpdates]]. * - * This method must not be called concurrently with itself or [[partialBlockUpdate]], and must be - * called for the blocks in monotonically increasing order of height. + * This method must not be called concurrently with itself or [[storeInflightAggregations]], and + * must be called for the blocks in monotonically increasing order of height. * - * @param block + * @param blocks * The block information about the current block. It is the responsibility of the caller to * ensure that the height increases monotonically by one */ - def finalizeBlockUpdate(block: BlockInfo)(implicit + def finalizeBlockUpdates(blocks: Seq[BlockInfo])(implicit traceContext: TraceContext ): FutureUnlessShutdown[Unit] } diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/data/db/DbSequencerBlockStore.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/data/db/DbSequencerBlockStore.scala index 8ff9f6a03f..2f14c2b28d 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/data/db/DbSequencerBlockStore.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/data/db/DbSequencerBlockStore.scala @@ -10,7 +10,6 @@ import com.digitalasset.canton.config.{BatchingConfig, ProcessingTimeout} import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.resource.IdempotentInsert.insertVerifyingConflicts import com.digitalasset.canton.resource.{DbStorage, DbStore} import com.digitalasset.canton.synchronizer.block.data.{ BlockEphemeralState, @@ -18,6 +17,7 @@ import com.digitalasset.canton.synchronizer.block.data.{ SequencerBlockStore, } import com.digitalasset.canton.synchronizer.sequencer.errors.SequencerError +import com.digitalasset.canton.synchronizer.sequencer.errors.SequencerError.BlockNotFound import com.digitalasset.canton.synchronizer.sequencer.store.SequencerStore import com.digitalasset.canton.synchronizer.sequencer.{ InFlightAggregationUpdates, @@ -29,8 +29,6 @@ import com.digitalasset.canton.version.ProtocolVersion import scala.concurrent.ExecutionContext -import SequencerError.BlockNotFound - class 
DbSequencerBlockStore( override protected val storage: DbStorage, protocolVersion: ProtocolVersion, @@ -146,22 +144,21 @@ class DbSequencerBlockStore( ) .map(inFlightAggregations => BlockEphemeralState(block, inFlightAggregations)) - override def partialBlockUpdate( + override def storeInflightAggregations( inFlightAggregationUpdates: InFlightAggregationUpdates )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = stateManagerStore.addInFlightAggregationUpdates(inFlightAggregationUpdates) - override def finalizeBlockUpdate(block: BlockInfo)(implicit + override def finalizeBlockUpdates(blocks: Seq[BlockInfo])(implicit traceContext: TraceContext ): FutureUnlessShutdown[Unit] = - storage.queryAndUpdate(updateBlockHeightDBIO(block), functionFullName) + storage.queryAndUpdate(updateBlockHeightDBIO(blocks), functionFullName) override def setInitialState( initial: SequencerInitialState, maybeOnboardingTopologyEffectiveTimestamp: Option[CantonTimestamp], )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - val updateBlockHeight = updateBlockHeightDBIO(BlockInfo.fromSequencerInitialState(initial)) - + val updateBlockHeight = updateBlockHeightDBIO(Seq(BlockInfo.fromSequencerInitialState(initial))) for { _ <- stateManagerStore.addInFlightAggregationUpdates( initial.snapshot.inFlightAggregations.fmap(_.asUpdate) @@ -170,24 +167,17 @@ class DbSequencerBlockStore( } yield () } - private def updateBlockHeightDBIO(block: BlockInfo)(implicit traceContext: TraceContext) = - insertVerifyingConflicts( - sql"""insert into seq_block_height (height, latest_event_ts, latest_sequencer_event_ts) - values (${block.height}, ${block.lastTs}, ${block.latestSequencerEventTimestamp}) - on conflict do nothing""".asUpdate, - sql"select latest_event_ts, latest_sequencer_event_ts from seq_block_height where height = ${block.height}" - .as[(CantonTimestamp, Option[CantonTimestamp])] - .head, - )( - { case (lastEventTs, latestSequencerEventTs) => - // Allow updates to `latestSequencerEventTs` if it was not set before. - lastEventTs == block.lastTs && - (latestSequencerEventTs.isEmpty || latestSequencerEventTs == block.latestSequencerEventTimestamp) - }, - { case (lastEventTs, latestSequencerEventTs) => - s"Block height row for [${block.height}] had existing timestamp [$lastEventTs] and topology client timestamp [$latestSequencerEventTs], but we are attempting to insert [${block.lastTs}] and [${block.latestSequencerEventTimestamp}]" - }, - ) + private def updateBlockHeightDBIO(blocks: Seq[BlockInfo])(implicit traceContext: TraceContext) = { + val insertSql = + """insert into seq_block_height (height, latest_event_ts, latest_sequencer_event_ts) + values (?,?,?) 
on conflict do nothing""" + DbStorage.bulkOperation_(insertSql, blocks, storage.profile) { pp => block => + pp >> block.height + pp >> block.lastTs + pp >> block.latestSequencerEventTimestamp + } + + } override def prune(requestedTimestamp: CantonTimestamp)(implicit traceContext: TraceContext diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/data/memory/InMemorySequencerBlockStore.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/data/memory/InMemorySequencerBlockStore.scala index 304b9b172b..6d0d5ee780 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/data/memory/InMemorySequencerBlockStore.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/data/memory/InMemorySequencerBlockStore.scala @@ -60,15 +60,15 @@ class InMemorySequencerBlockStore( } } - override def partialBlockUpdate( + override def storeInflightAggregations( inFlightAggregationUpdates: InFlightAggregationUpdates )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = sequencerStore.addInFlightAggregationUpdates(inFlightAggregationUpdates) - override def finalizeBlockUpdate(block: BlockInfo)(implicit + override def finalizeBlockUpdates(blocks: Seq[BlockInfo])(implicit traceContext: TraceContext ): FutureUnlessShutdown[Unit] = { - updateBlockHeight(block) + blocks.foreach(updateBlockHeight) FutureUnlessShutdown.unit } diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/update/BlockChunkProcessor.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/update/BlockChunkProcessor.scala index e95122d70e..06282705f9 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/update/BlockChunkProcessor.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/update/BlockChunkProcessor.scala @@ -30,10 +30,11 @@ import com.digitalasset.canton.synchronizer.sequencer.errors.SequencerError import com.digitalasset.canton.synchronizer.sequencer.store.SequencerMemberValidator import com.digitalasset.canton.synchronizer.sequencer.traffic.SequencerRateLimitManager import com.digitalasset.canton.topology.* -import com.digitalasset.canton.tracing.{TraceContext, Traced} +import com.digitalasset.canton.tracing.{Spanning, TraceContext, Traced} import com.digitalasset.canton.util.* import com.digitalasset.canton.util.ShowUtil.* import com.digitalasset.canton.version.ProtocolVersion +import io.opentelemetry.api.trace.Tracer import scala.collection.mutable import scala.concurrent.{ExecutionContext, Future} @@ -52,8 +53,9 @@ final class BlockChunkProcessor( override val loggerFactory: NamedLoggerFactory, metrics: SequencerMetrics, memberValidator: SequencerMemberValidator, -)(implicit closeContext: CloseContext) - extends NamedLogging { +)(implicit closeContext: CloseContext, tracer: Tracer) + extends NamedLogging + with Spanning { private val submissionRequestValidator = new SubmissionRequestValidator( @@ -125,9 +127,7 @@ final class BlockChunkProcessor( ) = validationResult finalInFlightAggregationsWithAggregationExpiry = - finalInFlightAggregations.filterNot { case (_, inFlightAggregation) => - inFlightAggregation.expired(lastTsBeforeValidation) - } + expireInFlightAggregations(finalInFlightAggregations, lastTsBeforeValidation) chunkUpdate = ChunkUpdate( acksByMember, @@ 
-160,6 +160,14 @@ final class BlockChunkProcessor( } yield (newState, chunkUpdate) } + private def expireInFlightAggregations( + finalInFlightAggregations: InFlightAggregations, + timestamp: CantonTimestamp, + ): InFlightAggregations = + finalInFlightAggregations.filterNot { case (_, inFlightAggregation) => + inFlightAggregation.expired(timestamp) + } + private def logChunkDetails( state: State, height: Long, @@ -251,7 +259,6 @@ final class BlockChunkProcessor( synchronizerSyncCryptoApi, tickSequencingTimestamp, state.latestSequencerEventTimestamp, - protocolVersion, warnIfApproximate = false, ) _ = logger.debug( @@ -263,10 +270,15 @@ final class BlockChunkProcessor( snapshot.ipsSnapshot, ) } yield { + val unexpiredInFlightAggregations = expireInFlightAggregations( + state.inFlightAggregations, + tickSequencingTimestamp, + ) val newState = state.copy( lastChunkTs = tickSequencingTimestamp, latestSequencerEventTimestamp = Some(tickSequencingTimestamp), + inFlightAggregations = unexpiredInFlightAggregations, ) val tickSubmissionOutcome = SubmissionOutcome.Deliver( @@ -292,7 +304,7 @@ final class BlockChunkProcessor( invalidAcknowledgements = Seq.empty, inFlightAggregationUpdates = Map.empty, lastSequencerEventTimestamp = Some(tickSequencingTimestamp), - inFlightAggregations = state.inFlightAggregations, + inFlightAggregations = unexpiredInFlightAggregations, submissionsOutcomes = Seq(tickSubmissionOutcome), ) @@ -362,93 +374,94 @@ final class BlockChunkProcessor( case ((sequencingTimestamp, tracedSubmissionRequest, orderingSequencerId), requestIndex) => tracedSubmissionRequest.withTraceContext { implicit traceContext => signedSubmissionRequest => - // Warn if we use an approximate snapshot but only after we've read at least one - val warnIfApproximate = sequencersSequencerCounter.exists(_ > SequencerCounter.Genesis) - logger.debug( - s"Block $height, chunk $index, request $requestIndex sequenced at $sequencingTimestamp: " + - s"finding topology snapshot; latestSequencerEventTimestamp: $latestSequencerEventTimestamp" - ) - val submissionRequest = signedSubmissionRequest.content - for { - topologySnapshotOrErrO <- submissionRequest.topologyTimestamp.traverse( - topologyTimestamp => - SequencedEventValidator - .validateTopologyTimestamp( - synchronizerSyncCryptoApi, - topologyTimestamp, - sequencingTimestamp, - latestSequencerEventTimestamp, - protocolVersion, - warnIfApproximate, - _.sequencerTopologyTimestampTolerance, - ) - .leftMap { - case SequencedEventValidator.TopologyTimestampAfterSequencingTime => - SequencerErrors.TopologyTimestampAfterSequencingTimestamp( - topologyTimestamp, - sequencingTimestamp, - ) - case SequencedEventValidator.TopologyTimestampTooOld(_) | - SequencedEventValidator.NoDynamicSynchronizerParameters(_) => - SequencerErrors.TopologyTimestampTooEarly( - topologyTimestamp, - sequencingTimestamp, - ) - } - .value + withSpan("BlockChunkProcessor.validateSubmissions") { _ => _ => + // Warn if we use an approximate snapshot but only after we've read at least one + val warnIfApproximate = + sequencersSequencerCounter.exists(_ > SequencerCounter.Genesis) + logger.debug( + s"Block $height, chunk $index, request $requestIndex sequenced at $sequencingTimestamp: " + + s"finding topology snapshot; latestSequencerEventTimestamp: $latestSequencerEventTimestamp" ) - topologyOrSequencingSnapshot <- topologySnapshotOrErrO match { - case Some(Right(topologySnapshot)) => - logger.debug( - s"Block $height, chunk $index, request $requestIndex sequenced at $sequencingTimestamp: " + - 
"obtained and using topology snapshot at successfully validated request-specified " + - s"topology timestamp ${submissionRequest.topologyTimestamp}; " + - s"latestSequencerEventTimestamp: $latestSequencerEventTimestamp" - ) - FutureUnlessShutdown.pure(topologySnapshot) - case _ => - SyncCryptoClient - .getSnapshotForTimestamp( - synchronizerSyncCryptoApi, - sequencingTimestamp, - latestSequencerEventTimestamp, - protocolVersion, - warnIfApproximate, + val submissionRequest = signedSubmissionRequest.content + for { + topologySnapshotOrErrO <- submissionRequest.topologyTimestamp.traverse( + topologyTimestamp => + SequencedEventValidator + .validateTopologyTimestamp( + synchronizerSyncCryptoApi, + topologyTimestamp, + sequencingTimestamp, + latestSequencerEventTimestamp, + warnIfApproximate, + _.sequencerTopologyTimestampTolerance, + ) + .leftMap { + case SequencedEventValidator.TopologyTimestampAfterSequencingTime => + SequencerErrors.TopologyTimestampAfterSequencingTimestamp( + topologyTimestamp, + sequencingTimestamp, + ) + case SequencedEventValidator.TopologyTimestampTooOld(_) | + SequencedEventValidator.NoDynamicSynchronizerParameters(_) => + SequencerErrors.TopologyTimestampTooEarly( + topologyTimestamp, + sequencingTimestamp, + ) + } + .value + ) + topologyOrSequencingSnapshot <- topologySnapshotOrErrO match { + case Some(Right(topologySnapshot)) => + logger.debug( + s"Block $height, chunk $index, request $requestIndex sequenced at $sequencingTimestamp: " + + "obtained and using topology snapshot at successfully validated request-specified " + + s"topology timestamp ${submissionRequest.topologyTimestamp}; " + + s"latestSequencerEventTimestamp: $latestSequencerEventTimestamp" ) - .map { snapshot => - logger.debug( - s"Block $height, chunk $index, request $requestIndex sequenced at $sequencingTimestamp: " + - "no request-specified topology timestamp or its validation failed), " + - "so obtained and using topology snapshot at request sequencing time; " + - s"latestSequencerEventTimestamp: $latestSequencerEventTimestamp" + FutureUnlessShutdown.pure(topologySnapshot) + case _ => + SyncCryptoClient + .getSnapshotForTimestamp( + synchronizerSyncCryptoApi, + sequencingTimestamp, + latestSequencerEventTimestamp, + warnIfApproximate, ) - snapshot - } - } - topologyTimestampError = topologySnapshotOrErrO.mapFilter(_.swap.toOption) - sequencedValidatedSubmission <- { - submissionRequestValidator - .performIndependentValidations( - sequencingTimestamp, - signedSubmissionRequest, - topologyOrSequencingSnapshot, - topologyTimestampError, - )(traceContext, executionContext) - .value - .run - .map { case (trafficConsumption, errorOrResolvedGroups) => - SequencedValidatedSubmission( + .map { snapshot => + logger.debug( + s"Block $height, chunk $index, request $requestIndex sequenced at $sequencingTimestamp: " + + "no request-specified topology timestamp or its validation failed), " + + "so obtained and using topology snapshot at request sequencing time; " + + s"latestSequencerEventTimestamp: $latestSequencerEventTimestamp" + ) + snapshot + } + } + topologyTimestampError = topologySnapshotOrErrO.mapFilter(_.swap.toOption) + sequencedValidatedSubmission <- { + submissionRequestValidator + .performIndependentValidations( sequencingTimestamp, signedSubmissionRequest, - orderingSequencerId, topologyOrSequencingSnapshot, topologyTimestampError, - trafficConsumption, - errorOrResolvedGroups, - )(traceContext) - } - } - } yield sequencedValidatedSubmission + )(traceContext, executionContext) + .value + .run + 
.map { case (trafficConsumption, errorOrResolvedGroups) => + SequencedValidatedSubmission( + sequencingTimestamp, + signedSubmissionRequest, + orderingSequencerId, + topologyOrSequencingSnapshot, + topologyTimestampError, + trafficConsumption, + errorOrResolvedGroups, + )(traceContext) + } + } + } yield sequencedValidatedSubmission + } } } @@ -466,11 +479,10 @@ final class BlockChunkProcessor( synchronizerSyncCryptoApi, state.lastBlockTs, state.latestSequencerEventTimestamp, - protocolVersion, warnIfApproximate = false, ) synchronizerSuccessorO <- snapshot.ipsSnapshot - .isSynchronizerUpgradeOngoing() + .synchronizerUpgradeOngoing() .map(_.map { case (successor, _) => successor }) allAcknowledgements = fixedTsChanges.collect { case (_, t @ Traced(Acknowledgment(_, ack))) => t.map(_ => ack) diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/update/BlockUpdateGenerator.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/update/BlockUpdateGenerator.scala index acb5b0c24d..385dee83df 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/update/BlockUpdateGenerator.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/update/BlockUpdateGenerator.scala @@ -25,9 +25,10 @@ import com.digitalasset.canton.synchronizer.sequencer.store.SequencerMemberValid import com.digitalasset.canton.synchronizer.sequencer.traffic.SequencerRateLimitManager import com.digitalasset.canton.synchronizer.sequencer.{InFlightAggregations, SubmissionOutcome} import com.digitalasset.canton.topology.* -import com.digitalasset.canton.tracing.{TraceContext, Traced} +import com.digitalasset.canton.tracing.{Spanning, TraceContext, Traced} import com.digitalasset.canton.util.collection.IterableUtil import com.digitalasset.canton.version.ProtocolVersion +import io.opentelemetry.api.trace.Tracer import scala.collection.immutable import scala.concurrent.ExecutionContext @@ -99,9 +100,10 @@ class BlockUpdateGeneratorImpl( metrics: SequencerMetrics, protected val loggerFactory: NamedLoggerFactory, memberValidator: SequencerMemberValidator, -)(implicit val closeContext: CloseContext) +)(implicit val closeContext: CloseContext, tracer: Tracer) extends BlockUpdateGenerator - with NamedLogging { + with NamedLogging + with Spanning { import BlockUpdateGenerator.* import BlockUpdateGeneratorImpl.* @@ -128,31 +130,32 @@ class BlockUpdateGeneratorImpl( override def extractBlockEvents(block: RawLedgerBlock): BlockEvents = { val ledgerBlockEvents = block.events.mapFilter { tracedEvent => - implicit val traceContext: TraceContext = tracedEvent.traceContext - logger.trace("Extracting event from raw block") - // TODO(i26169) Prevent zip bombing when decompressing the request - LedgerBlockEvent.fromRawBlockEvent(protocolVersion, MaxRequestSizeToDeserialize.NoLimit)( - tracedEvent.value - ) match { - case Left(error) => - InvalidLedgerEvent.Error(block.blockHeight, error).discard - None - - case Right(event) => - sequencingTimeLowerBoundExclusive match { - case Some(boundExclusive) - if !LogicalUpgradeTime.canProcessKnowingPastUpgrade( - upgradeTime = Some(boundExclusive), - sequencingTime = event.timestamp, - ) => - SequencedBeforeOrAtLowerBound - .Error(event.timestamp, boundExclusive, event.toString) - .log() - None - - case _ => Some(Traced(event)) - } - } + withSpan("BlockUpdateGenerator.extractBlockEvents") { implicit traceContext => _ => + 
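+ // Wrap the per-event decoding in a tracing span; the span is created from the raw event's own trace context, which is passed explicitly at the end of this block.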
logger.trace("Extracting event from raw block") + // TODO(i26169) Prevent zip bombing when decompressing the request + LedgerBlockEvent.fromRawBlockEvent(protocolVersion, MaxRequestSizeToDeserialize.NoLimit)( + tracedEvent.value + ) match { + case Left(error) => + InvalidLedgerEvent.Error(block.blockHeight, error).discard + None + + case Right(event) => + sequencingTimeLowerBoundExclusive match { + case Some(boundExclusive) + if !LogicalUpgradeTime.canProcessKnowingPastUpgrade( + upgradeTime = Some(boundExclusive), + sequencingTime = event.timestamp, + ) => + SequencedBeforeOrAtLowerBound + .Error(event.timestamp, boundExclusive, event.toString) + .log() + None + + case _ => Some(Traced(event)) + } + } + }(tracedEvent.traceContext, tracer) } BlockEvents( diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/update/SubmissionRequestValidator.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/update/SubmissionRequestValidator.scala index 9573670c0e..c864bd641d 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/update/SubmissionRequestValidator.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/update/SubmissionRequestValidator.scala @@ -236,7 +236,8 @@ private[update] final class SubmissionRequestValidator( traceContext: TraceContext, ): EitherT[FutureUnlessShutdown, SubmissionOutcome, Map[GroupRecipient, Set[Member]]] = { val groupRecipients = submissionRequest.batch.allRecipients.collect { - case group: GroupRecipient => + // Note: we don't resolve AllMembersOfSynchronizer as it is encoded as -1 and handled internally by db sequencer + case group: GroupRecipient if group != AllMembersOfSynchronizer => group } @@ -614,7 +615,9 @@ private[update] final class SubmissionRequestValidator( // // See https://github.com/DACH-NY/canton/pull/17676#discussion_r1515926774 sequencerEventTimestamp = - Option.when(isThisSequencerAddressed(groupToMembers))(sequencingTimestamp) + Option.when(isThisSequencerAddressed(groupToMembers, submissionRequest))( + sequencingTimestamp + ) } yield SubmissionRequestValidationResult( inFlightAggregations, @@ -785,13 +788,17 @@ private[update] final class SubmissionRequestValidator( // after being deactivated in the Canton topology, specifically until the underlying consensus algorithm // allows them to be also removed from the BFT ordering topology), but they should not be considered addressed, // since they are not active in the Canton topology anymore (i.e., group recipients don't include them). 
- private def isThisSequencerAddressed(groupToMembers: Map[GroupRecipient, Set[Member]]): Boolean = + private def isThisSequencerAddressed( + groupToMembers: Map[GroupRecipient, Set[Member]], + submissionRequest: SubmissionRequest, + ): Boolean = groupToMembers .get(AllMembersOfSynchronizer) .exists(_.contains(sequencerId)) || groupToMembers .get(SequencersOfSynchronizer) - .exists(_.contains(sequencerId)) + .exists(_.contains(sequencerId)) || + submissionRequest.batch.isBroadcast } private[update] object SubmissionRequestValidator { diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/config/PublicServerConfig.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/config/PublicServerConfig.scala index cd1af04e87..d26f7cbc8e 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/config/PublicServerConfig.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/config/PublicServerConfig.scala @@ -34,6 +34,8 @@ import scala.concurrent.duration.Duration * expiration intervals. If disabled, the token expiration interval will be constant. * @param overrideMaxRequestSize * overrides the default maximum request size in bytes on the sequencer node + * @param stream + * optional stream limit config */ // TODO(i4056): Client authentication over TLS is currently unsupported, // because there is a token based protocol to authenticate clients. This may change in the future. @@ -50,6 +52,7 @@ final case class PublicServerConfig( overrideMaxRequestSize: Option[NonNegativeInt] = None, override val maxTokenLifetime: NonNegativeDuration = config.NonNegativeDuration(Duration.Inf), override val jwksCacheConfig: JwksCacheConfig = JwksCacheConfig(), + stream: Option[StreamLimitConfig] = None, ) extends ServerConfig with UniformCantonConfigValidation { diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/config/SynchronizerParametersConfig.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/config/SynchronizerParametersConfig.scala index 290d5e35ec..7a4e131a89 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/config/SynchronizerParametersConfig.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/config/SynchronizerParametersConfig.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.synchronizer.config import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.RequireTypes.NonNegativeInt -import com.digitalasset.canton.config.{CryptoConfig, ProtocolConfig} +import com.digitalasset.canton.config.{CryptoConfig, NonNegativeFiniteDuration, ProtocolConfig} import com.digitalasset.canton.crypto.* import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.protocol.StaticSynchronizerParameters @@ -39,8 +39,6 @@ import com.digitalasset.canton.version.ProtocolVersion * @param requiredCryptoKeyFormats * The optional required crypto key formats that a member has to support. If none is specified, * all the supported algorithms are required. - * @param enableTransparencyChecks - * A flag to enable transparency checks on the views. * @param dontWarnOnDeprecatedPV * If true, then this synchronizer will not emit a warning when configured to use a deprecated * protocol version (such as 2.0.0). 
@@ -54,7 +52,7 @@ final case class SynchronizerParametersConfig( requiredHashAlgorithms: Option[NonEmpty[Set[HashAlgorithm]]] = None, requiredCryptoKeyFormats: Option[NonEmpty[Set[CryptoKeyFormat]]] = None, requiredSignatureFormats: Option[NonEmpty[Set[SignatureFormat]]] = None, - enableTransparencyChecks: Boolean = false, + topologyChangeDelay: Option[NonNegativeFiniteDuration] = None, // TODO(i15561): Revert back to `false` once there is a stable Daml 3 protocol version override val alphaVersionSupport: Boolean = true, override val betaVersionSupport: Boolean = false, @@ -71,7 +69,7 @@ final case class SynchronizerParametersConfig( param("requiredHashAlgorithms", _.requiredHashAlgorithms), param("requiredCryptoKeyFormats", _.requiredCryptoKeyFormats), param("requiredSignatureFormats", _.requiredSignatureFormats), - param("enableTransparencyChecks", _.enableTransparencyChecks), + param("topologyChangeDelay", _.topologyChangeDelay), param("alphaVersionSupport", _.alphaVersionSupport), param("betaVersionSupport", _.betaVersionSupport), param("dontWarnOnDeprecatedPV", _.dontWarnOnDeprecatedPV), @@ -136,6 +134,11 @@ final case class SynchronizerParametersConfig( newSignatureFormats = requiredSignatureFormats.getOrElse( cryptoConfig.provider.supportedSignatureFormatsForProtocol(protocolVersion) ) + newTopologyChangeDelay = topologyChangeDelay + .map(_.toInternal) + .getOrElse( + StaticSynchronizerParameters.defaultTopologyChangeDelay + ) } yield { StaticSynchronizerParameters( requiredSigningSpecs = RequiredSigningSpecs( @@ -150,7 +153,8 @@ final case class SynchronizerParametersConfig( requiredHashAlgorithms = newRequiredHashAlgorithms, requiredCryptoKeyFormats = newCryptoKeyFormats, requiredSignatureFormats = newSignatureFormats, - enableTransparencyChecks = enableTransparencyChecks, + topologyChangeDelay = newTopologyChangeDelay, + enableTransparencyChecks = false, protocolVersion = protocolVersion, serial = serial, ) diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/MediatorConfig.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/MediatorConfig.scala index f01e3ba5b0..d6e1acf8c8 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/MediatorConfig.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/MediatorConfig.scala @@ -7,7 +7,12 @@ import cats.syntax.option.* import com.digitalasset.canton.config import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.config.manual.CantonConfigValidatorDerivation -import com.digitalasset.canton.config.{CantonConfigValidator, UniformCantonConfigValidation} +import com.digitalasset.canton.config.{ + BatchAggregatorConfig, + CantonConfigValidator, + PositiveFiniteDuration, + UniformCantonConfigValidation, +} /** Configuration for the mediator. 
* @@ -15,7 +20,8 @@ import com.digitalasset.canton.config.{CantonConfigValidator, UniformCantonConfi * mediator pruning configuration */ final case class MediatorConfig( - pruning: MediatorPruningConfig = MediatorPruningConfig() + pruning: MediatorPruningConfig = MediatorPruningConfig(), + deduplicationStore: DeduplicationStoreConfig = DeduplicationStoreConfig(), ) extends UniformCantonConfigValidation object MediatorConfig { @@ -46,3 +52,20 @@ object MediatorPruningConfig { CantonConfigValidatorDerivation[MediatorPruningConfig] } } + +/** Configuration for the deduplication store + * @param pruneAtMostEvery + * Throttles pruning operations to run at most once within this duration + * @param persistBatching + * Configuration for batching persist operations in the deduplication store + */ +final case class DeduplicationStoreConfig( + pruneAtMostEvery: PositiveFiniteDuration = PositiveFiniteDuration.ofSeconds(10), + persistBatching: BatchAggregatorConfig = BatchAggregatorConfig(), +) extends UniformCantonConfigValidation + +object DeduplicationStoreConfig { + implicit val deduplicationStoreConfigCantonConfigValidator + : CantonConfigValidator[DeduplicationStoreConfig] = + CantonConfigValidatorDerivation[DeduplicationStoreConfig] +} diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/MediatorNode.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/MediatorNode.scala index 69445d13ef..5a75dbe123 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/MediatorNode.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/MediatorNode.scala @@ -21,6 +21,7 @@ import com.digitalasset.canton.crypto.{ CryptoHandshakeValidator, SynchronizerCrypto, SynchronizerCryptoClient, + SynchronizerCryptoPureApi, } import com.digitalasset.canton.environment.* import com.digitalasset.canton.health.* @@ -444,6 +445,7 @@ class MediatorNodeBootstrap( staticSynchronizerParameters = staticSynchronizerParameters, store = synchronizerTopologyStore, outboxQueue = outboxQueue, + disableOptionalTopologyChecks = config.topology.disableOptionalTopologyChecks, exitOnFatalFailures = parameters.exitOnFatalFailures, timeouts = timeouts, futureSupervisor = futureSupervisor, @@ -492,6 +494,7 @@ class MediatorNodeBootstrap( synchronizerTopologyStore, topologyManagerStatus = TopologyManagerStatus .combined(authorizedTopologyManager, synchronizerTopologyManager), + config.topology, synchronizerOutboxFactory, ), storage.isActive, @@ -552,6 +555,7 @@ class MediatorNodeBootstrap( staticSynchronizerParameters: StaticSynchronizerParameters, synchronizerTopologyStore: TopologyStore[SynchronizerStore], topologyManagerStatus: TopologyManagerStatus, + topologyConfig: TopologyConfig, synchronizerOutboxFactory: SynchronizerOutboxFactory, ): EitherT[FutureUnlessShutdown, String, MediatorRuntime] = { val synchronizerLoggerFactory = loggerFactory.append("synchronizerId", synchronizerId.toString) @@ -572,7 +576,7 @@ class MediatorNodeBootstrap( seedForRandomnessO = arguments.testingConfig.sequencerTransportSeed, futureSupervisor = futureSupervisor, timeouts = timeouts, - loggerFactory = loggerFactory, + loggerFactory = synchronizerLoggerFactory, ) val useNewConnectionPool = parameters.sequencerClient.useNewConnectionPool @@ -605,6 +609,7 @@ class MediatorNodeBootstrap( crypto.pureCrypto, arguments.parameterConfig, arguments.clock, +
staticSynchronizerParameters, arguments.futureSupervisor, synchronizerLoggerFactory, )() @@ -660,10 +665,10 @@ class MediatorNodeBootstrap( connectionPoolAndInfo <- if (useNewConnectionPool) GrpcSequencerConnectionService.waitUntilSequencerConnectionIsValidWithPool( - connectionPoolFactory, - parameters.tracing, - this, - getSequencerConnectionFromStore, + connectionPoolFactory = connectionPoolFactory, + tracingConfig = parameters.tracing, + flagCloseable = this, + loadConfig = getSequencerConnectionFromStore, ) else for { @@ -675,9 +680,10 @@ class MediatorNodeBootstrap( dummyPool <- EitherT.fromEither[FutureUnlessShutdown]( connectionPoolFactory .createFromOldConfig( - info.sequencerConnections, + sequencerConnections = info.sequencerConnections, expectedPSIdO = None, - parameters.tracing, + tracingConfig = parameters.tracing, + name = "dummy", ) .leftMap(error => error.toString) ) @@ -717,9 +723,11 @@ class MediatorNodeBootstrap( ), sequencerClientFactory, sequencerInfoLoader, + connectionPoolFactory, synchronizerAlias, synchronizerId, sequencerClient, + parameters.tracing, loggerFactory, ) @@ -743,9 +751,9 @@ class MediatorNodeBootstrap( mediatorId ).callback( new InitialTopologySnapshotValidator( - crypto.pureCrypto, + new SynchronizerCryptoPureApi(staticSynchronizerParameters, crypto.pureCrypto), synchronizerTopologyStore, - arguments.parameterConfig.processingTimeouts, + validateInitialSnapshot = topologyConfig.validateInitialTopologySnapshot, synchronizerLoggerFactory, ), topologyClient, diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/MediatorNodeBootstrapFactory.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/MediatorNodeBootstrapFactory.scala index 72beefb3b2..651baf8860 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/MediatorNodeBootstrapFactory.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/MediatorNodeBootstrapFactory.scala @@ -5,10 +5,9 @@ package com.digitalasset.canton.synchronizer.mediator import com.daml.grpc.adapter.ExecutionSequencerFactory import com.digitalasset.canton.concurrent.ExecutionContextIdlenessExecutorService -import com.digitalasset.canton.crypto.kms.CommunityKmsFactory -import com.digitalasset.canton.crypto.store.CommunityCryptoPrivateStoreFactory +import com.digitalasset.canton.crypto.store.CryptoPrivateStoreFactory import com.digitalasset.canton.environment.NodeFactoryArguments -import com.digitalasset.canton.resource.CommunityStorageFactory +import com.digitalasset.canton.resource.StorageSingleFactory import com.digitalasset.canton.synchronizer.metrics.MediatorMetrics import org.apache.pekko.actor.ActorSystem @@ -42,18 +41,17 @@ object CommunityMediatorNodeBootstrapFactory extends MediatorNodeBootstrapFactor ): Either[String, MediatorNodeBootstrap] = arguments .toCantonNodeBootstrapCommonArguments( - new CommunityStorageFactory(arguments.config.storage), - new CommunityCryptoPrivateStoreFactory( + new StorageSingleFactory(arguments.config.storage), + new CryptoPrivateStoreFactory( arguments.config.crypto.provider, arguments.config.crypto.kms, - CommunityKmsFactory, arguments.config.parameters.caching.kmsMetadataCache, arguments.config.crypto.privateKeyStore, + replicaManager = None, arguments.futureSupervisor, arguments.clock, arguments.executionContext, ), - CommunityKmsFactory, ) .map { bootstrapArguments => new MediatorNodeBootstrap( 
diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/MediatorRuntimeFactory.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/MediatorRuntimeFactory.scala index 9f97c7d18e..55ebe4d26f 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/MediatorRuntimeFactory.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/MediatorRuntimeFactory.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.synchronizer.mediator import cats.data.EitherT +import com.daml.grpc.adapter.ExecutionSequencerFactory import com.digitalasset.canton.config.{BatchingConfig, ProcessingTimeout} import com.digitalasset.canton.connection.GrpcApiInfoService import com.digitalasset.canton.connection.v30.ApiInfoServiceGrpc @@ -38,6 +39,7 @@ import com.digitalasset.canton.topology.processing.TopologyTransactionProcessor import com.digitalasset.canton.tracing.TraceContext import io.grpc.ServerServiceDefinition import io.opentelemetry.api.trace.Tracer +import org.apache.pekko.stream.Materializer import scala.concurrent.ExecutionContext @@ -51,8 +53,11 @@ final class MediatorRuntime( batchingConfig: BatchingConfig, override protected val timeouts: ProcessingTimeout, override protected val loggerFactory: NamedLoggerFactory, -)(implicit protected val ec: ExecutionContext) - extends FlagCloseable +)(implicit + protected val ec: ExecutionContext, + esf: ExecutionSequencerFactory, + materializer: Materializer, +) extends FlagCloseable with NamedLogging { val pruningScheduler: MediatorPruningScheduler = new MediatorPruningScheduler( clock = clock, @@ -124,6 +129,8 @@ object MediatorRuntimeFactory { loggerFactory: NamedLoggerFactory, )(implicit ec: ExecutionContext, + esf: ExecutionSequencerFactory, + materializer: Materializer, tracer: Tracer, traceContext: TraceContext, ): EitherT[FutureUnlessShutdown, String, MediatorRuntime] = { @@ -137,10 +144,11 @@ object MediatorRuntimeFactory { ) val deduplicationStore = MediatorDeduplicationStore( - mediatorId, storage, nodeParameters.processingTimeouts, loggerFactory, + config.deduplicationStore.pruneAtMostEvery.toInternal, + config.deduplicationStore.persistBatching, ) val state = new MediatorState( diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/service/GrpcMediatorInspectionService.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/service/GrpcMediatorInspectionService.scala index daed8ece33..b3e2832a38 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/service/GrpcMediatorInspectionService.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/service/GrpcMediatorInspectionService.scala @@ -3,16 +3,17 @@ package com.digitalasset.canton.synchronizer.mediator.service -import cats.Monad import cats.syntax.functor.* +import com.daml.grpc.adapter.ExecutionSequencerFactory +import com.daml.grpc.adapter.server.pekko.ServerAdapter import com.daml.nonempty.NonEmpty import com.digitalasset.canton.ProtoDeserializationError.ProtoDeserializationFailure import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.data.{CantonTimestamp, TransactionView} +import com.digitalasset.canton.error.MediatorError import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import 
com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.mediator.admin.v30 as mediatorV30 -import com.digitalasset.canton.mediator.admin.v30.{VerdictsRequest, VerdictsResponse} import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors.AbortedDueToShutdown import com.digitalasset.canton.protocol.messages.InformeeMessage import com.digitalasset.canton.synchronizer.mediator.FinalizedResponse @@ -22,6 +23,8 @@ import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} import com.digitalasset.canton.util.FutureUtil import io.grpc.Status import io.grpc.stub.{ServerCallStreamObserver, StreamObserver} +import org.apache.pekko.stream.Materializer +import org.apache.pekko.stream.scaladsl.Source import org.slf4j.event.Level import scala.concurrent.ExecutionContext @@ -51,7 +54,7 @@ class GrpcMediatorInspectionService( watermarkTracker: TimeAwaiter, batchSize: PositiveInt, override protected val loggerFactory: NamedLoggerFactory, -)(implicit ec: ExecutionContext) +)(implicit ec: ExecutionContext, esf: ExecutionSequencerFactory, materializer: Materializer) extends mediatorV30.MediatorInspectionServiceGrpc.MediatorInspectionService with NamedLogging { @@ -61,8 +64,8 @@ class GrpcMediatorInspectionService( * i.e. the sequencing timestamp of the response that resulted in a finalized response. */ override def verdicts( - request: VerdictsRequest, - responseObserver: StreamObserver[VerdictsResponse], + request: mediatorV30.VerdictsRequest, + responseObserver: StreamObserver[mediatorV30.VerdictsResponse], ): Unit = { implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext @@ -95,25 +98,52 @@ class GrpcMediatorInspectionService( */ def loadBatchesAndRespond( queryRange: QueryRange, - responseObserver: ServerCallStreamObserver[VerdictsResponse], - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = - Monad[FutureUnlessShutdown] - .iterateUntilM(queryRange) { case QueryRange(fromRequestTime, toRequestTime) => - logger.debug( - s"Loading verdicts between ]$fromRequestTime, $toRequestTime]" - ) - finalizedResponseStore - .readFinalizedVerdicts( - fromRequestTime, - toRequestTime, - batchSize, + responseObserver: ServerCallStreamObserver[mediatorV30.VerdictsResponse], + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { + val sink = ServerAdapter.toSink( + responseObserver, + throwable => + MediatorError.InternalError + .Reject( + cause = "Error during MediatorInspectionService.verdicts", + throwableO = Some(throwable), + ) + .asGrpcError, + ) + + val doneF = Source + .unfoldAsync( + QueryRange(CantonTimestamp.MinValue, queryRange.fromRequestExclusive) -> Seq + .empty[FinalizedResponse] + ) { case (queryRange, previousResponses) => + val resultFUS = for { + nextTimeStamps <- determineNextTimestamps( + previousResponses, + queryRange.toRequestInclusive, ) - .flatMap { finalizedResponses => - respondIfNonEmpty(finalizedResponses, responseObserver) - determineNextTimestamps(finalizedResponses, toRequestTime) - } - }(_ => responseObserver.isCancelled) - .map(_ => ()) + QueryRange(fromRequestTime, toRequestTime) = nextTimeStamps + _ = logger.debug( + s"Loading verdicts between ]$fromRequestTime, $toRequestTime]" + ) + + finalizedResponses <- finalizedResponseStore + .readFinalizedVerdicts( + fromRequestTime, + toRequestTime, + batchSize, + ) + } yield { + val verdicts = buildVerdictResponses(finalizedResponses) + Some((nextTimeStamps, finalizedResponses) -> verdicts) + } + + 
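+ // A shutdown is turned into graceful stream completion: returning None ends the unfoldAsync source.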
resultFUS.onShutdown(None) + } + .mapConcat(identity) + .runWith(sink) + + FutureUnlessShutdown.outcomeF(doneF) + } private def determineNextTimestamps( finalizedResponses: Seq[FinalizedResponse], @@ -126,68 +156,59 @@ class GrpcMediatorInspectionService( // we properly advance the time window that needs to be checked in the finalized response store // and avoid loading data again and again just to discard it afterwards - val mostRecentTimestamps = finalizedResponses + val mostRecentTimestamp = finalizedResponses .maxByOption(r => r.requestId.unwrap) .map(r => r.requestId.unwrap) // Use the timestamp from the most recent verdict loaded from the database. // If no verdicts were found, use currentToInclusive as the next starting point, because we know // that there won't be any verdicts before this timestamp + val nextFromExclusive = mostRecentTimestamp.getOrElse(currentToInclusive) - mostRecentTimestamps match { - case Some(nextFromRequestTime) => - FutureUnlessShutdown.pure( - QueryRange( - fromRequestExclusive = nextFromRequestTime, - toRequestInclusive = watermarkTracker.getCurrentKnownTime(), - ) + val newWatermark = watermarkTracker.getCurrentKnownTime() + val possiblyWaitForNextObservedTimestamp = if (newWatermark <= nextFromExclusive) { + logger.debug( + s"Waiting to observe a time later than the current watermark $newWatermark" + ) + watermarkTracker + .awaitKnownTimestamp(newWatermark.immediateSuccessor) + // if there is a race and in the meantime a sequenced time > `newWatermark` was observed, we just continue + .getOrElse(FutureUnlessShutdown.unit) + } else { + // no need to wait, since the watermark has moved since last we queried the store + FutureUnlessShutdown.unit + } + + possiblyWaitForNextObservedTimestamp + // fact: there is no verdict until nextFromExclusive, because no responses were found. + // Therefore, use `nextFromExclusive` as the starting point for the next batch lookup. + .map(_ => + QueryRange( + fromRequestExclusive = nextFromExclusive, + toRequestInclusive = watermarkTracker.getCurrentKnownTime(), ) - case None => - val newWatermark = watermarkTracker.getCurrentKnownTime() - val possiblyWaitForNextObservedTimestamp = if (newWatermark <= currentToInclusive) { - logger.debug( - s"Waiting to observe a time later than the current watermark $newWatermark" - ) - watermarkTracker - .awaitKnownTimestamp(newWatermark.immediateSuccessor) - // if there is a race and in the meantime a sequenced time > `newWatermark` was observed, we just continue - .getOrElse(FutureUnlessShutdown.unit) - } else { - // no need to wait, since the watermark has moved since last we queried the store - FutureUnlessShutdown.unit - } + ) - possiblyWaitForNextObservedTimestamp - // fact: there is no verdict until currentToInclusive, because no responses were found. - // Therefore, use `(currentToInclusive, CantonTimestamp.MaxValue)` as the starting point for the next batch lookup. - // Since we don't actually have a most recent response available, use CantonTimestamp.MaxValue as the lower bound, - // so that the next verdict found has at least `currentToInclusive.immediateSuccessor`. 
- .map(_ => - QueryRange( - fromRequestExclusive = currentToInclusive, - toRequestInclusive = watermarkTracker.getCurrentKnownTime(), - ) - ) - } } - /** Converts the responses to inspection api verdicts and responds on the stream observer + /** Converts the responses to inspection api verdicts */ - private def respondIfNonEmpty( - finalizedResponses: Seq[FinalizedResponse], - responseObserver: StreamObserver[mediatorV30.VerdictsResponse], - )(implicit traceContext: TraceContext): Unit = - NonEmpty.from(convertResponses(finalizedResponses)).foreach { protoResponses => - // we're using the latest timestamp from the responses loaded from the database (even though - // they might contain verdicts for irrelevant requests, e.g. reassignments), so that - // we can log the full range of the time window considered - val timestamps = finalizedResponses.map(r => r.requestId.unwrap) - val minRequestTime = timestamps.headOption - val maxRequestTime = timestamps.lastOption - logger.debug( - s"Responding with ${protoResponses.size} verdicts between [$minRequestTime, $maxRequestTime]" - ) - protoResponses.foreach(responseObserver.onNext) - } + private def buildVerdictResponses( + finalizedResponses: Seq[FinalizedResponse] + )(implicit traceContext: TraceContext): Seq[mediatorV30.VerdictsResponse] = + NonEmpty + .from(convertResponses(finalizedResponses)) + .fold(Seq.empty[mediatorV30.VerdictsResponse]) { verdicts => + // we're using the latest timestamp from the responses loaded from the database (even though + // they might contain verdicts for irrelevant requests, e.g. reassignments), so that + // we can log the full range of the time window considered + val timestamps = finalizedResponses.map(r => r.requestId.unwrap) + val minRequestTime = timestamps.headOption + val maxRequestTime = timestamps.lastOption + logger.debug( + s"Responding with ${verdicts.size} verdicts between [$minRequestTime, $maxRequestTime]" + ) + verdicts + } /** Filters for verdicts for relevant requests (currently only InformeeMessage aka Daml * transactions) and convert to the mediator inspection api value. 
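The rework above replaces the `iterateUntilM` polling loop with a Pekko Streams pipeline: `Source.unfoldAsync` threads the query cursor through successive store reads, `mapConcat(identity)` flattens each page into individual responses, and `ServerAdapter.toSink` ties emission to gRPC flow control. The following is only a rough, self-contained sketch of the cursor-paging pattern; the store, the watermark waiting, and `FutureUnlessShutdown` are replaced by plain Futures and hypothetical stand-ins (`Verdict`, `readBatch`):

```scala
import org.apache.pekko.NotUsed
import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.stream.scaladsl.{Sink, Source}

import scala.concurrent.{ExecutionContext, Future}

object VerdictPagingSketch {
  final case class Verdict(id: Long) // stand-in for a finalized verdict

  // Hypothetical page lookup: returns verdicts with ids in (fromExclusive, fromExclusive + batchSize],
  // capped at the watermark; stands in for finalizedResponseStore.readFinalizedVerdicts.
  def readBatch(fromExclusive: Long, batchSize: Int, watermark: Long)(implicit
      ec: ExecutionContext
  ): Future[Seq[Verdict]] =
    Future(((fromExclusive + 1) to math.min(fromExclusive + batchSize, watermark)).map(Verdict.apply))

  def main(args: Array[String]): Unit = {
    implicit val system: ActorSystem = ActorSystem("verdict-paging")
    import system.dispatcher

    val watermark = 25L // stand-in for watermarkTracker.getCurrentKnownTime()

    // unfoldAsync threads the cursor (the last id already emitted) through successive
    // asynchronous lookups; each step emits one page, and returning None completes the stream.
    val pages: Source[Seq[Verdict], NotUsed] =
      Source.unfoldAsync(0L) { fromExclusive =>
        if (fromExclusive >= watermark) Future.successful(None) // nothing left below the watermark
        else
          readBatch(fromExclusive, batchSize = 10, watermark).map { batch =>
            // If a page is empty, jump the cursor to the watermark so the stream terminates
            // instead of re-reading the same empty range.
            val nextCursor = batch.lastOption.fold(watermark)(_.id)
            Some(nextCursor -> batch)
          }
      }

    // In the real code the flattened elements are pushed into ServerAdapter.toSink(responseObserver, ...),
    // so demand from the gRPC client drives how fast pages are read; here we just print them.
    pages
      .mapConcat(identity)
      .runWith(Sink.foreach(v => println(v)))
      .onComplete(_ => system.terminate())
  }
}
```

Compared with the previous loop, demand from the sink naturally backpressures the database reads, and client cancellation completes the stream instead of requiring an explicit `responseObserver.isCancelled` check.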
diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/store/MediatorDeduplicationStore.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/store/MediatorDeduplicationStore.scala index dd951579ee..29180ddbac 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/store/MediatorDeduplicationStore.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/store/MediatorDeduplicationStore.scala @@ -12,13 +12,14 @@ import com.digitalasset.canton.lifecycle.{CloseContext, FlagCloseable, FutureUnl import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger} import com.digitalasset.canton.resource.{DbStorage, DbStore, MemoryStorage, Storage} -import com.digitalasset.canton.topology.{MediatorId, Member} +import com.digitalasset.canton.time.PositiveFiniteDuration import com.digitalasset.canton.tracing.{TraceContext, Traced} import com.digitalasset.canton.util.BatchAggregatorUS import com.google.common.annotations.VisibleForTesting import slick.jdbc.{GetResult, SetParameter} import java.util.UUID +import java.util.concurrent.atomic.AtomicReference import java.util.concurrent.{ConcurrentNavigableMap, ConcurrentSkipListMap} import scala.collection.concurrent.TrieMap import scala.concurrent.ExecutionContext @@ -162,19 +163,19 @@ private[mediator] trait MediatorDeduplicationStore private[mediator] object MediatorDeduplicationStore { def apply( - mediatorId: MediatorId, storage: Storage, timeouts: ProcessingTimeout, loggerFactory: NamedLoggerFactory, - batchAggregatorConfig: BatchAggregatorConfig = BatchAggregatorConfig(), + pruneAtMostEvery: PositiveFiniteDuration, + persistBatching: BatchAggregatorConfig, )(implicit executionContext: ExecutionContext): MediatorDeduplicationStore = storage match { case _: MemoryStorage => new InMemoryMediatorDeduplicationStore(loggerFactory, timeouts) case dbStorage: DbStorage => new DbMediatorDeduplicationStore( - mediatorId, dbStorage, timeouts, - batchAggregatorConfig, + persistBatching, + pruneAtMostEvery, loggerFactory, ) } @@ -252,16 +253,15 @@ private[mediator] class InMemoryMediatorDeduplicationStore( // context propagation to improve and simplify shutdown semantics and reliability. For that reason closing this store // is inconsequential. DB operations won't be retried if and only if the caller's close context is closing. 
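+ // Deduplication rows are no longer keyed by mediator_id; the queries below operate on the whole table.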
private[mediator] class DbMediatorDeduplicationStore( - mediatorId: MediatorId, override protected val storage: DbStorage, override protected val timeouts: ProcessingTimeout, - batchAggregatorConfig: BatchAggregatorConfig, + persistBatching: BatchAggregatorConfig, + pruneAtMostEvery: PositiveFiniteDuration, override protected val loggerFactory: NamedLoggerFactory, )(implicit executionContext: ExecutionContext) extends MediatorDeduplicationStore with DbStore { - import Member.DbStorageImplicits.* import storage.api.* override protected def doInitialize( @@ -273,13 +273,13 @@ private[mediator] class DbMediatorDeduplicationStore( for { _ <- storage.update_( sqlu"""delete from mediator_deduplication_store - where mediator_id = $mediatorId and request_time >= $deleteFromInclusive""", + where request_time >= $deleteFromInclusive""", functionFullName, )(traceContext, callerCloseContext) activeUuids <- storage.query( sql"""select uuid, request_time, expire_after from mediator_deduplication_store - where mediator_id = $mediatorId and expire_after > $deleteFromInclusive""" + where expire_after > $deleteFromInclusive""" .as[DeduplicationData], functionFullName, )(traceContext, callerCloseContext) @@ -293,8 +293,7 @@ private[mediator] class DbMediatorDeduplicationStore( ): FutureUnlessShutdown[Set[DeduplicationData]] = for { entries <- storage.query( - sql"""select uuid, request_time, expire_after from mediator_deduplication_store - where mediator_id = $mediatorId""" + sql"""select uuid, request_time, expire_after from mediator_deduplication_store""" .as[DeduplicationData], functionFullName, )(traceContext, callerCloseContext) @@ -322,12 +321,11 @@ private[mediator] class DbMediatorDeduplicationStore( // The query does not have to be idempotent, because the stores don't have unique indices and // the data gets deduplicated on the read path. val action = DbStorage.bulkOperation_( - """insert into mediator_deduplication_store(mediator_id, uuid, request_time, expire_after) - |values (?, ?, ?, ?)""".stripMargin, + """insert into mediator_deduplication_store(uuid, request_time, expire_after) + |values (?, ?, ?)""".stripMargin, items, storage.profile, ) { pp => data => - pp >> mediatorId pp >> data.value } @@ -344,49 +342,48 @@ private[mediator] class DbMediatorDeduplicationStore( BatchAggregatorUS( processor, - batchAggregatorConfig, + persistBatching, ) } + private val lastPruningTime: AtomicReference[Option[CantonTimestamp]] = + new AtomicReference(None) + private val lastPruningOperation: AtomicReference[FutureUnlessShutdown[Unit]] = + new AtomicReference(FutureUnlessShutdown.unit) + override protected def prunePersistentData( upToInclusive: CantonTimestamp )(implicit traceContext: TraceContext, callerCloseContext: CloseContext, - ): FutureUnlessShutdown[Unit] = - pruningBatchAggregator.run(upToInclusive)(executionContext, traceContext, callerCloseContext) - - private val pruningBatchAggregator = { - val processor: BatchAggregatorUS.ProcessorUS[CantonTimestamp, Unit] = - new BatchAggregatorUS.ProcessorUS[CantonTimestamp, Unit] { - override val kind: String = "deduplication data pruning" - - override def logger: TracedLogger = DbMediatorDeduplicationStore.this.logger - - override def executeBatch(items: NonEmpty[Seq[Traced[CantonTimestamp]]])(implicit - traceContext: TraceContext, - callerCloseContext: CloseContext, - ): FutureUnlessShutdown[Seq[Unit]] = { - // We only need to delete up to the max. - // The max is not necessarily the last one, as pruning requests can come out of order. 
- val maxUpToInclusive = items.map(_.value).max1 - - storage + ): FutureUnlessShutdown[Unit] = { + val previousPruningTime = lastPruningTime.get() + val earliestNextPruningTime = previousPruningTime.map(_ + pruneAtMostEvery) + if (upToInclusive >= earliestNextPruningTime.getOrElse(CantonTimestamp.MinValue)) { + // time to prune + if (lastPruningTime.compareAndSet(previousPruningTime, Some(upToInclusive))) { + // we are the first to update the pruning time, so we do the pruning + val ongoingPruning = lastPruningOperation.get() + if (ongoingPruning.isCompleted) { + val newPruning = storage .update_( sqlu"""delete from mediator_deduplication_store - where mediator_id = $mediatorId and expire_after <= $maxUpToInclusive""", + where expire_after <= $upToInclusive""", functionFullName, )(traceContext, callerCloseContext) - .map(_ => Seq.fill(items.size)(())) + lastPruningOperation.set(newPruning) + newPruning + } else { + // previous pruning is still ongoing, so we don't start a new one + FutureUnlessShutdown.unit } - - override def prettyItem: Pretty[CantonTimestamp] = implicitly + } else { + // someone else updated the pruning time, so we don't do the pruning + FutureUnlessShutdown.unit } - - BatchAggregatorUS( - processor, - batchAggregatorConfig, - ) + } else { + // too soon since last pruning + FutureUnlessShutdown.unit + } } - } diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/metrics/SynchronizerMetrics.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/metrics/SynchronizerMetrics.scala index 5976d200d9..e5439bbfb6 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/metrics/SynchronizerMetrics.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/metrics/SynchronizerMetrics.scala @@ -73,6 +73,9 @@ class SequencerMetrics( val eventBuffer: CacheMetrics = new CacheMetrics("events-fan-out-buffer", openTelemetryMetricsFactory) + val payloadCache: CacheMetrics = + new CacheMetrics("payload-cache", openTelemetryMetricsFactory) + val memberCache: CacheMetrics = new CacheMetrics("member-cache", openTelemetryMetricsFactory) diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/DatabaseSequencer.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/DatabaseSequencer.scala index 2c0c4d03e7..bc1f4af5d1 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/DatabaseSequencer.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/DatabaseSequencer.scala @@ -524,4 +524,10 @@ class DatabaseSequencer( announcementEffectiveTime: EffectiveTime, )(implicit traceContext: TraceContext): Unit = reader.updateSynchronizerSuccessor(successorO, announcementEffectiveTime) + + // TODO(#27919): provide a proper implementation + override def sequencingTime(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Option[CantonTimestamp]] = + FutureUnlessShutdown.pure(None) } diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/DatabaseSequencerIntegration.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/DatabaseSequencerIntegration.scala index 7805136f0d..f441f08173 100644 --- 
a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/DatabaseSequencerIntegration.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/DatabaseSequencerIntegration.scala @@ -10,9 +10,10 @@ import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.sequencing.protocol.{SequencerDeliverError, SequencerErrors} import com.digitalasset.canton.topology.Member -import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.tracing.{Spanning, TraceContext} import com.digitalasset.canton.util.retry.NoExceptionRetryPolicy import com.digitalasset.canton.util.{MonadUtil, retry} +import io.opentelemetry.api.trace.Tracer import scala.concurrent.ExecutionContext import scala.concurrent.duration.* @@ -51,8 +52,9 @@ object SequencerIntegration { } } -trait DatabaseSequencerIntegration extends SequencerIntegration { +trait DatabaseSequencerIntegration extends SequencerIntegration with Spanning { this: DatabaseSequencer => + implicit val tracer: Tracer private val retryConditionIfOverloaded: retry.Success[Either[SequencerDeliverError, Unit]] = new retry.Success({ @@ -93,15 +95,17 @@ trait DatabaseSequencerIntegration extends SequencerIntegration { case _: SubmissionOutcome.Discard.type => EitherT.pure[FutureUnlessShutdown, String](()) case outcome: DeliverableSubmissionOutcome => - implicit val success = retryConditionIfOverloaded - EitherT( - retryWithBackoff.unlessShutdown( - this - .blockSequencerWriteInternal(outcome)(outcome.submissionTraceContext) - .value, - NoExceptionRetryPolicy, + withSpan("BlockSequencer.write") { implicit traceContext => _ => + implicit val success = retryConditionIfOverloaded + EitherT( + retryWithBackoff.unlessShutdown( + this + .blockSequencerWriteInternal(outcome)(outcome.submissionTraceContext) + .value, + NoExceptionRetryPolicy, + ) ) - ) - .leftMap(_.toString) + .leftMap(_.toString) + }(outcome.submissionTraceContext, tracer) } } diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/DirectSequencerClientTransport.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/DirectSequencerClientTransport.scala index 6e67a4fbab..a0e013845a 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/DirectSequencerClientTransport.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/DirectSequencerClientTransport.scala @@ -8,6 +8,7 @@ import cats.syntax.either.* import com.daml.nameof.NameOf.functionFullName import com.digitalasset.canton.concurrent.DirectExecutionContext import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.health.{AtomicHealthComponent, ComponentHealthState} import com.digitalasset.canton.lifecycle.{ @@ -172,6 +173,11 @@ class DirectSequencerClientTransport( } } + override def getTime(timeout: Duration)(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, String, Option[CantonTimestamp]] = + EitherT.right[String](sequencer.sequencingTime) + override def subscriptionRetryPolicy: SubscriptionErrorRetryPolicy = // unlikely there will be any errors with this direct transport implementation 
SubscriptionErrorRetryPolicy.never diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/DirectSequencerConnectionX.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/DirectSequencerConnectionX.scala index 662a4afe5f..9e356c39e9 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/DirectSequencerConnectionX.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/DirectSequencerConnectionX.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton.synchronizer.sequencer import cats.data.EitherT import com.daml.nameof.NameOf.functionFullName import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} @@ -115,6 +116,11 @@ class DirectSequencerConnectionX( // In-process connection is not authenticated EitherTUtil.unitUS + override def getTime(timeout: Duration)(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, String, Option[CantonTimestamp]] = + EitherT.right[String](sequencer.sequencingTime) + override def subscribe[E]( request: SubscriptionRequest, handler: SequencedEventHandler[E], diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/DirectSequencerConnectionXPool.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/DirectSequencerConnectionXPool.scala index 9302b44b64..1e9ec6e649 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/DirectSequencerConnectionXPool.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/DirectSequencerConnectionXPool.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton.synchronizer.sequencer import cats.data.EitherT import com.daml.nameof.NameOf.functionFullName import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config as cantonConfig import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, Port, PositiveInt} import com.digitalasset.canton.lifecycle.FutureUnlessShutdown @@ -114,5 +115,8 @@ object DirectSequencerConnectionXPool { private val directPoolConfig = SequencerConnectionXPoolConfig( connections = NonEmpty(Seq, directConnectionDummyConfig), trustThreshold = PositiveInt.one, + // Not relevant for the direct pool + minRestartConnectionDelay = cantonConfig.NonNegativeFiniteDuration.Zero, + maxRestartConnectionDelay = cantonConfig.NonNegativeFiniteDuration.Zero, ) } diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/EventSignaller.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/EventSignaller.scala index 66dd09c73c..ec95e9d5cc 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/EventSignaller.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/EventSignaller.scala @@ -16,14 +16,14 @@ import scala.concurrent.Future /** Who gets notified that a event has been written */ sealed trait WriteNotification { def 
union(notification: WriteNotification): WriteNotification - def includes(memberId: SequencerMemberId): Boolean + def isBroadcastOrIncludes(memberId: SequencerMemberId): Boolean } object WriteNotification { case object None extends WriteNotification { override def union(notification: WriteNotification): WriteNotification = notification - override def includes(memberId: SequencerMemberId): Boolean = false + override def isBroadcastOrIncludes(memberId: SequencerMemberId): Boolean = false } final case class Members(memberIds: SortedSet[SequencerMemberId]) extends WriteNotification { override def union(notification: WriteNotification): WriteNotification = @@ -32,7 +32,8 @@ object WriteNotification { case None => this } - override def includes(memberId: SequencerMemberId): Boolean = memberIds.contains(memberId) + override def isBroadcastOrIncludes(memberId: SequencerMemberId): Boolean = + memberIds.contains(memberId) || memberIds.contains(SequencerMemberId.Broadcast) override def toString: String = s"Members(${memberIds.map(_.unwrap).mkString(",")})" } diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/LocalSequencerStateEventSignaller.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/LocalSequencerStateEventSignaller.scala index a29a91ac84..cfd2205617 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/LocalSequencerStateEventSignaller.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/LocalSequencerStateEventSignaller.scala @@ -68,9 +68,9 @@ class LocalSequencerStateEventSignaller( member: Member, memberId: SequencerMemberId, )(implicit traceContext: TraceContext): Source[ReadSignal, NotUsed] = { - logger.debug(s"Creating signal source for $member") + logger.info(s"Creating signal source for $member") notificationsHubSource - .filter(_.includes(memberId)) + .filter(_.isBroadcastOrIncludes(memberId)) .map(_ => ReadSignal) // this conflate ensures that a slow consumer doesn't cause backpressure and therefore // block the stream of signals for other consumers diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/OnboardingStateForSequencer.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/OnboardingStateForSequencer.scala index 9f5507b7ed..98c1f60b5a 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/OnboardingStateForSequencer.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/OnboardingStateForSequencer.scala @@ -7,6 +7,7 @@ import com.digitalasset.canton.protocol.StaticSynchronizerParameters import com.digitalasset.canton.sequencer.admin.v30 import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.synchronizer.sequencer import com.digitalasset.canton.topology.store.StoredTopologyTransactions import com.digitalasset.canton.topology.store.StoredTopologyTransactions.GenericStoredTopologyTransactions import com.digitalasset.canton.version.* @@ -15,12 +16,14 @@ final case class OnboardingStateForSequencer( topologySnapshot: GenericStoredTopologyTransactions, staticSynchronizerParameters: StaticSynchronizerParameters, sequencerSnapshot: SequencerSnapshot, -)( - override val 
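The `includes` → `isBroadcastOrIncludes` rename above changes the read-signal filter so that a write addressed to the reserved broadcast id wakes every subscriber, not just the explicitly listed members. A sketch of the check, assuming a reserved `Broadcast` member id as in the hunk (simplified types, not Canton's):

object BroadcastNotificationSketch {
  final case class MemberId(unwrap: Int)
  val Broadcast: MemberId = MemberId(-1) // assumed reserved id, for illustration only

  sealed trait Notification { def isBroadcastOrIncludes(member: MemberId): Boolean }
  case object NoneWritten extends Notification {
    override def isBroadcastOrIncludes(member: MemberId): Boolean = false
  }
  final case class Members(memberIds: Set[MemberId]) extends Notification {
    // signal a member if it is named directly, or if the write was a broadcast
    override def isBroadcastOrIncludes(member: MemberId): Boolean =
      memberIds.contains(member) || memberIds.contains(Broadcast)
  }
}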
representativeProtocolVersion: RepresentativeProtocolVersion[ - OnboardingStateForSequencer.type - ] ) extends HasProtocolVersionedWrapper[OnboardingStateForSequencer] { + override val representativeProtocolVersion + : RepresentativeProtocolVersion[sequencer.OnboardingStateForSequencer.type] = + OnboardingStateForSequencer.protocolVersionRepresentativeFor( + staticSynchronizerParameters.protocolVersion + ) + override protected val companionObj: OnboardingStateForSequencer.type = OnboardingStateForSequencer @@ -34,16 +37,6 @@ final case class OnboardingStateForSequencer( object OnboardingStateForSequencer extends VersioningCompanion[OnboardingStateForSequencer] { override def name: String = "onboarding state for sequencer" - def apply( - topologySnapshot: GenericStoredTopologyTransactions, - staticSynchronizerParameters: StaticSynchronizerParameters, - sequencerSnapshot: SequencerSnapshot, - protocolVersion: ProtocolVersion, - ): OnboardingStateForSequencer = - OnboardingStateForSequencer(topologySnapshot, staticSynchronizerParameters, sequencerSnapshot)( - protocolVersionRepresentativeFor(protocolVersion) - ) - override val versioningTable: VersioningTable = VersioningTable( ProtoVersion(30) -> VersionedProtoCodec(ProtocolVersion.v34)( v30.OnboardingStateForSequencer @@ -72,13 +65,9 @@ object OnboardingStateForSequencer extends VersioningCompanion[OnboardingStateFo "sequencer_snapshot", value.sequencerSnapshot, ) - rpv <- protocolVersionRepresentativeFor(ProtoVersion(30)) } yield OnboardingStateForSequencer( topologySnapshot, staticSynchronizerParams, sequencerSnapshot, - )( - rpv ) - } diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/OnboardingStateForSequencerV2.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/OnboardingStateForSequencerV2.scala new file mode 100644 index 0000000000..764cde37d0 --- /dev/null +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/OnboardingStateForSequencerV2.scala @@ -0,0 +1,73 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
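The new `OnboardingStateForSequencerV2` file that follows parses each optional proto field with `Option#traverse`, so an absent field parses to `None` while a present-but-invalid field fails the whole parse. A primer on that pattern, with a simplified `Either[String, *]` standing in for Canton's `ParsingResult`:

object OptionTraversePrimer {
  import cats.syntax.traverse.*

  def parsePositive(raw: Long): Either[String, Long] =
    Either.cond(raw > 0, raw, s"not positive: $raw")

  val absent: Either[String, Option[Long]] =
    Option.empty[Long].traverse(parsePositive) // Right(None)
  val invalid: Either[String, Option[Long]] =
    Option(-1L).traverse(parsePositive) // Left("not positive: -1")
}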
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.synchronizer.sequencer + +import com.digitalasset.canton.protocol.StaticSynchronizerParameters +import com.digitalasset.canton.sequencer.admin.v30 +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.store.StoredTopologyTransaction +import com.digitalasset.canton.topology.store.StoredTopologyTransaction.GenericStoredTopologyTransaction +import com.digitalasset.canton.version.* + +final case class OnboardingStateForSequencerV2( + topologySnapshot: Option[GenericStoredTopologyTransaction], + staticSynchronizerParameters: Option[StaticSynchronizerParameters], + sequencerSnapshot: Option[SequencerSnapshot], +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + OnboardingStateForSequencerV2.type + ] +) extends HasProtocolVersionedWrapper[OnboardingStateForSequencerV2] { + + override protected val companionObj: OnboardingStateForSequencerV2.type = + OnboardingStateForSequencerV2 + + private def toProtoV30: v30.OnboardingStateForSequencerV2 = + v30.OnboardingStateForSequencerV2( + topologySnapshot.map(_.toByteString(representativeProtocolVersion.representative)), + staticSynchronizerParameters.map(_.toByteString), + sequencerSnapshot.map(_.toProtoV30), + ) +} + +object OnboardingStateForSequencerV2 extends VersioningCompanion[OnboardingStateForSequencerV2] { + override def name: String = "onboarding state for sequencer" + + def apply( + topologySnapshot: Option[GenericStoredTopologyTransaction], + staticSynchronizerParameters: Option[StaticSynchronizerParameters], + sequencerSnapshot: Option[SequencerSnapshot], + protocolVersion: ProtocolVersion, + ): OnboardingStateForSequencerV2 = OnboardingStateForSequencerV2( + topologySnapshot, + staticSynchronizerParameters, + sequencerSnapshot, + )(protocolVersionRepresentativeFor(protocolVersion)) + + override val versioningTable: VersioningTable = VersioningTable( + ProtoVersion(30) -> VersionedProtoCodec(ProtocolVersion.v34)( + v30.OnboardingStateForSequencerV2 + )(supportedProtoVersion(_)(fromProtoV30), _.toProtoV30) + ) + import cats.syntax.traverse.* + private def fromProtoV30( + value: v30.OnboardingStateForSequencerV2 + ): ParsingResult[OnboardingStateForSequencerV2] = + for { + topologySnapshot <- + value.topologyTransaction.traverse(StoredTopologyTransaction.fromTrustedByteString) + staticSynchronizerParams <- + value.staticSynchronizerParameters.traverse( + StaticSynchronizerParameters.fromTrustedByteString + ) + + sequencerSnapshot <- + value.sequencerSnapshot.traverse(SequencerSnapshot.fromProtoV30) + rpv <- protocolVersionRepresentativeFor(ProtoVersion(30)) + } yield OnboardingStateForSequencerV2( + topologySnapshot, + staticSynchronizerParams, + sequencerSnapshot, + )(rpv) +} diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/Sequencer.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/Sequencer.scala index 0599f0eb78..dd2daa2b01 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/Sequencer.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/Sequencer.scala @@ -106,6 +106,14 @@ trait Sequencer traceContext: TraceContext ): EitherT[FutureUnlessShutdown, CreateSubscriptionError, Sequencer.SequencedEventSource] + /** Return a "current" sequencing time such that, when a 
`sendAsyncSigned` operation is + * subsequently called, if sequenced, the sequencing time of the resulting event is guaranteed to + * be later than the sequencing time previously returned by the `sequencingTime` call. + */ + def sequencingTime(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Option[CantonTimestamp]] + /** Return the last timestamp of the containing block of the provided timestamp. This is needed to * determine the effective timestamp to observe in topology processing, required to produce a * correct snapshot. diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerConfig.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerConfig.scala index 8417bd4f83..4c735601c9 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerConfig.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerConfig.scala @@ -25,8 +25,8 @@ import com.digitalasset.canton.synchronizer.sequencer.DatabaseSequencerConfig.{ } import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.BftBlockOrdererConfig import com.digitalasset.canton.synchronizer.sequencing.sequencer.reference.{ - CommunityReferenceSequencerDriverFactory, ReferenceSequencerDriver, + ReferenceSequencerDriverFactory, } import com.digitalasset.canton.time.Clock import pureconfig.ConfigCursor @@ -91,7 +91,7 @@ object SequencerConfig { } def default: SequencerConfig = { - val driverFactory = new CommunityReferenceSequencerDriverFactory + val driverFactory = new ReferenceSequencerDriverFactory External( driverFactory.name, BlockSequencerConfig(), @@ -287,6 +287,7 @@ object BlockSequencerConfig { confirmationResponse: IndividualCircuitBreakerConfig = default3, verdict: IndividualCircuitBreakerConfig = default3, acknowledgement: IndividualCircuitBreakerConfig = default1, + unexpected: IndividualCircuitBreakerConfig = default1, ) extends UniformCantonConfigValidation object CircuitBreakerByMessageTypeConfig { implicit val circuitBreakerByMessageTypeConfigValidator diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerFactory.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerFactory.scala index 4358ee13e0..b1eeceee79 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerFactory.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerFactory.scala @@ -16,6 +16,7 @@ import com.digitalasset.canton.synchronizer.block.SequencerDriver import com.digitalasset.canton.synchronizer.metrics.SequencerMetrics import com.digitalasset.canton.synchronizer.sequencer.block.DriverBlockSequencerFactory import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings.canton.sequencing.BftSequencerFactory +import com.digitalasset.canton.synchronizer.sequencer.config.SequencerNodeParameters import com.digitalasset.canton.synchronizer.sequencer.store.SequencerStore import com.digitalasset.canton.synchronizer.sequencer.traffic.SequencerTrafficConfig import com.digitalasset.canton.time.Clock @@ -168,7 +169,7 @@ trait MkSequencerFactory { metrics: SequencerMetrics, storage: Storage, sequencerId: SequencerId, - nodeParameters: CantonNodeParameters, + nodeParameters: 
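A sketch of the monotonicity contract documented on `sequencingTime` above, with hypothetical stand-ins for the Canton types (plain `Long` timestamps instead of `CantonTimestamp`, `Future` instead of `FutureUnlessShutdown`): any time observed before a send is a strict lower bound on the sequencing time of that send.

object SequencingTimeContractSketch {
  import scala.concurrent.{ExecutionContext, Future}

  trait SequencerLike {
    def sequencingTime: Future[Option[Long]] // "current" sequencing time, if known
    def sendAndSequence(payload: String): Future[Long] // sequencing time of the resulting event
  }

  def checkedSend(s: SequencerLike, payload: String)(implicit ec: ExecutionContext): Future[Long] =
    for {
      before <- s.sequencingTime
      at <- s.sendAndSequence(payload)
    } yield {
      // guaranteed by the contract: the event is sequenced strictly later
      before.foreach(b => require(at > b, s"sequencing time $at is not after observed $b"))
      at
    }
}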
SequencerNodeParameters, futureSupervisor: FutureSupervisor, loggerFactory: NamedLoggerFactory, )( @@ -186,7 +187,7 @@ object CommunitySequencerFactory extends MkSequencerFactory { metrics: SequencerMetrics, storage: Storage, sequencerId: SequencerId, - nodeParameters: CantonNodeParameters, + nodeParameters: SequencerNodeParameters, futureSupervisor: FutureSupervisor, loggerFactory: NamedLoggerFactory, )(sequencerConfig: SequencerConfig)(implicit @@ -222,16 +223,8 @@ object CommunitySequencerFactory extends MkSequencerFactory { blockSequencerConfig, config, ) => - // Each external sequencer driver must have a unique identifier. Yet, we have two - // implementations of the external reference sequencer driver: - // - `community-reference` for the community edition - // - `reference` for the enterprise edition - // So if the sequencer type is `reference` and we're in community edition, - // we need to convert it to `community-reference`. - val actualSequencerType = - if (sequencerType == "reference") "community-reference" else sequencerType DriverBlockSequencerFactory( - actualSequencerType, + sequencerType, SequencerDriver.DriverApiVersion, config, blockSequencerConfig, diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerNode.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerNode.scala index 076b1d9d67..83ec0d8a56 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerNode.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerNode.scala @@ -25,6 +25,7 @@ import com.digitalasset.canton.lifecycle.{ PromiseUnlessShutdown, } import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.networking.grpc.ratelimiting.StreamCounterCheck import com.digitalasset.canton.networking.grpc.{CantonGrpcUtil, CantonMutableHandlerRegistry} import com.digitalasset.canton.protocol.SynchronizerParameters.MaxRequestSize import com.digitalasset.canton.protocol.SynchronizerParametersLookup.SequencerSynchronizerParameters @@ -105,6 +106,7 @@ import com.digitalasset.canton.topology.transaction.{ import com.digitalasset.canton.tracing.{TraceContext, Traced} import com.digitalasset.canton.util.{EitherTUtil, SingleUseCell} import com.digitalasset.canton.version.{ProtocolVersion, ReleaseVersion} +import com.google.common.annotations.VisibleForTesting import io.grpc.ServerServiceDefinition import org.apache.pekko.actor.ActorSystem @@ -237,6 +239,7 @@ class SequencerNodeBootstrap( arguments.metrics.trafficControl.consumedCache.closeAcquired() arguments.metrics.eventBuffer.closeAcquired() arguments.metrics.memberCache.closeAcquired() + arguments.metrics.payloadCache.closeAcquired() } }) @@ -299,6 +302,7 @@ class SequencerNodeBootstrap( existing.synchronizerParameters, store = createSynchronizerTopologyStore(existing.synchronizerId), outboxQueue = new SynchronizerOutboxQueue(loggerFactory), + disableOptionalTopologyChecks = config.topology.disableOptionalTopologyChecks, exitOnFatalFailures = parameters.exitOnFatalFailures, timeouts, futureSupervisor, @@ -420,6 +424,7 @@ class SequencerNodeBootstrap( request.synchronizerParameters, store, outboxQueue, + disableOptionalTopologyChecks = config.topology.disableOptionalTopologyChecks, exitOnFatalFailures = parameters.exitOnFatalFailures, timeouts, futureSupervisor, @@ -514,7 +519,7 @@ class 
SequencerNodeBootstrap( val topologySnapshotValidator = new InitialTopologySnapshotValidator( crypto.pureCrypto, synchronizerTopologyStore, - parameters.processingTimeouts, + validateInitialSnapshot = config.topology.validateInitialTopologySnapshot, loggerFactory, ) for { @@ -599,6 +604,7 @@ class SequencerNodeBootstrap( crypto.pureCrypto, parameters, clock, + crypto.staticSynchronizerParameters, futureSupervisor, synchronizerLoggerFactory, )(topologyHeadInitializer) @@ -1000,6 +1006,10 @@ class SequencerNode( with NamedLogging with HasUptime { + // Provide access such that it can be modified in tests + @VisibleForTesting + def streamCounterCheck: Option[StreamCounterCheck] = sequencerNodeServer.streamCounterCheck + override type Status = SequencerNodeStatus logger.info(s"Creating sequencer server with public api ${config.publicApi}")(TraceContext.empty) @@ -1013,8 +1023,8 @@ class SequencerNode( val ports = Map("public" -> config.publicApi.port, "admin" -> config.adminApi.port) SequencerNodeStatus( - sequencer.synchronizerId.logical.unwrap, - sequencer.synchronizerId, + sequencer.psid.logical.unwrap, + sequencer.psid, uptime(), ports, activeMembers, diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerNodeBootstrapFactory.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerNodeBootstrapFactory.scala index cbb1099fd5..671d9f4d72 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerNodeBootstrapFactory.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerNodeBootstrapFactory.scala @@ -4,10 +4,9 @@ package com.digitalasset.canton.synchronizer.sequencer import com.digitalasset.canton.concurrent.ExecutionContextIdlenessExecutorService -import com.digitalasset.canton.crypto.kms.CommunityKmsFactory -import com.digitalasset.canton.crypto.store.CommunityCryptoPrivateStoreFactory +import com.digitalasset.canton.crypto.store.CryptoPrivateStoreFactory import com.digitalasset.canton.environment.NodeFactoryArguments -import com.digitalasset.canton.resource.CommunityStorageFactory +import com.digitalasset.canton.resource.StorageSingleFactory import com.digitalasset.canton.synchronizer.metrics.SequencerMetrics import com.digitalasset.canton.synchronizer.sequencer.config.{ SequencerNodeConfig, @@ -47,18 +46,17 @@ object CommunitySequencerNodeBootstrapFactory extends SequencerNodeBootstrapFact ): Either[String, SequencerNodeBootstrap] = arguments .toCantonNodeBootstrapCommonArguments( - new CommunityStorageFactory(arguments.config.storage), - new CommunityCryptoPrivateStoreFactory( + new StorageSingleFactory(arguments.config.storage), + new CryptoPrivateStoreFactory( arguments.config.crypto.provider, arguments.config.crypto.kms, - CommunityKmsFactory, arguments.config.parameters.caching.kmsMetadataCache, arguments.config.crypto.privateKeyStore, + replicaManager = None, arguments.futureSupervisor, arguments.clock, arguments.executionContext, ), - CommunityKmsFactory, ) .map { bootstrapCommonArguments => new SequencerNodeBootstrap(bootstrapCommonArguments, CommunitySequencerFactory) diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerReader.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerReader.scala index 41222429e1..1b010fcdad 100644 --- 
a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerReader.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerReader.scala @@ -48,12 +48,13 @@ import com.digitalasset.canton.time.NonNegativeFiniteDuration import com.digitalasset.canton.topology.client.TopologySnapshot import com.digitalasset.canton.topology.processing.EffectiveTime import com.digitalasset.canton.topology.{Member, SequencerId} -import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.tracing.{Spanning, TraceContext} import com.digitalasset.canton.util.PekkoUtil.WithKillSwitch import com.digitalasset.canton.util.PekkoUtil.syntax.* import com.digitalasset.canton.util.ShowUtil.* import com.digitalasset.canton.util.{EitherTUtil, ErrorUtil} import com.digitalasset.canton.version.ProtocolVersion +import io.opentelemetry.api.trace.Tracer import org.apache.pekko.stream.* import org.apache.pekko.stream.scaladsl.{Flow, Keep, Source} import org.apache.pekko.{Done, NotUsed} @@ -128,9 +129,10 @@ class SequencerReader( topologyClientMember: Member, override protected val timeouts: ProcessingTimeout, protected val loggerFactory: NamedLoggerFactory, -)(implicit executionContext: ExecutionContext) +)(implicit executionContext: ExecutionContext, tracer: Tracer) extends NamedLogging with FlagCloseable + with Spanning with HasCloseContext { private val psid = syncCryptoApi.psid @@ -186,6 +188,9 @@ class SequencerReader( _ = logger.debug( s"Current safe watermark is $safeWatermarkTimestampO" ) + _ = logger.debug( + s"Member $member was registered at ${registeredMember.registeredFrom}" + ) // It can happen that a member switching between sequencers runs into a sequencer that is catching up. // In this situation, the sequencer has to wait for the watermark to catch up to the requested timestamp. @@ -266,7 +271,11 @@ class SequencerReader( show"but this sequencer cannot serve timestamps at or before ${lowerBoundText.unquoted} " + show"or below the member's registration timestamp ${registeredMember.registeredFrom}." - logger.error(errorMessage) + // Logging at INFO level because this can happen during normal operations for a decentralized synchronizer + // where a participant updates its sequencer connection config before it has caught up to the point + // where the sequencer was actually onboarded. 
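The `nextReadTimestamp` hunk that follows moves `.immediatePredecessor` inside the `map`: the reading watermark means "read up to and including this timestamp", so only an explicitly requested inclusive start must be shifted back by one, while the member's registration timestamp is used unchanged. In miniature, with hypothetical `Long` timestamps:

object ReadCursorSketch {
  // Before the fix the decrement sat outside the getOrElse,
  // i.e. requested.getOrElse(registeredFrom) - 1, which wrongly
  // decremented the registration timestamp as well.
  def initialCursor(requestedInclusiveStart: Option[Long], registeredFrom: Long): Long =
    requestedInclusiveStart.map(_ - 1).getOrElse(registeredFrom)
}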
+ // TODO(#28184) Make sure that this cannot happen due to misconfiguration of sequencer connections + logger.info(errorMessage) CreateSubscriptionError .EventsUnavailableForTimestamp(readFromTimestampInclusive, errorMessage) }, @@ -286,10 +295,10 @@ class SequencerReader( // This is a "reading watermark" meaning that "we have read up to and including this timestamp", // so if we want to grab the event exactly at timestampInclusive, we do -1 here nextReadTimestamp = readFromTimestampInclusive + .map(_.immediatePredecessor) .getOrElse( registeredMember.registeredFrom - ) - .immediatePredecessor, + ), nextPreviousEventTimestamp = previousEventTimestamp, latestTopologyClientRecipientTimestamp = latestTopologyClientRecipientTimestampO, ) @@ -339,32 +348,33 @@ class SequencerReader( eventTraceContext, ) = unsignedEventData implicit val traceContext: TraceContext = eventTraceContext - logger.trace( - s"Latest topology client timestamp for $member at sequencing timestamp ${event.timestamp} is $previousTopologyClientTimestamp / $latestTopologyClientTimestamp" - ) - - val res = for { - signingSnapshot <- OptionT - .fromOption[FutureUnlessShutdown](topologySnapshotO) - .getOrElseF { - val warnIfApproximate = - event.previousTimestamp.nonEmpty // warn if we are not at genesis - SyncCryptoClient.getSnapshotForTimestamp( - syncCryptoApi, - event.timestamp, - previousTopologyClientTimestamp, - protocolVersion, - warnIfApproximate = warnIfApproximate, - ) - } - _ = logger.debug( - s"Signing event with sequencing timestamp ${event.timestamp} for $member" + withSpan("SequencerReader.signValidatedEvent") { implicit traceContext => span => + logger.trace( + s"Latest topology client timestamp for $member at sequencing timestamp ${event.timestamp} is $previousTopologyClientTimestamp / $latestTopologyClientTimestamp" ) - signed <- synchronizeWithClosing("sign-event")( - signEvent(event, signingSnapshot).value - ) - } yield signed - EitherT(res) + span.setAttribute("member", member.toString) + val res = for { + signingSnapshot <- OptionT + .fromOption[FutureUnlessShutdown](topologySnapshotO) + .getOrElseF { + val warnIfApproximate = + event.previousTimestamp.nonEmpty // warn if we are not at genesis + SyncCryptoClient.getSnapshotForTimestamp( + syncCryptoApi, + event.timestamp, + previousTopologyClientTimestamp, + warnIfApproximate = warnIfApproximate, + ) + } + _ = logger.debug( + s"Signing event with sequencing timestamp ${event.timestamp} for $member" + ) + signed <- synchronizeWithClosing("sign-event")( + signEvent(event, signingSnapshot).value + ) + } yield signed + EitherT(res) + } } def latestTopologyClientTimestampAfter( @@ -420,7 +430,6 @@ class SequencerReader( topologyTimestamp, sequencingTimestamp, topologyClientTimestampBefore, - protocolVersion, // This warning should never be triggered. 
warnIfApproximate = true, _.sequencerTopologyTimestampTolerance, @@ -606,7 +615,6 @@ class SequencerReader( ), ) case payload: BytesPayload => payload.decodeBatchAndTrim(protocolVersion, member) - case batch: FilteredBatch => Batch.trimForMember(batch.batch, member) }) ) } @@ -766,7 +774,6 @@ class SequencerReader( syncCryptoApi, timestamp, topologyClientTimestampBeforeO, - protocolVersion, ) .map(_.ipsSnapshot) )(FutureUnlessShutdown.pure) @@ -912,10 +919,10 @@ object SequencerReader { case None => nextPreviousEventTimestamp }, // set the timestamp to the next timestamp from the read events, or keep the current timestamp if we got no results - nextReadTimestamp = readEvents.nextTimestamp - .getOrElse(nextReadTimestamp), + nextReadTimestamp = readEvents.nextTimestamp.getOrElse(nextReadTimestamp), // did we receive a full batch of events on this update - lastBatchWasFull = readEvents.events.sizeCompare(batchSize) == 0, + // the >= comparison is needed because the events query can return more events than requested in multi-instance setups + lastBatchWasFull = readEvents.events.sizeCompare(batchSize) >= 0, ) override protected def pretty: Pretty[ReadState] = prettyOfClass( diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerRuntime.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerRuntime.scala index c8368d8cb5..386afe40dc 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerRuntime.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerRuntime.scala @@ -5,9 +5,7 @@ package com.digitalasset.canton.synchronizer.sequencer import cats.data.EitherT import cats.syntax.parallel.* -import com.digitalasset.base.error.RpcError import com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.connection.GrpcApiInfoService import com.digitalasset.canton.connection.v30.ApiInfoServiceGrpc import com.digitalasset.canton.crypto.{SigningKeyUsage, SynchronizerCryptoClient} @@ -18,11 +16,6 @@ import com.digitalasset.canton.health.admin.data.TopologyQueueStatus import com.digitalasset.canton.lifecycle.* import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.networking.grpc.CantonGrpcUtil -import com.digitalasset.canton.networking.grpc.ratelimiting.LimitResult.FullMethodName -import com.digitalasset.canton.networking.grpc.ratelimiting.{ - RateLimitingInterceptor, - StreamCounterCheck, -} import com.digitalasset.canton.protocol.StaticSynchronizerParameters import com.digitalasset.canton.resource.Storage import com.digitalasset.canton.sequencer.admin.v30.{ @@ -36,16 +29,15 @@ import com.digitalasset.canton.sequencing.handlers.{ EnvelopeOpener, StripSignature, } -import com.digitalasset.canton.sequencing.protocol.SequencerErrors.Overloaded import com.digitalasset.canton.sequencing.traffic.TrafficControlProcessor import com.digitalasset.canton.store.{IndexedPhysicalSynchronizer, SequencerCounterTrackerStore} import com.digitalasset.canton.synchronizer.metrics.SequencerMetrics -import com.digitalasset.canton.synchronizer.sequencer.SequencerRuntime.SequencerStreamCounterCheck import com.digitalasset.canton.synchronizer.sequencer.admin.data.{ SequencerAdminStatus, SequencerHealthStatus, } import
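The `lastBatchWasFull` change above loosens `==` to `>=` because, in multi-instance setups, the events query can return more rows than the requested batch size. A sketch of why `==` would be wrong for a paging loop, with a hypothetical fetch function:

object FullBatchSketch {
  // fetchNextPage returns up to batchSize rows, but may return more when several
  // writer instances append concurrently; an == check would classify such an
  // over-full page as the last one and stop reading too early.
  def drain[A](batchSize: Int, fetchNextPage: () => Vector[A]): Vector[A] = {
    val page = fetchNextPage()
    if (page.sizeCompare(batchSize) >= 0) page ++ drain(batchSize, fetchNextPage)
    else page
  }
}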
com.digitalasset.canton.synchronizer.sequencer.config.SequencerNodeParameters +import com.digitalasset.canton.synchronizer.sequencer.time.TimeAdvancingTopologySubscriber import com.digitalasset.canton.synchronizer.sequencing.authentication.grpc.SequencerConnectServerInterceptor import com.digitalasset.canton.synchronizer.sequencing.service.* import com.digitalasset.canton.synchronizer.sequencing.service.channel.GrpcSequencerChannelService @@ -73,7 +65,8 @@ import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.{ErrorUtil, FutureUtil} import com.digitalasset.canton.{SequencerCounter, config} import com.google.common.annotations.VisibleForTesting -import io.grpc.{ServerInterceptor, ServerInterceptors, ServerServiceDefinition} +import io.grpc.{ServerInterceptors, ServerServiceDefinition} +import org.apache.pekko.stream.Materializer import scala.concurrent.{ExecutionContext, Future} @@ -128,6 +121,7 @@ class SequencerRuntime( runtimeReadyPromise: PromiseUnlessShutdown[Unit], )(implicit executionContext: ExecutionContext, + materializer: Materializer, traceContext: TraceContext, ) extends FlagCloseable with HasCloseContext @@ -135,7 +129,7 @@ class SequencerRuntime( override protected def timeouts: ProcessingTimeout = localNodeParameters.processingTimeouts - def synchronizerId: PhysicalSynchronizerId = physicalIndexedSynchronizer.synchronizerId + def psid: PhysicalSynchronizerId = physicalIndexedSynchronizer.synchronizerId def initialize()(implicit traceContext: TraceContext @@ -248,26 +242,9 @@ class SequencerRuntime( ) } - val streamCounterCheck: Option[StreamCounterCheck] = - if (localNodeParameters.sequencerApiLimits.nonEmpty) { - Some( - new SequencerStreamCounterCheck( - localNodeParameters.sequencerApiLimits, - localNodeParameters.warnOnUndefinedLimits, - loggerFactory, - ) - ) - } else None - - private val rateLimitInterceptors: List[ServerInterceptor] = streamCounterCheck match { - case Some(check) => List(new RateLimitingInterceptor(List(check.check)), check) - case None => List.empty - } - def sequencerServices(implicit ec: ExecutionContext): Seq[ServerServiceDefinition] = { def interceptAuthentication( - svcDef: ServerServiceDefinition, - additionalInterceptors: Seq[ServerInterceptor], + svcDef: ServerServiceDefinition ) = { import scala.jdk.CollectionConverters.* @@ -275,7 +252,7 @@ class SequencerRuntime( val interceptors = (List( authenticationServices.authenticationServerInterceptor - ) ++ additionalInterceptors).asJava + )).asJava ServerInterceptors.intercept(svcDef, interceptors) } @@ -284,7 +261,7 @@ class SequencerRuntime( ServerInterceptors.intercept( v30.SequencerConnectServiceGrpc.bindService( new GrpcSequencerConnectService( - synchronizerId, + psid, sequencerId, staticSynchronizerParameters, synchronizerTopologyManager, @@ -300,8 +277,7 @@ class SequencerRuntime( v30.SequencerAuthenticationServiceGrpc .bindService(authenticationServices.sequencerAuthenticationService, ec), interceptAuthentication( - v30.SequencerServiceGrpc.bindService(sequencerService, ec), - rateLimitInterceptors, + v30.SequencerServiceGrpc.bindService(sequencerService, ec) ), ApiInfoServiceGrpc.bindService( new GrpcApiInfoService( @@ -312,8 +288,7 @@ class SequencerRuntime( ) :++ sequencerChannelServiceO .map(svc => interceptAuthentication( - v30.SequencerChannelServiceGrpc.bindService(svc, ec), - rateLimitInterceptors, + v30.SequencerChannelServiceGrpc.bindService(svc, ec) ) ) .toList @@ -385,6 +360,18 @@ class SequencerRuntime( } }) + logger.info("Subscribing to 
topology transactions for time-advancing broadcast") + topologyProcessor.subscribe( + new TimeAdvancingTopologySubscriber( + clock, + client, + topologyClient, + psid, + sequencerId, + loggerFactory, + ) + ) + private lazy val synchronizerOutboxO: Option[SynchronizerOutboxHandle] = maybeSynchronizerOutboxFactory .map( @@ -397,11 +384,11 @@ class SequencerRuntime( ) ) - private val topologyHandler = topologyProcessor.createHandler(synchronizerId) + private val topologyHandler = topologyProcessor.createHandler(psid) private val trafficProcessor = new TrafficControlProcessor( syncCrypto, - synchronizerId, + psid, sequencer.rateLimitManager.flatMap(_.balanceKnownUntil), loggerFactory, ) @@ -444,7 +431,7 @@ class SequencerRuntime( .getOrElse(EitherT.rightT[FutureUnlessShutdown, String](())) // Note: we use head snapshot as we want the latest announced upgrade anyway, an overlapping update is idempotent synchronizerUpgradeO <- EitherT.right( - topologyClient.headSnapshot.isSynchronizerUpgradeOngoing() + topologyClient.headSnapshot.synchronizerUpgradeOngoing() ) } yield { synchronizerUpgradeO.foreach { case (successor, effectiveTime) => @@ -469,21 +456,3 @@ class SequencerRuntime( sequencer, )(logger) } - -object SequencerRuntime { - private class SequencerStreamCounterCheck( - initialLimits: Map[String, NonNegativeInt], - warnOnUndefinedLimits: Boolean, - loggerFactory: NamedLoggerFactory, - ) extends StreamCounterCheck(initialLimits, warnOnUndefinedLimits, loggerFactory) { - override protected def errorFactory(methodName: FullMethodName, limit: NonNegativeInt)(implicit - traceContext: TraceContext - ): RpcError = { - val err = Overloaded( - s"Reached the limit of concurrent streams for $methodName. Please try again later" - ) - err.log() - err.toCantonRpcError - } - } -} diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerUtils.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerUtils.scala index 61cfd6938c..379cd74b01 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerUtils.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerUtils.scala @@ -4,7 +4,10 @@ package com.digitalasset.canton.synchronizer.sequencer import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.protocol.DynamicSynchronizerParametersWithValidity +import com.digitalasset.canton.protocol.{ + DynamicSynchronizerParametersHistory, + DynamicSynchronizerParametersWithValidity, +} import com.digitalasset.canton.time.NonNegativeFiniteDuration object SequencerUtils { @@ -64,15 +67,8 @@ object SequencerUtils { upgradeTime: CantonTimestamp, parameterChanges: Seq[DynamicSynchronizerParametersWithValidity], ): NonNegativeFiniteDuration = { - - val maxTime = parameterChanges.foldLeft(upgradeTime) { case (previousBound, parametersChange) => - val parameters = parametersChange.parameters - val maxTime = parametersChange.validUntil.getOrElse(upgradeTime) - - val newBound = maxTime + parameters.decisionTimeout - - newBound.max(previousBound) - } + val maxTime = + DynamicSynchronizerParametersHistory.latestDecisionDeadline(parameterChanges, upgradeTime) NonNegativeFiniteDuration.tryCreate(maxTime - upgradeTime) } diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerWriterSource.scala 
b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerWriterSource.scala index 5f4e11e1d1..e6352e4ae9 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerWriterSource.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerWriterSource.scala @@ -306,7 +306,10 @@ object SequencerWriterSource { Flow[Traced[BatchWritten]].map { tracedBatchWritten => tracedBatchWritten.withTraceContext { _ => batchWritten => batchWritten.events.foreach { events => - store.bufferEvents(events) + store.bufferEvents(events.map(_.map(_.id))) + events.foreach { event => + event.event.payloadO.foreach(store.bufferPayload(_)(event.traceContext)) + } } } tracedBatchWritten @@ -385,6 +388,11 @@ class SendEventGenerator( } def deliver(recipientIds: Set[SequencerMemberId]): StoreEvent[BytesPayload] = { + val finalRecipientIds = if (submission.batch.isBroadcast) { + Set(SequencerMemberId.Broadcast) + } else { + recipientIds + } val payload = BytesPayload( submissionOrOutcome.fold( @@ -397,7 +405,7 @@ class SendEventGenerator( DeliverStoreEvent.ensureSenderReceivesEvent( senderId, submission.messageId, - recipientIds, + finalRecipientIds, payload, submission.topologyTimestamp, trafficReceiptO, diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockOrderer.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockOrderer.scala index f2d2b31cef..3cca515586 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockOrderer.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockOrderer.scala @@ -5,6 +5,7 @@ package com.digitalasset.canton.synchronizer.sequencer.block import cats.data.EitherT import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.sequencer.admin.v30 import com.digitalasset.canton.sequencing.protocol.{ AcknowledgeRequest, @@ -99,4 +100,12 @@ trait BlockOrderer extends AutoCloseable { def sequencerSnapshotAdditionalInfo( timestamp: CantonTimestamp ): EitherT[Future, SequencerError, Option[v30.BftSequencerSnapshotAdditionalInfo]] + + /** Return a "current" sequencing time such that, when a `send` operation is subsequently called, + * if sequenced, the sequencing time of the resulting event is guaranteed to be later than the + * sequencing time previously returned by the `sequencingTime` call. 
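The `SequencerWriterSource` hunk above splits the write-side buffering: events are cached with payload ids only, and the payload bytes go into a separate payload cache. A sketch of the shape of that split, with hypothetical types (not Canton's store API):

object PayloadBufferSketch {
  import scala.collection.mutable

  final case class PayloadId(value: Long)
  final case class Payload(id: PayloadId, bytes: Array[Byte])
  final case class Event[P](recipient: String, payload: P)

  final class Buffers {
    private val events = mutable.ArrayBuffer.empty[Event[PayloadId]]
    private val payloads = mutable.Map.empty[PayloadId, Payload]

    // Events keep only payload ids; the bytes are stored once in a separate
    // cache, so one payload can serve many recipients' event streams.
    def buffer(batch: Seq[Event[Payload]]): Unit = {
      events ++= batch.map(e => Event(e.recipient, e.payload.id))
      batch.foreach(e => payloads.update(e.payload.id, e.payload))
    }
  }
}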
+ */ + def sequencingTime(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Option[CantonTimestamp]] } diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencer.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencer.scala index c455960c60..da2d41ca80 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencer.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencer.scala @@ -89,7 +89,7 @@ class BlockSequencer( loggerFactory: NamedLoggerFactory, exitOnFatalFailures: Boolean, runtimeReady: FutureUnlessShutdown[Unit], -)(implicit executionContext: ExecutionContext, materializer: Materializer, tracer: Tracer) +)(implicit executionContext: ExecutionContext, materializer: Materializer, val tracer: Tracer) extends DatabaseSequencer( SequencerWriterStoreFactory.singleInstance, dbSequencerStore, @@ -172,7 +172,7 @@ class BlockSequencer( metrics, loggerFactory, memberValidator = memberValidator, - )(CloseContext(cryptoApi)) + )(CloseContext(cryptoApi), tracer) implicit val traceContext: TraceContext = TraceContext.empty @@ -358,11 +358,8 @@ class BlockSequencer( ) for { - _ <- rejectSubmissionsBeforeOrAtSequencingTimeLowerBound() - _ <- - if (submission.isConfirmationRequest) rejectSubmissionsIfOverloaded(submission) - else EitherT.rightT[FutureUnlessShutdown, SequencerDeliverError](()) + _ <- rejectSubmissionsIfOverloaded(submission) // TODO(i17584): revisit the consequences of no longer enforcing that // aggregated submissions with signed envelopes define a topology snapshot _ <- validateMaxSequencingTime(submission) @@ -619,14 +616,11 @@ class BlockSequencer( _ = logger.trace(s"Storage active: ${storage.isActive}") } yield { if (!ledgerStatus.isActive) SequencerHealthStatus(isActive = false, ledgerStatus.description) - else if (!isStorageActive) - SequencerHealthStatus(isActive = false, Some("Can't connect to database")) - else if (circuitBreaker.shouldRejectRequests(SubmissionRequestType.ConfirmationRequest)) + else SequencerHealthStatus( - isActive = false, - Some("Overloaded. 
Can't receive requests at the moment"), + isStorageActive, + if (isStorageActive) None else Some("Can't connect to database"), ) - else SequencerHealthStatus(isActive = true, None) } override protected def closeAsync(): Seq[AsyncOrSyncCloseable] = { @@ -758,4 +752,9 @@ class BlockSequencer( timestamp, stateManager.getHeadState.block.latestSequencerEventTimestamp, ) + + override def sequencingTime(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Option[CantonTimestamp]] = + blockOrderer.sequencingTime } diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencerCircuitBreaker.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencerCircuitBreaker.scala index f547257253..3cc6b398c5 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencerCircuitBreaker.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencerCircuitBreaker.scala @@ -69,8 +69,14 @@ class BlockSequencerCircuitBreaker( previousTimestamp.set(timestamp) } - def shouldRejectRequests(submissionRequestType: SubmissionRequestType): Boolean = - enabled.get() && pekkoCircuitBreakers.get(submissionRequestType).forall(_.shouldRejectRequests) + def shouldRejectRequests(submissionRequestType: SubmissionRequestType): Boolean = { + val subTypeKey = submissionRequestType match { + case SubmissionRequestType.Unexpected(_) => + BlockSequencerCircuitBreaker.unexpectedSubmissionRequestTypeKey + case x => x + } + enabled.get() && pekkoCircuitBreakers.get(subTypeKey).forall(_.shouldRejectRequests) + } def shouldRejectRequests(submissionRequest: SubmissionRequest): Boolean = shouldRejectRequests(submissionRequest.requestType) @@ -94,6 +100,7 @@ class BlockSequencerCircuitBreaker( messages.topUp -> "top up", messages.topology -> "topology", messages.timeProof -> "time proof", + messages.unexpected -> "unexpected", messages.acknowledgement -> "acknowledgment", ).groupBy(_._1).map { case (config, group) => val messageNames = group.map(_._2) @@ -115,6 +122,7 @@ class BlockSequencerCircuitBreaker( SubmissionRequestType.TopUpMed -> messages.topUp, SubmissionRequestType.TopologyTransaction -> messages.topology, SubmissionRequestType.TimeProof -> messages.timeProof, + BlockSequencerCircuitBreaker.unexpectedSubmissionRequestTypeKey -> messages.unexpected, ).fmap(configToCircuitBreaker(_)), ) } @@ -122,6 +130,7 @@ class BlockSequencerCircuitBreaker( } object BlockSequencerCircuitBreaker { + private val unexpectedSubmissionRequestTypeKey = SubmissionRequestType.Unexpected("unexpected") class IndividualCircuitBreaker( config: IndividualCircuitBreakerConfig, diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencerFactory.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencerFactory.scala index e9f670f1ac..7b4186b642 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencerFactory.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencerFactory.scala @@ -9,7 +9,6 @@ import cats.syntax.traverse.* import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.crypto.SynchronizerCryptoClient import 
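In the circuit-breaker hunk above, `SubmissionRequestType.Unexpected` carries a free-form string, so every `Unexpected(...)` value is collapsed to a single canonical key before the per-type lookup; otherwise each distinct string would miss the configured map. A sketch of that normalization:

object BreakerKeySketch {
  sealed trait RequestType
  case object TimeProof extends RequestType
  final case class Unexpected(detail: String) extends RequestType

  // All Unexpected(...) values share one circuit breaker, keyed canonically.
  private val canonicalUnexpected: RequestType = Unexpected("unexpected")

  def breakerKey(t: RequestType): RequestType = t match {
    case Unexpected(_) => canonicalUnexpected
    case other => other
  }
}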
com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.environment.CantonNodeParameters import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, LifeCycle, UnlessShutdown} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.resource.Storage @@ -19,6 +18,7 @@ import com.digitalasset.canton.synchronizer.block.data.SequencerBlockStore import com.digitalasset.canton.synchronizer.metrics.SequencerMetrics import com.digitalasset.canton.synchronizer.sequencer.* import com.digitalasset.canton.synchronizer.sequencer.DatabaseSequencerConfig.TestingInterceptor +import com.digitalasset.canton.synchronizer.sequencer.config.SequencerNodeParameters import com.digitalasset.canton.synchronizer.sequencer.traffic.{ SequencerRateLimitManager, SequencerTrafficConfig, @@ -50,7 +50,7 @@ abstract class BlockSequencerFactory( storage: Storage, protocolVersion: ProtocolVersion, sequencerId: SequencerId, - nodeParameters: CantonNodeParameters, + nodeParameters: SequencerNodeParameters, override val loggerFactory: NamedLoggerFactory, testingInterceptor: Option[TestingInterceptor], metrics: SequencerMetrics, @@ -238,8 +238,10 @@ abstract class BlockSequencerFactory( synchronizerSyncCryptoApi.psid, store, trafficConsumedStore, + nodeParameters.asyncWriter, nodeParameters.enableAdditionalConsistencyChecks, nodeParameters.processingTimeouts, + futureSupervisor, synchronizerLoggerFactory, blockSequencerConfig.streamInstrumentation, metrics.block, diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/DriverBlockOrderer.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/DriverBlockOrderer.scala index 904c63f0b8..ab65b8c99e 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/DriverBlockOrderer.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/DriverBlockOrderer.scala @@ -5,6 +5,7 @@ package com.digitalasset.canton.synchronizer.sequencer.block import cats.data.EitherT import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.sequencer.admin.v30 import com.digitalasset.canton.sequencing.protocol.* import com.digitalasset.canton.synchronizer.block.{ @@ -69,4 +70,14 @@ class DriverBlockOrderer( timestamp: CantonTimestamp ): EitherT[Future, SequencerError, Option[v30.BftSequencerSnapshotAdditionalInfo]] = EitherT.rightT(None) + + override def sequencingTime(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Option[CantonTimestamp]] = + FutureUnlessShutdown.outcomeF( + driver.sequencingTime.map(t => + t.map(CantonTimestamp.fromProtoPrimitive) + .map(_.fold(e => throw new RuntimeException(e.message), identity)) + ) + ) } diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/DriverBlockSequencerFactory.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/DriverBlockSequencerFactory.scala index f004793927..5b56b44877 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/DriverBlockSequencerFactory.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/DriverBlockSequencerFactory.scala @@ -6,7 +6,6 @@ package 
com.digitalasset.canton.synchronizer.sequencer.block import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.crypto.SynchronizerCryptoClient import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.environment.CantonNodeParameters import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.resource.Storage @@ -17,6 +16,7 @@ import com.digitalasset.canton.synchronizer.block.{ } import com.digitalasset.canton.synchronizer.metrics.SequencerMetrics import com.digitalasset.canton.synchronizer.sequencer.DatabaseSequencerConfig.TestingInterceptor +import com.digitalasset.canton.synchronizer.sequencer.config.SequencerNodeParameters import com.digitalasset.canton.synchronizer.sequencer.traffic.SequencerRateLimitManager import com.digitalasset.canton.synchronizer.sequencer.{ AuthenticationServices, @@ -47,7 +47,7 @@ class DriverBlockSequencerFactory[C]( storage: Storage, protocolVersion: ProtocolVersion, sequencerId: SequencerId, - nodeParameters: CantonNodeParameters, + nodeParameters: SequencerNodeParameters, metrics: SequencerMetrics, override val loggerFactory: NamedLoggerFactory, testingInterceptor: Option[TestingInterceptor], @@ -142,7 +142,7 @@ object DriverBlockSequencerFactory extends LazyLogging { storage: Storage, protocolVersion: ProtocolVersion, sequencerId: SequencerId, - nodeParameters: CantonNodeParameters, + nodeParameters: SequencerNodeParameters, metrics: SequencerMetrics, loggerFactory: NamedLoggerFactory, testingInterceptor: Option[TestingInterceptor], @@ -183,7 +183,7 @@ object DriverBlockSequencerFactory extends LazyLogging { storage: Storage, protocolVersion: ProtocolVersion, sequencerId: SequencerId, - nodeParameters: CantonNodeParameters, + nodeParameters: SequencerNodeParameters, metrics: SequencerMetrics, loggerFactory: NamedLoggerFactory, )(implicit ec: ExecutionContext): DriverBlockSequencerFactory[C] = diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/canton/sequencing/BftBlockOrderer.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/canton/sequencing/BftBlockOrderer.scala index 51316d17c5..8ac5f785e6 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/canton/sequencing/BftBlockOrderer.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/canton/sequencing/BftBlockOrderer.scala @@ -16,7 +16,7 @@ import com.digitalasset.canton.environment.CantonNodeParameters import com.digitalasset.canton.lifecycle.* import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.networking.grpc.CantonServerBuilder -import com.digitalasset.canton.resource.{Storage, StorageSetup} +import com.digitalasset.canton.resource.{Storage, StorageSingleSetup} import com.digitalasset.canton.sequencer.admin.v30 import com.digitalasset.canton.sequencer.api.v30.SequencerAuthenticationServiceGrpc import com.digitalasset.canton.sequencing.protocol.* @@ -38,17 +38,11 @@ import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings CantonOrderingTopologyProvider, SequencerNodeId, } +import 
com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings.p2p.grpc.* import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings.p2p.grpc.P2PGrpcNetworking.P2PEndpoint import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings.p2p.grpc.PekkoP2PGrpcNetworking.PekkoP2PGrpcNetworkManager import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings.p2p.grpc.authentication.ServerAuthenticatingServerInterceptor -import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings.p2p.grpc.{ - P2PGrpcBftOrderingService, - P2PGrpcConnectionManager, - P2PGrpcConnectionState, - P2PGrpcNetworking, - P2PGrpcServerManager, - P2PGrpcStreamingReceiver, -} +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings.p2p.grpc.standalone.P2PGrpcStandaloneBftOrderingService import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings.pekko.PekkoModuleSystem.{ PekkoEnv, PekkoFutureUnlessShutdown, @@ -57,15 +51,20 @@ import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings CloseableActorSystem, PekkoModuleSystem, } +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings.standalone.topology.FixedFileBasedOrderingTopologyProvider import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.BftBlockOrdererConfig.{ DefaultAuthenticationTokenManagerConfig, P2PConnectionManagementConfig, } import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.BftOrderingModuleSystemInitializer.BftOrderingStores +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.integration.canton.topology.OrderingTopologyProvider import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.modules.availability.data.AvailabilityStore import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.modules.consensus.iss.data.EpochStore -import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.modules.output.PekkoBlockSubscription import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.modules.output.data.OutputMetadataStore +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.modules.output.{ + OutputModule, + PekkoBlockSubscription, +} import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.modules.p2p.P2PNetworkOutModule import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.modules.p2p.data.P2PEndpointsStore import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.modules.pruning.BftOrdererPruningScheduler @@ -76,6 +75,7 @@ import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.{ } import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.framework.Module.SystemInitializer import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.framework.data.BftOrderingIdentifiers.{ + BftNodeId, BlockNumber, EpochLength, } @@ -92,6 +92,11 @@ import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.framewor } import com.digitalasset.canton.synchronizer.sequencer.errors.SequencerError import com.digitalasset.canton.synchronizer.sequencer.{AuthenticationServices, SequencerSnapshot} +import com.digitalasset.canton.synchronizer.sequencing.sequencer.bftordering.standalone.v1.{ + SendRequest, + SendResponse, + StandaloneBftOrderingServiceGrpc, +} import 
com.digitalasset.canton.synchronizer.sequencing.sequencer.bftordering.v30.{ BftOrderingMessage, BftOrderingServiceGrpc, @@ -99,14 +104,15 @@ import com.digitalasset.canton.synchronizer.sequencing.sequencer.bftordering.v30 import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.{Member, PhysicalSynchronizerId, SequencerId} import com.digitalasset.canton.tracing.{TraceContext, Traced} +import com.digitalasset.canton.util.{PekkoUtil, SingleUseCell} import com.digitalasset.canton.version.ProtocolVersion import com.google.common.annotations.VisibleForTesting import com.google.protobuf.ByteString import io.grpc.stub.StreamObserver import io.grpc.{ServerInterceptors, ServerServiceDefinition} import io.opentelemetry.api.trace.Tracer -import org.apache.pekko.stream.scaladsl.Source -import org.apache.pekko.stream.{KillSwitch, Materializer} +import org.apache.pekko.stream.scaladsl.{Keep, Sink, Source} +import org.apache.pekko.stream.{KillSwitch, KillSwitches, Materializer} import java.security.SecureRandom import java.time.Instant @@ -130,7 +136,6 @@ final class BftBlockOrderer( exitOnFatalFailures: Boolean, metrics: BftOrderingMetrics, override val loggerFactory: NamedLoggerFactory, - dedicatedStorageSetup: StorageSetup, queryCostMonitoring: Option[QueryCostMonitoringConfig], )(implicit executionContext: ExecutionContext, materializer: Materializer, tracer: Tracer) extends BlockOrderer @@ -152,7 +157,10 @@ final class BftBlockOrderer( private val isAuthenticationEnabled = config.initialNetwork.exists(_.endpointAuthentication.enabled) - private val thisNode = SequencerNodeId.toBftNodeId(sequencerId) + private val thisNode = + config.standalone.fold(SequencerNodeId.toBftNodeId(sequencerId)) { standaloneConfig => + BftNodeId(standaloneConfig.thisSequencerId) + } // The initial metrics factory, which also pre-initializes histograms (as required by OpenTelemetry), is built // very early in the Canton bootstrap process, before unique IDs for synchronizer nodes are even available, @@ -179,6 +187,8 @@ final class BftBlockOrderer( override val timeouts: ProcessingTimeout = nodeParameters.processingTimeouts + private val standaloneServiceRef = new SingleUseCell[P2PGrpcStandaloneBftOrderingService] + override def firstBlockHeight: Long = sequencerSubscriptionInitialHeight checkConfigSecurity() @@ -189,9 +199,13 @@ final class BftBlockOrderer( noTracingLogger, ) + // Standalone mode doesn't support authentication private val maybeAuthenticationServices = Option - .when(config.initialNetwork.exists(_.endpointAuthentication.enabled))( + .when( + config.initialNetwork + .exists(_.endpointAuthentication.enabled && config.standalone.isEmpty) + )( authenticationServicesO ) .flatten @@ -226,7 +240,7 @@ final class BftBlockOrderer( config.storage match { case Some(storageConfig) => logger.info("Using a dedicated storage configuration for BFT ordering tables") - dedicatedStorageSetup.tryCreateAndMigrateStorage( + StorageSingleSetup.tryCreateAndMigrateStorage( storageConfig, queryCostMonitoring, clock, @@ -242,8 +256,9 @@ final class BftBlockOrderer( private val p2pGrpcConnectionState = new P2PGrpcConnectionState(thisNode, loggerFactory) private val p2pEndpointsStore = setupP2PEndpointsStore(localStorage) - private val availabilityStore = AvailabilityStore(localStorage, timeouts, loggerFactory) - private val epochStore = EpochStore(localStorage, timeouts, loggerFactory) + private val availabilityStore = + AvailabilityStore(config.batchAggregator, localStorage, timeouts, loggerFactory) + private 
val epochStore = EpochStore(config.batchAggregator, localStorage, timeouts, loggerFactory) private val outputStore = OutputMetadataStore(localStorage, timeouts, loggerFactory) private val pruningSchedulerStore = BftOrdererPruningSchedulerStore(localStorage, timeouts, loggerFactory) @@ -267,6 +282,8 @@ final class BftBlockOrderer( private val isOrdererHealthy = new AtomicBoolean(true) + private val outputPreviousStoredBlock = new OutputModule.PreviousStoredBlock + private val PekkoModuleSystem.PekkoModuleSystemInitResult( actorSystem, initResult, @@ -279,6 +296,9 @@ final class BftBlockOrderer( private val outputModuleRef = initResult.outputModuleRef private val p2pNetworkManager = initResult.p2pNetworkManager + @SuppressWarnings(Array("org.wartremover.warts.Var")) + @volatile private var warnedAboutStandaloneSend = false + // Start the gRPC server only now because it needs the modules to be available before serving requests, // else creating a peer receiver could end up with a `null` input module. p2pGrpcServerManager.startServer() @@ -303,6 +323,21 @@ final class BftBlockOrderer( abort = sys.error ) + private val standaloneSubscriptionKillSwitchF = Option.when(config.standalone.isDefined) { + implicit val traceContext: TraceContext = TraceContext.empty + PekkoUtil + .runSupervised( + blockSubscription + .subscription() + .map(b => + // The server is started earlier if standalone mode is enabled + standaloneServiceRef.get.foreach(_.push(b)) + ) + .toMat(Sink.ignore)(Keep.both), + errorLogMessagePrefix = "Failed to handle state changes", + ) + } + private def setupP2PEndpointsStore(storage: Storage): P2PEndpointsStore[PekkoEnv] = { val store = P2PEndpointsStore(storage, timeouts, loggerFactory) config.initialNetwork.foreach { network => @@ -351,6 +386,17 @@ final class BftBlockOrderer( outputStore, pruningSchedulerStore, ) + val topologyProvider = + config.standalone.fold[OrderingTopologyProvider[PekkoEnv]]( + new CantonOrderingTopologyProvider(cryptoApi, loggerFactory, metrics) + ) { standaloneConfig => + new FixedFileBasedOrderingTopologyProvider( + standaloneConfig, + cryptoApi.pureCrypto, + metrics, + ) + } + new BftOrderingModuleSystemInitializer( thisNode, config, @@ -359,7 +405,7 @@ final class BftBlockOrderer( // TODO(#19289) support dynamically configurable epoch length EpochLength(config.epochLength), stores, - new CantonOrderingTopologyProvider(cryptoApi, loggerFactory, metrics), + topologyProvider, blockSubscription, sequencerSnapshotAdditionalInfo, bootstrapMembership => @@ -369,6 +415,7 @@ final class BftBlockOrderer( metrics, loggerFactory, timeouts, + outputPreviousStoredBlock = outputPreviousStoredBlock, ) } @@ -472,6 +519,19 @@ final class BftBlockOrderer( ).flatten.asJava, ) ) + config.standalone.foreach { _ => + val standaloneService = + new P2PGrpcStandaloneBftOrderingService(orderSendRequest, loggerFactory) + standaloneServiceRef.putIfAbsent(standaloneService).discard + activeServerBuilder + .addService( + StandaloneBftOrderingServiceGrpc.bindService( + standaloneService, + executionContext, + ) + ) + .discard + } // Also offer the authentication service on BFT P2P endpoints, so that the BFT orderers don't have to also know the sequencer API endpoints maybeAuthenticationServices.fold( logger.info("P2P authentication disabled") @@ -494,19 +554,28 @@ final class BftBlockOrderer( override def send( signedSubmissionRequest: SignedSubmissionRequest - )(implicit traceContext: TraceContext): EitherT[Future, SequencerDeliverError, Unit] = { - logger.debug( - "sending 
submission " + - s"with message ID ${signedSubmissionRequest.content.sender} " + - s"from ${signedSubmissionRequest.content.sender} " + - s"to ${signedSubmissionRequest.content.batch.allRecipients} " - ) - sendToMempool( - SendTag, - signedSubmissionRequest.content.sender, - signedSubmissionRequest.toByteString, - ) - } + )(implicit traceContext: TraceContext): EitherT[Future, SequencerDeliverError, Unit] = + config.standalone.fold { + logger.debug( + "sending submission " + + s"with message ID ${signedSubmissionRequest.content.sender} " + + s"from ${signedSubmissionRequest.content.sender} " + + s"to ${signedSubmissionRequest.content.batch.allRecipients} " + ) + sendToMempool( + SendTag, + signedSubmissionRequest.content.sender, + signedSubmissionRequest.toByteString, + ) + } { _ => + if (!warnedAboutStandaloneSend) { + logger.warn( + "BFT standalone mode enabled: ignoring send requests and not sending anything to the mempool" + ) + warnedAboutStandaloneSend = true + } + EitherT.rightT(()) + } override def acknowledge(signedAcknowledgeRequest: SignedContent[AcknowledgeRequest])(implicit traceContext: TraceContext @@ -540,7 +609,21 @@ final class BftBlockOrderer( override def subscribe( )(implicit traceContext: TraceContext): Source[RawLedgerBlock, KillSwitch] = - blockSubscription.subscription().map(BlockFormat.blockOrdererBlockToRawLedgerBlock(logger)) + config.standalone.fold( + blockSubscription.subscription().map(BlockFormat.blockOrdererBlockToRawLedgerBlock(logger)) + ) { _ => + logger.warn("BFT standalone mode enabled: not subscribing to any blocks") + Source + .empty[RawLedgerBlock] + .viaMat(KillSwitches.single)( + Keep.right + ) // In non-standalone mode, the block subscription is not used + } + + override def sequencingTime(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Option[CantonTimestamp]] = + FutureUnlessShutdown.pure(outputPreviousStoredBlock.getBlockNumberAndBftTime.map(_._2)) override protected def closeAsync(): Seq[AsyncOrSyncCloseable] = { logger.debug("Beginning async BFT block orderer shutdown")(TraceContext.empty) @@ -574,8 +657,19 @@ final class BftBlockOrderer( ), SyncCloseable("p2pServerGrpcExecutor.shutdown()", p2pServerGrpcExecutor.shutdown()), ) ++ + // The kill switch ensures that we don't process the remaining contents of the queue buffer + standaloneSubscriptionKillSwitchF + .map(ks => + SyncCloseable( + "standaloneSubscriptionKillSwitch.shutdown()", + ks._1.shutdown(), + ) + ) + .toList ++ // Shutdown the reused Canton member authentication services, if authentication is enabled - maybeServerAuthenticatingFilter.map(_.closeAsync()).getOrElse(Seq.empty) + maybeServerAuthenticatingFilter.map(_.closeAsync()).getOrElse(Seq.empty) ++ + standaloneServiceRef.get.toList + .map(s => SyncCloseable("standaloneServiceRef.close()", s.close())) } override def adminServices: Seq[ServerServiceDefinition] = @@ -641,6 +735,19 @@ final class BftBlockOrderer( tag: String, sender: Member, payload: ByteString, + )(implicit traceContext: TraceContext): EitherT[Future, SequencerDeliverError, Unit] = + sendToMempoolGeneric(tag, payload, Some(sender)) + + private def orderSendRequest( + request: SendRequest + )(implicit traceContext: TraceContext): Future[SendResponse] = + sendToMempoolGeneric(request.tag, request.payload) + .fold(e => SendResponse(Some(e.cause)), _ => SendResponse(None)) + + private def sendToMempoolGeneric( + tag: String, + payload: ByteString, + sender: Option[Member] = None, )(implicit traceContext: TraceContext): EitherT[Future, 
SequencerDeliverError, Unit] = { val replyPromise = Promise[SequencerNode.Message]() val replyRef = new ModuleRef[SequencerNode.Message] { @@ -660,7 +767,7 @@ final class BftBlockOrderer( ) ), Some(replyRef), - Some(sender), + sender, ) ) EitherT(replyPromise.future.map { diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/canton/sequencing/BftSequencerFactory.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/canton/sequencing/BftSequencerFactory.scala index 7ab43901d3..6b3567e4f4 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/canton/sequencing/BftSequencerFactory.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/canton/sequencing/BftSequencerFactory.scala @@ -6,10 +6,9 @@ package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.binding import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.crypto.SynchronizerCryptoClient import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.environment.CantonNodeParameters import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.resource.{CommunityStorageSetup, Storage, StorageSetup} +import com.digitalasset.canton.resource.Storage import com.digitalasset.canton.synchronizer.block.BlockSequencerStateManager import com.digitalasset.canton.synchronizer.block.data.SequencerBlockStore import com.digitalasset.canton.synchronizer.metrics.SequencerMetrics @@ -17,6 +16,7 @@ import com.digitalasset.canton.synchronizer.sequencer.DatabaseSequencerConfig.Te import com.digitalasset.canton.synchronizer.sequencer.block.BlockSequencerFactory.OrderingTimeFixMode import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.BftBlockOrdererConfig import com.digitalasset.canton.synchronizer.sequencer.block.{BlockSequencer, BlockSequencerFactory} +import com.digitalasset.canton.synchronizer.sequencer.config.SequencerNodeParameters import com.digitalasset.canton.synchronizer.sequencer.traffic.SequencerRateLimitManager import com.digitalasset.canton.synchronizer.sequencer.{ AuthenticationServices, @@ -41,11 +41,10 @@ class BftSequencerFactory( storage: Storage, protocolVersion: ProtocolVersion, sequencerId: SequencerId, - nodeParameters: CantonNodeParameters, + nodeParameters: SequencerNodeParameters, metrics: SequencerMetrics, override val loggerFactory: NamedLoggerFactory, testingInterceptor: Option[TestingInterceptor], - storageSetup: StorageSetup = CommunityStorageSetup, )(implicit ec: ExecutionContext) extends BlockSequencerFactory( health, @@ -106,7 +105,6 @@ class BftSequencerFactory( nodeParameters.exitOnFatalFailures, metrics.bftOrdering, synchronizerLoggerFactory, - storageSetup, nodeParameters.loggingConfig.queryCost, ), name, diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/p2p/grpc/P2PGrpcConnectionState.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/p2p/grpc/P2PGrpcConnectionState.scala index d4a83ea4dc..1d590d7e60 100644 --- 
a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/p2p/grpc/P2PGrpcConnectionState.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/p2p/grpc/P2PGrpcConnectionState.scala @@ -15,7 +15,10 @@ import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.framewor P2PAddress, P2PNetworkRef, } -import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.utils.Miscellaneous.mutex +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.utils.Miscellaneous.{ + abort, + mutex, +} import com.digitalasset.canton.synchronizer.sequencing.sequencer.bftordering.v30.BftOrderingMessage import com.digitalasset.canton.tracing.TraceContext import io.grpc.stub.StreamObserver @@ -393,33 +396,53 @@ final class P2PGrpcConnectionState( .collect { case (endpointId, nodeId) if nodeId == bftNodeId => endpointId } - bftNodeIdToNetworkRef - .get(bftNodeId) - .orElse( - p2pEndpointIds - .flatMap(p2pEndpointIdToNetworkRef.get) - .headOption - ) - .foreach { case e @ P2PNetworkRefEntry(existingNetworkRef, isOutgoingConnection) => - bftNodeIdToNetworkRef - .put(bftNodeId, e) - .foreach { case P2PNetworkRefEntry(previousNetworkRef, _) => - closePreviousNetworkRefIfDuplicate(existingNetworkRef, previousNetworkRef, bftNodeId) - } - p2pEndpointIds.foreach { p2pEndpoint => - p2pEndpointIdToNetworkRef - .put(p2pEndpoint, P2PNetworkRefEntry(existingNetworkRef, isOutgoingConnection)) - .foreach { case P2PNetworkRefEntry(previousNetworkRef, _) => - closePreviousNetworkRefIfDuplicate( - existingNetworkRef, - previousNetworkRef, - p2pEndpoint.toString, + val maybeExistingNetworkRefAssociatedToNodeIdOrElseEndpoint = + bftNodeIdToNetworkRef + .get(bftNodeId) + .map(_ -> true) + .orElse( + p2pEndpointIds.view + .flatMap(p2pEndpointIdToNetworkRef.get) + .map(_ -> false) + .headOption + ) + + maybeExistingNetworkRefAssociatedToNodeIdOrElseEndpoint + .foreach { + // Prioritize the network ref associated to the BFT node ID (potentially an incoming connection) + + case (e, isAssociatedToNodeId) if isAssociatedToNodeId => + updateEndpointsNetworkRef(p2pEndpointIds, e) + + case (e, _) => // Associated to endpoint + bftNodeIdToNetworkRef + .put(bftNodeId, e) + .foreach(impossibleNetworkRefEntry => + abort( + logger, + s"Unexpected existing network ref ${impossibleNetworkRefEntry.networkRef} associated to node ID $bftNodeId", ) - } - } + ) + updateEndpointsNetworkRef(p2pEndpointIds, e) } } + private def updateEndpointsNetworkRef( + p2pEndpointIds: Iterable[P2PEndpoint.Id], + existingNetworkRefEntry: P2PNetworkRefEntry, + )(implicit traceContext: TraceContext): Unit = + p2pEndpointIds.foreach { p2pEndpoint => + p2pEndpointIdToNetworkRef + .put(p2pEndpoint, existingNetworkRefEntry) + .foreach { case P2PNetworkRefEntry(previousNetworkRefAssociatedToEndpoint, _) => + closePreviousNetworkRefIfDuplicate( + toBeClosedIfDuplicate = previousNetworkRefAssociatedToEndpoint, + kept = existingNetworkRefEntry.networkRef, + p2pEndpoint.toString, + ) + } + } + private def cleanupNetworkRef( bftNodeId: BftNodeId, clearNetworkRefAssociations: Boolean, @@ -461,19 +484,19 @@ final class P2PGrpcConnectionState( } private def closePreviousNetworkRefIfDuplicate( - networkRef: P2PNetworkRef[BftOrderingMessage], - previousNetworkRef: P2PNetworkRef[BftOrderingMessage], + toBeClosedIfDuplicate: P2PNetworkRef[BftOrderingMessage], + kept: P2PNetworkRef[BftOrderingMessage], connectionId: 
String, )(implicit traceContext: TraceContext): Unit = - if (previousNetworkRef != networkRef) { + if (toBeClosedIfDuplicate != kept) { logger.debug( - s"Replacing network ref for $connectionId from $previousNetworkRef to $networkRef " + + s"Replacing network ref $toBeClosedIfDuplicate (for $connectionId) with $kept " + "and closing the previous one" ) - previousNetworkRef.close() + toBeClosedIfDuplicate.close() } else { logger.debug( - s"Keeping network ref $networkRef for $connectionId, no change" + s"Keeping network ref $toBeClosedIfDuplicate for $connectionId, no change" ) } } diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/p2p/grpc/standalone/P2PGrpcStandaloneBftOrderingService.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/p2p/grpc/standalone/P2PGrpcStandaloneBftOrderingService.scala new file mode 100644 index 0000000000..8ce691c00a --- /dev/null +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/p2p/grpc/standalone/P2PGrpcStandaloneBftOrderingService.scala @@ -0,0 +1,86 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings.p2p.grpc.standalone + +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.synchronizer.block.BlockFormat +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings.p2p.grpc.P2PGrpcNetworking.{ + completeGrpcStreamObserver, + failGrpcStreamObserver, +} +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.utils.Miscellaneous.mutex +import com.digitalasset.canton.synchronizer.sequencing.sequencer.bftordering.standalone.v1.{ + Ordered as ProtoOrdered, + ReadOrderedRequest, + ReadOrderedResponse, + SendRequest, + SendResponse, + StandaloneBftOrderingServiceGrpc, +} +import com.digitalasset.canton.tracing.TraceContext +import io.grpc.stub.StreamObserver + +import scala.collection.mutable +import scala.concurrent.Future + +class P2PGrpcStandaloneBftOrderingService( + orderSendRequest: SendRequest => Future[SendResponse], + override val loggerFactory: NamedLoggerFactory, +) extends StandaloneBftOrderingServiceGrpc.StandaloneBftOrderingService + with NamedLogging + with AutoCloseable { + + private val readers = mutable.ListBuffer[(Long, StreamObserver[ReadOrderedResponse])]() + + def push(block: BlockFormat.Block): Unit = { + val failed = mutable.ListBuffer[StreamObserver[ReadOrderedResponse]]() + mutex(this) { + readers.foreach { case (minHeight, peerSender) => + if (block.blockHeight >= minHeight) { + val response = + ReadOrderedResponse( + block.blockHeight, + block.requests.map(r => ProtoOrdered(r.value.tag, r.value.body)), + ) + try { + peerSender.onNext(response) + } catch { + case e: Throwable => + logger.error( + s"Failed to push block ${block.blockHeight} to reader $peerSender " + + s"with minHeight $minHeight: ${e.getMessage}", + e, + )(TraceContext.empty) + failGrpcStreamObserver(peerSender, e, logger)(TraceContext.empty) + failed.addOne(peerSender).discard + } + } + } + } + mutex(this) { + readers.filterInPlace { case (_, peerSender) => + !failed.contains(peerSender) + }.discard + } + }
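+ + // Standalone-mode write path: forwards a client's send request to the BFT orderer, which queues the + // payload in the mempool (via the `orderSendRequest` callback wired up in `BftBlockOrderer`). + override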
def send(request: SendRequest): Future[SendResponse] = + orderSendRequest(request) + + override def readOrdered( + request: ReadOrderedRequest, + peerSender: StreamObserver[ReadOrderedResponse], + ): Unit = mutex(this) { + readers.addOne(request.startHeight -> peerSender).discard + } + + override def close(): Unit = + mutex(this) { + readers.foreach { case (_, peerSender) => + completeGrpcStreamObserver(peerSender, logger)(TraceContext.empty) + } + readers.clear() + } +} diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/standalone/crypto/FixedKeysCryptoProvider.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/standalone/crypto/FixedKeysCryptoProvider.scala new file mode 100644 index 0000000000..413e202281 --- /dev/null +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/standalone/crypto/FixedKeysCryptoProvider.scala @@ -0,0 +1,124 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings.standalone.crypto + +import com.daml.metrics.api.MetricsContext +import com.digitalasset.canton.crypto.{ + CryptoPureApi, + Hash, + Signature, + SignatureCheckError, + SigningPrivateKey, + SigningPublicKey, + SyncCryptoError, +} +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.serialization.ProtocolVersionedMemoizedEvidence +import com.digitalasset.canton.synchronizer.metrics.BftOrderingMetrics +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings.pekko.PekkoModuleSystem +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings.pekko.PekkoModuleSystem.{ + PekkoEnv, + PekkoFutureUnlessShutdown, +} +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.integration.canton.crypto.CryptoProvider +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.integration.canton.crypto.CryptoProvider.{ + BftOrderingSigningKeyUsage, + hashForMessage, + timeCrypto, +} +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.framework.data.BftOrderingIdentifiers.BftNodeId +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.framework.data.{ + MessageFrom, + SignedMessage, +} +import com.digitalasset.canton.tracing.TraceContext + +import scala.concurrent.{ExecutionContext, Future} + +class FixedKeysCryptoProvider( + privKey: SigningPrivateKey, + pubKeys: Map[BftNodeId, SigningPublicKey], + cryptoApi: CryptoPureApi, + metrics: BftOrderingMetrics, +)(implicit executionContext: ExecutionContext) + extends CryptoProvider[PekkoEnv] { + + override def signHash(hash: Hash, operationId: String)(implicit + traceContext: TraceContext, + metricsContext: MetricsContext, + ): PekkoModuleSystem.PekkoFutureUnlessShutdown[Either[SyncCryptoError, Signature]] = + PekkoFutureUnlessShutdown( + "sign", + () => + timeCrypto( + metrics, + signHash(hash), + operationId, + ), + ) + + override def signMessage[MessageT <: ProtocolVersionedMemoizedEvidence & MessageFrom]( + message: MessageT, + authenticatedMessageType: CryptoProvider.AuthenticatedMessageType, + )(implicit + traceContext: TraceContext, + metricsContext: MetricsContext, + ): 
PekkoModuleSystem.PekkoFutureUnlessShutdown[Either[SyncCryptoError, SignedMessage[MessageT]]] = + PekkoFutureUnlessShutdown( + "signMessage", + () => + for { + hash <- + timeCrypto( + metrics, + FutureUnlessShutdown.outcomeF( + Future(hashForMessage(message, message.from, authenticatedMessageType)) + ), + operationId = s"hash-$authenticatedMessageType", + ) + signature <- + timeCrypto( + metrics, + signHash(hash), + operationId = s"sign-$authenticatedMessageType", + ) + } yield signature.map(SignedMessage(message, _)), + ) + + override def verifySignature( + hash: Hash, + member: BftNodeId, + signature: Signature, + operationId: String, + )(implicit + traceContext: TraceContext, + metricsContext: MetricsContext, + ): PekkoModuleSystem.PekkoFutureUnlessShutdown[Either[SignatureCheckError, Unit]] = + PekkoFutureUnlessShutdown( + "verifying signature", + () => + timeCrypto( + metrics, + FutureUnlessShutdown.outcomeF( + Future( + cryptoApi + .verifySignature(hash, pubKeys(member), signature, BftOrderingSigningKeyUsage) + ) + ), + operationId, + ), + ) + + private def signHash( + hash: Hash + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Either[SyncCryptoError.SyncCryptoSigningError, Signature]] = + FutureUnlessShutdown + .outcomeF(Future(cryptoApi.sign(hash, privKey, BftOrderingSigningKeyUsage))) + .map { + case Right(sig) => Right(sig) + case Left(err) => Left(SyncCryptoError.SyncCryptoSigningError(err)) + } +} diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/standalone/topology/FixedFileBasedOrderingTopologyProvider.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/standalone/topology/FixedFileBasedOrderingTopologyProvider.scala new file mode 100644 index 0000000000..5438238a06 --- /dev/null +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/standalone/topology/FixedFileBasedOrderingTopologyProvider.scala @@ -0,0 +1,110 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings.standalone.topology + +import better.files.File as BFile +import com.digitalasset.canton.crypto.{CryptoPureApi, SigningPrivateKey, SigningPublicKey, v30} +import com.digitalasset.canton.protocol.DynamicSynchronizerParameters +import com.digitalasset.canton.sequencing.protocol.MaxRequestSizeToDeserialize +import com.digitalasset.canton.synchronizer.metrics.BftOrderingMetrics +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings.pekko.PekkoModuleSystem.{ + PekkoEnv, + PekkoFutureUnlessShutdown, +} +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings.standalone.crypto.FixedKeysCryptoProvider +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.BftBlockOrdererConfig +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.integration.canton.crypto.CryptoProvider +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.integration.canton.topology.{ + OrderingTopologyProvider, + TopologyActivationTime, +} +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.modules.consensus.iss.data.Genesis +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.framework.data.BftOrderingIdentifiers.{ + BftKeyId, + BftNodeId, +} +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.framework.data.topology.{ + OrderingTopology, + SequencingParameters, +} +import com.digitalasset.canton.tracing.TraceContext + +import java.io.File +import scala.concurrent.ExecutionContext + +class FixedFileBasedOrderingTopologyProvider( + standaloneConfig: BftBlockOrdererConfig.BftBlockOrderingStandaloneNetworkConfig, + crypto: CryptoPureApi, + metrics: BftOrderingMetrics, +)(implicit executionContext: ExecutionContext) + extends OrderingTopologyProvider[PekkoEnv] { + + private val pubKey = readSigningPublicKey(standaloneConfig.signingPublicKeyProtoFile) + + private val privKey = + SigningPrivateKey + .fromProtoV30( + v30.SigningPrivateKey + .parseFrom(BFile(standaloneConfig.signingPrivateKeyProtoFile.getAbsolutePath).byteArray) + ) + .getOrElse(throw new IllegalArgumentException("Failed to parse signing private key")) + + private val peerSigningPublicKeys = + standaloneConfig.peers.map { peerConfig => + BftNodeId(peerConfig.sequencerId) -> + readSigningPublicKey(peerConfig.signingPublicKeyProtoFile) + }.toMap + (BftNodeId(standaloneConfig.thisSequencerId) -> pubKey) + + private val orderingTopology = + OrderingTopology( + Map( + BftNodeId(standaloneConfig.thisSequencerId) -> + OrderingTopology.NodeTopologyInfo( + Genesis.GenesisTopologyActivationTime, + Set(BftKeyId(pubKey.fingerprint.toProtoPrimitive)), + ) + ) ++ standaloneConfig.peers.map { peerConfig => + BftNodeId(peerConfig.sequencerId) -> + OrderingTopology.NodeTopologyInfo( + Genesis.GenesisTopologyActivationTime, + Set( + BftKeyId( + peerSigningPublicKeys( + BftNodeId(peerConfig.sequencerId) + ).fingerprint.toProtoPrimitive + ) + ), + ) + }, + SequencingParameters.Default, + MaxRequestSizeToDeserialize.Limit( + DynamicSynchronizerParameters.defaultMaxRequestSize.value + ), + Genesis.GenesisTopologyActivationTime, + areTherePendingCantonTopologyChanges = false, + ) + + override def getOrderingTopologyAt(activationTime: TopologyActivationTime)(implicit + traceContext: TraceContext + ): PekkoEnv#FutureUnlessShutdownT[Option[(OrderingTopology, CryptoProvider[PekkoEnv])]] 
= + PekkoFutureUnlessShutdown.pure( + Some( + ( + orderingTopology, + new FixedKeysCryptoProvider(privKey, peerSigningPublicKeys, crypto, metrics), + ) + ) + ) + + private def readSigningPublicKey(signingPublicKeyProtoFile: File) = + SigningPublicKey + .fromProtoV30( + v30.SigningPublicKey.parseFrom( + BFile(signingPublicKeyProtoFile.getAbsolutePath).byteArray + ) + ) + .getOrElse( + throw new IllegalArgumentException("Failed to parse signing public key") + ) +} diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/BftBlockOrdererConfig.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/BftBlockOrdererConfig.scala index a06428c3c0..b959180651 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/BftBlockOrdererConfig.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/BftBlockOrdererConfig.scala @@ -11,6 +11,7 @@ import com.digitalasset.canton.config.{ AdminTokenConfig, AuthServiceConfig, BasicKeepAliveServerConfig, + BatchAggregatorConfig, CantonConfigValidator, CantonConfigValidatorInstances, ClientConfig, @@ -19,6 +20,7 @@ import com.digitalasset.canton.config.{ PemFileOrString, ServerConfig, StorageConfig, + StreamLimitConfig, TlsClientConfig, TlsServerConfig, UniformCantonConfigValidation, @@ -26,6 +28,7 @@ import com.digitalasset.canton.config.{ import com.digitalasset.canton.networking.grpc.CantonServerBuilder import com.digitalasset.canton.sequencing.authentication.AuthenticationTokenManagerConfig import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.BftBlockOrdererConfig.{ + BftBlockOrderingStandaloneNetworkConfig, DefaultAvailabilityMaxNonOrderedBatchesPerNode, DefaultAvailabilityNumberOfAttemptsOfDownloadingOutputFetchBeforeWarning, DefaultConsensusQueueMaxSize, @@ -51,6 +54,7 @@ import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.framewor import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.framework.data.topology.OrderingTopology import io.grpc.netty.shaded.io.netty.handler.ssl.SslContext +import java.io.File import scala.concurrent.duration.* /** @param maxRequestsInBatch @@ -91,10 +95,12 @@ final case class BftBlockOrdererConfig( outputFetchTimeout: FiniteDuration = DefaultOutputFetchTimeout, outputFetchTimeoutCap: FiniteDuration = DefaultOutputFetchTimeoutCap, initialNetwork: Option[P2PNetworkConfig] = None, + standalone: Option[BftBlockOrderingStandaloneNetworkConfig] = None, leaderSelectionPolicy: LeaderSelectionPolicyConfig = DefaultLeaderSelectionPolicy, storage: Option[StorageConfig] = None, // We may want to flip the default once we're satisfied with initial performance enablePerformanceMetrics: Boolean = true, + batchAggregator: BatchAggregatorConfig = BatchAggregatorConfig(), ) extends UniformCantonConfigValidation { private val maxRequestsPerBlock = maxBatchesPerBlockProposal * maxRequestsInBatch require( @@ -201,7 +207,9 @@ object BftBlockOrdererConfig { TlsClientConfig(trustCollectionFile = None, clientCert = None, enabled = true) ), tls: Option[TlsServerConfig] = None, - override val maxInboundMessageSize: NonNegativeInt = ServerConfig.defaultMaxInboundMessageSize, + override val maxInboundMessageSize: NonNegativeInt = + ServerConfig.defaultMaxInboundMessageSize, + override val stream: Option[StreamLimitConfig] 
= None, ) extends ServerConfig with UniformCantonConfigValidation { override val maxTokenLifetime: config.NonNegativeDuration = @@ -311,4 +319,28 @@ object BftBlockOrdererConfig { } } } + + final case class BftBlockOrderingStandaloneNetworkConfig( + thisSequencerId: String, + signingPrivateKeyProtoFile: File, + signingPublicKeyProtoFile: File, + peers: Seq[BftBlockOrderingStandalonePeerConfig], + ) extends UniformCantonConfigValidation + + object BftBlockOrderingStandaloneNetworkConfig { + implicit val bftBlockOrderingStandaloneNetworkConfigValidator + : CantonConfigValidator[BftBlockOrderingStandaloneNetworkConfig] = + CantonConfigValidatorDerivation[BftBlockOrderingStandaloneNetworkConfig] + } + + final case class BftBlockOrderingStandalonePeerConfig( + sequencerId: String, + signingPublicKeyProtoFile: File, + ) extends UniformCantonConfigValidation + + object BftBlockOrderingStandalonePeerConfig { + implicit val bftBlockOrderingStandalonePeerConfigValidator + : CantonConfigValidator[BftBlockOrderingStandalonePeerConfig] = + CantonConfigValidatorDerivation[BftBlockOrderingStandalonePeerConfig] + }
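+ + // For illustration only (hypothetical identifiers and paths; the exact location of this section depends on + // where the BFT block orderer config is nested in the enclosing sequencer config), the standalone network + // config maps to HOCON roughly as follows: + // standalone { + //   this-sequencer-id = "sequencer1" + //   signing-private-key-proto-file = "/keys/sequencer1-signing-private-key.proto" + //   signing-public-key-proto-file = "/keys/sequencer1-signing-public-key.proto" + //   peers = [{ + //     sequencer-id = "sequencer2" + //     signing-public-key-proto-file = "/keys/sequencer2-signing-public-key.proto" + //   }] + // }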
} diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/BftOrderingModuleSystemInitializer.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/BftOrderingModuleSystemInitializer.scala index db224150ad..89cd65a03f 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/BftOrderingModuleSystemInitializer.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/BftOrderingModuleSystemInitializer.scala @@ -103,6 +103,8 @@ private[bftordering] class BftOrderingModuleSystemInitializer[ requestInspector: RequestInspector = OutputModule.DefaultRequestInspector, // Only set by simulation tests epochChecker: EpochChecker = EpochChecker.DefaultEpochChecker, // Only set by simulation tests + outputPreviousStoredBlock: OutputModule.PreviousStoredBlock = + new OutputModule.PreviousStoredBlock, )(implicit synchronizerProtocolVersion: ProtocolVersion, mc: MetricsContext, tracer: Tracer) extends SystemInitializer[ E, @@ -181,6 +183,7 @@ private[bftordering] class BftOrderingModuleSystemInitializer[ config.maxRequestsInBatch, config.minRequestsInBatch, config.maxBatchCreationInterval, + checkTags = config.standalone.isEmpty, ) new MempoolModule( cfg, @@ -246,6 +249,8 @@ private[bftordering] class BftOrderingModuleSystemInitializer[ dependencies, loggerFactory, timeouts, + // In standalone mode, don't check tags, as they are used for request IDs + checkTags = config.standalone.isEmpty, )() }, consensus = (p2pNetworkOutRef, availabilityRef, outputRef) => { @@ -292,6 +297,7 @@ private[bftordering] class BftOrderingModuleSystemInitializer[ timeouts, requestInspector, epochChecker, + previousStoredBlock = outputPreviousStoredBlock, ), pruning = () => new PruningModule( diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/integration/canton/SupportedVersions.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/integration/canton/SupportedVersions.scala index c1884710d1..f425b79f0f 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/integration/canton/SupportedVersions.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/integration/canton/SupportedVersions.scala @@ -24,5 +24,7 @@ // protocol versions that use the same protobuf data version are designated via a representative // Canton protocol version. // TODO(#25269): support multiple protobuf data versions + // Full support for multiple proto data versions can wait until another such version actually exists, because + // only nodes running a newer version will need it. val ProtoData: ProtoVersion = ProtoVersion(30) } diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/integration/canton/topology/OrderingTopologyProvider.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/integration/canton/topology/OrderingTopologyProvider.scala index adcaf5aa1c..e7815a1e97 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/integration/canton/topology/OrderingTopologyProvider.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/integration/canton/topology/OrderingTopologyProvider.scala @@ -16,7 +16,8 @@ trait OrderingTopologyProvider[E <: Env[E]] { * * @param activationTime * The timestamp with which to query the topology client for a topology snapshot. See - * [[TopologyActivationTime]] for details. + * [[com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.integration.canton.topology.TopologyActivationTime]] + * for details. * @param traceContext * The trace context. 
* @return diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/availability/AvailabilityModule.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/availability/AvailabilityModule.scala index 8190526eef..3bdd09e890 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/availability/AvailabilityModule.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/availability/AvailabilityModule.scala @@ -93,6 +93,7 @@ final class AvailabilityModule[E <: Env[E]]( override val timeouts: ProcessingTimeout, disseminationProtocolState: DisseminationProtocolState = new DisseminationProtocolState(), outputFetchProtocolState: MainOutputFetchProtocolState = new MainOutputFetchProtocolState(), + checkTags: Boolean = true, )( // Only passed in tests private var messageAuthorizer: MessageAuthorizer = initialMembership.orderingTopology, @@ -1411,7 +1412,7 @@ final class AvailabilityModule[E <: Env[E]]( } _ <- Either.cond( - batch.requests.map(_.value).forall(_.isTagValid), + !checkTags || batch.requests.map(_.value).forall(_.isTagValid), (), { emitInvalidMessage(metrics, from) s"Batch $batchId from '$from' contains requests with invalid tags, valid tags are: (${OrderingRequest.ValidTags diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/availability/data/AvailabilityStore.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/availability/data/AvailabilityStore.scala index cce84f3c52..008ab10472 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/availability/data/AvailabilityStore.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/availability/data/AvailabilityStore.scala @@ -3,7 +3,7 @@ package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.modules.availability.data -import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.config.{BatchAggregatorConfig, ProcessingTimeout} import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage} import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings.pekko.PekkoModuleSystem.PekkoEnv @@ -63,6 +63,7 @@ object AvailabilityStore { object NumberOfRecords { val empty = NumberOfRecords(0L) } def apply( + batchAggregatorConfig: BatchAggregatorConfig, storage: Storage, timeouts: ProcessingTimeout, loggerFactory: NamedLoggerFactory, @@ -71,6 +72,8 @@ object AvailabilityStore { case _: MemoryStorage => new InMemoryAvailabilityStore() case dbStorage: DbStorage => - new DbAvailabilityStore(dbStorage, timeouts, loggerFactory)(executionContext) + new DbAvailabilityStore(batchAggregatorConfig, dbStorage, timeouts, loggerFactory)( + executionContext + ) } } diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/availability/data/db/DbAvailabilityStore.scala 
b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/availability/data/db/DbAvailabilityStore.scala index 4f47c4df44..c1d9402f68 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/availability/data/db/DbAvailabilityStore.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/availability/data/db/DbAvailabilityStore.scala @@ -4,10 +4,12 @@ package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.modules.availability.data.db import com.daml.nameof.NameOf.functionFullName +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.CantonRequireTypes.String68 -import com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.config.{BatchAggregatorConfig, ProcessingTimeout} +import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown} +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, TracedLogger} import com.digitalasset.canton.resource.DbStorage.Profile.{H2, Postgres} import com.digitalasset.canton.resource.{DbStorage, DbStore} import com.digitalasset.canton.serialization.ProtoConverter @@ -21,12 +23,14 @@ import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.framewor import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.framework.data.OrderingRequestBatch import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.framework.data.availability.BatchId import com.digitalasset.canton.synchronizer.sequencing.sequencer.bftordering.v30 -import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.tracing.{TraceContext, Traced} +import com.digitalasset.canton.util.BatchAggregator import slick.jdbc.{GetResult, PositionedParameters, SetParameter} import scala.concurrent.ExecutionContext class DbAvailabilityStore( + batchAggregatorConfig: BatchAggregatorConfig, override protected val storage: DbStorage, override protected val timeouts: ProcessingTimeout, override protected val loggerFactory: NamedLoggerFactory, @@ -38,6 +42,36 @@ class DbAvailabilityStore( private val profile = storage.profile private val converters = storage.converters + private val addBatchBatchAggregator = { + val processor = + new BatchAggregator.Processor[(BatchId, OrderingRequestBatch), Unit] { + + override val kind: String = "Add availability batches" + + override val logger: TracedLogger = DbAvailabilityStore.this.logger + + override def executeBatch( + items: NonEmpty[Seq[Traced[(BatchId, OrderingRequestBatch)]]] + )(implicit + traceContext: TraceContext, + callerCloseContext: CloseContext, + ): FutureUnlessShutdown[Iterable[Unit]] = + // Sorting should prevent deadlocks in Postgres when using concurrent clashing batched inserts + // with idempotency "on conflict do nothing" clauses. 
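+ // (Illustration: two concurrent aggregated inserts covering batch IDs {A, B} and {B, A} would otherwise + // acquire row locks in opposite orders and could deadlock each other; sorting both to (A, B) makes the + // lock acquisition order deterministic.)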
+ runAddBatches(items.sortBy(_.value._1).map(_.value)) + .map(_ => Seq.fill(items.size)(())) + + override def prettyItem: Pretty[(BatchId, OrderingRequestBatch)] = { + import com.digitalasset.canton.logging.pretty.PrettyUtil.* + prettyOfClass[(BatchId, OrderingRequestBatch)]( + param("batchId", _._1.hash) + ) + } + } + + BatchAggregator(processor, batchAggregatorConfig) + } + implicit object SetSeqBatchId extends SetParameter[Seq[BatchId]] { override def apply(v1: Seq[BatchId], pp: PositionedParameters): Unit = v1.foreach(setBatchIdParameter(_, pp)) @@ -83,28 +117,47 @@ class DbAvailabilityStore( traceContext: TraceContext ): PekkoFutureUnlessShutdown[Unit] = { val name = addBatchActionName(batchId) - val future = () => - storage.synchronizeWithClosing(name) { - - storage.update_( - profile match { - case _: Postgres => - sqlu"""insert into ord_availability_batch - values ($batchId, $batch, ${batch.epochNumber}) - on conflict (id) do nothing""" - case _: H2 => - sqlu"""merge into ord_availability_batch using dual - on (id = $batchId) + PekkoFutureUnlessShutdown( + name, + () => addBatchBatchAggregator.run((batchId, batch)), + orderingStage = Some(functionFullName), + ) + } + + private def runAddBatches( + batches: Seq[(BatchId, OrderingRequestBatch)] + )(implicit + errorLoggingContext: ErrorLoggingContext, + traceContext: TraceContext, + ): FutureUnlessShutdown[Unit] = + storage.synchronizeWithClosing("add-batches") { + val insertSql = + profile match { + case _: Postgres => + """insert into ord_availability_batch + values (?, ?, ?) + on conflict (id) do nothing""" + case _: H2 => + """merge into ord_availability_batch using dual + on (id = ?1) when not matched then insert (id, batch, epoch_number) - values ($batchId, $batch, ${batch.epochNumber}) - """ - }, + values (?1, ?2, ?3)""" + } + + storage + .runWrite( + DbStorage + .bulkOperation_(insertSql, batches, storage.profile) { pp => msg => + pp >> msg._1 + pp >> msg._2 + pp >> msg._2.epochNumber + }, functionFullName, + maxRetries = 1, ) - } - PekkoFutureUnlessShutdown(name, future, orderingStage = Some(functionFullName)) - } + .map(_ => ()) + } @SuppressWarnings(Array("org.wartremover.warts.Return")) override def fetchBatches(batches: Seq[BatchId])(implicit diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/IssConsensusModule.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/IssConsensusModule.scala index 5a64ebbe9f..472cadbcbf 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/IssConsensusModule.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/IssConsensusModule.scala @@ -412,6 +412,8 @@ final class IssConsensusModule[E <: Env[E]]( case msg: Consensus.ConsensusMessage.PbftUnverifiedNetworkMessage => val from = msg.underlyingNetworkMessage.from + val actualSender = msg.underlyingNetworkMessage.message.actualSender + .getOrElse(abort("actualSender needs to be provided for network messages")) val blockMetadata = msg.underlyingNetworkMessage.message.blockMetadata val epochNumber = blockMetadata.epochNumber val blockNumber = blockMetadata.blockNumber @@ -431,21 +433,21 @@ final class IssConsensusModule[E <: Env[E]]( // Note that we use the actual sender 
instead of the original sender, so that a node // cannot maliciously retransmit many messages from another node with the intent to // fill up the other node's quota in this queue. - futurePbftMessageQueue.enqueue(msg.actualSender, msg) match { + futurePbftMessageQueue.enqueue(actualSender, msg) match { case FairBoundedQueue.EnqueueResult.Success => logger.trace( - s"Queued PBFT message $pbftMessageType from future epoch $epochNumber " + + s"Queued PBFT message $pbftMessageType from $actualSender from future epoch $epochNumber " + s"as we're still in epoch $thisNodeEpochNumber" ) case FairBoundedQueue.EnqueueResult.TotalCapacityExceeded => logger.trace( - s"Dropped PBFT message $pbftMessageType from future epoch $epochNumber " + + s"Dropped PBFT message $pbftMessageType from $actualSender from future epoch $epochNumber " + s"as we're still in epoch $thisNodeEpochNumber and " + s"total capacity for queueing future messages has been reached" ) case FairBoundedQueue.EnqueueResult.PerNodeQuotaExceeded(node) => logger.trace( - s"Dropped PBFT message $pbftMessageType from future epoch $epochNumber " + + s"Dropped PBFT message $pbftMessageType from $actualSender from future epoch $epochNumber " + s"as we're still in epoch $thisNodeEpochNumber and " + s"the quota for node $node for queueing future messages has been reached" ) @@ -569,8 +571,9 @@ final class IssConsensusModule[E <: Env[E]]( private def startConsensusForCurrentEpoch()(implicit context: E#ActorContextT[Consensus.Message[E]], traceContext: TraceContext, - ): Unit = - if (epochState.epoch.info == GenesisEpoch.info) { + ): Unit = { + val epochInfo = epochState.epoch.info + if (epochInfo == GenesisEpoch.info) { logger.debug("Started at genesis, self-sending its topology to start epoch 0") context.self.asyncSend( NewEpochTopology( @@ -580,7 +583,9 @@ final class IssConsensusModule[E <: Env[E]]( ) ) } else if (!epochState.epochCompletionStatus.isComplete) { - logger.debug("Started during an in-progress epoch, starting segment modules") + logger.debug( + s"Starting consensus for not-yet-started or in-progress epoch ${epochInfo.number} (starting segment modules)" + ) epochState.startSegmentModules() retransmissionsManager.startEpoch(epochState) } else { @@ -603,6 +608,7 @@ final class IssConsensusModule[E <: Env[E]]( "Started after a completed epoch but before starting a new one, waiting for topology from the output module" ) } + } private def startNewEpochUnlessOffboarded( currentEpochInfo: EpochInfo, @@ -880,15 +886,19 @@ object IssConsensusModule { val DefaultDatabaseReadTimeout: FiniteDuration = 10.seconds def parseNetworkMessage( - protoSignedMessage: v30.SignedMessage + protoSignedMessage: v30.SignedMessage, + actualSender: BftNodeId, ): ParsingResult[SignedMessage[ConsensusSegment.ConsensusMessage.PbftNetworkMessage]] = SignedMessage .fromProtoWithNodeId(v30.ConsensusMessage)(from => - proto => originalByteString => parseConsensusNetworkMessage(from, proto)(originalByteString) + proto => + originalByteString => + parseConsensusNetworkMessage(from, Some(actualSender), proto)(originalByteString) )(protoSignedMessage) def parseConsensusNetworkMessage( from: BftNodeId, + actualSender: Option[BftNodeId], message: v30.ConsensusMessage, )( originalByteString: ByteString @@ -902,6 +912,7 @@ object IssConsensusModule { header.viewNumber, value, from, + actualSender, )(originalByteString) case v30.ConsensusMessage.Message.Prepare(value) => ConsensusSegment.ConsensusMessage.Prepare.fromProto( @@ -909,6 +920,7 @@ object IssConsensusModule { 
header.viewNumber, value, from, + actualSender, )(originalByteString) case v30.ConsensusMessage.Message.Commit(value) => ConsensusSegment.ConsensusMessage.Commit.fromProto( @@ -916,6 +928,7 @@ object IssConsensusModule { header.viewNumber, value, from, + actualSender, )(originalByteString) case v30.ConsensusMessage.Message.ViewChange(value) => ConsensusSegment.ConsensusMessage.ViewChange.fromProto( @@ -923,6 +936,7 @@ object IssConsensusModule { header.viewNumber, value, from, + actualSender, )(originalByteString) case v30.ConsensusMessage.Message.NewView(value) => ConsensusSegment.ConsensusMessage.NewView.fromProto( @@ -930,6 +944,7 @@ object IssConsensusModule { header.viewNumber, value, from, + actualSender, )(originalByteString) case v30.ConsensusMessage.Message.Empty => Left(ProtoDeserializationError.OtherError("Empty Received")) diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/IssSegmentModule.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/IssSegmentModule.scala index 040a07c07e..a0b3690c53 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/IssSegmentModule.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/IssSegmentModule.scala @@ -127,14 +127,14 @@ class IssSegmentModule[E <: Env[E]]( def processOldViewEvent(event: ConsensusSegment.ConsensusMessage.PbftEvent): Unit = // we don't want to send or store any messages as part of rehydrating old views // the main purpose here is simply to populate prepare certificates that may be used in future view changes - segmentState.processEvent(event).discard + segmentState.processEvent(event, rehydrated = true).discard def processCurrentViewMessages( pbftEvent: ConsensusSegment.ConsensusMessage.PbftEvent ): Unit = // for the latest view, we don't want to store again messages as part of rehydration, // but we do want to make sure we send (this could potentially resend but that's OK) - processPbftEvent(pbftEvent, storeMessages = false) + processPbftEvent(pbftEvent, storeMessages = false, rehydrated = true) def rehydrateMessages( messages: Seq[SignedMessage[ConsensusSegment.ConsensusMessage.PbftNetworkMessage]], @@ -426,11 +426,12 @@ class IssSegmentModule[E <: Env[E]]( private def processPbftEvent( pbftEvent: ConsensusSegment.ConsensusMessage.PbftEvent, storeMessages: Boolean = storePbftMessages, + rehydrated: Boolean = false, )(implicit context: E#ActorContextT[ConsensusSegment.Message], traceContext: TraceContext, ): Unit = { - val processResults = segmentState.processEvent(pbftEvent) + val processResults = segmentState.processEvent(pbftEvent, rehydrated) def handleStore(store: StoreResult, sendMsg: () => Unit): Unit = store match { case StorePrePrepare(prePrepare) => @@ -448,7 +449,7 @@ class IssSegmentModule[E <: Env[E]]( Some(prePrepare.message.stored) } case StorePrepares(prepares) => - pipeToSelfWithFutureTracking(epochStore.addPrepares(prepares)) { + pipeToSelfWithFutureTracking(epochStore.addPreparesAtomically(prepares)) { case Failure(exception) => Some(ConsensusSegment.Internal.AsyncException(exception)) case Success(_) => @@ -530,7 +531,7 @@ class IssSegmentModule[E <: Env[E]]( s"Sending PBFT message to $nodes: $pbftMessage" ) pbftMessage.message 
match { - case PrePrepare(BlockMetadata(_, blockNumber), _, _, _, _) => + case PrePrepare(BlockMetadata(_, blockNumber), _, _, _, _, _) => runningBlocks.put(blockNumber, Instant.now()).discard case _ => } @@ -578,7 +579,7 @@ class IssSegmentModule[E <: Env[E]]( ): Unit = // Persist ordered block to epochStore and then self-send ack message. pipeToSelfWithFutureTracking( - epochStore.addOrderedBlock( + epochStore.addOrderedBlockAtomically( commitCertificate.prePrepare, commitCertificate.commits, ) diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/SegmentState.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/SegmentState.scala index 26ec86ca3a..1e4596c7c8 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/SegmentState.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/SegmentState.scala @@ -80,6 +80,8 @@ class SegmentState( private var inViewChange: Boolean = false private var strongQuorumReachedForCurrentView: Boolean = false + private val rehydratedFutureViewMessages = mutable.Queue[SignedMessage[PbftNormalCaseMessage]]() + private val futureViewMessagesQueue = new FairBoundedQueue[SignedMessage[PbftNormalCaseMessage]]( config.consensusQueueMaxSize, @@ -129,14 +131,16 @@ class SegmentState( @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) def processEvent( - event: PbftEvent + event: PbftEvent, + rehydrated: Boolean = false, )(implicit traceContext: TraceContext): Seq[ProcessResult] = event match { case PbftSignedNetworkMessage(signedMessage) => signedMessage.message match { case _: PbftNormalCaseMessage => processNormalCaseMessage( - signedMessage.asInstanceOf[SignedMessage[PbftNormalCaseMessage]] + signedMessage.asInstanceOf[SignedMessage[PbftNormalCaseMessage]], + rehydrated, ) case _: PbftViewChangeMessage => processViewChangeNetworkMessage( @@ -321,7 +325,8 @@ class SegmentState( // Normal Case: PrePrepare, Prepare, Commit private def processNormalCaseMessage( - msg: SignedMessage[PbftNormalCaseMessage] + msg: SignedMessage[PbftNormalCaseMessage], + rehydrated: Boolean, )(implicit traceContext: TraceContext): Seq[ProcessResult] = { var result = Seq.empty[ProcessResult] if (msg.message.viewNumber < currentViewNumber) { @@ -331,20 +336,34 @@ class SegmentState( ) discardedViewMessagesCount += 1 } else if (msg.message.viewNumber > currentViewNumber || inViewChange) { - logger.info( - s"Segment received early PbftNormalCaseMessage; peer = ${msg.from}, " + - s"message view = ${msg.message.viewNumber}, " + - s"current view = $currentViewNumber, inViewChange = $inViewChange" - ) - futureViewMessagesQueue.enqueue(msg.from, msg) match { - case EnqueueResult.PerNodeQuotaExceeded(nodeId) => - logger.trace(s"Node `$nodeId` exceeded its future view message queue quota") - case EnqueueResult.TotalCapacityExceeded => - logger.trace("Future view message queue total capacity has been exceeded") - case EnqueueResult.Duplicate(nodeId) => - logger.trace(s"Duplicate future view message for node `$nodeId` has been dropped") - case EnqueueResult.Success => - logger.trace("Successfully postponed PbftNormalCaseMessage") + if (rehydrated) { + logger.debug( + s"Segment received rehydrated PbftNormalCaseMessage; 
peer = ${msg.from}, " + + s"message view = ${msg.message.viewNumber}, " + + s"current view = $currentViewNumber, inViewChange = $inViewChange" + ) + // As rehydrated messages come from storage and don't provide the actual sender, use a separate (unbounded) + // queue that cannot overflow (short of exhausting memory). + rehydratedFutureViewMessages.enqueue(msg) + } else { + val actualSender = msg.message.actualSender.getOrElse( + abort("actualSender needs to be provided for PBFT messages sent over the network") + ) + logger.info( + s"Segment received early PbftNormalCaseMessage; peer = ${msg.from}, actual sender = $actualSender, " + + s"message view = ${msg.message.viewNumber}, " + + s"current view = $currentViewNumber, inViewChange = $inViewChange" + ) + futureViewMessagesQueue.enqueue(actualSender, msg) match { + case EnqueueResult.PerNodeQuotaExceeded(nodeId) => + logger.trace(s"Node `$nodeId` exceeded its future view message queue quota") + case EnqueueResult.TotalCapacityExceeded => + logger.trace("Future view message queue total capacity has been exceeded") + case EnqueueResult.Duplicate(nodeId) => + logger.trace(s"Duplicate future view message for node `$nodeId` has been dropped") + case EnqueueResult.Success => + logger.trace("Successfully postponed PbftNormalCaseMessage") + } } } else result = processPbftNormalCaseMessage(msg, msg.message.blockMetadata.blockNumber) @@ -704,7 +723,8 @@ class SegmentState( // during rehydration we can first process previously stored prepares and thus avoid that new conflicting prepares // are created as a result of rehydrating the new-view message's pre-prepares. val queuedMessages = - futureViewMessagesQueue.dequeueAll(_.message.viewNumber == currentViewNumber) + rehydratedFutureViewMessages.dequeueAll(_.message.viewNumber == currentViewNumber) ++ + futureViewMessagesQueue.dequeueAll(_.message.viewNumber == currentViewNumber) val futureMessageQueueResults = for { pbftMessage <- queuedMessages diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/EpochStore.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/EpochStore.scala index cbc35bab23..9570b3f11b 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/EpochStore.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/EpochStore.scala @@ -3,7 +3,7 @@ package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.modules.consensus.iss.data -import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.config.{BatchAggregatorConfig, ProcessingTimeout} import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage} import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.bindings.pekko.PekkoModuleSystem.PekkoEnv @@ -58,7 +58,7 @@ trait EpochStore[E <: Env[E]] extends AutoCloseable { protected def addPrePrepareActionName(prePrepare: SignedMessage[PrePrepare]): String = s"add PrePrepare ${prePrepare.message.blockMetadata.blockNumber} epoch: ${prePrepare.message.blockMetadata.epochNumber}" - def addPrepares(prepares: Seq[SignedMessage[Prepare]])(implicit + def 
addPreparesAtomically(prepares: Seq[SignedMessage[Prepare]])(implicit traceContext: TraceContext ): E#FutureUnlessShutdownT[Unit] @@ -91,7 +91,7 @@ trait EpochStore[E <: Env[E]] extends AutoCloseable { s"add ViewChange for ${viewChange.viewNumber} and blockMetadata ${viewChange.blockMetadata}" } - def addOrderedBlock( + def addOrderedBlockAtomically( prePrepare: SignedMessage[PrePrepare], commitMessages: Seq[SignedMessage[Commit]], )(implicit @@ -153,6 +153,7 @@ object EpochStore { ) def apply( + batchAggregatorConfig: BatchAggregatorConfig, storage: Storage, timeouts: ProcessingTimeout, loggerFactory: NamedLoggerFactory, @@ -161,7 +162,7 @@ object EpochStore { case _: MemoryStorage => new InMemoryEpochStore() case dbStorage: DbStorage => - new DbEpochStore(dbStorage, timeouts, loggerFactory)(ec) + new DbEpochStore(batchAggregatorConfig, dbStorage, timeouts, loggerFactory)(ec) } final case class NumberOfRecords( diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/db/DbEpochStore.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/db/DbEpochStore.scala index d0a2b08fa3..93e8976f27 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/db/DbEpochStore.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/db/DbEpochStore.scala @@ -5,10 +5,12 @@ package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.mo import cats.syntax.either.* import com.daml.nameof.NameOf.functionFullName -import com.digitalasset.canton.config.ProcessingTimeout +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.{BatchAggregatorConfig, ProcessingTimeout} import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory} +import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown} +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, TracedLogger} import com.digitalasset.canton.resource.DbStorage.Implicits.setParameterByteString import com.digitalasset.canton.resource.DbStorage.Profile.{H2, Postgres} import com.digitalasset.canton.resource.{DbStorage, DbStore} @@ -53,10 +55,10 @@ import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.framewor } import com.digitalasset.canton.synchronizer.sequencing.sequencer.bftordering.v30 import com.digitalasset.canton.synchronizer.sequencing.sequencer.bftordering.v30.ConsensusMessage as ProtoConsensusMessage -import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.tracing.{TraceContext, Traced} +import com.digitalasset.canton.util.{BatchAggregator, FutureUnlessShutdownUtil} import com.digitalasset.canton.{ProtoDeserializationError, RichGeneratedMessage} import com.google.protobuf.ByteString -import slick.dbio.DBIOAction import slick.jdbc.{GetResult, PositionedResult, SetParameter} import scala.concurrent.ExecutionContext @@ -65,6 +67,7 @@ import scala.util.Try import DbEpochStore.* class DbEpochStore( + batchAggregatorConfig: BatchAggregatorConfig, override protected val storage: 
DbStorage, override protected val timeouts: ProcessingTimeout, override protected val loggerFactory: NamedLoggerFactory, @@ -87,18 +90,42 @@ class DbEpochStore( } private implicit val readPbftMessage: GetResult[SignedMessage[PbftNetworkMessage]] = - GetResult(parseSignedMessage(from => IssConsensusModule.parseConsensusNetworkMessage(from, _))) + GetResult( + parseSignedMessage(from => + // actual sender is not needed when reading from the store + IssConsensusModule.parseConsensusNetworkMessage(from, actualSender = None, _) + ) + ) private implicit val tryReadPrePrepareMessageAndEpochInfo: GetResult[(PrePrepare, EpochInfo)] = GetResult { r => - val prePrepare = parseSignedMessage(_ => PrePrepare.fromProtoConsensusMessage)(r) + // actual sender is not needed when reading from the store + val prePrepare = + parseSignedMessage(_ => PrePrepare.fromProtoConsensusMessage(actualSender = None, _))(r) prePrepare.message -> readEpoch(r) } private implicit val readCommitMessage: GetResult[SignedMessage[Commit]] = GetResult { - parseSignedMessage(_ => Commit.fromProtoConsensusMessage) + // actual sender is not needed when reading from the store + parseSignedMessage(_ => Commit.fromProtoConsensusMessage(actualSender = None, _)) } + // TODO(#28200): introduce `BatchAggregator#runTogether` that avoids splitting items into different batches + // and use it in `addPrepares` and `addOrderedBlock` + + private val insertInProgressPbftMessagesBatchAggregator = + BatchAggregator( + new InsertBatchAggregatorProcessor( + { (seq, traceContext) => + implicit val tc: TraceContext = traceContext + runInsertInProgressPbftMessages(seq) + }, + "In-progress consensus block network message insert", + logger, + ), + batchAggregatorConfig, + ) + private def createFuture[X]( actionName: String, orderingStage: String, @@ -164,14 +191,18 @@ class DbEpochStore( epochNumber: EpochNumber )(implicit traceContext: TraceContext): PekkoFutureUnlessShutdown[Unit] = createFuture(completeEpochActionName(epochNumber), orderingStage = functionFullName) { + // asynchronously delete all in-progress messages after an epoch ends + FutureUnlessShutdownUtil.doNotAwaitUnlessShutdown( + storage + .update_( + sqlu"""delete from ord_pbft_messages_in_progress where epoch_number <= $epochNumber""", + functionFullName, + ), + failureMessage = "could not delete in-progress pbft messages from previous epoch(s)", + ) + // synchronously update the completed epoch to no longer be in progress storage.update_( - for { - // delete all in-progress messages after an epoch ends and before we start adding new messages in the new epoch - _ <- sqlu"truncate table ord_pbft_messages_in_progress" - _ <- sqlu"""update ord_epochs set in_progress = false - where epoch_number = $epochNumber - """ - } yield (), + sqlu"""update ord_epochs set in_progress = false where epoch_number = $epochNumber""", functionFullName, ) } @@ -213,14 +244,16 @@ class DbEpochStore( traceContext: TraceContext ): PekkoFutureUnlessShutdown[Unit] = createFuture(addPrePrepareActionName(prePrepare), orderingStage = functionFullName) { - runInsertInProgressPbftMessages(Seq(prePrepare), functionFullName) + insertInProgressPbftMessagesBatchAggregator.run(prePrepare) } - override def addPrepares( + override def addPreparesAtomically( prepares: Seq[SignedMessage[ConsensusMessage.Prepare]] )(implicit traceContext: TraceContext): PekkoFutureUnlessShutdown[Unit] = createFuture(addPreparesActionName, orderingStage = functionFullName) { - runInsertInProgressPbftMessages(prepares, functionFullName) + // Cannot 
use the batch aggregator here as we need to make sure for CFT that all messages end up + // in the same transaction. + runInsertInProgressPbftMessages(prepares) } override def addViewChangeMessage[M <: PbftViewChangeMessage]( @@ -232,10 +265,10 @@ class DbEpochStore( addViewChangeMessageActionName(viewChangeMessage), orderingStage = functionFullName, ) { - runInsertInProgressPbftMessages(Seq(viewChangeMessage), functionFullName) + insertInProgressPbftMessagesBatchAggregator.run(viewChangeMessage) } - override def addOrderedBlock( + override def addOrderedBlockAtomically( prePrepare: SignedMessage[PrePrepare], commitMessages: Seq[SignedMessage[Commit]], )(implicit @@ -249,117 +282,97 @@ class DbEpochStore( ) { val messages: Seq[SignedMessage[PbftNormalCaseMessage]] = commitMessages :++ Seq[SignedMessage[PbftNormalCaseMessage]](prePrepare) - runInsertFinalPbftMessages(messages, functionFullName) + // Cannot use the batch aggregator here as we need to make sure for CFT that all messages end up + // in the same transaction. + runInsertFinalPbftMessages(messages) } } - private def runInsertInProgressPbftMessages[M <: PbftNetworkMessage]( - messages: Seq[SignedMessage[M]], - functionName: String, - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = - profile match { - case _: Postgres => - val insertSql = + private def runInsertInProgressPbftMessages( + messages: Seq[SignedMessage[PbftNetworkMessage]] + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { + val insertSql = + profile match { + case _: Postgres => """insert into ord_pbft_messages_in_progress(block_number, epoch_number, view_number, message, discriminator, from_sequencer_id) values (?, ?, ?, ?, ?, ?) on conflict (block_number, view_number, discriminator, from_sequencer_id) do nothing """ - storage - .runWrite( - DbStorage - .bulkOperation_(insertSql, messages, storage.profile) { pp => msg => - pp >> msg.message.blockMetadata.blockNumber - pp >> msg.message.blockMetadata.epochNumber - pp >> msg.message.viewNumber - pp >> msg - pp >> getDiscriminator(msg.message) - pp >> msg.from - }, - functionName, - maxRetries = 1, - ) - .map(_ => ()) - case _: H2 => - storage.update_( - DBIOAction - .sequence( - messages.map { msg => - val blockNumber = msg.message.blockMetadata.blockNumber - val epochNumber = msg.message.blockMetadata.epochNumber - val viewNumber = msg.message.viewNumber - val discriminator = getDiscriminator(msg.message) - val from = msg.from - - sqlu"""merge into ord_pbft_messages_in_progress - using dual on (ord_pbft_messages_in_progress.block_number = $blockNumber - and ord_pbft_messages_in_progress.epoch_number = $epochNumber - and ord_pbft_messages_in_progress.view_number = $viewNumber - and ord_pbft_messages_in_progress.discriminator = $discriminator - and ord_pbft_messages_in_progress.from_sequencer_id = $from + case _: H2 => + """merge into ord_pbft_messages_in_progress + using dual on (ord_pbft_messages_in_progress.block_number = ?1 + and ord_pbft_messages_in_progress.epoch_number = ?2 + and ord_pbft_messages_in_progress.view_number = ?3 + and ord_pbft_messages_in_progress.discriminator = ?5 + and ord_pbft_messages_in_progress.from_sequencer_id = ?6 ) when not matched then insert (block_number, epoch_number, view_number, message, discriminator, from_sequencer_id) - values ($blockNumber, $epochNumber, $viewNumber, $msg, $discriminator, $from) + values (?1, ?2, ?3, ?4, ?5, ?6) """ - } - ) - .transactionally, - functionName, - ) - } + } + + storage + .runWrite( + // Sorting should 
prevent deadlocks in Postgres when using concurrent clashing batched inserts + // with idempotency "on conflict do nothing" clauses. + DbStorage + .bulkOperation_(insertSql, messages.sortBy(key), storage.profile) { pp => msg => + pp >> msg.message.blockMetadata.blockNumber + pp >> msg.message.blockMetadata.epochNumber + pp >> msg.message.viewNumber + pp >> msg + pp >> getDiscriminator(msg.message) + pp >> msg.from + }, + functionFullName, + maxRetries = 1, + ) + .map(_ => ()) + } private def runInsertFinalPbftMessages[M <: PbftNetworkMessage]( - messages: Seq[SignedMessage[M]], - functionName: String, + messages: Seq[SignedMessage[M]] )(implicit errorLoggingContext: ErrorLoggingContext, traceContext: TraceContext, - ): FutureUnlessShutdown[Unit] = - profile match { - case _: Postgres => - val insertSql = + ): FutureUnlessShutdown[Unit] = { + val insertSql = + profile match { + case _: Postgres => """insert into ord_pbft_messages_completed(block_number, epoch_number, message, discriminator, from_sequencer_id) - values (?, ?, ?, ?, ?) - on conflict (block_number, epoch_number, discriminator, from_sequencer_id) do nothing - """ - storage - .runWrite( - DbStorage - .bulkOperation_(insertSql, messages, storage.profile) { pp => msg => - pp >> msg.message.blockMetadata.blockNumber - pp >> msg.message.blockMetadata.epochNumber - pp >> msg - pp >> getDiscriminator(msg.message) - pp >> msg.from - }, - functionName, - maxRetries = 1, - ) - .map(_ => ()) - case _: H2 => - storage.update_( - DBIOAction - .sequence(messages.map { msg => - val sequencerId = msg.from - val blockNumber = msg.message.blockMetadata.blockNumber - val epochNumber = msg.message.blockMetadata.epochNumber - val discriminator = getDiscriminator(msg.message) - - sqlu"""merge into ord_pbft_messages_completed - using dual on (ord_pbft_messages_completed.block_number = $blockNumber - and ord_pbft_messages_completed.epoch_number = $epochNumber - and ord_pbft_messages_completed.discriminator = $discriminator - and ord_pbft_messages_completed.from_sequencer_id = $sequencerId + values (?, ?, ?, ?, ?) + on conflict (block_number, epoch_number, discriminator, from_sequencer_id) do nothing + """ + case _: H2 => + """merge into ord_pbft_messages_completed + using dual on (ord_pbft_messages_completed.block_number = ?1 + and ord_pbft_messages_completed.epoch_number = ?2 + and ord_pbft_messages_completed.discriminator = ?4 + and ord_pbft_messages_completed.from_sequencer_id = ?5 ) when not matched then insert (block_number, epoch_number, message, discriminator, from_sequencer_id) - values ($blockNumber, $epochNumber, $msg, $discriminator, $sequencerId) + values (?1, ?2, ?3, ?4, ?5) """ - }) - .transactionally, - functionName, - ) - } + } + storage + .runWrite( + // Sorting should prevent deadlocks in Postgres when using concurrent clashing batched inserts + // with idempotency "on conflict do nothing" clauses. 
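+        // Note: `key` (defined in the companion object below) orders messages by (blockNumber, epochNumber, +        // from, discriminator), so concurrent writers acquire their row locks in a consistent order.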
+ DbStorage + .bulkOperation_(insertSql, messages.sortBy(key), storage.profile) { pp => msg => + pp >> msg.message.blockMetadata.blockNumber + pp >> msg.message.blockMetadata.epochNumber + pp >> msg + pp >> getDiscriminator(msg.message) + pp >> msg.from + }, + functionFullName, + maxRetries = 1, + ) + .map(_ => ()) + } @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) override def loadEpochProgress(activeEpochInfo: EpochInfo)(implicit @@ -556,12 +569,51 @@ class DbEpochStore( } object DbEpochStore { + private val PrePrepareMessageDiscriminator = 0 private val PrepareMessageDiscriminator = 1 private val CommitMessageDiscriminator = 2 private val ViewChangeDiscriminator = 3 private val NewViewDiscriminator = 4 + private class InsertBatchAggregatorProcessor( + exec: ( + Seq[SignedMessage[PbftNetworkMessage]], + TraceContext, + ) => FutureUnlessShutdown[Unit], + override val kind: String, + override val logger: TracedLogger, + )(implicit executionContext: ExecutionContext) + extends BatchAggregator.Processor[SignedMessage[PbftNetworkMessage], Unit] { + + override def executeBatch( + items: NonEmpty[Seq[Traced[SignedMessage[PbftNetworkMessage]]]] + )(implicit + traceContext: TraceContext, + callerCloseContext: CloseContext, + ): FutureUnlessShutdown[Iterable[Unit]] = + exec(items.map(_.value), traceContext) + .map(_ => Seq.fill(items.size)(())) + + override def prettyItem: Pretty[SignedMessage[PbftNetworkMessage]] = { + import com.digitalasset.canton.logging.pretty.PrettyUtil.* + prettyOfClass[SignedMessage[PbftNetworkMessage]]( + param("epoch", _.message.blockMetadata.epochNumber), + param("block", _.message.blockMetadata.blockNumber), + ) + } + } + + private def key[M <: PbftNetworkMessage]( + msg: SignedMessage[M] + ): (BlockNumber, EpochNumber, BftNodeId, Int) = + ( + msg.message.blockMetadata.blockNumber, + msg.message.blockMetadata.epochNumber, + msg.from, + getDiscriminator(msg.message), + ) + private def getDiscriminator[M <: PbftNetworkMessage](message: M): Int = message match { case _: PrePrepare => PrePrepareMessageDiscriminator @@ -570,4 +622,5 @@ object DbEpochStore { case _: ConsensusMessage.ViewChange => ViewChangeDiscriminator case _: ConsensusMessage.NewView => NewViewDiscriminator } + } diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/memory/InMemoryEpochStore.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/memory/InMemoryEpochStore.scala index 5ff5b90a48..c092f7e46a 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/memory/InMemoryEpochStore.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/memory/InMemoryEpochStore.scala @@ -156,7 +156,7 @@ abstract class GenericInMemoryEpochStore[E <: Env[E]] addSingleMessageToMap(prePreparesMap)(prePrepare) } - override def addPrepares( + override def addPreparesAtomically( prepares: Seq[SignedMessage[Prepare]] )(implicit traceContext: TraceContext): E#FutureUnlessShutdownT[Unit] = createFuture(addPreparesActionName) { () => @@ -192,7 +192,7 @@ abstract class GenericInMemoryEpochStore[E <: Env[E]] } } - override def addOrderedBlock( + override def addOrderedBlockAtomically( prePrepare: SignedMessage[PrePrepare], 
commitMessages: Seq[SignedMessage[Commit]], )(implicit diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/statetransfer/StateTransferBehavior.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/statetransfer/StateTransferBehavior.scala index b7a3b859f6..210c0af62b 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/statetransfer/StateTransferBehavior.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/statetransfer/StateTransferBehavior.scala @@ -263,9 +263,14 @@ final class StateTransferBehavior[E <: Env[E]]( // We drop retransmission messages, as they will likely be stale once state transfer is finished. case _: Consensus.RetransmissionsMessage => - case Consensus.ConsensusMessage - .PbftUnverifiedNetworkMessage(actualSender, _) => + case Consensus.ConsensusMessage.PbftUnverifiedNetworkMessage(underlyingMessage) => // Use the actual sender to prevent the node from filling up other nodes' quotas in the queue. + val actualSender = + underlyingMessage.message.actualSender.getOrElse( + abort( + s"$messageType: internal inconsistency, actualSender needs to be provided for network messages" + ) + ) enqueuePbftNetworkMessage(message, actualSender) case Consensus.ConsensusMessage.PbftVerifiedNetworkMessage(underlyingMessage) => @@ -426,7 +431,7 @@ final class StateTransferBehavior[E <: Env[E]]( val currentEpochNumber = epochState.epoch.info.number postponedConsensusMessages.dequeueAll { - case Consensus.ConsensusMessage.PbftUnverifiedNetworkMessage(_, underlyingMessage) => + case Consensus.ConsensusMessage.PbftUnverifiedNetworkMessage(underlyingMessage) => underlyingMessage.message.blockMetadata.epochNumber < currentEpochNumber case Consensus.ConsensusMessage.PbftVerifiedNetworkMessage(underlyingMessage) => // likely a late response from the crypto provider diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/statetransfer/StateTransferManager.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/statetransfer/StateTransferManager.scala index 25f599ff3a..724e72a4ac 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/statetransfer/StateTransferManager.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/statetransfer/StateTransferManager.scala @@ -275,7 +275,9 @@ class StateTransferManager[E <: Env[E]]( context: E#ActorContextT[Consensus.Message[E]], traceContext: TraceContext, ): StateTransferMessageResult = { - context.pipeToSelf(epochStore.addOrderedBlock(commitCert.prePrepare, commitCert.commits)) { + context.pipeToSelf( + epochStore.addOrderedBlockAtomically(commitCert.prePrepare, commitCert.commits) + ) { case Success(_) => Some(StateTransferMessage.BlockStored(commitCert, from)) case Failure(exception) => diff --git 
a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/validation/IssConsensusSignatureVerifier.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/validation/IssConsensusSignatureVerifier.scala index ed9c06437d..d8237c2e2f 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/validation/IssConsensusSignatureVerifier.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/validation/IssConsensusSignatureVerifier.scala @@ -142,6 +142,7 @@ final class IssConsensusSignatureVerifier[E <: Env[E]](metrics: BftOrderingMetri block, canonicalCommitSet, _, + _, ) => implicit val cryptoProvider: CryptoProvider[E] = topologyInfo.currentCryptoProvider // Canonical commit sets are validated in more detail later in the process diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/mempool/MempoolModule.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/mempool/MempoolModule.scala index 0669c24830..cf2e6e4dd5 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/mempool/MempoolModule.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/mempool/MempoolModule.scala @@ -72,7 +72,7 @@ class MempoolModule[E <: Env[E]]( from.foreach(_.asyncSend(SequencerNode.RequestRejected(rejectionMessage))) span.setStatus(StatusCode.ERROR, "queue_full"); span.end() metrics.ingress.labels.outcome.values.QueueFull - } else if (!orderingRequest.isTagValid) { + } else if (config.checkTags && !orderingRequest.isTagValid) { val rejectionMessage = s"mempool received a client request with an invalid tag '${orderingRequest.tag}', " + s"valid tags are: (${OrderingRequest.ValidTags.mkString(", ")}), rejecting" @@ -103,7 +103,7 @@ class MempoolModule[E <: Env[E]]( metrics.ingress.labels.outcome.values.Success } } - emitRequestStats(metrics)(orderingRequest, sender, outcome) + emitRequestStats(metrics)(orderingRequest, sender, outcome, config.checkTags) // From local availability case Mempool.CreateLocalBatches(atMost) => diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/mempool/MempoolModuleConfig.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/mempool/MempoolModuleConfig.scala index 503f77b1e4..6fafeecca6 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/mempool/MempoolModuleConfig.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/mempool/MempoolModuleConfig.scala @@ -11,4 +11,5 @@ final case class MempoolModuleConfig( maxRequestsInBatch: Short, minRequestsInBatch: Short, maxBatchCreationInterval: FiniteDuration, + checkTags: Boolean = true, ) diff --git 
a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/mempool/MempoolModuleMetrics.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/mempool/MempoolModuleMetrics.scala index d22aec5a66..c77525e4cd 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/mempool/MempoolModuleMetrics.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/mempool/MempoolModuleMetrics.scala @@ -14,12 +14,20 @@ private[mempool] object MempoolModuleMetrics { orderingRequest: OrderingRequest, sender: Option[Member], outcome: metrics.ingress.labels.outcome.values.OutcomeValue, + checkTags: Boolean, )(implicit mc: MetricsContext): Unit = { val mc1 = - mc.withExtraLabels( - metrics.ingress.labels.Tag -> orderingRequest.tag, - metrics.ingress.labels.outcome.Key -> outcome, - ) + if (checkTags) { + mc.withExtraLabels( + metrics.ingress.labels.Tag -> orderingRequest.tag, + metrics.ingress.labels.outcome.Key -> outcome, + ) + } else { + // This is to avoid emitting high-cardinality labels, as standalone mode uses the tag to store the request UUID + mc.withExtraLabels( + metrics.ingress.labels.outcome.Key -> outcome + ) + } val mc2 = sender .map(sender => mc1.withExtraLabels(metrics.ingress.labels.Sender -> sender.toProtoPrimitive)) .getOrElse(mc1) diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/output/OutputModule.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/output/OutputModule.scala index 6ed209e595..4ef62a2efd 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/output/OutputModule.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/output/OutputModule.scala @@ -94,6 +94,7 @@ import com.digitalasset.canton.version.ProtocolVersion import com.google.common.annotations.VisibleForTesting import java.time.Instant +import java.util.concurrent.atomic.AtomicReference import scala.collection.mutable import scala.util.{Failure, Success} @@ -118,6 +119,8 @@ class OutputModule[E <: Env[E]]( override val timeouts: ProcessingTimeout, requestInspector: RequestInspector = DefaultRequestInspector, // For testing epochChecker: EpochChecker = EpochChecker.DefaultEpochChecker, // For testing + // Passed from BftBlockOrderer to allow a near-0 latency `GetTime` implementation + private[bftordering] val previousStoredBlock: PreviousStoredBlock = new PreviousStoredBlock, )(implicit override val config: BftBlockOrdererConfig, synchronizerProtocolVersion: ProtocolVersion, @@ -158,8 +161,6 @@ class OutputModule[E <: Env[E]]( ) ) - @VisibleForTesting - private[output] val previousStoredBlock = new PreviousStoredBlock startupState.previousBftTimeForOnboarding.foreach { time => previousStoredBlock.update( BlockNumber(startupState.initialHeightToProvide - 1), @@ -889,28 +890,30 @@ object OutputModule { initialLowerBound: Option[(EpochNumber, BlockNumber)], initialLeaderSelectionPolicy: LeaderSelectionPolicy[E], ) - @VisibleForTesting - private[output] final class PreviousStoredBlock { - 
@SuppressWarnings(Array("org.wartremover.warts.Var")) - private var blockNumberAndBftTime: Option[(BlockNumber, CantonTimestamp)] = None + private[bftordering] final class PreviousStoredBlock { - @VisibleForTesting - private[output] def getBlockNumberAndBftTime = - blockNumberAndBftTime + private val blockNumberAndBftTimeRef = + new AtomicReference[Option[(BlockNumber, CantonTimestamp)]](None) + + private[bftordering] def getBlockNumberAndBftTime: Option[(BlockNumber, CantonTimestamp)] = + blockNumberAndBftTimeRef.get() override def toString: String = - blockNumberAndBftTime + blockNumberAndBftTimeRef + .get() .map(b => s"(block number = ${b._1}, BFT time = ${b._2})") .getOrElse("undefined") - def update(blockNumber: BlockNumber, blockBftTime: CantonTimestamp): Unit = - blockNumberAndBftTime = Some(blockNumber -> blockBftTime) + @VisibleForTesting + private[output] def update(blockNumber: BlockNumber, blockBftTime: CantonTimestamp): Unit = + blockNumberAndBftTimeRef.set(Some(blockNumber -> blockBftTime)) - def computeBlockBftTime(orderedBlock: OrderedBlock): CantonTimestamp = + private[OutputModule] def computeBlockBftTime(orderedBlock: OrderedBlock): CantonTimestamp = BftTime.blockBftTime( orderedBlock.canonicalCommitSet, - previousBlockBftTime = blockNumberAndBftTime.map(_._2).getOrElse(CantonTimestamp.Epoch), + previousBlockBftTime = + blockNumberAndBftTimeRef.get().map(_._2).getOrElse(CantonTimestamp.Epoch), ) } diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/p2p/P2PNetworkInModule.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/p2p/P2PNetworkInModule.scala index 3c2a8f532a..95f2b3b573 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/p2p/P2PNetworkInModule.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/p2p/P2PNetworkInModule.scala @@ -102,8 +102,8 @@ class P2PNetworkInModule[E <: Env[E]]( metrics.p2p.receive.labels.source.values.Availability(from) case Message.ConsensusMessage(consensusMessage) => IssConsensusModule - .parseNetworkMessage(consensusMessage) - .map(msg => Consensus.ConsensusMessage.PbftUnverifiedNetworkMessage(from, msg)) + .parseNetworkMessage(consensusMessage, actualSender = from) + .map(msg => Consensus.ConsensusMessage.PbftUnverifiedNetworkMessage(msg)) .fold( errorMessage => logger.warn( diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/framework/Module.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/framework/Module.scala index 8664be8ae0..b2dc476bd2 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/framework/Module.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/framework/Module.scala @@ -525,7 +525,7 @@ object Module { p2pNetworkInModuleRef: ModuleRef[P2PMessageT], p2pNetworkOutAdminModuleRef: ModuleRef[P2PNetworkOut.Admin], consensusAdminModuleRef: ModuleRef[Consensus.Admin], - outputModuleRef: ModuleRef[Output.SequencerSnapshotMessage], + outputModuleRef: ModuleRef[Output.Message[E]], pruningModuleRef: ModuleRef[Pruning.Message], 
p2pNetworkManager: P2PNetworkManagerT, ) diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/framework/data/bfttime/CanonicalCommitSet.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/framework/data/bfttime/CanonicalCommitSet.scala index 0fd7c5c63e..5474233c9c 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/framework/data/bfttime/CanonicalCommitSet.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/framework/data/bfttime/CanonicalCommitSet.scala @@ -42,7 +42,9 @@ object CanonicalCommitSet { def fromProto(canonicalCommitSet: v30.CanonicalCommitSet): ParsingResult[CanonicalCommitSet] = canonicalCommitSet.canonicalCommits .traverse( - SignedMessage.fromProto(v30.ConsensusMessage)(Commit.fromProtoConsensusMessage) + SignedMessage.fromProto(v30.ConsensusMessage)( + Commit.fromProtoConsensusMessage(actualSender = None, _) + ) ) .map(x => CanonicalCommitSet(SortedSet.from(x))) } diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/framework/data/ordering/ConsensusCertificate.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/framework/data/ordering/ConsensusCertificate.scala index 8d9d2cb59b..3491478155 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/framework/data/ordering/ConsensusCertificate.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/framework/data/ordering/ConsensusCertificate.scala @@ -7,6 +7,7 @@ import cats.syntax.traverse.* import com.digitalasset.canton.ProtoDeserializationError import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.framework.data.BftOrderingIdentifiers.BftNodeId import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.framework.data.SignedMessage import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.framework.data.ordering.iss.BlockMetadata import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.framework.modules.ConsensusSegment.ConsensusMessage.{ @@ -48,31 +49,37 @@ final case class CommitCertificate( object ConsensusCertificate { def fromProto( - consensusCertificate: ProtoConsensusCertificate + consensusCertificate: ProtoConsensusCertificate, + actualSender: Option[BftNodeId], ): ParsingResult[ConsensusCertificate] = consensusCertificate.certificate match { case ProtoConsensusCertificate.Certificate.Empty => Left(ProtoDeserializationError.OtherError("consensus certificate is empty")) case ProtoConsensusCertificate.Certificate.PrepareCertificate(prepareCertificate) => - PrepareCertificate.fromProto(prepareCertificate) + PrepareCertificate.fromProto(prepareCertificate, actualSender) case ProtoConsensusCertificate.Certificate.CommitCertificate(commitCertificate) => - CommitCertificate.fromProto(commitCertificate) + CommitCertificate.fromProto(commitCertificate, actualSender) } } object PrepareCertificate { def fromProto( - prepareCertificate: v30.PrepareCertificate + prepareCertificate: 
v30.PrepareCertificate, + actualSender: Option[BftNodeId], ): ParsingResult[PrepareCertificate] = for { prePrepare <- ProtoConverter .parseRequired( - SignedMessage.fromProto(v30.ConsensusMessage)(PrePrepare.fromProtoConsensusMessage), + SignedMessage.fromProto(v30.ConsensusMessage)( + PrePrepare.fromProtoConsensusMessage(actualSender, _) + ), "prePrepare", prepareCertificate.prePrepare, ) prepares <- prepareCertificate.prepares.traverse( - SignedMessage.fromProto(v30.ConsensusMessage)(Prepare.fromProtoConsensusMessage) + SignedMessage.fromProto(v30.ConsensusMessage)( + Prepare.fromProtoConsensusMessage(actualSender, _) + ) ) } yield PrepareCertificate(prePrepare, prepares) } @@ -82,17 +89,22 @@ object CommitCertificate { Ordering.by(commit => (commit.from, commit.localTimestamp)) def fromProto( - commitCertificate: v30.CommitCertificate + commitCertificate: v30.CommitCertificate, + actualSender: Option[BftNodeId], ): ParsingResult[CommitCertificate] = for { prePrepare <- ProtoConverter .parseRequired( - SignedMessage.fromProto(v30.ConsensusMessage)(PrePrepare.fromProtoConsensusMessage), + SignedMessage.fromProto(v30.ConsensusMessage)( + PrePrepare.fromProtoConsensusMessage(actualSender, _) + ), "prePrepare", commitCertificate.prePrepare, ) commits <- commitCertificate.commits.traverse( - SignedMessage.fromProto(v30.ConsensusMessage)(Commit.fromProtoConsensusMessage) + SignedMessage.fromProto(v30.ConsensusMessage)( + Commit.fromProtoConsensusMessage(actualSender, _) + ) ) } yield CommitCertificate(prePrepare, commits) } diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/framework/modules/Consensus.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/framework/modules/Consensus.scala index 7977b55633..6c5eafff4f 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/framework/modules/Consensus.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/framework/modules/Consensus.scala @@ -61,12 +61,9 @@ object Consensus { sealed trait ConsensusMessage extends ProtocolMessage object ConsensusMessage { final case class PbftUnverifiedNetworkMessage( - // The actual sender will differ from the underlying message's original sender in cases a node - // retransmits another node's old messages. 
- actualSender: BftNodeId, underlyingNetworkMessage: SignedMessage[ ConsensusSegment.ConsensusMessage.PbftNetworkMessage - ], + ] ) extends ConsensusMessage final case class PbftVerifiedNetworkMessage( @@ -152,7 +149,7 @@ object Consensus { ): ParsingResult[RetransmissionResponse] = for { commitCertificates <- protoRetransmissionResponse.commitCertificates.traverse( - CommitCertificate.fromProto + CommitCertificate.fromProto(_, actualSender = Some(from)) ) } yield RetransmissionResponse(from, commitCertificates) } @@ -297,7 +294,9 @@ object Consensus { originalByteString: ByteString ): ParsingResult[BlockTransferResponse] = for { - commitCert <- protoResponse.commitCertificate.map(CommitCertificate.fromProto).sequence + commitCert <- protoResponse.commitCertificate + .map(CommitCertificate.fromProto(_, actualSender = Some(from))) + .sequence rpv <- protocolVersionRepresentativeFor(SupportedVersions.ProtoData) } yield BlockTransferResponse(commitCert, from)(rpv, Some(originalByteString)) diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/framework/modules/ConsensusSegment.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/framework/modules/ConsensusSegment.scala index aaead1779d..283d89ff29 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/framework/modules/ConsensusSegment.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/framework/modules/ConsensusSegment.scala @@ -43,7 +43,7 @@ import com.digitalasset.canton.version.{ ProtocolVersion, RepresentativeProtocolVersion, VersionedProtoCodec, - VersioningCompanionMemoization, + VersioningCompanionContextMemoization, } import com.google.protobuf.ByteString @@ -138,6 +138,14 @@ object ConsensusSegment { def blockMetadata: BlockMetadata def viewNumber: ViewNumber def toProto: v30.ConsensusMessage + +    /** The actual sender differs from the underlying message's original sender in cases where a node +      * retransmitted another node's old messages. It is meant for properly "charging" sender +      * nodes in bounded queues and rate limiting. Can be empty when a message is created locally +      * or loaded from a store, since in those cases it is not received directly +      * from another node. See usages for more context.
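+      * For example, if node B retransmits an old Commit originally created by node A, `from` is +      * still A while the actual sender is B.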
+ */ + def actualSender: Option[BftNodeId] } /** A Signed Pbft consensus message that is an actual event that the consensus module will act @@ -184,6 +192,7 @@ object ConsensusSegment { block: OrderingBlock, canonicalCommitSet: CanonicalCommitSet, from: BftNodeId, + actualSender: Option[BftNodeId], )( override val representativeProtocolVersion: RepresentativeProtocolVersion[PrePrepare.type], override val deserializedFrom: Option[ByteString], @@ -227,7 +236,7 @@ object ConsensusSegment { super[HasProtocolVersionedWrapper].toByteString } - object PrePrepare extends VersioningCompanionMemoization[PrePrepare] { + object PrePrepare extends VersioningCompanionContextMemoization[PrePrepare, Option[BftNodeId]] { override def name: String = "PrePrepare" def create( @@ -236,8 +245,9 @@ object ConsensusSegment { block: OrderingBlock, canonicalCommitSet: CanonicalCommitSet, from: BftNodeId, + actualSender: Option[BftNodeId] = None, )(implicit synchronizerProtocolVersion: ProtocolVersion): PrePrepare = - PrePrepare(blockMetadata, viewNumber, block, canonicalCommitSet, from)( + PrePrepare(blockMetadata, viewNumber, block, canonicalCommitSet, from, actualSender)( protocolVersionRepresentativeFor( synchronizerProtocolVersion ): RepresentativeProtocolVersion[ @@ -247,7 +257,8 @@ object ConsensusSegment { ) def fromProtoConsensusMessage( - value: v30.ConsensusMessage + actualSender: Option[BftNodeId], + value: v30.ConsensusMessage, )(originalByteString: OriginalByteString): ParsingResult[PrePrepare] = for { header <- headerFromProto(value) @@ -259,6 +270,7 @@ object ConsensusSegment { header.viewNumber, protoPrePrepare, header.from, + actualSender, )(originalByteString) } yield prePrepare @@ -267,6 +279,7 @@ object ConsensusSegment { viewNumber: ViewNumber, prePrepare: v30.PrePrepare, from: BftNodeId, + actualSender: Option[BftNodeId], )( originalByteString: OriginalByteString ): ParsingResult[PrePrepare] = @@ -286,6 +299,7 @@ object ConsensusSegment { OrderingBlock(proofs), canonicalCommitSet, from, + actualSender, )(rpv, Some(originalByteString)) override def versioningTable: VersioningTable = VersioningTable( @@ -302,6 +316,7 @@ object ConsensusSegment { viewNumber: ViewNumber, hash: Hash, from: BftNodeId, + actualSender: Option[BftNodeId], )( override val representativeProtocolVersion: RepresentativeProtocolVersion[Prepare.type], override val deserializedFrom: Option[ByteString], @@ -325,7 +340,7 @@ object ConsensusSegment { super[HasProtocolVersionedWrapper].toByteString } - object Prepare extends VersioningCompanionMemoization[Prepare] { + object Prepare extends VersioningCompanionContextMemoization[Prepare, Option[BftNodeId]] { override def name: String = "Prepare" implicit val ordering: Ordering[Prepare] = Ordering.by(prepare => prepare.from) @@ -334,14 +349,16 @@ object ConsensusSegment { viewNumber: ViewNumber, hash: Hash, from: BftNodeId, + actualSender: Option[BftNodeId] = None, )(implicit synchronizerProtocolVersion: ProtocolVersion): Prepare = - Prepare(blockMetadata, viewNumber, hash, from)( + Prepare(blockMetadata, viewNumber, hash, from, actualSender)( protocolVersionRepresentativeFor(synchronizerProtocolVersion), None, ) def fromProtoConsensusMessage( - value: v30.ConsensusMessage + actualSender: Option[BftNodeId], + value: v30.ConsensusMessage, )(originalByteString: OriginalByteString): ParsingResult[Prepare] = for { header <- headerFromProto(value) @@ -353,6 +370,7 @@ object ConsensusSegment { header.viewNumber, protoPrepare, header.from, + actualSender, )(originalByteString) } yield 
prepare @@ -361,6 +379,7 @@ object ConsensusSegment { viewNumber: ViewNumber, prepare: v30.Prepare, from: BftNodeId, + actualSender: Option[BftNodeId], )( originalByteString: OriginalByteString ): ParsingResult[Prepare] = @@ -372,6 +391,7 @@ object ConsensusSegment { viewNumber, hash, from, + actualSender, )(rpv, Some(originalByteString)) override def versioningTable: VersioningTable = VersioningTable( @@ -389,6 +409,7 @@ object ConsensusSegment { hash: Hash, localTimestamp: CantonTimestamp, from: BftNodeId, + actualSender: Option[BftNodeId], )( override val representativeProtocolVersion: RepresentativeProtocolVersion[Commit.type], override val deserializedFrom: Option[ByteString], @@ -410,7 +431,7 @@ object ConsensusSegment { super[HasProtocolVersionedWrapper].toByteString } - object Commit extends VersioningCompanionMemoization[Commit] { + object Commit extends VersioningCompanionContextMemoization[Commit, Option[BftNodeId]] { override def name: String = "Commit" implicit val ordering: Ordering[Commit] = Ordering.by(commit => (commit.from, commit.localTimestamp)) @@ -421,14 +442,16 @@ object ConsensusSegment { hash: Hash, localTimestamp: CantonTimestamp, from: BftNodeId, + actualSender: Option[BftNodeId] = None, )(implicit synchronizerProtocolVersion: ProtocolVersion): Commit = - Commit(blockMetadata, viewNumber, hash, localTimestamp, from)( + Commit(blockMetadata, viewNumber, hash, localTimestamp, from, actualSender)( protocolVersionRepresentativeFor(synchronizerProtocolVersion), None, ) def fromProtoConsensusMessage( - value: v30.ConsensusMessage + actualSender: Option[BftNodeId], + value: v30.ConsensusMessage, )(originalByteString: OriginalByteString): ParsingResult[Commit] = for { header <- headerFromProto(value) @@ -440,6 +463,7 @@ object ConsensusSegment { header.viewNumber, protoCommit, header.from, + actualSender, )(originalByteString) } yield commit @@ -448,6 +472,7 @@ object ConsensusSegment { viewNumber: ViewNumber, commit: v30.Commit, from: BftNodeId, + actualSender: Option[BftNodeId], )(originalByteString: OriginalByteString): ParsingResult[Commit] = for { hash <- Hash.fromProtoPrimitive(commit.blockHash) @@ -459,6 +484,7 @@ object ConsensusSegment { hash, timestamp, from, + actualSender, )(rpv, Some(originalByteString)) override def versioningTable: VersioningTable = VersioningTable( @@ -475,6 +501,7 @@ object ConsensusSegment { viewNumber: ViewNumber, consensusCerts: Seq[ConsensusCertificate], from: BftNodeId, + actualSender: Option[BftNodeId], )( override val representativeProtocolVersion: RepresentativeProtocolVersion[ViewChange.type], override val deserializedFrom: Option[ByteString], @@ -505,21 +532,23 @@ object ConsensusSegment { super[HasProtocolVersionedWrapper].toByteString } - object ViewChange extends VersioningCompanionMemoization[ViewChange] { + object ViewChange extends VersioningCompanionContextMemoization[ViewChange, Option[BftNodeId]] { override def name: String = "ViewChange" def create( blockMetadata: BlockMetadata, viewNumber: ViewNumber, consensusCerts: Seq[ConsensusCertificate], from: BftNodeId, + actualSender: Option[BftNodeId] = None, )(implicit synchronizerProtocolVersion: ProtocolVersion): ViewChange = - ViewChange(blockMetadata, viewNumber, consensusCerts, from)( + ViewChange(blockMetadata, viewNumber, consensusCerts, from, actualSender)( protocolVersionRepresentativeFor(synchronizerProtocolVersion), None, ) def fromProtoConsensusMessage( - value: v30.ConsensusMessage + actualSender: Option[BftNodeId], + value: v30.ConsensusMessage, 
)(originalByteString: OriginalByteString): ParsingResult[ViewChange] = for { header <- headerFromProto(value) @@ -531,6 +560,7 @@ object ConsensusSegment { header.viewNumber, protoViewChange, header.from, + actualSender, )(originalByteString) } yield viewChange @@ -539,15 +569,19 @@ object ConsensusSegment { viewNumber: ViewNumber, viewChange: v30.ViewChange, from: BftNodeId, + actualSender: Option[BftNodeId], )(originalByteString: OriginalByteString): ParsingResult[ViewChange] = for { - certs <- viewChange.consensusCerts.traverse(ConsensusCertificate.fromProto) + certs <- viewChange.consensusCerts.traverse( + ConsensusCertificate.fromProto(_, actualSender) + ) rpv <- protocolVersionRepresentativeFor(ProtoVersion(30)) } yield ConsensusSegment.ConsensusMessage.ViewChange( blockMetadata, viewNumber, certs, from, + actualSender, )(rpv, Some(originalByteString)) override def versioningTable: VersioningTable = VersioningTable( @@ -565,6 +599,7 @@ object ConsensusSegment { viewChanges: Seq[SignedMessage[ViewChange]], prePrepares: Seq[SignedMessage[PrePrepare]], from: BftNodeId, + actualSender: Option[BftNodeId], )( override val representativeProtocolVersion: RepresentativeProtocolVersion[NewView.type], override val deserializedFrom: Option[ByteString], @@ -600,7 +635,7 @@ object ConsensusSegment { } @SuppressWarnings(Array("org.wartremover.warts.IterableOps")) - object NewView extends VersioningCompanionMemoization[NewView] { + object NewView extends VersioningCompanionContextMemoization[NewView, Option[BftNodeId]] { override def name: String = "NewView" def create( blockMetadata: BlockMetadata, @@ -608,20 +643,24 @@ object ConsensusSegment { viewChanges: Seq[SignedMessage[ViewChange]], prePrepares: Seq[SignedMessage[PrePrepare]], from: BftNodeId, - )(implicit synchronizerProtocolVersion: ProtocolVersion): NewView = NewView( - blockMetadata, - viewNumber, - viewChanges, - prePrepares, - from, - )( - protocolVersionRepresentativeFor(synchronizerProtocolVersion), - None, - ) + actualSender: Option[BftNodeId] = None, + )(implicit synchronizerProtocolVersion: ProtocolVersion): NewView = + NewView( + blockMetadata, + viewNumber, + viewChanges, + prePrepares, + from, + actualSender, + )( + protocolVersionRepresentativeFor(synchronizerProtocolVersion), + None, + ) implicit val ordering: Ordering[ViewChange] = Ordering.by(viewChange => viewChange.from) def fromProtoConsensusMessage( - value: v30.ConsensusMessage + actualSender: Option[BftNodeId], + value: v30.ConsensusMessage, )(originalByteString: OriginalByteString): ParsingResult[NewView] = for { header <- headerFromProto(value) @@ -633,6 +672,7 @@ object ConsensusSegment { header.viewNumber, protoNewView, header.from, + actualSender, )(originalByteString) } yield newView @@ -641,13 +681,18 @@ object ConsensusSegment { viewNumber: ViewNumber, newView: v30.NewView, from: BftNodeId, + actualSender: Option[BftNodeId], )(originalByteString: OriginalByteString): ParsingResult[NewView] = for { viewChanges <- newView.viewChanges.traverse( - SignedMessage.fromProto(v30.ConsensusMessage)(ViewChange.fromProtoConsensusMessage) + SignedMessage.fromProto(v30.ConsensusMessage)( + ViewChange.fromProtoConsensusMessage(actualSender, _) + ) ) prePrepares <- newView.prePrepares.traverse( - SignedMessage.fromProto(v30.ConsensusMessage)(PrePrepare.fromProtoConsensusMessage) + SignedMessage.fromProto(v30.ConsensusMessage)( + PrePrepare.fromProtoConsensusMessage(actualSender, _) + ) ) rpv <- protocolVersionRepresentativeFor(ProtoVersion(30)) } yield 
ConsensusSegment.ConsensusMessage.NewView( @@ -656,6 +701,7 @@ object ConsensusSegment { viewChanges, prePrepares, from, + actualSender, )(rpv, Some(originalByteString)) override def versioningTable: VersioningTable = VersioningTable( diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/config/SequencerNodeParameterConfig.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/config/SequencerNodeParameterConfig.scala index fbd5cc7d84..f14cdacbd9 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/config/SequencerNodeParameterConfig.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/config/SequencerNodeParameterConfig.scala @@ -3,10 +3,47 @@ package com.digitalasset.canton.synchronizer.sequencer.config +import cats.data.Chain import com.digitalasset.canton.config.* -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveDouble} +import com.digitalasset.canton.config.RequireTypes.{PositiveDouble, PositiveInt} import com.digitalasset.canton.config.manual.CantonConfigValidatorDerivation import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.synchronizer.block.AsyncWriterParameters +import io.scalaland.chimney.dsl.* + +/** Async block sequencer writer control parameters + * + * @param enabled + * if true (default) then the async writer is enabled + * @param trafficBatchSize + * the maximum number of traffic events to batch in a single write + * @param aggregationBatchSize + * the maximum number of inflight aggregations to batch in a single write + * @param blockInfoBatchSize + * the maximum number of block info updates to batch in a single write + */ +final case class AsyncWriterConfig( + enabled: Boolean = true, + trafficBatchSize: PositiveInt = PositiveInt.tryCreate(1000), + aggregationBatchSize: PositiveInt = PositiveInt.tryCreate(1000), + blockInfoBatchSize: PositiveInt = PositiveInt.tryCreate(1000), +) { + + def toParameters: AsyncWriterParameters = this.transformInto[AsyncWriterParameters] + +} + +object AsyncWriterConfig { + + implicit val asyncWriterConfigCantonConfigValidator: CantonConfigValidator[AsyncWriterConfig] = + new CantonConfigValidator[AsyncWriterConfig] { + override def validate( + edition: CantonEdition, + config: AsyncWriterConfig, + ): Chain[CantonConfigValidationError] = Chain.empty + } + +} /** Various parameters for non-standard sequencer settings * @@ -22,11 +59,8 @@ import com.digitalasset.canton.data.CantonTimestamp * @param sequencingTimeLowerBoundExclusive * if defined, the sequencer will only send events with to subscribers with sequencing time * strictly greater than sequencingTimeLowerBoundExclusive - * @param sequencerApiLimits - * map of service name to maximum number of parallel open streams - * @param warnOnUndefinedLimits - * if true, then this sequencer will emit a warning once if there is no limit configured for a - * particular stream + * @param asyncWriter + * controls the async writer */ final case class SequencerNodeParameterConfig( // TODO(i15561): Revert back to `false` once there is a stable Daml 3 protocol version @@ -40,13 +74,13 @@ final case class SequencerNodeParameterConfig( unsafeEnableOnlinePartyReplication: Boolean = false, sequencingTimeLowerBoundExclusive: Option[CantonTimestamp] = SequencerNodeParameterConfig.DefaultSequencingTimeLowerBoundExclusive, - sequencerApiLimits: Map[String, 
NonNegativeInt] = Map.empty, - warnOnUndefinedLimits: Boolean = true, + asyncWriter: AsyncWriterConfig = AsyncWriterConfig(), ) extends ProtocolConfig with LocalNodeParametersConfig with UniformCantonConfigValidation object SequencerNodeParameterConfig { + implicit val sequencerNodeParameterConfigCantonConfigValidator : CantonConfigValidator[SequencerNodeParameterConfig] = { import CantonConfigValidatorInstances.* diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/config/SequencerNodeParameters.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/config/SequencerNodeParameters.scala index 29818ff69c..225e6238a7 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/config/SequencerNodeParameters.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/config/SequencerNodeParameters.scala @@ -3,13 +3,14 @@ package com.digitalasset.canton.synchronizer.sequencer.config -import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveDouble} +import com.digitalasset.canton.config.{ProcessingTimeout, StreamLimitConfig} import com.digitalasset.canton.environment.{ CantonNodeParameters, HasGeneralCantonNodeParameters, HasProtocolCantonNodeParameters, } +import com.digitalasset.canton.synchronizer.block.AsyncWriterParameters trait SequencerParameters { def maxConfirmationRequestsBurstFactor: PositiveDouble @@ -25,21 +26,23 @@ trait SequencerParameters { * parameters) * @param maxConfirmationRequestsBurstFactor * How many confirmation requests can be sent in a burst before the rate limit kicks in. + * @param asyncWriter + * Whether the sequencer writes are async or sync * @param unsafeEnableOnlinePartyReplication * Whether to enable online party replication sequencer channels. Unsafe as still under * development. 
- * @param sequencerApiLimits - * map of service name to maximum number of parallel open streams - * @param warnOnUndefinedLimits - * emit warning if a limit is not configured for a stream + * @param streamLimits + * optional stream limit configs */ final case class SequencerNodeParameters( general: CantonNodeParameters.General, protocol: CantonNodeParameters.Protocol, maxConfirmationRequestsBurstFactor: PositiveDouble, + asyncWriter: AsyncWriterParameters, unsafeEnableOnlinePartyReplication: Boolean = false, sequencerApiLimits: Map[String, NonNegativeInt] = Map.empty, warnOnUndefinedLimits: Boolean = true, + streamLimits: Option[StreamLimitConfig] = None, ) extends CantonNodeParameters with HasGeneralCantonNodeParameters with HasProtocolCantonNodeParameters diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/DbSequencerStore.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/DbSequencerStore.scala index 80c7a8fc70..b2df5dc5b6 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/DbSequencerStore.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/DbSequencerStore.scala @@ -52,6 +52,7 @@ import com.google.protobuf.ByteString import org.h2.api.ErrorCode as H2ErrorCode import org.postgresql.util.PSQLState import slick.jdbc.* +import slick.sql.SqlStreamingAction import java.sql.SQLException import java.util.UUID @@ -282,20 +283,6 @@ class DbSequencerStore( } } - private implicit val getPayloadOResult: GetResult[Option[BytesPayload]] = - GetResult - .createGetTuple2[Option[PayloadId], Option[ByteString]] - .andThen { - case (Some(id), Some(content)) => Some(BytesPayload(id, content)) - case (None, None) => None - case (Some(id), None) => - throw new DbDeserializationException(s"Event row has payload id set [$id] but no content") - case (None, Some(_)) => - throw new DbDeserializationException( - "Event row has no payload id but has payload content" - ) - } - private implicit val trafficReceiptOGetResult: GetResult[Option[TrafficReceipt]] = GetResult .createGetTuple3[Option[NonNegativeLong], Option[NonNegativeLong], Option[NonNegativeLong]] @@ -309,40 +296,6 @@ class DbSequencerStore( ) } - private implicit val getDeliverStoreEventRowResultWithPayload - : GetResult[Sequenced[BytesPayload]] = { - val timestampGetter = implicitly[GetResult[CantonTimestamp]] - val timestampOGetter = implicitly[GetResult[Option[CantonTimestamp]]] - val discriminatorGetter = implicitly[GetResult[EventTypeDiscriminator]] - val messageIdGetter = implicitly[GetResult[Option[MessageId]]] - val memberIdGetter = implicitly[GetResult[Option[SequencerMemberId]]] - val memberIdNesGetter = implicitly[GetResult[Option[NonEmpty[SortedSet[SequencerMemberId]]]]] - val payloadGetter = implicitly[GetResult[Option[BytesPayload]]] - val traceContextGetter = implicitly[GetResult[SerializableTraceContext]] - val errorOGetter = implicitly[GetResult[Option[ByteString]]] - val trafficReceipt = implicitly[GetResult[Option[TrafficReceipt]]] - - GetResult { r => - val row = DeliverStoreEventRow[BytesPayload]( - timestampGetter(r), - r.nextInt(), - discriminatorGetter(r), - messageIdGetter(r), - memberIdGetter(r), - memberIdNesGetter(r), - payloadGetter(r), - timestampOGetter(r), - traceContextGetter(r).unwrap, - errorOGetter(r), - trafficReceipt(r), - ) - - row.asStoreEvent.valueOr(err => - throw new 
DbDeserializationException(s"Failed to deserialize event row: $err") - ) - } - } - private implicit val getDeliverStoreEventRowResult: GetResult[Sequenced[PayloadId]] = { val timestampGetter = implicitly[GetResult[CantonTimestamp]] val timestampOGetter = implicitly[GetResult[Option[CantonTimestamp]]] @@ -397,6 +350,7 @@ class DbSequencerStore( payloadId => readPayloadsFromStore(Seq(payloadId)).map(_(payloadId)), allLoader = Some(implicit traceContext => payloadIds => readPayloadsFromStore(payloadIds.toSeq)), + metrics = Some(sequencerMetrics.payloadCache), )(logger, "payloadCache") protected override def registerMemberInternal(member: Member, timestamp: CantonTimestamp)(implicit @@ -490,6 +444,9 @@ class DbSequencerStore( ) } + override def bufferPayload(payload: BytesPayload)(implicit tc: TraceContext): Unit = + payloadCache.put(payload.id, payload) + /** Save the provided payloads to the store. * * For DB implementations we suspect that this will be a hot spot for performance primarily due @@ -712,7 +669,9 @@ class DbSequencerStore( recipientRows = eventRows.forgetNE.flatMap { row => row.recipientsO.toList.flatMap { members => val isTopologyEvent = - members.contains(sequencerMemberId) && members.sizeIs > 1 + (members.contains(sequencerMemberId) && members.sizeIs > 1) || members.contains( + SequencerMemberId.Broadcast + ) members.map(m => (row.instanceIndex, m, row.timestamp, isTopologyEvent)) } } @@ -915,10 +874,8 @@ class DbSequencerStore( traceContext: TraceContext ): FutureUnlessShutdown[Map[PayloadId, Batch[ClosedEnvelope]]] = { - val preloadedPayloads = payloadIds.collect { - case payload: BytesPayload => - payload.id -> payload.decodeBatchAndTrim(protocolVersion, member) - case batch: FilteredBatch => batch.id -> Batch.trimForMember(batch.batch, member) + val preloadedPayloads = payloadIds.collect { case payload: BytesPayload => + payload.id -> payload.decodeBatchAndTrim(protocolVersion, member) }.toMap val idsToLoad = payloadIds.collect { case id: PayloadId => id } @@ -960,56 +917,161 @@ class DbSequencerStore( val fromTimestampInclusive = fromTimestampExclusiveO.map(_.immediateSuccessor).getOrElse(CantonTimestamp.MinValue) - def h2PostgresQueryEvents( - memberContainsBefore: String, - memberContainsAfter: String, - safeWatermark: CantonTimestamp, - ) = sql""" - select events.ts, events.node_index, events.event_type, events.message_id, events.sender, - events.recipients, events.payload_id, events.topology_timestamp, - events.trace_context, events.error, - events.consumed_cost, events.extra_traffic_consumed, events.base_traffic_remainder - from sequencer_events events - inner join sequencer_watermarks watermarks - on events.node_index = watermarks.node_index - where (events.recipients is null or (#$memberContainsBefore $memberId #$memberContainsAfter)) - and ( - -- inclusive timestamp bound that defaults to MinValue if unset - events.ts >= $fromTimestampInclusive - -- only consider events within the safe watermark - and events.ts <= $safeWatermark - -- if the sequencer that produced the event is offline, only consider up until its offline watermark - and (watermarks.sequencer_online = true or events.ts <= watermarks.watermark_ts) + def getResultFixedRecipients( + topologyClientMemberId: SequencerMemberId + ): GetResult[Sequenced[PayloadId]] = { + val timestampGetter = implicitly[GetResult[CantonTimestamp]] + val timestampOGetter = implicitly[GetResult[Option[CantonTimestamp]]] + val discriminatorGetter = implicitly[GetResult[EventTypeDiscriminator]] + val messageIdGetter = 
implicitly[GetResult[Option[MessageId]]] + val memberIdGetter = implicitly[GetResult[Option[SequencerMemberId]]] + val payloadIdGetter = implicitly[GetResult[Option[PayloadId]]] + val traceContextGetter = implicitly[GetResult[SerializableTraceContext]] + val errorOGetter = implicitly[GetResult[Option[ByteString]]] + val trafficReceipt = implicitly[GetResult[Option[TrafficReceipt]]] + + GetResult { r => + val row = DeliverStoreEventRow[PayloadId]( + timestampGetter(r), + r.nextInt(), + discriminatorGetter(r), + messageIdGetter(r), + memberIdGetter(r), + // instead of reading a large row, we construct a minimal recipients set here + if (r.nextBoolean()) { + Some(NonEmpty(SortedSet, memberId, topologyClientMemberId)) + } else { + Some(NonEmpty(SortedSet, memberId)) + }, + payloadIdGetter(r), + timestampOGetter(r), + traceContextGetter(r).unwrap, + errorOGetter(r), + trafficReceipt(r), + ) + + row.asStoreEvent + .fold( + msg => throw new DbDeserializationException(s"Failed to deserialize event row: $msg"), + identity, ) - order by events.ts asc - limit $limit""" + } + } - def queryEvents(safeWatermarkO: Option[CantonTimestamp]) = { - // If we don't have a safe watermark of all online sequencers (if all are offline) we'll fallback on allowing all - // and letting the offline condition in the query include the event if suitable + def queryEventsViaRecipientsTable( + safeWatermarkO: Option[CantonTimestamp], + topologyClientMemberId: SequencerMemberId, + ) = { val safeWatermark = safeWatermarkO.getOrElse(CantonTimestamp.MaxValue) - val query = profile match { - case _: Postgres => - h2PostgresQueryEvents("", " = any(events.recipients)", safeWatermark) + profile match { + case _: Postgres => + sql""" + with + watermarks as (select * from sequencer_watermarks) + select events.ts, events.node_index, events.event_type, events.message_id, events.sender, + case + when #$memberContainsBefore $topologyClientMemberId #$memberContainsAfter or #$memberContainsBefore ${SequencerMemberId.Broadcast} #$memberContainsAfter then true + else false + end as addressed_to_sequencer, + events.payload_id, events.topology_timestamp, + events.trace_context, events.error, + events.consumed_cost, events.extra_traffic_consumed, events.base_traffic_remainder + from + watermarks inner join lateral ( + -- Watermarks contain 1 record for block sequencer, up to hardcoded max of 32 records + -- for DB sequencer - both are fine as long as the query is better than the alternative + -- (scanning a wrong index or the table itself). 
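+                  -- The lateral sub-select unions two narrow scans of sequencer_event_recipients:
+                  -- one for events addressed directly to $memberId and one for events addressed to
+                  -- the broadcast recipient id, each capped at $limit before the union is merged.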
+ select * from sequencer_events + where ts in ( + (select ts + from sequencer_event_recipients recipients + where + recipients.node_index = watermarks.node_index + -- if the sequencer that produced the event is offline, only consider up until its offline watermark + and (watermarks.sequencer_online = true or recipients.ts <= watermarks.watermark_ts) + and (recipients.recipient_id = $memberId) + -- inclusive timestamp bound that defaults to MinValue if unset + and recipients.ts >= $fromTimestampInclusive + -- only consider events within the safe watermark + and recipients.ts <= $safeWatermark + order by recipients.ts asc + limit $limit) + union + (select ts + from sequencer_event_recipients recipients + where + recipients.node_index = watermarks.node_index + -- if the sequencer that produced the event is offline, only consider up until its offline watermark + and (watermarks.sequencer_online = true or recipients.ts <= watermarks.watermark_ts) + and (recipients.recipient_id = ${SequencerMemberId.Broadcast}) + -- inclusive timestamp bound that defaults to MinValue if unset + and recipients.ts >= $fromTimestampInclusive + -- only consider events within the safe watermark + and recipients.ts <= $safeWatermark + order by recipients.ts asc + limit $limit) + ) + ) events + on (true) + order by events.ts asc + -- NB: outer limit is crucial to ensure no event gaps between 2 sub-queries above + limit $limit""".as[Sequenced[PayloadId]]( + getResultFixedRecipients(topologyClientMemberId) + ) case _: H2 => - h2PostgresQueryEvents("array_contains(events.recipients, ", ")", safeWatermark) + // This is the previous version of the query as H2 doesn't support lateral joins + sql""" + select events.ts, events.node_index, events.event_type, events.message_id, events.sender, + case + when #$memberContainsBefore $topologyClientMemberId #$memberContainsAfter then true + else false + end, + events.payload_id, events.topology_timestamp, + events.trace_context, events.error, + events.consumed_cost, events.extra_traffic_consumed, events.base_traffic_remainder + from sequencer_event_recipients recipients + inner join sequencer_events events + on events.node_index = recipients.node_index and events.ts = recipients.ts + inner join sequencer_watermarks watermarks + on recipients.node_index = watermarks.node_index + where (recipients.recipient_id = $memberId or recipients.recipient_id = ${SequencerMemberId.Broadcast}) + and ( + -- inclusive timestamp bound that defaults to MinValue if unset + recipients.ts >= $fromTimestampInclusive + -- only consider events within the safe watermark + and recipients.ts <= $safeWatermark + -- if the sequencer that produced the event is offline, only consider up until its offline watermark + and (watermarks.sequencer_online = true or recipients.ts <= watermarks.watermark_ts) + ) + order by recipients.ts asc + limit $limit""".as[Sequenced[PayloadId]]( + getResultFixedRecipients(topologyClientMemberId) + ) } - - query.as[Sequenced[PayloadId]] } - val query = for { - safeWatermark <- safeWaterMarkDBIO - events <- queryEvents(safeWatermark) - } yield { - (events, safeWatermark) - } - - storage.query(query.transactionally, functionFullName).map { - case (events, _) if events.nonEmpty => ReadEventPayloads(events) - case (_, watermark) => SafeWatermark(watermark) - } + for { + topologyClientMemberId <- lookupMember(sequencerMember).map( + _.map(_.memberId).getOrElse( + ErrorUtil.invalidState( + s"Sequencer member $sequencerMember not found in sequencer members table" + ) + ) + ) + query = for { 
+ safeWatermark <- safeWaterMarkDBIO + events <- queryEventsViaRecipientsTable(safeWatermark, topologyClientMemberId) + } yield { + (events, safeWatermark) + } + result <- { + storage.query(query.transactionally, functionFullName).map { + case (events, _) if events.nonEmpty => ReadEventPayloads(events) + case (_, watermark) => SafeWatermark(watermark) + }: FutureUnlessShutdown[ReadEvents] + } + } yield result } private def readEventsLatest( @@ -1017,17 +1079,16 @@ class DbSequencerStore( upperBoundExclusive: CantonTimestamp, )(implicit traceContext: TraceContext - ): FutureUnlessShutdown[Vector[Sequenced[BytesPayload]]] = { - def queryEvents(safeWatermark: CantonTimestamp) = { - val query = - sql""" + ): FutureUnlessShutdown[Vector[Sequenced[IdOrPayload]]] = { + def queryEvents( + safeWatermark: CantonTimestamp + ): SqlStreamingAction[Vector[Sequenced[IdOrPayload]], Sequenced[IdOrPayload], Effect.Read] = + sql""" select events.ts, events.node_index, events.event_type, events.message_id, events.sender, - events.recipients, payloads.id, payloads.content, events.topology_timestamp, + events.recipients, events.payload_id, events.topology_timestamp, events.trace_context, events.error, events.consumed_cost, events.extra_traffic_consumed, events.base_traffic_remainder from sequencer_events events - left join sequencer_payloads payloads - on events.payload_id = payloads.id inner join sequencer_watermarks watermarks on events.node_index = watermarks.node_index where @@ -1039,10 +1100,7 @@ class DbSequencerStore( and (watermarks.sequencer_online = true or events.ts <= watermarks.watermark_ts) ) order by events.ts desc - limit $limit""" - - query.as[Sequenced[BytesPayload]] - } + limit $limit""".as[Sequenced[PayloadId]] val query = for { safeWatermarkO <- safeWaterMarkDBIO @@ -1197,7 +1255,7 @@ class DbSequencerStore( ) select m.member, - coalesce( + coalesce(greatest( ( select ( @@ -1205,12 +1263,13 @@ class DbSequencerStore( from sequencer_event_recipients member_recipient where member_recipient.node_index = watermarks.node_index - and m.id = member_recipient.recipient_id - """ ++ topologyClientMemberFilter ++ sql""" + and (${SequencerMemberId.Broadcast} = member_recipient.recipient_id) + """ ++ topologyClientMemberFilter // keeping this filter for consistency with the other subquery + ++ sql""" and member_recipient.ts <= watermarks.watermark_ts and member_recipient.ts <= $beforeInclusive and member_recipient.ts <= $safeWatermark - and member_recipient.ts >= m.registered_ts + and member_recipient.ts > m.registered_ts order by member_recipient.node_index, member_recipient.recipient_id, member_recipient.ts desc limit 1 ) as ts @@ -1218,6 +1277,26 @@ class DbSequencerStore( order by ts desc limit 1 ), + ( + select + ( + select member_recipient.ts + from sequencer_event_recipients member_recipient + where + member_recipient.node_index = watermarks.node_index + and (m.id = member_recipient.recipient_id) + """ ++ topologyClientMemberFilter ++ sql""" + and member_recipient.ts <= watermarks.watermark_ts + and member_recipient.ts <= $beforeInclusive + and member_recipient.ts <= $safeWatermark + and member_recipient.ts > m.registered_ts + order by member_recipient.node_index, member_recipient.recipient_id, member_recipient.ts desc + limit 1 + ) as ts + from watermarks + order by ts desc + limit 1 + )), -- end of greatest m.pruned_previous_event_timestamp ) previous_ts from enabled_members m""").as[(Member, Option[CantonTimestamp])].map(_.toMap) @@ -1274,7 +1353,7 @@ class DbSequencerStore( 
watermarks.watermark_ts is not null and (watermarks.sequencer_online = true or events.ts <= watermarks.watermark_ts) ) and ts <= $timestampInclusive - and (#$memberContainsBefore $memberId #$memberContainsAfter) + and ((#$memberContainsBefore $memberId #$memberContainsAfter) or (#$memberContainsBefore ${SequencerMemberId.Broadcast} #$memberContainsAfter)) and ts <= $safeWatermark order by ts desc limit 1 diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/EventsBuffer.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/EventsBuffer.scala index f5179396ad..a46680d7ad 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/EventsBuffer.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/EventsBuffer.scala @@ -53,7 +53,7 @@ class EventsBuffer( // testing just the overhead). // It becomes even more significant when increasing the order of magnitude by number of elements @volatile - private var eventsBuffer: Vector[Sequenced[BytesPayload]] = Vector.empty + private var eventsBuffer: Vector[Sequenced[IdOrPayload]] = Vector.empty @volatile private var memoryUsed = BytesUnit(0) @@ -66,7 +66,7 @@ class EventsBuffer( * not buffer all provided events to stay within the memory limit. */ final def bufferEvents( - events: NonEmpty[Seq[Sequenced[BytesPayload]]] + events: NonEmpty[Seq[Sequenced[IdOrPayload]]] ): Unit = addElementsInternal(events, append = true).discard /** Prepends events to the buffer up to the memory limit. May not buffer all provided events to @@ -75,10 +75,10 @@ class EventsBuffer( * true if the buffer is at the memory limit or some events had to be dropped again to stay * within the memory limit. 
*/ - final def prependEventsForPreloading(events: NonEmpty[Seq[Sequenced[BytesPayload]]]): Boolean = + final def prependEventsForPreloading(events: NonEmpty[Seq[Sequenced[IdOrPayload]]]): Boolean = addElementsInternal(events, append = false) - private def addElementsInternal(events: NonEmpty[Seq[Sequenced[BytesPayload]]], append: Boolean) = + private def addElementsInternal(events: NonEmpty[Seq[Sequenced[IdOrPayload]]], append: Boolean) = blocking(synchronized { // synchronized to enforce that there is only 1 writer // prepare the buffer so that the backing array is prepared for the right size @@ -135,7 +135,7 @@ class EventsBuffer( memoryUsed = BytesUnit.zero }) - final def snapshot(): Vector[Sequenced[BytesPayload]] = eventsBuffer + final def snapshot(): Vector[Sequenced[IdOrPayload]] = eventsBuffer } object EventsBuffer { @@ -146,13 +146,18 @@ object EventsBuffer { private val otherFieldsOverheadEstimate = 600L @VisibleForTesting - def approximateEventSize(event: Sequenced[BytesPayload]): BytesUnit = { - val payloadSize = event.event.payloadO.map(_.content.size.toLong).getOrElse(0L) + def approximateEventSize(event: Sequenced[IdOrPayload]): BytesUnit = { + val payloadSize = event.event.payloadO + .map { + case BytesPayload(_, bytes) => bytes.size.toLong + case PayloadId(_) => 8L // 64-bit Long / CantonTimestamp + } + .getOrElse(0L) val membersSizeEstimate = event.event.members.size * perMemberOverhead BytesUnit(payloadSize + membersSizeEstimate + otherFieldsOverheadEstimate) } @VisibleForTesting - def approximateSize(events: Seq[Sequenced[BytesPayload]]): BytesUnit = + def approximateSize(events: Seq[Sequenced[IdOrPayload]]): BytesUnit = events.map(approximateEventSize).sum } diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/InMemorySequencerStore.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/InMemorySequencerStore.scala index 4c3d41d46d..5acdf49d24 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/InMemorySequencerStore.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/InMemorySequencerStore.scala @@ -214,7 +214,26 @@ class InMemorySequencerStore( .takeWhile(e => e.getKey <= watermark) .filter(e => isMemberRecipient(memberId)(e.getValue)) .take(limit) - .map(entry => Sequenced(entry.getKey, entry.getValue)) + .map { entry => + val event = entry.getValue match { + case deliver: DeliverStoreEvent[PayloadId] => + val sequencerMemberId = memberMap + .getOrElse( + sequencerMember, + ErrorUtil.invalidState( + s"Sequencer member $sequencerMember is not registered in the sequencer store" + ), + ) + .memberId + if (deliver.members.contains(sequencerMemberId)) { + deliver.copy(members = NonEmpty(SortedSet, memberId, sequencerMemberId)) + } else { + deliver.copy(members = NonEmpty(SortedSet, memberId)) + } + case other => other + } + Sequenced(entry.getKey, event) + } .toList if (payloads.nonEmpty) @@ -238,13 +257,13 @@ class InMemorySequencerStore( .toList case payload: BytesPayload => List(payload.id -> payload.decodeBatchAndTrim(protocolVersion, member)) - case batch: FilteredBatch => List(batch.id -> Batch.trimForMember(batch.batch, member)) }.toMap ) private def isMemberRecipient(member: SequencerMemberId)(event: StoreEvent[_]): Boolean = event match { case deliver: DeliverStoreEvent[_] => + deliver.members.contains(SequencerMemberId.Broadcast) || 
deliver.members.contains( member ) // only if they're a recipient (sender should already be a recipient) diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/SequencerStore.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/SequencerStore.scala index 5af75b3490..6b55dffc29 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/SequencerStore.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/SequencerStore.scala @@ -58,6 +58,8 @@ final case class SequencerMemberId(private val id: Int) extends PrettyPrinting { } object SequencerMemberId { + val Broadcast: SequencerMemberId = SequencerMemberId(-1) + implicit val sequencerMemberIdOrdering: Ordering[SequencerMemberId] = Ordering.by[SequencerMemberId, Int](_.id) implicit val sequencerMemberIdOrder: Order[SequencerMemberId] = fromOrdering( @@ -126,8 +128,6 @@ final case class BytesPayload(id: PayloadId, content: ByteString) extends Payloa } } -final case class FilteredBatch(id: PayloadId, batch: Batch[ClosedEnvelope]) extends Payload - /** Sequencer events in a structure suitable for persisting in our events store. The payload type is * parameterized to allow specifying either a full payload or just a id referencing a payload. */ @@ -565,7 +565,7 @@ trait SequencerStore extends SequencerMemberValidator with NamedLogging with Aut /** In case of single instance sequencer we can use in-memory fanout buffer for events */ final def bufferEvents( - events: NonEmpty[Seq[Sequenced[BytesPayload]]] + events: NonEmpty[Seq[Sequenced[IdOrPayload]]] ): Unit = if (eventsBufferEnabled) eventsBuffer.bufferEvents(events) @@ -662,6 +662,12 @@ trait SequencerStore extends SequencerMemberValidator with NamedLogging with Aut traceContext: TraceContext ): FutureUnlessShutdown[Map[PayloadId, Batch[ClosedEnvelope]]] + def bufferPayload( + payload: BytesPayload + )(implicit + traceContext: TraceContext + ): Unit = () + /** For a given member and timestamp, return the latest timestamp of a potential topology change, * that reached both the sequencer and the member. 
To be used by the topology snapshot awaiting, * should there be a topology change expected to need to be taken into account for @@ -705,9 +711,10 @@ trait SequencerStore extends SequencerMemberValidator with NamedLogging with Aut fromExclusiveO match { case Some(fromExclusive) => cache.headOption match { + // If the buffer starts before or at the `fromExclusive` timestamp (last event that a reader already consumed) + // we can serve the request from the buffer without missing any events case Some(earliestEvent) if earliestEvent.timestamp <= fromExclusive => - // If the buffer has events that are newer than the requested timestamp, we can use the buffer - val start = SequencerStore.binarySearch[Sequenced[BytesPayload], CantonTimestamp]( + val start = SequencerStore.binarySearch[Sequenced[IdOrPayload], CantonTimestamp]( cache, _.timestamp, fromExclusive, @@ -715,7 +722,10 @@ trait SequencerStore extends SequencerMemberValidator with NamedLogging with Aut val events = cache .slice(start, cache.size) .view - .filter(_.event.members.contains(memberId)) + .filter(event => + event.event.members.contains(memberId) || event.event.members + .contains(SequencerMemberId.Broadcast) + ) .take(limit) .toSeq diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/SequencerWriterStore.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/SequencerWriterStore.scala index 167459abe1..727a2ae450 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/SequencerWriterStore.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/SequencerWriterStore.scala @@ -54,9 +54,12 @@ trait SequencerWriterStore extends AutoCloseable { ): FutureUnlessShutdown[Unit] = store.saveEvents(instanceIndex, events) - def bufferEvents(events: NonEmpty[Seq[Sequenced[BytesPayload]]]): Unit = + def bufferEvents(events: NonEmpty[Seq[Sequenced[IdOrPayload]]]): Unit = store.bufferEvents(events) + def bufferPayload(payload: BytesPayload)(implicit tc: TraceContext): Unit = + store.bufferPayload(payload) + def resetWatermark(ts: CantonTimestamp)(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, SaveWatermarkError, Unit] = diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/time/TimeAdvancingTopologySubscriber.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/time/TimeAdvancingTopologySubscriber.scala new file mode 100644 index 0000000000..774aaeca2d --- /dev/null +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/time/TimeAdvancingTopologySubscriber.scala @@ -0,0 +1,170 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.synchronizer.sequencer.time + +import cats.data.EitherT +import com.daml.metrics.api.MetricsContext +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.SequencerCounter +import com.digitalasset.canton.config.CantonRequireTypes.String73 +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.protocol.messages.TopologyTransactionsBroadcast +import com.digitalasset.canton.sequencing.client.{ + SendAsyncClientError, + SendCallback, + SequencerClientSend, +} +import com.digitalasset.canton.sequencing.protocol.* +import com.digitalasset.canton.synchronizer.sequencer.time.TimeAdvancingTopologySubscriber.{ + TimeAdvanceBroadcastMaxSequencingTimeWindow, + mkTimeAdvanceBroadcastMessageId, +} +import com.digitalasset.canton.time.{Clock, NonNegativeFiniteDuration} +import com.digitalasset.canton.topology.client.SynchronizerTopologyClientWithInit +import com.digitalasset.canton.topology.processing.{ + EffectiveTime, + SequencedTime, + TopologyTransactionProcessingSubscriber, +} +import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction +import com.digitalasset.canton.topology.{PhysicalSynchronizerId, SequencerId} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.FutureUnlessShutdownUtil +import com.google.common.annotations.VisibleForTesting + +import java.util.UUID +import scala.concurrent.ExecutionContext + +/** To avoid flooding with time proofs from clients, we broadcast small messages from sequencers, + * hoping that they will promptly advance the sequencing time for members that are waiting to observe + * events. + */ +final class TimeAdvancingTopologySubscriber( + clock: Clock, + sequencerClient: SequencerClientSend, + topologyClient: SynchronizerTopologyClientWithInit, + psid: PhysicalSynchronizerId, + thisSequencerId: SequencerId, + protected val loggerFactory: NamedLoggerFactory, +)(implicit executionContext: ExecutionContext) + extends TopologyTransactionProcessingSubscriber + with NamedLogging { + + private val protocolVersion = psid.protocolVersion + + override def observed( + sequencedTimestamp: SequencedTime, + effectiveTimestamp: EffectiveTime, + sequencerCounter: SequencerCounter, + transactions: Seq[GenericSignedTopologyTransaction], + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = + if (effectiveTimestamp.value > sequencedTimestamp.value) { + // Conservatively, use a snapshot with topology changes that are active "now". + val snapshot = topologyClient.currentSnapshotApproximation + + for { + maybeSequencerGroup <- snapshot.sequencerGroup() + } yield { + val topologyChangeDelay = topologyClient.staticSynchronizerParameters.topologyChangeDelay + maybeSequencerGroup.foreach { sequencerGroup => + if (sequencerGroup.active.contains(thisSequencerId)) { + FutureUnlessShutdownUtil + .doNotAwaitUnlessShutdown( + clock + .scheduleAfter( + _ => broadcastToAdvanceTime(effectiveTimestamp), + // To become less prone to clock skew-related problems, wait for the topology change delay instead of + // the effective time to elapse. This provides a better chance of sequencing and observing a broadcast + // message before time proofs are triggered by sequencer clients.
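+                    // (A topology change becomes effective at its sequencing time plus the topology change
+                    // delay, so waiting out the delay from local observation approximates the effective time
+                    // without comparing wall clocks across nodes.)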
+ // However, sequencer client-triggered time proofs still remain as a fallback. + delta = topologyChangeDelay.duration, + ), + failureMessage = "could not schedule a time-advancing message", + ) + } + } + } + } else { + FutureUnlessShutdown.unit + } + + @VisibleForTesting + private[time] def broadcastToAdvanceTime( + desiredTimestamp: EffectiveTime + )(implicit traceContext: TraceContext): Unit = { + implicit val metricsContext: MetricsContext = MetricsContext("type" -> "time-adv-broadcast") + val batch = + Batch.of( + protocolVersion, + Seq( + TopologyTransactionsBroadcast(psid, Seq.empty) -> + Recipients.cc(AllMembersOfSynchronizer) + )* + ) + + val sendETUS = + for { + // Ask for a topology snapshot again to avoid races on topology changes after scheduling. + maybeSequencerGroup <- + EitherT.liftF( + topologyClient.currentSnapshotApproximation.sequencerGroup() + ) + maybeAggregationRule = + maybeSequencerGroup.flatMap { sequencerGroup => + NonEmpty + .from(sequencerGroup.active) + .map { sequencerGroup => + AggregationRule( + sequencerGroup, + // We merely deduplicate here, so members eventually receive only one event; this means + // that the mechanism is not BFT, and we still rely on sequencer client-triggered time proofs + // for resilience against non-compliant sequencers. + threshold = PositiveInt.one, + protocolVersion, + ) + } + } + _ <- + if (maybeSequencerGroup.exists(_.active.contains(thisSequencerId))) { + logger.debug( + s"Sending a time-advancing message to hopefully reach $desiredTimestamp" + ) + sequencerClient + .send( + batch, + topologyTimestamp = None, + maxSequencingTime = + desiredTimestamp.value.plus(TimeAdvanceBroadcastMaxSequencingTimeWindow.duration), + aggregationRule = maybeAggregationRule, + messageId = mkTimeAdvanceBroadcastMessageId(), + callback = SendCallback.empty, + ) + } else EitherT.right[SendAsyncClientError](FutureUnlessShutdown.unit) + } yield () + + sendETUS + .tapLeft(err => logger.warn(s"Could not send a time-advancing message: $err")) + .onShutdown(Right(logger.debug("Time-advancing broadcast aborted on shutdown"))) + .discard + } +} + +object TimeAdvancingTopologySubscriber { + + val TimeAdvanceBroadcastMaxSequencingTimeWindow: NonNegativeFiniteDuration = + NonNegativeFiniteDuration.tryOfSeconds(30) + + val TimeAdvanceBroadcastMessageIdPrefix: String = "time-adv-" + + private def mkTimeAdvanceBroadcastMessageId(): MessageId = + MessageId( + String73.tryCreate( + s"$TimeAdvanceBroadcastMessageIdPrefix${UUID.randomUUID()}", + Some(TimeAdvanceBroadcastMessageIdPrefix + "message-id"), + ) + ) +} diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerAdministrationService.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerAdministrationService.scala index c76d967606..6bc046b92e 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerAdministrationService.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerAdministrationService.scala @@ -18,21 +18,28 @@ import com.digitalasset.canton.networking.grpc.CantonGrpcUtil import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.* import com.digitalasset.canton.protocol.StaticSynchronizerParameters import com.digitalasset.canton.sequencer.admin.v30 -import 
com.digitalasset.canton.sequencer.admin.v30.OnboardingStateRequest.Request import com.digitalasset.canton.sequencer.admin.v30.{ OnboardingStateResponse, + OnboardingStateV2Request, + OnboardingStateV2Response, SetTrafficPurchasedRequest, SetTrafficPurchasedResponse, } import com.digitalasset.canton.sequencing.client.SequencerClientSend import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.synchronizer.sequencer.traffic.TimestampSelector -import com.digitalasset.canton.synchronizer.sequencer.{OnboardingStateForSequencer, Sequencer} +import com.digitalasset.canton.synchronizer.sequencer.{ + OnboardingStateForSequencer, + OnboardingStateForSequencerV2, + Sequencer, + SequencerSnapshot, +} import com.digitalasset.canton.time.SynchronizerTimeTracker import com.digitalasset.canton.topology.client.SynchronizerTopologyClient import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} -import com.digitalasset.canton.topology.store.TopologyStore +import com.digitalasset.canton.topology.store.StoredTopologyTransaction.GenericStoredTopologyTransaction import com.digitalasset.canton.topology.store.TopologyStoreId.SynchronizerStore +import com.digitalasset.canton.topology.store.{StoredTopologyTransactions, TopologyStore} import com.digitalasset.canton.topology.transaction.SequencerSynchronizerState import com.digitalasset.canton.topology.{ Member, @@ -42,8 +49,12 @@ import com.digitalasset.canton.topology.{ } import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} import com.digitalasset.canton.util.{EitherTUtil, GrpcStreamingUtils} +import com.google.protobuf.timestamp.Timestamp import io.grpc.stub.StreamObserver import io.grpc.{Status, StatusRuntimeException} +import org.apache.pekko.NotUsed +import org.apache.pekko.stream.Materializer +import org.apache.pekko.stream.scaladsl.{Sink, Source} import java.io.OutputStream import scala.concurrent.{ExecutionContext, Future} @@ -57,7 +68,8 @@ class GrpcSequencerAdministrationService( staticSynchronizerParameters: StaticSynchronizerParameters, override val loggerFactory: NamedLoggerFactory, )(implicit - executionContext: ExecutionContext + executionContext: ExecutionContext, + materializer: Materializer, ) extends v30.SequencerAdministrationServiceGrpc.SequencerAdministrationService with NamedLogging { @@ -152,29 +164,104 @@ class GrpcSequencerAdministrationService( responseObserver: StreamObserver[OnboardingStateResponse], ): Unit = GrpcStreamingUtils.streamToClient( - (out: OutputStream) => onboardingState(request, out), + (out: OutputStream) => { + implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext + val res = + for { + memberOrTimestamp <- memberOrTimestamp( + request.request.sequencerUid, + request.request.timestamp, + ) + seqSnapshotAndSource <- onboardingStateSource(memberOrTimestamp) + (sequencerSnapshot, snapshotSource) = seqSnapshotAndSource + storedTransactions <- EitherT + .right[RpcError](snapshotSource.runWith(Sink.seq)) + .mapK(FutureUnlessShutdown.outcomeK) + } yield { + val onboardingState = OnboardingStateForSequencer( + StoredTopologyTransactions(storedTransactions), + staticSynchronizerParameters, + sequencerSnapshot, + ) + onboardingState.toByteString.writeTo(out) + } + mapErrNewEUS(res) + }, responseObserver, byteString => OnboardingStateResponse(byteString), ) - private def onboardingState( - request: v30.OnboardingStateRequest, - out: OutputStream, - ): Future[Unit] = { - implicit val traceContext: TraceContext = 
TraceContextGrpc.fromGrpcContext - val parseMemberOrTimestamp = request.request match { - case Request.Empty => Left(FieldNotSet("sequencer_id"): ProtoDeserializationError) - case Request.SequencerUid(sequencerUid) => - UniqueIdentifier - .fromProtoPrimitive(sequencerUid, "sequencer_id") - .map(SequencerId(_)) - .map(Left(_)) + override def onboardingStateV2( + request: OnboardingStateV2Request, + responseObserver: StreamObserver[OnboardingStateV2Response], + ): Unit = + GrpcStreamingUtils.streamToClient( + (out: OutputStream) => { + implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext + val res = for { + memberOrTimestamp <- memberOrTimestamp( + request.request.sequencerUid, + request.request.timestamp, + ) + seqSnapshotAndSource <- onboardingStateSource(memberOrTimestamp) + (sequencerSnapshot, snapshotSource) = seqSnapshotAndSource - case Request.Timestamp(referenceEffectiveTime) => - CantonTimestamp.fromProtoTimestamp(referenceEffectiveTime).map(Right(_)) - } - val res = for { - memberOrTimestamp <- wrapErrUS(parseMemberOrTimestamp) + nonTopologyOnboardingState = OnboardingStateForSequencerV2( + None, + Some(staticSynchronizerParameters), + Some(sequencerSnapshot), + staticSynchronizerParameters.protocolVersion, + ) + _ = nonTopologyOnboardingState.writeDelimitedTo(out) + _ <- EitherT + .right[RpcError]( + snapshotSource.runWith( + Sink.foreachAsync(1)(stored => + mapErrNewEUS( + wrapErrUS( + OnboardingStateForSequencerV2( + Some(stored), + None, + None, + staticSynchronizerParameters.protocolVersion, + ) + .writeDelimitedTo(out) + .leftMap( + ProtoDeserializationError.ValueConversionError("onboarding_state", _) + ) + ) + ) + ) + ) + ) + .mapK(FutureUnlessShutdown.outcomeK) + } yield () + mapErrNewEUS(res) + }, + responseObserver, + byteString => OnboardingStateV2Response(byteString), + ) + + private def memberOrTimestamp(memberP: Option[String], timestampP: Option[Timestamp])(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, RpcError, Either[SequencerId, CantonTimestamp]] = { + val sequencerId = memberP.map( + UniqueIdentifier.fromProtoPrimitive(_, "sequencer_uid").map(uid => Left(SequencerId(uid))) + ) + val timestamp = timestampP.map(CantonTimestamp.fromProtoTimestamp(_).map(Right(_))) + + val resultE = sequencerId.orElse(timestamp).getOrElse(Left(FieldNotSet("sequencer_uid"))) + wrapErrUS(resultE) + } + + private def onboardingStateSource( + memberOrTimestamp: Either[SequencerId, CantonTimestamp] + )(implicit traceContext: TraceContext): EitherT[ + FutureUnlessShutdown, + RpcError, + (SequencerSnapshot, Source[GenericStoredTopologyTransaction, NotUsed]), + ] = + for { referenceEffective <- memberOrTimestamp match { case Left(sequencerId) => EitherT( @@ -237,23 +324,14 @@ class GrpcSequencerAdministrationService( .awaitSnapshot(sequencerSnapshotTimestamp) .leftMap(_.toCantonRpcError) - topologySnapshot <- EitherT - .right[RpcError]( - topologyStore.findEssentialStateAtSequencedTime( - asOfInclusive = SequencedTime(sequencerSnapshot.lastTs), - // we need to include the rejected transactions as well, because they might have an impact on the TopologyTimestampPlusEpsilonTracker - includeRejected = true, - ) + } yield { + sequencerSnapshot -> topologyStore + .findEssentialStateAtSequencedTime( + asOfInclusive = SequencedTime(sequencerSnapshot.lastTs), + // we need to include the rejected transactions as well, because they might have an impact on the TopologyTimestampPlusEpsilonTracker + includeRejected = true, ) - } yield 
OnboardingStateForSequencer( - topologySnapshot, - staticSynchronizerParameters, - sequencerSnapshot, - staticSynchronizerParameters.protocolVersion, - ).toByteString.writeTo(out) - - mapErrNewEUS(res) - } + } override def disableMember( requestP: v30.DisableMemberRequest diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerInitializationService.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerInitializationService.scala index 47dc400383..e14a37f6c3 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerInitializationService.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerInitializationService.scala @@ -5,8 +5,15 @@ package com.digitalasset.canton.synchronizer.sequencing.service import cats.data.EitherT import cats.syntax.either.* +import cats.syntax.foldable.* import com.digitalasset.base.error.RpcError -import com.digitalasset.canton.ProtoDeserializationError.ProtoDeserializationFailure +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.ProtoDeserializationError.{ + FieldNotSet, + InvariantViolation, + ProtoDeserializationFailure, + ValueDeserializationError, +} import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.* @@ -15,20 +22,29 @@ import com.digitalasset.canton.sequencer.admin.v30.SequencerInitializationServic import com.digitalasset.canton.sequencer.admin.v30.{ InitializeSequencerFromGenesisStateRequest, InitializeSequencerFromGenesisStateResponse, + InitializeSequencerFromGenesisStateV2Request, + InitializeSequencerFromGenesisStateV2Response, InitializeSequencerFromOnboardingStateRequest, InitializeSequencerFromOnboardingStateResponse, + InitializeSequencerFromOnboardingStateV2Request, + InitializeSequencerFromOnboardingStateV2Response, InitializeSequencerFromPredecessorRequest, InitializeSequencerFromPredecessorResponse, } import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.synchronizer.Synchronizer.FailedToInitialiseSynchronizerNode -import com.digitalasset.canton.synchronizer.sequencer.OnboardingStateForSequencer import com.digitalasset.canton.synchronizer.sequencer.admin.grpc.{ InitializeSequencerRequest, InitializeSequencerResponse, } +import com.digitalasset.canton.synchronizer.sequencer.{ + OnboardingStateForSequencer, + OnboardingStateForSequencerV2, + SequencerSnapshot, +} import com.digitalasset.canton.topology.TopologyManagerError import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} +import com.digitalasset.canton.topology.store.StoredTopologyTransaction.GenericStoredTopologyTransaction import com.digitalasset.canton.topology.store.StoredTopologyTransactions.GenericStoredTopologyTransactions import com.digitalasset.canton.topology.store.{ StoredTopologyTransaction, @@ -79,15 +95,14 @@ class GrpcSequencerInitializationService( responseObserver: StreamObserver[InitializeSequencerFromPredecessorResponse] ): StreamObserver[InitializeSequencerFromPredecessorRequest] = { implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext - GrpcStreamingUtils.streamFromClient[ - InitializeSequencerFromPredecessorRequest, - 
InitializeSequencerFromPredecessorResponse, - Option[v30.StaticSynchronizerParameters], - ]( + GrpcStreamingUtils.streamFromClient( _.topologySnapshot, _.synchronizerParameters, - (topologySnapshot, synchronizerParams) => - initializeSequencerFromState( + ( + topologySnapshot: ByteString, + synchronizerParams: Option[v30.StaticSynchronizerParameters], + ) => + initializeSequencerFromGenesisStateV2( topologySnapshot, synchronizerParams, doResetTimes = false, @@ -114,6 +129,72 @@ class GrpcSequencerInitializationService( .fromTrustedByteString(topologySnapshot) .leftMap(ProtoDeserializationFailure.Wrap(_)) ) + replicated <- initializeSequencerFromGenesisStateInternal( + topologyState, + synchronizerParameters, + doResetTimes, + ) + } yield replicated + mapErrNew(res) + } + + override def initializeSequencerFromGenesisStateV2( + responseObserver: StreamObserver[InitializeSequencerFromGenesisStateV2Response] + ): StreamObserver[InitializeSequencerFromGenesisStateV2Request] = { + implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext + GrpcStreamingUtils.streamFromClient( + _.topologySnapshot, + _.synchronizerParameters, + ( + topologySnapshot: ByteString, + synchronizerParams: Option[v30.StaticSynchronizerParameters], + ) => + initializeSequencerFromGenesisStateV2( + topologySnapshot, + synchronizerParams, + doResetTimes = true, + ).map(InitializeSequencerFromGenesisStateV2Response(_)), + responseObserver, + ) + } + + private def initializeSequencerFromGenesisStateV2( + topologySnapshot: ByteString, + synchronizerParameters: Option[v30.StaticSynchronizerParameters], + doResetTimes: Boolean, + )(implicit + traceContext: TraceContext + ): Future[Boolean] = { + val res: EitherT[Future, RpcError, Boolean] = for { + topologyState <- EitherT.fromEither[Future]( + GrpcStreamingUtils + .parseDelimitedFromTrusted( + topologySnapshot.newInput(), + StoredTopologyTransaction, + ) + .bimap( + msg => + ProtoDeserializationFailure.Wrap(ValueDeserializationError("topology_snapshot", msg)), + StoredTopologyTransactions(_), + ) + ) + replicated <- initializeSequencerFromGenesisStateInternal( + topologyState, + synchronizerParameters, + doResetTimes, + ) + } yield replicated + mapErrNew(res) + } + + private def initializeSequencerFromGenesisStateInternal( + topologyState: GenericStoredTopologyTransactions, + synchronizerParameters: Option[v30.StaticSynchronizerParameters], + doResetTimes: Boolean, + )(implicit + traceContext: TraceContext + ): EitherT[Future, RpcError, Boolean] = + for { synchronizerParameters <- EitherT.fromEither[Future]( ProtoConverter .parseRequired( @@ -193,9 +274,6 @@ class GrpcSequencerInitializationService( ] } yield result.replicated - mapErrNew(res) - } - private def resetTimes( snapshot: GenericStoredTopologyTransactions ): GenericStoredTopologyTransactions = @@ -229,7 +307,7 @@ class GrpcSequencerInitializationService( private def initializeSequencerFromOnboardingState( onboardingState: ByteString )(implicit traceContext: TraceContext) = { - val res: EitherT[Future, RpcError, InitializeSequencerFromOnboardingStateResponse] = for { + val res = for { onboardingState <- EitherT.fromEither[Future]( OnboardingStateForSequencer // according to @rgugliel-da, this is safe to do here. 
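Note on the V2 onboarding-state wire format handled in the hunks below: the state is streamed as length-delimited OnboardingStateForSequencerV2 records in which the singleton fields (static synchronizer parameters, sequencer snapshot) may appear in several records, while topology transactions arrive one per record; the handler folds all records together and rejects contradictory singleton values via checkForEqual. A minimal, self-contained Scala sketch of that accumulate-and-check pattern, where Record, merge, and accumulate are hypothetical stand-ins rather than Canton APIs:

// Each streamed record may redundantly carry the "singleton" fields; the fold keeps
// the first value seen and fails if a later record disagrees.
final case class Record(tx: Option[String], params: Option[String], snapshot: Option[String])

def merge[A](field: String, prev: Option[A], next: Option[A]): Either[String, Option[A]] =
  (prev, next) match {
    case (Some(a), Some(b)) if a != b => Left(s"$field mismatch: [$a] vs [$b]")
    case (a, b) => Right(a.orElse(b))
  }

def accumulate(
    records: Seq[Record]
): Either[String, (Vector[String], Option[String], Option[String])] =
  records.foldLeft[Either[String, (Vector[String], Option[String], Option[String])]](
    Right((Vector.empty, None, None))
  ) { (accE, r) =>
    accE.flatMap { case (txs, p0, s0) =>
      for {
        p <- merge("static_synchronizer_parameters", p0, r.params)
        s <- merge("sequencer_snapshot", s0, r.snapshot)
      } yield (txs ++ r.tx.toList, p, s)
    }
  }

The production code below performs the same fold with foldM over the parsed protobuf messages and threads ProtoDeserializationError instead of a plain String.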
@@ -239,22 +317,133 @@ class GrpcSequencerInitializationService( .fromTrustedByteString(onboardingState) .leftMap(ProtoDeserializationFailure.Wrap(_)) ) - initializeRequest = InitializeSequencerRequest( + replicated <- initializeSequencerFromOnboardingStateInternal( onboardingState.topologySnapshot, onboardingState.staticSynchronizerParameters, - Some(onboardingState.sequencerSnapshot), + onboardingState.sequencerSnapshot, ) - result <- handler - .initialize(initializeRequest) - .leftMap(FailedToInitialiseSynchronizerNode.Failure(_)) - .onShutdown(Left(FailedToInitialiseSynchronizerNode.Shutdown())): EitherT[ - Future, - RpcError, - InitializeSequencerResponse, - ] - } yield InitializeSequencerFromOnboardingStateResponse(result.replicated) + + } yield InitializeSequencerFromOnboardingStateResponse(replicated) mapErrNew(res) } + + override def initializeSequencerFromOnboardingStateV2( + responseObserver: StreamObserver[InitializeSequencerFromOnboardingStateV2Response] + ): StreamObserver[InitializeSequencerFromOnboardingStateV2Request] = { + implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext + GrpcStreamingUtils.streamFromClient( + _.onboardingState, + _ => (), + (onboardingState: ByteString, _: Unit) => + initializeSequencerFromOnboardingStateV2(onboardingState), + responseObserver, + ) + } + + private def initializeSequencerFromOnboardingStateV2( + onboardingStateBytes: ByteString + )(implicit + traceContext: TraceContext + ): Future[InitializeSequencerFromOnboardingStateV2Response] = { + val in = onboardingStateBytes.newInput() + val res = for { + onboardState <- EitherT.fromEither[Future]( + GrpcStreamingUtils + .parseDelimitedFromTrusted(in, OnboardingStateForSequencerV2) + .leftMap(err => + ProtoDeserializationFailure.Wrap( + ProtoDeserializationError.ValueConversionError("onboarding_state", err) + ) + ) + ) + accumulatedOnboardingState <- EitherT + .fromEither[Future]( + onboardState.foldM[ + Either[ProtoDeserializationError, *], + ( + Vector[GenericStoredTopologyTransaction], + Option[StaticSynchronizerParameters], + Option[SequencerSnapshot], + ), + ]( + ( + Vector.empty[GenericStoredTopologyTransaction], + Option.empty[StaticSynchronizerParameters], + Option.empty[SequencerSnapshot], + ) + ) { + case ( + (prevTxs, prevStaticParams, prevSeqSnapshot), + OnboardingStateForSequencerV2(newTx, newStaticParams, newSeqSnapshot), + ) => + for { + static <- checkForEqual( + "static_synchronizer_parameters", + prevStaticParams, + newStaticParams, + ) + seqSnapshot <- checkForEqual("sequencer_snapshot", prevSeqSnapshot, newSeqSnapshot) + } yield (prevTxs ++ newTx, static, seqSnapshot) + } + ) + .leftMap(ProtoDeserializationFailure.Wrap(_)) + + (allTxs, staticParamsO, seqSnapshotO) = accumulatedOnboardingState + + staticParams <- EitherT.fromEither[Future]( + staticParamsO.toRight[RpcError]( + ProtoDeserializationFailure.Wrap(FieldNotSet("static_synchronizer_parameters")) + ) + ) + seqSnapshot <- EitherT.fromEither[Future]( + seqSnapshotO.toRight[RpcError]( + ProtoDeserializationFailure.Wrap(FieldNotSet("sequencer_snapshot")) + ) + ) + replicated <- initializeSequencerFromOnboardingStateInternal( + StoredTopologyTransactions(allTxs), + staticParams, + seqSnapshot, + ) + } yield InitializeSequencerFromOnboardingStateV2Response(replicated) + mapErrNew(res) + } + + private def checkForEqual[A]( + field: String, + x: Option[A], + y: Option[A], + ): Either[ProtoDeserializationError, Option[A]] = (x, y) match { + case (None, None) => Right(None) + case (Some(a), Some(b)) => + 
Either.cond( + a == b, + Some(b), + InvariantViolation(field, s"Multiple provided values didn't match: a=[$a], b=[$b]"), + ) + case (a, b) => Right(a.orElse(b)) + } + + private def initializeSequencerFromOnboardingStateInternal( + topologySnapshot: GenericStoredTopologyTransactions, + staticSynchronizerParameters: StaticSynchronizerParameters, + sequencerSnapshot: SequencerSnapshot, + )(implicit + traceContext: TraceContext + ): EitherT[Future, RpcError, Boolean] = { + val initializeRequest = InitializeSequencerRequest( + topologySnapshot, + staticSynchronizerParameters, + Some(sequencerSnapshot), + ) + handler + .initialize(initializeRequest) + .bimap( + FailedToInitialiseSynchronizerNode.Failure(_), + result => result.replicated, + ) + .onShutdown(Left(FailedToInitialiseSynchronizerNode.Shutdown())) + } } object GrpcSequencerInitializationService { diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerService.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerService.scala index d444230ac2..7f35867ee0 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerService.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerService.scala @@ -19,7 +19,6 @@ import com.digitalasset.canton.lifecycle.{ FlagCloseable, FutureUnlessShutdown, PromiseUnlessShutdown, - UnlessShutdown, } import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.networking.grpc.CantonGrpcUtil @@ -29,6 +28,7 @@ import com.digitalasset.canton.protocol.DynamicSynchronizerParametersLookup import com.digitalasset.canton.protocol.SynchronizerParameters.MaxRequestSize import com.digitalasset.canton.protocol.SynchronizerParametersLookup.SequencerSynchronizerParameters import com.digitalasset.canton.sequencer.api.v30 +import com.digitalasset.canton.sequencer.api.v30.{GetTimeRequest, GetTimeResponse} import com.digitalasset.canton.sequencing.SequencedSerializedEvent import com.digitalasset.canton.sequencing.protocol.* import com.digitalasset.canton.synchronizer.metrics.SequencerMetrics @@ -59,7 +59,8 @@ import com.github.blemale.scaffeine.{Cache, Scaffeine} import com.google.common.annotations.VisibleForTesting import io.grpc.Status import io.grpc.stub.{ServerCallStreamObserver, StreamObserver} -import org.apache.pekko.stream.Materializer +import org.apache.pekko.stream.scaladsl.{Keep, Sink} +import org.apache.pekko.stream.{KillSwitches, Materializer} import scala.collection.concurrent.TrieMap import scala.concurrent.{ExecutionContext, Future} @@ -183,7 +184,7 @@ class GrpcSequencerService( protocolVersion: ProtocolVersion, maxItemsInTopologyResponse: PositiveInt = PositiveInt.tryCreate(100), acknowledgementsConflateWindow: Option[PositiveFiniteDuration] = None, -)(implicit ec: ExecutionContext) +)(implicit ec: ExecutionContext, materializer: Materializer) extends v30.SequencerServiceGrpc.SequencerService with NamedLogging with FlagCloseable { @@ -212,7 +213,6 @@ class GrpcSequencerService( // This has to run at the beginning, because it reads from a thread-local. 
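  // lookupCurrentMember() consults per-call authentication state bound to the gRPC request
  // thread, so it must happen before any asynchronous hop to an executor thread.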
val senderFromMetadata = authenticationCheck.lookupCurrentMember() - def parseAndValidate( maxRequestSize: MaxRequestSize ): Either[SequencerDeliverError, SignedContent[SubmissionRequest]] = for { @@ -473,7 +473,7 @@ class GrpcSequencerService( val createSubscriptionP = PromiseUnlessShutdown.unsupervised[Either[Status, GrpcManagedSubscription[?]]]() observer.setOnCancelHandler { () => - logger.debug(s"Subscription cancelled by client ${request.member}.") + logger.info(s"Subscription cancelled by client ${request.member}.") // Instead upon cancellation, we close the subscription once/if it has been successfully created. createSubscriptionP.future.onComplete { case Success(Outcome(Right(subscription))) => @@ -647,29 +647,34 @@ class GrpcSequencerService( responseObserver: StreamObserver[v30.DownloadTopologyStateForInitResponse], ): Unit = { implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext - TopologyStateForInitRequest - .fromProtoV30(requestP) - .traverse(request => - topologyStateForInitializationService - .initialSnapshot(request.member) - ) - .onComplete { - case Success(UnlessShutdown.Outcome(Left(parsingError))) => - responseObserver.onError(ProtoDeserializationFailure.Wrap(parsingError).asGrpcError) - - case Success(UnlessShutdown.Outcome(Right(initialSnapshot))) => - initialSnapshot.result.grouped(maxItemsInTopologyResponse.value).foreach { batch => - val response = - TopologyStateForInitResponse(Traced(StoredTopologyTransactions(batch))) - responseObserver.onNext(response.toProtoV30) - } - responseObserver.onCompleted() - case Failure(exception) => - responseObserver.onError(exception) - case Success(UnlessShutdown.AbortedDueToShutdown) => - responseObserver.onCompleted() - } + withServerCallStreamObserver(responseObserver) { observer => + TopologyStateForInitRequest + .fromProtoV30(requestP) + .traverse { request => + val (killSwitch, future) = topologyStateForInitializationService + .initialSnapshot(request.member) + .grouped(maxItemsInTopologyResponse.value) + .map { batch => + TopologyStateForInitResponse(Traced(StoredTopologyTransactions(batch))).toProtoV30 + } + .viaMat(KillSwitches.single)(Keep.right) + .toMat(Sink.foreach(observer.onNext))(Keep.both) + .run() + observer.setOnCancelHandler(() => killSwitch.shutdown()) + future + } + .onComplete { + case Success(Left(parsingError)) => + responseObserver.onError(ProtoDeserializationFailure.Wrap(parsingError).asGrpcError) + + case Success(Right(_)) => + responseObserver.onCompleted() + + case Failure(exception) => + responseObserver.onError(exception) + } + } } private def invalidRequest(message: String): Status = @@ -725,4 +730,19 @@ class GrpcSequencerService( EitherTUtil.toFuture(result.onShutdown(Left(AbortedDueToShutdown.Error().asGrpcError))) } + + override def getTime(request: GetTimeRequest): Future[GetTimeResponse] = { + // This call is authenticated but does not require special authorization. + + // Traffic is not impacted as no events are emitted. + + // The returned information is expected to be readily available and served from transient memory, + // hence this call is expected to be fast and impose minimal load on sequencers, + // so network-level rate limiting is enough to secure it against denial-of-service attacks; + // for this reason we don't apply application-level rate limiting. 
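+    // A shutdown is surfaced as a gRPC error (AbortedDueToShutdown) rather than an empty
+    // response, consistent with the other endpoints of this service.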
+ implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext + sequencer.sequencingTime + .map(time => GetTimeResponse(time.map(_.toProtoPrimitive))) + .onShutdown(throw AbortedDueToShutdown.Error().asGrpcError) + } } diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/topology/SequencedEventStoreBasedTopologyHeadInitializer.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/topology/SequencedEventStoreBasedTopologyHeadInitializer.scala index 90adf619d8..dd5a062722 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/topology/SequencedEventStoreBasedTopologyHeadInitializer.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/topology/SequencedEventStoreBasedTopologyHeadInitializer.scala @@ -5,6 +5,7 @@ package com.digitalasset.canton.synchronizer.sequencing.topology import com.digitalasset.canton.data.SynchronizerPredecessor import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.protocol.StaticSynchronizerParameters import com.digitalasset.canton.store.SequencedEventStore import com.digitalasset.canton.store.SequencedEventStore.SearchCriterion import com.digitalasset.canton.topology.client.{ @@ -13,6 +14,7 @@ import com.digitalasset.canton.topology.client.{ } import com.digitalasset.canton.topology.processing.{ApproximateTime, EffectiveTime, SequencedTime} import com.digitalasset.canton.topology.store.{TopologyStore, TopologyStoreId} +import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction import com.digitalasset.canton.tracing.TraceContext import scala.concurrent.ExecutionContext @@ -29,6 +31,7 @@ final class SequencedEventStoreBasedTopologyHeadInitializer( override def initialize( client: SynchronizerTopologyClientWithInit, synchronizerPredecessor: Option[SynchronizerPredecessor], + staticSynchronizerParameters: StaticSynchronizerParameters, )(implicit executionContext: ExecutionContext, traceContext: TraceContext, @@ -43,13 +46,34 @@ final class SequencedEventStoreBasedTopologyHeadInitializer( SequencedTime.MaxValue, includeRejected = true, ) + // This is needed, because we ALWAYS use staticParams.topologyChangeDelay in SyncCryptoClient.computeTimestampForValidation. + // Previously, we used to fall back to 0 for the initial genesis transactions. + // Alternatively, we could reset the genesis times to always set the correct topology change delay, but that messes with + // the genesis timestamp logic in the BftOrderingService + maxTopologyStoreTimestampWithTopologyChangeDelay = + if ( + maxTopologyStoreTimestamp.contains( + ( + SequencedTime(SignedTopologyTransaction.InitialTopologySequencingTime), + EffectiveTime(SignedTopologyTransaction.InitialTopologySequencingTime), + ) + ) + ) { + Some( + ( + SequencedTime(SignedTopologyTransaction.InitialTopologySequencingTime), + EffectiveTime(SignedTopologyTransaction.InitialTopologySequencingTime) + + staticSynchronizerParameters.topologyChangeDelay, + ) + ) + } else maxTopologyStoreTimestamp } yield { // Defensively, get the latest possible timestamps or don't update the head. 
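      // Two candidates are considered: the newest sequenced event (whose effective time equals its
      // sequenced time) and the newest topology store timestamp (genesis-adjusted above); the head
      // is advanced to whichever carries the greater effective time.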
val sequencedToEffectiveTimes = List( latestSequencedEvent.map(event => (SequencedTime(event.timestamp), EffectiveTime(event.timestamp)) ), - maxTopologyStoreTimestamp, + maxTopologyStoreTimestampWithTopologyChangeDelay, ).flatten val maxTimestampsO = sequencedToEffectiveTimes.maxByOption { case (_, effectiveTime: EffectiveTime) => effectiveTime @@ -59,6 +83,7 @@ final class SequencedEventStoreBasedTopologyHeadInitializer( .computeInitialHeadUpdate( maxTimestampsO, synchronizerPredecessor, + staticSynchronizerParameters.topologyChangeDelay, ) .foreach { case (maxSequencedTime, maxEffectiveTime) => client.updateHead( diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/topology/SequencerSnapshotBasedTopologyHeadInitializer.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/topology/SequencerSnapshotBasedTopologyHeadInitializer.scala index b544b9df5a..b7c18d8c31 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/topology/SequencerSnapshotBasedTopologyHeadInitializer.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/topology/SequencerSnapshotBasedTopologyHeadInitializer.scala @@ -5,6 +5,7 @@ package com.digitalasset.canton.synchronizer.sequencing.topology import com.digitalasset.canton.data.SynchronizerPredecessor import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.protocol.StaticSynchronizerParameters import com.digitalasset.canton.synchronizer.sequencer.SequencerSnapshot import com.digitalasset.canton.topology.client.{ SynchronizerTopologyClientHeadStateInitializer, @@ -30,6 +31,7 @@ final class SequencerSnapshotBasedTopologyHeadInitializer( override def initialize( client: SynchronizerTopologyClientWithInit, synchronizerPredecessor: Option[SynchronizerPredecessor], + staticSynchronizerParameters: StaticSynchronizerParameters, )(implicit executionContext: ExecutionContext, traceContext: TraceContext, @@ -43,6 +45,7 @@ final class SequencerSnapshotBasedTopologyHeadInitializer( .computeInitialHeadUpdate( maxTopologyStoreTimestamp, synchronizerPredecessor, + staticSynchronizerParameters.topologyChangeDelay, ) .fold(snapshotLastTsEffective) { case (_, maxStoreEffectiveTime) => maxStoreEffectiveTime.max(snapshotLastTsEffective) diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/traffic/EnterpriseSequencerRateLimitManager.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/traffic/EnterpriseSequencerRateLimitManager.scala index 752b152169..b761f42a53 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/traffic/EnterpriseSequencerRateLimitManager.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencing/traffic/EnterpriseSequencerRateLimitManager.scala @@ -431,7 +431,6 @@ class EnterpriseSequencerRateLimitManager( submissionTimestamp, mostRecentKnownSynchronizerTimestamp, latestSequencerEventTimestamp, - protocolVersion, warnIfApproximate = true, _.submissionCostTimestampTopologyTolerance, ) @@ -449,7 +448,6 @@ class EnterpriseSequencerRateLimitManager( synchronizerSyncCryptoApi, submissionTimestamp, latestSequencerEventTimestamp, - protocolVersion, warnIfApproximate, ) ) @@ -708,7 +706,6 @@ class EnterpriseSequencerRateLimitManager( 
synchronizerSyncCryptoApi, sequencingTime, latestSequencerEventTimestamp, - protocolVersion, warnIfApproximate, ) ) @@ -738,7 +735,6 @@ class EnterpriseSequencerRateLimitManager( synchronizerSyncCryptoApi, minTimestamp, lastSequencerEventTimestamp, - protocolVersion, warnIfApproximate, ) ) diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/server/DynamicGrpcServer.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/server/DynamicGrpcServer.scala index e1b68b03e5..b872fd2e97 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/server/DynamicGrpcServer.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/server/DynamicGrpcServer.scala @@ -15,9 +15,11 @@ import com.digitalasset.canton.health.{ import com.digitalasset.canton.lifecycle.LifeCycle.{CloseableServer, toCloseableServer} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.networking.grpc.CantonServerBuilder +import com.digitalasset.canton.networking.grpc.ratelimiting.StreamCounterCheck import com.digitalasset.canton.protocol.SynchronizerParameters.MaxRequestSize import com.digitalasset.canton.synchronizer.config.PublicServerConfig import com.digitalasset.canton.synchronizer.sequencer.SequencerRuntime +import com.google.common.annotations.VisibleForTesting import io.grpc.protobuf.services.ProtoReflectionServiceV1 import scala.concurrent.ExecutionContextExecutorService @@ -51,6 +53,9 @@ class DynamicGrpcServer( this } + @VisibleForTesting + def streamCounterCheck: Option[StreamCounterCheck] = registry.streamCounterCheck + private val (grpcServer, registry) = { val serverBuilder = CantonServerBuilder .forConfig( diff --git a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/service/GrpcSequencerConnectionService.scala b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/service/GrpcSequencerConnectionService.scala index 1101e5769c..10e1fe8000 100644 --- a/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/service/GrpcSequencerConnectionService.scala +++ b/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/service/GrpcSequencerConnectionService.scala @@ -20,6 +20,7 @@ import com.digitalasset.canton.mediator.admin.v30 import com.digitalasset.canton.mediator.admin.v30.SequencerConnectionServiceGrpc.SequencerConnectionService import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors.AbortedDueToShutdown import com.digitalasset.canton.networking.grpc.{CantonGrpcUtil, CantonMutableHandlerRegistry} +import com.digitalasset.canton.sequencing.SequencerConnectionXPool.SequencerConnectionXPoolError import com.digitalasset.canton.sequencing.client.SequencerClient.SequencerTransports import com.digitalasset.canton.sequencing.client.{ RequestSigner, @@ -162,9 +163,11 @@ object GrpcSequencerConnectionService extends HasLoggerName { requestSigner: RequestSigner, transportFactory: SequencerClientTransportFactory, sequencerInfoLoader: SequencerInfoLoader, + connectionPoolFactory: SequencerConnectionXPoolFactory, synchronizerAlias: SynchronizerAlias, synchronizerId: PhysicalSynchronizerId, sequencerClient: SequencerClient, + tracingConfig: TracingConfig, loggerFactory: NamedLoggerFactory, )(implicit executionContext: ExecutionContextExecutor, @@ -174,6 +177,8 @@ object GrpcSequencerConnectionService 
extends HasLoggerName { closeContext: CloseContext, ): UpdateSequencerClient = { val clientO = new AtomicReference[Option[RichSequencerClient]](None) + implicit val namedLoggingContext: NamedLoggingContext = + NamedLoggingContext(loggerFactory, traceContext) registry.addServiceU( SequencerConnectionService.bindService( new GrpcSequencerConnectionService( @@ -186,14 +191,52 @@ object GrpcSequencerConnectionService extends HasLoggerName { newConfig = sequencerConnectionLens.replace(newSequencerConnection)(currentConfig) // load and potentially validate the new connection - newEndpointsInfo <- sequencerInfoLoader - .loadAndAggregateSequencerEndpoints( - synchronizerAlias, - Some(synchronizerId), - newSequencerConnection, - sequencerConnectionValidation, - ) - .leftMap(_.cause) + // + // Retries are not strictly necessary for the "change sequencer connection" scenario because the API is + // idempotent and retries on the client side are cheap. So by not retrying internally, the application + // gets some error feedback more quickly and can in theory react to it. Whether callers will reasonably + // inspect the returned error is debatable though. + // In principle, mediator node start-up could also fail without retrying and rely on the container + // framework to restart the pod. But that's a much more expensive operation, so it makes sense to + // retry there (see `waitUntilSequencerConnectionIsValidWithPool`). + newEndpointsInfoAndPoolConfigO <- + if (useNewConnectionPool) for { + // The following implementation strives to keep the same behavior as with the transport mechanisms, + // which is to ensure the new config is valid before replacing the old config. + // The transport mechanism supports a variety of validation modes, whereas here we support only the + // equivalent of `THRESHOLD_ACTIVE`, i.e. the config is considered valid if at least trust-threshold-many + // connections are successful. + // + // Performing this validation here protects the node operator from typos in the connection config that + // would render their node dysfunctional because it cannot connect to the sequencer. + // On the other hand, the operator should be able to set a new configuration in case of a substantial + // change to sequencer endpoints, and that should be doable concurrently with those sequencer endpoint + // changes taking place, so one could argue that we should not validate the config here and rely on the + // pool to report through health status. + // + // As we cannot satisfy both needs here, this will likely be discussed and revisited.
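A sketch of the client-side retry that the comment above leans on; `setConnection` is a hypothetical stand-in for the admin API call, not Canton code. Because the set-connection call is idempotent, a caller can simply retry it with a fixed backoff instead of the server retrying internally.

```scala
// Hypothetical caller-side retry; assumed names throughout.
import scala.annotation.tailrec
import scala.concurrent.duration.*
import scala.util.{Failure, Success, Try}

object ClientRetry {
  @tailrec
  def retry[A](attempts: Int, backoff: FiniteDuration)(op: () => Try[A]): Try[A] =
    op() match {
      case s @ Success(_) => s
      case f @ Failure(_) if attempts <= 1 => f
      case Failure(_) =>
        Thread.sleep(backoff.toMillis) // simple blocking backoff, acceptable for an admin tool
        retry(attempts - 1, backoff)(op)
    }
}

// Usage, assuming a `setConnection(newConfig)` admin call exists:
// ClientRetry.retry(attempts = 5, backoff = 1.second)(() => Try(setConnection(newConfig)))
```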
+ connectionPoolAndInfo <- validateConfig( + connectionPoolFactory = connectionPoolFactory, + sequencerConnections = newSequencerConnection, + poolName = "temp", + tracingConfig = tracingConfig, + ) + } yield { + val (pool, info) = connectionPoolAndInfo + pool.close() + (info, Some(pool.config)) + } + else + sequencerInfoLoader + .loadAndAggregateSequencerEndpoints( + synchronizerAlias, + Some(synchronizerId), + newSequencerConnection, + sequencerConnectionValidation, + ) + .leftMap(_.cause) + .map((_, None)) + (newEndpointsInfo, newPoolConfigO) = newEndpointsInfoAndPoolConfigO sequencerTransportsMapO = Option.when(!useNewConnectionPool)( transportFactory @@ -213,21 +256,19 @@ object GrpcSequencerConnectionService extends HasLoggerName { newEndpointsInfo.sequencerConnections.sequencerTrustThreshold, newEndpointsInfo.sequencerConnections.sequencerLivenessMargin, newEndpointsInfo.sequencerConnections.submissionRequestAmplification, + newEndpointsInfo.sequencerConnections.sequencerConnectionPoolDelays, ) ) // important to only save the config and change the transport after the `makeTransport` has run and done the handshake + _ <- clientO.get.fold { + // need to close here + sequencerTransportsMapO.foreach(_.values.foreach(_.close())) + EitherT.pure[FutureUnlessShutdown, String](()) + }( + _.changeTransport(sequencerTransports, newPoolConfigO) + ) _ <- EitherT.right(saveConfig(newConfig)) - _ <- EitherT - .right( - clientO - .get() - .fold { - // need to close here - sequencerTransportsMapO.foreach(_.values.foreach(_.close())) - FutureUnlessShutdown.unit - }(_.changeTransport(sequencerTransports)) - ) } yield (), sequencerClient.logout _, loggerFactory, @@ -306,44 +347,15 @@ object GrpcSequencerConnectionService extends HasLoggerName { ] = for { sequencerConnections <- OptionT(loadConfig).toRight("No sequencer connection config") - connectionPool <- EitherT.fromEither[FutureUnlessShutdown]( - connectionPoolFactory - .createFromOldConfig( - sequencerConnections, - expectedPSIdO = None, - tracingConfig = tracingConfig, - ) - .leftMap(_.toString) - ) - _ <- connectionPool.start().leftMap { error => - namedLoggingContext.warn(s"Waiting for valid sequencer connection: $error") - error.toString - } - } yield { - val psid = connectionPool.physicalSynchronizerIdO.getOrElse( - ErrorUtil.invalidState( - "a successfully started connection pool must have the synchronizer ID defined" - ) - ) - val staticParameters = connectionPool.staticSynchronizerParametersO.getOrElse( - ErrorUtil.invalidState( - "a successfully started connection pool must have the static parameters defined" - ) - ) - - // `sequencerConnections.aliasToConnections` built with the transport mechanism depends on the validation mode - // (all, active only, etc.), whereas with the connection pool we provide the original configuration. - // It seems this parameter is however only used later on for building the transports, so it does not matter - // when using the connection pool. 
- val info = SequencerAggregatedInfo( - psid = psid, - staticSynchronizerParameters = staticParameters, - expectedSequencersO = None, + connectionPoolAndInfo <- validateConfig( + connectionPoolFactory = connectionPoolFactory, sequencerConnections = sequencerConnections, + poolName = "main", + tracingConfig = tracingConfig, + logErrorFn = + error => namedLoggingContext.warn(s"Waiting for valid sequencer connection: $error"), ) - - (connectionPool, info) - } + } yield connectionPoolAndInfo import scala.concurrent.duration.* EitherT( @@ -361,4 +373,66 @@ ) ) } + + private def validateConfig( + connectionPoolFactory: SequencerConnectionXPoolFactory, + sequencerConnections: SequencerConnections, + poolName: String, + tracingConfig: TracingConfig, + logErrorFn: SequencerConnectionXPoolError => Unit = _ => (), + )(implicit + namedLoggingContext: NamedLoggingContext, + executionContext: ExecutionContextExecutor, + executionSequencerFactory: ExecutionSequencerFactory, + materializer: Materializer, + ): EitherT[FutureUnlessShutdown, String, (SequencerConnectionXPool, SequencerAggregatedInfo)] = { + implicit val traceContext: TraceContext = namedLoggingContext.traceContext + + for { + connectionPool <- EitherT.fromEither[FutureUnlessShutdown]( + connectionPoolFactory + .createFromOldConfig( + sequencerConnections, + expectedPSIdO = None, + tracingConfig = tracingConfig, + name = poolName, + ) + .leftMap { error => + logErrorFn(error) + error.toString + } + ) + _ <- connectionPool.start().leftMap { error => + logErrorFn(error) + error.toString + } + } yield { + val psid = connectionPool.physicalSynchronizerIdO.getOrElse( + ErrorUtil.invalidState( + "a successfully started connection pool must have the synchronizer ID defined" + ) + ) + val staticParameters = connectionPool.staticSynchronizerParametersO.getOrElse( + ErrorUtil.invalidState( + "a successfully started connection pool must have the static parameters defined" + ) + ) + + // The `sequencerConnections.aliasToConnections` field that we place into `SequencerAggregatedInfo` differs + // between the connection pool and the transport mechanism's `SequencerInfoLoader`: with the connection pool, + // we provide the original configuration, whereas `SequencerInfoLoader.loadAndAggregateSequencerEndpoints` + // produces a map that depends on the validation mode (all, active only, etc.). + // However, this parameter appears to be used only later on when building the transports, so it does not + // matter when using the connection pool (where the transports are not built).
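The THRESHOLD_ACTIVE-style check that `validateConfig` delegates to the pool can be pictured as follows, with assumed simplified types rather than the pool's real interface: a candidate config passes when at least trust-threshold-many of its connections can be established.

```scala
// Sketch with assumed names; not the connection pool's actual validation code.
object ThresholdValidation {
  final case class Endpoint(host: String, port: Int)

  def validateThresholdActive(
      endpoints: Seq[Endpoint],
      trustThreshold: Int,
      connect: Endpoint => Boolean, // returns true if a handshake succeeds
  ): Either[String, Unit] = {
    val successes = endpoints.count(connect)
    Either.cond(
      successes >= trustThreshold,
      (),
      s"only $successes of ${endpoints.size} sequencer connections succeeded " +
        s"(trust threshold: $trustThreshold)",
    )
  }
}
```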
+ val info = SequencerAggregatedInfo( + psid = psid, + staticSynchronizerParameters = staticParameters, + expectedSequencersO = None, + sequencerConnections = sequencerConnections, + ) + + (connectionPool, info) + } + } + } diff --git a/canton/community/synchronizer/src/test/resources/bftbenchmark-dabft.conf b/canton/community/synchronizer/src/test/resources/bftbenchmark-dabft.conf new file mode 100644 index 0000000000..7e967b8676 --- /dev/null +++ b/canton/community/synchronizer/src/test/resources/bftbenchmark-dabft.conf @@ -0,0 +1,29 @@ +{ + transaction-bytes = 20000, // 37, 1000, 10000, 20000, 100000, 200000, 500000, 1000000, 10000000 + run-duration = 30 seconds, + per-node-write-period = 100 milliseconds, + reporting-interval = 10 seconds, + nodes = [ + { + type = networked-read-write-node, + host = localhost, + read-port = 31031, + write-port = 31031, + }, + { + type = networked-write-only-node, + host = localhost, + write-port = 31032, + }, + { + type = networked-write-only-node, + host = localhost, + write-port = 31033, + }, + { + type = networked-write-only-node, + host = localhost, + write-port = 31034, + } + ] +} diff --git a/canton/community/synchronizer/src/test/resources/bftbenchmark-shortcircuit.conf b/canton/community/synchronizer/src/test/resources/bftbenchmark-shortcircuit.conf new file mode 100644 index 0000000000..9edc8f5642 --- /dev/null +++ b/canton/community/synchronizer/src/test/resources/bftbenchmark-shortcircuit.conf @@ -0,0 +1,14 @@ +{ + transaction-bytes = 37, + run-duration = 1 millisecond, + per-node-write-period = 1 millisecond, + reporting-interval = 10 seconds, + nodes = [ + { + type = in-process-read-write-node, + host = "", + read-port = "", + write-port = "", + } + ] +} diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/block/AsyncWriterTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/block/AsyncWriterTest.scala new file mode 100644 index 0000000000..99e99044e1 --- /dev/null +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/block/AsyncWriterTest.scala @@ -0,0 +1,100 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.synchronizer.block + +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.lifecycle.{PromiseUnlessShutdown, UnlessShutdown} +import com.digitalasset.canton.version.HasTestCloseContext +import com.digitalasset.canton.{BaseTest, HasExecutionContext} +import org.scalatest.FutureOutcome +import org.scalatest.wordspec.FixtureAsyncWordSpecLike + +import java.util.concurrent.atomic.{AtomicInteger, AtomicReference} + +class AsyncWriterTest + extends FixtureAsyncWordSpecLike + with BaseTest + with HasTestCloseContext + with HasExecutionContext { + + private[block] class Fixture { + val counter = new AtomicInteger(0) + val written = new AtomicReference[Vector[Int]](Vector.empty) + val writeStartPUS = (0 until 3).map(_ => PromiseUnlessShutdown.unsupervised[Unit]()) + val writeEndPUS = (0 until 3).map(_ => PromiseUnlessShutdown.unsupervised[Unit]()) + val writer = new AsyncWriter[Vector[Int]]( + addToQueue = (a, b) => a ++ b, + writeQueue = x => { + written.getAndUpdate(_ ++ x).discard + val cc = counter.getAndIncrement() + // mark the next promise as completed so we can sync the test + writeStartPUS(cc).success(UnlessShutdown.unit) + writeEndPUS(cc).futureUS + }, + Vector.empty, + "test", + futureSupervisor, + loggerFactory, + ) { + override protected def recordWriteError(name: String, exception: Throwable): Unit = + ??? // tested in BlockSequencerStateAsyncWriterTest / escalate errors + } + } + override protected type FixtureParam = Fixture + + override def withFixture(test: OneArgAsyncTest): FutureOutcome = + super.withFixture(test.toNoArgAsyncTest(new Fixture(): FixtureParam)) + + "async writer" should { + "start writing if idle" in { fixture => + import fixture.* + val res1 = writer.appendAndSchedule(Vector(1)) + writeEndPUS(0).success(UnlessShutdown.unit) + (for { + _ <- writeStartPUS(0).futureUS + } yield { + res1.queueSize shouldBe 0 + written.get() shouldBe Vector(1) + }).failOnShutdown + } + "queue while writing and make sure subsequent queue is picked up after write" in { fixture => + import fixture.* + writer.appendAndSchedule(Vector(1)) + val res2 = writer.appendAndSchedule(Vector(2)) + (for { + // wait for first write to start + _ <- writeStartPUS(0).futureUS + _ = { written.get() shouldBe Vector(1) } + // complete first write and wait for the second write to start + _ = { writeEndPUS(0).success(UnlessShutdown.unit) } + _ <- writeStartPUS(1).futureUS + } yield { + res2.queueSize shouldBe 1 + written.get() shouldBe Vector(1, 2) + }).failOnShutdown + } + "go back to idle after write" in { fixture => + import fixture.* + writer.appendAndSchedule(Vector(1)) + (for { + // wait for first write to start + _ <- writeStartPUS(0).futureUS + _ = { written.get() shouldBe Vector(1) } + // complete first write and wait for the second write to start + _ = { writeEndPUS(0).success(UnlessShutdown.unit) } + // start next write + _ = { + writer.appendAndSchedule(Vector(2)) + writeEndPUS(1).success(UnlessShutdown.unit) + } + // wait until second write started + _ <- writeStartPUS(1).futureUS + } yield { + written.get() shouldBe Vector(1, 2) + }).failOnShutdown + } + + } + +} diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/block/BlockSequencerStateAsyncWriterTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/block/BlockSequencerStateAsyncWriterTest.scala new file mode 100644 index 0000000000..5a255bd21f 
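The `AsyncWriterTest` above pins down the writer's contract: an append starts a write when the writer is idle, appends made while a write is in flight are queued and batched into the next write, and the writer returns to idle once the queue drains. A minimal sketch of that pattern, simplified and not Canton's `AsyncWriter` (which additionally handles shutdown, backpressure, and error escalation as the tests show):

```scala
// Minimal append-and-schedule batching writer; illustration only.
import scala.concurrent.{ExecutionContext, Future}

final class SimpleAsyncWriter[A](write: Vector[A] => Future[Unit])(implicit
    ec: ExecutionContext
) {
  private var queue = Vector.empty[A] // pending items, guarded by `this`
  private var writing = false         // true while a write is in flight

  def append(items: Vector[A]): Unit = synchronized {
    queue = queue ++ items
    if (!writing) startWrite() // idle: kick off a write immediately
  }

  // Always called while holding the lock.
  private def startWrite(): Unit = {
    val batch = queue
    queue = Vector.empty
    if (batch.nonEmpty) {
      writing = true
      write(batch).onComplete { _ =>
        synchronized {
          writing = false
          // pick up everything queued while the write was running, as one batch
          if (queue.nonEmpty) startWrite()
        }
      }
    }
  }
}
```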
--- /dev/null +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/block/BlockSequencerStateAsyncWriterTest.scala @@ -0,0 +1,310 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.synchronizer.block + +import cats.data.{Chain, EitherT} +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.concurrent.Threading +import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt} +import com.digitalasset.canton.crypto.TestHash +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.discard.Implicits.* +import com.digitalasset.canton.lifecycle.{ + FutureUnlessShutdown, + PromiseUnlessShutdown, + UnlessShutdown, +} +import com.digitalasset.canton.sequencing.protocol.{AggregationId, AggregationRule} +import com.digitalasset.canton.sequencing.traffic.TrafficConsumed +import com.digitalasset.canton.synchronizer.block.data.{BlockInfo, SequencerBlockStore} +import com.digitalasset.canton.synchronizer.sequencer.InFlightAggregation.AggregationBySender +import com.digitalasset.canton.synchronizer.sequencer.{ + AggregatedSender, + FreshInFlightAggregation, + InFlightAggregationUpdate, + InFlightAggregationUpdates, +} +import com.digitalasset.canton.synchronizer.sequencing.traffic.store.TrafficConsumedStore +import com.digitalasset.canton.topology.{DefaultTestIdentities, Member} +import com.digitalasset.canton.version.{HasTestCloseContext, ProtocolVersion} +import com.digitalasset.canton.{BaseTest, HasExecutionContext} +import org.scalatest.wordspec.FixtureAsyncWordSpecLike +import org.scalatest.{Assertion, FutureOutcome} + +import java.util.concurrent.atomic.AtomicReference +import scala.concurrent.Future +import scala.concurrent.duration.* + +private[block] final case class WriteQueue[T]( + nextWrite: PromiseUnlessShutdown[Unit], + writeReturn: Seq[FutureUnlessShutdown[Unit]], + written: Seq[T], +) +object WriteQueue { + def empty[T]: WriteQueue[T] = + WriteQueue(PromiseUnlessShutdown.unsupervised(), Seq.empty, Seq.empty) +} + +class BlockSequencerStateAsyncWriterTest + extends FixtureAsyncWordSpecLike + with BaseTest + with HasTestCloseContext + with HasExecutionContext { + + private[block] class Fixture { + + val inflightAggregations = + new AtomicReference[WriteQueue[InFlightAggregationUpdates]](WriteQueue.empty) + val blockInfos = + new AtomicReference[WriteQueue[Seq[BlockInfo]]](WriteQueue.empty) + + def recordWrite[T]( + ref: AtomicReference[WriteQueue[T]], + item: T, + ): FutureUnlessShutdown[Unit] = { + val queue = ref.getAndUpdate { queue => + WriteQueue( + PromiseUnlessShutdown.unsupervised[Unit](), + queue.writeReturn.drop(1), + queue.written :+ item, + ) + } + queue.nextWrite.success(UnlessShutdown.unit) + queue.writeReturn.headOption.getOrElse(FutureUnlessShutdown.unit) + } + + val blockStore = { + val m = mock[SequencerBlockStore] + when(m.storeInflightAggregations(any[InFlightAggregationUpdates])(anyTraceContext)) + .thenAnswer[InFlightAggregationUpdates] { x => + logger.debug(s"Record in flight aggregation $x") + recordWrite(inflightAggregations, x) + } + when(m.finalizeBlockUpdates(any[Seq[BlockInfo]])(anyTraceContext)) + .thenAnswer[Seq[BlockInfo]] { x => + logger.debug(s"Record block infos $x") + recordWrite(blockInfos, x) + } + m + } + + val trafficConsumed = + new AtomicReference[WriteQueue[Seq[TrafficConsumed]]]( + WriteQueue.empty + ) + val trafficStore = { + val m 
= mock[TrafficConsumedStore] + when(m.store(any[Seq[TrafficConsumed]])(anyTraceContext)) + .thenAnswer[Seq[TrafficConsumed]] { x => + logger.debug(s"Record traffic consumed $x") + recordWrite(trafficConsumed, x) + } + m + } + + val writer = new BlockSequencerStateAsyncWriter( + blockStore, + trafficStore, + futureSupervisor, + AsyncWriterParameters( + trafficBatchSize = PositiveInt.two, + aggregationBatchSize = PositiveInt.two, + blockInfoBatchSize = PositiveInt.two, + ), + loggerFactory, + ) + + } + + override protected type FixtureParam = Fixture + + override def withFixture(test: OneArgAsyncTest): FutureOutcome = { + val env = new Fixture() + super.withFixture(test.toNoArgAsyncTest(env)) + } + + private lazy val member = DefaultTestIdentities.participant1 + private lazy val tc1 = TrafficConsumed( + member, + CantonTimestamp.Epoch, + NonNegativeLong.one, + NonNegativeLong.one, + NonNegativeLong.one, + ) + private lazy val tc2 = tc1.copy(sequencingTimestamp = CantonTimestamp.Epoch.plusSeconds(1)) + private lazy val tc3 = tc1.copy(sequencingTimestamp = CantonTimestamp.Epoch.plusSeconds(2)) + + private lazy val aggId1 = AggregationId(TestHash.digest(1)) + private lazy val aggId2 = AggregationId(TestHash.digest(2)) + private lazy val sender1 = + AggregatedSender( + DefaultTestIdentities.participant2, + AggregationBySender(CantonTimestamp.Epoch, Seq.empty), + ) + private lazy val sender2 = sender1.copy(sender = DefaultTestIdentities.participant3) + private lazy val fresh = + FreshInFlightAggregation( + CantonTimestamp.Epoch, + AggregationRule( + eligibleMembers = NonEmpty.mk(Seq, member): NonEmpty[Seq[Member]], + threshold = PositiveInt.one, + protocolVersion = ProtocolVersion.latest, + ), + ) + private lazy val agg1 = InFlightAggregationUpdate( + Some(fresh), + Chain.one(sender1), + ) + + private lazy val agg2 = InFlightAggregationUpdate( + Some(fresh), + Chain.one(sender2), + ) + + private lazy val block1 = BlockInfo(10L, CantonTimestamp.Epoch, Some(CantonTimestamp.Epoch)) + private def unwrap(t: EitherT[FutureUnlessShutdown, String, Assertion]): Future[Assertion] = + t.failOnShutdown.value.map(_.valueOrFail("EitherT returned left")) + + private def syncWrite[T, Q](ref: AtomicReference[WriteQueue[Q]])( + fus: => EitherT[FutureUnlessShutdown, String, T] + ): EitherT[FutureUnlessShutdown, String, T] = { + val promise = ref.get().nextWrite + fus.flatMap { res => + EitherT.right(promise.futureUS).map(_ => res) + } + } + + "BlockSequencerStateAsyncWriter" should { + "queue on concurrent" in { fixture => + import fixture.* + val trafficWriteP = PromiseUnlessShutdown.unsupervised[Unit]() + trafficConsumed.updateAndGet(_.copy(writeReturn = Seq(trafficWriteP.futureUS))) + + unwrap(for { + // first, we fill both queues => writes should start a write + _ <- syncWrite(trafficConsumed)( + writer.append(Seq(tc1), Map(), EitherT.pure(())) + ) + _ = { + // check that writes have started + trafficConsumed.get().written shouldBe Seq(Seq(tc1)) + } + _ <- EitherT.right(writer.finalizeBlockUpate(block1)) + _ = { + // check that block update is not written yet (because aggregation write is in progress) + blockInfos.get().written shouldBe empty + } + // next item should be queued + _ <- writer.append(Seq(tc2), Map(), EitherT.pure(())) + _ = always(200.millis) { + // item has not been written yet as it is queued, so result should remain unchanged + trafficConsumed.get().written shouldBe Seq(Seq(tc1)) + // and the block update is still not written + blockInfos.get().written shouldBe empty + } + // now, we complete 
the first write, this should now start the second write and trigger + // the flush of the block update + blockInfoFU = blockInfos.get().nextWrite + _ = { + logger.debug("Completing first traffic write") + trafficWriteP.success(UnlessShutdown.unit) + logger.debug("Waiting for pickup") + } + _ <- EitherT.right(blockInfoFU.futureUS) + _ = { + trafficConsumed.get().written shouldBe Seq(Seq(tc1), Seq(tc2)) + blockInfos.get().written shouldBe Seq(Seq(block1)) + } + } yield { + succeed + }) + } + "backpressure on full queue" in { fixture => + import fixture.* + val trafficWriteP = PromiseUnlessShutdown.unsupervised[Unit]() + trafficConsumed.updateAndGet(_.copy(writeReturn = Seq(trafficWriteP.futureUS))).discard + unwrap(for { + // first, we fill both queues => writes should start a write + _ <- syncWrite(trafficConsumed)(writer.append(Seq(tc1), Map(), EitherT.pure(()))) + _ <- writer.append(Seq(tc2), Map(), EitherT.pure(())) + bP = writer.append(Seq(tc3), Map(), EitherT.pure(())) + _ = { + Threading.sleep(100) + // this append should be backpressured + bP.value.isCompleted shouldBe false + // check that writes have started + trafficConsumed.get().written shouldBe Seq(Seq(tc1)) + } + _ = { + // after completing writing, the next queue should be picked up and the backpressure + // should be released + trafficWriteP.success(UnlessShutdown.unit) + } + // now wait until the queue was written and the backpressure released + _ = eventually() { + bP.value.isCompleted shouldBe true + trafficConsumed.get().written shouldBe Seq(Seq(tc1), Seq(tc2, tc3)) + } + + } yield succeed) + } + + "aggregation caches are managed correctly" in { fixture => + import fixture.* + // note, we test here the aggregation caching and merging logic which is a bit + // more complicated + val aggregationP = PromiseUnlessShutdown.unsupervised[Unit]() + inflightAggregations.updateAndGet(_.copy(writeReturn = Seq(aggregationP.futureUS))) + unwrap(for { + // first write will start immediately, rest will be queued + _ <- writer.append(Seq.empty, Map(aggId1 -> agg1), EitherT.pure(())) + // adding first aggregation to the queue + _ = writer.append(Seq.empty, Map(aggId2 -> agg1, aggId1 -> agg1), EitherT.pure(())) + // adding second aggregation to the queue + _ = writer.append(Seq.empty, Map(aggId2 -> agg2), EitherT.pure(())) + } yield { + // flushing first write + aggregationP.success(UnlessShutdown.unit) + eventually() { + val ret = inflightAggregations.get().written + ret should have length (2) + ret(0) shouldBe Map(aggId1 -> agg1) + ret(1) shouldBe Map(aggId2 -> agg1.tryMerge(agg2), aggId1 -> agg1) + } + succeed + }) + } + + "escalate errors" in { fixture => + import fixture.* + val trafficWriteP = PromiseUnlessShutdown.unsupervised[Unit]() + val boooh = new Exception("booh") + trafficConsumed.updateAndGet(_.copy(writeReturn = Seq(trafficWriteP.futureUS))).discard + unwrap(for { + _ <- syncWrite(trafficConsumed)(writer.append(Seq(tc1), Map(), EitherT.pure(()))) + _ = loggerFactory.assertLogs( + { + // now, we fail the write in the background + trafficWriteP.failure(boooh) + eventually() { + // once the error is registered, every future will fail + val ret = writer.append(Seq(tc2), Map(), EitherT.pure(())) + // the test is racy as the append might pick up the exception or end up with the backpressureF, + // waiting for the next write to complete. If the error is already registered, + // then the future will complete immediately. 
+ ret.value.isCompleted shouldBe true + ret.failOnShutdown.value.failed.futureValue.getCause shouldBe boooh + } + }, + _.errorMessage should include("Background write failed"), + ) + _ = {} + } yield { + succeed + }) + } + + } + +} diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/mediator/GrpcMediatorInspectionServiceTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/mediator/GrpcMediatorInspectionServiceTest.scala index 867b649bcb..3962d9520b 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/mediator/GrpcMediatorInspectionServiceTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/mediator/GrpcMediatorInspectionServiceTest.scala @@ -3,14 +3,19 @@ package com.digitalasset.canton.synchronizer.mediator -import com.digitalasset.canton.BaseTest +import com.daml.grpc.adapter.ExecutionSequencerFactory import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.crypto.* import com.digitalasset.canton.data.* import com.digitalasset.canton.data.CantonTimestamp.Epoch import com.digitalasset.canton.error.MediatorError -import com.digitalasset.canton.lifecycle.{CloseContext, FlagCloseable, FutureUnlessShutdown} +import com.digitalasset.canton.lifecycle.{ + CloseContext, + FlagCloseable, + FutureUnlessShutdown, + LifeCycle, +} import com.digitalasset.canton.logging.TracedLogger import com.digitalasset.canton.mediator.admin.v30 import com.digitalasset.canton.mediator.admin.v30.VerdictsResponse @@ -23,16 +28,21 @@ import com.digitalasset.canton.synchronizer.service.RecordStreamObserverItems import com.digitalasset.canton.time.TimeAwaiter import com.digitalasset.canton.topology.DefaultTestIdentities import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.MonadUtil +import com.digitalasset.canton.util.{MonadUtil, PekkoUtil} import com.digitalasset.canton.version.CommonGenerators +import com.digitalasset.canton.{BaseTest, HasExecutionContext} import io.grpc.stub.ServerCallStreamObserver +import org.apache.pekko.actor.ActorSystem import org.scalatest.wordspec.AsyncWordSpec -import java.util.concurrent.atomic.AtomicInteger +import java.util.concurrent.atomic.{AtomicInteger, AtomicReference} import scala.concurrent.{Future, Promise} import scala.util.Random -class GrpcMediatorInspectionServiceTest extends AsyncWordSpec with BaseTest { +class GrpcMediatorInspectionServiceTest + extends AsyncWordSpec + with BaseTest + with HasExecutionContext { private lazy val generators = new CommonGenerators(testedProtocolVersion) // use our generators to generate a random full informee tree @@ -66,6 +76,18 @@ class GrpcMediatorInspectionServiceTest extends AsyncWordSpec with BaseTest { .toVerdict(testedProtocolVersion), )(TraceContext.empty) + private implicit val actorSystem: ActorSystem = + PekkoUtil.createActorSystem(loggerFactory.threadName) + + private implicit val executionSequencerFactory: ExecutionSequencerFactory = + PekkoUtil.createExecutionSequencerFactory(loggerFactory.threadName, noTracingLogger) + + override def afterAll(): Unit = + LifeCycle.close( + executionSequencerFactory, + LifeCycle.toCloseableActorSystem(actorSystem, logger, timeouts), + )(logger) + class Fixture(val batchSize: Int = 5) { val finalizedResponseStore = new InMemoryFinalizedResponseStore(loggerFactory) @@ -99,21 +121,33 @@ class 
GrpcMediatorInspectionServiceTest extends AsyncWordSpec with BaseTest { val observer = new ServerCallStreamObserver[v30.VerdictsResponse] with RecordStreamObserverItems[v30.VerdictsResponse] { @volatile var isCancelled_ = false + val onCancelHandlerRef = new AtomicReference[Option[Runnable]](None) override def isCancelled: Boolean = isCancelled_ - override def setOnCancelHandler(onCancelHandler: Runnable): Unit = () + override def setOnCancelHandler(onCancelHandler: Runnable): Unit = + onCancelHandlerRef.set(Some(onCancelHandler)) + override def setOnCloseHandler(onCloseHandler: Runnable): Unit = () override def setCompression(compression: String): Unit = ??? override def isReady: Boolean = ??? - override def setOnReadyHandler(onReadyHandler: Runnable): Unit = ??? + + val onReadyHandlerRef = new AtomicReference[Option[Runnable]](None) + override def setOnReadyHandler(onReadyHandler: Runnable): Unit = { + onReadyHandlerRef.set(Some(onReadyHandler)) + if (counter.get > 0) signalReady() + } + override def request(count: Int): Unit = ??? override def setMessageCompression(enable: Boolean): Unit = ??? - override def disableAutoInboundFlowControl(): Unit = ??? + override def disableAutoInboundFlowControl(): Unit = () override def onNext(value: VerdictsResponse): Unit = { super.onNext(value) - if (counter.decrementAndGet() == 0) { - isCancelled_ = true + val newCounter = counter.decrementAndGet() + if (newCounter == 0) { promise.trySuccess(values) + cancel() + } else if (newCounter > 0) { + signalReady() } } @@ -121,6 +155,13 @@ class GrpcMediatorInspectionServiceTest extends AsyncWordSpec with BaseTest { super.onError(t) promise.tryFailure(t) } + + private def signalReady(): Unit = onReadyHandlerRef.get.foreach(_.run()) + + private def cancel(): Unit = { + isCancelled_ = true + onCancelHandlerRef.get.foreach(_.run()) + } } scanService.verdicts( v30.VerdictsRequest( diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/mediator/store/MediatorDeduplicationStoreTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/mediator/store/MediatorDeduplicationStoreTest.scala index acf849b141..9d7459b604 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/mediator/store/MediatorDeduplicationStoreTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/mediator/store/MediatorDeduplicationStoreTest.scala @@ -11,7 +11,7 @@ import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown, H import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.store.db.{DbTest, H2Test, PostgresTest} import com.digitalasset.canton.synchronizer.mediator.store.MediatorDeduplicationStore.DeduplicationData -import com.digitalasset.canton.topology.DefaultTestIdentities +import com.digitalasset.canton.time.PositiveFiniteDuration import com.digitalasset.canton.tracing.TraceContext import org.scalatest.wordspec.AsyncWordSpec @@ -224,10 +224,11 @@ trait DbMediatorDeduplicationStoreTest extends MediatorDeduplicationStoreTest wi firstEventTs: CantonTimestamp ): DbMediatorDeduplicationStore = { val store = new DbMediatorDeduplicationStore( - DefaultTestIdentities.daMediator, storage, timeouts, BatchAggregatorConfig.defaultsForTesting, + pruneAtMostEvery = + PositiveFiniteDuration.tryOfSeconds(1), // pruning test case requires 1 second pruning steps loggerFactory, ) store.initialize(firstEventTs).futureValueUS diff --git 
a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/BaseSequencerTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/BaseSequencerTest.scala index 742f50333d..a5ff2488b8 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/BaseSequencerTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/BaseSequencerTest.scala @@ -203,6 +203,10 @@ class BaseSequencerTest extends AsyncWordSpec with BaseTest with FailOnShutdown successorO: Option[SynchronizerSuccessor], announcementEffectiveTime: EffectiveTime, )(implicit traceContext: TraceContext): Unit = ??? + + override def sequencingTime(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Option[CantonTimestamp]] = ??? } "sendAsyncSigned" should { diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/DatabaseSequencerApiTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/DatabaseSequencerApiTest.scala index 1450258f0e..47b2e2db44 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/DatabaseSequencerApiTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/DatabaseSequencerApiTest.scala @@ -25,7 +25,7 @@ abstract class DatabaseSequencerApiTest extends SequencerApiTest { val crypto = TestingIdentityFactory( TestingTopology(), loggerFactory, - DynamicSynchronizerParameters.initialValues(clock, testedProtocolVersion), + DynamicSynchronizerParameters.initialValues(testedProtocolVersion), ).forOwnerAndSynchronizer(owner = mediatorId, psid) val metrics = SequencerMetrics.noop("database-sequencer-test") diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/DatabaseSequencerSnapshottingTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/DatabaseSequencerSnapshottingTest.scala index e3d3280a9e..383a323bff 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/DatabaseSequencerSnapshottingTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/DatabaseSequencerSnapshottingTest.scala @@ -34,7 +34,7 @@ trait DatabaseSequencerSnapshottingTest extends SequencerApiTest with DbTest { private val crypto = TestingIdentityFactory( TestingTopology(), loggerFactory, - DynamicSynchronizerParameters.initialValues(clock, testedProtocolVersion), + DynamicSynchronizerParameters.initialValues(testedProtocolVersion), ).forOwnerAndSynchronizer(owner = mediatorId, psid) private val requestSigner = RequestSigner(crypto, testedProtocolVersion, loggerFactory) diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/GeneratorsSequencer.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/GeneratorsSequencer.scala index e4224e5449..7805580b42 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/GeneratorsSequencer.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/GeneratorsSequencer.scala @@ -107,6 +107,18 @@ final class GeneratorsSequencer( topologySnapshot, 
staticSynchronizerParameters, sequencerSnapshot, + ) + ) + + implicit val onboardingStateForSequencerV2: Arbitrary[OnboardingStateForSequencerV2] = Arbitrary( + for { + topologySnapshotO <- Gen.option(Arbitrary.arbitrary[GenericStoredTopologyTransaction]) + staticSynchronizerParametersO <- Gen.option(Arbitrary.arbitrary[StaticSynchronizerParameters]) + sequencerSnapshotO <- Gen.option(Arbitrary.arbitrary[SequencerSnapshot]) + } yield OnboardingStateForSequencerV2( + topologySnapshotO, + staticSynchronizerParametersO, + sequencerSnapshotO, protocolVersion, ) ) diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerReaderTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerReaderTest.scala index c10ba3afb3..3a109316d6 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerReaderTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerReaderTest.scala @@ -472,8 +472,10 @@ class SequencerReaderTest event <- pullFromQueue(queue) _ = queue.cancel() // cancel the queue now we're done with it } yield { - // the first event expected to reach alice is at its registration timestamp (its topo mapping effective time) - event.value.timestamp shouldBe ts(2) + // the first event expected to reach alice is the next event after its registration time + // (onboarding topology effective time). Alice must not receive the event at ts(2), + // since it would already have seen its onboarding tx in the topology snapshot at ts(2). + event.value.timestamp shouldBe ts(3) } } @@ -639,10 +641,8 @@ class SequencerReaderTest _ <- store .saveLowerBound(ts(10), ts(9).some) .valueOrFail("saveLowerBound") - error <- loggerFactory.assertLogs( - leftOrFail(reader.read(alice, requestedTimestampInclusive = None))("read"), - _.errorMessage shouldBe expectedMessage, - ) + error <- + leftOrFail(reader.read(alice, requestedTimestampInclusive = None))("read") } yield inside(error) { case CreateSubscriptionError.EventsUnavailableForTimestamp(None, message) => message should include(expectedMessage) @@ -674,14 +674,9 @@ class SequencerReaderTest _ <- store .saveLowerBound(ts(10), ts(9).some) .valueOrFail("saveLowerBound") - error <- loggerFactory.assertLogs( - leftOrFail( - reader.read(alice, requestedTimestampInclusive = Some(ts0.plusSeconds(10))) - )( - "read succeeded" - ), - _.errorMessage shouldBe expectedMessage, - ) + error <- leftOrFail( + reader.read(alice, requestedTimestampInclusive = Some(ts0.plusSeconds(10))) + )("read succeeded") } yield inside(error) { case CreateSubscriptionError.EventsUnavailableForTimestamp(Some(timestamp), message) => timestamp shouldBe ts0.plusSeconds(10) diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerUtilsTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerUtilsTest.scala index d525ff5631..4c120902c3 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerUtilsTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerUtilsTest.scala @@ -36,7 +36,6 @@ class SequencerUtilsTest extends AnyWordSpec with BaseTest with Matchers { confirmationResponseTimeout: NonNegativeFiniteDuration = defaults.confirmationResponseTimeout, ): 
DynamicSynchronizerParametersWithValidity = { val parameters = DynamicSynchronizerParameters.tryInitialValues( - topologyChangeDelay = NonNegativeFiniteDuration.Zero, protocolVersion = testedProtocolVersion, sequencerAggregateSubmissionTimeout = sequencerAggregateSubmissionTimeout, mediatorReactionTimeout = mediatorReactionTimeout, diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerWriterSourceTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerWriterSourceTest.scala index 3f1b69eddf..7ddfc2426b 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerWriterSourceTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerWriterSourceTest.scala @@ -300,6 +300,7 @@ class SequencerWriterSourceTest )( for { registeredAlice <- store.registerMember(alice, CantonTimestamp.Epoch) + _ <- store.registerMember(sequencerMember, CantonTimestamp.Epoch) deliver1 = DeliverStoreEvent.ensureSenderReceivesEvent( registeredAlice.memberId, messageId1, @@ -346,6 +347,7 @@ class SequencerWriterSourceTest )( for { registeredAlice <- store.registerMember(alice, CantonTimestamp.Epoch) + _ <- store.registerMember(sequencerMember, CantonTimestamp.Epoch) deliver1 = DeliverStoreEvent.ensureSenderReceivesEvent( registeredAlice.memberId, messageId1, @@ -400,6 +402,7 @@ class SequencerWriterSourceTest for { registeredAlice <- store.registerMember(alice, CantonTimestamp.Epoch) + _ <- store.registerMember(sequencerMember, CantonTimestamp.Epoch) deliver1 = DeliverStoreEvent.ensureSenderReceivesEvent( registeredAlice.memberId, messageId1, @@ -458,6 +461,7 @@ class SequencerWriterSourceTest for { registeredAlice <- store.registerMember(alice, CantonTimestamp.Epoch).failOnShutdown + _ <- store.registerMember(sequencerMember, CantonTimestamp.Epoch).failOnShutdown deliver1 = DeliverStoreEvent.ensureSenderReceivesEvent( registeredAlice.memberId, messageId1, @@ -636,7 +640,7 @@ class SequencerWriterSourceTest combinedNotificationsF map { notification => forAll(members) { member => withClue(s"expecting notification for $member") { - notification.includes(member) shouldBe true + notification.isBroadcastOrIncludes(member) shouldBe true } } diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencerTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencerTest.scala index eaecc00dcf..2a175e80c1 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencerTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencerTest.scala @@ -119,6 +119,7 @@ class BlockSequencerTest private val topologyClient = new StoreBasedSynchronizerTopologyClient( mock[Clock], + defaultStaticSynchronizerParameters, topologyStore, StoreBasedSynchronizerTopologyClient.NoPackageDependencies, DefaultProcessingTimeouts.testing, @@ -249,6 +250,10 @@ class BlockSequencerTest override def sequencerSnapshotAdditionalInfo( timestamp: CantonTimestamp ): EitherT[Future, SequencerError, Option[v30.BftSequencerSnapshotAdditionalInfo]] = ??? + + override def sequencingTime(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Option[CantonTimestamp]] = ??? 
} class FakeBlockSequencerStateManager extends BlockSequencerStateManagerBase { diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/p2p/grpc/P2PGrpcConnectionStateTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/p2p/grpc/P2PGrpcConnectionStateTest.scala index e19e190047..e4798ba29d 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/p2p/grpc/P2PGrpcConnectionStateTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/bindings/p2p/grpc/P2PGrpcConnectionStateTest.scala @@ -25,7 +25,7 @@ class P2PGrpcConnectionStateTest extends AnyWordSpec with BftSequencerBaseTest { import P2PGrpcConnectionStateTest.* - "P2PGrpcBidiConnectionState" should { + "P2PGrpcConnectionState" should { "associate multiple P2P endpoint ID to a BFT node ID" in { val state = new P2PGrpcConnectionState(SelfBftNodeId, loggerFactory) diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/availability/data/db/DbAvailabilityStoreTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/availability/data/db/DbAvailabilityStoreTest.scala index 64f4a18aa2..f7b3cf1971 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/availability/data/db/DbAvailabilityStoreTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/availability/data/db/DbAvailabilityStoreTest.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.modules.availability.data.db import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.config.BatchAggregatorConfig import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.store.db.{DbTest, H2Test, PostgresTest} @@ -20,7 +21,10 @@ import scala.concurrent.ExecutionContext trait DbAvailabilityStoreTest extends AvailabilityStoreTest { this: DbTest => override def createStore(): AvailabilityStore[PekkoEnv] = - new DbAvailabilityStore(storage, timeouts, loggerFactory)(implicitly[ExecutionContext]) + new DbAvailabilityStore(BatchAggregatorConfig(), storage, timeouts, loggerFactory)( + implicitly[ExecutionContext] + ) + override def cleanDb( storage: DbStorage )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/availability/data/model/ModelBasedTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/availability/data/model/ModelBasedTest.scala index 1d1dcbb4a1..848c9760b9 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/availability/data/model/ModelBasedTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/availability/data/model/ModelBasedTest.scala 
@@ -4,6 +4,7 @@ package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.modules.availability.data.model import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.config.BatchAggregatorConfig import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.resource.DbStorage @@ -23,7 +24,9 @@ import scala.util.Random trait ModelBasedTest extends AnyWordSpec with BftSequencerBaseTest { this: DbTest => def createStore(): AvailabilityStore[PekkoEnv] = - new DbAvailabilityStore(storage, timeouts, loggerFactory)(implicitly[ExecutionContext]) + new DbAvailabilityStore(BatchAggregatorConfig(), storage, timeouts, loggerFactory)( + implicitly[ExecutionContext] + ) override def cleanDb( storage: DbStorage diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/IssConsensusModuleTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/IssConsensusModuleTest.scala index fe4c137c35..d431f8da2e 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/IssConsensusModuleTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/IssConsensusModuleTest.scala @@ -527,7 +527,6 @@ class IssConsensusModuleTest BftBlockOrdererConfig.DefaultConsensusQueuePerNodeQuota, ) val aDummyMessage = Consensus.ConsensusMessage.PbftUnverifiedNetworkMessage( - myId, ConsensusSegment.ConsensusMessage.ViewChange .create( BlockMetadata(EpochNumber.First, BlockNumber.First), @@ -535,7 +534,7 @@ class IssConsensusModuleTest consensusCerts = Seq.empty, from = myId, ) - .fakeSign, + .fakeSign ) futurePbftMessageQueue.enqueue(myId, aDummyMessage) val postponedConsensusMessageQueue = @@ -816,25 +815,26 @@ class IssConsensusModuleTest ) implicit val ctx: ContextType = context - val underlyingMessage = { + def underlyingMessage(from: BftNodeId) = { val msg = mock[ConsensusSegment.ConsensusMessage.PbftNetworkMessage] when(msg.blockMetadata).thenReturn(BlockMetadata(EpochNumber(10L), BlockNumber.First)) - when(msg.from) thenReturn myId + when(msg.from).thenReturn(from) when(msg.viewNumber).thenReturn(ViewNumber.First) + when(msg.actualSender).thenReturn(Some(from)) msg } consensus.receive(Consensus.Start) - consensus.receive(PbftUnverifiedNetworkMessage(myId, underlyingMessage.fakeSign)) + consensus.receive(PbftUnverifiedNetworkMessage(underlyingMessage(otherIds(0)).fakeSign)) // we can only take 1 future messages per node, so the second one is refused - consensus.receive(PbftUnverifiedNetworkMessage(myId, underlyingMessage.fakeSign)) + consensus.receive(PbftUnverifiedNetworkMessage(underlyingMessage(otherIds(0)).fakeSign)) consensus.futurePbftMessageQueue.size shouldBe 1 - consensus.receive(PbftUnverifiedNetworkMessage(otherIds(0), underlyingMessage.fakeSign)) + consensus.receive(PbftUnverifiedNetworkMessage(underlyingMessage(otherIds(1)).fakeSign)) // we can only take 2 future messages in total, so the third one is refused - consensus.receive(PbftUnverifiedNetworkMessage(otherIds(1), underlyingMessage.fakeSign)) + consensus.receive(PbftUnverifiedNetworkMessage(underlyingMessage(otherIds(2)).fakeSign)) consensus.futurePbftMessageQueue.size 
shouldBe 2 succeed @@ -900,17 +900,17 @@ class IssConsensusModuleTest Table[ProtocolMessage]( "message", PbftUnverifiedNetworkMessage( - allIds(1), SignedMessage( PrePrepare.create( // Just to trigger the catch-up check blockMetadata4Nodes(1), ViewNumber.First, OrderingBlock(oneRequestOrderingBlock.proofs), CanonicalCommitSet(Set.empty), - allIds(1), + from = allIds(1), + actualSender = Some(allIds(1)), ), Signature.noSignature, - ), + ) ), RetransmissionsMessage.VerifiedNetworkMessage( RetransmissionsMessage.RetransmissionRequest( @@ -1002,13 +1002,14 @@ class IssConsensusModuleTest ) ) val unauthorizedNodeId = BftNodeId("unauthorized") - when(underlyingMessage.from) thenReturn unauthorizedNodeId + when(underlyingMessage.from).thenReturn(unauthorizedNodeId) + when(underlyingMessage.actualSender).thenReturn(Some(unauthorizedNodeId)) when(underlyingMessage.viewNumber).thenReturn(ViewNumber.First) val signedMessage = underlyingMessage.fakeSign consensus.receive(Consensus.Start) - consensus.receive(PbftUnverifiedNetworkMessage(unauthorizedNodeId, signedMessage)) + consensus.receive(PbftUnverifiedNetworkMessage(signedMessage)) assertLogs( context.runPipedMessages(), diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/SegmentStateTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/SegmentStateTest.scala index d9ace718af..002764fd9b 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/SegmentStateTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/SegmentStateTest.scala @@ -770,16 +770,37 @@ class SegmentStateTest extends AsyncWordSpec with BftSequencerBaseTest { // Create and receive f+1 view change messages from nodes def viewChangeMessage(from: BftNodeId = otherId2) = - createViewChange(nextView, from = from, originalLeader, Seq.empty) + createViewChange( + nextView, + from, + originalLeader, + Seq.empty, + actualSender = if (from == myId) None else Some(from), + ) assertNoLogs(segment.processEvent(PbftSignedNetworkMessage(viewChangeMessage()))) assertNoLogs( segment.processEvent(PbftSignedNetworkMessage(viewChangeMessage(from = otherId3))) ) // Create new view message for later, but don't process yet - val ppBottom1 = createBottomPrePrepare(slotNumbers(0), nextView, from = otherId1) - val ppBottom2 = createBottomPrePrepare(slotNumbers(1), nextView, from = otherId1) - val ppBottom3 = createBottomPrePrepare(slotNumbers(2), nextView, from = otherId1) + val ppBottom1 = createBottomPrePrepare( + slotNumbers(0), + nextView, + from = otherId1, + actualSender = Some(otherId1), + ) + val ppBottom2 = createBottomPrePrepare( + slotNumbers(1), + nextView, + from = otherId1, + actualSender = Some(otherId1), + ) + val ppBottom3 = createBottomPrePrepare( + slotNumbers(2), + nextView, + from = otherId1, + actualSender = Some(otherId1), + ) val newView = createNewView( nextView, otherId1, @@ -789,14 +810,33 @@ class SegmentStateTest extends AsyncWordSpec with BftSequencerBaseTest { viewChangeMessage(from = otherId2), ), Seq(ppBottom1, ppBottom2, ppBottom3), + actualSender = Some(otherId1), ) segment.isViewChangeInProgress shouldBe true // Simulate receiving early Prepare messages (for nextView) before receiving the new 
view message def prepare1(from: BftNodeId = myId) = - createPrepare(slotNumbers(0), nextView, from = from, ppBottom1.message.hash) - val prepare2 = createPrepare(slotNumbers(1), nextView, from = myId, ppBottom2.message.hash) - val prepare3 = createPrepare(slotNumbers(2), nextView, from = myId, ppBottom3.message.hash) + createPrepare( + slotNumbers(0), + nextView, + from = from, + ppBottom1.message.hash, + actualSender = if (from == myId) None else Some(from), + ) + val prepare2 = createPrepare( + slotNumbers(1), + nextView, + from = myId, + ppBottom2.message.hash, + actualSender = None, + ) + val prepare3 = createPrepare( + slotNumbers(2), + nextView, + from = myId, + ppBottom3.message.hash, + actualSender = None, + ) assertLogs( segment.processEvent(PbftSignedNetworkMessage(prepare1(from = otherId1))), log => { @@ -817,7 +857,13 @@ class SegmentStateTest extends AsyncWordSpec with BftSequencerBaseTest { // Simulate receiving an early message from an even more future view val prepareFuture = - createPrepare(slotNumbers(1), nextView + 1, from = otherId2, ppBottom2.message.hash) + createPrepare( + slotNumbers(1), + nextView + 1, + from = otherId2, + ppBottom2.message.hash, + actualSender = Some(otherId2), + ) assertLogs( segment.processEvent(PbftSignedNetworkMessage(prepareFuture)), log => { @@ -1330,19 +1376,30 @@ class SegmentStateTest extends AsyncWordSpec with BftSequencerBaseTest { from, originalLeader, Seq(slotNumbers(0) -> view1), + actualSender = if (from == myId) None else Some(from), ) val pp1 = createPrePrepare(slotNumbers(0), view1, from = myId) - val ppBottom2 = createBottomPrePrepare(slotNumbers(1), view2, from = otherId1) - val ppBottom3 = createBottomPrePrepare(slotNumbers(2), view2, from = otherId1) + val ppBottom2 = createBottomPrePrepare( + slotNumbers(1), + view2, + from = otherId1, + actualSender = Some(otherId1), + ) + val ppBottom3 = createBottomPrePrepare( + slotNumbers(2), + view2, + from = otherId1, + actualSender = Some(otherId1), + ) val prepare1 = createPrepare(slotNumbers(0), view2, from = myId, pp1.message.hash) val prepare2 = createPrepare(slotNumbers(1), view2, from = myId, ppBottom2.message.hash) val prepare3 = createPrepare(slotNumbers(2), view2, from = myId, ppBottom3.message.hash) - segment.processEvent(PbftSignedNetworkMessage(prepare1)) shouldBe empty - segment.processEvent(PbftSignedNetworkMessage(prepare2)) shouldBe empty - segment.processEvent(PbftSignedNetworkMessage(prepare3)) shouldBe empty + segment.processEvent(PbftSignedNetworkMessage(prepare1), rehydrated = true) shouldBe empty + segment.processEvent(PbftSignedNetworkMessage(prepare2), rehydrated = true) shouldBe empty + segment.processEvent(PbftSignedNetworkMessage(prepare3), rehydrated = true) shouldBe empty // getting new-view message without having gotten any view-change messages // although this could indeed happen in real life, it is more commonly seen during rehydration of messages @@ -1608,6 +1665,7 @@ object SegmentStateTest { blockNumber: BlockNumber, view: Long, from: BftNodeId, + actualSender: Option[BftNodeId] = None, )(implicit synchronizerProtocolVersion: ProtocolVersion): SignedMessage[PrePrepare] = PrePrepare .create( @@ -1616,6 +1674,7 @@ object SegmentStateTest { OrderingBlock.empty, CanonicalCommitSet(Set.empty), from, + actualSender, ) .fakeSign @@ -1639,6 +1698,7 @@ object SegmentStateTest { view: Long, from: BftNodeId, hash: Hash, + actualSender: Option[BftNodeId] = None, )(implicit synchronizerProtocolVersion: ProtocolVersion): SignedMessage[Prepare] = Prepare .create( 
@@ -1646,6 +1706,7 @@ object SegmentStateTest { ViewNumber(view), hash, from, + actualSender, ) .fakeSign @@ -1696,6 +1757,7 @@ object SegmentStateTest { from: BftNodeId, originalLeader: BftNodeId = myId, slotsAndViewNumbers: Seq[(Long, Long)] = Seq.empty, + actualSender: Option[BftNodeId] = None, )(implicit synchronizerProtocolVersion: ProtocolVersion): SignedMessage[ViewChange] = { val originalLeaderIndex = allIds.indexOf(originalLeader) val certs = slotsAndViewNumbers.map { case (slot, view) => @@ -1711,6 +1773,7 @@ object SegmentStateTest { ViewNumber(viewNumber), consensusCerts = certs, from, + actualSender, ) .fakeSign } @@ -1730,6 +1793,7 @@ object SegmentStateTest { from: BftNodeId, vcSet: Seq[SignedMessage[ViewChange]], ppSet: Seq[SignedMessage[PrePrepare]], + actualSender: Option[BftNodeId] = None, )(implicit synchronizerProtocolVersion: ProtocolVersion): SignedMessage[NewView] = NewView .create( @@ -1738,6 +1802,7 @@ object SegmentStateTest { viewChanges = vcSet, prePrepares = ppSet, from, + actualSender, ) .fakeSign diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/StateTransferBehaviorTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/StateTransferBehaviorTest.scala index f88a4f814e..dc5eef12dd 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/StateTransferBehaviorTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/StateTransferBehaviorTest.scala @@ -385,7 +385,7 @@ class StateTransferBehaviorTest stateTransferBehavior.postponedConsensusMessages .enqueue( otherId, - Consensus.ConsensusMessage.PbftUnverifiedNetworkMessage(otherId, signedMessage), + Consensus.ConsensusMessage.PbftUnverifiedNetworkMessage(signedMessage), ) stateTransferBehavior.receive( @@ -448,15 +448,13 @@ class StateTransferBehaviorTest // PbftUnverifiedNetworkMessage val underlyingMessage = mock[ConsensusSegment.ConsensusMessage.PbftNetworkMessage] + when(underlyingMessage.actualSender).thenReturn(Some(otherId)) when(underlyingMessage.from).thenThrow( new RuntimeException("should have used an actual sender") ) val signedMessage = underlyingMessage.fakeSign val pbftUnverifiedNetworkMessage = - Consensus.ConsensusMessage.PbftUnverifiedNetworkMessage( - actualSender = otherId, - signedMessage, - ) + Consensus.ConsensusMessage.PbftUnverifiedNetworkMessage(signedMessage) stateTransferBehavior.receive(pbftUnverifiedNetworkMessage) // PbftVerifiedNetworkMessage diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/EpochStoreTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/EpochStoreTest.scala index f0043b090c..c481452b8b 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/EpochStoreTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/EpochStoreTest.scala @@ -87,9 +87,9 @@ trait EpochStoreTest extends AsyncWordSpec 
{ _ <- store.startEpoch(epochInfo0) // idempotent writes are supported _ <- store.startEpoch(epochInfo0) - _ <- store.addOrderedBlock(prePrepare0, commitMessages0) + _ <- store.addOrderedBlockAtomically(prePrepare0, commitMessages0) // idempotent writes are supported - _ <- store.addOrderedBlock(prePrepare0, commitMessages0) + _ <- store.addOrderedBlockAtomically(prePrepare0, commitMessages0) e0 <- store.latestEpoch(includeInProgress = false) e1 <- store.latestEpoch(includeInProgress = true) @@ -106,7 +106,7 @@ trait EpochStoreTest extends AsyncWordSpec { e7 <- store.loadEpochInfo(EpochNumber.First) _ <- store.startEpoch(epochInfo1) - _ <- store.addOrderedBlock(prePrepare1, commitMessages1) + _ <- store.addOrderedBlockAtomically(prePrepare1, commitMessages1) e8 <- store.latestEpoch(includeInProgress = false) e9 <- store.latestEpoch(includeInProgress = true) e10 <- store.loadEpochInfo(epochInfo1.number) @@ -152,7 +152,7 @@ trait EpochStoreTest extends AsyncWordSpec { blockNumber: Long, viewNumber: Long = ViewNumber.First, ) = - store.addOrderedBlock( + store.addOrderedBlockAtomically( prePrepare(epochNumber, blockNumber, viewNumber), commitMessages(epochNumber, blockNumber, viewNumber), ) @@ -161,7 +161,7 @@ trait EpochStoreTest extends AsyncWordSpec { _ <- store.startEpoch(activeEpoch0Info) _ <- store.addPrePrepare(prePrepare(EpochNumber.First, BlockNumber.First)) - _ <- store.addPrepares(Seq(prepare(EpochNumber.First, BlockNumber.First))) + _ <- store.addPreparesAtomically(Seq(prepare(EpochNumber.First, BlockNumber.First))) _ <- addOrderedBlock(EpochNumber.First, BlockNumber.First) _ <- addOrderedBlock(EpochNumber.First, 1L) @@ -169,7 +169,7 @@ trait EpochStoreTest extends AsyncWordSpec { // these will appear in loadEpochProgress as pbftMessagesForIncompleteBlocks because block 3 is not complete _ <- store.addPrePrepare(prePrepare(EpochNumber.First, 3L)) - _ <- store.addPrepares(Seq(prepare(EpochNumber.First, 3L))) + _ <- store.addPreparesAtomically(Seq(prepare(EpochNumber.First, 3L))) // view change messages will appear always because we don't check in the DB if the segment has finished _ <- store.addViewChangeMessage(viewChange(EpochNumber.First, 0L)) @@ -179,7 +179,7 @@ trait EpochStoreTest extends AsyncWordSpec { _ <- store.addPrePrepare( prePrepare(EpochNumber.First, 3L, viewNumber = ViewNumber.First + 1) ) - _ <- store.addPrepares( + _ <- store.addPreparesAtomically( Seq(prepare(EpochNumber.First, 3L, viewNumber = ViewNumber.First + 1)) ) @@ -254,22 +254,22 @@ trait EpochStoreTest extends AsyncWordSpec { val epoch3 = EpochInfo.mk(3L, 3L, 3) for { _ <- store.startEpoch(epoch0) - _ <- store.addOrderedBlock( + _ <- store.addOrderedBlockAtomically( prePrepare(EpochNumber.First, BlockNumber.First), commitMessages(EpochNumber.First, BlockNumber.First), ) _ <- store.startEpoch(epoch2) - _ <- store.addOrderedBlock( + _ <- store.addOrderedBlockAtomically( prePrepare(epochNumber = 2L, blockNumber = 2L), commitMessages(epochNumber = 2L, blockNumber = 2L), ) _ <- store.startEpoch(epoch1) - _ <- store.addOrderedBlock( + _ <- store.addOrderedBlockAtomically( prePrepare(epochNumber = 1L, blockNumber = 1L), commitMessages(epochNumber = 1L, blockNumber = 1L), ) _ <- store.startEpoch(epoch3) - _ <- store.addOrderedBlock( + _ <- store.addOrderedBlockAtomically( prePrepare(epochNumber = 3L, blockNumber = 3L), commitMessages(epochNumber = 3L, blockNumber = 3L), ) @@ -307,11 +307,11 @@ trait EpochStoreTest extends AsyncWordSpec { for { _ <- store.startEpoch(epoch0) - _ <- store.addOrderedBlock( + _ <- 
store.addOrderedBlockAtomically( prePrepare(epochNumber = EpochNumber.First, blockNumber = BlockNumber.First), Seq.empty, ) - _ <- store.addOrderedBlock( + _ <- store.addOrderedBlockAtomically( prePrepare(epochNumber = EpochNumber.First, blockNumber = BlockNumber(1)), Seq.empty, ) @@ -333,7 +333,7 @@ trait EpochStoreTest extends AsyncWordSpec { _ = numberOfRecords0 shouldBe (EpochStore.NumberOfRecords.empty) _ <- store.startEpoch(epoch0) - _ <- store.addOrderedBlock( + _ <- store.addOrderedBlockAtomically( prePrepare(EpochNumber.First, BlockNumber.First), commitMessages(EpochNumber.First, BlockNumber.First), ) @@ -346,7 +346,7 @@ trait EpochStoreTest extends AsyncWordSpec { )) _ <- store.startEpoch(epoch1) - _ <- store.addOrderedBlock( + _ <- store.addOrderedBlockAtomically( prePrepare(epochNumber = 1L, blockNumber = 1L), commitMessages(epochNumber = 1L, blockNumber = 1L), ) @@ -360,7 +360,7 @@ trait EpochStoreTest extends AsyncWordSpec { _ <- store.startEpoch(epoch2) _ <- store.addPrePrepare(prePrepare(EpochNumber(2L), 3L)) - _ <- store.addPrepares(Seq(prepare(EpochNumber(2L), 3L))) + _ <- store.addPreparesAtomically(Seq(prepare(EpochNumber(2L), 3L))) _ <- store.addViewChangeMessage(viewChange(EpochNumber(2L), 3L)) _ <- store.addViewChangeMessage(newView(EpochNumber(2L), 3L)) diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/db/DbEpochStoreTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/db/DbEpochStoreTest.scala index 806e0e3c96..66344c05c1 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/db/DbEpochStoreTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/db/DbEpochStoreTest.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.modules.consensus.iss.data.db import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.config.BatchAggregatorConfig import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.store.db.{DbTest, H2Test, PostgresTest} @@ -32,7 +33,7 @@ trait DbEpochStoreTest extends AsyncWordSpec with BftSequencerBaseTest with Epoc "DbEpochStore" should { behave like epochStore(() => - new DbEpochStore(storage, timeouts, loggerFactory)(executionContext) + new DbEpochStore(BatchAggregatorConfig(), storage, timeouts, loggerFactory)(executionContext) ) } } diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/memory/SimulationEpochStore.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/memory/SimulationEpochStore.scala index 859d5591a7..f27402667a 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/memory/SimulationEpochStore.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/data/memory/SimulationEpochStore.scala @@ -21,7 +21,7 
@@ final class SimulationEpochStore(failOnViewChange: Boolean) SimulationFuture(action)(value) override def close(): Unit = () - override def addOrderedBlock( + override def addOrderedBlockAtomically( prePrepare: SignedMessage[ConsensusMessage.PrePrepare], commitMessages: Seq[SignedMessage[ConsensusMessage.Commit]], )(implicit traceContext: TraceContext): SimulationFuture[Unit] = { @@ -33,6 +33,6 @@ final class SimulationEpochStore(failOnViewChange: Boolean) } } - super.addOrderedBlock(prePrepare, commitMessages) + super.addOrderedBlockAtomically(prePrepare, commitMessages) } } diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/statetransfer/StateTransferManagerTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/statetransfer/StateTransferManagerTest.scala index 05072aeaf1..a2f5bba235 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/statetransfer/StateTransferManagerTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/core/modules/consensus/iss/statetransfer/StateTransferManagerTest.scala @@ -244,7 +244,7 @@ class StateTransferManagerTest extends AnyWordSpec with BftSequencerBaseTest { // Store a block that will be sent by the serving node. val commitCert = aCommitCert() context.pipeToSelf( - epochStore.addOrderedBlock(commitCert.prePrepare, commitCert.commits) + epochStore.addOrderedBlockAtomically(commitCert.prePrepare, commitCert.commits) )( _.map(_ => None).getOrElse(fail("Storing the pre-prepare failed")) ) diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/BftBenchmark.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/BftBenchmark.scala new file mode 100644 index 0000000000..8e89fba751 --- /dev/null +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/BftBenchmark.scala @@ -0,0 +1,254 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance
+
+import com.codahale.metrics.MetricRegistry
+import com.daml.logging.{ContextualizedLogger, LoggingContext}
+import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance.BftBenchmark.{
+ Separator,
+ TxStatus,
+ UuidLength,
+ shutdownExecutorService,
+}
+import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance.BftMetrics.{
+ failedWriteMeters,
+ pendingReads,
+ readMeters,
+ roundTripNanosHistogram,
+ startedWriteMeters,
+ successfulWriteMeters,
+ writeNanosHistograms,
+}
+
+import java.util.UUID
+import java.util.concurrent.{
+ Callable,
+ ConcurrentHashMap,
+ ExecutorService,
+ Executors,
+ Future as JFuture,
+ ScheduledExecutorService,
+ ScheduledFuture,
+ TimeUnit,
+}
+import java.util.function.BiFunction
+
+final class BftBenchmark(
+ config: BftBenchmarkConfig,
+ bftBindingFactory: BftBindingFactory,
+ metrics: MetricRegistry,
+) {
+
+ private val log = ContextualizedLogger.get(getClass)
+ implicit private val loggingContext: LoggingContext = LoggingContext.empty
+
+ private val readNodeIndices =
+ config.nodes.zipWithIndex
+ .filter(_._1.isInstanceOf[BftBenchmarkConfig.ReadNode[?]])
+ .map(_._2)
+ .toSet
+
+ private val ValueBytes: Int =
+ config.transactionBytes - UuidLength - Separator.length
+
+ log.info(
+ s"Payload values will be $ValueBytes bytes long (${config.transactionBytes} minus the UUID and separator lengths)"
+ )
+
+ private val readNodes = config.nodes.flatMap {
+ case node: BftBenchmarkConfig.ReadNode[_] => Some(node)
+ case _ => None
+ }
+
+ private val writeNodes = config.nodes.flatMap {
+ case node: BftBenchmarkConfig.WriteNode[_] => Some(node)
+ case _ => None
+ }
+
+ private val bftBinding = bftBindingFactory.create(config)
+
+ def run(): JFuture[Unit] = {
+ val txsToBeRead = new ConcurrentHashMap[String, TxStatus]()
+
+ startReads(txsToBeRead, metrics)
+
+ val scheduler =
+ Executors.newScheduledThreadPool(sys.runtime.availableProcessors() * config.nodes.size)
+
+ val writeSchedules = startWrites(txsToBeRead, metrics)(scheduler)
+
+ scheduleShutdown(scheduler, writeSchedules)
+ }
+
+ private def startReads(
+ txsToBeRead: ConcurrentHashMap[String, TxStatus],
+ metrics: MetricRegistry,
+ ): Unit =
+ readNodes.zipWithIndex.foreach { case (node, nodeIndex) =>
+ bftBinding.subscribeOnce(
+ node,
+ txIdFuture => {
+ // Executes in the read task itself, avoiding delays and starvation.
+ txIdFuture.thenAccept(processRead(nodeIndex, txsToBeRead, metrics, _))
+ ()
+ },
+ )
+ }
+
+ private def startWrites(
+ txsToBeRead: ConcurrentHashMap[String, TxStatus],
+ metrics: MetricRegistry,
+ )(scheduler: ScheduledExecutorService): Seq[ScheduledFuture[?]] = {
+ log.info(s"Starting scheduled writes every ${config.perNodeWritePeriod.toNanos} nanos")
+
+ writeNodes.zipWithIndex.map { case (node, nodeIndex) =>
+ scheduler.scheduleAtFixedRate(
+ writeRunnable(node, nodeIndex, txsToBeRead, metrics),
+ 0, // No initial delay.
+ config.perNodeWritePeriod.toNanos, // Repeated write period.
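+ // With a fixed-rate schedule, each write node issues one write per perNodeWritePeriod;
+ // e.g. a 1 ms period targets ~1,000 writes/s per write node, and the aggregate offered
+ // load scales with the number of write nodes (illustrative numbers, not from any config).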
+ TimeUnit.NANOSECONDS, + ) + } + } + + private def scheduleShutdown( + scheduler: ScheduledExecutorService, + writeSchedules: Seq[ScheduledFuture[?]], + ): ScheduledFuture[Unit] = + scheduler.schedule( + shutdownCallable(scheduler, writeSchedules), + config.runDuration.toNanos, + TimeUnit.NANOSECONDS, + ) + + private def processRead( + nodeIndex: Int, + txsToBeRead: ConcurrentHashMap[String, TxStatus], + metrics: MetricRegistry, + txId: String, + ): Unit = { + txsToBeRead.compute( + txId, + updateTxStatus(nodeIndex, metrics), + ) + log.debug( + s"In-progress transactions after processing read for $txId: $txsToBeRead" + ) + val newInProgressTransactionsCount = txsToBeRead.size().toLong + pendingReads = newInProgressTransactionsCount + () + } + + private def updateTxStatus( + nodeIndex: Int, + metrics: MetricRegistry, + ): BiFunction[String, TxStatus, TxStatus] = (txId: String, txStatus: TxStatus) => { + val readNanos = System.nanoTime() + + Option(txStatus) match { + case Some(TxStatus(writeNanos, awaitingNodeIndices)) => + readMeters(metrics, Seq(nodeIndex)).foreach(_.metric.mark()) + + if (awaitingNodeIndices.sizeIs == 1) { + roundTripNanosHistogram(metrics).metric.update(readNanos - writeNanos) + log.debug(s"Transaction $txId is being received by the last read node") + null // Free some memory. + } else { + val newTxStatus = txStatus.copy(awaitingNodeIndices = awaitingNodeIndices.excl(nodeIndex)) + log.debug( + s"Transaction $txId still hasn't been received by nodes: ${txStatus.awaitingNodeIndices}" + ) + log.trace(s"New transactions status for $txId: $newTxStatus") + newTxStatus + } + + case _ => + log.error(s"Transaction $txId read more than once per node") + null + } + } + + private def writeRunnable( + node: BftBenchmarkConfig.WriteNode[?], + nodeIndex: Int, + txsToBeRead: ConcurrentHashMap[String, TxStatus], + metrics: MetricRegistry, + ): Runnable = () => { + log.trace(s"Starting scheduled write at ms ${System.nanoTime() / 1_000_000}") + + val txId = UUID.randomUUID().toString + + val startWriteNanos = System.nanoTime() + + txsToBeRead.put(txId, TxStatus(startWriteNanos, awaitingNodeIndices = readNodeIndices)) + startedWriteMeters(metrics, Seq(nodeIndex)).foreach(_.metric.mark()) + + bftBinding + .write(node, txId) + .handle { (_, throwable) => + reportWriteMetrics( + nodeIndex, + txsToBeRead, + txId, + startWriteNanos, + Option(throwable), + metrics, + ) + } + () + } + + private def reportWriteMetrics( + nodeIndex: Int, + txsToBeRead: ConcurrentHashMap[String, TxStatus], + txId: String, + startWriteNanos: Long, + throwable: Option[Throwable], + metrics: MetricRegistry, + ): Unit = + throwable match { + case Some(throwable) => + log.error(s"Write error for transaction $txId", throwable) + txsToBeRead.remove(txId) + failedWriteMeters(metrics, Seq(nodeIndex)).foreach(_.metric.mark()) + case _ => + val endWriteNanos = System.nanoTime() + successfulWriteMeters(metrics, Seq(nodeIndex)).foreach(_.metric.mark()) + writeNanosHistograms(metrics, Seq(nodeIndex)) + .foreach(_.metric.update(endWriteNanos - startWriteNanos)) + } + + private def shutdownCallable( + scheduler: ScheduledExecutorService, + writeSchedules: Seq[ScheduledFuture[?]], + ): Callable[Unit] = () => { + + log.info("Cancelling writes") + writeSchedules.foreach(_.cancel(true)) + + log.info("Shutting down the scheduler") + shutdownExecutorService(scheduler) + + log.info("Closing the BFT binding") + bftBinding.close() + } +} + +object BftBenchmark { + + private val Separator = "=" + + private val UuidLength: Int = 
UUID.randomUUID().toString.length + + def shutdownExecutorService(executorService: ExecutorService): Unit = { + executorService.shutdown() + executorService.awaitTermination(10, TimeUnit.SECONDS) + () + } + + private final case class TxStatus( + writeStartNanos: Long, + awaitingNodeIndices: Set[Int], + ) +} diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/BftBenchmarkConfig.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/BftBenchmarkConfig.scala new file mode 100644 index 0000000000..41c92707f8 --- /dev/null +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/BftBenchmarkConfig.scala @@ -0,0 +1,49 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance + +import scala.concurrent.duration.Duration + +final case class BftBenchmarkConfig( + transactionBytes: Int, + runDuration: Duration, + perNodeWritePeriod: Duration, + nodes: Seq[BftBenchmarkConfig.Node], + reportingInterval: Option[Duration] = None, +) + +object BftBenchmarkConfig { + + sealed trait Node extends Product { + val host: String + } + + sealed trait ReadNode[PortType] extends Node { + val readPort: PortType + } + + sealed trait WriteNode[PortType] extends Node { + val writePort: PortType + } + + final case class InProcessWriteOnlyNode(override val host: String, override val writePort: String) + extends WriteNode[String] + + final case class InProcessReadWriteNode( + override val host: String, + override val writePort: String, + override val readPort: String, + ) extends WriteNode[String] + with ReadNode[String] + + final case class NetworkedWriteOnlyNode(override val host: String, override val writePort: Int) + extends WriteNode[Int] + + final case class NetworkedReadWriteNode( + override val host: String, + override val writePort: Int, + override val readPort: Int, + ) extends WriteNode[Int] + with ReadNode[Int] +} diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/BftBenchmarkTool.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/BftBenchmarkTool.scala new file mode 100644 index 0000000000..2699e0f4ec --- /dev/null +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/BftBenchmarkTool.scala @@ -0,0 +1,161 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance + +import com.codahale.metrics.MetricRegistry +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance.BftBenchmarkTool.NanosInMillis +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance.BftMetrics.{ + MetricName, + NamedMetric, + failedWriteMeters, + pendingReads, + readMeters, + roundTripNanosHistogram, + startedWriteMeters, + successfulWriteMeters, + writeNanosHistograms, +} +import com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility +import com.fasterxml.jackson.annotation.PropertyAccessor +import com.fasterxml.jackson.databind.{ObjectMapper, SerializationFeature} +import com.typesafe.config.ConfigRenderOptions +import com.typesafe.scalalogging.Logger +import pureconfig.ConfigWriter +import pureconfig.generic.auto.* + +import java.io.StringWriter +import java.util.concurrent.{Executors, Future, TimeUnit} +import scala.collection.SeqMap +import scala.collection.immutable.ListMap +import scala.concurrent.duration.* +import scala.jdk.CollectionConverters.* + +final class BftBenchmarkTool(bftBindingFactory: BftBindingFactory) { + + private val log = NamedLoggerFactory.root.getLogger(getClass) + + def run(config: BftBenchmarkConfig): SeqMap[MetricName, AnyVal] = { + + val renderOptions: ConfigRenderOptions = ConfigRenderOptions + .defaults() + .setOriginComments(false) + .setComments(false) + .setJson(true) + .setFormatted(false) + + val nodeIndices = config.nodes.indices + + val readNodeIndices = + config.nodes.filter(_.isInstanceOf[BftBenchmarkConfig.ReadNode[?]]).indices + + log.info( + "Starting BFT benchmark, the configuration follows (JSON)" + ) + + log.info( + ConfigWriter[BftBenchmarkConfig] + .to(config) + .render(renderOptions.setFormatted(false)) + ) + + val metrics = new MetricRegistry() + + val bftBenchmark = new BftBenchmark(config, bftBindingFactory, metrics) + + val bftBenchmarkDoneFuture = bftBenchmark.run() + val reportingScheduler = Executors.newScheduledThreadPool(1) + val scheduledReport = config.reportingInterval.map { reportingInterval => + reportingScheduler.scheduleAtFixedRate( + { () => report(log, nodeIndices, readNodeIndices, metrics); () }: Runnable, + reportingInterval.toNanos, + reportingInterval.toNanos, + TimeUnit.NANOSECONDS, + ) + } + awaitBftBenchmarkDoneFuture(config, bftBenchmarkDoneFuture) + scheduledReport.foreach(_.cancel(true)) + reportingScheduler.shutdown() + + // Always include a final report + log.info("Completed, final stats will follow") + report(log, nodeIndices, readNodeIndices, metrics) + } + + private def awaitBftBenchmarkDoneFuture( + config: BftBenchmarkConfig, + bftBenchmarkDoneFuture: Future[Unit], + ): Unit = + bftBenchmarkDoneFuture.get(config.runDuration.toNanos + 2.minutes.toNanos, TimeUnit.NANOSECONDS) + + private def report( + log: Logger, + nodeIndices: Range, + readNodeIndices: Range, + metrics: MetricRegistry, + ): SeqMap[MetricName, AnyVal] = { + val meterReport = + ("pending.reads.count" -> pendingReads) +: + Seq( + startedWriteMeters(metrics, nodeIndices), + successfulWriteMeters(metrics, nodeIndices), + failedWriteMeters(metrics, nodeIndices), + readMeters(metrics, readNodeIndices), + ).flatten.flatMap { case NamedMetric(name, meter) => + Seq( + s"$name.rate.mean" -> meter.getMeanRate, + s"$name.count" -> meter.getCount, + ) + } + + val histogramReport = + List.newBuilder + 
.addAll( + writeNanosHistograms( + metrics, + nodeIndices, + ) + ) + .addOne( + roundTripNanosHistogram(metrics) + ) + .result() + .flatMap { case NamedMetric(name, histogram) => + val snapshot = histogram.getSnapshot + Seq( + s"$name.ms.mean" -> snapshot.getMean, + s"$name.ms.median" -> snapshot.getMedian, + s"$name.ms.min" -> snapshot.getMin.toDouble, + s"$name.ms.95%" -> snapshot.get95thPercentile, + s"$name.ms.99%" -> snapshot.get99thPercentile, + s"$name.ms.max" -> snapshot.getMax.toDouble, + ).map { case (name, nanos) => + name -> nanos / NanosInMillis + } + } + + // This is compatible with Jackson and also preserves the intended metric names order (i.e., insertion order). + val unifiedReport: SeqMap[MetricName, AnyVal] = + ListMap.newBuilder.addAll((meterReport ++ histogramReport).sortBy(_._1)).result() + + log.info(toJson(unifiedReport.asJava)) + unifiedReport + } + + private def toJson(request: Any): String = { + val stringWriter = new StringWriter + val objectMapper = { + val mapper = new ObjectMapper() + mapper.enable(SerializationFeature.INDENT_OUTPUT) + } + objectMapper.setVisibility(PropertyAccessor.FIELD, Visibility.ANY) + objectMapper.writeValue(stringWriter, request) + stringWriter.toString + } +} + +object BftBenchmarkTool { + + private val NanosInMillis = 1_000_000 +} diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/BftBinding.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/BftBinding.scala new file mode 100644 index 0000000000..e8ce7b8baf --- /dev/null +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/BftBinding.scala @@ -0,0 +1,32 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance + +import java.io.Closeable +import java.util.concurrent.CompletionStage + +trait BftBindingFactory { + type T <: BftBinding + + def create(config: BftBenchmarkConfig): T +} + +/** All the operations must be idempotent. + */ +trait BftBinding extends Closeable { + + import BftBinding.* + + def write( + node: BftBenchmarkConfig.WriteNode[?], + txId: String, + ): CompletionStage[Unit] + + def subscribeOnce(node: BftBenchmarkConfig.ReadNode[?], txConsumer: TxConsumer): Unit +} + +object BftBinding { + + type TxConsumer = CompletionStage[String] => Unit +} diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/BftMetrics.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/BftMetrics.scala new file mode 100644 index 0000000000..ad76a3cf77 --- /dev/null +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/BftMetrics.scala @@ -0,0 +1,96 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance + +import com.codahale.metrics.{Histogram, Meter, Metric, MetricRegistry} + +object BftMetrics { + + type MetricName = String + + @volatile var pendingReads = 0L + + final case class NamedMetric[M <: Metric]( + name: MetricName, + metric: M, + ) + + def startedWriteMeters( + metrics: MetricRegistry, + nodeIndices: Iterable[Int], + ): Iterable[NamedMetric[Meter]] = + namedMetrics( + metrics, + nodeIndices, + globalMetricName = "global.writes.started.meter", + nodeMetricNameBuilder = nodeIndex => s"node$nodeIndex.writes.started.meter", + metricExtractor = (metrics, name) => metrics.meter(name), + ) + + def successfulWriteMeters( + metrics: MetricRegistry, + nodeIndices: Iterable[Int], + ): Iterable[NamedMetric[Meter]] = + namedMetrics( + metrics, + nodeIndices, + globalMetricName = "global.writes.successful.meter", + nodeMetricNameBuilder = nodeIndex => s"node$nodeIndex.writes.successful.meter", + metricExtractor = (metrics, name) => metrics.meter(name), + ) + + def failedWriteMeters( + metrics: MetricRegistry, + nodeIndices: Iterable[Int], + ): Iterable[NamedMetric[Meter]] = + namedMetrics( + metrics, + nodeIndices, + globalMetricName = "global.writes.failed.meter", + nodeMetricNameBuilder = nodeIndex => s"node$nodeIndex.writes.failed.meter", + metricExtractor = (metrics, name) => metrics.meter(name), + ) + + def writeNanosHistograms( + metrics: MetricRegistry, + nodeIndices: Iterable[Int], + ): Iterable[NamedMetric[Histogram]] = + namedMetrics( + metrics, + nodeIndices, + globalMetricName = "global.writes.duration.histogram", + nodeMetricNameBuilder = nodeIndex => s"node$nodeIndex.writes.duration.histogram", + metricExtractor = (metrics, name) => metrics.histogram(name), + ) + + def readMeters( + metrics: MetricRegistry, + nodeIndices: Iterable[Int], + ): Iterable[NamedMetric[Meter]] = + namedMetrics( + metrics, + nodeIndices, + globalMetricName = "global.reads.meter", + nodeMetricNameBuilder = nodeIndex => s"node$nodeIndex.reads.meter", + metricExtractor = (metrics, name) => metrics.meter(name), + ) + + def roundTripNanosHistogram(metrics: MetricRegistry): NamedMetric[Histogram] = { + val name = "global.roundtrips.duration.histogram" + NamedMetric(name, metrics.histogram(name)) + } + + private def namedMetrics[M <: Metric]( + metrics: MetricRegistry, + nodeIndices: Iterable[Int], + globalMetricName: MetricName, + nodeMetricNameBuilder: Int => MetricName, + metricExtractor: (MetricRegistry, MetricName) => M, + ): Iterable[NamedMetric[M]] = + List.newBuilder + .addAll(nodeIndices.map(nodeMetricNameBuilder)) + .addOne(globalMetricName) + .result() + .map(name => NamedMetric(name, metricExtractor(metrics, name))) +} diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/GenStandaloneConfig.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/GenStandaloneConfig.scala new file mode 100644 index 0000000000..f45de4b1c3 --- /dev/null +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/GenStandaloneConfig.scala @@ -0,0 +1,77 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance
+
+import com.digitalasset.canton.crypto.provider.jce.JcePrivateCrypto
+import com.digitalasset.canton.crypto.{Fingerprint, SigningKeySpec, SigningKeyUsage}
+import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.core.BftBlockOrdererConfig.{
+ BftBlockOrderingStandaloneNetworkConfig,
+ BftBlockOrderingStandalonePeerConfig,
+}
+import com.digitalasset.canton.topology.{Namespace, SequencerId}
+import pureconfig.ConfigWriter
+
+import scala.util.control.NonFatal
+
+/** Utility to create a standalone BFT block ordering network configuration with the given number of
+ * nodes in the specified directory. It generates key pairs for each node and creates a config file
+ * for each node referencing the generated keys and the public keys of all other nodes.
+ *
+ * Usage: `GenStandaloneConfig <target-directory> <number-of-nodes>`
+ */
+object GenStandaloneConfig extends App {
+
+ private def printUsageAndExit(): Nothing = {
+ println(s"Usage: ${getClass.getSimpleName} <target-directory> <number-of-nodes>")
+ sys.exit(1)
+ }
+
+ private def sequencerId(i: Int): String =
+ SequencerId
+ .tryCreate(i.toString, Namespace(Fingerprint.tryFromString("default")))
+ .toProtoPrimitive
+
+ if (args.sizeIs < 2)
+ printUsageAndExit()
+
+ val (dir, numNodes) =
+ try {
+ better.files.File(args(0)).createDirectory() -> args(1).toInt
+ } catch {
+ case NonFatal(e) =>
+ e.printStackTrace()
+ printUsageAndExit()
+ }
+
+ for (i <- 1 to numNodes) {
+ val keyPair = JcePrivateCrypto
+ .generateSigningKeypair(SigningKeySpec.EcCurve25519, SigningKeyUsage.ProtocolOnly)
+ .getOrElse(throw new RuntimeException("Failed to generate keypair"))
+ val privKey = keyPair.privateKey
+ val pubKey = keyPair.publicKey
+ val privKeyFile = dir / s"node${i}_signing_private_key.bin"
+ val pubKeyFile = dir / s"node${i}_signing_public_key.bin"
+ privKeyFile.writeByteArray(privKey.toProtoV30.toByteArray)
+ pubKeyFile.writeByteArray(pubKey.toProtoV30.toByteArray)
+
+ val config = BftBlockOrderingStandaloneNetworkConfig(
+ thisSequencerId = sequencerId(i),
+ signingPrivateKeyProtoFile = privKeyFile.toJava,
+ signingPublicKeyProtoFile = pubKeyFile.toJava,
+ peers = (1 to numNodes)
+ .filter(_ != i)
+ .map { j =>
+ BftBlockOrderingStandalonePeerConfig(
+ sequencerId = sequencerId(j),
+ signingPublicKeyProtoFile = (dir / s"node${j}_signing_public_key.bin").toJava,
+ )
+ },
+ )
+ val configFile = dir / s"node$i.conf"
+ import pureconfig.generic.auto.*
+ configFile.writeText(
+ ConfigWriter[BftBlockOrderingStandaloneNetworkConfig].to(config).render()
+ )
+ }
+}
diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/dabft/DaBftBenchmarkBinding.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/dabft/DaBftBenchmarkBinding.scala
new file mode 100644
index 0000000000..d3d2323969
--- /dev/null
+++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/dabft/DaBftBenchmarkBinding.scala
@@ -0,0 +1,242 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance.dabft
+
+import com.daml.logging.{ContextualizedLogger, LoggingContext}
+import com.digitalasset.canton.discard.Implicits.DiscardOps
+import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance.BftBenchmark.shutdownExecutorService
+import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance.BftBinding.TxConsumer
+import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance.{
+ BftBenchmarkConfig,
+ BftBinding,
+ BftBindingFactory,
+}
+import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.utils.Miscellaneous.mutex
+import com.digitalasset.canton.synchronizer.sequencing.sequencer.bftordering.standalone.v1.StandaloneBftOrderingServiceGrpc.StandaloneBftOrderingServiceStub
+import com.digitalasset.canton.synchronizer.sequencing.sequencer.bftordering.standalone.v1.{
+ ReadOrderedRequest,
+ ReadOrderedResponse,
+ SendRequest,
+ StandaloneBftOrderingServiceGrpc,
+}
+import com.google.protobuf.ByteString
+import io.grpc.inprocess.InProcessChannelBuilder
+import io.grpc.stub.StreamObserver
+import io.grpc.{ManagedChannel, ManagedChannelBuilder}
+
+import java.util.concurrent.*
+import scala.concurrent.ExecutionContext
+import scala.jdk.CollectionConverters.CollectionHasAsScala
+import scala.language.existentials
+import scala.util.{Failure, Success}
+
+object DaBftBindingFactory extends BftBindingFactory {
+ override type T = DaBftBinding
+
+ override def create(config: BftBenchmarkConfig): DaBftBinding = {
+ // We could generate enough random payloads in advance, but we might OOM, e.g. at 500k TPS:
+ //
+ // 1-minute run + 1-minute margin = 120s; max total writes = 500k TPS * 120s = 60_000_000 < 2_147_483_647 (Int.MaxValue)
+ // 20KB * 60M payloads ~= 1.2TB (i.e., 60 * 20GB) of pre-generated data
+ //
+ // Alternatively, we could generate batches as we go, but since DABFT doesn't
+ // compress, we just use the same payload every time.
+ val payloadOutput = ByteString.newOutput(config.transactionBytes)
+ for (_ <- 0 until config.transactionBytes)
+ payloadOutput.write('0')
+ new DaBftBinding(payloadOutput.toByteString)
+ }
+}
+
+final class DaBftBinding(payload: ByteString) extends BftBinding {
+
+ private val MaxReadGrpcBytes = 32 * 1024 * 1024
+
+ private val log = ContextualizedLogger.get(getClass)
+ implicit private val loggingContext: LoggingContext = LoggingContext.empty
+
+ private val writeChannels =
+ new ConcurrentHashMap[
+ BftBenchmarkConfig.Node,
+ (ManagedChannel, StandaloneBftOrderingServiceStub),
+ ]
+
+ private val readChannels =
+ new ConcurrentHashMap[BftBenchmarkConfig.ReadNode[
+ ?
+ ], (ManagedChannel, StreamObserver[ReadOrderedResponse])]
+
+ private val scalaFutureExecutor = Executors.newWorkStealingPool()
+ private val scalaFutureExecutionContext =
+ ExecutionContext.fromExecutor(scalaFutureExecutor)
+
+ private val readExecutor = Executors.newCachedThreadPool()
+ private val writeExecutor = Executors.newCachedThreadPool()
+
+ override def write(
+ node: BftBenchmarkConfig.WriteNode[?],
+ txId: String,
+ ): CompletionStage[Unit] = {
+ val result = new CompletableFuture[Unit]()
+ val (_, stub) = channelAndStub(node)
+
+ writeExecutor.submit(new Runnable {
+ override def run(): Unit = {
+
+ val request = SendRequest(txId, payload)
+
+ mutex(stub) { // The writer is not thread-safe
+ stub
+ .send(request)
+ .transform {
+ case Success(response) =>
+ response.rejectionReason.fold {
+ log.debug(s"Wrote txId $txId to node $node")
+ result.complete(()).discard
+ } { reason =>
+ val exception =
+ new RuntimeException(s"Request $txId was rejected by node $node: $reason")
+ log.error(s"Rejected writing to node $node", exception)
+ result.completeExceptionally(exception).discard
+ }
+ Success(())
+ case Failure(exception) =>
+ log.error(s"Error writing to node $node", exception)
+ result.completeExceptionally(exception).discard
+ Success(())
+ }(scalaFutureExecutionContext)
+ .discard
+ }
+ }
+ })
+
+ result
+ }
+
+ override def subscribeOnce(node: BftBenchmarkConfig.ReadNode[?], txConsumer: TxConsumer): Unit = {
+ readChannels
+ .computeIfAbsent(
+ node,
+ _ => {
+ val channelBuilder =
+ node match {
+ case BftBenchmarkConfig.InProcessReadWriteNode(_, _, readPort) =>
+ InProcessChannelBuilder.forName(readPort)
+ case BftBenchmarkConfig.NetworkedReadWriteNode(host, _, readPort) =>
+ ManagedChannelBuilder.forTarget(s"$host:$readPort")
+ }
+ channelBuilder.usePlaintext().discard
+ channelBuilder.maxInboundMessageSize(MaxReadGrpcBytes).discard
+ val channel = channelBuilder.build()
+ val stub = StandaloneBftOrderingServiceGrpc
+ .stub(channel)
+ .withMaxInboundMessageSize(MaxReadGrpcBytes)
+ .withMaxOutboundMessageSize(MaxReadGrpcBytes)
+
+ val reader: StreamObserver[ReadOrderedResponse] =
+ new StreamObserver[ReadOrderedResponse] {
+ override def onNext(response: ReadOrderedResponse): Unit = {
+ log.debug(s"Read batch of ${response.block.size} requests")
+ response.block.foreach { o =>
+ readExecutor
+ .submit(new Runnable {
+ override def run(): Unit = {
+ val result = new CompletableFuture[String]()
+ try {
+ val txId = o.tag
+ result.complete(txId).discard
+ log.debug(s"Read back UUID $txId")
+ } catch {
+ case t: Throwable =>
+ log.error("Error while parsing request", t)
+ }
+ txConsumer(result)
+ }
+ })
+ .discard
+ }
+ }
+
+ override def onError(t: Throwable): Unit = {
+ log.error("Error from server", t)
+ complete()
+ }
+
+ override def onCompleted(): Unit =
+ complete()
+
+ def complete(): Unit = {
+ log.info(s"Read stream for node $node is being completed")
+ closeGrpcReadChannel(node)
+ readChannels.remove(node).discard
+ }
+ }
+ stub.readOrdered(ReadOrderedRequest(0), reader)
+ channel -> reader
+ },
+ )
+ ()
+ }
+
+ private def channelAndStub(
+ writeNode: BftBenchmarkConfig.WriteNode[?]
+ ): (ManagedChannel, StandaloneBftOrderingServiceStub) = + writeChannels.computeIfAbsent( + writeNode, + _ => { + val channelBuilder = + writeNode match { + case BftBenchmarkConfig.InProcessWriteOnlyNode(_, writePort) => + InProcessChannelBuilder.forName(writePort) + case BftBenchmarkConfig.InProcessReadWriteNode(_, writePort, _) => + InProcessChannelBuilder.forName(writePort) + case BftBenchmarkConfig.NetworkedWriteOnlyNode(host, writePort) => + ManagedChannelBuilder.forTarget(s"$host:$writePort") + case BftBenchmarkConfig.NetworkedReadWriteNode(host, writePort, _) => + ManagedChannelBuilder.forTarget(s"$host:$writePort") + } + channelBuilder.usePlaintext().discard + val channel = channelBuilder.build() + val stub = + StandaloneBftOrderingServiceGrpc + .stub(channel) + .withMaxInboundMessageSize(MaxReadGrpcBytes) + .withMaxOutboundMessageSize(MaxReadGrpcBytes) + channel -> stub + }, + ) + + override def close(): Unit = { + writeChannels.values().asScala.map(_._1).foreach(closeGrpcChannel) + shutdownExecutorService(writeExecutor) + + closeGrpcStreamsAndChannels(readChannels) + shutdownExecutorService(readExecutor) + + shutdownExecutorService(scalaFutureExecutor) + } + + private def closeGrpcStreamsAndChannels[N <: BftBenchmarkConfig.Node, R]( + channels: ConcurrentHashMap[N, (ManagedChannel, StreamObserver[R])] + ): Unit = + channels.values().forEach { case (channel, observer) => + closeGrpcStreamAndChannel(channel, observer) + } + + private def closeGrpcStreamAndChannel[R, N <: BftBenchmarkConfig.Node]( + channel: ManagedChannel, + stream: StreamObserver[R], + ): Unit = { + stream.onCompleted() + closeGrpcChannel(channel) + } + + private def closeGrpcReadChannel[N <: BftBenchmarkConfig.Node]( + node: BftBenchmarkConfig.Node + ): Unit = + Option(readChannels.get(node)).map(_._1).foreach(closeGrpcChannel) + + private def closeGrpcChannel[N <: BftBenchmarkConfig.Node](channel: ManagedChannel): Unit = + channel.shutdown().awaitTermination(20, TimeUnit.SECONDS).discard +} diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/dabft/DaBftBenchmarkTool.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/dabft/DaBftBenchmarkTool.scala new file mode 100644 index 0000000000..9877f870c2 --- /dev/null +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/dabft/DaBftBenchmarkTool.scala @@ -0,0 +1,21 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance.dabft + +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance.{ + BftBenchmarkConfig, + BftBenchmarkTool, +} +import pureconfig.ConfigSource +import pureconfig.generic.auto.* + +object DaBftBenchmarkTool extends App { + + private val config = ConfigSource + .resources("bftbenchmark-dabft.conf") + .load[BftBenchmarkConfig] + .getOrElse(throw new RuntimeException("Invalid configuration")) + + new BftBenchmarkTool(DaBftBindingFactory).run(config) +} diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/shortcircuit/ShortCircuitBindingSpec.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/shortcircuit/ShortCircuitBindingSpec.scala new file mode 100644 index 0000000000..a32e25b607 --- /dev/null +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/shortcircuit/ShortCircuitBindingSpec.scala @@ -0,0 +1,34 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance.shortcircuit + +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance.{ + BftBenchmarkConfig, + BftBenchmarkTool, +} +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpec +import pureconfig.ConfigSource +import pureconfig.generic.auto.* + +class ShortCircuitBindingSpec extends AnyWordSpec with Matchers { + + "ShortCircuitBinding" should { + "run" in { + val config = ConfigSource + .resources("bftbenchmark-shortcircuit.conf") + .load[BftBenchmarkConfig] + .getOrElse(throw new RuntimeException("Invalid configuration")) + val metricReport = new BftBenchmarkTool(ShortCircuitBindingFactory).run(config) + + val globalWritesCount = + metricReport("global.writes.successful.meter.count").asInstanceOf[Long] + val globalReadsCount = metricReport("global.reads.meter.count").asInstanceOf[Long] + globalWritesCount should be > 0L + globalReadsCount should be > 0L + globalReadsCount should be(globalWritesCount) + metricReport("global.writes.failed.meter.count").asInstanceOf[Long] should be(0) + } + } +} diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/shortcircuit/ShortcircuitBenchmarkTool.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/shortcircuit/ShortcircuitBenchmarkTool.scala new file mode 100644 index 0000000000..02a05c3e54 --- /dev/null +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/shortcircuit/ShortcircuitBenchmarkTool.scala @@ -0,0 +1,21 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance.shortcircuit + +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance.{ + BftBenchmarkConfig, + BftBenchmarkTool, +} +import pureconfig.ConfigSource +import pureconfig.generic.auto.* + +object ShortcircuitBenchmarkTool extends App { + + private val config = ConfigSource + .resources("bftbenchmark-shortcircuit.conf") + .load[BftBenchmarkConfig] + .getOrElse(throw new RuntimeException("Invalid configuration")) + + new BftBenchmarkTool(ShortCircuitBindingFactory).run(config) +} diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/shortcircuit/ShortcircuitBinding.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/shortcircuit/ShortcircuitBinding.scala new file mode 100644 index 0000000000..a5a4e467c1 --- /dev/null +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/performance/shortcircuit/ShortcircuitBinding.scala @@ -0,0 +1,55 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance.shortcircuit + +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance.BftBinding.TxConsumer +import com.digitalasset.canton.synchronizer.sequencer.block.bftordering.performance.{ + BftBenchmarkConfig, + BftBinding, + BftBindingFactory, +} + +import java.util.concurrent.{CompletableFuture, ConcurrentHashMap} +import scala.jdk.CollectionConverters.ConcurrentMapHasAsScala + +object ShortCircuitBindingFactory extends BftBindingFactory { + + override type T = ShortCircuitBinding + + override def create(config: BftBenchmarkConfig): ShortCircuitBinding = + new ShortCircuitBinding +} + +final class ShortCircuitBinding extends BftBinding { + + private val subscriptions = + new ConcurrentHashMap[ + BftBenchmarkConfig.Node, + TxConsumer, + ]() + + override def write( + node: BftBenchmarkConfig.WriteNode[?], + txId: String, + ): CompletableFuture[Unit] = + CompletableFuture.completedFuture { + subscriptions.asScala.get(node) match { + case Some(txConsumer) => txConsumer(CompletableFuture.completedFuture(txId)) + case _ => () + } + } + + override def subscribeOnce( + node: BftBenchmarkConfig.ReadNode[?], + txConsumer: TxConsumer, + ): Unit = + subscriptions + .computeIfAbsent( + node, + _ => txConsumer, + ) + () + + override def close(): Unit = () +} diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/simulation/framework/PingPongSimulationTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/simulation/framework/PingPongSimulationTest.scala index b68f40541e..18a9575eb9 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/simulation/framework/PingPongSimulationTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/simulation/framework/PingPongSimulationTest.scala @@ -188,7 +188,7 @@ object TestSystem { val consensusAdminModuleRef = 
system.newModuleRef[Consensus.Admin](ModuleName("consensusAdminModule"))()
val outputModuleRef =
- system.newModuleRef[Output.SequencerSnapshotMessage](ModuleName("outputModule"))()
+ system.newModuleRef[Output.Message[E]](ModuleName("outputModule"))()
val pruningModuleRef = system.newModuleRef[Pruning.Message](ModuleName("pruningModule"))()
SystemInitializationResult(
@@ -226,7 +226,7 @@ object TestSystem {
val consensusAdminModuleRef =
system.newModuleRef[Consensus.Admin](ModuleName("consensusAdminModule"))()
val outputModuleRef =
- system.newModuleRef[Output.SequencerSnapshotMessage](ModuleName("outputModule"))()
+ system.newModuleRef[Output.Message[E]](ModuleName("outputModule"))()
val pruningModuleRef = system.newModuleRef[Pruning.Message](ModuleName("pruningModule"))()
SystemInitializationResult(
diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/simulation/framework/PipeTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/simulation/framework/PipeTest.scala
index 9d649b8f6c..ae4b677fb6 100644
--- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/simulation/framework/PipeTest.scala
+++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/block/bftordering/simulation/framework/PipeTest.scala
@@ -142,7 +142,7 @@ object PipeTest {
val consensusAdminModuleRef =
system.newModuleRef[Consensus.Admin](ModuleName("consensusAdminModule"))()
val outputModuleRef =
- system.newModuleRef[Output.SequencerSnapshotMessage](ModuleName("outputModule"))()
+ system.newModuleRef[Output.Message[E]](ModuleName("outputModule"))()
val pruningModuleRef = system.newModuleRef[Pruning.Message](ModuleName("pruningModule"))()
inputModuleRef.asyncSendNoTrace("init")
diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/store/SequencerStoreTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/store/SequencerStoreTest.scala
index b5ed8860e7..c61be8ab3e 100644
--- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/store/SequencerStoreTest.scala
+++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/store/SequencerStoreTest.scala
@@ -420,7 +420,7 @@ trait SequencerStoreTest
alice,
messageId2,
payload2,
- recipients = Set(alice, bob),
+ recipients = Set(alice, bob, sequencerMember),
)
receiptAlice <- env.deliverReceipt(ts4, alice, messageId4, ts3)
deliverEventBob <- env.deliverEvent(ts3, bob, messageId3, payload3)
@@ -440,7 +440,10 @@ trait SequencerStoreTest
ts2,
alice,
messageId2,
- Set(alice, bob),
+ Set(
+ alice,
+ sequencerMember,
+ ), // the sequencer member is returned so that the reader correctly tracks the last topology recipient timestamp in the subscription
payload2,
)
_ <- env.assertReceiptEvent(
@@ -455,7 +458,10 @@ trait SequencerStoreTest
ts2,
alice,
messageId2,
- Set(alice, bob),
+ Set(
+ bob,
+ sequencerMember,
+ ), // the sequencer member is returned so that the reader correctly tracks the last topology recipient timestamp in the subscription
payload2,
)
_ <- env.assertDeliverEvent(bobEvents(1), ts3, bob, messageId3, Set(bob), payload3)
@@ -486,7 +492,7 @@ trait SequencerStoreTest
}
}
- "support paging results" in {
+ "support paging results and interleave broadcasts correctly" in {
val env = Env()
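+ // The scenario below saves twenty member-addressed events plus one broadcast at ts(30);
+ // reads are paged ten events at a time, and the broadcast is expected to be returned after
+ // all member-addressed events (see the final partialPage assertion).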
for { @@ -495,7 +501,12 @@ trait SequencerStoreTest events = NonEmptyUtil.fromUnsafe( (0L until 20L).map { n => env.deliverEventWithDefaults(ts1.plusSeconds(n), sender = registeredAlice.memberId)() - }.toSeq + } ++ + List( + env.deliverEventWithDefaults(ts(30), sender = registeredAlice.memberId)( + NonEmpty(SortedSet, SequencerMemberId.Broadcast) + ) + ) ) _ <- env.saveEventsAndBuffer(instanceIndex, events) _ <- env.saveWatermark(events.last1.timestamp).valueOrFail("saveWatermark") @@ -510,7 +521,7 @@ trait SequencerStoreTest seconds(firstPage) shouldBe (1L to 10L).toList seconds(secondPage) shouldBe (11L to 20L).toList - seconds(partialPage) shouldBe (16L to 20L).toList + seconds(partialPage) shouldBe (16L to 20L).toList :+ 30L } } diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/time/TimeAdvancingTopologySubscriberTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/time/TimeAdvancingTopologySubscriberTest.scala new file mode 100644 index 0000000000..67188e29b0 --- /dev/null +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/time/TimeAdvancingTopologySubscriberTest.scala @@ -0,0 +1,307 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.synchronizer.sequencer.time + +import cats.data.EitherT +import com.daml.metrics.api.MetricsContext +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.BaseTest.testedProtocolVersion +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.crypto.Fingerprint +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.protocol.messages.{ + DefaultOpenEnvelope, + TopologyTransactionsBroadcast, +} +import com.digitalasset.canton.sequencing.client.{ + SendAsyncClientError, + SendCallback, + SequencerClient, + SequencerClientSend, +} +import com.digitalasset.canton.sequencing.protocol.{ + AggregationRule, + AllMembersOfSynchronizer, + Batch, + MessageId, + Recipients, +} +import com.digitalasset.canton.synchronizer.sequencer.time.TimeAdvancingTopologySubscriber.TimeAdvanceBroadcastMaxSequencingTimeWindow +import com.digitalasset.canton.time.{Clock, SimClock} +import com.digitalasset.canton.topology.client.{ + SynchronizerTopologyClientWithInit, + TopologySnapshotLoader, +} +import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} +import com.digitalasset.canton.topology.{ + Namespace, + PhysicalSynchronizerId, + SequencerGroup, + SequencerId, + SynchronizerId, +} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.{BaseTest, SequencerCounter} +import org.scalatest.wordspec.AnyWordSpec + +import java.time.Duration +import scala.concurrent.ExecutionContext + +class TimeAdvancingTopologySubscriberTest extends AnyWordSpec with BaseTest { + + import TimeAdvancingTopologySubscriberTest.* + + // To avoid context-switching and test entire code paths + implicit private val executionContext: ExecutionContext = directExecutionContext + + "TimeAdvancingTopologySubscriber" should { + "not schedule a time-advancement message if the effective time is not in the future" in { + // given + val clock = mock[Clock] + val ts = 
CantonTimestamp.Epoch + + val subscriber = + new TimeAdvancingTopologySubscriber( + clock, + mock[SequencerClient], + mock[SynchronizerTopologyClientWithInit], + aPhysicalSynchronizerId, + aSequencerId, + loggerFactory, + ) + + // when + subscriber + .observed( + SequencedTime(ts), + EffectiveTime(ts), + SequencerCounter(0), + transactions = Seq.empty, + ) + .discard + + // then + verify(clock, never).scheduleAfter(any[CantonTimestamp => Any], any[Duration]) + } + + "schedule and send a time-advancement message" in { + // given + val ts1 = CantonTimestamp.Epoch + val ts2 = ts1.plusSeconds(60) + val clock = new SimClock(start = ts2, loggerFactory) + + val sequencerClient = mock[SequencerClientSend] + when( + sequencerClient.send( + batch = any[Batch[DefaultOpenEnvelope]], + topologyTimestamp = any[Option[CantonTimestamp]], + maxSequencingTime = any[CantonTimestamp], + messageId = any[MessageId], + aggregationRule = any[Option[AggregationRule]], + callback = eqTo(SendCallback.empty), + amplify = eqTo(false), + )(any[TraceContext], any[MetricsContext]) + ).thenReturn( + EitherT(FutureUnlessShutdown.pure(Right(()): Either[SendAsyncClientError, Unit])) + ) + + val snapshot = mock[TopologySnapshotLoader] + val sequencerGroup = + SequencerGroup( + active = Seq(aSequencerId), + passive = Seq.empty, + threshold = PositiveInt.one, + ) + when(snapshot.sequencerGroup()).thenReturn(FutureUnlessShutdown.pure(Some(sequencerGroup))) + val topologyClient = mock[SynchronizerTopologyClientWithInit] + when(topologyClient.currentSnapshotApproximation).thenReturn(snapshot) + val staticSynchronizerParameters = BaseTest.defaultStaticSynchronizerParametersWith() + when(topologyClient.staticSynchronizerParameters).thenReturn(staticSynchronizerParameters) + + val subscriber = + new TimeAdvancingTopologySubscriber( + clock, + sequencerClient, + topologyClient, + aPhysicalSynchronizerId, + aSequencerId, + loggerFactory, + ) + + val expectedBatch = + Batch.of( + testedProtocolVersion, + Seq( + TopologyTransactionsBroadcast(aPhysicalSynchronizerId, Seq.empty) -> + Recipients.cc(AllMembersOfSynchronizer) + )* + ) + val expectedAggregationRule = + NonEmpty + .from(sequencerGroup.active) + .map(sequencerGroup => + AggregationRule(sequencerGroup, threshold = PositiveInt.one, testedProtocolVersion) + ) + + // when + subscriber + .observed( + SequencedTime(ts1), + EffectiveTime(ts2), + SequencerCounter(0), + transactions = Seq.empty, + ) + .discard + clock.advance(staticSynchronizerParameters.topologyChangeDelay.duration) + + // then + verify(sequencerClient) + .send( + batch = eqTo(expectedBatch), + topologyTimestamp = eqTo(None), + maxSequencingTime = eqTo(ts2.plus(TimeAdvanceBroadcastMaxSequencingTimeWindow.duration)), + messageId = any[MessageId], + aggregationRule = eqTo(expectedAggregationRule), + callback = eqTo(SendCallback.empty), + amplify = eqTo(false), + )(any[TraceContext], any[MetricsContext]) + } + + "not schedule a time-advancing message if it's not an active sequencer" in { + // given + val clock = mock[Clock] + val ts1 = CantonTimestamp.Epoch + val ts2 = ts1.plusSeconds(60) + val passiveSequencerId = + SequencerId.tryCreate("passive", Namespace(Fingerprint.tryFromString("fingerprint"))) + + val snapshot = mock[TopologySnapshotLoader] + when(snapshot.sequencerGroup()).thenReturn( + FutureUnlessShutdown.pure( + Some( + SequencerGroup( + active = Seq(aSequencerId), + passive = Seq(passiveSequencerId), + threshold = PositiveInt.one, + ) + ) + ) + ) + val topologyClient = mock[SynchronizerTopologyClientWithInit] 
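+      // Wire the snapshot, in which this node is only a passive sequencer, into the topology client used by the subscriber.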
+ when(topologyClient.currentSnapshotApproximation).thenReturn(snapshot) + val staticSynchronizerParameters = BaseTest.defaultStaticSynchronizerParametersWith() + when(topologyClient.staticSynchronizerParameters).thenReturn(staticSynchronizerParameters) + + val subscriber = + new TimeAdvancingTopologySubscriber( + clock, + mock[SequencerClient], + topologyClient, + aPhysicalSynchronizerId, + passiveSequencerId, // boom! + loggerFactory, + ) + + // when + subscriber + .observed( + SequencedTime(ts1), + EffectiveTime(ts2), + SequencerCounter(0), + transactions = Seq.empty, + ) + .discard + + // then + verify(clock, never).scheduleAfter(any[CantonTimestamp => Any], any[Duration]) + } + + // A case caught by flakiness in the sequencer off-boarding integration test. + "ask for a fresh sequencer group again just before sending a time-advancing message" in { + // given + val snapshot = mock[TopologySnapshotLoader] + val sequencerGroup = + SequencerGroup( + active = Seq(aSequencerId), + passive = Seq.empty, + threshold = PositiveInt.one, + ) + when(snapshot.sequencerGroup()).thenReturn(FutureUnlessShutdown.pure(Some(sequencerGroup))) + val topologyClient = mock[SynchronizerTopologyClientWithInit] + when(topologyClient.currentSnapshotApproximation).thenReturn(snapshot) + + val subscriber = + new TimeAdvancingTopologySubscriber( + mock[Clock], + mock[SequencerClient], + topologyClient, + aPhysicalSynchronizerId, + aSequencerId, + loggerFactory, + ) + + // when + subscriber.broadcastToAdvanceTime(EffectiveTime(CantonTimestamp.Epoch)) + + // then + verify(topologyClient).currentSnapshotApproximation + verify(snapshot).sequencerGroup() + } + + // A case caught by flakiness in the sequencer node bootstrap test. + "not send a time-advancing message if it's not an active sequencer after asking for a sequencer group again" in { + // given + val sequencerClient = mock[SequencerClient] + + val snapshot = mock[TopologySnapshotLoader] + val sequencerGroup = + SequencerGroup( + active = Seq.empty, + passive = Seq(aSequencerId), + threshold = PositiveInt.one, + ) + when(snapshot.sequencerGroup()).thenReturn(FutureUnlessShutdown.pure(Some(sequencerGroup))) + val topologyClient = mock[SynchronizerTopologyClientWithInit] + when(topologyClient.currentSnapshotApproximation).thenReturn(snapshot) + + val subscriber = + new TimeAdvancingTopologySubscriber( + mock[Clock], + sequencerClient, + topologyClient, + aPhysicalSynchronizerId, + aSequencerId, // boom! 
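+          // aSequencerId is only passive in the sequencer group above, so no time-advancing broadcast may be sent.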
+ loggerFactory, + ) + + // when + subscriber.broadcastToAdvanceTime(EffectiveTime(CantonTimestamp.Epoch)) + + // then + verify(sequencerClient, never).send( + any[Batch[DefaultOpenEnvelope]], + any[Option[CantonTimestamp]], + any[CantonTimestamp], + any[MessageId], + any[Option[AggregationRule]], + any[SendCallback], + any[Boolean], + )(any[TraceContext], any[MetricsContext]) + } + } +} + +object TimeAdvancingTopologySubscriberTest { + private val aPhysicalSynchronizerId = + PhysicalSynchronizerId( + SynchronizerId.tryFromString("id::default"), + testedProtocolVersion, + NonNegativeInt.tryCreate(0), + ) + + private val aSequencerId = + SequencerId.tryCreate("sequencer", Namespace(Fingerprint.tryFromString("fingerprint"))) +} diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerIntegrationTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerIntegrationTest.scala index 87107b5fce..6565eee08b 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerIntegrationTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerIntegrationTest.scala @@ -138,8 +138,8 @@ class Env(override val loggerFactory: SuppressingLogger)(implicit override protected[this] def logger: TracedLogger = self.logger }) - // TODO(i26481): adjust when the new connection pool is stable - val useNewConnectionPool: Boolean = BaseTest.testedProtocolVersion >= ProtocolVersion.dev + // TODO(i26270): cleanup when the new connection pool is stable + val useNewConnectionPool: Boolean = true when(topologyClient.currentSnapshotApproximation(any[TraceContext])) .thenReturn(mockTopologySnapshot) @@ -320,11 +320,13 @@ class Env(override val loggerFactory: SuppressingLogger)(implicit expectedSequencers: NonEmpty[Map[SequencerAlias, SequencerId]], useNewConnectionPool: Boolean = useNewConnectionPool, ): EitherT[FutureUnlessShutdown, String, RichSequencerClient] = { + val clientConfig = + SequencerClientConfig(authToken = authConfig, useNewConnectionPool = useNewConnectionPool) val clientFactory = SequencerClientFactory( synchronizerId, cryptoApi, cryptoApi.crypto, - SequencerClientConfig(authToken = authConfig, useNewConnectionPool = useNewConnectionPool), + clientConfig, TracingConfig.Propagation.Disabled, TestingConfigInternal(), BaseTest.defaultStaticSynchronizerParameters, @@ -346,14 +348,14 @@ class Env(override val loggerFactory: SuppressingLogger)(implicit ) val poolConfig = SequencerConnectionXPoolConfig.fromSequencerConnections( - connections, - TracingConfig(TracingConfig.Propagation.Disabled), + sequencerConnections = connections, + tracingConfig = TracingConfig(TracingConfig.Propagation.Disabled), expectedPSIdO = None, ) for { connectionPool <- EitherT.fromEither[FutureUnlessShutdown]( - connectionPoolFactory.create(poolConfig).leftMap(error => error.toString) + connectionPoolFactory.create(poolConfig, name = "test").leftMap(error => error.toString) ) _ <- if (useNewConnectionPool) @@ -577,6 +579,7 @@ class GrpcSequencerIntegrationTest sequencerTrustThreshold = PositiveInt.two, sequencerLivenessMargin = NonNegativeInt.zero, submissionRequestAmplification = SubmissionRequestAmplification.NoAmplification, + sequencerConnectionPoolDelays = SequencerConnectionPoolDelays.default, ) .value, expectedSequencers = NonEmpty diff --git 
a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerServiceTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerServiceTest.scala index 027c82a580..95f35f9c1f 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerServiceTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerServiceTest.scala @@ -42,10 +42,9 @@ import com.digitalasset.canton.topology.processing.{ SequencedTime, TopologyTransactionTestFactory, } -import com.digitalasset.canton.topology.store.StoredTopologyTransactions.GenericStoredTopologyTransactions +import com.digitalasset.canton.topology.store.StoredTopologyTransaction.GenericStoredTopologyTransaction import com.digitalasset.canton.topology.store.{ StoredTopologyTransaction, - StoredTopologyTransactions, TopologyStateForInitializationService, } import com.digitalasset.canton.tracing.TraceContext @@ -53,6 +52,7 @@ import com.digitalasset.canton.util.{EitherTUtil, MonadUtil} import com.digitalasset.canton.version.{ProtocolVersion, VersionedMessage} import com.digitalasset.canton.{ BaseTest, + HasActorSystem, HasExecutionContext, ProtocolVersionChecksFixtureAsyncWordSpec, } @@ -61,6 +61,8 @@ import io.grpc.Status.Code.* import io.grpc.stub.{ServerCallStreamObserver, StreamObserver} import io.grpc.{StatusException, StatusRuntimeException} import monocle.macros.syntax.lens.* +import org.apache.pekko.NotUsed +import org.apache.pekko.stream.scaladsl.Source import org.mockito.{ArgumentMatchers, Mockito} import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.FixtureAsyncWordSpec @@ -77,7 +79,8 @@ class GrpcSequencerServiceTest extends FixtureAsyncWordSpec with BaseTest with ProtocolVersionChecksFixtureAsyncWordSpec - with HasExecutionContext { + with HasExecutionContext + with HasActorSystem { type Subscription = GrpcManagedSubscription[?] 
import GrpcSequencerServiceTest.* @@ -142,21 +145,19 @@ class GrpcSequencerServiceTest override def initialSnapshot(member: Member)(implicit executionContext: ExecutionContext, traceContext: TraceContext, - ): FutureUnlessShutdown[GenericStoredTopologyTransactions] = FutureUnlessShutdown.pure( - StoredTopologyTransactions( - // As we don't expect the actual transactions in this test, we can repeat the same transaction a bunch of times - List - .fill(maxItemsInTopologyBatch * numBatches)(factory.ns1k1_k1) - .map( - StoredTopologyTransaction( - SequencedTime.MinValue, - EffectiveTime.MinValue, - None, - _, - None, - ) + ): Source[GenericStoredTopologyTransaction, NotUsed] = Source( + // As we don't expect the actual transactions in this test, we can repeat the same transaction a bunch of times + List + .fill(maxItemsInTopologyBatch * numBatches)(factory.ns1k1_k1) + .map( + StoredTopologyTransaction( + SequencedTime.MinValue, + EffectiveTime.MinValue, + None, + _, + None, ) - ) + ) ) } @@ -780,7 +781,7 @@ class GrpcSequencerServiceTest "downloadTopologyStateForInit" should { "stream batches of topology transactions" in { env => - val observer = new MockStreamObserver[v30.DownloadTopologyStateForInitResponse]() + val observer = new MockServerStreamObserver[v30.DownloadTopologyStateForInitResponse]() env.service.downloadTopologyStateForInit( TopologyStateForInitRequest(participant, testedProtocolVersion).toProtoV30, observer, diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/topology/SequencedEventStoreBasedTopologyHeadInitializerTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/topology/SequencedEventStoreBasedTopologyHeadInitializerTest.scala index f3b65b1424..6927beecaa 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/topology/SequencedEventStoreBasedTopologyHeadInitializerTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/topology/SequencedEventStoreBasedTopologyHeadInitializerTest.scala @@ -114,7 +114,11 @@ class SequencedEventStoreBasedTopologyHeadInitializerTest ) initializer - .initialize(topologyClientMock, synchronizerPredecessor = None) + .initialize( + topologyClientMock, + synchronizerPredecessor = None, + defaultStaticSynchronizerParameters, + ) .map { _ => verify(topologyClientMock).updateHead( SequencedTime(expectedHeadStateSequencedTime), diff --git a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/topology/SequencerSnapshotBasedTopologyHeadInitializerTest.scala b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/topology/SequencerSnapshotBasedTopologyHeadInitializerTest.scala index 7be80059c4..0530ab1ead 100644 --- a/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/topology/SequencerSnapshotBasedTopologyHeadInitializerTest.scala +++ b/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/topology/SequencerSnapshotBasedTopologyHeadInitializerTest.scala @@ -62,7 +62,11 @@ class SequencerSnapshotBasedTopologyHeadInitializerTest ) initializer - .initialize(topologyClientMock, synchronizerPredecessor = None) + .initialize( + topologyClientMock, + synchronizerPredecessor = None, + defaultStaticSynchronizerParameters, + ) .map { _ => verify(topologyClientMock).updateHead( 
SequencedTime(aSnapshotLastTs), diff --git a/canton/community/testing/src/main/java/com/digitalasset/canton/logging/SuppressingLoggerDispatcher.java b/canton/community/testing/src/main/java/com/digitalasset/canton/logging/SuppressingLoggerDispatcher.java index 7a9d864220..47f7ff0517 100644 --- a/canton/community/testing/src/main/java/com/digitalasset/canton/logging/SuppressingLoggerDispatcher.java +++ b/canton/community/testing/src/main/java/com/digitalasset/canton/logging/SuppressingLoggerDispatcher.java @@ -7,21 +7,20 @@ import org.slf4j.Marker; import org.slf4j.event.Level; import org.slf4j.helpers.SubstituteLogger; - -import java.util.concurrent.atomic.AtomicReference; +import scala.runtime.BoxedUnit; import static org.slf4j.event.Level.*; class SuppressingLoggerDispatcher extends SubstituteLogger { private final Logger suppressedMessageLogger; - private final AtomicReference activeSuppressionState; + private final SuppressingLogger.ActiveState activeSuppressionState; private final String suppressionPrefix; SuppressingLoggerDispatcher( String name, Logger suppressedMessageLogger, - AtomicReference activeSuppressionState, + SuppressingLogger.ActiveState activeSuppressionState, String suppressionPrefix) { // because we know we won't log anything before setting the delegate there's no need to set an // event queue hence the null @@ -33,406 +32,454 @@ class SuppressingLoggerDispatcher extends SubstituteLogger { @Override public void error(String msg) { - if (isSuppressed(ERROR)) { - super.info(withSuppressionHint(ERROR, msg)); - suppressedMessageLogger.error(msg); - } else { - super.error(msg); - } + ifSuppressed( + ERROR, + () -> { + super.info(withSuppressionHint(ERROR, msg)); + suppressedMessageLogger.error(msg); + }, + () -> super.error(msg)); } @Override public void error(String format, Object arg) { - if (isSuppressed(ERROR)) { - super.info(withSuppressionHint(ERROR, format), arg); - suppressedMessageLogger.error(format, arg); - } else { - super.error(format, arg); - } + ifSuppressed( + ERROR, + () -> { + super.info(withSuppressionHint(ERROR, format), arg); + suppressedMessageLogger.error(format, arg); + }, + () -> super.error(format, arg)); } @Override public void error(String format, Object arg1, Object arg2) { - if (isSuppressed(ERROR)) { - super.info(withSuppressionHint(ERROR, format), arg1, arg2); - suppressedMessageLogger.error(format, arg1, arg2); - } else { - super.error(format, arg1, arg2); - } + ifSuppressed( + ERROR, + () -> { + super.info(withSuppressionHint(ERROR, format), arg1, arg2); + suppressedMessageLogger.error(format, arg1, arg2); + }, + () -> super.error(format, arg1, arg2)); } @Override public void error(String format, Object... 
arguments) { - if (isSuppressed(ERROR)) { - super.info(withSuppressionHint(ERROR, format), arguments); - suppressedMessageLogger.error(format, arguments); - } else { - super.error(format, arguments); - } + ifSuppressed( + ERROR, + () -> { + super.info(withSuppressionHint(ERROR, format), arguments); + suppressedMessageLogger.error(format, arguments); + }, + () -> super.error(format, arguments)); } @Override public void error(String msg, Throwable t) { - if (isSuppressed(ERROR)) { - super.info(withSuppressionHint(ERROR, msg), t); - suppressedMessageLogger.error(msg, t); - } else { - super.error(msg, t); - } + ifSuppressed( + ERROR, + () -> { + super.info(withSuppressionHint(ERROR, msg), t); + suppressedMessageLogger.error(msg, t); + }, + () -> super.error(msg, t)); } @Override public void error(Marker marker, String msg) { - if (isSuppressed(ERROR)) { - super.info(marker, withSuppressionHint(ERROR, msg)); - suppressedMessageLogger.error(marker, msg); - } else { - super.error(marker, msg); - } + ifSuppressed( + ERROR, + () -> { + super.info(marker, withSuppressionHint(ERROR, msg)); + suppressedMessageLogger.error(marker, msg); + }, + () -> super.error(marker, msg)); } @Override public void error(Marker marker, String format, Object arg) { - if (isSuppressed(ERROR)) { - super.info(marker, withSuppressionHint(ERROR, format), arg); - suppressedMessageLogger.error(marker, format, arg); - } else { - super.error(marker, format, arg); - } + ifSuppressed( + ERROR, + () -> { + super.info(marker, withSuppressionHint(ERROR, format), arg); + suppressedMessageLogger.error(marker, format, arg); + }, + () -> super.error(marker, format, arg)); } @Override public void error(Marker marker, String format, Object arg1, Object arg2) { - if (isSuppressed(ERROR)) { - super.info(marker, withSuppressionHint(ERROR, format), arg1, arg2); - suppressedMessageLogger.error(marker, format, arg1, arg2); - } else { - super.error(marker, format, arg1, arg2); - } + ifSuppressed( + ERROR, + () -> { + super.info(marker, withSuppressionHint(ERROR, format), arg1, arg2); + suppressedMessageLogger.error(marker, format, arg1, arg2); + }, + () -> super.error(marker, format, arg1, arg2)); } @Override public void error(Marker marker, String format, Object... 
arguments) { - if (isSuppressed(ERROR)) { - super.info(marker, withSuppressionHint(ERROR, format), arguments); - suppressedMessageLogger.error(marker, format, arguments); - } else { - super.error(marker, format, arguments); - } + ifSuppressed( + ERROR, + () -> { + super.info(marker, withSuppressionHint(ERROR, format), arguments); + suppressedMessageLogger.error(marker, format, arguments); + }, + () -> super.error(marker, format, arguments)); } @Override public void error(Marker marker, String msg, Throwable t) { - if (isSuppressed(ERROR)) { - super.info(marker, withSuppressionHint(ERROR, msg), t); - suppressedMessageLogger.error(marker, msg, t); - } else { - super.error(marker, msg, t); - } + ifSuppressed( + ERROR, + () -> { + super.info(marker, withSuppressionHint(ERROR, msg), t); + suppressedMessageLogger.error(marker, msg, t); + }, + () -> super.error(marker, msg, t)); } @Override public void warn(String msg) { - if (isSuppressed(WARN)) { - super.info(withSuppressionHint(WARN, msg)); - suppressedMessageLogger.warn(msg); - } else { - super.warn(msg); - } + ifSuppressed( + WARN, + () -> { + super.info(withSuppressionHint(WARN, msg)); + suppressedMessageLogger.warn(msg); + }, + () -> super.warn(msg)); } @Override public void warn(String format, Object arg) { - if (isSuppressed(WARN)) { - super.info(withSuppressionHint(WARN, format), arg); - suppressedMessageLogger.warn(format, arg); - } else { - super.warn(format, arg); - } + ifSuppressed( + WARN, + () -> { + super.info(withSuppressionHint(WARN, format), arg); + suppressedMessageLogger.warn(format, arg); + }, + () -> super.warn(format, arg)); } @Override public void warn(String format, Object arg1, Object arg2) { - if (isSuppressed(WARN)) { - super.info(withSuppressionHint(WARN, format), arg1, arg2); - suppressedMessageLogger.warn(format, arg1, arg2); - } else { - super.warn(format, arg1, arg2); - } + ifSuppressed( + WARN, + () -> { + super.info(withSuppressionHint(WARN, format), arg1, arg2); + suppressedMessageLogger.warn(format, arg1, arg2); + }, + () -> super.warn(format, arg1, arg2)); } @Override public void warn(String format, Object... 
arguments) { - if (isSuppressed(WARN)) { - super.info(withSuppressionHint(WARN, format), arguments); - suppressedMessageLogger.warn(format, arguments); - } else { - super.warn(format, arguments); - } + ifSuppressed( + WARN, + () -> { + super.info(withSuppressionHint(WARN, format), arguments); + suppressedMessageLogger.warn(format, arguments); + }, + () -> super.warn(format, arguments)); } @Override public void warn(String msg, Throwable t) { - if (isSuppressed(WARN)) { - super.info(withSuppressionHint(WARN, msg), t); - suppressedMessageLogger.warn(msg, t); - } else { - super.warn(msg, t); - } + ifSuppressed( + WARN, + () -> { + super.info(withSuppressionHint(WARN, msg), t); + suppressedMessageLogger.warn(msg, t); + }, + () -> super.warn(msg, t)); } @Override public void warn(Marker marker, String msg) { - if (isSuppressed(WARN)) { - super.info(marker, withSuppressionHint(WARN, msg)); - suppressedMessageLogger.warn(marker, msg); - } else { - super.warn(marker, msg); - } + ifSuppressed( + WARN, + () -> { + super.info(marker, withSuppressionHint(WARN, msg)); + suppressedMessageLogger.warn(marker, msg); + }, + () -> super.warn(marker, msg)); } @Override public void warn(Marker marker, String format, Object arg) { - if (isSuppressed(WARN)) { - super.info(marker, withSuppressionHint(WARN, format), arg); - suppressedMessageLogger.warn(marker, format, arg); - } else { - super.warn(marker, format, arg); - } + ifSuppressed( + WARN, + () -> { + super.info(marker, withSuppressionHint(WARN, format), arg); + suppressedMessageLogger.warn(marker, format, arg); + }, + () -> super.warn(marker, format, arg)); } @Override public void warn(Marker marker, String format, Object arg1, Object arg2) { - if (isSuppressed(WARN)) { - super.info(marker, withSuppressionHint(WARN, format), arg1, arg2); - suppressedMessageLogger.warn(marker, format, arg1, arg2); - } else { - super.warn(marker, format, arg1, arg2); - } + ifSuppressed( + WARN, + () -> { + super.info(marker, withSuppressionHint(WARN, format), arg1, arg2); + suppressedMessageLogger.warn(marker, format, arg1, arg2); + }, + () -> super.warn(marker, format, arg1, arg2)); } @Override public void warn(Marker marker, String format, Object... 
arguments) { - if (isSuppressed(WARN)) { - super.info(marker, withSuppressionHint(WARN, format), arguments); - suppressedMessageLogger.warn(marker, format, arguments); - } else { - super.warn(marker, format, arguments); - } + ifSuppressed( + WARN, + () -> { + super.info(marker, withSuppressionHint(WARN, format), arguments); + suppressedMessageLogger.warn(marker, format, arguments); + }, + () -> super.warn(marker, format, arguments)); } @Override public void warn(Marker marker, String msg, Throwable t) { - if (isSuppressed(WARN)) { - super.info(marker, withSuppressionHint(WARN, msg), t); - suppressedMessageLogger.warn(marker, msg, t); - } else { - super.warn(marker, msg, t); - } + ifSuppressed( + WARN, + () -> { + super.info(marker, withSuppressionHint(WARN, msg), t); + suppressedMessageLogger.warn(marker, msg, t); + }, + () -> super.warn(marker, msg, t)); } @Override public void info(String msg) { - if (isSuppressed(INFO)) { - super.info(withSuppressionHint(INFO, msg)); - suppressedMessageLogger.info(msg); - } else { - super.info(msg); - } + ifSuppressed( + INFO, + () -> { + super.info(withSuppressionHint(INFO, msg)); + suppressedMessageLogger.info(msg); + }, + () -> super.info(msg)); } @Override public void info(String format, Object arg) { - if (isSuppressed(INFO)) { - super.info(withSuppressionHint(INFO, format), arg); - suppressedMessageLogger.info(format, arg); - } else { - super.info(format, arg); - } + ifSuppressed( + INFO, + () -> { + super.info(withSuppressionHint(INFO, format), arg); + suppressedMessageLogger.info(format, arg); + }, + () -> super.info(format, arg)); } @Override public void info(String format, Object arg1, Object arg2) { - if (isSuppressed(INFO)) { - super.info(withSuppressionHint(INFO, format), arg1, arg2); - suppressedMessageLogger.info(format, arg1, arg2); - } else { - super.info(format, arg1, arg2); - } + ifSuppressed( + INFO, + () -> { + super.info(withSuppressionHint(INFO, format), arg1, arg2); + suppressedMessageLogger.info(format, arg1, arg2); + }, + () -> super.info(format, arg1, arg2)); } @Override public void info(String format, Object... 
arguments) { - if (isSuppressed(INFO)) { - super.info(withSuppressionHint(INFO, format), arguments); - suppressedMessageLogger.info(format, arguments); - } else { - super.info(format, arguments); - } + ifSuppressed( + INFO, + () -> { + super.info(withSuppressionHint(INFO, format), arguments); + suppressedMessageLogger.info(format, arguments); + }, + () -> super.info(format, arguments)); } @Override public void info(String msg, Throwable t) { - if (isSuppressed(INFO)) { - super.info(withSuppressionHint(INFO, msg), t); - suppressedMessageLogger.info(msg, t); - } else { - super.info(msg, t); - } + ifSuppressed( + INFO, + () -> { + super.info(withSuppressionHint(INFO, msg), t); + suppressedMessageLogger.info(msg, t); + }, + () -> super.info(msg, t)); } @Override public void info(Marker marker, String msg) { - if (isSuppressed(INFO)) { - super.info(marker, withSuppressionHint(INFO, msg)); - suppressedMessageLogger.info(marker, msg); - } else { - super.info(marker, msg); - } + ifSuppressed( + INFO, + () -> { + super.info(marker, withSuppressionHint(INFO, msg)); + suppressedMessageLogger.info(marker, msg); + }, + () -> super.info(marker, msg)); } @Override public void info(Marker marker, String format, Object arg) { - if (isSuppressed(INFO)) { - super.info(marker, withSuppressionHint(INFO, format), arg); - suppressedMessageLogger.info(marker, format, arg); - } else { - super.info(marker, format, arg); - } + ifSuppressed( + INFO, + () -> { + super.info(marker, withSuppressionHint(INFO, format), arg); + suppressedMessageLogger.info(marker, format, arg); + }, + () -> super.info(marker, format, arg)); } @Override public void info(Marker marker, String format, Object arg1, Object arg2) { - if (isSuppressed(INFO)) { - super.info(marker, withSuppressionHint(INFO, format), arg1, arg2); - suppressedMessageLogger.info(marker, format, arg1, arg2); - } else { - super.info(marker, format, arg1, arg2); - } + ifSuppressed( + INFO, + () -> { + super.info(marker, withSuppressionHint(INFO, format), arg1, arg2); + suppressedMessageLogger.info(marker, format, arg1, arg2); + }, + () -> super.info(marker, format, arg1, arg2)); } @Override public void info(Marker marker, String format, Object... 
arguments) { - if (isSuppressed(INFO)) { - super.info(marker, withSuppressionHint(INFO, format), arguments); - suppressedMessageLogger.info(marker, format, arguments); - } else { - super.info(marker, format, arguments); - } + ifSuppressed( + INFO, + () -> { + super.info(marker, withSuppressionHint(INFO, format), arguments); + suppressedMessageLogger.info(marker, format, arguments); + }, + () -> super.info(marker, format, arguments)); } @Override public void info(Marker marker, String msg, Throwable t) { - if (isSuppressed(INFO)) { - super.info(marker, withSuppressionHint(DEBUG, msg), t); - suppressedMessageLogger.info(marker, msg, t); - } else { - super.info(marker, msg, t); - } + ifSuppressed( + INFO, + () -> { + super.info(marker, withSuppressionHint(INFO, msg), t); + suppressedMessageLogger.info(marker, msg, t); + }, + () -> super.info(marker, msg, t)); } @Override public void debug(String msg) { - if (isSuppressed(DEBUG)) { - super.debug(withSuppressionHint(DEBUG, msg)); - suppressedMessageLogger.debug(msg); - } else { - super.debug(msg); - } + ifSuppressed( + DEBUG, + () -> { + super.debug(withSuppressionHint(DEBUG, msg)); + suppressedMessageLogger.debug(msg); + }, + () -> super.debug(msg)); } @Override public void debug(String format, Object arg) { - if (isSuppressed(DEBUG)) { - super.debug(withSuppressionHint(DEBUG, format), arg); - suppressedMessageLogger.debug(format, arg); - } else { - super.debug(format, arg); - } + ifSuppressed( + DEBUG, + () -> { + super.debug(withSuppressionHint(DEBUG, format), arg); + suppressedMessageLogger.debug(format, arg); + }, + () -> super.debug(format, arg)); } @Override public void debug(String format, Object arg1, Object arg2) { - if (isSuppressed(DEBUG)) { - super.debug(withSuppressionHint(DEBUG, format), arg1, arg2); - suppressedMessageLogger.debug(format, arg1, arg2); - } else { - super.debug(format, arg1, arg2); - } + ifSuppressed( + DEBUG, + () -> { + super.debug(withSuppressionHint(DEBUG, format), arg1, arg2); + suppressedMessageLogger.debug(format, arg1, arg2); + }, + () -> super.debug(format, arg1, arg2)); } @Override public void debug(String format, Object... 
arguments) { - if (isSuppressed(DEBUG)) { - super.debug(withSuppressionHint(DEBUG, format), arguments); - suppressedMessageLogger.debug(format, arguments); - } else { - super.debug(format, arguments); - } + ifSuppressed( + DEBUG, + () -> { + super.debug(withSuppressionHint(DEBUG, format), arguments); + suppressedMessageLogger.debug(format, arguments); + }, + () -> super.debug(format, arguments)); } @Override public void debug(String msg, Throwable t) { - if (isSuppressed(DEBUG)) { - super.debug(withSuppressionHint(DEBUG, msg), t); - suppressedMessageLogger.debug(msg, t); - } else { - super.debug(msg, t); - } + ifSuppressed( + DEBUG, + () -> { + super.debug(withSuppressionHint(DEBUG, msg), t); + suppressedMessageLogger.debug(msg, t); + }, + () -> super.debug(msg, t)); } @Override public void debug(Marker marker, String msg) { - if (isSuppressed(DEBUG)) { - super.debug(marker, withSuppressionHint(DEBUG, msg)); - suppressedMessageLogger.debug(marker, msg); - } else { - super.debug(marker, msg); - } + ifSuppressed( + DEBUG, + () -> { + super.debug(marker, withSuppressionHint(DEBUG, msg)); + suppressedMessageLogger.debug(marker, msg); + }, + () -> super.debug(marker, msg)); } @Override public void debug(Marker marker, String format, Object arg) { - if (isSuppressed(DEBUG)) { - super.debug(marker, withSuppressionHint(DEBUG, format), arg); - suppressedMessageLogger.debug(marker, format, arg); - } else { - super.debug(marker, format, arg); - } + ifSuppressed( + DEBUG, + () -> { + super.debug(marker, withSuppressionHint(DEBUG, format), arg); + suppressedMessageLogger.debug(marker, format, arg); + }, + () -> super.debug(marker, format, arg)); } @Override public void debug(Marker marker, String format, Object arg1, Object arg2) { - if (isSuppressed(DEBUG)) { - super.debug(marker, withSuppressionHint(DEBUG, format), arg1, arg2); - suppressedMessageLogger.debug(marker, format, arg1, arg2); - } else { - super.debug(marker, format, arg1, arg2); - } + ifSuppressed( + DEBUG, + () -> { + super.debug(marker, withSuppressionHint(DEBUG, format), arg1, arg2); + suppressedMessageLogger.debug(marker, format, arg1, arg2); + }, + () -> super.debug(marker, format, arg1, arg2)); } @Override public void debug(Marker marker, String format, Object... 
arguments) { - if (isSuppressed(DEBUG)) { - super.debug(marker, withSuppressionHint(DEBUG, format), arguments); - suppressedMessageLogger.debug(marker, format, arguments); - } else { - super.debug(marker, format, arguments); - } + ifSuppressed( + DEBUG, + () -> { + super.debug(marker, withSuppressionHint(DEBUG, format), arguments); + suppressedMessageLogger.debug(marker, format, arguments); + }, + () -> super.debug(marker, format, arguments)); } @Override public void debug(Marker marker, String msg, Throwable t) { - if (isSuppressed(DEBUG)) { - super.debug(marker, withSuppressionHint(DEBUG, msg), t); - suppressedMessageLogger.debug(marker, msg, t); - } else { - super.debug(marker, msg, t); - } - } - - private boolean isSuppressed(Level level) { - return activeSuppressionState.get().rule().isSuppressed(getName(), level); + ifSuppressed( + DEBUG, + () -> { + super.debug(marker, withSuppressionHint(DEBUG, msg), t); + suppressedMessageLogger.debug(marker, msg, t); + }, + () -> super.debug(marker, msg, t)); + } + + private void ifSuppressed(Level level, Runnable ifSuppressed, Runnable ifNotSuppressed) { + activeSuppressionState.withSuppressionRule( + rule -> { + if (rule.isSuppressed(getName(), level)) { + ifSuppressed.run(); + } else { + ifNotSuppressed.run(); + } + return BoxedUnit.UNIT; + }); } public String withSuppressionHint(Level level, String msg) { diff --git a/canton/community/testing/src/main/scala/com/digitalasset/canton/BaseTest.scala b/canton/community/testing/src/main/scala/com/digitalasset/canton/BaseTest.scala index 7946857eb1..300a94f86e 100644 --- a/canton/community/testing/src/main/scala/com/digitalasset/canton/BaseTest.scala +++ b/canton/community/testing/src/main/scala/com/digitalasset/canton/BaseTest.scala @@ -18,7 +18,7 @@ import com.digitalasset.canton.logging.{NamedLogging, SuppressingLogger, Suppres import com.digitalasset.canton.metrics.OpenTelemetryOnDemandMetricsReader import com.digitalasset.canton.protocol.StaticSynchronizerParameters import com.digitalasset.canton.telemetry.ConfiguredOpenTelemetry -import com.digitalasset.canton.time.WallClock +import com.digitalasset.canton.time.{NonNegativeFiniteDuration, WallClock} import com.digitalasset.canton.topology.{PhysicalSynchronizerId, SynchronizerId} import com.digitalasset.canton.tracing.{NoReportingTracerProvider, TraceContext, W3CTraceContext} import com.digitalasset.canton.util.CheckedT @@ -101,7 +101,7 @@ trait TestEssentials override val loggerFactory: SuppressingLogger = SuppressingLogger(getClass) - val futureSupervisor: FutureSupervisor = FutureSupervisor.Noop + lazy val futureSupervisor: FutureSupervisor = FutureSupervisor.Noop lazy val wallClock = new WallClock(timeouts, loggerFactory) @@ -336,6 +336,18 @@ trait BaseTest ) } + private final val retryTimes = 3 + + def retryET[A, T]( + times: Int = retryTimes + )( + block: => EitherT[FutureUnlessShutdown, A, T] + )(implicit ec: ExecutionContext): EitherT[FutureUnlessShutdown, A, T] = + block.recoverWith { + case NonFatal(_) if times > 0 => + retryET(times - 1)(block) + } + def clue[T](message: String)(expr: => T): T = { logger.debug(s"Running clue: $message") Try(expr) match { @@ -465,11 +477,21 @@ trait BaseTest lazy val PerformanceTestPath: String = BaseTest.PerformanceTestPath lazy val DamlTestFilesPath: String = BaseTest.DamlTestFilesPath lazy val DamlTestLfDevFilesPath: String = BaseTest.DamlTestLfDevFilesPath + // TODO(#25385): Consider deduplicating the upgrade test DARs below + lazy val FooV1Path: String = BaseTest.FooV1Path + lazy val FooV2Path: 
String = BaseTest.FooV2Path + lazy val FooV3Path: String = BaseTest.FooV3Path lazy val UpgradeTestsPath: String = BaseTest.UpgradeTestsPath lazy val UpgradeTestsCompatPath: String = BaseTest.UpgradeTestsCompatPath lazy val UpgradeTestsIncompatPath: String = BaseTest.UpgradeTestsIncompatPath lazy val VettingDepPath: String = BaseTest.VettingDepPath + lazy val VettingDepCompatPath: String = BaseTest.VettingDepCompatPath + lazy val VettingDepIncompatPath: String = BaseTest.VettingDepIncompatPath + lazy val VettingDepSubstitutionPath: String = BaseTest.VettingDepSubstitutionPath lazy val VettingMainPath: String = BaseTest.VettingMainPath + lazy val VettingMainCompatPath: String = BaseTest.VettingMainCompatPath + lazy val VettingMainIncompatPath: String = BaseTest.VettingMainIncompatPath + lazy val VettingMainSubstitutionPath: String = BaseTest.VettingMainSubstitutionPath implicit class RichSynchronizerId(val id: SynchronizerId) { def toPhysical: PhysicalSynchronizerId = @@ -572,24 +594,25 @@ object BaseTest { defaultStaticSynchronizerParametersWith() def defaultStaticSynchronizerParametersWith( - protocolVersion: ProtocolVersion = testedProtocolVersion - ): StaticSynchronizerParameters = StaticSynchronizerParameters( - requiredSigningSpecs = SymbolicCryptoProvider.supportedSigningSpecs, - requiredEncryptionSpecs = SymbolicCryptoProvider.supportedEncryptionSpecs, - requiredSymmetricKeySchemes = SymbolicCryptoProvider.supportedSymmetricKeySchemes, - requiredHashAlgorithms = SymbolicCryptoProvider.supportedHashAlgorithms, - requiredCryptoKeyFormats = SymbolicCryptoProvider.supportedCryptoKeyFormats, - requiredSignatureFormats = SymbolicCryptoProvider.supportedSignatureFormats, - enableTransparencyChecks = false, - protocolVersion = protocolVersion, - serial = NonNegativeInt.zero, - ) + topologyChangeDelay: NonNegativeFiniteDuration = + StaticSynchronizerParameters.defaultTopologyChangeDelay, + protocolVersion: ProtocolVersion = testedProtocolVersion, + ): StaticSynchronizerParameters = + StaticSynchronizerParameters( + requiredSigningSpecs = SymbolicCryptoProvider.supportedSigningSpecs, + requiredEncryptionSpecs = SymbolicCryptoProvider.supportedEncryptionSpecs, + requiredSymmetricKeySchemes = SymbolicCryptoProvider.supportedSymmetricKeySchemes, + requiredHashAlgorithms = SymbolicCryptoProvider.supportedHashAlgorithms, + requiredCryptoKeyFormats = SymbolicCryptoProvider.supportedCryptoKeyFormats, + requiredSignatureFormats = SymbolicCryptoProvider.supportedSignatureFormats, + topologyChangeDelay = topologyChangeDelay, + enableTransparencyChecks = false, + protocolVersion = protocolVersion, + serial = NonNegativeInt.zero, + ) lazy val testedProtocolVersion: ProtocolVersion = ProtocolVersion.forSynchronizer - lazy val testedStaticSynchronizerParameters: StaticSynchronizerParameters = - defaultStaticSynchronizerParametersWith(testedProtocolVersion) - lazy val testedProtocolVersionValidation: ProtocolVersionValidation = ProtocolVersionValidation(testedProtocolVersion) @@ -606,11 +629,21 @@ object BaseTest { lazy val DamlScript3TestFilesPath: String = getResourcePath("DamlScript3TestFiles-3.4.0.dar") lazy val DamlTestFilesPath: String = getResourcePath("DamlTestFiles-3.4.0.dar") lazy val DamlTestLfDevFilesPath: String = getResourcePath("DamlTestLfDevFiles-3.4.0.dar") + // TODO(#25385): Deduplicate these upgrading test DARs + lazy val FooV1Path: String = getResourcePath("foo-0.0.1.dar") + lazy val FooV2Path: String = getResourcePath("foo-0.0.2.dar") + lazy val FooV3Path: String = 
getResourcePath("foo-0.0.3.dar") lazy val UpgradeTestsPath: String = getResourcePath("UpgradeTests-3.4.0.dar") lazy val UpgradeTestsCompatPath: String = getResourcePath("UpgradeTests-4.0.0.dar") lazy val UpgradeTestsIncompatPath: String = getResourcePath("UpgradeTests-5.0.0.dar") lazy val VettingDepPath: String = getResourcePath("VettingDep-1.0.0.dar") + lazy val VettingDepCompatPath: String = getResourcePath("VettingDep-2.0.0.dar") + lazy val VettingDepIncompatPath: String = getResourcePath("VettingDep-3.0.0.dar") + lazy val VettingDepSubstitutionPath: String = getResourcePath("VettingDepSubstitution-1.0.0.dar") lazy val VettingMainPath: String = getResourcePath("VettingMain-1.0.0.dar") + lazy val VettingMainCompatPath: String = getResourcePath("VettingMain-2.0.0.dar") + lazy val VettingMainIncompatPath: String = getResourcePath("VettingMain-3.0.0.dar") + lazy val VettingMainSubstitutionPath: String = getResourcePath("VettingMain-4.0.0.dar") def getResourcePath(name: String): String = Option(getClass.getClassLoader.getResource(name)) diff --git a/canton/community/testing/src/main/scala/com/digitalasset/canton/ProtocolVersionChecks.scala b/canton/community/testing/src/main/scala/com/digitalasset/canton/ProtocolVersionChecks.scala index 5ae0bc102c..f42f8d742c 100644 --- a/canton/community/testing/src/main/scala/com/digitalasset/canton/ProtocolVersionChecks.scala +++ b/canton/community/testing/src/main/scala/com/digitalasset/canton/ProtocolVersionChecks.scala @@ -3,6 +3,7 @@ package com.digitalasset.canton +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.version.ProtocolVersion import org.scalactic.source import org.scalatest.compatible.Assertion @@ -163,6 +164,14 @@ trait ProtocolVersionChecksFixtureAsyncWordSpec { def in(testFun: FixtureParam => Future[Assertion])(implicit pos: source.Position): Unit = if (condition) verb.in(testFun) else verb.ignore(testFun) + def inUS( + testFun: FixtureParam => FutureUnlessShutdown[Assertion] + )(implicit pos: source.Position): Unit = { + def testFunHandleShutdown: FixtureParam => Future[Assertion] = + testFun(_).onShutdown(fail(s"Unexpected shutdown in OnlyRunWhenWordSpecStringWrapper.inUS")) + if (condition) verb.in(testFunHandleShutdown) else verb.ignore(testFunHandleShutdown) + } + def when(testFun: => Unit)(implicit pos: source.Position): Unit = if (condition) verb.when(testFun) else diff --git a/canton/community/testing/src/main/scala/com/digitalasset/canton/interactive/ExternalPartyUtils.scala b/canton/community/testing/src/main/scala/com/digitalasset/canton/interactive/ExternalPartyUtils.scala deleted file mode 100644 index 400a7c3697..0000000000 --- a/canton/community/testing/src/main/scala/com/digitalasset/canton/interactive/ExternalPartyUtils.scala +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.interactive - -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.BaseTest.{testedProtocolVersion, testedReleaseProtocolVersion} -import com.digitalasset.canton.FutureHelpers -import com.digitalasset.canton.concurrent.FutureSupervisor -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.config.{CachingConfigs, CryptoConfig, ProcessingTimeout} -import com.digitalasset.canton.crypto.* -import com.digitalasset.canton.crypto.kms.CommunityKmsFactory -import com.digitalasset.canton.crypto.store.CryptoPrivateStoreFactory -import com.digitalasset.canton.data.OnboardingTransactions -import com.digitalasset.canton.logging.SuppressingLogger -import com.digitalasset.canton.resource.MemoryStorage -import com.digitalasset.canton.time.WallClock -import com.digitalasset.canton.topology.transaction.* -import com.digitalasset.canton.topology.transaction.DelegationRestriction.CanSignAllMappings -import com.digitalasset.canton.topology.{ExternalParty, ParticipantId, PartyId} -import com.digitalasset.canton.tracing.{NoReportingTracerProvider, TraceContext} -import com.google.protobuf.ByteString -import org.scalatest.EitherValues - -import scala.concurrent.ExecutionContext - -trait ExternalPartyUtils extends FutureHelpers with EitherValues { - - def loggerFactory: SuppressingLogger - def futureSupervisor: FutureSupervisor - protected def timeouts: ProcessingTimeout - def wallClock: WallClock - - implicit def externalPartyExecutionContext: ExecutionContext - implicit protected def traceContext: TraceContext - - private val storage = new MemoryStorage(loggerFactory, timeouts) - - private lazy val crypto: Crypto = Crypto - .create( - CryptoConfig(), - CachingConfigs.defaultSessionEncryptionKeyCacheConfig, - CachingConfigs.defaultPublicKeyConversionCache, - storage, - CryptoPrivateStoreFactory.withoutKms(wallClock, externalPartyExecutionContext), - CommunityKmsFactory, - testedReleaseProtocolVersion, - futureSupervisor, - wallClock, - externalPartyExecutionContext, - timeouts, - loggerFactory, - NoReportingTracerProvider, - ) - .valueOrFailShutdown("Failed to create crypto object") - .futureValue - - private def generateProtocolSigningKeys( - numberOfKeys: PositiveInt - ): NonEmpty[Seq[SigningPublicKey]] = - NonEmpty - .from( - Seq.fill(numberOfKeys.value)( - crypto.generateSigningKey(usage = SigningKeyUsage.ProtocolOnly).futureValueUS.value - ) - ) - .getOrElse( - fail("Expected at least one protocol signing key") - ) - - protected def generateExternalPartyOnboardingTransactions( - name: String, - confirming: Seq[ParticipantId] = Seq.empty, - observing: Seq[ParticipantId] = Seq.empty, - confirmationThreshold: PositiveInt = PositiveInt.one, - numberOfKeys: PositiveInt = PositiveInt.one, - keyThreshold: PositiveInt = PositiveInt.one, - ): (OnboardingTransactions, ExternalParty) = { - - val namespaceKey: SigningPublicKey = - crypto.generateSigningKey(usage = SigningKeyUsage.NamespaceOnly).futureValueUS.value - val partyId: PartyId = PartyId.tryCreate(name, namespaceKey.fingerprint) - val protocolSigningKeys: NonEmpty[Seq[SigningPublicKey]] = generateProtocolSigningKeys( - numberOfKeys - ) - - val namespaceDelegationTx = - TopologyTransaction( - TopologyChangeOp.Replace, - serial = PositiveInt.one, - NamespaceDelegation.tryCreate( - namespace = partyId.uid.namespace, - target = namespaceKey, - CanSignAllMappings, - ), - testedProtocolVersion, - ) - - val confirmingHostingParticipants = 
confirming.map { cp => - HostingParticipant( - cp, - ParticipantPermission.Confirmation, - ) - } - val observingHostingParticipants = observing.map { op => - HostingParticipant( - op, - ParticipantPermission.Observation, - ) - } - val partyToParticipantTx = - TopologyTransaction( - TopologyChangeOp.Replace, - serial = PositiveInt.one, - PartyToParticipant.tryCreate( - partyId = partyId, - threshold = confirmationThreshold, - participants = confirmingHostingParticipants ++ observingHostingParticipants, - ), - testedProtocolVersion, - ) - - val partyToKeyTx = - TopologyTransaction( - TopologyChangeOp.Replace, - serial = PositiveInt.one, - PartyToKeyMapping.tryCreate( - partyId = partyId, - threshold = keyThreshold, - signingKeys = protocolSigningKeys, - ), - testedProtocolVersion, - ) - - val transactionHashes = - NonEmpty.mk(Set, namespaceDelegationTx.hash, partyToParticipantTx.hash, partyToKeyTx.hash) - val combinedMultiTxHash = - MultiTransactionSignature.computeCombinedHash(transactionHashes, crypto.pureCrypto) - - // Sign the multi hash with the namespace key, as it is needed to authorize all 3 transactions - val namespaceSignature = - crypto.privateCrypto - .sign( - combinedMultiTxHash, - namespaceKey.fingerprint, - NonEmpty.mk(Set, SigningKeyUsage.Namespace), - ) - .futureValueUS - .value - - // The protocol key signature is only needed on the party to key mapping, so we can sign only that, and combine it with the - // namespace signature - val protocolSignatures = protocolSigningKeys.map { key => - crypto.privateCrypto - .sign( - partyToKeyTx.hash.hash, - key.fingerprint, - NonEmpty.mk(Set, SigningKeyUsage.Protocol), - ) - .futureValueUS - .value - } - - val multiTxSignatures = - NonEmpty.mk(Seq, MultiTransactionSignature(transactionHashes, namespaceSignature)) - - val signedNamespaceDelegation = SignedTopologyTransaction - .withTopologySignatures( - namespaceDelegationTx, - multiTxSignatures, - isProposal = false, - testedProtocolVersion, - ) - - val signedPartyToParticipant = SignedTopologyTransaction - .withTopologySignatures( - partyToParticipantTx, - multiTxSignatures, - isProposal = true, - testedProtocolVersion, - ) - - val signedPartyToKey = SignedTopologyTransaction - .withTopologySignatures( - partyToKeyTx, - multiTxSignatures, - isProposal = false, - testedProtocolVersion, - ) - // Merge the signature from the protocol key - .addSingleSignatures(protocolSignatures.toSet) - - ( - OnboardingTransactions(signedNamespaceDelegation, signedPartyToParticipant, signedPartyToKey), - ExternalParty(partyId, protocolSigningKeys.map(_.fingerprint)), - ) - } - - protected def signTxAs( - hash: ByteString, - p: ExternalParty, - ): Map[PartyId, Seq[Signature]] = { - val signatures = - p.signingFingerprints.map { fingerprint => - crypto.privateCrypto - .signBytes( - hash, - fingerprint, - SigningKeyUsage.ProtocolOnly, - ) - .valueOrFailShutdown("Failed to sign transaction hash") - .futureValue - } - - Map(p.partyId -> signatures.forgetNE) - } -} diff --git a/canton/community/testing/src/main/scala/com/digitalasset/canton/logging/LogEntry.scala b/canton/community/testing/src/main/scala/com/digitalasset/canton/logging/LogEntry.scala index fbf12a6f0a..6a27de218c 100644 --- a/canton/community/testing/src/main/scala/com/digitalasset/canton/logging/LogEntry.scala +++ b/canton/community/testing/src/main/scala/com/digitalasset/canton/logging/LogEntry.scala @@ -107,8 +107,14 @@ final case class LogEntry( def shouldBeCommandFailure(code: ErrorCode, message: String = "")(implicit pos: source.Position 
+ ): Assertion = shouldBeOneOfCommandFailure(Seq(code), message) + + def shouldBeOneOfCommandFailure(codes: Seq[ErrorCode], message: String = "")(implicit + pos: source.Position ): Assertion = - commandFailureMessage should (include(code.id) and include(message)) + forAtLeast(1, codes) { code => + commandFailureMessage should (include(code.id) and include(message)) + } def commandFailureMessage(implicit pos: source.Position): String = { val errors = new StringBuilder() diff --git a/canton/community/testing/src/main/scala/com/digitalasset/canton/logging/SuppressingLogger.scala b/canton/community/testing/src/main/scala/com/digitalasset/canton/logging/SuppressingLogger.scala index adbe449824..f4a22b5d28 100644 --- a/canton/community/testing/src/main/scala/com/digitalasset/canton/logging/SuppressingLogger.scala +++ b/canton/community/testing/src/main/scala/com/digitalasset/canton/logging/SuppressingLogger.scala @@ -8,7 +8,11 @@ import com.digitalasset.canton.BaseTest import com.digitalasset.canton.concurrent.{DirectExecutionContext, ExecutionContextMonitor} import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, UnlessShutdown} -import com.digitalasset.canton.logging.SuppressingLogger.{LogEntryOptionality, NoSuppression, State} +import com.digitalasset.canton.logging.SuppressingLogger.{ + ActiveState, + LogEntryOptionality, + NoSuppression, +} import com.digitalasset.canton.util.ErrorUtil import com.digitalasset.canton.util.Thereafter.syntax.* import com.typesafe.scalalogging.Logger @@ -22,12 +26,12 @@ import org.scalatest.matchers.should.Matchers.* import org.slf4j.event.Level import org.slf4j.event.Level.{ERROR, WARN} -import java.util.concurrent.atomic.AtomicReference +import java.util.concurrent.locks.ReentrantReadWriteLock import scala.annotation.tailrec import scala.collection.immutable.ListMap import scala.collection.mutable import scala.concurrent.duration.* -import scala.concurrent.{ExecutionContext, Future} +import scala.concurrent.{ExecutionContext, Future, blocking} import scala.jdk.CollectionConverters.* import scala.reflect.ClassTag import scala.util.control.NonFatal @@ -58,7 +62,7 @@ class SuppressingLogger private[logging] ( underlyingLoggerFactory: NamedLoggerFactory, pollTimeout: FiniteDuration, skipLogEntry: LogEntry => Boolean, - activeState: AtomicReference[State] = new AtomicReference[State](NoSuppression), + activeState: ActiveState = new ActiveState(), private[logging] val recordedLogEntries: java.util.concurrent.BlockingQueue[LogEntry] = new java.util.concurrent.LinkedBlockingQueue[LogEntry](), ) extends NamedLoggerFactory { @@ -74,7 +78,7 @@ class SuppressingLogger private[logging] ( override val name: String = underlyingLoggerFactory.name override val properties: ListMap[String, String] = underlyingLoggerFactory.properties - private def restoreNoSuppression = () => activeState.set(NoSuppression) + private def restoreNoSuppression: () => Unit = () => activeState.restoreNoSuppression() override def appendUnnamedKey(key: String, value: String): NamedLoggerFactory = // intentionally share suppressedLevel and queues so suppression on a parent logger will effect a child and collect all suppressed messages @@ -123,6 +127,20 @@ class SuppressingLogger private[logging] ( )(implicit c: ClassTag[T], pos: source.Position): Assertion = assertLogs(rule)(checkThrowable[T](the[Throwable] thrownBy within), assertions*) + def assertThrowsAndLogsSuppressingAsync[T <: Throwable](rule: SuppressionRule)( + within: => 
Future[_], + assertions: (LogEntry => Assertion)* + )(implicit c: ClassTag[T], pos: source.Position): Future[Assertion] = + assertLogs(rule)( + within.transform { + case Success(_) => + fail(s"An exception of type $c was expected, but no exception was thrown.") + case Failure(c(_)) => Success(succeed) + case Failure(t) => fail(s"Exception has wrong type. Expected type: $c. Got: $t.", t) + }(directExecutionContext), + assertions* + ) + def assertThrowsAndLogsAsync[T <: Throwable]( within: => Future[_], assertion: T => Assertion, @@ -260,7 +278,7 @@ class SuppressingLogger private[logging] ( suppress(rule) { runWithCleanup( within, - { () => + { (_: A) => // check the log // Check that every assertion succeeds on the corresponding log entry @@ -308,7 +326,7 @@ class SuppressingLogger private[logging] ( suppress(rule) { runWithCleanup( within, - () => checkLogsAssertion(assertion), + (_: A) => checkLogsAssertion(assertion), () => (), ) } @@ -361,7 +379,8 @@ class SuppressingLogger private[logging] ( suppress(rule) { runWithCleanup( within, - () => BaseTest.eventually(timeUntilSuccess, maxPollInterval)(checkLogsAssertion(assertion)), + (_: A) => + BaseTest.eventually(timeUntilSuccess, maxPollInterval)(checkLogsAssertion(assertion)), () => (), ) } @@ -450,11 +469,18 @@ class SuppressingLogger private[logging] ( def assertLogsUnorderedOptional[A]( within: => A, assertions: (LogEntryOptionality, LogEntry => Assertion)* + )(implicit pos: source.Position): A = + assertLogsUnorderedOptionalFromResult(within, (_: A) => assertions) + + def assertLogsUnorderedOptionalFromResult[A]( + within: => A, + mkAssertions: A => Seq[(LogEntryOptionality, LogEntry => Assertion)], )(implicit pos: source.Position): A = suppress(SuppressionRule.LevelAndAbove(WARN)) { runWithCleanup( within, - () => { + (a: A) => { + val assertions = mkAssertions(a) val unmatchedAssertions = mutable.SortedMap[Int, (LogEntryOptionality, LogEntry => Assertion)]() ++ assertions.zipWithIndex.map { case (assertion, index) => index -> assertion } @@ -552,7 +578,7 @@ class SuppressingLogger private[logging] ( internalLogger.error("Failed to begin suppression", t) restoreNoSuppression } - runWithCleanup(within, () => (), endSuppress) + runWithCleanup(within, (_: A) => (), endSuppress) } /** First runs `body`, `onSuccess`, and then `doFinally`. Runs `onSuccess` after `body` if `body` @@ -598,7 +624,7 @@ class SuppressingLogger private[logging] ( // Therefore, we don't know whether the suppression needs to be asynchronous. case syncResult => - onSuccess(result) + onSuccess(syncResult) syncResult } } finally { @@ -617,10 +643,7 @@ class SuppressingLogger private[logging] ( // Nested usages are not supported, because we clear the message queue when the suppression begins. // So a second call of this method would purge the messages collected by previous calls. - val previous = activeState.getAndUpdate(state => - if (state eq NoSuppression) State(rule) - else state - ) + val previous = activeState.setRuleIfUnset(rule) if (!(previous eq NoSuppression)) fail( "`SuppressingLogger.suppress` supports neither nested nor concurrent calls; stack trace of previous entrance attached as cause", ) @@ -672,6 +695,37 @@ object SuppressingLogger { private val NoSuppression = State(SuppressionRule.NoSuppression) + class ActiveState { + private var state: State = NoSuppression + + /** Used to synchronize (1) writing state and (2) reading state.
*/ + private val readWriteLock: ReentrantReadWriteLock = new ReentrantReadWriteLock() + private val writeLock = readWriteLock.writeLock() + private val readLock = readWriteLock.readLock() + + def setRuleIfUnset(rule: SuppressionRule): State = { + blocking(writeLock.lock()) + + val oldState = state + state = if (state eq NoSuppression) State(rule) else state + + writeLock.unlock() + oldState + } + + def restoreNoSuppression(): Unit = { + blocking(writeLock.lock()) + state = NoSuppression + writeLock.unlock() + } + + def withSuppressionRule(body: SuppressionRule => Unit): Unit = { + blocking(readLock.lock()) + try body(state.rule) + finally readLock.unlock() + } + } + def apply( testClass: Class[_], pollTimeout: FiniteDuration = 1.second, diff --git a/canton/community/testing/src/main/scala/com/digitalasset/canton/protocol/TestSynchronizerParameters.scala b/canton/community/testing/src/main/scala/com/digitalasset/canton/protocol/TestSynchronizerParameters.scala index 2e532fd60b..354c64a296 100644 --- a/canton/community/testing/src/main/scala/com/digitalasset/canton/protocol/TestSynchronizerParameters.scala +++ b/canton/community/testing/src/main/scala/com/digitalasset/canton/protocol/TestSynchronizerParameters.scala @@ -6,15 +6,13 @@ package com.digitalasset.canton.protocol import com.digitalasset.canton.BaseTest import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.protocol.SynchronizerParameters.MaxRequestSize -import com.digitalasset.canton.time.NonNegativeFiniteDuration /** Synchronizer parameters used for unit testing with sane default values. */ object TestSynchronizerParameters { val defaultDynamic: DynamicSynchronizerParameters = DynamicSynchronizerParameters.initialValues( - topologyChangeDelay = NonNegativeFiniteDuration.tryOfMillis(250), - BaseTest.testedProtocolVersion, + BaseTest.testedProtocolVersion ) def defaultDynamic( @@ -22,7 +20,6 @@ object TestSynchronizerParameters { maxRequestSize: MaxRequestSize, ): DynamicSynchronizerParameters = DynamicSynchronizerParameters.tryInitialValues( - topologyChangeDelay = NonNegativeFiniteDuration.tryOfMillis(250), protocolVersion = BaseTest.testedProtocolVersion, confirmationRequestsMaxRate = confirmationRequestsMaxRate, maxRequestSize = maxRequestSize, diff --git a/canton/community/testing/src/main/scala/com/digitalasset/canton/protocol/TestUpdateId.scala b/canton/community/testing/src/main/scala/com/digitalasset/canton/protocol/TestUpdateId.scala new file mode 100644 index 0000000000..4cdefb7dc2 --- /dev/null +++ b/canton/community/testing/src/main/scala/com/digitalasset/canton/protocol/TestUpdateId.scala @@ -0,0 +1,10 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
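// Editor's note (illustrative sketch, not part of the diff): the intended
// lifecycle of the ActiveState introduced above in SuppressingLogger.scala,
// using only its three methods. A test installs a rule under the write lock,
// logger threads read it under the read lock, and cleanup resets it; a nested
// or concurrent `suppress` call is detected from the returned previous state.
val activeState = new SuppressingLogger.ActiveState()
val previous = activeState.setRuleIfUnset(SuppressionRule.LevelAndAbove(WARN)) // write-locked swap
// `previous` is NoSuppression on the first call; anything else signals nested/concurrent use.
activeState.withSuppressionRule(rule => println(s"currently suppressing with: $rule")) // read-locked access
activeState.restoreNoSuppression() // write-locked reset back to NoSuppression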
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import com.digitalasset.canton.crypto.TestHash + +object TestUpdateId { + def apply(s: String): UpdateId = UpdateId(TestHash.digest(s)) +} diff --git a/canton/community/testing/src/main/scala/com/digitalasset/canton/store/db/DbStorageSetup.scala b/canton/community/testing/src/main/scala/com/digitalasset/canton/store/db/DbStorageSetup.scala index c0468cc200..6426a8c04f 100644 --- a/canton/community/testing/src/main/scala/com/digitalasset/canton/store/db/DbStorageSetup.scala +++ b/canton/community/testing/src/main/scala/com/digitalasset/canton/store/db/DbStorageSetup.scala @@ -17,12 +17,7 @@ import com.digitalasset.canton.lifecycle.{ import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.CommonMockMetrics import com.digitalasset.canton.resource.DbStorage.RetryConfig -import com.digitalasset.canton.resource.{ - CommunityDbMigrationsFactory, - DbMigrationsFactory, - DbStorage, - DbStorageSingle, -} +import com.digitalasset.canton.resource.{DbMigrations, DbStorage, DbStorageSingle} import com.digitalasset.canton.store.db.DbStorageSetup.DbBasicConfig import com.digitalasset.canton.time.SimClock import com.digitalasset.canton.tracing.{NoTracing, TraceContext} @@ -55,8 +50,6 @@ trait DbStorageSetup extends FlagCloseable with HasCloseContext with NamedLoggin protected def prepareDatabase(): Unit - protected def migrationsFactory: DbMigrationsFactory - def migrationMode: MigrationMode protected def destroyDatabase(): Unit @@ -97,7 +90,9 @@ trait DbStorageSetup extends FlagCloseable with HasCloseContext with NamedLoggin final def migrateDb(): Unit = { val migrationResult = - migrationsFactory.create(config, migrationMode == MigrationMode.DevVersion).migrateDatabase() + DbMigrations + .create(config, migrationMode == MigrationMode.DevVersion, timeouts, loggerFactory) + .migrateDatabase() // throw so the first part of the test that attempts to use storage will fail with an exception migrationResult .valueOr(err => fail(s"Failed to migrate database: $err")) @@ -133,8 +128,6 @@ abstract class PostgresDbStorageSetup( override lazy val retryConfig: RetryConfig = RetryConfig.failFast - override protected lazy val migrationsFactory: DbMigrationsFactory = - new CommunityDbMigrationsFactory(loggerFactory) } /** Assumes Postgres is available on an already running and that connection details are provided @@ -292,9 +285,6 @@ class H2DbStorageSetup( override lazy val retryConfig: RetryConfig = RetryConfig.failFast - override lazy val migrationsFactory: DbMigrationsFactory = - new CommunityDbMigrationsFactory(loggerFactory) - override protected def prepareDatabase(): Unit = () override protected def destroyDatabase(): Unit = () diff --git a/canton/community/testing/src/main/scala/com/digitalasset/canton/topology/DefaultTestIdentities.scala b/canton/community/testing/src/main/scala/com/digitalasset/canton/topology/DefaultTestIdentities.scala index 7e3c863fcc..15e0a6bb59 100644 --- a/canton/community/testing/src/main/scala/com/digitalasset/canton/topology/DefaultTestIdentities.scala +++ b/canton/community/testing/src/main/scala/com/digitalasset/canton/topology/DefaultTestIdentities.scala @@ -6,7 +6,6 @@ package com.digitalasset.canton.topology import com.digitalasset.canton.BaseTest import com.digitalasset.canton.crypto.Fingerprint import com.digitalasset.canton.protocol.DynamicSynchronizerParameters -import com.digitalasset.canton.time.NonNegativeFiniteDuration object
DefaultTestIdentities { import BaseTest.* @@ -35,7 +34,6 @@ object DefaultTestIdentities { val defaultDynamicSynchronizerParameters: DynamicSynchronizerParameters = DynamicSynchronizerParameters.initialValues( - NonNegativeFiniteDuration.Zero, - BaseTest.testedProtocolVersion, + BaseTest.testedProtocolVersion ) } diff --git a/canton/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala b/canton/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala index 70a0d7aa60..1af2759e49 100644 --- a/canton/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala +++ b/canton/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala @@ -40,7 +40,7 @@ import com.digitalasset.canton.topology.client.PartyTopologySnapshotClient.Party import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStore import com.digitalasset.canton.topology.store.{ - PackageDependencyResolverUS, + PackageDependencyResolver, TopologyStoreId, ValidatedTopologyTransaction, } @@ -383,6 +383,7 @@ class TestingIdentityFactory( val ips = new IdentityProvidingServiceClient(loggerFactory) synchronizers.foreach(dId => ips.add(new SynchronizerTopologyClient() with HasFutureSupervision with NamedLogging { + override def staticSynchronizerParameters: StaticSynchronizerParameters = ??? override protected def loggerFactory: NamedLoggerFactory = TestingIdentityFactory.this.loggerFactory @@ -412,7 +413,7 @@ class TestingIdentityFactory( ): TopologySnapshot = { require( timestamp <= upToInclusive, - s"Topology information not yet available for $timestamp", + s"Topology information not yet available for $timestamp, known until $upToInclusive", ) topologySnapshot(synchronizerId, timestampForSynchronizerParameters = timestamp) } @@ -508,7 +509,7 @@ class TestingIdentityFactory( def topologySnapshot( synchronizerId: SynchronizerId = DefaultTestIdentities.synchronizerId, - packageDependencyResolver: PackageDependencyResolverUS = + packageDependencyResolver: PackageDependencyResolver = StoreBasedSynchronizerTopologyClient.NoPackageDependencies, timestampForSynchronizerParameters: CantonTimestamp = CantonTimestamp.Epoch, timestampOfSnapshot: CantonTimestamp = CantonTimestamp.Epoch, @@ -612,8 +613,7 @@ class TestingIdentityFactory( case dp :: Nil => dp case Nil => DynamicSynchronizerParameters.initialValues( - NonNegativeFiniteDuration.Zero, - BaseTest.testedProtocolVersion, + BaseTest.testedProtocolVersion ) case _ => throw new IllegalStateException(s"Multiple synchronizer parameters are valid at $ts") @@ -887,9 +887,15 @@ class TestingOwnerWithKeys( ) ) - val p1_dtc = mkAdd(SynchronizerTrustCertificate(participant1, synchronizerId)) - val p2_dtc = mkAdd(SynchronizerTrustCertificate(participant2, synchronizerId)) - val p3_dtc = mkAdd(SynchronizerTrustCertificate(participant3, synchronizerId)) + val p1_dtc = mkAdd( + SynchronizerTrustCertificate(participant1, synchronizerId) + ) + val p2_dtc = mkAdd( + SynchronizerTrustCertificate(participant2, synchronizerId) + ) + val p3_dtc = mkAdd( + SynchronizerTrustCertificate(participant3, synchronizerId) + ) val p1_otk = mkAddMultiKey( OwnerToKeyMapping .tryCreate(participant1, NonEmpty(Seq, EncryptionKeys.key1, SigningKeys.key1)), diff --git 
a/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/AcquiredInterfacesIntegrationTest.scala b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/AcquiredInterfacesIntegrationTest.scala new file mode 100644 index 0000000000..2b55758791 --- /dev/null +++ b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/AcquiredInterfacesIntegrationTest.scala @@ -0,0 +1,650 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.upgrading + +import com.daml.ledger.api.v2.event.CreatedEvent +import com.daml.ledger.api.v2.transaction_filter.CumulativeFilter.IdentifierFilter +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_ACS_DELTA +import com.daml.ledger.api.v2.transaction_filter.{ + CumulativeFilter, + EventFormat, + Filters, + InterfaceFilter, + TransactionFormat, + UpdateFormat, +} +import com.daml.ledger.api.v2.value.Value.toJavaProto +import com.daml.ledger.api.v2.value.{Identifier as ScalaPbIdentifier, Record} +import com.daml.ledger.javaapi.data +import com.daml.ledger.javaapi.data.{Identifier, Template} +import com.digitalasset.base.error.ErrorCode +import com.digitalasset.base.error.utils.DecodedCantonError +import com.digitalasset.canton.LfPackageId +import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands.UpdateService +import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands.UpdateService.UpdateWrapper +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.damltests.holding.v1.java.holdingv1.{ + Holding as HoldingV1, + HoldingView as HoldingViewV1, +} +import com.digitalasset.canton.damltests.holding.v2.java.holdingv2.{ + Holding as HoldingV2, + HoldingView as HoldingViewV2, +} +import com.digitalasset.canton.damltests.token.v1.java.token.Token as TokenV1 +import com.digitalasset.canton.damltests.token.v2.java.token.Token as TokenV2 +import com.digitalasset.canton.damltests.token.v3.java.token.Token as TokenV3 +import com.digitalasset.canton.damltests.token.v4.java.token.Token as TokenV4 +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.integration.util.PartiesAllocator +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + ConfigTransforms, + EnvironmentDefinition, + SharedEnvironment, + TestConsoleEnvironment, +} +import com.digitalasset.canton.ledger.error.LedgerApiErrors.NoVettedInterfaceImplementationPackage +import com.digitalasset.canton.ledger.error.groups.CommandExecutionErrors +import com.digitalasset.canton.ledger.error.groups.CommandExecutionErrors.Interpreter.FailureStatus +import com.digitalasset.canton.ledger.error.groups.CommandExecutionErrors.Preprocessing.PreprocessingFailed +import com.digitalasset.canton.networking.grpc.RecordingStreamObserver +import com.digitalasset.canton.participant.sync.SyncServiceInjectionError.NotConnectedToAnySynchronizer +import com.digitalasset.canton.topology.PartyId +import com.digitalasset.canton.topology.transaction.{ParticipantPermission, VettedPackage} +import com.digitalasset.canton.util.SetupPackageVetting +import com.digitalasset.canton.util.ShowUtil.* +import com.digitalasset.daml.lf.data.Ref 
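// Editor's note (illustrative sketch, not part of the diff): how the tests in
// this new file turn a raw interface-view Record returned by the Ledger API into
// a typed Java binding; it mirrors the `viewValueToJavaApi` helper defined near
// the end of the file. `interfaceView` is an assumed in-scope InterfaceView.
val record: Record = interfaceView.getViewValue
val javaValue = data.Value.fromProto(
  toJavaProto(
    com.daml.ledger.api.v2.value.Value(com.daml.ledger.api.v2.value.Value.Sum.Record(record))
  )
)
val decodedView: HoldingViewV1 = HoldingViewV1.valueDecoder.decode(javaValue)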
+import monocle.Monocle.toAppliedFocusOps +import org.scalatest.Assertion + +import java.time.{Duration, Instant} +import java.util.Optional +import java.util.concurrent.atomic.AtomicInteger +import scala.concurrent.Future +import scala.jdk.CollectionConverters.CollectionHasAsScala + +class AcquiredInterfacesIntegrationTest extends CommunityIntegrationTest with SharedEnvironment { + + override lazy val environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P1_S1M1 + .withSetup(setup) + .addConfigTransform( + ConfigTransforms.updateAllParticipantConfigs_( + _.focus(_.ledgerApi.topologyAwarePackageSelection.enabled).replace(true) + ) + ) + + protected def setup(env: TestConsoleEnvironment): Unit = { + import env.* + participant1.synchronizers.connect_local(sequencer1, alias = daName) + } + + private def tokenV1(assetId: String)(implicit party: PartyId) = + new TokenV1(party.toProtoPrimitive, assetId) + private def tokenV2(assetId: String)(implicit party: PartyId) = + new TokenV2(party.toProtoPrimitive, assetId) + private def tokenV3(assetId: String)(implicit party: PartyId) = + new TokenV3(party.toProtoPrimitive, assetId, Optional.of(Instant.ofEpochSecond(1337L))) + private def tokenV4(assetId: String)(implicit party: PartyId) = + new TokenV4(party.toProtoPrimitive, assetId, Optional.of(Instant.ofEpochSecond(1338L))) + + private implicit val viewDecoders: Map[data.Identifier, data.Value => Any] = Map( + HoldingV1.TEMPLATE_ID_WITH_PACKAGE_ID -> { (v: data.Value) => + HoldingViewV1.valueDecoder.decode(v) + }, + HoldingV2.TEMPLATE_ID_WITH_PACKAGE_ID -> { (v: data.Value) => + HoldingViewV2.valueDecoder.decode(v) + }, + ) + + private val partyHintSuffixRef = new AtomicInteger(0) + + "The Ledger API" when { + "an interface subscription comes before an acquired interface instance is vetted" should { + "return failed interface views" in withNewParty { implicit env => implicit party => + import env.* + + setupVettedPackages(HoldingV1.PACKAGE_ID, TokenV1.PACKAGE_ID) + + // Ensure Token V2 which defines an instance for the Holding is uploaded (but not vetted) + participant1.dars.upload( + UpgradingBaseTest.TokenV2, + vetAllPackages = false, + synchronizeVetting = false, + ) + + create(tokenV1("asset")) + + val createdEvent = + interfaceSubscribe(Seq(HoldingV1.TEMPLATE_ID)).loneElement.createEvents.toSeq.loneElement + + val expectedFailure = + ( + NoVettedInterfaceImplementationPackage, + (_: String) should include regex + s"""No vetted package for rendering the interface view for package-name '${TokenV1.PACKAGE_NAME}'.* + |Candidates: ${TokenV1.PACKAGE_ID.toPackageId.show}.* + |Filter: Package-ids with interface instances for the requested interface: ${TokenV1.PACKAGE_NAME} -> ${TokenV2.PACKAGE_ID.toPackageId.show}""".stripMargin, + ) + assertViews(createdEvent)( + HoldingV1.TEMPLATE_ID_WITH_PACKAGE_ID -> Left(expectedFailure) + ) + } + } + + "only the upgraded template that adds the interface instance is vetted but the create is for the original template" should { + "render the interface view for the create" in withNewParty { implicit env => implicit party => + setupVettedPackages(HoldingV1.PACKAGE_ID, TokenV1.PACKAGE_ID) + + create(tokenV1("asset")) + + setupVettedPackages(HoldingV1.PACKAGE_ID, TokenV2.PACKAGE_ID) + + val createEvent = + interfaceSubscribe(Seq(HoldingV1.TEMPLATE_ID)).loneElement.createEvents.toSeq.loneElement + + assertViews(createEvent)( + HoldingV1.TEMPLATE_ID_WITH_PACKAGE_ID -> Right( + new HoldingViewV1(party.toProtoPrimitive, "asset") + ) + ) + } + } + + "a 
subscription for two interfaces is created" should { + "allow switch-over from old interface version to new one by memoizing the selected package-id per interface" in withNewParty { + implicit env => implicit party => + import env.* + + // Note: This test must be run before other tests that require TokenV3 to ensure it's the first one that uploads it + // + // Scenario: + // * HoldingV1, HoldingV2, TokenV2 uploaded and vetted + // * Subscription for HoldingV1, HoldingV2 + // * Create TokenV2 + // * Upload TokenV3 + // * Vet TokenV3 + // * Create TokenV3 + // Expectation: Subscription should see a valid rendered view for each of the creates + + setupVettedPackages(HoldingV1.PACKAGE_ID, HoldingV2.PACKAGE_ID, TokenV2.PACKAGE_ID) + + val subscriptionF = Future( + interfaceSubscribe( + Seq(HoldingV1.TEMPLATE_ID, HoldingV2.TEMPLATE_ID), + completeAfter = 2, + ) + ) + + create(tokenV2("asset1")) + + setupVettedPackages(HoldingV1.PACKAGE_ID, HoldingV2.PACKAGE_ID, TokenV3.PACKAGE_ID) + + val asset2 = tokenV3("asset2") + create(asset2) + + val subscriptionResult = subscriptionF.futureValue.map(_.createEvents.toSeq.loneElement) + + inside(subscriptionResult) { case Seq(create1, create2) => + assertViews(create1)( + HoldingV1.TEMPLATE_ID_WITH_PACKAGE_ID -> Right( + new HoldingViewV1(party.toProtoPrimitive, "asset1") + ) + ) + + assertViews(create2)( + HoldingV1.TEMPLATE_ID_WITH_PACKAGE_ID -> Left( + ( + PreprocessingFailed, + // TODO(#25385): Consider refining the error + _ should include( + "An optional contract field with a value of Some may not be dropped during downgrading" + ), + ) + ), + HoldingV2.TEMPLATE_ID_WITH_PACKAGE_ID -> Right( + new HoldingViewV2(party.toProtoPrimitive, asset2.assetId, asset2.expiry) + ), + ) + } + } + } + + s"vetting end bound is specified" should { + "consider it in the view package resolution" in withNewParty { + implicit env => implicit party => + import env.* + + SetupPackageVetting( + Set(UpgradingBaseTest.HoldingV1, UpgradingBaseTest.TokenV2, UpgradingBaseTest.TokenV3), + targetTopology = Map( + synchronizer1Id -> Map( + participant1 -> Set( + HoldingV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + TokenV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + TokenV3.PACKAGE_ID.toPackageId.withVettingEndsAt( + CantonTimestamp.now().add(Duration.ofDays(1L)) + ), + ) + ) + ), + ) + + create(tokenV2("asset1")) + val create1 = interfaceSubscribe( + Seq(HoldingV1.TEMPLATE_ID) + ).loneElement.createEvents.toSeq.loneElement + + // Verify that Token V2 is used for rendering (V3 is discarded since it has the validUntil not at max) + assertViews(create1)( + HoldingV1.TEMPLATE_ID_WITH_PACKAGE_ID -> Right( + new HoldingViewV1(party.toProtoPrimitive, "asset1") + ) + ) + + // Set the validity open-ended + SetupPackageVetting( + Set(UpgradingBaseTest.HoldingV1, UpgradingBaseTest.TokenV2, UpgradingBaseTest.TokenV3), + targetTopology = Map( + synchronizer1Id -> Map( + participant1 -> Set( + HoldingV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + TokenV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + TokenV3.PACKAGE_ID.toPackageId.withNoVettingBounds, + ) + ) + ), + ) + + // Verify that Token V3 is used for rendering when its validity is open-ended + assertViews( + interfaceSubscribe( + Seq(HoldingV1.TEMPLATE_ID) + ).loneElement.createEvents.toSeq.loneElement + )( + HoldingV1.TEMPLATE_ID_WITH_PACKAGE_ID -> + Left((FailureStatus, _ should include("Use HoldingV2"))) + ) + } + } + + "there is no vetted package for the package-name of the interface instance of the view being rendered" should { 
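// Editor's note (illustrative sketch, not part of the diff): the two vetting-bound
// shapes exercised by the test above, expressed directly as VettedPackage values
// (the `withNoVettingBounds`/`withVettingEndsAt` helpers at the bottom of this file
// produce exactly these). An open-ended entry is preferred for view rendering over
// a bounded one, which is why the bounded Token V3 was discarded at first.
// `pkgId` is an assumed LfPackageId.
val openEnded =
  VettedPackage(packageId = pkgId, validFromInclusive = None, validUntilExclusive = None)
val bounded = VettedPackage(
  packageId = pkgId,
  validFromInclusive = None,
  validUntilExclusive = Some(CantonTimestamp.now().add(Duration.ofDays(1L))),
)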
+ "fail the stream" in withNewParty { implicit env => implicit party => + setupVettedPackages(HoldingV1.PACKAGE_ID, TokenV2.PACKAGE_ID) + + create(tokenV2("asset")) + + // Now unvet the sole Token package + setupVettedPackages(HoldingV1.PACKAGE_ID) + + val createdEvent = + interfaceSubscribe(Seq(HoldingV1.TEMPLATE_ID)).loneElement.createEvents.toSeq.loneElement + + val expectedFailure = + ( + NoVettedInterfaceImplementationPackage, + (_: String) should include regex + s"No vetted package for rendering the interface view for package-name '${TokenV1.PACKAGE_NAME}'. Reason: No synchronizer satisfies the vetting requirements. Discarded synchronizers:.*${env.daId},Party ${env.participant1.id.adminParty.show} has no vetted packages for '${TokenV1.PACKAGE_NAME}'", + ) + assertViews(createdEvent)( + HoldingV1.TEMPLATE_ID_WITH_PACKAGE_ID -> Left(expectedFailure) + ) + + } + } + + "the interface subscription is started before the implementation roll-out" should { + "observe the event and render its view" in withNewParty { implicit env => implicit party => + import env.* + + setupVettedPackages(HoldingV1.PACKAGE_ID) + val subscriptionF = Future(interfaceSubscribe(Seq(HoldingV1.TEMPLATE_ID))) + setupVettedPackages(HoldingV1.PACKAGE_ID, TokenV1.PACKAGE_ID, TokenV2.PACKAGE_ID) + + // Creates a Token V1 payload + create( + tokenV1("asset"), + // Ensure V1 package-id is used to demonstrate that it is then upgraded and + // appears in the interface subscription + userPackageSelectionPreference = Seq(TokenV1.PACKAGE_ID), + ) + + val createdEvent = subscriptionF.futureValue.loneElement.createEvents.toSeq.loneElement + val expectedViewValue = new HoldingViewV1(party.toProtoPrimitive, "asset") + assertViews(createdEvent)( + HoldingV1.TEMPLATE_ID_WITH_PACKAGE_ID -> Right(expectedViewValue) + ) + } + } + + "not connected to any synchronizer" should { + "deliver view failures with a descriptive correct error" in withNewParty { + implicit env => implicit party => + setupVettedPackages(HoldingV1.PACKAGE_ID, TokenV2.PACKAGE_ID) + + create(tokenV2("asset")) + + // Subscribe for the interface without a synchronizer connected + val createdEvent = withParticipantDisconnected { + interfaceSubscribe( + Seq(HoldingV1.TEMPLATE_ID) + ).loneElement.createEvents.toSeq.loneElement + } + + assertViews(createdEvent)( + HoldingV1.TEMPLATE_ID_WITH_PACKAGE_ID -> Left( + ( + NotConnectedToAnySynchronizer, + _ should include( + "Could not compute a package-id for rendering the interface view. Root cause: This participant is not connected to any synchronizer." 
+ ), + ) + ) + ) + } + } + + "a DAR is uploaded but not vetted" should { + "not be used for rendering the interface view" in withNewParty { + implicit env => implicit party => + import env.* + + setupVettedPackages(HoldingV1.PACKAGE_ID, TokenV2.PACKAGE_ID) + + create(tokenV2("asset")) + + participant1.dars.upload( + UpgradingBaseTest.TokenV3, + synchronizeVetting = false, + ) + + val createdEvent = + interfaceSubscribe( + Seq(HoldingV1.TEMPLATE_ID) + ).loneElement.createEvents.toSeq.loneElement + + val expectedViewValue = new HoldingViewV1(party.toProtoPrimitive, "asset") + assertViews(createdEvent)( + HoldingV1.TEMPLATE_ID_WITH_PACKAGE_ID -> Right(expectedViewValue) + ) + } + } + + "an interface view is rendered for an already seen template package-id" should { + "memoize the used package-id and re-use it" in withNewParty { + implicit env => implicit party => + import env.* + + // vet only Token V2 + setupVettedPackages(HoldingV1.PACKAGE_ID, TokenV2.PACKAGE_ID) + create(tokenV2("asset1")) + + val streamObserver = new RecordingStreamObserver[UpdateWrapper](completeAfter = 2) + val subscriptionF = + Future( + participant1.mkResult( + call = participant1.ledger_api.updates + .subscribe_updates(streamObserver, updateFormat(Seq(HoldingV1.TEMPLATE_ID))), + requestDescription = "getUpdates", + observer = streamObserver, + timeout = participant1.consoleEnvironment.commandTimeouts.ledgerCommand, + ) + ) + + // Wait to be sure the first event is seen and its interface views computation + // memoized the view package-id + eventually()(streamObserver.responseCount.get() shouldBe 1) + + // vet Token V3 as well + setupVettedPackages(HoldingV1.PACKAGE_ID, TokenV2.PACKAGE_ID, TokenV3.PACKAGE_ID) + + // Create tokenV2 again with explicit package selection to ensure not triggering the view 3 computation failure + create(tokenV2("asset2"), userPackageSelectionPreference = Seq(TokenV2.PACKAGE_ID)) + + val subscriptionResult = subscriptionF.futureValue + inside(subscriptionResult.flatMap(_.createEvents)) { case Seq(create1, create2) => + val expectedView1Value = new HoldingViewV1(party.toProtoPrimitive, "asset1") + assertViews(create1)( + HoldingV1.TEMPLATE_ID_WITH_PACKAGE_ID -> Right(expectedView1Value) + ) + + val expectedView2Value = new HoldingViewV1(party.toProtoPrimitive, "asset2") + assertViews(create2)( + HoldingV1.TEMPLATE_ID_WITH_PACKAGE_ID -> Right(expectedView2Value) + ) + } + } + } + + "the create argument cannot be downgraded to the selected interface view package-id" should { + "return a failed interface view" in withNewParty { implicit env => implicit party => + import env.* + + // Vet only Token V4 and Holding V1 + setupVettedPackages(HoldingV1.PACKAGE_ID, TokenV4.PACKAGE_ID) + + create(tokenV4("asset1")) + + // Unvet Token V4 and vet Token V2 which doesn't have the expiry time argument + setupVettedPackages(HoldingV1.PACKAGE_ID, TokenV2.PACKAGE_ID) + + val streamObserver = new RecordingStreamObserver[UpdateWrapper](completeAfter = 2) + val subscriptionF = + Future( + participant1.mkResult( + call = participant1.ledger_api.updates + .subscribe_updates(streamObserver, updateFormat(Seq(HoldingV1.TEMPLATE_ID))), + requestDescription = "getUpdates", + observer = streamObserver, + timeout = participant1.consoleEnvironment.commandTimeouts.ledgerCommand, + ) + ) + + // Wait for the first create to be rendered and vet V4 again + eventually()(streamObserver.responseCount.get() shouldBe 1) + setupVettedPackages(HoldingV1.PACKAGE_ID, TokenV4.PACKAGE_ID) + + // Create a new Token V4 + 
create(tokenV4("asset2")) + + val failuresExpectation = HoldingV1.TEMPLATE_ID_WITH_PACKAGE_ID -> Left( + ( + // TODO(#26411): Assert a proper interface view computation error when the issue is addressed + CommandExecutionErrors.Preprocessing.PreprocessingFailed, + (_: String) should include( + "An optional contract field with a value of Some may not be dropped during downgrading" + ), + ) + ) + + inside(subscriptionF.futureValue.flatMap(_.createEvents)) { case Seq(create1, create2) => + // The first view cannot be computed because the contract argument cannot be downgraded to the view's selected package-id + assertViews(create1)(failuresExpectation) + + // Even though a compatible package-id is vetted when the second create should be rendered, + // the first resolution is memoized for the Token package name so the second view computation fails as well + assertViews(create2)(failuresExpectation) + } + } + } + } + + def assertViews(createdEvent: CreatedEvent)( + viewsExpectations: ( + data.Identifier /* interface-id */, + Either[ + (ErrorCode, String => Assertion), + Any, /* the expected decoded view value */ + ], + )* + )(implicit viewDecoders: Map[data.Identifier, data.Value => Any]): Unit = { + val interfaceViews = createdEvent.interfaceViews + interfaceViews should have size viewsExpectations.size.toLong + + val viewsById = createdEvent.interfaceViews.view + .map(ifaceView => ifaceView.getInterfaceId -> ifaceView) + .toMap + + viewsExpectations.foreach { + case (interfaceId, Right(expectedValue)) => + val interfaceView = viewsById + .get(interfaceId.toScalaProto) + .valueOrFail(s"Not found $interfaceId in $viewsById") + val actualDecodedViewValue = + viewDecoders(interfaceId)(viewValueToJavaApi(interfaceView.getViewValue)) + + actualDecodedViewValue shouldBe expectedValue + case (interfaceId, Left((expectedCode, causeAssert))) => + val interfaceView = viewsById + .get(interfaceId.toScalaProto) + .valueOrFail(s"Not found $interfaceId in $viewsById") + val decodedErrorFromFailedViewStatus = DecodedCantonError + .fromGrpcStatus(interfaceView.getViewStatus) + .value + + decodedErrorFromFailedViewStatus.code.id shouldBe expectedCode.id + causeAssert(decodedErrorFromFailedViewStatus.cause) + } + } + + def withParticipantDisconnected[T](f: => T)(implicit env: FixtureParam): T = { + env.participant1.synchronizers.disconnect_all() + try f + finally env.participant1.synchronizers.reconnect(env.daName) + } + + private def withNewParty[T](f: FixtureParam => PartyId => T): FixtureParam => T = + env => { + import env.* + + val partyHint = s"alice-${partyHintSuffixRef.getAndIncrement()}" + val party = PartiesAllocator( + participants = Set(participant1) + )( + newParties = Seq(partyHint -> participant1.id), + targetTopology = Map( + partyHint -> Map( + synchronizer1Id -> (PositiveInt.one, Set( + participant1.id -> ParticipantPermission.Submission + )) + ) + ), + ).headOption.valueOrFail("Expected Alice to be allocated") + f(env)(party) + } + + private def create( + templateInstance: Template, + userPackageSelectionPreference: Seq[LfPackageId] = Seq.empty, + )(implicit env: FixtureParam, party: PartyId): Unit = + env.participant1.ledger_api.javaapi.commands + .submit( + Seq(party), + templateInstance.create().commands().asScala.toList, + userPackageSelectionPreference = userPackageSelectionPreference, + ) + .discard + + private def setupVettedPackages( + vettedPackageIds: String* + )(implicit env: FixtureParam): Unit = { + import env.* + SetupPackageVetting( + Set( + UpgradingBaseTest.HoldingV1, + 
UpgradingBaseTest.HoldingV2, + UpgradingBaseTest.TokenV1, + UpgradingBaseTest.TokenV2, + UpgradingBaseTest.TokenV3, + UpgradingBaseTest.TokenV4, + ), + targetTopology = Map( + synchronizer1Id -> Map( + participant1 -> vettedPackageIds.map(_.toPackageId withNoVettingBounds).toSet + ) + ), + ) + } + + private def viewValueToJavaApi(viewValue: Record) = + data.Value.fromProto( + toJavaProto( + com.daml.ledger.api.v2.value.Value( + com.daml.ledger.api.v2.value.Value.Sum + .Record(viewValue) + ) + ) + ) + + private def interfaceSubscribe( + interfaceIds: Seq[data.Identifier], + includeInterfaceView: Boolean = true, + completeAfter: Int = 1, + )(implicit env: FixtureParam, party: PartyId): Seq[UpdateService.UpdateWrapper] = { + import env.* + + participant1.ledger_api.updates.transactions_with_tx_format( + transactionFormat = + updateFormat(interfaceIds, includeInterfaceView).includeTransactions.value, + completeAfter = completeAfter, + ) + } + + private def updateFormat(interfaceIds: Seq[Identifier], includeInterfaceView: Boolean = true)( + implicit party: PartyId + ) = + UpdateFormat( + includeTransactions = Some( + TransactionFormat( + eventFormat = Some( + EventFormat( + filtersByParty = Map( + party.toProtoPrimitive -> Filters( + interfaceIds.map(interfaceId => + CumulativeFilter( + IdentifierFilter.InterfaceFilter( + InterfaceFilter( + interfaceId = Some(interfaceId.toScalaProto), + includeCreatedEventBlob = false, + includeInterfaceView = includeInterfaceView, + ) + ) + ) + ) + ) + ), + filtersForAnyParty = None, + verbose = false, + ) + ), + transactionShape = TRANSACTION_SHAPE_ACS_DELTA, + ) + ), + includeReassignments = None, + includeTopologyEvents = None, + ) + private implicit class JavaIdentifierOps(val identifier: data.Identifier) { + def toScalaProto: ScalaPbIdentifier = ScalaPbIdentifier.fromJavaProto(identifier.toProto) + } + + private implicit class StrToPkgId(packageIdStr: String) { + def toPackageId: LfPackageId = Ref.PackageId.assertFromString(packageIdStr) + } + + private implicit class PackageIdVettingExtensions(packageId: LfPackageId) { + def withNoVettingBounds: VettedPackage = VettedPackage( + packageId = packageId, + validFromInclusive = None, + validUntilExclusive = None, + ) + + def withVettingEndsAt(validUntil: CantonTimestamp): VettedPackage = VettedPackage( + packageId = packageId, + validFromInclusive = None, + validUntilExclusive = Some(validUntil), + ) + } +} diff --git a/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/ComplexTopologyAwarePackageSelectionIntegrationTest.scala b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/ComplexTopologyAwarePackageSelectionIntegrationTest.scala new file mode 100644 index 0000000000..571e76c300 --- /dev/null +++ b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/ComplexTopologyAwarePackageSelectionIntegrationTest.scala @@ -0,0 +1,541 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
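// Editor's note (illustrative sketch, not part of the diff): the package-preference
// query the test file below relies on to predict which package version the ledger
// will select for a package name, given the vetting state of all hosting
// participants. `participant`, `provider`, and `user` are assumed in scope; the
// call shape is taken verbatim from the test body.
val preference = participant.ledger_api.interactive_submission
  .preferred_packages(
    Map(LfPackageName.assertFromString(AppInstallV2.PACKAGE_NAME) -> Set(provider, user))
  )
  .packageReferences
  .find(_.packageName == AppInstallV2.PACKAGE_NAME)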
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.upgrading + +import com.daml.ledger.javaapi.data.CreatedEvent +import com.digitalasset.canton.config.CantonRequireTypes.InstanceName +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.console.LocalParticipantReference +import com.digitalasset.canton.damltests.appinstall.v2.java.appinstall.{ + AppInstall as AppInstallV2, + AppInstallRequest as AppInstallRequestV2, +} +import com.digitalasset.canton.damltests.featuredapprightimpl.v1.java.featuredapprightimpl.FeaturedAppRightImpl as FeaturedAppRightImplV1 +import com.digitalasset.canton.damltests.featuredapprightimpl.v2.java.featuredapprightimpl.FeaturedAppRightImpl as FeaturedAppRightImplV2 +import com.digitalasset.canton.damltests.featuredapprightimpl.v2.java.featuredapprightv1.FeaturedAppRight +import com.digitalasset.canton.damltests.{appinstall, featuredapprightimpl} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + ConfigTransforms, + EnvironmentDefinition, + SharedEnvironment, +} +import com.digitalasset.canton.ledger.error.groups.CommandExecutionErrors +import com.digitalasset.canton.ledger.error.groups.CommandExecutionErrors.Interpreter.LookupErrors.UnresolvedPackageName +import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil +import com.digitalasset.canton.topology.{PartyId, PhysicalSynchronizerId, SynchronizerId} +import com.digitalasset.canton.util.SetupPackageVetting +import com.digitalasset.canton.{LfPackageName, SynchronizerAlias} +import com.digitalasset.daml.lf.data.Ref +import monocle.macros.syntax.lens.* + +import java.util.Optional +import scala.jdk.CollectionConverters.{CollectionHasAsScala, MapHasAsScala} +import scala.jdk.OptionConverters.RichOption +import scala.util.chaining.scalaUtilChainingOps + +import UpgradingBaseTest.Syntax.* + +final class ComplexTopologyAwarePackageSelectionIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment { + + registerPlugin( + new UseReferenceBlockSequencer[DbConfig.Postgres]( + loggerFactory, + sequencerGroups = MultiSynchronizer( + Seq( + Set(InstanceName.tryCreate("sequencer1")), + Set(InstanceName.tryCreate("sequencer2")), + ) + ), + ) + ) + + @volatile private var provider, user, dso: PartyId = _ + @volatile private var providerParticipant, userParticipant, + dsoParticipant: LocalParticipantReference = _ + @volatile private var IAppRight, AppRightV1, AppRightV2: Ref.PackageId = _ + @volatile private var InstallV1, InstallV2: Ref.PackageId = _ + @volatile private var GlobalSynchronizerId, PrivateSynchronizerId: PhysicalSynchronizerId = _ + + private val GlobalSynchronizerName = SynchronizerAlias.tryCreate("global") + private val PrivateSynchronizerName = SynchronizerAlias.tryCreate("private") + + private lazy val AllDars = Set( + UpgradingBaseTest.FeaturedAppRightIface, + UpgradingBaseTest.FeaturedAppRightImplV1, + UpgradingBaseTest.FeaturedAppRightImplV2, + UpgradingBaseTest.AppInstallV1, + UpgradingBaseTest.AppInstallV2, + ) + + override lazy val environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P3_S1M1_S1M1 + .addConfigTransform( + ConfigTransforms.updateAllParticipantConfigs_( + _.focus(_.ledgerApi.topologyAwarePackageSelection.enabled).replace(true) + ) + ) + .withSetup { implicit env => + import 
env.* + + GlobalSynchronizerId = sequencer1.physical_synchronizer_id + PrivateSynchronizerId = sequencer2.physical_synchronizer_id + + // Disambiguate participants + providerParticipant = participant1 + userParticipant = participant2 + dsoParticipant = participant3 + + // Connect all participants to the global synchronizer + participants.all.synchronizers + .connect_local(sequencer1, alias = GlobalSynchronizerName) + + // Only the provider and user participants connect to the private synchronizer + Seq(providerParticipant, userParticipant) + .foreach( + _.synchronizers.connect_local(sequencer2, alias = PrivateSynchronizerName) + ) + + // Assign parties + provider = providerParticipant.parties.enable( + "provider", + synchronizer = GlobalSynchronizerName, + ) + providerParticipant.parties.enable("provider", synchronizer = PrivateSynchronizerName) + user = userParticipant.parties.enable("user", synchronizer = GlobalSynchronizerName) + userParticipant.parties.enable("user", synchronizer = PrivateSynchronizerName) + dso = dsoParticipant.parties.enable("dso", synchronizer = GlobalSynchronizerName) + + // Upload DARs to participants but do not vet as vetting state setup is done in the test + Seq(providerParticipant, userParticipant, dsoParticipant).foreach { p => + IAppRight = p.dars + .upload(UpgradingBaseTest.FeaturedAppRightIface, vetAllPackages = false) + .pipe(Ref.PackageId.assertFromString) + + AppRightV1 = p.dars + .upload(UpgradingBaseTest.FeaturedAppRightImplV1, vetAllPackages = false) + .pipe(Ref.PackageId.assertFromString) + AppRightV2 = p.dars + .upload(UpgradingBaseTest.FeaturedAppRightImplV2, vetAllPackages = false) + .pipe(Ref.PackageId.assertFromString) + + InstallV1 = p.dars + .upload(UpgradingBaseTest.AppInstallV1, vetAllPackages = false) + .pipe(Ref.PackageId.assertFromString) + InstallV2 = p.dars + .upload(UpgradingBaseTest.AppInstallV2, vetAllPackages = false) + .pipe(Ref.PackageId.assertFromString) + } + } + + "Featured App Right flow" when { + "on GS (single-synchronizer)" when { + "all parties are on V2, but aware of V1 as well (V2 AppRight and V2 AppInstall args)" should { + "succeed" in { _ => + test( + vettingState = Map( + GlobalSynchronizerId -> Map( + dsoParticipant -> Set(IAppRight, AppRightV1, AppRightV2), + providerParticipant -> Set(IAppRight, AppRightV1, AppRightV2, InstallV1, InstallV2), + userParticipant -> Set(IAppRight, AppRightV1, AppRightV2, InstallV1, InstallV2), + ) + ), + expectAppRightVersionPkgId = AppRightV2, + expectAppInstallVersionPkgId = InstallV2, + expectAppActivityMarkerVersionPkgId = AppRightV2, + expectedSynchronizerAppRight = GlobalSynchronizerId, + expectedSynchronizerAppInstallRequest = GlobalSynchronizerId, + expectedSynchronizerAppInstall = GlobalSynchronizerId, + ) + } + } + + "user is on V1 with dso and provider on V2 (V1 AppRight and V1 AppInstall args)" should { + "succeed" in { _ => + test( + vettingState = Map( + GlobalSynchronizerId -> Map( + dsoParticipant -> Set(IAppRight, AppRightV1, AppRightV2), + providerParticipant -> Set(IAppRight, AppRightV1, AppRightV2, InstallV1, InstallV2), + userParticipant -> Set(IAppRight, AppRightV1, InstallV1), + ) + ), + expectAppRightVersionPkgId = AppRightV2, + expectAppInstallVersionPkgId = InstallV1, + expectAppActivityMarkerVersionPkgId = AppRightV1, + expectedSynchronizerAppRight = GlobalSynchronizerId, + expectedSynchronizerAppInstallRequest = GlobalSynchronizerId, + expectedSynchronizerAppInstall = GlobalSynchronizerId, + ) + } + } + + "user and provider are on V1 with dso on V2 (V1 
AppRight and V1 AppInstall args)" should { + "succeed" in { _ => + test( + vettingState = Map( + GlobalSynchronizerId -> Map( + dsoParticipant -> Set(IAppRight, AppRightV1, AppRightV2), + providerParticipant -> Set(IAppRight, AppRightV1, InstallV1), + userParticipant -> Set(IAppRight, AppRightV1, InstallV1), + ) + ), + expectAppRightVersionPkgId = FeaturedAppRightImplV1.COMPANION.PACKAGE_ID, + expectAppInstallVersionPkgId = InstallV1, + expectAppActivityMarkerVersionPkgId = AppRightV1, + expectedSynchronizerAppRight = GlobalSynchronizerId, + expectedSynchronizerAppInstallRequest = GlobalSynchronizerId, + expectedSynchronizerAppInstall = GlobalSynchronizerId, + ) + } + } + + "all parties are on V1 (V1 AppRight and V1 AppInstall args)" should { + "succeed" in { _ => + test( + vettingState = Map( + GlobalSynchronizerId -> Map( + dsoParticipant -> Set(IAppRight, AppRightV1), + providerParticipant -> Set(IAppRight, AppRightV1, InstallV1), + userParticipant -> Set(IAppRight, AppRightV1, InstallV1), + ) + ), + expectAppRightVersionPkgId = FeaturedAppRightImplV1.COMPANION.PACKAGE_ID, + expectAppInstallVersionPkgId = InstallV1, + expectAppActivityMarkerVersionPkgId = AppRightV1, + expectedSynchronizerAppRight = GlobalSynchronizerId, + expectedSynchronizerAppInstallRequest = GlobalSynchronizerId, + expectedSynchronizerAppInstall = GlobalSynchronizerId, + ) + } + } + + // DISABLED TEST. Please ignore + // This should be a negative test that is currently passing due to + // vetting checks in phases 1 and 3 that do not check the vetting of input contracts creation package-ids + "user does not know of V2 AppRight but knows about AppInstall V2" should { + "fail" ignore { _ => + // userParticipant.dars.remove(AppRightV2) // Uncomment to see the test crash with an EngineError due to missing package in phase 3 + test( + vettingState = Map( + GlobalSynchronizerId -> Map( + dsoParticipant -> Set(IAppRight, AppRightV1, AppRightV2), + providerParticipant -> Set(IAppRight, AppRightV1, AppRightV2, InstallV2), + userParticipant -> Set(IAppRight, AppRightV1, InstallV2), + ) + ), + expectAppRightVersionPkgId = AppRightV2, + expectAppInstallVersionPkgId = InstallV2, + expectAppActivityMarkerVersionPkgId = AppRightV1, + expectedSynchronizerAppRight = GlobalSynchronizerId, + expectedSynchronizerAppInstallRequest = GlobalSynchronizerId, + expectedSynchronizerAppInstall = GlobalSynchronizerId, + ) + } + } + + "dso and user are on disjoint versions" should { + "fail" in { _ => + assertThrowsAndLogsCommandFailures( + test( + vettingState = Map( + GlobalSynchronizerId -> Map( + dsoParticipant -> Set(IAppRight, AppRightV2), + providerParticipant -> Set( + IAppRight, + AppRightV1, + AppRightV2, + InstallV1, + InstallV2, + ), + userParticipant -> Set(IAppRight, AppRightV1, InstallV1), + ) + ), + expectAppRightVersionPkgId = AppRightV2, + expectAppInstallVersionPkgId = InstallV1, + expectAppActivityMarkerVersionPkgId = AppRightV1, + expectedSynchronizerAppRight = GlobalSynchronizerId, + expectedSynchronizerAppInstallRequest = GlobalSynchronizerId, + expectedSynchronizerAppInstall = GlobalSynchronizerId, + ), + // TODO(#25385) Specialize error once package-name discard reason is propagated + // Note: Currently we only log the discard reason at DEBUG level + _.shouldBeCantonErrorCode(UnresolvedPackageName), + ) + } + } + } + + "app install created on private and accepted on global" when { + "both private and GS have v1 and v2" should { + "allow creating the request on private and accept on GS" in { _ => + test( + vettingState = 
Map( + GlobalSynchronizerId -> Map( + dsoParticipant -> Set(IAppRight, AppRightV1), + providerParticipant -> Set(IAppRight, AppRightV1, AppRightV2, InstallV1, InstallV2), + userParticipant -> Set(IAppRight, AppRightV1, AppRightV2, InstallV1, InstallV2), + ), + PrivateSynchronizerId -> Map( + providerParticipant -> Set(IAppRight, AppRightV1, AppRightV2, InstallV1, InstallV2), + userParticipant -> Set(IAppRight, AppRightV1, AppRightV2, InstallV1, InstallV2), + ), + ), + expectAppRightVersionPkgId = AppRightV1, + expectAppInstallVersionPkgId = InstallV2, + expectAppActivityMarkerVersionPkgId = AppRightV1, + expectedSynchronizerAppRight = GlobalSynchronizerId, + expectedSynchronizerAppInstallRequest = PrivateSynchronizerId, + expectedSynchronizerAppInstall = GlobalSynchronizerId, + appInstallRequestPrescribedSynchronizer = Some(PrivateSynchronizerId), + ) + } + } + + "private sync has only V1 and GS only V2" should { + // TODO(#25385) Keep this test case around as a target state for the future + // Note: even though GS has an upgraded version of the app, + // the reassignment is rejected due to vetting checks + "be upgraded and accepted on global" ignore { _ => + test( + vettingState = Map( + GlobalSynchronizerId -> Map( + dsoParticipant -> Set(IAppRight, AppRightV2), + providerParticipant -> Set(IAppRight, AppRightV2, InstallV2), + userParticipant -> Set(IAppRight, AppRightV2, InstallV2), + ), + PrivateSynchronizerId -> Map( + providerParticipant -> Set(IAppRight, AppRightV1, InstallV1), + userParticipant -> Set(IAppRight, AppRightV1, InstallV1), + ), + ), + expectAppRightVersionPkgId = AppRightV2, + expectAppInstallVersionPkgId = InstallV1, + expectAppActivityMarkerVersionPkgId = AppRightV2, + expectedSynchronizerAppRight = GlobalSynchronizerId, + expectedSynchronizerAppInstallRequest = PrivateSynchronizerId, + expectedSynchronizerAppInstall = GlobalSynchronizerId, + appInstallRequestPrescribedSynchronizer = Some(PrivateSynchronizerId), + ) + } + } + } + + "Commands.package_id_selection_preferences is specified" should { + "restrict the packages used in command selection" in { _ => + arrangeVettingStateUnbounded(vettingState = + Map( + GlobalSynchronizerId -> Map( + dsoParticipant -> Set(IAppRight, AppRightV1, AppRightV2), + providerParticipant -> Set(IAppRight, AppRightV1, AppRightV2), + // We don't care about the user in this test as we just test one command + ) + ) + ) + + val appRightTree = dsoParticipant.ledger_api.javaapi.commands + .submit( + Seq(dso), + new FeaturedAppRightImplV2( + dso.toProtoPrimitive, + provider.toProtoPrimitive, + Optional.empty(), + ).create().commands().asScala.toSeq, + userPackageSelectionPreference = Seq(AppRightV1), + ) + + // User preference specifies only V1, so expect V1 to be created + inside(appRightTree.getEventsById.asScala.head._2) { case created: CreatedEvent => + created.getTemplateId shouldBe FeaturedAppRightImplV1.TEMPLATE_ID_WITH_PACKAGE_ID + } + } + + "fail" in { _ => + arrangeVettingStateUnbounded(vettingState = + Map( + GlobalSynchronizerId -> Map( + dsoParticipant -> Set(IAppRight, AppRightV1), + providerParticipant -> Set(IAppRight, AppRightV1), + // We don't care about the user as it's not involved in the flow + ) + ) + ) + + val userPackagePreferenceSet = Seq(AppRightV2) + // dso prefers V2 even though it only knows about V1 + assertThrowsAndLogsCommandFailures( + dsoParticipant.ledger_api.javaapi.commands + .submit( + Seq(dso), + new FeaturedAppRightImplV2( + dso.toProtoPrimitive, + provider.toProtoPrimitive, + Optional.empty(), + 
).create().commands().asScala.toSeq, + userPackageSelectionPreference = userPackagePreferenceSet, + ), + _.shouldBeCantonErrorCode(CommandExecutionErrors.UserPackagePreferenceNotVetted), + ) + } + } + } + + private def test( + vettingState: Map[PhysicalSynchronizerId, Map[LocalParticipantReference, Set[Ref.PackageId]]], + // The expected package-id of the FeaturedAppRightImpl created event in the IndexDB + expectAppRightVersionPkgId: String, + // The expected package-id of the FeaturedAppActivityMarker created event in the IndexDB + expectAppActivityMarkerVersionPkgId: String, + // The expected package-id of the AppInstall created event in the IndexDB + expectAppInstallVersionPkgId: String, + // The expected synchronizer-id of the AppInstallRequest create + expectedSynchronizerAppInstallRequest: SynchronizerId, + // The expected synchronizer-id of the original FeaturedAppRightImpl create + // Note: it might be later reassigned but that is checked via the expectedSynchronizerAppInstall + expectedSynchronizerAppRight: SynchronizerId, + // The expected synchronizer-id of the AppInstall create + expectedSynchronizerAppInstall: SynchronizerId, + // Useful to force the starting point of the flow + appInstallRequestPrescribedSynchronizer: Option[SynchronizerId] = None, + ) = { + arrangeVettingStateUnbounded(vettingState) + + val appRightPreference = dsoParticipant.ledger_api.interactive_submission + .preferred_packages( + Map( + LfPackageName.assertFromString(FeaturedAppRightImplV2.PACKAGE_NAME) -> Set(dso, provider) + ) + ) + .packageReferences + .find(_.packageName == FeaturedAppRightImplV2.PACKAGE_NAME) + .value + + val useV2AppRightArg = appRightPreference.packageId == AppRightV2 + + // dso creates FeaturedAppRight + val appRightTree = + dsoParticipant.ledger_api.javaapi.commands.submit( + Seq(dso), + new FeaturedAppRightImplV2( + dso.toProtoPrimitive, + provider.toProtoPrimitive, + Option.when(useV2AppRightArg)(BigDecimal(1337L).bigDecimal).toJava, + ).create().commands().asScala.toSeq, + ) + + // Assert synchronizer of the created app right + appRightTree.getSynchronizerId shouldBe expectedSynchronizerAppRight.toProtoPrimitive + + val appRightObj = appRightTree + .pipe(JavaDecodeUtil.decodeAllCreated(FeaturedAppRightImplV2.COMPANION)) + .head + + appRightObj.data shouldBe new FeaturedAppRightImplV2( + dso.toProtoPrimitive, + provider.toProtoPrimitive, + Option + .when(expectAppRightVersionPkgId == FeaturedAppRightImplV2.PACKAGE_ID)( + BigDecimal(1337L).bigDecimal.setScale(10) + ) + .toJava, + ) + val appRightCid: FeaturedAppRight.ContractId = + appRightObj.id + .toInterface(featuredapprightimpl.v2.java.featuredapprightv1.FeaturedAppRight.INTERFACE) + + // Assert the created version of the FeaturedAppRight + appRightTree.getEventsById.asScala.head._2.getTemplateId.getPackageId shouldBe expectAppRightVersionPkgId + + val appInstallRequestPreference = + userParticipant.ledger_api.interactive_submission + .preferred_packages( + Map(LfPackageName.assertFromString(AppInstallV2.PACKAGE_NAME) -> Set(provider, user)) + ) + .packageReferences + .find(_.packageName == AppInstallV2.PACKAGE_NAME) + .value + + val useV2AppInstallCreateArg = appInstallRequestPreference.packageId == AppInstallV2.PACKAGE_ID + + // user creates the app install request + val appInstallRequestTree = userParticipant.ledger_api.javaapi.commands + .submit( + actAs = Seq(user), + commands = new AppInstallRequestV2( + new AppInstallV2( + dso.toProtoPrimitive, + provider.toProtoPrimitive, + user.toProtoPrimitive, + 
BigDecimal(1337L).bigDecimal, + Option.when(useV2AppInstallCreateArg)(BigDecimal(1338L).bigDecimal).toJava, + ) + ).create().commands().asScala.toSeq, + synchronizerId = appInstallRequestPrescribedSynchronizer, + ) + + // Assert synchronizer of the created app install request + appInstallRequestTree.getSynchronizerId shouldBe expectedSynchronizerAppInstallRequest.toProtoPrimitive + + val appInstallRequestObj = appInstallRequestTree + .pipe(JavaDecodeUtil.decodeAllCreated(AppInstallRequestV2.COMPANION)) + .head + + appInstallRequestObj.data.install shouldBe new AppInstallV2( + dso.toProtoPrimitive, + provider.toProtoPrimitive, + user.toProtoPrimitive, + BigDecimal(1337L).bigDecimal.setScale(10), + Option + .when(expectAppInstallVersionPkgId == AppInstallV2.PACKAGE_ID)( + BigDecimal(1338L).bigDecimal.setScale(10) + ) + .toJava, + ) + val appInstallRequestCid = appInstallRequestObj.id + + // provider accepts the app install request + val appInstallTree = providerParticipant.ledger_api.javaapi.commands + .submit( + Seq(provider), + appInstallRequestCid + .exerciseAppInstallRequest_Accept( + new appinstall.v2.java.featuredapprightv1.FeaturedAppRight.ContractId( + appRightCid.contractId + ) + ) + .commands() + .asScala + .toSeq, + ) + + // Assert synchronizer of the created app install and implicitly, of the transaction creating it + appInstallTree.getSynchronizerId shouldBe expectedSynchronizerAppInstall.toProtoPrimitive + + val createdPackageIds = appInstallTree.getEventsById.asScala.view.values.collect { + case event: CreatedEvent => event.getTemplateId.getPackageId + }.toList + + // It's fine to compare blindly since AppInstall and FeaturedAppActivityMarker do not share the same package-name + createdPackageIds.sorted shouldBe List( + expectAppInstallVersionPkgId, + expectAppActivityMarkerVersionPkgId, + ).sorted + } + + private def arrangeVettingStateUnbounded( + vettingState: Map[PhysicalSynchronizerId, Map[LocalParticipantReference, Set[Ref.PackageId]]] + ): Unit = + SetupPackageVetting( + darPaths = AllDars, + targetTopology = vettingState.map { case (syncId, participantPackages) => + syncId -> participantPackages.map { case (participant, packages) => + participant -> packages.map(_ withNoVettingBounds) + } + }, + ) +} diff --git a/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/CreationPackageUnvettingUpgradingIntegrationTest.scala b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/CreationPackageUnvettingUpgradingIntegrationTest.scala new file mode 100644 index 0000000000..fffb34da14 --- /dev/null +++ b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/CreationPackageUnvettingUpgradingIntegrationTest.scala @@ -0,0 +1,161 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.upgrading + +import com.digitalasset.canton.LfPackageId +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.console.{LocalParticipantReference, ParticipantReference} +import com.digitalasset.canton.damltests.upgrade.v1.java.upgrade.SigStakeInf as SigStakeInfV1 +import com.digitalasset.canton.damltests.upgrade.v2.java.upgrade.{ + SigStakeInf, + SigStakeInf as SigStakeInfV2, +} +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.util.PartiesAllocator +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentDefinition, + SharedEnvironment, +} +import com.digitalasset.canton.topology.PartyId +import com.digitalasset.canton.topology.transaction.{ParticipantPermission, VettedPackage} +import com.digitalasset.canton.util.SetupPackageVetting +import com.digitalasset.daml.lf.data.Ref + +import scala.jdk.CollectionConverters.CollectionHasAsScala +import scala.util.chaining.scalaUtilChainingOps + +class CreationPackageUnvettingUpgradingIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment { + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + + @volatile private var signatory, nonStakeholder, observer: PartyId = _ + + override lazy val environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P3_S1M1 + .withSetup { implicit env => + import env.* + participants.all.foreach(_.synchronizers.connect_local(sequencer1, alias = daName)) + + val allocations = + Map( + "signatory" -> participant1, + "nonStakeholder" -> participant2, + "observer" -> participant3, + ) + inside( + PartiesAllocator(allocations.values.toSet)( + allocations.view.mapValues(_.id).toSeq, + allocations.view + .mapValues(participantRef => + Map( + daId -> (PositiveInt.one, Set( + participantRef.id -> (ParticipantPermission.Submission: ParticipantPermission) + )) + ) + ) + .toMap, + ) + ) { case Seq(signatory, nonStakeholder, observer) => + this.signatory = signatory + this.nonStakeholder = nonStakeholder + this.observer = observer + } + } + + "Command (re-)interpretation" when { + "the creation package of a disclosed contract is not vetted by the non-stakeholder submitter" should { + "succeed if the target package is vetted" in { implicit env => + import env.* + + SetupPackageVetting( + Set(UpgradingBaseTest.UpgradeV1, UpgradingBaseTest.UpgradeV2), + targetTopology = Map( + synchronizer1Id -> Map[ParticipantReference, Seq[LfPackageId]]( + participant1 -> Seq( + SigStakeInfV1.PACKAGE_ID.toPackageId, + SigStakeInfV2.PACKAGE_ID.toPackageId, + ), + participant2 -> Seq(SigStakeInfV1.PACKAGE_ID.toPackageId), + participant3 -> Seq( + SigStakeInfV1.PACKAGE_ID.toPackageId, + SigStakeInfV2.PACKAGE_ID.toPackageId, + ), + ).view.mapValues(VettedPackage.unbounded(_).toSet).toMap + ), + ) + + // Create V2 contract + val contractId: SigStakeInfV2.ContractId = createContract(participant1) + + val disclosedContract = ledger_api_utils + .fetchContractsAsDisclosed(participant1, signatory, SigStakeInfV1.TEMPLATE_ID) + .view + .values + .loneElement + + // Non-stakeholder exercises the contract via an explicit disclosure + // It only vetted V1 so it forces its downgrade and must also accept the view + participant2.ledger_api.javaapi.commands.submit( + Seq(nonStakeholder), + contractId + 
.exerciseSigStakeInf_NoOp(nonStakeholder.toProtoPrimitive) + .commands() + .asScala + .toList, + disclosedContracts = Seq(disclosedContract), + ) + } + } + + "the creation package is not vetted by a contract observer" should { + "succeed if the target package is vetted" in { implicit env => + import env.* + + SetupPackageVetting( + Set(UpgradingBaseTest.UpgradeV1, UpgradingBaseTest.UpgradeV2), + targetTopology = Map( + synchronizer1Id -> Map[ParticipantReference, Seq[LfPackageId]]( + participant1 -> Seq( + SigStakeInfV1.PACKAGE_ID.toPackageId, + SigStakeInfV2.PACKAGE_ID.toPackageId, + ), + participant3 -> Seq(SigStakeInfV1.PACKAGE_ID.toPackageId), + ).view.mapValues(VettedPackage.unbounded(_).toSet).toMap + ), + ) + + // Create V2 contract + val contractId: SigStakeInf.ContractId = createContract(participant1) + + // The signatory exercises its contract, + // but the observer, which only vetted V1, forces its downgrade and must also accept the view + participant1.ledger_api.javaapi.commands.submit( + Seq(signatory), + contractId + .exerciseSigStakeInf_NoOp(signatory.toProtoPrimitive) + .commands() + .asScala + .toList, + ) + } + } + } + + private def createContract( + participant1: => LocalParticipantReference + ): SigStakeInf.ContractId = { + val contractPayload = new SigStakeInfV2(signatory.toProtoPrimitive, observer.toProtoPrimitive) + participant1.ledger_api.javaapi.commands + .submit(Seq(signatory), contractPayload.create().commands().asScala.toList) + .pipe(_.getEvents.asScala.headOption.value.getContractId) + .pipe(new SigStakeInf.ContractId(_)) + } + + private implicit class StrToPkgId(packageIdStr: String) { + def toPackageId: LfPackageId = Ref.PackageId.assertFromString(packageIdStr) + } +} diff --git a/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/DisclosedContractNormalizationTest.scala b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/DisclosedContractNormalizationTest.scala new file mode 100644 index 0000000000..be98c2fb01 --- /dev/null +++ b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/DisclosedContractNormalizationTest.scala @@ -0,0 +1,157 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.upgrading + +import com.daml.logging.LoggingContext +import com.digitalasset.canton.damltests.upgrade.v2.java.upgrade.Upgrading +import com.digitalasset.canton.ledger.api.util.TimeProvider +import com.digitalasset.canton.ledger.participant.state.index.ContractStore +import com.digitalasset.canton.logging.LoggingContextWithTrace +import com.digitalasset.canton.metrics.LedgerApiServerMetrics +import com.digitalasset.canton.platform.apiserver.configuration.EngineLoggingConfig +import com.digitalasset.canton.platform.apiserver.execution.{ + StoreBackedCommandInterpreter, + TestDynamicSynchronizerParameterGetter, +} +import com.digitalasset.canton.platform.config.CommandServiceConfig +import com.digitalasset.canton.protocol.{ + AuthenticatedContractIdVersionV10, + AuthenticatedContractIdVersionV11, + LfFatContractInst, +} +import com.digitalasset.canton.time.NonNegativeFiniteDuration +import com.digitalasset.canton.util.{ContractValidator, TestEngine} +import com.digitalasset.canton.{BaseTest, FailOnShutdown, HasExecutionContext} +import com.digitalasset.daml.lf.data.Ref +import com.digitalasset.daml.lf.engine.* +import com.digitalasset.daml.lf.language.{LanguageMajorVersion, LanguageVersion} +import com.digitalasset.daml.lf.transaction.{FatContractInstance, Node, TransactionCoder} +import com.digitalasset.daml.lf.value.Value.ContractId +import org.scalatest.Assertion +import org.scalatest.wordspec.AsyncWordSpec + +import java.util.Optional +import scala.concurrent.ExecutionContext + +class DisclosedContractNormalizationTest + extends AsyncWordSpec + with HasExecutionContext + with FailOnShutdown + with BaseTest { + + private val ec: ExecutionContext = executorService + + val engine = new Engine( + EngineConfig(allowedLanguageVersions = LanguageVersion.AllVersions(LanguageMajorVersion.V2)) + ) + + private val testEngine = new TestEngine( + packagePaths = Seq(UpgradingBaseTest.UpgradeV2), + cantonContractIdVersion = AuthenticatedContractIdVersionV11, + ) + + private def buildV11upgrading( + alice: String, + value: Long, + ): (Upgrading.ContractId, LfFatContractInst) = { + val command = new Upgrading(alice, alice, value, Optional.empty()).create.commands.loneElement + val (tx, _) = testEngine.submitAndConsume(command, alice) + val createNode = tx.nodes.values.collect { case e: Node.Create => e }.loneElement + val fat = testEngine.suffix(createNode) + (new Upgrading.ContractId(fat.contractId.coid), fat) + } + + // Simulate a (denormalized) V10 contract, starting from a V11 contract + private def buildV10upgrading( + alice: String, + value: Long, + ): (Upgrading.ContractId, LfFatContractInst) = { + + val (_, v11fat) = buildV11upgrading(alice, value) + + val enrichedArg = + testEngine.enrichContract(Upgrading.TEMPLATE_ID_WITH_PACKAGE_ID, v11fat.createArg) + + val enrichedFat = FatContractInstance.fromCreateNode( + v11fat.toCreateNode.copy(arg = enrichedArg), + v11fat.createdAt, + v11fat.authenticationData, + ) + + val v10contractId = AuthenticatedContractIdVersionV10.fromDiscriminator( + v11fat.contractId.asInstanceOf[ContractId.V1].discriminator, + testEngine.recomputeUnicum(enrichedFat, AuthenticatedContractIdVersionV10), + ) + + val v10fat = FatContractInstance.fromCreateNode( + enrichedFat.toCreateNode.mapCid(_ => v10contractId), + enrichedFat.createdAt, + enrichedFat.authenticationData, + ) + + // Here we recode to strip any type info that does not make it into the blob + val v10fatRecoded = 
TransactionCoder + .decodeFatContractInstance(TransactionCoder.encodeFatContractInstance(v10fat).value) + .value + .asInstanceOf[LfFatContractInst] + + (new Upgrading.ContractId(v10contractId.coid), v10fatRecoded) + } + + val alice = "alice" + + "disclosed contract processing of command interpretation" should { + + val validator = + ContractValidator(testEngine.cryptoOps, testEngine.engine, testEngine.packageResolver) + + val underTest = + new StoreBackedCommandInterpreter( + engine = testEngine.engine, + participant = Ref.ParticipantId.assertFromString("anId"), + packageResolver = testEngine.packageResolver, + contractStore = mock[ContractStore], + metrics = LedgerApiServerMetrics.ForTesting, + contractAuthenticator = validator.authenticateHash, + config = EngineLoggingConfig(), + prefetchingRecursionLevel = CommandServiceConfig.DefaultContractPrefetchingDepth, + loggerFactory = loggerFactory, + dynParamGetter = + new TestDynamicSynchronizerParameterGetter(NonNegativeFiniteDuration.Zero)(ec), + timeProvider = TimeProvider.UTC, + )(ec) + + def verifyDisclosure(cId: Upgrading.ContractId, fat: LfFatContractInst): Assertion = { + implicit val loggingContext: LoggingContext = LoggingContextWithTrace(loggerFactory) + + validator.authenticate(fat, fat.templateId.packageId).futureValueUS shouldBe Right(()) + + val command = cId.exerciseUpgrading_Fetch(alice).commands().loneElement + val commands = testEngine.validateCommand(command, alice, disclosedContracts = Seq(fat)) + + val result = underTest + .interpret(commands, testEngine.randomHash())( + LoggingContextWithTrace(loggerFactory), + ec, + ) + .futureValueUS + .value + + val disclosedFat = result.processedDisclosedContracts.toSeq.loneElement + disclosedFat shouldBe fat + } + + "work with V11 contracts" in { + val (v11Cid, v11fat) = buildV11upgrading(alice, 7) + verifyDisclosure(v11Cid, v11fat) + } + + "work with V10 contracts" in { + val (v10Cid, v10fat) = buildV10upgrading(alice, 7) + verifyDisclosure(v10Cid, v10fat) + } + + } + +} diff --git a/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/HeterogeneousDependenciesVettingIntegrationTest.scala b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/HeterogeneousDependenciesVettingIntegrationTest.scala new file mode 100644 index 0000000000..366cddb26d --- /dev/null +++ b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/HeterogeneousDependenciesVettingIntegrationTest.scala @@ -0,0 +1,586 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.upgrading + +import com.daml.ledger.api.v2.event.CreatedEvent +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_LEDGER_EFFECTS +import com.daml.ledger.api.v2.value.Identifier.toJavaProto +import com.daml.ledger.javaapi.data +import com.daml.ledger.javaapi.data.Identifier +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.console.{LocalParticipantReference, ParticipantReference} +import com.digitalasset.canton.damltests.{dvpassets, dvpoffer} +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.util.PartiesAllocator +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentDefinition, + SharedEnvironment, +} +import com.digitalasset.canton.ledger.error.LedgerApiErrors.NoPreferredPackagesFound +import com.digitalasset.canton.topology.PartyId +import com.digitalasset.canton.topology.transaction.ParticipantPermission.Submission +import com.digitalasset.canton.topology.transaction.VettedPackage +import com.digitalasset.canton.util.SetupPackageVetting +import com.digitalasset.canton.util.collection.MapsUtil +import com.digitalasset.canton.{LfPackageId, LfPackageName} + +import scala.jdk.CollectionConverters.{CollectionHasAsScala, MapHasAsJava, MapHasAsScala} +import scala.jdk.OptionConverters.{RichOption, RichOptional} + +import dvpassets.v1.java.assets.Share as ShareV1 +import dvpassets.v2.java.assets.Share as ShareV2 +import dvpassets.v1.java.assets.Iou as IouV1 +import dvpassets.v2.java.assets.Iou as IouV2 +import dvpassets.v2.java.assets.Meta as AssetsMeta +import dvpoffer.v1.java.dvpoffer.DvpOffer as DvpOfferV1 +import dvpoffer.v2.java.dvpoffer.DvpOffer as DvpOfferV2 +import UpgradingBaseTest.Syntax.* + +class HeterogeneousDependenciesVettingIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment { + + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + + @volatile var registryParticipant1, registryParticipant2, sellerParticipant, + buyerParticipant: LocalParticipantReference = _ + @volatile var registry, seller, buyer: PartyId = _ + @volatile var assetFactoryV1PkgId, assetFactoryV2PkgId: LfPackageId = _ + private val AllDars = Set( + UpgradingBaseTest.DvpAssetFactoryV1, + UpgradingBaseTest.DvpAssetFactoryV2, + UpgradingBaseTest.DvpAssetsV1, + UpgradingBaseTest.DvpOffersV1, + UpgradingBaseTest.DvpAssetsV2, + UpgradingBaseTest.DvpOffersV2, + ) + + override lazy val environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P4_S1M1 + .withSetup { implicit env => + import env.* + + // Disambiguate participants + registryParticipant1 = participant1 + registryParticipant2 = participant2 + sellerParticipant = participant3 + buyerParticipant = participant4 + + participants.all.synchronizers.connect_local(sequencer1, alias = daName) + + // TODO(#25523): Use Java codegen once module name conflicts can be resolved + assetFactoryV1PkgId = LfPackageId.assertFromString( + registryParticipant1.dars + .upload(UpgradingBaseTest.DvpAssetFactoryV1, vetAllPackages = false) + ) + assetFactoryV2PkgId = LfPackageId.assertFromString( + registryParticipant1.dars + .upload(UpgradingBaseTest.DvpAssetFactoryV2, vetAllPackages = false) + ) + + // Setup the party topology state + 
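// The registry is deliberately multi-hosted on registryParticipant1 and registryParticipant2, so the + // vetting checks exercised below must hold on both of its participants; seller and buyer are each single-hosted. +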
inside( + PartiesAllocator( + Set(registryParticipant1, registryParticipant2, sellerParticipant, buyerParticipant) + )( + newParties = Seq( + // In a real DvP setup, there would be two registries: one for Share and one for IOU + // For simplicity, in this test, we use a single registry for both + "registry" -> registryParticipant1, + "seller" -> sellerParticipant, + "buyer" -> buyerParticipant, + ), + targetTopology = Map( + // Registry is multi-hosted + "registry" -> Map( + daId -> + // TODO(#25385): Use threshold of two once PartiesAllocator supports it + (PositiveInt.one, Set( + registryParticipant1.id -> Submission, + registryParticipant2.id -> Submission, + )) + ), + "seller" -> Map( + daId -> (PositiveInt.one, Set(sellerParticipant.id -> Submission)) + ), + "buyer" -> Map( + daId -> (PositiveInt.one, Set(buyerParticipant.id -> Submission)) + ), + ), + ) + ) { case Seq(p_registry, p_seller, p_buyer) => + registry = p_registry + seller = p_seller + buyer = p_buyer + } + } + + private def registryParticipants( + vettedPackages: Set[VettedPackage] + ): Map[ParticipantReference, Set[VettedPackage]] = + Map(registryParticipant1 -> vettedPackages, registryParticipant2 -> vettedPackages) + + private lazy val AssetIssuanceAllVetted: Map[ParticipantReference, Set[VettedPackage]] = (Seq( + sellerParticipant -> Set( + ShareV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + ShareV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + ), + buyerParticipant -> Set( + IouV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + IouV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + ), + ) ++ registryParticipants( + Set( + assetFactoryV1PkgId.withNoVettingBounds, + assetFactoryV2PkgId.withNoVettingBounds, + ShareV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + ShareV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + ) + )).toMap + + private lazy val DvpAllVetted: Map[ParticipantReference, Set[VettedPackage]] = + MapsUtil.mergeMapsOfSets( + AssetIssuanceAllVetted, + Map( + sellerParticipant -> Set( + DvpOfferV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + DvpOfferV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + ), + buyerParticipant -> Set( + DvpOfferV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + DvpOfferV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + ), + ), + ) + + "Asset issuance" when { + /* + The asset issuance tree: + - Create AssetFactory (r) + - AssetFactory_IssueAssets (r) + | + |-- Share (r, s) + | + `-- IOU (r, b) + where: + - if AssetFactory is V1, Share and IOU can be only V1 (package-version wise) + - if AssetFactory is V2, Share and IOU can be V1 or V2, independently of each other based on the version inputs + */ + "the buyer and seller vetted only Assets V1" should { + "issue V1 Assets" in { implicit env => + import env.* + + SetupPackageVetting( + darPaths = AllDars, + targetTopology = Map( + daId -> AssetIssuanceAllVetted + // Buyer and seller did not yet vet V2 assets + .updated(sellerParticipant, Set(ShareV1.PACKAGE_ID.toPackageId.withNoVettingBounds)) + .updated(buyerParticipant, Set(ShareV1.PACKAGE_ID.toPackageId.withNoVettingBounds)) + ), + ) + + issueAssetsTest( + expectedShareTemplateId = ShareV1.TEMPLATE_ID_WITH_PACKAGE_ID, + expectedIouTemplateId = IouV1.TEMPLATE_ID_WITH_PACKAGE_ID, + ).discard + } + } + + "registry vetted only AssetFactory V1" should { + "issue V1 Assets" in { implicit env => + import env.* + + SetupPackageVetting( + darPaths = AllDars, + targetTopology = Map( + daId -> AssetIssuanceAllVetted.updated( + registryParticipant1, + Set( + assetFactoryV1PkgId.withNoVettingBounds, + 
ShareV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + ShareV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + ), + ) + ), + ) + + issueAssetsTest( + expectedShareTemplateId = ShareV1.TEMPLATE_ID_WITH_PACKAGE_ID, + expectedIouTemplateId = IouV1.TEMPLATE_ID_WITH_PACKAGE_ID, + ).discard + } + } + + "buyer only vetted Assets V1" should { + "issue V1 Assets" in { implicit env => + import env.* + + SetupPackageVetting( + darPaths = AllDars, + targetTopology = Map( + daId -> AssetIssuanceAllVetted.updated( + buyerParticipant, + Set(ShareV1.PACKAGE_ID.toPackageId.withNoVettingBounds), + ) + ), + ) + + issueAssetsTest( + expectedShareTemplateId = ShareV1.TEMPLATE_ID_WITH_PACKAGE_ID, + expectedIouTemplateId = IouV1.TEMPLATE_ID_WITH_PACKAGE_ID, + ).discard + } + } + + "buyer only vetted Assets V1 and Seller V2 with forced V2 issuance for seller" should { + "succeed with Seller's create at V2 and buyer's at V1" in { implicit env => + import env.* + + SetupPackageVetting( + darPaths = AllDars, + targetTopology = Map( + daId -> AssetIssuanceAllVetted + .updated( + buyerParticipant, + Set(ShareV1.PACKAGE_ID.toPackageId.withNoVettingBounds), + ) + .updated( + sellerParticipant, + Set(ShareV2.PACKAGE_ID.toPackageId.withNoVettingBounds), + ) + ), + ) + + issueAssetsTest( + expectedShareTemplateId = ShareV2.TEMPLATE_ID_WITH_PACKAGE_ID, + expectedIouTemplateId = IouV1.TEMPLATE_ID_WITH_PACKAGE_ID, + // Not considering the common Assets vetted package for Buyer and Seller + // in the preferred package version query allows the registry participant + // to issue different versions of the assets (Seller -> V2, Buyer -> V1) + // + // Note: This is possible since the package selection for IssueAssets is successful from the first pass + // which only takes into consideration the registry and the asset factory package-name only. + // If this decision were to be pushed to the second pass, the package selection would fail + // since the buyer and seller would have disjoint Assets vetted packages + overrideIssueAssetsV2 = true, + expectedShareMeta = Some(new AssetsMeta(Map("tag" -> "V2").asJava)), + ).discard + } + } + } + + "DvP flow" when { + "all vetted V2" should { + "conclude the DvP flow in V2" in { implicit env => + tradeTest(vettingSetup = DvpAllVetted)( + expectedShareTemplateId = ShareV2.TEMPLATE_ID_WITH_PACKAGE_ID, + expectedIouTemplateId = IouV2.TEMPLATE_ID_WITH_PACKAGE_ID, + expectedOfferTemplateId = DvpOfferV2.TEMPLATE_ID_WITH_PACKAGE_ID, + expectedBuyersIouMeta = Some(new AssetsMeta(Map("tag" -> "V2").asJava)), + expectedSellersShareMeta = Some(new AssetsMeta(Map("tag" -> "V2").asJava)), + expectedBuyersShareMetaAfterTrade = Some( + new AssetsMeta(Map("traded_status" -> "done", "tag" -> "V2").asJava) + ), + expectedSellersIouMetaAfterTrade = Some( + new AssetsMeta(Map("traded_status" -> "done", "tag" -> "V2").asJava) + ), + ) + } + } + + "one of the registry's participants only vetted assets V1" should { + "succeed with the exercise choosing assets v1 so it can be accepted by the registry as well" in { + implicit env => + /* + Here we focus on the behavior of package selection in command execution in the following situation: + - package selection only happens at the root command node, since it's specified by package-name + - a child exercise of the root node that defines a static package bind to the package of the root node adds a new informee + which is not considered in the package selection of the root node since it doesn't see it. 
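+ (In the tree below, the registry r appears only in sub-nodes, so a selection computed over b and s alone could pick a DvpOffer version whose statically linked Share/IOU packages r has not vetted.)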
+ Expectation: the package selection on the root node also considers the dependencies that it forces on new informees + appearing in sub-nodes. + + As an example, we take the Accept choice of a DvP, which looks as follows (in parentheses are the informees of the nodes) + --------------------------------- + Exe Accept (b, s) + | + |-- Fetch Share (b, s, r) + | + |-- Fetch IOU (b, s, r) + | + |-- Exe TransferShare (b, s, r) + | | + | `-- Share (b, s, r) + | + `-- Exe TransferIou (b, s, r) + | + `-- IOU (b, s, r) + -------------------------------- + NOTE: The important aspect here is that the package selection during command execution of the "Accept" choice on the DvP offer + yields a package id that does not force an unvetted package-id of the IOU and Share templates (via static links) on the registry party. + The registry does not see the top-level action node of the transaction, and hence does not need to use the DvpOffer package. + */ + tradeTest( + vettingSetup = DvpAllVetted.updated( + registryParticipant1, + Set( + // Only V1 versions vetted for the registry + assetFactoryV1PkgId.withNoVettingBounds, + ShareV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + ), + ) + )( + expectedShareTemplateId = ShareV1.TEMPLATE_ID_WITH_PACKAGE_ID, + expectedIouTemplateId = IouV1.TEMPLATE_ID_WITH_PACKAGE_ID, + expectedOfferTemplateId = DvpOfferV2.TEMPLATE_ID_WITH_PACKAGE_ID, + ) + } + } + + "one of the registry's participants did not vet anything" should { + "fail" in { implicit env => + assertThrowsAndLogsCommandFailures( + tradeTest( + // Unvet all packages on registryParticipant2 + // Note: submission and package preferences requests are still attempted on registryParticipant1 + vettingSetup = DvpAllVetted.updated(registryParticipant2, Set.empty) + )( + expectedShareTemplateId = ShareV1.TEMPLATE_ID_WITH_PACKAGE_ID, + expectedIouTemplateId = IouV1.TEMPLATE_ID_WITH_PACKAGE_ID, + expectedOfferTemplateId = DvpOfferV2.TEMPLATE_ID_WITH_PACKAGE_ID, + ), + entry => { + entry.shouldBeCantonErrorCode(NoPreferredPackagesFound) + entry.message should include regex + s"Could not compute valid package preferences.
Reason: No synchronizer satisfies the vetting requirements.*Party ${registry.show}.* has no vetted packages for 'dvp-asset-factory'" + }, + ) + } + } + } + + private def tradeTest( + vettingSetup: Map[ParticipantReference, Set[VettedPackage]] + )( + expectedShareTemplateId: data.Identifier, + expectedIouTemplateId: data.Identifier, + expectedOfferTemplateId: data.Identifier, + expectedBuyersIouMeta: Option[AssetsMeta] = None, + expectedSellersShareMeta: Option[AssetsMeta] = None, + expectedBuyersShareMetaAfterTrade: Option[AssetsMeta] = None, + expectedSellersIouMetaAfterTrade: Option[AssetsMeta] = None, + )(implicit env: FixtureParam): Unit = { + import env.* + + // Setup the vetting topology state + SetupPackageVetting(darPaths = AllDars, targetTopology = Map(daId -> vettingSetup)) + + val (shareCid, iouCid) = issueAssetsTest( + expectedShareTemplateId, + expectedIouTemplateId, + expectedShareMeta = expectedSellersShareMeta, + expectedIouMeta = expectedBuyersIouMeta, + ) + + val iouDisclosedContracts = ledger_api_utils + .fetchContractsAsDisclosed(registryParticipant1, registry, IouV1.TEMPLATE_ID) + .values + + val createOfferEvent = buyerParticipant.ledger_api.javaapi.commands + .submit( + Seq(buyer), + new DvpOfferV1( + buyer.toProtoPrimitive, + seller.toProtoPrimitive, + registry.toProtoPrimitive, + "SO Inc.", + new dvpoffer.v1.java.assets.Iou.ContractId(iouCid), + ).create().commands().asScala.toList, + ) + .getEvents + .asScala + .headOption + .value + + // Assert Offer version + Identifier.fromProto( + createOfferEvent.toProtoEvent.getCreated.getTemplateId + ) shouldBe expectedOfferTemplateId + + val dvpOfferPreference = sellerParticipant.ledger_api.interactive_submission + .preferred_packages( + Map( + LfPackageName + .assertFromString(DvpOfferV2.PACKAGE_NAME) -> Set(seller, buyer), + LfPackageName.assertFromString(ShareV1.PACKAGE_NAME) -> Set(registry), + ) + ) + + val useDvpOfferMetaArgument = + dvpOfferPreference.packageReferences + .find(_.packageName == DvpOfferV2.PACKAGE_NAME) + .value + .packageId == DvpOfferV2.PACKAGE_ID + + val acceptTransaction = sellerParticipant.ledger_api.javaapi.commands + .submit( + Seq(seller), + new DvpOfferV2.ContractId(createOfferEvent.getContractId) + .exerciseAccept( + new dvpoffer.v2.java.assets.Share.ContractId(shareCid), + Option + .when(useDvpOfferMetaArgument)( + new dvpoffer.v2.java.assets.Meta(Map("traded_status" -> "done").asJava) + ) + .toJava, + ) + .commands() + .asScala + .toList, + disclosedContracts = iouDisclosedContracts.toSeq, + transactionShape = TRANSACTION_SHAPE_LEDGER_EFFECTS, + ) + + val createdEvents = acceptTransaction.getEventsById.asScala.collect { + case (_, createdEvent: data.CreatedEvent) => createdEvent + } + val createShareForBuyerEvent = + createdEvents.find(_.getTemplateId.getEntityName == ShareV2.TEMPLATE_ID.getEntityName).value + val createIouForSellerEvent = + createdEvents.find(_.getTemplateId.getEntityName == IouV2.TEMPLATE_ID.getEntityName).value + + ShareV2 + .valueDecoder() + .decode(createShareForBuyerEvent.getArguments) + .meta + .toScala shouldBe expectedBuyersShareMetaAfterTrade + IouV2 + .valueDecoder() + .decode(createIouForSellerEvent.getArguments) + .meta + .toScala shouldBe expectedSellersIouMetaAfterTrade + } + + private def issueAssetsTest( + expectedShareTemplateId: data.Identifier, + expectedIouTemplateId: data.Identifier, + overrideIssueAssetsV2: Boolean = false, + expectedShareMeta: Option[AssetsMeta] = None, + expectedIouMeta: Option[AssetsMeta] = None, + )(implicit env: FixtureParam): 
(String /* share cId */, String /* IOU cId */ ) = { + import env.* + + // TODO(#25523): Use Java codegen once module name conflicts can be resolved + val dvpAssetFactoryPkgName = LfPackageName.assertFromString("dvp-asset-factory") + val dvpAssetsPkgName = LfPackageName.assertFromString(ShareV1.PACKAGE_NAME) + + val assetFactoryV2Supported = + registryParticipant1.ledger_api.interactive_submission + .preferred_packages(Map(dvpAssetFactoryPkgName -> Set(registry))) + .packageReferences + .find(_.packageName == dvpAssetFactoryPkgName) + .value + .packageId == assetFactoryV2PkgId + + val createAssetFactory = ledger_api_utils.create( + s"#$dvpAssetFactoryPkgName", + "AssetFactory", + "AssetFactory", + Map("issuer" -> registry, "tag" -> (Option.when(assetFactoryV2Supported)("V2"): Any)), + ) + + val assetFactoryCreatedEvent = registryParticipant1.ledger_api.commands + .submit(Seq(registry), Seq(createAssetFactory)) + .events + .headOption + .value + .getCreated + + // Lazy value since the call fails if buyer and seller have disjoint Assets vetted packages + lazy val issueAssetsSupportsV2Assets = + registryParticipant1.ledger_api.interactive_submission + .preferred_packages( + Map(dvpAssetFactoryPkgName -> Set(registry), dvpAssetsPkgName -> Set(seller, buyer)) + ) + .packageReferences + .find(_.packageName == dvpAssetFactoryPkgName) + .value + .packageId == assetFactoryV2PkgId + + val buyerSupportsV2Assets = buyerParticipant.ledger_api.interactive_submission + .preferred_packages(Map(dvpAssetsPkgName -> Set(buyer))) + .packageReferences + .find(_.packageName == dvpAssetsPkgName) + .value + .packageId == IouV2.PACKAGE_ID + + val sellerSupportsV2Assets = sellerParticipant.ledger_api.interactive_submission + .preferred_packages(Map(dvpAssetsPkgName -> Set(seller))) + .packageReferences + .find(_.packageName == dvpAssetsPkgName) + .value + .packageId == IouV2.PACKAGE_ID + + val issueAssetsCreates = registryParticipant1.ledger_api.commands + .submit( + Seq(registry), + Seq( + ledger_api_utils.exercise( + "AssetFactory_IssueAssets", + Map( + "iouOwner" -> buyer, + "iouAmount" -> 1337.0d, + "iouVersion" -> (if ( + (overrideIssueAssetsV2 || issueAssetsSupportsV2Assets) && buyerSupportsV2Assets + ) 2 + else 1), + "shareOwner" -> seller, + "shareCompany" -> "SO Inc.", + "shareVersion" -> (if ( + (overrideIssueAssetsV2 || issueAssetsSupportsV2Assets) && sellerSupportsV2Assets + ) 2 + else 1), + ), + assetFactoryCreatedEvent, + ) + ), + ) + .events + .flatMap(_.event.created) + + val createdShareEvent = issueAssetsCreates + .find(_.templateId.value.entityName == ShareV1.TEMPLATE_ID_WITH_PACKAGE_ID.getEntityName) + .value + val createdIouEvent = issueAssetsCreates + .find(_.templateId.value.entityName == IouV1.TEMPLATE_ID_WITH_PACKAGE_ID.getEntityName) + .value + + // Assert Share version + Identifier.fromProto( + toJavaProto(createdShareEvent.getTemplateId) + ) shouldBe expectedShareTemplateId + + ShareV2 + .valueDecoder() + .decode(data.CreatedEvent.fromProto(CreatedEvent.toJavaProto(createdShareEvent)).getArguments) + .meta + .toScala shouldBe expectedShareMeta + + // Assert IOU version + Identifier.fromProto( + toJavaProto(createdIouEvent.getTemplateId) + ) shouldBe expectedIouTemplateId + + IouV2 + .valueDecoder() + .decode(data.CreatedEvent.fromProto(CreatedEvent.toJavaProto(createdIouEvent)).getArguments) + .meta + .toScala shouldBe expectedIouMeta + + createdShareEvent.contractId -> createdIouEvent.contractId + } +} diff --git 
a/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/InteractiveSubmissionUpgradingTest.scala b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/InteractiveSubmissionUpgradingTest.scala new file mode 100644 index 0000000000..353ef0f2fe --- /dev/null +++ b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/InteractiveSubmissionUpgradingTest.scala @@ -0,0 +1,176 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.upgrading + +import com.daml.ledger.api.v2.transaction.Transaction +import com.daml.ledger.javaapi.data.DisclosedContract +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.console.LocalParticipantReference +import com.digitalasset.canton.damltests.upgrade +import com.digitalasset.canton.damltests.upgrade.v1.java.upgrade.{FetchQuote, Quote} +import com.digitalasset.canton.integration.EnvironmentDefinition +import com.digitalasset.canton.integration.tests.ledgerapi.submission.InteractiveSubmissionIntegrationTestSetup +import com.digitalasset.canton.integration.util.PartyToParticipantDeclarative +import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil +import com.digitalasset.canton.topology.ExternalParty +import com.digitalasset.canton.topology.transaction.ParticipantPermission +import com.digitalasset.canton.{HasExecutionContext, LfPackageId} +import org.scalatest.OptionValues + +import scala.jdk.CollectionConverters.SeqHasAsJava + +class InteractiveSubmissionUpgradingTest + extends InteractiveSubmissionIntegrationTestSetup + with OptionValues + with HasExecutionContext { + + private var lse: ExternalParty = _ + private var alice: ExternalParty = _ + private var bob: ExternalParty = _ + private val v1PackageId = upgrade.v1.java.upgrade.Quote.PACKAGE_ID + private val v2PackageId = upgrade.v2.java.upgrade.Quote.PACKAGE_ID + + override def environmentDefinition: EnvironmentDefinition = + super.environmentDefinition + .withSetup { implicit env => + import env.* + + participant1.dars.upload(UpgradingBaseTest.UpgradeV1) + participant1.dars.upload(UpgradingBaseTest.UpgradeV2) + participant2.dars.upload(UpgradingBaseTest.UpgradeV2) + participant3.dars.upload(UpgradingBaseTest.UpgradeV1) + + lse = participant1.parties.external.enable("LSE") + alice = participant2.parties.external.enable("Alice") + bob = participant3.parties.external.enable("Bob") + + } + + "Interactive submission" should { + + "use a v2 disclosed contract available on all participants" in { implicit env => + import env.* + val (quoteCid, disclosedQuote) = discloseQuote(lse, participant1, v2PackageId) + val (fetchQuote, _) = createFetchQuote(participant2, alice) + exerciseFetch(participant2, quoteCid, disclosedQuote, fetchQuote, alice) + } + + "use a v1 disclosed contract on a participant that only has v2 available" in { implicit env => + import env.* + val (quoteCid, disclosedQuote) = discloseQuote(lse, participant1, v1PackageId) + val (fetchQuote, _) = createFetchQuote(participant2, alice) + exerciseFetch(participant2, quoteCid, disclosedQuote, fetchQuote, alice) + } + + "use a v1 disclosed contract on a participant that only has v1 available" in { implicit env => + import env.* + val (quoteCid, disclosedQuote) = discloseQuote(lse, 
participant1, v1PackageId) + val (fetchQuote, disclosedFetch) = createFetchQuote(participant3, bob) + + def setBobConfirmer( + confirmingParticipant: LocalParticipantReference + ): Unit = + PartyToParticipantDeclarative( + Set(participant2, participant3), + Set(daId), + )( + owningParticipants = Map.empty, + targetTopology = Map( + bob.partyId -> Map( + daId -> (PositiveInt.one, Set( + (confirmingParticipant, ParticipantPermission.Confirmation) + )) + ) + ), + externalParties = Set(bob), + )(executorService, env) + + // Set Bob confirmer to participant2 so that V2 gets used for the prepare step + setBobConfirmer(participant2) + val preparedExercise = participant1.ledger_api.javaapi.interactive_submission.prepare( + Seq(bob.partyId), + Seq(fetchQuote.id.exerciseFQ_ExFetch(quoteCid).commands().loneElement), + disclosedContracts = Seq(disclosedQuote, disclosedFetch), + ) + + // Set Bob confirmer to participant3 where V2 is not available + setBobConfirmer(participant3) + assertThrowsAndLogsCommandFailures( + participant1.ledger_api.commands.external.submit_prepared(bob, preparedExercise), + { le => + le.errorMessage should include regex raw"(?s)FAILED_PRECONDITION/INVALID_PRESCRIBED_SYNCHRONIZER_ID" + le.errorMessage should include regex raw"(?s)because: Some packages are not known to all informees.*on synchronizer synchronizer1" + le.errorMessage should include regex raw"(?s)Participant PAR::participant3.*has not vetted ${v2PackageId + .take(10)}" + }, + ) + + } + + } + + private def exerciseFetch( + participant: => LocalParticipantReference, + quoteCid: Quote.ContractId, + disclosedQuote: DisclosedContract, + fetchQuote: FetchQuote.Contract, + party: ExternalParty, + ): Transaction = { + val preparedExercise = participant.ledger_api.javaapi.interactive_submission.prepare( + Seq(party.partyId), + Seq(fetchQuote.id.exerciseFQ_ExFetch(quoteCid).commands().loneElement), + disclosedContracts = Seq(disclosedQuote), + ) + participant.ledger_api.commands.external.submit_prepared(party, preparedExercise) + } + + private def createFetchQuote( + participant1: => LocalParticipantReference, + party: ExternalParty, + ): (FetchQuote.Contract, DisclosedContract) = { + val txFetchQuote = participant1.ledger_api.javaapi.commands.submit( + Seq(party), + Seq( + new FetchQuote( + party.toProtoPrimitive, + party.toProtoPrimitive, + party.toProtoPrimitive, + ).create.commands.loneElement + ), + includeCreatedEventBlob = true, + ) + + val disclosed = JavaDecodeUtil.decodeDisclosedContracts(txFetchQuote).loneElement + + val fetchQuote = + JavaDecodeUtil.decodeAllCreated(FetchQuote.COMPANION)(txFetchQuote).loneElement + (fetchQuote, disclosed) + } + + private def discloseQuote( + quoter: ExternalParty, + participant: LocalParticipantReference, + quotePackageId: LfPackageId, + ): (Quote.ContractId, DisclosedContract) = { + val quoteTx = participant.ledger_api.javaapi.commands.submit( + Seq(quoter), + Seq( + new Quote( + Seq(quoter.toProtoPrimitive).asJava, + Seq.empty.asJava, + "VOD", + 100, + ).create.commands.loneElement + ), + includeCreatedEventBlob = true, + userPackageSelectionPreference = Seq(quotePackageId), + ) + + val disclosedQuote = JavaDecodeUtil.decodeDisclosedContracts(quoteTx).loneElement + + val quoteCid = new Quote.ContractId(disclosedQuote.contractId.get()) + (quoteCid, disclosedQuote) + } + +} diff --git a/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/InterfaceFetchIntegrationTest.scala 
b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/InterfaceFetchIntegrationTest.scala new file mode 100644 index 0000000000..a1070c976d --- /dev/null +++ b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/InterfaceFetchIntegrationTest.scala @@ -0,0 +1,90 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.upgrading + +import com.digitalasset.canton.damltests.upgrade.v2.java.upgrade.{FetchQuote, Quote} +import com.digitalasset.canton.damltests.upgrade.v2.java.upgradeif.IQuote +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentDefinition, + SharedEnvironment, +} +import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil + +import scala.jdk.CollectionConverters.* + +class InterfaceFetchIntegrationTest extends CommunityIntegrationTest with SharedEnvironment { + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P2_S1M1 + + "a view only consists of a fetch interface" in { implicit env => + import env.* + + participant1.synchronizers.connect_local(sequencer1, alias = daName) + participant1.dars.upload(UpgradingBaseTest.UpgradeV1) + participant1.dars.upload(UpgradingBaseTest.UpgradeV2) + + participant2.synchronizers.connect_local(sequencer1, alias = daName) + participant2.dars.upload(UpgradingBaseTest.UpgradeV1) + participant2.dars.upload(UpgradingBaseTest.UpgradeV2) + + val bank = participant1.parties.enable( + "bank", + synchronizeParticipants = Seq(participant2), + ) + val alice = participant2.parties.enable( + "alice", + synchronizeParticipants = Seq(participant1), + ) + + val quoteCid: Quote.ContractId = + JavaDecodeUtil + .decodeAllCreated(Quote.COMPANION)( + participant1.ledger_api.javaapi.commands.submit( + Seq(bank), + new Quote( + List(bank.toProtoPrimitive).asJava, + List(alice.toProtoPrimitive).asJava, + "DA", + 100, + ).create.commands.asScala.toSeq, + ) + ) + .loneElement + .id + + val fetchQuoteCid: FetchQuote.ContractId = + JavaDecodeUtil + .decodeAllCreated(FetchQuote.COMPANION)( + participant2.ledger_api.javaapi.commands.submit( + Seq(alice), + new FetchQuote( + alice.toProtoPrimitive, + alice.toProtoPrimitive, + alice.toProtoPrimitive, + ).create.commands.asScala.toSeq, + ) + ) + .loneElement + .id + + // TemplateId based fetch + participant2.ledger_api.javaapi.commands.submit( + Seq(alice), + fetchQuoteCid.exerciseFQ_ExFetch(quoteCid).commands().asScala.toSeq, + ) + + // Interface based fetch + participant2.ledger_api.javaapi.commands.submit( + Seq(alice), + fetchQuoteCid + .exerciseFQ_IFetch(quoteCid.toInterface(IQuote.INTERFACE)) + .commands() + .asScala + .toSeq, + ) + + } +} diff --git a/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/InterfaceResolutionIntegrationTest.scala b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/InterfaceResolutionIntegrationTest.scala new file mode 100644 index 0000000000..5cf933fe51 --- /dev/null +++ b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/InterfaceResolutionIntegrationTest.scala @@ -0,0 +1,180 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. 
All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.upgrading + +import com.daml.ledger.javaapi.data.CreateCommand +import com.daml.ledger.javaapi.data.codegen.{Created, Update} +import com.digitalasset.canton.LfPackageId +import com.digitalasset.canton.config.DbConfig.Postgres +import com.digitalasset.canton.damltests.upgrade.v1.java.upgrade.{ + UpgradeItCallInterface as UpgradeItCallInterfaceV1, + UpgradeItTemplate as UpgradeItTemplateV1, +} +import com.digitalasset.canton.damltests.upgrade.v1.java.upgradeif.{ + UpgradeItInterface, + UpgradeItVersionStamp, +} +import com.digitalasset.canton.damltests.upgrade.v2.java.upgrade.UpgradeItTemplate as UpgradeItTemplateV2 +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentDefinition, + SharedEnvironment, + TestConsoleEnvironment, +} +import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil +import com.digitalasset.canton.topology.PartyId + +import scala.jdk.CollectionConverters.* + +sealed abstract class InterfaceResolutionIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment { + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P1_S1M1 + .withSetup { implicit env => + import env.* + + participant1.synchronizers.connect_local(sequencer1, alias = daName) + initializedSynchronizers(daName) + + participant1.dars.upload(UpgradingBaseTest.UpgradeV1) + participant1.dars.upload(UpgradingBaseTest.UpgradeV2) + + participant1.parties.enable("alice") + } + + private def party(name: String)(implicit env: TestConsoleEnvironment): PartyId = + env.participant1.parties.list(name).headOption.valueOrFail("where is " + name).party + + "interface resolution" when { + + def setupInterface(implicit env: FixtureParam): (PartyId, UpgradeItInterface.ContractId) = { + + import env.participant1 + + val alice = party("alice") + + val templateTx = participant1.ledger_api.javaapi.commands.submit( + Seq(alice), + new Update.CreateUpdate[UpgradeItTemplateV1.ContractId, Created[ + UpgradeItTemplateV1.ContractId + ]]( + new CreateCommand( + UpgradeItTemplateV1.TEMPLATE_ID_WITH_PACKAGE_ID, + new UpgradeItTemplateV1(alice.toProtoPrimitive).toValue, + ), + identity, + new UpgradeItTemplateV1.ContractId(_), + ).commands.asScala.toSeq, + ) + val templateCid: UpgradeItTemplateV1.ContractId = + JavaDecodeUtil.decodeAllCreated(UpgradeItTemplateV1.COMPANION)(templateTx).loneElement.id + + val interfaceCid1 = templateCid.toInterface(UpgradeItInterface.INTERFACE) + + (alice, interfaceCid1) + } + + def testDirect(preferred: LfPackageId, expected: Int)(implicit env: FixtureParam): Unit = { + + import env.* + + val (alice, interfaceCid1) = setupInterface + + val directTx = participant1.ledger_api.javaapi.commands.submit( + Seq(alice), + interfaceCid1.exerciseUpgradeItStamp("direct").commands().asScala.toSeq, + userPackageSelectionPreference = Seq(preferred), + ) + JavaDecodeUtil + .decodeAllCreated(UpgradeItVersionStamp.COMPANION)(directTx) + .loneElement + .data + .stampedVersion shouldBe expected + } + + def testIndirect(preferred: LfPackageId, expected: Int)(implicit env: FixtureParam): Unit = { + + import env.* + + val (alice, interfaceCid1) = setupInterface + + val callTx = participant1.ledger_api.javaapi.commands.submit( + Seq(alice), + new Update.CreateUpdate[UpgradeItCallInterfaceV1.ContractId, Created[ + 
UpgradeItCallInterfaceV1.ContractId + ]]( + new CreateCommand( + UpgradeItCallInterfaceV1.TEMPLATE_ID_WITH_PACKAGE_ID, + new UpgradeItCallInterfaceV1(alice.toProtoPrimitive).toValue, + ), + identity, + new UpgradeItCallInterfaceV1.ContractId(_), + ).commands.asScala.toSeq, + ) + val callCid: UpgradeItCallInterfaceV1.ContractId = + JavaDecodeUtil.decodeAllCreated(UpgradeItCallInterfaceV1.COMPANION)(callTx).loneElement.id + + val indirectTx = participant1.ledger_api.javaapi.commands.submit( + Seq(alice), + callCid.exerciseUpgradeItCallStamp(interfaceCid1).commands().asScala.toSeq, + userPackageSelectionPreference = Seq(preferred), + ) + JavaDecodeUtil + .decodeAllCreated(UpgradeItVersionStamp.COMPANION)(indirectTx) + .loneElement + .data + .stampedVersion shouldBe expected + } + + "support interface fetch" in { implicit env => + import env.* + + val (alice, interfaceCid1) = setupInterface + + val callTx = participant1.ledger_api.javaapi.commands.submit( + Seq(alice), + new UpgradeItCallInterfaceV1(alice.toProtoPrimitive).create.commands.asScala.toSeq, + userPackageSelectionPreference = + Seq(LfPackageId.assertFromString(UpgradeItCallInterfaceV1.PACKAGE_ID)), + ) + + val callCid: UpgradeItCallInterfaceV1.ContractId = + JavaDecodeUtil.decodeAllCreated(UpgradeItCallInterfaceV1.COMPANION)(callTx).loneElement.id + + participant1.ledger_api.javaapi.commands.submit( + Seq(alice), + callCid.exerciseUpgradeItIfFetch(interfaceCid1).commands().asScala.toSeq, + userPackageSelectionPreference = Seq( + LfPackageId.assertFromString(UpgradeItTemplateV1.TEMPLATE_ID_WITH_PACKAGE_ID.getPackageId) + ), + ) + + } + + "direct dispatch to v1" in { implicit env => + testDirect(LfPackageId.assertFromString(UpgradeItTemplateV1.PACKAGE_ID), 1) + } + + "direct dispatch to v2" in { implicit env => + testDirect(LfPackageId.assertFromString(UpgradeItTemplateV2.PACKAGE_ID), 2) + } + + "indirect dispatch to v1" in { implicit env => + testIndirect(LfPackageId.assertFromString(UpgradeItTemplateV1.PACKAGE_ID), 1) + } + + "indirect dispatch to v2" in { implicit env => + testIndirect(LfPackageId.assertFromString(UpgradeItTemplateV2.PACKAGE_ID), 2) + } + + } + +} + +final class InterfaceResolutionIntegrationRefTest extends InterfaceResolutionIntegrationTest { + registerPlugin(new UseReferenceBlockSequencer[Postgres](loggerFactory)) +} diff --git a/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/InvalidPackagePreferenceIntegrationTest.scala b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/InvalidPackagePreferenceIntegrationTest.scala new file mode 100644 index 0000000000..69635f1a1b --- /dev/null +++ b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/InvalidPackagePreferenceIntegrationTest.scala @@ -0,0 +1,252 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.upgrading + +import com.daml.ledger.api.v2.commands.Command +import com.digitalasset.canton.LfPackageId +import com.digitalasset.canton.crypto.CryptoPureApi +import com.digitalasset.canton.damltests.upgrade.v1.java.upgrade.{ + UpgradeItCallInterface as UpgradeItCallInterfaceV1, + UpgradeItTemplate as UpgradeItTemplateV1, +} +import com.digitalasset.canton.damltests.upgrade.v1.java.upgradeif.UpgradeItInterface +import com.digitalasset.canton.damltests.upgrade.v2.java.upgrade.UpgradeItTemplate as UpgradeItTemplateV2 +import com.digitalasset.canton.data.ActionDescription.ExerciseActionDescription +import com.digitalasset.canton.data.{ + GenTransactionTree, + MerkleTree, + TransactionView, + ViewParticipantData, +} +import com.digitalasset.canton.integration.tests.security.{ + SecurityTestHelpers, + SecurityTestLensUtils, +} +import com.digitalasset.canton.integration.util.TestSubmissionService +import com.digitalasset.canton.integration.util.TestSubmissionService.CommandsWithMetadata +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentDefinition, + HasCycleUtils, + SharedEnvironment, + TestConsoleEnvironment, +} +import com.digitalasset.canton.logging.{LogEntry, SuppressionRule} +import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil +import com.digitalasset.canton.synchronizer.sequencer.HasProgrammableSequencer +import com.digitalasset.canton.topology.PartyId +import com.digitalasset.canton.util.MaliciousParticipantNode +import com.digitalasset.daml.lf.data.Ref +import monocle.macros.GenLens +import org.slf4j.event.Level + +import java.util.concurrent.atomic.AtomicReference +import scala.jdk.CollectionConverters.* + +class InvalidPackagePreferenceIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment + with SecurityTestLensUtils + with HasProgrammableSequencer + with HasCycleUtils + with SecurityTestHelpers { + + private lazy val pureCryptoRef: AtomicReference[CryptoPureApi] = new AtomicReference() + override def pureCrypto: CryptoPureApi = pureCryptoRef.get() + + override lazy val environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P1_S1M1 + + private def party(name: String)(implicit env: TestConsoleEnvironment): PartyId = + env.participant1.parties.list(name).headOption.valueOrFail("where is " + name).party + + "setup the stage" in { implicit env => + import env.* + pureCryptoRef.set(sequencer1.crypto.pureCrypto) + participant1.synchronizers.connect_local(sequencer1, alias = daName) + participant1.dars.upload(UpgradingBaseTest.UpgradeV1) + participant1.dars.upload(UpgradingBaseTest.UpgradeV2) + participant1.parties.enable("alice") + } + + val upgradePackageMap: Map[Ref.PackageId, (Ref.PackageName, Ref.PackageVersion)] = + TestSubmissionService.buildPackageMap( + Seq(UpgradeItTemplateV1.COMPANION.PACKAGE, UpgradeItTemplateV2.COMPANION.PACKAGE) + ) + + "interface resolution" should { + + def setupInterface(implicit env: FixtureParam): (PartyId, UpgradeItInterface.ContractId) = { + + import env.participant1 + + val alice = party("alice") + + val templateTx = participant1.ledger_api.javaapi.commands.submit( + Seq(alice), + new UpgradeItTemplateV1(alice.toProtoPrimitive).create.commands.asScala.toSeq, + userPackageSelectionPreference = + Seq(LfPackageId.assertFromString(UpgradeItTemplateV1.PACKAGE_ID)), + ) + + val templateCid: UpgradeItTemplateV1.ContractId = + 
JavaDecodeUtil.decodeAllCreated(UpgradeItTemplateV1.COMPANION)(templateTx).loneElement.id + + val interfaceCid1 = templateCid.toInterface(UpgradeItInterface.INTERFACE) + + (alice, interfaceCid1) + } + + "detect the submission situation where a submitted package preference is invalid" in { + implicit env => + import env.* + + val (alice, interfaceCid1) = setupInterface + assertThrowsAndLogsCommandFailures( + { + val (_, trackingResult) = trackingLedgerEvents(participants.all, Seq.empty) { + participant1.ledger_api.javaapi.commands.submit( + Seq(alice), + interfaceCid1.exerciseUpgradeItStamp("direct").commands().asScala.toSeq, + userPackageSelectionPreference = Seq( + LfPackageId.assertFromString( + UpgradeItTemplateV1.TEMPLATE_ID_WITH_PACKAGE_ID.getPackageId + ), + LfPackageId.assertFromString( + UpgradeItTemplateV2.TEMPLATE_ID_WITH_PACKAGE_ID.getPackageId + ), + ), + ) + } + trackingResult.assertNoTransactions() + }, + e => { + e.level shouldBe Level.ERROR + e.message should include( + "The submitted request has invalid arguments: duplicate preference for package-name Upgrade" + ) + }, + ) + } + + def maliciousPackagePreferenceManipulation( + packagePreference: Set[LfPackageId], + expectedWarning: String, + )(implicit env: FixtureParam): Unit = { + import env.* + val (alice, interfaceCid1) = setupInterface + + val preferred = LfPackageId.assertFromString( + UpgradeItTemplateV1.TEMPLATE_ID_WITH_PACKAGE_ID.getPackageId + ) + + val malicious = MaliciousParticipantNode( + participant1, + daId, + testedProtocolVersion, + timeouts, + loggerFactory, + ) + + val callTx = participant1.ledger_api.javaapi.commands.submit( + Seq(alice), + new UpgradeItCallInterfaceV1(alice.toProtoPrimitive).create.commands.asScala.toSeq, + ) + + val callCid: UpgradeItCallInterfaceV1.ContractId = + JavaDecodeUtil.decodeAllCreated(UpgradeItCallInterfaceV1.COMPANION)(callTx).loneElement.id + + val cmd = callCid.exerciseUpgradeItCallStamp(interfaceCid1).commands().loneElement + + // Malicious submission does not succeed + loggerFactory.assertEventuallyLogsSeq(SuppressionRule.LevelAndAbove(Level.WARN))( + { + val (_, trackingResult) = trackingLedgerEvents(participants.all, Seq.empty) { + + malicious + .submitCommand( + command = CommandsWithMetadata( + Seq(Command.fromJavaProto(cmd.toProtoCommand)), + Seq(alice), + packagePreferenceOverride = Some(Set(preferred)), + packageMapOverride = Some(upgradePackageMap), + ), + transactionTreeInterceptor = GenTransactionTree.rootViewsUnsafe + .andThen(firstElement[TransactionView]) + .andThen(TransactionView.viewParticipantDataUnsafe) + .andThen(MerkleTree.tryUnwrap[ViewParticipantData]) + .andThen(GenLens[ViewParticipantData].apply(_.actionDescription)) + .modify { + case ex: ExerciseActionDescription => + ExerciseActionDescription.packagePreferenceUnsafe + .replace(packagePreference)(ex) + case other => other + }, + ) + .futureValueUS + .value + } + trackingResult.assertNoTransactions() + }, + LogEntry.assertLogSeq( + Seq( + "TransactionConfirmationResponsesFactory:InvalidPackagePreferenceIntegrationTest" + ).map(ln => + ( + e => { + e.loggerName should include regex ln + e.level shouldBe Level.WARN + e.message should include regex expectedWarning + }, + s"Didn't find logger: $ln", + ) + ) + ), + ) + + // Non-malicious does succeed + participant1.ledger_api.javaapi.commands.submit( + Seq(alice), + callCid.exerciseUpgradeItCallStamp(interfaceCid1).commands().asScala.toSeq, + userPackageSelectionPreference = Seq(preferred), + ) + + } + + "handle the malicious situation where the 
package preference is missing" in { implicit env => + maliciousPackagePreferenceManipulation( + packagePreference = Set.empty, + expectedWarning = + """(?s)LOCAL_VERDICT_FAILED_MODEL_CONFORMANCE_CHECK.*Rejected transaction due to a failed model conformance check: DAMLeError.*UnresolvedPackageName\("Upgrade"\)""", + ) + } + + "handle the malicious situation where the package preference is wrong" in { implicit env => + maliciousPackagePreferenceManipulation( + packagePreference = Set(LfPackageId.assertFromString(UpgradeItTemplateV2.PACKAGE_ID)), + expectedWarning = + raw"(?s)LOCAL_VERDICT_FAILED_MODEL_CONFORMANCE_CHECK.*Rejected transaction due to a failed model conformance check: ViewReconstructionError.*cause = Reconstructed view differs from received view.", + ) + } + + "handle the malicious situation where the package preference is ambiguous" in { implicit env => + maliciousPackagePreferenceManipulation( + packagePreference = Set(UpgradeItTemplateV1.PACKAGE_ID, UpgradeItTemplateV2.PACKAGE_ID) + .map(LfPackageId.assertFromString), + expectedWarning = + raw"(?s)LOCAL_VERDICT_FAILED_MODEL_CONFORMANCE_CHECK.*Rejected transaction due to a failed model conformance check: ConflictingNameBindings.*has detected conflicting package name resolutions:.*Upgrade ->", + ) + } + + "handle the malicious situation where the package preference is unknown" in { implicit env => + val madeUpPkg = "MadeUpPkg" + maliciousPackagePreferenceManipulation( + packagePreference = Set(LfPackageId.assertFromString(madeUpPkg)), + expectedWarning = + raw"(?s)LOCAL_VERDICT_FAILED_MODEL_CONFORMANCE_CHECK.*Rejected transaction due to a failed model conformance check: PackageNotFound.*$madeUpPkg", + ) + } + + } +} diff --git a/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/JsonUpgradingTests.scala b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/JsonUpgradingTests.scala new file mode 100644 index 0000000000..8e20f33f87 --- /dev/null +++ b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/JsonUpgradingTests.scala @@ -0,0 +1,255 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.upgrading + +import com.daml.ledger.api.v2.commands.Commands.DeduplicationPeriod +import com.daml.ledger.api.v2.{state_service, transaction_filter} +import com.daml.ledger.javaapi.data.Identifier +import com.digitalasset.canton.admin.api.client.data.TemplateId +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.damltests.upgrade.v1.java as v1 +import com.digitalasset.canton.damltests.upgrade.v2.java as v2 +import com.digitalasset.canton.http +import com.digitalasset.canton.http.json.SprayJson +import com.digitalasset.canton.http.json.v2.JsCommandServiceCodecs.* +import com.digitalasset.canton.http.json.v2.JsContractEntry.JsActiveContract +import com.digitalasset.canton.http.json.v2.JsStateServiceCodecs.* +import com.digitalasset.canton.http.json.v2.{JsCommand, JsCommands, JsGetActiveContractsResponse} +import com.digitalasset.canton.integration.TestConsoleEnvironment +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.tests.jsonapi.AbstractHttpServiceIntegrationTestFuns.HttpServiceTestFixtureData +import com.digitalasset.canton.integration.tests.jsonapi.{HttpServiceUserFixture, HttpTestFuns} +import com.digitalasset.canton.integration.tests.upgrading.UpgradingBaseTest.{UpgradeV1, UpgradeV2} +import com.digitalasset.canton.topology.PartyId +import io.circe +import io.circe.parser.* +import io.circe.syntax.* +import org.apache.pekko.http.scaladsl.model.ws.TextMessage +import org.apache.pekko.http.scaladsl.model.{StatusCode, StatusCodes, Uri} +import spray.json.JsValue + +import java.util.UUID +import scala.concurrent.Future + +/** Smart contract upgrading JSON API integration tests. 
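+ *
+ * For orientation: the testUpDowngrading helper below posts a JsCommands body to
+ * /v2/commands/submit-and-wait. A minimal sketch of that JSON (illustrative values only,
+ * field set abbreviated; see the JsCommands construction for the full field list):
+ * {{{
+ * {
+ *   "commands": [{"CreateCommand": {"templateId": "...", "createArguments": {...}}}],
+ *   "commandId": "commandid_<uuid>",
+ *   "actAs": ["alice::..."],
+ *   "packageIdSelectionPreference": []
+ * }
+ * }}}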
+ */
+class JsonUpgradingTests extends HttpTestFuns with HttpServiceUserFixture.UserToken {
+ registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory))
+
+ private def party(name: String)(implicit env: TestConsoleEnvironment): PartyId =
+ env.participant1.parties.list(name).headOption.valueOrFail("where is " + name).party
+
+ override lazy val environmentDefinition = super.environmentDefinition.withSetup { implicit env =>
+ import env.*
+ participant1.parties.enable("alice")
+ participant1.dars.upload(UpgradeV1)
+ participant1.dars.upload(UpgradeV2)
+ }
+
+ "JSON API" when {
+ def testUpDowngrading(fixture: HttpServiceTestFixtureData, alice: PartyId)(
+ targetTemplateId: Identifier,
+ packageIdSelectionPreference: Seq[String] = Seq.empty,
+ )(jsonDamlValue: String)(asserts: (StatusCode, JsValue) => Unit) =
+ fixture.headersWithAuth.flatMap { headers =>
+ val jsCommand = JsCommand.CreateCommand(
+ templateId = TemplateId.fromJavaIdentifier(targetTemplateId).toIdentifier,
+ createArguments =
+ io.circe.parser.parse(jsonDamlValue).valueOrFail("unparseable test data"),
+ )
+
+ val cmds = JsCommands(
+ commands = Seq(jsCommand),
+ workflowId = None,
+ userId = Some("CantonConsole"),
+ commandId = s"commandid_${UUID.randomUUID()}",
+ deduplicationPeriod = Some(DeduplicationPeriod.Empty),
+ actAs = Seq(alice.toLf),
+ readAs = Seq.empty,
+ submissionId = None,
+ synchronizerId = None,
+ minLedgerTimeAbs = None,
+ minLedgerTimeRel = None,
+ disclosedContracts = Seq.empty,
+ packageIdSelectionPreference = packageIdSelectionPreference,
+ )
+ for {
+ _ <- postJsonRequest(
+ uri = fixture.uri.withPath(Uri.Path("/v2/commands/submit-and-wait")),
+ json = SprayJson.parse(cmds.asJson.noSpaces).valueOr(err => fail(s"$err")),
+ headers = headers,
+ ).map { case (statusCode, result) =>
+ asserts(statusCode, result)
+ }
+ } yield ()
+ }
+
+ "be able to upgrade a Daml data payload" in httpTestFixture { fixture =>
+ implicit val env: TestConsoleEnvironment = provideEnvironment
+
+ val alice = party("alice")
+ testUpDowngrading(fixture = fixture, alice = alice)(targetTemplateId =
+ v2.upgrade.Upgrading.TEMPLATE_ID_WITH_PACKAGE_ID
+ )(
+ jsonDamlValue = s"""{"issuer":"${alice.toLf}", "owner":"${alice.toLf}", "field": "1337" }"""
+ ) { case (status, _) => status should be(StatusCodes.OK) }
+ }
+
+ "be able to downgrade a Daml data payload" in httpTestFixture { fixture =>
+ implicit val env: TestConsoleEnvironment = provideEnvironment
+ val alice = party("alice")
+
+ testUpDowngrading(fixture = fixture, alice = alice)(targetTemplateId =
+ v1.upgrade.Upgrading.TEMPLATE_ID_WITH_PACKAGE_ID
+ )(
+ jsonDamlValue =
+ s"""{"issuer":"${alice.toLf}", "owner":"${alice.toLf}", "field": "1337", "more": null }"""
+ ) { case (status, _) => status should be(StatusCodes.OK) }
+ }
+
+ "reject a downgrade with a populated optional field" in httpTestFixture { fixture =>
+ implicit val env: TestConsoleEnvironment = provideEnvironment
+
+ val alice = party("alice")
+
+ testUpDowngrading(fixture = fixture, alice = alice)(targetTemplateId =
+ v1.upgrade.Upgrading.TEMPLATE_ID_WITH_PACKAGE_ID
+ )(
+ jsonDamlValue =
+ s"""{"issuer":"${alice.toLf}", "owner":"${alice.toLf}", "field": "1337", "more": "don't ignore meee!!"
}"""
+ ) { case (status, result) =>
+ status should be(StatusCodes.BadRequest)
+ result.toString() should include(
+ "The submitted request has invalid arguments: Unexpected fields: more"
+ )
+ }
+ }
+
+ "consider the Commands.packageIdSelectionPreference" in httpTestFixture { fixture =>
+ implicit val env: TestConsoleEnvironment = provideEnvironment
+
+ val alice = party("alice")
+
+ testUpDowngrading(fixture = fixture, alice = alice)(
+ targetTemplateId = v1.upgrade.Upgrading.TEMPLATE_ID,
+ packageIdSelectionPreference = Seq(v1.upgrade.Upgrading.PACKAGE_ID),
+ )(
+ jsonDamlValue =
+ s"""{"issuer":"${alice.toLf}", "owner":"${alice.toLf}", "field": "1337", "more": "don't ignore meee!!" }"""
+ ) { case (status, result) =>
+ status should be(StatusCodes.BadRequest)
+ result.toString() should include(
+ "The submitted request has invalid arguments: Unexpected fields: more"
+ )
+ }
+ }
+
+ "accept package name for create command" in httpTestFixture { fixture =>
+ implicit val env: TestConsoleEnvironment = provideEnvironment
+ val alice = party("alice")
+
+ testUpDowngrading(fixture = fixture, alice = alice)(targetTemplateId =
+ v1.upgrade.Upgrading.TEMPLATE_ID
+ )(
+ jsonDamlValue =
+ s"""{"issuer":"${alice.toLf}", "owner":"${alice.toLf}", "field": "1337", "more": null }"""
+ ) { case (status, _) =>
+ status should be(StatusCodes.OK)
+ }
+ }
+
+ "be able to query using package name" in httpTestFixture { fixture =>
+ implicit val env: TestConsoleEnvironment = provideEnvironment
+ val alice = party("alice")
+ for {
+ _ <- testUpDowngrading(fixture = fixture, alice = alice)(targetTemplateId =
+ v1.upgrade.Upgrading.TEMPLATE_ID_WITH_PACKAGE_ID
+ )(
+ jsonDamlValue =
+ s"""{"issuer":"${alice.toLf}", "owner":"${alice.toLf}", "field": "42", "more": null }"""
+ ) { case (status, _) => status should be(StatusCodes.OK) }
+ jwt <- jwtForParties(fixture.uri)(List(http.Party(alice.toLf)), List())
+ eventFormat = transaction_filter.EventFormat(
+ filtersByParty = Map.empty,
+ filtersForAnyParty = Some(
+ transaction_filter.Filters(
+ cumulative = Seq(
+ transaction_filter.CumulativeFilter(
+ identifierFilter = transaction_filter.CumulativeFilter.IdentifierFilter
+ .TemplateFilter(
+ transaction_filter.TemplateFilter(
+ templateId = Some(
+ TemplateId
+ .fromJavaIdentifier(v1.upgrade.Upgrading.TEMPLATE_ID)
+ .toIdentifier
+ ),
+ includeCreatedEventBlob = false,
+ )
+ )
+ )
+ )
+ )
+ ),
+ verbose = false,
+ )
+ endOffset = env.participant1.ledger_api.state.end()
+ messagesStream <- fixture.getStream(
+ path = Uri.Path("/v2/state/active-contracts"),
+ jwt = jwt,
+ message = TextMessage(
+ state_service
+ .GetActiveContractsRequest(
+ activeAtOffset = endOffset,
+ eventFormat = Some(eventFormat),
+ )
+ .asJson
+ .noSpaces
+ ),
+ decoder = decode[JsGetActiveContractsResponse],
+ filter = { (m: Either[circe.Error, JsGetActiveContractsResponse]) =>
+ m match {
+ case Right(activeContract) =>
+ activeContract.contractEntry match {
+ case active: JsActiveContract =>
+ active.createdEvent.createArgument.value.noSpaces
+ .contains(""""field":"42"""")
+ case _ => false
+ }
+ case _ => false
+ }
+ },
+ )
+ messages <- Future(messagesStream)
+ } yield inside(messages.head.value.contractEntry) {
+ case JsActiveContract(created_event, _, _) =>
+ created_event.templateId.packageId should be(
+ v1.upgrade.Upgrading.PACKAGE_ID
+ )
+ }
+ }
+
+ "fail on wrong package reference" in httpTestFixture { fixture =>
+ implicit val env: TestConsoleEnvironment = provideEnvironment
+
+ val alice = party("alice")
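+
+ // Note (for context, not part of the original test): template references are accepted either
+ // in the package-id format ("<package-id>:Module:Entity") or in the package-name format
+ // ("#<package-name>:Module:Entity"). The reference below is deliberately malformed so that it
+ // matches neither format and the request is rejected before interpretation.
+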
testUpDowngrading(fixture = fixture, alice = alice)(targetTemplateId = + new Identifier( + s"węry wrong package reference", + v1.upgrade.Upgrading.TEMPLATE_ID.getModuleName, + v1.upgrade.Upgrading.TEMPLATE_ID.getEntityName, + ) + )( + jsonDamlValue = s"""{"issuer":"${alice.toLf}", "owner":"${alice.toLf}", "field": "1337" }""" + ) { case (status, result) => + status should be(StatusCodes.BadRequest) + result.toString() should include( + "The submitted request has invalid arguments: Value does not match the package-id or package-name formats" + ) + } + } + + // TODO(#25385): Add tests for showcasing package selection on incompatible vetted templates + // once package upgrade compatibility checks can be disabled. + } +} diff --git a/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/LedgerApiCommandUpgradingIntegrationTest.scala b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/LedgerApiCommandUpgradingIntegrationTest.scala new file mode 100644 index 0000000000..81c50189bb --- /dev/null +++ b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/LedgerApiCommandUpgradingIntegrationTest.scala @@ -0,0 +1,385 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.upgrading + +import com.daml.ledger.api.v2.commands.Command +import com.daml.ledger.api.v2.commands.Command.toJavaProto +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_LEDGER_EFFECTS +import com.daml.ledger.api.v2.value.Identifier +import com.daml.ledger.javaapi +import com.daml.ledger.javaapi.data +import com.daml.ledger.javaapi.data.codegen.{Contract, ContractCompanion} +import com.daml.ledger.javaapi.data.{Unit as _, *} +import com.digitalasset.canton.config.DbConfig.Postgres +import com.digitalasset.canton.console.{CommandFailure, LocalParticipantReference} +import com.digitalasset.canton.damltests.upgrade.v1.java as v1 +import com.digitalasset.canton.damltests.upgrade.v2.java as v2 +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentDefinition, + SharedEnvironment, + TestConsoleEnvironment, +} +import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil +import com.digitalasset.canton.topology.PartyId +import com.digitalasset.daml.lf.data.{Bytes, Ref} +import com.digitalasset.daml.lf.transaction.TransactionCoder +import monocle.macros.syntax.lens.* +import org.scalatest.Assertion + +import java.util +import java.util.Collections +import scala.jdk.CollectionConverters.* +import scala.jdk.OptionConverters.* +import scala.util.chaining.* + +sealed abstract class LedgerApiCommandUpgradingIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment { + + private val byPackageNameIdentifier: Identifier = + Identifier.fromJavaProto(v1.upgrade.Upgrading.TEMPLATE_ID.toProto) + + private def party(name: String)(implicit env: TestConsoleEnvironment): PartyId = + env.participant1.parties.list(name).headOption.valueOrFail("where is " + name).party + + private var alice3: PartyId = _ + private var bob3: PartyId = _ + + override lazy val environmentDefinition: EnvironmentDefinition = + 
EnvironmentDefinition.P3_S1M1.withSetup { implicit env =>
+ import env.*
+
+ participant1.synchronizers.connect_local(sequencer1, alias = daName)
+ participant2.synchronizers.connect_local(sequencer1, alias = daName)
+ participant3.synchronizers.connect_local(sequencer1, alias = daName)
+
+ participant1.parties.enable("alice1")
+ participant1.parties.enable("bob1")
+
+ // Participants 1 and 2 have both versions
+
+ participant1.dars.upload(UpgradingBaseTest.UpgradeV1)
+ participant1.dars.upload(UpgradingBaseTest.UpgradeV2)
+
+ participant2.dars.upload(UpgradingBaseTest.UpgradeV1)
+ participant2.dars.upload(UpgradingBaseTest.UpgradeV2)
+
+ // Participant 3 initially has just V1
+
+ alice3 = participant3.parties.enable("alice3")
+ bob3 = participant3.parties.enable("bob3")
+ participant3.dars.upload(UpgradingBaseTest.UpgradeV1)
+ }
+
+ "The Ledger API" when {
+ "commands are submitted with a package-name-scoped template id" should {
+ "resolve to the available package id" in { implicit env =>
+ val templateCon =
+ new v1.upgrade.Upgrading(alice3.toProtoPrimitive, alice3.toProtoPrimitive, 0)
+ checkAllCommandTypes[v1.upgrade.Upgrading, v1.upgrade.Upgrading.Contract](
+ templateCon = templateCon,
+ exercise = _.id.exerciseChangeOwner(bob3.toProtoPrimitive).commands.asScala.toSeq,
+ createAndExercise =
+ templateCon.createAnd.exerciseChangeOwner(bob3.toProtoPrimitive).commands.asScala.toSeq,
+ queryingParty = alice3,
+ participantOverride = Some(env.participant3),
+ )(v1.upgrade.Upgrading.COMPANION)
+ }
+
+ "use the newest uploaded package" in { implicit env =>
+ // Upload the upgraded template version
+ env.participant3.dars.upload(UpgradingBaseTest.UpgradeV2)
+
+ val templateCon =
+ new v1.upgrade.Upgrading(alice3.toProtoPrimitive, alice3.toProtoPrimitive, 0)
+ checkAllCommandTypes[v1.upgrade.Upgrading, v2.upgrade.Upgrading.Contract](
+ templateCon = templateCon,
+ exercise = _.id.exerciseChangeOwner(bob3.toProtoPrimitive).commands.asScala.toSeq,
+ createAndExercise =
+ templateCon.createAnd.exerciseChangeOwner(bob3.toProtoPrimitive).commands.asScala.toSeq,
+ queryingParty = alice3,
+ participantOverride = Some(env.participant3),
+ )(v2.upgrade.Upgrading.COMPANION)
+ }
+
+ "override with user package preference" in { implicit env =>
+ // Pin the submission to V1 via an explicit user package preference
+
+ val alice = party("alice1")
+ val bob = party("bob1")
+
+ val templateCon =
+ new v1.upgrade.Upgrading(alice.toProtoPrimitive, alice.toProtoPrimitive, 0)
+ checkAllCommandTypes[v1.upgrade.Upgrading, v1.upgrade.Upgrading.Contract](
+ templateCon = templateCon,
+ exercise = _.id.exerciseChangeOwner(bob.toProtoPrimitive).commands.asScala.toSeq,
+ createAndExercise =
+ templateCon.createAnd.exerciseChangeOwner(bob.toProtoPrimitive).commands.asScala.toSeq,
+ queryingParty = alice,
+ userPackagePreference =
+ Some(Ref.PackageId.assertFromString(v1.upgrade.Upgrading.PACKAGE_ID)),
+ )(v1.upgrade.Upgrading.COMPANION)
+ }
+ }
+
+ "upgrading a disclosed contract" should {
+ "work" in { implicit env =>
+ val alice =
+ env.participant1.parties
+ .enable(
+ "discloser_upgrade",
+ synchronizeParticipants = Seq(env.participant2),
+ )
+ val bob =
+ env.participant2.parties
+ .enable(
+ "disclosee_upgrade",
+ synchronizeParticipants = Seq(env.participant1),
+ )
+
+ suppressPackageIdWarning(
+ testExplicitDisclosureUpDowngrading(
+ discloser = alice,
+ disclosee = bob,
+ sourceTemplate =
+ new v1.upgrade.Upgrading(alice.toProtoPrimitive, alice.toProtoPrimitive, 0),
+ sourceTemplateId = v1.upgrade.Upgrading.TEMPLATE_ID_WITH_PACKAGE_ID,
+
exerciseFetchOnTargetVersion = new v2.upgrade.Upgrading.ContractId(_)
+ .exerciseUpgrading_Fetch(bob.toProtoPrimitive)
+ .commands()
+ .overridePackageId(v2.upgrade.Upgrading.PACKAGE_ID),
+ )
+ )
+ }
+ }
+
+ "downgrading a disclosed contract" should {
+ "work" in { implicit env =>
+ val alice =
+ env.participant1.parties
+ .enable(
+ "discloser_downgrade",
+ synchronizeParticipants = Seq(env.participant2),
+ )
+ val bob =
+ env.participant2.parties
+ .enable(
+ "disclosee_downgrade",
+ synchronizeParticipants = Seq(env.participant1),
+ )
+
+ suppressPackageIdWarning(
+ testExplicitDisclosureUpDowngrading(
+ discloser = alice,
+ disclosee = bob,
+ sourceTemplate = new v2.upgrade.Upgrading(
+ alice.toProtoPrimitive,
+ alice.toProtoPrimitive,
+ 0,
+ java.util.Optional.empty(),
+ ),
+ sourceTemplateId = v2.upgrade.Upgrading.TEMPLATE_ID_WITH_PACKAGE_ID,
+ exerciseFetchOnTargetVersion = new v1.upgrade.Upgrading.ContractId(_)
+ .exerciseUpgrading_Fetch(bob.toProtoPrimitive)
+ .commands(),
+ )
+ )
+ }
+ }
+
+ "upgrading a manipulated disclosed contract" should {
+
+ "fail on upgrade verification failure" in { implicit env =>
+ val alice = env.participant1.parties
+ .enable(
+ "discloser_upgrade_failure",
+ synchronizeParticipants = Seq(env.participant2),
+ )
+ val bob = env.participant2.parties
+ .enable(
+ "disclosee_upgrade_failure",
+ synchronizeParticipants = Seq(env.participant1),
+ )
+
+ loggerFactory.assertThrowsAndLogs[CommandFailure](
+ testExplicitDisclosureUpDowngrading(
+ discloser = alice,
+ disclosee = bob,
+ sourceTemplate =
+ new v1.upgrade.Upgrading(alice.toProtoPrimitive, alice.toProtoPrimitive, 0),
+ sourceTemplateId = v1.upgrade.Upgrading.TEMPLATE_ID_WITH_PACKAGE_ID,
+ exerciseFetchOnTargetVersion = new v2.upgrade.Upgrading.ContractId(_)
+ .exerciseUpgrading_Fetch(bob.toProtoPrimitive)
+ .commands(),
+ mutateDisclosedContract = (disclosedContract: DisclosedContract) =>
+ new DisclosedContract(
+ TransactionCoder
+ .decodeFatContractInstance(disclosedContract.createdEventBlob)
+ .valueOrFail("unexpected decode failure")
+ .setAuthenticationData(Bytes.assertFromString("abcdef"))
+ .pipe(TransactionCoder.encodeFatContractInstance)
+ .valueOrFail("encode failed"),
+ disclosedContract.synchronizerId.get(),
+ disclosedContract.templateId,
+ disclosedContract.contractId,
+ ),
+ ),
+ _.warningMessage should include regex "Received an identifier with package ID .*, but expected a package name.",
+ _.commandFailureMessage should
+ (include("Request failed for participant2") and
+ include("FAILED_PRECONDITION/INTERPRETATION_DEV_ERROR") and
+ include("failed to authenticate contract")),
+ )
+ }
+ }
+ }
+
+ private def testExplicitDisclosureUpDowngrading(
+ discloser: PartyId,
+ disclosee: PartyId,
+ sourceTemplate: Template,
+ sourceTemplateId: data.Identifier,
+ exerciseFetchOnTargetVersion: String => util.List[data.Command],
+ mutateDisclosedContract: DisclosedContract => DisclosedContract = identity,
+ )(implicit env: FixtureParam): Unit = {
+ import env.*
+
+ participant1.ledger_api.javaapi.commands.submit(
+ Seq(discloser),
+ sourceTemplate
+ .create()
+ .commands()
+ .overridePackageId(sourceTemplateId.getPackageId)
+ .asScala
+ .toSeq,
+ )
+
+ val txs = participant1.ledger_api.javaapi.updates.transactions_with_tx_format(
+ new TransactionFormat(
+ new EventFormat(
+ Collections.singletonMap(
+ discloser.toProtoPrimitive,
+ new CumulativeFilter(
+ Collections.emptyMap[data.Identifier, Filter.Interface](),
+ Collections.singletonMap[data.Identifier, Filter.Template](
+ sourceTemplateId,
+
Filter.Template.INCLUDE_CREATED_EVENT_BLOB, + ), + None.toJava, + ), + ), + None.toJava, + false, + ), + TransactionShape.ACS_DELTA, + ), + 1, + ) + + val disclosedContract = + JavaDecodeUtil.decodeDisclosedContracts(txs.headOption.value.getTransaction.get).loneElement + + participant2.ledger_api.javaapi.commands + .submit( + Seq(disclosee), + exerciseFetchOnTargetVersion(disclosedContract.contractId.toScala.value).asScala.toSeq, + disclosedContracts = Seq(mutateDisclosedContract(disclosedContract)), + ) + .discard + } + + private def checkAllCommandTypes[I <: Template, TCOut <: Contract[?, ?]]( + templateCon: I, + exercise: TCOut => Seq[javaapi.data.Command], + createAndExercise: Seq[javaapi.data.Command], + queryingParty: PartyId, + userPackagePreference: Option[Ref.PackageId] = None, + participantOverride: Option[LocalParticipantReference] = None, + )(tc: ContractCompanion[TCOut, ?, ?])(implicit env: FixtureParam): Assertion = { + val participant = participantOverride.getOrElse(env.participant1) + + // Create by specifying the package name + val createUpgrading_byPackageName = + templateCon.create.commands.asScala.toSeq + .map(_.withPackageName) + .pipe( + participant.ledger_api.javaapi.commands.submit( + Seq(queryingParty), + _, + userPackageSelectionPreference = userPackagePreference.toList, + ) + ) + .pipe(JavaDecodeUtil.decodeAllCreated(tc)) + .pipe(inside(_) { case Seq(contract) => contract }) + + // Exercise command on the previously created contract + exercise(createUpgrading_byPackageName) + .map(_.withPackageName) + .pipe( + participant.ledger_api.javaapi.commands.submit( + Seq(queryingParty), + _, + userPackageSelectionPreference = userPackagePreference.toList, + ) + ) + .pipe(JavaDecodeUtil.decodeAllArchived(tc)) + .pipe(inside(_) { case Seq(cId) => cId shouldBe createUpgrading_byPackageName.id }) + + // TODO(#15114): Test ExerciseByKey + // CreateAndExercise command + createAndExercise + .map(_.withPackageName) + .pipe( + participant.ledger_api.javaapi.commands.submit( + Seq(queryingParty), + _, + userPackageSelectionPreference = userPackagePreference.toList, + transactionShape = TRANSACTION_SHAPE_LEDGER_EFFECTS, + ) + ) + .pipe { tx => + inside(JavaDecodeUtil.decodeAllCreated(tc)(tx)) { contracts => + val cid = JavaDecodeUtil.decodeAllArchivedLedgerEffectsEvents(tc)(tx).loneElement + + contracts.map(_.id) should contain(cid) + } + } + } + + private implicit class CommandWithoutPackageId(commandJava: javaapi.data.Command) { + def withPackageName: javaapi.data.Command = { + val command = Command.fromJavaProto(commandJava.toProtoCommand) + val res = command.command match { + case Command.Command.Empty => command + case c: Command.Command.Create => + command.copy(command = + c.focus(_.value.templateId) + .modify(_.map(_ => byPackageNameIdentifier)) + ) + case c: Command.Command.Exercise => + command.copy(command = + c.focus(_.value).modify(_.copy(templateId = Some(byPackageNameIdentifier))) + ) + case c: Command.Command.ExerciseByKey => + command.copy(command = + c.focus(_.value).modify(_.copy(templateId = Some(byPackageNameIdentifier))) + ) + case c: Command.Command.CreateAndExercise => + command.copy(command = + c.focus(_.value.templateId) + .modify(_.map(_ => byPackageNameIdentifier)) + ) + } + javaapi.data.Command.fromProtoCommand(toJavaProto(res)) + } + } +} + +final class ReferenceLedgerApiCommandUpgradingIntegrationTestPostgres + extends LedgerApiCommandUpgradingIntegrationTest { + registerPlugin(new UseReferenceBlockSequencer[Postgres](loggerFactory)) +} diff --git 
a/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/LedgerApiDynamicTemplateFilterIntegrationTest.scala b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/LedgerApiDynamicTemplateFilterIntegrationTest.scala new file mode 100644 index 0000000000..6f2fd6642e --- /dev/null +++ b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/LedgerApiDynamicTemplateFilterIntegrationTest.scala @@ -0,0 +1,238 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.upgrading + +import com.daml.ledger.api.v2.ValueOuterClass +import com.daml.ledger.api.v2.event.CreatedEvent +import com.daml.ledger.api.v2.transaction_filter.CumulativeFilter.IdentifierFilter +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_ACS_DELTA +import com.daml.ledger.api.v2.transaction_filter.{ + CumulativeFilter, + EventFormat, + Filters, + TemplateFilter, + TransactionFormat, +} +import com.daml.ledger.api.v2.value.Identifier as ScalaPbIdentifier +import com.daml.ledger.api.v2.value.Identifier.toJavaProto +import com.daml.ledger.javaapi.data.CreatedEvent as JavaCreatedEvent +import com.daml.ledger.javaapi.data.codegen.{Contract, ContractCompanion} +import com.digitalasset.canton.admin.api.client.data.TemplateId +import com.digitalasset.canton.config.DbConfig.Postgres +import com.digitalasset.canton.console.LocalParticipantReference +import com.digitalasset.canton.damltests.upgrade.v1.java.upgrade.Upgrading as UpgradingV1 +import com.digitalasset.canton.damltests.upgrade.v2.java.upgrade.Upgrading as UpgradingV2 +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + ConfigTransforms, + EnvironmentDefinition, + SharedEnvironment, + TestConsoleEnvironment, +} +import com.digitalasset.canton.participant.config.ParticipantNodeConfig +import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil +import com.digitalasset.canton.platform.apiserver.SeedService +import com.digitalasset.canton.topology.PartyId +import com.digitalasset.daml.lf.transaction.TransactionCoder +import monocle.Monocle.toAppliedFocusOps +import org.scalatest.Assertion + +import scala.concurrent.Future +import scala.jdk.CollectionConverters.{CollectionHasAsScala, SeqHasAsJava} +import scala.jdk.OptionConverters.RichOption + +//TODO(#16651): Consider extending the conformance test suite with package-name scoping tests for stream subscriptions +abstract class LedgerApiDynamicTemplateFilterIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment { + private val Upgrading_Identifier = ScalaPbIdentifier + .fromJavaProto(UpgradingV1.TEMPLATE_ID.toProto) + + override lazy val environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P1_S1M1.withSetup(setup) + + protected def setup(env: TestConsoleEnvironment): Unit = { + import env.* + participant1.synchronizers.connect_local(sequencer1, alias = daName) + + // Upload Upgrade V1 + participant1.dars.upload(UpgradingBaseTest.UpgradeV1) + } + + "The Ledger API" when { + "a template is upgraded and a new contract with it is created during an ongoing subscription" should { + "retrieve the contracts from the upgraded template as well" in { 
implicit env => + import env.* + + val alice = + participant1.parties.enable("alice") + val aliceP = alice.toProtoPrimitive + + // Start ongoing subscriptions for Upgrade package name + val upgradingSubscriptions_blob = new Subscriptions( + participant = participant1, + party = alice, + filterIdentifier = Upgrading_Identifier, + includeCreatedEventBlob = true, + expectedCreatesSize = 2, + ) + + // Create an Upgrading V1 contract + val payload_UpgradingV1 = new UpgradingV1(aliceP, aliceP, 1L) + participant1.ledger_api.javaapi.commands + .submit(Seq(alice), payload_UpgradingV1.create().commands().asScala.toSeq) + + // Upload Upgrade V2 + participant1.dars.upload(UpgradingBaseTest.UpgradeV2) + + // Create an Upgrading V2 contract + val payload_UpgradingV2 = + new UpgradingV2(aliceP, aliceP, 2L, Some(Seq("more").asJava).toJava) + participant1.ledger_api.javaapi.commands + .submit(Seq(alice), payload_UpgradingV2.create().commands().asScala.toSeq) + + upgradingSubscriptions_blob.creates match { + case Seq(c1, c2) => + assertCreate[UpgradingV1.Contract, UpgradingV1.ContractId, UpgradingV1]( + create = c1, + companion = UpgradingV1.COMPANION, + expectedPayload = payload_UpgradingV1, + expectedIdentifier = UpgradingV1.TEMPLATE_ID_WITH_PACKAGE_ID.toProto, + ) + assertCreate[UpgradingV2.Contract, UpgradingV2.ContractId, UpgradingV2]( + create = c2, + companion = UpgradingV2.COMPANION, + expectedPayload = payload_UpgradingV2, + expectedIdentifier = UpgradingV2.TEMPLATE_ID_WITH_PACKAGE_ID.toProto, + ) + case other => fail(s"Expected two create events, got ${other.size}") + } + } + } + } + + private def assertCreate[Ct <: Contract[Id, Data], Id, Data]( + create: CreatedEvent, + companion: ContractCompanion.WithoutKey[Ct, Id, Data], + expectedPayload: Data, + expectedIdentifier: ValueOuterClass.Identifier, + ): Assertion = { + toJavaProto(create.templateId.value) shouldBe expectedIdentifier + create.createdEventBlob should not be empty + TransactionCoder + .decodeFatContractInstance(create.createdEventBlob) + .value + .packageName shouldBe UpgradingV1.PACKAGE_NAME + create.packageName shouldBe UpgradingV1.PACKAGE_NAME + + JavaDecodeUtil + .decodeCreated(companion)(JavaCreatedEvent.fromProto(CreatedEvent.toJavaProto(create))) + .value + .data shouldBe expectedPayload + } + + private class Subscriptions( + participant: LocalParticipantReference, + party: PartyId, + filterIdentifier: ScalaPbIdentifier, + includeCreatedEventBlob: Boolean, + expectedCreatesSize: Int, + )(implicit env: FixtureParam) { + import env.* + + private val flatTxsF = Future { + participant.ledger_api.updates.transactions_with_tx_format( + transactionFormat = transactionFormat( + filterIdentifier, + party.toProtoPrimitive, + includeCreatedEventBlob = includeCreatedEventBlob, + ), + completeAfter = expectedCreatesSize, + ) + } + + def creates: Seq[CreatedEvent] = { + val updateCreates = flatTxsF.map(_.flatMap(_.createEvents)).futureValue + + val acsCreates = participant.ledger_api.state.acs + .of_party( + party, + filterTemplates = Seq(TemplateId.fromIdentifier(Upgrading_Identifier)), + includeCreatedEventBlob = includeCreatedEventBlob, + ) + .map(_.event) + + acsCreates should have size expectedCreatesSize.toLong + updateCreates should have size expectedCreatesSize.toLong + + acsCreates zip updateCreates foreach { case (acsCreate, updateCreate) => + updateCreate shouldBe acsCreate + } + + acsCreates + } + } + + private def transactionFormat( + identifier: ScalaPbIdentifier, + party: String, + includeCreatedEventBlob: Boolean, + ) = + 
TransactionFormat( + eventFormat = Some( + EventFormat( + filtersByParty = Map( + party -> Filters( + Seq( + CumulativeFilter( + IdentifierFilter.TemplateFilter( + TemplateFilter( + templateId = Some(identifier), + includeCreatedEventBlob = includeCreatedEventBlob, + ) + ) + ) + ) + ) + ), + filtersForAnyParty = None, + verbose = true, + ) + ), + transactionShape = TRANSACTION_SHAPE_ACS_DELTA, + ) +} + +final class ReferenceLedgerApiDynamicTemplateFilterIntegrationTestPostgres + extends LedgerApiDynamicTemplateFilterIntegrationTest { + registerPlugin(new UseReferenceBlockSequencer[Postgres](loggerFactory)) +} + +//TODO(#16651): Remove this test once package-name test coverage is ensured by conformance tests +final class ReferenceLedgerApiDynamicTemplateFilterIntegrationTestNoCaches + extends LedgerApiDynamicTemplateFilterIntegrationTest { + registerPlugin(new UseReferenceBlockSequencer[Postgres](loggerFactory)) + + override lazy val environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P1_S1M1 + .addConfigTransform(ConfigTransforms.updateParticipantConfig("participant1") { + (c: ParticipantNodeConfig) => + c.focus(_.ledgerApi.userManagementService.enabled) + .replace(true) + .focus(_.ledgerApi.userManagementService.maxCacheSize) + .replace(0) + .focus(_.ledgerApi.userManagementService.maxRightsPerUser) + .replace(100) + .focus(_.parameters.ledgerApiServer.contractIdSeeding) + .replace(SeedService.Seeding.Weak) + .focus(_.ledgerApi.indexService.maxContractKeyStateCacheSize) + .replace(0) + .focus(_.ledgerApi.indexService.maxContractStateCacheSize) + .replace(0) + .focus(_.ledgerApi.indexService.maxTransactionsInMemoryFanOutBufferSize) + .replace(0) + }) + .withSetup(setup) +} diff --git a/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/NonConformingUpgradeIntegrationTest.scala b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/NonConformingUpgradeIntegrationTest.scala new file mode 100644 index 0000000000..1251eaa21b --- /dev/null +++ b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/NonConformingUpgradeIntegrationTest.scala @@ -0,0 +1,365 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.upgrading + +import com.daml.ledger.api.v2.commands.{Command, ExerciseCommand} +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.TRANSACTION_SHAPE_LEDGER_EFFECTS +import com.daml.ledger.javaapi +import com.daml.ledger.javaapi.data.codegen.{Exercised, Update} +import com.digitalasset.canton.config.DbConfig.Postgres +import com.digitalasset.canton.console.LocalParticipantReference +import com.digitalasset.canton.damltests.nonconforming.v1.java as v1 +import com.digitalasset.canton.damltests.nonconforming.v1.java.nonconforming.BankTransfer +import com.digitalasset.canton.damltests.nonconforming.v2.java as v2 +import com.digitalasset.canton.integration.ConfigTransforms.disableUpgradeValidation +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentDefinition, + SharedEnvironment, + TestConsoleEnvironment, +} +import com.digitalasset.canton.logging.LogEntry +import com.digitalasset.canton.topology.PartyId + +import scala.jdk.CollectionConverters.* +import scala.jdk.OptionConverters.* + +/** Primary concern is testing run-time model conformance failures + */ +class NonConformingUpgradeIntegrationTest extends CommunityIntegrationTest with SharedEnvironment { + + registerPlugin(new UseReferenceBlockSequencer[Postgres](loggerFactory)) + + private var bank: PartyId = _ + private var alice: PartyId = _ + private var bob: PartyId = _ + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P1_S1M1 + .addConfigTransform(disableUpgradeValidation) + .withSetup { implicit env => + import env.* + + participant1.synchronizers.connect_local(sequencer1, alias = daName) + initializedSynchronizers(daName) + + participant1.dars.upload(UpgradingBaseTest.NonConformingV1) + participant1.dars.upload(UpgradingBaseTest.NonConformingV2) + + bank = participant1.parties.enable("bank") + alice = participant1.parties.enable("alice") + bob = participant1.parties.enable("bob") + } + + /** Upgrading depends on ExerciseActionDescription containing the templateId that can be different + * from the contract templateId. 
This change was only done as part of ProtocolVersion.V5.
+ */
+
+ private def getVersion(
+ participant: LocalParticipantReference,
+ issuer: PartyId,
+ exercise: javaapi.data.ExerciseCommand,
+ expectException: Boolean = false,
+ ): Option[String] =
+ if (!expectException) {
+ val version = callGetVersion(participant, issuer, exercise)
+ Some(version)
+ } else {
+ intercept[Throwable] {
+ callGetVersion(participant, issuer, exercise)
+ }
+ None
+ }
+
+ private def callGetVersion(
+ participant: LocalParticipantReference,
+ issuer: PartyId,
+ exerciseJava: javaapi.data.ExerciseCommand,
+ ): String = {
+ val exercise = ExerciseCommand.fromJavaProto(exerciseJava.toProto)
+ participant.ledger_api.commands
+ .submit(
+ actAs = Seq(issuer),
+ commands = Seq(Command.defaultInstance.withExercise(exercise)),
+ transactionShape = TRANSACTION_SHAPE_LEDGER_EFFECTS,
+ )
+ .events
+ .loneElement
+ .event
+ .exercised
+ .value
+ .exerciseResult
+ .value
+ .sum
+ .text
+ .value
+ }
+
+ private def checkVersionV1(
+ cidV1: v1.nonconforming.NonConforming.ContractId,
+ participant: LocalParticipantReference,
+ issuer: PartyId,
+ ): Unit =
+ clue("checkVersionV1") {
+ getVersion(
+ participant,
+ issuer,
+ cidV1
+ .exerciseGetVersion()
+ .commands
+ .overridePackageId(v1.nonconforming.NonConforming.PACKAGE_ID)
+ .loneElement
+ .asExerciseCommand
+ .toScala
+ .value,
+ ) shouldBe Some("V1")
+ }
+
+ private def checkVersionV2(
+ cidV2: v2.nonconforming.NonConforming.ContractId,
+ participant: LocalParticipantReference,
+ issuer: PartyId,
+ expectException: Boolean,
+ ): Unit = {
+ val expected = if (expectException) None else Some("V2")
+ clue("checkVersionV2") {
+ getVersion(
+ participant,
+ issuer,
+ cidV2
+ .exerciseGetVersion()
+ .commands
+ .overridePackageId(v2.nonconforming.NonConforming.PACKAGE_ID)
+ .loneElement
+ .asExerciseCommand
+ .toScala
+ .value,
+ expectException,
+ ) shouldBe expected
+ }
+ }
+
+ private def checkVersionCkV1(
+ cidV1: v1.nonconforming.NonConformingCK.ContractId,
+ participant: LocalParticipantReference,
+ issuer: PartyId,
+ ): Unit =
+ clue("checkCkVersionV1") {
+ getVersion(
+ participant,
+ issuer,
+ cidV1
+ .exerciseGetVersionCK()
+ .commands
+ .overridePackageId(v1.nonconforming.NonConforming.PACKAGE_ID)
+ .loneElement
+ .asExerciseCommand
+ .toScala
+ .value,
+ ) shouldBe Some("V1")
+ }
+
+ private def checkVersionCkV2(
+ cidV2: v2.nonconforming.NonConformingCK.ContractId,
+ participant: LocalParticipantReference,
+ issuer: PartyId,
+ expectException: Boolean,
+ ): Unit =
+ clue("checkCkVersionV2") {
+ val expected = if (expectException) None else Some("V2")
+ getVersion(
+ participant,
+ issuer,
+ cidV2
+ .exerciseGetVersionCK()
+ .commands
+ .overridePackageId(v2.nonconforming.NonConforming.PACKAGE_ID)
+ .loneElement
+ .asExerciseCommand
+ .toScala
+ .value,
+ expectException,
+ ) shouldBe expected
+ }
+
+ private def testNonConformingInMode(mode: Long)(implicit
+ env: TestConsoleEnvironment
+ ): Unit = {
+ import env.*
+
+ participant1.ledger_api.javaapi.commands.submit(
+ Seq(alice),
+ new v1.nonconforming.NonConforming(
+ alice.toProtoPrimitive,
+ bob.toProtoPrimitive,
+ mode,
+ ).create.commands.overridePackageId(v1.nonconforming.NonConforming.PACKAGE_ID).asScala.toSeq,
+ )
+
+ val cidV1: v1.nonconforming.NonConforming.ContractId =
+ participant1.ledger_api.javaapi.state.acs
+ .await(v1.nonconforming.NonConforming.COMPANION)(
+ alice,
+ (c: v1.nonconforming.NonConforming.Contract) =>
+ c.data.mode == mode, // Ensure we get the correct creation
+ )
+ .id
+ val
cidV2: v2.nonconforming.NonConforming.ContractId = + new v2.nonconforming.NonConforming.ContractId(cidV1.contractId) + + checkVersionV1(cidV1, participant1, alice) + checkVersionV2(cidV2, participant1, alice, mode != 0) + } + + private def testNonConformingCKInMode(mode: Long)(implicit + env: TestConsoleEnvironment + ): Unit = { + import env.* + + participant1.ledger_api.javaapi.commands.submit( + Seq(alice), + new v1.nonconforming.NonConformingCK( + alice.toProtoPrimitive, + bob.toProtoPrimitive, + mode, + ).create.commands.overridePackageId(v1.nonconforming.NonConforming.PACKAGE_ID).asScala.toSeq, + ) + + val cidV1: v1.nonconforming.NonConformingCK.ContractId = + participant1.ledger_api.javaapi.state.acs + .await(v1.nonconforming.NonConformingCK.COMPANION)( + alice, + (c: v1.nonconforming.NonConformingCK.Contract) => + c.data.mode == mode, // Ensure correct contract is selected + ) + .id + val cidV2: v2.nonconforming.NonConformingCK.ContractId = + new v2.nonconforming.NonConformingCK.ContractId(cidV1.contractId) + + checkVersionCkV1(cidV1, participant1, alice) + checkVersionCkV2(cidV2, participant1, alice, expectException = mode != 0) + } + + "Non Conformance" when { + + "In no-change mode (0) upgrade is allowed" in { implicit env => + testNonConformingInMode(0) + } + + "In signatory change mode (1) upgrade is forbidden" in { implicit env => + loggerFactory.assertLoggedWarningsAndErrorsSeq( + testNonConformingInMode(1), + LogEntry.assertLogSeq( + mustContainWithClue = Seq( + ( + _.errorMessage should include regex raw"(?s)FAILED_PRECONDITION/INTERPRETATION_UPGRADE_ERROR_VALIDATION_FAILED.*Interpretation error: Error: Validation fails when trying to upgrade the contract.*from NonConforming:NonConforming.*Verify that neither the signatories, nor the observers, nor the contract key, nor the key's maintainers have changed", + "mode 1 failure", + ) + ) + ), + ) + } + + "In observer change mode (2) upgrade is forbidden" in { implicit env => + loggerFactory.assertLoggedWarningsAndErrorsSeq( + testNonConformingInMode(2), + LogEntry.assertLogSeq( + mustContainWithClue = Seq( + ( + _.errorMessage should include regex raw"(?s)FAILED_PRECONDITION/INTERPRETATION_UPGRADE_ERROR_VALIDATION_FAILED.*Interpretation error: Error: Validation fails when trying to upgrade the contract.*from NonConforming:NonConforming.*Verify that neither the signatories, nor the observers, nor the contract key, nor the key's maintainers have changed", + "mode 2 failure", + ) + ) + ), + ) + } + } + + // Ignore contract key based tests + if (testedProtocolVersion.isDev) { + "Non CK Conformance" when { + + "In no-change mode (0) upgrade is allowed" ignore { implicit env => + testNonConformingCKInMode(0) + } + + "In key change mode (1) upgrade is forbidden" ignore { implicit env => + loggerFactory.suppressErrors( + testNonConformingCKInMode(1) + ) + } + + "In maintainer change mode (2) upgrade is forbidden" ignore { implicit env => + loggerFactory.suppressErrors( + testNonConformingCKInMode(2) + ) + } + + } + } + + "Change of template party order" when { + + "party order is switched" in { implicit env => + import env.* + + participant1.ledger_api.javaapi.commands.submit( + Seq(bank), + new BankTransfer( + bank.toProtoPrimitive, + alice.toProtoPrimitive, + bob.toProtoPrimitive, + 0, + ).create.commands.overridePackageId(v1.nonconforming.BankTransfer.PACKAGE_ID).asScala.toSeq, + ) + + val cidV1: v1.nonconforming.BankTransfer.ContractId = + participant1.ledger_api.javaapi.state.acs + 
.await(v1.nonconforming.BankTransfer.COMPANION)(bank) + .id + + def getPayee(update: Update[Exercised[String]], packageIdOverride: String): String = + participant1.ledger_api.javaapi.commands + .submit( + actAs = Seq(bank), + commands = update + .commands() + .overridePackageId(packageIdOverride) + .asScala + .toSeq, + transactionShape = TRANSACTION_SHAPE_LEDGER_EFFECTS, + ) + .getEvents + .asScala + .collect { case e: com.daml.ledger.javaapi.data.ExercisedEvent => e } + .loneElement + .getExerciseResult + .asParty() + .get() + .getValue + + // V1 works + getPayee( + cidV1.exerciseBankTransfer_GetPayee(), + v1.nonconforming.BankTransfer.PACKAGE_ID, + ) + + val cidV2: v2.nonconforming.BankTransfer.ContractId = + new v2.nonconforming.BankTransfer.ContractId(cidV1.contractId) + + // V2 fails + assertThrowsAndLogsCommandFailures( + getPayee( + cidV2.exerciseBankTransfer_GetPayee(), + v2.nonconforming.BankTransfer.PACKAGE_ID, + ), + e => + e.errorMessage should include regex raw"(?s)FAILED_PRECONDITION/INTERPRETATION_DEV_ERROR.*Authentication error for contract.*${cidV2.contractId}", + ) + + } + } + +} diff --git a/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/PackagePreferenceQueryIntegrationTest.scala b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/PackagePreferenceQueryIntegrationTest.scala new file mode 100644 index 0000000000..606baadbe0 --- /dev/null +++ b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/PackagePreferenceQueryIntegrationTest.scala @@ -0,0 +1,224 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.upgrading + +import com.daml.ledger.api.v2.interactive.interactive_submission_service.GetPreferredPackagesResponse +import com.daml.ledger.api.v2.package_reference.PackageReference +import com.digitalasset.canton.LfPackageName +import com.digitalasset.canton.config.CantonRequireTypes.InstanceName +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.console.LocalParticipantReference +import com.digitalasset.canton.damltests.appinstall.v1.java.appinstall.AppInstall as AppInstallV1 +import com.digitalasset.canton.damltests.appinstall.v2.java.appinstall.AppInstall as AppInstallV2 +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.integration.* +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.ledger.error.LedgerApiErrors.NoPreferredPackagesFound +import com.digitalasset.canton.topology.PartyId +import com.digitalasset.canton.util.SetupPackageVetting +import org.scalatest.Assertion + +import java.time.Duration + +import UpgradingBaseTest.Syntax.* + +class PackagePreferenceQueryIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment + with UpgradingBaseTest.WhenPV { + registerPlugin( + new UseReferenceBlockSequencer[DbConfig.Postgres]( + loggerFactory, + sequencerGroups = MultiSynchronizer( + Seq( + Set(InstanceName.tryCreate("sequencer1")), + Set(InstanceName.tryCreate("sequencer2")), + ) + ), + ) + ) + + private val AppInstallPackageName = LfPackageName.assertFromString(AppInstallV2.PACKAGE_NAME) + private val PackageReferences = Map( + AppInstallV1.PACKAGE_ID -> PackageReference( + packageId = AppInstallV1.PACKAGE_ID, + packageName = AppInstallV1.PACKAGE_NAME, + packageVersion = AppInstallV1.PACKAGE_VERSION.toString, + ), + AppInstallV2.PACKAGE_ID -> PackageReference( + packageId = AppInstallV2.PACKAGE_ID, + packageName = AppInstallV2.PACKAGE_NAME, + packageVersion = AppInstallV2.PACKAGE_VERSION.toString, + ), + ) + private val vettingEndsForV1At = CantonTimestamp.now().add(Duration.ofDays(1L)) + + @volatile var party1, party2: PartyId = _ + + override lazy val environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P2_S1M1_S1M1 + .withSetup { implicit env => + import env.* + + participant1.synchronizers.connect_local(sequencer1, alias = daName) + participant1.synchronizers.connect_local(sequencer2, alias = acmeName) + + participant2.synchronizers.connect_local(sequencer1, alias = daName) + participant2.synchronizers.connect_local(sequencer2, alias = acmeName) + + party1 = participant1.parties.enable("party1", synchronizer = daName) + participant1.parties.enable("party1", synchronizer = acmeName) + party2 = participant2.parties.enable("party2", synchronizer = daName) + participant2.parties.enable("party2", synchronizer = acmeName) + + SetupPackageVetting( + darPaths = Set(UpgradingBaseTest.AppInstallV1, UpgradingBaseTest.AppInstallV2), + targetTopology = Map( + // V1 on synchronizer 1 + daId -> Map( + participant1 -> Set( + AppInstallV1.PACKAGE_ID.toPackageId.withVettingEndsAt(vettingEndsForV1At), + AppInstallV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + ), + participant2 -> Set(AppInstallV1.PACKAGE_ID.toPackageId.withNoVettingBounds), + ), + // Disjoint on synchronizer 2 - no valid preference + acmeId -> Map( + participant1 -> 
Set(AppInstallV2.PACKAGE_ID.toPackageId.withNoVettingBounds),
+ participant2 -> Set(AppInstallV1.PACKAGE_ID.toPackageId.withNoVettingBounds),
+ ),
+ ),
+ )
+
+ // TODO(#25385): Remove this upload if participant2 can compute the preference of a counterparty without having
+ // the preferred package stored locally
+ //
+ // Upload V2 on participant 2 as well,
+ // otherwise it can't correctly output the preference of party1 if its highest vetted package is lower
+ // than the highest vetted package on participant 1
+ participant2.dars.upload(
+ UpgradingBaseTest.AppInstallV2,
+ vetAllPackages = false,
+ synchronizeVetting = false,
+ )
+ }
+
+ "get_preferred_package_version" should {
+ "return the correct response for a package-name, set of parties and the given topology state without synchronizer-id restriction" in {
+ implicit env =>
+ import env.*
+
+ // Assert both parties' preferences
+ onAllParticipants { participantRef =>
+ val expectedPreference = GetPreferredPackagesResponse(
+ packageReferences = Seq(PackageReferences(AppInstallV1.PACKAGE_ID)),
+ synchronizerId = daId.logical.toProtoPrimitive,
+ )
+
+ val parties = Set(party1, party2)
+ participantRef.ledger_api.interactive_submission
+ .preferred_packages(
+ Map(AppInstallPackageName -> parties)
+ ) shouldBe expectedPreference withClue s"Preferred package version should be V1 on ${daId.logical.toProtoPrimitive} as seen by ${participantRef.id.toProtoPrimitive} for parties $parties"
+ }
+
+ // Assert preference of party from participant 1
+ onAllParticipants { participantRef =>
+ val expectedPreference = GetPreferredPackagesResponse(
+ packageReferences = Seq(PackageReferences(AppInstallV2.PACKAGE_ID)),
+ synchronizerId = daId.logical.toProtoPrimitive,
+ )
+
+ val parties = Set(party1)
+ val response = participantRef.ledger_api.interactive_submission
+ .preferred_packages(Map(AppInstallPackageName -> parties))
+ response shouldBe expectedPreference withClue s"Preferred package version should be V2 on ${daId.logical.toProtoPrimitive} as seen by ${participantRef.id.toProtoPrimitive} for parties $parties"
+ }
+
+ // Assert preference of party from participant 2
+ onAllParticipants { participantRef =>
+ val expectedPreference = GetPreferredPackagesResponse(
+ packageReferences = Seq(PackageReferences(AppInstallV1.PACKAGE_ID)),
+ synchronizerId = daId.logical.toProtoPrimitive,
+ )
+
+ val parties = Set(party2)
+ participantRef.ledger_api.interactive_submission
+ .preferred_packages(
+ Map(AppInstallPackageName -> parties)
+ ) shouldBe expectedPreference withClue s"Preferred package version should be V1 on ${daId.logical.toProtoPrimitive} as seen by ${participantRef.id.toProtoPrimitive} for parties $parties"
+ }
+ }
+
+ "using synchronizer-id restriction yields the correct result" in { implicit env =>
+ import env.*
+
+ // Restriction by the synchronizer-id of the preference yields the correct result
+ onAllParticipants { participantRef =>
+ val expectedPreference = GetPreferredPackagesResponse(
+ packageReferences = Seq(PackageReferences(AppInstallV1.PACKAGE_ID)),
+ synchronizerId = daId.logical.toProtoPrimitive,
+ )
+
+ participantRef.ledger_api.interactive_submission
+ .preferred_packages(
+ Map(AppInstallPackageName -> Set(party1, party2)),
+ synchronizerId = Some(daId),
+ ) shouldBe expectedPreference withClue s"Preferred package version should be V1 on ${daId.logical.toProtoPrimitive} as seen by ${participantRef.id.toProtoPrimitive}"
+ }
+
+ // Restriction by a synchronizer-id for which no preference exists yields no preference
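+ // (On acme the vetting is disjoint by construction: participant1 vets only V2 and
+ // participant2 vets only V1, so no single package can serve both parties and the query is
+ // expected to fail with NoPreferredPackagesFound.)
+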
onAllParticipants { participantRef => + assertThrowsAndLogsCommandFailures( + participantRef.ledger_api.interactive_submission + .preferred_packages( + Map(AppInstallPackageName -> Set(party1, party2)), + synchronizerId = Some(acmeId), + ), + _.shouldBeCantonErrorCode(NoPreferredPackagesFound), + ) + } + } + + "using the validity period restriction yields the correct result" in { implicit env => + import env.* + + // Restriction by a validity timestamp within the bounds for V1 yields a valid preference + onAllParticipants { participantRef => + val expectedPreference = GetPreferredPackagesResponse( + packageReferences = Seq(PackageReferences(AppInstallV1.PACKAGE_ID)), + synchronizerId = daId.logical.toProtoPrimitive, + ) + + participantRef.ledger_api.interactive_submission + .preferred_packages( + Map(AppInstallPackageName -> Set(party1, party2)), + vettingValidAt = Some(vettingEndsForV1At.minus(Duration.ofSeconds(1L))), + synchronizerId = Some(daId), + ) shouldBe expectedPreference withClue s"Preferred package version should be V1 on ${daId.logical.toProtoPrimitive} as seen by ${participantRef.id}" + } + + // Restriction by a validity timestamp outside the bounds for V1 yields no preference + onAllParticipants { participantRef => + val vettingValidAt = vettingEndsForV1At.plus(Duration.ofSeconds(1L)) + assertThrowsAndLogsCommandFailures( + participantRef.ledger_api.interactive_submission + .preferred_packages( + Map(AppInstallPackageName -> Set(party1, party2)), + vettingValidAt = Some(vettingValidAt), + synchronizerId = Some(daId), + ), + _.shouldBeCantonErrorCode(NoPreferredPackagesFound), + ) + } + } + } + + private def onAllParticipants(assertion: LocalParticipantReference => Assertion)(implicit + env: TestConsoleEnvironment + ): Unit = + env.participants.local.foreach(assertion) + +} diff --git a/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/SimpleTopologyAwarePackageSelectionIntegrationTest.scala b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/SimpleTopologyAwarePackageSelectionIntegrationTest.scala new file mode 100644 index 0000000000..c67bd19241 --- /dev/null +++ b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/SimpleTopologyAwarePackageSelectionIntegrationTest.scala @@ -0,0 +1,152 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.integration.tests.upgrading
+
+import com.digitalasset.canton.config.DbConfig
+import com.digitalasset.canton.console.LocalParticipantReference
+import com.digitalasset.canton.damltests.appupgrade.v1.java.appupgrade.{
+ AppInstall as AppInstall_V1,
+ AppInstallRequest,
+ AppInstallRequest as AppInstallRequest_V1,
+}
+import com.digitalasset.canton.damltests.token
+import com.digitalasset.canton.error.TransactionRoutingError.ConfigurationErrors.InvalidPrescribedSynchronizerId
+import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer
+import com.digitalasset.canton.integration.{
+ CommunityIntegrationTest,
+ ConfigTransforms,
+ EnvironmentDefinition,
+ SharedEnvironment,
+}
+import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil
+import com.digitalasset.canton.topology.PartyId
+import monocle.macros.syntax.lens.*
+
+import scala.jdk.CollectionConverters.CollectionHasAsScala
+import scala.util.chaining.scalaUtilChainingOps
+
+abstract class SimpleTopologyAwarePackageSelectionIntegrationTest
+ extends CommunityIntegrationTest
+ with SharedEnvironment {
+ registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory))
+
+ @volatile var v2Hash: String = _
+ @volatile var appInstallRequest: AppInstallRequest.Contract = _
+ @volatile var provider, user: PartyId = _
+ @volatile var providerParticipant, userParticipant: LocalParticipantReference = _
+
+ def featureEnabled: Boolean
+ def expectedResult(cmd: => Unit): Unit
+
+ override lazy val environmentDefinition: EnvironmentDefinition =
+ EnvironmentDefinition.P2_S1M1
+ .addConfigTransform(
+ ConfigTransforms.updateAllParticipantConfigs_(
+ _.focus(_.ledgerApi.topologyAwarePackageSelection.enabled).replace(featureEnabled)
+ )
+ )
+ .withSetup { implicit env =>
+ import env.*
+
+ providerParticipant = participant1
+ userParticipant = participant2
+
+ providerParticipant.synchronizers.connect_local(sequencer1, alias = daName)
+ userParticipant.synchronizers.connect_local(sequencer1, alias = daName)
+
+ providerParticipant.dars.upload(UpgradingBaseTest.AppUpgradeV1)
+ userParticipant.dars.upload(UpgradingBaseTest.AppUpgradeV1)
+
+ v2Hash = providerParticipant.dars.upload(
+ UpgradingBaseTest.AppUpgradeV2,
+ vetAllPackages = false,
+ synchronizeVetting = false,
+ )
+ provider = providerParticipant.parties.enable(
+ "provider",
+ synchronizeParticipants = Seq(userParticipant),
+ )
+ user = userParticipant.parties.enable(
+ "user",
+ synchronizeParticipants = Seq(providerParticipant),
+ )
+ }
+
+ "Command submission" when {
+ "the submitter didn't vet a package that it has locally in its store (V2)" should {
+ "not select it for the package preference" in { _ =>
+ appInstallRequest = userParticipant.ledger_api.javaapi.commands
+ .submit(
+ Seq(user),
+ new AppInstallRequest_V1(
+ new AppInstall_V1(
+ provider.toProtoPrimitive,
+ user.toProtoPrimitive,
+ BigDecimal(1337L).bigDecimal,
+ )
+ ).create().commands().asScala.toList,
+ )
+ .pipe(JavaDecodeUtil.decodeAllCreated(AppInstallRequest_V1.COMPANION))
+ .pipe(inside(_) { case Seq(created) => created })
+
+ expectedResult {
+ providerParticipant.ledger_api.javaapi.commands.submit(
+ Seq(provider),
+ appInstallRequest.id.exerciseAppInstall_Accept().commands().asScala.toSeq,
+ )
+ }
+ }
+ }
+
+ "the counterparty did not vet the V2 package" should {
+ "not select it for the package preference" in { _ =>
+ providerParticipant.dars.vetting.enable(v2Hash)
+
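+ // At this point the provider vets V2 (uploaded with vetAllPackages = false in the setup),
+ // but the user participant still vets only V1, so V2 must still not be selected: per
+ // expectedResult, the accept succeeds (on V1) when the feature is enabled and fails otherwise.
+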
expectedResult { + providerParticipant.ledger_api.javaapi.commands.submit( + Seq(provider), + appInstallRequest.id.exerciseAppInstall_Accept().commands().asScala.toSeq, + ) + } + } + } + + "two parties have disjoint vettings for a package-name not involved in the submission" should { + "succeed" in { _ => + // Upload Token V1 and V2 to both participants + providerParticipant.dars.upload(UpgradingBaseTest.TokenV1, vetAllPackages = false) + userParticipant.dars.upload(UpgradingBaseTest.TokenV1, vetAllPackages = false) + providerParticipant.dars.upload(UpgradingBaseTest.TokenV2, vetAllPackages = false) + userParticipant.dars.upload(UpgradingBaseTest.TokenV2, vetAllPackages = false) + + // Vet the token variants disjointly on both participants + providerParticipant.dars.vetting.enable(token.v1.java.token.Token.PACKAGE_ID) + userParticipant.dars.vetting.enable(token.v2.java.token.Token.PACKAGE_ID) + + expectedResult { + providerParticipant.ledger_api.javaapi.commands.submit( + Seq(provider), + appInstallRequest.id.exerciseAppInstall_Accept().commands().asScala.toSeq, + ) + } + } + } + } +} + +class EnabledSimpleTopologyAwarePackageSelectionIntegrationTest + extends SimpleTopologyAwarePackageSelectionIntegrationTest { + override def featureEnabled: Boolean = true + override def expectedResult(cmd: => Unit): Unit = cmd +} + +class DisabledSimpleTopologyAwarePackageSelectionIntegrationTest + extends SimpleTopologyAwarePackageSelectionIntegrationTest { + override def featureEnabled: Boolean = false + override def expectedResult(cmd: => Unit): Unit = + assertThrowsAndLogsCommandFailures( + cmd, + _.shouldBeCantonErrorCode(InvalidPrescribedSynchronizerId), + ) +} diff --git a/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/SystematicTopologyAwareUpgradingIntegrationTest.scala b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/SystematicTopologyAwareUpgradingIntegrationTest.scala new file mode 100644 index 0000000000..f94f174a4a --- /dev/null +++ b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/SystematicTopologyAwareUpgradingIntegrationTest.scala @@ -0,0 +1,696 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.upgrading + +import com.daml.ledger.api.v2.transaction_filter.TransactionShape.{ + TRANSACTION_SHAPE_ACS_DELTA, + TRANSACTION_SHAPE_LEDGER_EFFECTS, +} +import com.daml.ledger.api.v2.transaction_filter.{ + EventFormat, + Filters, + TransactionFormat, + UpdateFormat, +} +import com.daml.ledger.javaapi.data.Transaction +import com.digitalasset.canton.LfPackageName +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.console.{LocalParticipantReference, ParticipantReference} +import com.digitalasset.canton.damltests.bar.v1.java.bar.Bar as BarV1 +import com.digitalasset.canton.damltests.bar.v2.java.bar.Bar as BarV2 +import com.digitalasset.canton.damltests.baz.v1.java.baz.Baz as BazV1 +import com.digitalasset.canton.damltests.baz.v2.java.baz.Baz as BazV2 +import com.digitalasset.canton.damltests.foo.v1.java.foo.Foo +import com.digitalasset.canton.damltests.foo.v1.java.foo.Foo as FooV1 +import com.digitalasset.canton.damltests.foo.v2.java.foo.Foo as FooV2 +import com.digitalasset.canton.damltests.foo.v3.java.foo.Foo as FooV3 +import com.digitalasset.canton.damltests.foo.v4.java.foo.Foo as FooV4 +import com.digitalasset.canton.damltests.ibaz.v1.java.ibaz.IBaz +import com.digitalasset.canton.error.TransactionRoutingError.ConfigurationErrors.InvalidPrescribedSynchronizerId +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.tests.upgrading.UpgradingBaseTest.Syntax.* +import com.digitalasset.canton.integration.util.PartiesAllocator +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentDefinition, + SharedEnvironment, +} +import com.digitalasset.canton.ledger.error.groups.CommandExecutionErrors.PackageSelectionFailed +import com.digitalasset.canton.topology.PartyId +import com.digitalasset.canton.topology.transaction.ParticipantPermission.Submission +import com.digitalasset.canton.topology.transaction.VettedPackage +import com.digitalasset.canton.util.SetupPackageVetting +import com.digitalasset.canton.util.ShowUtil.* + +import scala.jdk.CollectionConverters.CollectionHasAsScala +import scala.jdk.OptionConverters.RichOption +import scala.util.chaining.scalaUtilChainingOps + +// TODO(#25385): This systematic test suite is a stub. 
Enrich with the following cases: +// - exercise Foo_Exe receives an interface contract from the outside instead of +// creating the contract locally +// - Add multi-root node tests +// - Add multi-synchronizer tests +// - Use TestSubmissionService to mock the package-map creation and avoid +// spending the time on the required vetting setups +class SystematicTopologyAwareUpgradingIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment { + + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) + + @volatile private var aliceParticipant, bobParticipant, + charlieParticipant: LocalParticipantReference = _ + @volatile private var alice, bob, charlie: PartyId = _ + private val AllDars = Set( + UpgradingBaseTest.IBaz, + UpgradingBaseTest.IBar, + UpgradingBaseTest.UtilV1, + UpgradingBaseTest.UtilV2, + UpgradingBaseTest.BazV1, + UpgradingBaseTest.BazV2, + UpgradingBaseTest.BarV1, + UpgradingBaseTest.BarV2, + UpgradingBaseTest.FooV1, + UpgradingBaseTest.FooV2, + UpgradingBaseTest.FooV3, + UpgradingBaseTest.FooV4, + ) + + private lazy val AllVettedUpToV3: Map[ParticipantReference, Set[VettedPackage]] = + Map( + aliceParticipant -> Set( + // These packages below are vetted transitively by FooV2 and FooV3. + // Vet them explicitly only when no implicit implementation is vetted + // - IBar.PACKAGE_ID.toPackageId.withNoVettingBounds, + // - IBaz.PACKAGE_ID.toPackageId.withNoVettingBounds, + // - BarV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + // - BazV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + FooV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + FooV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + FooV3.PACKAGE_ID.toPackageId.withNoVettingBounds, + // Baz and Bar V2 need to be vetted explicitly since they are not a static dependency of any other package + BarV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + BazV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + ), + bobParticipant -> Set( + // These two are vetted implicitly by Baz and Bar. + // Vet them explicitly only when no implicit implementation is vetted + // - IBar.PACKAGE_ID.toPackageId.withNoVettingBounds, + // - IBaz.PACKAGE_ID.toPackageId.withNoVettingBounds, + BarV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + BarV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + BazV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + BazV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + ), + ) + + private lazy val AllVetted: Map[ParticipantReference, Set[VettedPackage]] = + AllVettedUpToV3 + .updatedWith(aliceParticipant)(_.map(_ + FooV4.PACKAGE_ID.toPackageId.withNoVettingBounds)) + .updated( + charlieParticipant, + Set( + // These two are vetted implicitly by Baz and Bar. 
+          // Vet them explicitly only when no implicit implementation is vetted
+          // - IBar.PACKAGE_ID.toPackageId.withNoVettingBounds,
+          // - IBaz.PACKAGE_ID.toPackageId.withNoVettingBounds,
+          BarV1.PACKAGE_ID.toPackageId.withNoVettingBounds,
+          BarV2.PACKAGE_ID.toPackageId.withNoVettingBounds,
+          BazV1.PACKAGE_ID.toPackageId.withNoVettingBounds,
+          BazV2.PACKAGE_ID.toPackageId.withNoVettingBounds,
+        ),
+      )
+
+  override lazy val environmentDefinition: EnvironmentDefinition =
+    EnvironmentDefinition.P4_S1M1
+      .withSetup { implicit env =>
+        import env.*
+
+        // Disambiguate participants
+        aliceParticipant = participant1
+        bobParticipant = participant2
+        charlieParticipant = participant3
+
+        participants.all.synchronizers.connect_local(sequencer1, alias = daName)
+
+        // Set up the party topology state
+        inside(
+          PartiesAllocator(Set(aliceParticipant, bobParticipant, charlieParticipant))(
+            newParties = Seq(
+              "alice" -> aliceParticipant,
+              "bob" -> bobParticipant,
+              "charlie" -> charlieParticipant,
+            ),
+            targetTopology = Map(
+              "alice" -> Map(
+                daId -> (PositiveInt.one, Set(aliceParticipant.id -> Submission))
+              ),
+              "bob" -> Map(
+                daId -> (PositiveInt.one, Set(bobParticipant.id -> Submission))
+              ),
+              "charlie" -> Map(
+                daId -> (PositiveInt.one, Set(charlieParticipant.id -> Submission))
+              ),
+            ),
+          )
+        ) { case Seq(p_alice, p_bob, p_charlie) =>
+          alice = p_alice
+          bob = p_bob
+          charlie = p_charlie
+        }
+      }
+
+  // In this test, multiple combinations of vetting setups are evaluated against the Foo_Exe exercise submitted by alice, which
+  // brings in bob (and in V4 charlie) as informee(s) in a child transaction, depending on the vetting state.
+  //
+  // The test aims purely at evaluating the effect of different vetting combinations
+  // on the packages chosen in the transaction that results from alice exercising Foo_Exe (if successful).
+  // For this reason, the templates are as simple as possible, with no changing record types.
+  // For the same simplicity reason, both versions of Bar and Baz use the same template definition.
+ // + // Below, the transaction tree is succinctly shown, for each version of Foo + // + // Note: + // - IBar_Exe pertains to IBar interface and is implemented by Bar V1 and V2 + // - IBaz_Exe pertains to IBaz interface and is implemented by Baz V1 and V2 + // + // (V1) Foo_Exe (alice) + // | + // + // (V2) Foo_Exe (alice) + // | + // Create Bar (alice) + // | + // Exercise IBar_Exe ~ Bar_Vx (alice, bob) + // + // (V3) Foo_Exe (alice) + // | + // Create Baz_V1 (alice) + // | + // Exercise IBaz_Exe ~ Baz_Vx (alice, bob) + // + // (V4) Foo_Exe (alice) + // | + // Create Baz_V1 (alice) + // | + // Exercise IBaz_Exe ~ Baz_Vx (alice, bob) + // | + // Create Bar_V1 (alice) + // | + // Exercise IBar_Exe ~ Bar_Vx (alice, charlie) + // + // Note: + // - Bar V1,V2 have a static dependency to Util V1 - utility package + // - Baz V1,V2 have a static dependency to Util V2 - non-schema package with serializable types + // - Charlie is introduced only in tests involving Foo V4 to assert multiple versions of a static dependency + // in an exercise + "Systematic topology-aware upgrading test" when { + "All Vetted up to V3" should { + "succeed using Foo V3 - Baz V2" in { implicit env => + SetupPackageVetting(AllDars, Map(env.daId -> AllVettedUpToV3)) + test( + bobSees = Some(BazV2.PACKAGE_ID), + expectedExerciseVersion = FooV3.PACKAGE_ID, + ) + } + } + + "alice only vetted Foo V4, bob vetted Baz V1 and charlie vetted Bar V1" should { + "succeed using Foo V4 - Baz V1 - Bar V1" in { implicit env => + SetupPackageVetting( + AllDars, + Map( + env.daId -> Map( + aliceParticipant -> Set( + FooV4.PACKAGE_ID.toPackageId.withNoVettingBounds, + // Baz and Bar V2 need to be vetted explicitly since they are not a static dependency of any other package + BarV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + BarV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + BazV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + BazV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + ), + bobParticipant -> Set( + BazV1.PACKAGE_ID.toPackageId.withNoVettingBounds + ), + charlieParticipant -> Set( + BarV1.PACKAGE_ID.toPackageId.withNoVettingBounds + ), + ) + ), + ) + test( + bobSees = Some(BazV1.PACKAGE_ID), + expectedExerciseVersion = FooV4.PACKAGE_ID, + charlieSees = Some(BarV1.PACKAGE_ID), + ) + } + } + + "bob vetted only Baz V1" should { + "succeed using Foo V3 - Baz V1" in { implicit env => + SetupPackageVetting( + AllDars, + Map( + env.daId -> AllVettedUpToV3.updated( + bobParticipant, + Set(BazV1.PACKAGE_ID.toPackageId.withNoVettingBounds), + ) + ), + ) + test( + bobSees = Some(BazV1.PACKAGE_ID), + expectedExerciseVersion = FooV3.PACKAGE_ID, + ) + } + } + + "bob vets only Bar V1" should { + "succeed using Foo V2 - Bar V1 / new package (Baz) introduced for bob in V3 can be forgotten in pass 2" in { + implicit env => + SetupPackageVetting( + AllDars, + Map( + env.daId -> AllVettedUpToV3.updated( + bobParticipant, + Set(BarV1.PACKAGE_ID.toPackageId.withNoVettingBounds), + ) + ), + ) + test( + bobSees = Some(BarV1.PACKAGE_ID), + expectedExerciseVersion = FooV2.PACKAGE_ID, + ) + } + } + + // Negative test case + "alice did not vet Foo V1 and bob vetted only Bar V2" should { + + /** Limitation: Package selection in pass 2 checks that all dependencies of a package are + * vetted by all parties interested in the dependency's package-name. See + * [[com.digitalasset.canton.platform.PackagePreferenceBackend]] ScalaDoc for more details. 
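+        * Concretely for this scenario (a reading of the assertion below; the exact vetting
+        * setup is in the test body): bob vets only Bar V2 and is therefore interested in
+        * the bar package-name, yet Foo V2 statically depends on Bar V1, which bob has not
+        * vetted. Foo V2 is thus discarded, no Foo candidate survives pass 2, and the
+        * submission fails with PackageSelectionFailed.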
+ */ + "fail" in { implicit env => + SetupPackageVetting( + AllDars, + Map( + env.daId -> AllVettedUpToV3 + .updated( + bobParticipant, + Set(BarV2.PACKAGE_ID.toPackageId.withNoVettingBounds), + ) + .updated( + aliceParticipant, + Set( + FooV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + FooV3.PACKAGE_ID.toPackageId.withNoVettingBounds, + BarV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + BazV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + ), + ) + ), + ) + test( + bobSees = None, + assertExerciseResult = exercise => { + assertThrowsAndLogsCommandFailures( + exercise(), + entry => { + entry.shouldBeCantonErrorCode(PackageSelectionFailed) + entry.message should include regex + s"""No synchronizers satisfy the draft transaction topology requirements: Discarded synchronizers: + |.*${env.daId}: Package-name '${FooV1.PACKAGE_NAME}' appearing in a draft transaction root node has been discarded: Packages with dependencies not vetted by all interested parties.*${FooV2.PACKAGE_ID.toPackageId.show} ->.*${BarV1.PACKAGE_ID.toPackageId.show}""".stripMargin + }, + ) + None + }, + // Doesn't matter, we expect an error + expectedExerciseVersion = FooV1.PACKAGE_ID, + ) + } + } + + "bob vets Bar V1 and IBaz (no Baz)" should { + "succeed using Foo V2 - Bar V1 / IBaz dynamic dependency introduced for bob in V3 can be forgotten in pass 2" in { + implicit env => + SetupPackageVetting( + AllDars, + Map( + env.daId -> AllVettedUpToV3.updated( + bobParticipant, + Set( + IBaz.PACKAGE_ID.toPackageId.withNoVettingBounds, + BarV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + ), + ) + ), + ) + test( + bobSees = Some(BarV1.PACKAGE_ID), + expectedExerciseVersion = FooV2.PACKAGE_ID, + ) + } + } + + "alice vets FooV1 and FooV3/BazV1 and bob vets only Bar V1" should { + "succeed using Foo V1 - no implication for bob" in { implicit env => + SetupPackageVetting( + AllDars, + Map( + env.daId -> Map( + aliceParticipant -> Set( + FooV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + FooV3.PACKAGE_ID.toPackageId.withNoVettingBounds, + ), + bobParticipant -> Set( + BarV1.PACKAGE_ID.toPackageId.withNoVettingBounds + ), + ) + ), + ) + test( + bobSees = None, + expectedExerciseVersion = FooV1.PACKAGE_ID, + ) + } + } + + "alice vets Foo V1 and Foo V2/Bar V1 and bob vets only Baz V1" should { + "succeed using Foo V1 - no implication for bob" in { implicit env => + SetupPackageVetting( + AllDars, + Map( + env.daId -> Map( + aliceParticipant -> Set( + FooV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + FooV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + ), + bobParticipant -> Set( + BazV1.PACKAGE_ID.toPackageId.withNoVettingBounds + ), + ) + ), + ) + test( + bobSees = None, + expectedExerciseVersion = FooV1.PACKAGE_ID, + ) + } + } + + "bob does not vet Baz V2" should { + "succeed with Foo V3 - Baz V1 exercised" in { implicit env => + SetupPackageVetting( + AllDars, + Map( + env.daId -> AllVettedUpToV3.updated( + bobParticipant, + Set( + BarV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + BazV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + ), + ) + ), + ) + test( + bobSees = Some(BazV1.PACKAGE_ID), + expectedExerciseVersion = FooV3.PACKAGE_ID, + ) + } + } + + // Negative test case + "bob doesn't vet anything" should { + + /** Limitation: In pass 2, the vetting checks only consider package-names that parties are + * interested in. 
Hence, if bob does not vet any package for a package-name that appears as a + * dependency to the package on which the selection is being performed, the preference is + * considered valid even though its usage in a Daml transaction might create a child action + * node that exposes the unvetted dependency package to bob. See + * [[com.digitalasset.canton.platform.PackagePreferenceBackend]] ScalaDoc for more details. + * + * In the example below, pass 1 chooses Foo V3 which imposes IBaz to bob, which bob did not + * vet. In pass 2, Foo V3 is discarded since Foo V3's dependency IBaz has not been vetted by + * bob (the draft transaction introduces the bob <-> IBaz/Baz requirement). The next + * candidate is Foo V2, which brings IBar/Bar V1 into bob's view. But since bob did not vet + * any of IBar/Bar, it is considered that bob does not have an interest in the package-name, + * thus its vetting state is not considered when deciding the vetting requirements for Foo + * V2's dependencies (of interest, Bar V1 and IBar here). Then, Foo V2 is selected. + */ + "fail synchronizer routing since pass 2 still picks Foo V2 which involves bob with Bar V1" in { + implicit env => + SetupPackageVetting( + AllDars, + Map(env.daId -> AllVettedUpToV3.updated(bobParticipant, Set.empty)), + ) + test( + bobSees = None, + assertExerciseResult = exercise => { + assertThrowsAndLogsCommandFailures( + exercise(), + _.shouldBeCantonErrorCode(InvalidPrescribedSynchronizerId), + ) + None + }, + expectedExerciseVersion = + // Doesn't matter, we expect an error + FooV1.PACKAGE_ID, + ) + } + } + + // Test cases involving Charlie + "All Vetted" should { + "succeed using Foo V4 - Baz V2 - Bar V2" in { implicit env => + SetupPackageVetting(AllDars, Map(env.daId -> AllVetted)) + test( + bobSees = Some(BazV2.PACKAGE_ID), + expectedExerciseVersion = FooV4.PACKAGE_ID, + charlieSees = Some(BarV2.PACKAGE_ID), + ) + } + } + + "commands are run with package preferences injection" when { + "All Vetted up to V3" should { + "succeed using Foo V3 - Baz V2" in { implicit env => + SetupPackageVetting(AllDars, Map(env.daId -> AllVettedUpToV3)) + test( + bobSees = Some(BazV2.PACKAGE_ID), + expectedExerciseVersion = FooV3.PACKAGE_ID, + vettingRequirementsForPreferencesInjection = Some( + Map( + Foo.PACKAGE_NAME.toPackageName -> Set(alice), + BazV1.PACKAGE_NAME.toPackageName -> Set(bob), + ) + ), + ) + } + } + + // Negative test case + "bob vets only a Bar package" should { + "fail with wrong preferences package_id_selection_preferences provided" in { implicit env => + import env.* + SetupPackageVetting( + AllDars, + Map( + env.daId -> AllVettedUpToV3.updated( + bobParticipant, + Set(BarV1.PACKAGE_ID.toPackageId.withNoVettingBounds), + ) + ), + ) + test( + bobSees = Some(BarV1.PACKAGE_ID), + expectedExerciseVersion = FooV2.PACKAGE_ID, + vettingRequirementsForPreferencesInjection = Some( + Map( + Foo.PACKAGE_NAME.toPackageName -> Set(alice), + BarV1.PACKAGE_NAME.toPackageName -> Set(bob), + ) + ), + assertExerciseResult = exercise => { + assertThrowsAndLogsCommandFailures( + exercise(), + entry => { + entry.shouldBeCantonErrorCode(PackageSelectionFailed) + entry.message should include regex + s"""No synchronizers satisfy the draft transaction topology requirements: Discarded synchronizers: + |.*$daId: Package-name '${FooV1.PACKAGE_NAME}' appearing in a draft transaction root node has been discarded: All candidates discarded after applying package-id filter. 
+ |Candidates.* + |Filter: Commands.package_id_selection_preference: .*foo -> ${FooV3.PACKAGE_ID.toPackageId.show}""".stripMargin + }, + ) + None + }, + ) + } + } + + "All Vetted" should { + "succeed using Foo V4 - Baz V2 - Bar V2" in { implicit env => + SetupPackageVetting(AllDars, Map(env.daId -> AllVetted)) + test( + bobSees = Some(BazV2.PACKAGE_ID), + expectedExerciseVersion = FooV4.PACKAGE_ID, + charlieSees = Some(BarV2.PACKAGE_ID), + vettingRequirementsForPreferencesInjection = Some( + Map( + Foo.PACKAGE_NAME.toPackageName -> Set(alice), + BazV1.PACKAGE_NAME.toPackageName -> Set(bob), + BarV1.PACKAGE_NAME.toPackageName -> Set(charlie), + ) + ), + ) + } + } + + "alice only vetted Foo V4, bob vetted Baz V1 and charlie vetted Bar V1" should { + "succeed using Foo V4 - Baz V1 - Bar V1" in { implicit env => + SetupPackageVetting( + AllDars, + Map( + env.daId -> Map( + aliceParticipant -> Set( + FooV4.PACKAGE_ID.toPackageId.withNoVettingBounds, + // Baz and Bar V2 need to be vetted explicitly since they are not a static dependency of any other package + BarV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + BarV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + BazV1.PACKAGE_ID.toPackageId.withNoVettingBounds, + BazV2.PACKAGE_ID.toPackageId.withNoVettingBounds, + ), + bobParticipant -> Set( + BazV1.PACKAGE_ID.toPackageId.withNoVettingBounds + ), + charlieParticipant -> Set( + BarV1.PACKAGE_ID.toPackageId.withNoVettingBounds + ), + ) + ), + ) + test( + bobSees = Some(BazV1.PACKAGE_ID), + expectedExerciseVersion = FooV4.PACKAGE_ID, + charlieSees = Some(BarV1.PACKAGE_ID), + vettingRequirementsForPreferencesInjection = Some( + Map( + Foo.PACKAGE_NAME.toPackageName -> Set(alice), + BazV1.PACKAGE_NAME.toPackageName -> Set(bob), + BarV1.PACKAGE_NAME.toPackageName -> Set(charlie), + ) + ), + ) + } + } + } + } + + private def test( + bobSees: Option[String], + expectedExerciseVersion: String, + assertExerciseResult: (() => Transaction) => Option[Transaction] = _().pipe(Some(_)), + vettingRequirementsForPreferencesInjection: Option[Map[LfPackageName, Set[PartyId]]] = None, + charlieSees: Option[String] = None, + ): Unit = { + val addCharlie = charlieSees.isDefined + + val fooCid = aliceParticipant.ledger_api.javaapi.commands + .submit( + Seq(alice), + new Foo(alice.toProtoPrimitive).create().commands().asScala.toList, + ) + .getEvents + .asScala + .loneElement + .toProtoEvent + .getCreated + .getContractId + .pipe(new FooV4.ContractId(_)) + + val packagePreferencesO = vettingRequirementsForPreferencesInjection + .map(vettingRequirements => + aliceParticipant.ledger_api.interactive_submission.preferred_packages(vettingRequirements) + ) + .map(_.packageReferences.map(_.packageId.toPackageId)) + + assertExerciseResult(() => + aliceParticipant.ledger_api.javaapi.commands + .submit( + Seq(alice), + fooCid + .exerciseFoo_Exe( + alice.toProtoPrimitive, + bob.toProtoPrimitive, + Option.when(addCharlie)(charlie.toProtoPrimitive).toJava, + ) + .commands() + .asScala + .toList, + transactionShape = TRANSACTION_SHAPE_LEDGER_EFFECTS, + userPackageSelectionPreference = packagePreferencesO.getOrElse(Seq.empty), + ) + ).foreach { tx => + tx.getEvents.asScala.headOption.value.toProtoEvent.getExercised.getTemplateId.getPackageId shouldBe expectedExerciseVersion + + val updateId = tx.getUpdateId + val bobsCreatePkgId = + // There is one template per package-id, so we can just check the package-id of the create event + // for a deterministic assertion + bobParticipant.ledger_api.updates + .update_by_id( + updateId, + 
updateFormat = UpdateFormat( + Some( + TransactionFormat( + eventFormat = Some( + EventFormat( + filtersByParty = Map(bob.toProtoPrimitive -> Filters(Nil)), + filtersForAnyParty = None, + verbose = false, + ) + ), + transactionShape = TRANSACTION_SHAPE_ACS_DELTA, + ) + ), + None, + None, + ), + ) + .flatMap(_.createEvents.toSeq.loneElement.templateId.map(_.packageId)) + + // If bob doesn't see any of the packages, we expect the update to not contain any events + bobsCreatePkgId shouldBe bobSees + + if (addCharlie) { + val charliesCreatePkgId = + charlieParticipant.ledger_api.updates + .update_by_id( + updateId, + updateFormat = UpdateFormat( + Some( + TransactionFormat( + eventFormat = Some( + EventFormat( + filtersByParty = Map(charlie.toProtoPrimitive -> Filters(Nil)), + filtersForAnyParty = None, + verbose = false, + ) + ), + transactionShape = TRANSACTION_SHAPE_ACS_DELTA, + ) + ), + None, + None, + ), + ) + .flatMap(_.createEvents.toSeq.loneElement.templateId.map(_.packageId)) + + charliesCreatePkgId shouldBe charlieSees + } + } + } +} diff --git a/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/UpgradePackageAvailabilityIntegrationTest.scala b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/UpgradePackageAvailabilityIntegrationTest.scala new file mode 100644 index 0000000000..9e2a589239 --- /dev/null +++ b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/UpgradePackageAvailabilityIntegrationTest.scala @@ -0,0 +1,310 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.upgrading + +import com.daml.ledger.api.v2.value.Identifier.toJavaProto +import com.daml.ledger.javaapi.data.DisclosedContract +import com.digitalasset.canton.admin.api.client.data.TemplateId.fromJavaIdentifier +import com.digitalasset.canton.config.DbConfig.Postgres +import com.digitalasset.canton.damltests.upgrade.v1.java as v1 +import com.digitalasset.canton.damltests.upgrade.v1.java.upgrade.Quote +import com.digitalasset.canton.damltests.upgrade.v2.java as v2 +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentDefinition, + SharedEnvironment, +} +import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil +import com.digitalasset.canton.topology.PartyId +import com.digitalasset.daml.lf.data.Ref + +import java.util.Optional +import scala.jdk.CollectionConverters.* + +/** Primary concern is ensuring that correct packages are vetted and used when upgrading + */ +sealed abstract class UpgradePackageAvailabilityIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment { + + private var alice: PartyId = _ + private var bob: PartyId = _ + private var charlie: PartyId = _ + private var dan: PartyId = _ + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P4_S1M1.withSetup { implicit env => + import env.* + + participants.all.synchronizers.connect_local(sequencer1, alias = daName) + + alice = participant1.parties.enable("alice") + + bob = participant2.parties.enable("bob") + + charlie = participant3.parties.enable("charlie") + + dan = participant4.parties.enable("dan") + + // Participant 1 (alice) has V1 and V2 loaded 
+      participant1.dars.upload(UpgradingBaseTest.UpgradeV1)
+      participant1.dars.upload(UpgradingBaseTest.UpgradeV2)
+
+      // Participant 2 (bob) also has V1 and V2 loaded
+      participant2.dars.upload(UpgradingBaseTest.UpgradeV1)
+      participant2.dars.upload(UpgradingBaseTest.UpgradeV2)
+
+      // Participant 3 (charlie) has V1 and V2 loaded but has then unvetted V1
+      participant3.dars.upload(UpgradingBaseTest.UpgradeV1)
+      participant3.dars.upload(UpgradingBaseTest.UpgradeV2)
+      participant3.topology.vetted_packages.propose_delta(
+        participant3,
+        removes = Seq(Ref.PackageId.assertFromString(v1.upgrade.Quote.PACKAGE_ID)),
+      )
+
+      // Participant 4 (dan) has only ever had V2 loaded
+      participant4.dars.upload(UpgradingBaseTest.UpgradeV2)
+    }
+
+  private def discloseQuote(
+      value: Long,
+      observer: Option[PartyId] = None,
+  )(implicit env: FixtureParam): (v2.upgrade.Quote.ContractId, DisclosedContract) = {
+
+    import env.*
+
+    val quoteV1Cid = JavaDecodeUtil
+      .decodeAllCreated(v1.upgrade.Quote.COMPANION)(
+        participant1.ledger_api.javaapi.commands.submit(
+          Seq(alice),
+          new Quote(
+            Seq(alice.toProtoPrimitive).asJava,
+            observer.map(_.toProtoPrimitive).toList.asJava,
+            "S1",
+            value,
+          ).create.commands.overridePackageId(v1.upgrade.Quote.PACKAGE_ID).asScala.toSeq,
+        )
+      )
+      .loneElement
+      .id
+
+    val disclosedQuote = {
+      val createdEvent = eventually() {
+        participant1.ledger_api.state.acs
+          .active_contracts_of_party(
+            alice,
+            filterTemplates = Seq(fromJavaIdentifier(v1.upgrade.Quote.COMPANION.TEMPLATE_ID)),
+            includeCreatedEventBlob = true,
+          )
+          .flatMap(_.createdEvent)
+          .filter(_.contractId == quoteV1Cid.contractId)
+          .loneElement
+      }
+
+      val synchronizerId = participant1.ledger_api.javaapi.event_query
+        .by_contract_id(quoteV1Cid.contractId, Seq(alice))
+        .getCreated
+        .getSynchronizerId
+
+      new com.daml.ledger.javaapi.data.DisclosedContract(
+        createdEvent.createdEventBlob,
+        synchronizerId,
+        Optional.of(
+          com.daml.ledger.javaapi.data.Identifier
+            .fromProto(toJavaProto(createdEvent.templateId.value))
+        ),
+        Optional.of(createdEvent.contractId),
+      )
+    }
+
+    val quoteV2Cid: v2.upgrade.Quote.ContractId =
+      new v2.upgrade.Quote.ContractId(quoteV1Cid.contractId)
+
+    (quoteV2Cid, disclosedQuote)
+  }
+
+  "Upgrading" when {
+
+    "Use upgraded package for views that have a fetch action description" in { implicit env =>
+      import env.*
+
+      val (quoteCid, _) = discloseQuote(99, observer = Some(bob))
+
+      val fetchQuoteCid = JavaDecodeUtil
+        .decodeAllCreated(v2.upgrade.FetchQuote.COMPANION)(
+          participant2.ledger_api.javaapi.commands.submit(
+            Seq(bob),
+            new v2.upgrade.FetchQuote(
+              bob.toProtoPrimitive,
+              bob.toProtoPrimitive,
+              bob.toProtoPrimitive,
+            ).create.commands.overridePackageId(v2.upgrade.FetchQuote.PACKAGE_ID).asScala.toSeq,
+          )
+        )
+        .loneElement
+        .id
+
+      // When Bob selects the quote, Alice should find out about the fetch, but not the exercise
+      participant2.ledger_api.javaapi.commands.submit(
+        Seq(bob),
+        fetchQuoteCid
+          .exerciseFQ_RawFetch(quoteCid)
+          .commands
+          .overridePackageId(v2.upgrade.FetchQuote.PACKAGE_ID)
+          .asScala
+          .toSeq,
+      )
+
+      // Fetch events are not observable via the ledger API but if the V1 package id was used
+      // a model conformance error would be produced on view reconstruction.
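+      // The check is thus implicit: had the V1 package id been used for the reconstructed
+      // view, the submission above would have failed, so reaching this point indicates
+      // that the upgraded package was used.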
+
+      succeed
+    }
+
+    // TODO(#23876) - remove ignore
+    "Not require that the V1 package is vetted in order to be able to confirm V2 package use" ignore {
+      implicit env =>
+        import env.*
+
+        val (quoteCid, disclosedQuote) = discloseQuote(100)
+
+        val fetchQuoteCid = JavaDecodeUtil
+          .decodeAllCreated(v2.upgrade.FetchQuote.COMPANION)(
+            participant3.ledger_api.javaapi.commands.submit(
+              Seq(charlie),
+              new v2.upgrade.FetchQuote(
+                charlie.toProtoPrimitive,
+                charlie.toProtoPrimitive,
+                charlie.toProtoPrimitive,
+              ).create.commands.overridePackageId(v2.upgrade.FetchQuote.PACKAGE_ID).asScala.toSeq,
+            )
+          )
+          .loneElement
+          .id
+
+        participant3.ledger_api.javaapi.commands.submit(
+          Seq(charlie),
+          fetchQuoteCid
+            .exerciseFQ_ExFetch(quoteCid)
+            .commands
+            .overridePackageId(v2.upgrade.FetchQuote.PACKAGE_ID)
+            .asScala
+            .toSeq,
+          disclosedContracts = Seq(disclosedQuote),
+        )
+
+        succeed
+    }
+
+    // TODO(#23876) - remove ignore
+    "Not require the V1 package to submit a V1 contract used in a V2 context" ignore {
+      implicit env =>
+        import env.*
+        val (quoteCid, disclosedQuote) = discloseQuote(101)
+
+        val fetchQuoteCid = JavaDecodeUtil
+          .decodeAllCreated(v2.upgrade.FetchQuote.COMPANION)(
+            participant4.ledger_api.javaapi.commands.submit(
+              Seq(dan),
+              new v2.upgrade.FetchQuote(
+                dan.toProtoPrimitive,
+                dan.toProtoPrimitive,
+                dan.toProtoPrimitive,
+              ).create.commands.asScala.toSeq,
+            )
+          )
+          .loneElement
+          .id
+
+        // Submission will fail if the V1 package is needed
+        participant4.ledger_api.javaapi.commands.submit(
+          Seq(dan),
+          fetchQuoteCid.exerciseFQ_ExFetch(quoteCid).commands.asScala.toSeq,
+          disclosedContracts = Seq(disclosedQuote),
+        )
+
+        succeed
+    }
+
+    // TODO(#23876) - remove ignore
+    "Not require the V1 package to observe the use of a disclosed V1 contract used in a V2 context" ignore {
+      implicit env =>
+        import env.*
+        val (quoteCid, disclosedQuote) = discloseQuote(102)
+
+        val fetchQuoteCid = JavaDecodeUtil
+          .decodeAllCreated(v2.upgrade.FetchQuote.COMPANION)(
+            participant1.ledger_api.javaapi.commands.submit(
+              Seq(alice),
+              new v2.upgrade.FetchQuote(
+                alice.toProtoPrimitive,
+                alice.toProtoPrimitive,
+                dan.toProtoPrimitive,
+              ).create.commands.asScala.toSeq,
+            )
+          )
+          .loneElement
+          .id
+
+        // When the observer (dan) attempts to validate the confirmed transaction
+        // it will fail if the V1 package is required.
+        participant1.ledger_api.javaapi.commands.submit(
+          Seq(alice),
+          fetchQuoteCid.exerciseFQ_ConFetch(quoteCid).commands.asScala.toSeq,
+          disclosedContracts = Seq(disclosedQuote),
+        )
+
+        succeed
+    }
+
+    // TODO(#23876) - remove ignore
+    "Not require the V1 package to confirm the use of a disclosed V1 contract used in a V2 context" ignore {
+      implicit env =>
+        import env.*
+        val (quoteCid, disclosedQuote) = discloseQuote(102)
+
+        val aliceFetchQuoteCid = JavaDecodeUtil
+          .decodeAllCreated(v2.upgrade.FetchQuote.COMPANION)(
+            participant1.ledger_api.javaapi.commands.submit(
+              Seq(alice),
+              new v2.upgrade.FetchQuote(
+                alice.toProtoPrimitive,
+                alice.toProtoPrimitive,
+                dan.toProtoPrimitive,
+              ).create.commands.asScala.toSeq,
+            )
+          )
+          .loneElement
+          .id
+
+        val aliceBobFetchQuoteCid = JavaDecodeUtil
+          .decodeAllCreated(v2.upgrade.FetchQuote.COMPANION)(
+            participant4.ledger_api.javaapi.commands.submit(
+              Seq(dan),
+              aliceFetchQuoteCid.exerciseFQ_ObserverConfirms().commands.asScala.toSeq,
+            )
+          )
+          .loneElement
+          .id
+
+        // When the confirmer (dan) attempts to validate the transaction
+        // it will fail if the V1 package is required.
+ participant1.ledger_api.javaapi.commands.submit( + Seq(alice), + aliceBobFetchQuoteCid.exerciseFQ_ConFetch(quoteCid).commands.asScala.toSeq, + disclosedContracts = Seq(disclosedQuote), + ) + + succeed + } + + } + +} + +final class ReferenceUpgradePackageAvailabilityIntegrationTestPostgres + extends UpgradePackageAvailabilityIntegrationTest { + registerPlugin(new UseReferenceBlockSequencer[Postgres](loggerFactory)) +} diff --git a/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/UpgradingBaseTest.scala b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/UpgradingBaseTest.scala new file mode 100644 index 0000000000..4770249430 --- /dev/null +++ b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/UpgradingBaseTest.scala @@ -0,0 +1,135 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.upgrading + +import com.daml.ledger.api.v2.commands +import com.daml.ledger.javaapi +import com.digitalasset.canton.BaseTest.getResourcePath +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.topology.transaction.VettedPackage +import com.digitalasset.canton.version.ProtocolVersion +import com.digitalasset.canton.{LfPackageId, LfPackageName} +import monocle.Monocle.toAppliedFocusOps +import org.scalactic.source +import org.scalatest.verbs.ShouldVerb +import org.scalatest.wordspec.FixtureAnyWordSpec + +object UpgradingBaseTest extends ShouldVerb { + lazy val UpgradeV1: String = getResourcePath("Upgrade-1.0.0.dar") + lazy val UpgradeV2: String = getResourcePath("Upgrade-2.0.0.dar") + lazy val AppUpgradeV1: String = getResourcePath("AppUpgrade-1.0.0.dar") + lazy val AppUpgradeV2: String = getResourcePath("AppUpgrade-2.0.0.dar") + + lazy val NonConformingV1: String = getResourcePath("NonConforming-1.0.0.dar") + lazy val NonConformingV2: String = getResourcePath("NonConforming-2.0.0.dar") + + lazy val AppInstallV1: String = getResourcePath("tests-app-install-1.0.0.dar") + lazy val AppInstallV2: String = getResourcePath("tests-app-install-2.0.0.dar") + lazy val FeaturedAppRightIface: String = getResourcePath( + "tests-featured-app-right-iface-1.0.0.dar" + ) + lazy val FeaturedAppRightImplV1: String = getResourcePath( + "tests-featured-app-right-impl-1.0.0.dar" + ) + lazy val FeaturedAppRightImplV2: String = getResourcePath( + "tests-featured-app-right-impl-2.0.0.dar" + ) + + lazy val HoldingV1: String = getResourcePath("tests-Holding-v1-1.0.0.dar") + lazy val HoldingV2: String = getResourcePath("tests-Holding-v2-1.0.0.dar") + lazy val TokenV1: String = getResourcePath("tests-Token-1.0.0.dar") + lazy val TokenV2: String = getResourcePath("tests-Token-2.0.0.dar") + lazy val TokenV3: String = getResourcePath("tests-Token-3.0.0.dar") + lazy val TokenV4: String = getResourcePath("tests-Token-4.0.0.dar") + + lazy val DvpAssetsV1: String = getResourcePath("dvp-assets-1.0.0.dar") + lazy val DvpAssetsV2: String = getResourcePath("dvp-assets-2.0.0.dar") + + lazy val DvpAssetFactoryV1: String = getResourcePath("dvp-asset-factory-1.0.0.dar") + lazy val DvpAssetFactoryV2: String = getResourcePath("dvp-asset-factory-2.0.0.dar") + + lazy val DvpOffersV1: String = getResourcePath("dvp-offer-1.0.0.dar") + lazy val DvpOffersV2: String = getResourcePath("dvp-offer-2.0.0.dar") 
+
+  lazy val IBaz: String = getResourcePath("ibaz-1.0.0.dar")
+  lazy val IBar: String = getResourcePath("ibar-1.0.0.dar")
+  lazy val BarV1: String = getResourcePath("bar-1.0.0.dar")
+  lazy val BarV2: String = getResourcePath("bar-2.0.0.dar")
+  lazy val BazV1: String = getResourcePath("baz-1.0.0.dar")
+  lazy val BazV2: String = getResourcePath("baz-2.0.0.dar")
+  lazy val FooV1: String = getResourcePath("foo-1.0.0.dar")
+  lazy val FooV2: String = getResourcePath("foo-2.0.0.dar")
+  lazy val FooV3: String = getResourcePath("foo-3.0.0.dar")
+  lazy val FooV4: String = getResourcePath("foo-4.0.0.dar")
+  lazy val UtilV1: String = getResourcePath("util-1.0.0.dar")
+  lazy val UtilV2: String = getResourcePath("util-2.0.0.dar")
+
+  /** The DARs above are used to test interface fetches, which, at the time of writing, are a
+    * 2.dev feature. For this reason, only tests running a dev version of Canton can use them.
+    *
+    * Once TransactionVersion.minFetchInterfaceId is a final LF version, this can be removed.
+    */
+  def testedPV(testedProtocolVersion: ProtocolVersion): Boolean =
+    testedProtocolVersion >= ProtocolVersion.dev
+
+  trait WhenPV {
+    self: FixtureAnyWordSpec =>
+    protected val testedProtocolVersion: ProtocolVersion
+    implicit class Wrapper(name: String) {
+      def whenUpgradeTestPV(f: => Unit)(implicit pos: source.Position): Unit =
+        if (testedPV(testedProtocolVersion)) {
+          name when f
+        } else {
+          ()
+        }
+    }
+  }
+
+  implicit class CommandsWithExplicitPackageId(commandJava: javaapi.data.Command) {
+    def withPackageId(packageId: String): javaapi.data.Command = {
+      val command = commands.Command.fromJavaProto(commandJava.toProtoCommand)
+      val res = command.command match {
+        case commands.Command.Command.Empty => command
+        case c: commands.Command.Command.Create =>
+          command.copy(command =
+            c.focus(_.value.templateId).modify(_.map(_.copy(packageId = packageId)))
+          )
+        case c: commands.Command.Command.Exercise =>
+          command.copy(command =
+            c.focus(_.value.templateId).modify(_.map(_.copy(packageId = packageId)))
+          )
+        case c: commands.Command.Command.ExerciseByKey =>
+          command.copy(command =
+            c.focus(_.value.templateId).modify(_.map(_.copy(packageId = packageId)))
+          )
+        case c: commands.Command.Command.CreateAndExercise =>
+          command.copy(command =
+            c.focus(_.value.templateId).modify(_.map(_.copy(packageId = packageId)))
+          )
+      }
+      javaapi.data.Command.fromProtoCommand(commands.Command.toJavaProto(res))
+    }
+  }
+
+  object Syntax {
+    implicit class LfPackageTypeConversions(pkgRefStr: String) {
+      def toPackageId: LfPackageId = LfPackageId.assertFromString(pkgRefStr)
+      def toPackageName: LfPackageName = LfPackageName.assertFromString(pkgRefStr)
+    }
+
+    implicit class PackageIdVettingExtensions(packageId: LfPackageId) {
+      def withNoVettingBounds: VettedPackage = VettedPackage(
+        packageId = packageId,
+        validFromInclusive = None,
+        validUntilExclusive = None,
+      )
+
+      def withVettingEndsAt(validUntil: CantonTimestamp): VettedPackage = VettedPackage(
+        packageId = packageId,
+        validFromInclusive = None,
+        validUntilExclusive = Some(validUntil),
+      )
+    }
+  }
+}
diff --git a/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/ViewsIntegrationTest.scala b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/ViewsIntegrationTest.scala
new file mode 100644
index 0000000000..4e24875b18
--- /dev/null
+++
b/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/ViewsIntegrationTest.scala @@ -0,0 +1,71 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.upgrading + +import com.digitalasset.canton.damltests.upgrade +import com.digitalasset.canton.integration.tests.upgrading.UpgradingBaseTest.CommandsWithExplicitPackageId +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentDefinition, + SharedEnvironment, + TestConsoleEnvironment, +} +import com.digitalasset.canton.topology.PartyId + +import scala.jdk.CollectionConverters.CollectionHasAsScala + +class ViewsIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment + with UpgradingBaseTest.WhenPV { + + private def party(name: String)(implicit env: TestConsoleEnvironment): PartyId = + env.participant1.parties.list(name).headOption.valueOrFail("where is " + name).party + + override lazy val environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P2_S1M1 + + "Inter view upgrading" whenUpgradeTestPV { + + "setup the stage" in { implicit env => + import env.* + + participant1.synchronizers.connect_local(sequencer1, alias = daName) + participant2.synchronizers.connect_local(sequencer1, alias = daName) + + participant1.dars.upload(UpgradingBaseTest.UpgradeV1) + participant1.dars.upload(UpgradingBaseTest.UpgradeV2) + + // V1 is not vetted on participant 2 + participant2.dars.upload(UpgradingBaseTest.UpgradeV2) + + participant1.parties.enable( + "alice", + synchronizeParticipants = Seq(participant2), + ) + participant2.parties.enable( + "bob", + synchronizeParticipants = Seq(participant1), + ) + } + + "not require the package used in the creating view" ignore { implicit env => + val alice = party("alice") + val bob = party("bob") + + env.participant1.ledger_api.javaapi.commands.submit( + Seq(alice), + new upgrade.v1.java.upgrade.InterView( + alice.toProtoPrimitive, + bob.toProtoPrimitive, + ).createAnd() + .exerciseDelegateChoice() + .commands() + .asScala + .map(_.withPackageId(upgrade.v1.java.upgrade.InterView.PACKAGE_ID)) + .toList, + ) + } + } +} diff --git a/canton/community/util-observability/src/main/scala/com/digitalasset/canton/logging/LoggingContextWithTrace.scala b/canton/community/util-observability/src/main/scala/com/digitalasset/canton/logging/LoggingContextWithTrace.scala index 21eb1821da..50b4a9d214 100644 --- a/canton/community/util-observability/src/main/scala/com/digitalasset/canton/logging/LoggingContextWithTrace.scala +++ b/canton/community/util-observability/src/main/scala/com/digitalasset/canton/logging/LoggingContextWithTrace.scala @@ -9,7 +9,7 @@ import com.daml.tracing.Telemetry import com.digitalasset.canton.logging.LoggingContextUtil.createLoggingContext import com.digitalasset.canton.tracing.TraceContext -/** Class to enrich [[com.digitalasset.canton.logging.ErrorLoggingContext]] with +/** Class to enrich [[com.daml.logging.LoggingContext]] with * [[com.digitalasset.canton.tracing.TraceContext]] */ class LoggingContextWithTrace( diff --git a/canton/community/util-observability/src/main/scala/com/digitalasset/canton/telemetry/ConfiguredOpenTelemetry.scala b/canton/community/util-observability/src/main/scala/com/digitalasset/canton/telemetry/ConfiguredOpenTelemetry.scala index 9a576ac115..11b035e63b 100644 --- 
a/canton/community/util-observability/src/main/scala/com/digitalasset/canton/telemetry/ConfiguredOpenTelemetry.scala
+++ b/canton/community/util-observability/src/main/scala/com/digitalasset/canton/telemetry/ConfiguredOpenTelemetry.scala
@@ -18,6 +18,11 @@ final case class ConfiguredOpenTelemetry(
     onDemandMetricsReader: OnDemandMetricsReader,
 ) extends AutoCloseable {
 
+  /** Omit all the details of the configured OpenTelemetry instance, as otherwise closing this
+    * instance results in all metrics being logged as part of shutdown.
+    */
+  override def toString: String = "ConfiguredOpenTelemetry(...)"
+
   override def close(): Unit = {
     openTelemetry.getSdkMeterProvider.close()
     openTelemetry.getSdkTracerProvider.close()
diff --git a/canton/community/util-observability/src/main/scala/com/digitalasset/canton/tracing/TracerProvider.scala b/canton/community/util-observability/src/main/scala/com/digitalasset/canton/tracing/TracerProvider.scala
index 42a59e6796..4086fdfec1 100644
--- a/canton/community/util-observability/src/main/scala/com/digitalasset/canton/tracing/TracerProvider.scala
+++ b/canton/community/util-observability/src/main/scala/com/digitalasset/canton/tracing/TracerProvider.scala
@@ -18,6 +18,7 @@ import io.opentelemetry.sdk.trace.export.{SimpleSpanProcessor, SpanExporter}
 import io.opentelemetry.sdk.trace.data.SpanData
 
 import java.util
+import scala.concurrent.blocking
 
 /** Provides tracer for span reporting and takes care of closing resources
   */
@@ -90,7 +91,11 @@ object NoopSpanExporter extends SpanExporter {
 object TracerProvider {
   object Factory {
     def apply(configuredOpenTelemetry: ConfiguredOpenTelemetry, name: String): TracerProvider =
-      new TracerProviderWithBuilder(configuredOpenTelemetry, name)
-
+      blocking(synchronized {
+        // Nodes of the same type are started in parallel, so multiple instances of this code
+        // can run concurrently. Without synchronization, different nodes of the same type
+        // could receive the same tracer and appear as the same node in the traces.
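+        // Note: `scala.concurrent.blocking` marks the lock acquisition as potentially
+        // blocking, so a work-stealing execution context can compensate with extra
+        // threads while the monitor is held.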
+ new TracerProviderWithBuilder(configuredOpenTelemetry, name) + }) } } diff --git a/canton/daml_dependencies.json b/canton/daml_dependencies.json index 7a04aa2488..6d9ac5a2d9 100644 --- a/canton/daml_dependencies.json +++ b/canton/daml_dependencies.json @@ -296,6 +296,7 @@ "org.mockito:mockito-inline" : "3.6.28", "org.mockito:mockito-scala_2.13" : "1.16.3", "org.objenesis:objenesis" : "3.1", + "org.openjdk.jol:jol-core" : "0.17", "org.opentest4j:opentest4j" : "1.2.0", "org.ow2.asm:asm" : "5.0.4", "org.parboiled:parboiled_2.13" : "2.5.1", diff --git a/cluster/deployment/config.yaml b/cluster/deployment/config.yaml index f8428e8818..f850cdaac3 100644 --- a/cluster/deployment/config.yaml +++ b/cluster/deployment/config.yaml @@ -85,6 +85,9 @@ monitoring: minRate: 0.95 mediators: acknowledgementLagSeconds: 900 +cloudArmor: + enabled: false + allRulesPreviewOnly: true multiValidator: postgresPvcSize: '100Gi' resources: @@ -138,6 +141,12 @@ svs: cometbftLogLevel: debug participant: bftSequencerConnection: true + validatorApp: + additionalEnvVars: + # enable https://github.com/hyperledger-labs/splice/pull/2499 + - name: ADDITIONAL_CONFIG_TOPOLOGY_METRICS_EXPORT + value: | + canton.validator-apps.validator_backend.automation.topology-metrics-polling-interval = 5m validators: validator-runbook: namespace: validator diff --git a/cluster/deployment/mock/config.yaml b/cluster/deployment/mock/config.yaml index efef0310d0..7b5fd7cc55 100644 --- a/cluster/deployment/mock/config.yaml +++ b/cluster/deployment/mock/config.yaml @@ -18,7 +18,6 @@ multiValidator: sv: synchronizer: skipInitialization: true - topologyChangeDelay: 250ms splitwell: maxDarVersion: '0.1.8' synchronizerMigration: @@ -130,6 +129,13 @@ svs: - name: CUSTOM_MOCK_ENV_VAR_NAME value: CUSTOM_MOCK_ENV_VAR_VALUE additionalJvmOptions: "-DcustomSequencerJvmFlag" + cloudSql: + tier: "sequencer-override-tier" + mediator: + additionalEnvVars: + - name: CUSTOM_MOCK_ENV_VAR_NAME + value: CUSTOM_MOCK_ENV_VAR_VALUE + additionalJvmOptions: "-DcustomMediatorJvmFlag" logging: appsLogLevel: WARN cantonLogLevel: INFO @@ -210,11 +216,18 @@ validators: enable: true maxParties: 1234 parallelism: 321 + preapprovalRetries: 9876 + preapprovalRetryDelayMs: 42 participant: additionalEnvVars: - name: ADDITIONAL_ENV_VAR_VALIDATOR_PARTICIPANT_NAME value: ADDITIONAL_ENV_VAR_VALIDATOR_PARTICIPANT_VALUE additionalJvmOptions: -Dparticipantoption + validatorApp: + additionalEnvVars: + - name: ADDITIONAL_ENV_VAR_VALIDATOR_NAME + value: ADDITIONAL_ENV_VAR_VALIDATOR_NAME + additionalJvmOptions: -Dparticipantoption extra-validator: namespace: validator partyHint: 'extra-validator' diff --git a/cluster/deployment/scratchnete/.envrc.vars b/cluster/deployment/scratchnete/.envrc.vars index 04156c286a..d3b454102c 100644 --- a/cluster/deployment/scratchnete/.envrc.vars +++ b/cluster/deployment/scratchnete/.envrc.vars @@ -39,13 +39,3 @@ export APPROVE_SV_RUNBOOK=true export CACHE_DEV_DOCKER_REGISTRY=$DEV_DOCKER_REGISTRY export SELF_HOSTED_FLUENT_BIT=true - -export DSO_SIZE=15 - -export SPLICE_DEPLOY_VALIDATOR_RUNBOOK=true -export SPLICE_DEPLOY_MULTI_VALIDATOR=true -export SPLICE_DEPLOY_SV_RUNBOOK=true -export SPLICE_DEPLOY_VALIDATOR1=true -export SPLICE_DEPLOY_SPLITWELL=true - -export MULTIVALIDATOR_SIZE=10 diff --git a/cluster/deployment/scratchnete/config.yaml b/cluster/deployment/scratchnete/config.yaml index 4899db9416..84f8451975 100644 --- a/cluster/deployment/scratchnete/config.yaml +++ b/cluster/deployment/scratchnete/config.yaml @@ -6,7 +6,6 @@ infra: pulumiProjectConfig: 
default: cloudSql: - enabled: true protected: false validator1: participantPruningSchedule: diff --git a/cluster/expected/canton-network/expected.json b/cluster/expected/canton-network/expected.json index e93a547b05..8ca7d7ac9f 100644 --- a/cluster/expected/canton-network/expected.json +++ b/cluster/expected/canton-network/expected.json @@ -110,14 +110,6 @@ "provider": "", "type": "kubernetes:core/v1:Secret" }, - { - "custom": true, - "id": "projects/test-project/global/networks/default", - "inputs": {}, - "name": "default", - "provider": "", - "type": "gcp:compute/network:Network" - }, { "custom": true, "id": "", @@ -1620,10 +1612,6 @@ { "name": "CUSTOM_MOCK_ENV_VAR_NAME", "value": "CUSTOM_MOCK_ENV_VAR_VALUE" - }, - { - "name": "ADDITIONAL_CONFIG_TOPOLOGY_CHANGE_DELAY", - "value": "canton.sv-apps.sv.topology-change-delay-duration=250ms" } ], "additionalJvmOptions": "-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.port=9010 -Dcom.sun.management.jmxremote.rmi.port=9010 -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=127.0.0.1 -DcustomSvAppJvmFlag", @@ -1809,7 +1797,12 @@ "namespace": "sv-1", "timeout": 600, "values": { - "additionalEnvVars": [], + "additionalEnvVars": [ + { + "name": "ADDITIONAL_CONFIG_TOPOLOGY_METRICS_EXPORT", + "value": "canton.validator-apps.validator_backend.automation.topology-metrics-polling-interval = 5m\n" + } + ], "additionalJvmOptions": "-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.port=9010 -Dcom.sun.management.jmxremote.rmi.port=9010 -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=127.0.0.1", "additionalUsers": [], "affinity": { @@ -2461,10 +2454,6 @@ { "name": "CUSTOM_MOCK_ENV_VAR_NAME", "value": "CUSTOM_MOCK_ENV_VAR_VALUE" - }, - { - "name": "ADDITIONAL_CONFIG_TOPOLOGY_CHANGE_DELAY", - "value": "canton.sv-apps.sv.topology-change-delay-duration=250ms" } ], "additionalJvmOptions": "-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.port=9010 -Dcom.sun.management.jmxremote.rmi.port=9010 -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=127.0.0.1 -DcustomSvAppJvmFlag", @@ -2623,7 +2612,12 @@ "namespace": "sv-da-1", "timeout": 600, "values": { - "additionalEnvVars": [], + "additionalEnvVars": [ + { + "name": "ADDITIONAL_CONFIG_TOPOLOGY_METRICS_EXPORT", + "value": "canton.validator-apps.validator_backend.automation.topology-metrics-polling-interval = 5m\n" + } + ], "additionalJvmOptions": "-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.port=9010 -Dcom.sun.management.jmxremote.rmi.port=9010 -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=127.0.0.1", "additionalUsers": [], "affinity": { diff --git a/cluster/expected/deployment/expected.json b/cluster/expected/deployment/expected.json index 3c25cfc426..624741f452 100644 --- a/cluster/expected/deployment/expected.json +++ b/cluster/expected/deployment/expected.json @@ -283,14 +283,6 @@ "provider": "", "type": "kubernetes:pulumi.com/v1:Stack" }, - { - "custom": true, - "id": "projects/test-project/global/networks/default", - "inputs": {}, - "name": "default", - "provider": "", - 
"type": "gcp:compute/network:Network" - }, { "custom": true, "id": "", diff --git a/cluster/expected/infra/expected.json b/cluster/expected/infra/expected.json index 51c7bb9bca..483c09534f 100644 --- a/cluster/expected/infra/expected.json +++ b/cluster/expected/infra/expected.json @@ -1,4 +1,60 @@ [ + { + "custom": true, + "id": "", + "inputs": { + "resourceServerIdentifier": "https://ledger_api.sv-1.test-stack.canton.network", + "scopes": [ + { + "description": "Access to the Ledger API", + "name": "daml_ledger_api" + } + ] + }, + "name": "LedgerApiScopessv1", + "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", + "type": "auth0:index/resourceServerScopes:ResourceServerScopes" + }, + { + "custom": true, + "id": "", + "inputs": { + "resourceServerIdentifier": "https://ledger_api.sv-da-1.test-stack.canton.network", + "scopes": [ + { + "description": "Access to the Ledger API", + "name": "daml_ledger_api" + } + ] + }, + "name": "LedgerApiScopessvda1", + "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", + "type": "auth0:index/resourceServerScopes:ResourceServerScopes" + }, + { + "custom": true, + "id": "", + "inputs": { + "allowOfflineAccess": true, + "identifier": "https://ledger_api.sv-1.test-stack.canton.network", + "name": "Ledger API for SV sv-1 on test-stack (Pulumi managed)" + }, + "name": "LedgerApisv1", + "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", + "type": "auth0:index/resourceServer:ResourceServer" + }, + { + "custom": true, + "id": "", + "inputs": { + "allowOfflineAccess": true, + "identifier": "https://ledger_api.sv-da-1.test-stack.canton.network", + "name": "Ledger API for SV sv-da-1 on test-stack (Pulumi managed)" + }, + "name": "LedgerApisvda1", + "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", + "type": "auth0:index/resourceServer:ResourceServer" + }, { "custom": true, "id": "", @@ -55,6 +111,30 @@ "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", "type": "auth0:index/client:Client" }, + { + "custom": true, + "id": "", + "inputs": { + "allowOfflineAccess": true, + "identifier": "https://sv.sv-1.test-stack.canton.network/api", + "name": "SV App API for SV sv-1 on test-stack (Pulumi managed)" + }, + "name": "SvAppApisv1", + "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", + "type": "auth0:index/resourceServer:ResourceServer" + }, + { + "custom": true, + "id": "", + "inputs": { + "allowOfflineAccess": true, + "identifier": "https://sv.sv-da-1.test-stack.canton.network/api", + "name": "SV App API for SV sv-da-1 on test-stack (Pulumi managed)" + }, + "name": "SvAppApisvda1", + "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", + "type": "auth0:index/resourceServer:ResourceServer" + }, { "custom": true, "id": "", @@ -175,6 +255,30 @@ "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::sv::undefined_id", "type": "auth0:index/client:Client" }, + { + "custom": true, + "id": "", + "inputs": { + "allowOfflineAccess": true, + "identifier": "https://validator.sv-1.test-stack.canton.network/api", + "name": "Validator App API for SV sv-1 on test-stack (Pulumi managed)" + }, + "name": "ValidatorAppApisv1", + "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", + "type": "auth0:index/resourceServer:ResourceServer" + }, + 
{ + "custom": true, + "id": "", + "inputs": { + "allowOfflineAccess": true, + "identifier": "https://validator.sv-da-1.test-stack.canton.network/api", + "name": "Validator App API for SV sv-da-1 on test-stack (Pulumi managed)" + }, + "name": "ValidatorAppApisvda1", + "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", + "type": "auth0:index/resourceServer:ResourceServer" + }, { "custom": true, "id": "", @@ -593,6 +697,28 @@ "provider": "", "type": "kubernetes:core/v1:Namespace" }, + { + "custom": true, + "id": "", + "inputs": { + "apiVersion": "v1", + "data": { + "4dabf18193072939515e22adb298388d": "1b47061264138c4ac30d75fd1eb44270", + "value": { + "postgresPassword": "" + } + }, + "kind": "Secret", + "metadata": { + "name": "grafana-pg-secret", + "namespace": "observability" + }, + "type": "Opaque" + }, + "name": "cn-app-observability-grafana-pg-secret", + "provider": "", + "type": "kubernetes:core/v1:Secret" + }, { "custom": true, "id": "", @@ -916,7 +1042,7 @@ "notification_policies.yaml": "apiVersion: 1\npolicies:\n - orgId: 1\n receiver: cn-ci-channel-notification\n routes:\n - receiver: cn-ci-channel-high-prio-notification\n object_matchers:\n - - priority\n - '='\n - high\n group_wait: 30s\n group_interval: 30m\n repeat_interval: 4h\n continue: true\n - receiver: cn-ci-channel-notification\n group_wait: 30s\n group_interval: 30m\n repeat_interval: 4h\n", "sv-status-report_alerts.yaml": "apiVersion: 1\ngroups:\n - orgId: 1\n name: sv status reports\n folder: canton-network\n interval: 1m\n rules:\n - uid: adlmhpz5iv4sgc\n title: Report Creation Time Lag\n condition: No recent report\n data:\n - refId: Report time lag\n relativeTimeRange:\n from: 600\n to: 0\n datasourceUid: prometheus\n model:\n datasource:\n type: prometheus\n uid: prometheus\n editorMode: code\n expr: time() - max by (report_publisher) (splice_sv_status_report_creation_time_us{namespace=~\".*\", report_publisher=~\".*\", canton_version=~\".*\"}) / 1000000\n instant: false\n interval: \"\"\n intervalMs: 30000\n legendFormat: '{{report_publisher}}'\n maxDataPoints: 43200\n range: true\n refId: Report time lag\n - refId: Latest report time lag\n relativeTimeRange:\n from: 600\n to: 0\n datasourceUid: __expr__\n model:\n conditions:\n - evaluator:\n params: []\n type: gt\n operator:\n type: and\n query:\n params:\n - B\n reducer:\n params: []\n type: last\n type: query\n datasource:\n type: __expr__\n uid: __expr__\n expression: Report time lag\n intervalMs: 1000\n maxDataPoints: 43200\n reducer: last\n refId: Latest report time lag\n settings:\n mode: dropNN\n type: reduce\n - refId: No recent report\n relativeTimeRange:\n from: 600\n to: 0\n datasourceUid: __expr__\n model:\n conditions:\n - evaluator:\n params:\n - 300\n type: gt\n operator:\n type: and\n query:\n params:\n - C\n reducer:\n params: []\n type: last\n type: query\n datasource:\n type: __expr__\n uid: __expr__\n expression: Latest report time lag\n intervalMs: 1000\n maxDataPoints: 43200\n refId: No recent report\n type: threshold\n dashboardUid: cdlm6c7fn7vuod\n panelId: 18\n noDataState: Alerting\n execErrState: Alerting\n for: 5m\n annotations:\n __dashboardUid__: cdlm6c7fn7vuod\n __panelId__: \"18\"\n severity: critical\n description: The SV {{ $labels.report_publisher }} has not submitted a status report recently\n runbook_url: \"\"\n summary: Status report creation time lag too high\n labels:\n \"team\": \"support\"\n isPaused: false\n", "templates.yaml": "# config file version\napiVersion: 1\n\n# List of 
templates to import or update\n# source https://community.grafana.com/t/working-configuration-example-for-alerts-templating-telegram-and-slack/80988\ntemplates:\n - name: slack\n template: |\n {{ define \"slack_title\" }}\n {{ $hasCritical := false }}{{ $hasWarning := false }}{{ $hasInfo := false }}{{ $hasOthers := false }}\n {{- range .Alerts.Firing -}}\n {{- if eq .Annotations.severity \"critical\" -}}\n {{- $hasCritical = true -}}\n {{- else if eq .Annotations.severity \"warning\" -}}\n {{- $hasWarning = true -}}\n {{- else if eq .Annotations.severity \"info\" -}}\n {{- $hasInfo = true -}}\n {{- else -}}\n {{- $hasOthers = true -}}\n {{- end -}}\n {{- end -}}\n\n mock\n {{ if gt (len .Alerts.Firing) 0 }}\n {{- if $hasCritical }}\n 🔥 {{ len .Alerts.Firing }} Alert{{ if gt (len .Alerts.Firing) 1 }}s{{ end }} firing\n {{- else if $hasWarning }}\n ⚠️ {{ len .Alerts.Firing }} Alert{{ if gt (len .Alerts.Firing) 1 }}s{{ end }} firing\n {{- else }}\n :information_source: {{ len .Alerts.Firing }} Alert{{ if gt (len .Alerts.Firing) 1 }}s{{ end }} firing\n {{- end }}\n {{ end }}\n {{ if gt (len .Alerts.Resolved) 0 }} ✅ {{ len .Alerts.Resolved }} alert(s) resolved {{ end }}\n {{ end }}\n\n {{ define \"slack_message\" }}\n {{ $hasCritical := false }}{{ $hasWarning := false }}{{ $hasInfo := false }}{{ $hasOthers := false }}\n {{- range .Alerts.Firing -}}\n {{- if eq .Annotations.severity \"critical\" -}}\n {{- $hasCritical = true -}}\n {{- else if eq .Annotations.severity \"warning\" -}}\n {{- $hasWarning = true -}}\n {{- else if eq .Annotations.severity \"info\" -}}\n {{- $hasInfo = true -}}\n {{- else -}}\n {{- $hasOthers = true -}}\n {{- end -}}\n {{- end -}}\n {{ if $hasCritical }} 🔥Critical alerts {{ range .Alerts.Firing }} {{- if eq .Annotations.severity \"critical\" -}} {{ template \"slack_alert_firing\" .}} {{ end }} {{ end }} {{ end }}\n {{ if $hasWarning }} ⚠️Warning alerts {{ range .Alerts.Firing }} {{- if eq .Annotations.severity \"warning\" -}} {{ template \"slack_alert_firing\" .}} {{ end }} {{ end }} {{ end }}\n {{ if $hasInfo }} :information_source:Info alerts {{ range .Alerts.Firing }} {{- if eq .Annotations.severity \"info\" -}} {{ template \"slack_alert_firing\" .}} {{ end }} {{ end }} {{ end }}\n {{ if $hasOthers }} Other alerts {{ range .Alerts.Firing }} {{- if and (and (ne .Annotations.severity \"info\") (ne .Annotations.severity \"warning\")) (ne .Annotations.severity \"critical\") -}} {{ template \"slack_alert_firing\" . }} {{ end }} {{ end }} {{ end }}\n {{ if gt (len .Alerts.Resolved) 0 }} ✅Resolved Alerts {{ range .Alerts.Resolved }} {{ template \"slack_alert_resolved\" .}} {{ end }} {{ end }}\n {{ end }}\n\n {{ define \"slack_alert_firing\" }}\n *{{ .Labels.alertname }}*\n {{ .Annotations.summary }}\n {{ if .Annotations.description }}{{ .Annotations.description }}{{ end }}\n {{- if .Labels.service }}\n Service: {{ .Labels.service }}\n {{- end }}\n {{ template \"slack_gcloud_log_link\" . 
}}\n {{ end }}\n\n {{ define \"slack_alert_resolved\" }}\n *{{ .Labels.alertname }}*\n {{ if .Annotations.severity }}{{ .Annotations.severity }}{{ end }}\n {{ .Annotations.summary }}\n {{ if .Annotations.description }}{{ .Annotations.description }}{{ end }}\n {{ end }}\n\n {{ define \"slack_gcloud_log_link\" }}{{ end }}\n\n {{ define \"slack_color\" -}}\n {{ $hasCritical := false }}{{ $hasWarning := false }}{{ $hasInfo := false }}{{ $hasOthers := false }}\n {{- range .Alerts.Firing -}}\n {{- if eq .Annotations.severity \"critical\" -}}\n {{- $hasCritical = true -}}\n {{- else if eq .Annotations.severity \"warning\" -}}\n {{- $hasWarning = true -}}\n {{- else if eq .Annotations.severity \"info\" -}}\n {{- $hasInfo = true -}}\n {{- else -}}\n {{- $hasOthers = true -}}\n {{- end -}}\n {{- end -}}\n {{ if eq .Status \"firing\" -}}\n {{ if $hasCritical -}}\n danger\n {{- else if $hasWarning -}}\n warning\n {{- else -}}\n #439FE0\n {{- end -}}\n {{ else -}}\n good\n {{- end }}\n {{- end }}\n\n {{ define \"support_email_message\" }}\n [ MAINNET-DA2-SVN-CRITICAL-ALERT 9f2b7e1a-4c3d-58b9-9f1e-df9c4a5b6e7d ]\n {{ if gt (len .Alerts.Firing) 0 }}**Firing**\n {{ template \"__text_alert_list\" .Alerts.Firing }}{{ if gt (len .Alerts.Resolved) 0 }}\n {{ end }}{{ end }}{{ if gt (len .Alerts.Resolved) 0 }}**Resolved**\n {{ template \"__text_alert_list\" .Alerts.Resolved }}{{ end }}{{ end }}\n", - "traffic_alerts.yaml": "apiVersion: 1\ngroups:\n - orgId: 1\n name: traffic\n folder: canton-network\n interval: 1m\n rules:\n - uid: adw5rd048zf9ca\n title: Wasted Traffic\n condition: wasted_traffic_threshold\n data:\n - refId: wasted_traffic\n relativeTimeRange:\n from: 600\n to: 0\n datasourceUid: prometheus\n model:\n editorMode: code\n expr: quantile(0.95, increase(daml_sequencer_traffic_control_wasted_traffic_total{member=~\"PAR::.*\",member!~\"PAR::Dummy-SV-1::.*\",member!~\"PAR::Dummy-SV-2::.*\"}[5m]))\n instant: true\n intervalMs: 1000\n legendFormat: __auto\n maxDataPoints: 43200\n range: false\n refId: wasted_traffic\n - refId: wasted_traffic_threshold\n relativeTimeRange:\n from: 600\n to: 0\n datasourceUid: __expr__\n model:\n conditions:\n - evaluator:\n params:\n - 1024\n type: gt\n operator:\n type: and\n query:\n params:\n - C\n reducer:\n params: []\n type: last\n type: query\n datasource:\n type: __expr__\n uid: __expr__\n expression: wasted_traffic\n intervalMs: 1000\n maxDataPoints: 43200\n refId: wasted_traffic_threshold\n type: threshold\n dashboardUid: fdnphvrryfq4gf\n panelId: 6\n noDataState: OK\n execErrState: Alerting\n for: 1m\n annotations:\n __dashboardUid__: fdnphvrryfq4gf\n __panelId__: \"6\"\n description: The 0.95 quantile of of traffic wasted across members exceeded the threshold with a value of {{ humanize1024 $values.wasted_traffic.Value }} in the last 5m\n severity: critical\n summary: 0.95 quantile of traffic wasted across members exceeded threshold ({{ humanize1024 1024 }}b over 5m)\n labels: {}\n isPaused: false\n - uid: 5dcddc9a5487\n title: Confirmation Requests Total\n condition: confirmation_requests_total_threshold\n data:\n - refId: confirmation_requests_total\n relativeTimeRange:\n from: 600\n to: 0\n datasourceUid: prometheus\n model:\n editorMode: code\n expr: max by (migration_id) (sum by (namespace, migration_id) (rate(daml_sequencer_block_events_total{type=\"send-confirmation-request\"}[5m])))\n instant: true\n intervalMs: 1000\n legendFormat: __auto\n maxDataPoints: 43200\n range: false\n refId: confirmation_requests_total\n - refId: 
confirmation_requests_total_threshold\n datasourceUid: __expr__\n model:\n conditions:\n - evaluator:\n params:\n - 10\n type: gt\n operator:\n type: and\n query:\n params: []\n reducer:\n params: []\n type: avg\n type: query\n datasource:\n name: Expression\n type: __expr__\n uid: __expr__\n expression: confirmation_requests_total\n hide: false\n refId: confirmation_requests_total_threshold\n type: threshold\n dashboardUid: fdjrxql2alblsd\n panelId: 1\n noDataState: OK\n execErrState: Alerting\n for: 1m\n annotations:\n __dashboardUid__: fdjrxql2alblsd\n __panelId__: \"1\"\n description: The total confirmation requests send rate on migration ID {{ $labels.migration_id }} exceeded the threshold with a value of {{ 10 }} in the last 5m\n severity: critical\n summary: Total confirmation requests on migration ID {{ $labels.migration_id }} exceeded the threshold (10 in the last 5m)\n labels: {}\n isPaused: false\n - uid: 88b8827c8d09\n title: Confirmation Requests By Member\n condition: confirmation_requests_by_member_threshold\n data:\n - refId: confirmation_requests_by_member\n relativeTimeRange:\n from: 600\n to: 0\n datasourceUid: prometheus\n model:\n editorMode: code\n expr: max by (member, migration_id) (rate(daml_sequencer_block_events_total{type=\"send-confirmation-request\"}[5m]))\n instant: true\n intervalMs: 1000\n legendFormat: __auto\n maxDataPoints: 43200\n range: false\n refId: confirmation_requests_by_member\n - refId: confirmation_requests_by_member_threshold\n datasourceUid: __expr__\n model:\n conditions:\n - evaluator:\n params:\n - 10\n type: gt\n operator:\n type: and\n query:\n params: []\n reducer:\n params: []\n type: avg\n type: query\n datasource:\n name: Expression\n type: __expr__\n uid: __expr__\n expression: confirmation_requests_by_member\n hide: false\n refId: confirmation_requests_by_member_threshold\n type: threshold\n dashboardUid: fdjrxql2alblsd\n panelId: 1\n noDataState: OK\n execErrState: Alerting\n for: 1m\n annotations:\n __dashboardUid__: fdjrxql2alblsd\n __panelId__: \"1\"\n description: The confirmation requests send rate of member {{ $labels.member }} on migration ID {{ $labels.migration_id }} exceeded the threshold with a value of {{ 10 }} in the last 5m\n severity: critical\n summary: Confirmation requests by member {{ $labels.member }} on migration ID {{ $labels.migration_id }} exceeded the threshold (10 in the last 5m)\n labels: {}\n isPaused: false\n" + "traffic_alerts.yaml": "apiVersion: 1\ngroups:\n - orgId: 1\n name: traffic\n folder: canton-network\n interval: 1m\n rules:\n - uid: 5dcddc9a5487\n title: Confirmation Requests Total\n condition: confirmation_requests_total_threshold\n data:\n - refId: confirmation_requests_total\n relativeTimeRange:\n from: 600\n to: 0\n datasourceUid: prometheus\n model:\n editorMode: code\n expr: max by (migration_id) (sum by (namespace, migration_id) (rate(daml_sequencer_block_events_total{type=\"send-confirmation-request\"}[5m])))\n instant: true\n intervalMs: 1000\n legendFormat: __auto\n maxDataPoints: 43200\n range: false\n refId: confirmation_requests_total\n - refId: confirmation_requests_total_threshold\n datasourceUid: __expr__\n model:\n conditions:\n - evaluator:\n params:\n - 10\n type: gt\n operator:\n type: and\n query:\n params: []\n reducer:\n params: []\n type: avg\n type: query\n datasource:\n name: Expression\n type: __expr__\n uid: __expr__\n expression: confirmation_requests_total\n hide: false\n refId: confirmation_requests_total_threshold\n type: threshold\n dashboardUid: 
fdjrxql2alblsd\n panelId: 1\n noDataState: OK\n execErrState: Alerting\n for: 1m\n annotations:\n __dashboardUid__: fdjrxql2alblsd\n __panelId__: \"1\"\n description: The total confirmation requests send rate on migration ID {{ $labels.migration_id }} exceeded the threshold with a value of {{ 10 }} in the last 5m\n severity: critical\n summary: Total confirmation requests on migration ID {{ $labels.migration_id }} exceeded the threshold (10 in the last 5m)\n labels: {}\n isPaused: false\n - uid: 88b8827c8d09\n title: Confirmation Requests By Member\n condition: confirmation_requests_by_member_threshold\n data:\n - refId: confirmation_requests_by_member\n relativeTimeRange:\n from: 600\n to: 0\n datasourceUid: prometheus\n model:\n editorMode: code\n expr: max by (member, migration_id) (rate(daml_sequencer_block_events_total{type=\"send-confirmation-request\"}[5m]))\n instant: true\n intervalMs: 1000\n legendFormat: __auto\n maxDataPoints: 43200\n range: false\n refId: confirmation_requests_by_member\n - refId: confirmation_requests_by_member_threshold\n datasourceUid: __expr__\n model:\n conditions:\n - evaluator:\n params:\n - 10\n type: gt\n operator:\n type: and\n query:\n params: []\n reducer:\n params: []\n type: avg\n type: query\n datasource:\n name: Expression\n type: __expr__\n uid: __expr__\n expression: confirmation_requests_by_member\n hide: false\n refId: confirmation_requests_by_member_threshold\n type: threshold\n dashboardUid: fdjrxql2alblsd\n panelId: 1\n noDataState: OK\n execErrState: Alerting\n for: 1m\n annotations:\n __dashboardUid__: fdjrxql2alblsd\n __panelId__: \"1\"\n description: The confirmation requests send rate of member {{ $labels.member }} on migration ID {{ $labels.migration_id }} exceeded the threshold with a value of {{ 10 }} in the last 5m\n severity: critical\n summary: Confirmation requests by member {{ $labels.member }} on migration ID {{ $labels.migration_id }} exceeded the threshold (10 in the last 5m)\n labels: {}\n isPaused: false\n" }, "kind": "ConfigMap", "metadata": { @@ -963,12 +1089,13 @@ "data": { "acs-size.json": "{\n \"annotations\": {\n \"list\": [\n {\n \"builtIn\": 1,\n \"datasource\": {\n \"type\": \"grafana\",\n \"uid\": \"-- Grafana --\"\n },\n \"enable\": true,\n \"hide\": true,\n \"iconColor\": \"rgba(0, 211, 255, 1)\",\n \"name\": \"Annotations & Alerts\",\n \"type\": \"dashboard\"\n }\n ]\n },\n \"editable\": true,\n \"fiscalYearStartMonth\": 0,\n \"graphTooltip\": 0,\n \"id\": 3573,\n \"links\": [],\n \"panels\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"custom\": {\n \"align\": \"auto\",\n \"cellOptions\": {\n \"type\": \"auto\"\n },\n \"inspect\": false\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": [\n {\n \"matcher\": {\n \"id\": \"byName\",\n \"options\": \"Trend #A\"\n },\n \"properties\": [\n {\n \"id\": \"displayName\",\n \"value\": \"ACS size\"\n }\n ]\n }\n ]\n },\n \"gridPos\": {\n \"h\": 26,\n \"w\": 24,\n \"x\": 0,\n \"y\": 0\n },\n \"id\": 2,\n \"options\": {\n \"cellHeight\": \"sm\",\n \"footer\": {\n \"countRows\": false,\n \"fields\": \"\",\n \"reducer\": [\n \"sum\"\n ],\n \"show\": false\n },\n \"showHeader\": true,\n \"sortBy\": [\n {\n \"desc\": true,\n \"displayName\": \"ACS size\"\n }\n ]\n },\n \"pluginVersion\": \"12.0.2\",\n 
\"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"sum by (namespace, store_name, store_party) (splice_store_acs_size{namespace=~\\\"$namespace\\\",store_name=~\\\"$store_name\\\"})\",\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"ACS Size\",\n \"transformations\": [\n {\n \"id\": \"timeSeriesTable\",\n \"options\": {\n \"A\": {\n \"timeField\": \"Time\"\n }\n }\n }\n ],\n \"type\": \"table\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"none\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 20,\n \"w\": 24,\n \"x\": 0,\n \"y\": 26\n },\n \"id\": 1,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"sum without (__name__,endpoint,instance,job,migration,otel_scope) (splice_store_acs_size{namespace=~\\\"$namespace\\\",store_name=~\\\"$store_name\\\"})\",\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Splice Store ACS Size\",\n \"type\": \"timeseries\"\n }\n ],\n \"preload\": false,\n \"refresh\": \"\",\n \"schemaVersion\": 41,\n \"tags\": [],\n \"templating\": {\n \"list\": [\n {\n \"allValue\": \".*\",\n \"current\": {\n \"text\": \"All\",\n \"value\": \"$__all\"\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(splice_store_acs_size,namespace)\",\n \"includeAll\": true,\n \"multi\": true,\n \"name\": \"namespace\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(splice_store_acs_size,namespace)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n },\n {\n \"allValue\": \".*\",\n \"current\": {\n \"text\": \"All\",\n \"value\": \"$__all\"\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(splice_store_acs_size,store_name)\",\n \"includeAll\": true,\n \"multi\": true,\n \"name\": \"store_name\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": 
\"label_values(splice_store_acs_size,store_name)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n }\n ]\n },\n \"time\": {\n \"from\": \"now-1h\",\n \"to\": \"now\"\n },\n \"timepicker\": {},\n \"timezone\": \"\",\n \"title\": \"Splice Store ACS Size\",\n \"uid\": \"dduss3xr5or28c\",\n \"version\": 1\n}\n", "automations.json": "{\n \"annotations\": {\n \"list\": [\n {\n \"builtIn\": 1,\n \"datasource\": {\n \"type\": \"grafana\",\n \"uid\": \"-- Grafana --\"\n },\n \"enable\": true,\n \"hide\": true,\n \"iconColor\": \"rgba(0, 211, 255, 1)\",\n \"name\": \"Annotations & Alerts\",\n \"type\": \"dashboard\"\n }\n ]\n },\n \"description\": \"Track the trigger automations being run by Splice Apps\",\n \"editable\": true,\n \"fiscalYearStartMonth\": 0,\n \"graphTooltip\": 0,\n \"id\": 8872,\n \"links\": [],\n \"panels\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"runs / second\"\n },\n \"overrides\": [\n {\n \"matcher\": {\n \"id\": \"byRegexp\",\n \"options\": \"/.*failure/\"\n },\n \"properties\": [\n {\n \"id\": \"custom.fillOpacity\",\n \"value\": 50\n }\n ]\n }\n ]\n },\n \"gridPos\": {\n \"h\": 10,\n \"w\": 24,\n \"x\": 0,\n \"y\": 0\n },\n \"id\": 18,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true,\n \"sortBy\": \"Last *\",\n \"sortDesc\": true\n },\n \"tooltip\": {\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"rate(splice_trigger_completed_total{namespace=~\\\"$namespace\\\", node_type=~\\\"$app\\\", trigger_name=~\\\"$trigger_name\\\", outcome=~\\\"success|failure\\\"}[$__rate_interval])\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": false,\n \"instant\": false,\n \"legendFormat\": \"{{node_name}} {{trigger_name}} {{outcome}} \",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Runs\",\n \"type\": \"timeseries\"\n },\n {\n \"collapsed\": true,\n \"gridPos\": {\n \"h\": 1,\n \"w\": 24,\n \"x\": 0,\n \"y\": 10\n },\n \"id\": 3,\n \"panels\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"The rate of successful iterations completed\",\n \"fieldConfig\": {\n \"defaults\": {\n 
\"color\": {\n \"mode\": \"thresholds\"\n },\n \"mappings\": [],\n \"noValue\": \"0\",\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 7,\n \"w\": 4,\n \"x\": 0,\n \"y\": 11\n },\n \"id\": 5,\n \"options\": {\n \"colorMode\": \"none\",\n \"graphMode\": \"area\",\n \"justifyMode\": \"center\",\n \"orientation\": \"auto\",\n \"percentChangeColorMode\": \"standard\",\n \"reduceOptions\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"fields\": \"\",\n \"values\": false\n },\n \"showPercentChange\": false,\n \"textMode\": \"auto\",\n \"wideLayout\": true\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"exemplar\": false,\n \"expr\": \"sum(histogram_sum(rate(splice_trigger_latency_duration_seconds{namespace=~\\\"$namespace\\\", node_type=~\\\"$app\\\", trigger_name=~\\\"$trigger_name\\\", trigger_type=\\\"$trigger_type\\\"}[$__rate_interval])))\",\n \"hide\": false,\n \"instant\": true,\n \"range\": false,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Traffic\",\n \"type\": \"stat\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"The rate at which the trigger(s) are failing with retryable errors\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 7,\n \"w\": 4,\n \"x\": 4,\n \"y\": 11\n },\n \"id\": 7,\n \"options\": {\n \"colorMode\": \"value\",\n \"graphMode\": \"area\",\n \"justifyMode\": \"auto\",\n \"orientation\": \"auto\",\n \"percentChangeColorMode\": \"standard\",\n \"reduceOptions\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"fields\": \"\",\n \"values\": false\n },\n \"showPercentChange\": false,\n \"textMode\": \"auto\",\n \"wideLayout\": true\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"sum(rate(splice_retries_failures{namespace=~\\\"$namespace\\\", node_type=~\\\"$app\\\", trigger_name=~\\\"$trigger_name\\\", trigger_type=\\\"$trigger_type\\\", error_kind=\\\"transient\\\"}[$__rate_interval]))\",\n \"instant\": false,\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Retries\",\n \"type\": \"stat\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"The rate at which trigger(s) are failing with fatal/non-retryable errors\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 7,\n \"w\": 4,\n \"x\": 8,\n \"y\": 11\n },\n \"id\": 8,\n \"options\": {\n \"colorMode\": \"value\",\n \"graphMode\": \"area\",\n \"justifyMode\": \"auto\",\n \"orientation\": \"auto\",\n \"percentChangeColorMode\": \"standard\",\n \"reduceOptions\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"fields\": \"\",\n \"values\": false\n },\n \"showPercentChange\": false,\n \"textMode\": \"auto\",\n \"wideLayout\": true\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": 
\"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"sum(rate(splice_retries_failures{namespace=~\\\"$namespace\\\", node_type=~\\\"$app\\\", trigger_name=~\\\"$trigger_name\\\", trigger_type=\\\"$trigger_type\\\", error_kind=\\\"fatal\\\"}[$__rate_interval]))\",\n \"instant\": false,\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Errors\",\n \"type\": \"stat\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"The time it took to perform an iteration\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"s\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 7,\n \"w\": 12,\n \"x\": 12,\n \"y\": 11\n },\n \"id\": 6,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"histogram_quantile(0.95, rate(splice_trigger_latency_duration_seconds{namespace=~\\\"$namespace\\\", node_type=~\\\"$app\\\", trigger_name=~\\\"$trigger_name\\\", trigger_type=\\\"$trigger_type\\\"}[$__rate_interval]))\",\n \"instant\": false,\n \"legendFormat\": \"{{namespace}} {{node_type}} {{service}} {{trigger_name}}\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Latency\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"continuous-RdYlGr\"\n },\n \"custom\": {\n \"align\": \"auto\",\n \"cellOptions\": {\n \"type\": \"auto\"\n },\n \"inspect\": false\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n },\n {\n \"color\": \"#EAB839\",\n \"value\": 90\n },\n {\n \"color\": \"#6ED0E0\",\n \"value\": 100\n },\n {\n \"color\": \"#EF843C\",\n \"value\": 110\n },\n {\n \"color\": \"#E24D42\",\n \"value\": 120\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 8,\n \"x\": 0,\n \"y\": 18\n },\n \"id\": 10,\n \"options\": {\n \"cellHeight\": \"sm\",\n \"footer\": {\n \"countRows\": false,\n \"fields\": \"\",\n \"reducer\": [\n \"sum\"\n ],\n \"show\": false\n },\n \"frameIndex\": 0,\n \"showHeader\": true,\n \"sortBy\": []\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n 
{\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"exemplar\": false,\n \"expr\": \"sum by(trigger_name) (histogram_count(rate(splice_trigger_latency_duration_seconds{namespace=~\\\"$namespace\\\", node_type=~\\\"$app\\\", trigger_type=\\\"$trigger_type\\\"}[$__rate_interval])))\",\n \"format\": \"time_series\",\n \"instant\": false,\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Top 5 Active Triggers\",\n \"transformations\": [\n {\n \"id\": \"reduce\",\n \"options\": {\n \"includeTimeField\": false,\n \"labelsToFields\": true,\n \"mode\": \"seriesToRows\",\n \"reducers\": [\n \"sum\"\n ]\n }\n },\n {\n \"id\": \"sortBy\",\n \"options\": {\n \"fields\": {},\n \"sort\": [\n {\n \"desc\": true,\n \"field\": \"Total\"\n }\n ]\n }\n },\n {\n \"id\": \"organize\",\n \"options\": {\n \"excludeByName\": {\n \"Field\": true\n },\n \"indexByName\": {},\n \"renameByName\": {\n \"Total\": \"Traffic\",\n \"trigger_name\": \"Trigger\"\n }\n }\n },\n {\n \"id\": \"limit\",\n \"options\": {\n \"limitField\": 5\n }\n }\n ],\n \"type\": \"table\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"How busy triggers are within a certain time window\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n }\n ]\n },\n \"unit\": \"percentunit\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 16,\n \"x\": 8,\n \"y\": 18\n },\n \"id\": 9,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true,\n \"sortBy\": \"Last *\",\n \"sortDesc\": true\n },\n \"tooltip\": {\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"exemplar\": false,\n \"expr\": \"sum by(trigger_name, instance, node_type, trigger_type) (histogram_sum(rate(splice_trigger_latency_duration_seconds{namespace=~\\\"$namespace\\\", node_type=~\\\"$app\\\", trigger_name=~\\\"$trigger_name\\\", trigger_type=\\\"$trigger_type\\\"}[$__rate_interval])))\",\n \"instant\": false,\n \"legendFormat\": \"{{node_type}} {{instance}} {{trigger_name}}\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Saturation\",\n \"type\": \"timeseries\"\n }\n ],\n \"repeat\": \"trigger_type\",\n \"title\": \"Triggers $trigger_type\",\n \"type\": \"row\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": 
\"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 24,\n \"x\": 0,\n \"y\": 12\n },\n \"id\": 22,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"sum(rate(splice_trigger_attempted_total{isDsoDelegateTrigger=~\\\"$isDsoDelegateTrigger\\\", trigger_name=~\\\"$trigger_name\\\", namespace=~\\\"$namespace\\\", node_type=~\\\"$app\\\"}[$__rate_interval]))\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"legendFormat\": \"total attempts\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"sum(rate(splice_trigger_attempted_total{statusCode!~\\\"OK\\\", isDsoDelegateTrigger=~\\\"$isDsoDelegateTrigger\\\", namespace=~\\\"$namespace\\\", node_type=~\\\"$app\\\", trigger_name=~\\\"$trigger_name\\\", contentionFailure=\\\"true\\\"}[$__rate_interval]))\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"legendFormat\": \"total contentions\",\n \"range\": true,\n \"refId\": \"B\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Trigger attempts\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 
24,\n \"x\": 0,\n \"y\": 20\n },\n \"id\": 20,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"last\"\n ],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"sum by(errorCodeId, namespace, node_type, trigger_name) (rate(splice_trigger_attempted_total{namespace=~\\\"$namespace\\\", node_type=~\\\"$app\\\", trigger_name=~\\\"$trigger_name\\\", isDsoDelegateTrigger=~\\\"$isDsoDelegateTrigger\\\", contentionFailure=\\\"true\\\"}[$__rate_interval]))\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Failed trigger attempts with contention errors\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 24,\n \"x\": 0,\n \"y\": 28\n },\n \"id\": 21,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"last\"\n ],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"sum by (errorCodeId, namespace, node_type, trigger_name) (rate(splice_trigger_attempted_total{namespace=~\\\"$namespace\\\",node_type=~\\\"$app\\\",trigger_name=~\\\"$trigger_name\\\",isDsoDelegateTrigger=~\\\"$isDsoDelegateTrigger\\\", statusCode!~\\\"OK\\\", contentionFailure=\\\"false\\\"}[$__rate_interval]))\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"B\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Failed trigger attempts with non-contention errors\",\n \"type\": \"timeseries\"\n }\n ],\n \"preload\": false,\n \"refresh\": \"\",\n \"schemaVersion\": 40,\n \"tags\": [],\n \"templating\": {\n \"list\": [\n {\n \"current\": {\n \"text\": \"All\",\n \"value\": \"$__all\"\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": 
\"prometheus\"\n },\n \"definition\": \"label_values(splice_trigger_completed_total,namespace)\",\n \"includeAll\": true,\n \"label\": \"Namespace\",\n \"name\": \"namespace\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(splice_trigger_completed_total,namespace)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n },\n {\n \"current\": {\n \"text\": \"All\",\n \"value\": \"$__all\"\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(splice_trigger_completed_total{namespace=~\\\"$namespace\\\"},node_type)\",\n \"includeAll\": true,\n \"label\": \"App\",\n \"name\": \"app\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(splice_trigger_completed_total{namespace=~\\\"$namespace\\\"},node_type)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n },\n {\n \"current\": {\n \"text\": \"All\",\n \"value\": \"$__all\"\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(splice_trigger_completed_total{namespace=~\\\"$namespace\\\", node_type=~\\\"$app\\\"},trigger_name)\",\n \"includeAll\": true,\n \"label\": \"Trigger Name\",\n \"name\": \"trigger_name\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(splice_trigger_completed_total{namespace=~\\\"$namespace\\\", node_type=~\\\"$app\\\"},trigger_name)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n },\n {\n \"current\": {\n \"text\": [\n \"taskbased\",\n \"polling\"\n ],\n \"value\": [\n \"taskbased\",\n \"polling\"\n ]\n },\n \"includeAll\": false,\n \"multi\": true,\n \"name\": \"trigger_type\",\n \"options\": [\n {\n \"selected\": true,\n \"text\": \"taskbased\",\n \"value\": \"taskbased\"\n },\n {\n \"selected\": true,\n \"text\": \"polling\",\n \"value\": \"polling\"\n }\n ],\n \"query\": \"taskbased, polling\",\n \"type\": \"custom\"\n },\n {\n \"current\": {\n \"text\": \"All\",\n \"value\": \"$__all\"\n },\n \"definition\": \"label_values(isDsoDelegateTrigger)\",\n \"includeAll\": true,\n \"label\": \"isDsoDelegateTrigger\",\n \"name\": \"isDsoDelegateTrigger\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(isDsoDelegateTrigger)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n }\n ]\n },\n \"time\": {\n \"from\": \"now-3h\",\n \"to\": \"now\"\n },\n \"timepicker\": {},\n \"timezone\": \"\",\n \"title\": \"Automations\",\n \"uid\": \"a3e1385f-6f03-46d9-908c-34aca0f507a6\",\n \"version\": 5,\n \"weekStart\": \"\"\n}\n", - "catchup.json": "{\n \"annotations\": {\n \"list\": [\n {\n \"builtIn\": 1,\n \"datasource\": {\n \"type\": \"grafana\",\n \"uid\": \"-- Grafana --\"\n },\n \"enable\": true,\n \"hide\": true,\n \"iconColor\": \"rgba(0, 211, 255, 1)\",\n \"name\": \"Annotations & Alerts\",\n \"type\": \"dashboard\"\n }\n ]\n },\n \"editable\": true,\n \"fiscalYearStartMonth\": 0,\n \"graphTooltip\": 0,\n \"id\": 3402,\n \"links\": [],\n \"panels\": [\n {\n \"collapsed\": false,\n \"gridPos\": {\n \"h\": 1,\n \"w\": 24,\n \"x\": 0,\n \"y\": 0\n },\n \"id\": 16,\n \"panels\": [],\n \"title\": \"Client Delay\",\n \"type\": \"row\"\n },\n {\n \"datasource\": {\n \"type\": 
\"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"description\": \"Delay on event processing of a sequencer, compared to the collective\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"ms\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 11,\n \"w\": 24,\n \"x\": 0,\n \"y\": 1\n },\n \"id\": 1,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true,\n \"sortBy\": \"Last *\",\n \"sortDesc\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"min by (namespace, job) (daml_sequencer_block_delay{namespace=~\\\"$namespace\\\", component=\\\"sequencer\\\",job=~\\\"global-domain-$migration-sequencer\\\"})\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{namespace}} migration $migration\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"exemplar\": false,\n \"expr\": \"max by (namespace, job) (delta(daml_sequencer_block_delay{namespace=~\\\"$namespace\\\", component=\\\"sequencer\\\",job=~\\\"global-domain-$migration-sequencer\\\"}[5m])) / 5\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"processing time {{namespace}} migration $migration / min\",\n \"range\": true,\n \"refId\": \"B\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Sequencer Client Delay\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"description\": \"Delay on event processing of the participant, compared to the sequencers it is connected to.\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n 
\"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"ms\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 0,\n \"y\": 12\n },\n \"id\": 3,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true,\n \"sortBy\": \"Last *\",\n \"sortDesc\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"min by (namespace)(daml_sequencer_client_handler_delay{namespace=~\\\"$namespace\\\",component=\\\"participant\\\"})\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{namespace}} {{node}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Participant Client Delay\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"description\": \"Delay on event processing of a mediator, compared to its corresponding sequencer\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"ms\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 12,\n \"y\": 12\n },\n \"id\": 2,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true,\n \"sortBy\": \"Last *\",\n \"sortDesc\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"min by (namespace, job)(daml_sequencer_client_handler_delay{namespace=~\\\"$namespace\\\", component=\\\"mediator\\\",job=~\\\"global-domain-$migration-mediator\\\"})\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{namespace}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": 
false\n }\n ],\n \"title\": \"Mediator Client Delay\",\n \"type\": \"timeseries\"\n },\n {\n \"collapsed\": false,\n \"gridPos\": {\n \"h\": 1,\n \"w\": 24,\n \"x\": 0,\n \"y\": 20\n },\n \"id\": 10,\n \"panels\": [],\n \"title\": \"CometBFT\",\n \"type\": \"row\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"decimals\": 2,\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"cps\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 7,\n \"w\": 24,\n \"x\": 0,\n \"y\": 21\n },\n \"id\": 11,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": false\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"sum by(namespace, job) (rate(daml_sequencer_block_events_total{namespace=~\\\"$namespace\\\",job=~\\\"global-domain-$migration-sequencer\\\"}[$__rate_interval]))\",\n \"format\": \"time_series\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"interval\": \"\",\n \"intervalFactor\": 1,\n \"legendFormat\": \"{{namespace}} {{job}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Sequencer processing event Rate\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"decimals\": 2,\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"cps\"\n },\n \"overrides\": 
[]\n },\n \"gridPos\": {\n \"h\": 7,\n \"w\": 24,\n \"x\": 0,\n \"y\": 28\n },\n \"id\": 7,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": false\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"rate(daml_sequencer_block_height{namespace=~\\\"$namespace\\\", job=~\\\"global-domain-$migration-sequencer\\\"}[$__rate_interval])\",\n \"format\": \"time_series\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"interval\": \"\",\n \"intervalFactor\": 1,\n \"legendFormat\": \"{{namespace}} {{job}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Sequencer processing Block Rate\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"decimals\": 2,\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"cps\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 6,\n \"w\": 24,\n \"x\": 0,\n \"y\": 35\n },\n \"id\": 6,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": false\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"rate(cometbft_consensus_height{namespace=~\\\"$namespace\\\", job=~\\\"global-domain-$migration-cometbft-cometbft-rpc\\\"}[$__rate_interval])\",\n \"format\": \"time_series\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"interval\": \"\",\n \"intervalFactor\": 1,\n \"legendFormat\": \"{{namespace}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Cometbft producing Block Rate\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": 
\"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 0,\n \"y\": 41\n },\n \"id\": 8,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"daml_sequencer_block_height{namespace=~\\\"$namespace\\\", job=~\\\"global-domain-$migration-sequencer\\\"}\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{job}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Sequencer Cometbft height \",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 12,\n \"y\": 41\n },\n \"id\": 4,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"cometbft_consensus_latest_block_height{namespace=~\\\"$namespace\\\", job=~\\\"global-domain-$migration-cometbft-cometbft-rpc\\\"}\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{chain_id}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"CometBFT Block Height\",\n \"type\": \"timeseries\"\n },\n 
{\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 0,\n \"y\": 49\n },\n \"id\": 5,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"cometbft_consensus_block_syncing{namespace=~\\\"$namespace\\\", job=~\\\"global-domain-$migration-cometbft-cometbft-rpc\\\"}\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{namespace}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"CometBFT Block Syncing\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 12,\n \"y\": 49\n },\n \"id\": 15,\n \"options\": {\n \"minVizHeight\": 75,\n \"minVizWidth\": 75,\n \"orientation\": \"auto\",\n \"reduceOptions\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"fields\": \"\",\n \"values\": false\n },\n \"showThresholdLabels\": false,\n \"showThresholdMarkers\": true,\n \"sizing\": \"auto\"\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"exemplar\": false,\n \"expr\": \"max by(namespace, migration) (label_replace(cometbft_consensus_latest_block_height{namespace=~\\\"$namespace\\\", job=~\\\"global-domain-$migration-cometbft-cometbft-rpc\\\"}, \\\"migration\\\", \\\"$1\\\", \\\"job\\\", \\\"global-domain-(\\\\\\\\d)-.*\\\")) - max by(namespace, migration) (label_replace(daml_sequencer_block_height{namespace=~\\\"$namespace\\\", job=~\\\"global-domain-$migration-sequencer\\\"}, \\\"migration\\\", \\\"$1\\\", \\\"job\\\", \\\"global-domain-(\\\\\\\\d)-.*\\\"))\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": true,\n 
\"legendFormat\": \"{{namespace}}, migration={{migration}}\",\n \"range\": false,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"CometBFT blocks to process\",\n \"type\": \"gauge\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 9,\n \"w\": 24,\n \"x\": 0,\n \"y\": 57\n },\n \"id\": 17,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"exemplar\": false,\n \"expr\": \"max by(namespace, migration) (label_replace(cometbft_consensus_latest_block_height{namespace=~\\\"$namespace\\\", job=~\\\"global-domain-$migration-cometbft-cometbft-rpc\\\"}, \\\"migration\\\", \\\"$1\\\", \\\"job\\\", \\\"global-domain-(\\\\\\\\d)-.*\\\")) - max by(namespace, migration) (label_replace(daml_sequencer_block_height{namespace=~\\\"$namespace\\\", job=~\\\"global-domain-$migration-sequencer\\\"}, \\\"migration\\\", \\\"$1\\\", \\\"job\\\", \\\"global-domain-(\\\\\\\\d)-.*\\\"))\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{namespace}}, migration={{migration}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"CometBFT blocks to process\",\n \"type\": \"timeseries\"\n }\n ],\n \"preload\": false,\n \"refresh\": \"30s\",\n \"schemaVersion\": 41,\n \"tags\": [],\n \"templating\": {\n \"list\": [\n {\n \"current\": {\n \"text\": \"Prometheus\",\n \"value\": \"prometheus\"\n },\n \"includeAll\": false,\n \"name\": \"DS\",\n \"options\": [],\n \"query\": \"prometheus\",\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"datasource\"\n },\n {\n \"current\": {\n \"text\": \"All\",\n \"value\": [\n \"$__all\"\n ]\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(daml_sequencer_client_handler_delay,namespace)\",\n \"includeAll\": true,\n \"label\": \"Namespace\",\n \"multi\": true,\n \"name\": \"namespace\",\n \"options\": [],\n \"query\": {\n \"query\": \"label_values(daml_sequencer_client_handler_delay,namespace)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": 
\"query\"\n },\n {\n \"current\": {\n \"text\": \"All\",\n \"value\": [\n \"$__all\"\n ]\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(daml_sequencer_client_handler_delay{namespace=~\\\"$namespace\\\"},job)\",\n \"includeAll\": true,\n \"multi\": true,\n \"name\": \"migration\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(daml_sequencer_client_handler_delay{namespace=~\\\"$namespace\\\"},job)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 2,\n \"regex\": \"/global-domain-(?\\\\d)-sequencer/g\",\n \"sort\": 1,\n \"type\": \"query\"\n }\n ]\n },\n \"time\": {\n \"from\": \"now-1h\",\n \"to\": \"now\"\n },\n \"timepicker\": {},\n \"timezone\": \"\",\n \"title\": \"Global Domain Catchup\",\n \"uid\": \"ca9df344-c699-4efe-83c2-5fb2639d96d9\",\n \"version\": 4\n}\n", + "catchup.json": "{\n \"annotations\": {\n \"list\": [\n {\n \"builtIn\": 1,\n \"datasource\": {\n \"type\": \"grafana\",\n \"uid\": \"-- Grafana --\"\n },\n \"enable\": true,\n \"hide\": true,\n \"iconColor\": \"rgba(0, 211, 255, 1)\",\n \"name\": \"Annotations & Alerts\",\n \"type\": \"dashboard\"\n }\n ]\n },\n \"editable\": true,\n \"fiscalYearStartMonth\": 0,\n \"graphTooltip\": 0,\n \"id\": 380,\n \"links\": [],\n \"panels\": [\n {\n \"collapsed\": false,\n \"gridPos\": {\n \"h\": 1,\n \"w\": 24,\n \"x\": 0,\n \"y\": 0\n },\n \"id\": 16,\n \"panels\": [],\n \"title\": \"Client Delay\",\n \"type\": \"row\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"description\": \"Delay on event processing of a sequencer, compared to the collective\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"log\": 2,\n \"type\": \"log\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": 0\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"ms\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 11,\n \"w\": 24,\n \"x\": 0,\n \"y\": 1\n },\n \"id\": 1,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true,\n \"sortBy\": \"Last *\",\n \"sortDesc\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.1.1\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"min by (namespace, job) (daml_sequencer_block_delay{namespace=~\\\"$namespace\\\", component=\\\"sequencer\\\",job=~\\\"global-domain-$migration-sequencer\\\"})\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": 
true,\n \"instant\": false,\n \"legendFormat\": \"{{namespace}} migration $migration\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Sequencer Client Delay\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"description\": \"Catchup speed of the sequencer in the last 5min\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"log\": 2,\n \"type\": \"log\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": 0\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"ms\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 11,\n \"w\": 24,\n \"x\": 0,\n \"y\": 12\n },\n \"id\": 18,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true,\n \"sortBy\": \"Last *\",\n \"sortDesc\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.1.1\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"exemplar\": false,\n \"expr\": \"max by (namespace, job) (delta(daml_sequencer_block_delay{namespace=~\\\"$namespace\\\", component=\\\"sequencer\\\",job=~\\\"global-domain-$migration-sequencer\\\"}[5m])) / 5\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"processing time {{namespace}} migration $migration / min\",\n \"range\": true,\n \"refId\": \"B\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Sequencer Catchup Speed\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"description\": \"Delay on event processing of the participant, compared to the sequencers it is connected to.\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"log\": 2,\n \"type\": \"log\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": 
[],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": 0\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"s\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 0,\n \"y\": 23\n },\n \"id\": 3,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true,\n \"sortBy\": \"Last *\",\n \"sortDesc\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.1.1\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"max by (namespace) (timestamp(daml_sequencer_client_handler_last_sequencing_time_micros{namespace=~\\\"$namespace\\\",component=\\\"participant\\\"}) - ((daml_sequencer_client_handler_last_sequencing_time_micros{namespace=~\\\"$namespace\\\",component=\\\"participant\\\"} > 0) / 1e6))\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{namespace}} {{node}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Participant Client Delay\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"description\": \"Delay on event processing of a mediator, compared to its corresponding sequencer\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"log\": 2,\n \"type\": \"log\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": 0\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"s\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 12,\n \"y\": 23\n },\n \"id\": 2,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true,\n \"sortBy\": \"Last *\",\n \"sortDesc\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.1.1\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"max by (namespace, job) (timestamp(daml_sequencer_client_handler_last_sequencing_time_micros{namespace=~\\\"$namespace\\\",component=\\\"mediator\\\",job=~\\\"global-domain-$migration-mediator\\\"}) - ((daml_sequencer_client_handler_last_sequencing_time_micros{namespace=~\\\"$namespace\\\",component=\\\"mediator\\\",job=~\\\"global-domain-$migration-mediator\\\"} > 0) / 1e6)) \",\n 
\"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{namespace}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Mediator Client Delay\",\n \"type\": \"timeseries\"\n },\n {\n \"collapsed\": false,\n \"gridPos\": {\n \"h\": 1,\n \"w\": 24,\n \"x\": 0,\n \"y\": 31\n },\n \"id\": 10,\n \"panels\": [],\n \"title\": \"CometBFT\",\n \"type\": \"row\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"decimals\": 2,\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": 0\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"cps\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 7,\n \"w\": 24,\n \"x\": 0,\n \"y\": 32\n },\n \"id\": 11,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": false\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.1.1\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"sum by(namespace, job) (rate(daml_sequencer_block_events_total{namespace=~\\\"$namespace\\\",job=~\\\"global-domain-$migration-sequencer\\\"}[$__rate_interval]))\",\n \"format\": \"time_series\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"interval\": \"\",\n \"intervalFactor\": 1,\n \"legendFormat\": \"{{namespace}} {{job}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Sequencer processing event Rate\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"decimals\": 2,\n \"mappings\": 
[],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": 0\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"cps\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 7,\n \"w\": 24,\n \"x\": 0,\n \"y\": 39\n },\n \"id\": 7,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": false\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.1.1\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"rate(daml_sequencer_block_height{namespace=~\\\"$namespace\\\", job=~\\\"global-domain-$migration-sequencer\\\"}[$__rate_interval])\",\n \"format\": \"time_series\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"interval\": \"\",\n \"intervalFactor\": 1,\n \"legendFormat\": \"{{namespace}} {{job}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Sequencer processing Block Rate\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"decimals\": 2,\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": 0\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"cps\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 6,\n \"w\": 24,\n \"x\": 0,\n \"y\": 46\n },\n \"id\": 6,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": false\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.1.1\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"rate(cometbft_consensus_height{namespace=~\\\"$namespace\\\", job=~\\\"global-domain-$migration-cometbft-cometbft-rpc\\\"}[$__rate_interval])\",\n \"format\": \"time_series\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"interval\": \"\",\n \"intervalFactor\": 1,\n \"legendFormat\": \"{{namespace}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Cometbft producing Block Rate\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n 
\"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": 0\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 0,\n \"y\": 52\n },\n \"id\": 8,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.1.1\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"daml_sequencer_block_height{namespace=~\\\"$namespace\\\", job=~\\\"global-domain-$migration-sequencer\\\"}\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{job}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Sequencer Cometbft height \",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": 0\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 12,\n \"y\": 52\n },\n \"id\": 4,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.1.1\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"cometbft_consensus_latest_block_height{namespace=~\\\"$namespace\\\", job=~\\\"global-domain-$migration-cometbft-cometbft-rpc\\\"}\",\n 
\"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{chain_id}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"CometBFT Block Height\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": 0\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 0,\n \"y\": 60\n },\n \"id\": 5,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.1.1\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"cometbft_consensus_block_syncing{namespace=~\\\"$namespace\\\", job=~\\\"global-domain-$migration-cometbft-cometbft-rpc\\\"}\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{namespace}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"CometBFT Block Syncing\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": 0\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 12,\n \"y\": 60\n },\n \"id\": 15,\n \"options\": {\n \"minVizHeight\": 75,\n \"minVizWidth\": 75,\n \"orientation\": \"auto\",\n \"reduceOptions\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"fields\": \"\",\n \"values\": false\n },\n \"showThresholdLabels\": false,\n \"showThresholdMarkers\": true,\n \"sizing\": \"auto\"\n },\n \"pluginVersion\": \"12.1.1\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"exemplar\": false,\n \"expr\": \"max by(namespace, migration) (label_replace(cometbft_consensus_latest_block_height{namespace=~\\\"$namespace\\\", job=~\\\"global-domain-$migration-cometbft-cometbft-rpc\\\"}, \\\"migration\\\", \\\"$1\\\", \\\"job\\\", \\\"global-domain-(\\\\\\\\d)-.*\\\")) - max by(namespace, migration) 
(label_replace(daml_sequencer_block_height{namespace=~\\\"$namespace\\\", job=~\\\"global-domain-$migration-sequencer\\\"}, \\\"migration\\\", \\\"$1\\\", \\\"job\\\", \\\"global-domain-(\\\\\\\\d)-.*\\\"))\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": true,\n \"legendFormat\": \"{{namespace}}, migration={{migration}}\",\n \"range\": false,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"CometBFT blocks to process\",\n \"type\": \"gauge\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": 0\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 9,\n \"w\": 24,\n \"x\": 0,\n \"y\": 68\n },\n \"id\": 17,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.1.1\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"exemplar\": false,\n \"expr\": \"max by(namespace, migration) (label_replace(cometbft_consensus_latest_block_height{namespace=~\\\"$namespace\\\", job=~\\\"global-domain-$migration-cometbft-cometbft-rpc\\\"}, \\\"migration\\\", \\\"$1\\\", \\\"job\\\", \\\"global-domain-(\\\\\\\\d)-.*\\\")) - max by(namespace, migration) (label_replace(daml_sequencer_block_height{namespace=~\\\"$namespace\\\", job=~\\\"global-domain-$migration-sequencer\\\"}, \\\"migration\\\", \\\"$1\\\", \\\"job\\\", \\\"global-domain-(\\\\\\\\d)-.*\\\"))\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{namespace}}, migration={{migration}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"CometBFT blocks to process\",\n \"type\": \"timeseries\"\n }\n ],\n \"preload\": false,\n \"refresh\": \"30s\",\n \"schemaVersion\": 41,\n \"tags\": [],\n \"templating\": {\n \"list\": [\n {\n \"current\": {\n \"text\": \"Prometheus\",\n \"value\": \"prometheus\"\n },\n \"includeAll\": false,\n \"name\": \"DS\",\n \"options\": [],\n \"query\": \"prometheus\",\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"datasource\"\n },\n {\n \"current\": {\n \"text\": \"All\",\n \"value\": [\n \"$__all\"\n ]\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(daml_sequencer_client_handler_delay,namespace)\",\n \"includeAll\": 
true,\n \"label\": \"Namespace\",\n \"multi\": true,\n \"name\": \"namespace\",\n \"options\": [],\n \"query\": {\n \"query\": \"label_values(daml_sequencer_client_handler_delay,namespace)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n },\n {\n \"current\": {\n \"text\": \"All\",\n \"value\": [\n \"$__all\"\n ]\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(daml_sequencer_client_handler_delay{namespace=~\\\"$namespace\\\"},job)\",\n \"includeAll\": true,\n \"multi\": true,\n \"name\": \"migration\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(daml_sequencer_client_handler_delay{namespace=~\\\"$namespace\\\"},job)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 2,\n \"regex\": \"/global-domain-(\\\\d)-sequencer/g\",\n \"sort\": 1,\n \"type\": \"query\"\n }\n ]\n },\n \"time\": {\n \"from\": \"now-1h\",\n \"to\": \"now\"\n },\n \"timepicker\": {},\n \"timezone\": \"\",\n \"title\": \"Global Domain Catchup\",\n \"uid\": \"ca9df344-c699-4efe-83c2-5fb2639d96d9\",\n \"version\": 4\n}\n", "cometbft-network-status.json": "{\n \"annotations\": {\n \"list\": [\n {\n \"builtIn\": 1,\n \"datasource\": {\n \"type\": \"datasource\",\n \"uid\": \"grafana\"\n },\n \"enable\": true,\n \"hide\": true,\n \"iconColor\": \"rgba(0, 211, 255, 1)\",\n \"name\": \"Annotations & Alerts\",\n \"target\": {\n \"limit\": 100,\n \"matchAny\": false,\n \"tags\": [],\n \"type\": \"dashboard\"\n },\n \"type\": \"dashboard\"\n }\n ]\n },\n \"description\": \"Health status of the CometBFT P2P Network as measured by incoming / outgoing \",\n \"editable\": true,\n \"fiscalYearStartMonth\": 0,\n \"graphTooltip\": 0,\n \"id\": 1283,\n \"links\": [],\n \"liveNow\": false,\n \"panels\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"Total peer-wise rate of bytes sent or received over all channels over the P2P network\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"fieldMinMax\": false,\n \"mappings\": [],\n \"min\": 0,\n \"thresholds\": {\n \"mode\": \"percentage\",\n \"steps\": [\n {\n \"color\": \"red\",\n \"value\": null\n },\n {\n \"color\": \"#EAB839\",\n \"value\": 1\n },\n {\n \"color\": \"green\",\n \"value\": 10\n }\n ]\n },\n \"unit\": \"KBs\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 13,\n \"w\": 24,\n \"x\": 0,\n \"y\": 0\n },\n \"id\": 66,\n \"options\": {\n \"minVizHeight\": 75,\n \"minVizWidth\": 75,\n \"orientation\": \"auto\",\n \"reduceOptions\": {\n \"calcs\": [\n \"last\"\n ],\n \"fields\": \"\",\n \"values\": false\n },\n \"showThresholdLabels\": false,\n \"showThresholdMarkers\": true,\n \"sizing\": \"auto\"\n },\n \"pluginVersion\": \"11.1.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"sum by (peer_id) (rate(cometbft_p2p_peer_receive_bytes_total{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=\\\"$instance\\\"}[$__rate_interval]) / 1000) + sum by (peer_id) (rate(cometbft_p2p_peer_send_bytes_total{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=\\\"$instance\\\"}[$__rate_interval]) / 1000)\",\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Total Network Throughput (current)\",\n 
\"type\": \"gauge\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"Peer-wise rate of bytes received over all channels over the P2P network\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"never\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"dashed\"\n }\n },\n \"fieldMinMax\": false,\n \"links\": [],\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 2000\n }\n ]\n },\n \"unit\": \"binBps\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 12,\n \"w\": 24,\n \"x\": 0,\n \"y\": 13\n },\n \"id\": 59,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"multi\",\n \"sort\": \"asc\"\n }\n },\n \"pluginVersion\": \"10.1.5\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"sum by (peer_id) (rate(cometbft_p2p_peer_receive_bytes_total{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=\\\"$instance\\\"}[$__rate_interval]))\",\n \"format\": \"time_series\",\n \"intervalFactor\": 1,\n \"legendFormat\": \"{{peer_id}}\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Incoming Network Throughput\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"Peer-wise rate of bytes sent over all channels over the P2P network\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"never\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"dashed\"\n }\n },\n \"fieldMinMax\": false,\n \"links\": [],\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 2000\n }\n ]\n },\n \"unit\": \"binBps\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 12,\n \"w\": 24,\n \"x\": 0,\n \"y\": 25\n },\n \"id\": 65,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"multi\",\n 
\"sort\": \"asc\"\n }\n },\n \"pluginVersion\": \"10.1.5\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"sum by (peer_id) (rate(cometbft_p2p_peer_send_bytes_total{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=\\\"$instance\\\"}[$__rate_interval]))\",\n \"format\": \"time_series\",\n \"intervalFactor\": 1,\n \"legendFormat\": \"{{peer_id}}\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Outgoing Network Throughput\",\n \"type\": \"timeseries\"\n }\n ],\n \"refresh\": \"\",\n \"schemaVersion\": 39,\n \"tags\": [\n \"Blockchain\"\n ],\n \"templating\": {\n \"list\": [\n {\n \"current\": {\n \"selected\": false,\n \"text\": \"default\",\n \"value\": \"default\"\n },\n \"hide\": 0,\n \"includeAll\": false,\n \"label\": \"Datasource\",\n \"multi\": false,\n \"name\": \"DS\",\n \"options\": [],\n \"query\": \"prometheus\",\n \"queryValue\": \"\",\n \"refresh\": 1,\n \"regex\": \"\",\n \"skipUrlSync\": false,\n \"type\": \"datasource\"\n },\n {\n \"current\": {\n \"selected\": false,\n \"text\": \"sv-1\",\n \"value\": \"sv-1\"\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"definition\": \"label_values(cometbft_p2p_message_receive_bytes_total,namespace)\",\n \"hide\": 0,\n \"includeAll\": false,\n \"label\": \"namespace\",\n \"multi\": false,\n \"name\": \"namespace\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(cometbft_p2p_message_receive_bytes_total,namespace)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 2,\n \"regex\": \"\",\n \"skipUrlSync\": false,\n \"sort\": 1,\n \"tagValuesQuery\": \"\",\n \"tagsQuery\": \"\",\n \"type\": \"query\",\n \"useTags\": false\n },\n {\n \"current\": {\n \"selected\": true,\n \"text\": \"cidaily-0-0.2.0-snapshot.20240725.6534.0.v112d3fc6\",\n \"value\": \"cidaily-0-0.2.0-snapshot.20240725.6534.0.v112d3fc6\"\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"definition\": \"label_values(cometbft_p2p_message_receive_bytes_total,chain_id)\",\n \"hide\": 0,\n \"includeAll\": false,\n \"label\": \"Chain ID\",\n \"multi\": false,\n \"name\": \"chain_id\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(cometbft_p2p_message_receive_bytes_total,chain_id)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"skipUrlSync\": false,\n \"sort\": 0,\n \"tagValuesQuery\": \"\",\n \"tagsQuery\": \"\",\n \"type\": \"query\",\n \"useTags\": false\n },\n {\n \"allValue\": \"\",\n \"current\": {\n \"selected\": false,\n \"text\": \"global-domain-0-cometbft-cometbft-rpc\",\n \"value\": \"global-domain-0-cometbft-cometbft-rpc\"\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"definition\": \"label_values(cometbft_p2p_message_receive_bytes_total{chain_id=\\\"$chain_id\\\"},job)\",\n \"hide\": 0,\n \"includeAll\": false,\n \"label\": \"Instance\",\n \"multi\": false,\n \"name\": \"instance\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(cometbft_p2p_message_receive_bytes_total{chain_id=\\\"$chain_id\\\"},job)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"skipUrlSync\": false,\n \"sort\": 5,\n \"tagValuesQuery\": \"\",\n \"tagsQuery\": \"\",\n \"type\": \"query\",\n \"useTags\": false\n }\n ]\n },\n \"time\": {\n \"from\": \"now-12h\",\n 
\"to\": \"now\"\n },\n \"timepicker\": {\n \"refresh_intervals\": [\n \"5s\",\n \"10s\",\n \"30s\",\n \"1m\",\n \"5m\",\n \"15m\",\n \"30m\",\n \"1h\",\n \"2h\",\n \"1d\"\n ],\n \"time_options\": [\n \"5m\",\n \"15m\",\n \"1h\",\n \"6h\",\n \"12h\",\n \"24h\",\n \"2d\",\n \"7d\",\n \"30d\"\n ]\n },\n \"timezone\": \"\",\n \"title\": \"CometBFT Network Status\",\n \"uid\": \"ddsuu1wnxwetcd\",\n \"version\": 7,\n \"weekStart\": \"\"\n}\n", "cometbft.json": "{\n \"annotations\": {\n \"list\": [\n {\n \"builtIn\": 1,\n \"datasource\": {\n \"type\": \"datasource\",\n \"uid\": \"grafana\"\n },\n \"enable\": true,\n \"hide\": true,\n \"iconColor\": \"rgba(0, 211, 255, 1)\",\n \"name\": \"Annotations & Alerts\",\n \"type\": \"dashboard\"\n }\n ]\n },\n \"description\": \"Internet of blockchains\",\n \"editable\": true,\n \"fiscalYearStartMonth\": 0,\n \"graphTooltip\": 0,\n \"id\": 2281,\n \"links\": [],\n \"panels\": [\n {\n \"collapsed\": false,\n \"gridPos\": {\n \"h\": 1,\n \"w\": 24,\n \"x\": 0,\n \"y\": 0\n },\n \"id\": 64,\n \"panels\": [],\n \"title\": \"$chain_id overview\",\n \"type\": \"row\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"decimals\": 0,\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"locale\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 4,\n \"w\": 6,\n \"x\": 0,\n \"y\": 1\n },\n \"id\": 4,\n \"maxDataPoints\": 100,\n \"options\": {\n \"colorMode\": \"value\",\n \"graphMode\": \"none\",\n \"justifyMode\": \"auto\",\n \"orientation\": \"horizontal\",\n \"percentChangeColorMode\": \"standard\",\n \"reduceOptions\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"fields\": \"\",\n \"values\": false\n },\n \"showPercentChange\": false,\n \"textMode\": \"auto\",\n \"wideLayout\": true\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"cometbft_consensus_height{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=\\\"$instance\\\"}\",\n \"format\": \"time_series\",\n \"hide\": false,\n \"instant\": true,\n \"interval\": \"30s\",\n \"intervalFactor\": 1,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Block Height\",\n \"type\": \"stat\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"decimals\": 0,\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"locale\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 4,\n \"w\": 6,\n \"x\": 6,\n \"y\": 1\n },\n \"id\": 40,\n \"maxDataPoints\": 100,\n \"options\": {\n \"colorMode\": \"value\",\n \"graphMode\": \"none\",\n \"justifyMode\": \"auto\",\n \"orientation\": \"horizontal\",\n \"percentChangeColorMode\": \"standard\",\n \"reduceOptions\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"fields\": \"\",\n \"values\": false\n },\n \"showPercentChange\": false,\n \"textMode\": \"auto\",\n \"wideLayout\": true\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n 
\"expr\": \"cometbft_consensus_total_txs{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=\\\"$instance\\\"}\",\n \"format\": \"time_series\",\n \"interval\": \"30s\",\n \"intervalFactor\": 1,\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Total Transactions\",\n \"type\": \"stat\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"decimals\": 2,\n \"mappings\": [\n {\n \"options\": {\n \"match\": \"null\",\n \"result\": {\n \"text\": \"N/A\"\n }\n },\n \"type\": \"special\"\n }\n ],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"s\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 4,\n \"w\": 6,\n \"x\": 12,\n \"y\": 1\n },\n \"id\": 65,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"rate(cometbft_state_block_processing_time_sum{chain_id=\\\"$chain_id\\\", job=\\\"$instance\\\"}[$__rate_interval]) / rate(cometbft_state_block_processing_time_count{chain_id=\\\"$chain_id\\\", job=\\\"$instance\\\"}[$__rate_interval])\",\n \"format\": \"time_series\",\n \"interval\": \"30s\",\n \"intervalFactor\": 1,\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Avg State Block Processing\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"decimals\": 0,\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"short\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 4,\n \"w\": 6,\n \"x\": 18,\n \"y\": 1\n },\n \"id\": 47,\n \"options\": {\n \"colorMode\": \"value\",\n \"graphMode\": \"area\",\n \"justifyMode\": \"auto\",\n \"orientation\": \"horizontal\",\n \"percentChangeColorMode\": \"standard\",\n \"reduceOptions\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"fields\": \"\",\n \"values\": false\n },\n \"showPercentChange\": false,\n \"textMode\": \"auto\",\n \"wideLayout\": true\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n \"expr\": 
\"cometbft_consensus_validators_power{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=\\\"$instance\\\"}\",\n \"format\": \"time_series\",\n \"instant\": false,\n \"interval\": \"30s\",\n \"intervalFactor\": 1,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Bonded Tokens\",\n \"type\": \"stat\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"decimals\": 0,\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"locale\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 4,\n \"w\": 12,\n \"x\": 0,\n \"y\": 5\n },\n \"id\": 66,\n \"maxDataPoints\": 100,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"cometbft_consensus_height{chain_id=\\\"$chain_id\\\", job=\\\"$instance\\\"}\",\n \"format\": \"time_series\",\n \"hide\": false,\n \"instant\": false,\n \"interval\": \"30s\",\n \"intervalFactor\": 1,\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Block Height\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"decimals\": 2,\n \"mappings\": [\n {\n \"options\": {\n \"match\": \"null\",\n \"result\": {\n \"text\": \"N/A\"\n }\n },\n \"type\": \"special\"\n }\n ],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 2\n }\n ]\n },\n \"unit\": \"s\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 4,\n \"w\": 12,\n \"x\": 12,\n 
\"y\": 5\n },\n \"id\": 39,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"histogram_quantile(0.999, sum by(le) (rate(cometbft_consensus_block_interval_seconds_bucket{namespace=\\\"$namespace\\\",chain_id=\\\"$chain_id\\\",job=\\\"$instance\\\"}[$__rate_interval])))\",\n \"format\": \"time_series\",\n \"interval\": \"30s\",\n \"intervalFactor\": 1,\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Avg Block Time\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 30,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"never\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"decimals\": 0,\n \"links\": [],\n \"mappings\": [],\n \"min\": 0,\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"locale\"\n },\n \"overrides\": [\n {\n \"matcher\": {\n \"id\": \"byName\",\n \"options\": \"Height for last 3 hours\"\n },\n \"properties\": [\n {\n \"id\": \"color\",\n \"value\": {\n \"fixedColor\": \"#447ebc\",\n \"mode\": \"fixed\"\n }\n }\n ]\n },\n {\n \"matcher\": {\n \"id\": \"byName\",\n \"options\": \"Total Transactions for last 3 hours\"\n },\n \"properties\": [\n {\n \"id\": \"color\",\n \"value\": {\n \"fixedColor\": \"#ef843c\",\n \"mode\": \"fixed\"\n }\n }\n ]\n }\n ]\n },\n \"gridPos\": {\n \"h\": 9,\n \"w\": 12,\n \"x\": 0,\n \"y\": 9\n },\n \"id\": 15,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\",\n \"max\",\n \"min\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"bottom\",\n \"showLegend\": true,\n \"width\": 350\n },\n \"tooltip\": {\n \"mode\": \"multi\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"cometbft_consensus_validators{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=\\\"$instance\\\"}\",\n \"format\": \"time_series\",\n \"instant\": false,\n \"interval\": \"\",\n \"intervalFactor\": 1,\n \"legendFormat\": \"Active\",\n \"refId\": \"A\"\n },\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"cometbft_consensus_missing_validators{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=\\\"$instance\\\"}\",\n \"format\": \"time_series\",\n \"interval\": \"\",\n \"intervalFactor\": 1,\n \"legendFormat\": \"Missing\",\n \"range\": true,\n \"refId\": \"B\"\n },\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n 
},\n \"editorMode\": \"code\",\n \"expr\": \"cometbft_consensus_byzantine_validators{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=\\\"$instance\\\"}\",\n \"format\": \"time_series\",\n \"intervalFactor\": 1,\n \"legendFormat\": \"Byzantine\",\n \"range\": true,\n \"refId\": \"C\"\n }\n ],\n \"title\": \"Validators\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 30,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"never\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"decimals\": 0,\n \"links\": [],\n \"mappings\": [],\n \"min\": 0,\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"short\"\n },\n \"overrides\": [\n {\n \"matcher\": {\n \"id\": \"byName\",\n \"options\": \"Height for last 3 hours\"\n },\n \"properties\": [\n {\n \"id\": \"color\",\n \"value\": {\n \"fixedColor\": \"#447ebc\",\n \"mode\": \"fixed\"\n }\n }\n ]\n },\n {\n \"matcher\": {\n \"id\": \"byName\",\n \"options\": \"Total Transactions for last 3 hours\"\n },\n \"properties\": [\n {\n \"id\": \"color\",\n \"value\": {\n \"fixedColor\": \"#ef843c\",\n \"mode\": \"fixed\"\n }\n }\n ]\n }\n ]\n },\n \"gridPos\": {\n \"h\": 9,\n \"w\": 12,\n \"x\": 12,\n \"y\": 9\n },\n \"id\": 48,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\",\n \"max\",\n \"min\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"bottom\",\n \"showLegend\": true,\n \"width\": 350\n },\n \"tooltip\": {\n \"mode\": \"multi\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"cometbft_consensus_validators_power{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=\\\"$instance\\\"}\",\n \"format\": \"time_series\",\n \"instant\": false,\n \"interval\": \"\",\n \"intervalFactor\": 1,\n \"legendFormat\": \"Online\",\n \"refId\": \"A\"\n },\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"cometbft_consensus_missing_validators_power{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=\\\"$instance\\\"}\",\n \"format\": \"time_series\",\n \"interval\": \"\",\n \"intervalFactor\": 1,\n \"legendFormat\": \"Missing\",\n \"range\": true,\n \"refId\": \"B\"\n },\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"cometbft_consensus_byzantine_validators_power{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=\\\"$instance\\\"}\",\n \"format\": \"time_series\",\n \"intervalFactor\": 1,\n \"legendFormat\": \"Byzantine\",\n \"range\": true,\n \"refId\": \"C\"\n }\n ],\n \"title\": \"Voting Power\",\n \"type\": \"timeseries\"\n },\n {\n 
\"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"continuous-RdYlGr\"\n },\n \"custom\": {\n \"fillOpacity\": 70,\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineWidth\": 0,\n \"spanNulls\": false\n },\n \"decimals\": 2,\n \"mappings\": [],\n \"max\": 1,\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"#e24d42\",\n \"value\": null\n }\n ]\n },\n \"unit\": \"short\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 7,\n \"w\": 12,\n \"x\": 0,\n \"y\": 18\n },\n \"id\": 67,\n \"options\": {\n \"alignValue\": \"left\",\n \"legend\": {\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"mergeValues\": true,\n \"rowHeight\": 0.9,\n \"showValue\": \"auto\",\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"expr\": \"cometbft_consensus_round_voting_power_percent{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", service=\\\"$instance\\\"}\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{vote_type}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Consensus round voting power\",\n \"type\": \"state-timeline\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 30,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"never\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"decimals\": 0,\n \"links\": [],\n \"mappings\": [],\n \"min\": 0,\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"short\"\n },\n \"overrides\": [\n {\n \"matcher\": {\n \"id\": \"byName\",\n \"options\": \"Height for last 3 hours\"\n },\n \"properties\": [\n {\n \"id\": \"color\",\n \"value\": {\n \"fixedColor\": \"#447ebc\",\n \"mode\": \"fixed\"\n }\n }\n ]\n },\n {\n \"matcher\": {\n \"id\": \"byName\",\n \"options\": \"Total Transactions for last 3 hours\"\n },\n \"properties\": [\n {\n \"id\": \"color\",\n \"value\": {\n \"fixedColor\": \"#ef843c\",\n \"mode\": \"fixed\"\n }\n }\n ]\n }\n ]\n },\n \"gridPos\": {\n \"h\": 7,\n \"w\": 12,\n \"x\": 12,\n \"y\": 18\n },\n \"id\": 49,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"mean\",\n \"max\",\n \"sum\"\n ],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"multi\",\n \"sort\": \"none\"\n }\n },\n 
\"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"cometbft_consensus_num_txs{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=\\\"$instance\\\"}\",\n \"format\": \"time_series\",\n \"instant\": false,\n \"interval\": \"\",\n \"intervalFactor\": 1,\n \"legendFormat\": \"Transactions\",\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Transactions\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 30,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"never\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"decimals\": 2,\n \"links\": [],\n \"mappings\": [],\n \"min\": 0,\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"bytes\"\n },\n \"overrides\": [\n {\n \"matcher\": {\n \"id\": \"byName\",\n \"options\": \"Height for last 3 hours\"\n },\n \"properties\": [\n {\n \"id\": \"color\",\n \"value\": {\n \"fixedColor\": \"#447ebc\",\n \"mode\": \"fixed\"\n }\n }\n ]\n },\n {\n \"matcher\": {\n \"id\": \"byName\",\n \"options\": \"Total Transactions for last 3 hours\"\n },\n \"properties\": [\n {\n \"id\": \"color\",\n \"value\": {\n \"fixedColor\": \"#ef843c\",\n \"mode\": \"fixed\"\n }\n }\n ]\n }\n ]\n },\n \"gridPos\": {\n \"h\": 5,\n \"w\": 12,\n \"x\": 0,\n \"y\": 25\n },\n \"id\": 50,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"mean\",\n \"max\"\n ],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"multi\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"cometbft_consensus_block_size_bytes{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=\\\"$instance\\\"}\",\n \"format\": \"time_series\",\n \"instant\": false,\n \"interval\": \"\",\n \"intervalFactor\": 1,\n \"legendFormat\": \"Block Size\",\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Block Size\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n 
\"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"decimals\": 2,\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"cps\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 5,\n \"w\": 12,\n \"x\": 12,\n \"y\": 25\n },\n \"id\": 68,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": false\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"expr\": \"rate(cometbft_consensus_height{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=\\\"$instance\\\"}[$__rate_interval])\",\n \"format\": \"time_series\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"interval\": \"\",\n \"intervalFactor\": 1,\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Block Rate\",\n \"type\": \"timeseries\"\n },\n {\n \"collapsed\": false,\n \"gridPos\": {\n \"h\": 1,\n \"w\": 24,\n \"x\": 0,\n \"y\": 30\n },\n \"id\": 55,\n \"panels\": [],\n \"repeat\": \"instance\",\n \"title\": \"instance overview: $instance\",\n \"type\": \"row\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"decimals\": 0,\n \"mappings\": [],\n \"max\": 20,\n \"min\": 0,\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"#e24d42\",\n \"value\": null\n },\n {\n \"color\": \"#ef843c\",\n \"value\": 2\n },\n {\n \"color\": \"#7eb26d\",\n \"value\": 5\n }\n ]\n },\n \"unit\": \"short\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 4,\n \"w\": 6,\n \"x\": 0,\n \"y\": 31\n },\n \"id\": 53,\n \"options\": {\n \"minVizHeight\": 75,\n \"minVizWidth\": 75,\n \"orientation\": \"horizontal\",\n \"reduceOptions\": {\n \"calcs\": [\n \"last\"\n ],\n \"fields\": \"\",\n \"values\": false\n },\n \"showThresholdLabels\": false,\n \"showThresholdMarkers\": true,\n \"sizing\": \"auto\"\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"cometbft_p2p_peers{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=~\\\"$instance\\\"}\",\n \"format\": \"time_series\",\n \"instant\": true,\n \"interval\": \"30s\",\n \"intervalFactor\": 1,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Connected Peers\",\n \"type\": \"gauge\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"decimals\": 0,\n \"mappings\": [],\n \"max\": 50,\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"#7eb26d\",\n \"value\": null\n },\n {\n \"color\": \"#ef843c\",\n \"value\": 10\n },\n {\n \"color\": \"#e24d42\",\n \"value\": 20\n }\n ]\n },\n \"unit\": \"short\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 4,\n \"w\": 6,\n \"x\": 
6,\n \"y\": 31\n },\n \"id\": 56,\n \"options\": {\n \"minVizHeight\": 75,\n \"minVizWidth\": 75,\n \"orientation\": \"horizontal\",\n \"reduceOptions\": {\n \"calcs\": [\n \"last\"\n ],\n \"fields\": \"\",\n \"values\": false\n },\n \"showThresholdLabels\": false,\n \"showThresholdMarkers\": true,\n \"sizing\": \"auto\"\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"cometbft_mempool_size{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=~\\\"$instance\\\"}\",\n \"format\": \"time_series\",\n \"instant\": true,\n \"interval\": \"30s\",\n \"intervalFactor\": 1,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Unconfirmed Transactions\",\n \"type\": \"gauge\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"decimals\": 0,\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"short\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 4,\n \"w\": 6,\n \"x\": 12,\n \"y\": 31\n },\n \"id\": 60,\n \"maxDataPoints\": 100,\n \"options\": {\n \"colorMode\": \"value\",\n \"graphMode\": \"none\",\n \"justifyMode\": \"auto\",\n \"orientation\": \"horizontal\",\n \"percentChangeColorMode\": \"standard\",\n \"reduceOptions\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"fields\": \"\",\n \"values\": false\n },\n \"showPercentChange\": false,\n \"textMode\": \"auto\",\n \"wideLayout\": true\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"cometbft_mempool_failed_txs{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=~\\\"$instance\\\"}\",\n \"format\": \"time_series\",\n \"instant\": true,\n \"interval\": \"30s\",\n \"intervalFactor\": 1,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Failed Transactions\",\n \"type\": \"stat\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"decimals\": 0,\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"locale\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 4,\n \"w\": 6,\n \"x\": 18,\n \"y\": 31\n },\n \"id\": 61,\n \"maxDataPoints\": 100,\n \"options\": {\n \"colorMode\": \"value\",\n \"graphMode\": \"none\",\n \"justifyMode\": \"auto\",\n \"orientation\": \"horizontal\",\n \"percentChangeColorMode\": \"standard\",\n \"reduceOptions\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"fields\": \"\",\n \"values\": false\n },\n \"showPercentChange\": false,\n \"textMode\": \"auto\",\n \"wideLayout\": true\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"cometbft_mempool_recheck_times{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=~\\\"$instance\\\"}\",\n \"format\": \"time_series\",\n \"instant\": true,\n \"interval\": \"30s\",\n \"intervalFactor\": 1,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Recheck Times\",\n \"type\": \"stat\"\n },\n {\n \"datasource\": {\n 
\"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"decimals\": 0,\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"#7eb26d\",\n \"value\": null\n },\n {\n \"color\": \"#ef843c\",\n \"value\": 10\n },\n {\n \"color\": \"#e24d42\",\n \"value\": 20\n }\n ]\n },\n \"unit\": \"short\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 5,\n \"w\": 12,\n \"x\": 0,\n \"y\": 35\n },\n \"id\": 70,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": false\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"cometbft_mempool_size{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=~\\\"$instance\\\"}\",\n \"format\": \"time_series\",\n \"instant\": true,\n \"interval\": \"\",\n \"intervalFactor\": 1,\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Unconfirmed Transactions (mempool)\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"decimals\": 0,\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"short\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 5,\n \"w\": 12,\n \"x\": 12,\n \"y\": 35\n },\n \"id\": 69,\n \"maxDataPoints\": 100,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": false\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n 
\"expr\": \"cometbft_mempool_failed_txs{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=~\\\"$instance\\\"}\",\n \"format\": \"time_series\",\n \"instant\": true,\n \"interval\": \"\",\n \"intervalFactor\": 1,\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Failed Transactions (mempool)\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"bars\",\n \"fillOpacity\": 100,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"never\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"links\": [],\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"binBps\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 9,\n \"w\": 12,\n \"x\": 0,\n \"y\": 40\n },\n \"id\": 59,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": false\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"rate(cometbft_p2p_peer_receive_bytes_total{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=\\\"$instance\\\"}[$__rate_interval])\",\n \"format\": \"time_series\",\n \"intervalFactor\": 1,\n \"legendFormat\": \"{{peer_id}}\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Total Network Input\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"bars\",\n \"fillOpacity\": 100,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"never\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"links\": [],\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"binBps\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 9,\n \"w\": 12,\n \"x\": 12,\n \"y\": 40\n },\n \"id\": 58,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": 
\"list\",\n \"placement\": \"bottom\",\n \"showLegend\": false\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"uid\": \"$DS\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"rate(cometbft_p2p_peer_send_bytes_total{namespace=\\\"$namespace\\\", chain_id=\\\"$chain_id\\\", job=\\\"$instance\\\"}[$__rate_interval]) \",\n \"format\": \"time_series\",\n \"intervalFactor\": 1,\n \"legendFormat\": \"{{peer_id}}\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Total Network Output\",\n \"type\": \"timeseries\"\n }\n ],\n \"preload\": false,\n \"refresh\": \"\",\n \"schemaVersion\": 40,\n \"tags\": [\n \"Blockchain\"\n ],\n \"templating\": {\n \"list\": [\n {\n \"current\": {\n \"text\": \"default\",\n \"value\": \"default\"\n },\n \"includeAll\": false,\n \"label\": \"Datasource\",\n \"name\": \"DS\",\n \"options\": [],\n \"query\": \"prometheus\",\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"datasource\"\n },\n {\n \"current\": {\n \"text\": \"sv-1\",\n \"value\": \"sv-1\"\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"definition\": \"label_values(cometbft_consensus_height,namespace)\",\n \"includeAll\": false,\n \"label\": \"namespace\",\n \"name\": \"namespace\",\n \"options\": [],\n \"query\": {\n \"query\": \"label_values(cometbft_consensus_height,namespace)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 2,\n \"regex\": \"\",\n \"sort\": 1,\n \"type\": \"query\"\n },\n {\n \"current\": {\n \"text\": \"cilr-4-0\",\n \"value\": \"cilr-4-0\"\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"definition\": \"label_values(cometbft_consensus_height, chain_id)\",\n \"includeAll\": false,\n \"label\": \"Chain ID\",\n \"name\": \"chain_id\",\n \"options\": [],\n \"query\": \"label_values(cometbft_consensus_height, chain_id)\",\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n },\n {\n \"current\": {\n \"text\": \"global-domain-4-cometbft-cometbft-rpc\",\n \"value\": \"global-domain-4-cometbft-cometbft-rpc\"\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"$DS\"\n },\n \"definition\": \"label_values(cometbft_consensus_height{chain_id=\\\"$chain_id\\\"},job)\",\n \"includeAll\": false,\n \"label\": \"Instance\",\n \"name\": \"instance\",\n \"options\": [],\n \"query\": {\n \"query\": \"label_values(cometbft_consensus_height{chain_id=\\\"$chain_id\\\"},job)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"sort\": 5,\n \"type\": \"query\"\n }\n ]\n },\n \"time\": {\n \"from\": \"now-6h\",\n \"to\": \"now\"\n },\n \"timepicker\": {},\n \"timezone\": \"\",\n \"title\": \"CometBFT\",\n \"uid\": \"UJyurCTWz\",\n \"version\": 5,\n \"weekStart\": \"\"\n}\n", "global-sync-utilization.json": "{\n \"annotations\": {\n \"list\": [\n {\n \"builtIn\": 1,\n \"datasource\": {\n \"type\": \"grafana\",\n \"uid\": \"-- Grafana --\"\n },\n \"enable\": true,\n \"hide\": true,\n \"iconColor\": \"rgba(0, 211, 255, 1)\",\n \"name\": \"Annotations & Alerts\",\n \"type\": \"dashboard\"\n }\n ]\n },\n \"editable\": true,\n \"fiscalYearStartMonth\": 0,\n \"graphTooltip\": 0,\n \"id\": 5072,\n \"links\": [],\n \"panels\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"Successful transactions/s as measured by the mediator\",\n \"fieldConfig\": {\n \"defaults\": 
{\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisBorderShow\": false,\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"barWidthFactor\": 0.6,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"insertNulls\": false,\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\"\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 12,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 0\n      },\n      \"id\": 4,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"hideZeros\": false,\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"pluginVersion\": \"12.0.2\",\n      \"targets\": [\n        {\n          \"editorMode\": \"code\",\n          \"expr\": \"sum by (namespace, migration_id)(rate(daml_mediator_approved_requests_total{namespace=~\\\"$namespace\\\"}[$__rate_interval]))\",\n          \"legendFormat\": \"__auto\",\n          \"range\": true,\n          \"refId\": \"A\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"prometheus\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum by (namespace, migration_id)(rate(daml_mediator_approved_requests_total{namespace=~\\\"$namespace\\\"}[1h]))\",\n          \"hide\": false,\n          \"legendFormat\": \"hourly\",\n          \"range\": true,\n          \"refId\": \"B\"\n        }\n      ],\n      \"title\": \"Successful transactions/s\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"prometheus\"\n      },\n      \"description\": \"Total transactions/s, including rejected transactions, as measured by the mediator\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisBorderShow\": false,\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"barWidthFactor\": 0.6,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"insertNulls\": false,\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\"\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 12,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 0\n      },\n      \"id\": 5,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"hideZeros\": false,\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"pluginVersion\": \"12.0.2\",\n      \"targets\": [\n        {\n          \"editorMode\": 
\"code\",\n          \"expr\": \"sum by (namespace, migration_id) (rate(daml_mediator_requests_total{namespace=~\\\"$namespace\\\"}[$__rate_interval]))\",\n          \"legendFormat\": \"__auto\",\n          \"range\": true,\n          \"refId\": \"A\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"prometheus\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum by (namespace, migration_id) (rate(daml_mediator_requests_total{namespace=~\\\"$namespace\\\"}[1h]))\",\n          \"hide\": false,\n          \"legendFormat\": \"hourly\",\n          \"range\": true,\n          \"refId\": \"B\"\n        }\n      ],\n      \"title\": \"Total transactions/s\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"prometheus\"\n      },\n      \"description\": \"\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\"\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 12\n      },\n      \"id\": 6,\n      \"options\": {\n        \"colorMode\": \"value\",\n        \"graphMode\": \"area\",\n        \"justifyMode\": \"auto\",\n        \"orientation\": \"auto\",\n        \"percentChangeColorMode\": \"standard\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"max\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        \"showPercentChange\": false,\n        \"textMode\": \"auto\",\n        \"wideLayout\": true\n      },\n      \"pluginVersion\": \"12.0.2\",\n      \"targets\": [\n        {\n          \"editorMode\": \"code\",\n          \"expr\": \"sum by (namespace, migration_id) (increase(daml_mediator_approved_requests_total{namespace=~\\\"$namespace\\\"}[1m]))\",\n          \"legendFormat\": \"__auto\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Approved transactions/min + maximum over the time range\",\n      \"type\": \"stat\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"prometheus\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\"\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 12\n      },\n      \"id\": 7,\n      \"options\": {\n        \"colorMode\": \"value\",\n        \"graphMode\": \"area\",\n        \"justifyMode\": \"auto\",\n        \"orientation\": \"auto\",\n        \"percentChangeColorMode\": \"standard\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"max\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        \"showPercentChange\": false,\n        \"textMode\": \"auto\",\n        \"wideLayout\": true\n      },\n      \"pluginVersion\": \"12.0.2\",\n      \"targets\": [\n        {\n          \"editorMode\": \"code\",\n          \"expr\": \"sum by (namespace, migration_id) (increase(daml_mediator_requests_total{namespace=~\\\"$namespace\\\"}[1m]))\",\n          \"legendFormat\": \"__auto\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Total transactions/min + maximum over the time range\",\n      \"type\": \"stat\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"prometheus\"\n      },\n      \"description\": \"The absolute number of transactions on the global synchronizer that are not visible to the DSO, in a sliding window of 30 minutes.\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisBorderShow\": false,\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"barWidthFactor\": 0.6,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            
\"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"none\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 0,\n \"y\": 20\n },\n \"id\": 2,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"(sum(increase(daml_mediator_approved_requests_total{namespace=~\\\"$namespace\\\"}[30m])) by (namespace) - sum(increase(splice_history_updates_transactions_total{namespace=~\\\"$namespace\\\"}[30m])) by (namespace))\",\n \"hide\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"B\"\n }\n ],\n \"title\": \"Transactions not seen by DSO (absolute)\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"The percentage of transactions on the global synchronizer that are not visible to the DSO, in a sliding window of 30 minutes.\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"percentunit\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 12,\n \"y\": 20\n },\n \"id\": 3,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"editorMode\": \"code\",\n \"expr\": \"(sum(increase(daml_mediator_approved_requests_total{namespace=~\\\"$namespace\\\"}[30m])) by (namespace) - sum(increase(splice_history_updates_transactions_total{namespace=~\\\"$namespace\\\"}[30m])) by (namespace)) / (sum(increase(daml_mediator_approved_requests_total{namespace=~\\\"$namespace\\\"}[30m])) by (namespace) > 
sum(increase(splice_history_updates_transactions_total{namespace=~\\\"$namespace\\\"}[30m])) by (namespace) or sum(increase(splice_history_updates_transactions_total{namespace=~\\\"$namespace\\\"}[30m])) by (namespace))\",\n          \"hide\": false,\n          \"legendFormat\": \"__auto\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Transactions not seen by DSO (percentage)\",\n      \"type\": \"timeseries\"\n    }\n  ],\n  \"preload\": false,\n  \"schemaVersion\": 41,\n  \"tags\": [],\n  \"templating\": {\n    \"list\": [\n      {\n        \"current\": {\n          \"text\": \"sv-1\",\n          \"value\": \"sv-1\"\n        },\n        \"definition\": \"label_values(daml_mediator_approved_requests_total,namespace)\",\n        \"description\": \"\",\n        \"includeAll\": true,\n        \"label\": \"Namespace\",\n        \"name\": \"namespace\",\n        \"options\": [],\n        \"query\": {\n          \"qryType\": 1,\n          \"query\": \"label_values(daml_mediator_approved_requests_total,namespace)\",\n          \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n        },\n        \"refresh\": 1,\n        \"regex\": \"\",\n        \"type\": \"query\"\n      }\n    ]\n  },\n  \"time\": {\n    \"from\": \"now-3h\",\n    \"to\": \"now\"\n  },\n  \"timepicker\": {},\n  \"timezone\": \"UTC\",\n  \"title\": \"Global Synchronizer Utilization\",\n  \"uid\": \"fe8wt04z620aof\",\n  \"version\": 2\n}\n", "history-backfilling.json": "{\n  \"annotations\": {\n    \"list\": [\n      {\n        \"builtIn\": 1,\n        \"datasource\": {\n          \"type\": \"grafana\",\n          \"uid\": \"-- Grafana --\"\n        },\n        \"enable\": true,\n        \"hide\": true,\n        \"iconColor\": \"rgba(0, 211, 255, 1)\",\n        \"name\": \"Annotations & Alerts\",\n        \"type\": \"dashboard\"\n      }\n    ]\n  },\n  \"editable\": true,\n  \"fiscalYearStartMonth\": 0,\n  \"graphTooltip\": 0,\n  \"id\": 3085,\n  \"links\": [],\n  \"panels\": [\n    {\n      \"fieldConfig\": {\n        \"defaults\": {},\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 7,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 0\n      },\n      \"id\": 21,\n      \"options\": {\n        \"code\": {\n          \"language\": \"plaintext\",\n          \"showLineNumbers\": false,\n          \"showMiniMap\": false\n        },\n        \"content\": \"There are three backfilling background tasks:\\n\\n1. `ScanHistoryBackfillingTrigger` backfills regular updates\\n   - Data is loaded from peer scan apps using BFT network calls\\n1. `ScanHistoryBackfillingTrigger` backfills import updates\\n   - Data is loaded from peer scan apps using BFT network calls\\n1. 
`TxLogBackfillingTrigger` backfills txlog entries\\n - Data is loaded from the local UpdateHistory\\n\\nThe tasks are completed sequentially in the above order.\\nE.g., txlog backfilling won't start until update history backfilling is complete.\",\n \"mode\": \"markdown\"\n },\n \"pluginVersion\": \"11.4.0\",\n \"title\": \"Overview\",\n \"type\": \"text\"\n },\n {\n \"collapsed\": false,\n \"gridPos\": {\n \"h\": 1,\n \"w\": 24,\n \"x\": 0,\n \"y\": 7\n },\n \"id\": 7,\n \"panels\": [],\n \"title\": \"Updates (scan)\",\n \"type\": \"row\"\n },\n {\n \"datasource\": {\n \"default\": true,\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 0,\n \"y\": 8\n },\n \"id\": 17,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"max by(namespace)(splice_history_backfilling_transaction_count{namespace=~\\\"$namespace\\\", migration=~\\\"$migration\\\"})\",\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Backfilled transactions count\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"default\": true,\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n 
\"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 12,\n \"y\": 8\n },\n \"id\": 5,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"max by (namespace)(rate(splice_history_backfilling_transaction_count{namespace=~\\\"$namespace\\\", migration=~\\\"$migration\\\"}[5m]))\",\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Rate backfilled transactions\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"default\": true,\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 0,\n \"y\": 16\n },\n \"id\": 2,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"max by(namespace)(splice_history_backfilling_event_count{namespace=~\\\"$namespace\\\", migration=~\\\"$migration\\\"})\",\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Backfilled events count\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"default\": true,\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n 
},\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 12,\n \"y\": 16\n },\n \"id\": 6,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"max by(namespace)(rate(splice_history_backfilling_event_count{namespace=~\\\"$namespace\\\", migration=~\\\"$migration\\\"}[5m]))\",\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Rate backfilled events\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"custom\": {\n \"fillOpacity\": 70,\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineWidth\": 0,\n \"spanNulls\": false\n },\n \"mappings\": [],\n \"max\": 1,\n \"min\": 0,\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"red\",\n \"value\": null\n },\n {\n \"color\": \"green\",\n \"value\": 1\n }\n ]\n },\n \"unit\": \"bool_yes_no\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 0,\n \"y\": 24\n },\n \"id\": 15,\n \"options\": {\n \"alignValue\": \"left\",\n \"legend\": {\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"mergeValues\": true,\n \"rowHeight\": 0.9,\n \"showValue\": \"auto\",\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"max by(namespace)(splice_history_backfilling_completed{namespace=~\\\"$namespace\\\", migration=~\\\"$migration\\\"})\",\n \"format\": \"time_series\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Backfilling completed\",\n \"type\": \"state-timeline\"\n },\n {\n \"datasource\": {\n \"default\": true,\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n 
},\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": [\n {\n \"matcher\": {\n \"id\": \"byName\",\n \"options\": \"Value\"\n },\n \"properties\": [\n {\n \"id\": \"unit\",\n \"value\": \"dateTimeAsIso\"\n }\n ]\n }\n ]\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 12,\n \"y\": 24\n },\n \"id\": 3,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"min by(namespace)((splice_history_backfilling_latest_record_time{namespace=~\\\"$namespace\\\", migration=~\\\"$migration\\\"}>-62135596800000)/1000)\",\n \"format\": \"time_series\",\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"Value\"\n }\n ],\n \"title\": \"Last record time backfilled\",\n \"type\": \"timeseries\"\n },\n {\n \"collapsed\": false,\n \"gridPos\": {\n \"h\": 1,\n \"w\": 24,\n \"x\": 0,\n \"y\": 32\n },\n \"id\": 16,\n \"panels\": [],\n \"title\": \"Import Updates (scan)\",\n \"type\": \"row\"\n },\n {\n \"datasource\": {\n \"default\": true,\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 0,\n \"y\": 33\n },\n \"id\": 1,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"max by(namespace)(splice_history_import_updates_backfilling_contract_count{namespace=~\\\"$namespace\\\", migration=~\\\"$migration\\\"})\",\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Backfilled import contracts count\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"default\": true,\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n 
\"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 12,\n \"y\": 33\n },\n \"id\": 20,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"max by (namespace)(rate(splice_history_import_updates_backfilling_contract_count{namespace=~\\\"$namespace\\\", migration=~\\\"$migration\\\"}[5m]))\",\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Rate backfilled contracts\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"custom\": {\n \"fillOpacity\": 70,\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineWidth\": 0,\n \"spanNulls\": false\n },\n \"mappings\": [],\n \"max\": 1,\n \"min\": 0,\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"red\",\n \"value\": null\n },\n {\n \"color\": \"green\",\n \"value\": 1\n }\n ]\n },\n \"unit\": \"bool_yes_no\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 0,\n \"y\": 41\n },\n \"id\": 19,\n \"options\": {\n \"alignValue\": \"left\",\n \"legend\": {\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"mergeValues\": true,\n \"rowHeight\": 0.9,\n \"showValue\": \"auto\",\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"max by(namespace)(splice_history_import_updates_backfilling_completed{namespace=~\\\"$namespace\\\", migration=~\\\"$migration\\\"})\",\n \"format\": \"time_series\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Import update backfilling completed\",\n \"type\": \"state-timeline\"\n },\n {\n \"datasource\": {\n \"default\": true,\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n 
\"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": [\n {\n \"matcher\": {\n \"id\": \"byName\",\n \"options\": \"Value\"\n },\n \"properties\": [\n {\n \"id\": \"unit\",\n \"value\": \"\"\n }\n ]\n }\n ]\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 12,\n \"y\": 41\n },\n \"id\": 18,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"min by(namespace)((splice_history_import_updates_backfilling_latest_migration_id{namespace=~\\\"$namespace\\\", migration=~\\\"$migration\\\"}>-1))\",\n \"format\": \"time_series\",\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"Value\"\n }\n ],\n \"title\": \"Last migration id backfilled\",\n \"type\": \"timeseries\"\n },\n {\n \"collapsed\": false,\n \"gridPos\": {\n \"h\": 1,\n \"w\": 24,\n \"x\": 0,\n \"y\": 49\n },\n \"id\": 8,\n \"panels\": [],\n \"title\": \"TxLog entries (all apps)\",\n \"type\": \"row\"\n },\n {\n \"datasource\": {\n \"default\": true,\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 0,\n \"y\": 50\n },\n \"id\": 9,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"max by(namespace)(splice_history_txlog_backfilling_transaction_count{namespace=~\\\"$namespace\\\", 
migration=~\\\"$migration\\\", service=~\\\"$service\\\"})\",\n \"instant\": false,\n \"legendFormat\": \"{{namespace}} {{service}}\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Processed transactions count\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"default\": true,\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 12,\n \"y\": 50\n },\n \"id\": 10,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"max by (namespace)(rate(splice_history_txlog_backfilling_transaction_count{namespace=~\\\"$namespace\\\", migration=~\\\"$migration\\\", service=~\\\"$service\\\"}[5m]))\",\n \"instant\": false,\n \"legendFormat\": \"{{namespace}} {{service}}\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Rate processed transactions\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"default\": true,\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 0,\n \"y\": 58\n },\n \"id\": 11,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n 
\"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"max by(namespace)(splice_history_txlog_backfilling_event_count{namespace=~\\\"$namespace\\\", migration=~\\\"$migration\\\", service=~\\\"$service\\\"})\",\n \"instant\": false,\n \"legendFormat\": \"{{namespace}} {{service}}\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Processed events count\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"default\": true,\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 12,\n \"y\": 58\n },\n \"id\": 12,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"max by(namespace)(rate(splice_history_txlog_backfilling_event_count{namespace=~\\\"$namespace\\\", migration=~\\\"$migration\\\", service=~\\\"$service\\\"}[5m]))\",\n \"instant\": false,\n \"legendFormat\": \"{{namespace}}{{service}}\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Rate processed events\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"custom\": {\n \"fillOpacity\": 70,\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineWidth\": 0,\n \"spanNulls\": false\n },\n \"mappings\": [],\n \"max\": 1,\n \"min\": 0,\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"red\",\n \"value\": null\n },\n {\n \"color\": \"green\",\n \"value\": 1\n }\n ]\n },\n \"unit\": \"bool_yes_no\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 11,\n \"w\": 12,\n \"x\": 0,\n \"y\": 66\n },\n \"id\": 13,\n \"options\": {\n \"alignValue\": \"left\",\n \"legend\": {\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"mergeValues\": true,\n \"rowHeight\": 0.9,\n \"showValue\": \"auto\",\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n 
\"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"max by(namespace, service) (splice_history_txlog_backfilling_completed{namespace=~\\\"$namespace\\\", migration=~\\\"$migration\\\", service=~\\\"$service\\\"})\",\n \"format\": \"time_series\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"legendFormat\": \"{{namespace}} {{service}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"TxLog backfilling completed\",\n \"type\": \"state-timeline\"\n },\n {\n \"datasource\": {\n \"default\": true,\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": [\n {\n \"matcher\": {\n \"id\": \"byName\",\n \"options\": \"Value\"\n },\n \"properties\": [\n {\n \"id\": \"unit\",\n \"value\": \"dateTimeAsIso\"\n }\n ]\n }\n ]\n },\n \"gridPos\": {\n \"h\": 11,\n \"w\": 12,\n \"x\": 12,\n \"y\": 66\n },\n \"id\": 14,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"min by(namespace)((splice_history_txlog_backfilling_latest_record_time{namespace=~\\\"$namespace\\\", migration=~\\\"$migration\\\", service=~\\\"$service\\\"}>-62135596800000)/1000)\",\n \"format\": \"time_series\",\n \"instant\": false,\n \"legendFormat\": \"{{namespace}} {{service}}\",\n \"range\": true,\n \"refId\": \"Value\"\n }\n ],\n \"title\": \"Last record time backfilled\",\n \"type\": \"timeseries\"\n }\n ],\n \"preload\": false,\n \"refresh\": \"\",\n \"schemaVersion\": 40,\n \"tags\": [],\n \"templating\": {\n \"list\": [\n {\n \"current\": {\n \"text\": \"All\",\n \"value\": [\n \"$__all\"\n ]\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(splice_history_backfilling_latest_record_time,namespace)\",\n \"includeAll\": true,\n \"multi\": true,\n \"name\": \"namespace\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(splice_history_backfilling_latest_record_time,namespace)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n },\n {\n \"current\": {\n \"text\": \"All\",\n \"value\": \"$__all\"\n },\n \"datasource\": {\n 
\"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(splice_history_backfilling_latest_record_time,migration)\",\n \"includeAll\": true,\n \"multi\": true,\n \"name\": \"migration\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(splice_history_backfilling_latest_record_time,migration)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n },\n {\n \"current\": {\n \"text\": \"All\",\n \"value\": \"$__all\"\n },\n \"definition\": \"label_values(splice_history_txlog_backfilling_completed,service)\",\n \"includeAll\": true,\n \"name\": \"service\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(splice_history_txlog_backfilling_completed,service)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n }\n ]\n },\n \"time\": {\n \"from\": \"now-6h\",\n \"to\": \"now\"\n },\n \"timepicker\": {},\n \"timezone\": \"UTC\",\n \"title\": \"History backfiling\",\n \"uid\": \"fe12ejqgwdlvke\",\n \"version\": 4,\n \"weekStart\": \"\"\n}\n", "mining-rounds.json": "{\n \"annotations\": {\n \"list\": [\n {\n \"builtIn\": 1,\n \"datasource\": {\n \"type\": \"grafana\",\n \"uid\": \"-- Grafana --\"\n },\n \"enable\": true,\n \"hide\": true,\n \"iconColor\": \"rgba(0, 211, 255, 1)\",\n \"name\": \"Annotations & Alerts\",\n \"type\": \"dashboard\"\n }\n ]\n },\n \"editable\": true,\n \"fiscalYearStartMonth\": 0,\n \"graphTooltip\": 0,\n \"id\": 9003,\n \"links\": [],\n \"panels\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 12,\n \"w\": 24,\n \"x\": 0,\n \"y\": 0\n },\n \"id\": 1,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"max by (namespace) (splice_sv_dso_store_latest_open_mining_round)\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{label_name}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Last Open Mining Round\",\n \"type\": 
\"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 12,\n \"w\": 24,\n \"x\": 0,\n \"y\": 12\n },\n \"id\": 2,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"max by (namespace) (splice_sv_dso_store_latest_issuing_mining_round)\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{label_name}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Last Issuing Mining Round\",\n \"type\": \"timeseries\"\n }\n ],\n \"preload\": false,\n \"refresh\": \"\",\n \"schemaVersion\": 41,\n \"tags\": [],\n \"templating\": {\n \"list\": []\n },\n \"time\": {\n \"from\": \"now-6h\",\n \"to\": \"now\"\n },\n \"timepicker\": {},\n \"timezone\": \"\",\n \"title\": \"Mining Rounds\",\n \"uid\": \"ed94a332-4fa7-47f8-982b-fc997381175b\",\n \"version\": 1\n}\n", + "onboarded_parties.json": "{\n \"annotations\": {\n \"list\": [\n {\n \"builtIn\": 1,\n \"datasource\": {\n \"type\": \"grafana\",\n \"uid\": \"-- Grafana --\"\n },\n \"enable\": true,\n \"hide\": true,\n \"iconColor\": \"rgba(0, 211, 255, 1)\",\n \"name\": \"Annotations & Alerts\",\n \"type\": \"dashboard\"\n }\n ]\n },\n \"editable\": true,\n \"fiscalYearStartMonth\": 0,\n \"graphTooltip\": 0,\n \"id\": 265,\n \"links\": [],\n \"panels\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 13,\n \"w\": 4,\n \"x\": 0,\n \"y\": 0\n },\n \"id\": 2,\n \"options\": {\n \"colorMode\": \"value\",\n \"graphMode\": \"area\",\n \"justifyMode\": \"auto\",\n \"orientation\": \"auto\",\n \"percentChangeColorMode\": \"standard\",\n \"reduceOptions\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"fields\": \"\",\n \"values\": false\n },\n \"showPercentChange\": false,\n \"textMode\": \"auto\",\n \"wideLayout\": 
true\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"exemplar\": false,\n \"expr\": \"splice_synchronizer_topology_num_parties{namespace=\\\"$namespace\\\"}\",\n \"instant\": true,\n \"legendFormat\": \"total parties\",\n \"range\": false,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Parties onboarded to the global synchronizer\",\n \"type\": \"stat\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 13,\n \"w\": 10,\n \"x\": 4,\n \"y\": 0\n },\n \"id\": 1,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"splice_synchronizer_topology_num_parties{namespace=\\\"$namespace\\\"}\",\n \"legendFormat\": \"total parties\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Parties onboarded to the global synchronizer\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 13,\n \"w\": 10,\n \"x\": 14,\n \"y\": 0\n },\n \"id\": 3,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": 
\"bottom\",\n \"showLegend\": false\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"splice_synchronizer_topology_num_parties_per_participant{namespace=\\\"$namespace\\\"}\",\n \"legendFormat\": \"{{participant_id}}\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Parties onboarded to the global synchronizer per participant\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"custom\": {\n \"align\": \"auto\",\n \"cellOptions\": {\n \"type\": \"auto\"\n },\n \"filterable\": true,\n \"inspect\": false\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"Parties\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 14,\n \"w\": 9,\n \"x\": 0,\n \"y\": 13\n },\n \"id\": 4,\n \"options\": {\n \"cellHeight\": \"sm\",\n \"footer\": {\n \"countRows\": false,\n \"fields\": \"\",\n \"reducer\": [\n \"sum\"\n ],\n \"show\": false\n },\n \"showHeader\": true,\n \"sortBy\": [\n {\n \"desc\": true,\n \"displayName\": \"Parties\"\n }\n ]\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"exemplar\": false,\n \"expr\": \"splice_synchronizer_topology_num_parties_per_participant{namespace=\\\"$namespace\\\"}\",\n \"format\": \"table\",\n \"instant\": true,\n \"legendFormat\": \"__auto\",\n \"range\": false,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Top hosting participants\",\n \"transformations\": [\n {\n \"id\": \"organize\",\n \"options\": {\n \"excludeByName\": {\n \"Time\": true,\n \"__name__\": true,\n \"endpoint\": true,\n \"instance\": true,\n \"job\": true,\n \"migration\": true,\n \"namespace\": true,\n \"node_name\": true,\n \"node_type\": true,\n \"otel_scope_name\": true,\n \"pod\": true,\n \"service\": true\n },\n \"includeByName\": {},\n \"indexByName\": {},\n \"renameByName\": {\n \"Value\": \"Parties\"\n }\n }\n }\n ],\n \"type\": \"table\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"custom\": {\n \"align\": \"auto\",\n \"cellOptions\": {\n \"type\": \"auto\"\n },\n \"filterable\": true,\n \"inspect\": false\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"Parties\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 14,\n \"w\": 10,\n \"x\": 9,\n \"y\": 13\n },\n \"id\": 5,\n \"options\": {\n \"cellHeight\": \"sm\",\n \"footer\": {\n \"countRows\": false,\n \"fields\": \"\",\n \"reducer\": [\n \"sum\"\n ],\n \"show\": false\n },\n \"showHeader\": true,\n \"sortBy\": [\n {\n \"desc\": true,\n \"displayName\": \"Parties\"\n }\n ]\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"exemplar\": false,\n \"expr\": \"sum 
by(participant_id) (delta(splice_synchronizer_topology_num_parties_per_participant{namespace=\\\"$namespace\\\"}[24h])) > 0\",\n \"format\": \"table\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": true,\n \"legendFormat\": \"__auto\",\n \"range\": false,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Onboarded parties last 24h\",\n \"transformations\": [\n {\n \"id\": \"organize\",\n \"options\": {\n \"excludeByName\": {\n \"Time\": true,\n \"__name__\": true,\n \"endpoint\": true,\n \"instance\": true,\n \"job\": true,\n \"migration\": true,\n \"namespace\": true,\n \"node_name\": true,\n \"node_type\": true,\n \"otel_scope_name\": true,\n \"pod\": true,\n \"service\": true\n },\n \"includeByName\": {},\n \"indexByName\": {},\n \"renameByName\": {\n \"Value\": \"Parties\"\n }\n }\n }\n ],\n \"type\": \"table\"\n }\n ],\n \"preload\": false,\n \"schemaVersion\": 41,\n \"tags\": [],\n \"templating\": {\n \"list\": [\n {\n \"allowCustomValue\": false,\n \"current\": {\n \"text\": \"sv-5\",\n \"value\": \"sv-5\"\n },\n \"definition\": \"label_values(splice_synchronizer_topology_num_parties,namespace)\",\n \"name\": \"namespace\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(splice_synchronizer_topology_num_parties,namespace)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n }\n ]\n },\n \"time\": {\n \"from\": \"now-6h\",\n \"to\": \"now\"\n },\n \"timepicker\": {},\n \"timezone\": \"UTC\",\n \"title\": \"Onboarded Parties\",\n \"uid\": \"fc6185ce-0c37-48e7-960b-839a037d47bf\",\n \"version\": 2\n}\n", "sequencer-pruning.json": "{\n \"annotations\": {\n \"list\": [\n {\n \"builtIn\": 1,\n \"datasource\": {\n \"type\": \"grafana\",\n \"uid\": \"-- Grafana --\"\n },\n \"enable\": true,\n \"hide\": true,\n \"iconColor\": \"rgba(0, 211, 255, 1)\",\n \"name\": \"Annotations & Alerts\",\n \"type\": \"dashboard\"\n }\n ]\n },\n \"editable\": true,\n \"fiscalYearStartMonth\": 0,\n \"graphTooltip\": 0,\n \"id\": 2419,\n \"links\": [],\n \"panels\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": true,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"s\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 0,\n \"y\": 0\n },\n \"id\": 2,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n 
\"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"histogram_avg(rate(splice_sequencer_pruning_latency_duration_seconds{namespace=~\\\"$namespace\\\"}[$__rate_interval]))\",\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Sequencer Pruning Latency\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 12,\n \"x\": 12,\n \"y\": 0\n },\n \"id\": 1,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"expr\": \"splice_sequencer_pruning_disabled_members{namespace=~\\\"$namespace\\\"}\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Disabled Members\",\n \"type\": \"timeseries\"\n }\n ],\n \"preload\": false,\n \"refresh\": \"\",\n \"schemaVersion\": 40,\n \"tags\": [],\n \"templating\": {\n \"list\": [\n {\n \"allValue\": \".*\",\n \"current\": {\n \"text\": \"All\",\n \"value\": \"$__all\"\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(splice_sequencer_pruning_disabled_members,namespace)\",\n \"includeAll\": true,\n \"multi\": true,\n \"name\": \"namespace\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(splice_sequencer_pruning_disabled_members,namespace)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n }\n ]\n },\n \"time\": {\n \"from\": \"now-2d\",\n \"to\": \"now\"\n },\n \"timepicker\": {},\n \"timezone\": \"\",\n \"title\": \"Sequencer Pruning\",\n \"uid\": \"bc8fea13-0bf5-488b-ac31-76e4ec7f4c1f\",\n \"version\": 1,\n \"weekStart\": \"\"\n}\n", "store-record-time.json": "{\n \"annotations\": {\n \"list\": [\n {\n \"builtIn\": 1,\n \"datasource\": {\n \"type\": \"grafana\",\n \"uid\": \"-- Grafana --\"\n },\n \"enable\": true,\n 
\"hide\": true,\n \"iconColor\": \"rgba(0, 211, 255, 1)\",\n \"name\": \"Annotations & Alerts\",\n \"type\": \"dashboard\"\n }\n ]\n },\n \"editable\": true,\n \"fiscalYearStartMonth\": 0,\n \"graphTooltip\": 1,\n \"id\": 5076,\n \"links\": [],\n \"panels\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"s\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 14,\n \"w\": 12,\n \"x\": 0,\n \"y\": 0\n },\n \"id\": 4,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"histogram_quantile(0.95, sum by(le, namespace,job,store_name,store_party,synchronizer_id) (rate(splice_store_signal_when_ingested_latency_duration_seconds{namespace=~\\\"$namespace\\\", node_name=~\\\"$node_name\\\", store_name=~\\\"$store_name\\\", store_party=~\\\"$store_party\\\"}[$__rate_interval])))\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": false,\n \"legendFormat\": \"95% - {{namespace}} {{job}} {{store_name}} {{store_party}} {{synchronizer_id}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"expr\": \"histogram_avg(rate(splice_store_signal_when_ingested_latency_duration_seconds{namespace=~\\\"$namespace\\\", node_name=~\\\"$node_name\\\", store_name=~\\\"$store_name\\\", store_party=~\\\"$store_party\\\"}[$__rate_interval]))\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": false,\n \"legendFormat\": \"avg - {{namespace}} {{job}} {{store_name}} {{store_party}} {{synchronizer_id}}\",\n \"range\": true,\n \"refId\": \"B\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Ingestion timing\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n 
\"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 14,\n \"w\": 12,\n \"x\": 12,\n \"y\": 0\n },\n \"id\": 3,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": true,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"editorMode\": \"code\",\n \"expr\": \"histogram_count(rate(splice_store_signal_when_ingested_latency_duration_seconds{namespace=~\\\"$namespace\\\",node_name=~\\\"$node_name\\\",store_name=~\\\"$store_name\\\",store_party=~\\\"$store_party\\\"}[$__rate_interval]))\",\n \"legendFormat\": \"{{namespace}} {{job}} {{store_name}} {{store_party}} {{synchronizer_id}}\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Ingestion rate\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"The time difference between the last ingested record time and the current wallclock time.\\n\\nNote that the last ingested record time metric only updates when the store ingests a new transaction so if there is no activity, the last ingested record time will not advance. 
For a party performing reward collection, e.g., the validator operator party, you expect at least one transaction every round, so the lag should not go above 20min.\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisBorderShow\": false,\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"barWidthFactor\": 0.6,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"insertNulls\": false,\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\"\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"ms\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 13,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 14\n      },\n      \"id\": 2,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"hideZeros\": false,\n          \"mode\": \"multi\",\n          \"sort\": \"desc\"\n        }\n      },\n      \"pluginVersion\": \"12.0.2\",\n      \"targets\": [\n        {\n          \"editorMode\": \"code\",\n          \"expr\": \" time() * 1000 - splice_store_last_ingested_record_time_ms{namespace=~\\\"$namespace\\\",node_name=~\\\"$node_name\\\",store_name=~\\\"$store_name\\\",store_party=~\\\"$store_party\\\"}\",\n          \"hide\": false,\n          \"legendFormat\": \"{{namespace}} {{job}} {{store_name}} {{store_party}} {{synchronizer_id}}\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Last Ingested Record Time Lag\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"prometheus\"\n      },\n      \"description\": \"The last ingested record time for each store.\\n\\nNote that the last ingested record time metric only updates when the store ingests a new transaction so if no transaction is ingested the last ingested time will not advance. 
For a party performing reward collection, e.g., the validator operator party, you expect at least one transaction every round, so the last ingested time should be at most 20min ago.\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisBorderShow\": false,\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"barWidthFactor\": 0.6,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"insertNulls\": false,\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\"\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"dateTimeAsIso\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 13,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 14\n      },\n      \"id\": 1,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"hideZeros\": false,\n          \"mode\": \"multi\",\n          \"sort\": \"asc\"\n        }\n      },\n      \"pluginVersion\": \"12.0.2\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"prometheus\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"splice_store_last_ingested_record_time_ms{namespace=~\\\"$namespace\\\",node_name=~\\\"$node_name\\\",store_name=~\\\"$store_name\\\",store_party=~\\\"$store_party\\\"}\",\n          \"legendFormat\": \"{{namespace}} {{job}} {{store_name}} {{store_party}} {{synchronizer_id}}\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Last Ingested Record Time\",\n      \"type\": \"timeseries\"\n    }\n  ],\n  \"preload\": false,\n  \"refresh\": \"1m\",\n  \"schemaVersion\": 41,\n  \"tags\": [],\n  \"templating\": {\n    \"list\": [\n      {\n        \"allValue\": \".*\",\n        \"current\": {\n          \"text\": [\n            \"sv-1\"\n          ],\n          \"value\": [\n            \"sv-1\"\n          ]\n        },\n        \"definition\": \"label_values(splice_store_last_ingested_record_time_ms,namespace)\",\n        \"includeAll\": true,\n        \"multi\": true,\n        \"name\": \"namespace\",\n        \"options\": [],\n        \"query\": {\n          \"qryType\": 1,\n          \"query\": \"label_values(splice_store_last_ingested_record_time_ms,namespace)\",\n          \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n        },\n        \"refresh\": 1,\n        \"regex\": \"\",\n        \"type\": \"query\"\n      },\n      {\n        \"allValue\": \".*\",\n        \"current\": {\n          \"text\": \"All\",\n          \"value\": [\n            \"$__all\"\n          ]\n        },\n        \"definition\": \"label_values(splice_store_last_ingested_record_time_ms,node_name)\",\n        \"includeAll\": true,\n        \"multi\": true,\n        \"name\": \"node_name\",\n        \"options\": [],\n        \"query\": {\n          \"qryType\": 1,\n          \"query\": \"label_values(splice_store_last_ingested_record_time_ms,node_name)\",\n          \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n        },\n        \"refresh\": 1,\n        \"regex\": \"\",\n        \"type\": \"query\"\n      },\n      {\n        \"allValue\": \".*\",\n        \"current\": {\n          \"text\": \"All\",\n          \"value\": [\n            \"$__all\"\n          ]\n        },\n        \"definition\": \"label_values(splice_store_last_ingested_record_time_ms,store_name)\",\n        \"description\": \"The name of the store\",\n        \"includeAll\": true,\n        \"multi\": 
true,\n        \"name\": \"store_name\",\n        \"options\": [],\n        \"query\": {\n          \"qryType\": 1,\n          \"query\": \"label_values(splice_store_last_ingested_record_time_ms,store_name)\",\n          \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n        },\n        \"refresh\": 1,\n        \"regex\": \"\",\n        \"type\": \"query\"\n      },\n      {\n        \"allValue\": \".*\",\n        \"current\": {\n          \"text\": \"All\",\n          \"value\": [\n            \"$__all\"\n          ]\n        },\n        \"definition\": \"label_values(splice_store_last_ingested_record_time_ms,store_party)\",\n        \"description\": \"The party that the store is ingesting data for\",\n        \"includeAll\": true,\n        \"multi\": true,\n        \"name\": \"store_party\",\n        \"options\": [],\n        \"query\": {\n          \"qryType\": 1,\n          \"query\": \"label_values(splice_store_last_ingested_record_time_ms,store_party)\",\n          \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n        },\n        \"refresh\": 1,\n        \"regex\": \"\",\n        \"type\": \"query\"\n      }\n    ]\n  },\n  \"time\": {\n    \"from\": \"now-1h\",\n    \"to\": \"now\"\n  },\n  \"timepicker\": {},\n  \"timezone\": \"UTC\",\n  \"title\": \"Splice Store Last Ingestion\",\n  \"uid\": \"dec17fpqzdwcge\",\n  \"version\": 4\n}\n", "sv-status-reports.json": "{\n  \"annotations\": {\n    \"list\": [\n      {\n        \"builtIn\": 1,\n        \"datasource\": {\n          \"type\": \"grafana\",\n          \"uid\": \"-- Grafana --\"\n        },\n        \"enable\": true,\n        \"hide\": true,\n        \"iconColor\": \"rgba(0, 211, 255, 1)\",\n        \"name\": \"Annotations & Alerts\",\n        \"type\": \"dashboard\"\n      }\n    ]\n  },\n  \"description\": \"Shows the DSO health based on the SV status reports.\",\n  \"editable\": true,\n  \"fiscalYearStartMonth\": 0,\n  \"graphTooltip\": 1,\n  \"id\": 3407,\n  \"links\": [],\n  \"panels\": [\n    {\n      \"collapsed\": false,\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 0\n      },\n      \"id\": 10,\n      \"panels\": [],\n      \"title\": \"Report Overview\",\n      \"type\": \"row\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"prometheus\"\n      },\n      \"description\": \"Difference between the current time and the creation time of the last status report (as ingested by any of the selected namespaces)\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"custom\": {\n            \"neutral\": 0\n          },\n          \"mappings\": [],\n          \"max\": 260,\n          \"min\": 0,\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\"\n              },\n              {\n                \"color\": \"yellow\",\n                \"value\": 180\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 260\n              }\n            ]\n          },\n          \"unit\": \"s\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 1\n      },\n      \"id\": 19,\n      \"options\": {\n        \"minVizHeight\": 75,\n        \"minVizWidth\": 75,\n        \"orientation\": \"auto\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        \"showThresholdLabels\": false,\n        \"showThresholdMarkers\": true,\n        \"sizing\": \"auto\",\n        \"text\": {}\n      },\n      \"pluginVersion\": \"12.0.2\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"prometheus\"\n          },\n          \"disableTextWrap\": false,\n          \"editorMode\": \"code\",\n          \"expr\": \"time() - max by(report_publisher) (splice_sv_status_report_creation_time_us) / 1000000\",\n          \"fullMetaSearch\": false,\n          \"hide\": false,\n          \"includeNullMetadata\": true,\n          \"instant\": false,\n          \"legendFormat\": \"{{report_publisher}}\",\n          \"range\": true,\n          \"refId\": \"A\",\n          \"useBackend\": false\n        }\n      ],\n      \"title\": \"Status Report Creation Time Lag (current)\",\n      \"type\": \"gauge\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        
\"uid\": \"prometheus\"\n },\n \"description\": \"Difference between the current time and the maximum report creation time, as ingested by any of the selected namespaces\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"linearThreshold\": 260,\n \"log\": 2,\n \"type\": \"symlog\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"dashed\"\n }\n },\n \"fieldMinMax\": false,\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"orange\",\n \"value\": 180\n },\n {\n \"color\": \"red\",\n \"value\": 260\n }\n ]\n },\n \"unit\": \"s\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 9,\n \"w\": 24,\n \"x\": 0,\n \"y\": 9\n },\n \"id\": 18,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"time() - max by (report_publisher) (splice_sv_status_report_creation_time_us{namespace=~\\\"$namespace\\\", report_publisher=~\\\"$sv_party\\\", canton_version=~\\\"$version\\\"}) / 1000000\",\n \"instant\": false,\n \"legendFormat\": \"{{report_publisher}}\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Report Creation Time Lag\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"custom\": {\n \"neutral\": 0\n },\n \"mappings\": [],\n \"max\": 1.2,\n \"min\": 0,\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"red\"\n },\n {\n \"color\": \"green\",\n \"value\": 0.4\n },\n {\n \"color\": \"purple\",\n \"value\": 3\n }\n ]\n },\n \"unit\": \"recpm\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 24,\n \"x\": 0,\n \"y\": 18\n },\n \"id\": 23,\n \"options\": {\n \"minVizHeight\": 75,\n \"minVizWidth\": 75,\n \"orientation\": \"auto\",\n \"reduceOptions\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"fields\": \"\",\n \"values\": false\n },\n \"showThresholdLabels\": false,\n \"showThresholdMarkers\": true,\n \"sizing\": \"auto\",\n \"text\": {}\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"rate(max by(report_publisher) (splice_sv_status_report_number{namespace=~\\\"$namespace\\\", report_publisher=~\\\"$sv_party\\\", canton_version=~\\\"$version\\\"})[5m:30s]) * 60\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n 
\"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{report_publisher}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Reporting Frequency (current)\",\n \"type\": \"gauge\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"First, takes the maximum report number for each SV node (as seen by ANY of the selected namespaces).\\n\\nThen, takes the 5m rate of change in that metric.\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"axisSoftMax\": 4,\n \"axisSoftMin\": 0,\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"area\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"red\"\n },\n {\n \"color\": \"green\",\n \"value\": 1.5\n },\n {\n \"color\": \"purple\",\n \"value\": 3\n }\n ]\n },\n \"unit\": \"recpm\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 11,\n \"w\": 24,\n \"x\": 0,\n \"y\": 26\n },\n \"id\": 7,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"rate(max by(report_publisher) (splice_sv_status_report_number{namespace=~\\\"$namespace\\\", report_publisher=~\\\"$sv_party\\\", canton_version=~\\\"$version\\\"})[5m:30s]) * 60\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{report_publisher}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Reporting Frequency\",\n \"type\": \"timeseries\"\n },\n {\n \"collapsed\": true,\n \"gridPos\": {\n \"h\": 1,\n \"w\": 24,\n \"x\": 0,\n \"y\": 37\n },\n \"id\": 22,\n \"panels\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": 
\"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"dateTimeAsIso\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 9,\n \"w\": 24,\n \"x\": 0,\n \"y\": 38\n },\n \"id\": 24,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"max by (namespace, report_publisher) (splice_sv_status_report_creation_time_us{namespace=~\\\"$namespace\\\", report_publisher=~\\\"$sv_party\\\", canton_version=~\\\"$version\\\"}) / 1000\",\n \"instant\": false,\n \"legendFormat\": \"{{report_publisher}} @ {{namespace}}\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Report Creation Time\",\n \"type\": \"timeseries\"\n }\n ],\n \"title\": \"Additional Info: Report time\",\n \"type\": \"row\"\n },\n {\n \"collapsed\": true,\n \"gridPos\": {\n \"h\": 1,\n \"w\": 24,\n \"x\": 0,\n \"y\": 38\n },\n \"id\": 12,\n \"panels\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"none\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 24,\n \"x\": 0,\n \"y\": 81\n },\n \"id\": 4,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"max by (namespace, report_publisher) (splice_sv_status_report_cometbft_height{namespace=~\\\"$namespace\\\", report_publisher=~\\\"$sv_party\\\", canton_version=~\\\"$version\\\"})\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{report_publisher}} @ {{namespace}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"CometBFT Height\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"Maximal minus 
minimal CometBFT height reported by the SV status reports seen from one SV's perspective\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"axisSoftMax\": 600,\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"area\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"orange\",\n \"value\": 500\n },\n {\n \"color\": \"red\",\n \"value\": 1000\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 24,\n \"x\": 0,\n \"y\": 89\n },\n \"id\": 15,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"max by (namespace) (max by (report_publisher, namespace) (splice_sv_status_report_cometbft_height{namespace=~\\\"$namespace\\\", report_publisher=~\\\"$sv_party\\\", canton_version=~\\\"$version\\\"}) ) - min by (namespace) (max by (report_publisher, namespace) (splice_sv_status_report_cometbft_height{namespace=~\\\"$namespace\\\", report_publisher=~\\\"$sv_party\\\", canton_version=~\\\"$version\\\"}))\",\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"CometBFT Height Lag\",\n \"type\": \"timeseries\"\n }\n ],\n \"title\": \"Additional Info: CometBFT\",\n \"type\": \"row\"\n },\n {\n \"collapsed\": true,\n \"gridPos\": {\n \"h\": 1,\n \"w\": 24,\n \"x\": 0,\n \"y\": 39\n },\n \"id\": 21,\n \"panels\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"dateTimeAsIso\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 24,\n 
\"x\": 0,\n \"y\": 40\n },\n \"id\": 2,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"splice_sv_status_report_domain_time_us{namespace=~\\\"$namespace\\\", report_publisher=~\\\"$sv_party\\\", canton_version=~\\\"$version\\\"} / 1000\",\n \"instant\": false,\n \"legendFormat\": \"{{target_node}} - {{ report_publisher }} {{ report_publisher_party }}\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Domain Time\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"How many seconds of progress on the reported participant domain time is observed every second.\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"axisSoftMax\": 1.6,\n \"axisSoftMin\": 0.4,\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"area\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"dark-red\"\n },\n {\n \"color\": \"orange\",\n \"value\": 0.5\n },\n {\n \"color\": \"green\",\n \"value\": 0.8\n },\n {\n \"color\": \"purple\",\n \"value\": 1.5\n }\n ]\n },\n \"unit\": \"none\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 24,\n \"x\": 0,\n \"y\": 65\n },\n \"id\": 9,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"multi\",\n \"sort\": \"asc\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"exemplar\": false,\n \"expr\": \"max by (namespace, report_publisher, target_node) (rate(splice_sv_status_report_domain_time_us{namespace=~\\\"$namespace\\\", report_publisher=~\\\"$sv_party\\\", canton_version=~\\\"$version\\\"}[10m])) / 1000000\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{target_node}} - {{report_publisher}} @ {{namespace}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Domain Time Progress\",\n \"type\": \"timeseries\"\n }\n ],\n \"title\": \"Additional Info: Domain Time\",\n \"type\": \"row\"\n },\n {\n \"collapsed\": true,\n \"gridPos\": {\n \"h\": 1,\n \"w\": 24,\n \"x\": 0,\n \"y\": 40\n },\n \"id\": 20,\n \"panels\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": 
{\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"none\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 9,\n \"w\": 24,\n \"x\": 0,\n \"y\": 41\n },\n \"id\": 13,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"max by (namespace, report_publisher) (splice_sv_status_report_latest_open_round{namespace=~\\\"$namespace\\\", report_publisher=~\\\"$sv_party\\\", canton_version=~\\\"$version\\\"})\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{report_publisher}} @ {{namespace}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Latest Open Mining Round\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"Should not go above 1 when all SVs are healthy, serves as a basic safeguard to see that the round structure makes progress from all SVs' point of view.\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"area\"\n }\n },\n \"mappings\": [],\n \"max\": 5,\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"orange\",\n \"value\": 4\n },\n {\n \"color\": \"red\",\n \"value\": 10\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 24,\n \"x\": 0,\n \"y\": 50\n },\n \"id\": 5,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"mode\": \"single\",\n \"sort\": 
\"none\"\n }\n },\n \"pluginVersion\": \"11.4.0\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"max by (namespace) (max by (report_publisher, namespace) (splice_sv_status_report_latest_open_round{namespace=~\\\"$namespace\\\", report_publisher=~\\\"$sv_party\\\", canton_version=~\\\"$version\\\"}) ) - min by (namespace) (max by (report_publisher, namespace) (splice_sv_status_report_latest_open_round{namespace=~\\\"$namespace\\\", report_publisher=~\\\"$sv_party\\\", canton_version=~\\\"$version\\\"}))\",\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Latest Open Mining Round Lag\",\n \"type\": \"timeseries\"\n }\n ],\n \"title\": \"Additional Info: Mining rounds\",\n \"type\": \"row\"\n }\n ],\n \"preload\": false,\n \"refresh\": \"30s\",\n \"schemaVersion\": 41,\n \"tags\": [],\n \"templating\": {\n \"list\": [\n {\n \"allValue\": \".*\",\n \"current\": {\n \"text\": \"All\",\n \"value\": \"$__all\"\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(splice_sv_status_report_creation_time_us,namespace)\",\n \"includeAll\": true,\n \"multi\": true,\n \"name\": \"namespace\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(splice_sv_status_report_creation_time_us,namespace)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n },\n {\n \"allValue\": \".*\",\n \"current\": {\n \"text\": \"All\",\n \"value\": \"$__all\"\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(splice_sv_status_report_creation_time_us,report_publisher)\",\n \"description\": \"The SV whose node published the status report.\",\n \"includeAll\": true,\n \"label\": \"report publisher\",\n \"multi\": true,\n \"name\": \"sv_party\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(splice_sv_status_report_creation_time_us,report_publisher)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n },\n {\n \"allValue\": \".*\",\n \"current\": {\n \"text\": \"All\",\n \"value\": \"$__all\"\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(splice_sv_status_report_creation_time_us,canton_version)\",\n \"includeAll\": true,\n \"multi\": true,\n \"name\": \"version\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(splice_sv_status_report_creation_time_us,canton_version)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n }\n ]\n },\n \"time\": {\n \"from\": \"now-30m\",\n \"to\": \"now\"\n },\n \"timepicker\": {},\n \"timezone\": \"\",\n \"title\": \"SV Status Reports\",\n \"uid\": \"caffa6f7-c421-4579-a839-b026d3b76826\",\n \"version\": 1\n}\n", @@ -1003,7 +1130,8 @@ "participant.json": "{\n \"annotations\": {\n \"list\": [\n {\n \"builtIn\": 1,\n \"datasource\": {\n \"type\": \"grafana\",\n \"uid\": \"-- Grafana --\"\n },\n \"enable\": true,\n \"hide\": true,\n \"iconColor\": \"rgba(0, 211, 255, 1)\",\n \"name\": \"Annotations & Alerts\",\n \"type\": \"dashboard\"\n }\n ]\n },\n \"editable\": true,\n \"fiscalYearStartMonth\": 0,\n \"graphTooltip\": 0,\n 
\"id\": 4651,\n \"links\": [],\n \"panels\": [\n {\n \"collapsed\": false,\n \"gridPos\": {\n \"h\": 1,\n \"w\": 24,\n \"x\": 0,\n \"y\": 0\n },\n \"id\": 6,\n \"panels\": [],\n \"title\": \"Sequencer Connection\",\n \"type\": \"row\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"Delay on event processing of the participant, compared to the sequencers it is connected to.\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"ms\"\n },\n \"overrides\": [\n {\n \"matcher\": {\n \"id\": \"byFrameRefID\",\n \"options\": \"A\"\n },\n \"properties\": [\n {\n \"id\": \"custom.lineWidth\",\n \"value\": 5\n }\n ]\n },\n {\n \"matcher\": {\n \"id\": \"byFrameRefID\",\n \"options\": \"B\"\n },\n \"properties\": [\n {\n \"id\": \"custom.lineStyle\",\n \"value\": {\n \"dash\": [\n 10,\n 10\n ],\n \"fill\": \"dash\"\n }\n }\n ]\n }\n ]\n },\n \"gridPos\": {\n \"h\": 10,\n \"w\": 24,\n \"x\": 0,\n \"y\": 1\n },\n \"id\": 3,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true,\n \"sortBy\": \"Last *\",\n \"sortDesc\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"daml_sequencer_client_handler_delay{namespace=~\\\"$namespace\\\",component=\\\"participant\\\",job=~\\\"$job\\\", node=~\\\"$participant\\\"}\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"_ overall - {{namespace}} {{job}} {{node}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"daml_sequencer_client_handler_delay_per_connection{namespace=~\\\"$namespace\\\",component=\\\"participant\\\",job=~\\\"$job\\\", node=~\\\"$participant\\\"}\",\n \"hide\": false,\n \"instant\": false,\n \"legendFormat\": \"{{namespace}} {{job}} {{node}} {{sequencer}}\",\n \"range\": true,\n \"refId\": \"B\"\n }\n ],\n \"title\": \"Participant Client Delay\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"Sequencer submissions currently running. 
Note this is at the level of a sequencer submission, not a Daml transaction.\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"submissions\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 10,\n \"w\": 12,\n \"x\": 0,\n \"y\": 11\n },\n \"id\": 10,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true,\n \"sortBy\": \"Last *\",\n \"sortDesc\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"daml_sequencer_client_submissions_in_flight{namespace=~\\\"$namespace\\\",component=\\\"participant\\\",job=~\\\"$job\\\", node=~\\\"$participant\\\"}\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"interval\": \"1m\",\n \"legendFormat\": \"{{namespace}} {{job}} {{node}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"In Flight Submissions\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"Submissions that were not sequenced\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"epm\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 10,\n \"w\": 12,\n \"x\": 12,\n \"y\": 11\n },\n \"id\": 7,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true,\n \"sortBy\":
\"Last *\",\n \"sortDesc\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"increase(daml_sequencer_client_submissions_dropped{namespace=~\\\"$namespace\\\",component=\\\"participant\\\",job=~\\\"$job\\\", node=~\\\"$participant\\\"}[1m])\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"interval\": \"1m\",\n \"legendFormat\": \"{{namespace}} {{job}} {{node}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Dropped Submissiones\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 10,\n \"w\": 12,\n \"x\": 0,\n \"y\": 21\n },\n \"id\": 12,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"editorMode\": \"code\",\n \"exemplar\": false,\n \"expr\": \"daml_sequencer_client_handler_max_in_flight_event_batches{namespace=~\\\"$namespace\\\",component=\\\"participant\\\",job=~\\\"$job\\\", node=~\\\"$participant\\\"}\",\n \"instant\": false,\n \"legendFormat\": \"max {{namespace}} {{job}}\",\n \"range\": true,\n \"refId\": \"A\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \"daml_sequencer_client_handler_actual_in_flight_event_batches{namespace=~\\\"$namespace\\\",component=\\\"participant\\\",job=~\\\"$job\\\", node=~\\\"$participant\\\"}\",\n \"hide\": false,\n \"legendFormat\": \"actual {{namespace}} {{job}}\",\n \"range\": true,\n \"refId\": \"B\"\n }\n ],\n \"title\": \"Sequencer Client Batches \",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"Events received from the sequencer\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": 
\"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"epm\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 10,\n \"w\": 12,\n \"x\": 12,\n \"y\": 21\n },\n \"id\": 13,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true,\n \"sortBy\": \"Last *\",\n \"sortDesc\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"rate(daml_sequencer_client_handler_sequencer_events{namespace=~\\\"$namespace\\\",component=\\\"participant\\\",job=~\\\"$job\\\", node=~\\\"$participant\\\"}[$__rate_interval])\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"interval\": \"1m\",\n \"legendFormat\": \"{{namespace}} {{job}} {{node}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Events received\",\n \"type\": \"timeseries\"\n },\n {\n \"collapsed\": true,\n \"gridPos\": {\n \"h\": 1,\n \"w\": 24,\n \"x\": 0,\n \"y\": 31\n },\n \"id\": 8,\n \"panels\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"Daml commands through the synchronous CommandService currently in flight\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"submissions\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 10,\n \"w\": 12,\n \"x\": 0,\n \"y\": 32\n },\n \"id\": 11,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true,\n \"sortBy\": \"Last *\",\n \"sortDesc\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n 
\"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"daml_participant_api_commands_max_in_flight_length{namespace=~\\\"$namespace\\\",component=\\\"participant\\\",job=~\\\"$job\\\", node=~\\\"$participant\\\"}\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"interval\": \"1m\",\n \"legendFormat\": \"{{namespace}} {{job}} {{node}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"In Flight Commands\",\n \"type\": \"timeseries\"\n }\n ],\n \"title\": \"Commands\",\n \"type\": \"row\"\n },\n {\n \"collapsed\": false,\n \"gridPos\": {\n \"h\": 1,\n \"w\": 24,\n \"x\": 0,\n \"y\": 32\n },\n \"id\": 5,\n \"panels\": [],\n \"title\": \"Status\",\n \"type\": \"row\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"Status of the participant (healthy or not)\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"custom\": {\n \"axisPlacement\": \"auto\",\n \"fillOpacity\": 70,\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineWidth\": 0,\n \"spanNulls\": false\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 0\n },\n {\n \"color\": \"green\",\n \"value\": 1\n }\n ]\n },\n \"unit\": \"Alive\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 11,\n \"w\": 24,\n \"x\": 0,\n \"y\": 33\n },\n \"id\": 4,\n \"options\": {\n \"alignValue\": \"left\",\n \"legend\": {\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"mergeValues\": true,\n \"rowHeight\": 0.9,\n \"showValue\": \"auto\",\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"${DS}\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"daml_health_status{namespace=~\\\"$namespace\\\",component=\\\"participant\\\",job=~\\\"$job\\\", node=~\\\"$participant\\\"}\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{namespace}} {{job}} {{node}}\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Health Status\",\n \"type\": \"state-timeline\"\n }\n ],\n \"preload\": false,\n \"refresh\": \"5m\",\n \"schemaVersion\": 41,\n \"tags\": [],\n \"templating\": {\n \"list\": [\n {\n \"current\": {\n \"text\": [\n \"sv-8\"\n ],\n \"value\": [\n \"sv-8\"\n ]\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(daml_health_status{component=\\\"participant\\\"},namespace)\",\n \"includeAll\": true,\n \"multi\": true,\n \"name\": \"namespace\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(daml_health_status{component=\\\"participant\\\"},namespace)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 2,\n \"regex\": \"\",\n \"sort\": 7,\n \"type\": \"query\"\n },\n {\n \"allValue\": \".*\",\n \"current\": {\n \"text\": \"All\",\n \"value\": [\n \"$__all\"\n ]\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": 
\"label_values(daml_health_status{namespace=~\\\"$namespace\\\", component=\\\"participant\\\"},job)\",\n \"includeAll\": true,\n \"multi\": true,\n \"name\": \"job\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(daml_health_status{namespace=~\\\"$namespace\\\", component=\\\"participant\\\"},job)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 2,\n \"regex\": \"\",\n \"sort\": 7,\n \"type\": \"query\"\n },\n {\n \"allValue\": \".*\",\n \"current\": {\n \"text\": \"All\",\n \"value\": [\n \"$__all\"\n ]\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(daml_health_status{namespace=~\\\"$namespace\\\", job=~\\\"$job\\\"},node)\",\n \"includeAll\": true,\n \"multi\": true,\n \"name\": \"participant\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(daml_health_status{namespace=~\\\"$namespace\\\", job=~\\\"$job\\\"},node)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 2,\n \"regex\": \"\",\n \"sort\": 7,\n \"type\": \"query\"\n }\n ]\n },\n \"time\": {\n \"from\": \"now-30m\",\n \"to\": \"now\"\n },\n \"timepicker\": {},\n \"timezone\": \"\",\n \"title\": \"Participant\",\n \"uid\": \"edkzo5ukgeqyoc\",\n \"version\": 2\n}\n", "sequencer-client.json": "{\n \"annotations\": {\n \"list\": [\n {\n \"builtIn\": 1,\n \"datasource\": {\n \"type\": \"grafana\",\n \"uid\": \"-- Grafana --\"\n },\n \"enable\": true,\n \"hide\": true,\n \"iconColor\": \"rgba(0, 211, 255, 1)\",\n \"name\": \"Annotations & Alerts\",\n \"type\": \"dashboard\"\n }\n ]\n },\n \"description\": \"Metrics exposed by the sequencer client (aka sequencer connection) in participants and mediators\",\n \"editable\": true,\n \"fiscalYearStartMonth\": 0,\n \"graphTooltip\": 0,\n \"id\": 5124,\n \"links\": [],\n \"panels\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"Latency of a sequencer submission from submission until the node observes it being sequenced.\\nNote that this is the latency of an individuals sequencer submission not a Daml transaction which involves multiple sequencer submissions\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"s\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 9,\n \"w\": 24,\n \"x\": 0,\n \"y\": 0\n },\n \"id\": 1,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": true,\n \"mode\": \"multi\",\n 
\"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"expr\": \" histogram_quantile($quantile, sum by (namespace, job, node) (rate(daml_sequencer_client_submissions_sequencing_duration_seconds{namespace=~\\\"$namespace\\\",job=~\\\"$job\\\",node=~\\\"$node\\\"}[$__rate_interval])))\",\n \"legendFormat\": \" ({{namespace}}, {{job}}) {{node}}\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Sequencer Submission Latency\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"Rate of sequencer submissions both successful and failed\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 9,\n \"w\": 24,\n \"x\": 0,\n \"y\": 9\n },\n \"id\": 3,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": false\n },\n \"tooltip\": {\n \"hideZeros\": true,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"editorMode\": \"code\",\n \"expr\": \"histogram_count(rate(daml_sequencer_client_submissions_sends_duration_seconds{namespace=~\\\"$namespace\\\",job=~\\\"$job\\\",node=~\\\"$node\\\"}[$__rate_interval]))\",\n \"legendFormat\": \"{{namespace}} {{job}} {{node}} {{sequencer}}\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Rate of sequencer submissions\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"Rate of sequencer submissions that have not been sequenced.\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": 
\"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 24,\n \"x\": 0,\n \"y\": 18\n },\n \"id\": 2,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": true,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"editorMode\": \"code\",\n \"expr\": \"sum by (namespace, job, node) (rate(daml_sequencer_client_submissions_dropped{namespace=~\\\"$namespace\\\",job=~\\\"$job\\\",node=~\\\"$node\\\"}[$__rate_interval]))\",\n \"legendFormat\": \"{{namespace}} {{job}} {{node}} {{sequencer}}\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Rate of dropped sequencer submissions\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"Rate of events received from the sequencer\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 8,\n \"w\": 24,\n \"x\": 0,\n \"y\": 26\n },\n \"id\": 4,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": false\n },\n \"tooltip\": {\n \"hideZeros\": true,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"editorMode\": \"code\",\n \"expr\": \"rate(daml_sequencer_client_handler_sequencer_events{namespace=~\\\"$namespace\\\",job=~\\\"$job\\\",node=~\\\"$node\\\"}[$__rate_interval])\",\n \"legendFormat\": \"{{namespace}} {{job}} {{node}} {{sequencer}}\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Rate of received sequencer events\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"The sequencer delay on the overall BFT connection, i.e., the difference between wallclock time and record time of the last received event where an event only counts as received once it has been received from a threshold of SVs.\\n\\nNote that this only changes when a new event is received so if no events are received it will not go up.\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n 
\"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"ms\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 7,\n \"w\": 24,\n \"x\": 0,\n \"y\": 34\n },\n \"id\": 6,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": false\n },\n \"tooltip\": {\n \"hideZeros\": true,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"editorMode\": \"code\",\n \"expr\": \"daml_sequencer_client_handler_delay{namespace=~\\\"$namespace\\\",job=~\\\"$job\\\",node=~\\\"$node\\\"}\",\n \"legendFormat\": \"{{namespace}} {{job}} {{node}}\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Sequencer Delay\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"Delay between wallclock time and the record time of the last sequencer event received on each connection. Note that this only goes up or down if an event actually was received. So for a sequencer that is completely down you will not see it go up.\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n },\n \"unit\": \"ms\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 9,\n \"w\": 24,\n \"x\": 0,\n \"y\": 41\n },\n \"id\": 5,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": false,\n \"sortBy\": \"Name\",\n \"sortDesc\": true\n },\n \"tooltip\": {\n \"hideZeros\": true,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"editorMode\": \"code\",\n \"expr\": \"daml_sequencer_client_handler_delay_per_connection{namespace=~\\\"$namespace\\\",job=~\\\"$job\\\",node=~\\\"$node\\\"}\",\n \"legendFormat\": \"{{namespace}} {{job}} {{node}} {{sequencer}}\",\n \"range\": true,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Delay per sequencer connection\",\n 
\"type\": \"timeseries\"\n }\n ],\n \"preload\": false,\n \"schemaVersion\": 41,\n \"tags\": [],\n \"templating\": {\n \"list\": [\n {\n \"allValue\": \".*\",\n \"current\": {\n \"text\": [\n \"sv-1\"\n ],\n \"value\": [\n \"sv-1\"\n ]\n },\n \"definition\": \"label_values(daml_sequencer_client_submissions_sequencing_duration_seconds,namespace)\",\n \"includeAll\": true,\n \"multi\": true,\n \"name\": \"namespace\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(daml_sequencer_client_submissions_sequencing_duration_seconds,namespace)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n },\n {\n \"allValue\": \".*\",\n \"current\": {\n \"text\": \"All\",\n \"value\": [\n \"$__all\"\n ]\n },\n \"definition\": \"label_values(daml_sequencer_client_submissions_sequencing_duration_seconds{namespace=~\\\"$namespace\\\"},job)\",\n \"includeAll\": true,\n \"multi\": true,\n \"name\": \"job\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(daml_sequencer_client_submissions_sequencing_duration_seconds{namespace=~\\\"$namespace\\\"},job)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n },\n {\n \"allValue\": \".*\",\n \"current\": {\n \"text\": \"All\",\n \"value\": [\n \"$__all\"\n ]\n },\n \"definition\": \"label_values(daml_sequencer_client_submissions_sequencing_duration_seconds{namespace=~\\\"$namespace\\\", job=~\\\"$job\\\"},node)\",\n \"includeAll\": true,\n \"multi\": true,\n \"name\": \"node\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(daml_sequencer_client_submissions_sequencing_duration_seconds{namespace=~\\\"$namespace\\\", job=~\\\"$job\\\"},node)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n },\n {\n \"current\": {\n \"text\": \"0.9\",\n \"value\": \"0.9\"\n },\n \"name\": \"quantile\",\n \"options\": [\n {\n \"selected\": true,\n \"text\": \"0.9\",\n \"value\": \"0.9\"\n },\n {\n \"selected\": false,\n \"text\": \"0.95\",\n \"value\": \"0.95\"\n },\n {\n \"selected\": false,\n \"text\": \"0.99\",\n \"value\": \"0.99\"\n }\n ],\n \"query\": \"0.9,0.95,0.99\",\n \"type\": \"custom\"\n }\n ]\n },\n \"time\": {\n \"from\": \"now-30m\",\n \"to\": \"now\"\n },\n \"timepicker\": {},\n \"timezone\": \"UTC\",\n \"title\": \"Sequencer Client\",\n \"uid\": \"eecxhade03n5se\",\n \"version\": 2\n}\n", "sequencer.json": "{\n \"annotations\": {\n \"list\": [\n {\n \"builtIn\": 1,\n \"datasource\": {\n \"type\": \"grafana\",\n \"uid\": \"-- Grafana --\"\n },\n \"enable\": true,\n \"hide\": true,\n \"iconColor\": \"rgba(0, 211, 255, 1)\",\n \"name\": \"Annotations & Alerts\",\n \"type\": \"dashboard\"\n }\n ]\n },\n \"editable\": true,\n \"fiscalYearStartMonth\": 0,\n \"graphTooltip\": 0,\n \"id\": 4663,\n \"links\": [],\n \"panels\": [\n {\n \"collapsed\": false,\n \"gridPos\": {\n \"h\": 1,\n \"w\": 24,\n \"x\": 0,\n \"y\": 0\n },\n \"id\": 2,\n \"panels\": [],\n \"title\": \"Subscriptions\",\n \"type\": \"row\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n 
\"overrides\": []\n },\n \"gridPos\": {\n \"h\": 9,\n \"w\": 24,\n \"x\": 0,\n \"y\": 1\n },\n \"id\": 1,\n \"options\": {\n \"minVizHeight\": 75,\n \"minVizWidth\": 75,\n \"orientation\": \"auto\",\n \"reduceOptions\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"fields\": \"\",\n \"values\": false\n },\n \"showThresholdLabels\": false,\n \"showThresholdMarkers\": true,\n \"sizing\": \"auto\"\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"exemplar\": false,\n \"expr\": \"daml_sequencer_public_api_subscriptions\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": true,\n \"legendFormat\": \"{{namespace}} M{{migration_id}}\",\n \"range\": false,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Subscriptions\",\n \"type\": \"gauge\"\n },\n {\n \"datasource\": {\n \"default\": true,\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 10,\n \"w\": 24,\n \"x\": 0,\n \"y\": 10\n },\n \"id\": 3,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\",\n \"max\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true,\n \"sortBy\": \"Last *\",\n \"sortDesc\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"daml_sequencer_public_api_subscriptions{namespace=~\\\"$namespace\\\"}\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"{{namespace}} M{{migration_id}}\",\n \"range\": true,\n \"refId\": \"total\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Number of active subscriptions\",\n \"type\": \"timeseries\"\n }\n ],\n \"preload\": false,\n \"schemaVersion\": 41,\n \"tags\": [],\n \"templating\": {\n \"list\": [\n {\n \"current\": {\n \"text\": [\n \"sv-1\"\n ],\n \"value\": [\n \"sv-1\"\n ]\n },\n \"definition\": \"label_values(daml_sequencer_public_api_subscriptions,namespace)\",\n \"includeAll\": true,\n \"multi\": true,\n \"name\": \"namespace\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(daml_sequencer_public_api_subscriptions,namespace)\",\n \"refId\": 
\"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n }\n ]\n },\n \"time\": {\n \"from\": \"now-1h\",\n \"to\": \"now\"\n },\n \"timepicker\": {},\n \"timezone\": \"UTC\",\n \"title\": \"Sequencer Subscriptions\",\n \"uid\": \"685922ac-4aca-4b28-a1a1-4375a93a514c\",\n \"version\": 3\n}\n", - "sequencer_messages.json": "{\n \"annotations\": {\n \"list\": [\n {\n \"builtIn\": 1,\n \"datasource\": {\n \"type\": \"grafana\",\n \"uid\": \"-- Grafana --\"\n },\n \"enable\": true,\n \"hide\": true,\n \"iconColor\": \"rgba(0, 211, 255, 1)\",\n \"name\": \"Annotations & Alerts\",\n \"type\": \"dashboard\"\n }\n ]\n },\n \"editable\": true,\n \"fiscalYearStartMonth\": 0,\n \"graphTooltip\": 0,\n \"id\": 5075,\n \"links\": [],\n \"panels\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"The average per second messages for the last 24h\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n }\n },\n \"mappings\": [],\n \"unit\": \"mps\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 11,\n \"w\": 7,\n \"x\": 0,\n \"y\": 0\n },\n \"id\": 8,\n \"options\": {\n \"displayLabels\": [\n \"name\",\n \"value\",\n \"percent\"\n ],\n \"legend\": {\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true,\n \"values\": [\n \"percent\",\n \"value\"\n ]\n },\n \"pieType\": \"donut\",\n \"reduceOptions\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"fields\": \"\",\n \"values\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"exemplar\": false,\n \"expr\": \"sum by(type) (rate(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\"}[24h]))\",\n \"format\": \"time_series\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": true,\n \"legendFormat\": \"__auto\",\n \"range\": false,\n \"refId\": \"B\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Last 24h messages / s\",\n \"type\": \"piechart\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": [\n {\n \"matcher\": {\n \"id\": \"byFrameRefID\",\n \"options\": 
\"total\"\n },\n \"properties\": [\n {\n \"id\": \"displayName\",\n \"value\": \"total\"\n }\n ]\n }\n ]\n },\n \"gridPos\": {\n \"h\": 11,\n \"w\": 17,\n \"x\": 7,\n \"y\": 0\n },\n \"id\": 1,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"expr\": \"sum(rate(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\"}[$__rate_interval]))\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"total\",\n \"useBackend\": false\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"expr\": \"sum by(type) (rate(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\"}[$__rate_interval]))\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"type\",\n \"useBackend\": false\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"expr\": \"sum(rate(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\"}[1h]))\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"hourly total\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"sum(rate(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\"}[1h] offset 30d))\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"hourly total 30 days ago\",\n \"range\": true,\n \"refId\": \"B\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Sequencer Message Rate\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n 
\"gridPos\": {\n \"h\": 11,\n \"w\": 20,\n \"x\": 0,\n \"y\": 11\n },\n \"id\": 4,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"expr\": \"sum by(member) (rate(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\", type=\\\"send-confirmation-request\\\"}[$__rate_interval]))\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"total\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Confirmation Requests Rate by Member\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"custom\": {\n \"align\": \"auto\",\n \"cellOptions\": {\n \"type\": \"auto\"\n },\n \"inspect\": false\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 11,\n \"w\": 4,\n \"x\": 20,\n \"y\": 11\n },\n \"id\": 6,\n \"options\": {\n \"cellHeight\": \"sm\",\n \"footer\": {\n \"countRows\": false,\n \"fields\": \"\",\n \"reducer\": [\n \"sum\"\n ],\n \"show\": false\n },\n \"showHeader\": true\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"exemplar\": false,\n \"expr\": \"sort_desc(sum(delta(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\",job=\\\"$job\\\",type=\\\"send-confirmation-request\\\"}[1h])) by (member))\",\n \"format\": \"table\",\n \"instant\": true,\n \"legendFormat\": \"__auto\",\n \"range\": false,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Confirmation Requests by Member in the last Hour\",\n \"transformations\": [\n {\n \"id\": \"organize\",\n \"options\": {\n \"excludeByName\": {\n \"Time\": true\n },\n \"includeByName\": {},\n \"indexByName\": {},\n \"renameByName\": {\n \"Time\": \"\"\n }\n }\n }\n ],\n \"type\": \"table\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n 
\"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 15,\n \"w\": 24,\n \"x\": 0,\n \"y\": 22\n },\n \"id\": 2,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true,\n \"sortBy\": \"Last *\",\n \"sortDesc\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"sum(rate(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\",member=~\\\"PAR::.*\\\"}[$__rate_interval])) by (member)\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"total\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Sequencer Message Rate by Participant\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 15,\n \"w\": 24,\n \"x\": 0,\n \"y\": 37\n },\n \"id\": 7,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"sum(rate(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\",member=~\\\"MED::.*\\\"}[$__rate_interval])) by (member)\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"total\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Sequencer Message Rate by Mediator\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n 
\"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 11,\n \"w\": 24,\n \"x\": 0,\n \"y\": 52\n },\n \"id\": 5,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"sum by(member) (rate(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\", member!~\\\"(MED|PAR)::.*\\\"}[$__rate_interval]))\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"total\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Sequencer Message Rate by Member excluding Mediator/Participant\",\n \"type\": \"timeseries\"\n }\n ],\n \"preload\": false,\n \"refresh\": \"\",\n \"schemaVersion\": 41,\n \"tags\": [],\n \"templating\": {\n \"list\": [\n {\n \"current\": {\n \"text\": \"sv-1\",\n \"value\": \"sv-1\"\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(daml_sequencer_block_events_total,namespace)\",\n \"includeAll\": false,\n \"name\": \"namespace\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(daml_sequencer_block_events_total,namespace)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n },\n {\n \"current\": {\n \"text\": \"global-domain-6-sequencer\",\n \"value\": \"global-domain-6-sequencer\"\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\"},job)\",\n \"includeAll\": false,\n \"name\": \"job\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\"},job)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n }\n ]\n },\n \"time\": {\n \"from\": \"now-1h\",\n \"to\": \"now\"\n },\n \"timepicker\": {},\n \"timezone\": \"\",\n \"title\": \"Sequencer Messages\",\n \"uid\": \"fdjrxql2alblsd\",\n \"version\": 5\n}\n" + "sequencer_messages.json": "{\n \"annotations\": {\n \"list\": [\n {\n \"builtIn\": 1,\n \"datasource\": {\n \"type\": \"grafana\",\n \"uid\": \"-- Grafana --\"\n },\n \"enable\": true,\n \"hide\": true,\n \"iconColor\": \"rgba(0, 211, 255, 1)\",\n \"name\": \"Annotations & Alerts\",\n \"type\": \"dashboard\"\n }\n ]\n },\n \"editable\": true,\n \"fiscalYearStartMonth\": 0,\n \"graphTooltip\": 1,\n \"id\": 80,\n 
\"links\": [],\n \"panels\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"description\": \"The average per second messages for the last 24h\",\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n }\n },\n \"mappings\": [],\n \"unit\": \"mps\"\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 11,\n \"w\": 7,\n \"x\": 0,\n \"y\": 0\n },\n \"id\": 8,\n \"options\": {\n \"displayLabels\": [\n \"name\",\n \"value\",\n \"percent\"\n ],\n \"legend\": {\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true,\n \"values\": [\n \"percent\",\n \"value\"\n ]\n },\n \"pieType\": \"donut\",\n \"reduceOptions\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"fields\": \"\",\n \"values\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"exemplar\": false,\n \"expr\": \"sum by(type) (rate(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\"}[24h]))\",\n \"format\": \"time_series\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": true,\n \"legendFormat\": \"__auto\",\n \"range\": false,\n \"refId\": \"B\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Last 24h messages / s\",\n \"type\": \"piechart\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": [\n {\n \"matcher\": {\n \"id\": \"byFrameRefID\",\n \"options\": \"total\"\n },\n \"properties\": [\n {\n \"id\": \"displayName\",\n \"value\": \"total\"\n }\n ]\n }\n ]\n },\n \"gridPos\": {\n \"h\": 11,\n \"w\": 17,\n \"x\": 7,\n \"y\": 0\n },\n \"id\": 1,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"expr\": \"sum(rate(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\"}[$__rate_interval]))\",\n \"fullMetaSearch\": false,\n 
\"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"total\",\n \"useBackend\": false\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"expr\": \"sum by(type) (rate(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\"}[$__rate_interval]))\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"type\",\n \"useBackend\": false\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"expr\": \"sum(rate(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\"}[1h]))\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"hourly total\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"sum(rate(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\"}[1h] offset 30d))\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"hourly total 30 days ago\",\n \"range\": true,\n \"refId\": \"B\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Sequencer Message Rate\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 11,\n \"w\": 20,\n \"x\": 0,\n \"y\": 11\n },\n \"id\": 4,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": true,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"expr\": \"sum by(member) (rate(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\", type=\\\"send-confirmation-request\\\"}[$__rate_interval]))\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": 
false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"total\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Confirmation Requests Rate by Member\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"custom\": {\n \"align\": \"auto\",\n \"cellOptions\": {\n \"type\": \"auto\"\n },\n \"inspect\": false\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 11,\n \"w\": 4,\n \"x\": 20,\n \"y\": 11\n },\n \"id\": 6,\n \"options\": {\n \"cellHeight\": \"sm\",\n \"footer\": {\n \"countRows\": false,\n \"fields\": \"\",\n \"reducer\": [\n \"sum\"\n ],\n \"show\": false\n },\n \"showHeader\": true\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"editorMode\": \"code\",\n \"exemplar\": false,\n \"expr\": \"sort_desc(sum(delta(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\",job=\\\"$job\\\",type=\\\"send-confirmation-request\\\"}[1h])) by (member))\",\n \"format\": \"table\",\n \"instant\": true,\n \"legendFormat\": \"__auto\",\n \"range\": false,\n \"refId\": \"A\"\n }\n ],\n \"title\": \"Confirmation Requests by Member in the last Hour\",\n \"transformations\": [\n {\n \"id\": \"organize\",\n \"options\": {\n \"excludeByName\": {\n \"Time\": true\n },\n \"includeByName\": {},\n \"indexByName\": {},\n \"renameByName\": {\n \"Time\": \"\"\n }\n }\n }\n ],\n \"type\": \"table\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 15,\n \"w\": 24,\n \"x\": 0,\n \"y\": 22\n },\n \"id\": 2,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true,\n \"sortBy\": \"Last *\",\n \"sortDesc\": true\n },\n \"tooltip\": {\n \"hideZeros\": true,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"sum(rate(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\",member=~\\\"PAR::.*\\\"}[$__rate_interval])) by 
(member)\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"total\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Sequencer Message Rate by Participant\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 15,\n \"w\": 24,\n \"x\": 0,\n \"y\": 37\n },\n \"id\": 7,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": true,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"sum(rate(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\",member=~\\\"MED::.*\\\"}[$__rate_interval])) by (member)\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"total\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Sequencer Message Rate by Mediator\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 11,\n \"w\": 24,\n \"x\": 0,\n \"y\": 52\n },\n \"id\": 5,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n 
\"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"expr\": \"sum by(member) (rate(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\", member!~\\\"(MED|PAR)::.*\\\"}[$__rate_interval]))\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"total\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Sequencer Message Rate by Member excluding Mediator/Participant\",\n \"type\": \"timeseries\"\n }\n ],\n \"preload\": false,\n \"refresh\": \"\",\n \"schemaVersion\": 41,\n \"tags\": [],\n \"templating\": {\n \"list\": [\n {\n \"current\": {\n \"text\": \"sv-1\",\n \"value\": \"sv-1\"\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(daml_sequencer_block_events_total,namespace)\",\n \"includeAll\": false,\n \"name\": \"namespace\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(daml_sequencer_block_events_total,namespace)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n },\n {\n \"current\": {\n \"text\": \"global-domain-0-sequencer\",\n \"value\": \"global-domain-0-sequencer\"\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\"},job)\",\n \"includeAll\": false,\n \"name\": \"job\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\"},job)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n }\n ]\n },\n \"time\": {\n \"from\": \"now-1h\",\n \"to\": \"now\"\n },\n \"timepicker\": {},\n \"timezone\": \"\",\n \"title\": \"Sequencer Messages\",\n \"uid\": \"fdjrxql2alblsd\",\n \"version\": 2\n}\n", + "sequencer_topology_transactions.json": "{\n \"annotations\": {\n \"list\": [\n {\n \"builtIn\": 1,\n \"datasource\": {\n \"type\": \"grafana\",\n \"uid\": \"-- Grafana --\"\n },\n \"enable\": true,\n \"hide\": true,\n \"iconColor\": \"rgba(0, 211, 255, 1)\",\n \"name\": \"Annotations & Alerts\",\n \"type\": \"dashboard\"\n }\n ]\n },\n \"editable\": true,\n \"fiscalYearStartMonth\": 0,\n \"graphTooltip\": 0,\n \"id\": 3826,\n \"links\": [],\n \"panels\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": 
\"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": [\n {\n \"matcher\": {\n \"id\": \"byFrameRefID\",\n \"options\": \"total\"\n },\n \"properties\": [\n {\n \"id\": \"displayName\",\n \"value\": \"total\"\n }\n ]\n }\n ]\n },\n \"gridPos\": {\n \"h\": 12,\n \"w\": 16,\n \"x\": 0,\n \"y\": 0\n },\n \"id\": 1,\n \"options\": {\n \"legend\": {\n \"calcs\": [],\n \"displayMode\": \"list\",\n \"placement\": \"bottom\",\n \"showLegend\": true\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"multi\",\n \"sort\": \"desc\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"expr\": \"sum by(type) (rate(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\", type=\\\"send-topology\\\"}[$__rate_interval]))\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"type\",\n \"useBackend\": false\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"expr\": \"sum(rate(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\", type=\\\"send-topology\\\"}[1h]))\",\n \"fullMetaSearch\": false,\n \"hide\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"hourly total\",\n \"range\": true,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Topology send rate\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"custom\": {\n \"align\": \"auto\",\n \"cellOptions\": {\n \"type\": \"auto\"\n },\n \"inspect\": false\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 12,\n \"w\": 8,\n \"x\": 16,\n \"y\": 0\n },\n \"id\": 3,\n \"options\": {\n \"cellHeight\": \"sm\",\n \"footer\": {\n \"countRows\": false,\n \"fields\": \"\",\n \"reducer\": [\n \"sum\"\n ],\n \"show\": false\n },\n \"showHeader\": true\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"exemplar\": false,\n \"expr\": \"sort_desc(sum by(member) (delta(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\", type=\\\"send-topology\\\", member=~\\\"PAR::.*\\\"}[24h])))\",\n \"format\": \"table\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": true,\n \"legendFormat\": \"__auto\",\n \"range\": false,\n \"refId\": \"A\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Topology transactions participants last 24h\",\n \"transformations\": [\n {\n \"id\": \"organize\",\n \"options\": {\n \"excludeByName\": {\n \"Time\": true\n },\n \"includeByName\": {},\n \"indexByName\": 
{},\n \"renameByName\": {\n \"Time\": \"\"\n }\n }\n }\n ],\n \"type\": \"table\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"palette-classic\"\n },\n \"custom\": {\n \"axisBorderShow\": false,\n \"axisCenteredZero\": false,\n \"axisColorMode\": \"text\",\n \"axisLabel\": \"\",\n \"axisPlacement\": \"auto\",\n \"barAlignment\": 0,\n \"barWidthFactor\": 0.6,\n \"drawStyle\": \"line\",\n \"fillOpacity\": 0,\n \"gradientMode\": \"none\",\n \"hideFrom\": {\n \"legend\": false,\n \"tooltip\": false,\n \"viz\": false\n },\n \"insertNulls\": false,\n \"lineInterpolation\": \"linear\",\n \"lineWidth\": 1,\n \"pointSize\": 5,\n \"scaleDistribution\": {\n \"type\": \"linear\"\n },\n \"showPoints\": \"auto\",\n \"spanNulls\": false,\n \"stacking\": {\n \"group\": \"A\",\n \"mode\": \"none\"\n },\n \"thresholdsStyle\": {\n \"mode\": \"off\"\n }\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 13,\n \"w\": 16,\n \"x\": 0,\n \"y\": 12\n },\n \"id\": 2,\n \"options\": {\n \"legend\": {\n \"calcs\": [\n \"lastNotNull\"\n ],\n \"displayMode\": \"table\",\n \"placement\": \"right\",\n \"showLegend\": true,\n \"sortBy\": \"Last *\",\n \"sortDesc\": false\n },\n \"tooltip\": {\n \"hideZeros\": false,\n \"mode\": \"single\",\n \"sort\": \"none\"\n }\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"builder\",\n \"expr\": \"sum by(member) (rate(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\", member=~\\\"PAR::.*\\\", type=\\\"send-topology\\\"}[$__rate_interval]))\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": false,\n \"legendFormat\": \"__auto\",\n \"range\": true,\n \"refId\": \"total\",\n \"useBackend\": false\n }\n ],\n \"title\": \"Sequencer Topology Rate by Participant\",\n \"type\": \"timeseries\"\n },\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"fieldConfig\": {\n \"defaults\": {\n \"color\": {\n \"mode\": \"thresholds\"\n },\n \"custom\": {\n \"align\": \"auto\",\n \"cellOptions\": {\n \"type\": \"auto\"\n },\n \"inspect\": false\n },\n \"mappings\": [],\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\"\n },\n {\n \"color\": \"red\",\n \"value\": 80\n }\n ]\n }\n },\n \"overrides\": []\n },\n \"gridPos\": {\n \"h\": 13,\n \"w\": 8,\n \"x\": 16,\n \"y\": 12\n },\n \"id\": 4,\n \"options\": {\n \"cellHeight\": \"sm\",\n \"footer\": {\n \"countRows\": false,\n \"fields\": \"\",\n \"reducer\": [\n \"sum\"\n ],\n \"show\": false\n },\n \"showHeader\": true\n },\n \"pluginVersion\": \"12.0.2\",\n \"targets\": [\n {\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"disableTextWrap\": false,\n \"editorMode\": \"code\",\n \"exemplar\": false,\n \"expr\": \"sort_desc(sum by(member) (delta(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\", job=\\\"$job\\\", type=\\\"send-topology\\\", member!~\\\"PAR::.*\\\"}[24h])))\",\n \"format\": \"table\",\n \"fullMetaSearch\": false,\n \"includeNullMetadata\": true,\n \"instant\": true,\n \"legendFormat\": \"__auto\",\n \"range\": false,\n \"refId\": \"A\",\n 
\"useBackend\": false\n }\n ],\n \"title\": \"Topology transactions non participants last 24h\",\n \"transformations\": [\n {\n \"id\": \"organize\",\n \"options\": {\n \"excludeByName\": {\n \"Time\": true\n },\n \"includeByName\": {},\n \"indexByName\": {},\n \"renameByName\": {\n \"Time\": \"\"\n }\n }\n }\n ],\n \"type\": \"table\"\n }\n ],\n \"preload\": false,\n \"refresh\": \"\",\n \"schemaVersion\": 41,\n \"tags\": [],\n \"templating\": {\n \"list\": [\n {\n \"current\": {\n \"text\": \"sv-1\",\n \"value\": \"sv-1\"\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(daml_sequencer_block_events_total,namespace)\",\n \"includeAll\": false,\n \"name\": \"namespace\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(daml_sequencer_block_events_total,namespace)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n },\n {\n \"current\": {\n \"text\": \"global-domain-0-sequencer\",\n \"value\": \"global-domain-0-sequencer\"\n },\n \"datasource\": {\n \"type\": \"prometheus\",\n \"uid\": \"prometheus\"\n },\n \"definition\": \"label_values(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\"},job)\",\n \"includeAll\": false,\n \"name\": \"job\",\n \"options\": [],\n \"query\": {\n \"qryType\": 1,\n \"query\": \"label_values(daml_sequencer_block_events_total{namespace=\\\"$namespace\\\"},job)\",\n \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n },\n \"refresh\": 1,\n \"regex\": \"\",\n \"type\": \"query\"\n }\n ]\n },\n \"time\": {\n \"from\": \"now-24h\",\n \"to\": \"now\"\n },\n \"timepicker\": {},\n \"timezone\": \"\",\n \"title\": \"Sequencer Topology Transactions\",\n \"uid\": \"2f351a91-c0b3-4c6b-b5e2-25d8b9cc1304\",\n \"version\": 2\n}\n" }, "kind": "ConfigMap", "metadata": { @@ -2429,6 +2557,80 @@ "provider": "", "type": "gcp:compute/routerNat:RouterNat" }, + { + "custom": true, + "id": "", + "inputs": { + "length": 16, + "overrideSpecial": "_%@", + "special": true + }, + "name": "observability-grafana-pg-passwd", + "provider": "", + "type": "random:index/randomPassword:RandomPassword" + }, + { + "custom": false, + "id": "", + "inputs": {}, + "name": "observability-grafana-pg", + "provider": "", + "type": "canton:network:postgres" + }, + { + "custom": true, + "id": "", + "inputs": { + "chart": "oci://ghcr.io/digital-asset/decentralized-canton-sync-dev/helm/splice-postgres", + "compat": "true", + "maxHistory": 10, + "name": "grafana-pg", + "namespace": "observability", + "timeout": 600, + "values": { + "affinity": { + "nodeAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [ + { + "matchExpressions": [ + { + "key": "cn_apps", + "operator": "Exists" + } + ] + } + ] + } + } + }, + "cluster": { + "dnsName": "mock.global.canton.network.digitalasset.com", + "fixedTokens": false, + "hostname": "mock.global.canton.network.digitalasset.com", + "name": "cn-mocknet" + }, + "db": { + "volumeSize": "20Gi" + }, + "imageRepo": "us-central1-docker.pkg.dev/da-cn-shared/ghcr/digital-asset/decentralized-canton-sync-dev/docker", + "persistence": { + "secretName": "grafana-pg-secret" + }, + "tolerations": [ + { + "effect": "NoSchedule", + "key": "cn_apps", + "operator": "Exists" + } + ] + }, + "version": "0.3.20" + }, + "name": "observability-grafana-pg", + "provider": "", + "type": "kubernetes:helm.sh/v3:Release" + }, { "custom": true, "id": "", @@ -2537,8 +2739,12 @@ 
"coreDns": { "enabled": false }, + "database": {}, "defaultRules": { - "create": true + "create": true, + "disabled": { + "KubeJobNotCompleted": true + } }, "fullnameOverride": "prometheus", "grafana": { @@ -2633,8 +2839,16 @@ "deploymentStrategy": { "type": "Recreate" }, + "envFromSecret": "grafana-pg-secret", "fullnameOverride": "grafana", "grafana.ini": { + "database": { + "host": "grafana-pg.observability.svc.cluster.local:5432", + "name": "cantonnet", + "password": "${postgresPassword}", + "type": "postgres", + "user": "cnadmin" + }, "date_formats": { "default_timezone": "UTC" }, @@ -2987,7 +3201,7 @@ ] } }, - "version": "75.9.0" + "version": "77.12.1" }, "name": "observability-metrics", "provider": "", @@ -3006,7 +3220,7 @@ "name": "observability" } }, - "name": "observabilty", + "name": "observability", "provider": "", "type": "kubernetes:core/v1:Namespace" }, @@ -3164,391 +3378,37 @@ "custom": true, "id": "", "inputs": { - "authenticationMethod": "none", - "clientId": "SV10 UI (Pulumi managed, test-stack)_id" - }, - "name": "sv10UiAppCredentials", - "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/clientCredentials:ClientCredentials" - }, - { - "custom": true, - "id": "", - "inputs": { - "allowedLogoutUrls": [ - "https://wallet.sv-10-eng.mock.network.canton.global", - "https://wallet.sv-10-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-10-eng.mock.network.canton.global", - "https://cns.sv-10-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-10-eng.mock.network.canton.global", - "https://sv.sv-10-eng.mock.global.canton.network.digitalasset.com" - ], - "allowedOrigins": [ - "https://wallet.sv-10-eng.mock.network.canton.global", - "https://wallet.sv-10-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-10-eng.mock.network.canton.global", - "https://cns.sv-10-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-10-eng.mock.network.canton.global", - "https://sv.sv-10-eng.mock.global.canton.network.digitalasset.com" - ], - "appType": "spa", - "callbacks": [ - "https://wallet.sv-10-eng.mock.network.canton.global", - "https://wallet.sv-10-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-10-eng.mock.network.canton.global", - "https://cns.sv-10-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-10-eng.mock.network.canton.global", - "https://sv.sv-10-eng.mock.global.canton.network.digitalasset.com" - ], - "crossOriginAuth": false, - "description": " ** Managed by Pulumi, do not edit manually **\nUsed for the Wallet, ANS and SV UIs for SV10", - "name": "SV10 UI (Pulumi managed, test-stack)", - "webOrigins": [ - "https://wallet.sv-10-eng.mock.network.canton.global", - "https://wallet.sv-10-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-10-eng.mock.network.canton.global", - "https://cns.sv-10-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-10-eng.mock.network.canton.global", - "https://sv.sv-10-eng.mock.global.canton.network.digitalasset.com" - ] - }, - "name": "sv10UiApp", - "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/client:Client" - }, - { - "custom": true, - "id": "", - "inputs": { - "authenticationMethod": "none", - "clientId": "SV11 UI (Pulumi managed, test-stack)_id" - }, - "name": "sv11UiAppCredentials", - "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": 
"auth0:index/clientCredentials:ClientCredentials" - }, - { - "custom": true, - "id": "", - "inputs": { - "allowedLogoutUrls": [ - "https://wallet.sv-11-eng.mock.network.canton.global", - "https://wallet.sv-11-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-11-eng.mock.network.canton.global", - "https://cns.sv-11-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-11-eng.mock.network.canton.global", - "https://sv.sv-11-eng.mock.global.canton.network.digitalasset.com" - ], - "allowedOrigins": [ - "https://wallet.sv-11-eng.mock.network.canton.global", - "https://wallet.sv-11-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-11-eng.mock.network.canton.global", - "https://cns.sv-11-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-11-eng.mock.network.canton.global", - "https://sv.sv-11-eng.mock.global.canton.network.digitalasset.com" - ], - "appType": "spa", - "callbacks": [ - "https://wallet.sv-11-eng.mock.network.canton.global", - "https://wallet.sv-11-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-11-eng.mock.network.canton.global", - "https://cns.sv-11-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-11-eng.mock.network.canton.global", - "https://sv.sv-11-eng.mock.global.canton.network.digitalasset.com" - ], - "crossOriginAuth": false, - "description": " ** Managed by Pulumi, do not edit manually **\nUsed for the Wallet, ANS and SV UIs for SV11", - "name": "SV11 UI (Pulumi managed, test-stack)", - "webOrigins": [ - "https://wallet.sv-11-eng.mock.network.canton.global", - "https://wallet.sv-11-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-11-eng.mock.network.canton.global", - "https://cns.sv-11-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-11-eng.mock.network.canton.global", - "https://sv.sv-11-eng.mock.global.canton.network.digitalasset.com" - ] - }, - "name": "sv11UiApp", - "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/client:Client" - }, - { - "custom": true, - "id": "", - "inputs": { - "authenticationMethod": "none", - "clientId": "SV12 UI (Pulumi managed, test-stack)_id" - }, - "name": "sv12UiAppCredentials", - "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/clientCredentials:ClientCredentials" - }, - { - "custom": true, - "id": "", - "inputs": { - "allowedLogoutUrls": [ - "https://wallet.sv-12-eng.mock.network.canton.global", - "https://wallet.sv-12-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-12-eng.mock.network.canton.global", - "https://cns.sv-12-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-12-eng.mock.network.canton.global", - "https://sv.sv-12-eng.mock.global.canton.network.digitalasset.com" - ], - "allowedOrigins": [ - "https://wallet.sv-12-eng.mock.network.canton.global", - "https://wallet.sv-12-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-12-eng.mock.network.canton.global", - "https://cns.sv-12-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-12-eng.mock.network.canton.global", - "https://sv.sv-12-eng.mock.global.canton.network.digitalasset.com" - ], - "appType": "spa", - "callbacks": [ - "https://wallet.sv-12-eng.mock.network.canton.global", - "https://wallet.sv-12-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-12-eng.mock.network.canton.global", - 
"https://cns.sv-12-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-12-eng.mock.network.canton.global", - "https://sv.sv-12-eng.mock.global.canton.network.digitalasset.com" - ], - "crossOriginAuth": false, - "description": " ** Managed by Pulumi, do not edit manually **\nUsed for the Wallet, ANS and SV UIs for SV12", - "name": "SV12 UI (Pulumi managed, test-stack)", - "webOrigins": [ - "https://wallet.sv-12-eng.mock.network.canton.global", - "https://wallet.sv-12-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-12-eng.mock.network.canton.global", - "https://cns.sv-12-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-12-eng.mock.network.canton.global", - "https://sv.sv-12-eng.mock.global.canton.network.digitalasset.com" - ] - }, - "name": "sv12UiApp", - "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/client:Client" - }, - { - "custom": true, - "id": "", - "inputs": { - "authenticationMethod": "none", - "clientId": "SV13 UI (Pulumi managed, test-stack)_id" - }, - "name": "sv13UiAppCredentials", - "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/clientCredentials:ClientCredentials" - }, - { - "custom": true, - "id": "", - "inputs": { - "allowedLogoutUrls": [ - "https://wallet.sv-13-eng.mock.network.canton.global", - "https://wallet.sv-13-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-13-eng.mock.network.canton.global", - "https://cns.sv-13-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-13-eng.mock.network.canton.global", - "https://sv.sv-13-eng.mock.global.canton.network.digitalasset.com" - ], - "allowedOrigins": [ - "https://wallet.sv-13-eng.mock.network.canton.global", - "https://wallet.sv-13-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-13-eng.mock.network.canton.global", - "https://cns.sv-13-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-13-eng.mock.network.canton.global", - "https://sv.sv-13-eng.mock.global.canton.network.digitalasset.com" - ], - "appType": "spa", - "callbacks": [ - "https://wallet.sv-13-eng.mock.network.canton.global", - "https://wallet.sv-13-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-13-eng.mock.network.canton.global", - "https://cns.sv-13-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-13-eng.mock.network.canton.global", - "https://sv.sv-13-eng.mock.global.canton.network.digitalasset.com" - ], - "crossOriginAuth": false, - "description": " ** Managed by Pulumi, do not edit manually **\nUsed for the Wallet, ANS and SV UIs for SV13", - "name": "SV13 UI (Pulumi managed, test-stack)", - "webOrigins": [ - "https://wallet.sv-13-eng.mock.network.canton.global", - "https://wallet.sv-13-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-13-eng.mock.network.canton.global", - "https://cns.sv-13-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-13-eng.mock.network.canton.global", - "https://sv.sv-13-eng.mock.global.canton.network.digitalasset.com" - ] - }, - "name": "sv13UiApp", - "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/client:Client" - }, - { - "custom": true, - "id": "", - "inputs": { - "authenticationMethod": "none", - "clientId": "SV14 UI (Pulumi managed, test-stack)_id" - }, - "name": "sv14UiAppCredentials", - "provider": 
"urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/clientCredentials:ClientCredentials" - }, - { - "custom": true, - "id": "", - "inputs": { - "allowedLogoutUrls": [ - "https://wallet.sv-14-eng.mock.network.canton.global", - "https://wallet.sv-14-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-14-eng.mock.network.canton.global", - "https://cns.sv-14-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-14-eng.mock.network.canton.global", - "https://sv.sv-14-eng.mock.global.canton.network.digitalasset.com" - ], - "allowedOrigins": [ - "https://wallet.sv-14-eng.mock.network.canton.global", - "https://wallet.sv-14-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-14-eng.mock.network.canton.global", - "https://cns.sv-14-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-14-eng.mock.network.canton.global", - "https://sv.sv-14-eng.mock.global.canton.network.digitalasset.com" - ], - "appType": "spa", - "callbacks": [ - "https://wallet.sv-14-eng.mock.network.canton.global", - "https://wallet.sv-14-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-14-eng.mock.network.canton.global", - "https://cns.sv-14-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-14-eng.mock.network.canton.global", - "https://sv.sv-14-eng.mock.global.canton.network.digitalasset.com" - ], - "crossOriginAuth": false, - "description": " ** Managed by Pulumi, do not edit manually **\nUsed for the Wallet, ANS and SV UIs for SV14", - "name": "SV14 UI (Pulumi managed, test-stack)", - "webOrigins": [ - "https://wallet.sv-14-eng.mock.network.canton.global", - "https://wallet.sv-14-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-14-eng.mock.network.canton.global", - "https://cns.sv-14-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-14-eng.mock.network.canton.global", - "https://sv.sv-14-eng.mock.global.canton.network.digitalasset.com" - ] + "audience": "https://sv.sv-1.test-stack.canton.network/api", + "clientId": "SV1 SV Backend (Pulumi managed, test-stack)_id", + "scopes": [] }, - "name": "sv14UiApp", + "name": "sv1SvBackendAppAppGrant", "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/client:Client" + "type": "auth0:index/clientGrant:ClientGrant" }, { "custom": true, "id": "", "inputs": { - "authenticationMethod": "none", - "clientId": "SV15 UI (Pulumi managed, test-stack)_id" - }, - "name": "sv15UiAppCredentials", - "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/clientCredentials:ClientCredentials" - }, - { - "custom": true, - "id": "", - "inputs": { - "allowedLogoutUrls": [ - "https://wallet.sv-15-eng.mock.network.canton.global", - "https://wallet.sv-15-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-15-eng.mock.network.canton.global", - "https://cns.sv-15-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-15-eng.mock.network.canton.global", - "https://sv.sv-15-eng.mock.global.canton.network.digitalasset.com" - ], - "allowedOrigins": [ - "https://wallet.sv-15-eng.mock.network.canton.global", - "https://wallet.sv-15-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-15-eng.mock.network.canton.global", - "https://cns.sv-15-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-15-eng.mock.network.canton.global", - 
"https://sv.sv-15-eng.mock.global.canton.network.digitalasset.com" - ], - "appType": "spa", - "callbacks": [ - "https://wallet.sv-15-eng.mock.network.canton.global", - "https://wallet.sv-15-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-15-eng.mock.network.canton.global", - "https://cns.sv-15-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-15-eng.mock.network.canton.global", - "https://sv.sv-15-eng.mock.global.canton.network.digitalasset.com" - ], - "crossOriginAuth": false, - "description": " ** Managed by Pulumi, do not edit manually **\nUsed for the Wallet, ANS and SV UIs for SV15", - "name": "SV15 UI (Pulumi managed, test-stack)", - "webOrigins": [ - "https://wallet.sv-15-eng.mock.network.canton.global", - "https://wallet.sv-15-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-15-eng.mock.network.canton.global", - "https://cns.sv-15-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-15-eng.mock.network.canton.global", - "https://sv.sv-15-eng.mock.global.canton.network.digitalasset.com" + "audience": "https://ledger_api.sv-1.test-stack.canton.network", + "clientId": "SV1 SV Backend (Pulumi managed, test-stack)_id", + "scopes": [ + "daml_ledger_api" ] }, - "name": "sv15UiApp", + "name": "sv1SvBackendAppLedgerGrant", "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/client:Client" + "type": "auth0:index/clientGrant:ClientGrant" }, { "custom": true, "id": "", "inputs": { - "authenticationMethod": "none", - "clientId": "SV16 UI (Pulumi managed, test-stack)_id" + "appType": "non_interactive", + "description": " ** Managed by Pulumi, do not edit manually **\nUsed for the SV backend for SV1 on test-stack", + "name": "SV1 SV Backend (Pulumi managed, test-stack)" }, - "name": "sv16UiAppCredentials", - "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/clientCredentials:ClientCredentials" - }, - { - "custom": true, - "id": "", - "inputs": { - "allowedLogoutUrls": [ - "https://wallet.sv-16-eng.mock.network.canton.global", - "https://wallet.sv-16-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-16-eng.mock.network.canton.global", - "https://cns.sv-16-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-16-eng.mock.network.canton.global", - "https://sv.sv-16-eng.mock.global.canton.network.digitalasset.com" - ], - "allowedOrigins": [ - "https://wallet.sv-16-eng.mock.network.canton.global", - "https://wallet.sv-16-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-16-eng.mock.network.canton.global", - "https://cns.sv-16-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-16-eng.mock.network.canton.global", - "https://sv.sv-16-eng.mock.global.canton.network.digitalasset.com" - ], - "appType": "spa", - "callbacks": [ - "https://wallet.sv-16-eng.mock.network.canton.global", - "https://wallet.sv-16-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-16-eng.mock.network.canton.global", - "https://cns.sv-16-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-16-eng.mock.network.canton.global", - "https://sv.sv-16-eng.mock.global.canton.network.digitalasset.com" - ], - "crossOriginAuth": false, - "description": " ** Managed by Pulumi, do not edit manually **\nUsed for the Wallet, ANS and SV UIs for SV16", - "name": "SV16 UI (Pulumi managed, test-stack)", - "webOrigins": [ - 
"https://wallet.sv-16-eng.mock.network.canton.global", - "https://wallet.sv-16-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-16-eng.mock.network.canton.global", - "https://cns.sv-16-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-16-eng.mock.network.canton.global", - "https://sv.sv-16-eng.mock.global.canton.network.digitalasset.com" - ] - }, - "name": "sv16UiApp", + "name": "sv1SvBackendApp", "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", "type": "auth0:index/client:Client" }, @@ -3612,279 +3472,37 @@ "custom": true, "id": "", "inputs": { - "authenticationMethod": "none", - "clientId": "SV2 UI (Pulumi managed, test-stack)_id" - }, - "name": "sv2UiAppCredentials", - "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/clientCredentials:ClientCredentials" - }, - { - "custom": true, - "id": "", - "inputs": { - "allowedLogoutUrls": [ - "https://wallet.sv-2-eng.mock.network.canton.global", - "https://wallet.sv-2-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-2-eng.mock.network.canton.global", - "https://cns.sv-2-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-2-eng.mock.network.canton.global", - "https://sv.sv-2-eng.mock.global.canton.network.digitalasset.com" - ], - "allowedOrigins": [ - "https://wallet.sv-2-eng.mock.network.canton.global", - "https://wallet.sv-2-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-2-eng.mock.network.canton.global", - "https://cns.sv-2-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-2-eng.mock.network.canton.global", - "https://sv.sv-2-eng.mock.global.canton.network.digitalasset.com" - ], - "appType": "spa", - "callbacks": [ - "https://wallet.sv-2-eng.mock.network.canton.global", - "https://wallet.sv-2-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-2-eng.mock.network.canton.global", - "https://cns.sv-2-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-2-eng.mock.network.canton.global", - "https://sv.sv-2-eng.mock.global.canton.network.digitalasset.com" - ], - "crossOriginAuth": false, - "description": " ** Managed by Pulumi, do not edit manually **\nUsed for the Wallet, ANS and SV UIs for SV2", - "name": "SV2 UI (Pulumi managed, test-stack)", - "webOrigins": [ - "https://wallet.sv-2-eng.mock.network.canton.global", - "https://wallet.sv-2-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-2-eng.mock.network.canton.global", - "https://cns.sv-2-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-2-eng.mock.network.canton.global", - "https://sv.sv-2-eng.mock.global.canton.network.digitalasset.com" - ] - }, - "name": "sv2UiApp", - "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/client:Client" - }, - { - "custom": true, - "id": "", - "inputs": { - "authenticationMethod": "none", - "clientId": "SV3 UI (Pulumi managed, test-stack)_id" - }, - "name": "sv3UiAppCredentials", - "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/clientCredentials:ClientCredentials" - }, - { - "custom": true, - "id": "", - "inputs": { - "allowedLogoutUrls": [ - "https://wallet.sv-3-eng.mock.network.canton.global", - "https://wallet.sv-3-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-3-eng.mock.network.canton.global", - 
"https://cns.sv-3-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-3-eng.mock.network.canton.global", - "https://sv.sv-3-eng.mock.global.canton.network.digitalasset.com" - ], - "allowedOrigins": [ - "https://wallet.sv-3-eng.mock.network.canton.global", - "https://wallet.sv-3-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-3-eng.mock.network.canton.global", - "https://cns.sv-3-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-3-eng.mock.network.canton.global", - "https://sv.sv-3-eng.mock.global.canton.network.digitalasset.com" - ], - "appType": "spa", - "callbacks": [ - "https://wallet.sv-3-eng.mock.network.canton.global", - "https://wallet.sv-3-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-3-eng.mock.network.canton.global", - "https://cns.sv-3-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-3-eng.mock.network.canton.global", - "https://sv.sv-3-eng.mock.global.canton.network.digitalasset.com" - ], - "crossOriginAuth": false, - "description": " ** Managed by Pulumi, do not edit manually **\nUsed for the Wallet, ANS and SV UIs for SV3", - "name": "SV3 UI (Pulumi managed, test-stack)", - "webOrigins": [ - "https://wallet.sv-3-eng.mock.network.canton.global", - "https://wallet.sv-3-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-3-eng.mock.network.canton.global", - "https://cns.sv-3-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-3-eng.mock.network.canton.global", - "https://sv.sv-3-eng.mock.global.canton.network.digitalasset.com" - ] - }, - "name": "sv3UiApp", - "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/client:Client" - }, - { - "custom": true, - "id": "", - "inputs": { - "authenticationMethod": "none", - "clientId": "SV4 UI (Pulumi managed, test-stack)_id" - }, - "name": "sv4UiAppCredentials", - "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/clientCredentials:ClientCredentials" - }, - { - "custom": true, - "id": "", - "inputs": { - "allowedLogoutUrls": [ - "https://wallet.sv-4-eng.mock.network.canton.global", - "https://wallet.sv-4-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-4-eng.mock.network.canton.global", - "https://cns.sv-4-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-4-eng.mock.network.canton.global", - "https://sv.sv-4-eng.mock.global.canton.network.digitalasset.com" - ], - "allowedOrigins": [ - "https://wallet.sv-4-eng.mock.network.canton.global", - "https://wallet.sv-4-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-4-eng.mock.network.canton.global", - "https://cns.sv-4-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-4-eng.mock.network.canton.global", - "https://sv.sv-4-eng.mock.global.canton.network.digitalasset.com" - ], - "appType": "spa", - "callbacks": [ - "https://wallet.sv-4-eng.mock.network.canton.global", - "https://wallet.sv-4-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-4-eng.mock.network.canton.global", - "https://cns.sv-4-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-4-eng.mock.network.canton.global", - "https://sv.sv-4-eng.mock.global.canton.network.digitalasset.com" - ], - "crossOriginAuth": false, - "description": " ** Managed by Pulumi, do not edit manually **\nUsed for the Wallet, ANS and SV UIs for SV4", - "name": "SV4 UI (Pulumi managed, test-stack)", - 
"webOrigins": [ - "https://wallet.sv-4-eng.mock.network.canton.global", - "https://wallet.sv-4-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-4-eng.mock.network.canton.global", - "https://cns.sv-4-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-4-eng.mock.network.canton.global", - "https://sv.sv-4-eng.mock.global.canton.network.digitalasset.com" - ] - }, - "name": "sv4UiApp", - "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/client:Client" - }, - { - "custom": true, - "id": "", - "inputs": { - "authenticationMethod": "none", - "clientId": "SV5 UI (Pulumi managed, test-stack)_id" + "audience": "https://validator.sv-1.test-stack.canton.network/api", + "clientId": "SV1 Validator Backend (Pulumi managed, test-stack)_id", + "scopes": [] }, - "name": "sv5UiAppCredentials", + "name": "sv1ValidatorBackendAppAppGrant", "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/clientCredentials:ClientCredentials" + "type": "auth0:index/clientGrant:ClientGrant" }, { "custom": true, "id": "", "inputs": { - "allowedLogoutUrls": [ - "https://wallet.sv-5-eng.mock.network.canton.global", - "https://wallet.sv-5-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-5-eng.mock.network.canton.global", - "https://cns.sv-5-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-5-eng.mock.network.canton.global", - "https://sv.sv-5-eng.mock.global.canton.network.digitalasset.com" - ], - "allowedOrigins": [ - "https://wallet.sv-5-eng.mock.network.canton.global", - "https://wallet.sv-5-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-5-eng.mock.network.canton.global", - "https://cns.sv-5-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-5-eng.mock.network.canton.global", - "https://sv.sv-5-eng.mock.global.canton.network.digitalasset.com" - ], - "appType": "spa", - "callbacks": [ - "https://wallet.sv-5-eng.mock.network.canton.global", - "https://wallet.sv-5-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-5-eng.mock.network.canton.global", - "https://cns.sv-5-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-5-eng.mock.network.canton.global", - "https://sv.sv-5-eng.mock.global.canton.network.digitalasset.com" - ], - "crossOriginAuth": false, - "description": " ** Managed by Pulumi, do not edit manually **\nUsed for the Wallet, ANS and SV UIs for SV5", - "name": "SV5 UI (Pulumi managed, test-stack)", - "webOrigins": [ - "https://wallet.sv-5-eng.mock.network.canton.global", - "https://wallet.sv-5-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-5-eng.mock.network.canton.global", - "https://cns.sv-5-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-5-eng.mock.network.canton.global", - "https://sv.sv-5-eng.mock.global.canton.network.digitalasset.com" + "audience": "https://ledger_api.sv-1.test-stack.canton.network", + "clientId": "SV1 Validator Backend (Pulumi managed, test-stack)_id", + "scopes": [ + "daml_ledger_api" ] }, - "name": "sv5UiApp", - "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/client:Client" - }, - { - "custom": true, - "id": "", - "inputs": { - "authenticationMethod": "none", - "clientId": "SV6 UI (Pulumi managed, test-stack)_id" - }, - "name": "sv6UiAppCredentials", + "name": "sv1ValidatorBackendAppLedgerGrant", "provider": 
"urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/clientCredentials:ClientCredentials" + "type": "auth0:index/clientGrant:ClientGrant" }, { "custom": true, "id": "", "inputs": { - "allowedLogoutUrls": [ - "https://wallet.sv-6-eng.mock.network.canton.global", - "https://wallet.sv-6-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-6-eng.mock.network.canton.global", - "https://cns.sv-6-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-6-eng.mock.network.canton.global", - "https://sv.sv-6-eng.mock.global.canton.network.digitalasset.com" - ], - "allowedOrigins": [ - "https://wallet.sv-6-eng.mock.network.canton.global", - "https://wallet.sv-6-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-6-eng.mock.network.canton.global", - "https://cns.sv-6-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-6-eng.mock.network.canton.global", - "https://sv.sv-6-eng.mock.global.canton.network.digitalasset.com" - ], - "appType": "spa", - "callbacks": [ - "https://wallet.sv-6-eng.mock.network.canton.global", - "https://wallet.sv-6-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-6-eng.mock.network.canton.global", - "https://cns.sv-6-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-6-eng.mock.network.canton.global", - "https://sv.sv-6-eng.mock.global.canton.network.digitalasset.com" - ], - "crossOriginAuth": false, - "description": " ** Managed by Pulumi, do not edit manually **\nUsed for the Wallet, ANS and SV UIs for SV6", - "name": "SV6 UI (Pulumi managed, test-stack)", - "webOrigins": [ - "https://wallet.sv-6-eng.mock.network.canton.global", - "https://wallet.sv-6-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-6-eng.mock.network.canton.global", - "https://cns.sv-6-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-6-eng.mock.network.canton.global", - "https://sv.sv-6-eng.mock.global.canton.network.digitalasset.com" - ] + "appType": "non_interactive", + "description": " ** Managed by Pulumi, do not edit manually **\nUsed for the Validator backend for SV1 on test-stack", + "name": "SV1 Validator Backend (Pulumi managed, test-stack)" }, - "name": "sv6UiApp", + "name": "sv1ValidatorBackendApp", "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", "type": "auth0:index/client:Client" }, @@ -3892,167 +3510,37 @@ "custom": true, "id": "", "inputs": { - "authenticationMethod": "none", - "clientId": "SV7 UI (Pulumi managed, test-stack)_id" + "audience": "https://sv.sv-da-1.test-stack.canton.network/api", + "clientId": "SVDA1 SV Backend (Pulumi managed, test-stack)_id", + "scopes": [] }, - "name": "sv7UiAppCredentials", + "name": "svda1SvBackendAppAppGrant", "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/clientCredentials:ClientCredentials" + "type": "auth0:index/clientGrant:ClientGrant" }, { "custom": true, "id": "", "inputs": { - "allowedLogoutUrls": [ - "https://wallet.sv-7-eng.mock.network.canton.global", - "https://wallet.sv-7-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-7-eng.mock.network.canton.global", - "https://cns.sv-7-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-7-eng.mock.network.canton.global", - "https://sv.sv-7-eng.mock.global.canton.network.digitalasset.com" - ], - "allowedOrigins": [ - "https://wallet.sv-7-eng.mock.network.canton.global", - 
"https://wallet.sv-7-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-7-eng.mock.network.canton.global", - "https://cns.sv-7-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-7-eng.mock.network.canton.global", - "https://sv.sv-7-eng.mock.global.canton.network.digitalasset.com" - ], - "appType": "spa", - "callbacks": [ - "https://wallet.sv-7-eng.mock.network.canton.global", - "https://wallet.sv-7-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-7-eng.mock.network.canton.global", - "https://cns.sv-7-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-7-eng.mock.network.canton.global", - "https://sv.sv-7-eng.mock.global.canton.network.digitalasset.com" - ], - "crossOriginAuth": false, - "description": " ** Managed by Pulumi, do not edit manually **\nUsed for the Wallet, ANS and SV UIs for SV7", - "name": "SV7 UI (Pulumi managed, test-stack)", - "webOrigins": [ - "https://wallet.sv-7-eng.mock.network.canton.global", - "https://wallet.sv-7-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-7-eng.mock.network.canton.global", - "https://cns.sv-7-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-7-eng.mock.network.canton.global", - "https://sv.sv-7-eng.mock.global.canton.network.digitalasset.com" + "audience": "https://ledger_api.sv-da-1.test-stack.canton.network", + "clientId": "SVDA1 SV Backend (Pulumi managed, test-stack)_id", + "scopes": [ + "daml_ledger_api" ] }, - "name": "sv7UiApp", + "name": "svda1SvBackendAppLedgerGrant", "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/client:Client" + "type": "auth0:index/clientGrant:ClientGrant" }, { "custom": true, "id": "", "inputs": { - "authenticationMethod": "none", - "clientId": "SV8 UI (Pulumi managed, test-stack)_id" + "appType": "non_interactive", + "description": " ** Managed by Pulumi, do not edit manually **\nUsed for the SV backend for Digital-Asset-1 on test-stack", + "name": "SVDA1 SV Backend (Pulumi managed, test-stack)" }, - "name": "sv8UiAppCredentials", - "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/clientCredentials:ClientCredentials" - }, - { - "custom": true, - "id": "", - "inputs": { - "allowedLogoutUrls": [ - "https://wallet.sv-8-eng.mock.network.canton.global", - "https://wallet.sv-8-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-8-eng.mock.network.canton.global", - "https://cns.sv-8-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-8-eng.mock.network.canton.global", - "https://sv.sv-8-eng.mock.global.canton.network.digitalasset.com" - ], - "allowedOrigins": [ - "https://wallet.sv-8-eng.mock.network.canton.global", - "https://wallet.sv-8-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-8-eng.mock.network.canton.global", - "https://cns.sv-8-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-8-eng.mock.network.canton.global", - "https://sv.sv-8-eng.mock.global.canton.network.digitalasset.com" - ], - "appType": "spa", - "callbacks": [ - "https://wallet.sv-8-eng.mock.network.canton.global", - "https://wallet.sv-8-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-8-eng.mock.network.canton.global", - "https://cns.sv-8-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-8-eng.mock.network.canton.global", - "https://sv.sv-8-eng.mock.global.canton.network.digitalasset.com" - ], - 
"crossOriginAuth": false, - "description": " ** Managed by Pulumi, do not edit manually **\nUsed for the Wallet, ANS and SV UIs for SV8", - "name": "SV8 UI (Pulumi managed, test-stack)", - "webOrigins": [ - "https://wallet.sv-8-eng.mock.network.canton.global", - "https://wallet.sv-8-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-8-eng.mock.network.canton.global", - "https://cns.sv-8-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-8-eng.mock.network.canton.global", - "https://sv.sv-8-eng.mock.global.canton.network.digitalasset.com" - ] - }, - "name": "sv8UiApp", - "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/client:Client" - }, - { - "custom": true, - "id": "", - "inputs": { - "authenticationMethod": "none", - "clientId": "SV9 UI (Pulumi managed, test-stack)_id" - }, - "name": "sv9UiAppCredentials", - "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", - "type": "auth0:index/clientCredentials:ClientCredentials" - }, - { - "custom": true, - "id": "", - "inputs": { - "allowedLogoutUrls": [ - "https://wallet.sv-9-eng.mock.network.canton.global", - "https://wallet.sv-9-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-9-eng.mock.network.canton.global", - "https://cns.sv-9-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-9-eng.mock.network.canton.global", - "https://sv.sv-9-eng.mock.global.canton.network.digitalasset.com" - ], - "allowedOrigins": [ - "https://wallet.sv-9-eng.mock.network.canton.global", - "https://wallet.sv-9-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-9-eng.mock.network.canton.global", - "https://cns.sv-9-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-9-eng.mock.network.canton.global", - "https://sv.sv-9-eng.mock.global.canton.network.digitalasset.com" - ], - "appType": "spa", - "callbacks": [ - "https://wallet.sv-9-eng.mock.network.canton.global", - "https://wallet.sv-9-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-9-eng.mock.network.canton.global", - "https://cns.sv-9-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-9-eng.mock.network.canton.global", - "https://sv.sv-9-eng.mock.global.canton.network.digitalasset.com" - ], - "crossOriginAuth": false, - "description": " ** Managed by Pulumi, do not edit manually **\nUsed for the Wallet, ANS and SV UIs for SV9", - "name": "SV9 UI (Pulumi managed, test-stack)", - "webOrigins": [ - "https://wallet.sv-9-eng.mock.network.canton.global", - "https://wallet.sv-9-eng.mock.global.canton.network.digitalasset.com", - "https://cns.sv-9-eng.mock.network.canton.global", - "https://cns.sv-9-eng.mock.global.canton.network.digitalasset.com", - "https://sv.sv-9-eng.mock.network.canton.global", - "https://sv.sv-9-eng.mock.global.canton.network.digitalasset.com" - ] - }, - "name": "sv9UiApp", + "name": "svda1SvBackendApp", "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", "type": "auth0:index/client:Client" }, @@ -4112,6 +3600,44 @@ "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", "type": "auth0:index/client:Client" }, + { + "custom": true, + "id": "", + "inputs": { + "audience": "https://validator.sv-da-1.test-stack.canton.network/api", + "clientId": "SVDA1 Validator Backend (Pulumi managed, test-stack)_id", + "scopes": [] + }, + "name": "svda1ValidatorBackendAppAppGrant", + "provider": 
"urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", + "type": "auth0:index/clientGrant:ClientGrant" + }, + { + "custom": true, + "id": "", + "inputs": { + "audience": "https://ledger_api.sv-da-1.test-stack.canton.network", + "clientId": "SVDA1 Validator Backend (Pulumi managed, test-stack)_id", + "scopes": [ + "daml_ledger_api" + ] + }, + "name": "svda1ValidatorBackendAppLedgerGrant", + "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", + "type": "auth0:index/clientGrant:ClientGrant" + }, + { + "custom": true, + "id": "", + "inputs": { + "appType": "non_interactive", + "description": " ** Managed by Pulumi, do not edit manually **\nUsed for the Validator backend for Digital-Asset-1 on test-stack", + "name": "SVDA1 Validator Backend (Pulumi managed, test-stack)" + }, + "name": "svda1ValidatorBackendApp", + "provider": "urn:pulumi:test-stack::test-project::pulumi:providers:auth0::dev::undefined_id", + "type": "auth0:index/client:Client" + }, { "custom": true, "id": "", @@ -4137,9 +3663,9 @@ "id": "", "inputs": { "addPreviousOutputInEnv": true, - "create": "bash SPLICE_ROOT/cluster/pulumi/infra/prometheus-crd-update.sh 0.83.0" + "create": "bash SPLICE_ROOT/cluster/pulumi/infra/prometheus-crd-update.sh 0.85.0" }, - "name": "update-prometheus-crd-0.83.0", + "name": "update-prometheus-crd-0.85.0", "provider": "", "type": "command:local:Command" }, diff --git a/cluster/expected/splitwell/expected.json b/cluster/expected/splitwell/expected.json index a28cfe856d..8bb00c5211 100644 --- a/cluster/expected/splitwell/expected.json +++ b/cluster/expected/splitwell/expected.json @@ -87,14 +87,6 @@ "provider": "", "type": "kubernetes:core/v1:Secret" }, - { - "custom": true, - "id": "projects/test-project/global/networks/default", - "inputs": {}, - "name": "default", - "provider": "", - "type": "gcp:compute/network:Network" - }, { "custom": true, "id": "", diff --git a/cluster/expected/sv-canton/expected.json b/cluster/expected/sv-canton/expected.json index 4a3f08320d..179f7eb23e 100644 --- a/cluster/expected/sv-canton/expected.json +++ b/cluster/expected/sv-canton/expected.json @@ -837,14 +837,6 @@ "provider": "", "type": "kubernetes:core/v1:Secret" }, - { - "custom": true, - "id": "projects/test-project/global/networks/default", - "inputs": {}, - "name": "default", - "provider": "", - "type": "gcp:compute/network:Network" - }, { "custom": true, "id": "", @@ -857,7 +849,9 @@ }, "stringData": { "4dabf18193072939515e22adb298388d": "1b47061264138c4ac30d75fd1eb44270", - "value": {} + "value": { + "googleCredentials": "base64-decoded-mock" + } }, "type": "Opaque" }, @@ -877,7 +871,9 @@ }, "stringData": { "4dabf18193072939515e22adb298388d": "1b47061264138c4ac30d75fd1eb44270", - "value": {} + "value": { + "googleCredentials": "base64-decoded-mock" + } }, "type": "Opaque" }, @@ -1738,6 +1734,12 @@ "logLevel": "INFO", "logLevelStdout": "DEBUG", "mediator": { + "additionalEnvVars": [ + { + "name": "CUSTOM_MOCK_ENV_VAR_NAME", + "value": "CUSTOM_MOCK_ENV_VAR_VALUE" + } + ], "persistence": { "databaseName": "global_domain_3_mediator", "port": 5432, @@ -1832,6 +1834,12 @@ "logLevel": "INFO", "logLevelStdout": "DEBUG", "mediator": { + "additionalEnvVars": [ + { + "name": "CUSTOM_MOCK_ENV_VAR_NAME", + "value": "CUSTOM_MOCK_ENV_VAR_VALUE" + } + ], "persistence": { "databaseName": "global_domain_4_mediator", "port": 5432, @@ -2655,7 +2663,7 @@ "day": 2, "hour": 8 }, - "tier": "db-custom-2-7680", + "tier": "sequencer-override-tier", "userLabels": { 
"cluster": "mock", "migration_id": "1" @@ -2722,7 +2730,7 @@ "day": 2, "hour": 8 }, - "tier": "db-custom-2-7680", + "tier": "sequencer-override-tier", "userLabels": { "cluster": "mock", "migration_id": "2" @@ -2789,7 +2797,7 @@ "day": 2, "hour": 8 }, - "tier": "db-custom-2-7680", + "tier": "sequencer-override-tier", "userLabels": { "cluster": "mock", "migration_id": "3" @@ -2856,7 +2864,7 @@ "day": 2, "hour": 8 }, - "tier": "db-custom-2-7680", + "tier": "sequencer-override-tier", "userLabels": { "cluster": "mock", "migration_id": "4" @@ -3840,6 +3848,12 @@ "logLevel": "INFO", "logLevelStdout": "DEBUG", "mediator": { + "additionalEnvVars": [ + { + "name": "CUSTOM_MOCK_ENV_VAR_NAME", + "value": "CUSTOM_MOCK_ENV_VAR_VALUE" + } + ], "persistence": { "databaseName": "global_domain_3_mediator", "port": 5432, @@ -3934,6 +3948,12 @@ "logLevel": "INFO", "logLevelStdout": "DEBUG", "mediator": { + "additionalEnvVars": [ + { + "name": "CUSTOM_MOCK_ENV_VAR_NAME", + "value": "CUSTOM_MOCK_ENV_VAR_VALUE" + } + ], "persistence": { "databaseName": "global_domain_4_mediator", "port": 5432, @@ -4807,7 +4827,7 @@ "day": 2, "hour": 8 }, - "tier": "db-custom-2-7680", + "tier": "sequencer-override-tier", "userLabels": { "cluster": "mock", "migration_id": "1" @@ -4874,7 +4894,7 @@ "day": 2, "hour": 8 }, - "tier": "db-custom-2-7680", + "tier": "sequencer-override-tier", "userLabels": { "cluster": "mock", "migration_id": "2" @@ -4941,7 +4961,7 @@ "day": 2, "hour": 8 }, - "tier": "db-custom-2-7680", + "tier": "sequencer-override-tier", "userLabels": { "cluster": "mock", "migration_id": "3" @@ -5008,7 +5028,7 @@ "day": 2, "hour": 8 }, - "tier": "db-custom-2-7680", + "tier": "sequencer-override-tier", "userLabels": { "cluster": "mock", "migration_id": "4" @@ -5446,6 +5466,12 @@ "logLevel": "INFO", "logLevelStdout": "DEBUG", "mediator": { + "additionalEnvVars": [ + { + "name": "CUSTOM_MOCK_ENV_VAR_NAME", + "value": "CUSTOM_MOCK_ENV_VAR_VALUE" + } + ], "persistence": { "databaseName": "mediator_3", "port": 5432, @@ -5540,6 +5566,12 @@ "logLevel": "INFO", "logLevelStdout": "DEBUG", "mediator": { + "additionalEnvVars": [ + { + "name": "CUSTOM_MOCK_ENV_VAR_NAME", + "value": "CUSTOM_MOCK_ENV_VAR_VALUE" + } + ], "persistence": { "databaseName": "mediator_4", "port": 5432, @@ -6363,7 +6395,7 @@ "day": 2, "hour": 8 }, - "tier": "db-custom-2-7680", + "tier": "sequencer-override-tier", "userLabels": { "cluster": "mock", "migration_id": "1" @@ -6430,7 +6462,7 @@ "day": 2, "hour": 8 }, - "tier": "db-custom-2-7680", + "tier": "sequencer-override-tier", "userLabels": { "cluster": "mock", "migration_id": "2" @@ -6497,7 +6529,7 @@ "day": 2, "hour": 8 }, - "tier": "db-custom-2-7680", + "tier": "sequencer-override-tier", "userLabels": { "cluster": "mock", "migration_id": "3" @@ -6564,7 +6596,7 @@ "day": 2, "hour": 8 }, - "tier": "db-custom-2-7680", + "tier": "sequencer-override-tier", "userLabels": { "cluster": "mock", "migration_id": "4" diff --git a/cluster/expected/sv-runbook/expected.json b/cluster/expected/sv-runbook/expected.json index 74c1d42da1..38a6c4004b 100644 --- a/cluster/expected/sv-runbook/expected.json +++ b/cluster/expected/sv-runbook/expected.json @@ -170,14 +170,6 @@ "provider": "", "type": "kubernetes:core/v1:Secret" }, - { - "custom": true, - "id": "projects/test-project/global/networks/default", - "inputs": {}, - "name": "default", - "provider": "", - "type": "gcp:compute/network:Network" - }, { "custom": true, "id": "", @@ -726,10 +718,6 @@ "name": "CUSTOM_MOCK_ENV_VAR_NAME", "value": "CUSTOM_MOCK_ENV_VAR_VALUE" }, 
- { - "name": "ADDITIONAL_CONFIG_TOPOLOGY_CHANGE_DELAY", - "value": "canton.sv-apps.sv.topology-change-delay-duration=250ms" - }, { "name": "ADDITIONAL_CONFIG_NO_BFT_SEQUENCER_CONNECTION", "value": "canton.sv-apps.sv.bft-sequencer-connection = false" @@ -1090,6 +1078,10 @@ { "name": "ADDITIONAL_CONFIG_NO_BFT_SEQUENCER_CONNECTION", "value": "canton.validator-apps.validator_backend.disable-sv-validator-bft-sequencer-connection = true" + }, + { + "name": "ADDITIONAL_CONFIG_TOPOLOGY_METRICS_EXPORT", + "value": "canton.validator-apps.validator_backend.automation.topology-metrics-polling-interval = 5m\n" } ], "additionalJvmOptions": "-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.port=9010 -Dcom.sun.management.jmxremote.rmi.port=9010 -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=127.0.0.1", diff --git a/cluster/expected/validator-runbook/expected.json b/cluster/expected/validator-runbook/expected.json index b3052e6643..fc63f76c7d 100644 --- a/cluster/expected/validator-runbook/expected.json +++ b/cluster/expected/validator-runbook/expected.json @@ -135,14 +135,6 @@ "provider": "", "type": "kubernetes:core/v1:Secret" }, - { - "custom": true, - "id": "projects/test-project/global/networks/default", - "inputs": {}, - "name": "default", - "provider": "", - "type": "gcp:compute/network:Network" - }, { "custom": true, "id": "", @@ -527,20 +519,26 @@ "hostname": "mock.global.canton.network.digitalasset.com", "name": "cn-mocknet" }, + "config": { + "jsonLedgerApiUrl": "http://participant-3:7575", + "keyDirectory": "/keys", + "maxParties": 1234, + "parallelism": 321, + "preapprovalRetries": 9876, + "preapprovalRetryDelayMs": 42, + "scanApiUrl": "http://scan-app.sv-1:5012", + "token": "${SPLICE_APP_VALIDATOR_LEDGER_API_AUTH_TOKEN}", + "userId": "${SPLICE_APP_VALIDATOR_LEDGER_API_AUTH_USER_NAME}", + "validatorApiUrl": "http://validator-app:5003" + }, "imageRepo": "us-central1-docker.pkg.dev/da-cn-shared/ghcr/digital-asset/decentralized-canton-sync-dev/docker", - "jsonLedgerApiUrl": "http://participant-3:7575", - "keysDirectory": "/keys", - "maxParties": 1234, - "parallelism": 321, - "scanApiUrl": "http://scan-app.sv-1:5012", "tolerations": [ { "effect": "NoSchedule", "key": "cn_apps", "operator": "Exists" } - ], - "validatorApiUrl": "http://validator-app:5003" + ] }, "version": "0.3.20" }, @@ -650,6 +648,13 @@ "namespace": "validator", "timeout": 600, "values": { + "additionalEnvVars": [ + { + "name": "ADDITIONAL_ENV_VAR_VALIDATOR_NAME", + "value": "ADDITIONAL_ENV_VAR_VALIDATOR_NAME" + } + ], + "additionalJvmOptions": "-Dparticipantoption", "affinity": { "nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { diff --git a/cluster/expected/validator1/expected.json b/cluster/expected/validator1/expected.json index f228d85981..a20083e4f5 100644 --- a/cluster/expected/validator1/expected.json +++ b/cluster/expected/validator1/expected.json @@ -65,14 +65,6 @@ "provider": "", "type": "kubernetes:core/v1:Secret" }, - { - "custom": true, - "id": "projects/test-project/global/networks/default", - "inputs": {}, - "name": "default", - "provider": "", - "type": "gcp:compute/network:Network" - }, { "custom": true, "id": "", @@ -85,7 +77,9 @@ }, "stringData": { "4dabf18193072939515e22adb298388d": "1b47061264138c4ac30d75fd1eb44270", - "value": {} + "value": { + "googleCredentials": "base64-decoded-mock" + } }, "type": "Opaque" }, diff --git 
a/cluster/helm/splice-cluster-ingress-runbook/values-template.yaml b/cluster/helm/splice-cluster-ingress-runbook/values-template.yaml index 773915e31b..ac97e5c4aa 100644 --- a/cluster/helm/splice-cluster-ingress-runbook/values-template.yaml +++ b/cluster/helm/splice-cluster-ingress-runbook/values-template.yaml @@ -3,7 +3,6 @@ imageRepo: "ghcr.io/digital-asset/decentralized-canton-sync/docker" withSvIngress: true -defaultJvmOptions: -Xms384M -Xmx384M -Dscala.concurrent.context.minThreads=8 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/persistent-data ingress: wallet: true sv: true diff --git a/cluster/helm/splice-cometbft/templates/deployment.yaml b/cluster/helm/splice-cometbft/templates/deployment.yaml index be099ab43f..bc4c222dce 100644 --- a/cluster/helm/splice-cometbft/templates/deployment.yaml +++ b/cluster/helm/splice-cometbft/templates/deployment.yaml @@ -33,19 +33,6 @@ spec: {{- end }} spec: {{- include "splice-util-lib.service-account" $.Values | nindent 6 }} - {{- if $.Values.enableAntiAffinity }} - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: splice-component - operator: In - values: - - cometbft - topologyKey: kubernetes.io/hostname - namespaceSelector: { } # search in all namespaces - {{- end }} containers: - name: "cometbft" image: {{ $.Values.imageRepo }}/cometbft:{{ $.Chart.AppVersion }}{{ (($.Values.imageDigests).cometbft) }} @@ -143,10 +130,7 @@ spec: nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} - {{- with $.Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} + {{- include "splice-util-lib.affinity" (set $.Values "app" "cometbft") | nindent 6 }} {{- with $.Values.tolerations }} tolerations: {{- toYaml . | nindent 8 }} diff --git a/cluster/helm/splice-cometbft/tests/cometbft_deployment_test.yaml b/cluster/helm/splice-cometbft/tests/cometbft_deployment_test.yaml index d99b4bcb5d..ee5b940132 100644 --- a/cluster/helm/splice-cometbft/tests/cometbft_deployment_test.yaml +++ b/cluster/helm/splice-cometbft/tests/cometbft_deployment_test.yaml @@ -32,6 +32,9 @@ tests: - equal: path: metadata.name value: global-domain-3-cometbft + - equal: + path: spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[0].labelSelector.matchExpressions[0].values[0] + value: "cometbft" - it: "deploys a PVC" template: pvc.yaml documentSelector: diff --git a/cluster/helm/splice-domain/values-template.yaml b/cluster/helm/splice-domain/values-template.yaml index e887028049..89e77b9ae9 100644 --- a/cluster/helm/splice-domain/values-template.yaml +++ b/cluster/helm/splice-domain/values-template.yaml @@ -8,7 +8,7 @@ pod: annotations: {} labels: {} -defaultJvmOptions: -XX:+UseG1GC -XX:MaxRAMPercentage=75 -XX:InitialRAMPercentage=75 -Dscala.concurrent.context.minThreads=8 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/persistent-data +defaultJvmOptions: -XX:+UseG1GC -XX:MaxRAMPercentage=75 -XX:InitialRAMPercentage=75 -Dscala.concurrent.context.numThreads=8 -XX:ActiveProcessorCount=8 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/persistent-data resources: limits: cpu: "2" diff --git a/cluster/helm/splice-global-domain/templates/mediator.yaml b/cluster/helm/splice-global-domain/templates/mediator.yaml index 7bb4b9d266..27f437930f 100644 --- a/cluster/helm/splice-global-domain/templates/mediator.yaml +++ b/cluster/helm/splice-global-domain/templates/mediator.yaml @@ -55,6 +55,7 @@ spec: value: {{ .Values.mediator.persistence.databaseName }} {{- end }} {{- 
include "splice-util-lib.additional-env-vars" .Values.additionalEnvVars | indent 8}} + {{- include "splice-util-lib.additional-env-vars" .Values.mediator.additionalEnvVars | indent 8}} {{- include "splice-util-lib.log-level" .Values | indent 8}} {{- with .Values.imagePullPolicy }} imagePullPolicy: {{ . }} diff --git a/cluster/helm/splice-global-domain/templates/sequencer.yaml b/cluster/helm/splice-global-domain/templates/sequencer.yaml index 405c3a657d..46dc07ee5c 100644 --- a/cluster/helm/splice-global-domain/templates/sequencer.yaml +++ b/cluster/helm/splice-global-domain/templates/sequencer.yaml @@ -32,19 +32,6 @@ spec: {{- end }} spec: {{- include "splice-util-lib.service-account" .Values | nindent 6 }} - {{- if $.Values.enableAntiAffinity }} - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: splice-component - operator: In - values: - - sequencer - topologyKey: kubernetes.io/hostname - namespaceSelector: {} # search in all namespaces - {{- end }} containers: - name: sequencer {{- if eq .Values.sequencer.driver.type "cometbft"}} @@ -199,10 +186,7 @@ spec: nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} + {{- include "splice-util-lib.affinity" (set .Values "app" "sequencer") | nindent 6 }} {{- with .Values.tolerations }} tolerations: {{- toYaml . | nindent 8 }} diff --git a/cluster/helm/splice-global-domain/tests/mediator_test.yaml b/cluster/helm/splice-global-domain/tests/mediator_test.yaml index e03c49b0e9..e7d764b521 100644 --- a/cluster/helm/splice-global-domain/tests/mediator_test.yaml +++ b/cluster/helm/splice-global-domain/tests/mediator_test.yaml @@ -64,3 +64,18 @@ tests: - equal: path: metadata.name value: global-domain-6-mediator-pvc + - it: "can set additional env vars" + set: + mediator: + additionalEnvVars: + - name: ADDITIONAL_MEDIATOR_ENV_VAR_NAME + value: ADDITIONAL_MEDIATOR_ENV_VAR_VALUE + documentSelector: + path: kind + value: Deployment + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: ADDITIONAL_MEDIATOR_ENV_VAR_NAME + value: ADDITIONAL_MEDIATOR_ENV_VAR_VALUE diff --git a/cluster/helm/splice-global-domain/tests/sequencer_test.yaml b/cluster/helm/splice-global-domain/tests/sequencer_test.yaml index c6a94019aa..c264cdfe97 100644 --- a/cluster/helm/splice-global-domain/tests/sequencer_test.yaml +++ b/cluster/helm/splice-global-domain/tests/sequencer_test.yaml @@ -1,6 +1,7 @@ # Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 +# yaml-language-server: $schema=https://raw.githubusercontent.com/helm-unittest/helm-unittest/main/schema/helm-testsuite.json suite: "sequencer values" templates: - sequencer.yaml @@ -11,13 +12,13 @@ chart: # Override for testing labels version: 0.1.1 appVersion: 0.1.0 +set: + # Things we need just to pass the schema + sequencer: + driver: + type: cometbft tests: - it: "sets labels as expected" - set: - # Things we need just to pass the schema - sequencer: - driver: - type: cometbft documentSelector: path: kind value: Deployment @@ -59,10 +60,7 @@ tests: helm.sh/chart: splice-global-domain-0.1.1 - it: "can enable postgres metrics" set: - # Things we need just to pass the schema sequencer: - driver: - type: cometbft # Things we need for the postgres exporter persistence: postgresName: sequencer-6 @@ -78,10 +76,6 @@ tests: value: pge-sequencer-6-cantonnet-sequencer - it: "can enable pvc" set: - # Things we need just to pass the schema - sequencer: - driver: - type: cometbft # Things we need for the pvc mount pvc: size: 10Gi @@ -95,10 +89,6 @@ tests: value: global-domain-6-sequencer-pvc - it: "sets log-level environment variables correctly when only logLevel is provided" set: - # Things we need just to pass the schema - sequencer: - driver: - type: cometbft # Set log level for testing logLevel: INFO documentSelector: @@ -117,10 +107,6 @@ tests: value: INFO - it: "sets log-level environment variables correctly when both logLevel and logLevelStdout are provided" set: - # Things we need just to pass the schema - sequencer: - driver: - type: cometbft # Set log levels for testing logLevel: DEBUG logLevelStdout: ERROR @@ -138,3 +124,47 @@ tests: content: name: LOG_LEVEL_STDOUT value: ERROR + - it: "sets anti affinity by default" + documentSelector: + path: kind + value: Deployment + asserts: + - equal: + path: spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[0].labelSelector.matchExpressions[0].values[0] + value: "sequencer" + - it: "sets affinity alongside default anti affinity" + set: + affinity: + nodeAffinity: + test: true + documentSelector: + path: kind + value: Deployment + asserts: + - exists: + path: spec.template.spec.affinity.nodeAffinity + - exists: + path: spec.template.spec.affinity.podAntiAffinity + - it: "sets only the specified affinity" + set: + enableAntiAffinity: false + affinity: + nodeAffinity: + test: true + documentSelector: + path: kind + value: Deployment + asserts: + - exists: + path: spec.template.spec.affinity.nodeAffinity.test + - notExists: + path: spec.template.spec.affinity.podAntiAffinity + - it: "doesn't set any affinity" + set: + enableAntiAffinity: false + documentSelector: + path: kind + value: Deployment + asserts: + - notExists: + path: spec.template.spec.affinity diff --git a/cluster/helm/splice-global-domain/values-template.yaml b/cluster/helm/splice-global-domain/values-template.yaml index c2419e6a65..f483185351 100644 --- a/cluster/helm/splice-global-domain/values-template.yaml +++ b/cluster/helm/splice-global-domain/values-template.yaml @@ -10,7 +10,7 @@ pod: annotations: {} labels: {} -defaultJvmOptions: -XX:+UseG1GC -XX:MaxRAMPercentage=75 -XX:InitialRAMPercentage=75 -Dscala.concurrent.context.minThreads=8 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/persistent-data +defaultJvmOptions: -XX:+UseG1GC -XX:MaxRAMPercentage=75 -XX:InitialRAMPercentage=75 -Dscala.concurrent.context.numThreads=8 -XX:ActiveProcessorCount=8 -XX:+HeapDumpOnOutOfMemoryError 
-XX:HeapDumpPath=/persistent-data resources: limits: memory: 4Gi diff --git a/cluster/helm/splice-istio-gateway/values-template.yaml b/cluster/helm/splice-istio-gateway/values-template.yaml index 918e8fefb0..dd1306e30b 100644 --- a/cluster/helm/splice-istio-gateway/values-template.yaml +++ b/cluster/helm/splice-istio-gateway/values-template.yaml @@ -2,7 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 imageRepo: "ghcr.io/digital-asset/decentralized-canton-sync/docker" -defaultJvmOptions: -Xms384M -Xmx384M -Dscala.concurrent.context.minThreads=8 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/persistent-data cometbftPorts: nodes: 10 domains: 5 diff --git a/cluster/helm/splice-participant/values-template.yaml b/cluster/helm/splice-participant/values-template.yaml index 0ebf2d2c7d..88cc4eede2 100644 --- a/cluster/helm/splice-participant/values-template.yaml +++ b/cluster/helm/splice-participant/values-template.yaml @@ -10,7 +10,7 @@ pod: annotations: {} labels: {} -defaultJvmOptions: -XX:+UseG1GC -XX:MaxRAMPercentage=75 -XX:InitialRAMPercentage=75 -Dscala.concurrent.context.minThreads=8 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/persistent-data +defaultJvmOptions: -XX:+UseG1GC -XX:MaxRAMPercentage=75 -XX:InitialRAMPercentage=75 -Dscala.concurrent.context.numThreads=8 -XX:ActiveProcessorCount=8 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/persistent-data resources: limits: memory: 32Gi diff --git a/cluster/helm/splice-party-allocator/templates/party-allocator.yaml b/cluster/helm/splice-party-allocator/templates/party-allocator.yaml index 41a5ba9200..95d728f4ba 100644 --- a/cluster/helm/splice-party-allocator/templates/party-allocator.yaml +++ b/cluster/helm/splice-party-allocator/templates/party-allocator.yaml @@ -29,18 +29,8 @@ spec: {{- end }} env: {{- include "splice-util-lib.auth0-env-vars" (dict "appName" "validator" "keyName" "validator" "fixedTokens" (.Values.cluster).fixedTokens) | indent 12}} - - name: JSON_LEDGER_API_URL - value: {{ .Values.jsonLedgerApiUrl | quote }} - - name: SCAN_API_URL - value: {{ .Values.scanApiUrl | quote }} - - name: VALIDATOR_API_URL - value: {{ .Values.validatorApiUrl | quote }} - - name: MAX_PARTIES - value: {{ .Values.maxParties | quote }} - - name: KEYS_DIRECTORY - value: {{ .Values.keysDirectory | quote }} - - name: PARALLELISM - value: {{ .Values.parallelism | quote }} + - name: EXTERNAL_CONFIG + value: {{ .Values.config | toJson | quote }} volumeMounts: - name: party-allocator-keys-volume mountPath: /keys diff --git a/cluster/helm/splice-party-allocator/templates/required.yaml b/cluster/helm/splice-party-allocator/templates/required.yaml index 8d2f071b71..c59ed51f44 100644 --- a/cluster/helm/splice-party-allocator/templates/required.yaml +++ b/cluster/helm/splice-party-allocator/templates/required.yaml @@ -5,9 +5,4 @@ {{ $_ := required ".Values.imageRepo is required." .Values.imageRepo }} -{{ $_ := required ".Values.jsonLegerApiUrl is required." .Values.jsonLedgerApiUrl }} -{{ $_ := required ".Values.scanApiUrl is required." .Values.scanApiUrl }} -{{ $_ := required ".Values.validatorApiUrl is required." .Values.validatorApiUrl }} -{{ $_ := required ".Values.maxParties is required." .Values.maxParties }} -{{ $_ := required ".Values.keysDirectory is required." .Values.keysDirectory }} -{{ $_ := required ".Values.parallelism is required." .Values.parallelism }} +{{ $_ := required ".Values.config is required." 
.Values.config }} diff --git a/cluster/helm/splice-scan/values-template.yaml b/cluster/helm/splice-scan/values-template.yaml index c6ccbc1968..55db22edce 100644 --- a/cluster/helm/splice-scan/values-template.yaml +++ b/cluster/helm/splice-scan/values-template.yaml @@ -8,7 +8,7 @@ pod: annotations: {} labels: {} -defaultJvmOptions: -XX:+UseG1GC -XX:MaxRAMPercentage=70 -XX:InitialRAMPercentage=70 -Dscala.concurrent.context.minThreads=8 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/persistent-data +defaultJvmOptions: -XX:+UseG1GC -XX:MaxRAMPercentage=70 -XX:InitialRAMPercentage=70 -Dscala.concurrent.context.numThreads=8 -XX:ActiveProcessorCount=8 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/persistent-data resources: limits: memory: 2048Mi diff --git a/cluster/helm/splice-splitwell-app/values-template.yaml b/cluster/helm/splice-splitwell-app/values-template.yaml index 36768a11ec..c34d404241 100644 --- a/cluster/helm/splice-splitwell-app/values-template.yaml +++ b/cluster/helm/splice-splitwell-app/values-template.yaml @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 imageRepo: "ghcr.io/digital-asset/decentralized-canton-sync/docker" -defaultJvmOptions: -XX:+UseG1GC -XX:MaxRAMPercentage=75 -XX:InitialRAMPercentage=75 -Dscala.concurrent.context.minThreads=8 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/persistent-data +defaultJvmOptions: -XX:+UseG1GC -XX:MaxRAMPercentage=75 -XX:InitialRAMPercentage=75 -Dscala.concurrent.context.numThreads=8 -XX:ActiveProcessorCount=8 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/persistent-data fixedTokens: false resources: limits: diff --git a/cluster/helm/splice-sv-node/values-template.yaml b/cluster/helm/splice-sv-node/values-template.yaml index ab1527d8d1..02df2e7964 100644 --- a/cluster/helm/splice-sv-node/values-template.yaml +++ b/cluster/helm/splice-sv-node/values-template.yaml @@ -11,7 +11,8 @@ pod: cluster: fixedTokens: false -defaultJvmOptions: -XX:+UseG1GC -XX:MaxRAMPercentage=75 -XX:InitialRAMPercentage=75 -Dscala.concurrent.context.minThreads=8 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/persistent-data +# TODO(DACH-NY/canton-network-internal#2125) Revisit idle timeout on 3.4 +defaultJvmOptions: -XX:+UseG1GC -XX:MaxRAMPercentage=75 -XX:InitialRAMPercentage=75 -Dscala.concurrent.context.numThreads=8 -XX:ActiveProcessorCount=8 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/persistent-data -Dpekko.http.server.idle-timeout=20m -Dpekko.http.client.idle-timeout=20m resources: limits: cpu: 3 diff --git a/cluster/helm/splice-util-lib/templates/_helpers.tpl b/cluster/helm/splice-util-lib/templates/_helpers.tpl index 4a4ffa20a6..fe48b2e247 100644 --- a/cluster/helm/splice-util-lib/templates/_helpers.tpl +++ b/cluster/helm/splice-util-lib/templates/_helpers.tpl @@ -241,3 +241,24 @@ app: {{ .app }} {{- end }} {{- end -}} {{- end -}} + +{{- define "splice-util-lib.affinity" -}} +{{- if or .enableAntiAffinity .affinity }} +affinity: + {{- with .affinity }} + {{- toYaml . 
| nindent 2 }} + {{- end }} + {{- if .enableAntiAffinity }} + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: splice-component + operator: In + values: + - {{ .app }} + topologyKey: kubernetes.io/hostname + namespaceSelector: { } # search in all namespaces + {{- end }} +{{- end }} +{{- end -}} diff --git a/cluster/helm/splice-validator/values-template.yaml b/cluster/helm/splice-validator/values-template.yaml index 6457f968f5..3e01eecace 100644 --- a/cluster/helm/splice-validator/values-template.yaml +++ b/cluster/helm/splice-validator/values-template.yaml @@ -10,7 +10,7 @@ pod: annotations: {} labels: {} -defaultJvmOptions: -XX:+UseG1GC -XX:MaxRAMPercentage=75 -XX:InitialRAMPercentage=75 -Dscala.concurrent.context.minThreads=8 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/persistent-data +defaultJvmOptions: -XX:+UseG1GC -XX:MaxRAMPercentage=75 -XX:InitialRAMPercentage=75 -Dscala.concurrent.context.numThreads=8 -XX:ActiveProcessorCount=8 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/persistent-data -Dpekko.http.server.idle-timeout=20m -Dpekko.http.client.idle-timeout=20m resources: limits: cpu: 3 diff --git a/cluster/images/canton-cometbft-sequencer/app.conf b/cluster/images/canton-cometbft-sequencer/app.conf index 6f11319596..5cd19ee9a1 100644 --- a/cluster/images/canton-cometbft-sequencer/app.conf +++ b/cluster/images/canton-cometbft-sequencer/app.conf @@ -50,6 +50,14 @@ canton { address = "0.0.0.0" port = 5009 max-inbound-message-size = 104857600 # 100MB + stream.limits { + "com.digitalasset.canton.topology.admin.v30.TopologyManagerReadService/GenesisState": 1, + "com.digitalasset.canton.topology.admin.v30.TopologyManagerReadService/GenesisStateV2": 1, + "com.digitalasset.canton.topology.admin.v30.TopologyManagerReadService/ExportTopologySnapshot": 1, + "com.digitalasset.canton.topology.admin.v30.TopologyManagerReadService/ExportTopologySnapshotV2": 1, + "com.digitalasset.canton.sequencer.admin.v30.SequencerAdministrationService/OnboardingState": 1, + "com.digitalasset.canton.sequencer.admin.v30.SequencerAdministrationService/OnboardingStateV2": 1 + } } @@ -77,11 +85,14 @@ canton { type = "CometBFT" } - # TODO(#1324) Reenable once Canton forward ports - # parameters.sequencer-api-limits = { - # "com.digitalasset.canton.sequencer.api.v30.SequencerService/DownloadTopologyStateForInit" : 3, - # "com.digitalasset.canton.sequencer.api.v30.SequencerService/SubscribeV2" : 1000, - # } + sequencer-client { + use-new-connection-pool = false + } + + public-api.stream.limits = { + "com.digitalasset.canton.sequencer.api.v30.SequencerService/DownloadTopologyStateForInit" : 3, + "com.digitalasset.canton.sequencer.api.v30.SequencerService/Subscribe" : 1000, + } } } } diff --git a/cluster/images/canton-mediator/app.conf b/cluster/images/canton-mediator/app.conf index f8201b7d30..bc81a5d7b0 100644 --- a/cluster/images/canton-mediator/app.conf +++ b/cluster/images/canton-mediator/app.conf @@ -20,6 +20,7 @@ canton { sequencer-client { acknowledgement-interval = 10m + use-new-connection-pool = false } storage = ${_storage} diff --git a/cluster/images/canton-participant/app.conf b/cluster/images/canton-participant/app.conf index 1ebe2d1639..34493ca91f 100644 --- a/cluster/images/canton-participant/app.conf +++ b/cluster/images/canton-participant/app.conf @@ -29,6 +29,7 @@ canton { acknowledgement-interval = 10m # Use a higher number of in flight batches to increase throughput maximum-in-flight-event-batches = 50 + 
use-new-connection-pool = false } monitoring.grpc-health-server { @@ -55,6 +56,14 @@ canton { admin-api { address = "0.0.0.0" port = 5002 + stream.limits { + "com.digitalasset.canton.admin.participant.v30.ParticipantRepairService/ExportAcsOld": 1, + "com.digitalasset.canton.admin.participant.v30.PartyManagementService/ExportAcsOld": 1, + "com.digitalasset.canton.admin.participant.v30.ParticipantRepairService/ImportAcsOld": 1, + "com.digitalasset.canton.admin.participant.v30.ParticipantRepairService/ImportAcs": 1, + "com.digitalasset.canton.topology.admin.v30.TopologyManagerReadService/ExportTopologySnapshot": 1, + "com.digitalasset.canton.topology.admin.v30.TopologyManagerReadService/ExportTopologySnapshotV2": 1 + } } init { @@ -102,6 +111,10 @@ canton { } # Bump ACS pruning interval to make sure ACS snapshots are available for longer journal-garbage-collection-delay = 24h + # TODO(DACH-NY/canton-network-internal#2050) Remove once ACS commitment processing does not block anymore. + do-not-await-on-checking-incoming-commitments = true + # Bump batch size to make acs imports faster + batching.max-acs-import-batch-size = 10000 } # TODO(DACH-NY/canton-network-node#8331) Tune cache sizes diff --git a/cluster/images/canton-sequencer/app.conf b/cluster/images/canton-sequencer/app.conf index 20f3277cda..0cb7306521 100644 --- a/cluster/images/canton-sequencer/app.conf +++ b/cluster/images/canton-sequencer/app.conf @@ -47,6 +47,14 @@ canton { address = "0.0.0.0" port = 5009 max-inbound-message-size = 104857600 # 100MB + stream.limits { + "com.digitalasset.canton.topology.admin.v30.TopologyManagerReadService/GenesisState": 1, + "com.digitalasset.canton.topology.admin.v30.TopologyManagerReadService/GenesisStateV2": 1, + "com.digitalasset.canton.topology.admin.v30.TopologyManagerReadService/ExportTopologySnapshot": 1, + "com.digitalasset.canton.topology.admin.v30.TopologyManagerReadService/ExportTopologySnapshotV2": 1, + "com.digitalasset.canton.sequencer.admin.v30.SequencerAdministrationService/OnboardingState": 1, + "com.digitalasset.canton.sequencer.admin.v30.SequencerAdministrationService/OnboardingStateV2": 1 + } } @@ -81,11 +89,14 @@ canton { type = reference } - # TODO(#1324) Reenable once Canton forward ports - # parameters.sequencer-api-limits = { - # "com.digitalasset.canton.sequencer.api.v30.SequencerService/DownloadTopologyStateForInit" : 3, - # "com.digitalasset.canton.sequencer.api.v30.SequencerService/SubscribeV2" : 1000, - # } + sequencer-client { + use-new-connection-pool = false + } + + public-api.stream.limits = { + "com.digitalasset.canton.sequencer.api.v30.SequencerService/DownloadTopologyStateForInit" : 3, + "com.digitalasset.canton.sequencer.api.v30.SequencerService/Subscribe" : 1000, + } } } } diff --git a/cluster/images/local.mk b/cluster/images/local.mk index 1a10f975e6..39a0881851 100644 --- a/cluster/images/local.mk +++ b/cluster/images/local.mk @@ -127,13 +127,10 @@ $(foreach image,$(images),$(eval $(call DEFINE_PHONY_RULES,$(image)))) --iidfile $@ $(cache_opt) $(build_arg) -t $$(cat $<) $(@D)/.. %/$(docker-push): %/$(docker-image-tag) %/$(docker-build) - cd $(@D)/.. && docker-push $$(cat $(abspath $<)) + cd $(@D)/.. && prefix-output docker-push $$(cat $(abspath $<)) %/$(docker-scan): %/$(docker-image-tag) - cd $(@D) && docker-scan $$(cat $(abspath $<)) - -%/$(docker-copy-release-to-ghcr): %/$(docker-image-tag) %/$(docker-build) - cd $(@D)/.. 
&& copy_release_to_ghcr $$(cat $(abspath $<)) + cd $(@D) && prefix-output docker-scan $$(cat $(abspath $<)) ######### # Global targets @@ -145,4 +142,4 @@ write-images: .PHONY: cluster/docker/copy_release_to_ghcr cluster/docker/copy_release_to_ghcr: write-images - ./build-tools/copy_release_images_to_ghcr.sh -v '$(shell get-snapshot-version)' -f $(images_file) + prefix-output ./build-tools/copy_release_images_to_ghcr.sh -v '$(shell get-snapshot-version)' -f $(images_file) diff --git a/cluster/images/party-allocator/Dockerfile b/cluster/images/party-allocator/Dockerfile index 00e0c136ce..77f8a2f1d5 100644 --- a/cluster/images/party-allocator/Dockerfile +++ b/cluster/images/party-allocator/Dockerfile @@ -5,6 +5,9 @@ FROM node:22-alpine-3.21@sha256:6b2127043c2faa4f15cdcdeb65a39fc9afbecf5559301898 LABEL org.opencontainers.image.base.name="node/22-alpine-3.21" +# to get envsubst +RUN apk update && apk add gettext + COPY entrypoint.sh . COPY target/bundle.js ./party-allocator/ COPY target/LICENSE . diff --git a/cluster/images/party-allocator/entrypoint.sh b/cluster/images/party-allocator/entrypoint.sh index bc224740e8..db62cc1568 100755 --- a/cluster/images/party-allocator/entrypoint.sh +++ b/cluster/images/party-allocator/entrypoint.sh @@ -4,6 +4,8 @@ set -euo pipefail -export EXTERNAL_CONFIG="{\"token\": \"${SPLICE_APP_VALIDATOR_LEDGER_API_AUTH_TOKEN}\", \"userId\": \"${SPLICE_APP_VALIDATOR_LEDGER_API_AUTH_USER_NAME}\", \"jsonLedgerApiUrl\": \"$JSON_LEDGER_API_URL\", \"scanApiUrl\": \"$SCAN_API_URL\", \"validatorApiUrl\": \"$VALIDATOR_API_URL\", \"maxParties\": $MAX_PARTIES, \"keyDirectory\": \"$KEYS_DIRECTORY\", \"parallelism\": $PARALLELISM}" +# shellcheck disable=SC2016 +EXTERNAL_CONFIG="$(echo "$EXTERNAL_CONFIG" | envsubst '$SPLICE_APP_VALIDATOR_LEDGER_API_AUTH_TOKEN,$SPLICE_APP_VALIDATOR_LEDGER_API_AUTH_USER_NAME')" +export EXTERNAL_CONFIG exec node --enable-source-maps party-allocator/bundle.js diff --git a/cluster/images/scan-app/app.conf b/cluster/images/scan-app/app.conf index 44cd34b2c7..1364ab83a1 100644 --- a/cluster/images/scan-app/app.conf +++ b/cluster/images/scan-app/app.conf @@ -68,8 +68,9 @@ canton { getAcsSnapshot.rate-per-second = 20 } } + # TODO(DACH-NY/canton-network-internal#2125) Revisit timeouts on 3.4 custom-timeouts { - getAcsSnapshot = 1 minute + getAcsSnapshot = 20 minutes } } } diff --git a/cluster/images/sv-app/app.conf b/cluster/images/sv-app/app.conf index c96bf07522..47ad5be56c 100644 --- a/cluster/images/sv-app/app.conf +++ b/cluster/images/sv-app/app.conf @@ -99,12 +99,17 @@ canton { delegateless-automation-expected-task-duration = ${?SPLICE_APP_EXPECTED_TASK_DURATION} delegateless-automation-expired-reward-coupon-batch-size = ${?SPLICE_APP_EXPIRED_REWARD_COUPON_BATCH_SIZE} parameters { + # TODO(DACH-NY/canton-network-internal#2125) Revisit timeouts on 3.4 custom-timeouts { - onboardSvPartyMigrationAuthorize = 5 minutes + onboardSvPartyMigrationAuthorize = 20 minutes + onboardSvSequencer = 5 minutes } } } } features.enable-testing-commands = yes + # TODO(DACH-NY/canton-network-internal#2125) Revisit timeouts on 3.4 + # Note: This is the client-side request timeout + parameters.timeouts.request-timeout = 20 minutes } diff --git a/cluster/images/validator-app/app.conf b/cluster/images/validator-app/app.conf index 176cf8bb75..01dac35ef1 100644 --- a/cluster/images/validator-app/app.conf +++ b/cluster/images/validator-app/app.conf @@ -69,4 +69,8 @@ canton { } } features.enable-testing-commands = yes + + # TODO(DACH-NY/canton-network-internal#2125) Revisit 
timeouts on 3.4 + # Note: This is the client-side request timeout + parameters.timeouts.request-timeout = 20 minutes } diff --git a/cluster/pulumi/canton-network/src/bigQuery.ts b/cluster/pulumi/canton-network/src/bigQuery.ts index 53a48d9bbb..7ea0fca4e3 100644 --- a/cluster/pulumi/canton-network/src/bigQuery.ts +++ b/cluster/pulumi/canton-network/src/bigQuery.ts @@ -14,8 +14,7 @@ import { Postgres, CloudPostgres, generatePassword, - privateNetwork, - protectCloudSql, + privateNetworkId, } from '@lfdecentralizedtrust/splice-pulumi-common/src/postgres'; import { ExactNamespace, @@ -23,6 +22,7 @@ import { commandScriptPath, } from '@lfdecentralizedtrust/splice-pulumi-common/src/utils'; +import { spliceConfig } from '../../common/src/config/config'; import { allDashboardFunctions, allScanFunctions, computedDataTable } from './bigQuery_functions'; interface ScanBigQueryConfig { @@ -367,7 +367,7 @@ function installPrivateConnectivityConfiguration( privateConnectionId: privateConnectionName, displayName: privateConnectionName, location: cloudsdkComputeRegion(), - vpcPeeringConfig: { subnet: pickDatastreamPeeringCidr(), vpc: privateNetwork.id }, + vpcPeeringConfig: { subnet: pickDatastreamPeeringCidr(), vpc: privateNetworkId }, labels: { cluster: CLUSTER_BASENAME, }, @@ -406,7 +406,7 @@ function installReplicatorPassword(postgres: CloudPostgres): PostgresPassword { const secretName = `${postgres.namespace.logicalName}-${replicatorUserName}-passwd`; const password = generatePassword(`${postgres.instanceName}-${replicatorUserName}-passwd`, { parent: postgres, - protect: protectCloudSql, + protect: spliceConfig.pulumiProjectConfig.cloudSql.protected, }).result; return { contents: password, @@ -430,7 +430,7 @@ function createPostgresReplicatorUser( parent: postgres, deletedWith: postgres.databaseInstance, retainOnDelete: true, - protect: protectCloudSql, + protect: spliceConfig.pulumiProjectConfig.cloudSql.protected, dependsOn: [postgres.databaseInstance, password.secret], } ); @@ -450,7 +450,7 @@ function createPublicationAndReplicationSlots( const schemaName = dbName; const path = commandScriptPath('cluster/pulumi/canton-network/bigquery-cloudsql.sh'); const scriptArgs = pulumi.interpolate`\\ - --private-network-project="${privateNetwork.project}" \\ + --private-network-project="${gcp.organizations.getProjectOutput({}).apply(proj => proj.name)}" \\ --compute-region="${cloudsdkComputeRegion()}" \\ --service-account-email="${postgres.databaseInstance.serviceAccountEmailAddress}" \\ --tables-to-replicate-length="${tablesToReplicate.length}" \\ diff --git a/cluster/pulumi/canton-network/src/bigQuery_functions.ts b/cluster/pulumi/canton-network/src/bigQuery_functions.ts index cdf576f9f6..3efc5bbeda 100644 --- a/cluster/pulumi/canton-network/src/bigQuery_functions.ts +++ b/cluster/pulumi/canton-network/src/bigQuery_functions.ts @@ -69,6 +69,7 @@ const daml_prim_path = new BQScalarFunction( WHEN 'contractId' THEN '.contractId' WHEN 'list' THEN '.list.elements' WHEN 'party' THEN '.party' + WHEN 'int64' THEN '.int64' -- we treat records just like outer layer; -- see how paths start with '$.record' WHEN 'record' THEN '' @@ -586,6 +587,24 @@ const coin_price = new BQScalarFunction( ` ); +const latest_round = new BQScalarFunction( + 'latest_round', + as_of_args, + INT64, + ` + (SELECT + CAST(JSON_VALUE(c.create_arguments, \`$$FUNCTIONS_DATASET$$.daml_record_path\`([1,0], 'int64')) AS INT64) + FROM \`$$SCAN_DATASET$$.scan_sv_1_update_history_creates\` c + WHERE template_id_entity_name = 'SummarizingMiningRound' 
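+ -- qualify by module and package so we don't match a same-named template from another package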
+ AND c.template_id_module_name = 'Splice.Round' + AND package_name = 'splice-amulet' + AND \`$$FUNCTIONS_DATASET$$.up_to_time\`( + as_of_record_time, migration_id, + c.record_time, c.migration_id) + ORDER BY c.record_time DESC LIMIT 1) + ` +); + const all_dashboard_stats = new BQTableFunction( 'all_dashboard_stats', as_of_args, @@ -689,6 +708,7 @@ const all_finance_stats = new BQTableFunction( new BQColumn('total_burn', BIGNUMERIC), new BQColumn('num_amulet_holders', INT64), new BQColumn('num_active_validators', INT64), + new BQColumn('latest_round', INT64), ], ` SELECT @@ -729,7 +749,9 @@ const all_finance_stats = new BQTableFunction( migration_id), 0) AS total_burn, \`$$FUNCTIONS_DATASET$$.num_amulet_holders\`(as_of_record_time, migration_id) as num_amulet_holders, - \`$$FUNCTIONS_DATASET$$.num_active_validators\`(as_of_record_time, migration_id) as num_active_validators + \`$$FUNCTIONS_DATASET$$.num_active_validators\`(as_of_record_time, migration_id) as num_active_validators, + \`$$FUNCTIONS_DATASET$$.latest_round\`(as_of_record_time, migration_id) as latest_round + ` ); @@ -878,6 +900,7 @@ export const allScanFunctions = [ average_tps, peak_tps, coin_price, + latest_round, all_dashboard_stats, all_finance_stats, ]; diff --git a/cluster/pulumi/canton-network/src/sv.ts b/cluster/pulumi/canton-network/src/sv.ts index d56b4b7ad3..c93480344c 100644 --- a/cluster/pulumi/canton-network/src/sv.ts +++ b/cluster/pulumi/canton-network/src/sv.ts @@ -206,15 +206,31 @@ export async function installSvNode( const defaultPostgres = config.splitPostgresInstances ? undefined - : postgres.installPostgres(xns, 'postgres', 'postgres', activeVersion, false, { - logicalDecoding: !!baseConfig.scanBigQuery, - }); + : postgres.installPostgres( + xns, + 'postgres', + 'postgres', + activeVersion, + spliceConfig.pulumiProjectConfig.cloudSql, + false, + { + logicalDecoding: !!baseConfig.scanBigQuery, + } + ); const appsPostgres = defaultPostgres || - postgres.installPostgres(xns, `cn-apps-pg`, `cn-apps-pg`, activeVersion, true, { - logicalDecoding: !!baseConfig.scanBigQuery, - }); + postgres.installPostgres( + xns, + `cn-apps-pg`, + `cn-apps-pg`, + activeVersion, + spliceConfig.pulumiProjectConfig.cloudSql, + true, + { + logicalDecoding: !!baseConfig.scanBigQuery, + } + ); const canton = buildCrossStackCantonDependencies( decentralizedSynchronizerUpgradeConfig, @@ -407,14 +423,6 @@ function installSvApp( const svDbName = `sv_${sanitizedForPostgres(config.nodeName)}`; const useCantonBft = decentralizedSynchronizerMigrationConfig.active.sequencer.enableBftSequencer; - const topologyChangeDelayEnvVars = svsConfig?.synchronizer?.topologyChangeDelay - ? [ - { - name: 'ADDITIONAL_CONFIG_TOPOLOGY_CHANGE_DELAY', - value: `canton.sv-apps.sv.topology-change-delay-duration=${svsConfig.synchronizer.topologyChangeDelay}`, - }, - ] - : []; const bftSequencerConnectionEnvVars = !config.participant || config.participant.bftSequencerConnection ? 
[] @@ -424,9 +432,9 @@ function installSvApp( value: 'canton.sv-apps.sv.bft-sequencer-connection = false', }, ]; - const additionalEnvVars = (config.svApp?.additionalEnvVars || []) - .concat(topologyChangeDelayEnvVars) - .concat(bftSequencerConnectionEnvVars); + const additionalEnvVars = (config.svApp?.additionalEnvVars || []).concat( + bftSequencerConnectionEnvVars + ); const svValues = { ...decentralizedSynchronizerMigrationConfig.migratingNodeConfig(), ...spliceInstanceNames, diff --git a/cluster/pulumi/common-sv/src/config.ts b/cluster/pulumi/common-sv/src/config.ts index 64a8a6fb3b..e4d8b19745 100644 --- a/cluster/pulumi/common-sv/src/config.ts +++ b/cluster/pulumi/common-sv/src/config.ts @@ -51,9 +51,7 @@ export interface StaticSvConfig { onboardingName: string; validatorWalletUser?: string; auth0ValidatorAppName: string; - auth0ValidatorAppClientId?: string; auth0SvAppName: string; - auth0SvAppClientId?: string; cometBft: StaticCometBftConfig; onboardingPollingInterval?: string; sweep?: SweepConfig; @@ -122,7 +120,6 @@ export const SvConfigSchema = z.object({ skipInitialization: z.boolean().default(false), // This can be used on clusters like CILR where we usually would expect to skip initialization but the sv runbook gets reset periodically. forceSvRunbookInitialization: z.boolean().default(false), - topologyChangeDelay: z.string().optional(), }) .optional(), }) diff --git a/cluster/pulumi/common-sv/src/singleSvConfig.ts b/cluster/pulumi/common-sv/src/singleSvConfig.ts index b5516479d9..e756aaee0b 100644 --- a/cluster/pulumi/common-sv/src/singleSvConfig.ts +++ b/cluster/pulumi/common-sv/src/singleSvConfig.ts @@ -1,6 +1,12 @@ // Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 -import { KmsConfigSchema, LogLevelSchema } from '@lfdecentralizedtrust/splice-pulumi-common'; +import { + KmsConfigSchema, + LogLevelSchema, + CloudSqlConfigSchema, +} from '@lfdecentralizedtrust/splice-pulumi-common'; +import { ValidatorAppConfigSchema } from '@lfdecentralizedtrust/splice-pulumi-common-validator/src/config'; +import { spliceConfig } from '@lfdecentralizedtrust/splice-pulumi-common/src/config/config'; import { clusterYamlConfig } from '@lfdecentralizedtrust/splice-pulumi-common/src/config/configLoader'; import { merge } from 'lodash'; import util from 'node:util'; @@ -19,10 +25,21 @@ const EnvVarConfigSchema = z.object({ name: z.string(), value: z.string(), }); +const CloudSqlWithOverrideConfigSchema = CloudSqlConfigSchema.partial() + .default(spliceConfig.pulumiProjectConfig.cloudSql) + .transform(sqlConfig => merge({}, spliceConfig.pulumiProjectConfig.cloudSql, sqlConfig)); +const SvMediatorConfigSchema = z + .object({ + additionalEnvVars: z.array(EnvVarConfigSchema).default([]), + additionalJvmOptions: z.string().optional(), + cloudSql: CloudSqlWithOverrideConfigSchema, + }) + .strict(); const SvSequencerConfigSchema = z .object({ additionalEnvVars: z.array(EnvVarConfigSchema).default([]), additionalJvmOptions: z.string().optional(), + cloudSql: CloudSqlWithOverrideConfigSchema, }) .strict(); const SvParticipantConfigSchema = z @@ -31,6 +48,7 @@ const SvParticipantConfigSchema = z bftSequencerConnection: z.boolean().optional(), additionalEnvVars: z.array(EnvVarConfigSchema).default([]), additionalJvmOptions: z.string().optional(), + cloudSql: CloudSqlWithOverrideConfigSchema, }) .strict(); const Auth0ConfigSchema = z @@ -62,7 +80,7 @@ const ScanAppConfigSchema = z additionalJvmOptions: 
z.string().optional(), }) .strict(); -const ValidatorAppConfigSchema = z +const SvValidatorAppConfigSchema = z .object({ walletUser: z.string().optional(), // TODO(#2389) inline env var into config.yaml @@ -71,11 +89,9 @@ const ValidatorAppConfigSchema = z fromEnv: z.string(), }) .optional(), - additionalEnvVars: z.array(EnvVarConfigSchema).default([]), - additionalJvmOptions: z.string().optional(), auth0: Auth0ConfigSchema.optional(), }) - .strict(); + .and(ValidatorAppConfigSchema); // https://docs.cometbft.com/main/explanation/core/running-in-production const CometbftLogLevelSchema = z.enum(['info', 'error', 'debug', 'none']); // things here are declared optional even when they aren't, to allow partial overrides of defaults @@ -86,9 +102,10 @@ const SingleSvConfigSchema = z cometbft: SvCometbftConfigSchema.optional(), participant: SvParticipantConfigSchema.optional(), sequencer: SvSequencerConfigSchema.optional(), + mediator: SvMediatorConfigSchema.optional(), svApp: SvAppConfigSchema.optional(), scanApp: ScanAppConfigSchema.optional(), - validatorApp: ValidatorAppConfigSchema.optional(), + validatorApp: SvValidatorAppConfigSchema.optional(), logging: z .object({ appsLogLevel: LogLevelSchema, diff --git a/cluster/pulumi/common-sv/src/svConfigs.ts b/cluster/pulumi/common-sv/src/svConfigs.ts index f7c4ed141d..056ea0f50a 100644 --- a/cluster/pulumi/common-sv/src/svConfigs.ts +++ b/cluster/pulumi/common-sv/src/svConfigs.ts @@ -55,9 +55,7 @@ const fromSingleSvConfig = (nodeName: string, cometBftNodeIndex: number): Static auth0ValidatorAppName: config.validatorApp?.auth0?.name ? config.validatorApp.auth0.name : `${nodeName}_validator`, - auth0ValidatorAppClientId: config.validatorApp?.auth0?.clientId, auth0SvAppName: config.svApp?.auth0?.name ? config.svApp.auth0.name : nodeName, - auth0SvAppClientId: config.svApp?.auth0?.clientId, validatorWalletUser: config.validatorApp?.walletUser, cometBft: { nodeIndex: cometBftNodeIndex, diff --git a/cluster/pulumi/common-validator/src/config.ts b/cluster/pulumi/common-validator/src/config.ts index 864cea342f..4fa9f13217 100644 --- a/cluster/pulumi/common-validator/src/config.ts +++ b/cluster/pulumi/common-validator/src/config.ts @@ -9,6 +9,11 @@ import { import { clusterSubConfig } from '@lfdecentralizedtrust/splice-pulumi-common/src/config/configLoader'; import { z } from 'zod'; +export const ValidatorAppConfigSchema = z.object({ + additionalEnvVars: z.array(EnvVarConfigSchema).default([]), + additionalJvmOptions: z.string().optional(), +}); + export const ParticipantConfigSchema = z.object({ additionalEnvVars: z.array(EnvVarConfigSchema).default([]), additionalJvmOptions: z.string().optional(), @@ -29,13 +34,17 @@ export const ValidatorNodeConfigSchema = z.object({ }) .optional(), participant: ParticipantConfigSchema.optional(), + validatorApp: ValidatorAppConfigSchema.optional(), }); export const PartyAllocatorConfigSchema = z.object({ enable: z.boolean(), parallelism: z.number().default(30), maxParties: z.number().default(1000000), + preapprovalRetries: z.number().default(120), + preapprovalRetryDelayMs: z.number().default(1000), }); export type PartyAllocatorConfig = z.infer<typeof PartyAllocatorConfigSchema>; + export type ValidatorNodeConfig = z.infer<typeof ValidatorNodeConfigSchema>; export const ValidatorConfigSchema = z .object({ diff --git a/cluster/pulumi/common-validator/src/participant.ts b/cluster/pulumi/common-validator/src/participant.ts index 95a1c5cd7b..01fc26135d 100644 --- a/cluster/pulumi/common-validator/src/participant.ts +++ b/cluster/pulumi/common-validator/src/participant.ts @@ -16,6 +16,7 @@
import { sanitizedForPostgres, SPLICE_ROOT, SpliceCustomResourceOptions, + spliceConfig, } from '@lfdecentralizedtrust/splice-pulumi-common'; import { ValidatorNodeConfig } from '@lfdecentralizedtrust/splice-pulumi-common-validator'; import { CnChartVersion } from '@lfdecentralizedtrust/splice-pulumi-common/src/artifacts'; @@ -37,7 +38,14 @@ export function installParticipant( const participantPostgres = defaultPostgres || - postgres.installPostgres(xns, `participant-pg`, `participant-pg`, activeVersion, true); + postgres.installPostgres( + xns, + `participant-pg`, + `participant-pg`, + activeVersion, + spliceConfig.pulumiProjectConfig.cloudSql, + true + ); const participantValues: ChartValues = { ...loadYamlFromFile( `${SPLICE_ROOT}/apps/app/src/pack/examples/sv-helm/participant-values.yaml`, diff --git a/cluster/pulumi/common/src/config/cloudSql.ts b/cluster/pulumi/common/src/config/cloudSql.ts new file mode 100644 index 0000000000..239b71e140 --- /dev/null +++ b/cluster/pulumi/common/src/config/cloudSql.ts @@ -0,0 +1,21 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 +import { z } from 'zod'; + +export const CloudSqlConfigSchema = z.object({ + enabled: z.boolean(), + // Docs on cloudsql maintenance windows: https://cloud.google.com/sql/docs/postgres/set-maintenance-window + maintenanceWindow: z + .object({ + day: z.number().min(1).max(7).default(2), // 1 (Monday) to 7 (Sunday) + hour: z.number().min(0).max(23).default(8), // 24-hour format UTC + }) + .default({ day: 2, hour: 8 }), + protected: z.boolean(), + tier: z.string(), + enterprisePlus: z.boolean(), + // https://cloud.google.com/sql/docs/mysql/backup-recovery/backups#retained-backups + // controls the number of automated gcp sql backups to retain + backupsToRetain: z.number().optional(), +}); +export type CloudSqlConfig = z.infer<typeof CloudSqlConfigSchema>; diff --git a/cluster/pulumi/common/src/config/configSchema.ts b/cluster/pulumi/common/src/config/configSchema.ts index ca7458b509..2e2c095f01 100644 --- a/cluster/pulumi/common/src/config/configSchema.ts +++ b/cluster/pulumi/common/src/config/configSchema.ts @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 import { z } from 'zod'; +import { CloudSqlConfigSchema } from './cloudSql'; import { defaultActiveMigration, SynchronizerMigrationSchema } from './migrationSchema'; // This is a config that's relevant for all (most) pulumi projects.
For project-specific configuration, @@ -11,22 +12,7 @@ const PulumiProjectConfigSchema = z.object({ isExternalCluster: z.boolean(), hasPublicInfo: z.boolean(), interAppsDependencies: z.boolean(), - cloudSql: z.object({ - enabled: z.boolean(), - // Docs on cloudsql maintenance windows: https://cloud.google.com/sql/docs/postgres/set-maintenance-window - maintenanceWindow: z - .object({ - day: z.number().min(1).max(7).default(2), // 1 (Monday) to 7 (Sunday) - hour: z.number().min(0).max(23).default(8), // 24-hour format UTC - }) - .default({ day: 2, hour: 8 }), - protected: z.boolean(), - tier: z.string(), - enterprisePlus: z.boolean(), - // https://cloud.google.com/sql/docs/mysql/backup-recovery/backups#retained-backups - // controls the number of automated gcp sql backups to retain - backupsToRetain: z.number().optional(), - }), + cloudSql: CloudSqlConfigSchema, }); export type PulumiProjectConfig = z.infer<typeof PulumiProjectConfigSchema>; export const ConfigSchema = z.object({ diff --git a/cluster/pulumi/common/src/config/index.ts b/cluster/pulumi/common/src/config/index.ts index b91c888a84..33faa48511 100644 --- a/cluster/pulumi/common/src/config/index.ts +++ b/cluster/pulumi/common/src/config/index.ts @@ -6,6 +6,7 @@ import { spliceEnvConfig } from './envConfig'; export * from './configSchema'; export * from './kms'; +export * from './cloudSql'; export { spliceEnvConfig as config } from './envConfig'; export const DeploySvRunbook = spliceEnvConfig.envFlag('SPLICE_DEPLOY_SV_RUNBOOK', false); diff --git a/cluster/pulumi/common/src/dump-config-common.ts b/cluster/pulumi/common/src/dump-config-common.ts index ba9e156872..70d1d0b397 100644 --- a/cluster/pulumi/common/src/dump-config-common.ts +++ b/cluster/pulumi/common/src/dump-config-common.ts @@ -15,6 +15,7 @@ export enum PulumiFunction { GCP_GET_SUB_NETWORK = 'gcp:compute/getSubnetwork:getSubnetwork', GCP_GET_SECRET_VERSION = 'gcp:secretmanager/getSecretVersion:getSecretVersion', GCP_GET_CLUSTER = 'gcp:container/getCluster:getCluster', + STD_BASE64_DECODE = 'std:index:base64decode', } export class SecretsFixtureMap extends Map { @@ -173,6 +174,10 @@ export async function initDumpConfig(): Promise<void> { }, call: function (args: pulumi.runtime.MockCallArgs) { switch (args.token) { + case PulumiFunction.STD_BASE64_DECODE: + return { + result: `base64-decoded-mock`, + }; case PulumiFunction.GCP_GET_PROJECT: return { ...args.inputs, name: projectName }; case PulumiFunction.GCP_GET_SUB_NETWORK: diff --git a/cluster/pulumi/common/src/index.ts b/cluster/pulumi/common/src/index.ts index 9871a9120d..c804aab0c2 100644 --- a/cluster/pulumi/common/src/index.ts +++ b/cluster/pulumi/common/src/index.ts @@ -19,7 +19,6 @@ export * from './utils'; export * from './packageConfig'; export * from './artifacts'; export * from './dockerConfig'; -export * from './config/kms'; export * from './serviceAccount'; export * from './participantKms'; export * from './config/migrationSchema'; @@ -27,3 +26,4 @@ export * from './pruning'; export * from './config/loadTesterConfig'; export * from './config/networkWideConfig'; export * from './ratelimit'; +export * from './config/config'; diff --git a/cluster/pulumi/common/src/postgres.ts b/cluster/pulumi/common/src/postgres.ts index 16aa589e0a..d6fbb7882c 100644 --- a/cluster/pulumi/common/src/postgres.ts +++ b/cluster/pulumi/common/src/postgres.ts @@ -7,24 +7,16 @@ import * as _ from 'lodash'; import { Resource } from '@pulumi/pulumi'; import { CnChartVersion } from './artifacts'; -import { clusterSmallDisk, config } from './config'; +import {
clusterSmallDisk, CloudSqlConfig, config } from './config'; import { spliceConfig } from './config/config'; import { installSpliceHelmChart } from './helm'; import { installPostgresPasswordSecret } from './secrets'; import { ChartValues, CLUSTER_BASENAME, ExactNamespace, GCP_ZONE } from './utils'; -const enableCloudSql = spliceConfig.pulumiProjectConfig.cloudSql.enabled; -export const protectCloudSql = spliceConfig.pulumiProjectConfig.cloudSql.protected; -const cloudSqlDbInstance = spliceConfig.pulumiProjectConfig.cloudSql.tier; -const cloudSqlEnterprisePlus = spliceConfig.pulumiProjectConfig.cloudSql.enterprisePlus; - const project = gcp.organizations.getProjectOutput({}); // use existing default network (needs to have a private vpc connection) -export const privateNetwork = gcp.compute.Network.get( - 'default', - pulumi.interpolate`projects/${project.name}/global/networks/default` -); +export const privateNetworkId = pulumi.interpolate`projects/${project.name}/global/networks/default`; export function generatePassword( name: string, @@ -67,12 +59,13 @@ export class CloudPostgres extends pulumi.ComponentResource implements Postgres instanceName: string, alias: string, secretName: string, + cloudSqlConfig: CloudSqlConfig, active: boolean = true, opts: { disableProtection?: boolean; migrationId?: string; logicalDecoding?: boolean } = {} ) { const instanceLogicalName = xns.logicalName + '-' + instanceName; const instanceLogicalNameAlias = xns.logicalName + '-' + alias; // pulumi name before #12391 - const deletionProtection = opts.disableProtection ? false : protectCloudSql; + const deletionProtection = opts.disableProtection ? false : cloudSqlConfig.protected; const baseOpts = { protect: deletionProtection, aliases: [{ name: instanceLogicalNameAlias }], @@ -109,9 +102,9 @@ export class CloudPostgres extends pulumi.ComponentResource implements Postgres insightsConfig: { queryInsightsEnabled: true, }, - tier: cloudSqlDbInstance, - edition: cloudSqlEnterprisePlus ? 'ENTERPRISE_PLUS' : 'ENTERPRISE', - ...(cloudSqlEnterprisePlus + tier: cloudSqlConfig.tier, + edition: cloudSqlConfig.enterprisePlus ? 'ENTERPRISE_PLUS' : 'ENTERPRISE', + ...(cloudSqlConfig.enterprisePlus ? { dataCacheConfig: { dataCacheEnabled: true, @@ -120,7 +113,7 @@ export class CloudPostgres extends pulumi.ComponentResource implements Postgres : undefined), ipConfiguration: { ipv4Enabled: false, - privateNetwork: privateNetwork.id, + privateNetwork: privateNetworkId, enablePrivatePathForGoogleCloudServices: true, }, userLabels: opts.migrationId @@ -208,7 +201,7 @@ export class SplicePostgres extends pulumi.ComponentResource implements Postgres const logicalName = xns.logicalName + '-' + instanceName; const logicalNameAlias = xns.logicalName + '-' + alias; // pulumi name before #12391 super('canton:network:postgres', logicalName, [], { - protect: disableProtection ? false : protectCloudSql, + protect: disableProtection ? false : spliceConfig.pulumiProjectConfig.cloudSql.protected, aliases: [{ name: logicalNameAlias, type: 'canton:network:postgres' }], }); @@ -262,6 +255,7 @@ export function installPostgres( instanceName: string, alias: string, version: CnChartVersion, + cloudSqlConfig: CloudSqlConfig, uniqueSecretName = false, opts: { isActive?: boolean; @@ -273,8 +267,8 @@ export function installPostgres( const o = { isActive: true, ...opts }; let ret: Postgres; const secretName = uniqueSecretName ? 
instanceName + '-secrets' : 'postgres-secrets'; - if (enableCloudSql) { - ret = new CloudPostgres(xns, instanceName, alias, secretName, o.isActive, { + if (cloudSqlConfig.enabled) { + ret = new CloudPostgres(xns, instanceName, alias, secretName, cloudSqlConfig, o.isActive, { disableProtection: o.disableProtection, migrationId: o.migrationId?.toString(), logicalDecoding: o.logicalDecoding, diff --git a/cluster/pulumi/infra/grafana-alerting/traffic_alerts.yaml b/cluster/pulumi/infra/grafana-alerting/traffic_alerts.yaml index f34a27cbd6..3b90cf958e 100644 --- a/cluster/pulumi/infra/grafana-alerting/traffic_alerts.yaml +++ b/cluster/pulumi/infra/grafana-alerting/traffic_alerts.yaml @@ -5,65 +5,6 @@ groups: folder: canton-network interval: 1m rules: - - uid: adw5rd048zf9ca - title: Wasted Traffic - condition: wasted_traffic_threshold - data: - - refId: wasted_traffic - relativeTimeRange: - from: 600 - to: 0 - datasourceUid: prometheus - model: - editorMode: code - expr: quantile($WASTED_TRAFFIC_ALERT_QUANTILE, increase(daml_sequencer_traffic_control_wasted_traffic_total{member=~"PAR::.*"$WASTED_TRAFFIC_ALERT_EXTRA_MEMBER_FILTER}[$WASTED_TRAFFIC_ALERT_TIME_RANGE_MINSm])) - instant: true - intervalMs: 1000 - legendFormat: __auto - maxDataPoints: 43200 - range: false - refId: wasted_traffic - - refId: wasted_traffic_threshold - relativeTimeRange: - from: 600 - to: 0 - datasourceUid: __expr__ - model: - conditions: - - evaluator: - params: - - $WASTED_TRAFFIC_ALERT_THRESHOLD_BYTES - type: gt - operator: - type: and - query: - params: - - C - reducer: - params: [] - type: last - type: query - datasource: - type: __expr__ - uid: __expr__ - expression: wasted_traffic - intervalMs: 1000 - maxDataPoints: 43200 - refId: wasted_traffic_threshold - type: threshold - dashboardUid: fdnphvrryfq4gf - panelId: 6 - noDataState: OK - execErrState: Alerting - for: 1m - annotations: - __dashboardUid__: fdnphvrryfq4gf - __panelId__: "6" - description: The $WASTED_TRAFFIC_ALERT_QUANTILE quantile of of traffic wasted across members exceeded the threshold with a value of {{ humanize1024 $values.wasted_traffic.Value }} in the last $WASTED_TRAFFIC_ALERT_TIME_RANGE_MINSm - severity: critical - summary: $WASTED_TRAFFIC_ALERT_QUANTILE quantile of traffic wasted across members exceeded threshold ({{ humanize1024 $WASTED_TRAFFIC_ALERT_THRESHOLD_BYTES }}b over $WASTED_TRAFFIC_ALERT_TIME_RANGE_MINSm) - labels: {} - isPaused: false - uid: 5dcddc9a5487 title: Confirmation Requests Total condition: confirmation_requests_total_threshold diff --git a/cluster/pulumi/infra/grafana-dashboards/canton-network/catchup.json b/cluster/pulumi/infra/grafana-dashboards/canton-network/catchup.json index 91590ae0b1..9c8019421b 100644 --- a/cluster/pulumi/infra/grafana-dashboards/canton-network/catchup.json +++ b/cluster/pulumi/infra/grafana-dashboards/canton-network/catchup.json @@ -18,7 +18,7 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": 3402, + "id": 380, "links": [], "panels": [ { @@ -66,7 +66,8 @@ "lineWidth": 1, "pointSize": 5, "scaleDistribution": { - "type": "linear" + "log": 2, + "type": "log" }, "showPoints": "auto", "spanNulls": false, @@ -83,7 +84,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -119,7 +121,7 @@ "sort": "desc" } }, - "pluginVersion": "12.0.2", + "pluginVersion": "12.1.1", "targets": [ { "datasource": { @@ -136,7 +138,100 @@ "range": true, "refId": "A", "useBackend": false + } + ], + "title": "Sequencer Client Delay", + 
"type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS}" + }, + "description": "Catchup speed of the sequencer in the last 5min", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ms" }, + "overrides": [] + }, + "gridPos": { + "h": 11, + "w": 24, + "x": 0, + "y": 12 + }, + "id": 18, + "options": { + "legend": { + "calcs": [ + "lastNotNull" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "12.1.1", + "targets": [ { "datasource": { "type": "prometheus", @@ -156,7 +251,7 @@ "useBackend": false } ], - "title": "Sequencer Client Delay", + "title": "Sequencer Catchup Speed", "type": "timeseries" }, { @@ -191,7 +286,8 @@ "lineWidth": 1, "pointSize": 5, "scaleDistribution": { - "type": "linear" + "log": 2, + "type": "log" }, "showPoints": "auto", "spanNulls": false, @@ -208,7 +304,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -216,7 +313,7 @@ } ] }, - "unit": "ms" + "unit": "s" }, "overrides": [] }, @@ -224,7 +321,7 @@ "h": 8, "w": 12, "x": 0, - "y": 12 + "y": 23 }, "id": 3, "options": { @@ -244,7 +341,7 @@ "sort": "desc" } }, - "pluginVersion": "12.0.2", + "pluginVersion": "12.1.1", "targets": [ { "datasource": { @@ -253,7 +350,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "min by (namespace)(daml_sequencer_client_handler_delay{namespace=~\"$namespace\",component=\"participant\"})", + "expr": "max by (namespace) (timestamp(daml_sequencer_client_handler_last_sequencing_time_micros{namespace=~\"$namespace\",component=\"participant\"}) - ((daml_sequencer_client_handler_last_sequencing_time_micros{namespace=~\"$namespace\",component=\"participant\"} > 0) / 1e6))", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -298,7 +395,8 @@ "lineWidth": 1, "pointSize": 5, "scaleDistribution": { - "type": "linear" + "log": 2, + "type": "log" }, "showPoints": "auto", "spanNulls": false, @@ -315,7 +413,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -323,7 +422,7 @@ } ] }, - "unit": "ms" + "unit": "s" }, "overrides": [] }, @@ -331,7 +430,7 @@ "h": 8, "w": 12, "x": 12, - "y": 12 + "y": 23 }, "id": 2, "options": { @@ -351,7 +450,7 @@ "sort": "desc" } }, - "pluginVersion": "12.0.2", + "pluginVersion": "12.1.1", "targets": [ { "datasource": { @@ -360,7 +459,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "min by (namespace, job)(daml_sequencer_client_handler_delay{namespace=~\"$namespace\", 
component=\"mediator\",job=~\"global-domain-$migration-mediator\"})", + "expr": "max by (namespace, job) (timestamp(daml_sequencer_client_handler_last_sequencing_time_micros{namespace=~\"$namespace\",component=\"mediator\",job=~\"global-domain-$migration-mediator\"}) - ((daml_sequencer_client_handler_last_sequencing_time_micros{namespace=~\"$namespace\",component=\"mediator\",job=~\"global-domain-$migration-mediator\"} > 0) / 1e6)) ", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -379,7 +478,7 @@ "h": 1, "w": 24, "x": 0, - "y": 20 + "y": 31 }, "id": 10, "panels": [], @@ -436,7 +535,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -452,7 +552,7 @@ "h": 7, "w": 24, "x": 0, - "y": 21 + "y": 32 }, "id": 11, "options": { @@ -468,7 +568,7 @@ "sort": "desc" } }, - "pluginVersion": "12.0.2", + "pluginVersion": "12.1.1", "targets": [ { "datasource": { @@ -543,7 +643,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -559,7 +660,7 @@ "h": 7, "w": 24, "x": 0, - "y": 28 + "y": 39 }, "id": 7, "options": { @@ -575,7 +676,7 @@ "sort": "desc" } }, - "pluginVersion": "12.0.2", + "pluginVersion": "12.1.1", "targets": [ { "datasource": { @@ -650,7 +751,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -666,7 +768,7 @@ "h": 6, "w": 24, "x": 0, - "y": 35 + "y": 46 }, "id": 6, "options": { @@ -682,7 +784,7 @@ "sort": "none" } }, - "pluginVersion": "12.0.2", + "pluginVersion": "12.1.1", "targets": [ { "datasource": { @@ -755,7 +857,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -770,7 +873,7 @@ "h": 8, "w": 12, "x": 0, - "y": 41 + "y": 52 }, "id": 8, "options": { @@ -786,7 +889,7 @@ "sort": "none" } }, - "pluginVersion": "12.0.2", + "pluginVersion": "12.1.1", "targets": [ { "datasource": { @@ -856,7 +959,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -871,7 +975,7 @@ "h": 8, "w": 12, "x": 12, - "y": 41 + "y": 52 }, "id": 4, "options": { @@ -887,7 +991,7 @@ "sort": "none" } }, - "pluginVersion": "12.0.2", + "pluginVersion": "12.1.1", "targets": [ { "datasource": { @@ -957,7 +1061,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -972,7 +1077,7 @@ "h": 8, "w": 12, "x": 0, - "y": 49 + "y": 60 }, "id": 5, "options": { @@ -988,7 +1093,7 @@ "sort": "none" } }, - "pluginVersion": "12.0.2", + "pluginVersion": "12.1.1", "targets": [ { "datasource": { @@ -1025,7 +1130,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -1040,7 +1146,7 @@ "h": 8, "w": 12, "x": 12, - "y": 49 + "y": 60 }, "id": 15, "options": { @@ -1058,7 +1164,7 @@ "showThresholdMarkers": true, "sizing": "auto" }, - "pluginVersion": "12.0.2", + "pluginVersion": "12.1.1", "targets": [ { "datasource": { @@ -1129,7 +1235,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -1144,7 +1251,7 @@ "h": 9, "w": 24, "x": 0, - "y": 57 + "y": 68 }, "id": 17, "options": { @@ -1160,7 +1267,7 @@ "sort": "desc" } }, - "pluginVersion": "12.0.2", + "pluginVersion": "12.1.1", "targets": [ { "datasource": { diff --git a/cluster/pulumi/infra/grafana-dashboards/canton-network/onboarded_parties.json 
b/cluster/pulumi/infra/grafana-dashboards/canton-network/onboarded_parties.json new file mode 100644 index 0000000000..8936550c72 --- /dev/null +++ b/cluster/pulumi/infra/grafana-dashboards/canton-network/onboarded_parties.json @@ -0,0 +1,536 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 265, + "links": [], + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 13, + "w": 4, + "x": 0, + "y": 0 + }, + "id": 2, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.0.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "exemplar": false, + "expr": "splice_synchronizer_topology_num_parties{namespace=\"$namespace\"}", + "instant": true, + "legendFormat": "total parties", + "range": false, + "refId": "A" + } + ], + "title": "Parties onboarded to the global synchronizer", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 13, + "w": 10, + "x": 4, + "y": 0 + }, + "id": 1, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.0.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "splice_synchronizer_topology_num_parties{namespace=\"$namespace\"}", + "legendFormat": "total parties", + "range": true, + "refId": "A" + } + ], + "title": "Parties onboarded to the global synchronizer", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + 
"axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 13, + "w": 10, + "x": 14, + "y": 0 + }, + "id": 3, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "12.0.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "splice_synchronizer_topology_num_parties_per_participant{namespace=\"$namespace\"}", + "legendFormat": "{{participant_id}}", + "range": true, + "refId": "A" + } + ], + "title": "Parties onboarded to the global synchronizer per participant", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "filterable": true, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Parties" + }, + "overrides": [] + }, + "gridPos": { + "h": 14, + "w": 9, + "x": 0, + "y": 13 + }, + "id": 4, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Parties" + } + ] + }, + "pluginVersion": "12.0.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "exemplar": false, + "expr": "splice_synchronizer_topology_num_parties_per_participant{namespace=\"$namespace\"}", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Top hosting participants", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true, + "__name__": true, + "endpoint": true, + "instance": true, + "job": true, + "migration": true, + "namespace": true, + "node_name": true, + "node_type": true, + "otel_scope_name": true, + "pod": true, + "service": true + }, + "includeByName": {}, + "indexByName": {}, + "renameByName": { + "Value": "Parties" + } + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "filterable": true, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Parties" + }, + 
"overrides": [] + }, + "gridPos": { + "h": 14, + "w": 10, + "x": 9, + "y": 13 + }, + "id": 5, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Parties" + } + ] + }, + "pluginVersion": "12.0.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sum by(participant_id) (delta(splice_synchronizer_topology_num_parties_per_participant{namespace=\"$namespace\"}[24h])) > 0", + "format": "table", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A", + "useBackend": false + } + ], + "title": "Onboarded parties last 24h", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true, + "__name__": true, + "endpoint": true, + "instance": true, + "job": true, + "migration": true, + "namespace": true, + "node_name": true, + "node_type": true, + "otel_scope_name": true, + "pod": true, + "service": true + }, + "includeByName": {}, + "indexByName": {}, + "renameByName": { + "Value": "Parties" + } + } + } + ], + "type": "table" + } + ], + "preload": false, + "schemaVersion": 41, + "tags": [], + "templating": { + "list": [ + { + "allowCustomValue": false, + "current": { + "text": "sv-5", + "value": "sv-5" + }, + "definition": "label_values(splice_synchronizer_topology_num_parties,namespace)", + "name": "namespace", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(splice_synchronizer_topology_num_parties,namespace)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "", + "type": "query" + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": {}, + "timezone": "UTC", + "title": "Onboarded Parties", + "uid": "fc6185ce-0c37-48e7-960b-839a037d47bf", + "version": 2 +} diff --git a/cluster/pulumi/infra/grafana-dashboards/canton/sequencer_messages.json b/cluster/pulumi/infra/grafana-dashboards/canton/sequencer_messages.json index 720edd1c41..01c028b78e 100644 --- a/cluster/pulumi/infra/grafana-dashboards/canton/sequencer_messages.json +++ b/cluster/pulumi/infra/grafana-dashboards/canton/sequencer_messages.json @@ -17,8 +17,8 @@ }, "editable": true, "fiscalYearStartMonth": 0, - "graphTooltip": 0, - "id": 5075, + "graphTooltip": 1, + "id": 80, "links": [], "panels": [ { @@ -347,9 +347,9 @@ "showLegend": true }, "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" + "hideZeros": true, + "mode": "multi", + "sort": "desc" } }, "pluginVersion": "12.0.2", @@ -539,9 +539,9 @@ "sortDesc": true }, "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" + "hideZeros": true, + "mode": "multi", + "sort": "desc" } }, "pluginVersion": "12.0.2", @@ -642,9 +642,9 @@ "showLegend": true }, "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" + "hideZeros": true, + "mode": "multi", + "sort": "desc" } }, "pluginVersion": "12.0.2", @@ -746,8 +746,8 @@ }, "tooltip": { "hideZeros": false, - "mode": "single", - "sort": "none" + "mode": "multi", + "sort": "desc" } }, "pluginVersion": "12.0.2", @@ -803,8 +803,8 @@ }, { "current": { - "text": "global-domain-6-sequencer", - "value": "global-domain-6-sequencer" + "text": "global-domain-0-sequencer", + "value": "global-domain-0-sequencer" }, "datasource": { 
"type": "prometheus", @@ -833,5 +833,5 @@ "timezone": "", "title": "Sequencer Messages", "uid": "fdjrxql2alblsd", - "version": 5 + "version": 2 } diff --git a/cluster/pulumi/infra/grafana-dashboards/canton/sequencer_topology_transactions.json b/cluster/pulumi/infra/grafana-dashboards/canton/sequencer_topology_transactions.json new file mode 100644 index 0000000000..8f403b9209 --- /dev/null +++ b/cluster/pulumi/infra/grafana-dashboards/canton/sequencer_topology_transactions.json @@ -0,0 +1,505 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 3826, + "links": [], + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "total" + }, + "properties": [ + { + "id": "displayName", + "value": "total" + } + ] + } + ] + }, + "gridPos": { + "h": 12, + "w": 16, + "x": 0, + "y": 0 + }, + "id": 1, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "12.0.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "sum by(type) (rate(daml_sequencer_block_events_total{namespace=\"$namespace\", job=\"$job\", type=\"send-topology\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "type", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "sum(rate(daml_sequencer_block_events_total{namespace=\"$namespace\", job=\"$job\", type=\"send-topology\"}[1h]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "hourly total", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Topology send rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + 
"mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 8, + "x": 16, + "y": 0 + }, + "id": 3, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true + }, + "pluginVersion": "12.0.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sort_desc(sum by(member) (delta(daml_sequencer_block_events_total{namespace=\"$namespace\", job=\"$job\", type=\"send-topology\", member=~\"PAR::.*\"}[24h])))", + "format": "table", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A", + "useBackend": false + } + ], + "title": "Topology transactions participants last 24h", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "includeByName": {}, + "indexByName": {}, + "renameByName": { + "Time": "" + } + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 13, + "w": 16, + "x": 0, + "y": 12 + }, + "id": 2, + "options": { + "legend": { + "calcs": [ + "lastNotNull" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Last *", + "sortDesc": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.0.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "sum by(member) (rate(daml_sequencer_block_events_total{namespace=\"$namespace\", job=\"$job\", member=~\"PAR::.*\", type=\"send-topology\"}[$__rate_interval]))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "total", + "useBackend": false + } + ], + "title": "Sequencer Topology Rate by Participant", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 13, + "w": 8, + 
"x": 16, + "y": 12 + }, + "id": 4, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true + }, + "pluginVersion": "12.0.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sort_desc(sum by(member) (delta(daml_sequencer_block_events_total{namespace=\"$namespace\", job=\"$job\", type=\"send-topology\", member!~\"PAR::.*\"}[24h])))", + "format": "table", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A", + "useBackend": false + } + ], + "title": "Topology transactions non participants last 24h", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "includeByName": {}, + "indexByName": {}, + "renameByName": { + "Time": "" + } + } + } + ], + "type": "table" + } + ], + "preload": false, + "refresh": "", + "schemaVersion": 41, + "tags": [], + "templating": { + "list": [ + { + "current": { + "text": "sv-1", + "value": "sv-1" + }, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "definition": "label_values(daml_sequencer_block_events_total,namespace)", + "includeAll": false, + "name": "namespace", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(daml_sequencer_block_events_total,namespace)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "", + "type": "query" + }, + { + "current": { + "text": "global-domain-0-sequencer", + "value": "global-domain-0-sequencer" + }, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "definition": "label_values(daml_sequencer_block_events_total{namespace=\"$namespace\"},job)", + "includeAll": false, + "name": "job", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(daml_sequencer_block_events_total{namespace=\"$namespace\"},job)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "", + "type": "query" + } + ] + }, + "time": { + "from": "now-24h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Sequencer Topology Transactions", + "uid": "2f351a91-c0b3-4c6b-b5e2-25d8b9cc1304", + "version": 2 +} diff --git a/cluster/pulumi/infra/package.json b/cluster/pulumi/infra/package.json index db2e1d5fec..f3f30c5ba5 100644 --- a/cluster/pulumi/infra/package.json +++ b/cluster/pulumi/infra/package.json @@ -5,7 +5,8 @@ "@pulumi/auth0": "3.21.0", "@pulumi/kubernetes-cert-manager": "0.2.0", "@pulumiverse/grafana": "0.16.3", - "@lfdecentralizedtrust/splice-pulumi-common": "1.0.0" + "@lfdecentralizedtrust/splice-pulumi-common": "1.0.0", + "@pulumi/gcp": "8.32.1" }, "overrides": { "@pulumi/kubernetes-cert-manager": { diff --git a/cluster/pulumi/infra/src/auth0.ts b/cluster/pulumi/infra/src/auth0.ts index f9bdc1bd9e..d5bcded950 100644 --- a/cluster/pulumi/infra/src/auth0.ts +++ b/cluster/pulumi/infra/src/auth0.ts @@ -11,8 +11,149 @@ import { config, isMainNet, NamespaceToClientIdMapMap, + clusterProdLike, } from '@lfdecentralizedtrust/splice-pulumi-common'; -import { standardSvConfigs, extraSvConfigs } from '@lfdecentralizedtrust/splice-pulumi-common-sv'; +import { + standardSvConfigs, + extraSvConfigs, + dsoSize, +} from '@lfdecentralizedtrust/splice-pulumi-common-sv'; + +function ledgerApiAudience( + svNamespaces: string, + clusterBasename: 
string, + auth0DomainProvider: auth0.Provider +): pulumi.Output<string> { + if (clusterProdLike) { + // On prod clusters, we create a ledger API per SV namespace + const auth0Api = new auth0.ResourceServer( + `LedgerApi${svNamespaces.replace(/-/g, '')}`, + { + name: `Ledger API for SV ${svNamespaces} on ${clusterBasename} (Pulumi managed)`, + identifier: `https://ledger_api.${svNamespaces}.${clusterBasename}.canton.network`, + allowOfflineAccess: true, // TODO(DACH-NY/canton-network-internal#2114): is this still needed? + }, + { provider: auth0DomainProvider } + ); + + new auth0.ResourceServerScopes( + `LedgerApiScopes${svNamespaces.replace(/-/g, '')}`, + { + resourceServerIdentifier: auth0Api.identifier, + scopes: [ + { + name: 'daml_ledger_api', + description: 'Access to the Ledger API', + }, + ], + }, + { provider: auth0DomainProvider } + ); + + return auth0Api.identifier; + } else { + // On non-prod clusters, we currently use the hard-coded identifier that matches our docs, and the manually created auth0 API + return pulumi.output('https://canton.network.global'); + } +} + +function svAppAudience( + svNamespaces: string, + clusterBasename: string, + auth0DomainProvider: auth0.Provider +): pulumi.Output<string> { + if (clusterProdLike) { + // On prod clusters, we create a SV App API per SV namespace + const auth0Api = new auth0.ResourceServer( + `SvAppApi${svNamespaces.replace(/-/g, '')}`, + { + name: `SV App API for SV ${svNamespaces} on ${clusterBasename} (Pulumi managed)`, + identifier: `https://sv.${svNamespaces}.${clusterBasename}.canton.network/api`, + allowOfflineAccess: true, // TODO(DACH-NY/canton-network-internal#2114): is this still needed? + }, + { provider: auth0DomainProvider } + ); + + return auth0Api.identifier; + } else { + // On non-prod clusters, we currently use the hard-coded identifier that matches our docs, and the manually created auth0 API (same one as ledger API) + return pulumi.output('https://canton.network.global'); + } +} + +function validatorAppAudience( + svNamespaces: string, + clusterBasename: string, + auth0DomainProvider: auth0.Provider +): pulumi.Output<string> { + if (clusterProdLike) { + // On prod clusters, we create a Validator App API per SV namespace + const auth0Api = new auth0.ResourceServer( + `ValidatorAppApi${svNamespaces.replace(/-/g, '')}`, + { + name: `Validator App API for SV ${svNamespaces} on ${clusterBasename} (Pulumi managed)`, + identifier: `https://validator.${svNamespaces}.${clusterBasename}.canton.network/api`, + allowOfflineAccess: true, // TODO(DACH-NY/canton-network-internal#2114): is this still needed?
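+ // Note: in Auth0, this identifier is also the audience ("aud") value carried by access tokens issued for the API.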
+
+function newM2MApp(
+  resourceName: string,
+  name: string,
+  description: string,
+  clusterBasename: string,
+  ledgerApiAud: pulumi.Output<string>,
+  appAud: pulumi.Output<string>,
+  auth0DomainProvider: auth0.Provider
+): auth0.Client {
+  const ret = new auth0.Client(
+    resourceName,
+    {
+      name: `${name} (Pulumi managed, ${clusterBasename})`,
+      appType: 'non_interactive',
+      description: ` ** Managed by Pulumi, do not edit manually **\n${description}`,
+    },
+    { provider: auth0DomainProvider }
+  );
+
+  pulumi.all([ledgerApiAud, appAud]).apply(([ledgerApiAudValue, appAudValue]) => {
+    new auth0.ClientGrant(
+      `${resourceName}LedgerGrant`,
+      {
+        clientId: ret.id,
+        audience: ledgerApiAudValue,
+        scopes: ['daml_ledger_api'],
+      },
+      {
+        provider: auth0DomainProvider,
+      }
+    );
+
+    if (ledgerApiAudValue !== appAudValue) {
+      new auth0.ClientGrant(
+        `${resourceName}AppGrant`,
+        {
+          clientId: ret.id,
+          audience: appAudValue,
+          scopes: [],
+        },
+        {
+          provider: auth0DomainProvider,
+        }
+      );
+    }
+  });
+
+  return ret;
+}
 
 function newUiApp(
   resourceName: string,
@@ -119,14 +260,46 @@ function svsOnlyAuth0(
     )
   );
 
-  const appToClientId: ClientIdMap = svs.reduce(
-    (acc, sv) => ({
+  const appToClientId: ClientIdMap = svs.reduce((acc, sv) => {
+    // Create auth0 APIs if needed, and obtain the audiences
+    const ledgerApiAud = ledgerApiAudience(sv.namespace, clusterBasename, provider);
+    const svAppAud = svAppAudience(sv.namespace, clusterBasename, provider);
+    const validatorAppAud = validatorAppAudience(sv.namespace, clusterBasename, provider);
+    // Create M2M apps
+    const svApp = newM2MApp(
+      `${sv.namespace.replace(/-/g, '')}SvBackendApp`,
+      `${sv.namespace.replace(/-/g, '').toUpperCase()} SV Backend`,
+      `Used for the SV backend for ${sv.description} on ${clusterBasename}`,
+      clusterBasename,
+      ledgerApiAud,
+      svAppAud,
+      provider
+    );
+    const validatorApp = newM2MApp(
+      `${sv.namespace.replace(/-/g, '')}ValidatorBackendApp`,
+      `${sv.namespace.replace(/-/g, '').toUpperCase()} Validator Backend`,
+      `Used for the Validator backend for ${sv.description} on ${clusterBasename}`,
+      clusterBasename,
+      ledgerApiAud,
+      validatorAppAud,
+      provider
+    );
+    // Currently, for no good reason, we have sv-1 vs sv1_validator naming inconsistency.
+    // To make things worse, the sv-da-1 namespace is even more special and has sv-da-1 vs sv-da-1_validator.
+    // Then mainnet DA-2 is even worse, as we use "sv" and "validator"
+    // TODO(DACH-NY/canton-internal#2110): clean this up
+    const svAppName = isMainNet && sv.namespace == 'sv-1' ? 'sv' : sv.namespace;
+    const validatorAppName =
+      sv.namespace == 'sv-da-1'
+        ? 'sv-da-1_validator'
+        : isMainNet
+          ? 'validator'
+          : sv.namespace.replace('-', '') + '_validator';
+    return {
       ...acc,
-      ...(sv.svBackend ? { [sv.svBackend.name]: sv.svBackend.clientId } : {}),
-      ...(sv.validatorBackend ?
{ [sv.validatorBackend.name]: sv.validatorBackend.clientId } : {}), - }), - {} - ); + ...{ [svAppName]: svApp.clientId, [validatorAppName]: validatorApp.clientId }, + }; + }, {}); return nsToUiToCLientIdOutput.apply(nsToUiToClientId => { return { @@ -171,32 +344,16 @@ function mainNetAuth0(clusterBasename: string, dnsNames: string[]): pulumi.Outpu }); // hardcoded sv1 will be removed once we switch DA-2 to KMS (and, likely, the sv-da-1 namespace) - const sv1 = { + const sv1: svAuth0Params = { namespace: 'sv-1', description: 'sv-1 (Digital-Asset 2)', ingressName: 'sv-2', // Ingress name of sv-1 is sv-2! - svBackend: { - name: 'sv', - clientId: 'pC5Dw7qDWDfNREKgLwx2Vpz2Ns7j3cRK', - }, - validatorBackend: { - name: 'validator', - clientId: 'B4Ir9KiFqiCOHCpSDiPJN6PzkjKjDsbR', - }, }; const extraSvs: svAuth0Params[] = extraSvConfigs.map(sv => ({ namespace: sv.nodeName, description: sv.onboardingName, ingressName: sv.ingressName, - svBackend: { - name: sv.auth0SvAppName, - clientId: sv.auth0SvAppClientId!, - }, - validatorBackend: { - name: sv.auth0ValidatorAppName, - clientId: sv.auth0ValidatorAppClientId!, - }, })); return svsOnlyAuth0( @@ -226,35 +383,17 @@ function nonMainNetAuth0(clusterBasename: string, dnsNames: string[]): pulumi.Ou clientSecret: auth0MgtClientSecret, }); - const standardSvs: svAuth0Params[] = standardSvConfigs.map(sv => ({ - namespace: sv.nodeName, - description: sv.nodeName.replace(/-/g, '').toUpperCase(), - ingressName: sv.ingressName, - svBackend: sv.auth0SvAppClientId - ? { - name: sv.auth0SvAppName, - clientId: sv.auth0SvAppClientId, - } - : undefined, - validatorBackend: sv.auth0ValidatorAppClientId - ? { - name: sv.auth0ValidatorAppName, - clientId: sv.auth0ValidatorAppClientId, - } - : undefined, - })); + const standardSvs: svAuth0Params[] = standardSvConfigs + .map(sv => ({ + namespace: sv.nodeName, + description: sv.nodeName.replace(/-/g, '').toUpperCase(), + ingressName: sv.ingressName, + })) + .slice(0, dsoSize); const extraSvs: svAuth0Params[] = extraSvConfigs.map(sv => ({ namespace: sv.nodeName, description: sv.onboardingName, ingressName: sv.ingressName, - svBackend: { - name: sv.auth0SvAppName, - clientId: sv.auth0SvAppClientId!, - }, - validatorBackend: { - name: sv.auth0ValidatorAppName, - clientId: sv.auth0ValidatorAppClientId!, - }, })); const baseAuth0 = svsOnlyAuth0( @@ -273,38 +412,6 @@ function nonMainNetAuth0(clusterBasename: string, dnsNames: string[]): pulumi.Ou validator1: 'cf0cZaTagQUN59C1HBL2udiIBdFh2CWq', splitwell: 'ekPlYxilradhEnpWdS80WfW63z1nHvKy', splitwell_validator: 'hqpZ6TP0wGyG2yYwhH6NLpuo0MpJMQZW', - 'sv-1': 'OBpJ9oTyOLuAKF0H2hhzdSFUICt0diIn', - 'sv-2': 'rv4bllgKWAiW9tBtdvURMdHW42MAXghz', - 'sv-3': 'SeG68w0ubtLQ1dEMDOs4YKPRTyMMdDLk', - 'sv-4': 'CqKgSbH54dqBT7V1JbnCxb6TfMN8I1cN', - 'sv-5': 'RSgbsze3cGHipLxhPGtGy7fqtYgyefTb', - 'sv-6': '3MO1BRMNqEiIntIM1YWwBRT1EPpKyGO6', - 'sv-7': '4imYa3E6Q5JPdLjZxHatRDtV1Wurq7pK', - 'sv-8': 'lQogWncLX7AIc2laUj8VVW6zwNJ169vR', - 'sv-9': 'GReLRFp7OQVDHmAhIyWlcnS7ZdWLdqhd', - 'sv-10': 'GReLRFp7OQVDHmAhIyWlcnS7ZdWLdqhd', - 'sv-11': 'ndIxuns8kZoObE7qN6M3IbtKSZ7RRO9B', - 'sv-12': 'qnYhBjBJ5LQu0pM5M6V8e3erQsadfew1', - 'sv-13': 'IA7BOrFhKvQ5AP9g8DxSTmO6pVT0oed3', - 'sv-14': 'cY4I4HCHgDj2mkxSSEwguFQGRFEjhnTq', - 'sv-15': 'hwKLKN5TWpaPjzuY52ubNVIRF8Onnzgk', - 'sv-16': '9pvoTvQIt2l1rzlNnaEZVsnNDFTOvt7W', - sv1_validator: '7YEiu1ty0N6uWAjL8tCAWTNi7phr7tov', - sv2_validator: '5N2kwYLOqrHtnnikBqw8A7foa01kui7h', - sv3_validator: 'V0RjcwPCsIXqYTslkF5mjcJn70AiD0dh', - sv4_validator: 
'FqRozyrmu2d6dFQYC4J9uK8Y6SXCVrhL',
-    sv5_validator: 'TdcDPsIwSXVw4rZmGqxl6Ifkn4neeOzW',
-    sv6_validator: '4pUXGkvvybNyTeWXEBlesr9qcYCQh2sh',
-    sv7_validator: '2cfFl6z5huY4rVYvxOEja8MvDdplYCDW',
-    sv8_validator: 'JYvSRekV1E5EUZ2sJ494YyHXbxR3OHIR',
-    sv9_validator: 'BABNqQ3m5ROTGJTlTHVlIckS3cwJ0M0w',
-    sv10_validator: 'EKBJkDcOHosrnhLALfrQYG6Uc4Csqwbe',
-    sv11_validator: '8jpCSqSkLxdY8zdmJwm0XXRfxFnPNAhG',
-    sv12_validator: 'PEMwunsstamR1c5k3LdjVInTKlVTkeb6',
-    sv13_validator: 'eqssDmClrmtQFTgJ7XIP7RDdhcD6iGfx',
-    sv14_validator: 'luGkjf4AvM5PYhmi3X5rFmKLzxHTBlgz',
-    sv15_validator: 'gL9Iv3iUiPTtDvyEZ9b4wCcTvz3G6qys',
-    sv16_validator: '6ANtCorumVE8Ur7n1gJ8Gfvgv5pa96mZ',
   };
 
   const validator1UiApp = newUiApp(
diff --git a/cluster/pulumi/infra/src/cloudArmor.ts b/cluster/pulumi/infra/src/cloudArmor.ts
new file mode 100644
index 0000000000..ac1c8dffed
--- /dev/null
+++ b/cluster/pulumi/infra/src/cloudArmor.ts
@@ -0,0 +1,211 @@
+// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+import * as gcp from '@pulumi/gcp';
+import * as pulumi from '@pulumi/pulumi';
+import { CLUSTER_BASENAME } from '@lfdecentralizedtrust/splice-pulumi-common';
+
+import * as config from './config';
+
+// Rule number ranges
+const THROTTLE_BAN_RULE_MIN = 100000000;
+const THROTTLE_BAN_RULE_MAX = 200000000;
+const DEFAULT_DENY_RULE_NUMBER = 2147483647;
+const RULE_SPACING = 100;
+
+// Types for API endpoint throttling/banning configuration
+export interface ApiEndpoint {
+  name: string;
+  path: string;
+  hostname: string;
+}
+
+export interface ThrottleConfig {
+  perIp: boolean;
+  rate: number; // Requests per minute
+  interval: number; // Interval in seconds
+}
+
+export type CloudArmorConfig = Pick<config.CloudArmorConfig, 'enabled' | 'allRulesPreviewOnly'> & {
+  predefinedWafRules?: PredefinedWafRule[];
+  apiThrottles?: ApiThrottleConfig[];
+};
+
+export interface PredefinedWafRule {
+  name: string;
+  action: 'allow' | 'deny' | 'throttle';
+  priority?: number;
+  preview?: boolean;
+  sensitivityLevel?: 'off' | 'low' | 'medium' | 'high';
+}
+
+// TODO (DACH-NY/canton-network-internal#2115) replace this placeholder config
+// with the real yaml structure we want to use
+export interface ApiThrottleConfig {
+  endpoint: ApiEndpoint;
+  throttle: ThrottleConfig;
+  action: 'throttle' | 'ban';
+}
+
+/**
+ * Creates a Cloud Armor security policy
+ * @param cac loaded configuration
+ * @param opts Pulumi resource options
+ * @returns The created security policy resource, if enabled
+ */
+export function configureCloudArmorPolicy(
+  cac: CloudArmorConfig,
+  opts?: pulumi.ComponentResourceOptions
+): gcp.compute.SecurityPolicy | undefined {
+  if (!cac.enabled) {
+    return undefined;
+  }
+
+  // Step 1: Create the security policy
+  const name = `waf-whitelist-throttle-ban-${CLUSTER_BASENAME}`;
+  const securityPolicy = new gcp.compute.SecurityPolicy(
+    name,
+    {
+      name,
+      description: `Cloud Armor security policy for ${CLUSTER_BASENAME}`,
+      type: 'CLOUD_ARMOR', // attachable to backend service only
+      // using `rules` to define all rules at once would be fewer Pulumi resources,
+      // but the preview would entail changing this array if the rules were changed,
+      // making those changes harder to review than with the separate resources
+    },
+    opts
+  );
+
+  const ruleOpts = { ...opts, parent: securityPolicy };
+
+  // Step 2: Add predefined WAF rules
+  if (cac.predefinedWafRules && cac.predefinedWafRules.length > 0) {
+    addPredefinedWafRules(/*securityPolicy, cac.predefinedWafRules, cac.allRulesPreviewOnly, ruleOpts*/);
+  }
+
+  // Step 3: Add IP whitelisting rules
+  addIpWhitelistRules(/*securityPolicy, cac.allRulesPreviewOnly, ruleOpts*/);
+
+  // Step 4: Add throttling/banning rules for specific API endpoints
+  if (cac.apiThrottles && cac.apiThrottles.length > 0) {
+    addThrottleAndBanRules(securityPolicy, cac.apiThrottles, cac.allRulesPreviewOnly, ruleOpts);
+  }
+
+  // Step 5: Add default deny rule
+  addDefaultDenyRule(securityPolicy, cac.allRulesPreviewOnly, ruleOpts);
+
+  return securityPolicy;
+}
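For orientation, a config that exercises the throttle/ban path could look as follows; this is a sketch against the types above, with an invented endpoint name, rate, and hostname pattern:

    const exampleConfig: CloudArmorConfig = {
      enabled: true,
      allRulesPreviewOnly: true, // Cloud Armor preview mode: matches are only logged
      apiThrottles: [
        {
          endpoint: { name: 'scan-api', path: '/api/scan/.*', hostname: 'scan\\..*' },
          throttle: { perIp: true, rate: 600, interval: 60 }, // 600 requests per minute per IP
          action: 'ban', // exceeders are banned for 600s by addThrottleAndBanRules
        },
      ],
    };
    configureCloudArmorPolicy(exampleConfig);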
+
+/**
+ * Adds predefined WAF rules to a security policy
+ */
+function addPredefinedWafRules(): void {
+  /*
+  securityPolicy: gcp.compute.SecurityPolicy,
+  rules: PredefinedWafRule[],
+  preview: boolean,
+  opts: pulumi.ResourceOptions
+  */
+  // TODO (DACH-NY/canton-network-internal#406) implement
+}
+
+/**
+ * Adds IP whitelisting rules to a security policy
+ */
+function addIpWhitelistRules(): void {
+  /*
+  securityPolicy: gcp.compute.SecurityPolicy,
+  preview: boolean,
+  opts: pulumi.ResourceOptions
+  */
+  // TODO (DACH-NY/canton-network-internal#1250) implement
+}
+
+/**
+ * Adds throttle and ban rules for API endpoints to a security policy
+ */
+function addThrottleAndBanRules(
+  securityPolicy: gcp.compute.SecurityPolicy,
+  apiThrottles: ApiThrottleConfig[],
+  preview: boolean,
+  opts: pulumi.ResourceOptions
+): void {
+  apiThrottles.reduce((priority, apiConfig) => {
+    if (priority >= THROTTLE_BAN_RULE_MAX) {
+      throw new Error(
+        `Throttle rule priority ${priority} exceeds maximum ${THROTTLE_BAN_RULE_MAX}`
+      );
+    }
+
+    const { endpoint, throttle, action } = apiConfig;
+    const ruleName = `${action}${throttle.perIp ? '-per-ip' : ''}-${endpoint.name}`;
+
+    // Build the expression for path and hostname matching
+    const pathExpr = `request.path.matches('${endpoint.path}')`;
+    const hostExpr = `request.headers['host'].matches('${endpoint.hostname}')`;
+    const matchExpr = `${pathExpr} && ${hostExpr}`;
+
+    new gcp.compute.SecurityPolicyRule(
+      ruleName,
+      {
+        securityPolicy: securityPolicy.name,
+        description: `${action === 'throttle' ? 'Throttle' : 'Ban'} rule${throttle.perIp ? ' per-IP' : ''} for ${endpoint.name} API endpoint`,
+        priority,
+        preview,
+        action: action === 'ban' ? 'rate_based_ban' : 'throttle',
+        match: {
+          expr: {
+            expression: matchExpr,
+          },
+        },
+        rateLimitOptions: {
+          // the ban point is banThreshold + rateLimitThreshold count; consider splitting the two up rather than doubling
+          ...(action === 'ban'
+            ? {
+                banDurationSec: 600,
+                banThreshold: {
+                  count: throttle.rate,
+                  intervalSec: throttle.interval,
+                },
+              }
+            : {}),
+          enforceOnKey: throttle.perIp ? 'IP' : 'ALL',
+          rateLimitThreshold: {
+            count: throttle.rate,
+            intervalSec: throttle.interval,
+          },
+          conformAction: 'allow',
+          exceedAction: 'deny(429)', // 429 Too Many Requests
+        },
+      },
+      opts
+    );
+    return priority + RULE_SPACING;
+  }, THROTTLE_BAN_RULE_MIN);
+}
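As a quick check of the priority arithmetic above (not part of the diff): the reduce starts at THROTTLE_BAN_RULE_MIN and steps by RULE_SPACING, so

    // First three throttle/ban rules receive priorities:
    //   100000000, 100000100, 100000200
    // The reserved range accommodates (200000000 - 100000000) / 100 = 1,000,000 rules,
    // all evaluated before the default deny rule at priority 2147483647.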
+
+/**
+ * Adds a default deny rule to a security policy
+ */
+function addDefaultDenyRule(
+  securityPolicy: gcp.compute.SecurityPolicy,
+  preview: boolean,
+  opts: pulumi.ResourceOptions
+): void {
+  new gcp.compute.SecurityPolicyRule(
+    'default-deny',
+    {
+      securityPolicy: securityPolicy.name,
+      description: 'Default rule to deny all other traffic',
+      priority: DEFAULT_DENY_RULE_NUMBER,
+      preview,
+      action: 'deny',
+      match: {
+        config: {
+          srcIpRanges: ['*'],
+        },
+      },
+    },
+    opts
+  );
+}
diff --git a/cluster/pulumi/infra/src/config.ts b/cluster/pulumi/infra/src/config.ts
index ee2cd3cff9..fc0f1d4164 100644
--- a/cluster/pulumi/infra/src/config.ts
+++ b/cluster/pulumi/infra/src/config.ts
@@ -57,6 +57,20 @@ const MonitoringConfigSchema = z.object({
     logAlerts: z.object({}).catchall(z.string()).default({}),
   }),
 });
+const CloudArmorConfigSchema = z.object({
+  enabled: z.boolean(),
+  // "preview" is not pulumi preview, but https://cloud.google.com/armor/docs/security-policy-overview#preview_mode
+  allRulesPreviewOnly: z.boolean(),
+  publicEndpoints: z
+    .object({})
+    .catchall(
+      z.object({
+        domain: z.string(),
+        // TODO (DACH-NY/canton-network-internal#2115) more config
+      })
+    )
+    .default({}),
+});
 export const InfraConfigSchema = z.object({
   infra: z.object({
     ipWhitelisting: z
@@ -75,8 +89,11 @@ export const InfraConfigSchema = z.object({
     extraCustomResources: z.object({}).catchall(z.any()).default({}),
   }),
   monitoring: MonitoringConfigSchema,
+  cloudArmor: CloudArmorConfigSchema,
 });
 
+export type CloudArmorConfig = z.infer<typeof CloudArmorConfigSchema>;
+
 export type Config = z.infer<typeof InfraConfigSchema>;
 
 // eslint-disable-next-line
@@ -92,6 +109,7 @@ console.error(
 export const infraConfig = fullConfig.infra;
 export const monitoringConfig = fullConfig.monitoring;
+export const cloudArmorConfig: CloudArmorConfig = fullConfig.cloudArmor;
 
 type IpRangesDict = { [key: string]: IpRangesDict } | string[];
diff --git a/cluster/pulumi/infra/src/index.ts b/cluster/pulumi/infra/src/index.ts
index e239745638..b8f5d9a787 100644
--- a/cluster/pulumi/infra/src/index.ts
+++ b/cluster/pulumi/infra/src/index.ts
@@ -5,7 +5,8 @@ import { config } from '@lfdecentralizedtrust/splice-pulumi-common';
 
 import { clusterIsResetPeriodically, enableAlerts } from './alertings';
 import { configureAuth0 } from './auth0';
-import { clusterBaseDomain, clusterBasename, monitoringConfig } from './config';
+import { configureCloudArmorPolicy } from './cloudArmor';
+import { cloudArmorConfig, clusterBaseDomain, clusterBasename, monitoringConfig } from './config';
 import { installExtraCustomResources } from './extraCustomResources';
 import {
   getNotificationChannel,
@@ -43,6 +44,8 @@ istioMonitoring(network.ingressNs, []);
 
 configureStorage();
 
+configureCloudArmorPolicy(cloudArmorConfig);
+
 installExtraCustomResources();
 
 let configuredAuth0;
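A sketch of a stack config that satisfies `CloudArmorConfigSchema` above (field values invented; as written it is only usable inside config.ts, where the schema constant is in scope):

    const sample = CloudArmorConfigSchema.parse({
      enabled: true,
      allRulesPreviewOnly: true, // Cloud Armor preview mode, not `pulumi preview`
      publicEndpoints: {
        scan: { domain: 'scan.example.canton.network' },
      },
    });
    // `publicEndpoints` may be omitted entirely; the schema defaults it to {}.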
diff --git a/cluster/pulumi/infra/src/observability.ts b/cluster/pulumi/infra/src/observability.ts
index c4c99bd6d2..f25c7d0eb9 100644
--- a/cluster/pulumi/infra/src/observability.ts
+++ b/cluster/pulumi/infra/src/observability.ts
@@ -13,6 +13,7 @@ import {
   COMETBFT_RETAIN_BLOCKS,
   commandScriptPath,
   ENABLE_COMETBFT_PRUNING,
+  ExactNamespace,
   GCP_PROJECT,
   GrafanaKeys,
   HELM_MAX_HISTORY_SIZE,
@@ -22,6 +23,7 @@ import {
   SPLICE_ROOT,
 } from '@lfdecentralizedtrust/splice-pulumi-common';
 import { infraAffinityAndTolerations } from '@lfdecentralizedtrust/splice-pulumi-common';
+import { SplicePostgres } from '@lfdecentralizedtrust/splice-pulumi-common/src/postgres';
 import { local } from '@pulumi/command';
 import { getSecretVersionOutput } from '@pulumi/gcp/secretmanager/getSecretVersion';
 import { Input } from '@pulumi/pulumi';
@@ -87,22 +89,28 @@ const prometheusExternalUrl = `https://prometheus.${CLUSTER_HOSTNAME}`;
 const shouldIgnoreNoDataOrDataSourceError = clusterIsResetPeriodically;
 
 export function configureObservability(dependsOn: pulumi.Resource[] = []): pulumi.Resource {
+  const namespaceName = 'observability';
   const namespace = new k8s.core.v1.Namespace(
-    'observabilty',
+    namespaceName,
     {
       metadata: {
-        name: 'observability',
+        name: namespaceName,
         // istio really doesn't play well with prometheus
         // it seems to modify the scraping calls from prometheus and change labels/include extra time series that make no sense
         labels: { 'istio-injection': 'disabled' },
       },
     },
-    { dependsOn }
+    {
+      dependsOn,
+      aliases: [
+        { name: 'observabilty' }, // Legacy typo
+      ],
+    }
   );
-  const namespaceName = namespace.metadata.name;
   // If the stack version is updated the crd version might need to be upgraded as well, check the release notes https://artifacthub.io/packages/helm/prometheus-community/kube-prometheus-stack
-  const stackVersion = '75.9.0';
-  const prometheusStackCrdVersion = '0.83.0';
+  const stackVersion = '77.12.1';
+  const prometheusStackCrdVersion = '0.85.0';
+  const postgres = installPostgres({ ns: namespace, logicalName: namespaceName });
   const adminPassword = grafanaKeysFromSecret().adminPassword;
   const prometheusStack = new k8s.helm.v3.Release(
     'observability-metrics',
@@ -123,6 +131,12 @@
         defaultRules: {
           // enable recording rules for all the k8s metrics
           create: true,
+          disabled: {
+            // The timeout is not configurable, and we currently have jobs that are expected to run for more than
+            // the 12 hr timeout, so we disable the alert. There is an alert if the job fails, so the only risk is
+            // a job that never completes.
+            KubeJobNotCompleted: true,
+          },
         },
         kubeControllerManager: {
           enabled: false,
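The Grafana hunks below wire the dashboard state into a dedicated Postgres instance; roughly, and assuming the secret layout implied by the `${postgresPassword}` placeholder:

    // installPostgres(...) (defined at the end of this file) provisions a SplicePostgres
    // instance plus a secret named 'grafana-pg-secret', assumed to carry a
    // `postgresPassword` entry. `grafana.envFromSecret: 'grafana-pg-secret'` injects that
    // entry into the pod environment, and Grafana expands the '${postgresPassword}'
    // placeholder in grafana.ini from the environment at startup, so the database
    // password never appears in the Helm values themselves.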
@@ -264,6 +278,7 @@
     },
     grafana: {
       fullnameOverride: 'grafana',
+      envFromSecret: 'grafana-pg-secret',
       ingress: {
         enabled: false,
       },
@@ -366,6 +381,13 @@
             skip_verify: true,
           }
         : undefined,
+      database: {
+        type: 'postgres',
+        host: pulumi.interpolate`${postgres.address}:5432`,
+        user: 'cnadmin',
+        password: '${postgresPassword}', // replaced from the secret
+        name: 'cantonnet',
+      },
     },
     deploymentStrategy: {
       // required for the pvc
@@ -490,6 +512,7 @@
     'prometheus-node-exporter': {
       fullnameOverride: 'node-exporter',
     },
+    database: {},
   },
   maxHistory: HELM_MAX_HISTORY_SIZE,
 },
@@ -741,24 +764,6 @@ function createGrafanaAlerting(namespace: Input<string>) {
       ),
       'extra_k8s_alerts.yaml': readGrafanaAlertingFile('extra_k8s_alerts.yaml'),
       'traffic_alerts.yaml': readGrafanaAlertingFile('traffic_alerts.yaml')
-        .replaceAll(
-          '$WASTED_TRAFFIC_ALERT_THRESHOLD_BYTES',
-          (monitoringConfig.alerting.alerts.trafficWaste.kilobytes * 1024).toString()
-        )
-        .replaceAll(
-          '$WASTED_TRAFFIC_ALERT_QUANTILE',
-          monitoringConfig.alerting.alerts.trafficWaste.quantile.toString()
-        )
-        .replaceAll(
-          '$WASTED_TRAFFIC_ALERT_TIME_RANGE_MINS',
-          monitoringConfig.alerting.alerts.trafficWaste.overMinutes.toString()
-        )
-        .replaceAll(
-          '$WASTED_TRAFFIC_ALERT_EXTRA_MEMBER_FILTER',
-          monitoringConfig.alerting.alerts.svNames
-            .map(p => `,member!~"PAR::${p}::.*"`)
-            .join('')
-        )
         .replaceAll(
           '$CONFIRMATION_REQUESTS_TOTAL_ALERT_TIME_RANGE_MINS',
           monitoringConfig.alerting.alerts.confirmationRequests.total.overMinutes.toString()
@@ -852,3 +857,14 @@ function grafanaKeysFromSecret(): pulumi.Output<GrafanaKeys> {
     };
   });
 }
+
+function installPostgres(namespace: ExactNamespace): SplicePostgres {
+  return new SplicePostgres(
+    namespace,
+    'grafana-pg',
+    'grafana-pg',
+    'grafana-pg-secret',
+    { db: { volumeSize: '20Gi' } }, // A tiny pvc should be enough for grafana
+    true // overrideDbSizeFromValues
+  );
+}
diff --git a/cluster/pulumi/package-lock.json b/cluster/pulumi/package-lock.json
index ed334399a7..724527cea4 100644
--- a/cluster/pulumi/package-lock.json
+++ b/cluster/pulumi/package-lock.json
@@ -166,6 +166,7 @@
     "dependencies": {
       "@lfdecentralizedtrust/splice-pulumi-common": "1.0.0",
       "@pulumi/auth0": "3.21.0",
+      "@pulumi/gcp": "8.32.1",
       "@pulumi/kubernetes-cert-manager": "0.2.0",
       "@pulumiverse/grafana": "0.16.3"
     }
diff --git a/cluster/pulumi/splitwell/src/splitwell.ts b/cluster/pulumi/splitwell/src/splitwell.ts
index b29b1582c0..43dc95a09a 100644
--- a/cluster/pulumi/splitwell/src/splitwell.ts
+++ b/cluster/pulumi/splitwell/src/splitwell.ts
@@ -27,6 +27,7 @@ import {
 } from '@lfdecentralizedtrust/splice-pulumi-common-validator';
 import { installValidatorApp } from '@lfdecentralizedtrust/splice-pulumi-common-validator/src/validator';
 
+import { spliceConfig } from '../../common/src/config/config';
 import { splitwellConfig } from '../../common/src/config/splitwellConfig';
 
 export async function installSplitwell(
@@ -48,6 +49,7 @@
     'splitwell-pg',
     'splitwell-pg',
     activeVersion,
+    spliceConfig.pulumiProjectConfig.cloudSql,
     splitPostgresInstances
   );
 
@@ -70,7 +72,15 @@
   );
 
   const swPostgres =
-    sharedPostgres || postgres.installPostgres(xns, 'sw-pg', 'sw-pg',
activeVersion, true); + sharedPostgres || + postgres.installPostgres( + xns, + 'sw-pg', + 'sw-pg', + activeVersion, + spliceConfig.pulumiProjectConfig.cloudSql, + true + ); const splitwellDbName = 'app_splitwell'; const scanAddress = `http://scan-app.sv-1:5012`; @@ -106,7 +116,14 @@ export async function installSplitwell( const validatorPostgres = sharedPostgres || - postgres.installPostgres(xns, 'validator-pg', 'validator-pg', activeVersion, true); + postgres.installPostgres( + xns, + 'validator-pg', + 'validator-pg', + activeVersion, + spliceConfig.pulumiProjectConfig.cloudSql, + true + ); const validatorDbName = 'val_splitwell'; const extraDependsOn = imagePullDeps.concat( diff --git a/cluster/pulumi/sv-canton/src/canton.ts b/cluster/pulumi/sv-canton/src/canton.ts index 1766f106a8..214cab55df 100644 --- a/cluster/pulumi/sv-canton/src/canton.ts +++ b/cluster/pulumi/sv-canton/src/canton.ts @@ -22,6 +22,8 @@ import { InStackCometBftDecentralizedSynchronizerNode, } from '@lfdecentralizedtrust/splice-pulumi-sv-canton/src/decentralizedSynchronizerNode'; +import { spliceConfig } from '../../common/src/config/config'; + export function installCantonComponents( xns: ExactNamespace, migrationId: DomainMigrationIndex, @@ -80,16 +82,25 @@ export function installCantonComponents( `participant-${migrationId}-pg`, `participant-pg`, migrationInfo.version, + svConfig.participant?.cloudSql || spliceConfig.pulumiProjectConfig.cloudSql, true, { isActive: migrationStillRunning, migrationId, disableProtection } ); const mediatorPostgres = dbs?.mediator || - installPostgres(xns, `mediator-${migrationId}-pg`, `mediator-pg`, migrationInfo.version, true, { - isActive: migrationStillRunning, - migrationId, - disableProtection, - }); + installPostgres( + xns, + `mediator-${migrationId}-pg`, + `mediator-pg`, + migrationInfo.version, + svConfig.mediator?.cloudSql || spliceConfig.pulumiProjectConfig.cloudSql, + true, + { + isActive: migrationStillRunning, + migrationId, + disableProtection, + } + ); const sequencerPostgres = dbs?.sequencer || installPostgres( @@ -97,6 +108,7 @@ export function installCantonComponents( `sequencer-${migrationId}-pg`, `sequencer-pg`, migrationInfo.version, + svConfig.sequencer?.cloudSql || spliceConfig.pulumiProjectConfig.cloudSql, true, { isActive: migrationStillRunning, migrationId, disableProtection } ); diff --git a/cluster/pulumi/sv-canton/src/decentralizedSynchronizerNode.ts b/cluster/pulumi/sv-canton/src/decentralizedSynchronizerNode.ts index 4504ef475f..af97794aff 100644 --- a/cluster/pulumi/sv-canton/src/decentralizedSynchronizerNode.ts +++ b/cluster/pulumi/sv-canton/src/decentralizedSynchronizerNode.ts @@ -119,6 +119,7 @@ abstract class InStackDecentralizedSynchronizerNode postgresName: dbs.mediatorPostgres.instanceName, ...(dbs.setCoreDbNames ? { databaseName: mediatorDbName } : {}), }, + additionalEnvVars: svConfig.mediator?.additionalEnvVars, }, enablePostgresMetrics: true, metrics: { diff --git a/cluster/pulumi/sv-runbook/src/installNode.ts b/cluster/pulumi/sv-runbook/src/installNode.ts index 403ffbd52e..26dbe9166c 100644 --- a/cluster/pulumi/sv-runbook/src/installNode.ts +++ b/cluster/pulumi/sv-runbook/src/installNode.ts @@ -252,14 +252,6 @@ async function installSvAndValidator( const bftSequencerConnection = !svConfig.participant || svConfig.participant.bftSequencerConnection; - const topologyChangeDelayEnvVars = svsConfig?.synchronizer?.topologyChangeDelay - ? 
[ - { - name: 'ADDITIONAL_CONFIG_TOPOLOGY_CHANGE_DELAY', - value: `canton.sv-apps.sv.topology-change-delay-duration=${svsConfig.synchronizer.topologyChangeDelay}`, - }, - ] - : []; const disableBftSequencerConnectionEnvVars = bftSequencerConnection ? [] : [ @@ -268,9 +260,9 @@ async function installSvAndValidator( value: 'canton.sv-apps.sv.bft-sequencer-connection = false', }, ]; - const svAppAdditionalEnvVars = (svConfig.svApp?.additionalEnvVars || []) - .concat(topologyChangeDelayEnvVars) - .concat(disableBftSequencerConnectionEnvVars); + const svAppAdditionalEnvVars = (svConfig.svApp?.additionalEnvVars || []).concat( + disableBftSequencerConnectionEnvVars + ); const valuesFromYamlFile = loadYamlFromFile( `${SPLICE_ROOT}/apps/app/src/pack/examples/sv-helm/sv-values.yaml`, diff --git a/cluster/pulumi/sv-runbook/src/postgres.ts b/cluster/pulumi/sv-runbook/src/postgres.ts index 9b4c6575ad..33ca471830 100644 --- a/cluster/pulumi/sv-runbook/src/postgres.ts +++ b/cluster/pulumi/sv-runbook/src/postgres.ts @@ -22,16 +22,32 @@ export function installPostgres( isActive: boolean = true ): SplicePostgres | CloudPostgres { if (spliceConfig.pulumiProjectConfig.cloudSql.enabled) { - return new CloudPostgres(xns, name, name, secretName, isActive, { - disableProtection: supportsSvRunbookReset, - }); + return new CloudPostgres( + xns, + name, + name, + secretName, + spliceConfig.pulumiProjectConfig.cloudSql, + isActive, + { + disableProtection: supportsSvRunbookReset, + } + ); } else { const valuesFromFile = loadYamlFromFile( `${SPLICE_ROOT}/apps/app/src/pack/examples/sv-helm/${selfHostedValuesFile}` ); const volumeSizeOverride = determineVolumeSizeOverride(valuesFromFile.db?.volumeSize); const values = _.merge(valuesFromFile || {}, { db: { volumeSize: volumeSizeOverride } }); - return new SplicePostgres(xns, name, name, secretName, values); + return new SplicePostgres( + xns, + name, + name, + secretName, + values, + undefined, + supportsSvRunbookReset + ); } } diff --git a/cluster/pulumi/validator-runbook/src/installNode.ts b/cluster/pulumi/validator-runbook/src/installNode.ts index eefac91f81..2c3c94872b 100644 --- a/cluster/pulumi/validator-runbook/src/installNode.ts +++ b/cluster/pulumi/validator-runbook/src/installNode.ts @@ -245,6 +245,8 @@ async function installValidator( enablePostgresMetrics: true, ...spliceInstanceNames, maxVettingDelay: networkWideConfig?.maxVettingDelay, + additionalEnvVars: validatorConfig.validatorApp?.additionalEnvVars, + additionalJvmOptions: validatorConfig.validatorApp?.additionalJvmOptions, }; const validatorValuesWithOnboardingOverride = onboardingSecret diff --git a/cluster/pulumi/validator-runbook/src/partyAllocator.ts b/cluster/pulumi/validator-runbook/src/partyAllocator.ts index 1a51040bb4..eb69787f9f 100644 --- a/cluster/pulumi/validator-runbook/src/partyAllocator.ts +++ b/cluster/pulumi/validator-runbook/src/partyAllocator.ts @@ -21,12 +21,18 @@ export function installPartyAllocator( 'party-allocator', 'splice-party-allocator', { - jsonLedgerApiUrl: `http://participant-${DecentralizedSynchronizerUpgradeConfig.active.id}:7575`, - scanApiUrl: 'http://scan-app.sv-1:5012', - validatorApiUrl: 'http://validator-app:5003', - maxParties: config.maxParties, - keysDirectory: '/keys', - parallelism: config.parallelism, + config: { + token: '${SPLICE_APP_VALIDATOR_LEDGER_API_AUTH_TOKEN}', + userId: '${SPLICE_APP_VALIDATOR_LEDGER_API_AUTH_USER_NAME}', + jsonLedgerApiUrl: `http://participant-${DecentralizedSynchronizerUpgradeConfig.active.id}:7575`, + scanApiUrl: 
'http://scan-app.sv-1:5012', + validatorApiUrl: 'http://validator-app:5003', + maxParties: config.maxParties, + keyDirectory: '/keys', + parallelism: config.parallelism, + preapprovalRetries: config.preapprovalRetries, + preapprovalRetryDelayMs: config.preapprovalRetryDelayMs, + }, }, activeVersion, { dependsOn } diff --git a/cluster/pulumi/validator1/src/validator1.ts b/cluster/pulumi/validator1/src/validator1.ts index db89011514..770510f441 100644 --- a/cluster/pulumi/validator1/src/validator1.ts +++ b/cluster/pulumi/validator1/src/validator1.ts @@ -30,6 +30,7 @@ import { installValidatorSecrets, } from '@lfdecentralizedtrust/splice-pulumi-common-validator/src/validator'; +import { spliceConfig } from '../../common/src/config/config'; import { validator1Config } from './config'; export async function installValidator1( @@ -54,12 +55,26 @@ export async function installValidator1( const imagePullDeps = imagePullSecret(xns); const defaultPostgres = !splitPostgresInstances - ? postgres.installPostgres(xns, 'postgres', 'postgres', activeVersion, false) + ? postgres.installPostgres( + xns, + 'postgres', + 'postgres', + activeVersion, + spliceConfig.pulumiProjectConfig.cloudSql, + false + ) : undefined; const validatorPostgres = defaultPostgres || - postgres.installPostgres(xns, `validator-pg`, `validator-pg`, activeVersion, true); + postgres.installPostgres( + xns, + `validator-pg`, + `validator-pg`, + activeVersion, + spliceConfig.pulumiProjectConfig.cloudSql, + true + ); const validatorDbName = `validator1`; const validatorSecrets = await installValidatorSecrets({ diff --git a/cluster/scripts/node-backup.sh b/cluster/scripts/node-backup.sh index 8dbf318aea..b75a58baae 100755 --- a/cluster/scripts/node-backup.sh +++ b/cluster/scripts/node-backup.sh @@ -116,7 +116,7 @@ function backup_cloudsql() { _error_msg "$output" fi retry_count=$((retry_count+1)) - sleep 10 + sleep 30 else echo "Backup succeeded" return 0 diff --git a/create-bundle.sh b/create-bundle.sh index 38a47e8abc..e2387f8741 100755 --- a/create-bundle.sh +++ b/create-bundle.sh @@ -133,9 +133,7 @@ adjust_shellscript_binary # pack releases cd "$RELEASES_DIR" rm -f "${RELEASE}.tar.gz" -rm -f "${RELEASE}.zip" tar -zcf "${RELEASE}.tar.gz" "$RELEASE" & -zip -rq "${RELEASE}.zip" "$RELEASE"/* & wait echo "Successfully created release bundle for release $RELEASE" diff --git a/docs/src/app_dev/overview/splice_app_apis.rst b/docs/src/app_dev/overview/splice_app_apis.rst index 5d168c3ce5..4158924cc3 100644 --- a/docs/src/app_dev/overview/splice_app_apis.rst +++ b/docs/src/app_dev/overview/splice_app_apis.rst @@ -8,12 +8,9 @@ Splice HTTP APIs ================ -.. _app_dev_public_http_apis: +.. _app_dev_http_apis: -Public HTTP APIs ----------------- - -There are two sets of public HTTP APIs exposed by Splice applications, as can be seen +There are two sets of HTTP APIs exposed by Splice applications, as can be seen in the diagram below: .. @@ -35,7 +32,7 @@ in the diagram below: The Ledger API is not part of Splice, and thus not documented here. See :ref:`app_dev_ledger_api` for details on how to use the Ledger API. -Some of the Splice apps also define additional HTTP APIs that are not public, and are subject to change without notice. +Some of the Splice apps also define additional HTTP APIs that are considered internal and are subject to change without notice. 
If you do need some of them for your app, please create an issue on https://github.com/hyperledger-labs/splice, so that you can align with the Splice team on the API, your needs, and the required stability guarantees. diff --git a/docs/src/app_dev/testing/localnet.rst b/docs/src/app_dev/testing/localnet.rst index 1ba2546137..3d6d93f0c7 100644 --- a/docs/src/app_dev/testing/localnet.rst +++ b/docs/src/app_dev/testing/localnet.rst @@ -8,33 +8,57 @@ Docker-Compose Based Deployment of a Local Network ================================================== -Localnet provides a straightforward topology comprising three participants, three validators, a PostgreSQL database, and several web applications (wallet, sv, scan) behind an NGINX gateway. Each validator plays a distinct role within the Splice ecosystem: +LocalNet provides a straightforward topology comprising three participants, three validators, a PostgreSQL database, and several web applications (wallet, sv, scan) behind an NGINX gateway. Each validator plays a distinct role within the Splice ecosystem: - **app-provider**: for the user operating their application - **app-user**: for a user wanting to use the app from the App Provider - **sv**: for providing the Global Synchronizer and handling AMT -Designed primarily for development and testing, Localnet is not intended for production use. +Designed primarily for development and testing, LocalNet is not intended for production use. Setup ----- -Before starting, ensure you have configured the following environment variables: +1. Download the release artifacts from the + |bundle_download_link| link, and extract the bundle: -- **IMAGE_TAG**: Specifies the version of Splice to be used in Localnet. -- **LOCALNET_DIR**: Specifies the path to the Localnet directory. + .. parsed-literal:: -You can enable or disable any of the three validators using Docker Compose profiles (e.g., ``--profile app-provider``) alongside the corresponding environment variables (e.g., ``APP_PROVIDER_PROFILE=on/off``). By default, all three validators are active. + tar xzvf |version|\_splice-node.tar.gz -Additional environment variables include: + The extracted docker compose files defining LocalNet are located in + ``splice-node/docker-compose/localnet``. + +2. Export these two environment variables used in the later commands: + + - **LOCALNET_DIR**: Specifies the path to the LocalNet directory. + - **IMAGE_TAG**: Specifies the version of Splice to be used in LocalNet. + + For the bundle that you downloaded use: + + .. parsed-literal:: + + export LOCALNET_DIR=$PWD/splice-node/docker-compose/localnet + |image_tag_set_plain| + +3. See :ref:`use-localnet` for the commands to start, stop, inspect, and administrate the LocalNet nodes. + +Optional: +use the Docker Compose profiles (e.g., ``--profile app-provider``) alongside the corresponding environment variables (e.g., ``APP_PROVIDER_PROFILE=on/off``) +to disable specific validator nodes; +for example, to reduce the resource needs of LocalNet. +By default, all three validators are active. + +Optional: use the following additional environment variables to configure: -- **LOCALNET_ENV_DIR**: Overrides the default environment file directory. The default is ``$LOCALNET_DIR/env``. - **LOCALNET_DIR/compose.env**: Contains Docker Compose configuration variables. +- **LOCALNET_ENV_DIR**: Overrides the default environment file directory. The default is ``$LOCALNET_DIR/env``. 
- **LOCALNET_ENV_DIR/common.env**: Shared environment variables across Docker Compose and container configurations. It sets default ports, DB credentials, and Splice UI configurations.
 
 Resource constraints for containers can be configured via:
 
 - **LOCALNET_DIR/resource-constraints.yaml**
+
 
 Exposed Ports
 -------------
@@ -65,7 +89,7 @@ UI Ports are defined as follows:
 Database
 --------
 
-Localnet uses a single PostgreSQL database for all components. Database configurations are sourced from ``LOCALNET_ENV_DIR/postgres.env``.
+LocalNet uses a single PostgreSQL database for all components. Database configurations are sourced from ``LOCALNET_ENV_DIR/postgres.env``.
 
 Application UIs
 ---------------
@@ -90,10 +114,18 @@ Application UIs
   - **URL**: `http://scan.localhost:4000 <http://scan.localhost:4000>`_
   - **Description**: Interface to monitor transactions.
 
-  .. note::
-    `LocalNet` rounds may take up to 6 rounds (equivalent to one hour) to display in the scan UI.
+.. note::
+  `LocalNet` rounds may take up to 6 rounds (equivalent to one hour) to display in the scan UI.
+
+In most scenarios, the ``*.localhost`` domains (e.g., ``http://scan.localhost``) will resolve to your local host IP ``127.0.0.1``.
+There are some situations where the resolution does not occur and the solution is to add entries to your ``/etc/hosts`` file. For example,
+to resolve ``http://scan.localhost`` and ``http://wallet.localhost`` add these entries to the file:
+
+.. code-block::
+
+  127.0.0.1 scan.localhost
+  127.0.0.1 wallet.localhost
 
-The ``*.localhost`` domains will resolve to your local host IP ``127.0.0.1``.
 
 Default Wallet Users
 --------------------
@@ -102,6 +134,8 @@ Default Wallet Users
 - **App Provider**: app-provider
 - **SV**: sv
 
+.. _swagger-ui:
+
 Swagger UI
 ----------
 
@@ -114,11 +148,14 @@ Note: Some endpoints require a JWT token when using the **Try it out** feature.
 For proper functionality, Swagger UI relies on a localhost nginx proxy for ``canton.localhost`` configured for each participant.
 For example, the ``JSON Ledger API HTTP Endpoints`` for the app-provider can be accessed at the nginx proxy URL ``http://canton.localhost:${APP_PROVIDER_UI_PORT}`` via Swagger UI,
 which corresponds to accessing ``localhost:3${PARTICIPANT_JSON_API_PORT}`` directly. The nginx proxy only adds additional headers to resolve CORS issues within Swagger UI.
 
-Run in localnet
-----------------
+.. _use-localnet:
 
-start
-^^^^^
+Use LocalNet
+------------
+
+
+Start LocalNet nodes
+^^^^^^^^^^^^^^^^^^^^
 
 .. code-block:: bash
 
@@ -130,8 +167,8 @@
     --profile app-provider \
     --profile app-user up -d
 
-stop
-^^^^
+Stop LocalNet nodes
+^^^^^^^^^^^^^^^^^^^
 
 .. code-block:: bash
 
@@ -143,8 +180,10 @@
     --profile app-provider \
     --profile app-user down -v
 
-start with swagger-ui
-^^^^^^^^^^^^^^^^^^^^^^
+Start nodes including a swagger-ui
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+See :ref:`swagger-ui` for more information.
 
 .. code-block:: bash
 
@@ -157,8 +196,10 @@
     --profile app-user \
     --profile swagger-ui up -d
 
-stop with swagger-ui
-^^^^^^^^^^^^^^^^^^^^^
+Stop nodes including a swagger-ui
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+See :ref:`swagger-ui` for more information.
 
 .. code-block:: bash
 
@@ -171,8 +212,16 @@
     --profile app-user \
     --profile swagger-ui down -v
 
-console
-^^^^^^^
+Access the Canton Admin Console
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+Use the Canton Admin Console to inspect and modify the run configuration
+of the Canton sequencer, mediator, and participant nodes in your LocalNet deployment.
+
+* `Canton Console How-To `__
+* `Canton Console commands `__
+
 
 .. code-block:: bash
 
diff --git a/docs/src/conf.py b/docs/src/conf.py
index 52a66c3ed6..ac34dae357 100644
--- a/docs/src/conf.py
+++ b/docs/src/conf.py
@@ -174,6 +174,7 @@ def make_id(string):
 
 .. |chart_version_set| replace:: ``export CHART_VERSION={chart_version}``
 .. |image_tag_set| replace:: ``export IMAGE_TAG={version}``
+.. |image_tag_set_plain| replace:: export IMAGE_TAG={version}
 
 .. |bundle_download_link| replace:: :raw-html:`Download Bundle`
 .. |openapi_download_link| replace:: :raw-html:`Download OpenAPI specs`
diff --git a/docs/src/deployment/observability/metrics.rst b/docs/src/deployment/observability/metrics.rst
index 3cfd02bce2..707bcf7d84 100644
--- a/docs/src/deployment/observability/metrics.rst
+++ b/docs/src/deployment/observability/metrics.rst
@@ -64,6 +64,23 @@ Configuring a docker compose deployment to enable metrics
 When using docker compose for the deployment, the metrics are enabled by default.
 These can be accessed at `http://validator.localhost/metrics` for the validator app and at `http://participant.localhost/metrics` for the participant.
 
+.. _enable_extra_metric_triggers:
+
+Enabling extra metric triggers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The validator app can be configured to run a trigger
+that polls the topology state and exports metrics summarizing that state.
+These metrics have the prefix ``splice.synchronizer-topology``.
+See the :ref:`validator-metrics-reference` for the concrete set of metrics.
+
+This trigger is disabled by default.
+As per the information in :ref:`Adding ad-hoc configuration `,
+add an environment variable
+``ADDITIONAL_CONFIG_TOPOLOGY_METRICS_EXPORT=canton.validator-apps.validator.automation.topology-metrics-polling-interval = 5m``
+to enable the trigger with a polling interval of 5 minutes.
+
+
 .. _metrics_grafana_dashboards:
 
 Grafana Dashboards
diff --git a/docs/src/release_notes.rst b/docs/src/release_notes.rst
index 073c384e11..8277005fd9 100644
--- a/docs/src/release_notes.rst
+++ b/docs/src/release_notes.rst
@@ -8,18 +8,55 @@ Release Notes
 =============
 
-Upcoming
---------
+0.4.20
+------
+
+  - Deployment
+
+    - Fix a bug where setting the affinity for the ``splice-cometbft`` and ``splice-global-domain`` helm charts would remove the anti affinity for the ``cometbft`` and the ``sequencer`` deployment. This ensures that if multiple SVs are run on the same nodes, no more than one ``cometbft`` pod can be deployed on the same node and no more than one ``sequencer`` pod can be deployed to the same node (a ``cometbft`` pod can still share a node with a ``sequencer`` pod). This can be disabled by setting the ``enableAntiAffinity`` helm value to ``false`` (default ``true``).
+
+    - Replace ``-Dscala.concurrent.context.minThreads=8`` with ``-Dscala.concurrent.context.numThreads=8`` and set ``-XX:ActiveProcessorCount=8`` in the ``defaultJvmOptions`` for all the helm charts that deploy scala apps. This should ensure that the internal execution contexts spawn 8 threads to handle processing and that the JVM is configured for 8 CPUs as well. The previous behavior would spawn up to the number of available processors, which can be up to the number of CPUs on the actual node if no CPU limit is set. This should avoid overloading the nodes during heavy processing.
+
+  - SV
+
+    - UI
+
+      - Add the ability to specify a validator party hint when generating onboarding secrets.
+ + - The UI now provides a formatted message for easily sharing onboarding details with validator operators. + + +0.4.19 +------ + + - Sequencer + + - Fix a regression introduced in 0.4.18 that made topology transactions significantly more expensive to process. - Docker images - All app & UI images now use a non-root user. + - Validator + + - Add a trigger to export these party metrics: + + - ``validator_synchronizer_topology_num_parties``: + Counts the number of parties allocated on the Global Synchronizer + - ``validator_synchronizer_topology_num_parties_per_participant``: + Uses the label ``participant_id`` and + counts the number of parties hosted on the Global Synchronizer per participant. + Note that multi-hosted parties are counted for each participant they are hosted on. + + The trigger does not run by default. See :ref:`enable_extra_metric_triggers` + for instructions on how to enable it. + - SV - Deployment - - Remove CPU limits from the helm charts for ``scan``, ``mediator`` and ``sequencer`` apps. This should avoid issues with cpu scheduling that might lead to performance degradations. + - Remove CPU limits from the helm charts for ``scan``, ``mediator`` and ``sequencer`` apps. + This should avoid issues with cpu scheduling that might lead to performance degradations. - UI diff --git a/docs/src/sv_operator/sv_network_resets.rst b/docs/src/sv_operator/sv_network_resets.rst index 23ad667bdc..ddb86b4d03 100644 --- a/docs/src/sv_operator/sv_network_resets.rst +++ b/docs/src/sv_operator/sv_network_resets.rst @@ -21,26 +21,94 @@ redeploy your node. To complete the reset, go through the following steps: -1. Take a backup of the DSO configuration (replace YOUR_SCAN_URL with your own scan e.g. |gsf_scan_url|):: - - curl -sSL --fail-with-body https://YOUR_SCAN_URL/api/scan/v0/dso > backup.json - - The backup allows you to verify that the SV weights and package versions do not change as part of the reset. -2. Check your desired coin price in the SV UI. -3. Uninstall all helm charts. -4. Delete all PVCs, docker volumes and databases (including databases - in Amazon AWS, GCP CloudSQL or similar). -5. Find the new ``chainIdSuffix`` for cometbft. Usually this will just increase by 1 on a network - reset but double check with the other SV operators on what has been agreed upon. -6. Redeploy your node with migration id 0, the new ``chainIdSuffix``, and ``initialAmuletPrice`` - in the SV helm values. Note that this requires changes in the helm values of all charts. -7. Take a backup of your node identities as they change as part of the - reset. -8. Verify that the SV weights (after all SVs rejoined after the reset) and package versions did not change by querying scan again after the reset. -9. Verify that your coin price vote has been set as desired. - -.. code-block:: bash - - curl -sSL --fail-with-body https://YOUR_SCAN_URL/api/scan/v0/dso > current_state.json - diff -C2 <(jq '.dso_rules.contract.payload.svs.[] | [.[1].name, .[1].svRewardWeight]' < backup.json) <(jq '.dso_rules.contract.payload.svs.[] | [.[1].name, .[1].svRewardWeight]' < current_state.json) - diff <(jq '.amulet_rules.contract.payload.configSchedule.initialValue.packageConfig' < backup.json) <(jq '.amulet_rules.contract.payload.configSchedule.initialValue.packageConfig' < current_state.json) +1. Backup information to be preserved across the reset + + a. Take a backup of the DSO configuration (replace YOUR_SCAN_URL with your own scan e.g. 
|gsf_scan_url|)::
+
+      curl -sSL --fail-with-body https://YOUR_SCAN_URL/api/scan/v0/dso > backup.json
+
+      The backup allows you to verify that the SV weights and package versions do not change as part of the reset.
+   b. Make a note of your desired amulet price in the SV UI.
+   c. Make a note of all ongoing votes in the SV UI.
+      Ongoing votes will be lost as part of the reset and need to be recreated manually after the reset.
+   d. Make a note of all featured apps::
+
+      curl -sSL --fail-with-body https://YOUR_SCAN_URL/api/scan/v0/featured-apps > featured.json
+
+      Featured app rights will be lost as part of the reset and need to be recreated manually after the reset.
+
+   e. Make a note of the current round in the Scan UI.
+      The current round number affects the reward distribution.
+
+2. Decommission your old node
+
+   a. Uninstall all helm charts.
+   b. Delete all PVCs, docker volumes and databases (including databases
+      in Amazon AWS, GCP CloudSQL or similar).
+
+3. Deploy your new node
+
+   a. Set the migration id to 0 in helm chart values. The migration id appears in all helm charts,
+      both as its own value, e.g.::
+
+        migration:
+          id: "MIGRATION_ID"
+
+      and as part of various values, e.g.::
+
+        sequencerPublicUrl: "https://sequencer-MIGRATION_ID.sv.YOUR_HOSTNAME"
+
+   b. Set ``initialAmuletPrice`` to your desired price in ``sv-values.yaml`` (see step 1.b).
+   c. Set ``chainIdSuffix`` to the new value in ``cometbft-values.yaml`` and ``info-values.yaml``.
+      Usually this will just increase by 1 on a network reset but double check with
+      the other SV operators on what has been agreed upon.
+   d. Founding node only: Set all helm chart values that affect network parameters,
+      such that the verification steps listed below pass.
+   e. Install all helm charts.
+   f. Wait until your SV node is sending status reports.
+
+4. Verify that network parameters were preserved
+
+   a. Confirm that the reset did not change the dso rules
+      by repeating step 1.a and comparing the result:
+
+      .. code-block:: bash
+
+        curl -sSL --fail-with-body https://YOUR_SCAN_URL/api/scan/v0/dso > current_state.json
+
+      The reset should preserve SV reward weights, i.e., the following diff should be empty:
+
+      .. code-block:: bash
+
+        jq '.dso_rules.contract.payload.svs.[] | [.[1].name, .[1].svRewardWeight]' backup.json > weights_backup.json
+        jq '.dso_rules.contract.payload.svs.[] | [.[1].name, .[1].svRewardWeight]' current_state.json > weights_current.json
+        diff -C2 weights_backup.json weights_current.json
+
+      The reset should also preserve the amulet rules modulo cryptographic keys, i.e., the following diff should
+      only show changes to the dso and synchronizer namespaces:
+
+      .. code-block:: bash
+
+        jq '.amulet_rules.contract.payload' backup.json > amulet_backup.json
+        jq '.amulet_rules.contract.payload' current_state.json > amulet_current.json
+        diff amulet_backup.json amulet_current.json
+
+   b. Check your desired coin price in the SV UI, and verify that it matches
+      the value from before the reset (see step 1.b.)
+   c. Check the current round in the Scan UI, and verify that it matches the expected value.
+      This can either be roughly the same value as before the reset (see step 1.e.), or
+      a different value if the SV operators agreed on that, e.g., to match the minting curve
+      to a different network.
+
+5. Take a backup of your node identities as they change as part of the
+   reset.
+
+6. Other post-reset actions
+
+   a. Recreate votes that were ongoing at the time of the reset, see step 1.c.
+   b. Re-issue onboarding secrets to validators you are sponsoring (TestNet only, on DevNet they can self-issue secrets).
+   c. Recreate votes for featured apps when requested by validators.
+      The expectation is that validators reach out to their sponsor and the sponsor initiates the vote.
+      If necessary, consult the list of featured apps you backed up in step 1.d.
+   d. Update your auto-sweeping configuration, as party ids change as part
+      of the reset.
diff --git a/nix/canton-sources.json b/nix/canton-sources.json
index 44bf7d0755..c2f068e2ed 100644
--- a/nix/canton-sources.json
+++ b/nix/canton-sources.json
@@ -1,7 +1,7 @@
 {
-  "version": "3.4.0-snapshot.20250922.16951.0.v1eb3f268",
+  "version": "3.4.0-snapshot.20251014.17186.0.v288cfb93",
   "tooling_sdk_version": "3.3.0-snapshot.20250415.13756.0.vafc5c867",
   "daml_release": "v3.3.0-snapshot.20250417.0",
-  "enterprise_sha256": "sha256:1q4pryg33abva08i8qv5j0l23j4lljhzrd7q12zbhl7hyxipckgz",
-  "oss_sha256": "sha256:1zsdpz1wdgbh1yxnad40mklzx81mb4n6d1j7xxwn9d8ma1xrmxg0"
+  "enterprise_sha256": "sha256:15w5mjd1s0jhjkdqgh7m1zmp6q09310r4pvahkxih531xy0bggwk",
+  "oss_sha256": "sha256:0hcxx8scnw204z3f3nkq9v3cjk3wryan6rvzw9c1v92phd64rr29"
 }
diff --git a/party-allocator/package-lock.json b/party-allocator/package-lock.json
index bc9fbe05dd..d4ce2c8d0b 100644
--- a/party-allocator/package-lock.json
+++ b/party-allocator/package-lock.json
@@ -26,7 +26,7 @@
     },
     "../token-standard/dependencies/canton-json-api-v2/openapi-ts-client": {
       "name": "@lfdecentralizedtrust/canton-json-api-v2",
-      "version": "3.3.0-SNAPSHOT",
+      "version": "3.4.0-SNAPSHOT",
       "dev": true,
       "license": "Unlicense",
       "dependencies": {
diff --git a/party-allocator/package.json b/party-allocator/package.json
index 7bd752ef50..63dfc54957 100644
--- a/party-allocator/package.json
+++ b/party-allocator/package.json
@@ -3,10 +3,10 @@
   "version": "1.0.0",
   "main": "build/index.js",
   "scripts": {
-    "build": "esbuild src/index.ts --bundle --platform=node --format=esm --outfile=build/bundle.js --sourcemap=inline --banner:js=\"import { createRequire } from 'node:module'; const require = createRequire(import.meta.url);\"",
-    "compile": "tsc",
+    "build": "tsc --noEmit && esbuild src/index.ts --bundle --platform=node --format=esm --outfile=build/bundle.js --sourcemap=inline --banner:js=\"import { createRequire } from 'node:module'; const require = createRequire(import.meta.url);\"",
+    "compile": "tsc --noEmit",
     "start": "node ./build/index.js",
-    "check": "npm run format:check && npm run lint:check",
+    "check": "tsc --noEmit && npm run format:check && npm run lint:check",
     "fix": "npm run format:fix && npm run lint:fix",
     "format:check": "prettier --check -- src",
     "format:fix": "prettier --write -- src",
diff --git a/party-allocator/src/config.ts b/party-allocator/src/config.ts
index 6fc0f5252c..f09f6c9770 100644
--- a/party-allocator/src/config.ts
+++ b/party-allocator/src/config.ts
@@ -12,6 +12,8 @@ const partyAllocationsSchema = z.object({
   keyDirectory: z.string(),
   parallelism: z.number().default(20),
   batchSize: z.number().default(1000),
+  preapprovalRetries: z.number().default(120),
+  preapprovalRetryDelayMs: z.number().default(1000),
 });
 
 type PartyAllocationsConf = z.infer<typeof partyAllocationsSchema>;
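With the defaults above, the preapproval polling budget works out to roughly two minutes: 120 attempts with a 1000 ms delay between them. The call site in index.ts (shown in the next hunk) threads the two knobs through as the trailing arguments of `client.retry`:

    // Sketch of the call as used below; `client`, `getPreapproval`, and `partyId`
    // are in scope inside setupPreapproval:
    await client.retry(
      "getPreapproval",
      () => getPreapproval(client, partyId),
      config.preapprovalRetries,      // default 120
      config.preapprovalRetryDelayMs, // default 1000 ms
    );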
diff --git a/party-allocator/src/index.ts b/party-allocator/src/index.ts
index bc78b28441..dac8c1cfa3 100644
--- a/party-allocator/src/index.ts
+++ b/party-allocator/src/index.ts
@@ -6,7 +6,7 @@ import {
   Command,
   DisclosedContract,
 } from "@lfdecentralizedtrust/canton-json-api-v2-openapi";
-import { readdir, writeFile } from "node:fs/promises";
+import { opendir, writeFile } from "node:fs/promises";
 import { config } from "./config.js";
 import fs from "fs";
 import { logger } from "./logger.js";
@@ -187,7 +187,12 @@ async function setupPreapproval(
     [],
     command2,
   );
-  await client.retry("getPreapproval", () => getPreapproval(client, partyId));
+  await client.retry(
+    "getPreapproval",
+    () => getPreapproval(client, partyId),
+    config.preapprovalRetries,
+    config.preapprovalRetryDelayMs,
+  );
 }
 
 function pubKeyPath(index: number) {
@@ -310,12 +315,13 @@ async function main() {
   if (!fs.existsSync(config.keyDirectory)) {
     fs.mkdirSync(config.keyDirectory);
   }
-  const contents = await readdir(config.keyDirectory);
-  const keyIndices = contents.map((f) => {
-    const match = f.match(/(?<index>.*)_priv.key/);
-    return parseInt(match?.groups?.index || "0");
-  });
-  const maxIndex = keyIndices.length > 0 ? Math.max(...keyIndices) + 1 : 0;
+  const dir = await opendir(config.keyDirectory);
+  let maxIndex = 0;
+  for await (const f of dir) {
+    const match = f.name.match(/(?<index>.*)_priv.key/);
+    const index = parseInt(match?.groups?.index || "0");
+    maxIndex = Math.max(maxIndex, index + 1);
+  }
   metrics.totalPartiesAllocated.record(maxIndex);
   // We just reinitialize the party at maxIndex + 1 from scratch instead of trying to be clever
   // and incrementally handle all kinds of failures.
diff --git a/project/BuildCommon.scala b/project/BuildCommon.scala
index 1fc980a041..8b9d4afd80 100644
--- a/project/BuildCommon.scala
+++ b/project/BuildCommon.scala
@@ -199,7 +199,7 @@ object BuildCommon {
     ) ++
       addCommandAlias(
         "lint",
-        "; damlDarsLockFileCheck ; scalafmtCheck ; Test / scalafmtCheck ; scalafmtSbtCheck ; scalafixAll ; apps-frontends/npmLint ; pulumi/npmLint ; load-tester/npmLint ; runShellcheck ; syncpackCheck ; illegalDamlReferencesCheck ; headerCheck",
+        "; damlDarsLockFileCheck ; scalafmtCheck ; Test / scalafmtCheck ; scalafmtSbtCheck ; scalafixAll ; apps-frontends/npmLint ; pulumi/npmLint ; load-tester/npmLint ; party-allocator/npmLint ; runShellcheck ; syncpackCheck ; illegalDamlReferencesCheck ; headerCheck",
       ) ++
 // it might happen that some DARs remain dangling on build config changes,
 // so we explicitly remove all Splice DARs here, just in case
@@ -334,6 +334,9 @@ object BuildCommon {
       .settings(
         sharedCantonSettings,
         libraryDependencies ++= Seq(
+          aws_kms,
+          aws_sts,
+          gcp_kms,
           daml_metrics,
           daml_tracing,
           daml_executors,
@@ -1550,7 +1553,10 @@ object BuildCommon {
   lazy val `canton-community-reference-driver` = {
     import CantonDependencies._
     sbt.Project
-      .apply("canton-community-reference-driver", file("canton/community/drivers/reference"))
+      .apply(
+        "canton-community-reference-driver",
+        file("canton/community/reference-sequencer-driver/"),
+      )
       .dependsOn(
         `canton-util-external`,
         `canton-community-common` % "compile->compile;test->test",
diff --git a/project/CantonDependencies.scala b/project/CantonDependencies.scala
index 523f777285..9315af7c97 100644
--- a/project/CantonDependencies.scala
+++ b/project/CantonDependencies.scala
@@ -6,7 +6,7 @@ import sbt._
 
 /** Copied from Canton OSS repo.
*/ object CantonDependencies { // Slightly changed compared to Canton OSS repo to avoid the need for a meta sbt project - val version: String = "3.4.0-snapshot.20250914.14200.0.v4c1522f4" + val version: String = "3.4.0-snapshot.20251007.14274.0.ve2024cd6" val daml_language_versions = Seq("2.1") val daml_libraries_version = version // Defined in `./daml-compiler-sources.json`, as the compiler version is also used by @@ -184,9 +184,9 @@ object CantonDependencies { lazy val janino = "org.codehaus.janino" % "janino" % "3.1.4" lazy val logstash = "net.logstash.logback" % "logstash-logback-encoder" % "6.6" - lazy val cats = "org.typelevel" %% "cats-core" % "2.6.1" - lazy val cats_law = "org.typelevel" %% "cats-laws" % "2.6.1" - lazy val cats_scalacheck = "io.chrisdavenport" %% "cats-scalacheck" % "0.2.0" + lazy val cats = "org.typelevel" %% "cats-core" % "2.9.0" + lazy val cats_law = "org.typelevel" %% "cats-laws" % "2.9.0" + lazy val cats_scalacheck = "io.chrisdavenport" %% "cats-scalacheck" % "0.3.2" lazy val chimney = "io.scalaland" %% "chimney" % "1.4.0" @@ -290,4 +290,13 @@ object CantonDependencies { lazy val protobuf_version = google_protobuf_java.revision lazy val google_protobuf_java_util = "com.google.protobuf" % "protobuf-java-util" % protobuf_version + + // AWS SDK for Java API to encrypt/decrypt keys using AWS KMS + lazy val aws_version = "2.29.5" + lazy val aws_kms = "software.amazon.awssdk" % "kms" % aws_version + lazy val aws_sts = "software.amazon.awssdk" % "sts" % aws_version + + // GCP SDK for Java API to encrypt/decrypt keys using GCP KMS + lazy val gcp_kms_version = "2.55.0" + lazy val gcp_kms = "com.google.cloud" % "google-cloud-kms" % gcp_kms_version } diff --git a/project/Headers.scala b/project/Headers.scala index b71e648b4f..d7d4589344 100644 --- a/project/Headers.scala +++ b/project/Headers.scala @@ -92,6 +92,7 @@ object Headers { ((Compile / baseDirectory).value ** "*.py") --- ((Compile / baseDirectory).value ** "configs" ** "*") --- ((Compile / baseDirectory).value ** "configs-private" ** "*") --- + ((Compile / baseDirectory).value ** "community" ** "*") --- ((Compile / baseDirectory).value ** "node_modules" ** "*") ).get diff --git a/project/ignore-patterns/canton_log.ignore.txt b/project/ignore-patterns/canton_log.ignore.txt index 483b741892..f1ed5a58e8 100644 --- a/project/ignore-patterns/canton_log.ignore.txt +++ b/project/ignore-patterns/canton_log.ignore.txt @@ -148,4 +148,10 @@ Sequencing result message timed out.*mediator= # TODO (DACH-NY/canton-network-internal#966) - remove if not necessary anymore ACS_COMMITMENT_DEGRADATION +# TODO(#2689) remove once fix consumed +the set of package dependencies is not self consistent, the extra dependencies are.*54f85eb + +# TODO(#2706) Investigate and remove once fixed +Waiting for allocation of.*on synchronizer splitwell.*timed out + # Make sure to have a trailing newline diff --git a/project/ignore-patterns/canton_log_shutdown_extra.ignore.txt b/project/ignore-patterns/canton_log_shutdown_extra.ignore.txt index 12f2c21837..8d0f10039a 100644 --- a/project/ignore-patterns/canton_log_shutdown_extra.ignore.txt +++ b/project/ignore-patterns/canton_log_shutdown_extra.ignore.txt @@ -25,4 +25,10 @@ Delaying setup of new sequencer subscription failed # db warnings related to shutdowns DB_STORAGE_DEGRADATION +# Unclean shutdown in Canton +Trying to materialize stream after materializer has been shutdown +ApiUpdateService.*AbruptTerminationException +# Consequence of the AbruptTerminationException 
+UpdateService/GetUpdates.*failed with INTERNAL
+
 # Make sure to have a trailing newline
diff --git a/project/ignore-patterns/canton_network_test_log.ignore.txt b/project/ignore-patterns/canton_network_test_log.ignore.txt
index a9e1b8c913..5a2e5b73dc 100644
--- a/project/ignore-patterns/canton_network_test_log.ignore.txt
+++ b/project/ignore-patterns/canton_network_test_log.ignore.txt
@@ -124,7 +124,7 @@ GENERIC_CONFIG_ERROR.*SpliceConfigTest
 # ExecutorServiceMetrics closes the gauges twice through shutdown and shutownNow in cases where shutdown times out
 .*InstrumentDescriptor.*name=daml.executor.* has called close\(\) multiple times
 
-Circuit breaker .* tripped after .* failures.*SpliceCircuitBreakerTest
+Circuit breaker .* tripped after .* failures.*(Command|Splice)CircuitBreakerTest
 
 # Make sure to have a trailing newline
diff --git a/scripts/planning.html b/scripts/planning.html
deleted file mode 100644
index 8e3d2f3c01..0000000000
--- a/scripts/planning.html
+++ /dev/null
@@ -1,120 +0,0 @@
[The 120 deleted lines were a small static HTML page titled "CN Team Planning Helper"; its markup did not survive extraction and is omitted here.]